code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="mt3Nv5iL_WxK" colab_type="text"
# # 1) Importing the dataset
# + [markdown] id="xZEAlLj7_Zuw" colab_type="text"
# In this section we will classify written restorent reviews, here we will predict a review is positive or negative
#
# You can use same algorithm to other kinds of text, like books (to predict the gerne of book comedy, romance, etc), newspaper article (predicting the category of article)
#
# It is a general model that you can apply on most of the text
# + id="YxUIZpu6u66C" colab_type="code" colab={}
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + id="hzhUX3u4unO_" colab_type="code" colab={}
# Importing the dataset
data = pd.read_csv('Restaurant_Reviews.tsv', sep='\t', quoting=3)
# quoting parameter will avoid any issue related to double quotes in reviews text
# code to ignore double quotes is value 3
# + id="Uz80dPpRus-1" colab_type="code" outputId="df82c4b1-081f-4d88-91f1-87c493745960" colab={"base_uri": "https://localhost:8080/", "height": 196}
data.head()
# + id="RsUe73TPyNCY" colab_type="code" outputId="de6e440f-ae4d-4d85-89c5-9da49f3aedb6" colab={"base_uri": "https://localhost:8080/", "height": 196}
data.tail()
# + id="SuvDcrUFyPZk" colab_type="code" colab={}
# in liked column 1 means review is positive
# and 0 means review is negative
# + [markdown] id="zy5rmU-F_-Vp" colab_type="text"
# # 2) Text Cleaning
# + [markdown] id="Eq8brp-aJVze" colab_type="text"
# ## Step 1: Remove numbers and punctuations
# + id="f_GbPUCfACEq" colab_type="code" outputId="a10d8a00-00b4-4a01-ddba-d04dc4d4767c" colab={"base_uri": "https://localhost:8080/", "height": 50}
import re # regular expression library
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
# + id="2rO1sGmpEDTk" colab_type="code" colab={}
# we will apply the steps of cleaning process to first review
# and then we will apply same to all reviews using for loop
# + id="QJKlbvKCF-H3" colab_type="code" outputId="f0ee964b-7ea6-4a26-9f1d-b98bba17111e" colab={"base_uri": "https://localhost:8080/", "height": 33}
data['Review'][0] # first review
# + id="6ABbcnjWF-yU" colab_type="code" colab={}
review = re.sub('[^a-zA-Z]',' ',data['Review'][0])
# only keeping the letters in the review, that means we will remove numbers, punctuations, etc
# we are keeping all the smallcase and uppercase letters
# data['review'][0] --- our first review
# after removing unwanted charactors, the remaining charactors can form a word
# so we will replace removed charactor by space(' ')
# + id="hAOPDwhgIbS_" colab_type="code" outputId="d7280320-bca8-47cf-a14c-14a2d045420d" colab={"base_uri": "https://localhost:8080/", "height": 33}
print(review)
# only letters remaining in the output
# + [markdown] id="tR3eexj9JdyC" colab_type="text"
# ## Step 2: Convert all the letters in lowercase
# + id="F118Bo7zIe-8" colab_type="code" colab={}
review = re.sub('[^a-zA-Z]',' ',data['Review'][0])
review = review.lower() # convert all the letters in lowercase
# + id="GcZdEjhTIfBT" colab_type="code" outputId="f58566c2-b155-4cc4-ec5c-706b1868eadb" colab={"base_uri": "https://localhost:8080/", "height": 33}
print(review) # all the letters are converted in lower case
# + [markdown] id="HekRlGjPKWac" colab_type="text"
# ## Step 3: Removing the stopwords
# + id="NzcLsGrhKaJd" colab_type="code" colab={}
# stopwords are the words,
# that are non relevant in predicting whether a review is positive or negative
# e.g the, and, in, a
# + id="RvdtFqfwKaL6" colab_type="code" colab={}
# to remove the stopwords we will use the for loop
# + id="furEVfxiKaOb" colab_type="code" colab={}
review = re.sub('[^a-zA-Z]',' ',data['Review'][0])
review = review.lower() # convert all the letters in lowercase
review =review.split() # a single review is a string, words are not seperated
# to gothrough all the words, we have to split the words
# + id="jBpLPoBgSSC2" colab_type="code" outputId="2ea83317-8a20-4e7e-a16c-d7535330fd4e" colab={"base_uri": "https://localhost:8080/", "height": 33}
review
# now this review is a list of elements
# + id="CmPGMgDRSdrd" colab_type="code" colab={}
review = re.sub('[^a-zA-Z]',' ',data['Review'][0])
review = review.lower() # convert all the letters in lowercase
review =review.split() # a single review is a string, words are not seperated
# to gothrough all the words, we have to split the words
review = [word for word in review if not word in set(stopwords.words('english'))] # taking all the words in review except stop words
# + id="uveBcaQVSqTS" colab_type="code" outputId="f76d33db-c637-47dc-a997-398bb7893a48" colab={"base_uri": "https://localhost:8080/", "height": 33}
print(review) # stopwords removed
# + [markdown] id="hCeVTYqvb0Es" colab_type="text"
# ## Step 4: Stemming
# + id="swxp6oy7bxyW" colab_type="code" colab={}
review = re.sub('[^a-zA-Z]',' ',data['Review'][0])
review = review.lower() # convert all the letters in lowercase
review =review.split() # a single review is a string, words are not seperated
# to gothrough all the words, we have to split the words
ps = PorterStemmer() # object of class PorterStemmer
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))] # taking all the words in review except stop words
# + id="MlBWVdDXSqWI" colab_type="code" outputId="0cc03566-8e20-4da1-aae6-95d50bd68346" colab={"base_uri": "https://localhost:8080/", "height": 33}
print(review) # stemming applies successfully to each word in review
# + [markdown] id="RZb5Wh6ugCC3" colab_type="text"
# ## Step 5: Join the tokens
# + id="0QJa5ZW0e2dw" colab_type="code" colab={}
review = re.sub('[^a-zA-Z]',' ',data['Review'][0])
review = review.lower() # convert all the letters in lowercase
review =review.split() # a single review is a string, words are not seperated
# to gothrough all the words, we have to split the words
ps = PorterStemmer() # object of class PorterStemmer
review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))] # taking all the words in review except stop words
review = ' '.join(review)
# + id="V7CYu32DgNib" colab_type="code" outputId="fb1a0794-5f13-4871-edb7-cde421ed79f0" colab={"base_uri": "https://localhost:8080/", "height": 33}
print(review) # now it is a string
# + id="csksCDNNgNuZ" colab_type="code" outputId="57f29828-e4bb-447c-b861-d94f526e2768" colab={"base_uri": "https://localhost:8080/", "height": 33}
type(review)
# + [markdown] id="fV4CaR6Lg_wo" colab_type="text"
# ## Step 6: Apply text cleaning to all the reviews in dataset
# + id="D_XFDMuWhD7s" colab_type="code" colab={}
# Build the cleaned corpus for every review in the dataset.
# Fixes: the stopword set and the PorterStemmer were rebuilt on every
# iteration (stopwords.words('english') re-reads the word list each time),
# and the review count was hard-coded to 1000 instead of using len(data).
corpus = []                                   # cleaned version of each review
stop_words = set(stopwords.words('english'))  # build the O(1)-lookup set once
ps = PorterStemmer()                          # one stemmer instance is enough
for i in range(len(data)):
    # keep letters only; removed characters become spaces so words stay separated
    review = re.sub('[^a-zA-Z]', ' ', data['Review'][i])
    review = review.lower().split()           # lowercase, then tokenize on whitespace
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(' '.join(review))           # back to a single string per review
# + id="QEx0yI4B4YI0" colab_type="code" outputId="85aacc4f-db90-44a0-8429-1ee1094f2e11" colab={"base_uri": "https://localhost:8080/", "height": 53}
print(corpus)
# + [markdown] id="9ZNtQezXi2Y_" colab_type="text"
# # 3) Bag of Words model
#
# + id="0WBSAfj3hFsL" colab_type="code" colab={}
# In Bag of Words model we will take all the words in 1000 reviews
# and here we will select unique words, no repeatition
# + id="KVAwliU1hD-n" colab_type="code" colab={}
# then we will create one column to each unique word
# after that we will put all these columns in a table, where rows = 1000 reviews and columns = words
# + id="3z4CrPLRAIDg" colab_type="code" colab={}
from sklearn.feature_extraction.text import CountVectorizer
# + id="vjzY1ygx89zF" colab_type="code" colab={}
cv = CountVectorizer(max_features=1500) # keep 1500 most frequent words
x = cv.fit_transform(corpus).toarray() # convert x into matrix using toarray method
# + id="cgcMWQFjEhda" colab_type="code" outputId="83c49b94-bed2-44c3-92ed-73733a047691" colab={"base_uri": "https://localhost:8080/", "height": 33}
x.shape # 1000 reviews and 1500 most frequent words
# + id="5e42uKiAEM1L" colab_type="code" colab={}
# dependent variable
y = data.iloc[:, 1].values # selecting column 'liked' as dependent variable to train ML model
# + [markdown] id="J8UJTaIgF6p1" colab_type="text"
# # 4) Splitting the dataset into the Train and Test set
#
# + id="UNhXO9AYF1VJ" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.20, random_state = 0)
# + id="jOaYcP-ZGVKa" colab_type="code" outputId="b4321673-34ce-4b94-d49f-c830b7fe00d8" colab={"base_uri": "https://localhost:8080/", "height": 33}
x_train.shape, y_train.shape
# + id="elEOWKVtGctb" colab_type="code" outputId="70db1602-b805-475f-a8fd-7b100a705ad3" colab={"base_uri": "https://localhost:8080/", "height": 33}
x_test.shape, y_test.shape
# + [markdown] id="7dvidc1FGYV-" colab_type="text"
# # 5) Train the model (Naive Bayes)
# + id="p0pstpWjGVS4" colab_type="code" outputId="2b8bd8fd-810d-4b0e-e949-90d89d40f09f" colab={"base_uri": "https://localhost:8080/", "height": 33}
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(x_train, y_train)
# + [markdown] id="oWl5e7OLGupG" colab_type="text"
# # 6) Predict the Test set results
# + id="4sOysfSIHV7O" colab_type="code" colab={}
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# + id="Z99s9yhaGVYF" colab_type="code" colab={}
y_pred = classifier.predict(x_test)
# + id="zixL_uroF1Sd" colab_type="code" outputId="1311f382-9b6a-40fe-ad59-1a108f19b52e" colab={"base_uri": "https://localhost:8080/", "height": 50}
# confusion_matrix
confusion_matrix(y_test, y_pred)
# + id="yJGTTbfdHaND" colab_type="code" outputId="5bbf7fd1-a278-4082-a6aa-77d96a18cc08" colab={"base_uri": "https://localhost:8080/", "height": 167}
# classification_report
print(classification_report(y_test, y_pred))
# we are getting almost 73% accuracy
# + id="bfxL80wIHkmj" colab_type="code" outputId="411cb4da-86dc-4ea9-e4a4-4541de33a815" colab={"base_uri": "https://localhost:8080/", "height": 33}
accuracy_score(y_test, y_pred)
# 73% accuracy
| 05RestaurantReviewClassificationWithNLTK.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ORGANISM: Homo_sapiens
# # Datasource: https://downloads.thebiogrid.org/BioGRID/Release-Archive/BIOGRID-3.5.182/
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
df_original = pd.read_csv("data/BIOGRID-ORGANISM-Homo_sapiens-3.5.182.tab2.txt", sep='\t')
df_original.head()
df = df_original[["Official Symbol Interactor A", "Official Symbol Interactor B"]]
df.head()
G = nx.from_pandas_edgelist(df, 'Official Symbol Interactor A', 'Official Symbol Interactor B', create_using=nx.DiGraph())
print(G.is_directed(), len(G.nodes), len(G.edges()))
# deleting self-loop edges
# Using the networkx idiom instead of scanning G.adj by hand: materializing
# the self-loop list first avoids mutating the adjacency structure while
# iterating over it.
G.remove_edges_from(list(nx.selfloop_edges(G)))
print(G.is_directed(), len(G.nodes), len(G.edges()))
# ### 3 nodes chain excluding common regulator
def common_regulator(G, node1, node2):
    """Return True if node1 and node2 share at least one in-neighbor (regulator).

    Rewritten with set intersection: O(d1 + d2) instead of the original
    O(d1 * d2) nested membership scan over two lists.
    """
    regulators1 = {src for src, _ in G.in_edges(node1)}
    regulators2 = {src for src, _ in G.in_edges(node2)}
    return not regulators1.isdisjoint(regulators2)
common_regulator(G,'marA','putP')
node_set = set()  # nodes participating in any detected 3-node chain
for node1 in G.nodes:
    for node2 in G.neighbors(node1):
        # The original tested `G.out_degree(node2) > 1` twice; the redundant
        # duplicate is dropped (behavior unchanged).
        # NOTE(review): the second test was probably meant to be
        # in_degree(node2) — confirm against the intended motif definition.
        if G.out_degree(node2) > 1 or node2 == node1:
            continue
        for node3 in G.neighbors(node2):
            # accept node3 if it is new, not a direct successor of node1
            # (has_edge == successor membership on a DiGraph), and shares
            # no upstream regulator with node1
            if (node3 not in [node1, node2]
                    and not G.has_edge(node1, node3)
                    and not common_regulator(G, node1, node3)):
                print(node1, node2, node3)
                node_set.update((node1, node2, node3))
# +
# H = G.subgraph(list(node_set))
# pos = nx.circular_layout(H)
# nx.draw(H, pos, cmap = plt.get_cmap('jet'), node_size = 2000)
# nx.draw_networkx_labels(H, pos)
# plt.show()
# -
# ### 4 nodes chain
# pattern 2 search: 4-node chains node1 -> node2 -> node3 -> node4
# The duplicated `out_degree(node2) > 1` / `out_degree(node3) > 1` tests in
# the original are de-duplicated (behavior unchanged).
# NOTE(review): one copy of each pair was probably meant to be in_degree —
# confirm against the intended motif definition.
for node1 in G.nodes:
    for node2 in G.neighbors(node1):
        if G.out_degree(node2) > 1 or node2 == node1:
            continue
        for node3 in G.neighbors(node2):
            if G.out_degree(node3) > 1 or node3 in [node1, node2]:
                continue
            for node4 in G.neighbors(node3):
                # chain endpoint must be new, not a direct successor of
                # node1, and share no upstream regulator with node1
                if (node4 not in [node1, node2, node3]
                        and not G.has_edge(node1, node4)
                        and not common_regulator(G, node1, node4)):
                    print(node1, node2, node3, node4)
# ### 5 nodes chain
# pattern 2 search: 5-node chains node1 -> node2 -> node3 -> node4 -> node5
# Duplicated out_degree tests are de-duplicated as in the shorter searches.
for node1 in G.nodes:
    for node2 in G.neighbors(node1):
        if G.out_degree(node2) > 1 or node2 == node1:
            continue
        for node3 in G.neighbors(node2):
            if G.out_degree(node3) > 1 or node3 in [node1, node2]:
                continue
            for node4 in G.neighbors(node3):
                if G.out_degree(node4) > 1 or node4 in [node1, node2, node3]:
                    continue
                for node5 in G.neighbors(node4):
                    # BUG FIX: the original tested
                    # `node4 not in [node1, node2, node3, node4]`, which is
                    # always False (node4 is in its own list), so no 5-node
                    # chain could ever be printed. The endpoint under test
                    # must be node5.
                    if (node5 not in [node1, node2, node3, node4]
                            and not G.has_edge(node1, node5)
                            and not common_regulator(G, node1, node5)):
                        print(node1, node2, node3, node4, node5)
# Comment: as originally written, the guard `node4 not in [node1, node2, node3, node4]`
# is always False, so this search could never print any chain — re-run with the
# condition testing node5 before concluding that no chain of length 5 exists.
| Homo_sapiens_BIOGRID.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
df = pd.read_csv('accidents_cleanV2.csv')
df.columns
# +
#df['FECHA'] = pd.DatetimeIndex(df['fecha'])
#f['HOUR'] = df['FECHA'].dt.hour
df['hora_incidente'].describe()
# -
# ### 1. How have the number of accidents fluctuated over the past year and a half? Have they increased over the time?
# %%time
# Month and Year columns
df['Year'] = df['fecha'].apply(lambda x: int(x.split('/')[0]))
df['Month'] = df['fecha'].apply(lambda x: int(x.split('/')[1]))
#Unique values
months = sorted(list(set(df['Month'])))
years = sorted(list(set(df['Year'])))
#Dictionary with number of accidents for each year-month combination
YearMonthCount = {y:{m:len(df[(df['Month']==m) & (df['Year']==y)]) for m in months} for y in years}
# posibles duplicados - ver columna radicado
# %%time
# Plot of the number of accidents over time (monthly)
Numbers2Plot = [YearMonthCount[y][m] for y in years for m in months if YearMonthCount[y][m]]
Labels = [str(y)[-2:]+'-'+str(m) for y in years for m in months if YearMonthCount[y][m]]
MonthlyCount=plt.figure(figsize=(17,5))
ax = MonthlyCount.add_subplot(111)
ax.plot(Labels, Numbers2Plot)
plt.xticks(rotation=90)
plt.show()
# %%time
#print(df.shape)
df['Month'] = pd.DatetimeIndex(df['fecha']).month
df['Year'] = pd.DatetimeIndex(df['fecha']).year
#grouped_df = df.groupby(['Year','Month']).count()['radicado']
#print(grouped_df.head(20))
fig, ax = plt.subplots(figsize=(20,5))
df.groupby(['Year', 'Month']).count()['radicado'].plot(ax=ax)
# ### 2. For any particular day, during which hours are accidents most likely to occur?
# %%time
x=df['hora_incidente'].value_counts(dropna=True)
HourlyCount=plt.figure(figsize=(12,5))
ax = HourlyCount.add_subplot(111)
plt.bar(x.index, x.values)
plt.xticks(x.index)
plt.title("Hourly accident count in Medellin")
plt.show()
x.values
fig, ax = plt.subplots(figsize=(20,5))
df.groupby('hora_incidente').count()[['radicado']].plot.bar(ax=ax)
df.groupby('hora_incidente').count()[['radicado']]
df['hora_incidente'].describe()
# ### 3. Are there more accidents on weekdays than weekends?
# Day names arrive as fixed-width 9-character strings (padded with spaces),
# so remove every space to normalize them, e.g. "LUNES    " -> "LUNES"
df['dia_nombre']=df['dia_nombre'].apply(lambda x: x.replace(' ', ''))
# %%time
x=df['dia_nombre'].value_counts()
OrderedDays = ['LUNES', 'MARTES', 'MIÉRCOLES', 'JUEVES', 'VIERNES', 'SÁBADO', 'DOMINGO']
AccidentCount = [x[i] for i in OrderedDays]
DayOfWeekCount=plt.figure(figsize=(12,5))
ax = DayOfWeekCount.add_subplot(111)
plt.bar(OrderedDays, AccidentCount)
plt.xticks(x.index)
plt.title("Accident count by day of the week in Medellin")
for i in ax.patches:
ax.text(i.get_x()+0.3, i.get_height()+30, i.get_height())
plt.show()
df['FECHA'] = pd.to_datetime(df['fecha'])
df['WEEKDAYNAME'] = df['FECHA'].dt.day_name()
df['WEEKDAY'] = df['FECHA'].dt.weekday
fig, ax = plt.subplots(figsize=(20,5))
df.groupby('WEEKDAY').count()[['radicado']].plot.bar(ax=ax)
df.groupby('WEEKDAY').count()[['radicado']]
| backend/src/playground/MJ/TrafficAccidents.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from SimPEG import *
from simpegEM1D import *
from scipy.constants import mu_0
import numpy as np
frequency = np.array([382, 1822, 7970, 35920, 130100], dtype=float)
hz = get_vertical_discretization_frequency(frequency, sigma_background=0.01)
mesh1D = set_mesh_1d(hz)
depth = -mesh1D.gridN[:-1]
LocSigZ = -mesh1D.gridCC
FDsurvey = EM1DSurveyFD(
rx_location = np.array([0., 0., 100.+30.]),
src_location = np.array([0., 0., 100.+30.]),
field_type = 'secondary',
rx_type = 'ppm',
src_type = 'VMD',
offset = np.ones(5)*7.86,
topo = np.r_[0., 0., 100.],
depth = depth,
frequency = frequency
)
sig_half = 1e-3
sig_blk = sig_half*50.
chi_half = 0.
expmap = Maps.ExpMap(mesh1D)
sig = np.ones(FDsurvey.n_layer)*sig_half
blk_ind = (-50>LocSigZ) & (-80<LocSigZ)
sig[blk_ind] = sig_blk
# sig[-150>LocSigZ] = 0.1
m_true = np.r_[np.log(sig)]
# -
prob = EM1D(
mesh1D, sigmaMap=expmap,
chi= np.zeros(FDsurvey.n_layer),
verbose=False
)
if prob.ispaired:
prob.unpair()
if FDsurvey.ispaired:
FDsurvey.unpair()
prob.pair(FDsurvey)
d_true = FDsurvey.dpred(m_true)
m0 = np.ones_like(m_true) * np.log(sig_half)
d_0 = FDsurvey.dpred(m0)
FDsurvey.dtrue = d_true
std = 0.05
floor = 0.
np.random.seed(1)
uncert = std*abs(FDsurvey.dtrue)+floor
noise = std*FDsurvey.dtrue*np.random.randn(FDsurvey.dtrue.size)
FDsurvey.dobs = FDsurvey.dtrue+noise
doi, _=prob.depth_of_investigation(uncert)
delta = prob.get_threshold(uncert)
print (doi, delta)
from SimPEG import Regularization
# +
dmisfit = DataMisfit.l2_DataMisfit(FDsurvey)
dmisfit.W = 1./(abs(FDsurvey.dobs)*std+floor)
reg = Regularization.Sparse(
mesh1D, mapping=Maps.IdentityMap(mesh1D)
)
p=0.
qx, qz = 2., 2.
reg.norms = np.c_[p, qx, qz, 0.]
IRLS = Directives.Update_IRLS(
maxIRLSiter=20, minGNiter=1, fix_Jmatrix=True, coolingRate=2,
betaSearch=False,
chifact_start = 1.
)
opt = Optimization.ProjectedGNCG(maxIter = 40)
opt.maxIterLS = 5
invProb = InvProblem.BaseInvProblem(dmisfit, reg, opt)
beta = Directives.BetaSchedule(coolingFactor=2, coolingRate=2)
betaest = Directives.BetaEstimate_ByEig(beta0_ratio=1.)
target = Directives.TargetMisfit()
sense = Directives.UpdateSensitivityWeights(threshold=delta)
# inv = Inversion.BaseInversion(invProb, directiveList=[beta,betaest,target])
inv = Inversion.BaseInversion(invProb, directiveList=[IRLS, betaest])
prob.counter = opt.counter = Utils.Counter()
opt.LSshorten = 0.5
opt.remember('xc')
mopt = inv.run(m0)
# -
doi, act_ind = prob.depth_of_investigation(uncert)
print (doi)
# %pylab inline
fig, ax = subplots(1,1, figsize=(5, 8))
Utils1D.plotLayer(sig, mesh1D, showlayers=False)
Utils1D.plotLayer(expmap*mopt, mesh1D, showlayers=False, **{'color':'r'})
Utils1D.plotLayer(expmap*invProb.l2model, mesh1D, showlayers=False, **{'color':'b', 'lw':1})
print (doi)
delta = prob.get_threshold(uncert)
print (delta)
# +
# # !python run_em1d.py
# -
fig, axes = subplots(1,1, figsize = (7,5))
axes.plot(FDsurvey.frequency, -invProb.dpred[0:FDsurvey.n_frequency], 'k.-')
axes.plot(FDsurvey.frequency, -invProb.dpred[FDsurvey.n_frequency:], 'b.-')
axes.plot(FDsurvey.frequency, -FDsurvey.dobs[0:FDsurvey.n_frequency], 'ko')
axes.plot(FDsurvey.frequency, -FDsurvey.dobs[FDsurvey.n_frequency:], 'bo')
# axes.plot(FDsurvey.frequency, uncert[0:FDsurvey.n_frequency], 'k*')
# axes.plot(FDsurvey.frequency, uncert[FDsurvey.n_frequency:], 'r*')
axes.set_xscale('log')
from scipy.linalg.blas import cgemm
import numpy as np
fig, ax = subplots(1,1, figsize=(5, 8))
Utils1D.plotLayer(sig, mesh1D, showlayers=True)
| notebooks/examples/EM1D_inversion_FD_sensitivity_weight.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Functions and Methods Homework Solutions
# ____
# **Write a function that computes the volume of a sphere given its radius.**
def vol(rad):
    """Return the volume of a sphere of radius *rad*.

    Fix: uses the exact math.pi instead of the hard-coded 3.14
    approximation (which was off by ~0.05%).
    """
    from math import pi  # local import keeps this notebook cell self-contained
    return (4.0 / 3.0) * pi * rad ** 3
# ___
# **Write a function that checks whether a number is in a given range (Inclusive of high and low)**
def ran_check(num, low, high):
    """Print whether *num* lies in the inclusive range [low, high].

    Fixes: Python-2 print statements (a syntax error on Python 3) are
    replaced with the print() function, and `num in range(low, high+1)`
    is replaced by a chained comparison — O(1), and it also classifies
    in-range non-integers (e.g. 3.5) correctly instead of rejecting them.
    """
    if low <= num <= high:
        print(" %s is in the range" % str(num))
    else:
        print("The number is outside the range.")
# If you only wanted to return a boolean:
def ran_bool(num, low, high):
    """Return True if *num* equals some integer in [low, high] (inclusive)."""
    return any(num == candidate for candidate in range(low, high + 1))
ran_bool(3,1,10)
# ____
# **Write a Python function that accepts a string and calculate the number of upper case letters and lower case letters.**
#
# Sample String : 'Hello <NAME>, how are you this fine Tuesday?'
# Expected Output :
# No. of Upper case characters : 4
# No. of Lower case Characters : 33
#
# If you feel ambitious, explore the Collections module to solve this problem!
def up_low(s):
    """Count and report the upper-case and lower-case letters in *s*.

    Fix: the Python-2 print statements (a syntax error on Python 3) are
    replaced with the print() function. Non-letter characters (digits,
    spaces, punctuation) are ignored, as before.
    """
    counts = {"upper": 0, "lower": 0}
    for ch in s:
        if ch.isupper():
            counts["upper"] += 1
        elif ch.islower():
            counts["lower"] += 1
        # everything else is deliberately skipped
    print("Original String : ", s)
    print("No. of Upper case characters : ", counts["upper"])
    print("No. of Lower case Characters : ", counts["lower"])
s = 'Hello <NAME>, how are you this fine Tuesday?'
up_low(s)
# ____
# **Write a Python function that takes a list and returns a new list with unique elements of the first list.**
#
# Sample List : [1,1,1,1,2,2,3,3,3,3,4,5]
# Unique List : [1, 2, 3, 4, 5]
def unique_list(l):
    """Return a new list with duplicates removed, preserving first-seen order.

    Fix: dict.fromkeys keeps insertion order (Python 3.7+) and gives O(n)
    de-duplication instead of the original O(n^2) membership-scan loop.
    Elements must be hashable (true of the sample inputs).
    """
    return list(dict.fromkeys(l))
unique_list([1,1,1,1,2,2,3,3,3,3,4,5])
# ____
# **Write a Python function to multiply all the numbers in a list.**
#
# Sample List : [1, 2, 3, -4]
# Expected Output : -24
def multiply(numbers):
    """Return the product of all elements of *numbers* (1 for an empty list)."""
    product = 1
    for value in numbers:
        product = product * value
    return product
multiply([1,2,3,-4])
# ____
# **Write a Python function that checks whether a passed string is palindrome or not.**
#
# Note: A palindrome is word, phrase, or sequence that reads the same backward as forward, e.g., madam or nurses run.
def palindrome(s):
    """Return True if *s* reads the same forwards and backwards, ignoring spaces."""
    compact = "".join(s.split(" "))  # drop every space character
    return compact == compact[::-1]  # compare against its own reversal
palindrome('nurses run')
palindrome('abcba')
# ____
# **Hard**:
#
# Write a Python function to check whether a string is pangram or not.
#
# Note : Pangrams are words or sentences containing every letter of the alphabet at least once.
# For example : "The quick brown fox jumps over the lazy dog"
#
# Hint: Look at the string module
# +
import string
def ispangram(str1, alphabet=string.ascii_lowercase):
    """Return True if *str1* contains every letter of *alphabet* at least once."""
    lowered = str1.lower()
    return all(letter in lowered for letter in alphabet)
# -
ispangram("The quick brown fox jumps over the lazy dog")
string.ascii_lowercase
| Complete-Python-Bootcamp-master/Functions and Methods Homework - Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pseudo-LIDAR
# # Overview
# An approach to creating LIDAR-like point clouds without working with stereo images, or depth camera but just working with a single image input (is applicable to Tesla).
#
# ## First Step:
# - Perform monocular depth estimation and generate pseudo-LIDAR for the entire scene by lifting every pixel within the image into its 3D coordinate.
# - Then train LIDAR-based 3D detection network with the pseudo-LIDAR.
# - Using LIDAR-based 3D detector, **Frustum PointNets**, we detect the 2D object proposals in the input image and extract a point cloud frustum from the pseudo-LIDAR for each 2D proposal. Then, an oriented 3D bounding box is detected for each frustum.
# ## Problems:
# 1. Depth estimation based on a monocular image is inaccurate because of local misalignment, especially for the objects that are far off.
# 2. The extracted Point cloud always has a long-tail because it is hard to estimate depth near the edge/periphery of an object. This means that there are always extra points that are shown as belonging to the object when they actually don't.
# ## Solutions:
# 1. To solve local misalignment, when projecting the 3D box onto the image, we use a 2D-3D bounding box consistency constraint i.e. the 3D bounding box overlaps with the 2D detected proposals on the image. During training, we formulate the constraint as bounding box consistency loss (BBCL) to supervise learning.
# - During testing, a bounding box consistency optimization (BBCO) is solved subject to this constraint using a global optimization method to further improve the prediction results.
#
# 2. To deal with the long-tail of points proposed as belonging to the object, we porpose to use mask segmentation instead of 2D bounding boxes around the object because that would define the object pixel by pixel.
#
# # Other Approaches
# Models using 2D-3D bounding box consistency constraint are also used to predict 3D bounding boxes using 2D processing. For example, one proposal is to use 2D CNNs to predict a subset of features like the object orientation and size. During testing, we combine the estimates with the constraint to compute te remaining parameters like the object center location.
# # Pseudo-LiDAR Approach:
#
# Goal: Using one RGB image to estimate 3D bounding box of objects.
# Parameters for the 3D bounding box (total 7):
# Object center: (x,y,z)
# Object's size: (h, w, l)
# Heading angle: (theta)
#
#
# 
#
| Research Papers/PseudoLiDAR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.9 64-bit (''pyvenv'': venv)'
# language: python
# name: python3
# ---
# import sys
# sys.path.append("../")
import pandas as pd
from pyecharts.faker import Faker
from pandasecharts import echart
products = Faker.choose()
dfs = []
for year in (2012, 2013, 2014, 2015):
dfs.append(
pd.DataFrame(
zip(products, Faker.values(), Faker.values(), [year]*len(products)),
columns=["商品", "商家A", "商家B", "年份"]
)
)
df = pd.concat(dfs, axis=0)
df.head()
df.echart.bar3d("年份", "商品", "商家A", visualmap=True, visualmap_opts={"max_": 170}).render_notebook()
| examples/bar3d_examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise: Mean & Median Customer Spend
# Here's some code that will generate some random e-commerce data; just an array of total amount spent per transaction. Select the code block, and hit "play" to execute it:
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
incomes = np.random.normal(100.0, 20.0, 10000)
plt.hist(incomes, 50)
plt.show()
# -
# Now, find the mean and median of this data. In the code block below, write your code, and see if your result makes sense:
# This is pretty much the world's easiest assignment, but we're just trying to get your hands on iPython and writing code with numpy to get you comfortable with it.
#
# Try playing with the code above to generate different distributions of data, or add outliers to it to see their effect.
| MeanMedianExercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py3]
# language: python
# name: conda-env-py3-py
# ---
import numpy as np
import random
x = 1
x += 1
x = x + 1 # exact equivalent
x
k1 = 0.015
k2 = 0.2
def get_rate(r):
    """Scope demo: assignment inside a function binds a LOCAL name,
    while plain reads fall through to the enclosing global scope."""
    k1 = 2. # we can assign the local name k1 to a new object, '2.'
    print("inside k1=",k1)
    print("inside k2=",k2) # and we can access the global k2
get_rate(1)
print("outside k1=",k1) # outside, the name k1 still refers to the old object
print("outside k2=",k2)
# +
k1 = 0.015
k2 = 0.2
def get_rate(r):
k1 = 2.
print("inside k1=",k1)
k2 = k2 + 1 # if we wish to assign the name k2 to something
# then that makes it a local variable, which means
# we can't access it before we assign it. Thus error.
print("inside k2=",k2)
get_rate(1)
print("outside k1=",k1)
print("outside k2=",k2)
# +
k1 = 0.015
k2 = 0.2
def get_rate(r):
global k2 # if we declare the name k2 to be global within this function...
k1 = 2.
print("inside k1=",k1)
k2 = k2 + 1 # ...then we can access it *and* reassign it...
print("inside k2=",k2)
get_rate(1)
print("outside k1=",k1)
print("outside k2=",k2) # ...and we've changed what k2 refers to globally
# -
a = 1.
def function():
print(a) # we can ACCESS a global variable
function()
a = 1.
def function():
a += 1 # but we can't ASSIGN to a global variable
# (without declaring it global)
print(a)
function()
a = [1.]
def function():
a[0] += 1 # here we aren't REASSIGNING the name 'a'
# but ACCESSING it and changing a value within it!
print(a)
function()
rate = 10.
# There's a (very small) chance random.random() == 0 and this crashes
rate**-1 * np.log( 1./(random.random()))
# We can solve that by subtracting it from 1.0
rate**-1 * np.log( 1./(1.0-random.random()) )
# or we can use the built in function
random.expovariate(rate)
# +
# this will show its source code
# random.expovariate??
# -
# to time how long something takes, use this %timeit 'magic'
# %timeit rate ** -1
# This is faster, because no need to take logarithms, multiply, and exponentiate
# %timeit 1./rate
# This is slower (has to cast 1 from an int to a float when it realizes rate is a float)
# %timeit 1/rate
x=2.5
# %%timeit
# to time an entire notebook cell, start with %%timeit
x**8
# %%timeit
x*x*x*x*x*x*x*x
# %%timeit
xsquared = x*x
xfour = xsquared*xsquared
xfour*xfour
# +
# You'll get a sense for what takes time,
# but it's hard to know for sure what will be slow until you try it!
# -
| 2016-10-05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# +
# !pip install beautifulsoup4
# !pip install lxml
import requests # library to handle requests
import pandas as pd # library for data analsysis
import numpy as np # library to handle data in a vectorized manner
import random # library for random number generation
# #!conda install -c conda-forge geopy --yes
#from geopy.geocoders import Nominatim # module to convert an address into latitude and longitude values
# libraries for displaying images
from IPython.display import Image
from IPython.core.display import HTML
from IPython.display import display_html
import pandas as pd
import numpy as np
# tranforming json file into a pandas dataframe library
from pandas.io.json import json_normalize
# !conda install -c conda-forge folium=0.5.0 --yes
import folium # plotting library
from bs4 import BeautifulSoup
from sklearn.cluster import KMeans
import matplotlib.cm as cm
import matplotlib.colors as colors
print('Folium installed')
print('Libraries imported.')
# -
# Scrape the Wikipedia table of Toronto postal codes into a DataFrame.
source = requests.get('https://en.wikipedia.org/wiki/List_of_postal_codes_of_Canada:_M').text
soup=BeautifulSoup(source,'lxml')
print(soup.title)
from IPython.display import display_html
tab = str(soup.table)
# render the raw HTML table inline, then let pandas parse the same HTML
display_html(tab,raw=True)
dfs = pd.read_html(tab)
df=dfs[0]
df.head()
df.shape
# !pip install geopy
from geopy.geocoders import Nominatim # module to convert an address into latitude and longitude values
# +
# Dropping the rows where Borough is 'Not assigned'
df1 = df[df.Borough != 'Not assigned']
# Combining the neighbourhoods with same Postalcode
# groupby + agg(', '.join) concatenates the neighbourhood strings per (Postal Code, Borough)
df2 = df1.groupby(['Postal Code','Borough'], sort=False).agg(', '.join)
df2.reset_index(inplace=True)
# Replacing the name of the neighbourhoods which are 'Not assigned' with names of Borough
df2['Neighborhood'] = np.where(df2['Neighborhood'] == 'Not assigned',df2['Borough'], df2['Neighborhood'])
df2
# -
df2.shape
# Geospatial_data maps each postal code to a latitude/longitude pair
lat_lon = pd.read_csv('https://cocl.us/Geospatial_data')
lat_lon.head()
# normalize the join key name on both frames before merging
df2.rename(columns={'Postal Code':'Postcode'},inplace=True)
df2
lat_lon.rename(columns={'Postal Code':'Postcode'},inplace=True)
df3 = pd.merge(df2,lat_lon,on='Postcode')
df3.head()
# keep only boroughs whose name contains 'Toronto'
# NOTE(review): df4 is a filtered slice of df3 -- later in-place edits on it
# can trigger SettingWithCopyWarning; confirm a .copy() isn't needed here.
df4 = df3[df3['Borough'].str.contains('Toronto',regex=False)]
df4
# +
# Plot each Toronto neighbourhood as a circle marker on a folium map.
map_toronto = folium.Map(location=[43.651070,-79.347015],zoom_start=10)
for lat,lng,borough,Neighborhood in zip(df4['Latitude'],df4['Longitude'],df4['Borough'],df4['Neighborhood']):
    label = '{}, {}'.format(Neighborhood, borough)
    label = folium.Popup(label, parse_html=True)
    folium.CircleMarker(
        [lat,lng],
        radius=5,
        popup=label,
        color='blue',
        fill=True,
        fill_color='#3186cc',
        fill_opacity=0.7,
        parse_html=False).add_to(map_toronto)
map_toronto
# -
# K-means on the latitude/longitude columns only.
k=5
# NOTE(review): positional axis argument (`,1`) is deprecated (removed in
# pandas 2.x) -- drop(columns=[...]) is the modern spelling; left unchanged.
toronto_clustering = df4.drop(['Postcode','Borough','Neighborhood'],1)
kmeans = KMeans(n_clusters = k,random_state=0).fit(toronto_clustering)
kmeans.labels_
# NOTE(review): df4 is a filtered slice -- insert() on it may raise
# SettingWithCopyWarning; confirm this in-place mutation is intended.
df4.insert(0, 'Cluster Labels', kmeans.labels_)
df4
# +
# create map
map_clusters = folium.Map(location=[43.651070,-79.347015],zoom_start=10)
# set color scheme for the clusters
x = np.arange(k)
ys = [i + x + (i*x)**2 for i in range(k)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lon, neighbourhood, cluster in zip(df4['Latitude'], df4['Longitude'], df4['Neighborhood'], df4['Cluster Labels']):
    label = folium.Popup(' Cluster ' + str(cluster), parse_html=True)
    # NOTE(review): cluster-1 makes cluster 0 use rainbow[-1] (wraps to the
    # last colour); this works but rainbow[cluster] may be what was intended.
    folium.CircleMarker(
        [lat, lon],
        radius=5,
        popup=label,
        color=rainbow[cluster-1],
        fill=True,
        fill_color=rainbow[cluster-1],
        fill_opacity=0.7).add_to(map_clusters)
map_clusters
# -
| Final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import pandas as pd
import pyprind
from skfeature.function.information_theoretical_based import FCBF
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import KFold, train_test_split,cross_val_score
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
# Load the click rows (presumably the rows where target 'clicks' == 1,
# per the notes below -- confirm against how the file was produced).
df_clicks = pd.read_csv('dataset/dataset_clicks.csv')
df_clicks.shape
df_clicks.head()
# ## Prepare dataset
#
# This dataset has nearly 10 million rows and 182 columns. There is not much information on what each column means other than its name. The target variable 'clicks' has only 1869 rows with value of one, and the rest is zero. I chose to randomly sample 100,000 rows out of every 1 million rows as df_zero, and concat with df_clicks for modeling.
#
# To prepare dataframe for modeling, first drop all columns with null values. Separate columns into numerical variables (num_var) and categorical variables (cat_var). From preliminary modeling, I noticed both num_var and cat_var have some columns with many unique values and/or has 'id' in column names, indicating they could be unique identifiers. I chose to throw out such columns in cat_var with >100 unique values, and columns in num_var with >100 unique values and has 'id' in its name.
def prepare_df(df_clicks,df_zero):
    """Concatenate positive-click rows with sampled negatives and clean for modeling.

    Steps (Python 2 code -- uses print statements):
      1. drop every column that contains any nulls,
      2. split remaining columns into categorical (object dtype) / numerical,
      3. keep only categoricals with < 100 unique values and label-encode them,
      4. drop numerical columns that look like unique identifiers
         ('id' in the name and > 100 unique values).

    Returns the cleaned DataFrame (still including the 'clicks' target).
    """
    df = pd.concat([df_clicks,df_zero],axis=0,ignore_index=True)
    #remove columns with null
    df = df.dropna(axis=1)
    print df.shape
    #separate numerical and categorical variables
    cols_total = df.columns
    num_var = []
    cat_var = []
    for col in cols_total:
        if df[col].dtype =='object':
            cat_var.append(col)
        else:
            num_var.append(col)
    print len(num_var), len(cat_var)
    #check unique values in cat_var
    for cat in cat_var:
        print cat, len(df[cat].unique())
    #threw away columns with more than 100 unique values
    cat_var2=[]
    for cat in cat_var:
        if len(df[cat].unique())<100:
            cat_var2.append(cat)
    print len(cat_var2),len(cat_var)
    #drop columns not in cat_var2 and convert the rest with LabelEncoder()
    df_new = df.copy()
    for cat in cat_var:
        if cat in cat_var2:
            le = LabelEncoder()
            unq_vals = df_new[cat].unique()
            le.fit(unq_vals)
            df_new[cat] = le.transform(df_new[cat])
        else:
            df_new.drop(cat,axis=1,inplace=True)
    #drop numerical variables with 'id' in name and >100 unique values
    for col in num_var:
        if 'id' in col and len(df_new[col].unique())>100:
            df_new.drop(col,axis=1,inplace=True)
            print col
    print df_new.shape
    return df_new
# +
#sample 1/10 or 100,000 for the first 1 million rows
# NOTE(review): skiprows=500000 actually reads rows ~500k-1M (and skips the
# header line, hence names=df_clicks.columns) -- confirm the intended window.
num = 500000
temp = pd.read_csv('dataset/dataset.csv',skiprows=500000,\
                   nrows=num, names=df_clicks.columns,low_memory=False)
print temp.shape
# keep 100k randomly chosen negative (clicks == 0) rows
df_zero1 = temp[temp['clicks']==0].sample(100000)
print df_zero1.shape
# -
df_new1 = prepare_df(df_clicks,df_zero1)
# ## Feature Selection with skfeature
#
# Use skfeature.FCBF for feature selection. This is a fast correlation-based filter solution, suitable for high-dimension data.
# Split data into 5 fold, run FCBF to select features, model with randomforest and get average of roc_auc_score as metrics. To my surprise, with less than 5 features, the roc_auc_score is already 1.0.
#
# Randomly sample another df_zero from row number 1 million to 2 million and run the model again. Got similar features and perfect score.
#
# Randomly sample another df_zero from row number 2 million to 3 million and run one of the model to get test score. Perfect score.
def test_FCBF(df):
    """5-fold CV: FCBF feature selection + random forest, reporting mean ROC-AUC.

    For each fold, FCBF selects up to `num_fea` features on the training
    split, a 100-tree random forest is fit on those features, and
    roc_auc_score accumulates over the held-out folds.  Prints the selected
    features with their importances and the 5-fold average score.
    (Python 2 / legacy sklearn API: KFold(n_samples, n_folds=...) from
    sklearn.cross_validation.)
    """
    df_X = df.drop('clicks',axis=1)
    X = df_X.values # data
    y = df['clicks'] # label
    n_samples, n_features = X.shape
    # split data into 5 folds
    ss = KFold(n_samples, n_folds=5, shuffle=True)
    # perform evaluation on classification task
    num_fea = 20 # number of selected features
    clf = RandomForestClassifier(n_estimators=100,n_jobs=-1)
    score = 0.
    for train, test in ss:
        # obtain the index of each feature on the training set
        # NOTE(review): newer skfeature versions return a tuple here; this
        # code assumes fcbf() returns an index array -- verify the version.
        idx = FCBF.fcbf(X[train], y[train], n_selected_features=num_fea)
        print idx
        # obtain the dataset on the selected features
        features = X[:, idx[0:num_fea]]
        features_names = df_X.columns[idx]
        clf.fit(features[train], y[train])
        #print clf.feature_importances_, cannot use df because dropped clicks
        feature_importance = [format(x,'.2f') for x in clf.feature_importances_]
        zip_feature = zip(features_names, feature_importance)
        # importances are '.2f' strings, so this sort is lexicographic --
        # equivalent to numeric order for fixed-width values in [0, 1)
        print sorted(zip_feature, key = lambda x: x[1], reverse=True)
        # predict the class labels of test data
        y_predict = clf.predict(features[test])
        # obtain the classification accuracy on the test data
        score+= metrics.roc_auc_score(y[test], y_predict)
    # output the average classification accuracy over all 5 folds
    print '\nROC_AUC_Score:', float(score)/5
test_FCBF(df_new1)
#
# +
#sample 1/10 or 100,000 for row number after 1 million
# (skiprows drops the first ~1M lines; column names are re-attached from
#  df_clicks.columns because the header line is skipped too)
num = 500000
temp = pd.read_csv('dataset/dataset.csv',skiprows=1000000,\
                   nrows=num, names=df_clicks.columns,low_memory=False)
print temp.shape
df_zero2 = temp[temp['clicks']==0].sample(100000)
print df_zero2.shape
# -
# repeat feature selection on an independent negative sample
df_new2 = prepare_df(df_clicks,df_zero2)
test_FCBF(df_new2)
# +
#sample 1/10 or 100,000 for row number after 2 million as test set
num = 500000
temp = pd.read_csv('dataset/dataset.csv',skiprows=2000000,\
                   nrows=num, names=df_clicks.columns,low_memory=False)
print temp.shape
df_zero3 = temp[temp['clicks']==0].sample(100000)
print df_zero3.shape
# -
# held-out evaluation set
df_new3 = prepare_df(df_clicks,df_zero3)
#pick a simple three-feature model from df_new2
cols_use = ['spend_hours','imps','advertiser_id']
X_train = df_new2[cols_use].values
y_train = df_new2['clicks']
clf = RandomForestClassifier(n_estimators=100,n_jobs=-1)
clf.fit(X_train,y_train)
X_test = df_new3[cols_use].values
y_test = df_new3['clicks']
y_pred = clf.predict(X_test)
print metrics.roc_auc_score(y_test,y_pred)
print len(y_test==y_pred),len(y_test)
| 12GB_feature_selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from tqdm import tqdm
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import keras
from keras.layers import Input
from keras.models import Model, Sequential
from keras.layers.core import Reshape, Dense, Dropout, Flatten
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import Convolution2D, UpSampling2D
from keras.layers.normalization import BatchNormalization
from keras.datasets import mnist
from keras.optimizers import Adam
from keras import backend as K
from keras import initializers
# +
K.set_image_dim_ordering('th')
# Deterministic output.
# Tired of seeing the same results every time? Remove the line below.
np.random.seed(1000)
# The results are a little better when the dimensionality of the random vector is only 10.
# The dimensionality has been left at 100 for consistency with other GAN implementations.
randomDim = 100
# Load MNIST data
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# scale pixels from [0, 255] to [-1, 1] to match the generator's tanh output
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = X_train.reshape(60000, 784)
# Optimizer
adam = Adam(lr=0.0002, beta_1=0.5)
# Generator: noise vector (randomDim) -> flattened 28x28 image (784, tanh)
generator = Sequential()
generator.add(
    Dense(
        256,
        input_dim=randomDim,
        kernel_initializer=initializers.RandomNormal(stddev=0.02)))
generator.add(LeakyReLU(0.2))
generator.add(Dense(512))
generator.add(LeakyReLU(0.2))
generator.add(Dense(1024))
generator.add(LeakyReLU(0.2))
generator.add(Dense(784, activation='tanh'))
generator.compile(loss='binary_crossentropy', optimizer=adam)
# Discriminator: flattened image (784) -> real/fake probability (sigmoid)
discriminator = Sequential()
discriminator.add(
    Dense(
        1024,
        input_dim=784,
        kernel_initializer=initializers.RandomNormal(stddev=0.02)))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
discriminator.add(Dense(512))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
discriminator.add(Dense(256))
discriminator.add(LeakyReLU(0.2))
discriminator.add(Dropout(0.3))
discriminator.add(Dense(1, activation='sigmoid'))
discriminator.compile(loss='binary_crossentropy', optimizer=adam)
# Combined network
# Freeze D inside the stacked GAN so generator updates don't also move D
discriminator.trainable = False
ganInput = Input(shape=(randomDim, ))
x = generator(ganInput)
ganOutput = discriminator(x)
gan = Model(inputs=ganInput, outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)
# per-epoch mean losses, appended by train()
dLosses = []
gLosses = []
# Plot the loss from each batch
def plotLoss(epoch):
    """Save a figure of the discriminator/generator loss history to images/."""
    fig, ax = plt.subplots(figsize=(10, 8))
    ax.plot(dLosses, label='Discriminitive loss')
    ax.plot(gLosses, label='Generative loss')
    ax.set_xlabel('Epoch')
    ax.set_ylabel('Loss')
    ax.legend()
    fig.savefig('images/loss_epoch_%06d.png' % epoch)
# Create a wall of generated MNIST images
def plotGeneratedImages(epoch, examples=100, dim=(10, 10), figsize=(10, 10)):
    """Sample `examples` images from the generator and save a dim[0] x dim[1]
    grid of them to images/."""
    latent = np.random.normal(0, 1, size=[examples, randomDim])
    images = generator.predict(latent).reshape(examples, 28, 28)
    plt.figure(figsize=figsize)
    for idx, img in enumerate(images):
        plt.subplot(dim[0], dim[1], idx + 1)
        plt.imshow(img, interpolation='nearest', cmap='gray_r')
        plt.axis('off')
    plt.tight_layout()
    plt.savefig('images/generated_image_epoch_%06d.png' % epoch)
# Save the generator and discriminator networks (and weights) for later use
def saveModels(epoch):
    """Write HDF5 checkpoints of both networks for the given epoch to models/."""
    generator.save('models/generator_epoch_%06d.h5' % epoch)
    discriminator.save('models/discriminator_epoch_%06d.h5' % epoch)
def createNoise(generatedImages, thresholding, depth):
    """Recursively resample generated images the discriminator rates as 'real'.

    Any sample whose D-score exceeds `thresholding` is replaced in place with
    a fresh generator sample; the whole batch is then re-checked, up to a
    recursion depth of 20.  Returns the (mutated) batch.
    """
    if depth > 20:
        # recursion guard: give up after 20 rounds of resampling
        return generatedImages
    noise_samples = discriminator.predict(generatedImages)
    flag = False
    for index, ns in enumerate(noise_samples):
        if ns > thresholding:
            flag = True
            # replace this over-convincing fake with a brand-new sample
            generatedImages[index] = generator.predict(
                np.random.normal(0, 1, size=[1, randomDim]))
    if flag == True:
        generatedImages = createNoise(generatedImages, thresholding, depth + 1)
    return generatedImages
def set_step(epoch):
    """Split a 20-step budget between D and G in proportion to their latest losses.

    Reads the loss history files saved by train(); the worse-performing
    network (larger recent loss) gets more update steps.  `epoch` is unused
    but kept for interface compatibility with existing callers.

    Returns (d_step, g_step).  Each share is clamped to at least 1: with the
    original bare int() cast, a dominant loss could floor the other share to
    0, silently skipping that network's training for the whole epoch.
    """
    dloss_npy = np.load('Loss/dynamic_D_loss.npy')
    gloss_npy = np.load('Loss/dynamic_G_loss.npy')
    loss_all = dloss_npy[-1] + gloss_npy[-1]
    d_share = dloss_npy[-1] / loss_all
    g_share = gloss_npy[-1] / loss_all
    return max(1, int(d_share * 20)), max(1, int(g_share * 20))
def train(epochs=1, batchSize=128, thresholding=0.95):
    """Adversarial training loop with a dynamically balanced D/G step budget.

    For the first 20 epochs D and G each get 10 update steps per outer batch
    iteration; afterwards set_step() re-splits a 20-step budget according to
    the losses saved from the previous epoch.  Fake batches pass through
    createNoise() so samples the discriminator already scores above
    `thresholding` are resampled.  Saves loss curves, sample-image grids and
    model checkpoints as side effects.
    """
    # NOTE(review): plain '/' makes this a float under Python 3 (this
    # notebook's kernel); it is int()-cast before use in range() below.
    batchCount = X_train.shape[0] / batchSize
    print('Epochs:', epochs)
    print('Batch size:', batchSize)
    print('Batches per epoch:', batchCount)
    # output directories for images, checkpoints and loss history
    if not os.path.exists('images'):
        os.makedirs('images')
    if not os.path.exists('models'):
        os.makedirs('models')
    if not os.path.exists('Loss'):
        os.makedirs('Loss')
    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        #tq = tqdm(range(int(batchCount)))
        #for _ in tqdm(range(0,int(batchCount),10)):
        dloss_l = []
        gloss_l = []
        if e <= 20:
            # warm-up: equal budget until a loss history exists on disk
            d_step, g_step = 10, 10
        else:
            d_step, g_step = set_step(e)
        for _ in tqdm(range(0, int(batchCount), 20)):
            # Get a random set of input noise and images
            for i in range(d_step):
                noise = np.random.normal(0, 1, size=[batchSize, randomDim])
                imageBatch = X_train[np.random.randint(
                    0, X_train.shape[0], size=batchSize)]
                #import pdb;pdb.set_trace()
                # Generate fake MNIST images
                generatedImages = generator.predict(noise)
                # resample fakes D already finds too convincing
                generatedImages = createNoise(generatedImages, thresholding, 1)
                #import pdb;pdb.set_trace()
                #print(np.shape(imageBatch), np.shape(generatedImages))
                X = np.concatenate([imageBatch, generatedImages])
                #import pdb;pdb.set_trace()
                # Labels for generated and real data
                yDis = np.zeros(2 * batchSize)
                # One-sided label smoothing
                yDis[:batchSize] = 1
                # Train discriminator
                discriminator.trainable = True
                dloss = discriminator.train_on_batch(X, yDis)
                dloss_l.append(dloss)
            # Train generator
            discriminator.trainable = False
            for j in range(g_step):
                noise = np.random.normal(0, 1, size=[batchSize, randomDim])
                yGen = np.ones(batchSize)
                gloss = gan.train_on_batch(noise, yGen)
                gloss_l.append(gloss)
        # Store loss of most recent batch from this epoch
        dLosses.append(np.mean(dloss_l))
        gLosses.append(np.mean(gloss_l))
        print("D_loss:", dLosses[e - 1])
        print("G_loss:", gLosses[e - 1])
        # persist the loss history so set_step() can read it next epoch
        np.save("Loss/dynamic_D_loss.npy", dLosses)
        np.save("Loss/dynamic_G_loss.npy", gLosses)
        if e == 1 or e % 10 == 0:
            plotGeneratedImages(e)
            saveModels(e)
        if e % 50 == 0:
            plotLoss(e)
    # Plot losses from every epoch
    plotLoss(e)
if __name__ == '__main__':
    # long run: 20k epochs, batch size 128, default thresholding 0.95
    train(20000, 128)
| zhoukai_gan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import theano.tensor as T
import lasagne
import theano
from scipy.stats import norm
# %matplotlib inline
# Toy 1-D GAN: the target data distribution is N(mu, sigma^2).
mu, sigma = -1,1
xs = np.linspace(-5,5,1000)
plt.plot(xs, norm.pdf(xs, loc=mu, scale=sigma))
train_iters = 500
M = 200  # minibatch size (also the fixed InputLayer batch dimension)
# symbolic column-vector inputs/targets for discriminator (d) and generator (g)
input_var_d = T.col('input_d')
target_var_d = T.col('output_d')
input_var_g = T.col('input_g')
target_var_g = T.col('output_g')
# +
#### Pre-Training
##Discriminator
# small tanh MLP (1 -> 6 -> 5 -> 1), pretrained to regress the true pdf value
l_in = lasagne.layers.InputLayer(shape=(M, 1), input_var=input_var_d)
l_fw_1 = lasagne.layers.DenseLayer(l_in, num_units = 6, nonlinearity=lasagne.nonlinearities.tanh,
                                   W = lasagne.init.GlorotUniform(),
                                   b = lasagne.init.Constant(0.0))
l_fw_2 = lasagne.layers.DenseLayer(l_fw_1, num_units = 5, nonlinearity=lasagne.nonlinearities.tanh,
                                   W = lasagne.init.GlorotUniform(),
                                   b = lasagne.init.Constant(0.0))
l_out = lasagne.layers.DenseLayer(l_fw_2, num_units = 1, nonlinearity =lasagne.nonlinearities.tanh,
                                  W = lasagne.init.GlorotUniform(),
                                  b = lasagne.init.Constant(0.0))
# -
prediction = lasagne.layers.get_output(l_out)
# mean squared error against the true density values
loss = lasagne.objectives.squared_error(prediction, target_var_d)
loss = loss.mean()
params = lasagne.layers.get_all_params(l_out, trainable=True)
updates = lasagne.updates.momentum(loss, params, learning_rate = 0.03)
# compiled pretraining step and forward pass
train = theano.function([input_var_d, target_var_d], loss, updates=updates, allow_input_downcast=True)
output = theano.function([input_var_d], prediction, allow_input_downcast=True)
###Plotting the decision surface
def plot_d0():
    """Plot the target pdf and the pretrained discriminator's output curve."""
    f, ax = plt.subplots(1)
    xs = np.linspace(-5,5,1000)
    ax.plot(xs, norm.pdf(xs, loc=mu, scale=sigma), label='p_data')
    ###Decision Boundry
    r = 1000 #Resolution
    xs = np.linspace(-5,5,r)
    ds = np.zeros((r,1)) #Decision Surface
    ####We process multiple points in parallel in a minibatch
    # NOTE(review): Python 2 integer division -- r/M must divide evenly
    # (1000/200 = 5) or the tail of xs is silently skipped.
    for i in range(r/M):
        x = np.reshape(xs[M*i:M*(i+1)], (M,1))
        ds[M*i:M*(i+1)] = output(x)
    ax.plot(xs, ds, label='decision boundry')
    ax.set_ylim(0,1.1)
    plt.legend()
plot_d0()
plt.title('Initial Decision Boundry')
# +
## Pretraining starts
# regress D(x) onto the true pdf value at x, for uniform-random x in [0, 1)
lh = np.zeros(1000)
for i in range(1000):
    d = (np.random.random(M))
    d = np.reshape(d, (M,1))
    labels =norm.pdf(d, loc=mu, scale=sigma)
    labels = np.reshape(labels, (M,1))
    lh[i] = train(d, labels)
    if i%100 == 0:
        print i
# -
# pretraining loss curve, then the learned decision surface
plt.plot(lh)
plot_d0()
# +
##### The actual generative adversarial network #######
## Generator
# maps z (a jittered uniform grid, see the training loop) to a sample;
# tanh MLP 1 -> 6 -> 5 -> 1
g_in = lasagne.layers.InputLayer(shape=(M,1), input_var=input_var_g)
g_fw_1 = lasagne.layers.DenseLayer(g_in, num_units=6, nonlinearity=lasagne.nonlinearities.tanh,
                                   W = lasagne.init.GlorotUniform(),
                                   b = lasagne.init.Constant(0.0))
g_fw_2 = lasagne.layers.DenseLayer(g_fw_1, num_units=5, nonlinearity=lasagne.nonlinearities.tanh,
                                   W = lasagne.init.GlorotUniform(),
                                   b = lasagne.init.Constant(0.0))
g_out = lasagne.layers.DenseLayer(g_fw_2, num_units=1, nonlinearity=lasagne.nonlinearities.tanh,
                                  W = lasagne.init.GlorotUniform(),
                                  b = lasagne.init.Constant(0.0))
#Generated_sample
prediction_g = lasagne.layers.get_output(g_out)
# +
## Discriminator - D(G(x))
# stacked directly on top of g_out so gradients flow through the generator
dg_fw_1 = lasagne.layers.DenseLayer(g_out, num_units = 6, nonlinearity=lasagne.nonlinearities.tanh,
                                    W = lasagne.init.GlorotUniform(),
                                    b = lasagne.init.Constant(0.0))
dg_fw_2 = lasagne.layers.DenseLayer(dg_fw_1, num_units = 5, nonlinearity=lasagne.nonlinearities.tanh,
                                    W = lasagne.init.GlorotUniform(),
                                    b = lasagne.init.Constant(0.0))
dg_out = lasagne.layers.DenseLayer(dg_fw_2, num_units = 1, nonlinearity =lasagne.nonlinearities.tanh,
                                   W = lasagne.init.GlorotUniform(),
                                   b = lasagne.init.Constant(0.0))
#Prediction
prediction_dg = lasagne.layers.get_output(dg_out)
# +
## Discriminator - D((x))
#Shares weights with D(G(x)) by reusing the same W and b shared variables
d_in = lasagne.layers.InputLayer(shape=(M, 1), input_var=input_var_d)
d_fw_1 = lasagne.layers.DenseLayer(d_in, num_units = 6, nonlinearity=lasagne.nonlinearities.tanh,
                                   W = dg_fw_1.W,
                                   b = dg_fw_1.b)
d_fw_2 = lasagne.layers.DenseLayer(d_fw_1, num_units = 5, nonlinearity=lasagne.nonlinearities.tanh,
                                   W = dg_fw_2.W,
                                   b = dg_fw_2.b)
d_out = lasagne.layers.DenseLayer(d_fw_2, num_units = 1, nonlinearity =lasagne.nonlinearities.tanh,
                                  W = dg_out.W,
                                  b = dg_out.b)
#Prediction
prediction_d = lasagne.layers.get_output(d_out)
# +
# snapshots of the initial parameter values
params_d_g_values = lasagne.layers.get_all_param_values(dg_out)
params_d_values = lasagne.layers.get_all_param_values(d_out)
params_g_values = lasagne.layers.get_all_param_values(g_out)
params_pretrained_d_values = lasagne.layers.get_all_param_values(l_out)
# -
# +
## Using pretrained weights to improve D
lasagne.layers.set_all_param_values(d_out, params_pretrained_d_values)
# +
## Getting the parameters
# NOTE: params_d_g spans G *and* D layers since dg_out is stacked on g_out
params_d_g = lasagne.layers.get_all_params(dg_out)
params_g = lasagne.layers.get_all_params(g_out)
params_d = lasagne.layers.get_all_params(d_out)
# -
# +
## Objectives (standard minimax GAN objectives)
# NOTE(review): the discriminator outputs tanh in (-1, 1); T.log of a
# negative value is NaN -- a sigmoid output in (0, 1) is the conventional
# choice here.  Verify this was intentional.
obj_d = T.mean(T.log(prediction_d) + T.log(1-prediction_dg))
obj_g = T.mean(T.log(prediction_dg))
## Updates
# minimize (1 - objective), i.e. maximize the objective, with momentum SGD
updates_d = lasagne.updates.momentum(1-obj_d, params_d, learning_rate = 0.01)
updates_g = lasagne.updates.momentum(1-obj_g, params_d_g, learning_rate = 0.01)
# +
## Train functions ##
train_d = theano.function([input_var_g, input_var_d], obj_d, updates=updates_d, allow_input_downcast=True)
train_g = theano.function([input_var_g], obj_g, updates=updates_g, allow_input_downcast=True)
# -
## Output functions##
out_d = theano.function([input_var_d], prediction_d, allow_input_downcast=True)
out_dg = theano.function([input_var_g], prediction_dg, allow_input_downcast=True)
out_g = theano.function([input_var_g], prediction_g, allow_input_downcast=True)
# +
###### MULTIPLY GEN OUT BY 5######
def plot_fig():
    """Plot p_data, the current decision boundary, and a histogram of G's samples."""
    f, ax = plt.subplots(1)
    #p_data
    xs = np.linspace(-5,5,1000)
    ax.plot(xs, norm.pdf(xs, loc=mu, scale=sigma), label='p_data')
    #decision_boundry
    r=5000
    xs = np.linspace(-5,5,r)
    ds=np.zeros((r,1))
    #process muliple points in parallel
    # (Python 2 integer division: r/M = 25 minibatches of M points)
    for i in range(r/M):
        x=np.reshape(xs[M*i:M*(i+1)], (M,1))
        ds[M*i:M*(i+1)]=out_d(x)
    ax.plot(xs, ds, label='decision_boundry')
    # distribution of inverse mapped points
    zs = np.linspace(-5,5,r)
    gs = np.zeros((r,1))
    for i in range(r/M):
        z=np.reshape(zs[M*i:M*(i+1)],(M,1))
        gs[M*i:M*(i+1)]=out_g(z)
    # 10-bin normalized histogram of generated samples as a proxy for p_g
    histc, edges = np.histogram(gs, bins=10)
    ax.plot(np.linspace(-5,5,10), histc/float(r), label='p_g')
    ax.set_ylim(-2,1.1)
    plt.legend()
# -
plot_fig()
# +
#Training algo
# Alternate k discriminator steps with one generator step (Goodfellow et al.).
k=1
histd, histg = np.zeros(train_iters), np.zeros(train_iters)
for i in range(train_iters):
    for j in range(k):
        # real samples from N(mu, sigma), sorted to pair with the sorted z grid
        x = np.random.normal(mu, sigma, M)
        x.sort()
        # stratified z: a uniform grid plus a small random jitter
        z = np.linspace(-5.0,5.0,M)+np.random.random(M)*0.01
        histd[i] = train_d(np.reshape(z,(M,1)), np.reshape(x,(M,1)))
    z = np.linspace(-5.0,5.0,M)+np.random.random(M)*0.01
    histg[i] = train_g(np.reshape(z,(M,1)))
    if i%(train_iters//10) == 0:
        print(float(i)/float(train_iters))
# -
# objective curves (obj_g plotted as 1-histg to match the minimized loss)
plt.plot(histd, label='obj_d')
plt.plot(1-histg, label='obj_g')
plt.legend()
plot_fig()
| Generative Adversarial Network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py27]
# language: python
# name: conda-env-py27-py
# ---
# # Contents
#
# We train an LSTM with gumbel-sigmoid gates on a toy language modelling problem.
# Such an LSTM can then be binarized to reach significantly greater speed.
# %env THEANO_FLAGS="device=gpu2"
import numpy as np
import theano
import theano.tensor as T
import lasagne
import os
# # Generate mtg cards
# * Regular RNN language modelling done by LSTM with "binary" gates
# +
start_token = " "
with open("mtg_card_names.txt") as f:
    names = f.read()[:-1].split('\n')
# prefix every name with the start token so the RNN has a fixed first input
names = [start_token+name for name in names]
# -
print 'n samples = ',len(names)
for x in names[::1000]:
    print x
# # Text processing
# +
#all unique characters go here
token_set = set()
for name in names:
    for letter in name:
        token_set.add(letter)
tokens = list(token_set)
print 'n_tokens = ',len(tokens)
# +
# #!token_to_id = <dictionary of symbol -> its identifier (index in tokens list)>
token_to_id = {t:i for i,t in enumerate(tokens) }
# #!id_to_token = < dictionary of symbol identifier -> symbol itself>
id_to_token = {i:t for i,t in enumerate(tokens)}
# +
import matplotlib.pyplot as plt
# %matplotlib inline
plt.hist(map(len,names),bins=25);
# truncate names longer than MAX_LEN characters.
MAX_LEN = min([60,max(list(map(len,names)))])
#ADJUST IF YOU ARE UP TO SOMETHING SERIOUS
# -
# ### Cast everything from symbols into identifiers
# +
names_ix = list(map(lambda name: list(map(token_to_id.get,name)),names))
#crop long names and pad short ones
for i in range(len(names_ix)):
    names_ix[i] = names_ix[i][:MAX_LEN] #crop too long
    if len(names_ix[i]) < MAX_LEN:
        names_ix[i] += [token_to_id[" "]]*(MAX_LEN - len(names_ix[i])) #pad too short
# every row must now be exactly MAX_LEN ids long
assert len(set(map(len,names_ix)))==1
names_ix = np.array(names_ix)
# -
# -
# # Input variables
from agentnet import Recurrence
from lasagne.layers import *
from agentnet.memory import *
from agentnet.resolver import ProbabilisticResolver
from gumbel_sigmoid import GumbelSigmoid
# +
# Symbolic int64 matrix of token ids: the model input is every column but
# the last, the target is every column but the first (predict the next token).
sequence = T.matrix('token sequence','int64')
inputs = sequence[:,:-1]
targets = sequence[:,1:]
l_input_sequence = InputLayer(shape=(None, None),input_var=inputs)
# -
# # Build NN
#
# You'll be building a model that takes token sequence and predicts next tokens at each tick
#
# This is basically equivalent to how rnn step was described in the lecture
# +
###One step of rnn
class rnn:
    """Namespace holding one step of the recurrent graph (agentnet convention).

    Class-level attributes define the shared layers; Recurrence later wires
    new_cell/new_hid back into prev_cell/prev_hid on every tick.  The LSTM
    input/forget gates use GumbelSigmoid with a shared temperature that the
    training loop anneals toward 0 (approaching hard binary gates).
    """
    n_hid = 100
    # shared gate temperature, annealed from 1.0 toward 0.01 during training
    temp = theano.shared(np.float32(1.0))
    #inputs
    inp = InputLayer((None,),name='current character')
    prev_cell = InputLayer((None,n_hid),name='previous lstm cell')
    prev_hid = InputLayer((None,n_hid),name='previous ltsm output')
    #recurrent part
    emb = EmbeddingLayer(inp, len(tokens), 30,name='emb')
    new_cell,new_hid = LSTMCell(prev_cell,prev_hid,emb,
                                inputgate_nonlinearity=GumbelSigmoid(temp),
                                forgetgate_nonlinearity=GumbelSigmoid(temp),
                                #outputgate_nonlinearity=GumbelSigmoid(temp),
                                name="rnn")
    next_token_probas = DenseLayer(new_hid,len(tokens),nonlinearity=T.nnet.softmax)
    #pick next token from predicted probas
    next_token = ProbabilisticResolver(next_token_probas)
# -
# ### Loss && Training
# Teacher-forced training loop: ground-truth tokens are fed in as inputs.
training_loop = Recurrence(
    state_variables={rnn.new_hid:rnn.prev_hid,
                     rnn.new_cell:rnn.prev_cell},
    input_sequences={rnn.inp:l_input_sequence},
    tracked_outputs=[rnn.next_token_probas,],
    unroll_scan=False,
)
# Model weights
weights = lasagne.layers.get_all_params(training_loop,trainable=True)
print weights
# +
predicted_probabilities = lasagne.layers.get_output(training_loop[rnn.next_token_probas])
#If you use dropout do not forget to create deterministic version for evaluation
loss = lasagne.objectives.categorical_crossentropy(predicted_probabilities.reshape((-1,len(tokens))),
                                                   targets.reshape((-1,))).mean()
#<Loss function - a simple categorical crossentropy will do, maybe add some regularizer>
updates = lasagne.updates.adam(loss,weights)
# -
#training
train_step = theano.function([sequence], loss,
                             updates=training_loop.get_automatic_updates()+updates)
# # generation
#
# here we re-wire the recurrent network so that its output is fed back to its input
n_steps = T.scalar(dtype='int32')
feedback_loop = Recurrence(
    state_variables={rnn.new_cell:rnn.prev_cell,
                     rnn.new_hid:rnn.prev_hid,
                     # feed the sampled token back in as the next input
                     rnn.next_token:rnn.inp},
    tracked_outputs=[rnn.next_token_probas,],
    batch_size=1,
    n_steps=n_steps,
    unroll_scan=False,
)
generated_tokens = get_output(feedback_loop[rnn.next_token])
generate_sample = theano.function([n_steps],generated_tokens,updates=feedback_loop.get_automatic_updates())
def generate_string(length=MAX_LEN):
    """Run the feedback loop for `length` steps and decode the ids to text."""
    sampled_ids = generate_sample(length)[0]
    chars = [tokens[ix] for ix in sampled_ids]
    return ''.join(chars)
# smoke-test the sampler once before training (output is random gibberish)
generate_string()
# # Model training
#
# Here you can tweak parameters or insert your generation function
#
#
# __Once something word-like starts generating, try increasing seq_length__
#
#
def sample_batch(data, batch_size):
    """Draw `batch_size` rows from `data` uniformly at random (with replacement)."""
    picks = np.random.randint(0, len(data), size=batch_size)
    return data[picks]
# +
print("Training ...")
#total N iterations
n_epochs=100
# how many minibatches are there in the epoch
batches_per_epoch = 500
#how many training sequences are processed in a single function call
batch_size=32
loss_history = []
# anneal the gumbel-sigmoid temperature from 10**0 down to 10**-2
for epoch,t in enumerate(np.logspace(0,-2,n_epochs)):
    rnn.temp.set_value(np.float32(t))
    avg_cost = 0;
    for _ in range(batches_per_epoch):
        avg_cost += train_step(sample_batch(names_ix,batch_size))
    # NOTE(review): this appends the *summed* epoch cost, while the printout
    # divides by batches_per_epoch -- the plotted curve is off by that factor.
    loss_history.append(avg_cost)
    print("\n\nEpoch {} average loss = {}".format(epoch, avg_cost / batches_per_epoch))
    print "Generated names"
    for i in range(10):
        print generate_string(),
# -
plt.plot(loss_history)
# # And now,
# * try lstm/gru
# * try several layers
# * try mtg cards
# * try your own dataset of any kind
| binary_lstm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="VTze-VbeU1c0"
# # Fine-tune a DialoGPT model
#
# Adapted from the notebook in [this Medium post](https://towardsdatascience.com/make-your-own-rick-sanchez-bot-with-transformers-and-dialogpt-fine-tuning-f85e6d1f4e30?gi=e4a72d1510f0).
# + [markdown] id="Y17kuzFNUSrZ"
# ## Setup
# + colab={"base_uri": "https://localhost:8080/"} id="GBfltjGHT6KG" outputId="7822e15b-9c77-412a-a6ed-20100243db13"
from google.colab import drive
drive.mount('/content/drive/')
# + id="T8fgmjaqUErq"
# !pip -q install transformers
# + id="EtCreyG8UG1s"
import os
os.chdir("/content/drive/My Drive/Colab Notebooks")
# + id="dnv5kT-mLsB-"
# all the imports
import glob
import logging
import os
import pickle
import random
import re
import shutil
from typing import Dict, List, Tuple
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm.notebook import tqdm, trange
from pathlib import Path
from transformers import (
MODEL_WITH_LM_HEAD_MAPPING,
WEIGHTS_NAME,
AdamW,
AutoConfig,
PreTrainedModel,
PreTrainedTokenizer,
get_linear_schedule_with_warmup,
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
# + [markdown] id="BmrbGB8aUmBm"
# ## Get Data from Kaggle
# + colab={"base_uri": "https://localhost:8080/"} id="ftBYBoOoV_Er" outputId="07da0a13-6112-4c4e-cb49-51580c2d9e7a"
# !mkdir ~/.kaggle
# !cp kaggle.json ~/.kaggle/kaggle.json
# + colab={"base_uri": "https://localhost:8080/"} id="fbITTMcLVbI_" outputId="fb4c8bf1-ff2d-4952-a451-62cdd0655aea"
# !kaggle datasets download ruolinzheng/twewy-game-script -f twewy-name-line-full.csv
# + id="RXdJTSVwWGHj"
data = pd.read_csv('twewy-name-line-full.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 238} id="h6kGx-9eG7qA" outputId="bd2efe43-1e50-4716-81a2-bf15a3dd03bd"
data.sample(6)
# + id="PG8v6--qWUwj"
CHARACTER_NAME = 'Joshua'
# + id="GZUcEMd2WLDT"
# Build (response, 7 preceding lines) training rows for the chosen character.
contexted = []
# context window of size 7
n = 7
for i in data[data.name == CHARACTER_NAME].index:
    if i < n:
        # not enough preceding lines to fill the context window
        continue
    row = []
    prev = i - 1 - n # we additionally subtract 1, so row will contain the current response and 7 previous responses
    # walk backwards from the response line i down to (but excluding) prev
    for j in range(i, prev, -1):
        row.append(data.line[j])
    contexted.append(row)
columns = ['response', 'context']
columns = columns + ['context/' + str(i) for i in range(n - 1)]
df = pd.DataFrame.from_records(contexted, columns=columns)
# + colab={"base_uri": "https://localhost:8080/", "height": 446} id="4T5OlNZHUxij" outputId="895603a6-ca02-4301-c4b0-5bccbee8a3b8"
df.sample(6)
# + colab={"base_uri": "https://localhost:8080/", "height": 380} id="NGy0MxMQVIAP" outputId="08b7f0eb-6a38-4b83-efdc-e53778d7547a"
# 90/10 train/validation split
trn_df, val_df = train_test_split(df, test_size=0.1)
trn_df.head()
# + id="aEeJQlAKWtiJ"
# create dataset suitable for our model
def construct_conv(row, tokenizer, eos = True):
    """Encode one dialogue row into a single flat list of token ids.

    row: iterable of utterance strings, newest-first (response, context,
         context/0, ...); the result is reversed so it reads chronologically.
    eos: append tokenizer.eos_token_id after each utterance when True.
         BUGFIX: the original accepted this flag but ignored it, always
         appending the EOS token; honoring it is backward compatible since
         the default is True.

    Returns the flattened list of token ids.
    """
    tail = [tokenizer.eos_token_id] if eos else []
    encoded = [tokenizer.encode(text) + tail for text in row]
    flat = []
    # oldest utterance first
    for utterance in reversed(encoded):
        flat.extend(utterance)
    return flat
class ConversationDataset(Dataset):
    """Torch Dataset of token-id conversations built from a context DataFrame.

    Each example is one DataFrame row flattened by construct_conv().  The
    encoded examples are pickled into args.cache_dir keyed by model type and
    block size, and reloaded from there unless args.overwrite_cache is set.

    NOTE(review): relies on module-level `logger` and `torch`, neither of
    which is defined/imported in the visible import cell -- confirm they
    exist upstream before running.
    """

    def __init__(self, tokenizer: PreTrainedTokenizer, args, df, block_size=512):
        # reserve room for the special tokens the tokenizer would add
        block_size = block_size - (tokenizer.model_max_length - tokenizer.max_len_single_sentence)
        directory = args.cache_dir
        cached_features_file = os.path.join(
            directory, args.model_type + "_cached_lm_" + str(block_size)
        )
        if os.path.exists(cached_features_file) and not args.overwrite_cache:
            logger.info("Loading features from cached file %s", cached_features_file)
            with open(cached_features_file, "rb") as handle:
                self.examples = pickle.load(handle)
        else:
            logger.info("Creating features from dataset file at %s", directory)
            self.examples = []
            for _, row in df.iterrows():
                conv = construct_conv(row, tokenizer)
                self.examples.append(conv)
            logger.info("Saving features into cached file %s", cached_features_file)
            with open(cached_features_file, "wb") as handle:
                pickle.dump(self.examples, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, item):
        # lazily converts the cached python list to a LongTensor per access
        return torch.tensor(self.examples[item], dtype=torch.long)
# + id="-3iHwoKlWyrs"
# Cacheing and storing of data/checkpoints
def load_and_cache_examples(args, tokenizer, df_trn, df_val, evaluate=False):
    """Build a ConversationDataset from the validation split when *evaluate*
    is set, otherwise from the training split."""
    source_df = df_val if evaluate else df_trn
    return ConversationDataset(tokenizer, args, source_df)
def set_seed(args):
    """Seed every RNG in play (python, numpy, torch, and all CUDA devices
    when GPUs are present) from ``args.seed`` for reproducibility."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def _sorted_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> List[str]:
    """Return checkpoint directories under args.output_dir, oldest first.

    The sort key is the file modification time when *use_mtime* is set,
    otherwise the integer global step parsed from the directory name.
    Paths that do not match the "<prefix>-<number>" pattern are skipped.
    """
    keyed_paths = []
    pattern = os.path.join(args.output_dir, "{}-*".format(checkpoint_prefix))
    for path in glob.glob(pattern):
        if use_mtime:
            keyed_paths.append((os.path.getmtime(path), path))
            continue
        match = re.match(".*{}-([0-9]+)".format(checkpoint_prefix), path)
        if match and match.groups():
            keyed_paths.append((int(match.groups()[0]), path))
    return [path for _, path in sorted(keyed_paths)]
def _rotate_checkpoints(args, checkpoint_prefix="checkpoint", use_mtime=False) -> None:
    """Delete the oldest checkpoints so that at most args.save_total_limit
    remain; a falsy or non-positive limit disables rotation entirely."""
    limit = args.save_total_limit
    if not limit or limit <= 0:
        return
    # Oldest first, so the slice below removes the stalest checkpoints.
    checkpoints = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    excess = len(checkpoints) - limit
    if excess <= 0:
        return
    for stale in checkpoints[:excess]:
        logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(stale))
        shutil.rmtree(stale)
# + [markdown] id="EEDdTJTqUwZJ"
# ## Build Model
# + colab={"base_uri": "https://localhost:8080/"} id="r2cE0fY5UHpz" outputId="e4f382cd-57d9-49b7-9da4-4b44fe57df5b"
from transformers import AutoModelWithLMHead, AutoModelForCausalLM, AutoTokenizer
import torch

# Load the pretrained DialoGPT-small tokenizer and model.
# AutoModelWithLMHead is deprecated in recent transformers releases;
# AutoModelForCausalLM (already imported above) is the direct replacement
# for GPT-2-style decoder-only models and returns the same model here.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
# + id="ra2vsRp-UMXo"
"""
Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, BERT, RoBERTa).
GPT and GPT-2 are fine-tuned using a causal language modeling (CLM) loss while BERT and RoBERTa are fine-tuned
using a masked language modeling (MLM) loss.
"""
# Configs
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
# + id="2OnASqJjUNJa"
# Args to allow for easy convertion of python script to notebook
class Args():
    """Hyper-parameters and flags for fine-tuning, mirroring the CLI args of
    the original HuggingFace run_language_modeling.py script."""

    def __init__(self):
        self.output_dir = 'output-small'                  # where checkpoints and the final model go
        self.model_type = 'gpt2'
        self.model_name_or_path = 'microsoft/DialoGPT-small'
        self.config_name = 'microsoft/DialoGPT-small'
        self.tokenizer_name = 'microsoft/DialoGPT-small'
        self.cache_dir = 'cached'                         # tokenized-feature cache directory
        self.block_size = 512
        self.do_train = True
        self.do_eval = True
        self.evaluate_during_training = False
        self.per_gpu_train_batch_size = 4
        self.per_gpu_eval_batch_size = 4
        self.gradient_accumulation_steps = 1
        self.learning_rate = 5e-5
        self.weight_decay = 0.0
        self.adam_epsilon = 1e-8
        self.max_grad_norm = 1.0
        self.num_train_epochs = 4
        self.max_steps = -1                               # -1: derive total steps from the epoch count
        self.warmup_steps = 0
        self.logging_steps = 1000
        self.save_steps = 3500
        self.save_total_limit = None                      # None disables checkpoint rotation
        self.eval_all_checkpoints = False
        self.no_cuda = False
        self.overwrite_output_dir = True
        self.overwrite_cache = True
        self.should_continue = False                      # resume from the latest checkpoint
        self.seed = 42
        self.local_rank = -1                              # -1: no distributed training
        self.fp16 = False
        self.fp16_opt_level = 'O1'                        # apex AMP optimization level

args = Args()
# + [markdown] id="9Q1dTFXxW9NE"
# ## Train and Evaluate
# + id="PaarIDZrW81h"
def train(args, train_dataset, model: PreTrainedModel, tokenizer: PreTrainedTokenizer) -> Tuple[int, float]:
    """Fine-tune *model* on *train_dataset* and return (global_step, avg loss).

    Handles gradient accumulation, optional apex fp16, multi-GPU/distributed
    setups, periodic TensorBoard logging, and periodic checkpointing with
    rotation.
    """
    if args.local_rank in [-1, 0]:
        tb_writer = SummaryWriter()

    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)

    def collate(examples: List[torch.Tensor]):
        # Pad variable-length conversations to the longest one in the batch.
        if tokenizer._pad_token is None:
            return pad_sequence(examples, batch_first=True)
        return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)

    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(
        train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate, drop_last = True
    )

    # Total optimizer updates: either fixed (max_steps) or derived from epochs.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

    model = model.module if hasattr(model, "module") else model  # Take care of distributed/parallel training
    model.resize_token_embeddings(len(tokenizer))
    # add_special_tokens_(model, tokenizer)

    # Prepare optimizer and schedule (linear warmup and decay).
    # Biases and LayerNorm weights get no weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )

    # Check if saved optimizer or scheduler states exist (resuming a run).
    if (
        args.model_name_or_path
        and os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt"))
        and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt"))
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))

    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)

    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )

    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)

    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if args.model_name_or_path and os.path.exists(args.model_name_or_path):
        try:
            # set global_step to gobal_step of last saved checkpoint from model path
            checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            logger.info(" Starting fine-tuning.")

    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # Added here for reproducibility
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):

            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue

            # Causal LM: labels equal the inputs (the model shifts internally).
            inputs, labels = (batch, batch)
            # Skip over-long batches instead of truncating (GPT-2 context limit).
            if inputs.shape[1] > 1024: continue
            inputs = inputs.to(args.device)
            labels = labels.to(args.device)
            model.train()
            outputs = model(inputs, labels=labels)
            loss = outputs[0]  # model outputs are always tuple in transformers (see doc)

            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps

            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()

            tr_loss += loss.item()
            # Only step the optimizer every gradient_accumulation_steps batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1

                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    if (
                        args.local_rank == -1 and args.evaluate_during_training
                    ):  # Only evaluate when single GPU otherwise metrics may not average well
                        # NOTE(review): evaluate() below requires df_trn/df_val
                        # parameters that are not passed here — this call would
                        # raise a TypeError if evaluate_during_training were
                        # enabled. Confirm and fix before turning that flag on.
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss

                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    checkpoint_prefix = "checkpoint"
                    # Save model checkpoint
                    output_dir = os.path.join(args.output_dir, "{}-{}".format(checkpoint_prefix, global_step))
                    os.makedirs(output_dir, exist_ok=True)
                    model_to_save = (
                        model.module if hasattr(model, "module") else model
                    )  # Take care of distributed/parallel training
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)

                    torch.save(args, os.path.join(output_dir, "training_args.bin"))
                    logger.info("Saving model checkpoint to %s", output_dir)

                    _rotate_checkpoints(args, checkpoint_prefix)

                    torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                    logger.info("Saving optimizer and scheduler states to %s", output_dir)

            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break

    if args.local_rank in [-1, 0]:
        tb_writer.close()

    return global_step, tr_loss / global_step
# Evaluation of some model
def evaluate(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, df_trn, df_val, prefix="") -> Dict:
    """Compute perplexity of *model* on the validation split.

    Writes the result to <output_dir>/<prefix>/eval_results.txt and returns
    a dict {"perplexity": tensor}.
    """
    # Loop to handle MNLI double evaluation (matched, mis-matched)
    eval_output_dir = args.output_dir

    eval_dataset = load_and_cache_examples(args, tokenizer, df_trn, df_val, evaluate=True)
    os.makedirs(eval_output_dir, exist_ok=True)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly

    def collate(examples: List[torch.Tensor]):
        # Pad variable-length examples to the longest in the batch.
        if tokenizer._pad_token is None:
            return pad_sequence(examples, batch_first=True)
        return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)

    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(
        eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate, drop_last = True
    )

    # multi-gpu evaluate
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(eval_dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    model.eval()

    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        # Causal LM evaluation: labels are the inputs themselves.
        inputs, labels = (batch, batch)
        inputs = inputs.to(args.device)
        labels = labels.to(args.device)

        with torch.no_grad():
            outputs = model(inputs, labels=labels)
            lm_loss = outputs[0]
            eval_loss += lm_loss.mean().item()
        nb_eval_steps += 1

    eval_loss = eval_loss / nb_eval_steps
    # Perplexity is exp of the mean cross-entropy loss.
    perplexity = torch.exp(torch.tensor(eval_loss))

    result = {"perplexity": perplexity}

    output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results {} *****".format(prefix))
        for key in sorted(result.keys()):
            logger.info(" %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))

    return result
# + id="SCnGAJWbXD9C"
# Main runner
def main(df_trn, df_val):
    """End-to-end driver: configure, optionally resume, train, save, evaluate.

    *df_trn*/*df_val* are the (response, context...) DataFrames built above.
    Returns a dict of evaluation results keyed by checkpoint global step.
    """
    args = Args()

    # Resume from the newest checkpoint when requested.
    if args.should_continue:
        sorted_checkpoints = _sorted_checkpoints(args)
        if len(sorted_checkpoints) == 0:
            raise ValueError("Used --should_continue but no checkpoint was found in --output_dir.")
        else:
            args.model_name_or_path = sorted_checkpoints[-1]

    # Refuse to clobber an existing non-empty output dir unless explicitly allowed.
    if (
        os.path.exists(args.output_dir)
        and os.listdir(args.output_dir)
        and args.do_train
        and not args.overwrite_output_dir
        and not args.should_continue
    ):
        raise ValueError(
            "Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
                args.output_dir
            )
        )

    # Setup CUDA, GPU & distributed training
    # NOTE(review): the device is hard-coded to "cuda" and args.no_cuda is
    # ignored, so this fails on a CPU-only machine — confirm intended.
    device = torch.device("cuda")
    args.n_gpu = torch.cuda.device_count()
    args.device = device

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )

    # Set seed
    set_seed(args)

    # Load pretrained config, tokenizer and model weights.
    config = AutoConfig.from_pretrained(args.config_name, cache_dir=args.cache_dir)
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir)
    model = AutoModelWithLMHead.from_pretrained(
        args.model_name_or_path,
        from_tf=False,
        config=config,
        cache_dir=args.cache_dir,
    )
    model.to(args.device)

    logger.info("Training/evaluation parameters %s", args)

    # Training
    if args.do_train:
        train_dataset = load_and_cache_examples(args, tokenizer, df_trn, df_val, evaluate=False)

        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    # Saving best-practices: if you use save_pretrained for the model and tokenizer, you can reload them using from_pretrained()
    if args.do_train:
        # Create output directory if needed
        os.makedirs(args.output_dir, exist_ok=True)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = (
            model.module if hasattr(model, "module") else model
        )  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)

        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, "training_args.bin"))

        # Load a trained model and vocabulary that you have fine-tuned
        model = AutoModelWithLMHead.from_pretrained(args.output_dir)
        tokenizer = AutoTokenizer.from_pretrained(args.output_dir)
        model.to(args.device)

    # Evaluation
    results = {}
    if args.do_eval and args.local_rank in [-1, 0]:
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
            )
            logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN)  # Reduce logging
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for checkpoint in checkpoints:
            global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""

            model = AutoModelWithLMHead.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, df_trn, df_val, prefix=prefix)
            result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
            results.update(result)

    return results
# + [markdown] id="7NWvkdR-XHeB"
# ## Run the Main Function
# + colab={"base_uri": "https://localhost:8080/", "height": 780, "referenced_widgets": ["1d7f4c82687540f1ad69eb54ac3c25b4", "e7b9f3fc77a24259a87ef0dc735dfecb", "<KEY>", "<KEY>", "021b771a270f479aa3b9e2b5f17e3d97", "450b0e7fd7a347c7beb78b7d72f64385", "9391d7abf6ed4400903995f56d7a1260", "ea6b919964d24c2f9de1c64c9cefaf23", "2fa1fa2407384cb98d79a912de2d5b8f", "<KEY>", "e38fb98fd7b3413392dc39c93a107a35", "855ca0a6125a4d698416214a9425ad98", "<KEY>", "<KEY>", "de252cd193114c40ad5f5e9622b7abc7", "<KEY>", "<KEY>", "b4e00059cf3a49929978ed780aae8358", "0ff5f4e3506b493a98d72008a467f35f", "<KEY>", "a937f1dfeee5432ba31b3016fd30e9e2", "<KEY>", "<KEY>", "<KEY>", "8b8a7c771d234f6c9d758a1f07f75a90", "c6518c4a721745bf97ee682f2ebe4635", "<KEY>", "<KEY>", "8c016a54f0a24fcdacf369baa9d24f1e", "7fe5b457ca0f417f90a20d235e9cec07", "<KEY>", "8e3f1740c82f47949eefc2eb53052eae", "<KEY>", "175e94deab7f4d20b99b419bea33583b", "41f26f7210e540479814e5d68de13ddb", "<KEY>", "<KEY>", "810ac22adad344b7bf8b556ded990122", "<KEY>", "<KEY>", "<KEY>", "850b5411122e4d608511fe26818bea68", "0663fb4bd85f4d87a7d61910b995be14", "<KEY>", "0ca29b4a62e04d9c937189ea19b25de8", "f871b83632974e0088bae65e78efaf28", "4cacf7fc20754a7ca7fe08c8ec187a81", "8bcc625c0f284398bbd287fe45021b17"]} id="e61zo2JtXGNX" outputId="22d4916e-7169-44b5-f9d8-79b9c43fab2e"
# Fine-tune on the character's lines and evaluate (the long-running step).
main(trn_df, val_df)
# + [markdown] id="YRpQ_n2zXQj-"
# ## Load the Trained Model
# + colab={"base_uri": "https://localhost:8080/"} id="HGw3qgfaXQHX" outputId="93e84cfd-9718-42e5-bd11-418112c91d71"
# Reload the base tokenizer and the fine-tuned weights saved by main().
# AutoModelForCausalLM (imported earlier in this file) replaces the
# deprecated AutoModelWithLMHead for GPT-2-style decoder-only models.
tokenizer = AutoTokenizer.from_pretrained('microsoft/DialoGPT-small')
model = AutoModelForCausalLM.from_pretrained('output-small')
# + colab={"base_uri": "https://localhost:8080/"} id="lAWsiAvNXbxd" outputId="0fd2541e-ee68-4976-b098-8483efe38d5e"
# Let's chat for 4 lines
for step in range(4):
    # encode the new user input, add the eos_token and return a tensor in Pytorch
    new_user_input_ids = tokenizer.encode(input(">> User:") + tokenizer.eos_token, return_tensors='pt')
    # print(new_user_input_ids)

    # append the new user input tokens to the chat history
    # (on the first turn there is no history yet)
    bot_input_ids = torch.cat([chat_history_ids, new_user_input_ids], dim=-1) if step > 0 else new_user_input_ids

    # generated a response while limiting the total chat history to 1000 tokens,
    # sampling (top_k/top_p/temperature) keeps replies varied and in character
    chat_history_ids = model.generate(
        bot_input_ids, max_length=200,
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=3,
        do_sample=True,
        top_k=100,
        top_p=0.7,
        temperature=0.8
    )

    # pretty print last ouput tokens from bot (slice off the prompt tokens)
    print("JoshuaBot: {}".format(tokenizer.decode(chat_history_ids[:, bot_input_ids.shape[-1]:][0], skip_special_tokens=True)))
# + [markdown] id="ANSQlQezXqwn"
# ## Push Model to Hugging Face
# + id="VgnHRgHKXwDd"
# git-lfs is required to push large model weight files to the Hub.
# !sudo apt-get install git-lfs
# + id="uhqMtvfmXei8"
# !git config --global user.email ""
# Tip: using the same email as your huggingface.co account will link your commits to your profile
# !git config --global user.name ""
# + id="tfUsrKR7YLT1"
MY_MODEL_NAME = 'DialoGPT-small-joshua'
# The key file must contain a HuggingFace access token with write permission.
with open('HuggingFace-API-key.txt', 'rt') as f:
    HUGGINGFACE_API_KEY = f.read().strip()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="_65nsiLcYNXI" outputId="0dbf0cb1-957c-4adb-bf55-4222d2cc85bc"
# NOTE(review): use_auth_token is deprecated in newer huggingface_hub
# releases in favor of token= — confirm against the installed version.
model.push_to_hub(MY_MODEL_NAME, use_auth_token=HUGGINGFACE_API_KEY)
tokenizer.push_to_hub(MY_MODEL_NAME, use_auth_token=HUGGINGFACE_API_KEY)
# + [markdown] id="D_XfXTCrZKmO"
# ## All Done!
# + id="_tIwK7G8ZLrd"
# --- end of notebook: model_train_upload_workflow.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Unfair coin tosses
#
# Copyright <NAME> 2018
#
# [MIT License](https://opensource.org/licenses/MIT)
# +
# Configure Jupyter so figures appear in the notebook
# %matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
# %config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
import numpy as np
# -
# Here's a puzzle from [a recent FiveThirtyEight post](https://fivethirtyeight.com/features/how-far-would-you-go-to-rig-a-coin-flip/):
#
# >Coin flips are a handy way to determine a winner — if the goal is to give two people an equal chance of winning. But sometimes it’s not. Suppose Anna and Barry aren’t interested in equity. All they have is a fair coin marked heads and tails. How can they devise a game that gives Anna a 1 in 3 chance of winning? What about a 1 in 4 chance? What about a 1 in 5 chance?
#
#
# ### Solution
#
# The key to the solution is to take advantage of Pascal's triangle. If you toss `n` coins, the number of heads, `k`, follows a binomial distribution. The relative probabilities of the totals `k = 0...n` are the coefficients in Pascal's triangle.
#
# Here's a function that uses `np.convolve` to generate the rows of Pascal's triangle.
def pascal(n_rows):
    """Yield the first n_rows + 1 rows of Pascal's triangle.

    The first row is the plain list [1]; every subsequent row is produced by
    convolving the previous row with [1, 1], so those are numpy arrays.
    """
    row = [1]
    yield row
    for _ in range(n_rows):
        row = np.convolve(row, [1, 1])
        yield row
# Print the first six rows as a sanity check.
for row in pascal(5):
    print(row)
# The third row is [1 2 1], which indicates that if we flip two coins, the chance of getting 1 head is twice the chance of getting 0 or 2 heads. So if we want to give Anna a 1/4 chance of winning, we could toss two coins: if we get 0 heads, Anna wins; otherwise Barry wins.
#
# More generally, we can toss `n` coins and choose
#
# 1. A set of totals where Anna wins,
# 2. A set of totals where Barry wins, and maybe
# 3. A leftover set where we have to toss again.
#
# For example, if Anna should win `1/3` of the time, we could toss `2` coins; Anna wins if we get `0` heads, Barry wins if we get `1`, and we have to toss again if we get `2`.
#
# The chance that we have to try again is `1/4`, so the expected number of attempts is `4/3`. Each attempt requires 2 coins tosses, so the expected number of flips is `8/3`.
#
# For each desired probability, we can search for the process that yields the desired outcome with the minimum expected number of flips.
# I'll start with a function that yields all combinations from a set.
# +
from itertools import combinations
def all_combos(s):
    """Yield every non-empty subset of *s* as a list, smallest sizes first."""
    size = len(s)
    for take in range(1, size + 1):
        for subset in combinations(s, take):
            yield list(subset)
# -
# For example, if we take the third row from Pascal's triangle, there are three outcomes, `{0, 1, 2}`, with relative frequencies `[1, 2, 1]`.
# Relative frequencies of 0, 1, 2 heads when tossing two fair coins.
row = np.array([1, 2, 1])
# The outcome labels are the possible head counts 0..n.
ind = set(range(len(row)))
# Here are all subsets of the outcomes:
for comb in all_combos(ind):
    print(comb)
# Now we'd like to enumerate all partitions of the set into `win` and `lose`, allowing the possiblity of leaving out some outcomes.
def all_partitions(s):
    """Yield (win, lose) pairs of disjoint non-empty subsets of set *s*.

    Outcomes in neither subset are implicitly "toss again".
    """
    for win in all_combos(s):
        remaining = s - set(win)
        for lose in all_combos(remaining):
            yield win, lose
# Here are the ways to partition `{0, 1, 2}`, and the total relative frequency for each subset.
# Show each partition with the win/lose relative frequencies it implies.
for win, lose in all_partitions(ind):
    print(win, lose, row[win].sum(), row[lose].sum())
# Now we can enumerate the rows of Pascal's triangle and for each set of outcomes, enumerate the partitions.
#
# For each partition, we compute the odds Anna wins and yield:
#
# * `expected_coins`: the expected total number of coins we have to toss.
# * `n_coins`: number of coins we toss for each attempt
# * `win`: set of outcomes where Anna wins
# * `lose`: set of outcomes where Anna loses
# * `odds`: the odds that Anna wins.
#
# +
import math
def reduce(a, b):
    """Return the ratio a:b reduced to lowest terms, as a tuple.

    Note: this intentionally shadows functools.reduce within this notebook.
    """
    g = math.gcd(a, b)
    return a // g, b // g
# +
from fractions import Fraction
def enumerate_rows(n_rows):
    """Enumerate every coin-game scheme built from Pascal's triangle.

    For each row (= number of coins tossed per attempt) and each win/lose
    partition of its outcomes, yield a tuple:
    (expected_coins, n_coins, efficiency, win, lose, odds), where efficiency
    is the fraction of attempts that settle the game without a re-toss.
    """
    # loop through the rows of Pascal's triangle
    for n_coins, row in enumerate(pascal(n_rows)):
        index = set(range(len(row)))
        # loop through ways to partition the outcomes
        for win, lose in all_partitions(index):
            # relative frequencies of winning vs losing, reduced to odds
            numer, denom = row[win].sum(), row[lose].sum()
            odds = reduce(numer, denom)
            # fraction of attempts that terminate (no re-toss needed)
            efficiency = Fraction(numer + denom, 2**n_coins)
            expected_coins = n_coins / efficiency
            yield expected_coins, n_coins, efficiency, win, lose, odds
# -
# Here are the results from tossing one coin or two.
# Inspect every scheme found using at most two coin tosses per attempt.
for result in enumerate_rows(2):
    print(result)
# There are several ways to get `1:1` odds, but the most efficient is to toss one coin once. No surprise there.
#
# The best ways to get `1:2` odds (so far) is to toss 2 coins, but we might have to try several times, so the expected number of tosses is `8/3`, as in the example above.
#
# Now we can enumerate the rows of Pascal's triangle and for each effective odds, record the best way to achieve it.
#
# `best` maps from `odds`, represented by a tuple of integers, to `results`, which is a tuple.
#
# The elements of `results` are in order so we can use tuple comparison to select the results with the lowest expected number of coin tosses, and the lowest number of tosses per round, as a tie-breaker.
def run_rows(n):
    """Scan n rows of Pascal's triangle and record, in the global ``best``
    dict, the cheapest result (fewest expected tosses; ties broken by fewer
    coins per attempt via tuple comparison) for each odds value."""
    worst = (np.inf,)
    for result in enumerate_rows(n):
        odds = result[-1]
        incumbent = best.get(odds, worst)
        if result < incumbent:
            best[odds] = result
# Global registry mapping an odds tuple to the best scheme found for it.
best = {}
# %time run_rows(17)
# Here are the results for odds `1:n`, for various `n`:
# +
# Collect the best scheme for each odds 1:denom and print a summary line.
res = []
for denom in range(1, 70):
    result = best[1, denom]
    expected_coins, n_coins, efficiency, win, lose, odds = result
    print(odds, n_coins, efficiency, expected_coins, win, lose)
    res.append((denom, efficiency, expected_coins))
# -
# The fourth line is `(1, 4) 3 5/8 24/5 [0] [1, 3]`, which indicates that if we want odds `1:4`, we should flip `3` coins. Alice wins if we get `0` heads; Barry wins if we get `1` or `3`. `5/8` of the time we are done; the remaining `3/8` we flip again; on average we expect to toss `24/5` coins.
# Let's see what the results look like graphically. Here's efficiency for each value of odds against.
odds, effs, coins = np.array(res).T
import matplotlib.pyplot as plt
plt.bar(odds, effs)
plt.xlabel('Odds against')
plt.ylabel('Efficiency');
# And here's the expected number of coin tosses for each value of odds against.
plt.bar(odds, coins)
plt.xlabel('Odds against')
plt.ylabel('Expected number of coins');
# We can also visualize the efficiency for all values of odds, not just `1:k`.
# +
import warnings
warnings.filterwarnings('ignore', category=DeprecationWarning)

high = 150
# Efficiency for every reduced odds i:j below 150; NaN where no scheme exists.
z1 = np.full((high, high), np.nan)
for i in range(1, high):
    for j in range(1, high):
        odds = reduce(i, j)
        if odds in best:
            result = best[odds]
            expected_coins = result[0]
            efficiency = result[2]
            z1[i, j] = efficiency
np.nanmean(z1)
# -
# Bug fix: this previously plotted the undefined name `zs` (a NameError);
# the array built above is `z1`.
plt.pcolormesh(z1)
plt.colorbar();
# We can also look at the expected number of coins (clipped at 15).
# +
high = 150
# Expected number of tosses for every reduced odds i:j below 150.
z2 = np.full((high, high), np.nan)
for i in range(1, high):
    for j in range(1, high):
        odds = reduce(i, j)
        if odds in best:
            result = best[odds]
            expected_coins = result[0]
            # clip so a few huge values don't wash out the colormap
            z2[i, j] = min(expected_coins, 15)
np.nanmean(z2)
# -
plt.pcolormesh(z2)
plt.colorbar();
# --- end of notebook: unfair_coins.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# <center>
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png" width="300" alt="cognitiveclass.ai logo" />
# </center>
#
# <h1>Extracting Stock Data Using a Web Scraping</h1>
#
# Not all stock data is available via API in this assignment; you will use web-scraping to obtain financial data. You will be quizzed on your results.\
# Using beautiful soup we will extract historical share data from a web-page.
#
# <h2>Table of Contents</h2>
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ul>
# <li>Downloading the Webpage Using Requests Library</li>
# <li>Parsing Webpage HTML Using BeautifulSoup</li>
# <li>Extracting Data and Building DataFrame</li>
# </ul>
# <p>
# Estimated Time Needed: <strong>30 min</strong></p>
# </div>
#
# <hr>
#
# #!pip install pandas==1.3.3
# #!pip install requests==2.26.0
# !mamba install bs4==4.10.0 -y
# !mamba install html5lib==1.1 -y
# !pip install lxml==4.6.4
# #!pip install plotly==5.3.1
import pandas as pd
import requests
from bs4 import BeautifulSoup
# ## Using Webscraping to Extract Stock Data Example
#
# First we must use the `requests` library to download the webpage, and extract the text. We will extract Netflix stock data <https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0220EN-SkillsNetwork/labs/project/netflix_data_webpage.html>.
#
# +
url = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0220EN-SkillsNetwork/labs/project/netflix_data_webpage.html"

# Download the raw HTML of the Netflix share-price page.
data = requests.get(url).text
# -
# Next we must parse the text into html using `beautiful_soup`
#
soup = BeautifulSoup(data, 'html5lib')
#soup
# Now we can turn the html table into a pandas dataframe
#
# +
# Build the Netflix price table from the parsed HTML.
# Rows are collected in a plain list and the DataFrame is constructed once at
# the end: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
# and per-row appends rebuild the whole frame each iteration (quadratic cost).
netflix_rows = []

# First we isolate the body of the table which contains all the information
# Then we loop through each row and find all the column values for each row
for row in soup.find("tbody").find_all('tr'):
    col = row.find_all("td")
    netflix_rows.append({
        "Date": col[0].text,
        "Open": col[1].text,
        "High": col[2].text,
        "Low": col[3].text,
        "Close": col[4].text,
        "Adj Close": col[5].text,
        "Volume": col[6].text,
    })

# Finally we build the full table in one shot.  The column order matches what
# the original append-based code produced: "Adj Close" was missing from the
# initial column list, so it ended up appended as the last column.
netflix_data = pd.DataFrame(netflix_rows, columns=["Date", "Open", "High", "Low", "Close", "Volume", "Adj Close"])
# -
# We can now print out the dataframe
#
# Preview the first five rows of the scraped table.
netflix_data.head()
# We can also use the pandas `read_html` function using the url
#
read_html_pandas_data = pd.read_html(url)
# Or we can convert the BeautifulSoup object to a string
#
read_html_pandas_data = pd.read_html(str(soup))
# Because there is only one table on the page, we just take the first table in the list returned
#
# +
netflix_dataframe = read_html_pandas_data[0]
netflix_dataframe.head()
# -
# ## Using Webscraping to Extract Stock Data Exercise
#
# Use the `requests` library to download the webpage <https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0220EN-SkillsNetwork/labs/project/amazon_data_webpage.html>. Save the text of the response as a variable named `html_data`.
#
# Download a static snapshot of Amazon's historical-price page as raw HTML.
html_data = requests.get('https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0220EN-SkillsNetwork/labs/project/amazon_data_webpage.html').text
# Parse the html data using `beautiful_soup`.
#
soup = BeautifulSoup(html_data,'html5lib')
# <b>Question 1</b> What is the content of the title attribute:
#
# The page title answers quiz question 1.
soup.title.text
# Using beautiful soup extract the table with historical share prices and store it into a dataframe named `amazon_data`. The dataframe should have columns Date, Open, High, Low, Close, Adj Close, and Volume. Fill in each variable with the correct data from the list `col`.
#
# +
# Build the Amazon price table from the parsed HTML.
# Rows are collected in a plain list and the DataFrame is constructed once at
# the end: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0,
# and per-row appends rebuild the whole frame each iteration (quadratic cost).
amazon_rows = []
for row in soup.find("tbody").find_all("tr"):
    col = row.find_all("td")
    amazon_rows.append({
        "Date": col[0].text,
        "Open": col[1].text,
        "High": col[2].text,
        "Low": col[3].text,
        "Close": col[4].text,
        "Adj Close": col[5].text,
        "Volume": col[6].text,
    })
# Column order matches what the original append-based code produced:
# "Adj Close" was missing from the initial column list, so it ended up last.
amazon_data = pd.DataFrame(amazon_rows, columns=["Date", "Open", "High", "Low", "Close", "Volume", "Adj Close"])
# -
# Print out the first five rows of the `amazon_data` dataframe you created.
#
# Preview the first five rows of the scraped Amazon table.
amazon_data.head()
# <b>Question 2</b> What is the name of the columns of the dataframe
#
amazon_data.columns
# <b>Question 3</b> What is the `Open` of the last row of the amazon_data dataframe?
#
# iloc[-1] selects the chronologically last scraped row.
amazon_data.iloc[-1]['Open']
# <h2>About the Authors:</h2>
#
# <a href="https://www.linkedin.com/in/joseph-s-50398b136/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkPY0220ENSkillsNetwork23455606-2021-01-01"><NAME></a> has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
#
# <NAME>
#
# ## Change Log
#
# | Date (YYYY-MM-DD) | Version | Changed By | Change Description |
# | ----------------- | ------- | ---------- | ------------------ |
#
# ```
# | 2021-06-09 | 1.2 | <NAME>|Added URL in question 3 |
# ```
#
# \| 2020-11-10 | 1.1 | <NAME> | Deleted the Optional part |
# \| 2020-08-27 | 1.0 | <NAME> | Added lab to GitLab |
#
# <hr>
#
# ## <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
#
# <p>
#
| 5_Python Project for Data Science/Extracting Stock Data Using a Web Scraping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/skywalker00001/Conterfactual-Reasoning-Project/blob/main/some_examples_1.1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Q2aKroRZLmfj"
# # How to find the blocks that differ (i.e. were changed) in sentence b compared to sentence a (the base)
# + colab={"base_uri": "https://localhost:8080/"} id="J4QazUl7U6UF" outputId="65c3aaa8-aedd-42c7-ae40-e3c0dc38a8fd"
from google.colab import drive
drive.mount('/content/drive')
root = 'drive/MyDrive/LM/'
# + colab={"base_uri": "https://localhost:8080/"} id="ZghQ4Tb8kr3S" outputId="43f9a013-d044-47cd-dfd6-ab37b0ab66af"
# !pip install sentencepiece
# !pip install transformers -q
# + id="KwAz8ZN7Wgb2" colab={"base_uri": "https://localhost:8080/"} outputId="53c21984-bdad-4d96-bcb1-77af52e8a4d3"
# Importing stock libraries
import numpy as np
import pandas as pd
import difflib
import nltk
import copy
import regex as re
import sys
import torch
sys.path.append('/content/drive/MyDrive/LM/')
from global_param import MyConfig
nltk.download("punkt")
# Importing the T5 modules from huggingface/transformers
from transformers import T5Tokenizer, T5ForConditionalGeneration
# + id="0XCj427dklXZ"
# Project configuration object (defined in drive/MyDrive/LM/global_param.py).
myconfig = MyConfig()
# + id="_tPukXF-WWkp"
# Name of the pretrained T5 checkpoint to load, taken from the project config.
PRETRAINED_MODEL_NAME = myconfig.PRETRAINED_MODEL_NAME
# tokenizer for encoding the text
t5_tokenizer = T5Tokenizer.from_pretrained(PRETRAINED_MODEL_NAME)
# + colab={"base_uri": "https://localhost:8080/"} id="lVcZDsHHWw-_" outputId="1e6722ec-233d-440f-95e4-27baab83f98b"
# Example: encode a sentence padded to the configured source length and
# inspect the resulting token-id tensor and its length.
input_ids = t5_tokenizer.encode_plus("The passenger walking in the park", max_length= myconfig.SOURCE_LEN, padding='max_length', return_tensors='pt')['input_ids'].squeeze()
print(input_ids)
print(len(input_ids))
# + colab={"base_uri": "https://localhost:8080/"} id="xIBPXS_yl8KJ" outputId="4e70d204-619a-4eb7-dfdf-bce37e166cab"
input_ids = t5_tokenizer.encode_plus("The passenger walks in the park. I hate the movie.",return_tensors='pt')['input_ids'].squeeze()
print(input_ids)
print(len(input_ids))
# + colab={"base_uri": "https://localhost:8080/"} id="4Vr4ehcUnT2g" outputId="941546ed-07d5-493a-8cd5-b8c9ea5a972a"
input_ids = t5_tokenizer.encode_plus("The passenger <extra_id_2> in the park. I hate the movie.</s>",return_tensors='pt')['input_ids'].squeeze()
print(input_ids)
print(len(input_ids))
# + colab={"base_uri": "https://localhost:8080/"} id="ylpmRS0LmjJP" outputId="3dc7991e-077d-4348-b3d7-9642f1dffe46"
test_ids = torch.tensor([5])
output = t5_tokenizer.decode(input_ids, skip_special_tokens=True)
print(output)
# + id="WctEfr47WxBj"
input_ids = t5_tokenizer("The <extra_id_0> walks in <extra_id_1> park", return_tensors="pt").input_ids
# + id="0mHaJ-32WxFd"
# + id="-OgJ1tHMQyto"
'''
Compare the changes from the origin sentence to the edited sentence.
For example: Input:
origin = "I want to acchieve something to school."
edited = "I do something to go for it to school."
Output:
"1-2:\"do something\".3-5:\"go for it\"."
'''
# nltk.word_tokenize() as tokenizer
def words_origin2edited(origin, edited):
    """Summarize how `edited` differs from `origin` at the word level.

    Both sentences are split into word tokens; difflib locates the spans
    where they disagree, and each changed span is rendered as
    '<start>-<end>:"<replacement words>".' where the indices refer to the
    origin token list.  The concatenation of the fragments is returned.
    """
    src_tokens = nltk.word_tokenize(origin)
    dst_tokens = nltk.word_tokenize(edited)
    matcher = difflib.SequenceMatcher(None, src_tokens, dst_tokens)
    fragments = []
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if tag == 'equal':
            continue
        replacement = " ".join(dst_tokens[j1:j2])
        fragments.append(f'{i1}-{i2}:"{replacement}".')
    return "".join(fragments)
# t5tokenizer() as tokenizer
def t5_origin2edited(origin, edited):
    """Summarize how `edited` differs from `origin`, using T5 token indices.

    Same contract as words_origin2edited, but spans are expressed over the
    T5 tokenizer's input ids rather than nltk word tokens, and the changed
    spans are decoded back to text for the summary string.
    """
    src_ids = t5_tokenizer.encode_plus(origin, return_tensors='pt')['input_ids'].squeeze().tolist()
    dst_ids = t5_tokenizer.encode_plus(edited, return_tensors='pt')['input_ids'].squeeze().tolist()
    matcher = difflib.SequenceMatcher(None, src_ids, dst_ids)
    fragments = []
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        if tag == 'equal':
            continue
        # Decode just the changed id span back into text for the summary.
        changed_text = t5_tokenizer.decode(torch.tensor(dst_ids[j1:j2]), skip_special_tokens=True)
        fragments.append(f'{i1}-{i2}:"{changed_text}".')
    return "".join(fragments)
# + id="oiKBmucSQyvq"
'''
Restore the edited sentence based on the changes cg and the original sentence.
For example: Input:
cg = 1-2:\"do something\".3-5:\"go for it\".
origin = "I want to acchieve something to school."
Output:
"I do something to go for it to school."
'''
# nltk.word_tokenize() as tokenizer
def words_restore(cg, origin):
    """Re-apply a change summary `cg` (from words_origin2edited) to `origin`.

    Each fragment '<start>-<end>:"<text>".' replaces origin tokens
    [start:end) with the tokenized <text>.  Fragments are applied from the
    last one backwards so earlier indices remain valid as tokens shift.
    """
    edits = cg.split('.')[0:-1]
    tokens = nltk.word_tokenize(origin)
    # Matches e.g. 1-2:"do something" -> groups (1), (2), (do something).
    pattern = re.compile(r'^(\d+)-(\d+):\"(.*?)\"')
    for edit in reversed(edits):
        match = re.search(pattern, edit)
        start, end, text = match.group(1), match.group(2), match.group(3)
        tokens[int(start):int(end)] = nltk.word_tokenize(text)
    # Join without a space before the final '.' token, then re-attach it.
    return " ".join(tokens[0:-1]) + '.'
# t5_tokenizer() as tokenizer
def t5_restore(cg, origin):
    """Re-apply a change summary `cg` (from t5_origin2edited) to `origin`.

    Works on T5 token ids: each fragment replaces the id span [start:end)
    of the encoded origin with the encoding of the fragment's text.
    Fragments are applied back-to-front so indices stay valid.
    """
    edits = cg.split('.')[0:-1]
    ids = t5_tokenizer.encode_plus(origin, return_tensors='pt')['input_ids'].squeeze().tolist()
    # Matches e.g. 1-2:"do something" -> groups (1), (2), (do something).
    pattern = re.compile(r'^(\d+)-(\d+):\"(.*?)\"')
    for edit in reversed(edits):
        match = re.search(pattern, edit)
        start, end, text = match.group(1), match.group(2), match.group(3)
        repl_ids = t5_tokenizer.encode_plus(text, return_tensors='pt')['input_ids'].squeeze().tolist()
        ids[int(start):int(end)] = repl_ids
    # Decoding restores spacing, so no manual '.' handling is needed here.
    restored_sts = t5_tokenizer.decode(torch.tensor(ids), skip_special_tokens=True)
    return restored_sts
# + id="b2zNbh4ZQyyA"
# testing
a = "I want to achieve something to school."
b = "I do something to go for it to school."
# a_list = nltk.word_tokenize(a)
# b_list = nltk.word_tokenize(b)
# a_list = "qabxcd"
# b_list = "abycdf"
# print(a_list)
# print(b_list)
# + colab={"base_uri": "https://localhost:8080/"} id="4H632KtKePTl" outputId="4143a992-af14-4d85-a3bc-27224deb5767"
print(words_origin2edited(a,b))
print(words_restore(words_origin2edited(a,b), a))
# + colab={"base_uri": "https://localhost:8080/"} id="pcXzSQgIeRdA" outputId="875c2ee5-a035-48c7-d514-9aa3c2313fef"
print(t5_origin2edited(a,b))
print(t5_restore(t5_origin2edited(a,b), a))
# + colab={"base_uri": "https://localhost:8080/"} id="KbmTecmleTOG" outputId="91c4790a-639d-41b1-c785-a89972bffea3"
print(t5_restore(words_origin2edited(a,b), a))
# + colab={"base_uri": "https://localhost:8080/"} id="swrP7Dfxuxkg" outputId="dce41f8d-dff2-44da-aaa5-8cc32934d662"
ss = t5_tokenizer.encode_plus("I want to achieve something to school.",return_tensors='pt')['input_ids'].squeeze()
print(ss)
print(len(ss))
# + colab={"base_uri": "https://localhost:8080/"} id="avCsN8xevRke" outputId="7ecd9bc1-a1a6-441e-a8fe-b05d77ebf4c0"
test_ids = torch.tensor([9, 75, 9781,162,424])
#output = t5_tokenizer.decode(ss, skip_special_tokens=True, clean_up_tokenization_spaces=True)
output = t5_tokenizer.decode(test_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False)
print(output)
# + colab={"base_uri": "https://localhost:8080/"} id="5MxurvH2Qy0F" outputId="323e6292-a4e6-42a3-94b6-e5513fae5760"
print(words_origin2edited(a,b))
print(words_restore(words_origin2edited(a,b), a))
# + colab={"base_uri": "https://localhost:8080/"} id="cmScuC7irn62" outputId="9a1a0e8f-9ce3-45bf-c7b1-e723a7e78d1f"
print(t5_origin2edited(a,b))
# + colab={"base_uri": "https://localhost:8080/"} id="n8i4hUMcdmw5" outputId="b6eb64a0-d732-4ae8-fc95-0e78a7e3dd78"
print(t5_restore(t5_origin2edited(a,b), a))
# + id="Yqynmuvyvo5-"
# testing
a = "I want to achieve something to school."
b = "I do something to go for it to school."
# + id="2TL3a7EfQy16"
print(t5_origin2edited(a,b))
print(t5_restore(words_origin2edited(a,b), a))
# + [markdown] id="H0_o1eQTgC08"
# # 1. Preparation
# + colab={"base_uri": "https://localhost:8080/"} id="sM7Mi7sKgG7_" outputId="c9d86575-49b6-4ef6-d2e3-0ab9ef16ee41"
#training df
# Load the small supervised split of the TimeTravel dataset
# (one JSON record per line).
small_path = root + '/TimeTravel/train_supervised_small.json'
small_df = pd.read_json(small_path, lines=True)
small_df.head()
print(len(small_df))
# + colab={"base_uri": "https://localhost:8080/"} id="wlHNWbSSgG-S" outputId="b46612a9-c372-4e45-b5bc-f55d92ba527d"
# Data cleaning, remove the examples with the same original_ending and edited_ending
# 'edited_ending' holds three sentences per example; join them into one string
# so it can be compared directly against 'original_ending'.
comb_small_text=[]
for i in range(len(small_df)):
    comb_small_text.append(small_df.loc[i, 'edited_ending'][0] +" "+ small_df.loc[i, 'edited_ending'][1] +" "+ \
        small_df.loc[i, 'edited_ending'][2])
small_df.insert(6, "ground_truth", comb_small_text)
# Keep only the rows where the edit actually changed the ending.
small_cleaned_df = small_df[small_df.ground_truth != small_df.original_ending]
small_cleaned_df = small_cleaned_df.reset_index()
print(len(small_cleaned_df))
# + id="oZU2FDtwgHAq"
# text_a: source, text_b: target,
text_a, text_b, word_changes = [], [], []
for i in range(len(small_cleaned_df)):
ori_text = small_cleaned_df.loc[i, 'original_ending']
text_a.append("premise: " + small_cleaned_df.loc[i, 'premise'] + " initial: " + \
small_cleaned_df.loc[i, 'initial'] + " counterfactual: " + small_cleaned_df.loc[i, 'counterfactual'] + \
" original_ending: " + ori_text)
#text_a.append(re.sub(re_pat, df.loc[i, 'edit1'], df.loc[i, 'original1']))
edited_text = small_cleaned_df.loc[i, 'edited_ending'][0] +" "+ small_cleaned_df.loc[i, 'edited_ending'][1] +" "+ \
small_cleaned_df.loc[i, 'edited_ending'][2]
text_b.append("edited_ending: " + edited_text)
word_change = words_origin2edited(ori_text, edited_text)
word_changes.append(word_change)
# + id="stGCfTLppaEQ"
# + id="LDjraQLJpaGF"
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="7S08NkWiUNH5" outputId="45d0b623-7c91-4947-abcb-743b80f3d8a7"
train_df = pd.DataFrame({'source_text': text_a, 'target_text': text_b, 'word_changes': word_changes})
train_df.head()
# + id="l7kJnlQQUafL"
#train_df.to_excel(root + 'TimeTravel/' + 'output' + model_version + '.xlsx')
train_df.to_excel(root + 'TimeTravel/' + 'cleaned_small_1.0.xlsx')
# + id="kDNlr47epblq"
# + id="mCvfEieQpbog"
# + id="rXeA26D2pbs9"
# + id="F9U8a3hfpbvT"
# + id="9yBnqBn_pbw2"
# + id="EbX5_NkQpbz-"
# + id="Lx1IpsX9pb1l"
# + id="8wWFnf1Bpb59"
# + id="n2EYZ4Zppb7y"
# + id="09bqdL5lpcAN"
# + [markdown] id="aWS7stjGgJXr"
# # trash code
# + colab={"base_uri": "https://localhost:8080/"} id="OqWZJ5B-Wmjt" outputId="5943d55c-c437-4645-d245-865047c371d2"
# a = "I paid the cashier and patiently waited for my drink."
# b = "I paid the cashier and patiently waited at the counter for my drink."
a = "I want to acchieve something to school."
b = "I do something to go for it to school."
a_list = nltk.word_tokenize(a)
b_list = nltk.word_tokenize(b)
a_list = "qabxcd"
b_list = "abycdf"
print(a_list)
print(b_list)
# + colab={"base_uri": "https://localhost:8080/"} id="tWdWTQNhW2dv" outputId="93043461-79df-493d-cc4b-aabcb0650024"
s = difflib.SequenceMatcher(None, a_list, b_list)
for block in s.get_matching_blocks():
print(block)
# + colab={"base_uri": "https://localhost:8080/"} id="q5ETmjnvG11Y" outputId="aef84da6-c2aa-44d0-a1c6-da179e7071c3"
print(a_list[2])
# + colab={"base_uri": "https://localhost:8080/"} id="_MEe1kh5XGgS" outputId="1da758e1-55fa-4c0f-9e81-7a540df6ccef"
print(a_list[0:0+8])
print(a_list[8:8+3])
print(a_list[11:11+0])
# + colab={"base_uri": "https://localhost:8080/"} id="5gE7MKlhjkhL" outputId="fc72c8ea-e9f0-4258-b99b-14ba812b3da7"
matches = []
matches.append([0, 0, 0])
for block in s.get_matching_blocks():
#matches.append([block[0], block[1], block[2]])
matches.append([i for i in block])
#matches.append(block)
print(matches)
# explanation: matches[i][0] are the a index, matches[i][1] are the b index, matches[i] [2] are the lengths of same (matched) words.
# + id="AnfI2FCJE3zY"
changes = []
for i in range(len(matches) - 1):
#print(matches[i])
if ((matches[i][0]+ matches[i][2] < matches[i+1][0]) & (matches[i][1]+ matches[i][2] < matches[i+1][1])): # replace
string = (" ").join(b_list[(matches[i][1]+matches[i][2]) : matches[i+1][1]])
changes.append(f"replacing_{matches[i][0]+matches[i][2]}-{matches[i+1][0]}:\"{string}\".")
elif ((matches[i][0]+ matches[i][2] < matches[i+1][0]) & (matches[i][1]+ matches[i][2] == matches[i+1][1])): # delete
string = (" ").join(b_list[(matches[i][1]+matches[i][2]) : matches[i+1][1]])
changes.append(f"deleting_{matches[i][0]+matches[i][2]}-{matches[i+1][0]}:\"{string}\".")
elif ((matches[i][0]+ matches[i][2] == matches[i+1][0]) & (matches[i][1]+ matches[i][2] < matches[i+1][1])): # insert
string = (" ").join(b_list[(matches[i][1]+matches[i][2]) : matches[i+1][1]])
changes.append(f"inserting_{matches[i][0]+matches[i][2]}-{matches[i+1][0]}:\"{string}\".")
# + colab={"base_uri": "https://localhost:8080/"} id="xa7DymdFjp5L" outputId="cbb026bd-90b3-406a-a734-2d14c7a1351c"
print(changes)
# + colab={"base_uri": "https://localhost:8080/", "height": 183} id="DKcE0ZamHvXp" outputId="372fd634-2978-4042-ce3f-da641ae8be91"
c = copy.deepcopy(a_list)
c[7:7] = nltk.word_tokenize("at the counter")
# + colab={"base_uri": "https://localhost:8080/"} id="8XVQ6JC1IoVJ" outputId="95c98ba2-baa2-4058-9346-4365167819fd"
print(a_list)
# + [markdown] id="9NPsRv5fLlqu"
#
# + colab={"base_uri": "https://localhost:8080/"} id="-gCx3vMlIXAH" outputId="d01f8ab7-e7b8-41db-f675-778f315033ca"
print(c)
# + id="IAQMdU-Y7Fdx"
# + id="jTp3jU6r7FgH"
# + id="6poKpW-Y7Fjb"
# + [markdown] id="SSnIVhGr7FsD"
# # another way
# + colab={"base_uri": "https://localhost:8080/"} id="j6MMOFuy7HaO" outputId="2ac7d059-09ae-4d92-949a-d8b06efd1da9"
# a = "qabxcd"
# b = "abycdf"
# Summarize the diff between a_list and b_list via SequenceMatcher opcodes.
cgs = []
s = difflib.SequenceMatcher(None, a_list, b_list)
for tag, i1, i2, j1, j2 in s.get_opcodes():
    print('{:7} a[{}:{}] --> b[{}:{}] {!r:>8} --> {!r}'.format(tag, i1, i2, j1, j2, a_list[i1:i2], b_list[j1:j2]))
    #print('{:7} a[{}:{}] --> b[{}:{}] {!r} --> {!r}'.format(tag, i1, i2, j1, j2, a[i1:i2], b[j1:j2]))
    if (tag != 'equal'):
        #cgs.append(f'{tag}_{i1}-{i2}:\"{(" ").join(b_list[j1:j2])}\".')
        # Fix: the f-string below was missing its opening quote (syntax error).
        cgs.append(f'{i1}-{i2}:\"{(" ").join(b_list[j1:j2])}\".')
# + colab={"base_uri": "https://localhost:8080/"} id="-K39xwnj8-22" outputId="1ef0ec24-15cb-417b-8789-b02837bee324"
print(cgs)
# + colab={"base_uri": "https://localhost:8080/"} id="HpD7DiEJ_0on" outputId="8ef05a93-760f-43e0-8f30-9777b6f20235"
t=(" ".join(cgs))
print(t)
# + colab={"base_uri": "https://localhost:8080/"} id="fErC-Sgg_8Jq" outputId="87c06da8-ad8b-4e88-e10c-b8d3ea675691"
print(t.split(' '))
# + [markdown] id="LbPSYyrnAzG2"
# function
# + id="ItJzZVixAz3C"
def origin2edited(origin, edited):
    """Word-level change summary: spans of `origin` tokens that `edited` replaces.

    Returns fragments of the form '<start>-<end>:"<replacement words>".'
    concatenated together; indices refer to the origin token list.
    """
    origin_tokens = nltk.word_tokenize(origin)
    edited_tokens = nltk.word_tokenize(edited)
    diff = difflib.SequenceMatcher(None, origin_tokens, edited_tokens)
    pieces = []
    for tag, i1, i2, j1, j2 in diff.get_opcodes():
        if tag != 'equal':
            pieces.append(f'{i1}-{i2}:"{" ".join(edited_tokens[j1:j2])}".')
    return "".join(pieces)
# + colab={"base_uri": "https://localhost:8080/"} id="0JdPesuILBZH" outputId="a704ecc3-c93d-4b9b-d007-370b1c3fdae5"
print(origin2edited(a, b))
# + colab={"base_uri": "https://localhost:8080/"} id="VdQnOsuQML3b" outputId="e32b8444-3d92-4dde-d677-e25d49826494"
cgs = sen.split('.')[0:-1]
print(len(cgs))
# + id="2VRXAb0aCN6D"
def restore(cg, origin):
    """Apply a change summary `cg` (from origin2edited) back onto `origin`.

    Edits are applied from the last fragment to the first so that token
    indices in earlier fragments are not shifted by later replacements.
    """
    fragments = cg.split('.')[0:-1]
    tokens = nltk.word_tokenize(origin)
    # Matches e.g. 1-2:"do something" -> groups (1), (2), (do something).
    spec = re.compile(r'^(\d+)-(\d+):\"(.*?)\"')
    for fragment in reversed(fragments):
        found = re.search(spec, fragment)
        lo, hi, replacement = found.group(1), found.group(2), found.group(3)
        tokens[int(lo):int(hi)] = nltk.word_tokenize(replacement)
    # The final token is the sentence's '.'; join without a space before it.
    return " ".join(tokens[0:-1]) + '.'
# + colab={"base_uri": "https://localhost:8080/"} id="syvfOb8MJLCX" outputId="33401bcb-fd57-4213-8258-746be6ea25c8"
print(a)
# + colab={"base_uri": "https://localhost:8080/"} id="zMikObXEJv8o" outputId="14be344a-7e8b-4235-e668-6726d4c9ae3d"
print(sen)
# + id="N9OlZnDbL_uJ"
b = "I do something to go for it to school."
# + colab={"base_uri": "https://localhost:8080/"} id="Dtjnr-1xI-D8" outputId="616b150f-766e-4cab-9c72-4a6764ed6a23"
print(restore(origin2edited(a, b), a))
# + colab={"base_uri": "https://localhost:8080/"} id="ikRFtl1AEfxw" outputId="1f86e345-d03a-4cd4-ce3a-59299c8eafdb"
i1 = re.compile(r'^(\d+)-(\d+):\"(.*?)\"')
#i1 = re.compile(r'(\d+)')
sen = "1-2:\"do something\".3-5:\"go for it\"."
i3 = re.search(i1, sen).group(1)
print(i3)
# + colab={"base_uri": "https://localhost:8080/"} id="Ihy8hE46H-Nx" outputId="a29229dd-3e22-4e3b-8081-5a3e3abf6e78"
pattern = re.compile(r'^(\d+)-(\d+):\"(.*?)\"') # matches 1-2:"do something". (1), (2), (do something)
results = re.search(pattern, sen) # i3 means the content between ""
i1, i2, i3 = results.group(1), results.group(2), results.group(3)
print(i1, i2, i3)
# + colab={"base_uri": "https://localhost:8080/", "height": 270} id="k1E9T6KrIxva" outputId="a456e082-8d8d-456e-8885-8bff19f98743"
# Fix: the function is named origin2edited; 'orgin2edited' raised a NameError.
print(restore(origin2edited(a, b), a))
# + id="p2IPbkPPNJHV"
# + id="erxSByCNNJQt"
origin_list = t5_tokenizer.encode_plus(a,return_tensors='pt')['input_ids'].squeeze()
edited_list = t5_tokenizer.encode_plus(b,return_tensors='pt')['input_ids'].squeeze()
print(origin_list)
print(edited_list)
print("-----------")
print(origin_list[2] == edited_list[3])
# for i in range(len(origin_list)):
# print(origin_list[i] == edited_list[i])
| some_examples_1.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # GC plot of SCT libs
#
# Line plot of average GC% distribution of each SCT lib
#
# The files referenced (and sampled) below are lists of the GC proportion of each read (tallied using an awk one-liner).
# +
# %matplotlib inline
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import random
sns.set(style="darkgrid")
sns.set_context("paper")
# -
def reservoir_sample(filename, n=10000):
    """Uniformly sample up to n lines (parsed as floats) from filename.

    Classic reservoir sampling (Algorithm R): the first n lines fill the
    reservoir; afterwards line i replaces a random reservoir slot with
    probability n/(i+1), giving every line an equal chance of being kept.
    Returns fewer than n values when the file has fewer than n lines.
    """
    sample = []
    with open(filename) as fh:
        for i, line in enumerate(fh):
            if i < n:
                sample.append(float(line))
            else:
                # Fix: draw from [0, i], not [0, n].  The original drew a
                # constant-range index regardless of how many lines had been
                # seen, so every later line replaced a slot with probability
                # n/(n+1) and the sample was heavily biased toward the end
                # of the file instead of being uniform.
                j = random.randint(0, i)
                if j < n:
                    sample[j] = float(line)
    return sample
def seek_sample(filename, n=10000):
    """Draw ~n float values from filename by seeking to random byte offsets.

    Each draw seeks to a uniformly random byte, discards the (likely
    partial) line found there, and parses the next complete line.  Lines
    are therefore chosen with probability proportional to the length of
    the line before them rather than strictly uniformly — acceptable when
    all lines are about the same length (one GC proportion per line).
    """
    sample = []
    with open(filename, 'rb') as fh:
        fh.seek(0, 2)  # seek to end-of-file to learn the file size
        filesize = fh.tell()
        # Sorted offsets make the subsequent seeks sequential on disk.
        random_set = np.sort(np.random.randint(filesize, size=n))
        for loc in random_set:
            fh.seek(loc)
            fh.readline()  # skip the partial line at the landing point
            # NOTE(review): if loc lands inside the final line, this second
            # readline() returns b'' and float() raises ValueError — assumed
            # not to happen on these large files; verify for short inputs.
            sample.append(float(fh.readline()))
    return np.array(sample)
# %timeit -n1 a=reservoir_sample("Pb_DARK2_2_ATCACG_L002_R1_001.fastq")
# %timeit -n100 b=seek_sample("Pb_DARK2_2_ATCACG_L002_R1_001.fastq")
d2_2=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_DARK2_2_ATCACG_L002_R1_001.fastq")
d2_3=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_DARK2_3_TTAGGC_L002_R1_001.fastq")
d2_6=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_DARK2_6_CTTGTA_L002_R1_001.fastq")
d2_7=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_DARK2_7_GATCAG_L002_R1_001.fastq")
d2_8=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_DARK2_8_TAGCTT_L002_R1_001.fastq")
d1_2=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_dark_2_TAGCTT_L001_R1_001.fastq")
d1_3=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_dark_3_GGCTAC_L001_R1_001.fastq")
d1_5=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_dark_5_CTTGTA_L001_R1_001.fastq")
l1_9=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_light_9_ATGTCA_L001_R1_001.fastq")
l1_10=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_light_10_CCGTCC_L001_R1_001.fastq")
l1_11=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_light_11_GTCCGC_L001_R1_001.fastq")
b1=seek_sample('bulk1_r1_gc.txt')
b2=seek_sample('bulk2_r1_gc.txt')
# +
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import random
sns.set(style="darkgrid")
sns.set_context("paper")
def seek_sample(filename, n=10000):
    """Draw ~n float values from filename by seeking to random byte offsets.

    Redefinition identical to the earlier cell (kept so this cell runs
    standalone).  Each draw seeks to a random byte, skips the partial line
    there, and parses the following full line; sampling is length-weighted
    rather than strictly uniform, which is fine for same-length lines.
    """
    sample = []
    with open(filename, 'rb') as fh:
        fh.seek(0, 2)  # seek to end-of-file to learn the file size
        filesize = fh.tell()
        # Sorted offsets make the subsequent seeks sequential on disk.
        random_set = np.sort(np.random.randint(filesize, size=n))
        for loc in random_set:
            fh.seek(loc)
            fh.readline()  # skip the partial line at the landing point
            sample.append(float(fh.readline()))
    return np.array(sample)
d2_2=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_DARK2_2_ATCACG_L002_R1_001.fastq")
d2_3=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_DARK2_3_TTAGGC_L002_R1_001.fastq")
d2_6=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_DARK2_6_CTTGTA_L002_R1_001.fastq")
d2_7=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_DARK2_7_GATCAG_L002_R1_001.fastq")
d2_8=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_DARK2_8_TAGCTT_L002_R1_001.fastq")
d1_2=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_dark_2_TAGCTT_L001_R1_001.fastq")
d1_3=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_dark_3_GGCTAC_L001_R1_001.fastq")
d1_5=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_dark_5_CTTGTA_L001_R1_001.fastq")
l1_9=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_light_9_ATGTCA_L001_R1_001.fastq")
l1_10=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_light_10_CCGTCC_L001_R1_001.fastq")
l1_11=seek_sample("/data/sequencing_projects/single_cell_transcriptomes/Pb_light_11_GTCCGC_L001_R1_001.fastq")
b1=seek_sample('bulk1_r1_gc.txt')
b2=seek_sample('bulk2_r1_gc.txt')
#d2_2=parse_dist("Pb_DARK2_2_ATCACG_L002_R1_001.fastq")
fig = plt.figure(figsize=(8,10))
plt.suptitle("Kernel Density Estimates of Read GC Proportion", size=14)
ax1 = fig.add_subplot(421)
ax1.xaxis.set_major_formatter(plt.NullFormatter())
ax2 = fig.add_subplot(422)
ax2.xaxis.set_major_formatter(plt.NullFormatter())
ax2.yaxis.set_major_formatter(plt.NullFormatter())
ax3 = fig.add_subplot(423)
ax4 = fig.add_subplot(424)
ax4.yaxis.set_major_formatter(plt.NullFormatter())
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=0.3)
sns.kdeplot(d1_2, ax=ax1, label='Dark1-2').set(xlim=(0, 1), ylim=(0, 9), ylabel="Density", title="Dark 1 Libraries")
sns.kdeplot(d1_3, ax=ax1, label='Dark1-3').set(xlim=(0, 1), ylim=(0, 9), ylabel="Density", title="Dark 1 Libraries")
sns.kdeplot(d1_5, ax=ax1, label='Dark1-5').set(xlim=(0, 1), ylim=(0, 9), ylabel="Density", title="Dark 1 Libraries")
handles, labels = ax1.get_legend_handles_labels()
ax1.legend(frameon=True)
#for i in [d2_2, d2_3, d2_6, d2_7, d2_8]:
# sns.kdeplot(i, ax=ax2).set(xlim=(0, 1), ylim=(0, 9), title="Dark 2 Libraries")
sns.kdeplot(d2_2, ax=ax2, label='Dark2-2').set(xlim=(0, 1), ylim=(0, 9), title="Dark 2 Libraries")
sns.kdeplot(d2_3, ax=ax2, label='Dark2-3').set(xlim=(0, 1), ylim=(0, 9), title="Dark 2 Libraries")
sns.kdeplot(d2_6, ax=ax2, label='Dark2-6').set(xlim=(0, 1), ylim=(0, 9), title="Dark 2 Libraries")
sns.kdeplot(d2_7, ax=ax2, label='Dark2-7').set(xlim=(0, 1), ylim=(0, 9), title="Dark 2 Libraries")
sns.kdeplot(d2_8, ax=ax2, label='Dark2-8').set(xlim=(0, 1), ylim=(0, 9), title="Dark 2 Libraries")
handles, labels = ax2.get_legend_handles_labels()
ax2.legend(frameon=True)
#for i in [l1_9, l1_10, l1_11]:
# sns.kdeplot(i, ax=ax3).set(xlim=(0, 1), ylim=(0, 9), xlabel="Read Mean GC Proportion", ylabel="Density", title="Light 1 Libraries")
sns.kdeplot(l1_9, ax=ax3, label="Light1-9").set(xlim=(0, 1), ylim=(0, 9), xlabel="Read Mean GC Proportion", ylabel="Density", title="Light 1 Libraries")
sns.kdeplot(l1_10, ax=ax3, label="Light1-10").set(xlim=(0, 1), ylim=(0, 9), xlabel="Read Mean GC Proportion", ylabel="Density", title="Light 1 Libraries")
sns.kdeplot(l1_11, ax=ax3, label="Light1-11").set(xlim=(0, 1), ylim=(0, 9), xlabel="Read Mean GC Proportion", ylabel="Density", title="Light 1 Libraries")
ax3.legend(frameon=True)
sns.kdeplot(b1, ax=ax4, label="Bulk1").set(xlim=(0, 1), ylim=(0, 9), xlabel="Read Mean GC Proportion", ylabel="Density", title="Trimmed Bulk Libraries")
sns.kdeplot(b2, ax=ax4, label="Bulk2").set(xlim=(0, 1), ylim=(0, 9), xlabel="Read Mean GC Proportion", ylabel="Density", title="Trimmed Bulk Libraries")
ax4.legend(frameon=True)
fig.savefig("raw_lib_gc_prop.svg")
#sns.kdeplot(np.hstack([l1_9, l1_10, l1_11, d1_2, d1_3, d1_5, d2_2, d2_3, d2_6, d2_7, d2_8]), ax=ax4, shade=True).set(ylim=(0, 9), xlim=(0, 1), xlabel="Read Mean GC Proportion", title="All Single Cell Libraries")
###NOTE SCT ARE RAW BULK IS TRIMMED
#plt.savefig("lib_gc_prop.svg")
# -
fig.savefig("raw_lib_gc_prop.svg")
# ### Plot trimmed
#
# +
## TRImmed
def seek_sample2(filename, n=10000):
    """Read every value (one float per line) from filename into an array.

    NOTE(review): despite the name and signature, this performs no
    sampling — the `n` parameter is unused and the whole file is loaded.
    Confirm that is intended before relying on it for very large files.
    """
    sample = []
    with open(filename, 'rb') as fh:
        sample = np.array([float(x) for x in fh.readlines()])
    return sample
d2_2=seek_sample2("/data/sequencing_projects/gc_prop/dark2_2_Q30_R1_paired.fastq_gc_prop.txt")
d2_3=seek_sample2("/data/sequencing_projects/gc_prop/dark2_3_Q30_R1_paired.fastq_gc_prop.txt")
d2_6=seek_sample2("/data/sequencing_projects/gc_prop/dark2_6_Q30_R1_paired.fastq_gc_prop.txt")
d2_7=seek_sample2("/data/sequencing_projects/gc_prop/dark2_7_Q30_R1_paired.fastq_gc_prop.txt")
d2_8=seek_sample2("/data/sequencing_projects/gc_prop/dark2_8_Q30_R1_paired.fastq_gc_prop.txt")
d1_2=seek_sample2("/data/sequencing_projects/gc_prop/dark1_2_Q30_R1_paired.fastq_gc_prop.txt")
d1_3=seek_sample2("/data/sequencing_projects/gc_prop/dark1_3_Q30_R1_paired.fastq_gc_prop.txt")
d1_5=seek_sample2("/data/sequencing_projects/gc_prop/dark1_5_Q30_R1_paired.fastq_gc_prop.txt")
l1_9=seek_sample2("/data/sequencing_projects/gc_prop/light1_9_Q30_R1_paired.fastq_gc_prop.txt")
l1_10=seek_sample2("/data/sequencing_projects/gc_prop/light1_10_Q30_R1_paired.fastq_gc_prop.txt")
l1_11=seek_sample2("/data/sequencing_projects/gc_prop/light1_11_Q30_R1_paired.fastq_gc_prop.txt")
# -
d2_2
# +
# Four-panel KDE of per-read GC proportion, one panel per library group.
# NOTE: the single-cell libraries shown here are Q30-trimmed; the bulk
# libraries (b1, b2, loaded earlier in the notebook) are trimmed as well.
fig = plt.figure(figsize=(8, 10))
plt.suptitle("GC Proportion KDE of Trimmed Read", size=14)
ax1 = fig.add_subplot(421)
ax1.xaxis.set_major_formatter(plt.NullFormatter())  # x tick labels only on bottom row
ax2 = fig.add_subplot(422)
ax2.xaxis.set_major_formatter(plt.NullFormatter())
ax2.yaxis.set_major_formatter(plt.NullFormatter())  # y tick labels only on left column
ax3 = fig.add_subplot(423)
ax4 = fig.add_subplot(424)
fig.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=0.3)
# One loop per panel replaces the previous copy-pasted kdeplot calls;
# axis settings are identical for every series within a panel.
for sample, label in [(d1_2, 'Dark1-2'), (d1_3, 'Dark1-3'), (d1_5, 'Dark1-5')]:
    sns.kdeplot(sample, ax=ax1, label=label).set(
        xlim=(0, 1), ylim=(0, 9), ylabel="Density",
        title="Q30 Trimmed Dark 1 Libraries")
ax1.legend(frameon=True)
for sample, label in [(d2_2, 'Dark2-2'), (d2_3, 'Dark2-3'), (d2_6, 'Dark2-6'),
                      (d2_7, 'Dark2-7'), (d2_8, 'Dark2-8')]:
    sns.kdeplot(sample, ax=ax2, label=label).set(
        xlim=(0, 1), ylim=(0, 9), title="Q30 Trimmed Dark 2 Libraries")
ax2.legend(frameon=True)
for sample, label in [(l1_9, "Light1-9"), (l1_10, "Light1-10"), (l1_11, "Light1-11")]:
    sns.kdeplot(sample, ax=ax3, label=label).set(
        xlim=(0, 1), ylim=(0, 9), xlabel="Read Mean GC Proportion",
        ylabel="Density", title="Q30 Trimmed Light 1 Libraries")
ax3.legend(frameon=True)
for sample, label in [(b1, "Bulk1"), (b2, "Bulk2")]:
    sns.kdeplot(sample, ax=ax4, label=label).set(
        xlim=(0, 1), ylim=(0, 9), xlabel="Read Mean GC Proportion",
        ylabel="Density", title="Trimmed Bulk Libraries")
ax4.legend(frameon=True)
plt.savefig("lib_gc_prop_q30_trimmed.svg")
# -
# !ls
# +
## Kodama GC Prop
# KDE of per-read GC proportion for the six raw Kodama bulk transcriptomes.
_kodama_accessions = ("DRR003754", "DRR003755", "DRR003756",
                      "DRR003757", "DRR003758", "DRR003759")
d54, d55, d56, d57, d58, d59 = (
    seek_sample2(f"/data/kodama/{acc}_1.fastq.bz2_gc.txt")
    for acc in _kodama_accessions
)
fig = plt.figure(figsize=(8, 10))
plt.suptitle("GC Proportion KDE of Kodama Read", size=14)
ax1 = fig.add_subplot(421)
for sample, acc in zip((d54, d55, d56, d57, d58, d59), _kodama_accessions):
    sns.kdeplot(sample, ax=ax1, label=acc).set(
        xlim=(0, 1), ylim=(0, 9), title="Raw Kodama Bulk Transcriptomes",
        ylabel="Density", xlabel="Read Mean GC Proportion")
fig.savefig("kodama_gc_prop.svg")
# -
# ## Conclusions
#
# So the libraries that actually assemble all lack this GC-rich peak, or they have a relatively higher 40-50% GC peak, which is where we would expect it to be.
| chapters/4.Chapter_2/figures/plot_notebooks/GC library plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.0 64-bit (''harmonizome'': venv)'
# name: python_defaultSpec_1593410149462
# ---
# # Harmonizome ETL: The Human Protein Atlas (THPA) - RNA-Seq
# Created by: <NAME> <br>
# Credit to: <NAME>
#
# Data Source: http://www.proteinatlas.org/about/download
# appyter init
# Enable appyter's Jinja magics ({% ... %} blocks and %%appyter cells);
# the lambda gives appyter access to this notebook's global namespace.
from appyter import magic
magic.init(lambda _=globals: _())
# +
import sys
import os
from datetime import date
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import harmonizome.utility_functions as uf
import harmonizome.lookup as lookup
# -
# %load_ext autoreload
# %autoreload 2
# ### Notebook Information
print('This notebook was run on:', date.today(), '\nPython version:', sys.version)
# # Initialization
# +
# %%appyter hide_code
{# Declare the two sections of the rendered appyter form: one for the data
   upload, one for settings. #}
{% do SectionField(
    name='data',
    title='Upload Data',
    img='load_icon.png'
) %}
{% do SectionField(
    name='settings',
    title='Settings',
    img='setting_icon.png'
) %}
# +
# %%appyter code_eval
{# File picker for the expression matrix; the regex constraint only admits
   .zip uploads. #}
{% do DescriptionField(
    name='description',
    text='The examples below were sourced from <a href="http://www.proteinatlas.org/about/download" target="_blank">www.proteinatlas.org</a>. If clicking on the examples does not work, they should be downloaded directly from the source. The first example provided below is for Tissue Gene data, while the second is for Cell Line data.',
    section='data'
) %}
{% set matrix_file = FileField(
    constraint='.*\.zip$',
    name='expression_matrix',
    label='Expression Matrix (tsv.zip)',
    default='rna_tissue_consensus.tsv.zip',
    examples={
        'rna_tissue_consensus.tsv.zip': 'https://www.proteinatlas.org/download/rna_tissue_consensus.tsv.zip',
        'rna_celline.tsv.zip': 'https://www.proteinatlas.org/download/rna_celline.tsv.zip'
    },
    section='data'
) %}
# +
# %%appyter code_eval
{# The dataset choice selects which THPA column ('Tissue' or 'Cell line')
   is used downstream as the sample axis. #}
{% set group = ChoiceField(
    name='dataset',
    label='Dataset',
    choices={
        'RNA Consensus Tissue Gene Data': 'Tissue',
        'RNA HPA Cell Line Gene Data': 'Cell line'
    },
    default='RNA Consensus Tissue Gene Data',
    section='settings'
) %}
# -
# ### Load Mapping Dictionaries
# Gene-symbol and gene-ID lookup tables used below to harmonize identifiers.
symbol_lookup, geneid_lookup = lookup.get_lookups()
# ### Output Path
# +
# %%appyter code_exec
# Build the output file prefix and directory from the chosen dataset
# ('{{group}}' is substituted by appyter at render time); only the first
# word of the dataset name is kept, e.g. 'Cell line' -> 'cell'.
output_name = 'thpa-rnaseq-{{group}}'.split(' ')[0].lower()
path = 'Output/THPA-RNASeq-{{group}}'.split(' ')[0]
# exist_ok avoids the check-then-create race of os.path.exists + makedirs
os.makedirs(path, exist_ok=True)
# -
# # Load Data
# +
# %%appyter code_exec
# Read only the three needed columns from the (zipped) TSV; appyter
# substitutes {{matrix_file}} with a reference to the uploaded file.
matrix = pd.read_csv(
    {{matrix_file}},
    sep='\t', usecols=['Gene name', '{{group}}', 'NX']
)
# -
matrix.head()
matrix.shape
# # Pre-process Data
# ## Merge Duplicates
# %%appyter code_exec
# Average the NX expression value over duplicate (gene, sample) pairs.
matrix = matrix.groupby(['Gene name', '{{group}}']).mean()
# ## Unstack to Create Matrix
# Pivot so rows are genes and columns are tissues / cell lines.
matrix = matrix.unstack()
matrix.head()
# Drop the residual 'NX' level left on the column MultiIndex by unstack.
matrix.columns = matrix.columns.droplevel()
matrix.index.name = 'Gene Symbol'
matrix.head()
# ## Save Unfiltered Matrix to file
# uf.* are project helpers from harmonizome.utility_functions.
uf.save_data(matrix, path, output_name + '_matrix_unfiltered',
             compression='gzip', dtype=np.float32)
# # Filter Data
# ## Map Gene Symbols to Up-to-date Approved Gene Symbols
matrix = uf.map_symbols(matrix, symbol_lookup)
matrix.shape
# ## Merge Duplicate Genes By Rows and Duplicate Columns
matrix = uf.merge(matrix, 'row')
matrix = uf.merge(matrix, 'column')
matrix.shape
# ## Remove Data that is More Than 95% Missing and Impute Missing Data
matrix = uf.remove_impute(matrix)
matrix.head()
matrix.shape
# ## Log2 Transform
matrix = uf.log2(matrix)
matrix.head()
# ## Normalize Matrix (Quantile Normalize the Matrix by Column)
matrix = uf.quantile_normalize(matrix)
matrix.head()
# ## Normalize Matrix (Z-Score the Rows)
matrix = uf.zscore(matrix)
matrix.head()
# ## Histogram of First Sample
matrix.iloc[:, 0].hist(bins=100)
# ## Histogram of First Gene
matrix.iloc[0, :].hist(bins=100)
# ## Save Filtered Matrix
uf.save_data(matrix, path, output_name + '_matrix_filtered',
             ext='tsv', compression='gzip')
# # Analyze Data
# ## Create Gene List
# Presumably pairs each matrix gene symbol with its gene ID via geneid_lookup
# — see uf.gene_list for details.
gene_list = uf.gene_list(matrix, geneid_lookup)
gene_list.head()
gene_list.shape
uf.save_data(gene_list, path, output_name + '_gene_list',
             ext='tsv', compression='gzip', index=False)
# ## Create Attribute List
# Attributes are the matrix columns (tissues or cell lines).
attribute_list = uf.attribute_list(matrix)
attribute_list.head()
attribute_list.shape
uf.save_data(attribute_list, path, output_name + '_attribute_list',
             ext='tsv', compression='gzip')
# ## Create matrix of Standardized values (values between -1, and 1)
standard_matrix = uf.standardized_matrix(matrix)
standard_matrix.head()
uf.save_data(standard_matrix, path, output_name + '_standard_matrix',
             ext='tsv', compression='gzip')
# ## Plot of A Single Celltype, Normalized Value vs. Standardized Value
plt.plot(matrix[matrix.columns[0]],
         standard_matrix[standard_matrix.columns[0]], 'bo')
plt.xlabel('Normalized Values')
plt.ylabel('Standardized Values')
plt.title(standard_matrix.columns[0])
plt.grid(True)
# ## Create Ternary Matrix
# Presumably thresholds standardized values into {-1, 0, +1}
# (down / unchanged / up) — confirm against uf.ternary_matrix.
ternary_matrix = uf.ternary_matrix(standard_matrix)
ternary_matrix.head()
uf.save_data(ternary_matrix, path, output_name + '_ternary_matrix',
             ext='tsv', compression='gzip')
# ## Create Gene and Attribute Set Libraries
# Export up/down gene sets per attribute and attribute sets per gene.
uf.save_setlib(ternary_matrix, 'gene', 'up', path, output_name + '_gene_up_set')
uf.save_setlib(ternary_matrix, 'gene', 'down', path, output_name + '_gene_down_set')
uf.save_setlib(ternary_matrix, 'attribute', 'up', path,
               output_name + '_attribute_up_set')
uf.save_setlib(ternary_matrix, 'attribute', 'down', path,
               output_name + '_attribute_down_set')
# ## Create Attribute Similarity Matrix
# Transposing puts attributes on rows so similarity is attribute-vs-attribute.
attribute_similarity_matrix = uf.similarity_matrix(standard_matrix.T, 'cosine')
attribute_similarity_matrix.head()
uf.save_data(attribute_similarity_matrix, path,
             output_name + '_attribute_similarity_matrix',
             compression='npz', symmetric=True, dtype=np.float32)
# ## Create Gene Similarity Matrix
gene_similarity_matrix = uf.similarity_matrix(standard_matrix, 'cosine')
gene_similarity_matrix.head()
uf.save_data(gene_similarity_matrix, path,
             output_name + '_gene_similarity_matrix',
             compression='npz', symmetric=True, dtype=np.float32)
# ## Create Gene-Attribute Edge List
edge_list = uf.edge_list(standard_matrix)
uf.save_data(edge_list, path, output_name + '_edge_list',
             ext='tsv', compression='gzip')
# # Create Downloadable Save File
uf.archive(path)
# ### Link to download output files: [click here](./output_archive.zip)
| appyters/THPA_RNA_Seq_Harmonizome_ETL/THPA (RNA-Seq).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# Inspect the first experiment: which voters are amplifiers, and the
# per-voter mean scores under each voting function, split by group.
amplifiers = np.genfromtxt('amplifiers_0.csv', delimiter=',').astype(int)
print(amplifiers)
normals = 1 - amplifiers
print(normals)
weights_biased = np.atleast_2d(np.genfromtxt('weights-biased_0.csv', delimiter=','))
weights_unbiased = np.atleast_2d(np.genfromtxt('weights-unbiased_0.csv', delimiter=','))
condorcet_biased = np.atleast_2d(np.genfromtxt('condorcet-biased_0.csv', delimiter=','))
unanimity_biased = np.atleast_2d(np.genfromtxt('unanimity_0.csv', delimiter=','))
amp_mask = amplifiers.astype(bool)
normal_mask = normals.astype(bool)
# Same print order as before: amplifier means then normal means, for
# weights-biased, weights-unbiased, condorcet-biased, unanimity in turn.
for scores in (weights_biased, weights_unbiased, condorcet_biased, unanimity_biased):
    print(scores[:, amp_mask].mean(axis=0))
    print(scores[:, normal_mask].mean(axis=0))
# +
# Aggregate per-experiment mean satisfaction for amplifiers vs.
# non-amplifiers under each voting function, skipping degenerate runs in
# which one of the two groups is empty (nothing to compare there).
n_experiments = 200
all_weights_ub_amp_means = []
all_weights_ub_namp_means = []
all_weights_amp_means = []
all_weights_namp_means = []
all_condorcet_amp_means = []
all_condorcet_namp_means = []
all_unanimity_amp_means = []
all_unanimity_namp_means = []
for i in range(n_experiments):
    amplifiers = np.genfromtxt('amplifiers_%d.csv' % i, delimiter=',').astype(int)
    normals = 1 - amplifiers
    if sum(amplifiers) == 0 or sum(normals) == 0:
        continue  # skip runs without both groups
    # Boolean column masks, computed once per experiment.
    amp = amplifiers.astype(bool)
    namp = normals.astype(bool)
    weights_biased = np.atleast_2d(np.genfromtxt('weights-biased_%d.csv' % i, delimiter=','))
    all_weights_amp_means.append(weights_biased[:, amp].mean())
    all_weights_namp_means.append(weights_biased[:, namp].mean())
    weights_unbiased = np.atleast_2d(np.genfromtxt('weights-unbiased_%d.csv' % i, delimiter=','))
    all_weights_ub_amp_means.append(weights_unbiased[:, amp].mean())
    all_weights_ub_namp_means.append(weights_unbiased[:, namp].mean())
    condorcet_biased = np.atleast_2d(np.genfromtxt('condorcet-biased_%d.csv' % i, delimiter=','))
    all_condorcet_amp_means.append(condorcet_biased[:, amp].mean())
    all_condorcet_namp_means.append(condorcet_biased[:, namp].mean())
    unanimity_biased = np.atleast_2d(np.genfromtxt('unanimity_%d.csv' % i, delimiter=','))
    all_unanimity_amp_means.append(unanimity_biased[:, amp].mean())
    all_unanimity_namp_means.append(unanimity_biased[:, namp].mean())
# -
all_weights_amp_means  # per-experiment means: biased WCSP, amplifiers
all_weights_namp_means  # biased WCSP, non-amplifiers
all_condorcet_amp_means  # Condorcet, amplifiers
all_condorcet_namp_means  # Condorcet, non-amplifiers
# +
# Credit: <NAME>
# Reference example: matplotlib's grouped bar chart gallery demo, kept
# verbatim as the template for the comparison plot in the next cell.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from collections import namedtuple
n_groups = 5
means_men = (20, 35, 30, 35, 27)
std_men = (2, 3, 4, 1, 2)
means_women = (25, 32, 34, 20, 25)
std_women = (3, 5, 2, 3, 3)
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.4
error_config = {'ecolor': '0.3'}
rects1 = ax.bar(index, means_men, bar_width,
                alpha=opacity, color='b',
                yerr=std_men, error_kw=error_config,
                label='Men')
rects2 = ax.bar(index + bar_width, means_women, bar_width,
                alpha=opacity, color='r',
                yerr=std_women, error_kw=error_config,
                label='Women')
ax.set_xlabel('Group')
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(index + bar_width / 2)
ax.set_xticklabels(('A', 'B', 'C', 'D', 'E'))
ax.legend()
fig.tight_layout()
plt.show()
# +
# Credit: <NAME>
# Grouped bar chart comparing mean satisfaction degrees (with std-dev error
# bars over experiments) of amplifiers vs. non-amplifiers under each of the
# four voting functions aggregated above.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from collections import namedtuple
n_groups = 4 # weighted and condorcet
# within each group amplifiers and non-amplifiers correspond to men and women
# Convert the per-experiment python lists (built in the loop above) to
# ndarrays so .mean() / .std() can be taken below.
all_weights_ub_amp_means = np.array(all_weights_ub_amp_means)
all_weights_ub_namp_means = np.array(all_weights_ub_namp_means)
all_weights_amp_means = np.array(all_weights_amp_means)
all_weights_namp_means = np.array(all_weights_namp_means)
all_condorcet_amp_means = np.array(all_condorcet_amp_means)
all_condorcet_namp_means = np.array(all_condorcet_namp_means)
all_unanimity_amp_means = np.array(all_unanimity_amp_means)
all_unanimity_namp_means = np.array(all_unanimity_namp_means)
# Bar heights and error bars, ordered: WCSP unbiased, WCSP biased,
# Condorcet, Pareto/unanimity (matching the x tick labels below).
means_amp = (all_weights_ub_amp_means.mean(), all_weights_amp_means.mean(), all_condorcet_amp_means.mean(), all_unanimity_amp_means.mean())
std_amp = (all_weights_ub_amp_means.std(), all_weights_amp_means.std(), all_condorcet_amp_means.std(), all_unanimity_amp_means.std())
means_namp = (all_weights_ub_namp_means.mean(), all_weights_namp_means.mean(), all_condorcet_namp_means.mean(), all_unanimity_namp_means.mean())
std_namp = (all_weights_ub_namp_means.std(), all_weights_namp_means.std(), all_condorcet_namp_means.std(), all_unanimity_namp_means.std())
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
opacity = 0.4
error_config = {'ecolor': '0.3'}
rects1 = ax.bar(index, means_amp, bar_width,
                alpha=opacity, color='b',
                yerr=std_amp, error_kw=error_config,
                label='Amplifiers')
rects2 = ax.bar(index + bar_width, means_namp, bar_width,
                alpha=opacity, color='r',
                yerr=std_namp, error_kw=error_config,
                label='Non-amplifiers')
ax.set_xlabel('Group')
ax.set_ylabel('Satisfaction degrees')
ax.set_title('Satisfaction degrees by voting function and group')
ax.set_xticks(index + bar_width / 2)
ax.set_xticklabels(('WCSP Unbiased', 'WCSP Biased', 'Condorcet', 'Pareto / Unanimity'))
ax.legend()
fig.tight_layout()
plt.savefig("lunch-selection-comparison.pdf")
plt.show()
# -
all_weights_amp_means  # now an ndarray (converted in the plotting cell)
means_amp  # per-voting-function mean satisfaction, amplifiers
means_namp  # per-voting-function mean satisfaction, non-amplifiers
means_amp  # repeated display
| evaluation/minibrass-voting-experiment/evaluation/archive/lunch-200-0,75/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Feature Engineering, Scaling, and Cross Validation
#
# ## Feature Engineering
#
# ### Dealing with Categorical Features
#
# Every machine learning process starts with a data set from which you wish to extract information. Feature engineering lies at the beginning of this process. It deals with cleaning data and the extraction or creation of features from the data set in order to facilitate the prediction of a response. The tools for data cleaning we discussed in the first three chapters. Thankfully, the data sets we use in this course are mostly already cleansed but you should be aware that in practice this is what consumes a significant amount of time. As for the feature extraction part, we saw an example of it in the chapter on KNN where we downloaded share prices and "generated" features (i.e. lagged returns) out of it. In that setup all of the feature values were of numerical kind. In the chapters on logistic regression, LDA and QDA we worked with the 'Default' data set. This set had a **binary categorical feature** (e.g. student: yes/no). In order to work with these string values we simply used Pandas' `factorize()` function. Here's the code we applied:
#
# `# Factorize 'No' and 'Yes' in columns 'default' and 'student'
# df['defaultFac'] = df.default.factorize()[0]
# df['studentFac'] = df.student.factorize()[0]`
#
# For the case of binary categories this works perfectly fine. But what if the number of categories is greater than two? How should we deal with it? To illustrate this, imagine the following sample data set:
import pandas as pd
# Toy housing data set: price, number of rooms, city district code ('dist'),
# and construction material ('fab') for four properties.
data = pd.DataFrame({
    'price': [1390000, 1300000, 840000, 1400000],
    'rooms': [3.5, 4.5, 3.5, 5.5],
    'dist': [5, 2, 12, 9],
    'fab': ['bricks', 'concrete', 'wood', 'concrete'],
})
data
# How do we deal with the 'fab' column? Easy, you might think: we just use a numerical mapping, for example 0 = bricks, 1 = concrete, 2 = wood. This is precisely what the output of `pd.factorize()` would be.
pd.factorize(data['fab'])
# Well, if we would follow through with this approach and feed these values into a Scikit-learn ML function, the model would make the fundamental assumption that bricks > concrete > wood. Furthermore, we have numerical values for `dist` (the districts of Zurich) where we know precisely that these represent categorical values (2 = Wollishofen, 5 = Industrie, 9 = Altstetten, 12 = Schwamendingen). And this - geographic or demographic jokes aside - would not make much sense (VanderPlas (2016)). Houston we've got a problem!
#
# >**NOTE:**
# * **In the binary case with $X_i \in \{0, 1\}$ ordering is not an issue in scikit-learn.**
# * **Ordering is relevant for features. Class labels for response $y$ are not ordinal for the ML algorithm we discuss in this course. Therefore it doesn't matter what number we assign them, and factorization is still valid (e.g. $y \in \{\text{Product X}=0, \text{Product Y}=1, \text{Product Z}=2\}$.**
#
#
#
# As soon as we have more than 2 categories, factorizing is in most cases no longer the appropriate solution. Instead, we should make use of a method called **one-hot encoding**, which effectively creates extra columns with dummy variables (booleans) indicating the presence of a category with a value of 1 or 0, respectively. There are numerous ways of encoding categorical values. In this chapter we will briefly touch upon Pandas' and Scikit-Learn's tools. For those looking for a thorough tutorial please refer to [<NAME>'s (2017) *Guide to Encoding Categorical Values in Python *](http://pbpython.com/categorical-encoding.html).
#
#
# #### get_dummies()
#
# We start with Pandas' `pd.get_dummies()` function, which works of course seamlessly with `DataFrames` and is truly easy to work with. The two caveats are that (a) if a column contains numbers, it will not transform them into categorical values, and (b) this approach does not work in scikit-learn pipelines (pipelines are an ML workflow management tool that we will discuss in more detail in later chapters). Regarding (a), here's what is meant:
pd.get_dummies(data)  # only the string column 'fab' is one-hot encoded
# Only the column 'fab', which contained strings, was converted. To make `get_dummies()` bend to our will, we have to convert the district values into strings first. Then we can apply the conversion.
data['dist'] = data['dist'].astype(str)  # treat district codes as categories
pd.get_dummies(data)  # now both 'dist' and 'fab' are one-hot encoded
# #### LabelEncoder()
#
# The alternatives in Scikit-learn are called `LabelEncoder` (equivalent to `pd.factorize()`) and `OneHotEncoder()` (similar to `pd.get_dummies()`). Their setup is a bit more abstract and cumbersome, but it also has advantages compared to Pandas' solution. For example the `OneHotEncoder()` can be used in Scikit-learn pipelines and it returns a sparse matrix which is highly efficient computationally. But for the sake of brevity we will not go into too much detail here but simply show for reference how these Scikit-learn functions are applied. Ultimately it is at this stage a question of preference what functions you want to use for your task. What is important is that this step is done at the very beginning of the process - even before a train-test split is applied. That way you make sure the mapping is the same for both train and test split.
# +
from sklearn import preprocessing as pp
# Factorize 'fab' column (similar to pd.factorize());
# LabelEncoder maps each distinct string to an integer code.
le = pp.LabelEncoder()
data_le = le.fit_transform(data['fab'])
data_le
# -
# To convert the integer class labels back into their original representation we can simply use the `.inverse_transform()` method.
le.inverse_transform(data_le)
# #### OneHotEncoder
#
# The corresponding Scikit-learn function to Panda's `get_dummies()` is called `OneHotEncoder` from the preprocessing sublibrary.
# +
# Select categorical columns
X_cat = data[['dist', 'fab']]
# One-hot-encoding of said columns
# NOTE(review): `sparse=True` was renamed to `sparse_output` in
# scikit-learn 1.2 and removed in 1.4 — update if running a recent sklearn.
ohe = pp.OneHotEncoder(sparse=True)
ohe.fit_transform(X_cat)
# -
# Densify the sparse result so it can be shown as a DataFrame.
print(pd.DataFrame(ohe.fit_transform(X_cat).toarray()))
ohe.categories_
# The output here is a sparse matrix. Computationally and from a data storage perspective this is highly efficient because the matrix contains only integers as entries. However, the print-out of this sparse matrix is less indicative. All column labels are empty. To make sense of it we have to understand the setup of this sparse matrix. Each column is a binary representation of a value. The first column indicates which of the rows had a `dist=12` (only row index 2), second `dist=2` (only row index 1), third `dist=5`, fourth `dist=9`, fifth `fab=bricks` etc.
#
# Beyond the convoluted output a further problem of the above approach is that this still leaves us with the task to combine numeric ('price', 'rooms') and categorical output (the aforementioned sparse matrix) into one feature matrix X. To simplify this task, there is a wrapper function `ColumnTransformer` that facilitates this in a simple way.
# +
from sklearn.compose import ColumnTransformer
# Instantiate OneHotEncoder
ohe = pp.OneHotEncoder()
# create list of columns that should be transformed,
# i.e. columns 2 and 3 are categorical and should be
# one-hot-encoded
trnsfrms_list = [('cat', ohe, [2, 3])]
# Make use of the ColumnTransformer function,
# fit/transform df and display output;
# remainder='passthrough' keeps the untouched numeric columns.
trnsfrms = ColumnTransformer(transformers=trnsfrms_list, remainder='passthrough')
X_ohe = trnsfrms.fit_transform(data)
X_ohe
# -
# -
# Here's a short overview of the discussed functions and how they relate to each other:
#
# |No. of Categories | Applicable Functions |
# |:----------------:|:---------------------|
# | 2 | `pd.factorize(), sklearn.preprocessing.LabelEncoder()` |
# | > 2 | `pd.get_dummies(), sklearn.preprocessing.OneHotEncoder()`|
# > Incorrect class labeling is one of the more common mistakes made in machine learning. The crux of the matter is that even if we forget to one-hot encode our features, ML algorithms might still yield good results. Yet, as we know now, such results would be flawed. Therefore it is important to correctly preprocess the data set before applying any ML algorithms.
# ## Partitioning a Data Set Into Train, Test Sets
#
# In section 4.5 of the script we have briefly touched upon the importance of randomly splitting our data into a training set (to train/calibrate our ML algorithms) and a test set (to evaluate the accuracy of our model). As [Pedregosa et al. (2011) put it in the scikit-learn documentation](http://scikit-learn.org/stable/modules/cross_validation.html): "*Learning the parameters of a prediction function and testing it on the same data is a methodological mistake: a model that would just repeat the labels of the samples that it has just seen would have a perfect score but would fail to predict anything useful on yet-unseen data. This situation is called overfitting. To avoid it, it is common practice when performing a (supervised) machine learning experiment to hold out part of the available data as a test set `X_test, y_test`.*" The split ratio is somewhat arbitrary but literature suggests a range between 60:40 (train:test) and 80:20 for smaller samples and 90:10 to 99:1 for large sets (several thousands of observations) (Raschka (2015)).
#
# In its `model_selection` submodule scikit-learn offers a convenient function called `train_test_split` that randomly splits a data set into separate train and holdout sets. To show how this function is applied we first load the publicly available 'adult' data set. It includes 14 features from the 1994 US census that measure an individual's characteristics. The response vector is `income`. The prediction task is to determine whether a person earns over 50K a year. For more information see [the data description](https://archive.ics.uci.edu/ml/datasets/adult). This is of course a fairly simple data set but will do for our purposes. Nevertheless, in reality related tasks such as this are fairly common for credit card companies, leasing firms or banks that need to verify a customer's application for a credit (card), leasing, loan etc. (One could design this either as regression task or classification task with an adequate number of classes).
#
# Before we apply the `train_test_split` function we convert categorical columns in a first step - as learned above.
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
# 1994 US census 'adult' data: 14 features, response 'income' (>50K or not).
df = pd.read_csv('Data/adult.csv', sep=',')
df.head(3)
# The first six columns are numeric values. The remaining columns are categorical information and thus need to be converted.
# +
# Get column names
# Columns from index 6 onward are the categorical ones (first six numeric).
cols = df.columns.values[6:]
# Factorize 'sex' and 'income' column (both binary)
df[cols[-2:]] = df[cols[-2:]].apply(lambda x: pd.factorize(x)[0])
df.head(3)
# +
# Assign response to y
y = df[cols[-1]]
# Factorize categorical values, assign output to X
# (one-hot encode the remaining string columns; the last column — the
# response — is excluded via iloc)
X = pd.get_dummies(df.iloc[:, :-1])
X.head()
# -
X.shape
# Now we are ready to perform the train-test split. For this example we use a test set size of 30%. The parameter `random_state=0` fixes the random split in a way such that results are reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, stratify=y)
# A stratified sample is one that maintains the proportion of values as in the original data set. If, for example, the response vector $y$ is a binary categorical variable with 25% zeros and 75% ones, `stratify=y` ensures that the random splits have 25% zeros and 75% ones too. Note that `stratify=y` does not mean `stratify=yes` but rather tells the function to take the categorical proportions from response vector `y`.
# ## Feature Scaling
#
# Feature scaling is a crucial step in preparing data for ML applications. While some of the algorithms are invariant to the feature's scale (e.g. decision trees and random forests, which we will discuss in the next chapter), the majority of machine learning and optimization algorithms perform much better if features are scaled. The reasons are two-fold: for one, most ML algorithms have optimization functions to find the optimal coefficients/hyperparameters, and these functions work more efficiently on scaled values; for another, algorithms such as KNN, which use a distance measure, will put much more weight on features that have a larger scale than others (Raschka (2015), Müller and Guido (2017)). A good example for the latter is a data set that contains 'age' as well as 'income' variables. For a KNN model a difference in $1'000 salary is enormous compared to a difference of 10 years in age. Thus such a model would be driven by 'income' - and this would be contrary to our intuition (James et al. (2013)).
#
# There are two common approaches to bringing different features onto the same scale: **normalization and standardization**. Unfortunately these terms are often used quite loosely in different fields so that the meaning has to be derived from the context they are mentioned (Raschka (2015)).
#
#
# ### Normalization
#
# In general, normalization refers to the process of rescaling the features to a range of $[0, 1]$. This can be viewed as a special case of min-max scaling. In our context, we normalize a feature column $X_i$ by the following convention:
#
# $$\begin{equation}
# X_{i}^{\text{norm}} = \frac{X_i - \min(X_i)}{\max(X_i) - \min(X_i)}
# \end{equation}$$
#
# With Scikit-learn this is applied as follows:
# +
from sklearn.preprocessing import MinMaxScaler
# Get cols to scale
# Only the first six (continuous) columns are rescaled; dummies stay 0/1.
cols_scl = X.columns.values[:6]
# Apply MinMaxScaler on continuous columns only
# Fit on the training split only; the test split is transformed with the
# training min/max to avoid information leakage.
mms = MinMaxScaler()
X_train_norm = mms.fit_transform(X_train[cols_scl]) # fit & transform
X_test_norm = mms.transform(X_test[cols_scl]) # ONLY transform
# -
# It is important to point out that we fit the `MinMaxScaler` **only once on the training data**, not the test data. On the test data we only run the `.transform()` method. The parameters we get out of the training set (i.e. `mms.data_min_` and `mms.data_max_`) are then used to transform any new data (such as the holdout set).
#
# > **Why aren't we applying the scaling on the full data set, before we split the data into train and test set? This would not be a good idea because of one particular reason: we would "contaminate" our model with information about the test set. If we were to do it we run the risk of having a very good model for our train AND test data but this model would perform poorly on new data that previously was not part of any train or test set. The effect of such an approach would be poor generalization. By the way, the same is true for feature selection. This too should not be done on the full set but only on a training set.**
#
#
# ### Standardization
#
# When we need values in a bounded interval, normalization via min-max scaling is a useful technique. Yet for many machine learning algorithms standardization is a more practical approach. The rationale behind it is that most linear models (such as logistic regression, LDA or support vector machines) initialize coefficients/parameters/weights to small random values close to 0. By standardizing our features, we center the values at mean 0 with standard deviation of 1. This makes the computations easier to learn the coefficients/parameters/weights. Beyond that, standardization does not distort useful statistical information about outliers and with that makes the algorithm less sensitive to them in contrast to min-max scaling (Raschka (2015)). We express the process of standarizing a feature column by the following equation:
#
# $$\begin{equation}
# X_{i}^{\text{std}} = \frac{X_i - \bar{X}_i}{\sigma_{X_i}}
# \end{equation}$$
#
# Here we follow the common notation that $\bar{X}_i$ is the mean of vector $X_i$ and $\sigma_{X_i}$ the vector's standard deviation.
# +
from sklearn.preprocessing import StandardScaler
# Standardize the continuous feature columns to mean 0 / std 1.
# The scaler is fitted on the TRAINING data only; the learned mean and
# variance are then reused on the test data, so no test-set statistics
# leak into the preprocessing step.
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train[cols_scl])  # learn params on train & transform
X_test_std = stdsc.transform(X_test[cols_scl])        # ONLY transform, with train params
# -
# Again we fit the `StandardScaler()` only on the train set. The parameters of this first step (`stdsc.mean_, stdsc.var_`) are then applied to the test set with the code `stdsc.transform(X_test)`. [Note that the variance in `StandardScaler()` is based on the population variance](https://stackoverflow.com/questions/44220290/sklearn-standardscaler-result-different-to-manual-result) (division by $n$; degree of freedom of 0, biased estimator). This explains differences if you compare `stdsc.var_` with `X_train.var()` for small data sets. Use `X_train.var(ddof=1)` instead to get the same result as with the `StandardScaler`.
# ### Other Scaling Methods
#
# For our purposes the two scaling methods introduced above are sufficient. However, this is not to say that other methods are unhelpful or unnecessary. Scikit-learn of course offers many more scaling procedures. For an overview see [Scikit-learn's guide on the topic](http://scikit-learn.org/stable/modules/preprocessing.html). For those who like to see the effects of the different scaling methods in 2D plots, see Scikit-learn's tutorial ["Compare the effect of different scalers on data with outliers"](http://scikit-learn.org/stable/auto_examples/preprocessing/plot_all_scaling.html#sphx-glr-auto-examples-preprocessing-plot-all-scaling-py). Finally, <NAME> published a text ["About Feature Scaling and Normalization"](http://sebastianraschka.com/Articles/2014_about_feature_scaling.html) that not only discusses the different scaling methods but also shows the positive effect scaling can have on ML predictions/scores. Please give it a read to understand the importance of scaling.
# ## Cross Validation
#
# ### Model Evaluation
#
# To evaluate our models, we have discussed the necessity to split our data set into training and test set. For this we introduced the `train_test_split()` function. A ML algorithm is calibrated on the training set and then, to see how well our model generalizes to new, previously unseen data, applied to the test data. In this section we expand on aspects of evaluating a model by discussing cross validation (CV). This is a statistical method of performance evaluation that is more stable and thorough than a simple split into training and test set (Müller and Guido (2017)).
#
#
# #### $k$-Fold Cross Validation
#
# In $k$-fold CV we randomly split the training data set into $k$ folds without replacement, where $k − 1$
# folds are used for the model training and the remaining fold is used for testing. This procedure is repeated $k$ times so that we obtain $k$ performance estimates. The performance of a ML algorithm is then simply the average of the performance for the $k$ different, individual folds.
#
# * **What is the benefit of $k$-Fold Cross Validation?** Performance estimates are less sensitive to the subpartitioning of the training data compared to a simple train/test split (Raschka (2015)). By applying it, we receive a more stable indication of a model's generalization performance.
# * **How many folds $k$ should we use?** The number of folds $k$ is usually 5 (large data sets) or 10 (small data sets) as suggested by Breiman and Spector (1992) or Kohavi (1995).
#
# In general, CV is best used in combination with hyperparameter tuning. Raschka (2015, p. 175) writes:
#
# > **"Typically, we use k-fold cross-validation for model tuning, that is, finding the optimal hyperparameter values that yield a satisfying generalization performance. Once we have found satisfactory hyperparameter values, we can retrain the model on the complete training set and obtain a final performance estimate using the independent
# test set."**
#
# The following figure displays the concept of $k$-fold CV with $k=5$. The training data set is divided into 5
# folds. For each iteration 4 folds are used for training and 1 fold for model evaluation. The estimated performance $E$, this could be classification accuracy, an error rate etc., is calculated on the basis of the five performances $E_i$ (one for each iteration).
# <img src="Graphics/0209_kFoldCV.png" alt="kFoldCV" style="width: 1000px;"/>
# > **Despite the fact that many text books are applying CV on the full data set, students should be aware that in order to be truly consistent, $k$-fold CV should essentially be applied to the training data only. Similar to our discussion on feature scaling, the argument is again that if we use the full available data set, our results are somewhat contaminated or distorted.**
#
# Below we show how to use Scikit-learn to perform $k$-fold CV. The data is our 'adult' set from before and the algorithm we apply is logistic regression. Note here that we will use Scikit-learn's implementation of logistic regression (see [here](https://scikit-learn.org/stable/whats_new/v0.21.html#known-major-bugs) for the `max_iter` parameter). We start with the `StratifiedKFold()` function. This is a slight variation of the standard CV introduced above. Stratifying in this context means that proportions between classes are the same in each fold. To evaluate classifiers this `StratifiedKFold()` function is usually preferred (over the simple `KFold()` function) as it preserves the class proportions and thus results in more reliable estimates of generalization performance.
# +
# Import the CV splitter, the scoring helper and the estimator
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.linear_model import LogisticRegression
# Stratified 5-fold CV preserves the class proportions in every fold;
# max_iter=1000 gives the LBFGS solver room to converge on this data
kFold = StratifiedKFold(n_splits=5)
logReg = LogisticRegression(max_iter=1000)
# Run the cross validation on the standardized training data and report
# the per-fold accuracies plus their mean and standard deviation
scores = cross_val_score(logReg, X_train_std, y_train, cv=kFold)
print(scores)
print('CV accuracy on train set: {0: .3f} +/- {1: .3f}'.format(np.mean(scores), np.std(scores)))
# -
# As a comparison we can also run a test on the unscaled data set `X_train`. The results are indeed worse as literature suggests.
# Same CV on the UNSCALED training data, as a comparison to the run
# above (note: this rebinds `scores`)
scores = cross_val_score(logReg, X_train, y_train, cv=kFold)
print('CV accuracy on train set: {0: .3f} +/- {1: .3f}'.format(np.mean(scores), np.std(scores)))
# By default, `cross_val_score` returns the accuracy (or score) of the model. Recall that this is simply the number of correctly classified samples. If we instead wish to have another output, we do this by providing a `scoring` parameter. Below is an example to call for `scoring='roc_auc'`, which will provide the ROC's area under the curve. The full list of available scoring parameter [can be found here](http://scikit-learn.org/stable/modules/model_evaluation.html#scoring-parameter).
# Use the ROC area under the curve instead of accuracy by passing a
# `scoring` parameter
scores = cross_val_score(logReg, X_train_std, y_train, cv=kFold, scoring='roc_auc')
print('CV AUC on train set: {0: .3f} +/- {1: .3f}'.format(np.mean(scores), np.std(scores)))
# Noteworthy is one last function: `cross_validate`. This allows us to define a list of measures that we can pass on to the function as scoring parameter. The output is a dictionary with `fit_time` and `score_time` (time elapsed to fit model/calculate scores), `test_accuracy` and `train_accuracy` (accuracy for the test set and training set, the output on the latter though only if `return_train_score` is manually set to `True`), etc.
# +
from sklearn.model_selection import cross_validate
# Evaluate several metrics at once; return_train_score=True also reports
# the (optimistic) training-fold scores, n_jobs=2 parallelizes the folds
measures = ['accuracy', 'recall', 'roc_auc']
scores = cross_validate(logReg, X_train_std, y_train, cv=kFold,
                        scoring=measures, return_train_score=True, n_jobs=2)
scores
# -
# Mean training-fold and validation-fold accuracies from cross_validate
print('Train set accuracy (CV=5): ', scores['train_accuracy'].mean())
print('Validation set scores (CV=5): ', scores['test_accuracy'].mean())
# BUG FIX: the final model must be fitted on the TRAINING data and only
# *evaluated* on the held-out test set. The original fitted on
# (X_test_std, y_test), which leaks the test labels and reports an
# over-optimistic, meaningless test accuracy.
print('Test set accuracy: ', logReg.fit(X_train_std, y_train).score(X_test_std, y_test))
# A great feature available in all discussed CV functions is the possibility to set the number of CPUs to use to do the computation on. For this define `n_jobs=n`, where `n` is the number of CPU's you want to use. `n_jobs=-1` will use all available cores. This parallelization is especially helpful if you work on large data sets and/or computationally expensive tasks such as CV. If you want to know how many CPUs your computer runs on you can type the following code:
# Number of logical CPUs available — useful for choosing n_jobs
import multiprocessing
multiprocessing.cpu_count()
# #### Leave-One-Out Cross Validation
#
# If we increase the number of folds to $n-1$ ($n$ = number of observations), that is, we train on all points but one in each trial, we call this Leave-One-Out CV or LOOCV. This can be seen as an extreme case of $k$-fold CV. On small data sets this might provide better estimates, but it can be very time consuming, particularly on larger data sets.
# +
from sklearn.model_selection import LeaveOneOut
# Leave-One-Out CV: n folds of size 1 (can be slow on large data)
loocv = LeaveOneOut()
#logReg = LogisticRegression()
# BUG FIX: the LeaveOneOut splitter was created but never handed to
# cross_val_score, so the default 5-fold CV ran instead of LOOCV.
# Passing cv=loocv makes the code do what the text describes.
scores = cross_val_score(logReg, X_train_std, y_train, cv=loocv)
print('LOOCV accuracy on train set:', np.mean(scores))
# -
# ### Other Cross Validation Approaches
#
# While we limit ourselves to the two most known and used splitting strategies, Scikit-learn offers more than just the two presented here. A good overview is provided in the packages' [tutorial on cross validation](http://scikit-learn.org/stable/modules/cross_validation.html). However, as an introduction to the topic and given the use cases in this seminar the above two serve the purpose well.
# # Further Resources
#
#
# In writing this notebook, many resources were consulted. For internet resources the links are provided within the text above and will therefore not be listed again. Beyond these links, the following resources were consulted and are recommended as further reading on the discussed topics:
#
# * <NAME>, and and <NAME>, 1992, Submodel selection and Evaluation in Regression: the X-Random Case, *International Statistical Review* 60: 291–319.
# * <NAME>, <NAME>, and <NAME>, 2001, *The Elements of Statistical Learning* (Springer, New York, NY).
# * <NAME>, <NAME>, <NAME>, and <NAME>, 2013, *An Introduction to Statistical Learning: With Applications in R* (Springer Science & Business Media, New York, NY).
# * <NAME>, 1995, A Study of Cross-Validation and Bootstrap for Accuracy Estimation and Model Selection, in *International Joint Conference on Artificial Intelligence (IJCAI)*, 1137-145, Stanford, CA
# * Müller, <NAME>., and <NAME>, 2017, *Introduction to Machine Learning with Python* (O’Reilly Media, Sebastopol, CA).
# * <NAME>, 2015, *Python Machine Learning* (Packt Publishing Ltd., Birmingham, UK)
#
#
| 0209_CrossValidation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # RNN
#
# ## Overview
#
# When humans think, they think based on their understanding of previous time steps rather than from scratch. Traditional neural networks can’t do this, and it seems like a major shortcoming. For example, imagine you want to do sentiment analysis of some texts: the result will be unclear if the network cannot take short phrases and sentences into account.
#
# Recurrent neural networks address this issue. They are networks with loops in them, allowing information to persist.
#
# <img src="images/rnn_unit.png" width="500"/>
# A recurrent neural network can be thought of as multiple copies of the same network, each passing a message to a successor. Consider what happens if we unroll the above loop:
#
# <img src="images/rnn_units.png" width="500"/>
# As demonstrated in the book, recurrent neural networks may be connected in many different ways: sequences in the input, the output, or in the most general case both.
#
# <img src="images/rnn_connections.png" width="700"/>
# ## Implementation
#
# In our case, we implemented rnn with modules offered by the package of `keras`. To use `keras` and our module, you must have both `tensorflow` and `keras` installed as a prerequisite. `keras` offered very well defined high-level neural networks API which allows for easy and fast prototyping. `keras` supports many different types of networks such as convolutional and recurrent neural networks as well as user-defined networks. About how to get started with `keras`, please read the [tutorial](https://keras.io/).
#
# To view our implementation of a simple rnn, please use the following code:
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
import os, sys
sys.path = [os.path.abspath("../../")] + sys.path
from deep_learning4e import *
from notebook4e import *
psource(SimpleRNNLearner)
# `train_data` and `val_data` are needed when creating a simple rnn learner. Both attributes take lists of examples and the targets in a tuple. Please note that we build the network by adding layers to a `Sequential()` model which means data are passed through the network one by one. `SimpleRNN` layer is the key layer of rnn which acts the recursive role. Both `Embedding` and `Dense` layers before and after the rnn layer are used to map inputs and outputs to data in rnn form. And the optimizer used in this case is the Adam optimizer.
# ## Example
#
# Here is an example of how we train the rnn network made with `keras`. In this case, we used the IMDB dataset which can be viewed [here](https://keras.io/datasets/#imdb-movie-reviews-sentiment-classification) in detail. In short, the dataset consists of movie reviews in text and their labels of sentiment (positive/negative). After loading the dataset we use `keras_dataset_loader` to split it into training, validation and test datasets.
# Load the IMDB sentiment dataset, keeping only the 5000 most frequent
# words, and split it with the project helper into train/val/test
from keras.datasets import imdb
data = imdb.load_data(num_words=5000)
train, val, test = keras_dataset_loader(data)
# Then we build and train the rnn model for 10 epochs:
model = SimpleRNNLearner(train, val, epochs=10)
# The accuracy of the training dataset and validation dataset are both over 80% which is very promising. Now let's try on some random examples in the test set:
# ## Autoencoder
#
# Autoencoders are an unsupervised learning technique in which we leverage neural networks for the task of representation learning. It works by compressing the input into a latent-space representation, to do transformations on the data.
#
# <img src="images/autoencoder.png" width="800"/>
# Autoencoders are learned automatically from data examples. It means that it is easy to train specialized instances of the algorithm that will perform well on a specific type of input and that it does not require any new engineering, only the appropriate training data.
#
# Autoencoders have different architectures for different kinds of data. Here we only provide a simple example of a vanilla encoder, which means they're only one hidden layer in the network:
#
# <img src="images/vanilla.png" width="500"/>
#
# You can view the source code by:
psource(AutoencoderLearner)
# It shows we added two dense layers to the network structures.
| notebooks/chapter19/RNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#export
# temptation
# night watcher to avoid unexpected halt
# when performing request, which freeze the program and won't be able to throw exception
# the best way to limit the run time of a fn, is to run as subprocess
# subprocessing and manipuating threads is too raw for me
# so, i use async program model to deal with the mess
# the program still freeze sometimes
# https://stackoverflow.com/questions/49572547/python-application-freezes-only-ctrl-c-helps
# https://stackoverflow.com/questions/29649173/what-is-the-global-default-timeout
import socket
# Global socket timeout so a hung network request cannot freeze the
# watcher forever (see the linked StackOverflow threads above)
socket.setdefaulttimeout(30.)
import os, sys
from os.path import join
wd = os.getcwd()                 # working directory of the watcher
pyf = join(wd, 'nbexp_main.py')  # script the watcher runs as a subprocess
code = f'{sys.executable} {pyf}' # equivalent shell command (used in cells below)
# -
#export
from functools import partial
# Force flushing on every print so log lines appear immediately
print=partial(print, flush=True)
# True when this module runs inside Jupyter — jupytext/IPython sets
# __doc__ to exactly this sentinel string for interactive sessions
in_jupyter = (__doc__ == 'Automatically created module for IPython interactive environment')
# +
#export
# https://docs.python.org/3/library/asyncio-subprocess.html
import asyncio
import sys
import datetime
import time
# https://stackoverflow.com/questions/44633458/why-am-i-getting-notimplementederror-with-async-and-await-on-windows
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
async def get_date(run_args=[sys.executable, '-c', "import time;print('ing', flush=True);time.sleep(2);print('done')",]
                   , wait_child_sec=3):
    """Run *run_args* as a child process with a hard time limit.

    Captures the child's stdout and stderr and prints them once the
    child exits.  Raises ``asyncio.TimeoutError`` if the child runs
    longer than *wait_child_sec* seconds; the child is killed first so
    no orphan process is left behind.

    NOTE: the mutable default ``run_args`` is never mutated, so sharing
    it across calls is safe here.
    """
    proc = await asyncio.create_subprocess_exec(
        *run_args,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    try:
        # communicate() waits AND drains the pipes concurrently, so a
        # child that writes more than the OS pipe buffer can no longer
        # deadlock us (the original awaited proc.wait() before reading).
        out, err = await asyncio.wait_for(proc.communicate(),
                                          timeout=wait_child_sec)
    except asyncio.TimeoutError:
        # BUG FIX: kill the child before propagating the timeout;
        # previously the subprocess kept running unattended.
        proc.kill()
        await proc.wait()
        raise
    line = ''
    line += out.decode('ascii').rstrip()
    line += '\n'
    line += err.decode('ascii').rstrip()
    if line: print(line)
# -
# testing
if not in_jupyter: asyncio.run(get_date())
# +
#export
# Interval between watcher runs: 0.5 hours, expressed in seconds
hrs = 0.5
sec = hrs * 60 * 60
from time import sleep
import traceback
from nbexp_main import timer, save_notebook  # project-local helpers
# Main watcher loop: run nbexp_main.py as a time-limited subprocess,
# then wait `sec` seconds before the next run. Skipped inside Jupyter.
while True:
    if in_jupyter:
        break;
    try:
        t = timer()
        next(t)
        # get_date() returns None; `date` is kept only for debugging
        date = asyncio.run(get_date([sys.executable, pyf,], 30))
        next(t)
    except asyncio.TimeoutError:
        print('timeout!')
    except Exception as e:
        # Broad catch so one failed run never kills the watcher.
        # NOTE(review): traceback.print_exc() prints the traceback itself
        # and returns None, so the outer print() emits an extra "None".
        print("unexpected err")
        print(traceback.print_exc())
    # Sleep in 10 s slices and re-check elapsed time each slice — behaves
    # better than one long sleep if the computer is suspended mid-wait.
    # NOTE(review): `slient_mode` looks like a typo for `silent_mode`;
    # confirm against nbexp_main.timer's signature before renaming.
    t = timer(slient_mode=True)
    while True:
        sleep(10)
        _, time_cost = next(t)
        if time_cost > sec:
            break;
# -
save_notebook()
# !python notebook2script.py watcher.ipynb
save_notebook()
# +
# # !{code}
# +
# # !python nbexp_watcher.py
# -
| nb/watcher.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# ***
# # Python Basics
# ***
# In this class we're going to look at the basics of the Python programming language. This class and those to come are designed to teach you Python and more specifically Python for data science. Together, we will learn how to store and manipulate data, use and deploy various data science tools for analysis, explore data science projects and much more. Each class has sample code and exercises for you to follow along with.
#
# Throughout this book we will be exploring Python using Jupyter Notebooks. In a previous class we installed Python and the Anaconda data science platform.
#
# For more information on Anaconda and Jupyter Notebook take a look at:
#
# - https://www.anaconda.com
# - https://jupyter.org
#
# ## What is Python?
# Python was created by <NAME> and first released in 1991 as a general purpose programming language. In today's world Python is used almost everywhere to build practically any piece of software you can think of. World class companies such as Google, Facebook, Instagram, Spotify, Netflix and many more use Python to power their applications. We'll take a look at examples of just how they do it in later classes.
#
# Python is open source and available on several operating systems including macOS, Windows, Linux and more.
#
# Python has become almost the standard programming language when working in the field of data science. There are several reasons for this, one being as mentioned the fact that it is open source another is the amazing community supporting Python. Probably the main reason is because of Python’s libraries or packages. Whatever you need to do with Python, develop a website, create a game, or build your companies data science platform, no doubt Python already has a package that can assist you. We will be taking a close look at several packages in later classes.
#
# As just mentioned we will be exploring data science with Python using the Jupyter Notebook. One of the main reasons for this choice of coding tool is that it provides us with instant feedback on our code.
#
# You can, if you so choose, use your current code editor of choice. For every programming task that you perform you will need to use the tools that suit your task best. For data science Jupyter Notebook is that tool but there are other times when I use a more conventional text editor such as the Atom Text Editor from GitHub. I discussed Atom at great length in **class 2 Getting Setup**.
#
# For the remainder of this class we are going to be exploring the basic concepts of Python such as simple calculations, variables and data types. If you are already somewhat familiar with Python feel free to skip to the next class.
# ## Hello World
# Any programming introduction you have ever read or watched has started with the traditional Hello World! program and this class is no different!
print("Hello World")
'Hello World'
# Already with two simple lines of code we've introduced some core Python programming concepts, mainly the print statement and strings.
#
# Strings, or when you are writing code 'str', is Python's way of representing several characters in a sequence such as a sentence. When creating strings we can use either single or double quotes as shown in the code samples above.
# ## Using Python as a Simple Calculator
# Let's try some simple calculations.
1 + 1
8 * 2
124 / 4
# ## Adding Comments to our Code
# When writing code, not just Python, it's always a best practice to add comments to your code. Comments allow you to let other developers, who may be using or reviewing your code in the future, know what you were thinking at the time of writing. After you have written a piece of code it may be several weeks, months or even years before you need to look at it again. By adding comments you can save yourself and other developers a lot of hassle in the future.
# +
# Starting a line of code with '#' creates a comment.
# A comment is ignored when you run your Python code and so has no impact on your code.
# -
# This program adds 1 + 1 together
print(1+1)
# As you can see in the code example above adding a comment had no impact. Take a moment now to try using Python for subtraction, division, exponentiation and modulo.
# ## Variables
# In Python you can assign and save specific values to variables. Variables are something that you create or in Python speak, you declare. Once you declare a variable you can then call it at any time by typing the variable name. Lets take a look at some examples.
#
# Lets create several different kinds of variables.
height = 1.83
weight = 168
employee1 = "<NAME>"
# The examples above are called **assignment statements**. In each example we create a variable name on the right and use the assignment(=) operator to assign it a value. In the first assignment statement above we assign the value of 1.83 to the variable named **height**, the second statement assigns the value of 168 to **weight** and the third assigns the string "<NAME>" to **employee1**.
#
# If we now enter a variable name Python will look for this variable and output it to the screen.
# Print height variable
height
# Print weight variable
weight
# Print employee1 variable
employee1
# Throughout this class we will be making extensive use of variables. They will help to make our code easier to reproduce which in turn will help make our lives easier.
# ## Data Types
# As we progress through our data science journey it will become important to know what type of data we are dealing with. We have already seen one specific example of a data type, string or 'str', which we know is Python's way of representing a sequence of characters. We can confirm the data type of a variable with the function call **type()**.
#
# Let's use this function call now on the variables that we have already created to find out what is their type.
# Find type of variable height
type(height)
# Find type of variable weight
type(weight)
# Find type of variable employee1
type(employee1)
# From the outputs we can see that we are dealing with three different data types, **float**, **int** and **str**. We have already discussed strings.
#
# The data type float is Python's way of telling us that we are dealing with a real number. Real numbers can have both an integer part and a fraction part, for example 38.4.
#
# The second output tells us that we are dealing with an 'int' or integer. Integers are simply whole numbers, 1,2,3,4 and so on or negative numbers, -1, -2, -3, -4.
#
# Python has several more data types to explore. Another common data type is **boolean**. Boolean data types can either be True or False. Think of True as yes and False as no. Or another way is True as 1 and False as 0.
#
# Let's look at some more examples:
# Create a variable: savings
savings = 600
# Print out savings
savings
# Create a yearly interest rate of 1%
yearly_interest = (savings) * 1/100
# Print out value of yearly_interest
yearly_interest
# Show total savings after 1 year
total_savings = savings + yearly_interest
# Print out total_savings
total_savings
# As you explore Python further, you will begin to notice that how your code behaves depends on the data type you are working with. Let's take a look.
# Simple addition of integers
print(1+1)
# Simple addition of strings
print("ab" + "cd")
# In the second code example above the '+' combined "ab" with "cd" to form "abcd". This is called **concatenation**.
#
# The '+' operator is incredibly versatile and can be used to create outputs which combine different data types. Suppose we want to print out our savings in a statement rather than just a non descriptive number. We could do the following:
# Print out amount of savings in a sentence
print("I have" + total_savings + " in my bank account.")
# Why the error? Let's take a look at what we are trying to do. Inside a print statement we have a string which we are combining with the variable total_savings. We are then appending another string to try and form a complete sentence. However in Python we cannot sum strings and floats together in this manner. What we need to do is to explicitly convert the variable total_savings to a string. To achieve this we use str(total_savings) to convert the variable total_savings to a string.
# Print out amount of savings in a sentence
print("I have €" + str(total_savings) + " in my bank account.")
# This same form of type conversion can be used with:
# - int()
# - float()
# - bool()
| Learn-python/Part 1 - Introduction to Python/01-Python Basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="af71ec61"
# # 6. La classification hiérarchique ascendante et le K-Means selon le taux d'activité
# + id="061a8b36"
import pandas as pd
from sklearn.preprocessing import StandardScaler
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
import seaborn as sns
# + id="d2c4e18d" outputId="ce86a821-1f72-4381-e9f4-14f4f6d20d67"
# Load the 2018 Île-de-France communes inequality dataset
data_communes = pd.read_excel(".../Inegalites2018_IDFCommunes.xlsx")
data_communes
# + id="a1ef985f" outputId="150f182f-6737-4136-8189-c633b418bc8c"
# Work on a copy so the raw dataset stays untouched
communes_socioeco = data_communes.copy()
# Inspect the available columns
communes_socioeco.columns
# + id="38ce7d2b"
#Les librairies pour le clustering
import scipy.cluster.hierarchy as sch
from sklearn.cluster import KMeans
# + id="0a996c16" outputId="c2ad7b18-0376-49e2-f05a-3c1f29a3a692"
# Employment rate of 15-64 year-olds per commune, indexed by the
# commune's geographic (INSEE) code
activite = communes_socioeco["Taux d'emploi 15-64 ans"]
data_actv = pd.DataFrame({"Taux d'emploi 15-64 ans":activite}).set_index(communes_socioeco['Code géographique'])
data_actv
# + id="c1a04ab4" outputId="f9fd3c5c-1751-4a14-a034-c2632aadf8fe"
# Convert the employment-rate Series into an (n, 1) column vector —
# the shape expected by scipy's linkage and sklearn's KMeans
emploi = np.array(activite)
emploi=emploi.reshape(-1,1)
emploi
# + id="912f7818" outputId="151d6c47-d7a9-479b-d680-c3abd2f1f443"
# Agglomerative hierarchical clustering of the employment rates
# (Ward linkage; Ward implies the Euclidean metric)
Z = sch.linkage(emploi,method='ward',metric='euclidean')
# Display the dendrogram. FIX: reuse the linkage matrix Z computed just
# above instead of recomputing sch.linkage() inside the dendrogram call —
# identical plot, half the work.
plt.figure(figsize=(15,15))
plt.title("Dendogramme de la Classification hiérarchique des communes d'Ile de France selon le niveau d'activité")
dendrogram = sch.dendrogram(Z)
plt.show()
# + id="c2a5aa05" outputId="9e91df35-4a50-446b-99ad-1037672a6b0e"
# Elbow method: within-cluster inertia of K-Means for k = 1..19
inertie_intra = []
for i in range(1,20):
    kmeans_pca = KMeans(n_clusters = i, init='k-means++',random_state=42)
    kmeans_pca.fit(emploi)
    inertie_intra.append(kmeans_pca.inertia_)
# Plot the inertia against the number of clusters to locate the "elbow"
plt.figure(figsize=(10,8))
plt.plot(range(1,20),inertie_intra, marker='o',linestyle='--')
plt.xlabel('Nombre de Clusters')
plt.ylabel('Inertie intra-cluster')
plt.title('K-Means par la méthode des centres mobiles')
# + [markdown] id="ee9b445c"
# En utilisant la "méthode du coude", nous devons prendre 3 clusters selon le niveau d'activité : **passable, bon, très bon**
# + id="901798fe" outputId="2ef2b44a-acf8-4587-fafd-1fc9a0239375"
# Fit the final K-Means with the 3 clusters suggested by the elbow method
kmeans_pca_act = KMeans(n_clusters=3,init='k-means++',random_state=42)
kmeans_pca_act.fit(emploi)
# + id="66b7b0cd" outputId="27896c6a-9e34-4304-8178-15e3af9e50c8"
# Store each commune's cluster number as a new DataFrame column
data_actv['Numéro de cluster']=kmeans_pca_act.labels_
data_actv
# + id="44496ba4" outputId="4acda2f6-c1d3-40c8-aa42-fc3a85a5afbc"
# Map the raw cluster numbers to human-readable activity levels
data_actv["Niveau d'activité"]=data_actv["Numéro de cluster"].map({0:'Très satisfaisant',2:'Satisfaisant',1:'Passable'})
data_actv
# + id="8200c902" outputId="7c37711a-fd2e-4133-ca23-3fa987ef4d1a"
# One-dimensional scatter plot of the communes: the employment rate is
# plotted against itself, coloured by the cluster label
fig, axes = plt.subplots(figsize=(8,8))
# FIX: pass x/y by keyword — seaborn >= 0.12 made them keyword-only,
# so a positional first argument would be interpreted as `data`
sns.scatterplot(x=data_actv.iloc[:,0], y=data_actv.iloc[:,0], hue = data_actv["Niveau d'activité"], palette = ['r','b','y'])
plt.title("Nuage des communes selon le niveau d'activité à une dimension")
plt.xlabel("Taux d'emploi")
plt.ylabel("Taux d'emploi")
plt.show()
# + id="ac62d042"
data_actv.to_excel('.../ActivitéCommunes2018IDF.xlsx')
# + id="47eb8f2b"
| Emploi_2018Communes.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .rs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// # G2ConfigMgr
// The G2ConfigMgr API is used to add specific JSON configurations to the database,
// so that they may be shared across remote systems.
// Such configurations are added to the database,
// and a configuration ID is created for each of them.
// The system may then be configured with a specific configuratin ID
// that points to one of those configurations.
// That configuration ID will then be the shared global config.
// ## Prepare environment
import com.senzing.g2.engine.G2ConfigMgr;
import com.senzing.g2.engine.G2ConfigMgrJNI;
import com.senzing.g2.engine.G2Config;
import com.senzing.g2.engine.G2ConfigJNI;
import com.senzing.g2.engine.Result;
// + [markdown] heading_collapsed=true
// ### Helper class for Json Rendering
// + hidden=true
// %%loadFromPOM
<dependency>
<groupId>org.glassfish</groupId>
<artifactId>javax.json</artifactId>
<version>1.1.4</version>
</dependency>
// + hidden=true
import javax.json.*;
import static java.util.Collections.*;
import static javax.json.stream.JsonGenerator.PRETTY_PRINTING;
// + hidden=true
/** Small utility for converting javax.json values to and from JSON text. */
public class JsonUtil {
    // Writer factory that pretty-prints (indented) JSON output
    private static final JsonWriterFactory PRETTY_FACTORY
        = Json.createWriterFactory(singletonMap(PRETTY_PRINTING, true));
    // Writer factory that emits compact, single-line JSON output
    private static final JsonWriterFactory UGLY_FACTORY
        = Json.createWriterFactory(emptyMap());

    /** Renders {@code val} as pretty-printed JSON text. */
    public static String toJsonText(JsonValue val) {
        return toJsonText(val, true);
    }

    /** Renders {@code val} as JSON text, pretty-printed when {@code prettyPrint} is true. */
    public static String toJsonText(JsonValue val, boolean prettyPrint) {
        JsonWriterFactory factory = (prettyPrint) ? PRETTY_FACTORY : UGLY_FACTORY;
        StringWriter sw = new StringWriter();
        JsonWriter writer = factory.createWriter(sw);
        writer.write(val);
        sw.flush();
        return sw.toString();
    }

    /** Parses {@code jsonText} into a JsonObject; returns null for null input. */
    public static JsonObject parseJsonObject(String jsonText) {
        if (jsonText == null) return null;
        StringReader sr = new StringReader(jsonText);
        JsonReader jsonReader = Json.createReader(sr);
        return jsonReader.readObject();
    }

    /** Parses {@code jsonText} into a JsonArray; returns null for null input. */
    public static JsonArray parseJsonArray(String jsonText) {
        if (jsonText == null) return null;
        StringReader sr = new StringReader(jsonText);
        JsonReader jsonReader = Json.createReader(sr);
        return jsonReader.readArray();
    }
}
// + hidden=true
import java.util.UUID;

// Last JSON string rendered; kept as a top-level binding so later notebook
// cells can inspect it.
String str;

/**
 * Render an object's JSON representation as an interactive, collapsible
 * tree in the notebook output (via the renderjson JavaScript library).
 *
 * @param obj an object whose {@code toString()} yields a JSON object
 */
public static void RenderJSON(Object obj){
    str = obj.toString();
    JsonObject json = JsonUtil.parseJsonObject(str);
    // Fix: removed the unused local `Config` (toJsonText result was
    // computed and discarded).
    // A fresh UUID per call so repeated renders target distinct <div>s.
    UUID id = UUID.randomUUID();
    String uuid = id.toString();
    String div = "<div id=\""+ uuid +"\" style=\"height:100%; width:100%; background-color: LightCyan\"></div>";
    display(div, "text/html");
    String jav = "require([\"https://rawgit.com/caldwell/renderjson/master/renderjson.js\"], function() {document.getElementById(\'"+ uuid +"\').appendChild(renderjson("+json+"))});";
    display(jav, "application/javascript");
}
// -
// ### Initialize Senzing configuration
//
// Using environment variables and default values, create `senzingConfigJson`.
// This value is used when instantiating Senzing objects.
// +
// Get variables used in constructing Senzing Engine configuration,
// falling back to the standard install locations when the corresponding
// environment variables are not set.
String configPath = System.getenv("SENZING_ETC_DIR");
if (configPath == null) {
    configPath = "/etc/opt/senzing";
}
String supportPath = System.getenv("SENZING_DATA_VERSION_DIR");
if (supportPath == null) {
    supportPath = "/opt/senzing/data";
}
String g2Path = System.getenv("SENZING_G2_DIR");
if (g2Path == null) {
    g2Path = "/opt/senzing/g2";
}
String resourcePath = g2Path + "/resources";
// Database connection string; defaults to the bundled demo SQLite database.
String sqlConnection = System.getenv("SENZING_SQL_CONNECTION");
if (sqlConnection == null) {
    sqlConnection = "sqlite3://na:na@/var/opt/senzing/sqlite/G2C.db";
}
// Construct the JSON string used for Senzing Engine configuration.
String senzingConfigJson = "{"
   + "\"PIPELINE\": {"
   + "\"CONFIGPATH\": \"" + configPath + "\","
   + "\"SUPPORTPATH\": \"" + supportPath + "\","
   + "\"RESOURCEPATH\": \"" + resourcePath + "\""
   + "},"
   + "\"SQL\": {"
   + "\"CONNECTION\": \"" + sqlConnection + "\""
   + "}}";
RenderJSON(senzingConfigJson);
// -
// ## G2Config
// ### Create G2Config instance
// Create an instance of `G2Config` for use with managing configuration.
// +
G2Config g2Config = new G2ConfigJNI();
String moduleName = "ExampleG2Config";
// Verbose logging prints extensive diagnostics to stdout.
boolean verboseLogging = true;
// initV2 returns 0 on success.
int returnCode = g2Config.initV2(moduleName, senzingConfigJson, verboseLogging);
System.out.print(returnCode);
// -
// ## G2ConfigMgr
//
// The G2ConfigMgr API is used to add specific JSON configurations to the database,
// so that they may be shared across remote systems.
// Such configurations are added to the database, and a configuration ID is created for each of them.
// The system may then be configured with a specific configuration ID that points to one of those configurations.
// That configuration ID will then be the shared global config.
// ### G2ConfigMgr Initialization
//
// To start using G2ConfigMgr, you must first create and initialize an instance of the config manager. This should be done once per process.
//
// Create a new instance of the config manager and assign it to a variable. Then, call the appropriate initialization method (such as initV2) to initialize the config manager.
//
// During the call, the initialization methods accept the following parameters:
//
// - **moduleName:** A short name given to this instance of the G2 engine (i.e. your G2Module object)
// - **senzingConfigJson:** A JSON document containing system parameters (see the section called "Initialization Parameters")
// - **verboseLogging:** A boolean which enables diagnostic logging - this will print a massive amount of information to stdout (default = False)
// Calling these functions will return "0" upon success - useful for error handling.
// +
// Create and initialize the config manager (once per process);
// initV2 returns 0 on success.
G2ConfigMgr g2ConfigMgr = new G2ConfigMgrJNI();
String moduleName = "ExampleG2ConfigMgr";
boolean verboseLogging = true;
int returnCode = g2ConfigMgr.initV2(moduleName, senzingConfigJson, verboseLogging);
// NOTE(review): the exception text is fetched but discarded here --
// presumably it should be printed when returnCode != 0.
g2ConfigMgr.getLastException();
System.out.print(returnCode);
// -
// ### Managing configurations
// Configuration JSON documents may be added to the datastore, so that they can be shared across remote systems.
// The `create()` method makes a new in-memory configuration from the `g2config.json` file.
long configHandle = g2Config.create();
// Save the in-memory configuration to a string buffer and display it.
StringBuffer configStringBuffer = new StringBuffer();
int returnCode = g2Config.save(configHandle, configStringBuffer);
if (returnCode != 0)
    System.out.print(g2Config.getLastException());
else
    RenderJSON(configStringBuffer);
// #### addConfig
//
// Use `addConfig()` to add a configuration JSON document to the data repository.
//
// The `addConfig()` function accepts the following parameters as input:
//
// - **configStr:** The configuration JSON document.
// - **configComments:** A free-form string of comments describing the configuration document.
// - **configID:** The returned configID for the new config document registered in the data store.
// The function returns "0" upon success.
// +
String configStr = configStringBuffer.toString();
// Timestamped comment so this registered config can be identified later.
String timeStamp = new java.text.SimpleDateFormat("yyyy.MM.dd.HH.mm.ss").format(new java.util.Date());
String configComments = "senzing-G2ConfigMgr-reference.ipynb added at " + timeStamp;
// addConfig fills in the new config's ID via the Result holder and
// returns 0 on success.
// NOTE(review): returnCode is not checked before printing the ID.
Result<Long> configID = new Result<Long>();
int returnCode = g2ConfigMgr.addConfig(configStr, configComments, configID);
System.out.print("CONFIG ID: " + configID.getValue());
// -
// #### getConfigList
//
// Use `getConfigList()` to retrieve a list of the configuration JSON documents contained in the data repository.
// The `getConfigList()` function has no input arguments, and various arguments used to return response documents.
// +
// List every configuration document registered in the repository.
StringBuffer response = new StringBuffer();
int returnCode = g2ConfigMgr.getConfigList(response);
RenderJSON(response);
// -
// #### getConfig
// Use `getConfig()` to retrieve a specific configuration JSON document from the data repository.
//
// The `getConfig()` function accepts the following parameters as input:
//
// - **configID:** The configID for the config document that you wish to retrieve.
// They also have various arguments used to return response documents.
// +
// Fetch the JSON document for the config ID registered above.
StringBuffer response = new StringBuffer();
int returnCode = g2ConfigMgr.getConfig(configID.getValue(), response);
RenderJSON(response);
// -
// ### Default configurations
// Multiple configuration JSON documents may be added to the datastore. Each of them may be referenced to start the engine and other API's with different configurations.
//
// In order to specify that one of the configuration documents should be the global shared config, the config ID for that configuration document must be set as the default config ID.
// #### setDefaultConfigID
//
// Use `setDefaultConfigID()` to set the default configuration JSON document in the data repository.
//
// The `setDefaultConfigID()` function accepts the following parameters as input:
//
// - **configID:** The configuration ID for a configuration JSON document previously added to the database.
//
// The function returns "0" upon success.
// Make the config added above the global default; returns 0 on success.
int returnCode = g2ConfigMgr.setDefaultConfigID(configID.getValue());
System.out.print(returnCode);
// #### getDefaultConfigID
//
// Use `getDefaultConfigID()` to retrieve a specific configuration JSON document from the data repository.
// The `getDefaultConfigID()` function accepts the following parameters as input:
//
// - **configID:** Returns the configID for the current default configuration, or 0 if none is set.
// Fetch the current default configuration ID from the repository.
Result<Long> configID = new Result<Long>();
int returnCode = g2ConfigMgr.getDefaultConfigID(configID);
if(returnCode!=0)
    // Fix: the failing call is on g2ConfigMgr, so fetch the exception from
    // g2ConfigMgr (the original mistakenly queried g2Config).
    System.out.print(g2ConfigMgr.getLastException());
else
    System.out.print(configID.getValue());
| notebooks/java/senzing-G2ConfigMgr-sdk-api-specification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="2rer_bFvB6yT" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
# + id="0ryOPD5BCSCg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="11ab2934-2e3e-468f-cb3b-c605e6fc449e" executionInfo={"status": "ok", "timestamp": 1581622458564, "user_tz": -60, "elapsed": 874, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix"
# + id="ePNTit5uCxg3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0c16b865-18e2-4315-95dd-8eae03436b0c" executionInfo={"status": "ok", "timestamp": 1581622462546, "user_tz": -60, "elapsed": 3349, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
# ls data
# + id="JRBNu7J2CydL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8cf1dd7a-358c-44b6-f90b-6539a40af318" executionInfo={"status": "ok", "timestamp": 1581622498854, "user_tz": -60, "elapsed": 5231, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
df = pd.read_csv('data/men_shoes.csv', low_memory=False)
df.shape
# + id="9l4SL66KDADT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="6df1b13e-b756-41f7-eab7-c787e982c257" executionInfo={"status": "ok", "timestamp": 1581622499776, "user_tz": -60, "elapsed": 919, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
df.columns
# + id="tW-l4tWGDBWz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f242266f-5cbd-468a-f645-09bbe118c0eb" executionInfo={"status": "ok", "timestamp": 1581622565094, "user_tz": -60, "elapsed": 829, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
mean_price = np.mean( df.prices_amountmin )
mean_price
# + id="hVG2rVdhDMlU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ee10e05d-4325-40af-8bc4-dced42608ed9" executionInfo={"status": "ok", "timestamp": 1581623011416, "user_tz": -60, "elapsed": 912, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
# Baseline model: predict the mean price for every row and measure the MAE.
y_true = df.prices_amountmin
y_pred = [mean_price] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)
# + id="YbbH-kr0DmUb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="e0852e70-ebc5-4a36-91aa-4de8c596b84a" executionInfo={"status": "ok", "timestamp": 1581623069428, "user_tz": -60, "elapsed": 813, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
df.prices_amountmin.hist(bins=100)
# + id="TTySIE2tFMdq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} outputId="2cbb350a-4600-4380-f53a-4b0bb622f6a0" executionInfo={"status": "ok", "timestamp": 1581623153231, "user_tz": -60, "elapsed": 1147, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
np.log1p(df.prices_amountmin).hist(bins=100)
# + id="wxOtCyF2FU-u" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4e69db76-18f2-46a1-ab6c-03395a30a54e" executionInfo={"status": "ok", "timestamp": 1581623199428, "user_tz": -60, "elapsed": 673, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
# Second baseline: the median, which is more robust to the long price tail
# visible in the histogram above.
y_true = df.prices_amountmin
y_pred = [np.median(y_true)] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)
# + id="PGqvKIBwFsPZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9422fc96-47f2-4f63-cf90-4461855d0de4" executionInfo={"status": "ok", "timestamp": 1581623417230, "user_tz": -60, "elapsed": 749, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
# Third baseline: the mean computed in log space (log1p/expm1 round-trip),
# which down-weights extreme prices.
y_true = df.prices_amountmin
price_log_mean = np.expm1( np.mean(np.log1p(y_true)) )
y_pred = [price_log_mean] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)
# + id="FRZ6qKJAGePg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="99ce1ee7-2d5f-4cac-9a12-cad316eb6720" executionInfo={"status": "ok", "timestamp": 1581623468225, "user_tz": -60, "elapsed": 1153, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
df.columns
# + id="aPJvxeP3Gtt-" colab_type="code" colab={}
df['brand_cat'] = df.brand.factorize()[0]
# + id="xgsubTPTI5WX" colab_type="code" colab={}
def run_model(feats):
    """Cross-validate a depth-5 decision tree on the given feature columns.

    Returns (mean, std) of the negated-MAE cross-validation scores, so
    values closer to zero are better.
    """
    features = df[feats].values
    target = df.prices_amountmin.values
    regressor = DecisionTreeRegressor(max_depth=5)
    cv_scores = cross_val_score(regressor, features, target,
                                scoring='neg_mean_absolute_error')
    return np.mean(cv_scores), np.std(cv_scores)
# + id="41hJYvEfIMAp" colab_type="code" colab={}
run_model(['brand_cat'])
# + id="tHk-9IjQJePN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bcef464e-955a-4c73-8d5c-48db86e94ed9" executionInfo={"status": "ok", "timestamp": 1581624197020, "user_tz": -60, "elapsed": 885, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
run_model(['brand_cat'])
# + id="iEqImJbNJfvI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="23b4573d-0090-43fe-92ca-3f74a64b6b00" executionInfo={"status": "ok", "timestamp": 1581624334567, "user_tz": -60, "elapsed": 902, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
# Add the manufacturer as a second categorical feature and re-evaluate.
df['manufacturer_cat'] = df.manufacturer.factorize()[0]
run_model(['brand_cat', 'manufacturer_cat'])
# + id="VORTcc0LJuFZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="a28b5388-9b0f-47b2-8594-1bc53bc3c854" executionInfo={"status": "ok", "timestamp": 1581624480350, "user_tz": -60, "elapsed": 24766, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mAFc85IEwvZX1ovV_u9FyVYs7-UOwJFjKa3MRovGmg=s64", "userId": "08401139221304384439"}}
# !git add matrix_one/day4.ipynb
# !git commit -m "day4"
# !git push
# + id="zc3_2R7BKOyc" colab_type="code" colab={}
| matrix_one/day4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.11 64-bit (''base'': conda)'
# name: python3
# ---
# Interconnected Neurons
# +
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
# Declare inputs: a 10-feature vector per sample (batch dim is implicit).
inputs = Input(shape=(10,))
# Declare layers.
layer1 = Dense(64, activation='relu')
layer2 = Dense(64, activation='relu')
# Connect inputs and layers: calling a layer on a tensor wires it into the
# graph and returns the layer's output tensor.
layer1_outputs = layer1(inputs)
layer2_outputs = layer2(layer1_outputs)
# Create model spanning from the input tensor to the final output tensor.
model = Model(inputs=inputs, outputs=layer2_outputs)
model.summary()
# +
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Concatenate
# Declare inputs: the main 10-feature input plus a 5-feature "bypass"
# input that skips the first layer.
inputs = Input(shape=(10,))
bypass_inputs = Input(shape=(5,))
# Declare layers.
layer1 = Dense(64, activation='relu')
concat_layer = Concatenate()
layer2 = Dense(64, activation='relu')
# Connect inputs and layers: layer2 sees layer1's 64 outputs concatenated
# with the 5 bypass features.
layer1_outputs = layer1(inputs)
layer2_inputs = concat_layer([layer1_outputs, bypass_inputs])
layer2_outputs = layer2(layer2_inputs)
# Create model: a two-input model, so fit/predict take a list of arrays.
model = Model(inputs=[inputs, bypass_inputs],
              outputs=layer2_outputs)
model.summary()
| tf_framework/c13_InterconnectedLayers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
#IMPORT LIBRARIES
import numpy as np
from sklearn.linear_model import *
from sklearn.preprocessing import StandardScaler
import pandas
# +
#SET UP TRAINING DATA
# Read the numeric columns of the Kaggle Titanic training set.
train_data = np.genfromtxt('train.csv',skip_header = 1,usecols = np.arange(0,12),delimiter= ',',skip_footer=1)
# Column 1 is the Survived label.
targets = train_data[:,1]
# Features: columns 2, 5 and 10 -- presumably Pclass, Age and Fare; TODO
# confirm against the train.csv header.
X = train_data[:,[2,5,10]]
# Prepend a column of ones as an intercept/bias term.
X = np.insert(X,0,np.ones(len(targets)),axis = 1)
#Feature Scaling
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
# +
#USING A STOCHASTIC GRADIENT DESCENT CLASSIFIER
# Logistic-regression loss trained with SGD; n_iter is the epoch count
# (renamed max_iter in later scikit-learn versions).
clf = SGDClassifier(loss='log',n_iter=10000)
clf.fit(X,targets)
# +
#LEARNT COEFFICIENTS
print "Coefficients are as follows:- "
print clf.coef_
# +
#PREDICTIONS ON TEST DATA
test_data = np.genfromtxt('test.csv',delimiter=',',skip_header=1)
p = test_data[:,[1,4,9]]
p = np.insert(p,0,np.ones(len(p)),axis = 1)
scaler.fit(p)
p = scaler.transform(p)
a = clf.predict(p)
a = np.reshape(a,(-1,1))
a = np.insert(a,0,np.arange(892,1310),axis=1)
a = a.astype(int)
print "PassengerID Survived"
print a
# -
np.savetxt('predictions.csv',a,delimiter=',',header="PassengerId,Survived",fmt="%i")
| Titanic-Survival/Titanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Creating and simulating a simple model
# Here we show how to create a basic model using basiCO, and simulating it. We start as usual by importing basiCO.
import sys
if '../..' not in sys.path:
sys.path.append('../..')
from basico import *
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Now lets create a new model, passing along the `name` that we want to give it. Additional supported parameters for the model consist of:
#
# * `quantity_unit`: which sets the unit to use for species concentrations (defaults to mol)
# * `volume_unit`: the unit to use for three dimensional compartments (defaults to litre (l))
# * `time_unit`: the unit to use for time (defaults to second (s))
# * `area_unit`: the unit to use for two dimensional compartments
# * `length_unit`: the unit to use for one dimensional compartments
new_model(name='Simple Model');
# now we add a basic reaction that converts a chemical species `A` irreversibly into `B`. We can do that by just calling `add_reaction` with the chemical formula to use. In this case this would be: `A -> B`. The reaction will be automatically created using mass action kinetics.
add_reaction('R1', 'A -> B');
# Since we had a new model, this created the Species `A` and `B` as well as a compartment `compartment`, in which those chemicals reside. The species have an initial concentration of 1. To verify we can call `get_species`, which returns a dataframe with all information about the species (either all species, or the one filtered to):
#
get_species().initial_concentration
# to change the initial concentration, we use `set_species`, and specify which property we want to change:
set_species('B', initial_concentration=0)
set_species('A', initial_concentration=10)
get_species().initial_concentration
# to see the kinetic parameters of our reaction we can use `get_reaction_parameters`, and we see that the parameter has been created by default with a value of `0.1`
get_reaction_parameters()
# to change that parameter, we use `set_reaction_parameters`, specifying the value to be changed:
set_reaction_parameters('(R1).k1', value=1)
get_reaction_parameters('k1')
# now let's simulate our model for 50 seconds:
result = run_time_course(duration=50)
result.plot();
# to simulate the model stochastically, you can specify the simulation method. COPASI supports many different simulations methods:
#
# * `deterministic`: using the COPASI LSODA implementation
# * `stochastic`: using the Gibson Bruck algorithm
# * `directMethod`: using the Gillespie direct method
#
# others are:
#
# * `tauleap`, `adaptivesa`, `radau5`, `hybridlsoda`, `hybridode45`
#
# So lets try and simulate the model stochastically:
result = run_time_course(duration=50, method='stochastic')
# The simulation failed this time because the particle numbers that the stochastic simulation is based upon are too high! Let's check:
get_species().initial_particle_number
# so we just set the initial particle number of a to be smaller, and run the simulation again, this time returning particle numbers rather than concentrations for the resulting dataframe
set_species('A', initial_particle_number=100)
# Alternatively we could have modified the models quantity unit, which currently was set to:
get_model_units()
# So initially we had a concentration of 10 mol/l, which does not lend itself to stochastic simulation. Using the `set_model_unit` command with a more appropriate `quantity_unit` and `volume_unit` would be the proper solution.
# When running stochastic simulations, you might want to specify the `seed` to be used, so that traces become reproducible. In COPASI you have two parameters for that `seed`, the actual seed, and `use_seed` a boolean indicating whether that seed is to be used for the next simulation. For a single trace we use both:
result = run_time_course(duration=10, method='stochastic', use_numbers=True, seed=1234, use_seed=True)
result.plot();
# of course one stochastic trace will not be enough, so lets run many of them. This time the species will be plotted separately, so that it is easy to reuse the same color. This time we also don't use the seed specified before.
# Overlay 100 stochastic runs (A in red, B in blue) to visualise
# run-to-run variability; the seed is deliberately NOT reused here,
# so every trace differs.
fig, ax = plt.subplots()
for i in range(100):
    result = run_time_course(duration=10, method='stochastic', use_numbers=True, use_seed=False)
    result.plot(y='A', color='r', ax=ax, legend=None);
    result.plot(y='B', color='b', ax=ax, legend=None);
# so far, we were only using mass action kinetics, but of course we could use any other kinetic as well. COPASI comes with a large number of functions already inbuilt. You can see those, running the `get_functions` command. It is filterable by name, and whether or not the formula is reversible, or general (general reactions can be used for either reversibility). Since we modelled our reaction as irreversible, lets look at the irreversible functions we have:
get_functions(reversible=False)
# So let's change the kinetic function that our reaction should use. Here we simply specify the function name that we got from the call before. This will introduce new local parameters, `Km` and `Vmax`, to the model. (We could of course have used the `function` parameter already in the `add_reaction` command above.)
set_reaction('R1', function='Henri-Michaelis-Menten (irreversible)')
get_reactions()
get_reaction_parameters()
# and now we can look at how the plot would look at repeating the simulation for several vmax values:
# Repeat the deterministic simulation for several Vmax values to see the
# effect of the Michaelis-Menten rate on the time courses (A red, B blue).
fig, ax = plt.subplots()
for vm in [0.1, 0.5, 3]:
    set_reaction_parameters('(R1).V', value=vm)
    result = run_time_course(duration=10, method='deterministic', use_numbers=True)
    result.plot(y='A', color='r', ax=ax, legend=None);
    result.plot(y='B', color='b', ax=ax, legend=None);
| docs/notebooks/Creating_a_simple_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Combinatorial Explosion for Multicomponent Crystals
# ---
#
# **"During the past century, science has developed a limited capability to design materials, but we are still too dependent on serendipity"** - [<NAME>, Looking for design in materials design (2004)](http://www.nature.com/nmat/journal/v3/n10/abs/nmat1229.html)
#
# This practical explores how materials design can be approached by using the simplest of rules to narrow down the combinations of elements to those that might be considered legitimate. It will demonstrate the scale of the problem, even after some chemical rules are applied.
#
# #### PRE-READING
#
# The approach outlined in this notebook was used in the publication [Computational Screening of All Stoichiometric Inorganic Materials](https://doi.org/10.1016/j.chempr.2016.09.010). It may be useful to read through the paper in advance.
#
# #### NOTES ON USING THE NOTEBOOK
# - This notebook is divided into "cells" which either contain Markdown (text, equations and images) or Python code
# - A cell can be "run" by selecting it and either
# - pressing the Run button in the toolbar above (triangle/arrow symbol)
# - Using Cell > Run in the menu above
# - Holding the Ctrl key and pressing Enter
# - Running Markdown cells just displays them nicely (like this text!). Running Python code cells runs the code and displays any output below
# - When you run a cell and it appears to not be doing anything, if there is no number in the square brackets and instead you see ```In [*] ```, it is still running!
# - If the output produces a lot of lines, you can minimise the output box by clicking on the white space to the left of it
# - You can clear the output of a cell or all cells by going to Cell > Current output/All output > Clear
# - Be sure to run the code cells in order, as some of the code of the late cells can depend on early cells
#
# #### INSTALLING SMACT
# - This tutorial uses a Python library called SMACT, to retrieve various chemical data. To install it, run the cell below, it will take a minute.
# !pip install smact > /dev/null
# ## 1. Back to basics: Forget your chemistry
# (From the blog of <NAME>: [www.hackingmaterials.com](http://www.hackingmaterials.com))
#
# Imagine:
#
# 1. You have the first 50 elements of the periodic table
# 2. You also have a box with a 10 x 10 x 10 grid
# 3. You are allowed to arrange 30 of the elements at a time in some combination on the grid to make a 'compound'
#
# How many different arrangements (different compounds) could you make?
#
# <img src = "Images/assign_atoms.png">
#
# The answer is about $10^{108}$, *over a googol of compounds!*
#
# <div class="alert alert-success">
# <p><b>TASKS:</b></p>
# <ol>
# <li><b>Use the cell below to arrive at the conclusion above. Hints for the formula required are below the cell.</b></li>
# </ol>
# </div>
# +
from math import factorial as factorial

grid_points = 1000.0  # 10 x 10 x 10 grid positions
atoms = 30.0          # atoms placed per "compound"
elements = 50.0       # elements available to choose from
##########
# PYTHON HINTS
#
# the factorial of 3 is written as factorial(3), which returns 3*2*1 = 6
#
# 3 squared is written as 3**2, which returns 9
#
##########
# NOTE: the three quantities below are intentionally left as 0 -- filling
# them in is the exercise (see the TASKS box above). Do not "fix" them.
# A. Show that assigning each of the 30 atoms as one of 50 elements is ~ 9e50 (permutations)
element_assignment = 0
print('Number of possible element assignments is: ', float(element_assignment))
# B. Show that the number of possible arrangements of 30 atoms on a grid of 10x10x10 is ~2e57 (combinations)
atom_arrangements = 0
print('Number of atom arrangements is: ', float(atom_arrangements))
# C. Finally, show that the total number of potential "materials" is ~ 2e108
total_materials = 0
print('Total number of "materials" is: ', float(total_materials))
# -
# <img src = "Images/Combinations_vs_Permutations.png">
# ## 2. Counting combinations: Remember your chemistry
#
# We will use well-known elemental properties along with the criterion that stable ionic compounds must not have an overall charge in order to sequentially apply different levels of screening and count the possible combinations:
#
# i. Setting up the search space - Defining which elements we want to include
#
# ii. Element combination counting - Considering combinations of elements and ignore oxidation states
#
# iii. Ion combination counting - Considering combinations of elements in their allowed oxidation states
#
# iv. Charge neutrality - Discarding any combinations that would not make a charge neutral compound
#
# v. Electronegativity - Discarding any combinations which exhibit a cation which is more electronegative than an anion
#
# ### i. Setting up and choosing the search-space
#
# The code below imports the element data that we need in order to do our counting. The main variable in the cell below for this practical is the ```max_atomic_number``` which dictates how many elements to consider.
#
# For example, when ```max_atomic_number = 10``` the elements from H to Ne are considered in the search.
#
# <div class="alert alert-success">
# <p><b>TASKS:</b></p>
# <ol>
# <li><b>Change the variable <code>max_atomic_number</code> so that it includes elements from H to Ar.</b></li>
# <li><b>Get the code to print out the actual list of elements that will be considered.</b></li>
# </ol>
# </div>
# +
# Imports the SMACT toolkit for later on #
import smact
# Gets element data from file and puts into a list #
with open('Counting/element_data.txt','r') as f:
    data = f.readlines()
list_of_elements = []
# Specify the range of elements to include #
### EDIT BELOW ###
# H..Ar (Z = 1..18); raising this grows the search space rapidly.
max_atomic_number = 18
##################
# Populates a list with the elements we are concerned with #
for line in data:
    if not line.startswith('#'):
        # Grab first three items from table row: symbol, name, atomic number
        symbol, name, Z = line.split()[:3]
        if int(Z) > 0 and int(Z) < max_atomic_number + 1:
            list_of_elements.append(symbol)
print('--- Considering the {0} elements from {1} to {2} ---'.format(len(list_of_elements),
                                                                    list_of_elements[0],
                                                                    list_of_elements[-1]))
# -
# ### ii. Element combination counting
#
# This first procedure simply counts how many binary combinations are possible for a given set of elements. This is a numerical (combinations) problem, as we are not considering element properties in any way for the time being.
#
# <div class="alert alert-success">
# <p><b>TASKS:</b></p>
# <ol>
# <li><b>Increase the number of elements to consider (max_atomic_number in the cell above) to see how this affects the number of combinations.</b></li>
# <li><b>If you can, add another for statement (e.g. <code>for k, ele_c...</code>) to make the cell count up ternary combinations. It is advisable to change the number of elements to include back to 10 first! Hint: The next exercise is set up for ternary counting so you could come back and do this after looking at that.</b></li>
# </ol>
# </div>
# +
# Counts up possibilities and prints the output #
# Each unordered pair of distinct elements is one binary "compound".
# itertools.combinations replaces the original index-juggling double loop,
# whose enumerate indices (i on the outer loop was used only for slicing,
# j not at all) were an idiom smell; output order and count are identical.
from itertools import combinations

element_count = 0
for ele_a, ele_b in combinations(list_of_elements, 2):
    element_count = element_count + 1
    print('{0:2s} {1:2s}'.format(ele_a, ele_b))

# Prints the total number of combinations found
print('Number of combinations = {0}'.format(str(element_count)))
# -
# ### iii. Ion combination counting
#
# We now consider each known oxidation state of an element (so strictly speaking we are not dealing with 'ions'). The procedure incorporates a library of known oxidation states for each element and is this time already set up to search for ternary combinations. The code prints out the combination of elements including their oxidation states. There is also a timer so that you can see how long it takes to run the program.
#
# <div class="alert alert-success">
# <p><b>TASKS:</b></p>
# <ol>
# <li><b>Reset the search space to ~10 elements, read through (feel free to ask if you don't understand any parts!) and run the code below.</b></li>
# <li><b>Change <code>max_atomic_number</code> again in the cell above and see how this affects the number of combinations. Hint: It is advisable to increase the search-space gradually and see how long the calculation takes. Big numbers mean you could be waiting a while for the calculation to run.</b></li>
# </ol>
# </div>
# +
# Sets up the timer to see how long the program takes to run #
import time
start_time = time.time()

ion_count = 0
# Enumerate unordered triples of distinct elements: the slice offsets
# (i+1 for the second loop, i+j+2 for the third) guarantee each element
# combination is visited exactly once, in lexicographic order.
for i, ele_a in enumerate(list_of_elements):
    # Each element contributes one "ion" per allowed oxidation state.
    for ox_a in smact.Element(ele_a).oxidation_states:
        for j, ele_b in enumerate(list_of_elements[i+1:]):
            for ox_b in smact.Element(ele_b).oxidation_states:
                for k, ele_c in enumerate(list_of_elements[i+j+2:]):
                    for ox_c in smact.Element(ele_c).oxidation_states:
                        ion_count = ion_count + 1
                        print('{0:2s} {1:2d} {2:2s} {3:2d} {4:2s} {5:2d}'.format(ele_a, ox_a, ele_b, ox_b, ele_c, ox_c))

# Prints the total number of combinations found and the time taken to run.
print('Number of combinations = {0}'.format(ion_count))
print("--- {0} seconds to run ---".format((time.time() - start_time)))
# -
# All we seem to have done is make matters worse!
#
# We are introducing many more species by further splitting each element in our search-space into separate ions, one for each allowed oxidation state. When we get to max_atomic_number > 20, we are including the transition metals and their many oxidation states.
# ### iv. Charge neutrality
#
# The previous step is necessary to incorporate our filter that viable compounds must be charge neutral overall. Scrolling through the output from above, it is easy to see that the vast majority of the combinations are not charge neutral overall. We can discard these combinations to start narrowing our search down to more 'sensible' (or at least not totally unreasonable) ones. In this cell, we will use the `neutral_ratios` function in smact to do this.
#
# <div class="alert alert-success">
# <p><b>TASKS:</b></p>
# <ol>
# <li><b>Reset the search space to ~10 elements, read through (feel free to ask if you don't understand any parts!) and run the code below.</b></li>
# <li><b>Edit the code so that it also prints out the oxidation state next to each element.</b></li>
# <li><b>Increase the number of elements to consider again (<code>max_atomic_number</code> in the cell above) and compare the output of i. and ii. with that of the below cell</b></li>
# </ol>
# </div>
# +
import time
from smact import neutral_ratios

start_time = time.time()

# Same ternary enumeration as before, but keep only combinations whose
# oxidation states can balance to overall charge neutrality.
charge_neutral_count = 0
for i, ele_a in enumerate(list_of_elements):
    for ox_a in smact.Element(ele_a).oxidation_states:
        for j, ele_b in enumerate(list_of_elements[i + 1:]):
            for ox_b in smact.Element(ele_b).oxidation_states:
                for ele_c in list_of_elements[i + j + 2:]:
                    for ox_c in smact.Element(ele_c).oxidation_states:
                        # Checks if the combination is charge neutral before printing it out! #
                        cn_e, cn_r = neutral_ratios([ox_a, ox_b, ox_c], threshold=1)
                        if cn_e:
                            charge_neutral_count += 1
                            print(f'{ele_a:3s} {ele_b:3s} {ele_c:3s}')

print(f'Number of combinations = {charge_neutral_count}')
print(f'--- {time.time() - start_time} seconds to run ---')
# -
# This drastically reduces the number of combinations we get out and we can even begin to see some compounds that we recognise and know exist.
#
# ### v. Electronegativity
#
# The last step is to incorporate the key chemical property of electronegativity, i.e. the propensity of an element to attract electron density to itself in a bond. This is a logical step as inspection of the output from above reveals that some combinations feature a species in a higher (more positive) oxidation state which is more electronegative than other species present.
#
# With this in mind, we now incorporate another filter which checks that the species with higher oxidation states have lower electronegativities. The library of values used is of the widely accepted electronegativity scale as developed by <NAME>. The scale is based on the dissociation energies of heteronuclear diatomic molecules and their corresponding homonuclear diatomic molecules:
#
# <img src = 'Images/pauling-equation.png'>
# +
import time
from smact.screening import pauling_test

start_time = time.time()

# Ternary enumeration filtered by BOTH charge neutrality and the Pauling
# electronegativity test (higher oxidation state => lower electronegativity).
pauling_count = 0
for i, ele_a in enumerate(list_of_elements):
    paul_a = smact.Element(ele_a).pauling_eneg
    for ox_a in smact.Element(ele_a).oxidation_states:
        for j, ele_b in enumerate(list_of_elements[i + 1:]):
            paul_b = smact.Element(ele_b).pauling_eneg
            for ox_b in smact.Element(ele_b).oxidation_states:
                for ele_c in list_of_elements[i + j + 2:]:
                    paul_c = smact.Element(ele_c).pauling_eneg
                    for ox_c in smact.Element(ele_c).oxidation_states:
                        # Gather the species data for the two screening tests #
                        oxidation_states = [ox_a, ox_b, ox_c]
                        pauling_electro = [paul_a, paul_b, paul_c]
                        # Checks electronegativity ordering and charge neutrality #
                        electroneg_makes_sense = pauling_test(oxidation_states, pauling_electro)
                        cn_e, cn_r = smact.neutral_ratios(oxidation_states, threshold=1)
                        if cn_e and electroneg_makes_sense:
                            pauling_count += 1
                            print(f'{ele_a:2s}{ox_a:3d} {ele_b:2s}{ox_b:3d} {ele_c:2s}{ox_c:3d}')

print(f'Number of combinations = {pauling_count}')
print(f'--- {time.time() - start_time} seconds to run ---')
# -
# ## 3. Speedy calculations
#
# For a given search-space of elements, the number of combinations in the output has decreased each time we've applied a filter. However, the time taken to carry out the calculation has increased. This highlights a fundamental trade-off, however there are some clever coding techniques that can be used to considerably speed things up.
#
# By employing multi-threading (simultaneously using multiple parts of a computer processor) and reworking the above code to reduce the number of times it has to look up element properties from other files, the time taken to carry out the ternary count including checks for charge neutrality and electronegativity for 103 elements was reduced from ~ 1 hour to just 26 seconds (carried out on the same workstation)!
#
# Furthermore, a quaternary count for 103 atoms took only 2 hours. This was reduced to 40 minutes using a 16-core workstation. The code used to carry out these calculations is available in the examples folder of the [main smact repository.](http://www.github.com/WMD-group/SMACT)
#
# N.B. For the numbers quoted above, the stoichiometry of each site was also allowed to vary between 1 and 8 which significantly increases the number of combinations (see below).
# ## 4. Some technicalities...
#
# **A note on multiplicity:** In this exercise, we have not considered higher multiplicies (stoichiometries) in our combinations at all, i.e. we have considered only AB, ABC (and ABCD for quaternary) type combinations. When extended to AxByCz... where x,y,z > 1, the numbers involved get considerably larger still. This can be adjusted by setting `threshold` in the `charge neutrality` function to > 1 in the cells above. The threshold defines the maximum values of x,y,z... . If this is changed, the sum of the oxidation states printed will not always sum to zero, however some multiples (between 1 and `threshold`) of them always will.
#
#
# **Finally, some wisdom from our friend, <NAME> (double Nobel laureate):**
#
# <img src = 'Images/linus-pauling.png'>
| outreach_2020/materials_combinations_2020.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Demonstration of MR reconstruction with CCP PET-MR Software
#
# This demonstration shows how to reconstruct MR images from fully sampled
# Cartesian k-psace data.
# Additional information on the MR raw data format is provided.
#
# This demo is a 'script', i.e. intended to be run step by step in a
# Python notebook such as Jupyter. It is organised in 'cells'. Jupyter displays these
# cells nicely and allows you to run each cell on its own.
# First version: 27th of May 2017
# Updated: 1st of April 2019
# Author: <NAME>, <NAME>
#
# CCP PETMR Synergistic Image Reconstruction Framework (SIRF).
# Copyright 2015 - 2017 Rutherford Appleton Laboratory STFC.
# Copyright 2015 - 2017 University College London.
# Copyright 2015 - 2017, 2019 Physikalisch-Technische Bundesanstalt.
#
# This is software developed for the Collaborative Computational
# Project in Positron Emission Tomography and Magnetic Resonance imaging
# (http://www.ccppetmr.ac.uk/).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ### Fully Sampled Reconstruction
# #### Goals of this notebook:
# - Make sure everybody has a working version of SIRF.
# - Achieve mastery in running Jupyter notebooks.
# - Schematic knowledge of ISMRMRD format.
# - Schematic knowledge of MR reconstruction with SIRF.
# - Minimalistic introduction into Python.
# - Reconstruct our first MR image.
#
# ### Synergistic Image Reconstruction Framework (SIRF) for MRI
# Now to the question everybody has been asking themselves: __[What is SIRF?](https://www.youtube.com/watch?v=HEXWRTEbj1I])__
# The __Synergistic Image Reconstruction Framework__ is:
# - an open source reconstruction framework for PET-MR.
# - has a Matlab and Python interface.
# - coupled to the MR open source reconstruction engine '__The Gadgetron__' (a whole universe on its own).
#
# Of course, nobody expects you to already know any of the functions and classes we will see today.
# If you want to use SIRF there is a [software documentation](https://github.com/CCPPETMR/SIRF/wiki/Software-Documentation) where there is a complete list of what you want to know. More on this in a bit.
#
#
# 
#
# ### Reasons to use an open-source reconstruction framework:
# - be __independent__ of what your vendor provides as image output.
# - __profit from__ the latest developments in the field which are often shared.
# - __share data__ with others more easily.
# - __others will continue to use your work!__
#
#
# #### We will use SIRF to
# - send data to The Gadgetron and get reconstructions back.
# - extract MRI data and process it on our own using SIRF functionality.
# ### Hence we need to start The Gadgetron!
# Open a new terminal. On Linux etc you can do this with `Ctrl + Alt + t`. If you use the jupyter notebook, use `New->terminal` from its home screen.
# Type the word `gadgetron` and press enter.
# Afterwards come back here to the notebook.
#
# ## ISMRM Raw Data (ISMRMRD) Format
#
# MR raw- (or acquisition-) data is stored in a multitude of different data formats depending on the system vendor ( Siemens, GE, Philips, Bruker etc.). However, there is a community-supported, open-source magnetic resonance raw data or acquisition data format called "ISMRMRD" which carries the extension ".h5".
#
# A wholesome explanation and introduction into the file format is provided on the [ISMRMRD website](http://ismrmrd.github.io/).
#
# Of course to make the file format feasible, MR raw data formats from different vendors can be transformed to the ISMRMRD file format using siemens_to_ismrmrd, philips_to_ismrmrd or bruker_to_ismrmrd available on the [ISMRMRD Github]( https://github.com/ismrmrd/ ).
#
#
# ### ISMRMRD Schematic Overview
# #### Raw and Metadata
# To have a self-contained acquisition data output from the scanner the files contain one data portion and one header portion.
#
# 
# 
#%% make sure figures appears inline and animations works
# %matplotlib notebook
# +
# Version tag of this demo notebook
__version__ = '0.1.0'
# import engine module
# pMR exposes SIRF's MR reconstruction functionality, backed by Gadgetron
import sirf.Gadgetron as pMR
from sirf.Utilities import examples_data_path
# import further modules
import os
import matplotlib.pyplot as plt
#%% GO TO MR FOLDER
# Change into SIRF's bundled MR example-data directory so the raw-data
# file used below can be opened by its bare file name
os.chdir(examples_data_path('MR'))
# -
# ### A note on Python programming
#
# After the above `import` statement, the functionalities of the Synergistic Image Reconstruction Framework (SIRF) on the MR side are available in the package `sirf.Gadgetron` which in the code we will refer to as `pMR`. So whenever we want to call anything from this package we must preceed the call with `pMR.` (the dot is important).
# +
#%% LOAD RAW MR DATA
# Fully sampled Cartesian acquisition of a resolution phantom, stored in
# the ISMRMRD (.h5) format described above
filename = 'ptb_resolutionphantom_fully_ismrmrd.h5'
# AcquisitionData wraps the k-space samples together with the ISMRMRD header
acq_data = pMR.AcquisitionData(filename)
print(type(acq_data))
# -
# ### A second note on Python programming
#
# As you might be familiar with from other languages you can create objects in Python which cluster together data and functionality. This clustering is called a __class__ .
#
# In this case the object is a variable named `acq_data` and is of __class__ `AcquisitionData`. As you can see the prefix `pMR.` appears because `AcquisitionData` is defined in `pMR`.
#
# The object `acq_data` is generated by calling the __constructor__ of the class. The constructor is the name of the class itself. As you can see the constructor of `AcquisitionData` takes an argument, namely a string, which is the name of the .h5 file with the acquisition data.
#
# __Please remember this syntax you will need it later in small programming tasks.__
# ### Preprocessing MR Acquisition Data
#
# Prior to image reconstruction several pre-processing steps must be undertaken.
# These include
# - asymmetric echo compensation
# - noise decorrelation for multi-coil data
# - removal of oversampling along frequency encoding ( also called "readout" or "kx" -direction).
#
#%% PRE-PROCESSING CELL
# preprocessed_data = acq_data
# Applies the pre-processing chain listed above: asymmetric-echo compensation,
# noise decorrelation for multi-coil data, and removal of readout oversampling
preprocessed_data = pMR.preprocess_acquisition_data(acq_data)
# ### Yet another note on Python programming
#
# As you can see, we now used a function call from the `pMR` package to act on the object we created before.
# (Just as in many other programming languages) The syntax is of the form:
# ```python
# output = function_name(argument)
# ```
# +
#%% SETUP MR RECONSTRUCTION
# create a reconstruction object using 2D inverse Fourier transform
recon = pMR.FullySampledReconstructor()
#%% PASS RAW DATA TO RECONSTRUCTOR
# Hand the pre-processed k-space data to the reconstructor; the actual
# reconstruction is triggered later via recon.process()
recon.set_input(preprocessed_data)
# -
# ### A last note on Python programming
# The last two lines of code contained the syntax of how to call the functionality of an object.
# We see that a variable called `recon` is created and assigned the call to the constructor of the class `FullySampledReconstructor`. This constructor does not take any argument.
#
# Since classes cluster together data and the functionality of how to act on the data, this functionality must be accessible somehow. The functions which are associated to a class are called __methods__.
# You can call them by writing:
# ```python
# output = variable_name.method_name(arguments)
# ```
# The object `recon` possesses the __method__ `set_input` which takes as argument `AcquisitionData`. It does not return anything.
# __Please remember how to call methods of an object, you will need it later on.__
#
# ### Yeah, but now what?
# We mentioned software documentation earlier. Since nobody knows what functionality these objects have we can call for help!
#
# #### Programming exercise
# Please write the following code in the next cell:
# - Call the built-in `help()` function with the argument `recon` to view the documentation of `FullySampledReconstructor`.
#
# Afterwards run the cell.
# write your code here and run the cell
# So now we see that there is a list of methods we can use. We used `set_input()` already. Since we already set the input we will use `process()` next.
#
# ### Programming task
# Please write code executing the following task:
# - call the `process()` method of the variable `recon`.
# __Hint:__ Don't forget empty parentheses if you don't pass any arguments to signalize Python you are calling a function.
# Write your code here and run the cell.
# It is just a one-liner!
# +
#%% SOLUTION CELL: DON'T VIEW UNLESS YOU TRIED!
# as promised just one single line.
# calling the method using the dot and empty parentheses since we don't supply arguments.
recon.process()
# +
#%% RETRIEVE AND VIEW IMAGE
image_data = recon.get_output()
# take the magnitude, since the reconstructed array may be complex-valued
image_array = abs(image_data.as_array())
# normalise to [0, 1] for display
image_array = image_array/image_array.max()
plt.figure(1)
plt.set_cmap('gray')
# show the first slice; vmax < 1 boosts the displayed contrast
plt.imshow(image_array[0,:,:], vmin=0, vmax=0.7)
plt.title('Reconstructed image data (magnitude)')
# -
# ### Recap
# #### We just learned how
# - to run a Jupyter notebook.
# - data is stored in the ISMRMRD format.
# - to call Python functionality from the pMR (sirf.Gadgetron) package.
# - wrote our first lines of code! Bam!
# - to reconstruct fully sampled MR kspace data using SIRF.
#
| notebooks/MR/a_fully_sampled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Data crawling on Twitter: Full-archive search
#
# Documentation: https://developer.twitter.com/en/docs/twitter-api/tweets/search/api-reference/get-tweets-search-all
#
# Endpoint URL: https://api.twitter.com/2/tweets/search/all
#
# +
from dotenv import load_dotenv
import pandas as pd
import requests
import json
import time
import os
# URL-encoded full-archive search query (Portuguese vaccine-related tweets,
# retweets excluded via "-rt")
query='((vacina%20vacinacao)%20OR%20(vacina%20OR%20vacinacao))%20-rt'
# URL-encoded ISO-8601 bounds of the search window
start_time='2020-02-29T00%3A00%3A00Z'
end_time='2021-05-04T00%3A00%3A00Z'
# Read the bearer token from a .env file / the environment
load_dotenv()
auth_token = os.environ.get('AUTH_TOKEN')
header = {'Authorization': 'Bearer ' + auth_token}
max_results='500'  # page size per request
next_token=''
# First request: no pagination token yet
url='https://api.twitter.com/2/tweets/search/all?query='+query+'&start_time='+start_time+'&end_time='+end_time+'&max_results='+max_results+'&expansions=author_id&tweet.fields=created_at'
response = requests.get(url,headers=header)
time.sleep(1)  # throttle to respect the endpoint's rate limit
listOfTweets = json.loads(response.content)
print('New Request on',url)
tweets = pd.DataFrame(listOfTweets['data'])
if 'next_token' in listOfTweets['meta']:
    next_token = listOfTweets['meta']['next_token']
    # Follow pagination until the API stops returning a next_token
    while 'next_token' in listOfTweets['meta']:
        url='https://api.twitter.com/2/tweets/search/all?query='+query+'&start_time='+start_time+'&end_time='+end_time+'&max_results='+max_results+'&next_token='+next_token+'&expansions=author_id&tweet.fields=created_at'
        response = requests.get(url,headers=header)
        time.sleep(1)
        listOfTweets = json.loads(response.content)
        print('New Request on',url)
        if 'data' in listOfTweets:
            # Accumulate this page of results
            tweets = tweets.append(pd.DataFrame(listOfTweets['data']),ignore_index=True)
            if 'meta' in listOfTweets:
                if 'next_token' in listOfTweets['meta']:
                    next_token = listOfTweets['meta']['next_token']
                else:
                    # Last page reached
                    print('Done! Total of ', len(tweets), 'tweets collected.')
                    break
            else:
                break
        else:
            # Response carried no tweet data (e.g. an error payload)
            print('Missing request')
            break
else:
    # Single-page result set
    tweets = pd.DataFrame(listOfTweets['data'])
    print('Done! Total of', len(tweets), 'tweets collected.')
# -
# Persist the crawled tweets. Fix: the crawl above stores the results in
# `tweets`; `twitterData` only exists as a local inside proccess_text, so
# referencing it here raised a NameError.
tweets.to_csv('./tweets.csv',index=False)
# # Text pre-processing
# +
import pandas as pd
#provaxxers
# Pre-collected tweet datasets for the two stance groups
provaxxers = pd.read_csv('./datasets/provaxxersTweets.csv', low_memory=False)
#antivaxxers
antivaxxers = pd.read_csv('./datasets/antivaxxersTweets.csv', low_memory=False)
# +
import nltk
from nltk import tokenize
import numpy as np
from string import punctuation
import unidecode
stemmer = nltk.RSLPStemmer()
# Removes hashtags, user mentions, links, accents, stopwords and short terms
def proccess_text(text):
    """Clean a collection of raw tweet texts for NLP processing.

    Parameters
    ----------
    text : pandas.Series
        Raw tweet texts (used as the ``text`` column of a DataFrame).

    Returns
    -------
    pandas.Series
        Lower-cased texts with links, @mentions, #hashtags, accents,
        punctuation, Portuguese stopwords and non-letter characters removed.
    """
    twitterData = pd.DataFrame(text)
    # Strip links, user mentions and hashtags.
    # regex=True makes the pre-pandas-2.0 behaviour explicit (newer pandas
    # defaults to literal replacement, which would break these patterns).
    twitterData['processed_text'] = twitterData.text.str.replace(r'(http\S+)', '', regex=True) \
        .str.replace(r'@[\w]*', '', regex=True) \
        .str.replace(r'#[\w]*', '', regex=True)
    # Remove accents (e.g. "vacinação" -> "vacinacao")
    textWords = [unidecode.unidecode(t) for t in twitterData.processed_text]
    # Tokens to drop: Portuguese stopwords plus punctuation characters.
    # A set gives O(1) membership tests in the loop below.
    stopWords = set(nltk.corpus.stopwords.words("portuguese")) | set(punctuation)
    # Tokenizer that splits punctuation away from words
    punctSeparator = tokenize.WordPunctTokenizer()
    # Rebuild each text keeping only the non-stopword tokens
    transformedText = []
    for t in textWords:
        kept = [word for word in punctSeparator.tokenize(t.lower())
                if word not in stopWords]
        transformedText.append(' '.join(kept))
    twitterData.processed_text = transformedText
    # Finally replace anything that is not a letter or '#' with a space
    twitterData.processed_text = twitterData.processed_text.str.replace(
        r"[^a-zA-Z#]", " ", regex=True)
    return twitterData.processed_text
# +
# Keep only the columns of interest from the crawled DataFrame
tweets = {'created_at': tweets.created_at, 'text': tweets.text,'id':tweets.id}
rawTweets = pd.DataFrame(tweets)
# Clean the raw text (links, mentions, stopwords, accents removed)
rawTweets['processed_text'] = proccess_text(rawTweets.text)
processedTweets = rawTweets.drop(columns=["text"])
# Collapse runs of internal whitespace in each processed text
trasnformedText = list()
for phrase in processedTweets.processed_text:
    newPhrase = list()
    newPhrase.append(' '.join(phrase.split()))
    # newPhrase always holds exactly one element, so this appends once per phrase
    for words in newPhrase:
        trasnformedText.append(''.join(newPhrase))
processedTweets.processed_text = trasnformedText
# Drop texts with fewer than four words (i.e. fewer than three spaces)
index=[x for x in processedTweets.index if processedTweets.processed_text[x].count(' ') < 3]
processedTweets = processedTweets.drop(index)
# Drop rows whose text is just a single space
removeEmpty = processedTweets.processed_text != ' '
processedTweets = processedTweets[removeEmpty]
processedTweets.reset_index(inplace=True)
# Final document table, ordered chronologically
tweets = {'created_at': processedTweets.created_at, 'text': processedTweets.processed_text, 'id':processedTweets.id}
docs = pd.DataFrame(tweets)
docs = docs.sort_values(['created_at']).reset_index()
docs = docs.drop(columns=["index"])
#docs.to_csv('./datasets/kdmile/provaxxers.csv',index=False)
# -
| GetTwitter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 251} colab_type="code" id="5_Xm1E_kf5FQ" outputId="f9c547c4-7834-451a-a4d0-6b5e7d8c8f0f"
# code by <NAME>(<NAME>) @graykode
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
dtype = torch.FloatTensor
# S: Symbol that shows starting of decoding input
# E: Symbol that shows starting of decoding output
# P: Symbol that will fill in blank sequence if current batch data size is short than time steps
# Vocabulary: the three special symbols followed by the lowercase alphabet
char_arr = [c for c in 'SEPabcdefghijklmnopqrstuvwxyz']
# Maps each character to its integer index in char_arr
num_dic = {n: i for i, n in enumerate(char_arr)}
print(num_dic)
# Training pairs: each source word is "translated" to its antonym
seq_data = [['man', 'women'], ['black', 'white'], ['king', 'queen'], ['girl', 'boy'], ['up', 'down'], ['high', 'low']]
# task: translate
# task: translate
# + colab={"base_uri": "https://localhost:8080/", "height": 251} colab_type="code" id="5_Xm1E_kf5FQ" outputId="f9c547c4-7834-451a-a4d0-6b5e7d8c8f0f"
# Seq2Seq Parameter
n_step = 5                  # maximum word length; shorter words are padded with 'P'
n_hidden = 128              # RNN hidden-state size
n_class = len(num_dic)      # vocabulary size (one-hot dimension)
batch_size = len(seq_data)  # all word pairs form a single batch
# + colab={"base_uri": "https://localhost:8080/", "height": 251} colab_type="code" id="5_Xm1E_kf5FQ" outputId="f9c547c4-7834-451a-a4d0-6b5e7d8c8f0f"
def make_batch(seq_data):
    """Turn [source, target] word pairs into one-hot encoder/decoder batches.

    Pads both words of each pair to n_step with 'P' (mutating seq_data in
    place, which callers rely on), then builds the encoder input, the
    decoder input (prefixed with 'S') and the index target (suffixed 'E').
    """
    input_batch, output_batch, target_batch = [], [], []

    for seq in seq_data:
        for i in range(2):
            seq[i] = seq[i].ljust(n_step, 'P')

        enc_ids = [num_dic[ch] for ch in seq[0]]
        dec_ids = [num_dic[ch] for ch in ('S' + seq[1])]
        tgt_ids = [num_dic[ch] for ch in (seq[1] + 'E')]
        print(seq[0], ('S' + seq[1]), (seq[1] + 'E'))

        input_batch.append(np.eye(n_class)[enc_ids])
        output_batch.append(np.eye(n_class)[dec_ids])
        target_batch.append(tgt_ids)  # class indices, not one-hot

    # make tensors
    return (Variable(torch.Tensor(input_batch)),
            Variable(torch.Tensor(output_batch)),
            Variable(torch.LongTensor(target_batch)))
# -
input_batch, output_batch, target_batch = make_batch(seq_data)
# + colab={"base_uri": "https://localhost:8080/", "height": 251} colab_type="code" id="5_Xm1E_kf5FQ" outputId="f9c547c4-7834-451a-a4d0-6b5e7d8c8f0f"
# Model
class Seq2Seq(nn.Module):
    """Plain encoder/decoder RNN: the encoder's final hidden state seeds the decoder."""

    def __init__(self):
        super(Seq2Seq, self).__init__()
        self.enc_cell = nn.RNN(input_size=n_class, hidden_size=n_hidden, dropout=0.5)
        self.dec_cell = nn.RNN(input_size=n_class, hidden_size=n_hidden, dropout=0.5)
        self.fc = nn.Linear(n_hidden, n_class)

    def forward(self, enc_input, enc_hidden, dec_input):
        # nn.RNN expects time-major tensors: [seq_len, batch_size, features]
        enc_seq = enc_input.transpose(0, 1)
        dec_seq = dec_input.transpose(0, 1)
        # enc_states: [num_layers(=1) * num_directions(=1), batch_size, n_hidden]
        _, enc_states = self.enc_cell(enc_seq, enc_hidden)
        # dec_outputs: [max_len+1, batch_size, n_hidden], projected to n_class logits
        dec_outputs, _ = self.dec_cell(dec_seq, enc_states)
        return self.fc(dec_outputs)
# + colab={"base_uri": "https://localhost:8080/", "height": 251} colab_type="code" id="5_Xm1E_kf5FQ" outputId="f9c547c4-7834-451a-a4d0-6b5e7d8c8f0f"
model = Seq2Seq()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

for epoch in range(5000):
    # fresh initial hidden state each epoch:
    # [num_layers * num_directions, batch_size, n_hidden]
    hidden = Variable(torch.zeros(1, batch_size, n_hidden))

    optimizer.zero_grad()
    # input_batch:  [batch_size, max_len, n_class]
    # output_batch: [batch_size, max_len+1, n_class] (prefixed with 'S')
    # target_batch: [batch_size, max_len+1] of class indices (not one-hot)
    output = model(input_batch, hidden, output_batch)
    # time-major -> batch-major: [batch_size, max_len+1, n_class]
    output = output.transpose(0, 1)

    # sum the per-sequence cross-entropy losses over the batch
    loss = 0
    for prediction, target in zip(output, target_batch):
        loss += criterion(prediction, target)
    if (epoch + 1) % 1000 == 0:
        print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
    loss.backward()
    optimizer.step()
# + colab={"base_uri": "https://localhost:8080/", "height": 251} colab_type="code" id="5_Xm1E_kf5FQ" outputId="f9c547c4-7834-451a-a4d0-6b5e7d8c8f0f"
# Test
def translate(word):
    """Greedily decode the model's prediction for `word` (decoder fed only padding)."""
    input_batch, output_batch, _ = make_batch([[word, 'P' * len(word)]])

    # initial hidden state: [num_layers * num_directions, batch_size(=1), n_hidden]
    hidden = Variable(torch.zeros(1, 1, n_hidden))
    output = model(input_batch, hidden, output_batch)
    # argmax over the class dimension -> [max_len+1, 1, 1] of indices
    predict = output.data.max(2, keepdim=True)[1]

    decoded = [char_arr[idx] for idx in predict]
    # keep everything before the end-of-sequence symbol, then drop padding
    translated = ''.join(decoded[:decoded.index('E')])
    return translated.replace('P', '')
# + colab={"base_uri": "https://localhost:8080/", "height": 251} colab_type="code" id="5_Xm1E_kf5FQ" outputId="f9c547c4-7834-451a-a4d0-6b5e7d8c8f0f"
print('test')
# Probe generalisation: several inputs are misspelled or were never seen in training
print('mann ->', translate('mann'))
print('mans ->', translate('mans'))
print('kang ->', translate('kang'))
print('black ->', translate('black'))
print('upp ->', translate('upp'))
| 4-1.Seq2Seq/Seq2Seq_Torch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.9 64-bit
# name: python3
# ---
# + [markdown] colab_type="text" id="OE4xjKWgtIX2" cell_id="00000-a21c7b2e-3ec4-42ae-ad36-1236eeaeb8dc" deepnote_cell_type="markdown"
# # Solving for static equilibrium
# This notebook will help you assess in simulation which of the sphere configurations in the problem represent configurations at equilibrium and which. **You do not need to turn in this notebook, and there is no autograded component.** It is just to help you build intuition, show you how to use Drake for problems like this, and check your answers!
#
# ## Imports and function definitions
# + colab={} colab_type="code" id="KdXAerwV13rQ" cell_id="00001-ca672b35-7279-4dab-9dd0-6ab261e18cc8" deepnote_to_be_reexecuted=false source_hash="a55838a8" execution_start=1633609324746 execution_millis=0 output_cleared=false deepnote_cell_type="code"
# python libraries
import numpy as np
import pydrake
from pydrake.all import (
    AddMultibodyPlantSceneGraph, DiagramBuilder, FixedOffsetFrame, MeshcatVisualizerCpp,
    RigidTransform, RotationMatrix, Simulator, Solve, Sphere, StaticEquilibriumProblem
)
from manipulation.meshcat_cpp_utils import StartMeshcat
from manipulation.scenarios import AddShape
from manipulation import running_as_notebook
# + tags=[] cell_id="00002-ea0c2af3-ef76-4b74-a470-53d13808645c" deepnote_to_be_reexecuted=false source_hash="6d42057f" execution_start=1633609325171 execution_millis=181 deepnote_cell_type="code"
# Start the visualizer.
# Launches a Meshcat server; open the printed URL in a browser to view the scene
meshcat = StartMeshcat()
# + [markdown] tags=[] cell_id="00002-05b1e502-5c03-415c-8e3e-ef22b18ecfa9" deepnote_cell_type="markdown"
# ## Initialization
# + tags=[] cell_id="00006-6bd424de-1c38-46ba-8d68-ff1dede7916e" deepnote_to_be_reexecuted=false source_hash="3f5297f9" execution_start=1633609332544 execution_millis=0 deepnote_cell_type="code"
# System parameters for the spheres-on-a-plane problem.
mu = 0.5  # coefficient of friction (spheres and ground)
r = 0.3   # sphere radius [m]
m = 1     # sphere mass [kg]
builder = DiagramBuilder()
plant, scene_graph = AddMultibodyPlantSceneGraph(builder, time_step=1e-4)
plant.set_name("plant")
# Frame rotated 90 deg about world x. PlanarJoint constrains motion to its
# parent frame's x-y plane, so this rotation makes the spheres move in the
# world x-z plane instead.
world_offset_frame = pydrake.multibody.tree.FixedOffsetFrame(
    "world_joint_frame",
    plant.world_frame(),
    RigidTransform(
        RotationMatrix.MakeXRotation(np.pi/2),
        [0, 0, 0]))
plant.AddFrame(world_offset_frame)
# Create the sphere bodies
spheres = []
sphere_joints = []
for i in range(3):
    sphere_name = "sphere_{}".format(i)
    # One RGBA color per sphere: red, green, blue (index i sets the channel).
    color = [0, 0, 0, 1]
    color[i] = 1
    spheres.append(AddShape(plant, pydrake.geometry.Sphere(r), name=sphere_name, mass=m, mu=mu, color=color))
    # Set up planar joint
    sphere_joints.append(plant.AddJoint(pydrake.multibody.tree.PlanarJoint(
        "sphere_{}_joint".format(i),
        world_offset_frame,
        plant.GetFrameByName(sphere_name))))
# Ground is a large box welded so its top surface is at world z = 0.
ground = AddShape(plant, pydrake.geometry.Box(10,10,2.0), name="ground", mu=mu)
plant.WeldFrames(plant.world_frame(), plant.GetFrameByName("ground"), RigidTransform(p=[0,0,-1.0]))
plant.Finalize()
visualizer = MeshcatVisualizerCpp.AddToBuilder(builder, scene_graph, meshcat)
diagram = builder.Build()
context = diagram.CreateDefaultContext()
# Mutable plant sub-context, used below to set sphere poses for visualization.
plant_context = plant.GetMyMutableContextFromRoot(context)
# + [markdown] tags=[] cell_id="00010-2b1553e5-310a-49b1-a036-01094c34268e" deepnote_cell_type="markdown"
# # Using the plant
# This is the main part of the notebook for you to edit. (The other spot is where the system parameters are defined near the top of the script.) There are three sections:
#
# 1. **Initializing your guess for a static equilibrium position**: You can specify the $xyz$ position of each of the sphere. (To answer the question, you'll want to make it match one of the configurations from the problem, but feel free to experiment/try others.)
# 2. **Computing the static equilibrium position**: The `StaticEquilibriumProblem` class allows us to automatically set up the optimization problem for static equilibrium for a given plant. We use this class to compute an actual equilibrium position.
# 3. **Simulating the plant.** Given a configuration for the system, simulate how it evolves over time.
# + [markdown] tags=[] cell_id="00011-ba8cf8f2-1453-423d-afc5-75d417fd2674" deepnote_cell_type="markdown"
# ## Initializing your guess for a static equilibrium position
# Specify the x and z of the center of mass of each of the spheres. (The spheres are fixed in the $xz$ plane, so that's all you have to specify.)
# + tags=[] cell_id="00012-386f5e93-c2de-4b3f-a5f8-d000166de3a4" deepnote_to_be_reexecuted=false source_hash="c29bf571" execution_start=1633609446283 execution_millis=0 deepnote_cell_type="code"
#########
# REPLACE WITH YOUR CODE
guesses = [
[0, r], # Red sphere xz
[2*r, r], # Green sphere xz
[4*r, r] # Blue sphere xz
]
#########
# + [markdown] tags=[] cell_id="00012-e686da53-f1d5-4c87-8c65-d9a83ce9f22a" deepnote_cell_type="markdown"
# ### Visualizing your guess
# Run the following cell to see your guess rendered in meshcat. **This does not check for static equilibrium or run any physics simulation,** but it will let you verify you've set your pose how you intended.
# + tags=[] cell_id="00010-17d4b544-4e9b-4b26-b15d-d1f78b12864d" deepnote_to_be_reexecuted=false source_hash="2c3e43ba" execution_start=1633609447006 execution_millis=0 deepnote_cell_type="code"
for i, guess in enumerate(guesses):
sphere_joints[i].set_translation(plant_context, guess)
diagram.Publish(context)
# + [markdown] tags=[] cell_id="00015-03a43957-700c-4f9a-9118-890d07c17399" deepnote_cell_type="markdown"
# ## Computing the static equilibrium position
# This cell computes a static equilibrium position. If it's close to your original guess, then you initialized the system at equilibrium. If not, your guess is not an equilibrium.
# + tags=[] cell_id="00008-4e831255-59f3-4385-82e3-17eefcda5a0f" deepnote_to_be_reexecuted=false source_hash="614edfda" execution_start=1633609451706 execution_millis=12 deepnote_cell_type="code"
# The StaticEquilibriumProblem needs an "autodiff" version of the diagram/multibody plant to
# use gradient-based optimization.
# NOTE: this whole cell was previously duplicated verbatim (the same program
# was built, solved, and printed twice); the copy-paste duplicate is removed.
autodiff_diagram = diagram.ToAutoDiffXd()
autodiff_context = autodiff_diagram.CreateDefaultContext()
autodiff_plant = autodiff_diagram.GetSubsystemByName("plant")
static_equilibrium_problem = StaticEquilibriumProblem(autodiff_plant, autodiff_plant.GetMyContextFromRoot(autodiff_context), set())

# Seed the optimizer with the guessed poses. Each planar joint contributes
# 3 generalized positions (x, z, theta); theta is left at zero.
initial_guess = np.zeros(plant.num_positions())
for i, guess in enumerate(guesses):
    initial_guess[3*i] = guess[0]  # x
    initial_guess[3*i+1] = guess[1]  # z
static_equilibrium_problem.get_mutable_prog().SetInitialGuess(
    static_equilibrium_problem.q_vars(), initial_guess)

result = Solve(static_equilibrium_problem.prog())
result.is_success()
q_sol = result.GetSolution(static_equilibrium_problem.q_vars())
for i, guess in enumerate(guesses):
    print("Guess for position of {}:".format(i), guess, "\tEquilibrium position of sphere {}:".format(i), q_sol[3*i:3*i+2])

# Report the contact forces acting at the solution configuration.
for wrench in static_equilibrium_problem.GetContactWrenchSolution(result):
    print(f"Spatial force at world position {wrench.p_WCb_W} between {wrench.bodyA_index} and {wrench.bodyB_index}:")
    print(f"  translational: {wrench.F_Cb_W.translational()}")
    print(f"  rotational: {wrench.F_Cb_W.rotational()}")
# + [markdown] tags=[] cell_id="00018-3e976d88-1a6f-43cf-8ccb-a87d224ac961" deepnote_cell_type="markdown"
# ### Visualizing the solution configuration
# This doesn't yet run the dynamics for the system (so the objects won't move), but it *will* update their poses to match the results of `StaticEquilibriumProblem`.
# + tags=[] cell_id="00015-814bd494-f4e6-49b6-b394-f2d9a2a3b0bf" deepnote_to_be_reexecuted=false source_hash="fdff2164" execution_start=1633609459704 execution_millis=1 deepnote_cell_type="code"
plant.SetPositions(plant_context, q_sol)
diagram.Publish(context)
# + [markdown] tags=[] cell_id="00019-c6b4c5e2-5448-4d54-9cf7-1d5702e4f499" deepnote_cell_type="markdown"
# ## Simulating the solution
#
# You may see simulations of the static equilibrium that result in the spheres moving. Why is that?
#
# Keep in mind that
# - A static equilibrium might not be a *stable* equilibrium. States close to the equilibrium might diverge.
# - The optimization solver satisfies the equations only up to a numerical tolerance.
# + tags=[] cell_id="00009-ccaf5755-99ec-472e-ab73-bfe84cccd788" deepnote_to_be_reexecuted=false source_hash="6371fa1b" execution_start=1633609302580 execution_millis=2 deepnote_cell_type="code"
simulator = Simulator(diagram)
# Start the simulation from the solved static-equilibrium configuration.
plant.SetPositions(plant.GetMyContextFromRoot(simulator.get_mutable_context()), q_sol)
if running_as_notebook:
    # Interactive run: play back 5 simulated seconds at real-time rate.
    simulator.set_target_realtime_rate(1.0)
    simulator.AdvanceTo(5.0);
else:
    # Headless/CI run: only a short sanity-check simulation.
    simulator.AdvanceTo(0.1);
# + tags=[] cell_id="00020-c97afac7-1a72-4bb6-8e9a-61b6f57050ee" deepnote_to_be_reexecuted=false source_hash="b623e53d" execution_start=1633609302587 execution_millis=3 deepnote_cell_type="code"
# + [markdown] tags=[] created_in_deepnote_cell=true deepnote_cell_type="markdown"
# <a style='text-decoration:none;line-height:16px;display:flex;color:#5B5B62;padding:10px;justify-content:end;' href='https://deepnote.com?utm_source=created-in-deepnote-cell&projectId=da179554-1a2d-4268-85aa-b1e5b071712b' target="_blank">
# <img alt='Created in deepnote.com' style='display:inline;max-height:16px;margin:0px;margin-right:7.5px;' src='data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iODBweCIgaGVpZ2h0PSI4MHB4IiB2aWV3Qm94PSIwIDAgODAgODAiIHZlcnNpb249IjEuMSIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayI+CiAgICA8IS0tIEdlbmVyYXRvcjogU2tldGNoIDU0LjEgKDc2NDkwKSAtIGh0dHBzOi8vc2tldGNoYXBwLmNvbSAtLT4KICAgIDx0aXRsZT5Hcm91cCAzPC90aXRsZT4KICAgIDxkZXNjPkNyZWF0ZWQgd2l0aCBTa2V0Y2guPC9kZXNjPgogICAgPGcgaWQ9IkxhbmRpbmciIHN0cm9rZT0ibm9uZSIgc3Ryb2tlLXdpZHRoPSIxIiBmaWxsPSJub25lIiBmaWxsLXJ1bGU9ImV2ZW5vZGQiPgogICAgICAgIDxnIGlkPSJBcnRib2FyZCIgdHJhbnNmb3JtPSJ0cmFuc2xhdGUoLTEyMzUuMDAwMDAwLCAtNzkuMDAwMDAwKSI+CiAgICAgICAgICAgIDxnIGlkPSJHcm91cC0zIiB0cmFuc2Zvcm09InRyYW5zbGF0ZSgxMjM1LjAwMDAwMCwgNzkuMDAwMDAwKSI+CiAgICAgICAgICAgICAgICA8cG9seWdvbiBpZD0iUGF0aC0yMCIgZmlsbD0iIzAyNjVCNCIgcG9pbnRzPSIyLjM3NjIzNzYyIDgwIDM4LjA0NzY2NjcgODAgNTcuODIxNzgyMiA3My44MDU3NTkyIDU3LjgyMTc4MjIgMzIuNzU5MjczOSAzOS4xNDAyMjc4IDMxLjY4MzE2ODMiPjwvcG9seWdvbj4KICAgICAgICAgICAgICAgIDxwYXRoIGQ9Ik0zNS4wMDc3MTgsODAgQzQyLjkwNjIwMDcsNzYuNDU0OTM1OCA0Ny41NjQ5MTY3LDcxLjU0MjI2NzEgNDguOTgzODY2LDY1LjI2MTk5MzkgQzUxLjExMjI4OTksNTUuODQxNTg0MiA0MS42NzcxNzk1LDQ5LjIxMjIyODQgMjUuNjIzOTg0Niw0OS4yMTIyMjg0IEMyNS40ODQ5Mjg5LDQ5LjEyNjg0NDggMjkuODI2MTI5Niw0My4yODM4MjQ4IDM4LjY0NzU4NjksMzEuNjgzMTY4MyBMNzIuODcxMjg3MSwzMi41NTQ0MjUgTDY1LjI4MDk3Myw2Ny42NzYzNDIxIEw1MS4xMTIyODk5LDc3LjM3NjE0NCBMMzUuMDA3NzE4LDgwIFoiIGlkPSJQYXRoLTIyIiBmaWxsPSIjMDAyODY4Ij48L3BhdGg+CiAgICAgICAgICAgICAgICA8cGF0aCBkPSJNMCwzNy43MzA0NDA1IEwyNy4xMTQ1MzcsMC4yNTcxMTE0MzYgQzYyLjM3MTUxMjMsLTEuOTkwNzE3MDEgODAsMTAuNTAwMzkyNyA4MCwzNy43MzA0NDA1IEM4MCw2NC45NjA0ODgyIDY0Ljc3NjUwMzgsNzkuMDUwMzQxNCAzNC4zMjk1MTEzLDgwIEM0Ny4wNTUzNDg5LDc3LjU2NzA4MDggNTMuNDE4MjY3Nyw3MC4zMTM2MTAzIDUzLjQxODI2NzcsNTguMjM5NTg4NSBDNTMuNDE4MjY3Nyw0MC4xMjg1NTU3IDM2LjMwMzk1NDQsMzcuNzMwNDQwNSAyNS4yMjc0MTcsMzcuNzMwNDQwNSBDMTcuODQzMDU4NiwzNy43MzA0NDA1ID
kuNDMzOTE5NjYsMzcuNzMwNDQwNSAwLDM3LjczMDQ0MDUgWiIgaWQ9IlBhdGgtMTkiIGZpbGw9IiMzNzkzRUYiPjwvcGF0aD4KICAgICAgICAgICAgPC9nPgogICAgICAgIDwvZz4KICAgIDwvZz4KPC9zdmc+' > </img>
# Created in <span style='font-weight:600;margin-left:4px;'>Deepnote</span></a>
| exercises/clutter/static_equilibrium.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp dtm
# -
# # gensim.models.wrappers.DtmModel
#
# > Run DTM Model using 'gensim'
#
# Run DTM Model using 'gensim'.
import gensim
print('gensim: ',gensim.__version__)
import pyLDAvis
print('pyLDAvis: ',pyLDAvis.__version__)
import pandas as pd
print('pandas: ',pd.__version__)
# # %load_ext blackcellmagic
from dynamic_topic_modeling.sklearn_lda import *
affirmative = pd.read_csv("data/affirmative_modifed.csv")
affirmative.head()
series_slices = affirmative.groupby(["group"]).size().values
# regex https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.Series.str.replace.html
# reindex https://blog.csdn.net/songyunli1111/article/details/78953841
series_slices
stopwords = get_custom_stopwords("data/stopwords.txt", encoding='utf-8') # HIT stopword list
max_df = 0.9 # drop terms appearing in more than this fraction of documents (too common)
min_df = 5 # drop terms appearing in fewer than this many documents (too rare)
n_features = 1000 # maximum number of features to extract
n_top_words = 20 # how many keywords to display per topic
col_content = "text" # name of the column holding the text content
# +
#export
import jieba
def build_docs(text):
    """Tokenize a pandas Series of documents into lists of words.

    Args:
        text: pandas Series of raw document strings.

    Returns:
        list of list of str: one token list per document, produced by
        jieba full-mode segmentation (``cut_all=True``).
    """
    raw_documents = text.tolist()
    # See https://blog.csdn.net/kwame211/article/details/78963517
    # jieba.cut returns a generator; materialize each document's tokens.
    return [list(jieba.cut(document, cut_all=True)) for document in raw_documents]
print('build_docs is deprecated, use `build_corpus`.')
# Preferred alias going forward.
build_corpus = build_docs
# -
corpus = build_corpus(affirmative['content'])
#export
from gensim.corpora import Dictionary
def build_dict(docs, no_below=5, no_above=0.9):
    """Build a gensim Dictionary from tokenized documents.

    Tokens appearing in fewer than `no_below` documents, or in more than
    `no_above` (a fraction) of all documents, are filtered out.
    Reference: https://radimrehurek.com/gensim/auto_examples/tutorials/run_lda.html
    """
    vocabulary = Dictionary(docs)
    vocabulary.filter_extremes(no_below=no_below, no_above=no_above)
    return vocabulary
dictionary = build_dict(corpus, no_below=5, no_above=0.9)
# ```python
# TypeError: doc2bow expects an array of unicode tokens on input, not a single string
# ```
# - [x] list 化 https://blog.csdn.net/cg_amaz1ng/article/details/79567583
# Bug fix: the tokenized documents were bound to `corpus` (via build_corpus
# above); `docs` was never defined in this notebook's namespace.
bow = [dictionary.doc2bow(doc) for doc in corpus]
bow[0:1]
print('Number of unique tokens: %d' % len(dictionary))
print('Number of documents: %d' % len(corpus))
# +
from gensim.models.wrappers import DtmModel

# Set training parameters.
# Bug fix: these were previously written with trailing commas (e.g. `(8,)`),
# which made them 1-element tuples; DtmModel and the later `range(num_topics)`
# calls require plain ints.
num_topics = 8
chunksize = 2000
passes = 20
iterations = 400
eval_every = None  # Don't evaluate model perplexity, takes too much time.

# Make an index-to-word dictionary.
id2word = dictionary.id2token

# Reference: https://radimrehurek.com/gensim/models/wrappers/dtmmodel.html
# "src/dtm-win64.exe" is the compiled DTM binary the wrapper shells out to.
model = DtmModel(
    "src/dtm-win64.exe",
    corpus=bow,
    id2word=id2word,
    num_topics=num_topics,
    time_slices=series_slices,
    model="fixed",
)
# NOTE: model='fixed' (the DIM variant) may leave influences_time empty.
# Training is very slow with time_slices=[1] * len(corpus).
# time_slices (list of int) – number of documents in each time slice.
# -
# 无监督学习,一些超参数调整可以自行实验。
# ```python
# model.influences_time[t][document][topic]
# ```
#
# 但是目的是这里可以拿到三个维度的,`model.gamma_`拿不到。
# `model.influences_time`是空的。
# +
# model.influences_time.__class__
# np.asarray(model.influences_time).shape
# 为什么没有给出正确的 shape 呢?
# -
# ## Word Evolution
# +
#export
# cite https://github.com/GSukr/dtmvisual
import pandas as pd
import matplotlib.pyplot as plt
def display_topic(timespans, num_topics, model, num_words=10):
    """Collect the top words of every topic at every time period.

    :param timespans: number of timespans/periods
    :param num_topics: number of topics
    :param model: trained DTM model (must expose ``show_topic``)
    :param num_words: number of top words to collect per topic per period
    :return: DataFrame with columns topicId, period, weight, word — one row
        per (topic, period, top word) with its corresponding weight.
    """
    topicId, period, weight, word = [], [], [], []
    for t in range(timespans):
        for s in range(num_topics):
            topics = model.show_topic(topicid=s, time=t, topn=num_words)
            # show_topic's num_words kwarg is deprecated; `topn` is used above.
            for word_, w in topics:
                topicId.append(s)
                period.append(t)
                weight.append(w)
                word.append(word_)
    # Bug fix: the column labels previously listed 'word' before 'weight',
    # mislabeling both columns relative to the zip() order below.
    return pd.DataFrame(list(zip(topicId, period, weight, word)),
                        columns=['topicId', 'period', 'weight', 'word'])
# -
model_df = display_topic(timespans=len(series_slices), num_topics=num_topics, model=model, num_words=10)
model_df.head()
model_df.to_csv("output/model_df.csv", index = False)
# +
# 参考 https://github.com/le-hoang-nhan/dynamic-topic-modeling
# print(model.show_topic(topicid=1, time=0, topn=10))
# print(model.show_topic(topicid=2, time=0, topn=10))
# **第一次**执行很慢
# -
# plot by ggplot2 using R
# 
# ## Topic Evolution
# +
# export
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
def document_influence_dim(num_topics, model, time_seq = []):
    """
    Compute the per-document influence on each topic over time.
    See http://users.umiacs.umd.edu/~jbg/nips_tm_workshop/30.pdf
    :param num_topics: number of topics
    :param model: trained DTM model exposing ``influences_time[t][doc][topic]``
    :param time_seq: number of documents in each time slice
    :return: DataFrame with columns document, topicId, period, distribution
    """
    rows = []
    for topic_id in range(num_topics):
        for t, slice_size in enumerate(time_seq):
            for doc_id in range(slice_size):
                # influences_time is indexed [time][document][topic]
                score = round(model.influences_time[t][doc_id][topic_id], 4)
                rows.append((doc_id, topic_id, t, score))
    return pd.DataFrame(rows, columns=['document', 'topicId', 'period', 'distribution'])
def topic_distribution(num_topics, model, time_seq = []):
    """
    Compute the topical distribution within each document.
    :param num_topics: number of topics
    :param model: trained DTM model exposing ``gamma_[document][topic]``
    :param time_seq: number of documents in each time slice
    :return: DataFrame with columns document, topicId, distribution, period
    """
    df_dim = document_influence_dim(num_topics=num_topics, model=model, time_seq=time_seq)
    rows = []
    for doc_id in range(sum(time_seq)):
        for topic_id in range(num_topics):
            rows.append((doc_id, topic_id, round(model.gamma_[doc_id][topic_id], 4)))
    out = pd.DataFrame(rows, columns=['document', 'topicId', 'distribution'])
    # NOTE(review): the period column is taken positionally from df_dim, whose
    # rows are ordered topic-major rather than document-major — verify this
    # alignment is intended.
    out['period'] = list(df_dim.period)
    return out
def visualize_topics(df):
    """
    Plot the mean topic distribution per period (averaged over documents).
    :param df: DataFrame with columns period, topicId, distribution
    """
    fig, ax = plt.subplots(figsize=(30,10))
    mean_dist = df.groupby(['period', 'topicId'], sort=False).mean()['distribution']
    mean_dist.unstack().plot(ax=ax, grid=True, linewidth=3.0, sharex=True)
    plt.ylabel("Topic Distribution", fontsize=16)
    plt.xlabel("Period", fontsize=16)
    plt.title("Topic evolution")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), title = "Topics", fontsize='large', labelspacing=0.6, fancybox = True)
# -
topic_df = topic_distribution(num_topics=num_topics, model=model, time_seq=series_slices)
topic_df.to_csv("output/topic_df.csv", index=False)
visualize_topics(topic_df)
# plot by ggplot2 using R
# 
# ## Misc
#Distance between documents: compare the documents across different time-frames and see how similar they are topic-wise
#considering document 0
doc = 0
print("doc = 0, model.gamma_[doc]",model.gamma_[doc])
# +
#The distance between documents based on their topic distribution: lower, more related
from gensim.matutils import hellinger
# considering document 4 and 5
doc1 = 4
doc2 = 5
hellinger(model.gamma_[doc1], model.gamma_[doc2])
# +
# import pyLDAvis
# doc_topic, topic_term, doc_lengths, term_frequency, vocab = model.dtm_vis(time=0, corpus=corpus)
# vis_wrapper = pyLDAvis.prepare(topic_term_dists=topic_term, doc_topic_dists=doc_topic, doc_lengths=doc_lengths, vocab=vocab, term_frequency=term_frequency)
# pyLDAvis.display(vis_wrapper)
# -
# ## Save
import pickle as pkl
with open("model/dtm.pkl", 'wb') as fp:
pkl.dump(model, fp)
with open("model/dtm.pkl", 'rb') as fp:
model0 = pkl.load(fp)
print(model0.__class__)
| dtm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content-dl/blob/main/tutorials/W2D3_ModernRecurrentNeuralNetworks/student/W2D3_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -
# # Tutorial 1: Modeling sequencies and encoding text
# **Week 2, Day 3: Modern RNNs**
#
# **By Neuromatch Academy**
#
# __Content creators:__ <NAME>, <NAME>, <NAME>
#
# __Content reviewers:__ <NAME>, <NAME>, <NAME>, <NAME>
#
# __Content editors:__ <NAME>, <NAME>
#
# __Production editors:__ <NAME>, <NAME>
#
# **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
#
# <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
# ----
# # Tutorial objectives
#
# Before we begin with exploring how RNNs excel at modelling sequences, we will explore some of the other ways we can model sequences, encode text, and make meaningful measurements using such encodings and embeddings.
# + cellView="form"
# @title Tutorial slides
# @markdown These are the slides for the videos in this tutorial
# @markdown If you want to locally download the slides, click [here](https://osf.io/n263c/download)
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/n263c/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
# -
# ---
# ## Setup
# + cellView="form"
# @title Install dependencies
# @markdown There may be `Errors`/`Warnings` reported during the installation. However, they are to be ignored.
# !pip install torchtext==0.4.0 --quiet
# !pip install --upgrade gensim --quiet
# !pip install unidecode --quiet
# !pip install hmmlearn --quiet
# !pip install fasttext --quiet
# !pip install nltk --quiet
# !pip install pandas --quiet
# !pip install python-Levenshtein --quiet
# !pip install git+https://github.com/NeuromatchAcademy/evaltools --quiet
from evaltools.airtable import AirtableForm
# generate airtable form
atform = AirtableForm('appn7VdPRseSoMXEG','W2D3_T1','https://portal.neuromatchacademy.org/api/redirect/to/9c55f6cb-cdf9-4429-ac1c-ec44fe64c303')
# +
# Imports
import time
import fasttext
import numpy as np
import pandas as pd
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.nn import functional as F
from hmmlearn import hmm
from scipy.sparse import dok_matrix
from torchtext import data, datasets
from torchtext.vocab import FastText
import nltk
from nltk import FreqDist
from nltk.corpus import brown
from nltk.tokenize import word_tokenize
from gensim.models import Word2Vec
from sklearn.manifold import TSNE
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm_notebook as tqdm
# + cellView="form"
# @title Figure Settings
import ipywidgets as widgets
# %config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/content-creation/main/nma.mplstyle")
# + cellView="form"
# @title Load Dataset from `nltk`
# no critical warnings, so we supress it
import warnings
warnings.simplefilter("ignore")
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
nltk.download('brown')
nltk.download('webtext')
# + cellView="form"
# @title Helper functions
import requests
def cosine_similarity(vec_a, vec_b):
    """Compute cosine similarity between vec_a and vec_b"""
    dot_product = np.dot(vec_a, vec_b)
    norm_product = np.linalg.norm(vec_a) * np.linalg.norm(vec_b)
    return dot_product / norm_product
def tokenize(sentences):
    """Split a sentence string into word tokens using nltk's word_tokenize."""
    return word_tokenize(sentences)
def plot_train_val(x, train, val, train_label, val_label, title, y_label,
                   color):
    """Plot training vs. validation curves over epochs on the current figure."""
    plt.plot(x, train, color=color, label=train_label)
    plt.plot(x, val, color=color, linestyle='--', label=val_label)
    plt.xlabel('epoch')
    plt.ylabel(y_label)
    plt.title(title)
    plt.legend(loc='lower right')
def load_dataset(emb_vectors, sentence_length=50, seed=522):
    """Load the IMDB dataset into torchtext bucket iterators.

    :param emb_vectors: pre-trained embedding vectors for TEXT.build_vocab
    :param sentence_length: pad/truncate every example to this many tokens
    :param seed: seed for the train/validation split
    :return: (TEXT field, vocab size, train_iter, valid_iter, test_iter)
    """
    # Text field: lowercased, tokenized with the helper above, fixed length.
    TEXT = data.Field(sequential=True,
                      tokenize=tokenize,
                      lower=True,
                      include_lengths=True,
                      batch_first=True,
                      fix_length=sentence_length)
    LABEL = data.LabelField(dtype=torch.float)
    train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)
    TEXT.build_vocab(train_data, vectors=emb_vectors)
    LABEL.build_vocab(train_data)
    # NOTE(review): random.seed(seed) returns None, so random_state here is
    # always None and the split relies on the seeding side effect — torchtext's
    # split() expects a random state (e.g. random.getstate()); verify intended.
    train_data, valid_data = train_data.split(split_ratio=0.7,
                                              random_state=random.seed(seed))
    # Bucket iterators group examples of similar length to minimize padding.
    train_iter, valid_iter, test_iter = data.BucketIterator.splits((train_data,
                                                                    valid_data,
                                                                    test_data),
                                                                   batch_size=32,
                                                                   sort_key=lambda x: len(x.text),
                                                                   repeat=False,
                                                                   shuffle=True)
    vocab_size = len(TEXT.vocab)
    print(f'Data are loaded. sentence length: {sentence_length} '
          f'seed: {seed}')
    return TEXT, vocab_size, train_iter, valid_iter, test_iter
def download_file_from_google_drive(id, destination):
    """Download a (possibly large) Google Drive file to `destination`."""
    base_url = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(base_url, params={ 'id': id }, stream=True)
    # Large files trigger a virus-scan warning page; re-request with the
    # confirmation token when one was issued.
    confirm = get_confirm_token(response)
    if confirm:
        response = session.get(base_url,
                               params={ 'id': id, 'confirm': confirm },
                               stream=True)
    save_response_content(response, destination)
def get_confirm_token(response):
    """Return Google Drive's download-warning cookie value, if present."""
    for name, value in response.cookies.items():
        if name.startswith('download_warning'):
            return value
    return None
def save_response_content(response, destination):
    """Stream the response body to `destination` in 32 KiB chunks."""
    chunk_size = 32768
    with open(destination, "wb") as out_file:
        for chunk in response.iter_content(chunk_size):
            # Skip keep-alive chunks, which arrive as empty bytes.
            if not chunk:
                continue
            out_file.write(chunk)
# + cellView="form"
# @title Set random seed
# @markdown Executing `set_seed(seed=seed)` you are setting the seed
# for DL its critical to set the random seed so that students can have a
# baseline to compare their results to expected results.
# Read more here: https://pytorch.org/docs/stable/notes/randomness.html
# Call `set_seed` function in the exercises to ensure reproducibility.
import random
import torch
def set_seed(seed=None, seed_torch=True):
    """Seed python, numpy and (optionally) torch RNGs for reproducibility."""
    if seed is None:
        seed = np.random.choice(2 ** 32)
    random.seed(seed)
    np.random.seed(seed)
    if seed_torch:
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        torch.cuda.manual_seed(seed)
        # Make cuDNN deterministic (at some cost in speed).
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True

    print(f'Random seed {seed} has been set.')
# In case that `DataLoader` is used
def seed_worker(worker_id):
    """DataLoader worker-init hook: reseed numpy/python from torch's seed."""
    derived_seed = torch.initial_seed() % (2 ** 32)
    np.random.seed(derived_seed)
    random.seed(derived_seed)
# + cellView="form"
# @title Set device (GPU or CPU). Execute `set_device()`
# inform the user if the notebook uses GPU or CPU.
def set_device():
    """Return 'cuda' when a GPU is available, else 'cpu' (with a warning)."""
    if torch.cuda.is_available():
        print("GPU is enabled in this notebook.")
        return "cuda"
    print("WARNING: For this notebook to perform best, "
          "if possible, in the menu under `Runtime` -> "
          "`Change runtime type.` select `GPU` ")
    return "cpu"
# -
DEVICE = set_device()
SEED = 2021
set_seed(seed=SEED)
# ---
# # Section 1: Sequences, Markov Chains & HMMs
#
# *Time estimate: ~45mins*
# + cellView="form"
# @title Video 1: Sequences & Markov Processes
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1jg411774B", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"ApkE7UFaJAQ", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 1: Sequences & Markov Processes')
display(out)
# -
#
#
# In this notebook we will be exploring the world of sequences - thinking of what kind of data can be thought of as sequences, and how these sequences can be represented as Markov Chains and Hidden Markov Models. These ideas and methods were an important part of natural language processing and language modelling, and serve as a useful way to ground ourselves before we dive into neural network methods.
#
# ## Why is this relevant? How are these sequences related to modern recurrent neural networks?
#
# Like we mentioned before, the notion of modelling sequences of data - in this particular case, **language**, is an ideal place to start. RNNs themselves were constructed keeping in mind sequences, and the ability to temporally model sequences is what inspired RNNs (and the family of LSTM, GRUs - we will see this in the next notebook).
#
# Markov models and hidden markov models serve as an introduction to these concepts because they were some of the earliest ways to think about sequences. They do not capture a lot of the complexity that RNNs excel at, but are an useful way of thinking of sequences, probabilities, and how we can use these concepts to perform tasks such as text generation, or classification - tasks that RNNs excel at today.
#
# Think of this section as an introduction to thinking with sequences and text data, and as a historical introduction to the world of modelling sequential data.
# ## Section 1.1: What data are sequences?
#
# Native Sequences:
#
# - Temporally occurring events (e.g., history, stock prices)
# - Temporally processed events (e.g., communication)
# - Topologically connected components (e.g., polymers, peptides)
#
# Synthetic Sequences:
#
# - Anything processed as a sequence (e.g., scanned pixels in an image)
#
# Sequences can be represented as a Markov Process - since this notion of sequential data is intrinsically linked to RNNs, it is a good place for us to start, and natural language (text!) will be our sequence of choice.
#
# We will be using the Brown corpus which comes loaded with NLTK, and using the entire corpus - this requires a lot of RAM for some of the methods, so we recommend using a smaller subset of categories if you do not have enough RAM.
#
# We will be using some of the code from this [tutorial](https://www.kdnuggets.com/2019/11/markov-chains-train-text-generation.html) and this [Jupyter notebook](https://github.com/StrikingLoo/ASOIAF-Markov/blob/master/ASOIAF.ipynb)
#
# The first few cells of code all involve set-up; some of this code will be hidden because they are not necessary to understand the ideas of markov models, but the way data is setup can be vital to the way the model performs (something in common with neural network models!).
#
# Let us start with loading our corpus.
#
#
category = ['editorial', 'fiction', 'government', 'news', 'religion']
sentences = brown.sents(categories=category)
# Now that we have our sentences, let us look at some statistics to get an idea of what we are dealing with.
lengths = [len(sentence) for sentence in sentences]
lengths = pd.Series(lengths)
# Find the 80-th percentile: the minimal length of such a sentence, which is longer than at least 80% of sentences in the *Brown corpus*.
lengths.quantile(.8)
lengths.describe()
sentences[0:2]
# This gives us an idea of what our dataset looks like, along with some average lengths. This kind of quick data exploration can be very useful - we know how long different sequences are, and how we might want to collect these words.
# Since we will be modelling words as sequences in sentences, let us first collect all the words in our corpus.
corpus_words = []
for sentence in sentences:
for word in sentence:
if "''" not in word and "``" not in word:
corpus_words.append(word)
print(f"Corpus length: {len(corpus_words)}")
corpus_words[0:20]
# We'll now get distinct (unique) words and create a matrix to represent all these words. This is necessary because we will be using this matrix to look at the probability of the words in sequences.
# + cellView="form"
# @title Creating Matrices and Distinct Words
distinct_words = list(set(corpus_words))
word_idx_dict = {word: i for i, word in enumerate(distinct_words)}
distinct_words_count = len(list(set(corpus_words)))
next_word_matrix = np.zeros([distinct_words_count, distinct_words_count])
# -
print("Number of distinct words: " + str(distinct_words_count))
# In the following lines of code we are populating the matrix that tracks the next word in a sentence.
# + cellView="form"
# @title Populating Matrix that tracks next word
for i, word in enumerate(corpus_words[:-1]):
first_word_idx = word_idx_dict[word]
next_word_idx = word_idx_dict[corpus_words[i+1]]
next_word_matrix[first_word_idx][next_word_idx] +=1
# -
# Now we have the information ready to construct a markov chain. The next word matrix is crucial in this, as it allows us to go from one word in the sequence to the next. We will soon see how this is used.
# ## Section 1.2: What is a Markov Chain or Model?
#
# A Markov Chain (or Model) is a:
# - stochastic model describing a sequence of possible events
# - the probability of each event depends only on the state attained in the previous event.
# - a countably infinite sequence, in which the chain moves state at discrete time steps, gives a discrete-time Markov chain (DTMC) [vs. a continuous-time process or CTMC].
# - The classic formal language model is a Markov Model
#
# *Helpful explanations from [eric mjl's tutorial](https://ericmjl.github.io/essays-on-data-science/machine-learning/markov-models/#non-autoregressive-homoskedastic-emissions)*!
#
#
#
# The simplest Markov models assume that we have a _system_ that contains a finite set of states,
# and that the _system_ transitions between these states with some probability at each time step $t$,
# thus generating a sequence of states over time.
# Let's call these states $S$, where
#
# \begin{equation}
# S = \{s_1, s_2, ..., s_n\}
# \end{equation}
#
# To keep things simple, let's start with three states:
#
# \begin{equation}
# S = \{s_1, s_2, s_3\}
# \end{equation}
#
# A Markov model generates a sequence of states, with one possible realization being:
#
# \begin{equation}
# \{s_1, s_1, s_1, s_3, s_3, s_3, s_2, s_2, s_3, s_3, s_3, s_3, s_1, ...\}
# \end{equation}
#
# And generically, we represent it as a sequence of states $x_t, x_{t+1}... x_{t+n}$. (We have chosen a different symbol to not confuse the "generic" state with the specific realization. Graphically, a plain and simple Markov model looks like the following:
#
# <center><img src="https://raw.githubusercontent.com/NeuromatchAcademy/course-content-dl/main/tutorials/W2D3_ModernRecurrentNeuralNetworks/static/cell_chain.png" width="500"/></center>
# ### Modelling transitions between states
#
# To know how a system transitions between states, we now need a **transition matrix**.
#
# The transition matrix describes the probability of transitioning from one state to another (The probability of staying in the same state is semantically equivalent to transitioning to the same state).
#
# By convention, transition matrix rows correspond to the state at time $t$,
# while columns correspond to state at time $t+1$.
# Hence, row probabilities sum to one, because the probability of transitioning to the next state depends on only the current state, and all possible states are known and enumerated.
#
# Let's call the transition matrix $P_{transition}$:
#
# \begin{equation}
# P_{transition} =
# \begin{pmatrix}
# p_{11} & p_{12} & p_{13} \\
# p_{21} & p_{22} & p_{23} \\
# p_{31} & p_{32} & p_{33} \\
# \end{pmatrix}
# \end{equation}
#
# Using the transition matrix, we can express different behaviors of the system. For example:
# 1. by assigning larger probability mass to the diagonals, we can express that the system likes to stay in the current state;
# 2. by assigning larger probability mass to the off-diagonal, we can express that the system likes to transition out of its current state.
#
# In our case, this matrix is created by measuring how often one word appeared after another.
# + cellView="form"
# @title Function for most likely word
def most_likely_word_after(word):
    """Return the single most probable successor of `word`.

    Looks up `word`'s row of bigram counts in `next_word_matrix` and
    returns the vocabulary entry with the largest count.
    """
    bigram_counts = next_word_matrix[word_idx_dict[word]]
    best_idx = bigram_counts.argmax()
    return distinct_words[best_idx]
# -
# Using our most likely word function, we can begin to create chains of words and create sequences. In the code below we create a naive chain that simply chooses the most likely word.
# + cellView="form"
# @title Function for building Naive Chain
def naive_chain(word, length=15):
    """Greedily extend `word` into a sentence of `length` extra words.

    Deterministic: every step appends the single most likely successor
    of the current word, so the same seed always yields the same text.
    """
    generated = [word]
    for _ in range(length):
        generated.append(most_likely_word_after(generated[-1]))
    return ' '.join(generated)
# -
# Let us now use this naive chain to see what comes up, using some simple words.
# Greedy (deterministic) chains: the same seed word always yields the same text.
print(naive_chain('the'))
print(naive_chain('I'))
print(naive_chain('What'))
print(naive_chain('park'))
# We notice that after the word `the`, `United States` comes up each time. All the other sequences starting from other words also end up at `the` quite often. Since we use a *deterministic* Markov chain model, its next state depends only on the previous one. Therefore, once the sequence reaches `the`, it inevitably continues with `United States`.
# We can now be a little more sophisticated, and return words in a sequence using a *weighted choice*, which randomly selects the next word from a set of words with some probability (weight).
# + cellView="form"
# @title Function for weighted choice
def weighted_choice(objects, weights):
    """
    Return a random element of `objects`, where element i is drawn with
    probability proportional to weights[i].

    Bug fix: the original called `np.multiply(weights, 1 / sum_of_weights)`
    and discarded the result, so the weights were never actually
    normalized; the cumulative distribution therefore did not end at 1.
    We now normalize before taking the cumulative sum, and fall back to
    the last object if float round-off leaves the final cumulative value
    fractionally below `random.random()`.
    """
    weights = np.array(weights, dtype=np.float64)
    # normalization: make the cumulative sum end at exactly 1.0
    weights = weights / weights.sum()
    cumulative = weights.cumsum()
    x = random.random()
    for i, threshold in enumerate(cumulative):
        if x < threshold:
            return objects[i]
    # float round-off guard: never return None for valid input
    return objects[-1]
# + cellView="form"
# @title Function for sampling next word with weights
def sample_next_word_after(word, alpha=0):
    """Sample a successor of `word` in proportion to bigram counts.

    `alpha` is an additive smoothing constant: alpha > 0 gives every
    vocabulary word a non-zero chance of being drawn.
    """
    counts = next_word_matrix[word_idx_dict[word]] + alpha
    probabilities = counts / counts.sum()
    return weighted_choice(distinct_words, probabilities)
# -
# Two independent draws -- the stochastic sampler usually differs between calls.
sample_next_word_after('The')
sample_next_word_after('The')
# There! We don't see the same word twice, because of the added randomisation (i.e., stochasticity). Our algorithm calculates how likely it is to find a certain word after a given word (`The` in this case) in the corpus, and then generates 1 sample of the next word with a matching probability.
#
# In this example, we generated only one next word. Now, using this function, we'll build a chain.
# + cellView="form"
# @title Function for a stochastic chain using weighted choice
def stochastic_chain(word, length=15):
    """Extend `word` by `length` words, sampling each successor at random
    from the bigram transition counts."""
    generated = [word]
    for _ in range(length):
        generated.append(sample_next_word_after(generated[-1]))
    return ' '.join(generated)
# -
# One stochastic chain; reruns will generally produce different text.
stochastic_chain('Hospital')
# Neat - we can create stochastic chains for a single word. For a more effective language model, we would want to model sets of words - in the following cells, we create sets of words to predict a chain after a sequence.
# Window size: each Markov state is a sequence of k consecutive words.
k = 3
# + cellView="form"
# @title Code to build sets of words for more realistic sequences
def sequences_matrices(k=3):
    """Build the k-gram state space and its transition-count matrix.

    Each state is a string of k consecutive corpus words. Returns a tuple
    (k_words_idx_dict, distinct_sets_of_k_words, next_after_k_words_matrix):
      - k_words_idx_dict: k-word string -> row index
      - distinct_sets_of_k_words: list of unique k-word strings
      - next_after_k_words_matrix: sparse counts of the word that follows
        each k-word window in the corpus.

    Cleanup vs. the original: the distinct-window set is computed once
    instead of three times, the unused `distinct_k_words_count` local is
    removed, and the colab `@title` marker is moved above the def where
    it belongs.
    """
    sets_of_k_words = [' '.join(corpus_words[i:i + k])
                       for i, _ in enumerate(corpus_words[:-k])]
    distinct_sets_of_k_words = list(set(sets_of_k_words))
    sets_count = len(distinct_sets_of_k_words)
    next_after_k_words_matrix = dok_matrix((sets_count, len(distinct_words)))
    k_words_idx_dict = {word: i for i, word in enumerate(distinct_sets_of_k_words)}
    # count which corpus word follows each aligned k-word window
    for i, word in tqdm(enumerate(sets_of_k_words[:-k])):
        word_sequence_idx = k_words_idx_dict[word]
        next_word_idx = word_idx_dict[corpus_words[i + k]]
        next_after_k_words_matrix[word_sequence_idx, next_word_idx] += 1
    return k_words_idx_dict, distinct_sets_of_k_words, next_after_k_words_matrix


k_words_idx_dict, distinct_sets_of_k_words, next_after_k_words_matrix = sequences_matrices(k=k)
# -
# Let's have a look at what that bit of code did.
# Peek at ten of the distinct k-word states built above.
distinct_sets_of_k_words[:10]
# Great! Now we are going to create a transition matrix for the sets of words.
# + cellView="form"
# @title Code to populate matrix of sets of words
# NOTE(review): this cell iterates over distinct_sets_of_k_words (the
# *de-duplicated* states) while indexing corpus_words[i+k], so `i` no longer
# corresponds to the corpus position of `word` -- the counts added here look
# misaligned. sequences_matrices() already populated the matrix from the
# properly aligned sets_of_k_words, so this cell appears to add noisy
# duplicate counts on top. TODO: confirm intent; consider removing this cell.
for i, word in tqdm(enumerate(distinct_sets_of_k_words[:-k])):
    word_sequence_idx = k_words_idx_dict[word]
    next_word_idx = word_idx_dict[corpus_words[i+k]]
    next_after_k_words_matrix[word_sequence_idx, next_word_idx] += 1
# -
# We now have what we need to build a stochastic chain over a `K` set of words.
# + cellView="form"
# @title Function for stochastic Chain for sets of words
def stochastic_chain_sequence(words, chain_length=15, k=2):
    """Generate `chain_length` words following the k-word seed `words`.

    `words` must contain exactly `k` space-separated words; raises
    ValueError otherwise.
    """
    current_words = words.split(' ')
    if len(current_words) != k:
        raise ValueError(f'wrong number of words, expected {k}')
    # pre-calculate seq embedding + transition matrix for a given k
    matrices = sequences_matrices(k=k)
    generated = [words]
    for _ in range(chain_length):
        sampled = sample_next_word_after_sequence(matrices, ' '.join(current_words))
        generated.append(sampled)
        # slide the window: drop the oldest word, append the sampled one
        current_words = current_words[1:] + [sampled]
    return ' '.join(generated)
# + cellView="form"
# @title Function to sample next word after sequence
def sample_next_word_after_sequence(matrices, word_sequence, alpha=0):
    """Sample the word following a k-word `word_sequence`.

    `matrices` is the tuple returned by sequences_matrices(); `alpha`
    adds Laplace-style smoothing to the transition counts.
    """
    # unpack the tuple produced by sequences_matrices()
    k_words_idx_dict, distinct_sets_of_k_words, next_after_k_words_matrix = matrices
    counts = next_after_k_words_matrix[k_words_idx_dict[word_sequence]] + alpha
    likelihoods = counts / counts.sum()
    return weighted_choice(distinct_words, likelihoods.toarray())
# -
# Generate three words following the 3-word seed sequence.
stochastic_chain_sequence('Judges under the', chain_length=3, k=3)
# Great! This sentence was created using two of the techniques we recently saw - creating sets of words, and using a weighted average stochastic chain. Both of these methods contributed in making it a more meaningful sequence of words. Some of these notions are also captured by Recurrent Neural Networks!
# ### Think! 1.2: How does changing parameters affect the generated sentences?
#
# Try and use a set of words but using a naive chain, and try a stochastic chain with a low value of k (i.e., 2), and a higher value (i.e., 5). How do these different configurations change the quality of the sequences produced? Below you have sample code to try these out.
#
# ```python
# stochastic_chain_sequence(..., chain_length=..., k=...)
# ```
#
# You should be able to use these matrices and the previous functions to be able to create the necessary configurations.
# + cellView="form"
# @title Student Response
from ipywidgets import widgets
# Free-text answer box for the Think! question, plus a submit button.
text=widgets.Textarea(
    value='Type your answer here and click on `Submit!`',
    placeholder='Type something',
    description='',
    disabled=False
)
button = widgets.Button(description="Submit!")
display(text,button)
def on_button_clicked(b):
    # Log the student's response to the course Airtable record.
    atform.add_answer('q1', text.value)
    print("Submission successful!")
button.on_click(on_button_clicked)
# -
# ## Section 1.3: What is a Hidden Markov Model?
#
# A 1960s advance (by Leonard E. Baum and colleagues): Hidden Markov Models are:
# - a Markov model in which the system modeled is assumed to be a Markov process/chain with unobservable ("hidden") states.
# - HMM assumes there is another surrogate process whose behavior "depends" on the state--you learn about the state by observing the surrogate process.
# - HMMs have successfully been applied in fields where the goal is to recover a data sequence not immediately observable (but other data that depend on the sequence are).
# - The first dominant application: Speech and text processing (1970s)
#
# In this sub-section we will use the python library [hmmlearn](https://hmmlearn.readthedocs.io/en/latest/tutorial.html#training-hmm-parameters-and-inferring-the-hidden-states), which is part of the *scikit-learn* ecosystem. [nlg-with-hmmlearn](https://github.com/mfilej/nlg-with-hmmlearn) offers useful code snippets to adapt ```hmmlearn``` for text data. Because we are using a package that offers many out-of-the-box implementations for HMMs, we don't have to worry about implementing the states, transition matrices, or emission probabilities ourselves.
# +
# load the data
# load the data
sentences = brown.sents(categories=category)
words = [word.lower() for sentence in sentences for word in sentence]  # flat list of lowercase tokens
lengths = [len(sentence) for sentence in sentences]  # per-sentence token counts (hmmlearn's fit needs these)
alphabet = set(words)
# Encode words as integer ids
le = LabelEncoder()
_ = le.fit(list(alphabet))
# Find word frequencies
seq = le.transform(words)
features = np.fromiter(seq, np.int64)
features = np.atleast_2d(features).T  # column vector of observations, as hmmlearn expects
fd = FreqDist(seq)
# -
# Now that we have our data setup, we can create our model. We use a multinomial HMM with 8 states, and can either do a random initialisation or use word frequencies. We recommend trying both options!
# + cellView="form"
# @title Function to create default Multinomial HMM model
def get_model(num_states):
  """Build a Multinomial HMM with `num_states` hidden states whose
  start/transition/emission parameters ('ste') are all initialised by
  hmmlearn's built-in method."""
  print("Initial parameter estimation using built-in method")
  return hmm.MultinomialHMM(n_components=num_states, init_params='ste')
# + cellView="form"
# @title Function to create default Multinomial HMM model information of relative frequencies of words
def frequencies(num_states):
  """Build a Multinomial HMM whose emission probabilities are seeded with
  the relative word frequencies observed in the corpus (`fd`).

  Start and transition probabilities ('st') are still initialised by
  hmmlearn; only the emission matrix is pre-set, with one identical row
  of relative frequencies per hidden state.
  """
  print("Initial parameter estimation using relative frequencies")
  # renamed from `frequencies` -- the original local shadowed this
  # function's own name
  rel_freqs = np.fromiter((fd.freq(i) for i in range(len(alphabet))),
                          dtype=np.float64)
  emission_prob = np.stack([rel_freqs] * num_states)
  model = hmm.MultinomialHMM(n_components=num_states, init_params='st')
  model.emissionprob_ = emission_prob
  return model


print(frequencies(2))
# -
# **Note**:
#
# The following lines of code are commented out because they take a long time (~17 mins for default Brown corpus categories).
#
# If you do not have that time, you can download the default model to try to generate text. You have to uncomment the appropriate lines.
#
# **Note:** Either you may want to uncomment Line 11 or Line 14, not both, as the output variable `model` will be overwritten.
# +
## Fitting a default multinomial HMM. This is lengthy (~17 mins)
def run_model(features, lengths, num_states):
  """Fit a fresh Multinomial HMM on `features` (a column vector of word
  ids) segmented by per-sentence `lengths`.

  Bug fix: the second parameter was previously named `length` and never
  used -- the body silently read the *global* `lengths` instead. It is
  now named `lengths` so the caller's argument is actually honoured
  (call sites pass it positionally, so nothing breaks).
  """
  model = get_model(num_states)
  model = model.fit(features, lengths)
  return model


num_states = 8
## Uncomment, if you have time!
# model = run_model(features, lengths, num_states)
## Another way to get a model is to use default frequencies when initialising the model
# model = frequencies(num_states)
# -
# Alternatively, you could use a saved model. Here is a [link](https://drive.google.com/file/d/1IymcmcO48V6q3x-6dhf7-OU5NByo5W2F/view?usp=sharing) to the default model, which you can download and then upload into Colab.
# + cellView="form"
# @markdown Execute this cell to download the saved model.
import pickle
url = "https://osf.io/5k6cs/download"
r = requests.get(url)
# Bug fix: the original bound the output file handle to `fd`, clobbering
# the FreqDist `fd` that frequencies() still reads; use a distinct name.
with open('model_w2d3_t1.pkl', 'wb') as fout:
  fout.write(r.content)
# Load the pickled model.
# SECURITY NOTE: pickle.load executes arbitrary code embedded in the file --
# only unpickle files from a trusted source (here, the course's own OSF page).
with open("model_w2d3_t1.pkl", "rb") as file:
  model = pickle.load(file)
# + cellView="form"
# @title Function to generate words given a hmm model
def generate_text(model, num_lines=5, random_len=15):
  """Sample `num_lines` sequences of `random_len` words from `model`
  and print each on its own line (re-seeded per line, so output is
  reproducible across runs)."""
  for line_idx in range(num_lines):
    set_seed(line_idx)
    symbols, _states = model.sample(random_len)
    decoded = le.inverse_transform(np.squeeze(symbols))
    for token in decoded:
      print(token, end=" ")
    print()
# -
# Two 20-word samples from the (downloaded or freshly trained) HMM.
generate_text(model, num_lines=2, random_len=20)
# We see that a hidden markov model also does well in generating text. We encourage you to try out different initialisations and hyperparameters to see how the model does.
# ### Exercise 1.3: Transition probabilities
#
#
# We have seen how we can use sequences of text to form probability chains, as well as how we can use out of the box models to generate text. In this exercise, you will be using your own data to generate sequences using ```hmmlearn``` or any other implementation of a markov model. Explore the transition probabilities in your corpus and generate sentences. For example, one such exploration can be - how does using a model with the word frequencies incorporated in compare to using a default model?
#
# Perform any one such comparison or exploration, and generate 3 sentences or 50 words using your model. You should be able to use all the existing functions defined for this exercise.
# +
# load your own dataset and create a model using the frequencies based HMM model!
# -
# ### Useful links for Markov Models and HMM:
#
# Here are some useful links if you wish to explore this topic further.
#
# - [Markov Chain Text](https://towardsdatascience.com/simulating-text-with-markov-chains-in-python-1a27e6d13fc6)
#
# - [Python QuantEcon: Finite Markov Chains with Finance](https://python.quantecon.org/finite_markov.html)
#
# - [Markov Models from the ground up, with python](https://ericmjl.github.io/essays-on-data-science/machine-learning/markov-models/)
#
# - [GenTex](https://github.com/nareshkumar66675/GenTex)
#
# - [HMM learn](https://hmmlearn.readthedocs.io/en/latest/tutorial.html)
# ---
# # Section 2: Word Embeddings
#
# *Time estimate: ~60mins*
# + cellView="form"
# @title Video 2: Textual Dimension Reduction
from ipywidgets import widgets
# Bilibili tab (mirror for regions where YouTube is unavailable).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili player URL from a video id.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id=f"BV1oM4y1P7Mn", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)
# YouTube tab.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"kweySXAZ1os", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)
# Show both players as tabs and log the view event to Airtable.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 2: Textual Dimension Reduction')
display(out)
# -
#
# Words or subword units such as morphemes are the basic units that we use to express meaning in language. The technique of mapping words to vectors of real numbers is known as word embedding.
#
# Word2vec is based on theories of distributional semantics - words that appear around each other are more likely to mean similar things than words that do not appear around each other. Keeping this in mind, our job is to create a high dimensional space where these semantic relations are preserved. The innovation in word2vec is the realisation that we can use unlabelled, running text in sentences as inputs for a supervised learning algorithm--as a self-supervision task. It is supervised because we use the words in a sentence to serve as positive and negative examples. Let’s break this down:
#
# ... "use the kitchen knife to chop the vegetables"…
#
# **C1 C2 C3 T C4 C5 C6 C7**
#
# Here, the target word is knife, and the context words are the ones in its immediate (6-word) window.
# The first word2vec method we’ll see is called skipgram, where the task is to assign a probability for how likely it is that the context window appears around the target word. In the training process, positive examples are samples of words and their context words, and negative examples are created by sampling from pairs of words that do not appear nearby one another.
#
# This method of implementing word2vec is called skipgram with negative sampling. So while the algorithm tries to better learn which context words are likely to appear around a target word, it ends up pushing the embedded representations for every word so that they are located optimally (e.g., with minimal semantic distortion). In this process of adjusting embedding values, the algorithm brings semantically similar words close together in the resulting high dimensional space, and dissimilar words far away.
#
# Another word2vec training method, Continuous Bag of Words (CBOW), works in a similar fashion, and tries to predict the target word, given context. This is converse of skipgram, which tries to predict the context, given the target word. Skip-gram represents rare words and phrases well, often requiring more data for stable representations, while CBOW is several times faster to train than the skip-gram, but with slightly better accuracy for the frequent words in its prediction task. The popular gensim implementation of word2vec has both the methods included.
# ## Section 2.1: Creating Word Embeddings
#
# We will create embeddings for a subset of categories in [Brown corpus](https://www1.essex.ac.uk/linguistics/external/clmt/w3c/corpus_ling/content/corpora/list/private/brown/brown.html). In order to achieve this task we will use [gensim](https://radimrehurek.com/gensim/) library to create word2vec embeddings. Gensim’s word2vec expects a sequence of sentences as its input. Each sentence is a list of words.
# Calling `Word2Vec(sentences, epochs=1)` (the parameter was named `iter` in gensim < 4.0) will run two passes over the sentences iterator (or, in general, `epochs + 1` passes). The first pass collects words and their frequencies to build an internal dictionary tree structure. The second and subsequent passes train the neural model.
# `Word2vec` accepts several parameters that affect both training speed and quality.
#
# One of them is for pruning the internal dictionary. Words that appear only once or twice in a billion-word corpus are probably uninteresting typos and garbage. In addition, there’s not enough data to make any meaningful training on those words, so it’s best to ignore them:
#
# `model = Word2Vec(sentences, min_count=10) # default value is 5`
#
#
# A reasonable value for min_count is between 0-100, depending on the size of your dataset.
#
# Another parameter is the size of the NN layers, which correspond to the “degrees” of freedom the training algorithm has:
#
# `model = Word2Vec(sentences, size=200) # default value is 100`
#
#
# Bigger size values require more training data, but can lead to better (more accurate) models. Reasonable values are in the tens to hundreds.
#
# The last of the major parameters (full list [here](https://radimrehurek.com/gensim/models/word2vec.html#gensim.models.word2vec.Word2Vec)) is for training parallelization, to speed up training:
#
# `model = Word2Vec(sentences, workers=4) # default = 1 worker = no parallelization`
# Brown corpus categories used for word2vec training (a subset of brown.categories()).
category = ['editorial', 'fiction', 'government', 'mystery', 'news', 'religion',
            'reviews', 'romance', 'science_fiction']
# +
def create_word2vec_model(category='news', size=50, sg=1, min_count=5):
    """Train a word2vec model on the given Brown-corpus category/categories.

    size: embedding dimensionality; sg=1 selects skip-gram (0 = CBOW);
    min_count: words rarer than this are dropped from the vocabulary.
    Raises AssertionError if the arguments have the wrong types.
    """
    try:
        corpus_sentences = brown.sents(categories=category)
        trained = Word2Vec(corpus_sentences, vector_size=size, sg=sg,
                           min_count=min_count)
    except (AttributeError, TypeError):
        raise AssertionError('Input variable "category" should be a string or list,'
                             '"size", "sg", "min_count" should be integers')
    return trained
def model_dictionary(model):
    """Return the model's vocabulary as a list of words."""
    return list(model.wv.key_to_index)
def get_embedding(word, model):
    """Return the embedding vector for `word`, or None when the word is
    out of vocabulary."""
    if word not in model.wv.key_to_index:
        return None
    return model.wv[word]
# -
# Train on every Brown category and inspect the vocabulary / one embedding.
all_categories = brown.categories()
all_categories
w2vmodel = create_word2vec_model(all_categories)
print(model_dictionary(w2vmodel))
print(get_embedding('weather', w2vmodel))
# ## Section 2.2: Visualizing Word Embedding
#
# We can now obtain the word embeddings for any word in the dictionary using word2vec. Let's visualize these embeddings to get an intuition of what they mean. The word embeddings obtained from the word2vec model live in a high-dimensional space. We will use `tSNE` (t-distributed stochastic neighbor embedding), a statistical method for dimensionality reduction that allows us to visualize high-dimensional data in a 2D or 3D space. Here, we will use `tSNE` from the [`scikit-learn` module](https://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html) (if you are not familiar with this method, think about `PCA`) to project our high-dimensional embeddings into the 2D space.
#
#
# For each word in `keys`, we pick the top 10 similar words (using cosine similarity) and plot them.
#
# What should be the arrangement of similar words?
# What should be arrangement of the key clusters with respect to each other?
#
# Key words whose neighbourhoods we want to visualise.
keys = ['voters', 'magic', 'love', 'God', 'evidence', 'administration', 'governments']

def get_cluster_embeddings(keys):
    """For each key word, collect its 10 nearest neighbours (by cosine
    similarity) and project all neighbour embeddings to 2-D with t-SNE.

    Returns (embeddings_en_2d, word_clusters): embeddings_en_2d has shape
    (n_keys, 10, 2) and word_clusters lists the neighbour words per key.
    Keys missing from the vocabulary are reported and skipped.
    """
    embedding_clusters = []
    word_clusters = []
    # find closest words and add them to cluster
    for word in keys:
        embeddings = []
        words = []
        # idiom fix: `word not in ...` instead of `not word in ...`
        if word not in w2vmodel.wv.key_to_index:
            print('The word ', word, 'is not in the dictionary')
            continue
        for similar_word, _ in w2vmodel.wv.most_similar(word, topn=10):
            words.append(similar_word)
            embeddings.append(w2vmodel.wv[similar_word])
        embedding_clusters.append(embeddings)
        word_clusters.append(words)
    # get embeddings for the words in clusters
    embedding_clusters = np.array(embedding_clusters)
    n, m, k = embedding_clusters.shape
    tsne_model_en_2d = TSNE(perplexity=10, n_components=2, init='pca', n_iter=3500, random_state=32)
    embeddings_en_2d = np.array(tsne_model_en_2d.fit_transform(embedding_clusters.reshape(n * m, k))).reshape(n, m, 2)
    return embeddings_en_2d, word_clusters
def tsne_plot_similar_words(title, labels, embedding_clusters,
                            word_clusters, a, filename=None):
    """Scatter-plot each key's neighbour cluster in 2-D, one colour per
    key, annotating every point with its word.

    `a` is the marker alpha; if `filename` is given, the figure is also
    saved to that path as a 150-dpi PNG.
    """
    plt.figure(figsize=(16, 9))
    palette = cm.rainbow(np.linspace(0, 1, len(labels)))
    for label, cluster_xy, cluster_words, colour in zip(
            labels, embedding_clusters, word_clusters, palette):
        xs = cluster_xy[:, 0]
        ys = cluster_xy[:, 1]
        plt.scatter(xs, ys, color=colour, alpha=a, label=label)
        for point_idx, word in enumerate(cluster_words):
            plt.annotate(word,
                         alpha=0.5,
                         xy=(xs[point_idx], ys[point_idx]),
                         xytext=(5, 2),
                         textcoords='offset points',
                         ha='right',
                         va='bottom',
                         size=10)
    plt.legend(loc="lower left")
    plt.title(title)
    plt.grid(True)
    if filename:
        plt.savefig(filename, format='png', dpi=150, bbox_inches='tight')
    plt.show()
# Project the neighbour clusters and draw them (0.7 = scatter-marker alpha).
embeddings_en_2d, word_clusters = get_cluster_embeddings(keys)
tsne_plot_similar_words('Similar words from Brown Corpus', keys, embeddings_en_2d, word_clusters, 0.7)
# ## Section 2.3: Exploring meaning with word embeddings
#
# While word2vec was the method that started it all, research has since boomed, and we now have more sophisticated ways to represent words. One such method is FastText, developed at Facebook AI Research, which breaks words into sub-words: such a technique also allows us to create embedding representations for unseen words. In this section, we will explore how semantics and meaning are captured using embeddings, after downloading a pre-trained FastText model. Downloading pre-trained models is a way for us to plug in word embeddings and explore them without training them ourselves.
# + cellView="form"
# @title Download FastText English Embeddings of dimension 100
import os, io, zipfile
from urllib.request import urlopen
# Pre-trained 100-d English FastText vectors, mirrored on OSF.
zipurl = 'https://osf.io/w9sr7/download'
print(f"Downloading and unzipping the file... Please wait.")
# Stream the zip into memory and extract it into the working directory.
with urlopen(zipurl) as zipresp:
  with zipfile.ZipFile(io.BytesIO(zipresp.read())) as zfile:
    zfile.extractall('.')
print("Download completed!")
# -
# Load 100 dimension FastText Vectors using FastText library
ft_en_vectors = fasttext.load_model('cc.en.100.bin')
print(f"Length of the embedding is: {len(ft_en_vectors.get_word_vector('king'))}")
print(f"Embedding for the word King is: {ft_en_vectors.get_word_vector('king')}")
# Cosine similarity is used for similarities between words. Similarity is a scalar between 0 and 1.
# Now find the 10 most similar words to "King"
ft_en_vectors.get_nearest_neighbors("king", 10) # Most similar by key
# ### Word Similarity
# + cellView="form"
# @title Video 3: Semantic Measurements
from ipywidgets import widgets
# Bilibili tab (mirror for regions where YouTube is unavailable).
out2 = widgets.Output()
with out2:
  from IPython.display import IFrame
  class BiliVideo(IFrame):
    # Thin IFrame wrapper that builds the Bilibili player URL from a video id.
    def __init__(self, id, page=1, width=400, height=300, **kwargs):
      self.id=id
      src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
      super(BiliVideo, self).__init__(src, width, height, **kwargs)
  video = BiliVideo(id=f"BV15w411R7SW", width=854, height=480, fs=1)
  print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
  display(video)
# YouTube tab.
out1 = widgets.Output()
with out1:
  from IPython.display import YouTubeVideo
  video = YouTubeVideo(id=f"Y45KIAOw4OY", width=854, height=480, fs=1, rel=0)
  print("Video available at https://youtube.com/watch?v=" + video.id)
  display(video)
# Show both players as tabs and log the view event to Airtable.
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
# add event to airtable
atform.add_event('Video 3: Semantic Measurements')
display(out)
# -
# More on similarity between words. Let's check how similar different pairs of word are. Feel free to play around.
#
#
# +
def getSimilarity(word1, word2):
    """Cosine similarity between the FastText vectors of two words."""
    vec_a = ft_en_vectors.get_word_vector(word1)
    vec_b = ft_en_vectors.get_word_vector(word2)
    return cosine_similarity(vec_a, vec_b)
print("Similarity between the words King and Queen: ", getSimilarity("king", "queen"))
print("Similarity between the words King and Knight: ", getSimilarity("king", "knight"))
print("Similarity between the words King and Rock: ", getSimilarity("king", "rock"))
print("Similarity between the words King and Twenty: ", getSimilarity("king", "twenty"))
## Try the same for two more pairs
# print("Similarity between the words ___ and ___: ", getSimilarity(...))
# print("Similarity between the words ___ and ___: ", getSimilarity(...))
# print("Similarity between the words ___ and ___: ", getSimilarity(...))
# print("Similarity between the words ___ and ___: ", getSimilarity(...))
# -
# ### Homonym Words$^\dagger$
#
# Find the similarity for homonym words with their different meanings. The first one has been implemented for you.
#
#
# $^\dagger$: Two or more words having the same spelling or pronunciation but different meanings and origins are called *homonyms*. E.g., *cricket* the insect and *cricket* the sport.
# +
####################### Words with multiple meanings ##########################
# "cricket" is a homonym: we compare it against both of its senses.
print("Similarity between the words Cricket and Insect: ", getSimilarity("cricket", "insect"))
print("Similarity between the words Cricket and Sport: ", getSimilarity("cricket", "sport"))
## Try the same for two more pairs
# print("Similarity between the words ___ and ___: ", getSimilarity(...))
# print("Similarity between the words ___ and ___: ", getSimilarity(...))
# print("Similarity between the words ___ and ___: ", getSimilarity(...))
# print("Similarity between the words ___ and ___: ", getSimilarity(...))
# -
# ### Word Analogies
#
# Embeddings can be used to find word analogies.
# Let's try it:
# 1. Man : Woman :: King : _____
# 2. Germany: Berlin :: France : ______
# 3. Leaf : Tree :: Petal : _____
# +
## Use get_analogies() funnction. The words have to be in the order Positive, negative, Positve
# Man : Woman :: King : _____
# Positive=(woman, king), Negative=(man)
print(ft_en_vectors.get_analogies("woman", "man", "king",1))
# Germany: Berlin :: France : ______
# Positive=(berlin, frannce), Negative=(germany)
print(ft_en_vectors.get_analogies("berlin", "germany", "france",1))
# Leaf : Tree :: Petal : _____
# Positive=(tree, petal), Negative=(leaf)
print(ft_en_vectors.get_analogies("tree", "leaf", "petal",1))
# Hammer : Nail :: Comb : _____
# Positive=(nail, comb), Negative=(hammer)
print(ft_en_vectors.get_analogies("nail", "hammer", "comb",1))
# -
# But, does it always work?
#
#
# 1. Poverty : Wealth :: Sickness : _____
# 2. train : board :: horse : _____
# +
# Analogy quality varies -- these two tend to give less intuitive answers.
# Poverty : Wealth :: Sickness : _____
print(ft_en_vectors.get_analogies("wealth", "poverty", "sickness",1))
# train : board :: horse : _____
print(ft_en_vectors.get_analogies("board", "train", "horse",1))
# -
# ---
# # Section 3: Neural Net with word embeddings
#
# *Time estimate: ~16mins*
# Let's use the pretrained FastText embeddings to train a neural network on the IMDB dataset.
#
# To recap, the data consists of reviews and sentiments attached to it. It is a binary classification task. As a simple preview of the upcoming neural networks, we are going to introduce neural net with word embeddings. We'll see detailed networks in the next tutorial.
#
#
#
# ## Coding Exercise 3.1: Simple Feed Forward Net
# This will load 300 dim FastText embeddings. It will take around 2-3 minutes.
# Define a vanilla neural network with linear layers. Then average the word embeddings to get an embedding for the entire review.
# The neural net will have one hidden layer of size 128.
# + cellView="form"
# @title Download embeddings and clear old variables to clean memory.
# @markdown #### Execute this cell!
# Free the large 100-d FastText and word2vec models before loading the
# 300-d vectors (they are no longer needed and eat RAM).
if 'ft_en_vectors' in locals():
  del ft_en_vectors
if 'w2vmodel' in locals():
  del w2vmodel
embedding_fasttext = FastText('simple')
# + cellView="form"
# @markdown Load the Dataset
# Torchtext field + train/validation/test iterators over the IMDB reviews.
TEXT, vocab_size, train_iter, valid_iter, test_iter = load_dataset(embedding_fasttext, seed=SEED)
# +
class NeuralNet(nn.Module):
  """Bag-of-embeddings sentiment classifier (coding exercise).

  Averages frozen pre-trained FastText vectors over the review, then
  applies a two-layer MLP. The forward pass is deliberately left
  incomplete for the student to fill in.
  """
  def __init__(self, output_size, hidden_size, vocab_size, embedding_length,
               word_embeddings):
    super(NeuralNet, self).__init__()
    # Embedding table initialised from pre-trained vectors and frozen
    # (requires_grad=False), so only the two linear layers are trained.
    self.word_embeddings = nn.Embedding(vocab_size, embedding_length)
    self.word_embeddings.weight = nn.Parameter(word_embeddings,
                                               requires_grad=False)
    self.fc1 = nn.Linear(embedding_length, hidden_size)
    self.fc2 = nn.Linear(hidden_size, output_size)
  def forward(self, inputs):
    # NOTE(review): the local name `input` shadows the builtin
    input = self.word_embeddings(inputs) # convert text to embeddings
    ####################################################################
    # Fill in missing code below (...)
    raise NotImplementedError("Fill in the Neural Net")
    ####################################################################
    # Average the word embeddings in a sentence
    # Use torch.nn.functional.avg_pool2d to compute the averages
    pooled = ...
    # Pass the embeddings through the neural net
    # A fully-connected layer
    x = ...
    # ReLU activation
    x = ...
    # Another fully-connected layer
    x = ...
    output = F.log_softmax(x, dim=1)
    return output
# add event to airtable
atform.add_event('Coding Exercise 3.1: Simple Feed Forward Net')
# Uncomment to check your code
# nn_model = NeuralNet(2, 128, 100, 300, TEXT.vocab.vectors)
# print(nn_model)
# + [markdown] colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content-dl/tree/main//tutorials/W2D3_ModernRecurrentNeuralNetworks/solutions/W2D3_Tutorial1_Solution_6b55212b.py)
#
#
# -
# ```
# NeuralNet(
# (word_embeddings): Embedding(100, 300)
# (fc1): Linear(in_features=300, out_features=128, bias=True)
# (fc2): Linear(in_features=128, out_features=2, bias=True)
# )
# ```
# + cellView="form"
# @title Training and Testing Functions
# @markdown #### `train(model, device, train_iter, valid_iter, epochs, learning_rate)`
# @markdown #### `test(model, device, test_iter)`
def train(model, device, train_iter, valid_iter, epochs, learning_rate):
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
train_loss, validation_loss = [], []
train_acc, validation_acc = [], []
for epoch in range(epochs):
# train
model.train()
running_loss = 0.
correct, total = 0, 0
steps = 0
for idx, batch in enumerate(train_iter):
text = batch.text[0]
target = batch.label
target = torch.autograd.Variable(target).long()
text, target = text.to(device), target.to(device)
# add micro for coding training loop
optimizer.zero_grad()
output = model(text)
loss = criterion(output, target)
loss.backward()
optimizer.step()
steps += 1
running_loss += loss.item()
# get accuracy
_, predicted = torch.max(output, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
train_loss.append(running_loss/len(train_iter))
train_acc.append(correct/total)
print(f'Epoch: {epoch + 1}, '
f'Training Loss: {running_loss/len(train_iter):.4f}, '
f'Training Accuracy: {100*correct/total: .2f}%')
# evaluate on validation data
model.eval()
running_loss = 0.
correct, total = 0, 0
with torch.no_grad():
for idx, batch in enumerate(valid_iter):
text = batch.text[0]
target = batch.label
target = torch.autograd.Variable(target).long()
text, target = text.to(device), target.to(device)
optimizer.zero_grad()
output = model(text)
loss = criterion(output, target)
running_loss += loss.item()
# get accuracy
_, predicted = torch.max(output, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
validation_loss.append(running_loss/len(valid_iter))
validation_acc.append(correct/total)
print (f'Validation Loss: {running_loss/len(valid_iter):.4f}, '
f'Validation Accuracy: {100*correct/total: .2f}%')
return train_loss, train_acc, validation_loss, validation_acc
def test(model, device, test_iter):
model.eval()
correct = 0
total = 0
with torch.no_grad():
for idx, batch in enumerate(test_iter):
text = batch.text[0]
target = batch.label
target = torch.autograd.Variable(target).long()
text, target = text.to(device), target.to(device)
outputs = model(text)
_, predicted = torch.max(outputs, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
acc = 100 * correct / total
return acc
# +
# Model hyperparameters
learning_rate = 0.0003
output_size = 2        # binary sentiment classification
hidden_size = 128
embedding_length = 300
epochs = 15
# Pre-trained word vectors and vocab size come from the torchtext field TEXT
# built earlier in the notebook; assumes TEXT.build_vocab has already run.
word_embeddings = TEXT.vocab.vectors
vocab_size = len(TEXT.vocab)
# Model set-up
nn_model = NeuralNet(output_size,
                     hidden_size,
                     vocab_size,
                     embedding_length,
                     word_embeddings)
nn_model.to(DEVICE)
nn_start_time = time.time()
# Fix RNG state so the run is reproducible
set_seed(522)
nn_train_loss, nn_train_acc, nn_validation_loss, nn_validation_acc = train(nn_model,
                                                                           DEVICE,
                                                                           train_iter,
                                                                           valid_iter,
                                                                           epochs,
                                                                           learning_rate)
print(f"--- Time taken to train = {(time.time() - nn_start_time)} seconds ---")
# Final held-out evaluation
test_accuracy = test(nn_model, DEVICE, test_iter)
print(f'\n\nTest Accuracy: {test_accuracy}%')
# -
# Plot accuracy curves
# Top panel: train/val accuracy per epoch; bottom panel: train/val loss.
plt.figure()
plt.subplot(211)
plot_train_val(np.arange(0, epochs), nn_train_acc, nn_validation_acc,
               'train accuracy', 'val accuracy',
               'Neural Net on IMDB text classification', 'accuracy',
               color='C0')
plt.legend(loc='upper left')
plt.subplot(212)
plot_train_val(np.arange(0, epochs), nn_train_loss,
               nn_validation_loss,
               'train loss', 'val loss',
               '',
               'loss [a.u.]',
               color='C0')
plt.legend(loc='upper left')
plt.show()
# ---
# # Summary
#
# In this tutorial, we explored two different concepts linked to sequences, and text in particular, that will be the conceptual foundation for Recurrent Neural Networks.
#
# The first concept was that of sequences and probabilities. We saw how we can model language as sequences of text, and use this analogy to generate text. Such a setup is also used to classify text or identify parts of speech. We can either build chains manually using simple python and numerical computation, or use a package such as ```hmmlearn``` that allows us to train models much more easily. These notions of sequences and probabilities (i.e., creating language models!) are key to the internals of a recurrent neural network as well.
#
# The second concept is that of word embeddings, now a mainstay of natural language processing. By using a neural network to predict the context of words, these neural networks learn internal representations of words that are a decent approximation of semantic meaning (i.e., embeddings!). We saw how these embeddings can be visualised, as well as how they capture meaning. We finally saw how they can be integrated into neural networks to better classify text documents.
# + cellView="form"
# @title Airtable Submission Link
from IPython import display as IPydisplay
IPydisplay.HTML(
f"""
<div>
<a href= "{atform.url()}" target="_blank">
<img src="https://github.com/NeuromatchAcademy/course-content-dl/blob/main/tutorials/static/AirtableSubmissionButton.png?raw=1"
alt="button link to Airtable" style="width:410px"></a>
</div>""" )
| tutorials/W2D3_ModernRecurrentNeuralNetworks/student/W2D3_Tutorial1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] ein.tags="worksheet-0" slideshow={"slide_type": "-"}
# # Reference
#
# 1. <NAME>. (1983) The search behavior of the desert isopod _Hemilepistus reaumuri_ as compared with a systematic search. Behavioral Ecology and Sociobiology August 1983, Volume 13, Issue 2, pp 93-106. https://doi.org/10.1007/BF00293799
| src/ipython/99 Reference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
'''
This notebook is used to merge exported data from Reaxys,
clean the data, filter, tokenize and preprocess the dataset
for the training of the Enzymatic Transformer available at
https://github.com/reymond-group/OpenNMT-py
The environment is detailed on GitHub.
Initial .xls Reaxys extracted files are placed in /data
Dataset is output in /dataset
(The code is not perfectly clean and optimized, some steps might take
some time. Need around 4 min for an 83k reaction initial Reaxys extract)
'''
import pandas as pd
import glob, os
from tqdm import tqdm
# +
''' COLLECT ALL EXCELS CSV SUB FILES AND MERGE THEM INTO A UNIQUE DATABASE '''
# Reaxys ".xls" exports are actually tab-separated text, hence read_csv(sep='\t').
directory = str(os.getcwd()) + "/Data"
iteration = 0
frames = []
for filename in os.listdir(directory):
    if filename.endswith(".xls"):
        # Collect the frames and concatenate once at the end: the previous
        # per-file df.append(df2, 2) call was quadratic, used the deprecated
        # DataFrame.append API, and passed a stray positional `2` as ignore_index.
        frames.append(pd.read_csv('Data/' + filename, sep='\t'))
        iteration += 1
df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
print("Total enteries in merged DF: \t" + str(len(df)))
# +
''' FILTER REACTIONS THAT ARE NOT COMPLETE: '''
# Flags incomplete Reaxys rows in a 'Drop' column: no '>' at all (not a
# reaction), no '>>' (leftover reagents between single '>'s), an empty
# reactant or product side, or no catalyst/reagent description.
#Copy:
df_filter = df.reset_index(drop=True)
df_filter['Drop'] = ''
count_no_Brak = 0
count_not_2 = 0
count_no_reactant = 0
count_no_product = 0
count_no_catalyst_nor_reagent_text = 0
remaining = 0
totalenteries = 0
reaction_smiles = []
# Fixed off-by-one: range(0, len(df_filter)-1) silently skipped the last row.
for item in range(len(df_filter)):
    reaction = df_filter.at[int(item), 'Reaction']
    reagents = str(df_filter.at[int(item), 'Reagent'])
    catalysts = str(df_filter.at[int(item), 'Catalyst'])
    totalenteries += 1
    if not ">" in str(reaction): #at least one >
        count_no_Brak += 1
        # .at writes to the frame itself; the old chained df['Drop'][item]
        # indexing can silently write to a temporary copy.
        df_filter.at[item, 'Drop'] = '1'
        continue
    if not ">>" in str(reaction): #
        count_not_2 += 1
        df_filter.at[item, 'Drop'] = '1'
        continue
    if str(reaction).split(">>")[0] == "":
        count_no_reactant += 1
        df_filter.at[item, 'Drop'] = '1'
        continue
    if str(reaction).split(">>")[1] == "":
        count_no_product += 1
        df_filter.at[item, 'Drop'] = '1'
        continue
    # pandas renders missing cells as the string "nan" after str()
    if reagents == "nan":
        reagents = ""
    if catalysts == "nan":
        catalysts = ""
    if reagents == "" and catalysts == "":
        count_no_catalyst_nor_reagent_text += 1
        df_filter.at[item, 'Drop'] = '1'
        continue
    remaining += 1
    reaction_smiles.append(reaction)
print(str(count_no_Brak) + "\t no braket at all, means not a reaction at all")
print(str(count_not_2) + "\t no '>>' together, means potential reactants between both '>'")
print(str(count_no_reactant) + "\t no reactant at all before '>>")
print(str(count_no_product) + "\t no products at all after '>>'")
print(str(count_no_catalyst_nor_reagent_text) + "\t no catalyst or reagent description")
print('')
print(str(totalenteries) + "\t total enteries")
print(str(remaining) + "\t remaining enteries")
print(str(len(set(reaction_smiles))) + "\t unique reaction SMILES")
# +
''' Delete useless columns '''
# Keep only rows that survived the completeness filter, then deduplicate on
# the (Catalyst, Reagent, Reaction) triple.
df_filter2 = df_filter[df_filter['Drop'] == ''].reset_index(drop=True)
del df_filter
df_filter2 = df_filter2.drop_duplicates(subset=["Catalyst", "Reagent", "Reaction"]).reset_index(drop=True)
# Reaxys metadata columns that play no role downstream.
useless_columns = [
    "Reaction: Links to Reaxys",
    "Data Count",
    "Number of Reaction Details",
    "Reaction Rank",
    "Record Type",
    "Bin",
    "Reaction Details: Reaction Classification",
    "Example label",
    "Multi-step Scheme",
    "Multi-step Details",
    "Named Reaction",
    "Type of reaction description (Reaction Details)",
    "Location",
    "References",
    "Unnamed: 41",
    "Links to Reaxys",
]
for column in useless_columns:
    del df_filter2[column]
df_filter2.shape
# +
''' CREATE NEW COLUMN: Add to "Enzyme Keyword" the FULL ";" splitted element from Reagent or Catalyst '''
df_filter4 = df_filter2
df_filter4["Enzyme Keyword"] = ""
#Keyword that are allowed to pass the filter:
# NOTE(review): matching below uses casefold(), so "Ase" and "ase" are redundant.
White_List = ["Ase", "ase", "lysozyme"]
#For each reaction entery:
for item in range(0, len(df_filter4)):
    reagents = str(df_filter4.at[int(item), 'Reagent'])
    catalysts = str(df_filter4.at[int(item), 'Catalyst'])
    list_reag_cat = []
    #Concatenate Catalysts and Reagents:
    for words in reagents.split("; "):
        list_reag_cat.append(words)
    for words in catalysts.split("; "):
        list_reag_cat.append(words)
    # Keep every full reagent/catalyst phrase containing an enzyme-like keyword.
    # A cell ends up holding either "" (no match) or a list of matched phrases.
    for white in White_List:
        for element in list_reag_cat:
            if str(white).casefold() in str(element).casefold():
                if not str(element).casefold() in df_filter4["Enzyme Keyword"][item]:
                    if df_filter4["Enzyme Keyword"][item] == "":
                        df_filter4["Enzyme Keyword"][item] = [str(element).casefold()]
                    else:
                        df_filter4["Enzyme Keyword"][item].append(str(element).casefold())
# +
''' CREATE NEW COLUMN: EXTRACTED ENZYME SINGLE NAME ONLY '''
df_filter5 = df_filter4
df_filter5["Enzyme Name"] = ""
#Keyword that are allowed to pass the filter:
White_List = ["Ase", "ase", "lysozyme"]
#List of ENZYME SINGLE WORD:
Enzyme_ASE = []
#For each reaction entery:
for item in range(0, len(df_filter5)):
    reagents = str(df_filter5.at[int(item), 'Reagent'])
    catalysts = str(df_filter5.at[int(item), 'Catalyst'])
    list_reag_cat = []
    #Concatenate Catalysts and Reagents:
    # Unlike the "Enzyme Keyword" cell, phrases are split into single words
    # here, so only the bare enzyme token (e.g. "lipase") is retained.
    for sentenses in reagents.split("; "):
        for word in sentenses.split(" "):
            list_reag_cat.append(word)
    for sentenses in catalysts.split("; "):
        for word in sentenses.split(" "):
            list_reag_cat.append(word)
    #for each element in the White List:
    for white in White_List:
        for element in list_reag_cat:
            if str(white).casefold() in str(element).casefold():
                if not str(element).casefold() in df_filter5["Enzyme Name"][item]:
                    # Enzyme_ASE accumulates every hit (with duplicates) for the stats below.
                    Enzyme_ASE.append(element)
                    if df_filter5["Enzyme Name"][item] == "":
                        df_filter5["Enzyme Name"][item] = [str(element).casefold()]
                    else:
                        df_filter5["Enzyme Name"][item].append(str(element).casefold())
print("Enzyme presents: ", len(Enzyme_ASE))
print("UNIQUE Enzyme presents: ", len(set(Enzyme_ASE)))
# +
''' Replace list by a string for "Enzyme Name" '''
# Cells hold either "" or a list of matched words; ' '.join normalizes every
# cell to a plain space-separated string ('' stays '' since join("") == "").
df_filter5_2 = df_filter5
for element in tqdm(range(0, len(df_filter5_2["Enzyme Name"]))):
    df_filter5_2["Enzyme Name"][element] = ' '.join(df_filter5_2["Enzyme Name"][element])
for element in tqdm(range(0, len(df_filter5_2["Enzyme Keyword"]))):
    df_filter5_2["Enzyme Keyword"][element] = ' '.join(df_filter5_2["Enzyme Keyword"][element])
# +
''' Cleaning Names '''
# Strips unbalanced parentheses, lower-cases, and normalizes trailing
# "ase,"/"ases" spellings in both enzyme columns.
df_filter6 = df_filter5_2
for element in tqdm(range(0, len(df_filter6["Enzyme Name"]))):
    # Remove a parenthesis only when its partner is absent from the string.
    if "(" in df_filter6["Enzyme Name"][element]:
        if not ")" in df_filter6["Enzyme Name"][element]:
            df_filter6["Enzyme Name"][element] = df_filter6["Enzyme Name"][element].replace("(", "")
    elif ")" in df_filter6["Enzyme Name"][element]:
        if not "(" in df_filter6["Enzyme Name"][element]:
            df_filter6["Enzyme Name"][element] = df_filter6["Enzyme Name"][element].replace(")", "")
    df_filter6["Enzyme Name"][element] = df_filter6["Enzyme Name"][element].casefold()
    if "ase," in df_filter6["Enzyme Name"][element]:
        df_filter6["Enzyme Name"][element] = df_filter6["Enzyme Name"][element].replace("ase,", "ase")
    if "ases," in df_filter6["Enzyme Name"][element]:
        # NOTE(review): the condition checks "ases," but the replacement hits
        # every "ases" occurrence — confirm this plural collapse is intended.
        df_filter6["Enzyme Name"][element] = df_filter6["Enzyme Name"][element].replace("ases", "ase")
for element in tqdm(range(0, len(df_filter6["Enzyme Keyword"]))):
    if "(" in df_filter6["Enzyme Keyword"][element]:
        if not ")" in df_filter6["Enzyme Keyword"][element]:
            df_filter6["Enzyme Keyword"][element] = df_filter6["Enzyme Keyword"][element].replace("(", "")
    elif ")" in df_filter6["Enzyme Keyword"][element]:
        if not "(" in df_filter6["Enzyme Keyword"][element]:
            df_filter6["Enzyme Keyword"][element] = df_filter6["Enzyme Keyword"][element].replace(")", "")
    df_filter6["Enzyme Keyword"][element] = df_filter6["Enzyme Keyword"][element].casefold()
    if "ase," in df_filter6["Enzyme Keyword"][element]:
        df_filter6["Enzyme Keyword"][element] = df_filter6["Enzyme Keyword"][element].replace("ase,", "ase")
    if "ases," in df_filter6["Enzyme Keyword"][element]:
        df_filter6["Enzyme Keyword"][element] = df_filter6["Enzyme Keyword"][element].replace("ases", "ase")
# -
# +
''' PREPROCESSING DATASETS '''
import pandas as pd
import numpy as np
import glob, os
from tqdm import tqdm
from itertools import groupby
import random
from collections import Counter
import re
from rdkit import Chem
from tokenizers import Tokenizer, models, pre_tokenizers, decoders, trainers, processors
# Initialize a tokenizer
# Byte-pair-encoding tokenizer for enzyme-name text (HuggingFace `tokenizers`).
tokenizer2 = Tokenizer(models.BPE())
# Customize pre-tokenization and decoding
tokenizer2.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=True)
tokenizer2.decoder = decoders.ByteLevel()
tokenizer2.post_processor = processors.ByteLevel(trim_offsets=True)
# And then train
# special_tokens keeps common enzyme-name morphemes intact during BPE merges.
trainer = trainers.BpeTrainer(vocab_size=9000, min_frequency=2, limit_alphabet=55, special_tokens=['ase', 'hydro', 'mono', 'cyclo', 'thermo', 'im'])
# NOTE(review): train(trainer, files) is the pre-0.9 `tokenizers` argument
# order; newer releases expect train(files, trainer=...) — pin the version.
tokenizer2.train(trainer, ["Tokenizer/Enzyme_Name_ForTocken.txt"])
# +
def smi_tokenizer(smi):
    """
    Split a SMILES molecule or reaction string into space-separated tokens.

    Multi-character atoms like Cl/Br and bracketed atoms like [Na+] stay
    whole; asserts the tokens reassemble to the exact input.
    """
    pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
    token_list = re.findall(pattern, smi)
    assert smi == ''.join(token_list)
    return ' '.join(token_list)
def enzyme_sentence_tokenizer(sentence):
    '''
    Tokenize a sentence with the BPE tokenizer trained above, optimized for
    enzyme-like descriptions & names. Lone word-boundary markers ('Ġ') are
    dropped; embedded ones become '_'.
    '''
    bpe_tokens = tokenizer2.encode(sentence).tokens
    cleaned = [tok.replace('Ġ', '_') for tok in bpe_tokens if tok != 'Ġ']
    return ' '.join(cleaned)
def Canonicalize_Reaction(smiles):
    """Canonicalize every molecule of a 'precursors>reactants>products' SMILES.

    Returns the canonical reaction string, or False when any fragment fails
    the (non-sanitizing) RDKit validity pre-check.
    """
    # Pre-check every fragment before committing to the full canonicalization.
    for fragment in smiles.replace('>>', '>').replace('>', '.').split('.'):
        if Chem.MolFromSmiles(fragment, sanitize=False) is None:
            return False
    sections = smiles.split('>')
    canonical_sections = []
    # Only the first three '>'-separated sections are used, as before.
    for section in (sections[0], sections[1], sections[2]):
        canonical_mols = []
        for part in section.split('.'):
            mol = Chem.MolFromSmiles(part)
            canonical_mols.append(Chem.MolToSmiles(mol, canonical=True))
        canonical_sections.append('.'.join(canonical_mols))
    return '>'.join(canonical_sections)
# +
''' FILTER DATAFRAME
    SELECTION_ENZYME_DESC = 'Enzyme Keyword' for full sentences
    SELECTION_ENZYME_DESC = 'Enzyme Name' for "-ase" word only
'''
SELECTION_ENZYME_DESC = 'Enzyme Keyword'
# Keep only the reaction SMILES and the chosen enzyme description column.
df_filter = df_filter6[['Reaction', SELECTION_ENZYME_DESC]].copy()
df_filter.shape
# +
''' Remove reaction with no Enzyme Name '''
df_filter.dropna(subset = [SELECTION_ENZYME_DESC], inplace=True)
# Empty strings survive dropna; filter them out explicitly.
# NOTE(review): 'Enzyme Keyword' is hardcoded here — this breaks when
# SELECTION_ENZYME_DESC is switched to 'Enzyme Name'.
df_filter = df_filter[df_filter['Enzyme Keyword'].str.len() > 0]
df_filter.drop_duplicates(inplace=True)
df_filter.reset_index(inplace=True)
del df_filter['index']
df_filter.shape
# +
''' CANONICALIZE ALL REACTIONS AND REMOVE INVALIDS '''
# Canonicalize each reaction SMILES; rows whose canonicalization raises are
# marked 'invalid' and dropped afterwards.
count = 0
for element in tqdm(range(0, len(df_filter))):
    try:
        reaction = Canonicalize_Reaction(df_filter['Reaction'][element])
        if reaction != False:
            # .at avoids chained-assignment writes to a temporary copy
            df_filter.at[element, 'Reaction'] = reaction
        # NOTE(review): when the pre-check returns False the row keeps its
        # original, un-canonicalized SMILES instead of being marked invalid.
    except Exception:
        # A bare `except:` would also swallow KeyboardInterrupt/SystemExit.
        df_filter.at[element, 'Reaction'] = 'invalid'
        count += 1
df_filter = df_filter[~df_filter['Reaction'].str.contains('invalid')].reset_index(drop=True)
print(count, " invalid smiles reactions removed")
# -
''' CHECK AGAIN FOR CANONICAL DUPLICATES '''
# Canonicalization can merge previously distinct SMILES, so deduplicate again.
print("Before ", len(df_filter))
df_filter = df_filter.drop_duplicates(subset=["Enzyme Keyword", "Reaction"]).reset_index(drop=True)
print("After ", len(df_filter))
# +
''' Remove reaction with Multiple products '''
count = 0
df_filter['Reaction_Multi'] = ""
for element in range(0, len(df_filter)):
    # A '.' on the product side means several product molecules.
    if '.' in df_filter['Reaction'][element].split('>>')[1]:
        df_filter['Reaction_Multi'][element] = 'DROP'
        count += 1
print('Removed: ', count, ' reactions')
indexNames = df_filter[df_filter['Reaction_Multi'] == 'DROP'].index
df_filter.drop(indexNames, inplace=True)
df_filter.reset_index(inplace=True)
del df_filter['Reaction_Multi']
del df_filter['index']
print("Initially: " + str(len(set(df['Reaction']))) + " reactions")
print("After removing 'NaN' Enzyme Names, remains: " + str(len(set(df_filter['Reaction']))) + " unique reactions, " + str(len(df_filter)) + " reactions in total")
# +
''' Combine Enzyme Name and Reaction SMILES '''
# Build the transformer source (tokenized substrates + ' > ' + tokenized
# enzyme text) and target (tokenized single product) for every reaction.
df_filter['Product'] = ''
df_filter['TransformerIn'] = ''
df_filter['TransformerOut'] = ''
for index in tqdm(range(0, len(df_filter))):
    df_filter['Product'][index] = df_filter['Reaction'][index].split('>>')[1]
    df_filter['TransformerIn'][index] = smi_tokenizer(df_filter['Reaction'][index].split('>>')[0]) + " > " + enzyme_sentence_tokenizer(df_filter[SELECTION_ENZYME_DESC][index])
    df_filter['TransformerOut'][index] = smi_tokenizer(df_filter['Product'][index])
# -
print(str(len(set(df_filter['Product']))) + " different Products are presents")
# +
''' Distribute the PRODUCTS with weight '''
# Assign each UNIQUE product to Train/Test/Val (target 80/10/10), weighted by
# how many reactions share that product, so a product never leaks across
# splits. Splits below their target ratio are topped up greedily.
Products_org = df_filter['Product']
count_train = 0
count_test = 0
count_val = 0
# product -> number of reactions producing it
Products_org = pd.DataFrame.from_dict(Counter(Products_org), orient='index').reset_index().rename(columns={0: 'count'})
df_shuffled = Products_org.sample(len(Products_org)).reset_index()
df_shuffled["set"] = ""
for item in tqdm(range(0, len(df_shuffled))):
    #In case
    if count_train != 0 and count_test != 0 and count_val != 0:
        # ratio >= 1 means that split already holds at least its target share
        ratio_train = (count_train / (count_train + count_test + count_val)) / 0.8
        ratio_test = (count_test / (count_train + count_test + count_val)) / 0.1
        ratio_val = (count_val / (count_train + count_test + count_val)) / 0.1
        if ratio_train < 1:
            count_train += df_shuffled['count'][item]
            df_shuffled['set'][item] = "Train"
        elif ratio_test < 1:
            count_test += df_shuffled['count'][item]
            df_shuffled['set'][item] = "Test"
        elif ratio_val < 1:
            count_val += df_shuffled['count'][item]
            df_shuffled['set'][item] = "Val"
        else:
            # all splits at or above target: assign uniformly at random
            assignation = random.randint(1, 3)
            if assignation == 1:
                count_train += df_shuffled['count'][item]
                df_shuffled['set'][item] = "Train"
            elif assignation == 2:
                count_test += df_shuffled['count'][item]
                df_shuffled['set'][item] = "Test"
            elif assignation == 3:
                count_val += df_shuffled['count'][item]
                df_shuffled['set'][item] = "Val"
    #In case no assignment yet:
    else:
        assignation = random.randint(1, 3)
        if assignation == 1:
            count_train += df_shuffled['count'][item]
            df_shuffled['set'][item] = "Train"
        elif assignation == 2:
            count_test += df_shuffled['count'][item]
            df_shuffled['set'][item] = "Test"
        elif assignation == 3:
            count_val += df_shuffled['count'][item]
            df_shuffled['set'][item] = "Val"
if count_train != 0 and count_test != 0 and count_val != 0:
    print('Train proportion: ', str(round(count_train / (count_train + count_test + count_val), 4)), '%')
    print('Test proportion: ', str(round(count_test / (count_train + count_test + count_val), 4)), '%')
    print('Val proportion: ', str(round(count_val / (count_train + count_test + count_val), 4)), '%')
# index by product string for the lookup in the next cell
df_shuffled = df_shuffled.set_index("index")
# +
''' Assign the assignment of the Product to the initial DF (df_filter) '''
# Every reaction inherits the split of its product, so no product appears
# in more than one of Train/Test/Val.
df_filter["Set"] = ''
for item_toset in tqdm(range(0, len(df_filter))):
    product = df_filter['Product'][item_toset]
    df_filter['Set'][item_toset] = df_shuffled.loc[[product]]['set'][0]
df_filter.head(5)
# +
''' Distribute the Train / Test / Val splits into lists AND write into files '''
# Join source/target with the '¢' sentinel so each example survives shuffling
# as a single string; the sentinel is split back out below.
count_train_2 = 0
count_test_2 = 0
count_val_2 = 0
TRAIN = []
TEST = []
VAL = []
for item_toset in range(0, len(df_filter)):
    joined = df_filter['TransformerIn'][item_toset] + '¢' + df_filter['TransformerOut'][item_toset]
    assigned_split = df_filter['Set'][item_toset]
    if assigned_split == 'Train':
        count_train_2 += 1
        TRAIN.append(joined)
    if assigned_split == 'Test':
        count_test_2 += 1
        TEST.append(joined)
    if assigned_split == 'Val':
        count_val_2 += 1
        VAL.append(joined)
np.random.shuffle(TRAIN)
np.random.shuffle(TEST)
np.random.shuffle(VAL)
# Separate the sentinel-joined pairs back into source/target lists.
src_train = [pair.split('¢')[0] for pair in TRAIN]
tgt_train = [pair.split('¢')[1] for pair in TRAIN]
src_test = [pair.split('¢')[0] for pair in TEST]
tgt_test = [pair.split('¢')[1] for pair in TEST]
src_val = [pair.split('¢')[0] for pair in VAL]
tgt_val = [pair.split('¢')[1] for pair in VAL]
print(count_train_2 / (count_train_2 + count_test_2 + count_val_2))
print(count_test_2 / (count_train_2 + count_test_2 + count_val_2))
print(count_val_2 / (count_train_2 + count_test_2 + count_val_2))
# +
''' WRITE TO FILES '''
# One line per example, one file per (side, split) combination.
target_folder_name = 'dataset/ENZR_Dataset_Full_Sentences/'
#WRITE INTO FILES:
split_outputs = [
    ('src_train.txt', src_train),
    ('tgt_train.txt', tgt_train),
    ('src_test.txt', src_test),
    ('tgt_test.txt', tgt_test),
    ('src_val.txt', src_val),
    ('tgt_val.txt', tgt_val),
]
for file_name, lines in split_outputs:
    with open(target_folder_name + file_name, 'w') as f:
        for item in lines:
            f.write("%s\n" % item)
# -
| Merge_Filter_Tok_Prepro_ENZR_Dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from avroconvert import AvroConvert, gcs_reader, s3_reader, fs_reader
# ## Read file from google cloud storage bucket
# ### Write to parquet
# Angle-bracketed values below are placeholders; substitute real paths/credentials.
# +
reader = gcs_reader(auth_file='<SERVICE_ACCOUNT.json>', bucket='<BUCKET_NAME>',
                    datatype='avro', prefix='<FILE_PREFIX>')
avro_object = AvroConvert(data=reader.get_data())
avro_object.to_parquet(outfile='<FOLDER/FILENAME.parquet>')
# -
# ### Write to csv
# +
reader = gcs_reader(auth_file='<SERVICE_ACCOUNT.json>', bucket='<BUCKET_NAME>',
                    datatype='avro', prefix='<FILE_PREFIX>')
avro_object = AvroConvert(data=reader.get_data())
avro_object.to_csv(outfile='<FOLDER/FILENAME.csv>')
# -
# ### Write to json
# +
reader = gcs_reader(auth_file='<SERVICE_ACCOUNT.json>', bucket='<BUCKET_NAME>',
                    datatype='avro', prefix='<FILE_PREFIX>')
avro_object = AvroConvert(data=reader.get_data())
avro_object.to_json(outfile='<FOLDER/FILENAME.json>')
# -
# ## Read from S3 bucket
# ### Write to parquet
# +
reader = s3_reader(access_key='<AWS ACCESS KEY>', secret_key='<AWS SECRET KEY>', session_token='<AWS SESSION TOKEN>(if any)',
                   bucket='<S3 BUCKET>', prefix='<FILE PREFIX>', datatype='avro')
avro_object = AvroConvert(data=reader.get_data())
avro_object.to_parquet(outfile='<FOLDER/FILENAME.parquet>')
# -
# ### Write to csv
# +
reader = s3_reader(access_key='<AWS ACCESS KEY>', secret_key='<AWS SECRET KEY>', session_token='<AWS SESSION TOKEN>(if any)',
                   bucket='<S3 BUCKET>', prefix='<FILE PREFIX>', datatype='avro')
avro_object = AvroConvert(data=reader.get_data())
# Fixed copy-paste error: this CSV example previously called to_parquet.
avro_object.to_csv(outfile='<FOLDER/FILENAME.csv>')
# -
# ### Write to json
# +
reader = s3_reader(access_key='<AWS ACCESS KEY>', secret_key='<AWS SECRET KEY>', session_token='<AWS SESSION TOKEN>(if any)',
                   bucket='<S3 BUCKET>', prefix='<FILE PREFIX>', datatype='avro')
avro_object = AvroConvert(data=reader.get_data())
# Fixed copy-paste error: this JSON example previously called to_parquet.
avro_object.to_json(outfile='<FOLDER/FILENAME.json>')
# -
# ## Read from local filesystem
# ### Write to parquet
# +
reader = fs_reader(folder='<FOLDER NAME>', prefix='<FILE PREFIX>', datatype='avro')
avro_object = AvroConvert(data=reader.get_data())
avro_object.to_parquet(outfile='<FOLDER/FILENAME.parquet>')
# -
# ### Write to csv
# +
reader = fs_reader(folder='<FOLDER NAME>', prefix='<FILE PREFIX>', datatype='avro')
avro_object = AvroConvert(data=reader.get_data())
# Fixed copy-paste error: this CSV example previously called to_parquet.
avro_object.to_csv(outfile='<FOLDER/FILENAME.csv>')
# -
# ### Write to json
# +
reader = fs_reader(folder='<FOLDER NAME>', prefix='<FILE PREFIX>', datatype='avro')
avro_object = AvroConvert(data=reader.get_data())
# Fixed copy-paste error: this JSON example previously called to_parquet.
avro_object.to_json(outfile='<FOLDER/FILENAME.json>')
# -
| example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Train/evaluate two classifiers on the iris dataset with a 50/50 split.
from sklearn import datasets
iris = datasets.load_iris()
X = iris.data
y = iris.target
# sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.5)
from sklearn import tree
classifier = tree.DecisionTreeClassifier()
classifier.fit(X_train, y_train)
prediction = classifier.predict(X_test)
print(prediction)
from sklearn.metrics import accuracy_score
print(accuracy_score(y_test, prediction))
#Using a different classifier
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier()
classifier.fit(X_train, y_train)
prediction = classifier.predict(X_test)
print(accuracy_score(y_test, prediction))
# -
| ML-102.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Object Analysis
# ## Instructions
# 1. Fill in the dataset in section 1.1
#
# 2. Run all of the cells
#
# 3. Look at the summary pdf generated AND/OR explore each metric below.
# - Under each Metric there will be a portion of "Setup" and then "Analyses". Ignore the "Setup" unless customization is needed, and in "Analyses" results are shown to be interacted with. The number that comes after the M in the title refers to the measurement number when collecting the metrics.
#
# ## Table of Contents
# 1. [Initial Setup](#setup) <br/>
# 1.1 [Dataset](#dataset)
# 2. (M0) Metric: [Object counts, duplicate annotations, object cooccurrences](#metric0)<br/>
# 2.1 [Setup](#metric0_setup)<br/>
# 2.2 [Analyses](#metric0_analyses)
# 3. (M7) Metric: [Size and distance from center of supercategories](#metric7))<br/>
# 3.1 [Setup](#metric7_setup)<br/>
# 3.2 [Analyses](#metric7_analyses)
# 4. (M8) Metric: [Supercategories w/wo people](#metric8)<br/>
# 4.1 [Setup](#metric8_setup)<br/>
# 4.2 [Analyses](#metric8_analyses)
# 5. (M9) Metric: [Scenes and object appearance diversity](#metric9)<br/>
# 5.1 [Setup](#metric9_setup)<br/>
# 5.2 [Analyses](#metric9_analyses)
# 6. [Setting up summary pdf](#summarypdf)
# # Initial Setup
# <a id="setup"></a>
from __future__ import print_function
import argparse
import datasets
import pickle
import itertools
import torchvision.transforms as transforms
import torch.utils.data as data
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.manifold import TSNE
import seaborn as sns
import numpy as np
from scipy import stats
import PIL.Image
from scipy.cluster.hierarchy import dendrogram, linkage
from math import sqrt
import cv2
import matplotlib.patches as patches
from scipy.spatial.distance import squareform
import pycountry
from geonamescache import GeonamesCache
from matplotlib.patches import Polygon
from matplotlib.collections import PatchCollection
from mpl_toolkits.basemap import Basemap
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import re
import plotly.graph_objects as go
import textwrap
import matplotlib.patches as mpatches
import operator
from matplotlib.font_manager import FontProperties
import imageio
from ipywidgets import interact, interactive, fixed, interact_manual, HBox, Layout
import ipywidgets as widgets
from IPython.display import Image
from IPython.core.display import HTML
from IPython.display import display
import time
import warnings
import random
from matplotlib.transforms import Bbox
from IPython.display import clear_output
# %matplotlib inline
# +
COLORS = sns.color_palette('Set2', 2)
SAME_EXTENT = (-0.5, 6.5, -0.5, 6.5)
# Silence numpy divide-by-zero/NaN warnings and all Python warnings for the
# notebook's analysis cells.
np.seterr(divide='ignore', invalid='ignore')
warnings.filterwarnings("ignore")
# makedirs(..., exist_ok=True) is race-free, unlike the previous
# `if not os.path.exists(...): os.mkdir(...)` check-then-act pattern.
for required_dir in ("dataloader_files", "results", "checkpoints"):
    os.makedirs(required_dir, exist_ok=True)
# -
# https://stackoverflow.com/questions/31517194/how-to-hide-one-specific-cell-input-or-output-in-ipython-notebook
def hide_toggle(for_next=False, toggle_text='Toggle show/hide'):
    """Return an HTML link that toggles visibility of a notebook cell's input.

    Args:
        for_next: when True, the toggle controls the *next* cell and the
            current cell's input is hidden permanently.
        toggle_text: link text shown in the rendered output.
    """
    this_cell = """$('div.cell.code_cell.rendered.selected')"""
    next_cell = this_cell + '.next()'
    target_cell = this_cell  # target cell to control with toggle
    js_hide_current = ''  # bit of JS to permanently hide code in current cell (only when toggling next cell)
    if for_next:
        target_cell = next_cell
        js_hide_current = this_cell + '.find("div.input").hide();'
    # random suffix keeps the generated JS function name unique per cell
    js_f_name = 'code_toggle_{}'.format(str(random.randint(1,2**64)))
    html = """
        <script>
            function {f_name}() {{
                {cell_selector}.find('div.input').toggle();
            }}
            {js_hide_current}
        </script>
        <a href="javascript:{f_name}()">{toggle_text}</a>
    """.format(
        f_name=js_f_name,
        cell_selector=target_cell,
        js_hide_current=js_hide_current,
        toggle_text=toggle_text
    )
    return HTML(html)
hide_toggle(for_next=True, toggle_text='Show/hide helper functions')
# +
def folder(num, folder):
    """Create results/<folder>/<num>/ if needed and return a writable handle
    to its results.txt; the caller is responsible for closing the handle.

    Note: the `folder` parameter shadows the function name — kept for
    backward compatibility with existing callers.
    """
    # makedirs is race-free and also creates missing parents, unlike the
    # previous `if not os.path.exists(...): os.mkdir(...)` pair.
    os.makedirs("results/{0}/{1}".format(folder, num), exist_ok=True)
    return open("results/{0}/{1}/results.txt".format(folder, num), "w")
# Projecting a set of features into a lower-dimensional subspace with PCA
def project(features, dim):
    """Standardize `features`, then return their first `dim` principal components."""
    scaled = StandardScaler().fit_transform(features)
    reducer = PCA(n_components=dim)
    return reducer.fit_transform(X=scaled)
# Calculating the binomial proportion confidence interval
def wilson(p, n, z = 1.96):
    """Wilson score interval for a proportion `p` observed over `n` trials.

    Returns a (lower, upper) tuple; z defaults to the 95% normal quantile.
    """
    z2 = z * z
    shrink = 1 + z2 / n
    midpoint = p + z2 / (2 * n)
    spread = z * sqrt((p * (1 - p) + z2 / (4 * n)) / n)
    return ((midpoint - spread) / shrink, (midpoint + spread) / shrink)
def country_to_iso3(country):
    """Map a '+'-separated country name to its ISO 3166-1 alpha-3 code.

    Tries pycountry's fuzzy search first, then a manual fallback table for
    names pycountry does not recognize; returns None when both fail.
    """
    fallback = {'South+Korea': 'KOR',
                'North+Korea': 'PRK',
                'Laos': 'LAO',
                'Caribbean+Netherlands': 'BES',
                'St.+Lucia': 'LCA',
                'East+Timor': 'TLS',
                'Democratic+Republic+of+Congo': 'COD',
                'Swaziland': 'SWZ',
                'Cape+Verde': 'CPV',
                'C%C3%B4te+d%C2%B4Ivoire': 'CIV',
                'Ivory+Coast': 'CIV',
                'Channel+Islands': 'GBR'
               }
    try:
        return pycountry.countries.search_fuzzy(country.replace('+', ' '))[0].alpha_3
    except LookupError:
        # dict.get yields None for unknown names, matching the old KeyError path
        return fallback.get(country)
def full_extent(ax, pad=0.0):
    """Get the full extent of an axes, including axes labels, tick labels, and
    titles.

    Args:
        ax: matplotlib Axes to measure.
        pad: fractional padding applied to the final bounding box.

    Returns:
        A matplotlib Bbox in display coordinates.
    """
    # For text objects, we need to draw the figure first, otherwise the extents
    # are undefined.
    ax.figure.canvas.draw()
    items = ax.get_xticklabels() + ax.get_yticklabels()
#    items += [ax, ax.title, ax.xaxis.label, ax.yaxis.label]
    items += [ax, ax.title]
    # Union of all artist bounding boxes, optionally expanded by `pad`.
    bbox = Bbox.union([item.get_window_extent() for item in items])
    return bbox.expanded(1.0 + pad, 1.0 + pad)
def display_filepaths(filepaths, width=100, height=100):
    """Display the PNG images at `filepaths` side by side in an ipywidgets HBox.

    Fixes a resource leak: the original used ``open(filepath, 'rb').read()``
    inline, leaving every file handle unclosed.
    """
    images = []
    for filepath in filepaths:
        # Context manager guarantees each handle is closed promptly.
        with open(filepath, 'rb') as f:
            images.append(widgets.Image(value=f.read(), format='png', width=width, height=height))
    sidebyside = widgets.HBox(images, layout=Layout(height='{}px'.format(height)))
    display(sidebyside)
# hide_toggle(toggle_text='Show/hide helper functions')
# -
# ## Dataset
# Fill in below with dataset and file path names
# <a id="dataset"></a>
# +
# Images are only converted to tensors; no resizing/normalization here.
transform_train = transforms.Compose([
    transforms.ToTensor()
])
# Pick exactly one dataset + folder_name pair; results are read/written
# under results/<folder_name>/.
dataset = datasets.CoCoDataset(transform_train)
folder_name = 'coco_example'
# dataset = datasets.OpenImagesDataset(transform_train)
# folder_name = 'openimages_supp'
# -
save_loc = '1_pager_obj'
# WARNING: deletes any previous one-pager output for this dataset.
os.system("rm -r results/{0}/{1}".format(folder_name, save_loc))
file = folder(save_loc, folder_name)
# first_pass/to_write accumulate the one-pager summary on the first run.
first_pass = True
to_write = {}
data_folder = None
# 0.pkl is produced by a prior measurement run; it holds filepaths, counts, etc.
info = pickle.load(open("results/{}/0.pkl".format(folder_name), "rb"))
sample_file = info['filepaths'][0][0]
# If the cached paths don't resolve on this machine, re-root them.
if not os.path.exists(sample_file):
    assert data_folder is not None, "initialize data_folder with folder path of your data"
    dataset.init_folder_path(data_folder)
    print("overwriting from_path() function")
    dataset.from_path = dataset.from_path_prerun
# # (M0) Metric: Object Counts, Duplicate Annotations, Object Co-Occurrences
# <a id="metric0"></a>
# ## Setup
# <a id="metric0_setup"></a>
hide_toggle(for_next=True, toggle_text='Show/hide M0 Code')
# +
categories = dataset.categories
names = dataset.labels_to_names
if not os.path.exists("results/{0}/0/".format(folder_name)):
    os.mkdir("results/{0}/0/".format(folder_name))
info = pickle.load(open("results/{}/0.pkl".format(folder_name), "rb"))
instances_size = info['instances_size']
supercat_filepaths = info['filepaths']
# Pairs whose overlap/count ratio exceeds `threshold` are treated as likely
# duplicate annotations of the same object; others keep their ratio.
same = []
not_same = {}
with_people = info['with_people']
not_with_people = info['not_with_people']
counts = info['counts']
overlap = info['overlap']
threshold = .6
for key in overlap.keys():
    rat = overlap[key] / counts[key]
    if overlap[key] / counts[key] > threshold:
        same.append(key)
    else:
        not_same[key] = rat
# Setting up the cooccurrence matrix (keys of `counts` are "a-b" index pairs).
counts_mat = np.zeros((len(categories), len(categories)))
cooccurs = []
for key in counts.keys():
    a, b = key.split('-')
    a, b = int(a), int(b)
    counts_mat[b][a] = counts[key]
    counts_mat[a][b] = counts[key]
    if a != b:
        cooccurs.append(counts[key])
instance_counts = np.diagonal(counts_mat)
# Jaccard-style co-occurrence scores; mi_wilson stores the Wilson lower
# bound so rarely-observed pairs are not over-ranked.
mi_wilson = np.zeros((len(categories), len(categories)))
mi = np.zeros((len(categories), len(categories)))
for i in range(len(categories)):
    for j in range(i+1, len(categories)):
        denom = instance_counts[j] + instance_counts[i] - counts_mat[i][j]
        mi_wilson[i][j] = wilson(counts_mat[i][j] / denom, denom)[0]
        mi[i][j] = counts_mat[i][j]/denom
normalized = np.divide(counts_mat, instance_counts)
# Mask self-pairs and all person-label rows/columns (-1 sorts below real scores).
for i in range(len(categories)):
    normalized[i][i] = -1
    mi_wilson[i][i] = -1
for people in dataset.people_labels:
    index = categories.index(people)
    mi_wilson[index] = -1
    mi_wilson[:, index] = -1
    normalized[index] = -1
    # BUG FIX: the original line was `normalized[:, index] -1`, an expression
    # with no effect, so person columns were never masked in `normalized`.
    normalized[:, index] = -1
flat_norm = normalized.flatten()
def instance_counts_words(topn):
    """Print the `topn` most and least frequent object categories."""
    order = np.argsort(instance_counts)
    def _show(idx):
        # Human-readable name alongside the raw occurrence count.
        print("{0}: {1}".format(names[categories[idx]], instance_counts[idx]))
    print("\nTop appearances:")
    for rank in range(topn):
        _show(order[-1-rank])
    print("\nBottom appearances:")
    for rank in range(topn):
        _show(order[rank])
def instance_counts_graph(log):
    """Histogram the per-category instance counts; on the log view, highlight
    >3-sigma outliers and record them in the one-pager (`to_write[0]`)."""
    # %matplotlib inline
    if log:
        # Freedman-Diaconis bin edges, then re-spaced logarithmically.
        hist, bins = np.histogram(instance_counts, bins='fd')
        left = 0 if bins[0] == 0 else np.log10(bins[0])
        logbins = np.logspace(left,np.log10(bins[-1]),len(bins) // 3)
        n, bins, patches = plt.hist(instance_counts, bins=logbins)
        plt.yticks([])
        plt.xscale('log')
        plt.xlabel('Number of Instance Occurrences')
        plt.ylabel('Frequency')
        plt.title('Distribution of Log Instance Occurrences')
        # Outliers in log space; 1e-6 guards against log(0).
        z = np.abs(stats.zscore(np.log(instance_counts+1e-6)))
        outliers = np.where(z > 3)[0]
        if first_pass and len(outliers) > 0:
            to_write[0] = ["(M0) The outliers shown on the graph for instance count are:"]
            outliers_added = 0
            for outlier in outliers:
                outliers_added += 1
                # Recolor the bin containing this outlier for the saved figure.
                patches[max(np.digitize(instance_counts[outlier], bins, right=False) - 1, 0)].set_facecolor('C1')
                if outliers_added < 5:
                    to_write[0].append("{0}: {1}".format(names[categories[outlier]], int(instance_counts[outlier])))
                if outliers_added == 5:
                    to_write[0].append("Look in the notebook for the rest of the outliers.")
            plt.savefig("results/{0}/{1}/1.png".format(folder_name, save_loc))
            # Restore the default color before the interactive display.
            for outlier in outliers:
                patches[max(np.digitize(instance_counts[outlier], bins, right=False) - 1, 0)].set_facecolor('C0')
        plt.show()
    else:
        plt.hist(instance_counts, bins='fd')
        plt.xlabel('Number of Instance Occurrences')
        plt.ylabel('Frequency')
        plt.title('Distribution of Instance Occurrences')
        plt.show()
# Rank all pairs by Wilson score. NaNs (x != x) are first set to -1 so they
# sink in the "top" ranking, then the -1 sentinels become +inf so masked and
# NaN entries also stay out of the "bottom" ranking.
flat_norm = mi_wilson.flatten()
flat_norm[flat_norm!=flat_norm] = -1.
normalized_indices_top = np.argsort(flat_norm)
flat_norm[flat_norm == -1] = float("inf")
normalized_indices_bot = np.argsort(flat_norm)
def cooccurrence_counts_words(topn):
    """Print the `topn` strongest and weakest pairwise co-occurrences,
    holding out pairs flagged as likely duplicate annotations (`same`)."""
    same_notadded = []
    print("Top cooccurrences:")
    for i in range(topn):
        index = normalized_indices_top[-1-i]
        # Recover the (row, col) pair from the flattened index.
        a, b = index % len(categories), index // len(categories)
        key = '{0}-{1}'.format(b, a)
        if key not in same:
            print("{0} - {1}: {2}".format(names[categories[a]], names[categories[b]], round(flat_norm[index], 4)))
        else:
            same_notadded.append(key)
    print("\nBottom Cooccurrences:")
    for i in range(topn):
        index = normalized_indices_bot[i]
        a, b = index % len(categories), index // len(categories)
        print("{0} - {1}: {2}".format(names[categories[a]], names[categories[b]], round(flat_norm[index], 4)))
    print('\nNot included in above rankings because most likely the same object:')
    if same_notadded == []:
        print("N/A")
    for notadded in same_notadded:
        a, b = notadded.split('-')
        print("{0} - {1}".format(names[categories[int(a)]], names[categories[int(b)]]))
def cooccurence_counts_graph(log):
    """Histogram the pairwise co-occurrence counts on a log or linear x-axis."""
    # %matplotlib inline
    # Freedman-Diaconis binning, widened to at least 20 bins.
    hist, edges = np.histogram(cooccurs, bins='fd')
    if len(edges) < 20:
        hist, edges = np.histogram(cooccurs, bins=20)
    if log:
        start = 0 if edges[0] == 0 else np.log10(edges[0])
        plt.hist(cooccurs, bins=np.logspace(start, np.log10(edges[-1]), len(edges)))
        plt.xscale('log')
        title = 'Distribution of Log Instance Occurrences'
    else:
        plt.hist(cooccurs, bins=edges)
        title = 'Distribution of Cooccurrences'
    plt.xlabel('Number of Instance Cooccurrences')
    plt.ylabel('Frequency')
    plt.title(title)
    plt.show()
    if log:
        # Mirrors the original: only the log branch closed its figure.
        plt.close()
num_images = len(dataset.image_ids)
group_mapping = dataset.group_mapping
if group_mapping is not None:
    # Per-supercategory bookkeeping: member count, member frequencies/names,
    # plus significance results for over/under-represented members.
    num_per_group = np.zeros(len(with_people))
    freqs_per_group = [[] for i in range(len(with_people))]
    names_per_group = [[] for i in range(len(with_people))]
    ps = []
    phrases = []
    for cat in dataset.categories:
        num_per_group[group_mapping(cat)] += 1
    with_people = info['with_people']
    not_with_people = info['not_with_people']
    number_images = np.add(with_people, not_with_people)
    counts = info['counts']
    for i in range(len(instance_counts)):
        supercategory = group_mapping(dataset.categories[i])
        freqs_per_group[supercategory].append(instance_counts[i])
        names_per_group[supercategory].append(dataset.labels_to_names[dataset.categories[i]])
        if num_per_group[supercategory] > 1:
            # Indicator vectors: this category's share of images vs the average
            # share of its sibling categories within the supercategory.
            this_counts = np.zeros(num_images)
            this_counts[:int(instance_counts[i])] = 1
            that_counts = np.zeros(num_images)
            rest_counts = (number_images[supercategory] - instance_counts[i]) / (num_per_group[supercategory] - 1)
            that_counts[:int(rest_counts)] = 1
            # NOTE: the original also computed `stats.ttest_ind(...)` here and
            # immediately overwrote the result; the dead call was removed.
            p = stats.binom_test(int(instance_counts[i]), n=num_images, p=np.mean(that_counts))
            if p < .05:
                if np.mean(this_counts) > np.mean(that_counts):
                    phrase = '{0} is over represented in the {1} category: {2}, {3}, {4}\n'.format(dataset.labels_to_names[dataset.categories[i]], datasets.GROUPINGS_TO_NAMES[supercategory], round(np.mean(this_counts), 4), round(np.mean(that_counts), 4), p)
                else:
                    phrase = '{0} is under represented in the {1} category: {2}, {3}, {4}\n'.format(dataset.labels_to_names[dataset.categories[i]], datasets.GROUPINGS_TO_NAMES[supercategory], round(np.mean(this_counts), 4), round(np.mean(that_counts), 4), p)
                # Prefix the raw count so the phrases can be eyeballed quickly.
                phrase = '{0} '.format(instance_counts[i]) + phrase
                ps.append(p)
                phrases.append(phrase)
    indices = np.argsort(ps)
def within_category(category):
    """Plot (1) the overall supercategory distribution with `category`
    highlighted, (2) the per-label distribution inside that supercategory,
    and (3) a grid of example images with bounding boxes."""
    # %matplotlib inline
    topn = 10
    # looking at distribution within supercategory
    fontsize = 20
    fig = plt.figure(figsize=(14, 5))
    grid = plt.GridSpec(1, 2, hspace=.2, wspace=.3)
    ax1 = fig.add_subplot(grid[0, 0])
    ax2 = fig.add_subplot(grid[0, 1])
    total = with_people+not_with_people
    names = [datasets.GROUPINGS_TO_NAMES[i] for i in range(len(total))]
    ax1.set_xlabel('Supercategory', fontsize=fontsize)
    ax1.set_ylabel('Frequency', fontsize=fontsize)
    ax1.set_title('Supercategories', size=fontsize)
    order = np.argsort(total)[::-1]
    pltbar = ax1.bar(np.arange(len(total)), np.array(total)[order], tick_label=np.array(names)[order])
    for tick in ax1.get_xticklabels():
        tick.set_rotation(90)
        tick.set_fontsize(fontsize)
    for tick in ax1.get_yticklabels():
        tick.set_fontsize(fontsize)
    ax1.xaxis.labelpad = 10
    ax1.yaxis.labelpad = 10
    # Only save the left panel to the one-pager once (first run).
    if first_pass and not os.path.exists('results/{0}/{1}/3.png'.format(folder_name, save_loc)):
        to_write[2] = ["(M0) Distribution of object categories that appear in dataset."]
        extent = full_extent(ax1, pad=0.3).transformed(fig.dpi_scale_trans.inverted())
        fig.savefig('results/{0}/{1}/3.png'.format(folder_name, save_loc), bbox_inches=extent)
    stds_per_group = [np.std(chunk) for chunk in freqs_per_group]
    # peakiest_supercat = np.argmax(stds_per_group)
    # The user-selected group is highlighted rather than the highest-variance one.
    reverse = {v: k for k, v in datasets.GROUPINGS_TO_NAMES.items()}
    peakiest_supercat = reverse[category]
    pltbar[list(order).index(peakiest_supercat)].set_color('C1')
    ax2.set_xlabel('Instance Label', fontsize=fontsize)
    ax2.set_ylabel('Frequency', fontsize=fontsize)
    ax2.set_title('Within "{}"'.format(datasets.GROUPINGS_TO_NAMES[peakiest_supercat]), size=fontsize)
    freqs = freqs_per_group[peakiest_supercat]
    order = np.argsort(freqs)[::-1]
    ax2.bar(np.arange(len(freqs_per_group[peakiest_supercat])), np.array(freqs)[order], tick_label=np.array(names_per_group[peakiest_supercat])[order], color='C1')
    for tick in ax2.get_xticklabels():
        tick.set_rotation(90)
        tick.set_fontsize(fontsize)
    for tick in ax2.get_yticklabels():
        tick.set_fontsize(fontsize)
    ax2.xaxis.labelpad = 10
    ax2.yaxis.labelpad = 10
    plt.show()
    plt.close()
    try:
        # 3x7 grid of sample images; boxes drawn for labels in this supercategory.
        filepaths = supercat_filepaths[peakiest_supercat]
        fig = plt.figure(figsize=(17,5))
        for i in range(21):
            filepath = filepaths[i]
            image, anns = dataset.from_path(filepath)
            image = image.data.cpu().numpy().transpose(1, 2, 0)
            ax = fig.add_subplot(3, 7, 1+i)
            im = ax.imshow(image, alpha=.6)
            this_instances = set()
            for ann in anns[0]:
                if group_mapping(ann['label']) == peakiest_supercat:
                    this_instances.add(dataset.labels_to_names[ann['label']])
                    # bbox is fractional [x0, x1, y0, y1]; scale to pixels.
                    bbox = ann['bbox']
                    ann_0 = (bbox[0]*image.shape[1], bbox[2]*image.shape[0])
                    ann_w = (bbox[1]-bbox[0])*image.shape[1]
                    ann_h = (bbox[3]-bbox[2])*image.shape[0]
                    rect = patches.Rectangle(ann_0,ann_w, ann_h, linewidth=2,edgecolor='b',facecolor='none')
                    ax.add_patch(rect)
            ax.set_title(', '.join(list(this_instances)), size=10)
            ax.axis("off")
        plt.show()
        plt.close()
    except AttributeError:
        print('Some functionality not available for CocoDatasetNoImages Class')
def show_cooccurrence_hierarchy():
    """Cluster labels by co-occurrence (Ward linkage on inverted Wilson
    scores), save the dendrogram, and display it in a zoomable plotly figure."""
    reverse = {v: k for k, v in dataset.labels_to_names.items()}
    # NOTE(review): this mutates the module-level mi_wilson in place.
    mi_wilson[np.isnan(mi_wilson)] = 0
    xaxis = [dataset.labels_to_names[i] for i in categories]
    # Convert similarity to distance: larger Wilson score -> smaller distance.
    biggest = np.amax(mi_wilson)*1.1
    condensed_distance_matrix = []
    for i in range(len(categories)):
        for j in range(i+1, len(categories)):
            condensed_distance_matrix.append(biggest - mi_wilson[i][j])
    for p in [20]: # change this to have more or less labels shown
        Z = linkage(condensed_distance_matrix, 'ward')
        fig = plt.figure(figsize=(15, 10))
        dn = dendrogram(Z, orientation='left', labels=xaxis, p=p, truncate_mode='level')
        ax = plt.gca()
        xlbls = ax.get_ymajorticklabels()
        # Color each leaf label by its supercategory.
        colorz = sns.color_palette('Set2', 12)
        if dataset.group_mapping is not None:
            for lbl in xlbls:
                if lbl.get_text() not in reverse.keys():
                    continue
                ind = reverse[lbl.get_text()]
                lbl.set_color(colorz[dataset.group_mapping(ind)])
        plt.savefig("results/{0}/{1}/hierarchy_{2}.png".format(folder_name, 0, p), dpi=300)
        plt.close()
    # Create figure (plotly; `p` here is the last value of the loop above)
    fig = go.Figure()
    # Constants
    img_width = 1600
    img_height = 900
    scale_factor = 0.5
    # Add invisible scatter trace.
    # This trace is added to help the autoresize logic work.
    fig.add_trace(
        go.Scatter(
            x=[0, img_width * scale_factor],
            y=[0, img_height * scale_factor],
            mode="markers",
            marker_opacity=0
        )
    )
    # Configure axes
    fig.update_xaxes(
        visible=False,
        range=[0, img_width * scale_factor]
    )
    fig.update_yaxes(
        visible=False,
        range=[0, img_height * scale_factor],
        # the scaleanchor attribute ensures that the aspect ratio stays constant
        scaleanchor="x"
    )
    # Add image
    fig.add_layout_image(
        dict(
            x=0,
            sizex=img_width * scale_factor,
            y=img_height * scale_factor,
            sizey=img_height * scale_factor,
            xref="x",
            yref="y",
            opacity=1.0,
            layer="below",
            sizing="stretch",
            source="results/{0}/{1}/hierarchy_{2}.png".format(folder_name, 0, p))
    )
    # Configure other layout
    fig.update_layout(
        width=img_width * scale_factor,
        height=img_height * scale_factor,
        margin={"l": 0, "r": 0, "t": 0, "b": 0},
    )
    # Disable the autosize on double click because it adds unwanted margins around the image
    # More detail: https://plot.ly/python/configuration-options/
    fig.show(config={'doubleClick': 'reset'})
# -
# ## Analyses
# <a id="metric0_analyses"></a>
# Looking at individual object counts
print("Number of images: {}".format(len(dataset.image_ids)))
print("Median and Mean of instance counts: {0}, {1}".format(np.median(instance_counts), np.mean(instance_counts)))
# Interactive widgets: adjust topn / toggle log scale on the figures above.
interact(instance_counts_words, topn=widgets.IntSlider(min=1, max=30, step=1, value=10));
interact(instance_counts_graph, log=widgets.ToggleButton(value=True, description="Toggle for log"));
# Looking at cooccurrence counts in the form of numbers, graph, and a hierarchy.
print("Median and Mean of cooccurrence counts: {0}, {1}".format(np.median(cooccurs), np.mean(cooccurs)))
interact(cooccurrence_counts_words, topn=widgets.IntSlider(min=1, max=30, step=1, value=10));
interact(cooccurence_counts_graph, log=widgets.ToggleButton(value=True, description="Toggle for log"));
# Visualizing a hierarchy of terms based on their cooccurrences. Interact with the graph to zoom in and out. Can change settings of graph to have more/less labels in the show_cooccurrence_hierarchy function above.
show_cooccurrence_hierarchy()
# A look at the supercategory level
if group_mapping is not None:
    interact(within_category, category=widgets.Dropdown(options=sorted(list(datasets.GROUPINGS_TO_NAMES.values())), value='accessory'));
# # (M7) Metric: Size and Distance from Center of Supercategories
# <a id="metric7"></a>
# ## Setup
# <a id="metric7_setup"></a>
hide_toggle(for_next=True, toggle_text='Show/hide M7 Code')
# +
topn = 15
categories = dataset.categories
idx_to_scenegroup = pickle.load(open('util_files/places_scene_info.pkl', 'rb'))['idx_to_scenegroup']
info = pickle.load(open("results/{}/0.pkl".format(folder_name), "rb"))
if dataset.group_mapping is not None:
    sizes = info['sizes']
    distances = info['distances']
    # NOTE(review): np.array on a ragged list then concatenate — relies on
    # legacy object-array behavior; newer numpy may warn/raise here. Confirm.
    all_sizes = np.concatenate(np.array(sizes), axis=0)
else:
    all_sizes = []
    for a_instance_size in instances_size:
        for a_size in a_instance_size:
            all_sizes.append(a_size[0])
# Quintile boundaries over all observed sizes define the XS..XL bins;
# 1.00001 caps the last bin so a size of exactly 1.0 still falls inside.
sorted_sizes = np.sort(all_sizes)
fifth = len(sorted_sizes) // 5
bins = [sorted_sizes[i*fifth] for i in range(5)]
bins.append(1.00001)
bar_mapping = {1: 'XS', 2: 'S', 3: 'M', 4: 'L', 5: 'XL'}
reverse_bm = {v: k for k, v in bar_mapping.items()}
instances_sizes = info['instances_size']
# Entropy of each category's size-bin distribution: low entropy means the
# category appears at very uniform sizes (a candidate for augmentation).
instance_deviations = np.zeros(len(instances_sizes))
for i in range(len(instances_sizes)):
    this_sizes = [chunk[0] for chunk in instances_sizes[i]]
    this_bins = np.digitize(this_sizes, bins)
    _, counts = np.unique(this_bins, return_counts=True)
    probs = counts / np.sum(counts)
    entropy = -np.sum(np.multiply(probs, np.log2(probs+1e-6)), axis=0)
    instance_deviations[i] = entropy
indices = np.argsort(instance_deviations)
# Rebuild the co-occurrence matrix (same construction as the M0 cell).
counts = pickle.load(open('results/{}/0.pkl'.format(folder_name), 'rb'))['counts']
counts_mat = np.zeros((len(categories), len(categories)))
cooccurs = []
for key in counts.keys():
    a, b = key.split('-')
    a, b = int(a), int(b)
    counts_mat[b][a] = counts[key]
    counts_mat[a][b] = counts[key]
    if a != b:
        cooccurs.append(counts[key])
scene_instance = pickle.load(open("results/{}/9.pkl".format(folder_name), "rb"))['scene_instance']
def mean_and_std(data, data_type):
    """For each supercategory in `data` (list of per-group value lists),
    plot (1) overlaid area-normalized histograms and (2) a bar chart of
    mean +/- std; `data_type` labels the axis (e.g. 'Sizes', 'Distances')."""
    # %matplotlib inline
    colorz = sns.color_palette('Set2', len(data))
    # NOTE: the original also bound `f = data[0]` and `m = data[1]`;
    # both were unused and have been removed.
    means = []
    stds = []
    x = []
    name = []
    for i, cat in enumerate(data):
        x.append(i)
        means.append(np.mean(cat))
        stds.append(np.std(cat))
        name.append(datasets.GROUPINGS_TO_NAMES[i])
        # Normalize each histogram to unit area so groups are comparable.
        histogram, bins = np.histogram(cat, bins='auto')
        bin_centers = 0.5*(bins[1:] + bins[:-1])
        area = np.trapz(histogram, x=bin_centers)
        plt.plot(bin_centers, histogram/area, alpha=.75, label=datasets.GROUPINGS_TO_NAMES[i], color=colorz[i])
    plt.legend(loc='upper right')
    plt.xlabel(data_type)
    plt.ylabel('Frequency')
    plt.tight_layout()
    plt.show()
    plt.close()
    plt.xlabel('Category Groups')
    plt.ylabel(data_type)
    plt.bar(x, means, yerr=stds, tick_label=name, capsize=10)
    plt.xticks(rotation='vertical')
    plt.tight_layout()
    # Flag supercategories whose mean is a >3-sigma outlier among the means.
    z = np.abs(stats.zscore(means))
    outliers = np.where(z > 3)[0]
    if data_type == 'Distances' and first_pass and len(outliers) > 0:
        to_write[3] = ["(M7) In the graph, the following object(s) have outlier distances:"]
        for outlier in outliers:
            to_write[3].append(name[outlier])
        plt.savefig("results/{0}/{1}/4.png".format(folder_name, save_loc))
    plt.show()
    plt.close()
def size_or_distance(metric):
    """Show per-supercategory statistics for the chosen metric
    ('size' or 'distance')."""
    if metric == 'distance':
        mean_and_std(distances, 'Distances')
    elif metric == 'size':
        mean_and_std(sizes, 'Sizes')
# (label text, index) dropdown options ordered by size-distribution entropy;
# categories with <=10 sized instances are omitted as too noisy.
cat_to_ent = [('{0}: {1}'.format(dataset.labels_to_names[categories[index]], round(instance_deviations[index], 3)), index) for index in indices if len(instances_sizes[index]) > 10]
def object_size(object_class, sizes):
    """For the category at index `object_class`, plot its XS..XL size-bin
    histogram, then recommend pairwise search queries (co-occurring objects
    and scenes most predictive of the selected/under-filled sizes) with
    qualitative image examples. `sizes` is None on the automated first pass
    (results are saved to the one-pager) or a tuple of size names when
    driven interactively."""
    try:
        plt.clf()
        index = object_class
        cat = categories[index]
        fontsize = 10
        # Histogram of this category's sizes over the global quintile bins.
        this_sizes = [chunk[0] for chunk in instances_sizes[index]]
        this_bins = np.digitize(this_sizes, bins)
        num, counts = np.unique(this_bins, return_counts=True)
        fig = plt.figure(figsize=(5, 4))
        ax = plt.subplot(111)
        xticks = []
        for j in bar_mapping.keys():
            if j in num:
                ax.bar(j, counts[list(num).index(j)], width=0.8, bottom=0.0, align='center', color='C0')
            else:
                ax.bar(j, 0, width=0.8, bottom=0.0, align='center', color='C0')
            # Invisible plot call: used only to put the bin ranges in the legend.
            ax.plot(1,1,label = '{0}: {1}-{2}'.format(bar_mapping[j], round(bins[j-1], 3), round(bins[j], 3)),marker = '',ls ='')
            xticks.append(bar_mapping[j])
        plt.xticks(np.arange(len(xticks))+1, xticks, fontsize=fontsize)
        plt.tick_params(labelsize=fontsize)
        plt.xlabel('Size Bins', fontsize=fontsize)
        plt.ylabel('Frequency', fontsize=fontsize)
        plt.tight_layout()
        plt.gcf().subplots_adjust(left=0.35)
        if sizes is None:
            # First pass: target the two least-populated size bins.
            if len(counts) == 1:
                size_to_add = [num[0]]
            else:
                size_to_add = num[np.argpartition(counts, 2)[:2]]
            plt.savefig("results/{0}/{1}/5.png".format(folder_name, save_loc))
            plt.close()
        else:
            # Interactive: use the user-selected size names.
            size_to_add = [reverse_bm[size] for size in sizes]
            plt.show()
            plt.close()
        # Images where this category appears in one of the target sizes.
        filepaths = [instances_sizes[index][j][1] for j in range(len(instances_sizes[index])) if this_bins[j] in size_to_add]
        if len(filepaths) == 0:
            print("No images of objects in this size appear in the dataset. Please select an additional size.")
            return
        # Denominators: how often each other object / scene occurs with `cat` at all.
        other_instances = counts_mat[index]
        other_scenes = scene_instance[:, index]
        all_anns = [dataset.from_path(filepath)[1][0] for filepath in filepaths]
        instances_per = np.array([list(set([ann['label'] for ann in anns if ann['label'] != cat])) for anns in all_anns])
        these_instances = np.concatenate(instances_per, axis=0)
        scenes_per = np.array([dataset.from_path(filepath)[1][4] for filepath in filepaths])
        these_scenes = np.concatenate(scenes_per, axis=0)
        # Numerators: co-occurrence counts restricted to target-size images.
        num, counts = np.unique(these_instances, return_counts=True)
        num = np.array([categories.index(nu) for nu in num])
        these_instances = np.zeros(len(categories))
        for i in range(len(num)):
            these_instances[num[i]] = counts[i]
        num, counts = np.unique(these_scenes, return_counts=True)
        these_scenes = np.zeros(len(other_scenes))
        for i in range(len(num)):
            these_scenes[num[i]] = counts[i]
        # Wilson lower bounds of P(target size | co-occurring object/scene).
        instance_probs = np.nan_to_num(np.divide(these_instances, other_instances))
        instance_probs = np.nan_to_num(np.array([wilson(instance_probs[i], other_instances[i])[0] for i in range(len(instance_probs))]))
        scene_probs = np.nan_to_num(np.divide(these_scenes, other_scenes))
        scene_probs = np.nan_to_num(np.array([wilson(scene_probs[i], other_scenes[i])[0] for i in range(len(scene_probs))]))
        instance_indices = np.argsort(instance_probs)
        scene_indices = np.argsort(scene_probs)
        i_counter = 1
        s_counter = 1
        imgs_to_show =3
        i_indices = []
        s_indices = []
        fig = plt.figure(figsize=(8, 6))
        i = 0
        start = time.time()
        # so that it displays combos even of object of interest in non-desired size
        if sizes is not None:
            print("Please wait, visualization can take ~25 seconds")
        # Re-derive per-image annotations/scenes over ALL of this category's
        # images (not just target sizes) for the example grid below.
        filepaths = [instances_sizes[index][j][1] for j in range(len(instances_sizes[index]))]
        things_per = np.array([dataset.from_path(filepath)[1] for filepath in filepaths])
        scenes_per = np.array([thing[4] for thing in things_per])
        all_anns = [thing[0] for thing in things_per]
        instances_per = np.array([list(set([ann['label'] for ann in anns if ann['label'] != cat])) for anns in all_anns])
        if sizes is not None:
            print("Time took: {}".format(time.time() - start))
        fontsize = 10
        added_filepaths = []
        second_queries = []
        second_probs = []
        # Greedily take the highest-probability query (object or scene) and
        # show 3 example images per query, up to imgs_to_show query columns.
        while i < imgs_to_show:
            if i_counter > len(instance_indices) or s_counter > len(scene_indices):
                break
            if instance_probs[instance_indices[-i_counter]] < scene_probs[scene_indices[-s_counter]]:
                s_index = scene_indices[-s_counter]
                s_counter += 1
                added = 0
                for j, scenes in enumerate(scenes_per):
                    if s_index in scenes:
                        filepath = filepaths[j]
                        if filepath in added_filepaths:
                            continue
                        added_filepaths.append(filepath)
                        image, anns = dataset.from_path(filepath)
                        image = image.data.cpu().numpy().transpose(1, 2, 0)
                        ax = fig.add_subplot(3, imgs_to_show, (added*imgs_to_show)+1+i)
                        ax.clear()
                        ax.set_title('\n'.join(textwrap.wrap(idx_to_scenegroup[s_index], width=25)), fontsize=fontsize)
                        ax.axis("off")
                        # Outline every instance of the category of interest.
                        for ann in anns[0]:
                            if ann['label'] == cat:
                                bbox = ann['bbox']
                                ann_0 = (bbox[0]*image.shape[1], bbox[2]*image.shape[0])
                                ann_w = (bbox[1]-bbox[0])*image.shape[1]
                                ann_h = (bbox[3]-bbox[2])*image.shape[0]
                                rect = patches.Rectangle(ann_0,ann_w, ann_h, linewidth=2,edgecolor='#ff0000',facecolor='none')
                                ax.add_patch(rect)
                        im = ax.imshow(image, alpha=.66)
                        added += 1
                        if added == 3:
                            second_queries.append('\n'.join(textwrap.wrap(idx_to_scenegroup[s_index], width=20)))
                            second_probs.append(scene_probs[s_index])
                            break
            else:
                i_index = instance_indices[-i_counter]
                i_counter += 1
                added = 0
                for j, instances in enumerate(instances_per):
                    if categories[i_index] in instances:
                        filepath = filepaths[j]
                        if filepath in added_filepaths:
                            continue
                        added_filepaths.append(filepath)
                        image, anns = dataset.from_path(filepath)
                        image = image.data.cpu().numpy().transpose(1, 2, 0)
                        ax = fig.add_subplot(3, imgs_to_show, (added*imgs_to_show)+1+i)
                        ax.clear()
                        ax.set_title(dataset.labels_to_names[categories[i_index]], fontsize=fontsize)
                        ax.axis("off")
                        for ann in anns[0]:
                            if ann['label'] == cat:
                                bbox = ann['bbox']
                                ann_0 = (bbox[0]*image.shape[1], bbox[2]*image.shape[0])
                                ann_w = (bbox[1]-bbox[0])*image.shape[1]
                                ann_h = (bbox[3]-bbox[2])*image.shape[0]
                                rect = patches.Rectangle(ann_0,ann_w, ann_h, linewidth=2,edgecolor='#ff0000',facecolor='none')
                                ax.add_patch(rect)
                        im = ax.imshow(image, alpha=.66)
                        added += 1
                        if added == 3:
                            second_queries.append(dataset.labels_to_names[categories[i_index]])
                            second_probs.append(instance_probs[i_index])
                            break
            if added == 3:
                i += 1
        if sizes is None:
            plt.savefig("results/{0}/{1}/6.png".format(folder_name, save_loc))
            plt.close()
        else:
            plt.show()
            plt.close()
        print()
        # graph the new probability using second_queries and second_probs
        fig = plt.figure(figsize=(6, 4))
        plt.barh(np.arange(len(second_probs))[::-1], second_probs, tick_label=second_queries)
        plt.ylabel('Query Term', fontsize=fontsize)
        plt.xticks(fontsize=fontsize)
        plt.yticks(fontsize=fontsize)
        if sizes is None:
            size_names = [bar_mapping[size_add] for size_add in size_to_add]
            plt.xlabel('Conditional Probability\n{0} is {1}'.format(dataset.labels_to_names[cat], ', '.join(list(size_names))), fontsize=fontsize)
            plt.tight_layout()
            plt.savefig("results/{0}/{1}/7.png".format(folder_name, save_loc))
            plt.close()
            to_write[4] = ["(M7) {0} has the least uniform size distribution.\nShown below is the size distribution for this object, what kinds of pairwise queries are recommended to augment the dataset for more uniform sizing, and qualitative examples of these pairs.\nPairwise queries take the form of \"[Object 1] + [Object 2]\"".format(dataset.labels_to_names[cat])]
        else:
            plt.xlabel('Conditional Probability\n{0} is {1}'.format(dataset.labels_to_names[cat], ', '.join(list(sizes))), fontsize=fontsize)
            plt.tight_layout()
            plt.show()
            plt.close()
    except AttributeError:
        print('Some functionality not available for CocoDataNoImages Class')
# Multi-select for the size bins fed to object_size's `sizes` argument.
sizes_widget = widgets.SelectMultiple(
    options=['XS', 'S', 'M', 'L', 'XL'],
    value=['XS'],
    description='',
    disabled=False
)
object_class_widget = widgets.Dropdown(options=cat_to_ent,layout=Layout(width='200px'))
all_things = [widgets.Label('[Object]: [amount of size distribution]',layout=Layout(padding='0px 0px 0px 5px', width='270px')), object_class_widget, widgets.Label('Sizes: select with cmd',layout=Layout(padding='0px 5px 0px 40px', width='260px')), sizes_widget]
# -
# ## Analyses
# <a id="metric7_analyses"></a>
# A look at size (percent of image's pixels) and distance (from center) by object category.
if dataset.group_mapping is not None:
    interact(size_or_distance, metric=widgets.Dropdown(options=['distance', 'size'], value='distance'));
# Actionable pairwise queries about how to equalize sizes for a particular class.
if first_pass:
    # Automated run for the lowest-entropy category; saves one-pager figures.
    object_size(cat_to_ent[0][1], None)
ui = HBox(all_things)
out = widgets.interactive_output(object_size, {'object_class': object_class_widget, 'sizes': sizes_widget})
display(ui, out)
# # (M8) Metric: Supercategories w/wo people
# <a id="metric8"></a>
# ## Setup
# <a id="metric8_setup"></a>
hide_toggle(for_next=True, toggle_text='Show/hide M8 Code')
# +
info = pickle.load(open("results/{}/0.pkl".format(folder_name), "rb"))
with_people = info['with_people']
not_with_people = info['not_with_people']
with_people_instances = info['with_people_instances']
counts = info['counts']
x, means, stds, name = [], [], [], []
# Visualize how each supercategory is represented with people
if dataset.group_mapping is not None:
    for i in range(len(with_people)):
        # Index 0 is skipped — presumably the person supercategory itself; confirm.
        if i == 0:
            continue
        x.append(i)
        total = with_people[i]+not_with_people[i]
        p = with_people[i] / total
        means.append(p)
        # NOTE(review): p*(1-p)/total is the variance of the proportion, not
        # its standard error (sqrt of this); the yerr bars below may be
        # understated — confirm intent.
        stds.append((p*(1.-p))/total)
        name.append(datasets.GROUPINGS_TO_NAMES[i])
def fraction_with_people():
    """Bar-plot, per supercategory, the fraction of its images that also
    contain people; saved to the one-pager on the first pass."""
    fig = plt.figure(figsize=(8, 6))
    fontsize = 20
    plt.xlabel('Supercategory', fontsize=fontsize, labelpad=20)
    plt.ylabel('Fraction with People', fontsize=fontsize, labelpad=20, y=0.29)
    plt.bar(x, means, yerr=stds, tick_label=name)
    plt.xticks(rotation='vertical', fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    plt.title("The Fraction of Images in Each Category of Objects Represented with People")
    plt.tight_layout()
    if first_pass:
        to_write[5] = ["(M8) Distribution of how often object categories are represented with people."]
        plt.savefig("results/{0}/{1}/8.png".format(folder_name, save_loc))
    plt.show()
group_mapping = dataset.group_mapping
if group_mapping is not None:
    # For each label, binomial test of its with-people rate against its
    # supercategory's overall with-people rate; significant deviations are
    # bucketed into over/under dictionaries keyed by the display phrase.
    sigsOver = {}
    sigsUnder = {}
    for i in range(len(dataset.categories)):
        supercategory = group_mapping(dataset.categories[i])
        # counts['i-i'] is the diagonal entry: total images with this label.
        instance_percent = with_people_instances[i] / counts['{0}-{1}'.format(i, i)]
        supercat_percent = with_people[supercategory] / (with_people[supercategory]+not_with_people[supercategory])
        p = stats.binom_test(with_people_instances[i], n=counts['{0}-{1}'.format(i, i)], p=supercat_percent)
        if p < .05:
            if instance_percent < supercat_percent:
                phrase = "- {0} is underrepresented with people within {1}: {2}, {3}".format(dataset.labels_to_names[dataset.categories[i]].upper(), datasets.GROUPINGS_TO_NAMES[supercategory].upper(), round(instance_percent, 2), round(supercat_percent, 2))
                sigsUnder[phrase] = p
            else:
                phrase = "- {0} is overrepresented with people within {1}: {2}, {3}".format(dataset.labels_to_names[dataset.categories[i]].upper(), datasets.GROUPINGS_TO_NAMES[supercategory].upper(), round(instance_percent, 2), round(supercat_percent, 2))
                sigsOver[phrase] = p
def represented_with_people(topn):
    """Print the `topn` most significant over/under-representations of
    objects with people, and record the top 4 of each on the first pass."""
    print("\nThe first fraction is this object's representation with people, second fraction is the object category's. \nListed in order of statistical significance.")
    # Most significant (smallest p) first.
    ranked_over = sorted(sigsOver.items(), key=lambda item: item[1], reverse=False)
    ranked_under = sorted(sigsUnder.items(), key=lambda item: item[1], reverse=False)
    if first_pass:
        summary = ["(M8) The strongest deviations of an object from its category being represented with people. The first fraction is this object's representation with people, and the second is the object category's:\n"]
        for phrase, _ in ranked_over[:4]:
            summary.append(phrase)
        summary.append("\n")
        for phrase, _ in ranked_under[:4]:
            summary.append(phrase)
        to_write[6] = summary
    print("\nOVERrepresentation of instances with people within a supercategory\n")
    for phrase, _ in ranked_over[:topn]:
        print(phrase)
    print("\nUNDERrepresentation of instances with people within a supercategory\n")
    for phrase, _ in ranked_under[:topn]:
        print(phrase)
# -
# ## Analyses
# <a id="metric8_analyses"></a>
# Which categories of objects are imaged with people.
if dataset.group_mapping is not None:
    fraction_with_people()
# Which specific objects are over/under represented with people within their object category. The first fraction is this object's representation with people, second fraction is the object category's. Listed in order of statistical significance.
if dataset.group_mapping is not None:
    interact(represented_with_people, topn=widgets.IntSlider(min=1, max=30, step=1, value=10));
# # (M9) Metric: Scenes and Object Appearance Diversity
# <a id="metric9"></a>
# ## Setup
# <a id="metric9_setup"></a>
hide_toggle(for_next=True, toggle_text='Show/hide M9 Code')
# +
topn = 10
# 9.pkl holds scene-classifier outputs from a prior measurement run.
scene_info = pickle.load(open("results/{}/9.pkl".format(folder_name), "rb"))
scene_counts = scene_info['scenes']
scene_supercategory = scene_info['scene_supercategory']
scene_instance = scene_info['scene_instance']
supercat_to_scenes_to_features = scene_info['supercat_to_scenes_to_features']
supercategory_info = pickle.load(open("results/{}/0.pkl".format(folder_name), "rb"))
supercategory_counts = np.add(supercategory_info['with_people'], supercategory_info['not_with_people'])
# Places365 scene metadata (index -> scene / scene-group mappings).
info = pickle.load(open('util_files/places_scene_info.pkl', 'rb'))
idx_to_scene = info['idx_to_scene']
idx_to_scenegroup = info['idx_to_scenegroup']
sceneidx_to_scenegroupidx = info['sceneidx_to_scenegroupidx']
# NOTE(review): entropy_per_instance is never written or read below.
entropy_per_instance = np.zeros(len(dataset.categories))
# Scene-distribution entropy per category: low entropy = object always
# appears in the same kinds of scenes.
totals_per_instance = np.sum(scene_instance, axis=0)
scene_probs = np.divide(scene_instance, totals_per_instance)
entropy = -np.sum(np.multiply(scene_probs, np.log2(scene_probs+1e-6)), axis=0)
indices = np.argsort(entropy)
# Dropdown options; categories seen in <=30 scenes are omitted as too noisy.
cat_to_ent = [('{0}: {1}'.format(dataset.labels_to_names[dataset.categories[index]], round(entropy[index], 3)), index) for index in indices if totals_per_instance[index] > 30]
instance_filepaths = pickle.load(open("results/{}/0.pkl".format(folder_name), 'rb'))['instances_size']
class SceneQual():
    """Interactive browser: pick an object category and view a 3x5 grid of
    random example images, with a button to reshuffle the sample."""
    def __init__(self):
        self.label = None
        interact(self.obj_scene_div, label=widgets.Dropdown(options=cat_to_ent));
    def obj_scene_div(self, label):
        """Show up to 15 randomly chosen images containing `label`."""
        try:
            self.label = label
            fig = plt.figure(figsize=(12, 8))
            filepaths = np.unique(np.array([chunk[1] for chunk in instance_filepaths[label]]))
            random.shuffle(filepaths)
            # FIX: the original looped `for i in range(30)` and indexed
            # filepaths[i] unconditionally, raising IndexError for categories
            # with fewer than 30 images while silently skipping indices
            # 15-29 anyway (the grid only holds 3x5 images).
            for i, filepath in enumerate(filepaths[:15]):
                image, anns = dataset.from_path(filepath)
                image = image.data.cpu().numpy().transpose(1, 2, 0)
                ax = fig.add_subplot(3, 5, 1+i)
                ax.axis("off")
                im = ax.imshow(image, extent=SAME_EXTENT)
            plt.show()
        except AttributeError:
            print('Some functionality not available for CocoDatasetNoImages Class')
    def click(self, b):
        """Refresh handler: clear output and rebuild the widget (the shuffle
        in obj_scene_div yields a fresh sample), then re-display the button."""
        if b != '':
            clear_output()
            interact(self.obj_scene_div, label=widgets.Dropdown(options=cat_to_ent, value=self.label));
            refresh_button = widgets.Button(description="Click to refresh examples", layout=Layout(width='300px'))
            refresh_button.on_click(self.click)
            output = widgets.Output()
            display(refresh_button, output)
def scene_distribution():
    """Horizontal bar chart of how often each scene group occurs in the
    dataset, sorted by count; on the first pass also records the caption
    and figure for the summary PDF."""
    sort_order = np.argsort(scene_counts)
    ypos = np.arange(len(idx_to_scenegroup))
    # Wrap long scene-group names so the y tick labels stay readable.
    tick_labels = ['\n'.join(textwrap.wrap(idx_to_scenegroup[i], width=30)) for i in np.array(ypos)[sort_order]]
    plt.barh(ypos, np.array(scene_counts)[sort_order], tick_label=tick_labels)
    plt.yticks(rotation='vertical')
    plt.yticks(fontsize=10)
    ax = plt.gca()
    ax.tick_params(axis="x", bottom=True, top=False, labelbottom=True, labeltop=False)
    ax.tick_params(axis="y", left=True, right=False, labelleft=True, labelright=False, labelrotation=0)
    plt.ylabel('Scenes')
    plt.xlabel('Quantity')
    plt.tight_layout()
    plt.title("Distribution of scenes that appear in dataset")
    if first_pass:
        # Record caption + figure for the summary PDF (section 7 / file 9.png).
        to_write[7] = ["(M9) Distribution of scenes that appear in dataset."]
        plt.savefig("results/{0}/{1}/9.png".format(folder_name, save_loc))
    plt.show()
def scene_supercat(topn):
    """Print the `topn` highest- and lowest-scoring scene-group / object-
    category cooccurrences. Each pair is scored by the Wilson lower bound
    of its overlap fraction (a significance-aware cooccurrence measure)."""
    # Use an explicit float array: the original np.zeros_like inherited the
    # dtype of the count matrix, so integer counts silently truncated every
    # Wilson score (all < 1) to zero.
    mi_wilson = np.zeros(np.shape(scene_supercategory), dtype=float)
    for i in range(len(mi_wilson)):
        for j in range(len(mi_wilson[0])):
            # Union-style denominator: images in scene i or category j.
            denom = scene_counts[i] + supercategory_counts[j] - scene_supercategory[i][j]
            mi_wilson[i][j] = wilson(scene_supercategory[i][j] / denom, denom)[0]
    flat_norm = mi_wilson.flatten()
    flat_norm[flat_norm!=flat_norm] = -1.  # push NaNs (empty cells) to the bottom
    normalized_indices = np.argsort(flat_norm)
    print("Top cooccurrences:\n")
    for i in range(topn):
        index = normalized_indices[-1-i]
        # Flattened index -> (category, scene group): rows are scenes,
        # columns are the supercategories.
        a, b = index % len(datasets.GROUPINGS_TO_NAMES), index // len(datasets.GROUPINGS_TO_NAMES)
        # The original format string had only {0} and {1}, so the score
        # argument was computed but never printed; {2} restores it.
        print("{0} - {1}: {2}".format(idx_to_scenegroup[b], datasets.GROUPINGS_TO_NAMES[a], round(flat_norm[index], 4)))
    print("Bottom cooccurrences:\n")
    for i in range(topn):
        index = normalized_indices[i]
        a, b = index % len(datasets.GROUPINGS_TO_NAMES), index // len(datasets.GROUPINGS_TO_NAMES)
        print("{0} - {1}: {2}".format(idx_to_scenegroup[b], datasets.GROUPINGS_TO_NAMES[a], round(flat_norm[index], 4)))
def diversify_supercat_by_scene(supercat):
    """For one object supercategory, visualize which scene groups contribute
    the most appearance diversity versus how common each scene is, and (on
    the first pass) save the commonness-vs-diversity scatter for the PDF."""
    # NOTE(review): big/person/obj_area/person_area/distance, sorted_indices,
    # and boundaries are initialized or populated but never used below.
    all_scenes = []
    big = []
    small = []
    person = []
    obj_area = []
    person_area = []
    distance = []
    filepaths = []
    scenes_to_features = supercat_to_scenes_to_features[supercat]
    # Collect (scene, instance count) pairs and flatten every instance's
    # feature vector, scene by scene, into one list.
    for scene in scenes_to_features.keys():
        all_scenes.append((scene, len(scenes_to_features[scene])))
        small_ = [chunk[0][0] for chunk in scenes_to_features[scene]]
        filepaths_ = [chunk[1] for chunk in scenes_to_features[scene]]
        small.extend(small_)
        filepaths.extend(filepaths_)
    all_features = small
    # Diversity proxy: each instance's feature distance from the
    # supercategory's mean feature vector.
    cluster_center = np.mean(all_features, axis=0)
    dists = np.linalg.norm(cluster_center-all_features, axis=1)
    sorted_indices = np.argsort(dists)
    tracker = 0
    scene_dists = []
    print("\n{} scene contributions\n".format(datasets.GROUPINGS_TO_NAMES[supercat]))
    boundaries = [tracker]
    tsne_features = []
    ind_labels = []
    # dists is laid out scene-by-scene in the same order as all_scenes, so a
    # running offset (tracker) slices out each scene's block; the median
    # distance of that block is the scene's diversity contribution.
    for i, scene in enumerate(all_scenes):
        med_dist = np.median(dists[tracker:tracker+scene[1]])
        scene_dists.append(med_dist)
        tracker += scene[1]
        boundaries.append(tracker)
        # NOTE(review): tracker is advanced *before* this slice, so the t-SNE
        # sample is taken from the next scene's rows, not this scene's. Only
        # the commented-out t-SNE plot at the bottom consumes tsne_features;
        # confirm and fix before re-enabling it.
        tsne_features.extend(all_features[tracker:tracker+min(100, scene[1])])
        ind_labels.extend([i]*min(100, scene[1]))
    # Re-order scenes by diversity (ascending) for labeling and coloring.
    labels = []
    sizes = []
    intensities = []
    for index in np.argsort(scene_dists):
        label = idx_to_scenegroup[all_scenes[index][0]]
        size = all_scenes[index][1]
        intensity = scene_dists[index]
        labels.append(label)
        sizes.append(size)
        intensities.append(intensity)
    plt.close()
    # Normalize diversity scores to [0, 1]. Note: intensities is a plain
    # list here; the subtraction only works because min(intensities) is a
    # NumPy scalar (np.median output), whose reflected op converts the list.
    intensities = np.array(intensities - min(intensities))
    intensities /= np.amax(intensities)
    colors = plt.cm.Blues(intensities)
    fig, ax = plt.subplots()
    def pie_label(pct, values):
        # Label only wedges of at least 2%. `amount` is computed but unused.
        amount = int(pct*np.sum(values))
        if pct >= 2.0:
            return "{0}%".format(round(pct, 1))
        else:
            return ''
    wedges, texts, autotexts = ax.pie(sizes, labels = ['' if sizes[i]/sum(sizes) < .02 else i for i in range(len(sizes))], autopct=lambda pct: pie_label(pct, sizes), shadow=False, startangle=90, colors=colors, radius=100, labeldistance=1.1, explode=[10.] * len(sizes))
    for t in texts:
        t.set_fontsize('xx-small')
    for t in autotexts:
        t.set_fontsize('xx-small')
    for w in wedges:
        w.set_edgecolor('black')
        w.set_linewidth(.1)
    ax.legend(wedges, ['{0}: {1}'.format(chunk[0], chunk[1]) for chunk in zip(np.arange(len(labels)), labels)], title='Scene Group Categories', loc='center left', bbox_to_anchor=(1., 0, 0.5, 1.), fontsize='xx-small')
    plt.tight_layout()
    ax.axis('equal')
    # Uncomment below to see pi chart version
    # plt.show()
    plt.close()
    # Scatter plot: x = how common the scene is for this supercategory,
    # y = how much appearance diversity it contributes.
    # NOTE(review): palette and marker sizes assume exactly 16 scene groups
    # (colorz and sizez both have length 16) -- confirm for other datasets.
    colorz = sns.color_palette('hls', 16)
    xs = np.array(sizes) / sum(sizes)
    xs /= np.amax(xs)
    ys = intensities
    fig = plt.figure(figsize=(9, 5))
    fontsize = 27
    sizez = [64]*16
    plt.scatter(xs, ys, c=colorz, s=sizez)
    plt.xticks(fontsize=fontsize)
    plt.yticks(fontsize=fontsize)
    plt.xlabel('Relative Commonness', fontsize=fontsize)
    plt.ylabel('Relative Diversity', fontsize=fontsize)
    plt.title("\n{} scene contributions\n".format(datasets.GROUPINGS_TO_NAMES[supercat]), fontsize=fontsize)
    handles = []
    for lab in range(len(sizes)):
        plt.annotate(lab, (xs[lab]+.015, ys[lab]+.01))
        patch = mpatches.Patch(color=colorz[lab], label='{0}: {1}'.format(lab, labels[lab]))
        handles.append(patch)
    fontP = FontProperties()
    fontP.set_size('medium')
    plt.legend(handles=handles, prop=fontP, loc='center left', bbox_to_anchor=(1, 0.5))
    plt.tight_layout()
    if first_pass:
        # Record caption + figure for the summary PDF (section 8 / 10.png).
        to_write[8] = ["(M9) An example of how to diversify the appearance diversity of the \"{}\" category by augmenting the dataset with images in different scenes. Appearance diversity can thought of as something like intra-class variation, which is an important feature for object detection. However, there is a tradeoff between the amount of appearance diversity an object in a particular scene brings, and how common this object-scene combination is, which contributes to how easy it is to collect this kind of image.".format(datasets.GROUPINGS_TO_NAMES[supercat])]
        plt.savefig("results/{0}/{1}/10.png".format(folder_name, save_loc))
    plt.show()
    # # Visualize the supercategory features colored by their scene as a tsne
    # plot_kwds = {'alpha' : .8, 's' : 30, 'linewidths':0}
    # projection_instances = TSNE().fit_transform(tsne_features)
    # plt.scatter(*projection_instances.T, **plot_kwds, c=[colorz[ind_labels[i]] for i in range(len(tsne_features))])
    # lgd = plt.legend(handles=handles, bbox_to_anchor=(1.04,1), loc="upper left", prop=fontP)
    # plt.show()
    # plt.close()
# -
# ## Analyses
# <a id="metric9_analyses"></a>
# Scene distribution of entire dataset.
scene_distribution()
# Select the object (which has its scene diversity value next to it) to visualize qualitative examples.
scene_qual = SceneQual()
scene_qual.click('')  # '' performs the initial render without clearing output
# Cooccurrences between scenes and object categories.
if dataset.group_mapping is not None:
    interact(scene_supercat, topn=widgets.IntSlider(min=1, max=30, step=1, value=10));
# Actionable insights on how to diversify an object category's appearance. Visualization of tradeoff between how easy it is to find a scene vs how much appearance diversity it brings to the object category.
if dataset.group_mapping is not None:
    # Dropdown options: (supercategory name, index), sorted alphabetically.
    pairs = [(datasets.GROUPINGS_TO_NAMES[index], index) for index in supercat_to_scenes_to_features.keys()]
    pairs = sorted(pairs, key=lambda x: x[0])
    interact(diversify_supercat_by_scene, supercat=widgets.Dropdown(options=pairs));
# # Setting up summary pdf
# <a id="summarypdf"></a>
# All captions/figures have been recorded in to_write by the metric cells
# above; disable first-pass recording so re-runs don't overwrite them.
first_pass = False

def write_pdf(numbers):
    """Write one summary section per metric number in `numbers` into the
    global `pdf`: the caption sentences stored in to_write[i], then the
    saved figure(s) for that metric (files 1.png-10.png under the results
    directory), then a dashed separator. Metric 4 lays out three figures
    (5.png and 7.png side by side, 6.png below); metric 6 is text-only.

    NOTE(review): indentation reconstructed from a flattened notebook; the
    figure and separator code is taken to be nested under the to_write
    membership check, so metrics with no recorded caption are skipped
    entirely (their figure files may not exist either) -- confirm against
    the original notebook.
    """
    for i in numbers:
        if i in to_write.keys():
            for sentence in to_write[i]:
                pdf.write(5, sentence)
                pdf.ln()
            if i == 0:
                pdf.image('results/{0}/{1}/1.png'.format(folder_name, save_loc), h=80)
                pdf.ln()
            elif i == 1:
                pdf.image('results/{0}/{1}/2.png'.format(folder_name, save_loc), h=80)
                pdf.ln()
            elif i == 2:
                pdf.image('results/{0}/{1}/3.png'.format(folder_name, save_loc), h=80)
                pdf.ln()
            elif i == 3:
                pdf.image('results/{0}/{1}/4.png'.format(folder_name, save_loc), h=80)
                pdf.ln()
            elif i == 4:
                # Place 5.png and 7.png side by side; if writing 5.png moved
                # the cursor onto a new page (y decreased), anchor 7.png near
                # the top of that new page instead of the remembered y.
                y_spot = pdf.get_y()
                pdf.image('results/{0}/{1}/5.png'.format(folder_name, save_loc), w=85)
                after_y_spot = pdf.get_y()
                if after_y_spot < y_spot:
                    y_spot = 10
                pdf.image('results/{0}/{1}/7.png'.format(folder_name, save_loc), w=85, x=95, y=y_spot)
                pdf.ln()
                pdf.image('results/{0}/{1}/6.png'.format(folder_name, save_loc),h=80)
                pdf.ln()
            elif i == 5:
                pdf.image('results/{0}/{1}/8.png'.format(folder_name, save_loc), h=80)
                pdf.ln()
            elif i == 7:
                pdf.image('results/{0}/{1}/9.png'.format(folder_name, save_loc), h=80)
                pdf.ln()
            elif i == 8:
                pdf.image('results/{0}/{1}/10.png'.format(folder_name, save_loc), h=80)
                pdf.ln()
            # Dashed separator line between summary sections.
            pdf.ln(h=3)
            pdf.dashed_line(10, pdf.get_y(), 200, pdf.get_y())
            pdf.ln(h=3)
# +
from fpdf import FPDF

# Assemble the object-analysis summary PDF from the captions and figures
# recorded in to_write during the first pass over the metric cells.
pdf = FPDF()
pdf.add_page()
pdf.set_font('Arial', 'B', 16)
pdf.write(5, "Object-Based Summary")
pdf.ln()
pdf.ln()
# Overview Statistics section (metrics 2, 5, 7).
pdf.set_font('Arial', 'B', 12)
pdf.write(5, "Overview Statistics")
pdf.ln()
pdf.ln(h=3)
pdf.line(10, pdf.get_y(), 200, pdf.get_y())
pdf.ln(h=3)
pdf.set_font('Arial', '', 12)
write_pdf([2, 5, 7])
# Sample Interesting Findings section (metrics 0, 1, 3, 4, 6, 8).
pdf.set_font('Arial', 'B', 12)
pdf.write(5, "Sample Interesting Findings")
pdf.ln()
pdf.ln(h=3)
pdf.line(10, pdf.get_y(), 200, pdf.get_y())
pdf.ln(h=3)
pdf.set_font('Arial', '', 12)
write_pdf([0, 1, 3, 4, 6, 8])
# Pointers to notebook-only metrics not included in the summary.
pdf.set_font('Arial', 'B', 12)
pdf.write(5, "Some of the other metrics in the notebook")
pdf.ln()
pdf.ln(h=3)
pdf.line(10, pdf.get_y(), 200, pdf.get_y())
pdf.ln(h=3)
pdf.set_font('Arial', '', 12)
pdf.write(5, "- (M0) Cooccurrences of objects as a hierarchical graph")
pdf.ln()
pdf.write(5, "- (M0) Finer grained look at distribution within each object category ")
pdf.ln()
pdf.write(5, "- (M7) Size of each object category")
pdf.ln()
pdf.write(5, "- (M9) Qualitative look at what each object's scenes are like")
pdf.ln()
pdf.write(5, "- (M9) Highest/lowest cooccurrences between object categories and scenes")
pdf.ln()
pdf.output('results/{0}/{1}/summary.pdf'.format(folder_name, save_loc), "F")
# -
| Object Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Voting based Ensemble learning
# -
# cd E:\karthi_m\wind
import pandas as pd
# Wind-turbine data; 'wtg_state' is the classification target.
da=pd.read_csv('data1.csv')
da.head()
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder
# NOTE(review): LabelEncoder is applied to *every* column here, which
# integer-encodes numeric features as well as categorical ones -- confirm
# this is intended for the feature set.
y_ = da.apply(LabelEncoder().fit_transform)
y_.head()
x= y_.drop(['wtg_state'],axis=1)
y= y_['wtg_state']
from sklearn.model_selection import train_test_split
# 75/25 train/test split (no fixed random_state: split differs per run).
X_train,X_test,y_train,y_test = train_test_split(x,y,test_size=0.25)
from sklearn.naive_bayes import MultinomialNB
# Baseline 1: multinomial Naive Bayes on the encoded features.
clf = MultinomialNB()
fit_=clf.fit(X_train,y_train)
y_pred=fit_.predict(X_test)
#import the metrics class
from sklearn import metrics
cnf_matrix = metrics.confusion_matrix(y_test, y_pred)
cnf_matrix
# Baseline 2: depth-3 entropy decision tree fit on the training set.
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion='entropy',max_depth=3, random_state=42)
classifier.fit(X_train, y_train)
# +
# Model performance on the held-out test set (the original comment said
# "training set", but predictions are made on X_test).
y_pred=classifier.predict(X_test)
from sklearn import metrics
from sklearn.metrics import confusion_matrix, classification_report
accuracy = metrics.accuracy_score(y_test, y_pred)
print("Accuracy: {:.2f}".format(accuracy))
cm=confusion_matrix(y_test,y_pred)
print('Confusion Matrix: \n', cm)
# -
# -
#Importing Libraries
import numpy as np
from sklearn.ensemble import RandomForestClassifier
# Baseline 3: 10-tree entropy random forest fit on the training set.
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 42)
classifier.fit(X_train, y_train)
# +
# Model performance on the held-out test set.
# The original cell predicted X_test twice (into y_pred and the misnamed
# y_pred_train) and computed an accuracy score it never displayed; predict
# once and report accuracy like the decision-tree cell above.
y_pred = classifier.predict(X_test)
from sklearn import metrics
from sklearn.metrics import confusion_matrix, classification_report
accuracy = metrics.accuracy_score(y_test, y_pred)
print("Accuracy: {:.2f}".format(accuracy))
cm = confusion_matrix(y_test, y_pred)
print('Confusion Matrix: \n', cm)
# -
# +
#Defining Hybrid Ensemble Learning Model
# Pool of 15 base estimators for the voting ensemble: 5 Naive Bayes models,
# 5 decision trees of varying depth, and 5 random forests of varying size.
estimators = []
#Defining 5 Naive bayes Models
for nb_idx in range(1, 6):
    estimators.append(('NB{}'.format(nb_idx), MultinomialNB()))
# -
#Defining 5 Decision Tree Classifiers
for cart_idx, tree_depth in enumerate([3, 4, 5, 2, 3], start=1):
    estimators.append(('cart{}'.format(cart_idx), DecisionTreeClassifier(criterion='entropy', max_depth=tree_depth)))
#Defining 5 Random forest Classifiers
for rf_idx, n_trees in enumerate([10, 11, 12, 14, 15], start=1):
    estimators.append(('rf{}'.format(rf_idx), RandomForestClassifier(n_estimators=n_trees, criterion='entropy')))
from sklearn.ensemble import VotingClassifier
# Hard-voting ensemble over all 15 base estimators defined above.
ensemble = VotingClassifier(estimators)
ensemble.fit(X_train, y_train)
y_pred = ensemble.predict(X_test)
# Confusion matrix of the ensemble on the held-out test set.
ensembler_cm = confusion_matrix(y_test, y_pred)
ensembler_cm
from sklearn import model_selection
# Cross-Validation
seed = 7
# shuffle=True is required here: scikit-learn (>= 0.24) raises a ValueError
# when random_state is set on KFold with shuffle=False, and without
# shuffling the seed had no effect on the folds anyway.
kfold = model_selection.KFold(n_splits=10, shuffle=True, random_state=seed)
results = model_selection.cross_val_score(ensemble, X_train, y_train, cv=kfold)
print(results.mean())
| Voting based Ensemble learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from uuid import uuid4
from powersimdata import Scenario

# Build a small 3-hour Eastern-interconnect test scenario. The uuid4 suffix
# makes the scenario name unique so repeated runs don't collide.
scenario = Scenario()
print(scenario.state.name)
scenario.set_grid(interconnect="Eastern")
scenario.set_name("test", "comp_" + str(uuid4())[:4] + "_glpk")
scenario.set_time("2016-01-01 00:00:00", "2016-01-01 03:00:00", "1H")
scenario.set_base_profile("demand", "vJan2021")
scenario.set_base_profile("hydro", "vJan2021")
scenario.set_base_profile("solar", "vJan2021")
scenario.set_base_profile("wind", "vJan2021")
# Change table: scale Arkansas solar plant capacity up by 10%.
scenario.change_table.scale_plant_capacity(
    "solar", zone_name={"Arkansas": 1.1})
grid = scenario.get_grid()
ct = scenario.get_ct()
# +
# Create the scenario server-side, upload inputs, and launch with GLPK.
scenario.print_scenario_info()
scenario.create_scenario()
scenario.prepare_simulation_input()
resp = scenario.launch_simulation(solver="glpk")
# -
scenario.check_progress()
Scenario().get_scenario_table()
| standalone/_archive/solveCompare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="b6wNvvAyUD_j" colab_type="code" colab={}
import pandas as pd
# + id="_Xm2sW-2bSm1" colab_type="code" colab={}
# # !wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_11_1.tar' -a UKIRT_2016.log
# !wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_11_2.tar' -a UKIRT_2016.log
# !wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_11_3.tar' -a UKIRT_2016.log
# !wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_11_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_12_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_12_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_12_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_12_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_13_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_13_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_13_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_13_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_14_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_14_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_14_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_14_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_21_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_21_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_21_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_21_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_22_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_22_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_22_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_22_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_23_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_23_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_23_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_23_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_24_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_24_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_24_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_24_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_31_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_31_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_31_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_31_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_32_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_32_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_32_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_32_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_33_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_33_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_33_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_33_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_34_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_34_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_34_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_34_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_41_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_41_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_41_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_41_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_42_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_42_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_42_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_42_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_43_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_43_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_43_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_43_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_44_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_44_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_44_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_44_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_51_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_51_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_51_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_51_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_52_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_52_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_52_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_52_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_53_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_53_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_53_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_53_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_54_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_54_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_54_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_54_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_61_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_61_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_61_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_61_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_62_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_62_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_62_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_62_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_63_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_63_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_63_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_63_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_64_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_64_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_64_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_64_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_71_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_71_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_71_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_71_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_72_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_72_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_72_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_72_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_73_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_73_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_73_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_73_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_74_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_74_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_74_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_74_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_81_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_81_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_81_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_81_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_82_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_82_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_82_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_82_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_83_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_83_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_83_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_83_4.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_84_1.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_84_2.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_84_3.tar' -a UKIRT_2016.log
# wget 'https://exoplanetarchive.ipac.caltech.edu/data/ETSS/UKIRT/metadata/tar/2016/CASU_16_84_4.tar' -a UKIRT_2016.log
# + id="bzCsZJ5ob0Ck" colab_type="code" colab={}
# !tar xvf CASU_16_11_2.tar
# + id="GqdrfP6zd6Ev" colab_type="code" colab={}
# # !for x in CASU_16_11_*.tar; do echo $x; done
# !for x in CASU_16_11_*.tar; do tar xf $x; done
# + id="pU96AlCtcBLM" colab_type="code" outputId="0a67d882-bab5-44bb-e4d9-39eb4861aaf7" executionInfo={"status": "ok", "timestamp": 1575312891689, "user_tz": 300, "elapsed": 222756, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
import os
import re
import pickle  # used at the end of this cell; imported here so the cell is self-contained
from tqdm import tqdm

# Folder of per-source photometry tables extracted from one CASU tar archive.
directory = 'CASU_16_11_3'
mag = []
emag = []
# Minimum fraction of valid (non-NaN) magnitude samples required to keep a light curve.
threshold = 0.85
for filename in tqdm(sorted(os.listdir(directory))):
    if filename.endswith(".tbl"):
        # Each .tbl file holds one light curve: HJD index, magnitude, magnitude error.
        tmp = pd.read_table(os.path.join(directory, filename), skiprows=21,
                            delim_whitespace=True,
                            header=None,
                            index_col=0,
                            names=['hjd', 'mag', 'emag'])
        # Keep only light curves whose fraction of valid samples exceeds the threshold
        # (the old comment said "skip if >90% NA", which did not match threshold=0.85).
        if tmp['mag'].count() / len(tmp['mag']) > threshold:
            # The 7-digit group in the file name is the source index ID.
            # Raw string avoids the invalid "\d" escape-sequence warning.
            idx = re.findall(r"(\d{7})", filename)[0]
            mag.append(tmp['mag'].rename(idx))
            emag.append(tmp['emag'].rename(idx))

# Rows = sources, columns = observation epochs.
mag = pd.concat(mag, axis=1).T
emag = pd.concat(emag, axis=1).T
# Context managers close the pickle files promptly (the original left them open).
out_dir = '/content/drive/My Drive/pTSA_microlensing/UKIRT/'
with open(out_dir + directory + '_mag.pkl', 'wb') as fh:
    pickle.dump(mag, fh)
with open(out_dir + directory + '_emag.pkl', 'wb') as fh:
    pickle.dump(emag, fh)
# + [markdown] id="-7coMzhEAPjW" colab_type="text"
#
# + id="fuZK8CqzcguG" colab_type="code" outputId="5ecf6cf1-50eb-4610-d51f-a8411890b346" executionInfo={"status": "ok", "timestamp": 1574281198080, "user_tz": 300, "elapsed": 419, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Would the last-read light curve fail a 90% missing-data cutoff?
tmp['mag'].isna().sum() / len(tmp['mag']) > 0.9
# + id="vmnAbKH3dgGR" colab_type="code" outputId="90eca5e6-f1e1-445b-cf77-4db73913ed4c" executionInfo={"status": "ok", "timestamp": 1574281283334, "user_tz": 300, "elapsed": 401, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# Number of valid (non-NaN) magnitude samples in the last-read table.
tmp['mag'].count()
# + id="5dT2rikOd9l-" colab_type="code" colab={}
# ls -lh
# + id="0E1P_nx_fsT4" colab_type="code" outputId="a5cdda07-e20f-4d1a-e423-b6261a2e61da" executionInfo={"status": "ok", "timestamp": 1574281645367, "user_tz": 300, "elapsed": 2824, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 451}
# Quick look at the assembled magnitude matrix (sources as columns at this point).
pd.concat(mag,axis=1)
# + id="lMY25_7sicCu" colab_type="code" outputId="053dadc5-3aaf-482b-aaff-c0162c4a6b72" executionInfo={"status": "ok", "timestamp": 1575304567313, "user_tz": 300, "elapsed": 21608, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 121}
# Mount Google Drive so the pickled light-curve matrices can be saved and reloaded.
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="66KwEYgse2QV" colab_type="text"
# ## UKIRT Confirmed Microlensing
# + id="j4UFsVJvf09d" colab_type="code" outputId="f474a3e9-89eb-4397-8779-438850542cd7" executionInfo={"status": "ok", "timestamp": 1575305720790, "user_tz": 300, "elapsed": 1680, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 222}
# # !wget https://exoplanetarchive.ipac.caltech.edu/cgi-bin/FileDownload/nph-download?ref=/work/TMP_GwWS7c_5969/IERDownload/5969//download_exoarch_5969.bat
# # !wget https://exoplanetarchive.ipac.caltech.edu/cgi-bin/FileDownload/nph-download?ref=/work/TMP_ivHBn2_8695/IERDownload/8695//download_exoarch_8695.bat
# # !wget https://exoplanetarchive.ipac.caltech.edu/cgi-bin/FileDownload/nph-download?ref=/work/TMP_J7dwYA_9871/IERDownload/9871//download_exoarch_9871.bat
# # !wget https://exoplanetarchive.ipac.caltech.edu/cgi-bin/FileDownload/nph-download?ref=/work/TMP_eDiO1U_9805/IERDownload/9805//download_exoarch_9805.bat
# # !wget https://exoplanetarchive.ipac.caltech.edu/cgi-bin/FileDownload/nph-download?ref=/work/TMP_PB9YU1_9739/IERDownload/9739//download_exoarch_9739.bat
# # !wget https://exoplanetarchive.ipac.caltech.edu/cgi-bin/FileDownload/nph-download?ref=/work/TMP_ZBHfPx_9650/IERDownload/9650//download_exoarch_9650.bat
# # !wget https://exoplanetarchive.ipac.caltech.edu/cgi-bin/FileDownload/nph-download?ref=/work/TMP_xGyjhD_9517/IERDownload/9517//download_exoarch_9517.bat
# # !wget https://exoplanetarchive.ipac.caltech.edu/cgi-bin/FileDownload/nph-download?ref=/work/TMP_dK6gEg_9379/IERDownload/9379//download_exoarch_9379.bat
# # !wget https://exoplanetarchive.ipac.caltech.edu/cgi-bin/FileDownload/nph-download?ref=/work/TMP_AM4LKo_9269/IERDownload/9269//download_exoarch_9269.bat
# + id="W4uz4ygdfHOM" colab_type="code" outputId="3d9d2ee0-da18-4b30-8840-90fef4a76e85" executionInfo={"status": "ok", "timestamp": 1575305750034, "user_tz": 300, "elapsed": 1326, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 255}
# ls
# + id="z8d8KEp9g3Hu" colab_type="code" colab={}
# !chmod +x *.bat
# + id="TcXmYA8MfIsF" colab_type="code" colab={}
# !for x in *.bat; do ./$x; done
# # !./*.bat
# + id="9UKQ_t_sfSsv" colab_type="code" outputId="5baf44d9-d907-44b8-d2d1-9402093201b4" executionInfo={"status": "ok", "timestamp": 1575306052942, "user_tz": 300, "elapsed": 1840, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 541}
# ls -l
# + id="PpN7pB3uteIc" colab_type="code" colab={}
import matplotlib.pyplot as plt
from tqdm import tqdm
import re
import pickle
# + id="DjfQ9CT0gR1C" colab_type="code" outputId="a65c6976-84cb-42a9-f994-71b2eb19def5" executionInfo={"status": "ok", "timestamp": 1575311328257, "user_tz": 300, "elapsed": 428, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 168}
# Build magnitude / magnitude-error matrices for the confirmed-microlensing light curves.
# NOTE(review): this reuses the global `directory` set in an earlier cell — confirm it
# points at the folder containing the microlensing .tbl tables when this cell runs.
mag, emag = [], []
for filename in sorted(os.listdir(directory)):
    if filename.endswith(".tbl"):
        # One light curve per .tbl file: HJD index, magnitude, magnitude error.
        tmp = pd.read_table(os.path.join(directory, filename), skiprows=21,
                            delim_whitespace=True,
                            header=None,
                            index_col=0,
                            names=['hjd', 'mag', 'emag'])
        # 7-digit source index encoded in the file name (raw string for the \d escape).
        idx = re.findall(r"(\d{7})", filename)[0]
        mag.append(tmp['mag'].rename(idx))
        emag.append(tmp['emag'].rename(idx))
        # Report the completeness of each light curve (valid / total samples).
        print('{}/{}'.format(tmp['mag'].count(), len(tmp['mag'])),
              '\t{:.2%}'.format(tmp['mag'].count() / len(tmp['mag'])))

# Rows = sources, columns = observation epochs.
mag = pd.concat(mag, axis=1).T
emag = pd.concat(emag, axis=1).T
# The matrices were pickled to Drive once; the next cell reloads them from there.
# pickle.dump(mag, open('/content/drive/My Drive/pTSA_microlensing/UKIRT/microlensing_mag.pkl', 'wb'))
# pickle.dump(emag, open('/content/drive/My Drive/pTSA_microlensing/UKIRT/microlensing_emag.pkl', 'wb'))
# + id="l31laTqz23S9" colab_type="code" colab={}
# Reload the previously saved confirmed-microlensing light-curve matrices from Drive.
mag = pickle.load(open('/content/drive/My Drive/pTSA_microlensing/UKIRT/microlensing_mag.pkl', 'rb'))
emag = pickle.load(open('/content/drive/My Drive/pTSA_microlensing/UKIRT/microlensing_emag.pkl', 'rb'))
# + id="0QVRqXZDtify" colab_type="code" outputId="b6be4b6f-144b-47a2-b7ff-74feb3cca5b5" executionInfo={"status": "ok", "timestamp": 1575311112586, "user_tz": 300, "elapsed": 2042, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# One figure per confirmed-microlensing source: plot the magnitude light curve
# with missing epochs removed. (.T on a Series row is a no-op, so it is dropped.)
for source_idx in range(mag.shape[0]):
    curve = mag.iloc[source_idx].dropna()
    plt.plot(curve)
    plt.show()
# + id="UI4Wt9NjvM3I" colab_type="code" outputId="4bf5467f-1e28-4ad5-a3b0-db82e00a91de" executionInfo={"status": "ok", "timestamp": 1575311115950, "user_tz": 300, "elapsed": 2372, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14278274106310182370"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# One figure per source: plot the magnitude-error light curve with missing epochs removed.
for source_idx in range(emag.shape[0]):
    curve = emag.iloc[source_idx].dropna()
    plt.plot(curve)
    plt.show()
# + id="-lPJjY-G0Xta" colab_type="code" colab={}
| PTSA_UKIRTdata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import missingno as mso
import seaborn as sns
import warnings
from pywaffle import Waffle
from sklearn.impute import SimpleImputer
import scipy.stats as sct
from statsmodels.graphics.gofplots import qqplot
from sklearn.preprocessing import (
OneHotEncoder, Binarizer, KBinsDiscretizer,
MinMaxScaler, StandardScaler, PolynomialFeatures
)
warnings.filterwarnings('ignore')
sns.set_style('whitegrid')
plt.rcParams['figure.dpi']=100
# +
# Colour palettes used throughout the exploratory plots.
cyan_grad = ['#142459', '#176BA0', '#19AADE', '#1AC9E6', '#87EAFA']
purple_grad = ['#491D8B', '#6929C4', '#8A3FFC', '#A56EFF', '#BE95FF']
teal_grad = ['#005D5D', '#007D79', '#009D9A', '#08BDBA', '#3DDBD9']
color_mix = ['#0698DC', '#05E6FA', '#09899B', '#04C4AC', '#AB1CB4']
black_grad = ['#100C07', '#3E3B39', '#6D6A6A', '#9B9A9C', '#CAC9CD']
# Preview each palette as a swatch strip, in the same order as before.
for palette in (cyan_grad, purple_grad, teal_grad, color_mix, black_grad):
    sns.palplot(palette)
# -
# Load the call-center campaign dataset (~41k rows, 19 columns).
df = pd.read_csv('callcenter_case.csv')
# Display settings so all rows/columns can be shown during exploration.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
# ## **Entendimento do problema**
# O entendimento do problema é de suma importância para a construção de um bom modelo.
# **Detecção de aderência em campanha de callcenter**
# Você foi contratado por uma empresa do ramo de Call Center para desenvolver um modelo preditivo de detecção de aderência em uma campanha feita para um determinado banco.
#
# Para esse trabalho iremos utilizar um dataset, enviado em anexo, que contém aproximadamente 41 mil linhas e 19 colunas. Esse dataset traz os registros pessoais dos clientes, como idade, profissão, educação entre outros, dados das campanha de telemarketing realizadas, como número de contatos, duração, alguns índices financeiros e a coluna aderencia_campanha, que mostra se o cliente aderiu ou não a campanha.
#
# Você deverá construir um modelo para detecção de aderência dessa campanha, percorrendo todas as etapas da construção de um modelo de machine learning e documentando os passos seguidos e resultados obtidos em cada uma das etapas do processo, até chegar a etapa final dos resultados encontrados com a predição feita em cima do modelo.
#
#
# **Utilização de aprendizagem supervisionada por clasificação**
# ## **Informações iniciais**
# +
# The \033[...] sequences are ANSI escape codes for coloured/bold terminal output.
print('\033[36m\033[1m'+'.: Dataset Info :.')
print('\033[0m\033[36m*' * 20)
print('\033[0m'+'Total Rows:'+'\033[36m\033[1m', df.shape[0])
print('\033[0m'+'Total Columns:'+'\033[36m\033[1m', df.shape[1])
print('\033[0m\033[36m*' * 20)
print('\n')
print('\033[1m'+'.: Dataset Details :.')
print('\033[0m\033[36m*' * 22 +'\033[0m')
# Column dtypes and non-null counts; memory usage suppressed for brevity.
df.info(memory_usage = False)
# -
# ## **Pré-processamento e manipulação dos dados**
# Sample 20 rows, sorted by age (descending), to get a feel for the data.
df.sample(20).sort_values(by='idade', ascending=False)
# A ideia inicial foi entender de um modo geral, como a idade está sendo relevante para a aderência da campanha. E percebe-se que em sua maioria para essa amostra de dados, as pessoas não aderiram a campanha, indicando um desbalanceamento dos dados, porém irei deixar para verificar o desbalanceamento depois de verificar outros problemas em relação ao conjunto de dados.
# Summary statistics for the numeric columns.
df.describe()
# ### **Checking the columns**
df.columns
df.dtypes
# How many columns of each dtype.
df.dtypes.value_counts()
# #### **Alterando o tipo das colunas**
# NOTE(review): astype(int) truncates the decimal part of these indicators
# (e.g. a 4.8 rate becomes 4) — confirm this loss of precision is intended.
df['indice_confianca_consumidor'] = df['indice_confianca_consumidor'].astype(int)
df['taxa_juros_media'] = df['taxa_juros_media'].astype(int)
df['indice_precos_consumidor'] = df['indice_precos_consumidor'].astype(int)
# ### **Missing data**
# Count missing values per column before imputation.
df.isnull().sum()
# Existem muitos dados nulos em relação ao tamanho do dataset nas colunas educação, profissão, inadimplente, emprestimo moradia, emprestimo pessoal e estado civil, que apesar de serem colunas categóricas, podemos realizar tranformações desses dados e precisar delas durante o estudo.
# Verificado que não existe problema em apagar os valores nulos dessas colunas, tendo em vista que tais variáveis não irão impactar tanto o modelo como as outras variáveis disponíveis; em seguida, poderia apagar estes dados.
#
# Todavia, irei aplicar a técnica de Simple Imputer para preencher os dados ausentes pelos valores que ocorrem o maior número de vezes em uma determinada coluna. Essa abordagem é bastante interessante e aplicável para colunas categóricas, sendo este o problema dos dados ausentes.
# +
# Fill missing values with each column's most frequent value
# (a reasonable strategy for the categorical columns that have the NaNs).
preenche_por_frequencia = SimpleImputer(strategy = "most_frequent")
imputer_frequencia = preenche_por_frequencia.fit_transform(df)
# -
# fit_transform returns a plain array; rebuild the DataFrame with the original column names.
df = pd.DataFrame(imputer_frequencia, columns=df.columns)
df.head()
# Confirm no missing values remain after imputation.
df.isnull().sum()
# ### **Transformando colunas categóricas**
# As colunas que possuem linhas com sim e não, consequentemente pode-se transformar facilmente em colunas com classes binárias para utilizar no processo de modelagem, então estas colunas são:
# Distribution of 'inadimplente' (defaulter) values before encoding.
df.inadimplente.value_counts()
# Columns whose values are 'sim'/'nao' can be binarised directly.
colunas_classe_binaria = df[['inadimplente','emprestimo_moradia', 'emprestimo_pessoal', 'aderencia_campanha']].copy()
# Encoding: 'sim' -> 1, 'nao' -> 0.
sim_nao_map = {'sim': 1, 'nao': 0}
# Iterating a DataFrame yields its column names; map each of those columns in df.
for col_name in colunas_classe_binaria:
    df[col_name] = df[col_name].map(sim_nao_map)
# Por outro lado, apesar de não ter classificação dos registros como sim ou não, as colunas de meio de contato e campanha anterior, também podem ser transformar em classificação binaria e multiclassificação respectivamente.
def make_contato(x):
    """Encode the contact channel: 'telefone' -> 0, anything else ('celular') -> 1."""
    return 0 if x == 'telefone' else 1
# Apply the binary contact-channel encoding to the whole column.
df['meio_contato'] = df['meio_contato'].apply(make_contato)
def make_campanha_anterior(x):
    """Encode the previous-campaign outcome: 'fracasso' -> 0, 'sucesso' -> 1, anything else (no previous campaign) -> 2."""
    if x == 'fracasso':
        return 0
    return 1 if x == 'sucesso' else 2
# Apply the previous-campaign outcome encoding.
df['campanha_anterior'] = df['campanha_anterior'].apply(make_campanha_anterior)
# Check which month abbreviations occur before mapping them to numbers.
df.mes.unique()
# As colunas categóricas dia da semana e mês que contém os dados diários e mensais também podem acabar sendo muito relevantes para este problema em questão, tendo em vista que em muito casos o dia ou mês pode ter um alto impacto para o cliente aderir uma campanha.
# Weekday abbreviations -> day numbers (Mon=1 .. Fri=5; campaign calls are weekdays only).
dias_semanais = {'seg': 1, 'ter':2, 'qua':3, 'qui':4, 'sex':5}
df['dia_da_semana'] = df['dia_da_semana'].map(dias_semanais)
# Portuguese month abbreviations -> month numbers (data covers March through December).
meses = {'mar':3, 'abr':4, 'mai': 5, 'jun':6, 'jul':7, 'ago':8, 'set': 9, 'out':10, 'nov':11, 'dez': 12}
df['mes'] = df['mes'].map(meses)
df.head()
# Uma outra forma interessantes de se trabalhar com variáveis categóricas é através da utilização do One-hot encoding.
# Com ele, uma variável categórica com categorias é transformada em novas variáveis binárias (0 ou 1), onde a presença do 1 (hot) significa que aquela observação pertence àquela categoria, e 0 (cold) que não pertence.
# O objetivo com o one-hot encoding é transformar determinada variável categórica em uma sequência de variáveis numéricas binárias, cada uma descrevendo uma classe da variável. Algumas variáveis que podemos aplicar está técnica e seja útil para o modelo são:
#
# * estado_civil
# * profissão
# * educação
# Porém, está ténica só será aplicada as variáveis Educação e Profissão, para a variável estado civil não será necessário, então o próximo passo é treinar e transformar o encoder. O motivo é que possivelmente as variáveis educação e profissão tenha um impacto maior, tendo em vista que possivelmente existe uma tendência para as pessoas que tem uma educação e profissão melhores aderir mais a campanha.
# One-hot encode education and profession into dense 0/1 matrices.
# Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24 — use the builtin int.
one_hot_encoder_educacao = OneHotEncoder(sparse=False, dtype=int)
df_encoded_educacao = one_hot_encoder_educacao.fit_transform(df[["educacao"]])
one_hot_encoder_profissao = OneHotEncoder(sparse=False, dtype=int)
df_encoded_profissao = one_hot_encoder_profissao.fit_transform(df[["profissao"]])
# Através do atributo categories_ do encoder, pode-se visualizar como ficou as categorias de cada variável que foi transformada.
# Categories learned by each encoder, in the column order of the encoded matrices.
one_hot_encoder_educacao.categories_
one_hot_encoder_profissao.categories_
# Agora posso criar as novas colunas que descrevem cada categoria.
#
# Repare que, para qualquer linha, apenas uma das colunas contém um 1, indicando a qual categoria aquela observação pertence. Isso acontece, obviamente, se as categorias forem mutuamente exclusivas (uma observação não pode pertencer a mais de uma categoria simultaneamente).
# +
columns_encoded_educacao = one_hot_encoder_educacao.categories_[0]
# Append one 0/1 column per education level, aligned with df's row order.
df = pd.concat([df, pd.DataFrame(df_encoded_educacao, columns=columns_encoded_educacao)], axis=1)
df[['analfabeto','curso_tecnico','ensino_medio','fundamental_4a','fundamental_6a','fundamental_9a','graduacao_completa']].head(10)
# -
# +
columns_encoded_profissao = one_hot_encoder_profissao.categories_[0]
# Append one 0/1 column per profession.
df = pd.concat([df, pd.DataFrame(df_encoded_profissao, columns=columns_encoded_profissao)], axis=1)
# -
df[['admin.','aposentado','colarinho_azul','desempregado','dona_casa','empreendedor','estudante','gerente','informal','servicos','tecnico']].head()
# A maior parte da matriz retornada é composta por zeros, sendo apenas alguns elementos compostos de um.
#
# Dizemos que essa matriz é esparsa. O que pode acabar sendo um grande desperdício de memória trabalhar diretamente como uma matriz esparsa assim. Por isso, existe o default do OneHotEncoder que retorna uma sparse matrix do NumPy, economizando espaço em memória, mas para este problema foi decidido não aplicar.
# ### **Como os dados estão distribuidos?**
# #### **Teste de shapiro-wilk**
# O teste de Shapiro-Wilk é um teste de aderência à distribuição normal, que é abreviado para teste de normalidade. O intuito será verificar se algumas variáveis númericas vem ou não de uma distribuição normal.
# A hipótese nula, é a normalidade dos dados. Se o valor-p for menor que o nível de significância , então temos evidências de que os dados não vêm de uma distribuição normal. Se o valor-p for maior que alpha, então não podemos afimar que os dados não vêm de uma distribuição normal (o que é sutilmente diferente de afirmar que eles vêm de uma distribuição normal).
#
#
# Irei aplicar o teste de shapiro wilk e também verificar a distribuição dos dados para as colunas númericas que considerei importantes para o problema e consequentemente o desempenho do modelo.
def teste_shapiro_wilk(sample):
# Retorne aqui o resultado da questão 1.
# Teste de normalidade:
stat, p = sct.shapiro(sample)
print(f'statistics:{stat}, p-value:{p}')
# Interpretação:
alpha = 0.05
if p > alpha:
return True
else:
return False
df.head()
# Shapiro-Wilk normality check for the three macro-economic indicators.
teste_shapiro_wilk(df['indice_precos_consumidor'])
teste_shapiro_wilk(df['indice_confianca_consumidor'])
teste_shapiro_wilk(df['taxa_juros_media'])
# Histograms with KDE of the same indicators (distplot is deprecated in newer seaborn).
sns.distplot(df['taxa_juros_media'], bins=25)
sns.distplot(df['indice_precos_consumidor'], bins=25)
sns.distplot(df['indice_confianca_consumidor'], bins=25)
# ### *Notas*
#
# * É possível observar que a média da taxa de juros das pessoas está mais concentrada em 4 e não está tão distribuida.
# Apesar do histograma apresentar uma distribuição que aparenta os dados está normalmente distribuidos. O teste de Shapiro-Wilk diz que os dados não são normalmente distruibuidos. Isso significa que é possivel de p-value ser maior que alpha, mostrando que há evidências de que a amostra não se trata de uma distruibição normal.
# ### **Normalizar ou padronizar os dados?**
# Normalmente usamos a normalização quando você não sabe a distribuição dos dados, ou sabe que não é uma gaussiana, e é útil em algoritmos que não fazem suposições sobre a distribuição, como KNN ou redes neurais. Já a padronização a gente usa quando sabemos que a distribuição dos dados tem uma distribuição gaussiana, ou muito parecido com a curva da gaussiana.
# Normalização é o processo de colocar uma variável numérica em uma escala [0,1] pré-determinada, geralmente , mas também é comum ser [-1,1].
#
# Para colocar no intervalo [0,1], basta subtrair cada valor do valor mínimo e dividir pela diferença do valor máximo e mínimo:
#
#
# Neste caso iremos normalizar os dados da coluna X que não estão distribuidos normalmente. Tendo em vista que são colunas númericas importantes para o problema em questão.
# Min-max scaling to [0, 1]; the StandardScaler is created but not used below.
minmax_scaler = MinMaxScaler(feature_range=(0, 1))
standard_scaler = StandardScaler()
df['taxa_juros_media_normalizado'] = minmax_scaler.fit_transform(df[['taxa_juros_media']])
df['indice_precos_consumidor_normalizado'] = minmax_scaler.fit_transform(df[['indice_precos_consumidor']])
df['indice_confianca_consumidor_normalizado'] = minmax_scaler.fit_transform(df[['indice_confianca_consumidor']])
# Confirm each normalised column spans exactly [0, 1].
print('Taxa de juros media normalizada: ', df['taxa_juros_media_normalizado'].min(), df['taxa_juros_media_normalizado'].max())
print('Indice de preços de consumidor normalizado: ', df['indice_precos_consumidor_normalizado'].min(), df['indice_precos_consumidor_normalizado'].max())
print('Indice de confiança de consumidor normalizado: ', df['indice_confianca_consumidor_normalizado'].min(), df['indice_confianca_consumidor_normalizado'].max())
# Min-max scaling does not change the shape of a distribution, only its range.
sns.distplot(df['taxa_juros_media_normalizado'], bins=25)
sns.distplot(df['indice_precos_consumidor_normalizado'], bins=25)
sns.distplot(df['indice_confianca_consumidor_normalizado'], bins=25)
# Se comparar com as distribuições anteriores, é possível notar uma pequena diferença na distribuição dos dados.
# ### **Outliers**
# Devido a algumas concatenações o tipo de dados acabou alterando, então decidi alterar o tipo das colunas novamente, apesar de não ser tão necessário.
# +
# The earlier concatenations reset these columns to object dtype; cast back to int.
df['indice_confianca_consumidor'] = df['indice_confianca_consumidor'].astype(int)
df['taxa_juros_media'] = df['taxa_juros_media'].astype(int)
df['indice_precos_consumidor'] = df['indice_precos_consumidor'].astype(int)
df['dias_ultimo_contato'] = df['dias_ultimo_contato'].astype(int)
df['qtd_contatos_campanha'] = df['qtd_contatos_campanha'].astype(int)
df['qtd_contatos_total'] = df['qtd_contatos_total'].astype(int)
df['idade'] = df['idade'].astype(int)
# -
df.describe()
# Irei continuar a investigação para as mesmas variáveis númericas que foi verificado como os dados estavam distribuidos.
# Boxplots of the normalised indicators to look for outliers.
# NOTE(review): passing x= together with orient="vertical" is unusual in seaborn —
# confirm the intended orientation of these plots.
sns.boxplot(x=df["taxa_juros_media_normalizado"], orient="vertical");
sns.boxplot(x=df["indice_confianca_consumidor_normalizado"], orient="vertical");
sns.boxplot(x=df["indice_precos_consumidor_normalizado"], orient="vertical");
# Como estas variáveis através da interpretação do boxplot não aparenta ter outliers, seguiremos para a próxima etapa. Outras variáveis não foram avaliadas pois serão descartadas.
# ### **Dados desbalanceados**
# Para finalizar esta etapa de tratamento dos dados, eu poderia deletar algumas colunas, com o objetivo de trabalhar com mais facilidade e diminuir o tamanho do conjunto de dados, entretanto, acredito que todas elas serão muito utéis tanto para a análise exploratória, como o desenvolvimento do modelo.
#
# Também irei definir inicialmente as variáveis de entrada e a variável de saída, mas essas variáveis podem ser alteradas na etapa de seleção de variáveis e engenharia de recursos. O objetivo é apenas ter uma divisão inicial e corrigir problemas de desbalanceamento dos dados.
# Drop the raw categorical columns (their encoded versions were added earlier).
# The target column stays in X for now so it can be used in the exploratory analysis.
# Fix: the positional `axis` argument of DataFrame.drop was removed in pandas 2.0.
X = df.drop(['profissao', 'estado_civil', 'educacao'], axis=1)
y = df["aderencia_campanha"]
# Class counts of the target — heavily imbalanced (~88% did not adhere vs ~11% did).
df['aderencia_campanha'].value_counts()
aderencia_distribuicao = df['aderencia_campanha'].value_counts()
# Waffle chart of the class imbalance (5 x 10 grid of person icons).
fig = plt.figure(
    FigureClass=Waffle,
    rows=5,
    columns=10,
    colors=("#000000", "#983D3D"),
    values=aderencia_distribuicao,
    title={
        'label': 'Aderência da campanha',
        'loc': 'left',
        'fontdict': {
            'fontsize': 15
        }},
    icons='child',
    font_size=50,
    legend={
        'loc': 'lower left',
        'bbox_to_anchor': (-0.04, -0.2),
        'ncol': len(aderencia_distribuicao),
        'framealpha': 0,
        'fontsize': 15
    },
    labels=['Cliente não aderiu (88%)', 'Cliente aderiu (11%)'],
    icon_legend=True,
    figsize=(15, 5)
)
plt.tight_layout()
plt.show()
# Nota-se então que os dados estão bastante desbalanceados, o que acaba influenciando negativamente o modelo para um lado, que seria os dados pessoas que não aderiram a campanha, por esse motivo existem algumas ténicas de balanceamento de dados.
# #### **Smote**
# SMOTE é uma das técnicas mais populares de reamostragem em casos de classes desbalanceadas. Trata-se de uma técnica de oversampling que adiciona dados sintéticos à classe minitoritária através de uma heurística relativamente simples.
# O SMOTE pode ser pensado como um oversampling aleatório adicionado de uma perturbação direcionada. Isso evita os problemas de overfitting existentes no oversampling aleatório simples.
#
# Esse algoritmo não está disponível por padrão no sklearn, mas pode ser encontrado nos seus projetos de contribuição na biblioteca imblearn.
from imblearn.over_sampling import SMOTE
from sklearn.decomposition import PCA
# Para definir o X e Y irei descartar apenas as variáveis de entrada que acredito não serem tão relevantes para o problema. E a variável de saída será a classe binária da coluna aderencia da campanha.
# +
# Oversample the minority class with synthetic (interpolated) examples until balanced.
smote = SMOTE(sampling_strategy="minority")
X_smote, y_smote = smote.fit_resample(X, y)
# -
df.shape
X_smote.shape
# This makes the number of positive and negative samples equal (ratio = 1.0):
sum(y_smote == True)/sum(y_smote == False)
# Save the final datasets: treated dataframe plus balanced features/target.
df.to_csv("dados_tratados.csv",index= False)
X_smote.to_csv("X_dados_balanceados.csv",index= False)
y_smote.to_csv('y.csv', index=False)
| Case interview/processamento_dos_dados.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS109A Introduction to Data Science
# # Lab 10: Random Forest and Boosting
#
# **Harvard University**<br/>
# **Fall 2021**<br/>
# **Instructors**: <NAME> and <NAME><br/>
# **Lab Team**: <NAME>, <NAME>, <NAME>, and <NAME><br/>
# **Authors**: <NAME>, and <NAME>
# <hr style='height:2px'>
#RUN THIS CELL
# Fetch and apply the CS109 notebook stylesheet.
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text
HTML(styles)
# This section will work with a spam email dataset again. Our ultimate goal is to be able to build models so that we can predict whether an email is spam or not spam based on word characteristics within each email. We will review Decision Trees and Bagging methods, and introduce Random Forest and Boosting: Ada Boost and XGBoost.
#
# Specifically, we will:
#
# 1. *Quick review of last week*
# 2. What is a Random Forest model?
# 3. Build the Decision Tree model, Bagging model, Random Forest Model for comparison with Boosting.
# 4. *Theory:* What is Boosting?
# 5. Use the Adaboost on the Spam Dataset.
# 6. *Theory:* What is Gradient Boosting and XGBoost?
# 7. Use XGBoost on the Spam Dataset: Extreme Gradient Boosting
#
# Optional: Example to better understand Bias vs Variance tradeoff.
# ---------
# ## 1. *Quick review of last week*
#
# #### The Idea: Decision Trees are just flowcharts and interpretable!
#
# It turns out that simple flow charts can be formulated as mathematical models for classification and these models have the properties we desire;
# - interpretable by humans
# - have sufficiently complex decision boundaries
# - the decision boundaries are locally linear, each component of the decision boundary is simple to describe mathematically.
# ----------
#
# #### How to build Decision Trees (the Learning Algorithm in words):
# To learn a decision tree model, we take a greedy approach:
# 1. Start with an empty decision tree (undivided feature space)
# 2. Choose the ‘optimal’ predictor on which to split and choose the ‘optimal’ threshold value for splitting by applying a **splitting criterion (1)**
# 3. Recurse on on each new node until **stopping condition (2)** is met
# #### So we need a (1) splitting criterion and a (2) stopping condition:
#
# #### (1) Splitting criterion
#
# <img src="data/split2_adj.png" alt="split2" width="70%"/>
# #### (2) Stopping condition
#
# **Not stopping while building a deeper and deeper tree = 100% training accuracy — yet we will overfit!**
#
# To prevent the **overfitting** from happening, we should have stopping condition.
#
# -------------
#
# #### How do we go from Classification to Regression?
#
# - For classification, we return the majority class in the points of each leaf node.
# - For regression we return the average of the outputs for the points in each leaf node.
#
# -------------
#
# ### ensemble: a group of items viewed as a whole rather than individually
#
# #### What is bagging?
#
# One way to adjust for the high variance of the output of an experiment is to perform the experiment multiple times and then average the results.
#
# 1. **Bootstrap:** we generate multiple samples of training data, via bootstrapping. We train a full decision tree on each sample of data.
# 2. **AGGregatING:** for a given input, we output the averaged outputs of all the models for that input.
#
# This method is called **Bagging: B** ootstrap + **AGG**regat**ING**.
#
# -------------
#
# -------------
#
# ## 2. Building the tree models of last week
#
# ### Rebuild the Decision Tree model and Bagging model for comparison with Boosting methods
# +
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import sklearn.metrics as metrics
import time
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
# %matplotlib inline
pd.set_option('display.width', 1500)
pd.set_option('display.max_columns', 100)
from sklearn.model_selection import learning_curve
# -
#
#Import Dataframe and Set Column Names
spam_df = pd.read_csv('data/spam.csv', header=None)
# The raw file has no header; name the feature columns Column_i and the label 'Spam'.
columns = ["Column_"+str(i+1) for i in range(spam_df.shape[1]-1)] + ['Spam']
spam_df.columns = columns
display(spam_df.head())
# +
#Let us split the dataset into a 70-30 split by using the following:
#Split data into train and test
# A random Boolean mask (seeded for reproducibility) gives an approximate 70/30 split.
np.random.seed(42)
msk = np.random.rand(len(spam_df)) < 0.7
data_train = spam_df[msk]
data_test = spam_df[~msk]
#Split predictor and response columns
x_train, y_train = data_train.drop(['Spam'], axis=1), data_train['Spam']
x_test , y_test = data_test.drop(['Spam'] , axis=1), data_test['Spam']
print("Shape of Training Set :",data_train.shape)
print("Shape of Testing Set :" ,data_test.shape)
# +
#Check Percentage of Spam in Train and Test Set
# Verify the split preserved a similar class balance in both subsets.
percentage_spam_training = 100*y_train.sum()/len(y_train)
percentage_spam_testing = 100*y_test.sum()/len(y_test)
print("Percentage of Spam in Training Set \t : {:0.2f}%.".format(percentage_spam_training))
print("Percentage of Spam in Testing Set \t : {:0.2f}%.".format(percentage_spam_testing))
# -
# -----------
#
# ### Fitting an Optimal Single Decision Tree
# Best depth for single decision trees of last week
best_depth = 7
print("The best depth was found to be:", best_depth)
# +
# Evaluate the performance at the best depth
model_tree = DecisionTreeClassifier(max_depth=best_depth)
model_tree.fit(x_train, y_train)
#Check Accuracy of Spam Detection in Train and Test Set
acc_trees_training = accuracy_score(y_train, model_tree.predict(x_train))
acc_trees_testing = accuracy_score(y_test, model_tree.predict(x_test))
print("Simple Decision Trees: Accuracy, Training Set \t : {:.2%}".format(acc_trees_training))
print("Simple Decision Trees: Accuracy, Testing Set \t : {:.2%}".format(acc_trees_testing))
# -
#
# --------
#
# ### Fitting 100 Single Decision Trees while Bagging
#
# +
n_trees = 100 # number of bootstrap models in the ensemble (several values were tried)

# One deep tree per bootstrap sample; the depth is deliberately larger than the
# single-tree optimum because averaging the ensemble reduces variance.
np.random.seed(0)
model = DecisionTreeClassifier(max_depth=best_depth+5)

# Pre-allocate one prediction column per bootstrap model.
predictions_train = np.zeros((data_train.shape[0], n_trees))
predictions_test = np.zeros((data_test.shape[0], n_trees))

# Fit on a bootstrap resample, then record predictions on the full train/test sets.
for i in range(n_trees):
    boot_sample = data_train.sample(frac=1, replace=True)
    boot_y = boot_sample['Spam']
    boot_x = boot_sample.drop(['Spam'], axis=1)
    model.fit(boot_x, boot_y)
    predictions_train[:, i] = model.predict(x_train)
    predictions_test[:, i] = model.predict(x_test)

# Wrap the prediction matrices in labelled DataFrames.
columns = ["Bootstrap-Model_"+str(i+1) for i in range(n_trees)]
predictions_train = pd.DataFrame(predictions_train, columns=columns)
predictions_test = pd.DataFrame(predictions_test, columns=columns)
# +
#Function to ensemble the prediction of each bagged decision tree model
def get_prediction(df, count=-1):
    """Majority vote over the first `count` bootstrap-model columns of `df`.

    A row is predicted positive when more than half of the selected models
    voted 1. `count=-1` (the default) uses every column.
    """
    if count == -1:
        count = df.shape[1]
    votes = df.iloc[:, 0:count]
    return np.mean(votes, axis=1) > 0.5
#Check Accuracy of Spam Detection in Train and Test Set
# count=-1 -> majority vote across all 100 bootstrap models.
acc_bagging_training = 100*accuracy_score(y_train, get_prediction(predictions_train, count=-1))
acc_bagging_testing = 100*accuracy_score(y_test, get_prediction(predictions_test, count=-1))
print("Bagging: \tAccuracy, Training Set \t: {:0.2f}%".format(acc_bagging_training))
print("Bagging: \tAccuracy, Testing Set \t: {:0.2f}%".format( acc_bagging_testing))
# -
# ### Weaknesses of Bagging
#
# Bagging is a greedy algorithm. What does this mean?
# We always choose the feature with the most impact: ie the most information gain.
#
# In what scenarios is this likely to be a problem?
#
#
# <img src="data/dep_predictors.png" alt="split2" width="40%"/>
#
# Imagine that this is the true underlying data generative process. Here predictors $x_2$ and $x_3$ influence $x_1$.
#
# $\bullet$ Which predictor do you think bagging is likely to select as the root node?
#
# $\bullet$ Why is this likely to be an issue?
#
# $\bullet$ Because of their greedy nature, bagging ensembles are very likely to be correlated, especially in the shallower nodes of the individual decision trees.
#
#
# ### Why are decision trees greedy?
#
# $\bullet$ Decision trees are NP-complete, there is no way to find the global minima ie. the best tree unless we use brute force and try all possible combinations. In practice this is infeasible.
#
# $\bullet$ Thus decision trees are **heuristic algorithms**. Heuristic algorithms are designed to solve problems in a faster, more efficient way by sacrificing optimality, accuracy, or precision in favor of speed. Heuristic algorithms are often used to solve NP-complete problems.
#
#
# Helpful analogies: playing chess, class tests
#
# # Part 2 : Random Forest vs Bagging
# #### What is Random Forest?
#
# - **Many trees** make a **forest**.
# - **Many random trees** make a **random forest**.
#
#
# Random Forest is a modified form of bagging that creates ensembles of independent decision trees.
# To *de-correlate the trees*, we:
# 1. train each tree on a separate bootstrap **random sample** of the full training set (same as in bagging)
# 2. for each tree, at each split, we **randomly select a set of 𝐽′ predictors from the full set of predictors.** (not done in bagging)
# 3. From amongst the 𝐽′ predictors, we select the optimal predictor and the optimal corresponding threshold for the split.
#
# *Question:* Why would this second step help (only considering random sub-group of features)?
# Now, we will fit an ensemble method, the Random Forest technique, which is different from the decision tree. Refer to the lectures slides for a full treatment on how they are different. Let's use ```n_estimators = predictor_count/2``` and ```max_depth = best_depth```.
# +
#Fit a Random Forest Model
# Re-declared so this cell can run standalone.
best_depth = 7
#Training
# n_estimators = half the number of predictors, per the markdown above; the
# random per-split feature subsetting is what distinguishes this from bagging.
model = RandomForestClassifier(n_estimators=int(x_train.shape[1]/2), max_depth=best_depth)
model.fit(x_train, y_train)
#Predict
y_pred_train = model.predict(x_train)
y_pred_test = model.predict(x_test)
#Performance Evaluation
acc_random_forest_training = accuracy_score(y_train, y_pred_train)*100
acc_random_forest_testing = accuracy_score(y_test, y_pred_test)*100
print("Random Forest: Accuracy, Training Set : {:0.2f}%".format(acc_random_forest_training))
print("Random Forest: Accuracy, Testing Set : {:0.2f}%".format(acc_random_forest_testing))
# -
# <div class="alert alert-success">
# <strong>🏋🏻♂️ TEAM ACTIVITY 1:</strong> Random Forests </div>
#
# Let's try to improve our accuracy scores on the cancer dataset.
#
# Lab-provided helpers: get_tree_scores(x, y, model, depth_range) runs the
# cross-validation depth sweep (per the task description below);
# load_cancer_dataset returns scaled predictors and the target.
from functions import tree_pd
get_tree_scores = tree_pd.get_tree_scores
cancer_scaled, target = tree_pd.load_cancer_dataset(10, 4)
# +
################################### Train Test split
np.random.seed(40)
#test_proportion
test_prop = 0.2
# Rows whose uniform draw exceeds test_prop (~80% of them) go to training.
msk = np.random.uniform(0, 1, len(cancer_scaled)) > test_prop
#Split predictor and response columns
ex1_x_train, ex1_y_train = cancer_scaled[msk], target[msk]
ex1_x_test , ex1_y_test = cancer_scaled[~msk], target[~msk]
print("Shape of Training Set :", ex1_x_train.shape)
print("Shape of Testing Set :" , ex1_x_test.shape)
# -
# ## Your tasks:
# 1) Use the `get_tree_scores` function to assign a dataframe `rf_val_acc` using a class instance of `RandomForestClassifier`. As a reminder this function takes four arguments (x_train, y_train, model, tree_depth_range). This time don't feed a random state.
#
# 2) Use pandas groupby function to get the mean cross-validation accuracy for specific depths. Assign to a new dataframe `rf_mean_acc`.
#
# 3) Visualize the mean cross validation accuracy scores by running the cell provided. Answer the subsequent questions.
#
# 4) Plot the feature importance of the best random forest model.
# # %load 'solutions/sol2.py'
# Depths 1, 3, ..., 39 to scan.
tree_depth_range = range(1, 40, 2)
rf_val_acc = get_tree_scores(ex1_x_train,
                             ex1_y_train,
                             RandomForestClassifier(),
                             tree_depth_range)
#rf_mean_acc = pd.DataFrame(rf_val_acc)
# Mean CV accuracy per depth. groupby moves "depth" into the (ascending
# sorted) index, so re-adding it as a column from the same sorted range
# keeps depths aligned with their mean scores.
rf_mean_acc = rf_val_acc.groupby("depth").mean()
rf_mean_acc["depth"] = list(tree_depth_range)
rf_mean_acc
# #### Run this code when you are finished with the first exercise to compare random forests and simple decision trees. More questions and one final task lie below.
# +
### add a decision tree classifier for comparison.
# Cross-validate a plain decision tree over the same depth range for comparison.
tree_val_acc = get_tree_scores(ex1_x_train,
                               ex1_y_train,
                               DecisionTreeClassifier(),
                               tree_depth_range)
# Average CV scores per depth; restore "depth" as a column (groupby moved it
# into the sorted index, which matches list(tree_depth_range)).
tree_mean_acc = tree_val_acc.groupby("depth").mean()
tree_mean_acc["depth"] = list(tree_depth_range)
### Make the plot
plt.figure(figsize=(12, 3))
plt.title('Variation of Accuracy on Validation set with Depth - Simple Decision Tree')
sns.lineplot(x = "depth", y = "cv_acc_score", data = rf_val_acc,
             label = "random forest");
sns.lineplot(x = "depth", y = "cv_acc_score", data = tree_val_acc,
             label = "simple decision tree");
plt.xlabel("max_depth")
plt.ylabel("validation set accuracy score")
# Refit each model at its best cross-validated depth and score on the test set.
max_idx = tree_mean_acc["cv_acc_score"].idxmax()
best_depth_tree = tree_mean_acc["depth"][max_idx]
# BUGFIX: fit at the depth found above (best_depth_tree), not the stale
# `best_depth` (= 7) carried over from the spam exercise.
best_tree_model = DecisionTreeClassifier(max_depth=best_depth_tree)
best_tree_model.fit(ex1_x_train, ex1_y_train)
tree_test_accuracy = best_tree_model.score(ex1_x_test, ex1_y_test.reshape(-1,))
max_idx = rf_mean_acc["cv_acc_score"].idxmax()
best_depth_rf = rf_mean_acc["depth"][max_idx]
best_rf_model = RandomForestClassifier(max_depth=best_depth_rf, random_state = 42)
best_rf_model.fit(ex1_x_train, ex1_y_train.reshape(-1,))
tree_rf_accuracy = best_rf_model.score(ex1_x_test, ex1_y_test.reshape(-1,))
# BUGFIX: report the tree's own best depth, not `best_depth`.
print("Decision Tree best depth {:}".format(best_depth_tree))
print("Random Forest best depth {:}".format(best_depth_rf))
print("Best Decision Tree test set accuracy: {:0.2f}%".format(tree_test_accuracy*100))
print("Best Random Forest test set accuracy: {:0.2f}%".format(tree_rf_accuracy*100))
# -
#
# $\bullet$ Why doesn't the random forest accuracy score deteriorate in the same way that the decision tree does for deeper trees?
#
#
# $\bullet$ What are the two kinds of stochasticity that lead to the robustness of random forests?
#
# $\bullet$ How do random forests differ from Bagging?
# ### Feature Importance
# #### Lets plot the feature importance of the best random forest model:
# Random Forest gives the above values as ```feature_importance``` where it normalizes the impact of a predictor to the number of times it is useful and thus gives overall significance for free. Explore the attributes of the Random Forest model object for the best nodes.
#
# Feature importance is calculated as the decrease in node impurity **weighted by the probability of reaching that node. The node probability can be calculated by the number of samples that reach the node**, divided by the total number of samples. The higher the value the more important the feature.
#
# source: https://towardsdatascience.com/the-mathematics-of-decision-trees-random-forest-and-feature-importance-in-scikit-learn-and-spark-f2861df67e3#:~:text=Feature%20importance%20is%20calculated%20as,the%20more%20important%20the%20feature.
# <div class="alert alert-success">
# <strong>🏋🏻♂️ TEAM ACTIVITY 2:</strong> Feature importance </div>
# 1) extract the `.feature_importances_` attribute from your `best_rf_model`. Assign this to a variable called `feature_importance`.
#
# 2) Rescale the feature importances such that the most important feature is has an importance of 100.
#
# 3) use `np.argsort` to return the indices of the sorted features.
#
# 4) finally pass the sorted index to `plt.barh` and plot the feature importances!
# +
#your code here
#help(best_rf_model)
# -
# Scaffold for the feature-importance exercise, kept as a string so it does
# not execute; the worked solution is loaded in the next cell.
'''feature_importance = ...
feature_importance = ...
sorted_idx = ...
pos = np.arange(sorted_idx.shape[0]) + .5
plt.figure(figsize=(10,12))
plt.barh(pos, ..., align='center')
plt.yticks(pos, ex1_x_train.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()'''
# +
# # %load 'solutions/importance.py'
feature_importance = best_rf_model.feature_importances_
# Rescale so the most important feature scores exactly 100.
feature_importance = 100.0 * (feature_importance / feature_importance.max())
# Indices ordered from least to most important (barh draws bottom-up).
sorted_idx = np.argsort(feature_importance)
pos = np.arange(sorted_idx.shape[0]) + .5
#Plot
plt.figure(figsize=(10,12))
plt.barh(pos, feature_importance[sorted_idx], align='center')
plt.yticks(pos, ex1_x_train.columns[sorted_idx])
plt.xlabel('Relative Importance')
plt.title('Variable Importance')
plt.show()
# -
# #### Let's compare the performance of our 3 models:
# +
# Side-by-side accuracy of the three spam models fit above.
print("Decision Trees:\tAccuracy, Training Set \t: {:.2%}".format(acc_trees_training))
print("Decision Trees:\tAccuracy, Testing Set \t: {:.2%}".format(acc_trees_testing))
print("\nBagging: \tAccuracy, Training Set \t: {:0.2f}%".format(acc_bagging_training))
print("Bagging: \tAccuracy, Testing Set \t: {:0.2f}%".format( acc_bagging_testing))
print("\nRandom Forest: \tAccuracy, Training Set \t: {:0.2f}%".format(acc_random_forest_training))
print("Random Forest: \tAccuracy, Testing Set \t: {:0.2f}%".format(acc_random_forest_testing))
# -
# #### As we see above, the performance of both Bagging and Random Forest was similar, so what is the difference? Do both overfit the data just as much?
#
# Hints :
#
# - What is the only extra parameter we declared when defining a Random Forest Model vs Bagging? Does it have an impact on overfitting?
# +
#Fit a Random Forest Model
# Deliberately much deeper trees (best_depth + 20) to demonstrate that the
# random forest's test accuracy does not collapse as individual trees overfit.
new_depth = best_depth + 20
#Training
model = RandomForestClassifier(n_estimators=int(x_train.shape[1]/2), max_depth=new_depth)
model.fit(x_train, y_train)
#Predict
y_pred_train = model.predict(x_train)
y_pred_test = model.predict(x_test)
#Performance Evaluation
acc_random_forest_deeper_training = accuracy_score(y_train, y_pred_train)*100
acc_random_forest_deeper_testing = accuracy_score(y_test, y_pred_test)*100
print("Random Forest: Accuracy, Training Set (Deeper): {:0.2f}%".format(acc_random_forest_deeper_training))
print("Random Forest: Accuracy, Testing Set (Deeper): {:0.2f}%".format(acc_random_forest_deeper_testing))
# -
# #### Training accuracies:
# All four models on the training set.
print("Training Accuracies:")
print("Decision Trees:\tAccuracy, Training Set \t: {:.2%}".format(acc_trees_training))
print("Bagging: \tAccuracy, Training Set \t: {:0.2f}%".format(acc_bagging_training))
print("Random Forest: \tAccuracy, Training Set \t: {:0.2f}%".format(acc_random_forest_training))
print("RF Deeper: \tAccuracy, Training Set \t: {:0.2f}%".format(acc_random_forest_deeper_training))
# #### Testing accuracies:
# All four models on the held-out test set.
print("Testing Accuracies:")
print("Decision Trees:\tAccuracy, Testing Set \t: {:.2%}".format(acc_trees_testing))
print("Bagging: \tAccuracy, Testing Set \t: {:0.2f}%".format( acc_bagging_testing))
print("Random Forest: \tAccuracy, Testing Set \t: {:0.2f}%".format(acc_random_forest_testing))
print("RF Deeper: \tAccuracy, Testing Set \t: {:0.2f}%".format(acc_random_forest_deeper_testing))
# +
#vars(model)
# -
# Size of the first estimator's per-node feature/threshold arrays
# (one entry per node of that fitted tree).
print(len(model.estimators_[0].tree_.feature))
print(len(model.estimators_[0].tree_.threshold))
# <div class="alert alert-success">
# <strong>🏋🏻♂️ TEAM ACTIVITY 2:</strong> Exploring RandomForestClassifier class instances. </div>
#
# For more resources on python classes (we're relying on them all the time via sklearn!) see <a href = "https://docs.python.org/3/tutorial/classes.html#a-first-look-at-classes">this link.</a>
#
# +
from functions import tree_pd
import numpy as np
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as metrics
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegressionCV
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix
from sklearn import datasets
from sklearn.ensemble import BaggingRegressor
# %matplotlib inline
pd.set_option('display.width', 1500)
pd.set_option('display.max_columns', 100)
from sklearn.model_selection import learning_curve
# Self-contained setup for Team Activity 2: re-imports and re-splits so this
# cell runs standalone. NOTE(review): this rebinds y_train/y_test to the
# cancer target (the AdaBoost section later rebuilds the spam split).
get_tree_pd = tree_pd.get_tree_scores
cancer_scaled, target = tree_pd.load_cancer_dataset(10, 4)
################################### Train Test split
np.random.seed(40)
#test_proportion
test_prop = 0.2
msk = np.random.uniform(0, 1, len(cancer_scaled)) > test_prop
#Split predictor and response columns
X_train, y_train = cancer_scaled[msk], target[msk]
X_test , y_test = cancer_scaled[~msk], target[~msk]
print("Shape of Training Set :", X_train.shape)
print("Shape of Testing Set :" , X_test.shape)
################################### Train a bagging and random forest model
depth = 13
n_estimators = 100
best_rf_model = RandomForestClassifier(max_depth=depth, random_state = 42, n_estimators= n_estimators)
best_rf_model.fit(X_train, y_train.reshape(-1,))
tree_rf_accuracy = best_rf_model.score(X_test, y_test.reshape(-1,))
# NOTE(review): BaggingRegressor wrapping a *classifier* base estimator —
# presumably intentional for the impurity comparison below, but confirm.
bagging_model = BaggingRegressor(DecisionTreeClassifier(max_depth=depth),
                                 n_estimators = 100,
                                 random_state = 42).fit(X_train, y_train.reshape(-1,))
# -
# #### Directions
# Run the cell below and look at the output. The .estimators_ attribute of a RandomForestClassifier class instance is a list of the individual DecisionTreeClassifier class instance estimators that make up the ensemble model. Calling .tree_ on the DecisionTreeClassifier will give you the individual tree estimator.
# 1. Complete the function by extracting the impurity and feature attributes for each decision tree estimator at a specific decision node.
# 2. Fix the creation of the dictionary at the bottom of the function and return a dataframe.
# Inspect the ensemble's building blocks: each entry of .estimators_ is a
# fitted DecisionTreeClassifier whose .tree_ holds the per-node arrays.
type(best_rf_model.estimators_[0].tree_)
type(best_rf_model)
help(best_rf_model.estimators_[0].tree_)
# # %load "exercises/exercise1.py"
# Exercise scaffold, kept as a string so it does not execute; the worked
# solution is loaded in the next cell.
'''def get_impurity_pd(model, n = 0):
    """
    This function returns a pandas dataframe with all of the nth nodes feature impurities.
    """
    rf_estimators = model.estimators_.copy()
    features = np.array(X_train.columns)
    node_impurities, node_features = [], []
    for i, estimator in enumerate(rf_estimators):
        estimator_impurity = #TODO 0
        estimator_feature = #TODO 1
        node_impurities.append(estimator_impurity)
        node_features.append(estimator_feature)
    node_impurity_dict = {"feature": #TODO
                        "impurity": #TODO
    df = #TODO
    return(df)'''
# # %load "solutions/impurity.py"
def get_impurity_pd(model, n = 0):
    """
    Collect, for every estimator in an ensemble, the impurity and the
    splitting feature recorded at tree node `n`.

    Arguments:
        model: must either be a BaggingRegressor or RandomForestClassifier
        n: The desired tree node
    Returns:
        DataFrame with one row per estimator and columns "feature"
        (the feature name, looked up in the global X_train) and "impurity".
    """
    estimators = model.estimators_.copy()
    feature_names = np.array(X_train.columns)
    impurities = [est.tree_.impurity[n] for est in estimators]
    split_ids = [est.tree_.feature[n] for est in estimators]
    return pd.DataFrame({"feature": feature_names[split_ids],
                         "impurity": impurities})
# +
# Compare node-0 (root) impurities across the estimators of both ensembles.
tree_node = 0
rf_df = get_impurity_pd(best_rf_model, tree_node)
bagging_df = get_impurity_pd(bagging_model, tree_node)
#plot
fig, ax = plt.subplots(1,2, figsize = (20, 5))
# NOTE(review): the ravel() result is discarded; ax is already 1-D here.
ax.ravel()
sns.swarmplot(x = "feature", y = "impurity", data = rf_df, ax = ax[0])
#sns.swarmplot(x = "feature", y = "impurities", data = rf_df, ax = ax[0])
ax[0].tick_params(labelrotation=45)
ax[0].set_title("Random Forest: Node 0 impurities after split")
sns.swarmplot(x = "feature", y = "impurity", data = bagging_df, ax = ax[1])
ax[1].set_title("Bagging: Node 0 impurities after split")
plt.xticks(rotation=45);
# -
# ____________
#
# ## The limitations of random forest
#
# #### When can Random Forest overfit?
# - Increasing the number of trees in RF generally doesn't increase the risk of overfitting, BUT if the number of trees in the ensemble is too large then the trees in the ensemble may become correlated, and therefore increase the variance.
#
# #### When can Random Forest fail?
#
# - **When we have a lot of predictors that are completely independent of the response and one overwhelmingly influential predictor**.
#
# #### Why aren't random forests and bagging interpretable? How about a very deep decision tree?
#
# ____________
# ## Bagging and random forest vs. Boosting
#
# - **Bagging and Random Forest:**
# - complex and deep trees **overfit**
# - thus **let's perform variance reduction on complex trees!**
# - **Boosting:**
# - simple and shallow trees **underfit**
# - thus **let's perform bias reduction of simple trees!**
# - make the simple trees more expressive!
#
# **Boosting** attempts to improve the predictive flexibility of simple models.
# - It trains a **large number of “weak” learners in sequence**.
# - A weak learner is a constrained model (limit the max depth of each decision tree).
# - Each one in the sequence focuses on **learning from the mistakes** of the one before it.
# - By more heavily weighting in the mistakes in the next tree, our next tree will learn from the mistakes.
# - A combining all the weak learners into a single strong learner = **a boosted tree**.
# <img src="data/gradient_boosting1.png?" alt="tree_adj" width="70%"/>
#
# ----------
#
# ### Illustrative example (from [source](https://towardsdatascience.com/underfitting-and-overfitting-in-machine-learning-and-how-to-deal-with-it-6fe4a8a49dbf))
#
# <img src="data/boosting.png" alt="tree_adj" width="70%"/>
# We built multiple trees consecutively: Tree 1 -> Tree 2 -> Tree 3 - > ....
#
# **The size of the plus or minus signs indicates the weights of a data points for every Tree**. How do we determine these weights?
#
# For each consecutive tree and iteration we do the following:
# - The **wrongly classified data points ("mistakes" = red circles)** are identified and **more heavily weighted in the next tree (green arrow)**.
# - Thus the size of the plus or minus changes in the next tree
# - This change in weights will influence and change the next simple decision tree
# - The **correct predictions are** identified and **less heavily weighted in the next tree**.
#
# We iterate this process for a certain number of times, stop and construct our final model:
# - The ensemble (**"Final: Combination"**) is a linear combination of the simple trees, and is more expressive!
# - The ensemble (**"Final: Combination"**) has indeed not just one simple decision boundary line, and fits the data better.
#
#
# <img src="data/boosting_2.png?" alt="tree_adj" width="70%"/>
# ### What is Ada Boost?
#
# - Ada Boost = Adaptive Boosting.
# - AdaBoost is adaptive in the sense that subsequent weak learners are tweaked in favor of those instances misclassified by previous classifiers
#
# <img src="data/AdaBoost1.png" alt="tree_adj" width="70%"/>
# <img src="data/AdaBoost2.png" alt="tree_adj" width="70%"/>
#
# For an individual training point the loss can be defined as:
# $$\text{ExpLoss_i} = \begin{cases}
# e^{\hat{y}}, & \text{if}\ y=-1 \\
# e^{-\hat{y}}, & \text{} y=1
# \end{cases}
# $$
# <img src="data/AdaBoost3.png" alt="tree_adj" width="70%"/>
# ### Illustrative Example
# ------
# **Step1. Start with an equal distribution initially**
# <img src="data/ADA2.png" alt="tree_adj" width="40%">
#
# ------
# **Step2. Fit a simple classifier**
# <img src="data/ADA3.png" alt="tree_adj" width="40%"/>
#
# ------
# **Step3. Update the weights**
# <img src="data/ADA4.png" alt="tree_adj" width="40%"/>
#
# **Step4. Update the classifier:** First time trivial (we have no model yet.)
#
# ------
# **Step2. Fit a simple classifier**
# <img src="data/ADA5.png" alt="tree_adj" width="40%"/>
#
# **Step3. Update the weights:** not shown.
#
# ------
# **Step4. Update the classifier:**
# <img src="data/ADA6.png" alt="tree_adj" width="40%">
#
#
# -------------
#
# ### Let's talk about random forest and boosting in the context of bias and variance.
# <img src="data/bias_variance.png" alt="split2" width="40%"/>
# <img src="data/fitting.png" alt="split2" width="40%"/>
# ## 4. Use the Adaboost method to visualize Bias-Variance tradeoff.
# Now let's try Boosting!
# +
#Fit an Adaboost Model
# Rebuild the spam split (x/y names were rebound by the cancer exercise above).
x_train, y_train = data_train.drop(['Spam'], axis=1), data_train['Spam']
x_test , y_test = data_test.drop(['Spam'] , axis=1), data_test['Spam']
#Training
# 200 shallow (depth-3) weak learners, each fit to the reweighted mistakes
# of its predecessor.
model = AdaBoostClassifier(base_estimator= DecisionTreeClassifier(max_depth=3),
                           n_estimators=200,
                           learning_rate=0.05)
model.fit(x_train.values, y_train.values)
#Predict
y_pred_train = model.predict(x_train.values)
y_pred_test = model.predict(x_test.values)
#Performance Evaluation
acc_boosting_training = accuracy_score(y_train, y_pred_train)*100
acc_boosting_test = accuracy_score(y_test, y_pred_test)*100
print("Ada Boost:\tAccuracy, Training Set \t: {:0.2f}%".format(acc_boosting_training))
print("Ada Boost:\tAccuracy, Testing Set \t: {:0.2f}%".format(acc_boosting_test))
# -
# **How does the test and training accuracy evolve with every iteration (tree)?**
# +
#Plot Iteration based score
# staged_score yields the ensemble's accuracy after each boosting iteration,
# showing how train/test accuracy evolve as weak learners are added.
train_scores = list(model.staged_score(x_train.values,y_train))
test_scores = list(model.staged_score(x_test.values, y_test))
plt.figure(figsize=(10,7))
plt.plot(train_scores,label='train')
plt.plot(test_scores,label='test')
plt.xlabel('Iteration')
plt.ylabel('Accuracy')
plt.title("Variation of Accuracy with Iterations - ADA Boost")
plt.legend();
print('best number of iterations', np.array(test_scores).argmax())
# -
# What about performance?
# Test-set accuracy of all four spam models so far.
print("Decision Trees:\tAccuracy, Testing Set \t: {:.3%}".format(acc_trees_testing))
print("Bagging: \tAccuracy, Testing Set \t: {:0.3f}%".format( acc_bagging_testing))
print("Random Forest: \tAccuracy, Testing Set \t: {:0.3f}%".format(acc_random_forest_testing))
print("Ada Boost:\tAccuracy, Testing Set \t: {:0.3f}%".format(acc_boosting_test))
# AdaBoost seems to be performing better than Simple Decision Trees and has a similar Test Set Accuracy performance compared to Random Forest.
# **Random tip:** If a "for"-loop takes some time and you want to know the progress while running the loop, use: **tqdm()** ([link](https://github.com/tqdm/tqdm)). No need for 1000's of ```print(i)``` outputs.
#
#
# Usage: ```for i in tqdm( range(start,finish) ):```
#
# - tqdm means *"progress"* in Arabic (taqadum, تقدّم) and
# - tqdm is an abbreviation for *"I love you so much"* in Spanish (te quiero demasiado).
# #### What if we change the depth of our AdaBoost trees?
# +
# #! pip install tqdm
# +
# Start Timer
# NOTE(review): assumes `time` and `tqdm` were imported earlier in the
# notebook (not visible in this section) — confirm.
start = time.time()
#Find Optimal Depth of trees for Boosting
score_train, score_test = {}, {}
depth_start, depth_end = 2, 30
# Sweep base-tree depths 2, 4, ..., 28; tqdm shows loop progress.
for i in tqdm(range(depth_start, depth_end, 2)):
    model = AdaBoostClassifier(
        base_estimator=DecisionTreeClassifier(max_depth=i),
        n_estimators=200, learning_rate=0.05)
    model.fit(x_train, y_train)
    score_train[i] = accuracy_score(y_train, model.predict(x_train))
    score_test[i] = accuracy_score(y_test, model.predict(x_test))
# Stop Timer (elapsed time compared against XGBoost later)
end = time.time()
elapsed_adaboost = end - start
# -
#Plot
# Sort the {depth: accuracy} dicts by depth and unzip into x/y sequences.
lists1 = sorted(score_train.items())
lists2 = sorted(score_test.items())
x1, y1 = zip(*lists1)
x2, y2 = zip(*lists2)
plt.figure(figsize=(10,7))
plt.ylabel("Accuracy")
plt.xlabel("Depth")
plt.title('Variation of Accuracy with Depth - ADA Boost Classifier')
plt.plot(x1, y1, 'b-', label='Train')
plt.plot(x2, y2, 'g-', label='Test')
plt.legend()
plt.show()
# Adaboost complexity depends on both the number of estimators and the base estimator.
# - In the beginning as our model complexity increases (depth 2-3), we first observe a small increase in accuracy.
# - But as we go further to the right of the graph (**deeper trees**), our model **will overfit the data.**
# - **REMINDER and validation: Boosting relies on simple trees!**
# <div class="alert alert-success">
# <strong>🏋🏻♂️ TEAM ACTIVITY 3:</strong> Exploring learning rate and TQDM </div>
#
#
# ### Explore how changing the learning rate changes the training and testing accuracy. Use the Te Quiero Demasiado (TQDM) wrap around your range as above. (Hint: you will probably want to explore a range from $e^{-6}$ to $e^{-1}$.)
#
# 1) copy the tqdm loop code and vary the learning rate as suggested.
#
# 2) plot the staged score to visualize how the adaboost model learns differently depending on the learning rate.
# +
#TODO
# +
# # %load "solutions/bo2.py"
from tqdm.notebook import trange
#logarithmic values: learning rates e^-6 .. e^0
exp_powers = list(range(-6,1))
exp_vals = list(np.exp(exp_powers))
#Find Optimal Learning Rate for Ada-Boosting
staged_train_scores, staged_test_scores = {}, {}
# BUGFIX: use cell-local names (lr_*) so this cell no longer clobbers
# score_train/score_test and x1/y1/x2/y2 from the AdaBoost *depth* sweep,
# which the final AdaBoost-vs-XGBoost comparison plot still reads.
lr_score_train, lr_score_test = {}, {}
for i in trange(len(exp_vals)):
    model = AdaBoostClassifier(
        base_estimator=DecisionTreeClassifier(max_depth=3),
        n_estimators=200, learning_rate=exp_vals[i])
    model.fit(x_train.values, y_train)
    lr_score_train[exp_vals[i]] = accuracy_score(y_train, model.predict(x_train.values))
    lr_score_test[exp_vals[i]] = accuracy_score(y_test, model.predict(x_test.values))
    # Per-iteration accuracies, to compare convergence across learning rates.
    staged_train_scores[exp_vals[i]] = list(model.staged_score(x_train.values, y_train))
    staged_test_scores[exp_vals[i]] = list(model.staged_score(x_test.values, y_test))
#Plot final accuracy against the log learning rate
lr_lists1 = sorted(lr_score_train.items())
lr_lists2 = sorted(lr_score_test.items())
lr_x1, lr_y1 = zip(*lr_lists1)
lr_x2, lr_y2 = zip(*lr_lists2)
plt.figure(figsize=(10,7))
plt.ylabel("Accuracy")
plt.xlabel("Log Learning Rate Log($\lambda$)")
plt.title('Variation of Accuracy with Depth - ADA Boost Classifier')
plt.plot(np.log(lr_x1), lr_y1, 'b-', label='Train')
plt.plot(np.log(lr_x2), lr_y2, 'g-', label='Test')
plt.legend()
plt.show()
##export this to a function or delete it.
# Staged (per-iteration) accuracy curves, one line per learning rate.
fig, ax = plt.subplots(1,2, figsize=(10,7))
for key, val in staged_train_scores.items():
    ax[0].plot(list(val),label='train')
for i, (key, val) in enumerate(staged_test_scores.items()):
    ax[1].plot(list(val),label='$\lambda$=exp({})'.format(exp_powers[i]))
ax[1].set_title("h")
plt.legend(loc = 4)
sets = ["Train", "Test"]
for i in range(2):
    ax[i].set_xlabel('Iteration')
    ax[i].set_ylabel('Accuracy')
    ax[i].set_title(sets[i] + " Set Accuracy vs Iterations - ADA Boost")
plt.show()
# -
# #### Is this exercise useful?
# **Food for Thought :**
# - Are **boosted models independent of one another?** Do they need to wait for the previous model's residuals?
# - Are **bagging or random forest models independent of each other**, can they be trained in a parallel fashion?
# ## End of Standard Lab
# ## 5. *Theory:* What is Gradient Boosting and XGBoost?
# ### What is Gradient Boosting?
#
# To improve its predictions, **gradient boosting looks at the difference between its current approximation, and the known correct target vector, which is called the residual**.
#
# The mathematics:
#
# - It may be assumed that there is some imperfect model $F_{m}$
# - The gradient boosting algorithm improves on $F_{m}$ constructing a new model that adds an estimator $h$ to provide a better model:
# $$F_{m+1}(x)=F_{m}(x)+h(x)$$
#
# - To find $h$, the gradient boosting solution starts with the observation that a perfect **h** would imply
#
# $$F_{m+1}(x)=F_{m}(x)+h(x)=y$$
#
# - or, equivalently solving for h,
#
# $$h(x)=y-F_{m}(x)$$
#
# - Therefore, gradient boosting will fit h to the residual $y-F_{m}(x)$
# <img src="data/gradient_boosting2.png" alt="tree_adj" width="80%"/>
#
# -------
#
# ### XGBoost: ["Long May She Reign!"](https://towardsdatascience.com/https-medium-com-vishalmorde-xgboost-algorithm-long-she-may-rein-edd9f99be63d)
#
# <img src="data/kaggle.png" alt="tree_adj" width="100%"/>
#
# ----------
#
# ### What is XGBoost and why is it so good!?
# - Based on Gradient Boosting
# - XGBoost = **eXtreme Gradient Boosting**; refers to the engineering goal to push the limit of computations resources for boosted tree algorithm
#
# **Accuracy:**
# - XGBoost however uses a **more regularized model formalizaiton to control overfitting** (=better performance) by both L1 and L2 regularization.
# - Tree Pruning methods: more shallow tree will also prevent overfitting
# - Improved convergence techniques (like early stopping when no improvement is made for X number of iterations)
# - Built-in Cross-Validaiton
#
# **Computing Speed:**
# - Special Vector and matrix type data structures for faster results.
# - Parallelized tree building: using all of your CPU cores during training.
# - Distributed Computing: for training very large models using a cluster of machines.
# - Cache Optimization of data structures and algorithm: to make best use of hardware.
#
# **XGBoost is building boosted trees in parallel? What? How?**
# - No: Xgboost doesn't run multiple trees in parallel, you need predictions after each tree to update gradients.
# - Rather it does the parallelization WITHIN a single tree my using openMP to create branches independently.
# ## 6. Use XGBoost: Extreme Gradient Boosting
# +
# Let's install XGBoost
# #! pip install xgboost
# -
# Peek at the label series to confirm the rebuilt spam target.
y_train
# +
import xgboost as xgb
# Create the training and test data in XGBoost's optimized DMatrix format
dtrain = xgb.DMatrix(x_train, label=y_train)
dtest = xgb.DMatrix(x_test, label=y_test)
# Parameters
# CONSISTENCY FIX: use 'verbosity' (as the model_xgboost helper below does)
# instead of the deprecated 'silent' parameter, removed in XGBoost 1.0.
param = {
    'max_depth': best_depth,  # the maximum depth of each tree
    'eta': 0.3,  # the training step for each iteration
    'verbosity': 0,  # logging mode - quiet
    'objective': 'multi:softprob',  # error evaluation for multiclass training
    'num_class': 2}  # the number of classes that exist in this dataset
# Number of training iterations
num_round = 200
# Start timer
start = time.time()
# Train XGBoost; stop early if train loss has not improved for 20 rounds
bst = xgb.train(param,
                dtrain,
                num_round,
                evals= [(dtrain, 'train')],
                early_stopping_rounds=20, # early stopping
                verbose_eval=20)
# Make prediction training set: softprob returns per-class probabilities,
# so take the argmax per row to recover class labels
preds_train = bst.predict(dtrain)
best_preds_train = np.asarray([np.argmax(line) for line in preds_train])
# Make prediction test set
preds_test = bst.predict(dtest)
best_preds_test = np.asarray([np.argmax(line) for line in preds_test])
# Performance Evaluation
acc_XGBoost_training = accuracy_score(y_train, best_preds_train)*100
acc_XGBoost_test = accuracy_score(y_test, best_preds_test)*100
# Stop Timer
end = time.time()
elapsed_xgboost = end - start
print("XGBoost:\tAccuracy, Training Set \t: {:0.2f}%".format(acc_XGBoost_training))
print("XGBoost:\tAccuracy, Testing Set \t: {:0.2f}%".format(acc_XGBoost_test))
# -
# ### What about the accuracy performance: AdaBoost versus XGBoost?
# Accuracy and wall-clock comparison: AdaBoost vs XGBoost.
print("Ada Boost:\tAccuracy, Testing Set \t: {:0.2f}%".format(acc_boosting_test))
print("XGBoost:\tAccuracy, Testing Set \t: {:0.2f}%".format(acc_XGBoost_test))
# ### What about the computing performance: AdaBoost versus XGBoost?
print("AdaBoost elapsed time: \t{:0.2f}s".format(elapsed_adaboost))
print("XGBoost elapsed time: \t{:0.2f}s".format(elapsed_xgboost))
# ### What if we change the depth of our XGBoost trees and compare to Ada Boost?
def model_xgboost(best_depth):
    """Train an XGBoost classifier with trees of depth `best_depth`.

    Returns (training accuracy, test accuracy) as fractions. Relies on the
    module-level `dtrain`, `dtest`, `y_train` and `y_test`.
    """
    params = {
        'max_depth': best_depth,        # maximum depth of each tree
        'eta': 0.3,                     # learning rate per boosting round
        'verbosity': 0,                 # quiet logging
        'objective': 'multi:softprob',  # per-class probability output
        'num_class': 2}                 # number of classes in this dataset

    # Up to 200 rounds, stopping early once the train metric stalls.
    booster = xgb.train(params, dtrain, 200,
                        evals=[(dtrain, 'train')],
                        early_stopping_rounds=20,
                        verbose_eval=False)

    # Collapse per-class probabilities into hard class labels.
    train_labels = np.argmax(booster.predict(dtrain), axis=1)
    test_labels = np.argmax(booster.predict(dtest), axis=1)

    return (accuracy_score(y_train, train_labels),
            accuracy_score(y_test, test_labels))
# Find the optimal tree depth for XGBoost by sweeping depths 2, 4, ..., 28
score_train_xgb, score_test_xgb = {}, {}
depth_start, depth_end = 2, 30
for i in trange(depth_start, depth_end, 2):
    XGBoost_training, XGBoost_test = model_xgboost(i)
    score_train_xgb[i] = XGBoost_training
    score_test_xgb[i] = XGBoost_test
# Plot train/test accuracy vs. depth; (x1, y1)/(x2, y2) are the Ada Boost
# curves computed earlier in the notebook.
lists1 = sorted(score_train_xgb.items())  # (depth, accuracy) pairs sorted by depth
lists2 = sorted(score_test_xgb.items())
x3, y3 = zip(*lists1)
x4, y4 = zip(*lists2)
plt.figure(figsize=(10,7))
plt.ylabel("Accuracy")
plt.xlabel("Depth")
plt.title('Variation of Accuracy with Depth - Adaboost & XGBoost Classifier')
plt.plot(x1, y1, label='Train Accuracy Ada Boost')
plt.plot(x2, y2, label='Test Accuracy Ada Boost')
plt.plot(x3, y3, label='Train Accuracy XGBoost')
plt.plot(x4, y4, label='Test Accuracy XGBoost')
plt.legend()
plt.show()
# **Interesting**:
# - No real optimal depth of the simple tree for XGBoost, probably a lot of regularization, pruning, or early stopping when using a deep tree at the start.
# - XGBoost does not seem to overfit when the depth of the tree increases, as opposed to Ada Boost.
# **All the accuracy performances:**
# NOTE(review): the Decision Trees line uses '{:.2%}' (expects a fraction in
# [0, 1]) while the others use '{:0.2f}%' (expect values already multiplied
# by 100) — confirm acc_trees_testing is stored as a fraction.
print("Decision Trees:\tAccuracy, Testing Set \t: {:.2%}".format(acc_trees_testing))
print("Bagging: \tAccuracy, Testing Set \t: {:0.2f}%".format( acc_bagging_testing))
print("Random Forest: \tAccuracy, Testing Set \t: {:0.2f}%".format(acc_random_forest_testing))
print("Ada Boost:\tAccuracy, Testing Set \t: {:0.2f}%".format(acc_boosting_test))
print("XGBoost:\tAccuracy, Testing Set \t: {:0.2f}%".format(acc_XGBoost_test))
#
# ----------
#
# **Overview of all the tree algorithms:** [Source](https://towardsdatascience.com/https-medium-com-vishalmorde-xgboost-algorithm-long-she-may-rein-edd9f99be63d)
#
# <img src="data/trees.png" alt="tree_adj" width="100%"/>
#
#
#
#
#
# ----------
# ## Optional: Example to better understand Bias vs Variance tradeoff.
# A central notion underlying what we've been learning in lectures and sections so far is the trade-off between overfitting and underfitting. If you remember back to Homework 3, we had a model that seemed to represent our data accurately. However, we saw that as we made it more and more accurate on the training set, it did not generalize well to unobserved data.
#
# As a different example, in face recognition algorithms, such as that on the iPhone X, a too-accurate model would be unable to identify someone who styled their hair differently that day. The reason is that our model may learn irrelevant features in the training data. On the contrary, an insufficiently trained model would not generalize well either. For example, it was recently reported that a face mask could sufficiently fool the iPhone X.
#
# A widely used solution in statistics to reduce overfitting consists of adding structure to the model, with something like regularization. This method favors simpler models during training.
#
# The bias-variance dilemma is closely related.
# - The **bias** of a model quantifies how precise a model is across training sets.
# - The **variance** quantifies how sensitive the model is to small changes in the training set.
# - A **robust** model is not overly sensitive to small changes.
# - **The dilemma involves minimizing both bias and variance**; we want a precise and robust model. Simpler models tend to be less accurate but more robust. Complex models tend to be more accurate but less robust.
#
# **How to reduce bias:**
# - **Use more complex models, more features, less regularization,** ...
# - **Boosting:** attempts to improve the predictive flexibility of simple models. Boosting uses simple base models and tries to “boost” their aggregate complexity.
#
# **How to reduce variance:**
# - **Early Stopping:** Its rules provide us with guidance as to how many iterations can be run before the learner begins to over-fit.
# - **Pruning:** Pruning is extensively used while building related models. It simply removes the nodes which add little predictive power for the problem in hand.
# - **Regularization:** It introduces a cost term for bringing in more features with the objective function. Hence it tries to push the coefficients for many variables to zero and hence reduce cost term.
# - **Train with more data:** It won’t work every time, but training with more data can help algorithms detect the signal better.
# - **Ensembling:** Ensembles are machine learning methods for combining predictions from multiple separate models. For example:
# - **Bagging** attempts to reduce the chance of overfitting complex models: Bagging uses complex base models and tries to “smooth out” their predictions.
#
# -------------
#
# #### Interesting Piazza post: why randomness in simple decision tree?
#
# ```"Hi there. I notice that there is a parameter called "random_state" in decision tree function and I wonder why we need randomness in simple decision tree. If we add randomness in such case, isn't it the same as random forest?"```
#
# - The problem of learning an optimal decision tree is known to be **NP-complete** under several aspects of optimality and even for simple concepts.
# - Consequently, practical decision-tree learning algorithms are based on **heuristic algorithms such as the greedy algorithm where locally optimal decisions are made at each node**.
# - Such algorithms **cannot guarantee to return the globally optimal decision tree**.
# - This can be mitigated by training multiple trees in an ensemble learner, where the features and samples are randomly sampled with replacement (Bagging).
#
# For example: **What is the default DecisionTreeClassifier behaviour when there are 2 or more best features for a certain split (a tie among "splitters")?** (after a deep dive and internet search [link](https://github.com/scikit-learn/scikit-learn/issues/12259 ) ):
#
# - The current default behaviour when splitter="best" is to shuffle the features at each step and take the best feature to split.
# - In case there is a tie, we take a random one.
| content/labs/lab10/cs109a_lab_10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# Puzzle input for Advent of Code 2019 day 1: the mass of each ship module.
modules = [123835
,66973
,63652
,99256
,56009
,58012
,130669
,109933
,52958
,131656
,144786
,50437
,134194
,80230
,50326
,118204
,102780
,135520
,142248
,80341
,51071
,71346
,134081
,142321
,136230
,55934
,79697
,90116
,107825
,133052
,130259
,99566
,83066
,90923
,58475
,134697
,91830
,105838
,109003
,125258
,108679
,87310
,79813
,109814
,65616
,69275
,118405
,105178
,93140
,79535
,138051
,55728
,71875
,121207
,52011
,81209
,129059
,135782
,62791
,72135
,77765
,109498
,73862
,134825
,148898
,81633
,53277
,109858
,91672
,115105
,132871
,138334
,135049
,73083
,79234
,129281
,86062
,88448
,99612
,52138
,149290
,120562
,118975
,92896
,51162
,122410
,75479
,137800
,142149
,123518
,67806
,89937
,85963
,104764
,56710
,51314
,67275
,61135
,77580
,74726]
def mass_to_fuel_recursive(mass):
    """Return the total fuel required for `mass`, including fuel-for-fuel.

    Fuel for a mass is floor(mass / 3) - 2; the fuel itself needs fuel,
    computed recursively until the requirement drops to zero or below
    (Advent of Code 2019, day 1, part 2).
    """
    # Plain integer floor division replaces int(np.floor(mass/3)) — same
    # result for non-negative masses, no numpy dependency, no float round-trip.
    fuel = mass // 3 - 2
    if fuel <= 0:
        return 0
    return fuel + mass_to_fuel_recursive(fuel)
def test(m):
    """Print a mass alongside its recursively-computed fuel requirement."""
    print(f"{m} {mass_to_fuel_recursive(m)}")
# Spot-check against the examples. NOTE: the original comments (654, 33583)
# were the part-1 values; the recursive part-2 totals are shown here.
test(12)      # 2
test(14)      # 2
test(1969)    # 966
test(100756)  # 50346

# Total recursive fuel requirement over all modules (the puzzle answer).
total_fuel = sum(mass_to_fuel_recursive(mm) for mm in modules)
print (total_fuel)
# +
# ANSWER = 4837367
| aoc2019/advent-01-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # H1C IDR3 LST-Binning Inspection Notebook
# ### <NAME>
#
# This notebook provides a sense-check for H1C IDR3 LST-binning results for one of the four epochs.
# Which of the four H1C IDR3 observing epochs (0-3) to inspect.
epoch = 0
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from hera_cal.io import HERAData
import glob
from hera_cal import utils, noise, redcal, lstbin
from hera_cal.abscal import match_times
from copy import deepcopy
import os
from IPython.display import display, HTML
import warnings
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# Widen the notebook cells to the full browser width.
display(HTML("<style>.container { width:100% !important; }</style>"))
# figure out which days are in the epoch
JDs = [int(path.split('/')[-1]) for path in sorted(glob.glob('/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/245*'))]
# Each epoch is a half-open JD interval [lo, hi); an unrecognized epoch value
# leaves the full JD list untouched, just like the original if/elif chain.
_epoch_bounds = {0: (float('-inf'), 2458080),
                 1: (2458080, 2458120),
                 2: (2458120, 2458170),
                 3: (2458170, float('inf'))}
_lo, _hi = _epoch_bounds.get(epoch, (float('-inf'), float('inf')))
JDs = [JD for JD in JDs if _lo <= JD < _hi]
print(JDs)
# figure out the correct order of files, based on a split at 4.71 radians
def file_lst_for_sort(fn):
    """Sort key: the file's LST rotated so the ordering wraps at 4.71 rad."""
    raw_lst = float(fn.split('.LST.')[-1].split('.sum.')[0])
    # Modular arithmetic maps (lst - 4.71) into [0, 2*pi), which for LSTs in
    # [0, 2*pi) is exactly the original "add 2*pi if negative" branch.
    return (raw_lst - 4.71) % (2 * np.pi)
# All LST-binned files for this epoch, ordered with the 4.71 rad wrap key.
lst_bin_files = sorted(glob.glob(f'/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/LSTBIN/epoch_{epoch}/zen.grp1.of1.LST.*.sum.uvh5'), key=file_lst_for_sort)
# load data for the center file in the list
center_lst_bin_file = lst_bin_files[len(lst_bin_files) // 2]
hd = HERAData(center_lst_bin_file)
data, flags, nsamples = hd.read()
# figure out which antenna numbers have more than 0 samples
ants_with_samples = set([])
for bl in nsamples:
    sample_sum = np.sum(nsamples[bl])
    if sample_sum > 0:
        # an antenna counts as "good" if any baseline it joins has samples
        for ant in utils.split_bl(bl):
            ants_with_samples.add(ant[0])
def Array_Plot():
    """Plot antenna positions for the central LST-binned file; antennas with
    no unflagged samples are drawn as red, unfilled circles."""
    plt.figure(figsize=(8, 8), dpi=75)
    # Invisible scatter just to set the axis limits to the array's extent.
    plt.scatter(np.array([hd.antpos[ant][0] for ant in hd.data_ants]),
                np.array([hd.antpos[ant][1] for ant in hd.data_ants]), c='w', s=0)
    for ant in hd.data_ants:
        pos = hd.antpos[ant]
        bad = ant not in ants_with_samples
        # BUGFIX: the original used fill=(~bad), but `~` on a Python bool is
        # bitwise NOT (~True == -2, ~False == -1) — both truthy, so every
        # circle was filled. `not bad` gives the intended open red circles.
        plt.gca().add_artist(plt.Circle(tuple(pos[0:2]), radius=7,
                                        fill=(not bad), color=['grey','r'][bad]))
        plt.text(pos[0],pos[1], str(ant), va='center', ha='center', color='w')
    plt.xlabel("Antenna East-West Position (meters)")
    plt.ylabel("Antenna North-South Position (meters)")
    plt.title(f'Antenna Positions for Epoch {epoch} at LST = {np.mean(data.lsts * 12 / np.pi):.2f} hours (Flags in Red)');
    plt.axis('equal')
    plt.tight_layout()
# # Figure 1: Array Configuration and Flagged Antennas
#
# This plot shows the array configuration in the central file of the LST-binner, including flagged antennas (those involved in no baselines with any samples).
Array_Plot()
# +
# get largest redundant groups over 40 m (so we get to see some fringes)
reds = redcal.get_reds({ant: hd.antpos[ant] for ant in hd.data_ants}, pols=['ee', 'nn'])
# exclude baselines that have zero samples anywhere in the central file
ex_bls=set([bl for bl in nsamples if np.sum(nsamples[bl]) == 0])
reds = redcal.filter_reds(reds, ex_bls=ex_bls, antpos=hd.antpos, min_bl_cut=40)
# sort groups largest-first so reds[0] / reds[1] are the biggest groups
reds = sorted(reds, key=len, reverse=True)
# pick out middle baselines (one per pol, generally)
bls_to_compare = [sorted(reds[n])[len(reds[n]) // 2] for n in range(2)]
# -
# load bls_to_compare, starting from middle JD and continuing until we find one where the baselines are not entirely flagged
for JD in np.roll(JDs, len(JDs) // 2):
    jd_files = sorted(glob.glob(f'/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/{JD}/*.smooth_calibrated.uvh5'))
    # restrict to that night's files overlapping the central LST-binned file
    files_to_load = match_times(center_lst_bin_file, jd_files)
    hd2 = HERAData(files_to_load)
    d2, f2, n2 = hd2.read(bls=bls_to_compare, axis='blt')
    if not np.any([np.all(f2[bl]) for bl in bls_to_compare]):
        break # this JD has the baselines not completely flagged
def Compare_To_Night():
    """For each baseline in `bls_to_compare`, plot LST-binned phase/amplitude
    (bottom row) against the same baseline's phase/amplitude on the single
    night `JD` selected above (top row), on matching LST and color scales."""
    for bl in bls_to_compare:
        display(HTML(f'<h2>Baseline {bl}:</h2>'))
        plt.figure(figsize=(18,8))
        # use this to match the yrange to the LST-binned data
        ylim=[data.lsts[-1]*12/np.pi, data.lsts[0]*12/np.pi]
        # plot LST-binned Phase
        plt.subplot(223)
        plt.imshow(np.where(flags[bl], np.nan, np.angle(data[bl])), aspect='auto', cmap='twilight',
                   extent=[data.freqs[0]/1e6, data.freqs[-1]/1e6, data.lsts[-1]*12/np.pi, data.lsts[0]*12/np.pi])
        plt.xlabel('Frequency (MHz)')
        plt.ylabel('LST (Hours)')
        plt.ylim(ylim)
        plt.title(f'{bl}: Epoch {epoch} LST-Binned Phase')
        plt.colorbar(label='Phase (radians)')
        # plot LST-binned amplitude
        plt.subplot(224)
        plt.imshow(np.where(flags[bl], np.nan, np.abs(data[bl])), aspect='auto', cmap='inferno', vmin=0,
                   extent=[data.freqs[0]/1e6, data.freqs[-1]/1e6, data.lsts[-1]*12/np.pi, data.lsts[0]*12/np.pi])
        plt.xlabel('Frequency (MHz)')
        plt.ylabel('LST (Hours)')
        plt.ylim(ylim)
        plt.title(f'{bl}: Epoch {epoch} LST-Binned Amplitude')
        plt.colorbar(label='Amplitude (Jy)')
        # reuse the LST-binned color scale for the single-night amplitude panel
        vmin, vmax = plt.gci().get_clim()
        # plot phase on single night
        plt.subplot(221)
        plt.imshow(np.where(f2[bl], np.nan, np.angle(d2[bl])), aspect='auto', cmap='twilight',
                   extent=[d2.freqs[0]/1e6, d2.freqs[-1]/1e6, d2.lsts[-1]*12/np.pi, d2.lsts[0]*12/np.pi])
        plt.xlabel('Frequency (MHz)')
        plt.ylabel('LST (Hours)')
        plt.ylim(ylim)
        plt.title(f'{bl}: Phase on {JD}')
        plt.colorbar(label='Phase (radians)')
        # plot amplitude on single night
        plt.subplot(222)
        plt.imshow(np.where(f2[bl], np.nan, np.abs(d2[bl])), aspect='auto', cmap='inferno', vmin=vmin, vmax=vmax,
                   extent=[d2.freqs[0]/1e6, d2.freqs[-1]/1e6, d2.lsts[-1]*12/np.pi, d2.lsts[0]*12/np.pi])
        plt.xlabel('Frequency (MHz)')
        plt.ylabel('LST (Hours)')
        plt.ylim(ylim)
        plt.title(f'{bl}: Amplitude on {JD}')
        plt.colorbar(label='Amplitude (Jy)')
        plt.tight_layout()
        plt.show()
        display(HTML('<hr style="height:3px">'))
# # Figure 2: Compare Single Night's Data to LST-Binned Data
#
# Compares amplitude and phase between a single night (picked so that the baselines aren't totally flagged) and the LST-binned results.
Compare_To_Night()
# get original integration time in seconds (times are in days)
tint = np.median(np.diff(d2.times)) * 24 * 3600
# load night-to-night standard deviations
std_bin_file = center_lst_bin_file.replace('.LST.', '.STD.')
hdstd = HERAData(std_bin_file)
data_std, flags_std, nsamples_std = hdstd.read()
# +
# lists to store estimates of the noise variance in the LST-binned data products
all_obs_var = []
all_predicted_var = []
all_interleaved_var = []
all_predicted_binned_var = []
# Loop over baselines
for bl in data.bls():
    ant1, ant2 = utils.split_bl(bl)
    auto1 = utils.join_bl(ant1, ant1)
    auto2 = utils.join_bl(ant2, ant2)
    if ant1[1] != ant2[1]:
        continue # cross polarized
    if auto1 == auto2:
        continue # ignore autocorrelations in assessing noise
    # Flag integrations with fewer than 10 samples
    flags_here = deepcopy(flags[bl])
    flags_here |= flags[auto1] | flags[auto2]
    flags_here |= (nsamples[bl] < 10) | (nsamples[auto1] < 10) | (nsamples[auto2] < 10)
    # Predicted night-to-night visibility variance
    predicted_var = noise.predict_noise_variance_from_autos(bl, data, dt=tint)
    predicted_var[flags_here] = np.nan
    all_predicted_var.append(predicted_var)
    # Observed night-to-night visibility variance
    obs_var = deepcopy(data_std[bl])**2
    obs_var[flags_here] = np.nan
    obs_nsamples = deepcopy(nsamples_std[bl])
    obs_nsamples[flags_here] = np.nan
    # N/(N-1) correction turns the biased std^2 into an unbiased variance
    obs_var *= (obs_nsamples / (obs_nsamples - 1))
    all_obs_var.append(obs_var)
    # Predicted visibility variance after LST-binning
    predicted_binned_var = noise.predict_noise_variance_from_autos(bl, data, dt=tint, nsamples=nsamples)
    predicted_binned_var[flags_here] = np.nan
    all_predicted_binned_var.append(predicted_binned_var)
    # Observed visibility variance after LST-binning (frequency-interleaved estimate)
    data_here = deepcopy(data[bl])
    data_here[flags_here] = np.nan
    interleaved_variance = noise.interleaved_noise_variance_estimate(data_here, kernel=[[-.5,1,-.5]])
    all_interleaved_var.append(interleaved_variance)
# -
# average all visibility variances over times and unflagged antennas
mean_obs_var = np.nanmean(np.abs(all_obs_var), axis=(0,1))
mean_predicted_var = np.nanmean(all_predicted_var, axis=(0,1))
mean_interleaved_var = np.nanmean(np.abs(all_interleaved_var), axis=(0,1))
mean_predicted_binned_var = np.nanmean(all_predicted_binned_var, axis=(0,1))
def Noise_Comparison():
    """Compare observed noise (night-to-night variance, top-left; interleaved
    variance of the binned data, top-right) against the noise predicted from
    autocorrelations; bottom row shows the observed/predicted ratios."""
    fig, ax = plt.subplots(2,2, figsize=(16,8), sharex='col', gridspec_kw={'height_ratios': [2, 1]})
    plt.subplots_adjust(hspace=.0)
    ax=ax.flatten()
    ax[0].plot(hd.freqs/1e6, mean_obs_var, lw=2, label='Night-to-Night Variance of\nData LST-Binned Together')
    ax[0].plot(hd.freqs/1e6, mean_predicted_var, lw=1, label='Predicted Variance from LST-Binned\nAutocorrelations')
    ax[0].set_ylabel('Nightly Visibility Variance (Jy$^2$) ')
    ax[0].set_title(f'Visibility Variance Across Nights at {np.round(hd.lsts[0]*12/np.pi,3)}—{np.round(hd.lsts[-1]*12/np.pi,3)} Hours LST'
                    '\n(Mean Over Unflagged Times and Baselines)')
    ax[0].legend()
    ax[1].plot(hd.freqs/1e6, mean_interleaved_var, lw=2, label='Variance from Frequency-Interleaving\nof LST-Binned Data')
    ax[1].plot(hd.freqs/1e6, mean_predicted_binned_var, lw=1, label='Predicted Variance from LST-Binned\nAutocorrelations and N$_{samples}$')
    ax[1].set_ylabel('LST-Binned Visibility Variance (Jy$^2$)')
    ax[1].set_title(f'Variance of LST-Binned Visibilities at {np.round(hd.lsts[0]*12/np.pi,3)}—{np.round(hd.lsts[-1]*12/np.pi,3)} Hours LST'
                    '\n(Mean Over Unflagged Times and Baselines)')
    ax[1].legend()
    # ratio panels: observed / predicted should hover near 1 if noise is understood
    ax[2].plot(hd.freqs/1e6, mean_obs_var/mean_predicted_var, 'k-', lw=1)
    favg_rat = np.nanmean(mean_obs_var/mean_predicted_var)
    ax[2].plot(hd.freqs/1e6, np.ones_like(hd.freqs) * favg_rat, '--', c='grey', label=f'Frequency-Averaged Ratio: {favg_rat:.3f}')
    ax[2].set_xlabel('Frequency (MHz)')
    ax[2].set_xlim([100,200])
    ax[2].set_ylim([.9, 1.5])
    ax[2].set_ylabel('Observed / Predicted')
    ax[2].legend(loc='upper right')
    ax[3].plot(hd.freqs/1e6, mean_interleaved_var/mean_predicted_binned_var, 'k-', lw=1)
    favg_rat = np.nanmean(mean_interleaved_var/mean_predicted_binned_var)
    ax[3].plot(hd.freqs/1e6, np.ones_like(hd.freqs) * favg_rat, '--', c='grey', label=f'Frequency-Averaged Ratio: {favg_rat:.3f}')
    ax[3].set_xlabel('Frequency (MHz)')
    ax[3].set_ylim([.9, 1.5])
    ax[3].set_xlim([100,200])
    ax[3].set_ylabel('Observed / Predicted')
    ax[3].legend(loc='upper right')
    plt.tight_layout()
# # Figure 3: Comparison of Noise Metrics and Predicted Noise
#
# Comparison of noise predicted from autocorrelations (and $N_{samples}$) to the noise measured either from the standard deviation across nights or from frequency-interleaving.
#
# Based on [Validation Test 4.0.0b](https://github.com/HERA-Team/hera-validation/blob/master/test-series/4/test-4.0.0b.ipynb) and [Aguirre et al. (2021) Figure 12](https://www.overleaf.com/project/5e7cdde364f7d40001749218) (the H1C IDR2 Validation paper).
Noise_Comparison()
# pick out the baselines in the same groups but only pick the ones with the most total samples from the central file
bls_to_load = [sorted(reds[n], key=lambda bl: np.sum(nsamples[bl]))[-1] for n in range(2)]
# Load those baselines for all LSTs (warnings suppressed: many partial files)
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    hd_full = HERAData(lst_bin_files)
    data_full, flags_full, nsamples_full = hd_full.read(bls=bls_to_load, axis='blt')
# + code_folding=[]
def Plot_Full_Night():
    """Full-epoch waterfalls (phase, amplitude, N_samples vs. frequency and
    LST) for each baseline in `bls_to_load`."""
    for bl in bls_to_load:
        display(HTML(f'<h2>Baseline {bl}:</h2>'))
        plt.figure(figsize=(18,14))
        # handle possible wraps in LST by plotting against row index and
        # labeling ticks with the actual LSTs
        extent=[data_full.freqs[0]/1e6, data_full.freqs[-1]/1e6, len(data_full.lsts), 0]
        yticks = np.linspace(0, len(data_full.lsts) - 1e-10, 10)
        yticklabels = np.round(12 / np.pi * data_full.lsts[np.floor(np.linspace(0, len(data_full.lsts) - 1e-10, 10)).astype(int)], 2)
        # Plot Phase
        plt.subplot(131)
        plt.imshow(np.where(flags_full[bl], np.nan, np.angle(data_full[bl])),
                   aspect='auto', cmap='twilight', interpolation='nearest', extent=extent)
        plt.yticks(yticks, yticklabels)
        plt.xlabel('Frequency (MHz)')
        plt.ylabel('LST (Hours)')
        plt.title(f'{bl}: Epoch {epoch} LST-Binned Phase')
        plt.colorbar(label='Phase (radians)', aspect=50)
        # Plot Amplitude
        plt.subplot(132)
        plt.imshow(np.where(flags_full[bl], np.nan, np.abs(data_full[bl])),
                   aspect='auto', cmap='inferno', interpolation='nearest', norm=LogNorm(), extent=extent)
        plt.yticks(yticks, yticklabels)
        plt.xlabel('Frequency (MHz)')
        plt.ylabel('LST (Hours)')
        plt.title(f'{bl}: Epoch {epoch} LST-Binned Amplitude')
        plt.colorbar(label='Amplitude (Jy)', aspect=50)
        # Plot N Samples
        plt.subplot(133)
        plt.imshow(nsamples_full[bl], aspect='auto', cmap='viridis', interpolation='nearest', extent=extent)
        plt.yticks(yticks, yticklabels)
        plt.xlabel('Frequency (MHz)')
        plt.ylabel('LST (Hours)')
        plt.title(f'{bl}: Epoch {epoch} Samples LST-Binned')
        plt.colorbar(label='Number of Samples', aspect=50)
        plt.tight_layout()
        plt.show()
        display(HTML('<hr style="height:3px">'))
# -
# # Figure 4: Full Night Waterfalls
#
# Full-epoch phases, amplitudes, and $N_{samples}$ for baselines from the same redundant group as above, but picked to have the maximum total samples in that group.
Plot_Full_Night()
# # Metadata
# Record the hera_cal version / git hash for provenance.
print(lstbin.version.history_string())
| notebooks/h1c_idr3_lst_bin_inspect.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import cv2

# Load the chessboard photo (OpenCV reads in BGR channel order).
ajedrez = cv2.imread('ajedrez_real.jpg')
plt.imshow(ajedrez)

# Corner detection operates on a single-channel (grayscale) image.
ajedrez_gris = cv2.cvtColor(ajedrez, cv2.COLOR_BGR2GRAY)
plt.imshow(ajedrez_gris, cmap='gray')

# Shi-Tomasi corners: up to 5 corners, quality 0.01, min distance 4 px.
esquinas = cv2.goodFeaturesToTrack(ajedrez_gris, 5, 0.01, 4)
# np.int0 (alias of np.intp) was removed in NumPy 2.0; truncate to integer
# pixel coordinates with an explicit astype instead.
esquinas = esquinas.astype(np.intp)
esquinas

# Draw a circle on each detected corner.
# cv2.circle(image, center_coordinates, radius, color, thickness)
for i in esquinas:
    x, y = i.ravel()
    cv2.circle(ajedrez, (x,y), 4, color=(0,255,255), thickness=8)
plt.imshow(ajedrez)
| ClaseJueves15102020/Esquinas/ajedrez_vida_real.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Python Programming for Absolute Beginners @ 36c3
# Each of these 'cells' contains code which can be executed by selecting the cell and pressing **CTRL + Enter**. If you do not have a keyboard, you can select the cell and press the **Run** button on top.
# ## 1. Hello World
print('Hello 36c3!')
print('It is lovely to see you!')
# A loop repeats its body — here, five greetings.
for i in range(5):
    print('Hello!')
# ## 2. The Basics
# ### 2.1 Variables
# Variables are created by assignment; Python infers the type.
a = 13
b = 'Hello <PASSWORD>'
c = 36.3
b
# ### 2.2 Lists
l = [1, 2, 3, 4, a]
l[0]
la = [1, 2, 3]
lb = [4, 5, 6]
# Lists can be nested: a list of lists.
lol = [la, lb]
lol[0][1]
# ### 2.3 Loops
box = ['i0', 'i1', 'i2']
# The for-loop visits each item of the list in order.
for item in box:
    print(item)
# ### 2.4 If-Constructions
# +
a = 10
if a > 16:
    print('A is greater than 15')
else:
    print('A is not greater than 15')
# -
# ### 2.5 Functions
# There are lots and lots of built-in functions!
print('Hello')
round(3.14159)
round(3.14159, ndigits=2)
# ... but of course we can create our own!
def add(a, b):
    """Return the sum of `a` and `b`."""
    return a + b
add(5, 10)  # -> 15
add(2, 2)  # -> 4
# ## 3. The Pizza Problem
# ### 3.1 Modeling the Pizzas
# +
# A list of pizzas. Each pizza is represented as a list:
# [name, [size_1, size_2], price, shape]; size_1 is the diameter for circles,
# and (size_1, size_2) are width/height for rectangles.
pizzas = [
    ['small', [26, 0], 4.80, 'circle'],
    ['large', [30, 0], 5.50, 'circle'],
    ['family', [46, 33], 13.00, 'rectangle'],
]
# -
pizzas[0]
# ### 3.2 Determining the Areas
# A function to calculate area based on the shape and the size
def pizza_area(size, shape):
    """Return the area (cm^2) of a pizza.

    `size` is a two-element sequence: [diameter, 0] for a 'circle',
    [width, height] for a 'rectangle'. Raises ValueError for an unknown
    shape (previously this fell through to an UnboundLocalError).
    """
    if shape == 'circle':
        return 3.141 * (size[0] / 2) ** 2
    if shape == 'rectangle':
        return size[0] * size[1]
    raise ValueError(f"unknown pizza shape: {shape!r}")
# Let's calculate these areas for every pizza in the menu
for pizza in pizzas:
    area = pizza_area(pizza[1], pizza[3])
    print(f'Pizza {pizza[0]} has an area of {area} cm^2')
# ### 3.3 Determining PTERs and the Best Pizza
def pizza_pter(area, price):
    """Return the pizza-to-euro ratio: area (cm^2) per unit of price."""
    return area / price
# +
best_pizza = '' # An empty string (= sequence of characters)
best_pter = 0
worst_pizza = ''
worst_pter = 9999
# Scan all pizzas, tracking the best and worst area-per-euro ratios.
for p in pizzas:
    area = pizza_area(p[1], p[3])
    pter = pizza_pter(area, p[2])
    if pter > best_pter:
        best_pter = pter
        best_pizza = p[0]
    if pter < worst_pter:
        worst_pter = pter
        worst_pizza = p[0]
print(f'The best pizza is "{best_pizza}" with a PTER of {round(best_pter)}!')
print(f'The worst pizza is "{worst_pizza}" with a PTER of {round(worst_pter)}!')
# -
# **A final note:** While this implementation works, it is far from elegant!
# **(1)** If you want to challenge yourself, read/watch up on dictionaries, and try to recreate a dictionary-based solution.
# **(2)** If you then want to challenge yourself further, read/watch up on classes and object-oriented programming and build a solution based on that.
| Python-Programming-for-Absolute-Beginners.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/iamdanialkamali/ImbalancedLittleData/blob/master/ImbalancedLittleDataipynb.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="jX23jnhn3xU6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 945} outputId="1b387aba-dc59-4cea-94dd-f0312e69fcf0"
# !pip install hazm
# !pip install parsivar
# !pip install bert-for-tf2
# !pip install Unidecode
# !pip install transformers
import hazm
import parsivar
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Input
from tensorflow.keras.utils import plot_model
# + id="HUP75nlEjkf5" colab_type="code" colab={}
# Fix every source of randomness so experiment runs are reproducible.
seed_value= 77
# 1. Set the `PYTHONHASHSEED` environment variable at a fixed value
import os
os.environ['PYTHONHASHSEED']=str(seed_value)
# 2. Set the `python` built-in pseudo-random generator at a fixed value
import random
random.seed(seed_value)
# 3. Set the `numpy` pseudo-random generator at a fixed value
import numpy as np
np.random.seed(seed_value)
# 4. Set the `tensorflow` pseudo-random generator at a fixed value
import tensorflow as tf
tf.random.set_seed(seed_value)
# for later versions:
# tf.compat.v1.set_random_seed(seed_value)
# 5. Configure a new global `tensorflow` session
# + id="oZS6euuZ52NK" colab_type="code" colab={}
# Use a context manager so the file handle is closed deterministically
# (previously the handle was opened and never closed).
with open('./sentiment.txt') as file:
    raw_input_data = file.readlines()
# + id="VXKie5ZV5_FX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="86727ca0-f348-492b-cbbe-7f03b2dca3ed"
labels = []
string_datas = []
for raw_data in raw_input_data:
    # Each line looks like: "<text>";"<label>" — split on the quoted delimiter.
    string_data, label = raw_data.split("\";\"")
    labels.append(label.strip())
    string_datas.append(string_data.strip())
df = pd.DataFrame(list(zip( labels,string_datas)),
               columns =[ 'label','text'])
df.head()
# + id="oxkEvM287Pga" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="d1803ce6-b10e-41cd-eacb-80ff19070093"
# Class balance of the raw labels.
chart = sns.countplot(df.label)
plt.title("Number of examples per intent")
# BUGFIX: matplotlib text properties are lowercase; `Rotation=50` is not a
# valid set_xticklabels keyword and raises on current matplotlib.
chart.set_xticklabels(chart.get_xticklabels(), rotation=50)
plt.show()
# + id="Li7ThvKrT8JV" colab_type="code" colab={}
# + [markdown] id="u7nBiajL7bS2" colab_type="text"
# text quality is poor
# + id="DRvVSEp27ass" colab_type="code" colab={}
# len(hazm.Normalizer().normalize(sample).split())
# + id="ogWAsnfl8kg4" colab_type="code" colab={}
# len(parsivar.Normalizer().normalize(hazm.Normalizer().normalize(sample)).split())
# + id="SaWzSswh-RfO" colab_type="code" colab={}
# a = zip(hazm.Normalizer().normalize(sample).split(),parsivar.Normalizer().normalize(sample).split()+[0]*12)
# for x in a:
# print(x)
# + [markdown] id="-hrypCP_-waO" colab_type="text"
# ### HAZM looks better
# + id="nb6A3USxAAK-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="0a399412-43d6-45fd-d7e0-433241a6435e"
import string
from unidecode import unidecode
def remove_punctuation(text):
    """Strip every ASCII punctuation character from `text`."""
    # str.translate with a deletion table removes all of them in one pass.
    deletion_table = str.maketrans('', '', string.punctuation)
    return text.translate(deletion_table)
def mysplit(s):
    """Insert a space before a trailing run of ASCII digits, then before a
    trailing run of Persian digits (separates numbers stuck onto words).

    Note: each pass rejoins with " ".join, so inputs without trailing digits
    accumulate trailing spaces — matching the original behavior.
    """
    for digit_chars in ('0123456789', '۰۱۲۳۴۵۶۷۸۹'):
        stem = s.rstrip(digit_chars)
        s = " ".join([stem, s[len(stem):]])
    return s
def check_sticked(data):
    """Apply `mysplit` to every whitespace-separated token and rejoin."""
    return " ".join(mysplit(token) for token in data.split())
# Normalization pipeline: split digits stuck to words, hazm-normalize,
# then strip punctuation.
df['normalized'] = df['text'].apply(check_sticked)
df['normalized'] = df['normalized'].apply(hazm.Normalizer().normalize)
df['normalized'] = df['normalized'].apply(remove_punctuation)
df.head()
# + id="YmUcQUmlUpjH" colab_type="code" colab={}
# + id="fgHYZ9MtIuIX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="ab762c05-6caf-4c34-9c8c-dacd23e81272"
# !wget https://raw.githubusercontent.com/NeelShah18/emot/master/emot/emo_unicode.py
# + id="rBBo5xWfIwiu" colab_type="code" colab={}
from emo_unicode import *
import re
# + id="kweaqOFSIHom" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="49ff8dfd-d404-487c-b29a-ae9081fb21d4"
def convert_emoticons(text):
    # Replace emoticons/emoji with their underscore-joined textual names.
    # NOTE(review): `emot` is interpolated into the regex without re.escape,
    # so any emoticon containing regex metacharacters raises re.error; the
    # bare `except: pass` silently skips those. It also swallows the KeyError
    # from UNICODE_EMO[emot] for keys that exist only in EMOTICONS, meaning
    # the second substitution rarely runs. Consider re.escape plus narrower
    # exception handling — TODO confirm intended behavior before changing.
    for emot in EMOTICONS:
        try:
            text = re.sub(u'('+emot+')', "_".join(EMOTICONS[emot].replace(",","").split()), text)
            text = re.sub(r'('+emot+')', "_".join(UNICODE_EMO[emot].replace(",","").replace(":","").split()), text)
        except:
            pass
    return text
df['normalized'] = df['normalized'].apply(convert_emoticons)
df.head()
# + id="6vyLtnxraOgP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="a6a07025-ddf9-4465-c754-3bd6e0bf7071"
from sklearn.utils import shuffle
import random
def tokenize(text):
    """Split a text document into sentences using parsivar's tokenizer."""
    return parsivar.Tokenizer().tokenize_sentences(text)
def augment(data,rate):
    """Augment text documents by shuffling sentence order.

    For each document: tokenize into sentences, then produce `rate`
    progressively re-shuffled orderings (each shuffle starts from the
    previous one). Returns the unique rejoined documents, in first-seen
    order. Note: `reps` collects duplicates but is never returned.
    """
    augmented = []
    reps=[]
    for ng_rev in data:
        tok = tokenize(ng_rev)
        shuffled= [tok]
        for i in range(rate):
            # shuffle the most recent ordering in place, then snapshot it
            random.shuffle(shuffled[-1])
            newl=list(shuffled[-1])
            shuffled.append(newl)
        for k in shuffled:
            s = ' '
            new_rev = s.join(k)
            if new_rev not in augmented:
                augmented.append(new_rev)
            else:
                reps.append(new_rev)
    return augmented
# Separate the raw review texts by sentiment label.
neg = [x[1] for x in filter(lambda x: x[0]=='neg',df.values)]
pos = [x[1] for x in filter(lambda x: x[0]=='pos',df.values)]
# BUGFIX: build the validation sets from the RAW held-out reviews BEFORE
# overwriting neg/pos with their augmented versions. Previously neg_val and
# pos_val sliced the already-augmented training lists, leaking training data
# into validation.
neg_val = augment(neg[:10],5)
neg = augment(neg[10:],11)   # heavier augmentation to offset class imbalance
pos_val = augment(pos[:10],5)
pos = augment(pos[10:],5)
print(len(pos))
print(len(pos_val))
print(len(neg))
print(len(neg_val))
# Shuffle labels and texts together into training and validation frames.
shuffle_x, shuffle_y = shuffle(["pos"]*len(pos)+["neg"]*len(neg),pos+neg)
df = pd.DataFrame(list(zip( shuffle_x, shuffle_y)),
               columns =[ 'label','normalized'])
df_val = pd.DataFrame(list(zip( *shuffle(["pos"]*len(pos_val)+["neg"]*len(neg_val),pos_val+neg_val))),
               columns =[ 'label','normalized'])
# + id="I-Rn70NeaV-t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 306} outputId="096c4503-96b7-4f8d-be71-655a11f232c7"
# Class balance after augmentation.
chart = sns.countplot(df.label)
plt.title("Number of examples per intent")
# BUGFIX: the keyword is lowercase `rotation`; `Rotation` is not a valid
# matplotlib text property and raises on current matplotlib.
chart.set_xticklabels(chart.get_xticklabels(), rotation=50)
plt.show()
# + id="Q_XqTfM0IVna" colab_type="code" colab={}
# Load the ParsBERT sentiment checkpoint. Only the tokenizer is used here;
# the TF model itself is instantiated further below, and `config` is loaded
# but not otherwise used in this notebook.
from transformers import AutoConfig, AutoTokenizer, TFAutoModel
config = AutoConfig.from_pretrained("HooshvareLab/bert-fa-base-uncased-sentiment-deepsentipers-binary")
tokenizer = AutoTokenizer.from_pretrained("HooshvareLab/bert-fa-base-uncased-sentiment-deepsentipers-binary")
# + id="emtqAHbk-vad" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 121} outputId="d1f2d2cc-c626-46c5-8c66-81c98879cfeb"
# tokenized_strings = list(map(parsivar.Tokenizer().tokenize_words,normalized_strings))
# df['normalized'] = df['normalized'].apply(parsivar.Tokenizer().tokenize_words)
# Tokenize and pad the training reviews to a fixed length of 500 tokens
# (must match `max_len` used when building the Keras model below).
# NOTE(review): `pad_to_max_length` is deprecated in newer transformers
# releases in favour of padding="max_length" — confirm the installed version.
encoded = tokenizer.batch_encode_plus(
    list(df['normalized'].values),
    add_special_tokens=True,
    max_length=500,
    return_token_type_ids=True,
    pad_to_max_length=True,
    return_attention_mask=True,
)
# Keras expects numpy arrays, not the tokenizer's plain Python lists.
for x in encoded.keys():
    encoded[x] = np.array(encoded[x])

# Same encoding for the fixed validation split.
encoded_val = tokenizer.batch_encode_plus(
    list(df_val['normalized'].values),
    add_special_tokens=True,
    max_length=500,
    return_token_type_ids=True,
    pad_to_max_length=True,
    return_attention_mask=True,
)
for x in encoded_val.keys():
    encoded_val[x] = np.array(encoded_val[x])
# print(len(encoding[1]))
# df.head()
encoded_val.keys()  # sanity check: input_ids, token_type_ids, attention_mask
# + id="ZARybroDAg8f" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="f065ed7f-686c-4af7-b9ce-8646d7d7f166"
# def stem(text):
# """custom function to remove the punctuation"""
# # text = list(map(parsivar.FindStems().convert_to_stem,text))
# stemmed_sample = np.array(list(map(hazm.Stemmer().stem,text)))
# return stemmed_sample
# df['tokenized'] = df['tokenized'].apply(stem)
# tokenized_strings = np.array(list(map(stem,tokenized_strings)))
# df.head()
# ['input_ids'] = np.array(encoded['input_ids'])
# df['attention_mask'] = np.array(encoded['attention_mask'])
# Assemble the final training table. Row order is consistent: `encoded` was
# built from df['normalized'], which is shuffle_y in the same order as shuffle_x.
final_doc = pd.DataFrame(list(zip( shuffle_x, shuffle_y,np.array(encoded['input_ids']),np.array(encoded['attention_mask']))),
                         columns =[ 'label','normalized','input_ids','attention_mask'])
final_doc.head()
# + id="H6Yn_JvmjzPL" colab_type="code" colab={}
final_doc.to_csv("coded_data.csv")
# + id="OiZCSmGy-ZLd" colab_type="code" colab={}
# + id="NlPkrkDyD2xh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="06916c4d-6728-4390-d68d-696767dc90a1"
# !wget https://raw.githubusercontent.com/kharazi/persian-stopwords/master/persian
# + id="8Z3Ugp1a9hh7" colab_type="code" colab={}
# STOPWORDS = set(map(str.strip,open('./persian').readlines()))
# def remove_stopwords(text):
# """custom function to remove the stopwords"""
# return np.array([word for word in text if word not in STOPWORDS])
# df['tokenized'] = df['tokenized'].apply(remove_stopwords)
# df.head()
# + id="CEzpJiDLEOcQ" colab_type="code" colab={}
# + id="GTtvW-8HVzgo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="a9f97ea6-1281-40bd-86da-e9769704d357"
# tokenizer.
# encoding = tokenizer.padencode_plus(
# review,
# add_special_tokens=True,
# max_length=500,
# return_token_type_ids=False,
# pad_to_max_length=True,
# return_attention_mask=True,
# )
# encoded_texts = np.array(list(map(tokenizer.convert_tokens_to_ids,df['tokenized'].values)))
# df['encoded_text'] = df['tokenized'].apply(tokenizer.convert_tokens_to_ids)
# encoded_texts = np.array(list(map(tokenizer.convert_tokens_to_ids,df['encoded_text'].values)))
# encoded_texts = np.array(list(map(tokenizer.pad,df['tokenized'].values)))
df.head()
# + id="-xGA5671e9Cl" colab_type="code" colab={}
# + id="Vea9edaaxSPO" colab_type="code" colab={}
# df['encoded_text'].values.shape
# + id="bTg3cCRBKfEH" colab_type="code" colab={}
# np.array(encoded_texts[1]).shape
# + id="XAi1TsVrPbr3" colab_type="code" colab={}
# + id="JMGv-kRZJais" colab_type="code" colab={}
# One-hot encode labels: 'pos' -> [1, 0], 'neg' -> [0, 1].
# (The unused `ll = ['pos', 'neg']` helper list has been removed.)
encoded_labels = np.array([[1, 0] if d == 'pos' else [0, 1] for d in df.label.values])
encoded_labels_val = np.array([[1, 0] if d == 'pos' else [0, 1] for d in df_val.label.values])
# + id="mhWc6ubpN9vJ" colab_type="code" colab={}
# print(np.array(df.encoded_text.values))
# encoded_labels = tf.convert_to_tensor(np.array(list(map(lambda d : np.array([1,0]) if d =='pos' else np.array([0,1]),df.label.values))))
# encoded_texts = tf.convert_to_tensor(np.array(df['encoded_text'].values))
# for x in df.encoded_text.values:
# print(x.shape)
# print(encoded_texts.shape)
# encoded_texts = encoded_texts.reshape((99,500,...))
# + id="u0m_c8bwGIE-" colab_type="code" colab={}
# + id="Sm-0PNAPTRpL" colab_type="code" colab={}
from tensorflow.keras import backend as K
def recall_m(y_true, y_pred):
    """Batch-wise recall: TP / (TP + FN), with epsilon to avoid division by zero."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_positives + K.epsilon())
def precision_m(y_true, y_pred):
    """Batch-wise precision: TP / (TP + FP), with epsilon to avoid division by zero."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (predicted_positives + K.epsilon())
def f1_m(y_true, y_pred):
    """Batch-wise F1: harmonic mean of precision_m and recall_m (epsilon-guarded)."""
    p = precision_m(y_true, y_pred)
    r = recall_m(y_true, y_pred)
    return 2 * ((p * r) / (p + r + K.epsilon()))
# + id="TY4GnnUSUUx3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="05dc517e-5300-4d41-d310-f37820e121ac"
# Load the pretrained ParsBERT encoder and freeze it: only the dense head
# defined in create_model() below is trained.
bert_model = TFAutoModel.from_pretrained("HooshvareLab/bert-fa-base-uncased-sentiment-deepsentipers-binary")
bert_model.trainable = False
bert_model.summary()
# + id="4AOHg7PEvTNr" colab_type="code" colab={}
# encoded_bert_data = bert_model(encoded.values())
# + id="-CXBhDWLx3PK" colab_type="code" colab={}
# # !pip install imbalanced-learn
# + id="hdLzaFxqvadx" colab_type="code" colab={}
# + id="a3_4ir0Nx-fX" colab_type="code" colab={}
# from imblearn.over_sampling import SVMSMOTE,RandomOverSampler
# from imblearn.under_sampling import RandomUnderSampler
# from imblearn.pipeline import Pipeline
# over = SVMSMOTE(sampling_strategy=0.1)
# under = RandomUnderSampler(sampling_strategy=0.5)
# steps = [('o', over)]
# pipeline = Pipeline(steps=steps)
# X, y = pipeline.fit_resample(encoded_bert_data[1].numpy(), np.array([1 if x =="pos" else 0 for x in labels]))
# + id="kx1zrI36OAtZ" colab_type="code" colab={}
max_len = 500
from tensorflow.keras import layers
from tensorflow.keras import regularizers
import tensorflow
def create_model():
    """Build a frozen-BERT + dense-head binary sentiment classifier.

    Inputs (positional order expected by TF BERT): input_ids, attention_mask,
    token_type_ids — each of shape (batch, max_len).
    Output: 2-way softmax matching the one-hot labels ([pos, neg]).
    """
    # FIX: use `tensorflow.int32` — this notebook does `import tensorflow`,
    # so the previous `tf.int32` raised a NameError unless some earlier cell
    # happened to define `tf`.
    input_ids = layers.Input(shape=(max_len,), dtype=tensorflow.int32)
    input_mask = layers.Input(shape=(max_len,), dtype=tensorflow.int32)
    input_types = layers.Input(shape=(max_len,), dtype=tensorflow.int32)
    # embedding[1] is BERT's pooled [CLS] representation, shape (batch, hidden).
    embedding = bert_model([input_ids, input_mask, input_types])
    x = layers.Dense(200)(embedding[1])  # linear projection, no activation
    # Deep regularized MLP head (dead commented-out layer variants removed).
    x = layers.Dense(200, 'relu', kernel_regularizer=regularizers.l2(1e-4))(x)
    x = layers.Dense(200, 'relu')(x)
    x = layers.Dense(200, 'relu', kernel_regularizer=regularizers.l2(1e-4))(x)
    x = layers.Dense(200, 'relu')(x)
    x = layers.Dense(200, 'relu', kernel_regularizer=regularizers.l2(1e-4))(x)
    x = layers.Dense(150, 'relu')(x)
    x = layers.Dense(100, 'relu', kernel_regularizer=regularizers.l2(1e-4))(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(80, 'relu', kernel_regularizer=regularizers.l2(1e-4))(x)
    x = layers.Dropout(0.5)(x)
    x = layers.Dense(50, 'relu', kernel_regularizer=regularizers.l2(1e-4))(x)
    end_probs = layers.Dense(2, 'softmax')(x)
    model = tensorflow.keras.Model(
        inputs=[input_ids, input_mask, input_types],
        outputs=[end_probs],
    )
    model.compile(
        optimizer=tensorflow.keras.optimizers.Adam(1e-5),
        # NOTE(review): with 2-way one-hot labels and a softmax output,
        # CategoricalCrossentropy is the canonical loss; BinaryCrossentropy
        # is kept here to preserve the original training objective.
        loss=tensorflow.keras.losses.BinaryCrossentropy(),
        metrics=['acc', f1_m, precision_m, recall_m],
    )
    return model
# + id="B-0ydS1c9itC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 756} outputId="deb3f6d5-9b06-46b6-dbee-125f8608b7f6"
from sklearn.model_selection import KFold
# Build one instance just to print the architecture; each CV fold below
# re-creates a fresh model so folds do not share trained weights.
model = create_model()
model.summary()
# + id="4oy68zANODTq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="84d74570-361e-4417-b087-22fb62a0247d"
# 10-fold cross-validation over the augmented training set; the same fixed
# validation encoding (encoded_val) is monitored every fold.
acc_per_fold = []
loss_per_fold = []
fold_no = 1
for train, test in KFold(10).split(encoded['input_ids'], encoded_labels):
    model = create_model()  # fresh head each fold
    # Accuracy baseline: majority-class frequency of this fold's training labels.
    baseline = len(list(filter(lambda x: bool(x[1] == 1), encoded_labels[train]))) / len(encoded_labels[train])
    print(max([baseline, 1 - baseline]))
    # BUG FIX: create_model's inputs are positionally [input_ids,
    # attention_mask, token_type_ids] (the order TF BERT expects), but the
    # feeds previously passed token_type_ids in the attention-mask slot and
    # attention_mask in the segment slot, so BERT attended per segment ids.
    history = model.fit(
        x=[encoded['input_ids'][train], encoded['attention_mask'][train], encoded['token_type_ids'][train]],
        y=encoded_labels[train],
        epochs=200,
        batch_size=10,
        validation_data=(
            [encoded_val['input_ids'], encoded_val['attention_mask'], encoded_val['token_type_ids']],
            encoded_labels_val,
        ),
    )
    baseline = len(list(filter(lambda x: bool(x[1] == 1), encoded_labels[test]))) / len(encoded_labels[test])
    print(max([baseline, 1 - baseline]))
    scores = model.evaluate(
        [encoded['input_ids'][test], encoded['attention_mask'][test], encoded['token_type_ids'][test]],
        encoded_labels[test],
        verbose=0,
    )
    print(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; {model.metrics_names[1]} of {scores[1]*100}%')
    acc_per_fold.append(scores[1] * 100)
    loss_per_fold.append(scores[0])
    fold_no = fold_no + 1

# == Provide average scores ==
print('------------------------------------------------------------------------')
print('Score per fold')
for i in range(0, len(acc_per_fold)):
    print('------------------------------------------------------------------------')
    print(f'> Fold {i+1} - Loss: {loss_per_fold[i]} - Accuracy: {acc_per_fold[i]}%')
print('------------------------------------------------------------------------')
print('Average scores for all folds:')
print(f'> Accuracy: {np.mean(acc_per_fold)} (+- {np.std(acc_per_fold)})')
print(f'> Loss: {np.mean(loss_per_fold)}')
print('------------------------------------------------------------------------')
# + id="0o3cYOFeOFmz" colab_type="code" colab={}
# + id="bTApM6l-OIIU" colab_type="code" colab={}
# Learning curves of the last CV fold: accuracy then loss, validation vs train.
for metric_name in ('acc', 'loss'):
    plt.plot(history.history['val_' + metric_name])
    plt.plot(history.history[metric_name])
    plt.legend(['val_' + metric_name, metric_name])
    plt.show()
| ImbalancedLittleDataipynb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Big Query Machine Learning (BQML)
#
# **Learning Objectives**
# - Understand that it is possible to build ML models in Big Query
# - Understand when this is appropriate
# - Experience building a model using BQML
#
# # Introduction
# BigQuery is more than just a data warehouse, it also has some ML capabilities baked into it.
#
# As of January 2019 it is limited to linear models, but what it gives up in complexity, it gains in ease of use.
#
# BQML is a great option when a linear model will suffice, or when you want a quick benchmark to beat, but for more complex models such as neural networks you will need to pull the data out of BigQuery and into an ML Framework like TensorFlow.
#
# In this notebook, we will build a naive model using BQML. **This notebook is intended to inspire usage of BQML, we will not focus on model performance.**
# ### Set up environment variables and load necessary libraries
# GCP settings, exported to the environment so shell cells can read them.
PROJECT = "cloud-training-demos" # Replace with your PROJECT
REGION = "us-central1" # Choose an available region for Cloud MLE

import os

os.environ.update(PROJECT=PROJECT, REGION=REGION)
# !pip freeze | grep google-cloud-bigquery==1.21.0 || pip install google-cloud-bigquery==1.21.0
# %load_ext google.cloud.bigquery
# ## Create BigQuery dataset
#
# Prior to now we've just been reading an existing BigQuery table; now we're going to create our own, so we need some place to put it. In BigQuery parlance, `Dataset` means a folder for tables.
#
# We will take advantage of BigQuery's [Python Client](https://cloud.google.com/bigquery/docs/reference/libraries#client-libraries-install-python) to create the dataset.
# +
from google.cloud import bigquery

# Create the destination dataset ("folder" for tables) for the BQML model.
bq = bigquery.Client(project = PROJECT)
dataset = bigquery.Dataset(bq.dataset("bqml_taxifare"))
try:
    bq.create_dataset(dataset)  # will fail if dataset already exists
    print("Dataset created")
except Exception:
    # FIX: a bare `except:` also swallows KeyboardInterrupt/SystemExit.
    # NOTE(review): ideally catch google.api_core.exceptions.Conflict only.
    print("Dataset already exists")
# -
# ## Create model
#
# To create a model ([documentation](https://cloud.google.com/bigquery-ml/docs/reference/standard-sql/bigqueryml-syntax-create))
# 1. Use `CREATE MODEL` and provide a destination table for resulting model. Alternatively we can use `CREATE OR REPLACE MODEL` which allows overwriting an existing model.
# 2. Use `OPTIONS` to specify the model type (linear_reg or logistic_reg). There are many more options [we could specify](https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-create#model_option_list), such as regularization and learning rate, but we'll accept the defaults.
# 3. Provide the query which fetches the training data
#
# Have a look at [Step Two of this tutorial](https://cloud.google.com/bigquery/docs/bigqueryml-natality) to see another example.
#
# **The query will take about two minutes to complete**
# %%bigquery --project $PROJECT
CREATE or REPLACE MODEL bqml_taxifare.taxifare_model
OPTIONS(model_type = "linear_reg",
        input_label_cols = ["label"]) AS
-- query to fetch training data
SELECT
  (tolls_amount + fare_amount) AS label,  -- regression target: total fare incl. tolls
  pickup_datetime,
  pickup_longitude,
  pickup_latitude,
  dropoff_longitude,
  dropoff_latitude
FROM
  `nyc-tlc.yellow.trips`
WHERE
  -- Clean Data: drop degenerate trips and coordinates outside the NYC bounding box
  trip_distance > 0
  AND passenger_count > 0
  AND fare_amount >= 2.5
  AND pickup_longitude > -78
  AND pickup_longitude < -70
  AND dropoff_longitude > -78
  AND dropoff_longitude < -70
  AND pickup_latitude > 37
  AND pickup_latitude < 45
  AND dropoff_latitude > 37
  AND dropoff_latitude < 45
  -- repeatable 1/5000th sample, keyed on the pickup timestamp
  AND ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 5000)) = 1
# ## Get training statistics
#
# Because the query uses a `CREATE MODEL` statement to create a table, you do not see query results. The output is an empty string.
#
# To get the training results we use the [`ML.TRAINING_INFO`](https://cloud.google.com/bigquery/docs/reference/standard-sql/bigqueryml-syntax-train) function.
#
# Have a look at [Step Three and Four of this tutorial](https://cloud.google.com/bigquery/docs/bigqueryml-natality) to see a similar example.
# %%bigquery --project $PROJECT
-- Per-iteration training statistics (loss, eval_loss, learning rate, duration).
SELECT
  *
FROM
  ML.TRAINING_INFO(MODEL `bqml_taxifare.taxifare_model`)
# 'eval_loss' is reported as mean squared error, so our RMSE is **8.29**. Your results may vary.
# ## Predict
#
# To use our model to make predictions, we use `ML.PREDICT`. Let's, use the `taxifare_model` you trained above to infer the cost of a taxi ride that occurs at 10:00 am on January 3rd, 2014 going from the Google Office in New York (latitude: 40.7434, longitude: -74.0080) to the JFK airport (latitude: 40.6413, longitude: -73.7781)
#
# Have a look at [Step Five of this tutorial](https://cloud.google.com/bigquery/docs/bigqueryml-natality) to see another example.
# %%bigquery --project $PROJECT
#standardSQL
-- Predict the fare for one hand-built row. ML.PREDICT names its output
-- column predicted_<label_column>, hence predicted_label.
SELECT
  predicted_label
FROM
  ML.PREDICT(MODEL `bqml_taxifare.taxifare_model`,
    (
    SELECT
      TIMESTAMP "2014-01-03 10:00:00" as pickup_datetime,
      -74.0080 as pickup_longitude,
      40.7434 as pickup_latitude,
      -73.7781 as dropoff_longitude,
      40.6413 as dropoff_latitude
    ))
# Our model predicts the cost would be **$22.12**.
# ## Recap
#
# The value of BQML is its ease of use:
#
# - We created a model with just two additional lines of SQL
# - We never had to move our data out of BigQuery
# - We didn't need to use an ML Framework or code, just SQL
#
# There's lots of work going on behind the scenes to make this look easy. For example, BQML is automatically creating a training/evaluation split, tuning our learning rate, and one-hot encoding features if necessary. When we move to TensorFlow these are all things we'll need to do ourselves.
#
# This notebook was just to inspire usage of BQML, the current model is actually very poor. We'll prove this in the next lesson by beating it with a simple heuristic.
#
# We could improve our model considerably with some feature engineering but we'll save that for a future lesson. Also there are additional BQML functions such as `ML.WEIGHTS` and `ML.EVALUATE` that we haven't even explored. If you're interested in learning more about BQML I encourage you to [read the offical docs](https://cloud.google.com/bigquery/docs/bigqueryml).
#
# From here on out we'll focus on pulling data out of BigQuery and building models using TensorFlow, which is more effort but also offers much more flexibility.
# Copyright 2019 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| courses/machine_learning/deepdive/01_bigquery/b_bqml.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hyperspectral image - Applied Machine Learning
# ### <NAME>
# **Index**
# 1. [Introduction](#id1)
# 2. [Importing data](#id2)
# 3. [Unsupervised learning](#id3)
# 4. [Instance selection](#id4)
# 5. [Supervised Learning](#id5)
# 6. [Feature selection](#id6)
# 7. [Ensemble methods](#id7)
# 8. [Conclusions](#id8)<br>
# [Extra: Silhouette coefficient plots](#id9)
# ## 1. Introduction<a name="id1"></a>
# The aim of this project is to classify pixels of hyperspectral satellite images in which labels are available with its corresponding class.
#
# One of the most important characteristics of satellite images is usually the great volume of data they contain (it increases quadratically with the image dimensions and linearly with the number of channels or spectral bands). Another is the reduced number of labelled instances available, due to the high cost of labelling.
# These are the reasons why, in this project, unsupervised learning is going to be used at first with all data (clustering) and after that classification is going to be carried out to classify the whole image using only a reduced subset of the data available (supervised learning).
#
# In both cases the pixels will be considered to be separate instances, that is, the position of it in the image will not be taken into account.
# ## 2. Importing data<a name="id2"></a>
# +
import numpy as np
import scipy.io.matlab as matlab
import matplotlib.pyplot as plt
import sklearn as skl
import sklearn.model_selection as model_selection
from sklearn.cluster import KMeans
from sklearn import metrics

#Necessary to see images in Jupyter Notebook:
# %matplotlib inline

# Reading the file from Matlab format .mat
mat_file = "datasetB3.mat"
mat = matlab.loadmat(mat_file,squeeze_me=True) #returns a dictionary
list(mat.keys()) #variables saved

# Reading data
X = mat["X"]    #image (hypercube 3D: rows x columns x variables)
Xl = mat["Xl"]  #labelled instances (instances x variables)
Yl = mat["Yl"]  #class labels (instances x 1, 0=no class)
del mat         #free the raw dict; X/Xl/Yl hold the data from here on
Yl.shape
Yl_original = Yl.copy()  #kept WITH the 0 (background) entries for plotting later

# Reshape of Ground truth as an image; order="F" (column-major) matches MATLAB
Y = np.reshape(Yl, (X.shape[0], X.shape[1]),order="F")
Y.shape

#------------ Filter background: class 0 is removed ---------
#Class 0 corresponds to pixels whose class is unknown or outside the 9 classes
#of interest; these pixels are excluded from training and validation.
Nc=Yl.max()-Yl.min()+1
if Nc>2:
    Xl = Xl[Yl != 0,:];
    Yl = Yl[Yl != 0]; #Labels without 0's

#Plotting one spectral band next to the ground truth
fig = plt.figure()
fig.set_size_inches(10,10)
ax=plt.subplot(1,2,1)
ax.imshow(X[:,:,1]), ax.axis('off'), plt.title('Image')
ax=plt.subplot(1,2,2)
ax.imshow(Y), ax.axis('off'), plt.title('Ground Truth')

#This part is only to add the background (class map overlaid on the band image)
clasmap=Y;
clasmap_masked = np.ma.masked_where(clasmap<1,clasmap)
plt.imshow(X[:,:,1])
plt.imshow(clasmap_masked)
# -
# ## 3. Unsupervised learning<a name="id3"></a>
# + In this part the clustering algorithm **KMeans** has been used on the whole dataset to obtain a first grouping of the image pixels. The code has been developed so it can accept other images with different dimensions and/or number of variables.
#KMeans on all labelled pixels, with as many clusters as ground-truth classes (9)
kmeans = KMeans(n_clusters = 9, max_iter = 300, init = "k-means++")
Y_pred = kmeans.fit_predict(Xl)  #cluster ids in 0..8, one per pixel
# + Different metrics are studied to compare the results obtained with **KMeans** to the labels given (ground truth):
# Here the results of metrics such as *Adjusted Rand Score* and *Adjusted Mutual Information Score* are shown. These metrics measure the similarity between 2 partitions. <br>
#
# *Adjusted Rand Score* does it by counting pairs and its range goes from -1 to 1. <br>
# *Adjusted Mutual Information Score* is based on the mutual information or conditional entropy concept and its range goes from 0 to 1.<br>
# In both cases a higher value means higher similarity
# +
import warnings
warnings.filterwarnings('ignore') #NOTE: suppresses ALL warnings, not just FutureWarning

# Both scores compare the KMeans partition against the ground-truth labels;
# higher means more similar partitions.
print("Adjusted Rand Score: ",round(metrics.adjusted_rand_score(Yl, Y_pred), 2)) #Range [-1,1]
print("Adjusted Mutual Info Score: ",round(metrics.adjusted_mutual_info_score(Yl, Y_pred), 2)) #Range [0,1]
# -
# Silhouette coefficient is a useful method for interpreting and validating the consistency of the clusters in a dataset. It goes from -1 to 1 and higher values are associated with higher consistency. It's considered a way of measuring how "natural" is a specific partition.
#
# Here below the average silhouette coefficients of both partitions are compared. At the end of the notebook are the plots of the silhouette coefficient: [Extra](#id9).
# Average silhouette (cohesion vs. separation) of each partition over the same data.
print("KMeans silhouette coefficient", round(metrics. silhouette_score(Xl, Y_pred, metric = "euclidean"),2))#Range [-1,1]
print("Ground truth silhouette coefficient", round(metrics. silhouette_score(Xl, Yl, metric = "euclidean"),2))
# The distribution from the labelled data shows a low silhouette coefficient, which means that it does not follow a natural partition of data. This is the reason why unsupervised learning is not going to produce a similar clustering of the image pixels.
# Calinski-Harabasz index (between/within-cluster dispersion ratio); higher = more "natural" partition.
print("Calinski-Harabasz coefficient KMeans", round(metrics.calinski_harabasz_score(Xl, Y_pred),2)) #Clustering KMeans
print("Calinski-Harabasz coefficient Ground truth",round(metrics.calinski_harabasz_score(Xl, Yl),2)) #Ground truth
# The *Calinski-Harabasz* coefficient of both partitions confirms the conclusions drawn from the silhouette coefficient results. The KMeans partition has a higher C-H coefficient, which means it is closer to a natural partition of the data than the labels given.
#
# +
#---------------- Image drawing --------------
Yl_pred = Yl_original.copy()
#Re-insert cluster ids into the full-size label vector (background stays 0).
#NOTE(review): KMeans ids span 0..8, so pixels of cluster 0 share the
#background value 0 and are indistinguishable from background in this plot.
Yl_pred[Yl_pred != 0] = Y_pred
Yl_pred = np.reshape(Yl_pred, (Y.shape[0], Y.shape[1]), order="F")

fig = plt.figure()
fig.set_size_inches(10,10)
ax=plt.subplot(1,2,1)
ax.imshow(Y), ax.axis('off'), plt.title('Ground Truth')
ax=plt.subplot(1,2,2)
ax.imshow(Yl_pred), ax.axis('off'), plt.title('KMeans')

#---------------Remove the comments in case of wanting to add a background--------
#clasmap=Yl_pred;
#clasmap_masked = np.ma.masked_where(clasmap<1,clasmap)
#plt.imshow(X[:,:,1])
#plt.imshow(clasmap_masked)
# -
# In these pictures is easy to see how different are KMeans results from the classes given. <br>
# It should be taken into account also that the classes obtained using KMeans don't have the same color code as the ground truth.
# ## 4. Instance selection<a name="id4"></a>
# + In this part we want to simulate the shortage of labelled data using only 5000 (aprox.) pixels to train a supervised learning classification algorithm. <br>
# In order to achieve this **KMeans** has been used to obtain the centroid of each class and, based on the distance from the different instances to it (distance based on variables, not spatial distance), pick a set of representative instances keeping the original ratio.
# +
import pandas as pd
from itertools import chain

#Per-class containers: instances, labels and class proportion
dictX = {}
dictY = {}
dictprop = {} #class i -> fraction of all labelled pixels

#NOTE(review): class ids 1..9 are hard-coded; Nc (computed earlier) could
#generalize this to datasets with a different number of classes.
for i in range(1,10):
    dictX[i] = Xl[Yl == i,:];
    dictY[i] = Yl[Yl == i];
    dictprop[i] = len(dictX[i])/len(Xl)

#Accumulators for the ~5000 instances used later in classification
X_class = []
Y_class = np.array([])
centers = [] #NOTE(review): declared to keep class centroids but never filled/used

for i in range(1,10):
    #KMeans with a single cluster == centroid of class i
    kmeans2 = KMeans(n_clusters = 1)
    kmeans2.fit(dictX[i])
    #Distance of every instance of class i to its centroid (feature-space, not spatial)
    a = metrics.pairwise_distances(X = kmeans2.cluster_centers_[0].reshape(1,-1), Y = dictX[i])
    #Sort by distance and take every 8th instance, keeping class proportions,
    #so variability is preserved instead of only picking the closest points.
    df1 = pd.DataFrame(dictX[i])
    df1["distance"] = a.reshape((len(dictX[i]),1))
    df1 = df1.sort_values("distance")
    df1 = df1.iloc[0:8*int(5000*dictprop[i]):8,0:-1] #Spaced selection
    X_class.append(np.asarray(df1))
    Y_class = np.append(Y_class, np.repeat(i,int(5000*dictprop[i])))

#X_class to array
X_class = np.array(list(chain(*X_class))) #matrix with the ~5000 selected instances
Y_class = Y_class.reshape(-1,1) #matrix with the ~5000 corresponding labels
# -
# When it comes to picking the representative instances of each class, it has been decided to make a spaced selection to avoid the loss of variability that would mean picking only the points closest to the centroid (1st, 2nd, 3rd...) which would make it a lot harder for the algorithm to generalize classifying the whole image afterwards. <br>
# This has been done by using a spaced selection once ordered based on their distance to the centroid (1st, 8th, 16th...).
#
# ***Of course one of the best strategies would have been to choose random points, but this was part of the problem boundaries.***
# ## 5. Supervised learning<a name="id5"></a>
# + In this section two different classification algorithms have been trained and tested on the reduced dataset to compare their performance on the whole image. **k-NN** (*k-Nearest Neighbors*) is the first of them.
#
# *In both of them Cross Validation has been used to tune the main parameters.*
# +
from sklearn import neighbors
from sklearn.preprocessing import StandardScaler #For normalization
from sklearn.model_selection import GridSearchCV #For Cross Validation

#KNN requires scaled variables:
X_class = StandardScaler().fit_transform(X_class)

#The reduced dataset is split in train and test (80% and 20% each)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X_class, Y_class, test_size= 0.2, random_state = 11)

#-------------CROSS VALIDATION----------------
#Different weights, numbers of neighbors and leaf sizes are tried.
#NOTE(review): leaf_size only affects tree-build speed, not predictions,
#so searching it triples the grid without changing accuracy.
weights= np.array(['uniform','distance'])
kneighbors = np.arange(1,20,1)
leaf_size = np.arange(10,30,5)
tuned_parameters = {'n_neighbors': kneighbors, 'weights': weights, 'leaf_size': leaf_size}
clf = GridSearchCV(neighbors.KNeighborsClassifier(), param_grid=tuned_parameters, cv=5,n_jobs=-1,verbose=0)
clf.fit(X_train,y_train) #The combinations of models are trained
clf=clf.best_estimator_ #The one with better results is chosen

print("Chosen model:\n",clf,"\n")#Prints the parameters of the model chosen
#Overall Accuracy (OA) and Cohen's kappa on train and held-out test
print('OA train %0.2f' % clf.score(X_train, y_train))
preds_train = clf.predict(X_train)
print('Kappa train %0.2f' % metrics.cohen_kappa_score(y_train,preds_train))
#print(metrics.classification_report(y_train, preds_train)) #Detailed report with different metrics
print('OA test %0.2f' % clf.score(X_test, y_test))
preds_test = clf.predict(X_test)
print('Kappa test %0.2f' % metrics.cohen_kappa_score(y_test,preds_test))
CM=metrics.confusion_matrix(y_test,preds_test)
print("Confusion matrix of test data:\n",CM)
# -
# Even though the results from the test subset should provide an estimate of the results we would find when classifying the whole image, it is possible that these results are too optimistic because the model has been trained with only approximately 7% of the total data, and the criterion chosen to pick the representative observations may not be the optimum approach.
#
# If we had only chosen the closest points from the centroid, a good performance with the reduced dataset would still mean terrible results on the whole image.
# +
#----------PREDICTION ON THE WHOLE IMAGE-------------
#NOTE(review): this re-fits StandardScaler on the full image instead of
#reusing the scaler fitted on the training subset, so features are scaled
#with slightly different statistics than the model was trained on — confirm
#this is intentional before reusing the pipeline elsewhere.
Xl_scaled = StandardScaler().fit_transform(Xl) #
knn_predict = clf.predict(Xl_scaled) #Prediction
CM_final=metrics.confusion_matrix(Yl,knn_predict)
print("Confusion matrix of the whole image: \n",CM_final)
#print(metrics.classification_report(Yl,svc_predict)) #Detailed report with different metrics
print('OA whole image %0.2f' % clf.score(Xl_scaled, Yl))
print('Kappa whole image %0.2f' % metrics.cohen_kappa_score(Yl,knn_predict))
# -
# The results on the complete image show that the score obtained previously from the test subset were a bit optimistic. However, taking into account what was said before, these results show that the model is capable of generalizing and providing relatively good classification results on observations that it has never seen before.
# To compare the results obtained with **k-NN** the same procedure is followed with the **SVC** (*Support Vector Classifier*) algorithm:
# + **SVC** has been chosen because the number of samples is not too big and methods based on kernel are not affected too much by a high number of features. Since they need to invert a matrix a much higher number of samples would increase too much the computational cost of these kind of algorithms.
# +
from sklearn import svm

#SVM requires normalized features:
X_class = StandardScaler().fit_transform(X_class)

#The reduced dataset is split in train and test (80% and 20% each)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X_class, Y_class, test_size= 0.2, random_state = 11)

#-------------CROSS VALIDATION----------------
# FIX: `degree` only affects the 'poly' kernel; searching it with an RBF
# kernel doubled the grid (and fit time) without changing any score, so the
# grid now tunes gamma only. The selected model is unchanged.
gammas = np.logspace(-2, 0.5, 12)
tuned_parameters = {'gamma': gammas}
clf = GridSearchCV(svm.SVC(kernel='rbf'), tuned_parameters, cv=5, n_jobs=-1, verbose=0)
clf.fit(X_train, y_train)   #Every parameter combination is trained
clf = clf.best_estimator_   #Keep the best-scoring model

print("Chosen model:\n",clf,"\n")#Print the parameters of the chosen model
#Overall Accuracy (OA) and Cohen's kappa on train and held-out test
print('OA train %0.2f' % clf.score(X_train, y_train))
preds_train = clf.predict(X_train)
print('Kappa train %0.2f' % metrics.cohen_kappa_score(y_train,preds_train))
print('OA test %0.2f' % clf.score(X_test, y_test))
preds_test = clf.predict(X_test)
print('Kappa test %0.2f' % metrics.cohen_kappa_score(y_test,preds_test))
CM=metrics.confusion_matrix(y_test,preds_test)
print("Confusion matrix of test data:\n",CM)
# -
# Classification on both train and test subsets using **SVC** provides very positive results and better than the ones from **k-NN**. It does not seem to be overfitting since the numbers from both test and train are quite similar.
#
# +
#----------PREDICTION ON THE WHOLE IMAGE-------------
# NOTE(review): a fresh StandardScaler is fitted on the full image here, while
# the classifier was trained on features scaled with a scaler fitted on the
# ~5000-sample subset — the scalings are close but not identical; confirm, or
# reuse the training scaler.
Xl_scaled = StandardScaler().fit_transform(Xl)
svc_predict = clf.predict(Xl_scaled) #Prediction
CM_final=metrics.confusion_matrix(Yl,svc_predict)
print("Confusion matrix of the whole image:\n",CM_final)
#print(metrics.classification_report(Yl,svc_predict)) #Detailed report with different metrics
# Overall accuracy and Cohen's kappa over every labelled pixel.
print('OA whole image %0.2f' % clf.score(Xl_scaled, Yl))
print('Kappa whole image %0.2f' % metrics.cohen_kappa_score(Yl,svc_predict))
# -
# Same as with **k-NN**, results on the test subset are proven to be somewhat optimistic. However, the results using **SVC** on the whole image are not that far away from them and are quite good (and better than the ones from **k-NN**).
# +
#Images
# Rebuild 2-D classification maps: copy the original label vector, overwrite
# the labelled (non-zero) positions with each model's predictions, then
# reshape back to image dimensions (column-major order, matching the
# original flattening).
imagen_predicha = Yl_original.copy()
imagen_predicha[imagen_predicha != 0] = svc_predict
imagen_predicha = np.reshape(imagen_predicha, (Y.shape[0], Y.shape[1]), order="F")
imagen_knn = Yl_original.copy()
imagen_knn[imagen_knn != 0] = knn_predict
imagen_knn = np.reshape(imagen_knn, (Y.shape[0], Y.shape[1]), order="F")
# Ground truth vs. the two classification maps, side by side.
fig = plt.figure()
fig.set_size_inches(14,14)
ax=plt.subplot(1,3,1)
ax.imshow(Y), ax.axis('off'), plt.title('Ground Truth')
ax=plt.subplot(1,3,2)
ax.imshow(imagen_knn), ax.axis('off'), plt.title('k-NN')
ax=plt.subplot(1,3,3)
ax.imshow(imagen_predicha), ax.axis('off'), plt.title('SVC')
#---------------Remove the comments in case of wanting to add background--------
#clasmap=imagen_predicha; #here we should put our classification
#clasmap_masked = np.ma.masked_where(clasmap<1,clasmap)
#plt.imshow(X[:,:,1])
#plt.imshow(clasmap_masked)
# -
# Comparing the pictures above it's possible to see the differences between the 2 algorithms used. <br>
# The classification obtained from using **SVC** model is closer to the ground truth than the one obtained from using **k-NN**.
# ## 6. Feature selection<a name="id6"></a>
# + In this section we're going to select the most useful features, that is, the bands of the spectrum, to classify the samples. Doing so we'll get rid of those bands that are hindering or not improving the results of the classification algorithms. <br>
# This will result in a simplification of the model, and hence a computational cost and time reduction.
# Feature selection has been carried out using **Random Forest** algorithm, which allows us to know which are the most useful variables based on impurity. Impurity can be calculated using *gini* coefficient or entropy.
#
# In general, the variables used in the first branches of the trees, are those which give more information and thus, the ones we'll keep for the reduced model.
# +
#-----------------------------FEATURE SELECTION - RANDOM FOREST---------------------
# Select the most informative spectral bands with a Random Forest:
# SelectFromModel keeps the features whose impurity-based importance is
# above the default (mean) threshold.
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import SelectFromModel
# NOTE(review): no random_state is set, so the selected bands can vary
# between runs — consider fixing it for reproducibility.
sel = SelectFromModel(RandomForestClassifier(n_estimators = 300))
sel.fit(X_train, y_train)
# sel.get_support() returns a boolean mask (True = keep the feature);
# indexing with the mask directly is simpler and equivalent to the previous
# np.where(sel.get_support() == True)[0] fancy indexing.
support_mask = sel.get_support()
X_train_feat = X_train[:, support_mask].copy()
X_test_feat = X_test[:, support_mask].copy()
print("Initial number of variables: ", np.shape(Xl)[1])
print("Number of variables after feature selection: ", np.sum(support_mask))
# +
#Once the features have been selected we train again the SVC model
# `clf` is the best SVC found by the earlier grid search; it is refitted on
# the reduced (feature-selected) training matrix.
clf.fit(X_train_feat, y_train) #Model training with features selected
print('OA train %0.2f' % clf.score(X_train_feat, y_train))
preds_train = clf.predict(X_train_feat)
print('Kappa train %0.2f' % metrics.cohen_kappa_score(y_train,preds_train))
print('OA test %0.2f' % clf.score(X_test_feat, y_test))
preds_test = clf.predict(X_test_feat)
print('Kappa test %0.2f' % metrics.cohen_kappa_score(y_test,preds_test))
#Classification of the whole image (FEATURE SELECTED)
# Keep only the selected bands of the scaled full image before predicting.
Xl_scaled_feat = Xl_scaled[:, np.where(sel.get_support() == True)[0]].copy()
svc_predict_feat = clf.predict(Xl_scaled_feat)
print('OA whole image %0.2f' % clf.score(Xl_scaled_feat, Yl))
print('Kappa whole image %0.2f' % metrics.cohen_kappa_score(Yl,svc_predict_feat))
# -
# The results obtained after feature selection are very similar to the ones we had before it. Taking into account that the model is now using less than a half of the initial variables, these results are quite positive. We get to keep the effectiveness of the model and increase its efficiency reducing the algorithm's computational cost.
#
# Here below it's possible to compare the results over the image
# +
# Classification map of the feature-selected SVC: overwrite the labelled
# (non-zero) pixels with the predictions and reshape to image dimensions.
imagen_predicha_feat = Yl_original.copy()
imagen_predicha_feat[imagen_predicha_feat != 0] = svc_predict_feat
imagen_predicha_feat = np.reshape(imagen_predicha_feat, (Y.shape[0], Y.shape[1]), order="F")
# Ground truth vs. SVC with all bands vs. SVC with selected bands.
fig = plt.figure()
fig.set_size_inches(15,15)
ax=plt.subplot(1,3,1)
ax.imshow(Y), ax.axis('off'), plt.title('Ground Truth')
ax=plt.subplot(1,3,2)
ax.imshow(imagen_predicha), ax.axis('off'), plt.title('SVC')
ax=plt.subplot(1,3,3)
ax.imshow(imagen_predicha_feat), ax.axis('off'), plt.title('SVC after Feature Selection')
# -
# ## 7. Ensemble methods<a name="id7"></a>
# + Finally, we'll also use ensemble methods to classify the samples of the whole image training only with the subset of approximately 5000 cases and features selected.
#
# The first ensemble method to be used is **Random Forest**:
# +
# Hyper-parameter search for Random Forest: only the number of trees is tuned.
n_stimators = np.arange(300, 501, 100)
tuned_parameters = {'n_estimators': n_stimators}
clf = GridSearchCV(RandomForestClassifier(), tuned_parameters, cv=5,n_jobs=-1,verbose=0)
clf.fit(X_train_feat, y_train)
clf=clf.best_estimator_ #Keeps the best model
print("Chosen model:\n",clf)
# Predictions on train, test and whole (feature-selected, scaled) image.
y_pred1 = clf.predict(X_train_feat)
y_pred2 = clf.predict(X_test_feat)
y_pred3 = clf.predict(Xl_scaled_feat)
#print(metrics.classification_report(y_test,y_pred)) #Detailed report with different metrics
print('OA train %0.2f' % clf.score(X_train_feat, y_train))
print('Kappa train %0.2f' % metrics.cohen_kappa_score(y_train,y_pred1))
print('OA test %0.2f' % clf.score(X_test_feat, y_test))
print('Kappa test %0.2f' % metrics.cohen_kappa_score(y_test,y_pred2))
print('OA whole image %0.2f' %clf.score(Xl_scaled_feat, Yl))
print('Kappa whole image %0.2f' % metrics.cohen_kappa_score(Yl,y_pred3))
# Build the Random Forest classification map *here*, while y_pred3 still
# holds the Random Forest predictions. (Bug fix: it used to be built in the
# Gradient Boosting cell below, after y_pred3 had been overwritten, so the
# "Random Forest" image actually showed Gradient Boosting results.)
imagen_predicha_rand_forest = Yl_original.copy()
imagen_predicha_rand_forest[imagen_predicha_rand_forest != 0] = y_pred3
imagen_predicha_rand_forest = np.reshape(imagen_predicha_rand_forest, (Y.shape[0], Y.shape[1]), order="F")
# -
# There isn't a really big difference between the results obtained using **Random Forest** and the ones we had obtained using **SVC**.
#
# Last but not least, we'll use **Gradient Boosting**
# +
from sklearn.ensemble import GradientBoostingClassifier
# Same procedure as above, tuning only the number of boosting stages.
n_stimators = np.arange(400, 601, 100)
tuned_parameters = {'n_estimators': n_stimators}
abc = GridSearchCV(GradientBoostingClassifier(), tuned_parameters, cv=5,n_jobs=-1,verbose=0)
abc.fit(X_train_feat, y_train)
abc=abc.best_estimator_ #Keeps the best model
print("Chosen model:\n",abc)  # label unified with the other cells (was in Spanish)
y_pred1 = abc.predict(X_train_feat)
y_pred2 = abc.predict(X_test_feat)
y_pred3 = abc.predict(Xl_scaled_feat)
#print(metrics.classification_report(y_test,y_pred)) #Detailed report with different metrics
print('OA train %0.2f' % abc.score(X_train_feat, y_train))
print('Kappa train %0.2f' % metrics.cohen_kappa_score(y_train,y_pred1))
print('OA test %0.2f' % abc.score(X_test_feat, y_test))
print('Kappa test %0.2f' % metrics.cohen_kappa_score(y_test,y_pred2))
print('OA whole image %0.2f' %abc.score(Xl_scaled_feat, Yl))
print('Kappa whole image %0.2f' % metrics.cohen_kappa_score(Yl,y_pred3))
# -
# Again, **Gradient Boosting** results are quite close to the rest. No algorithm clearly beats the others within this problem's constraints.
# +
# Classification map for Gradient Boosting (y_pred3 holds its predictions
# at this point).
imagen_gb = Yl_original.copy()
imagen_gb[imagen_gb != 0] = y_pred3
imagen_gb = np.reshape(imagen_gb, (Y.shape[0], Y.shape[1]), order="F")
# Ground truth vs. the three feature-selected classifiers, side by side.
# NOTE(review): imagen_predicha_rand_forest is built from y_pred3 inside the
# Gradient Boosting cell above — verify it really holds Random Forest (not
# Gradient Boosting) predictions, otherwise the two right-hand panels are
# identical.
fig = plt.figure()
fig.set_size_inches(16,16)
ax=plt.subplot(1,4,1)
ax.imshow(Y), ax.axis('off'), plt.title('Ground Truth')
ax=plt.subplot(1,4,2)
ax.imshow(imagen_predicha_feat), ax.axis('off'), plt.title('SVC after FS')
ax=plt.subplot(1,4,3)
ax.imshow(imagen_predicha_rand_forest), ax.axis('off'), plt.title('Random Forest after FS')
ax=plt.subplot(1,4,4)
ax.imshow(imagen_gb), ax.axis('off'), plt.title('Gradient Boosting after FS')
# -
# ## 8. Conclusions<a name="id8"></a>
# In this project it has been possible to study the clustering and classifications of a hyperspectral image with given labelled data.
#
# First, a clustering algorithm has been used (unsupervised learning) to obtain a partition of the image data and it has been compared to the one from the labelled data given. Their difference has been checked and it's been proved that the partition obtained through using **KMeans** is closer to a natural partition than the one from the labelled data thanks to the silhouette coefficient.
#
# After that, shortage of labelled data has been simulated by picking approximately 5000 representative samples from the dataset. Two different algorithms have been used and compared to classify the whole image training only with a training subset from the 5000 samples. Both **k-NN** and **SVC** supervised learning classifier algorithms have been tested being **SVC** the one with better results (88% overall accuracy over the whole image).
#
# This has shown how these models, especially **SVC**, are able to generalize when trained with a good representative sample of the dataset, even if it contains only about 7% of the total number of samples.
# This also shows that the criterion used to pick the 5000 samples, using a spaced selection based on the distance from the samples to their class' centroid is effective and useful.
#
# Later, feature selection has been carried out using **Random Forest** algorithm reducing the number of bands (features) used in more than half and thus increasing the model efficiency without compromising the model effectiveness.
#
# Finally, with both instances and features selected, two ensemble methods have been used to perform classification, obtaining results similar to the ones from **SVC**. These two ensemble methods are **Random Forest** and **Gradient Boosting**.
#
# None of the algorithms used has significantly outperformed the rest; a summary of the scores obtained can be found below:
#
# | Algorithm | Overall Accuracy | Kappa Coefficient |
# |---------------------------: |------------------ |--------------------------- |
# | k-NN | 0.83 | 0.78 |
# | SVC | 0.88 | 0.85 |
# | SVC after FS | 0.87 | 0.83 |
# | Random Forest after FS | 0.87 | 0.82 |
# | Gradient Boosting after FS | 0.88 | 0.84 |
# ## Extra: Silhouette coefficient plot <a name="id9"></a>
# Here below the silhouette coefficient plots are shown in both the partition obtained using **KMeans** and the one from the labelled data given.
# +
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.cm as cm
# Silhouette plot for the partition given by the *labelled data* (Yl).
range_n_clusters = [9]
for n_clusters in range_n_clusters:
    fig, (ax1) = plt.subplots(1, 1)
    fig.set_size_inches(10, 7)
    # The silhouette coefficient ranges over [-1, 1]; the x-axis shows the
    # whole range so both partitions can be compared on equal footing.
    ax1.set_xlim([-1, 1])
    # The (n_clusters+1)*10 is for inserting blank space between silhouette
    # plots of individual clusters, to demarcate them clearly.
    ax1.set_ylim([0, len(Xl) + (n_clusters + 1) * 10])
    # No clusterer is run here: the ground-truth labels are used directly.
    # NOTE(review): if Yl labels are 1..9 (0 = background removed), the
    # range(n_clusters) loop below covers 0..8 and would skip the top label —
    # confirm the label encoding.
    cluster_labels = Yl
    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(Xl, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", round(silhouette_avg,4))
    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(Xl, cluster_labels)
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        color = cm.nipy_spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)
        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
        # Compute the new y_lower for next plot (10 px gap between clusters)
        y_lower = y_upper + 10
    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-1, -0.8, -0.6, -0.4, -0.2 , 0, 0.2, 0.4, 0.6, 0.8, 1])
    plt.suptitle(("Labelled data "
                  "with n_clusters = %d" % n_clusters),
                 fontsize=14, fontweight='bold')
plt.show()
# +
# Same silhouette plot, this time for the KMeans partition (Y_pred).
range_n_clusters = [9]
for n_clusters in range_n_clusters:
    fig, (ax1) = plt.subplots(1, 1)
    fig.set_size_inches(10, 7)
    # The silhouette coefficient ranges over [-1, 1]; the x-axis shows the
    # whole range so both partitions can be compared on equal footing.
    ax1.set_xlim([-1, 1])
    # The (n_clusters+1)*10 is for inserting blank space between silhouette
    # plots of individual clusters, to demarcate them clearly.
    ax1.set_ylim([0, len(Xl) + (n_clusters + 1) * 10])
    # No clusterer is run here: the previously computed KMeans labels are
    # reused directly.
    cluster_labels = Y_pred
    # The silhouette_score gives the average value for all the samples.
    # This gives a perspective into the density and separation of the formed
    # clusters
    silhouette_avg = silhouette_score(Xl, cluster_labels)
    print("For n_clusters =", n_clusters,
          "The average silhouette_score is :", round(silhouette_avg,4))
    # Compute the silhouette scores for each sample
    sample_silhouette_values = silhouette_samples(Xl, cluster_labels)
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate the silhouette scores for samples belonging to
        # cluster i, and sort them
        ith_cluster_silhouette_values = \
            sample_silhouette_values[cluster_labels == i]
        ith_cluster_silhouette_values.sort()
        size_cluster_i = ith_cluster_silhouette_values.shape[0]
        y_upper = y_lower + size_cluster_i
        color = cm.nipy_spectral(float(i) / n_clusters)
        ax1.fill_betweenx(np.arange(y_lower, y_upper),
                          0, ith_cluster_silhouette_values,
                          facecolor=color, edgecolor=color, alpha=0.7)
        # Label the silhouette plots with their cluster numbers at the middle
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
        # Compute the new y_lower for next plot (10 px gap between clusters)
        y_lower = y_upper + 10
    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")
    # The vertical line for average silhouette score of all the values
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
    ax1.set_yticks([])  # Clear the yaxis labels / ticks
    ax1.set_xticks([-1, -0.8, -0.6, -0.4, -0.2 , 0, 0.2, 0.4, 0.6, 0.8, 1])
    plt.suptitle(("KMeans clustering "
                  "with n_clusters = %d" % n_clusters),
                 fontsize=14, fontweight='bold')
plt.show()
# -
# As commented in point [number 3](#id3), the partition obtained through **KMeans** is closer to a natural partition since there are a lot fewer points with a negative silhouette coefficient. A negative silhouette coefficient means that a point is farther from its own cluster's centroid than from the nearest neighboring cluster's centroid.
| Satellite_ML_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv("data/dataset_train.csv").drop(columns=[
'Index',
'First Name',
'Last Name',
'Birthday',
'Best Hand',
'Hogwarts House'])
df.head()
for column in df.columns:
sns.displot(df[column], kde=True, color='red')
fig, axes = plt.subplots(1, 3, figsize=(16,5))
sns.histplot( df['Care of Magical Creatures'], kde=True, color='darkblue', ax=axes[0])
sns.histplot( df['Potions'], kde=True, color='darkblue', ax=axes[1])
sns.histplot( df['Arithmancy'], kde=True, color='darkblue', ax=axes[2])
fig, axes = plt.subplots(1, 3, figsize=(16,5))
sns.histplot( df['Astronomy'], kde=True, color='y', ax=axes[0])
sns.histplot( df['Defense Against the Dark Arts'], kde=True, color='y', ax=axes[1])
sns.histplot( df['Ancient Runes'], kde=True, color='y', ax=axes[2])
fig, axes = plt.subplots(1, 3, figsize=(16,5))
sns.histplot( df['History of Magic'], kde=True, color='black', ax=axes[0])
sns.histplot( df['Transfiguration'], kde=True, color='black', ax=axes[1])
sns.histplot( df['Divination'], kde=True, color='black', ax=axes[2])
fig, axes = plt.subplots(1, 3, figsize=(16,5))
sns.histplot( df['Flying'], kde=True, color='teal', ax=axes[0])
sns.histplot( df['Muggle Studies'], kde=True, color='teal', ax=axes[1])
sns.histplot( df['Charms'], kde=True, color='teal', ax=axes[2])
| histogram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: StyleGAN
# language: python
# name: stylegan
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Load-Network" data-toc-modified-id="Load-Network-1"><span class="toc-item-num">1 </span>Load Network</a></span></li><li><span><a href="#Project" data-toc-modified-id="Project-2"><span class="toc-item-num">2 </span>Project</a></span></li><li><span><a href="#Encode" data-toc-modified-id="Encode-3"><span class="toc-item-num">3 </span>Encode</a></span></li><li><span><a href="#Generate-Images" data-toc-modified-id="Generate-Images-4"><span class="toc-item-num">4 </span>Generate Images</a></span></li><li><span><a href="#Projected-Latent-Initialization" data-toc-modified-id="Projected-Latent-Initialization-5"><span class="toc-item-num">5 </span>Projected Latent Initialization</a></span></li></ul></div>
# -
# Project/embed real images into StyleGANv2 latent space.
# +
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
from datetime import datetime
from tqdm import tqdm
# ffmpeg installation location, for creating videos
plt.rcParams['animation.ffmpeg_path'] = str(Path.home() / "Documents/dev_tools/ffmpeg-20190623-ffa64a4-win64-static/bin/ffmpeg.exe")
# %load_ext autoreload
# %autoreload 2
# StyleGAN Utils
from stylegan_utils import load_network, gen_image_fun, synth_image_fun, create_video
import dnnlib
import dataset_tool
import run_projector
import projector
import training.dataset
import training.misc
# Specific of encoder repos, comment out if not needed
#from encoder.perceptual_model import PerceptualModel
#from encoder.generator_model import Generator
# Data Science Utils
sys.path.append(os.path.join(os.pardir, 'data-science-learning'))
from ds_utils import generative_utils
# -
# Root directory where all generated/projected results are written.
res_dir = Path.home() / 'Documents/generated_data/stylegan'
# # Load Network
# +
# NOTE(review): machine-specific Windows path — adjust before running elsewhere.
MODELS_DIR = Path("C:/Users/User/Documents/models/stylegan2")
MODEL_NAME = 'original_ffhq'
SNAPSHOT_NAME = 'stylegan2-ffhq-config-f'
# Load the pre-trained StyleGANv2 generator (Gs), its call kwargs and the
# per-layer noise variables from the pickle snapshot.
Gs, Gs_kwargs, noise_vars = load_network(str(MODELS_DIR / MODEL_NAME / SNAPSHOT_NAME) + '.pkl')
Z_SIZE = Gs.input_shape[1:][0]   # latent (z) dimensionality
IMG_SIZE = Gs.output_shape[2:]   # generated image height/width
IMG_SIZE
# -
# # Project
def project_images(images_dir, tfrecord_dir, data_dir, num_steps, num_snapshots):
    """Project every image in *images_dir* into StyleGAN's latent space.

    The images are first packed into a TFRecord dataset, then each one is
    optimized with the official StyleGANv2 projector; per-image snapshots are
    written under ``data_dir/out_<idx>/``.

    :param images_dir: directory containing the source images.
    :param tfrecord_dir: directory where the TFRecord dataset is created.
    :param data_dir: directory receiving one ``out_<idx>`` folder per image.
    :param num_steps: number of projector optimization steps per image.
    :param num_snapshots: number of intermediate snapshots saved per image.
    """
    # setup projector (Gs is the generator loaded at module level)
    print('Setting up projector')
    proj = projector.Projector(num_steps=num_steps)
    proj.set_network(Gs)
    # generate tfrecords
    nb_images = dataset_tool.create_from_images(str(tfrecord_dir), str(images_dir), True)
    # loading images from tfrecords
    dataset_obj = training.dataset.load_dataset(data_dir=str(data_dir), tfrecord_dir=tfrecord_dir,
                                                max_label_size=0, verbose=True, repeat=False, shuffle_mb=0)
    assert dataset_obj.shape == Gs.output_shape[1:]
    # project all loaded images
    print('=======================')
    for image_idx in tqdm(range(nb_images)):
        # image_idx is 0-based, so this prints 0/N .. (N-1)/N
        print(f'Projecting image {image_idx}/{nb_images}')
        images, _labels = dataset_obj.get_minibatch_np(1)
        # rescale pixel values from [0, 255] to the generator's [-1, 1] range
        images = training.misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
        run_path = data_dir / f'out_{image_idx}'
        run_path.mkdir()
        run_projector.project_image(proj, targets=images,
                                    png_prefix=dnnlib.make_run_dir_path(str(run_path / 'image_')),
                                    num_snapshots=num_snapshots)
# +
# Timestamped output folder so repeated runs never overwrite each other.
data_dir = res_dir / 'projection' / MODEL_NAME / SNAPSHOT_NAME / datetime.now().strftime("%Y%m%d_%H%M%S") # where the projections results will be saved
images_dir = Path.home() / 'Documents/generated_data/face_extract' / 'tmp_portraits'
tfrecord_dir = data_dir / 'tfrecords'
project_images(images_dir=images_dir, tfrecord_dir=tfrecord_dir, data_dir=data_dir,
               num_steps=1000, num_snapshots=50)
# -
# NOTE(review): 'out_7' is a hard-coded image index from a previous run —
# adjust to the image whose projection video you want.
create_video(data_dir / 'out_7',
             res_dir / 'projection' / 'out_{}.mp4'.format('test'))
from config import Config
Config.get_inception_path()
# # Encode
# This does not use the official StyleGAN v2 projector, but instead relies on the direct encoder setup used by the community for v1.
# +
BATCH_SIZE = 1                    # images encoded per optimization batch
PERCEPTUAL_MODEL_IMG_SIZE = 256   # input resolution of the perceptual (VGG) model
# setup utils generator and perceptual model
# NOTE(review): Generator and PerceptualModel come from the encoder repo
# imports that are commented out at the top of this notebook — uncomment them
# before running this cell, otherwise these names are undefined.
generator = Generator(Gs, BATCH_SIZE, randomize_noise=False)
perceptual_model = PerceptualModel(PERCEPTUAL_MODEL_IMG_SIZE, layer=9, batch_size=BATCH_SIZE)
perceptual_model.build_perceptual_model(generator.generated_image)
# -
def split_to_batches(l, n):
    """Yield consecutive slices of *l*, each holding at most *n* items."""
    yield from (l[pos:pos + n] for pos in range(0, len(l), n))
def encode_images(images_dir, data_dir, iterations, learning_rate=1.):
    """Encode every image in *images_dir* into the StyleGAN latent space.

    For each batch, the module-level ``perceptual_model`` optimizes the
    module-level ``generator``'s dlatent variable toward the target images;
    the reconstructed images (PNG) and their dlatents (NPY) are written under
    ``data_dir/<iterations>/gen_images`` and ``data_dir/<iterations>/latents``.

    :param images_dir: Path containing the target images.
    :param data_dir: Path under which the results are stored.
    :param iterations: number of optimization steps per batch.
    :param learning_rate: step size for the perceptual-model optimizer.
    """
    # Local import: the module-level `from PIL import Image` only appears in a
    # later cell, so relying on it here would NameError if cells run in order.
    from PIL import Image

    # collect images
    images_paths = [str(img) for img in images_dir.glob('*')]
    GEN_IMAGES_DIR = data_dir / '{}'.format(iterations) / 'gen_images'
    GEN_DLATENT_DIR = data_dir / '{}'.format(iterations) / 'latents'
    GEN_IMAGES_DIR.mkdir(parents=True, exist_ok=True)
    GEN_DLATENT_DIR.mkdir(parents=True, exist_ok=True)
    # project all loaded images (the dead `count` accumulator was removed)
    for images_batch in tqdm(split_to_batches(images_paths, BATCH_SIZE), total=len(images_paths)//BATCH_SIZE):
        images_names = [os.path.splitext(os.path.basename(img_path))[0] for img_path in images_batch]
        perceptual_model.set_reference_images(images_batch)
        optimizer = perceptual_model.optimize(generator.dlatent_variable,
                                              iterations=iterations,
                                              learning_rate=learning_rate)
        pbar = tqdm(optimizer, leave=False, mininterval=9, total=iterations)
        loss = None  # guard: `loss` stays defined even if the optimizer yields nothing
        for loss in pbar:  # drain the optimizer generator, keeping the last loss
            pass
        print(' '.join(images_names), ' loss:', loss)
        # generate images from found dlatents and save them
        generated_images = generator.generate_images()
        generated_dlatents = generator.get_dlatents()
        for img_array, dlatent, img_name in zip(generated_images, generated_dlatents, images_names):
            img = Image.fromarray(img_array, 'RGB')
            img.save(str(GEN_IMAGES_DIR / f'{img_name}.png'), 'PNG')
            np.save(str(GEN_DLATENT_DIR / f'{img_name}.npy'), dlatent)
        generator.reset_dlatents()
# # Generate Images
# Sample a random latent (one 512-vector per synthesis layer) and render it
# both through the mapping network and directly through the synthesis network.
target_latents = np.random.rand(18, Z_SIZE)
img = gen_image_fun(Gs, target_latents, Gs_kwargs, noise_vars, truncation_psi=0.5)
plt.imshow(img)
img = synth_image_fun(Gs, target_latents[np.newaxis,:,:], Gs_kwargs, randomize_noise=True)
plt.imshow(img)
# # Projected Latent Initialization
# Test network used to learn an initial mapping from an image to the intermediate StyleGAN latent
from PIL import Image
from keras.models import load_model
resnet = load_model(MODELS_DIR / MODEL_NAME / 'resnet' / 'finetuned_resnet.h5')
resnet_img_size = (256, 256)
resnet.summary()
# NOTE(review): empty path is a placeholder — fill in the target image before running.
target_img = Image.open("")
target_img = target_img.resize(resnet_img_size)
plt.imshow(target_img)
# Predict the dlatent from the image, then synthesize it to eyeball the match.
predicted_latent = resnet.predict(np.array(target_img)[np.newaxis,:])
img = synth_image_fun(Gs, predicted_latent, Gs_kwargs, randomize_noise=True)
plt.imshow(img)
| deep learning/StyleGAN/StyleGAN - Projector.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="gZhqxpznOZWS"
import pandas as pd
import numpy as np
import requests
import bs4 as bs
import urllib.request
# + [markdown] id="xhXPbpjhOZWT"
# ## Extracting features of 2020 movies from Wikipedia
# + id="1o9WJASWOZWT"
link = "https://en.wikipedia.org/wiki/List_of_American_films_of_2020"
# + id="TIlfZhgwOZWT"
source = urllib.request.urlopen(link).read()
soup = bs.BeautifulSoup(source,'lxml')
# + id="LFytQ2MaOZWT"
tables = soup.find_all('table',class_='wikitable sortable')
# + colab={"base_uri": "https://localhost:8080/"} id="CdSLuBzGOZWT" outputId="a59c27fc-00d2-4acd-e5c8-3dd746d78baa"
len(tables)
# + colab={"base_uri": "https://localhost:8080/"} id="VoX7c9O-OZWU" outputId="78637bfa-e750-4c01-df0a-c791c80097b3"
type(tables[0])
# + id="1y2cECOMOZWU"
df1 = pd.read_html(str(tables[0]))[0]
df2 = pd.read_html(str(tables[1]))[0]
df3 = pd.read_html(str(tables[2]))[0]
df4 = pd.read_html(str(tables[3]).replace("'1\"\'",'"1"'))[0]
# + id="t4MN1gZbOZWU"
df = df1.append(df2.append(df3.append(df4,ignore_index=True),ignore_index=True),ignore_index=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="4CCunB-rOZWU" outputId="a4d69bcd-088c-4655-a445-e8ebafdb10e6"
df
# + id="9enI6QkuOZWV"
df_2020 = df[['Title','Cast and crew']]
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="RT0jPJQQOZWV" outputId="2d6ab58f-996f-4099-e7bb-3e29abadf43c"
df_2020
# + colab={"base_uri": "https://localhost:8080/"} id="KYTIVryiOkeW" outputId="629f16ee-2aa7-4b7f-8393-09afdefef5d5"
# !pip install tmdbv3api
# + id="61XvcEzPOZWV"
from tmdbv3api import TMDb
import json
import requests
tmdb = TMDb()
tmdb.api_key = ''
# + id="_4q66M86OfgF"
from tmdbv3api import Movie
tmdb_movie = Movie()
def get_genre(x):
    """Return the TMDb genres of movie title *x* as a space-separated string.

    Falls back to ``np.NaN`` when the title is not found on TMDb, when the
    API response carries no 'genres' key (e.g. an error payload — the
    previous code indexed data_json['genres'] and could raise KeyError), or
    when the genre list is empty, so missing rows can be dropped later.
    """
    result = tmdb_movie.search(x)
    if not result:
        return np.NaN
    movie_id = result[0].id
    response = requests.get(
        'https://api.themoviedb.org/3/movie/{}?api_key={}'.format(movie_id, tmdb.api_key))
    data_json = response.json()
    # .get() tolerates error payloads that lack a 'genres' key entirely
    genres = [genre['name'] for genre in data_json.get('genres', [])]
    if genres:
        return " ".join(genres)
    return np.NaN
# + colab={"base_uri": "https://localhost:8080/"} id="uSjX4AsmOZWV" outputId="da21d28b-1f7a-4171-8b87-2eabcc897a8d"
# One TMDb lookup per title — this is network-bound and slow for ~250 rows.
df_2020['genres'] = df_2020['Title'].map(lambda x: get_genre(str(x)))
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="aiwOqJZMOZWV" outputId="032dfad5-985e-4d24-9fed-ed92c9edfba3"
df_2020
# + id="GcqUuFyiOZWW"
def get_director(x):
    """Extract the director name(s) from a Wikipedia 'Cast and crew' string."""
    # Try each credit marker in the same order as before; when neither plain
    # marker is present, fall back to the combined director/screenplay credit.
    # Splitting on a marker that is absent returns the whole string unchanged,
    # matching the previous behaviour.
    for marker in (" (director)", " (directors)"):
        if marker in x:
            return x.split(marker)[0]
    return x.split(" (director/screenplay)")[0]
# + colab={"base_uri": "https://localhost:8080/"} id="A7RzXkBQOZWW" outputId="161deb48-6037-468b-c4b4-44cd51d0efc1"
df_2020['director_name'] = df_2020['Cast and crew'].map(lambda x: get_director(str(x)))
# + id="wwBWFgBSOZWW"
def get_actor1(x):
    """Return the first cast member listed after the crew credits."""
    cast_section = x.split("screenplay); ")[-1]
    return cast_section.split(", ")[0]
# + colab={"base_uri": "https://localhost:8080/"} id="gxr9Y4IJOZWW" outputId="68904c09-7d50-424d-e908-804489e84d05"
df_2020['actor_1_name'] = df_2020['Cast and crew'].map(lambda x: get_actor1(str(x)))
# + id="Qc7I49gVOZWX"
def get_actor2(x):
    """Return the second listed cast member, or np.NaN when there is none."""
    cast = x.split("screenplay); ")[-1].split(", ")
    return cast[1] if len(cast) >= 2 else np.NaN
# + id="ipspWbjaOZWX"
df_2020['actor_2_name'] = df_2020['Cast and crew'].map(lambda x: get_actor2(str(x)))
# + id="pQmab5aiOZWX"
def get_actor3(x):
    """Return the third listed cast member, or np.NaN when there is none."""
    cast = x.split("screenplay); ")[-1].split(", ")
    return cast[2] if len(cast) >= 3 else np.NaN
# + id="vWSs45LWOZWX"
df_2020['actor_3_name'] = df_2020['Cast and crew'].map(lambda x: get_actor3(str(x)))
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="2xDIRn7DOZWX" outputId="ad8d83b3-a608-43db-eeff-53a434ab3d4f"
df_2020
# + id="rBUUnUtfOZWX"
# Align the column names with the schema of the existing dataset.
df_2020 = df_2020.rename(columns={'Title':'movie_title'})
# + id="_WCWVlAqOZWX"
new_df20 = df_2020.loc[:,['director_name','actor_1_name','actor_2_name','actor_3_name','genres','movie_title']]
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="i7KBDmu_OZWX" outputId="6b4de81e-b1ac-4fb0-d349-52ae7c210346"
new_df20
# + id="0s7XMhqzOZWY"
# 'comb' concatenates the text features used by the recommender; any NaN
# operand makes the whole row NaN, which dropna() removes below.
new_df20['comb'] = new_df20['actor_1_name'] + ' ' + new_df20['actor_2_name'] + ' '+ new_df20['actor_3_name'] + ' '+ new_df20['director_name'] +' ' + new_df20['genres']
# + colab={"base_uri": "https://localhost:8080/"} id="ta_KcPQJOZWY" outputId="ac6aa7b7-c156-49b2-9e95-3a76f971f5b1"
new_df20.isna().sum()
# + id="DkKroG5GOZWY"
new_df20 = new_df20.dropna(how='any')
# + colab={"base_uri": "https://localhost:8080/"} id="YOIv1wvhOZWY" outputId="c3ad579e-16e2-4d8c-d4a0-71891d71b986"
new_df20.isna().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="IB3YmNWROZWY" outputId="8abccee1-4b9e-436b-de18-eb97298d73a9"
# Lower-case titles so lookups are case-insensitive downstream.
new_df20['movie_title'] = new_df20['movie_title'].str.lower()
# + colab={"base_uri": "https://localhost:8080/", "height": 406} id="3e3NnY70OZWZ" outputId="cf4351f9-93f7-40bc-cc24-ab9e45bd8358"
new_df20
# + id="BCf9fH_4OZWZ"
old_df = pd.read_csv('final_data.csv')
# + id="ce2e426yOZWZ" outputId="7d2c0bd7-6907-4398-c385-829b04ad921e"
old_df
# + id="P14B9e3GOZWZ"
# DataFrame.append is deprecated (removed in pandas 2.0); concat is equivalent.
final_df = pd.concat([old_df, new_df20], ignore_index=True)
# + id="0RAVcY-jOZWZ" outputId="0e87fe5f-5a7a-4792-edf3-a5027e667af0"
final_df
# + id="gCTWwaOiOZWZ"
final_df.to_csv('main_data.csv',index=False)
| preprocessing_4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.0
# language: julia
# name: julia-1.6
# ---
# # Replicating Maliar, Maliar, and Valli (2010, JEDC) to solve Krusell and Smith (1998, JPE) model using Julia
#
#
# By [<NAME>](https://github.com/Shunsuke-Hori)
#
# # Overview of the notebook
# This notebook solves the model of [Krusell and Smith (1998, JPE)](https://www.journals.uchicago.edu/doi/10.1086/250034) and successfully replicates the results of [Maliar, Maliar, and Valli (2010, JEDC)](https://www.sciencedirect.com/science/article/pii/S0165188909001328).
#
# The solution strategy is as follows
#
# 1. Solve the individual problem by Euler equation method or value function iteration (VFI) with 2D interpolation
# - Agents are boundedly rational. In the code, they take into account the information about the mean of capital
# - Aggregate law of motion is approximated by log-linear relation, i.e. $\log(K_{t+1})=B1+B2\log(K_{t})$ for good aggregate state and $\log(K_{t+1})=B3+B4\log(K_{t})$ for bad aggregate state
# - If specified, Howard's policy iteration is used
# 1. Compute the path of aggregate capital using the policy function obtained by 1. There are two ways of simulation:
# - Monte Carlo following Krusell and Smith (1998). That is, aggregate technology shocks and idiosyncratic employment shocks are drawn for many agents and many periods. Then, using the LLN, the aggregate capital is computed by aggregating all agents for all period.
# - Non-stochastic method following [Young (2010, JEDC)](https://www.sciencedirect.com/science/article/pii/S0165188909001316).
# 1. Update the coefficient of aggregate capital law of motion, $B1$, $B2$, $B3$ and $B4$, by regression
# 1. Check convergence of $B1$, $B2$, $B3$ and $B4$
#
# NOTE: Regarding interpolation, Krusell and Smith uses various interpolation scheme depending on the purpose, including polynomial interpolation. Maliar, Maliar, and Valli uses spline interpolation in their paper. This notebook only uses linear interpolation.
#
# This notebook written in Julia solves the model much faster than the code of Maliar et al. (2010) written in Matlab according to [den Haan (2010, JEDC)](https://www.sciencedirect.com/science/article/pii/S0165188909001298), although they are not simply comparable because initial guess, interpolation methods, and running environments are different.
# # Code to solve models
# First thing to do is importing functions from `KSfunctions.ipynb`.
#
# You can find the notebook in the [QuantEcon repository](https://github.com/QuantEcon/krusell_smith_code).
# Load the model functions (KSParameter, KSSolution, generate_shocks, ...)
# defined in the companion notebook KSfunctions.ipynb.
using NBInclude
@nbinclude("KSfunctions.ipynb")
using Plots # to plot the result
# pyplot()
# ## Implementation by Euler method
# First, construct a `NamedTuple`, `ksp`, which contains the parameters of the model and `kss` which has initial guess of the solution.
#
# (Grid size inconsistency is also checked, which may return error when exiting result is loaded by `load_value=true`)
# instance of KSParameter (model parameters and grids)
ksp = KSParameter()
# instance of KSSolution; load_value=false / load_B=false start from a fresh guess
kss = KSSolution(ksp, load_value=false, load_B=false)
# Sanity checks: a (possibly loaded) policy array must match the current grid sizes
if size(kss.k_opt,1) != length(ksp.k_grid)
    error("loaded data is inconsistent with k_size")
end
if size(kss.k_opt,2) != length(ksp.K_grid)
    error("loaded data is inconsistent with K_size")
end
# Let's draw the shock for stochastic simulation of aggregate law of motion
# generate shocks
Random.seed!(0) # for reproducibility
# zi_shocks: aggregate productivity state indices for 1100 periods;
# epsi_shocks: idiosyncratic employment shocks for 10000 agents
@time zi_shocks, epsi_shocks =generate_shocks(ksp;
    z_shock_size=1100, population=10000);
# Now, the following cell solves the model with Euler equation method
# find ALM coefficient
# Monte Carlo simulation scheme driven by the drawn idiosyncratic shocks
sm = Stochastic(epsi_shocks)
T_discard = 100 # initial simulation periods dropped before the ALM regression
# Outer loop: solve the individual problem (Euler method), simulate aggregate
# capital, re-estimate the log-linear ALM coefficients B (dampened by update_B),
# and repeat until B converges.
@time K_ts = find_ALM_coef!(EulerMethod(),
    sm, ksp, kss, zi_shocks,
    tol_ump = 1e-8, max_iter_ump = 10000,
    tol_B = 1e-8, max_iter_B = 500, update_B = 0.3,
    T_discard = T_discard);
# Let's compare the true aggregate law of motion for capital and approximated one with figure and regression
plot_ALM(ksp.z_grid, zi_shocks, kss.B,K_ts, T_discard = T_discard)
#kss.B # Regression coefficient
println("Approximated aggregate capital law of motion")
println("log(K_{t+1})=$(kss.B[1])+$(kss.B[2])log(K_{t}) in good time (R2 = $(kss.R2[1]))")
println("log(K_{t+1})=$(kss.B[3])+$(kss.B[4])log(K_{t}) in bad time (R2 = $(kss.R2[2]))")
# The approximated law of motion of capital is very close to the true one, which implies that assuming agents are partially rational is not bad idea since the difference of their actions are negligible.
#
# The mean of capital, about 40, is sufficiently close to Maliar et al. (2010).
# Persist the solution for reuse as an initial guess.
@save "result_Euler.jld2" ksp kss
# Compute mean of capital implied by regression
# Stationary distribution of the aggregate (good/bad) Markov chain
mc=MarkovChain(ksp.transmat.Pz)
sd=stationary_distributions(mc)[1]
# Fixed point of the log-linear ALM in each aggregate state: log K* = B1/(1-B2), B3/(1-B4)
logKg=kss.B[1]/(1-kss.B[2])
logKb=kss.B[3]/(1-kss.B[4])
# Weight the state-specific fixed points by the stationary distribution
meanK_reg=exp(sd[1]*logKg+sd[2]*logKb)
meanK_sim=mean(K_ts[T_discard+1:end])
println("mean of capital implied by regression is $meanK_reg")
println("mean of capital implied by simulation is $meanK_sim")
# ## Figures in Krusell-Smith
# Now, plot the replication figure:
# ### Figure 1
plot_Fig1(ksp ,kss, K_ts)
# ### Figure 2
plot_Fig2(ksp, kss, 40)
# Both figures are replicated well.
# # Solution with Young (2010)'s method
# In this section, aggregate capital is simulated by the non-stochastic method of Young (2010, JEDC) cited above.
# Fresh solution object (do not reuse the Euler/Monte-Carlo result)
kss = KSSolution(ksp, load_value=false, load_B=false)
# Non-stochastic simulation scheme, initialized at the first aggregate state
ns = NonStochastic(ksp, zi_shocks[1])
# Same Euler-method individual problem, but aggregate capital is now
# simulated by iterating the cross-sectional distribution (no sampling noise)
@time K_ts = find_ALM_coef!(EulerMethod(),
    ns, ksp, kss, zi_shocks,
    tol_ump = 1e-8, max_iter_ump = 10000,
    tol_B = 1e-8, max_iter_B = 500, update_B = 0.3,
    T_discard = T_discard);
plot_ALM(ksp.z_grid, zi_shocks, kss.B,K_ts, T_discard = T_discard)
#kss.B # Regression coefficient
println("Approximated aggregate capital law of motion")
println("log(K_{t+1})=$(kss.B[1])+$(kss.B[2])log(K_{t}) in good time (R2 = $(kss.R2[1]))")
println("log(K_{t+1})=$(kss.B[3])+$(kss.B[4])log(K_{t}) in bad time (R2 = $(kss.R2[2]))")
@save "result_Young.jld2" ksp kss
# Compute mean of capital implied by regression (same computation as the Euler section)
mc=MarkovChain(ksp.transmat.Pz)
sd=stationary_distributions(mc)[1]
logKg=kss.B[1]/(1-kss.B[2])
logKb=kss.B[3]/(1-kss.B[4])
meanK_reg=exp(sd[1]*logKg+sd[2]*logKb)
meanK_sim=mean(K_ts[T_discard+1:end])
println("mean of capital implied by regression is $meanK_reg")
println("mean of capital implied by simulation is $meanK_sim")
# ## Figures in Krusell-Smith
# ### Figure 1
plot_Fig1(ksp ,kss, K_ts)
# ### Figure 2
plot_Fig2(ksp, kss, 40)
# # Solution with value function iteration
# In this section, each agent's utility maximization problem is solved by value function iteration.
# ## Implementation
# Let's skip the following steps in this section to save computational time.
# - construction of `ksp` instance since it is same
# - construction of `kss` instance to use the previous result as initial guess of the solution
# - draws of the shocks to use same ones
#
# However, instead of constructing `kss` again, obtain value from the policy function derived by Euler method:
iterate_policy!(ksp, kss, n_iter=30)
# Young's non-stochastic method is used for simulation.
# NOTE(review): `kss` is re-constructed just below with load_value=false,
# which appears to discard the value obtained by iterate_policy! above —
# confirm this is intended.
kss = KSSolution(ksp, load_value=false, load_B=false)
ns = NonStochastic(ksp, zi_shocks[1])
# Individual problem solved by VFI (Howard's policy-iteration acceleration off)
@time K_ts = find_ALM_coef!(VFI(Howard_on=false, Howard_n_iter=20),
    ns, ksp, kss, zi_shocks,
    tol_ump = 1e-8, max_iter_ump = 10000,
    tol_B = 1e-8, max_iter_B = 500, update_B = 0.3,
    T_discard = T_discard);
plot_ALM(ksp.z_grid, zi_shocks, kss.B, K_ts, T_discard = T_discard)
#kss.B # Regression coefficient
println("Approximated aggregate capital law of motion")
println("log(K_{t+1})=$(kss.B[1])+$(kss.B[2])log(K_{t}) in good time (R2 = $(kss.R2[1]))")
println("log(K_{t+1})=$(kss.B[3])+$(kss.B[4])log(K_{t}) in bad time (R2 = $(kss.R2[2]))")
@save "result_VFI.jld2" ksp kss
# Compute mean of capital implied by regression (same computation as the Euler section)
mc = MarkovChain(ksp.transmat.Pz)
sd = stationary_distributions(mc)[1]
logKg = kss.B[1]/(1-kss.B[2])
logKb = kss.B[3]/(1-kss.B[4])
meanK_reg = exp(sd[1]*logKg + sd[2]*logKb)
meanK_sim = mean(K_ts[T_discard+1:end])
println("mean of capital implied by regression is $meanK_reg")
println("mean of capital implied by simulation is $meanK_sim")
# ## Figures in Krusell-Smith
# ### Figure 1
plot_Fig1(ksp, kss, K_ts)
# ### Figure 2
plot_Fig2(ksp, kss, 40)
| KrusellSmith.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# ### Get the Personalize boto3 Client
# +
import boto3
import json
import numpy as np
import pandas as pd
import time

# Control-plane client (datasets, schemas, solutions, campaigns) and
# runtime client (real-time GetRecommendations calls).
personalize = boto3.client('personalize')
personalize_runtime = boto3.client('personalize-runtime')
# -
# ### Specify a Bucket and Data Output Location
bucket = "liam-movielens-20m" # replace with the name of your S3 bucket
filename = "movie-lens-20M.csv" # replace with a name that you want to save the dataset under
# ### Download, Prepare, and Upload Training Data
# #### Download and Explore the Dataset
# !wget -N http://files.grouplens.org/datasets/movielens/ml-20m.zip
# !unzip -o ml-20m.zip
# Load the MovieLens 20M ratings.
# NOTE: the CSV headers are camelCase ('userId', 'movieId', 'rating',
# 'timestamp'); dtype keys must match them exactly — with the previous
# lowercase keys pandas silently ignored the dtype specification.
data = pd.read_csv('./ml-20m/ratings.csv', sep=',', dtype={'userId': "int64", 'movieId': "int64", 'rating': "float64", 'timestamp': "int64"})
pd.set_option('display.max_rows', 25)
# Rename to the upper-case column names required by the Personalize schema below.
data.rename(columns = {'userId':'USER_ID','movieId':'ITEM_ID','rating':'RATING','timestamp':'TIMESTAMP'}, inplace = True)
data
# #### Prepare and Upload Data
# +
data = data[data['RATING'] > 3.6] # keep only movies rated strictly above 3.6 (treated as positive interactions)
data = data[['USER_ID', 'ITEM_ID', 'TIMESTAMP']] # select columns that match the columns in the schema below
data.to_csv(filename, index=False)

# Upload the prepared interactions CSV to S3 for the dataset import job below.
boto3.Session().resource('s3').Bucket(bucket).Object(filename).upload_file(filename)
# -
# -
# ### Create Schema
# +
# Avro schema for the INTERACTIONS dataset type: USER_ID, ITEM_ID, TIMESTAMP.
schema = {
    "type": "record",
    "name": "Interactions",
    "namespace": "com.amazonaws.personalize.schema",
    "fields": [
        {
            "name": "USER_ID",
            "type": "string"
        },
        {
            "name": "ITEM_ID",
            "type": "string"
        },
        {
            "name": "TIMESTAMP",
            "type": "long"
        }
    ],
    "version": "1.0"
}

create_schema_response = personalize.create_schema(
    name = "DEMO-schema",
    schema = json.dumps(schema)
)

schema_arn = create_schema_response['schemaArn']
print(json.dumps(create_schema_response, indent=2))
# -
# ### Create and Wait for Dataset Group
# #### Create Dataset Group
# +
create_dataset_group_response = personalize.create_dataset_group(
    name = "DEMO-dataset-group"
)

dataset_group_arn = create_dataset_group_response['datasetGroupArn']
print(json.dumps(create_dataset_group_response, indent=2))
# -
# #### Wait for Dataset Group to Have ACTIVE Status
# Poll once a minute, for at most 3 hours, until creation succeeds or fails.
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
    describe_dataset_group_response = personalize.describe_dataset_group(
        datasetGroupArn = dataset_group_arn
    )
    status = describe_dataset_group_response["datasetGroup"]["status"]
    print("DatasetGroup: {}".format(status))

    if status == "ACTIVE" or status == "CREATE FAILED":
        break

    time.sleep(60)
# ### Create Dataset
# +
# The dataset lives inside the dataset group and uses the schema created above.
dataset_type = "INTERACTIONS"
create_dataset_response = personalize.create_dataset(
    name = "DEMO-dataset",
    datasetType = dataset_type,
    datasetGroupArn = dataset_group_arn,
    schemaArn = schema_arn
)

dataset_arn = create_dataset_response['datasetArn']
print(json.dumps(create_dataset_response, indent=2))
# -
# -
# ### Prepare, Create, and Wait for Dataset Import Job
# #### Attach Policy to S3 Bucket
# +
s3 = boto3.client("s3")

# Bucket policy granting the Personalize service principal read access
# to the uploaded training data.
policy = {
    "Version": "2012-10-17",
    "Id": "PersonalizeS3BucketAccessPolicy",
    "Statement": [
        {
            "Sid": "PersonalizeS3BucketAccessPolicy",
            "Effect": "Allow",
            "Principal": {
                "Service": "personalize.amazonaws.com"
            },
            "Action": [
                "s3:GetObject",
                "s3:ListBucket"
            ],
            "Resource": [
                "arn:aws:s3:::{}".format(bucket),
                "arn:aws:s3:::{}/*".format(bucket)
            ]
        }
    ]
}

s3.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))
# -
# #### Create Personalize Role
# +
iam = boto3.client("iam")

role_name = "PersonalizeRole"
# Trust policy allowing the Personalize service to assume this role.
assume_role_policy_document = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {
                "Service": "personalize.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
        }
    ]
}

create_role_response = iam.create_role(
    RoleName = role_name,
    AssumeRolePolicyDocument = json.dumps(assume_role_policy_document)
)

# AmazonPersonalizeFullAccess provides access to any S3 bucket with a name that includes "personalize" or "Personalize"
# if you would like to use a bucket with a different name, please consider creating and attaching a new policy
# that provides read access to your bucket or attaching the AmazonS3ReadOnlyAccess policy to the role
policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonPersonalizeFullAccess"
iam.attach_role_policy(
    RoleName = role_name,
    PolicyArn = policy_arn
)

time.sleep(60) # wait for a minute to allow IAM role policy attachment to propagate

role_arn = create_role_response["Role"]["Arn"]
print(role_arn)
# -
# #### Create Dataset Import Job
# +
# Bulk-import the prepared CSV from S3 into the Personalize dataset.
create_dataset_import_job_response = personalize.create_dataset_import_job(
    jobName = "DEMO-dataset-import-job",
    datasetArn = dataset_arn,
    dataSource = {
        "dataLocation": "s3://{}/{}".format(bucket, filename)
    },
    roleArn = role_arn
)

dataset_import_job_arn = create_dataset_import_job_response['datasetImportJobArn']
print(json.dumps(create_dataset_import_job_response, indent=2))
# -
# #### Wait for Dataset Import Job to Have ACTIVE Status
# Poll once a minute, for at most 3 hours, until the import succeeds or fails.
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
    describe_dataset_import_job_response = personalize.describe_dataset_import_job(
        datasetImportJobArn = dataset_import_job_arn
    )
    status = describe_dataset_import_job_response["datasetImportJob"]['status']
    print("DatasetImportJob: {}".format(status))

    if status == "ACTIVE" or status == "CREATE FAILED":
        break

    time.sleep(60)
# ### Select Recipe
list_recipes_response = personalize.list_recipes()
# NOTE(review): aws-hrnn is a legacy recipe — confirm it is still offered
# in your account/region (newer accounts may need e.g. aws-user-personalization).
recipe_arn = "arn:aws:personalize:::recipe/aws-hrnn" # aws-hrnn selected for demo purposes
list_recipes_response
# ### Create and Wait for Solution
# #### Create Solution
# +
create_solution_response = personalize.create_solution(
    name = "DEMO-solution",
    datasetGroupArn = dataset_group_arn,
    recipeArn = recipe_arn
)

solution_arn = create_solution_response['solutionArn']
print(json.dumps(create_solution_response, indent=2))
# -
# #### Create Solution Version
# +
# The solution version is the actual trained model artifact.
create_solution_version_response = personalize.create_solution_version(
    solutionArn = solution_arn
)

solution_version_arn = create_solution_version_response['solutionVersionArn']
print(json.dumps(create_solution_version_response, indent=2))
# -
# #### Wait for Solution Version to Have ACTIVE Status
# Poll once a minute, for at most 3 hours, until training succeeds or fails.
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
    describe_solution_version_response = personalize.describe_solution_version(
        solutionVersionArn = solution_version_arn
    )
    status = describe_solution_version_response["solutionVersion"]["status"]
    print("SolutionVersion: {}".format(status))

    if status == "ACTIVE" or status == "CREATE FAILED":
        break

    time.sleep(60)
# #### Get Metrics of Solution
# +
# Retrieve the offline evaluation metrics computed for this solution version.
get_solution_metrics_response = personalize.get_solution_metrics(
    solutionVersionArn = solution_version_arn
)

print(json.dumps(get_solution_metrics_response, indent=2))
# -
# ### Create and Wait for Campaign
# #### Create Campaign
# +
# A campaign hosts the trained solution version behind a real-time endpoint.
create_campaign_response = personalize.create_campaign(
    name = "DEMO-campaign",
    solutionVersionArn = solution_version_arn,
    minProvisionedTPS = 1
)

campaign_arn = create_campaign_response['campaignArn']
print(json.dumps(create_campaign_response, indent=2))
# -
# #### Wait for Campaign to Have ACTIVE Status
# Poll once a minute, for at most 3 hours, until deployment succeeds or fails.
max_time = time.time() + 3*60*60 # 3 hours
while time.time() < max_time:
    describe_campaign_response = personalize.describe_campaign(
        campaignArn = campaign_arn
    )
    status = describe_campaign_response["campaign"]["status"]
    print("Campaign: {}".format(status))

    if status == "ACTIVE" or status == "CREATE FAILED":
        break

    time.sleep(60)
# ### Get Recommendations
# #### Select a User and an Item
# +
# Movie id -> title lookup table (first two columns of movies.csv).
items = pd.read_csv('./ml-20m/movies.csv', sep=',', usecols=[0,1], header=0)
items.columns = ['ITEM_ID', 'TITLE']

# Sample one random interaction row; the discarded third value is TIMESTAMP.
user_id, item_id, _ = data.sample().values[0]
item_title = items.loc[items['ITEM_ID'] == item_id].values[0][-1]
print("USER: {}".format(user_id))
print("ITEM: {}".format(item_title))

items
# -
# -
# #### Call GetRecommendations
# +
# Query the campaign endpoint for recommendations for the sampled user/item.
get_recommendations_response = personalize_runtime.get_recommendations(
    campaignArn = campaign_arn,
    userId = str(user_id),
    itemId = str(item_id)
)

item_list = get_recommendations_response['itemList']
# Map recommended item ids back to movie titles.
# np.int was deprecated in NumPy 1.20 and removed in 1.24 — use the builtin int.
title_list = [items.loc[items['ITEM_ID'] == int(item['itemId'])].values[0][-1] for item in item_list]

print("Recommendations: {}".format(json.dumps(title_list, indent=2)))
# -
# Print the ARNs created by this notebook (handy for later inspection/cleanup).
print("Campaign ARN is: " + str(campaign_arn))
print("Dataset Group ARN is: " + str(dataset_group_arn))
print("Solution Version ARN is: " + str(solution_version_arn))
print("Solution ARN is: " + str(solution_arn))
print("Dataset Interactions ARN is: " + str(dataset_arn))
| personalize_sample_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp stage
# hide
# nbdev boilerplate: cells marked #export are written to module `stage`.
_FNAME='stage'

import unittest
from unittest import mock
from nbdev.export import notebook2script
import os

# Shared TestCase instance used for assertions in the notebook tests below.
TESTCASE = unittest.TestCase()
# _dh is provided by IPython (directory history); _dh[0] is the notebook's dir.
_nbpath = os.path.join(_dh[0], _FNAME+'.ipynb')
# +
#export
import sys
import yaml
import atexit
from dvcrecord.params import Params
from dvcrecord.deps import Dependency, make_parser, DO_NOT_INCLUDE_IN_PIPELINE
from dvcrecord.output import Output
from dvcrecord.utils import maybe_yaml, write_yaml, PIPELINE_FILE_DEFAULT
class PipelineStage:
    """Record a DVC pipeline stage (cmd, params, deps, outs) for the running script.

    On construction the command line is parsed; depending on the parsed
    namespace flags (``dvc_dryrun`` / ``dvc_record``, set up by
    ``make_parser``), an ``atexit`` hook is registered that either prints
    the rendered stage YAML or writes it into the pipeline file.
    """

    def __init__(self, name, params=None, outputs=None, deps=None, parser=None, pipefile=None):
        # name: key of this stage under `stages:` in the pipeline file.
        self.name = name
        self.pipefile = pipefile or PIPELINE_FILE_DEFAULT
        self.parser = parser or make_parser()
        self.outputs = outputs or Output()
        self.params = params or Params()
        # NOTE(review): Dependency receives the raw `pipefile` argument rather
        # than self.pipefile — confirm Dependency applies the same default.
        self.deps = deps or Dependency(namespace=self.parse_args(), pipefile=pipefile)
        # Section name in dvc.yaml -> renderer producing that section's content.
        self.rendering_funcs = {
            'params': self.params.render,
            'deps': self.deps.render,
            'outs': self.outputs.render
        }
        self.atexit_actions()

    def atexit_actions(self):
        """Register an exit hook (print or persist the stage) per the CLI flags."""
        ns = self.parse_args()
        if ns is None:
            # No parser configured: nothing to register.
            return None
        if ns.dvc_dryrun:
            # Dry run: only print the rendered stage YAML at interpreter exit.
            atexit.register(self.show_render)
        elif ns.dvc_record:
            # Record: merge this stage into the pipeline file at interpreter exit.
            atexit.register(self.write)

    def parse_args(self, *args, **kwargs):
        """Parse known CLI args (defaults to sys.argv); None when no parser is set."""
        if self.parser is None:
            return None
        else:
            # parse_known_args tolerates the script's own, unrelated arguments.
            known, unknown = self.parser.parse_known_args(*args, **kwargs)
            return known

    def render_cmd(self, cli_args=None):
        """Build the stage `cmd` string from the current command line.

        Prepends "python" and drops the bookkeeping flags listed in
        DO_NOT_INCLUDE_IN_PIPELINE so the recorded command is re-runnable.
        """
        cli_args = cli_args or sys.argv[:]
        cli_args = ['python'] + cli_args
        return ' '.join([arg for arg in cli_args if arg not in DO_NOT_INCLUDE_IN_PIPELINE])

    def render(self, as_yaml=False):
        """Render the stage definition (params/deps/outs/cmd).

        Returns a dict, or its YAML string when as_yaml=True.
        """
        dvc_config = {}
        # The running source file is always registered as a dependency.
        self.deps.register_sourcecode()
        #self.deps.register_param(self.params)
        for key, render_func in self.rendering_funcs.items():
            this_yaml = render_func(as_yaml=False)
            if this_yaml:
                # Skip sections that rendered empty.
                dvc_config[key] = this_yaml
        dvc_config['cmd'] = self.render_cmd()
        return maybe_yaml(dvc_config, as_yaml=as_yaml)

    def show_render(self):
        """Print the rendered stage as YAML (dry-run output)."""
        print(self.render(as_yaml=True))

    def write(self, pipefile=None):
        """Merge this stage into the pipeline file and return the pipeline dict.

        Creates a fresh {'stages': {}} structure when the file does not exist.
        """
        pipefile = pipefile or self.pipefile
        try:
            with open(pipefile, 'r') as f:
                pipeline = yaml.safe_load(f)
        except FileNotFoundError:
            # No pipeline file yet: start a new one.
            pipeline = {'stages': {}}
        pipeline['stages'][self.name] = self.render(as_yaml=False)
        write_yaml(pipeline, fname=pipefile)
        return pipeline
# +
import os
from tempfile import TemporaryDirectory
from dvcrecord.utils import write_yaml
def test_stage():
    """Exercise the PipelineStage lifecycle: params, deps, outs, and write()."""
    stage = PipelineStage(name='unittest')
    with TemporaryDirectory() as workdir:
        # Create two parameter files and check they round-trip through load().
        first_params = {'myval': 1, 'stagename': {'otherval': 2}}
        first_file = write_yaml(first_params, folder=workdir, fname='params.yaml')
        second_params = {'epochs': 1000}
        second_file = write_yaml(second_params, folder=workdir, fname='moreparams.yaml')
        TESTCASE.assertEqual(stage.params.load(first_file+":myval"), 1)
        TESTCASE.assertEqual(stage.params.load(second_file+":epochs"), 1000)
        # Register one dependency and one output, reading each file once.
        dep_path = stage.deps.register(write_yaml({"input": "data"}, folder=workdir, fname='input.data'))
        with open(dep_path) as dep_file:
            dep_file.read()
        out_path = write_yaml({"output": "data"}, folder=workdir, fname='output.data')
        with open(stage.outputs.register(out_path)) as out_file:
            out_file.read()
        # Finally, persist the stage definition into a scratch dvc.yaml.
        scratch_pipefile = os.path.join(workdir, 'dvc.yaml')
        stage.write(scratch_pipefile)

test_stage()
# -
notebook2script(_nbpath)
| nbs/stage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Exploratory data analysis of the Hotel Booking Demand dataset
# ================
# - contributors: <NAME>, <NAME>, <NAME>, <NAME>
# - Created on: 2020-11-21
#
# # Summary of the data set
#
# The data set used in this project comes from the Hotel Booking demand datasets from [<NAME>, 2019](https://www.sciencedirect.com/science/article/pii/S2352340918315191#ack0005) and the data can be found from the GitHub Repository [here](https://github.com/rfordatascience/tidytuesday/tree/master/data/2020/2020-02-11). The dataset contains real world data obtained from two hotels: one resort hotel and one city hotel. Each row represents an individual hotel booking due to arrive between July 1st, 2015 and August 31st, 2017. There are 119390 observations in the data set, and 31 features. The following table shows the counts of observations for each hotel.
#
# | Resort Hotel | City Hotel |
# | -----------: | ---------: |
# | 40060 | 79330 |
#
# Table 1: Counts of observation for each hotel.
# # Import Packages and Load Data
# +
# common packages
import numpy as np
import pandas as pd

# ML packages
from sklearn.model_selection import train_test_split

# Visualization packages
import altair as alt
from altair_saver import save

# Save a vega-lite spec and a PNG blob for each plot in the notebook
alt.renderers.enable('mimetype')
# Handle large data sets without embedding them in the notebook
alt.data_transformers.enable('data_server')

# set seed
seed = 2020

# Load data:
hotels_df = pd.read_csv("../data/raw/hotels_dataset.csv")
# +
# Split data:
# 80% of observations are in the training and 20% of observations are in the test set
train_df, test_df = train_test_split(hotels_df, test_size=0.2, random_state=seed)

# Split the features and targets:
X_train = train_df.drop(["is_canceled"], axis=1)
y_train = train_df["is_canceled"]
X_test = test_df.drop(["is_canceled"], axis=1)
y_test = test_df["is_canceled"]

# Separate Resort and City Hotel; .copy() so columns added later
# (e.g. total_nights) do not raise chained-assignment warnings:
resort_train = X_train.loc[(X_train["hotel"] == "Resort Hotel")].copy()
city_train = X_train.loc[(X_train["hotel"] == "City Hotel")].copy()
# -
# ## Splitting the data set into training and test data sets
#
# - 80% of observations are in the training and 20% of observations are in the test set
#
# | Data partition | Is Canceled | Is Not Canceled |
# | :------------- | -----------: | ---------: |
# | Training | 35407 | 60105 |
# | Test | 8817 | 15061 |
#
# Table 2: Counts of observation for each hotel for each data partition
#
# There is a class imbalance. We would like to predict cancellations as accurately as possible so that the hotel does not get an unwanted surprise; hence, we would like to maximize recall. However, in the process of maximizing recall we might over-predict cancellations. That would be an adverse scenario, as management may panic and start introducing promotions that take a toll on hotel revenue. Hence, we would like to keep precision high as well. As we are interested in keeping both precision and recall high, the **f1-score** will be a good evaluation metric here.
# # Viewing the Train data set
train_df.head()

train_df.info()

# Check Null values: per-feature count and percentage of missing entries
# (only features with at least one null are kept).
null_df = train_df.isna().sum().reset_index(name="count_of_nulls").query("count_of_nulls != 0")
null_df["perc"] = np.round(null_df["count_of_nulls"] / train_df.shape[0] * 100, 2)
null_df
# ### Train data set observations
#
# Our train data set has a number of numeric and categorical features to be explored, as well as many that may not be useful for prediction. There are also a few features with a large number of missing values, the largest of which is "company": it can likely be omitted from the analysis since ~94% of its observations are null. Next steps will require delving further into the features to see whether they serve any purpose for training or should be omitted.
# # Exploratory Analysis Visualizations
# ## Feature Distributions
# +
# Numeric columns whose distributions are plotted against the target below.
numeric_features = [
    "lead_time",
    "stays_in_weekend_nights",
    "stays_in_week_nights",
    "adults",
    "children",
    "babies",
    "previous_cancellations",
    "previous_bookings_not_canceled",
    "booking_changes",
    "days_in_waiting_list",
    "adr",
    "required_car_parking_spaces",
    "total_of_special_requests"
]

# Work on a copy and add a human-readable label for the binary target.
train_df = train_df.copy()
train_df["is_canceled_cat"] = train_df["is_canceled"].apply(lambda x: "Canceled" if x == 1 else "Not Canceled")
train_df['stays_in_weekend_nights'].value_counts()

# Trim extreme values per feature so the distribution plots stay readable.
train_df_num = train_df.loc[train_df['stays_in_weekend_nights']<=10]
train_df_num = train_df_num.loc[train_df_num['stays_in_week_nights']<=15]
train_df_num = train_df_num.loc[train_df_num['lead_time']<=600]
train_df_num = train_df_num.loc[train_df_num['adults']<=4]
train_df_num = train_df_num.loc[train_df_num['children']<=3]
train_df_num = train_df_num.loc[train_df_num['babies']<=2]
train_df_num = train_df_num.loc[train_df_num['previous_cancellations']<=15]
train_df_num = train_df_num.loc[train_df_num['previous_bookings_not_canceled']<=15]
train_df_num = train_df_num.loc[train_df_num['booking_changes']<=10]
train_df_num = train_df_num.loc[train_df_num['days_in_waiting_list']<=20]
train_df_num = train_df_num.loc[train_df_num['adr']<=1000]

# One count-line panel per numeric feature, colored by cancellation status.
numeric_vs_target = (alt.Chart(train_df_num)
    .mark_line(interpolate='monotone').encode(
        alt.X(alt.repeat(), type='quantitative'),
        alt.Y('count()', title = ""),
        alt.Color('is_canceled_cat', title = ""))).properties(width=150, height=150).repeat(numeric_features,columns = 4)
numeric_vs_target
# -
# -
# ### Distribution Observations:
#
# For our numeric feature distributions we find many of the numeric features are right skewed as they are dominated by `0` values. This may mean many of these numeric features may not be good predictors of the target and score low coefficient weights. A few numeric features that look promising for prediciton are `total_of_special_requests`, `required_car_parking_spaces`, `stay_in_week_nights` and `stay_in_weekend_nights` as they have wider distributions.
# +
# categorical features against target graph
# NOTE(review): this list is not used below — the panels are created
# explicitly; keep the list and calls in sync.
categorical_features = [
    "hotel",
    "meal",
    "market_segment",
    "distribution_channel",
    "reserved_room_type",
    "deposit_type",
    "customer_type",
    "is_repeated_guest",
]

def make_cat_graph(var):
    """Heatmap of mean cancellation rate per level of categorical column `var`.

    Reads `train_df` from the enclosing scope; the rounded rate is printed
    on top of each colored cell.
    """
    test = train_df.groupby(var).agg({'is_canceled':'mean'})
    test.reset_index(inplace = True)
    test['is_canceled'] = round(test['is_canceled'], 2)
    # Cast levels to str so Altair treats the axis as categorical.
    test[var] = test[var].astype(str)
    graph = alt.Chart(test).mark_rect().encode(
        alt.X(var),
        alt.Color("is_canceled", title="Cancel Rate"),
    ).properties(width=300, height=200)
    # Overlay the numeric rate on the colored cells.
    return graph+alt.Chart(test).mark_text(color = 'black').encode(alt.X(var), text = 'is_canceled')

# One panel per categorical feature, arranged in two rows of four.
hotel = make_cat_graph('hotel')
meal = make_cat_graph('meal')
market_segment = make_cat_graph('market_segment')
distribution_channel = make_cat_graph('distribution_channel')
reserved_room_type = make_cat_graph('reserved_room_type')
deposit_type = make_cat_graph('deposit_type')
customer_type = make_cat_graph('customer_type')
is_repeated_guest = make_cat_graph('is_repeated_guest')

cat_vs_target = (hotel|meal|market_segment|distribution_channel)&(reserved_room_type|deposit_type|customer_type|is_repeated_guest)
cat_vs_target
# -
# -
# ## Feature Correlations
# correlation chart all variable
# Pairwise correlations of the numeric columns, reshaped to long format
# (level_0 / level_1 are the two feature names of each pair).
corr_df = train_df.corr().stack().reset_index(name="corr")
corr_df["round_corr"] = np.round(corr_df["corr"], 2)
corr_plot = (
    alt.Chart(
        corr_df.query("level_0 != 'is_canceled' & level_1 != 'is_canceled'"),
        title="Feature Correlation",
    )
    .mark_rect()
    .encode(
        x="level_0",
        y="level_1",
        tooltip="corr",
        color=alt.Color(
            "corr", scale=alt.Scale(domain=(-1, 1), scheme="purpleorange")
        ),
    )
    .properties(width=500, height=500)
)
# Overlay the rounded correlation value on each heatmap cell.
corr_text = (
    alt.Chart(corr_df.query("level_0 != 'is_canceled' & level_1 != 'is_canceled'"))
    .mark_text(size=8)
    .encode(
        x=alt.X("level_0", title="Features"),
        y=alt.Y("level_1", title="Features"),
        text="round_corr",
    )
    .properties(width=500, height=500)
)
corr_all = corr_plot + corr_text
corr_all
# + jupyter={"source_hidden": true}
# correlation against target chart: a single row showing each feature's
# correlation with is_canceled.
corr_plot = (
    alt.Chart(
        corr_df[corr_df.level_1 == "is_canceled"], title="Feature Correlation"
    )
    .mark_rect()
    .encode(
        x=alt.X("level_0", title="Features"),
        y=alt.Y("level_1", title="Target"),
        tooltip="corr",
        color=alt.Color(
            "corr", scale=alt.Scale(domain=(-1, 1), scheme="purpleorange")
        ),
    )
    .properties(width=600)
)
corr_text = (
    alt.Chart(corr_df[corr_df.level_1 == "is_canceled"])
    .mark_text(size=8)
    .encode(
        x=alt.X("level_0", title="Features"),
        y=alt.Y("level_1", title="Target"),
        text="round_corr",
    )
    .properties(width=600)
)
corr_target = corr_plot + corr_text
corr_target
# -
# -
# ### Correlation Observations:
#
# There is a moderate correlation between `arrival_date_week_number` and `arrival_date_year` as well as `stay_in_week_nights` and `stay_in_weekend_nights`. These may be expected values however, we need to explore the relations further in regards to training our model. There is also some correlation between `lead_time` and `total_of_special_requests` with the target. Further analysis will reveal if these are useful features for predicting the target.
# + jupyter={"source_hidden": true}
# Missing-value summary with presentation-friendly column names.
# NOTE(review): this repeats the null check computed earlier (count_of_nulls /
# perc) — consider keeping only one of the two tables.
null_df = (
    train_df.isna()
    .sum()
    .reset_index(name="missing_count")
    .query("missing_count != 0")
)
null_df["missing_percentage"] = np.round(
    null_df["missing_count"] / train_df.shape[0] * 100, 2
)
null_df = null_df.rename({"index": "feature"}, axis=1)
null_df
# -
# -
# ## Feature Examination
# + jupyter={"source_hidden": true}
# feature examination charts
top_20_countries = (
X_train.groupby("country")
.size()
.reset_index(name="counts")
.sort_values(by="counts", ascending=False)[:20]
)
countries = (
alt.Chart(top_20_countries, title="Top 20 home country of guests")
.mark_bar()
.encode(
alt.X("counts", title="Guests numbers"),
alt.Y("country", sort="-x", title="Country"),
alt.Tooltip("country"),
)
)
X_train["adr_ac"] = X_train["adr"] / (X_train["adults"] + X_train["children"])
room_price = X_train[["hotel", "reserved_room_type", "adr_ac"]].sort_values(
"reserved_room_type"
)
room_price = (
alt.Chart(room_price)
.mark_boxplot(extent="min-max", clip=True)
.encode(
alt.X("adr_ac", title="Price [EUR]", scale=alt.Scale(domain=(0, 120))),
alt.Y("hotel", title="Hotel"),
color="hotel",
)
.facet(
"reserved_room_type",
columns=2,
title="Price per night and person for different room types",
)
)
resort_train["total_nights"] = (
resort_train["stays_in_weekend_nights"] + resort_train["stays_in_week_nights"]
)
city_train["total_nights"] = (
city_train["stays_in_weekend_nights"] + city_train["stays_in_week_nights"]
)
num_nights_resort = list(resort_train["total_nights"].value_counts().index)
num_bookings_resort = list(resort_train["total_nights"].value_counts())
rel_bookings_resort = (
resort_train["total_nights"].value_counts() / sum(num_bookings_resort) * 100
) # convert to percent
num_nights_city = list(city_train["total_nights"].value_counts().index)
num_bookings_city = list(city_train["total_nights"].value_counts())
rel_bookings_city = (
city_train["total_nights"].value_counts() / sum(num_bookings_city) * 100
) # convert to percent
resort_nights = pd.DataFrame(
{
"hotel": "Resort hotel",
"num_nights": num_nights_resort,
"rel_num_bookings": rel_bookings_resort,
}
)
city_nights = pd.DataFrame(
{
"hotel": "City hotel",
"num_nights": num_nights_city,
"rel_num_bookings": rel_bookings_city,
}
)
nights_data = pd.concat([resort_nights, city_nights], ignore_index=True)
nights_data
stay = (
alt.Chart(nights_data)
.mark_bar()
.encode(
alt.X("num_nights", title="Number of nights"),
alt.Y("rel_num_bookings", title="Percent of guests"),
color=alt.Color("hotel", legend=None),
)
.facet("hotel", title="Length of guests stay")
)
feature_exam = (countries.properties(height=300, width=200) | stay) & room_price
feature_exam
# -
# ### Examination Observations
#
# Looking in depth at a few features, we find some interesting results. First, we notice most of our observations come from European countries — specifically, most are from Portugal. The model may perform better when predicting on guests from non-European countries; or perhaps it will perform worse, since the data set comes from only two hotels whose guests are mainly European.
#
# Second, looking at the number of nights stayed we find a difference between the hotels. For the city hotel, guests tend to stay for 1-4 nights. While for the resort hotel, the distribution is similar, but more guests stay up to 7 nights. This could be because there is a tendency for resort hotel guests to stay longer.
#
# We can hypothesize that room price could be a good predictor for a cancellation. We find a difference in room prices between the resort and city hotels in the different room types. Perhaps the roomy type prices have a role in prediction. The limitations of this feature are that there is no currency information for price in the dataset, but we know most of guests are from European countries so it may be safe to assume that all prices all in EUR. Additionally, due to guest anonymity reasons, the rooms types are only given as letters, so we would not be able to tell which specific room types are good predictors.
# ### Price per night varies over the year
# + jupyter={"source_hidden": true}
# price versus month graph
prices_monthly = X_train[["hotel", "arrival_date_month", "adr_ac"]].sort_values(
    "arrival_date_month"
)
# order by month: make the month column an ordered categorical so that
# sorting and the x-axis follow calendar order instead of alphabetical.
months_ordered = [
    "January",
    "February",
    "March",
    "April",
    "May",
    "June",
    "July",
    "August",
    "September",
    "October",
    "November",
    "December",
]
prices_monthly["arrival_date_month"] = pd.Categorical(
    prices_monthly["arrival_date_month"], categories=months_ordered, ordered=True
)
prices_monthly = prices_monthly.sort_values("arrival_date_month")

prices_points = (
    alt.Chart(prices_monthly, title="Room price per night over the year")
    .mark_point()
    .encode(
        alt.X("arrival_date_month", title="Month", sort=months_ordered),
        alt.Y("adr_ac", title="Price [EUR]"),
        alt.Color("hotel"),
    )
    .properties(width=500, height=400)
)
# Line of the monthly mean price per hotel.
price_vs_month = prices_points.encode(y="mean(adr_ac)").mark_line()
price_vs_month
# -
# ##### Plot Summary
#
# - During the summer time, the prices in Resort Hotel are much higher than City Hotel
# - The prices of Resort Hotel vary a lot, and are most expensive during summer time
# - The prices of City Hotel vary less, and are most expensive during spring.
# ### Most busy month
# + jupyter={"source_hidden": true}
# guest versus month graph
# Booking counts per arrival month for each hotel.
rguests_monthly = resort_train.groupby("arrival_date_month")["hotel"].count()
cguests_monthly = city_train.groupby("arrival_date_month")["hotel"].count()

rguest_data = pd.DataFrame(
    {
        "month": list(rguests_monthly.index),
        "hotel": "Resort hotel",
        "guests": list(rguests_monthly.values),
    }
)
cguest_data = pd.DataFrame(
    {
        "month": list(cguests_monthly.index),
        "hotel": "City hotel",
        "guests": list(cguests_monthly.values),
    }
)
guest_data = pd.concat([rguest_data, cguest_data], ignore_index=True)
guest_data["month"] = pd.Categorical(
    guest_data["month"], categories=months_ordered, ordered=True
)
guest_data = guest_data.sort_values("month")

# Dataset contains July and August data from 3 years, the other months from 2 years. Normalize data:
guest_data.loc[
    (guest_data["month"] == "July") | (guest_data["month"] == "August"), "guests"
] /= 3
guest_data.loc[
    ~((guest_data["month"] == "July") | (guest_data["month"] == "August")), "guests"
] /= 2

guests_points = (
    alt.Chart(guest_data, title="Number of guests over the year")
    .mark_point()
    .encode(
        alt.X("month", title="Month", sort=months_ordered),
        alt.Y("guests", title="Number of guests"),
        alt.Color("hotel"),
    )
    .properties(width=500, height=400)
)
guest_vs_month = guests_points.mark_line()
guest_vs_month
# -
# ##### Plot Summary
#
# - During the winter time, both hotels have fewer guests.
# - City Hotel has more guests during the spring and autumn, when prices are also at their higher level. In summer time, the guests are fewer and prices are also lower.
# - Resort Hotel's guest numbers vary less; when the prices reach their highest in summer, the guests are fewer.
# ### Repeated guests with previous booking
# + jupyter={"source_hidden": true}
# guest repeat booking with cancel history graph
guests_prev_cancel = X_train[
    ["is_repeated_guest", "previous_bookings_not_canceled"]
]
# Total not-canceled previous bookings, split by repeated-guest flag.
rep_guests_prev_cancel = (
    alt.Chart(
        guests_prev_cancel, title="Guests repeat booking with cancellation history"
    )
    .mark_bar()
    .encode(
        alt.X(
            "sum(previous_bookings_not_canceled)",
            title="Total number of previous bookings not cancelled",
        ),
        alt.Y("is_repeated_guest:O", title="Repeated guests"),
    )
)
rep_guests_prev_cancel
# -
# -
# ##### Plot Summary
# - For "Repeated guests", 1 means this guest is a repeated guest, and 0 means the opposite
# - When there are more previous bookings are not canceled, the guest tends to be a repeated guest, and vice versa.
# # References
#
# <div id="refs" class="references">
#
# <div id="ref-Hotel2019">
#
# <NAME>, <NAME>, and <NAME>. 2019. "Hotel booking demand datasets." Data in brief 22: 41-49. <https://doi.org/10.1016/j.dib.2018.11.126>
#
# </div>
#
# </div>
| doc/hotels_data_preliminary_analysis_eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # RJ's Branch
# The following code solves the equations of motion with air drag. With drag force $-\alpha |\dot{r}|\dot{r}$ and gravitational force $-mg\hat{y}$, these equations of motion are:
#
# $\ddot{x}=-\beta \dot{x}\sqrt{{\dot{x}^2}+{\dot{y}^2}}$
#
# $\ddot{y}=-g-\beta \dot{y}\sqrt{{\dot{x}^2}+{\dot{y}^2}}$
#
# where $\beta = \frac{\alpha}{m}$, $\alpha =\frac{\rho C_{d}A}{2}$, $\rho$ is the air density, $C_{d}$ is the drag coefficient, and $A$ is the cross-sectional area of the projectile.
import matplotlib.pyplot as plt
import numpy as np
import math
import pylab
def solve(f,y0,interval,steps):
    """Solve the ODE y' = f(y, t) by Euler's method with a fixed number of steps.

    Parameters:
        f        - right-hand side of the ODE, called as f(y, t)
        y0       - initial value of y at t = interval[0]
        interval - tuple (a, b): the region on which to solve the ODE
        steps    - number of Euler steps to take

    Returned:
        Tuple (tpoints, ypoints): tpoints is a numpy array of the `steps`
        time values and ypoints is a list of the corresponding Euler
        estimates of y (one per time value).
    """
    # Size of a single step over the interval
    a, b = interval
    h = (b - a) / steps
    state = y0
    # Time grid; arange(a, b, h) yields `steps` points (b is excluded)
    tpoints = np.arange(a, b, h)
    ypoints = []
    for t in tpoints:
        # Record the state *before* stepping, so ypoints[0] == y0
        ypoints.append(state)
        # Fix: rebind instead of `+=` so an array-valued y0 is never
        # mutated in place (which would alias every stored history entry).
        state = state + h * f(state, t)
    return (tpoints, ypoints)
def projectile(dt, v_init, theta, x, y, drag=False, A_m=0, elevation=False, p0=101325, y0=1e4):
    """
    Fill x and y with an Euler-integrated projectile trajectory.

    Parameters:
        dt - time step in s
        v_init - initial speed in m/s
        theta - launch angle in radians
        x - array of x values; x[0] is the initial x position
        y - array of y values; y[0] is the initial y position
        drag - if True, include the quadratic air-drag force (optional)
        A_m - drag coefficient beta = alpha/m used for the drag force (optional)
        elevation - if True, scale drag by the altitude-dependent
                    pressure ratio p/p0 (optional)
        p0 - sea-level air pressure in Pa; only the ratio p/p0 is
             actually used, so its units cancel (optional)
        y0 - scale height of the exponential atmosphere in m (optional)

    Returned:
        The x and y arrays containing the new values.  Note: the
        trajectory is integrated for len(x)-1 steps regardless of
        whether the projectile has landed, so y may go negative.
    """
    g = 9.8  # gravitational acceleration in m/s^2
    # Decompose the launch speed into velocity components
    vx = v_init * math.cos(theta)
    vy = v_init * math.sin(theta)
    nmax = len(x)
    for i in range(1, nmax):
        # Euler position update from the previous point
        x[i] = x[i-1] + vx * dt
        y[i] = y[i-1] + vy * dt
        if not drag:
            # Without air drag only gravity acts (y direction)
            vy = vy - g * dt
        else:
            # Quadratic drag magnitude per unit velocity: beta * |v|
            fd = A_m * math.sqrt(vx ** 2 + vy ** 2)
            if elevation:
                # Exponential atmosphere: drag scales with the local
                # pressure ratio p/p0, which decays with altitude.
                p = p0 * (np.e ** (-y[i] / y0))
                fd = (p / p0) * fd
            vy = vy - g * dt - fd * vy * dt
            vx = vx - fd * vx * dt
    return x, y
# +
# Trajectories without drag for a range of launch angles.
dt = 0.25
v_init = 700
ax = plt.subplot(111)
# One trajectory per (angle, colour) pair, largest angle first
for angle, colour in zip([55, 50, 45, 40, 35], ["b", "g", "r", "y", "k"]):
    xs = np.zeros(5000)
    ys = np.zeros(5000)
    xs, ys = projectile(dt, v_init, (angle / 180) * math.pi, xs, ys)
    ax.plot(xs, ys, colour, label="Theta = %d degrees" % angle)
ax.set_title("Projectile Motion without Air Resistance")
ax.set_xlabel("X Position (m)")
ax.set_ylabel("Y Position (m)")
ax.set_ylim(0, 18000)
ax.set_xlim(0, 55000)
ax.legend(frameon=False, loc='upper right', prop={'size': 8})
# Shows the plot
plt.show()
# +
# Trajectories WITH quadratic air drag for the same set of launch angles.
dt = 0.25
v_init = 700
A_m = 4e-5
ax = plt.subplot(111)
for angle, colour in zip([55, 50, 45, 40, 35], ["b", "g", "r", "y", "k"]):
    xs = np.zeros(5000)
    ys = np.zeros(5000)
    xs, ys = projectile(dt, v_init, (angle / 180) * math.pi, xs, ys, True, A_m)
    ax.plot(xs, ys, colour, label="Theta = %d degrees" % angle)
ax.set_title("Projectile Motion with Air Resistance")
ax.set_xlabel("X Position (m)")
ax.set_ylabel("Y Position (m)")
ax.set_ylim(0, 10500)
ax.set_xlim(0, 22500)
ax.legend(frameon=False, loc='lower center', prop={'size': 8})
# Shows the plot
plt.show()
# -
# The above plot shows the projectile motion of a ball with air resistance after being thrown at different launch angles. This plot shows the launch angles of 35, 40, 45, 50, and 55 degrees. As we can see, the larger the angle, the further the ball will travel in the y-direction and the smaller the angle, the further the ball will travel in the x-direction.
# +
# Solve the four first-order ODEs of drag-free projectile motion with
# the generic Euler solver defined above, one component at a time.
x0 = 0
y0 = 0
vx0 = 100
vy0 = 100
g = 9.8
# dx/dt = vx0 (constant horizontal velocity, no drag)
def x(x, t):
    return vx0
# dvx/dt = 0 (no horizontal force)
def vx(v, t):
    return 0
# dvy/dt = -g (gravity only)
def vy(v, t):
    return -g
x1, y1 = solve(x, x0, (0,1000), 1000)
x2, y2 = solve(vx, vx0, (0,1000), 1000)
x4, y4 = solve(vy, vy0, (0,1000), 1000)
# dy/dt = vy(t): reads the vy solution computed just above, so this must
# stay after the solve() call that fills y4.
# NOTE(review): indexing y4 with int(t) assumes a step size of exactly 1
# (interval length == steps) — confirm before changing either argument.
def y(y, t):
    return y4[int(t)]
x3, y3 = solve(y, y0, (0,1000), 1000)
#yxa = []
#yvxa = []
#yya = []
#yvya = []
#for i in t:
#    yxa.append()
#Creates the plot (2x2 grid: x, y on top; vx, vy below)
fig,ax = plt.subplots(2,2,figsize=(15,10))
#Plots the data and sets the axes
ax[0,0].plot(x1, y1, "b", label="Euler's Method Solutin")
#ax[0].plot(t, yxa, "g", label="Exact Result")
ax[0,0].set_title("Solution for dx/dt")
ax[0,0].set_xlabel("Time (s)")
ax[0,0].set_ylabel("X Position (m)")
ax[0,0].legend()
ax[1,0].plot(x2, y2, "b", label="Euler's Method Solutin")
#ax[1].plot(t, yvxa, "g", label="Exact Result")
ax[1,0].set_title("Solution for dvx/dt")
ax[1,0].set_xlabel("Time (s)")
ax[1,0].set_ylabel("X Velocity (m/s)")
ax[1,0].legend()
ax[0,1].plot(x3, y3, "b", label="Euler's Method Solutin")
#ax[2].plot(t, yya, "g", label="Exact Result")
ax[0,1].set_title("Solution for dy/dt")
ax[0,1].set_xlabel("Time (s)")
ax[0,1].set_ylabel("Y Position (m)")
ax[0,1].legend()
ax[1,1].plot(x4, y4, "b", label="Euler's Method Solutin")
#ax[3].plot(t, yvya, "g", label="Exact Result")
ax[1,1].set_title("Solution for dvy/dt")
ax[1,1].set_xlabel("Time (s)")
ax[1,1].set_ylabel("Y Velocity (m/s)")
ax[1,1].legend()
#Shows the plot
plt.show()
#ax.set_ylim(0, 10500)
#ax.set_xlim(0, 22500)
# -
# The above plots shows the solution to the four differential equations for projectile motion. The exact solutions have not yet been added to the plots, but we plan to add it soon.
# +
# Compare constant-density drag against drag scaled by the
# altitude-dependent air pressure (elevation=True), at two angles.
dt = 0.25
v_init = 700
A_m = 4e-5
cases = [
    (45, "b--", "Theta = 45 degrees", False),
    (45, "b", "Theta = 45 degrees with elevation", True),
    (35, "r--", "Theta = 35 degrees", False),
    (35, "r", "Theta = 35 degrees with elevation", True),
]
ax = plt.subplot(111)
for angle, fmt, label, use_elevation in cases:
    xs = np.zeros(5000)
    ys = np.zeros(5000)
    xs, ys = projectile(dt, v_init, (angle / 180) * math.pi, xs, ys,
                        True, A_m, use_elevation)
    ax.plot(xs, ys, fmt, label=label)
ax.set_title("Projectile Motion with Air Resistance")
ax.set_xlabel("X Position (m)")
ax.set_ylabel("Y Position (m)")
ax.set_ylim(0, 10000)
ax.set_xlim(0, 30000)
ax.legend(frameon=False, loc='lower center', prop={'size': 8})
# Shows the plot
plt.show()
# -
# The above plot shows the projectile motion of an object with and without the effect of lower air density at higher altitudes. This plot shows an object with launch angles 35 and 45 degrees. As we can see, when the elevation is taken into account (i.e. the lower air densities), the object travels further in both directions.
| .ipynb_checkpoints/rj_code-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3. Using H1st.AI to Encode Human Insights as a Model and Harmonize Human + ML in a H1st.Graph
# ### 3a. Use case analysis: turning on safe-mode vs post-mortem analysis
# The H1ST.AI approach to this problem begins by thinking about the end-users of the decision system, and their uses cases.
#
# What are the use cases for such Automotive Cybersecurity system? We can envision two distinctive use cases:
# 1. The onboard intrusion detection system can detect an attack event in realtime and set the car into a safe mode so that drivers can safely get to a safe location and not be stuck in the highway with malfunctioning cars.
# 2. A security expert could review the attack in post-mortem mode, in which the IDS provides message-by-message attack vs normal classification.
#
# For use case #1 "safe mode triggering by attack event detection", the ML requirement is that it has near-zero FPR.
#
# To give an example, each second might carry around 100 CAN messages per car. If we have a fleet with just 1000 cars, each driven 1h per day, then an FPR of 0.00001 at message-level still means that each day we have 0.00001 x 100msg x 3600s x 1000cars = 3600 false positive events per day!
#
# Additionally, for deployment & anticipated regulatory purpose, the system should behave robustly and explainably. While explainability is a complex subject, we meant that one could anticipate the system’s behavior reasonably well, as well as for legal/regulation purposes. As we saw with iForest or GBM ML models, they don’t quite meet this requirement, as it is hard to explain precisely how these models classify attacks, even if they can achieve good accuracy.
#
# For use case #2 "post-mortem analysis", it turns out that the requirement is very different. Some FPR could be traded off for higher TPR for post-mortem. And the system might not need to be highly explainable, as it is after all the job of the security experts to analyze the attacks in depth and make the final decisions.
# ### 3b. Problem (re)formulation into H1st.AI Graph
# We reformulate the problem into the form of a decision graph, where the outermost flow detects attack events and corresponding yes branches handles message classification. For this tutorial we focus on injection attacks which are most common in the wild (we will revisit this later).
#
# The graph looks like this.
#
# <img src="http://docs.arimo.com/H1ST_AI_Tutorial/img/graph2.png" alt="automotive cybersecurity solution graph"/>
# ### 3c. Encoding human insights for event detection as a H1st.Model
# Remember when we start analyzing the CAN dataset, we have remarked that the normal data is highly regular, especially in terms of the message frequency for each CAN ID.
#
# It turns out that using message frequency statistics for injection event detection is highly accurate for safe-mode use cases (high TPR, low FNR). This surprising fact was first pointed out by the original CAN bus hackers <NAME> and <NAME> in the seminal white paper [Adventures in Automotive Networks and Control Units](https://ioactive.com/pdfs/IOActive_Adventures_in_Automotive_Networks_and_Control_Units.pdf).
#
# > It is pretty straightforward to detect the attacks discussed in this paper. They always involve either sending new, unusual CAN packets or flooding the CAN bus with common packets... Additionally, the frequency of normal CAN packets is very predictable... Therefore we propose that a system can detect CAN anomalies based on the known frequency of certain traffic and can alert a system or user if frequency levels vary drastically from what is well known.
#
# Using H1ST, we can encode insights of such “human” models and use them just like ML models. An h1.Model is essentially anything that can predict. H1ST provides tools to help automate their saving and loading, too, easing the way for using them in an integrated decision system.
#
# In a H1ST project structure, we typically organize this under `models` directory, e.g. the content of `models/msg_freq_event_detector.py` looks like this. The details of training is quite simple: looping through a number of files to compute window statistics such as how many msg per CAN ID are found & what’s the min & max and percentile values.
# +
import h1st as h1
SENSORS = ["SteeringAngle", "CarSpeed", "YawRate", "Gx", "Gy"]

class MsgFreqEventDetectorModel(h1.Model):
    """Human-insight model: flags a time window as an attack event when a
    sensor's message count exceeds the maximum seen in normal traffic."""

    def load_data(self, num_files=None):
        """Load up to num_files data files via the project util helper."""
        return util.load_data(num_files)

    def train(self, prepared_data):
        """Compute per-sensor message-count statistics over fixed,
        non-overlapping windows of the normal training files.

        Stores the result (pandas describe() frame) in self.stats.
        """
        files = prepared_data["train_normal_files"]
        from collections import defaultdict

        def count_messages(f):
            # Count messages per sensor in every non-overlapping window.
            df = pd.read_csv(f)
            df.columns = ['Timestamp', 'Label', 'CarSpeed', 'SteeringAngle', 'YawRate', 'Gx', 'Gy']
            counts = defaultdict(list)
            for window_start in util.gen_windows(df, window_size=config.WINDOW_SIZE, step_size=config.WINDOW_SIZE):
                w_df = df[(df.Timestamp >= window_start) & (df.Timestamp < window_start + config.WINDOW_SIZE)]
                for sensor in config.SENSORS:
                    counts[sensor].append(len(w_df.dropna(subset=[sensor])))
            return pd.DataFrame(counts)

        ret = [count_messages(f) for f in files]
        df = pd.concat(ret)
        # describe() keeps max/min/percentiles per sensor for prediction
        self.stats = df.describe()

    def predict(self, data):
        """Classify each window in data["df"] as in-attack or not.

        data carries "df" (messages) and "window_starts" (window origins).
        Returns {"event_detection_results": [...]}: one dict per window
        with per-sensor flags, the overall "WindowInAttack" flag, and the
        "window_start" timestamp for downstream nodes.
        """
        df = data['df']
        window_starts = data["window_starts"]
        window_results = []
        for window_start in window_starts:
            # Fix: the original referenced a bare WINDOW_SIZE, which is not
            # defined in this module; use config.WINDOW_SIZE as train() does.
            w_df = df[(df.Timestamp >= window_start) & (df.Timestamp < window_start + config.WINDOW_SIZE)]
            results = {}
            for sensor in SENSORS:
                w_df_sensor = w_df.dropna(subset=[sensor])
                max_normal_message_freq = self.stats.at['max', sensor]
                msg_freq = len(w_df_sensor)
                # +1 gives a one-message tolerance above the normal maximum
                if msg_freq > (max_normal_message_freq + 1):
                    results[sensor] = 1
                else:
                    results[sensor] = 0
            results["WindowInAttack"] = any(results.values())
            results["window_start"] = window_start  # information for down-stream
            window_results.append(results)
        return {"event_detection_results": window_results}
# -
# Now let's import and train this `MsgFreqEventDetectorModel`.
# +
# Initialize the H1st project environment, then train the frequency model.
h1.init()
from AutomotiveCybersecurity.models.msg_freq_event_detector import MsgFreqEventDetectorModel
m = MsgFreqEventDetectorModel()
# -
data = m.load_data(num_files=5)  # small subsample for the tutorial
m.train(data)
m.stats  # per-sensor message-count statistics learned from normal traffic
# The nice thing about h1st.Model is that we can easily save/load them. By default, the "model", "stats" and "metrics" properties are persisted, and they support a variety of flavors & data structures.
m.persist()
# ### 3d. Working with H1st Graph
# Let's now make some event-level predictions.
#
# Note that since the model was persisted using H1st model repo, this means that we can easily come back to a notebooks and/or scripts and load the trained model or computed statistics.
#
# Importantly, H1st allows much speedier integration into a Graph (and later deployment, too).
# +
from AutomotiveCybersecurity.graph import WindowGenerator
from AutomotiveCybersecurity.models.msg_freq_event_detector import MsgFreqEventDetectorModel
# Two-node graph: window generation followed by frequency-based event detection.
graph = h1.Graph()
graph.start()\
    .add(WindowGenerator())\
    .add(MsgFreqEventDetectorModel().load())
graph.end()
import glob
# NOTE(review): hard-coded local path — only works on the author's machine.
fs = glob.glob("/Users/aht/Documents/autocyber/13Prius/predict_data/add/YawRate/*.csv")
print(fs[0])
df = pd.read_csv(fs[0])
df.columns = ['Timestamp', 'Label', 'CarSpeed', 'SteeringAngle', 'YawRate', 'Gx', 'Gy']
results = graph.predict({"df": df})
results.keys()
# -
# And we should see that we can detect attacks starting at Timestamp 604.3105000000011
# First few windows flagged as in-attack:
[x for x in results["event_detection_results"] if x["WindowInAttack"]][:5]
# ### 3e. Adding a message classifier, harmonizing human + ML models in the graph
# For message-level classification we can simply bring back our gradient-boosted trees which did a decent job of recognizing injection messages. (Integrating sequence model such as Bidirectional LSTM is left as an exercise for the reader).
#
# For convenience, we've re-organized it as a H1st.Model, ready for use. The content of `models/gradient_boosting_msg_classifier.py` looks like this.
# +
FEATURES = SENSORS + ["%s_TimeDiff" % s for s in SENSORS]

class GradientBoostingMsgClassifierModel(h1.Model):
    """Message-level attack classifier: a gradient-boosted tree model run
    only inside the windows that the event detector flagged as attacks."""

    def load_data(self, num_samples=None):
        """Load up to num_samples shuffled data files via the project util."""
        return util.load_data_daic(num_samples, shuffle=True)

    def prep_data(self, data):
        """Concatenate raw attack files into train/test DataFrames, adding
        the per-sensor time-difference features."""
        # concat multiple files into separate training/test pd.DataFrame
        def concat_processed_files(files):
            dfs = []
            for f in files:
                z = pd.read_csv(f)
                z.columns = ['Timestamp', 'Label', 'CarSpeed', 'SteeringAngle', 'YawRate', 'Gx', 'Gy',]
                z = util.compute_timediff_fillna(z)
                dfs.append(z)
            df2 = pd.concat(dfs)
            return df2
        return {
            "train_attack_df": concat_processed_files(data["train_attack_files"]),
            "test_attack_df": concat_processed_files(data["test_attack_files"])
        }

    def train(self, prepared_data):
        """Fit a HistGradientBoostingClassifier; label is True for
        injected ("Tx") messages."""
        df = prepared_data["train_attack_df"]
        from sklearn.experimental import enable_hist_gradient_boosting
        from sklearn.ensemble import HistGradientBoostingClassifier
        X = df[FEATURES]
        y = df.Label == "Tx"
        self.model = HistGradientBoostingClassifier(max_iter=500).fit(X, y)

    def evaluate(self, data):
        """Print and store the confusion matrix and accuracy on the test set."""
        # Fix: the original read from an undefined name `prepared_data`
        # (NameError at runtime); the test frame comes from this method's
        # `data` argument.
        df = data["test_attack_df"]
        ypred = self.model.predict(df[FEATURES])
        import sklearn.metrics
        cf = sklearn.metrics.confusion_matrix(df.Label == "Tx", ypred)
        acc = sklearn.metrics.accuracy_score(df.Label == "Tx", ypred)
        print(cf)
        print("Accuracy = %.4f" % acc)
        self.metrics = {"confusion_matrix": cf, "accuracy": acc}

    def predict(self, data):
        """Classify messages inside the windows flagged by the detector.

        data carries "df" (raw messages) and "event_detection_results"
        from the upstream event-detector node.  Returns the annotated
        frame as {"injection_window_results": df}.
        """
        df = data["df"].copy()
        df = util.compute_timediff_fillna(df)
        df['MsgIsAttack'] = 0
        df['WindowInAttack'] = 0
        for event_result in data["event_detection_results"]:
            if event_result['WindowInAttack']:
                # NOTE(review): WINDOW_SIZE must be supplied by this module's
                # imports (e.g. from config) — confirm it is in scope here.
                in_window = (df.Timestamp >= event_result['window_start']) & (df.Timestamp < event_result['window_start'] + WINDOW_SIZE)
                w_df = df[in_window]
                ypred = self.model.predict(w_df[FEATURES])
                df.loc[in_window, "WindowInAttack"] = 1
                df.loc[in_window, "MsgIsAttack"] = ypred.astype(int)
        return {"injection_window_results": df}
# +
from AutomotiveCybersecurity.models.gradient_boosting_msg_classifier import GradientBoostingMsgClassifierModel

m2 = GradientBoostingMsgClassifierModel()
# Fix: load_data's parameter is num_samples (see the class definition);
# the original call passed num_files= and would raise a TypeError.
data = m2.load_data(num_samples=50)
# -
prepared_data = m2.prep_data(data)
m2.train(prepared_data)
m2.evaluate(prepared_data)
m2.persist()
# +
class NoOp(h1.Action):
def call(self, command, inputs):
pass
graph = h1.Graph()
graph.start()\
.add(WindowGenerator())\
.add(h1.Decision(MsgFreqEventDetectorModel().load(), decision_field="WindowInAttack"))\
.add(yes=GradientBoostingMsgClassifierModel().load(),
no=NoOp())
graph.end()
results = graph.predict({"df": df})
results.keys()
# -
# Now let's evaluate the whole graph, especially focusing on the event-level TPR & FPR since they are crucial in the safe-mode deployment use case.
# +
from AutomotiveCybersecurity.util import evaluate_event_graph
# Event-level evaluation (TPR/FPR) of the whole graph on held-out attack files.
evaluate_event_graph(graph, data['test_attack_files'])
# -
# Whoa! We ran through all 400ms windows in the test samples and got event-level FPR=0.0% with zero false positives! (Note that this is still a subsample of the data, but once you've tried it on the full dataset the results should be the same: zero false positives at event-level.)
#
# The message-level accuracy should be nearly the same because we used the same classifier. However the decomposition leads to separation of concerns and requirement for these two use cases. We're much more comfortable with the solution now both in terms of accuracy as well as robustness and explainability.
#
# Another significance worth pointing out here is that we get multiple output streams from H1st.Graph: event-level outputs and msg-level outputs, exactly what we need for two different use cases we highlighted: safe-mode triggering and post-mortem analysis.
| examples/AutomotiveCybersecurity/notebooks/_build/html/_sources/Using H1st.AI to Encode Human Insights as a Model and Harmonize Human + ML in a H1st.Graph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Test notebook Meteorites
# +
# Standard Library Imports
from pathlib import Path
# Installed packages
import pandas as pd
import numpy as np
import requests
# Testing
from IPython.utils.capture import capture_output
from IPython.display import display
# Our package
import pandas_profiling
from pandas_profiling.utils.cache import cache_file
# +
# Download (and cache) the NASA meteorite landings dataset, then add a few
# synthetic columns that exercise different profiling code paths.
file_name = cache_file(
    "meteorites.csv",
    "https://data.nasa.gov/api/views/gh4g-9sfh/rows.csv?accessType=DOWNLOAD",
)
df = pd.read_csv(file_name)

# Note: Pandas does not support dates before 1880, so we ignore these for this analysis
df['year'] = pd.to_datetime(df['year'], errors='coerce')

# Example: Constant variable
df['source'] = "NASA"

# Example: Boolean variable
df['boolean'] = np.random.choice([True, False], df.shape[0])

# Example: Mixed with base types
df['mixed'] = np.random.choice([1, "A"], df.shape[0])

# Example: Highly correlated variables
df['reclat_city'] = df['reclat'] + np.random.normal(scale=5, size=(len(df)))

# Example: Duplicate observations
duplicates_to_add = pd.DataFrame(df.iloc[0:10])
duplicates_to_add[u'name'] = duplicates_to_add[u'name'] + " copy"
# Fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent.
df = pd.concat([df, duplicates_to_add], ignore_index=True)
# +
# Inline report without saving: profiling should emit one HTML iframe
# output plus one empty trailing output.
with capture_output() as out:
    pr = df.profile_report(sort='None', html={'style':{'full_width': True}}, progress_bar=False, minimal=True)
    display(pr)

assert len(out.outputs) == 2
assert out.outputs[0].data['text/plain'] == '<IPython.core.display.HTML object>'
assert all(s in out.outputs[0].data['text/html'] for s in ['<iframe', 'Profile report generated with the `pandas-profiling`'])
assert out.outputs[1].data['text/plain'] == ''
# +
# There should be less progress bars in minimal mode
with capture_output() as out:
    pfr = df.profile_report(html={'style': {'full_width': True}}, minimal=True, progress_bar=True)

# Every captured output should be a progress-bar widget
assert all("FloatProgress" in s.data['text/plain'] for s in out.outputs)
assert len(out.outputs) == 5
# +
# Write to a file
with capture_output() as out:
    pfr.to_file("/tmp/example.html")

# Writing to disk should produce no notebook output at all
assert len(out.outputs) == 0
# +
# Print existing ProfileReport object inline
with capture_output() as out:
    display(pfr)

assert len(out.outputs) == 2
assert out.outputs[0].data['text/plain'] == '<IPython.core.display.HTML object>'
assert all(s in out.outputs[0].data['text/html'] for s in ['<iframe', 'Profile report generated with the `pandas-profiling`'])
assert out.outputs[1].data['text/plain'] == ''
| tests/notebooks/meteorites.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
# %matplotlib inline
from pprint import pprint
import plot
from simulate import simulate_withdrawals
import harvesting
import market
import withdrawal
import metrics
from decimal import Decimal
import itertools
import math
import pandas
# + deletable=true editable=true
YEARS = 25  # length of each simulated retirement, in years

# + deletable=true editable=true
def run_comparison(series):
    """Simulate a 60/40 and a 100/0 retirement side by side on the same
    return series; return the two real withdrawal streams."""
    stream_a, stream_b = itertools.tee(series)
    portfolio_6040 = (1214000 * .6, 1214000 * .4)
    portfolio_100 = (2459000, 0)
    sim_a = simulate_withdrawals(stream_a, years=YEARS, harvesting=harvesting.N_60_RebalanceHarvesting, withdraw=withdrawal.VPW, portfolio=portfolio_6040)
    sim_b = simulate_withdrawals(stream_b, years=YEARS, harvesting=harvesting.N_100_RebalanceHarvesting, withdraw=withdrawal.VPW, portfolio=portfolio_100)
    withdrawals_6040 = [year.withdraw_r for year in sim_a]
    withdrawals_100 = [year.withdraw_r for year in sim_b]
    return (withdrawals_6040, withdrawals_100)
# + deletable=true editable=true
# Pick the market series; the commented lines are alternative datasets.
#MARKET = market.Returns_US_1871()
#MARKET = market.PortfolioCharts_1927(market.PortfolioCharts_1927.Weights(LCB=.8, SCV=.2))
MARKET = market.Japan_1957()
RETIRE_YEAR = 1990
# Compare the two allocations for a single retirement cohort.
s1, s2, = run_comparison(MARKET.iter_from(RETIRE_YEAR))
# CEW = constant-equivalent withdrawal, a risk-adjusted income metric.
print("60/40", int(metrics.cew(s1)))
print("100/0", int(metrics.cew(s2)))
plot.plot_n({'60/40' : s1, '100/0': s2}, 'Year', '%d Retirement' % RETIRE_YEAR)
# + deletable=true editable=true
# Pool withdrawal streams across every retirement cohort the data supports;
# each cohort contributes YEARS values, appended in order.
incomes_60 = []
incomes_100 = []
for i in range(MARKET.start_year, 2016-YEARS):
    s1, s2 = run_comparison(MARKET.iter_from(i))
    incomes_60 += [int(n) for n in s1]
    incomes_100 += [int(n) for n in s2]

incomes_60 = pandas.Series(data=incomes_60)
incomes_100 = pandas.Series(data=incomes_100)
diff = incomes_100 - incomes_60
# Compare worst-case (1st percentile) incomes of the two allocations.
print('60', incomes_60.quantile(.01))
print('100', incomes_100.quantile(.01))
df = pandas.DataFrame(data={'60/40' : incomes_60, '100/0' : incomes_100, 'Delta' : diff})
# + deletable=true editable=true
def index_to_year(n):
    """Map a flat income-series index to a "retirement_year/income_year" label.

    The income series was built by iterating retirements from
    MARKET.start_year, appending YEARS values per cohort, so integer
    division recovers the retirement year.
    """
    # Fix: the original hard-coded 1871 (the US series start year), which
    # mislabels rows when MARKET is a different dataset (e.g. Japan_1957).
    year_of_retirement = n//YEARS + MARKET.start_year
    year_of_income = n%YEARS + year_of_retirement
    return "%d/%d" % (year_of_retirement, year_of_income)
df['Income date'] = df.index.map(index_to_year)
df.head()
# + deletable=true editable=true
# Focus on cohort-years where either allocation produced a low income,
# then rank them by which allocation did better (Delta = 100/0 minus 60/40).
low_income = df.loc[(df['60/40'] < 40000) | (df['100/0'] < 40000)]
win_100 = low_income.sort_values('Delta', ascending=False)  # where 100/0 wins
win_60 = low_income.sort_values('Delta', ascending=True)    # where 60/40 wins
print(win_60.head())
# -
| 100-0 vs 60-40.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RohanOpenSource/ml-notebooks/blob/main/DecisionTrees.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="JE2TMkD8lffj"
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
# + [markdown] id="Zps5pLsRrwny"
# Let's start out with classification with decision trees and then move on to regression.
# + id="TVyHIWVsm97V"
iris = load_iris()
# Keep only the last two of the four features: petal length and petal width.
X = iris.data[:, 2:] # the third attribute and everything after it is the petal length and the width as there are only 4 attributes
y = iris.target
# + colab={"base_uri": "https://localhost:8080/"} id="IX4hvS-Qnceq" outputId="23bec1f7-af52-4161-ba4e-edcc71729bfe"
# A shallow tree (depth 2) keeps the learned rules small and interpretable.
model = DecisionTreeClassifier(max_depth=2)
model.fit(X, y)
# + [markdown] id="KlRZywhMqCMC"
# Since a decision tree is not a black-box model, we can see what rules it forms in order to classify the data.
# + id="C-oleIU3nw7j"
from sklearn.tree import export_graphviz
# Dump the fitted classifier's decision rules to Graphviz .dot format.
diagram = export_graphviz(
    model,
    out_file=("iris_tree.dot"),
    feature_names=iris.feature_names[2:],
    class_names=iris.target_names,
    rounded=True,
    filled=True
)
# + colab={"base_uri": "https://localhost:8080/", "height": 440} id="I7cyvo2NprFD" outputId="6f54d96c-8a4d-4505-cdcb-00620994c1e8"
import graphviz
with open("iris_tree.dot") as f:
    dot_graph = f.read()
# remove the display(...)
graphviz.Source(dot_graph)  # render the tree inline
# + [markdown] id="CiiCEFR4qo5Q"
# From this diagram, it is pretty obvious why the decision tree is called what it is. It splits the data into two subsets and keeps splitting each subset until it has a result. This is done by the CART training algorithm, which recursively splits the data into a tree of rules. Anyhow, it is time for regression.
# + colab={"base_uri": "https://localhost:8080/"} id="gMhUHy7sq7ji" outputId="bb213ba9-9705-47af-d937-baafaa2ec653"
# NOTE(review): the regressor is fit on the integer class labels, so this
# is regression on class indices — fine as a demo, not a real regression task.
model_2 = DecisionTreeRegressor(max_depth=3)
model_2.fit(X, y)
# + id="zV1A3iiesz3a"
from sklearn.tree import export_graphviz
# Export the regressor's rules to a second .dot file.
diagram_2 = export_graphviz(
    model_2,
    out_file=("iris_tree_lin.dot"),
    feature_names=iris.feature_names[2:],
    class_names=iris.target_names,
    rounded=True,
    filled=True
)
# + colab={"base_uri": "https://localhost:8080/", "height": 518} id="JKmPtJccs6Jp" outputId="153780fc-597f-4de8-9f31-28444aac99a9"
import graphviz
with open("iris_tree_lin.dot") as f:
    dot_graph = f.read()
# remove the display(...)
graphviz.Source(dot_graph)  # render the regression tree inline
# + [markdown] id="PLHf07jytdzZ"
# Decision Trees are unlike other models because they make very few generalizations about the data. This means that they will badly overfit with a high max depth and will have overly basic rules at a lower max depth. This means that in most cases, decision trees aren't that great. However, Random Forests are an evolution of decision trees that fix their issues.
| DecisionTrees.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from numpy.random import randn
# Seed so the random frame is reproducible across runs
np.random.seed(101)
# 5x4 frame of standard-normal values; rows A-E, columns W-Z
df = pd.DataFrame(randn(5, 4), ['A', 'B', 'C', 'D', 'E'], ['W', 'X', 'Y', 'Z'])
df
# Selecting a single column returns a Series
df['W']
type(df['W'])
type(df)
# Attribute-style access is equivalent (but can shadow DataFrame methods)
df.W
# A list of column names returns a DataFrame
df[['W', 'Z']]
# Create a new column from existing ones
df['new'] = df['W'] + df['Y']
df['new']
# axis=1 drops a column; inplace=True mutates df itself
df.drop('new', axis=1, inplace=True)
df
# Without inplace=True, drop returns a copy — df is left unchanged
df.drop('E')
df
# Select Rows
df.loc['A']
# Select Rows alternate (by integer position)
df.iloc[2] # Should be row C
# Single cell: row 'B', column 'Y'
df.loc['B', 'Y']
df
# Select Rows and Columns
df.loc[['A', 'B'], ['W', 'Y']]
# Elementwise comparison yields a boolean frame
df > 0
# Bool Checks of data frame
booldf = df > 0
df[booldf]
# OR use this
df[df > 0]
df
df['W'] > 0 # Using Column for bool selection
df[df['W'] > 0] # Pass in series, get only rows where true
# column z, less than 0
df[df['Z'] < 0]
# Do additional querying on returned data frame
df[df['W']>0]['X']
boolser = df['W']>0
result = df[boolser]
result
result[['Y', 'X']]
# conditions: combine with & (and) / | (or); each side must be parenthesized
df[(df['W']>0) & (df['Y']>1)]
# reset_index moves the index into a column and renumbers 0..n-1 (returns a copy)
df.reset_index()
newind = 'CA NY WY OR CO'.split()
newind
df['States'] = newind
df
# NOTE(review): this returns a re-indexed copy; the usual form is
# df.set_index('States') — df itself is not modified here.
df.set_index(df['States'])
df.drop('States', axis=1, inplace=True)
df
| .ipynb_checkpoints/Data Frames-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Customer Segmentation and Analysis
#Steps to solve the problem :
#Importing Libraries.
#Exploration of data.
#Data Visualization.
#Clustering using K-Means.
#Selection of Clusters.
#Ploting the Cluster Boundry and Clusters.
#3D Plot of Clusters.
#Importing Libraries.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly as py
import plotly.graph_objs as go
from sklearn.cluster import KMeans
import warnings
import os
warnings.filterwarnings("ignore")
# +
# Explore the data
data = pd.read_csv("Mall_Customers.csv")
data.head()
# -
data.shape
# Fix: `data.describe` without parentheses only displays the bound method;
# call it to show the summary statistics.
data.describe()
data.dtypes
# Count missing values per column
data.isnull().sum()
plt.style.use("fivethirtyeight") ## use fivethirtyeight style for plotting
# +
# look at the distribution of age income and spending score by using distplot() function
plt.figure(figsize = (20 , 7)) # to create a new figure & adjust figure sizes
n = 0
for x in ['Age' , 'Annual Income (k$)' , 'Spending Score (1-100)']:
    n += 1
    plt.subplot(1 , 3, n) # divide the graphs for in specificed destinations
    # NOTE(review): sns.distplot is deprecated in recent seaborn releases;
    # histplot/displot is the modern replacement.
    sns.distplot(data[x] , bins = 20) ## show the distribution of the data set individually for age, income and score
    plt.title('Distribution of {}'+" "+ x) ## can be used .format(x) function to set the titles
plt.show()
# +
## count plot for gender
plt.figure(1 , figsize = (20 , 7))
sns.countplot(y = 'Gender' , data = data)
plt.show()
# +
#Ploting the Relation between Age , Annual Income and Spending Score
# 3x3 grid: every pairing (including self-pairings) of the three variables
plt.figure(figsize = (15 , 12))
n = 0
for x in ['Age' , 'Annual Income (k$)' , 'Spending Score (1-100)']:
    for y in ['Age' , 'Annual Income (k$)' , 'Spending Score (1-100)']:
        n += 1
        plt.subplot(3 , 3 , n)
        sns.regplot(x = x , y = y , data = data) #used to plot data and a linear regression model fit.
        # Shorten multi-word column names to their first two words for the label
        plt.ylabel(y.split()[0]+' '+y.split()[1] if len(y.split()) > 1 else y )
plt.show()
# +
# look at the relation of age and annual income in terms of gender
plt.figure(1 , figsize = (15 , 6))
# NOTE(review): color_dict is defined but never passed to the scatter calls.
color_dict = dict({"Male" : "blue",
                   "Female" : "red"})
for gender in ['Male' , 'Female']:
    plt.scatter(x = 'Age' , y = 'Annual Income (k$)' , data = data[data['Gender'] == gender] ,
                s = 200 , alpha = 0.5 , label = gender) # Scatter plots uses dots to represent the relationship btw variables.
plt.xlabel('Age'), plt.ylabel('Annual Income (k$)')
plt.title('Age vs Annual Income w.r.t Gender')
plt.legend()
plt.show()
# +
#look at the relation btw income and spending score in terms of gender
plt.figure(1 , figsize = (15 , 6))
for gender in ['Male' , 'Female']:
    plt.scatter(x = 'Annual Income (k$)',y = 'Spending Score (1-100)' ,
                data = data[data['Gender'] == gender] ,s = 200 , alpha = 0.5 , label = gender)
plt.xlabel('Annual Income (k$)'), plt.ylabel('Spending Score (1-100)')
plt.title('Annual Income vs Spending Score w.r.t Gender')
plt.legend()
plt.show()
# +
# Clustering using K-means.
# Segmentation using Age and Spending Score only (a 2-D feature space).
'''Age and spending Score'''
X1 = data[['Age' , 'Spending Score (1-100)']].values
inertia = []  # within-cluster sum of squares for each candidate k
# KMEANS PARAMETER DEFINITIONS: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
# Fit K-means for k = 1..10 and record the inertia so the "elbow" (the k
# after which adding clusters stops paying off) can be read off the plot below.
for n in range(1 , 11):
    algorithm = (KMeans(n_clusters = n ,init='k-means++', n_init = 10 ,max_iter=300,
                        tol=0.0001, random_state= 111 , algorithm='elkan') )
    algorithm.fit(X1)
    inertia.append(algorithm.inertia_)  # lower inertia = tighter clusters
# +
##find the elbow point and decide the number of clusters of the algorithm
plt.figure(figsize = (15 ,6))
plt.plot(np.arange(1 , 11) , inertia , 'o')
plt.plot(np.arange(1 , 11) , inertia , '-' , alpha = 0.5)
plt.xlabel('Number of Clusters') , plt.ylabel('Sum of Squarred Error')
plt.show()
# +
algorithm = (KMeans(n_clusters = 4 ,init='k-means++', n_init = 10 ,max_iter=300,
tol=0.0001, random_state= 111 , algorithm='elkan') )
algorithm.fit(X1)
labels1 = algorithm.labels_
centroids1 = algorithm.cluster_centers_
cluster_map = pd.DataFrame()
cluster_map['data_index'] = data.index.values
cluster_map['cluster'] = labels1
cluster_map[cluster_map.cluster == 1]["data_index"]
centroids1,labels1
# +
# BUG FIX: this exploratory cell referenced `xx` and `yy` before they exist
# (they are only created by np.meshgrid two cells below), so running the
# notebook top-to-bottom raised a NameError. Kept here, commented out, for
# reference; re-run it after the meshgrid cell to inspect the raveled arrays.
# xx.ravel(), yy.ravel(), np.c_[xx.ravel(), yy.ravel()]
# +
h = 0.02  # mesh step size: resolution of the decision-boundary grid below
x_min, x_max = X1[:, 0].min(), X1[:, 0].max()
y_min, y_max = X1[:, 1].min(), X1[:, 1].max()
## arange() generates values between min and max at the specified interval h
## meshgrid() returns two 2-dimensional arrays, representing the X and Y coordinates of all the points.
## ravel() flattens an array into a single 1-D sequence of values
## np.c_ is a column-wise concatenation of the two raveled arrays
## use algorithm.predict to estimate the cluster for each (x, y) grid point (algorithm is the fitted KMeans model)
## PREDICT FUNCTION : https://www.askpython.com/python/examples/python-predict-function
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = algorithm.predict(np.c_[xx.ravel(), yy.ravel()])
# +
plt.figure(figsize = (18 , 10) )
plt.clf() # used to clear the current figure.
Z = Z.reshape(xx.shape) ##https://www.geeksforgeeks.org/reshape-numpy-array/
plt.imshow(Z , interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap = plt.cm.Pastel2, aspect = 'auto', origin='lower') ## https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.imshow.html
plt.scatter( x = 'Age' ,y = 'Spending Score (1-100)' , data = data , c = labels1 ,
s = 200 )
plt.scatter(x = centroids1[: , 0] , y = centroids1[: , 1] , s = 300 , c = 'red' , alpha = 0.5)
plt.ylabel('Spending Score (1-100)') , plt.xlabel('Age')
plt.show()
# -
# Attach the final cluster labels back to the customer IDs.
cluster_map = pd.DataFrame()
cluster_map["Customer_ID"] = data["CustomerID"].values
cluster_map['Cluster'] = algorithm.labels_
cluster_map
# NOTE(review): `df = data` creates an alias, not a copy — adding the Cluster
# column below also mutates `data`; use data.copy() if that is unwanted.
df= data
df["Cluster"] = cluster_map["Cluster"]
# Per-cluster means of the three numeric features.
df.groupby("Cluster").mean()[["Age", "Annual Income (k$)", "Spending Score (1-100)"]]
| Mall Customer Segmentation Analysis Utku Sokat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Introduction
#
# Power is one of the most critical components of infrastructure crucial for the economic growth and welfare of nations. The existence and development of adequate infrastructure is essential for sustained growth of the Indian economy. India is the world's third largest producer and third largest consumer of electricity. Sustained economic growth continues to drive electricity demand in India.
#
#
# Consumption of electricity is known to follow economic activity closely. The industries that produce essential goods are operating at very low utilization levels. Hence, in such a scenario one expects electricity demands to go down.
#
# Here is a notebook to give you a brief intro of the dataset that I created, through interactive visualizations which will allow you to browse through data visually. The intention is to build an intuition about the data, thereby being able to answer questions of relevance. The data ranges from 28/10/2019 to 23/05/2020.
#
# In this notebook I have put my hands on interactive plots which will enable anyone to browse the data with a few clicks. I hope you like it and get your hands on the dataset to build a notebook of your own.
#
# Do comment your suggestions and review of my work below. Hope you enjoy as much as I did while creating it. :)
#
# + _kg_hide-output=true
# BUG FIX: a bare `pip install ...` line is only valid inside IPython (where
# it is auto-magicked); as plain Python it is a SyntaxError. Use the shell
# escape so the jupytext round-trip keeps the cell runnable.
# !pip install bar_chart_race
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib notebook
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from IPython.display import HTML
import calendar
from plotly.subplots import make_subplots
import bar_chart_race as bcr
# -
df = pd.read_csv('../input/state-wise-power-consumption-in-india/power data.csv')
df_long = pd.read_csv('../input/state-wise-power-consumption-in-india/long_data_.csv')
# + _kg_hide-output=true
df.info()
# -
df['Date'] = pd.to_datetime(df.Date, dayfirst=True)
df_long['Dates'] = pd.to_datetime(df_long.Dates, dayfirst=True)
# # Region Wise Daily Power Consumption
# + _kg_hide-input=true
# Aggregate the state-level columns into India's five power-grid regions:
# NR = Northern, WR = Western, SR = Southern, ER = Eastern, NER = North-Eastern.
df['NR'] = df['Punjab']+ df['Haryana']+ df['Rajasthan']+ df['Delhi']+df['UP']+df['Uttarakhand']+df['HP']+df['J&K']+df['Chandigarh']
df['WR'] = df['Chhattisgarh']+df['Gujarat']+df['MP']+df['Maharashtra']+df['Goa']+df['DNH']
df['SR'] = df['Andhra Pradesh']+df['Telangana']+df['Karnataka']+df['Kerala']+df['Tamil Nadu']+df['Pondy']
df['ER'] = df['Bihar']+df['Jharkhand']+ df['Odisha']+df['West Bengal']+df['Sikkim']
df['NER'] =df['Arunachal Pradesh']+df['Assam']+df['Manipur']+df['Meghalaya']+df['Mizoram']+df['Nagaland']+df['Tripura']
# + _kg_hide-input=true
# Daily power consumption per grid region, with a range slider and quick
# 1m/3m/6m/all range-selector buttons. One scatter trace per region, added
# in the same order (and with the same names/colours) as before.
fig = go.Figure()
region_traces = [
    ('NR', 'Northern region', 'rgba(300, 50, 50, 0.8)'),
    ('SR', 'Southern Region', 'rgba(50, 300, 50, 0.8)'),
    ('ER', 'Eastern Region', 'rgba(50, 50, 300, 0.8)'),
    ('WR', 'Western Region', 'rgba(300, 100, 200, 0.8)'),
    ('NER', 'North-Eastern', 'rgba(100, 200, 300, 0.8)'),
]
for column, label, rgba in region_traces:
    fig.add_trace(go.Scatter(
        x=df.Date, y=df[column],
        mode='lines+markers',
        name=label,
        marker=dict(
            color=rgba,
            size=5,
            line=dict(color='DarkSlateGrey', width=1)
        )
    ))
fig.update_xaxes(
    rangeslider_visible=True,
    rangeselector=dict(
        buttons=[
            dict(count=1, label="1m", step="month", stepmode="backward"),
            dict(count=3, label="3m", step="month", stepmode="backward"),
            dict(count=6, label="6m", step="month", stepmode="backward"),
            dict(step="all"),
        ]
    )
)
fig.update_layout(title='Power Consumption in Various Region')
fig.update_layout(width=800, height=500)
fig.show()
# -
# # State-wise mean Power consumption
# + _kg_hide-input=true jupyter={"outputs_hidden": true}
df1= df[['Date', 'Punjab', 'Haryana', 'Rajasthan', 'Delhi', 'UP',
'Uttarakhand', 'HP', 'J&K', 'Chandigarh', 'Chhattisgarh', 'Gujarat',
'MP', 'Maharashtra', 'Goa', 'DNH',
'Andhra Pradesh', 'Telangana', 'Karnataka', 'Kerala', 'Tamil Nadu',
'Pondy', 'Bihar', 'Jharkhand', 'Odisha', 'West Bengal', 'Sikkim',
'Arunachal Pradesh', 'Assam', 'Manipur', 'Meghalaya', 'Mizoram',
'Nagaland', 'Tripura']]
df1 = df1.set_index('Date')
bcr.bar_chart_race(df1, figsize=(4, 3.5),period_length =500,filename = None, title='power usage by states')
# -
# # Monthly average Power Consumption
monthly_df = df_long.groupby([df_long.Dates.dt.year, df_long.Dates.dt.month,df_long.States,df_long.Regions, df_long.latitude,df_long.longitude])['Usage'].mean()
monthly_df.index = monthly_df.index.set_names(['year', 'month','State','Region','latitude','longitude'])
monthly_df = monthly_df.reset_index()
monthly_df['month'] = monthly_df['month'].apply(lambda x: calendar.month_abbr[x])
monthly_df.head()
# + _kg_hide-input=true
fig = px.sunburst(monthly_df, path=['Region', 'State','month'], values='Usage',
color='Usage',
color_continuous_scale='RdBu')
fig.update_layout(title='Click various Regions/States to view power distribution')
fig.update_layout( width=800,height=600)
fig.show()
# -
fig = px.bar(monthly_df, x="Region", y="Usage",color='State',animation_frame = 'month')
fig.update_layout(xaxis={'categoryorder':'total descending'})
fig.update_layout(title='Region-wise Bar plots')
fig.show()
# # Before and After lockdown Scenarios
# Split the daily series at the lockdown boundary (row index 150).
df_before = df.iloc[0:150, :]
# BUG FIX: the original used `df.iloc[151:,]`, which silently dropped row 150
# from BOTH halves; start the "after" slice where the "before" slice ends.
# (If excluding the transition day was intentional, restore 151 and say so.)
df_after = df.iloc[150:, :]
# + _kg_hide-input=true
fig = go.Figure()
fig.add_trace(go.Scatter( x=df_before['Date'], y=df_before['Gujarat'], name='Gujarat before lockdown',fill='tonexty',
line=dict(width=2,dash='dot',color='firebrick')
))
fig.add_trace(go.Scatter( x=df_before['Date'], y=df_before['Maharashtra'], name='Maharashtra before lockdown',fill='tonexty',
line=dict(width=2,dash='dot',color='coral')
))
fig.add_trace(go.Scatter( x=df_before['Date'], y=df_before['MP'], name='MP before lockdown',fill='tozeroy',
line=dict(width=2,dash='dot',color='darkred')
))
fig.add_trace(go.Scatter(x=df_after['Date'], y=df_after['Gujarat'],name='Gujarat after lockdown',fill='tozeroy',
line=dict(color='firebrick', width=2)
))
fig.add_trace(go.Scatter(x=df_after['Date'], y=df_after['Maharashtra'],name='Maharashtra after lockdown',fill='tozeroy',
line=dict(color='coral', width=2)
))
fig.add_trace(go.Scatter(x=df_after['Date'], y=df_after['MP'],name='MP after lockdown',fill='tozeroy',
line=dict(color='darkred', width=2)
))
fig.update_layout(title='Power Consumption in top 3 WR states')
fig.update_layout( width=800,height=500)
fig.show()
# + _kg_hide-input=true
# Daily consumption for the three largest Southern-Region states, before vs
# after lockdown (dotted = before, solid = after).
fig = go.Figure()
sr_states = [('Karnataka', 'skyblue', 'tonexty'),
             ('Tamil Nadu', 'lightblue', 'tonexty'),
             ('Telangana', 'midnightblue', 'tozeroy')]
for state, colour, before_fill in sr_states:
    fig.add_trace(go.Scatter(x=df_before['Date'], y=df_before[state],
                             name=state + ' before lockdown', fill=before_fill,
                             line=dict(width=2, dash='dot', color=colour)))
for state, colour, _ in sr_states:
    fig.add_trace(go.Scatter(x=df_after['Date'], y=df_after[state],
                             name=state + ' after lockdown', fill='tozeroy',
                             line=dict(color=colour, width=2)))
# BUG FIX: the title previously said "top 3 WR states" although Karnataka,
# Tamil Nadu and Telangana belong to the Southern Region (SR).
fig.update_layout(title='Power Consumption in top 3 SR states')
fig.update_layout(width=800, height=500)
fig.show()
# + _kg_hide-input=true
fig = go.Figure()
fig.add_trace(go.Scatter( x=df_before['Date'], y=df_before['Rajasthan'], name='Rajasthan before lockdown',fill='tonexty',
line=dict(width=2,dash='dot',color='darkviolet')
))
fig.add_trace(go.Scatter( x=df_before['Date'], y=df_before['UP'], name='UP before lockdown',fill='tonexty',
line=dict(width=2,dash='dot',color='deeppink')
))
fig.add_trace(go.Scatter( x=df_before['Date'], y=df_before['Haryana'], name='Haryana before lockdown',fill='tozeroy',
line=dict(width=2,dash='dot',color='indigo')
))
fig.add_trace(go.Scatter(x=df_after['Date'], y=df_after['Rajasthan'],name='Rajasthan after lockdown',fill='tozeroy',
line=dict(color='darkviolet', width=2)
))
fig.add_trace(go.Scatter(x=df_after['Date'], y=df_after['UP'],name='UP after lockdown',fill='tonexty',
line=dict(color='deeppink', width=2)
))
fig.add_trace(go.Scatter(x=df_after['Date'], y=df_after['Haryana'],name='Haryana after lockdown',fill='tozeroy',
line=dict(color='indigo', width=2)
))
fig.update_layout(title='Power Consumption in top 3 NR states')
fig.update_layout( width=800,height=500)
fig.show()
# -
# # Maximum value reached
# + _kg_hide-input=true
# One sub-frame per grid region; these feed the gauge charts below.
WR_df, NR_df, SR_df, ER_df, NER_df = (
    df_long[df_long['Regions'] == code]
    for code in ('WR', 'NR', 'SR', 'ER', 'NER')
)
# + _kg_hide-input=true
def _max_usage_gauge(value, title_text):
    """Build a gauge indicator showing the maximum daily usage for a region.

    The axis is fixed at [0, 500] with a red threshold line at 490 so all
    five regional gauges share the same visual scale.
    """
    return go.Figure(go.Indicator(
        mode="gauge+number",
        value=value,
        title={'text': title_text},
        gauge={
            'axis': {'range': [None, 500], 'tickwidth': 1},
            'threshold': {
                'line': {'color': "red", 'width': 4},
                'thickness': 0.75,
                'value': 490}}
    ))


fig = _max_usage_gauge(WR_df['Usage'].max(),
                       "Max Power Usage In WR:Maharashtra 13/05/2020")
fig.show()
# + _kg_hide-input=true
fig = _max_usage_gauge(NR_df['Usage'].max(),
                       "Max Power Usage In NR :UP 09/05/2020")
# NOTE(review): gauge indicators have no legend, so this layout call has no
# visible effect; kept for fidelity with the original cell.
fig.update_layout(legend_title_text='State Date::UP')
fig.show()
# + _kg_hide-input=true
fig = _max_usage_gauge(SR_df['Usage'].max(),
                       "Max Power Usage In SR : Tamil Nadu 01/11/2019")
fig.show()
# + _kg_hide-input=true
# Typo fix in the displayed title: "West Bangal" -> "West Bengal".
fig = _max_usage_gauge(ER_df['Usage'].max(),
                       "Max Power Usage In ER: West Bengal 04/05/2020")
fig.show()
# + _kg_hide-input=true
fig = _max_usage_gauge(NER_df['Usage'].max(),
                       "Max Power Usage In NER: Assam 05/05/2020")
fig.show()
# -
# # Plotting on maps
df_long = pd.read_csv('../input/state-wise-power-consumption-in-india/long_data_.csv')
df_long.dropna(inplace = True)
fig = px.scatter_geo(df_long,'latitude','longitude', color="Regions",
hover_name="States", size="Usage",
animation_frame="Dates", scope='asia')
fig.update_geos(lataxis_range=[5,35], lonaxis_range=[65, 100])
fig.show()
| notebooks/an-interactive-eda-of-electricity-consumption.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# %reload_ext watermark
# %matplotlib inline
from os.path import exists
from metapool.metapool import *
from metapool import (validate_plate_metadata, assign_emp_index, make_sample_sheet, KLSampleSheet, parse_prep, validate_and_scrub_sample_sheet, generate_qiita_prep_file)
# %watermark -i -v -iv -m -h -p metapool,sample_sheet,openpyxl -u
# -
# # Knight Lab Amplicon Sample Sheet and Mapping (preparation) File Generator
#
# ### What is it?
#
# This Jupyter Notebook allows you to automatically generate sample sheets for amplicon sequencing.
#
#
# ### Here's how it should work.
#
# You'll start out with a **basic plate map** (platemap.tsv), which just links each sample to its appropriate row and column.
#
# You can use this google sheet template to generate your plate map:
#
# https://docs.google.com/spreadsheets/d/1xPjB6iR3brGeG4bm2un4ISSsTDxFw5yME09bKqz0XNk/edit?usp=sharing
#
# Next you'll automatically assign EMP barcodes in order to produce a **sample sheet** (samplesheet.csv) that can be used in combination with the rest of the sequence processing pipeline.
#
# **Please designate what kind of amplicon sequencing you want to perform:**
seq_type = '16S'
#options are ['16S', '18S', 'ITS']
# ## Step 1: read in plate map
#
# **Enter the correct path to the plate map file**. This will serve as the plate map for relating all subsequent information.
# +
plate_map_fp = './test_data/amplicon/compressed-map.tsv'
if not exists(plate_map_fp):
print("Error: %s is not a path to a valid file" % plate_map_fp)
# -
# **Read in the plate map**. It should look something like this:
#
# ```
# Sample Row Col Blank
# GLY_01_012 A 1 False
# GLY_14_034 B 1 False
# GLY_11_007 C 1 False
# GLY_28_018 D 1 False
# GLY_25_003 E 1 False
# GLY_06_106 F 1 False
# GLY_07_011 G 1 False
# GLY_18_043 H 1 False
# GLY_28_004 I 1 False
# ```
#
# **Make sure there are no duplicate IDs.** If each sample doesn't have a different name, an error will be thrown and you won't be able to generate a sample sheet.
# +
plate_df = read_plate_map_csv(open(plate_map_fp,'r'))
plate_df.head()
# -
# # Assign barcodes according to primer plate
#
# This portion of the notebook will assign a barcode to each sample according to the primer plate number.
#
# As inputs, it requires:
# 1. A plate map dataframe (from previous step)
# 2. Preparation metadata for the plates, importantly we need the Primer Plate # so we know what **EMP barcodes** to assign to each plate.
#
# The workflow then:
# 1. Joins the preparation metadata with the plate metadata.
# 2. Assigns indices per sample
# ## Enter and validate the plating metadata
#
# - In general you will want to update all the fields, but the most important ones are the `Primer Plate #` and the `Plate Position`. `Primer Plate #` determines which EMP barcodes will be used for this plate. `Plate Position` determines the physical location of the plate.
# - If you are plating fewer than four plates, then remove the metadata for that plate by deleting the text between the curly braces.
# - For missing fields, write NA between the single quotes for example `'NA'`.
# - To enter a plate copy and paste the contents from the plates below.
# +
_metadata = [
{
# top left plate
'Plate Position': '1',
'Primer Plate #': '1',
'Sample Plate': 'THDMI_UK_Plate_2',
'Project_Name': 'THDMI UK',
'Plating': 'SF',
'Extraction Kit Lot': '166032128',
'Extraction Robot': 'Carmen_HOWE_KF3',
'TM1000 8 Tool': '109379Z',
'Primer Date': '2021-08-17', # yyyy-mm-dd
'MasterMix Lot': '978215',
'Water Lot': 'RNBJ0628',
'Processing Robot': 'Echo550',
'Original Name': ''
},
{
# top right plate
'Plate Position': '2',
'Primer Plate #': '2',
'Sample Plate': 'THDMI_UK_Plate_3',
'Project_Name': 'THDMI UK',
'Plating':'AS',
'Extraction Kit Lot': '166032128',
'Extraction Robot': 'Carmen_HOWE_KF4',
'TM1000 8 Tool': '109379Z',
'Primer Date': '2021-08-17', # yyyy-mm-dd
'MasterMix Lot': '978215',
'Water Lot': 'RNBJ0628',
'Processing Robot': 'Echo550',
'Original Name': ''
},
{
# bottom left plate
'Plate Position': '3',
'Primer Plate #': '3',
'Sample Plate': 'THDMI_UK_Plate_4',
'Project_Name': 'THDMI UK',
'Plating':'MB_SF',
'Extraction Kit Lot': '166032128',
'Extraction Robot': 'Carmen_HOWE_KF3',
'TM1000 8 Tool': '109379Z',
'Primer Date': '2021-08-17', # yyyy-mm-dd
'MasterMix Lot': '978215',
'Water Lot': 'RNBJ0628',
'Processing Robot': 'Echo550',
'Original Name': ''
},
{
# bottom right plate
'Plate Position': '4',
'Primer Plate #': '4',
'Sample Plate': 'THDMI_US_Plate_6',
'Project_Name': 'THDMI US',
'Plating':'AS',
'Extraction Kit Lot': '166032128',
'Extraction Robot': 'Carmen_HOWE_KF4',
'TM1000 8 Tool': '109379Z',
'Primer Date': '2021-08-17', # yyyy-mm-dd
'MasterMix Lot': '978215',
'Water Lot': 'RNBJ0628',
'Processing Robot': 'Echo550',
'Original Name': ''
},
]
plate_metadata = validate_plate_metadata(_metadata)
plate_metadata
# -
# The `Plate Position` and `Primer Plate #` allow us to figure out which wells are associated with each of the EMP barcodes.
if plate_metadata is not None:
plate_df = assign_emp_index(plate_df, plate_metadata, seq_type).reset_index()
plate_df.head()
else:
print('Error: Please fix the errors in the previous cell')
# As you can see in the table above, the resulting table is now associated with the corresponding EMP barcodes (`Golay Barcode`, `Forward Primer Linker`, etc), and the plating metadata (`Primer Plate #`, `Primer Date`, `Water Lot`, etc).
plate_df.head()
# # Combine plates (optional)
#
# If you would like to combine existing plates with these samples, enter the path to their corresponding sample sheets and mapping (preparation) files below. Otherwise you can skip to the next section.
#
# - sample sheet and mapping (preparation)
# +
files = [
# uncomment the line below and point to the correct filepaths to combine with previous plates
# ['test_output/amplicon/2021_08_17_THDMI-4-6_samplesheet.csv', 'test_output/amplicon/2021-08-01-515f806r_prep.tsv'],
]
sheets, preps = [], []
for sheet, prep in files:
sheets.append(KLSampleSheet(sheet))
preps.append(parse_prep(prep))
if len(files):
print('%d pair of files loaded' % len(files))
# -
# # Make Sample Sheet
#
# This workflow takes the pooled sample information and writes an Illumina sample sheet that can be given directly to the sequencing center or processing pipeline. Note that as of writing `bcl2fastq` does not support error-correction in Golay barcodes so the sample sheet is used to generate a mapping (preparation) file but not to demultiplex sequences. Demultiplexing takes place in [Qiita](https://qiita.ucsd.edu).
#
# As inputs, this notebook requires:
# 1. A plate map DataFrame (from previous step)
#
# The workflow:
# 1. formats sample names as bcl2fastq-compatible
# 2. formats sample data
# 3. sets values for sample sheet fields and formats sample sheet.
# 4. writes the sample sheet to a file
# ## Step 1: Format sample names to be bcl2fastq-compatible
#
# bcl2fastq requires *only* alphanumeric, hyphens, and underscore characters. We'll replace all non-those characters
# with underscores and add the bcl2fastq-compatible names to the DataFrame.
# +
plate_df['sample sheet Sample_ID'] = plate_df['Sample'].map(bcl_scrub_name)
plate_df.head()
# -
# ## Format the sample sheet data
#
# This step formats the data columns appropriately for the sample sheet, using the values we've calculated previously.
#
# The newly-created `bcl2fastq`-compatible names will be in the `Sample ID` and `Sample Name` columns. The original sample names will be in the Description column.
#
# Modify lanes to indicate which lanes this pool will be sequenced on.
#
# The `Project Name` and `Project Plate` columns will be placed in the `Sample_Project` and `Sample_Name` columns, respectively.
#
# sequencer is important for making sure the i5 index is in the correct orientation for demultiplexing. `HiSeq4000`, `HiSeq3000`, `NextSeq`, and `MiniSeq` all require reverse-complemented i5 index sequences. If you enter one of these exact strings in for sequencer, it will revcomp the i5 sequence for you.
#
# `HiSeq2500`, `MiSeq`, and `NovaSeq` will not revcomp the i5 sequence.
# +
sequencer = 'HiSeq4000'
lanes = [1]
metadata = {
'Bioinformatics': [
{
'Sample_Project': 'THDMI_10317',
'QiitaID': '10317',
'BarcodesAreRC': 'False',
'ForwardAdapter': '',
'ReverseAdapter': '',
'HumanFiltering': 'True',
'library_construction_protocol': 'Illumina EMP protocol 515fbc, 806r amplification of 16S rRNA V4',
'experiment_design_description': 'Equipment',
},
],
'Contact': [
{
'Sample_Project': 'THDMI_10317',
# non-admin contacts who want to know when the sequences
# are available in Qiita
'Email': 'y<EMAIL>,<EMAIL>'
},
],
'Chemistry': 'Amplicon',
'Assay': 'TruSeq HT',
}
sheet = make_sample_sheet(metadata, plate_df, sequencer, lanes)
sheet.Settings['Adapter'] = 'AGATCGGAAGAGCACACGTCTGAACTCCAGTCA'
sheet.Settings['AdapterRead2'] = 'AGATCGGAAGAGCGTCGTGTAGGGAAAGAGTGT'
# -
# Check for any possible errors in the sample sheet
sheet = validate_and_scrub_sample_sheet(sheet)
# Add the other sample sheets
if len(sheets):
sheet.merge(sheets)
# ## Step 3: Write the sample sheet to file
# +
# write sample sheet as .csv
sample_sheet_fp = './test_output/amplicon/2021_08_17_THDMI-4-6_samplesheet16S.csv'
if exists(sample_sheet_fp):
print("Warning! This file exists already.")
# +
with open(sample_sheet_fp,'w') as f:
sheet.write(f)
# !head -n 30 {sample_sheet_fp}
# !echo ...
# !tail -n 15 {sample_sheet_fp}
# -
# # Create a mapping (preparation) file for Qiita
output_filename = 'test_output/amplicon/2021-08-01-515f806r_prep.tsv'
# +
qiita_df = generate_qiita_prep_file(plate_df, seq_type)
qiita_df.head()
# -
qiita_df.set_index('sample_name', verify_integrity=True).to_csv(output_filename, sep='\t')
# Merge mapping (preparation) files from any previously loaded plates into the
# prep generated above, then rewrite the output file so it contains all plates.
# BUG FIX: the original did `prep = prep.append(preps, ignore_index=True)`,
# but `prep` was only ever defined as a leaked loop variable (the last parsed
# prep) from an earlier cell, and DataFrame.append was deprecated and later
# removed from pandas. Concatenate explicitly instead.
if len(preps):
    qiita_df = pd.concat([qiita_df, *preps], ignore_index=True)
    # re-write the prep file so it reflects the combined plates
    qiita_df.set_index('sample_name', verify_integrity=True).to_csv(output_filename, sep='\t')
# !head -n 5 {output_filename}
| amplicon-pooling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="UmRrjWCiI7dd" colab_type="code" outputId="9fdadbe4-fd98-4782-bc58-ec17eb81f0ab" executionInfo={"status": "ok", "timestamp": 1583267935935, "user_tz": -60, "elapsed": 6426, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}} colab={"base_uri": "https://localhost:8080/", "height": 187}
# !pip install --upgrade tables
# + id="O4ZZxb-OI_GE" colab_type="code" colab={}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# + id="eG92iN3LJMPy" colab_type="code" outputId="34a7cc7d-3186-4404-df7e-8170a5648764" executionInfo={"status": "ok", "timestamp": 1583267946212, "user_tz": -60, "elapsed": 1242, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# cd "/content/drive/My Drive/Colab Notebooks/matrix/matrix_two/dw_matrix_car"
# + id="_NaaWwXbJkgQ" colab_type="code" outputId="ea9bb5d3-8d18-4b6a-f785-c8bf08e50590" executionInfo={"status": "ok", "timestamp": 1583267950288, "user_tz": -60, "elapsed": 2333, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# ls data/car.h5
# + id="N3V3GLV6JsFQ" colab_type="code" colab={}
df = pd.read_hdf('data/car.h5')
# + id="XdqGS-PTKjuT" colab_type="code" outputId="8cd66398-3694-415e-fb8f-67a1e3911621" executionInfo={"status": "ok", "timestamp": 1583267959738, "user_tz": -60, "elapsed": 614, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
df.columns.values
# + id="h8LMm4scKl5R" colab_type="code" outputId="4da6f3a4-6332-4eb9-90c1-774cb772eca9" executionInfo={"status": "ok", "timestamp": 1583268003438, "user_tz": -60, "elapsed": 841, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}} colab={"base_uri": "https://localhost:8080/", "height": 265}
df['price_value'].hist(bins=100);
# + id="oTLYgtqYKuPa" colab_type="code" outputId="93c2c7de-4260-4213-9134-f589d111731d" executionInfo={"status": "ok", "timestamp": 1583268006658, "user_tz": -60, "elapsed": 620, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
df['price_value'].max()
# + id="CmupRJKvKzOa" colab_type="code" outputId="c7b948cf-0e2b-4bf0-b3d1-e5c9c5b1f14e" executionInfo={"status": "ok", "timestamp": 1583268016038, "user_tz": -60, "elapsed": 652, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}} colab={"base_uri": "https://localhost:8080/", "height": 170}
df['price_value'].describe()
# + id="Df85Ql2eK27T" colab_type="code" outputId="69fa69c2-0526-4914-ed74-29d1f72d8661" executionInfo={"status": "ok", "timestamp": 1583268018269, "user_tz": -60, "elapsed": 670, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}} colab={"base_uri": "https://localhost:8080/", "height": 306}
df['param_marka-pojazdu'].unique()
# + id="PSg3Cmz-LE7y" colab_type="code" colab={}
def group_and_barplot(feat_groupby, feat_agg='price_value',
                      agg_funcs=(np.mean, np.median, np.size),
                      feat_sort='mean', top=50, subplots=True, data=None):
    """Group a frame by one feature, aggregate another, and bar-plot the top rows.

    Parameters
    ----------
    feat_groupby : column name to group by.
    feat_agg : column to aggregate (defaults to the price column).
    agg_funcs : aggregation functions to apply. A tuple by default — the
        original used a mutable list as the default argument, which is a
        well-known Python pitfall; lists passed by callers still work.
    feat_sort : name of the aggregate column to sort by, descending.
    top : number of top groups to keep.
    subplots : whether to draw one subplot per aggregate function.
    data : DataFrame to operate on; defaults to the module-level `df` so all
        existing calls keep working (backward-compatible generalization).

    Returns the matplotlib axes produced by DataFrame.plot.
    """
    if data is None:
        data = df  # fall back to the notebook's global frame
    grouped = (
        data
        .groupby(feat_groupby)[feat_agg]
        .agg(list(agg_funcs))
        .sort_values(by=feat_sort, ascending=False)
        .head(top)
    )
    return grouped.plot(kind='bar', figsize=(15, 5), subplots=subplots)
# + id="xhJcj-XJLXoW" colab_type="code" outputId="1b6a03a7-4465-4527-a109-142969bd12e5" executionInfo={"status": "ok", "timestamp": 1583268028496, "user_tz": -60, "elapsed": 2176, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}} colab={"base_uri": "https://localhost:8080/", "height": 485}
group_and_barplot('param_marka-pojazdu')
# + id="DeCnpm0sOm1e" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 436} outputId="eb1d6413-d9a8-47b4-d7c8-92394d7e34a1" executionInfo={"status": "ok", "timestamp": 1583268311687, "user_tz": -60, "elapsed": 1376, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}}
group_and_barplot('param_country-of-origin',feat_sort='size');
# + id="JShsJbvqIeRB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 504} outputId="4cc066fb-4e54-4c50-9528-45ebb6ba062b" executionInfo={"status": "ok", "timestamp": 1583268344532, "user_tz": -60, "elapsed": 1830, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}}
group_and_barplot('param_kraj-pochodzenia',feat_sort='size')
# + id="7ESD94OIJDMN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 390} outputId="dd43e8f2-b73d-4d81-ce11-1db9b3d28979" executionInfo={"status": "ok", "timestamp": 1583268458033, "user_tz": -60, "elapsed": 1502, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07332279292902070572"}}
group_and_barplot('param_kolor',feat_sort='mean');
# + id="ARo0jE2fJxyU" colab_type="code" colab={}
| day2_visualisation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Wrangle and Analyze Data: We Rate Dogs ([@dog_rates]("https://twitter.com/dog_rates"))
#
#
# A data analysis project focused on data wrangling efforts.
#
#
#
# ### Table of Contents
# - [Introduction](#intro)
# - [Gather](#gather)
# - [Assess](#assess)
# - Detect and document at least **eight (8) quality issues** and **two (2) tidiness issues**
# - [Clean](#clean)
# - [Storing, Analyzing and Visualizing Data](#storing_analyzing_and_visualizing)
# - At least **three (3) insights** and **one (1) visualization** must be produced
# - [Wrangling Efforts Report](#wranglingeffortsreport)
# - [Communicate Findings Report](#communicatefindingsreport)
# - [References](#references)
#
# <a id='intro'></a>
# ### Introduction
#
#
# Real-world data rarely comes clean. Using Python and its libraries, I will gather data from a variety of sources and in a variety of formats, assess its quality and tidiness, then clean it. This is called data wrangling. I will document my wrangling efforts in a Jupyter Notebook, plus showcase them through analyses and visualizations using Python (and its libraries) and/or SQL.
#
# The dataset that I will be wrangling (and analyzing and visualizing) is the tweet archive of Twitter user [@dog_rates]("https://twitter.com/dog_rates"), also known as [WeRateDogs]("https://twitter.com/dog_rates"). WeRateDogs is a Twitter account that rates people's dogs with a humorous comment about the dog. These ratings almost always have a denominator of 10. The numerators, though? Almost always greater than 10. 11/10, 12/10, 13/10, etc. Why? Because "they're good dogs Brent." WeRateDogs has over 4 million followers and has received international media coverage.
#
# WeRateDogs [downloaded their Twitter archive]("https://help.twitter.com/en/managing-your-account/how-to-download-your-twitter-archive") and sent it to Udacity via email exclusively for you to use in this project. This archive contains basic tweet data (tweet ID, timestamp, text, etc.) for all 5000+ of their tweets as they stood on August 1, 2017
# <a id='gather'></a>
# ### Gather
#
#
# I will be gathering data from these three resources:
#
# 1. The [WeRateDogs]("https://twitter.com/dog_rates") Twitter archive. The *twitter_archive_enhanced.csv* file was given.
#
# 2. The tweet image predictions, i.e., what breed of dog (or other object, animal, etc.) is present in each tweet according to a neural network. This file was provided.
#
# 3. Twitter API and Python's Tweepy library to gather each tweet's retweet count and favorite or like count at minimum, and any additional data I find interesting. I will be generating this using my Twitter API key, secrets, and tokens.
# import necessary libraries
import pandas as pd
import numpy as np
# +
# loading the WeRateDogs twitter archive data
# (twitter-archive-enhanced.csv must sit in the same directory as this notebook)
archive = pd.read_csv('twitter-archive-enhanced.csv')
archive.head()
# +
# downloading the image prediction data programmatically
import requests
predicted_breeds_url = 'https://d17h27t6h515a5.cloudfront.net/topher/2017/August/599fd2ad_image-predictions/image-predictions.tsv'
# fetch the file over HTTP and persist it locally before parsing
response = requests.get(predicted_breeds_url)
with open('image_predictions.tsv', 'wb') as f:
    f.write(response.content)
# load image prediction data (tab-separated, hence sep='\t')
image_predictions = pd.read_csv("image_predictions.tsv", sep='\t')
# -
image_predictions.head()
import tweepy

# Twitter API credentials (redacted before publishing the notebook)
consumer_key = 'HIDDEN'
consumer_secret = 'HIDDEN'
access_token = 'HIDDEN'
access_secret = 'HIDDEN'
# this secures my authentication codes above
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)
# this connects me to Twitter API
api = tweepy.API(auth)
# sanity-check the credentials by fetching the authenticated user
# NOTE(review): api.me() exists in tweepy 3.x; it was removed in tweepy 4 -- confirm the installed version
api.me()
# rebuild the client with a JSON parser so API calls return plain dicts
# (needed later so tweets_data can be serialized with json.dump)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
# +
import json
import time
start = time.time()

tweet_ids = archive.tweet_id.values
tweets_data = []      # full JSON payload of each successfully fetched tweet
tweet_success = []    # ids fetched successfully (aligned with tweets_data)
tweet_failure = []    # ids that could not be fetched (deleted/protected/etc.)

# Fetch each tweet's full JSON via the Twitter API.
# BUG FIX: wait_on_rate_limit / wait_on_rate_limit_notify are tweepy.API()
# constructor options, not get_status() arguments. Passing them to
# get_status() raised a TypeError for every id, and the bare `except:`
# silently swallowed it, so every tweet ended up in tweet_failure.
for tweet_id in tweet_ids:
    try:
        data = api.get_status(tweet_id, tweet_mode='extended')
        tweets_data.append(data)
        tweet_success.append(tweet_id)
    except Exception:
        # keep going on unavailable tweets, but record and report the failure
        tweet_failure.append(tweet_id)
        print(tweet_id)

end = time.time()
print(end - start)  # total elapsed seconds for the API calls
# -
# storing data to tweet_json.txt
# (tweets_data holds plain dicts because the API client uses JSONParser,
# so json.dump works directly)
with open('tweet_json.txt', mode = 'w') as file:
    json.dump(tweets_data, file)
# reading the API data stored in json file
df3 = pd.read_json('tweet_json.txt')
# tweet_success was appended in the same order as tweets_data,
# so it aligns row-for-row with df3
df3['tweet_id'] = tweet_success
# keep only the columns needed for the analysis
df3 = df3[['tweet_id', 'retweet_count', 'favorite_count']]
df3.head()
# <a id='intro'></a>
# ### Assess
# visually inspect each of the three gathered tables
archive
image_predictions
df3
# check column dtypes and null counts
archive.info()
image_predictions.info()
df3.info()
# summary statistics for the numeric columns
archive.describe()
image_predictions.describe()
df3.describe()
# look for column names shared between the tables (merge keys / redundancy)
all_columns = pd.Series(list(archive) + list(image_predictions) + list(df3))
all_columns[all_columns.duplicated()]
list(archive)
list(image_predictions)
# rows that are NOT retweets (retweeted_status_user_id missing)
archive[archive['retweeted_status_user_id'].isnull()]
# spot-check extracted values for quality issues
archive.name.value_counts()
archive.rating_numerator.value_counts()
archive.rating_denominator.value_counts()
archive.sample(50)
image_predictions.p1.value_counts()
image_predictions.p2.value_counts()
image_predictions.p3.value_counts()
archive.sample(5)
image_predictions.sample(5)
# eyeball one of the tweeted images
from IPython.display import Image
Image(url = 'https://pbs.twimg.com/media/CU8v-rdXIAId12Z.jpg')
# confirm key columns have no missing values
sum(archive.rating_numerator.isnull())
sum(archive.rating_denominator.isnull())
sum(archive.timestamp.isnull())
# #### Quality
#
# ##### `archive` table
#
# - Missing data in (in_reply_to_status_id, in_reply_to_user_id, retweeted_status_id, retweeted_status_user_id, retweeted_status_timestamp, expanded_url) columns
# - Timestamp has +0000 at the end. Not in the right format
# - Erroneous datatype (timestamp and retweeted status timestamp should be datetime instead of string)
# - Tweet id is integer instead of string
# - Lowercase dog names such as *a, an, the, quite, my, such etc.* are unusual. all dog names in lowercase
# - Text shows there are retweets or replies in the data
# - Extraction of numerator and denominator are incorrect
#
# ##### `image_predictions` table
#
# - Tweet id is integer instead of string
# - Names in Algorithm's predictions p1, p2, and p3 are sentence case sometimes, lowercase other times
# - Compound names in p1, p2 and p3 columns have underscore sometimes, hyphen other times
#
# ##### `df3` table
#
# - Tweet id is integer instead of string
#
#
#
# #### Tidiness
#
# - One variable in four columns in `archive` table (doggo, floofer, pupper and puppo)
# - df3 should be part of the `archive` table
# <a id='clean'></a>
# ### Clean
# Now it's time to fix the quality issues and tidiness issues spotted above
# work on copies so the raw gathered tables stay untouched
archive_clean = archive.copy()
image_predictions_clean = image_predictions.copy()
df3_clean = df3.copy()
# ### Quality
#
# ##### Erroneous Data Format
#
# **`archive`**: Timestamp has +0000 at the end, making it recorded in pandas as object. Not in the right format
#
# ##### Define
#
# Change timestamp to datetime format
#
# ##### Code
# remove +0000 from timestamp
# (the trailing " +0000" UTC offset is a fixed 6 characters, hence stop=-6)
archive_clean['timestamp'] = archive_clean['timestamp'].str.slice(start=0, stop=-6)
# convert timestamp to datetime
archive_clean['timestamp'] = pd.to_datetime(archive_clean['timestamp'],
                                            format = "%Y-%m-%d %H:%M:%S")
# ##### Test
archive_clean.info()
archive_clean.head(2)
# ##### Erroneous Datatype
#
# **`archive`: Tweet id is integer instead of string**
#
# **`image_predictions`: Tweet id is integer instead of string**
#
# **`df3`: Tweet id is integer instead of string**
#
# ##### Define
#
# Change tweet id datatype to string
#
# ##### Code
# tweet ids are identifiers, not quantities -- store them as strings
# in all three tables so they merge cleanly and can't be summed by accident
archive_clean['tweet_id'] = archive_clean['tweet_id'].astype(str)
image_predictions_clean['tweet_id'] = image_predictions_clean['tweet_id'].astype(str)
df3_clean['tweet_id'] = df3_clean['tweet_id'].astype(str)
# ##### Test
archive_clean.info()
image_predictions_clean.info()
df3_clean.info()
archive_clean.head(1)
# ##### Inconsistency In Name
#
# **`archive`**:Names such as a, an, the, quite, my, such etc. are unusual. all dog names in lowercase
#
# ##### Define
#
# Convert all non-dog names to np.nan. All non-dog names start with lowercase
#
# ##### Code
# extraction artifacts ('a', 'an', 'the', 'quite', ...) are all-lowercase,
# while real dog names are capitalized -- blank out the lowercase ones
# NOTE(review): the literal placeholder string 'None' is not lowercase and
# therefore survives this step -- confirm whether it should also become NaN
mask = archive_clean.name.str.islower()
column_name = 'name'
archive_clean.loc[mask, column_name] = np.nan
archive_clean.name.value_counts()
archive_clean[archive_clean.name.isnull()]
# `archives` table: **There are retweets and replies in the dataset**
#
# ##### Define
# Filter the null values for the three columns related to retweets and check to verify them.
#
# ##### Code
# keep only original tweets: a row is dropped if ANY of the retweet- or
# reply-related id/timestamp fields is populated
original_only = (
    archive_clean.retweeted_status_id.isnull()
    & archive_clean.retweeted_status_user_id.isnull()
    & archive_clean.retweeted_status_timestamp.isnull()
    & archive_clean.in_reply_to_status_id.isnull()
    & archive_clean.in_reply_to_user_id.isnull()
)
archive_clean = archive_clean[original_only]
# ##### Test
# every count below should be 0 after the filtering
archive_clean.retweeted_status_id.notnull().sum()
archive_clean.retweeted_status_user_id.notnull().sum()
archive_clean.retweeted_status_timestamp.notnull().sum()
archive_clean.in_reply_to_status_id.notnull().sum()
archive_clean.in_reply_to_user_id.notnull().sum()
# `archive` table: **The extraction of numerator and denominator are incorrect**
#
# ##### Define
# The extraction of the numerator and the denominator didn't seem to work fine, as floats were not extracted correctly. The extraction will be executed again and the results shall be stored in the DataFrame.
#
# ##### Code
# Re-extract the rating from the tweet text. The pattern captures an
# optional decimal numerator (e.g. "13.5/10") that the original archive
# extraction truncated to its fractional tail.
rating = archive_clean.text.str.extract(r'((?:\d+\.)?\d+)\/(\d+)', expand = True)
rating.columns = ['rating_numerator', 'rating_denominator']
rating['rating_numerator'] = rating['rating_numerator'].astype(float)
rating['rating_denominator'] = rating['rating_denominator'].astype(int)
# BUG FIX: the re-extracted values were never written back to
# archive_clean, so the incorrect ratings were still used downstream.
archive_clean['rating_numerator'] = rating['rating_numerator']
archive_clean['rating_denominator'] = rating['rating_denominator']
# ##### Test
archive_clean.rating_numerator.value_counts()
archive_clean.info()
archive_clean.head(3)
# `image_predictions`: **Names in Algorithm's predictions p1, p2, and p3 are sentence case sometimes, lowercase other times**
#
# ##### Define
# Change the breed names to the same format for consistency
#
# ##### Code
# change the image predictions' breed names to lowercase for consistency
# (apply column-wise -- the original axis=1 call did the same work one row
# at a time, a Python-level call per row instead of three vectorized ops)
breed_cols = ["p1", "p2", "p3"]
image_predictions_clean[breed_cols] = image_predictions_clean[breed_cols].apply(lambda s: s.str.lower())
image_predictions_clean[breed_cols]
# ##### Test
# BUG FIX: describe() must be *called* -- the original `.describe` (no
# parentheses) just echoed the bound method and verified nothing
image_predictions_clean.p1.describe()
image_predictions_clean.p2.describe()
image_predictions_clean.p3.describe()
# `image_predictions`: **Compound names in p1, p2 and p3 columns have underscore sometimes, hyphen other times**
#
# ##### Define
# Use pandas apply function to make changes to compound names with hypen and underscore for consistency
#
#
# ##### Code
# Normalize compound breed names: replace underscores AND hyphens with a
# single space so e.g. "german_shepherd" and "soft-coated" use one format.
# BUG FIXES versus the original cell:
#   - the apply() result was discarded (never assigned back), so nothing changed;
#   - `.str.title` was missing its parentheses, so it was never called
#     (title-casing is dropped here to stay consistent with the earlier
#     lowercase-normalization step);
#   - the second pass replaced "_" with "-", contradicting the stated goal
#     of removing the inconsistency.
breed_cols = ["p1", "p2", "p3"]
image_predictions_clean[breed_cols] = image_predictions_clean[breed_cols].apply(
    lambda s: s.str.replace("_", " ").str.replace("-", " ")
)
image_predictions_clean[breed_cols]
# ##### Test
image_predictions_clean[breed_cols].describe()
# ### Missing Data
#
# **`archive`: Missing data in (in_reply_to_status_id, in_reply_to_user_id, retweeted_status_id, retweeted_status_user_id, retweeted_status_timestamp, expanded_urls) columns**
#
# ##### Define
#
# Drop the columns in which missing data are present in the `archive` table. Since they are not going to be used in my analysis.
#
# ##### Code
# remove the columns in archive with missing data
# (after filtering out retweets/replies these columns are entirely null,
# and expanded_urls is unused in the analysis)
archive_clean = archive_clean.drop(['in_reply_to_status_id', 'in_reply_to_user_id',
                                    'retweeted_status_id',
                                    'retweeted_status_user_id',
                                    'retweeted_status_timestamp',
                                    'expanded_urls'], axis = 1)
# ##### Test
archive_clean.info()
# ### Tidiness
#
# **Several columns in archive table contain similar variables**
#
# ##### Define
#
# Replace *None* variables in the dog stage columns, then combine all stages to one column, separate the joint multiple stages and then convert the missing values to nan. In the end, drop individual stages from the archive table.
#
# ##### Code
# handle None variables in the dog stage columns
# (replace the literal string 'None' with '' so string concatenation below
# yields only the real stage names)
archive_clean.doggo.replace('None', '', inplace=True)
archive_clean.floofer.replace('None', '', inplace=True)
archive_clean.pupper.replace('None', '', inplace=True)
archive_clean.puppo.replace('None', '', inplace=True)
# merge all stages into one column by concatenating the four indicator columns
archive_clean['dog_stage'] = archive_clean.doggo + archive_clean.floofer + archive_clean.pupper + archive_clean.puppo
# drop individual stage columns
archive_clean = archive_clean.drop(['doggo', 'floofer', 'pupper', 'puppo'], axis = 1)
# handle tweets with multiple stages: the concatenation glued the names
# together, so split the observed combinations back into readable pairs
archive_clean.loc[archive_clean.dog_stage == 'doggopupper', 'dog_stage'] = 'doggo, pupper'
archive_clean.loc[archive_clean.dog_stage == 'doggopuppo', 'dog_stage'] = 'doggo, puppo'
archive_clean.loc[archive_clean.dog_stage == 'doggofloofer', 'dog_stage'] = 'doggo, floofer'
# handle missing values: empty string means no stage was given
archive_clean.loc[archive_clean.dog_stage == '', 'dog_stage'] = np.nan
# ##### Test
archive_clean.info()
archive_clean.dog_stage.value_counts()
# **Dog breed predictions are spread across the p1/p2/p3 columns and should be consolidated into a single variable**
#
# ##### Define
#
# Write a loop to select dog breed in the predictions variable
#
# ##### Code
# writing a loop to select dog breed: take the first of the three
# predictions (ordered by network confidence p1 > p2 > p3) that the model
# classified as a dog; otherwise record "no prediction".
# itertuples() iterates positionally, so this also works if the DataFrame
# index is ever not a clean 0..n-1 RangeIndex (the original
# `df['p1_dog'][i]` label lookups assumed it was, and would raise KeyError
# or fetch the wrong row otherwise).
dog_prediction = []
for row in image_predictions_clean.itertuples(index=False):
    if row.p1_dog:
        dog_prediction.append(row.p1)
    elif row.p2_dog:
        dog_prediction.append(row.p2)
    elif row.p3_dog:
        dog_prediction.append(row.p3)
    else:
        dog_prediction.append("no prediction")
# create a new column from dog prediction list
image_predictions_clean['predictions'] = dog_prediction
# ##### Test
# check prediction
image_predictions_clean[['predictions', 'p1_dog', 'p1']]
image_predictions_clean.info()
# **All the table should be merged as one**
#
# ##### Define
#
# Merge df3_clean with archive_clean
#
# ##### Code
# check if the DataFrames have duplicates first -- tweet_id must be unique
# in each table for the one-to-one merges below to be safe
archive_clean['tweet_id'].duplicated().sum()
image_predictions_clean['tweet_id'].duplicated().sum()
df3_clean['tweet_id'].duplicated().sum()
# good! no duplicate. now we can combine the DataFrames
# (left join keeps every archive tweet, even those missing API counts)
we_rate_dogs = pd.merge(archive_clean,
                        df3_clean, on = 'tweet_id', how = 'left')
# ##### Test
we_rate_dogs.head()
we_rate_dogs.info()
# checking for duplicates again
we_rate_dogs['tweet_id'].duplicated().sum()
# **All the table should be merged as one**
#
# ##### Define
#
# Merge image_predictions_clean with we_rate_dogs
#
# ##### Code
# left join again: tweets without an image prediction get NaN prediction columns
we_rate_dogs2 = pd.merge(we_rate_dogs,
                         image_predictions_clean, on = 'tweet_id', how = 'left')
# ##### Test
we_rate_dogs2.head(2)
we_rate_dogs2.info()
# <a id='storing_analyzing_and_visualizing_data'></a>
# ### Storing, Analyzing and Visualizing Data
# Store the clean DataFrame in a csv file named `twitter_archive_master.csv`
#
# Analyze and visualize my wrangled data. At least, **three (3) insights and one(1) visualization** must be produced
#
#
# Drawing conclusions and creating visuals to communicate results. The following questions are addressed
#
# **Q1:** What are the features that influence retweet count and favorite count?
#
# **Q2:** Is rating influenced by dog stage? What are the dog stages with the highest rating?
#
# **Q3:** What is the most popular dog name?
# store the clean DataFrame in a csv file named twitter_archive_master.csv
we_rate_dogs2.to_csv('twitter_archive_master.csv', encoding='utf-8', index=False)
# +
# import necessary libraries for the analysis/visualization section
import datetime
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# load the clean data back from disk so the analysis starts from the stored master file
df = pd.read_csv('twitter_archive_master.csv')
# -
# ###### Exploring with Visuals
#
# create rating column (numerator normalized by its denominator so ratings
# with non-10 denominators are comparable)
df['rating'] = df.rating_numerator/df.rating_denominator
# create new column for dog breeds substituting single breeds for "other"
# value counts for all the dog breeds
vc = df.predictions.value_counts()
# get the breeds that have a value count of less than 10
singles = vc[vc<10].index.tolist()
# new column for dog breeds
df['breed_group'] = df['predictions']
# replace strings in single list with string 'other'
df['breed_group'].replace(singles, 'other', inplace = True)
# get the average rating only for each breed
rating2 = df.groupby('breed_group').rating.mean()
rating2
# the index for this series is the breed
# the value is the average rating
# plotting these directly in pandas:
# plot average rating of breed group and dog stage
# NOTE(review): the local names `rating`/`rating2` are reused for several
# different Series throughout this section -- confusing but harmless
fig, axes = plt.subplots(1,2)
rating = df.groupby('breed_group').rating.mean()
rating.plot.barh(ax = axes[1], align = 'center', color = ['lavender', 'lightsteelblue', 'cornflowerblue', 'royalblue', 'midnightblue'], figsize = [5,11])
rating2 = df.groupby('dog_stage').rating.mean()
rating2.plot.barh(ax = axes[0], align = 'center', color = ['lavender', 'lightsteelblue', 'cornflowerblue', 'royalblue', 'midnightblue'])
plt.subplots_adjust(wspace = 0.4)
fig.set_figwidth(25)
plt.tight_layout()
plt.show()
# Dog breed with **no prediction** which is where no dog breed exist, has the highest average rating. Average rating for dog breeds is below 1.4
# +
# plot average favorite count for dog stage and breed group, side by side
fig, axes = plt.subplots(1,2, figsize = [5,11])
rating = df.groupby('dog_stage').favorite_count.mean()
rating.plot.barh(ax = axes[0],align = 'center', color = ['lavender', 'lightsteelblue', 'cornflowerblue', 'royalblue', 'midnightblue'])
rating2 = df.groupby('breed_group').favorite_count.mean()
rating2.plot.barh(ax = axes[1],align = 'center', color = ['lavender', 'lightsteelblue', 'cornflowerblue', 'royalblue', 'midnightblue'])
plt.subplots_adjust(wspace =0.4)
fig.set_figwidth(25)
plt.tight_layout()
plt.show()
# -
# There is a big difference in average favorite count for the combined dog stage **doggo, puppo** compared to other stages. This factor is subject to further findings
# +
# plot average retweet count for dog stage and breed group, side by side
fig, axes = plt.subplots(1,2, figsize = [5,11])
rating = df.groupby('dog_stage').retweet_count.mean()
rating.plot.barh(ax = axes[0],align = 'center', color = ['lavender', 'lightsteelblue', 'cornflowerblue', 'royalblue', 'midnightblue'])
rating2 = df.groupby('breed_group').retweet_count.mean()
rating2.plot.barh(ax = axes[1],align = 'center', color = ['lavender', 'lightsteelblue', 'cornflowerblue', 'royalblue', 'midnightblue'])
plt.subplots_adjust(wspace =0.4)
fig.set_figwidth(25)
plt.tight_layout()
plt.show()
# -
# There is a big difference in average retweet count for the combined dog stage **doggo, puppo** compared to other stages. This factor is also subject to further findings in our analysis
# **Q1:** What are the features that influence retweet count and favorite count?
# average engagement for rows where a dog breed was actually predicted
p_breed_retweet = df[~df['breed_group'].isin(['no prediction'])].retweet_count.mean()
p_breed_favorite = df[~df['breed_group'].isin(['no prediction'])].favorite_count.mean()
# NOTE(review): this filters on breed_group although the name says "stage",
# making it identical to p_breed_retweet -- likely copy-paste; the variable
# is also unused below. Confirm intent before relying on it.
p_stage_retweet = df[~df['breed_group'].isin(['no prediction'])].retweet_count.mean()
print('average retweet count for all dog breeds except no prediction = %f' %df[~df['breed_group'].isin(['no prediction'])].retweet_count.mean())
print('average favorite count for all dog breeds except no prediction = %f' %df[~df['breed_group'].isin(['no prediction'])].favorite_count.mean())
print('average rating for all dog breeds under no prediction = %f' %df[df['predictions'].isin(['no prediction'])].rating.mean())
print('average rating for all dog breeds under other = %f' %df[df['predictions'].isin(singles)].rating.mean())
# +
# correlation between retweet count and favorite count
plt.scatter(df.retweet_count, df.favorite_count)
plt.title('Correlation Between Retweet and Favourite')
plt.xlabel('Retweet Count')
plt.ylabel('Favorite Count')
plt.show();
# -
# There is a positive correlation between retweet count and favorite count
# categorical dtype lets seaborn treat dog_stage as discrete hue levels
df = df.astype({'dog_stage': 'category'})
# install a pip package in the current Jupyter kernel to update the seaborn library
import sys
# !{sys.executable} -m pip install seaborn -U
sns.scatterplot(data = df, x='retweet_count', y='favorite_count', hue = df.dog_stage.tolist());
# Here, we can see that there is a positive correlation between retweet count and favorite count across the dog stages, with **doggo** having the strongest
#
# Therefore, dog stage influences the retweet count and favorite count
# **Q2:** Is rating influenced by dog stage? What are the dog stages with the highest rating?
#
#
sns.scatterplot(data = df, x = 'rating', y = 'dog_stage');
# Now let's check the relationship between rating and each dog stage excluding the outliers
# filter out the outliers in rating (a handful of joke ratings are far above 2.5)
rating_within = df[df['rating']<2.5]
sns.scatterplot( data = rating_within, x = 'rating' , y = 'dog_stage');
# This visual shows that **doggo**, **pupper** and **puppo** have the highest rating
# **Q3:** What are the most popular dog names and dog breed?
# +
# df.name.value_counts()
# -
# NOTE(review): the [1:10] slice skips the most frequent entry -- presumably
# the literal 'None' placeholder that survived name cleaning; confirm
df.name.value_counts()[1:10].plot('barh', figsize=(15,8), color = 'cornflowerblue', title='Most Popular Dog Name').set_xlabel("Dog Count");
# Lucy and Charlie are the most popular dog names followed by Cooper and Oliver.
# NOTE(review): [2:12] skips the top two entries -- presumably 'no prediction' and 'other'
df.breed_group.value_counts()[2:12].plot('barh', figsize=(15,8), color = 'cornflowerblue', title='Most Popular Breed').set_xlabel("Dog Count");
# Most Popular breed are Golden Retriever, Labrador Retriever and Pembroke in that order
# <a id='references'></a>
# ### References
#
# Downloading image prediction file. https://knowledge.udacity.com/questions/641740
#
# Parsing the json file to pandas DataFrame https://github.com/tweepy/tweepy/issues/1102
#
# Converting to np.nan https://knowledge.udacity.com/questions/588409
#
# Extraction of ratings https://udacity-reviews-uploads.s3.us-west-2.amazonaws.com/_attachments/251099/1598725409/Capture2.JPG
#
# Filtering retweets from archive table https://knowledge.udacity.com/questions/314510
#
# Combining dog stages https://knowledge.udacity.com/questions/321950
#
# Dog predictions column https://knowledge.udacity.com/questions/369845
#
# Merging DataFrames https://knowledge.udacity.com/questions/511144
#
# Exploring with visuals https://knowledge.udacity.com/questions/458203
#
# Updating package in the current kernel https://knowledge.udacity.com/questions/62699
#
# Plotting with Seaborn https://seaborn.pydata.org/generated/seaborn.scatterplot.html
#
# Plotting with Matplotlib https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.bar.html#matplotlib.pyplot.bar
#
#
| wrangle_act.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# language: python
# name: python395jvsc74a57bd031f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6
# ---
import pandas as pd
# load the Chipotle orders dataset (tab-separated)
orders = pd.read_table('http://bit.ly/chiporders')
# rows whose item name mentions 'Chicken'
orders[orders.item_name.str.contains('Chicken')]
# strip the leading dollar sign from item_price.
# BUG FIX: regex=False is required -- with the default regex replace, '$'
# is the end-of-string anchor, so nothing was removed and the '$' remained.
orders['item_price'] = orders.item_price.str.replace('$', '', regex=False)
orders.head()
| Data_School012.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Parameters
# MLlib `Estimators` and `Transformers` use a uniform API for specifying parameters.
#
# A Param is a named parameter with self-contained documentation. A ParamMap is a set of (parameter, value) pairs.
#
# There are two main ways to pass parameters to an algorithm:
#
# - Set parameters for an instance. E.g., if `lr` is an instance of `LogisticRegression`, one could call `lr.setMaxIter(10)` to make `lr.fit()` use at most 10 iterations. This API resembles the API used in spark.mllib package.
#
# - Pass a `ParamMap` to `.fit()` or `.transform()`. Any parameters in the `ParamMap` will override parameters previously specified via setter methods.
#
# Parameters belong to specific instances of `Estimators` and `Transformers`. For example, if we have two LogisticRegression instances `lr1` and `lr2`, then we can build a ParamMap with both `maxIter` parameters specified: `ParamMap({lr1.maxIter: 10, lr2.maxIter: 20})`. This is useful if there are two algorithms with the `maxIter` parameter in a `Pipeline`.
#
#
# ### Example
# This example was adapted from Spark's MLlib: Main Guide. Link to original:
# https://spark.apache.org/docs/2.4.3/ml-pipeline.html#example-estimator-transformer-and-param
# +
from IPython.core.display import display, HTML
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.classification import LogisticRegression
# reuse an existing SparkSession if one is running, otherwise start one
spark = SparkSession.builder.getOrCreate()

# Prepare training data from a list of (label, features) tuples.
training = spark.createDataFrame(
    [
        (1.0, Vectors.dense([0.0, 1.1, 0.1])),
        (0.0, Vectors.dense([2.0, 1.0, -1.0])),
        (0.0, Vectors.dense([2.0, 1.3, 1.0])),
        (1.0, Vectors.dense([0.0, 1.2, -0.5])),
    ],
    ["label", "features"],
)

# Prepare test data (same schema: binary label + dense feature vector)
test = spark.createDataFrame(
    [
        (1.0, Vectors.dense([-1.0, 1.5, 1.3])),
        (0.0, Vectors.dense([3.0, 2.0, -0.1])),
        (1.0, Vectors.dense([0.0, 2.2, -1.5])),
    ],
    ["label", "features"],
)
# +
# Create a LogisticRegression instance. This instance is an Estimator.
# maxIter and regParam are parameters set at construction time
lr = LogisticRegression(maxIter=10, regParam=0.01)
# Print out the parameters, documentation, and any default values.
print(f"LogisticRegression parameters:\n{lr.explainParams()}")
# +
# I developed two simple helper scripts to help display
# the content of Params and ParamMaps in a more readable way
def print_explainParams(cls, font_size=1):
    """Render an MLlib object's explainParams() output as formatted HTML.

    Usage example:
    > print_explainParams(pyspark.ml.LogisticRegression)

    Parameters
    ----------
    cls
        input class (should be able to run the explainParams method)
    font_size : int
        control displayed font size (default = 1)
    """
    heading = f"<h2>Parameters for: {cls}</h2>"
    entries = []
    for line in str(cls.explainParams()).split("\n"):
        # each line has the shape "name: doc"; promote the name to a sub-heading
        entries.append(f'<h4>{line.replace(":", "</h4> <p>", 1)}</p>')
    body = "\n".join(entries)
    display(HTML(f"<font size='{font_size}'>{heading} {body}</font>"))
def print_explainParamMap(cls, display_docs=True, font_size=1):
    """Pretty-print an MLlib ParamMap in Jupyter / IPython.

    BUG FIX: the function previously read the global ``model1`` instead of
    its ``cls`` argument, so it always displayed model1's parameters no
    matter what object was passed in.

    Usage example:
    > lr = pyspark.ml.LogisticRegressionModel
    > print_explainParamMap(lr)
    > print_explainParamMap(lr, False)

    Parameters
    ----------
    cls
        input object (should be able to run the extractParamMap method)
    display_docs : bool
        toggles displaying the docs or not
    font_size : int
        control displayed font size (default = 1)
    """
    title = f'<font size="{font_size}"><h3>Parameter Map </h3>{cls}</font>'
    param_map: dict = cls.extractParamMap()
    html = []
    if display_docs:
        html.append(title)
    # each entry maps a Param object (name + doc) to its current value
    for param, value in param_map.items():
        if display_docs:
            html.append(
                f"""
                <font size="{font_size}"><h4>{param.name}</h4></font>
                <p>
                <font size="{font_size - 1}">doc: <i>{param.doc}</i><br/>
                value: </font><font size="{font_size + 1}">{value}</font>
                </p>
                """
            )
        else:
            # compact one-line-per-param listing without the docs
            html.append(
                f'<li><font size="{font_size}"><b>{param.name}:</b> {value}</font></li>'
            )
    display(HTML(f'{"".join(html)}'))
# -
print_explainParams(lr)
# +
# Learn a LogisticRegression model. This uses the parameters stored in lr.
model1 = lr.fit(training)

# Since model1 is a Model (i.e., a transformer produced by an Estimator),
# we can view the parameters it used during fit().
# This prints the parameter (name: value) pairs, where names are unique IDs for this
# LogisticRegression instance.
print("Model 1 was fit using parameters: ")
print(model1.extractParamMap())
# -
print_explainParamMap(model1)
# +
# We may alternatively specify parameters using a Python dictionary as a paramMap
paramMap = {lr.maxIter: 20}
paramMap[lr.maxIter] = 30  # Specify 1 Param, overwriting the original maxIter.
paramMap.update({lr.regParam: 0.1, lr.threshold: 0.55})  # Specify multiple Params.

# You can combine paramMaps, which are python dictionaries.
paramMap2 = {lr.probabilityCol: "myProbability"}  # Change output column name
paramMapCombined = paramMap.copy()
paramMapCombined.update(paramMap2)

# Now learn a new model using the paramMapCombined parameters.
# paramMapCombined overrides all parameters set earlier via lr.set* methods.
model2 = lr.fit(training, paramMapCombined)
print("Model 2 fit used these parameters: ")
print_explainParamMap(model2, False)
# +
# Make predictions on test data using the Transformer.transform() method.
# LogisticRegression.transform will only use the 'features' column.
# Note that model2.transform() outputs a "myProbability" column instead of the usual
# 'probability' column since we renamed the lr.probabilityCol parameter previously.
prediction = model2.transform(test)
result = prediction.select("features", "label", "myProbability", "prediction").collect()

# print one line per test row: input features/label and the model's output
for row in result:
    print(
        "features=%s, label=%s -> prob=%s, prediction=%s"
        % (row.features, row.label, row.myProbability, row.prediction)
    )
# -
| Section 5 - Classification and Regression/5.4/pyspark.ml.parameters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TDA with Python using the Gudhi Library
#
# # Persistence diagram
import numpy as np
import pandas as pd
import gudhi as gd
from sklearn import manifold
from pylab import *
# %matplotlib inline
# import sklearn_tda
# ### Persistence homology and persistence diagram
# One strong motivation for defining homology groups is the fact that two shapes can be distinguished by examining their holes.
#
# Persistent homology is a powerful tool to compute, study and encode efficiently multiscale topological features of nested families of simplicial complexes and topological spaces. It encodes the evolution of the homology groups of the nested complexes (holes) across the scales.
#
# In the example below we consider the filtration given by a union of growing balls centered on the finite set of points $C$.
# <img src="Images/persistence.png" style="width: 600px;"/>
#
# a) For the radius r = 0, the union of balls is reduced to the initial finite set of point, each of them corresponding to a 0-dimensional feature, i.e. a connected component; an interval is created for the birth for each of these features at r = 0.
#
# b) Some of the balls started to overlap resulting in the death of some connected components that get merged together; the persistence diagram keeps track of these deaths, putting an end point to the corresponding intervals as they disappear.
#
# c) New components have merged giving rise to a single connected component and, so, all the intervals associated to a 0-dimensional feature have been ended, except the one corresponding to the remaining components; two new 1-dimensional features, have appeared resulting in two new intervals (in blue) starting at their birth scale.
#
# d) One of the two 1-dimensional cycles has been filled, resulting in its death in the filtration and the end of the corresponding blue interval.
#
# e) all the 1-dimensional features have died, it only remains the long (and never dying) red interval.
#
# The final barcode can also be equivalently represented as **a persistence diagram** where every interval (a,b) is represented by the the point of coordinate (a,b) in $\mathbb R^2$.
#
# Intuitively the longer is an interval in the barcode or, equivalently the farther from the diagonal is the corresponding point in the diagram, the more persistent, and thus relevant, is the corresponding homological feature across the filtration.
#
#
# ### Protein binding dataset
#
# The data we study in this notebook represent configurations of protein binding. This example is borrowed from the paper a paper of Kovacev-Nikolic et.al [1](https://arxiv.org/pdf/1412.1394.pdf).
#
# The paper compares closed and open forms of the maltose-binding protein (MBP), a large biomolecule consisting of 370 amino acid residues. The analysis is not based on geometric distances in $\mathbb R^3$ but on a metric of *dynamical distances* defined by
# $$D_{ij} = 1 - |C_{ij}|,$$
# where $C$ is the correlation matrix between residues.
#
# Correlation matrices between residues can be found at this [link](https://www.researchgate.net/publication/301543862_corr), we are grateful to the authors for sharing data !
#
# We start from the Vietoris-Rips filtrations of the Protein binding distance matrices. See this [previous tutorial](Tuto-GUDHI-simplicial-complexes-from-distance-matrix.ipynb) for more details on the construction.
# +
# Correlation files for the 14 MBP configurations studied in the paper.
path_file = "./datasets/Corr_ProteinBinding/"
files_list = [
    '1anf.corr_1.txt',
    '1ez9.corr_1.txt',
    '1fqa.corr_2.txt',
    '1fqb.corr_3.txt',
    '1fqc.corr_2.txt',
    '1fqd.corr_3.txt',
    '1jw4.corr_4.txt',
    '1jw5.corr_5.txt',
    '1lls.corr_6.txt',
    '1mpd.corr_4.txt',
    '1omp.corr_7.txt',
    '3hpi.corr_5.txt',
    '3mbp.corr_6.txt',
    '4mbp.corr_7.txt']
# NOTE(review): `delim_whitespace=True` is deprecated in recent pandas releases
# (use sep='\s+' instead) -- confirm the pinned pandas version before changing.
corr_list = [pd.read_csv(path_file+u,
                         header=None,
                         delim_whitespace=True) for u in files_list]
# Dynamical distance D_ij = 1 - |C_ij|, as defined in the formula above.
dist_list = [1- np.abs(c) for c in corr_list]
# -
# We first consider the first distance matrix:
mat_dist0 = dist_list[0]
# ### Persistence barcodes and persistence diagrams
# We build the Vietoris-Rips complex from the distance matrix `mat_dist0`, see this [notebook](Tuto-GUDHI-simplicial-complexes-from-distance-matrix.ipynb) for more details.
# +
# Edges longer than max_edge_length are never inserted, which truncates the filtration.
skeleton_protein0 = gd.RipsComplex(distance_matrix=mat_dist0.values,
                                   max_edge_length=0.8)
Rips_simplex_tree_protein0 = skeleton_protein0.create_simplex_tree(max_dimension=2)
# -
# Now we can compute persistence on the simplex tree structure using the <code>persistence()</code> method of the simplex tree class:
BarCodes_Rips0 = Rips_simplex_tree_protein0.persistence()
# The object `BarCodes_Rips0` is the list of barcodes: each element in the list is a tuple (dim,(b,d)) where dim is a dimension, b is birth parameter and d is death parameter.
#
# Let's print the 20 first elements in the list:
for i in range(20):
    print(BarCodes_Rips0[i])
# These 20 topological features have dimension 1, they correspond to holes of dimension 1.
# We have access to persistence_intervals per dimension using the `persistence_intervals_in_dimension()` method, for instance for dimension 0:
# Returns an array of (birth, death) pairs for the requested dimension only.
Rips_simplex_tree_protein0.persistence_intervals_in_dimension(0)
# The last bars (0.0, inf) die at infinity.
# Finally we can plot the points (birth, death) in the so-called **persistence diagram**:
gd.plot_persistence_diagram(BarCodes_Rips0);
# In this representation, 0-dimensional features are the red points (holes of dimension 0, namely connect components). The last connected component dies at infinity in the filtration (red point at the top). 1-dim features are represented in green.
#
# The most persistent topological features are those points that are far from the diagonal. Further in the tutorial we give statistical methods to identify topological features that are significant.
#
# Note that this representation does not say which points are "at the origin" of a given feature. Moreover, a given topological feature (namely a homology class that corresponds to a hole of dimension $d$) is by definition a class of cycles defined on the point cloud and thus it can be represented by several cycles .
# ### Bottleneck distance
# To exploit the topological information and topological features inferred from persistent homology, one needs to be able to compare persistence diagrams.
#
# We see a persistence diagram as the union of its points and of the diagonal, where the point of the diagonal are counted with infinite multiplicity.
#
# A matching between two diagrams $\operatorname{dgm}_1$ and $\operatorname{dgm}_2$ is a subset $m \subseteq \operatorname{dgm}_1 \times \operatorname{dgm}_2$ such that every points in $\operatorname{dgm}_1 \setminus \Delta$ and $\operatorname{dgm}_2 \setminus \Delta$ appears exactly once in $m$.
# <img src="Images/MatchingDiag.png" style="width: 400px;"/>
# The **Bottleneck distance** between $\operatorname{dgm}_1$ and $\operatorname{dgm}_2$ is then defined by
#
# $$
# \operatorname{d}_b (\operatorname{dgm}1, \operatorname{dgm}_2) =\inf_{\scriptsize{\mbox{matching}} \ m} \max_{(p,q) \in m} \| p-q \|_\infty.
# $$
#
# Let us compute the Rips complex filtration for another configuration of protein
# +
mat_dist1 = dist_list[1]
# Same construction as for the first configuration.
skeleton_protein1 = gd.RipsComplex(distance_matrix=mat_dist1.values,
                                   max_edge_length=0.8)
Rips_simplex_tree_protein1 = skeleton_protein1.create_simplex_tree(max_dimension=2)
# -
# and the barcode for this filtration:
BarCodes_Rips1 = Rips_simplex_tree_protein1.persistence()
# The bottleneck distance between the two persistence diagrams can be computed using the <code>bottleneck_distance()</code> function. The bottleneck distance is computed per dimension (for dimension 1 in the example below). We can give in argument of the function the persistence_intervals for a given dimension, which can be computed using the `persistence_intervals_in_dimension()` function.
#
# +
I0 = Rips_simplex_tree_protein0.persistence_intervals_in_dimension(1)
I1 = Rips_simplex_tree_protein1.persistence_intervals_in_dimension(1)
gd.bottleneck_distance(I0,I1)
# -
# By default, the function uses an expensive algorithm to compute the exact bottleneck distance. It is also possible to compute an approximated bottleneck distance (additive approximation error given in argument), which is usually a lot faster to compute. See the [documention](http://gudhi.gforge.inria.fr/python/latest/bottleneck_distance_user.html#gudhi.bottleneck_distance) for more details.
# The third argument (0.01) is the tolerated additive approximation error.
gd.bottleneck_distance(I0,I1,0.01)
# ### MDS on the bottleneck distances
# A simple approach for comparing the MBP configurations is to use the bottleneck distances between configurations.
#
# We first compute the Rips complex filtration for each MPB:
# +
# Build a Rips filtration for every MBP configuration and store its
# 0- and 1-dimensional persistence intervals.
persistence_list0 = []
persistence_list1 = []
for idx, dist_mat in enumerate(dist_list):
    print(idx)
    rips_complex = gd.RipsComplex(distance_matrix=dist_mat.values, max_edge_length=0.8)
    simplex_tree = rips_complex.create_simplex_tree(max_dimension=2)
    simplex_tree.persistence()  # must run before querying intervals
    persistence_list0.append(simplex_tree.persistence_intervals_in_dimension(0))
    persistence_list1.append(simplex_tree.persistence_intervals_in_dimension(1))
# -
# Next, we compute the matrix of bottleneck distances for dimensions 0 and 1:
# Fill the lower triangle with pairwise bottleneck distances, then symmetrize.
l = len(files_list)
B0 = np.zeros((l, l))
B1 = np.zeros((l, l))
for row in range(l):
    for col in range(row):
        B0[row, col] = gd.bottleneck_distance(persistence_list0[row], persistence_list0[col])
        B1[row, col] = gd.bottleneck_distance(persistence_list1[row], persistence_list1[col])
# Mirror the lower triangle to obtain symmetric distance matrices.
B0 = B0 + B0.transpose()
B1 = B1 + B1.transpose()
B0
# We apply a Multidimensional Scaling method (from the [<code>scikit-learn</code>](http://scikit-learn.org/stable/) library) to visualize the bottleneck distances:
# MDS embedding of the dimension-0 bottleneck distance matrix.
# The first 7 files are closed MBP conformations, the remaining ones are open.
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(B0).embedding_
plt.scatter(pos[0:7,0], pos[0:7, 1], color='red', label="closed")
plt.scatter(pos[7:l,0], pos[7:l, 1], color='blue', label="open")  # fixed: was mislabelled "red"
plt.legend( loc=3, borderaxespad=1);
# MDS embedding of the dimension-1 bottleneck distance matrix.
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9,
                   dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(B1).embedding_
plt.scatter(pos[0:7,0], pos[0:7, 1], color='red', label="closed")
plt.scatter(pos[7:l,0], pos[7:l, 1], color='blue', label="open")  # fixed: was mislabelled "red"
plt.legend( loc=2, borderaxespad=1);
| Tuto-GUDHI-persistence-diagrams.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %cd ..
from gensim.corpora import WikiCorpus, MmCorpus, Dictionary
from nltk.tokenize import sent_tokenize, word_tokenize
import os
import random
import codecs
from nltk.corpus import stopwords
from nltk.stem.wordnet import WordNetLemmatizer
import re
from gensim.test.utils import common_texts
from gensim.corpora.dictionary import Dictionary
from gensim.models import LdaModel
from gensim.test.utils import common_texts
from gensim.corpora.dictionary import Dictionary
# ### Steps:
# 1) Load Wikipedia Corpus. [(Title: Sentence), ... ]
# 2) Tokenize each sentence in Wikipedia corpus.
# 3)
# ### Load WikiCorpus
# Scratch example: a snippet of Wikipedia text used to try out the regexes below.
ex = '\nA None\nAndorra is a coprincipality in which the office of head of state is jointly held ex officio by the French president and the bishop of the Roman Catholic diocese of U'
re.sub(r'[\n]', '', ex)
if re.search(r'\n', ex):
    # Guard against a missing '=': the original called .group(1) directly and
    # raised AttributeError because `ex` contains no '=' character.
    match = re.search(r'^([^=]*)=', ex)
    if match is not None:
        print(match.group(1))
def clean_sentence(sent):
    """Return *sent* with all newline and apostrophe characters removed."""
    unwanted = re.compile(r"\n|\'")
    return unwanted.sub('', sent)
def tokenize(text, token_min_len, token_max_len, lower):
    # Callback for gensim's WikiCorpus(tokenizer_func=...): splits an article
    # into *sentences* rather than words.
    # NOTE(review): token_min_len/token_max_len/lower belong to the required
    # callback signature but are ignored here -- the hard-coded 20-character
    # minimum filters out short/noisy sentences. Confirm this is intentional.
    # override original method in wikicorpus.py
    return [token for token in sent_tokenize(text)
            if len(token) >= 20 and not token.startswith('_')]
# Quick regex check: drop everything except word characters, whitespace and periods.
ex = 'sdlfkjds sdlkjf. sdlfkj sdfsd sdf.'
re.sub(r'[^\w\s.]', '', ex)
# Parse the compressed Simple-English Wikipedia dump with the sentence-level tokenizer above.
wiki = WikiCorpus('data/simplewiki-latest-pages-articles.xml.bz2', tokenizer_func=tokenize)
# NOTE(review): the first assignment is immediately overwritten; with
# metadata=True get_texts() yields (tokens, metadata) pairs instead of tokens.
wiki.metadata = False
wiki.metadata = True
wiki_text = wiki.get_texts()
def obtain_abstract(sents):
    # Stub: not implemented yet, always returns None.
    return
# Collect (title, cleaned sentence) pairs from the first `length` articles.
wiki_text = wiki.get_texts()
sents = []
length = 100
i = 0
for t in wiki_text:
    # t[1][1] is taken as the article title -- TODO confirm metadata layout.
    title = t[1][1]
    # Drop sentences starting with '*' (wiki bullet lines), clean the rest.
    sents_topic = [(title, clean_sentence(s)) for s in t[0] if s[0] != '*']
    sents.extend(sents_topic)
    i += 1
    if i > length:
        break
# ### Tokenize WikiCorpus
# +
# wiki_tokens
# +
# wiki_tokens = [word_tokenize(s[1]) for s in sents]
# wiki_dict = Dictionary(wiki_tokens)
# wiki_corpus = [wiki_dict.doc2bow(t) for t in wiki_tokens]
# -
# ### Train LDA on WikiCorpus
# NOTE(review): `wiki_corpus` and `wiki_dict` are built in the commented-out
# cell above -- uncomment it first, otherwise this raises NameError.
lda = LdaModel(wiki_corpus, num_topics=10)
# ### Train LSI on WikiCorpus
# NOTE(review): `models` and `MatrixSimilarity` are only imported near the
# bottom of this notebook; run those import cells first.
lsi = models.LsiModel(wiki_corpus, id2word=wiki_dict, num_topics=2)
index = MatrixSimilarity(lsi[wiki_corpus])
# ### Predict document
def predict_doc(doc, lsi, index, wiki_dict):
sim_df = pd.DataFrame()
doc_tokens = word_tokenize(doc)
doc_vec_bow = wiki_dict.doc2bow(doc_tokens)
vec_lsi = lsi[doc_vec_bow]
sims = index[vec_lsi]
sim_df['sents'] = wiki_tokens
sim_df['sim'] = sims
return sim_df.sort_values(by='sim', ascending=False)
predict_doc('April is my favorite month.', lsi, index, wiki_dict)
# ## LSI Model
# +
# Create a corpus from a list of texts
common_dictionary = Dictionary(common_texts)
common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]
# Train the model on the corpus.
# lda = LdaModel(common_corpus, num_topics=10)
# +
# for i in wiki.sample_texts(50, length=10000):
# print(i)
# +
from gensim.test.utils import common_texts
from gensim.corpora.dictionary import Dictionary
common_dictionary = Dictionary(common_texts)
# -
print(common_dictionary[0])
# +
def clean(doc):
    """Lowercase *doc*, drop English stopwords, lemmatize in verb mode, and
    return the remaining tokens that are longer than 2 characters."""
    # Fix: the original tested `i not in stopwords`, i.e. membership in the
    # nltk *module* itself (TypeError); build the actual stopword set instead.
    stop_words = set(stopwords.words('english'))
    # Fix: the original referenced an undefined global `lemma`.
    lemma = WordNetLemmatizer()
    stop_free = " ".join([i for i in doc.lower().split() if i not in stop_words])
    normalized = " ".join(lemma.lemmatize(word, 'v') for word in stop_free.split())
    x = normalized.split()
    y = [s for s in x if len(s) > 2]
    return y
def get_related_documents(term, top, corpus):
    """Print up to *top* documents from *corpus* whose dominant LDA topic
    matches the dominant topic of *term*.

    NOTE(review): relies on the globals `ldamodel` and `docs_test` being
    defined elsewhere in the notebook -- confirm before running.
    """
    from operator import itemgetter  # fix: original used itemgetter without importing it
    # Fix: original called undefined `clean_doc`; the helper above is `clean`.
    clean_docs = [clean(doc) for doc in corpus]
    related_docid = []
    test_term = [ldamodel.id2word.doc2bow(doc) for doc in clean_docs]
    doc_topics = ldamodel.get_document_topics(test_term, minimum_probability=0.20)
    term_topics = ldamodel.get_term_topics(term, minimum_probability=0.000001)
    for k, topics in enumerate(doc_topics):
        if topics:
            # Keep a document when its strongest topic equals the term's strongest topic.
            topics.sort(key=itemgetter(1), reverse=True)
            if topics[0][0] == term_topics[0][0]:
                related_docid.append((k, topics[0][1]))
    related_docid.sort(key=itemgetter(1), reverse=True)
    for j, doc_id in enumerate(related_docid):
        print (doc_id[1],"\n\n",docs_test[doc_id[0]])
        if j == (top-1):
            break
# +
# Create a corpus from a list of texts
common_dictionary = Dictionary(common_texts)
common_corpus = [common_dictionary.doc2bow(text) for text in common_texts]
# Train the model on the corpus.
lda = LdaModel(common_corpus, num_topics=10)
# -
lda
# Held-out documents, converted with the same dictionary so token ids line up.
other_texts = [['computer', 'time', 'graph'], ['survey', 'response', 'eps'],['human', 'system', 'computer']]
other_corpus = [common_dictionary.doc2bow(text) for text in other_texts]
unseen_doc = other_corpus[0]
# Infer the topic distribution of an unseen document.
vector = lda[unseen_doc]
from gensim import models
from gensim.similarities import MatrixSimilarity
# https://radimrehurek.com/gensim/tut3.html
lsi = models.LsiModel(common_corpus, id2word=common_dictionary, num_topics=2)
index = MatrixSimilarity(lsi[common_corpus])
# Query: cosine similarity of a new document against every document in the LSI corpus.
doc = "Human computer interface. How are you?"
vec_bow = common_dictionary.doc2bow(doc.lower().split())
vec_lsi = lsi[vec_bow]
sims = index[vec_lsi]
print(list(enumerate(sims)))
# +
# common_corpus
# -
| notebooks/01-wiki-corpus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# orphan: true
# ---
# + tags=["remove-input", "active-ipynb", "remove-output"]
# try:
# from openmdao.utils.notebook_utils import notebook_mode
# except ImportError:
# !python -m pip install openmdao[notebooks]
# -
# # DifferentialEvolutionDriver
#
# ```{note}
# DifferentialEvolutionDriver is based on SimpleGADriver and supports most of the same options and capabilities.
# ```
#
# This [differential evolution](https://en.wikipedia.org/wiki/Differential_evolution) variant of a genetic algorithm optimizer supports only continuous variables. The DifferentialEvolutionDriver supports both constrained and unconstrained optimization.
#
# The DifferentialEvolutionDriver has advantages and disadvantages when compared to the SimpleGADriver:
#
# - Pros
# - DifferentialEvolutionDriver is typically about 3 times faster than SimpleGADriver
# - DifferentialEvolutionDriver is usually more accurate than SimpleGADriver because it does not limit the number of bits available to represent inputs
# - DifferentialEvolutionDriver does not require the user to manually specify a number of representation bits
#
# - Cons
# - DifferentialEvolutionDriver only supports continuous input variables; SimpleGADriver also supports discrete
# - DifferentialEvolutionDriver does not support SimpleGADriver’s “compute_pareto” option for multi-objective optimization
#
# Genetic algorithms do not use gradient information to find optimal solutions. This makes them ideal for problems that do not have gradients or problems with many local minima where gradient information is not helpful in finding the global minimum. A well known example of this is finding the global minimum of of the Rastrigin function: 
#
# The example below shows an OpenMDAO solution of a higher order [Rastrigin function](https://en.wikipedia.org/wiki/Rastrigin_function).
# +
import openmdao.api as om
import numpy as np
ORDER = 6 # dimension of problem
span = 5 # upper and lower limits
class RastriginComp(om.ExplicitComponent):
    """n-dimensional Rastrigin function: y = 10*n + sum(x_i^2 - 10*cos(2*pi*x_i)).

    Global minimum f(0, 0, ..., 0) = 0.
    """

    def setup(self):
        # Array input of length ORDER (module-level constant), scalar output.
        self.add_input('x', np.zeros(ORDER))
        self.add_output('y', 0.0)

    def compute(self, inputs, outputs):
        # Fix: work on a copy. The original zeroed tiny entries of inputs['x']
        # in place, silently mutating the framework-owned input vector.
        x = inputs['x'].copy()

        # nth dimensional Rastrigin function, array input and scalar output
        # global minimum at f(0,0,0...) = 0
        n = len(x)
        s = 10 * n
        for i in range(n):
            if np.abs(x[i]) < 1e-200:  # avoid underflow runtime warnings from squaring tiny numbers
                x[i] = 0.0
            s += x[i] * x[i] - 10 * np.cos(2 * np.pi * x[i])
        outputs['y'] = s
prob = om.Problem()
prob.model.add_subsystem('rastrigin', RastriginComp(), promotes_inputs=['x'])
# Bound every dimension of the design vector to [-span, span].
prob.model.add_design_var('x',
                          lower=-span * np.ones(ORDER),
                          upper=span * np.ones(ORDER))
prob.model.add_objective('rastrigin.y')
prob.driver = om.DifferentialEvolutionDriver()
# Pc: crossover probability, F: differential weight (see options table below).
prob.driver.options['max_gen'] = 400
prob.driver.options['Pc'] = 0.5
prob.driver.options['F'] = 0.5
prob.setup()
prob.run_driver()
print(prob['rastrigin.y'])
print(prob['x'])
# + tags=["remove-input", "remove-output"]
# Hidden regression check run when building the docs: the optimizer should
# reach the global minimum at the origin.
from openmdao.utils.assert_utils import assert_near_equal
assert_near_equal(prob['rastrigin.y'], 0.0, 1e-6)
assert_near_equal(prob['x'], np.zeros(ORDER), 1e-6)
# -
# ## DifferentialEvolutionDriver Options
# + tags=["remove-input"]
# Render the driver's options table into the docs page.
om.show_options_table("openmdao.drivers.differential_evolution_driver.DifferentialEvolutionDriver")
# -
# ## DifferentialEvolutionDriver Constructor
#
# The call signature for the DifferentialEvolutionDriver constructor is:
#
# ```{eval-rst}
# .. automethod:: openmdao.drivers.differential_evolution_driver.DifferentialEvolutionDriver.__init__
# :noindex:
# ```
#
# ## Using DifferentialEvolutionDriver
#
# You can change the number of generations to run the genetic algorithm by setting the “max_gen” option.
# + tags=["remove-input", "remove-output"]
# Docs plumbing: capture the Branin source so it can be shown in a dropdown below.
from openmdao.utils.notebook_utils import get_code
from myst_nb import glue
glue("code_src15", get_code("openmdao.test_suite.components.branin.Branin"), display=False)
# -
# :::{Admonition} `Branin` class definition
# :class: dropdown
#
# {glue:}`code_src15`
# :::
# + tags=["remove-output"]
import openmdao.api as om
from openmdao.test_suite.components.branin import Branin
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Branin(), promotes_inputs=[('x0', 'xI'), ('x1', 'xC')])
model.add_design_var('xI', lower=-5.0, upper=10.0)
model.add_design_var('xC', lower=0.0, upper=15.0)
model.add_objective('comp.f')
prob.driver = om.DifferentialEvolutionDriver()
# Stop after only 5 generations -- a demonstration, not a converged run.
prob.driver.options['max_gen'] = 5
prob.setup()
prob.run_driver()
# -
# You can change the population size by setting the “pop_size” option. The default value for pop_size is 0, which means that the driver automatically computes a population size that is 20 times the total number of input variables.
# + tags=["remove-output"]
import openmdao.api as om
from openmdao.test_suite.components.branin import Branin
prob = om.Problem()
model = prob.model
model.add_subsystem('comp', Branin(), promotes_inputs=[('x0', 'xI'), ('x1', 'xC')])
model.add_design_var('xI', lower=-5.0, upper=10.0)
model.add_design_var('xC', lower=0.0, upper=15.0)
model.add_objective('comp.f')
prob.driver = om.DifferentialEvolutionDriver()
# Override the automatic population size (20 x number of input variables).
prob.driver.options['pop_size'] = 10
prob.setup()
prob.run_driver()
| openmdao/docs/openmdao_book/features/building_blocks/drivers/differential_evolution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ROMS Ocean Model Example
# The Regional Ocean Modeling System ([ROMS](http://myroms.org)) is an open source hydrodynamic model that is used for simulating currents and water properties in coastal and estuarine regions. ROMS is one of a few standard ocean models, and it has an active user community.
#
# ROMS uses a regular C-Grid in the horizontal, similar to other structured grid ocean and atmospheric models, and a stretched vertical coordinate (see [the ROMS documentation](https://www.myroms.org/wiki/Vertical_S-coordinate) for more details). Both of these require special treatment when using `xarray` to analyze ROMS ocean model output. This example notebook shows how to create a lazily evaluated vertical coordinate, and make some basic plots. The `xgcm` package is required to do analysis that is aware of the horizontal C-Grid.
# +
import numpy as np
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
# %matplotlib inline
import xarray as xr
# -
# Load a sample ROMS file. This is a subset of a full model available at
#
# http://barataria.tamu.edu/thredds/catalog.html?dataset=txla_hindcast_agg
#
# The subsetting was done using the following command on one of the output files:
#
# #open dataset
# ds = xr.open_dataset('/d2/shared/TXLA_ROMS/output_20yr_obc/2001/ocean_his_0015.nc')
#
# # Turn on chunking to activate dask and parallelize read/write.
# ds = ds.chunk({'ocean_time': 1})
#
# # Pick out some of the variables that will be included as coordinates
# ds = ds.set_coords(['Cs_r', 'Cs_w', 'hc', 'h', 'Vtransform'])
#
# # Select a a subset of variables. Salt will be visualized, zeta is used to
# # calculate the vertical coordinate
# variables = ['salt', 'zeta']
# ds[variables].isel(ocean_time=slice(47, None, 7*24),
# xi_rho=slice(300, None)).to_netcdf('ROMS_example.nc', mode='w')
#
# So, the `ROMS_example.nc` file contains a subset of the grid, one 3D variable, and two time steps.
# ### Load in ROMS dataset as an xarray object
# +
# load in the file
ds = xr.tutorial.open_dataset('ROMS_example.nc', chunks={'ocean_time': 1})
# This is a way to turn on chunking and lazy evaluation. Opening with mfdataset, or
# setting the chunking in the open_dataset would also achieve this.
ds
# -
# ### Add lazily calculated vertical coordinates
#
# Write equations to calculate the vertical coordinate. These will only be evaluated when data is requested. Information about the ROMS vertical coordinate can be found [here](https://www.myroms.org/wiki/Vertical_S-coordinate)
#
# In short, for `Vtransform==2` as used in this example,
#
# $Z_0 = (h_c \, S + h \,C) / (h_c + h)$
#
# $z = Z_0 (\zeta + h) + \zeta$
#
# where the variables are defined as in the link above.
# +
# Compute the depth of each rho point from the stretched s-coordinate,
# using the formulas given above for the two ROMS vertical transforms.
if ds.Vtransform == 1:
    Zo_rho = ds.hc * (ds.s_rho - ds.Cs_r) + ds.Cs_r * ds.h
    z_rho = Zo_rho + ds.zeta * (1 + Zo_rho/ds.h)
elif ds.Vtransform == 2:
    Zo_rho = (ds.hc * ds.s_rho + ds.Cs_r * ds.h) / (ds.hc + ds.h)
    z_rho = ds.zeta + (ds.zeta + ds.h) * Zo_rho
else:
    # The original fell through silently and later crashed on an undefined
    # z_rho; fail here with a clear message instead.
    raise ValueError("Unsupported Vtransform value: %s" % (ds.Vtransform,))
ds.coords['z_rho'] = z_rho.transpose()  # needing transpose seems to be an xarray bug
ds.salt
# -
# -
# ### A naive vertical slice
#
# Creating a slice using the s-coordinate as the vertical dimension is typically not very informative.
ds.salt.isel(xi_rho=50, ocean_time=0).plot()
# We can feed coordinate information to the plot method to give a more informative cross-section that uses the depths. Note that we did not need to slice the depth or longitude information separately, this was done automatically as the variable was sliced.
# Slicing the variable pulls the matching lon_rho and z_rho coordinates along automatically.
section = ds.salt.isel(xi_rho=50, eta_rho=slice(0, 167), ocean_time=0)
section.plot(x='lon_rho', y='z_rho', figsize=(15, 6), clim=(25, 35))
plt.ylim([-100, 1]);
# ### A plan view
#
# Now make a naive plan view, without any projection information, just using lon/lat as x/y. This looks OK, but will appear compressed because lon and lat do not have an aspect constrained by the projection.
ds.salt.isel(s_rho=-1, ocean_time=0).plot(x='lon_rho', y='lat_rho')
# And let's use a projection to make it nicer, and add a coast.
# +
# Lambert Conformal projection centered on the model domain.
proj = ccrs.LambertConformal(central_longitude=-92, central_latitude=29)
fig = plt.figure(figsize=(15, 5))
ax = plt.axes(projection=proj)
# transform tells cartopy the data coordinates are plain lon/lat.
ds.salt.isel(s_rho=-1, ocean_time=0).plot(x='lon_rho', y='lat_rho',
                                          transform=ccrs.PlateCarree())
coast_10m = cfeature.NaturalEarthFeature('physical', 'land', '10m',
                                         edgecolor='k', facecolor='0.8')
ax.add_feature(coast_10m)
# -
| doc/examples/ROMS_ocean_model.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Advanced databases
#
# ## Data Query Language - DQL
# ### dr inż. <NAME>
# + [markdown] slideshow={"slide_type": "slide"}
# ## SQL Standard
#
#
# - Structure Query Language(SQL) is a database query language used for storing and managing data in Relational DBMS
#
# - SQL is an ANSI/ISO standard but exists different versions of the SQL language.
#
# - The major commands such as SELECT, UPDATE, WHERE, DELETE, etc. are similar.
#
# - Most of the SQL database engines also have their own proprietary extensions in addition to the SQL standard.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## SQL Command
#
# 1. DDL: Data Definition Language
# - create
# - alter
# - delete
# - drop
# - rename
# 2. DML: Data Manipulation Language
# - insert
# - update
# - delete
# 3. TCL: Transaction Control Language
# - commit
# - rollback
# - savepoint
# 4. DQL: Data Query Language
# - select
# + [markdown] slideshow={"slide_type": "slide"}
# ## Select - basic form
#
# ```sql
# SELECT [DISTINCT|ALL ] { * | [fieldExpression [AS newName]}
# FROM tableName [alias]
# [WHERE condition]
# [GROUP BY fieldName(s)]
# [HAVING condition] ORDER BY fieldName(s)
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Select - PostgreSQL form
# <img src="./img/select_pg.png" width="40%" height="40%">
#
# [source](https://www.postgresql.org/docs/current/sql-select.html)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Tutorial database
# <img src='./img/dvd-rental-sample-database-diagram.png' width="30%" height="30%">
# + [markdown] slideshow={"slide_type": "slide"}
# ## Select on the begin
#
# **Query 1**
# ```sql
# select 'a'
# ```
# **result:**
#
# | ??column??, text |
# |:---------------: |
# | "a" |
#
# **Query 2**
# ```sql
# select 4-(4+4)*4
# ```
# **result:**
#
# | ??column??, integer |
# |:---------------------:|
# | -28 |
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Alias
#
# **Query 1**
# ```sql
# select 'a' as "char"
# ```
# **result:**
#
# | char, text |
# |:---------------: |
# | "a" |
#
# **Query 2**
# ```sql
# select 4-(4+4)*4 as "calculation"
# ```
# **result:**
#
# | calculation, integer |
# |:---------------------:|
# | -28 |
# + [markdown] slideshow={"slide_type": "slide"}
# ## Select and function
#
# **Query 1**
# ```sql
# select upper('Anneth')
# ```
# **result:**
#
# | **upper**, text |
# |:-----------------:|
# | "ANNETH" |
#
# **Query 2**
# ```sql
# select sqrt (4*4*4*4)
# ```
# **result:**
#
# | **sqrt**, double precision|
# |:-------------------------:|
# | 16 |
# + [markdown] slideshow={"slide_type": "slide"}
# ## Select from table
#
# Select all column from table:
#
# ```sql
# select * from actor
# ```
#
# **result:**
#
# | actor_id | first_name | last_name | last_update |
# |:--------: |:-----------: |:------------: |:----------------------: |
# | 1 | Penelope | Guiness | 2013-05-26 14:47:57.62 |
# | 2 | Nick | Wahlberg | 2013-05-26 14:47:57.62 |
# | 3 | Ed | Chase | 2013-05-26 14:47:57.62 |
# | 4 | Jennifer | Davis | 2013-05-26 14:47:57.62 |
# | 5 | Johnny | Lollobrigida | 2013-05-26 14:47:57.62 |
# | 6 | Bette | Nicholson | 2013-05-26 14:47:57.62 |
# | 7 | Grace | Mostel | 2013-05-26 14:47:57.62 |
# | 8 | Matthew | Johansson | 2013-05-26 14:47:57.62 |
# | ... | ... | ... | ... |
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Select from table chosen columns
#
# ```sql
# select first_name, last_name from actor
# ```
# **result:**
#
# | first_name | last_name |
# |:-----------: |:------------: |
# | Penelope | Guiness |
# | Nick | Wahlberg |
# | Ed | Chase |
# | Jennifer | Davis |
# | Johnny | Lollobrigida |
# | Bette | Nicholson |
# | Grace | Mostel |
# | Matthew | Johansson |
# | ... | ... |
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Select from table chosen columns and concatenation results
#
# ```sql
# select Concat('First Name: ',first_name, ' Last Name: ', last_name) as "My text"
# from actor
# ```
#
# **result:**
#
# | My text |
# |:-----------------------------------------|
# |First Name: Penelope Last Name: Guiness |
# |First Name: Nick Last Name: Wahlberg |
# |First Name: Ed Last Name: Chase |
# |First Name: Jennifer Last Name: Davis |
# |First Name: Johnny Last Name: Lollobrigida|
# |First Name: Bette Last Name: Nicholson |
# |First Name: Grace Last Name: Mostel |
# |First Name: Matthew Last Name: Johansson |
# | ... |
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Select from table chosen columns and concatenation part 2
#
# ```sql
# select Concat('First Name: ',first_name, ' Last Name: ', last_name) as "My text",
#
# last_update from actor
# ```
#
# **result:**
#
# | My text | last_update |
# |:--------------------------------------------- |:------------------------ |
# | First Name: Penelope Last Name: Guiness | 2013-05-26 14:47:57.62 |
# | First Name: Nick Last Name: Wahlberg | 2013-05-26 14:47:57.62 |
# | First Name: Ed Last Name: Chase | 2013-05-26 14:47:57.62 |
# | First Name: Jennifer Last Name: Davis | 2013-05-26 14:47:57.62 |
# | First Name: Johnny Last Name: Lollobrigida | 2013-05-26 14:47:57.62 |
# | First Name: Bette Last Name: Nicholson | 2013-05-26 14:47:57.62 |
# | First Name: Grace Last Name: Mostel | 2013-05-26 14:47:57.62 |
# | First Name: Matthew Last Name: Johansson | 2013-05-26 14:47:57.62 |
# |... | ... |
# + [markdown] slideshow={"slide_type": "slide"}
# ## Select limit
# ```sql
# select first_name, last_name from actor limit 10
# ```
#
# **result:**
#
# | first_name | last_name |
# |:------------: |:--------------:|
# | Penelope | Guiness |
# | Nick | Wahlberg |
# | Ed | Chase |
# | Jennifer | Davis |
# | Johnny | Lollobrigida |
# | Bette | Nicholson |
# | Grace | Mostel |
# | Matthew | Johansson |
# | Joe | Swank |
# | Christian | Gable |
#
# - the *limit* value must be positive
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Select limit and offset
#
# ```sql
# select first_name, last_name from actor limit 5 offset 5
# ```
# **result:**
#
# | first_name | last_name |
# |:------------: |:--------------:|
# | Bette | Nicholson |
# | Grace | Mostel |
# | Matthew | Johansson |
# | Joe | Swank |
# | Christian | Gable |
#
# - the *offset* value must be positive
# + [markdown] slideshow={"slide_type": "slide"}
# ## Order by
#
# - Used to sort the result-set in ascending (ASC) or descending order (DESC)
# - Must be used before LIMIT
# - By default, ORDER BY sorts in ASC mode
#
# ```sql
# select first_name, last_name from actor order by first_name ASC limit 10
# ```
# **result:**
#
# | first_name | last_name |
# |:----------: |:-----------: |
# | Adam | Grant |
# | Adam | Hopper |
# | Al | Garland |
# | Alan | Dreyfuss |
# | Albert | Johansson |
# | Albert | Nolte |
# | Alec | Wayne |
# | Angela | Witherspoon |
# | Angela | Hudson |
# | Angelina | Astaire |
# + [markdown] slideshow={"slide_type": "slide"}
# ## Select with distinct
#
# **Query 1**
# ```sql
# SELECT first_name FROM actor
# ```
# Return 200 first names
#
# **Query 2**
# ```sql
# SELECT DISTINCT first_name FROM actor
# ```
# Return 128 first names
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Select with distinct part 2
#
# **Query 1**
# ```sql
# SELECT first_name, last_name FROM actor
# ```
# Return 200 first names
#
# **Query 2**
# ```sql
# SELECT DISTINCT (first_name, last_name) FROM actor
# ```
# Return 199 first names
# + [markdown] slideshow={"slide_type": "slide"}
# ## Query Explain
#
# - Returns the execution plan which PostgreSQL planner generates for a given statement.
# - Shows information about the tables involved in the query, the type of operation (index scan, ordering, etc.) and the kind of join algorithm that will be used
# - The most important results of Explain are the start-up cost before the first row can be returned and the total cost to return the complete result set
#
# ```sql
# EXPLAIN [ ( option [, ...] ) ] statement
# EXPLAIN [ ANALYZE ] [ VERBOSE ] statement
# ```
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Explain option
#
# options:
#
# - ANALYZE [ boolean ] - default FALSE
# - VERBOSE [ boolean ] - default FALSE
# - COSTS [ boolean ] - default TRUE
# - BUFFERS [ boolean ] - default FALSE
# - TIMING [ boolean ] - default TRUE
# - SUMMARY [ boolean ] - default TRUE
# - FORMAT { TEXT | XML | JSON | YAML } - default TEXT
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Explain option part 2
#
# - Analyze
# - Option causes the sql_statement to be executed first and then actual run-time statistics.
# - Return: total elapsed time expended within each plan node, the number of rows it actually returned.
# - Verbose
# - Display additional information regarding the plan
# - Return: output column list for each node in the plan tree, schema-qualify table and function names, always label variables in expressions with their range table alias, and always print the name of each trigger for which statistics are displayed
# - COSTS
# - estimated startup and total cost of each plan node, as well as the estimated number of rows (Index Scan) and the estimated width of each row (in bytes of the returned rows)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Explain example 1
#
# ```sql
# EXPLAIN
# select first_name, last_name from actor order by first_name ASC limit 10
# ```
# <img src="./img/explain_basic.png">
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Explain example 2
#
# ```sql
# EXPLAIN ANALYZE VERBOSE
# select first_name, last_name from actor order by first_name ASC limit 10
# ```
# <img src="./img/explain_pro.png">
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Explain example 3
#
# ```sql
# EXPLAIN (ANALYZE TRUE, VERBOSE True, BUFFERS TRUE)
# select first_name, last_name from actor order by first_name ASC limit 10
# ```
# <img src="./img/explain_full.png">
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Where in select
#
# Where used condition to filter the rows returned from the SELECT statement.
#
# Standard operators in Where:
#
# | Operator | Description |
# |:--------: |:---------------------: |
# | = | Equal |
# | > | Greater than |
# | < | Less than |
# | >= | Greater than or equal |
# | <= | Less than or equal |
# | <> or != | Not equal |
# | AND | Logical operator AND |
# | OR | Logical operator OR |
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Where examples
#
# **Query 1**
# ```sql
# select * from actor where actor_id < 5;
# ```
# **Query 2**
# ```sql
# select * from actor where actor_id < 10 and actor_id > 5;
# ```
# **Query 3**
# ```sql
# select * from actor where actor_id < 10 or actor_id > 5;
# ```
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Result Query 1
#
# | actor_id | first_name | last_name | last_update |
# |:--------: |:----------: |:----------: |:------------------------: |
# | 1 | "Penelope" | "Guiness" | "2013-05-26 14:47:57.62" |
# | 2 | "Nick" | "Wahlberg" | "2013-05-26 14:47:57.62" |
# | 3 | "Ed" | "Chase" | "2013-05-26 14:47:57.62" |
# | 4 | "Jennifer" | "Davis" | "2013-05-26 14:47:57.62" |
# + [markdown] slideshow={"slide_type": "slide"}
# ## Function in select
#
# - A function is a set of SQL statements that perform a specific task.
#
# - In SQL Server standard we have many [predefined functions](https://www.w3schools.com/sql/sql_ref_sqlserver.asp)
#
# - Full list of PostgreSQL predefined function [here](https://www.postgresql.org/docs/current/functions.html)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Function in select example
# ```sql
# select * from actor where length(first_name) < 3;
# ```
#
# Result:
#
# | actor_id | first_name | last_name | last_update |
# |:--------: |:----------: |:-----------: |:------------------------: |
# | 3 | "Ed" | "Chase" | "2013-05-26 14:47:57.62" |
# | 136 | "Ed" | "Mansfield" | "2013-05-26 14:47:57.62" |
# | 165 | "Al" | "Garland" | "2013-05-26 14:47:57.62" |
# | 179 | "Ed" | "Guiness" | "2013-05-26 14:47:57.62" |
# + [markdown] slideshow={"slide_type": "slide"}
# ## Between in select
#
# - Match a value against a range of values
# - Is equivalent to the condition -> col_name >= value1 and col_name <= value2
#
# ```sql
# select * from actor where length(first_name) between 2 and 3 limit 5;
# ```
#
# Result:
#
# | actor_id | first_name | last_name | last_update |
# |:--------: |:----------: |:---------: |:------------------------: |
# | 3 | "Ed" | "Chase" | "2013-05-26 14:47:57.62" |
# | 9 | "Joe" | "Swank" | "2013-05-26 14:47:57.62" |
# | 13 | "Uma" | "Wood" | "2013-05-26 14:47:57.62" |
# | 18 | "Dan" | "Torn" | "2013-05-26 14:47:57.62" |
# | 19 | "Bob" | "Fawcett" | "2013-05-26 14:47:57.62" |
# + [markdown] slideshow={"slide_type": "subslide"}
# ## EXPLAIN between
#
# ```sql
# EXPLAIN ANALYZE VERBOSE
# select * from actor where length(first_name) between 2 and 3;
# ```
# Result:
# "Seq Scan on public.actor (cost=0.00..6.00 rows=1 width=25) (actual time=0.010..0.031 rows=28 loops=1)"
#
# " Output: actor_id, first_name, last_name, last_update"
#
# " Filter: ((length((actor.first_name)::text) >= 2) AND (length((actor.first_name)::text) <= 3))"
#
# " Rows Removed by Filter: 172"
#
# "Planning Time: 0.045 ms"
#
# "Execution Time: 0.040 ms"
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## EXPLAIN between
#
# ```sql
# EXPLAIN ANALYZE VERBOSE
# select * from actor where length(first_name) >= 2 and length(first_name) <=3 ;
# ```
# Result:
# "Seq Scan on public.actor (cost=0.00..6.00 rows=1 width=25) (actual time=0.013..0.035 rows=28 loops=1)"
#
# " Output: actor_id, first_name, last_name, last_update"
#
# " Filter: ((length((actor.first_name)::text) >= 2) AND (length((actor.first_name)::text) <= 3))"
#
# " Rows Removed by Filter: 172"
#
# "Planning Time: 0.066 ms"
#
# "Execution Time: 0.046 ms"
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## 'In' clause in select
#
# - IN operator is used in the WHERE clause to check if a value matches any value in a list of values.
#
# ```sql
# select * from actor where actor_id in (1,20,30,18);
# ```
# Result:
#
# | actor_id | first_name | last_name | last_update |
# |:--------: |:----------: |:---------: |:------------------------: |
# | 1 | "Penelope" | "Guiness" | "2013-05-26 14:47:57.62" |
# | 18 | "Dan" | "Torn" | "2013-05-26 14:47:57.62" |
# | 20 | "Lucille" | "Tracy" | "2013-05-26 14:47:57.62" |
# | 30 | "Sandra" | "Peck" | "2013-05-26 14:47:57.62" |
# + [markdown] slideshow={"slide_type": "slide"}
# ## 'In' operator in select part 2
# ```sql
# select * from actor where first_name in ('Ed','Al', 'Carmen', 'Jude');
# ```
#
# Result:
#
# | actor_id | first_name | last_name | last_update |
# |:--------: |:----------: |:-----------: |:------------------------: |
# | 3 | "Ed" | "Chase" | "2013-05-26 14:47:57.62" |
# | 52 | "Carmen" | "Hunt" | "2013-05-26 14:47:57.62" |
# | 57 | "Jude" | "Cruise" | "2013-05-26 14:47:57.62" |
# | 136 | "Ed" | "Mansfield" | "2013-05-26 14:47:57.62" |
# | 165 | "Al" | "Garland" | "2013-05-26 14:47:57.62" |
# | 179 | "Ed" | "Guiness" | "2013-05-26 14:47:57.62" |
# + [markdown] slideshow={"slide_type": "slide"}
# ## Subquery in select
#
# In from:
#
# ```sql
# select * from (select * from actor where length(first_name) < 3) tmp_actor
# where tmp_actor.actor_id < 100;
# ```
#
# Result:
#
# | actor_id | first_name | last_name | last_update |
# |:--------: |:----------: |:-----------: |:------------------------: |
# | 3 | "Ed" | "Chase" | "2013-05-26 14:47:57.62" |
# + [markdown] slideshow={"slide_type": "slide"}
# ## Subquery in select
#
# In from:
#
# ```sql
# select * from (select first_name, last_name from actor where length(first_name) < 3) tmp_actor
# where tmp_actor.actor_id < 100;
# ```
#
# Result:
#
# ERROR: column tmp_actor.actor_id does not exist
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Subquery in select
#
# In from:
#
# ```sql
# select * from (select first_name, last_name from actor where length(first_name) < 3) tmp_actor
# where length(tmp_actor.first_name) < 3;
# ```
#
# Result:
#
# | first_name | last_name |
# |:----------: |:-----------: |
# |"Ed"| "Chase"|
# |"Ed"| "Mansfield"|
# |"Al"| "Garland"|
# |"Ed"| "Guiness"|
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Subquery in select part 2
# In where:
# ```sql
# select * from actor where actor_id in (select actor_id from actor where length(first_name) < 3) and actor_id < 100;
# ```
# Result:
#
# | actor_id | first_name | last_name | last_update |
# |:--------: |:----------: |:-----------: |:------------------------: |
# | 3 | "Ed" | "Chase" | "2013-05-26 14:47:57.62" |
# + [markdown] slideshow={"slide_type": "subslide"}
# ## EXPLAIN subquery
#
# ```sql
# EXPLAIN ANALYZE VERBOSE
# select * from (select * from actor where length(first_name) < 3) tmp_actor where tmp_actor.actor_id < 100;
# ```
#
# "Seq Scan on public.actor (cost=0.00..5.50 rows=33 width=25) (actual time=0.031..0.044 rows=1 loops=1)"
#
# " Output: actor.actor_id, actor.first_name, actor.last_name, actor.last_update"
#
# " Filter: ((actor.actor_id < 100) AND (length((actor.first_name)::text) < 3))"
#
# " Rows Removed by Filter: 199"
#
# "Planning Time: 0.074 ms"
#
# "Execution Time: 0.054 ms"
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## EXPLAIN subquery part 2
#
# ```sql
# EXPLAIN ANALYZE VERBOSE
# select * from actor where actor_id in (select actor_id from actor where length(first_name) < 3) and actor_id < 100;
# ```
#
# "Hash Join (cost=5.84..10.60 rows=33 width=25) (actual time=0.036..0.049 rows=1 loops=1)"
#
# " Output: actor.actor_id, actor.first_name, actor.last_name, actor.last_update"
#
# " Inner Unique: true"
#
# " Hash Cond: (actor.actor_id = actor_1.actor_id)"
#
# " -> Seq Scan on public.actor (cost=0.00..4.50 rows=99 width=25) (actual time=0.008..0.017 rows=99 loops=1)"
#
# ...
#
# " -> Hash (cost=5.00..5.00 rows=67 width=4) (actual time=0.023..0.023 rows=4 loops=1)"
#
# ...
#
# "Planning Time: 0.214 ms"
#
# "Execution Time: 0.068 ms"
| Advanced databases/Lecture 6(Data Query Language - DQL)/Lecture6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Use GriddedModel3D object to manipulate Ndarray NAM model
#
# First we will instantiate the GriddedModel3D object using the ndaray and header data created from the NAM Piece-Wise model. Then we will see how to:
# * get a depth slice by index
# * get a smoothed model
# * get a subsampled model
# * get a depth slice by z-depth coordinate (interpolated slice)
# * get a volume slice of the model from a BoundingBox (interpolated volume)
# * write a .vtr file of the sliced volume so that it can beviewed in 3D
# ### Step 0
#
# Load packages
#load all packages
from sys import argv
import numpy as np
import pickle
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from gnam.model.gridmod3d import gridmod3d as gm
from gnam.model.bbox import bbox as bb
import copy
# ### Step 1
#
# Read and decompress NAM ndarray model
# +
# Fully-qualified name of the compressed raw NAM model file
ifqn = './model_data/raw_nam_2017_vp_vs_rho_Q_model_dz10_depth6000.npz'

# Load the compressed archive and pull out the 4D property ndarray
data = np.load(ifqn)
props = data['props']  # 4D ndarray of the subsurface model

# Header arrays: each holds (origin, delta, npoints) for one axis
xdata, ydata, zdata = data['xd'], data['yd'], data['zd']
print('Header Data:')
print(' x:',xdata)
print(' y:',ydata)
print(' z:',zdata)
print()

# Unpack the individual grid parameters from each header
xmin, dx, nx = xdata[0], xdata[1], int(xdata[2])
ymin, dy, ny = ydata[0], ydata[1], int(ydata[2])
zmin, dz, nz = zdata[0], zdata[1], int(zdata[2])

# The max extents are derivable from origin/delta/count
xmax = xmin + (nx - 1)*dx
ymax = ymin + (ny - 1)*dy
zmax = (-zmin) + (nz - 1)*dz  # sign flip on the z origin, matching later cells

print('Individual Header Data Parameters:')
print(' xmin,dx,nx,xmax = %d,%d,%d,%d' %(xmin,dx,nx,xmax))
print(' ymin,dy,ny,ymax = %d,%d,%d,%d' %(ymin,dy,ny,ymax))
print(' zmin,dz,nz,zmax = %d,%d,%d,%d' %(zmin,dz,nz,zmax))
# -
# ### Step 2
#
# Instantiate the GriddelModel3D object
# Number of subsurface properties is the leading axis of the 4D ndarray
nsub_props = props.shape[0]
axes_order = {'X':0,'Y':1,'Z':2} # maps axis label -> ndarray axis index (after the property axis)
# Wrap the raw ndarray plus grid geometry (counts, spacings, origin) in a GriddedModel3D
gm3d = gm(props,nsub_props,axes_order,(nx,ny,nz),(dx,dy,dz),(xmin,ymin,zmin))
print('gm3d.shape:',gm3d.shape)
# ### Step 3
#
# To QC, get a depth slice and plot it
# +
z_idx = 100  # a z-axis INDEX, not meters — map it through the z-header to get depth
prop_idx = 0

# indexing order is (property, x, y, z); transpose so rows run along y
slice2d = gm3d[prop_idx, :, :, z_idx].T
print('surf.shape',slice2d.shape)

# color normalization over the slice's value range
lo = np.min(slice2d)
hi = np.max(slice2d)
print('vp_min:',lo)
print('vp_max:',hi)
cnorm = Normalize(lo, hi)

# build an (x, y) coordinate pair for every grid point
xcoords = xmin + dx*np.arange(nx)
ycoords = ymin + dy*np.arange(ny)
grid_xy = np.transpose([np.tile(xcoords, len(ycoords)), np.repeat(ycoords, len(xcoords))])

# scatter the slice, colored by property value
# NOTE(review): title says z=0 but this slice is at index 100 — confirm intent
fig, axis = plt.subplots(1,figsize=(8,8))
pts = axis.scatter(grid_xy[:,0],grid_xy[:,1],s=1,c=slice2d.flatten(),cmap=plt.cm.jet,norm=cnorm)
axis.set_title('Full NAM Model Surface (z=0)')
fig.colorbar(pts)
plt.show()
# -
# -
# ### Step 4
#
# Now we will smooth the model. Afterwards, we will plot another depth slice to QC. Reminder: one must understand their header data to be able to do this correctly. The smoothing is done with a Gaussian smoothing function which takes sigmas in number of SAMPLES. For example, if $dz=10m$ and $dx=dy=50m$ then the sigma for $z$ should be 5X more samples than it is for $x$ and $y$.
#
# NOTE: This step will take some time. Maybe 0.5 - 2 hours depending on the machine.
# +
# set sigmas (in SAMPLES per axis; the physical sigma should be equal in x/y/z)
zsmp = 5 # 1=50m sigma, 2=100m sigma, 3=150m sigma, etc...
z_sig = zsmp*(50/dz) # tested at dz=10m and was good, so assume scale by that
# scale the x/y sigmas so all three axes share the same sigma in meters
y_sig = z_sig*(dz/dy)
x_sig = z_sig*(dz/dx)
# physical sigma = samples * sample spacing
# (generalized: was hard-coded `y_sig*50`, which silently assumed dy=50m)
sig_meters = y_sig*dy
print('sigma (m):',sig_meters)
# Gaussian-smooth the model in place (slow: may take 0.5 - 2 hours)
gm3d.smoothXYZ(x_sig,y_sig,z_sig)
# -
# ### Step 5
#
# Prepare to compress and pickle smoothed GriddedModel3D: make sure the path and file name are good and verify the x,y,z "header" data.
# +
# Pull the smoothed ndarray and the grid geometry back out of the model object
smth_props = gm3d.getNPArray()
xmin = gm3d.get_gorigin()[0]
dx = gm3d.get_deltas()[0]
nx = gm3d.get_npoints()[0]
ymin = gm3d.get_gorigin()[1]
dy = gm3d.get_deltas()[1]
ny = gm3d.get_npoints()[1]
zmin = gm3d.get_gorigin()[2]
dz = gm3d.get_deltas()[2]
nz = gm3d.get_npoints()[2]
# Re-pack each axis header as (origin, delta, npoints)
xdata = np.array([xmin,dx,nx])
ydata = np.array([ymin,dy,ny])
zdata = np.array([zmin,dz,nz])
print('Header Data:')
print(' x-header:',xdata.astype(np.int32))
print(' y-header:',ydata.astype(np.int32))
print(' z-header:',zdata.astype(np.int32))
print()
# Max depth from the header (sign flip on the z origin)
maxDepth = (-zmin) + (nz-1)*dz
# Build the fully-qualified output name; spacing/depth/sigma are encoded in it
out_dir = './model_data/'
fname = 'smoothed_full_nam_2017_vp_vs_rho_Q_model_dz'
fname += str(int(dz)) + '_depth' + str(int(maxDepth)) + '_sig' + str(int(sig_meters)) + '.npz'
ofqn = out_dir + fname
print('Output FQN:\n',ofqn)
# -
# ### Step 6
#
# Compress and store the numpy array of the SMOOTHED NAM model
np.savez_compressed(ofqn,props=smth_props,xd=xdata,yd=ydata,zd=zdata)
print(ofqn)
# ### Step 7
#
# Decompress the ndarray of the smoothed NAM model and instantiate a new GriddedModel3D object for QC'ing
# +
# Copy the FQN from the cell above, or put in the Fully Qualified Name of the file
ifqn = './model_data/smoothed_full_nam_2017_vp_vs_rho_Q_model_dz10_depth6000_sig250.npz'
# Decompress the archive
data = np.load(ifqn)
props = data['props'] # 4D ndarray of the subsurface model
# Header/meta data arrays: (origin, delta, npoints) per axis
xdata = data['xd']
ydata = data['yd']
zdata = data['zd']
print('Header Data:')
print(' x:',xdata)
print(' y:',ydata)
print(' z:',zdata)
print()
# Unpack the individual grid parameters from the headers
xmin = xdata[0]
dx = xdata[1]
nx = int(xdata[2])
xmax = xmin + (nx-1)*dx # derivable from origin/delta/count
ymin = ydata[0]
dy = ydata[1]
ny = int(ydata[2])
ymax = ymin + (ny-1)*dy # derivable from origin/delta/count
zmin = zdata[0]
dz = zdata[1]
nz = int(zdata[2])
zmax = (-zmin) + (nz-1)*dz # derivable; sign flip on the z origin
print('Individual Header Data Parameters:')
print(' xmin,dx,nx,xmax = %d,%d,%d,%d' %(xmin,dx,nx,xmax))
print(' ymin,dy,ny,ymax = %d,%d,%d,%d' %(ymin,dy,ny,ymax))
print(' zmin,dz,nz,zmax = %d,%d,%d,%d' %(zmin,dz,nz,zmax))
# Re-instantiate the GriddedModel3D object from the decompressed data
nsub_props = props.shape[0]
axes_order = {'X':0,'Y':1,'Z':2} # maps axis label -> ndarray axis index
gm3d = gm(props,nsub_props,axes_order,(nx,ny,nz),(dx,dy,dz),(xmin,ymin,zmin))
print('gm3d:',gm3d)
print('gm3d.shape:',gm3d.shape)
# free up some memory if needed
#del props
# -
# ### Step 8
#
# Get another depth slice and plot it to QC smoothing and the compress/decompress
# +
depth = 100 # This is an INDEX! So, know your header data and its mapping to the model!
iprop = 0
# Model indexing order is (property, x, y, z); transpose so rows run along y
surf = gm3d[iprop, :, :, depth].T
print('surf.shape',surf.shape)
# Min/max of the slice to normalize the colormap
vp_min = np.min(surf)
vp_max = np.max(surf)
print('vp_min:',vp_min)
print('vp_max:',vp_max)
surf_norm = Normalize(vp_min,vp_max)
# Build an (x, y) coordinate pair for every grid point
xc = dx*np.arange(nx) + xmin
yc = dy*np.arange(ny) + ymin
xyc = np.transpose([np.tile(xc, len(yc)), np.repeat(yc, len(xc))])
# Scatter-plot the slice colored by property value
# NOTE(review): title says z=0 but this slice is at index 100 — confirm intent
fig, ax = plt.subplots(1,figsize=(8,8))
sc = ax.scatter(xyc[:,0],xyc[:,1],s=1,c=surf.flatten(),cmap=plt.cm.jet,norm=surf_norm)
ax.set_title('Full NAM Model Surface (z=0)')
fig.colorbar(sc)
plt.show()
# -
# ### Step 9
#
# Now we are going to subsample the model. One may want to do this if they can use a coarser grid, which reduces the memory footprint and the computational cost associated with simulations that use the model.
#
# Here we are going to subsample the model such that $dz=dx=dy=100m$. Again, PAY ATTENTION to your HEADER data. The argument is[xyz] is the first SAMPLE, and id[xyz] is the subsampling interval in SAMPLES.
#
# NOTE: We will start the subsampled grid at 5 samples in $z$. Why? In the future we will use this model to generate a mesh. And, each cell will be 100 meters. We will use every point as a proxy for an entire cell in the mesh. For $x$ and $y$ this is fine because the mesh will be half a cell larger on both ends, but for $z$ it may not make sense to have the mesh begin above ground (negative $z$).
sub_dxy = 2 # keep every 2nd sample in x/y -> 100m spacing
sub_dz = 10 # keep every 10th sample in z -> 100m spacing
sub_isz = 5 # start at the 5th z-sample (50m) so a 100m mesh cell centered there starts at z=0m
gm3d.subsample(isz=sub_isz,idz=sub_dz,idx=sub_dxy,idy=sub_dxy) # idx=idy=2 by default
print('gm3d:',gm3d)
# ### Step 10
#
# Again, we will compress and pickle the now subsampled and smoothed model. So here we prepare to compress and pickle and make sure the path and file name are good and verify the x,y,z "header" data.
# +
# Pull the subsampled ndarray and the updated grid geometry out of the model object
subsmp_props = gm3d.getNPArray()
xmin = gm3d.get_gorigin()[0]
dx = gm3d.get_deltas()[0]
nx = gm3d.get_npoints()[0]
ymin = gm3d.get_gorigin()[1]
dy = gm3d.get_deltas()[1]
ny = gm3d.get_npoints()[1]
zmin = gm3d.get_gorigin()[2]
dz = gm3d.get_deltas()[2]
nz = gm3d.get_npoints()[2]
# Re-pack each axis header as (origin, delta, npoints)
xdata = np.array([xmin,dx,nx])
ydata = np.array([ymin,dy,ny])
zdata = np.array([zmin,dz,nz])
print('Header Data:')
print(' x-header:',xdata.astype(np.int32))
print(' y-header:',ydata.astype(np.int32))
print(' z-header:',zdata.astype(np.int32))
print()
# Max depth from the header
maxDepth = nz*dz # NOTICE: this is different from how it was calculated previously.
# Build the fully-qualified output name; spacing/depth/sigma are encoded in it
out_dir = './model_data/'
fname = 'subsmp_smth_full_nam_2017_vp_vs_rho_Q_model_dz'
fname += str(int(dz)) + '_depth' + str(int(maxDepth)) + '_sig' + str(int(sig_meters)) + '.npz'
ofqn = out_dir + fname
print('Output FQN:\n',ofqn)
# -
# ### Step 11
#
# Compress and store the numpy array of the SUBSAMPLED, SMOOTHED NAM model
np.savez_compressed(ofqn,props=subsmp_props,xd=xdata,yd=ydata,zd=zdata)
print(ofqn)
# ### Step 12
#
# Decompress the ndarray of the subsampled, smoothed NAM model and instantiate a new GriddedModel3D object for QC'ing
# +
# Copy the FQN from the cell above, or put in the Fully Qualified Name of the file
ifqn = './model_data/subsmp_smth_full_nam_2017_vp_vs_rho_Q_model_dz100_depth6000_sig250.npz'
# Decompress the archive
data = np.load(ifqn)
props = data['props'] # 4D ndarray of the subsurface model
# Header/meta data arrays: (origin, delta, npoints) per axis
xdata = data['xd']
ydata = data['yd']
zdata = data['zd']
print('Header Data:')
print(' x:',xdata)
print(' y:',ydata)
print(' z:',zdata)
print()
# Unpack the individual grid parameters from the headers
xmin = xdata[0]
dx = xdata[1]
nx = int(xdata[2])
xmax = xmin + (nx-1)*dx # derivable from origin/delta/count
ymin = ydata[0]
dy = ydata[1]
ny = int(ydata[2])
ymax = ymin + (ny-1)*dy # derivable from origin/delta/count
zmin = zdata[0]
dz = zdata[1]
nz = int(zdata[2])
zmax = (-zmin) + (nz-1)*dz # derivable; sign flip on the z origin
print('Individual Header Data Parameters:')
print(' xmin,dx,nx,xmax = %d,%d,%d,%d' %(xmin,dx,nx,xmax))
print(' ymin,dy,ny,ymax = %d,%d,%d,%d' %(ymin,dy,ny,ymax))
print(' zmin,dz,nz,zmax = %d,%d,%d,%d' %(zmin,dz,nz,zmax))
# Re-instantiate the GriddedModel3D object from the decompressed data
nsub_props = props.shape[0]
axes_order = {'X':0,'Y':1,'Z':2} # maps axis label -> ndarray axis index
gm3d = gm(props,nsub_props,axes_order,(nx,ny,nz),(dx,dy,dz),(xmin,ymin,zmin))
print('gm3d:',gm3d)
print('gm3d.shape:',gm3d.shape)
# free up some memory if needed
#del props
# -
# ### Step 13
#
# Get another depth slice and plot it to QC subsampling of the smoothed model as well as the compress/decompress.
#
# HOWEVER, this time we will get an interpolated slice. We need to change the depth index that we use because the model has been subsampled. By indexing alone, we can not get the exact same depth slice because we have started the subsampled model at $z=50m$, so we will use a FLOAT_INDEX so that we can get the same depth of 1000m used above.
# +
depth = 1000.0 # a FLOAT depth coordinate in meters (not an index) -> 1000m
iprop = 0
# Interpolated depth slice at the requested z-coordinate
surf = gm3d.depthValsSliceFromZFloat(depth,iprop)
print('surf.shape',surf.shape)
# Min/max of the slice to normalize the colormap
vp_min = np.min(surf)
vp_max = np.max(surf)
print('vp_min:',vp_min)
print('vp_max:',vp_max)
surf_norm = Normalize(vp_min,vp_max)
# Build an (x, y) coordinate pair for every grid point
xc = dx*np.arange(nx) + xmin
yc = dy*np.arange(ny) + ymin
xyc = np.transpose([np.tile(xc, len(yc)), np.repeat(yc, len(xc))])
# Scatter-plot the interpolated slice
fig, ax = plt.subplots(1,figsize=(8,8))
sc = ax.scatter(xyc[:,0],xyc[:,1],s=1,c=surf,cmap=plt.cm.jet,norm=surf_norm)
# BUGFIX: title previously claimed (z=0); this slice is interpolated at `depth` m
ax.set_title('Full NAM Model Depth Slice (z=%dm)' % depth)
fig.colorbar(sc)
plt.show()
# -
# ### Step 14
#
# Here we will create a BoundingBox (just a 4-sided, 2D polygon) which we can use to slice a volume where the $x$ and $y$ extents are contained within the polygon. The resulting GriddedModel3D will have $x'$ and $y'$ coordinates which are perpendicular to each other, but oblique to the initial $x$ and $y$.
#
# The BoundingBox can be rotated and translated. And will be plotted on top of the previous depth slice. The solid-line box is the initial bbox, and the dotted line is the rotated and translated bbox.
# +
# Compute the four bbox corners at 1/3 and 2/3 of the model's x/y extent
# Top left corner
tlc_y = (2/3)*dy*ny + ymin
tlc_x = (1/3)*dx*nx + xmin
# Top right corner
trc_y = tlc_y
trc_x = (2/3)*dx*nx + xmin
# Lower right corner
lrc_y = (1/3)*dy*ny + ymin
lrc_x = trc_x
# Lower left corner
llc_y = lrc_y
llc_x = tlc_x
# Closed point loop (surface loop) over the corners, starting at the lower-left;
# the first point is repeated at the end to close the polygon.
# (Removed a dead, no-op alternative loop ordering that was left as a string block.)
c_loop = np.array([[llc_x,llc_y],[tlc_x,tlc_y],
                   [trc_x,trc_y],[lrc_x,lrc_y],
                   [llc_x,llc_y]])
# Instantiate bbox from surface/corner loop
mybbox = bb(c_loop)
# Make a copy of the bbox and then rotate the copy (degrees)
rot_bbox = copy.deepcopy(mybbox)
rot_deg = 45
rot_bbox.rotate(rot_deg)
# Translate the rotated bbox (x, y in model coordinates)
tran_x = 10500
tran_y = -5500
rot_bbox.translate(tran_x,tran_y)
# Plot both bboxes over the previous depth slice:
# solid line = original bbox, dotted line = rotated+translated copy
fig, ax = plt.subplots(1,figsize=(8,8))
ax.scatter(xyc[:,0],xyc[:,1],s=1,c=surf,cmap=plt.cm.jet,norm=surf_norm,zorder=0)
ax.plot(mybbox.getCLoop()[:,0],mybbox.getCLoop()[:,1],c='black',zorder=3)
ax.plot(rot_bbox.getCLoop()[:,0],rot_bbox.getCLoop()[:,1],c='black',linestyle='dotted',zorder=3)
ax.set_title('NAM Model w/ Bbox')
plt.show()
# -
# ### Step 15
#
# Compress and pickle the bounding box.
# +
# Encode the transform parameters (rotation, translation) in the output file name
ofqn = './model_data/rot_bbox_deg' + str(int(rot_deg))
ofqn += '_tranX' + str(int(tran_x)) + '_tranY' + str(int(tran_y)) + '.npz'
print('Output FQN:',ofqn)
print()
# Pickle the bbox; the context manager guarantees the file is closed even on error
print('bbox before pickle:\n',rot_bbox)
with open(ofqn, 'wb') as f:
    pickle.dump(rot_bbox, f)
print()
# -
# ### Step 16
#
# Decompress bounding box for QC'ing
# Unpickle the bbox to QC the round-trip; the context manager closes the file
ifqn = './model_data/rot_bbox_deg45_tranX10500_tranY-5500.npz' # copy from output above
with open(ifqn, 'rb') as f:
    dill_bbox = pickle.load(f) # 'Dill' get it?
print('bbox after pickle :\n',dill_bbox)
# ### Step 17
#
# Using the rotated and translated bbox created in the previous cell, we will get a new interpolated volume slice.
# Interpolated volume slice of the model bounded by the rotated/translated bbox
vslice_gm3d = gm3d.slice_volume_by_bbox(dill_bbox)
print(vslice_gm3d)
# ### Step 18
#
# Yet again, we will compress and pickle the now volume-sliced, subsampled, and smoothed model. So here we prepare to compress and pickle and make sure the path and file name are good and verify the x,y,z "header" data.
# +
# Pull the volume-sliced ndarray and its grid geometry out of the sliced model
vslice_props = vslice_gm3d.getNPArray()
xmin = vslice_gm3d.get_gorigin()[0]
dx = vslice_gm3d.get_deltas()[0]
nx = vslice_gm3d.get_npoints()[0]
ymin = vslice_gm3d.get_gorigin()[1]
dy = vslice_gm3d.get_deltas()[1]
ny = vslice_gm3d.get_npoints()[1]
zmin = vslice_gm3d.get_gorigin()[2]
dz = vslice_gm3d.get_deltas()[2]
nz = vslice_gm3d.get_npoints()[2]
# Re-pack each axis header as (origin, delta, npoints)
xdata = np.array([xmin,dx,nx])
ydata = np.array([ymin,dy,ny])
zdata = np.array([zmin,dz,nz])
print('Header Data:')
print(' x-header:',xdata.astype(np.int32))
print(' y-header:',ydata.astype(np.int32))
print(' z-header:',zdata.astype(np.int32))
print()
# Max depth from the header
maxDepth = nz*dz # NOTICE: this is different from how it was calculated previously.
# Build the fully-qualified output name; spacing/depth/sigma are encoded in it
out_dir = './model_data/'
fname = 'vsliced_subsmp_smth_full_nam_2017_vp_vs_rho_Q_model_dz'
fname += str(int(dz)) + '_depth' + str(int(maxDepth)) + '_sig' + str(int(sig_meters)) + '.npz'
ofqn = out_dir + fname
print('Output FQN:\n',ofqn)
# -
# ### Step 19
#
# Compress and store the numpy array of the VOLUME-SLICED, SUBSAMPLED, SMOOTHED NAM model
np.savez_compressed(ofqn,props=vslice_props,xd=xdata,yd=ydata,zd=zdata)
print(ofqn)
# ### Step 20
#
# Decompress the ndarray of the subsampled, smoothed NAM model and instantiate a new GriddedModel3D object for QC'ing
# +
# Copy the FQN from the cell above, or put in the Fully Qualified Name of the file
ifqn = './model_data/vsliced_subsmp_smth_full_nam_2017_vp_vs_rho_Q_model_dz100_depth6000_sig250.npz'
# Decompress the archive
data = np.load(ifqn)
props = data['props'] # 4D ndarray of the subsurface model
# Header/meta data arrays: (origin, delta, npoints) per axis
xdata = data['xd']
ydata = data['yd']
zdata = data['zd']
print('Header Data:')
print(' x:',xdata)
print(' y:',ydata)
print(' z:',zdata)
print()
# Unpack the individual grid parameters from the headers
xmin = xdata[0]
dx = xdata[1]
nx = int(xdata[2])
xmax = xmin + (nx-1)*dx # derivable from origin/delta/count
ymin = ydata[0]
dy = ydata[1]
ny = int(ydata[2])
ymax = ymin + (ny-1)*dy # derivable from origin/delta/count
zmin = zdata[0]
dz = zdata[1]
nz = int(zdata[2])
zmax = (-zmin) + (nz-1)*dz # derivable; sign flip on the z origin
print('Individual Header Data Parameters:')
print(' xmin,dx,nx,xmax = %d,%d,%d,%d' %(xmin,dx,nx,xmax))
print(' ymin,dy,ny,ymax = %d,%d,%d,%d' %(ymin,dy,ny,ymax))
print(' zmin,dz,nz,zmax = %d,%d,%d,%d' %(zmin,dz,nz,zmax))
# Re-instantiate a GriddedModel3D object from the decompressed data
nsub_props = props.shape[0]
axes_order = {'X':0,'Y':1,'Z':2} # maps axis label -> ndarray axis index
dill_gm3d = gm(props,nsub_props,axes_order,(nx,ny,nz),(dx,dy,dz),(xmin,ymin,zmin))
# BUGFIX: previously printed the stale `vslice_gm3d`; QC the object just built
print('dill_gm3d:',dill_gm3d)
print('dill_gm3d.shape:',dill_gm3d.shape)
# free up some memory if needed
#del props
# -
# ### Step 21
#
# Overlay a depth slice of the volume-sliced model on top of the full model. Be sure to zoom in on the corners and on the edges to QC that the sliced volume is within the bounding box and that the interpolated values make sense.
#
# NOTE: We will make use of the "notebook" windowing-backend so that we can ZOOM!
# +
# we want to be able to zoom into the corners and edges
# %matplotlib notebook
depth = 9 # This is an INDEX! should be at 1050m in depth
iprop = 0
# Model indexing order is (property, x, y, z); transpose so rows run along y
vslice_surf = dill_gm3d[iprop, :, :, depth].T
full_surf = gm3d[iprop, :, :, depth].T
print('vslice_surf.shape',vslice_surf.shape)
print('full_surf.shape',full_surf.shape)
# Normalize BOTH plots with the sliced volume's min/max so colors are comparable
vp_min = np.min(vslice_surf)
vp_max = np.max(vslice_surf)
print('vp_min:',vp_min)
print('vp_max:',vp_max)
full_surf_norm = Normalize(vp_min,vp_max)
# Global (x, y) coordinates of the grid points for both models
vslice_xyc = vslice_gm3d.getGlobalCoordsPointsXY()
full_xyc = gm3d.getGlobalCoordsPointsXY()
# Plot: zorder controls the stacking of the overlays
fig, ax = plt.subplots(1,figsize=(8,8))
# Plot the full model slice as the base layer
sc = ax.scatter(full_xyc[:,0],full_xyc[:,1],s=1,c=full_surf.flatten(),cmap=plt.cm.jet,norm=full_surf_norm,zorder=0)
# Optionally blank out the bbox area with white before the overlay
#ax.fill(dill_bbox.getCLoop()[:,0],dill_bbox.getCLoop()[:,1],c='white',zorder=1)
# Overlay the volume-slice depth slice
ax.scatter(vslice_xyc[:,0],vslice_xyc[:,1],s=1,c=vslice_surf.flatten(),cmap=plt.cm.jet,norm=full_surf_norm,zorder=2)
# Overlay a dotted outline of the bbox on top
ax.plot(dill_bbox.getCLoop()[:,0],dill_bbox.getCLoop()[:,1],c='black',linestyle='dotted',zorder=4)
# Set other plot options
ax.set_title('Full NAM Model with Slice Overlay')
fig.colorbar(sc)
plt.show()
# -
# ### Step -1
#
# Now that we have a model we want, we can use Paraview to view both it and the full model if we convert them to VTK/VTR files. So, lets do it...
#
# NOTE: Now that you know how to slice a volume out, take a look at the notebook where the Groningen events and station data are fetched, then find plot a bbox and fine-tune its shape and location for the desired coverage. Then come back here and create a volume slice.
# +
from gnam.vtkutils.write import write_vtk_gridded_model_3d
# --- Volume-sliced model ---
vk_props = dill_gm3d.getNPArray()
print('vk_props.shape:',vk_props.shape)
# Each VTK header is (origin, delta, npoints); origin stays 0 for the .vtr file
vk_xdata = np.zeros((3))
vk_ydata = np.zeros((3))
vk_zdata = np.zeros((3))
vk_xdata[1] = dill_gm3d.get_deltas()[0]
vk_ydata[1] = dill_gm3d.get_deltas()[1]
vk_zdata[1] = dill_gm3d.get_deltas()[2]
vk_xdata[2] = vk_props.shape[1]
vk_ydata[2] = vk_props.shape[2]
vk_zdata[2] = vk_props.shape[3]
print('vk_xdata:',vk_xdata)
print('vk_ydata:',vk_ydata)
print('vk_zdata:',vk_zdata)
# Copy and paste the ifqn from the previous volume-slice decompression here, WITHOUT the extension
vtk_ofqn = './model_data/vsliced_subsmp_smth_full_nam_2017_vp_vs_rho_Q_model_dz100_depth6000_sig250'
print('vtk_ofqn:',vtk_ofqn+'.vtr')
write_vtk_gridded_model_3d(vtk_ofqn,vk_props,vk_xdata,vk_ydata,vk_zdata)
print()
# --- Full (subsampled, smoothed) model ---
full_vk_props = gm3d.getNPArray()
print('full_vk_props.shape:',full_vk_props.shape)
full_vk_xdata = np.zeros((3))
full_vk_ydata = np.zeros((3))
full_vk_zdata = np.zeros((3))
full_vk_xdata[1] = gm3d.get_deltas()[0]
full_vk_ydata[1] = gm3d.get_deltas()[1]
full_vk_zdata[1] = gm3d.get_deltas()[2]
full_vk_xdata[2] = full_vk_props.shape[1]
full_vk_ydata[2] = full_vk_props.shape[2]
full_vk_zdata[2] = full_vk_props.shape[3]
print('full_vk_xdata:',full_vk_xdata)
print('full_vk_ydata:',full_vk_ydata)
print('full_vk_zdata:',full_vk_zdata)
# Copy and paste the ifqn from the previous full decompression here, WITHOUT the extension
full_vtk_ofqn = './model_data/subsmp_smth_full_nam_2017_vp_vs_rho_Q_model_dz100_depth6000_sig250'
print('full_vtk_ofqn:',full_vtk_ofqn+'.vtr')
write_vtk_gridded_model_3d(full_vtk_ofqn,full_vk_props,full_vk_xdata,full_vk_ydata,full_vk_zdata)
# BUGFIX: user-facing message previously misspelled the tool name as "Parview"
print('\nNow open the files in Paraview and enjoy!')
# -
| notebooks/Full_Workflow/0_Construct_Model_and_SPECFEM_Mesh/1_Using_GriddedModel3D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Partially based on [<NAME>](http://sebastianraschka.com), 2015 https://github.com/rasbt/python-machine-learning-book
# # Data Pre-Processing
from IPython.display import Image
# # Dealing with missing data
# +
import pandas as pd
from io import StringIO
# Small CSV sample with missing entries: the first data row is short
# (C and D absent), and each later row omits one value.
csv_data = '''A,B,C,D
1.0,2.0
5.0,6.0,,8.0
10.0,11.0,12.0,'''
# If you are using Python 2.7, you need
# to convert the string to unicode:
# csv_data = unicode(csv_data)
csv_buffer = StringIO(csv_data)
df = pd.read_csv(csv_buffer)
df
# -
# Count the missing values per column
df.isnull().sum()
# ## Eliminating samples or features with missing values
# Drop every row that contains at least one NaN
df.dropna()
# Drop every column that contains at least one NaN
df1=df.dropna(axis=1)
df1
# only drop rows where all columns are NaN
df.dropna(how='all')
# drop rows that have not at least 4 non-NaN values
df.dropna(thresh=4)
# only drop rows where NaN appear in specific columns (here: 'C')
df.dropna(subset=['C'])
# ## Imputing missing values
# +
# NOTE: sklearn.preprocessing.Imputer was deprecated in scikit-learn 0.20
# and removed in 0.22; SimpleImputer is its drop-in replacement.  The old
# axis=0 (column-wise) behaviour is SimpleImputer's only mode.
from sklearn.impute import SimpleImputer
import numpy as np
# Replace each NaN with the most frequent value of its column
imr = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imr = imr.fit(df)
imputed_data = imr.transform(df.values)
imputed_data
# -
df.values
# <br>
# <br>
# # Handling categorical data
# +
import pandas as pd
# Toy data set with one nominal feature (color), one ordinal feature (size),
# one numeric feature (price) and a class label.
rows = [
    ['green', 'M', 10.1, 'class1'],
    ['red', 'L', 13.5, 'class2'],
    ['blue', 'XL', 15.3, 'class1'],
]
df = pd.DataFrame(rows, columns=['color', 'size', 'price', 'classlabel'])
df
# -
# <br>
# <br>
# ## Mapping ordinal features
# Ordinal features carry an order; encode it with an explicit integer mapping.
size_mapping = {'M': 1, 'L': 2, 'XL': 3}
df['size'] = df['size'].map(size_mapping)
df
# Invert the mapping to translate the integer codes back to the labels.
inv_size_mapping = dict(zip(size_mapping.values(), size_mapping.keys()))
df['size'].map(inv_size_mapping)
# <br>
# <br>
# ## Encoding class labels
# +
from sklearn.preprocessing import LabelEncoder
# Encode the string class labels as integers (classes are sorted before encoding)
class_le = LabelEncoder()
y = class_le.fit_transform(df['classlabel'].values)
y
# -
# Map the integer codes back to the original class-name strings
class_le.inverse_transform(y)
# <br>
# <br>
# ## Performing one-hot encoding on nominal features
# +
X = df[['color', 'size', 'price']].values
color_le = LabelEncoder()
X[:, 0] = color_le.fit_transform(X[:, 0])
X
# +
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(categorical_features=[0])
ohe.fit_transform(X).toarray()
# -
pd.get_dummies(df[['price', 'color', 'size']])
| hw1/.ipynb_checkpoints/PreprocessingII-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Série 10 - Exercices - Solutions
#
# L'identification de paramètres à partir de données est une des tâches très fréquentes en sciences. Suivant la nature du modèle, différentes méthodes doivent être employées.
#
# Le but de cette série d'exercice est de vous rendre capable d'appliquer les techniques de régression linéaires et non linéaire sur de vraies données.
# **Exercice 1** - Chercher la droite de régression
#
# Soit le jeu de donnée ci-dessous.
#
# **a)** Faire le graphe de $y$ en fonction de $x$ permettant de visualiser les points de données.
# +
import numpy as np
import matplotlib.pyplot as plt
# Data set for the straight-line regression exercise
x = np.array([4.5, 6., 7.5, 9., 10.5])
y = np.array([20, 23, 24, 28, 32])
# Scatter plot of the raw data points
plt.plot(x,y,'o')
plt.xlabel('x', size=14)
plt.ylabel('y', size=14)
plt.show()
# -
# **b)** Construisez la matrice dont la première colonne contient les valeurs de $x$ et la deuxième des 1.
#
# $${\bf G} = \left[ \begin{array}{cc} 4.5 & 1\\ 6. & 1\\ 7.5 & 1\\ 9. & 1\\ 10.5 & 1\end{array}\right] $$
# Design matrix G: first column holds the x values, second a constant 1
# (the intercept term), as required by the exercise statement.
N = len(x)
G = np.column_stack((x, np.ones(N)))
print(G)
# **c)** Estimer les paramètres de la droite avec
#
# $${\bf p} = \left( {\bf G^T} {\bf G} \right) ^{-1} {\bf G^T} {\bf y}$$
#
# Puis faire le graphe pour comparer le modèle avec les données.
# Normal-equation solution p = (G^T G)^{-1} G^T y -> p = [slope, intercept]
p = np.linalg.inv( G.T @ G ) @ G.T @ y
# Model predictions and mean squared error against the data
yc = G @ p
MSE = np.mean( (yc - y)**2 )
plt.plot(x,y,'o');
plt.plot(x,yc,'-');
print("MSE=", MSE)
# **Exercice 2** - Ecrire une fonction qui encapsule vos calculs
#
# L'idée est que cette fonction vous permette d'analyser très rapidement n'importe quel jeu de donnée.
# Le prototype de la fonction est donné ci-dessous. Remplir la partie manquante, c'est à dire celle qui fait vraiment le calcul.
#
# Une fois la fonction programmée, vérifier que si vous l'appliquez aux données de l'exercice 2, elle donne les bons résultats.
def fit_line(x,y):
    """
    Fit a least-squares straight line to a data set via the normal
    equation, and plot it with the data for rapid visual control.
    Inputs:
        x (numpy array): The X data values
        y (numpy array): The Y data values
    Returns:
        p (numpy array): p=[a, b] Slope and intercept of the straight line
    """
    # Design matrix: [x, 1] columns (slope and intercept terms)
    n_pts = len(x)
    G = np.column_stack((x, np.ones(n_pts)))
    # Normal-equation solution p = (G^T G)^{-1} G^T y
    p = np.linalg.inv(G.T @ G) @ G.T @ y
    # Predictions and mean squared error for quick quality control
    y_model = G @ p
    mse = np.mean((y_model - y) ** 2)
    plt.plot(x, y, 'o')
    plt.plot(x, y_model, '-')
    print("MSE=", mse)
    return p
fit_line(x,y)
# **Exercice 3** - Age du système solaire
#
# Le fichier `eucrites.txt` contient des données issues de l'analyse chimique de météorites (Eucrites) du système solaire. La première colonne contient le rapport isotopique 87Rb/86Sr des échantillons (87Rb est l'élèment père), alors que la deuxième colonne contient le rapport 87Sr/86Sr (avec 87Sr l'élément fils).
#
# A partir de la pente de la ligne passant à travers ces données, estimer un âge minimum pour le système solaire, sachant que la constante de décroissance radioactive pour ces données est estimée à
#
# $$\lambda = 1.393 \times 10^{-11} an^{-1}$$
#
# On vous rappelle aussi que l'âge s'obtient en
#
# $$t =\frac{ \log( a + 1)}{\lambda} $$
#
# avec $a$ la pente de la droite et log représente le logarithme népérien.
# +
# Isochron data: column 0 = 87Rb/86Sr (parent), column 1 = 87Sr/86Sr (daughter)
data = np.loadtxt('data/eucrites.txt')
r, s = data[:,0], data[:,1]
p = fit_line(r, s)
plt.xlabel('87Rb/86Sr')
plt.ylabel('87Sr/86Sr')
plt.title("Datation d'Eucrites")
plt.show()
# Decay constant (1/years); age t = ln(slope + 1) / lambda, reported in Gyr
l = 1.393e-11
t = np.log(p[0]+1)/l / 1e9
print(f"Age: {t:4.2f} milliards d'années")
# -
# **Exercice 4** - Polynôme d'ordre 2
#
# Recopier la fonction `fit_line` et renomer là en `fit_parabola`. Modifier son contenu pour qu'elle ajuste un polynome d'ordre 2 sur les données.
#
# Appliquez cette nouvelle fonction aux données du premier exercice. Combien valent les paramètres ? Quelle est la valeur de l'erreur quadratique moyenne ?
# +
def fit_parabola(x,y):
    """
    Fit a least-squares second-order polynomial to a data set via the
    normal equation, and plot it with the data for rapid visual control.
    Inputs:
        x (numpy array): The X data values
        y (numpy array): The Y data values
    Returns:
        p (numpy array): p=[a, b, c] coefficients of a*x**2 + b*x + c
    """
    # Design matrix: [x^2, x, 1] columns
    n_pts = len(x)
    G = np.column_stack((x**2, x, np.ones(n_pts)))
    # Normal-equation solution p = (G^T G)^{-1} G^T y
    p = np.linalg.inv(G.T @ G) @ G.T @ y
    # Predictions and mean squared error for quick quality control
    y_model = G @ p
    mse = np.mean((y_model - y) ** 2)
    plt.plot(x, y, 'o')
    plt.plot(x, y_model, '-')
    print("MSE=", mse)
    return p
fit_parabola(x,y)
# -
# **Exercice 5** - Altération de galets
#
# Une méthode assez étonnante pour dater des conglomérats consiste à mesurer l'épaisseur du halo d'altération autour de galets dans la roche. En effet, l'épaisseur de cette zone altérée dépend de la durée d'exposition à des conditions de surface et à la vitesse du processus d'altération. Il est possible de calibrer cette relation et de s'en servir comme méthode de datation.
#
# Le fichier `alteration_galet.txt` contient des données provenant de Nouvelle Zélande et permettant de calibrer cette relation pour cette région. Le fichier contient dans la première colonne l'épaisseur de la zone altérée en mm et dans la deuxième colonne l'age en milliers d'années (kA).
#
# Charger les données, ajuster un polynôme d'ordre 2 sur ces données, et utiliser le résultat pour estimer l'age correspondant à une épaisseur de 3mm.
# +
# Column 0 = weathering-rind thickness (mm), column 1 = age (kA)
d = np.loadtxt('data/alteration_galet.txt')
xg, yg = d[:,0], d[:,1]
p = fit_parabola(xg,yg)
# Evaluate the fitted parabola at a rind thickness of 3 mm
e = 3
age = p[0] * e**2 + p[1] * e + p[2]
print(f"Age = {age:5.2f} kA")
# -
# **Exercice 6** - Régression non linéaire
#
# La cellule de calcul ci-dessous vous donne un jeu de donnée d'évolution d'une population de bactéries. On vous demande d'ajuster sur ces données un modéle utilisant l'équation logistique :
#
# $$ n(t) = \frac{n_{max}}{1+c \cdot \exp(-rt)}$$
# avec
# $$ c = \frac{n_{max}-n_0}{n_0}$$
#
# Quelles valeurs ont les paramètres obtenues ?
# Observation times (days) and the corresponding bacteria counts
t = np.array([0.3, 0.5, 0.72, 0.8, 1, 1.2, 1.5, 1.8, 2, 2.3, 2.5, 2.7, 3])
n = np.array([46, 70, 98, 110, 135, 157, 163, 166, 182, 170, 174, 167, 186])
def logistique(t, n0, nmax, r):
    """Logistic growth n(t) = nmax / (1 + c*exp(-r*t)) with c = (nmax-n0)/n0."""
    growth_ratio = (nmax - n0) / n0
    return nmax / (1 + growth_ratio * np.exp(-r * t))
from scipy import optimize
# Non-linear least-squares fit of the logistic model; p0 is the initial
# guess for (n0, nmax, r)
p, Cp = optimize.curve_fit(logistique, t, n, p0=[10, 180, 2])
# Evaluate the fitted model at the observation times for plotting
yc = logistique(t, *p)
plt.plot(t,n,'o')
plt.plot(t,yc,'-')
plt.ylabel('Nombre de bactéries')
plt.xlabel('Nombre de jours')
plt.show()
print("Paramètres identifiés:")
print(f" population initiale: {p[0]:5.1f}")
print(f" population maximale: {p[1]:5.1f}")
print(f" taux de croissance: {p[2]:5.1f}")
| series/serie10_solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: GPU-Havok
# language: python
# name: python3
# ---
# # Testing the higher order signature kernel
# ***
# The esig package implements the computation of the higher order signature kernel, which we can use to validate our implementation.
# +
import os
# Pin this notebook to GPU 2 before any framework initializes CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import sys
sys.path.append('..') # add to path parent dir of gpsig
# numerics
import numpy as np
# signatures
import gpsig
import esig
# -
# ***
# To do so, we simply compare the entries of the signature kernel matrix computed by gpsig with inner products of signature features computed by esig. First, generate some random data, the details of which is irrelevant for this comparison.
# Signature truncation level and the shape of the random test data:
# 100 sequences, each of length 50 with 3 channels.
num_levels = 5
num_examples = 100
len_examples = 50
num_features = 3
X = np.random.randn(num_examples, len_examples, num_features)
# ***
# ### Validating the signature kernel
# ##### Computing signature features with esig
# Sanity check that the esig backend is available
esig.is_library_loaded()
# Flattened signature features up to level num_levels, one row per sequence
sigs = np.asarray([esig.tosig.stream2sig(x, num_levels) for x in X])
# The sigs array contains signature features up to level $M=5$ flattened out into $(1 + d + d^2 + \dots + d^M)$ dimensions. Signatures are tensors in the truncated tensor algebra $\mathbf{S}_{\leq M}(\mathbf{x}) \in \prod_{m=0}^M (\mathbb{R}^d)^{\otimes m}$, but this space is analogous to $\mathbb{R}^{1+d+d^2+\dots+d^M}$ with the Euclidean inner product, which we can use on these flattened out tensors to recover the signature kernel.
# The signature kernel matrix is the Gram matrix of the flattened features
K_esig = sigs @ sigs.T
# ##### Computing the signature kernel with gpsig
# In gpsig, we first use a state-space embedding $x \mapsto \kappa(x, \cdot)$ from $\mathbb{R}^d$ into an RKHS $V$, i.e. with some abuse of notation $\kappa_{\mathbf{x}} = (\kappa(x_i, \cdot))_{i=1,\dots, l_{\mathbf x}}$ for $\mathbf{x} = (x_i)_{i=1,\dots,l_{\mathbf x}}$. To recover the same setting as in esig, we may use as state-space embedding the identity map, which specifies that the inner product of two observations is simply the Euclidean inner product. This variant of the signature kernel is called _SignatureLinear_ here.
#
# We remark that esig uses the highest order signature features, which corresponds in our case to setting $D = M$, i.e. _order = num_levels_. Furthermore, the default setting is to normalize each signature level, which we have to turn off.
# gpsig works on flattened sequences, so the declared input dimension is
# (sequence length) x (number of channels)
input_dim = num_features * len_examples
# order=num_levels and normalization=False reproduce esig's conventions
kern = gpsig.kernels.SignatureLinear(input_dim, num_features, num_levels, order=num_levels, normalization=False)
K_gpsig = kern.compute_K_symm(X.reshape([num_examples, -1]))
# merge last two axes of the input since the kernel expects a 2d array
# ##### Comparing the results
K_diff = K_esig - K_gpsig
print('2-norm: {}'.format(np.linalg.norm(K_diff, ord=2)))
print('Fro-norm: {}'.format(np.linalg.norm(K_diff, ord='fro')))
print('Inf-norm: {}'.format(np.linalg.norm(K_diff, ord=np.inf)))
# ### Validating the (augmented) signature vs tensor kernel
# First, let us generate some sparse tensors of the form $\mathbf{z} = (z_{m,1} \otimes \dots \otimes z_{m, m})_{m=0,\dots,M}$, i.e. we generate the elements $z_{m,i} \in \mathbb{R}^d$ in the tensor products for each $0 \geq i \geq m$ and $0 \geq m \geq M$.
#
# The gpsig kernel expects that the tensors are in $(M(M+1)/2, n_{\mathbf Z}, d)$ format, i.e. all $z_{m, i}$ are stacked together along the first axis.
# num_levels*(num_levels+1)/2 component vectors per tensor: level m
# contributes m vectors z_{m,1..m}, for m = 1..num_levels
num_tensors = 100
Z = np.random.randn(int(num_levels*(num_levels+1)/2), num_tensors, num_features)
# ##### Computing the corresponding tensor features
# The generated components are a low-dimensional representation of the generally high-dimensional tensors, which is feasible due to the sparsity constraint. Hence, next we build the actual tensors that take values in $\prod_{m=0}^M (\mathbb{R}^d)^{\otimes m}$, but we flatten the dimensions out, similarly to the signature features previously.
# Build the flattened tensor features from the low-rank components in Z.
# Level 0 is the constant 1; level m is the m-fold outer product
# z_{m,1} (x) ... (x) z_{m,m}, flattened into d**m dimensions.
tens = [np.ones((num_tensors, 1))]  # level-0 feature (was a hard-coded 100)
k = 0
for m in range(1, num_levels+1):
    Zm = Z[k]
    k += 1
    for i in range(1, m):
        # Outer product with the next component, flattened back to 2-D
        Zm = (Zm[..., None] * Z[k, :, None, :]).reshape([num_tensors, -1])
        k += 1
    tens.append(Zm)
tens = np.concatenate(tens, axis=1)
# Ground-truth cross kernel: tensor features vs signature features
K_tens_vs_sig = tens @ sigs.T
# ##### Computing the tensors vs signatures kernel with gpsig
# gpsig's tensors-vs-sequences kernel on the same (flattened) inputs
K_tens_vs_seq_gpsig = kern.compute_K_tens_vs_seq(Z, X.reshape([num_examples, -1]))
# ##### Comparing the results
K_tens_vs_seq_diff = K_tens_vs_sig - K_tens_vs_seq_gpsig
print('2-norm: {}'.format(np.linalg.norm(K_tens_vs_seq_diff, ord=2)))
print('Fro-norm: {}'.format(np.linalg.norm(K_tens_vs_seq_diff, ord='fro')))
print('Inf-norm: {}'.format(np.linalg.norm(K_tens_vs_seq_diff, ord=np.inf)))
# ### Validating the (augmented) tensor vs tensor kernel
# Finally, we validate the computation of tensor vs tensor inner product in gpsig.
#
# ##### Computing the tensor vs tensor kernel as inner product of tensor features
# Ground truth: plain Gram matrix of the flattened tensor features
K_tens_vs_tens = tens @ tens.T
# ##### Computing the tensor vs tensor kernel with gpsig
K_tens_vs_tens_gpsig = kern.compute_K_tens(Z)
# ##### Comparing the results
K_tens_vs_tens_diff = K_tens_vs_tens - K_tens_vs_tens_gpsig
print('2-norm: {}'.format(np.linalg.norm(K_tens_vs_tens_diff, ord=2)))
print('Fro-norm: {}'.format(np.linalg.norm(K_tens_vs_tens_diff, ord='fro')))
print('Inf-norm: {}'.format(np.linalg.norm(K_tens_vs_tens_diff, ord=np.inf)))
| notebooks/signature_kernel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## MIDS UC Berkeley, Machine Learning at Scale
#
# __W261-1__ Summer 2016
# __Week 7__: SSSP
#
# __Name__
# <EMAIL>
#
# July 1, 2016
#
# ***
# <h1 style="color:#021353;">General Description</h1>
# <div style="margin:10px;border-left:5px solid #eee;">
# <pre style="font-family:sans-serif;background-color:transparent">
# In this assignment you will explore networks and develop MRJob code for
# finding shortest path graph distances. To build up to large data
# you will develop your code on some very simple, toy networks.
# After this you will take your developed code forward and modify it and
# apply it to two larger datasets (performing EDA along the way).
#
# <h3>Undirected toy network dataset</h3>
#
#
# In an undirected network all links are symmetric,
# i.e., for a pair of nodes 'A' and 'B,' both of the links:
#
# A -> B and B -> A
#
# will exist.
#
# The toy data are available in a sparse (stripes) representation:
#
# (node) \t (dictionary of links)
#
# on AWS/Dropbox via the url:
#
# s3://ucb-mids-mls-networks/undirected_toy.txt
# On under the Data Subfolder for HW7 on Dropbox with the same file name.
# The Data folder is in: https://db.tt/Kxu48mL1)
#
# In the dictionary, target nodes are keys, link weights are values
# (here, all weights are 1, i.e., the network is unweighted).
#
#
# <h3>Directed toy network dataset</h3>
#
# In a directed network all links are not necessarily symmetric,
# i.e., for a pair of nodes 'A' and 'B,' it is possible for only one of:
#
# A -> B or B -> A
#
# to exist.
#
# These toy data are available in a sparse (stripes) representation:
#
# (node) \t (dictionary of links)
#
# on AWS/Dropbox via the url:
#
# s3://ucb-mids-mls-networks/directed_toy.txt
# Or under the Data Subfolder for HW7 on Dropbox with the same file name
# (On Dropbox https://www.dropbox.com/sh/2c0k5adwz36lkcw/AAAAKsjQfF9uHfv-X9mCqr9wa?dl=0)
#
# In the dictionary, target nodes are keys, link weights are values
# (here, all weights are 1, i.e., the network is unweighted).
# </pre>
# </div>
# <h1 style="color:#021353;">HW 7.0: Shortest path graph distances (toy networks)</h1>
# <div style="margin:10px;border-left:5px solid #eee;">
# <pre style="font-family:sans-serif;background-color:transparent">
# In this part of your assignment you will develop the base of your code for the week.
#
# Write MRJob classes to find shortest path graph distances, as described in the lectures. In addition to finding the distances, your code should also output a distance-minimizing path between the source and target.
# Work locally for this part of the assignment, and use both of the undirected and directed toy networks.
#
# To prove your code's function, run the following jobs
#
# - shortest path in the undirected network from node 1 to node 4
# Solution: 1,5,4. NOTE: There is another shortest path also (HINT: 1->5->4)! Either will suffice (you will find this also in the remaining problems. E.g., 7.2 and 7.4.
#
#
# - shortest path in the directed network from node 1 to node 5
# Solution: 1,2,4,5
#
# and report your output---make sure it is correct!
#
# <h3>Main dataset 1: NLTK synonyms</h3>
#
# In the next part of this assignment you will explore a network derived from the NLTK synonym database used for evaluation in HW 5. At a high level, this network is undirected, defined so that there exists link between two nodes/words if the pair or words are a synonym. These data may be found at the location:
#
# <a href="s3://ucb-mids-mls-networks/synNet/synNet.txt">s3://ucb-mids-mls-networks/synNet/synNet.txt</a>
# <a href="s3://ucb-mids-mls-networks/synNet/indices.txt">s3://ucb-mids-mls-networks/synNet/indices.txt</a>
# On under the Data Subfolder for HW7 on Dropbox with the same file names
#
# where synNet.txt contains a sparse representation of the network:
#
# (index) \t (dictionary of links)
#
# in indexed form, and indices.txt contains a lookup list
#
# (word) \t (index)
#
# of indices and words. This network is small enough for you to explore and run
# scripts locally, but will also be good for a systems test (for later) on AWS.
#
# In the dictionary, target nodes are keys, link weights are values
# (here, all weights are 1, i.e., the network is unweighted).
# </pre>
# </div>
# <h1 style="color:#021353;">HW 7.1: Exploratory data analysis (NLTK synonyms)</h1>
# <div style="margin:10px;border-left:5px solid #eee;">
# <pre style="font-family:sans-serif;background-color:transparent">
# Using MRJob, explore the synonyms network data.
# Consider plotting the degree distribution (does it follow a power law?),
# and determine some of the key features, like:
#
# number of nodes,
# number links,
# or the average degree (i.e., the average number of links per node),
# etc...
#
# As you develop your code, please be sure to run it locally first (though on the whole dataset).
# Once you have gotten your code to run locally, deploy it on AWS as a systems test
# in preparation for our next dataset (which will require AWS).
# </pre>
# </div>
#
# <h1 style="color:#021353;">HW 7.2: Shortest path graph distances (NLTK synonyms)</h1>
# <div style="margin:10px;border-left:5px solid #eee;">
# <pre style="font-family:sans-serif;background-color:transparent">
# Write (reuse your code from 7.0) an MRJob class to find shortest path graph distances,
# and apply it to the NLTK synonyms network dataset.
#
# Proof your code's function by running the job:
#
# - shortest path starting at "walk" (index=7827) and ending at "make" (index=536),
#
# and showing you code's output. Once again, your output should include the path and the distance.
#
# As you develop your code, please be sure to run it locally first (though on the whole dataset).
# Once you have gotten your code to run locally, deploy it on AWS as a systems test
# in preparation for our next dataset (which will require AWS).
#
# =====================================
# <strong>NOTE: Dataset 2 English Wikipedia hyperlink network.data </strong>
# The dataset is available via Dropbox at:
#
# https://www.dropbox.com/sh/2c0k5adwz36lkcw/AAAAKsjQfF9uHfv-X9mCqr9wa?dl=0
#
# on S3 at
# <a href="s3://ucb-mids-mls-networks/wikipedia/">s3://ucb-mids-mls-networks/wikipedia/</a>
# <a href="s3://ucb-mids-mls-networks/wikipedia/all-pages-indexed-out.txt">s3://ucb-mids-mls-networks/wikipedia/all-pages-indexed-out.txt</a> # Graph
# <a href="s3://ucb-mids-mls-networks/wikipedia/indices.txt">s3://ucb-mids-mls-networks/wikipedia/indices.txt</a> # Page titles and page Ids
#
# For the remainder of this assignment you will explore the English Wikipedia hyperlink network.
#
# The dataset is built from the Sept. 2015 XML snapshot of English Wikipedia.
# For this directed network, a link between articles:
#
# A -> B
#
# is defined by the existence of a hyperlink in A pointing to B.
# This network also exists in the indexed format:
#
# Data: <a href="s3://ucb-mids-mls-networks/wikipedia/all-pages-indexed-out.txt">s3://ucb-mids-mls-networks/wikipedia/all-pages-indexed-out.txt</a>
# Data: <a href="s3://ucb-mids-mls-networks/wikipedia/all-pages-indexed-in.txt">s3://ucb-mids-mls-networks/wikipedia/all-pages-indexed-in.txt</a>
# Data: <a href="s3://ucb-mids-mls-networks/wikipedia/indices.txt">s3://ucb-mids-mls-networks/wikipedia/indices.txt</a>
#
# but has an index with more detailed data:
#
# (article name) \t (index) \t (in degree) \t (out degree)
#
# In the dictionary, target nodes are keys, link weights are values .
# Here, a weight indicates the number of time a page links to another.
# However, for the sake of this assignment, treat this an unweighted network,
# and set all weights to 1 upon data input.
#
# </pre>
# </div>
# <h1 style="color:#021353;">HW 7.3: Exploratory data analysis (Wikipedia)</h1>
# <div style="margin:10px;border-left:5px solid #eee;">
# <pre style="font-family:sans-serif;background-color:transparent">
# Using MRJob, explore the Wikipedia network data on the AWS cloud. Reuse your code from HW 7.1---does it scale well?
#
# Be cautioned that Wikipedia is a directed network, where links are not symmetric.
# So, even though a node may be linked to, it will not appear as a primary record itself if it has no out-links.
#
# This means that you may have to ADJUST your code (depending on its design).
#
# To be sure of your code's functionality in this context, run a systems test on the directed_toy.txt network.
# </pre>
# </div>
# <h1 style="color:#021353;">HW 7.4: Shortest path graph distances (Wikipedia)</h1>
# <div style="margin:10px;border-left:5px solid #eee;">
# <pre style="font-family:sans-serif;background-color:transparent">
# Using MRJob, find shortest path graph distances in the Wikipedia network on the AWS cloud.
# Reuse your code from 7.2, but once again be warned of Wikipedia being a directed network.
# To be sure of your code's functionality in this context, run a systems test on the directed_toy.txt network.
#
# When running your code on the Wikipedia network, proof its function by running the job:
#
# - shortest path from "Ireland" (index=6176135) to "University of California, Berkeley" (index=13466359),
#
# and show your code's output. Show the shortest path in terms of just page IDS but also in terms of the name of page (show of your MapReduce join skills!!)
#
# Once your code is running, find some other shortest paths and report your results.
# </pre>
# </div>
# <h1 style="color:#021353;">HW 7.5: Conceptual exercise: Largest single-source network distances</h1>
# <div style="margin:10px;border-left:5px solid #eee;">
# <pre style="font-family:sans-serif;background-color:transparent">
# Suppose you wanted to find the largest network distance from a single source,
# i.e., a node that is the furthest (but still reachable) from a single source.
#
# How would you implement this task?
# How is this different from finding the shortest path graph distances?
#
# Is this task more difficult to implement than the shortest path distance?
#
# As you respond, please comment on program structure, runtimes, iterations, general system requirements, etc...
# </pre>
# </div>
# <h1 style="color:#021353;">HW 7.5.1: </h1>
# <div style="margin:10px;border-left:5px solid #eee;">
# <pre style="font-family:sans-serif;background-color:transparent">
# Can we utilize combiners in the HW 7 to perform the shortest path implementation?
# Does order inversion help with the HW 7 shortest path implementation?
# </pre>
# </div>
# <h1 style="color:#021353;">HW 7.5.2: OPTIONAL </h1>
# <div style="margin:10px;border-left:5px solid #eee;">
# <pre style="font-family:sans-serif;background-color:transparent">
# Implement combiners in the context of HW 7.5 and contrast the performance of this implementation versus the implementation with no combiners.
#
# Please report the cluster configuration and runtimes in tabular format for both experiments and comment on your findings.
# </pre>
# </div>
# <h1 style="color:#021353;">HW 7.6: Computational exercise: Largest single-source network distances: OPTIONAL </h1>
# <div style="margin:10px;border-left:5px solid #eee;">
# <pre style="font-family:sans-serif;background-color:transparent">
# Using MRJob, write a code to find the largest graph distance and distance-maximizing nodes from a single-source.
# Test your code first on the toy networks and synonyms network to proof its function.
# </pre>
# </div>
# ==================END HW 7==================
| week7/MIDS-W261-HW-07-TEMPLATE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part 5
#
# # what is [for - else] statement?
# In part 5 excercise solution, **for-else** statement was used.
# The **for-else** construct is used to find an element in a loop (breaking out when found), or otherwise run the code in the else: clause
#
# In following examples, first one does the job,
# but the second one(for-else) is more likely to be resistant to errors,
# according to <NAME> in the below link
#
# Resource Link
# >https://stackoverflow.com/questions/9979970/why-does-python-use-else-after-for-and-while-loops
# +
# Flag-variable approach: track whether the search succeeded so that we
# can report a miss after the loop has finished.
letters = {"A", "B", "C"}
search_letter = "D"
found = False
for letter in letters:
    if letter != search_letter:
        continue
    print("Found :" + letter)
    found = True
    break
if not found:
    print('Not found.')
# +
# for-else approach: the else clause runs only when the loop completes
# without hitting a break, i.e. when the letter was never found.
letters = {"A", "B", "C"}
search_letter = "D"
for letter in letters:
    if letter != search_letter:
        continue
    print("Found :" + letter)
    break
else:
    print('Not found.')
# -
# ## Plotting Train Losses vs Test Losses by using matplotlib
#
# In part 5 notebook, there is a graph that compares train losses with test losses
# I wanted to have the same kind of graph for the work done by myself in the practice section
#
# Resource
# > https://www.geeksforgeeks.org/graph-plotting-in-python-set-1/
# +
# Train_losses and Test_losses were grabbed from the result in Part 5 notebook
# This was my train_losses
train_losses = [0.5139048838062581, 0.3921349845620107, 0.3535378427266566, 0.33561048804442767, 0.315498280539505, 0.301006089951565, 0.29073364978660143, 0.27829998286008073, 0.2743124962091319, 0.26420995871077724, 0.2540956304581371, 0.2508315832566605, 0.2444969094924327, 0.23426388011081642, 0.2331514927481156, 0.22289049451841092, 0.22244871298530336, 0.2189971616805426, 0.21049289203989607, 0.20941834035379164]
# This was my test_losses
test_losses = [0.4760, 0.4197, 0.4183, 0.3924, 0.3730, 0.3885, 0.3514, 0.3465, 0.3500, 0.3651, 0.3416, 0.3763, 0.3603, 0.3673, 0.3623, 0.3992, 0.3916, 0.3761, 0.3740, 0.3968]
# Import matplotlib, I am using anaconda distribution
# There was no issue when I import
import matplotlib.pyplot as plt
# Derive the epoch axis from the data itself instead of hard-coding a
# 20-element list, so the plot stays correct for any number of epochs.
x = list(range(1, len(train_losses) + 1))
# Plotting
plt.plot(x, train_losses, label = "Train losses")
plt.plot(x, test_losses, label = "Test losses")
plt.title("Training losses vs Test losses")
plt.xlabel("Epoch")
plt.ylabel("Losses")
plt.legend()
plt.show()
| intro_to_pytorch/Part5 Study.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="XN6dxHwuSFXW"
# # AM216 Mini-Project: Drug target interaction and COVID-19
#
# Rapid determination of whether a candidate small molecule will bind to a particular target receptor protein remains a stumbling block in drug discovery. If you can find a small molecule that binds to a relevant protein and modifies its function, this molecule can serve as a drug. This is an **inverse problem**: Given a protein, please find a small molecule that can bind to it.
#
# One way of solving this inverse problem would be to use molecular dynamics: Create a molecular model of a protein. Create a molecular model of all possible small molecule drugs that can bind to the protein. Simulate the interactions between the small molecules and the proteins. Find the molecules that work.
#
# Or-you might be more mathematical and try to solve this as a straight-up optimization problem: Find the small molecule that optimally binds to the binding pocket of a given drug.
#
# Although conceptually appealing, this methodology is impossible: The major bottleneck is that we do not know and cannot accurately represent the interactions between small molecules and proteins. The potentials are empirical with fitting parameters--they cannot be found from first principles and despite decades of work, we simply do not have good representations for them. For that reason, physical computation has been of limited utility for drug discovery.
#
# We should mention that a secondary bottleneck is the computing power that would be required for this search -- but this is something that could be likely sorted out if it weren't for the first bottleneck.
#
# An alternative approach is to use a *data driven approach*. Instead of representing the physics, lets make a list of all known proteins and all small molecules that binds to each protein. Lets then design a data driven way of associating small molecules to proteins, representing binding.
#
# Note that for a data driven approach to succeed, the key step is to **find ways of representing the small molecule and the protein that make it possible to find the patterns in the existing datasets.** Representation is the key problem. Presumably, the best way to represent the proteins and the small molecules is to use the physics and biochemistry of what happens when binding occurs -- as by doing this, we will focus on features that are relevant. Finding the right feature representations for this type of problem is a very active area of research.
#
# In this miniproject, you will dip your toe into the pond by studying a canonical problem, the binding affinity of small molecules against target proteins.
#
# Given all that is happening in the world right now, it seems appropriate to assign you the additional mission is to use the trained model to identify drugs that could bind to the main protease$^*$ protein of COVID-19 and prevent the production of viral enzyme.
#
# To help you in this mission, you are given KIBA ("Kinase Inhibitor Bioassay") dataset which contains 2111 drugs and 229 proteins with total of 118254 drug-protein binding affinity scores. Those are stored in `ligands_can.txt`, `proteins.txt`, and `Y`, respectively. The rows of `Y` index drugs and the columns index proteins. The element of `Y` is a real-valued binding score that takes into account dissociation constant $K_d$, inhibition constant $K_i$, and half maximal inhibitory concentration $IC_{50}$. Refer to [original paper](https://pubs.acs.org/doi/pdf/10.1021/ci400709d) for more detail. We also provide splitted indices for 5-fold cross validation in `train_fold_setting.txt` and testset indices in `test_fold_setting.txt`. Lastly, `6Y84_A.fasta.txt` contains the amino acid sequence of COVID-19 protease.
#
# You will evalulate the performance of your model using MSE and Corcordance Index (CI), which is slightly relaxed metric that measures whether the predicted binding affinity val- ues of two random drug–target pairs were predicted in the same order as their true values were.
#
# *: Note that there are many other drug targets for COVID-19 virus. [This page](
# https://www.guidetopharmacology.org/coronavirus.jsp) gives you a good summary.
#
# To help you get started, we will first introduce you to Rdkit and DeepChem packages.
# + [markdown] id="wVhNjK4kSFXZ"
# ## 1. Basic Cheminformatics with Rdkit and DeepChem
#
# For this project, you will most likely heavily rely on Rdkit and Deepchem. You can install DeepChem along with Rdkit following instruction [here](https://deepchem.io). If you are using Colab, run the following code block.
#
# + colab={"base_uri": "https://localhost:8080/"} id="G7fQCZJ8q9Je" outputId="341296ca-176c-45ce-abc6-1e452ca9c2b4"
# %tensorflow_version 1.x
# + colab={"base_uri": "https://localhost:8080/"} id="ps7x414nDcHu" outputId="eb41268f-6da9-4880-a1d1-ab2c71c40069"
# This may take a few minutes to run!
# !wget -c https://repo.anaconda.com/archive/Anaconda3-2019.10-Linux-x86_64.sh
# !chmod +x Anaconda3-2019.10-Linux-x86_64.sh
# !bash ./Anaconda3-2019.10-Linux-x86_64.sh -b -f -p /usr/local
# !conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem-gpu=2.3.0
import sys
# Make the conda-installed packages importable from Colab's Python runtime
sys.path.append('/usr/local/lib/python3.7/site-packages/')
import deepchem as dc
# + [markdown] id="ygWxSzB9SFXm"
# Developing new medicine is a very time-consuming, labor-intensive, and expensive task which begins with rounds of screening process where researchers run some assays with thousands of molecules to identify potential drug candidates to go onto a clinical trial. The field of cheminformatics have been developed to help accelerate this process by performing laboratory experiments on computers.
#
# In order to do this we need to find a way to represent a molecule so that we can train machine learning models on it. As mentioned above, this is a critical step. If we were doing physics we would want to represent the molecule in the most natural way to represent the physics. For example there are features of the molecules that *cause* it to bind to the protein in question -- there are specific interactions between the chemical groups of the molecule and the amino acids. From a physical perspective, identifying these features is the most important part of the problem and we would like the representation to emphasize these.
#
# But given that we don't have any idea what is happening, we need another approach. We somehow have to represent both molecules and proteins in a way so that we can train machine learning models on them to see if it is possible to associate them with each other.
#
# For small molecules, this is a classic problem. How do we take a molecule and represent it. There is a field that has addressed this called 'cheminformatics'.
#
# A particularly simple method is to represent molecules as text strings called SMILES (“Simplified Molecular-Input Line-Entry System”). Please refer to [this page](https://www.daylight.com/dayhtml/doc/theory/theory.smiles.html) for more explanation. Here is an example SMILES string and the molecule it represents, which we obtain via the `MolFromSmiles` function of Rdkit.
# + id="WobQ8Nz4U-ke" colab={"base_uri": "https://localhost:8080/"} outputId="8b4f4bc4-2fec-4f08-ecf8-9f3dfcf83518"
from google.colab import drive
drive.mount('/content/drive')
G_PATH = './drive/MyDrive/AM216/Drug Binding/'
# + id="IcdYdzPuHNfH"
# # %%capture
# # !unzip data_Drug_target_binding_affinity.zip
# + id="M8YkkNgGSFXo"
import numpy as np
import rdkit
from rdkit.Chem import Draw
from rdkit.Chem import AllChem as Chem
from rdkit.Chem import MolFromSmiles
# + id="b4sRfDxkSFXs" colab={"base_uri": "https://localhost:8080/", "height": 385} outputId="53c25a09-df8a-4ff1-e199-456e1b6a5ee3"
# Load the sample ligand list: one SMILES string per line.
# Bug fix: the original used line[:-1], which chops the final character of the
# last line whenever the file does not end with a newline; rstrip('\n') only
# removes an actual trailing newline.
smiles = []
with open(G_PATH + 'data_Drug_target_binding_affinity/Sample_Ligand_List.txt', 'r') as f:
    for line in f:
        smiles.append(line.rstrip('\n'))
print('SMILES string:')
print(smiles[0])
print('\nOriginal molecule:')
# Parse the first SMILES into an RDKit molecule and render it.
mols = MolFromSmiles(smiles[0])
Draw.MolToImage(mols)
# + [markdown] id="vuh706D2SFXw"
# As you may have guessed, this SMILES string does not contain much of structural and chemical information present in the actual molecule representation.
#
# The smiles string is a string of symbols -- it is very different from the binding structure of the molecules. One option that we have is to train machine learning models on the smiles strings directly.
#
# This has been found to not be the most effective way of proceeding. Another approach that people have thought of is to convert this to a "chemical fingerprint". This is a
# vector of 1's and 0's that captures the presence or absence of specific features, as determined by local arrangement of atoms in a molecule.
#
#
# There are many algorithms that give you this finger print. One example is Extended-connectivity fingerprints (ECFP) scheme, which comes as `GetMorganFingerprintAsBitVec` in Rdkit. Here is an example of ECFP4. The number at the end refers to bond distances used for featurizing.
# + id="KNYhVaVaSFXy" colab={"base_uri": "https://localhost:8080/"} outputId="8e7eaf66-c70f-44e7-a682-3dcd2c3a20a7"
# Radius-2 Morgan fingerprint == ECFP4 (the "4" counts bond diameter, i.e.
# twice the radius), hence the printed label.
print('ECFP4')
mol = MolFromSmiles(smiles[0])
bit_string = Chem.GetMorganFingerprintAsBitVect(mol, 2).ToBitString()
fp_vector = np.array([int(bit) for bit in bit_string])
print(fp_vector[:20], '...')
print(fp_vector.shape)
# + [markdown] id="hLDF_gKMSFX5"
# Lots of Rdkit's featurization algorithms are ported in DeepChem. This is a deep learning for chemistry toolkit that was developed at Stanford.
#
# We can list the descriptors for molecules from RDKIT. Note that in addition to fingerprints, there are all sorts of purely chemical features, such as
#
#
# * Number of Hydrogen acceptors
# * Number of aromatic rings
# * Number of radical electrons
# * Number of rings in the molecule
# * Partial charges
#
#
# And so forth. When you are doing machine learning with complete ignorance of why the small molecule binds to something, you might as well be as general as you can be!!
#
#
# + id="Uy0vsVrBSFX7" colab={"base_uri": "https://localhost:8080/"} outputId="96aa633a-5b2a-44a5-c3a4-44c2b7b4a3c4"
from deepchem.feat import RDKitDescriptors
# Print every RDKit descriptor name that DeepChem's featurizer whitelists
# (H-bond counts, ring counts, charges, etc.).
for descriptor in RDKitDescriptors.allowedDescriptors:
    print(descriptor)
# + [markdown] id="NazROs-xSFYA"
# Deepchem has also implemented a large number of different machine learning models to build models on top of these features. We can list this as follows:
# + id="SOnVGtOgSFYB" colab={"base_uri": "https://localhost:8080/"} outputId="6e10e086-6dbf-418f-c013-9e6245b3a6cc"
# Inspect the model classes bundled with DeepChem (classical ML + deep nets).
dir(dc.models)
# + [markdown] id="6Cl4Ai8CSFYE"
# ## 2. Parsing KIBA dataset
#
# Now we are ready to examine the Kinase dataset. The way this works is that we are given a particular protein, and for that protein, we want to predict the small molecules that bind to it. Different proteins of course have different sets of small molecules. This dataset contains a large set of proteins, and for each of them, it contains the small molecules that bind.
#
# To associate proteins with small molecules, we also need a representation of the proteins. Proteins are of course sequences of amino acids, so the representation is just a string of amino acids. To use the string of amino acids in a model, we need to convert it to numbers. We do this by simply mapping each amino acid (represented by a letter) to a number.
# + id="WTIiSHZRSFYF"
import matplotlib.pyplot as plt
import json
import pickle
from collections import OrderedDict
import networkx as nx
# + id="Hu_V0gH7SFYK"
# Map each amino-acid letter to an integer index so a protein sequence can be
# fed to a model as a fixed-length categorical vector.
# NOTE(review): index 0 doubles as both the letter 'A' and the zero padding --
# presumably acceptable for this baseline, but worth confirming.
seq_voc = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
seq_dict = {letter: idx for idx, letter in enumerate(seq_voc)}
seq_dict_len = len(seq_dict)
max_seq_len = 1000  # every encoded protein is padded/truncated to this length

def seq_to_cat(prot):
    """Encode a protein string as a length-1000 float vector of residue indices."""
    codes = [seq_dict[residue] for residue in prot[:max_seq_len]]
    encoded = np.zeros(max_seq_len)
    encoded[:len(codes)] = codes
    return encoded
# Concordance Index (CI) evaluation: the fraction of comparable pairs
# (y[i] > y[j]) whose predictions are ordered the same way; prediction ties
# count as half.
def ci(y, f):
    """Return the concordance index between true values `y` and predictions `f`.

    Both arguments are numpy arrays of equal length along axis 0.  Returns 0
    when no pair of true values is strictly ordered (all `y` tied).

    Bug fix: the original computed `ci = S/z` unconditionally before its
    `return ci if z != 0 else 0`, so all-tied inputs raised
    ZeroDivisionError instead of returning 0.
    """
    order = np.argsort(y)
    y = y[order]
    f = f[order]
    z = 0.0  # number of comparable pairs (strictly ordered in y)
    S = 0.0  # concordant pairs; ties in f contribute 0.5
    # Equivalent to the original decrementing while-loops over all i > j pairs.
    for i in range(len(y) - 1, 0, -1):
        for j in range(i - 1, -1, -1):
            if y[i] > y[j]:
                z = z + 1
                u = f[i] - f[j]
                if u > 0:
                    S = S + 1
                elif u == 0:
                    S = S + 0.5
    # Guard the division so an all-tied y returns 0 instead of crashing.
    return S / z if z != 0 else 0
# + [markdown] id="E_FG7L-Rn1oV"
# We read in the ligands and the proteins, as well as the binding data.
# + id="5advqtV7SFYP"
fpath = G_PATH + 'data_Drug_target_binding_affinity/data/kiba/'

# Read in drugs and proteins.  OrderedDict preserves the file order so that
# row/column indices into the affinity matrix line up with the parsed entries.
# Fix: the original passed bare open() handles to json.load/pickle.load and
# never closed them; use `with` so every handle is released.
with open(fpath + "ligands_can.txt") as fh:
    drugs_ = json.load(fh, object_pairs_hook=OrderedDict)
# Canonicalize every SMILES string via an RDKit round-trip.
drugs = np.array([Chem.MolToSmiles(Chem.MolFromSmiles(d), isomericSmiles=True) for d in drugs_.values()])
with open(fpath + "proteins.txt") as fh:
    proteins_ = json.load(fh, object_pairs_hook=OrderedDict)
proteins = np.array(list(proteins_.values()))
# Read in affinity data (drugs x proteins matrix; NaN marks unmeasured pairs).
with open(fpath + "Y", "rb") as fh:
    affinity = np.array(pickle.load(fh, encoding='latin1'))
# Read in train/test fold indices.
with open(fpath + "folds/train_fold_setting1.txt") as fh:
    train_fold = json.load(fh)
train_fold = [ee for e in train_fold for ee in e]
'''
Here all validation folds are aggregated into training set.
If you want to train models with different architectures and/or
optimize for model hyperparameters, we encourage you to use 5-fold
cross validation as provided here.
'''
with open(fpath + "folds/test_fold_setting1.txt") as fh:
    test_fold = json.load(fh)
# Prepare train/test data with fold indices.  rows/cols enumerate every
# measured (non-NaN) drug-protein pair; the fold files index that flat list.
rows, cols = np.where(~np.isnan(affinity))  # idiomatic form of `== False`
drugs_tr = drugs[rows[train_fold]]  # (98545,)
proteins_tr = np.array([seq_to_cat(p) for p in proteins[cols[train_fold]]])  # (98545, 1000)
affinity_tr = affinity[rows[train_fold], cols[train_fold]]  # (98545,)
drugs_ts = drugs[rows[test_fold]]  # (19709,)
proteins_ts = np.array([seq_to_cat(p) for p in proteins[cols[test_fold]]])  # (19709, 1000)
affinity_ts = affinity[rows[test_fold], cols[test_fold]]  # (19709,)
# + id="GudunykUcni_" colab={"base_uri": "https://localhost:8080/"} outputId="f1b30ee9-9b8d-4938-a066-535b6e8c8b06"
# Peek at one training example from each modality.
print(f'Example of drug:{drugs_tr[0]}')
print(f'Example of protein:{proteins_tr[0][:10]} ...')
print(f'Example of affinity score:{affinity_tr[0]}')
# + colab={"base_uri": "https://localhost:8080/"} id="w9_fbrsHPXh4" outputId="4785b986-15a2-432f-ff9d-b82789cb1d29"
# Convert each SMILES string to a 1024-bit circular (ECFP) fingerprint.
# Fix: dropped the redundant `lambda x: MolFromSmiles(x)` wrapper (PEP 8
# E731 -- a lambda that only forwards its argument); call the function
# directly in a comprehension instead.
featurizer = dc.feat.CircularFingerprint(size=1024)
drugs_mol_tr = [MolFromSmiles(s) for s in drugs_tr]
drugs_ecfp_tr = featurizer.featurize(drugs_mol_tr)
drugs_mol_ts = [MolFromSmiles(s) for s in drugs_ts]
drugs_ecfp_ts = featurizer.featurize(drugs_mol_ts)
print(drugs_ecfp_tr.shape)
print(drugs_ecfp_ts.shape)
# + id="QQbj9m7WldB6"
# Record the dimensions used to build the model below.
tr_size, drug_size = drugs_ecfp_tr.shape  # n train samples, fingerprint width
ts_size = drugs_ecfp_ts.shape[0]
protein_size = max_seq_len  # proteins are fixed-length categorical vectors
# + [markdown] id="GRXYpyYVSFYS"
# ## 3. Train a model on KIBA data
# + id="qODLvdvESFYT" colab={"base_uri": "https://localhost:8080/"} outputId="ff7bd961-4e7a-4e42-ec09-a38ace1aeafa"
# Have fun!
import tensorflow as tf
from keras import Sequential, Model
from keras.layers import Dense, concatenate, Dropout
def build_baseline_model(drug_size, protein_size):
    """Build and compile the baseline two-branch affinity regressor.

    Args:
        drug_size: length of the drug fingerprint vector (1024-bit ECFP here).
        protein_size: length of the categorical protein vector (1000).

    Returns:
        A compiled keras Model mapping [drug_batch, protein_batch] to one
        predicted affinity score, optimized with Adam on MSE.
    """
    # Each branch projects its whole input to a single linear unit before
    # fusion.  NOTE(review): that compresses each modality to one scalar --
    # a severe bottleneck; presumably intentional as a baseline, but worth
    # revisiting for a stronger model.
    drug_model = Sequential()
    drug_model.add(Dense(1, input_shape=(drug_size,), activation='linear'))
    protein_model = Sequential()
    protein_model.add(Dense(1, input_shape=(protein_size,), activation='linear'))
    # concat_layer = tf.keras.layers.Concatenate([drug_model.outputs[0], protein_model.outputs[0]])
    model_concat = concatenate([drug_model.output, protein_model.output])
    # fully connected
    model_concat = Dense(1024, activation='relu')(model_concat)
    model_concat = Dropout(0.1)(model_concat)
    model_concat = Dense(1024, activation='relu')(model_concat)
    model_concat = Dropout(0.1)(model_concat)
    model_concat = Dense(512, activation='relu')(model_concat)
    model_concat = Dense(1, kernel_initializer='normal')(model_concat)
    model = Model(inputs=[drug_model.input, protein_model.input], outputs=model_concat)
    # Loss and the tracked metric are both MSE, so evaluate() reports it twice.
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mean_squared_error'])
    return model

model = build_baseline_model(drug_size=drug_size, protein_size=protein_size)
print(model.summary())
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="ViStDljpY7iN" outputId="d844d010-adb0-4050-f049-e5e1edaaba89"
from keras.callbacks import EarlyStopping

epochs = 100
batch_size = 50
# Stop once validation loss plateaus for 5 epochs and roll back to the best weights.
callback = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)
# Bug fix: the EarlyStopping callback was created but never passed to fit(),
# so training always ran the full 100 epochs; wire it in via `callbacks=`.
train_history = model.fit([drugs_ecfp_tr, proteins_tr], affinity_tr,
                          validation_split=0.2, batch_size=batch_size, epochs=epochs,
                          verbose=True, callbacks=[callback])
# + id="Ak5RPuFCASFn"
# save the trained model
import os
# Persist under the mounted Drive folder so weights survive Colab VM resets.
save_folder = f'{G_PATH}/saved_models/'
os.makedirs(save_folder, exist_ok=True)
save_path = save_folder + 'baseline_model'
model.save(save_path)

# load model
# model = keras.models.load_model(save_path)
# + colab={"base_uri": "https://localhost:8080/"} id="p4HOQGJKIiBt" outputId="a861821b-20be-4360-c2f5-96a72205f9ee"
# summarize history for loss
# NOTE(review): the commented snippet below refers to `history`, but the
# variable produced by fit() above is `train_history` -- fix before enabling.
# print(history.history.keys())
# plt.plot(train_history.history['loss'])
# plt.plot(train_history.history['val_loss'])
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.legend(['train', 'test'], loc='upper left')
# plt.show()
print("Evaluate on test data")
# evaluate() returns [loss, mse] here, which are the same number since the
# compiled loss and the tracked metric are both mean squared error.
results = model.evaluate([drugs_ecfp_ts, proteins_ts], affinity_ts, batch_size=128)
print("test MSE loss is:", results)
# + id="y_CH3ziCNDux"
# Keras predict() returns shape (n_samples, 1); flatten to 1-D so the CI
# computation compares scalars rather than length-1 arrays.
predicted_affinity = model.predict([drugs_ecfp_ts, proteins_ts]).flatten()
# + colab={"base_uri": "https://localhost:8080/"} id="tT2skwaRPpm8" outputId="31a83482-b780-487d-b89f-06d7581d00d5"
# Concordance index of the predictions against the held-out affinities.
ci_score = ci(affinity_ts, predicted_affinity)
print('test CI score is:', ci_score)
# + [markdown] id="K67QchKnSFYZ"
# [The original paper](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4364066/) obtained CI score of 0.782 and MSE of 0.411, and [random forest model](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5395521/) got 0.836 and 0.222. Can you beat them?
# + [markdown] id="my0BqyOkSFYa"
# ## 4. Use your model on COVID-19 protease
# + [markdown] id="rkmmMhjOSFYc"
# Now, use your trained model to identify drugs that could be used as COVID-19 protease inhibitors from these 2111 drugs in the dataset. The sequence of the protease is provided in `6Y84_A.fasta.txt`. You might want to first predict the binding affinity of Ritonavir, a well-known HIV drug that binds to HIV protease, to get a sense of a good binding score for this task. SMILES of Ritonavir is provided below.
# + id="DFt_emuuSFYd"
# Ritonavir (variable spelled "ritonavior" in this notebook): an HIV protease
# inhibitor used as a reference for what a strong protease binding score
# looks like.  The string is its SMILES representation.
ritonavior = 'CC(C)C1=NC(=CS1)CN(C)C(=O)NC(C(C)C)C(=O)NC(CC2=CC=CC=C2)CC(C(CC3=CC=CC=C3)NC(=O)OCC4=CN=CS4)O'
rit_mol = MolFromSmiles(ritonavior)
Draw.MolToImage(rit_mol)
# + [markdown] id="2meDhKVKSFYi"
# Please keep in mind that proteins in KIBA data are kinase family, a different kind from protease. So in this exercise, we are relying on transfer learning. That is, the learned embedding for predicting kinase could be useful for predicting protease binding as well.
# + id="dRd6CYBhe_-U"
| mini_project/notebooks/Ruoxi_Drug_Binding_Miniproject.ipynb |
/ ---
/ jupyter:
/ jupytext:
/ text_representation:
/ extension: .q
/ format_name: light
/ format_version: '1.5'
/ jupytext_version: 1.14.4
/ kernelspec:
/ display_name: SQL
/ language: sql
/ name: SQL
/ ---
/ Best Practices and Performance Checks - <EMAIL> (http://aka.ms/BPCheck)
/
/ DESCRIPTION: This script checks for skews in the most common best practices from SQL Server 2005 onwards.
/
/ DISCLAIMER:
/ This code is not supported under any Microsoft standard support program or service.
/ This code and information are provided "AS IS" without warranty of any kind, either expressed or implied.
/ The entire risk arising out of the use or performance of the script and documentation remains with you.
/ Furthermore, Microsoft or the author shall not be liable for any damages you may sustain by using this information, whether direct,
/ indirect, special, incidental or consequential, including, without limitation, damages for loss of business profits, business interruption, loss of business information
/ or other pecuniary loss even if it has been advised of the possibility of such damages.
/ Read all the implementation and usage notes thoroughly.
/
/ REQUIREMENTS:
/ - If not sysadmin, then you must be a member of MSDB SQLAgentOperatorRole role, or have SELECT permission on the sysalerts table in MSDB to run full scope of checks.
/ - If not sysadmin, then you must be a member of the securityadmin server role, or have EXECUTE permission on the following extended sprocs to run full scope of checks: xp_enumerrorlogs, xp_readerrorlog, sp_readerrorlog.
/ Check pre-requisites for all checks
/ +
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
-- @params and @sqlmajorver are consumed later in this batch (dynamic-SQL
-- probes and the xp_msver version gate); @sqlcmd appears unused here --
-- NOTE(review): likely shared boilerplate with the rest of BPCheck.
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600), @sqlmajorver int
-- High byte of @@microsoftversion is the SQL Server major version.
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
-- Fast path: a sysadmin can run every check; anyone else gets a warning.
IF (ISNULL(IS_SRVROLEMEMBER(N'sysadmin'), 0) = 0)
BEGIN
PRINT 'WARNING: Only a sysadmin can run ALL the checks'
END
ELSE
BEGIN
PRINT 'No issues found while checking pre-requisites to run checks: user is sysadmin'
END;
-- Non-sysadmin path: verify the granular grants needed for full coverage.
IF (ISNULL(IS_SRVROLEMEMBER(N'sysadmin'), 0) = 0)
BEGIN
-- Principal ids of the current login at the server level, in master and in msdb.
DECLARE @pid int, @pname sysname, @msdbpid int, @masterpid int
DECLARE @permstbl TABLE ([name] sysname);
DECLARE @permstbl_msdb TABLE ([id] tinyint IDENTITY(1,1), [perm] tinyint)
SET @params = '@msdbpid_in int'
SELECT @pid = principal_id, @pname=name FROM master.sys.server_principals (NOLOCK) WHERE sid = SUSER_SID();
SELECT @masterpid = principal_id FROM master.sys.database_principals (NOLOCK) WHERE sid = SUSER_SID();
SELECT @msdbpid = principal_id FROM msdb.sys.database_principals (NOLOCK) WHERE sid = SUSER_SID();
-- Perms 1: server-scope permissions (ALTER SETTINGS, VIEW SERVER STATE).
-- These two failures abort the script (RETURN); later ones only warn.
IF (ISNULL(IS_SRVROLEMEMBER(N'serveradmin'), 0) <> 1) AND ((SELECT COUNT(l.name)
FROM master.sys.server_permissions p (NOLOCK) INNER JOIN master.sys.server_principals l (NOLOCK)
ON p.grantee_principal_id = l.principal_id
AND p.class = 100 -- Server
AND p.state IN ('G', 'W') -- Granted or Granted with Grant
AND l.is_disabled = 0
AND p.permission_name = 'ALTER SETTINGS'
AND QUOTENAME(l.name) = QUOTENAME(@pname)) = 0)
BEGIN
RAISERROR('WARNING: If not sysadmin, then you must be a member of serveradmin server role or have the ALTER SETTINGS server permission. Exiting...', 16, 1, N'serveradmin')
RETURN
END
ELSE IF (ISNULL(IS_SRVROLEMEMBER(N'serveradmin'), 0) <> 1) AND ((SELECT COUNT(l.name)
FROM master.sys.server_permissions p (NOLOCK) INNER JOIN sys.server_principals l (NOLOCK)
ON p.grantee_principal_id = l.principal_id
AND p.class = 100 -- Server
AND p.state IN ('G', 'W') -- Granted or Granted with Grant
AND l.is_disabled = 0
AND p.permission_name = 'VIEW SERVER STATE'
AND QUOTENAME(l.name) = QUOTENAME(@pname)) = 0)
BEGIN
RAISERROR('WARNING: If not sysadmin, then you must be a member of serveradmin server role or granted the VIEW SERVER STATE permission. Exiting...', 16, 1, N'serveradmin')
RETURN
END
ELSE
BEGIN
RAISERROR('INFORMATION: No issues found while checking for sysadmin pre-requisites to run checks', 10, 1, N'serveradmin')
END;
-- Perms 2: EXECUTE grants on procs/extended procs in master for this login.
INSERT INTO @permstbl
SELECT a.name
FROM master.sys.all_objects a (NOLOCK) INNER JOIN master.sys.database_permissions b (NOLOCK) ON a.[OBJECT_ID] = b.major_id
WHERE a.type IN ('P', 'X') AND b.grantee_principal_id <>0
AND b.grantee_principal_id <> 2
AND b.grantee_principal_id = @masterpid;
-- Row 1 of @permstbl_msdb: is the login a member of msdb's SQLAgentOperatorRole?
INSERT INTO @permstbl_msdb ([perm])
EXECUTE sp_executesql N'USE msdb; SELECT COUNT([name])
FROM msdb.sys.sysusers (NOLOCK) WHERE [uid] IN (SELECT [groupuid]
FROM msdb.sys.sysmembers (NOLOCK) WHERE [memberuid] = @msdbpid_in)
AND [name] = ''SQLAgentOperatorRole''', @params, @msdbpid_in = @msdbpid;
-- Row 2: does the login hold direct SELECT (type 'SL') grants on msdb tables?
INSERT INTO @permstbl_msdb ([perm])
EXECUTE sp_executesql N'USE msdb; SELECT COUNT(dp.grantee_principal_id)
FROM msdb.sys.tables AS tbl (NOLOCK)
INNER JOIN msdb.sys.database_permissions AS dp (NOLOCK) ON dp.major_id=tbl.object_id AND dp.class=1
INNER JOIN msdb.sys.database_principals AS grantor_principal (NOLOCK) ON grantor_principal.principal_id = dp.grantor_principal_id
INNER JOIN msdb.sys.database_principals AS grantee_principal (NOLOCK) ON grantee_principal.principal_id = dp.grantee_principal_id
WHERE dp.state = ''G''
AND dp.grantee_principal_id = @msdbpid_in
AND dp.type = ''SL''', @params, @msdbpid_in = @msdbpid;
-- Each missing grant group below only warns (RETURN is commented out),
-- so the script continues with reduced check coverage.
IF (SELECT [perm] FROM @permstbl_msdb WHERE [id] = 1) = 0 AND (SELECT [perm] FROM @permstbl_msdb WHERE [id] = 2) = 0
BEGIN
RAISERROR('WARNING: If not sysadmin, then you must be a member of MSDB SQLAgentOperatorRole role, or have SELECT permission on the sysalerts table in MSDB to run full scope of checks', 16, 1, N'msdbperms')
--RETURN
END
ELSE IF (ISNULL(IS_SRVROLEMEMBER(N'securityadmin'), 0) <> 1) AND ((SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_enumerrorlogs') = 0 OR (SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'sp_readerrorlog') = 0 OR (SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_readerrorlog') = 0)
BEGIN
RAISERROR('WARNING: If not sysadmin, then you must be a member of the securityadmin server role, or have EXECUTE permission on the following extended sprocs to run full scope of checks: xp_enumerrorlogs, xp_readerrorlog, sp_readerrorlog', 16, 1, N'secperms')
--RETURN
END
ELSE IF (SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_cmdshell') = 0 OR (SELECT COUNT(credential_id) FROM master.sys.credentials WHERE name = '##xp_cmdshell_proxy_account##') = 0
BEGIN
RAISERROR('WARNING: If not sysadmin, then you must be granted EXECUTE permissions on xp_cmdshell and a xp_cmdshell proxy account should exist to run full scope of checks', 16, 1, N'xp_cmdshellproxy')
--RETURN
END
ELSE IF (SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_fileexist') = 0 OR
(SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'sp_OAGetErrorInfo') = 0 OR
(SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'sp_OACreate') = 0 OR
(SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'sp_OADestroy') = 0 OR
(SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_regenumvalues') = 0 OR
(SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_regread') = 0 OR
(SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_instance_regread') = 0 OR
(SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_servicecontrol') = 0
BEGIN
RAISERROR('WARNING: Must be a granted EXECUTE permissions on the following extended sprocs to run full scope of checks: sp_OACreate, sp_OADestroy, sp_OAGetErrorInfo, xp_fileexist, xp_regread, xp_instance_regread, xp_servicecontrol and xp_regenumvalues', 16, 1, N'extended_sprocs')
--RETURN
END
-- xp_msver is only needed on pre-2012 builds (major version < 11).
ELSE IF (SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_msver') = 0 AND @sqlmajorver < 11
BEGIN
RAISERROR('WARNING: Must be granted EXECUTE permissions on xp_msver to run full scope of checks', 16, 1, N'extended_sprocs')
--RETURN
END
ELSE
BEGIN
RAISERROR('INFORMATION: No issues found while checking for granular pre-requisites to run checks', 10, 1, N'extended_sprocs')
--RETURN
END
END;
/ +
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
-- Verifies that PowerShell is installed and allowed to run scripts, using
-- xp_regread / xp_cmdshell; sp_configure options are flipped on temporarily
-- and restored at the end.
DECLARE @src VARCHAR(255), @desc VARCHAR(255), @psavail VARCHAR(20), @psver tinyint, @masterpid int
DECLARE @agt smallint, @ole smallint, @sao smallint, @xcmd smallint
DECLARE @ErrorMessage NVARCHAR(4000)
DECLARE @permstbl TABLE ([name] sysname);
SELECT @masterpid = principal_id FROM master.sys.database_principals (NOLOCK) WHERE sid = SUSER_SID()
-- Collect the procs/extended procs this login can EXECUTE in master.
INSERT INTO @permstbl
SELECT a.name
FROM master.sys.all_objects a (NOLOCK) INNER JOIN master.sys.database_permissions b (NOLOCK) ON a.[OBJECT_ID] = b.major_id
WHERE a.type IN ('P', 'X') AND b.grantee_principal_id <>0
AND b.grantee_principal_id <> 2
AND b.grantee_principal_id = @masterpid;
-- Run the probe only when the login is sysadmin, or has the proxy account +
-- ALTER SETTINGS, or holds xp_regread and xp_cmdshell EXECUTE grants.
IF ISNULL(IS_SRVROLEMEMBER(N'sysadmin'), 0) = 1 -- Is sysadmin
OR ((ISNULL(IS_SRVROLEMEMBER(N'sysadmin'), 0) <> 1
AND (SELECT COUNT(credential_id) FROM sys.credentials WHERE name = '##xp_cmdshell_proxy_account##') > 0) -- Is not sysadmin but proxy account exists
AND (SELECT COUNT(l.name)
FROM sys.server_permissions p JOIN sys.server_principals l
ON p.grantee_principal_id = l.principal_id
AND p.class = 100 -- Server
AND p.state IN ('G', 'W') -- Granted or Granted with Grant
AND l.is_disabled = 0
AND p.permission_name = 'ALTER SETTINGS'
AND QUOTENAME(l.name) = QUOTENAME(USER_NAME())) = 0) -- Is not sysadmin but has alter settings permission
OR ((ISNULL(IS_SRVROLEMEMBER(N'sysadmin'), 0) <> 1
AND ((SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_regread') > 0 AND
(SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_cmdshell') > 0)))
BEGIN
DECLARE @pstbl_avail TABLE ([KeyExist] int)
BEGIN TRY
INSERT INTO @pstbl_avail
EXEC master.sys.xp_regread N'HKEY_LOCAL_MACHINE', N'SOFTWARE\Microsoft\PowerShell\1' -- check if Powershell is installed
END TRY
BEGIN CATCH
SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
SELECT @ErrorMessage = 'Could not determine if Powershell is installed - Error raised in TRY block. ' + ERROR_MESSAGE()
RAISERROR (@ErrorMessage, 16, 1);
END CATCH
-- Snapshot current sp_configure values so they can be restored afterwards.
SELECT @sao = CAST([value] AS smallint) FROM sys.configurations (NOLOCK) WHERE [name] = 'show advanced options'
SELECT @xcmd = CAST([value] AS smallint) FROM sys.configurations (NOLOCK) WHERE [name] = 'xp_cmdshell'
SELECT @ole = CAST([value] AS smallint) FROM sys.configurations (NOLOCK) WHERE [name] = 'Ole Automation Procedures'
RAISERROR ('Configuration options set for Powershell enablement verification', 10, 1) WITH NOWAIT
-- Temporarily enable what is needed; 'show advanced options' must go first
-- because the other two are advanced options.
IF @sao = 0
BEGIN
EXEC sp_configure 'show advanced options', 1; RECONFIGURE WITH OVERRIDE;
END
IF @xcmd = 0
BEGIN
EXEC sp_configure 'xp_cmdshell', 1; RECONFIGURE WITH OVERRIDE;
END
IF @ole = 0
BEGIN
EXEC sp_configure 'Ole Automation Procedures', 1; RECONFIGURE WITH OVERRIDE;
END
-- Registry key found: ask PowerShell itself for the execution policy.
IF (SELECT [KeyExist] FROM @pstbl_avail) = 1
BEGIN
DECLARE @psavail_output TABLE ([PS_OUTPUT] VARCHAR(2048));
INSERT INTO @psavail_output
EXEC master.dbo.xp_cmdshell N'%WINDIR%\System32\WindowsPowerShell\v1.0\powershell.exe -Command "Get-ExecutionPolicy"'
SELECT @psavail = [PS_OUTPUT] FROM @psavail_output WHERE [PS_OUTPUT] IS NOT NULL;
END
ELSE
BEGIN
RAISERROR ('WARNING: Powershell is not installed. Install WinRM to proceed with PS based checks',16,1);
END
IF (@psavail IS NOT NULL AND @psavail NOT IN ('RemoteSigned','Unrestricted'))
RAISERROR ('WARNING: Execution of Powershell scripts is disabled on this system.
To change the execution policy, type the following command in Powershell console: Set-ExecutionPolicy RemoteSigned
The Set-ExecutionPolicy cmdlet enables you to determine which Windows PowerShell scripts (if any) will be allowed to run on your computer. Windows PowerShell has four different execution policies:
Restricted - No scripts can be run. Windows PowerShell can be used only in interactive mode.
AllSigned - Only scripts signed by a trusted publisher can be run.
RemoteSigned - Downloaded scripts must be signed by a trusted publisher before they can be run.
Unrestricted - No restrictions; all Windows PowerShell scripts can be run; REQUIRED by BP Check.',16,1);
IF (@psavail IS NOT NULL AND @psavail IN ('RemoteSigned','Unrestricted'))
BEGIN
RAISERROR ('INFORMATION: Powershell is installed and enabled for script execution', 10, 1) WITH NOWAIT
DECLARE @psver_output TABLE ([PS_OUTPUT] VARCHAR(1024));
INSERT INTO @psver_output
EXEC master.dbo.xp_cmdshell N'%WINDIR%\System32\WindowsPowerShell\v1.0\powershell.exe -Command "Get-Host | Format-Table -Property Version"'
-- Gets PS version, as commands issued to PS v1 do not support -File
SELECT @psver = ISNULL(LEFT([PS_OUTPUT],1),2) FROM @psver_output WHERE [PS_OUTPUT] IS NOT NULL AND ISNUMERIC(LEFT([PS_OUTPUT],1)) = 1;
SET @ErrorMessage = 'INFORMATION: Installed Powershell is version ' + CONVERT(CHAR(1), @psver) + ''
PRINT @ErrorMessage
END;
-- Restore every option that was originally off; 'show advanced options' last.
IF @xcmd = 0
BEGIN
EXEC sp_configure 'xp_cmdshell', 0; RECONFIGURE WITH OVERRIDE;
END
IF @ole = 0
BEGIN
EXEC sp_configure 'Ole Automation Procedures', 0; RECONFIGURE WITH OVERRIDE;
END
IF @sao = 0
BEGIN
EXEC sp_configure 'show advanced options', 0; RECONFIGURE WITH OVERRIDE;
END;
END
ELSE
BEGIN
RAISERROR ('WARNING: Missing permissions for Powershell enablement verification', 16, 1) WITH NOWAIT
END;
/ -
/ Information section
/ Uptime subsection
/ +
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
-- Reports instance start time and uptime.
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600), @sqlmajorver int
DECLARE @UpTime VARCHAR(12),@StartDate DATETIME
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
-- Pre-2008 (major < 10) has no sqlserver_start_time in sys.dm_os_sys_info;
-- fall back to the login time of system spid 1.
IF @sqlmajorver < 10
BEGIN
SET @sqlcmd = N'SELECT @UpTimeOUT = DATEDIFF(mi, login_time, GETDATE()), @StartDateOUT = login_time FROM master..sysprocesses (NOLOCK) WHERE spid = 1';
END
ELSE
BEGIN
SET @sqlcmd = N'SELECT @UpTimeOUT = DATEDIFF(mi,sqlserver_start_time,GETDATE()), @StartDateOUT = sqlserver_start_time FROM sys.dm_os_sys_info (NOLOCK)';
END
SET @params = N'@UpTimeOUT VARCHAR(12) OUTPUT, @StartDateOUT DATETIME OUTPUT';
EXECUTE sp_executesql @sqlcmd, @params, @UpTimeOUT=@UpTime OUTPUT, @StartDateOUT=@StartDate OUTPUT;
-- Render uptime minutes as "Xd Yhr Zmin".
SELECT 'Information' AS [Category], 'Uptime' AS [Information], GETDATE() AS [Current_Time], @StartDate AS Last_Startup, CONVERT(VARCHAR(4),@UpTime/60/24) + 'd ' + CONVERT(VARCHAR(4),@UpTime/60%24) + 'hr ' + CONVERT(VARCHAR(4),@UpTime%60) + 'min' AS Uptime
/ -
/ OS Version and Architecture subsection
/ +
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
-- Collects OS version/architecture plus machine/BIOS details from the registry.
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
DECLARE @sqlmajorver int, @sqlminorver int, @sqlbuild int
DECLARE @ErrorMessage NVARCHAR(4000)
-- NOTE(review): @clustered, @server and @instancename are declared but never
-- used in this batch -- presumably leftovers from the full BPCheck script.
DECLARE @clustered bit, @osver VARCHAR(5), @ostype VARCHAR(10), @osdistro VARCHAR(20), @server VARCHAR(128), @instancename NVARCHAR(128), @arch smallint, @ossp VARCHAR(25), @SystemManufacturer VARCHAR(128)
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
SELECT @sqlminorver = CONVERT(int, (@@microsoftversion / 0x10000) & 0xff);
SELECT @sqlbuild = CONVERT(int, @@microsoftversion & 0xffff);
-- SQL 2012-2016, or 2008R2 SP1+ (10.50.2500+): use sys.dm_os_windows_info.
IF (@sqlmajorver >= 11 AND @sqlmajorver < 14) OR (@sqlmajorver = 10 AND @sqlminorver = 50 AND @sqlbuild >= 2500)
BEGIN
SET @sqlcmd = N'SELECT @ostypeOUT = ''Windows'', @osdistroOUT = ''Windows'', @osverOUT = CASE WHEN windows_release IN (''6.3'',''10.0'') AND (@@VERSION LIKE ''%Build 10586%'' OR @@VERSION LIKE ''%Build 14393%'') THEN ''10.0'' ELSE windows_release END, @osspOUT = windows_service_pack_level, @archOUT = CASE WHEN @@VERSION LIKE ''%<X64>%'' THEN 64 WHEN @@VERSION LIKE ''%<IA64>%'' THEN 128 ELSE 32 END FROM sys.dm_os_windows_info (NOLOCK)';
SET @params = N'@osverOUT VARCHAR(5) OUTPUT, @ostypeOUT VARCHAR(10) OUTPUT, @osdistroOUT VARCHAR(20) OUTPUT, @osspOUT VARCHAR(25) OUTPUT, @archOUT smallint OUTPUT';
EXECUTE sp_executesql @sqlcmd, @params, @osverOUT=@osver OUTPUT, @ostypeOUT=@ostype OUTPUT, @osdistroOUT=@osdistro OUTPUT, @osspOUT=@ossp OUTPUT, @archOUT=@arch OUTPUT;
END
-- SQL 2017+: sys.dm_os_host_info also covers Linux hosts.
ELSE IF @sqlmajorver >= 14
BEGIN
SET @sqlcmd = N'SELECT @ostypeOUT = host_platform, @osdistroOUT = host_distribution, @osverOUT = CASE WHEN host_platform = ''Windows'' AND host_release IN (''6.3'',''10.0'') THEN ''10.0'' ELSE host_release END, @osspOUT = host_service_pack_level, @archOUT = CASE WHEN @@VERSION LIKE ''%<X64>%'' THEN 64 ELSE 32 END FROM sys.dm_os_host_info (NOLOCK)';
SET @params = N'@osverOUT VARCHAR(5) OUTPUT, @ostypeOUT VARCHAR(10) OUTPUT, @osdistroOUT VARCHAR(20) OUTPUT, @osspOUT VARCHAR(25) OUTPUT, @archOUT smallint OUTPUT';
EXECUTE sp_executesql @sqlcmd, @params, @osverOUT=@osver OUTPUT, @ostypeOUT=@ostype OUTPUT, @osdistroOUT=@osdistro OUTPUT, @osspOUT=@ossp OUTPUT, @archOUT=@arch OUTPUT;
END
-- Older builds: scrape xp_msver and @@VERSION instead.
ELSE
BEGIN
BEGIN TRY
DECLARE @str VARCHAR(500), @str2 VARCHAR(500), @str3 VARCHAR(500)
DECLARE @sysinfo TABLE (id int,
[Name] NVARCHAR(256),
Internal_Value bigint,
Character_Value NVARCHAR(256));
INSERT INTO @sysinfo
EXEC xp_msver;
SELECT @osver = LEFT(Character_Value, CHARINDEX(' ', Character_Value)-1) -- 5.2 is WS2003; 6.0 is WS2008; 6.1 is WS2008R2; 6.2 is WS2012, 6.3 is WS2012R2, 6.3 (14396) is WS2016
FROM @sysinfo
WHERE [Name] LIKE 'WindowsVersion%';
SELECT @arch = CASE WHEN RTRIM(Character_Value) LIKE '%x64%' OR RTRIM(Character_Value) LIKE '%AMD64%' THEN 64
WHEN RTRIM(Character_Value) LIKE '%x86%' OR RTRIM(Character_Value) LIKE '%32%' THEN 32
WHEN RTRIM(Character_Value) LIKE '%IA64%' THEN 128 END
FROM @sysinfo
WHERE [Name] LIKE 'Platform%';
-- Extract the service-pack text between 'Windows ... : ' and ')' in @@VERSION.
SET @str = (SELECT @@VERSION)
SELECT @str2 = RIGHT(@str, LEN(@str)-CHARINDEX('Windows',@str) + 1)
SELECT @str3 = RIGHT(@str2, LEN(@str2)-CHARINDEX(': ',@str2))
SELECT @ossp = LTRIM(LEFT(@str3, CHARINDEX(')',@str3) -1))
SET @ostype = 'Windows'
END TRY
BEGIN CATCH
SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
SELECT @ErrorMessage = 'Windows Version and Architecture subsection - Error raised in TRY block. ' + ERROR_MESSAGE()
RAISERROR (@ErrorMessage, 16, 1);
END CATCH
END;
-- Hardware/BIOS details from the registry (Windows only).
DECLARE @machineinfo TABLE ([Value] NVARCHAR(256), [Data] NVARCHAR(256))
IF @ostype = 'Windows'
BEGIN
INSERT INTO @machineinfo
EXEC xp_instance_regread 'HKEY_LOCAL_MACHINE','HARDWARE\DESCRIPTION\System\BIOS','SystemManufacturer';
INSERT INTO @machineinfo
EXEC xp_instance_regread 'HKEY_LOCAL_MACHINE','HARDWARE\DESCRIPTION\System\BIOS','SystemProductName';
INSERT INTO @machineinfo
EXEC xp_instance_regread 'HKEY_LOCAL_MACHINE','HARDWARE\DESCRIPTION\System\BIOS','SystemFamily';
INSERT INTO @machineinfo
EXEC xp_instance_regread 'HKEY_LOCAL_MACHINE','HARDWARE\DESCRIPTION\System\BIOS','BIOSVendor';
INSERT INTO @machineinfo
EXEC xp_instance_regread 'HKEY_LOCAL_MACHINE','HARDWARE\DESCRIPTION\System\BIOS','BIOSVersion';
INSERT INTO @machineinfo
EXEC xp_instance_regread 'HKEY_LOCAL_MACHINE','HARDWARE\DESCRIPTION\System\BIOS','BIOSReleaseDate';
INSERT INTO @machineinfo
EXEC xp_instance_regread 'HKEY_LOCAL_MACHINE','HARDWARE\DESCRIPTION\System\CentralProcessor\0','ProcessorNameString';
END
SELECT @SystemManufacturer = [Data] FROM @machineinfo WHERE [Value] = 'SystemManufacturer';
-- Final report row: decode @osver into a friendly OS name.
SELECT 'Information' AS [Category], 'Machine' AS [Information],
CASE @osver WHEN '5.2' THEN 'XP/WS2003'
WHEN '6.0' THEN 'Vista/WS2008'
WHEN '6.1' THEN 'W7/WS2008R2'
WHEN '6.2' THEN 'W8/WS2012'
WHEN '6.3' THEN 'W8.1/WS2012R2'
WHEN '10.0' THEN 'W10/WS2016'
ELSE @ostype + ' ' + @osdistro
END AS [OS_Version],
CASE WHEN @ostype = 'Windows' THEN @ossp ELSE @osver END AS [Service_Pack_Level],
@arch AS [Architecture],
SERVERPROPERTY('MachineName') AS [Machine_Name],
SERVERPROPERTY('ComputerNamePhysicalNetBIOS') AS [NetBIOS_Name],
@SystemManufacturer AS [System_Manufacturer],
(SELECT [Data] FROM @machineinfo WHERE [Value] = 'SystemFamily') AS [System_Family],
(SELECT [Data] FROM @machineinfo WHERE [Value] = 'SystemProductName') AS [System_ProductName],
(SELECT [Data] FROM @machineinfo WHERE [Value] = 'BIOSVendor') AS [BIOS_Vendor],
(SELECT [Data] FROM @machineinfo WHERE [Value] = 'BIOSVersion') AS [BIOS_Version],
(SELECT [Data] FROM @machineinfo WHERE [Value] = 'BIOSReleaseDate') AS [BIOS_Release_Date],
(SELECT [Data] FROM @machineinfo WHERE [Value] = 'ProcessorNameString') AS [Processor_Name];
GO
--------------------------------------------------------------------------------------------------------------------------------
-- Disk space subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Disk space: one row per volume hosting any SQL Server database file, with total
-- and free space in MB. Requires SQL Server 2008+ (@sqlmajorver > 9) for
-- sys.dm_os_volume_stats; older versions emit nothing.
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @sqlmajorver int
-- NOTE(review): removed an unused DECLARE @ErrorMessage NVARCHAR(4000) - it was
-- never referenced anywhere in this subsection.
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
IF @sqlmajorver > 9
BEGIN
-- The CROSS APPLY yields one row per database file; DISTINCT collapses that back
-- to one row per volume. Volumes with the least free space sort first.
SELECT DISTINCT 'Information' AS [Category], 'Disk_Space' AS [Information], vs.logical_volume_name,
vs.volume_mount_point, vs.file_system_type, CONVERT(int,vs.total_bytes/1048576.0) AS TotalSpace_MB,
CONVERT(int,vs.available_bytes/1048576.0) AS FreeSpace_MB, vs.is_compressed
FROM sys.master_files mf
CROSS APPLY sys.dm_os_volume_stats(mf.database_id, mf.[file_id]) vs
ORDER BY FreeSpace_MB ASC
END;
GO
--------------------------------------------------------------------------------------------------------------------------------
-- HA Information subsection
--------------------------------------------------------------------------------------------------------------------------------
-- High-availability information: failover-cluster node/drive details, then
-- AlwaysOn Availability Group state and replica health (SQL Server 2012+ only).
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
DECLARE @sqlmajorver int, @sqlminorver int, @sqlbuild int, @clustered bit
DECLARE @ptochecks bit
SET @ptochecks = 1 --(1 = ON; 0 = OFF)
-- Decode @@microsoftversion: high byte = major, next byte = minor, low word = build.
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
SELECT @sqlminorver = CONVERT(int, (@@microsoftversion / 0x10000) & 0xff);
SELECT @sqlbuild = CONVERT(int, @@microsoftversion & 0xffff);
SELECT @clustered = CONVERT(bit,ISNULL(SERVERPROPERTY('IsClustered'),0))
IF @clustered = 1
BEGIN
IF @sqlmajorver < 11
BEGIN
-- Pre-2012 sys.dm_os_cluster_nodes lacks status/ownership columns, so the column
-- list differs per version; EXEC of a literal keeps each variant compiling.
EXEC ('SELECT ''Information'' AS [Category], ''Cluster'' AS [Information], NodeName AS node_name FROM sys.dm_os_cluster_nodes (NOLOCK)')
END
ELSE
BEGIN
EXEC ('SELECT ''Information'' AS [Category], ''Cluster'' AS [Information], NodeName AS node_name, status_description, is_current_owner FROM sys.dm_os_cluster_nodes (NOLOCK)')
END
SELECT 'Information' AS [Category], 'Cluster' AS [Information], DriveName AS cluster_shared_drives FROM sys.dm_io_cluster_shared_drives (NOLOCK)
END
ELSE
BEGIN
SELECT 'Information' AS [Category], 'Cluster' AS [Information], 'NOT_CLUSTERED' AS [Status]
END;
-- AlwaysOn AG reporting applies from SQL Server 2012 (major version 11) onward.
IF @sqlmajorver > 10
BEGIN
DECLARE @IsHadrEnabled tinyint, @HadrManagerStatus tinyint
SELECT @IsHadrEnabled = CONVERT(tinyint, SERVERPROPERTY('IsHadrEnabled'))
SELECT @HadrManagerStatus = CONVERT(tinyint, SERVERPROPERTY('HadrManagerStatus'))
SELECT 'Information' AS [Category], 'AlwaysOn_AG' AS [Information],
CASE @IsHadrEnabled WHEN 0 THEN 'Disabled'
WHEN 1 THEN 'Enabled' END AS [AlwaysOn_Availability_Groups],
-- Status is only meaningful when HADR is enabled; otherwise it stays NULL.
CASE WHEN @IsHadrEnabled = 1 THEN
CASE @HadrManagerStatus WHEN 0 THEN '[Not started, pending communication]'
WHEN 1 THEN '[Started and running]'
WHEN 2 THEN '[Not started and failed]'
END
END AS [Status];
IF @IsHadrEnabled = 1
BEGIN
-- EXISTS guards keep empty result sets from being emitted when the DMVs have no rows.
IF EXISTS (SELECT 1 FROM sys.dm_hadr_cluster)
SELECT 'Information' AS [Category], 'AlwaysOn_Cluster' AS [Information], cluster_name, quorum_type_desc, quorum_state_desc
FROM sys.dm_hadr_cluster;
IF EXISTS (SELECT 1 FROM sys.dm_hadr_cluster_members)
SELECT 'Information' AS [Category], 'AlwaysOn_Cluster_Members' AS [Information], member_name, member_type_desc, member_state_desc, number_of_quorum_votes
FROM sys.dm_hadr_cluster_members;
IF EXISTS (SELECT 1 FROM sys.dm_hadr_cluster_networks)
SELECT 'Information' AS [Category], 'AlwaysOn_Cluster_Networks' AS [Information], member_name, network_subnet_ip, network_subnet_ipv4_mask, is_public, is_ipv4
FROM sys.dm_hadr_cluster_networks;
END;
-- Deeper replica-state checks only when the @ptochecks switch above is on.
IF @ptochecks = 1 AND @IsHadrEnabled = 1
BEGIN
-- Note: If low_water_mark_for_ghosts number is not increasing over time, it implies that ghost cleanup might not happen.
-- secondary_lag_seconds only exists on SQL Server 2016+ (major > 12), hence dynamic SQL.
SET @sqlcmd = 'SELECT ''Information'' AS [Category], ''AlwaysOn_Replicas'' AS [Information], database_id, group_id, replica_id, group_database_id, is_local, synchronization_state_desc,
is_commit_participant, synchronization_health_desc, database_state_desc, is_suspended, suspend_reason_desc, last_sent_time, last_received_time, last_hardened_time,
last_redone_time, log_send_queue_size, log_send_rate, redo_queue_size, redo_rate, filestream_send_rate, last_commit_time,
low_water_mark_for_ghosts' + CASE WHEN @sqlmajorver > 12 THEN ', secondary_lag_seconds' ELSE '' END + '
FROM sys.dm_hadr_database_replica_states'
EXECUTE sp_executesql @sqlcmd
SELECT 'Information' AS [Category], 'AlwaysOn_Replica_Cluster' AS [Information], replica_id, group_database_id, database_name, is_failover_ready, is_pending_secondary_suspend,
is_database_joined, recovery_lsn, truncation_lsn
FROM sys.dm_hadr_database_replica_cluster_states;
END
END;
GO
--------------------------------------------------------------------------------------------------------------------------------
-- Linked servers info subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Linked servers: list linked-server definitions together with their login
-- mappings; emits a single 'None' status row when no linked servers exist.
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
DECLARE @sqlmajorver int
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
-- EXISTS short-circuits on the first match; the original (SELECT COUNT(*) ...) > 0
-- counted every linked-login row just to test for presence.
IF EXISTS (SELECT 1 FROM sys.servers AS s (NOLOCK) INNER JOIN sys.linked_logins AS l (NOLOCK) ON s.server_id = l.server_id INNER JOIN sys.server_principals AS p (NOLOCK) ON p.principal_id = l.local_principal_id WHERE s.is_linked = 1)
BEGIN
-- is_remote_proc_transaction_promotion_enabled only exists on SQL Server 2008+
-- (@sqlmajorver > 9), hence the dynamic SQL with a conditional column.
SET @sqlcmd = 'SELECT ''Information'' AS [Category], ''Linked_servers'' AS [Information], s.name, s.product,
s.provider, s.data_source, s.location, s.provider_string, s.catalog, s.connect_timeout,
s.query_timeout, s.is_linked, s.is_remote_login_enabled, s.is_rpc_out_enabled,
s.is_data_access_enabled, s.is_collation_compatible, s.uses_remote_collation, s.collation_name,
s.lazy_schema_validation, s.is_system, s.is_publisher, s.is_subscriber, s.is_distributor,
s.is_nonsql_subscriber' + CASE WHEN @sqlmajorver > 9 THEN ', s.is_remote_proc_transaction_promotion_enabled' ELSE '' END + ',
s.modify_date, CASE WHEN l.local_principal_id = 0 THEN ''local or wildcard'' ELSE p.name END AS [local_principal],
CASE WHEN l.uses_self_credential = 0 THEN ''use own credentials'' ELSE ''use supplied username and pwd'' END AS uses_self_credential,
l.remote_name, l.modify_date AS [linked_login_modify_date]
FROM sys.servers AS s (NOLOCK)
INNER JOIN sys.linked_logins AS l (NOLOCK) ON s.server_id = l.server_id
INNER JOIN sys.server_principals AS p (NOLOCK) ON p.principal_id = l.local_principal_id
WHERE s.is_linked = 1'
EXECUTE sp_executesql @sqlcmd
END
ELSE
BEGIN
SELECT 'Information' AS [Category], 'Linked_servers' AS [Information], 'None' AS [Status]
END;
GO
--------------------------------------------------------------------------------------------------------------------------------
-- Instance info subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Instance info, part 1: declarations plus a snapshot of which system/extended
-- procedures the current login holds explicit permissions on, so the permission-
-- dependent registry checks below can be bypassed gracefully instead of failing.
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
DECLARE @ErrorMessage NVARCHAR(4000)
DECLARE @sqlmajorver int, @sqlminorver int, @sqlbuild int, @masterpid int
DECLARE @port VARCHAR(15), @replication int, @RegKey NVARCHAR(255), @cpuaffin VARCHAR(255), @cpucount int, @numa int
DECLARE @i int, @cpuaffin_fixed VARCHAR(300), @affinitymask NVARCHAR(64), @affinity64mask NVARCHAR(64), @cpuover32 int
DECLARE @permstbl TABLE ([name] sysname);
-- Resolve the current login's principal id inside master.
SELECT @masterpid = principal_id FROM master.sys.database_principals (NOLOCK) WHERE sid = SUSER_SID()
-- Names of procedures/extended procedures (type P or X) on which this principal
-- has been granted permissions directly.
INSERT INTO @permstbl
SELECT a.name
FROM master.sys.all_objects a (NOLOCK) INNER JOIN master.sys.database_permissions b (NOLOCK) ON a.[OBJECT_ID] = b.major_id
WHERE a.type IN ('P', 'X') AND b.grantee_principal_id <>0
AND b.grantee_principal_id <> 2
AND b.grantee_principal_id = @masterpid;
-- Decode @@microsoftversion: high byte = major, next byte = minor, low word = build.
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
SELECT @sqlminorver = CONVERT(int, (@@microsoftversion / 0x10000) & 0xff);
SELECT @sqlbuild = CONVERT(int, @@microsoftversion & 0xffff);
-- Instance info, part 2: determine the instance's TCP port (@port).
-- Old builds (pre-2012, and 2008R2 before SP1 build 2500) must read the registry;
-- newer builds can query sys.dm_tcp_listener_states instead.
IF @sqlmajorver < 11 OR (@sqlmajorver = 10 AND @sqlminorver = 50 AND @sqlbuild < 2500)
BEGIN
-- Registry path requires sysadmin or explicit EXECUTE on xp_regread.
IF (ISNULL(IS_SRVROLEMEMBER(N'sysadmin'), 0) = 1) OR ((SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_regread') = 1)
BEGIN
BEGIN TRY
-- Default instances live under MSSQLServer; named instances under their own key.
SELECT @RegKey = CASE WHEN CONVERT(VARCHAR(128), SERVERPROPERTY('InstanceName')) IS NULL THEN N'Software\Microsoft\MSSQLServer\MSSQLServer\SuperSocketNetLib\Tcp'
ELSE N'Software\Microsoft\Microsoft SQL Server\' + CAST(SERVERPROPERTY('InstanceName') AS NVARCHAR(128)) + N'\MSSQLServer\SuperSocketNetLib\Tcp' END
EXEC master.sys.xp_regread N'HKEY_LOCAL_MACHINE', @RegKey, N'TcpPort', @port OUTPUT, NO_OUTPUT
END TRY
BEGIN CATCH
SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
SELECT @ErrorMessage = 'Instance info subsection - Error raised in TRY block 1. ' + ERROR_MESSAGE()
RAISERROR (@ErrorMessage, 16, 1);
END CATCH
END
ELSE
BEGIN
RAISERROR('WARNING: Missing permissions for full "Instance info" checks. Bypassing TCP port check', 16, 1, N'sysadmin')
--RETURN
END
END
ELSE
BEGIN
BEGIN TRY
/*
SET @sqlcmd = N'SELECT @portOUT = MAX(CONVERT(VARCHAR(15),value_data)) FROM sys.dm_server_registry WHERE registry_key LIKE ''%MSSQLServer\SuperSocketNetLib\Tcp\%'' AND value_name LIKE N''%TcpPort%'' AND CONVERT(float,value_data) > 0;';
SET @params = N'@portOUT VARCHAR(15) OUTPUT';
EXECUTE sp_executesql @sqlcmd, @params, @portOUT = @port OUTPUT;
IF @port IS NULL
BEGIN
SET @sqlcmd = N'SELECT @portOUT = CONVERT(VARCHAR(15),value_data) FROM sys.dm_server_registry WHERE registry_key LIKE ''%MSSQLServer\SuperSocketNetLib\Tcp\%'' AND value_name LIKE N''%TcpDynamicPort%'' AND CONVERT(float,value_data) > 0;';
SET @params = N'@portOUT VARCHAR(15) OUTPUT';
EXECUTE sp_executesql @sqlcmd, @params, @portOUT = @port OUTPUT;
END
*/
-- Prefer an IPv4 TSQL listener that is not loopback; fall back to IPv6 if none.
SET @sqlcmd = N'SELECT @portOUT = MAX(CONVERT(VARCHAR(15),port)) FROM sys.dm_tcp_listener_states WHERE is_ipv4 = 1 AND [type] = 0 AND ip_address <> ''127.0.0.1'';';
SET @params = N'@portOUT VARCHAR(15) OUTPUT';
EXECUTE sp_executesql @sqlcmd, @params, @portOUT = @port OUTPUT;
IF @port IS NULL
BEGIN
SET @sqlcmd = N'SELECT @portOUT = MAX(CONVERT(VARCHAR(15),port)) FROM sys.dm_tcp_listener_states WHERE is_ipv4 = 0 AND [type] = 0 AND ip_address <> ''127.0.0.1'';';
SET @params = N'@portOUT VARCHAR(15) OUTPUT';
EXECUTE sp_executesql @sqlcmd, @params, @portOUT = @port OUTPUT;
END
END TRY
BEGIN CATCH
SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
SELECT @ErrorMessage = 'Instance info subsection - Error raised in TRY block 2. ' + ERROR_MESSAGE()
RAISERROR (@ErrorMessage, 16, 1);
END CATCH
END
-- Instance info, part 3: detect whether replication components are installed
-- (@replication), reading the instance registry hive. Needs sysadmin or explicit
-- EXECUTE on xp_instance_regread; otherwise the check is skipped with a warning.
IF (ISNULL(IS_SRVROLEMEMBER(N'sysadmin'), 0) = 1) OR ((SELECT COUNT([name]) FROM @permstbl WHERE [name] = 'xp_instance_regread') = 1)
BEGIN
BEGIN TRY
EXEC master..xp_instance_regread N'HKEY_LOCAL_MACHINE', N'SOFTWARE\Microsoft\MSSQLServer\Replication', N'IsInstalled', @replication OUTPUT, NO_OUTPUT
END TRY
BEGIN CATCH
SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
SELECT @ErrorMessage = 'Instance info subsection - Error raised in TRY block 3. ' + ERROR_MESSAGE()
RAISERROR (@ErrorMessage, 16, 1);
END CATCH
END
ELSE
BEGIN
RAISERROR('WARNING: Missing permissions for full "Instance info" checks. Bypassing replication check', 16, 1, N'sysadmin')
--RETURN
END
-- Instance info, part 4: build a printable CPU affinity bitmask (@cpuaffin_fixed).
-- The 'affinity mask'/'affinity64 mask' sql_variant values are converted to a bit
-- string by probing each bit of their binary representation.
SELECT @cpucount = COUNT(cpu_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64
SELECT @numa = COUNT(DISTINCT parent_node_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64;
-- bits = bit positions 7..0 with their powers of two; bytes = byte offsets 1..9.
-- The cross join walks every bit of the binary(9) image of the config value.
;WITH bits AS
(SELECT 7 AS N, 128 AS E UNION ALL SELECT 6, 64 UNION ALL
SELECT 5, 32 UNION ALL SELECT 4, 16 UNION ALL SELECT 3, 8 UNION ALL
SELECT 2, 4 UNION ALL SELECT 1, 2 UNION ALL SELECT 0, 1),
bytes AS
(SELECT 1 M UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL
SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL
SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9)
-- CPU Affinity is shown highest to lowest CPU ID
-- Value 0 means "use all CPUs", rendered as an all-ones string.
SELECT @affinitymask = CASE WHEN [value] = 0 THEN REPLICATE('1', @cpucount)
ELSE RIGHT((SELECT ((CONVERT(tinyint, SUBSTRING(CONVERT(binary(9), [value]), M, 1)) & E) / E) AS [text()]
FROM bits CROSS JOIN bytes
ORDER BY M, N DESC
FOR XML PATH('')), @cpucount) END
FROM sys.configurations (NOLOCK)
WHERE name = 'affinity mask';
-- With more than 32 schedulers the upper CPUs live in 'affinity64 mask'.
IF @cpucount > 32
BEGIN
;WITH bits AS
(SELECT 7 AS N, 128 AS E UNION ALL SELECT 6, 64 UNION ALL
SELECT 5, 32 UNION ALL SELECT 4, 16 UNION ALL SELECT 3, 8 UNION ALL
SELECT 2, 4 UNION ALL SELECT 1, 2 UNION ALL SELECT 0, 1),
bytes AS
(SELECT 1 M UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL
SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL
SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9)
-- CPU Affinity is shown highest to lowest CPU ID
SELECT @affinity64mask = CASE WHEN [value] = 0 THEN REPLICATE('1', @cpucount)
ELSE RIGHT((SELECT ((CONVERT(tinyint, SUBSTRING(CONVERT(binary(9), [value]), M, 1)) & E) / E) AS [text()]
FROM bits CROSS JOIN bytes
ORDER BY M, N DESC
FOR XML PATH('')), @cpucount) END
FROM sys.configurations (NOLOCK)
WHERE name = 'affinity64 mask';
END;
IF @cpucount > 32
SELECT @cpuover32 = ABS(LEN(@affinity64mask) - (@cpucount-32))
-- Stitch the >32-CPU tail (from affinity64) onto the low 32 bits (from affinity).
SELECT @cpuaffin = CASE WHEN @cpucount > 32 THEN REVERSE(LEFT(REVERSE(@affinity64mask),@cpuover32)) + RIGHT(@affinitymask,32) ELSE RIGHT(@affinitymask,@cpucount) END
SET @cpuaffin_fixed = @cpuaffin
IF @numa > 1
BEGIN
-- format binary mask by node for better reading
SET @i = @cpucount/@numa + 1
WHILE @i < @cpucount + @numa
BEGIN
-- Insert an underscore separator at each NUMA-node boundary.
SELECT @cpuaffin_fixed = STUFF(@cpuaffin_fixed, @i, 1, '_' + SUBSTRING(@cpuaffin_fixed, @i, 1))
SET @i = @i + @cpucount/@numa + 1
END
END
-- Instance info, part 5: emit the consolidated "Instance" row using the values
-- gathered above (@port, @replication, @cpuaffin_fixed) plus SERVERPROPERTY facts.
SELECT 'Information' AS [Category], 'Instance' AS [Information],
(CASE WHEN CONVERT(VARCHAR(128), SERVERPROPERTY('InstanceName')) IS NULL THEN 'DEFAULT_INSTANCE'
ELSE CONVERT(VARCHAR(128), SERVERPROPERTY('InstanceName')) END) AS Instance_Name,
(CASE WHEN SERVERPROPERTY('IsClustered') = 1 THEN 'CLUSTERED'
WHEN SERVERPROPERTY('IsClustered') = 0 THEN 'NOT_CLUSTERED'
ELSE 'INVALID INPUT/ERROR' END) AS Failover_Clustered,
/*The version of SQL Server instance in the form: major.minor.build*/
CONVERT(VARCHAR(128), SERVERPROPERTY('ProductVersion')) AS Product_Version,
/*Level of the version of SQL Server Instance*/
-- ProductBuildType/ProductUpdateLevel/ProductUpdateReference only exist on builds
-- that ship servicing metadata (2012 SP3+, specific 2014 ranges, 2016+); older
-- builds report 'NA'.
CASE WHEN (@sqlmajorver = 11 AND @sqlminorver >= 6020) OR (@sqlmajorver = 12 AND @sqlminorver BETWEEN 2556 AND 2569) OR (@sqlmajorver = 12 AND @sqlminorver >= 4427) OR @sqlmajorver >= 13 THEN
CONVERT(VARCHAR(128), SERVERPROPERTY('ProductBuildType'))
ELSE 'NA' END AS Product_Build_Type,
CONVERT(VARCHAR(128), SERVERPROPERTY('ProductLevel')) AS Product_Level,
CASE WHEN (@sqlmajorver = 11 AND @sqlminorver >= 6020) OR (@sqlmajorver = 12 AND @sqlminorver BETWEEN 2556 AND 2569) OR (@sqlmajorver = 12 AND @sqlminorver >= 4427) OR @sqlmajorver >= 13 THEN
CONVERT(VARCHAR(128), SERVERPROPERTY('ProductUpdateLevel'))
ELSE 'NA' END AS Product_Update_Level,
CASE WHEN (@sqlmajorver = 11 AND @sqlminorver >= 6020) OR (@sqlmajorver = 12 AND @sqlminorver BETWEEN 2556 AND 2569) OR (@sqlmajorver = 12 AND @sqlminorver >= 4427) OR @sqlmajorver >= 13 THEN
CONVERT(VARCHAR(128), SERVERPROPERTY('ProductUpdateReference'))
ELSE 'NA' END AS Product_Update_Ref_KB,
CONVERT(VARCHAR(128), SERVERPROPERTY('Edition')) AS Edition,
CONVERT(VARCHAR(128), SERVERPROPERTY('MachineName')) AS Machine_Name,
RTRIM(@port) AS TCP_Port,
@@SERVICENAME AS Service_Name,
/*To identify which sqlservr.exe belongs to this instance*/
SERVERPROPERTY('ProcessID') AS Process_ID,
CONVERT(VARCHAR(128), SERVERPROPERTY('ServerName')) AS Server_Name,
@cpuaffin_fixed AS Affinity_Mask_Bitmask,
CONVERT(VARCHAR(128), SERVERPROPERTY('Collation')) AS [Server_Collation],
(CASE WHEN @replication = 1 THEN 'Installed'
WHEN @replication = 0 THEN 'Not_Installed'
ELSE 'INVALID INPUT/ERROR' END) AS Replication_Components_Installation,
(CASE WHEN SERVERPROPERTY('IsFullTextInstalled') = 1 THEN 'Installed'
WHEN SERVERPROPERTY('IsFulltextInstalled') = 0 THEN 'Not_Installed'
ELSE 'INVALID INPUT/ERROR' END) AS Full_Text_Installation,
(CASE WHEN SERVERPROPERTY('IsIntegratedSecurityOnly') = 1 THEN 'Integrated_Security'
WHEN SERVERPROPERTY('IsIntegratedSecurityOnly') = 0 THEN 'SQL_Server_Security'
ELSE 'INVALID INPUT/ERROR' END) AS [Security],
(CASE WHEN SERVERPROPERTY('IsSingleUser') = 1 THEN 'Single_User'
WHEN SERVERPROPERTY('IsSingleUser') = 0 THEN 'Multi_User'
ELSE 'INVALID INPUT/ERROR' END) AS [Single_User],
(CASE WHEN CONVERT(VARCHAR(128), SERVERPROPERTY('LicenseType')) = 'PER_SEAT' THEN 'Per_Seat_Mode'
WHEN CONVERT(VARCHAR(128), SERVERPROPERTY('LicenseType')) = 'PER_PROCESSOR' THEN 'Per_Processor_Mode'
ELSE 'Disabled' END) AS License_Type, -- From SQL Server 2008R2 always returns DISABLED.
CONVERT(NVARCHAR(128), SERVERPROPERTY('BuildClrVersion')) AS CLR_Version,
-- Version-gated properties below report 'Not compatible' on builds that predate them.
CASE WHEN @sqlmajorver >= 10 THEN
CASE WHEN SERVERPROPERTY('FilestreamConfiguredLevel') = 0 THEN 'Disabled'
WHEN SERVERPROPERTY('FilestreamConfiguredLevel') = 1 THEN 'Enabled_for_TSQL'
ELSE 'Enabled for TSQL and Win32' END
ELSE 'Not compatible' END AS Filestream_Configured_Level,
CASE WHEN @sqlmajorver >= 10 THEN
CASE WHEN SERVERPROPERTY('FilestreamEffectiveLevel') = 0 THEN 'Disabled'
WHEN SERVERPROPERTY('FilestreamEffectiveLevel') = 1 THEN 'Enabled_for_TSQL'
ELSE 'Enabled for TSQL and Win32' END
ELSE 'Not compatible' END AS Filestream_Effective_Level,
CASE WHEN @sqlmajorver >= 10 THEN
SERVERPROPERTY('FilestreamShareName')
ELSE 'Not compatible' END AS Filestream_Share_Name,
CASE WHEN @sqlmajorver >= 12 THEN
SERVERPROPERTY('IsXTPSupported')
ELSE 'Not compatible' END AS XTP_Compatible,
CASE WHEN @sqlmajorver >= 13 THEN
SERVERPROPERTY('IsPolybaseInstalled')
ELSE 'Not compatible' END AS Polybase_Installed,
CASE WHEN @sqlmajorver >= 13 THEN
SERVERPROPERTY('IsAdvancedAnalyticsInstalled')
ELSE 'Not compatible' END AS R_Services_Installed;
GO
--------------------------------------------------------------------------------------------------------------------------------
-- Buffer Pool Extension info subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Buffer Pool Extension: report BPE state, file path and current size.
-- sys.dm_os_buffer_pool_extension_configuration exists from SQL Server 2014
-- (major version 12) onward; older versions emit a single 'NA' row.
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @majorver int
SELECT @majorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
IF @majorver <= 11
BEGIN
SELECT 'Information' AS [Category], 'BP_Extension' AS [Information], 'NA' AS state
END
ELSE
BEGIN
-- Decode the numeric state column into its descriptive label.
SELECT 'Information' AS [Category], 'BP_Extension' AS [Information],
CASE state
WHEN 0 THEN 'BP_Extension_Disabled'
WHEN 1 THEN 'BP_Extension_is_Disabling'
WHEN 3 THEN 'BP_Extension_is_Enabling'
WHEN 5 THEN 'BP_Extension_Enabled'
END AS state,
[path], current_size_in_kb
FROM sys.dm_os_buffer_pool_extension_configuration
END;
GO
--------------------------------------------------------------------------------------------------------------------------------
-- Resource Governor info subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Resource Governor: classifier function, resource pools and workload groups.
-- Available from SQL Server 2008 (@sqlmajorver > 9); the 2012+ only columns
-- (cap_cpu_percent, pool affinity, effective_max_dop) are appended dynamically.
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @sqlmajorver int
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
IF @sqlmajorver > 9
BEGIN
-- classifier_function_id = 0 means no user classifier is configured.
SELECT 'Information' AS [Category], 'RG_Classifier_Function' AS [Information], CASE WHEN classifier_function_id = 0 THEN 'Default_Configuration' ELSE OBJECT_SCHEMA_NAME(classifier_function_id) + '.' + OBJECT_NAME(classifier_function_id) END AS classifier_function, is_reconfiguration_pending
FROM sys.dm_resource_governor_configuration
SET @sqlcmd = 'SELECT ''Information'' AS [Category], ''RG_Resource_Pool'' AS [Information], rp.pool_id, name, statistics_start_time, total_cpu_usage_ms, cache_memory_kb, compile_memory_kb,
used_memgrant_kb, total_memgrant_count, total_memgrant_timeout_count, active_memgrant_count, active_memgrant_kb, memgrant_waiter_count, max_memory_kb, used_memory_kb, target_memory_kb,
out_of_memory_count, min_cpu_percent, max_cpu_percent, min_memory_percent, max_memory_percent' + CASE WHEN @sqlmajorver > 10 THEN ', cap_cpu_percent, rpa.processor_group, rpa.scheduler_mask' ELSE '' END + '
FROM sys.dm_resource_governor_resource_pools rp' + CASE WHEN @sqlmajorver > 10 THEN ' LEFT JOIN sys.dm_resource_governor_resource_pool_affinity rpa ON rp.pool_id = rpa.pool_id' ELSE '' END
EXECUTE sp_executesql @sqlcmd
SET @sqlcmd = 'SELECT ''Information'' AS [Category], ''RG_Workload_Groups'' AS [Information], group_id, name, pool_id, statistics_start_time, total_request_count, total_queued_request_count,
active_request_count, queued_request_count, total_cpu_limit_violation_count, total_cpu_usage_ms, max_request_cpu_time_ms, blocked_task_count, total_lock_wait_count,
total_lock_wait_time_ms, total_query_optimization_count, total_suboptimal_plan_generation_count, total_reduced_memgrant_count, max_request_grant_memory_kb,
active_parallel_thread_count, importance, request_max_memory_grant_percent, request_max_cpu_time_sec, request_memory_grant_timeout_sec,
group_max_requests, max_dop' + CASE WHEN @sqlmajorver > 10 THEN ', effective_max_dop' ELSE '' END + '
FROM sys.dm_resource_governor_workload_groups'
EXECUTE sp_executesql @sqlcmd
END;
GO
--------------------------------------------------------------------------------------------------------------------------------
-- Logon triggers subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Logon triggers: list enabled, user-created server-level triggers (ordered by
-- name), or a single 'NA' row when none exist.
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
-- EXISTS short-circuits on the first match; the original (SELECT COUNT([name]) ...) > 0
-- counted every matching trigger just to test for presence.
IF EXISTS (SELECT 1 FROM sys.server_triggers WHERE is_disabled = 0 AND is_ms_shipped = 0)
BEGIN
SELECT 'Information' AS [Category], 'Logon_Triggers' AS [Information], name AS [Trigger_Name], type_desc AS [Trigger_Type], create_date, modify_date
FROM sys.server_triggers WHERE is_disabled = 0 AND is_ms_shipped = 0
ORDER BY name;
END
ELSE
BEGIN
SELECT 'Information' AS [Category], 'Logon_Triggers' AS [Information], 'NA' AS [Comment]
END;
GO
--------------------------------------------------------------------------------------------------------------------------------
-- Database Information subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Database Information, part 1: build the working tables.
--  #tmpdbs0        - database inventory including AG role/readability flags
--  #tmpdbfiledetail - per-database file metadata
--  ##tmpdbsizes    - per-database data/FILESTREAM size totals (global temp, reused later)
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @dbScope VARCHAR(256)
SET @dbScope = NULL --(NULL = All DBs)
-- Building DB list
DECLARE @curdbname NVARCHAR(1000), @curdbid int, @currole tinyint, @cursecondary_role_allow_connections tinyint, @state tinyint
-- Drop-then-create keeps reruns in the same session idempotent.
IF EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tmpdbs0'))
DROP TABLE #tmpdbs0;
IF NOT EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tmpdbs0'))
CREATE TABLE #tmpdbs0 (id int IDENTITY(1,1), [dbid] int, [dbname] NVARCHAR(1000), [compatibility_level] tinyint, is_read_only bit, [state] tinyint, is_distributor bit, [role] tinyint, [secondary_role_allow_connections] tinyint, is_database_joined bit, is_failover_ready bit, isdone bit);
IF EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tmpdbfiledetail'))
DROP TABLE #tmpdbfiledetail;
IF NOT EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tmpdbfiledetail'))
CREATE TABLE #tmpdbfiledetail([database_id] [int] NOT NULL, [file_id] int, [type_desc] NVARCHAR(60), [data_space_id] int, [name] sysname, [physical_name] NVARCHAR(260), [state_desc] NVARCHAR(60), [size] bigint, [max_size] bigint, [is_percent_growth] bit, [growth] int, [is_media_read_only] bit, [is_read_only] bit, [is_sparse] bit, [is_name_reserved] bit)
IF EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.##tmpdbsizes'))
DROP TABLE ##tmpdbsizes;
IF NOT EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.##tmpdbsizes'))
CREATE TABLE ##tmpdbsizes([database_id] [int] NOT NULL, [size] bigint, [type_desc] NVARCHAR(60))
-- Get DB info
DECLARE @sqlmajorver int
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
DECLARE @dbid int, @dbname NVARCHAR(1000)
DECLARE @ErrorMessage NVARCHAR(4000)
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
IF @sqlmajorver < 11
BEGIN
-- Pre-2012: no AG DMVs; role/connection columns are filled with defaults (1, 1).
SET @sqlcmd = 'SELECT database_id, name, [compatibility_level], is_read_only, [state], is_distributor, 1, 1, 0 FROM master.sys.databases (NOLOCK)'
INSERT INTO #tmpdbs0 ([dbid], [dbname], [compatibility_level], is_read_only, [state], is_distributor, [role], [secondary_role_allow_connections], [isdone])
EXEC sp_executesql @sqlcmd;
END;
IF @sqlmajorver > 10
BEGIN
-- 2012+: join AG replica DMVs to learn each database's replica role and whether
-- a readable secondary allows connections.
SET @sqlcmd = 'SELECT sd.database_id, sd.name, sd.[compatibility_level], sd.is_read_only, sd.[state], sd.is_distributor, MIN(COALESCE(ars.[role],1)) AS [role], ar.secondary_role_allow_connections, rcs.is_database_joined, rcs.is_failover_ready, 0
FROM master.sys.databases (NOLOCK) sd
LEFT JOIN sys.dm_hadr_database_replica_states (NOLOCK) d ON sd.database_id = d.database_id
LEFT JOIN sys.availability_replicas ar (NOLOCK) ON d.group_id = ar.group_id AND d.replica_id = ar.replica_id
LEFT JOIN sys.dm_hadr_availability_replica_states (NOLOCK) ars ON d.group_id = ars.group_id AND d.replica_id = ars.replica_id
LEFT JOIN sys.dm_hadr_database_replica_cluster_states (NOLOCK) rcs ON rcs.database_name = sd.name AND rcs.replica_id = ar.replica_id
GROUP BY sd.database_id, sd.name, sd.is_read_only, sd.[state], sd.is_distributor, ar.secondary_role_allow_connections, sd.[compatibility_level], rcs.is_database_joined, rcs.is_failover_ready;'
INSERT INTO #tmpdbs0 ([dbid], [dbname], [compatibility_level], is_read_only, [state], is_distributor, [role], [secondary_role_allow_connections], is_database_joined, is_failover_ready, [isdone])
EXEC sp_executesql @sqlcmd;
END;
/* Validate if database scope is set */
-- NOTE(review): this only rejects values that are neither numeric nor contain a
-- comma; a value such as '1x,2y' passes and is concatenated into dynamic SQL
-- below - consider stricter (per-token numeric) validation.
IF @dbScope IS NOT NULL AND ISNUMERIC(@dbScope) <> 1 AND @dbScope NOT LIKE '%,%'
BEGIN
RAISERROR('ERROR: Invalid parameter. Valid input consists of database IDs. If more than one ID is specified, the values must be comma separated.', 16, 42) WITH NOWAIT;
RETURN
END;
IF @dbScope IS NOT NULL
BEGIN
RAISERROR (N'Applying specific database scope list, if any', 10, 1) WITH NOWAIT
-- System databases (dbid <= 4) are always kept regardless of scope.
SELECT @sqlcmd = 'DELETE FROM #tmpdbs0 WHERE [dbid] > 4 AND [dbid] NOT IN (' + REPLACE(@dbScope,' ','') + ')'
EXEC sp_executesql @sqlcmd;
END;
/* Populate data file info*/
-- Database Information, part 2: for each database in #tmpdbs0, collect file
-- metadata, then aggregate per-database ROWS/FILESTREAM sizes into ##tmpdbsizes.
WHILE (SELECT COUNT(id) FROM #tmpdbs0 WHERE isdone = 0) > 0
BEGIN
SELECT TOP 1 @curdbname = [dbname], @curdbid = [dbid], @currole = [role], @state = [state], @cursecondary_role_allow_connections = secondary_role_allow_connections FROM #tmpdbs0 WHERE isdone = 0
-- When the database cannot be entered (secondary replica with connections
-- disallowed, or state <> 0 i.e. not ONLINE), fall back to sys.master_files;
-- otherwise read the database's own sys.database_files.
IF (@currole = 2 AND @cursecondary_role_allow_connections = 0) OR @state <> 0
BEGIN
SET @sqlcmd = 'SELECT [database_id], [file_id], type_desc, data_space_id, name, physical_name, state_desc, size, max_size, is_percent_growth,growth, is_media_read_only, is_read_only, is_sparse, is_name_reserved
FROM sys.master_files (NOLOCK) WHERE [database_id] = ' + CONVERT(VARCHAR(10), @curdbid)
END
ELSE
BEGIN
SET @sqlcmd = 'USE ' + QUOTENAME(@curdbname) + ';
SELECT ' + CONVERT(VARCHAR(10), @curdbid) + ' AS [database_id], [file_id], type_desc, data_space_id, name, physical_name, state_desc, size, max_size, is_percent_growth,growth, is_media_read_only, is_read_only, is_sparse, is_name_reserved
FROM sys.database_files (NOLOCK)'
END
BEGIN TRY
INSERT INTO #tmpdbfiledetail
EXECUTE sp_executesql @sqlcmd
END TRY
BEGIN CATCH
SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
SELECT @ErrorMessage = 'Database Information subsection - Error raised in TRY block. ' + ERROR_MESSAGE()
RAISERROR (@ErrorMessage, 16, 1);
END CATCH
-- Mark the database processed so the loop terminates.
UPDATE #tmpdbs0
SET isdone = 1
WHERE [dbid] = @curdbid
END;
BEGIN TRY
-- Aggregate non-log file sizes per database and type (ROWS, FILESTREAM, ...).
INSERT INTO ##tmpdbsizes([database_id], [size], [type_desc])
SELECT [database_id], SUM([size]) AS [size], [type_desc]
FROM #tmpdbfiledetail
WHERE [type_desc] <> 'LOG'
GROUP BY [database_id], [type_desc]
END TRY
BEGIN CATCH
SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
SELECT @ErrorMessage = 'Database Information subsection - Error raised in TRY block. ' + ERROR_MESSAGE()
RAISERROR (@ErrorMessage, 16, 1);
END CATCH
IF @sqlmajorver < 11
BEGIN
-- SQL Server 2008/2008R2 variant: sys.databases lacks target_recovery_time and
-- delayed-durability columns, so indirect checkpoint is reported as 'NA'.
-- Fixes vs. original:
--  * dropped the WHERE predicate dbfssize.[type_desc] = 'FILESTREAM': filtering a
--    LEFT-joined table in WHERE turns the join into an INNER join, hiding every
--    database without FILESTREAM files. The join condition already restricts
--    dbfssize to FILESTREAM rows, and ISNULL(...,0) on Filestream_Size_MB (and the
--    SQL 2012+ branches, which have no such predicate) show the outer join is intended.
--  * removed the duplicated "AND ls.cntr_value > 0" predicate.
--  * removed the duplicated db.is_parameterization_forced column (still returned
--    once, last in the select list).
SET @sqlcmd = N'SELECT ''Information'' AS [Category], ''Databases'' AS [Information],
db.[name] AS [Database_Name], SUSER_SNAME(db.owner_sid) AS [Owner_Name], db.[database_id],
db.recovery_model_desc AS [Recovery_Model], db.create_date, db.log_reuse_wait_desc AS [Log_Reuse_Wait_Description],
(dbsize.[size]*8)/1024 AS [Data_Size_MB], ISNULL((dbfssize.[size]*8)/1024,0) AS [Filestream_Size_MB],
ls.cntr_value/1024 AS [Log_Size_MB], lu.cntr_value/1024 AS [Log_Used_MB],
CAST(CAST(lu.cntr_value AS FLOAT) / CAST(ls.cntr_value AS FLOAT)AS DECIMAL(18,2)) * 100 AS [Log_Used_pct],
db.[compatibility_level] AS [Compatibility_Level], db.collation_name AS [DB_Collation],
db.page_verify_option_desc AS [Page_Verify_Option], db.is_auto_create_stats_on, db.is_auto_update_stats_on,
db.is_auto_update_stats_async_on,
db.snapshot_isolation_state_desc, db.is_read_committed_snapshot_on,
db.is_read_only, db.is_auto_close_on, db.is_auto_shrink_on, ''NA'' AS [is_indirect_checkpoint_on],
db.is_trustworthy_on, db.is_db_chaining_on, db.is_parameterization_forced
FROM master.sys.databases AS db (NOLOCK)
INNER JOIN ##tmpdbsizes AS dbsize (NOLOCK) ON db.database_id = dbsize.database_id
INNER JOIN sys.dm_os_performance_counters AS lu (NOLOCK) ON db.name = lu.instance_name
INNER JOIN sys.dm_os_performance_counters AS ls (NOLOCK) ON db.name = ls.instance_name
LEFT JOIN ##tmpdbsizes AS dbfssize (NOLOCK) ON db.database_id = dbfssize.database_id AND dbfssize.[type_desc] = ''FILESTREAM''
WHERE dbsize.[type_desc] = ''ROWS''
AND lu.counter_name LIKE N''Log File(s) Used Size (KB)%''
AND ls.counter_name LIKE N''Log File(s) Size (KB)%''
AND ls.cntr_value > 0' + CASE WHEN @dbScope IS NOT NULL THEN CHAR(10) + ' AND db.[database_id] IN (' + REPLACE(@dbScope,' ','') + ')' ELSE '' END + '
ORDER BY [Database_Name]
OPTION (RECOMPILE)'
END
ELSE IF @sqlmajorver = 11
BEGIN
-- SQL Server 2012 variant: adds indirect checkpoint (target_recovery_time) and
-- is_encrypted columns.
-- Fixes vs. original: removed the duplicated "AND ls.cntr_value > 0" predicate and
-- the duplicated db.is_parameterization_forced column (still returned once, last
-- in the select list, matching the SQL 2014 branch).
SET @sqlcmd = N'SELECT ''Information'' AS [Category], ''Databases'' AS [Information],
db.[name] AS [Database_Name], SUSER_SNAME(db.owner_sid) AS [Owner_Name], db.[database_id],
db.recovery_model_desc AS [Recovery_Model], db.create_date, db.log_reuse_wait_desc AS [Log_Reuse_Wait_Description],
(dbsize.[size]*8)/1024 AS [Data_Size_MB], ISNULL((dbfssize.[size]*8)/1024,0) AS [Filestream_Size_MB],
ls.cntr_value/1024 AS [Log_Size_MB], lu.cntr_value/1024 AS [Log_Used_MB],
CAST(CAST(lu.cntr_value AS FLOAT) / CAST(ls.cntr_value AS FLOAT)AS DECIMAL(18,2)) * 100 AS [Log_Used_pct],
db.[compatibility_level] AS [Compatibility_Level], db.collation_name AS [DB_Collation],
db.page_verify_option_desc AS [Page_Verify_Option], db.is_auto_create_stats_on, db.is_auto_update_stats_on,
db.is_auto_update_stats_async_on,
db.snapshot_isolation_state_desc, db.is_read_committed_snapshot_on,
db.is_read_only, db.is_auto_close_on, db.is_auto_shrink_on,
CASE WHEN db.target_recovery_time_in_seconds > 0 THEN 1 ELSE 0 END AS is_indirect_checkpoint_on,
db.target_recovery_time_in_seconds, db.is_encrypted, db.is_trustworthy_on, db.is_db_chaining_on, db.is_parameterization_forced
FROM master.sys.databases AS db (NOLOCK)
INNER JOIN ##tmpdbsizes AS dbsize (NOLOCK) ON db.database_id = dbsize.database_id
INNER JOIN sys.dm_os_performance_counters AS lu (NOLOCK) ON db.name = lu.instance_name
INNER JOIN sys.dm_os_performance_counters AS ls (NOLOCK) ON db.name = ls.instance_name
LEFT JOIN ##tmpdbsizes AS dbfssize (NOLOCK) ON db.database_id = dbfssize.database_id AND dbfssize.[type_desc] = ''FILESTREAM''
WHERE dbsize.[type_desc] = ''ROWS''
AND lu.counter_name LIKE N''Log File(s) Used Size (KB)%''
AND ls.counter_name LIKE N''Log File(s) Size (KB)%''
AND ls.cntr_value > 0' + CASE WHEN @dbScope IS NOT NULL THEN CHAR(10) + ' AND db.[database_id] IN (' + REPLACE(@dbScope,' ','') + ')' ELSE '' END + '
ORDER BY [Database_Name]
OPTION (RECOMPILE)'
END
ELSE IF @sqlmajorver = 12
BEGIN
-- SQL Server 2014 variant: adds incremental auto-create-stats and delayed
-- durability columns.
-- Fix vs. original: removed the duplicated "AND ls.cntr_value > 0" predicate.
SET @sqlcmd = N'SELECT ''Information'' AS [Category], ''Databases'' AS [Information],
db.[name] AS [Database_Name], SUSER_SNAME(db.owner_sid) AS [Owner_Name], db.[database_id],
db.recovery_model_desc AS [Recovery_Model], db.create_date, db.log_reuse_wait_desc AS [Log_Reuse_Wait_Description],
(dbsize.[size]*8)/1024 AS [Data_Size_MB], ISNULL((dbfssize.[size]*8)/1024,0) AS [Filestream_Size_MB],
ls.cntr_value/1024 AS [Log_Size_MB], lu.cntr_value/1024 AS [Log_Used_MB],
CAST(CAST(lu.cntr_value AS FLOAT) / CAST(ls.cntr_value AS FLOAT)AS DECIMAL(18,2)) * 100 AS [Log_Used_pct],
db.[compatibility_level] AS [Compatibility_Level], db.collation_name AS [DB_Collation],
db.page_verify_option_desc AS [Page_Verify_Option], db.is_auto_create_stats_on, db.is_auto_create_stats_incremental_on,
db.is_auto_update_stats_on, db.is_auto_update_stats_async_on, db.delayed_durability_desc AS [delayed_durability_status],
db.snapshot_isolation_state_desc, db.is_read_committed_snapshot_on,
db.is_read_only, db.is_auto_close_on, db.is_auto_shrink_on,
CASE WHEN db.target_recovery_time_in_seconds > 0 THEN 1 ELSE 0 END AS is_indirect_checkpoint_on,
db.target_recovery_time_in_seconds, db.is_encrypted, db.is_trustworthy_on, db.is_db_chaining_on, db.is_parameterization_forced
FROM master.sys.databases AS db (NOLOCK)
INNER JOIN ##tmpdbsizes AS dbsize (NOLOCK) ON db.database_id = dbsize.database_id
INNER JOIN sys.dm_os_performance_counters AS lu (NOLOCK) ON db.name = lu.instance_name
INNER JOIN sys.dm_os_performance_counters AS ls (NOLOCK) ON db.name = ls.instance_name
LEFT JOIN ##tmpdbsizes AS dbfssize (NOLOCK) ON db.database_id = dbfssize.database_id AND dbfssize.[type_desc] = ''FILESTREAM''
WHERE dbsize.[type_desc] = ''ROWS''
AND lu.counter_name LIKE N''Log File(s) Used Size (KB)%''
AND ls.counter_name LIKE N''Log File(s) Size (KB)%''
AND ls.cntr_value > 0' + CASE WHEN @dbScope IS NOT NULL THEN CHAR(10) + ' AND db.[database_id] IN (' + REPLACE(@dbScope,' ','') + ')' ELSE '' END + '
ORDER BY [Database_Name]
OPTION (RECOMPILE)'
END
-- Fix: this branch previously tested @sqlmajorver >= 13, which — because the
-- IF/ELSE chain is evaluated top-down — made the dedicated @sqlmajorver >= 14
-- branch below unreachable on SQL Server 2017+ (so its version-store query
-- never ran). Narrowed to exactly 13 (SQL Server 2016 only).
ELSE IF @sqlmajorver = 13
BEGIN
-- SQL Server 2016 branch: adds is_query_store_on, memory-optimized elevate,
-- Stretch (remote data archive) and mixed page allocation flags.
-- Fix: the trailing predicate "AND ls.cntr_value > 0" appeared twice verbatim;
-- the redundant copy was removed (no behavior change).
SET @sqlcmd = N'SELECT ''Information'' AS [Category], ''Databases'' AS [Information],
db.[name] AS [Database_Name], SUSER_SNAME(db.owner_sid) AS [Owner_Name], db.[database_id],
db.recovery_model_desc AS [Recovery_Model], db.create_date, db.log_reuse_wait_desc AS [Log_Reuse_Wait_Description],
(dbsize.[size]*8)/1024 AS [Data_Size_MB], ISNULL((dbfssize.[size]*8)/1024,0) AS [Filestream_Size_MB],
ls.cntr_value/1024 AS [Log_Size_MB], lu.cntr_value/1024 AS [Log_Used_MB],
CAST(CAST(lu.cntr_value AS FLOAT) / CAST(ls.cntr_value AS FLOAT)AS DECIMAL(18,2)) * 100 AS [Log_Used_pct],
db.[compatibility_level] AS [Compatibility_Level], db.collation_name AS [DB_Collation],
db.page_verify_option_desc AS [Page_Verify_Option], db.is_auto_create_stats_on, db.is_auto_create_stats_incremental_on,
db.is_auto_update_stats_on, db.is_auto_update_stats_async_on, db.delayed_durability_desc AS [delayed_durability_status],
db.is_query_store_on, db.snapshot_isolation_state_desc, db.is_read_committed_snapshot_on,
db.is_read_only, db.is_auto_close_on, db.is_auto_shrink_on,
CASE WHEN db.target_recovery_time_in_seconds > 0 THEN 1 ELSE 0 END AS is_indirect_checkpoint_on,
db.target_recovery_time_in_seconds, db.is_encrypted, db.is_trustworthy_on, db.is_db_chaining_on, db.is_parameterization_forced,
db.is_memory_optimized_elevate_to_snapshot_on, db.is_remote_data_archive_enabled, db.is_mixed_page_allocation_on
FROM master.sys.databases AS db (NOLOCK)
INNER JOIN sys.dm_os_performance_counters AS lu (NOLOCK) ON db.name = lu.instance_name
INNER JOIN sys.dm_os_performance_counters AS ls (NOLOCK) ON db.name = ls.instance_name
INNER JOIN ##tmpdbsizes AS dbsize (NOLOCK) ON db.database_id = dbsize.database_id
LEFT JOIN ##tmpdbsizes AS dbfssize (NOLOCK) ON db.database_id = dbfssize.database_id AND dbfssize.[type_desc] = ''FILESTREAM''
WHERE dbsize.[type_desc] = ''ROWS''
AND lu.counter_name LIKE N''Log File(s) Used Size (KB)%''
AND ls.counter_name LIKE N''Log File(s) Size (KB)%''
AND ls.cntr_value > 0' + CASE WHEN @dbScope IS NOT NULL THEN CHAR(10) + ' AND db.[database_id] IN (' + REPLACE(@dbScope,' ','') + ')' ELSE '' END + '
ORDER BY [Database_Name]
OPTION (RECOMPILE)'
END
-- SQL Server 2017+ branch: additionally reports per-database version store
-- usage (Version_Store_Size_MB) via sys.dm_tran_version_store_space_usage.
-- NOTE(review): this branch is reachable only if no preceding branch in the
-- IF/ELSE chain already matches versions >= 14 — confirm chain ordering.
ELSE IF @sqlmajorver >= 14
BEGIN
-- Fix: the trailing predicate "AND ls.cntr_value > 0" appeared twice verbatim;
-- the redundant copy was removed (no behavior change).
SET @sqlcmd = N'SELECT ''Information'' AS [Category], ''Databases'' AS [Information],
db.[name] AS [Database_Name], SUSER_SNAME(db.owner_sid) AS [Owner_Name], db.[database_id],
db.recovery_model_desc AS [Recovery_Model], db.create_date, db.log_reuse_wait_desc AS [Log_Reuse_Wait_Description],
(dbsize.[size]*8)/1024 AS [Data_Size_MB], ISNULL((dbfssize.[size]*8)/1024,0) AS [Filestream_Size_MB],
ls.cntr_value/1024 AS [Log_Size_MB], lu.cntr_value/1024 AS [Log_Used_MB],
CAST(CAST(lu.cntr_value AS FLOAT) / CAST(ls.cntr_value AS FLOAT)AS DECIMAL(18,2)) * 100 AS [Log_Used_pct],
CASE WHEN ssu.reserved_space_kb>0 THEN ssu.reserved_space_kb/1024 ELSE 0 END AS [Version_Store_Size_MB],
db.[compatibility_level] AS [Compatibility_Level], db.collation_name AS [DB_Collation],
db.page_verify_option_desc AS [Page_Verify_Option], db.is_auto_create_stats_on, db.is_auto_create_stats_incremental_on,
db.is_auto_update_stats_on, db.is_auto_update_stats_async_on, db.delayed_durability_desc AS [delayed_durability_status],
db.is_query_store_on, db.snapshot_isolation_state_desc, db.is_read_committed_snapshot_on,
db.is_read_only, db.is_auto_close_on, db.is_auto_shrink_on,
CASE WHEN db.target_recovery_time_in_seconds > 0 THEN 1 ELSE 0 END AS is_indirect_checkpoint_on,
db.target_recovery_time_in_seconds, db.is_encrypted, db.is_trustworthy_on, db.is_db_chaining_on, db.is_parameterization_forced,
db.is_memory_optimized_elevate_to_snapshot_on, db.is_remote_data_archive_enabled, db.is_mixed_page_allocation_on
FROM master.sys.databases AS db (NOLOCK)
INNER JOIN ##tmpdbsizes AS dbsize (NOLOCK) ON db.database_id = dbsize.database_id
INNER JOIN sys.dm_os_performance_counters AS lu (NOLOCK) ON db.name = lu.instance_name
INNER JOIN sys.dm_os_performance_counters AS ls (NOLOCK) ON db.name = ls.instance_name
LEFT JOIN ##tmpdbsizes AS dbfssize (NOLOCK) ON db.database_id = dbfssize.database_id AND dbfssize.[type_desc] = ''FILESTREAM''
LEFT JOIN sys.dm_tran_version_store_space_usage AS ssu (NOLOCK) ON db.database_id = ssu.database_id
WHERE dbsize.[type_desc] = ''ROWS''
AND lu.counter_name LIKE N''Log File(s) Used Size (KB)%''
AND ls.counter_name LIKE N''Log File(s) Size (KB)%''
AND ls.cntr_value > 0' + CASE WHEN @dbScope IS NOT NULL THEN CHAR(10) + ' AND db.[database_id] IN (' + REPLACE(@dbScope,' ','') + ')' ELSE '' END + '
ORDER BY [Database_Name]
OPTION (RECOMPILE)'
END
-- Run whichever version-specific per-database query was built above.
EXECUTE sp_executesql @sqlcmd;
-- Per-database file details, from rows collected earlier into #tmpdbfiledetail.
-- size and growth are stored in 8 KB pages and converted to MB here.
-- NOTE(review): max_size is emitted raw (not converted to MB like size_MB),
-- except the -1 = 'Unlimited' sentinel — confirm this is intended.
SELECT 'Information' AS [Category], 'Database_Files' AS [Information], DB_NAME(database_id) AS [Database_Name], [file_id], type_desc, data_space_id AS [Filegroup], name, physical_name,
state_desc, (size * 8) / 1024 AS size_MB, CASE max_size WHEN -1 THEN 'Unlimited' ELSE CONVERT(VARCHAR(10), max_size) END AS max_size,
CASE WHEN is_percent_growth = 0 THEN CONVERT(VARCHAR(10),((growth * 8) / 1024)) ELSE growth END AS [growth], CASE WHEN is_percent_growth = 1 THEN 'Pct' ELSE 'MB' END AS growth_type,
is_media_read_only, is_read_only, is_sparse, is_name_reserved
FROM #tmpdbfiledetail
ORDER BY database_id, [file_id];
-- In-Memory OLTP storage subsection (SQL Server 2014+ only): per database,
-- collect memory allocated/used by memory-optimized tables and their indexes.
IF @sqlmajorver >= 12
BEGIN
-- Rebuild the working table that accumulates per-database results.
IF EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tblInMemDBs'))
DROP TABLE #tblInMemDBs;
IF NOT EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tblInMemDBs'))
CREATE TABLE #tblInMemDBs ([DBName] sysname, [Has_MemoryOptimizedObjects] bit, [MemoryAllocated_MemoryOptimizedObjects_KB] DECIMAL(18,2), [MemoryUsed_MemoryOptimizedObjects_KB] DECIMAL(18,2));
-- Reset the per-database work queue, then mark as done (skip) databases that
-- are not ONLINE ([state] <> 0), system databases ([dbid] < 5), and
-- non-readable AG secondaries.
UPDATE #tmpdbs0
SET isdone = 0;
UPDATE #tmpdbs0
SET isdone = 1
WHERE [state] <> 0 OR [dbid] < 5;
UPDATE #tmpdbs0
SET isdone = 1
WHERE [role] = 2 AND secondary_role_allow_connections = 0;
IF (SELECT COUNT(id) FROM #tmpdbs0 WHERE isdone = 0) > 0
BEGIN
RAISERROR (N'Starting Storage analysis for In-Memory OLTP Engine', 10, 1) WITH NOWAIT
-- Process one database per iteration until the queue is drained.
WHILE (SELECT COUNT(id) FROM #tmpdbs0 WHERE isdone = 0) > 0
BEGIN
SELECT TOP 1 @dbname = [dbname], @dbid = [dbid] FROM #tmpdbs0 WHERE isdone = 0
-- Single quotes (CHAR(39)) in the DB name are replaced with underscores
-- (CHAR(95)) inside the emitted literal to keep the dynamic SQL well-formed.
-- A database has memory-optimized objects iff it has an ''FX'' filegroup.
SET @sqlcmd = 'USE ' + QUOTENAME(@dbname) + ';
SELECT ''' + REPLACE(@dbname, CHAR(39), CHAR(95)) + ''' AS [DBName], ISNULL((SELECT 1 FROM sys.filegroups FG WHERE FG.[type] = ''FX''), 0) AS [Has_MemoryOptimizedObjects],
ISNULL((SELECT CONVERT(DECIMAL(18,2), (SUM(tms.memory_allocated_for_table_kb) + SUM(tms.memory_allocated_for_indexes_kb))) FROM sys.dm_db_xtp_table_memory_stats tms), 0.00) AS [MemoryAllocated_MemoryOptimizedObjects_KB],
ISNULL((SELECT CONVERT(DECIMAL(18,2),(SUM(tms.memory_used_by_table_kb) + SUM(tms.memory_used_by_indexes_kb))) FROM sys.dm_db_xtp_table_memory_stats tms), 0.00) AS [MemoryUsed_MemoryOptimizedObjects_KB];'
BEGIN TRY
INSERT INTO #tblInMemDBs
EXECUTE sp_executesql @sqlcmd
END TRY
BEGIN CATCH
-- Report the error but keep iterating the remaining databases.
SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
SELECT @ErrorMessage = 'Storage analysis for In-Memory OLTP Engine subsection - Error raised in TRY block. ' + ERROR_MESSAGE()
RAISERROR (@ErrorMessage, 16, 1);
END CATCH
UPDATE #tmpdbs0
SET isdone = 1
WHERE [dbid] = @dbid
END
END;
-- Emit results only for databases that actually have memory-optimized objects;
-- otherwise emit a single 'NA' row so the section always produces output.
IF (SELECT COUNT([DBName]) FROM #tblInMemDBs WHERE [Has_MemoryOptimizedObjects] = 1) > 0
BEGIN
SELECT 'Information' AS [Category], 'InMem_Database_Storage' AS [Information], DBName AS [Database_Name],
[MemoryAllocated_MemoryOptimizedObjects_KB], [MemoryUsed_MemoryOptimizedObjects_KB]
FROM #tblInMemDBs WHERE Has_MemoryOptimizedObjects = 1
ORDER BY DBName;
END
ELSE
BEGIN
SELECT 'Information' AS [Category], 'InMem_Database_Storage' AS [Information], 'NA' AS [Comment]
END
END;
-- http://support.microsoft.com/kb/2857849
-- AlwaysOn availability-group database health: listed only on SQL 2012+
-- (@sqlmajorver > 10) instances where HADR is enabled.
DECLARE @IsHadrEnabled tinyint
SELECT @IsHadrEnabled = CONVERT(tinyint, SERVERPROPERTY('IsHadrEnabled'))
IF @sqlmajorver > 10 AND @IsHadrEnabled = 1
BEGIN
-- Restricted to replicas hosted on this instance (is_local = 1).
SELECT 'Information' AS [Category], 'AlwaysOn_AG_Databases' AS [Information], dc.database_name AS [Database_Name],
d.synchronization_health_desc, d.synchronization_state_desc, d.database_state_desc
FROM sys.dm_hadr_database_replica_states d
INNER JOIN sys.availability_databases_cluster dc ON d.group_database_id=dc.group_database_id
WHERE d.is_local=1
END;
--------------------------------------------------------------------------------------------------------------------------------
-- Database file autogrows last 72h subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Database file autogrow events recorded in the default trace over the last
-- 72 hours, joined to current file metadata.
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
DECLARE @sqlmajorver int, @sqlminorver int, @sqlbuild int
DECLARE @ErrorMessage NVARCHAR(4000)
DECLARE @ostype VARCHAR(10)
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
SELECT @sqlminorver = CONVERT(int, (@@microsoftversion / 0x10000) & 0xff);
SELECT @sqlbuild = CONVERT(int, @@microsoftversion & 0xffff);
-- sys.dm_os_windows_info only exists on SQL 2012+ / 2008 R2 SP1+; querying it
-- through dynamic SQL avoids a compile-time error on older builds, which are
-- assumed to be Windows.
IF (@sqlmajorver >= 11) OR (@sqlmajorver = 10 AND @sqlminorver = 50 AND @sqlbuild >= 2500)
BEGIN
SET @sqlcmd = N'SELECT @ostypeOUT = ''Windows'' FROM sys.dm_os_windows_info (NOLOCK)';
SET @params = N'@ostypeOUT VARCHAR(10) OUTPUT';
EXECUTE sp_executesql @sqlcmd, @params, @ostypeOUT=@ostype OUTPUT;
END
ELSE
BEGIN
SET @ostype = 'Windows'
END;
IF EXISTS (SELECT TOP 1 id FROM sys.traces WHERE is_default = 1)
BEGIN
DECLARE @tracefilename VARCHAR(500)
-- Build the path to the first default-trace file, using the OS-appropriate
-- path separator.
IF @ostype = 'Windows'
SELECT @tracefilename = LEFT([path],LEN([path]) - PATINDEX('%\%', REVERSE([path]))) + '\log.trc' FROM sys.traces WHERE is_default = 1;
IF @ostype <> 'Windows'
SELECT @tracefilename = LEFT([path],LEN([path]) - PATINDEX('%/%', REVERSE([path]))) + '/log.trc' FROM sys.traces WHERE is_default = 1;
WITH AutoGrow_CTE (databaseid, [filename], Growth, Duration, StartTime, EndTime)
AS
(
-- IntegerData is in 8 KB pages; *8 converts to KB.
-- NOTE(review): EventClass 92-95 covers both auto-grow and auto-shrink trace
-- event classes — confirm shrink events are intentionally included.
SELECT databaseid, [filename], SUM(IntegerData*8) AS Growth, Duration, StartTime, EndTime--, CASE WHEN EventClass =
FROM sys.fn_trace_gettable(@tracefilename, default)
WHERE EventClass >= 92 AND EventClass <= 95 AND DATEDIFF(hh,StartTime,GETDATE()) < 72 -- Last 72h
GROUP BY databaseid, [filename], IntegerData, Duration, StartTime, EndTime
)
-- NOTE(review): the WHERE ag.Growth > 0 filter on the LEFT-joined CTE makes
-- the join behave as an INNER join — confirm intended.
SELECT 'Information' AS [Category], 'Recorded_Autogrows_Lst72H' AS [Information], DB_NAME(database_id) AS Database_Name,
mf.name AS logical_file_name, mf.size*8 / 1024 AS size_MB, mf.type_desc,
ag.Growth AS [growth_KB], CASE WHEN is_percent_growth = 1 THEN 'Pct' ELSE 'MB' END AS growth_type,
Duration/1000 AS Growth_Duration_ms, ag.StartTime, ag.EndTime
FROM sys.master_files mf
LEFT OUTER JOIN AutoGrow_CTE ag ON mf.database_id=ag.databaseid AND mf.name=ag.[filename]
WHERE ag.Growth > 0 --Only where growth occurred
GROUP BY database_id, mf.name, mf.size, ag.Growth, ag.Duration, ag.StartTime, ag.EndTime, is_percent_growth, mf.growth, mf.type_desc
ORDER BY Database_Name, logical_file_name, ag.StartTime;
END
ELSE
BEGIN
SELECT 'Information' AS [Category], 'Recorded_Autogrows_Lst72H' AS [Information], 'WARNING: Could not gather information on autogrow times' AS [Comment]
END;
--------------------------------------------------------------------------------------------------------------------------------
-- Database triggers subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Database triggers subsection: enumerate non-Microsoft-shipped DML triggers
-- in every accessible database.
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @dbScope VARCHAR(256)
SET @dbScope = NULL --(NULL = All DBs)
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
DECLARE @sqlmajorver int
DECLARE @ErrorMessage NVARCHAR(4000)
DECLARE @dbid int, @dbname NVARCHAR(1000)
/* Validate if database scope is set */
-- Accepts a single numeric ID, or a comma-separated list (only the presence
-- of a comma is checked for lists).
IF @dbScope IS NOT NULL AND ISNUMERIC(@dbScope) <> 1 AND @dbScope NOT LIKE '%,%'
BEGIN
RAISERROR('ERROR: Invalid parameter. Valid input consists of database IDs. If more than one ID is specified, the values must be comma separated.', 16, 42) WITH NOWAIT;
RETURN
END;
-- NOTE(review): this DELETE runs against the pre-existing #tmpdbs0, which is
-- dropped, recreated and repopulated below WITHOUT re-applying the scope —
-- confirm the intended ordering of scope filtering vs. repopulation.
IF @dbScope IS NOT NULL
BEGIN
RAISERROR (N'Applying specific database scope list', 10, 1) WITH NOWAIT
SELECT @sqlcmd = 'DELETE FROM #tmpdbs0 WHERE [dbid] > 4 AND [dbid] NOT IN (' + REPLACE(@dbScope,' ','') + ')'
EXEC sp_executesql @sqlcmd;
END;
-- Rebuild the results table and the per-database work queue.
IF EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tblTriggers'))
DROP TABLE #tblTriggers;
IF NOT EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tblTriggers'))
CREATE TABLE #tblTriggers ([DBName] sysname, [triggerName] sysname, [schemaName] sysname, [tableName] sysname, [type_desc] NVARCHAR(60), [parent_class_desc] NVARCHAR(60), [create_date] DATETIME, [modify_date] DATETIME, [is_disabled] bit, [is_instead_of_trigger] bit, [is_not_for_replication] bit);
IF EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tmpdbs0'))
DROP TABLE #tmpdbs0;
IF NOT EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tmpdbs0'))
CREATE TABLE #tmpdbs0 (id int IDENTITY(1,1), [dbid] int, [dbname] NVARCHAR(1000), [compatibility_level] tinyint, is_read_only bit, [state] tinyint, is_distributor bit, [role] tinyint, [secondary_role_allow_connections] tinyint, is_database_joined bit, is_failover_ready bit, isdone bit);
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
-- Pre-2012 has no HADR DMVs: populate with fixed role/connection defaults.
IF @sqlmajorver < 11
BEGIN
SET @sqlcmd = 'SELECT database_id, name, [compatibility_level], is_read_only, [state], is_distributor, 1, 1, 0 FROM master.sys.databases (NOLOCK)'
INSERT INTO #tmpdbs0 ([dbid], [dbname], [compatibility_level], is_read_only, [state], is_distributor, [role], [secondary_role_allow_connections], [isdone])
EXEC sp_executesql @sqlcmd;
END;
-- SQL 2012+: include availability-group role and readability per database.
IF @sqlmajorver > 10
BEGIN
SET @sqlcmd = 'SELECT sd.database_id, sd.name, sd.[compatibility_level], sd.is_read_only, sd.[state], sd.is_distributor, MIN(COALESCE(ars.[role],1)) AS [role], ar.secondary_role_allow_connections, rcs.is_database_joined, rcs.is_failover_ready, 0
FROM master.sys.databases (NOLOCK) sd
LEFT JOIN sys.dm_hadr_database_replica_states (NOLOCK) d ON sd.database_id = d.database_id
LEFT JOIN sys.availability_replicas ar (NOLOCK) ON d.group_id = ar.group_id AND d.replica_id = ar.replica_id
LEFT JOIN sys.dm_hadr_availability_replica_states (NOLOCK) ars ON d.group_id = ars.group_id AND d.replica_id = ars.replica_id
LEFT JOIN sys.dm_hadr_database_replica_cluster_states (NOLOCK) rcs ON rcs.database_name = sd.name AND rcs.replica_id = ar.replica_id
GROUP BY sd.database_id, sd.name, sd.is_read_only, sd.[state], sd.is_distributor, ar.secondary_role_allow_connections, sd.[compatibility_level], rcs.is_database_joined, rcs.is_failover_ready;'
INSERT INTO #tmpdbs0 ([dbid], [dbname], [compatibility_level], is_read_only, [state], is_distributor, [role], [secondary_role_allow_connections], is_database_joined, is_failover_ready, [isdone])
EXEC sp_executesql @sqlcmd;
END;
-- Skip offline/system databases and non-readable AG secondaries.
UPDATE #tmpdbs0
SET isdone = 1
WHERE [state] <> 0 OR [dbid] < 5;
UPDATE #tmpdbs0
SET isdone = 1
WHERE [role] = 2 AND secondary_role_allow_connections = 0;
IF (SELECT COUNT(id) FROM #tmpdbs0 WHERE isdone = 0) > 0
BEGIN
WHILE (SELECT COUNT(id) FROM #tmpdbs0 WHERE isdone = 0) > 0
BEGIN
SELECT TOP 1 @dbname = [dbname], @dbid = [dbid] FROM #tmpdbs0 WHERE isdone = 0
-- Single quotes in the DB name are replaced with underscores (CHAR(95)) in
-- the emitted literal to keep the dynamic SQL well-formed.
SET @sqlcmd = 'USE ' + QUOTENAME(@dbname) + ';
SELECT N''' + REPLACE(@dbname, CHAR(39), CHAR(95)) + ''' AS [DBName], st.name, ss.name, stb.name, st.type_desc, st.parent_class_desc, st.create_date, st.modify_date, st.is_disabled, st.is_instead_of_trigger, st.is_not_for_replication
FROM sys.triggers AS st
INNER JOIN sys.tables stb ON st.parent_id = stb.[object_id]
INNER JOIN sys.schemas ss ON stb.[schema_id] = ss.[schema_id]
WHERE st.is_ms_shipped = 0
ORDER BY stb.name, st.name;'
BEGIN TRY
INSERT INTO #tblTriggers
EXECUTE sp_executesql @sqlcmd
END TRY
BEGIN CATCH
-- Report the error but keep iterating the remaining databases.
SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
SELECT @ErrorMessage = 'Database triggers subsection - Error raised in TRY block. ' + ERROR_MESSAGE()
RAISERROR (@ErrorMessage, 16, 1);
END CATCH
UPDATE #tmpdbs0
SET isdone = 1
WHERE [dbid] = @dbid
END
END;
-- Emit the trigger inventory, or a single 'NA' row when none were found.
IF (SELECT COUNT([triggerName]) FROM #tblTriggers) > 0
BEGIN
SELECT 'Information' AS [Category], 'Database_Triggers' AS [Information], DBName AS [Database_Name],
triggerName AS [Trigger_Name], schemaName AS [Schema_Name], tableName AS [Table_Name],
type_desc AS [Trigger_Type], parent_class_desc AS [Trigger_Parent],
CASE is_instead_of_trigger WHEN 1 THEN 'INSTEAD_OF' ELSE 'AFTER' END AS [Trigger_Behavior],
create_date, modify_date,
CASE WHEN is_disabled = 1 THEN 'YES' ELSE 'NO' END AS [is_disabled],
CASE WHEN is_not_for_replication = 1 THEN 'YES' ELSE 'NO' END AS [is_not_for_replication]
FROM #tblTriggers
ORDER BY DBName, tableName, triggerName;
END
ELSE
BEGIN
SELECT 'Information' AS [Category], 'Database_Triggers' AS [Information], 'NA' AS [Comment]
END;
--------------------------------------------------------------------------------------------------------------------------------
-- Feature usage subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Feature usage subsection: detect per-database use of features that are not
-- available in every edition (persisted SKU features, Change Tracking,
-- auditing, Polybase, RLS, Always Encrypted, DDM, AlwaysOn, snapshots,
-- Filestream).
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @dbScope VARCHAR(256)
SET @dbScope = NULL --(NULL = All DBs)
DECLARE @dbid int, @dbname VARCHAR(1000)
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
DECLARE @sqlmajorver int, @sqlbuild int
DECLARE @ErrorMessage NVARCHAR(4000)
DECLARE @IsHadrEnabled tinyint
/* Validate if database scope is set */
IF @dbScope IS NOT NULL AND ISNUMERIC(@dbScope) <> 1 AND @dbScope NOT LIKE '%,%'
BEGIN
RAISERROR('ERROR: Invalid parameter. Valid input consists of database IDs. If more than one ID is specified, the values must be comma separated.', 16, 42) WITH NOWAIT;
RETURN
END;
-- NOTE(review): this DELETE targets the pre-existing #tmpdbs0, which is
-- dropped and repopulated below without re-applying the scope — confirm
-- intended ordering.
IF @dbScope IS NOT NULL
BEGIN
RAISERROR (N'Applying specific database scope list', 10, 1) WITH NOWAIT
SELECT @sqlcmd = 'DELETE FROM #tmpdbs0 WHERE [dbid] > 4 AND [dbid] NOT IN (' + REPLACE(@dbScope,' ','') + ')'
EXEC sp_executesql @sqlcmd;
END;
-- Rebuild the per-database work queue.
IF EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tmpdbs0'))
DROP TABLE #tmpdbs0;
IF NOT EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tmpdbs0'))
CREATE TABLE #tmpdbs0 (id int IDENTITY(1,1), [dbid] int, [dbname] NVARCHAR(1000), [compatibility_level] tinyint, is_read_only bit, [state] tinyint, is_distributor bit, [role] tinyint, [secondary_role_allow_connections] tinyint, is_database_joined bit, is_failover_ready bit, isdone bit);
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
SELECT @sqlbuild = CONVERT(int, @@microsoftversion & 0xffff);
SELECT @IsHadrEnabled = CONVERT(tinyint, SERVERPROPERTY('IsHadrEnabled'))
-- Pre-2012 has no HADR DMVs: populate with fixed role/connection defaults.
IF @sqlmajorver < 11
BEGIN
SET @sqlcmd = 'SELECT database_id, name, [compatibility_level], is_read_only, [state], is_distributor, 1, 1, 0 FROM master.sys.databases (NOLOCK)'
INSERT INTO #tmpdbs0 ([dbid], [dbname], [compatibility_level], is_read_only, [state], is_distributor, [role], [secondary_role_allow_connections], [isdone])
EXEC sp_executesql @sqlcmd;
END;
-- SQL 2012+: include availability-group role and readability per database.
IF @sqlmajorver > 10
BEGIN
SET @sqlcmd = 'SELECT sd.database_id, sd.name, sd.[compatibility_level], sd.is_read_only, sd.[state], sd.is_distributor, MIN(COALESCE(ars.[role],1)) AS [role], ar.secondary_role_allow_connections, rcs.is_database_joined, rcs.is_failover_ready, 0
FROM master.sys.databases (NOLOCK) sd
LEFT JOIN sys.dm_hadr_database_replica_states (NOLOCK) d ON sd.database_id = d.database_id
LEFT JOIN sys.availability_replicas ar (NOLOCK) ON d.group_id = ar.group_id AND d.replica_id = ar.replica_id
LEFT JOIN sys.dm_hadr_availability_replica_states (NOLOCK) ars ON d.group_id = ars.group_id AND d.replica_id = ars.replica_id
LEFT JOIN sys.dm_hadr_database_replica_cluster_states (NOLOCK) rcs ON rcs.database_name = sd.name AND rcs.replica_id = ar.replica_id
GROUP BY sd.database_id, sd.name, sd.is_read_only, sd.[state], sd.is_distributor, ar.secondary_role_allow_connections, sd.[compatibility_level], rcs.is_database_joined, rcs.is_failover_ready;'
INSERT INTO #tmpdbs0 ([dbid], [dbname], [compatibility_level], is_read_only, [state], is_distributor, [role], [secondary_role_allow_connections], is_database_joined, is_failover_ready, [isdone])
EXEC sp_executesql @sqlcmd;
END;
UPDATE #tmpdbs0
SET isdone = 1
WHERE [state] <> 0 OR [dbid] < 5;
-- sys.dm_db_persisted_sku_features exists on SQL 2008+ (@sqlmajorver > 9).
IF @sqlmajorver > 9
BEGIN
IF EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tblPerSku'))
DROP TABLE #tblPerSku;
IF NOT EXISTS (SELECT [object_id] FROM tempdb.sys.objects (NOLOCK) WHERE [object_id] = OBJECT_ID('tempdb.dbo.#tblPerSku'))
CREATE TABLE #tblPerSku ([DBName] sysname NULL, [Feature_Name] VARCHAR(100));
-- Reset the queue, then skip offline/system DBs and non-readable secondaries.
UPDATE #tmpdbs0
SET isdone = 0;
UPDATE #tmpdbs0
SET isdone = 1
WHERE [state] <> 0 OR [dbid] < 5;
UPDATE #tmpdbs0
SET isdone = 1
WHERE [role] = 2 AND secondary_role_allow_connections = 0;
IF (SELECT COUNT(id) FROM #tmpdbs0 WHERE isdone = 0) > 0
BEGIN
WHILE (SELECT COUNT(id) FROM #tmpdbs0 WHERE isdone = 0) > 0
BEGIN
SELECT TOP 1 @dbname = [dbname], @dbid = [dbid] FROM #tmpdbs0 WHERE isdone = 0
-- Per database: union persisted SKU features with probes for Change Tracking
-- and fine-grained auditing (TOP 1 rows act as existence flags).
SET @sqlcmd = 'USE ' + QUOTENAME(@dbname) + ';
SELECT ''' + REPLACE(@dbname, CHAR(39), CHAR(95)) + ''' AS [dbname], feature_name FROM sys.dm_db_persisted_sku_features (NOLOCK)
UNION ALL
SELECT ''' + REPLACE(@dbname, CHAR(39), CHAR(95)) + ''' AS [dbname], ''Change_Tracking'' AS feature_name FROM sys.change_tracking_databases (NOLOCK) WHERE database_id = DB_ID()
UNION ALL
SELECT TOP 1 ''' + REPLACE(@dbname, CHAR(39), CHAR(95)) + ''' AS [dbname], ''Fine_grained_auditing'' AS feature_name FROM sys.database_audit_specifications (NOLOCK)'
-- SQL 2016+ only: probe catalog views that do not exist on older versions.
IF @sqlmajorver >= 13
SET @sqlcmd = @sqlcmd + CHAR(10) + 'UNION ALL
SELECT TOP 1 ''' + REPLACE(@dbname, CHAR(39), CHAR(95)) + ''' AS [dbname], ''Polybase'' AS feature_name FROM sys.external_data_sources (NOLOCK)
UNION ALL
SELECT TOP 1 ''' + REPLACE(@dbname, CHAR(39), CHAR(95)) + ''' AS [dbname], ''Row_Level_Security'' AS feature_name FROM sys.security_policies (NOLOCK)
UNION ALL
SELECT TOP 1 ''' + REPLACE(@dbname, CHAR(39), CHAR(95)) + ''' AS [dbname], ''Always_Encrypted'' AS feature_name FROM sys.column_master_keys (NOLOCK)
UNION ALL
SELECT TOP 1 ''' + REPLACE(@dbname, CHAR(39), CHAR(95)) + ''' AS [dbname], ''Dynamic_Data_Masking'' AS feature_name FROM sys.masked_columns (NOLOCK) WHERE is_masked = 1'
BEGIN TRY
INSERT INTO #tblPerSku
EXECUTE sp_executesql @sqlcmd
END TRY
BEGIN CATCH
-- Report the error but keep iterating the remaining databases.
SELECT ERROR_NUMBER() AS ErrorNumber, ERROR_MESSAGE() AS ErrorMessage;
SELECT @ErrorMessage = 'Feature usage subsection - Error raised in TRY block. ' + ERROR_MESSAGE()
RAISERROR (@ErrorMessage, 16, 1);
END CATCH
UPDATE #tmpdbs0
SET isdone = 1
WHERE [dbid] = @dbid
END
END;
-- AlwaysOn counted as an edition-gated feature only on versions/builds where
-- it was Enterprise-only (pre-2016 SP1).
IF @sqlmajorver > 10 AND ((@sqlmajorver = 13 AND @sqlbuild < 4000) OR @sqlmajorver < 13) AND @IsHadrEnabled = 1
BEGIN
INSERT INTO #tblPerSku
SELECT [dbname], 'Always_On' AS feature_name FROM #tmpdbs0 WHERE is_database_joined = 1;
END;
-- Database snapshots (source_database_id set) and Filestream files.
IF (SELECT COUNT(DISTINCT [name]) FROM master.sys.databases (NOLOCK) WHERE database_id NOT IN (2,3) AND source_database_id IS NOT NULL) > 0 -- Snapshot
BEGIN
INSERT INTO #tblPerSku
SELECT DISTINCT [name], 'DB_Snapshot' AS feature_name FROM master.sys.databases (NOLOCK) WHERE database_id NOT IN (2,3) AND source_database_id IS NOT NULL;
END;
IF (SELECT COUNT(DISTINCT [name]) FROM master.sys.master_files (NOLOCK) WHERE database_id NOT IN (2,3) AND [type] = 2 and file_guid IS NOT NULL) > 0 -- Filestream
BEGIN
INSERT INTO #tblPerSku
SELECT DISTINCT DB_NAME(database_id), 'Filestream' AS feature_name FROM sys.master_files (NOLOCK) WHERE database_id NOT IN (2,3) AND [type] = 2 and file_guid IS NOT NULL;
END;
-- Emit findings, or a single 'NA' row when nothing was detected.
IF (SELECT COUNT([Feature_Name]) FROM #tblPerSku) > 0
BEGIN
SELECT 'Information' AS [Category], 'Feature_usage' AS [Check], 'INFORMATION: Some databases are using features that are not common to all editions' AS [Comment]
SELECT 'Information' AS [Category], 'Feature_usage' AS [Information], DBName AS [Database_Name], [Feature_Name]
FROM #tblPerSku
ORDER BY 2, 3
END
ELSE
BEGIN
SELECT 'Information' AS [Category], 'Feature_usage' AS [Check], 'NA' AS [Comment]
END
END;
--------------------------------------------------------------------------------------------------------------------------------
-- Backups since last Full Information subsection
--------------------------------------------------------------------------------------------------------------------------------
-- Backups-since-last-full subsection: for each database, list every non-COPY_ONLY
-- backup taken since (and including) its most recent full/file/partial backup.
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
SET DATEFORMAT mdy;
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
DECLARE @sqlmajorver int
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
-- The two branches differ only in compressed_backup_size, which is not
-- available before SQL Server 2012 (@sqlmajorver > 10).
IF @sqlmajorver > 10
BEGIN
SET @sqlcmd = N'SELECT ''Information'' AS [Category], ''Backups_since_last_Full'' AS [Information],
[database_name] AS [Database_Name], CASE WHEN type = ''D'' THEN ''Database''
WHEN type = ''I'' THEN ''Diff_Database''
WHEN type = ''L'' THEN ''Log''
WHEN type = ''F'' THEN ''File''
WHEN type = ''G'' THEN ''Diff_file''
WHEN type = ''P'' THEN ''Partial''
WHEN type = ''Q'' THEN ''Diff_partial''
ELSE NULL END AS [bck_type],
[backup_start_date], [backup_finish_date],
CONVERT(decimal(20,2),backup_size/1024.00/1024.00) AS [backup_size_MB],
CONVERT(decimal(20,2),compressed_backup_size/1024.00/1024.00) AS [compressed_backup_size_MB],
[recovery_model], [user_name],
database_backup_lsn AS [full_base_lsn], [differential_base_lsn], [expiration_date],
[is_password_protected], [has_backup_checksums], [is_readonly], is_copy_only, [has_incomplete_metadata] AS [Tail_log]
FROM msdb.dbo.backupset bck1 (NOLOCK)
WHERE is_copy_only = 0 -- No COPY_ONLY backups
AND backup_start_date >= (SELECT MAX(backup_start_date) FROM msdb.dbo.backupset bck2 (NOLOCK) WHERE bck2.type IN (''D'',''F'',''P'') AND is_copy_only = 0 AND bck1.database_name = bck2.database_name)
ORDER BY database_name, backup_start_date DESC'
END
ELSE
BEGIN
SET @sqlcmd = N'SELECT ''Information'' AS [Category], ''Backups_since_last_Full'' AS [Information],
[database_name] AS [Database_Name], CASE WHEN type = ''D'' THEN ''Database''
WHEN type = ''I'' THEN ''Diff_Database''
WHEN type = ''L'' THEN ''Log''
WHEN type = ''F'' THEN ''File''
WHEN type = ''G'' THEN ''Diff_file''
WHEN type = ''P'' THEN ''Partial''
WHEN type = ''Q'' THEN ''Diff_partial''
ELSE NULL END AS [bck_type],
[backup_start_date], [backup_finish_date],
CONVERT(decimal(20,2),backup_size/1024.00/1024.00) AS [backup_size_MB],
''NA'' AS [compressed_backup_size_MB],
[recovery_model], [user_name],
database_backup_lsn AS [full_base_lsn], [differential_base_lsn], [expiration_date],
[is_password_protected], [has_backup_checksums], [is_readonly], is_copy_only, [has_incomplete_metadata] AS [Tail_log]
FROM msdb.dbo.backupset bck1 (NOLOCK)
WHERE is_copy_only = 0 -- No COPY_ONLY backups
AND backup_start_date >= (SELECT MAX(backup_start_date) FROM msdb.dbo.backupset bck2 (NOLOCK) WHERE bck2.type IN (''D'',''F'',''P'') AND is_copy_only = 0 AND bck1.database_name = bck2.database_name)
ORDER BY database_name, backup_start_date DESC'
END;
EXECUTE sp_executesql @sqlcmd;
--------------------------------------------------------------------------------------------------------------------------------
-- System Configuration subsection
--------------------------------------------------------------------------------------------------------------------------------
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
-- Dump every instance-level configuration option, showing both the configured
-- value (ConfigValue) and the value currently in effect (RunValue).
SELECT 'Information' AS [Category], 'All_System_Configurations' AS [Information],
	cfg.name AS [Name],
	cfg.configuration_id AS [Number],
	cfg.minimum AS [Minimum],
	cfg.maximum AS [Maximum],
	cfg.is_dynamic AS [Dynamic],
	cfg.is_advanced AS [Advanced],
	cfg.value AS [ConfigValue],
	cfg.value_in_use AS [RunValue],
	cfg.description AS [Description]
FROM sys.configurations AS cfg (NOLOCK)
ORDER BY cfg.name OPTION (RECOMPILE);
--------------------------------------------------------------------------------------------------------------------------------
-- Checks section
-- Number of available Processors for this instance vs. MaxDOP setting subsection
--------------------------------------------------------------------------------------------------------------------------------
-- MaxDOP check: compare the configured 'max degree of parallelism' against
-- the (affined) scheduler count and NUMA layout, and emit a recommendation.
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
DECLARE @cpucount int, @numa int, @affined_cpus int, @cpuover32 int, @affinitymask NVARCHAR(64), @affinity64mask NVARCHAR(64)
DECLARE @i int, @cpuaffin VARCHAR(300), @cpuaffin_fixed VARCHAR(300)
-- Visible schedulers only (scheduler_id < 255 excludes hidden/DAC schedulers;
-- parent_node_id < 64 excludes the hidden online/offline placeholder node).
SELECT @numa = COUNT(DISTINCT parent_node_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64;
SELECT @cpucount = COUNT(cpu_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64
-- Expand the 'affinity mask' bigint into a bit string, one character per CPU.
-- bits/bytes enumerate bit positions within each of up to 9 bytes of the
-- binary value; value 0 means "no affinity" = all CPUs available.
;WITH bits AS
(SELECT 7 AS N, 128 AS E UNION ALL SELECT 6, 64 UNION ALL
SELECT 5, 32 UNION ALL SELECT 4, 16 UNION ALL SELECT 3, 8 UNION ALL
SELECT 2, 4 UNION ALL SELECT 1, 2 UNION ALL SELECT 0, 1),
bytes AS
(SELECT 1 M UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL
SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL
SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9)
-- CPU Affinity is shown highest to lowest CPU ID
SELECT @affinitymask = CASE WHEN [value] = 0 THEN REPLICATE('1', @cpucount)
ELSE RIGHT((SELECT ((CONVERT(tinyint, SUBSTRING(CONVERT(binary(9), [value]), M, 1)) & E) / E) AS [text()]
FROM bits CROSS JOIN bytes
ORDER BY M, N DESC
FOR XML PATH('')), @cpucount) END
FROM sys.configurations (NOLOCK)
WHERE name = 'affinity mask';
-- 'affinity64 mask' covers CPUs 33-64 and only matters beyond 32 CPUs.
IF @cpucount > 32
BEGIN
;WITH bits AS
(SELECT 7 AS N, 128 AS E UNION ALL SELECT 6, 64 UNION ALL
SELECT 5, 32 UNION ALL SELECT 4, 16 UNION ALL SELECT 3, 8 UNION ALL
SELECT 2, 4 UNION ALL SELECT 1, 2 UNION ALL SELECT 0, 1),
bytes AS
(SELECT 1 M UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL
SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL
SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9)
-- CPU Affinity is shown highest to lowest CPU ID
SELECT @affinity64mask = CASE WHEN [value] = 0 THEN REPLICATE('1', @cpucount)
ELSE RIGHT((SELECT ((CONVERT(tinyint, SUBSTRING(CONVERT(binary(9), [value]), M, 1)) & E) / E) AS [text()]
FROM bits CROSS JOIN bytes
ORDER BY M, N DESC
FOR XML PATH('')), @cpucount) END
FROM sys.configurations (NOLOCK)
WHERE name = 'affinity64 mask';
END;
-- Stitch the two masks together into one string covering all CPUs.
IF @cpucount > 32
SELECT @cpuover32 = ABS(LEN(@affinity64mask) - (@cpucount-32))
SELECT @cpuaffin = CASE WHEN @cpucount > 32 THEN REVERSE(LEFT(REVERSE(@affinity64mask),@cpuover32)) + RIGHT(@affinitymask,32) ELSE RIGHT(@affinitymask,@cpucount) END
SET @cpuaffin_fixed = @cpuaffin
-- Insert '_' separators into the bitmask at NUMA-node boundaries for display.
SET @i = @cpucount/@numa + 1
WHILE @i < @cpucount + @numa
BEGIN
SELECT @cpuaffin_fixed = STUFF(@cpuaffin_fixed, @i, 1, '_' + SUBSTRING(@cpuaffin_fixed, @i, 1))
SET @i = @i + @cpucount/@numa + 1
END;
-- Affined CPUs = schedulers currently online.
SELECT @affined_cpus = COUNT(cpu_id) FROM sys.dm_os_schedulers WHERE is_online = 1 AND scheduler_id < 255 AND parent_node_id < 64;
SELECT @cpucount = COUNT(cpu_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64
-- Deviation check: MaxDOP should not exceed affined CPUs, nor 8, nor the
-- per-NUMA-node CPU count (same CASE ladder as the recommendation below).
SELECT 'Processor_checks' AS [Category], 'Parallelism_MaxDOP' AS [Check],
CASE WHEN [value] > @affined_cpus THEN 'WARNING: MaxDOP setting exceeds available processor count (affinity)'
WHEN @numa = 1 AND @affined_cpus > 8 AND ([value] = 0 OR [value] > 8) THEN 'WARNING: MaxDOP setting is not recommended for current processor count (affinity)'
WHEN @numa > 1 AND (@cpucount/@numa) < 8 AND ([value] = 0 OR [value] > (@cpucount/@numa)) THEN 'WARNING: MaxDOP setting is not recommended for current NUMA node to processor count (affinity) ratio'
WHEN @numa > 1 AND (@cpucount/@numa) >= 8 AND ([value] = 0 OR [value] > 8 OR [value] > (@cpucount/@numa)) THEN 'WARNING: MaxDOP setting is not recommended for current NUMA node to processor count (affinity) ratio'
ELSE 'OK'
END AS [Deviation]
FROM sys.configurations (NOLOCK) WHERE name = 'max degree of parallelism';
-- Companion row: the concrete recommended MaxDOP value for each warning case
-- (0 = leave as configured).
SELECT 'Processor_checks' AS [Category], 'Parallelism_MaxDOP' AS [Information],
CASE WHEN [value] > @affined_cpus THEN @affined_cpus
WHEN @numa = 1 AND @affined_cpus > 8 AND ([value] = 0 OR [value] > 8) THEN 8
WHEN @numa > 1 AND (@cpucount/@numa) < 8 AND ([value] = 0 OR [value] > (@cpucount/@numa)) THEN @cpucount/@numa
WHEN @numa > 1 AND (@cpucount/@numa) >= 8 AND ([value] = 0 OR [value] > 8 OR [value] > (@cpucount/@numa)) THEN 8
ELSE 0
END AS [Recommended_MaxDOP],
[value] AS [Current_MaxDOP], @cpucount AS [Available_Processors], @affined_cpus AS [Affined_Processors],
-- Processor Affinity is shown highest to lowest CPU ID
@cpuaffin_fixed AS Affinity_Mask_Bitmask
FROM sys.configurations (NOLOCK) WHERE name = 'max degree of parallelism';
--------------------------------------------------------------------------------
-- Processor Affinity in NUMA architecture subsection
--------------------------------------------------------------------------------
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
-- Working variables: CPU/NUMA counts and affinity bitmap strings.
DECLARE @cpucount int, @numa int, @affined_cpus int, @cpuover32 int, @affinitymask NVARCHAR(64), @affinity64mask NVARCHAR(64)
DECLARE @i int, @cpuaffin VARCHAR(300), @cpuaffin_fixed VARCHAR(300)
SELECT @numa = COUNT(DISTINCT parent_node_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64;
SELECT @cpucount = COUNT(cpu_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64
-- Decode the 'affinity mask' configuration value bit by bit into a '0'/'1'
-- string, one character per CPU; a stored value of 0 means no explicit
-- affinity, so all CPUs are marked '1'.
;WITH bits AS
(SELECT 7 AS N, 128 AS E UNION ALL SELECT 6, 64 UNION ALL
SELECT 5, 32 UNION ALL SELECT 4, 16 UNION ALL SELECT 3, 8 UNION ALL
SELECT 2, 4 UNION ALL SELECT 1, 2 UNION ALL SELECT 0, 1),
bytes AS
(SELECT 1 M UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL
SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL
SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9)
-- CPU Affinity is shown highest to lowest CPU ID
SELECT @affinitymask = CASE WHEN [value] = 0 THEN REPLICATE('1', @cpucount)
	ELSE RIGHT((SELECT ((CONVERT(tinyint, SUBSTRING(CONVERT(binary(9), [value]), M, 1)) & E) / E) AS [text()]
		FROM bits CROSS JOIN bytes
		ORDER BY M, N DESC
		FOR XML PATH('')), @cpucount) END
FROM sys.configurations (NOLOCK)
WHERE name = 'affinity mask';
-- CPUs above the first 32 are configured through the separate
-- 'affinity64 mask' option; decode it the same way.
IF @cpucount > 32
BEGIN
	;WITH bits AS
	(SELECT 7 AS N, 128 AS E UNION ALL SELECT 6, 64 UNION ALL
	SELECT 5, 32 UNION ALL SELECT 4, 16 UNION ALL SELECT 3, 8 UNION ALL
	SELECT 2, 4 UNION ALL SELECT 1, 2 UNION ALL SELECT 0, 1),
	bytes AS
	(SELECT 1 M UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL
	SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL
	SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9)
	-- CPU Affinity is shown highest to lowest CPU ID
	SELECT @affinity64mask = CASE WHEN [value] = 0 THEN REPLICATE('1', @cpucount)
		ELSE RIGHT((SELECT ((CONVERT(tinyint, SUBSTRING(CONVERT(binary(9), [value]), M, 1)) & E) / E) AS [text()]
			FROM bits CROSS JOIN bytes
			ORDER BY M, N DESC
			FOR XML PATH('')), @cpucount) END
	FROM sys.configurations (NOLOCK)
	WHERE name = 'affinity64 mask';
END;
IF @cpucount > 32
SELECT @cpuover32 = ABS(LEN(@affinity64mask) - (@cpucount-32))
-- Combine both masks into a single bitmap string (highest to lowest CPU ID).
SELECT @cpuaffin = CASE WHEN @cpucount > 32 THEN REVERSE(LEFT(REVERSE(@affinity64mask),@cpuover32)) + RIGHT(@affinitymask,32) ELSE RIGHT(@affinitymask,@cpucount) END;
SET @cpuaffin_fixed = @cpuaffin
-- Insert '_' separators into the bitmap at each NUMA node boundary.
SET @i = @cpucount/@numa + 1
WHILE @i < @cpucount + @numa
BEGIN
	SELECT @cpuaffin_fixed = STUFF(@cpuaffin_fixed, @i, 1, '_' + SUBSTRING(@cpuaffin_fixed, @i, 1))
	SET @i = @i + @cpucount/@numa + 1
END;
IF @numa > 1
BEGIN
	-- Warn when any NUMA node has a single assigned CPU (ncpuCTE) or has no
	-- online CPUs at all (cpuCTE: node present with afin=0 and never afin=1).
	WITH ncpuCTE (ncpus) AS (SELECT COUNT(cpu_id) AS ncpus from sys.dm_os_schedulers WHERE is_online = 1 AND scheduler_id < 255 AND parent_node_id < 64 GROUP BY parent_node_id, is_online HAVING COUNT(cpu_id) = 1),
	cpuCTE (node, afin) AS (SELECT DISTINCT(parent_node_id), is_online FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64 GROUP BY parent_node_id, is_online)
	SELECT 'Processor_checks' AS [Category], 'Affinity_NUMA' AS [Check],
		CASE WHEN (SELECT COUNT(*) FROM ncpuCTE) > 0 THEN '[WARNING: Current NUMA configuration is not recommended. At least one node has a single assigned CPU]'
			WHEN (SELECT COUNT(DISTINCT(node)) FROM cpuCTE WHERE afin = 0 AND node NOT IN (SELECT DISTINCT(node) FROM cpuCTE WHERE afin = 1)) > 0 THEN 'WARNING: Current NUMA configuration is not recommended. At least one node does not have assigned CPUs'
			ELSE 'OK' END AS [Deviation]
	FROM sys.dm_os_sys_info (NOLOCK)
	OPTION (RECOMPILE);
	SELECT 'Processor_checks' AS [Category], 'Affinity_NUMA' AS [Information], cpu_count AS [Logical_CPU_Count],
		(SELECT COUNT(DISTINCT parent_node_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64) AS [NUMA_Nodes],
		-- Processor Affinity is shown highest to lowest CPU ID
		@cpuaffin_fixed AS Affinity_Mask_Bitmask
	FROM sys.dm_os_sys_info (NOLOCK)
	OPTION (RECOMPILE);
END
ELSE
BEGIN
	SELECT 'Processor_checks' AS [Category], 'Affinity_NUMA' AS [Check], 'Not_NUMA' AS [Deviation]
	FROM sys.dm_os_sys_info (NOLOCK)
	OPTION (RECOMPILE);
END;
--------------------------------------------------------------------------------
-- Additional Processor information subsection
--------------------------------------------------------------------------------
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
-- Working variables: CPU/NUMA counts, affinity bitmaps, host/OS details.
DECLARE @cpucount int, @numa int, @affined_cpus int, @cpuover32 int, @affinitymask NVARCHAR(64), @affinity64mask NVARCHAR(64)
DECLARE @i int, @cpuaffin VARCHAR(300), @cpuaffin_fixed VARCHAR(300), @ostype VARCHAR(10), @SystemManufacturer VARCHAR(128)
DECLARE @sqlcmd NVARCHAR(max), @params NVARCHAR(600)
DECLARE @sqlmajorver int, @sqlminorver int, @sqlbuild int
DECLARE @ErrorMessage NVARCHAR(4000)
-- Holds registry value name/data pairs returned by xp_instance_regread.
DECLARE @machineinfo TABLE ([Value] NVARCHAR(256), [Data] NVARCHAR(256))
-- Extract major/minor/build numbers from @@microsoftversion.
SELECT @sqlmajorver = CONVERT(int, (@@microsoftversion / 0x1000000) & 0xff);
SELECT @sqlminorver = CONVERT(int, (@@microsoftversion / 0x10000) & 0xff);
SELECT @sqlbuild = CONVERT(int, @@microsoftversion & 0xffff);
SELECT @numa = COUNT(DISTINCT parent_node_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64;
SELECT @cpucount = COUNT(cpu_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64;
-- Versions before SQL Server 2017 (14.x) are assumed to run on Windows;
-- from 14.x on, ask the engine via sys.dm_os_host_info (dynamic SQL so the
-- reference does not break parsing on older versions).
IF (@sqlmajorver >= 11 AND @sqlmajorver < 14) OR (@sqlmajorver = 10 AND @sqlminorver = 50 AND @sqlbuild >= 2500)
BEGIN
	SET @ostype = 'Windows'
END
ELSE IF @sqlmajorver >= 14
BEGIN
	SET @sqlcmd = N'SELECT @ostypeOUT = host_platform FROM sys.dm_os_host_info (NOLOCK)';
	SET @params = N'@ostypeOUT VARCHAR(10) OUTPUT';
	EXECUTE sp_executesql @sqlcmd, @params, @ostypeOUT=@ostype OUTPUT;
END
-- On Windows, read hardware vendor / BIOS / CPU model from the registry.
IF @ostype = 'Windows'
BEGIN
	INSERT INTO @machineinfo
	EXEC xp_instance_regread 'HKEY_LOCAL_MACHINE','HARDWARE\DESCRIPTION\System\BIOS','SystemManufacturer';
	INSERT INTO @machineinfo
	EXEC xp_instance_regread 'HKEY_LOCAL_MACHINE','HARDWARE\DESCRIPTION\System\BIOS','BIOSVendor';
	INSERT INTO @machineinfo
	EXEC xp_instance_regread 'HKEY_LOCAL_MACHINE','HARDWARE\DESCRIPTION\System\CentralProcessor\0','ProcessorNameString';
END;
SELECT @SystemManufacturer = [Data] FROM @machineinfo WHERE [Value] = 'SystemManufacturer';
-- Decode the 'affinity mask' configuration value bit by bit into a '0'/'1'
-- string, one character per CPU; value 0 means all CPUs are used.
;WITH bits AS
(SELECT 7 AS N, 128 AS E UNION ALL SELECT 6, 64 UNION ALL
SELECT 5, 32 UNION ALL SELECT 4, 16 UNION ALL SELECT 3, 8 UNION ALL
SELECT 2, 4 UNION ALL SELECT 1, 2 UNION ALL SELECT 0, 1),
bytes AS
(SELECT 1 M UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL
SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL
SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9)
-- CPU Affinity is shown highest to lowest CPU ID
SELECT @affinitymask = CASE WHEN [value] = 0 THEN REPLICATE('1', @cpucount)
	ELSE RIGHT((SELECT ((CONVERT(tinyint, SUBSTRING(CONVERT(binary(9), [value]), M, 1)) & E) / E) AS [text()]
		FROM bits CROSS JOIN bytes
		ORDER BY M, N DESC
		FOR XML PATH('')), @cpucount) END
FROM sys.configurations (NOLOCK)
WHERE name = 'affinity mask';
-- CPUs above the first 32 are configured through 'affinity64 mask'.
IF @cpucount > 32
BEGIN
	;WITH bits AS
	(SELECT 7 AS N, 128 AS E UNION ALL SELECT 6, 64 UNION ALL
	SELECT 5, 32 UNION ALL SELECT 4, 16 UNION ALL SELECT 3, 8 UNION ALL
	SELECT 2, 4 UNION ALL SELECT 1, 2 UNION ALL SELECT 0, 1),
	bytes AS
	(SELECT 1 M UNION ALL SELECT 2 UNION ALL SELECT 3 UNION ALL
	SELECT 4 UNION ALL SELECT 5 UNION ALL SELECT 6 UNION ALL
	SELECT 7 UNION ALL SELECT 8 UNION ALL SELECT 9)
	-- CPU Affinity is shown highest to lowest CPU ID
	SELECT @affinity64mask = CASE WHEN [value] = 0 THEN REPLICATE('1', @cpucount)
		ELSE RIGHT((SELECT ((CONVERT(tinyint, SUBSTRING(CONVERT(binary(9), [value]), M, 1)) & E) / E) AS [text()]
			FROM bits CROSS JOIN bytes
			ORDER BY M, N DESC
			FOR XML PATH('')), @cpucount) END
	FROM sys.configurations (NOLOCK)
	WHERE name = 'affinity64 mask';
END;
IF @cpucount > 32
SELECT @cpuover32 = ABS(LEN(@affinity64mask) - (@cpucount-32))
-- Combine both masks into a single bitmap string (highest to lowest CPU ID).
SELECT @cpuaffin = CASE WHEN @cpucount > 32 THEN REVERSE(LEFT(REVERSE(@affinity64mask),@cpuover32)) + RIGHT(@affinitymask,32) ELSE RIGHT(@affinitymask,@cpucount) END;
SELECT @affined_cpus = COUNT(cpu_id) FROM sys.dm_os_schedulers WHERE is_online = 1 AND scheduler_id < 255 AND parent_node_id < 64;
SET @cpuaffin_fixed = @cpuaffin
-- Insert '_' separators into the bitmap at each NUMA node boundary.
SET @i = @cpucount/@numa + 1
WHILE @i < @cpucount + @numa
BEGIN
	SELECT @cpuaffin_fixed = STUFF(@cpuaffin_fixed, @i, 1, '_' + SUBSTRING(@cpuaffin_fixed, @i, 1))
	SET @i = @i + @cpucount/@numa + 1
END;
-- Summary row: logical CPUs, cores-per-socket ratio, sockets, NUMA nodes.
SELECT 'Processor_checks' AS [Category], 'Processor_Summary' AS [Information], cpu_count AS [Logical_CPU_Count], hyperthread_ratio AS [Cores2Socket_Ratio],
	cpu_count/hyperthread_ratio AS [CPU_Sockets],
	CASE WHEN @numa > 1 THEN (SELECT COUNT(DISTINCT parent_node_id) FROM sys.dm_os_schedulers WHERE scheduler_id < 255 AND parent_node_id < 64) ELSE 0 END AS [NUMA_Nodes],
	@affined_cpus AS [Affined_Processors],
	-- Processor Affinity is shown highest to lowest Processor ID
	@cpuaffin_fixed AS Affinity_Mask_Bitmask
FROM sys.dm_os_sys_info (NOLOCK)
OPTION (RECOMPILE);
-- Check for HP Logical Processor issue (https://support.hpe.com/hpsc/doc/public/display?docId=emr_na-c04650594)
-- Only relevant on physical Windows hosts (skip Microsoft/VMware virtual machines).
IF LOWER(@SystemManufacturer) <> 'microsoft' and LOWER(@SystemManufacturer) <> 'vmware' and LOWER(@ostype) = 'windows'
BEGIN
	DECLARE @BIOSVendor AS varchar(128), @Processor_Name as varchar(128)
	SELECT @BIOSVendor = [Data] FROM @machineinfo WHERE [Value] = 'BIOSVendor'
	SELECT @Processor_Name = [Data] FROM @machineinfo WHERE [Value] = 'ProcessorNameString'
	-- HP BIOS plus Intel Xeon E5 family matches the affected configuration.
	IF LOWER(@BIOSVendor) = 'hp' AND LOWER(@Processor_Name) like '%xeon%e5%' --and
	BEGIN
		SELECT 'Processor_checks' AS [Category], 'HP Logical Processor Issue' AS [Information], 'Warning: You may be affected by HP Logical Processor issue outlined in https://support.hpe.com/hpsc/doc/public/display?docId=emr_na-c04650594' AS [Deviation]
	END
END;
--------------------------------------------------------------------------------
-- Processor utilization rate in the last 2 hours subsection
--------------------------------------------------------------------------------
SET NOCOUNT ON;
SET ANSI_WARNINGS ON;
SET QUOTED_IDENTIFIER ON;
SET DATEFORMAT mdy;
DECLARE @ts_now bigint
-- One row per 10-minute slice: avg SQL CPU %, avg system idle %, and the
-- remainder attributed to other processes.
DECLARE @tblAggCPU TABLE (SQLProc tinyint, SysIdle tinyint, OtherProc tinyint, Minutes tinyint)
SELECT @ts_now = ms_ticks FROM sys.dm_os_sys_info (NOLOCK);
-- Shred the scheduler-monitor ring buffer XML into per-record CPU figures.
WITH cteCPU (record_id, SystemIdle, SQLProcessUtilization, [timestamp]) AS (SELECT
	record.value('(./Record/@id)[1]', 'int') AS record_id,
	record.value('(./Record/SchedulerMonitorEvent/SystemHealth/SystemIdle)[1]', 'int') AS SystemIdle,
	record.value('(./Record/SchedulerMonitorEvent/SystemHealth/ProcessUtilization)[1]', 'int') AS SQLProcessUtilization,
	[TIMESTAMP] FROM (SELECT [TIMESTAMP], CONVERT(xml, record) AS record
		FROM sys.dm_os_ring_buffers (NOLOCK)
		WHERE ring_buffer_type = N'RING_BUFFER_SCHEDULER_MONITOR'
		AND record LIKE '%<SystemHealth>%') AS x
)
-- Aggregate into twelve 10-minute slices covering the last 2 hours; the
-- ring-buffer [timestamp] is in ms_ticks and is converted to wall-clock
-- time relative to @ts_now.
INSERT INTO @tblAggCPU
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 10
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -10, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 20
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -10, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -20, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 30
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -20, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -30, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 40
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -30, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -40, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 50
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -40, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -50, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 60
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -50, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -60, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 70
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -60, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -70, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 80
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -70, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -80, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 90
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -80, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -90, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 100
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -90, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -100, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 110
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -100, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -110, GETDATE())
UNION ALL
SELECT AVG(SQLProcessUtilization), AVG(SystemIdle), CASE WHEN AVG(SystemIdle) + AVG(SQLProcessUtilization) < 100 THEN 100 - AVG(SystemIdle) - AVG(SQLProcessUtilization) ELSE 0 END, 120
FROM cteCPU
WHERE DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) <= DATEADD(mi, -110, GETDATE()) AND
	DATEADD(ms, -1 * (@ts_now - [timestamp]), GETDATE()) > DATEADD(mi, -120, GETDATE())
-- Evaluate the most severe threshold first: a slice with SysIdle < 10 also
-- satisfies SysIdle < 30, so testing < 30 first would report sustained
-- >90 pct usage as merely >70 pct and make the >90 pct branch unreachable.
IF (SELECT COUNT(SysIdle) FROM @tblAggCPU WHERE SysIdle < 10) > 0
BEGIN
	SELECT 'Processor_checks' AS [Category], 'Processor_Usage_last_2h' AS [Check], 'WARNING: Detected CPU usage over 90 pct' AS [Deviation];
END
ELSE IF (SELECT COUNT(SysIdle) FROM @tblAggCPU WHERE SysIdle < 30) > 0
BEGIN
	SELECT 'Processor_checks' AS [Category], 'Processor_Usage_last_2h' AS [Check], 'WARNING: Detected CPU usage over 70 pct' AS [Deviation];
END
ELSE
BEGIN
	SELECT 'Processor_checks' AS [Category], 'Processor_Usage_last_2h' AS [Check], 'OK' AS [Deviation];
END;
-- Detail rows: the per-10-minute-slice averages gathered above.
SELECT 'Processor_checks' AS [Category], 'Agg_Processor_Usage_last_2h' AS [Information], SQLProc AS [SQL_Process_Utilization], SysIdle AS [System_Idle], OtherProc AS [Other_Process_Utilization], Minutes AS [Time_Slice_Last_x_min]
FROM @tblAggCPU;
| BPCheck/BPCheck.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
#
# Head model and forward computation
# ==================================
#
# The aim of this tutorial is to be a getting started for forward
# computation.
#
# For more extensive details and presentation of the general
# concepts for forward modeling. See `ch_forward`.
#
#
#
# +
import mne
from mne.datasets import sample
# Locate the MNE sample dataset on disk.
data_path = sample.data_path()
# the raw file containing the channel location + types
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# The paths to freesurfer reconstructions
subjects_dir = data_path + '/subjects'
subject = 'sample'
# -
# Computing the forward operator
# ------------------------------
#
# To compute a forward operator we need:
#
# - a ``-trans.fif`` file that contains the coregistration info.
# - a source space
# - the BEM surfaces
#
#
# Compute and visualize BEM surfaces
# ----------------------------------
#
# The BEM surfaces are the triangulations of the interfaces between different
# tissues needed for forward computation. These surfaces are for example
# the inner skull surface, the outer skull surface and the outer skin
# surface.
#
# Computing the BEM surfaces requires FreeSurfer and makes use of either of
# the two following command line tools:
#
# - `gen_mne_watershed_bem`
# - `gen_mne_flash_bem`
#
# Here we'll assume it's already computed. It takes a few minutes per subject.
#
# For EEG we use 3 layers (inner skull, outer skull, and skin) while for
# MEG 1 layer (inner skull) is enough.
#
# Let's look at these surfaces. The function :func:`mne.viz.plot_bem`
# assumes that the *bem* folder of your subject's FreeSurfer
# reconstruction contains the necessary files.
#
#
# Plot the BEM surfaces on coronal MRI slices to inspect the segmentation.
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
                 brain_surfaces='white', orientation='coronal')
# Visualization the coregistration
# --------------------------------
#
# The coregistration is the operation that allows one to position the head and
# the sensors in a common coordinate system. In the MNE software the
# transformation to align the head and the sensors is stored in a so-called **trans file**.
# It is a FIF file that ends with -trans.fif. It can be obtained with
# mne_analyze (Unix tools), mne.gui.coregistration (in Python) or mrilab
# if you're using a Neuromag system.
#
# For the Python version see func:`mne.gui.coregistration`
#
# Here we assume the coregistration is done, so we just visually check the
# alignment with the following code.
#
#
# +
# The transformation file obtained by coregistration
trans = data_path + '/MEG/sample/sample_audvis_raw-trans.fif'
info = mne.io.read_info(raw_fname)
# Visually check the head/sensor alignment defined by the trans file.
mne.viz.plot_alignment(info, trans, subject=subject, dig=True,
                       meg=['helmet', 'sensors'], subjects_dir=subjects_dir)
# -
# Compute Source Space
# --------------------
#
# The source space defines the position of the candidate source locations.
# The following code compute such a cortical source space with
# an OCT-6 resolution.
#
# See `setting_up_source_space` for details on source space definition
# and spacing parameter.
#
#
# Set up a cortical source space with 'oct6' spacing; add_dist=False skips
# computing inter-source distances here.
src = mne.setup_source_space(subject, spacing='oct6',
                             subjects_dir=subjects_dir, add_dist=False)
print(src)
# ``src`` contains two parts, one for the left hemisphere (4098 locations) and
# one for the right hemisphere (4098 locations). Sources can be visualized on
# top of the BEM surfaces.
#
#
# Overlay the source locations on the BEM slices.
mne.viz.plot_bem(subject=subject, subjects_dir=subjects_dir,
                 brain_surfaces='white', src=src, orientation='coronal')
# However, only sources that lie in the plotted MRI slices are shown.
# Let's write a few lines of mayavi to see all sources.
#
#
# +
import numpy as np  # noqa
from mayavi import mlab  # noqa
from surfer import Brain  # noqa

brain = Brain('sample', 'lh', 'inflated', subjects_dir=subjects_dir)
surf = brain.geo['lh']
# Indices of the left-hemisphere vertices marked as in-use source locations.
vertidx = np.where(src[0]['inuse'])[0]
# Draw every source point in yellow on the inflated surface.
mlab.points3d(surf.x[vertidx], surf.y[vertidx],
              surf.z[vertidx], color=(1, 1, 0), scale_factor=1.5)
# -
# Compute forward solution
# ------------------------
#
# We can now compute the forward solution.
# To reduce computation we'll just compute a single layer BEM (just inner
# skull) that can then be used for MEG (not EEG).
#
# We specify if we want a one-layer or a three-layer BEM using the
# conductivity parameter.
#
# The BEM solution requires a BEM model which describes the geometry
# of the head the conductivities of the different tissues.
#
#
# One-layer BEM (inner skull only): sufficient for MEG but not for EEG.
conductivity = (0.3,)  # for single layer
# conductivity = (0.3, 0.006, 0.3)  # for three layers
model = mne.make_bem_model(subject='sample', ico=4,
                           conductivity=conductivity,
                           subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)
# Note that the BEM does not involve any use of the trans file. The BEM
# only depends on the head geometry and conductivities.
# It is therefore independent from the MEG data and the head position.
#
# Let's now compute the forward operator, commonly referred to as the
# gain or leadfield matrix.
#
# See :func:`mne.make_forward_solution` for details on parameters meaning.
#
#
# Compute the forward operator; sources within mindist of the inner skull
# are excluded, and EEG is disabled since the BEM has a single layer.
fwd = mne.make_forward_solution(raw_fname, trans=trans, src=src, bem=bem,
                                meg=True, eeg=False, mindist=5.0, n_jobs=2)
print(fwd)
# We can explore the content of fwd to access the numpy array that contains
# the gain matrix.
#
#
# The gain (leadfield) matrix: rows are sensors, columns are dipoles.
leadfield = fwd['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
# To extract the numpy array containing the forward operator corresponding to
# the source space `fwd['src']` with cortical orientation constraint
# we can use the following:
#
#
# Convert to a fixed-orientation (surface-oriented) forward solution.
fwd_fixed = mne.convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
                                         use_cps=True)
leadfield = fwd_fixed['sol']['data']
print("Leadfield size : %d sensors x %d dipoles" % leadfield.shape)
# This is equivalent to the following code that explicitly applies the
# forward operator to a source estimate composed of the identity operator:
#
#
n_dipoles = leadfield.shape[1]
vertices = [src_hemi['vertno'] for src_hemi in fwd_fixed['src']]
# Applying the forward operator to a (1e-9-scaled) identity source estimate
# reproduces the leadfield; the division by 1e-9 undoes the scaling.
stc = mne.SourceEstimate(1e-9 * np.eye(n_dipoles), vertices, tmin=0., tstep=1)
leadfield = mne.apply_forward(fwd_fixed, stc, info).data / 1e-9
# To save to disk a forward solution you can use
# :func:`mne.write_forward_solution` and to read it back from disk
# :func:`mne.read_forward_solution`. Don't forget that FIF files containing
# forward solution should end with *-fwd.fif*.
#
# To get a fixed-orientation forward solution, use
# :func:`mne.convert_forward_solution` to convert the free-orientation
# solution to (surface-oriented) fixed orientation.
#
#
# Exercise
# --------
#
# By looking at
# `sphx_glr_auto_examples_forward_plot_forward_sensitivity_maps.py`
# plot the sensitivity maps for EEG and compare it with the MEG, can you
# justify the claims that:
#
# - MEG is not sensitive to radial sources
# - EEG is more sensitive to deep sources
#
# How will the MEG sensitivity maps and histograms change if you use a free
# instead of a fixed/surface oriented orientation?
#
# Try this changing the mode parameter in :func:`mne.sensitivity_map`
# accordingly. Why don't we see any dipoles on the gyri?
#
#
| 0.15/_downloads/plot_forward.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # 锚框
#
# 目标检测算法通常会在输入图像中采样大量的区域,然后判断这些区域中是否包含我们感兴趣的目标,并调整区域边缘从而更准确地预测目标的真实边界框(ground-truth bounding box)。不同的模型使用的区域采样方法可能不同。这里我们介绍其中的一种方法:它以每个像素为中心生成多个大小和宽高比(aspect ratio)不同的边界框。这些边界框被称为锚框(anchor box)。我们将在后面基于锚框实践目标检测。
#
# 首先,导入本节需要的包或模块。这里我们新引入了`contrib`包,并修改了NumPy的打印精度。由于`NDArray`的打印实际调用NumPy的打印函数,本节打印出的`NDArray`中的浮点数更简洁一些。
# + attributes={"classes": [], "id": "", "n": "1"}
# %matplotlib inline
import d2lzh as d2l
from mxnet import contrib, gluon, image, nd
import numpy as np
np.set_printoptions(2)
# -
# ## 生成多个锚框
#
# 假设输入图像高为$h$,宽为$w$。我们分别以图像的每个像素为中心生成不同形状的锚框。设大小为$s\in (0,1]$且宽高比为$r > 0$,那么锚框的宽和高将分别为$ws\sqrt{r}$和$hs/\sqrt{r}$。当中心位置给定时,已知宽和高的锚框是确定的。
#
# 下面我们分别设定好一组大小$s_1,\ldots,s_n$和一组宽高比$r_1,\ldots,r_m$。如果以每个像素为中心时使用所有的大小与宽高比的组合,输入图像将一共得到$whnm$个锚框。虽然这些锚框可能覆盖了所有的真实边界框,但计算复杂度容易过高。因此,我们通常只对包含$s_1$或$r_1$的大小与宽高比的组合感兴趣,即
#
# $$(s_1, r_1), (s_1, r_2), \ldots, (s_1, r_m), (s_2, r_1), (s_3, r_1), \ldots, (s_n, r_1).$$
#
# 也就是说,以相同像素为中心的锚框的数量为$n+m-1$。对于整个输入图像,我们将一共生成$wh(n+m-1)$个锚框。
#
# 以上生成锚框的方法已实现在`MultiBoxPrior`函数中。指定输入、一组大小和一组宽高比,该函数将返回输入的所有锚框。
# + attributes={"classes": [], "id": "", "n": "2"}
# Load the demo image and generate anchor boxes for every pixel.
img = image.imread('../img/catdog.jpg').asnumpy()
h, w = img.shape[0:2]
print(h, w)
X = nd.random.uniform(shape=(1, 3, h, w))  # construct dummy input data
# 3 sizes + 3 ratios -> 3 + 3 - 1 = 5 anchors per pixel.
Y = contrib.nd.MultiBoxPrior(X, sizes=[0.75, 0.5, 0.25], ratios=[1, 2, 0.5])
Y.shape
# -
# 我们看到,返回锚框变量`y`的形状为(批量大小,锚框个数,4)。将锚框变量`y`的形状变为(图像高,图像宽,以相同像素为中心的锚框个数,4)后,我们就可以通过指定像素位置来获取所有以该像素为中心的锚框了。下面的例子里我们访问以(250,250)为中心的第一个锚框。它有4个元素,分别是锚框左上角的$x$和$y$轴坐标和右下角的$x$和$y$轴坐标,其中$x$和$y$轴的坐标值分别已除以图像的宽和高,因此值域均为0和1之间。
# + attributes={"classes": [], "id": "", "n": "3"}
# Reshape to (height, width, anchors-per-pixel, 4) so anchors can be indexed
# by their center pixel; coordinates are normalized to [0, 1].
boxes = Y.reshape((h, w, 5, 4))
boxes[250, 250, 0, :]
# -
# 为了描绘图像中以某个像素为中心的所有锚框,我们先定义`show_bboxes`函数以便在图像上画出多个边界框。
# + attributes={"classes": [], "id": "", "n": "4"}
# This function is saved in the d2lzh package for convenient reuse later.
def show_bboxes(axes, bboxes, labels=None, colors=None):
    """Draw a series of bounding boxes on a matplotlib axes object.

    Each box in ``bboxes`` is drawn as a rectangle, cycling through
    ``colors``; when a label is available for a box it is rendered on the
    rectangle's anchor corner in a contrasting text color.
    """
    def _as_list(value, fallback=None):
        # Normalize None / a single scalar into a list; pass lists through.
        if value is None:
            return fallback
        if isinstance(value, (list, tuple)):
            return value
        return [value]

    labels = _as_list(labels)
    colors = _as_list(colors, ['b', 'g', 'r', 'm', 'c'])
    for idx, box in enumerate(bboxes):
        draw_color = colors[idx % len(colors)]
        patch = d2l.bbox_to_rect(box.asnumpy(), draw_color)
        axes.add_patch(patch)
        if labels and len(labels) > idx:
            txt_color = 'k' if draw_color == 'w' else 'w'
            axes.text(patch.xy[0], patch.xy[1], labels[idx],
                      va='center', ha='center', fontsize=9, color=txt_color,
                      bbox=dict(facecolor=draw_color, lw=0))
# -
# 刚刚我们看到,变量`boxes`中$x$和$y$轴的坐标值分别已除以图像的宽和高。在绘图时,我们需要恢复锚框的原始坐标值,并因此定义了变量`bbox_scale`。现在,我们可以画出图像中以(250, 250)为中心的所有锚框了。可以看到,大小为0.75且宽高比为1的锚框较好地覆盖了图像中的狗。
# + attributes={"classes": [], "id": "", "n": "5"}
d2l.set_figsize()
# Scale factor mapping normalized box coordinates back to pixel coordinates.
bbox_scale = nd.array((w, h, w, h))
fig = d2l.plt.imshow(img)
show_bboxes(fig.axes, boxes[250, 250, :, :] * bbox_scale,
            ['s=0.75, r=1', 's=0.5, r=1', 's=0.25, r=1', 's=0.75, r=2',
             's=0.75, r=0.5'])
# -
# ## 交并比
#
# 我们刚刚提到某个锚框较好地覆盖了图像中的狗。如果该目标的真实边界框已知,这里的“较好”该如何量化呢?一种直观的方法是衡量锚框和真实边界框之间的相似度。我们知道,Jaccard系数(Jaccard index)可以衡量两个集合的相似度。给定集合$\mathcal{A}$和$\mathcal{B}$,它们的Jaccard系数即二者交集大小除以二者并集大小:
#
# $$J(\mathcal{A},\mathcal{B}) = \frac{\left|\mathcal{A} \cap \mathcal{B}\right|}{\left| \mathcal{A} \cup \mathcal{B}\right|}.$$
#
#
# 实际上,我们可以把边界框内的像素区域看成是像素的集合。如此一来,我们可以用两个边界框的像素集合的Jaccard系数衡量这两个边界框的相似度。当衡量两个边界框的相似度时,我们通常将Jaccard系数称为交并比(intersection over union,IoU),即两个边界框相交面积与相并面积之比,如图9.2所示。交并比的取值范围在0和1之间:0表示两个边界框无重合像素,1表示两个边界框相等。
#
# 
#
# 在本节的剩余部分,我们将使用交并比来衡量锚框与真实边界框以及锚框与锚框之间的相似度。
#
#
#
# ## 标注训练集的锚框
#
#
# 在训练集中,我们将每个锚框视为一个训练样本。为了训练目标检测模型,我们需要为每个锚框标注两类标签:一是锚框所含目标的类别,简称类别;二是真实边界框相对锚框的偏移量,简称偏移量(offset)。在目标检测时,我们首先生成多个锚框,然后为每个锚框预测类别以及偏移量,接着根据预测的偏移量调整锚框位置从而得到预测边界框,最后筛选需要输出的预测边界框。
#
#
# 我们知道,在目标检测的训练集中,每个图像已标注了真实边界框的位置以及所含目标的类别。在生成锚框之后,我们主要依据与锚框相似的真实边界框的位置和类别信息为锚框标注。那么,该如何为锚框分配与其相似的真实边界框呢?
#
#
# 假设图像中锚框分别为$A_1, A_2, \ldots, A_{n_a}$,真实边界框分别为$B_1, B_2, \ldots, B_{n_b}$,且$n_a \geq n_b$。定义矩阵$\boldsymbol{X} \in \mathbb{R}^{n_a \times n_b}$,其中第$i$行第$j$列的元素$x_{ij}$为锚框$A_i$与真实边界框$B_j$的交并比。
# 首先,我们找出矩阵$\boldsymbol{X}$中最大元素,并将该元素的行索引与列索引分别记为$i_1,j_1$。我们为锚框$A_{i_1}$分配真实边界框$B_{j_1}$。显然,锚框$A_{i_1}$和真实边界框$B_{j_1}$在所有的“锚框—真实边界框”的配对中相似度最高。接下来,将矩阵$\boldsymbol{X}$中第$i_1$行和第$j_1$列上的所有元素丢弃。找出矩阵$\boldsymbol{X}$中剩余的最大元素,并将该元素的行索引与列索引分别记为$i_2,j_2$。我们为锚框$A_{i_2}$分配真实边界框$B_{j_2}$,再将矩阵$\boldsymbol{X}$中第$i_2$行和第$j_2$列上的所有元素丢弃。此时矩阵$\boldsymbol{X}$中已有两行两列的元素被丢弃。
# 依此类推,直到矩阵$\boldsymbol{X}$中所有$n_b$列元素全部被丢弃。这个时候,我们已为$n_b$个锚框各分配了一个真实边界框。
# 接下来,我们只遍历剩余的$n_a - n_b$个锚框:给定其中的锚框$A_i$,根据矩阵$\boldsymbol{X}$的第$i$行找到与$A_i$交并比最大的真实边界框$B_j$,且只有当该交并比大于预先设定的阈值时,才为锚框$A_i$分配真实边界框$B_j$。
#
#
# 如图9.3(左)所示,假设矩阵$\boldsymbol{X}$中最大值为$x_{23}$,我们将为锚框$A_2$分配真实边界框$B_3$。然后,丢弃矩阵中第2行和第3列的所有元素,找出剩余阴影部分的最大元素$x_{71}$,为锚框$A_7$分配真实边界框$B_1$。接着如图9.3(中)所示,丢弃矩阵中第7行和第1列的所有元素,找出剩余阴影部分的最大元素$x_{54}$,为锚框$A_5$分配真实边界框$B_4$。最后如图9.3(右)所示,丢弃矩阵中第5行和第4列的所有元素,找出剩余阴影部分的最大元素$x_{92}$,为锚框$A_9$分配真实边界框$B_2$。之后,我们只需遍历除去$A_2, A_5, A_7, A_9$的剩余锚框,并根据阈值判断是否为剩余锚框分配真实边界框。
#
# 
#
#
# 现在我们可以标注锚框的类别和偏移量了。如果一个锚框$A$被分配了真实边界框$B$,将锚框$A$的类别设为$B$的类别,并根据$B$和$A$的中心坐标的相对位置以及两个框的相对大小为锚框$A$标注偏移量。由于数据集中各个框的位置和大小各异,因此这些相对位置和相对大小通常需要一些特殊变换,才能使偏移量的分布更均匀从而更容易拟合。设锚框$A$及其被分配的真实边界框$B$的中心坐标分别为$(x_a, y_a)$和$(x_b, y_b)$,$A$和$B$的宽分别为$w_a$和$w_b$,高分别为$h_a$和$h_b$,一个常用的技巧是将$A$的偏移量标注为
#
# $$\left( \frac{ \frac{x_b - x_a}{w_a} - \mu_x }{\sigma_x},
# \frac{ \frac{y_b - y_a}{h_a} - \mu_y }{\sigma_y},
# \frac{ \log \frac{w_b}{w_a} - \mu_w }{\sigma_w},
# \frac{ \log \frac{h_b}{h_a} - \mu_h }{\sigma_h}\right),$$
#
# 其中常数的默认值为$\mu_x = \mu_y = \mu_w = \mu_h = 0, \sigma_x=\sigma_y=0.1, \sigma_w=\sigma_h=0.2$。如果一个锚框没有被分配真实边界框,我们只需将该锚框的类别设为背景。类别为背景的锚框通常被称为负类锚框,其余则被称为正类锚框。
#
#
# 下面演示一个具体的例子。我们为读取的图像中的猫和狗定义真实边界框,其中第一个元素为类别(0为狗,1为猫),剩余4个元素分别为左上角的$x$和$y$轴坐标以及右下角的$x$和$y$轴坐标(值域在0到1之间)。这里通过左上角和右下角的坐标构造了5个需要标注的锚框,分别记为$A_0, \ldots, A_4$(程序中索引从0开始)。先画出这些锚框与真实边界框在图像中的位置。
# + attributes={"classes": [], "id": "", "n": "6"}
# Ground-truth boxes: first element is the class (0 = dog, 1 = cat), the rest
# are (xmin, ymin, xmax, ymax) normalized to [0, 1].
ground_truth = nd.array([[0, 0.1, 0.08, 0.52, 0.92],
                         [1, 0.55, 0.2, 0.9, 0.88]])
anchors = nd.array([[0, 0.1, 0.2, 0.3], [0.15, 0.2, 0.4, 0.4],
                    [0.63, 0.05, 0.88, 0.98], [0.66, 0.45, 0.8, 0.8],
                    [0.57, 0.3, 0.92, 0.9]])
fig = d2l.plt.imshow(img)
show_bboxes(fig.axes, ground_truth[:, 1:] * bbox_scale, ['dog', 'cat'], 'k')
show_bboxes(fig.axes, anchors * bbox_scale, ['0', '1', '2', '3', '4']);
# -
# 我们可以通过`contrib.nd`模块中的`MultiBoxTarget`函数来为锚框标注类别和偏移量。该函数将背景类别设为0,并令从零开始的目标类别的整数索引自加1(1为狗,2为猫)。我们通过`expand_dims`函数为锚框和真实边界框添加样本维,并构造形状为(批量大小, 包括背景的类别个数, 锚框数)的任意预测结果。
# + attributes={"classes": [], "id": "", "n": "7"}
# Label anchors with classes and offsets; background becomes class 0 and the
# object class indices are shifted up by one (1 = dog, 2 = cat).
labels = contrib.nd.MultiBoxTarget(anchors.expand_dims(axis=0),
                                   ground_truth.expand_dims(axis=0),
                                   nd.zeros((1, 3, 5)))
# -
# 返回的结果里有3项,均为`NDArray`。第三项表示为锚框标注的类别。
# + attributes={"classes": [], "id": "", "n": "8"}
labels[2]
# -
# 我们根据锚框与真实边界框在图像中的位置来分析这些标注的类别。首先,在所有的“锚框—真实边界框”的配对中,锚框$A_4$与猫的真实边界框的交并比最大,因此锚框$A_4$的类别标注为猫。不考虑锚框$A_4$或猫的真实边界框,在剩余的“锚框—真实边界框”的配对中,最大交并比的配对为锚框$A_1$和狗的真实边界框,因此锚框$A_1$的类别标注为狗。接下来遍历未标注的剩余3个锚框:与锚框$A_0$交并比最大的真实边界框的类别为狗,但交并比小于阈值(默认为0.5),因此类别标注为背景;与锚框$A_2$交并比最大的真实边界框的类别为猫,且交并比大于阈值,因此类别标注为猫;与锚框$A_3$交并比最大的真实边界框的类别为猫,但交并比小于阈值,因此类别标注为背景。
#
#
# 返回值的第二项为掩码(mask)变量,形状为(批量大小, 锚框个数的四倍)。掩码变量中的元素与每个锚框的4个偏移量一一对应。
# 由于我们不关心对背景的检测,有关负类的偏移量不应影响目标函数。通过按元素乘法,掩码变量中的0可以在计算目标函数之前过滤掉负类的偏移量。
# + attributes={"classes": [], "id": "", "n": "9"}
labels[1]
# -
# 返回的第一项是为每个锚框标注的四个偏移量,其中负类锚框的偏移量标注为0。
# + attributes={"classes": [], "id": "", "n": "10"}
labels[0]
# -
# ## 输出预测边界框
#
# 在模型预测阶段,我们先为图像生成多个锚框,并为这些锚框一一预测类别和偏移量。随后,我们根据锚框及其预测偏移量得到预测边界框。当锚框数量较多时,同一个目标上可能会输出较多相似的预测边界框。为了使结果更加简洁,我们可以移除相似的预测边界框。常用的方法叫作非极大值抑制(non-maximum suppression,NMS)。
#
# 我们来描述一下非极大值抑制的工作原理。对于一个预测边界框$B$,模型会计算各个类别的预测概率。设其中最大的预测概率为$p$,该概率所对应的类别即$B$的预测类别。我们也将$p$称为预测边界框$B$的置信度。在同一图像上,我们将预测类别非背景的预测边界框按置信度从高到低排序,得到列表$L$。从$L$中选取置信度最高的预测边界框$B_1$作为基准,将所有与$B_1$的交并比大于某阈值的非基准预测边界框从$L$中移除。这里的阈值是预先设定的超参数。此时,$L$保留了置信度最高的预测边界框并移除了与其相似的其他预测边界框。
# 接下来,从$L$中选取置信度第二高的预测边界框$B_2$作为基准,将所有与$B_2$的交并比大于某阈值的非基准预测边界框从$L$中移除。重复这一过程,直到$L$中所有的预测边界框都曾作为基准。此时$L$中任意一对预测边界框的交并比都小于阈值。最终,输出列表$L$中的所有预测边界框。
#
# 下面来看一个具体的例子。先构造4个锚框。简单起见,我们假设预测偏移量全是0:预测边界框即锚框。最后,我们构造每个类别的预测概率。
# + attributes={"classes": [], "id": "", "n": "11"}
anchors = nd.array([[0.1, 0.08, 0.52, 0.92], [0.08, 0.2, 0.56, 0.95],
                    [0.15, 0.3, 0.62, 0.91], [0.55, 0.2, 0.9, 0.88]])
# All-zero offsets: the predicted boxes coincide with the anchors.
offset_preds = nd.array([0] * anchors.size)
cls_probs = nd.array([[0] * 4,  # predicted probabilities for background
                      [0.9, 0.8, 0.7, 0.1],  # predicted probabilities for dog
                      [0.1, 0.2, 0.3, 0.9]])  # predicted probabilities for cat
# -
# 在图像上打印预测边界框和它们的置信度。
# + attributes={"classes": [], "id": "", "n": "12"}
# Show the predicted boxes with their confidences before suppression.
fig = d2l.plt.imshow(img)
show_bboxes(fig.axes, anchors * bbox_scale,
            ['dog=0.9', 'dog=0.8', 'dog=0.7', 'cat=0.9'])
# -
# 我们使用`contrib.nd`模块的`MultiBoxDetection`函数来执行非极大值抑制并设阈值为0.5。这里为`NDArray`输入都增加了样本维。我们看到,返回的结果的形状为(批量大小, 锚框个数, 6)。其中每一行的6个元素代表同一个预测边界框的输出信息。第一个元素是索引从0开始计数的预测类别(0为狗,1为猫),其中-1表示背景或在非极大值抑制中被移除。第二个元素是预测边界框的置信度。剩余的4个元素分别是预测边界框左上角的$x$和$y$轴坐标以及右下角的$x$和$y$轴坐标(值域在0到1之间)。
# + attributes={"classes": [], "id": "", "n": "13"}
# Run non-maximum suppression with an IoU threshold of 0.5; output rows with
# class -1 are background boxes or boxes removed by NMS.
output = contrib.ndarray.MultiBoxDetection(
    cls_probs.expand_dims(axis=0), offset_preds.expand_dims(axis=0),
    anchors.expand_dims(axis=0), nms_threshold=0.5)
output
# -
# 我们移除掉类别为-1的预测边界框,并可视化非极大值抑制保留的结果。
# + attributes={"classes": [], "id": "", "n": "14"}
fig = d2l.plt.imshow(img)
for i in output[0].asnumpy():
    if i[0] == -1:  # skip background / NMS-suppressed rows
        continue
    # i = (class, confidence, xmin, ymin, xmax, ymax)
    label = ('dog=', 'cat=')[int(i[0])] + str(i[1])
    show_bboxes(fig.axes, [nd.array(i[2:]) * bbox_scale], label)
# -
# 实践中,我们可以在执行非极大值抑制前将置信度较低的预测边界框移除,从而减小非极大值抑制的计算量。我们还可以筛选非极大值抑制的输出,例如,只保留其中置信度较高的结果作为最终输出。
#
#
# ## 小结
#
# * 以每个像素为中心,生成多个大小和宽高比不同的锚框。
# * 交并比是两个边界框相交面积与相并面积之比。
# * 在训练集中,为每个锚框标注两类标签:一是锚框所含目标的类别;二是真实边界框相对锚框的偏移量。
# * 预测时,可以使用非极大值抑制来移除相似的预测边界框,从而令结果简洁。
#
# ## 练习
#
# * 改变`MultiBoxPrior`函数中`sizes`和`ratios`的取值,观察生成的锚框的变化。
# * 构造交并比为0.5的两个边界框,观察它们的重合度。
# * 按本节定义的为锚框标注偏移量的方法(常数采用默认值),验证偏移量`labels[0]`的输出结果。
# * 修改“标注训练集的锚框”与“输出预测边界框”两小节中的变量`anchors`,结果有什么变化?
#
#
#
#
# ## 扫码直达[讨论区](https://discuss.gluon.ai/t/topic/7024)
#
# 
| chapter_computer-vision/anchor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="ttdHe7yVem48"
# ### 판다스 설치 및 불러오기
#
# 아나콘다(Anaconda)를 사용한다면 판다스가 기본으로 장착돼있으므로 별도의 설치과정이 필요 없다. 표준 파이썬을 사용하거나 가상환경을 만든다면 다음과 같이 판다스를 설치하면 된다. 참고로 주피터 노트북에서 느낌표(!)를 앞에 붙이면 명령프롬프트(cmd창)에서의 명령어를 사용할 수 있다.
# -
# These are shell commands, not Python: running the bare `pip install` lines
# as Python code raises a SyntaxError, so they are kept here as comments.
# pip install pandas            # when typing in a cmd window or terminal
# pip install pandas --upgrade  # when upgrading an already-installed pandas
# !pip install pandas           # when typing in a Jupyter notebook cell
# 판다스 라이브러리를 사용하려면 우선 임포트(import)를 해야한다. 판다스는 pd 라는 별칭으로 임포트하는 것이 전세계적인 관례이므로 따르도록 하자. 아래와 같이 판다스의 버전을 확인해보자. 구버전일 경우 업그레이드를 해주자 (2021.2.21 현재 1.2.2 가 최신)
# +
import pandas as pd  # import the library under its conventional alias
pd.__version__  # check the installed pandas version
# -
# 아래에는 판다스를 통해 할 수 있는 간단한 작업들의 예시다. 연습삼아 주피터 노트북을 켜고 코드를 따라 치면서 결과를 확인해보자.
# ### 엑셀 파일에서 데이터 읽어오기
#
# 원본 데이터를 담고 있는 [엑셀파일](/asset/excel/패널자료_2010_2019. 코스피코스닥.xlsx)은 다음과 같다. 코스피와 코스닥에 상장된 기업들의 과거 10년간의 매출액, 영업이익, 당기순이익과 섹터 정보를 담은 자료이다. 총 22,600개의 행(row)과 7개의 열(column)로 구성돼있다.
#
# 
#
# 이 데이터를 판다스 명령어를 통해 파이썬 환경으로 불러오자
# Load the Excel panel data (KOSPI/KOSDAQ companies, 10 years of financials)
# into a DataFrame.
data = pd.read_excel('패널자료_2010_2019. 코스피코스닥.xlsx')
data
# ### Filtering / sorting
#
# Keep only LG전자's rows from 2015 onward, sorted in reverse chronological order.
condition = (data.종목명 == 'LG전자') & (data.연도 >= 2015)
lg = data[condition].sort_values(by='연도', ascending=False)
lg
# ### Pivot table
#
# Years as rows, sectors as columns, summed operating profit in each cell.
pivot = data.pivot_table(values='영업이익(천원)', index = '연도', columns='섹터', aggfunc='sum')
pivot
# ### Visualization
#
# Time-series plot of operating profit for the 'automobiles & parts' (자동차 및
# 부품) and 'capital goods' (자본재) sectors from the pivot table above.
pivot[['자동차 및 부품', '자본재']].plot()
| _ipynb/pandas_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cap_mud_tensorflow
# language: python
# name: cap_mud
# ---
# +
import tensorflow as tf
import pickle
import import_ipynb
from model import Model
from utils import build_dict, build_dataset, batch_iter
# Model / decoding hyperparameters. These must match the values used at
# training time so the checkpoint can be restored into an identical graph.
embedding_size=300
num_hidden = 300
num_layers = 3
learning_rate = 0.001
beam_width = 10
keep_prob = 0.8
glove = True          # use pre-trained GloVe word embeddings
batch_size=256
num_epochs=10         # NOTE(review): unused below — batch_iter runs a single pass
print("Loading dictionary...")
word_dict, reversed_dict, article_max_len, summary_max_len = build_dict("valid", False)
print("Loading validation dataset...")
valid_x = build_dataset("valid", word_dict, article_max_len, summary_max_len, False)
# Effective (non-padding) length per article — assumes id 0 is the padding
# token; TODO confirm against build_dataset.
valid_x_len = [len([y for y in x if y != 0]) for x in valid_x]
with tf.Session() as sess:
    print("Loading saved model...")
    # forward_only=True builds the inference-only (beam-search) graph.
    model = Model(reversed_dict, article_max_len, summary_max_len, embedding_size, num_hidden, num_layers, learning_rate, beam_width, keep_prob, glove, forward_only=True)
    saver = tf.train.Saver(tf.global_variables())
    ckpt = tf.train.get_checkpoint_state("./saved_model/")
    saver.restore(sess, ckpt.model_checkpoint_path)
    # Dummy all-zero "labels": only the encoder inputs matter at inference.
    batches = batch_iter(valid_x, [0] * len(valid_x), batch_size, 1)
    print("Writing summaries to 'result.txt'...")
    for batch_x, _ in batches:
        batch_x_len = [len([y for y in x if y != 0]) for x in batch_x]
        valid_feed_dict = {
            model.batch_size: len(batch_x),
            model.X: batch_x,
            model.X_len: batch_x_len,
        }
        prediction = sess.run(model.prediction, feed_dict=valid_feed_dict)
        # Keep only the top beam (index 0) and map token ids back to words.
        prediction_output = [[reversed_dict[y] for y in x] for x in prediction[:, 0, :]]
        # Append mode: summaries from successive batches accumulate in one file.
        with open("result.txt", "a") as f:
            for line in prediction_output:
                summary = list()
                for word in line:
                    if word == "</s>":      # end-of-sequence token terminates the summary
                        break
                    if word not in summary:  # drop repeated words
                        summary.append(word)
                print(" ".join(summary), file=f)
print('Summaries are saved to "result.txt"...')
| ml_headline_server_code/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('base')
# language: python
# name: python3
# ---
# # Natural Language Processing
# One of my objectives was to predict from the description how well the movie was going to be received.
# To do that I need to work with the ``description`` column as input, and change the ``rating`` column into a boolean one where True means a rating of 5 or higher.
import pandas as pd
import numpy as np
# Raw-string path: "\i" in a normal literal is an invalid escape sequence
# (SyntaxWarning on Python 3.12+, a future error); the path value is unchanged.
data = pd.read_csv(r'data\imdb_processed.csv')
sample = data.sample(100, random_state=42)  # fixed seed for reproducibility
# ## Target column
def approved(rating):
    """Map a numeric rating to a boolean: True when rating >= 5.0."""
    return bool(rating >= 5.0)
# Binary target: True when rating >= 5.
data['rating_bool'] = data.rating.map(approved)
data['rating_bool'].value_counts()
# The target is very unbalanced, and I also need to reduce the number of rows
# in my dataset due to time constraints, so I will apply some downsampling.
from sklearn.utils import resample
category_0 = data[data['rating_bool'] == False]
print(category_0.shape)
category_1 = data[data['rating_bool'] == True]
print(category_1.shape)
# Undersample the majority class down to the minority-class size
# (replace=False: sample without replacement).
category_1_undersampled = resample(category_1,
                                   replace=False,
                                   n_samples = len(category_0))
print(category_1_undersampled.shape)
# Balanced dataset: all minority rows plus the undersampled majority rows.
data_downsampled = pd.concat([category_0, category_1_undersampled], axis=0)
data_downsampled.shape
# ## Processing text
# text = data[['genres', 'description']]
descriptions = data_downsampled.description
# +
from nltk.tokenize import word_tokenize
import nltk
# nltk.download('punkt')
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
from nltk.corpus import stopwords
import re
# +
def clean_up(s):
    """
    Cleans up numbers, URLs, and special characters from a string.

    Args:
        s: The string to be cleaned up.

    Returns:
        A string with URLs removed and every non-letter character
        (digits, punctuation) replaced by a single space.
    """
    # Raw strings: the original literals contained "\/", "\w", "\-" which are
    # invalid escape sequences in a normal string (SyntaxWarning on 3.12+,
    # a future error). The runtime pattern values are unchanged.
    reg_url = r'(?:(?:https?|ftp):\/\/)?[\w\/\-?=%.]+\.[\w\/\-&?=%.]+'
    reg_sp = r'[^A-Za-z ]'
    s = re.sub(reg_url, '', s)   # drop URLs entirely
    s = re.sub(reg_sp, ' ', s)   # blank out digits and punctuation
    return s
def tokenize(s):
    """
    Tokenize a string.

    Args:
        s: String to be tokenized.

    Returns:
        A list of words as the result of tokenization.
    """
    return word_tokenize(s)
def stem_and_lemmatize(l):
    """
    Perform stemming and lemmatization on a list of words.

    Args:
        l: A list of strings.

    Returns:
        A new list with each word Porter-stemmed and then WordNet-lemmatized;
        the input list is not modified.
    """
    ps = PorterStemmer()
    lemmatizer = WordNetLemmatizer()
    # The original also re-looped over `l` rebinding a local `word` (a no-op)
    # and named the result `list`, shadowing the builtin — both removed.
    return [lemmatizer.lemmatize(ps.stem(word)) for word in l]
def remove_stopwords(l):
    """
    Remove English stopwords from a list of strings.

    Args:
        l: A list of strings.

    Returns:
        A list of strings after stop words are removed.
    """
    # Build the stopword set once: the original called stopwords.words()
    # (which re-reads the whole corpus) for every token, making the function
    # O(len(l) * corpus_size) with list-membership tests on top.
    stop_words = set(stopwords.words())
    return [word for word in l if word not in stop_words]
def full_process(s):
    """
    Run the whole text pipeline on one string.

    Args:
        s: the string to process

    Returns:
        The list of tokens left after cleaning, tokenizing,
        stemming/lemmatizing and stopword removal.
    """
    cleaned = clean_up(s)
    tokens = tokenize(cleaned)
    normalized = stem_and_lemmatize(tokens)
    return remove_stopwords(normalized)
# -
# Run the full cleaning pipeline over every description (slow: it stems,
# lemmatizes and filters stopwords per row).
data_downsampled['tokens'] = descriptions.apply(full_process)  # full
data_downsampled['tokens'][:15]
data_downsampled
# +
# data_downsampled.to_csv(r'data\data_downsampled.csv', index=False)
# -
# Raw string: "\d" in a normal literal is an invalid escape sequence
# (SyntaxWarning on Python 3.12+); the path value itself is unchanged.
data_downsampled = pd.read_csv(r'data\data_downsampled.csv')
# ## Word list
# Is a distribution of frequencies for the words.
from nltk.probability import FreqDist
token_list = data_downsampled['tokens'].tolist()
# +
# After the CSV round trip each row of 'tokens' is the *string* form of a
# list — presumably "['a', 'b', ...]"; strip the [ ] " ' characters and split
# on ", " to recover a flat word list.
word_list = []
regx = r'''[\[\]\"\']'''  # raw string avoids invalid-escape warnings; same pattern
for token in token_list:
    token = re.sub(regx, '', token)
    # The original bound the split result to `list`, shadowing the builtin;
    # extend() appends in place with the same final result.
    word_list.extend(token.split(', '))
# +
fd = FreqDist(word_list)
fd.plot(20, cumulative=False)
# -
# I can already see a big outlier with the word ' hi', I will have to treat it.
#
fd.max()
word_list2 = word_list
# I will use a list comprehension to read the original ``word_list`` and copy everything that is not 'hi' to a new list.
# +
def remove_values_from_list(the_list, val):
    """Return a copy of *the_list* with every occurrence of *val* removed."""
    kept = []
    for item in the_list:
        if item != val:
            kept.append(item)
    return kept
word_list2 = remove_values_from_list(word_list, 'hi')
fd = FreqDist(word_list2)
fd.max()
# -
fd.plot(20, cumulative=False)
# Now this plot looks much better.
len(fd)
# That is a high amount of words, but I can select just the most common:
most_common = fd.most_common(500)
common_word_list = [a_tuple[0] for a_tuple in most_common]
# common_word_list
def find_features(row):
    """
    Build bag-of-words features for one dataframe row.

    Returns a (features, label) tuple: `features` maps each word of the
    module-level `common_word_list` to its membership in the row's tokens,
    and `label` is the row's `rating_bool`.
    """
    tokens = row['tokens']
    flags = {word: (word in tokens) for word in common_word_list}
    return (flags, row['rating_bool'])
features = data_downsampled.apply(find_features, axis=1).tolist()
len(features)
# With my features done, I can use a Naive Bayes Model to try to predict whether the movie has a score above 5 or not.
# 1st -> Train/Test split
# +
frontier = int(np.ceil(len(features)*0.8))
# set that we'll train our classifier with
training_set = features[:frontier]
# set that we'll test against.
testing_set = features[frontier:]
# -
# 2nd -> Training
classifier = nltk.NaiveBayesClassifier.train(training_set)
# 3rd -> Testing
print("Classifier accuracy percent:",(nltk.classify.accuracy(classifier, testing_set))*100)
classifier.show_most_informative_features(15)
# The accuracy does not seem that good, I'll try using more words:
# +
most_common = fd.most_common(3000)
common_word_list = [a_tuple[0] for a_tuple in most_common]
features = data_downsampled.apply(find_features, axis=1).tolist()
frontier = int(np.ceil(len(features)*0.8))
training_set = features[:frontier]
testing_set = features[frontier:]
classifier = nltk.NaiveBayesClassifier.train(training_set)
print("Classifier accuracy percent:",(nltk.classify.accuracy(classifier, testing_set))*100)
# -
# Adding more words doesn't seem to help that much. Now I will try running the model with the word I removed before, 'hi'.
# +
# Rebuild the flat word list (this time keeping 'hi'): strip the [ ] " '
# characters from each stringified token list and split on ", ".
word_list = []
regx = r'''[\[\]\"\']'''  # raw string avoids invalid-escape warnings; same pattern
for token in token_list:
    token = re.sub(regx, '', token)
    # The original bound the split result to `list`, shadowing the builtin;
    # extend() appends in place with the same final result.
    word_list.extend(token.split(', '))
# +
fd2 = FreqDist(word_list)
del most_common
most_common = fd2.most_common(3000)
common_word_list = [a_tuple[0] for a_tuple in most_common]
features = data_downsampled.apply(find_features, axis=1).tolist()
frontier = int(np.ceil(len(features)*0.8))
# set that we'll train our classifier with
training_set = features[:frontier]
# set that we'll test against.
testing_set = features[frontier:]
classifier = nltk.NaiveBayesClassifier.train(training_set)
print("Classifier accuracy percent:",(nltk.classify.accuracy(classifier, testing_set))*100)
# -
# It is a bit suspicious to get the exact same accuracies with 'hi' and 500 words and without 'hi' and 3000 words, but I cannot find the reason so far.
# I want to study further on what is happening here, so I will add the features to the dataframe so I can see what the descriptions with 'hi' in it have in common.
def find_features_column(row):
    """
    Build the bag-of-words feature dict for one dataframe row: maps each word
    of the module-level `common_word_list` to its membership in the row's
    tokens (no label attached).
    """
    tokens = row['tokens']
    return {word: (word in tokens) for word in common_word_list}
words_df = data_downsampled.apply(find_features_column, axis=1)
words_df
type(words_df)
new_df = pd.DataFrame(words_df.tolist())
new_df
data_downsampled.reset_index(drop=True)
data_downsampled_2 = pd.concat([data_downsampled, new_df], axis=1)
pd.set_option('display.max_colwidth', None)
data_downsampled_2['description'].loc[data_downsampled_2.hi == True].head(5)
# The 'hi' in question is what I got from stemming/lemmatizing the words 'him' or 'his'.
#
# The next part would be to apply this dataset to linear and logistic regressions.
# Raw string: "\d" in a normal literal is an invalid escape sequence
# (SyntaxWarning on Python 3.12+); the path value itself is unchanged.
data_downsampled_2.to_csv(r'data\data_tokenized.csv', index=False)
| 4-nlp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/giuliapuntoit/CapsnetGoogleLandmark/blob/main/project_VGG16.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="c9QcGnGPdX2C"
#
# **Install requirements**
# + id="k9O3aM3Tb28q" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="302544eb-f408-4038-b640-f8c51411ee42"
# !pip3 install 'torch==1.4.0'
# !pip3 install 'torchvision==0.5.0'
# !pip3 install 'Pillow-SIMD'
# !pip3 install 'tqdm'
# !pip3 install 'livelossplot'
# !pip3 install --upgrade 'pillow'
# + [markdown] id="fo942LMOdlh4"
# **Import libraries**
# + id="DokFOdD1dJEl"
import os
import logging
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Subset, DataLoader
from torch.backends import cudnn
import torchvision
from torchvision import transforms
from torchvision.models import alexnet
from torchvision.models import vgg16_bn
from torchvision.models import densenet161
from torchvision.models import resnet50
from PIL import Image
from tqdm import tqdm
from livelossplot import PlotLosses
import copy
from datetime import datetime
# + [markdown] id="uZKRW1_Wu488"
# **Download images**
# + id="iCpZyPUNu7aH"
# Note to Kagglers: This script will not run directly in Kaggle kernels. You
# need to download it and run it on your local machine.
# Downloads images from the Google Landmarks dataset using multiple threads.
# Images that already exist will not be downloaded again, so the script can
# resume a partially completed download. All images will be saved in the JPG
# format with 90% compression quality.
import sys, os, multiprocessing, csv
from PIL import Image
from io import BytesIO
from urllib.request import urlopen
import torch
from torchvision import transforms
def ParseData(data_file):
    """
    Read (key, url, label) triples from a CSV file.

    Args:
        data_file: path to the CSV file.

    Returns:
        A list of [key, url, label] lists, one per CSV row. NOTE(review):
        despite the original "chop off header" comment, no header row is
        skipped here — confirm the CSV really has no header.
    """
    # `with` guarantees the file handle is closed even on error; the
    # original opened the file and never closed it.
    with open(data_file, 'r') as csvfile:
        csvreader = csv.reader(csvfile)
        key_url_label_list = [line[:3] for line in csvreader]
    return key_url_label_list
def DownloadImage(key_url_label, out_dir):
    """
    Download one image, resize it to 224x224 and save it as JPEG under
    out_dir/<label>/<key>.jpg. Best-effort: every failure is reported with
    a warning and the function simply returns.

    Args:
        key_url_label: sequence of (key, url, label) strings.
        out_dir: root output directory (one subdirectory per label).
    """
    line = key_url_label
    key = line[0]
    url = line[1]
    label = line[2]
    out_lab = os.path.join(out_dir, label)
    filename = os.path.join(out_lab, '%s.jpg' % key)
    print(out_lab)
    if not os.path.isdir(out_lab):
        os.mkdir(out_lab)
    # Resize transform shared by every image.
    p = transforms.Compose([transforms.Resize((224, 224))])
    if os.path.exists(filename):
        print('Image %s already exists. Skipping download.' % filename)
        return
    # The original used bare `except:` clauses, which also swallow
    # KeyboardInterrupt/SystemExit and make the downloader impossible to
    # stop — all narrowed to Exception.
    try:
        response = urlopen(url)
        image_data = response.read()
    except Exception:
        print('Warning: Could not download image %s from %s' % (key, url))
        return
    try:
        pil_image = Image.open(BytesIO(image_data))
    except Exception:
        print('Warning: Failed to parse image %s' % key)
        return
    try:
        pil_image_rgb = pil_image.convert('RGB')
        pil_image_rgb = p(pil_image_rgb)
    except Exception:
        print('Warning: Failed to convert image %s to RGB' % key)
        return
    try:
        pil_image_rgb.save(filename, format='JPEG')
    except Exception:
        print('Warning: Failed to save image %s' % filename)
        return
def Run(data_file, out_dir):
    """
    Download every image listed in the CSV *data_file* into *out_dir*,
    sequentially, creating *out_dir* if it does not exist.
    """
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    for entry in ParseData(data_file):
        DownloadImage(entry, out_dir)
# + [markdown] id="OIDLJuIXK_vh"
# **Set Arguments**
# + id="d5PkYfqfK_SA"
DEVICE = 'cuda'       # 'cuda' or 'cpu'
NUM_CLASSES = 50      # 50 selected landmark classes for now
BATCH_SIZE = 16       # Higher batch sizes allow for larger learning rates. An empirical
                      # heuristic suggests that, when changing the batch size, the learning
                      # rate should change by the same factor to have comparable results
LR = 1e-3             # The initial Learning Rate
MOMENTUM = 0.9        # Hyperparameter for SGD, keep this at 0.9 when using SGD
WEIGHT_DECAY = 5e-5   # Regularization, you can keep this at the default
NUM_EPOCHS = 30       # Total number of training epochs (iterations over dataset)
STEP_SIZE = 20        # How many epochs before decreasing learning rate (step-down policy)
GAMMA = 0.1           # Multiplicative factor for learning rate step-down
LOG_FREQUENCY = 10    # Print the loss every LOG_FREQUENCY training steps
# + [markdown] id="9gwii0TBHvzh"
# **Define Data Preprocessing**
# + id="QUDdw4j2H0Mc"
# FORSE QUI CI SONO DELLA TRANSFORM SUPERFLUE
# Define transforms for training phase
train_transform = transforms.Compose([transforms.Resize(256), # Resizes short size of the PIL image to 256
transforms.CenterCrop(224), # Crops a central square patch of the image
# 224 because torchvision's AlexNet needs a 224x224 input!
# Remember this when applying different transformations, otherwise you get an error
transforms.ToTensor(), # Turn PIL Image to torch.Tensor
#transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)) # Normalizes tensor with mean and standard deviation
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # Normalizes tensor with mean and standard deviation
])
# Define transforms for the evaluation phase
eval_transform = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
#transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) # Normalizes tensor with mean and standard deviation
])
# + [markdown] id="2qYIHPzYLY7i"
# **Prepare Dataset**
# + id="QfVq_uDHLbsR" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="e48890e3-2741-4bc0-a44c-17848dd6bfe0"
FMT = '%H:%M:%S'
now = datetime.now()
time1_before_download = now.strftime(FMT)
print(time1_before_download)
# Clone github repository with data
if not os.path.isdir('./landmark-recognition'):
# !git clone https://github.com/davidetadz/landmark-recognition.git
data_file = './landmark-recognition/train_filter.csv'
out_dir = './landmark-recognition/train_img_folder/'
Run(data_file, out_dir)
DATA_DIR = 'landmark-recognition/train_img_folder/'
# Prepare Pytorch train/test Datasets
train_dataset = torchvision.datasets.ImageFolder(DATA_DIR, transform=train_transform) # probably da cambiare e usare le funzioni che ho scritto
test_dataset = torchvision.datasets.ImageFolder(DATA_DIR, transform=eval_transform)
#train_dataset = Landmark(root='landmark-recognition', split='train', transform=train_transform)
#test_dataset = Landmark(root='landmark-recognition', split='test', transform=eval_transform)
#half_train_indexes = [idx for idx in range(len(train_dataset)) if idx % 2]
#half_val_indexes = [idx for idx in range(len(train_dataset)) if not idx % 2]
train_indexes = [idx for idx in range(len(train_dataset)) if idx % 5]
test_indexes = [idx for idx in range(len(test_dataset)) if not idx % 5]
#half_train_dataset = Subset(train_dataset, half_train_indexes)
#half_val_dataset = Subset(train_dataset, half_val_indexes)
train_dataset = Subset(train_dataset, train_indexes)
test_dataset = Subset(test_dataset, test_indexes)
# Check dataset sizes
print('Train Dataset: {}'.format(len(train_dataset)))
print('Test Dataset: {}'.format(len(test_dataset)))
#print('Half train Dataset: {}'.format(len(half_train_dataset)))
#print('Half val Dataset: {}'.format(len(half_val_dataset)))
now = datetime.now()
time2_after_download = now.strftime(FMT)
print(time2_after_download)
# + [markdown] id="FYEDQ7Z21ldN"
# **Prepare Dataloaders**
# + id="VriRw8SI1nle"
# Dataloaders iterate over pytorch datasets and transparently provide useful functions (e.g. parallelization and shuffling)
train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=True)
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
# here i should load half_train_dataset and half_val_dataset instead of train and test datasets
#train_dataloader = DataLoader(half_train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=True)
#val_dataloader = DataLoader(half_val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
#test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
# + [markdown] id="gbZ1t5Qs2z4j"
# **Prepare Network**
# + id="exHUjtXa22DN" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="2a62b92a-ace5-4e57-f2e4-cbd8cbb3ca7c"
# try with vgg16_bn, googlenet, alexnet, resnet
# net = vgg16_bn(pretrained=True) # VGG 16-layer model (configuration “D”) with batch normalization
# net = alexnet(pretrained=True)
# net = densenet161(pretrained=True)
net = resnet50(pretrained=False)
print(net)
# AlexNet has 1000 output neurons, corresponding to the 1000 ImageNet's classes
# We need 50 outputs for this VGG
# AlexNet and VGG16
#net.classifier[6] = nn.Linear(4096, NUM_CLASSES) # nn.Linear in pytorch is a fully connected layer
# The convolutional layer is nn.Conv2d
# Resnet50
net.fc = nn.Linear(2048, NUM_CLASSES)
# DenseNet161
#net.classifier = nn.Linear(2208, NUM_CLASSES)
# We just changed the last layer of AlexNet with a new fully connected layer with 101 outputs
# It is mandatory to study torchvision.models.alexnet source code
# + [markdown] id="KEyL3H_R4qCf"
# **Prepare Training**
# + id="9sjq00G94tSc"
# Define loss function
criterion = nn.CrossEntropyLoss() # for classification, we use Cross Entropy
# Choose parameters to optimize
# To access a different set of parameters, you have to access submodules of AlexNet
# (nn.Module objects, like AlexNet, implement the Composite Pattern)
# e.g.: parameters of the fully connected layers: net.classifier.parameters()
# e.g.: parameters of the convolutional layers: look at alexnet's source code ;)
parameters_to_optimize = net.parameters() # In this case we optimize over all the parameters of AlexNet
# Define optimizer
# An optimizer updates the weights based on loss
# We use SGD with momentum
optimizer = optim.SGD(parameters_to_optimize, lr=LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
# Define scheduler
# A scheduler dynamically changes learning rate
# The most common schedule is the step(-down), which multiplies learning rate by gamma every STEP_SIZE epochs
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=STEP_SIZE, gamma=GAMMA)
# + [markdown] id="AxYUli9d9uYQ"
# **Train**
# + id="ZcoQ5fD49yT_" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="03cf1589-4415-42a6-ae7c-ddbcacb6be55"
# By default, everything is loaded to cpu
net = net.to(DEVICE)  # this will bring the network to GPU if DEVICE is cuda
liveloss = PlotLosses()
cudnn.benchmark  # NOTE(review): this only *reads* the attribute — a no-op.
                 # Enabling cuDNN autotuning would be `cudnn.benchmark = True`.
current_step = 0
# Start iterating over the epochs
for epoch in range(NUM_EPOCHS):
    logs = {}
    net.train(True)  # Set Network to training mode
    print('Starting epoch {}/{}, LR = {}'.format(epoch+1, NUM_EPOCHS, scheduler.get_lr()))
    train_running_corrects = 0
    train_running_loss = 0.0
    # Iterate over the dataset
    for images, labels in train_dataloader:
        # Bring data over the device of choice
        images = images.to(DEVICE)
        labels = labels.to(DEVICE)
        net.train()  # Sets module in training mode (redundant with the call above)
        # PyTorch, by default, accumulates gradients after each backward pass.
        # We need to manually set the gradients to zero before starting a new iteration.
        optimizer.zero_grad()  # Zero-ing the gradients
        # Forward pass to the network
        outputs = net(images)
        # Compute loss based on output and ground truth
        loss = criterion(outputs, labels)
        # Running sum of per-sample loss; detach() so no graph is kept alive.
        train_running_loss += loss.detach() * images.size(0)
        # Log loss
        if current_step % LOG_FREQUENCY == 0:
            print('Step {}, Loss {}'.format(current_step, loss.item()))
        # Compute gradients for each layer and update weights
        loss.backward()   # backward pass: computes gradients
        optimizer.step()  # update weights based on accumulated gradients
        # Get predictions (index of the max logit per sample)
        _, preds = torch.max(outputs.data, 1)
        # Update Corrects
        train_running_corrects += torch.sum(preds == labels.data).data.item()
        current_step += 1
    # Calculate per-epoch Loss and Accuracy over the whole training set
    epoch_train_loss = train_running_loss / len(train_dataset)
    epoch_train_acc = train_running_corrects / float(len(train_dataset))
    print('Training Accuracy at epoch {}: {}'.format(epoch+1, epoch_train_acc))
    logs['' + 'Loss'] = epoch_train_loss
    logs['' + 'Accuracy'] = epoch_train_acc
    liveloss.update(logs)
    #liveloss.draw()
    # Step the scheduler at each epoch
    scheduler.step()
    # go to next epoch
now = datetime.now()
time3_after_training = now.strftime(FMT)  # timestamp: end of training phase
print(time3_after_training)
# + id="tLgVemJBwJB1" colab={"base_uri": "https://localhost:8080/", "height": 431} outputId="882bf1fa-cde9-4bcb-dd34-57099718b8e0"
liveloss.draw() #plot graphs
# + [markdown] id="UsHFI-GAJd69"
# **Test**
# + id="EO3HV5pqJg1o" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="e4c08dcd-a6ff-4011-8a08-09cb26e844a5"
net = net.to(DEVICE)  # this will bring the network to GPU if DEVICE is cuda
net.train(False)  # Set Network to evaluation mode
# NOTE(review): inference runs without `torch.no_grad()`; wrapping the loop
# in it would avoid tracking gradients and save memory — confirm and add.
running_corrects = 0
for images, labels in tqdm(test_dataloader):
    images = images.to(DEVICE)
    labels = labels.to(DEVICE)
    # Forward Pass
    outputs = net(images)
    # Get predictions (index of the max logit per sample)
    _, preds = torch.max(outputs.data, 1)
    # Update Corrects
    running_corrects += torch.sum(preds == labels.data).data.item()
# Calculate Accuracy over the whole test set
accuracy = running_corrects / float(len(test_dataset))
print('Test Accuracy: {}'.format(accuracy))
now = datetime.now()
time4_after_testing = now.strftime(FMT)  # timestamp: end of testing phase
print(time4_after_testing)
# Wall-clock durations of the three phases. Only time-of-day (HH:MM:SS) is
# kept, so a phase that crosses midnight yields a wrong/negative delta.
delta_download = datetime.strptime(time2_after_download, FMT) - datetime.strptime(time1_before_download, FMT)
delta_training = datetime.strptime(time3_after_training, FMT) - datetime.strptime(time2_after_download, FMT)
delta_testing = datetime.strptime(time4_after_testing, FMT) - datetime.strptime(time3_after_training, FMT)
print('Time to download:\t{}'.format(delta_download))
print('Time to train:\t\t{}'.format(delta_training))
print('Time to test:\t\t{}'.format(delta_testing))
| project_VGG16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introducción a modelos de regresión
# ## Primera parte: Analizar los datos
# ### Importamos las librerías necesarias
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
# ### Importamos nuestro conjunto de datos
enfermedad_corazon= None #TODO: Importar el archivo csv heart_disease.csv con pandas
# ### Analizamos los datos y metadatos de nuestro data frame
# +
#TODO: Revisamos los primeros 5 registros de nuestro dataframe
# +
#TODO: Verificamos si hay valores nulos
# +
#TODO: Borramos los valores nulos
# -
print(enfermedad_corazon.groupby('TenYearCHD').size())
# ## Segunda parte: Modelo de regresión logística
X = enfermedad_corazon[enfermedad_corazon.columns]
# `X= #TODO` was a SyntaxError (assignment with no right-hand side); a None
# placeholder keeps this exercise file parseable until the student fills it in.
X = None  # TODO: select the feature subset 'age','cigsPerDay','totChol','sysBP','diaBP','glucose'
X.head()
Y = None  # TODO: select the target column 'TenYearCHD'
Y.head()
# ### Separamos nuestros datos en entrenamiento y pruebas
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=1)
# ### Creamos nuestro modelo y lo entrenamos
mi_modelo = LogisticRegression(max_iter=1)
# TODO: fit the model on the training data (x_train, y_train)
# ### Check the model's accuracy
# `result=#TODO` was a SyntaxError (assignment with no right-hand side); a
# None placeholder keeps the template parseable until it is completed.
result = None  # TODO: compute the accuracy on the test split
print('Accuracy : ', (result))
| AI/Talleres/AutoML/.ipynb_checkpoints/LogisticRegression-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This notebook shows, how to compute RandomForest's accuracy scores for each value of `n_estimators` without retraining the model. No rocket science involved, but still useful.
# # Load some data
# +
import sklearn.datasets
from sklearn.model_selection import train_test_split
# `load_digits(10, True)` relied on positional arguments; scikit-learn made
# these parameters keyword-only (positional use was deprecated in 0.23 and
# now raises TypeError), so they are spelled out explicitly.
X, y = sklearn.datasets.load_digits(n_class=10, return_X_y=True)
X_train, X_val, y_train, y_val = train_test_split(X, y)
# -
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
# **Step 1:** first fit a Random Forest to the data. Set `n_estimators` to a high value.
rf = RandomForestClassifier(n_estimators=500, max_depth=4, n_jobs=-1)
rf.fit(X_train, y_train)
# **Step 2:** Get predictions for each tree in Random Forest separately.
# One class-probability matrix per tree, each with a leading length-1 axis so
# they can later be stacked into (n_trees, n_objects, n_classes).
predictions = [tree.predict_proba(X_val)[None, :] for tree in rf.estimators_]
# **Step 3:** Concatenate the predictions to a tensor of size `(number of trees, number of objects, number of classes)`.
predictions = np.vstack(predictions)
# **Step 4:** Сompute cumulative average of the predictions. That will be a tensor, that will contain predictions of the random forests for each `n_estimators`.
cum_mean = np.cumsum(predictions, axis=0)/np.arange(1, predictions.shape[0] + 1)[:, None, None]
# **Step 5:** Get accuracy scores for each `n_estimators` value
scores = []
for pred in cum_mean:
scores.append(accuracy_score(y_val, np.argmax(pred, axis=1)))
# **That is it!** Plot the resulting scores to obtain similar plot to one that appeared on the slides.
plt.figure(figsize=(10, 6))
plt.plot(scores, linewidth=3)
plt.xlabel('num_trees')
plt.ylabel('accuracy');
# We see that 150 trees are already sufficient to obtain a stable result.
| courses/how-to-win-ds-competition/reading_materials/Hyperparameters_tuning_video2_RF_n_estimators.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from sklearn import datasets
import DReaM
# -
# # Iris Dataset
# This is an exmple where rule-generating features and cluster-preserving features are identical, and no prior rules are provided.
# +
data = datasets.load_iris()["data"]  # Load the iris dataset (feature matrix only)
# Assuming rule-generating features and cluster-preserving features are
# identical. I.e., X = Y.
M = DReaM.DReaM(data, data, K = 3)
# We repeat the algorithm for 10 times. In each time we randomly
# initialize the model using GMM. We keep the results with the maximum
# likelihood.
M.repeat()
# Plot the rules for two feature pairs (dims 0/1, then dims 2/3).
M.plot_rules(0,1)
M.plot_rules(2,3)
# Display the rules discovered.
print()
print(M.get_rules())
# -
# # Synthetic Data
#
# This is an example where rule-generating features and cluster-preserving features are different, and prior rules are provided. We first generate the data and then run the algorithm on the data.
# +
# Generating the data
N = 1000
D = 2
X = np.random.normal(0, 1, [N, D])
Y = np.zeros([N, D])
ind_1 = np.bitwise_and(X[:, 0]>0, X[:, 1]>0)
Y[ind_1, 0] = np.random.normal(5, 1, ind_1.sum())
Y[ind_1, 1] = np.random.normal(3, 1, ind_1.sum())
ind_2 = np.bitwise_and(X[:, 0]>0, X[:, 1]<0)
Y[ind_2, 0] = np.random.normal(5, 1, ind_2.sum())
Y[ind_2, 1] = np.random.normal(-3, 1, ind_2.sum())
ind_3 = np.bitwise_and(X[:, 0]<0, X[:, 1]>0)
Y[ind_3, 0] = np.random.normal(-5, 1, ind_3.sum())
Y[ind_3, 1] = np.random.normal(3, 1, ind_3.sum())
ind_4 = np.bitwise_and(X[:, 0]<0, X[:, 1]<0)
Y[ind_4, 0] = np.random.normal(-5, 1, ind_4.sum())
Y[ind_4, 1] = np.random.normal(-3, 1, ind_4.sum())
# Defining prior rules.
mu_t_plus0 = np.array([[3, 3], [3, -1]])
mu_t_minus0 = np.array([[-3, -1], [-3, -3]])
M = DReaM.DReaM(X, Y, K = 2, mu_t_plus0 = mu_t_plus0, mu_t_minus0 = mu_t_minus0)
# We repeat the algorithm for 10 times. In each time we randomly
# initialize the model based on the prior rules. We keep the results with
# the maximum likelihood.
M.repeat()
# Plot the rules.
M.plot_rules()
# Plot the cluster-preserving features Y.
M.plot_Y()
# Display the rules discovered.
print()
print(M.get_rules())
# -
#
| Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# # Linear Regression with NumPy
# Ground-truth line y = m*x + c sampled at 20 evenly spaced points in [0, 1].
x = np.linspace(0, 1, 20)
m = 2
c = 0.5
y = m * x + c
plt.plot(x, y)
plt.axis([-0.1, 1.1, 0, 3])
plt.grid()
# Bare expressions are jupytext cell outputs: display x and y.
x
y
# ## Imagine that there is no line
# Same data as a scatter: the regression task is to recover m and c from it.
plt.scatter(x, y, s=5)
plt.axis([-0.1, 1.1, 0, 3])
plt.grid()
print(x)
print(y)
# ## Ordinary Least Squares
#
# Recall that,
#
# $$\mathbf{y} = \mathbf{X}\mathbf{\beta} + \mathbf{\epsilon}$$
#
# and
#
# $$\mathbf{\hat{\beta}} = (\mathbf{X}^{T}\mathbf{X})^{-1}\mathbf{X}^{T}\mathbf{y}$$
x
# Reshape both vectors to column matrices (n, 1) so the matrix OLS
# formulas below apply directly.
x = x.reshape(-1, 1)
y = y.reshape(-1, 1)
print(x)
print(y)
# Suppose,
#
# $$a = (\mathbf{X}^{T}\mathbf{X})^{-1}$$
# and
# $$b = \mathbf{X}^{T}\mathbf{y}$$
#
# Therefore,
#
# $$\mathbf{\hat{\beta}} = ab$$
# Normal-equation solve: beta_hat = (X^T X)^-1 X^T y.
# NOTE(review): explicit inversion is fine for teaching purposes;
# np.linalg.lstsq would be the numerically preferred route in real code.
a = np.linalg.inv(np.dot(x.T, x))
b = np.dot(x.T, y)
beta_hat = np.dot(a, b)
beta_hat
# $$\mathbf{\hat{y}} = \mathbf{X}\mathbf{\hat{\beta}}$$
y_hat = np.dot(x, beta_hat)
# x has no intercept column yet, so this fit is forced through the origin —
# that is the "what went wrong" answered in the next section.
plt.scatter(x, y, s=5, label='Ground Truth')
plt.plot(x, y_hat, 'g', label='OLS')
plt.legend()
plt.axis([-0.1, 1.1, 0, 3])
plt.grid()
# # Question: What went wrong?
# ## Estimating the intercept / error
x
# Append a column of ones so the model can also learn an intercept term.
x = np.c_[x, np.ones(x.shape)]
x
# Re-solve the normal equations on the augmented design matrix.
a = np.linalg.inv(np.dot(x.T, x))
b = np.dot(x.T, y)
beta_hat = np.dot(a, b)
beta_hat
# beta_hat stacks [slope, intercept]; ravel flattens the (2, 1) result.
m_hat, c_hat = beta_hat.ravel()
y_hat = x[:, 0] * m_hat + c_hat
plt.scatter(x[:, 0], y, c='b', s=5, label='Ground Truth')
plt.plot(x[:, 0], y_hat, 'g', label='OLS')
plt.legend()
plt.axis([-0.1, 1.1, 0, 3])
plt.grid()
# # Exercise: Make some noise.
# #### Step 1: Pick a random slope ($m$) in the interval $[0.5, 2]$
# #### Step 2: Pick a random intercept ($c$) in the interval $[0, 1]$
# #### Step 3: Create $y = mx + c$ and add some noise to it, with [`np.random.rand`](https://docs.scipy.org/doc/numpy-1.15.1/reference/generated/numpy.random.rand.html)
# #### Step 4: Find $m$ and $c$, verify your solution
# +
# enter code here
| linear_models/01_linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # GUI Basics
#
# In this example we will learn the basic interaction with the `plot`'s user interface.
import k3d
# Create a plot widget, add a single points object, and render it inline.
plot = k3d.plot()
plot += k3d.points([0, 0, 0, 1, 1, 1])
plot.display()
# Expected result:
# 
# ## View / camera position adjustment
#
# The plot can be adjusted using mouse actions:
# - mouse wheel / scroll controls the zooming in or out
# - dragging with left mouse button rotates the plot (all directions)
# - dragging with right mouse button translates the plot (all directions)
# - dragging with wheel / both mouse buttons: zooms in or out (only vertical)
#
# To return to the default camera position, press the "Camera reset" icon from the top-right toolbar
# ## Fullscreen mode
#
# It is possible to switch the plot to fullscreen mode using the "Fullscreen" icon from the toolbar.
# To exit fullscreen mode press the Esc key (there should be a notification from your browser).
#
# ## Viewing plots in a detached window
#
# Especially in multiple monitor setups it may be useful to detach the plot to a dedicated window. This is achieved by clicking the "Detach widget" icon.
#
# ## Downloading current view as PNG
#
# To save a snapshot of the current view, press the "Save screenshot" icon from the toolbar.
#
#
# The filename will be generated as "K3D-", then a string of digits (technically: decimal timestamp) and then ".png". The PNG file containing the `plot`'s interior (no UI decorations) will be processed as a regular download.
# This example was just a short description of the UI interaction with K3D plots. Some of these actions (camera adjustment and screenshot retrieval) can be done programmatically, which will be shown in other examples.
plot.snapshot_include_js = False
| examples/04_gui_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Setup
# +
# Import some common packages
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import warnings
import urllib
# Import ML packages
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import cross_val_score
# NOTE(review): sklearn.preprocessing.Imputer was deprecated in 0.20 and
# removed in 0.22 — newer environments need sklearn.impute.SimpleImputer.
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from pandas.plotting import scatter_matrix
# Suppress warnings
warnings.filterwarnings("ignore")
# To make this notebook's output stable across runs
np.random.seed(42)
print("Done importing libraries and setting up Notebook.")
# -
# Load the Fitbit activity log exported as CSV.
fitbitData = pd.read_csv("MasterDatabase.csv")
#fitbitData.drop('Unnamed: 0')
fitbitData.head()
#fitbitData.describe()
#fitbitData.info()
# Drop the leftover 'Unnamed: 0' index column from the CSV export.
fitbitData = fitbitData.drop(['Unnamed: 0'], axis=1)
fitbitData.head()
# +
from future_encoders import ColumnTransformer
from future_encoders import OneHotEncoder

# Target column predicted by the models below.
label = "Calories"

# Numeric view of the data: drop the categorical columns plus the activity
# columns that trivially determine calories burned.
# BUG FIX: this was assigned to `fitbitData_num_` (trailing underscore)
# while every later reference reads `fitbitData_num`, which only existed in
# stale notebook state — a fresh run raised NameError.
fitbitData_num = fitbitData.drop(['Steps', 'Distance', 'Minutessedentary', 'Minuteslightlyactive', 'Minutesfairlyactive', 'Minutesveryactive', 'Date', 'Week Day'], axis = 1)

num_attribs = list(fitbitData_num)
cat_attribs = ['Date', 'Week Day']
# The pipeline is applied to x_tr/x_te below, which no longer contain the
# label column, so the label must be excluded from the transformed columns.
feature_num_attribs = [col for col in num_attribs if col != label]

num_pipeline = Pipeline([
        ('imputer', Imputer(strategy = "median")),
        ('std_scaler', StandardScaler()),
    ])

full_pipeline = ColumnTransformer([
        ("num", num_pipeline, feature_num_attribs),
        ("cat", OneHotEncoder(), cat_attribs),
    ])

# Kept for backward compatibility: transform of the full data set.
fitbitData_prepared = full_pipeline.fit_transform(fitbitData)

train_set, test_set = train_test_split(fitbitData, test_size=0.2, random_state=10)
x_tr = train_set.drop(label, axis=1)
y_train = train_set[label].copy()
x_te = test_set.drop(label, axis=1)
y_test = test_set[label].copy()

# BUG FIX: fit imputation/scaling statistics on the training split only,
# then apply the fitted transform to the test split. The original fitted
# on the full data set (label column included), which both leaked test-set
# statistics and mismatched the label-free x_tr/x_te column sets.
x_train = full_pipeline.fit_transform(x_tr)   # Process training data
x_test = full_pipeline.transform(x_te)        # Process test data

print("x train shape")
print(x_train.shape)
print("y train shape")
print(y_train.shape)
# -
# Correlations in the original database (numerical)
# (fitbitData_num must be defined by the preparation cell above.)
corr_matrix = fitbitData_num.corr()
corr_matrix["Calories"].sort_values(ascending=False)
# Histograms of every numeric column.
fitbitData_num.hist(bins = 50, figsize = (20, 15))
plt.show()
# Line plot of the numeric columns, saved to disk for the report.
plot = fitbitData_num.plot(figsize = (20, 10))
fig = plot.get_figure()
fig.savefig("numerics.png")
# NOTE(review): `attributes` is assigned but never used below.
attributes = num_attribs
scatter_matrix(fitbitData_num[num_attribs], figsize = (17, 14))
# +
from sklearn.tree import DecisionTreeRegressor


def _cv_rmse(model, features, targets):
    # Mean root-mean-square error over 10 cross-validation folds.
    fold_scores = cross_val_score(model, features, targets,
                                  scoring="neg_mean_squared_error", cv=10)
    return np.sqrt(-fold_scores).mean()


# Fit a decision-tree regressor and report cross-validated RMSE on the
# training and test splits.
print("Decision Tree Regressor")
tree_reg = DecisionTreeRegressor(random_state=42)
tree_reg.fit(x_train, y_train)
print("Training data root mean square error:", _cv_rmse(tree_reg, x_train, y_train))
print("Test data root mean square error:", _cv_rmse(tree_reg, x_test, y_test))
# +
from sklearn.ensemble import RandomForestRegressor


def _forest_cv_rmse(model, features, targets):
    # Mean root-mean-square error over 10 cross-validation folds.
    fold_scores = cross_val_score(model, features, targets,
                                  scoring="neg_mean_squared_error", cv=10)
    return np.sqrt(-fold_scores).mean()


# Fit a random-forest regressor and report cross-validated RMSE on the
# training and test splits.
print("RandomForest Regressor")
forest_reg = RandomForestRegressor(random_state=42)
forest_reg.fit(x_train, y_train)
print("Training data root mean square error:", _forest_cv_rmse(forest_reg, x_train, y_train))
print("Test data root mean square error:", _forest_cv_rmse(forest_reg, x_test, y_test))
# +
from sklearn.svm import SVR

# Small grid search over RBF-kernel hyperparameters.
gamma1, gamma2, gamma3 = 0.1, 0.01, 0.001
C1, C2, C3 = 1, 500, 1000
hyperparams = (gamma1, C1), (gamma1, C2), (gamma1, C3), (gamma2, C1), (gamma2, C2), (gamma2, C3), (gamma3, C1), (gamma3, C2), (gamma3, C3)

for gamma, C in hyperparams:
    rbf_kernel_svRg_clf = SVR(kernel="rbf", gamma = gamma, C=C)
    rbf_kernel_svRg_clf.fit(x_train, y_train)
    # BUG FIX: the evaluation below used to sit *after* the loop, so only
    # the last (gamma, C) combination was ever scored while the other eight
    # fitted models were silently discarded. Score each combination as it
    # is fitted, matching the per-combination prints.
    # (cross_val_score is already imported in the setup cell above.)
    svRg_mse = cross_val_score(rbf_kernel_svRg_clf, x_test, y_test, cv=3, scoring="neg_mean_squared_error")
    svRg_rmse = np.sqrt(-svRg_mse)
    print("\n gamma is " + str(gamma))
    print("C is " + str(C))
    print("average:")
    svRg_average_error = np.mean(svRg_rmse)
    print(svRg_average_error)
# -
| Fitbit_ML_Updated.ipynb |