code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from itertools import islice
from random import random
import time
import csv
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
from matplotlib import interactive
import datetime as dt
interactive(True)  # non-blocking plt.show() so the notebook keeps executing
# +
#file for functions to do my live plotting.
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import time
# use ggplot style for more sophisticated visuals
plt.style.use('ggplot')
#custom pause function that doesn't force the window back into the foreground.
def mypause(interval):
    """Pause the GUI event loop for *interval* seconds without raising the window.

    Unlike plt.pause(), this never calls plt.show(), so an existing figure
    window is not forced back into the foreground on every refresh. On a
    non-interactive backend (e.g. Agg) it is a no-op.
    """
    backend = plt.rcParams['backend']
    if backend not in matplotlib.rcsetup.interactive_bk:
        return
    manager = matplotlib._pylab_helpers.Gcf.get_active()
    if manager is None:
        return
    canvas = manager.canvas
    # Only redraw when the figure is marked dirty; then spin the event loop.
    if canvas.figure.stale:
        canvas.draw()
    canvas.start_event_loop(interval)
def live_plotter(x_vec,y1_data,line1,identifier='',pause_time=0.016, figure = None ):
    """Draw or refresh a live-updating line plot; return the Line2D artist.

    Pass line1=[] on the first call: interactive mode is enabled and the
    figure/axes/line are created (reusing *figure* when a Figure instance is
    supplied). On later calls only the line's y-data is updated in place,
    widening the y-limits when new data touches or crosses them.
    """
    if line1 == []:
        # First call: interactive mode so draws do not block the caller.
        plt.ion()
        # NOTE: exact type check (not isinstance), matching the original —
        # Figure subclasses would get a fresh figure.
        if type(figure) is matplotlib.figure.Figure:
            fig = figure
            print("provided figure")
        else:
            fig = plt.figure(figsize=(13,6))
            print("no figure")
        axes = fig.add_subplot(111)
        # Keep a handle on the artist so later calls can mutate its data.
        line1, = axes.plot(x_vec, y1_data, '-o', alpha=0.8)
        plt.ylabel('Y Label')
        plt.title('Title: {}'.format(identifier))
        plt.show()
    # Refresh the existing artist's y-data.
    line1.set_ydata(y1_data)
    low, high = line1.axes.get_ylim()
    spread = np.std(y1_data)
    # Grow the y-axis when the new data reaches the current bounds.
    if np.min(y1_data) <= low or np.max(y1_data) >= high:
        plt.ylim([np.min(y1_data) - spread, np.max(y1_data) + spread])
    # Let the GUI catch up without stealing window focus.
    mypause(pause_time)
    return line1
# +
# Load the KDD99 unsupervised anomaly-detection CSV, integer-encode its
# categorical label column, standardize, and run the elbow method to pick k.
count = 0
inputFile =[]
CSVfileName ="datasets/kdd99-unsupervised-ad.csv"
MAXROWS = 100
# with open(CSVfileName, "r", newline='') as csvfile:
# for row in csvfile:
# if MAXROWS > 0 and count >= MAXROWS:
# break
# count= count + 1
# inputFile.append(row.encode('utf-8'))
# print('currently reading {} rows \r'.format(count), end ="")
# csvfile.close()
# print("total rows counted:{}".format(count))
# string_length = len(inputFile)
#print("inputCount: {}".format(cppProcess.getCurrentInputCount()))
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# neigh = KNeighborsClassifier(n_neighbors=3)
# neigh.fit(X, y)
#sent = cppProcess.initReaders(inputFile)
#print("initReader {}".format(sent))
df = pd.read_csv(CSVfileName, header=None)
print(df.head())
# Column 29 holds string categories; map each unique value to an integer code.
mapping = {k: v for v, k in enumerate(df.iloc[:,29].unique())}
print(mapping)
df.iloc[:,29] = df.iloc[:,29].map(mapping)
print(df.head())
#standardize the data to normal distribution (zero mean, unit variance)
from sklearn import preprocessing
dataset1_standardized = preprocessing.scale(df)
dataset1_standardized = pd.DataFrame(dataset1_standardized)
plt.figure(figsize=(10, 8))
from sklearn.cluster import KMeans
# Elbow method: within-cluster sum of squares (inertia) for k = 1..4.
wcss = []
for i in range(1, 5):
    kmeans = KMeans(n_clusters = i, init = 'k-means++', random_state = 42)
    kmeans.fit(dataset1_standardized)
    wcss.append(kmeans.inertia_)
    print(i)
plt.plot(range(1, 5), wcss)
plt.title('The Elbow Method')
plt.xlabel('Number of clusters')
plt.ylabel('WCSS')
plt.show()
#print(cppProcess.checkComplete())
# count =0
# size = 100
# x_vec = np.linspace(0,1,size+1)[0:-1]
# #y_vec = np.random.randn(len(x_vec))
# #x_vec = np.zeros(shape=(1,1))
# y_vec = np.zeros(shape=(100,1))
# line1 = []
# fig=plt.figure(figsize=(13,6))
# counter = 0
# while counter <= 1000:
# counter = counter +1
# #print('currently processed {} lines...\r'.format(cppProcess.getResultsCount()), end ="")
# y_vec[-1] = np.random.randn(1)
# #y_vec[-1] = cppProcess.getResultsCount()
# line1 = live_plotter(x_vec,y_vec,line1, figure=fig)
# y_vec = np.append(y_vec[1:],0.0)
print("done")
# -
# Fitting K-Means to the dataset with the chosen k=3.
kmeans = KMeans(n_clusters = 3, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(dataset1_standardized)
# Shift the cluster numbering to start at 1 instead of 0.
# (Removed the redundant `y_kmeans1 = y_kmeans` pre-assignment that was
# immediately overwritten by the line below.)
y_kmeans1 = y_kmeans + 1
# New Dataframe called cluster
cluster = pd.DataFrame(y_kmeans1)
# Adding cluster to the Dataset1
df['cluster'] = cluster
# Mean of every feature per cluster, rounded to 1 decimal (notebook display).
kmeans_mean_cluster = pd.DataFrame(round(df.groupby('cluster').mean(),1))
kmeans_mean_cluster
# +
# Per-cluster row counts (notebook display).
df.groupby('cluster').count()
#len(df.index)
#rowCount = range(0,len(df.index))
#ax1 = df.plot.scatter(x=2, y = 'cluster')
# -
# Scatter of feature column 2 against the assigned cluster id.
ax1 = df.plot.scatter(x=2, y = 'cluster')
# +
#***************** new code starts here **********************
# +
import pandas as pd
#data = pd.read_csv("./datasets/kdd99-unsupervised-ad.csv", header = None)
# Load the 10% KDD Cup '99 corrected dataset; column 41 is the attack label.
data2 = pd.read_csv("./datasets/kddcup_data_10_percent_corrected.csv", header = None)
print(data2.head())
# -
# NOTE: `df` is rebound here to the unique label values, shadowing the
# earlier frame from the first notebook section.
df = data2.loc[:,41].unique()
print(df)
#strings: inspect and label-encode every object-dtype column.
types = data2.select_dtypes(include=['object'])
print(types.head())
for t in types:
    print(np.sort(types[t].unique()) )
    # A fresh encoder per column; only the last column's fit survives the loop.
    le = preprocessing.LabelEncoder()
    le.fit(types[t])
    print("classes of {} are:{}".format(t,np.sort(le.classes_)))
    output = le.transform(types[t])
# +
# a better working implementation of the fit transform function available in the label encoder library. Adds in the features
# I personally expected in it when I used it.
#performs individual column label encoding. provides list of label encoders for reversing of the transformation at a later date.
def fit_transform_cols(data):
    """Label-encode each object-dtype column of *data* individually.

    Returns a tuple (encoded_frame, encoders): a copy of *data* with every
    string column replaced by integer codes, and a dict mapping each encoded
    column name to its fitted LabelEncoder so the transformation can be
    reversed later with transform_reverse_cols().
    """
    categorical = data.select_dtypes(include=['object']).copy()
    encoded = pd.DataFrame(data).copy()
    encoders = {}
    for name in categorical:
        # One encoder per column, kept for the reverse transformation.
        encoder = preprocessing.LabelEncoder()
        encoder.fit(categorical[name])
        encoders[name] = encoder
        encoded[name] = encoder.transform(categorical[name])
    return encoded, encoders
def transform_reverse_cols(data, le_list):
    """Invert fit_transform_cols(): decode integer columns back to labels.

    *le_list* maps column names to the fitted LabelEncoders produced by
    fit_transform_cols(); each listed column of *data* is inverse-transformed
    in a copy, which is returned.
    """
    decoded = pd.DataFrame(data).copy()
    for col, encoder in le_list.items():
        print(type(encoder))
        print("classes of {} are:{}".format(col,np.sort(encoder.classes_)))
        decoded[col] = encoder.inverse_transform(data[col])
    return decoded
print(data2.head())
# Encode every string column, then reverse the encoding as a round-trip check.
# Fix: the helpers defined above are named fit_transform_cols and
# transform_reverse_cols — the original calls to fit_transform /
# transform_reverse raised NameError.
output, le_list = fit_transform_cols(data2)
print(output.head())
rencoded = transform_reverse_cols(output, le_list)
print(rencoded.head())
# +
# Quick type sanity checks (notebook scratch cell).
print(type(le))
if isinstance(le,preprocessing.LabelEncoder):
    print("true")
print(type(1))
# -
data2.head()
# Class distribution: share of each attack label (column 41), in percent,
# sorted from most to least frequent.
df = data2.groupby(41).count()
results = df[0].sort_values(ascending=False)
results = results / len(data2.index) * 100
print(results)
# +
from sklearn import preprocessing
# Split the attack label (column 41) from the features, then integer-encode it.
labels = (data2.iloc[:,41])
inputFile =data2.drop([41], axis=1)
le = preprocessing.LabelEncoder()
le.fit(labels)
print("classes of labels are:{}".format(le.classes_))
labels_encoded = le.transform(labels)
# Human-readable names for the encoded labels.
# Fix: the original literal used key 21 twice ('teardrop.' then
# 'warezclient.'), silently dropping 'teardrop.'; renumbered so all 23
# classes are present.
# NOTE(review): LabelEncoder codes are 0-based while this map is 1-based —
# confirm the intended offset before using it for display.
labels_strings = {1:'back.', 2:'buffer_overflow.', 3:'ftp_write.', 4:'guess_passwd.', 5:'imap.', 6:'ipsweep.',
                  7:'land.', 8:'loadmodule.', 9:'multihop.', 10:'neptune.', 11:'nmap.', 12:'normal.', 13:'perl.',
                  14:'phf.', 15:'pod.', 16:'portsweep.', 17:'rootkit.', 18:'satan.', 19:'smurf.', 20:'spy.',
                  21:'teardrop.', 22:'warezclient.', 23:'warezmaster.'}
le2 = preprocessing.LabelEncoder()
leArray = []
inputEncoded=np.array((0,0))
# Encode every object-dtype feature column in place.
# NOTE(review): apply() re-fits le2 once per column, so afterwards
# le2.classes_ only reflects the last column encoded.
obj_df = inputFile.select_dtypes(include=['object']).copy()
obj_df.apply(le2.fit_transform)
inputFile.update(obj_df.apply(le2.fit_transform))
print(inputFile.head())
print("classes of labels are:{}".format(le2.classes_))
# for column in obj_df:
# le_temp = preprocessing.LabelEncoder()
# le_temp.fit(obj_df[column])
# print("classes of labels are:{}".format(le_temp.classes_))
# res = le_temp.transform(obj_df[column])
# np.append(inputEncoded,res)
#inputFileEncoded = le2.fit_transform(inputFile)
#print(inputFile)
#inputFile = inputFile.astype(str)
#inputFile = inputFile.values.tolist()
# Stringify the encoded labels for the KNN pipeline below.
labels = labels_encoded.astype(str)
print(type(labels))
labels = labels.tolist()
# +
# NOTE(review): this cell raises NameError — df_reencodeded is used before it
# is ever assigned. It appears it was meant to start from a copy of
# inputFile; confirm intent before fixing.
obj_df = inputFile.select_dtypes(include=['object']).copy()
obj_df.apply(le2.fit_transform)
df_reencodeded.update(obj_df.apply(le2.fit_transform))
df_reencodeded = df_reencodeded.apply(le2.inverse_transform)
print(df_reencodeded)
# +
# NOTE(review): inputFileEncoded is only defined in commented-out code above,
# so this cell also raises NameError as written.
df_reencodeded = inputFileEncoded.apply(le2.inverse_transform)
print(df_reencodeded)
# Reverse the label encoding back to the original attack-name strings.
label_reencodeded = le.inverse_transform(labels_encoded)
print(label_reencodeded)
# +
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
# 3-nearest-neighbour classifier on the encoded KDD features.
knn = KNeighborsClassifier(n_neighbors=3)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(inputFile, labels, test_size=0.33, random_state=42)
trainSet = np.array(X_train)
print("trainset Shape {}".format(trainSet.shape))
labelsSet = np.ravel(y_train)
print("labelSet Shape {}".format(labelsSet.shape))
# Labels were stringified in the cell above, so cast back to float for fit().
trainSet = trainSet.astype(np.float64)
labelsSet = labelsSet.astype(np.float64)
knn.fit(trainSet, labelsSet)
# +
from sklearn.metrics import confusion_matrix
print(trainSet.shape)
X_test = np.array(X_test).astype(np.float64)
y_test = np.array(y_test).astype(np.float64)
print("test")
print(X_test[0].reshape(1,-1).shape)
# Predict the first 1000 test rows in one vectorised call.
# Fix: the original `label, proba = knn.predict(...)` raised ValueError —
# predict() returns a single array of class labels, not a (label, proba)
# pair — and left `result` as an empty list for confusion_matrix below.
result = knn.predict(X_test[:1000])
# for i in range(1000):
# result.append(knn.predict(X_test[i].reshape(1,-1)))
# print(i)
#print(np.array(result).size)
#print(np.array(y_test).size)
#print(y_test)
confusion_matrix(y_test[:1000],result)
#print("result:{}".format(result))
#print("answer:{}".format(y_test[0]))
# +
# Render the confusion matrix of the 1000-row prediction as a heatmap.
y_test_1 = y_test[:1000]
conf_mat = confusion_matrix(y_test_1,result)
fig = plt.figure(figsize=(10,10))
width = np.shape(conf_mat)[1]
height = np.shape(conf_mat)[0]
res = plt.imshow(np.array(conf_mat), cmap=plt.cm.summer, interpolation='nearest')
# Annotate every non-zero cell with its count.
for i, row in enumerate(conf_mat):
    for j, c in enumerate(row):
        if c>0:
            plt.text(j-.2, i+.1, c, fontsize=16)
cb = fig.colorbar(res)
plt.title('Confusion Matrix')
# NOTE(review): range(21) assumes 21 classes, but the dataset/encoder may
# hold more — confirm the tick count matches conf_mat's dimensions.
_ = plt.xticks(range(21), [l for l in labels_strings.values()], rotation=90)
_ = plt.yticks(range(21), [l for l in labels_strings.values()])
# +
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
labels = (data2.loc[:,41])
le.fit(labels)
print("classes of labels are:{}".format(le.classes_))
labels_normalized = le.transform(labels)
# -
# Round-trip the dataframe through CSV strings (scratch experiment).
inputFile_strings = data2.to_csv(header=None, index=False).strip('\n').split('\n')
print(inputFile_strings[0])
inputFile_strings2 = data2.astype(str).values.tolist()
print(inputFile_strings2[0])
output = []
for i in inputFile_strings:
    output.append(i.split(','))
#input = np.reshape(inputFile_strings, (-1,1))
#input = input.split(',')
#input = pd.read_csv(inputFile_strings, index_col = 0, parse_dates= True, header = None, sep=',')
df = pd.DataFrame(output)
print(df)
print(df.head())
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=3)
from sklearn.preprocessing import OneHotEncoder
enc = OneHotEncoder(handle_unknown='ignore')
encoded = enc.fit(df)
print(encoded.categories)
# NOTE(review): fitting with the whole frame as both X and y looks like a
# placeholder — confirm the intended target column.
knn.fit(df, df)
le = preprocessing.LabelEncoder()
labels = (data2.loc[:,41])
le.fit(labels)
print("classes of labels are:{}".format(le.classes_))
labels_normalized = le.transform(labels)
training = (data2.drop([41], axis=1))
# NOTE(review): LabelEncoder.fit expects a 1-D array; passing a DataFrame
# here raises in current scikit-learn.
le.fit(training)
# Load a saved prediction run and score per-class agreement.
import pandas as pd
df_results = pd.read_csv("results/20190517-143649.csv")
df_results.head  # NOTE(review): missing () — bare attribute access shows the method, not the frame
# Predictions were saved as "[x]" strings; strip the brackets, then cast to float.
df_results["predicted"] = df_results["predicted"].str.strip("[]")
df_results.dtypes
df_results = df_results.astype(float)
df_results.dtypes
import numpy as np
# For every row, the number of correct predictions within its predicted class.
# (Removed the dead `results = np.where(...)` line that was immediately
# overwritten by the assignment below.)
results = df_results.Label.eq(df_results.predicted).astype(int).groupby(df_results.predicted).transform('sum')
print(results)
| Knn Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/emali1/Mastering-Keras/blob/master/Module_4_1_Inception_Blocks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="s3HBx1OFkKNo"
# ## Module 4.1: Inception Layers
#
# We will see how to create inception layers for use in advanced convolutional neural networks.
#
# We will:
# - Implement a function to create the basic and standard inception blocks.
# - Create basic models and visually examine the inception architecture.
#
# Since we are looking at *layers* rather than networks, we will not solve a problem in this module. However it is a good exercise for you to try on your own to make use of these inception layers to improve the CNN performance from module 2.2.
# + [markdown] id="gU06JTGIkLP-"
# We import required libraries.
# + id="NxmVuYBap6AE" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="14a76b2c-f10a-4b85-fc53-d86876b08b26"
from keras.models import Model
from keras.layers import Input,Conv2D,Flatten,Dense
from keras.layers import MaxPooling2D
from keras.layers.merge import concatenate
from keras.utils import plot_model
# + id="o5d5YKvgNIM7"
# + [markdown] id="34ZOT4rcsBN2"
# We make a wrapper function to create a basic inception block. We allow the caller to specify the number of 1x1, 3x3 and 5x5 filters to use - you could allow the caller much more freedom to specify the internal layers.
# + id="7xuC5QO9qHP7"
# Creating a basic inception block
def basic_inception_module(layer_in, f1, f3, f5):
    """Naive inception block: parallel 1x1/3x3/5x5 convolutions plus a
    stride-1 3x3 max-pool, concatenated along the channel axis.

    f1, f3, f5 set the filter counts of the three convolution towers.
    """
    towers = [
        Conv2D(f1, (1,1), padding='same', activation='relu')(layer_in),
        Conv2D(f3, (3,3), padding='same', activation='relu')(layer_in),
        Conv2D(f5, (5,5), padding='same', activation='relu')(layer_in),
        MaxPooling2D((3,3), strides=(1,1), padding='same')(layer_in),
    ]
    # Stack the parallel towers channel-wise.
    return concatenate(towers, axis=-1)
# + [markdown] id="sLj052p-sWlC"
# Let's create a model that consists of two basic inception blocks. Then we can get its summary and graph its architecture. Notice that now we have parallel layers the linear summary is less clear that the graphical representation.
# + id="xRt3rAXdrZ6i" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="80a806b1-ddd1-434a-9c50-21cef6d270cf"
# Define input
inputs1 = Input(shape=(128, 128, 3))
# Add two basic (naive) inception blocks back to back.
iblock1 = basic_inception_module(inputs1, 32, 64, 32)
iblock2 = basic_inception_module(iblock1, 32, 64, 32)
# Flatten for output
flat = Flatten()(iblock2)
dense1 = Dense(256, activation='relu')(flat)
outputs = Dense(10, activation='softmax')(dense1)
# Create model
model1 = Model(inputs=inputs1, outputs=outputs)
# Summarize model
model1.summary()
# Plot model graph (written to disk as a PNG).
plot_model(model1, show_shapes=True, to_file='naive_inception_module.png')
# + [markdown] id="MEz5GXQmtf9h"
# We make a wrapper function to create a standard inception block. We allow the caller to specify the number of 1x1, 3x3 and 5x5 filters to use, as well as the number of 1x1 filters to place before the 3x3 and 5x5 filter layers and after the max pooling layer. Again, you could allow the caller much more freedom to specify the internal layers.
# + id="P7vNzjG6tfDo"
# Create inception module
def inception_module(layer_in, f1, f3_in, f3_out, f5_in, f5_out, mp_out):
    """Standard inception block with 1x1 bottlenecks before the 3x3/5x5
    convolutions and after the max-pool, as in GoogLeNet.

    f3_in/f5_in size the bottlenecks; f1, f3_out, f5_out and mp_out size the
    tower outputs that are concatenated channel-wise.
    """
    tower1 = Conv2D(f1, (1,1), padding='same', activation='relu')(layer_in)
    tower3 = Conv2D(f3_in, (1,1), padding='same', activation='relu')(layer_in)
    tower3 = Conv2D(f3_out, (3,3), padding='same', activation='relu')(tower3)
    tower5 = Conv2D(f5_in, (1,1), padding='same', activation='relu')(layer_in)
    tower5 = Conv2D(f5_out, (5,5), padding='same', activation='relu')(tower5)
    pooled = MaxPooling2D((3,3), strides=(1,1), padding='same')(layer_in)
    pooled = Conv2D(mp_out, (1,1), padding='same', activation='relu')(pooled)
    return concatenate([tower1, tower3, tower5, pooled], axis=-1)
# + [markdown] id="oq1_953duerL"
# Let's create a model that consists of two basic inception blocks to look at its summary and graph its architecture.
# + id="ZOiPAYOyue8j" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="18e34fd1-3df6-445e-d54d-84c30e797698"
# Define input
inputs1 = Input(shape=(128, 128, 3))
# Add two standard inception blocks back to back.
iblock1 = inception_module(inputs1, 64, 64, 128, 16, 32, 32)
iblock2 = inception_module(iblock1, 64, 64, 128, 16, 32, 32)
# Flatten for output
flat = Flatten()(iblock2)
dense1 = Dense(256, activation='relu')(flat)
outputs = Dense(10, activation='softmax')(dense1)
# Create model
model2 = Model(inputs=inputs1, outputs=outputs)
# Summarize model
model2.summary()
# Plot model graph (written to disk as a PNG).
plot_model(model2, show_shapes=True, to_file='inception_module.png')
# + id="SyuafZ5qNKDk"
import numpy as np
from sklearn.metrics import confusion_matrix,classification_report
from keras.datasets import cifar10
from keras import Model
from keras.layers import Dense,Dropout,Flatten,Activation,Input
from keras.optimizers import Adam
from keras.layers.convolutional import Conv2D,MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping
from keras.utils import np_utils
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/"} id="oZwd76NoNipA" outputId="ac088fe8-d7ad-41c6-e84b-d4e61fa202e3"
# Load images (CIFAR-10: 50k train / 10k test, 32x32 RGB, 10 classes).
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
# Make versions of the labels that are one-hot vectors
train_labels_array=np_utils.to_categorical(train_labels, 10)
test_labels_array=np_utils.to_categorical(test_labels, 10)
# Make vector of classnames, index-aligned with the label integers.
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']
# + id="edO41sfGUVvU"
# Create inception module
def inception_module(layer_in, f1, f3_in, f3_out, f5_in, f5_out, mp_out):
# 1x1 Convolution
conv1 = Conv2D(f1, (1,1), padding='same', activation='relu')(layer_in)
# drop1 = Dropout(0.5)(pool1)
# 3x3 Convolution
conv3 = Conv2D(f3_in, (1,1), padding='same', activation='relu')(layer_in)
conv3 = Conv2D(f3_out, (3,3), padding='same', activation='relu')(conv3)
# 5x5 Convolution
conv5 = Conv2D(f5_in, (1,1), padding='same', activation='relu')(layer_in)
conv5 = Conv2D(f5_out, (5,5), padding='same', activation='relu')(conv5)
# 3x3 Max Pooling
pool = MaxPooling2D((3,3), strides=(1,1), padding='same')(layer_in)
pool = Conv2D(mp_out, (1,1), padding='same', activation='relu')(pool)
# Concatenate
layer_out = concatenate([conv1, conv3, conv5, pool], axis=-1)
drop1 = Dropout(0.5)(layer_out)
return drop1
def getModel():
    """Build a small CIFAR-10 classifier: two dropout-regularised inception
    blocks feeding a dense softmax head over 10 classes."""
    inputs = Input(shape=(32, 32, 3),name="Input")
    x = inception_module(inputs, 32, 32, 32, 64, 64, 32)
    x = inception_module(x, 32, 32, 32, 64, 64, 32)
    # Flatten for output, with extra dropout before the classifier head.
    x = Flatten()(x)
    x = Dropout(0.5)(x)
    x = Dense(256, activation='relu')(x)
    outputs = Dense(10, activation='softmax')(x)
    return Model(inputs=inputs, outputs=outputs)
# Compile the inception model with Adam and categorical cross-entropy.
model=getModel()
opt=Adam()
model.compile(optimizer=opt,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="7gT1tFWtpSHc" outputId="4a7a6090-ca9b-4f4b-ec03-015a3906ed6b"
# Before calling fit, we create the Early Stopping callback.
# We set it up to stop if improvement in the validation loss
# does not occur over 10 epochs. When stopping occurs, the
# weights associated with the best validation loss are restored.
earlyStopping = EarlyStopping(monitor="val_loss",
                              patience=10,
                              verbose=1,
                              restore_best_weights=True)
# We need to use the one-hot vector version of the labels.
# This shouldn't go through all 300 epochs, because of the
# early stopping, but it can take some time.
history = model.fit(train_images,
                    train_labels_array,
                    epochs=300,
                    shuffle=True,
                    callbacks=[earlyStopping],
                    batch_size = 1024,
                    validation_split=.2)
# + id="SBFKG0NKqdzq"
def plot_training_history(history):
    """Plot training/validation accuracy and loss curves from a Keras
    History object on a single axes, then show the figure."""
    for key in ('accuracy', 'val_accuracy', 'loss', 'val_loss'):
        plt.plot(history.history[key])
    plt.title('Model accuracy and loss')
    plt.xlabel('Epoch')
    # Legend order matches the plotting order above.
    plt.legend(['Accuracy','Validation Accuracy', 'Loss',
                'Validation Loss'], loc='upper right')
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="9l0cEvxBqhEr" outputId="fb50ae6e-d396-4c35-ea94-c654472d3ea1"
# Visualize the training run recorded above.
plot_training_history(history)
# + id="4mYR_9CYqhOq"
# + id="Sni2cNOWqhSA"
# + id="uHlVJoPjqhVs"
# + id="AkM8OwUHqhdn"
def test_model(model,x,y):
    """Print a confusion matrix and classification report for *model*'s
    predictions on features *x* against integer labels *y*."""
    # argmax converts the softmax probabilities to predicted class indices.
    predictions = np.argmax(model.predict(x), axis=1)
    cm = confusion_matrix(y, predictions)
    print("Confusion Matrix:")
    print(cm)
    print("Classification report:")
    print(classification_report(y, predictions))
# + colab={"base_uri": "https://localhost:8080/"} id="mOJyrbTeskqz" outputId="a10abb89-425b-4d41-f7d5-fc53f35babc2"
test_model(model,test_images,test_labels)
| Module_4_1_Inception_Blocks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Turn the dfs to attribute, adjacency, and label matrices
import numpy as np
import pandas as pd
import geopandas as gpd
import networkx as nx
import matplotlib.pyplot as plt
import pickle
import copy
from scipy.sparse import csr_matrix
# +
# read files: stays, 2016-2018 growth targets, and 2016 socio-economics.
with open('../../data/02_intermediate/boston_stays.pickle', 'rb') as f:
    df = pickle.load(f)
with open("../../data/02_intermediate/boston_annual_growth_2016_2018.pickle", 'rb') as f:
    df_growth_16_18 = pickle.load(f)
with open("../../data/02_intermediate/boston_socioecon_2016.pickle", 'rb') as f:
    df_socio_2016 = pickle.load(f)
# -
# find overlapping GEO IDs: tracts present both in the growth table and in
# the stays data.
# use only the GEOID (not GEOID_home) from the df.
overlapping_geoid = list(set(df_growth_16_18.index).intersection(set(np.unique(df.GEOID))))
print(len(overlapping_geoid))
# # Place Graph (X, A, Y)
#
# Predict growth now
# # X
# Socio-economic covariates for each overlapping tract, sorted by GEOID so
# that X, A and Y share the same row order.
var_list = ['inc_per_capita', 'property_value_median', 'pop_total',
            'households', 'race_white_ratio', 'race_black_ratio',
            'age_median', 'travel_driving_ratio',
            'edu_bachelor_ratio']
X = df_socio_2016.loc[overlapping_geoid, var_list]
X = X.sort_index()
X
# Scratch: 3-hour duration threshold in seconds, and activity-category count.
3600*3
len(np.unique(df.cat))
# activity sorting: categories ranked by frequency.
u, count = np.unique(df.cat, return_counts = True)
count_sort_ind = np.argsort(-count) # from large to small
u[count_sort_ind]
count[count_sort_ind]
# # A
#
# - A: unweighted adjacency matrix.
# - A_: weighted matrix.
#
# create the people place matrix: visit counts per (user, GEOID) pair,
# pivoted to a place x user matrix with NaNs filled as zero.
people_place_vector = df.groupby(['user','GEOID']).count().iloc[:, 0]
people_place_m = people_place_vector.unstack(level = 'user')
people_place_m.values[people_place_m.isna()] = 0
# create the subset.
people_place_m = people_place_m.loc[overlapping_geoid, :]
people_place_m = people_place_m.sort_index() # imp! it matches the idx in X.
people_place_m
# create the weighted adjacency matrix. (incidence matrix)
# Place-by-place co-visit counts from the incidence product.
people_place_sparse = csr_matrix(people_place_m)
A_ = people_place_sparse @ people_place_sparse.T
A_ # only 50% of the matrix is zero. Very dense
# create the unweighted adjacency matrix: 1 wherever the weight is positive.
nnz_inds = A_.nonzero()
keep = np.where(A_.data > 0.0)[0]
n_keep = len(keep)
A = csr_matrix((np.ones(n_keep), (nnz_inds[0][keep], nnz_inds[1][keep])), shape = A_.shape) # this is the (data, (row, col)) way to create the csr
A.toarray()
A_.toarray()
# # A Heterogeneity
#
# - Weighted and unweighted.
# Quick looks at the hour-of-day values and the raw stays frame.
np.unique(df.hour_of_day)
df
# +
# What are the meaningful categories?
# Weekday vs. Weekend
# hour_of_day: rush hours (7-10AM; 4-7PM) and non-rush hours (others).
# activity categories: 'Office', 'Residential', others.
# duration categories: 60*5=300; 300~3600; 3600~3600*3=10800.
# -
# Create the people place matrix.
def compute_hetero_A(df_sub):
    """Build weighted (A_) and unweighted (A) place-place adjacency matrices
    from the stays in *df_sub*, row-aligned to the global overlapping_geoid.

    Returns (A, A_) as scipy CSR matrices; A is the 0/1 indicator of the
    positive entries of A_.
    """
    print('start...')
    # Visit counts per (user, GEOID) pair, pivoted to a place x user matrix.
    visits = df_sub.groupby(['user','GEOID']).count().iloc[:, 0]
    place_user = visits.unstack(level = 'user')
    place_user.values[place_user.isna()] = 0
    # A subset may lack some of the overlapping GEOIDs, so pad missing rows
    # with zeros before selecting/sorting to match X's index.
    missing = set(overlapping_geoid).difference(set(place_user.index))
    padding = pd.DataFrame(0.0, index = missing, columns = place_user.columns)
    place_user = pd.concat([place_user, padding], axis = 0)
    place_user = place_user.loc[overlapping_geoid, :]
    place_user = place_user.sort_index()
    # Weighted adjacency: place-by-place co-visit counts (incidence product).
    incidence = csr_matrix(place_user)
    A_ = incidence @ incidence.T
    # Unweighted adjacency: 1 wherever the weighted entry is positive, built
    # via the (data, (row, col)) csr constructor.
    rows, cols = A_.nonzero()
    positive = np.where(A_.data > 0.0)[0]
    A = csr_matrix((np.ones(len(positive)), (rows[positive], cols[positive])), shape = A_.shape)
    print('end.')
    return A, A_
# Scratch check of an hour-of-day mask (notebook display).
np.logical_and(df['hour_of_day'] >= 7, df['hour_of_day'] <= 10)
# NOTE(review): this cell runs before A_unweighted_dic / A_weighted_dic are
# created two cells below, so it raises NameError in this order; the
# 'weekend' key is also mislabeled for a 6-11h filter. Looks like leftover
# scratch — confirm and delete.
df_sub = df.loc[np.logical_and(df['hour_of_day'] >= 6, df['hour_of_day'] < 11),:]
A, A_ = compute_hetero_A(df_sub)
A_unweighted_dic['weekend'] = A
A_weighted_dic['weekend'] = A_
# +
# Build heterogeneous adjacency matrices: one (A, A_) pair per behavioural
# slice of the stays data, keyed by slice name.
A_unweighted_dic = {}
A_weighted_dic = {}
# Named boolean masks selecting each slice. The order matches the original
# cell, so `df_sub`, `A` and `A_` still end up holding the last slice's
# ('duration_large_3hour') values after the loop.
# NOTE(review): stays with duration exactly 3600s or 10800s fall into no
# duration bucket — confirm whether those boundaries should be inclusive.
hetero_slices = [
    ('weekday', df['weekday']==True),
    ('weekend', df['weekend']==True),
    # hour_of_day buckets: rush hours (7-10AM; 4-7PM), midday, early, late.
    ('hours_morning_rush', np.logical_and(df['hour_of_day'] >= 7, df['hour_of_day'] <= 10)),
    ('hours_afternoon_rush', np.logical_and(df['hour_of_day'] >= 16, df['hour_of_day'] <= 19)),
    ('hours_noon', np.logical_and(df['hour_of_day'] > 10, df['hour_of_day'] < 16)),
    ('hours_early_morning', df['hour_of_day'] < 7),
    ('hours_late_night', df['hour_of_day'] > 19),
    # activity categories: 'Office', 'Residential', everything else.
    ('activity_office', df['cat']=='Office'),
    ('activity_residence', df['cat']=='Residential'),
    ('activity_others', np.logical_and(df['cat']!='Residential', df['cat']!='Office')),
    # duration buckets (seconds): <=5min, 5min-1h, 1h-3h, >3h.
    ('duration_less_5min', df['duration']<=300),
    ('duration_between_5min_1hour', np.logical_and(df['duration']>300, df['duration']<3600)),
    ('duration_between_1hour_3hour', np.logical_and(df['duration']>3600, df['duration']<10800)),
    ('duration_large_3hour', df['duration']>10800),
]
# Replaces fourteen copy-pasted stanzas with a single data-driven loop;
# the per-slice computation is unchanged.
for slice_name, mask in hetero_slices:
    df_sub = df.loc[mask, :]
    A, A_ = compute_hetero_A(df_sub)
    A_unweighted_dic[slice_name] = A
    A_weighted_dic[slice_name] = A_
# -
# Inspect the last slice's matrices (notebook displays).
A_.toarray()
A.toarray()
A_.shape
A.shape
# create the subset.
# NOTE: this recomputes the homogeneous A/A_ (overwriting the per-slice
# values above) before the matrices are pickled below.
people_place_m = people_place_m.loc[overlapping_geoid, :]
people_place_m = people_place_m.sort_index() # imp! it matches the idx in X.
people_place_m
# +
# create the weighted adjacency matrix. (incidence matrix)
people_place_sparse = csr_matrix(people_place_m)
A_ = people_place_sparse @ people_place_sparse.T
# create the unweighted adjacency matrix.
nnz_inds = A_.nonzero()
keep = np.where(A_.data > 0.0)[0]
n_keep = len(keep)
A = csr_matrix((np.ones(n_keep), (nnz_inds[0][keep], nnz_inds[1][keep])), shape = A_.shape) # this is the (data, (row, col)) way to create the csr
# -
# # Y
df_growth_16_18.columns
# +
# Growth targets for each overlapping tract, row-aligned with X and A.
var_list = ['inc_per_capita_annual_growth',
            'pop_total_annual_growth',
            'property_value_median_annual_growth']
Y = df_growth_16_18.loc[overlapping_geoid, var_list]
# -
Y = Y.sort_index()
Y
# +
# Save X, A, Y (and the weighted adjacency) for downstream modelling.
with open("../../data/03_processed/place_graph_X.pickle", 'wb') as f:
    pickle.dump(X, f)
with open("../../data/03_processed/place_graph_A.pickle", 'wb') as f:
    pickle.dump(A, f)
with open("../../data/03_processed/place_graph_weighted_A.pickle", 'wb') as f:
    pickle.dump(A_, f)
with open("../../data/03_processed/place_graph_Y.pickle", 'wb') as f:
    pickle.dump(Y, f)
# -
| src/03_process/.ipynb_checkpoints/process_01_create_graphs-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# # !pip install --user requests
import requests
# # Number - Solve Problems With Large Numbers
# We can solve number problems using Python, even with big numbers.
# ## Population of Canada
# According to [Statistics Canada](https://www12.statcan.gc.ca/census-recensement/2016/dp-pd/prof/details/page.cfm?Lang=E&Geo1=PR&Code1=01&Geo2=&Code2=&Data=Count&SearchText=Canada&SearchType=Begins&SearchPR=01&B1=All&TABID=1), the population of Canada in 2011 was **33 476 688** and in 2016 was **35 151 728**. To find out how much the population increased by, we can subtract these large numbers.
35151728 - 33476688
# Notice that we don't put spaces, or commas, between digits in the Python code.
#
# We can also write some fancier code that will tell us what we are calculating:
print('Between 2011 and 2016 the population of Canada increased by:')
35151728 - 33476688
# Or the same calculation with variables:
p2011 = 33476688
p2016 = 35151728
print('The population of Canada increased from', p2011, 'in 2011 to', p2016, 'in 2016 which is a difference of', p2016-p2011)
# ## Populations of The Western Provinces
# We are going to get the populations of different provinces in Canada from Wikipedia so that we can compare them. Here's a function that gets a Wikipedia article for a province and reads the population from line 26 of the data table.
# +
def getPopulation(province):
    """Fetch a Canadian province's population string from Wikipedia.

    Downloads the article for ``province``, parses its HTML tables, and
    reads the population cell from row 26 of the first table (the
    infobox). Returns a string that may still contain thousands
    separators, e.g. ``"1,278,365"`` — callers convert to int later.
    """
    # Fix: this notebook only imports `requests` at the top, so `pd` was
    # an undefined name here. Import pandas locally to keep the function
    # self-contained without touching the notebook's import cell.
    import pandas as pd

    url = 'https://en.wikipedia.org/wiki/' + province
    request = requests.get(url)
    df = pd.read_html(request.content)
    # df[0] is the infobox table; column 1, row 26 holds the population.
    # NOTE(review): this fixed row index is fragile — it breaks whenever
    # Wikipedia reorders the infobox. Confirm before relying on it.
    populationString = df[0][1][26]
    # The cell may carry a footnote after a space; keep only the number.
    population = populationString.split(' ')[0]
    return population
# Fetch each province's population (as a string) from Wikipedia.
popMB = getPopulation('Manitoba')
popSK = getPopulation('Saskatchewan')
popAB = getPopulation('Alberta')
popBC = getPopulation('British_Columbia')
# -
# Now we have the population of each province in a variable.
print(popAB)
# But Python doesn't like having commas in numbers...
# NOTE: the next line fails on purpose (TypeError) — the values are
# still strings at this point, so they cannot be subtracted.
popBC - popAB
# So we need to remove the commas and convert the numbers from strings to integers.
popMB = int(popMB.replace(',', ''))
popSK = int(popSK.replace(',', ''))
popAB = int(popAB.replace(',', ''))
popBC = int(popBC.replace(',', ''))
# Now it should work. Let's find the difference between the population of BC and the population of Alberta.
popBC - popAB
# Now it's your turn, calculate the population difference between Manitoba and Saskatchewan. Think about which has a larger population.
# Calculate the total population of all four of these provinces. It should be about 11 million people.
# ## Word Counts in Shakespeare's Plays
# To look at another large number, let's count how many words there are in the play *Macbeth* by William Shakespeare. First we need to get the text of the play from the Project Gutenberg site.
# Download the full text of Macbeth from Project Gutenberg.
macbeth = requests.get('http://www.gutenberg.org/cache/epub/1129/pg1129.txt').text
print(macbeth)
# Now to count how many words there are we need to split the text every time we see a space, then count using the length function.
# (Splitting on single spaces is only a rough word count: newlines and
# punctuation are not treated specially.)
words = macbeth.split(' ')
len(words)
# So there are 29 128 words in that version of *Macbeth*. If we wanted to print it out on paper, we can estimate that about 500 words would fit on a page, so how many pages would we need?
len(words) / 500
# So we would need about 59 pieces of paper to print out this play.
#
# How about the play *Hamlet*? First let's count the words.
# Same approach for Hamlet: fetch, split on spaces, count.
hamlet = requests.get('http://www.gutenberg.org/cache/epub/1787/pg1787.txt').text
print(len(hamlet.split(' ')))
# So if there are 49 160 words in *Hamlet*, about how many 500 word pages would we need in order to print it?
# One more question. If the play *The Merchant of Venice* was printed out on 42 pages, approximately how many words does it contain?
#
# Check how close your answer was by using this Python code:
#
# print(len(requests.get('http://www.gutenberg.org/cache/epub/1515/pg1515.txt').text.split(' ')))
| Mathematics/Number/Number 2.1 - Problems With Large Numbers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Django Shell-Plus
# language: python
# name: django_extensions
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc" style="margin-top: 1em;"><ul class="toc-item"><li><span><a href="#Enter-a-Topic" data-toc-modified-id="Enter-a-Topic-1"><span class="toc-item-num">1 </span>Enter a Topic</a></span></li><li><span><a href="#Build-a-Lexicon" data-toc-modified-id="Build-a-Lexicon-2"><span class="toc-item-num">2 </span>Build a Lexicon</a></span></li><li><span><a href="#Search-for-Segments" data-toc-modified-id="Search-for-Segments-3"><span class="toc-item-num">3 </span>Search for Segments</a></span></li><li><span><a href="#Visualize-Video-Timelines" data-toc-modified-id="Visualize-Video-Timelines-4"><span class="toc-item-num">4 </span>Visualize Video Timelines</a></span></li><li><span><a href="#Validation" data-toc-modified-id="Validation-5"><span class="toc-item-num">5 </span>Validation</a></span><ul class="toc-item"><li><span><a href="#Assert-No-Double-Counting" data-toc-modified-id="Assert-No-Double-Counting-5.1"><span class="toc-item-num">5.1 </span>Assert No Double Counting</a></span></li><li><span><a href="#Sensitivity-of-Total-Segment-Length-to-Window-Size" data-toc-modified-id="Sensitivity-of-Total-Segment-Length-to-Window-Size-5.2"><span class="toc-item-num">5.2 </span>Sensitivity of Total Segment Length to Window Size</a></span></li><li><span><a href="#Sensitivity-of-Total-Segment-Length-to-Threshold" data-toc-modified-id="Sensitivity-of-Total-Segment-Length-to-Threshold-5.3"><span class="toc-item-num">5.3 </span>Sensitivity of Total Segment Length to Threshold</a></span></li><li><span><a href="#Overlap-Between-Topics" data-toc-modified-id="Overlap-Between-Topics-5.4"><span class="toc-item-num">5.4 </span>Overlap Between Topics</a></span></li></ul></li><li><span><a href="#Analysis" data-toc-modified-id="Analysis-6"><span class="toc-item-num">6 </span>Analysis</a></span><ul class="toc-item"><li><span><a href="#Topic-by-Show" data-toc-modified-id="Topic-by-Show-6.1"><span class="toc-item-num">6.1 </span>Topic by Show</a></span><ul class="toc-item"><li><span><a 
href="#Topic-by-Show-By-Year" data-toc-modified-id="Topic-by-Show-By-Year-6.1.1"><span class="toc-item-num">6.1.1 </span>Topic by Show By Year</a></span></li><li><span><a href="#Topic-by-Show-By-Quarter" data-toc-modified-id="Topic-by-Show-By-Quarter-6.1.2"><span class="toc-item-num">6.1.2 </span>Topic by Show By Quarter</a></span></li></ul></li><li><span><a href="#Multitopic-Comparison" data-toc-modified-id="Multitopic-Comparison-6.2"><span class="toc-item-num">6.2 </span>Multitopic Comparison</a></span></li></ul></li></ul></div>
# +
from esper.prelude import *
from esper.widget import *
from esper.topics import *
from esper.spark_util import *
from esper.timeline_plot import VideoRow, VideoSegment, plot_video_timelines
from datetime import timedelta
from collections import defaultdict, Counter, OrderedDict
import _pickle as pickle
# -
# # Enter a Topic
# Topic under analysis for the rest of this notebook.
topic = 'syria'
# # Build a Lexicon
# Words most associated with the topic, scored by mutual information.
lexicon = mutual_info(topic)
lexicon
# # Search for Segments
# Find caption windows whose lexicon score passes the threshold.
segments = find_segments(lexicon, window_size=500, threshold=10., merge_overlaps=False)
# Cache the (slow) segment search so it can be reloaded below.
with open('/tmp/topic-{}.pkl'.format(topic), 'wb') as f:
    pickle.dump(segments, f)
# Preview only high-scoring segments (index 3 of each tuple is the score).
show_segments([x for x in segments if x[3] > 50])
# Reload from the cache written just above (lets later cells re-run
# without redoing the search).
with open('/tmp/topic-{}.pkl'.format(topic), 'rb') as f:
    segments = pickle.load(f)
# # Visualize Video Timelines
# +
# Minimum segment score for a segment to count toward a video's total.
threshold = 50
# Exact mentions
video_id_to_mentions = caption_search([topic.upper()])[0]
# Get videos with most topic time
video_id_to_segments = defaultdict(list)
video_id_to_total_segment_time = Counter()
for segment in segments:
    # Each segment tuple is (video_id, _, (start, end), score, _).
    video_id, _, interval, score, _ = segment
    video_id_to_segments[video_id].append(segment)
    if score >= threshold:
        video_id_to_total_segment_time[video_id] += interval[1] - interval[0]
# Get the face genders and commercials dataframes
commercials = get_commercials()
face_genders = get_face_genders()
# Map numeric gender ids to names (e.g. 'M'/'F') for labeling.
gender_map = { x.id : x.name for x in Gender.objects.all() }
def plot_helper(video_ids):
    """Plot a timeline for each video id: topic segments, commercials,
    on-screen gender intervals, and exact topic mentions.

    Relies on the notebook globals built above: `face_genders`,
    `commercials`, `gender_map`, `video_id_to_segments`,
    `video_id_to_mentions`, `topic`, and `threshold`.
    """
    # Collect (min_frame, max_frame) spans keyed by (video_id, gender),
    # excluding likely hosts and low-confidence detections.
    video_id_to_face_genders = defaultdict(list)
    for face_gender in face_genders.where(
        (face_genders.video_id.isin(video_ids)) &
        (face_genders.host_probability < 0.8) &
        (face_genders.probability > 0.95)
    ).select('video_id', 'gender_id', 'min_frame', 'max_frame').collect():
        video_id_to_face_genders[
            (face_gender['video_id'], gender_map[face_gender['gender_id']])
        ].append(
            (face_gender['min_frame'], face_gender['max_frame'])
        )
    # Commercial frame spans per video.
    video_id_to_commercials = defaultdict(list)
    for commercial in commercials.where(
        commercials.video_id.isin(video_ids)
    ).select('video_id', 'min_frame', 'max_frame').collect():
        video_id_to_commercials[
            commercial['video_id']
        ].append((commercial['min_frame'], commercial['max_frame']))
    # Reduce segment tuples to just ((start, end), score).
    def unpack_segments(segment_list):
        return [(interval, val) for _, _, interval, val, _ in segment_list]
    rows = []
    for video in Video.objects.filter(id__in=video_ids):
        vid_segments = []
        # Topic Segments (times are already in seconds; score is scaled
        # into [0, 1] for display, saturating at 250).
        for (a, b), val in unpack_segments(video_id_to_segments[video.id]):
            vid_segments.append(VideoSegment(
                start_time=timedelta(seconds=a),
                end_time=timedelta(seconds=b),
                display_label='non-commercial',
                display_value=min(1., val / 250.)
            ))
        # Commercial segments (frame numbers converted to seconds via fps).
        vid_segments.extend([
            VideoSegment(
                start_time=timedelta(seconds=a / video.fps),
                end_time=timedelta(seconds=b / video.fps),
                display_label='commercial',
                display_value=1.
            ) for a, b in video_id_to_commercials[video.id]
        ])
        intervals_with_women = [
            (timedelta(seconds=a / video.fps), timedelta(seconds=b / video.fps))
            for a, b in video_id_to_face_genders[(video.id, 'F')]
        ]
        intervals_with_men = [
            (timedelta(seconds=a / video.fps), timedelta(seconds=b / video.fps))
            for a, b in video_id_to_face_genders[(video.id, 'M')]
        ]
        row = VideoRow(
            video,
            segments=vid_segments,
            # Draw some intervals on all of the videos
            interval_labels=OrderedDict([
                ('{} score >= {}'.format(topic, threshold), [
                    (timedelta(seconds=a), timedelta(seconds=b))
                    for _, _, (a, b), val, _ in video_id_to_segments[video.id] if val >= threshold
                ]),
                ('{} score >= {}'.format(topic, 2 * threshold), [
                    (timedelta(seconds=a), timedelta(seconds=b))
                    for _, _, (a, b), val, _ in video_id_to_segments[video.id] if val >= 2 * threshold
                ]),
                ('woman on screen (excl. hosts)', intervals_with_women),
                ('man on screen (excl. hosts)', intervals_with_men)
            ]),
            # Point markers at the midpoint of each exact caption mention.
            discrete_labels={
                '{} mentioned'.format(topic): [
                    timedelta(seconds=(a + b) / 2) for a, b in video_id_to_mentions.get(video.id, [])
                ]
            }
        )
        rows.append(row)
    plot_video_timelines(
        rows,
        interval_label_color_map={
            '{} score >= {}'.format(topic, threshold): 'Red',
            '{} score >= {}'.format(topic, 2 * threshold): 'DarkRed',
            'woman on screen (excl. hosts)': 'Orange',
            'man on screen (excl. hosts)': 'Blue'
        },
        discrete_label_shape_map={
            '{} mentioned'.format(topic): 'o'
        },
        max_length=timedelta(seconds=3600 * 3)
    )
# Rank videos by total topic time (descending) and plot a handful of
# timelines at each percentile bucket.
sorted_ids = sorted(video_id_to_segments.keys(),
                    key=lambda x: -video_id_to_total_segment_time[x])
num_buckets = 20
num_videos = 5  # videos plotted per bucket
for i in range(num_buckets):
    start_idx = i * int(len(sorted_ids) / num_buckets)
    video_ids = sorted_ids[start_idx:start_idx + num_videos]
    print('{}th percentile of {} time [{}, {}]'.format(
        int(100 - (i * 100 / num_buckets)),
        topic, start_idx, start_idx + num_videos)
    )
    plot_helper(video_ids)
# -
# # Validation
# Total airtime covered by the topic, in hours.
print('Coverage of "{}": {:0.2f} hrs'.format(topic, get_total_segment_length(segments).total_seconds() / 60 / 60))
# ## Assert No Double Counting
# This might happen if we have more than one transcript file loaded for each video.
check_for_double_counting(segments)
# ## Sensitivity of Total Segment Length to Window Size
#
# We are interested in the stability of the total segment runtime when window size is varied. A low variation indicates that the algorithm is not sensitive to the choice of the window size parameter.
plot_total_segment_length_vs_window_size(
    lexicon,
    window_sizes=[10, 50, 100, 250, 500, 1000]
)
# ## Sensitivity of Total Segment Length to Threshold
#
# We are interested in the stability of the total segment runtime when the threshold is varied. A low variation indicates that the algorithm is not sensitive to the choice of the threshold parameter.
plot_total_segment_length_vs_threshold(
    lexicon,
    thresholds=[5, 10, 25, 50, 75, 100, 200]
)
# ## Overlap Between Topics
#
# Some topics are subtopics of another topic. For instance, we expect "affordable care act" to be a subtopic of "healthcare". This section prints out the segment overlap between topics.
related_topics = ['isis', 'terrorism', 'middle east', 'islam']
unrelated_topics = ['baseball', 'healthcare', 'taxes']
topics = [topic] + related_topics + unrelated_topics
assert len(topics) > 1
# NOTE(review): the list below repeats the expression assigned to
# `topics` two lines up; they are the same value.
topic_overlap = get_overlap_between_topics(
    [topic] + related_topics + unrelated_topics,
    window_size=250
)
topic_overlap
# # Analysis
# ## Topic by Show
# Aggregate topic airtime per show, then plot it.
topic_time_by_show = get_topic_time_by_show(segments)
plot_topic_time_by_show(topic, topic_time_by_show)
# ### Topic by Show By Year
plot_topic_by_show_over_time(topic, segments)
# ### Topic by Show By Quarter
plot_topic_by_show_over_time(topic, segments, quarters=True)
# ## Multitopic Comparison
# Topics to compare against the main topic, show by show.
topics_to_compare = ['healthcare', 'election', 'email', 'immigration']

# + hide_input=true
topics = [topic] + topics_to_compare
assert len(topics) > 1

def plot_topic_comparison_by_show(topics, window_size=250, threshold=50):
    """Plot per-show airtime for each topic in `topics`.

    For every topic: build its mutual-information lexicon, segment the
    captions with the given window/threshold, aggregate segment time by
    show, then render one comparison chart across all topics.
    """
    topic_times_by_show = [
        get_topic_time_by_show(
            find_segments(mutual_info(t), window_size=window_size, threshold=threshold)
        )
        for t in topics
    ]
    plot_topic_time_by_show(topics, topic_times_by_show)

plot_topic_comparison_by_show(topics)
# +
# NOTE(review): this redefines plot_topic_comparison_by_show from the
# previous cell with a different signature and per-year breakdown.
def plot_topic_comparison_by_show(topics, years=range(2015, 2018),
                                  window_size=100, threshold=33):
    """Plot per-show coverage for each topic, overall and per year.

    `years=None` skips the per-year plots.
    """
    # One segment list per topic, all using the same window/threshold.
    segments_by_show = []
    for topic in topics:
        lexicon = mutual_info(topic)
        segments = find_segments(lexicon, window_size=window_size, threshold=threshold)
        segments_by_show.append(segments)
    print('All coverage')
    plot_topic_time_by_show(
        topics,
        [get_topic_time_by_show(segs) for segs in segments_by_show],
        normalize_by_total_runtime=True
    )
    if years is not None:
        for year in years:
            print('Coverage in {}'.format(year))
            plot_topic_time_by_show(
                topics,
                [
                    get_topic_time_by_show(
                        segs,
                        # Calendar-year window: Jan 1 of `year` to Jan 1 of the next.
                        date_range=['{}-01-01'.format(year), '{}-01-01'.format(year + 1)]
                    ) for segs in segments_by_show
                ],
                normalize_by_total_runtime=False
            )
topics = [topic] + topics_to_compare
assert len(topics) > 1
plot_topic_comparison_by_show(topics)
# +
# Same gender/commercial lookups as plot_helper, but via the Django ORM
# instead of Spark dataframes.
# NOTE(review): `top_ids` is not defined anywhere in this notebook —
# presumably a leftover from an earlier version; this cell fails as-is.
video_id_to_face_genders = defaultdict(list)
for face_gender in FaceGender.objects.filter(
    gender__name__in=['M', 'F'],
    face__shot__video__id__in=top_ids,
    probability__gt=0.95
).values('face__shot__video__id', 'gender__name', 'face__shot__min_frame', 'face__shot__max_frame'):
    video_id_to_face_genders[(face_gender['face__shot__video__id'], face_gender['gender__name'])].append(
        (face_gender['face__shot__min_frame'], face_gender['face__shot__max_frame'])
    )
video_id_to_commercials = defaultdict(list)
for commercial in Commercial.objects.filter(video__id__in=top_ids).values('video__id', 'min_frame', 'max_frame'):
    video_id_to_commercials[commercial['video__id']].append((commercial['min_frame'], commercial['max_frame']))
# -
| app/notebooks/topic_analysis_workflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import boto3
from s3 import get_file
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# -
def data(data):
    """Read a pipe-delimited CSV into a DataFrame.

    (The parameter shadows the function name; both are kept unchanged so
    existing callers are unaffected.)
    """
    return pd.read_csv(data, sep='|')
# +
#master_train_playlist.csv
# Download train/test CSVs from S3 and parse them (pipe-delimited).
s3 = boto3.resource('s3')
bucket = 's3ssp'
train_data = data(get_file(s3,bucket,download_file='Analysis_Data/new_master_train_playlist.csv',rename_file = 'master_train_playlist.csv'))
test_data = data(get_file(s3,bucket,download_file='Analysis_Data/test_ssp.csv',rename_file = 'test.csv'))
# -
# Class balance of the raw training target.
train_data.target.value_counts()
# +
#Sort columns to be alphabetical order
df_train = train_data.reindex(sorted(train_data.columns), axis=1)
df_test = test_data.reindex(sorted(test_data.columns), axis=1)
# +
# Drop identifier and audio-feature columns excluded from modeling.
df_train = df_train.drop(columns=['playlist','valence','loudness','danceability','energy','mode','time_signature'
])
df_test = df_test.drop(columns=['playlist','valence','loudness','danceability','energy','mode','time_signature',
])
# -
df_train.columns
# +
# Visualize the (imbalanced) target distribution before resampling.
sns.set(style="whitegrid")
ax = sns.countplot(x="target", data=df_train)
ax.set_title('Initial Sampled Target Count')
ax.set_ylabel('sample size')
ax.figure.savefig("Initial Sample.png")
# -
# ## Pipeline
# ## Set up Dataset
# +
from sklearn.utils import resample
# Separate majority and minority classes
df_majority = df_train[df_train.target==0]
df_minority = df_train[df_train.target==1]
# Upsample minority class
df_minority_upsampled = resample(df_minority,
                                 replace=True,     # sample with replacement
                                 n_samples=373533,    # to match majority class
                                 random_state=123,) # reproducible results
# NOTE(review): 373533 is hard-coded — presumably the majority-class
# count for this data pull; confirm it still matches len(df_majority).
# Combine majority class with upsampled minority class
df_train = pd.concat([df_majority, df_minority_upsampled])
# Display new class counts
sns.set(style="whitegrid")
ax = sns.countplot(x="target", data=df_train)
ax.set_title('Resample Target Counts')
ax.set_ylabel('sample size')
ax.figure.savefig("Resample.png")
# +
#Structure
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split as tts
from sklearn.model_selection import cross_val_score as cvs
from sklearn.model_selection import StratifiedKFold as KFold
from yellowbrick.model_selection import CVScores
#Kernel
from sklearn.gaussian_process.kernels import RBF
#Classifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier,GradientBoostingClassifier,BaggingClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
#Transform
from sklearn.preprocessing import StandardScaler, RobustScaler, MaxAbsScaler,MinMaxScaler
from sklearn.decomposition import FastICA
from sklearn import preprocessing
#Reports
from yellowbrick.classifier import confusion_matrix,classification_report,DiscriminationThreshold
from sklearn.metrics import accuracy_score, precision_score, recall_score,f1_score
# Features = every column except the label; labels = 'target'.
X = df_train[[col for col in df_train.columns if col != 'target']]
y = df_train['target']
# 80/20 train/test split (tts = sklearn train_test_split, aliased above).
X_train, X_test, y_train, y_test = tts(X,y, test_size=0.2)
# +
from yellowbrick.datasets import load_credit
# NOTE(review): load_credit is imported but never used in this cell.
from yellowbrick.features import Rank2D
# Instantiate the visualizer with the Pearson ranking algorithm
visualizer = Rank2D(algorithm='pearson')
visualizer.fit(X_train, y_train) # Fit the data to the visualizer
visualizer.transform(X_train) # Transform the data
visualizer.show() # Finalize and render the figure
# -
# ## Initial Model Testing
# +
# NOTE(review): `scaler` is defined but never used below.
scaler = [StandardScaler(), RobustScaler(), MaxAbsScaler(),MinMaxScaler()]
# Candidate pipelines: each standardizes features, then fits a classifier.
models = [
    #Standard Scaler,QuantileTransformer random_state=0
    #C=0.01,penalty='l1',solver='liblinear')
    Pipeline([
        ('std',StandardScaler()),
        ('reg',LogisticRegression())
    ]),
    Pipeline([
        ('std',StandardScaler()),
        ('mlp',MLPClassifier())
    ]),
    #Does identify positive more than comparison
    Pipeline([
        ('std',StandardScaler()),
        ('rfc',RandomForestClassifier())
    ]),
    #Too many false positives and negatives
    Pipeline([
        ('std',StandardScaler()),
        ('reg',AdaBoostClassifier())
    ]),
    #Robust Scaler leads to false positives above 3000
    #Above 3000 for both false positives
    Pipeline([
        ('std',StandardScaler()),
        ('reg',KNeighborsClassifier())
    ]),
    #learning_rate= 0.3, loss='deviance')
    Pipeline([
        ('std',StandardScaler()),
        ('gbc',GradientBoostingClassifier())
    ]),
]
# Fit each pipeline, show diagnostics, and collect metrics into `a`.
a = []
for model in models:
    print(model)
    model.fit(X_train,y_train)
    # confusion_matrix here is yellowbrick's plotting helper, not sklearn's.
    _ = confusion_matrix(model, X_test, y_test,is_fitted=True)
    visualizer = DiscriminationThreshold(model)
    visualizer.fit(X_test, y_test) # Fit the data to the visualizer
    visualizer.show()
    # NOTE(review): scores below are computed on the *training* split, so
    # they are optimistic; `model_name` is computed but never used.
    y_pred = model.predict(X_train)
    model_str = str(model[1])
    model_name = model_str[0:model_str.find('(',0)].strip()
    #print('Train CV Score = {x} '.format(x=cvs(model,X_train,y_train)[3]))
    # NOTE(review): cvs(...) is called twice with the same arguments
    # (here and in the append below), doubling the cross-validation cost.
    print("Test CV Score = {x}".format(x=cvs(model,X_test,y_test)[3]))
    a.append({'Model':model[1],'Transformer':model[0],
              'Test Model Score':cvs(model,X_test,y_test)[3],
              'F1 Score':f1_score(y_train,y_pred),'Precision Score':precision_score(y_train,y_pred),
              'Recall Score':recall_score(y_train,y_pred)})
#More complexity led to less precision
#Robust Scaler
#Pipeline([
# ('std',RobustScaler()),
# ('reg',LogisticRegression())
#]),
#Pipeline([
# ('std',RobustScaler()),
# ('reg',RandomForestClassifier(n_estimators=50))
#]),
#Does identify positive more than comparison
#Pipeline([
# ('std',StandardScaler()),
# ('reg',RandomForestClassifier())
#]),
#Too many false positives and negatives
#Pipeline([
# ('std',StandardScaler()),
# ('reg',AdaBoostClassifier())
#]),
#Robust Scaler leads to false positives above 3000
#Above 3000 for both false positives
#Pipeline([
# ('std',StandardScaler()),
# ('reg',KNeighborsClassifier())
#]),
#Pipeline([
# ('std',StandardScaler()),
# ('reg',DecisionTreeClassifier())
#]),
#Above 3000 on false positives
#Kills Kernal
#Pipeline([
# ('std',StandardScaler()),
# ('reg',GaussianProcessClassifier())
#]),
#Pipeline([
# ('std',StandardScaler()),
# ('reg',GaussianNB())
#]),
#Pipeline([
# ('std',StandardScaler()),
# ('reg',QuadraticDiscriminantAnalysis())
#]),
# -
# Summary table of the per-model metrics collected in the loop above.
b = pd.DataFrame(a)
b
# ## Grid Search Hyperparameter Tuning
# +
from sklearn.model_selection import GridSearchCV
#learning curve and validation curve
def grid_search_paramaters(model):
    """Look up the GridSearchCV parameter grid for an estimator.

    The estimator's class name is parsed from ``str(model)`` up to the
    first ``'('``. Returns the matching param_grid dict (keys use the
    pipeline step prefixes, e.g. ``reg__``), an empty dict for
    MLPClassifier (all candidates currently commented out), or ``None``
    for estimators without a tuned grid. (The function name keeps its
    original spelling so existing callers continue to work.)
    """
    text = str(model)
    estimator_name = text[:text.find('(', 0)]

    grids = {
        'LogisticRegression': {
            'reg__solver': ['liblinear'],
            'reg__penalty': ['l1'],
            'reg__C': [0.01],
        },
        'RandomForestClassifier': {
            # 'rfc__max_leaf_nodes': [1000, 2000, 3000],
            # 'rfc__n_estimators': [100, 150, 200],
            # 'rfc__criterion': ['gini', 'entropy'],
            'rfc__max_features': [1, 2],
        },
        'RidgeClassifier': {
            'rc__alpha': [0.1],
        },
        'BaggingClassifier': {
            'bc__n_estimators': [50],
        },
        'GradientBoostingClassifier': {
            'gbc__loss': ['deviance'],
            # 'gbc__criterion': ['friedman_mse', 'mse', 'mae'],
            'gbc__learning_rate': [0.1, 0.2, 0.3],
        },
        'MLPClassifier': {
            # 'mlp__solver': ['sgd', 'adam'],
            # 'mlp__activation': ['identity', 'logistic', 'tanh', 'relu'],
            # 'mlp__max_iter': [200, 400, 600, 800, 1000],
            # 'mlp__learning_rate': ['constant', 'invscaling', 'adaptive'],
            # 'mlp__learning_rate_init': ['double', 0.001, .01, .05, .1],
            # 'mlp__power_t': ['double', .05, .08, 1],
            # 'mlp__max_iterint': [200, 250, 300],
        },
    }
    # dict.get returns None for unknown estimators, matching the original
    # if/elif chain's fall-through behavior.
    return grids.get(estimator_name)
#if mode == 'RandomForestClassifier'
# +
# Pipelines to tune; only LogisticRegression and GradientBoosting are
# currently active, the rest are commented out.
models = [
    #Standard Scaler,QuantileTransformer random_state=0
    Pipeline([
        ('std',StandardScaler()),
        ('reg',LogisticRegression())
    ]),
    #Pipeline([
    #    ('std',StandardScaler()),
    #    ('mlp',MLPClassifier())
    #]),
    #Pipeline([
    #    ('std',StandardScaler()),
    #    ('rfc',RandomForestClassifier())
    #]),
    #Pipeline([
    #    ('std',StandardScaler()),
    #    ('rc',RidgeClassifier())
    #]),
    #Pipeline([
    #    ('std',StandardScaler()),
    #    ('bc',BaggingClassifier())
    #]),
    Pipeline([
        ('std',StandardScaler()),
        ('gbc',GradientBoostingClassifier())
    ]),
]
for model in models:
    # 2-fold grid search over the grid declared for this estimator.
    search = GridSearchCV(model,grid_search_paramaters(model[1]),cv=2)
    search.fit(X_train, y_train)
    _ = confusion_matrix(search, X_test, y_test,is_fitted=True)
    # NOTE(review): DiscriminationThreshold wraps the *unfitted* pipeline
    # (`model`, not `search.best_estimator_`) and fits it on the test
    # split — likely not what was intended; confirm.
    visualizer = DiscriminationThreshold(model)
    visualizer.fit(X_test, y_test) # Fit the data to the visualizer
    visualizer.show()
    #visualizer = DiscriminationThreshold(model)
    #visualizer.fit(X_test, y_test) # Fit the data to the visualizer
    #visualizer.show()
    # Print the estimator class name plus the best CV result.
    model_str = str(model[1])
    print(model_str[0:model_str.find('(',0)])
    print("Best CV score={x}".format(x=search.best_score_))
    print("Best Parameters {x}".format(x=search.best_params_))
    print('\n')
    #model.predict(X_test,y_test)
#a = pd.DataFrame(search.cv_results_)
# -
# ## Stop
# +
# NOTE(review): this cell appears to be pasted from a yellowbrick docs
# example and does not run as-is: `svm_confusion_matrix` is never
# defined, `SVC` is never imported, and StratifiedKFold was imported
# above under the alias `KFold`, so the bare name below raises
# NameError. Kept for reference (the '## Stop' heading above suggests
# it is intentionally parked).
import matplotlib.pyplot as plt
# Create 2 side-by-side subplots
fig, axes = plt.subplots(ncols=2, figsize=(8, 4))
# Instantiate the ConfusionMatrix visualizer for an SVM model
svm_confusion_matrix.fit(X_train, y_train)
svm_confusion_matrix.score(X_test, y_test)
# Create a cross-validation strategy
strategy = StratifiedKFold(n_splits=12, shuffle=True)
# Instantiate the CVScores visualizer for an SVM model
svm_cv_scores = CVScores(
    SVC(),
    scoring="f1_weighted",
    cv=strategy,
    ax=axes[1]
)
svm_cv_scores.fit(X, y)
fig.suptitle("Performance of an SVM model on the occupancy dataset")
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.show()
# -
# ## Logistical Regression
# from sklearn.model_selection import cross_val_score,train_test_split
# from yellowbrick.classifier import confusion_matrix,classification_report
# from sklearn.linear_model import LogisticRegression
#
# from sklearn.metrics import confusion_matrix
#
#
# X = df_train[[col for col in df_train.columns if col != 'target']]
#
# y = df_train['target']
#
# X_train, X_test, y_train, y_test = train_test_split(StandardScaler().fit_transform(X),y, test_size=0.2)
#
# model = LogisticRegression()
# model.fit(X_train, y_train)
#
# _ = confusion_matrix(model, X_test, y_test,is_fitted=True)
#
#
# #confusion_matrix(X_train, y_train)
#
#
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.model_selection import cross_val_score,train_test_split
# from yellowbrick.classifier import confusion_matrix,classification_report
#
#
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#
# model = LogisticRegression()
# model.fit(X_train, y_train)
# from sklearn.model_selection import cross_val_score,train_test_split
# from yellowbrick.classifier import confusion_matrix,classification_report
# from sklearn.linear_model import LogisticRegression
# from sklearn.preprocessing import StandardScaler
# from sklearn.ensemble import RandomForestClassifier
#
#
# X = df_train[[col for col in df_train.columns if col != 'target']]
#
# y = df_train['target']
#
# #Cross
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
#
# clf = RandomForestClassifier(n_estimators=100)
# clf.fit(X_train, y_train)
# print(clf.predict)
#
#
#
#
#
#
# #confusion_matrix(logreg, X_train, y_train, X_test, y_test)
#
# scores = cross_val_score(clf,X,y)
# from sklearn.model_selection import cross_val_score,train_test_split
# from yellowbrick.classifier import confusion_matrix,classification_report
# from sklearn.linear_model import LogisticRegression
# from sklearn.ensemble import RandomForestRegressor
# from sklearn.preprocessing import RobustScaler
#
#
# X = df_train[[col for col in df_train.columns if col != 'target']]
#
# y = df_train['target']
# logreg = LogisticRegression()
#
#
# #Cross
# X_train, X_test, y_train, y_test = train_test_split(RobustScaler().fit_transform(X), y, test_size=0.2)
# confusion_matrix(logreg, X_train, y_train, X_test, y_test)
#
#
# from sklearn.pipeline import Pipeline
# from sklearn.utils.validation import check_is_fitted
# from sklearn.ensemble import RandomForestRegressor
# from sklearn.preprocessing import OneHotEncoder
# from sklearn import svm
# from sklearn.model_selection import cross_val_score,train_test_split
# from yellowbrick.classifier import confusion_matrix,classification_report
#
#
#
# #RandomForestRegressor(n_estimators = 50)
#
#
#
# #enc = OneHotEncoder(handle_unknown='ignore')
# #enc.fit(X)
# #enc.categories_
#
#
# #Support Vector Machine
# #estimator = svm.SVC(gamma=.001#,kernel='linear',C=100)
#
# #confusion_matrix(estimator, *train_test_split(X, y, test_size=0.2))
# #scores = cross_val_score(estimator,X,y)
# #estimator.fit(X,y)
# #estimator.predict(df_test)
# #Cross value scores
# #cross_val_score(X,y)
#
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.preprocessing import StandardScaler
# from sklearn.impute import SimpleImputer
# from sklearn.pipeline import Pipeline
#
# model = Pipeline([
#     ('impute', SimpleImputer(strategy='mean')),
#     ('scale', StandardScaler()),
#     ('knn', KNeighborsClassifier(n_neighbors=12))
# ])
#
# # Update the hyperparameters of the model
# model.set_params(knn__weights="distance")
# model.fit(X_train, y_train)
# model.score(X_test, y_test)
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.preprocessing import StandardScaler
# from sklearn.impute import SimpleImputer
# from sklearn.pipeline import Pipeline
#
# model = Pipeline([('scale', StandardScaler()),
# ('reg',LogisticRegression(fit_intercept=False))
# ])
#
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# confusion_matrix(model.reg, X_train, y_train, X_test, y_test)
#
# # Update the hyperparameters of the model
#
#
# from sklearn.preprocessing import StandardScaler
# from sklearn.datasets import make_classification
# from sklearn.model_selection import train_test_split
# from sklearn.pipeline import Pipeline
# from sklearn.linear_model import LogisticRegression
# from sklearn.ensemble import RandomForestRegressor
#
# #X, y = make_classification(random_state=0)
# X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
#
# model = [Pipeline([
# ('scale', StandardScaler()),
# ('reg'),LogisticRegression(fit_intercept=False)
# ]),
# ]
#
# #check_is_fitted(model,['support_fit_','support_','support_vectors_'])
#
# confusion_matrix(logreg, X_train, y_train, X_test, y_test)
# model = Pipeline([
# ('std', StandardScaler()),
# ('reg'),RandomForestRegressor(n_estimator=50)
# ]),
# Pipeline([
# ('std', StandardScaler()),
# ('reg'),MLPRegressor(hidden_layer_sizes=(100,100,100))
# ]),
# Pipeline([
# ('std', RobustScaler()),
# ('reg'),LogisticRegression(fit_intercept=False)
# ]),
# Pipeline([
# ('std', RobustScaler()),
# ('reg'),RandomForestRegressor(n_estimator=50)
# ]),
# Pipeline([
# ('std', RobustScaler()),
# ('reg'),MLPRegressor(hidden_layer_sizes=(100,100,100))
# ]),
# ##Model suggestions from Dr. Bengfort
# 1) Gradient Boosting – might want to try them
# 2) if svc is taking too long then recommend just using Stochastic Gradient Descent or Linear SVC - should improve the performance of the support vector machine and when you are working with support vector machine, make sure your trying polynomial kernels in addition to linear kernels
# 3) Multinomial naïve bayes if not normally distributed
# 4) Bayesian classifiers are going to perform the worst – this can be our baseline
# 5) Smote yellowbrick.target class balance
# 6) Filter down zero
# 7) parallel functions
#
#
#
# !1) Gaussian Naïve Bayes if features are normally distributed
# !2) stick with support vector machines and logistic regression to preserve the linearity of the features in an understandable way (natural binary classifiers)
# !3) Stick with support vector machines and logistic regression
# !4) Don’t know if you would get good results from Random Forest and K Near Neighbors,
# !5) Ada boost
| 5_Machine_Learning/Models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.12 64-bit (''matlab'': conda)'
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the census train/test splits and the submission template.
train = pd.read_csv('train.csv')
train.rename(columns={'index': 'id'}, inplace=True)
test = pd.read_csv('test.csv')
sample_submit = pd.read_csv('sample_submit.csv', names=['id', 'Y'])
# Stack train and test so feature encoding below is applied uniformly;
# they are split back apart by length later.
data = pd.concat([train, test], sort=False)
# +
# あまり効果なし
# print(data['education'].unique())
# data.loc[data['education'] == 'Some-college', 'education'] = 'Bachelors'
# data.loc[data['education'] == '12th', 'education'] = 'junior-glad'
# data.loc[data['education'] == '10th', 'education'] = 'junior-glad'
# data.loc[data['education'] == '5th-6th', 'education'] = 'junior-glad'
# data.loc[data['education'] == '7th-8th', 'education'] = 'junior-glad'
# data.loc[data['education'] == '11th', 'education'] = 'junior-glad'
# data.loc[data['education'] == '9th', 'education'] = 'junior-glad'
# data.loc[data['education'] == '1st-4th', 'education'] = 'junior-glad'
# print(data['education'].unique())
# data
# -
data['workclass'].unique()
# Encode all categorical features for now.
# Use one-hot encoding.
from sklearn import preprocessing
categorical_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'native-country']
for column in categorical_columns:
    # (Earlier label-encoding approach, replaced by one-hot dummies.)
    # d = {}
    # for i, category in enumerate(data[column].unique()):
    #     d[category] = i
    # data[column] = data[column].map(d)
    data = pd.concat([data, pd.get_dummies(data[column])], axis=1)
# Drop the original (now dummy-encoded) categorical columns.
data.drop(categorical_columns, inplace=True, axis=1)
# Min-max scale every remaining column into [0, 1].
# NOTE(review): this loop also rescales the 'Y' label column — confirm
# that is intended (harmless only if Y is already 0/1).
for column in data.columns:
    data[column] = preprocessing.minmax_scale(data[column])
data
# +
data.drop('id', axis=1, inplace=True)
# data.drop('fnlwgt', axis=1, inplace=True)
# Split the combined frame back into train/test by original row counts
# (train is rebound first, then len(train) still equals the train size).
train = data[:len(train)]
test = data[len(train):]
y_train = train['Y']
X_train = train.drop('Y', axis=1)
X_test = test.drop('Y', axis=1)
X_train
# +
import lightgbm as lgb
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
# 5-fold stratified CV: fit a QDA classifier per fold, keep the out-of-fold
# predictions for validation and one test-set prediction per fold to average.
# NOTE: the original built lgb.Dataset objects (and an empty `models` list)
# that were never used — the model here is QDA — so they have been removed.
y_preds = []
oof_train = np.zeros((len(X_train),))
cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
for fold_id, (train_index, valid_index) in enumerate(cv.split(X_train, y_train)):
    X_tr = X_train.iloc[train_index, :]
    X_val = X_train.iloc[valid_index, :]
    y_tr = y_train.iloc[train_index]
    y_val = y_train.iloc[valid_index]
    clf = QuadraticDiscriminantAnalysis()
    clf.fit(X_tr, y_tr)
    oof_train[valid_index] = clf.predict(X_val)
    y_pred = clf.predict(X_test)
    y_preds.append(y_pred)
# -
oof_train
# +
from sklearn.metrics import accuracy_score
# Out-of-fold accuracy; QDA already outputs hard 0/1 labels, so the 0.5
# threshold is effectively just an int cast.
y_pred_oof = (oof_train > 0.5).astype(int)
accuracy_score(y_train, y_pred_oof)
# -
# Average the per-fold test predictions (majority vote after thresholding)
# and write the submission file in the expected headerless format.
y_sub = sum(y_preds) / len(y_preds)
y_sub = (y_sub > 0.5).astype(int)
y_sub[:10]
sample_submit['Y'] = y_sub
sample_submit.to_csv('submit-qda.csv', header=False, index=False)
sample_submit
| 576-Census/old/qda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Tâm lý học về sự cầu tiến
#
# Lĩnh vực tâm lý học thực chứng nghiên cứu những hành vi nào của con người sẽ dẫn đến một cuộc sống tuyệt vời. Bạn có thể tưởng tượng lĩnh vực này như điểm giao thoa của sách tự lực và lý thuyết thống kê chính xác. Một trong những phát hiện nổi tiếng của tâm lý học tích cực là **Tư Duy Cầu Tiến**. Mỗi người có thể có một tư duy bảo thủ hoặc tư duy cầu tiến. Nếu bạn có tư duy bảo thủ, bạn tin rằng các khả năng của một người đã được định sẵn kể từ lúc mới sinh ra hoặc ngay từ thời thơ ấu. Như vậy, tài năng của một người là cố định và không thể thay đổi trong suốt cuộc đời. Nếu bạn đã không có được nó ngay lúc này, bạn vĩnh viễn không thể có được nó. Mặt khác, nếu bạn có một tư duy cầu tiến, bạn tin rằng tài năng có thể được phát triển. Hệ quả trực tiếp của việc này là bạn sẽ không coi sự thất bại như một dạng thiếu năng lực, mà bạn coi nó như là một phần của quá trình học hỏi.
#
# Chúng ta sẽ không đi vào việc tranh luận tư duy nào trong hai tư duy này là đúng (có lẽ nó nằm đâu đó ở giữa). Việc này không cần thiết cho mục đích của chúng ta. Quan trọng là các nhà tâm lý học đã phát hiện ra những người có tư duy cầu tiến có khả năng thành công cao hơn. Họ có nhiều khả năng đạt được những điều họ đã vạch ra.
#
# Như đã làm với suy luận nhân quả, chúng ta đã học được cách nhìn những lời khẳng định này với sự thận trọng. Có phải do tư duy cầu tiến khiến cho con người thành công hơn? Hay đơn giản chỉ là trường hợp những người thành công hơn thì có xu hướng phát triển tư duy cầu tiến như là kết quả cho sự thành công của họ? Chú ý tới kết quả tiềm năng, chúng ta có lý do để tin rằng thiên lệch xuất hiện trong những lời khẳng định này. \\(Y_0|T=1\\) có thể lớn hơn \\(Y_0|T=0\\), nghĩa là những người có tư duy cầu tiến sẽ thành công hơn ngay cả khi họ có tư duy bảo thủ.
#
# Để làm rõ vấn đề này, các nhà nghiên cứu đã thiết kế [Nghiên cứu quốc gia về tư duy học tập](https://mindsetscholarsnetwork.org/about-the-network/current-initatives/national-mindset-study/#). Đây là một nghiên cứu ngẫu nhiên được thực hiện ở các trường trung học công lập của Mỹ với mục đích tìm kiếm tác động của tư duy cầu tiến. Các thức hoạt động của nghiên cứu này như sau. Học sinh tham gia hội thảo của trường để hình thành tư duy cầu tiến cho bản thân. Sau đó, các nhà nghiên cứu theo dõi các sinh viên này trong những năm đại học để đo lường thành tích học tập của họ. Phép đo này được tổng hợp thành điểm thành tích và được chuẩn hoá. Dữ liệu gốc của nghiên cứu này không được công bố rộng rãi để đảm bảo quyền riêng tư của các sinh viên. Tuy nhiên, chúng ta có một bộ dữ liệu mô phỏng với các thuộc tính thống kê tương tự được cung cấp bởi [<NAME>](https://arxiv.org/pdf/1902.07409.pdf), vì vậy chúng ta sẽ tận dụng nó.
#
# +
# Notebook-wide imports and plotting defaults.
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
from matplotlib import style
from matplotlib import pyplot as plt
import seaborn as sns
import statsmodels.formula.api as smf
import graphviz as gr
# %matplotlib inline
style.use("fivethirtyeight")
pd.set_option("display.max_columns", 6)
# -
# Bên cạnh biến được can thiệp và kết quả, nghiên cứu cũng ghi lại một số thuộc tính khác:
#
# * schoolid: định danh trường của học sinh;
# * success_expect: sự tự kỳ vọng vào thành công trong tương lai, một biến đại diện cho những thành tích trước đó, được đo lường trước khi chỉ định ngẫu nhiên;
# * ethnicity: biến phân loại cho chủng tộc/sắc tộc của học sinh;
# * gender: biến phân loại cho nhận biết giới tính của học sinh;
# * frst_in_family: biến phân loại cho học vấn của thế hệ đầu tiên, ví dụ, người đầu tiên trong gia đình vào đại học;
# * school_urbanicity: biến phân loại cấp trường cho tính đô thị của trường, ví dụ, nông thôn, ngoại ô, v.v;
# * school_mindset: giá trị trung bình cấp trường cho giá trị trung bình của học sinh có tư duy bảo thủ, được ghi lại trước chỉ định ngẫu nhiên, được chuẩn hoá;
# * school_achievement: thành tích cấp trường, được đo lường bởi điểm thi và sự chuẩn bị đại học cho 4 khoá học sinh trước đó, được chuẩn hoá;
# * school_ethnic_minority: thành phần chủng tộc/dân tộc thiểu số, ví dụ, tỷ lệ phần trăm học sinh là người da màu, người La-tin, người Mỹ bản địa, được chuẩn hoá;
# * school_poverty: tỷ lệ nghèo ở trường, ví dụ, tỷ lệ phần trăm học sinh tới từ những gia đình có thu nhập dưới chuẩn nghèo liên bang, được chuẩn hoá;
# * school_size: tổng số học sinh trong bốn cấp học ở trường, được chuẩn hoá.
# Simulated version of the National Study of Learning Mindsets data.
data = pd.read_csv("./data/learning_mindset.csv")
data.sample(5, random_state=5)
# Mặc dù nghiên cứu được thực hiện ngẫu nhiên, nhưng có vẻ như dữ liệu này không hoàn toàn loại bỏ được nhiễu. Nếu chúng ta nhìn vào những thuộc tính bổ sung, chúng ta sẽ nhận thấy chúng khác nhau một cách có hệ thống giữa nhóm được can thiệp và nhóm đối chứng. Một trong những nguyên nhân có thể là biến can thiệp được đo lường bởi những học sinh tham gia hội thảo. Vì vậy, mặc dù cơ hội để tham gia là ngẫu nhiên, bản thân việc tham gia lại không phải như vậy. Ở đây chúng ta gặp phải trường hợp không tuân thủ. Một minh chứng cho điều này là kỳ vọng về thành công của học sinh có tương quan như thế nào với việc tham gia hội thảo. Học sinh tự kỳ vọng thành công cao hơn có nhiều khả năng đã tham gia hội thảo tư duy cầu tiến hơn.
data.groupby("success_expect")["intervention"].mean()
# Tuy nhiên, hãy xem xét sự khác biệt của các giá trị trung bình \\(E[Y|T=1] - E[Y|T=0]\\) trông như thế nào. Điều này sẽ là cơ sở hữu ích để thực hiện so sánh.
smf.ols("achievement_score ~ intervention", data=data).fit().summary().tables[1]
# Chỉ cần so sánh những người có và không có can thiệp, chúng ta có thể thấy nhóm can thiệp có điểm thành tích trung bình là 0.3185 (0.4723 - 0.1538), cao hơn nhóm đối chứng. Liệu con số này lớn hay nhỏ đây? Chúng ta biết rằng việc diễn giải các kết quả đã được chuẩn hoá không dễ dàng chút nào, nhưng hãy kiên nhẫn trong giây lát. Chúng tôi cho rằng điều này đáng để xem xét kỹ lưỡng bởi chúng ta sẽ còn gặp phải điểm số được chuẩn hoá trong tương lai.
#
# Biến kết quả được chuẩn hoá nghĩa là nó được đo lường theo độ lệch chuẩn. Vì vâỵ, nhóm can thiệp trên 0.3185 lần độ lệch chuẩn so với nhóm đối chứng. Đó là ý nghĩa của nó. Đối với việc liệu con số này là nhỏ hay lớn, hãy nhớ lại một vài kiến thức về phân phối chuẩn. Chúng ta biết 95% quan sát trong khoảng 2 lần độ lệch chuẩn, còn lại 2.5% ở mỗi đuôi trái và đuôi phải. Điều này cũng có nghĩa nếu ai đó cao hơn giá trị trung bình 2 lần độ lệch chuẩn, 97.5% (95% cộng thêm 2.5% đuôi trái) những người khác thấp hơn người đó. Bằng cách nhìn vào CDF, chúng ta cũng thấy rằng khoảng 85% quan sát dưới 1 lần độ lệch chuẩn và 70% quan sát dưới 0.5 lần độ lệch chuẩn. Do vậy, điều này có nghĩa trung bình nhóm can thiệp cao hơn 70% thành tích cá nhân. Mặt khác, giá trị trung bình của nhóm đối chứng chỉ cao hơn 44% thành tích cá nhân.
#
# Điều này có thể được biểu diễn như sau.
# Achievement-score distributions, with the control (C2) and treated (C3)
# group means from the regression above marked as vertical lines.
plt.hist(data["achievement_score"], bins=20, alpha=0.3, label="Tất cả")
plt.hist(data.query("intervention==0")["achievement_score"], bins=20, alpha=0.3, color="C2")
plt.hist(data.query("intervention==1")["achievement_score"], bins=20, alpha=0.3, color="C3")
plt.vlines(-0.1538, 0, 300, label="Nhóm đối chứng", color="C2")
plt.vlines(-0.1538+0.4723, 0, 300, label="Nhóm can thiệp", color="C3")
plt.legend();
# Tất nhiên, chúng ta vẫn cho rằng kết quả này chệch. Sự khác biệt giữa nhóm can thiệp và nhóm đối chứng có thể còn nhỏ hơn mức này, bởi chúng ta cho rằng thiên lệch dương. Chúng ta đã thấy rằng những người tham vọng hơn thì sẵn sàng tham gia hội thảo hơn, vì vậy có lẽ họ còn thành công hơn nếu họ thật sự tham gia hội thảo. Để kiểm soát thiên lệch này, chúng ta có thể sử dụng phương pháp hồi quy hoặc ghép cặp, nhưng đã đến lúc để học một phương pháp mới.
# ## Điểm Xu Hướng
#
# Điểm xu hướng xuất phát từ việc nhận ra rằng bạn không nhất thiết phảỉ trực tiếp kiểm soát biến nhiễu X để thu được độc lập có điều kiện \\((Y_1, Y_0) \perp T | X\\). Thay vào đó, chỉ cần kiểm soát điểm cân bằng \\(E[T|X]\\) là đủ. Điểm cân bằng này thường là xác suất có điều kiện cuả can thiệp, \\(P(T|X)\\), còn được gọi là điểm xu hướng \\(P(x)\\). Điểm xu hướng giúp bạn không cần phải cố định toàn bộ X để thu được độc lập cho kết quả tiềm năng của can thiệp. Chỉ cần cố định một biến điểm xu hướng là đủ:
#
# $
# (Y_1, Y_0) \perp T | P(x)
# $
#
# Có một cách chứng minh bằng toán học cho lý do tại sao lại như vậy, nhưng chúng ta có thể tạm thời bỏ qua nó và tiếp cận vấn đề bằng trực giác. Điểm xu hướng là xác suất có điều kiện của việc nhận can thiệp. Vì vậy chúng ta có thể nghĩ về nó như một loại hàm mà biến đổi X thành can thiệp T. Điểm xu hướng tạo nên điểm trung gian giữa biến X và can thiệp T. Chúng ta biểu diễn chúng qua biểu đồ nhân quả như sau.
# Causal diagram: X affects both Y and the propensity score P(x),
# and P(x) is the only path from X into the treatment T.
g = gr.Digraph()
for parent, child in [("T", "Y"), ("X", "Y"), ("X", "P(x)"), ("P(x)", "T")]:
    g.edge(parent, child)
g
# Nếu ta đã biết P(x), một mình biến X sẽ không giúp ta biết thêm gì hơn về T. Điều này có nghĩa là việc kiểm soát P(x) giống như trực tiếp kiểm soát X. Chúng ta có thể nghĩ về nó qua ví dụ về tư duy. Nhóm can thiệp và nhóm đối chứng ban đầu không tương đồng với nhau bởi những người tham vọng hơn có nhiều khả năng nhận can thiệp và thành công hơn trong cuộc sống. Tuy nhiên, nếu chúng ta chọn ra 2 cá nhân, một từ nhóm can thiệp và một từ nhóm đối chứng, với xác suất nhận can thiệp là như nhau, hai cá nhân này sẽ tương đồng. Hãy nghĩ về điều này. Nếu họ có cùng xác suất nhận can thiệp, lý do duy nhất mà một trong số 2 người nhận được và người còn lại không nhận được không đơn thuần là ngẫu nhiên. Giữ điểm xu hướng không đổi theo cách làm cho dữ liệu như thể được chỉ định ngẫu nhiên.
#
# Giờ chúng ta đã nắm được cách giải thích trực quan, hãy nhìn vào cách chứng minh toán học. Chúng ta muốn chứng minh \\((Y_1, Y_0) \perp T | P(x)\\) tương đương với việc khẳng định rằng
#
# $
# E[T|P(x), X] = E[T|P(x)]
# $
#
# Điều này chỉ đơn thuần cho biết khi chúng ta cố định P(x), X không cung cấp thêm bất cứ thông tin gì về T. Cách chứng minh này có thể hơi lạ. Chúng tôi sẽ chứng minh phương trình trên đúng bằng cách biến đổi nó thành một phương trình đơn giản hơn. Đầu tiên hãy xem xét vế trái \\(E[T|P(x), X]\\).
#
# $
# E[T|P(x), X] = E[T|X] = P(x)
# $
#
# Chúng ta dựa vào sự thật rằng P(x) thực chất chỉ là hàm của X, vì vậy cố định P(x) không cung cấp thêm bất cứ thông tin nào sau khi chúng ta cố định X. Sau đó, chúng ta sử dụng định nghĩa về điểm xu hướng \\(E[T|X]\\). Đối với vế phải, chúng ta sẽ sử dụng Luật kỳ vọng lặp \\(E[A] = E[E[A|B]]\\). Định luật này cho biết chúng ta có thể tính toán giá trị kỳ vọng của A bằng cách xem xét giá trị của A theo B và sau đó lấy giá trị trung bình.
#
# $
# E[T|P(x)] = E[E[T|P(x),X]|P(x)] = E[P(x)|P(x)] = P(x)
# $
#
# Dấu bằng đầu tiên bắt nguồn từ Luật kỳ vọng lặp. Dấu bằng thứ hai là điều chúng ta tìm ra khi xử lý vế trái. Bởi cả vế trái và vế phải bằng nhau, cùng bằng P(x), ta có điều phải chứng minh.
# ## Điểm Xu Hướng Theo Trọng Số
#
# 
#
# OK, vậy giờ chúng ta đã có điểm xu hướng. Tiếp theo sẽ là gì? Như đã nói, tất cả những gì chúng ta cần làm là cố định nó. Ví dụ, chúng ta có thể chạy một hồi quy tuyến tính chỉ cố định điểm xu hướng, thay vì tất cả Xs. Bây giờ, hãy xem xét một kỹ thuật mà chỉ sử dụng một mình điểm xu hướng. Ý tưởng là biểu diễn khác biệt có điều kiện của giá trị trung bình thông qua điểm xu hướng
#
# $
# E[Y|X,T=1]−E[Y|X,T=0] = E\bigg[\dfrac{Y}{P(x)}|X,T=1\bigg]P(x) - E\bigg[\dfrac{Y}{(1-P(x))}|X,T=0\bigg](1-P(x))
# $
#
# Chúng ta có thể giản lược phương trình này hơn nữa, nhưng trước hết hãy xem xét nó dưới dạng này này bởi nó giúp chúng ta có cách nhìn trực quan về những gì điểm xu hướng đang thực hiện. Phần tử đầu tiên ước lượng \\(Y_1\\). Nó gộp tất cả những trường hợp được can thiệp và nhân chúng với nghịch đảo của xác suất can thiệp. Bằng cách này ta gán cho những người có xác suất can thiệp thấp trọng số cao. Điều này nghe có vẻ hợp lý đấy chứ? Nếu một người có xác suất can thiệp thấp, người đó sẽ giống như người trong nhóm đối chứng. Tuy nhiên, cá nhân đó đã được can thiệp. Điều này hẳn là thú vị. Chúng ta gặp những đối tượng can thiệp trông như thể đối chứng, vì vậy chúng ta gán cho cá thể đó trọng số cao. Việc này giúp tạo ra một tổng thể có kích thước giống như ban đầu, nhưng tất cả mọi người đều được nhận can thiệp. Với cách diễn giải tương tự, phần tử còn lại xem xét nhóm đối chứng và đặt trọng số cao cho những cá nhân giống như được can thiệp. Mô hình ước lượng này được gọi là Inverse Probability of Treatment Weighting, vì nó chia mỗi kết quả của đối tượng cho xác suất nhận can thiệp ngược với điều mà nó nhận được.
#
# Hình sau biểu diễn những điều mô hình ước lượng này thực hiện.
#
# 
#
# Biều đồ phía trên bên trái biểu diễn dữ liệu gốc. Các chấm màu xanh biểu diễn nhóm đối chứng và các chấm đỏ biểu diễn nhóm can thiệp. Biểu đồ dưới cùng biểu diễn điểm xu hướng P(x). Lưu ý rằng nó nằm giữa 0 và 1, nó tăng khi X tăng. Cuối cùng, biểu đồ phía trên bên phải là dữ liệu sau khi được đặt trọng số. Chú ý chấm đỏ (được can thiệp) nằm ở phía bên trái (điểm xu hướng thấp hơn) có trọng số cao hơn. Tương tự, chấm xanh nằm về phía bên phải cũng có trọng số cao hơn.
#
# Sau khi chúng ta đã có cái nhìn trực quan, chúng ta có thể giản lược những phần tử ở trên
#
# $
# E\bigg[Y \dfrac{T-P(x)}{P(x)(1-P(x))}\bigg|X\bigg]
# $
#
# mà nếu chúng ta tích hợp với X sẽ trở thành mô hình ước lượng điểm xu hướng theo trọng số.
# $
# E\bigg[Y \dfrac{T-P(x)}{P(x)(1-P(x))}\bigg]
# $
#
# Lưu ý rằng mô hình ước lượng này yêu cầu \\(P(x)\\) hoặc \\(1-P(x)\\) phải dương. Nói cách khác, điều này có nghĩa là mọi người cần có ít nhất một cơ hội nào đó để nhận can thiệp hoặc không nhận can thiệp. Một cách nói khác là phân phối của nhóm can thiệp và nhóm đối chứng có chồng lấn. Đây là **giả thiết điều kiện dương** của suy luận nhân quả. Nó cũng có ý nghĩa về mặt trực quan. Nếu nhóm can thiệp và nhóm đối chứng không chồng lấn, có nghĩa là chúng rất khác nhau và chúng ta không thể ngoại suy tác động của một nhóm lên nhóm còn lại. Phép ngoại suy này không phải là không thể thực hiên được (hồi quy có thể), nhưng nó vô cùng nguy hại. Nó giống như việc thử nghiệm một loại thuốc mới trong một thí nghiệm mà chỉ có đàn ông mới nhận can thiệp và sau đó giả định rằng phụ nữ sẽ có phản ứng tương tự.
#
# ## Ước lượng Điểm Xu Hướng
#
# Trong một thế giới lý tưởng, chúng ta sẽ có điểm xu hướng thực \\(P(x)\\). Tuy nhiên, trên thực tế, chúng ta chưa biết cơ chế chỉ định can thiệp và chúng ta cần thay thế điểm xu hướng thực bằng một ước lượng của nó \\(\hat{P}(x)\\). Một cách thông dụng để thực hiện điều này là sử dụng hồi quy lô-gít nhưng các phương pháp học máy khác, như gradient boosting, cũng có thể được sử dụng (mặc dù nó yêu cầu một vài bước bổ sung để tránh tình trạng quá khớp).
#
# Ở đây, tôi sẽ sử dụng hồi quy lô-gít. Điều này có nghĩa là tôi sẽ phải biến đổi các biến phân loại trong tập dữ liệu thành các biến giả.
# +
# Columns to one-hot encode (categorical) vs. keep as-is (continuous).
categ = ["ethnicity", "gender", "school_urbanicity"]
cont = ["school_mindset", "school_achievement", "school_ethnic_minority", "school_poverty", "school_size"]
data_with_categ = pd.concat([
    data.drop(columns=categ), # dataset without the categorical features
    pd.get_dummies(data[categ], columns=categ, drop_first=False)# dataset without categorical converted to dummies
], axis=1)
print(data_with_categ.shape)
# -
# Bây giờ, hãy sử dụng hồi quy lô-gít để ước lượng điểm xu hướng.
# +
from sklearn.linear_model import LogisticRegression
T = 'intervention'
Y = 'achievement_score'
# Features: everything except the school id, the treatment and the outcome.
X = data_with_categ.columns.drop(['schoolid', T, Y])
# Very large C effectively disables sklearn's default L2 regularization.
ps_model = LogisticRegression(C=1e6).fit(data_with_categ[X], data_with_categ[T])
# Estimated propensity score P(T=1|X) for every student.
data_ps = data.assign(propensity_score=ps_model.predict_proba(data_with_categ[X])[:, 1])
data_ps[["intervention", "achievement_score", "propensity_score"]].head()
# -
# Đầu tiên, chúng ta có thể chắc rằng trọng số điểm xu hướng thực sự tái cấu trúc tổng thể như thể mọi người đều nhận can thiệp như nhau. Bằng cách đặt trọng số \\(1/P(x)\\), nó tạo ra tổng thể mà mọi người đều được nhận can thiệp và bằng cách đặt trọng số \\(1/(1-P(x))\\), nó tạo ra tổng thể mà mọi người không nhận can thiệp.
# IPTW weights: 1/P(x) for the treated, 1/(1-P(x)) for the controls.
weight_t = 1/data_ps.query("intervention==1")["propensity_score"]
weight_nt = 1/(1-data_ps.query("intervention==0")["propensity_score"])
# Each reweighted pseudo-population should be about the original sample size.
print("Kích Thước Mẫu ban đầu", data.shape[0])
print("Kích Thước Mẫu của Tổng thể được được can thiệp", sum(weight_t))
print("Kích Thước Mẫu của Tổng thể đối chứng", sum(weight_nt))
# Chúng ta cũng có thể sử dụng điểm xu hướng để tìm bằng chứng của nhiễu. Nếu một phân đoạn của tổng thể có điểm xu hướng cao hơn một phân đoạn khác, điều này có nghĩa là một điều gì đó không ngẫu nhiên đang gây ra can thiệp. Nếu điều tương tự cũng gây ra kết quả, chúng ta có nhiễu. Trong trường hợp của chúng ta, chúng ta có thể thấy rằng những sinh viên có tham vọng lớn hơn cũng có xác suất tham dự hội thảo về tư duy cầu tiến cao hơn.
# Propensity rising with ambition is evidence of confounding.
sns.boxplot(x="success_expect", y="propensity_score", data=data_ps)
plt.title("Confounding Evidence");
# Chúng tôi cũng phải kiểm tra xem có sự chồng lấn giữa tổng thể được can thiệp và đối chứng hay không. Để làm được như vậy, chúng ta có thể thấy phân phối thực nghiệm của điểm xu hướng trên nhóm can thiệp và nhóm đối chứng. Nhìn vào biểu đồ dưới đây, chúng ta có thể thấy rằng không ai có điểm xu hướng bằng 0 và ngay cả ở những vùng thấp hơn của điểm xu hướng, chúng ta có thể tìm thấy cả những cá nhân được can thiệp và không được can thiệp. Đây là những gì chúng tôi gọi là một tổng thể được can thiệp và đối chứng cân bằng.
# Overlap (positivity) check: both groups should span the propensity range.
sns.distplot(data_ps.query("intervention==0")["propensity_score"], kde=False, label="Non Treated")
sns.distplot(data_ps.query("intervention==1")["propensity_score"], kde=False, label="Treated")
plt.title("Positivity Check")
plt.legend();
# Cuối cùng, chúng ta sử dụng mô hình ước lượng điểm xu hướng theo trọng số để thu được ước lượng tác động can thiệp trung bình.
# +
# IPTW estimator: weight = (T - P(x)) / (P(x) * (1 - P(x))), i.e.
# +1/P(x) for the treated and -1/(1-P(x)) for the controls.
weight = ((data_ps["intervention"]-data_ps["propensity_score"]) /
          (data_ps["propensity_score"]*(1-data_ps["propensity_score"])))
# Counterfactual means: everyone treated (y1) and nobody treated (y0),
# using the group weights computed in the previous cell.
y1 = sum(data_ps.query("intervention==1")["achievement_score"]*weight_t) / len(data)
y0 = sum(data_ps.query("intervention==0")["achievement_score"]*weight_nt) / len(data)
ate = np.mean(weight * data_ps["achievement_score"])
print("Y1:", y1)
print("Y0:", y0)
# Reuse the precomputed `ate` (the original recomputed the same expression
# inline and left the `ate` variable unused).
print("ATE", ate)
# -
# Điểm xu hướng theo trọng số nói rằng chúng ta nên kỳ vọng các cá nhân được can thiệp thành công ở mức 0.38 lần độ lệch chuẩn cao hơn so với các cá nhân đối chứng. Chúng ta cũng có thể thấy rằng nếu không có ai nhận can thiệp, chúng ta nên kỳ vọng mức độ thành công chung sẽ thấp hơn 0,12 độ lệch chuẩn so với hiện tại. Bằng lập luận tương tự, chúng ta nên kỳ vọng mức độ thành công chung sẽ cao hơn 0,25 độ lệch chuẩn nếu chúng ta tổ chức hội thảo cho tất cả mọi người. Đối chiếu điều này với ước lượng ATE 0,47 mà chúng ta có được nếu chỉ so sánh đơn giản giữa nhóm can thiệp và nhóm đối chứng. Đây là bằng chứng cho thấy thiên lệch mà chúng ta có thực sự mang giá trị dương và việc kiểm soát X cho chúng ta một ước lượng khiêm tốn hơn về tác động của tư duy phát triển.
# ## Sai Số Chuẩn
#
# 
#
# Để tính toán sai số chuẩn cho mô hình ước lượng IPTW, chúng ta có thể sử dụng công thức phương sai của bình quân gia quyền.
#
# $
# \sigma^2_w = \dfrac{\sum_{i=1}^{n}w_i(y_i-\hat{\mu})^2}{\sum_{i=1}^{n}w_i}
# $
#
# Tuy nhiên, chúng ta chỉ có thể sử dụng công thức này nếu chúng ta có điểm xu hướng thực. Nếu chúng ta đang sử dụng phiên bản ước lượng của nó, \\(\hat{P}(x)\\), chúng ta cần tính đến các lỗi trong quá trình ước lượng. Cách dễ nhất để làm điều này là bootstrap toàn bộ quy trình. Điều này đạt được bằng cách lấy mẫu có thay thế từ dữ liệu gốc và tính toán ATE như chúng tôi đã làm ở trên. Sau đó, chúng tôi lặp lại điều này nhiều lần để có được phân phối của ước lượng ATE.
# +
from joblib import Parallel, delayed # for parallel processing


def run_ps(df, X, T, y):
    """IPTW ATE estimator: fit a propensity model, then average the weighted outcome."""
    propensity = LogisticRegression(C=1e6).fit(df[X], df[T]).predict_proba(df[X])[:, 1]
    iptw = (df[T] - propensity) / (propensity * (1 - propensity))
    return np.mean(iptw * df[y])


np.random.seed(88)
# Bootstrap: resample with replacement and recompute the ATE 1000 times.
bootstrap_sample = 1000
ates = Parallel(n_jobs=4)(delayed(run_ps)(data_with_categ.sample(frac=1, replace=True), X, T, Y)
                          for _ in range(bootstrap_sample))
ates = np.array(ates)
# -
# ATE là giá trị trung bình của các mẫu bootstrap và sai số chuẩn là độ lệch chuẩn của những mẫu này.
print(f"ATE 95% CI: {ates.mean()} +- {1.96*ates.std()}")
# Chúng ta có thể mô phỏng các mẫu bootstrap trông như thế nào cùng với các khoảng tin cậy.
# Bootstrap distribution of the ATE with the 95% CI bounds marked.
sns.distplot(ates, kde=False)
plt.vlines(ates.mean()-1.96*ates.std(), 0, 20, linestyles="dotted")
plt.vlines(ates.mean()+1.96*ates.std(), 0, 20, linestyles="dotted", label="95% CI")
plt.title("ATE Bootstrap Distribution")
plt.legend();
# ## Các vấn đề phổ biến của Điểm Xu Hướng
#
# Với vai trò là một chuyên gia dữ liệu, tôi hiểu rất khó cưỡng lại mong muốn tận dụng sử dụng sức mạnh của bộ công cụ học máy để ước lượng điểm xu hướng chính xác nhất có thể. Bạn có thể nhanh chóng bị choáng ngợp bởi tất cả các phép tối ưu hoá AUC, kiểm chứng chéo, và điều chỉnh siêu tham số Bayes. Chúng tôi không nói rằng bạn không nên làm điều đó. Trên thực tế, tất cả lý thuyết về điểm xu hướng và học máy đều mới xuất hiện, vì vậy có rất nhiều điều chúng ta chưa biết. Nhưng rất đáng để tìm hiểu một phần nào đó trước.
#
# Điều đầu tiên là chất lượng dự báo của điểm xu hướng không chuyển thành các đặc tính cân bằng của nó. Xuất phát từ lĩnh vực học máy, một trong những mặt thách thức nhất khi làm quen với suy luận nhân quả là việc coi mọi thứ như một bài toán dự báo. Trên thực tế, việc tối đa hóa sức mạnh dự báo của điểm xu hướng thậm chí có thể làm ảnh hưởng đến mục đích suy luận nhân quả. **Điểm xu hướng không cần thiết phải dự báo can thiệp tốt. Nó chỉ cần bao gồm tất cả các biến nhiễu**. Nếu chúng ta đưa vào các biến rất tốt cho việc dự báo can thiệp nhưng không liên quan đến kết quả, nó thực sự sẽ làm tăng phương sai của mô hình ước lượng điểm xu hướng. Điều này tương tự như vấn đề hồi quy tuyến tính gặp phải khi chúng ta đưa vào các biến tương quan với can thiệp nhưng không liên quan đến kết quả.
#
# 
#
# Để thấy điều này, hãy xem xét ví dụ sau (phỏng theo Sách của Hernán). Bạn có 2 trường học, một trong số đó áp dụng hội thảo về tư duy cầu tiến cho 99% sinh viên và trường còn lại áp dụng cho 1%. Giả sử rằng các trường không có tác động đến tác động can thiệp (trừ việc thông qua can thiệp), vì vậy không cần thiết phải kiểm soát nó. Nếu bạn thêm biến trường học vào mô hình điểm xu hướng, nó sẽ có khả năng dự báo rất cao. Tuy nhiên, một cách tình cờ, chúng ta có thể kết thúc với một mẫu mà tất cả mọi người trong trường A đều được can thiệp, dẫn đến điểm xu hướng là 1 cho trường đó, và điều này sẽ dẫn đến một phương sai vô hạn. Đây là một ví dụ cực đoan, nhưng hãy xem nó sẽ hoạt động như thế nào với dữ liệu mô phỏng.
# Two simulated schools: A treats ~99% of its students, B only ~1%.
np.random.seed(42)
school_a = pd.DataFrame(dict(T=np.random.binomial(1, .99, 400), school=0, intercept=1))
school_b = pd.DataFrame(dict(T=np.random.binomial(1, .01, 400), school=1, intercept=1))
# Outcome ~ Normal(1 + 0.1*T, 1): true effect 0.1, school itself has no effect on y.
ex_data = pd.concat([school_a, school_b]).assign(y = lambda d: np.random.normal(1 + 0.1 * d["T"]))
ex_data.head()
# Sau khi mô phỏng dữ liệu này, chúng ta chạy bootstrap với thuật toán Điểm Xu Hướng hai lần. Lần thứ nhất bao gồm trường học như một thuộc tính của mô hình điểm xu hướng. Lần thứ hai, chúng ta không cho trường học vào mô hình.
# Bootstrap the IPTW estimator twice: once including the (non-confounding but
# highly treatment-predictive) school feature, once with an intercept only.
ate_w_f = np.array([run_ps(ex_data.sample(frac=1, replace=True), ["school"], "T", "y") for _ in range(500)])
ate_wo_f = np.array([run_ps(ex_data.sample(frac=1, replace=True), ["intercept"], "T", "y") for _ in range(500)])
sns.distplot(ate_w_f, kde=False, label="PS W School")
sns.distplot(ate_wo_f, kde=False, label="PS W/O School")
plt.legend();
# Như bạn có thể thấy, mô hình ước lượng điểm xu hướng bổ sung thuộc tính trường học có phương sai rất lớn, trong khi mô hình không có nó hoạt động tốt hơn nhiều. Ngoài ra, vì trường học không phải là biến nhiễu, nên mô hình không có nó cũng không chệch.
#
# Như đã nói, dự báo can thiệp đơn thuần không phải là vấn đề cần tập trung. Chúng ta thực sự cần xây dựng dự báo theo cách kiểm soát nhiễu, không phải theo cách dự báo can thiệp. Điều này dẫn đến một vấn đề khác thường gặp trong các phương pháp điểm xu hướng. Trong ví dụ về tư duy của chúng ta, dữ liệu hóa ra rất cân bằng. Nhưng không phải dữ liệu nào cũng như vậy. Trong một số tình huống, nhóm can thiệp có xác suất can thiệp cao hơn nhiều so với nhóm đối chứng và phân phối điểm xu hướng không chồng lấn nhiều.
# Illustration of weak overlap: Beta(4,1) vs Beta(1,3) propensity
# distributions barely intersect, so positivity is nearly violated.
sns.distplot(np.random.beta(4,1,500), kde=False, label="Non Treated")
sns.distplot(np.random.beta(1,3,500), kde=False, label="Treated")
plt.title("Positivity Check")
plt.legend();
# Nếu điều này xảy ra, nó có nghĩa là điều kiện dương không mạnh lắm. Nếu một đối tượng được can thiệp có điểm xu hướng, chẳng hạn, 0,9 và điểm xu hướng tối đa của nhóm đối chứng là 0,7, chúng ta sẽ không có bất kỳ một đối chứng nào để so sánh với cá nhân có điểm xu hướng 0,9. Sự thiếu cân bằng này có thể tạo ra một số thiên lệch, bởi vì chúng ta sẽ phải ngoại suy tác động can thiệp cho các vùng chưa biết. Không chỉ vậy, các đối tượng có điểm xu hướng rất cao hoặc rất thấp có trọng số rất cao, làm tăng phương sai. Theo luật kinh nghiệm, bạn sẽ gặp rắc rối nếu bất kỳ trọng số nào lớn hơn 20 (điều này xảy ra với một đối tượng không được can thiệp có điểm xu hướng bằng 0,95 hoặc một đối tượng được can thiệp với điểm xu hướng bằng 0,05).
#
# Một giải pháp thay thế là cắt giảm trọng số để có giá trị tối đa là 20. Điều này sẽ làm giảm phương sai, nhưng nó thực sự sẽ tạo ra nhiều thiên lệch hơn. Thành thật mà nói, mặc dù đây là một cách làm phổ biến để giảm phương sai, chúng tôi không thực sự ưa thích nó. Bạn sẽ không bao giờ biết liệu thiên lệch mà bạn gây ra bằng việc cắt giảm có quá nhiều hay không. Ngoài ra, nếu các phân phối không chồng lấn, dữ liệu của bạn có thể không đủ để đưa ra kết luận nhân quả. Để có thêm một số cách nhìn trực quan về điều này, chúng ta có thể xem xét một kỹ thuật kết hợp điểm xu hướng và ghép cặp.
#
# ## Ghép Cặp Điểm Xu Hướng
#
# Như đã nói, bạn không cần phải kiểm soát X khi bạn có điểm xu hướng. Kiểm soát điểm xu hướng là đủ. Do đó, bạn có thể coi điểm xu hướng như thực hiện thao tác giảm chiều dữ liệu trên không gian thuộc tính. Nó gộp tất cả các thuộc tính trong X thành một chiều chỉ định can thiệp. Vì lý do này, chúng ta có thể coi điểm xu hướng là một thuộc tính đầu vào cho các mô hình khác. Lấy một mô hình hồi quy làm ví dụ.
smf.ols("achievement_score ~ intervention + propensity_score", data=data_ps).fit().summary().tables[1]
# Nếu chúng ta kiểm soát điểm xu hướng, chúng ta ước lượng được ATE bằng 0,39 thấp hơn 0,47 chúng ta thu được trước đó với mô hình hồi quy không kiểm soát điểm xu hướng. Chúng ta cũng có thể sử dụng ghép cặp trên điểm xu hướng. Lần này, thay vì cố gắng tìm các cặp tương tự về tất cả các đặc điểm X, chúng ta có thể tìm các cặp chỉ có cùng điểm xu hướng.
#
# Đây quả là một cải tiến lớn so với mô hình ước lượng ghép cặp, vì nó giải quyết được lời nguyền đa chiều. Ngoài ra, nếu một thuộc tính không quan trọng đối với chỉ định can thiệp, thì mô hình điểm xu hướng sẽ xác định và đưa ra mức độ quan trọng thấp cho nó khi khớp với cơ chế can thiệp. Mặt khác, ghép cặp trên các thuộc tính vẫn sẽ cố gắng tìm các cặp mà các cá nhân giống nhau về các thuộc tính không quan trọng này.
# +
from causalinference import CausalModel

# Match treated and control units on the propensity score alone:
# 1 nearest neighbour per unit, with bias adjustment for residual imbalance.
cm = CausalModel(
    Y=data_ps["achievement_score"].values,   # outcome
    D=data_ps["intervention"].values,        # treatment indicator
    X=data_ps[["propensity_score"]].values   # single matching covariate
)
cm.est_via_matching(matches=1, bias_adj=True)
print(cm.estimates)
# -
# Như chúng ta thấy, chúng ta cũng thu được ATE bằng 0,38, điều này đồng nhất với những gì chúng ta đã thấy trước đó với điểm xu hướng theo trọng số. Ghép cặp trên điểm xu hướng cũng cung cấp cho chúng ta một cách nhìn trực quan về lý do tại sao có sự chồng lấn nhỏ trong điểm xu hướng giữa nhóm can thiệp và nhóm đối chứng là nguy hiểm. Nếu điều này xảy ra, ghép cặp dựa trên sự khác biệt điểm xu hướng sẽ lớn, dẫn đến thiên lệch.
#
# Một lưu ý cuối cùng ở đây là các sai số chuẩn trên là sai, vì chúng không giải thích cho sự không chắc chắn trong việc ước lượng điểm xu hướng. Đáng tiếc rằng [bootstrap không hoạt động với ghép cặp](https://economics.mit.edu/files/11862). Ngoài ra, lý thuyết trên còn rất mới nên các phương pháp điểm xu hướng với sai số chuẩn chưa được thực hiện trên Python. Vì lý do này, chúng tôi không thấy nhiều ghép cặp điểm xu hướng trong Python.
#
# ## Ý tưởng chính
#
# Ở đây, chúng ta đã biết rằng xác suất nhận can thiệp được gọi là điểm xu hướng và chúng ta có thể sử dụng điểm này làm điểm cân bằng. Điều này có nghĩa là, nếu chúng ta có điểm xu hướng, chúng ta không cần phải kiểm soát trực tiếp các biến nhiễu. Việc kiểm soát điểm xu hướng là đủ để xác định tác động nhân quả. Chúng ta đã thấy cách điểm xu hướng hoạt động như giảm chiều dữ liệu trong không gian biến nhiễu.
#
# Những dự đoán này cho phép chúng ta rút ra một mô hình ước lượng trọng số cho suy luận nhân quả. Không chỉ vậy, chúng ta đã thấy cách điểm xu hướng có thể được sử dụng cùng với các phương pháp khác để kiểm soát thiên lệch nhiễu.
#
# Cuối cùng, chúng ta đã xem xét một số vấn đề ngoại suy mà chúng ta có thể gặp phải nếu chúng ta không thể có sự chồng lấn tốt giữa phân phối điểm xu hướng được can thiệp và đối chứng.
# ## Tài liệu tham khảo
# Tôi muốn dành loạt bài viết này như lời cảm ơn tới <NAME>, <NAME> và <NAME> bởi lớp học Kinh tế lượng tuyệt vời của họ. Hầu hết những ý tưởng trong chương này được đúc kết từ những bài giảng của họ tại Hiệp hội kinh tế Hoa Kỳ. Lắng nghe các bài giảng của họ giúp tôi có thêm động lực đi qua một năm 2020 đầy khó khăn này.
#
# * [Cross-Section Econometrics](https://www.aeaweb.org/conference/cont-ed/2017-webcasts)
# * [Mastering Mostly Harmless Econometrics](https://www.aeaweb.org/conference/cont-ed/2020-webcasts)
#
# Tôi cũng trích dẫn một cuốn sách tuyệt vời từ Angrist. Họ đã thành công trong việc chỉ cho tôi thấy rằng Kinh tế lượng, hoặc là Lượng theo cách gọi của họ, không chỉ cực kỳ hữu ích mà còn vô cùng thú vị.
#
# * [Mostly Harmless Econometrics](https://www.mostlyharmlesseconometrics.com/)
# * [Mastering 'Metrics](https://www.masteringmetrics.com/)
#
# Cuối cùng, không thể không nhắc đến cuốn sách được viết bởi <NAME> và <NAME>. Nó là người bạn đồng hành đáng tin cậy giúp tôi tìm lời giải đáp cho những câu hỏi hóc búa nhất về tính nhân quả.
#
# * [Causal Inference Book](https://www.hsph.harvard.edu/miguel-hernan/causal-inference-book/)
# # Bảng Từ Viết tắt
# |Viết tắt| Tiếng Anh | Tiếng Việt |
# | --- | --- | --- |
# |ATE|Average Treatment Effect|Tác động Can thiệp Trung bình|
# |AUC|Area Under the Curve|Diện tích Dưới Đường cong (ROC)|
# |CDF|Cumulative Distribution Function|Hàm Phân phối Tích lũy|
# |IPTW|Inverse Probability of Treatment Weighting|Trọng số theo Nghịch đảo của Xác suất Can thiệp|
#
# # Bảng Thuật ngữ
# | Thuật ngữ | Tiếng Anh |
# | --- | --- |
# |biến giả|dummy, dummy variable|
# |biến nhiễu|confounder, confounding variable|
# |biến phân loại|categorical variable|
# |biến đại diện|proxy, surrogate variable|
# |bootstrap|bootstrap|
# |bình quân gia quyền|weighted average|
# |bộ dữ liệu mô phỏng|simulated dataset|
# |chuyên gia dữ liệu|data scientist|
# |chệch|biased|
# |chỉ định can thiệp|treatment assignment|
# |cố định|condition on|
# |dữ liệu|data|
# |dữ liệu mô phỏng|simulated data|
# |ghép cặp|matching|
# |ghép cặp điểm xu hướng|propensity score matching|
# |giá trị kỳ vọng|expected value|
# |giá trị trung bình|mean|
# |giả thiết điều kiện dương|positivity assumption|
# |giảm chiều dữ liệu|dimensionality reduction|
# |gradient boosting|gradient boosting|
# |học máy|machine learning|
# |hồi quy|regression, regress|
# |hồi quy lô-gít|logistic regression|
# |hồi quy tuyến tính|linear regression|
# |khoảng tin cậy|confidence interval|
# |kiểm chứng chéo|cross validation|
# |kích thước mẫu|sample size|
# |kết quả|outcome|
# |kết quả tiềm năng|potential outcome|
# |luật kỳ vọng lặp|law of iterated expectation|
# |lời nguyền đa chiều|curse of dimensionality|
# |mô hình ước lượng|estimator|
# |mô hình ước lượng điểm xu hướng theo trọng số|propensity score weighting estimator|
# |mẫu bootstrap|bootstrap sample|
# |nghịch đảo của xác suất can thiệp|inverse probability of treatment|
# |ngoại suy|extrapolation, extrapolate|
# |nhiễu|confounding|
# |nhóm can thiệp|treated group|
# |nhóm được can thiệp|treatment group, test group|
# |nhóm đối chứng|control group, untreated group|
# |phân phối|distribution|
# |phân phối chuẩn|normal distribution|
# |phương sai|variance|
# |quá khớp|overfitting|
# |sai số chuẩn|standard error|
# |suy luận nhân quả|causal inference, causal reasoning|
# |thiên lệch|bias|
# |thuật toán|algorithm|
# |thuộc tính|feature|
# |thống kê|statistics|
# |tác động can thiệp|treatment effect, treatment impact|
# |tác động can thiệp trung bình|average treatment effect|
# |tối ưu hoá|optimisation|
# |tổng thể|population|
# |xác suất có điều kiện|conditional probability|
# |điều chỉnh siêu tham số|hyperparameter tuning|
# |điểm cân bằng|balancing score|
# |điểm xu hướng|propensity score|
# |điểm xu hướng theo trọng số|propensity score weighting|
# |điểm xu hướng theo trọng số |propensity weighting|
# |được can thiệp|treated|
# |đối chứng|untreated, non-treated|
# |độ lệch chuẩn|standard deviation|
# |độc lập có điều kiện|conditionally independent, conditional independence|
#
| ipynb/11-Điểm-xu-hướng-DT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pyfastx
from fqfa.fastq.fastq import parse_fastq_reads
# # Benchmark 1: list of reads
#
# This code creates a list containing all the reads in the file.
# Note that the data structures for the reads are quite different, with two being package-specific objects and one being a tuple.
# ## pyfastx with index
#
# Much of the time spent in the first example is likely spent building the ``.fxi`` index file.
# This file enables direct access into the FASTQ file, which we are not using here.
# The index is quite large, much larger than the reads in this case:
#
# ```
# 334M BRCA1_input_sample.fq
# 48M BRCA1_input_sample.fq.bz2
# 511M BRCA1_input_sample.fq.fxi
# 68M BRCA1_input_sample.fq.gz
# 513M BRCA1_input_sample.fq.gz.fxi
# ```
# Materialise every read as a pyfastx object (builds/uses the on-disk .fxi index).
# %time reads = [x for x in pyfastx.Fastq("BRCA1_input_sample.fq")]
for x in reads[:5]:
    print(repr(x))
del reads  # release the list before the next benchmark
# ## pyfastx without index
#
# This is by far the fastest for just reading data from the file, but it doesn't perform any extra computation or quality value conversion.
# Reads come back as plain (name, sequence, quality-string) tuples here.
# %time reads = [x for x in pyfastx.Fastq("BRCA1_input_sample.fq", build_index=False)]
for x in reads[:5]:
    print(x)
del reads
# ## fqfa
#
# Unlike pyfastx, fqfa takes an open file handle rather than a file name.
# In these examples, this is addressed using a context created by a with statement.
with open("BRCA1_input_sample.fq") as handle:
    # %time reads = [x for x in parse_fastq_reads(handle)]
for x in reads[:5]:
    print(x)
del reads
# # Benchmark 2: summarized quality statistics
#
# This code calculates the median average read quality for all reads in the file.
from statistics import mean, median
# ## pyfastx with index
#
# pyfastx provides integer quality values as part of its FASTQ read data structure.
# pyfastx exposes per-base integer qualities via the .quali attribute.
# %time read_quals = [mean(x.quali) for x in pyfastx.Fastq("BRCA1_input_sample.fq")]
print(f"Median average quality is {median(read_quals)}")
del read_quals
# ## pyfastx without index
#
# The timing here is quite a bit closer to the others, since the conversion and calculation has not already been performed as part of processing the input file.
# Convert each Phred+33 quality character to an integer score by hand (ord(c) - 33).
# %time read_quals = [mean([ord(c) - 33 for c in x[2]]) for x in pyfastx.Fastq("BRCA1_input_sample.fq", build_index=False)]
print(f"Median average quality is {median(read_quals)}")
del read_quals
# ## fqfa
#
# This code uses the ``average_quality()`` method implemented by the FastqRead class.
with open("BRCA1_input_sample.fq") as handle:
    # %time read_quals = [x.average_quality() for x in parse_fastq_reads(handle)]
print(f"Median average quality is {median(read_quals)}")
del read_quals
# # Benchmark 3: filtering reads on quality
#
# This code creates a list of reads for which all bases are at least Q20.
# The performance and usage in this section is quite a bit faster than Benchmark 2 following recent performance improvements in pyfastx.
# ## pyfastx with index
# Keep only reads whose minimum per-base quality is at least Q20.
# %time filt_reads = [x for x in pyfastx.Fastq("BRCA1_input_sample.fq") if min(x.quali) >= 20]
print(f"Kept {len(filt_reads)} reads after applying filter.")
del filt_reads
# ## pyfastx without index
# Same Q20 filter, decoding the Phred+33 quality string manually.
# %time filt_reads = [x for x in pyfastx.Fastq("BRCA1_input_sample.fq", build_index=False) if min([ord(c) - 33 for c in x[2]]) >= 20]
print(f"Kept {len(filt_reads)} reads after applying filter.")
del filt_reads
# ## fqfa
#
# This code uses the ``min_quality()`` method implemented by the FastqRead class.
with open("BRCA1_input_sample.fq") as handle:
    # %time filt_reads = [x for x in parse_fastq_reads(handle) if x.min_quality() >= 20]
print(f"Kept {len(filt_reads)} reads after applying filter.")
del filt_reads
| docs/notebooks/benchmarks_raw.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 继续挑战,越来越有趣了
#
# ---
# ### 第29题地址[guido.html](http://www.pythonchallenge.com/pc/ring/guido.html)
# * <img src="http://repeat:switch@www.pythonchallenge.com/pc/ring/whoisit.jpg" alt="whoisit.jpg" width="30%" height="30%">
# * 网页标题是`silence!`,题目内容为空,[源码](view-source:http://www.pythonchallenge.com/pc/ring/guido.html)里面没有隐藏内容,但后面有一堆的空行。
# 图片里面有好多元素,主体一个用眼镜和杯子等组合成的面罩形状,旁边是一个啤酒瓶和一个画着鸡的不知道什么东西。<br>
# 而标题是`silence`,应该对应源码后面那些空行吧。<br>
# 我们仔细看下空行,发现每一行的长度是不一样的!所以每一行的空格数对应着字节码:
# +
import re
import requests

# Fetch the page with HTTP basic auth. The hidden message is encoded in the
# lengths of the trailing space-only lines of the HTML source: each line's
# length is one byte of the payload.
with requests.Session() as sess:
    sess.auth = ('repeat', 'switch')
    response = sess.get('http://www.pythonchallenge.com/pc/ring/guido.html').text
    silence = bytes(len(line) for line in response.splitlines() if re.match(r'^( )*$', line))
    print(silence)
# -
# The bytes start with `BZh`, i.e. bzip2-compressed data:
# +
from bz2 import decompress

# Decompress the byte string recovered from the blank lines to reveal the message.
print(decompress(silence).decode())
# -
# 图片名字叫`whoisit.jpg`,也就是说问那些空行是什么东西。好了,打开链接到[yankeedoodle.html](http://www.pythonchallenge.com/pc/ring/yankeedoodle.html),来到了下一题。
# 顺便科普下`yankeedoodle`**洋基曲**:
# > 《洋基歌》(Yankee Doodle)是一首美国传统歌曲,其起源可追溯至美国七年战争时期。今天这首歌在美国通常被当作爱国歌曲,它同时还是康乃狄克州的州歌。<br>
# > 这首歌的真正源头仍不清楚。常见的说法认为这首歌是在美国独立战争以前英军用以嘲笑殖民地居民粗俗的衣着和举止的,而“洋基”一词是对参加法印战争的新英格兰人的轻蔑之词。这首歌的曲调很可能来自与儿歌《露茜的钱袋》(<NAME>),其中一个版本的歌词可能来自于一名英国外科医生理查德·沙克伯勒(<NAME>)。虽然这首歌的歌词带有贬义,然而独立战争期间美军却采用《洋基歌》作为他们自己的歌以反讽敌军,表明他们对自己朴素的家纺衣着和毫不矫揉造作的举止感到自豪。洋基歌的歌词有许多不同版本。多年来,这首歌在美国一直被当作非正式的国歌,也是最受欢迎的儿歌。在各类电影,电视和动画片中时常能听见这首歌曲。
# > ###### From [wikipedia.org](https://zh.wikipedia.org/wiki/%E6%B4%8B%E5%9F%BA%E6%AD%8C)
#
# 所以这是一首某种性质的儿歌,也就跟这题的图片对应上了。
# ### 总结:这一题需要一些观察力足矣。
# ###### 本题代码地址[29_guido.ipynb](https://github.com/StevenPZChan/pythonchallenge/blob/notebook/nbfiles/29_guido.ipynb)
| nbfiles/29_guido.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME>
# ## Data Science and Business Analytics Intern @ The Sparks Foundation
# ### Topic : Exploratory Data Analysis (EDA) - Terrorism
# ### Dataset : globalterrorismdb_0718dist.csv [https://bit.ly/2TK5Xn5]
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# Load the Global Terrorism Database export (latin1 avoids Unicode decode errors).
data = pd.read_csv("globalterrorismdb_0718dist.csv",encoding='latin1')
data.head()
data.columns.values
# Rename the cryptic GTD column names to readable ones.
data.rename(columns={'iyear':'Year','imonth':'Month','iday':"day",'gname':'Group','country_txt':'Country','region_txt':'Region','provstate':'State','city':'City','latitude':'latitude',
                     'longitude':'longitude','summary':'summary','attacktype1_txt':'Attacktype','targtype1_txt':'Targettype','weaptype1_txt':'Weapon','nkill':'kill',
                     'nwound':'Wound'},inplace=True)
# Keep only the columns used in this analysis.
data = data[['Year','Month','day','Country','State','Region','City','latitude','longitude',"Attacktype",'kill',
             'Wound','target1','summary','Group','Targettype','Weapon','motive']]
data.head()
data.shape
data.isnull().sum()
# Treat missing casualty counts as zero before deriving the combined total.
data['Wound'] = data['Wound'].fillna(0)
data['kill'] = data['kill'].fillna(0)
data['Casualities'] = data['kill'] + data['Wound']
data.info()
data.describe()
# Attacks per year.
# NOTE(review): x uses unique() (order of appearance) while y is sorted by
# year index — verify these align for this dataset before trusting the plot.
year = data['Year'].unique()
years_count = data['Year'].value_counts(dropna = False).sort_index()
plt.figure(figsize = (18,10))
sns.barplot(x = year,
            y = years_count,
            palette = "tab10")
plt.xticks(rotation = 50)
plt.xlabel('Attacking Year',fontsize=20)
plt.ylabel('Number of Attacks Each Year',fontsize=20)
plt.title('Attacks In Years',fontsize=30)
plt.show()
# Area view of attack counts per region over time.
pd.crosstab(data.Year, data.Region).plot(kind='area',stacked=False,figsize=(20,10))
plt.title('Terrorist Activities By Region In Each Year',fontsize=25)
plt.ylabel('Number of Attacks',fontsize=20)
plt.xlabel("Year",fontsize=20)
plt.show()
# Ten most-attacked countries.
attack = data.Country.value_counts()[:10]
attack
# Slice [1:10] skips the most frequent label (presumably "Unknown" — verify)
# so only named groups are listed.
data.Group.value_counts()[1:10]
# Bar chart of the ten countries with the most recorded attacks.
plt.subplots(figsize=(20,10))
# Compute the counts once and pass x/y by keyword: seaborn 0.12 removed
# positional data arguments, so the old positional call raises TypeError there.
top_countries = data['Country'].value_counts()[:10]
sns.barplot(x=top_countries.index, y=top_countries.values, palette='YlOrBr_r')
plt.title('Top Countries Affected')
plt.xlabel('Countries')
plt.ylabel('Count')
plt.xticks(rotation = 50)
plt.show()
# Total people killed per year.
df = data[['Year','kill']].groupby(['Year']).sum()
fig, ax4 = plt.subplots(figsize=(20,10))
df.plot(kind='bar',alpha=0.7,ax=ax4)
plt.xticks(rotation = 50)
plt.title("People Died Due To Attack",fontsize=25)
plt.ylabel("Number of killed peope",fontsize=20)
plt.xlabel('Year',fontsize=20)
# Hide the top and right spines for a cleaner frame.
top_side = ax4.spines["top"]
top_side.set_visible(False)
right_side = ax4.spines["right"]
right_side.set_visible(False)
# Ten most-attacked cities.
data['City'].value_counts().to_frame().sort_values('City',axis=0,ascending=False).head(10).plot(kind='bar',figsize=(20,10),color='blue')
plt.xticks(rotation = 50)
plt.xlabel("City",fontsize=15)
plt.ylabel("Number of attack",fontsize=15)
plt.title("Top 10 most effected city",fontsize=20)
plt.show()
# Frequency of each attack type.
data['Attacktype'].value_counts().plot(kind='bar',figsize=(20,10),color='magenta')
plt.xticks(rotation = 50)
plt.xlabel("Attacktype",fontsize=15)
plt.ylabel("Number of attack",fontsize=15)
plt.title("Name of attacktype",fontsize=20)
plt.show()
# Deaths per attack type.
data[['Attacktype','kill']].groupby(["Attacktype"],axis=0).sum().plot(kind='bar',figsize=(20,10),color=['darkslateblue'])
plt.xticks(rotation=50)
plt.title("Number of killed ",fontsize=20)
plt.ylabel('Number of people',fontsize=15)
plt.xlabel('Attack type',fontsize=15)
plt.show()
# Wounded per attack type.
data[['Attacktype','Wound']].groupby(["Attacktype"],axis=0).sum().plot(kind='bar',figsize=(20,10),color=['cyan'])
plt.xticks(rotation=50)
plt.title("Number of wounded ",fontsize=20)
plt.ylabel('Number of people',fontsize=15)
plt.xlabel('Attack type',fontsize=15)
plt.show()
# Distribution of attacks by target type, most to least frequent.
plt.subplots(figsize=(20,10))
# Pass data by keyword: seaborn 0.12 removed positional data arguments,
# so the old positional call raises TypeError there.
sns.countplot(x=data["Targettype"],
              order=data['Targettype'].value_counts().index,
              palette="gist_heat",
              edgecolor=sns.color_palette("mako"))
plt.xticks(rotation=90)
# Fixed labels: this plot counts target types, not attack types per year.
plt.xlabel("Targettype",fontsize=15)
plt.ylabel("count",fontsize=15)
plt.title("Attacks per target type",fontsize=20)
plt.show()
# Top 10 groups by number of attacks, excluding the "Unknown" placeholder.
data['Group'].value_counts().to_frame().drop('Unknown').head(10).plot(kind='bar',color='green',figsize=(20,10))
plt.title("Top 10 terrorist group attack",fontsize=20)
plt.xlabel("terrorist group name",fontsize=15)
plt.ylabel("Attack number",fontsize=15)
plt.show()
# Top 10 groups by total number killed.
data[['Group','kill']].groupby(['Group'],axis=0).sum().drop('Unknown').sort_values('kill',ascending=False).head(10).plot(kind='bar',color='yellow',figsize=(20,10))
plt.title("Top 10 terrorist group attack",fontsize=20)
plt.xlabel("terrorist group name",fontsize=15)
plt.ylabel("No of killed people",fontsize=15)
plt.show()
# Deadliest (group, country) pairs.
df=data[['Group','Country','kill']]
df=df.groupby(['Group','Country'],axis=0).sum().sort_values('kill',ascending=False).drop('Unknown').reset_index().head(10)
df
# Overall death toll, plus pivot summaries by attack type and country.
kill = data.loc[:,'kill']
print('Number of people killed by terror attack:', int(sum(kill.dropna())))
typeKill = data.pivot_table(columns='Attacktype', values='kill', aggfunc='sum')
typeKill
countryKill = data.pivot_table(columns='Country', values='kill', aggfunc='sum')
countryKill
# **Conclusion and Results :**
# - Country with the most attacks: **Iraq**
#
# - City with the most attacks: **Baghdad**
#
# - Region with the most attacks: **Middle East & North Africa**
#
# - Year with the most attacks: **2014**
#
# - Month with the most attacks: **5**
#
# - Group with the most attacks: **Taliban**
#
# - Most Attack Types: **Bombing/Explosion**
#
# ### Thank You!
| Task #4 Exploratory Data Analysis Terrorism.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: openvino_env
# language: python
# name: openvino_env
# ---
# # Optical Character Recognition (OCR) with OpenVINO
#
# This tutorial demonstrates how to perform optical character recognition (OCR) with OpenVINO models. It is a continuation of the [004-hello-detection](../004-hello-detection/004-hello-detection.ipynb) tutorial, which shows only text detection.
#
# The [horizontal-text-detection-0001](https://docs.openvinotoolkit.org/latest/omz_models_model_horizontal_text_detection_0001.html) and [text-recognition-resnet](https://docs.openvinotoolkit.org/latest/omz_models_model_text_recognition_resnet_fc.html) models are used together for text detection and then text recognition.
#
# In this tutorial, Open Model Zoo tools including Model Downloader, Model Converter and Info Dumper are used to download and convert the models from the [Open Model Zoo](https://github.com/openvinotoolkit/open_model_zoo). See the [104-model-tools](../104-model-tools/104-model-tools.ipynb) tutorial for more information about these tools.
# ## Imports
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
import json
import shutil
import sys
from pathlib import Path
import cv2
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import Markdown, display
from openvino.inference_engine import IECore
from PIL import Image
from yaspin import yaspin
sys.path.append("../utils")
from notebook_utils import load_image
# -
# ## Settings
# + tags=[] test_replace={"~/open_model_zoo_cache": "open_model_zoo_cache"}
# Inference Engine handle plus model names/precision used throughout the notebook.
ie = IECore()
model_dir = Path("model")
precision = "FP16"
detection_model = "horizontal-text-detection-0001"
recognition_model = "text-recognition-resnet-fc"
# Open Model Zoo downloads and cache live under the user's home directory.
base_model_dir = Path("~/open_model_zoo_models").expanduser()
omz_cache_dir = Path("~/open_model_zoo_cache").expanduser()
model_dir.mkdir(exist_ok=True)
# -
# ## Download Models
#
# The next cells will run Open Model Zoo's Model Downloader to download the detection and recognition models. If the models have been downloaded before, they will not be downloaded again.
download_command = f"omz_downloader --name {detection_model},{recognition_model} --output_dir {base_model_dir} --cache_dir {omz_cache_dir} --precision {precision} --num_attempts 3"
display(Markdown(f"Download command: `{download_command}`"))
# Run the downloader as a shell command, showing a spinner while it works.
with yaspin(text=f"Downloading {detection_model}, {recognition_model}") as sp:
    download_result = !$download_command
    sp.text = f"Finished downloading {detection_model}, {recognition_model}"
    sp.ok("✔")
# +
### The text-recognition-resnet-fc model consists of many files. All filenames are printed in
### Model Downloader's output. Uncomment the next two lines to show this output
# for line in download_result:
# print(line)
# -
# ## Convert Models
#
# The downloaded detection model is an Intel model, which is already in OpenVINO's Intermediate Representation (IR) format. The text recognition model is a public model which needs to be converted to IR. Since this model was downloaded from Open Model Zoo we can use Model Converter to convert the model to IR format.
#
# Model Converter output will be displayed. Conversion was succesful if the last lines of output include `[ SUCCESS ] Generated IR version 10 model.`
# Convert the public recognition model to OpenVINO IR with Model Converter.
convert_command = f"omz_converter --name {recognition_model} --precisions {precision} --download_dir {base_model_dir} --output_dir {base_model_dir}"
display(Markdown(f"Convert command: `{convert_command}`"))
display(Markdown(f"Converting {recognition_model}..."))
# ! $convert_command
# ## Copy Models
#
# To make it easier to work with the models, we copy the models from the Open Model Zoo tree to the _model_ subdirectory relative to this Jupyter notebook. We get the path to the Open Model Zoo model directory from Open Model Zoo's `omz_info_dumper` tool.
# +
# models_info_output = %sx omz_info_dumper --name $detection_model,$recognition_model
detection_model_info, recognition_model_info = json.loads(models_info_output.get_nlstr())
for model_info in (detection_model_info, recognition_model_info):
    omz_dir = Path(model_info["subdirectory"])
    omz_model_dir = base_model_dir / omz_dir / precision
    for model_file in omz_model_dir.iterdir():
        try:
            # NOTE(review): shutil.copyfile overwrites existing files and does
            # not raise FileExistsError, so this except branch likely never
            # triggers — confirm the intended "skip if present" behavior.
            shutil.copyfile(model_file, model_dir / model_file.name)
        except FileExistsError:
            pass
detection_model_path = (model_dir / detection_model).with_suffix(".xml")
recognition_model_path = (model_dir / recognition_model).with_suffix(".xml")
# + [markdown] tags=[]
# ## Object Detection
#
# Load the detection model, load an image, do inference and get the detection inference result.
#
# ### Load Detection Model
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Read the detection IR and load it onto the CPU device.
detection_net = ie.read_network(
    model=detection_model_path, weights=detection_model_path.with_suffix(".bin")
)
detection_exec_net = ie.load_network(detection_net, "CPU")
detection_input_layer = next(iter(detection_exec_net.input_info))
# + [markdown] tags=[]
# ### Load an Image
# + pycharm={"name": "#%%\n"} tags=[]
# image_file can point to a URL or local image
image_file = "https://github.com/openvinotoolkit/openvino_notebooks/raw/main/notebooks/004-hello-detection/data/intel_rnb.jpg"
image = load_image(image_file)
# N,C,H,W = batch size, number of channels, height, width
N, C, H, W = detection_net.input_info[detection_input_layer].tensor_desc.dims
# Resize image to meet network expected input sizes
resized_image = cv2.resize(image, (W, H))
# Reshape to network input shape: HWC -> NCHW with a batch dimension of 1.
input_image = np.expand_dims(resized_image.transpose(2, 0, 1), 0)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB));
# -
# ### Do Inference
#
# Text boxes are detected in the images and returned as blobs of data in the shape of `[100, 5]`. Each detection description has the format `[x_min, y_min, x_max, y_max, conf]`.
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
result = detection_exec_net.infer(inputs={detection_input_layer: input_image})
# Extract list of boxes from results
boxes = result["boxes"]
# Drop all-zero padding rows so only real detections remain.
boxes = boxes[~np.all(boxes == 0, axis=1)]
# -
# ### Get Detection Results
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
def multiply_by_ratio(ratio_x, ratio_y, box):
    """Scale box coordinates back to the original image size.

    Even-indexed entries are x coordinates (scaled by ``ratio_x``);
    odd-indexed entries are y coordinates (scaled by ``ratio_y`` and clamped
    to at least 10). The last element of *box* (the confidence) is dropped.
    """
    scaled = []
    for position, value in enumerate(box[:-1]):
        if position % 2:
            scaled.append(max(value * ratio_y, 10))
        else:
            scaled.append(value * ratio_x)
    return scaled
def run_preprocesing_on_crop(crop, net_shape):
    """Resize a cropped region to the network input size and prepend the
    two singleton (batch, channel) dimensions the model expects."""
    resized = cv2.resize(crop, net_shape)
    return resized.reshape((1, 1) + resized.shape)
def convert_result_to_image(bgr_image, resized_image, boxes, threshold=0.3, conf_labels=True):
    """Draw detected text boxes (and optionally their annotations) on an image.

    :param bgr_image: original image in BGR channel order
    :param resized_image: image resized to the detector input (used only for ratios)
    :param boxes: iterable of (box, annotation) pairs, box = [x_min, y_min, x_max, y_max, conf]
    :param threshold: minimum confidence for a box to be drawn
    :param conf_labels: if True, draw the recognized text above each box
    :return: RGB image with boxes and labels drawn
    """
    # Define colors for boxes and descriptions
    colors = {"red": (255, 0, 0), "green": (0, 255, 0), "white": (255, 255, 255)}
    # Fetch image shapes to calculate ratio.
    # Fixed: use the bgr_image parameter instead of the module-level `image`
    # global, so the function works for any image passed by the caller.
    (real_y, real_x), (resized_y, resized_x) = bgr_image.shape[:2], resized_image.shape[:2]
    ratio_x, ratio_y = real_x / resized_x, real_y / resized_y
    # Convert base image from bgr to rgb format
    rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)
    # Iterate through non-zero boxes
    for box, annotation in boxes:
        # Pick confidence factor from last place in array
        conf = box[-1]
        if conf > threshold:
            # Convert float to int and multiply position of each box by x and y ratio
            (x_min, y_min, x_max, y_max) = map(int, multiply_by_ratio(ratio_x, ratio_y, box))
            # Draw box: image, start_point, end_point, color, thickness
            cv2.rectangle(rgb_image, (x_min, y_min), (x_max, y_max), colors["green"], 3)
            if conf_labels:
                # Create a white background box sized to the annotation text
                (text_w, text_h), _ = cv2.getTextSize(
                    f"{annotation}", cv2.FONT_HERSHEY_TRIPLEX, 0.8, 1
                )
                image_copy = rgb_image.copy()
                cv2.rectangle(
                    image_copy,
                    (x_min, y_min - text_h - 10),
                    (x_min + text_w, y_min - 10),
                    colors["white"],
                    -1,
                )
                # Blend the label background into the output for readability
                cv2.addWeighted(image_copy, 0.4, rgb_image, 0.6, 0, rgb_image)
                cv2.putText(
                    rgb_image,
                    f"{annotation}",
                    (x_min, y_min - 10),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.8,
                    colors["red"],
                    1,
                    cv2.LINE_AA,
                )
    return rgb_image
# -
# ## Text Recogntion
#
# Load the text recognition model and do inference on the detected boxes from the detection model.
#
# ### Load Text Recognition Model
# +
# Read the recognition IR and load it onto the CPU device.
recognition_net = ie.read_network(
    model=recognition_model_path, weights=recognition_model_path.with_suffix(".bin")
)
recognition_exec_net = ie.load_network(recognition_net, "CPU")
recognition_output_layer = next(iter(recognition_exec_net.outputs))
recognition_input_layer = next(iter(recognition_exec_net.input_info))
# Get height and width of input layer.
# NOTE: this rebinds H and W, shadowing the detector's input dimensions set earlier.
_, _, H, W = recognition_net.input_info[recognition_input_layer].tensor_desc.dims
# -
# ### Do Inference
# +
# Calculate scale for image resizing
(real_y, real_x), (resized_y, resized_x) = image.shape[:2], resized_image.shape[:2]
ratio_x, ratio_y = real_x / resized_x, real_y / resized_y
# Convert image to grayscale for text recognition model
grayscale_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Character set used to decode the model output; "~" marks end-of-string.
letters = "~0123456789abcdefghijklmnopqrstuvwxyz"
# Prepare empty list for annotations
annotations = list()
cropped_images = list()
# fig, ax = plt.subplots(len(boxes), 1, figsize=(5,15), sharex=True, sharey=True)
# For each crop, based on boxes given by detection model we want to get annotations
for i, crop in enumerate(boxes):
    # Get coordinates on corners of crop
    (x_min, y_min, x_max, y_max) = map(int, multiply_by_ratio(ratio_x, ratio_y, crop))
    image_crop = run_preprocesing_on_crop(grayscale_image[y_min:y_max, x_min:x_max], (W, H))
    # Run inference with recognition model
    recognition_result = recognition_exec_net.infer(inputs={recognition_input_layer: image_crop})
    # Squeeze output to remove unnecessary dimensions
    recognition_results_test = np.squeeze(recognition_result[recognition_output_layer])
    # Greedily decode: take the argmax character at each step until the
    # end-of-string symbol (index 0) appears.
    annotation = list()
    for letter in recognition_results_test:
        parsed_letter = letters[letter.argmax()]
        # Returning 0 index from argmax signalises end of string
        if parsed_letter == letters[0]:
            break
        annotation.append(parsed_letter)
    annotations.append("".join(annotation))
    cropped_image = Image.fromarray(image[y_min:y_max, x_min:x_max])
    cropped_images.append(cropped_image)
boxes_with_annotations = list(zip(boxes, annotations))
# + [markdown] tags=[]
# ## Show Results
#
# ### Show Detected Text Boxes and OCR Results for the Image
#
# Visualize the result by drawing boxes around recognized text and showing the OCR result from the text recognition model
# -
# Render the original image with boxes and recognized text overlaid.
plt.figure(figsize=(12, 12))
plt.imshow(convert_result_to_image(image, resized_image, boxes_with_annotations, conf_labels=True));
# ### Show the OCR Result per Bounding Box
#
# Depending on the image, the OCR result may not be readable in the image with boxes as displayed in the cell above. In the next cell, we show the extracted boxes, and the OCR result per box.
# Display each cropped text region together with its recognized string.
for cropped_image, annotation in zip(cropped_images, annotations):
    display(cropped_image, Markdown("".join(annotation)))
# ### Print Annotations in Plain Text Format
#
# Print annotations for detected text based on their position in the input image starting from the upper left corner.
#
# + tags=[]
# Sort annotations by squared distance of the box origin from (0, 0),
# i.e. roughly top-left to bottom-right.
[
    annotation
    for _, annotation in sorted(zip(boxes, annotations), key=lambda x: x[0][0] ** 2 + x[0][1] ** 2)
]
| notebooks/208-optical-character-recognition/208-optical-character-recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="2G-Ip9WAfuDx"
#from google.colab import files
import numpy as np
import tensorflow as tf
import time
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 72, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} colab_type="code" executionInfo={"elapsed": 15812, "status": "ok", "timestamp": 1526158278237, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-lyRwyRCbSzY/AAAAAAAAAAI/AAAAAAAAUYQ/zHzRCLhMZRs/s50-c-k-no/photo.jpg", "userId": "116324510352588137280"}, "user_tz": -120} id="_AZa1yy5gYck" outputId="d3d3842d-7629-4d42-b852-84426bd9443f"
'''This is for the Colab's notebook'''
#uploaded = files.upload()
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 102} colab_type="code" executionInfo={"elapsed": 666, "status": "ok", "timestamp": 1526158279974, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-lyRwyRCbSzY/AAAAAAAAAAI/AAAAAAAAUYQ/zHzRCLhMZRs/s50-c-k-no/photo.jpg", "userId": "116324510352588137280"}, "user_tz": -120} id="e3hrryEFgvox" outputId="e46eae10-3e1f-4836-c53c-197e05299211"
# Read the corpus and build character-level encodings:
# vocab_to_int maps each character to an integer id; int_to_vocab inverts it.
# NOTE(review): `set` iteration order varies between runs, so the
# character<->id mapping is not reproducible across sessions.
with open('nietzsche.txt', 'r') as f:
    text=f.read()
vocab = set(text)
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
# Encode the whole text as an int32 array of character ids.
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
print(encoded[:100])
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="j3jFeVAMLAeZ"
def get_batches(arr, n_seqs, n_steps):
    """Yield successive (inputs, targets) mini-batches from *arr*.

    The array is trimmed to a whole number of batches and laid out as
    *n_seqs* parallel sequences; each batch is an (n_seqs, n_steps) window.
    Targets are the inputs shifted left by one step, with the first column
    of the window wrapped into the last target column.
    """
    chars_per_batch = n_seqs * n_steps
    n_batches = len(arr) // chars_per_batch
    # Drop the tail that does not fill a complete batch, then reshape into
    # n_seqs rows so each row is one long sequence.
    trimmed = arr[:n_batches * chars_per_batch].reshape((n_seqs, -1))
    for start in range(0, trimmed.shape[1], n_steps):
        x = trimmed[:, start:start + n_steps]
        y = np.zeros(x.shape)
        y[:, :-1] = x[:, 1:]
        y[:, -1] = x[:, 0]
        yield x, y
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="hQjAWtgw6Qvp"
# Training hyperparameters for the character-level RNN.
batch_size = 100         # Sequences per batch
num_steps = 150          # Number of sequence steps per batch
lstm_size = 512          # Size of hidden layers in LSTMs
num_layers = 2           # Number of LSTM layers
learning_rate = 0.001    # Learning rate
keep_prob = 0.5          # Dropout keep probability
epochs = 40
save_every_n = 200       # Save every N iterations
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="X6u2wRCjiuh0"
def build_inputs(batch_size, num_steps):
    """Create placeholders for input ids, target ids and dropout keep prob."""
    shape = [batch_size, num_steps]
    inputs = tf.placeholder(tf.int32, shape, name="inputs")
    targets = tf.placeholder(tf.int32, shape, name="targets")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    return inputs, targets, keep_prob
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
    """Stack num_layers dropout-wrapped LSTM cells; return (cell, zero state)."""
    def lstm_with_dropout(size, keep):
        # Output dropout only; cell/state connections are left untouched.
        base = tf.contrib.rnn.BasicLSTMCell(size)
        return tf.contrib.rnn.DropoutWrapper(base, output_keep_prob=keep)
    cell = tf.contrib.rnn.MultiRNNCell(
        [lstm_with_dropout(lstm_size, keep_prob) for _ in range(num_layers)])
    initial_state = cell.zero_state(batch_size, tf.float32)
    return cell, initial_state
def build_output(lstm_output, in_size, out_size):
    """Project LSTM outputs through a softmax layer; return (probs, logits)."""
    seq_output = tf.concat(lstm_output, axis=1)
    # Flatten to 2-D so a single matmul covers every timestep of every row.
    flat = tf.reshape(seq_output, [-1, in_size])
    with tf.variable_scope("softmax"):
        softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
        softmax_b = tf.Variable(tf.zeros(out_size))
        logits = tf.matmul(flat, softmax_w) + softmax_b
        out = tf.nn.softmax(logits, name='predictions')
    return out, logits
def build_loss(logits, targets, lstm_size, num_classes):
    """Mean softmax cross-entropy between logits and one-hot encoded targets."""
    y_one_hot = tf.one_hot(targets, num_classes)
    # Match the flattened logits layout produced by build_output.
    y_reshaped = tf.reshape(y_one_hot, logits.get_shape())
    per_step = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
    return tf.reduce_mean(per_step)
def build_optimizer(loss, learning_rate, grad_clip):
    """Adam training op with gradients clipped by global norm."""
    tvars = tf.trainable_variables()
    clipped, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
    adam = tf.train.AdamOptimizer(learning_rate)
    return adam.apply_gradients(zip(clipped, tvars))
class CharRNN:
    """Character-level LSTM language model (TensorFlow 1.x graph).

    Builds input placeholders, stacked dropout-wrapped LSTM cells, a
    softmax projection, cross-entropy loss and a clipped-gradient Adam
    optimizer.  With sampling=True the graph is built for one character
    at a time (batch_size = num_steps = 1).
    """
    def __init__(self, num_classes, batch_size, num_steps, lstm_size,
                 num_layers, learning_rate,
                 grad_clip=5, sampling=False):
        # When sampling we feed a single character per step.
        if sampling == True:
            batch_size, num_steps = 1, 1
        tf.reset_default_graph()
        # Build the input placeholder tensors
        self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)
        # Build the LSTM cells.
        # Bug fix: the original passed the module-level float `keep_prob`
        # here instead of the placeholder, so feeding model.keep_prob at run
        # time (e.g. 1.0 when sampling) never affected dropout.  Pass the
        # placeholder so the fed value actually controls dropout.
        cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)
        ## Run the data through RNN layers
        # First one-hot encode the input tokens
        x_one_hot = tf.one_hot(self.inputs, num_classes)
        # Run each sequence step through the RNN and collect the outputs
        outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
        self.final_state = state
        # Get softmax predictions and logits
        self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)
        # Loss and Optimizer (with gradient clipping)
        self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
        self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="Ym5CJuc65aoI"
def train(encoded, vocab):
    """Train a CharRNN on the encoded corpus, checkpointing every save_every_n steps.

    Uses the module-level hyperparameters (batch_size, num_steps, lstm_size,
    num_layers, learning_rate, keep_prob, epochs, save_every_n).
    """
    net = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
                  lstm_size=lstm_size, num_layers=num_layers,
                  learning_rate=learning_rate)
    saver = tf.train.Saver(max_to_keep=100)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # saver.restore(sess,"checkpoints/______.ckpt")
        step = 0
        for epoch in range(epochs):
            # Reset the recurrent state at the start of each epoch.
            state = sess.run(net.initial_state)
            loss = 0
            for x, y in get_batches(encoded, batch_size, num_steps):
                step += 1
                start = time.time()
                # Thread the final state of this batch into the next one.
                feed = {net.inputs: x,
                        net.targets: y,
                        net.keep_prob: keep_prob,
                        net.initial_state: state}
                batch_loss, state, _ = sess.run(
                    [net.loss, net.final_state, net.optimizer],
                    feed_dict=feed)
                end = time.time()
                print('Epoch: {}/{}... '.format(epoch+1, epochs),
                      'Training Step: {}... '.format(step),
                      'Training loss: {:.4f}... '.format(batch_loss),
                      '{:.4f} sec/batch'.format((end-start)))
                if (step % save_every_n == 0):
                    saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(step, lstm_size))
        # Always save a final checkpoint after the last epoch.
        saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(step, lstm_size))
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="oef7XguO5bdp"
def pick_top_n(preds, vocab_size, top_n=3):
    """Sample one character id from the top_n highest-probability predictions.

    Probabilities outside the top_n are zeroed and the remainder is
    renormalised before drawing.  Note: mutates the (squeezed view of the)
    preds array in place, matching the original behaviour.
    """
    probs = np.squeeze(preds)
    # Zero out everything except the top_n entries, then renormalise.
    probs[np.argsort(probs)[:-top_n]] = 0
    probs = probs / np.sum(probs)
    return np.random.choice(vocab_size, 1, p=probs)[0]
def sample(checkpoint, vocab, vocab_to_int, int_to_vocab, n_samples, lstm_size, vocab_size, seed="The "):
    """Generate n_samples characters from a trained checkpoint, primed with seed.

    The seed is fed one character at a time to warm up the LSTM state; the
    final predictions then drive the free-running generation loop.
    NOTE(review): `x` and `preds` defined in the seed loop are reused below,
    so an empty seed would leave them undefined — assumes seed is non-empty.
    """
    samples = [c for c in seed]
    model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
                        lstm_size=lstm_size, num_layers=num_layers,
                        learning_rate=learning_rate, sampling=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Prime the network state with the seed characters.
        for c in seed:
            x = np.zeros((1, 1))
            x[0, 0] = vocab_to_int[c]
            feed = {model.inputs:x,
                    model.keep_prob:1.0,
                    model.initial_state:new_state
                    }
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
        # First generated character comes from the last seed prediction.
        c = pick_top_n(preds, len(vocab))
        samples.append(int_to_vocab[c])
        # Free-running generation: feed each sampled char back in.
        for i in range(n_samples):
            x[0, 0] = c
            feed = {model.inputs:x,
                    model.keep_prob:1.0,
                    model.initial_state:new_state
                    }
            preds, new_state = sess.run([model.prediction, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, len(vocab))
            samples.append(int_to_vocab[c])
    return "".join(samples)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 27217} colab_type="code" executionInfo={"elapsed": 831104, "status": "ok", "timestamp": 1526164392930, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-lyRwyRCbSzY/AAAAAAAAAAI/AAAAAAAAUYQ/zHzRCLhMZRs/s50-c-k-no/photo.jpg", "userId": "116324510352588137280"}, "user_tz": -120} id="4JLkM6Vc6IuF" outputId="4cd1d46e-049b-4665-8c6c-94975c2e1869"
# Kick off training on the encoded corpus (long-running).
train(encoded, vocab)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="vkTK4B-lgmBJ"
# Download the three files that make up the latest TF checkpoint.
# NOTE(review): `files` is presumably google.colab.files — only works in Colab.
files.download(tf.train.latest_checkpoint('checkpoints') + '.meta')
files.download(tf.train.latest_checkpoint('checkpoints') + '.index')
files.download(tf.train.latest_checkpoint('checkpoints') + '.data-00000-of-00001')
# + colab={"autoexec": {"startup": false, "wait_interval": 0}, "base_uri": "https://localhost:8080/", "height": 306} colab_type="code" executionInfo={"elapsed": 5083, "status": "ok", "timestamp": 1526165360701, "user": {"displayName": "<NAME>", "photoUrl": "//lh3.googleusercontent.com/-lyRwyRCbSzY/AAAAAAAAAAI/AAAAAAAAUYQ/zHzRCLhMZRs/s50-c-k-no/photo.jpg", "userId": "116324510352588137280"}, "user_tz": -120} id="l2735KEy5f4D" outputId="e9c4e37f-0aff-40d1-d7f3-937cdd3eefc2"
# Restore the newest checkpoint and generate 1000 characters seeded with "King ".
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, vocab, vocab_to_int, int_to_vocab, 1000, lstm_size, len(vocab), seed="King ")
print(samp)
| LSTM_text_generator/lstm_text_generator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # 2018.11.01: Network inference from time series of categorical variables
# +
import sys,os
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import inference
# +
# setting parameter:
np.random.seed(1)
n = 20 # number of positions
m = 5 # number of values at each position
l = int(4*((n*m)**2)) # number of samples
g = 4. # interaction strength parameter passed to generate_interactions
# -
# Draw ground-truth interactions w0 and simulate sequences from them.
w0 = inference.generate_interactions(n,m,g)
s = inference.generate_sequences(w0,n,m,l)
# ## Large sample sizes
# Fit with the additive update and compare recovered w1 against truth w0.
w1 = inference.fit_additive(s,n,m)
plt.scatter(w0,w1)
plt.plot([-0.5,0.5],[-0.5,0.5],'r--') # y = x reference line
mse = ((w0-w1)**2).mean()
print(mse)
# plt.scatter(w0,w1)
# plt.plot([-0.5,0.5],[-0.5,0.5],'r--')
# Same comparison for the multiplicative update.
w2 = inference.fit_multiplicative(s,n,m)
plt.scatter(w0,w2)
plt.plot([-0.5,0.5],[-0.5,0.5],'r--')
mse = ((w0-w2)**2).mean()
print(mse)
# ## Small sample sizes
# Repeat both fits with a much smaller sample size (first l2 samples only).
l2 = int(0.5*(n*m)**2)
# ### Additive update
w1 = inference.fit_additive(s[:l2],n,m)
plt.scatter(w0,w1)
plt.plot([-0.5,0.5],[-0.5,0.5],'r--')
mse = ((w0-w1)**2).mean()
print('MSE by additive update:',mse)
# ### Multiplicative update
w = inference.fit_multiplicative(s[:l2],n,m)
plt.scatter(w0,w)
plt.plot([-0.5,0.5],[-0.5,0.5],'r--')
mse = ((w0-w)**2).mean()
print('MSE by multiplicative update:',mse)
| old_versions/2018.11.03_inference/1main_g4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Relmath DEMO
#
#
# +
from relmath import *
from relmath_bq import *
# Demo: build a small relation, compose it with its transpose, simplify,
# and render.  NOTE(review): semantics of quote()/simp()/to_bq() come from
# the relmath package — not visible here.
State.s.push_env()
with quote():
    A = Rel([
        [0.1, 0.6, 0.3, 0.0],
        [0.0, 0.8, 0.2, 0.0],
        [0.0, 0.0, 0.1, 0.9]
    ],
    ['Mario','Luigi','Bowser'],
    ['Movies','Music','Sport','Kidnapping'])
    M = A * A.T
    MAT = A * A.T.simp()
    print(MAT)
    Ms = M.simp()
    print(Ms)
M.to_bq()
# -
| relmath-demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Add anna to the path
import os
import sys
# Make the local `anna` package importable from this notebook's location.
module_path = os.path.abspath(os.path.join('../../../anna'))
if module_path not in sys.path:
    sys.path.append(module_path)
DATA_DIR = "../../../data"
# +
import json
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tensorflow as tf
import anna.data.dataset.reuters21578 as reuters
import anna.model.premade as models
import anna.summary.extract as summary
import anna.summary.plots as plots
tf.logging.set_verbosity(tf.logging.ERROR)
# %matplotlib inline
# +
# Load data
train_docs, test_docs, unused_docs, labels = reuters.fetch_and_parse(DATA_DIR)
# Standard Reuters Config
val_size = 777       # documents held out for validation
epochs = 40
shuffle = 10000      # shuffle buffer size
folder_name = "model-embeddings"
# -
# Train one AVGxBR model per embedding configuration.
# NOTE(review): for emb == "none" the model still loads fasttext vectors but
# with voc_size=1 and a large oov_size — presumably an OOV-hashing-only
# baseline; confirm against models.AVGxBR.
for emb in ["glove", "fasttext", "none"]:
    # Create default trainer
    model = models.AVGxBR(DATA_DIR, labels,
                          name=emb,
                          folder_name=folder_name,
                          pretrained_embeddings="fasttext" if emb == "none" else emb,
                          voc_size=1 if emb == "none" else 100000,
                          oov_size=100000 if emb == "none" else 0)
    # Train and evaluate
    print("Model: {}, Embeddings: {}".format(model, emb))
    model.train(train_docs, test_docs,
                val_size=val_size, epochs=epochs, shuffle=shuffle)
    # Delete to save memory
    del model
# +
# Evaluate non-trainable embeddings
# Same architecture, but the embedding matrix is frozen during training.
model = models.AVGxBR(DATA_DIR, labels,
                      name="non-trainable",
                      folder_name=folder_name,
                      trainable_embeddings=False,
                      voc_size=100000,
                      oov_size=0)
# Train and evaluate
print("Model: {}, Embeddings: non-trainable glove".format(model))
model.train(train_docs, test_docs,
            val_size=val_size, epochs=epochs, shuffle=shuffle)
# Delete to save memory
del model
# -
model_path = os.path.join(DATA_DIR, folder_name)
metrics_path = os.path.join(model_path, "metrics.json")
# Load from tfrecord event files
metrics = summary.parse_all(model_path)
# Cache the parsed metrics as JSON so re-runs can skip the slow parse above.
with open(metrics_path, "w") as f:
    json.dump(metrics, f)
# +
# Load from json serialized metrics
#with open(metrics_path, "r") as f:
#    metrics = json.load(f)
# -
# Truncate all runs to the shortest one so the curves are comparable.
num_epochs = min([len(m["val"]["perf/accuracy"]) for m in metrics.values()])
# +
# Plot validation accuracy per embedding type (excluding the frozen run).
remove_first = 10
x = range(num_epochs)[remove_first:]
pre, ax = plots.subplots(figsize=[8, 5], xlabel="epoch")
i = 0
for name, metric in sorted(metrics.items()):
    if name == "non-trainable":
        continue
    y = metric["val"]["perf/accuracy"]
    y = [m[1] for m in y]  # drop the step index, keep the value
    y = plots.moving_average(y, window=6)
    y = y[remove_first:num_epochs]
    plots.plot(ax, x, y, color=i, label=name)
    # NOTE(review): i follows 0, 2, 6, 14... — presumably intentional colour
    # spacing, but `i += i * 1 + 2` looks suspicious; verify against plots.plot.
    i += i * 1 + 2
ax.set_ylabel("ACC")
ax.legend(loc=4, fontsize=16)
# +
# Plot trainable vs frozen glove embeddings on the same axes.
remove_first = 10
x = range(num_epochs)[remove_first:]
pre, ax = plots.subplots(figsize=[8, 5], xlabel="epoch")
i = 2
for name, metric in [("trainable", metrics["glove"]), ("non-trainable", metrics["non-trainable"])]:
    y = metric["val"]["perf/accuracy"]
    y = [m[1] for m in y]
    y = plots.moving_average(y, window=6)
    y = y[remove_first:num_epochs]
    plots.plot(ax, x, y, color=i, label=name)
    i -= 2  # second curve gets colour index 0
ax.set_ylabel("ACC")
ax.legend(loc=4, fontsize=16)
| notebook/text/experiments/embeddings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the survey results straight from GitHub.
url = 'https://github.com/eueung/pilrek/raw/master/pilrek.csv'
df = pd.read_csv(url)
df.tail()
import matplotlib.pyplot as plt
# %matplotlib inline
# Quick sanity checks: dimensions, column types and missing values.
df.shape
df.dtypes
df.isna().sum()
# Compute the mode (most frequent value) of the rector-candidate column.
CaRekPilihan_mode = df['CaRek Pilihan'].mode()
print(CaRekPilihan_mode)
# Build a new dataframe restricted to that favourite candidate.
temp = df.groupby('CaRek Pilihan')
fav = temp.get_group('Acep Purqon (FMIPA-FI)') # Candidate name must be updated if the mode changes
fav.tail()
# Tabulate votes per respondent category for the favourite candidate.
fav['Kategori Anda'].value_counts()
# +
# PLOT RADAR CHART showing a candidate's per-category strength
# Inspired by Winning Eleven
import matplotlib.pyplot as plt
df['CaRek Pilihan'].value_counts().plot(kind='bar') # Identify the favourite candidate
CaRek_voters = fav['Kategori Anda'].value_counts()
# +
from math import pi
# %matplotlib inline
# Create a data frame from "fav"
CaRekFav = fav['Kategori Anda'].value_counts()
data = pd.DataFrame([CaRekFav], index = [""])
data
# -
# Category names become the radar chart's axes.
Attributes =list(data)
AttNo = len(Attributes)
# +
# Plots a radar chart.
# Set data
cat = Attributes
# NOTE(review): counts are hard-coded here instead of read from CaRekFav.
values = [102, 81, 57, 33, 7] # values must be updated whenever the counts change
N = len(cat)
# One evenly spaced angle (radians) per category.
x_as = [n / float(N) * 2 * pi for n in range(N)]
# Because our chart will be circular we need to append a copy of the first
# value of each list at the end of each list with data
values += values[:1]
x_as += x_as[:1]
# Set color of axes
plt.rc('axes', linewidth=0.5, edgecolor="#888888")
# Create polar plot
ax = plt.subplot(111, polar=True)
# Set clockwise rotation. That is:
ax.set_theta_offset(pi / 2)
ax.set_theta_direction(-1)
# Set position of y-labels
ax.set_rlabel_position(0)
# Set color and linestyle of grid
ax.xaxis.grid(True, color="#888888", linestyle='solid', linewidth=0.5)
ax.yaxis.grid(True, color="#888888", linestyle='solid', linewidth=0.5)
# Set number of radial axes and remove labels
plt.xticks(x_as[:-1], [])
# Set yticks
plt.yticks([20, 40, 60, 80, 100], ["20", "40", "60", "80", "100"])
# Plot data
ax.plot(x_as, values, linewidth=0, linestyle='solid', zorder=3)
# Fill area
ax.fill(x_as, values, 'b', alpha=0.3)
# Set axes limits
plt.ylim(0, 100)
# Draw ytick labels to make sure they fit properly
for i in range(N):
    angle_rad = i / float(N) * 2 * pi
    if angle_rad == 0:
        ha, distance_ax = "center", 10
    elif 0 < angle_rad < pi:
        ha, distance_ax = "left", 1
    elif angle_rad == pi:
        ha, distance_ax = "center", 1
    else:
        ha, distance_ax = "right", 1
    ax.text(angle_rad, 100 + distance_ax, cat[i], size=10, horizontalalignment=ha, verticalalignment="center")
# Show polar plot
plt.title("Kekuatan CaRek Favorit - E-mail")
plt.show()
# -
# PLOT TIMESTAMP — distribution of survey submissions by hour of day.
df['Timestamp'] = pd.to_datetime(df['Timestamp'])
df['TimestampH']=df.Timestamp.dt.hour
df['TimestampH'].tail()
# Example: submissions during the 09:00 hour.
hour=df.loc[df.Timestamp.dt.hour==9].sort_values('TimestampH').tail()
type(df['TimestampH'])
# sort=False keeps hours in chronological order on the x axis.
df['TimestampH'].value_counts(sort=False).plot()
plt.xlabel("Jam UTC+7")
plt.ylabel("Jumlah Pengisian")
plt.title("Timestamp Jam Pengisian Survey - Email")
| contrib/UTS 18117007 & 18117018/pilrek Farras Eldy Rashad (18117007) dan William Damario Lukito (18117018).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + code_folding=[]
""" This is a game based on the true story of a girl from FLorida who went missing and was found \
after almost two weeks.
Number of bugs = I tried to implement random, but I could not.
Date: November 8 2021"""
# Importing random
import random
# Defining the introduction to the game
def intro():
    """Print the backstory, name the detective, then jump to a random scene.

    Reads the module-level victim_name/suspect_name globals and dispatches
    to stage_1a/stage_1b/stage_1c based on a random scene number.
    """
    print(f"""
    {victim_name.capitalize()} and {suspect_name.capitalize()} decided to go on a cross road trip for a period of 2 \
months starting from West Virginia. She would talk to her mother at least twice a week over the calls.Three weeks \
later Mrs.Bert receives a text. After reading it Mrs.Bert suspects that the message was not sent by her daughter, \
{victim_name.capitalize()}. Mr.Bert and she tries contacting their daughter but fail. They try to contact \
{suspect_name.capitalize()}'s\phone,but fail again.
They decide to file a missing complaint after a few hours. They get to know that {suspect_name.capitalize()} has \
returnedto his parents home without {victim_name.capitalize()}.They also get informed that {suspect_name.capitalize()} \
has hired Mr. Stone as his lawyer and the cops can't take any action against him as they do not have any valid proof \
against {suspect_name.capitalize()}. After a couple of days they get to know, from cops, that their daughter and her \
boyfriend was pulled over by cops in at a gas station near New Hampshire, as they were found engaged in a fight.\n
    """)
    print(""" At this stage Mr. and Mrs. Bert felt the only way to know about their daughter was
    through a detective. Hence, they hired a detective to know what has happened with their daughter.""")
    print(" You will be playing the role of the detective.")
    # Please give a name to your detective based on your choice
    detective_name = input(prompt = "Please enter your name:\n")
    print(f""" "Thank you {detective_name.capitalize()} for taking up this case. We are counting on you to know \
about our daughter. We are not even sure if she is in grave danger and is in need of some help ."- Mrs.Bert""")
    print()
    print()
    # A small description on the stages of the game.
    print(f"There are three different scenes of investigation. You will start your investigation with one of the \
below mentioned scenes in random. Use your detective instincts to move forward from there. You will move to the \
next stage when you successfully complete the current stage. If you fail in any stage at a crucial condition you \
will get a second chance to play that stage with different option, if not you will start over from the beginning.\
You have to successfully complete three stages to reach to Ms.{victim_name.capitalize()}.")
    print("""Scene #1: You can sneak into <NAME>'s' home and try finding some proofs against him.""")
    print("Scene #2: You can drive to New Hampshire and meet the owner of the gas station and know more about \
the incident.")
    print(f"Scene #3: You can meet the couple who got a ride from {victim_name.capitalize()} and \
{suspect_name.capitalize()} to know about the last place they were seen together.")
    print("All the best!")
    #first_stage = input(prompt = "You should make a choice that will help you clear the first stage successfully.\
    # \nPlease input number or name of your choice:\
    #\n1. Sneak into the room\
    # \n2. Drive to gas station\
    # \n3. Meet the couple\n").lower()
    # Pick the opening scene at random (inclusive range 1..3).
    scene = random.randint(a=1,
                           b=3)
    # In this stage you will decide your first place of investigation
    if scene == 1:
        print(f"You are now at the secret entrance of <NAME>'s house.")
        stage_1a()
    elif scene == 2:
        print("You are now on your way to the gas station near New Hampshire.")
        stage_1b()
    elif scene == 3:
        print(f"You are now on the way to meet with the couple who had met {victim_name.capitalize()}.")
        stage_1c()
    else:
        print("Something went wrong")
# The different sub stages of stage one will take you through different possibilities for each of the choice you make.
def stage_1a():
    """Scene 1 (suspect's house): search a room; success leads to stage 2."""
    first_stage_1 = input(prompt = "You should make a choice that will help you clear the first stage successfully.\
    \nPlease input number or option name of your choice:\
    \n1. Look into suspect's room. Option: suspect room\
    \n2. Look into suspect's father's room. Option: father room\
    \n3. Look into the store room. Option:store room\n").lower()
    # In this stage you will decide on where to search for your first clue to succeed first stage.
    if '1' in first_stage_1 or "suspect room" in first_stage_1:
        print("You looked through the room, but did not find anything.")
        stage_1a()
    elif '2' in first_stage_1 or "father room" in first_stage_1:
        print("Oh No! You got caught as Mr. John was already there in the room.")
        intro()
    elif '3' in first_stage_1 or "store room" in first_stage_1:
        print(f"You successfully found {victim_name.capitalize()}'s phone. It gave you all the required details.")
        print("You successfully completed the first round....")
        second_stage()
    else:
        print("Something went wrong.")
def stage_1b():
    """Scene 2 (gas station): talk to the owner to succeed; 'ignore' restarts."""
    first_stage_2 = input(prompt = "You should make a choice that will help you clear the first stage successfully.\
    \nPlease input number or option name of your choice:\
    \n1. Meet the gas station owner. Option: owner\
    \n2. Look into the CCTV recordings. Option: cctv\
    \n3. Ignore gas station. Option: ignore\n").lower()
    # Get a chance to talk to the owner of the gas station.
    if '1' in first_stage_2 or "owner" in first_stage_2:
        print(f"You found that {victim_name.capitalize()} and {suspect_name.capitalize()} had engaged in a\
        huge fight and other details.")
        print("You successfully completed the first round....")
        second_stage()
    elif '2' in first_stage_2 or "cctv" in first_stage_2:
        print(f"{victim_name.capitalize()} and {suspect_name.capitalize()} were pulled over by the cops after being\
        complained of violent behavior in public space.")
        stage_1b()
    elif '3' in first_stage_2 or "ignore" in first_stage_2:
        print(f"Sorry, you were not able to find anything.")
        intro()
    else:
        print("Something went wrong.")
def stage_1c():
    """Scene 3 (the couple): 'agreed' advances to stage 2; others restart."""
    first_stage_3 = input(prompt = "You should make a choice that will help you clear the first stage successfully.\
    \nPlease input number or option name of your choice:\
    \n1. Couple denies to share any information. Option: denied\
    \n2. Couple agrees to share the information. Option: agreed\
    \n3. You missed the address of the couple. Option: missed\n").lower()
    # In this stage you try to talk to couple and learn more about the victim
    if '1' in first_stage_3 or "denied" in first_stage_3:
        print("Sorry, the couple are scared of you!")
        intro()
    elif '2' in first_stage_3 or "agreed" in first_stage_3:
        print(f"You found that {victim_name.capitalize()} and {suspect_name.capitalize()} gave them a drop to\
        New Hampshire and the destination they were headed to.")
        print("You successfully completed the first round...Congratulations!")
        second_stage()
    elif '3' in first_stage_3 or "missed" in first_stage_3:
        print(f"Sorry, you were not able to find anything.")
        intro()
    else:
        print("Something went wrong.")
# Defining the second stage of the game.
def second_stage():
    """Stage 2: choose how to find the hiking trail; dispatches to stage_2a/b/c."""
    print(f"Congratulations on successfully completing first stage.\ You are now in the second stage of the game.\
    And one step closer to finding {victim_name.capitalize()}. From the previous stage you now know their last hiking \
    place - White Mountains. All the best for your new adventures...!!!")
    second_stage = input(prompt = "What will be your next choice?\
    Please input number or option name of your choice:\
    \n1. Look into every trail in White Mountains. Option: white mountains\
    \n2. Meet the couple again. Option: meet again\
    \n3. Try looking into the victim's phone. Option: search phone \n").lower()
    # Make more careful decisions. One mistake and back to stage 0.
    # Playing this stage will help you know which trail did they pick for their hike.
    # NOTE(review): the local name `second_stage` shadows this function.
    if '1' in second_stage or "white mountains" in second_stage:
        print("Ohh you are very hard working! But White Mountains is too big for you search!")
        stage_2a()
    elif '2' in second_stage or "meet again" in second_stage:
        print("Good choice! Might be they'll have more information.")
        stage_2b()
    elif '3' in second_stage or "search phone" in second_stage:
        print("Good choice! Might be it'll have more information.")
        stage_2c()
    else:
        print("Something went wrong.")
# The different sub stages of stage two will take you through different possibilities for each of the choice you make.
def stage_2a():
    """Dead-end branch of stage 2: always restarts the game from intro()."""
    print("Sorry you were not able to find anything. You will have to play the game from the beginning.\
    \nBetter luck next time :(")
    intro()
def stage_2b():
    """Stage 2 via the couple: asking about the trail advances to stage 3."""
    second_stage_1 = input(prompt = "You only have one questions to ask the couple...\
    Carefully decide what you want to ask them.\
    \nPlease input number or option name of your choice:\
    \n1. Did you click any pics with them? Option: picture\
    \n2. Which trail did you all hike together? Option: trail\
    \n3. Why did you'll take lift? Option: lift\n").lower()
    # In this stage you will try finding the second clue which help you succeed the second stage
    if '1' in second_stage_1 or "picture" in second_stage_1:
        print("Good move! But you will have to ask another question to look at the photos.")
        second_stage()
    elif '2' in second_stage_1 or "trail" in second_stage_1:
        print(f"Clever! You got to know the name of the trail {victim_name.capitalize()} last climbed.")
        print("Success...You found the name of the trail. Completed the second round!!!")
        third_stage()
    elif '3' in second_stage_1 or "lift" in second_stage_1:
        print("Sorry, this question did not help you in any way.")
        intro()
    else:
        print("Something went wrong")
def stage_2c():
    """Stage 2 via the phone: the navigation history advances to stage 3."""
    second_stage_2 = input(prompt = "The phone is in a very condition and has very little battery. You have \
    no time to get it repaired. In this condition what will be your immediate search? \
    \nPlease input number or option name of your choice:\
    \n1. Last navigation search. Option: navigation\
    \n2. Last call log search. Option : call log\
    \n3. Look into the gallery. Option: gallery\n").lower()
    # In this stage you will use your best knowledge in phone search and detective instincts
    if '1' in second_stage_2 or "navigation" in second_stage_2:
        print(f"Clever! you get to know the name of the trail {victim_name.capitalize()} had searched for.")
        print("Success...You found the name of the trail. Completed the second round!!!")
        third_stage()
    elif '2' in second_stage_2 or "call log" in second_stage_2:
        print(f"Good move! But you did not find anything very important.")
        second_stage()
    elif '3' in second_stage_2 or "gallery" in second_stage_2:
        print("Sorry, the gallery was password protected and did not help you in any way.")
        second_stage()
    else:
        print("Something went wrong")
# In this stage you will be able to find your victim.
def third_stage():
    """Final stage: pick how to search the trail; 'team'/'yourself' win the game.

    Bug fixes vs. original: (1) the player's answer was stored in a local
    variable named ``third_stage``, shadowing this function and making any
    recursion impossible; it is renamed to ``final_choice``. (2) the
    'police' branch printed a message and then silently fell through,
    ending the game with no resolution — it now re-prompts the player.
    """
    print(f"Congratulations on successfully completing Stage 2. You have good detective instincts.\
    This is your last stage. Now you the name of the trail that was taken by\
    {victim_name.capitalize()} and {suspect_name.capitalize()}. You will be able to find {victim_name.capitalize()} \
    in this stage.")
    print("All the best for the last stage of the game...!")
    final_choice = input(prompt = "What will you choose to do now?\
    \nPlease input number or name of your choice:\
    \n1. Get the help of police Option: police\
    \n2. Get help from search team. Option: team\
    \n3. Go looking by yourself. Option: yourself \n").lower()
    # In this stage you will solve the mystery of missing girl
    if '1' in final_choice or "police" in final_choice:
        print("This might not be best move! Many formalities to complete before moving forward.")
        # Re-prompt instead of silently ending the game (original dead end).
        third_stage()
    elif '2' in final_choice or "team" in final_choice:
        print(f"Good Move! You were able to find {victim_name.capitalize()}.")
        win()
    elif '3' in final_choice or "yourself" in final_choice:
        print(f"You took a while but you were able to find {victim_name.capitalize()}!")
        win()
    else:
        print("Something went wrong!")
# defining win function
def win():
print(f" Great Job! You found {victim_name.capitalize()}. But Alas! She is no more.\
You are left disappointed to find her dead. Now it is your turn to find the culprit\
Be ready to find if {suspect_name.capitalize()} has anything to do with this and solve\
the mystery behind {victim_name.capitalize()}'s death. See you in the Murder Mystery Part 2...")
# defining fail function
def fail():
print(" Oh No! Something went wrong. You were very close but you will have to start over again.")
last_input = input(prompt = "Do you wish to play the game again?\
\nPlease input number or name of your choice:\
\n1. No\
\n2. Yes\n").lower()
last_input = last_input.capitalize()
if '1' in last_input or 'No' in last_input:
print("Thank you for playing this game.")
elif '2' in last_input or 'Yes' in last_input:
print("Hurray!!! Lets begin the Game...")
intro()
else:
print("Something went wrong")
# Game entry point: show the title banner, name the characters, and start.
print()
print()
print(" **********************************************")
print(" * *")
print(" * Murder Mystery *")
print(" * *")
print(" **********************************************")
print()
print("""
    A message beeps from the phone. "West-Wood doesn't have a network." \n
    The person reading this message appears to be dubious and terrified.
    """)
print()
print("Its time to meet the characters of the game!\n")
victim_name = input(prompt = "What would you like to name your victim?\n").capitalize() # give a name to the victim
suspect_name = input(prompt = "What would you name the suspect of this game?\n").lower() # give a name to the suspect
start_game = input(prompt ="Are you excited to play the game?\
    \n Please input number or name of your choice:\
    \n1. No\
    \n2. Yes\n").lower()
start_game = start_game.capitalize()
if '1' in start_game or 'No' in start_game:
    print("No Problem, we can start when you feel like playing.")
elif '2' in start_game or 'Yes' in start_game:
    print("Hurray!!! Lets begin the Game...")
    intro()
else:
    print("Something went wrong")
# -
| Ramu_Sneha_Text_Adventure_Game.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %run notebook_setup.py
sys.path.append('../..')
# -
from standardiser import rules
rules.logger.setLevel('DEBUG')
# # Rule application strategy
#
# This document discusses aspects of the rule application strategy, as this is not always straightforward.
#
# The document was originally created to illustrate a problem with an earlier version of the code. There, the transform for each rule was applied repeatedly, with the *first* product of each reaction being taken as input to the next, until the reaction no longer produced a product. This was to handle cases where a moiety requiring rule-based standardisation occurred multiple times in a molecule, and it worked for most such cases.
#
# However, that approach failed for molecules such as this one (which is a simplified version of real examples)...
# Simplified test molecule with the 2-hydroxy pyridine moiety in two rings.
mol = Chem.MolFromSmiles("Oc1nc(CCc2c(O)ncnc2O)nc(O)c1")
mol
# +
# Reaction defining rule (for rule '2-hydroxy pyridine -> 2-pyridone')...
rxn = [x['rxn'] for x in rules.rule_set if x['name'] == '2-hydroxy pyridine -> 2-pyridone'][0]
# +
# Run transform...
# NB As the transform is a one-component reaction, only the first component from each product tuple will exist
products = [x[0] for x in rxn.RunReactants((mol,))]
# Sanitize product mols...
for x in products: Chem.SanitizeMol(x)
# Keep unique products only...
products = list({Chem.MolToSmiles(x): x for x in products}.values())
# Depict...
Draw.MolsToGridImage(products)
# Note that, in each product, only one example of the target moiety had been fixed...
# +
# Re-run reaction on first product...
prod = rxn.RunReactants((products[0],))[0][0]
prod
# +
# An attempt to sanitize this 'molecule' fails (non-physical intermediate)...
try:
    Chem.SanitizeMol(prod)
except ValueError as err:
    print(err.args[0])
# +
# Re-run reaction on second product...
prod = rxn.RunReactants((products[1],))[0][0]
prod
# Note that both applications of the reaction have been to the first ring...
# +
# An attempt to sanitize this 'molecule' also fails...
try:
    Chem.SanitizeMol(prod)
except ValueError as err:
    print(err.args[0])
# -
# As is hopefully clear from the above, the problem is that, as the transform is applied to the same ring as the first on the second attempt, a non-physical 'intermediate' is obtained. This may be depicted but not sanitized, which means it may not be used in any further transformations.
#
# Thus, the desired product, where the transform has been applied once to each ring, cannot be obtained by simple serial application of the transform to the first product. A slightly different strategy must thus be applied to ensure cases such as this one are handled.
#
# Now, all products are used, and the transform applied to each until the final molecule is converged upon (or we run out of iterations in a few pathological cases)...
mol = Chem.MolFromSmiles("Oc1nc(CCc2c(O)ncnc2O)nc(O)c1")
mol
# +
# Iteratively apply the transform to ALL products each pass, keeping only
# sanitizable (physical) structures, until no new product appears (or the
# pass budget runs out for pathological cases).
fixed = None
mols = [mol]
changed = False
for n_pass in range(1, 10):
    logging.debug("apply_rule> starting pass {}...".format(n_pass))
    products = {}
    for mol in mols:
        for product in [x[0] for x in rxn.RunReactants((mol,))]:
            try:
                Chem.SanitizeMol(product)
                smiles = Chem.MolToSmiles(product, isomericSmiles=True)
            except ValueError as error:
                continue # We are assuming this simply means an unphysical molecule has been generated
            if smiles in products: continue # Keep only new structures
            products[smiles] = product
    if products:
        changed = True
        logging.debug("apply_rule> there are {} products: will continue".format(len(products.values())))
        mols = list(products.values()) # Update list of mols
    else: # Finished...
        logging.debug("apply_rule> there were no products: will return")
        fixed = mols[0] if changed else None # If there are multiple possible 'fixed' molecules, just take the first
        break
fixed
# -
# Note also that it would be possible to deal with this issue by rewriting the SMARTS transforms to be more specific (_i.e._ by excluding cases such as the present one). However, it was felt that this would likely end up being more complicated than the chosen method, as multiple complicated SMARTS could become necessary to represent fairly simple transforms.
| standardiser2/docs/Rule_application_strategy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Initial Load of Survey Data Items
import pandas as pd
import shelve
import random
from surveyor import *
def open_config():
    """Open the persistent survey configuration shelf.

    The shelf is opened with writeback=True so that in-place mutations of
    stored mutable objects (e.g. list appends) are written back on close.
    """
    return shelve.open('config', writeback=True)
def show_available_categories(s):
    """Print a banner followed by each category stored in the shelf `s`."""
    banner = '-' * 10
    print(f"{banner}Available Categories{banner}")
    for name in s['available_categories']:
        print(name)
def add_category():
    """Interactively append a new category to the stored category list.

    Opens the config shelf, shows the current categories, reads a new
    category name from stdin, and appends it.
    """
    s = open_config()
    print(f"\nFunction: Add Category")
    # BUG FIX: show_available_categories requires the open shelf as an
    # argument; it was previously called with no arguments (TypeError).
    show_available_categories(s)
    new_category = input("Type new category:")
    # You must set writeback to True when opening shelve to allow for writing of mutable list below
    s['available_categories'].append(new_category)
    print(f"\nCategory Added!\n")
    show_available_categories(s)
def delete_category():
    """Interactively remove one category, chosen via makeChoice."""
    s = open_config()
    print(f"\nFunction: Delete Category")
    categories = s['available_categories']
    choice = makeChoice(list(categories))
    categories.remove(categories[choice])
def show_question_list(s):
    """Print the question table stored under 'question_list' in shelf `s`."""
    questions = s['question_list']
    print(f"List of Questions: \n{questions}")
def add_question(s):
    """Interactively add a question under a chosen category.

    The new row gets a random integer Index in [1, 10**5) and the updated
    DataFrame is written back to the shelf `s`.
    """
    df_questions = s['question_list']
    answer_category = makeChoice(list(s['available_categories']))
    answer_question = input("Type new question:")
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0; build the new row as a one-row frame and concatenate.
    new_row = pd.DataFrame([{'Index': random.randrange(1, 10**5),
                             'Category': s['available_categories'][answer_category],
                             'Question': answer_question}])
    df_questions = pd.concat([df_questions, new_row], ignore_index=True)
    s['question_list'] = df_questions
def reset_data():
    """Reset all survey meta-data in the shelf to built-in defaults.

    Prompts for confirmation on stdin; any answer other than "Y" cancels.
    """
    answer = input("Do you want to reset all survey meta-data? (Y/N)")
    if answer == "Y":
        print('Resetting....')
        # DataFrame.append was removed in pandas 2.0; build the default
        # question rows in a single constructor call instead.
        default_questions = [
            {'Index': random.randrange(1, 10**5), 'Category': 'Category 1', 'Question': 'Do you like math?'},
            {'Index': random.randrange(1, 10**5), 'Category': 'Category 1', 'Question': 'Do you like english?'},
            {'Index': random.randrange(1, 10**5), 'Category': 'Category 2', 'Question': 'Do you like your teacher?'},
            {'Index': random.randrange(1, 10**5), 'Category': 'Category 2', 'Question': 'Do you like homework?'},
            {'Index': random.randrange(1, 10**5), 'Category': 'Category 3', 'Question': 'Do you like tests?'},
        ]
        df_questions = pd.DataFrame(default_questions, columns=['Index', 'Category', 'Question'])
        # BUG FIX: the shelf returned by open_config() was previously
        # discarded, and an undefined global `s` was written to instead
        # (NameError unless a global `s` happened to exist).
        s = open_config()
        s['teacher_list'] = ['Mrs. Oneil', '<NAME>']
        s['subject_list'] = ['English', 'Math', 'Science']
        s['student_list'] = ['<NAME>', '<NAME>', '<NAME>']
        s['question_list'] = df_questions
        s['available_categories'] = ['Category 1', 'Category 2', 'Category 3']
        s.close()
    else:
        print('Resetting Cancelled')
# +
# Open the shared configuration shelf and show the current questions.
s = open_config()
show_question_list(s)
# -
# Exercise each of the interactive maintenance functions in turn.
delete_category()
add_category()
add_question(s)
show_question_list(s)
reset_data()
# Run an actual survey using the (re)configured data from the surveyor package.
from surveyor import *
import shelve
mysurvey = Survey()
mysurvey.configure_survey()
mysurvey.take_survey()
display(mysurvey.survey_answers)
| Survey_Admin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Использование yaml
# Импортируем необходимые библиотеки
import yaml
# Опишем конфигурацию, описывающую создание персонажа
hero_yaml = '''
--- !Character
factory:
!factory assassin
name:
7NaGiBaToR7
'''
# Используем абстрактную фабрику, использование которой будем конфигурировать
class HeroFactory:
    """Abstract hero factory.

    Subclasses provide nested Hero/Weapon/Spell classes; the classmethods
    below instantiate whichever nested class the concrete factory defines.
    """

    @classmethod
    def create_hero(cls, name):
        """Instantiate this factory's Hero with the given name."""
        return cls.Hero(name)

    @classmethod
    def create_weapon(cls):
        """Instantiate this factory's Weapon."""
        return cls.Weapon()

    @classmethod
    def create_spell(cls):
        """Instantiate this factory's Spell."""
        return cls.Spell()
# +
class WarriorFactory(HeroFactory):
    """Concrete factory producing warrior heroes and their gear."""

    class Weapon:
        def hit(self):
            return "Claymore"

    class Spell:
        def cast(self):
            return "Power"

    class Hero:
        """A warrior character; gear is attached after construction."""

        def __init__(self, name):
            self.name = name
            self.weapon = self.armor = self.spell = None

        def add_weapon(self, weapon):
            self.weapon = weapon

        def add_spell(self, spell):
            self.spell = spell

        def hit(self):
            # The f-string already invokes weapon.hit() once; the trailing
            # call repeats it, matching the original's double invocation.
            print(f"Warrior {self.name} hits with {self.weapon.hit()}")
            self.weapon.hit()

        def cast(self):
            print(f"Warrior {self.name} casts {self.spell.cast()}")
            self.spell.cast()
class MageFactory(HeroFactory):
    """Concrete factory producing mage heroes and their gear."""

    class Weapon:
        def hit(self):
            return "Staff"

    class Spell:
        def cast(self):
            return "Fireball"

    class Hero:
        """A mage character; gear is attached after construction."""

        def __init__(self, name):
            self.name = name
            self.weapon = self.armor = self.spell = None

        def add_weapon(self, weapon):
            self.weapon = weapon

        def add_spell(self, spell):
            self.spell = spell

        def hit(self):
            # The f-string already invokes weapon.hit() once; the trailing
            # call repeats it, matching the original's double invocation.
            print(f"Mage {self.name} hits with {self.weapon.hit()}")
            self.weapon.hit()

        def cast(self):
            print(f"Mage {self.name} casts {self.spell.cast()}")
            self.spell.cast()
class AssassinFactory(HeroFactory):
    """Concrete factory producing assassin heroes and their gear."""

    class Weapon:
        def hit(self):
            return "Dagger"

    class Spell:
        def cast(self):
            return "Invisibility"

    class Hero:
        """An assassin character; gear is attached after construction."""

        def __init__(self, name):
            self.name = name
            self.weapon = self.armor = self.spell = None

        def add_weapon(self, weapon):
            self.weapon = weapon

        def add_spell(self, spell):
            self.spell = spell

        def hit(self):
            # The f-string already invokes weapon.hit() once; the trailing
            # call repeats it, matching the original's double invocation.
            print(f"Assassin {self.name} hits with {self.weapon.hit()}")
            self.weapon.hit()

        def cast(self):
            # Unlike the warrior/mage factories, cast() does not repeat the
            # spell.cast() call after printing (original behaviour kept).
            print(f"Assassin {self.name} casts {self.spell.cast()}")
# -
# Опишем конструктор, который сможет обрабатывать узел `!factory`. Он будет возвращать соответствующую фабрику.
#
# Конструктор должен принимать 2 аргумента: `loader` и `node`. Объект `loader` — это загрузчик YAML, `node` — узел файла.
# Поскольку структура YAML-файла древовидная, то при первичном проходе обработчиком всё содержимое файла помещается в древовидную структуру, содержащую информацию файла в текстовом виде. `node` является узлом именно такого текстового дерева, а `loader` — загрузчик, умеющий обрабатывать `node`. По итогу, нижеследующий конструктор `factory_constructor` будет являться частью `loader` и будет вызываться им по необходимости.
#
# Для описанного выше YAML-файла: `loader` — загрузчик, «знакомый» с данным конструктором; `node` — хранит текст `assassin` (информация, хранящаяся за именем пользовательского типа `!factory`) и различную дополнительную информацию.
# Поскольку `assassin` — простой скаляр, то для его получения (без дополнительной информации) необходимо воспользоваться методом `construct_scalar`. Если бы после `!factory` располагался список, то необходимо было бы воспользоваться методом `construct_sequence` и т.д.
def factory_constructor(loader, node):
    """YAML constructor for the !factory tag.

    Reads the node's scalar value and maps it onto a concrete hero factory;
    any value other than "mage"/"warrior" falls back to AssassinFactory.
    """
    kind = loader.construct_scalar(node)
    factories = {"mage": MageFactory, "warrior": WarriorFactory}
    return factories.get(kind, AssassinFactory)
# Опишем класс `Character`, в который будут загружаться данные из yaml. Определим у него метод `create_hero`, позволяющий создать персонажа в соответствии с конфигурацией.
class Character(yaml.YAMLObject):
    """YAML-loadable character description (tag !Character).

    The YAML loader fills in `factory` (via the !factory constructor)
    and `name` attributes before create_hero() is called.
    """
    yaml_tag = "!Character"

    def create_hero(self):
        """Build the configured hero, equipped with a weapon and a spell."""
        hero = self.factory.create_hero(self.name)
        hero.add_weapon(self.factory.create_weapon())
        hero.add_spell(self.factory.create_spell())
        return hero
# Attach the constructor and create the character from the YAML configuration.
loader = yaml.Loader
loader.add_constructor("!factory", factory_constructor)
# BUG FIX: pass the Loader explicitly — yaml.load() without a Loader is
# deprecated (and an error in PyYAML >= 6), and this guarantees the loader
# carrying our !factory constructor is the one actually used.
hero = yaml.load(hero_yaml, Loader=loader).create_hero()
hero.hit()
hero.cast()
| oop_patterns_python/week_4/pyyaml/Simple_yaml.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np # linear algebra
import pandas as pd # data processing
import matplotlib.pyplot as plt
import seaborn as sns
import shap
sns.set(style = 'ticks',color_codes = True)
## Reading the data set from the local folder of the machine.
train_df = pd.read_csv('train.csv')
test_df = pd.read_csv('test.csv')
features_df = pd.read_csv('features.csv')
stores_df = pd.read_csv('stores.csv')
## Checking the first 5 columns of the train data frame.
train_df.head()
## Checking the data types of the columns in the train data frames.
train_df.dtypes
## Checking the data types of the test data frame.
test_df.dtypes
## Merging the stores data frame and the feature data frame(left_join).
train_dataset = train_df.merge(stores_df, how='left').merge(features_df, how='left')
# NOTE(review): unlike the training set, the test set is only joined with the
# store data (features_df is not merged in) — confirm this is intentional.
test_dataset = test_df.merge(stores_df, how='left')
# Viewing the first 5 rows of the train data set.
train_dataset.head()
## Describing the statistics of the data sets.
train_dataset.describe(include= 'all')
## Find out the Percentage of the NULL values for each individual feature.
(train_dataset.isna().sum()/len(train_dataset['Store']))*100
## Plotting the correlation matrix of the features in the data sets.
corr = train_dataset.corr()
f,ax = plt.subplots(figsize = (15,15))
# Diverging palette centred at 0 so positive/negative correlations stand out.
cmap = sns.diverging_palette(220,20,as_cmap=True)
sns.heatmap(corr,cmap=cmap,vmax=0.3,center = 0,annot = True,square = True
            ,linewidths=0.5,cbar_kws={'shrink':0.5})
plt.show()
shap.initjs()
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
# Missing promotional markdowns are treated as "no markdown ran" (0.0).
train_dataset[['MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5']] = train_dataset[['MarkDown1','MarkDown2','MarkDown3','MarkDown4','MarkDown5']].fillna(0.0)
# Derive calendar features (year / month / ISO week) from the raw date.
train_dataset['Year'] = pd.to_datetime(train_dataset['Date']).dt.year
train_dataset['Month'] = pd.to_datetime(train_dataset['Date']).dt.month
train_dataset['Week'] = pd.to_datetime(train_dataset['Date']).dt.isocalendar().week
# Dropping the Columns
train_dataset = train_dataset.drop(columns=['Date'])
# Moving the target variable to the end.
df = train_dataset.pop('Weekly_Sales')
train_dataset['Weekly_Sales'] = df
train_dataset.head()
# ## Feature Selection/Engineering
## Groupby "Store","Dept","Week","IsHoliday" features of the training data set.
# Median aggregation per (store, dept, week, holiday) group; any groups
# with remaining NaNs are filled with 0.
train_dataset_gp = train_dataset.groupby(['Store','Dept','Week','IsHoliday']).median().fillna(0.0)
train_dataset.head()
train_dataset_gp.head()
#Select everything from the list , not but the last one.
features_inp , target_feture = train_dataset_gp.columns[:-1],train_dataset_gp.columns[-1]
## Splitting into the feature and target data set.
features_data , target_data = train_dataset_gp[features_inp].copy(), train_dataset_gp[target_feture].copy()
numerical_column = train_dataset_gp[features_inp].select_dtypes(include = np.number).columns.to_list()
categorical_column = train_dataset_gp[features_inp].select_dtypes(include = 'object').columns.tolist()
## Developing the pipeline of imputation and min_max_scaler()
from sklearn.pipeline import make_pipeline
pipe = make_pipeline(SimpleImputer(),MinMaxScaler())
impu_scaler = pipe.fit(features_data[numerical_column])
features_data[numerical_column] = impu_scaler.transform(features_data[numerical_column])
def label_enco(df):
    """Label-encode every object/bool column of *df* in place.

    A fresh fit is performed per column (fit_transform refits the encoder
    each time), so codes are only meaningful within a single column.
    """
    encoder = LabelEncoder()
    for column in df.columns:
        if df[column].dtype in ("object", "bool"):
            df[column] = encoder.fit_transform(df[column])
#Performing the label encoding of the dataset if the feature is of type object or bool.
label_enco(features_data)
# 80/20 train/validation split with a fixed seed for reproducibility.
x_train,x_test,y_train,y_test = train_test_split(features_data[numerical_column + categorical_column]
                                                 ,target_data,test_size= 0.20,random_state=42)
x_train.head()
# ## Model Developement
from sklearn.tree import DecisionTreeRegressor
tree = DecisionTreeRegressor(random_state=0)
# %%time
tree.fit(x_train,y_train)
from sklearn.metrics import mean_squared_error
predictions = tree.predict(x_test)
# squared=False makes mean_squared_error return the RMSE directly.
tree_rmse = mean_squared_error(y_test,predictions,squared= False)
print('Validation RMSE:{}'.format(tree_rmse))
import matplotlib.pyplot as plt
from sklearn.tree import plot_tree, export_text
import seaborn as sns
sns.set_style('darkgrid')
# %matplotlib inline
plt.figure(figsize=(30,15))
# Only the top three levels of the fitted tree are drawn, for readability.
plot_tree(tree, feature_names=x_train.columns, max_depth=3, filled=True);
tree_importance = tree.feature_importances_
tree_importance_df = pd.DataFrame({'features' : x_train.columns,
                                   'importance': tree_importance}).sort_values('importance',ascending = False)
tree_importance_df
plt.title('Decision Tree Feature Importance')
sns.barplot(data = tree_importance_df.head(10),x='importance',y='features')
from sklearn.ensemble import RandomForestRegressor
rf_model = RandomForestRegressor(random_state= 0 , n_estimators = 10)
# %%time
rf_model.fit(x_train,y_train)
rf1_train_pred = rf_model.predict(x_test)
rf1_validation_rmse = mean_squared_error(rf1_train_pred,y_test, squared=False)
print('Validation RMSE:{}'.format(rf1_validation_rmse))
# +
#################################************************************************###############################################
| Walmart_Sales_Forecasting_SII.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv('all_seasons_working.csv')
# DATA CLEANING AND DEALING WITH OUTLIERS
# changing 'draft number' column from object type to int type
# (non-numeric entries are coerced to NaN and then become 0)
df.draft_number = pd.to_numeric(df.draft_number, errors='coerce').fillna(0).astype(np.int64)
# # group by name
# Career averages per player across all seasons.
mean_val_players = df.groupby('player_name').mean()
# dropping players with draft number not in range(1, 61) (we want to base on new form of draft where
# max draft number is 60)
drafted_players_grouped = mean_val_players[(mean_val_players['draft_number'] > 0) & (mean_val_players['draft_number'] < 61)]
# choosing only players that have played decent amount of games during their careers (avg half a season
# games played every year) and also choosing players that avg more than 4pts per game in a season
# - this is due to lack of a column 'minutes played'. I want to avoid a situation where a player with
# a lot of games played, was only 0-10 min on a court every game and his stats are lowered due to that.
drafted_players_grouped = drafted_players_grouped[(drafted_players_grouped['gp'] > 41) & (drafted_players_grouped['pts'] > 4)]
drafted_players_grouped
# +
# One subplot per statistic vs. draft number. The nine near-identical
# blocks of per-axes calls are replaced by a single loop over the stat
# columns; the rendered figure is identical.
fig, axes = plt.subplots(nrows=3, ncols=3, figsize=(40, 40))
stat_columns = ['pts', 'reb', 'ast', 'net_rating', 'oreb_pct',
                'dreb_pct', 'usg_pct', 'ts_pct', 'ast_pct']
for ax, col in zip(axes.flat, stat_columns):
    ax.plot(drafted_players_grouped[col], drafted_players_grouped['draft_number'], 'o')
    ax.set_title(col, fontsize=35)
    ax.set_ylabel('draft_number', fontsize=35)
plt.tight_layout();
# -
# LINEAR REGRESSION IMPLEMENTATION
# +
# separate into features and labels so we will create 2 matrixes
# features - remaining columns that are not your label column
X = drafted_players_grouped.drop(columns=['draft_number', 'age', 'gp', 'Unnamed: 0', 'player_height',
                                          'player_weight'], axis=1)
print(X)
# label column
y = drafted_players_grouped['draft_number']
print(y)
from sklearn.model_selection import train_test_split
# separates the features (x) and later labels (y) - do not change the order of unpacking
# test size - what percentage of data should go to the test set (around 30 percent)
# random_state - controls the shuffling of the data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
# -
X_test
# +
# help(train_test_split)
# -
# creating an estimator within Sklearn
from sklearn.linear_model import LinearRegression
# +
# read, look up to hyperparameters ( parameters to adjust for better performance)
# help(LinearRegression)
# -
# first time use default and check default performance, after that change parameters/hyperparameters
# it creates an instance of linear regression model
# (model will 'learn' the beta coefficients for the best fitting line.)
# NOTE(review): sklearn's LinearRegression solves ordinary least squares in
# closed form, not via gradient descent as the original comment suggested.
model = LinearRegression()
# now this 'model' instance is waiting for data to be trained on
# we do it by calling fit method on model
model.fit(X_train,y_train)
# +
# now we want to predict on test features and compare with y_test
test_predictions = model.predict(X_test)
# compare test predictions with y_test values (true values for X_test features)
from sklearn.metrics import mean_absolute_error, mean_squared_error
# checking the mean value of the target column
# NOTE(review): this is the mean of 'draft_number' (the label), not 'usg_pct'
# as an earlier comment claimed.
mean_val = drafted_players_grouped['draft_number'].mean()
print(mean_val)
# checking the mean absolute error
mean_abs_err = mean_absolute_error(y_test, test_predictions)
print(mean_abs_err)
# in comparison to avg value, mean absolute error is
# +
# checking the mean squared error
mean_sqr_err = mean_squared_error(y_test, test_predictions)
print(mean_sqr_err)
# to compare with mean_val we need to use sqrt func on mean_sqr_err
# checking the root mean squared error
root_mean_sqrt_err = np.sqrt(mean_sqr_err)
print(root_mean_sqrt_err)
# -
# Residuals: true label minus predicted value, per test sample.
test_residuals = y_test - test_predictions
test_residuals
fig = plt.gcf()
fig.set_size_inches(30, 20)
# checking if using linear regression is fine, the data below should be distributed randomly
# if its not we should consider not using linear regression
sns.scatterplot(x=y_test, y=test_residuals)
plt.axhline(y=0, color='red', ls='--')
# check if distribution is normal
sns.displot(test_residuals, bins=25, kde=True)
# +
import scipy as sp
# create a figure and axis to plot on
fig, ax = plt.subplots(figsize=(6,8), dpi=100)
# probplot returns the raw values if needed
# want to see the plot, so we assign these values to _ (discarded)
_ = sp.stats.probplot(test_residuals, plot=ax)
# The red line is 'normal distribution'
# +
# DEPLOYMENT OF THE MODEL
# this should be the same model that was adjusted on test set of data
final_model = model
# fit final model on a full data
# NOTE: refits the estimator on ALL rows (train + test) before saving.
final_model.fit(X,y)
# +
# checking the coefficients
# every coefficient is for another column in data
# (if coef ~ 0, there is no correlation)
final_model.coef_
# if I increase reb by one unit, i can expect decreasing draft_number by 0.8
# -
X.columns
# +
# Overlay the model's predictions (red) on the true draft numbers for a few
# representative features. The three repeated per-axes blocks are replaced
# by one loop; the rendered figure is identical.
y_hat = final_model.predict(X)
fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 6))
for ax, col in zip(axes, ['pts', 'ts_pct', 'ast']):
    ax.plot(drafted_players_grouped[col], drafted_players_grouped['draft_number'], 'o')
    ax.plot(drafted_players_grouped[col], y_hat, 'o', color='red')
    ax.set_title(col)
    ax.set_ylabel('draft_number')
plt.tight_layout();
# red color are values predicted by a model
# we can see that the model is not precise. We should try different approach to get the answer for
# our main question and compare results.
# -
# DEPLOYMENT OF A MODEL
# DEPLOYING A MODEL
from joblib import dump, load
# now we are saving a model
dump(final_model, 'final_draft_prediction.joblib')
# +
# loading ready model
loaded_model = load('final_draft_prediction.joblib')
# model coefficients:
loaded_model.coef_
# -
# Lets try to predict draft number for a player with stats like these:
# something around Lebron James career stats:
# NOTE(review): the value order must match X.columns exactly (presumably
# pts, reb, ast, net_rating, oreb_pct, dreb_pct, usg_pct, ts_pct, ast_pct)
# — confirm against the X.columns output above.
some_guy = [[27, 9, 7, 2.5, 0.08, 0.05, 0.3, 0.55, 0.3]]
loaded_model.predict(some_guy)
# we can see that prediction is not accurate, but we should have in mind Root mean squared error ~13.5.
# This is pretty big error for such a data set.
# POLYNOMIAL REGRESSION
# +
# Now we will try to use Polynomial Regression for this data
X_poly = drafted_players_grouped.drop(columns=['draft_number', 'age', 'gp', 'Unnamed: 0', 'player_height',
                                               'player_weight'], axis=1)
print(X_poly)
# label column
y_poly = drafted_players_grouped['draft_number']
print(y_poly)
from sklearn.preprocessing import PolynomialFeatures
# degree - it will create x^2 plot and include_bias - it will create columns of 1
# (the degree was changed to 1 due to smaller RMSE error - it will be shown below)
polynomial_converter = PolynomialFeatures(degree=1, include_bias=False)
# -
# we dont need to split (training, test data) it just yet, because it's not machine learning model,
# it's feature converter
polynomial_converter.fit(X_poly)
poly_features = polynomial_converter.transform(X_poly)
poly_features.shape
poly_features[0]
# +
from sklearn.model_selection import train_test_split
# changing X to poly_features (features with more columns)
# Same test_size/random_state as the plain linear model for comparability.
poly_X_train, poly_X_test, poly_y_train, poly_y_test = train_test_split(poly_features, y_poly, test_size=0.3, random_state=101)
# -
# +
from sklearn.linear_model import LinearRegression
poly_model = LinearRegression()
poly_model.fit(poly_X_train, poly_y_train)
poly_test_predictions = poly_model.predict(poly_X_test)
print(poly_model.coef_)
# +
from sklearn.metrics import mean_squared_error, mean_absolute_error
MAE = mean_absolute_error(poly_y_test, poly_test_predictions)
print(MAE)
MSE = mean_squared_error(poly_y_test, poly_test_predictions)
print(MSE)
RMSE = np.sqrt(MSE)
print(RMSE)
# -
# PLOTTING THE RESULTS FOR DIFFERENT FUNCTION DEGREE
# +
# creating a loop for polynomial regression to check how our regression model behaves on
# different function degree
train_RMSE_error = []
test_RMSE_error = []
for d in range(1,10):
    poly_converter1 = PolynomialFeatures(degree = d, include_bias=False)
    poly_features1 = poly_converter1.fit_transform(X_poly)
    poly2_X_train, poly2_X_test, poly2_y_train, poly2_y_test = train_test_split(poly_features1, y_poly, test_size=0.3, random_state=101)
    poly2_model = LinearRegression()
    poly2_model.fit(poly2_X_train, poly2_y_train)
    poly2_train_pred = poly2_model.predict(poly2_X_train)
    poly2_test_pred = poly2_model.predict(poly2_X_test)
    # RMSE on train and test separately so over/under-fitting is visible.
    poly2_train_rmse = np.sqrt(mean_squared_error(poly2_y_train, poly2_train_pred))
    poly2_test_rmse = np.sqrt(mean_squared_error(poly2_y_test, poly2_test_pred))
    train_RMSE_error.append(poly2_train_rmse)
    test_RMSE_error.append(poly2_test_rmse)
# -
# it behaves naturally - when we increases degree of a function, the rmse gettin lower
# (for higher degrees we got some anomaly due to huge overfitting)
train_RMSE_error
test_RMSE_error
# +
# Only the first three degrees are plotted; higher degrees blow up the scale.
plt.plot(range(1, 4), train_RMSE_error[:3], label='Train RMSE')
plt.plot(range(1, 4), test_RMSE_error[:3], label='Test RMSE')
plt.xlabel('Degree_of_polynomial_regression')
plt.ylabel('RMSE')
plt.legend()
# based on both train_RMSE_error and test_RMSE_error - the model is overfitted and does not give
# results that we expected. RMSE for trainig data is decreasing but RMSE for test data does not follow
# this trend.
# We can also say that the best performance is reached when the degree of the polynomial model is 1 or 2
# The next step would be to regularize data and check how our model would behave/change.
# -
| nba_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Portable Hadoop Environment - Exercise 4
#
# Welcome, if you're reading this then you've successfully opened the Jupyter Notebook for Exercise 4. We will now cover several tasks to help you analyze Hive data using Python from within this notebook.
#
# The first step is importing the necessary libraries.
#
# In the following two cells you can see that we've installed the matplotlib and pyhive libraries by using !pip. This executes pip in on the client node and installs modules automatically. Then we've imported those modules.
# For any missing modules, you can use pip from within a cell as seen here:
# !pip install matplotlib
# !pip install pyhive
# +
# Pandas is a python data handling library
# Matplot lib is used for plotting charts and graphs
# Pyhive is the library used for accessing Hive
import pandas as pd
import matplotlib.pyplot as plt
from pyhive import hive
# -
# Now that the modules have been imported successfully we can get to work on the data!
#
# We're going to reuse the data loaded in Exercise 2. The next cell shows how to connect to hive and execute a query which populates a Pandas dataframe for further analysis.
# +
# In the Portable Hadoop Environment, you only need the host and port specified for the connection
conn = hive.Connection(host='hive-server',port='10000')
# Panda's read_sql method makes querying hive incredibly simple
# Inner-join trucks to drivers on their shared location column.
df = pd.read_sql(con=conn, sql='SELECT * FROM truckingco.trucks t JOIN truckingco.drivers d on t.location = d.location')
# Now we can take a look at the first 5 records
df.head(5)
# -
# This is great. Now that we've created the dataframe we can start aggregating and plotting graphs.
#
# Lets start by doing just a little bit of data cleanup. We'll do this by creating a new dataframe but only selecting a subset of columns from the previous one.
# Take an explicit copy so that later column assignments (e.g. "d.fullname")
# modify this frame rather than a view of df, avoiding pandas'
# SettingWithCopyWarning and potential silent no-op writes.
data = df[["t.location","d.firstname","d.lastname","d.startdate","d.rate"]].copy()
data.head(5)
# Okay now with that data, let's start by doing a quick analysis on the "d.rate" column, or the payrate for the drivers.
# Summary statistics for the driver pay-rate column.
data.agg(
    {
        "d.rate": ["min", "max", "median", "mean"]
    })
# That's some good information to report on. Now lets do some work with dataframes to get the counts for each driver's routes driven.
# +
# Concatenating to create a column that contains the drivers full name
data["d.fullname"] = data["d.firstname"] + " " + data["d.lastname"]
# BUG FIX: the original called data.set_index(["d.fullname"]) here and
# discarded the result — set_index is not in-place, so the call had no
# effect. Assigning the result would also break the value_counts lookup
# below (the column would move into the index), so the dead call is removed.
# Using value_counts() to get the counts for each driver and creating a new dataframe to hold that result
driver_counts = data["d.fullname"].value_counts()
driver_counts
# -
# Now let's plot a graph for the number of times driven by each driver.
# +
# Line plot of per-driver route counts (index = driver name).
plt.plot(driver_counts)
plt.xlabel('Driver Name')
plt.ylabel('# Driven')
plt.title('Count of routes driven by driver')
plt.show()
# -
# As you can see using Python and Jupyter notebooks is a great way to dissect and analyze data from Hive. We've only begun to scratch the surface with this example. Feel free to play around further with this notebook or try creating your own. Once you're finished please proceed to Challenge 2!
| documentation/exercise_4/PHE - Exercise 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="tmMP3NSw1cD0" colab_type="text"
# <h1> Quiz 1 : Pemahaman Tentang Model Evaluasi</h1>
#
# Jawab pertanyaan di bawah ini dengan bahasa kalian masing2?
#
# 1. Apa perbedaan antara data latih, data validasi, dan data test?
# 2. Bagaimana cara kita menilai performa suatu model?
# 3. Apa itu Confusion Matriks? Jelaskan secara lengkap!
# 4. Apa itu Classification Report dari sklearn?
# + [markdown] id="xhJH7v1s1ntU" colab_type="text"
# 1. Data latih adalah sekumpulan data yang digunakan untuk dipelajari dengan diberikan input dan output yang diharapkan, sedangkan data validasi adalah sekumpulan data yang digunakan untuk mengevaluasi data training apakah sudah akurat atau belum, jika belum maka akan dilakukan training kembali, sementara data test adalah sekumpulan data yang ditujukan untuk mengetahui apakah suatu model lolos atau tidak dengan catatan tidak menggunakan data training melainkan data yang baru.
#
# 2. Dengan menggunakan metode cross validation yang di dalamnya terdapat validation fold dan training fold untuk menilai suatu performa suatu model.
#
# 3. Confusion matriks adalah suatu matriks yang lebih detail dalam menilai suatu model yang didalamnya terdapat predictive values dan actual values. Kedua values tersebut dijadikan 4 tebakan dibanding akurasi yang hanya 2 tebakan benar atau salah yaitu:
# - Tebakan Benar : Kenyataan Benar, Positif (True Positif)
# - Tebakan Salah : Kenyataan Benar, Negatif (False Negatif)
# - Tebakan Benar : Kenyataan Salah, Negatif (False Positif)
# - Tebakan Salah : Kenyataan Salah, Positif (True Negatif)
#
# 4. Suatu function yang digunakan untuk membuat laporan teks yang menunjukkan matriks klasifikasi utama.
# + [markdown] id="FK4o8-Ga1cET" colab_type="text"
# <h1>Quiz 2 : Applikasi Model Evaluasi</h1>
#
# Kali ini kita akan menggunakan data untuk memprediksi kelangsungan hidup pasien yang telah mengalami operasi payu dara. Dengan informasi yang dimiliki terkait pasien, kita akan membuat model untuk memprediksi apakah pasien akan bertahan hidup dalam waktu lebih dari 5 tahun atau tidak.
#
# Lebih Lengkapnya kalian bisa membaca informasi tentang dataset di link berikut : https://raw.githubusercontent.com/jbrownlee/Datasets/master/haberman.names
#
# Buat model Klasifikasi (Model/Algoritma Bebas) untuk memprediksi status pasien. dengan ketentuan sebagai berikut :
#
# 1. Bagi kedua data ini menjadi data training dan data test dengan test_size=0.25.
# 2. Pelajari tentang metrics roc_auc_score kemudian buatlah model dan evaluasi dengan menggunakan teknik cross-validation dengan scoring 'roc_auc'. baca https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html untuk menggunakan metric roc_auc saat cross-validation.
# 3. Berapa score rata2 dari model dengan teknik cross-validation tersebut?
# 4. Prediksi data test dengan model yang telah kalian buat!
# 5. Bagaimana hasil confusion matrix dari hasil prediksi tersebut?
# 6. Bagaimana classification report dari hasil prediksi tersebut?
# 7. Seberapa baik model anda dalam memprediksi seorang pasien mempunyai status positive?
# 8. Seberapa baik model anda dalam memprediksi seorang pasien mempunyai status negatif?
# + id="vuCVgjGB1cEs" colab_type="code" colab={}
import pandas as pd
# Haberman survival dataset: age at operation, year of operation, number of
# positive axillary nodes, plus the survival-status label.
url = 'https://raw.githubusercontent.com/jbrownlee/Datasets/master/haberman.csv'
list_cols = ['Age', "Patient's Years", "N_positive_ax", "survival_status"]
df = pd.read_csv(url, names=list_cols)
# + id="8jApCyM-1cGq" colab_type="code" outputId="e9f0ac6c-0ef2-47eb-b026-8ef54b6ad1cd" colab={"base_uri": "https://localhost:8080/", "height": 206}
df.head()
# + id="1RM5rwEw1cIP" colab_type="code" outputId="ffb32d24-4515-45c8-bfb9-dc1d9d6dbdca" colab={"base_uri": "https://localhost:8080/", "height": 69}
# Class balance of the target label.
df['survival_status'].value_counts()
# + id="QTnmQucvmuXZ" colab_type="code" outputId="fe4ae1a1-5855-4365-9a99-c2a8e3f9eea8" colab={"base_uri": "https://localhost:8080/", "height": 404}
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.metrics import auc
from sklearn.metrics import plot_roc_curve
from sklearn.model_selection import StratifiedKFold
# Features / target; 200 * n_features noisy columns are appended to make the
# classification task harder (as in the sklearn ROC-with-CV example).
X = df.drop('survival_status', axis=1)
y = df['survival_status']
n_samples, n_features = X.shape
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True, random_state=random_state)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
fig, ax = plt.subplots(figsize=(12,6))
# One ROC curve per CV fold; interpolated TPRs are collected for the mean curve.
for i, (train, test) in enumerate(cv.split(X, y)):
    classifier.fit(X[train], y[train])
    viz = plot_roc_curve(classifier, X[test], y[test], name='ROC fold {}'.format(i), alpha=0.3, lw=1, ax=ax)
    interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
    # BUG FIX: a ROC curve starts at (FPR=0, TPR=0); the interpolated curve
    # was previously anchored at TPR=0.25, inflating the mean curve.
    interp_tpr[0] = 0.0
    tprs.append(interp_tpr)
    aucs.append(viz.roc_auc)
# Chance (diagonal) reference line.
ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color='b', label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc), lw=2, alpha=.8)
# Shade +/- one standard deviation around the mean TPR.
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2, label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05], title="Receiver operating characteristic example")
ax.legend(loc="lower right")
plt.show()
# + id="vwWfBpnslQoa" colab_type="code" outputId="233e22f3-baf5-4206-bf65-218432d85f86" colab={"base_uri": "https://localhost:8080/", "height": 34}
from sklearn.model_selection import cross_val_score
# Question 2/3 asks for cross-validation scored with ROC-AUC; the default
# scoring (accuracy for SVC) was being used before, so pass scoring='roc_auc'.
cv_result = cross_val_score(classifier, X, y, cv=10, scoring='roc_auc')
cv_result.mean()
# + id="kt6paFMbc3Fw" colab_type="code" outputId="e4059cb1-8982-4108-a8ae-c18a15761043" colab={"base_uri": "https://localhost:8080/", "height": 69}
# No 4
# Predict with the fitted classifier.
# NOTE(review): `test` is the index array left over from the LAST fold of the
# cross-validation loop above — this scores only one fold, not a proper
# train/test split with test_size=0.25 as the exercise asked.
y_pred = classifier.predict(X[test])
y_pred
# + id="zlptePvldZqh" colab_type="code" outputId="1eab818c-c447-4ef2-9525-b5e188cf4052" colab={"base_uri": "https://localhost:8080/", "height": 52}
# No 5
from sklearn.metrics import confusion_matrix, classification_report
# labels=[2, 1] puts class 2 first along both axes of the matrix.
confusion_matrix(y[test], y_pred, labels=[2, 1])
# + id="a6Yk_B59n_XN" colab_type="code" colab={}
# Counts transcribed by hand from the confusion-matrix output above;
# they will be stale if the model, data, or split changes.
TP = 6
FP = 5
FN = 8
TN = 32
# + id="bhDjQJJwdkEf" colab_type="code" outputId="47d32bea-3847-4bc2-961f-f987fcb2c359" colab={"base_uri": "https://localhost:8080/", "height": 173}
# No 6
print(classification_report(y[test], y_pred))
# + id="FiOU18C4dl61" colab_type="code" outputId="3ba32daf-432d-4fd5-a7bb-844d7dab770c" colab={"base_uri": "https://localhost:8080/", "height": 69}
# No 7
# Precision / recall / F1 for the positive class, from the hand-copied counts.
precision = TP/(TP+FP)
recall = TP/(TP+FN)
f1score = 2 * precision * recall / (precision + recall)
print(precision)
print(recall)
print(f1score)
# + id="y69YGhlgdqBJ" colab_type="code" outputId="605e917b-0759-4a02-bb30-eb0d4c7cb7c1" colab={"base_uri": "https://localhost:8080/", "height": 69}
# No 8
# Same metrics for the negative class (roles of the counts swapped).
precision2 = TN/(TN+FN)
recall2 = TN/(TN+FP)
f1score2 = (precision2 * recall2 * 2) / (precision2 + recall2)
print(precision2)
print(recall2)
print(f1score2)
# + id="pU8hnQIivcg9" colab_type="code" colab={}
| Learn/Week 4 Machine Learning/Week_4_Day_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip3 install tqdm requests dill
# +
import requests
from tqdm import tqdm
import os
def download_from_url(url, dst):
    """Download `url` to `dst` with a tqdm progress bar, resuming partial files.

    If `dst` already exists, a Range request continues from its current size.
    Returns the total file size in bytes as reported by the server.
    NOTE(review): assumes the server sends a Content-Length header and
    honours Range requests — verify for new hosts.
    """
    file_size = int(requests.head(url).headers["Content-Length"])
    first_byte = os.path.getsize(dst) if os.path.exists(dst) else 0
    if first_byte >= file_size:
        return file_size  # already fully downloaded
    header = {"Range": "bytes=%s-%s" % (first_byte, file_size)}
    pbar = tqdm(
        total=file_size, initial=first_byte,
        unit='B', unit_scale=True, desc=url.split('/')[-1])
    req = requests.get(url, headers=header, stream=True)
    with open(dst, 'ab') as f:
        for chunk in req.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                # BUG FIX: advance by the actual chunk length; the last chunk
                # is usually shorter than 1024, so update(1024) over-counted.
                pbar.update(len(chunk))
    pbar.close()
    return file_size
# -
download_from_url('https://raw.githubusercontent.com/sjwhitworth/golearn/master/examples/datasets/mnist_train.csv',
'mnist_train.csv')
# +
from sparkflow.graph_utils import build_graph
from sparkflow.tensorflow_async import SparkAsyncDL
import tensorflow as tf
from pyspark.ml.feature import VectorAssembler, OneHotEncoder
from pyspark.ml.pipeline import Pipeline
from sparkflow.graph_utils import build_adam_config
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
def rnn_model():
    # Build a TF1 graph: a single-layer LSTM over the 28 rows of an MNIST
    # image, followed by a dense 10-way head. Returns the loss tensor;
    # the argmax is named 'out' so sparkflow can fetch it via tfOutput.
    x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
    y = tf.placeholder(tf.float32, shape=[None, 10], name='y')
    # Reshape flat pixels to (batch, 28 time steps, 28 features per step).
    x = tf.reshape(x, shape=[-1, 28, 28])
    def cells(reuse=False):
        # 64-unit LSTM cell with orthogonal weight initialization.
        return tf.nn.rnn_cell.LSTMCell(64,initializer=tf.orthogonal_initializer(),reuse=reuse)
    outputs, _ = tf.nn.dynamic_rnn(cells(), x, dtype = tf.float32)
    # Classify from the output of the last time step only.
    out = tf.layers.dense(outputs[:,-1], 10)
    z = tf.argmax(out, 1, name='out')
    loss = tf.losses.softmax_cross_entropy(y, out)
    return loss
# -
from pyspark.sql import SparkSession
from pyspark.sql.functions import rand
# Read the MNIST CSV (first column = label, remaining 784 = pixel values).
sparkSession = SparkSession.builder.appName("csv").getOrCreate()
df = sparkSession.read.csv('mnist_train.csv',header=True,inferSchema=True)
# Pack the 784 pixel columns into a single 'features' vector column.
va = VectorAssembler(inputCols=df.columns[1:785], outputCol='features').transform(df)
va.select('label').show(1)
# One-hot encode the label; dropLast=False keeps all 10 classes.
encoded = OneHotEncoder(inputCol='label', outputCol='labels', dropLast=False).transform(va).select(['features', 'label', 'labels'])
# Serialize the TF1 graph built by rnn_model and configure the Adam trainer.
mg = build_graph(rnn_model)
adam_config = build_adam_config(learning_rate=0.001, beta1=0.9, beta2=0.999)
spark_model = SparkAsyncDL(
    inputCol='features',
    tensorflowGraph=mg,
    tfInput='x:0',
    tfLabel='y:0',
    tfOutput='out:0',
    tfOptimizer='adam',
    miniBatchSize=300,
    miniStochasticIters=1,
    shufflePerIter=True,
    iters=50,
    predictionCol='predicted',
    labelCol='labels',
    partitions=3,
    verbose=1,
    optimizerOptions=adam_config
)
fitted_model = spark_model.fit(encoded)
predictions = fitted_model.transform(encoded)
predictions.show(1)
# NOTE(review): accuracy is evaluated on the same data the model was trained
# on — there is no held-out split, so this overstates generalization.
evaluator = MulticlassClassificationEvaluator(
    labelCol="label", predictionCol="predicted", metricName="accuracy")
accuracy = evaluator.evaluate(predictions)
print("Test Error = %g" % (1.0 - accuracy))
accuracy
| notebooks/9.mnist-sparkflow-rnn-lstm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Resumo, Teoria e Prática - Equações Diferenciais
# > Autor: <NAME><br>
# > Contato: <EMAIL><br>
# > Repo: [@mirandagil](https://github.com/mirandagil/university-courses/analise-numerica-edo-2019-1)<br>
# > Fontes bibliográficas:
# * <NAME>. (2017). <i>Equações Diferenciais</i>.
# * <NAME>. & <NAME>. (1997) <i>Numerical Linear Algebra</i>. SIAM
# * <NAME> (1988) <i>Mecânica</i>. CAMPUS
#
#
# `last update: 06/04/2019`
#
# ---
# # O que é uma EDO
#
# Equações Diferenciais estão presentes em diversos modelos em física, química, biologia, economia, engenharia, etc. Vários fenômenos envolvem a variação de uma quantidade em relaçao a outra, levando naturalmente a modelos baseados em equações diferenciais. Podemos ter variações temporais de, por exemplo, a posição de um objeto, a temperatura de um material, a concentração de um agente químico, a concentração de um poluente ou nutriente em um meio, a umidade do ar, o número de habitantes de uma cidade, a densidade de bactérias de uma cultura, a densidade de massa de um gás, o valor de uma mercadoria, o câmbio entre moedas, o produto interno bruto de um país, etc. Além de variações temporais dessas quantidades, podemos ter variações em relação a outras quantidades, como variação de temperatura em relação à posição e variação de densidade de massa de um fluido em relação à temperatura, por exemplo.
#
#
#
#
# As equações diferenciais são expressões matemáticas de certas leis envolvidas em uma modelagem, que podem, por exemplo, ser leis fundamentais, como a segunda lei de Newton, empíricas, como em reações químicas, ou heurísticas, como em dinâmica populacional.
# <br><br><br>
# Uma equacão diferencial é uma equação cuja incógnita é uma função e cuja equação envolve derivadas dessa função procurada. Mais especificamente, consideramos uma equação da forma
# $$
# F\left(t,x,\frac{\mathrm{d}x}{\mathrm{d}t}, \dots, \frac{\mathrm{d}^nx}{\mathrm{d}t^n} \right) = 0
# $$
# onde $t$ é uma variável independente, $F = F (t, x, x_1,\dots, x_n) $ é uma função $F: \mathbb{R}^{n+2} \to \mathbb{R}$ e $x = x(t)$ é uma variável dependente, que é a função procurada (incógnita). Esta é uma equação de ordem $n$, indicando a derivada de ordem mais alta presente na equação.
#
# REFERÊNCIA: <NAME> . **Equações Diferenciais **, 2017.<br>
# http://www.labma.ufrj.br/~rrosa/dvifiles/apostila-ed-maio2017.pdf
#
#
# <NAME>, Medalha Fields, falando sobre motivação e utilidade das equações diferenciais (em inglês com legendas)
# https://www.youtube.com/watch?v=o9zQpQjfQ80
#
# 3Blue1Brown, Canal de divulgação matemática com vídeo explicativo sobre EDOs
# https://www.youtube.com/watch?v=p_di4Zn4wz4
#
#
# ### Uma descrição mais 'ingênua'
# Equações diferenciais são equações que modelam vários fenomenos do mundo em que vivemos, essas equações estão ligadas a maneira que esses fenomenos mudam. Quando dizemos que queremos resolver uma equação diferencial, é por que queremos descobrir como esse fenomeno ocorre, ou como estará no futuro mas sabemos apenas como ele muda, seja como ele muda a cada minuto como por exemplo a chuva que vai ficando mais forte ou mais fraca, ou se muda a cada lugar diferente que está sendo observado, por exemplo medir a temperatura próximo do ar condicionado numa sala ou longe do ar na mesma sala.
# ---
# ## Solução de EDOs
# +
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import odeint
import sympy
from sympy import Function, dsolve, Eq, Derivative, symbols, init_printing, plot, Matrix, exp
# -
# #### Um modelo simples
# Vamos primeiro olhar para uma EDO bem simples, que modela decaimento radioativo com uma massa inicial $y_0$ e coeficiente de decaimento $\lambda$
# $$
# \dot{y} = -\lambda y\\
# \dot{y} + \lambda y = 0\\
# e^{\lambda t} (\dot{y} + \lambda y) = 0\\
# (e^{\lambda t} y)' = 0 \\
# e^{\lambda t} y = c \\
# \therefore y = ce^{-\lambda t}
# $$
# Onde a condição inicial $y_0 = c$, pois
# $$
# y(0) = c e^{-\lambda \cdot 0}\\
# y(0) = c
# $$
#
# #### Visualizando soluções
# $y_0 = 5, \lambda = 0.3$
# +
def model1(y, t, l):
    """Right-hand side of the radioactive-decay ODE: dy/dt = -l * y."""
    decay = l * y
    return -decay
ts = np.linspace(0,20)
# CONSISTENCY FIX: the text above states y0 = 5 (and the next cell integrates
# from 5), but this cell started from 50 — use 5 so plot and prose agree.
ys = odeint(model1,5,ts,args=(0.3,))
plt.plot(ts,ys,label=0.3)
plt.legend(title='$\lambda$')
plt.ylabel('massa')
plt.xlabel('tempo')
plt.show()
# -
# Aqui podemos ver o 'poder' de modelagem de uma EDO, o gráfico nos diz que: Um elemento com massa 5 levará 20 unidades de tempo para chegar a uma massa 0.
#
# Vamos observar que alterando o coeficiente $\lambda$, obtemos uma nova solução, para isso segue um plot com $\lambda$ variando de 0.1 a 0.5
# Plot decay curves for a range of decay constants (0.1 .. 0.5).
lambdas = np.linspace(0.1,0.5,num=5)
ts = np.linspace(0,20)
for decay_const in lambdas:
    trajectory = odeint(model1, 5, ts, (decay_const,))
    plt.plot(ts, trajectory, label=str(decay_const))
plt.ylabel('massa')
plt.xlabel('tempo')
plt.title('Visualização com Diferentes $\lambda$')
plt.legend(title='$\lambda$')
plt.show()
# ---
# ## EDOs de ordem superior
#
# Vamos olhar para a modelagem do sistema massa-mola, novamente temos uma EDO a ser resolvida.
#
# $$
# m\frac{\mathrm{d}^2 x}{\mathrm{d}t^2} = -kx\\
# \frac{\mathrm{d}^2 x}{\mathrm{d}t^2} = -\frac{k}{m}x
# $$
# Para ter um pouco de intuição física e algébrica sobre com o que estamos lidando, vamos fazer a seguinte mudança de varíaveis: $\omega_0 = \sqrt{\frac{k}{m}}$<br>
# O motivo ficará claro quando chegarmos a solução.<br>
# Portanto agora nossa EDO tem a seguinte cara
# $$
# \frac{\mathrm{d}^2 x}{\mathrm{d}t^2} = -\omega_0^2x
# $$
#
#
# ##### Solução analítica
# $$
# \frac{\mathrm{d}^2 x}{\mathrm{d}t^2} = -\omega_0^2x\\
# \frac{\mathrm{d}^2 x}{\mathrm{d}t^2} + \omega_0^2x = 0\\
# $$
# Tomando o operador linear $D^2$
# $$
# (D^2+\omega_0^2)x = 0\\
# $$
# Podemos olhar para $(D^2+\omega_0^2)$ como um polinômio do segundo grau em D, e portanto fatorar
# $$
# (D-r_1)(D-r_2)x = 0\\
# $$
# Onde $r_n$ são as raízes do polinômio. Se chamarmos o termo $(D-r_2)x = z$
# $$
# (D-r_2)x = z\\
# \therefore (D-r_1)z = 0
# $$
# Mas esta é uma EDO de primeira ordem, a qual conhecemos a solução geral:
# $$
# (D-r_1)z = 0\\
# \frac{\mathrm{d}}{\mathrm{d}t}z - r_1z = 0\\
# \therefore z = c_1 e^{r_1 t}
# $$
# Voltando na equação $(D-r_2)x = z$
# $$
# (D-r_2)x = z\\
# \frac{\mathrm{d}}{\mathrm{d}t} x - r_2 x = c_1 e^{r_1 t}\\
# e^{-r_2 t}(\frac{\mathrm{d}}{\mathrm{d}t} x - r_2 x) = (c_1 e^{r_1 t})e^{-r_2 t}\\
# \frac{\mathrm{d}}{\mathrm{d}t}(e^{-r_2 t} x) = c_1 e^{(r_1-r_2) t}\\
# e^{-r_2 t} x = \frac{c_1}{r_1 - r_2} e^{(r_1-r_2) t}\\
# \therefore x = c_2 e^{r_1t} + c_3 e^{r_2 t}
# $$
# Com $r_1 \neq r_2$ <br>
# A solução de $(D^2+\omega_0^2) = 0$, claramente, é $r_1 = i\omega_0, r_2 = -i\omega_0$<br>
# Temos então duas raízes complexas, vamos olhar para algumas propriedades dos complexos, seja $C \in \mathbb{C}$, chamamos de $C^*$ o conjugado complexo de $C$
# $$
# C = a + bi \\
# C^* = a - bi\\
# C + C^* = a\\
# C - C^* = 2bi
# $$
# Tomando $C_1 = C, C_2 = C^*$, nossa solução se torna:
# $$x(t) = C e^{i\omega_0t} + C^* e^{-i\omega_0t}$$
# Passando $C$ para coordenadas polares
# $$
# C = r e^{i\theta} \\
# C^* = r e^{-i\theta}\\
# r = \sqrt{a^2 + b^2}\\
# tg(\theta) = \frac{b}{a}\\
# a = r cos(\theta)\\
# b = r sen(\theta)\\
# \theta \in \left[-\frac{\pi}{2},\dfrac{\pi}{2}\right]\\
# $$
# Para carregar menos a notação, chamaremos $r = \frac{1}{2}A$, podemos reescrever agora, usando a identidade de Euler:
#
# $$
# \begin{align}
# x(t) &= \frac{1}{2}A e^{i\theta} e^{i\omega_0t} + \frac{1}{2}A e^{-i\theta}e^{-i\omega_0 t}\\
# & = \frac{1}{2}A e^{i(\omega_0t + \theta)} + \frac{1}{2}A e^{-i(\omega_0t + \theta)}\\
# & = \frac{1}{2}A \big[cos(\omega_0t + \theta) + i sen(\omega_0t + \theta)\big] + \frac{1}{2} A \big[cos(\omega_0t + \theta) - isen(\omega_0t + \theta)\big] \\
# & = \frac{1}{2}A \big[cos(\omega_0t + \theta) + i sen(\omega_0t + \theta) + cos(\omega_0t + \theta) - i sen(\omega_0t + \theta) \big]\\
# x(t) &= A cos(\omega_0t + \theta)
# \end{align}
# $$
# Onde temos um significado para cada valor:
# $$
# \begin{align}
# A &\to \text{Amplitudade da curva senoidal} \\
# \omega_0 &\to \text{Frequência Angular} \\
# \theta &\to \text{Fase, ou defasagem da curva senoidal}
# \end{align}
# $$
#
#
#
#
#
#
#
#
#
#
#
#
#
#
# #### Passar para exponencial complexa e identidade de euler
# Podemos representar como um sistema de equações diferenciais
# $$
# \begin{cases}
# \frac{\mathrm{d}x}{\mathrm{d}t} &= v \\
# \frac{\mathrm{d}v}{\mathrm{d}t} &= -\omega_0^2 x - \omega_1^2 v
# \end{cases}
# $$
# Vamos reescrever o sistema de equações como uma equação matricial<br>
# $$
# Ay = \frac{\mathrm{d}y}{\mathrm{d}t}
# $$
# Onde
# $$
# y = \begin{pmatrix} x \\ v \end{pmatrix}, \\
# A = \begin{pmatrix} 0 & 1 \\ -\omega_0^2 & - \omega_1^2\end{pmatrix}
# $$
#
#
# Portanto podemos escrever como
#
# $$
# \frac{\mathrm{d}}{\mathrm{d}t}\begin{pmatrix} \dot{y} \\ y \end{pmatrix} = \begin{pmatrix} -\omega^2_0y \\ \dot{y} \end{pmatrix}
# $$
#
# Agora escrevemos um modelo de forma vetorial
| analise-numerica-edo-2019-1/.ipynb_checkpoints/EDOs-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # License Plate Detection with OpenCV
#
# In this project we demonstrate how to use OpenCV only, with traditional computer vision approaches, to perform License Plate Detection (LPD).
#
# We follow two approaches:
#
# 1- __Morphology based approach__: where only morphological transforms are used, along with some rules to detect the LP.
#
# 2- __Charater based approach__: in addition to basic morphological approaches, basic char detection, also based on morphology, is used as an extra characteristic of the LP.
#
# Further, the problem of Licence Plate Recognition (LPR), by recognizing the number and digits written, can be addressed by the second approach.
#
# In both approaches, we load HD videos (1080p). Due to the camera position, this is the most effective resolution to detect LP patterns.
#
# In both approaches we merge car detection, using background subtraction, to narrow the search space.
# # Pre-requisites
#
# You need to install the packages in `requirements.txt`:
#
# `pip install -r requirements.txt`
# +
import numpy as np
import cv2
import imutils
import os
from tqdm import tqdm
import matplotlib.pyplot as plt
# -
# Project constants
# Drawing colors in OpenCV's BGR channel order.
SCALAR_BLACK = (0.0, 0.0, 0.0)
SCALAR_WHITE = (255.0, 255.0, 255.0)
SCALAR_YELLOW = (0.0, 255.0, 255.0)
SCALAR_GREEN = (0.0, 255.0, 0.0)
SCALAR_RED = (0.0, 0.0, 255.0)
# +
# Helper functions
def plot_img(img):
    """Display an OpenCV BGR image with matplotlib (converted to RGB)."""
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    plt.imshow(rgb)
    plt.show()
def draw_oriented_bbox(frame, bbox):
    """Draw the minimum-area (oriented) rectangle around `bbox` points on `frame`.

    `bbox` is a point array accepted by cv2.minAreaRect (e.g. a contour).
    Draws in red with thickness 10; modifies `frame` in place.
    """
    rotrect = cv2.minAreaRect(bbox)
    center, size, theta = rotrect
    box = cv2.boxPoints(rotrect)
    # PORTABILITY FIX: np.int0 was removed in NumPy 2.0; np.intp is the
    # equivalent platform-sized integer type.
    box = box.astype(np.intp)
    cv2.drawContours(frame, [box], 0, SCALAR_RED, 10)
# -
# # Moving object detection (MOD)
#
# In this part, we show how to detect and isolate the car box.
#
# We use background subtraction. [See this reference](https://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/). This is possible due to the fixed camera position.
#
# We can detect bounding rectangle or oriented one. The oriented bbox is not very accurate, and later it turns to be not important for LPD.
# +
def detect_cars(frame, background):
    """Detect moving cars in `frame` by background subtraction.

    `background` is a blurred grayscale reference frame from the fixed
    camera. Returns a list of (x, y, w, h) bounding rectangles, one per
    moving blob whose contour area exceeds MIN_AREA.
    """
    MIN_AREA = 10000  # ~100x100 px; smaller blobs are treated as noise
    cars = []
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)
    # compute the absolute difference between the current frame and
    # first frame
    frame_delta = cv2.absdiff(background, gray)
    thresh = cv2.threshold(frame_delta, 25, 255, cv2.THRESH_BINARY)[1]
    # dilate the thresholded image to fill in holes, then find contours
    # on thresholded image
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    # loop over the contours
    for k,c in enumerate(cnts):
        # if the contour is too small, ignore it
        if cv2.contourArea(c) < MIN_AREA:
            continue
        car = cv2.boundingRect(c)
        cars.append(car)
    return cars
# -
# ## Video processing
# The `process_video` function takes care of frame processing of the given `video_file`. The output is saved in the location of the output `video_output_file`.
#
# This function can be used to:
# - Detect Moving cars.
# - Detect LPs within car frames, and plot it back in the original frame.
# - Detect LPs in the big frame directly.
# +
def make_1080p(cap):
    """Request Full-HD (1920x1080) capture resolution on `cap`."""
    for prop_id, target in ((3, 1920), (4, 1080)):
        cap.set(prop_id, target)
def make_720p(cap):
    """Request HD (1280x720) capture resolution on `cap`."""
    for prop_id, target in ((3, 1280), (4, 720)):
        cap.set(prop_id, target)
def make_480p(cap):
    """Request SD (640x480) capture resolution on `cap`."""
    for prop_id, target in ((3, 640), (4, 480)):
        cap.set(prop_id, target)
def change_res(width, height):
    # Set an arbitrary capture resolution.
    # NOTE(review): unlike make_1080p/make_720p/make_480p, `cap` is not a
    # parameter here — this relies on a global `cap` existing at call time,
    # which looks like an oversight; confirm before reusing this helper.
    cap.set(3, width)
    cap.set(4, height)
def process_video(video_file, # The video path to be processed
                  video_output_file, # The output video file
                  output_video_resolution=(640,480), # The desired output resolution
                  frames_cnt=None, # The desired number of frames to process. If None the whole video is processed.
                  cars_detection=True, # LPD will work on the car cropped image or whole image.
                  show_cars_bbox=0,# 0=dont show, 1: show rect, 2: show oriented bbox.
                  detect_LP_fn=None, # The LPD function.
                  debug=False):
    """Process `video_file` frame by frame and write the result to `video_output_file`.

    Optionally detects moving cars via background subtraction and runs the
    licence-plate detector `detect_LP_fn` on each car crop, or on the whole
    frame when `cars_detection` is False.
    """
    # Set input capture to 1080p
    cap = cv2.VideoCapture(video_file)
    make_1080p(cap)
    # Process the whole video unless a frame count was requested.
    if frames_cnt is None: frames_cnt = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # Prepare the output video file
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter(video_output_file,fourcc, 20.0, output_video_resolution)
    # The first processed frame becomes the background reference.
    background = None
    for cnt in tqdm(range(frames_cnt), position=0, leave=True):
        ret, frame = cap.read()
        if ret:
            if cars_detection:
                if background is None:
                    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                    gray = cv2.GaussianBlur(gray, (21, 21), 0)
                    # if the first frame is None, initialize it
                    background = gray
                    continue
                cars = detect_cars(frame, background)
                for car in cars:
                    (x, y, w, h) = car
                    car = frame[y:y+h,x:x+w,:]
                    if debug: print('Car size', car.shape)
                    if detect_LP_fn is not None:
                        # BUG FIX: call the function passed as a parameter;
                        # the original called the global `detect_LP`, silently
                        # ignoring `detect_LP_fn` in the per-car path.
                        car_LP, LPs = detect_LP_fn(car, debug)
                        # Put back the annotated LP patch in the original frame
                        frame[y:y+h,x:x+w,:] = car_LP
                    if show_cars_bbox == 1: # Just rectangle
                        cv2.rectangle(frame, (x, y), (x + w, y + h), SCALAR_RED, 10)
                    elif show_cars_bbox == 2: # Oriented rectangle
                        # BUG FIX: `c` was undefined here; build the corner
                        # points from the detected car rectangle instead.
                        corners = np.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]])
                        draw_oriented_bbox(frame, corners)
            elif detect_LP_fn is not None:
                frame, LPs = detect_LP_fn(frame, debug)
            if debug: plot_img(frame)
            out.write(cv2.resize(frame, output_video_resolution))
        else:
            print('no video')
    # Rewind and release capture/writer resources.
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    cap.release()
    out.release()
    print('Video is ready at: ', video_output_file)
# -
video_file = 'dat/detection_test.mp4'
video_output_file = 'dat/cars_detection.mp4'
#process_video(video_file, video_output_file, show_cars_bbox=1)
# # Morphology based approach
#
# This approach is based on applying morphological operations to emphasize the LP pattern. Mainly, two main patterns:
# - Edge of bright area
# - Rectangular shape
#
# As usual with rule based approaches, we suffer sensitivity to parameters settings. To make it less critical we perform two simple tricks:
# - Apply the rules only on the car patches, thanks to the car detection step.
# - Resize into standard size makes it easier to set global rules, with less sensitivity to scale. This is also possible thanks to the car detection step.
#
# In this approach we follow the following steps:
#
# - Resize frame to standard size.
# - Transform frame into gray scale.
# - Adaptive thresholding.
# - Canny edge detection.
# - Dilation loop (3 iteration, 3x3 kernel).
# - Contours on dialted image.
# - Get candidate plates by fitting oriented bbox around contours.
# - Filter the candidate LPs with rules on L,W of the oriented bbox.
# - Resize the frame back into the original size
#
#
# +
GAUSSIAN_SMOOTH_FILTER_SIZE = (5, 5)  # Gaussian blur kernel used in preprocess()
ADAPTIVE_THRESH_BLOCK_SIZE = 19       # neighbourhood size for adaptive thresholding
ADAPTIVE_THRESH_WEIGHT = 40           # constant subtracted from the local mean
def extractValue(img):
    """Return the Value (brightness) channel of a BGR image as grayscale."""
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # BUG FIX: split the HSV image, not the original BGR one — the original
    # `cv2.split(img)` returned the BGR red channel instead of Value.
    imgHue, imgSaturation, imgValue = cv2.split(imgHSV)
    return imgValue
def maximizeContrast(gray):
    """Boost local contrast with top-hat / black-hat morphology.

    Adds the top-hat (bright details) to `gray` and subtracts the
    black-hat (dark details), sharpening character/background separation.
    """
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    top_hat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, kernel)
    black_hat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, kernel)
    boosted = cv2.add(gray, top_hat)
    return cv2.subtract(boosted, black_hat)
# Denoising + Gray + Thresholding
def preprocess(img):
    """Convert a BGR image to an inverted adaptive-threshold binary image.

    Steps: take the brightness channel, boost local contrast
    (top-hat/black-hat), Gaussian-blur, then apply an inverted adaptive
    Gaussian threshold so dark characters become white foreground.
    """
    gray = extractValue(img)
    gray = maximizeContrast(gray)
    height, width = gray.shape
    blurred = np.zeros((height, width, 1), np.uint8)
    blurred = cv2.GaussianBlur(gray, GAUSSIAN_SMOOTH_FILTER_SIZE, 0)
    #gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #blurred = cv2.GaussianBlur(gray, (21, 21), 0)
    #thresh = cv2.adaptiveThreshold(blurred, 255.0, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, ADAPTIVE_THRESH_BLOCK_SIZE, ADAPTIVE_THRESH_WEIGHT)
    thresh = cv2.adaptiveThreshold(blurred, 255.0, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, ADAPTIVE_THRESH_BLOCK_SIZE, ADAPTIVE_THRESH_WEIGHT)
    return thresh
# +
# Drawing colors (BGR order) — duplicated here from the MOD section above.
SCALAR_BLACK = (0.0, 0.0, 0.0)
SCALAR_WHITE = (255.0, 255.0, 255.0)
SCALAR_YELLOW = (0.0, 255.0, 255.0)
SCALAR_GREEN = (0.0, 255.0, 0.0)
SCALAR_RED = (0.0, 0.0, 255.0)
def detect_LP_morpho(img, L_min=0, L_max=1000, W_min=0, W_max=1000, debug=False):
    """Detect licence-plate candidates using morphology only.

    Pipeline: preprocess (adaptive threshold) -> Canny edges -> dilation ->
    external contours -> oriented bounding boxes filtered by side-length
    rules (L = short side, W = long side). Accepted boxes are drawn on
    `img` in green. Returns (img, candidates) where `candidates` is a list
    of cv2 rotated rects: (center, size, theta).
    """
    min_canny = 100
    max_canny = 200
    dilation_type = cv2.MORPH_RECT #cv2.MORPH_ELLIPSE, cv2.MORPH_CROSS
    thresh = preprocess(img)
    if debug: plot_img(thresh)
    # CONSISTENCY FIX: use the named thresholds (same values as before)
    # instead of repeating the literals 100/200, which left the variables
    # above dead.
    edges = cv2.Canny(thresh, min_canny, max_canny)
    if debug: plot_img(edges)
    kernel_sz = 3
    iterations = 2
    structuringElement = cv2.getStructuringElement(dilation_type, (kernel_sz, kernel_sz))
    dilated = cv2.dilate(edges, structuringElement, iterations=iterations)
    contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    candidates = []
    for c in contours:
        rotrect = cv2.minAreaRect(c)
        center, size, theta = rotrect
        # Short/long side so the rules are independent of box orientation.
        L, W = min(size), max(size)
        if L >= L_min and L <= L_max and W >= W_min and W <= W_max:
            candidates.append(rotrect)
            box = cv2.boxPoints(rotrect)
            # PORTABILITY FIX: np.int0 was removed in NumPy 2.0.
            box = box.astype(np.intp)
            cv2.drawContours(img, [box], 0, SCALAR_GREEN, 2)
            if debug:
                text = 'L=' + str(int(L)) + ', W=' + str(int(W))
                font = cv2.FONT_HERSHEY_SIMPLEX
                scale = float(L) / 40.0 # base font scale on height of plate area
                thickness = int(round(scale * 1.5))
                color = SCALAR_YELLOW
                cv2.putText(img, text, (int(center[0]-L/2), int(center[1]-W/2)), font, scale, color, thickness)
    if debug: plot_img(dilated)
    return img, candidates
# -
def detect_LP(img, debug):
    # Wrapper: run the morphology-based detector on a fixed 500x500 resize
    # (so the L/W thresholds are scale-independent), then scale the
    # annotated image back to the caller's original size.
    sz = (img.shape[1], img.shape[0])
    car_LP, LPs = detect_LP_morpho(cv2.resize(img, (500,500)), L_min=35, L_max=60, W_min=55, W_max=120, debug=debug)
    car_LP = cv2.resize(car_LP, sz)
    return car_LP, LPs
# ## Calibrating the rules
# In this section we use sample images captured from the test video in order to calibrate the min and max L and W of the plate detection.
#
# We set `debug=True` in order to see the intermediate results (thresholding, edges, dilation).
img = cv2.imread("imgs/char_frame_180_car_no_lp1.png")
plot_img(img)
detected_img, LPs = detect_LP_morpho(cv2.resize(img, (500,500)), L_min=35, L_max=60, W_min=55, W_max=90, debug=True)
plot_img(detected_img)
video_file = 'dat/detection_test.mp4'
video_output_file = 'dat/morpho_LP_detection.mp4'
#process_video(video_file, video_output_file, detect_LP_fn=detect_LP)
# We notice the following problems:
# - Many false positives
# - Rules apply to disoriented false contours
# - Aggressive dilation makes bigger rectangles in some cases
#
#
# It is recommended:
# - Integrate a tracker to smooth the false positives (TBD).
# - Add more features of LP, like characters, which we will do next.
# # Character based approach
#
# The main approach in this part is imported from this nice git [repo](https://github.com/MicrocontrollersAndMore/OpenCV_3_License_Plate_Recognition_Python.git) Code is copied here just for self contained repo, with minor changes.
#
#
# The approach shares the same preprocessing steps as in the morphological approach above.
#
# However, we integrate extra features, which is char detection. This facilitates the filtering out operation, instead of only relying on the L, W rules.
#
# The downside is that we now depend on the language of the sign. For different languages, we need a different char detector, which is not a big issue.
# +
import char.DetectChars
import char.DetectPlates
import char.PossiblePlate
def drawRectangleAroundPlate(imgOriginalScene, licPlate):
    """Draw the rotated bounding box of `licPlate` on the scene image."""
    p2fRectPoints = cv2.boxPoints(licPlate.rrLocationOfPlateInScene) # get 4 vertices of rotated rect
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[0]), tuple(p2fRectPoints[1]), SCALAR_GREEN, 2) # draw the 4 green edges of the box
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[1]), tuple(p2fRectPoints[2]), SCALAR_GREEN, 2)
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[2]), tuple(p2fRectPoints[3]), SCALAR_GREEN, 2)
    cv2.line(imgOriginalScene, tuple(p2fRectPoints[3]), tuple(p2fRectPoints[0]), SCALAR_GREEN, 2)
def writeLicensePlateCharsOnImage(imgOriginalScene, licPlate):
    """Write the recognized plate string near the plate's location in the scene.

    Font size scales with the plate height; the text is placed below the
    plate when the plate sits in the upper 3/4 of the frame, above otherwise.
    """
    ptCenterOfTextAreaX = 0                             # this will be the center of the area the text will be written to
    ptCenterOfTextAreaY = 0
    ptLowerLeftTextOriginX = 0                          # this will be the bottom left of the area that the text will be written to
    ptLowerLeftTextOriginY = 0
    sceneHeight, sceneWidth, sceneNumChannels = imgOriginalScene.shape
    plateHeight, plateWidth, plateNumChannels = licPlate.imgPlate.shape
    intFontFace = cv2.FONT_HERSHEY_SIMPLEX                      # choose a plain jane font
    fltFontScale = float(plateHeight) / 30.0                    # base font scale on height of plate area
    intFontThickness = int(round(fltFontScale * 1.5))           # base font thickness on font scale
    textSize, baseline = cv2.getTextSize(licPlate.strChars, intFontFace, fltFontScale, intFontThickness)        # call getTextSize
    # unpack roatated rect into center point, width and height, and angle
    ( (intPlateCenterX, intPlateCenterY), (intPlateWidth, intPlateHeight), fltCorrectionAngleInDeg ) = licPlate.rrLocationOfPlateInScene
    intPlateCenterX = int(intPlateCenterX)              # make sure center is an integer
    intPlateCenterY = int(intPlateCenterY)
    ptCenterOfTextAreaX = int(intPlateCenterX)         # the horizontal location of the text area is the same as the plate
    if intPlateCenterY < (sceneHeight * 0.75):                                                  # if the license plate is in the upper 3/4 of the image
        ptCenterOfTextAreaY = int(round(intPlateCenterY)) + int(round(plateHeight * 1.6))      # write the chars in below the plate
    else:                                                                                       # else if the license plate is in the lower 1/4 of the image
        ptCenterOfTextAreaY = int(round(intPlateCenterY)) - int(round(plateHeight * 1.6))      # write the chars in above the plate
    # end if
    textSizeWidth, textSizeHeight = textSize                # unpack text size width and height
    ptLowerLeftTextOriginX = int(ptCenterOfTextAreaX - (textSizeWidth / 2))           # calculate the lower left origin of the text area
    ptLowerLeftTextOriginY = int(ptCenterOfTextAreaY + (textSizeHeight / 2))          # based on the text area center, width, and height
    # write the text on the image
    cv2.putText(imgOriginalScene, licPlate.strChars, (ptLowerLeftTextOriginX, ptLowerLeftTextOriginY), intFontFace, fltFontScale, SCALAR_YELLOW, intFontThickness)
# end function
# -
import char.Preprocess
char.Preprocess.ADAPTIVE_THRESH_WEIGHT = 19
def detect_LP_char(frame, L_min=0, L_max=50, W_min=0, W_max=150, debug=False):
    """Character-based licence-plate detection on `frame`.

    Uses the char.* modules (morphology + KNN char recognition) and keeps the
    plate with the most recognized characters, accepting it only if its image
    patch fits within L_max x W_max. Draws the accepted box on `frame`.
    Returns (frame, licPlate) or (frame, None).
    NOTE(review): L_min and W_min are accepted but never used below.
    NOTE(review): KNN training data is (re)loaded on every call — hoisting
    this out of the per-frame path would speed up video processing.
    """
    blnKNNTrainingSuccessful = char.DetectChars.loadKNNDataAndTrainKNN()        # attempt KNN training
    if blnKNNTrainingSuccessful == False:                               # if KNN training was not successful
        print("\nerror: KNN traning was not successful\n")              # show error message
        return frame, None                                                      # and exit program
    # end if
    #imgOriginalScene  = cv2.imread("LicPlateImages/1.png")               # open image
    if frame is None:                            # if image was not read successfully
        print("\nerror: image not read from file \n\n")      # print error message to std out
        return frame, None                                              # and exit program
    # end if
    listOfPossiblePlates = char.DetectPlates.detectPlatesInScene(frame)           # detect plates
    listOfPossiblePlates = char.DetectChars.detectCharsInPlates(listOfPossiblePlates)        # detect chars in plates
    if len(listOfPossiblePlates) == 0:                          # if no plates were found
        if debug: print("\nno license plates were detected\n")  # inform user no plates were found
        return frame, None
    else:                                                       # else
        # if we get in here list of possible plates has at leat one plate
        # sort the list of possible plates in DESCENDING order (most number of chars to least number of chars)
        listOfPossiblePlates.sort(key = lambda possiblePlate: len(possiblePlate.strChars), reverse = True)
        # suppose the plate with the most recognized chars (the first plate in sorted by string length descending order) is the actual plate
        licPlate = listOfPossiblePlates[0]
        #plot_img(licPlate.imgPlate)
        if len(licPlate.strChars) == 0:                     # if no chars were found in the plate
            if debug: print("\nno characters were detected\n\n")  # show message
            return frame,None                                   # and exit program
        # end if
        if licPlate.imgPlate.shape[0] < L_max and licPlate.imgPlate.shape[1] < W_max:
            drawRectangleAroundPlate(frame, licPlate)             # draw red rectangle around plate
        #print("\nlicense plate read from image = " + licPlate.strChars + "\n")  # write license plate text to std out
        #print("----------------------------------------")
        #writeLicensePlateCharsOnImage(frame, licPlate)           # write license plate text on the image
        #frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        return frame, licPlate
# Smoke test on one saved frame: load, display, run detection, display result.
img = cv2.imread("imgs/char_frame_180_car_no_lp1.png")
plot_img(img)
# The detector is run on a fixed 700x700 resize of the frame.
detected_img, LPs = detect_LP_char(cv2.resize(img, (700,700)), L_min=0, L_max=50, W_min=0, W_max=150, debug=True)
plot_img(detected_img)
def detect_LP(img, debug):
    """Run character-based plate detection at 700x700, then restore the frame size."""
    original_size = (img.shape[1], img.shape[0])  # (width, height) order for cv2.resize
    annotated, plates = detect_LP_char(cv2.resize(img, (700,700)), L_min=0, L_max=50, W_min=0, W_max=150, debug=debug)
    annotated = cv2.resize(annotated, original_size)
    return annotated, plates
# Run the character-based plate detector over the full test video;
# process_video (defined earlier in the file) writes the annotated output.
video_file = 'dat/detection_test.mp4'
video_output_file = 'dat/char_LP_detection.mp4'
process_video(video_file, video_output_file, detect_LP_fn=detect_LP)
# The effect of adding the characters detection feature is clear in filtering out false positive.
# # Effect of MOD
# Now we will run the same approach, but on the whole frame, instead of detecting the car first
# Same pipeline, but with cars_detection=False the detector runs on the whole
# frame rather than on moving-car regions first.
video_file = 'dat/detection_test.mp4'
video_output_file = 'dat/char_LP_detection_without_car_detection.mp4'
process_video(video_file, video_output_file, detect_LP_fn=detect_LP, cars_detection=False)
# Again, lots of false positives detected. This shows the effect of detecting the moving cars as a preprocessing step.
#
# In the final video, you might not see any detected plates, since they may all be filtered out by the internal rules.
# # References
# - https://www.pyimagesearch.com/2015/05/25/basic-motion-detection-and-tracking-with-python-and-opencv/
# - https://sod.pixlab.io/articles/license-plate-detection.html
# - https://github.com/MicrocontrollersAndMore/OpenCV_3_License_Plate_Recognition_Python.git
| doc/doc_code_inside.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="kbeZa7WS_2-s"
# 
#
#
# # SIT742: Big Data Analytics
# **(Module: Big Data)**
#
# ---
# - Materials in this module include resources collected from various open-source online repositories.
# - You are free to use, change and distribute this package.
# - If you found any issue/bug for this document, please submit an issue at [tulip-lab/sit742](https://github.com/tulip-lab/sit742/issues)
#
#
# Prepared by **SIT742 Teaching Team**
#
# ---
#
#
# ## Session 3E: Data Acquisition (2)
#
# In this session, we will learn how to use Python Packages to ETL the data and files.
#
#
#
# ### Content
#
#
# 1. `Pandas` Basics
#
# 2. Loading `CSV` Data
#
# 3. Data Extraction through Web `API`
#
# 4. Web Crawling using `BeautifulSoup`
#
#
#
# **Note**: The data available from those services might change over time, so you may need to adjust the code to accommodate those changes.
#
# ---
# + [markdown] colab_type="text" id="rdDCBCLn_2-1"
# ## Part 1. `Pandas` Basics
#
#
# `Pandas` is an open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language.
#
# To get started, we can import `Pandas` with:
# -
import pandas as pd
# #### `Series` and `DataFrame`
#
# Two core components of pandas are the `Series` and `DataFrame`. A `Series` is essentially a column, and a `DataFrame` is a multi-dimensional table made up of a collection of `series`. For a two dimensional DataFrame, the row labels are referred to as `index`, and the column labels are referred to as `columns`.
#
# There are many ways to create a dataframe, and one common option is to use a simple dictionary with each entry acting as a column in the dataframe, as shown in below:
#
#
# +
# Build the same 3x3 table two equivalent ways.
# Row-wise: a list of observations, one inner list per row.
rows = [list('abc'), list('def'), list('ghi')]
df_1 = pd.DataFrame(rows, index=[1, 2, 3], columns=['col1', 'col2', 'col3'])
# Column-wise: a dict mapping each column label to its values.
df_2 = pd.DataFrame({'col1': list('adg'),
                     'col2': list('beh'),
                     'col3': list('cfi')},
                    index=[1, 2, 3])
# -
df_1
# Both `df_1` and `df_2` defined above are identical.
#
# The index labels, column labels, and data values stored in the dataframe `df_1` can be retrieved using `df_1.index`, `df_1.columns` and `df_1.values` respectively.
#
df_1.index
df_1.columns
df_1.values
# `Pandas` also provides operations to inspect a dataframe to gain better understanding of its contents.
#
# - `.head(n)` and `.tail(n)` return the top and bottom `n` rows of the dataframe, respectively.
# - `.describe()` returns a summary of all numerical variables (columns) in the dataset (string columns get count/unique/top/freq instead).
#
#
df_1.head(2)
df_1.tail(2)
df_1.describe()
# ## Part 2. Loading `CSV` Data
#
# Here you will learn how to use Pandas
# [read_csv()](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) function to load a CSV file. Before we start importing our CSV file, it might be good for you to read [Pandas tutorial on reading CSV files](http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table).
#
# If `wget` was not installed in your `Python` platform, install it first:
#
# !pip install wget
# Suppose the `csv` data file is avilable at a URL, we use `wget` to download it to the local file system.
#
# +
import wget
link_to_data = 'https://github.com/tulip-lab/sit742/raw/master/Jupyter/data/Melbourne_bike_share.csv'
# Network download into the working directory; DataSet is presumably the local
# filename returned by wget.download — verify if it is reused later.
DataSet = wget.download(link_to_data)
# -
#
# ### Importing `CSV` data
#
# Importing `CSV` files with `Pandas` function [`read_csv()`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) and converting the data into a form Python can understand is simple. It only takes a couple of lines of code. The imported data will be stored in Pandas `DataFrame`.
#
#
# Load the downloaded CSV into a DataFrame; type() confirms it is a pandas.DataFrame.
import pandas as pd
csvdf = pd.read_csv("Melbourne_bike_share.csv")
type(csvdf)
# ### Inspecting the Data
#
# Now, the data should be loaded into `Python`. Let's have a look at the first 5 records in the dataset. There are a couple of ways to retrieve these records.
#
# For example, you can use
# * `csvdf.head(n = 5)`: It will return first `n` rows in a DataFrame, `n = 5` by default.
# * `csvdf[:5]`: It uses the slicing method to retrieve the first `5` rows.
#
# Refer to "[Indexing and Selecting Data](http://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html)"
# for how to slice, dice, and generally get and set subsets of pandas objects.
#
# Here, we use the `head` function.
# +
# have an overall inspection
csvdf.describe()
#We can use the head instead of the For Statement shown in the above example.
csvdf.head()
csvdf[:5]
# .loc slicing is label-based and inclusive, so labels 0..4 — also five rows here.
csvdf.loc[:4]
#tail is used to show last several records.
csvdf.tail()
# -
# Currently, the row indices are integers automatically generated by `Pandas`.
# Suppose you want to set IDs as row indices and delete the ID column.
# Resetting the row indices can be easily done with the following DataFrame function:
#
# >DataFrame.set_index(keys, drop=True, append=False, inplace=False, verify_integrity=False)
#
# See its [API webpage](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.set_index.html)
# for the detailed usage.
# The keys are going to be the IDs in the first column.
# By setting `inplace = True`, the corresponding change is done inplace and won't return a new DataFrame object.
#
# +
#To show how many records in the file
len(csvdf.ID.unique())
# Use the ID column as the row index; inplace=True mutates csvdf, no copy returned.
csvdf.set_index(csvdf.ID, inplace = True)
csvdf.head()
# -
# To remove the ID column that is now redundant, you use DataFrame `drop` function and set `inplace = True`
# >DataFrame.drop(labels, axis=0, level=None, inplace=False, errors='raise')
# Drop the now-redundant ID column. Keyword form is required: the positional
# `axis` argument (`.drop('ID', 1)`) was deprecated in pandas 1.0 and removed
# in pandas 2.0, where it raises a TypeError.
csvdf.drop('ID', axis=1, inplace=True)
csvdf.head()
# + [markdown] colab_type="text" id="TkOmnCyu_2-5"
# ## Part 3. Data Extraction through Web `API`
#
#
# Many of you will probably be interested in scraping data from the web for your projects. For example, what if we were interested in working with some historical Canadian weather data? Well, we can get that from: http://climate.weather.gc.ca using their API. Requests are going to be formatted like this:
#
# + colab={} colab_type="code" id="NaOZOhi0_2-8"
import pandas as pd
# Bulk-data endpoint for station 5415; {year}/{month} are filled by str.format below.
url_template = "http://climate.weather.gc.ca/climate_data/bulk_data_e.html?format=csv&stationID=5415&Year={year}&Month={month}&timeframe=1&submit=Download+Data"
# + [markdown] colab_type="text" id="04ZKvRtM_2_J"
# Note that we've requested the data be returned as a CSV, and that we're going to supply the month and year as inputs when we fire off the query. To get the data for March 2012, we need to format it with month=3, year=2012:
#
# + colab={} colab_type="code" id="W_bhAPpd_2_M"
url = url_template.format(month=3, year=2012)
url
# + [markdown] colab_type="text" id="o76kkUME_2_V"
# This is great! We can just use the same `read_csv` function as before, and just give it a URL as a filename. Awesome.
#
# Upon inspection, we find out that there are 0 rows (as in 03/2020) of metadata at the top of this CSV, but pandas knows CSVs are weird, so there's a `skiprows` options. We parse the dates again, and set 'Date/Time' to be the index column. Here's the resulting dataframe.
# + colab={} colab_type="code" id="N5lP8CF9_2_Y"
# Fetch one month directly from the URL; latin1 handles the degree signs in headers.
weather_mar2012 = pd.read_csv(url, skiprows=0, index_col='Date/Time (LST)', parse_dates=True, encoding='latin1')
# + colab={} colab_type="code" id="u-uWpzab_2_g"
weather_mar2012.head()
# + [markdown] colab_type="text" id="M32ynyCR_2_p"
# As before, we can get rid of any columns that don't contain real data using ${\tt .dropna()}$
# + colab={} colab_type="code" id="czOBl_A8_2_s"
# Drop columns containing any NaN (axis=1 = columns).
weather_mar2012 = weather_mar2012.dropna(axis=1, how='any')
# + colab={} colab_type="code" id="O4270mD1_2_1"
weather_mar2012.head()
# + [markdown] colab_type="text" id="O74UiyU4_3AA"
# Getting better! The Year/Month/Day/Time columns are redundant, though, and the Data Quality column doesn't look too useful. Let's get rid of those.
# + colab={} colab_type="code" id="nybBYbyH_3AG"
weather_mar2012 = weather_mar2012.drop(['Year', 'Month', 'Day', 'Time (LST)'], axis=1)
weather_mar2012[:5]
# + [markdown] colab_type="text" id="fERnSYxB_3AQ"
# Great! Now let's figure out how to download the whole year? It would be nice if we could just send that as a single request, but like many APIs this one is limited to prevent people from hogging bandwidth. No problem: we can write a function!
# + colab={} colab_type="code" id="f_cCjj05_3AT"
def download_weather_month(year, month):
    """Download one month of hourly climate data and return it as a DataFrame.

    Fetches the CSV for (*year*, *month*) from the bulk-data endpoint, drops
    all-empty columns, strips degree signs from column labels, and removes the
    redundant date-part columns (the datetime index already carries them).
    """
    month_url = url_template.format(year=year, month=month)
    frame = pd.read_csv(month_url, skiprows=0, index_col='Date/Time (LST)', parse_dates=True)
    frame = frame.dropna(axis=1)
    frame.columns = [label.replace('\xb0', '') for label in frame.columns]
    frame = frame.drop(['Year', 'Day', 'Month', 'Time (LST)'], axis=1)
    return frame
# + [markdown] colab_type="text" id="9icj3vwY_3Ab"
# Now to test that this function does the right thing:
# + colab={} colab_type="code" id="xZT66AAk_3Ae"
download_weather_month(2020, 1).head()
# + [markdown] colab_type="text" id="2wax-OUr_3Am"
# Woohoo! Now we can iteratively request all the months using a single line. This will take a little while to run.
# + colab={} colab_type="code" id="-s22hxtp_3Ao"
data_by_month = [download_weather_month(2012, i) for i in range(1, 12)]
# + [markdown] colab_type="text" id="R4S-Wk3j_3Ax"
# Once that's done, it's easy to concatenate all the dataframes together into one big dataframe using ${\tt pandas.concat()}$. And now we have the whole year's data!
# + colab={} colab_type="code" id="PJCpxgx6_3A1"
# Stack the monthly frames into one year-long frame (indexes are already datetimes).
weather_2012 = pd.concat(data_by_month)
# + [markdown] colab_type="text" id="tFEUd8lF_3A9"
# This thing is long, so instead of printing out the whole thing, I'm just going to print a quick summary of the ${\tt DataFrame}$ by calling ${\tt .info()}$:
# + colab={} colab_type="code" id="zQyvC2MG_3BA"
weather_2012.info()
# + [markdown] colab_type="text" id="ckItOLd__3BJ"
# And a quick reminder, if we wanted to save that data to a file:
# + colab={} colab_type="code" id="KLWc_X5h_3BM"
weather_2012.to_csv('weather_2012.csv')
# + colab={} colab_type="code" id="pxaDCOS1_3BY"
# !ls
# + [markdown] colab_type="text" id="jbZfj5eH_3Bl"
# And finally, something you should do early on in the wrangling process, plot data:
# + colab={} colab_type="code" id="mv6HNBvW_3Br"
# plot that data
import matplotlib.pyplot as plt
# so now 'plt' means matplotlib.pyplot
# Build a one-column DataFrame of temperatures indexed by timestamp and plot it.
dateRange = weather_2012.index
temperature = weather_2012['Temp (C)']
df1 = pd.DataFrame({'Temperature' : temperature}, index=dateRange)
plt.plot(df1.index.to_pydatetime(), df1.Temperature)
plt.title("The 2012 annual temperature in Canada")
plt.xlabel("Month")
plt.ylabel("Temperature")
# + colab={} colab_type="code" id="y9ansQSO_3B8"
# nothing to see... in iPython you need to specify where the chart will display, usually it's in a new window
# to see them 'inline' use:
# %matplotlib inline
#If you add the %matplotlib inline, then you can skip the plt.show() function.
#Silence python warnings for the rest of the notebook
import warnings
warnings.filterwarnings('ignore')
# + colab={} colab_type="code" id="4Tfyd2Hf_3CJ"
# that's better, try other plots, scatter is popular, also boxplot
# NOTE: df1 is rebuilt here from the CSV saved above, replacing the plotting frame.
df1 = pd.read_csv('weather_2012.csv', low_memory=False)
df1.plot(kind='scatter',x='Dew Point Temp (C)',y='Rel Hum (%)',color='red')
df1.plot(kind='scatter',x='Temp (C)',y='Wind Spd (km/h)',color='yellow')
# + colab={} colab_type="code" id="RZdrp7EAH6eM"
# show the first few values of the 'Weather' column
weather_2012['Weather'].head()
# + colab={} colab_type="code" id="pVlfon_bEr60"
#Boxplot sample: temperature distribution for four weather conditions
climategroup1 = df1[df1['Weather']=='Fog']['Temp (C)']
climategroup2 = df1[df1['Weather']=='Rain']['Temp (C)']
climategroup3 = df1[df1['Weather']=='Clear']['Temp (C)']
climategroup4 = df1[df1['Weather']=='Cloudy']['Temp (C)']
data =[climategroup1,climategroup2,climategroup3,climategroup4]
fig1, ax1 = plt.subplots()
ax1.set_title('Temperature Boxplot based on the Climate group')
ax1.set_ylabel('Temperature')
ax1.set_xlabel('Climate Group')
boxplot=ax1.boxplot(data,
                    notch=True,
                    patch_artist=True,
                    labels=['Fog','Rain','Clear','Cloudy'],
                    boxprops=dict(linestyle='--', linewidth=2, color='black'))
# Five colors for four boxes: zip stops at the shorter list, so the spare entry is unused.
colors = ['cyan', 'pink', 'lightgreen', 'tan', 'pink']
for patch, color in zip(boxplot['boxes'], colors):
    patch.set_facecolor(color)
plt.show()
# + [markdown] colab_type="text" id="s5P7FONd_3CR"
# ## Part 4. States and Territories of Australia
#
# We are interested in getting State and Territory information from Wikipedia, however we do not want to copy and paste the table : )
#
# Here is the URL
# https://en.wikipedia.org/wiki/States_and_territories_of_Australia
#
# We need two libraries to do the task:
#
# Check documentations here:
# * [urllib](https://docs.python.org/2/library/urllib.html)
# * [BeautifulSoup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/)
#
# + colab={} colab_type="code" id="QYlnsbPr_3CV"
# Python 2/3 compatibility shim: urlopen moved to urllib.request in Python 3.
import sys
if sys.version_info[0] == 3:
    from urllib.request import urlopen
else:
    from urllib import urlopen
from bs4 import BeautifulSoup
# + [markdown] colab_type="text" id="rBmeWNVx_3Ce"
# We first save the link in wiki
# + colab={} colab_type="code" id="P7gpk9P-_3Cg"
wiki = "https://en.wikipedia.org/wiki/States_and_territories_of_Australia"
# + [markdown] colab_type="text" id="OpIxUZ4a_3Cn"
# Then use urlopen to open the page.
#
# If you get "SSL: CERTIFICATE_VERIFY_FAILED", what you need to do is find where "Install Certificates.command" file is, and click it to upgrade the certificate. Then, you should be able to solve the problem.
# + colab={} colab_type="code" id="00UN2EMw_3Cq"
page = urlopen(wiki)
# + colab={} colab_type="code" id="vbjPbWLl_3Cx"
# On Python 3, read the response body so BeautifulSoup receives bytes.
if sys.version_info[0] == 3:
    page = page.read()
# + [markdown] colab_type="text" id="kyy4lpJ7_3C4"
# You will meet BeautifulSoup later in this subject, so don't worry if you feel uncomfortable with it now. You can always revisit.
#
# We begin by reading in the source code and creating a Beautiful Soup object with the BeautifulSoup function.
# + colab={} colab_type="code" id="KMjST_SK_3C9"
# Parse the downloaded HTML with the lxml parser.
soup = BeautifulSoup(page, "lxml")
# + [markdown] colab_type="text" id="arXCtPja_3DJ"
# Then we print and see.
# + colab={} colab_type="code" id="6VUNMhBW_3DM"
print(soup.prettify())
# + [markdown] colab_type="text" id="GNlxOnLL_3DX"
# For who do not know much about HTML, this might be a bit overwhelming, but essentially it contains lots of tags in the angled brackets providing structural and formatting information that we don't care so much here. What we need is the table.
#
# Let's first check the title.
# + colab={} colab_type="code" id="OX2ce0ZK_3Df"
soup.title.string
# + [markdown] colab_type="text" id="L5X_vbyY_3Do"
# It looks fine, then we would like to find the table.
#
# Let's have a try to extract all contents within the 'table' tag.
# + colab={} colab_type="code" id="2mdtetMZ_3Dq"
all_tables = soup.findAll('table')
print(all_tables)
# + [markdown] colab_type="text" id="N0tvbY3S_3Dw"
# This returns a collection of tag objects. It seems that most of the information are useless and it's getting hard to hunt for the table. So searched online and found an instruction here:
#
# https://adesquared.wordpress.com/2013/06/16/using-python-beautifulsoup-to-scrape-a-wikipedia-table/
#
# The class is "wikitable sortable"!! Have a try then.
# + colab={} colab_type="code" id="c350bOM0_3Dz"
# find() returns the first matching tag, or None if the class name changed on the page.
right_table = soup.find('table', class_='wikitable sortable')
print(right_table)
# + [markdown] colab_type="text" id="jbqSZ0i7_3D5"
# Next we need to extract table header row by find the first 'tr'
# + colab={} colab_type="code" id="Pt3kyOGd_3D7"
head_row = right_table.find('tr')
print(head_row)
# + [markdown] colab_type="text" id="uj-r3Xhs_3EB"
# Then we extract header row name via iterate through each row and extract text.
#
# The `.findAll` function in Python returns a list containing all the elements, which you can iterate through.
# + colab={} colab_type="code" id="NLwGVlDi_3ED"
# Collect the stripped text of each <th> cell in the header row.
header_list = []
headers = head_row.findAll('th')
for header in headers:
    # find(text=True) returns the first text node of the cell; `text=` is the
    # legacy bs4 spelling (newer releases prefer `string=`).
    header_list.append(header.find(text = True).strip())
header_list
# + [markdown] colab_type="text" id="UZNmqW_J_3EK"
# We can probably iterate through this list and then extract contents. But let's take a simple approach of extracting each column separately.
# + colab={} colab_type="code" id="wNw5o7xQ_3EO"
# One accumulator list per table column, filled row by row below.
flag = []
state = []
abbrev = []
ISO = []
Capital = []
Population = []
Area = []
Seats = []
Gov = []
Premier = []
for row in right_table.findAll("tr"):
    cells = row.findAll('td')
    # header rows contain <th> rather than <td>, so len(cells) == 0 skips them
    if len(cells) > 0 : # and len(cells) < 10:
        flag.append(cells[0].find(text=True))
        state.append(cells[1].find(text=True).strip())
        abbrev.append(cells[2].find(text=True).strip())
        ISO.append(cells[3].find(text=True).strip())
        Capital.append(cells[4].find(text=True).strip())
        Population.append(cells[5].find(text=True).strip())
        Area.append(cells[6].find(text=True).strip())
        Seats.append(cells[7].find(text=True).strip())
        Gov.append(cells[8].find(text=True).strip())
        # NOTE(review): cells[9] is skipped — presumably an unwanted column;
        # verify against the current Wikipedia table layout, which changes.
        Premier.append(cells[10].find(text=True).strip())
# + [markdown] colab_type="text" id="aB_ywSZe_3EZ"
# Next we can append all list to the dataframe.
# + colab={} colab_type="code" id="B0KLUyg9_3Eb"
# Assemble the scraped columns into a DataFrame, pairing each list of values
# with its header label in the same order as the extraction above.
column_values = [flag, state, abbrev, ISO, Capital, Population,
                 Area, Seats, Gov, Premier]
df_au = pd.DataFrame()
for position, values in enumerate(column_values):
    df_au[header_list[position]] = values
# + [markdown] colab_type="text" id="kAkmBKlD_3E1"
# Done !
# + colab={} colab_type="code" id="0SVh16b8_3E3"
df_au
| Jupyter/M03-BigData/M03E-DataAcquisition-II.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# from fire_params import paramtersIO
import ee
import imgLib
import geemap
import fire_module as fm
# Earth Engine credentials must already be configured for Initialize() to succeed.
ee.Initialize()
# Interactive map widget; displaying it is a notebook side effect.
Map = geemap.Map()
Map
# paramtersIO/step1 come from the project-local fire_module (name misspelled upstream).
pio = fm.paramtersIO()
s1 = fm.step1()
s1.prepare_masking(pio.maskingMethod)
s1.apply_masking_params()
env = s1.setup_landsat()
# +
analysisYear = pio.analysisYear
baselineLength = pio.baselineLength
# NOTE(review): analysisYear is assigned twice in this cell (redundant).
analysisYear = pio.analysisYear
cover = ee.Image("projects/sig-misc-ee/assets/roc_fire/landcover/roc_forest_cover_map_1990_gaf_fin01")
region = ee.FeatureCollection(ee.Feature(cover.geometry()))
# self.geometry,self. - self.baselineLength, self.analysisYear
# Landsat collection covering [analysisYear - baselineLength, analysisYear].
col = env.getLandsat(region, analysisYear - baselineLength, analysisYear)
# -
mean2 = col.mean()
Map.addLayer(mean2,{'min':0,'max':0.15,'bands':['swir2','nir','red']})
# test 2: run the packaged script-1 pipeline and inspect the resulting band names
s2 = fm.step1()
cover = ee.Image("projects/sig-misc-ee/assets/roc_fire/landcover/roc_forest_cover_map_1990_gaf_fin01")
bl_col = s2.prepare_script1(region, cover,"Forest")
print(bl_col.first().bandNames().getInfo())
| nb_testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/CoronaWhy/team-literature-review/blob/master/tlr/faiss_document_similarity_search.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -
# # Document Similarity Search
#
#
# This notebook shows how to run document similarity search on the CORD-19 dataset using the [FAISS](https://github.com/facebookresearch/faiss) algorithms and based on [<NAME> & Coronawhy Task Ties Team](https://www.kaggle.com/crispyc/coronawhy-task-ties-patient-descriptions#Code) submission on round 2 of [CORD-19 Research Challenge](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge).
#
#
# ## Requirements
#
# ### Python requirements
#
# The required dependencies for this code can be located on the requirements.txt file and installed with
#
# ```
# pip install -r requirements.txt
# ```
#
# ### Vector embeddings
#
# In order to perform the similarity search we need the documents transformed into vector embeddings.
# We will use the [embeddings](https://www.kaggle.com/allen-institute-for-ai/CORD-19-research-challenge?select=cord_19_embeddings) provided by the authors of the CORD-19 dataset.
#
# If you have your [Kaggle credentials](https://github.com/Kaggle/kaggle-api#api-credentials) setup in place you can download it automatically the first time you run it (it will be stored for later uses), it may take some time as the file is 4GB,or you can always download the file by hand and pass the path as an argument.
#
#
# ## Quickstart
# +
from document_similarity_search import document_similarity_search

# Query documents: CORD-19 uids paired with their titles.
uids = [
    '02tnwd4m',
    'byp2eqhd',
]
titles = [
    'Nitric oxide: a pro-inflammatory mediator in lung disease?',
    'Immune pathways and defence mechanisms in honey bees Apis mellifera',
]
# How many similar documents to return for each query.
num_results = 5
# Pass the variable through — the original repeated the literal 5, so editing
# num_results above had no effect on the call.
document_similarity_search(uids=uids, titles=titles, num_results=num_results)
# -
# ## What's next?
#
# ### MongoDB
#
# In order to perfom the retrieval of the articles, we need to have the CORD-19 dataset available in a MongoDB database.
#
# By default we use the one in the [Coronawhy Infrastructure](https://www.coronawhy.org/services), but you can use your own by changing the `mongodb` section on the `setup.cfg` file.
#
#
# ### Embeddings
#
# You can use your own set of embeddings to create the index of the search, just have to pass it as an argument:
# +
import pandas as pd
from document_similarity_search import document_similarity_search

# Query documents: CORD-19 uids paired with their titles.
uids = [
    '02tnwd4m',
    'byp2eqhd',
]
titles = [
    'Nitric oxide: a pro-inflammatory mediator in lung disease?',
    'Immune pathways and defence mechanisms in honey bees Apis mellifera',
]
# How many similar documents to return for each query.
num_results = 5
# User-supplied embeddings used to build the search index.
embeddings = pd.read_csv('my_embeddings.csv')
# Pass num_results through instead of repeating the literal 5.
document_similarity_search(uids=uids, titles=titles, num_results=num_results, embeddings=embeddings)
| tlr/document_similarity_search.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Import Splinter, BeautifulSoup, and Pandas
from splinter import Browser
from bs4 import BeautifulSoup as soup
import pandas as pd
from webdriver_manager.chrome import ChromeDriverManager
# Set the executable path and initialize Splinter
# headless=False opens a visible Chrome window driven by Splinter.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
# +
# Visit the mars nasa news site
url = 'https://redplanetscience.com/'
browser.visit(url)
# Optional delay for loading the page
browser.is_element_present_by_css('div.list_text', wait_time=1)
# +
# Convert the browser html to a soup object
html = browser.html
news_soup = soup(html, 'html.parser')
# select_one returns the first matching element (the latest article block).
slide_elem = news_soup.select_one('div.list_text')
# -
slide_elem.find('div', class_='content_title')
# Use the parent element to find the first a tag and save it as `news_title`
news_title = slide_elem.find('div', class_='content_title').get_text()
news_title
# Use the parent element to find the paragraph text
news_p = slide_elem.find('div', class_='article_teaser_body').get_text()
news_p
# ### JPL Space Images Featured Image
# Visit URL
url = 'https://spaceimages-mars.com'
browser.visit(url)
# Find and click the full image button
# index [1] picks the second <button> on the page — the full-image control.
full_image_elem = browser.find_by_tag('button')[1]
full_image_elem.click()
# Parse the resulting html with soup
html = browser.html
img_soup = soup(html, 'html.parser')
img_soup
# +
# find the relative image url
img_url_rel = img_soup.find('img', class_='fancybox-image').get('src')
img_url_rel
# -
# Use the base url to create an absolute url
img_url = f'https://spaceimages-mars.com/{img_url_rel}'
img_url
# ### Mars Facts
# read_html returns a list of all tables on the page; [0] is the facts table.
df = pd.read_html('https://galaxyfacts-mars.com')[0]
df.head()
df.columns=['Description', 'Mars', 'Earth']
df.set_index('Description', inplace=True)
df
df.to_html()
# # D1: Scrape High-Resolution Mars’ Hemisphere Images and Titles
# ### Hemispheres
# +
# 1. Use browser to visit the URL
url = 'https://marshemispheres.com/'
browser.visit(url)
# -
# NOTE(review): the imports below duplicate the top-of-file ones; `Soup`,
# `requests`, and `webdriver` are not used in the cells that follow.
from bs4 import BeautifulSoup as Soup
import requests
from splinter import Browser
import pandas as pd
from selenium import webdriver
# +
# Parse the html with Beautiful Soup
html = browser.html
html_soup = soup(html, 'html.parser')
# 2. Create a list to hold the images and titles
hemisphere_image_urls = []
# 3. Write code to retrieve the image urls and titles for each hemisphere.
# select finds multiple instances and returns a LIST, find finds the first,
# so they don't do the same thing. select_one would be the equivalent to find.
# NEED TO PUT IMG URL TO RESULT_LIST
result_list =[]
results = html_soup.select('div.item')
# -
# Collect each hemisphere's relative link. The original appended `img_url`
# (the JPL featured-image URL from the previous section) on every iteration,
# so the follow-up loop visited the wrong pages; append the href extracted here.
for result in results:
    image_url = result.find('a')['href']
    result_list.append(image_url)
# NEED TO LOOP THRU EACH RESULT_LIST FOR HEMISPHERE FULL IMAGES
# assumes result_list holds hrefs relative to `url` (e.g. 'cerberus.html') —
# verify against the upstream cell that fills it.
for hemisphere in result_list:
    browser.visit(f'{url}{hemisphere}')
    hemisphere_html = browser.html
    hemisphere_soup = soup(hemisphere_html, 'html.parser')
    # only the last matched title/jpg survives each inner loop; each detail
    # page presumably has exactly one of each — confirm if pages change.
    titles = hemisphere_soup.select('h2.title')
    for title in titles:
        hemisphere_title = title.text
    full_resolution_jpgs = hemisphere_soup.select('div.downloads')
    for image in full_resolution_jpgs:
        jpg_url = image.find('a')['href']
    hemisphere_image_urls.append({"title":hemisphere_title,"image_url": f'{url}{jpg_url}'})
# 4. Print the list that holds the dictionary of each image url and title.
hemisphere_image_urls
# 5. Quit the browser
browser.quit()
| Mission_to_Mars_Challenge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using data collators for training and error analysis
# > A text classification example with 🤗 Transformers and Datasets
#
# - comments: false
# - categories: [til,nlp,huggingface,transformers]
# - badges: true
# - hide_github_badge: true
# - hide_binder_badge: true
# +
#hide
# uncomment if running on Colab
# # !pip install transformers datasets pandas
# +
#hide
# Silence warnings and reduce library log chatter for cleaner notebook output.
import warnings
import datasets
import transformers
warnings.filterwarnings("ignore")
datasets.logging.set_verbosity_error()
transformers.logging.set_verbosity_error()
# -
# Recently, [<NAME>](https://twitter.com/GuggerSylvain?s=20) from HuggingFace has created some nice tutorials on using `transformers` for [text classification](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/text_classification.ipynb) and [named entity recognition](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/token_classification.ipynb#scrollTo=545PP3o8IrJV). One trick that caught my attention was the use of a _data collator_ in the trainer, which automatically pads the model inputs in a batch to the length of the longest example. This bypasses the need to set a _global_ maximum sequence length, and in practice leads to faster training since we perform fewer redundant computations on the padded tokens and attention masks.
#
# I wanted to use a data collator for both training _and_ error analysis (e.g. by inspecting the top losses of the model). One problem: during training, each batch is collated on the fly so how do I pad my inputs in subsequent `Dataset.map` operations?
#
# For _sequence classification_ tasks, the solution I ended up with was to simply grab the data collator from the trainer and use it in my post-processing functions:
#
# ```python
# data_collator = trainer.data_collator
#
# def processing_function(batch):
# # pad inputs
# batch = data_collator(batch)
# ...
# return batch
# ```
#
# For _token classification_ tasks, there is a dedicated `DataCollatorForTokenClassification` which expects a `list` of `dicts`, where each `dict` represents a single example in the dataset. Since a `Dataset` slice returns a `dict` of `lists`, we need two more lines to wrangle the data in the expected format:
#
# ```python
# from transformers import DataCollatorForTokenClassification
#
# data_collator = DataCollatorForTokenClassification(trainer.tokenizer)
#
# def processing_function(batch):
# # convert dict of lists to list of dicts
# features = [dict(zip(batch, t)) for t in zip(*batch.values())]
# # pad inputs and labels
# batch = data_collator(features)
# ...
# return batch
# ```
# For an end-to-end example, let's grab 1,000 examples from the IMDB dataset:
# +
from datasets import load_dataset
# Downloads IMDB on first run; keep a small 800 train / 200 test split.
imdb = (load_dataset('imdb', split='train')
        .train_test_split(train_size=800, test_size=200))
imdb
# -
# Next, let's load a pretrained model and its corresponding tokenizer:
# +
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
# Binary sentiment classification head on a pretrained DistilBERT backbone.
num_labels = 2
model_name = 'distilbert-base-cased'
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = (AutoModelForSequenceClassification
         .from_pretrained(model_name, num_labels=num_labels)
         .to(device))
# -
# Before fine-tuning the model, we need to tokenize and encode the dataset, so let's do that with a simple `Dataset.map` operation:
# +
def tokenize_and_encode(batch):
    """Tokenize a batch of texts, truncating to the model's max length.

    Padding is deliberately omitted — the trainer's data collator pads each
    batch dynamically instead.
    """
    return tokenizer(batch['text'], truncation=True)
imdb_enc = imdb.map(tokenize_and_encode, batched=True)
imdb_enc
# -
# The final step is to define the metrics
# +
import numpy as np
from datasets import load_metric
accuracy_score = load_metric("accuracy")
def compute_metrics(eval_pred):
    """Convert logits to class ids and return the accuracy dict for the Trainer."""
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return accuracy_score.compute(predictions=predictions, references=labels)
# -
# the arguments for the trainer
# +
from transformers import TrainingArguments
batch_size = 16
# steps per epoch, so we log once per epoch
logging_steps = len(imdb_enc['train']) // batch_size
training_args = TrainingArguments(
    output_dir="results",
    num_train_epochs=1,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    evaluation_strategy="epoch",
    disable_tqdm=False,
    logging_steps=logging_steps)
# -
# and the trainer itself:
# > Important: The trainer will remove _in-place_ any dataset columns of `str` type, so in this example `imdb_enc` loses the `text` column.
# +
from transformers import Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    compute_metrics=compute_metrics,
    train_dataset=imdb_enc['train'],
    eval_dataset=imdb_enc['test'],
    tokenizer=tokenizer)
trainer.train();
# -
# By default, the `Trainer` class uses the simple `default_data_collator` to collate batches of dict-like objects, but by passing the tokenizer we get a `DataCollatorWithPadding` instead:
data_collator = trainer.data_collator
type(data_collator)
# To see how this collator works, let's pass a dummy batch and observe that both the `input_ids` and `attention_mask` are padded as expected:
# (the second sequence is longer, so the first will be padded up to length 6)
batch = {'input_ids': [[0,1,2], [0,1,2,3,4,5]]}
data_collator(batch)
# Finally, we can calculate the loss per example with the following function:{% fn 1 %}
# +
def loss_per_example(batch):
    """Compute per-example predictions and cross-entropy losses for one batch.

    `batch` arrives from `Dataset.map(batched=True)` as a dict of lists; the
    collator pads it into rectangular arrays first.  Adds two new columns,
    `predicted_label` and `loss`, and converts everything back to NumPy so the
    `datasets` library can store the result.
    """
    batch = data_collator(batch)
    # NOTE(review): depending on the collator's return_tensors setting these may
    # already be torch tensors, in which case torch.tensor(...) re-copies them —
    # confirm against the collator in use.
    input_ids = torch.tensor(batch["input_ids"], device=device)
    attention_mask = torch.tensor(batch["attention_mask"], device=device)
    labels = torch.tensor(batch["labels"], device=device)
    with torch.no_grad():
        # inference only — no gradients needed for error analysis
        output = model(input_ids, attention_mask)
        batch["predicted_label"] = torch.argmax(output.logits, axis=1)
    # reduction="none" keeps one loss value per example instead of the mean
    loss = torch.nn.functional.cross_entropy(
        output.logits, labels, reduction="none")
    batch["loss"] = loss
    # datasets requires list of NumPy array data types
    for k, v in batch.items():
        batch[k] = v.cpu().numpy()
    return batch
losses_ds = imdb_enc['test'].map(
    loss_per_example, batched=True, batch_size=batch_size)
# -
# It's then a simple matter to convert `losses_ds` to a `pandas.DataFrame` and sort by loss to find the examples where the model is most confused:
# +
import pandas as pd
# show the full review text instead of truncating the column
pd.set_option("display.max_colwidth", None)
losses_ds.set_format('pandas')
losses_df = losses_ds[:][['label', 'predicted_label', 'loss']]
# add the text column removed by the trainer
losses_df['text'] = imdb['test']['text']
# highest-loss examples first: these are the reviews the model is most confused by
losses_df.sort_values("loss", ascending=False).head()
# -
# {{ 'The non-padded version of this function is adapted from an implementation by [<NAME>](https://twitter.com/lvwerra?s=20).' | fndetail: 1 }}
| _notebooks/2021-01-01-til-data-collator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + endofcell="--"
############### Blackjack Project #####################
#Difficulty Normal 😎: Use all Hints below to complete the project.
#Difficulty Hard 🤔: Use only Hints 1, 2, 3 to complete the project.
#Difficulty Extra Hard 😭: Only use Hints 1 & 2 to complete the project.
#Difficulty Expert 🤯: Only use Hint 1 to complete the project.
############### Our Blackjack House Rules #####################
## The deck is unlimited in size.
## There are no jokers.
## The Jack/Queen/King all count as 10.
## The Ace can count as 11 or 1.
## Use the following list as the deck of cards:
## cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
## The cards in the list have equal probability of being drawn.
## Cards are not removed from the deck as they are drawn.
## The computer is the dealer.
##################### Hints #####################
#Hint 1: Go to this website and try out the Blackjack game:
# https://games.washingtonpost.com/games/blackjack/
#Then try out the completed Blackjack project here:
# http://blackjack-final.appbrewery.repl.run
#Hint 2: Read this breakdown of program requirements:
# http://listmoz.com/view/6h34DJpvJBFVRlZfJvxF
#Then try to create your own flowchart for the program.
#Hint 3: Download and read this flow chart I've created:
# https://drive.google.com/uc?export=download&id=1rDkiHCrhaf9eX7u7yjM1qwSuyEk-rPnt
#Hint 4: Create a deal_card() function that uses the List below to *return* a random card.
#11 is the Ace.
#cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
#Hint 5: Deal the user and computer 2 cards each using deal_card() and append().
#user_cards = []
#computer_cards = []
#Hint 6: Create a function called calculate_score() that takes a List of cards as input
#and returns the score.
#Look up the sum() function to help you do this.
#Hint 7: Inside calculate_score() check for a blackjack (a hand with only 2 cards: ace + 10) and return 0 instead of the actual score. 0 will represent a blackjack in our game.
#Hint 8: Inside calculate_score() check for an 11 (ace). If the score is already over 21, remove the 11 and replace it with a 1. You might need to look up append() and remove().
#Hint 9: Call calculate_score(). If the computer or the user has a blackjack (0) or if the user's score is over 21, then the game ends.
#Hint 10: If the game has not ended, ask the user if they want to draw another card. If yes, then use the deal_card() function to add another card to the user_cards List. If no, then the game has ended.
#Hint 11: The score will need to be rechecked with every new card drawn and the checks in Hint 9 need to be repeated until the game ends.
#Hint 12: Once the user is done, it's time to let the computer play. The computer should keep drawing cards as long as it has a score less than 17.
#Hint 13: Create a function called compare() and pass in the user_score and computer_score. If the computer and user both have the same score, then it's a draw. If the computer has a blackjack (0), then the user loses. If the user has a blackjack (0), then the user wins. If the user_score is over 21, then the user loses. If the computer_score is over 21, then the computer loses. If none of the above, then the player with the highest score wins.
#Hint 14: Ask the user if they want to restart the game. If they answer yes, clear the console and start a new game of blackjack and show the logo from art.py.
########################################################################################################
# - reshuffles deck once the deck gets low
# - betting is allowed
# -
import random
import time
from IPython.display import clear_output
logo = """
.------. _ _ _ _ _
|A_ _ |. | | | | | | (_) | |
|( \/ ).-----. | |__ | | __ _ ___| | ___ __ _ ___| | __
| \ /|K /\ | | '_ \| |/ _` |/ __| |/ / |/ _` |/ __| |/ /
| \/ | / \ | | |_) | | (_| | (__| <| | (_| | (__| <
`-----| \ / | |_.__/|_|\__,_|\___|_|\_\ |\__,_|\___|_|\_\\
| \/ K| _/ |
`------' |__/
"""
# One 52-card deck by value: J/Q/K count as 10, the Ace starts as 11.
single_deck = [2,3,4,5,6,7,8,9,10,10,10,10,11,
2,3,4,5,6,7,8,9,10,10,10,10,11,
2,3,4,5,6,7,8,9,10,10,10,10,11,
2,3,4,5,6,7,8,9,10,10,10,10,11]
full_deck = []
decks = int(input("How many decks do you want to play with? "))
for i in range(decks):
    full_deck.extend(single_deck)
# Take a *copy* of the shoe: the original `cards_left = full_deck` aliased the
# same list object, so every cards_left.remove(...) during play silently
# shrank full_deck too (the unanswered TODO at the bottom of this notebook).
cards_left = full_deck.copy()
again = 'y'
clear_output()
starting_amount = float(input("How much money do you want to pull out of the ATM? $"))
# current bankroll, mutated by deal()/dealers_turn() via `global dollars`
dollars = starting_amount
def deal():
    """Play one hand from the player's side.

    Takes the wager, deals the opening cards, resolves an immediate blackjack
    (3:2 payout, push if the dealer also has one), then runs the hit/stand
    loop before handing control to dealers_turn().  Mutates the global
    bankroll `dollars` and the shared shoe `cards_left`.
    """
    global dollars
    bet = float(input(f"You have ${dollars}\nEnter your wager: $"))
    # An invalid wager aborts the hand entirely — no cards are dealt and the
    # main loop simply asks the player whether to play again.
    if bet > dollars:
        print("Your wager is more money than you have.")
        return
    elif bet <= 0:
        print("You have entered an invalid wager.")
        return
    # Draw without replacement: sample from the shoe, then remove the drawn cards.
    player_cards = random.sample(cards_left, 2)
    cards_left.remove(player_cards[0])
    cards_left.remove(player_cards[1])
    dealer_cards = random.sample(cards_left, 2)
    cards_left.remove(dealer_cards[0])
    cards_left.remove(dealer_cards[1])
    print(f"""Your cards are {player_cards}. Your total is {sum(player_cards)}\nThe dealer's card is {dealer_cards[0]}\n\n""")
    # Natural blackjack pays 3:2; a simultaneous dealer natural is a push.
    if sum(player_cards) == 21:
        if sum(dealer_cards) == 21:
            print(f"The dealer has {dealer_cards}. You both have Blackjack. Push.")
            return dollars
        print(f"The dealer's cards are: {dealer_cards}\nBlackjack! You win.")
        dollars = dollars + (1.5 * bet)
        return dollars
    hit = input("Do you want to hit? Type 'y' or 'n': ")
    while hit == 'y':
        new_hit = random.sample(cards_left, 1)[0]
        cards_left.remove(new_hit)
        player_cards.append(new_hit)
        # Demote aces (11 -> 1) one at a time while the hand would bust.
        # NOTE(review): an opening pair of aces (11 + 11 = 22) is only softened
        # here, after the first hit — confirm that is the intended behavior.
        while sum(player_cards) > 21 and 11 in player_cards:
            player_cards[player_cards.index(11)] = 1
        if sum(player_cards) > 21:
            print(f"Your cards are {player_cards}, totaling {sum(player_cards)}. Busted! You lose.")
            dollars = dollars - bet
            return dollars
        print(f"""Your cards are {player_cards}. Your total is {sum(player_cards)}\nThe dealer's card is {dealer_cards[0]}""")
        hit = input("Do you want to hit? Type 'y' or 'n': ")
    # Player stands: resolve the dealer's hand and settle the wager.
    dealers_turn(dealer_cards = dealer_cards, player_cards = player_cards, cards_left = cards_left, bet = bet)
def dealers_turn(dealer_cards, player_cards, cards_left, bet):
    """Play out the dealer's hand, then settle the wager against the player's.

    The dealer draws until reaching 17 or more, demoting aces (11 -> 1)
    whenever the hand would bust.  Updates the global bankroll `dollars`
    and returns it.
    """
    global dollars
    while sum(dealer_cards) <= 16:
        drawn = random.sample(cards_left, 1)[0]
        cards_left.remove(drawn)
        dealer_cards.append(drawn)
        # soften aces one at a time while the hand is busting
        while sum(dealer_cards) > 21 and 11 in dealer_cards:
            dealer_cards[dealer_cards.index(11)] = 1
    dealer_total = sum(dealer_cards)
    player_total = sum(player_cards)
    if dealer_total > 21:
        print(f"\nThe dealer's cards are {dealer_cards}. The dealer busted with {sum(dealer_cards)}. You win!")
        dollars += bet
        return dollars
    if dealer_total == player_total:
        print(f"\nThe dealer's cards are {dealer_cards}. You both scored {sum(dealer_cards)}. Tie.")
        return dollars
    if player_total > dealer_total:
        print(f"\nThe dealer's cards are {dealer_cards}, totaling {sum(dealer_cards)}. You win!")
        dollars += bet
        return dollars
    print(f"\nThe dealer's cards are {dealer_cards}, totaling {sum(dealer_cards)}. You lose.")
    dollars -= bet
    return dollars
def reshuffle():
    """Rebuild the shoe from `decks` copies of the base deck so play can continue."""
    global cards_left
    print(f"The deck only has {len(cards_left)} cards left. Reshuffling the deck...")
    time.sleep(3)
    clear_output()
    # Repeating the base deck `decks` times yields the same fresh list as
    # extending an empty list in a loop.
    cards_left = single_deck * decks
    return cards_left
# Main game loop: keep dealing hands while the shoe has cards and the player
# wants to continue.
while len(cards_left) > 10 and again == 'y':
    print(f"Cards Remaining: {len(cards_left)}")
    print(logo)
    deal()
    if dollars == 0:
        # NOTE(review): exact float equality — fractional 3:2 blackjack payouts
        # can leave a balance that never hits exactly 0; confirm intended.
        print("You have run out of money. Better luck next time!")
        break
    again = input(f"Current Balance: ${dollars}\nPlay again? Type 'y' or 'n': ")
    # Reshuffle before the shoe runs low.  NOTE(review): this threshold (15)
    # differs from the loop guard above (10) — confirm both are intended.
    if again == 'y' and len(cards_left) <= 15:
        reshuffle()
    if again == 'n':
        # Cash out: report profit or loss relative to the starting bankroll.
        if dollars >= starting_amount:
            print(f"You walked away with ${dollars} and a profit of ${dollars-starting_amount}")
            break
        elif dollars < starting_amount:
            print(f"You walked away with ${dollars} and a loss of ${starting_amount - dollars}. Better luck next time!")
            break
    clear_output()
# #things to add:
# add if computer also has blackjack on opening hand, then it's a push.
# how to handle the Ace going to 1
#answered: `cards_left = full_deck` binds both names to the *same* list object, so cards_left.remove(...) also mutates full_deck; copy the list (e.g. `cards_left = full_deck.copy()`) to keep them independent
# --
| Day 1-15/day-11-blackjack.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### HYPERPARAMETER OPTIMIZATION WITH RANDOMIZEDSEARCHCV
# +
# import packages
import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.model_selection import RandomizedSearchCV
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings("ignore")
# -
# load data
data = pd.read_csv("data/mobile_price_data.csv")
#read data
data.head()
#show shape
data.shape
#show list of columns
list(data.columns)
# show data properties
data.describe()
# show data information
data.info()
# check if it has missing values
data.isnull().sum()
# split data into features (everything else) and target (price_range)
X = data.drop("price_range", axis=1).values
y = data.price_range.values
# standardize the feature variables
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
#Create classifier (n_jobs=-1 uses all CPU cores for the forest itself)
rf_classifier = RandomForestClassifier(n_jobs=-1)
# set different parameter values to tune
param_grid = {
    "n_estimators": [100, 200, 300, 400],
    "max_depth": [1, 3, 5, 7, 9],
    "criterion": ["gini", "entropy"],
}
# set up the randomized search: samples n_iter=5 settings from param_grid,
# each evaluated with 5-fold cross-validation
model = RandomizedSearchCV(
    estimator=rf_classifier, param_distributions=param_grid,n_iter=5, cv=5, verbose=2, n_jobs=1,random_state=42
)
# train the model with RandomizedSearchCV
model.fit(X_scaled,y)
# print the best score and estimator
print(model.best_score_)
print(model.best_estimator_.get_params())
| RandomizedSearchCV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Train Linear Learner model using File System Data Source
# This notebook example is similar to [An Introduction to Linear Learner with MNIST](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/linear_learner_mnist/linear_learner_mnist.ipynb).
#
# [An Introduction to Linear Learner with MNIST](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/linear_learner_mnist/linear_learner_mnist.ipynb) has been adapted to walk you through on using the AWS Elastic File System (EFS) or AWS FSx for Lustre (FSxLustre) as an input datasource to training jobs.
#
# Please read the original notebook and try it out to gain an understanding of the ML use-case and how it is being solved. We will not delve into that here in this notebook.
# ## Setup
# Again, we won't go into detail explaining the code below, it has been lifted verbatim from [An Introduction to Linear Learner with MNIST](https://github.com/awslabs/amazon-sagemaker-examples/blob/master/introduction_to_amazon_algorithms/linear_learner_mnist/linear_learner_mnist.ipynb).
# +
# !pip install -U --quiet "sagemaker>=1.14.2,<2"
# Define IAM role
import boto3
import re
from sagemaker import get_execution_role
from sagemaker.session import Session
role = get_execution_role()
# Specify training container
# NOTE: get_image_uri is the SageMaker Python SDK v1 API, consistent with the
# "sagemaker>=1.14.2,<2" pin installed above.
from sagemaker.amazon.amazon_estimator import get_image_uri
container = get_image_uri(boto3.Session().region_name, "linear-learner")
# Specify S3 bucket and prefix that you want to use for model data
# Feel free to specify a different bucket here if you wish.
bucket = Session().default_bucket()
prefix = "sagemaker/DEMO-linear-mnist"
# Setup an output S3 location for the model artifact
output_location = "s3://{}/{}/output".format(bucket, prefix)
print("training artifacts will be uploaded to: {}".format(output_location))
# -
# ## Prepare File System Input
# Next, we specify the details of file system as an input to your training job. Using file system as a data source eliminates the time your training job spends downloading data with data streamed directly from file system into your training algorithm.
# +
from sagemaker.inputs import FileSystemInput
# Specify file system id.
file_system_id = "<your_file_system_id>"
# Specify directory path associated with the file system. You need to provide normalized and absolute path here.
file_system_directory_path = "<your_file_system_directory_path>"
# Specify the access mode of the mount of the directory associated with the file system.
# Directory can be mounted either in 'ro'(read-only) or 'rw' (read-write).
file_system_access_mode = "<your_file_system_access_mode>"
# Specify your file system type, "EFS" or "FSxLustre".
file_system_type = "<your_file_system_type>"
# Give Amazon SageMaker Training Jobs Access to FileSystem Resources in Your Amazon VPC.
# (These two are consumed by the Estimator in the next cell, not by FileSystemInput.)
security_groups_ids = "<your_security_groups_ids>"
subnets = "<your_subnets>"
file_system_input = FileSystemInput(
    file_system_id=file_system_id,
    file_system_type=file_system_type,
    directory_path=file_system_directory_path,
    file_system_access_mode=file_system_access_mode,
)
# -
# ## Training the linear model
# Once we have the file system provisioned and file system input ready for training, the next step is to actually train the model.
# +
import boto3
import sagemaker
sess = sagemaker.Session()
# The VPC settings (subnets / security groups) are required so the training
# instances can reach the EFS / FSx file system.
# train_instance_count / train_instance_type are SDK v1 parameter names,
# matching the "sagemaker<2" pin above.
linear = sagemaker.estimator.Estimator(
    container,
    role,
    subnets=subnets,
    security_group_ids=security_groups_ids,
    train_instance_count=1,
    train_instance_type="ml.c4.xlarge",
    output_path=output_location,
    sagemaker_session=sess,
)
# MNIST images are 28x28 = 784 features; binary classification task.
linear.set_hyperparameters(feature_dim=784, predictor_type="binary_classifier", mini_batch_size=200)
linear.fit({"train": file_system_input})
# -
# Towards the end of the job you should see model artifact generated and uploaded to `output_location`.
| introduction_to_amazon_algorithms/linear_learner_mnist/linear_learner_mnist_with_file_system_data_source.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from indicator.base import *
import pandas as pd
# Two toy series to demonstrate the indicator helpers.
a = [1, 2, 3, 4, 5, 6, 7]
b = [2, 1, 6, 8, 4, 3, 9]
df = pd.DataFrame({'a': a, 'b': b})
# conditional selection: where a > b take a, otherwise b
IF(df['a'] > df['b'], df['a'], df['b'])
# logical AND of two conditions
IFAND(df['a'] > df['b'], df['a'] > 5, df['a'], df['b'])
# element-wise maximum
MAX(df['a'], df['b'])
# element-wise minimum
MIN(df['a'], df['b'])
# rolling sum over 2 periods
SUM(df['a'], 2)
# count over a 2-period window
# NOTE(review): the original comment here said "reference data N periods ago",
# which describes REF below, not COUNT — verify against indicator.base
COUNT(df['a'], 2)
# estimated standard deviation
STD(df['a'], 2)
# absolute value
ABS(df['a'])
# mean absolute deviation
AVEDEV(df['a'], 2)
# crossover of two series
CROSS(df['a'], df['b'])
# simple moving average
MA(df['a'], 2)
# smoothed moving average
SMA(df['a'], 2)
# exponential moving average
EMA(df['a'], 2)
# highest value over the window
HHV(df['a'], 2)
# lowest value over the window
LLV(df['a'], 2)
# reference data from N periods ago
REF(df['a'], 2)
| example/base.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# <div style="text-align: right"> ↑ Ensure Kernel is set to ↑ </div><br><div style="text-align: right">
# conda_python3 </div>
# # SageMaker Image Classification Built-In Algorithm
# ## Introduction
# The Amazon SageMaker image classification algorithm is a supervised learning algorithm that supports multi-label classification. It takes an image as input and outputs one or more labels assigned to that image. It uses a convolutional neural network (ResNet) that can be trained from scratch or trained using transfer learning when a large number of training images are not available.
# The outline of this notebook is
#
# 1. Prepare images into RecordIO format
#
# 2. Train the SageMaker Image Classification built-in algorithm
#
# 3. Create and deploy the model to an endpoint for doing inference
#
# 4. Test realtime inference with the endpoint
#
# 5. Do batch inference using SageMaker Batch Transform
#
# Lets start by importing some base libraries and some initial variables
# In the cell below, replace **'your-unique-bucket-name'** with the name of bucket you created in the data-prep notebook
# +
# %%time
import boto3
import os
import re
import sagemaker
from sagemaker import get_execution_role
from sagemaker.amazon.amazon_estimator import get_image_uri
role = get_execution_role()
bucket = 'your-unique-bucket-name'
# NOTE: get_image_uri (imported above) is unused — the SDK v2 image_uris.retrieve
# API is used to resolve the built-in image-classification container instead.
training_image = sagemaker.image_uris.retrieve(region=boto3.Session().region_name, framework='image-classification')
# -
# Install mxnet so we can use some of the tools to create RecordIO format datasets
# ! wget https://github.com/apache/incubator-mxnet/releases/download/1.5.0/apache-mxnet-src-1.5.0-incubating.tar.gz
# ! tar zxf apache-mxnet-src-1.5.0-incubating.tar.gz
# ## Data Preparation
#
# Lets first list out the folders in our data folder
# ! ls -1 ../data
# Now we create a folder to store our RecordIO files
# ! mkdir recordio_dataset
# We will now build our train and validation datasets in recordio format
# First we generate list files using im2rec.py from mxnet <br>
# The output will show the class label and its assigned number (implied from the folder structure)<br>
# i.e.<br>
# Priority 0<br>
# Roundabout 1<br>
# Signal 2
# ! python apache-mxnet-src-1.5.0-incubating/tools/im2rec.py recordio_dataset/train ../data/train --recursive --list --num-thread 8
# ! python apache-mxnet-src-1.5.0-incubating/tools/im2rec.py recordio_dataset/validation ../data/val --recursive --list --num-thread 8
# Now we have generated the list files, we will use them to generate the respective training and validation recordio files
# ! python apache-mxnet-src-1.5.0-incubating/tools/im2rec.py recordio_dataset/train.lst ../data/train
# ! python apache-mxnet-src-1.5.0-incubating/tools/im2rec.py recordio_dataset/validation.lst ../data/val
# Now we have the train and validation datasets in recordio format, we will now copy them to our S3 bucket
# S3 locations for the RecordIO channel files uploaded by the `aws s3 cp` cells below.
s3_train_key = "recordio_dataset/train"
s3_validation_key = "recordio_dataset/validation"
s3_train = 's3://{}/{}/'.format(bucket, s3_train_key)
s3_validation = 's3://{}/{}/'.format(bucket, s3_validation_key)
# ! aws s3 cp recordio_dataset/train.lst {s3_train}
# ! aws s3 cp recordio_dataset/train.rec {s3_train}
# ! aws s3 cp recordio_dataset/train.idx {s3_train}
# ! aws s3 cp recordio_dataset/validation.lst {s3_validation}
# ! aws s3 cp recordio_dataset/validation.rec {s3_validation}
# ! aws s3 cp recordio_dataset/validation.idx {s3_validation}
# ## Training
# Lets now define our hyperparameter values for training the Image Classification algorithm
# number of ResNet layers — 18 here
# (the original comment said "50 layers", which did not match the value below)
num_layers = "18"
# we need to specify the input image shape for the training data
image_shape = "3,640,640"
# number of training samples in the training set
# (the "for caltech it is 15420" note was left over from the Caltech example;
# this dataset has 1334 training images)
num_training_samples = "1334"
# specify the number of output classes
num_classes = "3"
# batch size for training
mini_batch_size = "64"
# number of epochs
epochs = "50"
# learning rate
learning_rate = "0.01"
# We will now set up the hyperparameters and define our training and valiadation channels
# +
# %%time
import time
from time import gmtime, strftime
s3 = boto3.client('s3')
# create unique job name (prefix + timestamp)
job_name_prefix = 'traffic-image-classification'
job_name = job_name_prefix + '-' + time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
training_params = \
{
    # specify the training docker image
    "AlgorithmSpecification": {
        "TrainingImage": training_image,
        "TrainingInputMode": "File"
    },
    "RoleArn": role,
    # NOTE: output path uses job_name_prefix (shared across runs), while
    # TrainingJobName below is unique per run.
    "OutputDataConfig": {
        "S3OutputPath": 's3://{}/{}/output'.format(bucket, job_name_prefix)
    },
    "ResourceConfig": {
        "InstanceCount": 1,
        #"InstanceType": "ml.m5.12xlarge",
        "InstanceType": "ml.p3.2xlarge",
        "VolumeSizeInGB": 50
    },
    "TrainingJobName": job_name,
    # the built-in algorithm expects all hyperparameter values as strings
    "HyperParameters": {
        "image_shape": image_shape,
        "num_layers": str(num_layers),
        "num_training_samples": str(num_training_samples),
        "num_classes": str(num_classes),
        "mini_batch_size": str(mini_batch_size),
        "epochs": str(epochs),
        "learning_rate": str(learning_rate)
    },
    "StoppingCondition": {
        "MaxRuntimeInSeconds": 360000
    },
    #Training data should be inside a subdirectory called "train"
    #Validation data should be inside a subdirectory called "validation"
    #The algorithm currently only supports fullyreplicated model (where data is copied onto each machine)
    "InputDataConfig": [
        {
            "ChannelName": "train",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": s3_train,
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
            "ContentType": "application/x-recordio",
            "CompressionType": "None"
        },
        {
            "ChannelName": "validation",
            "DataSource": {
                "S3DataSource": {
                    "S3DataType": "S3Prefix",
                    "S3Uri": s3_validation,
                    "S3DataDistributionType": "FullyReplicated"
                }
            },
            "ContentType": "application/x-recordio",
            "CompressionType": "None"
        }
    ]
}
print('Training job name: {}'.format(job_name))
print('\nInput Data Location: {}'.format(training_params['InputDataConfig'][0]['DataSource']['S3DataSource']))
# -
# We now run the training job and wait until it completes - runs for 15-20 minutes
# +
# %%time
# create the Amazon SageMaker training job
# NOTE: this rebinds the name `sagemaker` from the SDK module to a boto3 client;
# later cells that need the SDK module re-import it.
sagemaker = boto3.client(service_name='sagemaker')
sagemaker.create_training_job(**training_params)
# confirm that the training job has started
status = sagemaker.describe_training_job(TrainingJobName=job_name)['TrainingJobStatus']
print('Training job current status: {}'.format(status))
try:
    # wait for the job to finish and report the ending status
    sagemaker.get_waiter('training_job_completed_or_stopped').wait(TrainingJobName=job_name)
    training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
    status = training_info['TrainingJobStatus']
    print("Training job ended with status: " + status)
except Exception:
    # Catch Exception rather than a bare `except:` so the user can still break
    # out of the long-running waiter with Ctrl-C (KeyboardInterrupt/SystemExit
    # are no longer swallowed).
    print('Training failed to start')
    # if exception is raised, that means it has failed
    message = sagemaker.describe_training_job(TrainingJobName=job_name)['FailureReason']
    print('Training failed with the following error: {}'.format(message))
# -
# The training job is launched asynchronously. The get_waiter method waits until the job finishes then calls the describe_training_job to get the current status of the job
# The waiter has returned; fetch and report the job's final status.
training_info = sagemaker.describe_training_job(TrainingJobName=job_name)
status = training_info['TrainingJobStatus']
print("Training job ended with status: " + status)
# # Inference
#
# ***
#
# A trained model does nothing on its own. We now want to use the model to perform inference. For this example, that means predicting the topic mixture representing a given document.
#
# This section involves several steps,
#
# 1. [Create Model](#CreateModel) - Create model for the training output
# 1. [Create Endpoint Configuration](#CreateEndpointConfiguration) - Create a configuration defining an endpoint.
# 1. [Create Endpoint](#CreateEndpoint) - Use the configuration to create an inference endpoint.
# 1. [Perform Inference](#PerformInference) - Perform inference on some input data using the endpoint.
# ## Create Model
#
# We now create a SageMaker Model from the training output. Using the model we can create an Endpoint Configuration.
# +
# %%time
import boto3
from time import gmtime, strftime
sage = boto3.Session().client(service_name='sagemaker')
model_name="sm-full-image-classification-model"
print(model_name)
# model_data would normally be set to the URI of the model artifacts from the training job
# We have commented out that line below. For the purposes of this workshop
# We will be setting model_data to point to a pretrained model
#info = sage.describe_training_job(TrainingJobName=job_name) # Get the information of the training job
#model_data = info['ModelArtifacts']['S3ModelArtifacts'] # Get the model S3 URI from the training job
model_data = 's3://ml-materials/sm_image_class/model.tar.gz'
print(model_data)
# NOTE: this hard-coded map only covers four regions — running elsewhere raises
# a KeyError on the lookup below.
containers = {'us-west-2': '433757028032.dkr.ecr.us-west-2.amazonaws.com/image-classification:latest',
              'us-east-1': '811284229777.dkr.ecr.us-east-1.amazonaws.com/image-classification:latest',
              'us-east-2': '825641698319.dkr.ecr.us-east-2.amazonaws.com/image-classification:latest',
              'eu-west-1': '685385470294.dkr.ecr.eu-west-1.amazonaws.com/image-classification:latest'}
hosting_image = containers[boto3.Session().region_name]
primary_container = {
    'Image': hosting_image,
    'ModelDataUrl': model_data,
}
create_model_response = sage.create_model(
    ModelName = model_name,
    ExecutionRoleArn = role,
    PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
# -
# ### Create Endpoint Configuration
# At launch, we will support configuring REST endpoints in hosting with multiple models, e.g. for A/B testing purposes. In order to support this, customers create an endpoint configuration, that describes the distribution of traffic across the models, whether split, shadowed, or sampled in some way.
#
# In addition, the endpoint configuration describes the instance type required for model deployment, and at launch will describe the autoscaling configuration.
# +
from time import gmtime, strftime
# Endpoint configuration: one variant taking all traffic on a single ml.m4.xlarge.
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_config_name = job_name_prefix + '-epc-' + timestamp
endpoint_config_response = sage.create_endpoint_config(
    EndpointConfigName = endpoint_config_name,
    ProductionVariants=[{
        'InstanceType':'ml.m4.xlarge',
        'InitialInstanceCount':1,
        'ModelName':model_name,
        'VariantName':'AllTraffic'}])
print('Endpoint configuration name: {}'.format(endpoint_config_name))
print('Endpoint configuration arn: {}'.format(endpoint_config_response['EndpointConfigArn']))
# -
# ### Create Endpoint
# Lastly, the customer creates the endpoint that serves up the model, through specifying the name and configuration defined above. The end result is an endpoint that can be validated and incorporated into production applications. This takes 9-11 minutes to complete.
# +
# %%time
import time
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
endpoint_name = job_name_prefix + '-ep-' + timestamp
print('Endpoint name: {}'.format(endpoint_name))
endpoint_params = {
    'EndpointName': endpoint_name,
    'EndpointConfigName': endpoint_config_name,
}
# `sagemaker` here is the boto3 client bound in the training cell above.
endpoint_response = sagemaker.create_endpoint(**endpoint_params)
print('EndpointArn = {}'.format(endpoint_response['EndpointArn']))
# -
# Finally, now the endpoint can be created. It may take sometime to create the endpoint...
# +
# get the status of the endpoint
response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = response['EndpointStatus']
print('EndpointStatus = {}'.format(status))
# block until the endpoint reaches a terminal state (typically 9-11 minutes)
sagemaker.get_waiter('endpoint_in_service').wait(EndpointName=endpoint_name)
# print the status of the endpoint
endpoint_response = sagemaker.describe_endpoint(EndpointName=endpoint_name)
status = endpoint_response['EndpointStatus']
print('Endpoint creation ended with EndpointStatus = {}'.format(status))
if status != 'InService':
    raise Exception('Endpoint creation failed.')
# -
# If you see the message,
#
# > `Endpoint creation ended with EndpointStatus = InService`
#
# then congratulations! You now have a functioning inference endpoint. You can confirm the endpoint configuration and status by navigating to the "Endpoints" tab in the AWS SageMaker console.
#
# We will finally create a runtime object from which we can invoke the endpoint.
# ## Perform Inference
# Finally, the customer can now validate the model for use. They can obtain the endpoint from the client library using the result from previous operations, and generate classifications from the trained model using that endpoint.
#
import boto3
# Runtime client used to invoke the hosted endpoint.
runtime = boto3.Session().client(service_name='runtime.sagemaker')
# ### Download test image
file_name = '../data/test/Roundabout/R2.png'
# display the test image inline
from IPython.display import Image
Image(file_name)
# ### Evaluation
#
# Evaluate the image through the network for inference. The network outputs class probabilities and typically, one selects the class with the maximum probability as the final class output.
#
# **Note:** The output class detected by the network may not be accurate in this example. To limit the time taken and cost of training, we have trained the model only for a couple of epochs. If the network is trained for more epochs (say 20), then the output class will be more accurate.
# +
import json
import numpy as np
# Send the raw image bytes to the endpoint as application/x-image.
with open(file_name, 'rb') as f:
    payload = f.read()
    payload = bytearray(payload)
response = runtime.invoke_endpoint(EndpointName=endpoint_name,
                                   ContentType='application/x-image',
                                   Body=payload)
result = response['Body'].read()
# result is a JSON list of per-class probabilities
result = json.loads(result)
print(result)
# -
# ### Clean up
#
# When we're done with the endpoint, we can just delete it and the backing instances will be released. Run the following cell to delete the endpoint.
# Release the hosted endpoint (and its backing instances) now that testing is done.
sage.delete_endpoint(EndpointName=endpoint_name)
# ## Batch Inference
# We are going to use SageMaker Batch Transform to run batch inference on the Test dataset provided
# +
# %%time
import sagemaker
sage = boto3.Session().client(service_name='sagemaker')
model_name="traffic-full-image-classification-model" + time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
print(model_name)
# model_data would normally be set to the URI of the model artifacts from the training job
# We have commented out the lines below. For the purposes of this workshop
# We will be setting model_data to point to a pretrained model
#info = sage.describe_training_job(TrainingJobName=job_name) # Get the information of the training job
#model_data = info['ModelArtifacts']['S3ModelArtifacts'] # Get the model S3 URI from the training job
model_data = 's3://ml-materials/sm_image_class/model.tar.gz'
print(model_data)
hosting_image = sagemaker.image_uris.retrieve(region=boto3.Session().region_name, framework='image-classification')
primary_container = {
'Image': hosting_image,
'ModelDataUrl': model_data,
}
create_model_response = sage.create_model(
ModelName = model_name,
ExecutionRoleArn = role,
PrimaryContainer = primary_container)
print(create_model_response['ModelArn'])
# -
# Copy the test images to your S3 bucket
batch_input = f's3://{bucket}/test/'
# ! aws s3 cp ../data/test/ {batch_input} --recursive
# Setup the parameters for this batch transform job
# +
timestamp = time.strftime('-%Y-%m-%d-%H-%M-%S', time.gmtime())
batch_job_name = "traffic-image-classification-model" + timestamp
request = \
{
"TransformJobName": batch_job_name,
"ModelName": model_name,
"MaxConcurrentTransforms": 16,
"MaxPayloadInMB": 6,
"BatchStrategy": "SingleRecord",
"TransformOutput": {
"S3OutputPath": 's3://{}/{}/output'.format(bucket, batch_job_name)
},
"TransformInput": {
"DataSource": {
"S3DataSource": {
"S3DataType": "S3Prefix",
"S3Uri": batch_input
}
},
"ContentType": "application/x-image",
"SplitType": "None",
"CompressionType": "None"
},
"TransformResources": {
"InstanceType": "ml.m5.12xlarge",
"InstanceCount": 1
}
}
print('Transform job name: {}'.format(batch_job_name))
print('\nInput Data Location: {}'.format(batch_input))
# -
# Now lets run the batch transform job and wait for completion - takes 5-10 minutes
# +
# %%time
# NOTE(review): this rebinds the name `sagemaker`, shadowing the `sagemaker`
# module imported earlier. Harmless here because the module is not used again
# in this notebook, but a distinct name (e.g. sm_client) would be clearer.
sagemaker = boto3.client('sagemaker')
sagemaker.create_transform_job(**request)
print("Created Transform job with name: ", batch_job_name)

# Poll every 30 seconds until the batch transform job reaches a terminal
# state (Completed or Failed).
while(True):
    response = sagemaker.describe_transform_job(TransformJobName=batch_job_name)
    status = response['TransformJobStatus']
    if status == 'Completed':
        print("Transform job ended with status: " + status)
        break
    if status == 'Failed':
        message = response['FailureReason']
        print('Transform failed with the following error: {}'.format(message))
        raise Exception('Transform job failed')
    time.sleep(30)
# -
# Let us now look at the result of the predictions for each image together with their confidence rating.
# Note that we have to map the class numbers back to the label assignments
# +
import json
import numpy as np
from urllib.parse import urlparse
s3_client = boto3.client('s3')
object_categories = ['Priority','Roundabout','Signal']
def list_objects(s3_client, bucket, prefix):
    """Return the keys of all objects in *bucket* whose key starts with *prefix*.

    Parameters:
        s3_client: a boto3 S3 client.
        bucket: S3 bucket name.
        prefix: key prefix to filter on.

    Returns a (possibly empty) list of object keys.

    Fix: the original indexed response['Contents'] unconditionally, which
    raises KeyError when the prefix matches no objects — S3 omits 'Contents'
    from an empty listing.  NOTE(review): list_objects returns at most 1000
    keys per call; use a paginator if more are expected — TODO confirm the
    datasets here stay under that limit.
    """
    response = s3_client.list_objects(Bucket=bucket, Prefix=prefix)
    return [content['Key'] for content in response.get('Contents', [])]
def get_label(s3_client, bucket, prefix, categories=None):
    """Download one batch-transform output JSON from S3 and return its top class.

    Parameters:
        s3_client: a boto3 S3 client.
        bucket: S3 bucket name.
        prefix: full S3 key of the prediction JSON; the file is downloaded to
            the current working directory under its basename.
        categories: optional list of class labels indexed by class number.
            Defaults to the module-level ``object_categories`` so existing
            callers are unaffected (backward-compatible generalization).

    Returns:
        (label, probability) for the class with the highest predicted
        probability.
    """
    if categories is None:
        categories = object_categories
    filename = prefix.split('/')[-1]
    s3_client.download_file(bucket, prefix, filename)
    with open(filename) as f:
        data = json.load(f)
    # 'prediction' is a list of per-class probabilities; pick the argmax.
    index = np.argmax(data['prediction'])
    probability = data['prediction'][index]
    print("Filename: " + filename + " Result: label - " + categories[index] + ", probability - " + str(probability))
    return categories[index], probability
inputs = list_objects(s3_client, bucket, urlparse(batch_input).path.lstrip('/'))
print("Sample inputs: " + str(inputs[:2]))
outputs = list_objects(s3_client, bucket, batch_job_name + "/output")
print("Sample output: " + str(outputs[:2]))
# Check prediction result of the first 8 test images
[get_label(s3_client, bucket, prefix) for prefix in outputs[0:8]]
# -
| sm_image_class/traffic-classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # This is a naive text summarization algorithm
# #### Created by <NAME>
# #### April, 2013
#
#
# # www.KudosData.com
# #### Updated by <NAME>, to support input text in Chinese
# #### March, 2017
# # Imports
# +
# coding=UTF-8
from __future__ import division
import re
# Python2 unicode & float-division support:
# from __future__ import unicode_literals, division
# +
# # %matplotlib inline
# import pandas as pd
import numpy as np
# import matplotlib.pyplot as plt
import io
# 中文字符和语言处理库
import jieba
# 机器学习库 sklearn 分类学习模型库
#from sklearn import linear_model
from sklearn.feature_extraction import DictVectorizer # 数据结构变换:把 Dict 转换为 稀疏矩阵
# from sklearn.linear_model import LogisticRegression # 逻辑回归分类模型
# from sklearn.pipeline import make_pipeline # 封装机器学习模型流程
# from sklearn.metrics import confusion_matrix, roc_curve, auc
# 中文显示设置
from pylab import *
mpl.rcParams['font.sans-serif'] = ['SimHei'] # 指定默认字体
mpl.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题
mpl.rcParams['font.size'] = 14 # 设置字体大小
np.random.seed(88)
# -
# # Define Functions
# Python3
# 中文分词功能小函数, 输出 字符串, 各词组由空格分隔
# 中文分词功能小函数, 输出 字符串, 各词组由空格分隔
def KudosData_word_tokenizer(foo):
    """Tokenize *foo* with jieba in full mode and join tokens with single spaces."""
    tokens = jieba.cut(str(foo), cut_all=True)
    return ' '.join(tokens)
# Python2
# 中文分词功能小函数, 输出 字符串, 各词组由空格分隔
# def KudosData_word_tokenizer(foo):
# seg_token = jieba.cut(foo, cut_all=True)
# seg_str = ' '.join(seg_token)
# return seg_str
# +
class SummaryTool(object):
    """Naive extractive text summarizer for Chinese (and mixed) text.

    Each sentence is scored by its token overlap with every other sentence in
    the document (tokenized with jieba via KudosData_word_tokenizer); the
    summary keeps the highest-scoring sentence of each paragraph.

    Changes vs. the original: large blocks of commented-out experimental code
    removed; ambiguous whitespace literals written as explicit escapes
    ("\\t" for tab, "\\u3000" for the full-width Chinese space); the final
    progress print in get_senteces_ranks no longer raises NameError on empty
    input.  Active logic and all printed output are otherwise unchanged.
    """

    # Naive method for splitting a text into paragraphs
    def split_content_to_paragraphs(self, content):
        """Split raw text into paragraphs on blank lines.

        A new paragraph is assumed to start after a line break followed by
        indentation (two ASCII spaces, two full-width spaces, or a tab), as
        produced by MS Word-style alignment.
        """
        # pre-process to identify new paragraphs:
        content = content.replace("\r\n", "\n")              # line-break codec conversion Windows -> Linux
        content = content.replace("\n  ", "\n\n")            # new paragraph: \n + English Space Space
        content = content.replace("\n\u3000\u3000", "\n\n")  # new paragraph: \n + Chinese Space Space (MS WORD alignment)
        content = content.replace("\n\t", "\n\n")            # new paragraph: \n + Tab
        content = content.replace("\t", " ")                 # clearing: Tab -> Space
        content = content.replace("\u3000", "")              # clearing: remove Chinese Spaces (MS WORD alignment)
        # Collapse runs of blank lines down to exactly one blank line
        # (repeated four times, matching the original fixed-point passes).
        for _ in range(4):
            content = content.replace("\n\n\n", "\n\n")
        # Collapse runs of ASCII spaces down to a single space.
        for _ in range(4):
            content = content.replace("  ", " ")
        # debug:
        print('')
        print('.........................................................................')
        print('-->> splitting below text into paragraphs by \n\n - processed input text:')
        print(content)
        print('.........................................................................')
        return content.split("\n\n")  # split paragraphs on \n\n

    # Naive method for splitting a text/paragraphs into sentences
    def split_content_to_sentences(self, content):
        """Split a paragraph into sentences on the Chinese full stop '。'.

        Line breaks and bracket characters are first converted to sentence
        boundaries, then whitespace and duplicated full stops are cleaned up.
        """
        content = content.replace("\n", "。")   # join lines within a paragraph as sentence boundaries
        # Bracketed segments become their own sentence boundaries.
        content = content.replace("[", "。")
        content = content.replace("]", "。")
        content = content.replace("{", "。")
        content = content.replace("}", "。")
        content = content.replace("|", "")
        content = content.replace("\t", " ")    # clearing: Tab -> Space
        content = content.replace("\u3000", "") # clearing: remove Chinese Spaces (MS WORD alignment)
        # Collapse runs of ASCII spaces down to a single space.
        for _ in range(4):
            content = content.replace("  ", " ")
        # Collapse runs of consecutive full stops down to one.
        for _ in range(4):
            content = content.replace("。。", "。")
        print('')
        print('-->> splitting below paragraph into sentences:')
        print(content)
        return content.split("。")

    # Calculate the intersection between 2 sentences
    def sentences_intersection(self, sent1, sent2):
        """Score the token overlap of two sentences.

        Both sentences are tokenized with jieba (via KudosData_word_tokenizer)
        and the intersection size is normalized by the average token count.
        """
        s1 = set(KudosData_word_tokenizer(sent1).split(" "))
        s2 = set(KudosData_word_tokenizer(sent2).split(" "))
        # Guard against division by zero (unreachable in practice: str.split
        # always yields at least one token, so both sets are non-empty).
        if (len(s1) + len(s2)) == 0:
            print('# If there is not intersection, just return 0')
            return 0
        # Normalize the result by the average number of tokens.
        return len(s1.intersection(s2)) / ((len(s1) + len(s2)) / 2)

    # Format a sentence - remove all non-alphabetic chars from the sentence.
    # We use the formatted sentence as a key in our sentences dictionary.
    def format_sentence(self, sentence):
        """Strip all non-word characters; the result keys the sentences dict."""
        return re.sub(r'\W+', '', sentence)

    # Convert the content into a dictionary <K, V>
    #   K = the formatted sentence
    #   V = the rank (score) of the sentence
    def get_senteces_ranks(self, content):
        """Return {formatted_sentence: score} over all sentences of *content*.

        A sentence's score is the sum of its intersection with every *other*
        sentence.  (Method name kept as-is — misspelling included — because
        callers outside this class use it.)
        """
        print(' ')
        print('starting: get_senteces_ranks(self, content) ...')
        sentences = self.split_content_to_sentences(content)
        n = len(sentences)
        # Pairwise intersection matrix.
        values = [[self.sentences_intersection(sentences[i], sentences[j])
                   for j in range(n)] for i in range(n)]
        # Build the sentences dictionary: score = sum of all intersections
        # with the other sentences.
        sentences_dic = {}
        for i in range(n):
            score = sum(values[i][j] for j in range(n) if j != i)
            sentences_dic[self.format_sentence(sentences[i])] = score
        # Fix: the original printed the loop variable `i` here, which raises
        # NameError when there are no sentences; n - 1 is the same last index.
        print('>>>>>>>>>>>>>>>>>>> successfully completed: get_senteces_ranks() : %d' % (n - 1))
        return sentences_dic

    # Return the best sentence in a paragraph
    def get_best_sentence(self, paragraph, sentences_dic):
        """Return the highest-scoring sentence of *paragraph*, or "" if the
        paragraph has fewer than two sentences."""
        sentences = self.split_content_to_sentences(paragraph)
        # Ignore short paragraphs.
        if len(sentences) < 2:
            print('==>> Ignore above short paragraph')
            return ""
        # Fallback text is returned when no sentence scores above zero.
        best_sentence = "### Best_sentence NOT produced for this paragraph ###"
        max_value = 0
        for s in sentences:
            strip_s = self.format_sentence(s)
            if strip_s and sentences_dic[strip_s] > max_value:
                max_value = sentences_dic[strip_s]
                best_sentence = s
        return best_sentence

    # Build the summary
    def get_summary(self, title, content, sentences_dic):
        """Assemble the summary: the (stripped) title, a blank line, then the
        best sentence of each paragraph."""
        paragraphs = self.split_content_to_paragraphs(content)
        # If necessary, the title can be removed from the output here.
        summary = [title.strip(), ""]
        for p in paragraphs:
            sentence = self.get_best_sentence(p, sentences_dic).strip()
            if sentence:
                summary.append(sentence)
        return "\n".join(summary)
# -
# # Input text
# +
# process Unicode text input
with io.open('input_text.txt','r',encoding='utf8') as f:
text = f.read()
content = text
title = '''
<Dummy Title>
'''
# -
# # Run Topic Summerization
# ### Get important sentence(s) of each paragraph from input text
# Create a SummaryTool object
st = SummaryTool()
# Build the sentences dictionary
sentences_dic = st.get_senteces_ranks(content)
# Build the summary with the sentences dictionary
summary = st.get_summary(title, content, sentences_dic)
# print(the ratio between the summary length and the original length
print("Original Length : %s" % (len(title) + len(content)))
print("Summary Length : %s" % len(summary))
print("Summary Ratio : %s %%" % (100 * (len(summary) / (len(title) + len(content)))))
print("")
# print the summary
print(summary)
# ### Output results to a file
with io.open('output_topic_summary.txt','w',encoding='utf8') as f:
f.write("Original Length : %s" % (len(title) + len(content)))
f.write("\n")
f.write("Summary Length : %s" % len(summary))
f.write("\n")
f.write("Summary Ratio : %s %%" % (100 * (len(summary) / (len(title) + len(content)))))
f.write("\n")
f.write("\n")
f.write(summary)
f.close()
| topic_summary/dated/topic_summary_chn_v008.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
tf.__version__
# +
import os
import re
import h5py
import shutil
import skimage
import imageio
import numpy as np
from glob import glob
from tqdm import tqdm
tqdm.pandas(desc="progress-bar")
import matplotlib.pyplot as plt
# %matplotlib inline
# don't print matching warnings
import warnings
warnings.filterwarnings('ignore')
# -
# ### Download dataset and unpack it
# +
_URL = 'https://s3.amazonaws.com/nist-srd/SD18/sd18.zip'
path_to_zip = tf.keras.utils.get_file('sd18.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'sd18/')
# -
# ### Organize all images
# +
if not os.path.isdir('data'):
os.mkdir('data')
filenames = glob(PATH + 'single/f1_p1/*/*.png')
for filename in tqdm(filenames):
indx = filename.split('/')[-1].split('_')[0]
# remove leading zeros from index
indx = re.sub(r'(?<!\d)0+', '', indx)
side = filename.split('/')[-1].split('_')[2].split('.')[0].lower()
new_file = 'data/mugshot_{}.{}.png'.format(side, indx)
shutil.copyfile(filename, new_file)
# -
# Convert Grayscale to RGB
# Resize to (256, 256)
filenames = glob('data/*.png')
for filename in tqdm(filenames):
im = skimage.io.imread(filename)
im = skimage.color.gray2rgb(im)
im = skimage.transform.resize(im, (256, 256), anti_aliasing=True)
im = skimage.util.img_as_ubyte(im)
skimage.io.imsave(filename, im)
# Flip L to R
filenames = glob('data/mugshot_l.*.png')
for filename in tqdm(filenames):
im = skimage.io.imread(filename)
im = np.fliplr(im)
skimage.io.imsave(filename, im)
# rename file
new_filename = filename.replace('_l', '_r')
os.rename(filename, new_filename)
# +
if not os.path.isdir('tmp'):
os.mkdir('tmp')
if not os.path.isdir('data/test'):
os.mkdir('data/test')
# +
# train test split
frnt_files = sorted(glob('data/mugshot_f.*.png'))
side_files = sorted(glob('data/mugshot_r.*.png'))
mylist = list(zip(frnt_files, side_files))
for f in mylist[:int(len(mylist)*0.8)]:
shutil.move(f[0], 'tmp')
shutil.move(f[1], 'tmp')
for f in mylist[int(len(mylist)*0.8):]:
shutil.move(f[0], 'data/test')
shutil.move(f[1], 'data/test')
# -
# ### Image-to-Image paper describes that it randomly jitter each image
# 1. resize image up
# 2. randomly crop back to org size
# 3. randomly flip horizontally
def load(image):
    """Read the PNG file at path *image* and return it as a float32 tensor."""
    raw = tf.io.read_file(image)
    decoded = tf.image.decode_png(raw)
    return tf.dtypes.cast(decoded, tf.float32)
@tf.function()
def random_jitter(image):
    """Apply pix2pix-style random jitter to *image*:

    upscale to 286x286 (nearest neighbor), random-crop back to 256x256x3,
    then randomly mirror horizontally.
    """
    upscaled = tf.image.resize(image, [286, 286],
                               method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    cropped = tf.image.random_crop(upscaled, size=[256, 256, 3])
    return tf.image.random_flip_left_right(cropped)
def normalize(image):
    """Map pixel values from [0, 255] to [-1, 1]."""
    return image / 127.5 - 1.0
if not os.path.isdir('data/train'):
os.mkdir('data/train')
filenames = sorted(glob('tmp/mugshot_f.*.png'))
for filename in filenames:
name = filename.split('/')[-1]
image = load(filename)
for i in range(4):
image = random_jitter(image)
image = normalize(image)
nname = name.replace('_f', '_f' + str(i))
imageio.imwrite(os.path.join('data/train/', nname), image)
filenames = sorted(glob('data/train/mugshot_f?.*.png'))
for k, filename in enumerate(filenames):
shutil.move(filename, 'data/train/mugshot_front.' + str(k) + '.png')
filenames = sorted(glob('tmp/mugshot_r.*.png'))
for filename in tqdm(filenames):
for i in range(4):
nname = filename.replace('_r', '_r' + str(i))
shutil.copy2(filename, nname)
filenames = sorted(glob('tmp/mugshot_r?.*.png'))
for k, filename in enumerate(filenames):
shutil.move(filename, 'data/train/mugshot_side.' + str(k) + '.png')
# ### Save the image dataset into a HDF5
# +
hdf5_path = 'data/dataset.hdf5'
# train images
frnt_path = 'data/train/mugshot_front.*.png'
side_path = 'data/train/mugshot_side.*.png'
frnt = glob(frnt_path)
side = glob(side_path)
train_inpt = frnt[0:int(len(frnt))]
train_real = side[0:int(len(side))]
# test images
frnt_path = 'data/test/mugshot_f.*.png'
side_path = 'data/test/mugshot_r.*.png'
frnt = glob(frnt_path)
side = glob(side_path)
test_inpt = frnt[0:int(len(frnt))]
test_real = side[0:int(len(side))]
# -
len(train_inpt)
# +
# Define an array for each of train and test set with the shape
# (number of data, image_height, image_width, image_depth)
train_shape = (len(train_inpt), 256, 256, 3)
test_shape = (len(test_inpt), 256, 256, 3)
# open a hdf5 file and create earrays
hdf5_file = h5py.File(hdf5_path, mode='w')
hdf5_file.create_dataset("train_inpt", train_shape, np.int8)
hdf5_file.create_dataset("train_real", train_shape, np.int8)
hdf5_file.create_dataset("test_inpt", test_shape, np.int8)
hdf5_file.create_dataset("test_real", test_shape, np.int8)
# +
for i in range(len(train_inpt)):
img = skimage.io.imread(train_inpt[i])
hdf5_file["train_inpt"][i, ...] = img[None]
img = skimage.io.imread(train_real[i])
hdf5_file["train_real"][i, ...] = img[None]
for i in range(len(test_inpt)):
img = skimage.io.imread(test_inpt[i])
hdf5_file["test_inpt"][i, ...] = img[None]
img = skimage.io.imread(test_real[i])
hdf5_file["test_real"][i, ...] = img[None]
# -
hdf5_file.close()
# ### Check if the data is saved properly in the HDF5 file
# +
# open the hdf5 file
hdf5_path = 'data/dataset.hdf5'
hdf5_file = h5py.File(hdf5_path, 'r')
# Get total number of samples
num_data = hdf5_file['train_inpt'].shape[0]
print(num_data)
# -
# ### Cleaning up behind me
# +
# Removing all png images
files = glob('data/*.png')
for file in files:
os.remove(file)
# remove tmp dir
shutil.rmtree('tmp')
# Remove downloaded data
shutil.rmtree(PATH)
# -
| notebooks/load_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2-JoiningDatasets
# This tutorial shows how to identify drug molecules in the PDB by joining two datasets:
#
# 1. Drug information from DrugBank
# 2. Ligand information from RCSB PDB
from pyspark.sql import SparkSession
from mmtfPyspark.datasets import customReportService, drugBankDataset
from mmtfPyspark.structureViewer import view_binding_site
# #### Configure Spark
spark = SparkSession.builder.appName("2-JoiningDatasets").getOrCreate()
# ## Download open DrugBank dataset
# Download a dataset of drugs from [DrugBank](https://www.drugbank.ca) and filter out any drugs that do not have an InChIKey. [InChIKeys](https://en.wikipedia.org/wiki/International_Chemical_Identifier) are unique identifiers for small molecules.
#
# DrugBank provides more [detailed datasets](https://github.com/sbl-sdsc/mmtf-pyspark/blob/master/mmtfPyspark/datasets/drugBankDataset.py), e.g., subset of approved drugs, but a DrugBank username and password is required. For this tutorial we use the open DrugBank dataset.
drugs = drugBankDataset.get_open_drug_links()
drugs = drugs.filter("StandardInChIKey IS NOT NULL").cache()
drugs.toPandas().head(5)
# ## Download ligand annotations from RCSB PDB
# Here we use [RCSB PDB web services](http://dx.doi.org/10.1093/nar/gkq1021) to download InChIKeys and molecular weight for ligands in the PDB (this step can be slow!).
#
# We filter out entries without an InChIKey and low molecular weight ligands using SQL syntax.
# +
ligands = customReportService.get_dataset(["ligandId","InChIKey","ligandMolecularWeight"])
ligands = ligands.filter("InChIKey IS NOT NULL AND ligandMolecularWeight > 300").cache()
ligands.toPandas().head(10)
# -
# ## Find drugs in PDB
# By [joining](https://spark.apache.org/docs/latest/api/python/pyspark.sql.html?highlight=join#pyspark.sql.DataFrame.join) the two datasets on the InChIKey, we get the intersection between the two datasets.
ligands = ligands.join(drugs, ligands.InChIKey == drugs.StandardInChIKey)
# #### Keep only unique ligands per structure
# Here we [drop](https://spark.apache.org/docs/latest/api/python/pyspark.sql.html?highlight=join#pyspark.sql.DataFrame.dropDuplicates) rows with the same structureId and ligandId.
ligands = ligands.dropDuplicates(["structureId","ligandId"]).cache()
# #### Keep only essential columns
ligands = ligands.select("structureId","ligandId","chainId","Commonname")
ligands.toPandas().head(10)
# ## Visualize drug binding sites
# #### Extract id columns as lists (required for visualization)
pdb_ids = ligands.select("structureId").rdd.flatMap(lambda x: x).collect()
ligand_ids = ligands.select("ligandId").rdd.flatMap(lambda x: x).collect()
chain_ids = ligands.select("chainId").rdd.flatMap(lambda x: x).collect()
# Disable scrollbar for the visualization below
# + language="javascript"
# IPython.OutputArea.prototype._should_scroll = function(lines) {return false;}
# -
# #### Show binding site residues within 4.5 A from the drug molecule
view_binding_site(pdb_ids, ligand_ids, chain_ids, distance=4.5);
spark.stop()
| 4-mmtf-pyspark-advanced/2-JoiningDatasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Show ALL outputs in cell, not only last result
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
relative_filepath = "../../"
# +
#Set relative path mapping for module imports
import sys
sys.path.append(relative_filepath)
# for path in sys.path:
# print(path)
# -
# External Dependencies
import numpy as np
import pandas as pd
# +
# Read in pickled combined data
X_y_data = pd.read_pickle(relative_filepath + "data/interim/step_3a/X_y_data.pkl")
# Read in pickled train data
X_y_train = pd.read_pickle(relative_filepath + "data/interim/step_3a/X_y_train.pkl")
# Read in pickled test data
X_y_test = pd.read_pickle(relative_filepath + "data/interim/step_3a/X_y_test.pkl")
# Recap data structure
X_y_data.head()
X_y_data.shape
# +
import json
dict_ml_missing_data = json.load(open(relative_filepath + "reports/dicts/dict_ml_missing_data.json"))
# +
#values for config dict
input_dfs = [X_y_data,
X_y_train,
X_y_test]
target = "classLabel"
# -
# ## Data Cleaning Checklist
# + active=""
# https://www.justintodata.com/data-cleaning-python-ultimate-guide/
#
# Table Of Contents
# Missing data
# Irregular data (Outliers)
# Unnecessary data
# Unnecessary type #1: Uninformative / Repetitive
# Unnecessary type #2: Irrelevant
# Unnecessary type #3: Duplicates
# Inconsistent data
# Inconsistent type #1: Capitalization
# Inconsistent type #2: Formats
# Inconsistent type #3: Categorical Values
# Inconsistent type #4: Addresses
# + active=""
# https://elitedatascience.com/data-cleaning
# Remove Unwanted observations
#   Duplicate observations
#   Irrelevant observations
# Fix Structural Errors
# Filter Unwanted Outliers
# Handle Missing Data
#   Missing categorical data
#   Missing numeric data
# -
# +
# Imports
import pandas as pd
import numpy as np
pd.options.display.max_columns = None
from matplotlib import pyplot as plt
import seaborn as sns
# Display HTML
from IPython.display import Image
from IPython.core.display import HTML
# +
# Input data
titanic_X_y_train = pd.read_csv('train.csv')
titanic_X_y_test = pd.read_csv('test.csv')
# titanic_X_y_train = sns.load_dataset('titanic')
# titanic_X_y_train.head()
target = 'Survived'
# +
# Split into train & test
# X_train, X_test, y_train, y_test = train_test_split(X,
# y,
# test_size=0.2,
# stratify=y,
# random_state=11)
X_train = titanic_X_y_train.drop(target, axis=1)
y_train = titanic_X_y_train[target]
X_train.head()
# y_train
# +
# determine categorical and numerical features
numerical_cols = X_train.select_dtypes(include=['int64', 'float64']).columns
categorical_cols = X_train.select_dtypes(include=['object', 'bool']).columns
print(list(numerical_cols))
print(list(categorical_cols))
# +
# Finalised preprocessing handlers
# NOTE(review): every handler below is an unimplemented stub (returns None).
# The FunctionTransformer wiring further down still references a placeholder
# FUNCTION name and will not run until these are filled in.

# Numeric handlers
def num_imputation_handler(X):
    """Stub: impute missing numeric values in X. Not yet implemented."""
    pass

def power_transform_handler(X):
    """Stub: apply power transforms (e.g. Box-Cox / Yeo-Johnson) to X. Not yet implemented."""
    pass

def outlier_handler(X):
    """Stub: detect/handle outliers in X. Not yet implemented."""
    pass

# Categorical handlers
def cat_imputation_handler(X):
    """Stub: impute missing categorical values in X. Not yet implemented."""
    pass

def label_encoding_handler(df):
    """Stub: label-encode categorical columns of df. Not yet implemented."""
    pass

def one_hot_encoding_handler(df):
    """Stub: one-hot-encode categorical columns of df. Not yet implemented."""
    pass

def ordinal_encoding_handler(df):
    """Stub: ordinal-encode categorical columns of df. Not yet implemented."""
    pass

def target_encoding_handler(df):
    """Stub: target-encode categorical columns of df. Not yet implemented."""
    pass

# Text handlers
def vectorizer_handler(df):
    """Stub: vectorize text columns of df. Not yet implemented."""
    pass

# Model input handlers
def scaling_handler(X):
    """Stub: scale features of X for model input. Not yet implemented."""
    pass

def imbalance_handler(df):
    """Stub: rebalance classes in df. Not yet implemented."""
    pass
# +
## FILL WITH RELEVANT ##
# Column dtypes selector
numerical_cols = []
# imputation_cols = []
# power_transform_cols = []
# outlier_cols = []
# scaling_cols = []
categorical_cols = []
text_cols = []
# Function transformers for numeric pipeline
get_numeric_data = FunctionTransformer(lambda x: x[numerical_cols], validate=False)
apply_num_imputations = FunctionTransformer(FUNCTION, validate=False)
apply_power_transforms = FunctionTransformer(FUNCTION, validate=False)
apply_outlier_handling = FunctionTransformer(FUNCTION, validate=False)
apply_scaling = FunctionTransformer(FUNCTION, validate=False)
apply_balancing = FunctionTransformer(FUNCTION, validate=False)
# Function transformers for categorical pipeline
get_categorical_data = FunctionTransformer(lambda x: x[categorical_cols], validate=False)
apply_cat_imputations = FunctionTransformer(FUNCTION, validate=False) #SimpleImputer(strategy='most_frequent', fill_value='categorical', missing_values=np.nan)
apply_label_encoding = FunctionTransformer(FUNCTION, validate=False)
apply_one_hot_encoding = FunctionTransformer(FUNCTION, validate=False)
apply_ordinal_encoding = FunctionTransformer(FUNCTION, validate=False)
# Function transformers for text pipeline
get_text_data = FunctionTransformer(lambda x: x[text_cols], validate=False)
apply_vectorizer = FunctionTransformer(FUNCTION, validate=False)
# +
# Individual dtype pipelines
numeric_transformer = Pipeline([
('selector', get_numeric_data),
('imputer', apply_num_imputations),
('power_transformer', apply_power_transforms),
('outliers', apply_outlier_handling)
])
categorical_transformer = Pipeline([
('selector', get_categorical_data),
('imputer', apply_cat_imputations),
('le', apply_label_encoding),
('ohe', apply_one_hot_encoding),
('ordinal', apply_ordinal_encoding)
])
text_transformer = Pipeline([
('selector', get_text_data),
('vectorizer', apply_vectorizer),
])
# +
# Preprocessing pipeline with feature union
preprocessor_pl = FeatureUnion(transformer_list=[
('numeric', numeric_transformer),
('categorical', categorical_transformer),
('text', text_transformer)
])
preprocessor_pl_result = preprocessor_pl.fit_transform(X_train)
type(preprocessor_pl_result)
preprocessor_pl_result.shape
# +
# Full pipeline
preprocessor_pl = Pipeline([
('union', FeatureUnion(transformer_list=[
('numeric', numeric_transformer),
('categorical', categorical_transformer),
('text', text_transformer)
])),
# ('scaler', apply_scaling),
# ('imbalance', apply_balancing),
# ('clf', LogisticRegression())
])
preprocessor_pl_result = preprocessor_pl.fit_transform(X_train)
type(preprocessor_pl_result)
preprocessor_pl_result.shape
# +
# Preprocessing pipeline with column transformer
preprocessor_pl = ColumnTransformer(transformers=[
('num', numeric_transformer, numerical_cols),
('cat', categorical_transformer, categorical_cols)
])
preprocessor_pl_result = preprocessor_pl.fit_transform(X_train)
type(preprocessor_pl_result)
preprocessor_pl_result.shape
# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
clf = Pipeline([
('preprocessor', preprocessor_pl),
('classifier', LogisticRegression())
])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=0)
clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test))
param_grid = {
'preprocessor__num__imputer__strategy': ['mean', 'median'],
'classifier__C': [0.1, 1.0, 10, 100],
}
grid_search = GridSearchCV(clf, param_grid, cv=10)
grid_search
# +
from IPython.display import display
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import FunctionTransformer
from sklearn import set_config
set_config(display='diagram')
# Defining an example pipeline
model = Pipeline([('transformer', FunctionTransformer(lambda x: 2*x)), ('clf', LogisticRegression())])
display(model)
# -
Image(url= "https://assets.datacamp.com/production/repositories/4983/datasets/238dde66d8af1b7ebd8ffe82de9df60ad6a68d22/preprocessing3.png")
# ## Discovery for Building Preprocessing Handlers
# ### Numerical transformers
# +
from sklearn.compose import ColumnTransformer
from sklearn.impute import SimpleImputer
from fancyimpute import KNN, IterativeImputer
from scipy.stats import boxcox, yeojohnson
from sklearn.preprocessing import PowerTransformer
# +
def num_imputation_handler(X):
    """Placeholder for numeric-imputation preprocessing (kept for the pipeline API)."""
    pass
####
####
# TODO: list the numeric columns that need imputation.
num_imputation_cols = []
# Candidate imputers. Options: mean / median / most_frequent / constant.
imputer_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
# fixed: this line previously rebound `imputer_mean` instead of defining `imputer_median`
imputer_median = SimpleImputer(missing_values=np.nan, strategy='median')
imputer_mode = SimpleImputer(missing_values=np.nan, strategy='most_frequent')
imputer_constant = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value=0)
# Transformer names must be unique within one ColumnTransformer (duplicate
# 'imputer' entries raise ValueError); swap in the strategy you want to try.
num_imputation_handler = ColumnTransformer(transformers=[
    ('imputer_mean', imputer_mean, num_imputation_cols),
], remainder='passthrough', verbose_feature_names_out=False)
####
####
# Numpy implementation for pipelines
num_imputation_handler.fit_transform(X_train)
# Pdf for verification / exploration — pandas-native alternatives:
X_train.fillna(method='ffill', inplace=True)
X_train.fillna(method='bfill', inplace=True)
X_train.interpolate(method='linear', inplace=True)
X_train.interpolate(method='quadratic', inplace=True)
X_train.interpolate(method='nearest', inplace=True)
# Model-based options from fancyimpute: KNN, MICE (IterativeImputer)
# (the previous bare line `fancyimpute KNN, MICE` was a SyntaxError)
# +
def power_transform_handler(X):
pass
####
####
power_transform_cols = ['Age', 'Fare']
power_transform = PowerTransformer(method='yeo-johnson',
standardize=False)
power_transform_handler = ColumnTransformer(transformers=[
('pt', power_transform, power_transform_cols),
], remainder='drop', verbose_feature_names_out=False)
####
####
print('X Shape before PT:', X_train.shape, '\n')
# Numpy implementation for pipelines
X_train_pt = power_transform_handler.fit_transform(X_train)
print('X Shape after PT as CT numpy:', X_train_pt.shape, '\n')
# print(X_train_pt, '\n')
# # Pdf for verification / exploration
X_train_pt = pd.DataFrame(X_train_pt, columns=power_transform_handler.get_feature_names_out())
X_train_pt.dtypes
X_train_pt.head()
# histograms of the features before power transforms
X_train[numerical_cols].hist()
# histograms of the features after power transforms
X_train_pt[power_transform_cols].hist()
# plt.show()
# Options:
# # Yeo-Johnson supports both positive or negative data
# pt = PowerTransformer(method='yeo-johnson', standardize=True, copy=True)
# # Box-Cox requires input data to be strictly positive
# pt = PowerTransformer(method='box-cox')
# In SciPy:
# y, fitted_lambda = yeojohnson(y, lmbda=None)
# y, fitted_lambda = boxcox(y, lmbda=None)
# +
# https://towardsdatascience.com/an-easy-tool-to-correctly-transform-non-linear-data-for-linear-regression-5fbe7f7bfe2f
Image(url = "https://miro.medium.com/max/1170/1*iiTwCk-QjOTS83Rl1qfO7A.png")
# + active=""
# The goal of this article is to demonstrate how to use this diagram to make transformations to your data. Before we get into examples, the way you can read this diagram is:
#
# If you see data that looks like the curve in the top left,
# you can try to decrease the power of x and/or increase the power of y.
#
# If you see data that looks like the curve in the top right,
# you can try to increase the power of x and/or increase the power of y.
#
# If you see data that looks like the curve in the bottom right,
# you can try to increase the power of x and/or decrease the power of y.
#
# If you see data that looks like the curve in the bottom left,
# you can try to decrease the power of x and/or decrease the power of y.
#
# Notes:
# Squaring the input variable is great for modeling data that are better fit by a curved line.
# transforming X: do not change the values of the residuals
#
# Using the log transform on the response is good for data where the variance is unequal.
# transforming y: the relationship between the linear model and the error terms is also changed.
# -
Image(url= "https://miro.medium.com/max/872/1*Jwpotn5OKYfkzoGQFYKunA.jpeg")
# + active=""
# If the data are right-skewed (clustered at lower values)
# move down the ladder of powers
# (that is, try square root, cube root, logarithmic, etc. transformations).
#
# If the data are left-skewed (clustered at higher values)
# move up the ladder of powers
# (cube, square, etc).
# -
Image(url= "https://miro.medium.com/max/656/1*8jUUiaF9dD9ZiLzH8e_9jA.png")
Image(url= "https://miro.medium.com/max/1400/1*RRZ4lakWAhBWRMC9r1r0Ew.jpeg")
# + active=""
# The boxcox() SciPy function implements the Box-Cox method. It takes an argument, called lambda, that controls the type of transform to perform.
#
# lambda = -1. is a reciprocal transform.
# lambda = -0.5 is a reciprocal square root transform.
# lambda = 0.0 is a log transform.
# lambda = 0.5 is a square root transform.
# lambda = 1.0 is no transform.
#
# A limitation of the Box-Cox transform is that it assumes that all values in the data sample are positive.
#
# Yeo-Johnson Transformation Method
#
# Unlike the Box-Cox transform, it does not require the values for each input variable to be strictly positive.
# It supports zero values and negative values. This means we can apply it to our dataset without scaling it first.
# +
def outlier_handler(X):
    """Placeholder for outlier-handling preprocessing (kept for the pipeline API)."""
    pass
####
####
# TODO: list the numeric columns whose outliers should be nulled out.
outlier_cols = []  # fixed: the assignment had no right-hand side (SyntaxError)
outlier_handler = ColumnTransformer(transformers=[
    ('outlier_remover', OutlierRemover(), outlier_cols)
], remainder='passthrough')
####
####
# +
#Load libraries
from sklearn.datasets import load_boston
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
#Load data
X, y = load_boston(return_X_y=True)
#Create data frame
boston = load_boston()
columns = boston.feature_names
df = pd.DataFrame(X, columns = columns)
# +
#df.describe()
df_1 = df[['TAX', 'B']]
df_2 = df[['CRIM', 'ZN', 'INDUS', 'RM', 'AGE', 'DIS', 'RAD', 'PTRATIO','LSTAT']]
df_3 = df[['CHAS', 'NOX']]
ax = sns.boxplot(data=df_2, orient="h", palette="Set2")
# + active=""
# # https://towardsdatascience.com/detecting-and-treating-outliers-in-python-part-1-4ece5098b755
#
# Tukey’s box plot method:
#
# Next to its visual benefits, the box plot provides useful statistics to identify individual observations as outliers.
#
# Tukey distinguishes between possible and probable outliers.
# A possible outlier is located between the inner and the outer fence, whereas a probable outlier is located outside the outer fence.
# -
Image(url="https://miro.medium.com/max/1342/1*vQyvZ7yZpLcFk7eDdoc5lg.png")
# +
#Tukey's method
def tukeys_method(df, variable):
    """Flag outliers in ``df[variable]`` using Tukey's box-plot fences.

    Probable outliers lie outside the outer fence (3 * IQR beyond the
    quartiles); possible outliers lie outside the inner fence (1.5 * IQR).
    Returns (probable_indices, possible_indices) as positional index lists.
    """
    q1 = df[variable].quantile(0.25)
    q3 = df[variable].quantile(0.75)
    iqr = q3 - q1
    # Inner fence: 1.5 * IQR beyond each quartile.
    inner_lo, inner_hi = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    # Outer fence: 3 * IQR beyond each quartile.
    outer_lo, outer_hi = q1 - 3 * iqr, q3 + 3 * iqr
    values = df[variable]
    outliers_prob = [i for i, v in enumerate(values)
                     if v <= outer_lo or v >= outer_hi]
    outliers_poss = [i for i, v in enumerate(values)
                     if v <= inner_lo or v >= inner_hi]
    return outliers_prob, outliers_poss
probable_outliers_tm, possible_outliers_tm = tukeys_method(df, "CRIM")
print(probable_outliers_tm)
# [374, 375, 376, 378, 379, 380, 381, 384, 385, 386, 387, 398, 400, 403, 404, 405, 406,
# 410 412, 413, 414, 415, 417, 418, 425, 427, 437, 440, 468, 477]
print(possible_outliers_tm)
# [367, 371, 373, 374, 375, 376, 377, 378, 379, 380, 381, 382, 384, 385, 386, 387, 388,
# 392, 394, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 409, 410, 411, 412, 413,
# 414, 415, 416, 417, 418, 419, 420, 422, 425, 426, 427, 429, 431, 434, 435, 436, 437,
# 438, 439, 440, 441, 443, 444, 445, 447, 448, 454, 468, 469, 477, 478, 479]
# +
#Transform 'CRIM' to log
log_CRIM = np.log(df['CRIM'])
df['CRIM_man'] = df['CRIM']+1
log_CRIM = np.log(df['CRIM_man'])
df['CRIM_log'] = log_CRIM
#Plot
sns.distplot(df['CRIM_log'])
#Calculate probable and possible outliers using log-iq method
probable_outliers_logiq, possible_outliers_logiq = tukeys_method(df, 'CRIM_log')
print(probable_outliers_logiq)
print(possible_outliers_logiq)
# + active=""
# Following a common rule of thumb, if z > C, where C is usually set to 3, the observation is marked as an outlier. This rule stems from the fact that if a variable is normally distributed, 99.7% of all data points are located 3 standard deviations around the mean. Let’s see on our example, which observations of ‘CRIM’ are detected to be outliers using the z-score:
# +
#Internally studentized method (z-score)
def z_score_method(df, variable_name):
#Takes two parameters: dataframe & variable of interest as string
columns = df.columns
z = np.abs(stats.zscore(df))
threshold = 3
outlier = []
index=0
for item in range(len(columns)):
if columns[item] == variable_name:
index = item
for i, v in enumerate(z[:, index]):
if v > threshold:
outlier.append(i)
else:
continue
return outlier
outlier_z = z_score_method(df, 'CRIM')
print(outlier_z)
# [380, 398, 404, 405, 410, 414, 418, 427]
# + active=""
# When using the z-score method, 8 observations are marked as outliers. However, this method is highly limited as the distributions mean and standard deviation are sensitive to outliers. This means that finding one outlier is dependent on other outliers as every observation directly affects the mean.
#
# Moreover, the z-score method assumes the variable of interest to be normally distributed. A more robust method that can be used instead is the externally studentized residuals. Here, the influence of the examined data point is removed from the calculation of the mean and standard deviation, like so:
# + active=""
# The test statistic is calculated like the z-score using robust statistics. Also, to identify outlying observations, the same cut-off point of 3 is used. If the test statistic lies above 3, it is marked as an outlier. Compared to the internally (z-score) and externally studentized residuals, this method is more robust to outliers and does assume X to be parametrically distributed (Examples of discrete and continuous parametric distributions).
# +
#MAD method
def mad_method(df, variable_name):
    """Flag upper outliers in ``df[variable_name]`` with the MAD (robust z-score) rule.

    The test statistic is (value - median) / MAD per column; observations with
    a statistic above 3 are flagged. NOTE: like the original article's code,
    this is one-sided — only unusually *large* values are flagged.
    Returns positional indices of the flagged observations.
    """
    columns = df.columns
    med = np.median(df, axis=0)
    # scale='normal' reproduces the 1.4826 consistency factor that the removed
    # scipy.stats.median_absolute_deviation applied by default.
    mad = np.abs(stats.median_abs_deviation(df, scale='normal'))
    threshold = 3
    outlier = []
    index = 0
    for item in range(len(columns)):
        if columns[item] == variable_name:
            index = item  # fixed: was `index == item`, a no-op comparison
    for i, v in enumerate(df.loc[:, variable_name]):
        t = (v - med[index]) / mad[index]
        if t > threshold:
            outlier.append(i)
    return outlier
outlier_mad = mad_method(df, 'CRIM')
print(outlier_mad)
#[20, 31, 32, 34, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 153, 154, 155,
# 156, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 171, 310, 356, 357,
# 358, 359, 360, 361, 362, 363, 364, 365, 366, 367, 368, 369, 370, 371, 372, 373, 374,
# 375, 376, 377, 378, 379, 380, 381, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391,
# 392, 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407, 408,
# 409, 410, 411, 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424, 425,
# 426, 427, 428, 429, 430, 431, 432, 433, 434, 435, 436, 437, 438, 439, 440, 441, 442,
# 443, 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458, 459,
# 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473, 474, 475, 476,
# 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487]
# -
# https://towardsdatascience.com/detecting-and-treating-outliers-in-python-part-2-3a3319ec2c33
# + active=""
# Handling non-error outliers
#
# There exist three different options on how to treat non-error outliers:
# Keep
# Delete
# Recode
# +
#Mahalonibis Distance
def mahalanobis_method(df):
    """Flag multivariate outliers with the classical Mahalanobis distance.

    Distances above the chi-square cut-off (p = 0.001, dof = number of
    columns) mark an observation as an outlier.
    Returns (outlier_indices, distances).
    """
    # Center the data and invert the sample covariance.
    centered = df - np.mean(df)
    inv_cov = sp.linalg.inv(np.cov(df.values.T))
    # Squared distances sit on the diagonal of (X-mu) S^-1 (X-mu)^T.
    md = np.sqrt(np.dot(np.dot(centered, inv_cov), centered.T).diagonal())
    # Cut-off point; degrees of freedom = number of variables.
    cutoff = np.sqrt(chi2.ppf(1 - 0.001, df=df.shape[1]))
    outlier = [i for i, d in enumerate(md) if d > cutoff]
    return outlier, md
outliers_mahal_bi, md_bi = mahalanobis_method(df=df_bivariate)
#[380, 398, 404, 405, 410, 414, 418, 427]
outliers_mahal, md = mahalanobis_method(df=df)
#[152, 155, 214, 353, 364, 365, 367, 380, 405, 410, 414, 418, 488, 489, 490, 491, 492]
# +
#Robust Mahalonibis Distance
def robust_mahalanobis_method(df):
    """Flag multivariate outliers with an MCD-based robust Mahalanobis distance.

    Fits a Minimum Covariance Determinant estimator on a fixed-seed synthetic
    sample (506 draws from a normal with the data's mean/covariance), then
    measures distances against the robust location/scatter. Observations above
    the chi-square cut-off (p = 0.001, dof = number of columns) are flagged.
    Returns (outlier_indices, distances).
    """
    # Deterministic synthetic sample for the MCD fit.
    rng = np.random.RandomState(0)
    sample = rng.multivariate_normal(mean=np.mean(df, axis=0),
                                     cov=np.cov(df.values.T),
                                     size=506)
    mcd_fit = MinCovDet(random_state=0).fit(sample)
    inv_cov = sp.linalg.inv(mcd_fit.covariance_)   # robust scatter, inverted
    centered = df - mcd_fit.location_              # robust centering
    md = np.sqrt(np.dot(np.dot(centered, inv_cov), centered.T).diagonal())
    # Cut-off point; degrees of freedom = number of variables.
    cutoff = np.sqrt(chi2.ppf(1 - 0.001, df=df.shape[1]))
    outlier = [i for i, d in enumerate(md) if d > cutoff]
    return outlier, md
outliers_mahal_rob_bi, md_rb_bi = robust_mahalanobis_method(df=df_bivariate)
#[141, 374, 380, 398, 404, 405, 410, 414, 418, 427]
outliers_mahal_rob, md_rb = robust_mahalanobis_method(df=df)
#[123, 126, 142, 152, 155, 163, 214, 283, 353, 364, 365, 367, 380, 405, 410,
# 418, 488, 489, 490, 491, 492]
# +
#Visualization
#You need deep copy otherwise cannot
#add column to a slice of a DataFrame
df_bi_cp = copy.deepcopy(df_bivariate)
#Add md and robust md to copy of dataframe
df_bi_cp['md'] = md_bi
df_bi_cp['md_robust'] = md_rb_bi
def flag_outliers(df, outliers):
    """Return a 0/1 list, one entry per row of *df*: 1 if the row's
    positional index appears in *outliers*, else 0."""
    flagged = set(outliers)
    return [1 if row in flagged else 0 for row in range(df.shape[0])]
#Flag outliers with 1, others with 0
df_bi_cp['flag'] = flag_outliers(df_bivariate, outliers_mahal_bi)
df_bi_cp['flag_rob'] = flag_outliers(df_bivariate, outliers_mahal_rob_bi)
#MD classic
ax = sns.scatterplot(x="LSTAT", y="CRIM", hue='flag', data=df_bi_cp)
#MD robust
ax = sns.scatterplot(x="LSTAT", y="CRIM", hue='flag_rob', data=df_bi_cp)
# -
# https://towardsdatascience.com/detecting-and-treating-outliers-in-python-part-3-dcb54abaf7b0
# +
#
class OutlierRemover(BaseEstimator, TransformerMixin):
    """Transformer that replaces Tukey-fence outliers with NaN, per column.

    ``fit`` records a (lower, upper) fence per column at ``factor`` * IQR
    beyond the quartiles; ``transform`` nulls out values beyond those fences
    so a downstream imputer can fill them.
    """

    def __init__(self, factor=1.5):
        # factor scales the IQR; 1.5 is Tukey's inner fence.
        self.factor = factor

    def outlier_detector(self, X, y=None):
        # Record the fences for a single column.
        col = pd.Series(X).copy()
        q1, q3 = col.quantile(0.25), col.quantile(0.75)
        spread = self.factor * (q3 - q1)
        self.lower_bound.append(q1 - spread)
        self.upper_bound.append(q3 + spread)

    def fit(self, X, y=None):
        # Reset fences, then compute them column by column.
        self.lower_bound = []
        self.upper_bound = []
        X.apply(self.outlier_detector)
        return self

    def transform(self, X, y=None):
        out = pd.DataFrame(X).copy()
        for idx in range(out.shape[1]):
            column = out.iloc[:, idx].copy()
            beyond = (column < self.lower_bound[idx]) | (column > self.upper_bound[idx])
            column[beyond] = np.nan
            out.iloc[:, idx] = column
        return out
outlier_remover = OutlierRemover()
test = pd.DataFrame({'col1':[100,200,300,999],'col2':[0,0,1,2],'col3':[-10,0,1,2]})
test
outlier_remover.fit_transform(test)
#
data.plot(kind="box",subplots=True,figsize=(15,5),title="Data with Outliers");
#
outlier_remover = OutlierRemover()
#ColumnTransformer to remove outliers
ct = ColumnTransformer(transformers=[['outlier_remover',OutlierRemover(),list(range(data.shape[1]))]],remainder='passthrough')
#iris data after outlier removal
data_without_outliers = pd.DataFrame(ct.fit_transform(data),columns=data.columns)
#iris data box plot after outlier removal
data_without_outliers.plot(kind="box",subplots=True,figsize=(15,5),title="Data without Outliers");
# 4 outliers are removed from SepalWidthCm, other columns stayed the same as they have no outliers.
data_without_outliers.isnull().sum()
#outliers removed from sepal width (cm)
list(data.loc[data_without_outliers.isnull().sum(axis=1)>0,'SepalWidthCm'])
# Method 2
def outlier_removal(X, factor):
    """Return a copy of *X* with Tukey-fence outliers replaced by NaN.

    For each column, values beyond ``factor`` * IQR outside the quartiles
    are set to NaN. The input is copied; the caller's data is untouched.
    """
    X = pd.DataFrame(X).copy()
    for i in range(X.shape[1]):
        x = pd.Series(X.iloc[:, i]).copy()
        q1 = x.quantile(0.25)
        q3 = x.quantile(0.75)
        iqr = q3 - q1
        lower_bound = q1 - (factor * iqr)
        upper_bound = q3 + (factor * iqr)
        # fixed: .iloc rejects a boolean *Series* as a row indexer in modern
        # pandas ("cannot use an indexable as a mask") — pass a numpy array.
        mask = ((X.iloc[:, i] < lower_bound) | (X.iloc[:, i] > upper_bound)).to_numpy()
        X.iloc[mask, i] = np.nan
    return X
#creating outlier_remover object using FunctionTransformer with factor=1.5
outlier_remover = FunctionTransformer(outlier_removal,kw_args={'factor':1.5})
test = pd.DataFrame({'col1':[100,200,300,999],'col2':[0,0,1,2],'col3':[-10,0,1,2]})
test
outlier_remover.fit_transform(test)
# -
# ### Categorical transformers
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, OrdinalEncoder
X_train.info()
X_train.head()
# +
from fancyimpute import KNN, IterativeImputer
def cat_imputation_handler(X):
pass
####
####
# cat_imputation_handler =
####
####
# Create dictionary for ordinal encoders
cat_imputation_cols = ['Cabin']
ordinal_enc_dict = {}
# Loop over columns to encode
for col_name in X_train[cat_imputation_cols]:
#Create ordinal encoder for the column
ordinal_enc_dict[col_name] = OrdinalEncoder()
# Select the non-null values in the column
col = X_train[col_name]
col_not_null = col[col.notnull()]
reshaped_vals = col_not_null.values.reshape(-1, 1)
# Encode the non-null values of the column
encoded_vals = ordinal_enc_dict[col_name].fit_transform(reshaped_vals)
# Replace the column with ordinal values
X_train.loc[col.notnull(), col_name] = np.squeeze(encoded_vals)
X_train_KNN_imputed = X_train[cat_imputation_cols].copy(deep=True)
X_train_KNN_imputed.head()
# Create KNN imputer
KNN_imputer = KNN()
X_train_KNN_imputed.iloc[:, :] = np.round(KNN_imputer.fit_transform(X_train_KNN_imputed))
for col in X_train_KNN_imputed:
reshaped_col = X_train_KNN_imputed[col].values.reshape(-1, 1)
X_train_KNN_imputed[col] = ordinal_enc_dict[col].inverse_transform(reshaped_col)
X_train_KNN_imputed.head()
# +
#instantiate both packages to use
encoder = OrdinalEncoder()
imputer = IterativeImputer(ExtraTreesRegressor())
imputer = KNN()
# create a list of categorical columns to iterate over
cat_imputation_cols = []
def encode(data):
    '''Ordinal-encode the non-null values of a Series, in place.

    Fits the module-level ``encoder`` (an OrdinalEncoder) on the non-null
    subset only, so NaN entries survive for a downstream imputer to fill.
    NOTE(review): mutates *data* (and therefore the caller's column) in
    place and also returns it.
    '''
    #retains only non-null values
    nonulls = np.array(data.dropna())
    #reshapes the data for encoding (sklearn expects a 2-D column)
    impute_reshape = nonulls.reshape(-1,1)
    #encode data, fitting on the non-null subset only
    impute_ordinal = encoder.fit_transform(impute_reshape)
    #Assign back encoded values to non-null positions; nulls stay NaN
    data.loc[data.notnull()] = np.squeeze(impute_ordinal)
    return data
#create a for loop to iterate through each column in the data
for columns in cat_imputation_cols:
encode(impute_data[columns])
impute_data
# impute data and convert
encode_data = pd.DataFrame(np.round(imputer.fit_transform(impute_data)), columns = impute_data.columns)
# +
def label_encoding_handler(y):
pass
####
####
label_encoding_handler = LabelEncoder()
####
####
print('y Shape before LE:', y_train.shape, '\n')
# Label encode target variable output as numpy array
y_train_le = label_encoding_handler.fit_transform(y_train)
print('y Shape after LE as numpy:', y_train.shape, '\n')
# print(y_train_le, '\n')
# +
def one_hot_encoding_handler(df):
pass
####
####
ohe_cols = ['Sex', 'Embarked']
ohe = OneHotEncoder(drop='first')
one_hot_encoding_handler = ColumnTransformer(transformers=[
('ohe', ohe, ohe_cols)
], remainder='passthrough', verbose_feature_names_out=False)
####
####
print('X Shape before OHE:', X_train.shape, '\n')
# Numpy implementation for pipelines
X_train_ohe = one_hot_encoding_handler.fit_transform(X_train)
print('X Shape after OHE as CT numpy:', X_train_ohe.shape, '\n')
print(X_train_ohe, '\n')
# Pdf for verification / exploration
X_train_ohe = pd.DataFrame(X_train_ohe, columns=one_hot_encoding_handler.get_feature_names_out())
print('X Shape after OHE as pdf:', X_train_ohe.shape, '\n')
# X_train_ohe.dtypes
X_train_ohe.head()
# Options:
# +
def ordinal_encoding_handler(X):
pass
####
####
ordinal_cols = ['Pclass']
ordinal_feat_1_categories = [1, 2, 3]
# ordinal_feat_2_categories = ['first', 'second', 'third']
ordinal = OrdinalEncoder(categories=[ordinal_feat_1_categories])
ordinal_encoding_handler = ColumnTransformer(transformers=[
('ordinal', ordinal, ordinal_cols)
], remainder='passthrough', verbose_feature_names_out=False)
####
####
print('X Shape before Ordinal Encoding:', X_train_ohe.shape, '\n')
# Numpy implementation for pipelines
X_train_ordinal = ordinal_encoding_handler.fit_transform(X_train_ohe)
print('X Shape after Ordinal Encoding as CT numpy:', X_train_ordinal.shape, '\n')
print(X_train_ordinal, '\n')
# Pdf for verification / exploration
X_train_ordinal = pd.DataFrame(X_train_ordinal, columns=ordinal_cols + list(X_train_ohe.drop(columns=ordinal_cols).columns))
X_train_ordinal.columns = [col +'_ordinal' if col in ordinal_cols else col for col in X_train_ordinal.columns]
# X_train_ordinal.dtypes
X_train_ordinal.head()
# Options
# +
def target_encoding_handler(df):
    """Placeholder for target-encoding preprocessing (not yet implemented)."""
    pass
####
####
# TODO: build the real transformer. None keeps this cell syntactically valid
# (the bare `target_encoding_handler =` was a SyntaxError).
target_encoding_handler = None
####
####
# -
# ### Text transformers
# +
# Text handlers
def vectorizer_handler(df):
    """Placeholder for text-vectorization preprocessing (not yet implemented)."""
    pass
####
####
# TODO: build the real vectorizer. None keeps this cell syntactically valid
# (the bare `vectorizer_handler =` was a SyntaxError).
vectorizer_handler = None
####
####
# Numpy implementation for pipelines
# Pdf for verification / exploration
# Options
# -
# ### Model input transformers
# +
# Model input handlers
def scaling_handler(X):
    """Placeholder for feature-scaling preprocessing (not yet implemented)."""
    pass
####
####
# TODO: build the real scaler. None keeps this cell syntactically valid
# (the bare `scaling_handler =` was a SyntaxError).
scaling_handler = None
####
####
# Numpy implementation for pipelines
# Pdf for verification / exploration
# Options
# +
def imbalance_handler(df):
    """Placeholder for class-imbalance handling (not yet implemented)."""
    pass
####
####
# TODO: build the real sampler. None keeps this cell syntactically valid
# (the bare `imbalance_handler =` was a SyntaxError).
imbalance_handler = None
####
####
# Numpy implementation for pipelines
# Pdf for verification / exploration
# Options
# -
# +
##############
###################
#################
# +
# Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
#
from sklearn import set_config # to change the display
from sklearn.utils import estimator_html_repr # to save the diagram into HTML format
# Validation
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OrdinalEncoder, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
# Classification models
from sklearn.linear_model import LogisticRegression
# Evaluation / Scoring metrics
from sklearn.metrics import accuracy_score
# +
# from sklearn.datasets import make_classification, load_breast_cancer
# X, y = load_breast_cancer(return_X_y = True, as_frame=True)
# X.head()
# +
import pandas as pd
from sklearn.datasets import load_boston
# load data
boston = load_boston()
X = pd.DataFrame(boston.data, columns=boston.feature_names)
X.drop('CHAS', axis=1, inplace=True)
y = pd.Series(boston.target, name='MEDV')
# inspect data
X.head()
# -
# Split into train & test
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2,
# stratify=y,
random_state=11)
X_train.dtypes
X_train.shape
X_train.info()
# +
cat_features = X_train.select_dtypes(include=['object']).columns
num_features = X_train.select_dtypes(include=['int64', 'float64']).columns
# cat_features = []
# num_features = []
# print(cat_features)
# print(num_features)
# -
len(cat_features)
len(num_features)
print(X_train.isnull().sum())
from helpers.preprocessing.outliers import boxplot_numeric_features, IQR_Outliers, CustomSampler_IQR
# + active=""
# Numeric Outlier (IQR)
# Z-Score
# DBSCAN
# Isolation Forest
# +
###
rows_for_plot = 6
cols_for_plot = 5
###
boxplot_numeric_features(X_train,
rows_for_plot=rows_for_plot,
cols_for_plot=cols_for_plot)
# +
from scipy import stats
import numpy as np
X = X_train
z_score_thresh = 3
print("Shape before IQR outlier removal:", X.shape)
print("Shape after IQR outlier removal:", X_o.shape)
# +
from scipy import stats
import numpy as np
X = X_train
z_score_thresh = 3
print("Shape before Z-score outlier removal:", X.shape)
print("Shape after Z-score outlier removal:", X_o.shape)
# +
# IQR_Outliers(X_train)
# +
# CustomSampler_IQR(X_train, y_train)
# +
# Split into train & test
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2,
stratify=y,
random_state=11)
X_train.head()
# +
# evaluate model on training dataset with outliers removed
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import mean_absolute_error
# summarize the shape of the training dataset
print(X_train.shape, y_train.shape)
# # summarize the shape of the updated training dataset
print(X_train.shape, y_train.shape)
### Model for coefficients of features
model = LogisticRegression(random_state=11)
s_scaler = StandardScaler()
###
# Pipeline with Scaler
pipeline_scaler = Pipeline([
('scaler', s_scaler),
('model', model)
])
pipeline_scaler.fit(X_train, y_train)
y_train_pred = pipeline_scaler.predict(X_train)
print("Accuracy on Train set:", accuracy_score(y_train, y_train_pred), "\n")
y_test_pred = pipeline_scaler.predict(X_test)
print("Accuracy on Test set:", accuracy_score(y_test, y_test_pred), "\n")
# +
# evaluate model on training dataset with outliers removed
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import mean_absolute_error
# summarize the shape of the training dataset
print(X_train.shape, y_train.shape)
# identify outliers in the training dataset
lof = LocalOutlierFactor()
yhat = lof.fit_predict(X_train)
# # select all rows that are not outliers
mask = yhat != -1
X_train, y_train = X_train[mask], y_train[mask]
# # summarize the shape of the updated training dataset
print(X_train.shape, y_train.shape)
### Model for coefficients of features
model = LogisticRegression(random_state=11)
s_scaler = StandardScaler()
###
# Pipeline with Scaler
pipeline_scaler = Pipeline([
('scaler', s_scaler),
('model', model)
])
pipeline_scaler.fit(X_train, y_train)
y_train_pred = pipeline_scaler.predict(X_train)
print("Accuracy on Train set:", accuracy_score(y_train, y_train_pred), "\n")
y_test_pred = pipeline_scaler.predict(X_test)
print("Accuracy on Test set:", accuracy_score(y_test, y_test_pred), "\n")
# # evaluate the model
# yhat = model.predict(X_test)
# # evaluate predictions
# mae = mean_absolute_error(y_test, yhat)
# print('MAE: %.3f' % mae)
# -
# +
from imblearn.pipeline import Pipeline
from imblearn import FunctionSampler
# Do i want to remove outliers from test dataset? Without the outlier tows then no prediction can be made
LR_Pipeline = Pipeline([
('Outlier_removal', FunctionSampler(func=CustomSampler_IQR, validate = False)),
('Imputer', SimpleImputer(strategy = "median")),
('LR', LogisticRegression(C = 0.7, random_state = 42, max_iter = 1000))])
# -
# Define categorical pipeline
cat_pipe = Pipeline([
('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
('encoder', OneHotEncoder(handle_unknown='ignore', sparse=False))
])
# Define numerical pipeline
num_pipe = Pipeline([
('imputer', SimpleImputer(strategy='median')),
('scaler', MinMaxScaler())
])
# Combine categorical and numerical pipelines
preprocessor = ColumnTransformer([
('cat_transformer', cat_pipe, cat_features),
('num_transformer', num_pipe, num_features)
])
# +
# Fit a pipeline with transformers and an estimator to the training data
pipeline = Pipeline([
    ('preprocessor', preprocessor),
    ('model', LogisticRegression())
])
pipeline.fit(X_train, y_train)
# Predict training data
# fixed: was `pipe.predict(...)` — `pipe` is undefined in this notebook
y_train_pred = pipeline.predict(X_train)
# print(f"Predictions on training data: {y_train_pred}")
print("Accuracy on Training set:", accuracy_score(y_train, y_train_pred), "\n")
y_test_pred = pipeline.predict(X_test)
# print(f"Predictions on test data: {y_test_pred}")
print("Accuracy on Test set:", accuracy_score(y_test, y_test_pred), "\n")
# +
# set config to diagram for visualizing the pipelines/composite estimators
set_config(display='diagram')
# Lets visualize the pipeline
pipeline
# -
# https://towardsdatascience.com/custom-transformers-and-ml-data-pipelines-with-python-20ea2a7adb65
# +
imputer = SimpleImputer(strategy="median")
# Num_vars is the list of numerical variables
X_train_num = X_train[numeric_features]
X_train_num = imputer.fit_transform(X_train_num)
# +
ordinal_encoder = OrdinalEncoder()
X_train_cat = X_train[categorical_features]
X_train_cat_ord_encoded = ordinal_encoder.fit_transform(X_train_cat)
X_train_cat_ord_encoded[:,1:10]
# +
cat_encoder = OneHotEncoder()
X_train_cat_hot_encoded = cat_encoder.fit_transform(X_train_cat)
X_train_cat_hot_encoded
# -
StandardScaler().fit_transform(X_train_num)
# +
# Custome transformations
from sklearn.base import BaseEstimator, TransformerMixin
ratings_index = -2
reviews_index = -1
class NewVariablesAdder(BaseEstimator, TransformerMixin):
    """Stateless transformer that appends a ratings-per-review column.

    Uses the module-level ``ratings_index`` / ``reviews_index`` column
    positions; expects a 2-D numpy array as input.
    """

    def __init__(self):
        pass

    def fit(self, X, y=None):
        # Nothing to learn — the transformation is pure arithmetic.
        return self

    def transform(self, X):
        # New variable: rating divided by number of reviews, appended on the right.
        ratio = X[:, ratings_index] / X[:, reviews_index]
        return np.c_[X, ratio]
# +
num_pipeline = Pipeline([
('imputer', SimpleImputer(strategy='median')),
# ('add_variables', NewVariablesAdder()),
('std_scaler', StandardScaler())
])
X_train_num_transformed = num_pipeline.fit_transform(X_train_num)
# +
pipeline = ColumnTransformer([
('numerical', num_pipeline, num_vars),
('categorical', OneHotEncoder(), cat_vars),
])
X_train = pipeline.fit_transform(X_train)
# -
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)
])
# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LogisticRegression())])
('features', FeatureUnion ([
('Cat Columns', Pipeline([
('Category Extractor', TypeSelector(np.number)),
('Impute Zero', SimpleImputer(strategy="constant", fill_value=0))
])),
('Numerics', Pipeline([
('Numeric Extractor', TypeSelector("category")),
('Impute Missing', SimpleImputer(strategy="constant", fill_value='missing'))
]))
]))
imputer = SimpleImputer(strategy = 'median', fill_value = 0)
# +
numeric_features = ['age', 'fare']
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='median')),
('scaler', StandardScaler())])
categorical_features = ['embarked', 'sex', 'pclass']
categorical_transformer = OneHotEncoder(handle_unknown='ignore')
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features),
('cat', categorical_transformer, categorical_features)])
# Append classifier to preprocessing pipeline.
# Now we have a full prediction pipeline.
clf = Pipeline(steps=[('preprocessor', preprocessor),
('classifier', LogisticRegression())])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=0)
clf.fit(X_train, y_train)
print("model score: %.3f" % clf.score(X_test, y_test))
# -
numeric_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='mean'))
,('scaler', StandardScaler())
])
categorical_transformer = Pipeline(steps=[
('imputer', SimpleImputer(strategy='constant'))
,('encoder', OrdinalEncoder())
])
# ## Duplicates
# NOTE(review): bare expression — in a notebook this displays the dict; confirm
# `dict_data_cleaning` is defined in an earlier cell.
dict_data_cleaning
# generate count statistics of duplicate entries
print ("## Number of duplicate rows ## \n")
if len(X_y_data[X_y_data.duplicated()]) > 0:
    print("Number of duplicated observations: ", len(X_y_data[X_y_data.duplicated()]))
    # Show duplicated rows next to each other for visual inspection.
    X_y_data[X_y_data.duplicated(keep=False)].sort_values(by=list(X_y_data.columns)).head()
else:
    print("No duplicated observations found")
# +
#X_y_data.drop_duplicates(inplace=True)
# -
# ## Missing/Null Values
# +
#dict_data_cleaning
# -
# drop rows with a lot of missing values.
# NOTE(review): `df` must carry a precomputed 'num_missing' column — confirm.
ind_missing = df[df['num_missing'] > 35].index
df_less_missing_rows = df.drop(ind_missing, axis=0)
# hospital_beds_raion has a lot of missing.
# If we want to drop.
cols_to_drop = ['hospital_beds_raion']
df_less_hos_beds_raion = df.drop(cols_to_drop, axis=1)
# replace missing values with the median.
med = df['life_sq'].median()
print(med)
df['life_sq'] = df['life_sq'].fillna(med)
# +
# impute the missing values and create the missing value indicator variables for each numeric column.
df_numeric = df.select_dtypes(include=[np.number])
numeric_cols = df_numeric.columns.values
for col in numeric_cols:
    missing = df[col].isnull()
    num_missing = np.sum(missing)
    if num_missing > 0:  # only do the imputation for the columns that have missing values.
        print('imputing missing values for: {}'.format(col))
        # Keep a boolean indicator column so models can use missingness itself.
        df['{}_ismissing'.format(col)] = missing
        med = df[col].median()
        df[col] = df[col].fillna(med)
# +
# impute the missing values and create the missing value indicator variables for each non-numeric column.
df_non_numeric = df.select_dtypes(exclude=[np.number])
non_numeric_cols = df_non_numeric.columns.values
for col in non_numeric_cols:
    missing = df[col].isnull()
    num_missing = np.sum(missing)
    if num_missing > 0:  # only do the imputation for the columns that have missing values.
        print('imputing missing values for: {}'.format(col))
        df['{}_ismissing'.format(col)] = missing
        top = df[col].describe()['top']  # impute with the most frequent value.
        df[col] = df[col].fillna(top)
# +
# Sentinel-value imputation: flag missingness explicitly instead of estimating.
# categorical
df['sub_area'] = df['sub_area'].fillna('_MISSING_')
# numeric
df['life_sq'] = df['life_sq'].fillna(-999)
# -
# This article covers 7 ways to handle missing values in the dataset:
# +
# 1. Deleting rows with missing values
# +
# 2. Impute missing values for a continuous variable
data["Age"] = data["Age"].replace(np.NaN, data["Age"].mean())
data["Age"] = data["Age"].replace(np.NaN, data["Age"].median())
# -
# 3. Impute missing values for a categorical variable
# +
# 4. Other imputation methods
data["Age"] = data["Age"].fillna(method='ffill')
data["Age"] = data["Age"].interpolate(method='linear', limit_direction='forward', axis=0)
# -
# 5. Using algorithms that support missing values
# +
# 6. Prediction of missing values
# Model-based imputation: train a regressor on complete rows, then predict the
# missing Age values from the remaining features.
from sklearn.linear_model import LinearRegression
import pandas as pd
data = pd.read_csv("train.csv")
data = data[["Survived", "Pclass", "Sex", "SibSp", "Parch", "Fare", "Age"]]
# Binary-encode sex so the regressor can consume it.
data["Sex"] = [1 if x=="male" else 0 for x in data["Sex"]]
# Rows with a missing Age become the prediction set.
test_data = data[data["Age"].isnull()]
data.dropna(inplace=True)
y_train = data["Age"]
X_train = data.drop("Age", axis=1)
X_test = test_data.drop("Age", axis=1)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# +
# 7. Imputation using the deep-learning library Datawig
# Datawig learns an imputation model per target column from other columns.
import pandas as pd
#pip install datawig
import datawig
data = pd.read_csv("train.csv")
df_train, df_test = datawig.utils.random_split(data)
#Initialize a SimpleImputer model
imputer = datawig.SimpleImputer(
    input_columns=['Pclass','SibSp','Parch'], # column(s) containing information about the column we want to impute
    output_column= 'Age', # the column we'd like to impute values for
    output_path = 'imputer_model' # stores model data and metrics
    )
#Fit an imputer model on the train data
imputer.fit(train_df=df_train, num_epochs=50)
#Impute missing values and return original dataframe with predictions
imputed = imputer.predict(df_test)
# -
# ## Do All Data Cleaning on Only Train Set and Apply Calculations to Validation/Test Later on
# ## Handle Data Types
# +
#dict_ml_data_types
# -
# ## Handle Missing Data
# +
from IPython.display import Image
from IPython.core.display import HTML
Image(url= "https://miro.medium.com/max/700/1*_RA3mCS30Pr0vUxbp25Yxw.png")
# -
dict_ml_missing_data
# ## Handle Redundant/Irrelevant Features
dict_ml_redundant_features
# ## Handle Redundant/Irrelevant Observations
dict_ml_redundant_observations
# ## Handle Outliers
dict_ml_outliers
# ## Handle Class Imbalance
dict_ml_class_imbalance
# ## Handle Category Encoding
dict_ml_category_encoding
# ## Handle Rescaling: Standardise/Normalise
dict_ml_rescaling
# ## Handle Other Distribution Transformations
# TODO: look into preprocessing pipelines next
# TODO: look into TargetEncoder
# +
# SETTINGS FOR ALL PREPROCESSING STEPS TO FEED INTO PIPELINES
imputers
scaler
pca
smote
# -
# +
# Compose the final modelling pipeline: column-wise preprocessing feeding a
# random-forest regressor.
preprocessor = ColumnTransformer(
    transformers=[
        ('numeric', numeric_transformer, numeric_features)
        ,('categorical', categorical_transformer, categorical_features)
    ])
# -
from sklearn.ensemble import RandomForestRegressor
pipeline = Pipeline(steps = [
    ('preprocessor', preprocessor)
    ,('regressor',RandomForestRegressor())
])
| notebooks/machine_learning_algorithms/5-Data-Preprocessing-Cleaning-Pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# prerequisite package imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
# %matplotlib inline
from solutions_multiv import adaptedplot_solution_1, adaptedplot_solution_2
# -
# In this workspace, you will work with the fuel economy dataset from the previous lesson on bivariate plots.
fuel_econ = pd.read_csv('./data/fuel_econ.csv')
fuel_econ.head()
fuel_econ.displ.describe()
fuel_econ.comb.describe()
bins_x = np.arange(0.6, 7+0.3, 0.3)
bins_y = np.arange(12, 58+3, 3)
plt.hist2d(data=fuel_econ, x='displ', y='comb', cmin=0.5,
cmap='viridis_r', bins=[bins_x, bins_y])
plt.xlabel('Displacement (l)')
plt.ylabel('Combined Fuel Eff. (mpg)')
plt.colorbar();
# **Let's change this heat map of fuel efficiency against engine size, so that average CO2 output is plotted instead. If we change it so that each point's weight is equal to its CO2 emissions divided by the number of cars in its bin, then the _total_ within each bin will be the average CO2 emissions.**
displ_bins = pd.cut(fuel_econ['displ'], bins_x, right=False, include_lowest=False,
labels=False).astype(int)
comb_bins = pd.cut(fuel_econ['comb'], bins_y, right=False, include_lowest=False,
labels=False).astype(int)
n_points = fuel_econ.groupby([displ_bins, comb_bins]).size()
n_points = n_points.reset_index().pivot(index='displ', columns='comb').values
n_points
displ_bins
comb_bins
n_points[displ_bins, comb_bins]
# Each car's CO2 emission divided by the number of cars in its bin --> the CO2 weight for this car
# Then for one bin, all CO2 weights in this bin will add up --> just like co2_1/n + co2_2/n + ... + co2_n/n
# --> equal to (co2_1 + co2_2 + ... + co2_n)/n --> the average CO2 emissions
co2_weights = fuel_econ['co2'] / n_points[displ_bins, comb_bins]
co2_weights
bins_x = np.arange(0.6, 7+0.3, 0.3)
bins_y = np.arange(12, 58+3, 3)
plt.hist2d(data=fuel_econ, x='displ', y='comb', cmin=0.5,
cmap='viridis_r', bins=[bins_x, bins_y], weights=co2_weights)
plt.xlabel('Displacement (l)')
plt.ylabel('Combined Fuel Eff. (mpg)')
plt.colorbar(label='CO2 (g/mi)');
# **Task 1**: Plot the city ('city') vs. highway ('highway') fuel efficiencies (both in mpg) for each vehicle class ('VClass'). Don't forget that vehicle class is an ordinal variable with levels {Minicompact Cars, Subcompact Cars, Compact Cars, Midsize Cars, Large Cars}.
vehicle_class = ['Minicompact Cars', 'Subcompact Cars', 'Compact Cars', 'Midsize Cars', 'Large Cars']
v_class = pd.api.types.CategoricalDtype(categories=vehicle_class, ordered=True)
fuel_econ['VClass'] = fuel_econ['VClass'].astype(v_class)
g = sb.FacetGrid(data=fuel_econ, col='VClass')
g.map(plt.scatter, 'city', 'highway')
g.set_titles(col_template='{col_name}');
g = sb.FacetGrid(data=fuel_econ, col='VClass')
g.map(sb.regplot, 'city', 'highway', x_jitter=0.3, scatter_kws={'alpha':1/5}, fit_reg=False)
g.set_titles(col_template='{col_name}');
g = sb.FacetGrid(data=fuel_econ, col='VClass', height=3, col_wrap=3)
g.map(plt.scatter, 'city', 'highway', alpha=1/5)
g.set_titles(col_template='{col_name}');
# run this cell to check your work against ours
adaptedplot_solution_1()
# **Task 2**: Plot the relationship between engine size ('displ', in meters), vehicle class, and fuel type ('fuelType'). For the lattermost feature, focus only on Premium Gasoline and Regular Gasoline cars. What kind of relationships can you spot in this plot?
fuel_econ.fuelType.unique()
df = fuel_econ.loc[fuel_econ['fuelType'].isin(['Regular Gasoline', 'Premium Gasoline'])]
ax = sb.barplot(data=df, x='VClass', y='displ', hue='fuelType', ci='sd')
plt.xticks(rotation=15)
plt.xlabel('Vehicle Classes')
plt.ylabel('Displacement (l)')
ax.legend(loc=8, ncol=2, framealpha=1, title='Fuel Type');
ax = sb.boxplot(data=df, x='VClass', y='displ', hue='fuelType')
plt.xticks(rotation=15)
plt.xlabel('Vehicle Classes')
plt.ylabel('Displacement (l)')
# legend to right of figure
ax.legend(loc=6, bbox_to_anchor=(1.0, 0.5));
# run this cell to check your work against ours
adaptedplot_solution_2()
| adapted_bivariate_plot_practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Visualize gSparse's 3-Community Graph with Networkx package
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# ## A 60 vertices Dumbell
# Load the 60-vertex dumbbell graph and draw it with a spring layout.
H = nx.read_edgelist(path="../Dataset/3-Community-30.csv", delimiter=' ', nodetype=int, encoding="utf-8")
fig = plt.figure(figsize=(10,10))
# `pos` is reused by the sparsifier plots below so all figures share a layout.
pos = nx.spring_layout(H,scale=3.0)
nx.draw(H,pos=pos)
#labels=nx.draw_networkx_labels(H,pos=pos)
# NOTE(review): bare attribute access — has no effect; leftover? Confirm and delete.
nx.node_attribute_xy
plt.show()
fig.savefig("3C-1.pdf")
# ## A Sparsifier with Epsilon = 0.25
#
# +
def _draw_sparsifier(edge_file, weight_file, width_scale, out_pdf):
    """Load a sparsifier's edge list and weights, draw it, and save a PDF.

    The three epsilon cells below were copy-pasted variants of the same code;
    they now share this helper. Uses the module-level `pos` spring layout so
    every figure is drawn with the same vertex placement.

    Args:
        edge_file: CSV of (source, target) vertex pairs, one edge per row.
        weight_file: CSV of one weight per edge, same order as `edge_file`.
        width_scale: multiplier applied to the weights to get line widths.
        out_pdf: path the rendered figure is saved to.
    """
    nodes = np.loadtxt(edge_file, delimiter=",")
    weight = np.loadtxt(weight_file, delimiter=",")
    dataset = pd.DataFrame({'source': nodes[:, 0], 'target': nodes[:, 1], 'weight': weight})
    H = nx.from_pandas_edgelist(dataset, edge_attr=True)
    # Edge widths are proportional to the sparsifier weights.
    edgewidth = np.array([float(d['weight']) for (u, v, d) in H.edges(data=True)]) * width_scale
    fig = plt.figure(figsize=(10, 10))
    nx.draw(H, pos=pos, width=edgewidth)
    plt.show()
    fig.savefig(out_pdf)


_draw_sparsifier('../Dataset/sparsifier-3-Community-eps25-edges.csv',
                 '../Dataset/sparsifier-3-Community-eps25-weights.csv',
                 0.25, "3C-2.pdf")
# -
# ## A Sparsifier with Epsilon = 0.50
# +
_draw_sparsifier('../Dataset/sparsifier-3-Community-eps50-edges.csv',
                 '../Dataset/sparsifier-3-Community-eps50-weights.csv',
                 0.5, "3C-3.pdf")
# -
# ## A Sparsifier with Epsilon = 0.75
# +
_draw_sparsifier('../Dataset/sparsifier-3-Community-eps75-edges.csv',
                 '../Dataset/sparsifier-3-Community-eps75-weights.csv',
                 1.0, "3C-4.pdf")
# -
| Visualization/3-Community-Graph.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Q1:
# + active=""
# Store a message in a variable, and then print that
# message.
# -
message = "Hello dude !"
print(message)
# # Q2:
# + active=""
# Store a message in a variable, and print that message.
# Then change the value of your variable to a new message, and print the new
# message.
# -
message2 = "Hey dude !!"
print(message2)
message2 = "it must change"
print(message2)
# # Q3:
# + active=""
# Store a person’s name in a variable, and print a message to that person. Your message should be simple, such as, “Hello Shahzad,
# would you like to learn some Python today?”
# -
person = "Shakeel"
print('Hello {0}, would you like to learn some Python today'.format(person))
# # Q4:
# + active=""
# Store a person’s name in a variable, and then print that person’s name in lowercase, uppercase, and titlecase.
# -
person = "Shakeel"
print(person.lower())
print(person.upper())
print(person.title())
# # Q5:
# + active=""
# Find a quote from a famous person you admire. Print the
# quote and the name of its author. Your output should look something like the
# following, including the quotation marks:
#
#
# <NAME> once said, “A person who never made a
# mistake never tried anything new.”
# -
print("<NAME> once said, \"When someone offends me, I think it\'s a gift from Allah. He is teaching me humility\" ")
# # Q6:
# + active=""
# Write addition, subtraction, multiplication, and division
# operations that each result in the number 8. Be sure to enclose your operations
# in print statements to see the results. You should create four lines that look
# like this:
#
#
# print(5 + 3)
# -
print(4+4)
print(11-3)
print(4*2)
print(16/2)
# # Q7:
# + active=""
# Create a variable called number1 with the value of 8.
# Write a print statement to print number1 multiplied by 9.
# -
number = 8
print(number * 9)
# # Q8:
# + active=""
# Store your favorite number in a variable. Then, using
# that variable, create a message that reveals your favorite number. Print that
# message.
# -
favorite_number = 7
print("My favorite number is {0}".format(favorite_number))
# # Q9:
# + active=""
# Store your name and your age in a varibale called my_name and my_age.
# Use format method to print your name and your age.
# Your final output sholud be like this:
#
# OUTPUT:
# My name is <NAME> and my age is 21.
#
# NOTE:Use both of the format methods which were discuss in the class.
# -
my_name = "Shakeel"
my_age = "25"
print("First Method".upper())
print("My name is {0} and my age is {1}".format(my_name, my_age))
print("Second Method".upper())
print(f"My name is {my_name} and my age is {my_age}")
# # Q10:
# + active=""
# Write a Python program to check if a number is positive, negative or zero.
# -
number = float(input('Enter your number :'))
if number == 0:
print ('Your number is zero')
if number > 0 :
print ('Your number is positive')
if number < 0:
print ('Your number is negative ')
# # Q11:
# + active=""
# Write a Python program which accepts the radius of a circle from the user and compute the
# area.
# -
pi = 3.142
radius = int(input('Please enter radius of the circle :'))
area = (pi)*(radius ** 2)
print (area)
# # Q12:
# + active=""
# Write a Python function to check whether a number is completely divisible by another
# number. Accept two integer values form the user
# -
num1 = int (input('Enter first integer value :'))
num2 = int (input('Enter second integer value :'))
if num1%num2 == 0:
print ('Your numbers are completely divisible')
else:
print('Your numbers are not divisible')
# # Q13:
# + active=""
# Write a Python program to find whether a given number (accept from the user) is even or
# odd, print out an appropriate message to the user.
# -
# Even/odd check on a user-supplied integer.
number = int(input('Enter your number:'))
if number % 2 == 0:
    print('Your number is even')
else:
    print('Your number is odd')  # fix: message previously read "Yuor number is odd"
# # Q14:
# + active=""
# Print a suitable statement that uses their response. Such as, if they entered “Bangkok”: “I’d love to visit Bangkook more often”.
# -
p=input('enter place:')
print (f'“I’d love to visit {p} more often”.')
# # Q15:
# + active=""
# Write an input line to ask a user whether they want to take the red pill or the blue pill.
#
# If they write “red” then print “You stay in wonderland and see how far the rabbit hole goes”.
#
# Elif they write “blue” then print “You wake up in your bed and believe what you want to believe.”.
#
# Else print “That’s not an option Neo.”
# -
input_line = input('Do you want to take red pill or bllue pill ?').lower()
if input_line == 'red':
print ('"You stay in wonderland and see how far the rabbit hole goes".')
elif input_line == 'blue':
print ('"You wake up in your bed and believe what you want to believe".')
else:
print('"That’s not an option Neo".')
| PIAIC Assignment 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import csv
import time
# declaring globals
# Backbone coordinates (one 3D point per row) loaded from the PDB-derived
# text file; mutated in place by the smoothing loop below.
dataVals = np.genfromtxt(r'1zncA.txt', delimiter='', dtype=float)
avgCoords = []  # scratch: centroid of the current triangle
indexofthresholds = []  # row indices marked for deletion once nearly collinear
# +
def normal(p0, p1, p2):
    """Return (point, normal) describing the plane through p0, p1 and p2.

    `point` is p1 (a point known to lie on the plane) and `normal` is
    (p1 - p0) x (p2 - p0). Uses np.cross instead of the original hand-rolled
    component formula, which is equivalent but easier to verify.

    Args:
        p0, p1, p2: array-like 3D coordinates.

    Returns:
        (np.ndarray, np.ndarray): a point on the plane and its normal vector.
    """
    u = np.asarray(p1, dtype=float) - np.asarray(p0, dtype=float)
    v = np.asarray(p2, dtype=float) - np.asarray(p0, dtype=float)
    point = np.array(p1)
    normal = np.cross(u, v)
    return point, normal
def point_check(point, norm, p_plane):
    """Classify `point` against the plane defined by (p_plane, norm).

    Returns -1 if the point is below the plane (against the normal),
    0 if it lies exactly on the plane, and 1 if it is above.

    Bug fix: the original tested `dot_product <= 0` first, which made the
    `== 0` (on-plane) branch unreachable, so on-plane points were reported
    as "below".
    """
    dot_product = np.dot(norm, (point - p_plane))
    if dot_product < 0:   # point is below the plane
        return -1
    elif dot_product == 0:  # point is on the plane
        return 0
    else:                 # point is above the plane
        return 1
# This function verifies if the line segment crosses plane or not
def segment_verify(l1, l2, p1, p2, p3):
    """Return True iff segment (l1, l2) crosses the plane through p1, p2, p3.

    Both endpoints are classified against the plane; the segment crosses it
    exactly when the two classifications differ.
    """
    plane_point, plane_norm = normal(p1, p2, p3)
    side_a = point_check(l1, plane_norm, plane_point)
    side_b = point_check(l2, plane_norm, plane_point)
    # Same side on both ends -> no crossing; different sides -> crossing.
    return side_a != side_b
# -
# determines if a line segment crosses the triangle
def intersect_line_triangle(q1, q2, p1, p2, p3):
    """Return 1 if segment (q1, q2) passes through triangle (p1, p2, p3), else 0."""

    def signed_tetra_volume(a, b, c, d):
        # Sign of the (scaled) tetrahedron volume — an orientation test.
        return np.sign(np.dot(np.cross(b - a, c - a), d - a))

    # Cheap rejection: a segment that never crosses the supporting plane
    # cannot cross the triangle.
    if not segment_verify(q1, q2, p1, p2, p3):
        return 0
    # The segment pierces the triangle iff it lies on the same side of all
    # three edge planes.
    s1 = signed_tetra_volume(p1, p2, q1, q2)
    s2 = signed_tetra_volume(p2, p3, q1, q2)
    s3 = signed_tetra_volume(p3, p1, q1, q2)
    return 1 if s1 == s2 == s3 else 0
# # Strategy 1: Once threshold is reach, remove the point
# Checking if Threshold is reached and we can skip and delete the index of i+1
def lineseg_dist(p, a, b):
    """Return the Euclidean distance from point `p` to segment `a`-`b`.

    Decomposes the offset into a clamped parallel component (how far p lies
    beyond either endpoint, measured along the segment) and a perpendicular
    component, then combines them with hypot.
    """
    # Unit tangent along the segment.
    tangent = np.divide(b - a, np.linalg.norm(b - a))
    # Signed overshoot past each endpoint along the tangent.
    overshoot_a = np.dot(a - p, tangent)
    overshoot_b = np.dot(p - b, tangent)
    # Zero when the projection of p falls inside the segment.
    parallel = np.maximum.reduce([overshoot_a, overshoot_b, 0])
    perpendicular = np.linalg.norm(np.cross(p - a, tangent))
    return np.hypot(parallel, perpendicular)
# # Run Code
# +
dataVals = np.genfromtxt(r'1zncA.txt', delimiter='', dtype=float)
totalTime = 0
# Iteratively "smooth" the backbone: pull each vertex toward the centroid of
# its neighbours whenever doing so does not drag the chain through itself.
# Arbitrary # of iterations
for k in range(0, 50):
    nproblem = 0
    start = time.time()
    for i in range(0, len(dataVals) - 2):
        # Centroid of the triangle (i, i+1, i+2) — candidate new position
        # for vertex i+1.
        xCoord = (dataVals[i][0] + dataVals[i + 1][0] + dataVals[i + 2][0]) / 3
        yCoord = (dataVals[i][1] + dataVals[i + 1][1] + dataVals[i + 2][1]) / 3
        zCoord = (dataVals[i][2] + dataVals[i + 1][2] + dataVals[i + 2][2]) / 3
        avgCoords = [xCoord, yCoord, zCoord]
        # First half of the swept region: triangle (P_i, P_{i+1}, centroid).
        A = dataVals[i]
        B = dataVals[i + 1]
        C = avgCoords
        nk = 0
        # Check all line segments before the triangle's own indices
        # (the segment immediately preceding i can never cross it).
        for j in range(0, i - 2):
            nk += intersect_line_triangle(dataVals[j], dataVals[j + 1], A, B, C)
        # Checking all line segments after the indexes used for the triangle.
        for j in range(i + 2, len(dataVals) - 1):
            nk += intersect_line_triangle(dataVals[j], dataVals[j + 1], A, B, C)
        # Second half of the swept region: triangle (P_{i+1}, centroid, P_{i+2}).
        A = dataVals[i + 1]
        B = avgCoords
        C = dataVals[i + 2]
        for j in range(0, i - 1):
            nk += intersect_line_triangle(dataVals[j], dataVals[j + 1], A, B, C)
        for j in range(i + 3, len(dataVals) - 1):
            nk += intersect_line_triangle(dataVals[j], dataVals[j + 1], A, B, C)
        # Only move the vertex when no segment pierces the swept region.
        if nk == 0:
            dataVals[i + 1] = avgCoords
        nproblem += nk
        # If the candidate position is nearly collinear with its neighbours,
        # mark the vertex for removal.
        # NOTE(review): this uses avgCoords even when the move above was
        # rejected (nk != 0), and records index i although the vertex that was
        # straightened is i + 1 — confirm both are intended.
        distance = lineseg_dist(avgCoords, dataVals[i], dataVals[i + 2])
        if distance < 0.01:
            indexofthresholds.append(i)
    # Delete every marked row in a single call. Fix: the old per-index loop
    # (one np.delete per index) shifted the remaining indices after each
    # deletion, so later deletions removed the wrong rows.
    if indexofthresholds:
        dataVals = np.delete(dataVals, indexofthresholds, 0)
    indexofthresholds = []
    end = time.time()
    print("On iteration:", k)
    totalTime += round(end - start, 10)
    print("Time for execution of program: {}".format(round(end - start, 10)))
    print("curr possible numknot:", nproblem)
print("Total program runtime: {}".format(totalTime))
# -
| ECS129 Knot or Not_v4_incomplete.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import dicom
import numpy as np
import os
from PIL import Image
import shutil
import scipy.misc
import random
from matplotlib import pyplot as plt
import SimpleITK as sitk
import json
from torchvision import transforms as T
from torchvision.transforms import functional as F
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
import pandas as pd
import glob
import csv
def rm_mkdir(dir_path):
    """Ensure `dir_path` exists and is empty: wipe it if present, then recreate."""
    already_there = os.path.exists(dir_path)
    if already_there:
        shutil.rmtree(dir_path)
        print('Remove path - %s' % dir_path)
    os.makedirs(dir_path)
    print('Create path - %s' % dir_path)
def convertImage(raw):
img = np.array(raw)
img[img>255] = 255
mean = np.mean(img)
min_img = np.min(img)
if min_img<-1500:
min_img = min_img*(1.0/2.0)
#print(mean, min_img)
img = img - mean
img = img*1.0 + mean*(mean/min_img)*1.1#修对比度和亮度
img = img/np.max(img)
#img[img>255] = 255
img[img<0] = 0
return img
num = 98  # index of the patient to visualise
# +
def _show_phase_masks(patient, phase):
    """Overlay tumour masks on every annotated slice of one contrast phase.

    For each DICOM slice whose companion *_mask.png is non-empty, shows the
    normalised slice with the mask blended into the G/B channels (cyan tint).
    The two copy-pasted arterial/venous cells now share this helper; the
    unused `img_t` intermediate from the originals was dropped.
    """
    slice_paths = glob.glob(os.path.join(
        './ISIC/dataset/cancer/B题-全部数据/数据集1/' + patient + '/' + phase, '*.dcm'))
    for path in slice_paths:
        image = sitk.ReadImage(path)
        image_array = sitk.GetArrayFromImage(image)
        mask_image = np.array(Image.open(path[:-4] + '_mask.png'))
        if np.max(mask_image) > 0:  # only slices that actually have a mask
            plt.figure(figsize=(7, 7))
            base = convertImage(image_array)[0, :, :]
            overlay = np.zeros(shape=(512, 512, 3))
            overlay[:, :, 0] = 0.1 * (mask_image / 255.) * base
            overlay[:, :, 1] = 0.5 * (mask_image / 255.) * base + base
            overlay[:, :, 2] = 0.5 * (mask_image / 255.) * base + base
            plt.title(label=path + ' ' + str(np.max(mask_image)))
            plt.imshow(overlay)
            plt.show()


patients = os.listdir('./ISIC/dataset/cancer/B题-全部数据/数据集1/')
for patient in patients[(num):(num + 1)]:
    _show_phase_masks(patient, 'arterial phase')
for patient in patients[(num):(num + 1)]:
    _show_phase_masks(patient, 'venous phase')
# -
| code/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3
# ---
# # Matrix Calculator With Clusters
#
# This uses Sagemaker to run the model as a Processing Job on a very large server
# from sagemaker import get_execution_role
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.sklearn.processing import SKLearnProcessor
# +
# role = get_execution_role()
# Processor that runs scikit-learn code as a SageMaker Processing Job on a
# single large instance.
sklearn_processor = SKLearnProcessor(framework_version = '0.20.0', # Specify Version of Scikit-learn
                                     role = "AmazonSageMaker-ExecutionRole-20211115T123876", # Specify Role Created for Security
                                     instance_type = 'ml.m5.24xlarge', # Specify Instance Type
                                     instance_count = 1)
# -
# This code runs a background job
sklearn_processor.run(code='processing.py', #This script does all the processing, make changes here
                      inputs=[ProcessingInput(
                          source="songdata.csv",
                          destination='/opt/ml/processing/input')],
                      outputs=[ProcessingOutput(
                          output_name='songlist',
                          source='/opt/ml/processing/output')]
                      )
| model/RunSagemakerModel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
class Heap:
    """A simple array-backed max-heap (parent of index i is (i - 1) // 2)."""

    def __init__(self):
        self.array = []

    def heapify(self, n, i):
        """Sift array[i] down so the subtree rooted at i satisfies the
        max-heap property, considering only the first n elements."""
        largest = i
        l = 2 * i + 1
        r = 2 * i + 2
        if l < n and self.array[l] > self.array[largest]:
            largest = l
        if r < n and self.array[r] > self.array[largest]:
            largest = r
        if largest != i:
            self.array[i], self.array[largest] = self.array[largest], self.array[i]
            self.heapify(n, largest)

    def insert(self, val):
        """Append `val` and restore the heap property.

        Bug fix: the old code captured len(self.array) *before* the append and
        passed that stale size to heapify, so the new element was never sifted
        into place (e.g. insert(3); insert(4) left the array as [3, 4]).
        """
        self.array.append(val)
        n = len(self.array)
        for i in range(n // 2 - 1, -1, -1):
            self.heapify(n, i)

    def delete(self, val):
        """Remove one occurrence of `val`; returns an error string if absent
        (interface preserved from the original implementation)."""
        try:
            i = self.array.index(val)
        except ValueError:
            return "Number is not present in the array!"
        # Replace the removed slot with the last element, then re-heapify.
        self.array[i] = self.array[-1]
        self.array.pop()
        n = len(self.array)
        for i in range(n // 2 - 1, -1, -1):
            self.heapify(n, i)
if __name__=='__main__':
    # Demo: build a max-heap, then delete an element and show the result.
    arr = Heap()
    arr.insert(3)
    arr.insert(4)
    arr.insert(9)
    arr.insert(5)
    arr.insert(2)
    print ("Max-Heap array: " + str(arr.array))
    arr.delete(4)
    print("After deleting an element: " + str(arr.array))
# -
| Anjani/data-structures/heap/basic_heap_operations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="eNf1ZO4_sJK2"
# # Assignment 2: Decision Trees
#
# In this assignment, you are going to implement a decision tree (or random forest) to forcast the weather.
#
# ## Description
#
# - You must implement a `Model` class for training and prediction:
# ```python
# X, y = load_dataset()
#
# model = Model(num_features, num_classes)
#
# # training
# model.fit(X, y)
#
# # prediction
# y_pred = model.predict(X)
# ```
# - Please search (Ctrl+F) for `TODO` to see what you need to do. You have to implement the classifier from scratch (do not directly use the classifier in scikit-learn).
# - About the dataset
# - Given the **training set**, please **train/validate** on it.
# (note that your model will get bad testing score if it overfits on the training set)
# - After submitting the assignment, we will train on the same training set and test on the hidden **testing set** for scoring (using [f1-score](https://towardsdatascience.com/a-look-at-precision-recall-and-f1-score-36b5fd0dd3ec#11b8)).
#
# ### Typical performance
#
# - **Random Guess**
# F1-Score: 0.30
# Accuracy: 0.50
# - **Always Predict 1**
# F1-Score: 0.37
# Accuracy: 0.22
# - **Always Predict 0**
# F1-Score: 0.00
# Accuracy: 0.77
# - **sklearn.tree.DecisionTreeClassifier**
# - **Training (5-fold cross-validation mean)**
# F1-Score: 0.63-0.99
# Accuracy: 0.85-0.99
# - **Validation (5-fold cross-validation mean)**
# F1-Score: 0.50-0.60
# Accuracy: 0.75-0.90
#
# + id="LEKlEvtfpJEd"
###########################
# DO NOT CHANGE THIS CELL #
###########################
import os
import urllib.request
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from sklearn.metrics import f1_score, accuracy_score
def load_dataset(url):
    """ Get and load weather dataset. """
    # NOTE(review): cell is marked DO NOT CHANGE upstream — comments only.
    path = url.split('/')[-1] # get the file name from url
    if not os.path.exists(path):
        print('Download:', url)
        urllib.request.urlretrieve(url, path)
    return pd.read_pickle(path) # pickle protocol=4
def get_input_target(df):
    """ Get X and y from weather dataset. """
    # Binary label column; everything else becomes the feature matrix.
    target_column = 'RainTomorrow' # predict 1 if it rains tomorrow
    X = df.drop(columns=[target_column]).to_numpy()
    y = df[target_column].to_numpy()
    return X, y
def k_fold_cv(model_create_fn, X, y, k=5):
    """ Run k-fold cross-validation. """
    results = []
    idxs = list(range(X.shape[0]))
    # NOTE(review): KFold.split returns positional indices 0..n-1 regardless of
    # the shuffled *contents* of `idxs`, and those positions are used to index
    # X/y directly — so this shuffle has no effect. Confirm (cell is marked
    # DO NOT CHANGE upstream, so comments only).
    np.random.shuffle(idxs)
    for i, (train_idxs, val_idxs) in enumerate(KFold(k).split(idxs)):
        splits = {'train': (X[train_idxs], y[train_idxs]),
                  'val': (X[val_idxs], y[val_idxs])}
        print('Run {}:'.format(i+1))
        model = model_create_fn()  # fresh model per fold
        model.fit(*splits['train']) # training
        # Score on both splits so over-fitting is visible side by side.
        for name, (X_split, y_split) in splits.items():
            y_pred = model.predict(X_split)
            result = {'split': name,
                      'f1': f1_score(y_pred, y_split),
                      'acc': accuracy_score(y_pred, y_split)}
            results.append(result)
            print('{split:>8s}: f1={f1:.4f} acc={acc:.4f}'.format(**result))
    return pd.DataFrame(results)
# @begin
# + id="wd7H-c_Gy_aA"
# TODO: you can define or import something here (optional)
from __future__ import division
import random
import numpy as np
from scipy.stats import mode
from collections import Counter
import time
class Model:
    def __init__(self, num_features, num_classes: int):
        """
        Initialize the model.

        Args:
            num_features (int) : the input feature size.
            num_classes (int) : number of output classes.
        """
        self.num_features = num_features
        self.num_classes = num_classes
        # Hard cap on tree depth to limit over-fitting.
        self.max_depth = 10

    def build_tree(self, X, y, feature_indexes, depth):
        """Recursively grow a decision tree; returns a `node` or a leaf label.

        Fix: the stopping conditions used identity comparisons (`is 0`,
        `is self.max_depth`). `entropy(y) is 0` is always False for a float,
        so pure nodes were never detected, and the others only worked via the
        CPython small-int cache. All are now value comparisons.
        """
        if len(y) < self.num_classes or entropy(y) == 0 or depth >= self.max_depth:
            # NOTE(review): scipy changed mode()'s return shape in 1.9+;
            # [0][0] assumes the legacy API — confirm the pinned scipy version.
            return mode(y)[0][0]
        feature_index, threshold = find_split(X, y, feature_indexes)
        X_true, y_true, X_false, y_false = split(X, y, feature_index, threshold)
        # A degenerate split means no threshold separated the data: make a leaf.
        if y_false.shape[0] == 0 or y_true.shape[0] == 0:
            return mode(y)[0][0]
        branch_true = self.build_tree(X_true, y_true, feature_indexes, depth + 1)
        branch_false = self.build_tree(X_false, y_false, feature_indexes, depth + 1)
        return node(feature_index, threshold, branch_true, branch_false)

    def fit(self, X: np.ndarray, y: np.ndarray):
        """
        Train on input/target pairs.

        Args:
            X (np.ndarray) : training inputs with shape (num_inputs, num_features).
            y (np.ndarray) : training targets with shape (num_inputs,).
        """
        n_features = X.shape[1]
        # Random-forest style feature subsampling; with n_sub_features equal
        # to num_features this is a random permutation of all feature indices.
        n_sub_features = self.num_features
        feature_indexes = random.sample(range(n_features), n_sub_features)
        self.tree = self.build_tree(X, y, feature_indexes, 0)

    def predict(self, X: np.ndarray) -> np.ndarray:
        '''
        Predict y given X.

        Args:
            X (np.ndarray) : inputs, shape: (num_inputs, num_features).

        Returns:
            np.ndarray : the predicted integer outputs, shape: (num_inputs,).
        '''
        num_sample = X.shape[0]
        p = np.empty(num_sample)
        for i in range(num_sample):
            # Walk from the root until we reach a leaf (a bare class label).
            n = self.tree
            while isinstance(n, node):
                if X[i][n.feature_index] <= n.threshold:
                    n = n.branch_true
                else:
                    n = n.branch_false
            p[i] = n
        return p
class node(object):
    """A single internal split of the decision tree.

    `feature_index` is the column tested; samples whose value is
    <= `threshold` are routed to `branch_true`, the rest to `branch_false`.
    Each branch is either another `node` or a leaf class label.
    """

    def __init__(self, feature_index, threshold, branch_true, branch_false):
        # Routing rule for this split.
        self.feature_index = feature_index
        self.threshold = threshold
        # Child subtrees (or leaf labels).
        self.branch_true = branch_true
        self.branch_false = branch_false
def find_split(X, y, feature_indexes):
    """Pick the best (feature, threshold) split among `feature_indexes`.

    For each candidate feature, 7 thresholds are sampled at random from the
    feature's observed values and the pair maximizing information gain wins.

    Args:
        X : 2-D array of samples, shape (num_samples, num_features).
        y : 1-D array of labels.
        feature_indexes : iterable of column indices to consider.
    Returns:
        (best_feature_index, best_threshold); (0, 0) if no sampled split
        produced a positive gain.
    """
    # Removed dead code from the original: an unused `num_features` local and
    # a `total` accumulated over all feature values but never read (O(n)
    # wasted work per feature).
    best_gain = 0
    best_feature_index = 0
    best_threshold = 0
    for feature_index in feature_indexes:
        # Distinct observed values; sorted so the sampling space is stable.
        values = sorted(set(X[:, feature_index]))
        # Try a handful of random thresholds instead of all values (speed).
        for _ in range(7):
            pivot = int(random.uniform(0, len(values) - 1))
            threshold = values[pivot]
            X_true, y_true, X_false, y_false = split(X, y, feature_index, threshold)
            gain = information_gain(y, y_true, y_false)
            if gain > best_gain:
                best_gain = gain
                best_feature_index = feature_index
                best_threshold = threshold
    return best_feature_index, best_threshold
def split(X, y, feature_index, threshold):
    """Partition (X, y) by the test `X[:, feature_index] <= threshold`.

    Vectorized with a boolean mask: the original per-row Python loop did O(n)
    list appends plus array reconstruction. As a bonus, an empty partition
    keeps its column dimension (shape (0, num_features)) instead of the
    shapeless `np.array([])` the loop produced.

    Args:
        X : 2-D array-like of samples.
        y : 1-D array-like of labels, same length as X.
        feature_index : column to test.
        threshold : decision boundary (inclusive on the "true" side).
    Returns:
        (X_true, y_true, X_false, y_false) as numpy arrays.
    """
    X = np.asarray(X)
    y = np.asarray(y)
    mask = X[:, feature_index] <= threshold
    return X[mask], y[mask], X[~mask], y[~mask]
def entropy(Y):
    """Shannon entropy (natural log) of the label sequence Y.

    Args:
        Y : sequence of hashable labels.
    Returns:
        float : -sum_y p(y) * ln p(y); 0.0 for an empty or pure sequence.
    """
    # Removed dead code from the original: `start = time.process_time()`
    # whose result was never used.
    total = len(Y)
    if total == 0:
        return 0.0
    s = 0.0
    for num_y in Counter(Y).values():
        p_y = num_y / total
        s += p_y * np.log(p_y)
    return -s
def information_gain(y, y_true, y_false):
    """Entropy reduction achieved by splitting `y` into `y_true` / `y_false`."""
    n = len(y)
    weighted_child_entropy = (entropy(y_true) * len(y_true)
                              + entropy(y_false) * len(y_false)) / n
    return entropy(y) - weighted_child_entropy
# TODO: define your methods if needed (optional)
# + colab={"base_uri": "https://localhost:8080/", "height": 443} id="EydllYOY2pEd" outputId="27672a68-bac6-45b6-b70c-d078d0e20172"
# @end
###########################
# DO NOT CHANGE THIS CELL #
###########################
df = load_dataset('https://lab.djosix.com/weather.pkl')
X_train, y_train = get_input_target(df)
df.head(100000)
# + [markdown] id="HYhtbRt0iORo"
#
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="S4fC13ijA_8-" outputId="d3e915c3-c456-4a5e-d578-ec70663569c8"
###########################
# DO NOT CHANGE THIS CELL #
###########################
create_model = lambda: Model(X_train.shape[1], 2)
k_fold_cv(create_model, X_train, y_train).groupby('split').mean()
# + [markdown] id="Z8nq6VuYDNt0"
# ## Submission
#
# 1. Make sure your code runs correctly after clicking `"Runtime" > "Restart and run all"`
# 2. Rename this notebook to `XXXXXXX_2.ipynb`, where `XXXXXXX` is your student ID.
# 3. Download IPython notebook: `"File" > "Download" > "Download .ipynb"`
# 4. Download Python source code: `"File" > "Download" > "Download .py"`
# 5. Create a zip file for `XXXXXXX_2.ipynb` and `XXXXXXX_2.py`
# named `XXXXXXX_2.zip`, where `XXXXXXX` is your student ID.
# 6. Upload the zip file to E3.
#
# 😊 Good luck!
| HW2/0816183_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # !wget https://huseinhouse-storage.s3-ap-southeast-1.amazonaws.com/bert-bahasa/singlish.txt
# +
with open('singlish.txt') as fopen:
singlish = fopen.read().split('\n')
len(singlish)
# +
import re
from tqdm import tqdm
import cleaning
def preprocessing(string):
    """Clean one sentence: drop hashtag/mention tokens and URLs, strip
    punctuation and digits, collapse whitespace, and lowercase.

    Args:
        string (str) : raw input sentence.
    Returns:
        str : cleaned, lowercased sentence.
    """
    # Drop tokens containing '#' or '@', then strip URLs from the rejoined text.
    # Raw strings avoid invalid-escape warnings; the original pattern
    # 'www.\S+' used an unescaped dot, so e.g. "wwwXfoo" was also removed.
    string = re.sub(
        r'http\S+|www\.\S+',
        '',
        ' '.join(
            [i for i in string.split() if i.find('#') < 0 and i.find('@') < 0]
        ),
    )
    # Pad common punctuation with spaces so it separates cleanly from words.
    chars = ',.()!:\'"/;=-'
    for c in chars:
        string = string.replace(c, f' {c} ')
    # Remove digits and remaining punctuation/symbols entirely.
    string = re.sub(
        r'[0-9!@#$%^&*()_\-+{}|\~`\'";:?/.>,<]',
        ' ',
        string,
        flags = re.UNICODE,
    )
    # Collapse runs of spaces and normalize case.
    string = re.sub(r'[ ]+', ' ', string).strip()
    return string.lower()
def loop(strings):
    """Preprocess every element of `strings` in place (with a progress bar)
    and return the same list."""
    for idx, text in enumerate(tqdm(strings)):
        strings[idx] = preprocessing(text)
    return strings
# -
singlish = cleaning.multiprocessing(singlish, loop, cores = 26)
# +
# https://guidesify.com/blog/2017/08/13/singlish-phrases-define-singapore/
manglish_vocab = {
'siasui', 'lah', 'chun', 'kapster', 'leh', 'lansi', 'lan si', 'meh',
'stim', 'maluation', 'kantoi', 'seh', 'yam', 'hor', 'la', 'cha',
'tao', 'amoi', 'aiya', 'angmor', 'angpau', 'beng', 'chow', 'batam',
'liao', 'nian', 'buiji', 'hou', 'guo', 'jiang', 'chiu', 'buji',
'hao', 'kam', 'wan', 'yao', 'cao', 'ciao', 'jin', 'hoseh',
'jiak', 'ying', 'leybit', 'sibei', 'laobu', 'sia', 'cilok',
'cibai', 'cb', 'entao', 'gwai', 'kai', 'kongmong', 'kapcai',
'lanjiao', 'lancau', 'lalazai', 'momantai', 'paikia', 'paiseh',
'pokai', 'seow', 'sohai', 'sueh', 'tapau', 'wor', 'hor',
'terrer', 'chop', 'lansi', 'paiseh', 'syok', 'shiok',
'sibeh', 'kawkaw', 'abuden', 'mah', 'lor', 'paiseh',
'niang', 'aiya', 'kena', 'aiyo', 'moh', 'bojio',
'buay', 'kia', 'chao', 'chim', 'cheem', 'chiong',
'chiobu', 'dabao', 'kiang', 'hosei', 'hoseh',
'jialat', 'kaypoh', 'kenasai', 'liddat', 'machiam',
'nehmind', 'pokkai', 'shiok', 'siol', 'smlj', 'suan',
'suay', 'swaku', 'swee', 'tyco', 'wapiang', 'walao', 'zhun',
'walau', 'ngaidi', 'ngaidiao', 'jilo', 'kampung',
'kaobu', 'siao', 'obiang', 'orredy', 'oredy',
'pantang', 'pokai', 'siam', 'tangi', 'talk cock', 'tombalek',
'womit', 'yandao'
}
# +
from tqdm import tqdm
manglish = []
for s in tqdm(singlish):
if len(set(s.split()) & manglish_vocab):
manglish.append(s)
# -
len(manglish), len(singlish)
# +
import json
with open('manglish.json', 'w') as fopen:
json.dump(manglish, fopen)
# +
import boto3
bucketName = 'malaya-dataset'
outPutname = f"dumping/twitter/manglish.json"
s3 = boto3.client('s3')
s3.upload_file('manglish.json',bucketName,outPutname)
| dumping/manglish/manglish.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Complex Numbers (huh? lol I forgot what I once knew)
#
# - Constructor: complex(x, y)
# - x: real part
# - y: imaginary part
# - literal: x + yJ or x + yj
# - x and y are stored as floats
#
# Example:
#
# <pre>
# a = complex(1, 2)
# b = 1 + 2j
# a == b --> True
# </pre>
#
# ## Properties and methods
#
# - .real
# - .imag
# - .conjugate() --> returns the complex conjugate
#
# ## Arithmetic operators
#
# - standard arithmetic operators work as expected with complex numbers
# - real and complex numbers can be mixed, e.g. (1 + 2j) + 3 --> 4 + 2j
# - // and % operators are not supported
#
# ## Other operations
#
# - == and != operators are supported
# - comparison operators are not supported, <. >, ...
# - math module functions will not work
# - cmath module functions work
#
# ## cmath functions
#
# - .phase(x): returns the angle between -pi, pi counter-clockwise from the real axis
# - abs(x): the built-in abs() returns the magnitude (r) of x (cmath has no abs of its own)
# - .rect(r, phi): returns a complex number equivalent to the complex number defined by r,phi
# ## Code Examples
a = complex(1, 2)
a
b = 1 + 2j
b
a == b
a.real,type(a.real)
a.imag,type(a.imag)
a.conjugate()
a = 1 + 2j
b = 10 + 8j
a + b
a * b
a / b
# ### Non-supported operators
a // 2
a % 2
divmod(a, b)
# ### More code examples
a = 0.1j
format(a.imag, ".25f")
a + a + a == 0.3j
format((a + a + a).imag, ".25f")
format((0.3j).imag, ".25f")
# ## cmath - watched video only
| python-deepdive/deepdive1/section04/section_04_complex_numbers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# [](https://colab.research.google.com/github/eirasf/GCED-AA3/blob/main/lab4/lab4.ipynb)
#
# # Lab4: Aprendizaje por refuerzo - Programación dinámica
#
# En este laboratorio nos familizarizaremos con [OpenGym](https://gym.openai.com/), una librería de Python desarrollada por [OpenAI](https://openai.com/) para simular problemas que se pueden resolver utilizando aprendizaje por refuerzo.
# Además, desarrollaremos algunos métodos de control basados en programación dinámica.
#
# ## OpenGym
# OpenGym es una herramienta para desarrollar y comparar algoritmos de aprendizaje por refuerzo. Facilita la simulación de interacciones de un agente con entornos muy diversos.
#
# Para utilizar OpenGym, el primer paso es importar la librería. Una vez hecho, podremos crear un entorno que nos permita simular el problema que deseemos de entre los [muchos disponibles](https://gym.openai.com/envs/).
#
# En esta práctica utilizaremos un entorno muy sencillo que simula un *GridWorld*, es decir, un mundo compuesto por casillas por las que el agente podrá desplazarse. En particular, cargaremos el entorno `MiniGrid-Empty-5x5-v0`.
# Si los paquetes no están instalados, hay que ejecutar estas líneas:
# #!pip install gym
# #!pip install gym-minigrid
import gym
import gym_minigrid
import numpy as np
env = gym.make('MiniGrid-Empty-5x5-v0')
# El método `gym.make` devuelve un objeto de tipo entorno que nos ofrece, entre otros, los siguientes métodos y propiedades:
# - `reset`: Devuelve el entorno a su estado original
# - `actions`: Muestra una lista de las acciones disponibles
# - `max_steps`: Fija el número máximo de acciones que puede realizar el agente en cada episodio
# - `render`: Devuelve una imagen donde aparece representada la situación actual
# - `step(accion)`: Ejecuta una acción y actualiza el entorno en consecuencia
#
# El objetivo del agente en este problema es alcanzar la meta, representada por una casilla verde. El agente aparece representado como una flecha roja que refleja su posición y orientación.
# +
# Muestra las acciones disponibles
acciones = env.actions
print([a.name for a in acciones])
# Resetea el entorno
env.reset()
# Muestra el entorno en su estado inicial
import matplotlib.pyplot as plt
def muestra_entorno(env):
    """Render the environment's current state as an inline image."""
    plt.imshow(env.render('rgb_array'))
    plt.show()
muestra_entorno(env)
# TODO: Ejecuta la acción forward (puedes referirte a la acción i-ésima como acciones(i) o como acciones.nombre) y muestra el entorno de nuevo
...
# TODO: Prueba a ver cómo afectan las distintas acciones al entorno
...
# -
# El método `step(acción)` devuelve cuatro valores:
# - Observación: Representa lo que puede percibir el agente del entorno en su estado actual
# - Recompensa: Indica el valor de la señal de recompensa para la ejecución de la acción aplicada en el estado en que se aplicó.
# - Completado: Valor booleano que indica si se ha terminado el episodio
# - Info: Información adicional
#
# De cara a aprender una política, debemos buscar una manera de representar los estados. Habitualmente, el agente se basaría en sus observaciones para generar una representación del estado. Sin embargo, para este problema en particular, vamos a utilizar una representación del estado más sencilla que la observación que puede hacer el agente. Esta labor de representación del estado la llevaría a cabo el **intérprete** en el esquema habitual de un problema de aprendizaje por refuerzo.
# 
#
# El intérprete identificará el estado con un vector de tres componentes que indicarán, respectivamente, la columna, fila y orientación del agente. Las filas/columnas se numerarán del 0 al 2 (de arriba a abajo y derecha a izquierda) y la orientación podrá tomar los siguientes valores:
# - 0 $\rightarrow$ derecha
# - 1 $\rightarrow$ abajo
# - 2 $\rightarrow$ izquierda
# - 3 $\rightarrow$ arriba
#
# El entorno de este problema nos da acceso directo a dos variables que serán de utilidad:
# - `agent_pos`: Indica la casilla en la que está el agente. Las casillas del tablero están numeradas del 1 al 3, por lo que será necesario adaptar dicha numeración.
# - `agent_dir`: Indica la orientación del agente usando el código descrito en el párrafo anterior.
#
# Creemos una función que haga las labores de intérprete y devuelva la codificación del estado actual del entorno:
# +
def get_estado(env):
    """Interpreter: encode the current state as [column, row, orientation].

    Rows/columns are numbered 0-2 (env.agent_pos numbers cells 1-3, so it
    must be shifted by one); orientation is 0=right, 1=down, 2=left, 3=up.
    """
    # TODO - Complete the function
    ...
env.reset()
# COMPROBACIÓN
assert(get_estado(env)==[0,0,0])
# -
# ### Simulación de un episodio con política aleatoria
#
# Ahora que sabemos manejar el entorno, vamos a probar qué tal funciona una política aleatoria.
#
# Crea un bucle que simule un episodio completo siguiendo una política aleatoria, es decir, que aplique una acción aleatoria hasta que el entorno nos indique que el episodio ha acabado. Puedes obtener una acción aleatoria llamando a `env.action_space.sample()`.
#
# Muestra el entorno tras aplicar cada acción. Además muestra un mensaje que indique:
# - Qué número de paso se ha ejecutado
# - El nombre de la acción aplicada
# - El estado resultante
# - La recompensa obtenida en ese paso
# - Si ha terminado la ejecución
# +
# Eliminamos el límite de pasos por episodio. Solo acabará el episodio al alcanzar la meta.
env.max_steps = float('inf')
print(f'Max steps modificado: {env.max_steps}')
done = False
contador = 0
while not done:
# TODO - Completa el bucle
...
# -
# ### Simplificación del espacio de acciones y definición de políticas
# Habrás comprobado que hay acciones que no son útiles para este problema. Vamos a simplificar la búsqueda reduciendo el espacio de acciones a tres:
# 1. left
# 2. right
# 3. forward
#
# Al hacer esto, ya no podemos generar una acción aleatoria utilizando `env.action_space.sample()`. Además, dicha función elige una acción al azar, lo cual no nos interesa. Nuestro objetivo es obtener una **política** que determine la mejor acción en cada situación, así que vamos a aprovechar el momento para definir una estructura de datos que nos permita hacer eso.
#
# La política debe indicar una probabilidad $\pi(a|s)$ de elegir la acción $a$ estando en el estado $s$ (así que $\sum_{a'}\pi(a'|s)=1$, con $s\in \mathcal{S}$ y $a\in \mathcal{A}$). Declararemos una política como un `numpy.array` que asocie a cada estado $s$ una distribución de probabilidad sobre las distintas acciones.
#
# La política aleatoria otorgará la misma probabilidad a todas las acciones para cada estado (como hay tres acciones, para cualquier par estado-acción la probabilidad será 1/3).
# +
# Seleccionamos solo las tres acciones indicadas
ACCIONES_UTILES = [acciones.left, acciones.right, acciones.forward]
# TODO - Indica el shape de la policy
policy_aleatoria = np.zeros(...)
# Inicializamos todos los valores a 1/3
policy_aleatoria[:] = 1.0/3
# -
# Una vez hemos definido la política, podemos definir una función para sustituir a `env.action_space.sample()`. Esta función recibirá la política que debe seguir y el estado para el que seleccionar la acción. Devolverá la acción muestreada siguiendo la distribución descrita por la política para ese estado.
# +
def sample_policy(state, policy):
    """Sample an action for `state` from the distribution given by `policy`.

    The unit interval is carved into one segment per action, with each
    segment's length equal to that action's probability; a uniform draw
    then selects the action whose segment it lands in.
    E.g. with distribution [0.1, 0.5, 0.4] the segments are [0-0.1],
    [0.1-0.6] and [0.6-1], for actions 0, 1 and 2 respectively.
    """
    # Probability distribution over actions for this state.
    probs = policy[state[0], state[1], state[2], :]
    # Uniform draw in [0, 1).
    draw = np.random.random()
    # Walk the cumulative distribution until it passes the draw.
    cumulative = probs[0]
    action_idx = 0
    while cumulative < draw:
        action_idx += 1
        cumulative += probs[action_idx]
    return ACCIONES_UTILES[action_idx]
# TODO - Obtén la acción muestreada de la policy_aleatoria para el estado en que el agente está en la casilla central mirando hacia abajo
accion = ...
print(accion)
# -
# ### Simulación de episodios siguiendo una política prefijada
# Al disponer ya de una definición para políticas y de una función de muestreo sobre cualquier política, podemos adaptar nuestro bucle que simula un episodio para que lo haga siguiendo una política concreta.
#
# Definiremos una función llamada `simula_episodio` para ello. La función debe recibir la política a seguir y dos parámetros que nos indicarán, respectivamente, si se mostrarán los *renders* de los pasos del episodio y si se mostrarán mensajes de texto.
#
# La función deberá devolver una tupla con el **retorno** obtenido, es decir, la suma de las recompensas recibidas en cada paso y el **número de pasos** necesarios para completar el episodio.
# +
def simula_episodio(politica, muestra_renders=False, imprime=False):
    """Simulate one full episode following `politica`.

    Args:
        politica : policy array mapping each state to an action distribution.
        muestra_renders (bool) : if True, render the environment each step.
        imprime (bool) : if True, print a message per step.
    Returns:
        tuple : (return of the episode, number of steps taken).
    """
    env.reset()
    # TODO - Adapt the loop you wrote 3 code cells above so that it uses the
    # policy to decide which action to take at each step
    ...
    return (ret, contador)
retorno,num_pasos = simula_episodio(policy_aleatoria, muestra_renders=False, imprime=True)
print(f'Simulado un episodio con retorno {retorno} ({num_pasos})')
# -
# ### Comprobación del rendimiento de la política
#
# Hagamos un experimento para evaluar la eficacia de la política aleatoria. Simularemos 200 episodios, anotando el número de pasos necesarios para completar cada uno. No es necesario que llevemos cuenta de los retornos obtenidos porque en este problema siempre será 1 (dado que solo la última acción da recompensa, con valor 1).
#
# Mostraremos estadísticas y gráficos que nos puedan ayudar a tener una idea de cómo de bien funciona.
# +
def comprueba_politica(politica):
    """Evaluate `politica` empirically over 200 simulated episodes.

    Plots a histogram and a box plot of episode lengths and prints summary
    statistics (mean, std, min, max number of steps).
    """
    episodios_pasos = []
    # TODO - Simulate 200 episodes, appending each one's step count to
    # episodios_pasos and printing per episode a message of the form
    # '{iteracion} - Simulado un episodio con retorno {retorno} ({num_pasos})'
    ...
    assert(len(episodios_pasos)==200)
    # Show a histogram of the episode durations
    plt.hist(episodios_pasos)
    plt.show()
    # Also show a box plot
    plt.boxplot(episodios_pasos)
    plt.show()
    # Finally, print a couple of statistics
    print('Se han necesitado de media',np.mean(episodios_pasos),'pasos (+-',np.std(episodios_pasos),')')
    print('El episodio más corto duró',np.min(episodios_pasos),'pasos y el episodio más largo duró',np.max(episodios_pasos),'pasos')
comprueba_politica(policy_aleatoria)
# -
# # Obtención de políticas óptimas
# ## Métodos de programación dinámica
#
# A la hora de encontrar una política óptima que guíe las acciones de nuestro agente, los métodos basados en programación dinámica son una buena opción cuando disponemos de conocimento respecto a las dinámicas del entorno.
#
# En particular, estos métodos requieren conocer $p(s',r | s,a)$, es decir, la probabilidad de acabar en un estado $s'$ recibiendo una recompensa $r$ si aplicamos la acción $a$ al estado $s$.
#
# OpenGym no nos proporciona esta información, pero en este problema sencillo podemos reconstruirla. Este entorno es determinista, lo que quiere decir que si aplicamos la acción $a$ al estado $s$ siempre obtendremos el mismo estado $s'$ y la misma recompensa $r$ con probabilidad 1 (cualquier otro estado $s_j$ o recompensa $r_j$ tendrán $p(s_j,r_j|s,a)=0$).
#
# Podemos, por tanto, representar el modelo con dos variables:
# - `transition_model`: Para cada combinación $s,a$, almacena el estado $s'$ al que conduce aplicar la acción $a$ a $s$.
# - `reward_model`: Para cada combinación $s,a$, almacena el valor de la recompensa obtenida al aplicar la acción $a$ a $s$.
#
# Sabiendo cómo se comporta el entorno, definimos estas dos variables con los valores adecuados.
# +
# MODEL DEFINITION
# Transition model - stores which state applying each action to each state leads to.
# Stored as a numpy array holding, for each state and each action, the resulting state.
transition_model = np.zeros((3,3,4,3,3),dtype=np.int16)
# Visit every state to fill in the state that each of the three possible actions leads to
for i in range(3): # Columns
    for j in range(3): # Rows
        for k in range(4): # Orientations
            # The left action turns the agent counter-clockwise: same cell, new orientation
            transition_model[i,j,k,0]=[i,j,(k+4-1)%4]
            # The right action turns the agent clockwise: same cell, new orientation
            transition_model[i,j,k,1]=[i,j,(k+1)%4]
            # The forward action keeps the orientation but changes row or column
            # depending on the heading (no-op when facing a wall: the default
            # zero entry is overwritten only when movement is possible)
            if i<2:
                transition_model[i,j,0,2]=[i+1,j,0] # Right
            if j<2:
                transition_model[i,j,1,2]=[i,j+1,1] # Down
            if i>0:
                transition_model[i,j,2,2]=[i-1,j,2] # Left
            if j>0:
                transition_model[i,j,3,2]=[i,j-1,3] # Up
# Reward model - stores the reward yielded by applying each action to each state.
# (The original comment said "transition model" here - copy-paste slip.)
# Stored as a numpy array holding, for each state and each action, the reward.
reward_model = np.zeros((3,3,4,3))
# It is 0 everywhere except in two cases:
# 1 - Agent in the second column of the third row, facing right, using forward
reward_model[1,2,0,2] = 1
# 2 - Agent in the third column of the second row, facing down, using forward
reward_model[2,1,1,2] = 1
# Helper: look up the successor state of each action for a given state.
def get_action_results(state):
    """Return, for `state`, the state each of the three actions leads to."""
    col, row, ori = state[0], state[1], state[2]
    return transition_model[col, row, ori]
# Helper: look up the reward of applying each action to a given state.
def get_action_rewards(state):
    """Return, for `state`, the reward each of the three actions yields."""
    col, row, ori = state[0], state[1], state[2]
    return reward_model[col, row, ori]
# TODO - Muestra los estados resultantes y las recompensas de aplicar las distintas acciones cuando el agente está en la tercera casilla de la segunda fila, orientado hacia abajo y se ejecuta la acción forward
...
...
# -
# ## Algoritmo 1: Evaluación de políticas iterativa
# El algoritmo de evaluación de políticas iterativa calcula el valor $v_\pi(s)$ para todo estado $s$ siguiendo la política $\pi$. La fórmula que utiliza, que está basada en la ecuación de Bellman para $v_\pi(s)$, es esta:
#
# $$v_\pi(s)=\mathbb{E}_\pi [R_{t+1}+\gamma v_k(S_{t+1}) | S_t=s]$$
#
# $$=\sum_a\pi(a|s)\sum_{s',r}p(s',r|s,a)[r+\gamma v_k(s')]$$
#
# Al ser este un problema determinista, la expresión se simplifica, dado que $p(s',r|s,a)$ será 0 en todos los casos menos en 1 (porque aplicar la acción $a$ al estado $s$ conduce siempre a un estado $s'$ fijo y único y otorga siempre una recompensa $r$ fija y única; ambos valores los podemos obtener del modelo).
#
# Puedes ver el algoritmo al completo aquí (y un ejemplo en la transparencia 19 del Tema 3 de teoría):
# 
#
# Implementemos el algoritmo para obtener los valores asignados a cada estado siguiendo una política fija.
#
# **CONSEJO: Para asegurar la convergencia, actualiza todos los valores de $V_{t+1}(s)$ a partir de los valores $V_t(s)$, es decir, crea una variable `new_state_values` donde almacenes todos los calculados en una iteración y al completar la iteración establece `state_values = new_state_values`**
# +
GAMMA = 0.1
def iterative_policy_evaluation(policy):
    """Iteratively estimate v_pi(s) for every state under `policy`.

    Sweeps all states applying the Bellman expectation update until the
    largest change in any state value drops below THETA.
    Returns a numpy array with one value per state.
    """
    THETA = 0.00001
    # This variable will hold the values computed for the states
    # TODO - Set the appropriate shape. You must store one value per possible state
    state_values = np.zeros(...)
    # TODO - Complete the algorithm as described above
    ...
    return state_values
state_values = iterative_policy_evaluation(policy_aleatoria)
print(state_values)
# COMPROBACIÓN
np.testing.assert_almost_equal(state_values[0,0,1],4.11522634e-07)
# -
# ### Optimización de políticas
#
# En el apartado anterior hemos obtenido un valor para cada estado del problema cuando se sigue la política $\pi$. Según lo visto en teoría, podemos utilizar estos valores para encontrar una nueva política $\pi'$ que sea igual o mejor que $\pi$.
#
# Cuando disponemos de $v_\pi(s)$ y del modelo que describe el funcionamiento del entorno, podemos obtener una política $\pi'\geq\pi$ simplemente seleccionando, para cada estado, la acción que nos da un mayor retorno, que podemos calcular como la recompensa inmediata más el valor del estado al que llegamos (descontado por $\gamma$).
#
# $$\pi'(s)=\arg\max_a\sum_{s',r}p(s',r|s,a)\left[r+\gamma v_\pi(s')\right]$$
#
# Escribamos un algoritmo que obtenga $\pi'$ a partir de `state_values`.
# +
def greedify_policy(state_values):
    """Return the deterministic greedy policy derived from `state_values`."""
    new_shape = list(state_values.shape)
    new_shape.append(3)
    # TODO - Set the appropriate shape. For each state-action pair we must store
    # the probability this policy assigns to taking that action in that state.
    # Since the policy is deterministic, for a given state every action has
    # probability 0 except one, which has probability 1.
    policy = np.zeros(...)
    # TODO - Write nested loops to visit every state
    ...
    # For this state we must compute, for every action a, G=(r + GAMMA*state_values[s[0],s[1],s[2]])
    # where r is the reward of applying a and s is the resulting state.
    # Of all those values, take the best (say the i-th) and set probability 1
    # on the i-th action for this state.
    # TODO - Loop over the actions computing their G. Select the best and set
    # the probability of the corresponding action to 1.
    ...
    return policy
policy_mejorada = greedify_policy(state_values)
# COMPROBACIÓN
assert(np.array_equal(policy_mejorada[2,1,1],[0., 0., 1.]))
# -
# ## Obtención de la política óptima - Policy iteration
# Ahora que podemos evaluar una política y, a partir de esos valores, obtener una política mejorada, podemos repetir el proceso para seguir mejorando nuestra política hasta que no cambie. Este procedimiento se denomina *policy iteration* y lo tienes aquí descrito:
#
# 
#
# Implementémoslo (la mayoría ya lo tenemos hecho).
# +
def policy_iteration(initial_policy):
    """Alternate policy evaluation and greedy improvement until stable.

    Args:
        initial_policy : starting policy array.
    Returns:
        The resulting policy, which no longer improves (optimal).
    """
    current_policy = initial_policy
    policy_stable = False
    # Keep track of which step we are on
    step = 0
    while not policy_stable:
        # TODO - Compute the values for current_policy
        current_values = ...
        # TODO - Obtain the improved policy from the computed values
        new_policy = ...
        # Check whether the policy has changed
        policy_stable = np.array_equal(current_policy, new_policy)
        # Prepare the next iteration
        current_policy = new_policy
        step+=1
        print('Paso #',step)
    # When the loop ends we have a policy that no longer improves
    return current_policy
policy_optima = policy_iteration(policy_aleatoria)
print(policy_optima)
# COMPROBACIÓN
assert(np.array_equal(policy_optima[2,0,1],[0., 0., 1.]))
# -
# Comprobemos que la política obtenida funciona bien simulando un episodio siguiéndola.
simula_episodio(policy_optima, muestra_renders=True, imprime=True)
# ## Obtención de la política óptima - Iteración de valores
# El proceso de iteración de políticas es costoso porque requiere hacer una estimación iterativa de $v_\pi(s)$ para todas las políticas por las que se pasa. Este proceso se puede simplificar y obtener una política óptima siguiendo el algoritmo de iteración de valores:
#
# 
#
# El algoritmo es muy similar al de evaluación de políticas iterativas. La diferencia es que, en lugar de calcular $\mathbb{E}_\pi [R_{t+1}+\gamma v_k(S_{t+1}) | S_t=s]$, se toma el valor de la mejor acción. Al final, se devuelve la política greedy derivada de los valores calculados. Implementémoslo.
# +
def value_iteration(): # Takes no parameters. Computes values and policy from the environment model.
    """Value iteration: estimate optimal state values directly (taking the
    best action's G at each sweep) and return the greedy policy they imply."""
    # TODO - Repeat the iterative policy evaluation algorithm but taking the G of the best action for each state
    # Once you have the state_values, return the policy derived from them
    ...
otra_optima = value_iteration()
print(otra_optima)
# COMPROBACIÓN
assert(np.array_equal(otra_optima[2,0,1],[0., 0., 1.]))
# -
# Para terminar, comprobemos que la política obtenida así también funciona bien.
simula_episodio(otra_optima, muestra_renders=True, imprime=True)
env.close() # Cerramos, además, el entorno
# # ¡Enhorabuena!
# Has terminado este laboratorio. Ahora sabes cómo interactuar con OpenGym y cómo funcionan los principales algoritmos de programación dinámica para aprendizaje por refuerzo.
| lab4/lab4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # UCDs: working with heterogeneous tables
#
# Suppose you want to do something using a column that you expect to find in a bunch of different tables, like coordinates and time. It's a good bet that many if not most of the tables have coordinate columns, but there's no rule about what they have to be named.
#
# When doing detailed catalog queries with the TAP, you can obviously examine the columns of every table you're interested in to find the columns you want. Then you can hard-code the correct ones into each query for each table and service.
#
# Or, you can also search for keywords like "ra" or "ascension" in the columns and their descriptions to get the columns you want automatically that way.
#
# But is there are more generic way? [Unified Content Descriptors (UCDs)](http://www.ivoa.net/documents/latest/UCD.html) are a VO standard that allows table publishers to name their columns whatever they (or their contributors) want but to identify those that contain standard sorts of data. For example, the RA column could be called "RA", "ra", "Right_Ascension", etc. But in all cases, a VO service can label the column with its UCD, which is "pos.eq.ra". This information is not part of the table but part of the meta-data that the service may provide with that data. Though not required of all VO services, UCDs are commonly provided precisely to make such tasks as identifying the columns of interest easier to automate.
#
# This is easiest to show by example.
# +
# Generic VO access routines
import pyvo as vo
# For specifying coordinates
from astropy.coordinates import SkyCoord
# Ignore unimportant warnings
import warnings
warnings.filterwarnings('ignore', '.*Unknown element mirrorURL.*', vo.utils.xml.elements.UnknownElementWarning)
# -
# Let's look at some tables in a little more detail. Let's find the Hubble Source Catalog version 3 (HSCv3), assuming there's only one at MAST.
# +
services = vo.regsearch(servicetype='tap', keywords=['mast'])
hsc=[s for s in services if 'HSCv3' in s.res_title][0]
print(f'Title: {hsc.res_title}')
print(f'{hsc.res_description}')
# -
# Now let's see what tables are provided by this service for HSCv3. Note that this is another query to the service:
tables = hsc.service.tables # Queries for details of the service's tables
print(f'{len(tables)} tables:')
for t in tables:
print(f'{t.name:30s} - {t.description}\n----') # A more succinct option than t.describe()
# Let's look at the columns of the DetailedCatalog table. Again, note that calling the columns attribute sends another query to the service to ask for the columns.
columns=tables['dbo.DetailedCatalog'].columns
for c in columns:
print(f'{f"{c.name} [{c.ucd}]":30s} - {c.description}')
# The PyVO method to get the columns will automatically fetch all the meta-data about those columns. It's up to the service provider to set them correctly, of course, but in this case, we see that the column named "MatchRA" is identified with the UCD "pos.eq.ra".
#
# So if we did not know the exact name used in HSCv3 for the RA, we could do something like this looking for the string "RA":
ra_name=[c.name for c in columns if 'RA' in c.name or "ascension" in c.name.lower()]
print(ra_name)
# But a more general approach is to check for the correct UCD. It also has the further advantage that it can be used to label columns that should be used for certain purposes when there are multiple possibilities. For instance, this table has MatchRA and SourceRA. Let's check the UCD:
#
# (Note that the UCD is not required. If it isn't there, you get a None type, so code the check carefully)
ra_name=[c.name for c in columns if c.ucd and 'pos.eq.ra' in c.ucd][0]
dec_name=[c.name for c in columns if c.ucd and 'pos.eq.dec' in c.ucd][0]
ra_name,dec_name
# What that shows you is that though there are two columns in this table that give RA information, only one has the 'pos.eq.ra' UCD. The documentation for this ought to explain the usage of these columns, and the UCD should not be used as a substitute for understanding the table. But it can be a useful tool.
# In particular, you can use the UCDs to look for catalogs that might have the information you're interested in. Then you can code the same query to work for different tables (with different column names) in a loop. This sends a bunch of queries but doesn't take too long, a minute maybe.
coord = SkyCoord.from_name("m83")
# Look for all TAP services with x-ray and optical data
collection = {}
for s in vo.regsearch(servicetype='tap', keywords=['x-ray', 'optical']):
    if "wfau" in s.ivoid: continue  # These sometimes have issues
    print(f"Looking at service from {s.ivoid}")
    tables = s.service.tables
    # Find all the tables that have an RA,DEC and a start and end time
    for t in tables:
        names = {}
        for ucd in ['pos.eq.ra', 'pos.eq.dec', 'time.start', 'time.end']:
            cols = [c.name for c in t.columns if c.ucd and ucd in c.ucd]
            if len(cols) > 0:
                names[ucd] = cols[0]  # use the first that matches
        if len(names.keys()) == 4:
            # The four UCD-matched columns only gate which tables qualify.
            # Fetch all columns (*) so later cells can search these results
            # for other UCDs such as phot.mag.
            # (A query selecting just the four named columns used to be
            # built here and then immediately discarded; it has been removed.)
            query = f"select top 10 * from {t.name}"
            print(f" Table {t.name} has the right columns. Executing query")
            results = s.search(query)
            print(" Found {} results\n".format(len(results)))
            # Careful. We're assuming the table names are unique
            collection[t.name] = results
# You can also use UCDs to look at the results. Above, we collected just the first 10 rows of the four columns we're interested in from every catalog that had them. But these tables still have their original column names. So the UCDs will still be useful, and PyVO provides a simple routine to convert from UCD to column (field) name.
#
# Note, however, that returning the UCDs as part of the result is not mandatory, and some services do not do it. So you'll have to check.
#
# Now we have a collection of rows from different tables with different columns. In the results object, we have access to a fieldname_with_ucd() function to get the column you want. Let's find out which of these tables has a magnitude column:
#ucd='pos.eq.ra'
ucd = 'phot.mag'
for tname, results in collection.items():
    #print(f"On table {tname}")
    # Bug fix: reset `name` each iteration. Previously a failed lookup on
    # the first table raised NameError, and a failure on a later table
    # silently reused the name found for the previous table.
    name = None
    # Sometimes this doesn't work well, so use a try:
    try:
        name = results.fieldname_with_ucd(ucd)
    except Exception:  # narrowed from a bare except
        pass
    if name:
        print(f" Table {tname} has the {ucd} column named {name}")
    else:
        print(f" (Table {tname} didn't find the UCD.)")
# Lastly, if you have a table of results from a TAP query (and if that service includes the UCDs), then you can get data based on UCDs with the getbyucd() method, which simply gets the corresponding element using fieldname_with_ucd():
results=hsc.service.search("select top 10 * from dbo.DetailedCatalog")
[r.getbyucd('phot.mag') for r in results]
# Note that we can see earlier in this notebook, when we looked at this table's contents, that there are two phot.mag fields in this table, MagAper2 and MagAuto. The getbyucd() and fieldname_with_ucd() routines do not currently allow you to handle multiple columns with the same UCD. The code can help you find what you want, but it depends on the meta data the service defines, and you still must look at the detailed information for each catalog you use to understand what it contains.
| CS_UCDs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Sample sin and cos over [0, 100] and plot each column in its own subplot.
x = np.linspace(0, 100, 1000)
y = np.sin(x)
z = np.cos(x)
#print x
df = pd.DataFrame({'x': x, 'y': y, 'z': z})
df.plot(x='x', subplots=True)
| machine-learning-nanodegree/smartcab/smartcab/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ipyvuetify Tutorial 07 - Text Styles and Colours
#
# This is the seventh in a series of ipyvuetify app development tutorials. If you're just getting started with ipyvuetify and haven't checked out the first tutorial "01 Installation and First Steps.ipynb", be sure to check that one out first.
#
# For more details on text and colour options, check out these three pages:
#
# * https://vuetifyjs.com/en/styles/typography/
# * https://vuetifyjs.com/en/styles/colors/
# * https://vuetifyjs.com/en/styles/text/
#
# First of all, we'll load the required packages, and test to make sure your environment has all the dependencies set-up successfully:
# +
from time import sleep
import ipyvuetify as v
v.Btn(class_='icon ma-2',
style_='max-width:100px',
color='success',
children=[v.Icon(children=['mdi-check'])])
# -
#
# If you see a green button with a checkmark above, you have successfully installed ipyvuetify and enabled the extension. Good work!
#
# If not, refer to the first tutorial and/or the ipyvuetify documentation to set up your system before going further.
# ## Font Size
#
# To change the font size of an `ipyvuetify` element, you add a prop to the `class_` argument.
# Render one div per typography class, labeled with the class name.
typography_classes = [
    'display-4', 'display-3', 'display-2', 'display-1',
    'headline', 'title', 'subtitle-1', 'subtitle-2',
    'body-1', 'body-2', 'caption', 'overline',
]
for type_class in typography_classes:
    display(v.Html(tag='div', class_=type_class, children=[type_class]))
# ## Font Style
#
# Similarly with font style, as can be seen below
# Render one headline-sized div per font-style class, labeled with the class.
weight_classes = [
    'font-regular',
    'font-weight-light',
    'font-weight-thin',
    'font-weight-regular',
    'font-weight-medium',
    'font-weight-bold',
    'font-weight-black',
    'font-italic',
    'font-italic font-weight-light',
    'font-italic font-weight-medium',
    'font-italic font-weight-bold',
]
for style_class in weight_classes:
    display(v.Html(tag='div', class_=style_class + ' headline', children=[style_class]))
# ## Text Alignment - `left`
edward_lear = """The Owl and the Pussy-Cat went to sea
In a beautiful pea-green boat:
They took some honey, and plenty of money
Wrapped up in a five-pound note."""
v.Container(children=[
v.Html(tag='p', class_='body-1 text-left', children=[i])
for i in edward_lear.split('\n')
])
# ### Text Alignment - `center`
v.Container(children=[
v.Html(tag='p', class_='body-1 text-center', children=[i])
for i in edward_lear.split('\n')
])
# ### Text Alignment - `right`
v.Container(children=[
v.Html(tag='p', class_='body-1 text-right', children=[i])
for i in edward_lear.split('\n')
])
# ### Text Transformations
#
# Three handy text transformations are available
#
# * `text-lowercase`
# * `text-uppercase`
# * `text-capitalize`
v.Html(tag='p',class_='headline text-lowercase',
children=["The Owl looked up to the stars above,"])
v.Html(tag='p',class_='headline text-uppercase',
children=["And sang to a small guitar,"])
v.Html(tag='p',class_='headline text-capitalize',
children=["...Dear Pig, are you willing to sell for one shilling"])
# ## Colours
#
# There are a lot of options for colours for your `ipyvuetify` elements.
#
# https://vuetifyjs.com/en/styles/colors/
#
# We'll show a few examples to get you started.
colours = ['red','pink','purple','deep-purple','indigo','blue','light-blue','cyan','teal','green','light-green','lime','yellow','amber','orange','deep-orange','blue-grey','brown','grey']
v.Container(children=[
v.Btn(color=i,children=[i])
for i in colours
])
# ### Lighten, darken, accent
#
# You can lighten/darken/accent the colours with levels from 1 to 4 with the `lighten-k` prop, like this:
v.Container(children=[
v.Btn(color=i+' lighten-4',children=[i])
for i in colours
])
v.Container(children=[
v.Btn(color=i+' darken-2',children=[i])
for i in colours
])
v.Container(children=[
v.Btn(color=i+' accent-2',children=[i])
for i in colours
])
| 07-Text_Styles_And_Colours.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="jb2pLvUgvXJj" colab_type="code" outputId="1031c58a-551b-4575-9c77-bf8e34af5aae" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License")
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
import tensorflow.keras.backend as keras_backend
tf.keras.backend.set_floatx('float32')
import tensorflow_probability as tfp
from tensorflow_probability.python.layers import util as tfp_layers_util
import random
import sys
import time
import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
print(tf.__version__) # use tensorflow version >= 2.0.0
#pip install tensorflow=2.0.0
#pip install --upgrade tensorflow-probability
exp_type = 'MAML' # choose from 'MAML', 'MR-MAML-W', 'MR-MAML-A'
# + id="fosHnP6GwFZ-" colab_type="code" colab={}
class SinusoidGenerator():
    """One sinusoid regression task y = A*sin(x - phase).

    batch() inputs are x concatenated with a one-hot code of the amplitude
    index, giving inputs of width 1 + K_amp.
    """
    def __init__(self, K=10, width=5, K_amp=20, phase=0, amps = None, amp_ind=None, amplitude =None, seed = None):
        '''
        Args:
            K: batch size. Number of x values sampled at every batch.
            width: x is sampled uniformly from [-width, width].
            K_amp: number of discrete candidate amplitudes (one-hot length).
            amps: candidate amplitude grid; defaults to linspace(0.1, 4, K_amp).
            amp_ind: index into amps; if None, drawn uniformly from [0, K_amp-5].
            amplitude: sine wave amplitude; defaults to amps[amp_ind].
            phase: sine wave phase.
            seed: if given, seeds numpy before each x sample (see _sample_x).
        '''
        self.K = K
        self.width = width
        self.K_amp = K_amp
        self.phase = phase
        self.seed = seed
        self.x = self._sample_x()
        # NOTE: the random upper bound K_amp-5 excludes the largest
        # amplitudes at training time -- presumably intentional (held-out
        # amplitudes for meta-test); confirm against the experiment design.
        self.amp_ind = amp_ind if amp_ind is not None else random.randint(0,self.K_amp-5)
        self.amps = amps if amps is not None else np.linspace(0.1,4,self.K_amp)
        self.amplitude = amplitude if amplitude is not None else self.amps[self.amp_ind]
    def _sample_x(self):
        # Re-seeding here makes every call return the same x when seed is set.
        if self.seed is not None:
            np.random.seed(self.seed)
        return np.random.uniform(-self.width, self.width, self.K)
    def batch(self, noise_scale, x = None):
        '''Return (xa, y): xa is [K, 1+K_amp] (x plus one-hot amp code), y is [K, 1].'''
        if x is None:
            x = self._sample_x()
        x = x[:, None]
        amp = np.zeros([1, self.K_amp])
        amp[0,self.amp_ind] = 1
        amp = np.tile(amp, x.shape)  # broadcast the one-hot row to [K, K_amp]
        xa = np.concatenate([x, amp], axis = 1)
        # Gaussian observation noise with std noise_scale (0 => noiseless).
        y = self.amplitude * np.sin(x - self.phase) + np.random.normal(scale = noise_scale, size = x.shape)
        return xa, y
    def equally_spaced_samples(self, K=None, width=None):
        '''Returns K equally spaced, noiseless samples on [-width+0.5, width-0.5].'''
        if K is None:
            K = self.K
        if width is None:
            width = self.width
        return self.batch(noise_scale = 0, x=np.linspace(-width+0.5, width-0.5, K))
# + id="Df53q7-VwI2P" colab_type="code" colab={}
noise_scale = 0.1 #@param {type:"number"}
n_obs = 20 #@param {type:"number"}
n_context = 10 #@param {type:"number"}
K_amp = 20 #@param {type:"number"}
x_width = 5 #@param {type:"number"}
n_iter = 20000 #@param {type:"number"}
amps = np.linspace(0.1,4,K_amp)
lr_inner = 0.01 #@param {type:"number"}
dim_w = 5 #@param {type:"number"}
train_ds = [SinusoidGenerator(K=n_context, width = x_width, \
K_amp = K_amp, amps = amps) \
for _ in range(n_iter)]
# + id="jYAoMD0rwQ28" colab_type="code" colab={}
class SineModel(keras.Model):
    """MLP regressor: two ReLU hidden layers of 40 units, scalar linear output."""

    def __init__(self):
        super(SineModel, self).__init__()  # python 2 syntax
        # super().__init__() # python 3 syntax
        # Attribute creation order fixes the layer order relied on by
        # copy_model's per-layer weight copy.
        self.hidden1 = keras.layers.Dense(40)
        self.hidden2 = keras.layers.Dense(40)
        self.out = keras.layers.Dense(1)

    def call(self, x):
        h = keras.activations.relu(self.hidden1(x))
        h = keras.activations.relu(self.hidden2(h))
        return self.out(h)
def kl_qp_gaussian(mu_q, sigma_q, mu_p, sigma_p):
    """Kullback-Leibler KL(N(mu_q), Diag(sigma_q^2) || N(mu_p), Diag(sigma_p^2))

    Closed-form diagonal-Gaussian KL, averaged (not summed) over the latent
    dimension (axis 1) and then over the batch. 1e-16 guards log(0).
    """
    sigma2_q = tf.square(sigma_q) + 1e-16
    sigma2_p = tf.square(sigma_p) + 1e-16
    temp = tf.math.log(sigma2_p) - tf.math.log(sigma2_q) - 1.0 + \
        sigma2_q / sigma2_p + tf.square(mu_q - mu_p) / sigma2_p #n_target * d_w
    kl = 0.5 * tf.reduce_mean(temp, axis = 1)  # per-example mean over d_w
    return tf.reduce_mean(kl)  # scalar: mean over the batch
def copy_model(model, x=None, input_shape=None):
    '''
    Copy model weights to a new SineModel.

    The new model's variables must be created before set_weights can run,
    so the caller supplies either an example input (a forward pass builds
    the variables) or an input width (build() creates them directly).
    Args:
        model: model to be copied.
        x: An input example used to trigger variable creation.
        input_shape: alternatively, the input feature width for build().
    '''
    copied_model = SineModel()
    if x is not None:
        copied_model.call(tf.convert_to_tensor(x))
    if input_shape is not None:
        copied_model.build(tf.TensorShape([None,input_shape]))
    copied_model.set_weights(model.get_weights())
    return copied_model
def np_to_tensor(list_of_numpy_objs):
    """Lazily convert each array in *list_of_numpy_objs* to a float32 tensor."""
    for obj in list_of_numpy_objs:
        yield tf.convert_to_tensor(obj, dtype=tf.float32)
def compute_loss(model, xa, y):
    """Forward `xa` through `model`; return (scalar MSE against `y`, predictions)."""
    predictions = model.call(xa)
    mse = keras_backend.mean(keras.losses.mean_squared_error(y, predictions))
    return mse, predictions
# + id="4McD728ixTbm" colab_type="code" colab={}
def train_batch(xa, y, model, optimizer, encoder=None):
    """Apply one gradient step of `model` on a single task batch.

    The branch taken is selected by the global `exp_type`. For the MR-MAML
    variants the encoder runs *outside* the tape, so only `model`'s
    variables receive gradients here.

    Args:
        xa: [K, 1+K_amp] inputs (x plus one-hot amplitude code).
        y: [K, 1] regression targets.
        model: SineModel updated in place.
        optimizer: keras optimizer applying the gradients.
        encoder: task encoder (required for MR-MAML-W / MR-MAML-A).
    Returns:
        The scalar loss used for the update.
    """
    tensor_xa, tensor_y = np_to_tensor((xa, y))
    if exp_type == 'MAML':
        with tf.GradientTape() as tape:
            loss, _ = compute_loss(model, tensor_xa, tensor_y)
    if exp_type == 'MR-MAML-W':
        w = encoder(tensor_xa)
        with tf.GradientTape() as tape:
            y_hat = model.call(w)
            loss = keras_backend.mean(keras.losses.mean_squared_error(tensor_y, y_hat))
    if exp_type == 'MR-MAML-A':
        _, w, _ = encoder(tensor_xa)  # use the posterior mean, not a sample
        with tf.GradientTape() as tape:
            y_hat = model.call(w)
            # Fix: compare against the float32 tensor targets, matching the
            # other branches (previously used the raw numpy `y`).
            loss = keras_backend.mean(keras.losses.mean_squared_error(tensor_y, y_hat))
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
def test_inner_loop(model, optimizer, xa_context, y_context, xa_target, y_target, num_steps, encoder=None):
    """Adapt `model` on the context set, recording target-set fits along the way.

    Runs max(num_steps) inner gradient steps on (xa_context, y_context); at
    every step listed in `num_steps` it first evaluates the current model on
    the target set (so step 0 is the pre-adaptation fit).

    Returns:
        List of (step, y_hat_on_target, target_loss) tuples, one per entry
        of `num_steps`.
    """
    inner_record = []
    tensor_xa_target, tensor_y_target = np_to_tensor((xa_target, y_target))
    # Encode the target inputs once, per experiment type (global exp_type).
    if exp_type == 'MAML':
        w_target = tensor_xa_target
    if exp_type == 'MR-MAML-W':
        w_target = encoder(tensor_xa_target)
    if exp_type == 'MR-MAML-A':
        _, w_target, _ = encoder(tensor_xa_target)
    for step in range(0, np.max(num_steps) + 1):
        if step in num_steps:
            # Evaluate before this step's update.
            if exp_type == 'MAML':
                loss, y_hat = compute_loss(model, w_target, tensor_y_target)
            else:
                y_hat = model.call(w_target)
                loss = keras_backend.mean(keras.losses.mean_squared_error(tensor_y_target, y_hat))
            inner_record.append((step, y_hat, loss))
        loss = train_batch(xa_context, y_context, model, optimizer, encoder)
    return inner_record
def eval_sinewave_for_test(model, sinusoid_generator, num_steps=(0, 1, 10), encoder=None, learning_rate = lr_inner, ax = None, legend= False):
    """Adapt a copy of `model` on one sine task and optionally plot the fits.

    Args:
        model: meta-trained SineModel (left unmodified; a copy is adapted).
        sinusoid_generator: task to evaluate on.
        num_steps: inner steps at which to record the target-set fit.
        encoder: task encoder for the MR-MAML variants (None for MAML).
        learning_rate: inner-loop SGD learning rate.
        ax: if given, plot context points, true function and fitted curves.
        legend: whether to draw the plot legend.
    Returns:
        The (step, y_hat, loss) records from test_inner_loop.
    """
    # data for training
    xa_context, y_context = sinusoid_generator.batch(noise_scale = noise_scale)
    # NOTE(review): batch() already adds noise at noise_scale, so the context
    # targets are doubly noisy here -- confirm this is intended.
    y_context = y_context + np.random.normal(scale = noise_scale, size = y_context.shape)
    # data for validation (noiseless grid, then one dose of noise)
    xa_target, y_target = sinusoid_generator.equally_spaced_samples(K = 200, width = 5)
    y_target = y_target + np.random.normal(scale = noise_scale, size = y_target.shape)
    # copy model so we can use the same model multiple times
    if exp_type == 'MAML':
        copied_model = copy_model(model, x = xa_context)
    else:
        copied_model = copy_model(model, input_shape=dim_w)
    optimizer = keras.optimizers.SGD(learning_rate=learning_rate)
    inner_record = test_inner_loop(copied_model, optimizer, xa_context, y_context, xa_target, y_target, num_steps, encoder)
    # plot
    if ax is not None:
        plt.sca(ax)
        x_context = xa_context[:,0,None]
        x_target = xa_target[:,0,None]
        train, = plt.plot(x_context, y_context, '^')
        # Bug fix: `y_target0` was an undefined name (NameError whenever ax
        # was provided); the ground-truth curve is y_target.
        ground_truth, = plt.plot(x_target, y_target, linewidth=2.0)
        plots = [train, ground_truth]
        legends = ['Context Points', 'True Function']
        for n, y_hat, loss in inner_record:
            cur, = plt.plot(x_target, y_hat[:, 0], '--')
            plots.append(cur)
            legends.append('After {} Steps'.format(n))
        if legend:
            plt.legend(plots, legends, loc='center left', bbox_to_anchor=(1, 0.5))
        plt.ylim(-6, 6)
        plt.axvline(x=-sinusoid_generator.width, linestyle='--')
        plt.axvline(x=sinusoid_generator.width,linestyle='--')
    return inner_record
# + id="NPbj4ge1KGR4" colab_type="code" outputId="02376a7f-90ff-40cf-837a-7205c2e0b85a" colab={"base_uri": "https://localhost:8080/", "height": 272}
exp_type = 'MAML'
if exp_type == 'MAML':
model = SineModel()
model.build((None, K_amp+1))
dataset = train_ds
optimizer = keras.optimizers.Adam()
total_loss = 0
n_iter = 15000
losses = []
for i, t in enumerate(random.sample(dataset, n_iter)):
xa_train, y_train = np_to_tensor(t.batch(noise_scale = noise_scale))
with tf.GradientTape(watch_accessed_variables=False) as test_tape:
test_tape.watch(model.trainable_variables)
with tf.GradientTape() as train_tape:
train_loss, _ = compute_loss(model, xa_train, y_train)
model_copy = copy_model(model, xa_train)
gradients_inner = train_tape.gradient(train_loss, model.trainable_variables) # \nabla_{\theta}
k = 0
for j in range(len(model_copy.layers)):
model_copy.layers[j].kernel = tf.subtract(model.layers[j].kernel, # \phi_t = T(\theta, \nabla_{\theta})
tf.multiply(lr_inner, gradients_inner[k]))
model_copy.layers[j].bias = tf.subtract(model.layers[j].bias,
tf.multiply(lr_inner, gradients_inner[k+1]))
k += 2
xa_validation, y_validation = np_to_tensor(t.batch(noise_scale = noise_scale))
test_loss, y_hat = compute_loss(model_copy, xa_validation, y_validation) # test_loss
gradients_outer = test_tape.gradient(test_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients_outer, model.trainable_variables))
total_loss += test_loss
loss = total_loss / (i+1.0)
if i % 1000 == 0:
print('Step {}: loss = {}'.format(i, loss))
# + id="KAIcBZspRPEu" colab_type="code" outputId="0aff0f48-fd5d-4cd5-bdee-9d3a8c5a7422" colab={"base_uri": "https://localhost:8080/", "height": 34}
if exp_type == 'MAML':
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
n_context = 5
n_test_task = 100
errs = []
for ii in range(n_test_task):
np.random.seed(ii)
A = np.random.uniform(low = amps[0], high = amps[-1])
test_ds = SinusoidGenerator(K=n_context, seed = ii, amplitude = A, amp_ind= random.randint(0,K_amp-5))
inner_record = eval_sinewave_for_test(model, test_ds, num_steps=(0, 1, 5, 100));
errs.append(inner_record[-1][2].numpy())
print('Model is', exp_type, 'meta-test MSE is', np.mean(errs) )
# + [markdown] id="370SevfOR6D_" colab_type="text"
# # Training & Testing for MR-MAML(W)
# + id="dr1j1BSVL14X" colab_type="code" outputId="e625e62e-0457-4dc6-bfb5-2e933279f179" colab={"base_uri": "https://localhost:8080/", "height": 255}
if exp_type == 'MR-MAML-W':
model = SineModel()
dataset = train_ds
optimizer = keras.optimizers.Adam()
Beta = 5e-5
learning_rate = 1e-3
n_iter = 15000
model.build((None, dim_w))
kernel_posterior_fn=tfp_layers_util.default_mean_field_normal_fn(untransformed_scale_initializer=tf.compat.v1.initializers.random_normal(
mean=-50., stddev=0.1))
encoder_w = tf.keras.Sequential([
tfp.layers.DenseReparameterization(100, activation=tf.nn.relu, kernel_posterior_fn=kernel_posterior_fn,input_shape=(1 + K_amp,)),
tfp.layers.DenseReparameterization(dim_w,kernel_posterior_fn=kernel_posterior_fn),
])
total_loss = 0
losses = []
start = time.time()
for i, t in enumerate(random.sample(dataset, n_iter)):
xa_train, y_train = np_to_tensor(t.batch(noise_scale = noise_scale)) #[K, 1]
x_validation = np.random.uniform(-x_width, x_width, n_obs - n_context)
xa_validation, y_validation = np_to_tensor(t.batch(noise_scale = noise_scale, x = x_validation))
all_var = encoder_w.trainable_variables + model.trainable_variables
with tf.GradientTape(watch_accessed_variables=False) as test_tape:
test_tape.watch(all_var)
with tf.GradientTape() as train_tape:
w_train = encoder_w(xa_train)
y_hat_train = model.call(w_train)
train_loss = keras_backend.mean(keras.losses.mean_squared_error(y_train, y_hat_train)) # K*1
gradients_inner = train_tape.gradient(train_loss, model.trainable_variables) # \nabla_{\theta}
model_copy = copy_model(model, x = w_train)
k = 0
for j in range(len(model_copy.layers)):
model_copy.layers[j].kernel = tf.subtract(model.layers[j].kernel, # \phi_t = T(\theta, \nabla_{\theta})
tf.multiply(lr_inner, gradients_inner[k]))
model_copy.layers[j].bias = tf.subtract(model.layers[j].bias,
tf.multiply(lr_inner, gradients_inner[k+1]))
k += 2
w_validation = encoder_w(xa_validation)
y_hat_validation = model_copy.call(w_validation)
mse_loss = keras_backend.mean(keras.losses.mean_squared_error(y_validation, y_hat_validation))
kl_loss = Beta * sum(encoder_w.losses)
validation_loss = mse_loss + kl_loss
gradients_outer = test_tape.gradient(validation_loss,all_var)
keras.optimizers.Adam(learning_rate=learning_rate).apply_gradients(zip(gradients_outer, all_var))
losses.append(validation_loss.numpy())
if i % 1000 == 0 and i > 0:
print('Step {}:'.format(i), 'loss=', np.mean(losses))
losses = []
# + id="axs-TERYbQa9" colab_type="code" outputId="7a176904-4f8e-4c64-cc1b-83014df3abb9" colab={"base_uri": "https://localhost:8080/", "height": 34}
if exp_type == 'MR-MAML-W':
n_context = 5
n_test_task = 100
errs = []
for ii in range(n_test_task):
np.random.seed(ii)
A = np.random.uniform(low = amps[0], high = amps[-1])
test_ds = SinusoidGenerator(K=n_context, seed = ii, amplitude = A, amp_ind= random.randint(0,K_amp-5))
inner_record = eval_sinewave_for_test(model, test_ds, num_steps=(0, 1, 5, 100), encoder=encoder_w);
errs.append(inner_record[-1][2].numpy())
print('Model is', exp_type, ', meta-test MSE is', np.mean(errs) )
# + [markdown] id="fXfX2JvcAATy" colab_type="text"
# #Training & Testing for MR-MAML(A)
# + id="W9gwqYmGAACR" colab_type="code" outputId="273fa827-d7ed-44eb-b6c8-2f259f4818af" colab={"base_uri": "https://localhost:8080/", "height": 170}
if exp_type == 'MR-MAML-A':
class Encoder(keras.Model):
def __init__(self, dim_w=5, name='encoder', **kwargs):
# super().__init__(name = name)
super(Encoder, self).__init__(name = name)
self.dense_proj = layers.Dense(80, activation='relu')
self.dense_mu = layers.Dense(dim_w)
self.dense_sigma_w = layers.Dense(dim_w)
def call(self, inputs):
h = self.dense_proj(inputs)
mu_w = self.dense_mu(h)
sigma_w = self.dense_sigma_w(h)
sigma_w = tf.nn.softplus(sigma_w)
ws = mu_w + tf.random.normal(tf.shape(mu_w)) * sigma_w
return ws, mu_w, sigma_w
model = SineModel()
model.build((None, dim_w))
encoder_w = Encoder(dim_w = dim_w)
encoder_w.build((None, K_amp+1))
Beta = 5.0
n_iter = 10000
dataset = train_ds
optimizer = keras.optimizers.Adam()
losses = [];
for i, t in enumerate(random.sample(dataset, n_iter)):
xa_train, y_train = np_to_tensor(t.batch(noise_scale = noise_scale)) #[K, 1]
with tf.GradientTape(watch_accessed_variables=False) as test_tape, tf.GradientTape(watch_accessed_variables=False) as encoder_test_tape:
test_tape.watch(model.trainable_variables)
encoder_test_tape.watch(encoder_w.trainable_variables)
with tf.GradientTape() as train_tape:
w_train, _, _ = encoder_w(xa_train)
y_hat = model.call(w_train)
train_loss = keras_backend.mean(keras.losses.mean_squared_error(y_train, y_hat))
model_copy = copy_model(model, x=w_train)
gradients_inner = train_tape.gradient(train_loss, model.trainable_variables) # \nabla_{\theta}
k = 0
for j in range(len(model_copy.layers)):
model_copy.layers[j].kernel = tf.subtract(model.layers[j].kernel, # \phi_t = T(\theta, \nabla_{\theta})
tf.multiply(lr_inner, gradients_inner[k]))
model_copy.layers[j].bias = tf.subtract(model.layers[j].bias,
tf.multiply(lr_inner, gradients_inner[k+1]))
k += 2
x_validation = np.random.uniform(-x_width, x_width, n_obs - n_context)
xa_validation, y_validation = np_to_tensor(t.batch(noise_scale = noise_scale, x = x_validation))
w_validation, w_mu_validation, w_sigma_validation = encoder_w(xa_validation)
test_mse, _ = compute_loss(model_copy, w_validation, y_validation)
kl_ib = kl_qp_gaussian(w_mu_validation, w_sigma_validation,
tf.zeros(tf.shape(w_mu_validation)), tf.ones(tf.shape(w_sigma_validation)))
test_loss = test_mse + Beta * kl_ib
gradients_outer = test_tape.gradient(test_mse, model.trainable_variables)
optimizer.apply_gradients(zip(gradients_outer, model.trainable_variables))
gradients = encoder_test_tape.gradient(test_loss,encoder_w.trainable_variables)
keras.optimizers.Adam(learning_rate=0.001).apply_gradients(zip(gradients, encoder_w.trainable_variables))
losses.append(test_loss)
if i % 1000 == 0 and i > 0:
print('Step {}:'.format(i), 'loss = ', np.mean(losses))
# + id="PBPR2byCbaIs" colab_type="code" outputId="fa0adae9-446a-470c-88b0-0fbc5b44a61a" colab={"base_uri": "https://localhost:8080/", "height": 34}
if exp_type == 'MR-MAML-A':
n_context = 5
n_test_task = 100
errs = []
for ii in range(n_test_task):
np.random.seed(ii)
A = np.random.uniform(low = amps[0], high = amps[-1])
test_ds = SinusoidGenerator(K=n_context, seed = ii, amplitude = A, amp_ind= random.randint(0,K_amp-5))
inner_record = eval_sinewave_for_test(model, test_ds, num_steps=(0, 1, 5, 100), encoder=encoder_w);
errs.append(inner_record[-1][2].numpy())
print('Model is', exp_type, ', meta-test MSE is', np.mean(errs) )
# + id="LqOUUaYFdziP" colab_type="code" colab={}
| meta_learning_without_memorization/sinusoid/MR_MAML.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
pd.set_option('display.max_columns', None)
my_csv = pd.read_csv('./out_files/ct_no_prompt.csv')
ev_csv = pd.read_csv('./out_files/essayftrs1840new.csv')
# ## Análise de my_csv
my_csv.head()
print(len(my_csv))
for i in my_csv.path[:10]:
print(i)
my_csv = my_csv.replace(to_replace=r'corpora\/Redações', value='', regex=True)
for i in my_csv.path[:10]:
print(i)
# Renomeando columna "path" para juntar os dois arquivos mais a frente
my_csv.rename(columns = {'path':'filename'}, inplace = True)
print(my_csv.columns)
# ## Análise de ev_csv
ev_csv.head()
len(ev_csv)
ev_csv.columns
for i in ev_csv.filename[:10]:
print(i)
ev_csv_final = ev_csv.replace(to_replace=r'\/Users\/evelin\.amorim\/Documents\/UFMG\/aes\/data', value='', regex=True)
for i in ev_csv_final.filename[:10]:
print(i)
ev_csv_final.head()
# # Unindo as tabelas
'join_csv = my_csv.join(ev_csv_final.set_index('filename'), on='filename')
len(join_csv)
join_csv = join_csv.dropna()
len(join_csv)
join_csv.head()
join_csv.loc[join_csv['filename'] == '/o-voto-nulo-e-um-ato-politico-valido/xml/valido-ou-nao.xml']
ev_csv_final.loc[ev_csv_final['filename'] == '/o-voto-nulo-e-um-ato-politico-valido/xml/valido-ou-nao.xml']
my_csv.loc[my_csv['filename'] == '/o-voto-nulo-e-um-ato-politico-valido/xml/valido-ou-nao.xml']
join_csv = join_csv.drop_duplicates(subset='filename')
join_csv = join_csv.dropna()
len(join_csv)
join_csv.to_csv(r'./out_files/join_ftrs.csv',index=False)
| adequar_csv.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sentiment Analysis
# +
# necessary imports
import pandas as pd
import numpy as np
import pyprind
import os
# -
df = pd.read_csv('IMDB Dataset.csv')
df.head()
# +
# Read the 50,000 aclImdb review files into a single DataFrame.
basepath = 'aclImdb'
labels = {'pos':1, 'neg':0}
pbar = pyprind.ProgBar(50000)
rows = []  # accumulate [text, label] pairs; build the frame once at the end
for s in ('test', 'train'):
    for l in ('pos', 'neg'):
        path = os.path.join(basepath, s, l)
        for file in sorted(os.listdir(path)):
            with open(os.path.join(path, file), 'r', encoding = 'utf-8') as infile:
                txt = infile.read()
            rows.append([txt, labels[l]])
            pbar.update()
# Fix: DataFrame.append in a loop copied the whole frame on every call
# (quadratic) and was removed in pandas 2.0; construct the frame once.
df = pd.DataFrame(rows, columns=['review', 'sentiment'])
# -
np.random.seed(0)
df = df.reindex(np.random.permutation(df.index))
df.to_csv('movie_data.csv', index = False, encoding = 'utf-8')
df = pd.read_csv('movie_data.csv', encoding = 'utf-8')
df.head(3)
df.shape
# # Cleaning Text Data
# +
import re
def preprocessor(text):
    """Strip HTML tags, lowercase, and remove non-word characters.

    Emoticons such as ':)' or ':-(' are extracted before the punctuation
    strip and re-appended at the end of the cleaned string with their
    noses ('-') removed, so they survive cleaning as tokens.
    """
    no_html = re.sub('<[^>]*>', '', text)
    found_emoticons = re.findall('(?::|;|=)(?:-)?(?:\)|\(|D|P)', no_html)
    cleaned = re.sub('[\W]+', ' ', no_html.lower())
    return cleaned + ' '.join(found_emoticons).replace('-', '')
# -
df['review'] = df['review'].apply(preprocessor)
# ## Processing documents into tokens
# +
def tokenizer(text):
    """Split *text* on whitespace and return the list of tokens.

    NOTE(review): redefined later in this notebook by a cleaning,
    stop-word-filtering version for the out-of-core section.
    """
    return text.split()
tokenizer('runners like running and thus they run')
# +
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()
def tokenizer_porter(text):
    """Whitespace-tokenize *text* and Porter-stem every token.

    Uses the module-level `porter` PorterStemmer instance.
    """
    return [porter.stem(word) for word in text.split()]
tokenizer_porter('runners like running and thus they run')
# +
import nltk
from nltk.corpus import stopwords
stop = stopwords.words('english')
[w for w in tokenizer_porter('a runner likes running and thus runs a lot')[-10:] if w not in stop]
# -
# ## Training a logistic regression model for document classification
# +
X_train = df.loc[:25000, 'review'].values
y_train = df.loc[:25000, 'sentiment'].values
X_test = df.loc[25000:, 'review'].values
y_test = df.loc[25000:, 'sentiment'].values
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(strip_accents = None, lowercase = False, preprocessor = None)
param_grid = [{'vect__ngram_range': [(1, 1)],
'vect__stop_words': [stop, None],
'vect__tokenizer': [tokenizer, tokenizer_porter],
'clf__penalty': ['l1', 'l2'],
'clf__C': [1.0, 10.0, 100.0]},
{'vect__ngram_range': [(1, 1)],
'vect__stop_words': [stop, None],
'vect__tokenizer': [tokenizer, tokenizer_porter],
'vect__use_idf': [False],
'vect__norm': [None],
'clf__penalty': ['l1', 'l2'],
'clf__C': [1.0, 10.0, 100.0]}]
lr_tfidf = Pipeline([('vect', tfidf),
('clf', LogisticRegression(random_state = 0, solver = 'liblinear'))])
gs_lr_tfidf = GridSearchCV(lr_tfidf, param_grid, scoring = 'accuracy', cv = 5, verbose = 1,
n_jobs = -1)
gs_lr_tfidf.fit(X_train, y_train)
# -
print('Best parameter set: %s ' %gs_lr_tfidf.best_params_)
print('CV Accuracy: %.3f' %gs_lr_tfidf.best_score_)
# +
clf = gs_lr_tfidf.best_estimator_
print('Test Accuracy: %.3f' % clf.score(X_test, y_test))
# -
# ## Working with bigger data - online algorithms and out-of-core learning
# +
# tokenizer function to clean unprocessed text data
import numpy as np
import re
from nltk.corpus import stopwords
stop = stopwords.words('english')
def tokenizer(text):
    """Clean raw review text and return a list of stop-word-filtered tokens.

    Steps: strip HTML tags, extract emoticons, lowercase and replace all
    non-word characters with spaces, then re-append the (dash-stripped)
    emoticons so they survive the punctuation removal.
    """
    # raw strings: '\W' / '\)' in plain strings raise invalid-escape
    # warnings (SyntaxWarning in Python 3.12+)
    text = re.sub(r'<[^>]*>', '', text)
    emoticons = re.findall(r'(?::|;|=)(?:-)?(?:\)|\(|D|P)', text.lower())
    text = re.sub(r'[\W]+', ' ', text.lower() + ' '.join(emoticons).replace('-', ''))
    tokenized = [w for w in text.split() if w not in stop]
    return tokenized
# +
# creating a generator function that reads in and returns one document at a time
def stream_docs(path):
    """Yield one (text, label) tuple at a time from the movie-review CSV.

    The header line is skipped.  Each data line is expected to end with
    a comma, a single-digit label, and a newline.
    """
    # renamed local handle: `csv` shadowed the stdlib `csv` module
    with open(path, 'r', encoding='utf-8') as csv_file:
        next(csv_file)  # skip header
        for line in csv_file:
            # last 3 chars are ',', the digit label, and the newline
            text, label = line[:-3], int(line[-2])
            yield text, label
# testing function
# (pulls the first (text, label) pair from movie_data.csv)
next(stream_docs(path='movie_data.csv'))
# +
# creating a function that will take a document stream from the stream_docs function
# and return a particular number of documents
def get_minibatch(doc_stream, size):
    """Pull up to `size` documents from `doc_stream`.

    Returns (docs, labels).  Once the stream is exhausted it returns
    (None, None); any partially collected batch is discarded.
    """
    docs, labels = [], []
    for _ in range(size):
        try:
            text, label = next(doc_stream)
        except StopIteration:
            return None, None
        docs.append(text)
        labels.append(label)
    return docs, labels
# +
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
# HashingVectorizer is stateless (no fit required), which makes it usable
# for out-of-core learning; 2**21 features keeps hash collisions rare
vect = HashingVectorizer(decode_error='ignore', n_features=2**21, preprocessor=None,
                         tokenizer=tokenizer)
# NOTE(review): scikit-learn >= 1.1 spells the logistic loss 'log_loss';
# 'log' only works on older versions -- confirm the installed version
clf = SGDClassifier(loss = 'log', random_state = 1)
doc_stream = stream_docs(path = 'movie_data.csv')
# +
import pyprind
pbar = pyprind.ProgBar(45)
classes = np.array([0, 1])
# 45 minibatches of 1,000 documents = 45,000 training documents
for _ in range(45):
    X_train, y_train = get_minibatch(doc_stream, size = 1000)
    if not X_train:
        break
    X_train = vect.transform(X_train)
    clf.partial_fit(X_train, y_train, classes = classes)
    pbar.update()
# -
# the remaining 5,000 documents serve as the held-out test set
X_test, y_test = get_minibatch(doc_stream, size = 5000)
X_test = vect.transform(X_test)
print('Accuracy: %.3f' % clf.score(X_test, y_test))
# **The Accuracy of the model is approximately 87 percent, slightly below the accuracy that we achieved using Logistic Regression.
# However, out-of-core learning is very memory efficient and it took less than a minute to complete. Finally, we can use the last 5,000 documents to update our model**
clf = clf.partial_fit(X_test, y_test)
| Sentiment Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import magma as m
m.set_mantle_target('ice40')
from magma.simulator.mdb import simulate
# +
from mantle import Counter
# circuit with a 2-bit output O, a carry-out COUT, and a clock input CLK
main = m.DefineCircuit('main', "O", m.Out(m.Bits(2)), "COUT", m.Out(m.Bit), "CLK", m.In(m.Clock))
counter = Counter(2)
# wire the 2-bit counter's outputs straight through to the circuit outputs
m.wire(counter.O, main.O)
m.wire(counter.COUT, main.COUT)
simulate(main) # simulator requires a Circuit Definition
| notebooks/advanced/debugger.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Deps
import os
import os.path as path
import wandb
import pickle
import numpy as np
import pandas as pd
# Plotting related
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
sns.set(style="darkgrid") # Styling
# +
# Configuration:
## Set the WANDB entity and project name to be used
wandb_entity_project = "dosssman/drlforge.ddpg"
# Cache dir structure
# derive a filesystem-safe folder name from the entity/project string
cache_dir = wandb_entity_project.replace("/","_").replace("-", "_").replace(".","_") # Make it simple folder name
if not os.path.exists(cache_dir):
    os.makedirs(cache_dir)
# Episode reward cached data (pickles written by the loading cell below)
all_env_cache_filename = os.path.join( cache_dir, "all_envs.pkl")
all_df_cache_filename = os.path.join( cache_dir, "all_df.pkl")
# +
# DDPG: Data loading for Train Episode Return Plots.
api = wandb.Api()
# only fetch runs trained with normal (Gaussian) action noise
runs = api.runs(wandb_entity_project, {"config.noise_type": "normal"})
summary_list = []
config_list = []
name_list = []
envs = {}
data = []
# NOTE(review): rolling_average is reassigned to 20 in the smoothing step
# below, so this initial value of 10 is never used
rolling_average = 10
sample_points = 500
if not path.exists(all_env_cache_filename) or not path.exists(all_df_cache_filename):
    print( "# Info: No cache found. Downloading data ...")
    # Loading full data in case no cached is found
    for idx, run in enumerate(runs):
        ls = run.history(keys=['eval/train_episode_return', 'global_step'], pandas=False)
        metrics_dataframe = pd.DataFrame(ls[0])
        metrics_dataframe.insert(len(metrics_dataframe.columns), "algo", run.config['exp_name'] + '-' +
                                 str(run.config['noise_type']))
        metrics_dataframe.insert(len(metrics_dataframe.columns), "seed", run.config['seed'])
        data += [metrics_dataframe]
        # group run dataframes per environment; the "<env_id>total_steps" key
        # stores the run length alongside the per-env list of dataframes
        if run.config["env_id"] not in envs:
            envs[run.config["env_id"]] = [metrics_dataframe]
            envs[run.config["env_id"]+"total_steps"] = run.config["total_steps"]
        else:
            envs[run.config["env_id"]] += [metrics_dataframe]
        # run.summary are the output key/values like accuracy. We call ._json_dict to omit large files
        summary_list.append(run.summary._json_dict)
        # run.config is the input metrics. We remove special values that start with _.
        config_list.append({k:v for k,v in run.config.items() if not k.startswith('_')})
        # run.name is the name of the run.
        name_list.append(run.name)
    summary_df = pd.DataFrame.from_records(summary_list)
    config_df = pd.DataFrame.from_records(config_list)
    name_df = pd.DataFrame({'name': name_list})
    all_df = pd.concat([name_df, config_df,summary_df], axis=1)
    data = pd.concat(data, ignore_index=True)
    # Smoothing
    rolling_average = 20
    for env in envs:
        if not env.endswith("total_steps"):
            for idx, metrics_dataframe in enumerate(envs[env]):
                envs[env][idx] = metrics_dataframe.dropna(subset=["eval/train_episode_return"])
                # NOTE(review): the rolling mean is computed on the pre-dropna
                # frame -- confirm the indexes still align after dropna
                envs[env][idx]["eval/train_episode_return"] = metrics_dataframe["eval/train_episode_return"].rolling(rolling_average).mean()[rolling_average:]
    with open(all_df_cache_filename, 'wb') as handle:
        pickle.dump(all_df, handle, protocol=pickle.HIGHEST_PROTOCOL)
    with open(all_env_cache_filename, 'wb') as handle:
        pickle.dump(envs, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print( "# Info: Data loaded and cached.")
else:
    with open(all_df_cache_filename, 'rb') as handle:
        all_df = pickle.load(handle)
    with open(all_env_cache_filename, 'rb') as handle:
        envs = pickle.load(handle)
    print( "# Info: Data loaded from cache.")
# +
# Helper: Get Dataframe of the Episode reward given the environment
# TODO: Implement caching
def get_df_for_env(env_id, sample_points=500):
    """Resample every run of `env_id` onto a common x-axis.

    Runs log `global_step` at uneven intervals, so each run is snapped to
    `sample_points` evenly spaced timesteps (the last two are dropped to
    avoid running off the end of short runs).  Reads the module-level
    `envs` dict populated by the loading cell above.

    Returns one DataFrame concatenating all resampled runs.
    """
    env_total_steps = envs[env_id + "total_steps"]
    env_increment = env_total_steps / sample_points
    resampled_runs = []
    for sampled_run in envs[env_id]:
        # collect rows and build the frame once: DataFrame.append was
        # removed in pandas 2.0 and was O(n^2) in any case
        rows = []
        x_axis = [i * env_increment for i in range(sample_points - 2)]
        current_row = 0
        for timestep in x_axis:
            # advance to the first logged row at or past this timestep
            while sampled_run.iloc[current_row]["global_step"] < timestep:
                current_row += 1
                if current_row > len(sampled_run) - 2:
                    break
            if current_row > len(sampled_run) - 2:
                break
            temp_row = sampled_run.iloc[current_row].copy()
            temp_row["global_step"] = timestep
            rows.append(temp_row)
        resampled_runs.append(pd.DataFrame(rows, columns=sampled_run.columns))
    return pd.concat(resampled_runs, ignore_index=True)
# Caching data for all environments all at once
ALL_ENVS = list(sorted(set(all_df["env_id"])))
ALL_ENV_EPISODE_RETURN_DF = {}
# one pickle per environment under <cache_dir>/episode_return
ep_return_cachedir = os.path.join( cache_dir, "episode_return")
if not path.exists( ep_return_cachedir):
    os.makedirs(ep_return_cachedir)
for env_name in ALL_ENVS:
    print( f"# INFO: Fetching / Loading data for {env_name}")
    env_ep_return_cachefile = os.path.join( ep_return_cachedir, f"{env_name}_cache.pkl")
    if not path.exists(env_ep_return_cachefile):
        print("# Cached data not found, reading runs data from WANDB")
        # Actually process the data. Takes most of the time.
        env_df = get_df_for_env(env_name)
        with open(env_ep_return_cachefile, 'wb') as handle:
            pickle.dump(env_df, handle, protocol=pickle.HIGHEST_PROTOCOL)
    else:
        # Load it from cached data
        print("# Cached data found and loaded")
        with open(env_ep_return_cachefile, 'rb') as handle:
            env_df = pickle.load(handle)
    # Add the data to global variable for later usage
    ALL_ENV_EPISODE_RETURN_DF[f"{env_name}"] = env_df
print( "# INFO: All data loading done for episode reward data")
# +
# Plot for one environment
env_name = "MountainCarContinuous-v0"
# Get the dataframe
env_df = ALL_ENV_EPISODE_RETURN_DF[env_name]
# -
# list the algorithm variants present in this environment's data
set(env_df["algo"])
# +
# Plot parameterization
fig, ax = plt.subplots(figsize=(8,6))
# Plot the data onto the ax object
# NOTE(review): `ci=` is deprecated in seaborn >= 0.12 (use errorbar="sd")
sns.lineplot(data=env_df,
             x="global_step",
             y="eval/train_episode_return",
             ci='sd', ax=ax,
             label="DDPG (Normal Noise)"
)
# Plot config and prettify
ax.set_title(env_name)
ax.set_xlabel("Time steps")
ax.set_ylabel("Episode return")
ax.ticklabel_format(style='sci', scilimits=(0,0), axis='x')
ax.legend(loc="best")
plt.tight_layout()
plt.show()
# -
# Generates plots for all the environments and saves each to a PNG
for env_name in ALL_ENVS:
    # fix: original had a duplicated assignment (`env_df = env_df = ...`)
    env_df = ALL_ENV_EPISODE_RETURN_DF[env_name]
    # Plot parameterization
    fig, ax = plt.subplots(figsize=(8,6))
    # Plot the data onto the ax object
    # NOTE(review): `ci=` is deprecated in seaborn >= 0.12 (use errorbar="sd")
    sns.lineplot(data=env_df,
                 x="global_step",
                 y="eval/train_episode_return",
                 ci='sd', ax=ax,
                 label="DDPG (Normal Noise)"
    )
    # Plot config and prettify
    ax.set_title(env_name)
    ax.set_xlabel("Time steps")
    ax.set_ylabel("Episode return")
    ax.ticklabel_format(style='sci', scilimits=(0,0), axis='x')
    ax.legend(loc="best")
    plt.tight_layout()
    plt.show()
    fig.savefig(f"{env_name}_EpisodeReturn.png")
| assets/posts/ddpg_experiments/default/ddpg_plot_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_anaconda3)
# language: python
# name: conda_anaconda3
# ---
# # Introduction to MPI on Amazon SageMaker
#
# Message Passing Interface (MPI) is the fundamental communication protocol for programming parallel computer programs. See its [wiki page](https://en.wikipedia.org/wiki/Message_Passing_Interface). [Open MPI](https://www.open-mpi.org/projects/user-docs/) is the implementation that's used as a basic building block for distributed training systems.
#
# In Python programs, you can interact with Open MPI APIs via [mpi4py](https://mpi4py.readthedocs.io/en/stable/overview.html) and easily convert your single-process python program into a parallel python program.
#
# Parallel processes can exist on one host (e.g. one EC2 instance) or multiple hosts (e.g. many EC2 instances). It's trivial to set up a parallel cluster (comm world, in MPI parlance) on one host via Open MPI, but it is less straight-forward to set up an MPI comm world across multiple instances.
#
# SageMaker does it for you. In this tutorial, you will go through a few basic (but exceeding important) [MPI communications](https://mpi4py.readthedocs.io/en/stable/tutorial.html) on SageMaker with **multiple instances** and you will verify that parallel processes across instances are indeed talking to each other. Those basic communications are the fundamental building blocks for distributed training.
# ## Environment
# We assume Open MPI and mpi4py have been installed in your environment. This is the case for SageMaker Notebook Instance or Studio.
# ## Inspect the Python Program
# + jupyter={"outputs_hidden": true}
# !pygmentize mpi_demo.py
# -
# See the program in action with 2 parallel processes on your current environment. Make sure you have at least 2 cores.
# !mpirun -np 2 python mpi_demo.py
# ## Scale it on SageMaker
# You can run the above program with $n$ processes per host across $N$ hosts on SageMaker (and get a comm world of size $n\times N$). In the remaining of this notebook, you will use SageMaker TensorFlow deep learning container to run the above program. There is no particular reason for the choice, all SageMaker deep learning containers have Open MPI installed. So feel free to replace it with your favorite DLC.
#
# Check out the [SageMaker Python SDK Docs](https://sagemaker.readthedocs.io/en/stable/api/training/smd_model_parallel_general.html?highlight=mpi%20paramters#mpi-parameters) for the parameters needed to set up a distributed training job with MPI.
# +
import sagemaker
from sagemaker import get_execution_role
from sagemaker.tensorflow import TensorFlow
role = get_execution_role()
# Running 2 processes per host
# if we use 3 instances,
# then we should see 6 MPI processes
distribution = {"mpi": {"enabled": True, "processes_per_host": 2}}
# the TensorFlow DLC is used only because it ships with Open MPI installed
tfest = TensorFlow(
    entry_point="mpi_demo.py",
    role=role,
    framework_version="2.3.0",
    distribution=distribution,
    py_version="py37",
    instance_count=3,
    instance_type="ml.c5.2xlarge",  # 8 cores
    output_path="s3://" + sagemaker.Session().default_bucket() + "/" + "mpi",
)
# -
tfest.fit()
# The stdout "Number of MPI processes that will talk to each other: 6" indicates that the processes on all hosts are included in the comm world.
# ## Conclusion
# In this notebook, you went through some fundamental MPI operations, which are the bare bones of inner workings of many distributed training frameworks. You did that on SageMaker with multiple instances. You can scale up this set up to include more instances in a real ML project.
| training/distributed_training/mpi_on_sagemaker/intro/mpi_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="fQFCLASOTdDp"
# # Training Hugging Face Transformer model with Custom NER Dataset
# + [markdown] id="9Eq6T2HuTa4h"
#
# [](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)
# + colab={"base_uri": "https://localhost:8080/"} id="WSJrwT_c-e9P" outputId="66a8a65e-36f5-440f-d23d-86ba5d0c7c9a"
# ! pip install transformers datasets seqeval
# + colab={"base_uri": "https://localhost:8080/"} id="ipQQlaf1-0Lx" outputId="ea5904e4-0927-461b-8cb5-1d5b47b1315c"
# !git clone https://github.com/gyan42/mozhi-datasets
# + colab={"base_uri": "https://localhost:8080/"} id="vFnihknZSS_Z" outputId="f1ddc81b-aa3b-4d79-8922-2794e23dd9fb"
% cd mozhi-datasets/sroie2019
# + colab={"base_uri": "https://localhost:8080/"} id="BKE0HVk9TuFf" outputId="6f199d1d-02b8-4325-a11c-27e591e81392"
# ! ls
# + colab={"base_uri": "https://localhost:8080/"} id="gGGB8Eujlpqe" outputId="136e30cf-2361-4405-ba23-b8b28616a8b4"
# ! git pull
# + colab={"base_uri": "https://localhost:8080/"} id="pPjtyBx1kQ8D" outputId="d42981f8-c531-4084-c574-8bcd9e4de664"
# !python hf_tokenize.py
# + colab={"base_uri": "https://localhost:8080/"} id="YF5LWAHMqc9x" outputId="5d03e094-be71-4764-9e51-b727b5fd29d5"
# ! python sroie2019_dataset.py
# + colab={"base_uri": "https://localhost:8080/"} id="SuXCB9eYqlLn" outputId="615812e4-29aa-4314-c16d-4bb4917997ff"
# ! ls /root/.cache/huggingface/datasets/sroie2019/SROIE2019/
# + colab={"base_uri": "https://localhost:8080/"} id="foMBuqFBTwkT" outputId="986e7f14-f93f-4a82-b9cb-fe5b0ce6cd0a"
# ! python hf_model_train.py
# + id="lHKTRylyUWBW"
| sroie2019/HFTransformerWithCustom_NER_Datset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Searching for coding sequences in genomes using BLAST and Python
#
# This notebook will take a provided sequence of a budding yeast gene and identify the orthologs in genome sequences. In the process, we'll collect both the coding sequences and the encoded protein sequences for the orthologs.
#
# This notebook builds on the previous ones. See those for introduction and credits.
#
# This collects subsequences from a collection of PacBio sequenced yeast genomes from [Yue et al 2017](https://www.ncbi.nlm.nih.gov/pubmed/28416820).
#
# Reference for sequence data:
# [Contrasting evolutionary genome dynamics between domesticated and wild yeasts.
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Nat Genet. 2017 Jun;49(6):913-924. doi: 10.1038/ng.3847. Epub 2017 Apr 17. PMID: 28416820](https://www.ncbi.nlm.nih.gov/pubmed/28416820)
#
# This is meant to demonstrate using BLAST+, Python, and a few of my Python scripts to accomplish a task in a series of steps. (This is further expanded to included using PatMatch on the mined ortholog protein sequences [here](GSD/GSD%20Rpb1_orthologs_in_PB_genomes.ipynb#GSD:-Rpb1-orthologs-in-PB-genomes) and [here](GSD/GSD%20Rpb1_orthologs_in_1011_genomes.ipynb).)
#
# -----
# ## Overview
# 
# ## Preparation
#
# Fill in the values next cell to provide details on the gene of interest. Because the next cell 'generalizes' the necessary gene-specific information, it should be possible to just edit this and run the notebook with a different gene. Advanced advice: if there were a lot of genes, then the notebook could be further edited to use [papermill](https://github.com/nteract/papermill) to provide the necessary information.
#
# If you don't want to get the gene sequence from a elsewhere on the internet, set `get_seq_from_link` to `False` and don't worry about what is assigned for `link_to_FASTA_of_gene`. You'll be prompted later to edit a file in the session if opting to not fetch it.
#
# (Caveat: right now this is written for genes with no introns. Only a few hundred have in yeast and that is the organism in this example. Intron presence would only become important when trying to translate in late stages of this workflow.)
# Gene-specific settings; edit these to re-run the whole workflow for another gene
gene_name = "VPH1" # can be what you want to use in this notebook; not used to match anything external
size_expected = 2523 #use bp length of coding sequence at SGD
get_seq_from_link = True #Change what is between quotes on next line if `True`
link_to_FASTA_of_gene = "https://gist.githubusercontent.com/fomightez/f46b0624f1d8e3abb6ff908fc447e63b/raw/625eaba76bb54e16032f90c8812350441b753a0c/uz_S288C_YOR270C_VPH1_coding.fsa"
#**Possible future enhancement would be to add getting the FASTA of the gene from Yeastmine with just systematic id using get_gene_genomic_seq_as_FASTA.py**
# Get the genomes data, the `blast_to_df` script, and sequence to search for matches in the genomes by running these commands.
# (lines starting `# !` are jupytext-escaped shell magics; they run when
# this file is opened as a notebook, not as plain Python)
import os
file_needed = "blast_to_df.py"
if not os.path.isfile(file_needed):
    # !curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/blast-utilities/blast_to_df.py
import pandas as pd
# Prepare for getting PacBio (Yue et al 2017 sequences)
#make a list of the strain designations
yue_et_al_strains = ["S288C","DBVPG6044","DBVPG6765","SK1","Y12",
                     "YPS128","UWOPS034614","CBS432","N44","YPS138",
                     "UFRJ50816","UWOPS919171"]
# Get & unpack the genome sequences from strains
for s in yue_et_al_strains:
    # !curl -LO http://yjx1217.github.io/Yeast_PacBio_2016/data/Nuclear_Genome/{s}.genome.fa.gz
    # !gunzip -f {s}.genome.fa.gz
# +
# add identifiers to each `chr` so results for each strain clear later
chromosome_id_prefix = "chr"
def add_strain_id_to_description_line(file,strain_id):
    '''
    Takes a file and edits every description line to add
    strain_id after the caret.
    Saves the fixed file (via the `mv` shell magic below, which only
    runs when this script is executed as a notebook).
    '''
    import sys
    output_file_name = "temp.txt"
    # prepare output file for saving so it will be open and ready
    with open(output_file_name, 'w') as output_file:
        # read in the input file
        with open(file, 'r') as input_handler:
            # prepare to give feedback later or allow skipping to certain start
            lines_processed = 0
            for line in input_handler:
                lines_processed += 1
                if line.startswith(">"):
                    # everything after the caret is kept verbatim and the
                    # strain id is inserted directly after the caret
                    rest_o_line = line.split(">")
                    new_line = ">"+strain_id + rest_o_line[1]
                else:
                    new_line = line
                # Send text to output
                output_file.write(new_line)
    # replace the original file with edited
    # !mv temp.txt {file}
    # Feedback
    sys.stderr.write("\n{} chromosome identifiers tagged.".format(file))
# tag every strain's genome file in place
for s in yue_et_al_strains:
    add_strain_id_to_description_line(s+".genome.fa",s)
# -
# Get SGD gene sequence in FASTA format to search for best matches in the genomes
import sys
gene_filen = gene_name + ".fsa"
if get_seq_from_link:
    # !curl -o {gene_filen} {link_to_FASTA_of_gene}
else:
    # if not fetching, create an empty file and prompt the user to fill it in
    # !touch {gene_filen}
    sys.stderr.write("\nEDIT THE FILE '{}' TO CONTAIN "
                     "YOUR GENE OF INTEREST (FASTA-FORMATTED)"
                     ".".format(gene_filen))
# Now you are prepared to run BLAST to search each PacBio-sequenced genomes for the best match to a gene from the Saccharomyces cerevisiae strain S288C reference sequence.
# ## Use BLAST to search the genomes for matches to the gene in the reference genome at SGD
#
# SGD is the [Saccharomyces cerevisiae Genome Database site](http:yeastgenome.org) and the reference genome is from S288C.
#
# This is going to go through each genome and make a database so it is searchable and then search for matches to the gene. The information on the best match will be collected. One use for that information will be collecting the corresponding sequences later.
#
# Import the script that allows sending BLAST output to Python dataframes so that we can use it here.
from blast_to_df import blast_to_df
# Make a list of all `genome.fa` files, excluding `genome.fa.nhr` and `genome.fa.nin` and `genome.fansq`
# The excluding was only necessary because I had run some queries preliminarily in development. Normally, it would just be the `.re.fa` at the outset.
fn_to_check = "genome.fa"
genomes = []
import os
import fnmatch
# skip the BLAST database index files (.nhr/.nin/.nsq) makeblastdb creates
for file in os.listdir('.'):
    if fnmatch.fnmatch(file, '*'+fn_to_check):
        if not file.endswith(".nhr") and not file.endswith(".nin") and not file.endswith(".nsq") :
            genomes.append(file)
genomes
SGD_gene = gene_filen
dfs = []
# for each genome: build a BLAST database, run blastn, keep only the top hit
for genome in genomes:
    # !makeblastdb -in {genome} -dbtype nucl
    # result = !blastn -query {SGD_gene} -db {genome} -outfmt "6 qseqid sseqid stitle pident qcovs length mismatch gapopen qstart qend sstart send qframe sframe frames evalue bitscore qseq sseq" -task blastn
    from blast_to_df import blast_to_df
    blast_df = blast_to_df(result.n)
    dfs.append(blast_df.head(1))
# merge the dataframes in the list `dfs` into one dataframe
df = pd.concat(dfs)
#Save the df (pickle for later reuse, TSV for human inspection)
filen_prefix = gene_name + "_orthologBLASTdf"
df.to_pickle(filen_prefix+".pkl")
df.to_csv(filen_prefix+'.tsv', sep='\t',index = False)
df
# Computationally check if any genomes missing from the BLAST results list?
# +
subjids = df.sseqid.tolist()
#print (subjids)
#print (subjids[0:10])
# the part of the subject id before "chr" is the strain identifier
subjids = [x.split(chromosome_id_prefix)[0] for x in subjids]
#print (subjids)
#print (subjids[0:10])
len_genome_fn_end = len(fn_to_check) + 1 # plus one to account for the period that will be
# between `fn_to_check` and strain_id`, such as `SK1.genome.fa`
genome_ids = [x[:-len_genome_fn_end] for x in genomes]
#print (genome_ids[0:10])
a = set(genome_ids)
#print (a)
print ("initial:",len(a))
r = set(subjids)
print("results:",len(r))
print ("missing:",len(a-r))
#a - r
# -
# Sanity check: Report on how expected size compares to max size seen?
size_seen = df.length.max(0)
print ("Expected size of gene:", size_expected)
print ("Most frequent size of matches:", df.length.mode()[0])
print ("Maximum size of matches:", df.length.max(0))
# ## Collect the identified, raw sequences
#
# Get the expected size centered on the best match, plus a little flanking each because they might not exactly cover the entire open reading frame. (Although, the example here all look to be full size.)
# Get the script for extracting based on position (and install dependency pyfaidx)
import os
file_needed = "extract_subsequence_from_FASTA.py"
if not os.path.isfile(file_needed):
    # !curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/Extract_from_FASTA/extract_subsequence_from_FASTA.py
# !pip install pyfaidx
# +
size_expected = size_expected # use value from above, or alter at this point.
#size_expected = df.length.max(0) #bp length of SGD coding sequence; should be equivalent and that way not hardcoded?
# flanking basepairs captured on each side of the centered expected size
extra_add_to_start = 51 #to allow for 'fuzziness' at starting end
extra_add_to_end = 51 #to allow for 'fuzziness' at far end
genome_fn_end = "genome.fa"
def midpoint(items):
    '''
    Return the integer midpoint (truncated toward zero) of the first
    two values in *items*; the values may be ints or numeric strings.
    '''
    first, second = int(items[0]), int(items[1])
    return int((first + second) / 2)
#midpoint((1,100))
def determine_pos_to_get(match_start,match_end):
    '''
    Take the start and end of the matched region.
    Calculate midpoint between those and then
    center expected size on that to determine
    preliminary start and preliminary end to get.
    Add the extra basepairs to get at each end
    to allow for fuzziness/differences of actual
    gene ends for orthologs.
    Return the final start and end positions to get.

    Reads the module-level settings `size_expected`,
    `extra_add_to_start`, and `extra_add_to_end`.
    '''
    center_of_match = midpoint((match_start,match_end))
    half_size_expected = int(size_expected/2.0)
    # round the half size up for odd expected sizes so the span is covered
    if size_expected % 2 != 0:
        half_size_expected += 1
    start_pos = center_of_match - half_size_expected
    end_pos = center_of_match + half_size_expected
    start_pos -= extra_add_to_start
    end_pos += extra_add_to_end
    # Because of getting some flanking sequences to account for 'fuzziness', it
    # is possible the start and end can exceed possible. 'End' is not a problem
    # because the `extract_subsequence_from_FASTA.py` script will get as much as
    # it from the indicated sequence if a larger than possible number is
    # provided. However,'start' can become negative and because the region to
    # extract is provided as a string the dash can become a problem. Dealing
    # with it here by making sequence positive only.
    # Additionally, because I rely on center of match to position where to get,
    # part being cut-off due to absence on sequence fragment will shift center
    # of match away from what is actually center of gene and to counter-balance
    # add twice the amount to the other end. (Actually, I feel I should adjust
    # the start end likewise if the sequence happens to be shorter than portion
    # I would like to capture but I don't know length of involved hit yet and
    # that would need to be added to allow that to happen!<--TO DO)
    if start_pos < 0:
        raw_amount_missing_at_start = abs(start_pos)# for counterbalancing; needs
        # to be collected before `start_pos` adjusted
        start_pos = 1
        end_pos += 2 * raw_amount_missing_at_start
    return start_pos, end_pos
# go through the dataframe using information on each to come up with sequence file,
# specific identifier within sequence file, and the start and end to extract
# store these values as a list in a dictionary with the strain identifier as the key.
extracted_info = {}
start,end = 0,0
for row in df.itertuples():
    #print (row.length)
    start_to_get, end_to_get = determine_pos_to_get(row.sstart, row.send)
    posns_to_get = "{}-{}".format(start_to_get, end_to_get)
    record_id = row.sseqid
    strain_id = row.sseqid.split(chromosome_id_prefix)[0]
    seq_fn = strain_id + "." + genome_fn_end
    extracted_info[strain_id] = [seq_fn, record_id, posns_to_get]
# Use the dictionary to get the sequences
for id_ in extracted_info:
    # #%run extract_subsequence_from_FASTA.py {*extracted_info[id_]} #unpacking doesn't seem to work here in `%run`
    # %run extract_subsequence_from_FASTA.py {extracted_info[id_][0]} {extracted_info[id_][1]} {extracted_info[id_][2]}
#package up the retrieved sequences
archive_file_name = gene_name+"_raw_ortholog_seqs.tar.gz"
# make list of extracted files using fnmatch
fn_part_to_match = "seq_extracted"
collected_seq_files_list = []
import os
import sys
import fnmatch
for file in os.listdir('.'):
    if fnmatch.fnmatch(file, fn_part_to_match+'*'):
        #print (file)
        collected_seq_files_list.append(file)
# !tar czf {archive_file_name} {" ".join(collected_seq_files_list)} # use the list for archiving command
sys.stderr.write("\n\nCollected RAW sequences gathered and saved as "
                 "`{}`.".format(archive_file_name))
# move the collected raw sequences to a folder in preparation for
# extracting encoding sequence from original source below
# !mkdir raw
# !mv seq_extracted*.fa raw
# -
# That archive should contain the "raw" sequence for each gene, even if the ends are a little different for each. At minimum the entire gene sequence needs to be there at this point; extra at each end is preferable at this point.
#
# You should inspect them as soon as possible and adjust the extra sequence to add higher or lower depending on whether the ortholog genes vary more or less, respectively. The reason they don't need to be perfect yet though is because next we are going to extract the longest open reading frame, which presumably demarcates the entire gene. Then we can return to use that information to clean up the collected sequences to just be the coding sequence.
# ## Collect protein translations of the genes and then clean up "raw" sequences to just be coding
#
# We'll assume the longest translatable frame in the collected "raw" sequences encodes the protein sequence for the gene orthologs of interest. Well base these steps on the [section '20.1.13 Identifying open reading frames'](http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc299) in the present version of the [Biopython Tutorial and Cookbook](http://biopython.org/DIST/docs/tutorial/Tutorial.html) (Last Update – 18 December 2018 (Biopython 1.73).
# (First run the next cell to get a script needed for dealing with the strand during the translation and gathering of the encoding sequence.)
import os
file_needed = "convert_fasta_to_reverse_complement.py"
if not os.path.isfile(file_needed):
    # !curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/ConvertSeq/convert_fasta_to_reverse_complement.py
# Now to perform the work described in the header to this section...
# +
# find the featured open reading frame and collect presumed protein sequences
# Collect the corresponding encoding sequence from the original source
def len_ORF(items):
    '''Sort key: length of the translated ORF (fourth element of the tuple).'''
    orf_translation = items[3]
    return len(orf_translation)
def find_orfs_with_trans(seq, trans_table, min_protein_length):
    '''
    adapted from the present section '20.1.13 Identifying open reading frames'
    http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc299 in the
    present version of the [Biopython Tutorial and Cookbook at
    http://biopython.org/DIST/docs/tutorial/Tutorial.html
    (Last Update – 18 December 2018 (Biopython 1.73)
    Same as there except altered to sort on the length of the
    open reading frame.

    Returns a list of (start, end, strand, translation) tuples,
    longest translation first.
    '''
    answer = []
    seq_len = len(seq)
    # scan both strands in all three reading frames
    for strand, nuc in [(+1, seq), (-1, seq.reverse_complement())]:
        for frame in range(3):
            trans = str(nuc[frame:].translate(trans_table))
            trans_len = len(trans)
            aa_start = 0
            aa_end = 0
            # walk stop-codon-delimited stretches of the translation
            while aa_start < trans_len:
                aa_end = trans.find("*", aa_start)
                if aa_end == -1:
                    # no stop codon before the end of the sequence
                    aa_end = trans_len
                if aa_end-aa_start >= min_protein_length:
                    # map amino-acid coordinates back onto the forward strand
                    if strand == 1:
                        start = frame+aa_start*3
                        end = min(seq_len,frame+aa_end*3+3)
                    else:
                        start = seq_len-frame-aa_end*3-3
                        end = seq_len-frame-aa_start*3
                    answer.append((start, end, strand,
                                   trans[aa_start:aa_end]))
                aa_start = aa_end+1
    answer.sort(key=len_ORF, reverse = True)
    return answer
def generate_rcoutput_file_name(file_name, suffix_for_saving="_rc"):
    '''
    from https://github.com/fomightez/sequencework/blob/master/ConvertSeq/convert_fasta_to_reverse_complement.py

    Build the output file name by inserting `suffix_for_saving` in front
    of the extension of `file_name`.

    Specific example
    =================
    Calling function with
        ("sequence.fa", "_rc")
    returns
        "sequence_rc.fa"

    If `file_name` contains no dot at all, a ".fa" extension is appended
    after the suffix instead.
    '''
    base, extension = os.path.splitext(file_name)
    if '.' not in file_name:
        return file_name + suffix_for_saving + ".fa"
    return base + suffix_for_saving + extension
def add_strand_to_description_line(file,strand="-1"):
    '''
    Takes a file and edits every description line to add
    strand info at end.
    Saves the fixed file (via the `mv` shell magic below, which only
    runs when this script is executed as a notebook).
    '''
    import sys
    output_file_name = "temp.txt"
    # prepare output file for saving so it will be open and ready
    with open(output_file_name, 'w') as output_file:
        # read in the input file
        with open(file, 'r') as input_handler:
            # prepare to give feedback later or allow skipping to certain start
            lines_processed = 0
            for line in input_handler:
                lines_processed += 1
                if line.startswith(">"):
                    # append e.g. "; -1 strand" to FASTA description lines
                    new_line = line.strip() + "; {} strand\n".format(strand)
                else:
                    new_line = line
                # Send text to output
                output_file.write(new_line)
    # replace the original file with edited
    # !mv temp.txt {file}
    # Feedback
    sys.stderr.write("\nIn {}, strand noted.".format(file))
# settings and accumulators for the ORF-finding / translation pass below
table = 1 #sets translation table to standard nuclear, see
# https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
min_pro_len = 80 #cookbook had the standard `100`. Feel free to adjust.
prot_seqs_info = {} #collect as dictionary with strain_id as key. Values to
# be list with source id as first item and protein length as second and
# strand in source seq as third item, and start and end in source sequence as fourth and fifth,
# and file name of protein and gene as sixth and seventh.
# Example key and value pair: 'YPS138':['<source id>','<protein length>',-1,52,2626,'<gene file name>','<protein file name>']
gene_seqs_fn_list = []
prot_seqs_fn_list = []
from Bio import SeqIO
for raw_seq_filen in collected_seq_files_list:
#strain_id = raw_seq_filen[:-len_genome_fn_end] #if was dealing with source seq
strain_id = raw_seq_filen.split(chromosome_id_prefix)[0].split("seq_extracted")[1]
record = SeqIO.read("raw/"+raw_seq_filen,"fasta")
raw_seq_source_fn = strain_id + "." + genome_fn_end
raw_seq_source_id = record.description.split(":")[0]
orf_list = find_orfs_with_trans(record.seq, table, min_pro_len)
orf_start, orf_end, strand, prot_seq = orf_list[0] #longest ORF seq for protein coding
location_raw_seq = record.description.rsplit(":",1)[1] #get to use in calculating
# the start and end position in original genome sequence.
raw_loc_parts = location_raw_seq.split("-")
start_from_raw_seq = int(raw_loc_parts[0])
end_from_raw_seq = int(raw_loc_parts[1])
length_extracted = len(record) #also to use in calculating relative original
#Fix negative value. (Somehow Biopython can report negative value when hitting
# end of sequence without encountering stop codon and negatives messes up
# indexing later it seems.)
if orf_start < 0:
orf_start = 0
# Trim back to the first Methionine, assumed to be the initiating MET.
# (THIS MIGHT BE A SOURCE OF EXTRA 'LEADING' RESIDUES IN SOME CASES & ARGUES
# FOR LIMITING THE AMOUNT OF FLANKING SEQUENCE ADDED TO ALLOW FOR FUZINESS.)
try:
amt_resi_to_trim = prot_seq.index("M")
except ValueError:
sys.stderr.write("**ERROR**When searching for initiating methionine,\n"
"no Methionine found in the traslated protein sequence.**ERROR**")
sys.exit(1)
prot_seq = prot_seq[amt_resi_to_trim:]
len_seq_trimmed = amt_resi_to_trim * 3
# Calculate the adjusted start and end values for the untrimmed ORF
adj_start = start_from_raw_seq + orf_start
adj_end = end_from_raw_seq - (length_extracted - orf_end)
# Adjust for trimming for appropriate strand.
if strand == 1:
adj_start += len_seq_trimmed
#adj_end += 3 # turns out stop codon is part of numbering biopython returns
elif strand == -1:
adj_end -= len_seq_trimmed
#adj_start -= 3 # turns out stop codon is part of numbering biopython returns
else:
sys.stderr.write("**ERROR**No strand match option detected!**ERROR**")
sys.exit(1)
# Collect the sequence for the actual gene encoding region from
# the original sequence. This way the original numbers will
# be put in the file.
start_n_end_str = "{}-{}".format(adj_start,adj_end)
# %run extract_subsequence_from_FASTA.py {raw_seq_source_fn} {raw_seq_source_id} {start_n_end_str}
# rename the extracted subsequence a more distinguishing name and notify
g_output_file_name = strain_id +"_" + gene_name + "_ortholog_gene.fa"
# !mv {raw_seq_filen} {g_output_file_name} # because the sequence saved happens to
# be same as raw sequence file saved previously, that name can be used to
# rename new file.
gene_seqs_fn_list.append(g_output_file_name)
sys.stderr.write("\n\nRenamed gene file to "
"`{}`.".format(g_output_file_name))
# Convert extracted sequence to reverse complement if translation was on negative strand.
if strand == -1:
# %run convert_fasta_to_reverse_complement.py {g_output_file_name}
# replace original sequence file with the produced file
produced_fn = generate_rcoutput_file_name(g_output_file_name)
# !mv {produced_fn} {g_output_file_name}
# add (after saved) onto the end of the description line for that `-1 strand`
# No way to do this in my current version of convert sequence. So editing descr line.
add_strand_to_description_line(g_output_file_name)
#When settled on actual protein encoding sequence, fill out
# description to use for saving the protein sequence.
prot_descr = (record.description.rsplit(":",1)[0]+ " "+ gene_name
+ "_ortholog"+ "| " +str(len(prot_seq)) + " aas | from "
+ raw_seq_source_id + " "
+ str(adj_start) + "-"+str(adj_end))
if strand == -1:
prot_descr += "; {} strand".format(strand)
# save the protein sequence as FASTA
chunk_size = 70 #<---amino acids per line to have in FASTA
prot_seq_chunks = [prot_seq[i:i+chunk_size] for i in range(
0, len(prot_seq),chunk_size)]
prot_seq_fa = ">" + prot_descr + "\n"+ "\n".join(prot_seq_chunks)
p_output_file_name = strain_id +"_" + gene_name + "_protein_ortholog.fa"
with open(p_output_file_name, 'w') as output:
output.write(prot_seq_fa)
prot_seqs_fn_list.append(p_output_file_name)
sys.stderr.write("\n\nProtein sequence saved as "
"`{}`.".format(p_output_file_name))
# at end store information in `prot_seqs_info` for later making a dataframe
# and then text table for saving summary
#'YPS138':['<source id>',<protein length>,-1,52,2626,'<gene file name>','<protein file name>']
prot_seqs_info[strain_id] = [raw_seq_source_id,len(prot_seq),strand,adj_start,adj_end,
g_output_file_name,p_output_file_name]
sys.stderr.write("\n******END OF A SET OF PROTEIN ORTHOLOG "
"AND ENCODING GENE********")
# -
# use `prot_seqs_info` for saving a summary text table (first convert to dataframe?)
table_fn_prefix = gene_name + "_orthologs_table"
table_fn = table_fn_prefix + ".tsv"
pkl_table_fn = table_fn_prefix + ".pkl"
import pandas as pd
# One row per strain; columns mirror the list layout built in the loop above.
info_df = pd.DataFrame.from_dict(prot_seqs_info, orient='index',
                                 columns=['descr_id', 'length', 'strand', 'start','end','gene_file','prot_file']) # based on
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.from_dict.html and
# note from Python 3.6 that `pd.DataFrame.from_items` is deprecated;
#"Please use DataFrame.from_dict"
info_df.to_pickle(pkl_table_fn)
info_df.to_csv(table_fn, sep='\t') # keep index is default
sys.stderr.write("Text file of associated details saved as '{}'.".format(table_fn))
# pack up archive of gene and protein sequences plus the table
seqs_list = gene_seqs_fn_list + prot_seqs_fn_list + [table_fn,pkl_table_fn]
archive_file_name = gene_name+"_ortholog_seqs.tar.gz"
# (notebook shell magic; only works when run inside Jupyter/IPython)
# !tar czf {archive_file_name} {" ".join(seqs_list)} # use the list for archiving command
sys.stderr.write("\nCollected gene and protein sequences"
                 " (plus table of details) gathered and saved as "
                 "`{}`.".format(archive_file_name))
# Save the tarballed archive to your local machine.
| notebooks/Searching for coding sequences in genomes using BLAST and Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 명령문
# ## Assignment operator
# [Python library reference](<https://docs.python.org/reference/simple_stmts.html#assignment-statements>)
# says
#
# Assignment statements are used to (re)bind names to values and to
# modify attributes or items of mutable objects.
#
# In short, it works as follows (simple assignment)
#
# 1. an expression on the right hand side is evaluated, the corresponding
# object is created/obtained
# 1. a **name** on the left hand side is assigned, or bound, to the
# r.h.s. object
# Things to note
#
# * a single object can have several names bound to it
#
# ```python
# In [1]: a = [1, 2, 3]
# In [2]: b = a
# In [3]: a
# Out[3]: [1, 2, 3]
# In [4]: b
# Out[4]: [1, 2, 3]
# In [5]: a is b
# Out[5]: True
# In [6]: b[1] = 'hi!'
# In [7]: a
# Out[7]: [1, 'hi!', 3]
# ```
# * to change a list *in place*, use indexing/slices
#
# ```python
# In [1]: a = [1, 2, 3]
# In [3]: a
# Out[3]: [1, 2, 3]
# In [4]: a = ['a', 'b', 'c'] # Creates another object.
# In [5]: a
# Out[5]: ['a', 'b', 'c']
# In [6]: id(a)
# Out[6]: 138641676
# In [7]: a[:] = [1, 2, 3] # Modifies object in place.
# In [8]: a
# Out[8]: [1, 2, 3]
# In [9]: id(a)
# Out[9]: 138641676 # Same as in Out[6], yours will differ...
# ```
# * the key concept here is **mutable vs. immutable**
# * mutable objects can be changed in place
# * immutable objects cannot be modified once created
# ## Control Flow
# Controls the order in which the code is executed.
# ### if/elif/else
# ```python
# >>> if 2**2 == 4:
# ... print('Obvious!')
# ...
# Obvious!
# ```
# **Blocks are delimited by indentation**
# Type the following lines in your Python interpreter, and be careful
# to **respect the indentation depth**. The Ipython shell automatically
# increases the indentation depth after a colon ``:`` sign; to
# decrease the indentation depth, go four spaces to the left with the
# Backspace key. Press the Enter key twice to leave the logical block.
# ```python
# >>> a = 10
#
# >>> if a == 1:
# ... print(1)
# ... elif a == 2:
# ... print(2)
# ... else:
# ... print('A lot')
# A lot
# ```
# Indentation is compulsory in scripts as well. As an exercise, re-type the
# previous lines with the same indentation in a script ``condition.py``, and
# execute the script with ``run condition.py`` in Ipython.
# ### for/range
# Iterating with an index:
#
# ```python
# >>> for i in range(4):
# ... print(i)
# 0
# 1
# 2
# 3
# ```
# But most often, it is more readable to iterate over values:
# ```python
# >>> for word in ('cool', 'powerful', 'readable'):
# ... print('Python is %s' % word)
# Python is cool
# Python is powerful
# Python is readable
# ```
# ### while/break/continue
# Typical C-style while loop (Mandelbrot problem):
#
# ```python
# >>> z = 1 + 1j
# >>> while abs(z) < 100:
# ... z = z**2 + 1
# >>> z
# (-134+352j)
# ```
# **More advanced features**
#
# `break` out of enclosing for/while loop:
#
# ```python
# >>> z = 1 + 1j
#
# >>> while abs(z) < 100:
# ... if z.imag == 0:
# ... break
# ... z = z**2 + 1
# ```
# `continue` the next iteration of a loop.:
#
# ```python
# >>> a = [1, 0, 2, 4]
# >>> for element in a:
# ... if element == 0:
# ... continue
# ... print(1. / element)
# 1.0
# 0.5
# 0.25
# ```
# ### Conditional Expressions
# `if <OBJECT>`
#
# Evaluates to False:
# * any number equal to zero (`0`, `0.0`, `0+0j`)
# * an empty container (list, tuple, set, dictionary, ...)
# * `False`, `None`
#
# Evaluates to True:
# * everything else
# `a == b`
#
# Tests equality, with logics:
#
# ```python
# >>> 1 == 1.
# True
# ```
# `a is b`
#
# Tests identity: both sides are the same object:
#
# ```python
# >>> 1 is 1.
# False
#
# >>> a = 1
# >>> b = 1
# >>> a is b
# True
# ```
# `a in b`
#
# For any collection `b`: `b` contains `a` :
#
# ```python
# >>> b = [1, 2, 3]
# >>> 2 in b
# True
# >>> 5 in b
# False
# ```
#
# If `b` is a dictionary, this tests that `a` is a key of `b`.
# ### Advanced iteration
# **Iterate over any sequence**
#
# You can iterate over any sequence (string, list, keys in a dictionary, lines in
# a file, ...):
#
# ```python
# >>> vowels = 'aeiouy'
#
# >>> for i in 'powerful':
# ... if i in vowels:
# ... print(i)
# o
# e
# u
# ```
# ```python
# >>> message = "Hello how are you?"
# >>> message.split() # returns a list
# ['Hello', 'how', 'are', 'you?']
# >>> for word in message.split():
# ... print(word)
# ...
# Hello
# how
# are
# you?
# ```
# Few languages (in particular, languages for scientific computing) allow to
# loop over anything but integers/indices. With Python it is possible to
# loop exactly over the objects of interest without bothering with indices
# you often don't care about. This feature can often be used to make
# code more readable.
# **warning**: Not safe to modify the sequence you are iterating over.
# **Keeping track of enumeration number**
#
# Common task is to iterate over a sequence while keeping track of the
# item number.
# * Could use while loop with a counter as above. Or a for loop:
#
# ```python
# >>> words = ('cool', 'powerful', 'readable')
# >>> for i in range(0, len(words)):
# ... print((i, words[i]))
# (0, 'cool')
# (1, 'powerful')
# (2, 'readable')
# ```
# * But, Python provides a built-in function - `enumerate` - for this:
#
# ```python
# >>> for index, item in enumerate(words):
# ... print((index, item))
# (0, 'cool')
# (1, 'powerful')
# (2, 'readable')
# ```
# **Looping over a dictionary**
#
# Use **items**:
#
# ```python
# >>> d = {'a': 1, 'b':1.2, 'c':1j}
#
# >>> for key, val in sorted(d.items()):
# ... print('Key: %s has value: %s' % (key, val))
# Key: a has value: 1
# Key: b has value: 1.2
# Key: c has value: 1j
# ```
# **note**
#
# The ordering of a dictionary is random, thus we use :func:`sorted`
# which will sort on the keys.
# **Exercise**
# ref: `pi_wallis`
#
# Compute the decimals of Pi using the Wallis formula:
#
# $$
# \pi = 2 \prod_{i=1}^{\infty} \frac{4i^2}{4i^2 - 1}
# $$
#
| notebooks/ch04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Merge the two tweet collections into one frame, tagged by group (K / M).
tweets_k = pd.read_pickle('../datasets/TweetsDataset_K.pkl')[['screen_name','text','created_at']]
tweets_k['grupo'] = 'K'
tweets_m = pd.read_pickle('../datasets/TweetsDataset_M.pkl')[['screen_name','text','created_at']]
tweets_m['grupo'] = 'M'
tweets = pd.concat([tweets_k,tweets_m])
# Dataset summary: tweet counts and unique-user counts per group.
print('K-Tweets_count:',tweets_k.screen_name.count())
print('K-Users_count:',tweets_k.screen_name.nunique())
print('M-Tweets_count:',tweets_m.screen_name.count())
print('M-Users_count:',tweets_m.screen_name.nunique())
# ### Sentiment Analysis
# Import and instantiate the sentiment classifier.
# Thanks and credits to https://github.com/aylliote/senti-py
from classifier import *
clf = SentimentClassifier()
# Score every tweet's sentiment and return a new dataframe that carries the
# prediction as an extra `sent_score` column.
def calc_sent(df):
    """Predict sentiment for each row's text via the global `clf` classifier."""
    scored_rows = [
        (row.grupo, row.created_at, row.screen_name, clf.predict(row.text), row.text)
        for _, row in df.iterrows()
    ]
    return pd.DataFrame(scored_rows,columns=['grupo','created_at','screen_name','sent_score','text'])
# Fan the per-row work of `func` out over a pool of worker processes.
def parallelize_dataframe(df, func, n_cores=4):
    """Split `df` into `n_cores` chunks, map `func` over them in a
    multiprocessing pool, and return the concatenated result."""
    chunks = np.array_split(df, n_cores)
    workers = Pool(n_cores)
    combined = pd.concat(workers.map(func, chunks))
    workers.close()
    workers.join()
    return combined
# Run the sentiment-score computation for the whole dataframe in parallel.
from multiprocessing import Pool
tweets = parallelize_dataframe(tweets,calc_sent,n_cores=28)
# Add each user's mean sentiment score as a new column (suffix `_mean`).
tweets = tweets.join(tweets[['screen_name','sent_score']].groupby('screen_name').mean(),on='screen_name',rsuffix='_mean')
# Bucket the mean score into a categorical user type.
tweets['user_type'] = pd.cut(tweets['sent_score_mean'], [0,0.2, 0.4, 0.6, 0.8, 1] , labels=["hater", "negative", "neutral","positive","lover"])
# Add a column with the z-score of each user's mean score.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
tweets['sent_zscore_mean'] = scaler.fit_transform(tweets[['sent_score_mean']])
# Dump to an Excel file for further processing.
import openpyxl
tweets.to_excel('../datasets/tweets_M_K.xlsx',index=False)
| dataAnalysis/Text_Mining-Sentiment Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# # Transforms
#
# Data does not always come in its final processed form that is required for
# training machine learning algorithms. We use **transforms** to perform some
# manipulation of the data and make it suitable for training.
#
# All TorchVision datasets have two parameters (`transform` to modify the features and
# `target_transform` to modify the labels) that accept callables containing the transformation logic.
# The [torchvision.transforms](https://pytorch.org/docs/stable/torchvision/transforms.html) module offers
# several commonly-used transforms out of the box.
#
# The FashionMNIST features are in PIL Image format, and the labels are integers.
# For training, we need the features as normalized tensors, and the labels as one-hot encoded tensors.
# To make these transformations, we use `ToTensor` and `Lambda`.
# +
# BUG FIX: `torch` is used in the Lambda below (torch.zeros / torch.tensor)
# but was never imported anywhere in this notebook.
import torch
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda

# FashionMNIST with features converted to [0,1] float tensors and integer
# labels one-hot encoded into length-10 float tensors.
ds = datasets.FashionMNIST(
    root="data",
    train=True,
    download=True,
    transform=ToTensor(),
    target_transform=Lambda(lambda y: torch.zeros(10, dtype=torch.float).scatter_(0, torch.tensor(y), value=1))
)
# -
# ## ToTensor()
#
# [ToTensor](https://pytorch.org/docs/stable/torchvision/transforms.html#torchvision.transforms.ToTensor)
# converts a PIL image or NumPy `ndarray` into a `FloatTensor`. and scales the image's pixel intensity values in the range \[0., 1.\]
#
# ## Lambda Transforms
#
# Lambda transforms apply any user-defined lambda function. Here, we define a function
# to turn the integer into a one-hot encoded tensor.
# It first creates a zero tensor of size 10 (the number of labels in our dataset) and calls
# [scatter](https://pytorch.org/docs/stable/tensors.html#torch.Tensor.scatter_) which assigns a
# `value=1` on the index as given by the label `y`.
# BUG FIX: `torch` is referenced here but never imported in this notebook.
import torch

# One-hot encode label y: a zero tensor of size 10 with a 1 written at index y.
target_transform = Lambda(lambda y: torch.zeros(
    10, dtype=torch.float).scatter_(dim=0, index=torch.tensor(y), value=1))
# --------------
#
#
#
| intro-to-pytorch/4-transforms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.5.0
# language: julia
# name: julia-0.5
# ---
# NOTE(review): these cells append to a vector `x` that must already be
# defined by an earlier cell — presumably part of a jupytext round-trip
# test fixture; the `# executeme` markers appear intentional, so they are
# left untouched.
x=[x; 1]
# executeme
x=[x; 2]
x=[x; 3]
x=[x; 4]
# executeme
x=[x; 5]
x=[x; 6]
| test/test2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Load the prepared dataset and take a first look.
df = pd.read_csv('Full_Data_Final.csv')
df.head()
# Per-column sum of negative values (sanity check for bad data).
df[df<0].sum()
# Per-column count of missing values.
df.isnull().sum()
# Pairwise scatter plots for a quick look at feature relationships.
sns.pairplot(df)
from sklearn.ensemble import RandomForestRegressor
# +
from sklearn.metrics import mean_squared_log_error, mean_absolute_error
def rmsle(y_test, y_preds):
    """Root mean squared log error between true and predicted values."""
    msle = mean_squared_log_error(y_test, y_preds)
    return np.sqrt(msle)
# Create function to evaluate our model
def show_scores(model):
    """Report MAE, RMSLE and R^2 on the train/validation splits.

    Reads the module-level X_train/y_train and X_valid/y_valid globals.
    """
    preds_train = model.predict(X_train)
    preds_valid = model.predict(X_valid)
    return {
        "Training MAE": mean_absolute_error(y_train, preds_train),
        "Valid MAE": mean_absolute_error(y_valid, preds_valid),
        "Training RMSLE": rmsle(y_train, preds_train),
        "Valid RMSLE": rmsle(y_valid, preds_valid),
        "Training R^2": model.score(X_train, y_train),
        "Valid R^2": model.score(X_valid, y_valid),
    }
# -
# Split by month: train on everything except May and the last 1000 April
# rows; those 1000 April rows become validation, May becomes the test set.
df_train = df[df['month'] != 4]
# NOTE(review): DataFrame.append is deprecated since pandas 1.4 (removed in
# 2.0); pd.concat is the replacement on modern pandas.
df_train = df_train.append(df[df['month']==4][:len(df[df['month']==4])-1000])
df_train = df_train[df_train['month'] != 5]
df_valid = df[df['month'] == 4][-1000:]
df_test = df[df['month'] == 5]
len(df_train), len(df_valid), len(df_test), len(df)
df[df['month'] == 5]
# Feature/target split: `deaths` is the regression target.
X_train, y_train = df_train.drop('deaths', axis=1), df_train['deaths']
X_valid, y_valid = df_valid.drop('deaths', axis=1), df_valid['deaths']
X_test, y_test = df_test.drop('deaths', axis=1), df_test['deaths']
X_train.shape, y_train.shape, X_valid.shape, y_valid.shape, X_test.shape, y_test.shape
# Baseline model with default hyperparameters.
# %%time
model = RandomForestRegressor()
model.fit(X_train, y_train)
show_scores(model)
# Broad first-pass hyperparameter grid.
# NOTE(review): max_features "auto" was deprecated/removed in newer
# scikit-learn (1.1/1.3) — confirm the installed version accepts it.
rf_grid = {"n_estimators": np.arange(10, 100, 10),
           "max_depth": [None, 5, 10],
           "min_samples_split": np.arange(2, 20, 2),
           "min_samples_leaf": np.arange(1, 20, 2),
           "max_features": [0.5, 1, "sqrt", "auto"],
           "max_samples": [1000, 2000, 10000]}
# +
# %%time
from sklearn.model_selection import GridSearchCV
# Exhaustive search over the grid with 3-fold CV, all cores.
grid_search = GridSearchCV(estimator=RandomForestRegressor(),
                           param_grid=rf_grid,
                           verbose=2,
                           n_jobs=-1,
                           cv=3)
grid_search.fit(X_train, y_train)
# -
best_params = grid_search.best_params_
best_params
show_scores(grid_search)
# Refit a plain model on the best parameter set found.
model = RandomForestRegressor(**best_params)
model.fit(X_train, y_train)
show_scores(model)
rf_grid, best_params
# BUG FIX: `from sklearn.externals import joblib` was removed in
# scikit-learn 0.23 and raises ImportError there; joblib is a standalone
# package now, so import it directly.
import joblib

# Persist both the full grid search and the refit model for later reuse.
joblib.dump(grid_search, 'ideal_model.pkl')
joblib.dump(model, 'fast_model.pkl')
# Narrowed grid around the best parameters from the first search.
# BUG FIX: the original listed 'log2' under max_depth, but 'log2' is a
# max_features option — max_depth must be an int or None, and the invalid
# value would make GridSearchCV.fit raise. It is dropped here.
rf_grid = {"n_estimators": [10, 100, 200, 300],
           "max_depth": [None],
           "min_samples_split": [7, 8, 9],
           "min_samples_leaf": [2, 3, 4, 5],
           "max_features": [0.1, 0.3, 0.5, 0.7],
           "max_samples": [7000, 10000, 13000]}
# +
# %%time
# Second, finer search with 5-fold CV over the narrowed grid.
grid_search = GridSearchCV(estimator=RandomForestRegressor(),
                           param_grid=rf_grid,
                           verbose=2,
                           n_jobs=-1,
                           cv=5)
grid_search.fit(X_train, y_train)
# -
show_scores(grid_search)
grid_search.best_params_
# Hand-picked "fast" model based on the best parameters found above.
fast_model = RandomForestRegressor(max_depth=None, max_features=0.5, min_samples_leaf=2, min_samples_split=8, n_estimators=300)
fast_model.fit(X_train, y_train)
show_scores(fast_model)
X_train.shape
| ml/SecondTry.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="5uxdeW8LlGtO"
# 範例題目:<br>
# 1. 認識類別資料
# 2. 實做缺值處理方法與應用函式
#
# + [markdown] id="X2pH-Mu6lQVz"
# 範例重點:<br>
# 1. 類別資料,有分順序型與一般型,使用的編碼方式不同
# 2. 缺失值有很多處理方式,在這邊簡單介紹2種常見的方式
# + id="Tqp3yFkXlPfC"
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="365x9GdjkZBU" executionInfo={"status": "ok", "timestamp": 1606481914752, "user_tz": -480, "elapsed": 2481, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="27fe50fb-39ec-424f-8b46-552462fd9daa"
# Toy dataset: one row per garment with color, size, sex and length.
# NOTE(review): column name 'lenght' is a typo but is a runtime string
# referenced as data, so it is left unchanged here.
df = pd.DataFrame([['green', 'M', 'male', 'short'],
                   ['red', 'L', 'female', 'normal'],
                   ['blue', 'XL', 'male', 'long']])
df.columns =['color', 'size', 'sex', 'lenght']
df
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="S_xdZBoLvKsI" executionInfo={"status": "ok", "timestamp": 1606481914753, "user_tz": -480, "elapsed": 2471, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="7651df33-e881-413e-951d-f54b63adf52a"
# Ordinal categorical data needs an order-aware encoding: labels are mapped
# to 0..n-1 (n = number of classes), giving the categories an order 0<1<2<...
from sklearn.preprocessing import LabelEncoder
df['size_label'] = LabelEncoder().fit_transform(df['size'].values)
df
# + colab={"base_uri": "https://localhost:8080/"} id="p3az-ibkxKEF" executionInfo={"status": "ok", "timestamp": 1606482051931, "user_tz": -480, "elapsed": 984, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="695e4162-f3dc-4f92-d4cc-b2c361dfd8fc"
# The ordering follows Python's built-in character order; inspect it with ord().
ord('L'),ord('M')
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="-5NGETtHkaVO" executionInfo={"status": "ok", "timestamp": 1606482203879, "user_tz": -480, "elapsed": 1016, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="19bfccdf-fc15-47b0-a686-540293a062cf"
# get_dummies() one-hot encodes each category of the given column(s).
# One-hot encoding carries no ordering between categories.
pf = pd.get_dummies(df[['color']])
df = pd.concat([df, pf], axis=1)
df
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="eXuVJEjcklK9" executionInfo={"status": "ok", "timestamp": 1606483846756, "user_tz": -480, "elapsed": 589, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="8418587e-8361-4023-e9c9-86717270547d"
# Small time series with one missing temperature reading (row 3).
temp_data = pd.DataFrame([['2020-11-01', 24.8],
                          ['2020-11-02', 24.8],
                          ['2020-11-03', None],
                          ['2020-11-04', 25]],columns=['date','current_temp'])
temp_data
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="4B6h9VoQ6bnh" executionInfo={"status": "ok", "timestamp": 1606484087932, "user_tz": -480, "elapsed": 568, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="fba66d61-992b-40d6-d216-12cdf6cf8ff5"
# Fill missing values with 0.
temp_data.fillna(0)
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="mND2DX_26WS9" executionInfo={"status": "ok", "timestamp": 1606484071535, "user_tz": -480, "elapsed": 788, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="2aec3981-1fe6-44ce-de3c-04997a7a31b8"
# Fill with the arithmetic mean of the column.
temp_data.fillna(temp_data.current_temp.mean())
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="-znesYczlWxV" executionInfo={"status": "ok", "timestamp": 1606484285678, "user_tz": -480, "elapsed": 882, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="1f1596dc-7a0b-44fb-c64f-cf129855fcb5"
# Fill with the median of the column.
temp_data.fillna(temp_data.current_temp.median())
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="jKRhofKgojLJ" executionInfo={"status": "ok", "timestamp": 1606484483835, "user_tz": -480, "elapsed": 948, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="44d07ef4-8261-4260-d271-49d0486d1cb7"
# method='ffill' fills with the previous row's value.
# NOTE(review): fillna(method=...) is deprecated in pandas 2.x in favor of
# DataFrame.ffill()/bfill() — confirm against the installed version.
temp_data.fillna(method='ffill')
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="3avR-gbSowrl" executionInfo={"status": "ok", "timestamp": 1606484494001, "user_tz": -480, "elapsed": 874, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="69069ab9-cb82-460d-f4dc-24b942991f9d"
# method='bfill' fills with the next row's value.
temp_data.fillna(method='bfill')
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="Dp1d6r0j8AMC" executionInfo={"status": "ok", "timestamp": 1606485231887, "user_tz": -480, "elapsed": 1001, "user": {"displayName": "\u732e\u7ae4\u9ec3", "photoUrl": "", "userId": "07529243043474362942"}} outputId="afb9ff6d-bd6e-48c4-dc7d-4b4513813036"
# Fill by linear interpolation between neighboring values.
temp_data.interpolate()
# + id="__8P6Gw6-0T3"
| Sample Code/Day_11_SampleCode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from rdkit import Chem
# %matplotlib inline
# -
# Resolve the data directory relative to this notebook's location.
homedir = os.path.dirname(os.path.realpath('__file__'))
homedir = homedir+"/data/"
df = pd.read_csv(homedir+"tox_niehs_all.csv")
df.head()
# # Construct Internal Test Set
size = 0.10  # fraction of rows held out as the internal test set
seed = 6
np.random.seed(seed)
# CONSISTENCY FIX: use the `size` constant instead of a duplicated
# hard-coded 0.1 so the held-out fraction is controlled in one place
# (same value, identical behavior).
msk = np.random.rand(len(df)) < size
df_tv = df[~msk]   # train+validation rows
df_int = df[msk]   # internal test rows
print(df.shape, df_tv.shape, df_int.shape)
df_tv.to_csv(homedir+'tox_niehs_all_trainval.csv', index=False)
df_int.to_csv(homedir+'tox_niehs_all_int.csv', index=False)
# # Evaluate Dataset Characteristics
import matplotlib.pyplot as plt
# +
# BUG FIX: the `normed` keyword was removed from matplotlib's hist in 3.1;
# `density=True` is the direct replacement for normalized histograms
# (applies to every cell below).
task = 'verytoxic'
fig, axes = plt.subplots(nrows=1, ncols=3)
df[task].hist(density=True, ax=axes[0])
df_tv[task].hist(density=True, ax=axes[1])
df_int[task].hist(density=True, ax=axes[2])
# +
task = 'nontoxic'
fig, axes = plt.subplots(nrows=1, ncols=3)
df[task].hist(density=True, ax=axes[0])
df_tv[task].hist(density=True, ax=axes[1])
df_int[task].hist(density=True, ax=axes[2])
# +
task = 'epa'
fig, axes = plt.subplots(nrows=1, ncols=3)
df[task].hist(density=True, ax=axes[0])
df_tv[task].hist(density=True, ax=axes[1])
df_int[task].hist(density=True, ax=axes[2])
# +
task = 'ghs'
fig, axes = plt.subplots(nrows=1, ncols=3)
df[task].hist(density=True, ax=axes[0])
df_tv[task].hist(density=True, ax=axes[1])
df_int[task].hist(density=True, ax=axes[2])
# +
task = 'ld50'
fig, axes = plt.subplots(nrows=1, ncols=3)
df[task].hist(density=True, ax=axes[0])
df_tv[task].hist(density=True, ax=axes[1])
df_int[task].hist(density=True, ax=axes[2])
# +
task = 'logld50'
fig, axes = plt.subplots(nrows=1, ncols=3)
df[task].hist(density=True, ax=axes[0])
df_tv[task].hist(density=True, ax=axes[1])
df_int[task].hist(density=True, ax=axes[2])
# -
| jupyter/ToxNet_02_Internal_Test_Set.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Word2Vec
# ## Brief
# A tutorial for CBOW model within TensorFlow (API r1.3) framework.
# ## Reference
# [Lecture note from Stanford](http://cs224d.stanford.edu/lecture_notes/notes1.pdf)
# ## Import
import tensorflow as tf
import numpy as np
import os
# ## Load corpus
# Load the corpus: one abstract per line of the text file.
corpus_path = os.path.normpath("../Dataset/arvix_abstracts.txt")
with open(corpus_path,"r") as f:
    corpus = "".join(f.readlines()).split("\n")
# ## Define class for codec
class WordCodec:
    """Bidirectional mapping between words and dense integer indices.

    Indices are assigned in first-seen order, so the same word stream
    always produces the same codes.
    """

    def __init__(self, word_flow):
        """Build the vocabulary from an iterable of words.

        Raises:
            TypeError: if the stream yields a non-string item.
        """
        self._index_to_word = []
        self._word_to_index = {}
        for word in word_flow:
            # BUG FIX: the original `assert type(word)!="str"` compared a type
            # object to the string "str", which is always true, so the check
            # never fired. Validate properly (and without `assert`, which is
            # stripped under -O).
            if not isinstance(word, str):
                raise TypeError("Got type {} instead of str".format(type(word)))
            if word not in self._word_to_index:
                self._word_to_index[word] = len(self._index_to_word)
                self._index_to_word.append(word)

    def __getitem__(self, key):
        """Look up a word by index (int key) or an index by word (str key)."""
        if type(key) == int:
            return self._index_to_word[key]
        elif type(key) == str:
            return self._word_to_index[key]
        else:
            raise TypeError("key must be either int or str.")

    @property
    def vocab_size(self):
        """Number of distinct words seen."""
        return len(self._index_to_word)
def word_flow():
    """Yield every space-delimited token of the module-level `corpus`,
    paragraph by paragraph."""
    for paragraph in corpus:
        yield from paragraph.split(" ")
# Build the vocabulary from the whole corpus and report its size.
one_hot_codec = WordCodec(word_flow())
print("Total number of word in the vocabulary: {}".format(one_hot_codec.vocab_size))
# ## Define CBOW Model
class CBOW:
    """Continuous bag-of-words graph (TF1): average the context-word
    embeddings and project the mean back onto the vocabulary to score the
    center word."""
    def __init__(self, vocab_size, context_length, embedding_dim):
        with tf.variable_scope("CBOW"):
            # Indices of the 2*context_length words surrounding the center word.
            self._context_input = tf.placeholder(shape=[2*context_length], dtype=tf.int32)
            V = tf.get_variable(shape=[vocab_size, embedding_dim], dtype=tf.float32, name="V") # Embedding
            self._embedding = tf.nn.embedding_lookup(V, self._context_input)
            # Mean of the context embeddings, kept as a [1, embedding_dim] row.
            hidden = tf.reduce_mean(self._embedding, axis=0, keep_dims=True)
            U = tf.get_variable(shape=[embedding_dim, vocab_size], name="U")
            # Unnormalized vocabulary logits for the center word.
            self._output = tf.matmul(hidden, U)
    @property
    def input(self):
        # Placeholder for the context word indices.
        return self._context_input
    @property
    def output(self):
        # [1, vocab_size] logits tensor.
        return self._output
    @property
    def embedding(self):
        # Looked-up context embeddings, shape [2*context_length, embedding_dim].
        return self._embedding
# ## Create CBOW Model
# Model hyperparameters.
context_length = 2
embedding_dim = 1000
cbow_model = CBOW(one_hot_codec.vocab_size, context_length, embedding_dim)
# ## Create Metadata File for Embedding Visualization
# Write the index->word table TensorBoard uses to label embedding points.
metadata_path = os.path.normpath("./graphs/word_codec")
with open(metadata_path, "w") as f:
    f.write("Index\tWord\n")
    for i in range(one_hot_codec.vocab_size):
        f.write("{}\t{}\n".format(i, one_hot_codec[i]))
# ## Define Loss Function and Start Training
def training_sample_generator(corpus, codec, context_length):
    """Yield (context indices, center index) pairs for every full window in
    each paragraph of `corpus`, encoding words through `codec`."""
    for paragraph in corpus:
        indices = np.array([codec[token] for token in paragraph.split(" ")])
        for center in range(context_length, len(indices) - context_length):
            left = indices[center - context_length:center]
            right = indices[center + 1:center + context_length + 1]
            yield np.concatenate([left, right], axis=0), indices[center:center + 1]
# Loss: softmax cross-entropy of the model's logits against the center word.
target_output=tf.placeholder(shape=[1],dtype=tf.int32, name="target_output")
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=cbow_model.output, labels=target_output))
lr = tf.Variable(1e-4, trainable=False)
global_step = tf.Variable(0, trainable=False)
train_op=tf.train.AdamOptimizer(learning_rate=lr).minimize(loss, global_step=global_step)
with tf.name_scope("summary") as scope:
    summary_op = tf.summary.scalar(name="loss",tensor=loss)
num_epoch = 20
graph_path = "./graphs"
model_checkpoint_path = os.path.join("./graphs", "CBOW")
save_every = 1000
with tf.Session() as sess:
    saver = tf.train.Saver()
    writer = tf.summary.FileWriter(logdir=graph_path, graph=sess.graph)
    # NOTE(review): get_checkpoint_state expects a checkpoint *directory*;
    # passing the "./graphs/CBOW" prefix may never find a checkpoint — verify.
    ckpt = tf.train.get_checkpoint_state(model_checkpoint_path)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Continue training")
    else:
        sess.run(tf.global_variables_initializer())
    for epoch in range(num_epoch):
        # BUG FIX: the generator was previously created once before the epoch
        # loop, so it was exhausted after epoch 1 and epochs 2..num_epoch
        # trained on nothing. Re-create it at the start of every epoch.
        training_set = training_sample_generator(corpus, one_hot_codec, context_length)
        for x, y in training_set:
            feed_dict={cbow_model.input: x, target_output: y}
            _, summary = sess.run([train_op, summary_op], feed_dict)
            writer.add_summary(summary=summary, global_step=global_step.eval(sess))
            n_iter = global_step.eval(sess)
            if (n_iter % save_every) == 0:
                saver.save(sess=sess, save_path=model_checkpoint_path, global_step=n_iter)
    # Final checkpoint after all epochs.
    saver.save(sess=sess, save_path=model_checkpoint_path, global_step=n_iter)
    print("Training Complete")
| w2v/Word2Vec.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Motivator Traits and Regulator Traits in Propositional and Proposal Contexts
# ## By <NAME>
#
# ### Github: https://github.com/BellandBlackBird
# ### Twitter: https://twitter.com/ScienceandLove
# ### Youtube: https://www.youtube.com/channel/UC5DqrheyJi_u7uJPJEyodCg
# ### Linkedin: https://www.linkedin.com/in/bjorn-alvinge/
# ### Introduction
# This notebook, from a dataset of "enriched trait candidates" (see below), prepares, constructs, and measures the prevalence of the abstract entities "Motivator traits" and "Regulator traits", by way of their computational association to two principally different Request/Demand contexts found in a job ad, namely, "Propositional context" and "Proposal context" respectively. Although this test does not meet a scientific standard quite yet, due to the fact that the decision criteria to separate the traits into two different categories is still at the conceptual level and has not been computationally constructed yet and the asymmetrical connection the two contexts has to those physical structures that is the requested traits hasn't been functionally implemented yet, it should be possible, in theory, to implement the real causal-dynamical processes which produced the categorical yet overlapping trait-differences computationally.
# #!pip install python-Levenshtein
# #!pip install ijson
# #!pip install nltk
#pip install tqdm
import pandas as pd
from nltk.tokenize import word_tokenize
import statsmodels.api as sm
from scipy import stats
import nltk
nltk.download('punkt')
import collections
import re
import Levenshtein
import ijson
#optional if you want progress bars.
#Make sure to remove tqdm around the iterators if you do not want or need progress bars.
from tqdm.notebook import tqdm
# Import needed packages. tqdm is for progress bars, and needs to be installed in jupyter notebook
# according to their tutorials (https://ipywidgets.readthedocs.io/en/latest/user_install.html#installing-the-jupyterlab-extension), as pip install might not work without some config.
# Stream the enriched job-ad JSON files and collect (doc_id, traits, ssyk)
# records into one DataFrame, flushing every `batch_size` ads so the raw
# parsed tuples never accumulate unboundedly in memory.
filenames = ['enriched_100_2017', 'enriched_100_2018', 'enriched_100_2019']
file_dir = 'data/'
ads_columns_to_be_saved = ['doc_id', 'enriched_candidates_traits', 'ssyk']
batch_size = 100  # raise this if your machine can hold more parsed ads in RAM
ads_list = []
nr_of_ads = 0
enriched_trait_candidates_ads = pd.DataFrame()

def _flush_batch(frame, batch):
    """Append the buffered batch of ad tuples to the accumulating DataFrame."""
    new_ads = pd.DataFrame.from_records(batch, columns=ads_columns_to_be_saved)
    # pd.concat replaces DataFrame.append, which was deprecated in pandas 1.4
    # and removed in pandas 2.0.
    return pd.concat([frame, new_ads], ignore_index=True)

for filename in filenames:
    print("Reading from file: "+filename+ '.json')
    with open(file_dir + filename + '.json') as enriched_json_file:
        # ijson yields one decoded ad at a time instead of loading the file whole.
        reader = ijson.items(enriched_json_file, 'item')
        for counter, ad in enumerate(reader):
            ads_list.append((ad['doc_id'], ad['enriched_candidates']['traits'], ad['ssyk']))
            nr_of_ads += 1
            # BUG FIX: the original incremented *after* the check, so batches
            # actually held batch_size + 1 ads; now they hold exactly batch_size.
            if nr_of_ads == batch_size:
                print("Read and parsed "+str(counter)+" json-ads from the file: "+filename+ '.json')
                enriched_trait_candidates_ads = _flush_batch(enriched_trait_candidates_ads, ads_list)
                nr_of_ads = 0
                ads_list = []
# Flush whatever is left over after the final (partial) batch.
enriched_trait_candidates_ads = _flush_batch(enriched_trait_candidates_ads, ads_list)
nr_of_ads = 0
ads_list = []
print("Read and parsed "+str(len(enriched_trait_candidates_ads))+" json-ads in total, and appended them to dataframe.")
# This block is for processing already enriched candidates, which can be done using jobtechs enrich api (https://jobad-enrichments-api.jobtechdev.se/enrichtextdocuments), making sure that all candidates and their associated prediction value are kept with endpoint /enrichtextdocuments.
#
# This analysis however constructed the enriched ads using the "interface" that can be found here: https://gitlab.com/arbetsformedlingen/joblinks/text-2-ssyk, which calls the enrichment api.
# the historical job ads in json format can be found here: https://jobtechdev.se/docs/apis/historical/
#
# this particular analysis only uses trait-data and doc_id data, so not all data from the historical or the enriched ads is needed. SSYK analyses might be added in the near future.
#
# (the original analysis used 2025718 ads in total, so this current analysis will not work for the subset given. try to remove traits if you get stuck.)
#
# The block uses ijson to create a lazy iterator so as to avoid memory problems and to not dump all the data into ram before computing on it. after 200 000 ads have been json-decoded (100 in this getting-started sample), they are saved in a dataframe so as to avoid not all data is kept in memory while the for loop is running. To json-decode a limited set (2025718 ads in total) with only doc id, traits and ssyk as data in each ad, with machine specs of:
#
# Model Name: MacBook Pro
# Model Identifier: MacBookPro14,3
# Processor Name: Intel Core i7
# Processor Speed: 2,9 GHz
# Number of Processors: 1
# Total Number of Cores: 4
#
# Should take about 10-20 min to complete.
# +
# Swedish trait names (enrichment-API concept labels) whose ad occurrences are
# collected below. Each key maps to a list of (doc_id, trait-dict) tuples.
# NOTE: the *insertion order* of these keys is significant — MF_index further
# down is a parallel list that assigns a framework to each trait by position.
MF_trait_dict = {'resultatinriktad': [], 'ärlighet': [], 'arbetsvillig': [], 'modig': [],
                 'uppgiftsorienterad': [], 'ansvarsmedveten': [], 'ansvarstagande': [], 'uppriktig': [],
                 'uthållig': [], 'plikttrogen': [], 'rolig':[], 'professionell': [],
                 'målinriktad': [], 'vänskaplig': [], 'ödmjukhet': [], 'tålmodig': [],
                 'självgående': [], 'högmotiverad' : [], 'motiverad': [],
                 'självmotiverad': [], 'självmotiverande': [], 'pliktuppfyllande': [],
                 'karismatisk': [], 'vänlig': [], 'samarbetsvänlig': [], 'självkritisk': [],
                 'självreflekterande': [], 'självdisciplin': [], 'självanalytisk': [],
                 'arbeta självständigt': [], 'självstartande': [],
                 'självkontroll': [], 'självinsikt': [], 'omdöme': [], 'vetgirig': [], 'självständig': []}
#is creativity a personality trait? But they can't be 'commanded' or ordered either...
#hmmmm...since RFPs might involve RnD.....will the thing being requested tend to be something that involves
#creativity? Creativity and critical judgment are two sides of the same coin?
creative_trait_types = {'innovativ': [], 'kreativ': []}
#when it comes to problem solving and knowledge-creation, creativity and critique should be cyclical opposites,
#But one cannot be sure employers are aware of this when they request it.
critical_trait_types = {'självkritisk': []}
# Framework label for each key of MF_trait_dict, aligned strictly by position.
# FRAGILE: keep both sequences in sync when adding or removing traits.
MF_index = ['AU', 'Virtue', 'Duty', 'Virtue', 'AU', 'Responsibility', 'Responsibility',
            'Virtue', 'Virtue', 'Duty', 'Virtue', 'Duty', 'AU', 'Virtue',
            'Virtue', 'Virtue', 'Self-reliant', 'Motivated','Motivated',
            'Self-motivated', 'Self-motivated', 'Duty', 'Virtue', 'Virtue', 'Virtue',
            'Self-reliant', 'Self-reliant', 'Self-reliant', 'Self-reliant', 'Self-reliant', 'Self-reliant',
            'Self-reliant', 'Self-reliant', 'Judgment', 'Love-of-Learning', 'Self-reliant']
print("amount of traits to be analysed: "+str(len(MF_index)))
print("amount of traits that have been categorised by a framework: "+str(len(list(MF_trait_dict.keys()))))
# NOTE(review): counter and percent_counter appear unused below — confirm and remove.
counter = 0
percent_counter = 0
# Collect every ad occurrence of each analysed trait, matched on the
# lower-cased concept label from the enrichment output.
for id, ad in tqdm(enriched_trait_candidates_ads.iterrows(), total=len(enriched_trait_candidates_ads)):
    for key, occurence_list in MF_trait_dict.items():
        for trait in ad.enriched_candidates_traits:
            if key == trait['concept_label'].lower():
                occurence_list.append((ad['doc_id'], trait))
# -
# For the final measurement of the trait-experiment to be performed, The trait data need to be prepared in a specific way. This abstract categorisation of several Trait-names into so-called "Moral frameworks", which might not be an appropriate name any longer (20-04-2021), needs to be done in order for the even further abstract clusters of Motivators and Regulators to be finally produced. Although the categorisation is not itself subjective, as it is being produced via a freely available theoretical explanation which can be read about here:
# (LINK NOT ATTACHED)
# Ideally, the categorisation should itself be produced via an already constructed, causal-dynamical graph-structure, which "clamps" the subsequent construction tasks, especially at time of observation within the ads, that are performed on the data-flow according to what the explanatory theory says about what the data-structures actually are composed of, and what causal-dynamical process makes them "depend" on each other.
# All doc_ids across every trait's occurrence list (with repetitions when
# several analysed traits co-occur in the same ad)...
duplicate_list = [trait_tuple[0] for list_of_tuples in MF_trait_dict.values() for trait_tuple in list_of_tuples]
# ...and the doc_ids that occur for exactly one analysed trait — only these
# "trait-unique" ads are kept for the contextual analysis.
unique_measures_doc_id_list = [item for item, count in collections.Counter(duplicate_list).items() if count == 1]
# These lists save duplicate doc_ids, and look for unique doc_ids, by comparing how many times the traits to be analysed have co-occurred within the same ad. Ads where traits co-occur will be removed. Why this is done is explained under the next cell.
# Rebuild each trait's occurrences as a doc_id-indexed DataFrame, keeping only
# ads in which no other analysed trait co-occurs.
unique_MF_trait_dict = {}
# Iterate over a *copy* of the keys so empty traits can be deleted from
# MF_trait_dict while looping.
for key in tqdm(MF_trait_dict.copy()):
    print(key)
    if MF_trait_dict[key] != []:
        unique_MF_trait_dict[key] = pd.DataFrame(MF_trait_dict[key], columns=['doc_id', 'traits'])
        unique_MF_trait_dict[key] = unique_MF_trait_dict[key].set_index('doc_id')
        total_measures_count = len(unique_MF_trait_dict[key])
        # Keep only the ads whose doc_id occurs for exactly one analysed trait.
        unique_MF_trait_dict[key] = unique_MF_trait_dict[key].loc[
            unique_MF_trait_dict[key].index.isin(unique_measures_doc_id_list)]
        unique_measures_count = len(unique_MF_trait_dict[key])
        print("%d-%d=%d analytically unique total ad occurrences for trait: '%s'"%
              (total_measures_count, (total_measures_count-unique_measures_count), unique_measures_count, key))
    else:
        print("trait: "+key+" does not seem to have any occurrence counts...this trait will be removed from further analyses...")
        del MF_trait_dict[key]
# Dropping co-occuring traits to be analysed from ads where a given key-trait occurs. The statistical reason this is done is to remove "spurious correlations" between the categorical contexts each trait will observed within in the subsequent contextual analysis, and to keep the variables independent. A computational analogy can be made to biology, where some species can only survive in specific environments. In order to test this fact, one needs to make sure that the species-traits: 'pink' and 'grey', cannot be measured within the same contextual observation, for example, in salt water or in fresh water simultaneously, but are kept as separate observations instead. Why is that?
# If the hypothesis to be tested is that river dolphins can survive in fresh water, categorically speaking, but great white sharks cannot and should/must/can only be found in salt water, categorically speaking, keeping observations where an animal with both pinkish and greyish traits swim, would be to 'muddy the water' of the analysis so to speak, since the occurrence of the grey trait wouldn't be a strong indicator for a species that should only be capable of surviviving in salt water, since river dolphins, which can both be grey and pink, can survive in not only fresh, but also brakish water as well. We would thus only want to keep analyses of strictly pink river dolphins, so as to know whether there is an underlying genetic, and thus survival difference, between two types of species, which are expressed by the two categorically different traits. The shark would only survive in salt water, but that has nothing to do with it's grey scale. But if we want to know whether there are different species, or distinct trait-categories, that can only solve the specific survival tasks found in salt water contexts let's say, and we know sharks are grey, the trait is more useful if it is kept separate from the pink trait, and all other traits under consideration, within any given observation.
# Replace the raw occurrence dict with the co-occurrence-filtered version.
MF_trait_dict = unique_MF_trait_dict
# Re-construct the trait dictionary so as to only contain ads where the traits do not co-occur within the same ad, keeping each ad observation trait-unique.
# Per-trait counts of the three request-sentence contexts, filled in by the
# context-matching loop further down (starts as all zeros).
MF_contingency_table = pd.DataFrame(columns=["Propositional Context", "Proposal Context",
                                             'Work Context'], index=list(MF_trait_dict.keys())).fillna(0)
#I am not sure yet why this function is needed.
#the term_candidate seem to not be the right keyword inside the context sentence, due to non-alphabetical characters...
def return_term_match(trait_candidate):
    """Locate the trait term inside its (lower-cased) request sentence.

    The canonical term may appear in the sentence glued to punctuation or
    with typos, so we pick the whitespace token with the smallest Levenshtein
    distance to the term and return a regex match for its position in the
    sentence (``None`` if it somehow cannot be found).
    """
    sentence = trait_candidate['sentence'].lower()
    term = trait_candidate['term'].lower()
    # Closest token by edit distance; ties resolve to the earliest token,
    # exactly as min() over (word, distance) pairs would.
    closest_word = min(sentence.split(),
                       key=lambda word: Levenshtein.distance(word, term))
    return re.search(re.escape(closest_word), sentence)
# (NOT SURE IF THIS NEEDED)
# This cell is for looking at what the textual representation of the trait-term is inside its request sentence context. Sometimes the actual text occurrence of the trait is not separated from non-trait-term characters. The Enrich api structures its trait-term data so as to remove these typos and non-alphabetical characters, but in this contextual analysis, these physical errors are kept in order to look at the surrounding regex match around the trait-term's text occurrence.
# Context patterns, compiled once outside the loop.
# "du har/är/kan ..." — a *propositional* (descriptive) request context.
# BUG FIX: the original second alternative used '\W{1, 4}' — with the space,
# Python's re treats '{1, 4}' as literal text rather than a quantifier, so
# that alternative could never match. '\W{1,4}' is the intended 1-4 char gap.
propositional_pattern = re.compile(
    r'(du\W+(har|är|kan|besitter|kan vara)\W+)|(\W+(har|är|kan|besitter)\W{1,4}du\W+)')
# "du borde/ska/måste ... vara/ha ..." — a *proposal* (prescriptive) context.
proposal_pattern = re.compile(
    r'(du\W+(borde|skall|måste|bör|ska|vill|kommer vilja|kommer)\W+(vara|ha|kunna|besitta)\W+)|(\W+(borde|skall|måste|bör|ska|vill|kommer vilja|kommer)\W+du\W+(vara|ha|kunna|besitta)\W+)')
# Work/profession/job word stems mark a work context.
work_pattern = re.compile(r'(\W+arbet\w+|\W+yrke\w+|\W+jobb\w+)')

# Count, per trait, in which sentence context its term occurrences appear.
for key, df in tqdm(MF_trait_dict.items()):
    for doc_id, doc_id_trait in df.iterrows():
        trait_candidate = doc_id_trait['traits']
        regex_match = return_term_match(trait_candidate)
        if regex_match:
            sentence = trait_candidate['sentence'].lower()
            # BUG FIX: clamp the left window at 0 — a negative start index made
            # the slice wrap around and (usually) come back empty for matches
            # within the first 30 characters of a long sentence.
            left_context_sentence = sentence[max(0, regex_match.start()-30):regex_match.start()]
            right_context_sentence = sentence[regex_match.end():regex_match.end()+30]
            context_sentence = left_context_sentence + right_context_sentence
            if propositional_pattern.search(left_context_sentence):
                MF_contingency_table.at[key, "Propositional Context"] +=1
            elif proposal_pattern.search(left_context_sentence):
                MF_contingency_table.at[key, "Proposal Context"] +=1
            if work_pattern.search(context_sentence):
                MF_contingency_table.at[key, 'Work Context'] +=1
# Here we look for the whether the trait-term appears in either a propositional or a proposal context inside the global request context established by the enrichment API. Although there are probably more propositional and proposal contexts than these in the ads, it is currently assumed that it is linguistically sound to only use contexts where the token 'you' appears as the subject of the sentence. This is so, because this makes the subsequent trait-terms and context-tokens independent of the hypothetically requested candidate that is being requested, and instead affects that notion, since he/she is signified by the token 'you', and 'you' is itself a type of word that is a maximally context-dependent linguistic object. Thus, observations of the traits and their abstract contexts should be more semantically separable from the particular ad they appear in, as their principled aim will be in the particular sentence to explain who the context-dependent candidate is that is being requested. Future analyses could be run which tests how correlated a term or context is to this indexical 'you', as this would suggest these tokens themselves to be more context-dependent.
# Attach a two-level (framework, trait) index so frameworks can be selected
# with .query("Moral_Framework == ...") below.
# NOTE: relies on MF_index being positionally aligned with the table's rows.
tuples = list(zip(*[MF_index, list(MF_contingency_table.index)]))
multi_index = pd.MultiIndex.from_tuples(tuples, names=['Moral_Framework', 'Trait'])
MF_contingency_table.index = multi_index
# Chi-square test of context association among the 'Virtue' traits alone
# (a few social/charisma-flavoured traits are excluded from this test).
V_table = MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Virtue'").query("Trait != 'samarbetsvänlig'").query("Trait != 'vänskaplig'").query("Trait != 'rolig'").query("Trait != 'karismatisk'")
chi2, V_p, dof, ex = stats.chi2_contingency(V_table, correction=False)
chi2, V_p
# 2x2 table of context sums: Virtue vs Duty.
V_D_table = [list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Virtue'").sum()),
             list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Duty'").sum())]
V_D_table
oddsratio, pvalue = stats.fisher_exact(V_D_table)
oddsratio, pvalue
# 3x2 table: AU vs Virtue vs Duty.
V_D_AU_table = [list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'AU'").sum()), list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Virtue'").sum()),
                list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Duty'").sum())]
chi2, V_D_AU_p, dof, ex = stats.chi2_contingency(V_D_AU_table, correction=False)
chi2, V_D_AU_p
# +
# 4x2 table: adds Responsibility.
V_D_AU_R_table = [list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Responsibility'").sum()), list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'AU'").sum()), list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Virtue'").sum()),
                  list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Duty'").sum())]
chi2, V_D_AU_R_p, dof, ex = stats.chi2_contingency(V_D_AU_R_table, correction=False)
# -
chi2, V_D_AU_R_p
# +
# 7x2 table: adds Self-reliant, Motivated and Self-motivated.
V_D_AU_R_SR_M_table = [list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Responsibility'").sum()), list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Self-reliant'").sum()), list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Motivated'").sum()), list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'AU'").sum()), list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Virtue'").sum()),
                       list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Duty'").sum()), list(MF_contingency_table.drop(columns=['Work Context']).query("Moral_Framework == 'Self-motivated'").sum())]
chi2, V_D_AU_R_SR_M_p, dof, ex = stats.chi2_contingency(V_D_AU_R_SR_M_table, correction=False)
# -
chi2, V_D_AU_R_SR_M_p
# Here a lot of different statistical tests for association between the Frameworks and the contexts are run. The actual table these tests are being run on is found below.
MF_contingency_table
# +
# Aggregate the per-framework context counts into the two top-level clusters,
# Regulator vs Motivator traits, over the Propositional/Proposal contexts.
# The framework-to-cluster assignment reproduces the original nested lists
# ('Creative' and 'Critical' remain excluded, as before).
regulator_frameworks = ['Responsibility', 'Self-reliant', 'Self-motivated', 'Judgment']
motivator_frameworks = ['AU', 'Motivated', 'Virtue', 'Duty', 'Love-of-Learning']
context_counts = MF_contingency_table.drop(columns=['Work Context'])
M_R_table = pd.DataFrame(columns=["Propositional Context", "Proposal Context"],
                         index=["Regulator Traits", "Motivator Traits"]).fillna(0)
for cluster, frameworks in (("Regulator Traits", regulator_frameworks),
                            ("Motivator Traits", motivator_frameworks)):
    for framework in frameworks:
        # Sum over the 'Moral_Framework' index level; an absent framework
        # contributes a zero sum, exactly like the original list version.
        framework_sums = context_counts.query("Moral_Framework == @framework").sum()
        M_R_table.at[cluster, "Propositional Context"] += framework_sums["Propositional Context"]
        M_R_table.at[cluster, "Proposal Context"] += framework_sums["Proposal Context"]
# -
# Here the measurements of the two different trait categories are being constructed.
# The 2x2 Regulator/Motivator x context table.
M_R_table
# Fisher's exact test for association between trait cluster and context type.
oddsratio, pvalue = stats.fisher_exact(M_R_table)
oddsratio, pvalue
# Here are the main statistical results.
# +
# Aggregate the per-framework counts into Regulator/Motivator clusters for the
# Propositional-vs-Work comparison (the Proposal context is dropped here).
regulator_frameworks_w = ['Responsibility', 'Self-reliant', 'Self-motivated', 'Judgment']
motivator_frameworks_w = ['AU', 'Motivated', 'Virtue', 'Duty', 'Love-of-Learning']
work_context_counts = MF_contingency_table.drop(columns=['Proposal Context'])
M_R_table_work = pd.DataFrame(columns=["Propositional Context", "Work Context"],
                              index=["Regulator Traits", "Motivator Traits"]).fillna(0)
for cluster, frameworks in (("Regulator Traits", regulator_frameworks_w),
                            ("Motivator Traits", motivator_frameworks_w)):
    for framework in frameworks:
        # Named-column accumulation avoids the original's reliance on the
        # positional order of the columns left after the drop.
        framework_sums = work_context_counts.query("Moral_Framework == @framework").sum()
        M_R_table_work.at[cluster, "Propositional Context"] += framework_sums["Propositional Context"]
        M_R_table_work.at[cluster, "Work Context"] += framework_sums["Work Context"]
# -
# The 2x2 Regulator/Motivator x (Propositional, Work) table.
M_R_table_work
# Fisher's exact test on the propositional-vs-work comparison.
oddsratio, pvalue = stats.fisher_exact(M_R_table_work)
oddsratio, pvalue
# Another result shows that the same ratio appears when one compares work context to the propositional context.
# +
# Aggregate the per-framework counts into Regulator/Motivator clusters for the
# Proposal-vs-Work comparison (the Propositional context is dropped here).
regulator_frameworks_nc = ['Responsibility', 'Self-reliant', 'Self-motivated', 'Judgment']
motivator_frameworks_nc = ['AU', 'Motivated', 'Virtue', 'Duty', 'Love-of-Learning']
no_prop_counts = MF_contingency_table.drop(columns=['Propositional Context'])
M_R_table_no_propositional = pd.DataFrame(columns=["Proposal Context", "Work Context"],
                                          index=["Regulator Traits", "Motivator Traits"]).fillna(0)
for cluster, frameworks in (("Regulator Traits", regulator_frameworks_nc),
                            ("Motivator Traits", motivator_frameworks_nc)):
    for framework in frameworks:
        # Named-column accumulation avoids the original's reliance on the
        # positional order of the columns left after the drop.
        framework_sums = no_prop_counts.query("Moral_Framework == @framework").sum()
        M_R_table_no_propositional.at[cluster, "Proposal Context"] += framework_sums["Proposal Context"]
        M_R_table_no_propositional.at[cluster, "Work Context"] += framework_sums["Work Context"]
# -
# The 2x2 Regulator/Motivator x (Proposal, Work) table.
M_R_table_no_propositional
# Fisher's exact test on the proposal-vs-work comparison.
oddsratio, pvalue = stats.fisher_exact(M_R_table_no_propositional)
oddsratio, pvalue
# And a final statistical result.
| ethical_analysis_of_job_ads/ethical_analysis_of_traits_in_job_ads.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Here we visualize filters and outputs using the network architecture proposed by Krizhevsky et al. for ImageNet and implemented in `caffe`.
#
# (This page follows DeCAF visualizations originally by <NAME>.)
# First, import required modules, set plotting parameters, and run `./scripts/download_model_binary.py models/bvlc_reference_caffenet` to get the pretrained CaffeNet model if it hasn't already been fetched.
# +
import numpy as np
import matplotlib.pyplot as plt
import os
# %matplotlib inline
# Make sure that caffe is on the python path:
caffe_root = '../' # this file is expected to be in {caffe_root}/examples
import sys
sys.path.insert(0, caffe_root + 'python')
import caffe
plt.rcParams['figure.figsize'] = (10, 10)
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# -
# Set Caffe to CPU mode, load the net in the test phase for inference, and configure input preprocessing.
# +
caffe.set_mode_cpu()
# NOTE(review): this first Net is constructed and then immediately overwritten
# by the second assignment below — presumably a leftover toggle between the
# reference CaffeNet and the fine-tuned background model. Confirm which model
# is intended and delete the other load (it wastes time and memory).
net = caffe.Net('rcnn_model/deploy.prototxt',
                'rcnn_model/bvlc_reference_caffenet.caffemodel',
                caffe.TEST)
net = caffe.Net('rcnn_model/deploy_nn_background.prototxt',
                'rcnn_model/caffenet_train_background_iter_10000.caffemodel',
                caffe.TEST)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1))  # HxWxC image -> CxHxW blob layout
#transformer.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy').mean(1).mean(1)) # mean pixel
transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0))  # the reference model has channels in BGR order instead of RGB
# -
# Classify the image by reshaping the net for the single input then doing the forward pass.
# Reshape the input blob for a single image, load the test picture, convert it
# to grayscale by writing the per-pixel channel mean into all three channels,
# then run the forward pass. (Python 2 print syntax — this notebook targets py2.)
net.blobs['data'].reshape(1,3,227,227)
pict = caffe.io.load_image('13.jpg')
mean_pict = pict.mean(-1)  # average over the channel axis
pict[:,:,0], pict[:,:,1], pict[:,:,2] = mean_pict, mean_pict, mean_pict
net.blobs['data'].data[...] = transformer.preprocess('data', pict)
out = net.forward()
print out.keys()
#print("Predicted class is #{}.".format(out['fc-rcnn'].argmax()))
print("Predicted class is #{}.".format(out['output'].argmax()))
# The layer features and their shapes (1 is the batch size, corresponding to the single input image in this example).
[(k, v.data.shape) for k, v in net.blobs.items()]
# The parameters and their shapes. The parameters are `net.params['name'][0]` while biases are `net.params['name'][1]`.
[(k, v[0].data.shape) for k, v in net.params.items()]
# Helper functions for visualization
# take an array of shape (n, height, width) or (n, height, width, channels)
# and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)
def vis_square(data, padsize=1, padval=0, title = None):
    """Tile an array of shape (n, height, width[, channels]) into a single
    roughly sqrt(n) x sqrt(n) grid image and display it; when `title` is
    given, also save the figure to '<title>.png'.

    BUG FIX: the original normalized with in-place ops (data -= ..., /= ...),
    mutating the caller's array — including live network weights when called
    as vis_square(net.params[...][0].data). We now normalize a float copy and
    guard against division by zero for constant input.
    """
    data = np.array(data, dtype=float)  # independent float copy
    data -= data.min()
    peak = data.max()
    if peak > 0:
        data /= peak
    # force the number of filters up to a perfect square, padding with padval
    n = int(np.ceil(np.sqrt(data.shape[0])))
    padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
    data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
    # tile the filters into one big image
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    if title is not None:
        plt.title(title)
    plt.imshow(data)
    plt.axis('off')
    if title is not None:
        plt.savefig(title + '.png', dpi = 300)
    # Close to free the figure (original indentation was lost in extraction —
    # NOTE(review): confirm whether close was meant to be conditional on title).
    plt.close()
# The input image, deprocessed from the blob back to displayable RGB.
plt.imshow(transformer.deprocess('data', net.blobs['data'].data[0]))
# First-layer convolution kernels, shown as RGB tiles.
filters = net.params['conv1'][0].data
vis_square(filters.transpose(0, 2, 3, 1), title = 'Conv1 layer kernels')
# Second-layer kernels: show the first 48 filters' 48 input channels as
# 48*48 separate 5x5 grayscale tiles.
filters = net.params['conv2'][0].data
vis_square(filters[:48].reshape(48**2, 5, 5), title = 'Conv 2 layer kernels')
# +
layer_names = net.blobs.keys()
print layer_names
# Layers whose activations are visualized below.
layers_of_interest = ['norm1', 'norm2', 'conv3', 'conv4', 'pool5']
for layer_name in layers_of_interest:
    print net.blobs[layer_name].data.shape
for name in layers_of_interest:
    feat = net.blobs[name].data[0]  # activations for the single input image
    vis_square(feat, padval=1, title = 'Activation in layer ' + str(name))
# +
from scipy.interpolate import Rbf
def stack_features(net, input_name, target_name):
    """Upsample every feature map of layer `target_name` to the spatial size
    of layer `input_name` via radial-basis-function interpolation.

    Parameters: `net` exposes `blobs[name].data` arrays of shape
    (batch, channels, h, w); only batch element 0 is used.
    Returns an array of shape (target_channels, input_h, input_w).
    """
    target_layer = net.blobs[target_name]
    n_target_feats = target_layer.data.shape[1]

    desired_shape = net.blobs[input_name].data.shape[-2:]
    current_shape = net.blobs[target_name].data.shape[-2:]
    result = np.zeros((n_target_feats, desired_shape[0], desired_shape[1]))

    # Source grid: the target layer's pixels spread evenly over [-1,1]x[-1,1].
    t_h = current_shape[0]
    t_w = current_shape[1]
    x, y = np.mgrid[-1:1:1j*t_h, -1:1:1j*t_w]

    # Destination grid at the desired (input-layer) resolution.
    new_t_h = desired_shape[0]
    new_t_w = desired_shape[1]
    x_new, y_new = np.mgrid[-1:1:1j*new_t_h,-1:1:1j*new_t_w]

    # PORTABILITY FIX: range instead of the Python-2-only xrange, so this
    # helper also runs under Python 3 (iteration behavior is identical).
    for target_feat in range(n_target_feats):
        z = target_layer.data[0,target_feat,:,:]
        rbf = Rbf(x, y, z, epsilon=0.5)
        result[target_feat,:,:] = rbf(x_new, y_new)
    return result
# -
feat = net.blobs['norm2'].data[0]
#vis_square(feat, padval=1)
# +
# Upsample each interesting layer's activation maps to norm1's spatial
# resolution and stack them all along the channel axis.
input_name = 'norm1'
layers_of_interest = ['norm1', 'norm2', 'conv3', 'conv4', 'pool5']
stacked_feats = [stack_features(net, input_name, target_name) for target_name in layers_of_interest]
stacked_feats = np.vstack(stacked_feats)
print stacked_feats.shape
#vis_square(stacked_feats, padval=1)
# -
"""
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
import scipy.sparse as sparse
import time
print stacked_feats.shape
stacked_feats_t = np.transpose(stacked_feats, (1, 2, 0))
print stacked_feats.shape
h = stacked_feats_t.shape[0]
w = stacked_feats_t.shape[1]
d = stacked_feats_t.shape[2]
flat_feats = np.reshape(stacked_feats_t, (h * w, d))
# Convert the image into a graph
graph = image.grid_to_graph(n_x = h, n_y = w)
non_zero_indices = zip(*graph.nonzero())
for idx, (i,j) in enumerate(non_zero_indices):
graph.data[idx] = np.mean((flat_feats[i] - flat_feats[j]) ** 2)
#beta = 1e-2
#eps = 1e-3
#graph.data = np.exp(- beta * graph.data / flat_feats.var()) + eps
N_REGIONS = 8
for assign_labels in ('kmeans', 'discretize'):
t0 = time.time()
labels = spectral_clustering(graph, n_clusters=N_REGIONS,
assign_labels=assign_labels)
t1 = time.time()
labels = labels.reshape((h, w))
plt.figure(figsize=(5, 5))
plt.imshow(labels, cmap=plt.cm.gray)
for l in range(N_REGIONS):
plt.contour(labels == l, contours=1,
colors=[plt.cm.spectral(l / float(N_REGIONS)), ])
plt.xticks(())
plt.yticks(())
plt.title('Spectral clustering: %s, %.2fs' % (assign_labels, (t1 - t0)))
"""
# +
from sklearn.feature_extraction import image
from sklearn.cluster import AgglomerativeClustering
import scipy
import cv2
import matplotlib.patches as patches
# Reorder to (h, w, d) so each spatial position carries its stacked feature vector.
stacked_feats_t = np.transpose(stacked_feats, (1, 2, 0))
#
# Whiten data (disabled)
#
#stacked_feats_mean = stacked_feats.mean(0).mean(0)
#stacked_feats -= stacked_feats_mean
#stacked_feats_std = np.sqrt(stacked_feats.var(0).var(0))
#stacked_feats /= stacked_feats_std
h = stacked_feats_t.shape[0]
w = stacked_feats_t.shape[1]
d = stacked_feats_t.shape[2]
flat_feats = np.reshape(stacked_feats_t, (h * w, d))
# Convert the image into a graph: grid connectivity restricts cluster merging
# to spatially adjacent positions.
graph = image.grid_to_graph(n_x = h, n_y = w)
N_REGIONS = 80
agg_clustering = AgglomerativeClustering(n_clusters=N_REGIONS,
                                         linkage = 'average',
                                         connectivity = graph).fit(flat_feats)
input_image = transformer.deprocess('data', net.blobs['data'].data[0])
image_h = input_image.shape[0]
image_w = input_image.shape[1]
labels = agg_clustering.labels_.reshape((h, w))
# Nearest-neighbour (order=0) zoom of the label map up to image resolution.
labels = np.round(scipy.ndimage.interpolation.zoom(labels, float(image_h) / float(h), order = 0))
context_padding = 0.2  # fraction of region size added around each box below
rois = []
# Left panel: region contours over the input image.
plt.subplot(121)
plt.imshow(input_image)
for l in range(N_REGIONS):
    c = plt.cm.spectral(l / float(N_REGIONS))
    plt.contour(labels == l, contours=1,
                colors=[c, ], alpha = 1.0)
plt.xticks(())
plt.yticks(())
plt.title('Regions of interest')
# Right panel: padded bounding boxes (drawn by the next cell's loop).
plt.subplot(122)
plt.imshow(input_image)
# Draw a context-padded bounding box around every clustered region and collect
# the boxes (as [xmin, xmax, ymin, ymax]) in `rois`.
for l in range(N_REGIONS):
    y_roi, x_roi = np.nonzero(labels == l)
    xmin = np.min(x_roi)
    xmax = np.max(x_roi)
    ymin = np.min(y_roi)
    ymax = np.max(y_roi)
    window_w = xmax - xmin
    # BUG FIX: the original computed window_h as xmax - xmin, so the vertical
    # context padding used the box *width* instead of its height.
    window_h = ymax - ymin
    # Grow the box by context_padding on each side, clamped to the image.
    xmin = int(max(0, xmin - context_padding * window_w))
    xmax = int(min(image_w, xmax + context_padding * window_w))
    ymin = int(max(0, ymin - context_padding * window_h))
    ymax = int(min(image_h, ymax + context_padding * window_h))
    rois.append([xmin, xmax, ymin, ymax])
    c = plt.cm.spectral(l / float(N_REGIONS))
    coords = (xmin, ymin), xmax - xmin, ymax - ymin
    currentAxis = plt.gca()
    currentAxis.add_patch(plt.Rectangle(*coords, fill = False, edgecolor = c, linewidth = 2))
# Hide ticks, label the panel with the stacked layers, and save the figure.
plt.xticks(())
plt.yticks(())
plt.title('Average hier. clust. ' + ' '.join(layers_of_interest))
plt.savefig('H_clustering.png', dpi = 300)
# +
'''
Created on May 1, 2015
Hybrid NN for featurization / SVM or other for classification
The topmost classifier is called the ``head''.
@author: <NAME>
'''
import numpy as np
import sklearn as sk
import caffe
import cPickle as pickle
caffe.set_mode_cpu()
class Hybrid_classifier:
    """CNN featurizer with an optional shallow ``head'' classifier on top.

    The caffe net turns an image (or image window) into feature vectors;
    when a pickled head classifier is supplied it performs the final
    classification on those features, otherwise the net's own output
    layer is used directly.
    """
    #
    # Instantiates caffe model and head classifier
    # @param model_filepath String path to caffe model prototxt
    # @param weight_filepath String path to model's weights
    # @param head_filepath String path to pickled shallow classifier (optional)
    # @param label_lookup String path to pickled label lookup table (optional)
    # @param mean_path String path to mean image (.npy)
    # @param context_pad pixels of context included around window crops
    def __init__(self, model_filepath,
                 weight_filepath,
                 head_filepath = None,
                 label_lookup = None,
                 mean_path = 'ilsvrc_2012_mean.npy',
                 context_pad = 16):
        # Setup neural net
        self.net = caffe.Net(model_filepath, weight_filepath, caffe.TEST)
        # Setup ``head'' if needed
        if head_filepath is not None:
            self.head = pickle.load(open(head_filepath, 'rb'))
        else:
            self.head = None
        # Setup label lookup table if needed
        if label_lookup is not None:
            self.label_to_num = pickle.load(open(label_lookup, 'rb'))
            # Inverse mapping: class index -> label string.
            self.num_to_label = dict(zip(self.label_to_num.values(), self.label_to_num.keys()))
        else:
            self.label_to_num = None
            self.num_to_label = None
        # Setup image transformations: mean subtraction, HWC -> CHW
        # transpose, RGB -> BGR channel swap, [0, 1] -> [0, 255] scaling.
        self.mean_image = np.load(mean_path)
        self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
        self.transformer.set_mean('data', np.load(mean_path).mean(1).mean(1))
        self.transformer.set_transpose('data', (2,0,1))
        self.transformer.set_channel_swap('data', (2,1,0))
        self.transformer.set_raw_scale('data', 255.0)
        self.context_pad = context_pad
        self.configure_crop(context_pad)
    #
    # Featurize a given image, works with a file path or an image
    #
    def featurize(self, input_image, target_layers = ['fc7']):
        """Return the activations of target_layers for a single image."""
        if type(input_image) is str:
            im = caffe.io.load_image(input_image)
            out = self.net.forward_all(target_layers,
                                       data = np.asarray([self.transformer.preprocess('data', im)]))
        else:
            out = self.net.forward_all(target_layers,
                                       data = np.asarray([self.transformer.preprocess('data', input_image)]))
        return [out[x] for x in target_layers]
    #
    # Classify a given image, works with a file path or an image
    #
    def classify_with_head(self, input_image, target_feature, log_probas = False):
        """Classify via the head classifier fed with target_feature activations."""
        target_layers = [target_feature]
        if type(input_image) is str:
            im = caffe.io.load_image(input_image)
            out = self.featurize(im, target_layers)
        else:
            out = self.featurize(input_image, target_layers)
        feature_vect = out[0]
        if not log_probas:
            return self.head.predict(feature_vect)
        else:
            return self.head.predict_proba(feature_vect)

    def classify_pure_NN(self, input_image, log_probas = False):
        """Classify with the net alone: log-probabilities or the argmax class."""
        out = self.net.forward_all(data = np.asarray([self.transformer.preprocess('data', input_image)]))
        # list(...) keeps this working on both Python 2 lists and
        # Python 3 dict views; the last output blob holds the probabilities.
        probas = list(out.values())[-1]
        if log_probas:
            return np.log(probas)
        else:
            return np.argmax(probas)

    def classify(self, input_image, log_probas = False, target_feature = None):
        """Dispatch to the head classifier when present, else the pure net."""
        if self.head is not None:
            return self.classify_with_head(input_image, target_feature, log_probas)
        else:
            return self.classify_pure_NN(input_image, log_probas)

    def classify_windows(self, image, windows, feature_layer = 'fc7'):
        """
        Do windowed detection over given image and windows. Windows are
        extracted then warped to the input dimensions of the net.
        Take
        image: image ndarray to crop windows from.
        windows: iterable of window coordinates as ymin, xmin, ymax, xmax.
        feature_layer: layer whose activations feed the head classifier.
        Give
        detections: list of {window: crop coordinates,
            prediction: prediction vector, filename: None} dicts.
        """
        # Extract windows.
        window_inputs = []
        for window in windows:
            window_inputs.append(self.crop(image, window))
        # Run through the net (warping windows to input dimensions).
        in_ = self.net.inputs[0]
        caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2])
                            + self.net.blobs[in_].data.shape[2:],
                            dtype=np.float32)
        for ix, window_in in enumerate(window_inputs):
            caffe_in[ix] = self.transformer.preprocess(in_, window_in)
        if self.head is None:
            out = self.net.forward_all(**{in_: caffe_in})
            # predictions = out[self.outputs[0]].squeeze(axis=(2,3))
            predictions = out[self.net.outputs[0]].squeeze() # https://github.com/BVLC/caffe/issues/2041
        else:
            out = self.net.forward_all([feature_layer], **{in_: caffe_in})
            #
            feature_vects = out[feature_layer].squeeze()
            #
            predictions = self.head.predict_proba(feature_vects)
        # Package predictions with windows.
        detections = []
        for ix, window in enumerate(windows):
            detections.append({
                'window': window,
                'prediction': predictions[ix],
                # BUG FIX: the original dict literal had 'filename' with no
                # value, which is a syntax error. No filename is known at this
                # point, so store None to keep the key for downstream code.
                'filename': None,
            })
        return detections

    def lookup(self, label):
        """Translate label string -> class index, or class index -> label."""
        if type(label) is str:
            return self.label_to_num[label]
        else:
            return self.num_to_label[int(label)]

    def crop(self, im, window):
        """
        Crop a window from the image for detection. Include surrounding context
        according to the `context_pad` configuration.
        Take
        im: H x W x K image ndarray to crop.
        window: bounding box coordinates as ymin, xmin, ymax, xmax.
        Give
        crop: cropped window.
        """
        # Crop window from the image.
        crop = im[window[0]:window[2], window[1]:window[3]]
        if self.context_pad:
            box = window.copy()
            crop_size = self.net.blobs[self.net.inputs[0]].width # assumes square
            scale = crop_size / (1. * crop_size - self.context_pad * 2)
            # Crop a box + surrounding context.
            half_h = (box[2] - box[0] + 1) / 2.
            half_w = (box[3] - box[1] + 1) / 2.
            center = (box[0] + half_h, box[1] + half_w)
            scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w))
            box = np.round(np.tile(center, 2) + scaled_dims)
            full_h = box[2] - box[0] + 1
            full_w = box[3] - box[1] + 1
            scale_h = crop_size / full_h
            scale_w = crop_size / full_w
            pad_y = round(max(0, -box[0]) * scale_h) # amount out-of-bounds
            pad_x = round(max(0, -box[1]) * scale_w)
            # Clip box to image dimensions.
            im_h, im_w = im.shape[:2]
            box = np.clip(box, 0., [im_h, im_w, im_h, im_w])
            clip_h = box[2] - box[0] + 1
            clip_w = box[3] - box[1] + 1
            assert(clip_h > 0 and clip_w > 0)
            crop_h = round(clip_h * scale_h)
            crop_w = round(clip_w * scale_w)
            if pad_y + crop_h > crop_size:
                crop_h = crop_size - pad_y
            if pad_x + crop_w > crop_size:
                crop_w = crop_size - pad_x
            # collect with context padding and place in input
            # with mean padding
            context_crop = im[box[0]:box[2], box[1]:box[3]]
            context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w))
            crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean
            crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop
        #
        return crop

    def configure_crop(self, context_pad):
        """
        Configure crop dimensions and amount of context for cropping.
        If context is included, make the special input mean for context padding.
        Take
        context_pad: amount of context for cropping.
        """
        # crop dimensions
        in_ = self.net.inputs[0]
        tpose = self.transformer.transpose[in_]
        inv_tpose = [tpose[t] for t in tpose]
        self.crop_dims = np.array(self.net.blobs[in_].data.shape[1:])[inv_tpose]
        #.transpose(inv_tpose)
        # context padding
        self.context_pad = context_pad
        if self.context_pad:
            in_ = self.net.inputs[0]
            transpose = self.transformer.transpose.get(in_)
            channel_order = self.transformer.channel_swap.get(in_)
            raw_scale = self.transformer.raw_scale.get(in_)
            # Padding context crops needs the mean in unprocessed input space.
            mean = self.transformer.mean.get(in_)
            if mean is not None:
                inv_transpose = [transpose[t] for t in transpose]
                crop_mean = mean.copy().transpose(inv_transpose)
                if channel_order is not None:
                    channel_order_inverse = [channel_order.index(i)
                                             for i in range(crop_mean.shape[2])]
                    crop_mean = crop_mean[:,:, channel_order_inverse]
                if raw_scale is not None:
                    crop_mean /= raw_scale
                self.crop_mean = crop_mean
            else:
                self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
# +
# Experiment configuration: was the net finetuned with a background class,
# and is a shallow "hybrid" head used on top of the CNN features?
BACKGROUND = True
HYBRID = False
# The hybrid head's probabilities are calibrated differently from the raw
# NN softmax, hence the lower detection threshold.
if HYBRID:
    threshold = 0.80
else:
    threshold = 0.99
# Label lookup table: maps label string <-> numeric class index.
label_path = '../image_dump/label_lookup_table.pi'
lookup_table = pickle.load(open('../image_dump/label_lookup_table.pi', 'rb'))
# Label strings ordered by their numeric class index.
labels = [x[0] for x in sorted(lookup_table.items(), key = (lambda x : x[1]))]
print labels
hybrid_name = 'forest'
if BACKGROUND:
    # Build detector with our finetuned net
    model_file = '../finetuning/rcc_net/deploy_nn_background.prototxt'
    model_weights = '../finetuning/rcc_net/background/caffenet_train_background_iter_10000.caffemodel'
    mean_image = '../finetuning/ilsvrc_2012_mean.npy'
    if HYBRID:
        head_path = '../rcnn_features_ml/%s_fc7_with_bg_model' % hybrid_name
    else:
        hybrid_name = 'NN'
else:
    # Net finetuned without the background class.
    model_file = '../finetuning/rcc_net/deploy_nn.prototxt'
    model_weights = '../finetuning/rcc_net/no_background/caffenet_train_iter_8000.caffemodel'
    mean_image = '../finetuning/ilsvrc_2012_mean.npy'
    if HYBRID:
        head_path = '../rcnn_features_ml/%s_fc7_no_bg_model' % hybrid_name
    else:
        hybrid_name = 'NN'
    # NOTE(review): presumably drops the background label so `labels` matches
    # the no-background net's output width -- confirm against the lookup table.
    labels = labels[1:]
if HYBRID:
    classifier = Hybrid_classifier(model_file,
                                   model_weights,
                                   head_path,
                                   label_lookup = label_path,
                                   mean_path = mean_image)
else:
    classifier = Hybrid_classifier(model_file,
                                   model_weights,
                                   label_lookup = label_path,
                                   mean_path = mean_image)
# +
import pandas as pd
# Convert rois ([xmin, xmax, ymin, ymax]) to the window layout that
# classify_windows expects: [ymin, xmin, ymax, xmax].
window_list = np.asarray([[x[2], x[0], x[3], x[1]] for x in rois], dtype = np.int)
detections = classifier.classify_windows(input_image, window_list)
detection_df = pd.DataFrame(detections)
# Expand each per-window prediction vector into one column per class label.
predictions = pd.DataFrame(np.vstack(detection_df.prediction.values), columns = labels)
for label in labels:
    detection_df[label] = predictions[label]
print detection_df
# -
# Activations across windows
plt.gray()
plt.matshow(detection_df[labels].values[:100])
plt.xlabel('Classes')
plt.ylabel('Windows')
# Take maxima across windows
max_s = detection_df[labels].max(0)
# NOTE(review): Series.sort(ascending=False) is the legacy in-place sort from
# old pandas; newer pandas uses sort_values. Kept as-is for this environment.
max_s.sort(ascending=False)
# Best score seen for each class, over all windows.
threshold_dict = dict(zip(max_s.index, max_s.values))
print(max_s[:10])
print threshold_dict
def show_detections(im, indices, col_name, det_df, object_color = 'r'):
    """Overlay detection windows from det_df (rows `indices`) on image im.

    Each window is drawn twice: a translucent filled rectangle plus an
    outline, both with opacity scaled by the probability in column
    `col_name`. Windows are stored as [ymin, xmin, ymax, xmax].
    """
    plt.imshow(im)
    axes = plt.gca()
    for row in indices:
        proba = det_df[col_name][row]
        window = det_df['window'][row]
        y0, x0 = window[0], window[1]
        y1, x1 = window[2], window[3]
        rect = (x0, y0), x1 - x0, y1 - y0
        axes.add_patch(plt.Rectangle(*rect, fill=True, facecolor=object_color,
                                     edgecolor=object_color, linewidth=2,
                                     alpha=0.1 * proba))
        axes.add_patch(plt.Rectangle(*rect, fill=False, facecolor=object_color,
                                     edgecolor=object_color, linewidth=2,
                                     alpha=proba))
# +
# Find, print, and display the top detections: car
# Per-class display colours for the detection overlays.
colors = {'car' : 'blue',
          'person' : 'green',
          'bicycle' : 'purple',
          'bus' : 'orange',
          'motorbike' : 'pink'}
#for image_index in range(len(fnames)):
for target_object, object_color in colors.iteritems():
    sub_detection_df = detection_df
    #sub_detection_df = detection_df[detection_df['filename'] == os.path.abspath(fnames[image_index])]
    # Rows (windows) whose class probability clears the detection threshold.
    indices = sub_detection_df[sub_detection_df[target_object] >= threshold].index.get_values()
    #indices = [sub_detection_df[target_object].argmax()]
    #im = plt.imread(fnames[image_index])
    im = input_image
    show_detections(im, indices, target_object, sub_detection_df, object_color)
plt.axis('off')
plt.savefig('Failed neural net segmentation.png', dpi = 300)
#if BACKGROUND:
#    plt.savefig('Multi_detect_%s_background_%s.png' % (hybrid_name, fnames[image_index].split('/')[-1]), dpi = 300)
#else:
#    plt.savefig('Multi_detect_%s_no_background_%s.png' % (hybrid_name, fnames[image_index].split('/')[-1]), dpi = 300)
#plt.show()
# -
| smartCams/segmentation/Seg_NN_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Ranjith-arch/JavaLetsupgrade/blob/main/Day1%20data%20science.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="zk_ksjFJfNIo"
# # Question1
# + id="pA1wzFr1fRsA" outputId="590df56f-d68c-4ae7-f317-f68a3b48158f" colab={"base_uri": "https://localhost:8080/"}
import random
# Word-guess demo: keep shuffling the scrambled letters of "OBANWRI" and
# printing each attempt until a shuffle happens to spell "RAINBOW".
str1="OBANWRI"
l=list(str1)
str2="RAINBOW"
s=list(str2)
while s!=l:
    random.shuffle(l)
    print("".join(l))
# Loop exits only once the shuffled letters equal the target word.
print("\n\nYou Guess The Correct Word!!\n")
print("".join(l))
# + [markdown] id="Li2hGRbUfgT_"
# #Question2
# + id="T4m3cyJvfly1" outputId="69f980d7-4806-4d34-9bef-56dd32fc22f9" colab={"base_uri": "https://localhost:8080/"}
# Question 2: print the sample string in upper case.
message = "Lets upgrade"
print(message.upper())
# + [markdown] id="zLIazqIKfrzU"
# #Question3
# + id="TkYaFV5XfvRE" outputId="c2c9c938-2247-4221-8905-d1b3afe93dca" colab={"base_uri": "https://localhost:8080/"}
# Question 3: report PROFIT/LOSS from a cost price and a selling price.
# SECURITY FIX: eval() on raw user input executes arbitrary code; float()
# parses the numeric value safely and compares identically.
cp = float(input("Enter cost price: "))
sp = float(input("Enter selling price: "))
if cp > sp:
    print("\nLOSS")
elif cp < sp:
    print("\nPROFIT")
else:
    print("\nNEITHER")
# + [markdown] id="jH_Kxz2Rf-8W"
# #Question4
# + id="Qn2dEl1SgCXK" outputId="c5e76502-c768-4d39-e2de-bcecfd728b3b" colab={"base_uri": "https://localhost:8080/"}
# Convert an amount in Euros to Rupees at a fixed rate of 80 Rs/EUR.
# NOTE(review): eval() on raw user input executes arbitrary code; prefer
# float(input(...)) -- kept as-is here because float() would also change the
# printed value for whole-number input (e.g. "8000" vs "8000.0").
eur=eval(input("Enter amount in Euros:"))
rs=80*eur
print("Amount equivalent in Rs: Rs."+str(rs))
| Day1 data science.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a id="top"></a>
# + [markdown] hideCode=false hidePrompt=false
# # Db2 11 Time and Date Functions
# Updated: 2019-10-03
# + [markdown] hideCode=false hidePrompt=false
# There are plenty of new date and time functions found in Db2 11. These functions allow you to extract portions from a date and format the date in a variety of different ways. While Db2 already has a number of date and time functions, these new functions allow for greater compatibility with other database implementations, making it easier to port to DB2.
# + hideCode=false hideOutput=false hidePrompt=false
# %run db2.ipynb
# %run connection.ipynb
# -
# # Table of Contents
#
# * [Extract Function](#extract)
# * [DATE_PART Function](#part)
# * [DATE_TRUNC Function](#trunc)
# * [Extracting Specific Days from a Month](#month)
# * [Date Addition](#add)
# * [Extracting Weeks, Months, Quarters, and Years](#extract)
# * [Next Day Function](#nextday)
# * [Between Date/Time Functions](#between)
# * [Months Between](#mbetween)
# * [Date Duration](#duration)
# * [Overlaps Predicate](#overlaps)
# * [UTC Time Conversions](#utc)
# [Back to Top](#top)
# <a id='extract'></a>
# + [markdown] hideCode=false hidePrompt=false
# ## Extract Function
#
# The EXTRACT function extracts an element from a date/time value. The syntax of the EXTRACT command is:
# ```Python
# EXTRACT( element FROM expression )
# ```
# This is a slightly different format from most functions that you see in the DB2. Element must be one of the following values:
#
# |Element Name |Description
# |:-:|:-:|
# |EPOCH | Number of seconds since 1970-01-01 00:00:00.00. The value can be positive or negative.
# |MILLENNIUM(S) | The millennium is to be returned.
# |CENTURY(CENTURIES)| The number of full 100-year periods represented by the year.
# |DECADE(S) | The number of full 10-year periods represented by the year.
# |YEAR(S) | The year portion is to be returned.
# |QUARTER | The quarter of the year (1 - 4) is to be returned.
# |MONTH | The month portion is to be returned.
# |WEEK | The number of the week of the year (1 - 53) that the specified day is to be returned.
# |DAY(S) | The day portion is to be returned.
# |DOW | The day of the week that is to be returned. Note that "1" represents Sunday.
# |DOY | The day (1 - 366) of the year that is to be returned.
# |HOUR(S) | The hour portion is to be returned.
# |MINUTE(S) | The minute portion is to be returned.
# |SECOND(S) | The second portion is to be returned.
# |MILLISECOND(S) | The second of the minute, including fractional parts to one thousandth of a second
# |MICROSECOND(S) | The second of the minute, including fractional parts to one millionth of a second
#
# The synonym NOW is going to be used in the next example. NOW is a synonym for CURRENT TIMESTAMP.
# + hideCode=false hidePrompt=false
# %sql VALUES NOW
# + [markdown] hideCode=false hidePrompt=false
# This SQL will return every possible extract value from the current date.
# + hideCode=false hidePrompt=false magic_args="-a" language="sql"
# WITH DATES(FUNCTION, RESULT) AS (
# VALUES
# ('EPOCH', EXTRACT( EPOCH FROM NOW )),
# ('MILLENNIUM(S)', EXTRACT( MILLENNIUM FROM NOW )),
# ('CENTURY(CENTURIES)', EXTRACT( CENTURY FROM NOW )),
# ('DECADE(S)', EXTRACT( DECADE FROM NOW )),
# ('YEAR(S)', EXTRACT( YEAR FROM NOW )),
# ('QUARTER', EXTRACT( QUARTER FROM NOW )),
# ('MONTH', EXTRACT( MONTH FROM NOW )),
# ('WEEK', EXTRACT( WEEK FROM NOW )),
# ('DAY(S)', EXTRACT( DAY FROM NOW )),
# ('DOW', EXTRACT( DOW FROM NOW )),
# ('DOY', EXTRACT( DOY FROM NOW )),
# ('HOUR(S)', EXTRACT( HOURS FROM NOW )),
# ('MINUTE(S)', EXTRACT( MINUTES FROM NOW )),
# ('SECOND(S)', EXTRACT( SECONDS FROM NOW )),
# ('MILLISECOND(S)', EXTRACT( MILLISECONDS FROM NOW )),
# ('MICROSECOND(S)', EXTRACT( MICROSECONDS FROM NOW ))
# )
# SELECT FUNCTION, CAST(RESULT AS BIGINT) FROM DATES
# -
# [Back to Top](#top)
# <a id='part'></a>
# + [markdown] hideCode=false hidePrompt=false
# ## DATE_PART Function
#
# DATE_PART is similar to the EXTRACT function but it uses the more familiar syntax:
#
# ```Python
# DATE_PART(element, expression)
# ```
#
# In the case of this function, the element must be placed in quotes, rather than as a keyword in the EXTRACT function. In addition, DATE_PART always returns a BIGINT, while the EXTRACT function will return a different data type depending on the element being returned. For instance, compare the SECONDS option for both functions. In the case of EXTRACT you get a DECIMAL result while for DATE_PART you get a truncated BIGINT.
# + magic_args="-a" language="sql"
# WITH DATES(FUNCTION, RESULT) AS (
# VALUES
# ('EPOCH', DATE_PART('EPOCH' ,NOW )),
# ('MILLENNIUM(S)', DATE_PART('MILLENNIUM' ,NOW )),
# ('CENTURY(CENTURIES)', DATE_PART('CENTURY' ,NOW )),
# ('DECADE(S)', DATE_PART('DECADE' ,NOW )),
# ('YEAR(S)', DATE_PART('YEAR' ,NOW )),
# ('QUARTER', DATE_PART('QUARTER' ,NOW )),
# ('MONTH', DATE_PART('MONTH' ,NOW )),
# ('WEEK', DATE_PART('WEEK' ,NOW )),
# ('DAY(S)', DATE_PART('DAY' ,NOW )),
# ('DOW', DATE_PART('DOW' ,NOW )),
# ('DOY', DATE_PART('DOY' ,NOW )),
# ('HOUR(S)', DATE_PART('HOURS' ,NOW )),
# ('MINUTE(S)', DATE_PART('MINUTES' ,NOW )),
# ('SECOND(S)', DATE_PART('SECONDS' ,NOW )),
# ('MILLISECOND(S)', DATE_PART('MILLISECONDS' ,NOW )),
# ('MICROSECOND(S)', DATE_PART('MICROSECONDS' ,NOW ))
# )
# SELECT FUNCTION, CAST(RESULT AS BIGINT) FROM DATES;
# -
# [Back to Top](#top)
# <a id='trunc'></a>
# ## DATE_TRUNC Function
#
# DATE_TRUNC computes the same results as the DATE_PART function but then truncates the value down. Note that not all values can be truncated. The function syntax is:
#
# ```Python
# DATE_TRUNC(element, expression)
# ```
#
# The element must be placed in quotes, rather than as a keyword in the EXTRACT function.
# Note that DATE_TRUNC returns the truncated date/time value itself rather than a numeric part.
#
# The elements that can be truncated are:
#
# |Element Name |Description
# |:---------------- |:------------------------------------------------------------------------------
# |MILLENNIUM(S) |The millennium is to be returned.
# |CENTURY(CENTURIES) |The number of full 100-year periods represented by the year.
# |DECADE(S) |The number of full 10-year periods represented by the year.
# |YEAR(S) |The year portion is to be returned.
# |QUARTER |The quarter of the year (1 - 4) is to be returned.
# |MONTH |The month portion is to be returned.
# |WEEK |The number of the week of the year (1 - 53) that the specified day is to be returned.
# |DAY(S) |The day portion is to be returned.
# |HOUR(S) |The hour portion is to be returned.
# |MINUTE(S) |The minute portion is to be returned.
# |SECOND(S) |The second portion is to be returned.
# |MILLISECOND(S) |The second of the minute, including fractional parts to one thousandth of a second
# |MICROSECOND(S)     |The second of the minute, including fractional parts to one millionth of a second
# + magic_args="-a" language="sql"
# WITH DATES(FUNCTION, RESULT) AS (
# VALUES
# ('MILLENNIUM(S)', DATE_TRUNC('MILLENNIUM' ,NOW )),
# ('CENTURY(CENTURIES)', DATE_TRUNC('CENTURY' ,NOW )),
# ('DECADE(S)', DATE_TRUNC('DECADE' ,NOW )),
# ('YEAR(S)', DATE_TRUNC('YEAR' ,NOW )),
# ('QUARTER', DATE_TRUNC('QUARTER' ,NOW )),
# ('MONTH', DATE_TRUNC('MONTH' ,NOW )),
# ('WEEK', DATE_TRUNC('WEEK' ,NOW )),
# ('DAY(S)', DATE_TRUNC('DAY' ,NOW )),
# ('HOUR(S)', DATE_TRUNC('HOURS' ,NOW )),
# ('MINUTE(S)', DATE_TRUNC('MINUTES' ,NOW )),
# ('SECOND(S)', DATE_TRUNC('SECONDS' ,NOW )),
# ('MILLISECOND(S)', DATE_TRUNC('MILLISECONDS' ,NOW )),
# ('MICROSECOND(S)', DATE_TRUNC('MICROSECONDS' ,NOW ))
# )
# SELECT FUNCTION, RESULT FROM DATES
# -
# [Back to Top](#top)
# <a id='month'></a>
# ## Extracting Specific Days from a Month
#
# There are three functions that retrieve day information from a date. These functions include:
#
# - DAYOFMONTH - returns an integer between 1 and 31 that represents the day of the argument
# - FIRST_DAY - returns a date or timestamp that represents the first day of the month of the argument
# - DAYS_TO_END_OF_MONTH - returns the number of days to the end of the month
#
# This is the current date so that you know what all of the calculations are based on.
# %sql VALUES NOW
# This expression (DAYOFMONTH) returns the day of the month.
# %sql VALUES DAYOFMONTH(NOW)
# FIRST_DAY will return the first day of the month. You could probably compute this with standard SQL date functions, but it is a lot easier just to use this builtin function.
# %sql VALUES FIRST_DAY(NOW)
# Finally, DAYS_TO_END_OF_MONTH will return the number of days to the end of the month. A zero would be returned if you are on the last day of the month.
# %sql VALUES DAYS_TO_END_OF_MONTH(NOW)
# [Back to Top](#top)
# <a id='add'></a>
# ## Date Addition Functions
#
# The date addition functions will add or subtract days from a current timestamp. The functions that
# are available are:
#
# - ADD_YEARS - Add years to a date
# - ADD_MONTHS - Add months to a date
# - ADD_DAYS - Add days to a date
# - ADD_HOURS - Add hours to a date
# - ADD_MINUTES - Add minutes to a date
# - ADD_SECONDS - Add seconds to a date
#
# The format of the function is:
# ```Python
# ADD_DAYS ( expression, numeric expression )
# ```
#
# The following SQL will add one "unit" to the current date.
# + language="sql"
# WITH DATES(FUNCTION, RESULT) AS
# (
# VALUES
# ('CURRENT DATE ',NOW),
# ('ADD_YEARS ',ADD_YEARS(NOW,1)),
# ('ADD_MONTHS ',ADD_MONTHS(NOW,1)),
# ('ADD_DAYS ',ADD_DAYS(NOW,1)),
# ('ADD_HOURS ',ADD_HOURS(NOW,1)),
# ('ADD_MINUTES ',ADD_MINUTES(NOW,1)),
# ('ADD_SECONDS ',ADD_SECONDS(NOW,1))
# )
# SELECT * FROM DATES
# -
# A negative number can be used to subtract values from the current date.
# + language="sql"
# WITH DATES(FUNCTION, RESULT) AS
# (
# VALUES
# ('CURRENT DATE ',NOW),
# ('ADD_YEARS ',ADD_YEARS(NOW,-1)),
# ('ADD_MONTHS ',ADD_MONTHS(NOW,-1)),
# ('ADD_DAYS ',ADD_DAYS(NOW,-1)),
# ('ADD_HOURS ',ADD_HOURS(NOW,-1)),
# ('ADD_MINUTES ',ADD_MINUTES(NOW,-1)),
# ('ADD_SECONDS ',ADD_SECONDS(NOW,-1))
# )
# SELECT * FROM DATES
# -
# [Back to Top](#top)
# <a id='extract'></a>
# ## Extracting Weeks, Months, Quarters, and Years from a Date
#
# There are four functions that extract different values from a date. These functions include:
#
# - THIS_QUARTER - returns the first day of the quarter
# - THIS_WEEK - returns the first day of the week (Sunday is considered the first day of that week)
# - THIS_MONTH - returns the first day of the month
# - THIS_YEAR - returns the first day of the year
#
# + language="sql"
# WITH DATES(FUNCTION, RESULT) AS
# (
# VALUES
# ('CURRENT DATE ',NOW),
# ('THIS_WEEK ',THIS_WEEK(NOW)),
# ('THIS_MONTH ',THIS_MONTH(NOW)),
# ('THIS_QUARTER ',THIS_QUARTER(NOW)),
# ('THIS_YEAR ',THIS_YEAR(NOW))
# )
# SELECT * FROM DATES
# -
# There is also a NEXT function for each of these. The NEXT function will return the next week, month, quarter,
# or year given a current date.
# + language="sql"
# WITH DATES(FUNCTION, RESULT) AS
# (
# VALUES
# ('CURRENT DATE ',NOW),
# ('NEXT_WEEK ',NEXT_WEEK(NOW)),
# ('NEXT_MONTH ',NEXT_MONTH(NOW)),
# ('NEXT_QUARTER ',NEXT_QUARTER(NOW)),
# ('NEXT_YEAR ',NEXT_YEAR(NOW))
# )
# SELECT * FROM DATES
# -
# [Back to Top](#top)
# <a id='nextday'></a>
# ## Next Day Function
#
# The previous set of functions returned a date value for the current week, month, quarter, or year (or the next one
# if you used the NEXT function). The NEXT_DAY function returns the next day (after the date you supply)
# based on the string representation of the day. The date string will be dependent on the codepage that you are using for the database.
#
# The date (from an English perspective) can be:
#
# |Day |Short form
# |:-------- |:---------
# |Monday |MON
# |Tuesday |TUE
# |Wednesday |WED
# |Thursday |THU
# |Friday |FRI
# |Saturday |SAT
# |Sunday |SUN
#
# The following SQL will show you the "day" after the current date that is Monday through Sunday.
# + language="sql"
# WITH DATES(FUNCTION, RESULT) AS
# (
# VALUES
# ('CURRENT DATE ',NOW),
# ('Monday ',NEXT_DAY(NOW,'Monday')),
# ('Tuesday ',NEXT_DAY(NOW,'TUE')),
# ('Wednesday ',NEXT_DAY(NOW,'Wednesday')),
# ('Thursday ',NEXT_DAY(NOW,'Thursday')),
# ('Friday ',NEXT_DAY(NOW,'FRI')),
# ('Saturday ',NEXT_DAY(NOW,'Saturday')),
# ('Sunday ',NEXT_DAY(NOW,'Sunday'))
# )
# SELECT * FROM DATES
# -
# [Back to Top](#top)
# <a id='between'></a>
# ## Between Date/Time Functions
#
# These date functions compute the number of full seconds, minutes, hours, days, weeks, and years between
# two dates. If there isn't a full value between the two objects (like less than a day), a zero will be
# returned. These new functions are:
#
# - HOURS_BETWEEN - returns the number of full hours between two arguments
# - MINUTES_BETWEEN - returns the number of full minutes between two arguments
# - SECONDS_BETWEEN - returns the number of full seconds between two arguments
# - DAYS_BETWEEN - returns the number of full days between two arguments
# - WEEKS_BETWEEN - returns the number of full weeks between two arguments
# - YEARS_BETWEEN - returns the number of full years between two arguments
#
# The format of the function is:
# ```Python
# DAYS_BETWEEN( expression1, expression2 )
# ```
# The following SQL will use a date that is in the future with exactly one extra second, minute, hour, day,
# week and year added to it.
# + magic_args="-q" language="sql"
# DROP VARIABLE FUTURE_DATE;
# CREATE VARIABLE FUTURE_DATE TIMESTAMP DEFAULT(NOW + 1 SECOND + 1 MINUTE + 1 HOUR + 8 DAYS + 1 YEAR);
#
# WITH DATES(FUNCTION, RESULT) AS (
# VALUES
# ('SECONDS_BETWEEN',SECONDS_BETWEEN(FUTURE_DATE,NOW)),
# ('MINUTES_BETWEEN',MINUTES_BETWEEN(FUTURE_DATE,NOW)),
# ('HOURS_BETWEEN ',HOURS_BETWEEN(FUTURE_DATE,NOW)),
# ('DAYS BETWEEN ',DAYS_BETWEEN(FUTURE_DATE,NOW)),
# ('WEEKS_BETWEEN ',WEEKS_BETWEEN(FUTURE_DATE,NOW)),
# ('YEARS_BETWEEN ',YEARS_BETWEEN(FUTURE_DATE,NOW))
# )
# SELECT * FROM DATES;
# -
# [Back to Top](#top)
# <a id='mbetween'></a>
# ## MONTHS_BETWEEN Function
#
# You may have noticed that the MONTHS_BETWEEN function was not in the previous list of functions. The
# reason for this is that the value returned for MONTHS_BETWEEN is different from the other functions. The MONTHS_BETWEEN
# function returns a DECIMAL value rather than an integer value. The reason for this is that the duration of a
# month is not as precise as a day, week or year. The following example will show how the duration is
# a decimal value rather than an integer. You could always truncate the value if you want an integer.
# + language="sql"
# WITH DATES(FUNCTION, RESULT) AS (
# VALUES
# ('0 MONTH ',MONTHS_BETWEEN(NOW, NOW)),
# ('1 MONTH ',MONTHS_BETWEEN(NOW + 1 MONTH, NOW)),
# ('1 MONTH + 1 DAY',MONTHS_BETWEEN(NOW + 1 MONTH + 1 DAY, NOW)),
# ('LEAP YEAR ',MONTHS_BETWEEN('2016-02-01','2016-03-01')),
# ('NON-LEAP YEAR ',MONTHS_BETWEEN('2015-02-01','2015-03-01'))
# )
# SELECT * FROM DATES
# -
# [Back to Top](#top)
# <a id='duration'></a>
# ## Date Duration Functions
#
# An alternate way of representing date durations is through the use of an integer with the format YYYYMMDD where
# the YYYY represents the year, MM for the month and DD for the day. Date durations are easier to manipulate than
# timestamp values and take up substantially less storage.
#
# There are two new functions.
#
# - YMD_BETWEEN returns a numeric value that specifies the number of full years, full months, and full days between two datetime values
# - AGE returns a numeric value that represents the number of full years, full months, and full days between the current timestamp and the argument
#
# This SQL statement will return various AGE calculations based on the current timestamp.
# + language="sql"
# WITH DATES(FUNCTION, RESULT) AS (
# VALUES
# ('AGE + 1 DAY ',AGE(NOW - 1 DAY)),
# ('AGE + 1 MONTH ',AGE(NOW - 1 MONTH)),
# ('AGE + 1 YEAR ',AGE(NOW - 1 YEAR)),
# ('AGE + 1 DAY + 1 MONTH ',AGE(NOW - 1 DAY - 1 MONTH)),
# ('AGE + 1 DAY + 1 YEAR ',AGE(NOW - 1 DAY - 1 YEAR)),
# ('AGE + 1 DAY + 1 MONTH + 1 YEAR',AGE(NOW - 1 DAY - 1 MONTH - 1 YEAR))
# )
# SELECT * FROM DATES
# -
# The YMD_BETWEEN function is similar to the AGE function except that it takes two date arguments. We can
# simulate the AGE function by supplying the NOW function to the YMD_BETWEEN function.
# + language="sql"
# WITH DATES(FUNCTION, RESULT) AS (
# VALUES
# ('1 DAY ',YMD_BETWEEN(NOW,NOW - 1 DAY)),
# ('1 MONTH ',YMD_BETWEEN(NOW,NOW - 1 MONTH)),
# ('1 YEAR ',YMD_BETWEEN(NOW,NOW - 1 YEAR)),
# ('1 DAY + 1 MONTH ',YMD_BETWEEN(NOW,NOW - 1 DAY - 1 MONTH)),
# ('1 DAY + 1 YEAR ',YMD_BETWEEN(NOW,NOW - 1 DAY - 1 YEAR)),
# ('1 DAY + 1 MONTH + 1 YEAR',YMD_BETWEEN(NOW,NOW - 1 DAY - 1 MONTH - 1 YEAR))
# )
# SELECT * FROM DATES
# -
# [Back to Top](#top)
# <a id='overlaps'></a>
# ## OVERLAPS Predicate
#
# The OVERLAPS predicate is used to determine whether two chronological periods overlap. This is not a
# function within DB2, but rather a special SQL syntax extension.
#
# A chronological period is specified by a pair of date-time expressions. The first expression specifies
# the start of a period; the second specifies its end.
#
# ```Python
# (start1,end1) OVERLAPS (start2, end2)
# ```
#
# The beginning and end values are not included in the periods. The following
# summarizes the overlap logic. For example, the periods 2016-10-19 to 2016-10-20
# and 2016-10-20 to 2016-10-21 do not overlap.
#
# For instance, the following interval does not overlap.
# + language="sql"
# VALUES
# CASE
# WHEN
# (NOW, NOW + 1 DAY) OVERLAPS (NOW + 1 DAY, NOW + 2 DAYS) THEN 'Overlaps'
# ELSE
# 'No Overlap'
# END
# -
# If the first date range is extended by one day then the range will overlap.
# + language="sql"
# VALUES
# CASE
# WHEN
# (NOW, NOW + 2 DAYS) OVERLAPS (NOW + 1 DAY, NOW + 2 DAYS) THEN 'Overlaps'
# ELSE
# 'No Overlap'
# END
# -
# Identical date ranges will overlap.
# + language="sql"
# VALUES
# CASE
# WHEN
# (NOW, NOW + 1 DAY) OVERLAPS (NOW, NOW + 1 DAY) THEN 'Overlaps'
# ELSE
# 'No Overlap'
# END
# -
# [Back to Top](#top)
# <a id='utc'></a>
# # UTC Time Conversions
#
# Db2 has two functions that allow you to translate timestamps to and from UTC (Coordinated Universal Time).
#
# The FROM_UTC_TIMESTAMP scalar function returns a TIMESTAMP that is converted from Coordinated Universal Time
# to the time zone specified by the time zone string.
#
# The TO_UTC_TIMESTAMP scalar function returns a TIMESTAMP that is converted to Coordinated Universal Time
# from the timezone that is specified by the timezone string.
#
# The format of the two functions is:
#
# ```Python
# FROM_UTC_TIMESTAMP( expression, timezone )
# TO_UTC_TIMESTAMP( expression, timezone)
# ```
#
# The return value from each of these functions is a timestamp. The "expression" is a timestamp that
# you want to convert to the local timezone (or convert to UTC). The timezone is
# an expression that specifies the time zone that the expression is to be adjusted to.
# The value of the timezone-expression must be a time zone name from the Internet Assigned Numbers Authority (IANA)
# time zone database. The standard format for a time zone name in the IANA database is Area/Location, where:
#
# - Area is the English name of a continent, ocean, or the special area 'Etc'
# - Location is the English name of a location within the area; usually a city, or small island
#
# Examples:
#
# - "America/Toronto"
# - "Asia/Sakhalin"
# - "Etc/UTC" (which represents Coordinated Universal Time)
#
# For complete details on the valid set of time zone names and the rules that are associated with those time zones,
# refer to the IANA time zone database. The database server uses version 2010c of the IANA time zone database.
#
# The result is a timestamp, adjusted from/to the Coordinated Universal Time time zone to the time zone
# specified by the timezone-expression. If the timezone-expression returns a value that is not a time zone
# in the IANA time zone database, then the value of expression is returned without being adjusted.
#
# The timestamp adjustment is done by first applying the raw offset from Coordinated Universal Time of the
# timezone-expression. If Daylight Saving Time is in effect at the adjusted timestamp for the time zone
# that is specified by the timezone-expression, then the Daylight Saving Time offset is also applied
# to the timestamp.
#
# Time zones that use Daylight Saving Time have ambiguities at the transition dates. When a time zone
# changes from standard time to Daylight Saving Time, a range of time does not occur as it is skipped
# during the transition. When a time zone changes from Daylight Saving Time to standard time,
# a range of time occurs twice. Ambiguous timestamps are treated as if they occurred when standard time
# was in effect for the time zone.
#
# Convert the Coordinated Universal Time timestamp '2011-12-25 09:00:00.123456' to the 'Asia/Tokyo' time zone.
# The following returns a TIMESTAMP with the value '2011-12-25 18:00:00.123456'.
#
# + language="sql"
# VALUES FROM_UTC_TIMESTAMP(TIMESTAMP '2011-12-25 09:00:00.123456', 'Asia/Tokyo');
# -
# Convert the Coordinated Universal Time timestamp '2014-11-02 06:55:00' to the 'America/Toronto' time zone.
# The following returns a TIMESTAMP with the value '2014-11-02 01:55:00'.
# + language="sql"
# VALUES FROM_UTC_TIMESTAMP(TIMESTAMP'2014-11-02 06:55:00', 'America/Toronto');
# -
# Convert the Coordinated Universal Time timestamp '2015-03-02 06:05:00' to the 'America/Toronto'
# time zone. The following returns a TIMESTAMP with the value '2015-03-02 01:05:00'.
# + language="sql"
# VALUES FROM_UTC_TIMESTAMP(TIMESTAMP'2015-03-02 06:05:00', 'America/Toronto');
# -
# Convert the timestamp '1970-01-01 00:00:00' to the Coordinated Universal Time timezone from the 'America/Denver'
# timezone. The following returns a TIMESTAMP with the value '1970-01-01 07:00:00'.
# + language="sql"
# VALUES TO_UTC_TIMESTAMP(TIMESTAMP'1970-01-01 00:00:00', 'America/Denver');
# -
# ## Using UTC Functions
#
# One of the applications for using the UTC is to take the transaction timestamp and normalize it across
# all systems that access the data. You can convert the timestamp to UTC on insert and then when it is
# retrieved, it can be converted to the local timezone.
#
# This example will use a number of techniques to hide the complexity of changing timestamps to local timezones.
#
# The following SQL will create our base transaction table (TXS_BASE) that will be used throughout the
# example.
# + magic_args="-q" language="sql"
# DROP TABLE TXS_BASE;
# CREATE TABLE TXS_BASE
# (
# ID INTEGER NOT NULL,
# CUSTID INTEGER NOT NULL,
# TXTIME_UTC TIMESTAMP NOT NULL
# );
# -
# The UTC functions will be written to take advantage of a local timezone variable called TIME_ZONE. This
# variable will contain the timezone of the server (or user) that is running the transaction. In this
# case we are using the timezone in Toronto, Canada.
# + language="sql"
# CREATE OR REPLACE VARIABLE TIME_ZONE VARCHAR(255) DEFAULT('America/Toronto');
# -
# The SET Command can be used to update the TIME_ZONE to the current location we are in.
# %sql SET TIME_ZONE = 'America/Toronto'
# In order to retrieve the value of the current timezone, we take advantage of a simple user-defined function
# called GET_TIMEZONE. It just retrieves the contents of the current TIME_ZONE variable that we set up.
# + language="sql"
# CREATE OR REPLACE FUNCTION GET_TIMEZONE()
# RETURNS VARCHAR(255)
# LANGUAGE SQL CONTAINS SQL
# RETURN (TIME_ZONE)
# -
# The TXS view is used by all SQL statements rather than the TXS_BASE table. The reason for this is to
# take advantage of INSTEAD OF triggers that can manipulate the UTC without modifying the original SQL.
#
# Note that when the data is returned from the view that the TXTIME field is converted from UTC to the current
# TIMEZONE that we are in.
# + magic_args=" " language="sql"
# CREATE OR REPLACE VIEW TXS AS
# (
# SELECT
# ID,
# CUSTID,
# FROM_UTC_TIMESTAMP(TXTIME_UTC,GET_TIMEZONE()) AS TXTIME
# FROM
# TXS_BASE
# )
# -
# An INSTEAD OF trigger (INSERT, UPDATE, and DELETE) is created against the TXS view so that any insert or
# update on a TXTIME column will be converted back to the UTC value. From an application perspective,
# we are using the local time, not the UTC time.
# + magic_args="-d" language="sql"
# CREATE OR REPLACE TRIGGER I_TXS
# INSTEAD OF INSERT ON TXS
# REFERENCING NEW AS NEW_TXS
# FOR EACH ROW MODE DB2SQL
# BEGIN ATOMIC
# INSERT INTO TXS_BASE VALUES (
# NEW_TXS.ID,
# NEW_TXS.CUSTID,
# TO_UTC_TIMESTAMP(NEW_TXS.TXTIME,GET_TIMEZONE())
# );
# END
# @
#
# CREATE OR REPLACE TRIGGER U_TXS
# INSTEAD OF UPDATE ON TXS
# REFERENCING NEW AS NEW_TXS OLD AS OLD_TXS
# FOR EACH ROW MODE DB2SQL
# BEGIN ATOMIC
# UPDATE TXS_BASE
# SET (ID, CUSTID, TXTIME_UTC) =
# (NEW_TXS.ID,
# NEW_TXS.CUSTID,
# TO_UTC_TIMESTAMP(NEW_TXS.TXTIME,TIME_ZONE)
# )
# WHERE
# TXS_BASE.ID = OLD_TXS.ID
# ;
# END
# @
#
# CREATE OR REPLACE TRIGGER D_TXS
# INSTEAD OF DELETE ON TXS
# REFERENCING OLD AS OLD_TXS
# FOR EACH ROW MODE DB2SQL
# BEGIN ATOMIC
# DELETE FROM TXS_BASE
# WHERE
# TXS_BASE.ID = OLD_TXS.ID
# ;
# END
# @
# -
# At this point in time(!) we can start inserting records into our table. We have already set the timezone
# to be Toronto, so the next insert statement will take the current time (NOW) and insert it into the table.
# For reference, here is the current time.
# %sql VALUES NOW
# We will insert one record into the table and immediately retrieve the result.
# + language="sql"
# INSERT INTO TXS VALUES(1,1,NOW);
#
# SELECT * FROM TXS;
# -
# Note that the timestamp appears to be the same as what we inserted (plus or minus a few seconds). What actually
# sits in the base table is the UTC time.
# %sql SELECT * FROM TXS_BASE
# We can modify the time that is returned to us by changing our local timezone. The statement will make
# the system think we are in Vancouver.
# %sql SET TIME_ZONE = 'America/Vancouver'
# Retrieving the results will show that the timestamp has shifted by 3 hours (Vancouver is 3 hours behind
# Toronto).
# %sql SELECT * FROM TXS
# So what happens if we insert a record into the table now that we are in Vancouver?
# + language="sql"
# INSERT INTO TXS VALUES(2,2,NOW);
# SELECT * FROM TXS;
# -
# The data retrieved reflects the fact that we are now in Vancouver from an application perspective. Looking at the
# base table and you will see that everything has been converted to UTC time.
# %sql SELECT * FROM TXS_BASE
# Finally, we can switch back to Toronto time and see when the transactions were done. You will see that from a
# Toronto perspective that the transactions were done three hours later because of the timezone differences.
# + language="sql"
# SET TIME_ZONE = 'America/Toronto';
# SELECT * FROM TXS;
# -
# Close the connection to avoid running out of connection handles to Db2 on Cloud.
# %sql CONNECT RESET
# [Back to Top](#top)
# #### Credits: IBM 2019, <NAME> [<EMAIL>]
| Db2_11.1_Time_and_Date_Functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/AzucenaMV/top2000-dashboard/blob/main/EDA.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="dg6129-YW7g-" outputId="ce23da27-850a-4a9c-b211-94abb19d2b11"
# !pip install pycountry
# !pip install geopy
# !pip install folium
# + id="6Dbpxn6_tk_M"
import pandas as pd
import numpy as np
import os
import pycountry
import folium
import seaborn as sns
from geopy.geocoders import Nominatim
from folium.plugins import MarkerCluster
# + colab={"base_uri": "https://localhost:8080/"} id="XxAweHXdwWgw" outputId="184ca5a0-2468-4870-a859-28af2c07eebd"
from google.colab import drive
drive.mount('/content/drive')
# + id="ICKA5Jm9tek7"
file_spotify = "spotify_features.csv"
path = 'drive/MyDrive/JADS/DataVizProject/Code/'
file_spotify = "spotify_features.csv"
file_gender = "top2000_gender.csv"
# + id="OJMoCuS7tmfd"
df_gender = pd.read_csv(os.path.join(path,file_gender))
df_spotify = pd.read_csv(os.path.join(path,file_spotify))
# + id="7mE2E289vRtP"
df = df_gender.merge(df_spotify, how = 'left', suffixes = ("","_sp"), on = 'index')
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="1vmD6PT4vXOp" outputId="e9b7634c-75e9-4f18-9ecc-1f54b9b3ce99"
df[df['song'].str.contains("\\(")][['song','song_name']]
# + id="KuAPp8XFYiYS"
# Fixing manually country
df.loc[df.artist == 'Prince & The Revolution','artist_country'] = 'US'
df.loc[df.artist == '<NAME>','artist_country'] = 'NL'
df.loc[df.artist == '<NAME> & <NAME>', 'artist_country'] = 'NL'
# + id="NyDmyXqLUJGN"
n = 100
df_n = df[:n]
df_last_n = df[1949:2000]
# + [markdown] id="bwfpBMlv-v8T"
# # Location
# + id="pDh7GIBb_p7C"
def get_country_name(value):
  """Map an ISO-3166 alpha-2 code to its English country name.

  Returns '' for missing (NaN/None) codes and also for codes pycountry
  does not recognise: pycountry.countries.get returns None in that case,
  which previously raised AttributeError on `.name`.
  """
  if pd.isna(value):
    return ''
  country = pycountry.countries.get(alpha_2=value)
  return country.name if country is not None else ''
# + id="wybCXirK_q_n"
df['artist_country_name'] = df.artist_country.apply(lambda x: get_country_name(x))
# + colab={"base_uri": "https://localhost:8080/"} id="rdm8kDxLVVTl" outputId="68e54988-a81e-4768-a85b-1fad40f7b27c"
df.artist_country_name.value_counts().head(10)
# + id="AGscgzqQmClk"
df_n_country = df_n.groupby(['artist_country']).size().reset_index(name = 'num_artist')
# + id="IPbERXkUYRLZ"
geolocator = Nominatim(user_agent="http")

def geolocate(country):
    """Return (lat, lon) of `country`'s geocoded centre, or (nan, nan) on failure.

    Failures include network errors, geopy timeouts, and unknown names
    (geocode returns None, making `.latitude` raise AttributeError).
    """
    try:
        # Geolocate the center of the country
        loc = geolocator.geocode(country)
        # And return latitude and longitude
        return (loc.latitude, loc.longitude)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
        return (np.nan, np.nan)
# + id="jkIHUb68juV0"
geo = df_n_country.artist_country.apply(lambda x: geolocate(x))
# + id="6nffHLzum7Eo"
df_geo = pd.DataFrame(geo.tolist(), columns=['lat', 'long'], index=geo.index) \
.apply(pd.to_numeric, errors='coerce')
df_n_country_geo = pd.concat([df_n_country, df_geo], axis=1)
df_n_geo = df_n.merge(df_n_country_geo, how = 'inner', on = 'artist_country')
# + colab={"base_uri": "https://localhost:8080/", "height": 934} id="fyFzzThNkrHB" outputId="dd44c921-b3de-4417-88ae-1b2f4710b4b6"
#empty map
world_map= folium.Map(tiles="cartodbpositron",control_scale=True)
marker_cluster = MarkerCluster().add_to(world_map)#for each coordinate, create circlemarker of user percent
for i in range(len(df_n_geo)):
lat = df_n_geo.iloc[i]['lat']
long = df_n_geo.iloc[i]['long']
radius= 15
popup_text = """Country : {}<br>
artist : {}<br>
song : {}"""
popup_text = popup_text.format(df_n_geo.iloc[i]['artist_country_name'],
df_n_geo.iloc[i]['artist'],
df_n_geo.iloc[i]['song_name']
)
folium.CircleMarker(location = [lat, long], radius=radius, popup= popup_text, fill =True).add_to(marker_cluster)#show the map
world_map
# + id="Obg3LZu0oSID"
# TODO: BY ARTIST
#df_n.drop_duplicates(subset = ['artist','artist_country'])['artist','artist_country']
# + [markdown] id="vIm6AYv083TP"
# # Audio Features
# + id="zOau3nXo9Cxc"
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
import matplotlib. pyplot as plt
# + [markdown] id="yNn56HcmcHUs"
# - Danceability: Danceability describes how suitable a track is for dancing based on a combination of musical elements including tempo, rhythm stability, beat strength, and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable.
# - Acousticness: A measure from 0.0 to 1.0 of whether the track is acoustic.
# - Energy: Energy is a measure from 0.0 to 1.0 and represents a perceptual measure of intensity and activity. Typically, energetic tracks feel fast, loud, and noisy.
# - Instrumentalness: Predicts whether a track contains no vocals. The closer the instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content. \\
# - Liveness: Detects the presence of an audience in the recording. Higher liveness values represent an increased probability that the track was performed live.
# - Loudness: The overall loudness of a track in decibels (dB). Loudness values are averaged across the entire track. Values typical range between -60 and 0 db.
# - Speechiness: Speechiness detects the presence of spoken words in a track. The more exclusively speech-like the recording (e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value.
# - Tempo: The overall estimated tempo of a track in beats per minute (BPM). In musical terminology, tempo is the speed or pace of a given piece and derives directly from the average beat duration.
# - Valence: A measure from 0.0 to 1.0 describing the musical positiveness conveyed by a track. Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while tracks with low valence sound more negative (e.g. sad, depressed, angry).
#
# https://medium.com/@boplantinga/what-do-spotifys-audio-features-tell-us-about-this-year-s-eurovision-song-contest-66ad188e112a
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="E5dAN1TK-1GV" outputId="55d89a14-0fff-46af-f740-940afce5c319"
audio_features = df_n.columns[df_n.columns.get_loc('danceability'):(df_n.columns.get_loc('tempo')+1)]
df_n[audio_features].describe()
# + [markdown] id="4RTht4mL88Kd"
# ### Top n distributions
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Mm3rB0EB9At5" outputId="d322ce00-ace6-48df-f7cd-9e4d19d68ec9"
f, axes = plt.subplots(4, 3, figsize=(20, 20), sharex=False)
for ax, feature in zip(axes.flat, audio_features):
sns.histplot(df[feature] , color="skyblue", ax=ax)
# + [markdown] id="IicNF5e5-jbe"
#
# Dub: 60-90 bpm
# Hip-hop: 60-100 bpm
# House: 115-130 bpm
# Techno/trance: 120-140 bpm
# Dubstep: 135-145 bpm
# Drum and bass: 160-180 bpm
#
# + [markdown] id="-gjY5k3e71z7"
# ### Clustering
# + id="BKfRFQB-aeLx"
X = df_n[audio_features].values
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="Xq2TsGBwZhbL" outputId="fd085f99-67eb-4fc5-d628-b1d7d2a7f955"
distortions = []
for i in range(1, 11):
km = KMeans(
n_clusters=i, init='random',
n_init=10, max_iter=300,
tol=1e-04, random_state=0
)
km.fit(X_scaled)
distortions.append(km.inertia_)
# plot
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.show()
# + id="pdjRLD6eaz4t"
km = KMeans(
n_clusters=3, init='random',
n_init=10, max_iter=300,
tol=1e-04, random_state=0
)
y_km = km.fit_predict(X_scaled)
centroids = km.cluster_centers_
# + id="rKG6-slEbk2C"
df_n = df_n.assign(clusters = y_km.reshape(-1,1))
# + colab={"base_uri": "https://localhost:8080/", "height": 513} id="hGi0n2OndEqX" outputId="261e64cd-3263-4f30-dfea-abade79af673"
df_n[df_n['clusters'] == 0].sample(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 496} id="2Jz0TJy_dK3N" outputId="15c9a588-ea42-4303-c8c7-c5115575d458"
df_n[df_n['clusters'] == 1].sample(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 565} id="PIE3ze7meDjR" outputId="36ab12ca-968b-4175-8390-fe6168c24312"
df_n[df_n['clusters'] == 2].sample(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="_x2ld2aBgTt5" outputId="1220acf0-b8ff-483a-cd1a-56876e93faca"
df_1 = df_n.loc[df_n.clusters == 0, audio_features]
df_2 = df_n.loc[df_n.clusters == 1, audio_features]
df_3 = df_n.loc[df_n.clusters == 2, audio_features]
f, axes = plt.subplots(4, 3, figsize=(20, 20), sharex=False)
for ax, feature in zip(axes.flat, audio_features):
sns.kdeplot(df_1[feature] , color="orange", ax=ax)
sns.kdeplot(df_2[feature] , color="green", ax=ax)
sns.kdeplot(df_3[feature] , color="skyblue", ax=ax)
#sns.histplot(data=df_n, x=feature, hue="clusters")
#ax2 = ax.twinx()
#sns.kdeplot(df['total_bill'], ax=ax2)
# + [markdown] id="9gg3LUFO9RuA"
# ### Correlations
# + id="6VDardIph9Hj"
df_n['2021'] = df_n['2021'].astype('int')
corr = df_n[audio_features.to_list() + ['2021']].corr(method = 'spearman')
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="9yrgaY0gid3c" outputId="31ce568b-0647-4f50-e63b-e1b46985a201"
fig, ax = plt.subplots(figsize=(20,20))
ax.matshow(corr)
xaxis = np.arange(len(audio_features.to_list())+1)
ax.set_xticks(xaxis)
ax.set_yticks(xaxis)
ax.set_xticklabels(audio_features.to_list() + ['2021'])
ax.set_yticklabels(audio_features.to_list() + ['2021'])
plt.show()
| notebooks/EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Imports
# +
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
# -
# ## Read the dataset and the radiogenomic data
# Radiomics feature matrix (one row per image), extracted with PyRadiomics.
pyradiomics_dataset_path = os.path.join('..', 'dataset', 'dataset.csv')
pyradiomics_dataset = pd.read_csv(pyradiomics_dataset_path)
pyradiomics_dataset.head()
# Clinical labels; 'Survival Status' is the prediction target.
radiogenomics_labels_path = os.path.join('..', 'dataset', 'radiogenomics_labels.csv')
radiogenomics_labels = pd.read_csv(radiogenomics_labels_path)
radiogenomics_labels.tail()
# ## Data Preprocessing
# +
# Derive the merge key: 'Case ID' is the image filename without its extension.
# A vectorized string op replaces the original row loop, which also silently
# assumed a default 0..n-1 integer index for .loc writes.
pyradiomics_dataset['Case ID'] = pyradiomics_dataset['Image'].str.split('.').str[0]
# -
# Attach the survival label to each case; the left-join keeps all feature rows,
# and dropna() then discards cases that have no label.
dataset = pd.merge(pyradiomics_dataset, radiogenomics_labels[['Case ID', 'Survival Status']], left_on='Case ID', right_on='Case ID', how='left')
dataset.drop(['Mask', 'Image', 'Case ID'], axis=1, inplace=True)
dataset.dropna(inplace=True)
dataset.head()
# Features = every column but the last; target = 'Survival Status' encoded 1/0.
X = dataset.iloc[:, :-1]
y = dataset.iloc[:, -1]
# Reassign instead of inplace-replace: avoids SettingWithCopy issues on the iloc view.
y = y.replace({'Alive': 1, 'Dead': 0})
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# +
# Scale features to [0, 1]: fit min/max on the training split only, then apply
# the same transform to the test split so no test information leaks in.
scaler = MinMaxScaler()
X_train = pd.DataFrame(scaler.fit_transform(X_train.astype('float64')))
X_test = pd.DataFrame(scaler.transform(X_test.astype('float64')))
# Restore the original column names lost in the ndarray round-trip.
X_train.columns = X.columns
X_test.columns = X.columns
# -
# ## Model Selection
cross_valid_scores = {}  # model name -> best mean CV accuracy, filled per section below
# ### Decision Tree Classifier
# +
from sklearn.tree import DecisionTreeClassifier
# Grid-search max_depth with 5-fold CV; class_weight='balanced' counters label imbalance.
parameters = {
    "max_depth": [3, 5, 7, 9, 11, 13],
}
# NOTE(review): 'desicion' is a typo kept as-is; renaming would change the results-table key.
model_desicion_tree = DecisionTreeClassifier(
    random_state=42,
    class_weight='balanced',
)
# The same name is reused for the fitted GridSearchCV wrapper.
model_desicion_tree = GridSearchCV(
    model_desicion_tree,
    parameters,
    cv=5,
    scoring='accuracy',
)
model_desicion_tree.fit(X_train, y_train)
print('-----')
print(f'Best parameters {model_desicion_tree.best_params_}')
print(
    f'Mean cross-validated accuracy score of the best_estimator: ' + \
    f'{model_desicion_tree.best_score_:.3f}'
)
cross_valid_scores['desicion_tree'] = model_desicion_tree.best_score_
print('-----')
# -
# ### Random Forest Classifier
# +
# %%time
from sklearn.ensemble import RandomForestClassifier
# Grid over ensemble size and tree depth; 5-fold CV on accuracy.
parameters = {
    "n_estimators": [5, 10, 15, 20, 25],
    "max_depth": [3, 5, 7, 9, 11, 13],
}
model_random_forest = RandomForestClassifier(
    random_state=42,
    class_weight='balanced',
)
model_random_forest = GridSearchCV(
    model_random_forest,
    parameters,
    cv=5,
    scoring='accuracy',
)
model_random_forest.fit(X_train, y_train)
print('-----')
print(f'Best parameters {model_random_forest.best_params_}')
print(
    f'Mean cross-validated accuracy score of the best_estimator: '+ \
    f'{model_random_forest.best_score_:.3f}'
)
cross_valid_scores['random_forest'] = model_random_forest.best_score_
print('-----')
# -
# ### XGBoost
# +
# %%time
from xgboost import XGBClassifier
# Same grid-search pattern for XGBoost, adding learning_rate to the grid.
parameters = {
    'max_depth': [3, 5, 7, 9],
    'n_estimators': [5, 10, 15, 20, 25, 50, 100],
    'learning_rate': [0.01, 0.05, 0.1]
}
model_xgb = XGBClassifier(
    random_state=42,
)
model_xgb = GridSearchCV(
    model_xgb,
    parameters,
    cv=5,
    scoring='accuracy',
)
model_xgb.fit(X_train, y_train)
print('-----')
print(f'Best parameters {model_xgb.best_params_}')
print(
    f'Mean cross-validated accuracy score of the best_estimator: ' +
    f'{model_xgb.best_score_:.3f}'
)
cross_valid_scores['xgboost'] = model_xgb.best_score_
print('-----')
# -
# ### LightGBM
# +
# %%time
import lightgbm as lgbm
# LightGBM additionally tunes num_leaves (leaf-wise tree complexity).
parameters = {
    'n_estimators': [5, 10, 15, 20, 25, 50, 100],
    'learning_rate': [0.01, 0.05, 0.1],
    'num_leaves': [7, 15, 31],
}
model_lgbm = lgbm.LGBMClassifier(
    random_state=42,
    class_weight='balanced',
)
model_lgbm = GridSearchCV(
    model_lgbm,
    parameters,
    cv=5,
    scoring='accuracy',
)
model_lgbm.fit(
    X_train,
    y_train,
)
print('-----')
print(f'Best parameters {model_lgbm.best_params_}')
print(
    f'Mean cross-validated accuracy score of the best_estimator: ' +
    f'{model_lgbm.best_score_:.3f}'
)
cross_valid_scores['lightgbm'] = model_lgbm.best_score_
print('-----')
# -
# ## CatBoost Classifier
# +
# %%time
import catboost as cb
# Only iterations=[10] is active; the fuller grid is left commented out,
# presumably because CatBoost is slow to fit -- confirm before enabling.
parameters = {
    'iterations': [10],
    # 'iterations': [5, 10, 15, 20, 25, 50, 100],
    # 'learning_rate': [0.01, 0.05, 0.1],
    # 'depth': [3, 5, 7, 9, 11, 13],
}
model_catboost = cb.CatBoostClassifier(
    verbose=False,
)
model_catboost = GridSearchCV(
    model_catboost,
    parameters,
    cv=5,
    scoring='accuracy',
)
model_catboost.fit(X_train, y_train)
print('-----')
print(f'Best parameters {model_catboost.best_params_}')
print(
    f'Mean cross-validated accuracy score of the best_estimator: ' +
    f'{model_catboost.best_score_:.3f}'
)
cross_valid_scores['catboost'] = model_catboost.best_score_
print('-----')
# -
# ## Logistic Regression
# +
# %%time
from sklearn.linear_model import LogisticRegression
# liblinear is used because it supports both l1 and l2 penalties in this grid.
parameters = {
    "C": [0.001, 0.01, 0.1, 1.],
    "penalty": ["l1", "l2"]
}
model_logistic_regression = LogisticRegression(
    random_state=42,
    class_weight="balanced",
    solver="liblinear",
)
model_logistic_regression = GridSearchCV(
    model_logistic_regression,
    parameters,
    cv=5,
    scoring='accuracy',
)
model_logistic_regression.fit(X_train, y_train)
print('-----')
print(f'Best parameters {model_logistic_regression.best_params_}')
print(
    f'Mean cross-validated accuracy score of the best_estimator: ' +
    f'{model_logistic_regression.best_score_:.3f}'
)
cross_valid_scores['logistic_regression'] = model_logistic_regression.best_score_
print('-----')
# -
# ### GaussianNB Classifier
# +
from sklearn.naive_bayes import GaussianNB
# No grid: GaussianNB has essentially nothing to tune; evaluate with 10-fold CV below.
gnb_clf = GaussianNB()
# -
# NOTE(review): this score is printed but not added to cross_valid_scores.
scores = cross_val_score(gnb_clf, X_train, y_train, scoring="accuracy", cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# ### SVM Classifier
# +
# %%time
from sklearn.svm import SVC
# Grid over regularisation strength, kernel family, and gamma policy.
parameters = {
    "C": [0.001, 0.01, 0.1, 1.],
    "kernel": ["linear", "poly", "rbf", "sigmoid"],
    "gamma": ["scale", "auto"],
}
model_svc = SVC(
    random_state=42,
    class_weight="balanced",
)
model_svc = GridSearchCV(
    model_svc,
    parameters,
    cv=5,
    scoring='accuracy',
)
model_svc.fit(X_train, y_train)
print('-----')
print(f'Best parameters {model_svc.best_params_}')
print(
    f'Mean cross-validated accuracy score of the best_estimator: ' +
    f'{model_svc.best_score_:.3f}'
)
cross_valid_scores['svc'] = model_svc.best_score_
print('-----')
# -
# ### AdaBoost Classifier
# +
# %%time
from sklearn.ensemble import AdaBoostClassifier
# Grid over ensemble size and shrinkage; 5-fold CV on accuracy.
parameters = {
    "n_estimators": [5, 10, 15, 20, 25, 50, 75, 100],
    "learning_rate": [0.001, 0.01, 0.1, 1.],
}
model_adaboost = AdaBoostClassifier(
    random_state=42,
)
model_adaboost = GridSearchCV(
    model_adaboost,
    parameters,
    cv=5,
    scoring='accuracy',
)
model_adaboost.fit(X_train, y_train)
print('-----')
print(f'Best parameters {model_adaboost.best_params_}')
print(
    f'Mean cross-validated accuracy score of the best_estimator: '+ \
    f'{model_adaboost.best_score_:.3f}'
)
cross_valid_scores['ada_boost'] = model_adaboost.best_score_
print('-----')
# -
# ### KNeighbors Classifier
# +
from sklearn.neighbors import KNeighborsClassifier
# Default k-NN, no tuning; scored with 10-fold CV below.
kn_clf = KNeighborsClassifier()
# -
# NOTE(review): KNN/GP scores are printed but not added to cross_valid_scores.
scores = cross_val_score(kn_clf, X_train, y_train, scoring="accuracy", cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# ### Gaussian Process Classifier
# +
from sklearn.gaussian_process import GaussianProcessClassifier
gaussian_clf = GaussianProcessClassifier()
# -
scores = cross_val_score(gaussian_clf, X_train, y_train, scoring="accuracy", cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# ### SelectKBest
from sklearn.feature_selection import SelectKBest, chi2
# chi2 requires non-negative features; satisfied here because X was MinMax-scaled.
k_best_clf = SelectKBest(chi2, k=50)
X_new = pd.DataFrame(k_best_clf.fit_transform(X_train, y_train))
X_new.shape
# Recover the names of the 50 selected columns and subset the test split to match.
X_new.columns = X_train.columns[k_best_clf.get_support()]
X_test_new = X_test.loc[:, X_new.columns]
# ## Final Model
pd.DataFrame(cross_valid_scores, index=['cross_valid_score']).T
# NOTE(review): the final model is a default-parameter XGBClassifier, not the
# grid-search winner (model_xgb.best_params_) -- confirm this is intended.
final_clf = XGBClassifier()
final_clf.fit(X_new, y_train)
y_pred = final_clf.predict(X_test_new)
print("Accuracy: %0.2f" % (accuracy_score(y_test, y_pred)))
#
#
#
# ## Save the final model
# +
import pickle
# Save to file in the current working directory
pkl_filename = "pickle_model.pkl"
output_path = os.path.join(os.getcwd(), 'models', pkl_filename)
with open(output_path, 'wb') as file:
    pickle.dump(final_clf, file)
# +
# Load from file
# Round-trip check: reload the pickle and re-score it on the held-out test set.
with open(output_path, 'rb') as file:
    pickle_model = pickle.load(file)
# Calculate the accuracy score and predict target values
score = pickle_model.score(X_test_new, y_test)
print("Test score: {0:.2f} %".format(100 * score))
Ypredict = pickle_model.predict(X_test_new)
# -
#
#
# # Automated Hyperparameter Tuning
# #### https://www.kaggle.com/pavansanagapati/automated-hyperparameter-tuning
# +
# # !pip install deap update_checker tqdm stopit
# +
# # !pip install tpot
# +
from tpot import TPOTClassifier
# Restrict TPOT's genetic search to AdaBoost with this hyperparameter grid.
parameters = {
    'learning_rate': [0.001, 0.01, 0.1, 1.],
    'n_estimators': [5, 10, 15, 20, 25, 50, 75, 100]
}
tpot_classifier = TPOTClassifier(generations= 4, population_size= 24, offspring_size= 12,
                                 verbosity= 2, early_stop= 12,
                                 config_dict=
                                 {'sklearn.ensemble.AdaBoostClassifier': parameters},
                                 cv = 4, scoring = 'accuracy')
tpot_classifier.fit(X_train, y_train)
# -
# NOTE(review): this predicts with the earlier GridSearchCV AdaBoost, not the
# TPOT model -- looks like it should be tpot_classifier.predict(X_test). Confirm.
y_pred = model_adaboost.predict(X_test)
print("Accuracy: %0.2f" % (accuracy_score(y_test, y_pred)))
accuracy = tpot_classifier.score(X_test, y_test)
print(accuracy)
| scripts/model selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Configurations for Colab
# +
import sys
# Detect Colab so the headless-display setup only runs there.
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
    # !apt-get install -y xvfb python-opengl > /dev/null 2>&1
    # !pip install gym pyvirtualdisplay > /dev/null 2>&1
    # !pip install JSAnimation==0.1
    # !pip install pyglet==1.3.2
    from pyvirtualdisplay import Display
    # Start virtual display
    # gym rendering needs an X display; Xvfb provides one on a headless VM.
    dis = Display(visible=0, size=(400, 400))
    dis.start()
# -
# # 05. Noisy Networks for Exploration
#
# [<NAME> et al., "Noisy Networks for Exploration." arXiv preprint arXiv:1706.10295, 2017.](https://arxiv.org/pdf/1706.10295.pdf)
#
#
# NoisyNet is an exploration method that learns perturbations of the network weights to drive exploration. The key insight is that a single change to the weight vector can induce a consistent, and potentially very complex, state-dependent change in policy over multiple time steps.
#
# Firstly, let's take a look into a linear layer of a neural network with $p$ inputs and $q$ outputs, represented by
#
# $$
# y = wx + b,
# $$
#
# where $x \in \mathbb{R}^p$ is the layer input, $w \in \mathbb{R}^{q \times p}$, and $b \in \mathbb{R}$ the bias.
#
# The corresponding noisy linear layer is defined as:
#
# $$
# y = (\mu^w + \sigma^w \odot \epsilon^w) x + \mu^b + \sigma^b \odot \epsilon^b,
# $$
#
# where $\mu^w + \sigma^w \odot \epsilon^w$ and $\mu^b + \sigma^b \odot \epsilon^b$ replace $w$ and $b$ in the first linear layer equation. The parameters $\mu^w \in \mathbb{R}^{q \times p}, \mu^b \in \mathbb{R}^q, \sigma^w \in \mathbb{R}^{q \times p}$ and $\sigma^b \in \mathbb{R}^q$ are learnable, whereas $\epsilon^w \in \mathbb{R}^{q \times p}$ and $\epsilon^b \in \mathbb{R}^q$ are noise random variables which can be generated by one of the following two ways:
#
# 1. **Independent Gaussian noise**: the noise applied to each weight and bias is independent, where each random noise entry is drawn from a unit Gaussian distribution. This means that for each noisy linear layer, there are $pq + q$ noise variables (for $p$ inputs to the layer and $q$ outputs).
# 2. **Factorised Gaussian noise:** This is a more computationally efficient way. It produces 2 random Gaussian noise vectors ($p, q$) and makes $pq + q$ noise entries by outer product as follows:
#
# $$
# \begin{align}
# \epsilon_{i,j}^w &= f(\epsilon_i) f(\epsilon_j),\\
# \epsilon_{j}^b &= f(\epsilon_j),\\
# \text{where } f(x) &= sgn(x) \sqrt{|x|}.
# \end{align}
# $$
#
# In all experiements of the paper, the authors used Factorised Gaussian noise, so we will go for it as well.
# +
import math
import os
from typing import Dict, List, Tuple
import gym
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from IPython.display import clear_output
# -
# ## Replay buffer
#
# Please see *01.dqn.ipynb* for detailed description.
class ReplayBuffer:
    """A simple numpy-backed circular (FIFO) experience replay buffer."""

    def __init__(self, obs_dim: int, size: int, batch_size: int = 32):
        """Pre-allocate storage for up to `size` transitions."""
        obs_shape = (size, obs_dim)
        self.obs_buf = np.zeros(obs_shape, dtype=np.float32)       # s_t
        self.next_obs_buf = np.zeros(obs_shape, dtype=np.float32)  # s_{t+1}
        self.acts_buf = np.zeros(size, dtype=np.float32)           # a_t
        self.rews_buf = np.zeros(size, dtype=np.float32)           # r_t
        self.done_buf = np.zeros(size, dtype=np.float32)           # episode-end flag
        self.max_size = size
        self.batch_size = batch_size
        self.ptr = 0   # next write slot
        self.size = 0  # number of valid entries

    def store(
        self,
        obs: np.ndarray,
        act: np.ndarray,
        rew: float,
        next_obs: np.ndarray,
        done: bool,
    ):
        """Insert one transition, overwriting the oldest when the buffer is full."""
        slot = self.ptr
        self.obs_buf[slot] = obs
        self.next_obs_buf[slot] = next_obs
        self.acts_buf[slot] = act
        self.rews_buf[slot] = rew
        self.done_buf[slot] = done
        # Advance the write pointer cyclically; size saturates at max_size.
        self.ptr = (slot + 1) % self.max_size
        if self.size < self.max_size:
            self.size += 1

    def sample_batch(self) -> Dict[str, np.ndarray]:
        """Uniformly sample `batch_size` distinct stored transitions."""
        idxs = np.random.choice(self.size, size=self.batch_size, replace=False)
        return {
            "obs": self.obs_buf[idxs],
            "next_obs": self.next_obs_buf[idxs],
            "acts": self.acts_buf[idxs],
            "rews": self.rews_buf[idxs],
            "done": self.done_buf[idxs],
        }

    def __len__(self) -> int:
        return self.size
# ## Noisy Layer
#
# **References:**
# - https://github.com/higgsfield/RL-Adventure/blob/master/5.noisy%20dqn.ipynb
# - https://github.com/Kaixhin/Rainbow/blob/master/model.py
class NoisyLinear(nn.Module):
    """Linear layer with learnable, factorised Gaussian noise (NoisyNet).

    Attributes:
        in_features (int): input size of linear module
        out_features (int): output size of linear module
        std_init (float): initial std value
        weight_mu / bias_mu (nn.Parameter): learnable means
        weight_sigma / bias_sigma (nn.Parameter): learnable noise scales
        weight_epsilon / bias_epsilon (buffer): current noise samples
    """

    def __init__(self, in_features: int, out_features: int, std_init: float = 0.5):
        """Allocate parameters and noise buffers, then initialise both."""
        super(NoisyLinear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.std_init = std_init

        w_shape = (out_features, in_features)
        self.weight_mu = nn.Parameter(torch.Tensor(*w_shape))
        self.weight_sigma = nn.Parameter(torch.Tensor(*w_shape))
        # Epsilon tensors are buffers: part of the state, but not trained.
        self.register_buffer("weight_epsilon", torch.Tensor(*w_shape))

        self.bias_mu = nn.Parameter(torch.Tensor(out_features))
        self.bias_sigma = nn.Parameter(torch.Tensor(out_features))
        self.register_buffer("bias_epsilon", torch.Tensor(out_features))

        self.reset_parameters()
        self.reset_noise()

    def reset_parameters(self):
        """Initialise mu uniformly in +/-1/sqrt(fan_in), sigma to std_init/sqrt(fan)."""
        bound = 1 / math.sqrt(self.in_features)
        self.weight_mu.data.uniform_(-bound, bound)
        self.weight_sigma.data.fill_(self.std_init / math.sqrt(self.in_features))
        self.bias_mu.data.uniform_(-bound, bound)
        self.bias_sigma.data.fill_(self.std_init / math.sqrt(self.out_features))

    def reset_noise(self):
        """Resample factorised noise: eps_w = f(eps_out) (x) f(eps_in), eps_b = f(eps_out)."""
        eps_in = self.scale_noise(self.in_features)
        eps_out = self.scale_noise(self.out_features)
        # Outer product builds the full (out, in) weight-noise matrix.
        self.weight_epsilon.copy_(torch.ger(eps_out, eps_in))
        self.bias_epsilon.copy_(eps_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Compute y = (mu_w + sigma_w*eps_w) x + (mu_b + sigma_b*eps_b).

        Noise is applied in both train and eval mode; separating the two
        showed no remarkable performance difference.
        """
        noisy_weight = self.weight_mu + self.weight_sigma * self.weight_epsilon
        noisy_bias = self.bias_mu + self.bias_sigma * self.bias_epsilon
        return F.linear(x, noisy_weight, noisy_bias)

    @staticmethod
    def scale_noise(size: int) -> torch.Tensor:
        """Return f(x) = sgn(x) * sqrt(|x|) applied to a unit-Gaussian sample."""
        eps = torch.FloatTensor(np.random.normal(loc=0.0, scale=1.0, size=size))
        return eps.sign().mul(eps.abs().sqrt())
# ## Noisy Network
#
# We use NoisyLinear for the last two FC layers, and there is a method to reset noise at every step.
# These are the only differences from the example of *01.dqn.ipynb*.
class Network(nn.Module):
    """Q-network: one plain linear feature layer followed by two noisy layers.

    The two final fully-connected layers are NoisyLinear, which replaces
    epsilon-greedy exploration; everything else matches a vanilla DQN net.
    """
    def __init__(self, in_dim: int, out_dim: int):
        """Build the feature extractor and the two noisy output layers."""
        super(Network, self).__init__()
        self.feature = nn.Linear(in_dim, 128)
        self.noisy_layer1 = NoisyLinear(128, 128)
        self.noisy_layer2 = NoisyLinear(128, out_dim)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Map a batch of states to per-action Q-values."""
        hidden = F.relu(self.noisy_layer1(F.relu(self.feature(x))))
        return self.noisy_layer2(hidden)
    def reset_noise(self):
        """Resample the noise in every noisy layer."""
        for layer in (self.noisy_layer1, self.noisy_layer2):
            layer.reset_noise()
# ## DQN + NoisyNet Agent (w/o DuelingNet)
#
# Here is a summary of DQNAgent class.
#
# | Method | Note |
# | --- | --- |
# |select_action | select an action from the input state. |
# |step | take an action and return the response of the env. |
# |compute_dqn_loss | return dqn loss. |
# |update_model | update the model by gradient descent. |
# |target_hard_update| hard update from the local model to the target model.|
# |train | train the agent during num_frames. |
# |test | test the agent (1 episode). |
# |plot | plot the training progresses. |
#
# In the paper, NoisyNet is used as a component of the Dueling Network Architecture, which includes Double-DQN and Prioritized Experience Replay. However, we don't implement them to simplify the tutorial. One thing to note is that NoisyNet is an alternative to the $\epsilon$-greedy method, so all $\epsilon$ related lines are removed. Please check all comments with *NoisyNet*.
class DQNAgent:
    """DQN Agent with NoisyNet exploration interacting with an environment.

    NoisyNet replaces epsilon-greedy exploration, so there are no
    epsilon-related attributes or schedules here.

    Attributes:
        env (gym.Env): openAI Gym environment
        memory (ReplayBuffer): replay memory to store transitions
        batch_size (int): batch size for sampling
        target_update (int): period for target model's hard update
        gamma (float): discount factor
        dqn (Network): model to train and select actions
        dqn_target (Network): target model to update
        optimizer (torch.optim): optimizer for training dqn
        transition (list): transition information including
            state, action, reward, next_state, done
    """
    def __init__(
        self,
        env: gym.Env,
        memory_size: int,
        batch_size: int,
        target_update: int,
        gamma: float = 0.99,
    ):
        """Initialization.

        Args:
            env (gym.Env): openAI Gym environment
            memory_size (int): length of memory
            batch_size (int): batch size for sampling
            target_update (int): period for target model's hard update
            gamma (float): discount factor
        """
        # NoisyNet: All attributes related to epsilon are removed
        obs_dim = env.observation_space.shape[0]
        action_dim = env.action_space.n
        self.env = env
        self.memory = ReplayBuffer(obs_dim, memory_size, batch_size)
        self.batch_size = batch_size
        self.target_update = target_update
        self.gamma = gamma
        # device: cpu / gpu
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu"
        )
        print(self.device)
        # networks: dqn (online) and dqn_target, synced at start
        self.dqn = Network(obs_dim, action_dim).to(self.device)
        self.dqn_target = Network(obs_dim, action_dim).to(self.device)
        self.dqn_target.load_state_dict(self.dqn.state_dict())
        self.dqn_target.eval()
        # optimizer
        self.optimizer = optim.Adam(self.dqn.parameters())
        # transition being assembled across select_action()/step()
        self.transition = list()
        # mode: train / test (test mode skips storing transitions)
        self.is_test = False
    def select_action(self, state: np.ndarray) -> np.ndarray:
        """Select an action from the input state.

        NoisyNet: greedy w.r.t. the noisy Q-network; the parameter noise
        itself provides exploration, so no epsilon-greedy is needed.
        """
        selected_action = self.dqn(
            torch.FloatTensor(state).to(self.device)
        ).argmax()
        selected_action = selected_action.detach().cpu().numpy()
        if not self.is_test:
            # remember (state, action); step() completes the transition
            self.transition = [state, selected_action]
        return selected_action
    def step(self, action: np.ndarray) -> Tuple[np.ndarray, np.float64, bool]:
        """Take an action and return the response of the env."""
        next_state, reward, done, _ = self.env.step(action)
        if not self.is_test:
            self.transition += [reward, next_state, done]
            self.memory.store(*self.transition)
        return next_state, reward, done
    def update_model(self) -> torch.Tensor:
        """Update the model by gradient descent and return the loss value."""
        samples = self.memory.sample_batch()
        loss = self._compute_dqn_loss(samples)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # NoisyNet: resample noise after every gradient step
        self.dqn.reset_noise()
        self.dqn_target.reset_noise()
        return loss.item()
    def train(self, num_frames: int, plotting_interval: int = 200):
        """Train the agent for num_frames environment steps."""
        self.is_test = False
        state = self.env.reset()
        update_cnt = 0
        losses = []
        scores = []
        score = 0
        for frame_idx in range(1, num_frames + 1):
            action = self.select_action(state)
            next_state, reward, done = self.step(action)
            state = next_state
            score += reward
            # NoisyNet: removed decrease of epsilon
            # if episode ends
            if done:
                # fix: reset through self.env (previously used the
                # module-global `env`, which breaks for other environments)
                state = self.env.reset()
                scores.append(score)
                score = 0
            # if training is ready
            if len(self.memory) >= self.batch_size:
                loss = self.update_model()
                losses.append(loss)
                update_cnt += 1
                # if hard update is needed
                if update_cnt % self.target_update == 0:
                    self._target_hard_update()
            # plotting
            if frame_idx % plotting_interval == 0:
                self._plot(frame_idx, scores, losses)
        self.env.close()
    def test(self) -> List[np.ndarray]:
        """Test the agent for one episode; return the rendered RGB frames."""
        self.is_test = True
        state = self.env.reset()
        done = False
        score = 0
        frames = []
        while not done:
            frames.append(self.env.render(mode="rgb_array"))
            action = self.select_action(state)
            next_state, reward, done = self.step(action)
            state = next_state
            score += reward
        print("score: ", score)
        self.env.close()
        return frames
    def _compute_dqn_loss(self, samples: Dict[str, np.ndarray]) -> torch.Tensor:
        """Return the smooth-L1 DQN loss for a sampled batch."""
        device = self.device  # for shortening the following lines
        state = torch.FloatTensor(samples["obs"]).to(device)
        next_state = torch.FloatTensor(samples["next_obs"]).to(device)
        action = torch.LongTensor(samples["acts"].reshape(-1, 1)).to(device)
        reward = torch.FloatTensor(samples["rews"].reshape(-1, 1)).to(device)
        done = torch.FloatTensor(samples["done"].reshape(-1, 1)).to(device)
        # G_t = r + gamma * max_a' Q_target(s_{t+1}, a')  if state != Terminal
        #     = r                                         otherwise
        curr_q_value = self.dqn(state).gather(1, action)
        next_q_value = self.dqn_target(next_state).max(
            dim=1, keepdim=True
        )[0].detach()
        mask = 1 - done  # zeroes the bootstrap term on terminal transitions
        target = (reward + self.gamma * next_q_value * mask).to(self.device)
        # calculate dqn loss
        loss = F.smooth_l1_loss(curr_q_value, target)
        return loss
    def _target_hard_update(self):
        """Hard update: target <- local."""
        self.dqn_target.load_state_dict(self.dqn.state_dict())
    def _plot(
        self,
        frame_idx: int,
        scores: List[float],
        losses: List[float],
    ):
        """Plot the training progresses (score and loss curves)."""
        clear_output(True)
        plt.figure(figsize=(20, 5))
        plt.subplot(131)
        plt.title('frame %s. score: %s' % (frame_idx, np.mean(scores[-10:])))
        plt.plot(scores)
        plt.subplot(132)
        plt.title('loss')
        plt.plot(losses)
        plt.show()
# ## Environment
#
# You can see the [code](https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py) and [configurations](https://github.com/openai/gym/blob/master/gym/envs/__init__.py#L53) of CartPole-v0 from OpenAI's repository.
# environment
# CartPole-v0: 4-dim observation, 2 discrete actions
env_id = "CartPole-v0"
env = gym.make(env_id)
# ## Set random seed
# +
seed = 777
def seed_torch(seed):
    """Seed torch's RNG and force deterministic cuDNN behavior."""
    torch.manual_seed(seed)
    if torch.backends.cudnn.enabled:
        # disable the auto-tuner and nondeterministic kernels for reproducibility
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
np.random.seed(seed)
seed_torch(seed)
env.seed(seed)
# -
# ## Initialize
# +
# parameters
num_frames = 10000      # total environment steps to train for
memory_size = 1000      # replay buffer capacity
batch_size = 32         # mini-batch size per gradient update
target_update = 100     # hard-sync the target net every N updates
# train
agent = DQNAgent(env, memory_size, batch_size, target_update)
# -
# ## Train
agent.train(num_frames)
# ## Test
#
# Run the trained agent (1 episode).
# run one evaluation episode, collecting rendered RGB frames
frames = agent.test()
# ## Render
# +
# Imports specifically so we can render outputs in Colab.
from matplotlib import animation
from JSAnimation.IPython_display import display_animation
from IPython.display import display
def display_frames_as_gif(frames):
    """Displays a list of frames as a gif, with controls."""
    # seed the axes with the first frame; animate() only swaps the image data
    patch = plt.imshow(frames[0])
    plt.axis('off')
    def animate(i):
        patch.set_data(frames[i])
    anim = animation.FuncAnimation(
        plt.gcf(), animate, frames = len(frames), interval=50
    )
    # JSAnimation renders the matplotlib animation as an interactive widget
    display(display_animation(anim, default_mode='loop'))
# display
display_frames_as_gif(frames)
| 05.noisy_net.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'>Decomposição em Valores Singulares<br><br> (Singular Value Decomposition - SVD)</font>
#
# ## Aplicações
#
# ### Material produzido por <NAME>
#
# ICMC-USP
#
# ----
# ## Conteúdo
# - Aplicações de SVD
# - Compressão de Informação
# - Semântica latente de coleção de documentos
# - Sistemas de recomendação
# ---
# # <font color='blue'>SVD e Compressão de Informação</font>
# A propriedade de aproximação de posto reduzido da decomposição SVD fornece um mecanismo de compressão de informação que pode ser utilizado em diversos cenários, incluindo compressão de imagens.
#
# Considere a imagem <font font-family='Verdana'> florbw.jpeg</font> disponível no repositório. Vamos comprimir a imagem utilizando SVD.
from PIL import Image # pacote python para tratamento de imagens
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# load the image and convert to grayscale + alpha ('LA' mode)
im = Image.open("florbw.jpeg").convert('LA')
# keep only the luminance channel as a 2-D matrix
X = np.asarray(im)[:,:,0]
print(X.shape)
plt.axis('off')
plt.imshow(X,cmap='gray')
# -
print(X[100:110,100:110])
# full SVD of the image: X = U @ diag(S) @ Vt
U,S,Vt = np.linalg.svd(X)
# +
k = 20
# rank-k reconstruction: keep only the k largest singular values/vectors
# Sk = np.diag(S[0:k])
# SkVt = np.dot(Sk,Vt[0:k,:])
# Xk = np.dot(U[:,0:k],SkVt)
Xk = np.linalg.multi_dot([U[:,0:k],np.diag(S[0:k]),Vt[0:k,:]])
plt.rcParams['figure.figsize'] = [12, 6]
# original (left) vs. rank-k compressed (right), side by side
f,(ax1,ax2) = plt.subplots(1,2)
ax1.axis('off')
ax1.imshow(X,cmap='gray')
ax2.axis('off')
ax2.imshow(Xk,cmap='gray')
# +
# compression ratio: storing U_k, S_k, Vt_k costs k*(rows+cols) entries
# instead of rows*cols for the full image
#bits_original = X.shape[0]*X.shape[1]*16
bits_original = X.itemsize*X.shape[0]*X.shape[1]
print('Numero de bites original: ',bits_original)
bits_comprimido = X.itemsize*k*(X.shape[0]+X.shape[1])
print('Numero de bites comprimido: ',bits_comprimido)
print('Porcentagem de compressão: ', 100-bits_comprimido*100/bits_original)
# -
# # <font color='blue'>Semântica Latente de Coleção de Documentos - (Latent Semantic Index - LSI)</font>
# Suponha que a matriz $\mathbf{A}$ corresponda a uma matriz _Documentos $\times$ Palavras_.
#
# **Referência**: [<NAME>, Latent Semantic Analysis](https://www.engr.uvic.ca/~seng474/svd.pdf)
# Vamos interpretar a matriz _Palavras $\times$ Documentos_ de modo que os documentos estejam nas colunas e as palavras nas linhas da matriz, ou seja:
#
# $$
# \underbrace{\left.\begin{array}{c}
# \begin{bmatrix}
# & & & \\
# & & & \\
# & & & \\
# & & &
# \end{bmatrix}
# \end{array}\right\}}_{documentos}\mbox{palavras}
# $$
# ## Extração de Tópicos (Latent Semantic Index - LSI)
# A ideia de extração de tópicos via SVD é utilizar SVD compacto (apenas $k$ autovetores e autovalores serão calculados - $\mathbf{U}_k\mathbf{\Sigma}_k\mathbf{V}_k$) para identificar quais palavras são mais representativas para cada documento.
#
# O fato importante é que as colunas de $\mathbf{\Sigma}_k\mathbf{V}_k^\top$ (linhas de $\mathbf{V}_k$) são os coeficientes utilizados para aproximar as colunas da matriz _Palavras $\times$ Documentos_ como combinação linear das colunas de $\mathbf{U}_k$. Desta forma, as colunas de $\mathbf{\Sigma}_k\mathbf{V}_k^\top$ fornecem as “coordenadas” de cada documento na base $\mathbf{U}_k$. Intuitivamente, documentos semelhantes devem possuir coordenadas semelhantes o que permite uma análise semântica dos documentos. Ou seja, as colunas de $\mathbf{\Sigma}_k\mathbf{V}_k^\top$ permitem encontrar documentos semelhantes.
#
# Outra observação importante é que as linhas de $\mathbf{U}_k$ contém a informação/relação entre termos, ou seja, as linhas de $\mathbf{U}_k$ refletem a “importância” das palavras em um determinado "tópico", os quais caracterizam os grupos de documentos. Assim, as colunas de $\mathbf{U}_k$ é o espaço de tópicos.
# Considere a relação:
#
# $$
# \mathbf{A}^\top \approx \mathbf{V}_k\mathbf{\Sigma}_k\mathbf{U}_k^\top
# \Longrightarrow \mathbf{V}_k \approx \mathbf{A}^\top\mathbf{U}_k\mathbf{\Sigma}_k^{-1}
# $$
#
# $\mathbf{A}^\top\mathbf{U}_k$ é a projeção de documentos no espaço de "tópicos". Se considerarmos um único documento $\mathbf{a}$, temos que
#
# $$
# \hat{\mathbf{a}}=\mathbf{a}^\top\mathbf{U}_k\mathbf{\Sigma}_k^{-1}
# $$
#
# Comparando $\hat{\mathbf{a}}$ com as linhas de $\mathbf{V}_k$ podemos recuperar os documentos mais similares à $\mathbf{a}$.
# ## Semântica Latente com Python
#
# Para exemplificar o uso de LSI utilizaremos a base de dados disponível no arquivo <font font-family='Verdana'> bag_of_words.csv </font>.
# +
import pandas as pd
import numpy as np
# the BoW bag_of_words_c2.csv was extracted from a document collection
# containing messages on two subjects: 'hockey' and 'vendas' (sales)
bow = pd.read_csv('../Datasets/bag_of_words_c2.csv')
bow.set_index('Unnamed: 0',inplace=True)
# +
# transpose the corpus so it is in Words x Documents form
bow = bow.T
print(bow.head())
# +
# apply truncated SVD to perform latent semantic analysis (LSI)
from sklearn.decomposition import TruncatedSVD
A = bow.values
r = 5
tsvd = TruncatedSVD(n_components=r)
# rows of A projected onto the r latent components
U = tsvd.fit_transform(A)
# NOTE(review): this is the explained-variance ratio, not the singular
# values; if Sigma is meant to be the spectrum, use tsvd.singular_values_
Sigma = tsvd.explained_variance_ratio_
Vt = tsvd.components_
# -
print(A.shape)
print(U.shape)
print(Vt.shape)
# $$
# \begin{bmatrix}
# & | & \\
# \cdots & A_i & \cdots \\
# & | &
# \end{bmatrix}\approx
# \begin{bmatrix}
# | & | & \\
# U_1 & U_2 & \cdots \\
# | & | &
# \end{bmatrix}\Sigma_k
# \begin{bmatrix}
# & | & \\
# \cdots & V^\top_i & \cdots \\
# & | &
# \end{bmatrix}=
# \sqrt{\lambda_1}V^\top_{1i}
# \begin{bmatrix}
# | \\
# U_1 \\
# |
# \end{bmatrix}+
# \sqrt{\lambda_2}V^\top_{2i}
# \begin{bmatrix}
# | \\
# U_2 \\
# |
# \end{bmatrix}+\cdots
# $$
# +
# cluster the columns of Vt (document coordinates) to find similar documents
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
kmeans = KMeans(n_clusters=15, random_state=1).fit(Vt.T)
sim_docs_km = kmeans.labels_
#print(sim_docs_km)
# agg = AgglomerativeClustering(linkage='single', n_clusters=5).fit(Vt.T)
# sim_docs_agg = agg.labels_
docs = bow.columns.values
# list the documents assigned to each cluster
for i in np.unique(sim_docs_km):
    docs_ids = np.argwhere(sim_docs_km == i)
    print(i,'-->',docs[docs_ids])
# +
# interpret the topics by clustering the columns of U
from sklearn.cluster import KMeans
# NOTE(review): U from fit_transform has shape (rows_of_A, r), so U.T is
# (r, rows_of_A) and KMeans groups the r components -- confirm intended
kmeans_U = KMeans(n_clusters=2, random_state=1).fit(U.T)
topics = kmeans_U.labels_
words = bow.index.values
for i in np.unique(topics):
    cid = np.argwhere(topics == i).flatten()
    print(i,cid)
    # top-10 words by total loading over the components in this cluster
    T = np.argsort(np.sum(U[:,cid], axis=1))
    print(words[T[-10:]])
# -
# ## <font color='blue'> Sistema de Recomendação </font>
#
# **Referência**:
# - [Matrix Factorization for Recomendation Systems](https://www.asc.ohio-state.edu/statistics/dmsl/Koren_2009.pdf)
# - [SINGULAR VALUE DECOMPOSITION INRECOMMENDER SYSTEMS](https://repository.tcu.edu/bitstream/handle/116099117/11320/Nguyen__Anh-Honors_Project.pdf?sequence=1)
# O problema de recomendação consiste em encontrar o produto mais provável de compra por um consumidor. Em geral, tais sistemas tomam como base uma matriz onde as linhas são os consumidores e as colunas são as avaliações feitas pelos consumidores para cada produto, chamada de matriz **consumidor-produto**. Por exemplo:
#
# $$
# \begin{matrix}
# & prod 1 & prod 2 & prod 3 & prod 4 & prod 5 & prod 6 & prod 7\\\hline
# \mbox{Joao} & & 1 & & & 5 & & 4\\
# \mbox{Mari} & & & & 2 & 4 & & \\
# \mbox{Jose} & & 5 & & 1 & & 1 & 3\\
# \mbox{Carl} & 2 & 5 & & & & 4 & 3\\
# \mbox{Nabi} & & & & 3 & 5 & 1 & \\
# \mbox{Luci} & 3 & 4 & & 3 & & & 1\\
# \end{matrix}
# $$
#
# O processo de recomendação consiste em 4 etapas:
# 1. preencher os dados faltantes na matriz consumidor-produto
# 2. calcular a decomposição SVD da matriz preenchida
# 3. considerar uma aproximação de posto $k$ na decomposição SVD
# 4. calcular o coeficiente $c_p$ que indica qual a importancia dos produtos para cada consumidor.
#
# **Preenchimento da matriz**<br>
# Existem muitas estratégias diferentes para o preenchimento de dados faltantes, como por exemplo preenchimento com zeros ou com a média das avaliações de cada produto.
#
# **Coeficiente $c_p$** <br>
# Dada a matriz $\mathbf{U}_k$ ($k$-colunas), $\Sigma_k$ e $\mathbf{V}_k$ obtidas do
# SVD com aproximação de posto $k$, o coeficiente $r(i,j)$ do consumidor $i$ com relação ao produto $j$ é calculado como:
#
# $$
# \mathbf{C} = \mathbf{U}_k\Sigma_k^{1/2}\\
# \mathbf{P} = \Sigma_k^{1/2}\mathbf{V}^{\top}_k\\ \\
# r(i,j) = <C[i],P[:,j]>
# $$
#
# quanto maior o valor de $r(i,j)$, mais importante é o produto $j$ para o consumidor $i$.
#
# O raciocínio do método acima é que as **linhas** de $\mathbf{C}$ geram uma representação latente (representação abstrata) onde os consumidores são mapeados. Da mesma forma, as colunas de $\mathbf{P}$ são uma representação latente para os produtos.
#
# O coeficiente $r(i,j)$ mede o quão correlacionado estão o consumidor $i$ com o produto $j$. Se um consumidor $i$ tem alta correlação com o produto $j$ mas nunca adquiriu tal produto, $j$ é recomendado para o consumidor $i$.
# +
# consumer x product ratings matrix; 0 marks a missing rating
CxP = np.array([[0, 1, 0, 0, 5, 0, 4],
                [0, 0, 0, 2, 4, 0, 0],
                [0, 5, 0, 1, 0, 1, 3],
                [2, 5, 0, 0, 0, 4, 3],
                [0, 0, 0, 3, 5, 1, 0],
                [3, 4, 0, 3, 0, 0, 1]]).astype(float)
consumidores = ['Joao','Mari','Jose','Carl','Nabi','Luci']
# alternative fill strategy (disabled): replace zeros by each column's mean
# meanc = np.mean(CxPo,axis=0)
# zeros = np.where(CxPo==0)
# CxP = CxP.copy()
# CxP[zeros] = np.take(meanc, zeros[1])
# +
U,S,Vt = np.linalg.svd(CxP)
# latent representations: consumers C = U_k * sqrt(S_k), products P = sqrt(S_k) * Vt_k
k = 3
C = np.dot(U[:,:k],np.diag(np.sqrt(S[:k])))
P = np.dot(np.diag(np.sqrt(S[:k])),Vt[:k,:])
# pick a random consumer and rank products by relevance r(i,j) = <C[i], P[:,j]>
i = int(np.random.randint(0,CxP.shape[0],1))
#i=4
rij = np.dot(C[i,:],P).ravel() # relevance of each product for consumer i
sij = np.argsort(rij)[::-1]
print('produtos mais importantes para o consumidor',i,'\n',sij)
print('avaliacao dos pelo consumidor',i,'\n',CxP[i])
# +
# sanity check: find the consumers most similar to consumer i via
# cosine similarity between C[i] and every consumer's latent vector
cij = np.dot(C[i,:]/np.linalg.norm(C[i,:]),C.T/np.linalg.norm(C.T,axis=0))
cmm = np.argsort(cij)[::-1].ravel()
# print(cmm)
# print(np.dot(C[i,:]/np.linalg.norm(C[i,:]),C.T[:,1]/np.linalg.norm(C.T[:,1],axis=0)))
# print(CxPo[1])
print('produtos recomendados ao',consumidores[i],sij,'\n')
print(4*' ',np.arange(7))
print(10*'--')
print(consumidores[i],
      CxP[i].ravel().astype(int)) # the chosen consumer
for j in cmm:
    if j != i:
        print(consumidores[j],CxP[j].astype(int))
| 2021/USP/linear-algebra/svd aplicacoes (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from mido import MidiFile, MidiTrack, Message
import numpy as np
import mido
import tensorflow as tf
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
# %matplotlib inline
def prepData(mid):
    """Flatten a MIDI file into parallel note/velocity/time/on-off lists.

    Args:
        mid: a mido.MidiFile; iterating it yields messages whose .time is
            the delta in seconds from the previous message.

    Returns:
        (noteValue, velValue, tstamp, onoff): pitch, velocity, absolute
        time of each note event, and 1 for note_on / 0 for note_off.
    """
    notes = []
    time = float(0)   # absolute time accumulated over message deltas
    prev = float(0)   # absolute time of the previous kept note event
    j = 1
    noteValue = []
    velValue = []
    tstamp = []
    onoff = []
    for i,msg in enumerate(mid):
        ### this time is in seconds, not ticks
        #print(j, msg)
        fmsg = msg
        time += msg.time
        # print (i, msg, time)
        if not msg.is_meta:
            ### only interested in piano channel
            # NOTE(review): `> -1` accepts every channel, not just piano --
            # confirm whether a specific channel filter was intended
            if msg.channel > -1:
                if msg.type == 'note_on' or msg.type == 'note_off':
                    # note in vector form to train on
                    ns = 0
                    if msg.type == 'note_on':
                        ns = 1
                    else:
                        ns = 0
                    note = msg.bytes()
                    #print(j,note, time)
                    #print(fmsg)
                    # only interested in the note and velocity. note message is in the form of [type, note, velocity]
                    #print ('stage 1 ' + str(j), time - prev, note)
                    note = note[1:3]
                    #print('stage 2 ' + str(j), time - prev, note)
                    # third element: delta time since the last kept note event
                    note.append(time-prev)
                    #print('stage 3 ' + str(j), time - prev, note)
                    prev = time
                    notes.append(note)
                    # print(j, note[0], note[1], time, fmsg)
                    j = j + 1
                    noteValue.append(note[0])
                    velValue.append(note[1])
                    tstamp.append(time)
                    onoff.append(ns)
    # print(notes)
    return noteValue, velValue, tstamp, onoff
    # print(noteValue, velValue, tstamp)
# print(noteValue, velValue, tstamp)
def encoderNote(note, velocity, time, status):
    """Pair each note-on with its matching note-off.

    For every onset (status == 1), the first later event with the same
    pitch that is a note-off (status == 0) or has zero velocity is taken
    as its release, producing rows [pitch, velocity, start, end-info].

    NOTE(review): the first onset (i == 0) stores absolute times
    (time[0], time[j]) and does not advance lastGreen, while later onsets
    store (delta since previous onset, duration). This asymmetry is kept
    as-is; confirm it is intentional.
    """
    def _match_off(i):
        # index of the first event after i releasing pitch note[i], or None
        for j in range(i + 1, len(note)):
            if (status[j] == 0 or velocity[j] == 0) and note[i] == note[j]:
                return j
        return None

    encodedNotes = []
    lastGreen = 0
    for i in range(len(time)):
        if status[i] != 1:
            continue
        j = _match_off(i)
        if j is None:
            continue
        if i == 0:
            encodedNotes.append([note[0], velocity[0], time[0], time[j]])
        else:
            encodedNotes.append(
                [note[i], velocity[i], time[i] - lastGreen, time[j] - time[i]]
            )
            lastGreen = time[i]
    return encodedNotes
from sklearn.preprocessing import MinMaxScaler
def divideED(encodedNotes):
    """Min-max scale the encoded note rows to [0, 1] per column.

    Returns:
        (scaled_notes, scaler): the scaled array and the fitted
        MinMaxScaler, needed later to invert the transform.
    """
    # the per-column split below is retained from an earlier version
    # but is no longer returned
    note = []
    velocity = []
    tstamp = []
    span = []
    scaler = MinMaxScaler()
    scaler.fit(encodedNotes)
    encodedNotes = scaler.transform(encodedNotes)
    for item in encodedNotes:
        note.append(item[0])
        velocity.append(item[1])
        tstamp.append(item[2])
        span.append(item[3])
    # return note, velocity, tstamp, span, scaler
    return encodedNotes, scaler
def combineDD(note, scale):
    """Undo the min-max scaling applied by divideED using the fitted scaler."""
    return scale.inverse_transform(note)
# +
def decoderNote(note):
    """Turn [pitch, velocity, delta, span] rows back into timed on/off events.

    First builds "live" (note-on) events at absolute times by accumulating
    the per-note deltas, then inserts a matching zero-velocity ("dead")
    event `span` seconds after each onset, keeping the list time-ordered.

    Returns:
        list of [pitch, velocity, absolute_time] events.
    """
    #populate only live notes
    liveNotes = []
    tCounter = 0   # running absolute time
    for i, n in enumerate(note):
        if i == 0:
            liveNotes.append([n[0], n[1], n[2]])
            tCounter = n[2]
        else:
            liveNotes.append([n[0], n[1], tCounter + n[2]])
            tCounter = n[2] + tCounter
    # print ('livenotes: ', liveNotes)
    #insert dead notes
    deadNotes = []
    for item in liveNotes:
        deadNotes.append(item)
    # print(len(note), len(liveNotes))
    for i, ln in enumerate(liveNotes):
        if i < len(note):
            start = ln[2]
            span = note[i][3]   # how long this note sounds
            end = start + span
            # insert the note-off between the two neighbouring events whose
            # times bracket `end` (deadNotes grows while we scan it)
            for j, dn in enumerate(deadNotes):
                if j < len(deadNotes) - 1:
                    if end >= dn[2] and end <= deadNotes[j+1][2]:
                        deadNotes.insert(j + 1, [ln[0],0,end])
                        break
                elif j == len(deadNotes) - 1:
                    # past every existing event: append at the end
                    if end >= dn[2]:
                        deadNotes.insert(j + 1, [ln[0],0,end])
                        break
    return deadNotes
# -
#create midi file with the decoded output
def createMusic(decodedNotes, name, ins):
mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)
notes = []
ch = 0
if ins == 'drum':
ch = 9
elif ins == 'piano':
ch = 0
for i, note in enumerate(decodedNotes):
if i > 0:
notes.append([decodedNotes[i][0], decodedNotes[i][1], decodedNotes[i][2] - decodedNotes[i-1][2]])
else:
notes.append(note)
# print(notes)
for i, note in enumerate(notes):
note = np.insert(note, 0, 147)
bytes = note.astype(int)
msg = Message.from_bytes(bytes[0:3])
#time = int(note[3]/0.001025) # to rescale to midi's delta ticks. arbitrary value for now.
time = int(note[3]/0.001025)
msg.channel = ch
msg.time = time
# print (i, msg)
track.append(msg)
mid.save(name + '.mid')
def createRNNData_seq(notes, n_prev):
    """Build overlapping training sequences for next-step prediction.

    Each input is a window of `n_prev` consecutive notes; its target is the
    same window shifted one step forward, so the model learns to predict
    the (n_prev+1)-th note from the previous n_prev.

    Args:
        notes: sequence of note vectors.
        n_prev: window length.

    Returns:
        (X, Y): lists of input windows and one-step-shifted target windows.
    """
    # fix: a `seed = notes[0:n_prev]` local was computed here but never
    # used or returned; the dead code has been removed
    X = []
    Y = []
    for i in range(len(notes) - n_prev):
        X.append(notes[i:i + n_prev])
        Y.append(notes[i + 1:i + n_prev + 1])
    return X, Y
def deScale(prediction):
    """Clamp decoded rows in place to valid MIDI ranges and return the list.

    Per row: index 0 (pitch) is truncated to int and clamped to [24, 102];
    index 1 (velocity) is truncated to int and clamped to [0, 127];
    indices 2 and 3 (time fields) are floored at 0.
    """
    for row in prediction:
        row[0] = min(max(int(row[0]), 24), 102)
        row[1] = min(max(int(row[1]), 0), 127)
        row[2] = max(row[2], 0)
        row[3] = max(row[3], 0)
    return prediction
def getDataFiles(dir_name, nSongs):
    """Encode the first `nSongs` MIDI files found in `dir_name`.

    Args:
        dir_name: directory containing the training .mid files.
        nSongs: maximum number of files to process.

    Returns:
        Flat list of encoded [pitch, velocity, delta, span] rows from all
        processed songs, in file order.
    """
    # fix: the dir_name argument was previously ignored in favor of a
    # hard-coded './train/' path (the caller passes './train/', so this
    # change is backward compatible)
    onlyfiles = [f for f in listdir(dir_name) if isfile(join(dir_name, f))]
    notes_lib = []
    for filename in onlyfiles[:nSongs]:
        mid = MidiFile(join(dir_name, filename))
        note, velocity, time, onoff = prepData(mid)
        notes_lib.extend(encoderNote(note, velocity, time, onoff))
    return notes_lib
# load and encode up to 50 songs from the training directory
training_data_set = getDataFiles('./train/', 50)
# print(training_data_set[0:50])
#create rationalized data set
def ratNotes(training_data_set):
    """Merge simultaneous encoded notes into 91-dim chord vectors.

    Each output vector holds: indices 0-87 as a binary piano-roll over
    pitches 24-111, index 88 the velocity, 89 the delta time and 90 the
    span of the chord.  Rows whose successor has zero delta (notes struck
    together) are folded into one vector; `more2come` flags that the
    current chord is still being assembled.

    NOTE(review): the last input row is always skipped (the final
    `else: pass` branch) -- confirm this is intentional.
    """
    rat_notes = []
    notes = [0 for i in range(91)]
    more2come = 0
    for i, note in enumerate(training_data_set):
        if i < len(training_data_set) - 1:
            if training_data_set[i+1][2] > 0:
                # next note starts later: this chord ends here
                notes[note[0] - 24] = 1
                notes[88] = note[1]
                notes[89] = note[2]
                notes[90] = note[3]
                more2come = 0
            else:
                # next note starts at the same instant: maybe the same chord
                if note[2] > 0 and note[1] == training_data_set[i+1][1]:
                    notes[note[0] - 24] = 1
                    notes[88] = note[1]
                    notes[89] = note[2]
                    notes[90] = note[3]
                    more2come = 1
                else:
                    if training_data_set[i+1][2] == 0 and training_data_set[i][2] == 0 and training_data_set[i+1][1] == training_data_set[i][1]:
                        if training_data_set[i+1][3] == training_data_set[i][3]:
                            # identical velocity, delta and span: keep accumulating
                            notes[note[0] - 24] = 1
                            notes[88] = note[1]
                            notes[89] = note[2]
                            notes[90] = note[3]
                            more2come = 1
                        else:
                            notes[note[0] - 24] = 1
                            notes[88] = note[1]
                            notes[89] = note[2]
                            notes[90] = note[3]
                            more2come = 0
                    else:
                        notes[note[0] - 24] = 1
                        notes[88] = note[1]
                        notes[89] = note[2]
                        notes[90] = note[3]
                        more2come = 0
            if more2come == 0:
                # chord complete: emit it and start a fresh vector
                rat_notes.append(notes)
                notes = [0 for i in range(91)]
                more2come = 0
        else:
            pass
    return rat_notes
# for i, note in enumerate(rat_notes):
# if i < 10:
# print(note)
# +
#de_rat notes
# rat_notes = ratNotes(training_data_set)
# print(rat_notes[0:5])
def unratNotes(rat_notes):
    """Expand 91-dim chord vectors back into per-note rows.

    Every set pitch flag (indices 0-87) yields one
    [pitch, velocity, delta, span] row sharing the vector's timing fields,
    where pitch = flag index + 24.
    """
    return [
        [i + 24, int(vec[88]), vec[89], vec[90]]
        for vec in rat_notes
        for i, flag in enumerate(vec[:88])
        if flag > 0
    ]
# print(unrat_notes[0:10])
# -
# rationalize the raw notes into chord vectors, then scale to [0, 1]
scaled_notes, scaler = divideED(ratNotes(training_data_set))
# print(scaled_notes[1:5])
# +
# TF1-style graph construction: clear any previously built graph
tf.reset_default_graph()
# input feature size: one 91-dim chord vector per time step
num_inputs = 91
# LSTM hidden size
num_neurons = 512
# output size matches the input chord-vector size
num_outputs = 91
# learning rate, 0.0001 default, but you can play with this
learning_rate = 0.0005
# how many iterations to go through (training steps), you can play with this
num_train_iterations = 300
# Size of the batch of data
batch_size = 1
# Num of steps in batch (also used for prediction steps into the future)
num_time_steps = 3
# NOTE(review): n_layers is set to 8 but the graph below stacks only 2
# LSTM cells -- this constant is currently unused
n_layers = 8
n_neurons = num_neurons
# -
X_batch, y_batch = createRNNData_seq(scaled_notes, num_time_steps)
# print(X_batch[0], y_batch[0])
# +
# placeholders for input windows and their one-step-shifted targets
X = tf.placeholder(tf.float32, [None, num_time_steps, num_inputs])
y = tf.placeholder(tf.float32, [None, num_time_steps, num_outputs])
# cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.BasicLSTMCell(num_units=num_neurons)])
# cell = tf.contrib.rnn.OutputProjectionWrapper(cell, output_size=num_outputs)
# cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.LSTMCell(num_units=num_neurons) for i in range(n_layers)])
# two stacked LSTM cells, projected to the 91-dim output at each time step
cell = tf.contrib.rnn.MultiRNNCell([tf.contrib.rnn.LSTMCell(num_units=num_neurons), tf.contrib.rnn.LSTMCell(num_units=num_neurons)])
cell = tf.contrib.rnn.OutputProjectionWrapper(cell, output_size=num_outputs)
outputs, states = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)
loss = tf.reduce_mean(tf.square(outputs - y)) # MSE
# loss = tf.reduce_mean(tf.abs(outputs - y)) # MAE alternative (disabled)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# +
# y_pred = []
# music = []
# seed_in = []
# with tf.Session() as sess:
# sess.run(init)
# for iteration in range(num_train_iterations):
# # X_batch, y_batch = ts_data.next_batch(batch_size, num_time_steps)
# sess.run(train, feed_dict={X: X_batch, y: y_batch})
# if iteration % 10 == 0:
# mse = loss.eval(feed_dict={X: X_batch, y: y_batch})
# print(iteration, "\tMSE:", mse)
# # Save Model for Later
# saver.save(sess, "./lstm_new")
# seed = np.array(X_batch[100]).reshape(1, num_time_steps, 91)
# for iteration in range(5):
# y_out = sess.run(outputs, feed_dict={X: seed})
# y_pred.append(y_out)
# seed_in.append(seed)
# print('input ', seed)
# print('pred ', y_out)
# music.append(y_out)
# seed = y_out
# +
# # #Generating New Sequences from saved model
# generate new note sequences from the saved model checkpoint
y_pred = []
music = []
seed_in = []
full_notes = []
new_notes = []
with tf.Session() as sess:
    saver.restore(sess, "./lstm_new")
    # start from a window taken out of the training data
    seed = np.array(X_batch[1200]).reshape(1, num_time_steps, 91)
    music.append(seed)
    for item in seed:
        for note in item:
            new_notes.append(note)
    # seed = np.array(X_batch).reshape(1, num_time_steps, 4)
    for iteration in range(5):
        y_out = sess.run(outputs, feed_dict={X: seed})
        # the last time step of the prediction is the newly generated chord
        new_note = y_out[0][num_time_steps - 1]
        y_pred.append(y_out)
        seed_in.append(seed)
        # print('input ', seed)
        # print('pred ', y_out)
        music.append(new_note)
        new_notes.append(new_note)
        full_notes.append(new_note)
        # slide the window: feed the three most recent chords back in
        if len(new_notes) > 3:
            lt = len(new_notes)
            seed = [[new_notes[lt-3],new_notes[lt-2],new_notes[lt-1]]]
        else:
            seed = [[seed[0][1],seed[0][2],new_note]]
# -
# seed = np.array(X_batch[10]).reshape(1, num_time_steps, 4)
# print(seed)
# print(y_out)
# gather the generated chords and undo the min-max scaling
music = []
for item in full_notes:
    music.append(item)
decode = combineDD(music, scaler)
# +
# binarize the 88 pitch flags: treat anything >= 0.1 as a struck note
# (clubthis holds the rows of decode, so decode is modified in place too)
clubthis = [x for x in decode]
for item in clubthis:
    for j, code in enumerate(item):
        if j < 88:
            if code < 0.1:
                item[j] = 0
            else:
                item[j] = 1
# -
# expand chords into single notes, then clamp to valid MIDI ranges
muzik = unratNotes(decode)
cleanseNotes = deScale(muzik)
# print(cleanseNotes[0:10])
#basic cleansing
#correct time elements through clustering
# Fitting K-Means to the dataset
from sklearn.cluster import KMeans
# cluster the (delta, span) pairs from the training data; generated
# timings are later snapped to the nearest cluster centroid
X = [[x[2], x[3]] for x in training_data_set]
kmeans = KMeans(n_clusters = 50, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(X)
centroid = kmeans.cluster_centers_
# +
# snap each generated (delta, span) to its nearest training centroid
for item in cleanseNotes:
    x_sample = [[item[2], item[3]]]
    y_pred_c = kmeans.predict(x_sample)
    cluster = y_pred_c[0]
    item[2] = centroid[cluster][0]
    item[3] = centroid[cluster][1]
# normalize: integer pitch/velocity, and drop tiny deltas to zero
for item in cleanseNotes:
    item[0] = int(item[0])
    item[1] = int(item[1])
    if item[2] < 0.1:
        item[2] = 0
# print(cleanseNotes[0:10])
# -
# convert back to timed on/off events and write both renderings to disk
decoded = decoderNote(cleanseNotes)
# print(decoded[0:20])
createMusic(decoded, 'piano_2_adj', 'piano')
createMusic(decoded, 'drum_2_adj', 'drum')
# createMusicF(decoded, 'genesis_join_1', 'drum')
# createMusic(decoded, 'genesis_drum_something_5adj', 'drum')
# Visualising the clusters
import matplotlib.pyplot as plt
# scatter the 50 (delta, span) centroids learned above
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:, 1], s = 300, c = 'yellow', label = 'Centroids')
plt.title('Time of Event')
plt.xlabel('g2g')
plt.ylabel('g2r')
plt.legend()
plt.show()
# ## Author
#
# Mohneesh Saxena
# - https://www.linkedin.com/in/mohneesh-saxena-33243616
# - https://twitter.com/mohneesh.saxena
# - https://github.com/mohneesh-saxena/Music-Composer-AI
#
# ## License
#
# This project is free to use according to the [MIT License]
| Model/models/MusicComposerAI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext nb_black
# +
import warnings
from itertools import product
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from graspy.plot import heatmap
from graspy.simulations import er_np, sbm
from graspy.utils import symmetrize
from joblib import Parallel, delayed
from scipy.stats import ttest_ind, wilcoxon, mannwhitneyu, truncnorm
warnings.filterwarnings("ignore")
# %matplotlib inline
# +
def generate_pop(m, mean_1, mean_2, var_1, var_2, block_1=5, block_2=15):
    """Sample two populations of m weighted, undirected graphs.

    pop_1: ER graphs whose edge weights are truncated normals with
    (mean_1, var_1), clipped to [-1, 1].
    pop_2: SBM graphs where only the first (block_1 x block_1) community
    draws weights around mean_2; every other block matches pop_1's weight
    distribution, so the populations differ only on those block_1 nodes.
    """
    pop_1 = []
    pop_2 = []
    for _ in range(m):
        # seeds are needed for joblib and scipy random functions
        # numpy random is not affected by joblib
        seeds = np.random.randint(0, 2147483647, size=4)
        n = [block_1, block_2]
        p = [[1, 1], [1, 1]]  # fully connected; only the weights differ
        sd_1 = np.sqrt(var_1)
        sd_2 = np.sqrt(var_2)
        # flip sign of mean_2 at random
        # if np.random.binomial(1, 0.5):
        #     mean_2 = -mean_2
        # deal with clip values
        # a/b are truncation bounds in standard-deviation units keeping the
        # weights inside [-1, 1]
        a_1 = (-1 - mean_1) / sd_1
        b_1 = (1 - mean_1) / sd_1
        a_2 = (-1 - mean_2) / sd_2
        b_2 = (1 - mean_2) / sd_2
        wt_func = [[truncnorm.rvs, truncnorm.rvs], [truncnorm.rvs, truncnorm.rvs]]
        wt_args_1 = dict(a=a_1, b=b_1, loc=mean_1, scale=sd_1, random_state=seeds[0])
        # NOTE(review): the (0, 0) block uses bounds a_2/b_2 (derived from
        # sd_2) but scale=sd_1 — consistent only when var_1 == var_2, as in
        # these experiments. Confirm if the variances ever differ.
        wt_args_2 = [
            [
                dict(a=a_2, b=b_2, loc=mean_2, scale=sd_1, random_state=seeds[1]),
                dict(a=a_1, b=b_1, loc=mean_1, scale=sd_1, random_state=seeds[2]),
            ],
            [
                dict(a=a_1, b=b_1, loc=mean_1, scale=sd_1, random_state=seeds[2]),
                dict(a=a_1, b=b_1, loc=mean_1, scale=sd_1, random_state=seeds[3]),
            ],
        ]
        pop_1.append(
            er_np(np.sum(n), 1.0, directed=False, wt=truncnorm.rvs, wtargs=wt_args_1)
        )
        pop_2.append(sbm(n, p, directed=False, wt=wt_func, wtargs=wt_args_2))
    return np.array(pop_1), np.array(pop_2)
def compute_statistic(test, pop1, pop2):
    """Edge-wise test statistics between two populations of graphs.

    For the t-test, returns the (NaN-suppressed) statistic matrix computed
    vectorised over the sample axis. For any other test, returns the
    symmetrised matrix of per-edge p-values (identical edges get 0).
    """
    if test.__name__ == "ttest_ind":
        stats = ttest_ind(pop1, pop2, axis=0)[0]
        np.nan_to_num(stats, copy=False)
        return stats
    num_nodes = pop1.shape[-1]
    stats = np.zeros((num_nodes, num_nodes))
    for row in range(num_nodes):
        for col in range(row, num_nodes):
            left, right = pop1[:, row, col], pop2[:, row, col]
            # identical samples would make the test degenerate; leave 0
            if not np.array_equal(left, right):
                _, stats[row, col] = test(left, right)
    return symmetrize(stats, method="triu")
def compute_pr_at_k(different_n, k, test_statistics, test):
    """Precision@k and recall@k for recovering the truly-different edges.

    The first `different_n` nodes are the ground-truth different block.
    T-test statistics rank descending (larger = more significant); p-value
    based tests rank ascending.
    """
    num_nodes = test_statistics.shape[0]
    truth = np.zeros((num_nodes, num_nodes))
    truth[:different_n, :different_n] = 1
    upper = np.triu_indices_from(test_statistics, k=1)
    scores = np.abs(test_statistics[upper])
    flat_truth = truth[upper]
    order = np.argsort(scores)
    if test.__name__ == "ttest_ind":
        order = order[::-1]
    ranked = flat_truth[order]
    precision_at_k = ranked[:k].mean()
    recall_at_k = ranked[:k].sum() / ranked.sum()
    return precision_at_k, recall_at_k
def compute_trustworthiness(pvals):
    """Fraction (and all-or-nothing flag) of upper-triangle p-values <= 0.05."""
    upper = np.triu_indices(pvals.shape[0], k=1)
    significant = pvals[upper] <= 0.05
    return significant.mean(), significant.all()
# -
def run_experiment(m, mean_1, mean_2, var_1, var_2, reps):
    """Average precision/recall@k (k=1..10) over `reps` repetitions.

    Each repetition draws fresh populations and scores all three tests;
    returns a flat row: [mean_1, mean_2, m, precisions..., recalls...],
    matching the `cols` layout used when building the results DataFrame.
    """
    tests = (ttest_ind, wilcoxon, mannwhitneyu)
    all_precisions = []
    all_recalls = []
    for _ in range(reps):
        rep_precisions = []
        rep_recalls = []
        pop1, pop2 = generate_pop(
            m=m, mean_1=mean_1, mean_2=mean_2, var_1=var_1, var_2=var_2
        )
        for test in tests:
            stats = compute_statistic(test, pop1, pop2)
            for k in range(1, 11):
                prec, rec = compute_pr_at_k(5, k, stats, test)
                rep_precisions.append(prec)
                rep_recalls.append(rec)
        all_precisions.append(rep_precisions)
        all_recalls.append(rep_recalls)
    mean_precisions = np.array(all_precisions).mean(axis=0)
    mean_recalls = np.array(all_recalls).mean(axis=0)
    return [mean_1, mean_2, m, *mean_precisions, *mean_recalls]
# +
# Experiment grid: 50 effect sizes (mean_2) x 50 sample sizes (m),
# 100 repetitions each; both variances held fixed at 1/3.
spacing = 50
delta = 0.05
mean_1 = 0
mean_2s = np.linspace(0, 1, spacing + 1)[1:]
var_1 = 1 / 3
var_2 = 1 / 3
ms = np.linspace(0, 500, spacing + 1).astype(int)[1:]
reps = 100
# +
args = [(m, mean_1, mean_2, var_1, var_2, reps) for m, mean_2 in product(ms, mean_2s)]
# n_jobs=-3 runs on all cores but two
res = Parallel(n_jobs=-3, verbose=1)(delayed(run_experiment)(*arg) for arg in args)
# -
# Column layout mirrors the row returned by run_experiment: the two means,
# the sample size, then precision@k and recall@k (k = 1..10) for each test.
cols = [
    "mean1",
    "mean2",
    "m",
    *[
        f"{test.__name__}_precision_at_{k}"
        for test in [ttest_ind, wilcoxon, mannwhitneyu]
        for k in range(1, 11)
    ],
    *[
        f"{test.__name__}_recall_at_{k}"
        for test in [ttest_ind, wilcoxon, mannwhitneyu]
        for k in range(1, 11)
    ],
]
res_df = pd.DataFrame(res, columns=cols)
res_df.to_csv("./results/20200204_change_means_results.csv", index=False)
# # Figures
# Reload from disk so the figure cells can run without redoing the sweep.
res_df = pd.read_csv("./results/20200204_change_means_results.csv")
# +
# Reshape each flat results column into a (sample size x effect size) grid;
# flipud puts the largest sample size on the top row of the heatmap.
size = np.sqrt(res_df.shape[0]).astype(int)
ttest_prec = np.flipud(res_df.ttest_ind_precision_at_10.values.reshape(-1, size))
wilcoxon_prec = np.flipud(res_df.wilcoxon_precision_at_10.values.reshape(-1, size))
mannwhitney_prec = np.flipud(
    res_df.mannwhitneyu_precision_at_10.values.reshape(-1, size)
)
vmin = 0
vmax = 1
p = 0.5
spacing = 50
# Tick labels: show every 10th effect size / sample size.
deltas = np.linspace(0, 1, spacing + 1)[::10]
deltas[0] += 0.02  # first grid point is 0.02, not 0 — TODO confirm offset
ms = np.linspace(0, 500, spacing + 1)[::10] * 2
ms[0] += 20
fmt = lambda x: "{:.2f}".format(x)
ms = ["{:.0f}".format(m) for m in ms][::-1]
with sns.plotting_context("talk", font_scale=1.25):
    # fig, ax = plt.subplots(figsize=(10, 10))
    # Three heatmap panels share the narrow fourth axis as a common colorbar.
    fig, ax = plt.subplots(
        1,
        4,
        gridspec_kw={"width_ratios": [1, 1, 1, 0.05]},
        figsize=(19, 6),
        constrained_layout=True,
    )
    sns.heatmap(
        ttest_prec,
        ax=ax[0],
        square=True,
        center=0,
        cmap="RdBu_r",
        cbar_kws=dict(shrink=0.7),
        xticklabels=deltas,
        yticklabels=ms,
        cbar_ax=ax[-1],
        vmin=vmin,
        vmax=vmax,
    )
    ax[0].set_xticks(np.arange(0, ax[0].get_xlim()[1] + 1, 10))
    ax[0].set_yticks(np.arange(0, ax[0].get_ylim()[0] + 1, 10))
    ax[0].set_title("T-Test Precision@10")
    sns.heatmap(
        wilcoxon_prec,
        ax=ax[1],
        square=True,
        center=0,
        cmap="RdBu_r",
        cbar_kws=dict(shrink=0.7),
        xticklabels=deltas,
        cbar_ax=ax[-1],
        vmin=vmin,
        vmax=vmax,
    )
    ax[1].set_xticks(np.arange(0, ax[1].get_xlim()[1] + 1, 10))
    ax[1].set_yticks(np.arange(0, ax[1].get_ylim()[0] + 1, 10)[::-1])
    ax[1].yaxis.set_major_formatter(plt.NullFormatter())
    ax[1].set_title("Wilcoxon Precision@10")
    sns.heatmap(
        mannwhitney_prec,
        ax=ax[2],
        square=True,
        center=0,
        cmap="RdBu_r",
        cbar_kws=dict(shrink=0.7),
        xticklabels=deltas,
        cbar_ax=ax[-1],
        vmin=vmin,
        vmax=vmax,
    )
    ax[2].set_xticks(np.arange(0, ax[1].get_xlim()[1] + 1, 10))
    ax[2].set_yticks(np.arange(0, ax[1].get_ylim()[0] + 1, 10)[::-1])
    ax[2].yaxis.set_major_formatter(plt.NullFormatter())
    ax[2].set_title("Mann-Whitney Precision@10")
    fig.text(-0.01, 0.5, "Sample Size", va="center", rotation="vertical")
    fig.text(0.5, -0.03, "Effect Size", va="center", ha="center")
    fig.savefig(
        "./figures/20200204_diff_means_precision.png", dpi=300, bbox_inches="tight"
    )
    fig.savefig(
        "./figures/20200204_diff_means_precision.pdf", dpi=300, bbox_inches="tight"
    )
# +
# Pairwise differences between the three tests' Precision@10 surfaces
# (positive = the first-named test is better). Shares one colorbar in ax[3].
vmin = -0.2
vmax = 0.2
with sns.plotting_context("talk", font_scale=1.25):
    # fig, ax = plt.subplots(figsize=(10, 10))
    fig, ax = plt.subplots(
        1,
        4,
        gridspec_kw={"width_ratios": [1, 1, 1, 0.05]},
        figsize=(19, 6),
        constrained_layout=True,
    )
    sns.heatmap(
        ttest_prec - wilcoxon_prec,
        ax=ax[0],
        square=True,
        center=0,
        cmap="RdBu_r",
        cbar_kws=dict(shrink=0.7),
        xticklabels=deltas,
        yticklabels=ms,
        cbar_ax=ax[-1],
        vmin=vmin,
        vmax=vmax,
    )
    ax[0].set_xticks(np.arange(0, ax[0].get_xlim()[1] + 1, 10))
    ax[0].set_yticks(np.arange(0, ax[0].get_ylim()[0] + 1, 10))
    ax[0].set_title("T-Test - Wilcoxon")
    sns.heatmap(
        ttest_prec - mannwhitney_prec,
        ax=ax[1],
        square=True,
        center=0,
        cmap="RdBu_r",
        cbar_kws=dict(shrink=0.7),
        xticklabels=deltas,
        cbar_ax=ax[-1],
        vmin=vmin,
        vmax=vmax,
    )
    ax[1].set_xticks(np.arange(0, ax[1].get_xlim()[1] + 1, 10))
    ax[1].set_yticks(np.arange(0, ax[1].get_ylim()[0] + 1, 10)[::-1])
    ax[1].yaxis.set_major_formatter(plt.NullFormatter())
    # Fixed: this panel plots ttest_prec - mannwhitney_prec but its title
    # was a garbled "<NAME>" placeholder.
    ax[1].set_title("T-Test - Mann-Whitney")
    sns.heatmap(
        mannwhitney_prec - wilcoxon_prec,
        ax=ax[2],
        square=True,
        center=0,
        cmap="RdBu_r",
        cbar_kws=dict(shrink=0.7),
        xticklabels=deltas,
        cbar_ax=ax[-1],
        vmin=vmin,
        vmax=vmax,
    )
    ax[2].set_xticks(np.arange(0, ax[1].get_xlim()[1] + 1, 10))
    ax[2].set_yticks(np.arange(0, ax[1].get_ylim()[0] + 1, 10)[::-1])
    ax[2].yaxis.set_major_formatter(plt.NullFormatter())
    ax[2].set_title("Mann-Whitney - Wilcoxon")
    fig.text(-0.01, 0.5, "Sample Size", va="center", rotation="vertical")
    fig.text(0.5, -0.03, "Effect Size", va="center", ha="center")
    fig.savefig(
        "./figures/20200204_diff_means_diff_precision.png", dpi=300, bbox_inches="tight"
    )
    fig.savefig(
        "./figures/20200204_diff_means_diff_precision.pdf", dpi=300, bbox_inches="tight"
    )
# -
| experiments/experiment_2/20200204_change_means.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import os
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
# -
# Load the CBS postcode statistics, keep the four indicator columns, restrict
# to Amsterdam PC6 postcodes, and rename the Dutch column codes:
# M_INKHH -> MIH (household income), UITKMINAOW -> SSC (social security),
# WOZWONING -> ARV (property value), P_NW_MIG_A -> NWIRP — TODO confirm
# expansions against the CBS codebook.
df = pd.read_csv("CBS_PC6_2016_v2_tested.csv")
df = df[["PC6","P_NW_MIG_A","M_INKHH","WOZWONING","UITKMINAOW"]]
df2 = pd.read_csv("Amsterdam_PC6_CBS_2016_V2.csv")
df2 = df2[["PC6"]]
df = df.merge(df2, on="PC6")
df = df.rename(columns={"M_INKHH": "MIH", "UITKMINAOW": "SSC", "WOZWONING": "ARV", "P_NW_MIG_A":"NWIRP"})
df.to_csv("CBS_AMS_2016V2_P.csv")
def clean_df(df):
    """Drop rows holding a CBS sentinel / unclassifiable value in any column.

    Filters the numeric sentinel -99997, its string form '-99997', and the
    Dutch 'onclassificeerbaar' marker, column by column.
    """
    for col in df.columns:
        for sentinel in (-99997, 'onclassificeerbaar', '-99997'):
            df = df[df[col] != sentinel]
    return df
# One cleaned frame per indicator (PC6 + value), sentinels removed.
MIH = clean_df(df[['PC6','MIH']]).reset_index(drop=True)
SSC = clean_df(df[['PC6','SSC']]).reset_index(drop=True)
ARV = clean_df(df[["PC6","ARV"]]).reset_index(drop=True)
NWIRP = clean_df(df[["PC6", "NWIRP"]]).reset_index(drop=True)
# Map the CBS Dutch income-bracket labels onto ordinal codes 1..11.
hhinkomen = ['00-20 laag','00-40 laag tot onder midden','00-60 laag tot midden','20-40 onder midden', '20-60 onder midden tot midden','20-80 onder midden tot boven midden','40-60 midden','40-80 midden tot boven midden','60-80 boven midden', '60-100 boven midden tot hoog', '80-100 hoog']
n_hhinkomen = [1,2,3,4,5,6,7,8,9,10,11]
MIH["MIH"] = MIH["MIH"].replace(hhinkomen, n_hhinkomen)
MIH.to_csv("MIH.csv")
SSC.to_csv("SSC.csv")
ARV.to_csv("ARV.csv")
NWIRP.to_csv("NWIRP.csv")
# Inner-join all four indicators on postcode for the correlation heatmap.
df1 = pd.merge(MIH, ARV, on=['PC6'])
df2 = pd.merge(df1, NWIRP, on=['PC6'])
df3 = pd.merge(df2, SSC, on=['PC6'])
#Using Pearson Correlation
def heatmap_pearson(df):
    """Plot the Pearson correlation matrix of df's numeric columns and save
    it to heatmap_attributes.png (savefig must precede show, which clears
    the current figure)."""
    plt.figure(figsize=(6,5))
    sns.set(font_scale=1.4)
    cor = df.corr()
    sns.heatmap(cor, annot=True, cmap=plt.cm.Reds)
    plt.savefig('heatmap_attributes.png')
    plt.show()
heatmap_pearson(df3)
# +
#MIH.MIH.value_counts().plot(kind='bar', title="MIH categories counts")
# -
# Convert each indicator to deciles 1..10: rank(method='first') breaks ties
# so qcut can always form ten equal-sized bins.
MIH = MIH.sort_values(by="MIH")
MIH["MIH"] = MIH['MIH'].rank(method='first')
MIH.loc[:,'MIH']=pd.qcut(MIH['MIH'],10,labels=np.arange(1,11,1))
MIH['MIH'] = MIH.MIH.astype('int32')
MIH = MIH.reset_index(drop=True)
# +
#MIH.MIH.value_counts().plot(kind='bar', title="MIH decile counts")
# -
SSC.SSC.value_counts().plot(kind='bar', title="SSC")
SSC = SSC.sort_values(by="SSC")
SSC["SSC"] = SSC['SSC'].rank(method='first')
SSC.loc[:,'SSC']=pd.qcut(SSC['SSC'],10,labels=np.arange(1,11,1))
SSC['SSC'] = SSC.SSC.astype('int32')
SSC = SSC.reset_index(drop=True)
SSC.SSC.value_counts().plot(kind='bar', title="SSC")
# ARV.ARV.value_counts().plot(kind='bar', title="ARV") alot of values
ARV = ARV.sort_values(by="ARV")
ARV["ARV"] = ARV['ARV'].rank(method='first')
ARV.loc[:,'ARV']=pd.qcut(ARV['ARV'],10,labels=np.arange(1,11,1))
ARV['ARV'] = ARV.ARV.astype('int32')
ARV = ARV.reset_index(drop=True)
NWIRP = NWIRP.sort_values(by="NWIRP")
NWIRP["NWIRP"] = NWIRP['NWIRP'].rank(method='first')
NWIRP.loc[:,'NWIRP']=pd.qcut(NWIRP['NWIRP'],10,labels=np.arange(1,11,1))
NWIRP['NWIRP'] = NWIRP.NWIRP.astype('int32')
NWIRP = NWIRP.reset_index(drop=True)
# Re-merge the decile versions and re-check correlations.
x = pd.merge(MIH, ARV, on=['PC6'])
y = pd.merge(x, NWIRP, on=['PC6'])
z = pd.merge(y, SSC, on=['PC6'])
heatmap_pearson(z)
# +
# Load the panorama index and normalise the postcode column so it joins with
# the CBS frames.
pano_df = pd.read_csv("lon_lat_pano-url_pano-id_pc6.csv")
pano_df = pano_df.drop("Unnamed: 0", axis=1)
pano_df = pano_df.rename(columns={"pc6": "PC6"})
pano_df["PC6"] = pano_df.PC6.str.replace(' ', '')
pano_list = list(pano_df["pano_id"])
# Keep only panoramas whose feature file actually exists on disk.
# (Replaces the original index-based loop; the manual progress counter was
# dead code feeding a commented-out print.)
pano_exist = [
    pano_id
    for pano_id in pano_list
    if os.path.exists('gview_codes_4/{}.npz'.format(pano_id))
]
exists_df = pd.DataFrame(pano_exist, columns=['pano_id'])
pano_df = pano_df.merge(exists_df, how='inner', on='pano_id')
# One panorama per postcode.
pano_df = pano_df.drop_duplicates(subset=['PC6'])
# +
# Attach panorama metadata to each indicator frame; the Street View URL
# column is not needed downstream, only the pano id and coordinates.
MIH_df = MIH.merge(pano_df, how='inner', on='PC6')
MIH_df = MIH_df.drop(['url'], axis=1)
#labels1_df.to_pickle("amsterdam_labels_1.p")
ARV_df = ARV.merge(pano_df, how='inner', on='PC6')
ARV_df = ARV_df.drop(['url'], axis=1)
#labels2_df.to_pickle("amsterdam_labels_2.p")
SSC_df = SSC.merge(pano_df, how='inner', on='PC6')
SSC_df = SSC_df.drop(['url'], axis=1)
NWIRP_df = NWIRP.merge(pano_df, how='inner', on='PC6')
NWIRP_df = NWIRP_df.drop(['url'], axis=1)
# -
SSC_df
# Fix the global RNG so the splits below are reproducible.
np.random.seed(42)
# +
# 70/30 train/test split per indicator.
MIH_train_indexes, MIH_test_indexes = train_test_split(MIH_df.index, test_size=0.3)
MIH_train = MIH_df.iloc[MIH_train_indexes]
MIH_test = MIH_df.iloc[MIH_test_indexes]
ARV_train_indexes, ARV_test_indexes = train_test_split(ARV_df.index, test_size=0.3)
ARV_train = ARV_df.iloc[ARV_train_indexes]
ARV_test = ARV_df.iloc[ARV_test_indexes]
SSC_train_indexes, SSC_test_indexes = train_test_split(SSC_df.index, test_size=0.3)
SSC_train = SSC_df.iloc[SSC_train_indexes]
SSC_test = SSC_df.iloc[SSC_test_indexes]
NWIRP_train_indexes, NWIRP_test_indexes = train_test_split(NWIRP_df.index, test_size=0.3)
NWIRP_train = NWIRP_df.iloc[NWIRP_train_indexes]
NWIRP_test = NWIRP_df.iloc[NWIRP_test_indexes]
# +
MIH_train.to_pickle("MIH_TRAIN.p")
MIH_test.to_pickle("MIH_TEST.p")
ARV_train.to_pickle("ARV_TRAIN.p")
ARV_test.to_pickle("ARV_TEST.p")
SSC_train.to_pickle("SSC_TRAIN.p")
SSC_test.to_pickle("SSC_TEST.p")
NWIRP_train.to_pickle("NWIRP_TRAIN.p")
NWIRP_test.to_pickle("NWIRP_TEST.p")
| Data_Prep/Data Preprocessing And Exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# define variables
def number1(x):
    """Return the given value unchanged.

    Fixed: the original assigned from an undefined global ``number``,
    raising NameError on every call.
    """
    integer1 = x
    return integer1
def number2(x):
    """Return the given value unchanged.

    Fixed: the original returned the global ``number2`` (the function itself,
    or whatever later shadows that name) instead of its argument.
    """
    integer2 = x
    return integer2
# interactive input
number1=int(input("Give a number for number 1:"))
number2=int(input("Give a number for number 2:"))
# computation/compare
product=number1*number2
# NOTE: `sum` shadows the built-in sum() from here on in this notebook
sum=number1+number2
# report result
# print the product if it exceeds 666, otherwise the sum
if (product>666):
    print(product)
else:
    print(sum)
# Case 1
numberString = input("give a number for n")
#check if the number of digits is even or add
#get number of digits
numDigits = len(numberString)
#check if number is even
if (numDigits % 2 == 0):
#cut the string in half
middlePoint = numDigits // 2
firstHalf = numberString[0:middlePoint]
secondHalf = numberString[middlepoint:]
#reverse first half
reverseFirstHalf = firstHalf[::-1]
#compare to second half
if(reverseFirstHalf == secondHalf):
print(True)
else:
print(False)
else:
#cut the string in half
middlePoint = numDigits // 2
firstHalf = numberString[0:middlePoint-1]
secondHalf = numberString[middlePoint+1:
]
#reverse first half
reverseFirstHalf = firstHalf[
:
:-1]
#compare to second half
if(reverseFirstHalf == secondHalf)
:
print(True)
else:
print(False)
| 3-Readings/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="rF2trPuyzm9C"
# # Exercise 3.2
#
# + id="ipcsUFDUzm9C"
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] id="MCJe_ITJzm9G"
# **Linear Regression**
#
# The goal of this exercise is to explore a simple linear regression problem based on Portugese white wine.
#
# The dataset is based on
# Cortez, Cerdeira, Almeida, Matos and Reis. **Modeling wine preferences by data mining from physicochemical properties**. Published in Decision Support Systems, Elsevier, 47(4):547-553, 2009.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="NopU99AT9G7s" outputId="d7e8848e-b9c0-4eb4-8f18-5acda9d8c343"
# The code snippet below is responsible for downloading the dataset
# - for example when running via Google Colab.
#
# You can also directly download the file using the link if you work
# with a local setup (in that case, ignore the !wget)
# !wget https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv
# + [markdown] id="zEiZ19s5zm9G"
# **Before we start**
#
# The downloaded file contains data on 4989 wines. For each wine 11 features are recorded (column 0 to 10). The final columns contains the quality of the wine. This is what we want to predict. More information on the features and the quality measurement is provided in the original publication.
#
# List of columns/features:
# 0. fixed acidity
# 1. volatile acidity
# 2. citric acid
# 3. residual sugar
# 4. chlorides
# 5. free sulfur dioxide
# 6. total sulfur dioxide
# 7. density
# 8. pH
# 9. sulphates
# 10. alcohol
# 11. quality
#
#
#
# [file]: https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv
# + colab={"base_uri": "https://localhost:8080/"} id="5ONqeI5Uzm9H" outputId="d31ba8d4-cf0a-4f25-8a93-9091c0dd041a"
# Before working with the data,
# we download and prepare all features
# load all examples from the file
data = np.genfromtxt('winequality-white.csv',delimiter=";",skip_header=1)
print("data:", data.shape)
# Prepare for proper training
# shuffle before splitting so train and test are random samples
np.random.shuffle(data) # randomly sort examples
# take the first 3000 examples for training
# (remember array slicing from last week)
X_train = data[:3000,:11] # all features except last column
y_train = data[:3000,11] # quality column
# and the remaining examples for testing
X_test = data[3000:,:11] # all features except last column
y_test = data[3000:,11] # quality column
print("First example:")
print("Features:", X_train[0])
print("Quality:", y_train[0])
# + [markdown] id="jiwnyNHpzm9L"
# # Problems
#
#
# * First we want to understand the data better. Plot (`plt.hist`) the distribution of each of the features for the training data as well as the 2D distribution (either `plt.scatter` or `plt.hist2d`) of each feature versus quality. Also calculate the correlation coefficient (`np.corrcoef`) for each feature with quality. Which feature by itself seems most predictive for the quality?
#
# * Calculate the linear regression weights. Numpy provides functions for matrix multiplication (`np.matmul`), matrix transposition (`.T`) and matrix inversion (`np.linalg.inv`).
#
# * Use the weights to predict the quality for the test dataset. How
# does your predicted quality compare with the true quality of the test data? Calculate the correlation coefficient between predicted and true quality and draw a scatter plot.
# -
# # Hints
# Formally, we want to find weights $w_i$ that minimize:
# $$
# \sum_{j}\left(\sum_{i} X_{i j} w_{i}-y_{j}\right)^{2}
# $$
# The index $i$ denotes the different features (properties of the wines) while the index $j$ runs over the different wines. The matrix $X_{ij}$ contains the training data, $y_j$ is the 'true' quality for sample $j$. The weights can be found by taking the first derivative of the above expression with respect to the weights and setting it to zero (the standard strategy for finding an extremum), and solving the corresponding system of equations (for a detailed derivation, see [here](https://en.wikipedia.org/wiki/Ordinary_least_squares)). The result is:
# $$
# \overrightarrow{\mathbf{w}}=\left(\mathbf{X}^{T} \mathbf{X}\right)^{-1} \mathbf{X}^{T} \overrightarrow{\mathbf{y}}
# $$
#
# In the end, you should have as many components of $w_i$ as there are features in the data (i.e. eleven in this case).
#
# You can use `.shape` to inspect the dimensions of numpy tensors.
#
| Exercise_03_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# 30 Jan 2019
# generates the NEMO horizontal grid and bathymetry file
import numpy as np
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from copy import deepcopy
# + code_folding=[0]
# create the output data class
def gen_bathy_meter(jpiglo, jpjglo, e1, e2, bathy, config, filename):
    """Write a NEMO bathy_meter netCDF file.

    jpiglo, jpjglo -- number of grid points in x and y
    e1, e2         -- 1-D coordinate arrays (m) along x and y
    bathy          -- (t, y, x) bottom depth in metres (0 marks land walls)
    config         -- configuration name, used only in the file title
    filename       -- output path
    """
    # open a new netCDF file for writing.
    ncfile = Dataset(filename, "w", format = "NETCDF4")
    ncfile.title = "homebrew bathymetry file for %s" % config
    # create the dimensions.
    ncfile.createDimension("x", jpiglo)
    ncfile.createDimension("y", jpjglo)
    ncfile.createDimension("t", None)
    # first argument is name of variable,
    # second is datatype,
    # third is a tuple with the names of dimensions.
    lon_netcdf = ncfile.createVariable("nav_lon", np.dtype("float32").char, ("x"), fill_value = False)
    lon_netcdf[:] = e1
    lon_netcdf.units = "m"
    lon_netcdf.long_name = "x"
    lat_netcdf = ncfile.createVariable("nav_lat", np.dtype("float32").char, ("y"), fill_value = False)
    lat_netcdf[:] = e2
    lat_netcdf.units = "m"
    lat_netcdf.long_name = "y"
    time_netcdf = ncfile.createVariable("time_counter", np.dtype("float32").char, ("t"), fill_value = False)
    time_netcdf[:] = 0.0
    time_netcdf.units = "s"
    bathy_netcdf = ncfile.createVariable("Bathymetry", np.dtype("float64").char, ("t", "y", "x"), fill_value = False)
    bathy_netcdf[:] = bathy
    bathy_netcdf.units = "m"
    # close the file.
    ncfile.close()
    print("*** SUCCESS writing example file %s" % filename)
def gen_forcing(jpiglo, jpjglo, lonT, latT, utau, vtau, qtot, qsr, emp, filename):
    """Write a NEMO surface forcing netCDF file (fields on T points).

    utau, vtau -- (t, y, x) zonal / meridional wind stress (N m-2)
    qtot, qsr  -- (t, y, x) total / qsr heat flux (W m-2; qsr is the solar
                  component in NEMO naming -- confirm against the namelist)
    emp        -- (t, y, x) evaporation minus precipitation (m s-1)
    """
    # open a new netCDF file for writing.
    ncfile = Dataset(filename , "w", format = "NETCDF4")
    ncfile.title = "homebrew forcing file for modEEL"
    # create the dimensions.
    ncfile.createDimension("x", jpiglo)
    ncfile.createDimension("y", jpjglo)
    ncfile.createDimension("t", None)
    # first argument is name of variable,
    # second is datatype,
    # third is a tuple with the names of dimensions.
    lon_netcdf = ncfile.createVariable("nav_lon", np.dtype("float32").char, ("y", "x"), fill_value = False)
    lon_netcdf[:] = lonT
    lon_netcdf.units = "m"
    lon_netcdf.long_name = "x"
    lat_netcdf = ncfile.createVariable("nav_lat", np.dtype("float32").char, ("y", "x"), fill_value = False)
    lat_netcdf[:] = latT
    lat_netcdf.units = "m"
    lat_netcdf.long_name = "y"
    utau_netcdf = ncfile.createVariable("utau", np.dtype("float64").char, ("t", "y", "x"), fill_value = False)
    utau_netcdf[:] = utau
    utau_netcdf.units = "N m-2"
    vtau_netcdf = ncfile.createVariable("vtau", np.dtype("float64").char, ("t", "y", "x"), fill_value = False)
    vtau_netcdf[:] = vtau
    vtau_netcdf.units = "N m-2"
    qtot_netcdf = ncfile.createVariable("qtot", np.dtype("float64").char, ("t", "y", "x"), fill_value = False)
    qtot_netcdf[:] = qtot
    qtot_netcdf.units = "W m-2"
    qsr_netcdf = ncfile.createVariable("qsr", np.dtype("float64").char, ("t", "y", "x"), fill_value = False)
    qsr_netcdf[:] = qsr
    qsr_netcdf.units = "W m-2"
    emp_netcdf = ncfile.createVariable("emp", np.dtype("float64").char, ("t", "y", "x"), fill_value = False)
    emp_netcdf[:] = emp
    emp_netcdf.units = "m s-1"
    # close the file.
    ncfile.close()
    print("*** SUCCESS writing example file %s!" % filename)
def gen_istate(x, y, z, toce, soce, sst, sss, filename):
    """Write the NEMO initial-state netCDF file for modEEL.

    x, y       -- 2-D horizontal coordinate arrays on T points, shape (jpj, jpi)
    z          -- 1-D array of vertical levels (m), length jpk
    toce, soce -- (t, z, y, x) initial temperature (C) / salinity (g kg-1)
    sst, sss   -- (t, y, x) surface restoring temperature / salinity
    filename   -- output path

    Fixed: grid sizes are now derived from the arguments instead of silently
    depending on the module-level jpiglo/jpjglo/jpkglo globals, and nav_lev
    no longer reuses the nav_lat variable handle.
    """
    jpj, jpi = np.shape(x)
    jpk = len(z)
    # open a new netCDF file for writing.
    ncfile = Dataset(filename, "w", format = "NETCDF4")
    ncfile.title = "homebrew initial state file for modEEL"
    # create the dimensions.
    ncfile.createDimension("x", jpi)
    ncfile.createDimension("y", jpj)
    ncfile.createDimension("z", jpk)
    ncfile.createDimension("t", None)
    # first argument is name of variable,
    # second is datatype,
    # third is a tuple with the names of dimensions.
    lon_netcdf = ncfile.createVariable("nav_lon", np.dtype("float32").char, ("y", "x"), fill_value = False)
    lon_netcdf[:] = x
    lon_netcdf.units = "m"
    lon_netcdf.long_name = "x"
    lat_netcdf = ncfile.createVariable("nav_lat", np.dtype("float32").char, ("y", "x"), fill_value = False)
    lat_netcdf[:] = y
    lat_netcdf.units = "m"
    lat_netcdf.long_name = "y"
    lev_netcdf = ncfile.createVariable("nav_lev", np.dtype("float32").char, ("z"), fill_value = False)
    lev_netcdf[:] = z
    lev_netcdf.units = "m"
    lev_netcdf.long_name = "z"
    toce_netcdf = ncfile.createVariable("toce", np.dtype("float64").char, ("t", "z", "y", "x"), fill_value = False)
    toce_netcdf[:] = toce
    toce_netcdf.units = "C"
    soce_netcdf = ncfile.createVariable("soce", np.dtype("float64").char, ("t", "z", "y", "x"), fill_value = False)
    soce_netcdf[:] = soce
    soce_netcdf.units = "g kg-1"
    sst_netcdf = ncfile.createVariable("sst", np.dtype("float64").char, ("t", "y", "x"), fill_value = False)
    sst_netcdf[:] = sst
    sst_netcdf.units = "C"
    sss_netcdf = ncfile.createVariable("sss", np.dtype("float64").char, ("t", "y", "x"), fill_value = False)
    sss_netcdf[:] = sss
    sss_netcdf.units = "g kg-1"
    # close the file.
    ncfile.close()
    print("*** SUCCESS writing example file %s!" % filename)
# +
# EEL configuration, flat bottom + mound
jpiglo = 83
jpjglo = 242
reso_m = 2000.0
e1 = reso_m * np.arange(0, jpiglo)
e2 = reso_m * np.arange(0, jpjglo)
Lx, Ly = e1[-1], e2[-1]
# EEL has 4000m depth, also put two walls in
bathy = 4000.0 * np.ones((1, jpjglo, jpiglo))
xx, yy = np.meshgrid(e1, e2)
mound = 1500.0 * np.exp( -( ( (yy - Ly / 2.0) ** 2 + (xx - Lx / 2.0) ** 2 ) / 35e3 ** 2 ))
plt.subplot(1, 2, 1)
plt.contourf(xx, yy, mound)
plt.colorbar()
plt.axis("equal")
plt.tight_layout()
bathy[0, :, :] -= mound
plt.subplot(1, 2, 2)
plt.contourf(xx, yy, bathy[0, :, :])
plt.colorbar()
plt.axis("equal")
plt.tight_layout()
bathy[0, 0, :] = 0.0
bathy[0, -1, :] = 0.0
gen_bathy_meter(jpiglo, jpjglo, e1, e2, bathy, "EEL", "bathy_meter_EEL.nc")
# + code_folding=[]
# UNAGI configuration, based on SO channel of <NAME>
# 9000km long, 2400km wide, 3000m deep
# relevant numbers
# res [nx short] nx ny nz
# 100 40 90 24 + 2 30 + 1
# 50 80 180 48 + 2 30 + 1
# 25 160 360 96 + 2 30 + 1
# 15 600 160 + 2 30 + 1
# 10 400 900 240 + 2 30 + 1
bathy_filename = "bathy_meter_UNAGI_R050.nc"
jpiglo = 180
jpjglo = 48 + 2 # add to grid points on
jpkglo = 30 + 1 # add the bottom level in
reso_m = 50.0e3
e1 = reso_m * np.arange(0, jpiglo)
e2 = reso_m * np.arange(0, jpjglo)
Lx, Ly = e1[-1], e2[-1]
# UNAGI has 3000m depth, also put two walls in
oce_depth = 3000.0
bathy = oce_depth * np.ones((1, jpjglo, jpiglo))
l_ridge = True
ridge_H = 1500.0
ridge_L = 500.0e3 # half width of ridge
if l_ridge:
    # Cosine-flanked zonal ridge with a flat crest in the +/-100 km band
    # around mid-channel.
    # NOTE(review): the cosine phase uses (e1 + Lx/2 ...) rather than
    # (e1 - Lx/2 ...) — verify the flanks join the crest continuously.
    ridge_x1d = np.zeros(jpiglo)
    for ji in range(jpiglo):
        if (e1[ji] > -ridge_L - 100.0e3 + Lx / 2.0) & (e1[ji] < -100.0e3 + Lx / 2.0):
            ridge_x1d[ji] = 0.5 * ridge_H * (1.0 + np.cos(np.pi * (e1[ji] + Lx / 2.0 + 100.0e3) / ridge_L))
        elif (e1[ji] > 100.0e3 + Lx / 2.0) & (e1[ji] < ridge_L + 100.0e3 + Lx / 2.0):
            ridge_x1d[ji] = 0.5 * ridge_H * (1.0 + np.cos(np.pi * (e1[ji] + Lx / 2.0 - 100.0e3) / ridge_L))
        elif (e1[ji] >= -100.0e3 + Lx / 2.0) & (e1[ji] <= 100.0e3 + Lx / 2.0):
            ridge_x1d[ji] = ridge_H
else:
    # flat bottom: the oce_depth factor is a no-op on an all-zero array
    ridge_x1d = oce_depth * np.zeros(jpiglo)
bathy[0, :, :] -= ridge_x1d[np.newaxis, :]
# land walls (zero depth) on the southern and northern rows
bathy[0, 0, :] = 0.0
bathy[0, -1, :] = 0.0
plt.contourf(e1 / 1e3, e2 / 1e3, bathy[0, :, :])
plt.colorbar()
gen_bathy_meter(jpiglo, jpjglo, e1, e2, bathy, "UNAGI", bathy_filename)
# +
# use the generated bathy_meter.nc in DOMAINcfg to generate a domaincfg file
# the use that domaincfg file to generate the initial state and forcing files
# no meridional wind here so just put everything on T points
filename = "domcfg_UNAGI_R015.nc"
data = Dataset(filename)
jpiglo = data.variables["jpiglo"][:]
jpjglo = data.variables["jpjglo"][:]
jpkglo = data.variables["jpkglo"][:]
lonV = data.variables["glamv"][0, :, :]
latV = data.variables["gphiv"][0, :, :]
lonT = data.variables["glamt"][0, :, :]
latT = data.variables["gphit"][0, :, :]
e1t = data.variables["e1t"][0, 0, 0]
z = data.variables["nav_lev"][:]
data.close()
# have a sinusoidally varying wind but zero everything else
Ly = 2400.0e3
Ly_mid = (Ly - e1t) / 2.0 # take into account the slight offset of the T/Vgrid
Lz = 3000
tau0 = 0.2 * 1.0  # peak zonal wind stress (N m-2)
utau = np.zeros((1, jpjglo, jpiglo))
# cosine stress profile peaking mid-channel; latT * 1.0e3 suggests latT is
# in km here -- TODO confirm against the domcfg file
utau[0, :, :] = tau0 * 0.5 * ( 1.0 + np.cos(2.0 * np.pi * (latT * 1.0e3 - Ly_mid) / Ly) )
# no stress on the land walls
utau[0, 0, :] = 0.0
utau[0, -1, :] = 0.0
vtau = np.zeros((1, jpjglo, jpiglo))
qtot = np.zeros((1, jpjglo, jpiglo))
qsr = np.zeros((1, jpjglo, jpiglo))
emp = np.zeros((1, jpjglo, jpiglo))
plt.plot(utau[0, :, 0], latT, 'rx-')
plt.grid()
gen_forcing(jpiglo, jpjglo, lonT, latT, utau, vtau, qtot, qsr, emp, filename.replace("domcfg", "forcing_tau1x"))
# +
# Choose the amplitude of the temperature variations
dtheta = 15.0
# Choose the e-folding scale for the stratification.
z0 = 1000.0
# Generate the idealised temperature stratification, used as both an initial
# condition and for the restoring temperature in the sponge regions.
toce = np.zeros((1, jpkglo, jpjglo, jpiglo))
soce = 35.0 * np.ones((1, jpkglo, jpjglo, jpiglo))
sss = deepcopy(soce[:, 0, :, :])
yy, zz = np.meshgrid(latT[:, 0], -z)
yy *= 1.0e3  # presumably km -> m, matching the forcing cell -- TODO confirm
# Linear gradient at surface with exponential decay at depth and 0oC at
# the southern boundary (similar to Abernathey et al., 2011)
for ji in range(jpiglo):
    toce[0, :, :, ji] = (dtheta * ( ( yy / Ly )
                                    * ( np.exp( zz / z0) - np.exp(-Lz / z0) )
                                    / ( 1.0 - np.exp( -Lz / z0 ) )
                                  )
                        )
# because of the grid there is some offset, add it back on
toce -= toce[0, 0, 0, 0]
# add some noise
toce += np.random.normal(0, 0.05, (1, jpkglo, jpjglo, jpiglo))
# Make sure there is no water colder than 0.25 oC.
toce[toce < 0.25] = 0.25
# Pick out the surface temperature for the restoring condition.
sst = deepcopy(toce[:, 0, :, :])
# note the first and last y point is going to be set to masked out
plt.contourf(yy / 1.0e3, zz, toce[0, :, :, 1], np.arange(0, 16, 1), cmap = "RdBu_r")
lines = plt.contour(yy / 1.0e3, zz, toce[0, :, :, 1], np.arange(2, 16, 2), colors = "w")
plt.clabel(lines, fmt = r"$%i\ {}^\circ \mathrm{C}$", colors = 'w')
gen_istate(lonT, latT, z, toce, soce, sst, sss, filename.replace("domcfg", "state"))
# +
# Sanity check of the linear equation of state rho0 * (1 - alpT * (T - Tref))
# evaluated at T = 15 and T = 0 with each model's reference constants.
# MITgcm
rho0 = 1035
alpT = 2.0e-4
Tref = 2.5
print("MITgcm value = %.6f" % (rho0 * (1 - alpT * (15 - Tref))))
print("MITgcm value = %.6f" % (rho0 * (1 - alpT * (0 - Tref))))
# NEMO
rho0 = 1026
alpT = 2.0e-4
Tref = 10
print("NEMO value = %.6f" % (rho0 * (1 - alpT * (15 - Tref))))
print("NEMO value = %.6f" % (rho0 * (1 - alpT * (0 - Tref))))
# +
# testing code for generating diffkr
filename = "UNAGI_R100_domcfg.nc"
data = Dataset(filename)
jpiglo = data.variables["jpiglo"][:]
jpjglo = data.variables["jpjglo"][:]
jpkglo = data.variables["jpkglo"][:]
data.close()
reso_m = 100.0e3 # 100 km resolution
e2 = reso_m * np.arange(0, jpjglo)
avt0 = 1.0e-5  # background diffusivity
avtf = 5.0e-3  # enhanced diffusivity at the sponge edge
L_sponge = 300.0e3
Ly = 2400.0e3 # 2400km is where the wall SHOULD be at
# Cosine ramp from avt0 up to avtf inside the sponge band next to the
# northern wall (e2 > 2000 km); avt0 elsewhere.
diffkr = avt0 + (0.5 * avtf * ( 1 + np.cos( np.pi * (e2 - e2[-2]) / L_sponge ) )
                 - 0.5 * avt0 * ( 1 + np.cos( np.pi * (e2 - e2[-2]) / L_sponge ) )
                )
diffkr = np.where(e2 > 2000.0e3, diffkr, avt0)
# Same profile expressed as a multiplicative factor on avt0 (500x peak).
amp_factor = 1 + (0.5 * 500 * ( 1 + np.cos( np.pi * (e2 - e2[-2]) / L_sponge ) )
                  - 0.5 * ( 1 + np.cos( np.pi * (e2 - e2[-2]) / L_sponge ) )
                 )
amp_factor = np.where(e2 > 2000.e3, amp_factor, 1.0)
# The two curves should coincide; the dashed line marks the sponge edge.
plt.plot(diffkr, e2 / 1e3, 'bx-')
plt.plot(amp_factor * avt0, e2 / 1e3, 'ro-')
plt.plot([0, avtf], [2000, 2000], 'k--')
plt.grid()
# -
y
# +
# test = (1. + 0.5 * 500.0 * ( 1. + np.cos( np.pi * ( yT - 2300. ) / 300. ) )
# - 0.5 * ( 1. + np.cos( np.pi * ( yT - 2300. ) / 300. ) )
# )
spacing = 15
y = np.arange(-spacing, 2400 + spacing, spacing)
test = (0. + 0.5 * 500.0 * ( 1. + np.cos( np.pi * ( y - y[-2] ) / 300. ) )
- 0.5 * ( 1. + np.cos( np.pi * ( y - y[-2] ) / 300. ) )
)
test = np.where(y >= 2100, test, 0.0)
test *= 1.0e-5
test1 = (0. + 0.5 * 250.0 * ( 1. + np.cos( np.pi * ( y - y[-2] ) / 300. ) )
- 0.5 * ( 1. + np.cos( np.pi * ( y - y[-2] ) / 300. ) )
)
test1 = np.where(y >= 2100, test1, 0.0)
test1 *= 1.0e-5
test2 = (0. + 0.5 * 500.0 * ( 1. + np.cos( np.pi * ( y - 2000 ) / 150. ) )
- 0.5 * ( 1. + np.cos( np.pi * ( y - 2000 ) / 150. ) )
)
test2 = np.where(y > 1850, test2, 0.0)
test2 = np.where(y <= 2000, test2, 0.0)
test2 *= 1.0e-5
plt.plot(test, y, 'ro-', test1, y, 'g^-', test2, y, 'bx-')
plt.plot([0, 0.005], [2400, 2400], 'k--')
int1 = np.trapz(test[(y < 2400)], y[(y < 2400)])
int2 = np.trapz(test1[(y < 2400)], y[(y < 2400)])
int3 = np.trapz(test2[(y <= 2000)], y[(y <= 2000)])
print("int1 = %.4f, int2 = %.4f, int3 = %.4f" % (int1, int2, int3) )
# +
# Dave's configuration, based on SO channel of <NAME>
# 4000km long, 2000km wide, 3000m deep
jpiglo = 40
jpjglo = 20 + 2 # add to grid points on
jpkglo = 30 + 1 # add the bottom level in
reso_m = 100.0e3 # 100 km resolution
e1 = reso_m * np.arange(0, jpiglo)
e2 = reso_m * np.arange(0, jpjglo)
Lx, Ly = e1[-1], e2[-1]
# UNAGI has 3000m depth, also put two walls in
oce_depth = 3000.0
bathy = oce_depth * np.ones((1, jpjglo, jpiglo))
l_ridge = True
ridge_H = 1500.0
ridge_L = 400.0e3 # half width of ridge
if l_ridge:
ridge_x1d = np.zeros(jpiglo)
for ji in range(jpiglo):
if (e1[ji] > -ridge_L - 100.0e3 + Lx / 2.0) & (e1[ji] < -100.0e3 + Lx / 2.0):
ridge_x1d[ji] = 0.5 * ridge_H * (1.0 + np.cos(np.pi * (e1[ji] + Lx / 2.0 + 100.0e3) / ridge_L))
elif (e1[ji] > 100.0e3 + Lx / 2.0) & (e1[ji] < ridge_L + 100.0e3 + Lx / 2.0):
ridge_x1d[ji] = 0.5 * ridge_H * (1.0 + np.cos(np.pi * (e1[ji] + Lx / 2.0 - 100.0e3) / ridge_L))
elif (e1[ji] >= -100.0e3 + Lx / 2.0) & (e1[ji] <= 100.0e3 + Lx / 2.0):
ridge_x1d[ji] = ridge_H
else:
ridge_x1d = oce_depth * np.zeros(jpiglo)
bathy[0, :, :] -= ridge_x1d[np.newaxis, :]
bathy[0, 0, :] = 0.0
bathy[0, -1, :] = 0.0
plt.contourf(e1 / 1e3, e2 / 1e3, bathy[0, :, :])
plt.colorbar()
gen_bathy_meter(jpiglo, jpjglo, e1, e2, bathy, "DAVE", "bathy_meter_DAVE_R100.nc")
# +
# use the generated bathy_meter.nc in DOMAINcfg to generate a domaincfg file
# the use that domaincfg file to generate the initial state and forcing files
# no meridional wind here so just put everything on T points
filename = "domcfg_R100_DAVE.nc"
data = Dataset(filename)
jpiglo = data.variables["jpiglo"][:]
jpjglo = data.variables["jpjglo"][:]
jpkglo = data.variables["jpkglo"][:]
lonV = data.variables["glamv"][0, :, :]
latV = data.variables["gphiv"][0, :, :]
lonT = data.variables["glamt"][0, :, :]
latT = data.variables["gphit"][0, :, :]
e1t = data.variables["e1t"][0, 0, 0]
z = data.variables["nav_lev"][:]
data.close()
# have a sinusoidally varying wind but zero everything else
Ly = 2000.0e3
Ly_mid = (Ly - e1t) / 2.0 # take into account the slight offset of the T/Vgrid
Lz = 3000
tau0 = 0.2
utau = np.zeros((1, jpjglo, jpiglo))
utau[0, :, :] = tau0 * 0.5 * ( 1.0 + np.cos(2.0 * np.pi * (latT * 1.0e3 - Ly_mid) / Ly) )
utau[0, 0, :] = 0.0
utau[0, -1, :] = 0.0
vtau = np.zeros((1, jpjglo, jpiglo))
qtot = np.zeros((1, jpjglo, jpiglo))
qsr = np.zeros((1, jpjglo, jpiglo))
emp = np.zeros((1, jpjglo, jpiglo))
plt.plot(utau[0, :, 0], latT, 'rx-')
plt.grid()
gen_forcing(jpiglo, jpjglo, lonT, latT, utau, vtau, qtot, qsr, emp, "forcing_R100_DAVE.nc")
# +
# Choose the amplitude of the temperature variations
dtheta = 15.0
# Choose the e-folding scale for the stratification.
z0 = 1000.0
# Generate the idealised temperature stratification, used as both an initial
# condition and for the restoring temperature in the sponge regions.
toce = np.zeros((1, jpkglo, jpjglo, jpiglo))
soce = 35.0 * np.ones((1, jpkglo, jpjglo, jpiglo))
sss = deepcopy(soce[:, 0, :, :])
yy, zz = np.meshgrid(latT[:, 0], -z)
yy *= 1.0e3
# Linear gradient at surface with exponential decay at depth and 0oC at
# the southern boundary (similar to Abernathey et al., 2011)
for ji in range(jpiglo):
toce[0, :, :, ji] = (dtheta * ( ( yy / Ly )
* ( np.exp( zz / z0) - np.exp(-Lz / z0) )
/ ( 1.0 - np.exp( -Lz / z0 ) )
)
)
# because of the grid there is some offset, add it back on
toce -= toce[0, 0, 0, 0]
# add some noise
toce += np.random.normal(0, 0.05, (1, jpkglo, jpjglo, jpiglo))
# Make sure there is no water colder than 0.25 °C (the clamp below uses 0.25).
toce[toce < 0.25] = 0.25
# Pick out the surface temperature for the restoring condition.
sst = deepcopy(toce[:, 0, :, :])
# note the first and last y point is going to be set to masked out
plt.contourf(yy / 1.0e3, zz, toce[0, :, :, 1], np.arange(0, 16, 1), cmap = "RdBu_r")
lines = plt.contour(yy / 1.0e3, zz, toce[0, :, :, 1], np.arange(2, 16, 2), colors = "w")
plt.clabel(lines, fmt = r"$%i\ {}^\circ \mathrm{C}$", colors = 'w')
gen_istate(lonT, latT, z, toce, soce, sst, sss, "state_R100_DAVE.nc")
# -
Ly
e1t
| UNAGI/gen_NEMO_UNAGI_fields.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mentoria Evolution - Data Analysis
# <font color=blue><b> <NAME></b></font><br>
# www.minerandodados.com.br
# **Importante**: Antes de executar as seguintes células verifique se os arquivos estão no mesmo diretório
# **Importe o Pandas**
import pandas as pd
# **Ler a base de dados em memória**
dataset = pd.read_csv('data/kc_house_data.csv', sep=',')
dataset.head()
# # Alterando um Dataframe
# * Cria uma coluna no dataframe
# * Popula uma coluna baseado em um processamento de dados
dataset['size'] = (dataset['bedrooms'] * 20)
# **Visualizando a coluna size**
dataset.bedrooms.head(10)
dataset['size'].head(10)
def categoriza(s):
    """Map a numeric size to a coarse category label.

    Returns 'Big' (>= 80), 'Medium' (>= 60) or 'Small' (>= 40).
    Values below 40 fall through and return None, matching the
    original behaviour (pandas will show these as NaN).
    """
    for threshold, label in ((80, 'Big'), (60, 'Medium'), (40, 'Small')):
        if s >= threshold:
            return label
    return None
dataset['cat_size'] = dataset['size'].apply(categoriza)
dataset['cat_size']
dataset.head()
#Ver a distribuicao da coluna
dataset.cat_size.value_counts()
# # Removendo dados
# **Removendo Colunas**
dataset.drop(['cat_size'], axis=1, inplace=True)
dataset.drop(['size'], axis=1, inplace=True)
dataset.head()
# **Dropa linhas com bedrooms = 0 e maiores que 30**
dataset.drop(dataset[dataset.bedrooms==0].index , inplace=True)
dataset.drop(dataset[dataset.bedrooms>30].index ,inplace=True)
# **Visualizando os maiores valores da coluna bedrooms**
dataset.bedrooms.max()
# # Missing Values
# * Inspeciona o Dataframe em busca de valores missing
# * Valores como aspas ou espaço em branco não são considerados nulos ou NA
# * O método sum() retorna a soma valores nulos ou faltantes por colunas.
dataset.isnull()
# **Conta a quantidade de valores nulos**
dataset.isnull().sum().sort_values(ascending=False)
# **Remove todas as linhas onde tenha pela menos um registro faltante em algum atributo.**
dataset.dropna(inplace=True)
# **Remove somente linhas que estejam com valores faltantes em todas as colunas, veja:**
dataset.dropna(how='all', inplace=True)
# **Preenche com a media dos valores da coluna floors os values null**
dataset['floors'].fillna(dataset['floors'].mean(), inplace=True)
# **Preenche com 1 os values null da coluna bedrooms**
dataset['bedrooms'].fillna(1, inplace=True)
# # Visualização de dados
# * O pandas é integrado ao Matplotlib
# * Ploting de gráficos de forma fácil
# * Ideal para uma rápida visualização
# %matplotlib notebook
dataset['price'].plot()
# **Plota gráficos do tipo Scatter de duas colunas**
dataset.plot(x='bedrooms',y='price', kind='scatter', title='Bedrooms x Price',color='r')
dataset.plot(x='bathrooms',y='price',kind='scatter',color='y')
# %matplotlib notebook
dataset[['bedrooms','bathrooms']].hist(bins=30,alpha=0.5,color='Green')
import matplotlib
# %matplotlib notebook
matplotlib.style.use('ggplot')
dataset.boxplot(column='bedrooms')
# %matplotlib notebook
dataset.boxplot(column='price', by='bedrooms')
# ## Trabalhando com Excel
# **Ler planilha do Excel**
dataframe_excel = pd.read_excel('data/controle-de-atividades.xlsx', sheet_name=0, header=1)
dataframe_excel.head()
dataframe_excel["Estado Atual"].head(20)
# **Ordenada planilha por coluna estado atual**
dataframe_excel.sort_values(by="Estado Atual").head(10)
# **Checa dados nulos**
dataframe_excel.isnull().sum()
# **Dropa linhas nulas em todas as colunas**
dataframe_excel.dropna(how='all', inplace=True)
dataframe_excel
dataframe_excel.to_excel('data/planilha_teste.xlsx', index=False)
# * Pratique o que foi aprendido refazendo todos os passos
# * Faça os exercícios e me envie no e-mail abaixo.
# * **Dúvidas?** Mande um e-mail para mim em <EMAIL>
| notebooks/me-data-analysis-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <h1>Broadcasting</h1>
import numpy as np
my_3D_array = np.arange(70)
my_3D_array.shape = (2,7,5)
my_3D_array
# shape
my_3D_array.shape
# number of dimensions
my_3D_array.ndim
# size; number of elements
my_3D_array.size
# data type for each element
my_3D_array.dtype
5 * my_3D_array - 2
left_mat = np.arange(6).reshape((2,3))
right_mat = np.arange(15).reshape((3,5))
np.inner(left_mat, right_mat)
np.dot(left_mat, right_mat)
# <h2>Operations along axes</h2>
my_3D_array
# shape
my_3D_array.shape
my_3D_array.sum()
(69 * 70)/2
my_3D_array.sum(axis=0)
my_3D_array.sum(axis=1)
my_3D_array.sum(axis=2)
# <h2>Broadcasting Rules</h2>
my_2D_array = np.ones(35, dtype='int_').reshape((7,5)) * 3
my_2D_array
my_random_2D_array = np.random.random((7,5))
np.set_printoptions(precision=4)
my_3D_array * my_random_2D_array
my_vector = np.arange(5) * 7
my_vector[0] = -1
my_vector
my_3D_array / my_vector
my_3D_array % my_vector
| numpy-data-science-essential-training/Ex_Files_NumPy_Data_EssT/Exercise Files/Ch 3/03_03/Finish/Broadcasting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import joblib
modelo = joblib.load('modelo_lr.dump')
modelo.predict([[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]])
# !ls -l model*.dump
joblib.parallel.cpu_count()
| 2019/05-Metricas_de_Avaliacao/Testar_modelo_salvo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Iterators and Generators
#
# We've seen that in Python, anything which can be iterated over is called an iterable:
#
#
#
# +
bowl = {
"apple" : 5,
"banana" : 3,
"orange" : 7
}
for fruit in bowl:
print(fruit.upper())
# -
# Surprisingly often, we want to iterate over something that takes a moderately
# large amount of storage to store. For example, our map images in the
# green-graph example.
#
# Our green-graph example involved making an array of all the maps between London
# and Birmingham. This kept them all in memory *at the same time*: first we
# downloaded all the maps, then we counted the green pixels in each of them.
#
# This would NOT work if we used more points. We need to use a **generator**
# ### Iterators
# Consider the basic python `range` function:
range(10)
# +
total=0
for x in range(int(1e6)): total+= x
total
# -
# In python 3, in order to avoid allocating a million integers, `range` actually creates an ITERATOR.
#
# We don't actually need a million integers **at once**, just each
# integer **in turn** up to a million.
#
# The iterator is an iterable which is not an list.
# So we can for loop over it:
for i in range(3):
print(i)
# An generator object, like range(3), when we iterate over it, works by defining a `next()` method which
# moves the iterator forward:
a=iter(range(3))
a.__next__()
a.__next__()
a.__next__()
# At the end, `StopIteration` is raised as an exception:
a.__next__()
# We can turn an iterator back into a list with the `list` constructor function:
list(range(5))
# +
total=0
for x in range(int(1e6)): total+= x
print(total)
# -
# ### Defining Our Own Iterable
# We can make our own iterators by defining *classes* that implement next() and __iter__() methods: this is the iterator protocol.
#
# For each of the *concepts*, in Python, like sequence, container, iterable, python defines a **protocol**, a set of methods a class must implement, in order to be treated as a member of that concept.
#
# The iterator protocol is the protocol that defines things that support `for x in y:`.
#
# To define an iterator, the methods that must be supported are `next()` and `__iter__()`.
#
# `next()` must update the iterator.
#
# We'll see why we need to define `__iter__` in a moment.
class fib_iterator(object):
    """Iterator yielding successive Fibonacci numbers.

    Each call to next() advances the (previous, current) pair and
    returns the new current value; iteration stops after `limit`
    values have been produced.
    """

    def __init__(self, limit, seed1=1, seed2=1):
        self.limit = limit
        self.previous = seed1
        self.current = seed2

    def __iter__(self):
        # The iterator protocol: an iterator is its own iterable.
        return self

    def __next__(self):
        new_current = self.previous + self.current
        self.previous = self.current
        self.current = new_current
        self.limit = self.limit - 1
        if self.limit < 0:
            raise StopIteration()
        return self.current
x=fib_iterator(5)
next(x)
next(x)
next(x)
next(x)
for x in fib_iterator(5):
print(x)
sum(fib_iterator(1000))
# ### A shortcut to iterables: the `__iter__` method.
#
#
#
# In fact, if, to be iterated over, a class just wants to behave as if it were some other iterable, you can just implement `__iter__` and return `iter(some_other_iterable)`, without implementing `next`. For example, an image class might want to implement some metadata, but behave just as if it were just a 1-d pixel array when being iterated:
#
#
#
# +
from numpy import array
from matplotlib import pyplot as plt
class MyImage(object):
    """Small image wrapper that iterates as a flat sequence of pixels.

    Stores the pixels as a uint8 array of shape (rows, cols, channels);
    iterating the image yields one per-pixel channel array at a time.
    """

    def __init__(self, pixels):
        # Coerce whatever nested sequence we are given into a uint8 array.
        self.pixels = array(pixels, dtype='uint8')
        self.channels = self.pixels.shape[2]

    def __iter__(self):
        # Delegate iteration to a (n_pixels, channels) view of the data.
        flattened = self.pixels.reshape(-1, self.channels)
        return iter(flattened)

    def show(self):
        # No interpolation so that individual pixels stay sharp.
        plt.imshow(self.pixels, interpolation="None")
x=[[[255,255,0],[0,255,0]],[[0,0,255],[255,255,255]]]
image=MyImage(x)
# -
# %matplotlib inline
image.show()
image.channels
from webcolors import rgb_to_name
for pixel in image:
print(rgb_to_name(pixel))
# The **iterator** protocol is to implement both `__iter__` and
# `next`, while the **iterable** protocol is to implement `__iter__` and return
# an something iterable.
# ### Generators
# There's a fair amount of "boiler-plate" in the above class-based definition of
# an iterable.
#
# Python provides another way to specify something
# which meets the iterator protocol: **generators**.
# +
def my_generator():
    """Yield 5 then 10 — a minimal generator-function example."""
    for value in (5, 10):
        yield value
x=my_generator()
# -
x.__next__()
x.__next__()
x.__next__()
for a in my_generator():
print(a)
sum(my_generator())
# A function which has `yield` statements instead of a `return` statement returns
# **temporarily**: it automagically becomes something which implements `next`.
# Each call of next() returns control to the function where it
# left off.
# Control passes back-and-forth between the generator and the caller.
# Our fibonacci example therefore becomes a function rather than a class.
def yield_fibs(limit, seed1=1,seed2=1):
current=seed1
previous=seed2
while limit>0:
limit-=1
current, previous = current+previous, current
yield current
sum(yield_fibs(5))
plt.plot(list(yield_fibs(20)))
for a in yield_fibs(10):
if a%2 == 0:
print(a)
list(yield_fibs(10))
# ### Context managers
#
# We saw that instead of separately `open`ing and `close`ing a file, we can have
# the file be automatically closed using a context manager:
#
#
#
# +
import yaml
with open('example.yaml') as foo:
print(yaml.load(foo))
# -
#
#
#
# How could we define our own one of these, if we too have clean-up code we
# always want to run after a calling function has done its work, or set-up code
# we want to do first?
#
# We can define a class that meets an appropriate protocol:
#
#
#
# +
class verbose_context():
    """Context manager that announces entry to and exit from a block."""

    def __init__(self, name):
        self.name = name

    def __enter__(self):
        # Runs before the with-body; returns None, so `as x` binds None.
        print("Get ready, ", self.name)

    def __exit__(self, exc_type, exc_value, traceback):
        # Runs after the with-body, even on error; returning None
        # lets any exception propagate.
        print("OK, done")
with verbose_context("James"):
print("Doing it!")
# -
#
#
# However, this is pretty verbose! Again, a generator with `yield` makes for an easier syntax:
#
#
#
# +
from contextlib import contextmanager
@contextmanager
def verbose_context(name):
    """Announce entry, yield the upper-cased name to the with-body,
    then announce completion — the generator-based equivalent of the
    class-based context manager above."""
    print("Get ready for action, ", name)
    shouted = name.upper()
    yield shouted
    print("You did it")
with verbose_context("James") as shouty:
print("Doing it, ", shouty)
# -
#
#
# Again, we use `yield` to temporarily return from a function.
#
# ### Decorators
#
# When doing functional programming, we may often want to define mutator
# functions which take in one function and return a new function, such as our
# derivative example earlier.
#
#
#
# +
def repeater(count):
    """Return a decorator that composes a one-argument function with
    itself `count` times (f applied repeatedly to its own output)."""
    def wrap_function_in_repeat(func):
        # Closure over `count` and `func`.
        def _repeated(x):
            result = x
            remaining = count
            while remaining > 0:
                remaining -= 1
                result = func(result)
            return result
        return _repeated
    return wrap_function_in_repeat
from math import sqrt
fiftytimes=repeater(50)
fiftyroots=fiftytimes(sqrt)
print(fiftyroots(100))
# -
# It turns out that, quite often, we want to apply one of these to a function as we're defining a class.
# For example, we may want to specify that after certain methods are called, data should always be stored:
# Any function which accepts a function as its first argument and returns a function can be used as a **decorator** like this.
#
# Much of Python's standard functionality is implemented as decorators: we've
# seen @contextmanager, @classmethod and @attribute. The @contextmanager
# metafunction, for example, takes in an iterator, and yields a class conforming
# to the context manager protocol.
#
@repeater(3)
def hello(name):
return "Hello, "+ name
hello("James")
# ### Test generators
#
#
# A few weeks ago we saw a test which loaded its test cases from a YAML file and
# asserted each input with each output. This was nice and concise, but had one
# flaw: we had just one test, covering all the fixtures, so we got just one . in
# the test output when we ran the tests, and if any test failed, the rest were
# not run. We can do a nicer job with a test **generator**:
#
#
#
#
# +
def assert_examplar(**fixture):
    """Check one fixture: every key except 'answer' is a keyword
    argument for greet(), and 'answer' is the expected return value."""
    answer = fixture.pop('answer')
    assert_equal(greet(**fixture), answer)

def test_greeter():
    """Test generator: yields one assertion per fixture loaded from
    fixtures/samples.yaml next to this file."""
    with open(os.path.join(os.path.dirname(
        __file__),'fixtures','samples.yaml')
    ) as fixtures_file:
        fixtures = yaml.load(fixtures_file)
        for fixture in fixtures:
            # BUG FIX: the original yielded assert_exemplar(**fixture) —
            # a name that does not exist (the helper above is spelled
            # assert_examplar) — which would raise NameError at runtime.
            yield assert_examplar(**fixture)
# -
# Each time a function beginning with `test_` does a `yield` it results in another test.
# ### Negative test contexts managers
# We have seen this:
# +
from nose.tools import assert_raises
with assert_raises(AttributeError):
x=2
x.foo()
# -
# We can now see how `nose` might have implemented this:
# +
from contextlib import contextmanager
@contextmanager
def reimplement_assert_raises(exception):
    """Context manager asserting that the with-body raises `exception`.

    The expected exception is swallowed; if the body completes without
    raising, an Exception is raised instead.  Any other exception type
    propagates unchanged out of the with-block.
    """
    caught = False
    try:
        yield
    except exception:
        caught = True
    if not caught:
        raise Exception("Expected,", exception,
                        " to be raised, nothing was.")
# -
with reimplement_assert_raises(AttributeError):
x=2
x.foo()
# ### Negative test decorators
# Nose also implements a very nice negative test decorator:
# +
from nose.tools import raises
@raises(TypeError, ValueError)
def test_raises_type_error():
raise TypeError("This test passes")
# -
test_raises_type_error()
@raises(Exception)
def test_that_fails_by_passing():
pass
test_that_fails_by_passing()
# We could reimplement this ourselves now too:
def homemade_raises_decorator(exception):
    """Decorator factory: the returned decorator wraps a function so
    that calling it asserts `exception` is raised, by running the
    wrapped call inside nose's assert_raises context manager."""
    def wrap_function(func):
        # Closure over both `exception` and `func`.
        def _output(*args):
            # The call must raise `exception` for the context to pass.
            with assert_raises(exception):
                func(*args)
        return _output
    return wrap_function
@homemade_raises_decorator(TypeError)
def test_raises_type_error():
raise TypeError("This test passes")
test_raises_type_error()
| ch07dry/025Iterators.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Third-party imports for scraping the character list and building the
# co-occurrence graph.
from requests import get
import numpy as np
from bs4 import BeautifulSoup as bs
import matplotlib.pyplot as plt
import PyPDF2  # reads the PDF and extracts its text
# BUG FIX: the module was aliased as "nxtex", but every call site below
# uses "nx" (nx.Graph, nx.kamada_kawai_layout, nx.draw), which would
# raise NameError at runtime.
import networkx as nx
import itertools
from collections import Counter
# +
url = "https://en.wikipedia.org/wiki/List_of_Harry_Potter_characters"
response = get(url)
soup = bs(response.text)
list_items = soup.find("div", {"id" : "mw-content-text"}).find_all("li")
names = [i.text.split("–")[0].strip() for i in list_items if "–" in i.text]
names = names[:names.index("Winky")]
delete_list = ["Madam", "Sir", "Mr.", "Mrs.", "Lady", "The", "and", "Sr.", "/", "the"]
for delete_item in delete_list:
names = [i.replace(delete_item, "") for i in names]
names = list(set(names))
# +
# Build a character co-occurrence network from the book text:
# characters that appear in the same paragraph get an edge, and node
# size reflects how many paragraphs each character appears in.
# NOTE(review): the imports cell aliases networkx as "nxtex", so the
# "nx" references below will NameError unless that alias is fixed.
harry = open('harry_1.pdf', 'rb')
harry = PyPDF2.PdfFileReader(harry)
number_of_pages = harry.getNumPages()
# Concatenate the extracted text of every page into one string.
book = ""
for i in range(number_of_pages):
    page = harry.getPage(i)
    page_content = page.extractText()
    book += page_content
# Treat blank-line-separated chunks as paragraphs.
paragraphs = book.split("\n\n")
G = nx.Graph()
G.add_nodes_from(names)
names_ = []  # one entry per (paragraph, character) appearance; drives node sizes
for paragraph in paragraphs:
    temp_1 = []
    temp_2 = []
    # Count how many times any word of each character's name occurs
    # in this paragraph.
    for name in names:
        temp_1.append((np.sum([paragraph.count(i) for i in name.split()]), name))
    temp_2 = []
    # Keep only the characters actually mentioned in this paragraph.
    for i in temp_1:
        if i[0] > 0:
            temp_2.append(i[1])
    # Every pair of co-occurring characters becomes an edge.
    G.add_edges_from([i for i in itertools.combinations(temp_2, 2)])
    names_.extend(temp_2)
size_ = Counter(names_)  # appearance counts per character
remain = list(size_.keys())
remove = list(set(names) - set(remain))
# Drop characters that never appear in the book text at all.
G.remove_nodes_from(remove)
pos=nx.kamada_kawai_layout(G, scale = 500)
nx.draw(G,linewidths = 2,width = 0.2, edgecolors = "white", pos = pos, font_size = 8, edge_color = "b", with_labels = True, nodelist = list(size_.keys()), node_size = np.array(list(size_.values()))*1)
plt.show()
# -
| Lectures/Lecture_11_Network_Visualization/harry_potter_network/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ADAPTmap_genotypeTOP_20161201
# Try to describe `ADAPTmap_genotypeTOP_20161201.zip` data files. This dataset is paired with `ADAPTmap_phenotype_20161201.zip` dataset
# +
import pandas as pd
from plinkio import plinkfile
from src.features.smarterdb import global_connection, Dataset
from src.data.common import pandas_open
# -
global_connection()
geno_dataset = Dataset.objects.get(file="ADAPTmap_genotypeTOP_20161201.zip")
phen_dataset = Dataset.objects.get(file="ADAPTmap_phenotype_20161201.zip")
print("\n".join(geno_dataset.contents))
print("\n".join(phen_dataset.contents))
# Data and metadata are splitted in two datasets. Moreover, by investigating on Adaptmap metadata (from ADAPTMAP project), I found that the same breed could be sampled in different countries, so is not possible to upload breed and aliases as I did until now. Take for example `ALP` breed for example:
datapath = phen_dataset.working_dir / "ADAPTmap_phenotype_20161201/ADAPTmap_InfoSample_20161201.csv"
data = pandas_open(datapath)
subset = data.loc[data['Breed_code']=='ALP', ['Breed_code', 'Sampling_Country']]
subset.groupby(['Breed_code', 'Sampling_Country']).size()
# This breed in particular was sampled in three different countries: `ITALY`, `SWITZERLAND` and `FRANCE`
# How about the genotype files?
plink_path = geno_dataset.working_dir / "ADAPTmap_genotypeTOP_20161201/binary_fileset/ADAPTmap_genotypeTOP_20161201"
plink_file = plinkfile.open(str(plink_path))
sample_list = plink_file.get_samples()
locus_list = plink_file.get_loci()
# Ok, try to get `ALP` samples, for example and get information on them
alp_samples = [sample for sample in sample_list if sample.fid == 'ALP']
sample_sex = [sample for sample in alp_samples if sample.sex != -9]
sample_sex
# The breed code matches the fid in the ADAPTmap dataset. However, the sex column appears to be missing from the *plink* files, even though such information is available from the metadata
| notebooks/exploratory/0.13.0-bunop-describe_adaptmap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import holoviews as hv
from bokeh.models import HoverTool
from bokeh.plotting import output_file
hv.extension('bokeh')
# ### Intro
# Script to reproduce the "[Warming Stripes](https://www.climate-lab-book.ac.uk/2018/warming-stripes/)" from [<NAME>](http://www.met.reading.ac.uk/~ed/home/index.php), now also interactive in [this blog post](https://fabienmaussion.info/2018/11/25/bokeh-stripes/).
# ### Read the data
# +
# GISTEMP data from https://data.giss.nasa.gov/gistemp/tabledata_v3/GLB.Ts+dSST.csv
# If you have the data, use:
# df = pd.read_csv('GLB.Ts+dSST.csv', header=1, skipfooter=1, engine='python')
# This downloads it:
import requests, io
url = 'https://data.giss.nasa.gov/gistemp/tabledata_v3/GLB.Ts+dSST.csv'
response = requests.get(url)
df = pd.read_csv(io.StringIO(response.text), header=1, skipfooter=1, engine='python')
# -
# Annual values only
dfa = df[['Year', 'J-D']].copy()
dfa.columns = ['Year', 'Anomaly']
# This is to trick holoviews into making an image out of the dataframe
dfa['index'] = 1
# We want to display the rank as well
dfa['Rank'] = len(dfa) - np.argsort(dfa['Anomaly'])
# Monthly values
dfm = df[df.columns[:13]].copy()
dfm = pd.melt(dfm, id_vars='Year', var_name='Month', value_name='Anomaly')
# ### Matplotlib plots
dfa['Anomaly'].plot();
plt.pcolormesh(dfa['Anomaly'].values.reshape((1, len(dfa))), cmap='RdBu_r')
plt.colorbar();
# ### Bokeh plot: annual stripes
# Display only these three columns as hover
hover = HoverTool(
tooltips=[
("Year", "@Year"),
("Anomaly", "@Anomaly"),
("Rank", "@Rank"),
]
)
# simple trick to workaround https://github.com/ioam/holoviews/issues/2730
def set_active_drag(plot, element):
    # Holoviews finalize hook: clear bokeh's active drag tool on the
    # rendered plot (workaround for holoviews issue #2730 linked above).
    plot.state.toolbar.active_drag = None
# same: https://github.com/ioam/holoviews/issues/3220
hv.Store.add_style_opts(hv.HeatMap, ['dilate'], 'bokeh')
# Display the plot
heatmap = hv.HeatMap(dfa, kdims=['Year', 'index'],
label='NASA GISTEMP 1880-2017: annual deviation from 1951-1980 means')
heatmap = heatmap.options(tools=[hover], cmap='RdBu_r', width=700, height=300,
xaxis=None, labelled=[], yaxis=None, toolbar=None, dilate=True,
finalize_hooks=[set_active_drag])
heatmap
# Save as HTML
renderer = hv.renderer('bokeh')
renderer.save(heatmap, 'annual-stripes-700x300')
# Save larger plot
heatmap = hv.HeatMap(dfa, kdims=['Year', 'index'],
label='NASA GISTEMP 1880-2017: annual deviation from 1951-1980 means')
heatmap = heatmap.options(tools=[hover], cmap='RdBu_r', width=1200, height=600,
xaxis=None, labelled=[], yaxis=None, toolbar=None, dilate=True,
finalize_hooks=[set_active_drag])
renderer = hv.renderer('bokeh')
renderer.save(heatmap, 'annual-stripes-1200x600')
# ### Bokeh plot: monthly stripes
# Display the plot
heatmap = hv.HeatMap(dfm, kdims=['Year', 'Month'],
label='NASA GISTEMP 1880-2017: monthly deviation from 1951-1980 means')
heatmap = heatmap.options(tools=['hover'], cmap='RdBu_r', width=700, height=300,
xaxis=None, labelled=[], yaxis=None, toolbar=None, dilate=True,
finalize_hooks=[set_active_drag])
heatmap
# Save as HTML
renderer = hv.renderer('bokeh')
renderer.save(heatmap, 'monthly-stripes-700x300')
# Save larger plot
heatmap = hv.HeatMap(dfm, kdims=['Year', 'Month'],
label='NASA GISTEMP 1880-2017: monthly deviation from 1951-1980 means')
heatmap = heatmap.options(tools=['hover'], cmap='RdBu_r', width=1200, height=600,
xaxis=None, labelled=[], yaxis=None, toolbar=None, dilate=True,
finalize_hooks=[set_active_drag])
renderer = hv.renderer('bokeh')
renderer.save(heatmap, 'monthly-stripes-1200x600')
| stripes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Set CRS of SHP outputted from Cube. By default, Cube will export NET to SHP without defining a CRS
import os
import arcpy
def set_sref(in_file, out_dir, out_file, output_sref):
    """Copy a feature class while stamping it with a coordinate system.

    Cube exports networks to SHP without defining a CRS, so this sets the
    arcpy environment's output CRS and re-exports the data.

    in_file     -- path to the input feature class / shapefile
    out_dir     -- output workspace (folder or file geodatabase)
    out_file    -- name for the output feature class
    output_sref -- arcpy.SpatialReference to assign on output
    """
    # Setting the environment CRS makes the conversion below write its
    # output in that coordinate system.
    arcpy.env.outputCoordinateSystem = output_sref
    arcpy.conversion.FeatureClassToFeatureClass(in_file, out_dir, out_file)
if __name__ == '__main__':
    # input SHP, from Cube NET and without spatial ref system
    shp_dir = r"Q:\SACSIM19\2020MTP\highway\network update\NetworkGIS\ModelNetGISProjects\MTP_MTIP_DataReleaseComparison\SHP\2040Pricing"
    in_shp = "MTP_MTIPAm2_2040Pricing.shp"
    # output location (file geodatabase)
    output_dir = r'\\data-svr\Modeling\SACSIM19\2020MTP\highway\network update\NetworkGIS\ModelNetGISProjects\MTP_MTIP_DataReleaseComparison\NetworkReleaseComparison.gdb'
    # set spatial reference you want to output it as
    sr_sacog = arcpy.SpatialReference(2226) # 2226 = SACOG CRS ID (CA NAD83 ZONE 5); 4326 = WGS84
    #==============================================================
    # Rebind in_shp to its full path, then derive the output name
    # ("compare" + input base name without extension) from it.
    in_shp = os.path.join(shp_dir, in_shp)
    out_fc = f"compare{os.path.splitext(os.path.basename(in_shp))[0]}"
    set_sref(in_shp, output_dir, out_fc, sr_sacog)
# -
| model_network/set_network_crs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # Validating Runge Kutta Butcher tables using Truncated Taylor Series
# ## Authors: <NAME> & <NAME>
#
#
# ## This tutorial notebook is designed to validate the Butcher tables contained within the Butcher dictionary constructed in the [RK Butcher Table Dictionary](Tutorial-RK_Butcher_Table_Dictionary.ipynb) NRPy+ module.
#
# ### NRPy+ Source Code for this module:
# * [MoLtimestepping/RK_Butcher_Table_Validation.py](../edit/MoLtimestepping/RK_Butcher_Table_Validation.py) Stores the `Validate` function for validating convergence orders for Runge Kutta methods
# * [MoLtimestepping/RK_Butcher_Table_Dictionary.py](../edit/MoLtimestepping/RK_Butcher_Table_Dictionary.py) [\[**tutorial**\]](Tutorial-RK_Butcher_Table_Dictionary.ipynb) Accesses the Butcher table dictionary `Butcher_dict` for known explicit Runge Kutta methods
#
# ## Introduction:
#
# Starting with the ODE (ordinary differential equation) initial value problem:
# $$
# y'(t) = f(y,t)\ \ \ y\left(t=0\right)=y_0,
# $$
# for various choices of $f(y,t)$, this module validates the Runge Kutta (RK) methods coded in [RK_Butcher_Table_Dictionary.py](../edit/MoLtimestepping/RK_Butcher_Table_Dictionary.py) [**tutorial notebook**](Tutorial-RK_Butcher_Table_Dictionary.ipynb) as follows:
#
# Given $y_0$, and a smooth $f(y,t)$, all explicit RK methods provide an estimate for $y_1 = y\left(\Delta t\right)$, with an error term that is proportional to $\left(\Delta t\right)^m$, where $m$ is an integer typically greater than zero. This error term corresponds to the *local* truncation error. For RK4, for example, while the *total accumulated truncation error* (i.e., the accumulated error at a fixed final time $t_f$) is proportional to $\left(\Delta t\right)^4$, the *local* truncation error (i.e., the error after one arbitrarily chosen timestep $\Delta t$) is proportional to $\left(\Delta t\right)^5$.
#
# If the exact solution $y(t)$ is known as a closed-form expression, then $y\left(\Delta t\right)$ can be *separately* written as a Taylor expansion about $y(t=0)$:
#
# $$
# y\left(\Delta t\right) = \sum_{n=0}^\infty \frac{y^{(n)}(t=0)}{n!} \left(\Delta t\right)^n,
# $$
# where $y^{(n)}(t=0)$ is the $n$th derivative of $y(t)$ evaluated at $t=0$.
#
# The above expression will be known exactly. Further if one chooses a numerical value for $y_0$ *and leaves $\Delta t$ unspecified*, any explicit RK method will provide an estimate for $y\left(\Delta t\right)$ of the form
#
# $$
# y\left(\Delta t\right) = \sum_{n=0}^\infty a_n \left(\Delta t\right)^n,
# $$
# where $a_n$ *must* match the Taylor expansion of the *exact* solution at least up to and including terms proportional to $\left(\Delta t\right)^m$, where $m$ is the order of the local truncation error. If this is *not* the case, then the Butcher table was almost certainly *not* typed correctly.
#
# Therefore, comparing the numerical result with unspecified $\Delta t$ against the exact Taylor series provides a convenient (though not perfectly robust) means to verify that the Butcher table for a given RK method was typed correctly. Multiple typos in the Butcher tables were found using this approach.
#
# **Example from <NAME>'s MATH 521 (Numerical Analysis) lecture notes:**
#
# Consider the ODE
# $$
# y' = y - 2 t e^{-2t},\quad y(0)=y(t_0)=0.
# $$
#
# * Solve this ODE exactly, then Taylor expand the solution about $t=0$ to
# approximate the solution at $y(t=\Delta t)$ to fifth order in $\Delta
# t$.
# * Next solve this ODE using Heun's method (second order in total accumulated truncation error, third order in local truncation error) {\it by hand} with a step size of
# $\Delta t$ to find $y(\Delta t)$. Confirm that the solution obtained
# when using Heun's method has an error term that is at worst
# $\mathcal{O}\left((\Delta t)^3\right)$. If the dominant error is
# proportional to a higher power of $\Delta t$, explain the discrepancy.
#
# * Finally solve this ODE using the Ralston method {\it by hand}
# with a step size of $\Delta t$ to find $y(\Delta t)$. Is the
# coefficient on the dominant error term closer to the exact solution
# than Heun's method?
#
# We can solve this equation via the method of integrating factors,
# which states that ODEs of the form:
# $$
# y'(t) + p(t) y(t) = g(t)
# $$
# are solved via
# $$
# y(t) = \frac{1}{\mu(t)} \left[ \int \mu(s) g(s) ds + c \right],
# $$
# where the integrating factor $\mu(t)$ is given by
# $$
# \mu(t) = \exp\left(\int p(t) dt\right)
# $$
#
# Here, $p(t)=-1$ and $g(t) = - 2 t e^{-2t}$. Then
# $$
# \mu(t) = \exp\left(-\int dt\right) = e^{-t+c} = k e^{-t}
# $$
# and
# \begin{align}
# y(t) &= e^t/k \left[ \int k e^{-s} (- 2 s e^{-2s}) ds + c \right] = -2 e^t \left[ \int s e^{-3s} ds + c' \right] \\
# &= -2 e^t \left[ e^{-3 t} \left(-\frac{t}{3}-\frac{1}{9}\right) + c' \right] = -2 e^{-2t} \left(-\frac{t}{3}-\frac{1}{9}\right) -2 c' e^t \\
# &= e^{-2t} \left(2\frac{t}{3}+\frac{2}{9}\right) + c'' e^t \\
# \end{align}
#
# If $y(0)=0$ then we can compute the integration constant $c''$, and
# $y(t)$ becomes
# $$
# y(t) = \frac{2}{9} e^{-2 t} \left(3 t + 1 - e^{3 t}\right).
# $$
#
# The Taylor Series expansion of the exact solution about $t=0$
# evaluated at $y(\Delta t)$ yields
# $$
# y(\Delta t) = -(\Delta t)^2+(\Delta t)^3-\frac{3 (\Delta t)^4}{4}+\frac{23 (\Delta
# t)^5}{60}-\frac{19 (\Delta t)^6}{120}+O\left((\Delta t)^7\right).
# $$
#
# Next we evaluate $y(\Delta t)$ using Heun's method. We know $y(0)=y_0=0$ and
# $f(y,t)=y - 2 t e^{-2t}$, so
# \begin{align}
# k_1 &= \Delta t f(y(0),0) \\
# &= \Delta t \times 0 \\
# &= 0 \\
# k_2 &= \Delta t f(y(0)+k_1,0+\Delta t) \\
# &= \Delta t f(y(0)+0,0+\Delta t) \\
# &= \Delta t (-2 \Delta t e^{-2\Delta t}) \\
# &= -2 (\Delta t)^2 e^{-2\Delta t} \\
# y(\Delta t) &= y_0 + \frac{1}{2} (k_1 + k_2) + \mathcal{O}\left((\Delta t)^3\right) \\
# &= 0 - (\Delta t)^2 e^{-2\Delta t} \\
# &= - (\Delta t)^2 ( 1 - 2 \Delta t + 2 (\Delta t)^2 + ...) \\
# &= - (\Delta t)^2 + 2 (\Delta t)^3 + \mathcal{O}\left((\Delta t)^4\right).
# \end{align}
#
# Thus the coefficient on the $(\Delta t)^3$ term is wrong, but
# this is completely consistent with the fact that our stepping
# scheme is only third-order accurate in $\Delta t$.
#
# In the below approach, the RK result is subtracted from the exact Taylor series result, as a check to determine whether the RK Butcher table was coded correctly; if it was not, then the odds are good that the RK results will not match to the expected local truncation error order. Multiple $f(y,t)$ are coded below to improve the robustness of this test.
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This notebook is organized as follows
#
# 1. [Step 1](#initializenrpy): Initialize needed Python/NRPy+ modules
# 1. [Step 2](#table_validate) Validate Convergence Order of Butcher Tables
# 1. [Step 2.a](#rhs): Defining the right-hand side of the ODE
# 1. [Step 2.b](#validfunc): Defining a Validation Function
# 1. [Step 2.c](#rkvalid): Validating RK Methods against ODEs
# 1. [Step 3](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
# <a id='initializenrpy'></a>
#
# # Step 1: Initialize needed Python/NRPy+ modules [Back to [top](#toc)\]
# $$\label{initializenrpy}$$
#
# Let's start by importing all the needed modules from Python/NRPy+:
import sympy as sp
import NRPy_param_funcs as par
import numpy as np
from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
# <a id='table_validate'></a>
#
# # Step 2: Validate Convergence Order of Butcher Tables [Back to [top](#toc)\]
# $$\label{table_validate}$$
#
#
# Each Butcher table/Runge Kutta method is tested by solving an ODE. Comparing the Taylor series expansions of the exact solution and the numerical solution as discussed in the **Introduction** above will confirm whether the method converges to the appropriate order.
# <a id='rhs'></a>
#
# ## Step 2.a: Defining the right-hand side of the ODE [Back to [top](#toc)\]
# $$\label{rhs}$$
#
# Consider the form of ODE $y'=f(y,t)$. The following begins to construct a dictionary `rhs_dict` of right-hand side functions for us to validate explicit Runge Kutta methods. The most up-to-date catalog of functions stored in `rhs_dict` can be found in the [RK_Butcher_Table_Validation.py](../edit/MoLtimestepping/RK_Butcher_Table_Validation.py) module.
# +
def fypt(y,t):
    """RHS f(y, t) = y + t.

    Every method converges at its expected order on this ODE, except DP6,
    which happens to converge one order higher (7).
    """
    return y+t


def fy(y,t):
    """RHS f(y, t) = y; all methods converge at their expected order."""
    return y


def feypt(y,t):
    """RHS f(y, t) = exp(y + t); the 1.0 factor keeps the argument numeric."""
    return sp.exp(1.0*(y+t))


def ftpoly6(y,t):
    """Degree-6 polynomial in t; all methods converge at their expected
    order, and L6 integrates it with zero error."""
    return 2*t**6-389*t**5+15*t**4-22*t**3+81*t**2-t+42


# Registry mapping short keys to the right-hand-side callables above.
rhs_dict = dict(ypt=fypt, y=fy, eypt=feypt, tpoly6=ftpoly6)
# -
# <a id='validfunc'></a>
#
# ## Step 2.b: Defining a Validation Function [Back to [top](#toc)\]
# $$\label{validfunc}$$
#
# To validate each Butcher table we compare the exact solutions to ODEs with the numerical solutions using the Runge Kutta scheme built into each Butcher table. The following is a function that
#
# 1. Solves the ODE exactly,
# 2. Solves the ODE numerically for a given Butcher table, and
# 3. Compares the two solutions and checks for the order of convergence by returning their difference.
#
# The `Validate()` function inputs a specified `Butcher_key`, the starting guess solution and time `y_n`, `t_n` and the right-hand side of the ODE corresponding to a specified initial value problem, `rhs_key`.
#
#
#
from MoLtimestepping.RK_Butcher_Table_Dictionary import Butcher_dict
def Validate(Butcher_key, yn, tn, rhs_key):
    """Return (exact - numerical) Taylor series for one RK step of size dt.

    Solves y' = rhs_dict[rhs_key](y, t), y(tn) = yn exactly with sympy, then
    takes a single step of the RK method named by `Butcher_key` using a
    symbolic (unspecified) step size dt, and returns the difference of the
    two Taylor expansions about dt = 0. For a correctly-typed Butcher table
    the first surviving term appears at the method's local truncation order.

    NOTE: relies on the module-level sympy symbols `t` and `dt` (defined in
    a later cell) and on the global `rhs_dict`.
    """
    # 1. First we solve the ODE exactly
    y = sp.Function('y')
    sol = sp.dsolve(sp.Eq(y(t).diff(t), rhs_dict[rhs_key](y(t), t)), y(t)).rhs
    # Fix the integration constant from the initial condition y(tn) = yn.
    constants = sp.solve([sol.subs(t,tn)-yn])
    exact = sol.subs(constants)
    # 2. Now we solve the ODE numerically using specified Butcher table
    # Access the requested Butcher table
    Butcher = Butcher_dict[Butcher_key][0]
    # Determine number of predictor-corrector steps
    L = len(Butcher)-1
    # Set a temporary array for update values (dtype=object holds sympy exprs)
    k = np.zeros(L, dtype=object)
    # Initialize intermediate variable
    yhat = 0
    # Initialize the updated solution
    ynp1 = 0
    for i in range(L):
        #Initialize and approximate update for solution
        yhat = yn
        for j in range(i):
            # Update yhat for solution using a_ij Butcher table coefficients
            yhat += Butcher[i][j+1]*k[j]
        if Butcher_key == "DP8" or Butcher_key == "L6":
            yhat = 1.0*sp.N(yhat,20) # Otherwise the adding of fractions kills performance.
        # Determine the next corrector variable k_i using c_i Butcher table coefficients
        k[i] = dt*rhs_dict[rhs_key](yhat, tn + Butcher[i][0]*dt)
        # Update the solution at the next iteration ynp1 using Butcher table coefficients
        ynp1 += Butcher[L][i+1]*k[i]
    # Finish determining the solution for the next iteration
    ynp1 += yn
    # Determine the order of the RK method; the +2 extends the series far
    # enough to show the first error term(s) beyond the truncation order.
    order = Butcher_dict[Butcher_key][1]+2
    # Produces Taylor series of exact solution at t=tn about t = 0 with the specified order
    exact_series = sp.series(exact.subs(t, dt),dt, 0, order)
    num_series = sp.series(ynp1, dt, 0, order)
    diff = exact_series-num_series
    return diff
# <a id='rkvalid'></a>
#
# ## Step 2.c: Validating RK Methods against ODEs [Back to [top](#toc)\]
# $$\label{rkvalid}$$
#
# The following makes use of the `Validate()` function above to demonstrate that each method within the Butcher table dictionary converges to the expected order for the given right-hand side expression.
# Symbolic time variable and (unspecified) step size used by Validate().
t, dt = sp.symbols('t dt')
# Set initial conditions
t0 = 0
y0 = 1
# Set RHS of ODE
function = 'ypt'# This can be changed, just be careful that the initial conditions are satisfied
# Run every Butcher table in the dictionary through Validate() and print the
# residual series; the first nonzero coefficient should appear at (or above)
# the method's local truncation order.
for key,value in Butcher_dict.items():
    print("RK method: \""+str(key)+"\".")
    y = sp.Function('y')
    print(" When solving y'(t) = "+str(rhs_dict[function](y(t),t))+", y("+str(t0)+")="+str(y0)+",")
    # Butcher_dict values are (table, order); local truncation order = order + 1.
    local_truncation_order = list(value)[1]+1
    print(" the first nonzero term should have local truncation error proportional to O(dt^"+str(local_truncation_order)+") or a higher power of dt.")
    print("Subtracting the numerical result from the exact Taylor expansion, we find a local truncation error of:")
    sp.pretty_print(Validate(key, y0, t0, function))
    # print("\n")
    print(" (Coefficients of order 1e-15 or less may generally be ignored, as these are at roundoff error.)\n")
# <a id='latex_pdf_output'></a>
#
# # Step 3: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-RK_Butcher_Table_Validation.pdf](Tutorial-RK_Butcher_Table_Validation.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
# !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-RK_Butcher_Table_Validation.ipynb
# !pdflatex -interaction=batchmode Tutorial-RK_Butcher_Table_Validation.tex
# !pdflatex -interaction=batchmode Tutorial-RK_Butcher_Table_Validation.tex
# !pdflatex -interaction=batchmode Tutorial-RK_Butcher_Table_Validation.tex
# !rm -f Tut*.out Tut*.aux Tut*.log
| Tutorial-RK_Butcher_Table_Validation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# +
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
from keras.callbacks import ModelCheckpoint
import pickle
print "using ordering:", K.image_dim_ordering()
# +
# load data from memory
# Reload the preprocessed dataset that an earlier step pickled to disk.
import pickle
pickle_file = '-data.pickle'
with open(pickle_file, 'rb') as f:
    save = pickle.load(f)  # expected to be a dict with 'X' and 'y' keys
    X = save['X']  # sample images
    y = save['y']  # integer class labels
    del save # hint to help gc free up memory
# +
# number of classes
num_classes = 4
# image dimensions (assumes X arrives as (n_samples, rows, cols) -- TODO confirm)
img_rows, img_cols = X.shape[1], X.shape[2]
# Insert the single channel axis where the active backend expects it:
# 'th' (Theano-style) ordering is channels-first, otherwise channels-last.
if K.image_dim_ordering() == 'th':
    X = X.reshape(X.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    X = X.reshape(X.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# One-hot encode the integer labels for categorical cross-entropy.
y = np_utils.to_categorical(y, num_classes)
print X.shape
print y.shape
# +
# preview one sample from the reloaded X dataset to make sure nothing happened along the way
# %matplotlib inline
from matplotlib.pyplot import imshow
import matplotlib.pyplot as plt
img_num = 1000  # arbitrary sample index to inspect
# Strip the channel axis back off for display, per backend ordering.
if K.image_dim_ordering() == 'th':
    img = X[img_num][0,:,:]
else:
    img = X[img_num][:,:,0]
print img.shape
# Fixed [0, 1] gray scale (pixel values are assumed normalized -- TODO confirm).
imshow(img, cmap = plt.get_cmap('gray'), vmin = 0, vmax = 1, interpolation='nearest')
# +
# model hyperparameters
batch_size = 32
nb_epoch = 10
# network architecture
# 3x3 convolution kernels at each of the three conv stages
patch_size_1 = 3
patch_size_2 = 3
patch_size_3 = 3
# feature-map depth doubles at each stage: 32 -> 64 -> 128
depth_1 = 32
depth_2 = 64
depth_3 = 128
pool_size = 2  # 2x2 max pooling after each conv stage
# two fully-connected hidden layers of 512 units each
num_hidden_1 = 512
num_hidden_2 = 512
dropout = 0.25  # dropout rate used after each dense hidden layer
# +
# Three conv -> ReLU -> maxpool stages followed by a two-layer dense head
# and a softmax classifier. Uses the Keras 1.x API (positional kernel size,
# border_mode='valid' i.e. no padding).
model = Sequential()
model.add(Convolution2D(depth_1, patch_size_1, patch_size_1,
                        border_mode='valid',
                        input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Convolution2D(depth_2, patch_size_2, patch_size_2,
                        border_mode='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(Convolution2D(depth_3, patch_size_3, patch_size_3,
                        border_mode='valid'))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))
# Flatten the final feature maps into a vector for the dense head.
model.add(Flatten())
model.add(Dense(num_hidden_1))
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(num_hidden_2))
model.add(Activation('relu'))
model.add(Dropout(dropout))
# Output layer: one unit per class, softmax probabilities.
model.add(Dense(num_classes))
model.add(Activation('softmax'))
# +
# Checkpoint: save the model whenever the monitored metric improves.
checkpoint_name = "-model.hdf5"
checkpointer = ModelCheckpoint(checkpoint_name, verbose=0, save_best_only=True)
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
# -
# Train, holding out 25% of the data for validation (Keras 1.x `nb_epoch`).
history = model.fit(X, y, validation_split=0.25, batch_size=batch_size, nb_epoch=nb_epoch,
                    verbose=1, callbacks=[checkpointer])
# +
# NOTE(review): this evaluates on the same X, y used for fitting, so the
# printed "Test" numbers are really training-set scores -- confirm intent.
score = model.evaluate(X, y, verbose=0)
print 'Test score:', score[0]
print 'Test accuracy: {:.2%}'.format(score[1])
| Final_UrbanSprawl/Wuhan_Training_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import sklearn
from sklearn import linear_model
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
# import something
# Load the master dataset assembled elsewhere in the project.
master_df = pd.read_csv('../datasets/MASTER_DF.csv')
master_df.columns
# Select the model features; PubChem is the compound identifier and
# becomes the index rather than a feature.
feature_df = master_df[['PubChem', 'dist', 'enzyme_class_1', 'enzyme_class_2', 'enzyme_class_3',
                        'enzyme_class_4', 'enzyme_class_5', 'enzyme_class_6', 'enzyme_class_7',
                        'n_O', 'n_N', 'n_P', 'n_S', 'n_X', 'DoU']]
feature_df.set_index(keys=['PubChem'], inplace=True)
feature_df.head()
# +
features = np.array(feature_df) #shape balance array for regression
reactions = list(master_df['reacts'])  # binary target: does the compound react
# 80/20 train/test split with a fixed seed for reproducibility.
feature_train, feature_test, reaction_train, reaction_test = train_test_split(features, reactions,
                                                                              test_size=0.20, random_state=42)
# +
# L1-regularized logistic regression; class_weight='balanced' reweights
# classes inversely to their frequency in the training labels.
model_1 = linear_model.LogisticRegression(solver='liblinear', penalty='l1', random_state=1, class_weight='balanced')
model_1.fit(feature_train, np.ravel(reaction_train))
predictions = model_1.predict(feature_test) # change me to the data you want to predict based on
score = model_1.score(feature_test, reaction_test)  # mean accuracy on the test split
decision = model_1.decision_function(feature_test)  # signed distance to the hyperplane
params = model_1.get_params()
pred_log = model_1.predict_log_proba(feature_test)
pred = model_1.predict_proba(feature_test)  # per-class probabilities
# Bare tuple displays all of these in the notebook output.
score, pred, decision, model_1.classes_, model_1.coef_
# -
# Wrap the class-probability array (from model_1.predict_proba) in a frame,
# keeping the two per-class probability columns labelled '0' and '1'.
prediction_values = pd.DataFrame(pred)
model_descriptive_df = pd.DataFrame({'0': prediction_values[0],
                                     '1': prediction_values[1]})
model_descriptive_df
# Confusion matrix for model_1 on the held-out test split.
updated = predictions.tolist()
# BUG FIX: the original assigned the result to the name `confusion_matrix`,
# shadowing the sklearn function imported at the top of the notebook, so any
# later call to confusion_matrix(...) would raise TypeError ("ndarray is not
# callable"). Store the result under a distinct local name instead.
cm = confusion_matrix(reaction_test, updated)
print(cm)
# upper left and lower right are correct: 1427
# lower left and upper right are incorrect : 99
print(classification_report(reaction_test, predictions))
# +
# Cross-validated variant: LogisticRegressionCV selects the regularization
# strength via 10-fold CV.
# NOTE(review): unlike model_1 this omits class_weight='balanced' -- confirm
# whether that difference is intentional.
model_2 = linear_model.LogisticRegressionCV(solver='liblinear', penalty='l1', random_state=1, cv=10)
model_2.fit(feature_train, np.ravel(reaction_train))
predictions2 = model_2.predict(feature_test)
score2 = model_2.score(feature_test, reaction_test)
decision2 = model_2.decision_function(feature_test)
params2 = model_2.get_params()
pred_log2 = model_2.predict_log_proba(feature_test)
pred2 = model_2.predict_proba(feature_test)
score2, pred2, #decision2, model_2.classes_, model_2.coef_
# -
# plan: ridge regression, drop off the least important features and get the AUC ROC curve-> store, drop the last
# RFE again? (it didn't show anything...)
#
# ______________
# # linear regression on default data
#
# logreg1=linear_model.LogisticRegression(random_state=1)
# logreg1.fit(feature_train, np.ravel(reaction_train)) #fit linear model, and shape default array for regression
# score = logreg1.score(feature_test, reaction_test)
# decision = logreg1.decision_function(feature_test)
# params = logreg1.get_params()
# pred_log = logreg1.predict_log_proba(feature_test)
# pred = logreg1.predict_proba(feature_test)
#
# score, pred, logreg1.classes_, logreg1.coef_ #np.ndarray.shape(pred)
#
# #print('B0, B1: ',logreg.intercept_, logreg.coef_[0])
# predictions1 = logreg1.predict(feature_test)
# confusion_matrix = confusion_matrix(reaction_test, predictions1)
# print(confusion_matrix)
# # upper left and lower right are correct: 1400 for the first go
# # lower left and upper right are incorrect : 111 for the first go
# print(classification_report(reaction_test, predictions1))
# logit_roc_auc = roc_auc_score(reaction_test, logreg1.predict(feature_test))
# fpr, tpr, thresholds = roc_curve(reaction_test, logreg1.predict_proba(feature_test)[:,1])
# plt.figure()
# plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
# plt.plot([0, 1], [0, 1],'r--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('Receiver operating characteristic')
# plt.legend(loc="lower right")
# plt.savefig('Log_ROC')
# plt.show()
# plt.scatter(reaction_test, predictions1, alpha=0.1)
# plt.xlabel('True value')
# plt.ylabel('Predicted value')
# logreg5 = linear_model.LogisticRegression(penalty='l1', random_state=1)
# logreg5.fit(feature_train, np.ravel(reaction_train))
# predictions5 = logreg5.predict(feature_test)
# score = logreg5.score(feature_test, reaction_test)
# decision = logreg5.decision_function(feature_test)
# params = logreg5.get_params()
# pred_log = logreg5.predict_log_proba(feature_test)
# pred = logreg5.predict_proba(feature_test)
#
# score, pred, logreg5.classes_, logreg5.coef_
# print(classification_report(reaction_test, predictions5))
# logit_roc_auc = roc_auc_score(reaction_test, logreg5.predict(feature_test))
# fpr, tpr, thresholds = roc_curve(reaction_test, logreg5.predict_proba(feature_test)[:,1])
# plt.figure()
# plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
# plt.plot([0, 1], [0, 1],'r--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('Receiver operating characteristic')
# plt.legend(loc="lower right")
# plt.savefig('Log_ROC')
# plt.show()
# +
from sklearn.feature_selection import RFE
# +
# Recursive feature elimination: refit keeping i = 1..13 features, recording
# the test score and each feature's RFE ranking (1 = kept longest).
rfe_results = pd.DataFrame(columns=['score'])
rank_array = np.zeros((14,15))  # row 0 unused; last column is a filler ('drop')
for i in range(1,14):
    logreg2=linear_model.LogisticRegression(solver='liblinear', penalty='l1', random_state=1, max_iter=1000, class_weight='balanced')
    rfe = RFE(logreg2, i)
    rfe = rfe.fit(feature_train, np.ravel(reaction_train))
    score = rfe.score(feature_test, reaction_test)
    rfe_results.loc[i, 'score'] = score
    for j in range(0,14):
        rank_array[i, j] = rfe.ranking_[j]
# -
# Label the ranking matrix with feature names, then drop the unused first
# row and the filler column.
rank_df = pd.DataFrame(rank_array, columns=['dist', 'enzyme_class_1', 'enzyme_class_2', 'enzyme_class_3',
                                            'enzyme_class_4', 'enzyme_class_5', 'enzyme_class_6', 'enzyme_class_7',
                                            'n_O', 'n_N', 'n_P', 'n_S', 'n_X', 'DoU', 'drop'])
#rfe_results
rank_df.drop(rank_df.index[0], inplace=True)
rank_df.drop(['drop'], axis=1, inplace=True)
rank_df
# Column sums summarize how early each feature tends to be eliminated.
rank_df.sum(axis=0), rfe_results
# +
# Same RFE sweep sized for 17 features.
# NOTE(review): feature_train (built from feature_df above) has 14 feature
# columns, yet j runs to 16 and the labels in the next cell include n_C,
# n_H and MW -- this appears written for a wider feature frame and would
# raise IndexError on rfe.ranking_ as-is; confirm which frame was intended.
rfe_results = pd.DataFrame(columns=['score'])
rank_array = np.zeros((17,18))
for i in range(1,17):
    logreg2=linear_model.LogisticRegression(solver='liblinear', penalty='l1', random_state=1, max_iter=1000, class_weight='balanced')
    rfe = RFE(logreg2, i)
    rfe = rfe.fit(feature_train, np.ravel(reaction_train))
    score = rfe.score(feature_test, reaction_test)
    rfe_results.loc[i, 'score'] = score
    for j in range(0,17):
        rank_array[i, j] = rfe.ranking_[j]
# Label and tidy the 17-feature ranking matrix (drop unused row 0 and the
# filler column), then summarize.
rank_df = pd.DataFrame(rank_array, columns=['dist', 'enzyme_class_1', 'enzyme_class_2', 'enzyme_class_3',
                                            'enzyme_class_4', 'enzyme_class_5', 'enzyme_class_6', 'enzyme_class_7',
                                            'n_C', 'n_H', 'n_O', 'n_N', 'n_P', 'n_S', 'n_X', 'DoU', 'MW', 'drop'])
#rfe_results
rank_df.drop(rank_df.index[0], inplace=True)
rank_df.drop(['drop'], axis=1, inplace=True)
rank_df
rank_df.sum(axis=0)
# lose MW, n_X,
rfe_results
# ROC curve for the fitted classifier on the held-out test split.
# BUG FIX: the original referenced `logreg1`, which is only defined inside
# commented-out cells above, so this cell raised NameError. Use `model_1`,
# the L1 logistic regression actually fitted on feature_train earlier.
logit_roc_auc = roc_auc_score(reaction_test, model_1.predict(feature_test))
fpr, tpr, thresholds = roc_curve(reaction_test, model_1.predict_proba(feature_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')  # chance diagonal for reference
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('Log_ROC')
plt.show()
feature_df.columns
# +
# Drop the enzyme-class indicator columns to test a chemistry-only model.
non_enzyme_feature_df = feature_df.copy()
non_enzyme_feature_df.drop(['enzyme_class_1', 'enzyme_class_2', 'enzyme_class_3',
                            'enzyme_class_4', 'enzyme_class_5', 'enzyme_class_6', 'enzyme_class_7'], axis=1, inplace=True)
non_enzyme_feature_df
# +
# Re-split on the reduced feature set (same 80/20 split and seed as before,
# so the row partition matches the earlier experiments).
features = np.array(non_enzyme_feature_df) #shape balance array for regression
reactions = list(master_df['reacts'])
feature_train, feature_test, reaction_train, reaction_test = train_test_split(features, reactions,
                                                                              test_size=0.20, random_state=42)
# +
# RFE sweep over the non-enzyme feature set (keep i = 1..10 features).
# NOTE(review): non_enzyme_feature_df has 7 feature columns after the drop
# above, but j runs to 9 and the labels below include n_C, n_H and MW --
# as with the 17-feature sweep, this looks sized for a wider frame and
# would raise IndexError on rfe.ranking_; verify the intended feature set.
non_enzyme_rfe_results = pd.DataFrame(columns=['score'])
non_enzyme_rank_array = np.zeros((11,11))
for i in range(1,11):
    logreg2=linear_model.LogisticRegression(solver='liblinear', penalty='l1', random_state=1, max_iter=1000, class_weight='balanced')
    rfe = RFE(logreg2, i)
    rfe = rfe.fit(feature_train, np.ravel(reaction_train))
    score = rfe.score(feature_test, reaction_test)
    non_enzyme_rfe_results.loc[i, 'score'] = score
    for j in range(0,10):
        non_enzyme_rank_array[i, j] = rfe.ranking_[j]
# -
# Label and tidy the non-enzyme ranking matrix, then summarize per feature.
non_enzyme_rank_df = pd.DataFrame(non_enzyme_rank_array, columns=['dist',
                                  'n_C', 'n_H', 'n_O', 'n_N', 'n_P', 'n_S', 'n_X', 'DoU', 'MW', 'drop'])
#rfe_results
non_enzyme_rank_df.drop(non_enzyme_rank_df.index[0], inplace=True)
non_enzyme_rank_df.drop(['drop'], axis=1, inplace=True)
non_enzyme_rank_df
non_enzyme_rank_df.sum(axis=0)
# ditch n_N, MW, n_H, n_P
# +
# Distance-only baseline: a single feature, split with the same seed.
features = list(master_df['dist']) #shape balance array for regression
reactions = list(master_df['reacts'])
feature_train, feature_test, reaction_train, reaction_test = train_test_split(features, reactions,
                                                                              test_size=0.20, random_state=42)
a = len(feature_train)
b = len(feature_test)
# sklearn expects 2-D X, so reshape the 1-D lists to (n, 1) column vectors.
feature_train = np.reshape(feature_train, (a, 1))
feature_test = np.reshape(feature_test, (b,1))
# +
# Fit the same balanced L1 logistic regression on distance alone, to see
# how much of the signal that single feature carries.
dist_only_model=linear_model.LogisticRegression(solver='liblinear', penalty='l1', random_state=1, max_iter=1000, class_weight='balanced')
dist_only_model.fit(feature_train, np.ravel(reaction_train))
predictions = dist_only_model.predict(feature_test) # change me to the data you want to predict based on
score = dist_only_model.score(feature_test, reaction_test)  # mean accuracy
decision = dist_only_model.decision_function(feature_test)
params = dist_only_model.get_params()
pred_log = dist_only_model.predict_log_proba(feature_test)
pred = dist_only_model.predict_proba(feature_test)
# Bare tuple displays all of these in the notebook output.
score, pred, decision, dist_only_model.classes_, dist_only_model.coef_
# -
# kf = KFold(n_splits=10, shuffle=True)
# kf.get_n_splits(features, reactions)
#
# int_reactions = [int(i) for i in reactions]
#
#
# for train_index, test_index in kf.split(features, reactions):
# #print("TRAIN:", train_index, "TEST:", test_index)
# feature_train, feature_test = features[train_index], features[test_index]
# reaction_train, reaction_test = np.array(int_reactions)[train_index], np.array(int_reactions)[test_index]
#
# reg = linear_model.LogisticRegression().fit(feature_train, reaction_train)
# y_pred = reg.predict(feature_test)
#
#
# print(classification_report(reaction_test, y_pred))
# logit_roc_auc = roc_auc_score(reaction_test, reg.predict(feature_test))
# fpr, tpr, thresholds = roc_curve(reaction_test, reg.predict_proba(feature_test)[:,1])
# plt.figure()
# plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
# plt.plot([0, 1], [0, 1],'r--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('Receiver operating characteristic')
# plt.legend(loc="lower right")
# plt.savefig('Log_ROC')
# plt.show()
# plt.scatter(reaction_test, y_pred, alpha=0.1)
# plt.xlabel('True value')
# plt.ylabel('Predicted value')
# logreg3 = linear_model.LogisticRegression(solver='sag', max_iter=100000)
# logreg3.fit(feature_train, np.ravel(reaction_train))
# predictions3 = logreg3.predict(feature_test)
# confusion_matrix = confusion_matrix(reaction_test, predictions3)
# print(confusion_matrix)
# print(classification_report(reaction_test, predictions3))
# logit_roc_auc = roc_auc_score(reaction_test, logreg3.predict(feature_test))
# fpr, tpr, thresholds = roc_curve(reaction_test, logreg3.predict_proba(feature_test)[:,1])
# plt.figure()
# plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
# plt.plot([0, 1], [0, 1],'r--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('Receiver operating characteristic')
# plt.legend(loc="lower right")
# plt.savefig('Log_ROC')
# plt.show()
# logreg4 = linear_model.LogisticRegression(solver='lbfgs', max_iter=10000)
# logreg4.fit(feature_train, np.ravel(reaction_train))
# predictions4 = logreg4.predict(feature_test)
# logit_roc_auc = roc_auc_score(reaction_test, logreg4.predict(feature_test))
# fpr, tpr, thresholds = roc_curve(reaction_test, logreg4.predict_proba(feature_test)[:,1])
# plt.figure()
# plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
# plt.plot([0, 1], [0, 1],'r--')
# plt.xlim([0.0, 1.0])
# plt.ylim([0.0, 1.05])
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('Receiver operating characteristic')
# plt.legend(loc="lower right")
# plt.savefig('Log_ROC')
# plt.show()
# print(classification_report(reaction_test, predictions4))
| examples/.ipynb_checkpoints/model_creation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import print_function
import numpy as np
import cv2
import matplotlib.image as mpimg
from matplotlib.pyplot import imshow
# %matplotlib inline
import argparse
# Using Argument Parser to get the location of image
# vs = VideoStream(src=0).start()

# Load the image from disk. cv2.imread returns None (instead of raising)
# when the file is missing or unreadable, so fail fast with a clear error
# rather than a cryptic cvtColor failure below.
image = cv2.imread('./1.jpg')
if image is None:
    raise IOError("Could not read image './1.jpg'")
# cv2.imshow("Original", image)

# Convert the color image into grayscale for edge detection.
grayScale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

# Find edges using the Canny detector. The lower and upper thresholds are
# derived from the median pixel intensity with sigma = 0.33 (a common
# auto-thresholding heuristic).
sigma = 0.33
v = np.median(grayScale)
low = int(max(0, (1.0 - sigma) * v))
high = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(grayScale, low, high)

# After finding edges we have to find contours: curves of points with no
# gaps, used to locate the shapes.
# cv2.RETR_EXTERNAL keeps only the outermost contours (we outline shapes);
# cv2.CHAIN_APPROX_SIMPLE removes redundant points along straight segments.
# BUG FIX: cv2.findContours returns (image, contours, hierarchy) in
# OpenCV 3.x but (contours, hierarchy) in OpenCV 2.x/4.x; the original
# 3-tuple unpacking crashed on every other version. Taking the
# second-to-last element works on all versions.
cnts = cv2.findContours(edged,
                        cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
'''
We are going to use the contour approximation method to find the vertices of
geometric shapes. The algorithm is also known as the Ramer-Douglas-Peucker
algorithm. In OpenCV it is implemented in the cv2.approxPolyDP method.
The detectShape() function below takes a contour as a parameter and
then returns its shape.
'''
def detectShape(c):
    """Classify a contour as 'triangle', 'square', 'rectangle', 'pentagon'
    or 'circle' from the number of vertices of its polygonal approximation."""
    shape = 'unknown'
    # Perimeter of the closed contour, used to scale the approximation
    # tolerance.
    perimeter = cv2.arcLength(c, True)
    # Ramer-Douglas-Peucker polygon approximation with a tolerance of 4%
    # of the perimeter.
    approx = cv2.approxPolyDP(c, 0.04 * perimeter, True)
    vertex_count = len(approx)
    if vertex_count == 3:
        # Three vertices: a triangle.
        shape = 'triangle'
    elif vertex_count == 4:
        # Four vertices: square or rectangle. Decide using the aspect
        # ratio of the enclosing bounding box.
        x, y, width, height = cv2.boundingRect(approx)
        aspectRatio = float(width) / height
        # An aspect ratio of approximately 1:1 means a square.
        if 0.95 <= aspectRatio <= 1.05:
            shape = "square"
            print("X-sq-axis", x)
            print("Y-sq-axis", y)
        else:
            shape = "rectangle"
            print("X-rec-axis", x)
            print("Y-rec-axis", y)
    elif vertex_count == 5:
        # Five vertices: a pentagon.
        shape = "pentagon"
    else:
        # Anything else is treated as a circle.
        shape = "circle"
    return shape
# Loop over every contour: classify it with detectShape(), outline it, and
# write the shape name at its centroid on the original image.
for c in cnts:
    # Image moments: m00 is the contour area; the centroid is
    # (m10/m00, m01/m00).
    M = cv2.moments(c)
    # BUG FIX: degenerate (zero-area) contours have m00 == 0 and no
    # centroid; the original raised ZeroDivisionError here. Skip them.
    if M['m00'] == 0:
        continue
    cX = int(M['m10'] / M['m00'])
    cY = int(M['m01'] / M['m00'])
    # Classify the contour.
    shape = detectShape(c)
    # Outline the contour in green.
    cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
    # Write the name of the shape at the centroid.
    cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
                0.5, (255, 255, 255), 2)
    print(shape)

# Show the annotated image until a key is pressed.
cv2.imshow('frame', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
| shapes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab inline
import pandas as pd
import plotnine as p
# `scanpy.api` was deprecated and removed in scanpy >= 1.5; the same
# entry points (sc.AnnData, sc.pp.*) are available on the top-level module.
import scanpy as sc
# # Mouse OB
# +
# --- Mouse Olfactory Bulb (MOB) dataset ---
# Load per-spot annotations and the spots-by-genes count table
# (presumably SpatialDE output; results carry q-values in `qval`).
sample_info = pd.read_csv('../MouseOB/MOB_sample_info.csv', index_col=0)
df = pd.read_csv('../MouseOB/data/Rep11_MOB_0.csv', index_col=0)
df = df.T[df.sum(0) >= 3].T # Filter practically unobserved genes
sample_info = sample_info.query('total_counts > 10') # Remove empty features
# Keep only spots that survived the total-count filter.
df = df.loc[sample_info.index]
# Significance results; drop the log_total_count covariate row.
results = pd.read_csv('../MouseOB/MOB_final_results.csv', index_col=0).query('g != "log_total_count"')
# +
# Standard scanpy highly-variable-gene (HVG) pipeline: per-cell
# normalisation, then dispersion-based gene filtering.
adata = sc.AnnData(df.values, sample_info, df.columns.set_names('gene_name').to_frame())
sc.pp.normalize_per_cell(adata)
sc.pp.filter_genes_dispersion(adata)
print(adata.shape)
# -
# Spatially variable (SV) genes at 5% FDR, and their overlap with the HVGs.
SV_genes = results.query('qval < 0.05').g
SV_genes.shape
adata.var.index.intersection(SV_genes).shape
# # BreastCancer
# +
# --- Breast Cancer dataset ---
# Significance results (drop the covariate row) and per-spot annotations.
results = pd.read_csv('../BreastCancer/BC_final_results.csv', index_col=0).query('g != "log_total_count"')
sample_info = pd.read_csv('../BreastCancer/BC_sample_info.csv', index_col=0)
# Tab-separated count matrix, hence read_table.
df = pd.read_table('../BreastCancer/data/Layer2_BC_count_matrix-1.tsv', index_col=0)
df = df.loc[sample_info.index]
df = df.T[df.sum(0) >= 3].T # Filter practically unobserved genes
df = df[results.g] # Order like in results
# +
# Standard scanpy HVG pipeline: normalise per cell, filter by dispersion.
adata = sc.AnnData(df.values, sample_info, df.columns.set_names('gene_name').to_frame())
sc.pp.normalize_per_cell(adata)
sc.pp.filter_genes_dispersion(adata)
print(adata.shape)
# -
# Spatially variable genes at 5% FDR and their overlap with the HVGs.
SV_genes = results.query('qval < 0.05').g
SV_genes.shape
adata.var.index.intersection(SV_genes).shape
# # SeqFISH
# +
# --- SeqFISH dataset (field 43) ---
results = pd.read_csv('../SeqFISH/final_results_43.csv', index_col=0).query('g != "log_total_count"')
sample_info = pd.read_csv('../SeqFISH/sample_info_43.csv', index_col=0)
df = pd.read_csv('../SeqFISH/exp_mat_43.csv', index_col=0)
# Column labels are presumably cell ids read in as strings; cast to int so
# they can be matched against sample_info's index -- TODO confirm.
df.columns = df.columns.map(int)
# Select annotated cells and transpose to cells-by-genes.
df = df[sample_info.index].T
# +
# Standard scanpy HVG pipeline: normalise per cell, filter by dispersion.
adata = sc.AnnData(df.values, sample_info, df.columns.set_names('gene_name').to_frame())
sc.pp.normalize_per_cell(adata)
sc.pp.filter_genes_dispersion(adata)
print(adata.shape)
# -
# Spatially variable genes at 5% FDR and their overlap with the HVGs.
SV_genes = results.query('qval < 0.05').g
SV_genes.shape
adata.var.index.intersection(SV_genes).shape
# # MERFISH
# +
# --- MERFISH dataset (replicate 6, middle section) ---
sample_info = pd.read_csv('../MERFISH/middle_sample_info.csv', index_col=0)
results = pd.read_csv('../MERFISH/middle_final_results.csv', index_col=0).query('g != "log_total_count"')
df = pd.read_csv('../MERFISH/data/rep6/middle_exp_mat.csv', index_col=0)
# Keep only annotated cells, then drop practically unobserved genes.
df = df.loc[sample_info.index]
df = df.T[df.sum(0) >= 3].T # Filter practically unobserved genes
# +
# Standard scanpy HVG pipeline: normalise per cell, filter by dispersion.
adata = sc.AnnData(df.values, sample_info, df.columns.set_names('gene_name').to_frame())
sc.pp.normalize_per_cell(adata)
sc.pp.filter_genes_dispersion(adata)
print(adata.shape)
# -
# Spatially variable genes at 5% FDR and their overlap with the HVGs.
SV_genes = results.query('qval < 0.05').g
SV_genes.shape
adata.var.index.intersection(SV_genes).shape
# # Frog
# +
# --- Frog (Xenopus) developmental time-course ---
# Here the covariate rows to drop are the ERCC spike-in and gene-count
# terms rather than log_total_count.
results = pd.read_csv('../Frog/Frog_final_results.csv').query('g not in ["log_ERCC", "log_num_genes"]')
sample_info = pd.read_csv('../Frog/Frog_sample_info.csv', index_col=0)
df = pd.read_csv('../Frog/data/GSE65785_clutchApolyA_relative_TPM.csv', index_col=0)
# Genes are rows in this file: select annotated samples (columns), filter
# low-expression genes by row sums, then transpose to samples-by-genes.
df = df[sample_info.index]
df = df[df.sum(1) >= 3].T # Filter practically unobserved genes
# +
# Standard scanpy HVG pipeline: normalise per cell, filter by dispersion.
adata = sc.AnnData(df.values, sample_info, df.columns.set_names('gene_name').to_frame())
sc.pp.normalize_per_cell(adata)
sc.pp.filter_genes_dispersion(adata)
print(adata.shape)
# -
# Significant genes at 5% FDR and their overlap with the HVGs.
SV_genes = results.query('qval < 0.05').g
SV_genes.shape
adata.var.index.intersection(SV_genes).shape
| Analysis/HVG-analysis/HVG in each dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Série 2 - Résolution d'équation non linéaire
#
# Il est fréquent d'avoir besoin d'obtenir une solution numérique approchée à une équation non linéaire qui ne dispose pas de solution analytique simple. Dans ce cas, plusieurs techniques existent, comme vu en cours.
#
# Le but de cette série d'exercices est de vous entraîner à l'utilisation de ces méthodes et de mieux comprendre leurs forces et limitations.
# **Exercice 1 - Préliminaire**
#
# Soit l'équation d'état du $CO_2$
#
# $$\left[ p + a \left( \frac{N}{V} \right)^2 \right]
# \left( V - Nb \right) = k N T $$
#
# avec :
# - $k = 1.3806503 \cdot 10^{-23}$ la Constante de Boltzmann en Joule/Kelvin
# - $a = 0.401$ Pascal m$^6$
# - $b = 4.27\cdot 10^{-5}$ m$^3$
#
# Le but est d'estimer le volume de $CO_2$ gazeux correspondant aux conditions suivantes :
# - $N = 1000$ molécules
# - $T = 300$ Kelvins
# - $p = 3.5\cdot 10^{7}$ Pascals
#
# Pour cela vous devez :
# 1. Ecrire une fonction en python qui prend en entrée le volume $V$ et renvoit en sortie :
# $$
# \left[ p + a \left( \frac{N}{V} \right)^2 \right]
# \left( V - Nb \right) - k N T
# $$
# Cette fonction sera égale à zéro quand on lui donne le volume recherché en entrée.
# 2. Faire un graphe de cette fonction pour une gamme de valeur de $V$ allant de 0.03 à 0.1 m$^3$.
# 3. Vérifier que la fonction passe bien par zéro dans l'intervalle 0.03 à 0.1
# +
import numpy as np
import matplotlib.pyplot as plt
# Constants of the CO2 equation of state
k = 1.3806503e-23  # Boltzmann constant [Joule/Kelvin]
a = 0.401  # [Pascal m^6]
b = 4.27e-5  # [m^3]

# Parameters of the worked example
N = 1000  # number of molecules
T = 300  # [Kelvin]
p = 3.5e7  # [Pascal]


def f(V):
    """Residual of the CO2 equation of state: zero at the volume we seek."""
    pressure_term = p + a * (N / V) ** 2
    volume_term = V - N * b
    return pressure_term * volume_term - k * N * T
# Plot: sample the volume range [0.03, 0.1] m^3 and graph f to locate the
# zero crossing visually.
V = np.linspace(0.03,0.1,100)
plt.plot(V,f(V))
plt.grid('on')
plt.xlabel('V')
plt.ylabel('f( V )')
# Save the figure to a png file.
plt.savefig('zero_co2.png')
plt.show()
# -
# On constate bien que la fonction passe de -6 à +4, il y a donc un zéro dans l'intervalle [0.03, 0.1]. Graphiquement, on observe qu'elle vaut zéro pour un volume de l'ordre de 0.043.
# **Exercice 2 - Utilisation de fsolve**
#
# Pour commencer, et comme c'est la solution que l'on utilisera le plus souvent en pratique, employez la fonction `fsolve` du module `scipy.optimize` pour calculer le volume de gaz. Combien trouvez-vous ?
#
# Utilisez la cellule ci-dessous pour écrire les 2 lignes de codes nécessaires.
from scipy.optimize import fsolve
# Solve f(V) = 0 starting from the initial guess V = 0.04 m^3.
# Note: fsolve returns an array of roots, not a scalar.
V = fsolve(f,0.04)
print('Le volume de CO2 est de:',V)
# **Exercice 3 - Bisection**
#
# Ecrire un code python pour implémenter la méthode de bisection pour trouver la valeur de $V$. Afin de pouvoir l'appliquer ensuite pour différents cas, écrivez l'algorithme dans une fonction. La cellule ci-dessous donne les spécifications (input, output) de cette fonction.
#
# Appliquer la fonction ainsi définie au problème de calcul du volume de CO2. Est-ce que le résultat est en accord avec celui que vous avez déja obtenu ?
#
# Faites quelques essais pour voir comment la solution (volume V et nombre d'itération) dépend de l'erreur choisie.
# Combien d'itérations faut-il pour obtenir un résultat avec une tolérance de 1e-20 ?
#
# +
# Version simple de l'algorithme sans tests ni messages détaillées
# L'avantage de cette version est sa lisibilité
def bisection(f, a, b, eps = 1e-5, kmax = 1000):
    """Solve a non-linear equation f(x) = 0 by bisection on [a, b].

    Returns the approximate root and the number of iterations used.
    """
    iterations = 0
    while True:
        midpoint = ( a + b )/2
        residual = f(midpoint)
        # Exact zero hit: stop immediately.
        if residual == 0:
            print("convergence : solution exacte atteinte")
            return midpoint, iterations
        # Keep the half-interval whose endpoints still bracket the root.
        if f(a) * f(midpoint) < 0:
            b = midpoint
        else:
            a = midpoint
        iterations += 1
        # Stop once the bracket is narrower than the tolerance...
        if np.abs(a-b) <= eps:
            print("convergence : erreur inférieure à la tolérance")
            return midpoint, iterations
        # ...or when the iteration budget is exhausted.
        if iterations > kmax:
            print("convergence : nombre d'itération maximum atteind")
            return midpoint, iterations
# +
# version plus longue avec messages détaillées, vérification des paramètres d'entrée et
# quelques modifications pour ne pas répéter les appels à f et optimiser le temps calcul
def bisection(f, a, b, eps = 1e-5, kmax = 1000, verbose = False):
    """Solve a non-linear equation f(x) = 0 by bisection.

    Parameters
    ----------
    f : callable ``f(x)``
        Function to solve; we look for x such that f(x) = 0.
    a, b : float
        Lower and upper bounds between which a solution is sought
        (must satisfy a < b and bracket a zero of f).
    eps : float
        Acceptable error on the solution (width of the final bracket).
    kmax : int
        Maximum number of iterations.
    verbose : bool
        If True, print the state of every iteration.

    Returns
    -------
    x : float
        Value such that f(x) = 0 within tolerance, or NaN on invalid input.
    k : int
        Number of iterations needed to find x.
    """
    if a >= b:
        print("a doit être strictement inférieur à b")
        return np.nan, 0
    ya = f(a)
    yb = f(b)
    if ya * yb >= 0:
        print("a et b n'encadre pas un zéro de f")
        return np.nan, 0
    k = 0
    while True:
        x = ( a + b )/2
        y = f(x)
        if y == 0:
            if verbose:
                print(" convergence reached: f(x)=0")
            return x, k
        if verbose:
            print("iteration:",k)
            print(" a:", a, "f(a):", ya)
            print(" b:", b, "f(b):", yb)
            print(" => x:", x, "f(x):", y)
        # Keep the half-interval that still brackets the zero.
        # BUG FIX: the midpoint value y = f(x) is already known, so reuse
        # it instead of calling f again -- the whole point of this version
        # is to avoid repeated evaluations of f.
        if ya * y < 0:
            b = x
            yb = y
        else:
            a = x
            ya = y
        k += 1
        if np.abs(a-b) <= eps:
            if verbose:
                print(" convergence : erreur inférieure à la tolérance")
            return x, k
        if k > kmax:
            if verbose:
                print(" convergence : nombre d'itération maximum atteind")
            return x, k
# -
bisection(f,0.03,1,1e-20)
# On constate qu'il faut une tolérance inférieure à 1e-4 pour retrouver la valeur donnée par `fsolve`. Le calcul est rapide même avec une tolérance très faible, par ex 1e-20, les 1000 itérations nécessaires sont calculés en quelques fractions de secondes.
# **Exercice 4 - Méthode de Newton**
#
# Comme ci-dessus, on vous demande une fonction pour trouver le zéro de la fonction d'état du CO2 mais cette fois-ci avec la méthode de newton.
#
# Une petite difficulté dans la méthode de Newton est qu'il vous faut la dérivée en chaque point. Pour la calculer, la fonction `derive()` vous est donné ci-dessous.
#
# En combien d'itération obtenez-vous un résultat avec une tolérance de 1e-20 ?
# +
def derive(f,x,dx=1e-5):
    """Central finite-difference estimate of f'(x), with half-step dx."""
    return ( f( x + dx ) - f( x - dx ) ) / ( 2 * dx )

def newton(f, x0, eps = 1e-5, kmax = 1000, verbose = False):
    """Solve a non-linear equation f(x) = 0 by Newton's method.

    Parameters
    ----------
    f : callable ``f(x)``
        Function to solve; we look for x such that f(x) = 0.
    x0 : float
        Initial guess for the solution.
    eps : float
        Acceptable error on the solution (size of the last Newton step).
    kmax : int
        Maximum number of iterations.
    verbose : bool
        If True, print the state of every iteration.

    Returns
    -------
    x : float
        Approximate value such that f(x) = 0.
    k : int
        Number of iterations performed.
    """
    tolerance = eps**2 # Avoid recomputing the square at every iteration
    # Sensible fallbacks in case the loop body never runs (kmax <= 1),
    # which would otherwise leave x1 and k unbound at the final return.
    x1, k = x0, 0
    if verbose:
        print("valeur initiale:")
        print(" x:",x0,"f(x):",f(x0))
    for k in range(1,kmax):
        # Newton step: x_{n+1} = x_n - f(x_n)/f'(x_n), with the derivative
        # estimated numerically by derive().
        step = f(x0)/derive(f,x0)
        x1 = x0 - step
        if verbose:
            print("iteration:",k)
            print(" x:",x1,"f(x):",f(x1))
        if step**2 < tolerance:
            return x1, k
        x0 = x1
    # BUG FIX: the original returned the undefined name `it` here, raising
    # NameError whenever kmax was reached without convergence.
    return x1, k
newton(f, 0.03, 1e-20)
# -
# **Exercice 5 - Comparaison des méthodes**
#
# Nous cherchons maintenant le zéro de la fonction :
#
# $$f(x) = \sin(2x)-1+x$$
#
# Le bloc ci-dessous vous donne la définition de la fonction et le graphe dans l'intervalle $[-3,3]$.
#
# On vous demande de comparer les résultats obtenus par :
# - votre fonction de bisection en partant de l'intervale [-3,3] avec une erreur de 1e-10
# - votre fonction basée sur la méthode de Newton en partant de $x=2.0$ avec une erreur de 1e-10
# - la fonction `fsolve` en partant aussi de $x=2.0$
#
# Qu'observez-vous ?
#
# Essayez la même chose en prenant un point de départ plus proche de la solution.
# +
def f(x):
    """Left-hand side of the equation sin(2x) - 1 + x = 0."""
    oscillation = np.sin(2*x)
    return oscillation - 1 + x
# Plot f on [-3, 3] to locate the zero visually.
K = np.linspace(-3,3,100)
plt.plot(K,f(K))
plt.grid('on')
plt.xlabel('x')
plt.ylabel('f( x )')
plt.show()
# -
# Compare the three solvers on the same problem.
bisection(f,-3,3,1e-10)
newton(f, 2, 1e-10)
fsolve(f, 2)
# Retry Newton and fsolve from a starting point closer to the root.
newton(f, 0.5, 1e-10)
fsolve(f, 0.5)
# On constate que dans ce cas, la méthode de bisection est plus efficace que la méthode de Newton. Elle converge plus vite et semble plus stable. La méthode de Newton converge, mais seulement après 108 itérations. Ce n'est pas étonnant car les dérivées s'inversent régulièrement et le point de départ est assez loin de la solution recherchée. La fonction `fsolve` s'arrête carrément sans donner de résultat.
#
# En revanche, si on part d'une meilleure approximation, par exemple $x=0.5$, alors la méthode de Newton converge très vite et fsolve aussi.
#
# Le message a retenir est que ces techniques sont efficaces mais qu'il faut bien les contraindre et vérifier graphiquement vos calculs pour avoir bien confiance dans le résultat.
#
#
| series/serie02_solutions.ipynb |