code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Numpy Introduction
#
# Biblioteca de Algebra Linear para Python
#
# Métodos são construidos em C, tornando a biblioteca muito rápida (bloco de construção para outras libs).
#
# Arrays: principal ferramenta da biblioteca. Podem ser 1d (vetores) ou 2d (matrizes).
#
# Extremamente útil (otimizado) para tratamento de big data / data science
#
# * Topicos:
# * Importar biblioteca
# * Tempo de Processamento
# * lookfor: Procurar Funções/Metodos Proprietarios/Nativos
# * Inicializações de Arrays
# +
# Import the library
import numpy as np
# Processing time: NumPy arrays vs. plain lists
array1 = list(range(1000000))
# %time for _ in range(10):array1 * 2
array2 = np.arange(1000000)
# %time for _ in range(10):array2 * 2
# +
# lookfor: "similar to MATLAB's help" — searches NumPy docstrings for a keyword
np.lookfor('accumulate')
# +
# Initializing an array from a 1-D list (vector)
lista1 = [1,2,3]
array1 = np.array(lista1)
display(array1)
# Initializing an array from a 2-D list (matrix)
lista2 = [[1,2,3],[4,5,6],[7,8,9]]
array2 = np.array(lista2)
display(array2)
# Initializing an array with arange(start, stop, step)
array3 = np.arange(0,10) # From 0 to 9 (stop value excluded). step = 1.
display(array3)
array4 = np.arange(0,11,2) # From 0 to 10 (stop value excluded). step = 2.
display(array4)
# Initializing an array with zeros (all values 0)
array5 = np.zeros(3) # Array of shape [1x3]
display(array5)
array6 = np.zeros((5,5)) # Array of shape [5x5]
display(array6)
# Initializing an array with ones (all values 1)
array7 = np.ones((3,3)) # Matrix of ones [3x3]
display(array7)
# Initializing an array with eye (identity matrix)
array8 = np.eye(4) # Matrix [4x4]
display(array8)
# +
# Initializing an array with linspace(start, end, num_of_samples)
array9 = np.linspace(0,10,3)
display(array9)
# Initializing an array with logspace(start, end, num_of_samples, base_log)
array10 = np.logspace(2.0, 3.0, num = 5, base = 11)
display(array10)
# Note: set the random "seed" (to guarantee the same random sequence on re-runs)
np.random.seed(101)
# Initializing an array: random numbers drawn from a uniform distribution.
array11 = np.random.rand(5) # 5 values between 0 and 1
print(array11)
array12 = np.random.rand(5)*100 # 5 values between 0 and 100
print(array12)
array13 = np.random.rand(5,4) # 20 values between 0 and 1 (no tuple needed)
print(array13)
# Initializing an array: random numbers drawn from a Gaussian distribution.
array14 = np.random.randn(4) # 4 values, mean = 0, standard deviation = 1
print(array14)
# Initializing an array: random integers drawn from a uniform distribution.
array15 = np.random.randint(0,100) # from 0 to 99, 1 number
print(array15)
array16 = np.random.randint(0,100,10) # from 0 to 99, 10 numbers
print(array16)
# -
| scripts_numpy_pandas/Numpy_00_init.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.2
# language: julia
# name: julia-1.5
# ---
# ## String concatenation
part1 = "I want"
amount = 5
# Julia identifiers may be Unicode, including emoji
🍳 = "eggs"
# string(...) converts each argument to text and concatenates them
string(part1, amount, 🍳)
string(part1, " ", amount, " ", 🍳)
# We can also use `*` for concatenation!
# NOTE: the next line raises a MethodError — `*` concatenates strings only,
# and `amount` is an Int. Presumably shown deliberately to motivate the fix below.
part1*" "*amount*" "*🍳
# Fix: convert the number with string(...) before concatenating
part1*" "*string(amount)*" "*🍳
# String interpolation with `$` handles the conversion automatically
"$part1 $amount $🍳"
| Learn Julia/09. String Concatenation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/madhavjk/DataScience-ML_and_DL/blob/main/SESSION_21_(Drug_Prediction).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="1YLIcRRX2Rax"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# + id="-5j1jIHM2Ra0" outputId="bf6644eb-0bfd-449c-ff82-cc21b2f1049b"
# Load the drug-prescription dataset (expects drug200.csv in the working directory)
data = pd.read_csv("drug200.csv")
data.head(10)
# + id="BQBbRA6L2Ra2" outputId="1b84db60-9f86-440a-b4dd-67e521e66b31"
# Number of unique values per column
data.nunique()
# + id="wU9j1au22Ra3" outputId="e983c310-6995-4e80-9079-81093e6fd036"
# Missing values per column
data.isnull().sum()
# + id="qiqxK6qu2Ra3" outputId="f9e5be2d-3ab8-4e9f-8a0f-d49cf5a56286"
# Count of duplicated rows
data.duplicated().sum()
# + id="kuq5zYGJ2Ra4"
# Feature matrix X and target Y (the drug column)
cols = ["Age","Sex","BP","Cholesterol","Na_to_K"]
X = data[cols].values
Y = data[["Drug"]].values
# + id="Y4lL2ms22Ra5" outputId="fd252be0-73c5-4c3f-f3f7-36fb3b81f2f9"
print(X.shape,Y.shape)
# + id="qnZZ9YD92Ra5"
# Label-encode the categorical feature columns (Sex, BP, Cholesterol).
# NOTE(review): the same encoder instance is re-fit for each column, so the
# per-column mappings are not recoverable afterwards — acceptable for a demo.
from sklearn.preprocessing import LabelEncoder
enc = LabelEncoder()
X[:,1] = enc.fit_transform(X[:,1])
X[:,2] = enc.fit_transform(X[:,2])
X[:,3] = enc.fit_transform(X[:,3])
# + id="41uP04M32Ra6"
# 80/20 train/test split with a fixed seed for reproducibility
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 14)
# + id="nA9gz2kS2Ra7" outputId="831675c4-1025-47b3-dda0-b9eff3208add"
# Model 1: decision tree with entropy (information-gain) splitting
from sklearn.tree import DecisionTreeClassifier
dt_classifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 14)
dt_classifier.fit(x_train,y_train)
# + id="W-53-4U62Ra8"
pred_train = dt_classifier.predict(x_train)
pred_test = dt_classifier.predict(x_test)
# + id="y8Q7Ca8X2Ra9" outputId="e7e0d38d-de0a-4af4-92fb-0d104bd92ae7"
# Accuracy on both splits (accuracy_score is symmetric in its arguments)
from sklearn.metrics import accuracy_score
print("Training Accuracy :", accuracy_score(pred_train,y_train))
print("Testing Accuracy :", accuracy_score(pred_test,y_test))
# + id="76ZD39Ep2Ra9" outputId="70cd7655-67fd-460c-badc-887844ed9818"
# Model 2: random forest with 8 trees
from sklearn.ensemble import RandomForestClassifier
rf_classifier = RandomForestClassifier(n_estimators = 8, criterion = 'entropy', random_state =0)
rf_classifier.fit(x_train, y_train)
# + id="nLTWE5zO2Ra-"
pred_train = rf_classifier.predict(x_train)
pred_test = rf_classifier.predict(x_test)
# + id="mVWwIbO42Ra_" outputId="3bb6e4d6-17d4-4019-ee07-12cf3e8cd867"
print("Training Accuracy :", accuracy_score(pred_train,y_train))
print("Testing Accuracy :", accuracy_score(pred_test,y_test))
# + id="aXYxYrh62RbA" outputId="251d7c80-59d2-4e33-9005-4e49ab6aeb1c"
# Model 3: support-vector machine with a linear kernel
from sklearn.svm import SVC
svm_classifier = SVC(kernel = 'linear' , random_state = 0)
svm_classifier.fit(x_train, y_train)
# + id="5Yb1t0mC2RbA"
pred_train = svm_classifier.predict(x_train)
pred_test = svm_classifier.predict(x_test)
# + id="gXZJXnzl2RbB" outputId="be6191e0-4c09-4272-f9f2-1403baac8c2f"
print("Training Accuracy :", accuracy_score(pred_train,y_train))
print("Testing Accuracy :", accuracy_score(pred_test,y_test))
# + id="inXgpjeR2RbC"
| SESSION_21_(Drug_Prediction).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="F24EmvTrGjHc"
# # **Homework Assignment #2**
# Name: <NAME>
#
# Assigned: January 24, 2022
#
# Due: February 14, 2022
#
#
#
# ---
#
# This assignment consists of one question that requires short answer and one Python programming task. You can enter your answers and your code directly in a Colaboratory notebook and upload the **shareable** link for your notebook as your homework submission.
#
#
# ---
#
# #1.
#
# (10 points) Consider a set of points in the 2-dimensional space plotted below. In this figure, visual distance between points also represents the distance computed by a learning algorithm.
#
# 
#
# Which value of K (K=1, K=3, or K=5) will result in the greatest leave-one-out validation accuracy for KNN classification? Explain and justify your answer.
#
# `K = 5 will obtain the greatest accuracy, here is the detail calculations by simply counting from the graph: `
#
# `Suppose K = 1, we will only have 4/14 accuracy: comparing each possible (14 ways) left-out point with its single nearest neighbour — if the labels do not match, the prediction is incorrect. `
#
# `Suppose K = 3, we will have 8/14 accuracy by taking the most frequent label among each possible (14 ways) left-out point's three nearest neighbours.`
#
# `Suppose K = 5, we will have 10/14 accuracy by taking the most frequent label among each possible (14 ways) left-out point's five nearest neighbours.`
#
#
# #2.
#
# (100 points) The purpose of this programming assignment is to get you familiar with the capabilities offered by the sklearn modules. To complete the assignment, you will 1) load three different datasets, 2) report the classification accuracy for 4 different classifiers, 3) visualize the data and predictions, and 4) graph the learned decison tree.
#
# 1) Datasets. Sklearn offers multiple methods to load datasets for learning. Your program should use three of these methods.
#
# * Use the built-in sklearn.datasets capability to load the breast_cancer dataset. The load_breast_cancer() function returns two structures, one containing the features for each data point and the other containing the corresponding ground truth labels ('malignant' and 'benign').
#
# * Use the fetch_openml function to load the credit-g dataset. Note that the labels for this dataset are 'good' and 'bad'. The fetch_openml() function returns two structures, one containing the features for each data point and the other containing the corresponding ground truth labels.
#
# * Use the urlopen function to load a csv format file from the URL http://eecs.wsu.edu/~cook/ml/alldata.csv. You will need to include the calls
#
# harURL = # put the URL here
# f = urlopen(harURL)
# har = np.loadtxt(f, delimiter=',')
#
# The resulting data structure contains one row per data point. The last column is the label for the target feature / class (you will want to split these into separate structures for features and ground truth labels).
#
# 2) Classifiers. Sklearn offers many classifier options. You should include these three plus one other of your own choosing:
#
# * Majority classifier (this is DummyClassifier with the most_frequent option)
#
# * Decision tree using the entropy criterion (this is what was taught in class)
#
# * K nearest neighbors with K=5
#
# 3) Visualize the data and predictions. Here is code for a very simple scatter plot that plots the actual ground truth labels for the two classes in gray scale and the predicted labels for the two classes in color. This code calls PCA to reduce the actual number of features down to just three dimensions (we will describe this algorithm later in the semester). You will need to pass in the correct arguments. In the case of datasets that use string labels instead of numbers, you might also need to convert these to integers. You can use the one hot encoding method we discussed in class for this.
#
# 4) Visualize the decision tree that is learned for each of the datasets. Sklearn offers a plot_tree function that can be used for these purposes.
#
# Finally, include a few sentences with your insights. What classifiers performed best (based on accuracy) and what datasets were the easiest to predict? Are these results what you anticipated? Why or why not?
#
# `Looks like the Decision Tree has the highest average accuracy from all four datasets.`
#
# `I think the breast cancer dataset is easier to predict since the labels are either benign or malignant and the values of features are numerically and tend to fall into the categorical tumor size range`.
#
# `The result are in my expectation that supervised learning classifiers more accurate than unsupervised learning classifier K-Means. Since we've allocate, with label known, a 2/3 data for training purpose and 1/3 for testing purpose, the unsupervised learning doesn't really play its real role here. `
#
# + id="WaV5q7HG5GEv" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1644903509184, "user_tz": 480, "elapsed": 32396, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08164351031418621806"}} outputId="f862967a-0d64-4afa-9aae-e9a6b30b70b4"
# --------------------------------------------
# Name: <NAME>
# Course: Cpt_S 437
# Assignment 2
# --------------------------------------------
import numpy as np
from sklearn import metrics
import collections
from urllib.request import urlopen
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from sklearn.datasets import load_breast_cancer, fetch_openml
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cluster import KMeans
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.compose import make_column_transformer
#------------------------------------ PART 1 ----------------------------------------------#
def load_data():
    """Load all three assignment datasets, keyed by a display name.

    Returns:
        dict mapping dataset name -> (features, labels) tuple.
    """
    return {
        'brest cancer': load_cancer_data(),
        'credit_g': load_credit_data(),
        'alldata': load_all_data(),
    }
def load_cancer_data():
    """Return (features, labels) for sklearn's built-in breast-cancer dataset."""
    dataset = load_breast_cancer()
    return (dataset.data, dataset.target)
def load_credit_data():
    """Fetch the OpenML credit-g dataset and encode it for learning.

    Columns whose first value is a string are treated as categorical and
    one-hot encoded; the remaining columns pass through unchanged. The
    'good'/'bad' target labels are integer-encoded.

    Returns:
        tuple: (encoded_X, encoded_y).
    """
    credit = fetch_openml(name = "credit-g")
    X = credit.data
    # Collect the categorical column names by inspecting the first row.
    # NOTE Reference: https://www.youtube.com/watch?v=irHhDMbw3xo&t=492s
    cat = [column for column, value in zip(X.columns, X.iloc[0])
           if isinstance(value, str)]
    encoded_X = make_column_transformer((OneHotEncoder(), cat),
                                        remainder ="passthrough").fit_transform(X)
    # NOTE Reference: https://www.youtube.com/watch?v=YvEx0IGKTko
    encoded_y = LabelEncoder().fit_transform(credit.target)
    return (encoded_X, encoded_y)
def load_all_data():
    """Download the HAR csv from WSU and split features from labels.

    The last column of each row is the class label; everything before it
    is the feature vector.

    Returns:
        tuple: (X, y) as numpy arrays.
    """
    raw = np.loadtxt(urlopen("http://eecs.wsu.edu/~cook/ml/alldata.csv"),
                     delimiter=',')
    return (raw[:, :-1], raw[:, -1])
#=============================================================================================#
#------------------------------------ PART 2 ----------------------------------------------#
# (estimator, display-name) pairs evaluated against every dataset.
# NOTE(review): KMeans is unsupervised — it exposes the same fit/predict
# interface, but its cluster ids are not guaranteed to align with class labels.
classifiers = [
    (DummyClassifier(strategy="most_frequent"), "Simple Majority"),
    (DecisionTreeClassifier(criterion="entropy"), "Decision Tree"),
    (KNeighborsClassifier(n_neighbors=5), "KNN"),
    (KMeans(n_clusters=2, random_state=1, max_iter=2000), "K Means")
]
#=============================================================================================#
def plot_reduced_space(clf_name, X, y, newlabels):
    """Scatter-plot the data in a 3-D PCA-reduced feature space.

    Ground-truth labels are drawn as gray-scale circles and the classifier's
    predicted labels as colored triangles, so mismatches stand out visually.

    Args:
        clf_name: classifier name, used only in the printed caption.
        X: feature matrix, one row per sample.
        y: ground-truth labels, one per sample.
        newlabels: predicted labels, one per sample.
    """
    print("Reduced space for", clf_name)
    # Removed unused locals (`step_size`, `colors`) from the original version.
    pca = PCA(n_components=3)
    X_pca = pca.fit_transform(X)
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X_pca[:,0], X_pca[:,1], X_pca[:,2], marker='o', c=y, cmap='gray')
    ax.scatter(X_pca[:,0], X_pca[:,1], X_pca[:,2], marker='^', c=newlabels)
    plt.show()
#------------------------------------ PART 3&4 ---------------------------------------------#
if __name__ == "__main__":
    data_dict = load_data()
    for name, datalst in data_dict.items():
        X, y = datalst
        # 2/3 train, 1/3 test, fixed seed for reproducibility
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
        print("---------------------- Study on Dataset of ", name, " ---------------------------")
        for clf, c_name in classifiers:
            clf.fit(X_train,y_train)
            # accuracy
            newlabels = clf.predict(X_test)
            print("Classifier", c_name, "Accuracy", metrics.accuracy_score(y_test, newlabels))
            # plots
            if c_name == "Decision Tree":
                plot_tree(clf)
            plot_reduced_space(c_name, X, y, clf.predict(X))
        print('``````````````````````````````````````````````````````````````````````````````````````')
| hw2completed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Reference : https://jungsooyun.github.io/notebook/post-with-jupyter/
# # markdown test header1 A
# ## markdown test header2 A
# ## markdown test header2 B
# +
import os
import numpy as np
import h5py
# %matplotlib notebook
from matplotlib import pyplot as plt
# -
# Sanity prints confirming the notebook executes when rendered on GitHub Pages
print('hello ghpages')
print('Reference : https://jungsooyun.github.io/notebook/post-with-jupyter/')
def check_file_exists(file_path):
    """Return True if *file_path* exists on disk, printing a diagnostic either way.

    Args:
        file_path: path (str or path-like) to check.

    Returns:
        bool: True when the path exists, False otherwise.
    """
    # Idiomatic truth test instead of the original `== False` comparison.
    if not os.path.exists(file_path):
        print("Error: provided file path '%s' does not exist!" % file_path)
        return False
    print("EXIST :", file_path)
    return True
# +
# Load the captured trace data and AES material from the HDF5 file
fpath = 'trace.h5'
if check_file_exists(fpath):
    with h5py.File(fpath, 'r') as h5f:
        print(list(h5f.keys()))
        tr = h5f['trace'][:]       # [:] materializes the dataset in memory
        pt = h5f['plaintext'][:]
        ct = h5f['ciphertext'][:]
        # NOTE(review): no [:] here, so `key` is an h5py Dataset handle that
        # becomes unusable once the file is closed — confirm this is intended.
        key = h5f['key']
        print(tr.shape, pt.shape, ct.shape, key.shape)
# -
# Plot the first trace
plt.plot(tr[0])
plt.show()
# # markdown test header1 B
# ## markdown test header2 C
# ## markdown test header2 D
| _ipynbs/test-post-ipynb-in-ghpage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question Response Type Analysis
# This notebook explores the data set constructed by <NAME> and <NAME>, found [here](https://cogcomp.seas.upenn.edu/Data/QA/QC/). This data set examines various trivia/quiz questions with the intent of classifiying the type of response wanted. For example, if we looked at the question "Who was the president in 1846?" we intuitively know that the response should be a person. There are several different response categories such as description, entity, and location. In addition, each response category has several subcategories which get even more fine-grained.
#
# This is the type of problem that is relatively simple for a human to understand but somewhat difficult for a computer; using the above example, the words *who* and *the president* clue us in that the question is looking for an individual human. However, the word *who* can be used for several types of responses - "Who started World War 2" is ambiguous but could also refer to a country as well as a person. The *What* keyword adds to this ambiguity even further, as we could have questions like "What tree flowers in the spring?" or "What caused the Great Depression".
#
# While this is a difficult problem, it is an extremely relevant one - especially for a company like 98point6. Since users are texting with a doctor, our end goal as the data science team is to help doctors see more patients. Using a framework like this, we could potentially identify the type of response wanted by the patient and automatically pull up appropriate documentation for the provider. We could also queue up sample responses which might provide templates for provider responsees - this would save time typing and allow the provider to keep their focus on the patients.
#
# With the problem described and the value explained we can begin our analysis. We will start by setting up the data, then move into some basic exploratory data analysis and modeling. The data can be found in the **data** folder, and we will be using the train_* file to train our model and examine our results on the holdout set. Our utility code (which doesn't really belong in a notebook) is found in **utils.py**.
#
# Also, this repository contains a devcontainer folder that can be used to recreate the docker image which was used to run this code. This should make this notebook re-runnable (though depending on how long some of this takes to run that might not be advisable).
# +
# imports
# show all jupyter notebook output
# (shoutout to this stack overflow page, I use this in literally every notebook:
# https://stackoverflow.com/questions/36786722/how-to-display-full-output-in-jupyter-not-only-last-result )
# Make Jupyter display every expression's value in a cell, not just the last one
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# autoreload (essential when using a utility file)
# %load_ext autoreload
# %autoreload 2
# standard ds imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scikitplot as skplt
# data loading import
import src.data as data_load
# utils import
from utils import most_popular_words
# sklearn imports
import sklearn
# model saving
import joblib
# metrics
from sklearn.metrics import make_scorer, roc_auc_score, balanced_accuracy_score, accuracy_score
# model selection
from sklearn.model_selection import cross_validate, train_test_split
# preprocessing
from sklearn.preprocessing import MultiLabelBinarizer, OneHotEncoder
from sklearn.feature_extraction.text import CountVectorizer
# model composition
from sklearn.pipeline import Pipeline
# models
from sklearn.ensemble import RandomForestClassifier
# xgboost imports
# nltk imports
from nltk.tokenize import word_tokenize
# -
# Now that our imports are out of the way, let's load up our data and get a handle on its size.
# +
# Load the train/holdout splits (label files from the Li & Roth TREC dataset)
training = data_load.load("data/train_5500.label.txt")
holdout = data_load.load("data/test_TREC_10.label.txt")
print(f"Training size: {training.shape}")
training.head()
print(f"Test size: {holdout.shape}")
# -
# Class balance of the coarse target, as raw counts and as proportions
training["coarse_category"].value_counts()
training["coarse_category"].value_counts() / training["coarse_category"].value_counts().sum()
# Looks like we have almost 5500 training samples in our data set. I'd love to have more, but based on the paper published by Li and Roth about this data set this sample size should be enough. Interestingly, they were able to acheive very high accuracy - I think this is because the questions in this data set are both manually cleaned and manually labeled. This cleanliness gives the model grammatical structures to work with, and so it can learn these structures just like we as humans do. I hypothesize that a model trained on this data set would only be marginally effective to real-world data entered by customers, as they would be full of misspellings and potentially bad grammar or slang.
#
# ## EDA
# We will be starting by exploring the coarse category. To get a handle on this data, there are a few things I would like to examine:
# * Most popular words in all questions (and how they intersect with standardly accepted stop words)
# * Most popular words per coarse category
#
# Before we do any of that, we need to tokenize our data set. This will be done using nltk.
# Spot-check tokenization on one question before applying it to the column
training["question"][4]
word_tokenize(training["question"][4])
training["question_tokenized"] = training["question"].apply(word_tokenize)
training["question_tokenized"].head()
# Now that we have our tokenization, we can examine some information about it. First, what are the most popular words overall?
most_popular_words(training, "question_tokenized", n=15)
# This isn't too surprising - *What*, *How*, *Who*, and then what we would normally consider stopwords. This brings up an interesting point regarding stop words. In normal NLP applications, we would want to filter them out since they don't provide much value (and indeed, in our first basic model, we likely will filter them out). However, they do provide context about the subject of the sentence, and thus because of the way questions are structured in the English language they provide context about the response. We likely won't filter them out in the final model.
#
# Let's group by our coarse_category and look at the top few words for each.
most_popular_words(training, "question_tokenized", grouping_col="coarse_category")
# Offensive color scheme aside, this plot is extremely informative. For instance, the *NUM* category has *How*, *many* and *of*, giving a good indication of the phrases used. *LOC* has *What*, *Where*, and *country*, and the trend continues. This gives me a bit of hope that a simple bag-of-words model might not be absolutely awful. On that note let's move into modeling.
# ## Modeling
# We will start with a bag-of-words representation model and use that as our baseline.
# +
# we want a fairly simple vectorizer
# Baseline: bag-of-words counts feeding a shallow random forest
vectorizer = CountVectorizer(min_df=0.0)
model = RandomForestClassifier(n_estimators=200, max_depth=10)
simple_pipe = Pipeline([
    ("feature_extraction", vectorizer),
    ("model", model)
])
# -
# 5-fold cross-validation over the full training set with three metrics
scoring = ["balanced_accuracy", "roc_auc_ovr_weighted", "accuracy"]
cross_validate(simple_pipe, training["question"], training["coarse_category"], cv=5, scoring=scoring)
# Now this is some nice results - looks like we are classifying about 65% of the data set correctly (the accuracy) and our one-vs-rest C-stat is really nice as well. However, a single stat isn't extremely helpful when looking at multiclass, so let's dive into the confusion matrix. To do that, we will create a random 20% test set from the training set and examine it.
# Hold out a random 20% of the training data for confusion-matrix analysis
X_train, X_test, y_train, y_test = train_test_split(training["question"], training["coarse_category"], test_size=0.2)
X_train.shape
X_test.shape
# +
# fit the model
simple_pipe.fit(X_train, y_train)
# make predictions
preds = simple_pipe.predict(X_test)
preds_proba = simple_pipe.predict_proba(X_test)
# -
# Normalized confusion matrix (per-class rates), then raw counts below
skplt.metrics.plot_confusion_matrix(y_test, preds, normalize=True, figsize=(7,7))
# For the more populous columns, we did pretty well! We especially did well with entity - but, that happened at the cost of overpredicting the entity class like crazy. I'm curious what the actual numbers are:
skplt.metrics.plot_confusion_matrix(y_test, preds, figsize=(7,7))
# A sizeable portion of the data set was given to the entity class, but we still were able to discern most of the classes. The description vs entity issue also makes sense - let's examine a couple:
training[training["coarse_category"] == "DESC"]["question"].head(4).tolist()
training[training["coarse_category"] == "ENTY"]["question"].head(4).tolist()
# Look at the third DESC question - that isn't too different from the entity questions. Regardless, let's train this model on the entire data set so we can use it later and test out one other model.
# Refit on the full training set and persist the model for later comparison
simple_pipe.fit(training["question"], training["coarse_category"])
joblib.dump(simple_pipe, "models/simple_pipe_bow_v1.joblib")
# CountVectorizer also gives us a nice interface to generate n-grams - I will use this to create additional features. To accommodate the additional feature space, we will up the number of estimators and their max depth. There are a million parameters we could change for this, but I don't think we will get to a full grid search with this analysis.
# +
# we want a fairly simple vectorizer
# n-gram variant: unigrams through trigrams; larger, deeper forest to match
# the expanded feature space
vectorizer = CountVectorizer(min_df=0.0, ngram_range=(1,3))
model = RandomForestClassifier(n_estimators=300, max_depth=20)
simple_ngram_pipe = Pipeline([
    ("feature_extraction", vectorizer),
    ("model", model)
])
# -
scoring = ["balanced_accuracy", "roc_auc_ovr_weighted", "accuracy"]
cross_validate(simple_ngram_pipe, training["question"], training["coarse_category"], cv=5, scoring=scoring)
# This is a fairly significant increase in our metrics! At least our accuracy metrics. To share a secret, I played around a bit with the different parameters for our model. I originally wanted to set the min_df parameter in the vectorizer to exclude some extremely rare features, but this tanked the performance - I might have set it too high, but I imagine certain classes have very predictive features that might get cut out when making that kind of adjustment. While I think the ngrams helped, the biggest change came when I increased the max depth of each tree. Because the number of features is much greater than the number of samples, I have a feeling the learners need more depth to actually learn from these complex features, especially the interactions between them. Let's check out our confusion matrix.
# +
# fit the model
simple_ngram_pipe.fit(X_train, y_train)
# make predictions
# BUG FIX: the original cell predicted with `simple_pipe` after fitting
# `simple_ngram_pipe`, so the confusion matrices below actually showed the
# plain bag-of-words model's output instead of the n-gram model's.
preds = simple_ngram_pipe.predict(X_test)
preds_proba = simple_ngram_pipe.predict_proba(X_test)
# -
# Normalized confusion matrix, then raw counts
skplt.metrics.plot_confusion_matrix(y_test, preds, normalize=True, figsize=(7,7))
skplt.metrics.plot_confusion_matrix(y_test, preds, figsize=(7,7))
# Looks like the ngrams increased the model's power to discern between entity and others, and also increased its ability for the human and number classes as well. Overall, I think this is better model. Now let's train this on the entire data set, save it, and perform one last test - I want to see if the max_depth increase improves performance in the original model.
# Refit the n-gram pipeline on the full training set and persist it
simple_ngram_pipe.fit(training["question"], training["coarse_category"])
joblib.dump(simple_ngram_pipe, "models/simple_pipe_ngram_v1.joblib")
# +
# we want a fairly simple vectorizer
# v2 of the plain bag-of-words model: unigrams only, but max_depth raised to 20
vectorizer = CountVectorizer(min_df=0.0)
model = RandomForestClassifier(n_estimators=200, max_depth=20)
simple_pipe = Pipeline([
    ("feature_extraction", vectorizer),
    ("model", model)
])
# -
scoring = ["balanced_accuracy", "roc_auc_ovr_weighted", "accuracy"]
cross_validate(simple_pipe, training["question"], training["coarse_category"], cv=5, scoring=scoring)
# Yep, that's what I thought. Let's save this and move into comparisons of the different models on the holdout.
simple_pipe.fit(training["question"], training["coarse_category"])
joblib.dump(simple_pipe, "models/simple_pipe_bow_v2.joblib")
# ## Model Comparison
# Let's load up the three models and see which one performs best on the holdout set.
# Reload the three persisted models and score them on the untouched holdout set
bow_v1 = joblib.load("models/simple_pipe_bow_v1.joblib")
bow_v2 = joblib.load("models/simple_pipe_bow_v2.joblib")
ngram_v1 = joblib.load("models/simple_pipe_ngram_v1.joblib")
preds_bow_v1 = bow_v1.predict(holdout["question"])
preds_bow_v2 = bow_v2.predict(holdout["question"])
preds_ngram_v1 = ngram_v1.predict(holdout["question"])
y_true = holdout["coarse_category"]
# Balanced accuracy (adjusts for class imbalance), then plain accuracy
balanced_accuracy_score(y_true, preds_bow_v1)
balanced_accuracy_score(y_true, preds_bow_v2)
balanced_accuracy_score(y_true, preds_ngram_v1)
accuracy_score(y_true, preds_bow_v1)
accuracy_score(y_true, preds_bow_v2)
accuracy_score(y_true, preds_ngram_v1)
# Normalized confusion matrix for each candidate model
skplt.metrics.plot_confusion_matrix(y_true, preds_bow_v1, normalize=True, figsize=(7,7))
skplt.metrics.plot_confusion_matrix(y_true, preds_bow_v2, normalize=True, figsize=(7,7))
skplt.metrics.plot_confusion_matrix(y_true, preds_ngram_v1, normalize=True, figsize=(7,7))
# Whew that was a lot of metrics. Either way, the clear winner is the v2 version of the bag-of-words model. It predicts all classes fairly well, and while the ABBR class is extremely small it still makes a reasonable attempt at predicting it (unlike the other models). I think the confusion matrix is key here, as it gives us a good picture of how the models skew towards the different classes. If I were to deploy one of these three models based on this training and test set I would choose the second model.
#
# ## Future Work
# Due to the four hour time limit, I wasn't able to explore several avenues that I think could be extremely beneficial to this problem. For one thing, we didn't do any POS tagging which has a major impact on the subject of a question. This would be the direction that I would take this project, as I would anticipate major gains from those features. In addition, we don't do any stop word filtering, proper noun categorization, etc which might be useful.
#
# Additionally, we only used a random forest model here - while this generally does pretty well, there might be other model structures that would work better. Specifically I would like to test out XGBoost (does well in pretty much every context) and a deep learning model. Deep learning would be especially effective here - there are many pre-trained NLP models that could be utilized for this task using transfer learning. Essentially, we would use the lower layers of the pretrained neural net as a base, and add a couple of layers after. These layers would use the features constructed by the lower layers to do our predictions, essentially taking the place of POS tagging, cleaning, etc. The reason I didn't start with this is that it's a massive pain to set up without a dedicated environment and my GPU has had problems with it previously.
#
# As for model improvement, there are two avenues that I would take - more data is always good, though I don't think it would really give us much more improvement to the coarse categories. It's possible that for the finer-grained target it would help (which I wasn't able to get to due to the time limit) but I think ultimately the best thing we could do for this project would be feature engineering in the form of POS tagging and using a pre-trained deep learning model. The feature engineering would allow us to represent more of the structure of the sentence to the model, allowing it more avenues to learn. The pre-traine DL model would impose english language structure in a similar format and would give us the added advantage of identifying similar words and phrases; using word and phrase similarity is what gave the original writers the best performance and I think it could greatly improve our performance here.
#
# Finally, if this analysis was exploration for a new project at 98point6, the main thing I would be interested in is how similar our patient/provider interactions actually are to this data set. I imagine that we wouldn't have the benefit of clean grammar and proper spelling, so even moreso we would need to rely on a pretrained model designed to handle such things. However, this would give us a good basis to do some semi-supervised learning (since we likely wouldn't have explicit target labels from our patient interaction data sets).
| Question Response Type Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mainstreaming Sensitivity Analysis and Uncertainty Auditing
#
# <!-- AUTHOR: <NAME> -->
# <!-- AUTHOR: <NAME> -->
# <!-- AUTHOR: <NAME> -->
# <!-- AUTHOR: <NAME> -->
# <!-- AUTHOR: <NAME> -->
#
# **<NAME>**, **<NAME>**, **<NAME>**, **<NAME>**, **<NAME>**
#
# In this Git repository we have collected a number of notebooks that introduce uncertainty quantification and sensitivity analysis (UQSA). The material covers both epistemic (unknown and unquantifiable) uncertainty as well as stochastic uncertainty. This latter can be apportioned to the model input parameters through sensitivity analysis. Global sensitivity analysis - the approach where all the parameters are varied at the same time - is presented in its variance-based form through the calculation of Sobol sensitivity indices. The use of meta-modelling for sensitivity analysis is also described through Polynomial Chaos. Finally, sensitivity auditing - an approach to check the normative frames of any modelling activity undertaken - is also described.
#
# The notebooks on the above-mentioned sub-topics are found in the following sub-sections.
#
# ## Uncertainty and quality in science for policy
#
# [In this notebook](WebResources.ipynb), the reader can find useful web-resources on this topic. A Python-based [interactive tool for the visualization of the NUSAP (Numeral Unit Spread Assessment Pedigree) approach](Interactive_plot.ipynb) to visualise NUSAP experts scores across categories is also part of this collection.
#
# * [Resources on _Uncertainty and quality in science for policy_](WebResources.ipynb)
#
# * [Visualisation tools for pedigree scores](Interactive_plot.ipynb)
#
# ## Uncertainty quantification and sensitivity analysis tutorials
#
# Sensitivity analysis starting concepts can be found in the [book of Saltelli et al 2008](https://onlinelibrary.wiley.com/doi/book/10.1002/9780470725184) and in [this collection of notebooks](https://github.com/lrhgit/uqsa_tutorials)along with applications to the fields of biomechanics. Statistical preliminaries can be found [here](https://github.com/lrhgit/uqsa_tutorials/blob/master/preliminaries.ipynb) along with a [practical introduction](https://github.com/lrhgit/uqsa_tutorials/blob/master/sensitivity_introduction.ipynb), the [comparison of the global and local approach](https://github.com/lrhgit/uqsa_tutorials/blob/master/local_vs_global.ipynb) and the computations required to calculate [high order indices](https://github.com/lrhgit/uqsa_tutorials/blob/master/sensitivity_higher_order.ipynb). The features of the [Monte Carlo approach](https://github.com/lrhgit/uqsa_tutorials/blob/master/monte_carlo.ipynb) along with the [polynomial chaos expansion](https://github.com/lrhgit/uqsa_tutorials/blob/master/introduction_gpc.ipynb) are then presented along with an application to the [Ishigami test function](https://github.com/lrhgit/uqsa_tutorials/blob/master/ishigami_example.ipynb). [An application to the field of biomechanics](https://github.com/lrhgit/uqsa_tutorials/blob/master/wall_models.ipynb) along with [interactive exercises](https://github.com/lrhgit/uqsa_tutorials/blob/master/exercises.ipynb) are the final components of this collection of notebooks.
#
# ### New notebooks on sensitivity analysis
#
# The notebooks developed in this series aim at presenting further tools useful to students, modelers and practitioners from the quantitative-assessment field. Specifically, the use of the [Sobol sequence](sobol_interactive.ipynb) -or other low-discrepancy sequences- can be fundamental in case of computationally demanding models. This feature can be appreciated by testing the convergence pace of the Sobol sequence against a purely random approach with a [series of test functions](testfunctions3.ipynb), that reproduce additive, non-additive and higher-order-interaction models. The so-called [G and G* test functions](https://www.sciencedirect.com/science/article/pii/S0010465509003087) can help to clarify this distinction by tuning the value of the additive constant in the model. The same base functions are typically used by practitioners from the field to [benchmark the performance](interactive_gstar_function.ipynb) of different approaches as illustrated for the [variance-based estimator](https://www.sciencedirect.com/science/article/pii/S0010465509003087) against [the polynomial chaos approach](interactive_g_function.ipynb). Finally, an application to a real case is presented by assessing whether [silver shortage could constrain large scale development of solar photovoltaics](https://github.com/pbstark/SA/blob/master/New_notebooks/silver2.ipynb) is presented.
#
# * [Sobol sequence](sobol_interactive.ipynb)
#
# * [Test functions for testing convergence pace](testfunctions3.ipynb)
#
# * [Introduction to polynomial chaos with chaospy](introduction_gpc.ipynb)
#
# * [Comparison of the polynomial chaos and variance-based estimator approaches for the G function](interactive_g_function.ipynb)
#
# * [Comparison of the polynomial chaos and variance-based estimator approaches for the G* function](interactive_gstar_function.ipynb)
#
# * [Comparison of the polynomial chaos and variance-based estimator approaches for the other test functions](PC_test_functions.ipynb)
#
# * [Silver as a potential constraint to large-scale development of photovoltaics](silver2.ipynb)
#
# ## Sensitivity auditing
#
# This approach is an enhancement of sensitivity analysis to the full normative aspects of the modelling activity. A thorough description of the approach with a series of examples can be found [here](sensitivity_auditing.ipynb).
#
# * [Sensitivity Auditing](sensitivity_auditing.ipynb)
#
# ## Acknowledgements
#
# This collection of notebooks was developed with financial support from the
# [Peder Sather Center for Advanced Study](http://sathercenter.berkeley.edu)
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simulate SimSAX parameters
# SimSAX (Similarity-based on Symbolic Aggregate approXimation) is a similarity measure between defect-inflow profiles.
#
# In this notebook, we show how to use simulation to tune the measure parameters.
# +
# %reload_ext autoreload
# %autoreload
# external imports
import string
import pandas as pd
import numpy as np
import random
import itertools
import matplotlib.pyplot as plt
# %matplotlib inline
# simsax
from simsax.projects import load_backlog
from simsax.alignment.model import ProjectSequence
from simsax.alignment.finders import calculate_project_alignment, create_sax_signature, find_motifs
from simsax.alignment.calculators import AlignmentsCoverageCalculator
from simsax.alignment.plot import plot_all_motif_alignments, plot_subplot_motif, plot_comparisons_motif, plot_sequence_with_anomalies
from simsax.alignment.simulation import simulate_alignments, plot_simulation_word_vs_w_to_n, plot_simulation_3d
from sax.pysax import SAXModel
# -
# ## 1. Load the defect backlog
# We start from creating a project:
# +
# Load the two sample Eclipse defect backlogs (JDT and Platform) that ship
# with the repository.
project_a = load_backlog(backlogs_folder='./data-examples/', file_name='eclipse_jdt_sample_backlog.csv',
                         source='Eclipse', dev_type='Open Source')
project_b = load_backlog(backlogs_folder='./data-examples/', file_name='eclipse_platform_sample_backlog.csv',
                         source='Eclipse', dev_type='Open Source')
# -
# Then, we create a project sequence, which maps between the project and particular time series we are interested in:
# Wrap each project so downstream code sees its defect-inflow time series.
project_a_seq = ProjectSequence(project_a, get_sequence=lambda x: x.backlog.inflow_all)
project_b_seq = ProjectSequence(project_b, get_sequence=lambda x: x.backlog.inflow_all)
# ## 2. Inject known anomalies into the projects
#
# Let's load the projects once again:
# +
# Fresh copies of both backlogs: anomalies are injected into these so the
# clean project_a_seq / project_b_seq created above stay untouched.
project_a_anomaly = load_backlog(backlogs_folder='./data-examples/', file_name='eclipse_jdt_sample_backlog.csv',
                                 source='Eclipse', dev_type='Open Source')
project_b_anomaly = load_backlog(backlogs_folder='./data-examples/', file_name='eclipse_platform_sample_backlog.csv',
                                 source='Eclipse', dev_type='Open Source')
# +
# Same inflow series; names get an "Anomaly_" prefix so plots and result
# files are distinguishable from the clean projects.
project_a_anomaly_seq = ProjectSequence(project_a_anomaly, get_sequence=lambda x: x.backlog.inflow_all)
project_a_anomaly_seq.project.name = "Anomaly_{}".format(project_a_anomaly_seq.project.name)
project_b_anomaly_seq = ProjectSequence(project_b_anomaly, get_sequence=lambda x: x.backlog.inflow_all)
project_b_anomaly_seq.project.name = "Anomaly_{}".format(project_b_anomaly_seq.project.name)
# -
# Now, we decide on the size of window
# Window length (samples) used both as the anomaly width and the SAX window.
window = 32
# Let's generate a "rectangular" anomaly (choose a shape that does not appear in the time series).
def inject_rectangular_anomalies(window, project_seq):
    """Overwrite every other window of the sequence with a rectangular pulse.

    Windows 0, 2, 4, ... are replaced in place by a plateau at 120% of the
    sequence maximum, flanked by single zero samples, so the injected shape
    cannot be confused with naturally occurring defect inflow.

    Parameters
    ----------
    window : int
        Anomaly width in samples; the sequence must be at least this long.
    project_seq : ProjectSequence
        Wrapper whose ``sequence`` (a pandas Series) is mutated in place.

    Returns
    -------
    list of int
        Ground-truth indices of all injected anomaly samples.

    Raises
    ------
    ValueError
        If the sequence is shorter than ``window``.
    """
    if project_seq.length() < window:
        # Was a bare `Exception`; ValueError is the conventional type for a
        # bad argument and remains catchable by existing `except Exception`.
        raise ValueError("Sequence can't be shorter than the window length")
    level = project_seq.sequence.max() + 0.2 * project_seq.sequence.max()
    ground_truth = []
    # Temporarily silence pandas' chained-assignment warning while writing
    # through the Series view; restored below.
    pd.options.mode.chained_assignment = None
    for i in range(project_seq.length() // window):
        if i % 2 == 0:  # only every other window receives an anomaly
            start = i * window
            project_seq.sequence[start] = 0
            project_seq.sequence[(start+1):(start+window-1)] = level
            project_seq.sequence[start+window-1] = 0
            ground_truth.extend(range(start, start + window))
    pd.options.mode.chained_assignment = 'warn'
    return ground_truth
# Inject the anomalies into the copies and plot the clean vs. anomalous
# series for visual confirmation; the *_gt lists are the ground-truth indices.
project_a_gt = inject_rectangular_anomalies(window, project_a_anomaly_seq)
plot_sequence_with_anomalies(project_a_seq, project_a_anomaly_seq)
project_b_gt = inject_rectangular_anomalies(window, project_b_anomaly_seq)
plot_sequence_with_anomalies(project_b_seq, project_b_anomaly_seq)
# ## 3. Run the simulation
# +
# parameters
# SAX parameter grid: a single window size, all word lengths (nbins) up to
# the window, alphabet sizes 3..20; stride and score threshold fixed at 1.
windows = [window]
within_min_dists = [1]
nbinses = list(range(2, window + 1))  # was a redundant [x for x in range(...)] comprehension
alphabets = list(range(3, 21))
strides = [1]
score_thresholds = [1]
# Cartesian product of all parameter values; product(*[a, b, ...]) was an
# unnecessary unpack of a literal list.
params = list(itertools.product(windows, within_min_dists, nbinses, alphabets, score_thresholds, strides))
# -
output_folder = "./data-examples/simulation/"
# project a
# Anomalous series first, clean series second; the ground-truth indices are
# excluded from coverage counting. Returns an auto-generated results filename.
project_sequences = [project_a_anomaly_seq, project_a_seq]
sim_a_filename = simulate_alignments(params, project_sequences, output_folder=output_folder,
                                     check_50_50_criterion=False, ignore_list=project_a_gt)
sim_a_filename
# project b
project_sequences = [project_b_anomaly_seq, project_b_seq]
sim_b_filename = simulate_alignments(params, project_sequences, output_folder=output_folder,
                                     check_50_50_criterion=False, ignore_list=project_b_gt)
sim_b_filename
# ## 4. Load results and interpret them
#
# Normally, we would load the *sim_a_filename* and *sim_b_filename* files, but since the names are autogenerated, we will use two files that we know of and are stored in the "./data-examples" folder.
# Stack the two pre-computed simulation result sheets into one frame.
simulation_data = pd.concat([pd.read_excel("./data-examples/simulation/sim_2019_05_3_13255.xlsx"),
                             pd.read_excel("./data-examples/simulation/sim_2019_05_3_135249.xlsx")], axis=0)
# Drop the two empty spreadsheet columns produced by the export.
simulation_data.drop(['Unnamed: 12', 'Unnamed: 13'], inplace=True, axis=1)
simulation_data.head(2)
plot_simulation_3d(simulation_data)
# From the plot above, we can see that the maximum F-score is reached for w = 7 and a = 7, also variability of F-score for these values of parameters is getting smaller. Of course, from this plot, we can't see the combinations between both w and a. We need to explore it further.
plot_simulation_word_vs_w_to_n(simulation_data)
# Let's assume we would like to stay with w = 7, let's see what values of a would be feasible to achieve max F-score.
# fscore is stored as text in the sheet, hence the astype('float32') casts.
max_fscore = simulation_data['fscore'].astype('float32').max()
simulation_data[(simulation_data['fscore'].astype('float32') == max_fscore) & (simulation_data['nbins'] == 7)].sort_values('alphabet').head(1)
| Simulate SimSAX parameters.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import MultinomialNB
# %matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from collections import Counter
from sklearn.model_selection import StratifiedKFold
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer, TfidfTransformer
from sklearn.pipeline import Pipeline
from nltk.corpus import stopwords
# Load the IMDB sentence/sentiment file (tab separated, no header row).
dt_set = pd.read_table("C:/Users/USER/Downloads/imdb_labelled.txt", sep="\t", header=None)
dt_set.info()
dt_set.head(10)
# Name the columns: raw review text and the 0/1 sentiment label.
dt_set.columns = ['texts', 'sentiments']
dt_set.head()
dt_set['sentiments'].value_counts()
# +
# Enlarge the default figure size, then show the class balance as a pie chart.
plot_size = plt.rcParams["figure.figsize"]
print(plot_size[0])
print(plot_size[1])
plot_size[0] = 8
plot_size[1] = 6
plt.rcParams["figure.figsize"] = plot_size
dt_set['sentiments'].value_counts().plot(kind='pie', autopct='%1.0f%%')
# -
# Human-readable label column (1 -> 'positive', 0 -> 'negative').
dt_set['labels'] = dt_set['sentiments'].apply(lambda x: 'positive' if x else 'negative')
dt_set.head()
# Word count per review (whitespace split).
dt_set['lengths'] = dt_set['texts'].apply(lambda x: len(str(x).split(' ')))
dt_set.head()
dt_set['lengths'].unique()
# 60/40 train/test split on the raw text; fixed seed for reproducibility.
X = dt_set['texts']
y = dt_set['sentiments']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .40, random_state = 60)
X_train.shape, X_train.to_list()[:10]
# +
# Fit TF-IDF on the training texts and inspect the dense matrix as a DataFrame.
tfidf_vectorizer = TfidfVectorizer()
tfidf = tfidf_vectorizer.fit_transform(X_train)
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2;
# get_feature_names_out() is the replacement — confirm the pinned version.
tfidf_df = pd.DataFrame(data=tfidf.toarray().astype(float), columns=tfidf_vectorizer.get_feature_names())
tfidf_df["__labels__"] = y_train.values
tfidf_df.head()
# -
# NOTE(review): feature names are unique, so every Counter value is 1 and
# most_common(10) returns an arbitrary selection — token counts were
# probably intended here.
voc = Counter(tfidf_vectorizer.get_feature_names())
voc.most_common(10)
# TF-IDF weight of a single word across the positive-class training documents.
word = 'very'
xv = tfidf_df[tfidf_df['__labels__']== 1][word]
xv.sort_values(ascending=False)
plt.bar(xv.index, xv.values)
plt.show()
# Single end-to-end text classifier: token counts (English stop words
# removed) -> TF-IDF weighting -> multinomial Naive Bayes.
text_clf_steps = [
    ('BoW', CountVectorizer(stop_words = 'english')),
    ('tfidf', TfidfTransformer()),
    ('classifier', MultinomialNB()),
]
vectorization = Pipeline(text_clf_steps)
# Fit on the training split, predict the held-out split, and report
# per-class precision/recall/F1.
model = vectorization.fit(X_train, y_train)
pred = model.predict(X_test)
print(classification_report(y_test, pred))
# +
# 5-fold stratified cross-validation of the full pipeline on the whole corpus.
accuracy = []
stratifiedKf_pred = StratifiedKFold(n_splits = 5, shuffle = True, random_state = 100)
stratifiedKf_pred.get_n_splits(X, y)
for train_index, test_index in stratifiedKf_pred.split(X, y):
    X_train1, X_test1 = X.iloc[train_index], X.iloc[test_index]
    y_train1, y_test1 = y.iloc[train_index], y.iloc[test_index]
    # Refit from scratch on each fold and score accuracy on the held-out part.
    vectorization.fit(X_train1, y_train1)
    result = vectorization.score(X_test1, y_test1)
    accuracy.append(result)
accuracy = np.array(accuracy)
print(accuracy)
print(f"mean_accuracy = {accuracy.mean():.4f}, std_accuracy = {accuracy.std():.4f}")
# -
| MultinomialNB Algorithm on Movie Review with StratifiedKfold CV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pandas as pd
import tensorflow as tf
import librosa
import librosa.display as ld
import numpy as np
import random
import matplotlib.pyplot as plt
plt.style.use("dark_background")
# + pycharm={"name": "#%%\n"}
# Raw string: "\s" and "\p" are invalid escape sequences in an ordinary
# string literal (DeprecationWarning today, a SyntaxError in future Python);
# the r-prefix keeps the Windows path value byte-for-byte identical.
audio_data_dir = r"d:\soundofai\pitch_60_audio"
# Collect all .wav files (case-insensitive extension match).
audio_files = [x for x in os.listdir(audio_data_dir) if x.lower().endswith("wav")]
print(f"found {len(audio_files)} files")
# + pycharm={"name": "#%%\n"}
# Quality annotations: one row per audio file, quality columns from index 2
# to the second-to-last.
df = pd.read_csv("../data/old_final.csv", index_col=0)
df.head()
# + pycharm={"name": "#%%\n"}
# Build filename -> multi-hot label mapping; values above 1 are clipped to 1
# and rows with no positive quality at all are dropped.
dataset = {}
for i, row in df.iterrows():
    qualities= row.iloc[2:-1]
    classes = np.clip(qualities.values, 0, 1)
    if np.sum(classes) == 0:
        continue
    else:
        dataset[row.iloc[0]] = classes.tolist()
# + pycharm={"name": "#%%\n"}
# NOTE(review): `classes` leaks out of the loop in the previous cell, so this
# is the label count of the *last* processed row — fine only if every row has
# the same number of quality columns; confirm against the CSV schema.
num_classes = len(classes)
frame_size = 2048    # STFT window size in samples
hop_len = 512        # STFT hop length in samples
num_mels = 128       # number of mel bands (spectrogram height)
sample_rate = 16000  # Hz; audio is resampled to this on load
def get_melspectrogram(signal):
    """Return the mel spectrogram of `signal` converted to a decibel scale."""
    power_spectrogram = librosa.feature.melspectrogram(
        signal,
        n_fft=frame_size,
        hop_length=hop_len,
        n_mels=num_mels,
    )
    return librosa.power_to_db(power_spectrogram)
def plot_melspectrogram(ms):
    """Render a (dB-scaled) mel spectrogram with time/mel axes and a colorbar."""
    fig_width, fig_height = 12, 6
    plt.figure(figsize=(fig_width, fig_height))
    ld.specshow(
        ms,
        sr=sample_rate,
        hop_length=hop_len,
        x_axis='time',
        y_axis='mel',
        cmap='viridis',
    )
    plt.colorbar()
    plt.show()
def get_ms_from_file(file_path):
    """Load a wav file and return the mel spectrogram of its first 32400 samples."""
    signal, _sr = librosa.load(file_path, sr=sample_rate)
    clipped = signal[:32400]
    return get_melspectrogram(clipped)
# + pycharm={"name": "#%%\n"}
# Sanity check on one file: load, inspect the raw sample count, then
# visualize the mel spectrogram of the first 32400 samples (~2 s at 16 kHz).
audio, _ = librosa.load(os.path.join(audio_data_dir, audio_files[0]), sr=sample_rate)
print(audio.shape)
# + pycharm={"name": "#%%\n"}
mel_sp = get_melspectrogram(audio[:32400])
print(mel_sp.shape)
plot_melspectrogram(mel_sp)
# + pycharm={"name": "#%%\n"}
from tensorflow.keras.layers import Conv2D, MaxPool2D, BatchNormalization
from tensorflow.keras.layers import Input, Lambda, Flatten, Dropout, Dense
def create_model():
    """Build a small CNN multi-label classifier for (128, 64) mel spectrograms.

    Four conv/pool/batch-norm stages with 16, 32, 64 and 128 filters feed a
    512-unit dense head; the output layer is a per-class sigmoid because
    several qualities can be present at once.

    Returns
    -------
    tf.keras.Model
        Uncompiled Keras model with input (128, 64) and `num_classes` outputs.
    """
    def conv_block(input_, num_filters):
        # One 3x3 ReLU conv + 2x2 max-pool + batch normalization stage.
        x = Conv2D(num_filters, 3, activation='relu')(input_)
        x = MaxPool2D(2)(x)
        return BatchNormalization()(x)

    input_ = Input(shape=(128, 64))
    # Add a trailing channel axis so Conv2D sees (128, 64, 1); the lambda
    # parameter is renamed `t` so it no longer shadows the outer `x`.
    x = Lambda(lambda t: tf.expand_dims(t, axis=-1))(input_)
    for i in range(4):  # was range(0, 4); identical iterations
        num_filters = 2**(4 + i)
        x = conv_block(x, num_filters)
    x = Flatten()(x)
    x = Dropout(0.25)(x)
    x = Dense(512, activation="relu")(x)
    x = Dropout(0.25)(x)
    output_ = Dense(num_classes, activation='sigmoid')(x)
    model = tf.keras.models.Model(input_, output_)
    return model

model = create_model()
model.summary()
# + pycharm={"name": "#%%\n"}
# Quick check: number of usable (non-empty-label) training examples.
len(dataset)
# + pycharm={"name": "#%%\n"}
def data_generator(batch_size=16):
    """Yield endless random (spectrogram, label) batches from `dataset`.

    Each batch is a (batch_size, 128, 64) array of mel spectrograms and a
    (batch_size, num_classes) multi-hot label array. Examples are sampled
    uniformly with replacement.
    """
    # Hoisted out of the loops: `dataset` is fully built by this point, so
    # materializing the items once avoids an O(len(dataset)) list copy for
    # every single sample drawn.
    examples = list(dataset.items())
    while True:
        x_batch = np.zeros((batch_size, 128, 64))
        y_batch = np.zeros((batch_size, num_classes))
        for i in range(batch_size):
            file_name, label = random.choice(examples)
            file_path = os.path.join(audio_data_dir, file_name)
            x_batch[i] = get_ms_from_file(file_path)
            y_batch[i] = np.array(label)
        yield x_batch, y_batch
# + pycharm={"name": "#%%\n"}
# Binary cross-entropy with the sigmoid outputs = independent per-class
# (multi-label) classification; conservative learning rate for small batches.
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=2e-5),
    loss="binary_crossentropy",
    metrics=["accuracy"]
)
# NOTE(review): EarlyStopping monitors *training* accuracy (no validation
# data is supplied), so this stops on a training plateau, not on overfitting.
_ = model.fit(
    data_generator(),
    steps_per_epoch=50,
    epochs=50,
    callbacks=[
        tf.keras.callbacks.EarlyStopping(monitor="accuracy", patience=5)
    ]
)
# + pycharm={"name": "#%%\n"}
| members/amit/quality_clf/quality_clf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## The RBF Kernel in scikit-learn
import numpy as np
import matplotlib.pyplot as plt
# +
from sklearn import datasets

# Two interleaving half-moons with Gaussian noise; fixed seed so the decision
# boundaries below are reproducible.
X, y = datasets.make_moons(noise=0.15, random_state=666)
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
plt.show()
# +
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.pipeline import Pipeline
def RBFKernelSVC(gamma=1.0):
    """Return a pipeline that standardizes features, then fits an RBF-kernel SVC."""
    steps = [
        ("std_scaler", StandardScaler()),
        ("svc", SVC(kernel="rbf", gamma=gamma)),
    ]
    return Pipeline(steps)
# -
# Baseline fit with the default gamma.
svc = RBFKernelSVC(gamma=1.0)
svc.fit(X, y)
# Increase gamma and inspect the result: gamma=100 overfits.
svc_gamma100 = RBFKernelSVC(gamma=100)
svc_gamma100.fit(X, y)
# Try gamma=10 and inspect the result.
svc_gamma10 = RBFKernelSVC(gamma=10)
svc_gamma10.fit(X, y)
# Try a value smaller than 1.
svc_gamma05 = RBFKernelSVC(gamma=0.5)
svc_gamma05.fit(X, y)
# With 0.1 the boundary is already close to linear: underfitting.
svc_gamma01 = RBFKernelSVC(gamma=0.1)
svc_gamma01.fit(X, y)
| data-science/scikit-learn/09/04-RBF-Kernel-in-scikit-learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import print_function
from distutils.version import LooseVersion as Version
import sys
# Status markers: ANSI-colored when the terminal supports >2 colors,
# plain text otherwise.
try:
    import curses
    curses.setupterm()
    # `assert` doubles as control flow here: monochrome terminals fall
    # through to the plain-text markers below.
    assert curses.tigetnum("colors") > 2
    OK = "\x1b[1;%dm[ OK ]\x1b[0m" % (30 + curses.COLOR_GREEN)
    FAIL = "\x1b[1;%dm[FAIL]\x1b[0m" % (30 + curses.COLOR_RED)
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are no
    # longer swallowed; ImportError (no curses on Windows), curses.error
    # (no usable terminal) and the AssertionError above are still caught.
    OK = '[ OK ]'
    FAIL = '[FAIL]'
try:
    import importlib
except ImportError:
    # importlib appeared in Python 2.7/3.1, so its absence signals an
    # unsupported interpreter.
    print(FAIL, "Python version 3.4 (or 2.7) is required,"
                " but %s is installed." % sys.version)
def import_version(pkg, min_ver, fail_msg=""):
    """Import `pkg` and print an OK/FAIL line for its version.

    Parameters
    ----------
    pkg : str
        Importable module name (e.g. 'numpy', 'PIL').
    min_ver : str
        Minimum acceptable version, compared with LooseVersion semantics.
    fail_msg : str
        Extra hint appended to the message when the package is missing.

    Returns
    -------
    module or None
        The imported module, or None if the import failed.
    """
    mod = None
    try:
        mod = importlib.import_module(pkg)
        if pkg in {'PIL'}:
            # Pillow historically exposed VERSION instead of __version__.
            ver = mod.VERSION
        else:
            ver = mod.__version__
        if Version(ver) < min_ver:
            # BUG FIX: the message previously interpolated `lib`, a loop
            # variable leaked from module scope, instead of this function's
            # own `pkg` parameter.
            print(FAIL, "%s version %s or higher required, but %s installed."
                  % (pkg, min_ver, ver))
        else:
            print(OK, '%s version %s' % (pkg, ver))
    except ImportError:
        print(FAIL, '%s not installed. %s' % (pkg, fail_msg))
    return mod
# first check the python version
print('Using python in', sys.prefix)
print(sys.version)
pyversion = Version(sys.version)
# LooseVersion string comparison works here because sys.version starts with
# the numeric release ("3.x.y ...").
if pyversion >= "3":
    if pyversion < "3.4":
        print(FAIL, "Python version 3.4 (or 2.7) is required,"
                    " but %s is installed." % sys.version)
elif pyversion >= "2":
    if pyversion < "2.7":
        print(FAIL, "Python version 2.7 is required,"
                    " but %s is installed." % sys.version)
else:
    print(FAIL, "Unknown Python version: %s" % sys.version)
print()
# Minimum package versions for the course; keys are *import* names
# (hence 'sklearn', 'PIL', 'IPython').
requirements = {'numpy': "1.7.1", 'scipy': "0.9", 'matplotlib': "2.0",
                'IPython': "3.0", 'sklearn': "0.19.1", 'pandas': "0.19",
                'PIL': "1.1.7", 'ipywidgets': '6.0'}
# now the dependencies
for lib, required_version in list(requirements.items()):
    import_version(lib, required_version)
| check_env.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cropChallenge
# language: python
# name: cropchallenge
# ---
# + [markdown] id="2662ae67-7d0b-4501-8cfb-c7b07afdaabf"
# ## Libs
# + colab={"base_uri": "https://localhost:8080/"} id="28077157-a248-4324-b16a-0dfb1f085e4d" outputId="2931bed4-add7-4aee-8105-c5e5df25809a" tags=[]
import os
import ast
import pandas as pd
import numpy as np
from matplotlib import pyplot
import matplotlib.patches as mpatches
import seaborn as sn
from tqdm.std import tqdm
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow import keras
import logging
# Root logger at INFO so the version line below (and later messages) appear.
logging.basicConfig(level='INFO')
logging.info(msg=f"Using tensorflow version : {tf.__version__}")
# + id="34b80287-7aec-4c4b-acec-cc9cc5791c7b" tags=[]
import warnings
# Notebook-wide: hide pandas/sklearn deprecation chatter in cell outputs.
warnings.filterwarnings(action="ignore")
# + [markdown] id="ab471e95-c461-4398-a231-30d9fdb6c463"
# ## Config & Helpers funcs
# + id="3be921ca-53cd-490b-b480-dd6627e4405a" tags=[]
class Config:
    """Project directory layout (paths are relative to the notebook)."""
    data_dir = '../data/'                # raw CSV inputs
    working_dir = '../src/'              # source / scratch area
    models_dir = '../models/'            # serialized models
    submissions_dir = '../submissions'   # generated submission files
def preprocess_data(df:pd.DataFrame, skip_cols:list=None)->pd.DataFrame:
    """
    Encode categoricals, parse date columns and fill missing values.

    Parameters
    ----------
    df : Pandas dataframe
        Dataframe to be preprocessed.
    skip_cols : list, optional
        Columns to leave untouched during preprocessing. BUG FIX: the old
        None default crashed the `col not in skip_cols` membership test
        with a TypeError; it now falls back to an empty list.

    Returns
    -------
    df : Pandas dataframe
        Preprocessed dataframe.

    Examples
    --------
    >>> dataset = pd.read_csv(filepath_or_buffer='<path_to_data.csv>')
    >>> new_df = preprocess_data(df=dataset)
    """
    df = df.copy(deep=True)
    if skip_cols is None:
        skip_cols = []
    # The first column (the ID) is always skipped.
    cols = [col for col in df.columns[1:] if col not in skip_cols]
    cat_cols = [col for col in cols if df[col].dtype == 'O' and 'Date' not in col]
    date_cols = [col for col in cols if 'Date' in col]
    # Convert object columns to pandas categoricals (integer codes below).
    for col in tqdm(cat_cols, desc='Processing categorical columns \t'):
        df[col] = df[col].astype('category')
    # Parse date columns and strip timezone info so they compare cleanly.
    for col in tqdm(date_cols, desc='Processing datetime columns \t'):
        df[col] = pd.to_datetime(df[col]).dt.tz_localize(None)
    # Fill missing values: string sentinel for UpsellDate, modal Region,
    # rounded mean Age.
    df.UpsellDate.fillna(value='None', inplace=True)
    df.Region.fillna(value=df.Region.mode()[0], inplace=True)
    df.Age.fillna(value=round(df.Age.mean()), inplace=True)
    # Replace each categorical column by its integer codes; log (not crash)
    # on any column that cannot be encoded.
    for col in cat_cols:
        try:
            df[col] = df[col].cat.codes
        except Exception as ex:
            logging.error(msg=f"Column issue ({col}) : {ex}")
    return df
def extract_features(df:pd.DataFrame)->pd.DataFrame:
    """Derive date/payment features and drop the raw payment columns.

    Adds registration-calendar flags, first/last payment month and year,
    months left on the contract (LeftTime), a binarized AccessoryRate and a
    has-upsell flag, then drops TransactionDates, PaymentsHistory and m1..m6.

    Assumes TransactionDates holds a stringified list of timezone-aware
    timestamps and that the date columns were already parsed by
    preprocess_data.
    """
    df = df.copy()
    df['RegisteredInLeapYear'] = df.RegistrationDate.dt.is_leap_year.astype('float')
    df['RegisteredAtMonthStart'] = df.RegistrationDate.dt.is_month_start.astype('float')
    df['RegisteredAtMonthEnd'] = df.RegistrationDate.dt.is_month_end.astype('float')
    df['FirstPaymentMonth'] = df.FirstPaymentDate.dt.month
    df['FirstPaymentYear'] = df.FirstPaymentDate.dt.year
    lastReceivedPayment = []
    accessory_rate = []
    upsell_date = []
    for _, row in df.iterrows():
        # Parse the stringified transaction list once per row.
        transactions = ast.literal_eval(row.TransactionDates)
        lastReceivedPayment.append(transactions[-1])
        # Binarize: any truthy accessory rate counts as "has accessory".
        accessory_rate.append(1 if row.AccessoryRate else row.AccessoryRate)
        # NOTE(review): after preprocess_data, missing UpsellDate is the
        # *string* 'None', which is != None — confirm the flag is meant to
        # be 1 for those rows too.
        upsell_date.append(1 if row.UpsellDate != None else 0)
    # (Removed a leftover debug print of the full lastReceivedPayment list.)
    df['LastReceivedPayment'] = lastReceivedPayment
    df['LastReceivedPayment'] = pd.to_datetime(df['LastReceivedPayment']).dt.tz_localize(None)
    df['LastReceivedMonth'] = df.LastReceivedPayment.dt.month
    df['LastReceivedYear'] = df.LastReceivedPayment.dt.year
    # Whole months between the last received payment and the expected term.
    df['LeftTime'] = (df['ExpectedTermDate'].dt.year-df['LastReceivedPayment'].dt.year)*12 + df['ExpectedTermDate'].dt.month -df['LastReceivedPayment'].dt.month
    df['AccessoryRate'] = accessory_rate
    df['UpsellDate'] = upsell_date
    return df.drop(columns=["TransactionDates", "PaymentsHistory", "m1", "m2", "m3", "m4", "m5", "m6"], inplace=False)
def pad_history(df:pd.DataFrame, max_len:int=41):
    """Pad (with 0.0) or truncate every PaymentsHistory list to `max_len`.

    Parameters
    ----------
    df : Pandas dataframe
        Must contain a stringified-list `PaymentsHistory` column.
    max_len : int
        Target history length.

    Returns
    -------
    (Pandas dataframe, int)
        A copy with `PaymentsHistory` as real fixed-length lists, and max_len.
    """
    df = df.copy()
    padded_payments = []
    for _, row in df.iterrows():
        # Parse once per row — the original evaluated the same string up to
        # three times per iteration.
        history = ast.literal_eval(row.PaymentsHistory)
        if len(history) > max_len:
            padded_payments.append(history[:max_len])
        else:
            padded_payments.append(history + [0.] * (max_len - len(history)))
    df["PaymentsHistory"] = padded_payments
    return df, max_len
def pad_history_most_recent(df:pd.DataFrame, max_len:int=6):
    """Extract the five most recent payments and the remaining contract value.

    Adds PaymentsHistory_1..PaymentsHistory_5 (1 = most recent) and
    RemainingPayment = TotalContractValue - sum(all payments). Assumes every
    PaymentsHistory list has at least five entries.

    Note: `max_len` is kept for interface compatibility but is unused, as in
    the original implementation.
    """
    df = df.copy()
    recent = {k: [] for k in range(1, 6)}
    remaining_payment = []
    for _, row in df.iterrows():
        # Parse the stringified list once per row — the original called
        # ast.literal_eval six times per iteration.
        history = ast.literal_eval(row.PaymentsHistory)
        for k in range(1, 6):
            recent[k].append(history[-k])
        remaining_payment.append(row.TotalContractValue - np.sum(history))
    for k in range(1, 6):
        df[f"PaymentsHistory_{k}"] = recent[k]
    df["RemainingPayment"] = remaining_payment
    return df
# + [markdown] id="cc9471c0-1136-4ada-9795-78e090bd6798"
# # Dataset
# ---
# 1. Load files
# 2. Exploratory Data Analysis (EDA)
# 3. Preprocess data
# 4. Extract features
# 4. Train/Test split
# + id="100991a5-5846-4d15-854f-c9fe4f3ceb90" tags=[]
# Competition files: training targets, per-customer metadata, test IDs and
# the sample submission template.
train = pd.read_csv( "Train.csv")
metadata = pd.read_csv( "metadata.csv")
test = pd.read_csv("Test.csv")
ss = pd.read_csv( "SampleSubmission.csv")
# + [markdown] id="4cba1668-d647-46e9-8a64-37be49777848"
# ### EDA
# + colab={"base_uri": "https://localhost:8080/"} id="06b85c38-02e4-4e0c-bca1-9dc6eadc9fc0" outputId="a043009a-741d-4e24-bd9f-324f11f2b76b" tags=[]
train.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 237} id="60a283a6-cda8-4029-86da-d42e240ed33c" outputId="ea3f75b6-3f88-4fcc-eae0-9e7185c25fa8" tags=[]
# Per-column summary statistics, transposed so each feature is a row.
train.describe(datetime_is_numeric=True).transpose()
# + colab={"base_uri": "https://localhost:8080/", "height": 147} id="9d5bca6f-ed45-4c33-95fd-c27d18328d11" outputId="4733a9b8-666a-47e8-f582-d07b2dd037e9" tags=[]
train.head(n=2)
# + colab={"base_uri": "https://localhost:8080/"} id="b25e6608-cdcf-4e12-832f-96cfdc7190c0" outputId="2c57fc53-cc1c-4e8e-a85d-d92f564e704e" tags=[]
metadata.info()
# + id="GUgvuXP8e0kV"
# Drop metadata columns that are not used downstream.
metadata.drop(columns=["SupplierName", "LastPaymentDate", "PaymentMethod", "rateTypeEntity"], inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 269} id="03c3aff6-940f-43a0-ae32-e6f4ad05a294" outputId="64e1dc24-629f-46bd-fe8c-543388920171" tags=[]
metadata.describe(datetime_is_numeric=True).transpose()
# + colab={"base_uri": "https://localhost:8080/", "height": 184} id="1a785a2b-4059-483d-a4ab-cf6dd335e1c3" outputId="3c534a50-9ab0-4458-a7f2-00f8a14268bc" tags=[]
metadata.head(n=2)
# + colab={"base_uri": "https://localhost:8080/"} id="734ad388-1bdf-4066-ad9e-859168ee206c" outputId="5535bd82-6ebd-48d4-c4d7-2bbdb7a606ee" tags=[]
test.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 161} id="8d207ec9-fd50-4441-8454-017593f87487" outputId="e80a356c-0fc8-4c6f-cdf0-0e6c1fd3405c" tags=[]
test.describe(datetime_is_numeric=True).transpose()
# + colab={"base_uri": "https://localhost:8080/", "height": 112} id="b953f74a-24b1-4b7c-a925-a6aaf2e82aab" outputId="0ba3d5b0-0ac9-4c30-8a0c-20bca1af32b7" tags=[]
test.head(n=2)
# + colab={"base_uri": "https://localhost:8080/"} id="5b0c111f-1dd2-4178-b921-49f4020a35b6" outputId="962962e6-bdb2-4ac9-b5a6-a113cde3a07d" tags=[]
print("---------------- Missing values Report ----------------")
print(metadata.isna().sum())
# + id="d3c2c42f-6597-4241-abf6-ad0e45693ac4" tags=[]
# Stack train+test (missing test targets become 0.) and join metadata by ID.
merged = pd.merge(left=metadata, right=pd.concat(objs=[train, test]).fillna(value=0.), on="ID")
# + [markdown] id="98b16880-9d1d-4cfa-86c3-132c6eb005d0"
# ### Preprocessing
# + colab={"base_uri": "https://localhost:8080/"} id="482deef3-7079-4005-b7c0-5cc6126e0274" outputId="95f80b7c-ea69-4e4d-980f-f5547244c749" tags=[]
# Encode categoricals and dates, keeping the raw payment strings intact.
merged_ = preprocess_data(df=merged, skip_cols=["PaymentsHistory", "TransactionDates"])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="HK9q4kJqoexk" outputId="564d8b22-f90b-4bad-bf86-d88811c8bbbd"
# Distribution of transaction-history lengths across customers.
c= []
for i in range (0,len(merged_)):
    c.append(len(ast.literal_eval(merged_.TransactionDates[i])))
import matplotlib.pyplot as plt
plt.plot(c)
# NOTE(review): `plt.show` is referenced but not called — missing ().
plt.show
# + colab={"base_uri": "https://localhost:8080/", "height": 320} id="_ssmb31Mm8VF" outputId="e9887008-51c5-4bb7-cebb-b39b307cf6fa"
merged_ = extract_features(df=merged_)
merged_.head(n=2)
plt.plot(merged_.LeftTime)
# NOTE(review): missing () here as well.
plt.show
# + colab={"base_uri": "https://localhost:8080/"} id="Wzf75gOItMI_" outputId="af79185b-29a8-4859-8b7b-fd6fabc71a38"
# Count customers with more than 5 months left vs. the total.
t = 0
for i in range (0,len(merged_)):
    if merged_.LeftTime[i] > 5:
        t= t+1
print(len(merged_) - t, len(merged_))
# + colab={"base_uri": "https://localhost:8080/"} id="db03d204-3626-445d-81be-ebb0fc0439bf" outputId="f9d3b514-528f-413d-8107-7728ee586997" tags=[]
merged_.isna().sum()
# + [markdown] id="295f0b85-5a8c-413b-b12e-1d7e8ca44acb"
# ### Features extraction
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="1ab26365-3c9c-4dcf-b7db-6c80416af245" outputId="8fe3631a-9bc2-4a6c-ac3a-2be3d1efd7a3" tags=[]
#merged_ = extract_features(df=merged_)
merged_.head(n=20)
# + colab={"base_uri": "https://localhost:8080/"} id="rxjdICJuvSBJ" outputId="dc91298a-2760-4ada-94e1-b2ae75a1b671"
merged_['LeftTime'].unique()
# + colab={"base_uri": "https://localhost:8080/", "height": 167} id="5NmrQpayVM1Z" outputId="cc41b8b7-0463-48cc-d3cc-7b854f5c5790"
merged_.head(2)
# + id="twtulIXAWTHy"
# RegistrationDate , UpsellDate , Region , Town , Occupation , ExpectedTermDate , FirstPaymentDate , LastReceivedPayment
# + colab={"base_uri": "https://localhost:8080/"} id="kP2u4K3YUu7Q" outputId="982a00c1-02a2-4ac1-cfac-513fb4cfc60d"
# Print the Python type of each column's second value, to spot object columns.
for i in merged_.columns:
    print(type(merged_[i][1]),i)
# + colab={"base_uri": "https://localhost:8080/"} id="LMXxlE74BxAP" outputId="1032e9f0-6c57-4b59-a818-9fe8821010b9"
# Keep every column after the ID except date-like ones.
selected_features = [c for c in merged_.columns[1:] if "Date" not in c]
selected_features
# + id="d5d96f53-ea8d-47e8-bc9d-446eae63229c" tags=[]
# (Recomputed identically to the cell above.)
selected_features = [c for c in merged_.columns[1:] if "Date" not in c]
xtrain_loan = merged_[selected_features]
ytrain_loan = merged_.Target
## Split
xtrain_loan, xval_loan, ytrain_loan, yval_loan = train_test_split(
    xtrain_loan,
    ytrain_loan,
    test_size=.3,
    random_state=21,
    shuffle=True
)
scaler = StandardScaler()
##Fit scaler
# Fit on the training split only, then apply to both splits (no leakage).
scaler.fit(X=xtrain_loan.values)
## Scale data
xtrain_loan = scaler.transform(X=xtrain_loan.values)
xval_loan = scaler.transform(X=xval_loan.values)
print(f"> Training inputs shape : {xtrain_loan.shape}, Training targets shape : {ytrain_loan.shape}")
print(f"> Validation inputs shape : {xval_loan.shape}, Training targets shape : {yval_loan.shape}")
# + [markdown] id="15c68592-eb0a-44a7-b208-c28c437b6db0"
# ### Train/Val split
# + colab={"base_uri": "https://localhost:8080/", "height": 293} id="qlBYJCmPZAuu" outputId="aa5f29b9-e186-4ca9-a8b5-1e9c26f04eb8"
#train = pad_history_most_recent(df=train, max_len=5)
train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 399} id="nrRvMGEJZ9ZS" outputId="77623206-435a-4358-b187-d43eb0e4d3b8"
# Join the train rows with the engineered features, then keep only the 5 most
# recent payments per customer.
merged_train = pd.merge(left=train, right=pd.concat(objs=[merged_]).fillna(value=0.), on="ID")
merged_train = pad_history_most_recent(df=merged_train, max_len=5)
merged_train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="NFsn7Z509fDw" outputId="35687696-ea2c-4ca9-aadc-83e6c128a457"
plt.plot(merged_train.RemainingPayment)
# + colab={"base_uri": "https://localhost:8080/", "height": 399} id="_rUaG7Ttl8E7" outputId="07525224-a018-41c7-ac15-53a4e1547937"
# func that returns a dummified DataFrame of significant dummies in a given column
def dum_sign(dummy_col, threshold=0.01):
    """One-hot encode ``dummy_col``, lumping rare categories into "others".

    Categories whose share of rows is <= ``threshold`` are replaced by the
    single label "others" before dummification, keeping the dummy matrix
    small for high-cardinality columns.

    Parameters
    ----------
    dummy_col : pd.Series
        Categorical column to encode.
    threshold : float, default 0.01
        Minimum fraction of rows a category needs to keep its own dummy.

    Returns
    -------
    pd.DataFrame
        0/1 dummy columns prefixed with the series name.
    """
    # Copy so the caller's column is never mutated ("removes the bind").
    dummy_col = dummy_col.copy()
    # Ratio of each category over the whole column.  Series.value_counts()
    # replaces the module-level pd.value_counts(), which is deprecated
    # (removed in pandas 2.x); dividing by len() keeps NaNs in the
    # denominator exactly like the original.
    count = dummy_col.value_counts() / len(dummy_col)
    # Rows whose category's ratio exceeds the threshold keep their label...
    mask = dummy_col.isin(count[count > threshold].index)
    # ...everything else is folded into a single "others" bucket.
    dummy_col[~mask] = "others"
    return pd.get_dummies(dummy_col, prefix=dummy_col.name)
# Append significant-dummy encodings for three categorical columns.
merged_train = pd.concat([merged_train, dum_sign(merged_train['DaysOnDeposit'])], axis=1)
merged_train = pd.concat([merged_train, dum_sign(merged_train['Occupation'])], axis=1)
merged_train = pd.concat([merged_train, dum_sign(merged_train['Region'])], axis=1)
merged_train.drop(columns=[ "DaysOnDeposit_others", "Occupation" ], inplace=True)
merged_train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 226} id="XZM6NKV7sYgx" outputId="9dfbbe91-4a3b-4ccb-e5cf-5a63b36ad38b"
# Drop raw date/text columns that have been replaced by engineered features.
merged_train.drop(columns=["RegistrationDate" , "UpsellDate" , "Region" , "Town" , "ExpectedTermDate" , "FirstPaymentDate" , "LastReceivedPayment", "TransactionDates", "PaymentsHistory"], inplace=True)
merged_train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 226} id="1QzFENMOb2cF" outputId="efa99809-527e-47d1-ab89-bba5b2f734f9"
# Pull out the six monthly targets, then remove them from the feature frame.
y_true_1 = merged_train.m1.values
y_true_2 = merged_train.m2.values
y_true_3 = merged_train.m3.values
y_true_4 = merged_train.m4.values
y_true_5 = merged_train.m5.values
y_true_6 = merged_train.m6.values
merged_train.drop(columns=["m1", "m2" ,"m3", "m4", "m5", "m6"], inplace=True)
merged_train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 226} id="37DhVwqSaoqI" outputId="ef40b243-e7d3-4203-afce-8d6d9cf8bd03"
merged_train.drop(columns=["RegisteredInLeapYear" ], inplace=True)
merged_train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 226} id="ZKOxVBnSX7OC" outputId="06aa6ea9-e677-4a74-b238-65145071cb88"
merged_train.drop(columns=["RegisteredAtMonthStart", "RegisteredAtMonthEnd" ], inplace=True)
merged_train.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="DNURweNMEkxM" outputId="621eb76d-eaec-4996-ce00-291d940818ba"
# log1p-style transform to compress the skewed Age / RemainingPayment scales.
merged_train['Age'] = np.log(merged_train['Age']+1)
merged_train['RemainingPayment'] = np.log(merged_train['RemainingPayment']+1)
plt.plot(merged_train['RemainingPayment'])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="HhRrCUAnF5vE" outputId="19d76366-8c90-4946-f2e6-5443a76c9f5f"
plt.plot(merged_train['Age'])
# + id="2JHkAZURclSY"
########################### Model params
#################################################################################
# Fixed LightGBM configuration used by the single lgb.train run below.
lgb_params = {
                    'boosting_type': 'gbdt',
                    'objective': 'tweedie',
                    'tweedie_variance_power': 1.1,
                    'metric': 'rmse',
                    'subsample': 0.5,
                    'subsample_freq': 1,
                    'learning_rate': 0.015,
                    'num_leaves': 2**8-1,
                    'min_data_in_leaf': 2**8-1,
                    'feature_fraction': 0.5,
                    'max_bin': 100,
                    'n_estimators': 1000,
                    'boost_from_average': False,
                    'verbose': 1,
                    'seed' : 1995,
                    'random_state' : 21,
                    'is_unbalance': True,
                    'max_depth' : 7
                }
# + colab={"base_uri": "https://localhost:8080/"} id="GRJuf4DSi4Xe" outputId="e9b44e8e-d4e2-4e68-aec9-92d69475fe8c"
# Hyperparameter grid
param_grid = {
    'boosting_type': ['gbdt', 'dart'],
    'num_leaves': list(range(20, 2**8-1)),
    'learning_rate': list(np.logspace(np.log10(0.005), np.log10(0.5), base = 10, num = 1000)),
    'subsample_for_bin': list(range(20000, 300000, 20000)),
    'min_child_samples': list(range(20, 500, 5)),
    'reg_alpha': list(np.linspace(0, 1)),
    'reg_lambda': list(np.linspace(0, 1)),
    'colsample_bytree': list(np.linspace(0.6, 1, 10)),
    'subsample': list(np.linspace(0.5, 1, 100)),
    'is_unbalance': [True, False]
}
# Cap on search evaluations and CV fold count used by grid_search/objective.
MAX_EVALS = 5
N_FOLDS = 5
grid_results = pd.DataFrame(columns = ['score', 'params', 'iteration'],
                                  index = list(range(MAX_EVALS)))
# Report the full (uncapped) size of the cartesian grid.
com = 1
for x in param_grid.values():
    com *= len(x)
print('There are {} combinations'.format(com))
# + id="_8dnabDgkMcC"
import itertools
def grid_search(param_grid, max_evals = MAX_EVALS):
    """Exhaustive grid search over *param_grid*, capped at *max_evals* evaluations.

    Walks the cartesian product of the grid in order, scoring each
    hyperparameter combination with the module-level ``objective()``.

    Parameters
    ----------
    param_grid : dict[str, list]
        Candidate values per hyperparameter name.
    max_evals : int, default MAX_EVALS
        Maximum number of combinations to evaluate.

    Returns
    -------
    pd.DataFrame with columns ['score', 'params', 'iteration'], sorted with
    the best score on top.
    """
    # Dataframe to store results
    results = pd.DataFrame(columns = ['score', 'params', 'iteration'],
                          index = list(range(max_evals)))
    # https://codereview.stackexchange.com/questions/171173/list-all-possible-permutations-from-a-python-dictionary-of-lists
    keys, values = zip(*param_grid.items())
    # Iterate through every possible combination of hyperparameters.
    # FIX: the original ignored the `max_evals` parameter (it compared
    # against the global MAX_EVALS) and checked `i > MAX_EVALS` *after*
    # incrementing, which ran one evaluation too many.
    for i, v in enumerate(itertools.product(*values)):
        if i >= max_evals:
            break
        # Create a hyperparameter dictionary
        hyperparameters = dict(zip(keys, v))
        # 'goss' boosting cannot use bagging; force the subsample ratio to 1.
        hyperparameters['subsample'] = 1.0 if hyperparameters['boosting_type'] == 'goss' else hyperparameters['subsample']
        # Evaluate the hyperparameters
        results.loc[i, :] = objective(hyperparameters, i)
    # Sort with best score on top
    results.sort_values('score', ascending = False, inplace = True)
    results.reset_index(inplace = True)
    return results
def objective(hyperparameters, iteration):
    """Objective function for grid and random search.

    Runs N_FOLDS-fold LightGBM cross validation on the module-level
    ``lgbtrain_all`` dataset and returns ``[score, hyperparameters,
    iteration]`` where score is the final mean AUC and the returned dict
    carries the early-stopped 'n_estimators'.
    """
    # FIX: work on a shallow copy so the caller's dict is not mutated
    # (the original deleted 'n_estimators' from, and later wrote into,
    # the dict it was handed).
    hyperparameters = dict(hyperparameters)
    # Number of estimators will be found using early stopping
    hyperparameters.pop('n_estimators', None)
    # Perform n_folds cross validation
    cv_results = lgb.cv(hyperparameters, lgbtrain_all, num_boost_round = 100000, nfold = N_FOLDS,
                        early_stopping_rounds = 500, metrics = 'auc', seed = 42)
    # Results to return: final mean AUC and the early-stopped round count.
    score = cv_results['auc-mean'][-1]
    estimators = len(cv_results['auc-mean'])
    hyperparameters['n_estimators'] = estimators
    return [score, hyperparameters, iteration]
# + colab={"base_uri": "https://localhost:8080/"} id="CcbAiWhZkwvq" outputId="c40d7dbf-0ade-41eb-a328-a4c88cf29ce7"
# Run the capped grid search and report the best row (sorted best-first).
grid_results = grid_search(param_grid)
print('The best validation score was {:.5f}'.format(grid_results.loc[0, 'score']))
print('\nThe best hyperparameters were:')
import pprint
pprint.pprint(grid_results.loc[0, 'params'])
# + colab={"base_uri": "https://localhost:8080/"} id="5ZisRsxlihz_" outputId="5ee23767-26ef-4408-aecd-379fb9b2be36"
import lightgbm as lgb
# LightGBM dataset
# Train on month-6 targets; the last 1000 rows are held out for validation.
# NOTE(review): the 27007 row cut and the [:-1000] target cut are assumed to
# line up with the same rows — confirm against the dataset size.
y_true_train_1 = y_true_6[:-1000]
merged_train_1_train = merged_train[:27007]
lgbtrain_all = lgb.Dataset(data=merged_train_1_train.loc[:len(merged_train_1_train),merged_train_1_train.columns[1:]].values,
                    label=y_true_train_1)
model = lgb.train(lgb_params, train_set = lgbtrain_all)
# Predict on the held-out tail (rows 27007 onward).
test_preds = model.predict(merged_train.loc[27007:,merged_train.columns[1:]].values)
test_preds[:50]
# + id="jg0vlcTbf3D5"
from sklearn.experimental import enable_hist_gradient_boosting  # noqa
from sklearn.ensemble import HistGradientBoostingRegressor
# Histogram-based GBM baseline on the month-1 target.
model = HistGradientBoostingRegressor(l2_regularization=1, learning_rate=0.0015,
                              loss='least_squares', max_bins=255,
                              max_depth=7, max_iter=10000, max_leaf_nodes=2**8-1,
                              min_samples_leaf=20, n_iter_no_change=None,
                              random_state=5, scoring=None, tol=1e-07,
                              validation_fraction=0.1, verbose=1,
                              warm_start=False)
y_true_train_1 = y_true_1[:-1000]
merged_train_1_train = merged_train[:27007]
model.fit(merged_train_1_train.loc[:,merged_train.columns[1:]], y_true_train_1)
test_preds_hist = model.predict(merged_train.loc[27007:,merged_train.columns[1:]].values)
test_preds_hist[:50]
# + colab={"base_uri": "https://localhost:8080/"} id="me7TISo5HYnM" outputId="2bbc8a61-3d7d-45e2-8b02-3e724aad0ae6"
from sklearn.ensemble import GradientBoostingRegressor
# Classic GBM with Huber loss on the same month-1 split.
model = GradientBoostingRegressor(n_estimators=2000, max_depth=6, min_samples_leaf=1,
                                  min_samples_split=2, learning_rate=0.012, loss='huber',
                                  random_state=5, verbose = 1)
y_true_train_1 = y_true_1[:-1000]
merged_train_1_train = merged_train[:27007]
model.fit(merged_train_1_train.loc[:,merged_train.columns[1:]], y_true_train_1)
test_preds_gbr = model.predict(merged_train.loc[27007:,merged_train.columns[1:]].values)
test_preds_gbr[:50]
# + colab={"base_uri": "https://localhost:8080/"} id="do_EE8Z17Ldn" outputId="02465ce6-889e-43de-9ca7-c39cc4db5c3f"
# NOTE(review): the validation rows are scaled here although the model above
# was fit on *unscaled* features — confirm this mismatch is intentional.
merged_train_1_val = merged_train[27007:]
merged_train_1_val = scaler.transform(merged_train_1_val.loc[:,merged_train.columns[1:]])
test_preds_gbr = model.predict(merged_train_1_val)
test_preds_gbr[:50]
# + id="Qjwtq5zJHs58"
import xgboost as xgb
# XGBoost baseline on the month-3 target, same 27007 / last-1000 split.
y_true_train_1 = y_true_3[:-1000]
merged_train_1_train = merged_train[:27007]
xgbtrain = xgb.DMatrix(merged_train_1_train.loc[:len(merged_train_1_train),merged_train_1_train.columns[1:]].values, y_true_train_1)
param = {'max_depth':10,
         'subsample':1,
         'min_child_weight':0.4,
         'eta':0.3,
         'num_round':1000,
         'seed':1995,
         'silent':0,
         'eval_metric':'rmse'}  # random parameters
model = xgb.train(param, xgbtrain)
model
xgbpredict = xgb.DMatrix(merged_train.loc[27007:,merged_train.columns[1:]].values)
test_preds_xgb = model.predict(xgbpredict)
# + colab={"base_uri": "https://localhost:8080/"} id="Iy-9Prh_8R_1" outputId="ca2c8116-a884-4880-fb94-8f9bf7a2026b"
# Validation RMSE of the GBR predictions against month-1 truth.
from sklearn.metrics import mean_squared_error
mean_squared_error(y_true_1[27007:], test_preds_gbr, squared=False)
# + id="x8nX_7-bOP-P"
# Scratch notes of the best per-month scores observed so far.
best = ['gb - 1068, lgb - 1063.4','lgb - 376 , gb -390', ' ', '', '', 'gb - 1396']
# + id="CxFzI9S2N5-1"
# Blend lgb predictions with xgb (simple average), score vs month-3 truth.
# NOTE(review): running this cell and the next one both average test_preds
# with test_preds_xgb in place, so the second run averages twice — confirm.
for i in range(0,len(test_preds)):
    test_preds[i] = (test_preds_xgb[i] + test_preds[i])/2
mean_squared_error(y_true_3[27007:], test_preds, squared=False)
# + id="Oy6HJ5QlOfEr"
for i in range(0,len(test_preds)):
    test_preds[i] = (test_preds_xgb[i] + test_preds[i])/2
mean_squared_error(y_true_6[27007:], test_preds, squared=False)
# + id="gK4TR3cBlnR1"
# Cap large predictions at a fixed 1500 (only the first 1000 rows).
for i in range (0,1000):
    if test_preds[i] >1200 :
        test_preds[i] = 1500
# + id="ewqsfKpwmFYJ"
plt.plot(test_preds)
# + id="sH70zTaDcp6e"
# LightGBM dataset
# Rebuild the full-data LightGBM dataset (month-1 labels, named features).
lgbtrain_all = lgb.Dataset(data=merged_train.loc[:,merged_train.columns].values,
                    label=y_true_1,
                    feature_name = merged_train.columns)
cols = merged_train.columns
merged_train.loc[:,cols].values
# + id="KpfvR7ETcp_y"
# + colab={"base_uri": "https://localhost:8080/", "height": 226} id="2RsRX3EguLG_" outputId="57f903d3-766c-4e51-eccb-c8193d0f6ea8"
# Build the test-side feature frame with the same joins/drops as training.
merged_test = pd.merge(left=test, right=pd.concat(objs=[merged_]).fillna(value=0.), on="ID")
merged_test = pad_history_most_recent(df=merged_test, max_len=5)
merged_test.drop(columns=["RegistrationDate" , "UpsellDate" , "Region" , "Town" , "Occupation" , "ExpectedTermDate" , "FirstPaymentDate" , "LastReceivedPayment", "TransactionDates", "PaymentsHistory"], inplace=True)
merged_test.drop(columns=["RegisteredInLeapYear" ], inplace=True)
merged_test.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 226} id="5hpSPtt1JhV5" outputId="4d964d73-c623-419a-af11-efbc7c1eb06a"
merged_test.drop(columns=["RegisteredAtMonthStart", "RegisteredAtMonthEnd" ], inplace=True)
merged_test.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 226} id="Kwj0z2HV3kL-" outputId="8b3f17f5-cc4d-4a5b-b1dd-58d12e780c7f"
# Align the test columns to the exact training column order.
columnsTitles = merged_train.columns
merged_test = merged_test.reindex(columns=columnsTitles)
merged_test.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="wV5xOWAKkX9V" outputId="0df68dfc-f651-4942-e300-25e177dcfe13"
from sklearn.ensemble import GradientBoostingRegressor

def _make_month_gbr():
    # One GBR per forecast month; all six share the same hyperparameters.
    return GradientBoostingRegressor(n_estimators=3000, max_depth=8, min_samples_leaf=1,
                                     min_samples_split=2, learning_rate=0.015, loss='huber',
                                     random_state=5, verbose = 1)

# Fit one model per monthly target (m1..m6).  This loop replaces six
# copy-pasted construct/fit stanzas; model_1..model_6 keep their original
# names because the prediction cells below reference them individually.
_month_targets = [y_true_1, y_true_2, y_true_3, y_true_4, y_true_5, y_true_6]
# First column is the ID and is excluded from the features, as before.
_train_X = merged_train.loc[:, merged_train.columns[1:]]
_month_models = []
for _y in _month_targets:
    _gbr = _make_month_gbr()
    _gbr.fit(_train_X, _y)
    _month_models.append(_gbr)
model_1, model_2, model_3, model_4, model_5, model_6 = _month_models
# + id="2NHGJkXxuLMo"
# Score the test frame with each monthly model.  The feature matrix is
# built once (ID column excluded, as in training) instead of six times.
_test_X = merged_test.loc[:, merged_test.columns[1:]].values
test_preds_1, test_preds_2, test_preds_3, test_preds_4, test_preds_5, test_preds_6 = [
    _m.predict(_test_X) for _m in (model_1, model_2, model_3, model_4, model_5, model_6)
]
# + colab={"base_uri": "https://localhost:8080/"} id="H6OHlybhz3iC" outputId="51a833ed-d772-4be6-d886-d7287b694761"
merged_test.loc[:,merged_test.columns[1:]].values
# + colab={"base_uri": "https://localhost:8080/"} id="9OZ-oO69vu4b" outputId="5bf1c1fe-2888-4796-ec74-34cab447c39e"
np.mean(test_preds_1)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="jn04EK7eJyDi" outputId="68be6d12-bc98-4dc3-f625-82f98453dd70"
# Overlay the six monthly prediction series.
plt.plot(test_preds_1)
plt.plot(test_preds_2)
plt.plot(test_preds_3)
plt.plot(test_preds_4)
plt.plot(test_preds_5)
plt.plot(test_preds_6)
# + colab={"base_uri": "https://localhost:8080/"} id="49kRW61nvu9y" outputId="58ec6464-ab55-4135-b82d-2899fab67929"
# Interleave per-customer: [cust0_m1..m6, cust1_m1..m6, ...] — the order the
# submission file expects (6 rows per customer).
test_preds = []
for i in range (0,len(test_preds_1)):
    test_preds.append(test_preds_1[i])
    test_preds.append(test_preds_2[i])
    test_preds.append(test_preds_3[i])
    test_preds.append(test_preds_4[i])
    test_preds.append(test_preds_5[i])
    test_preds.append(test_preds_6[i])
len(test_preds)
# + colab={"base_uri": "https://localhost:8080/"} id="ANl4bwOM1oSl" outputId="3a453138-6d8f-474d-9120-623f02ae7b14"
# Cap outlier predictions: anything above 4000 is limited to the customer's
# most recent payment (index i/6 maps the flat row back to its customer).
ttt=0
for i in range(0,len(test_preds)):
    if test_preds[i] > 4000:
        test_preds[i] = min(test_preds[i],merged_test["PaymentsHistory_1"][int(i/6)])
        ttt = ttt+1
ttt
# + colab={"base_uri": "https://localhost:8080/", "height": 676} id="uwKcVDoPCpJE" outputId="b56e7399-10be-4c24-9f33-09c2c72d4dcd"
ss["Target"] = test_preds
ss.head(20)
# + id="ce5O9yMQ4AmL"
#ss.to_csv('baseline_xsub.csv', index=False)
# + id="XQmt7urgtrMS"
# + colab={"base_uri": "https://localhost:8080/"} id="iufKQzela00h" outputId="657a358d-5593-4e5d-9f8e-a41a399d8993"
# Stack the six monthly target vectors into an (n_rows, 6) matrix.
y_true_7 = np.concatenate((y_true_1,y_true_2,y_true_3,y_true_4,y_true_5,y_true_6))
y_true_7 = np.reshape(y_true_7, (6,len(merged_train))).T
y_true_7
# + colab={"base_uri": "https://localhost:8080/"} id="3VEUObBM4Ap6" outputId="e2abc612-f3e4-4bac-a39b-8c937207be1e"
y_true_7.shape
# + colab={"base_uri": "https://localhost:8080/"} id="Ix8bm6x0Zzjz" outputId="a81a53c5-c142-4285-9152-8f36850c2afe"
# Flat 1-D concatenation of the six monthly targets.
# BUG FIX: np.append() only accepts (arr, values[, axis]); the original
# passed six arrays positionally, which raises TypeError.  Start from the
# first two targets and fold in the rest, matching the follow-up appends.
y_true_7 = np.append(y_true_1,y_true_2)
y_true_7 = np.append(y_true_7,y_true_3)
y_true_7 = np.append(y_true_7,y_true_4)
y_true_7 = np.append(y_true_7,y_true_5)
y_true_7 = np.append(y_true_7,y_true_6)
y_true_7
# + colab={"base_uri": "https://localhost:8080/"} id="S_hRoS2tZzn3" outputId="153a2132-4712-4ae7-d7b0-a4834e339ae1"
# Re-run the split/scale pipeline, this time on merged_train features with
# the stacked y_true_7 targets (same recipe as the earlier cell on merged_).
selected_features = [c for c in merged_train.columns[1:] if "Date" not in c]
xtrain_loan = merged_train[selected_features]
ytrain_loan = y_true_7
## Split
xtrain_loan, xval_loan, ytrain_loan, yval_loan = train_test_split(
    xtrain_loan,
    ytrain_loan,
    test_size=.3,
    random_state=21,
    shuffle=True
)
scaler = StandardScaler()
##Fit scaler
scaler.fit(X=xtrain_loan.values)
## Scale data
xtrain_loan = scaler.transform(X=xtrain_loan.values)
xval_loan = scaler.transform(X=xval_loan.values)
print(f"> Training inputs shape : {xtrain_loan.shape}, Training targets shape : {ytrain_loan.shape}")
print(f"> Validation inputs shape : {xval_loan.shape}, Training targets shape : {yval_loan.shape}")
# + id="dbno36TBZzrP"
# + id="f7b3c46a-f5ca-4d1f-9327-a991c13040b3" tags=[]
# Pad every payment history to length 41 and build a (N, 1, 41) tensor
# dataset with the six monthly targets.
train, max_len = pad_history(df=train, max_len=41)
xtrain = train.PaymentsHistory.values
ytrain = train[train.columns[-6:]].values
xs = np.array(xtrain.tolist()).reshape((xtrain.shape[0], 1, max_len))
ys = np.array(ytrain.tolist()).reshape((ytrain.shape[0], 1, 6))
train_ds = tf.data.Dataset.from_tensor_slices((xs, ys))
# + id="IsvNcgcd68wN"
merged2 = pd.concat([train, metadata], axis=1, join="inner")
# + id="Cq07wIVz2a5A"
# Re-merge on ID, then append each customer's remaining balance
# (contract value - deposit - payments so far) to their payment history.
merged2 = pd.merge(left=train, right=pd.concat(objs=[metadata]).fillna(value=0.), on="ID")
merged2
xtrain = merged2.PaymentsHistory.values
deposits = merged2.Deposit.values
total_contract_value = merged2.TotalContractValue.values
for i in range (0,len(merged2)):
    xtrain[i].append(total_contract_value[i]-deposits[i]-np.sum(xtrain[i]))
# + id="TLmQWmIP-Ods"
# Histories are now max_len+1 long (extra remaining-balance element).
xs = np.array(xtrain.tolist()).reshape((xtrain.shape[0], 1, max_len+1))
ys = np.array(ytrain.tolist()).reshape((ytrain.shape[0], 1, 6))
train_ds = tf.data.Dataset.from_tensor_slices((xs, ys))
# + id="Ll8n4qb89Drt"
xtrain[2][41]
# + id="9F4eAFLr9Wdi"
i=0
np.sum(xtrain[i])
# + [markdown] id="9801d339-085d-4bce-b4ab-31a7958dd1f0"
# ## Modelling
# + id="5170db2b-2bd0-432d-b42f-3863e555d7b1" tags=[]
def build_model():
    """Two-input Keras model: payment history (41) + customer status vector.

    Outputs a 1-unit loan-status head ("loan_status_out") and a 6-month
    payment forecast head ("out"), trained jointly with weighted MSE.
    """
    payments_inp = keras.Input(shape=(41), name="payments_inputs")
    status_inp = keras.Input(shape=(xtrain_loan.shape[1]), name="status_inputs")
    # Status branch -> scalar loan-status output.
    x1 = keras.layers.Dense(35, activation="sigmoid", name="x1")(status_inp)
    out1 = keras.layers.Dense(1, name="loan_status_out")(x1)
    # Payment-history branch.
    x2 = keras.layers.Dense(20, activation="sigmoid", name="x2")(payments_inp)
    x2_out = keras.layers.Dense(30, activation="sigmoid", name="payment_out")(x2)
    concat = keras.layers.Concatenate(name="concat_layer")([out1, x2_out])
    out2 = keras.layers.Dense(6, name="out")(concat)
    # Model
    model = keras.Model(inputs=[payments_inp, status_inp], outputs=[out1, out2])
    opt = keras.optimizers.Adam(learning_rate=0.01)
    # BUG FIX: loss_weights dict keys must be the *output layer names*
    # ("loan_status_out" and "out"); the original used the Python variable
    # names "out1"/"out2", which do not match any output and are rejected
    # (or silently ignored) by Keras.
    wgt = {"loan_status_out": .7, "out": .3}
    model.compile(
        loss="mean_squared_error",
        optimizer=opt,
        loss_weights=wgt
    )
    model.summary()
    return model
def build_simple_model():
    """Single-input MLP: 35-step payment history -> 6-month payment forecast.

    Three sigmoid hidden layers (30 -> 40 -> 30 units) followed by a linear
    6-unit output; compiled with MSE loss and Adam(lr=0.01).
    """
    history_in = keras.Input(shape=(35), name="payments_inputs")
    hidden = keras.layers.Dense(30, activation="sigmoid", name="x2")(history_in)
    hidden = keras.layers.Dense(40, activation="sigmoid", name="x3")(hidden)
    hidden = keras.layers.Dense(30, activation="sigmoid", name="payment_out")(hidden)
    forecast = keras.layers.Dense(6, name="out")(hidden)
    # Assemble and compile the network.
    net = keras.Model(inputs=[history_in], outputs=[forecast])
    net.compile(
        loss="mean_squared_error",
        optimizer=keras.optimizers.Adam(learning_rate=0.01),
    )
    net.summary()
    return net
def build_status_model():
    """Status-features-only MLP: customer status vector -> 6-month forecast."""
    status_inp = keras.Input(shape=(xtrain_loan.shape[1]), name="status_inputs")
    x = keras.layers.Dense(20, activation="sigmoid", name="x2")(status_inp)
    x_out = keras.layers.Dense(30, activation="sigmoid", name="statut_out")(x)
    # BUG FIX: the 30-unit "statut_out" layer was computed but never used —
    # the output was wired to `x` instead of `x_out`, leaving a dead layer.
    out = keras.layers.Dense(6, name="out")(x_out)
    # Model
    model = keras.Model(inputs=[status_inp], outputs=[out])
    opt = keras.optimizers.Adam(learning_rate=0.01)
    model.compile(
        loss="mean_squared_error",
        optimizer=opt,
    )
    model.summary()
    return model
# + colab={"base_uri": "https://localhost:8080/"} id="132dc356-2e2b-43c9-8dcd-70074b025a12" outputId="21008f17-cbb7-4466-afb8-5cbfe66f03ea" tags=[]
model = build_simple_model()
# + colab={"base_uri": "https://localhost:8080/"} id="G1fTy27JteZB" outputId="cb6fc91b-7bdc-4cd1-cdc7-c89bea411cc0"
tf.compat.v1.disable_eager_execution()
# model
# 1-D CNN over the 35-step payment sequence; Cropping1D layers trim leading
# timesteps between convolutions.  Final Dense(6) + Flatten produces the
# 6-month forecast.
#inputs = tf.keras.layers.Input((6*24*7, 13))
inputs = tf.keras.layers.Input((35,1), name="payments_inputs")
#inputs = tf.keras.layers.BatchNormalization(inputs),
conv1 = tf.keras.layers.Conv1D(200, kernel_size=6, strides=1, activation='relu')(inputs)
#conv1 = tf.keras.layers.BatchNormalization()(conv1),
trim1 = tf.keras.layers.Cropping1D((5, 0))(conv1)
conv2 = tf.keras.layers.Conv1D(50, kernel_size=6, strides=1, activation='relu')(trim1)
trim2 = tf.keras.layers.Cropping1D((1, 0))(conv2)
conv3 = tf.keras.layers.Conv1D(30, kernel_size=6, strides=1, activation='relu')(trim2)
trim3 = tf.keras.layers.Cropping1D((5, 0))(conv3)
conv4 = tf.keras.layers.Conv1D(30, kernel_size=6, strides=1, activation='relu')(trim3)
conv5 = tf.keras.layers.Conv1D(30, kernel_size=4, strides=1, activation='relu')(conv4)
dense = tf.keras.layers.Dense(50, activation='relu')(conv5)
output = tf.keras.layers.Flatten()(tf.keras.layers.Dense(6)(dense))
model = tf.keras.Model(inputs, output)
# Model
#model = tf.keras.Model(inputs=[payments_inp], outputs=[out])
model.compile(
    loss=tf.keras.losses.MeanSquaredError(),
    optimizer=tf.keras.optimizers.Adam(learning_rate=.001),
    metrics=[tf.keras.metrics.RootMeanSquaredError()],
)
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="89b4dd1d-e0c7-4ad7-a2d8-cba8e895e9a0" outputId="7444d6c9-fdf9-4a6a-f5ba-515431bbec8c" tags=[]
# Render the network graph to an image for inspection.
tf.keras.utils.plot_model(
    model,
    to_file="../model.png",
    show_shapes=True
)
# + [markdown] id="ac2abe6d-2c72-4595-a96b-a25875520b03"
# ## Learning phase
# + colab={"base_uri": "https://localhost:8080/"} id="H2zfTzOpfp0t" outputId="32d5d32e-f97b-4237-da5b-8a944be779a6"
# Train on the scaled status features, reshaped to (N, features, 1) channels.
model.fit(xtrain_loan.reshape((xtrain_loan.shape[0], xtrain_loan.shape[1], 1)), ytrain_loan, batch_size = 8, epochs=250, validation_data=(xval_loan.reshape((xval_loan.shape[0], xval_loan.shape[1], 1)), yval_loan))
# + colab={"base_uri": "https://localhost:8080/"} id="BTikDg5YghLb" outputId="99dc0281-c90a-4445-aedd-ce3fb909d0a8"
# Eyeball validation predictions against the true targets.
preds = model.predict(xval_loan.reshape((xval_loan.shape[0], xval_loan.shape[1], 1)))
preds
# + colab={"base_uri": "https://localhost:8080/"} id="21CkDPSPjHcZ" outputId="315a3bee-d7e4-44fd-8724-f24dd59664e3"
yval_loan
# + id="KNvkV5vL0ZKm"
# Alternative tensor layout: (N, max_len+1, 1) histories, (N, 6) targets.
xs = np.array(xtrain.tolist()).reshape((xtrain.shape[0], max_len+1,1))
ys = np.array(ytrain.tolist()).reshape((ytrain.shape[0], 6))
ys.shape
# + id="ixBWuQhmeUlp"
#model.compile(optimizer, loss)
model.fit(xs[:len(xs)], ys[:len(ys)], batch_size=32, epochs=20)
# + id="ORCrygF5-xbi"
#model.fit(train_ds, batch_size=32, epochs=20)
# + [markdown] id="a3a05373-6b29-44fe-8f6b-74cceebab71c"
# ## Eval/Inference time
# + id="bee12663-6697-455e-8cc8-57024ef76a2e" tags=[]
def predict(test:pd.DataFrame):
    """Run the trained model over *test* and return (flat_preds, balances).

    Pads each payment history to 41 steps, merges in metadata, appends the
    remaining contract balance (contract value - deposit - payments so far)
    to each history, and feeds the (N, max_len+1, 1) tensor to the global
    `model`.  Returns the flattened predictions plus the per-customer
    remaining-balance list.
    """
    test, max_len = pad_history(df=test, max_len=41)
    xtest = test.PaymentsHistory.values
    # NOTE(review): this reshaped array is immediately overwritten below —
    # looks like dead code left from an earlier version; confirm and remove.
    xtest = np.array(xtest.tolist()).reshape((xtest.shape[0], 1, max_len))
    merged_test2 = pd.merge(left=test, right=pd.concat(objs=[metadata]).fillna(value=0.), on="ID")
    xtest = merged_test2.PaymentsHistory.values
    deposits = merged_test2.Deposit.values
    total_contract_value = merged_test2.TotalContractValue.values
    # Append each customer's remaining balance to their padded history
    # (mutates the per-row lists in place) and keep it in `w` for rescaling.
    w = []
    for i in range (0,len(merged_test2)):
        xtest[i].append(total_contract_value[i]-deposits[i]-np.sum(xtest[i]))
        w.append(xtest[i][-1])
        #print(111, w, xtest[i][-1])
    print(w)
    xts = np.array(xtest.tolist()).reshape((xtest.shape[0], max_len+1, 1))
    print(xts)
    # NOTE(review): test_ds is built but never used — model.predict consumes
    # the raw array; confirm and remove.
    test_ds = tf.data.Dataset.from_tensor_slices(xts)
    preds = model.predict(xts)
    print(preds)
    assert preds.shape[0] == test.shape[0], f"Shape mismatch, {preds.shape[0]} predictions found while test set has {test.shape[0]} rows"
    return preds.flatten(),w
# + id="h50w4e7lrjQn"
# Scratch cell: inspect the merged test histories and their element types.
merged_test2 = pd.merge(left=test, right=pd.concat(objs=[metadata]).fillna(value=0.), on="ID")
merged_test2
xtest = merged_test2.PaymentsHistory.values
deposits = merged_test2.Deposit.values
total_contract_value = merged_test2.TotalContractValue.values
i=0
#total_contract_value[i]-deposits[i]-np.sum(xtest[i])
xtest = xtest.reshape((xtest.shape[0])).tolist()
#np.sum(xtest)
type(xtest[0])
# + id="wOvg0IBUpQqH"
preds,w = predict(test=test)
# + id="vGxXm8Zkwu__"
w[:10]
# Rescale each customer's 6 monthly predictions so they sum to that
# customer's remaining balance w[k], then clamp each month to [100, 1200].
t = 0
for k in range(0,int(len(preds)/6)):
    sum = 0
    for i in range(0,6):
        sum = sum+preds[t]
        t = t+1
    for i in range(0,6):
        preds[t-6+i] = min(max(100,(w[k]*preds[t-6+i])/sum),1200)
# + id="006d1f05-08b1-4bf9-a3e7-09ef518a3f9a" tags=[]
# BUG FIX: predict() returns a (flattened predictions, remaining-balance)
# tuple; the original assigned the whole tuple to `preds`, so the
# ss["Target"] column below would have received a tuple instead of the
# prediction vector.  Unpack it like the earlier call does.
preds, w = predict(test=test)
# + [markdown] id="a7ad77e2-6494-46f8-b147-4398b862c1af"
# ## Create submission
# + id="b8853a8f-53c3-4d15-ab77-9fef4ddf97aa" tags=[]
ss["Target"] = preds
# + id="a441599d-f0be-4108-90e8-810ad58077fc"
ss.to_csv('baseline_sub.csv', index=False)
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Producing CNTK and TensorFlow models for image classification
#
# In this notebook, we illustrate how one can produce residual networks (ResNets) to classify aerial images based on land use type (developed, forested, cultivated, etc.). We apply transfer learning with Microsoft Cognitive Toolkit (CNTK) and TensorFlow (TF) to adapt pretrained models for our classification use case. The CNTK and TF sections of this notebook can be completed in either order, or even concurrently.
#
# This notebook is part of the [Embarrassingly Parallel Image Classification](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification) git repository. It assumes that a dataset and Azure N-series GPU VM have already been created for model training as described in the previous [Image Set Preparation](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification/blob/master/image_set_preparation.ipynb) notebook. Note that an abbreviated instruction set is mentioned in that notebook for users who would like to employ our sample image set rather than generating their own.
#
# For instructions on applying the trained models to large image sets using Spark, see the [Scoring on Spark](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification/blob/master/scoring_on_spark.ipynb) notebook. It is not necessary to complete this notebook before proceeding to [Scoring on Spark](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification/blob/master/scoring_on_spark.ipynb), as we have provided sample retrained DNNs for your use.
# ## Outline
# - [Prepare the VM and training data](#input)
# - [Clone or download this repository](#repo)
# - [Retrain an AlexNet with Microsoft Cognitive Toolkit (CNTK)](#cntk)
# - [Download the pretrained model](#alexnet)
# - [Update and run the training script](#cntkrun)
# - [Retrain a pretrained ResNet with TensorFlow](#tensorflow)
# - [Download a pretrained model](#tfmodel)
# - [Run the training script](#tfrun)
# - [Next Steps](#nextsteps)
# <a name="input"></a>
# ## Prepare the VM and training data
#
# If you have not done so already, please complete the instructions in the [Image Set Preparation](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification/blob/master/image_set_preparation.ipynb) notebook to prepare an Azure Data Science VM with the Deep Learning Toolkit and the necessary training data for this tutorial. Note that if you will use our provided training and validation images, it is sufficient to complete the "Prepare an Azure Data Science Virtual Machine for image extraction" and "Dataset preparation for deep learning" sections.
# <a name="#repo"></a>
# ## Clone or download this repository
#
# This repository ([Embarrassingly Parallel Image Classification](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification)) contains Python scripts that will be referenced by the code cells below. Clone or download/decompress the repository's contents to a directory on your Azure GPU VM and make note of the path.
# <a name="cntk"></a>
# ## Retrain an AlexNet with Microsoft Cognitive Toolkit (CNTK)
#
# At the time of this writing, the Windows 2016 DSVM comes pre-installed with CNTK 2.0. The CNTK code in this repo is therefore designed for version 2.0, and has not been tested with more recent CNTK versions. You can use the code cell below to check which CNTK version is installed on your DSVM:
import cntk
# Report the installed CNTK version (the code in this repo targets CNTK 2.0).
cntk_version = cntk.__version__
print(cntk_version)
# <a name="alexnet"></a>
# ### Download the pretrained model
# You will need to download [the pretrained AlexNet model](https://mawahstorage.blob.core.windows.net/aerialimageclassification/models/AlexNet_cntk2beta15.model) and save the file to a new directory on your temporary storage drive, `D:\models`.
# <a name="cntkrun"></a>
# ### Update and run the training script
# The `retrain.py` script in the `cntk` subfolder of this repo can be used to retrain an AlexNet for aerial image classification. The script is adapted from the [Object Detection using Fast-R-CNN](https://github.com/Microsoft/CNTK/tree/master/Examples/Image/Detection/FastRCNN) example in the [CNTK](https://github.com/Microsoft/CNTK) repository. This code has been written for single-GPU training: if using a multi-GPU VM, see the [CNTK ResNet/CIFAR10 image classification](https://github.com/Microsoft/CNTK/tree/master/Examples/Image/Classification/ResNet/Python) use case for example code illustrating distributed training.
#
# Run the `retrain.py` script in the `cntk` subfolder from an Anaconda prompt as follows:
# + active=""
# activate py35
# python <path_to_script>\retrain.py --input_map_file D:\balanced_training_set\map.txt --output_dir D:\retrained_models --pretrained_model_file D:\models\AlexNet_cntk2beta15.model
# -
# The training script will load the pretrained AlexNet model, removing the final layer and freezing the weights in all retained layers. A transfer learning model is then created by subtracting an approximate mean value from the RGB channels of the input image, applying the frozen retained layers of AlexNet, and finally applying a dense, trainable last layer. The transfer learning model's output label is given by the index of the maximally-activated node in the final layer, which can be converted to a descriptive string using the mapping in `D:\balanced_training_set\labels.txt` (created previously by the image set preparation notebook).
#
# The training script applies several transforms when each minibatch's images are loaded, including a random crop/rescaling and random colorization. These transforms generate variety in the input set, limiting the degree of overfitting.
#
# For details of the model evaluation process, please see the scoring notebook in the [Embarrassingly Parallel Image Classification](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification) repository.
# <a name="tensorflow"></a>
# ## Retrain a pretrained ResNet with TensorFlow
#
# We made use of the [`tf-slim` API](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/slim) for TensorFlow, which provides pre-trained ResNet models and helpful scripts for retraining and scoring. During training set preparation, we created the [TFRecords](https://www.tensorflow.org/how_tos/reading_data/#file_formats) that the training script will use as input. For more details on the training data, please see the image preparation notebook in the [Embarrassingly Parallel Image Classification](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification) repository.
#
# Our retraining script, `retrain.py` in the `tf` folder of [this repository](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification), is a modified version of `train_image_classifier.py` from the [TensorFlow models repo's slim subdirectory](https://github.com/tensorflow/models/tree/master/slim).
# <a name="tfmodel"></a>
# ### Download a pretrained model
#
# We obtained a 50-layer ResNet pretrained on ImageNet from a link in the [TensorFlow models repo's slim subdirectory](https://github.com/tensorflow/models/tree/master/slim). The pretrained model can be obtained and unpacked with the code snippet below. Note that if you have not already done so, you will first need to [download or clone this repo](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification), then update the variable name `repo_dir` below to point to the repo's root folder.
# +
import urllib.request
import tarfile
import os

# Change this directory to point to the location where you downloaded or cloned this git repo
repo_dir = 'C:\\Users\\ME\\Projects\\spatial_ml\\Embarrassingly-Parallel-Image-Classification'

# Download the pretrained 50-layer ResNet checkpoint into <repo>/tf and unpack it.
tf_dir = os.path.join(repo_dir, 'tf')
os.makedirs(tf_dir, exist_ok=True)
archive_path = os.path.join(tf_dir, 'resnet_v1_50_2016_08_28.tar.gz')
urllib.request.urlretrieve('http://download.tensorflow.org/models/resnet_v1_50_2016_08_28.tar.gz',
                           archive_path)
# NOTE(review): extractall() trusts the archive's member paths; acceptable here
# because the tarball comes from the official TensorFlow download host.
with tarfile.open(archive_path, 'r:gz') as f:
    f.extractall(path=tf_dir)
# Delete the archive once unpacked to free space on the temporary drive.
os.remove(archive_path)
# -
# <a name="tfrun"></a>
# ### Run the training script
#
# We recommend that you run the training script from an Anaconda prompt. The code cell below will help you generate the appropriate command based on your file locations.
# +
# Path where the retrained model and logs will be saved during training.
train_dir = os.path.join(repo_dir, 'tf', 'models')
os.makedirs(train_dir, exist_ok=True)  # idempotent; replaces the exists() pre-check

# Location of the unpacked pretrained model.
checkpoint_path = os.path.join(repo_dir, 'tf', 'resnet_v1_50.ckpt')

# Location of the TFRecords and other files generated during image set preparation.
training_image_dir = 'C:\\Users\\ME\\Dropbox\\HCSproject\\data\\PoC\\app_kalbar_cntk\\tiles\\balanced_training_set'

# Assemble the Anaconda-prompt command that launches the retraining script.
command = '''activate py35
python {0} --train_dir={1} --dataset_name=aerial --dataset_split_name=train --dataset_dir={2} --checkpoint_path={3}
'''.format(os.path.join(repo_dir, 'tf', 'retrain.py'),
           train_dir,
           training_image_dir,
           checkpoint_path)
print(command)
# -
# The training script will load the pretrained ResNet model, freezing the weights for all but the final logits layer. The transfer learning model's output label is taken to be the index of the maximally-activated node in the final layer.
#
# The training script applies several transforms when each minibatch's images are loaded, including subtracting an approximation of the mean values for each channel (red, blue, and green) and randomly cropping/colorizing the image. These transforms generate variety in the input set, limiting the degree of overfitting.
#
# For details of the model evaluation process, please see the scoring notebook in the [Embarrassingly Parallel Image Classification](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification) repository.
# <a name="nextsteps"></a>
# ## Next Steps
#
# Each training step above should take under one hour when performed alone. Please note that the apparent performance of your retrained models on the training set may be significantly better than the models' performance on the independent validation set of images. (We saw ~6% and ~20% classification error on the training set and validation sets, respectively.)
#
# For details on evaluating the trained models, please see the [Scoring on Spark notebook](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification/blob/master/scoring_on_spark.ipynb) in the [Embarrassingly Parallel Image Classification](https://github.com/Azure/Embarrassingly-Parallel-Image-Classification) repository. Note that you can proceed using our provided sample retrained DNNs if you prefer not to wait for model training to complete.
| model_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Конструктор главного README-файла репозитория
#
# Цель – в дополнение к таблице собрать оставшуюся часть страницы, вставив содержимое остальных страниц. Для этого необходимо:
# 1. Считать содержимое таблицы
# 2. Для страниц, содержащих ссылки, скопировать и объединить содержимое README-файлов
# 3. Вывести суммарное содержимое в порядке, соответствующем таблице
# +
import re

# Read the main README; markdown files are written as UTF-8, so make the
# encoding explicit rather than relying on the platform default (PEP 597).
with open('README.md', mode='r', encoding='utf-8') as file:
    lines = file.readlines()

# Build the link tree: section title -> list of linked .md paths.
tree = dict()
for line in lines:
    path_list = re.findall(r'(?<=\()[\w/]*\.md(?=\))', line)
    if path_list:
        # NOTE(review): assumes every line containing a .md link also starts
        # with a "Section |" table cell; the [0] raises IndexError otherwise.
        section = re.findall(r'^[\w\s]*(?= \|)', line)[0]
        tree[section] = path_list

# Emit the merged content in table order (dicts preserve insertion order).
for key in tree:
    print(f'# {key}')
    for path in tree[key]:
        with open(path, encoding='utf-8') as f:
            print(f.read())
# -
| main_readme_constructor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:crime_sim]
# language: python
# name: conda-env-crime_sim-py
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Sparrow0hawk/crime_sim_toolkit/blob/master/examples/crime_sim_poisson_agg_example.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="XgFA3CtTbiwQ"
# # Example notebook for using the crime_sim_toolkit
#
# Follow these steps to load your own data into the crime_sim_toolkit for generating new crime data using the Poisson Sampler function.
#
# ## Notes on running
# For this to run successfully follow these steps:
#
#
# 1. Git clone the repo into the root dir
# 2. Run pip install within the downloaded repo
# 3. Reset the local runtime to allow for pip packages installed to work
# 4. Then run python setup.py install on downloaded repo
#
# The package should now be usable within the notebook.
#
#
# + colab_type="code" id="Aha_nz-SRX56" outputId="4b398c9c-785d-49b1-db10-e4c6f978ff1c" colab={"base_uri": "https://localhost:8080/", "height": 156}
# %cd ~/
# ! git clone --single-branch --branch release-1.5 https://github.com/Sparrow0hawk/crime_sim_toolkit.git
# + colab_type="code" id="J6ezywTXRnbX" outputId="b90f54fe-7631-4956-d525-907bcf01e1b8" colab={"base_uri": "https://localhost:8080/", "height": 451}
# %cd ~/crime_sim_toolkit
# !pip install -q -r requirements.txt
# + [markdown] colab_type="text" id="3Nf4DMs2YRkE"
# # RESTART RUNTIME
# + colab_type="code" id="bD-ZF-Hya4Lq" outputId="44c3fa80-4a3c-4b01-86ca-ae1eaabfe497" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %cd ~/crime_sim_toolkit
# !python setup.py -q install
# + [markdown] colab_type="text" id="AFxm7U4JmFNy"
# ## Loading data
#
# Using data from a [custom download](https://data.police.uk/data/) from Police Data UK we can load that data, convert it to counts by crime_type by LSOA by pseudo Week/Day by passing the directory to the Poisson_sim function.
#
# Sample data used here is included within the [github repository](https://github.com/Sparrow0hawk/crime_sim_toolkit/tree/master/sample_data) and covers January-June from 2017-2019.
#
# Here the aggregate function is set to true. Aggregating data up to the police force area.
# + colab_type="code" id="Ie_tNRIYSFTe" outputId="8d63b903-8d74-46a3-8743-a27de7bac45e" colab={"base_uri": "https://localhost:8080/", "height": 139}
import crime_sim_toolkit.poisson_sim as Poisson_sim
# Load the sample police data and aggregate daily crime counts up to the
# police-force level (aggregate=True); LA_names selects the local authorities
# present in the sample data.
sim_week = Poisson_sim.Poisson_sim(
# because of the data passed these are the LA we want
LA_names=['Kirklees','Calderdale','Leeds','Bradford','Wakefield'],
directory='/root/crime_sim_toolkit/sample_data',
# this can either be Day or Week
timeframe='Day',
aggregate=True)
# + colab_type="code" id="19KyR5ZObINn" outputId="e5e4956d-1520-46b9-c8d8-8f896ef56ad8" colab={"base_uri": "https://localhost:8080/", "height": 206}
# Inspect the aggregated counts table.
sim_week.data.head()
# + [markdown] colab_type="text" id="KDBak64uMxC1"
# ## Sampling new data
#
# Based on the loaded data we can now create an out of bag sample and use the poisson sampler to generate crime reports based on historic reports to be compared to the out of bag sample.
# + colab_type="code" id="Q4b95m66HZoM" outputId="a997d360-0e5c-4bf8-d319-fcd914727ab4" colab={"base_uri": "https://localhost:8080/", "height": 69}
# Create an out-of-bag test set and the matching training split.
test_data = sim_week.out_of_bag_prep(sim_week.data)
train_data = sim_week.oob_train_split(sim_week.data, test_data)
# + colab_type="code" id="TfFrb8MghxuP" outputId="6e2bfa7b-1082-460e-af30-440e4ccc821a" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# %%time
# Sample synthetic counts with the simple Poisson method (no moving window).
datat = sim_week.SimplePoission(train_data, test_data, method='simple', mv_window=0)
# + [markdown] colab_type="text" id="fA-kCqogMkZO"
# ## Model comparison
#
# We can compare our model to the hold out test set using the error_Reporting function.
# + colab_type="code" id="tWtX8gWA_LAZ" outputId="b041d93f-23dc-4664-b8eb-197d304a654a" colab={"base_uri": "https://localhost:8080/", "height": 453}
# Compare the simulated counts against the held-out test data.
diff_table = sim_week.error_Reporting(test_data=test_data, simulated_data=datat)
# + colab_type="code" id="_wFWjYm__hKW" outputId="7ad09103-0144-4871-cb44-f2c346241d4c" colab={"base_uri": "https://localhost:8080/", "height": 238}
diff_table.head()
# + [markdown] colab_type="text" id="kQ_eSFdkL-XM"
# ## Using utils to get counts to reports
#
# You can use utility functions within the toolkit to convert generated count data back into individual reports. And using the populate_offence function include a more specific crime description (randomly allocated from regional data).
# + id="9oXeekh0_rez" ype="code" colab_type="code" colab={}
from crime_sim_toolkit import utils
# + colab_type="code" id="EzBMRSl6_YdF" outputId="19571944-bfcc-46fc-c4f6-ebf9c76cb041" colab={"base_uri": "https://localhost:8080/", "height": 69}
# %%time
# convert the simulated counts into individual crime report rows
synth_counts = utils.counts_to_reports(datat)
# + colab_type="code" id="U4BH3w7g_1GR" outputId="21f6c3f1-438f-426a-956e-71bdc0947145" colab={"base_uri": "https://localhost:8080/", "height": 52}
# %%time
# generate additional crime descriptions (randomly allocated from regional data)
synth_counts_des = utils.populate_offence(synth_counts)
# + colab_type="code" id="dH8VTkNu_obm" outputId="9c849a8a-29c6-46eb-9ee1-d4eeb5c72636" colab={"base_uri": "https://localhost:8080/", "height": 206}
synth_counts_des.head()
# + id="rIvn_7bboRtz" ype="code" colab_type="code" colab={}
# pull out specifically required columns for the agent-based model (ABM)
synth_counts_des = synth_counts_des[['UID','datetime','Crime_description','Crime_type','LSOA_code','Police_force']]
# + [markdown] colab_type="text" id="UX8CL9-RK5kv"
# ## Adding in random hours
#
# The agent-based model implements hours for shift allocation. We'll add a random hour column with a simple assignment.
# + colab_type="code" id="RUZdeNEwK4_O" colab={}
import numpy as np
# Assign each report a uniformly random hour of the day (0-23).
synth_counts_des['Hour'] = np.random.randint(0,24, synth_counts_des.shape[0])
# reorder columns so Hour sits next to datetime
synth_counts_des = synth_counts_des[['UID','datetime','Hour','Crime_description','Crime_type','LSOA_code','Police_force']]
# + colab_type="code" id="nMkAFZ2OMx7g" outputId="d36f4922-cecb-4518-eae0-d366d01c1bc5" colab={"base_uri": "https://localhost:8080/", "height": 206}
synth_counts_des.head()
# + [markdown] colab_type="text" id="MfNVpgUKMbv4"
# ## Outputting from Colabs
#
# Data produced can now be exported as a .csv for use in the agent-based model.
# + colab_type="code" id="5vsuEZ0M_45L" colab={}
# Write the synthetic reports to CSV, then trigger a browser download (Colab only).
synth_counts_des.to_csv('synthetic_day_reports.csv')
# + colab_type="code" id="qZoC_LCBL7Gn" colab={}
from google.colab import files
files.download('synthetic_day_reports.csv')
| examples/crime_sim_poisson_agg_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Exercise set 2**
# ==================
#
#
# >The goal of this exercise is to perform **least squares regression**
# >and to see how we can evaluate our regression.
#
#
# **Exercise 2.1:**
#
# Multiple linear regression solves the equation $\mathbf{Y} = \mathbf{X}\mathbf{B}$ with
# $\mathbf{B} = (\mathbf{X}^\mathrm{T} \mathbf{X})^{-1} \mathbf{X}^\mathrm{T} \mathbf{Y}$
# where the
# dimensions of the data matrix $\mathbf{X}$ is $[N \times M]$.
# Will this solution work when (please explain why/why not):
#
# **(a)** $\det \left( \mathbf{X}^\mathrm{T} \mathbf{X} \right) = 0$?
#
# **(b)** $\det \left( \mathbf{X}^\mathrm{T} \mathbf{X} \right) > 0$?
#
# **(c)** $\det \left( \mathbf{X}^\mathrm{T} \mathbf{X} \right) \neq 0$?
#
# **(d)** The variables in $\mathbf{X}$ are correlated?
#
# **(e)** The columns in $\mathbf{X}$ are orthogonal?
#
# **(f)** The rank of $\mathbf{X}$ is $\frac{\min(N, M)}{2}$?
#
# **(g)** We have more variables than samples/objects (more columns than rows in $\mathbf{X}$)?
#
# **(h)** When we have more samples/objects than variables (more rows than columns in $\mathbf{X}$)?
#
# **Answer 2.1:** (double click here)
# **Exercise 2.2:**
#
# The projection matrix, $\mathbf{H}$, is given by,
#
#
# $\mathbf{H} = \mathbf{X} \left(\mathbf{X}^\mathrm{T} \mathbf{X}\right)^{-1} \mathbf{X}^\mathrm{T}.$
#
# Show that:
#
# **(a)** $\mathbf{H}$ is symmetric.
#
# **(b)** $\mathbf{H}^{k} = \mathbf{H}$ where the integer $k > 0$.
# **Answer 2.2:** (double click here)
# **Exercise 2.3:**
#
# In the regression problem $\mathbf{y} = \mathbf{X}\mathbf{b} + \mathbf{e}$
# we have the least-squares solution assuming that $\mathbf{X}^\mathrm{T} \mathbf{X}$ is
# non-singular. If you are given the information that $\mathbf{X}$ is symmetric
# and non-singular, is there another simpler formula for estimating the regression coefficients?
#
# **Answer 2.3:** (double click here)
# **Exercise 2.4:**
#
# Assume that we have recorded data as shown in Fig. 1
# We want a straight line linear model, but we can see that there is a curving in the data which suggest we need some higher order terms. The following model is suggested:
#
# $\hat{y} = b_0 + b_1 x + b_2 x^2 + b_3 x^3$
#
# Assume that the raw data are contained in the vectors $\mathbf{x}$ and $\mathbf{y}$.
# Show how we can estimate the regression vector $\hat{\mathbf{b}}$.
# 
#
# **Fig. 1:** Example data.
# **Answer 2.4:** (double click here)
# **Exercise 2.5:**
#
# The temperature (°C) is measured continuously over time at a high altitude
# in the atmosphere using a
# weather balloon. Every hour a measurement is made and sent to an on-board computer.
# The measurements are
# shown in Fig. 2 and contained in [the data file](Data/data_exercise_2.txt) (located at 'Data/data_exercise_2.txt').
#
#
# 
#
# **Fig. 2:** Measured temperature.
#
#
# **(a)** Create a Python script which performs polynomial
# fitting to the data using a first, second, third, fourth
# and fifth order polynomial model. **Hint:** Make use of `numpy`, `matplotlib` and `pandas`.
# +
# Your code here:
# -
# **(b)** Plot the fitted curves for the five models to the raw data.
# +
# Your code here
# -
# **(c)** Plot the residual curves for the five models and determine from a visual inspection which polynomial
# model order seem to be correct.
# +
# Your code here
# -
# **Answer 2.5c:** (double click here)
# **(d)** Obtain the sum of squared residuals for each polynomial. Plot this as a function
# of the degree of the polynomial and determine from visual inspection which polynomial
# model order seem to be correct. Compare this with your previous result.
# +
# Your code here
# -
# **Answer 2.5d:** (double click here)
| exercises_2020/02_Exercise_Set_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
from numpy import genfromtxt
import numpy as np
from scipy.interpolate import *
import matplotlib.pyplot as plt
import math
# +
# BOHB on FMNIST validation data: ten repeated runs, one CSV per run.
# fonc() and maxof() are helper functions defined elsewhere in this notebook;
# each processed run is truncated to its first 41 rows.
bohb0 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/BOHB_FMNISTvalid.csv', delimiter=',') #here is c=1
bohb1 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/BOHB_FMNISTvalid1.csv', delimiter=',') #here is c=1
bohb2 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/BOHB_FMNISTvalid2.csv', delimiter=',') #here is c=1
bohb3 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/BOHB_FMNISTvalid3.csv', delimiter=',') #here is c=1
bohb4 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/BOHB_FMNISTvalid4.csv', delimiter=',') #here is c=1
bohb5 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/BOHB_FMNISTvalid5.csv', delimiter=',') #here is c=1
bohb6 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/BOHB_FMNISTvalid6.csv', delimiter=',') #here is c=1
bohb7 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/BOHB_FMNISTvalid7.csv', delimiter=',') #here is c=1
bohb8 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/BOHB_FMNISTvalid8.csv', delimiter=',') #here is c=1
bohb9 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/BOHB_FMNISTvalid9.csv', delimiter=',') #here is c=1
b1 = fonc(bohb1)
b2 = fonc(bohb2)
b3 = fonc(bohb3)
b4 = fonc(bohb4)
b5 = fonc(bohb5)
b6 = fonc(bohb6)
b7 = fonc(bohb7)
b8 = fonc(bohb8)
b9 = fonc(bohb9)
# NOTE(review): run 0 (the unnumbered CSV) is mapped to slot b10 — confirm
# this ordering is intentional.
b10 = fonc(bohb0)
_b1= maxof(b1)[:41,:]
_b2=maxof(b2)[:41,:]
_b3=maxof(b3)[:41,:]
_b4=maxof(b4)[:41,:]
_b5=maxof(b5)[:41,:]
_b6= maxof(b6)[:41,:]
_b7=maxof(b7)[:41,:]
_b8=maxof(b8)[:41,:]
_b9=maxof(b9)[:41,:]
_b10=maxof(b10)[:41,:]
# -
plt.plot((np.concatenate((b1,b2,b3,b4,b5),axis=1)[:,0::2]))
# +
# PB2 on FMNIST: ten repeated runs, one test CSV per run, processed with
# fonc()/maxof() (helpers defined elsewhere) and truncated to the first 80 rows.
pb20 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PB2_FMNIST_test0.csv', delimiter=',') #here is c=1
pb21 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PB2_FMNIST_test1.csv', delimiter=',') #here is c=1
pb22 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PB2_FMNIST_test2.csv', delimiter=',') #here is c=1
pb23 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PB2_FMNIST_test3.csv', delimiter=',') #here is c=1
pb24 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PB2_FMNIST_test4.csv', delimiter=',') #here is c=1
pb25 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PB2_FMNIST_test5.csv', delimiter=',') #here is c=1
pb26 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PB2_FMNIST_test6.csv', delimiter=',') #here is c=1
pb27 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PB2_FMNIST_test7.csv', delimiter=',') #here is c=1
pb28 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PB2_FMNIST_test8.csv', delimiter=',') #here is c=1
pb29 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PB2_FMNIST_test9.csv', delimiter=',') #here is c=1
# NOTE(review): the run mapped to pb10 is read from 'trash.csv' (first 23
# columns only) — confirm this is the intended source file.
pb210 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/trash.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
pb1 = fonc(pb21)
pb2 = fonc(pb22)
pb3 = fonc(pb23)
pb4 = fonc(pb24)
pb5 = fonc(pb25)
pb6 = fonc(pb26)
pb7 = fonc(pb27)
pb8 = fonc(pb28)
pb9 = fonc(pb29)
pb10 = fonc(pb210)
pb0 = fonc(pb20)
pb1= maxof(pb1)[:80,:]
pb2=maxof(pb2)[:80,:]
pb3=maxof(pb3)[:80,:]
pb4=maxof(pb4)[:80,:]
pb5=maxof(pb5)[:80,:]
pb6= maxof(pb6)[:80,:]
pb7=maxof(pb7)[:80,:]
pb8=maxof(pb8)[:80,:]
pb9=maxof(pb9)[:80,:]
pb10=maxof(pb10)[:80,:]
pb0=maxof(pb0)[:80,:]
# -
#plt.plot((np.concatenate((pb10,pb1),axis=1)[:,0::2]))
plt.plot(pb10[:,::2])
# +
# PBT on FMNIST: ten repeated runs, one CSV per run (first 23 columns only),
# processed with fonc()/maxof() (helpers defined elsewhere) and truncated to
# the first 80 rows.
pbt0 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PBT_FMNIST0.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
pbt1 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PBT_FMNIST1.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
# NOTE(review): slot pbt2 loads PBT_FMNIST10.csv while PBT_FMNIST2.csv is
# never read — confirm this substitution is intentional.
pbt2 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PBT_FMNIST10.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
pbt3 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PBT_FMNIST3.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
pbt4 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PBT_FMNIST4.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
pbt5 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PBT_FMNIST5.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
pbt6 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PBT_FMNIST6.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
pbt7 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PBT_FMNIST7.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
pbt8 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PBT_FMNIST8.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
pbt9 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/PBT_FMNIST9.csv', delimiter=',',usecols=np.arange(0,23)) #here is c=1
pbt1 = fonc(pbt1)
pbt2 = fonc(pbt2)
pbt3 = fonc(pbt3)
pbt4 = fonc(pbt4)
pbt5 = fonc(pbt5)
pbt6 = fonc(pbt6)
pbt7 = fonc(pbt7)
pbt8 = fonc(pbt8)
pbt9 = fonc(pbt9)
# NOTE(review): run 0 is mapped to slot pbt10 (mirrors the BOHB block above)
# — confirm intended.
pbt10 = fonc(pbt0)
pbt1= maxof(pbt1)[:80,:]
pbt2=maxof(pbt2)[:80,:]
pbt3=maxof(pbt3)[:80,:]
pbt4=maxof(pbt4)[:80,:]
pbt5=maxof(pbt5)[:80,:]
pbt6= maxof(pbt6)[:80,:]
pbt7=maxof(pbt7)[:80,:]
pbt8=maxof(pbt8)[:80,:]
pbt9=maxof(pbt9)[:80,:]
pbt10=maxof(pbt10)[:80,:]
# -
plt.plot((np.concatenate((pbt1,pbt2,pbt3,pbt4,pbt5),axis=1)[:,0::2]))
# +
# Hyperopt on FMNIST: the CSV holds ten consecutive 80-row runs (row 0 is a
# header line). Read the file ONCE and slice it, instead of re-reading the
# same CSV ten times as before; results are identical. For each run the last
# two score columns are kept and reduced with maxof() (helper defined earlier
# in the notebook).
_hyperopt_all = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/hyperoptFMNIST.csv', delimiter=',')
(hyper1, hyper2, hyper3, hyper4, hyper5,
 hyper6, hyper7, hyper8, hyper9, hyper10) = [
    maxof(_hyperopt_all[1 + 80 * i:81 + 80 * i, -3:-1]) for i in range(10)]
plt.plot((np.concatenate((hyper7,),axis=1)[:,0::2]))
# +
# FSN-V (TPE sampler, no time budget) on FMNIST: one CSV containing ten
# consecutive 80-row runs (row 0 is a header line).
fsnvc_nt = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/FSNV_FMNIST_notimeTPE.csv', delimiter=',')
_runs = [fsnvc_nt[1 + 80 * r:81 + 80 * r, ] for r in range(10)]
(fsnvc_nt1, fsnvc_nt2, fsnvc_nt3, fsnvc_nt4, fsnvc_nt5,
 fsnvc_nt6, fsnvc_nt7, fsnvc_nt8, fsnvc_nt9, fsnvc_nt10) = _runs
# Apply iteration_corector (helper defined elsewhere) to column -5 of every
# run; the return value is discarded, so it presumably works on the view in
# place — TODO confirm.
for _run in _runs:
    iteration_corector(_run[:, -5], 4)
#loss_test_iteration
# Sort each run by the (corrected) iteration column and keep the last two
# columns.
_ordered = [_run[np.argsort(_run[:, -5], axis=-1, kind='stable')][:, -2:]
            for _run in _runs]
(fsnvc_ntc1, fsnvc_ntc2, fsnvc_ntc3, fsnvc_ntc4, fsnvc_ntc5,
 fsnvc_ntc6, fsnvc_ntc7, fsnvc_ntc8, fsnvc_ntc9, fsnvc_ntc10) = _ordered
# maxof() is called on a full view of each chunk; returns are discarded as in
# the original code.
for _chunk in _ordered:
    maxof(_chunk[:, ::])
plt.plot((np.concatenate((fsnvc_ntc9,),axis=1)[:,0::2]))
# +
# FSN-V (GP sampler) on FMNIST: one CSV containing ten consecutive 80-row
# runs (row 0 is a header line); same processing as the TPE block above.
fsnvc_nt = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/FSNV_FMNIST_GP.csv', delimiter=',')
_runs = [fsnvc_nt[1 + 80 * r:81 + 80 * r, ] for r in range(10)]
(fsnvc_nt1, fsnvc_nt2, fsnvc_nt3, fsnvc_nt4, fsnvc_nt5,
 fsnvc_nt6, fsnvc_nt7, fsnvc_nt8, fsnvc_nt9, fsnvc_nt10) = _runs
# Apply iteration_corector (helper defined elsewhere) to column -5 of every
# run; the return value is discarded, so it presumably works on the view in
# place — TODO confirm.
for _run in _runs:
    iteration_corector(_run[:, -5], 4)
#loss_test_iteration
# Sort each run by the (corrected) iteration column and keep the last two
# columns.
_ordered = [_run[np.argsort(_run[:, -5], axis=-1, kind='stable')][:, -2:]
            for _run in _runs]
(fsnvc_ntc1, fsnvc_ntc2, fsnvc_ntc3, fsnvc_ntc4, fsnvc_ntc5,
 fsnvc_ntc6, fsnvc_ntc7, fsnvc_ntc8, fsnvc_ntc9, fsnvc_ntc10) = _ordered
# maxof() is called on a full view of each chunk; returns are discarded as in
# the original code.
for _chunk in _ordered:
    maxof(_chunk[:, ::])
plt.plot((np.concatenate((fsnvc_ntc9,),axis=1)[:,0::2]))
# +
fsnvc_nt_4 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/FSNV_FMNIST_notimeTPEcis1-4.csv', delimiter=',')
fsnvc_nt_41 = fsnvc_nt_4[1:81,]
fsnvc_nt_42 = fsnvc_nt_4[81:161,]
fsnvc_nt_43 = fsnvc_nt_4[161:241,]
fsnvc_nt_44 = fsnvc_nt_4[241:321,]
fsnvc_nt_45 = fsnvc_nt_4[321:401,]
fsnvc_nt_46 = fsnvc_nt_4[401:481,]
fsnvc_nt_47 = fsnvc_nt_4[481:561,]
fsnvc_nt_48 = fsnvc_nt_4[561:641,]
fsnvc_nt_49 = fsnvc_nt_4[641:721,]
fsnvc_nt_410 = fsnvc_nt_4[721:801,]
iteration_corector(fsnvc_nt_41[:,-5],4)
iteration_corector(fsnvc_nt_42[:,-5],4)
iteration_corector(fsnvc_nt_43[:,-5],4)
iteration_corector(fsnvc_nt_44[:,-5],4)
iteration_corector(fsnvc_nt_45[:,-5],4)
iteration_corector(fsnvc_nt_46[:,-5],4)
iteration_corector(fsnvc_nt_47[:,-5],4)
iteration_corector(fsnvc_nt_48[:,-5],4)
iteration_corector(fsnvc_nt_49[:,-5],4)
iteration_corector(fsnvc_nt_410[:,-5],4)
#loss_test_iteration
fsnvc_nt_4c1 = fsnvc_nt_41[np.argsort(fsnvc_nt_41[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_nt_4c2 = fsnvc_nt_42[np.argsort(fsnvc_nt_42[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_nt_4c3 = fsnvc_nt_43[np.argsort(fsnvc_nt_43[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_nt_4c4 = fsnvc_nt_44[np.argsort(fsnvc_nt_44[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_nt_4c5 = fsnvc_nt_45[np.argsort(fsnvc_nt_45[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_nt_4c6 = fsnvc_nt_46[np.argsort(fsnvc_nt_46[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_nt_4c7 = fsnvc_nt_47[np.argsort(fsnvc_nt_47[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_nt_4c8 = fsnvc_nt_48[np.argsort(fsnvc_nt_48[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_nt_4c9 = fsnvc_nt_49[np.argsort(fsnvc_nt_49[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_nt_4c10 = fsnvc_nt_410[np.argsort(fsnvc_nt_410[:,-5], axis = -1,kind='stable')][:,-2:]
maxof(fsnvc_nt_4c1[:,::])
maxof(fsnvc_nt_4c2[:,::])
maxof(fsnvc_nt_4c3[:,::])
maxof(fsnvc_nt_4c4[:,::])
maxof(fsnvc_nt_4c5[:,::])
maxof(fsnvc_nt_4c6[:,::])
maxof(fsnvc_nt_4c7[:,::])
maxof(fsnvc_nt_4c8[:,::])
maxof(fsnvc_nt_4c9[:,::])
maxof(fsnvc_nt_4c10[:,::])
# BUG FIX: fsnvc_nt_4c6 was missing from this concatenation, so the c=4
# statistics were averaged over only 9 of the 10 repetitions.  The parallel
# aggregation cell later in this notebook includes all ten blocks.
[a4,b4,c4,d4] = getall(np.concatenate((fsnvc_nt_4c1,fsnvc_nt_4c2,fsnvc_nt_4c3,fsnvc_nt_4c4,fsnvc_nt_4c5,fsnvc_nt_4c6,
fsnvc_nt_4c7,fsnvc_nt_4c8,fsnvc_nt_4c9,fsnvc_nt_4c10),axis=1))
# -
# +
fsnvc_t_4 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/FSNV_FMNIST_timeTPEcis1-4v2.csv', delimiter=',')
fsnvc_t_4_v2 = genfromtxt('/home/antoine/FSN/data_brut/FMNIST/FSNV_FMNIST_timeTPEcis1-4v1.csv', delimiter=',')
fsnvc_t_41 = fsnvc_t_4[1:81,]
fsnvc_t_42 = fsnvc_t_4[81:161,]
fsnvc_t_43 = fsnvc_t_4[161:241,]
fsnvc_t_44 = fsnvc_t_4[241:321,]
fsnvc_t_45 = fsnvc_t_4_v2[1:81,]
fsnvc_t_46 = fsnvc_t_4_v2[81:161,]
fsnvc_t_47 = fsnvc_t_4_v2[161:241,]
fsnvc_t_48 = fsnvc_t_4_v2[241:321,]
fsnvc_t_49 = fsnvc_t_4_v2[321:401,]
fsnvc_t_410 = fsnvc_t_4_v2[401:481,]
iteration_corector(fsnvc_t_41[:,-5],4)
iteration_corector(fsnvc_t_42[:,-5],4)
iteration_corector(fsnvc_t_43[:,-5],4)
iteration_corector(fsnvc_t_44[:,-5],4)
iteration_corector(fsnvc_t_45[:,-5],4)
iteration_corector(fsnvc_t_46[:,-5],4)
iteration_corector(fsnvc_t_47[:,-5],4)
iteration_corector(fsnvc_t_48[:,-5],4)
iteration_corector(fsnvc_t_49[:,-5],4)
iteration_corector(fsnvc_t_410[:,-5],4)
#loss_test_iteration
fsnvc_t_4c1 = fsnvc_t_41[np.argsort(fsnvc_t_41[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_t_4c2 = fsnvc_t_42[np.argsort(fsnvc_t_42[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_t_4c3 = fsnvc_t_43[np.argsort(fsnvc_t_43[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_t_4c4 = fsnvc_t_44[np.argsort(fsnvc_t_44[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_t_4c5 = fsnvc_t_45[np.argsort(fsnvc_t_45[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_t_4c6 = fsnvc_t_46[np.argsort(fsnvc_t_46[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_t_4c7 = fsnvc_t_47[np.argsort(fsnvc_t_47[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_t_4c8 = fsnvc_t_48[np.argsort(fsnvc_t_48[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_t_4c9 = fsnvc_t_49[np.argsort(fsnvc_t_49[:,-5], axis = -1,kind='stable')][:,-2:]
fsnvc_t_4c10 = fsnvc_t_410[np.argsort(fsnvc_t_410[:,-5], axis = -1,kind='stable')][:,-2:]
maxof(fsnvc_t_4c1[:,::])
maxof(fsnvc_t_4c2[:,::])
maxof(fsnvc_t_4c3[:,::])
maxof(fsnvc_t_4c4[:,::])
maxof(fsnvc_t_4c5[:,::])
maxof(fsnvc_t_4c6[:,::])
maxof(fsnvc_t_4c7[:,::])
maxof(fsnvc_t_4c8[:,::])
maxof(fsnvc_t_4c9[:,::])
maxof(fsnvc_t_4c10[:,::])
plt.plot((np.concatenate((fsnvc_t_4c10,),axis=1)[:,0::2]))
# -
def iteration_corector(liste, num_config):
    """Overwrite each entry of ``liste`` with its iteration index.

    Entry ``i`` becomes ``i // num_config``, i.e. consecutive groups of
    ``num_config`` runs share the same iteration number.  Mutates ``liste``
    (a list or a 1-d numpy slice) in place and returns ``None``.
    """
    for i in range(len(liste)):
        # Integer floor division replaces the math.floor(i / num_config)
        # float round-trip; identical result for non-negative ints.
        liste[i] = i // num_config
def getall(a):
    """Summarise interleaved experiment columns of ``a`` row by row.

    ``a`` holds repetitions side by side, two columns per experiment:
    even columns (0, 2, ...) are one measurement series and odd columns
    (1, 3, ...) the other.

    Returns a 4-tuple:
        (odd-column row means as a list, odd-column row std / 2,
         even-column row means as a list, even-column row std / 2)
    """
    odd = a[:, 1::2]
    even = a[:, 0::2]
    # list(...) replaces the original no-op [b for b in ...] comprehensions.
    return (list(odd.mean(1)), odd.std(1) / 2,
            list(even.mean(1)), even.std(1) / 2)
def maxof(a, n=80):
    """Turn a (runs x 2) [score, companion] array into a best-so-far curve.

    Column 0 is scanned top to bottom keeping the running maximum; column 1
    carries the companion value recorded at the run that achieved the current
    maximum.  Inputs shorter than the curve are padded by repeating the final
    best pair.  Mutates ``a`` row by row and returns the first ``n`` rows.

    ``n`` generalises the previously hard-coded curve length of 80 (the
    notebook's other variant used 120), keeping the default behaviour.
    """
    best = 0
    companion = 0
    for i in range(n + 1):
        if a.shape[0] <= i:
            # Pad missing runs with the last best pair so curves stay comparable.
            a = np.concatenate((a, np.array([[best, companion]])))
            print(a.shape)
        else:
            if best < a[i, 0]:
                companion = a[i, 1]
            best = max(best, a[i, 0])
            a[i, 0] = best
            a[i, 1] = companion
    return a[:n]
# +
[i,j,k,l] = getall(np.concatenate((pb1,pb2,pb3,pb4,pb5,pb6,pb7,pb8,pb9,pb10,pb0),axis=1))
[it,jt,kt,lt] = getall(np.concatenate((pbt1,pbt2,pbt3,pbt4,pbt5,pbt6,pbt7,pbt8,pbt9,pbt10),axis=1))
[m,n,o,p] = getall(np.concatenate((_b1,_b2,_b3,_b4,_b5,_b6,_b7,_b8,_b9,_b10),axis=1))
[e,f,g,h] = getall(np.concatenate((hyper1,hyper2,hyper3,hyper4,hyper5,hyper6,hyper7,hyper8,hyper9,hyper10
),axis=1))
[a1,b1,c1,d1] = getall(np.concatenate((fsnvc_ntc1,fsnvc_ntc2,fsnvc_ntc3,fsnvc_ntc4,fsnvc_ntc5,fsnvc_ntc6,
fsnvc_ntc7,fsnvc_ntc8,fsnvc_ntc9,fsnvc_ntc10),axis=1))
[a4,b4,c4,d4] = getall(np.concatenate((fsnvc_nt_4c1,fsnvc_nt_4c2,fsnvc_nt_4c3,fsnvc_nt_4c4,fsnvc_nt_4c5,fsnvc_nt_4c6,
fsnvc_nt_4c7,fsnvc_nt_4c8,fsnvc_nt_4c9,fsnvc_nt_4c10),axis=1))
[a4t,b4t,c4t,d4t] = getall(np.concatenate((fsnvc_t_4c1,fsnvc_t_4c2,fsnvc_t_4c3,fsnvc_t_4c4,fsnvc_t_4c5,
fsnvc_t_4c6,fsnvc_t_4c7,fsnvc_t_4c8,fsnvc_t_4c9,fsnvc_t_4c10 ),axis=1))
# -
[agp,bgp,cgp,dgp] = getall(np.concatenate((fsnvc_ntc1,fsnvc_ntc2,fsnvc_ntc3,fsnvc_ntc4,fsnvc_ntc5,fsnvc_ntc6,
fsnvc_ntc7,fsnvc_ntc8,fsnvc_ntc9,fsnvc_ntc10),axis=1))
t= np.arange(0,80)*340/80
#np.savetxt('FSVN_FMNIST_c=4,timeGP.csv', np.array([t,agp,bgp,cgp,dgp]).T, delimiter=' ')
# +
axes = plt.gca()
axes.set_ylim(0.715,.88)
# +
t= np.arange(0,80)*335/80
np.savetxt('hyperopt_FMNIST.csv', np.array([t,e,f,g,h]).T, delimiter=' ')
t= np.arange(0,80)*400/80
np.savetxt('PBT_FMNIST.csv', np.array([t,it,jt,kt,lt]).T, delimiter=' ')
t= np.arange(0,41)*220/41
np.savetxt('BOHB_FMNIST.csv', np.array([t,m,n,o,p]).T, delimiter=' ')
t= np.arange(0,80)*405/80
np.savetxt('PB2_FMNIST.csv', np.array([t,i,j,k,l]).T, delimiter=' ')
t= np.arange(0,80)*340/80
np.savetxt('FSVN_FMNIST_c=1.csv', np.array([t,a1,b1,c1,d1]).T, delimiter=' ')
np.savetxt('FSVN_FMNIST_c=4.csv', np.array([t,a4,b4,c4,d4]).T, delimiter=' ')
np.savetxt('FSVN_FMNIST_c=4_time.csv', np.array([t,a4t,b4t,c4t,d4t]).T, delimiter=' ')
# +
cma = genfromtxt('/home/antoine/Téléchargements/FSVN_CMA_Fashion_MNIST.csv', delimiter=',')
temp = []
def maxof(a, n=120):
    """Turn a (runs x 2) [score, companion] array into a best-so-far curve.

    Same running-maximum logic as the earlier ``maxof``, but this CMA variant
    defaults to a 120-point curve.  Column 0 keeps the running maximum;
    column 1 the companion value of the best run so far; short inputs are
    padded with the final best pair.  Mutates ``a`` and returns ``a[:n]``.
    """
    best = 0
    companion = 0
    for i in range(n + 1):
        if a.shape[0] <= i:
            # Pad missing runs with the last best pair.
            a = np.concatenate((a, np.array([[best, companion]])))
            print(a.shape)
        else:
            if best < a[i, 0]:
                companion = a[i, 1]
            best = max(best, a[i, 0])
            a[i, 0] = best
            a[i, 1] = companion
    return a[:n]
# Split the CMA results into 10 experiments of 6*20 = 120 rows each.
# The `+i+1` offsets skip one header/separator line per concatenated CSV
# block; the negation flips minimised losses into maximised scores.
for i in range(10):
    temp.append(-cma[6*20*i+i+1:6*20*(i+1)+i+1,-2:])
# Best-so-far curve (padded/truncated to 120 rows) for each experiment.
cma1 = maxof( temp[0])
cma2 = maxof( temp[1])
cma3 = maxof( temp[2])
cma4 = maxof( temp[3])
cma5 = maxof( temp[4])
cma6 = maxof( temp[5])
cma7 = maxof( temp[6])
cma8 = maxof( temp[7])
cma9 = maxof( temp[8])
cma10 = maxof( temp[9])
# Aggregate across the 10 repetitions: xa/xb are mean and half-std of the
# odd (column-1) series, xc/xd of the even (column-0) series (see getall).
[xa,xb,xc,xd] = getall(np.concatenate((cma1,cma2,cma3,cma4,cma5,cma6,cma7,cma8,cma9,cma10),axis=1))
axes = plt.gca()
plt.plot(xa,c='b')
plt.plot(xc,c='k')
# +
#[m,n,o,p] = getall(np.concatenate((random1,random2,random3,random4,random5),axis=1))
#np.savetxt('our_old.csv', np.array([t,a,b,c,d]).T, delimiter=',')
t= np.arange(0,41)*220/41
plt.plot(t,o,color=(0,1,1),label = "BOHB")
plt.fill_between(t, o-p/2, o+p/2, alpha = 0.2,color=(0,1,1))
t= np.arange(0,80)*335/80
plt.plot(t,g,color=(0,1,0),label = "hyper")
plt.fill_between(t, g-h/2, g+h/2, alpha = 0.2,color=(0,1,0))
t= np.arange(0,80)*405/80
plt.plot(t,k,c=(1,0,0),label = "PB2")
plt.fill_between(t, k-l, k+l, alpha = 0.2,color=(1,0,0))
t= np.arange(0,80)*400/80
plt.plot(t,kt,color=(1,0,1),label = "PBT")
plt.fill_between(t, kt-lt, kt+lt, alpha = 0.2,color=(1,0,1))
t= np.arange(0,80)*340/80
#np.savetxt('hyper.csv', np.array([t,e,f,g,h]).T, delimiter=',')
plt.plot(t,c1,color=(1,.8,0),label = "c=1")
plt.fill_between(t, c1-d1, c1+d1, alpha = 0.4,color=(1,.8,0))
#np.savetxt('hyper.csv', np.array([t,e,f,g,h]).T, delimiter=',')
plt.plot(t,c4,color=(0,0,0),label = "c=4")
plt.fill_between(t, c4-d4, c4+d4, alpha = 0.2,color=(0,0,0))
plt.plot(t,c4t,color=(0,0,1),label = "c=4 time")
plt.fill_between(t, c4t-d4t, c4t+d4t, alpha = 0.2,color=(0,0,1))
plt.plot(t,cgp,color=(.45,.26,0),label = "gp c=4 time")
plt.fill_between(t, cgp-dgp, cgp+dgp, alpha = 0.2,color=(.45,.26,0))
t= np.arange(0,120)*340/80
plt.plot(t,xc,color=(.33,.33,.33),label = "cma c=6")
plt.fill_between(t, xc-xd, xc+xd, alpha = 0.2,color=(.33,.33,.33))
axes = plt.gca()
axes.set_ylim(0.715,.88)
axes.set_ylabel('accuracy',fontsize=16)
axes.set_xlabel('execution time (s)',fontsize=16)
plt.xticks([0,200, 400],fontsize=16)
plt.yticks([.72,.80,.88],fontsize=16)
#axes.set_title('Comparison on val FMNIST with 10 experiment')
#plt.title(r'\underline{Validation loss}:')
#axes.set_yscale('log')
#plt.legend(bbox_to_anchor=(0.999,0.005 ), loc='lower right', borderaxespad=0.0,ncol = 3)
fig = plt.gcf()
fig.set_size_inches(8, 4)
plt.tight_layout()
plt.savefig('FMNIST_val.pdf')
# -
# +
#[m,n,o,p] = getall(np.concatenate((random1,random2,random3,random4,random5),axis=1))
#np.savetxt('our_old.csv', np.array([t,a,b,c,d]).T, delimiter=',')
t= np.arange(0,41)*220/41
plt.plot(t,m,color=(0,1,1),label = "BOHB")
plt.fill_between(t, m-n/2, m+n/2, alpha = 0.2,color=(0,1,1))
t= np.arange(0,80)*335/80
plt.plot(t,e,color=(0,1,0),label = "hyper")
plt.fill_between(t, e-f/2, e+f/2, alpha = 0.2,color=(0,1,0))
t= np.arange(0,80)*405/80
plt.plot(t,i,color=(1,0,0),label = "PB2")
plt.fill_between(t, i-j, i+j, alpha = 0.2,color=(1,0,0))
t= np.arange(0,80)*400/80
plt.plot(t,it,color=(1,0,1),label = "PBT")
plt.fill_between(t, it-jt, it+jt, alpha = 0.2,color=(1,0,1))
t= np.arange(0,80)*340/80
#np.savetxt('hyper.csv', np.array([t,e,f,g,h]).T, delimiter=',')
plt.plot(t,a1,color=(1,.8,0),label = "color=1")
plt.fill_between(t, a1-b1, a1+b1, alpha = 0.4,color=(1,.8,0))
#np.savetxt('hyper.csv', np.array([t,e,f,g,h]).T, delimiter=',')
plt.plot(t,a4,color=(0,0,0),label = "color=4")
plt.fill_between(t, a4-b4, a4+b4, alpha = 0.2,color=(0,0,0))
plt.plot(t,a4t,color=(0,0,1),label = "color=4 time")
plt.fill_between(t, a4t-b4t, a4t+b4t, alpha = 0.2,color=(0,0,1))
plt.plot(t,agp,color=(.45,.26,0),label = "gp color=4 time")
plt.fill_between(t, agp-bgp, agp+bgp, alpha = 0.2,color=(.45,.26,0))
t= np.arange(0,120)*340/80
plt.plot(t,xa,color=(.33,.33,.33),label = "cma color=6")
plt.fill_between(t, xa-xb, xa+xb, alpha = 0.2,color=(.33,.33,.33))
axes = plt.gca()
axes.set_ylim(0.715,.88)
#axes.set_ylabel('accuracy',fontsize=16)
axes.set_xlabel('execution time (s)',fontsize=16)
plt.xticks([0,200, 400],fontsize=16)
plt.yticks([],fontsize=16)
#axes.set_title('Comparison on test FMNIST with 10 experiment')
#plt.title(r'\underline{Validation loss}:')
#axes.set_yscale('log')
#plt.legend()
#plt.legend(bbox_to_anchor=(0.999,0.005 ), loc='lower right', borderaxespad=0.0,ncol = 3)
fig = plt.gcf()
fig.set_size_inches(8, 4)
plt.tight_layout()
plt.savefig('FMNIST_test.pdf')
# +
fig, (ax1, ax2) = plt.subplots(1, 2)
#[m,n,o,p] = getall(np.concatenate((random1,random2,random3,random4,random5),axis=1))
#np.savetxt('our_old.csv', np.array([t,a,b,c,d]).T, delimiter=',')
t= np.arange(0,41)*220/41
ax1.plot(t,o,color=(0,1,1),label = "BOHB")
ax1.fill_between(t, o-p/2, o+p/2, alpha = 0.2,color=(0,1,1))
t= np.arange(0,80)*335/80
ax1.plot(t,g,color=(0,1,0),label = "hyper")
ax1.fill_between(t, g-h/2, g+h/2, alpha = 0.2,color=(0,1,0))
t= np.arange(0,80)*405/80
ax1.plot(t,k,c=(1,0,0),label = "PB2")
ax1.fill_between(t, k-l, k+l, alpha = 0.2,color=(1,0,0))
t= np.arange(0,80)*400/80
ax1.plot(t,kt,color=(1,0,1),label = "PBT")
ax1.fill_between(t, kt-lt, kt+lt, alpha = 0.2,color=(1,0,1))
t= np.arange(0,80)*340/80
#np.savetxt('hyper.csv', np.array([t,e,f,g,h]).T, delimiter=',')
ax1.plot(t,c1,color=(1,.8,0),label = "c=1")
ax1.fill_between(t, c1-d1, c1+d1, alpha = 0.4,color=(1,.8,0))
#np.savetxt('hyper.csv', np.array([t,e,f,g,h]).T, delimiter=',')
ax1.plot(t,c4,color=(0,0,0),label = "c=4")
ax1.fill_between(t, c4-d4, c4+d4, alpha = 0.2,color=(0,0,0))
ax1.plot(t,c4t,color=(0,0,1),label = "c=4 time")
ax1.fill_between(t, c4t-d4t, c4t+d4t, alpha = 0.2,color=(0,0,1))
ax1.plot(t,cgp,color=(.45,.26,0),label = "gp c=4 time")
ax1.fill_between(t, cgp-dgp, cgp+dgp, alpha = 0.2,color=(.45,.26,0))
t= np.arange(0,120)*340/80
ax1.plot(t,xc,color=(.33,.33,.33),label = "cma c=6")
ax1.fill_between(t, xc-xd, xc+xd, alpha = 0.2,color=(.33,.33,.33))
axes = ax1
axes.set_ylim(0.715,.88)
axes.set_xlim(0,500)
axes.set_ylabel('accuracy',fontsize=18)
axes.set_xlabel('execution time (s)',fontsize=18)
ax1.set_xticks([0,200, 400])
ax1.set_xticklabels([0,200, 400],fontsize=18)
ax1.set_yticks([.72,.80,.88])
ax1.set_yticklabels([.72,.80,.88],fontsize=18)
#axes.set_title('Comparison on val FMNIST with 10 experiment')
#ax1.title(r'\underline{Validation loss}:')
#axes.set_yscale('log')
#ax1.legend(bbox_to_anchor=(0.999,0.005 ), loc='lower right', borderaxespad=0.0,ncol = 3)
#[m,n,o,p] = getall(np.concatenate((random1,random2,random3,random4,random5),axis=1))
#np.savetxt('our_old.csv', np.array([t,a,b,c,d]).T, delimiter=',')
t= np.arange(0,41)*220/41
ax2.plot(t,m,color=(0,1,1),label = "BOHB")
ax2.fill_between(t, m-n/2, m+n/2, alpha = 0.2,color=(0,1,1))
t= np.arange(0,80)*335/80
ax2.plot(t,e,color=(0,1,0),label = "hyper")
ax2.fill_between(t, e-f/2, e+f/2, alpha = 0.2,color=(0,1,0))
t= np.arange(0,80)*405/80
ax2.plot(t,i,color=(1,0,0),label = "PB2")
ax2.fill_between(t, i-j, i+j, alpha = 0.2,color=(1,0,0))
t= np.arange(0,80)*400/80
ax2.plot(t,it,color=(1,0,1),label = "PBT")
ax2.fill_between(t, it-jt, it+jt, alpha = 0.2,color=(1,0,1))
t= np.arange(0,80)*340/80
#np.savetxt('hyper.csv', np.array([t,e,f,g,h]).T, delimiter=',')
ax2.plot(t,a1,color=(1,.8,0),label = "color=1")
ax2.fill_between(t, a1-b1, a1+b1, alpha = 0.4,color=(1,.8,0))
#np.savetxt('hyper.csv', np.array([t,e,f,g,h]).T, delimiter=',')
ax2.plot(t,a4,color=(0,0,0),label = "color=4")
ax2.fill_between(t, a4-b4, a4+b4, alpha = 0.2,color=(0,0,0))
ax2.plot(t,a4t,color=(0,0,1),label = "color=4 time")
ax2.fill_between(t, a4t-b4t, a4t+b4t, alpha = 0.2,color=(0,0,1))
ax2.plot(t,agp,color=(.45,.26,0),label = "gp color=4 time")
ax2.fill_between(t, agp-bgp, agp+bgp, alpha = 0.2,color=(.45,.26,0))
t= np.arange(0,120)*340/80
ax2.plot(t,xa,color=(.33,.33,.33),label = "cma color=6")
ax2.fill_between(t, xa-xb, xa+xb, alpha = 0.2,color=(.33,.33,.33))
axes = ax2
axes.set_ylim(0.715,.88)
axes.set_xlim(0,500)
#axes.set_ylabel('accuracy',fontsize=16)
axes.set_xlabel('execution time (s)',fontsize=18)
axes.set_xticks([0,150, 400])
axes.set_xticklabels([0,150, 400],fontsize=18)
axes.set_yticks([])
#axes.set_title('Comparison on test FMNIST with 10 experiment')
#ax1.title(r'\underline{Validation loss}:')
#axes.set_yscale('log')
#ax1.legend()
#ax1.legend(bbox_to_anchor=(0.999,0.005 ), loc='lower right', borderaxespad=0.0,ncol = 3)
fig.tight_layout()
plt.subplots_adjust(wspace=0,left = .06, right = .999,bottom=0.15,top = .95)
fig.set_size_inches(16, 4)
fig.savefig('FMNIST_all.pdf')
# -
# +
import pandas as pd
from numpy import genfromtxt
import numpy as np
from scipy.interpolate import *
import matplotlib.pyplot as plt
def fonc(data):
    """Extract the first two columns of ``data`` as NaN-free (k, 2) pairs."""
    # Iteration numbers (column 5) with NaN entries dropped; kept for the
    # currently disabled sort-by-iteration step below.
    iters = data[:, 5:6]
    iters = iters[~np.isnan(iters)]
    # Keep only fully populated (col 0, col 1) pairs.
    pairs = data[:, 0:2]
    pairs = pairs[~np.isnan(pairs)].reshape(-1, 2)
    # pairs = pairs[np.argsort(iters, axis=-1)]
    return pairs
# -
b1.shape
b5.shape
b10.shape
# +
plt.plot(t, mean_test_3, 'k-',label="test ax")
plt.fill_between(t, mean_test_3-std_test_3, mean_test_3+std_test_3, alpha = 0.2,color='k')
plt.plot(t, mean_val_3, 'b--',label="val ax")
plt.fill_between(t, mean_val_3-std_val_3, mean_val_3+std_val_3, alpha = 0.2,color='b')
axes = plt.gca()
axes.set_ylim(0.78,0.902)
#plt.axis([0, 260,0 ,0.85 ])
plt.legend(bbox_to_anchor=(0.995,0.005 ), loc='lower right', borderaxespad=0.,ncol = 2)
#plt.yscale('log')
axes.set_xlabel('number of trials')
axes.set_ylabel('accuracy')
plt.savefig('new/5.png')
fig = plt.figure()
plt.show()
# +
plt.plot(t, mean_test_4, 'k-',label="test NeverGrad")
plt.fill_between(t, mean_test_4-std_test_4, mean_test_4+std_test_4, alpha = 0.2,color='k')
plt.plot(t, mean_val_4, 'b--',label="val NeverGrad")
plt.fill_between(t, mean_val_4-std_val_4, mean_val_4+std_val_4, alpha = 0.2,color='b')
axes = plt.gca()
axes.set_ylim(0.006,0.018)
#plt.axis([0, 260,0 ,0.85 ])
plt.legend(bbox_to_anchor=(0.995,0.995 ), loc='upper right', borderaxespad=0.,ncol = 2)
#plt.yscale('log')
axes.set_xlabel('number of trials')
axes.set_ylabel('MSE')
plt.savefig('new/124.png')
fig = plt.figure()
plt.show()
# +
plt.plot(t, mean_test_5, 'k-',label="test bohb")
plt.fill_between(t, mean_test_5-std_test_5, mean_test_5+std_test_5, alpha = 0.2,color='k')
plt.plot(t, mean_val_5, 'b--',label="val bohb")
plt.fill_between(t, mean_val_5-std_val_5, mean_val_5+std_val_5, alpha = 0.2,color='b')
axes = plt.gca()
axes.set_ylim(0.85,0.902)
#plt.axis([0, 260,0 ,0.85 ])
plt.legend(bbox_to_anchor=(0.995,0.005 ), loc='lower right', borderaxespad=0.,ncol = 2)
#plt.yscale('log')
axes.set_xlabel('number of trials')
axes.set_ylabel('accuracy')
plt.savefig('new/306.png')
fig = plt.figure()
plt.show()
# +
plt.plot(t, mean_test_6, 'k-',label="test random")
plt.fill_between(t, mean_test_6-std_test_6, mean_test_6+std_test_6, alpha = 0.2,color='k')
plt.plot(t, mean_val_6, 'b--',label="val random")
plt.fill_between(t, mean_val_6-std_val_6, mean_val_6+std_val_6, alpha = 0.2,color='b')
axes = plt.gca()
axes.set_ylim(0.85,0.902)
#plt.axis([0, 260,0 ,0.85 ])
plt.legend(bbox_to_anchor=(0.995,0.005 ), loc='lower right', borderaxespad=0.,ncol = 2)
#plt.yscale('log')
axes.set_xlabel('number of trials')
axes.set_ylabel('MSE')
plt.savefig('new/307.png')
fig = plt.figure()
plt.show()
# +
plt.plot(t, mean_test_7, 'k-',label="test zoopt")
plt.fill_between(t, mean_test_7-std_test_7, mean_test_7+std_test_7, alpha = 0.2,color='k')
plt.plot(t, mean_val_7, 'b--',label="val zoopt")
plt.fill_between(t, mean_val_7-std_val_7, mean_val_7+std_val_7, alpha = 0.2,color='b')
axes = plt.gca()
axes.set_ylim(0.006,0.018)
#plt.axis([0, 260,0 ,0.85 ])
plt.legend(bbox_to_anchor=(0.995,0.995 ), loc='upper right', borderaxespad=0.,ncol = 2)
#plt.yscale('log')
axes.set_xlabel('number of trials')
axes.set_ylabel('MSE')
plt.savefig('new/127.png')
fig = plt.figure()
plt.show()
| Experiments/FMNISTAnalytics.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.2
# language: julia
# name: julia-1.6
# ---
# # Resolution of the elastic problem of a sphere in an infinite matrix
using TensND, LinearAlgebra, SymPy, Tensors, OMEinsum, Rotations
sympy.init_printing(use_unicode=true)
# ## Definition of the coordinate system, base vectors...
Spherical = coorsys_spherical()
θ, ϕ, r = getcoords(Spherical) # Note the order of coordinates not `r, θ, ϕ` but `θ, ϕ, r` so that the frame `(𝐞ᶿ, 𝐞ᵠ, 𝐞ʳ)` coincides with the canonical frame for null angles
𝐞ᶿ, 𝐞ᵠ, 𝐞ʳ = unitvec(Spherical) # "\bfe<TAB>" to write bold `𝐞` and "\^\theta<TAB><TAB>" to write superscript `ᶿ`
𝐱 = getOM(Spherical) # Defines the current position vector in terms of spherical coordinates (ie `𝐱 = r 𝐞ʳ`)
𝐞₁, 𝐞₂, 𝐞₃ = unitvec(coorsys_cartesian()) ;
𝟏, 𝟙, 𝕀, 𝕁, 𝕂 = init_isotropic() # Defines usual isotropic Tensors
k, μ = symbols("k μ", positive = true)
λ = k -2μ/3 ;
# ## General resolution
# ### Hydrostatic loading
#
# \begin{equation}
# \mathbf{u}\underset{||\mathbf{x}||\to\infty}{\sim}\mathbf{E}\cdot\mathbf{x} \textrm{ with } \mathbf{E}=\frac{1}{3}E_v\mathbf{1}
# \end{equation}
#
# The displacement field is naturally searched in a form satisfying the isotropy of the loading ie
# \begin{equation}
# \mathbf{u}=u_r(r)\,\mathbf{e}_r
# \end{equation}
#
u = SymFunction("u", real = true)
𝐮ˢᵖʰ = u(r) * 𝐞ʳ # Note that the vector is in bold font ("\bfu<TAB>") and the component in normal font
𝛆ˢᵖʰ = simplify(SYMGRAD(𝐮ˢᵖʰ, Spherical)) # Strain tensor ("\bfepsilon<TAB>")
𝛔ˢᵖʰ = simplify(λ * tr(𝛆ˢᵖʰ) * 𝟏 + 2μ * 𝛆ˢᵖʰ) # Stress tensor ("\bfsigma<TAB>")
𝐓ˢᵖʰ = simplify(𝛔ˢᵖʰ ⋅ 𝐞ʳ) ;
div𝛔ˢᵖʰ = DIV(𝛔ˢᵖʰ, Spherical) ;
eqˢᵖʰ = factor(simplify(div𝛔ˢᵖʰ ⋅ 𝐞ʳ))
solˢᵖʰ = dsolve(eqˢᵖʰ, u(r)) ;
ûˢᵖʰ = solˢᵖʰ.rhs() ; display(ûˢᵖʰ)
T̂ˢᵖʰ = factor(simplify(subs(𝐓ˢᵖʰ ⋅ 𝐞ʳ, u(r) => ûˢᵖʰ))) ; display(T̂ˢᵖʰ)
# ### Deviatoric loading
#
# \begin{equation}
# \mathbf{u}\underset{||\mathbf{x}||\to\infty}{\sim}\mathbf{E}\cdot\mathbf{x} \textrm{ with } \mathbf{E}=E\,(\mathbf{e}_1\otimes\mathbf{e}_1+\mathbf{e}_2\,\otimes\mathbf{e}_2-2\mathbf{e}_3\,\otimes\mathbf{e}_3)=\mathbb{1}-3\mathbf{e}_3\otimes\mathbf{e}_3
# \end{equation}
#
# Note that such a macroscopic strain tensor induces a symmetry of revolution of the fields, which means in particular that the displacement field is expected to be of the form
#
# \begin{equation}
# \mathbf{u}=u_\theta(\theta,r)\,\mathbf{e}_\theta+u_r(\theta,r)\,\mathbf{e}_r
# \end{equation}
𝐄 = 𝟏 - 3𝐞₃⊗𝐞₃
# Remote trends in θ of the displacement
fᶿ = simplify(𝐞ᶿ ⋅ 𝐄 ⋅ 𝐞ʳ)
fʳ = simplify(𝐞ʳ ⋅ 𝐄 ⋅ 𝐞ʳ) ;
uᶿ = SymFunction("uᶿ", real = true)
uʳ = SymFunction("uʳ", real = true)
𝐮ᵈᵉᵛ = uᶿ(r)* fᶿ * 𝐞ᶿ + uʳ(r)* fʳ * 𝐞ʳ
𝛆ᵈᵉᵛ = simplify(SYMGRAD(𝐮ᵈᵉᵛ, Spherical)) ;
𝛔ᵈᵉᵛ = simplify(λ * tr(𝛆ᵈᵉᵛ) * 𝟏 + 2μ * 𝛆ᵈᵉᵛ)
𝐓ᵈᵉᵛ = simplify(𝛔ᵈᵉᵛ ⋅ 𝐞ʳ) ;
div𝛔ᵈᵉᵛ = DIV(𝛔ᵈᵉᵛ, Spherical) ;
eqᶿᵈᵉᵛ = factor(simplify(div𝛔ᵈᵉᵛ ⋅ 𝐞ᶿ) / fᶿ) ;
eqʳᵈᵉᵛ = factor(simplify(div𝛔ᵈᵉᵛ ⋅ 𝐞ʳ) / fʳ) ;
α, Λ = symbols("α Λ", real = true)
eqᵈᵉᵛ = simplify.(subs.([eqᶿᵈᵉᵛ,eqʳᵈᵉᵛ], uᶿ(r) => r^α, uʳ(r) => Λ*r^α))
αΛ = solve(eqᵈᵉᵛ, [α, Λ])
ûᶿᵈᵉᵛ = sum([Sym("C$(i+2)") * r^αΛ[i][1] for i ∈ 1:length(αΛ)]) ; display(ûᶿᵈᵉᵛ)
ûʳᵈᵉᵛ = sum([Sym("C$(i+2)") * αΛ[i][2] * r^αΛ[i][1] for i ∈ 1:length(αΛ)]) ; display(ûʳᵈᵉᵛ)
T̂ᶿᵈᵉᵛ = factor(simplify(subs(𝐓ᵈᵉᵛ ⋅ 𝐞ᶿ / fᶿ, uᶿ(r) => ûᶿᵈᵉᵛ, uʳ(r) => ûʳᵈᵉᵛ))) ; display(T̂ᶿᵈᵉᵛ)
T̂ʳᵈᵉᵛ = factor(simplify(subs(𝐓ᵈᵉᵛ ⋅ 𝐞ʳ / fʳ, uᶿ(r) => ûᶿᵈᵉᵛ, uʳ(r) => ûʳᵈᵉᵛ))) ; display(T̂ʳᵈᵉᵛ)
| notebooks/sphere.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.metrics import r2_score, mean_squared_error
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#
# Load the no. of cafes in a city dataset for training
#
bhd = datasets.load_boston()
df = pd.DataFrame(bhd.data)
df.columns = ['CRIM', 'ZN', 'INDUS', 'CHAS',
'NOX', 'RM', 'AGE', 'DIS', 'RAD',
'TAX', 'PTRATIO', 'B', 'LSTAT']
df['MEDV'] = bhd.target
#
# Select Avg. No of cafes opened each year as feature
# and fit the model
#
# Reshape feature and target into (n_samples, 1) column vectors.
# BUG FIX: the original used reshape(-5, 1); numpy only documents -1 as the
# "infer this dimension" placeholder, so any other negative value relies on
# unspecified behaviour.
X = df['RM'].to_numpy().reshape(-1, 1)
y = df['MEDV'].to_numpy().reshape(-1, 1)
#
# Create an instance of RANSACRegressor
#
ransac = RANSACRegressor(base_estimator=LinearRegression(),
min_samples=50, max_trials=100,
loss='absolute_loss', random_state=42,
residual_threshold=10)
#
# Fit the model
#
ransac.fit(X, y)
ransac.score(X,y)
# Get the Inlier mask; Create outlier mask
#
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
#
# Create scatter plot for inlier datset
#
plt.figure(figsize=(8, 8))
plt.scatter(X[inlier_mask], y[inlier_mask],
c='black', edgecolor='black',
marker='o', label='Inliers')
#
# Create scatter plot for outlier datset
#
plt.scatter(X[outlier_mask], y[outlier_mask],
c='yellow', edgecolor='black',
marker='s', label='Outliers')
#
# Draw the best fit line
#
line_X = np.arange(3, 10, 1)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
plt.plot(line_X, line_y_ransac, color='black', lw=2)
plt.xlabel('Average number of cafes opened [RM]', fontsize=15)
plt.ylabel('CAFES IN THE CITY [MEDV]', fontsize=15)
plt.legend(loc='upper left', fontsize=12)
plt.show()
ransac.score(X,y)
ransac.predict(X)
| Computer Vision/Tutorials/Data Fitting using RANSAC Algorithm/RANSAC_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Adapted from https://scipython.com/book/chapter-8-scipy/additional-examples/the-sir-epidemic-model/ - Courtesy of SciPy
# Slider from -> https://matplotlib.org/3.1.1/gallery/widgets/slider_demo.html - Courtesy of Matplotlib
# UK COVID Data -> https://ourworldindata.org/coronavirus/country/united-kingdom?country=~GBR (OWID)
import numpy as np
import pandas as pd
from scipy.integrate import odeint
import matplotlib.pyplot as plt, mpld3
from ipywidgets import interactive
# UK COVID case data filtered down to the Wales row for one publication date.
cases = pd.read_csv('data_2020-Aug-22.csv')
cases = cases[cases['areaName']=='Wales']
cases = cases[cases['date']=='22/08/2020']
# Total population assumed by the model.
# NOTE(review): 10e6 is 10 million — confirm this is the intended N for Wales.
N = 10e6
# Initial infected count from the cumulative-cases column; nobody recovered yet.
# NOTE(review): I0 is a one-row pandas Series here, not a scalar — confirm
# odeint handles this as intended.
I0, R0 = cases['cumCasesByPublishDate'], 0
S0 = N - I0 - R0
# beta/gamma start at 0; the interactive sliders below drive them.
beta, gamma = 0, 0
# 60 samples over a 60-day horizon.
t = np.linspace(0, 60, 60)
# The SIR model differential equations.
def sir(y, t, N, beta, gamma):
    """Right-hand side of the SIR ODE system, in scipy.integrate.odeint form.

    ``y`` unpacks to (S, I, R); ``t`` is unused (autonomous system);
    ``N`` is the total population; ``beta``/``gamma`` are the contact and
    recovery rates.  Returns (dS/dt, dI/dt, dR/dt).
    """
    susceptible, infected, recovered = y
    new_infections = beta * susceptible * infected / N
    recoveries = gamma * infected
    # Flow conservation: the three rates sum to zero.
    return -new_infections, new_infections - recoveries, recoveries
# Initial conditions vector
y0 = S0, I0, R0
# Plot the data on three separate curves for S(t), I(t) and R(t)
def sir_interactive_func(beta, gamma):
    """Integrate the SIR model and plot the S/I/R trajectories.

    Called by ipywidgets.interactive with slider-chosen beta/gamma.
    Relies on notebook globals: sir, y0, t, N, odeint, plt.
    """
    # Solve the ODE system over the global time grid t.
    ret = odeint(sir, y0, t, args=(N, beta, gamma))
    S, I, R = ret.T
    fig = plt.figure()
    ax = fig.add_subplot(111, axisbelow=True)
    # Populations plotted in thousands.
    ax.plot(t, S/1000, 'yellow', lw=1.5, label='Susceptible')
    ax.plot(t, I/1000, 'red', lw=1.5, label='Infected')
    ax.plot(t, R/1000, 'blue', lw=1.5, label='Recovered')
    ax.set_xlabel('Time (days)')
    ax.set_ylabel('Population (1000s)')
    # NOTE(review): the `b=` keyword of Axes.grid was removed in newer
    # Matplotlib (use `visible=`) — confirm against the pinned version.
    ax.grid(b=True, which='major', c='#bbbbbb', lw=1, ls='-')
    legend = ax.legend()
    legend.get_frame().set_alpha(0.5)
    #mpld3.save_html(fig, 'wales.html')
interactive_plot = interactive(sir_interactive_func, beta=(0.10,1,0.01), gamma=(0.10,1,0.01))
interactive_plot
| SIR Models for Testing Section in MSc Report/Wales SIR Model - MSc Project (CMM513).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
# !pip install pdpipe
import numpy as np
import pandas as pd
import pdpipe as pdp
import json
# + pycharm={"name": "#%%\n"}
df = pd.read_json('kym.json', orient='records')
# Promote selected keys of the nested `details` dicts to top-level columns.
# Series.apply replaces the original Python loop of chained assignments
# (df.status[i] = ...), which is slow and triggers pandas'
# SettingWithCopyWarning; dict.get leaves missing keys as None instead of
# raising KeyError.
for column in ('status', 'origin', 'year'):
    df[column] = df['details'].apply(lambda d, key=column: d.get(key))
# + pycharm={"name": "#%%\n"}
drop_columns=pdp.ColDrop(columns=["last_update_source","category","template_image_url", "ld", "additional_references", "search_keywords", "meta","details"])
remove_duplicates=pdp.DropDuplicates(["title", "url"])
drop_unconfirmed=pdp.ValKeep(values=["confirmed"], columns=["status"])
pipeline_1=pdp.PdPipeline([drop_columns,remove_duplicates,drop_unconfirmed])
df_2=pipeline_1(df)
# + pycharm={"name": "#%%\n"}
a=df_2.columns.values.tolist()
b=df_2.values.tolist()
b.insert(0,a)
with open('cleaned_1.json', 'w', encoding='utf-8') as f:
json.dump(b, f, ensure_ascii=True)
| notebooks/Cleansing_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib notebook
import subprocess
import os
import matplotlib.pyplot as plt
import time
from mpl_toolkits.mplot3d import Axes3D
exe = "PFT.x"
d = '/home/jobasha/Documents/Dropbox/Repositories/planningfortransparency/problems/grid/'
output_dir = 'results/'
output_file = 'results.txt'
# +
object_vector_l = []
time_first_action_vector_l = []
domain = d + "domain.pddl"
threshold_1_v = []
threshold_2_v = []
threshold_3_v = []
for problem_n in os.listdir(d):
# print(problem_n)
# try:
if(not problem_n.startswith("problem")):
continue
problem_file = d + problem_n
numbers = problem_n.split('_')
n = int(numbers[1])
m = int(numbers[2])
h = int(numbers[3])
objects = n*m
hype = d + "hyp_" + str(n) + "_" + str(m) + "_" + str(h) +".dat"
# print("\t" +problem_file)
problem = problem_file
cmd = ["time", "python"]
flags = [ "/home/jobasha/Documents/PhD/Planners/FastDownward/fast-downward.py","--alias", "seq-sat-lama-2011", domain, problem]
pipe = subprocess.Popen(
cmd + flags,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#print(''.join(pipe.stderr.readlines()))
answer, stderr = pipe.communicate()
print(answer)
print(stderr)
answer1 = str(stderr).split("\'")
answer2 = answer1[1].split("system")
time_taken = sum(list(map(float, answer2[0].split("user"))))
file = open("sas_plan.1","r")
counter = 0
for lines in file :
counter = counter + 1
#print(lines)
cmdPR = ["/home/jobasha/Documents/Dropbox/Repositories/planningfortransparency/utilities/plan_recognition/./PRM.x"]
flagsPR = [domain, problem, hype, "sas_plan.1"]
#print(domain, problem, hype, "sas_plan.1")
pipePR = subprocess.Popen(
cmdPR + flagsPR,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#print(''.join(pipe.stderr.readlines()))
answerPR, stderrPR = pipePR.communicate()
print(answerPR.decode())
#print(stderrPR.decode())
lines = str(answerPR).split("\\n")[1:-1]
#print(lines)
threshold_1_v.append(float(lines[0]))
threshold_2_v.append(float(lines[1]))
threshold_3_v.append(float(lines[2]))
print("Problem - " + str(n) + "x" + str(m) + ", Cost: " + str(counter) + ", " + str(time_taken) )
dst = "../lama/" + problem_n + ".res"
os.rename("sas_plan.1", dst)
os.remove("output")
os.remove("output.sas")
break
if(counter != 1):
time_first_action_vector_l.append(time_taken)
object_vector_l.append(objects)
print(sum(threshold_1_v) / len(threshold_1_v))
print(sum(threshold_2_v) / len(threshold_2_v))
print(sum(threshold_3_v) / len(threshold_3_v))
# except:
# continue
# -
plt.figure(3)
plt.ylim([-1,7])
plt.xlabel('Objects')
plt.ylabel('Time for Plan (s)')
plt.scatter(object_vector_l, time_first_action_vector_l)
| on_line_planning/pt_paper/eval/backup/graphing_pr_with_mcts_multi-Copy4.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.1
# language: julia
# name: julia-1.7
# ---
# # Differentiation
#
# We now get to our first computational problem: given a function, how can we approximate its derivative at a
# point? Before we begin, we must be clear what a "function" is. Consider three possible scenarios:
#
# 1. _Black-box function_: Consider a floating-point valued function $f^{\rm FP} : D \rightarrow F$ where
# $D \subset F \equiv F_{\sigma,Q,S}$
# (e.g., we are given a double precision function that takes in a `Float64` and returns another `Float64`)
# which we only know _pointwise_. This is the situation if we have a function that relies on a compiled C library,
# which composes floating point arithmetic operations.
# Since $F$ is a discrete set such an $f^{\rm FP}$ cannot be differentiable in a rigorous way,
# therefore we need to assume that $f^{\rm FP}$ approximates a differentiable function $f$ with controlled
# error in order to state anything precise.
# 2. _Generic function_: Consider a function that is a formula (or, equivalently, a _piece of code_)
#    that we can evaluate on arbitrary types, including
# custom types that we create. An example is a polynomial:
# $
# p(x) = p_0 + p_1 x + \cdots + p_n x^n
# $
# which can be evaluated for $x$ in the reals, complexes, or any other ring.
# More generally, if we have a function defined in Julia that does not call any
# C libraries it can be evaluated on different types.
# For analysis we typically consider both a differentiable function $f : D \rightarrow {\mathbb R}$ for
# $D ⊂ {\mathbb R}$,
# which would be what one would have if we could evaluate a function exactly using real arithmetic, and
# $f^{\rm FP} : D \cap F \rightarrow F$, which is what we actually compute when evaluating the function using
# floating point arithmetic.
# 3. _Graph function_: The function is built by composing different basic "kernels" with known differentiability properties.
#    We won't consider this situation in this module, though it is the model used by Python machine learning toolboxes
# like [PyTorch](https://pytorch.org) and [TensorFlow](http://tensorflow.org).
#
# We discuss the following techniques:
#
# 1. Finite-differences: Use the definition of a derivative that one learns in calculus to approximate its value.
# Unfortunately, the round-off errors of floating point arithmetic typically limit its accuracy.
# 2. Dual numbers (forward-mode automatic differentiation): Define a special type that when applied to a function
#    computes its derivative. Mathematically, this uses _dual numbers_, which are analogous to complex numbers.
#
# Note there are other techniques for differentiation that we don't discuss:
#
# 1. Symbolic differentiation: A tree is built representing a formula which is differentiated using
# the product and chain rule.
# 2. Adjoints and back-propagation (reverse-mode automatic differentiation): This is similar to
# symbolic differentiation but automated, to build up
# a tape of operations that tracks interdependencies.
# It's outside the scope of this module but is computationally preferred for computing gradients
# of large dimensional functions which is critical in machine learning.
# 4. Interpolation and differentiation: We can also differentiate functions _globally_, that is, in an interval instead of
# only a single point, which will be discussed later in the module.
using ColorBitstring
# ## 1. Finite-differences
#
# The definition
# $$
# f'(x) = \lim_{h \rightarrow 0} {f(x+h) - f(x) \over h}
# $$
# tells us that
# $$
# f'(x) \approx {f(x+h) - f(x) \over h}
# $$
# provided that $h$ is sufficiently small.
#
# It's important to note that approximation uses only the _black-box_
# notion of a function but to obtain bounds we need more.
#
# If we know a bound on $f''(x)$ then Taylor's theorem tells us a precise bound:
#
# **Proposition**
# The error in approximating the derivative using finite differences is
# $$
# \left|f'(x) - {f(x+h) - f(x) \over h}\right| \leq {M \over 2} h
# $$
# where $M = \sup_{x \leq t \leq x+h} |f''(t)|$.
#
# **Proof**
# Follows immediately from Taylor's theorem:
# $$
# f(x+h) = f(x) + f'(x) h + {f''(t) \over 2} h^2
# $$
# for some $x ≤ t ≤ x+h$.
#
# ◼️
#
#
#
#
# There are also alternative versions of finite differences. Leftside finite-differences:
# $$
# f'(x) ≈ {f(x) - f(x-h) \over h}
# $$
# and central differences:
# $$
# f'(x) ≈ {f(x + h) - f(x - h) \over 2h}
# $$
# Composing these approximations is useful for higher-order derivatives as we
# discuss in the problem sheet.
#
# Note this is assuming _real arithmetic_, the answer is drastically
# different with _floating point arithmetic_.
#
# ### Does finite-differences work with floating point arithmetic?
#
#
#
# Let's try differentiating two simple polynomials $f(x) = 1 + x + x^2$ and $g(x) = 1 + x/3 + x^2$
# by applying the finite-difference approximation to their floating point implementations
# $f^{\rm FP}$ and $g^{\rm FP}$:
f = x -> 1 + x + x^2 # we treat f and g as black-boxes
g = x -> 1 + x/3 + x^2
h = 0.000001 # finite-difference step
(f(h)-f(0))/h, (g(h)-g(0))/h # forward differences approximating f'(0) = 1 and g'(0) = 1/3
# Both seem to roughly approximate the true derivatives ($1$ and $1/3$).
# We can do a plot to see how fast the error goes down as we let $h$ become small.
using Plots
h = 2.0 .^ (0:-1:-60) # step sizes h = 2^(-n) for n = 0,…,60: [1,1/2,1/4,…]
nanabs = x -> iszero(x) ? NaN : abs(x) # map exact zeros to NaN to avoid 0's in log scale plot
# Absolute error of the forward difference versus the true derivatives 1 and 1/3:
plot(nanabs.((f.(h) .- f(0)) ./ h .- 1); yscale=:log10, title="convergence of derivatives, h = 2^(-n)", label="f", legend=:bottomleft)
plot!(abs.((g.(h) .- g(0)) ./ h .- 1/3); yscale=:log10, label = "g")
# In the case of $f$ it is a success: we approximate the true derivative _exactly_ provided we take $h = 2^{-n}$
# for $26 < n \leq 52$.
# But for $g$ it is a huge failure: the approximation starts to converge, but then diverges exponentially fast, before levelling off!
#
# It is clear that $f$ is extremely special. Most functions will behave like $g$, and had we not taken
# $h$ to be a power of two we also see divergence for differentiating $f$:
h = 10.0 .^ (0:-1:-16) # steps h = 10^(-n), no longer powers of two: [1,1/10,1/100,…]
# Same error plot as before but with decimal steps; now f also shows divergence:
plot(abs.((f.(h) .- f(0)) ./ h .- 1); yscale=:log10, title="convergence of derivatives, h = 10^(-n)", label="f", legend=:bottomleft)
plot!(abs.((g.(h) .- g(0)) ./ h .- 1/3); yscale=:log10, label = "g")
# For these two simple
# examples, we can understand why we see very different behaviour.
#
#
# **Example (convergence(?) of finite difference)** Consider differentiating $f(x) = 1 + x + x^2$ at 0 with $h = 2^{-n}$.
# We consider 3 different cases with different behaviour, where $S$ is the number of significand bits:
#
# 1. $0 ≤ n ≤ S/2$
# 2. $S/2 < n ≤ S$
# 3. $S ≤ n$
#
# Note that $f^{\rm FP}(0) = f(0) = 1$. Thus we wish to understand the error in approximating $f'(0) = 1$ by
# $$
# (f^{\rm FP}(h) ⊖ 1) ⊘ h\qquad\hbox{where}\qquad f^{\rm FP}(x) = 1 ⊕ x ⊕ x ⊗ x.
# $$
#
# _Case 1_ ($0 ≤ n ≤ S/2$): note that $f^{\rm FP}(h) = f(h) = 1 + 2^{-n} + 2^{-2n}$
# as each computation is precisely a floating point number (hence no rounding). We can see this in half-precision,
# with $n = 3$ we have a 1 in the 3rd and 6th decimal place:
S = 10 # Float16 has 10 significand (fraction) bits
n = 3 # case 1 requires n ≤ S/2 = 5
h = Float16(2)^(-n)
printbits(f(h)) # f(h) = 1 + 2^-3 + 2^-6 is exactly representable: ones in the 3rd and 6th fraction bits
# Subtracting 1 and dividing by $h$ will also be exact, hence we get
# $$
# (f^{\rm FP}(h) ⊖ 1) ⊘ h = 1 + 2^{-n}
# $$
# which shows exponential convergence.
#
# _Case 2_ ($S/2 < n ≤ S$): Now we have (using round-to-nearest)
# $$
# f^{\rm FP}(h) = (1 + 2^{-n}) ⊕ 2^{-2n} = 1 + 2^{-n}
# $$
# Then
# $$
# (f^{\rm FP}(h) ⊖ 1) ⊘ h = 1 = f'(0)
# $$
# We have actually performed better than true real arithmetic and converged without a limit!
#
# _Case 3_ ($n > S$): If we take $n$ too large, then $1 ⊕ h = 1$, hence $f^{\rm FP}(h) = 1$ and
# $$
# (f^{\rm FP}(h) ⊖ 1) ⊘ h = 0 \neq f'(0)
# $$
#
# **Example (divergence of finite difference)** Consider differentiating $g(x) = 1 + x/3 + x^2$ at 0 with $h = 2^{-n}$
# and assume $n$ is even for simplicity and consider half-precision with $S = 10$.
# Note that $g^{\rm FP}(0) = g(0) = 1$.
# Recall
# $$
# h ⊘ 3 = 2^{-n-2} * (1.0101010101)_2
# $$
# Note we lose two bits each time in the computation of $1 ⊕ (h ⊘ 3)$:
# Bits of 1 ⊕ (h ⊘ 3) for increasing n: each halving of h shifts h/3 right, so two
# more bits of the repeating pattern (1.0101…)_2 fall off the end of the significand.
n = 0; h = Float16(2)^(-n); printlnbits(1 + h/3)
n = 2; h = Float16(2)^(-n); printlnbits(1 + h/3)
n = 4; h = Float16(2)^(-n); printlnbits(1 + h/3)
n = 8; h = Float16(2)^(-n); printlnbits(1 + h/3)
# It follows if $S/2 < n < S$ that
# $$
# 1 ⊕ (h ⊘ 3) = 1 + h/3 - 2^{-10}/3
# $$
# Therefore
# $$
# (g^{\rm FP}(h) ⊖ 1) ⊘ h = 1/3 - 2^{n-10}/3
# $$
# Thus the error grows exponentially with $n$.
#
#
# If $n ≥ S$ then $1 ⊕ (h ⊘ 3) = 1$ and we have
# $$
# (g^{\rm FP}(h) ⊖ 1) ⊘ h = 0
# $$
#
#
# ### Bounding the error
#
#
# We can bound the error using the bounds on floating point arithmetic.
#
# **Theorem (finite-difference error bound)** Let $f$ be twice-differentiable in a neighbourhood of $x$ and assume that
# $f^{\rm FP}(x) = f(x) + δ_x^f$ has uniform absolute accuracy in that neighbourhood, that is:
# $$
# |δ_x^f| \leq c ϵ_{\rm m}
# $$
# for a fixed constant $c$. Assume for simplicity $h = 2^{-n}$ where $n \leq S$ and $|x| \leq 1$.
# Then the finite-difference approximation satisfies
# $$
# (f^{\rm FP}(x + h) ⊖ f^{\rm FP}(x)) ⊘ h = f'(x) + δ_{x,h}^{\rm FD}
# $$
# where
# $$
# |δ_{x,h}^{\rm FD}| \leq {|f'(x)| \over 2} ϵ_{\rm m} + M h + {4c ϵ_{\rm m} \over h}
# $$
# for $M = \sup_{x \leq t \leq x+h} |f''(t)|$.
#
# **Proof**
#
# We have (noting by our assumptions $x ⊕ h = x + h$ and that dividing by $h$ will only change the exponent so
# is exact)
# $$
# \begin{align*}
# (f^{\rm FP}(x + h) ⊖ f^{\rm FP}(x)) ⊘ h &= {f(x + h) + δ^f_{x+h} - f(x) - δ^f_x \over h} (1 + δ_1) \\
# &= {f(x+h) - f(x) \over h} (1 + δ_1) + {δ^f_{x+h}- δ^f_x \over h} (1 + δ_1)
# \end{align*}
# $$
# where $|δ_1| \leq {ϵ_{\rm m} / 2}$. Applying Taylor's theorem we get
# $$
# (f^{\rm FP}(x + h) ⊖ f^{\rm FP}(x)) ⊘ h = f'(x) + \underbrace{f'(x) δ_1 + {f''(t) \over 2} h (1 + \delta_1) + {δ^f_{x+h}- δ^f_x \over h} (1 + δ_1)}_{δ_{x,h}^{\rm FD}}
# $$
# The bound then follows, using the very pessimistic bound $|1 + δ_1| \leq 2$.
#
# ∎
#
# The three-terms of this bound tell us a story: the first term is a fixed (small) error, the second term tends to zero
# as $h \rightarrow 0$, while the last term grows like $ϵ_{\rm m}/h$ as $h \rightarrow 0$. Thus we observe convergence
# while the second term dominates, until the last term takes over.
# Of course, a bad upper bound is not the same as a proof that something grows, but it is a good indication of
# what happens _in general_ and suffices to motivate the following heuristic to balance the two sources of errors:
#
#
# **Heuristic (finite-difference with floating-point step)** Choose $h$ proportional to $\sqrt{ϵ_{\rm m}}$
# in finite-differences.
#
# In the case of double precision $\sqrt{ϵ_{\rm m}} ≈ 1.5\times 10^{-8}$, which is close to when the observed error begins to increase
# in our examples.
#
#
#
# **Remark** While finite differences is of debatable utility for computing derivatives, it is extremely effective
# in building methods for solving differential equations, as we shall see later. It is also very useful as a "sanity check"
# if one wants something to compare with for other numerical methods for differentiation.
#
# ## 2. Dual numbers (Forward-mode automatic differentiation)
#
# Automatic differentiation consists of applying functions to special types that determine the derivatives.
# Here we do so via _dual numbers_.
#
# **Definition (Dual numbers)** Dual numbers ${\mathbb D}$ are a commutative ring over the reals
# generated by $1$ and $ϵ$ such that $ϵ^2 = 0$.
# Dual numbers are typically written as $a + b ϵ$ where $a$ and $b$ are real.
#
# This is very much analogous to complex numbers, which are a field generated by $1$ and ${\rm i}$ such that
# ${\rm i}^2 = -1$. Compare multiplication of each number type:
# $$
# \begin{align*}
# (a + b {\rm i}) (c + d {\rm i}) &= ac + (bc + ad) {\rm i} + bd {\rm i}^2 = ac -bd + (bc + ad) {\rm i} \\
# (a + b ϵ) (c + d ϵ) &= ac + (bc + ad) ϵ + bd ϵ^2 = ac + (bc + ad) ϵ
# \end{align*}
# $$
# And just as we view ${\mathbb R} \subset {\mathbb C}$ by equating $a \in {\mathbb R}$ with $a + 0{\rm i} \in {\mathbb C}$,
# we can view ${\mathbb R} \subset {\mathbb D}$ by equating $a \in {\mathbb R}$ with $a + 0{\rm ϵ} \in {\mathbb D}$.
#
#
#
#
# ### Connection with differentiation
#
# Applying a polynomial to a dual number $a + b ϵ$ tells us the derivative at $a$:
#
# **Theorem (polynomials on dual numbers)** Suppose $p$ is a polynomial. Then
# $$
# p(a + b ϵ) = p(a) + b p'(a) ϵ
# $$
#
# **Proof**
#
# It suffices to consider $p(x) = x^n$ for $n \geq 1$ as other polynomials follow from linearity. We proceed by induction:
# The case $n = 1$ is trivial. For $n > 1$ we have
# $$
# (a + b ϵ)^n = (a + b ϵ) (a + b ϵ)^{n-1} = (a + b ϵ) (a^{n-1} + (n-1) b a^{n-2} ϵ) = a^n + b n a^{n-1} ϵ.
# $$
#
# ∎
#
# We can extend real-valued differentiable functions to dual numbers in a similar manner.
# First, consider a standard function with a Taylor series (e.g. ${\rm cos}$, ${\rm sin}$, ${\rm exp}$, etc.)
# $$
# f(x) = \sum_{k=0}^∞ f_k x^k
# $$
# so that $a$ is inside the radius of convergence. This leads naturally to a definition on dual numbers:
# $$
# \begin{align*}
# f(a + b ϵ) &= \sum_{k=0}^∞ f_k (a + b ϵ)^k = \sum_{k=0}^∞ f_k (a^k + k a^{k-1} b ϵ) = \sum_{k=0}^∞ f_k a^k + \sum_{k=0}^∞ f_k k a^{k-1} b ϵ \\
# &= f(a) + b f'(a) ϵ
# \end{align*}
# $$
# More generally, given a differentiable function we can extend it to dual numbers:
#
# **Definition (dual extension)** Suppose a real-valued function $f$
# is differentiable at $a$. If
# $$
# f(a + b ϵ) = f(a) + b f'(a) ϵ
# $$
# then we say that it is a _dual extension at_ $a$.
#
# Thus, for basic functions we have natural extensions:
# $$
# \begin{align*}
# \exp(a + b ϵ) &:= \exp(a) + b \exp(a) ϵ \\
# \sin(a + b ϵ) &:= \sin(a) + b \cos(a) ϵ \\
# \cos(a + b ϵ) &:= \cos(a) - b \sin(a) ϵ \\
# \log(a + b ϵ) &:= \log(a) + {b \over a} ϵ \\
# \sqrt{a+b ϵ} &:= \sqrt{a} + {b \over 2 \sqrt{a}} ϵ \\
# |a + b ϵ| &:= |a| + b\, {\rm sign} a\, ϵ
# \end{align*}
# $$
# provided the function is differentiable at $a$. Note the last example does not have
# a convergent Taylor series (at 0) but we can still extend it where it is differentiable.
#
# Going further, we can add, multiply, and compose such functions:
#
# **Lemma (product and chain rule)**
# If $f$ is a dual extension at $g(a)$ and $g$
# is a dual extension at $a$, then $q(x) := f(g(x))$ is a dual extension at $a$.
# If $f$ and $g$ are dual extensions at $a$ then
# $r(x) := f(x) g(x)$ is also a dual extension at $a$. In other words:
# $$
# \begin{align*}
# q(a+b ϵ) &= q(a) + b q'(a) ϵ \\
# r(a+b ϵ) &= r(a) + b r'(a) ϵ
# \end{align*}
# $$
#
# **Proof**
# For $q$ it follows immediately:
# $$
# q(a + b ϵ) = f(g(a + b ϵ)) = f(g(a) + b g'(a) ϵ) = f(g(a)) + b g'(a) f'(g(a))ϵ = q(a) + b q'(a) ϵ.
# $$
# For $r$ we have
# $$
# r(a + b ϵ) = f(a+b ϵ )g(a+b ϵ )= (f(a) + b f'(a) ϵ)(g(a) + b g'(a) ϵ) = f(a)g(a) + b (f'(a)g(a) + f(a)g'(a)) ϵ = r(a) +b r'(a) ϵ.
# $$
#
# ∎
#
# A simple corollary is that any function defined in terms of addition, multiplication, composition, etc.
# of functions that are dual with differentiation will be differentiable via dual numbers.
#
# **Example (differentiating non-polynomial)**
#
# Consider $f(x) = \exp(x^2 + {\rm e}^{x})$ by evaluating on the duals:
# $$
# f(1 + ϵ) = \exp(1 + 2ϵ + {\rm e} + {\rm e} ϵ) = \exp(1 + {\rm e}) + \exp(1 + {\rm e}) (2 + {\rm e}) ϵ
# $$
# and therefore we deduce that
# $$
# f'(1) = \exp(1 + {\rm e}) (2 + {\rm e}).
# $$
#
#
# ### Implementation as a special type
#
#
# We now consider a simple implementation of dual numbers that works on general polynomials:
# +
# Dual(a,b) represents a + b*ϵ
struct Dual{T}
    a::T # value component
    b::T # derivative (ϵ) component
end

# Dual(a) represents a + 0*ϵ
Dual(a::Real) = Dual(a, zero(a)) # for real numbers we use a + 0ϵ

# Allow for a + b*ϵ syntax
const ϵ = Dual(0, 1)

import Base: +, *, -, /, ^, zero, exp

# support polynomials like 1 + x, x - 1, 2x or x*2 by reducing to Dual
+(x::Real, y::Dual) = Dual(x) + y
+(x::Dual, y::Real) = x + Dual(y)
-(x::Real, y::Dual) = Dual(x) - y
-(x::Dual, y::Real) = x - Dual(y)
*(x::Real, y::Dual) = Dual(x) * y
*(x::Dual, y::Real) = x * Dual(y)

# support x/2 (but not yet division of duals)
/(x::Dual, k::Real) = Dual(x.a/k, x.b/k)

# zero of a dual is the dual with both components zero (we imported zero above,
# so we also define it; it lets generic code like sum() work on Duals)
zero(x::Dual) = Dual(zero(x.a), zero(x.b))

# a simple recursive function to support x^2, x^3, etc. by repeated multiplication
function ^(x::Dual, k::Integer)
    if k < 0
        error("Not implemented") # would require division of duals
    elseif k == 0
        # bug fix: previously k == 0 fell into the recursive branch and errored;
        # x^0 is the multiplicative identity 1 + 0ϵ
        Dual(one(x.a), zero(x.b))
    elseif k == 1
        x
    else
        x^(k-1) * x
    end
end

# Algebraic operations for duals: linear in +/- and the product rule in *
-(x::Dual) = Dual(-x.a, -x.b)
+(x::Dual, y::Dual) = Dual(x.a + y.a, x.b + y.b)
-(x::Dual, y::Dual) = Dual(x.a - y.a, x.b - y.b)
*(x::Dual, y::Dual) = Dual(x.a*y.a, x.a*y.b + x.b*y.a)

# dual extension of exp: exp(a + bϵ) = exp(a) + b exp(a) ϵ
exp(x::Dual) = Dual(exp(x.a), exp(x.a) * x.b)
# -
# We can also try it on the two polynomials as above:
# Differentiate at 0 by reading off the ϵ-component of f(0 + ϵ):
f = x -> 1 + x + x^2
g = x -> 1 + x/3 + x^2
f(ϵ).b, g(ϵ).b # ≈ 1 and 1/3, the true derivatives at 0
# The first example exactly computes the derivative, and the
# second example is exact up to the last bit rounding!
# It also works for higher order polynomials:
f = x -> 1 + 1.3x + 2.1x^2 + 3.1x^3
f(0.5 + ϵ).b - 5.725 # error vs the exact derivative f'(0.5) = 1.3 + 4.2*0.5 + 9.3*0.25 = 5.725
# It is indeed "accurate to (roughly) 16-digits", the best we can hope for
# using floating point.
#
# We can use this in "algorithms" as well as simple polynomials.
# Consider the polynomial $1 + … + x^n$:
function s(n, x)
    # Evaluate the geometric-style polynomial 1 + x + x^2 + ⋯ + x^n
    # by accumulating one term per iteration.
    total = 1 + x # degree-0 and degree-1 terms
    k = 2
    while k <= n
        total += x^k
        k += 1
    end
    total
end
s(10, 0.1 + ϵ).b # derivative of 1 + x + … + x^10 at x = 0.1 via duals
# This matches exactly the "true" (up to rounding) derivative:
sum((1:10) .* 0.1 .^(0:9)) # ∑ k x^(k-1) evaluated at x = 0.1
# Finally, we can try the more complicated example:
f = x -> exp(x^2 + exp(x))
f(1 + ϵ) # returns f(1) + f'(1)ϵ, matching the worked example above
# What makes dual numbers so effective is that, unlike finite differences, they are not
# prone to disastrous growth due to round-off errors.
| notebooks/Differentiation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + block_hidden=true
# %load_ext rpy2.ipython
# %matplotlib inline
from fbprophet import Prophet
import pandas as pd
from matplotlib import pyplot as plt
import logging
logging.getLogger('fbprophet').setLevel(logging.ERROR)  # silence fbprophet's fit chatter
import warnings
warnings.filterwarnings("ignore")
# Peyton Manning Wikipedia page-view data used throughout this notebook
df = pd.read_csv('../examples/example_wp_log_peyton_manning.csv')
m = Prophet()
m.fit(df)
future = m.make_future_dataframe(periods=366)  # extend one year past the history
# + block_hidden=true language="R"
# library(prophet)
# df <- read.csv('../examples/example_wp_log_peyton_manning.csv')
# m <- prophet(df)
# future <- make_future_dataframe(m, periods=366)
# -
# ### Modeling Holidays and Special Events
# If you have holidays or other recurring events that you'd like to model, you must create a dataframe for them. It has two columns (`holiday` and `ds`) and a row for each occurrence of the holiday. It must include all occurrences of the holiday, both in the past (back as far as the historical data go) and in the future (out as far as the forecast is being made). If they won't repeat in the future, Prophet will model them and then not include them in the forecast.
#
# You can also include columns `lower_window` and `upper_window` which extend the holiday out to `[lower_window, upper_window]` days around the date. For instance, if you wanted to include Christmas Eve in addition to Christmas you'd include `lower_window=-1,upper_window=0`. If you wanted to use Black Friday in addition to Thanksgiving, you'd include `lower_window=0,upper_window=1`. You can also include a column `prior_scale` to set the prior scale separately for each holiday, as described below.
#
# Here we create a dataframe that includes the dates of all of <NAME>'s playoff appearances:
# + language="R"
# library(dplyr)
# playoffs <- data_frame(
# holiday = 'playoff',
# ds = as.Date(c('2008-01-13', '2009-01-03', '2010-01-16',
# '2010-01-24', '2010-02-07', '2011-01-08',
# '2013-01-12', '2014-01-12', '2014-01-19',
# '2014-02-02', '2015-01-11', '2016-01-17',
# '2016-01-24', '2016-02-07')),
# lower_window = 0,
# upper_window = 1
# )
# superbowls <- data_frame(
# holiday = 'superbowl',
# ds = as.Date(c('2010-02-07', '2014-02-02', '2016-02-07')),
# lower_window = 0,
# upper_window = 1
# )
# holidays <- bind_rows(playoffs, superbowls)
# -
# Playoff appearances; upper_window=1 extends each event one day forward so
# the Monday after a game is modeled as well.
playoffs = pd.DataFrame({
    'holiday': 'playoff',
    'ds': pd.to_datetime([
        '2008-01-13', '2009-01-03', '2010-01-16', '2010-01-24',
        '2010-02-07', '2011-01-08', '2013-01-12', '2014-01-12',
        '2014-01-19', '2014-02-02', '2015-01-11', '2016-01-17',
        '2016-01-24', '2016-02-07',
    ]),
    'lower_window': 0,
    'upper_window': 1,
})
# Superbowl dates (a subset of the playoff dates above, so their effects add).
superbowls = pd.DataFrame({
    'holiday': 'superbowl',
    'ds': pd.to_datetime(['2010-02-07', '2014-02-02', '2016-02-07']),
    'lower_window': 0,
    'upper_window': 1,
})
# Stack both event tables into the single holidays frame Prophet expects.
holidays = pd.concat((playoffs, superbowls))
# Above we have included the superbowl days as both playoff games and superbowl games. This means that the superbowl effect will be an additional additive bonus on top of the playoff effect.
#
# Once the table is created, holiday effects are included in the forecast by passing them in with the `holidays` argument. Here we do it with the Peyton Manning data from the Quickstart:
# + output_hidden=true language="R"
# m <- prophet(df, holidays = holidays)
# forecast <- predict(m, future)
# -
# Holiday effects are fit jointly with trend/seasonality when passed at construction
m = Prophet(holidays=holidays)
forecast = m.fit(df).predict(future)
# The holiday effect can be seen in the `forecast` dataframe:
# + output_hidden=true language="R"
# forecast %>%
# select(ds, playoff, superbowl) %>%
# filter(abs(playoff + superbowl) > 0) %>%
# tail(10)
# -
# Last 10 rows where either holiday contributes a nonzero effect
forecast[(forecast['playoff'] + forecast['superbowl']).abs() > 0][
        ['ds', 'playoff', 'superbowl']][-10:]
# The holiday effects will also show up in the components plot, where we see that there is a spike on the days around playoff appearances, with an especially large spike for the superbowl:
# + output_hidden=true magic_args="-w 9 -h 12 -u in" language="R"
# prophet_plot_components(m, forecast)
# -
# Components plot now includes a 'holidays' panel with the playoff/superbowl spikes
fig = m.plot_components(forecast)
# Individual holidays can be plotted using the `plot_forecast_component` function (imported from `fbprophet.plot` in Python) like `plot_forecast_component(m, forecast, 'superbowl')` to plot just the superbowl holiday component.
# ### Built-in Country Holidays
#
# You can use a built-in collection of country-specific holidays using the `add_country_holidays` method (Python) or function (R). The name of the country is specified, and then major holidays for that country will be included in addition to any holidays that are specified via the `holidays` argument described above:
# + output_hidden=true language="R"
# m <- prophet(holidays = holidays)
# m <- add_country_holidays(m, country_name = 'US')
# m <- fit.prophet(m, df)
# + output_hidden=true
m = Prophet(holidays=holidays)
m.add_country_holidays(country_name='US')  # built-in US holidays, added on top of the custom table
m.fit(df)
# -
# You can see which holidays were included by looking at the `train_holiday_names` (Python) or `train.holiday.names` (R) attribute of the model:
# + language="R"
# m$train.holiday.names
# -
# All holiday names (custom + country) that were present during fitting
m.train_holiday_names
# The holidays for each country are provided by the `holidays` package in Python. A list of available countries, and the country name to use, is available on their page: https://github.com/dr-prodigy/python-holidays. In addition to those countries, Prophet includes holidays for these countries: Brazil (BR), Indonesia (ID), India (IN), Malaysia (MY), Vietnam (VN), Thailand (TH), Philippines (PH), Turkey (TU), Pakistan (PK), Bangladesh (BD), Egypt (EG), China (CN), and Russia (RU).
#
# In Python, most holidays are computed deterministically and so are available for any date range; a warning will be raised if dates fall outside the range supported by that country. In R, holiday dates are computed for 1995 through 2044 and stored in the package as `data-raw/generated_holidays.csv`. If a wider date range is needed, this script can be used to replace that file with a different date range: https://github.com/facebook/prophet/blob/master/python/scripts/generate_holidays_file.py.
#
# As above, the country-level holidays will then show up in the components plot:
# + magic_args="-w 9 -h 12 -u in" language="R"
# forecast <- predict(m, future)
# prophet_plot_components(m, forecast)
# -
forecast = m.predict(future)
fig = m.plot_components(forecast)  # country-level holidays now appear in the components
# ### Fourier Order for Seasonalities
#
# Seasonalities are estimated using a partial Fourier sum. See [the paper](https://peerj.com/preprints/3190/) for complete details, and [this figure on Wikipedia](https://en.wikipedia.org/wiki/Fourier_series#/media/File:Fourier_Series.svg) for an illustration of how a partial Fourier sum can approximate an arbitrary periodic signal. The number of terms in the partial sum (the order) is a parameter that determines how quickly the seasonality can change. To illustrate this, consider the Peyton Manning data from the Quickstart. The default Fourier order for yearly seasonality is 10, which produces this fit:
# + output_hidden=true magic_args="-w 9 -h 3 -u in" language="R"
# m <- prophet(df)
# prophet:::plot_yearly(m)
# -
from fbprophet.plot import plot_yearly
m = Prophet().fit(df)
a = plot_yearly(m)  # yearly seasonality with the default Fourier order (10)
# The default values are often appropriate, but they can be increased when the seasonality needs to fit higher-frequency changes, and generally be less smooth. The Fourier order can be specified for each built-in seasonality when instantiating the model, here it is increased to 20:
# + output_hidden=true magic_args="-w 9 -h 3 -u in" language="R"
# m <- prophet(df, yearly.seasonality = 20)
# prophet:::plot_yearly(m)
# -
from fbprophet.plot import plot_yearly
m = Prophet(yearly_seasonality=20).fit(df)
a = plot_yearly(m)  # higher order fits faster-changing cycles but risks overfitting
# Increasing the number of Fourier terms allows the seasonality to fit faster changing cycles, but can also lead to overfitting: N Fourier terms corresponds to 2N variables used for modeling the cycle
#
# ### Specifying Custom Seasonalities
#
# Prophet will by default fit weekly and yearly seasonalities, if the time series is more than two cycles long. It will also fit daily seasonality for a sub-daily time series. You can add other seasonalities (monthly, quarterly, hourly) using the `add_seasonality` method (Python) or function (R).
#
# The inputs to this function are a name, the period of the seasonality in days, and the Fourier order for the seasonality. For reference, by default Prophet uses a Fourier order of 3 for weekly seasonality and 10 for yearly seasonality. An optional input to `add_seasonality` is the prior scale for that seasonal component - this is discussed below.
#
# As an example, here we fit the Peyton Manning data from the Quickstart, but replace the weekly seasonality with monthly seasonality. The monthly seasonality then will appear in the components plot:
# + output_hidden=true magic_args="-w 9 -h 9 -u in" language="R"
# m <- prophet(weekly.seasonality=FALSE)
# m <- add_seasonality(m, name='monthly', period=30.5, fourier.order=5)
# m <- fit.prophet(m, df)
# forecast <- predict(m, future)
# prophet_plot_components(m, forecast)
# -
m = Prophet(weekly_seasonality=False)  # drop the built-in weekly component
m.add_seasonality(name='monthly', period=30.5, fourier_order=5)  # ~monthly cycle in days
forecast = m.fit(df).predict(future)
fig = m.plot_components(forecast)
# ### Seasonalities that depend on other factors
# In some instances the seasonality may depend on other factors, such as a weekly seasonal pattern that is different during the summer than it is during the rest of the year, or a daily seasonal pattern that is different on weekends vs. on weekdays. These types of seasonalities can be modeled using conditional seasonalities.
#
# Consider the <NAME> example from the Quickstart. The default weekly seasonality assumes that the pattern of weekly seasonality is the same throughout the year, but we'd expect the pattern of weekly seasonality to be different during the on-season (when there are games every Sunday) and the off-season. We can use conditional seasonalities to construct separate on-season and off-season weekly seasonalities.
#
# First we add a boolean column to the dataframe that indicates whether each date is during the on-season or the off-season:
# + language="R"
# is_nfl_season <- function(ds) {
# dates <- as.Date(ds)
# month <- as.numeric(format(dates, '%m'))
# return(month > 8 | month < 2)
# }
# df$on_season <- is_nfl_season(df$ds)
# df$off_season <- !is_nfl_season(df$ds)
# +
def is_nfl_season(ds):
    """Return True when *ds* falls during the NFL season (September-January)."""
    month = pd.to_datetime(ds).month
    return month > 8 or month < 2
# Boolean indicator columns later referenced via condition_name in add_seasonality
df['on_season'] = df['ds'].apply(is_nfl_season)
df['off_season'] = ~df['ds'].apply(is_nfl_season)
# -
# Then we disable the built-in weekly seasonality, and replace it with two weekly seasonalities that have these columns specified as a condition. This means that the seasonality will only be applied to dates where the `condition_name` column is `True`. We must also add the column to the `future` dataframe for which we are making predictions.
# + magic_args="-w 9 -h 12 -u in" language="R"
# m <- prophet(weekly.seasonality=FALSE)
# m <- add_seasonality(m, name='weekly_on_season', period=7, fourier.order=3, condition.name='on_season')
# m <- add_seasonality(m, name='weekly_off_season', period=7, fourier.order=3, condition.name='off_season')
# m <- fit.prophet(m, df)
#
# future$on_season <- is_nfl_season(future$ds)
# future$off_season <- !is_nfl_season(future$ds)
# forecast <- predict(m, future)
# prophet_plot_components(m, forecast)
# +
m = Prophet(weekly_seasonality=False)  # replaced by the two conditional weekly seasonalities
m.add_seasonality(name='weekly_on_season', period=7, fourier_order=3, condition_name='on_season')
m.add_seasonality(name='weekly_off_season', period=7, fourier_order=3, condition_name='off_season')
# The condition columns must also exist in the prediction frame
future['on_season'] = future['ds'].apply(is_nfl_season)
future['off_season'] = ~future['ds'].apply(is_nfl_season)
forecast = m.fit(df).predict(future)
fig = m.plot_components(forecast)
# -
# Both of the seasonalities now show up in the components plots above. We can see that during the on-season when games are played every Sunday, there are large increases on Sunday and Monday that are completely absent during the off-season.
# ### Prior scale for holidays and seasonality
# If you find that the holidays are overfitting, you can adjust their prior scale to smooth them using the parameter `holidays_prior_scale`. By default this parameter is 10, which provides very little regularization. Reducing this parameter dampens holiday effects:
# + output_hidden=true language="R"
# m <- prophet(df, holidays = holidays, holidays.prior.scale = 0.05)
# forecast <- predict(m, future)
# forecast %>%
# select(ds, playoff, superbowl) %>%
# filter(abs(playoff + superbowl) > 0) %>%
# tail(10)
# -
# Smaller prior scale (default 10) regularizes / dampens the holiday effects
m = Prophet(holidays=holidays, holidays_prior_scale=0.05).fit(df)
forecast = m.predict(future)
forecast[(forecast['playoff'] + forecast['superbowl']).abs() > 0][
    ['ds', 'playoff', 'superbowl']][-10:]
# The magnitude of the holiday effect has been reduced compared to before, especially for superbowls, which had the fewest observations. There is a parameter `seasonality_prior_scale` which similarly adjusts the extent to which the seasonality model will fit the data.
#
# Prior scales can be set separately for individual holidays by including a column `prior_scale` in the holidays dataframe. Prior scales for individual seasonalities can be passed as an argument to `add_seasonality`. For instance, the prior scale for just weekly seasonality can be set using:
# + language="R"
# m <- prophet()
# m <- add_seasonality(
# m, name='weekly', period=7, fourier.order=3, prior.scale=0.1)
# + output_hidden=true
m = Prophet()
# Per-seasonality prior scale: regularizes just the weekly component
m.add_seasonality(
    name='weekly', period=7, fourier_order=3, prior_scale=0.1)
# -
#
# ### Additional regressors
# Additional regressors can be added to the linear part of the model using the `add_regressor` method or function. A column with the regressor value will need to be present in both the fitting and prediction dataframes. For example, we can add an additional effect on Sundays during the NFL season. On the components plot, this effect will show up in the 'extra_regressors' plot:
# + output_hidden=true magic_args="-w 9 -h 12 -u in" language="R"
# nfl_sunday <- function(ds) {
# dates <- as.Date(ds)
# month <- as.numeric(format(dates, '%m'))
# as.numeric((weekdays(dates) == "Sunday") & (month > 8 | month < 2))
# }
# df$nfl_sunday <- nfl_sunday(df$ds)
#
# m <- prophet()
# m <- add_regressor(m, 'nfl_sunday')
# m <- fit.prophet(m, df)
#
# future$nfl_sunday <- nfl_sunday(future$ds)
#
# forecast <- predict(m, future)
# prophet_plot_components(m, forecast)
# +
def nfl_sunday(ds):
    """Binary regressor: 1 if *ds* is a Sunday during the NFL season, else 0."""
    ts = pd.to_datetime(ds)
    in_season = ts.month > 8 or ts.month < 2
    return 1 if ts.weekday() == 6 and in_season else 0
# The regressor column must exist in both the training and prediction frames
df['nfl_sunday'] = df['ds'].apply(nfl_sunday)
m = Prophet()
m.add_regressor('nfl_sunday')  # must be added before fitting
m.fit(df)
future['nfl_sunday'] = future['ds'].apply(nfl_sunday)
forecast = m.predict(future)
fig = m.plot_components(forecast)  # effect shows in the 'extra_regressors' panel
# -
# NFL Sundays could also have been handled using the "holidays" interface described above, by creating a list of past and future NFL Sundays. The `add_regressor` function provides a more general interface for defining extra linear regressors, and in particular does not require that the regressor be a binary indicator. Another time series could be used as a regressor, although its future values would have to be known.
#
# [This notebook](https://nbviewer.jupyter.org/github/nicolasfauchereau/Auckland_Cycling/blob/master/notebooks/Auckland_cycling_and_weather.ipynb) shows an example of using weather factors as extra regressors in a forecast of bicycle usage, and provides an excellent illustration of how other time series can be included as extra regressors.
#
# The `add_regressor` function has optional arguments for specifying the prior scale (holiday prior scale is used by default) and whether or not the regressor is standardized - see the docstring with `help(Prophet.add_regressor)` in Python and `?add_regressor` in R. Note that regressors must be added prior to model fitting.
#
# The extra regressor must be known for both the history and for future dates. It thus must either be something that has known future values (such as `nfl_sunday`), or something that has separately been forecasted elsewhere. Prophet will also raise an error if the regressor is constant throughout the history, since there is nothing to fit from it.
#
# Extra regressors are put in the linear component of the model, so the underlying model is that the time series depends on the extra regressor as either an additive or multiplicative factor (see the next section for multiplicativity).
| notebooks/seasonality,_holiday_effects,_and_regressors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# -
# Load the Quora question-pairs training data.
df = pd.read_csv('data/train.csv')
df.shape
df.head()
# Work on a fixed 30k random subsample so the notebook runs quickly;
# random_state pins the sample for reproducibility.
new_df = df.sample(30000,random_state=2)
new_df.isnull().sum()
new_df.head()
new_df.isnull().sum()
new_df.duplicated().sum()
# +
# Distribution of duplicate and non-duplicate questions
# (absolute counts, then percentages of the sample).
print(new_df['is_duplicate'].value_counts())
print((new_df['is_duplicate'].value_counts()/new_df['is_duplicate'].count())*100)
new_df['is_duplicate'].value_counts().plot(kind='bar')
# +
# Repeated questions
# qid1/qid2 identify individual questions; pooling both columns lets us
# count how often the same question participates in multiple pairs.
qid = pd.Series(new_df['qid1'].tolist() + new_df['qid2'].tolist())
print('Number of unique questions',np.unique(qid).shape[0])
x = qid.value_counts()>1
print('Number of questions getting repeated',x[x].shape[0])
# +
# Repeated questions histogram
# Log y-scale because repetition counts are heavily right-skewed.
plt.hist(qid.value_counts().values,bins=160)
plt.yscale('log')
plt.show()
# +
# Feature Engineering
# Character length of each question.
new_df['q1_len'] = new_df['question1'].str.len()
new_df['q2_len'] = new_df['question2'].str.len()
# -
new_df.head()
# Word counts via a split on single spaces.
# NOTE(review): .apply assumes no NaN in question1/question2 — a missing
# value would raise AttributeError on .split. The isnull() checks above
# suggest the sample is clean, but confirm (or dropna first).
new_df['q1_num_words'] = new_df['question1'].apply(lambda row: len(row.split(" ")))
new_df['q2_num_words'] = new_df['question2'].apply(lambda row: len(row.split(" ")))
new_df.head()
def common_words(row):
    """Count distinct words shared by the two questions of *row*.

    Each question is split on single spaces, and tokens are lower-cased
    and stripped before being collected into sets, so the result is the
    size of the intersection of the two normalized vocabularies.
    """
    first = {token.lower().strip() for token in row['question1'].split(" ")}
    second = {token.lower().strip() for token in row['question2'].split(" ")}
    return len(first.intersection(second))
# Materialize the shared-word count for every pair (row-wise apply).
new_df['word_common'] = new_df.apply(common_words, axis=1)
new_df.head()
def total_words(row):
    """Sum of the distinct-word counts of the two questions of *row*.

    Duplicates *within* a question are counted once (sets), but a word
    appearing in both questions contributes to both counts.
    """
    first = {token.lower().strip() for token in row['question1'].split(" ")}
    second = {token.lower().strip() for token in row['question2'].split(" ")}
    return len(first) + len(second)
new_df['word_total'] = new_df.apply(total_words, axis=1)
new_df.head()
# Fraction of the combined (per-question unique) vocabulary that is shared.
new_df['word_share'] = round(new_df['word_common']/new_df['word_total'],2)
new_df.head()
# Analysis of features
# Character-length distributions for both questions.
sns.displot(new_df['q1_len'])
print('minimum characters',new_df['q1_len'].min())
print('maximum characters',new_df['q1_len'].max())
print('average num of characters',int(new_df['q1_len'].mean()))
sns.displot(new_df['q2_len'])
print('minimum characters',new_df['q2_len'].min())
print('maximum characters',new_df['q2_len'].max())
print('average num of characters',int(new_df['q2_len'].mean()))
# Word-count distributions for both questions.
sns.displot(new_df['q1_num_words'])
print('minimum words',new_df['q1_num_words'].min())
print('maximum words',new_df['q1_num_words'].max())
print('average num of words',int(new_df['q1_num_words'].mean()))
sns.displot(new_df['q2_num_words'])
print('minimum words',new_df['q2_num_words'].min())
print('maximum words',new_df['q2_num_words'].max())
print('average num of words',int(new_df['q2_num_words'].mean()))
# common words
# Overlay each engineered feature per class to eyeball separability.
sns.distplot(new_df[new_df['is_duplicate'] == 0]['word_common'],label='non duplicate')
sns.distplot(new_df[new_df['is_duplicate'] == 1]['word_common'],label='duplicate')
plt.legend()
plt.show()
# total words
sns.distplot(new_df[new_df['is_duplicate'] == 0]['word_total'],label='non duplicate')
sns.distplot(new_df[new_df['is_duplicate'] == 1]['word_total'],label='duplicate')
plt.legend()
plt.show()
# word share
sns.distplot(new_df[new_df['is_duplicate'] == 0]['word_share'],label='non duplicate')
sns.distplot(new_df[new_df['is_duplicate'] == 1]['word_share'],label='duplicate')
plt.legend()
plt.show()
# Keep raw text aside for vectorization; drop ids and raw text from the
# feature frame, leaving is_duplicate as its first column (the target).
ques_df = new_df[['question1','question2']]
ques_df.head()
final_df = new_df.drop(columns=['id','qid1','qid2','question1','question2'])
print(final_df.shape)
final_df.head()
# +
from sklearn.feature_extraction.text import CountVectorizer
# merge texts
# Stack question1 and question2 so both share one bag-of-words vocabulary;
# vsplit then recovers the two halves (the row count is even because the
# two lists have equal length).
questions = list(ques_df['question1']) + list(ques_df['question2'])
cv = CountVectorizer(max_features=3000)
q1_arr, q2_arr = np.vsplit(cv.fit_transform(questions).toarray(),2)
# -
# Re-attach the original row index so the BoW columns align with new_df.
temp_df1 = pd.DataFrame(q1_arr, index= ques_df.index)
temp_df2 = pd.DataFrame(q2_arr, index= ques_df.index)
temp_df = pd.concat([temp_df1, temp_df2], axis=1)
temp_df.shape
# Final design matrix: engineered features + 2 x 3000 BoW columns.
final_df = pd.concat([final_df, temp_df], axis=1)
print(final_df.shape)
final_df.head()
from sklearn.model_selection import train_test_split
# Column 0 of final_df is is_duplicate (the target); the rest are features.
X_train,X_test,y_train,y_test = train_test_split(final_df.iloc[:,1:].values,final_df.iloc[:,0].values,test_size=0.2,random_state=1)
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
rf = RandomForestClassifier()
rf.fit(X_train,y_train)
y_pred = rf.predict(X_test)
accuracy_score(y_test,y_pred)
# Gradient-boosted baseline for comparison against the random forest.
from xgboost import XGBClassifier
xgb = XGBClassifier()
xgb.fit(X_train,y_train)
y_pred = xgb.predict(X_test)
accuracy_score(y_test,y_pred)
# ## Advanced Features
#
# ### 1. Token Features
# - **cwc_min**: This is the ratio of the number of common words to the length of the smaller question
# - **cwc_max**: This is the ratio of the number of common words to the length of the larger question
# - **csc_min**: This is the ratio of the number of common stop words to the smaller stop word count among the two questions
# - **csc_max**: This is the ratio of the number of common stop words to the larger stop word count among the two questions
# - **ctc_min**: This is the ratio of the number of common tokens to the smaller token count among the two questions
# - **ctc_max**: This is the ratio of the number of common tokens to the larger token count among the two questions
# - **last_word_eq**: 1 if the last word in the two questions is same, 0 otherwise
# - **first_word_eq**: 1 if the first word in the two questions is same, 0 otherwise
#
# ### 2. Length Based Features
# - **mean_len**: Mean of the length of the two questions (number of words)
# - **abs_len_diff**: Absolute difference between the length of the two questions (number of words)
# - **longest_substr_ratio**: Ratio of the length of the longest substring among the two questions to the length of the smaller question
#
# ### 3. Fuzzy Features
# - **fuzz_ratio**: fuzz_ratio score from fuzzywuzzy
# - **fuzz_partial_ratio**: fuzz_partial_ratio from fuzzywuzzy
# - **token_sort_ratio**: token_sort_ratio from fuzzywuzzy
# - **token_set_ratio**: token_set_ratio from fuzzywuzzy
| Data Analysis/bow-with-basic-features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers.core import Dense, Activation
import warnings
warnings.filterwarnings('ignore')
# +
# Training data for the AND gate: four input pairs, label 1 only for (1, 1).
X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y = np.array([[0], [0], [0], [1]], dtype=np.float32)
y = np_utils.to_categorical(y)  # one-hot encode: [class 0, class 1]

model = Sequential()
model.add(Dense(32, input_dim=X.shape[1]))
# The original applied softmax on the hidden layer and sigmoid on the
# output, which feeds categorical_crossentropy unnormalized outputs and
# squashes the hidden representation; use a per-unit nonlinearity in the
# hidden layer and softmax at the output instead.
model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('softmax'))  # normalized class probabilities
y
# -
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
# `nb_epoch` was renamed to `epochs` in Keras 2.0; the old keyword raises
# a TypeError on current versions.
model.fit(X, y, epochs=1000, verbose=2)
| keras-study/dive-into-keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Update location fields
#
import pymongo
import pandas as pd
import numpy as np
import random
import datetime
def db_connection(collection_name):
    """Return a handle to *collection_name* in the mLab MongoDB database.

    Credentials are read from ../credentials/mlab_credentials.txt, which
    must contain exactly four lines: user name, password, host URL and
    database name.

    Raises:
        pymongo.errors.ConnectionFailure: if the client cannot be created.
            (MongoClient connects lazily, so some network failures only
            surface on the first actual operation.)
    """
    # connect to mLab DB
    with open("../credentials/mlab_credentials.txt", 'r', encoding='utf-8') as f:
        [name, password, url, dbname] = f.read().splitlines()
    try:
        db_conn = pymongo.MongoClient("mongodb://{}:{}@{}/{}".format(name, password, url, dbname))
        print ("DB connected successfully!!!")
    except pymongo.errors.ConnectionFailure as e:
        # The original fell through after this handler and crashed with a
        # NameError on the unbound `db_conn`; re-raise so the caller sees
        # the real failure instead.
        print ("Could not connect to DB: %s" % e)
        raise
    return db_conn[dbname][collection_name]
# Live collection and a dated backup collection.
db_tweets = db_connection("tweets")
db_tweets_bckp = db_connection("tweets_bckp_181205")
print("Tweets in DB:", db_tweets.count())
print("Tweets in Backup DB:", db_tweets_bckp.count())
# uncomment to delete
# NOTE(review): despite the comment above, this delete IS active — it
# empties the backup collection before re-filling it below.
result = db_tweets_bckp.delete_many({})
print(result.deleted_count, " documents deleted")
print(db_tweets_bckp.count())
# Copy the first 100 tweets into the backup collection.
tweets_list = [tweet for tweet in db_tweets.find()[:100]]
len(tweets_list)
db_tweets_bckp.insert_many(tweets_list)
print(db_tweets_bckp.count())
# Enrich every fetched tweet with a parsed datetime and flat lat/lon
# fields, then upsert it back into the live collection.
for tweet in tweets_list:
    # Format the datetime field
    date_str = tweet['created_at']
    date_obj = datetime.datetime.strptime(date_str, '%a %b %d %H:%M:%S %z %Y')
    # combine(date, time) drops the parsed UTC offset (time() carries no
    # tzinfo), so the stored datetime is timezone-naive — presumably
    # intentional for Mongo storage; confirm consumers expect naive UTC.
    tweet['datetime'] = datetime.datetime.combine(date_obj.date(), date_obj.time())
    # GeoJSON points are [longitude, latitude], hence the index order.
    if tweet["coordinates"] is not None and tweet["coordinates"]["type"] == "Point":
        tweet['lat'] = tweet["coordinates"]["coordinates"][1]
        tweet['lon'] = tweet["coordinates"]["coordinates"][0]
    else:
        tweet['lat'] = None
        tweet['lon'] = None
    db_tweets.replace_one({"id":tweet["id"]}, tweet, upsert = True)
    #print(tweet)
# Match documents where the lat field exists (even when it is None);
# switch to the commented "$ne" line to require an actual coordinate.
location_query = {
    "lat": {
        #"$ne": None
        "$exists": True
    }
}
# Match documents that already carry the parsed datetime field.
datetime_query = {
    "datetime": {
        "$exists": True
    }
}
# Spot-check the first 10 location-bearing tweets.
for tweet in db_tweets.find(location_query)[:10]:
    print(tweet["coordinates"],tweet['lat'],tweet['lon'])
print("Tweets in DB:", db_tweets.count())
print("Tweets in DB (location):", db_tweets.count(location_query))
print("Tweets in DB (datetime):", db_tweets.count(datetime_query))
tweets_list[14]
# Match tweets that carry the hashtag "MMA". The original query had two
# bugs: the field is spelled "hashtags" (not "hastags"), and nesting the
# whole sub-document under "entities" performs an exact-document match on
# the entities field instead of descending into it, so it could never
# match a real tweet. Dot notation queries the embedded array directly.
hashtag_query = {
    "entities.hashtags": {
        "$elemMatch": {
            "text": {
                "$eq": "MMA"
            }
        }
    }
}
# Print every matching document; fine for small result sets, but consider
# a projection or limit on a large collection.
for tweet in db_tweets.find(hashtag_query):
    print(tweet)
| data/data_update.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#hide
from fastcgi import *
# # fastcgi
#
# > FastCGI and HTTP handlers for Python's `socketserver` classes
# [FastCGI](http://www.mit.edu/~yandros/doc/specs/fcgi-spec.html) is a way for front-end servers to talk to back-end workers in a (somewhat) efficient and (somewhat) simple way. Although it's been around since 1996, it is not very widely appreciated, except in the PHP community, where it is very commonly used.
#
# It can be a great approach for hosting Python scripts, avoiding the overhead of creating a new Python process for every request (as standard CGI would otherwise require) and without requiring large dependencies, complex C projects, or fiddly deployments. `fastcgi` has no dependencies other than [fastcore](https://fastcore.fast.ai/).
#
# There's no new frameworks or concepts to learn. Just call `send` to send anything you like back to the client, read the parameters from `params`, and the input from the client from `stdin`.
#
# `fastcgi` requires a front-end web server. If you don't already have one set up, we recommend [Caddy](https://caddyserver.com/). To forward all requests to `example.com` to a `fastcgi` server listening on port 1234 create a file called `Caddyfile` with the following contents, and then `caddy run`:
#
# example.com
# reverse_proxy localhost:1234 { transport fastcgi }
#
# This library also provides an HTTP handler that can be used in an identical way, except remove `{ transport fastcgi }` from the above `Caddyfile` example. Python's standard library already includes an HTTP handler (in `http.server`), however the documentation warns that that module should not be used in production code. The HTTP handler provided here is trimmed down to a minimal implementation (just 40 lines of code) so that it can easily be studied and extended. It uses the same basic API as Python's other `socketserver` classes (and the same as `FcgiHandler` here) so there's fewer new concepts to understand.
# ## Install
# `pip install fastcgi` or `conda install -c fastai fastcgi`
# ## How to use
# See the full docs pages for each class for details. Quick overviews of each approach are shown below.
# ### fastcgi decorator
# Using the `fastcgi` decorator you can use CGI scripts with minimal changes. Just add the decorator above a function used for CGI, and it converts that script automatically into a FastCGI server, e.g. if you save this as `server.py`:
#
# ```python
# @fastcgi()
# def hello():
# query = os.environ["QUERY_STRING"]
# content = sys.stdin.read()
# sys.stdout.write(f"Content-type: text/html\r\n\r\n<html>{content} ; ")
# sys.stdout.write(f"{query}</html>\r\n")
# ```
#
# ...then if you run `python server.py` it will make a unix socket available as `fcgi.sock` in the current directory.
# ### FcgiHandler
# `FcgiHandler` is used in much the same way as Python's [BaseRequestHandler](https://docs.python.org/3/library/socketserver.html#request-handler-objects). Here's an example:
class TestHandler(FcgiHandler):
    # Minimal example handler: logs request metadata server-side and
    # returns a fixed HTML body to the client.
    def handle(self):
        # CGI-style variables exposed by FcgiHandler.
        print('query:', self.environ['QUERY_STRING'])
        print('content type:', self.environ['HTTP_CONTENT_TYPE'])
        # Request body sent by the client.
        print('stdin:', self['stdin'].read())
        # A response is headers, a blank CRLF line, then the body.
        self['stdout'].write(b"Content-type: text/html\r\n\r\n<html>foobar</html>\r\n")
# You can run this using any of Python's `socketserver` classes, e.g to listen on localhost port 1234:
#
# ```python
# with TCPServer(('localhost',1234), TestHandler) as srv:
# srv.handle_request()
# ```
#
# See the API docs for `FcgiHandler` for an end-to-end example.
#
# You can also create a forking or threading server by using Python's [mixins or predefined classes](https://docs.python.org/3/library/socketserver.html#socketserver.ThreadingMixIn).
#
# In your `handle` method, you can use the `stdin`, `stdout`, and `stderr` attributes, which each contain a `BytesIO` stream.
# ### MinimalHTTPHandler
# `fastcgi` also comes with the `MinimalHTTPHandler` class, which provides very similar functionality to `FcgiHandler`, but using the `HTTP` protocol instead of the `FastCGI` protocol. Here's an example:
#
# ```python
# class _TestHandler(MinimalHTTPHandler):
# def handle(self):
# print(f'Command/path/version: {self.command} {self.path} {self.request_version}')
# print(self.headers)
# self.send_response(200)
# self.send_header("Content-Type", "text/plain")
# self.send_header('Content-Length', '2')
# self.end_headers()
# self.wfile.write(b'ok')
# ```
#
# You can run it with a `socketserver` server in the same way shown above for `FcgiHandler`.
| index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# # Create Athena Database Schema
#
# Amazon Athena is an interactive query service that makes it easy to analyze data in Amazon S3 using standard SQL. Athena is serverless, so there is no infrastructure to manage, and you pay only for the queries that you run.
#
# Athena is based on Presto, and supports various standard data formats, including CSV, JSON, Avro or columnar data formats such as Apache Parquet and Apache ORC.
#
# Presto is an open source, distributed SQL query engine, developed for fast analytic queries against data of any size. It can query data where it is stored, without the need to move the data. Query execution runs in parallel over a pure memory-based architecture which makes Presto extremely fast.
#
# <img src="img/athena_setup.png" width="60%" align="left">
# +
import boto3
import sagemaker

# SageMaker session objects: the default S3 bucket, execution role and
# region are all derived from the notebook's own environment.
sess = sagemaker.Session()
bucket = sess.default_bucket()
role = sagemaker.get_execution_role()
region = boto3.Session().region_name
# -
# Success flag; %store'd at the end so downstream notebooks can check it.
ingest_create_athena_db_passed = False
# %store -r s3_public_path_tsv
# Guard: both S3 paths must have been %store'd by the previous notebook.
try:
    s3_public_path_tsv
except NameError:
    print("*****************************************************************************")
    print("[ERROR] PLEASE RE-RUN THE PREVIOUS COPY TSV TO S3 NOTEBOOK ******************")
    print("[ERROR] THIS NOTEBOOK WILL NOT RUN PROPERLY. ********************************")
    print("*****************************************************************************")
print(s3_public_path_tsv)
# %store -r s3_private_path_tsv
try:
    s3_private_path_tsv
except NameError:
    print("*****************************************************************************")
    print("[ERROR] PLEASE RE-RUN THE PREVIOUS COPY TSV TO S3 NOTEBOOK ******************")
    print("[ERROR] THIS NOTEBOOK WILL NOT RUN PROPERLY. ********************************")
    print("*****************************************************************************")
print(s3_private_path_tsv)
# # Import PyAthena
#
# [PyAthena](https://pypi.org/project/PyAthena/) is a Python DB API 2.0 (PEP 249) compliant client for Amazon Athena.
from pyathena import connect
# # Create Athena Database
database_name = "dsoaws"
# Note: The databases and tables that we create in Athena use a data catalog service to store the metadata of your data. For example, schema information consisting of the column names and data type of each column in a table, together with the table name, is saved as metadata information in a data catalog.
#
# Athena natively supports the AWS Glue Data Catalog service. When we run `CREATE DATABASE` and `CREATE TABLE` queries in Athena with the AWS Glue Data Catalog as our source, we automatically see the database and table metadata entries being created in the AWS Glue Data Catalog.
# Set S3 staging directory -- this is a temporary directory used for Athena queries
s3_staging_dir = "s3://{0}/athena/staging".format(bucket)
conn = connect(region_name=region, s3_staging_dir=s3_staging_dir)
# Idempotent DDL, so the notebook is safe to re-run.
statement = "CREATE DATABASE IF NOT EXISTS {}".format(database_name)
print(statement)
# +
import pandas as pd

# Execute the DDL through the PyAthena DB-API connection.
pd.read_sql(statement, conn)
# -
# # Verify The Database Has Been Created Successfully
# +
statement = "SHOW DATABASES"
df_show = pd.read_sql(statement, conn)
df_show.head(5)
# -
# Record success for downstream notebooks only if the DB is listed.
if database_name in df_show.values:
    ingest_create_athena_db_passed = True
# %store ingest_create_athena_db_passed
# # Store Variables for the Next Notebooks
# %store
# # Release Resources
# + language="html"
#
# <p><b>Shutting down your kernel for this notebook to release resources.</b></p>
# <button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button>
#
# <script>
# try {
# els = document.getElementsByClassName("sm-command-button");
# els[0].click();
# }
# catch(err) {
# // NoOp
# }
# </script>
| 00_quickstart/03_Create_Athena_Database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Large scale text analysis with deep learning (3 points)
#
# Today we're gonna apply the newly learned tools for the task of predicting job salary.
#
# <img src="https://kaggle2.blob.core.windows.net/competitions/kaggle/3342/media/salary%20prediction%20engine%20v2.png" width=400px>
#
# _Special thanks to [<NAME>](https://github.com/Omrigan/) for the core assignment idea._
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# ### About the challenge
# For starters, let's download and unpack the data from [here](https://www.dropbox.com/s/5msc5ix7ndyba10/Train_rev1.csv.tar.gz?dl=0).
#
# You can also get it from [yadisk url](https://yadi.sk/d/vVEOWPFY3NruT7) the competition [page](https://www.kaggle.com/c/job-salary-prediction/data) (pick `Train_rev1.*`).
# !curl -L https://www.dropbox.com/s/5msc5ix7ndyba10/Train_rev1.csv.tar.gz?dl=1 -o Train_rev1.csv.tar.gz
# !tar -xvzf ./Train_rev1.csv.tar.gz
data = pd.read_csv("./Train_rev1.csv", index_col=None)
data.shape
# One problem with salary prediction is that it's oddly distributed: there are many people who are paid standard salaries and a few that get tons o money. The distribution is fat-tailed on the right side, which is inconvenient for MSE minimization.
#
# There are several techniques to combat this: using a different loss function, predicting log-target instead of raw target or even replacing targets with their percentiles among all salaries in the training set. We gonna use logarithm for now.
#
# _You can read more [in the official description](https://www.kaggle.com/c/job-salary-prediction#description)._
# +
data['Log1pSalary'] = np.log1p(data['SalaryNormalized']).astype('float32')
plt.figure(figsize=[8, 4])
plt.subplot(1, 2, 1)
plt.hist(data["SalaryNormalized"], bins=20);
plt.subplot(1, 2, 2)
plt.hist(data['Log1pSalary'], bins=20);
# -
# Our task is to predict one number, __Log1pSalary__.
#
# To do so, our model can access a number of features:
# * Free text: __`Title`__ and __`FullDescription`__
# * Categorical: __`Category`__, __`Company`__, __`LocationNormalized`__, __`ContractType`__, and __`ContractTime`__.
# +
text_columns = ["Title", "FullDescription"]
categorical_columns = ["Category", "Company", "LocationNormalized", "ContractType", "ContractTime"]
target_column = "Log1pSalary"
data[categorical_columns] = data[categorical_columns].fillna('NaN') # cast missing values to string "NaN"
data.sample(3)
# -
# ### Preprocessing text data
#
# Just like last week, applying NLP to a problem begins from tokenization: splitting raw text into sequences of tokens (words, punctuation, etc).
#
# __Your task__ is to lowercase and tokenize all texts under `Title` and `FullDescription` columns. Store the tokenized data as a __space-separated__ string of tokens for performance reasons.
#
# It's okay to use nltk tokenizers. Assertions were designed for WordPunctTokenizer, slight deviations are okay.
print("Raw text:")
print(data["FullDescription"][2::100000])
# +
import nltk
tokenizer = nltk.tokenize.WordPunctTokenizer()
# see task above
<YOUR CODE HERE>
# -
# Now we can assume that our text is a space-separated list of tokens:
print("Tokenized:")
print(data["FullDescription"][2::100000])
assert data["FullDescription"][2][:50] == 'mathematical modeller / simulation analyst / opera'
assert data["Title"][54321] == 'international digital account manager ( german )'
# Not all words are equally useful. Some of them are typos or rare words that are only present a few times.
#
# Let's count how many times is each word present in the data so that we can build a "white list" of known words.
# +
# Count how many times does each token occur in both "Title" and "FullDescription" in total
# build a dictionary { token -> it's count }
token_counts = <YOUR CODE>
# hint: you may or may not want to use collections.Counter
# +
print("Total unique tokens :", len(token_counts))
print('\n'.join(map(str, token_counts.most_common(n=5))))
print('...')
print('\n'.join(map(str, token_counts.most_common()[-3:])))
assert token_counts.most_common(1)[0][1] in range(2600000, 2700000)
assert len(token_counts) in range(200000, 210000)
print('Correct!')
# -
# Let's see how many words are there for each count
plt.hist(list(token_counts.values()), range=[0, 10**4], bins=50, log=True)
plt.xlabel("Word counts");
# Now filter tokens a list of all tokens that occur at least 10 times.
# +
min_count = 10
# tokens from token_counts keys that had at least min_count occurrences throughout the dataset
tokens = <YOUR CODE HERE>
# +
# Add a special tokens for unknown and empty words
UNK, PAD = "UNK", "PAD"
tokens = [UNK, PAD] + sorted(tokens)
print("Vocabulary size:", len(tokens))
assert type(tokens) == list
assert len(tokens) in range(32000, 35000)
assert 'me' in tokens
assert UNK in tokens
print("Correct!")
# -
# Build an inverse token index: a dictionary from token(string) to it's index in `tokens` (int)
token_to_id = <your code here>
# +
assert isinstance(token_to_id, dict)
assert len(token_to_id) == len(tokens)
for tok in tokens:
assert tokens[token_to_id[tok]] == tok
print("Correct!")
# -
# And finally, let's use the vocabulary you've built to map text lines into neural network-digestible matrices.
# +
UNK_IX, PAD_IX = map(token_to_id.get, [UNK, PAD])
def as_matrix(sequences, max_len=None):
    """Convert a batch of token sequences into a padded id matrix.

    Accepts either space-separated strings or pre-split token lists.
    Unknown tokens map to UNK_IX; short rows are right-padded with
    PAD_IX; rows longer than `max_len` are truncated.
    """
    if isinstance(sequences[0], str):
        sequences = [seq.split() for seq in sequences]
    longest = max(len(seq) for seq in sequences)
    width = longest if not max_len else min(longest, max_len)
    matrix = np.full((len(sequences), width), np.int32(PAD_IX))
    for row, seq in enumerate(sequences):
        ids = [token_to_id.get(tok, UNK_IX) for tok in seq[:width]]
        matrix[row, :len(ids)] = ids
    return matrix
# -
print("Lines:")
print('\n'.join(data["Title"][::100000].values), end='\n\n')
print("Matrix:")
print(as_matrix(data["Title"][::100000]))
# Now let's encode the categorical data we have.
#
# As usual, we shall use one-hot encoding for simplicity. Kudos if you implement more advanced encodings: tf-idf, pseudo-time-series, etc.
# +
from sklearn.feature_extraction import DictVectorizer
# we only consider top-1k most frequent companies to minimize memory usage
top_companies, top_counts = zip(*Counter(data['Company']).most_common(1000))
recognized_companies = set(top_companies)
data["Company"] = data["Company"].apply(lambda comp: comp if comp in recognized_companies else "Other")
categorical_vectorizer = DictVectorizer(dtype=np.float32, sparse=False)
categorical_vectorizer.fit(data[categorical_columns].apply(dict, axis=1))
# -
# ### The deep learning part
#
# Once we've learned to tokenize the data, let's design a machine learning experiment.
#
# As before, we won't focus too much on validation, opting for a simple train-test split.
#
# __To be completely rigorous,__ we've committed a small crime here: we used the whole data for tokenization and vocabulary building. A stricter way would be to do that part on the training set only. You may want to do that and measure the magnitude of changes.
# +
from sklearn.model_selection import train_test_split
data_train, data_val = train_test_split(data, test_size=0.2, random_state=42)
data_train.index = range(len(data_train))
data_val.index = range(len(data_val))
print("Train size = ", len(data_train))
print("Validation size = ", len(data_val))
# +
def make_batch(data, max_len=None, word_dropout=0):
    """Assemble a keras-friendly feed dict from a dataframe slice.

    :param max_len: optional cap on the token-matrix width
    :param word_dropout: probability of replacing a description token
        index with UNK_IX (regularization); 0 disables it
    :returns: dict with "Title", "FullDescription", "Categorical" and,
        when present in `data`, the target column
    """
    batch = {
        "Title": as_matrix(data["Title"].values, max_len),
        "FullDescription": as_matrix(data["FullDescription"].values, max_len),
        'Categorical': categorical_vectorizer.transform(
            data[categorical_columns].apply(dict, axis=1)),
    }
    if word_dropout != 0:
        batch["FullDescription"] = apply_word_dropout(batch["FullDescription"], 1. - word_dropout)
    if target_column in data.columns:
        batch[target_column] = data[target_column].values
    return batch
def apply_word_dropout(matrix, keep_prop, replace_with=UNK_IX, pad_ix=PAD_IX,):
    """Randomly replace non-padding token ids with `replace_with`.

    Each non-pad cell is substituted with probability 1 - keep_prop;
    padding cells are never modified.
    """
    drop = np.random.choice(2, np.shape(matrix), p=[keep_prop, 1 - keep_prop])
    drop &= matrix != pad_ix
    return np.where(drop, np.full_like(matrix, replace_with), matrix)
# -
make_batch(data_train[:3], max_len=10)
# #### Architecture
#
# Our basic model consists of three branches:
# * Title encoder
# * Description encoder
# * Categorical features encoder
#
# We will then feed all 3 branches into one common network that predicts salary.
#
# <img src="https://github.com/yandexdataschool/nlp_course/raw/master/resources/w2_conv_arch.png" width=600px>
# This clearly doesn't fit into keras' __Sequential__ interface. To build such a network, one will have to use __[Keras Functional API](https://keras.io/models/model/)__.
import keras
import keras.layers as L
def build_model(n_tokens=len(tokens), n_cat_features=len(categorical_vectorizer.vocabulary_), hid_size=64):
    """ Build a model that maps three data sources to a single linear output: predicted log1p(salary) """
    # Variable-length token-id sequences; shape [None] accepts any length.
    l_title = L.Input(shape=[None], name="Title")
    l_descr = L.Input(shape=[None], name="FullDescription")
    # One-hot categorical vector produced by categorical_vectorizer.
    l_categ = L.Input(shape=[n_cat_features], name="Categorical")
    # Build your monster!
    # <YOUR CODE>
    # (Assignment placeholder: embed + pool the two text branches,
    # concatenate with the categorical branch, end in one linear unit.)
    output_layer = <...>
    # end of your code
    model = keras.models.Model(inputs=[l_title, l_descr, l_categ], outputs=[output_layer])
    # MSE objective on log1p(salary); MAE reported for interpretability.
    model.compile('adam', 'mean_squared_error', metrics=['mean_absolute_error'])
    return model
# +
model = build_model()
model.summary()
dummy_pred = model.predict(make_batch(data_train[:100]))
dummy_loss = model.train_on_batch(make_batch(data_train[:100]), data_train['Log1pSalary'][:100])[0]
assert dummy_pred.shape == (100, 1)
assert len(np.unique(dummy_pred)) > 20, "model returns suspiciously few unique outputs. Check your initialization"
assert np.ndim(dummy_loss) == 0 and 0. <= dummy_loss <= 250., "make sure you minimize MSE"
# -
# #### Training and evaluation
#
# As usual, we gonna feed our monster with random minibatches of data.
#
# As we train, we want to monitor not only loss function, which is computed in log-space, but also the actual error measured in dollars.
def iterate_minibatches(data, batch_size=256, shuffle=True, cycle=False, **kwargs):
    """Yield (inputs, target) minibatches from `data`.

    :param shuffle: visit rows in a fresh random order on every pass
    :param cycle: when True, loop over the data forever (fit_generator)
    :param kwargs: forwarded to make_batch (e.g. max_len, word_dropout)
    """
    while True:
        order = np.arange(len(data))
        if shuffle:
            order = np.random.permutation(order)
        for lo in range(0, len(order), batch_size):
            chunk = make_batch(data.iloc[order[lo:lo + batch_size]], **kwargs)
            labels = chunk.pop(target_column)
            yield chunk, labels
        if not cycle:
            break
# ### Model training
#
# We can now fit our model the usual minibatch way. The interesting part is that we train on an infinite stream of minibatches, produced by `iterate_minibatches` function.
# +
batch_size = 256
epochs = 10 # definitely too small
steps_per_epoch = 100 # for full pass over data: (len(data_train) - 1) // batch_size + 1
model = build_model()
model.fit_generator(iterate_minibatches(data_train, batch_size, cycle=True, word_dropout=0.05),
epochs=epochs, steps_per_epoch=steps_per_epoch,
validation_data=iterate_minibatches(data_val, batch_size, cycle=True),
validation_steps=data_val.shape[0] // batch_size
)
# +
def print_metrics(model, data, batch_size=batch_size, name="", **kw):
    """Report MSE/MAE of `model` over `data` (in log-salary space).

    Iterates the data once without shuffling, accumulating errors batch
    by batch so the full prediction vector never sits in memory.
    Returns the unnormalized sums of squared and absolute errors.
    """
    sq_sum, abs_sum, seen = 0.0, 0.0, 0.0
    for inputs, targets in iterate_minibatches(data, batch_size=batch_size, shuffle=False, **kw):
        residual = model.predict(inputs)[:, 0] - targets
        sq_sum += np.sum(np.square(residual))
        abs_sum += np.sum(np.abs(residual))
        seen += len(targets)
    print("%s results:" % (name or ""))
    print("Mean square error: %.5f" % (sq_sum / seen))
    print("Mean absolute error: %.5f" % (abs_sum / seen))
    return sq_sum, abs_sum
print_metrics(model, data_train, name='Train')
print_metrics(model, data_val, name='Val');
# -
# ### Bonus part: explaining model predictions
#
# It's usually a good idea to understand how your model works before you let it make actual decisions. It's simple for linear models: just see which words learned positive or negative weights. However, its much harder for neural networks that learn complex nonlinear dependencies.
#
# There are, however, some ways to look inside the black box:
# * Seeing how model responds to input perturbations
# * Finding inputs that maximize/minimize activation of some chosen neurons (_read more [on distill.pub](https://distill.pub/2018/building-blocks/)_)
# * Building local linear approximations to your neural network: [article](https://arxiv.org/abs/1602.04938), [eli5 library](https://github.com/TeamHG-Memex/eli5/tree/master/eli5/formatters)
#
# Today we gonna try the first method just because it's the simplest one.
def explain(model, sample, col_name='Title'):
    """ Computes the effect each word had on model predictions """
    sample = dict(sample)
    # Tokens of the chosen column, normalized through the vocabulary
    # (out-of-vocabulary words display as tokens[0], the UNK token).
    sample_col_tokens = [tokens[token_to_id.get(tok, 0)] for tok in sample[col_name].split()]
    # Row i gets token i replaced by UNK; the extra final row is the
    # untouched baseline, so one model call scores every perturbation.
    data_drop_one_token = pd.DataFrame([sample] * (len(sample_col_tokens) + 1))
    for drop_i in range(len(sample_col_tokens)):
        data_drop_one_token.loc[drop_i, col_name] = ' '.join(UNK if i == drop_i else tok
                                                     for i, tok in enumerate(sample_col_tokens))
    *predictions_drop_one_token, baseline_pred = model.predict(make_batch(data_drop_one_token))[:, 0]
    # Positive diff => dropping the word lowered the prediction, i.e. the
    # word pushed the predicted salary up.
    diffs = baseline_pred - predictions_drop_one_token
    return list(zip(sample_col_tokens, diffs))
# +
from IPython.display import HTML, display_html
def draw_html(tokens_and_weights, cmap=plt.get_cmap("bwr"), display=True,
              token_template="""<span style="background-color: {color_hex}">{token}</span>""",
              font_style="font-size:14px;"
              ):
    """Render (token, weight) pairs as colour-coded inline HTML.

    Each token's background colour is ``cmap`` evaluated at
    1 / (1 + exp(weight)) — i.e. sigmoid(-weight) — so positive and
    negative weights map to opposite ends of the colormap.

    Returns the raw HTML string; if ``display`` is True it is also shown
    inline via IPython's display machinery.
    """
    def get_color_hex(weight):
        # bytes=True makes the colormap return 0-255 RGBA channels,
        # which are then formatted as a #RRGGBB hex string.
        rgba = cmap(1. / (1 + np.exp(weight)), bytes=True)
        return '#%02X%02X%02X' % rgba[:3]

    tokens_html = [
        token_template.format(token=token, color_hex=get_color_hex(weight))
        for token, weight in tokens_and_weights
    ]

    raw_html = """<p style="{}">{}</p>""".format(font_style, ' '.join(tokens_html))
    if display:
        display_html(HTML(raw_html))
    return raw_html
# +
i = 36605
tokens_and_weights = explain(model, data.loc[i], "Title")
draw_html([(tok, weight * 5) for tok, weight in tokens_and_weights], font_style='font-size:20px;');
tokens_and_weights = explain(model, data.loc[i], "FullDescription")
draw_html([(tok, weight * 10) for tok, weight in tokens_and_weights]);
# +
i = 12077
tokens_and_weights = explain(model, data.loc[i], "Title")
draw_html([(tok, weight * 5) for tok, weight in tokens_and_weights], font_style='font-size:20px;');
tokens_and_weights = explain(model, data.loc[i], "FullDescription")
draw_html([(tok, weight * 10) for tok, weight in tokens_and_weights]);
# +
i = np.random.randint(len(data))
print("Index:", i)
print("Salary (gbp):", np.expm1(model.predict(make_batch(data.iloc[i: i+1]))[0, 0]))
tokens_and_weights = explain(model, data.loc[i], "Title")
draw_html([(tok, weight * 5) for tok, weight in tokens_and_weights], font_style='font-size:20px;');
tokens_and_weights = explain(model, data.loc[i], "FullDescription")
draw_html([(tok, weight * 10) for tok, weight in tokens_and_weights]);
# -
# __Terrible start-up idea #1962:__ make a tool that automatically rephrases your job description (or CV) to meet salary expectations :)
| week02_classification/seminar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="-9Jttgd8evcI" colab_type="text"
# # Russian language dataset
# + [markdown] id="KqpAlCYkxe9e" colab_type="text"
# First, install all prerequisites
# + id="qyM4bYZgevcK" colab_type="code" colab={}
# !pip install langdetect tqdm
# !pip install "tqdm==4.43.0"
# !sudo apt-get install -y xsltproc
from fastai.basics import *
from tqdm import *
from tqdm.contrib.concurrent import process_map, thread_map
from multiprocessing import Pool
import regex as re
import time
from langdetect import detect
from langdetect.lang_detect_exception import LangDetectException
NEW_LINE = '<|n|>'
librusec = '/home/u/nas/librusec/lib.rus.ec'
tmpzips = './tmp/zip'
tmptxt = './tmp/txt'
tmpfb2clean = './tmp/fb2clean'
tmpfb2unzip = './tmp/fb2unzip'
data = Path('../data/full')
# !mkdir ../data
# !mkdir ../data/full
# !mkdir ../data/classic
# !mkdir tmp
# !mkdir tmp/fb2unzip
# !mkdir tmp/fb2clean
# !mkdir tmp/txt
# !mkdir tmp/zip
# + [markdown] id="YfsNWkxOevcy" colab_type="text"
# ### Unpack ZIPs
# + [markdown] id="qWZmL9zGsljA" colab_type="text"
# Before running this we need to upload our fb2 zip files to /tmp/zip and then run this script
# + id="5u1sPX302-Vp" colab_type="code" colab={}
# clean the output directory
# #!rm -rfv {tmpfb2unzip + '/*'}
# + id="Vb9jDGqTevcz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 99, "referenced_widgets": ["cb618f20c7964d11a7f55c20ff53ddb6", "1f8f0921270b4d9d87fbc6b2046543d9", "fe1db8fc144641fc95d030c49be2ceea", "4ee8131635904188a9ee433ec69fb304", "<KEY>", "9e5e2d7e1a6241dda1874764965bb36c", "12ab7454c50f4ef69045d6c98faacc87", "9bc37e75c8d248aa9ea40fe2d20c81d0"]} outputId="5ea921c8-8741-49bf-ef24-3af009501030"
# zips = get_files(tmpzips, '.zip')
# print(f'{len(zips)} zip file(s) found')
# def unpack(fn):
# # replace -o with -n to not overwrite existing files
# # remove the -q flag for more logging
# # -j junk paths (do not make directories)
# # -qo flag to remove annoying warning https://www.directadmin.com/features.php?id=2213
# # !unzip -qq -joL -qo -O cp396 {fn} -d {tmpfb2unzip} >>/dev/null
# # Unpack zips in parallel
# thread_map(unpack, zips, max_workers=64)
# print(f'Unzipped all - DONE')
# # Sanitize file and folder names - remove spaces
# # !find $tmp -depth -name "* *" -execdir rename 's/ /_/g' "{}" \;
# + [markdown] id="axX-uz4EevdM" colab_type="text"
# ### Convert fb2 to txt
# + id="lcbAxHyL2sYj" colab_type="code" colab={}
# clean the output directory
# #!rm -rfv {tmpfb2clean + '/*'}
# + id="cCY0yEsX20Wp" colab_type="code" colab={}
# clean the output directory
# #!rm -rfv {tmptxt + '/*'}
# + id="2oQ0Ot4BevdN" colab_type="code" colab={}
# Get fb2s
# fbs = get_files(tmpfb2unzip, '.fb2', recurse=True)
# # Sanitize filenames and move to the 'clean' dir
# for fn in fbs:
# nn = (str(fn.name)
# .replace(' ','')
# .replace('_quot;','')
# .replace('!','')
# .replace(',','')
# .replace('(','')
# .replace(')','')
# .replace('\xa0','')
# .replace('.','')
# .replace('fb2', '.fb2')
# )
# shutil.move(fn, f'{tmpfb2clean}/{nn}')
# print(f'{len(fbs)} fb2(s) sanitized')
# # In order to convert you need to get the conversion xsl from here:
# # https://github.com/mgrankin/ru_transformers/blob/master/corpus/FB2_2_txt.xsl
# # and put it in the root folder
# def convert_fb2(fn):
# #!xsltproc FB2_2_txt.xsl {fn} > {str(fn).replace(' ', '').replace('.fb2','.txt').replace('/fb2','/txt')} 2>>/dev/null
# # !xsltproc FB2_2_txt.xsl {fn} > {tmptxt + '/' + fn.name.replace('fb2', 'txt')} 2>>/dev/null
# return {fn}
# # Get fb2s from the clean sanitized dir
# fbs = get_files(tmpfb2clean, '.fb2')
# # convert all to .txt
# thread_map(convert_fb2, fbs, max_workers=64)
# print('FB2(s) conversion done')
# + [markdown] id="ZIuco_8a1I-4" colab_type="text"
# ### Filter and concat txt files
# + id="puHJbvlq1SIu" colab_type="code" colab={}
txts = get_files('./tmp/txt', '.txt')
print(f'Found {len(txts)} txt(s)')
# this will take time, because langdetect fails on multithreading
print('Running langdetect . . . ')
for fn in progress_bar(txts):
with open(f'./{fn}', 'r') as f:
lines = f.read()
print(f)
print(fn)
print(len(lines))
try:
if len(lines) > 1e+4 and detect(lines) == 'ru':
with open(f'{data}/{fn.name}', 'w') as c:
c.write(lines)
except LangDetectException as e:
pass
# Add a space before each word. It's not really necessary.
# It just makes encoding a bit more meaningful to the model and the text smaller (after encoding).
print('Running text sanitization . . . ')
def process_fn(fn):
    """Normalize one text file in place.

    Ensures the text starts with a space, inserts a space between a
    punctuation character and the word character that follows it, and
    collapses runs of three or more identical characters down to three.
    """
    sep_pattern = re.compile(r'(?=[^ ])([\W])([\w])')
    run_pattern = re.compile(r'(.|\s)\1\1+')
    with open(fn, 'r') as src:
        text = src.read()
    if text and not text.startswith(' '):
        text = ' ' + text
    text = sep_pattern.sub(r'\g<1> \g<2>', text)
    text = run_pattern.sub(r'\1' * 3, text)
    with open(fn, 'w') as dst:
        dst.write(text)
thread_map(process_fn, txts)
# + id="LQ612AmS2B5K" colab_type="code" colab={}
txts = get_files(data, '.txt');
print(f'Amount of txt(s): {len(txts)}')
fsorted = get_files('../data/classic', '.txt') + sorted(txts, key=lambda fn: os.path.getsize(fn))
print('Building text corpus')
sz=0
with open('./tmp/russian_corpus_for_vocab.txt', 'w') as c:
for fn in fsorted:
with open(fn, 'r') as f:
sz += c.write(f.read().replace('\n', f' {NEW_LINE} ') + '\n')
if sz > 5e+9:
break
print('Done ./tmp/russian_corpus_for_vocab.txt')
# + [markdown] id="do8mJhK82iHp" colab_type="text"
# Now a text corpus can be collected from ./tmp/russian_corpus_for_vocab.txt to use with gpt-2
| corpus/corpus_edit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import itertools
import shutil

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy
import scipy.integrate  # used below as scipy.integrate.quad
import scipy.ndimage    # used below as scipy.ndimage.gaussian_filter1d
import scipy.stats      # used by DensityP / DensityQ (scipy submodules are not implicit)
import seaborn as sns
import tabulate
import torch
import tqdm
import wandb
from matplotlib import rc
rc('text', usetex=True)
rc('font',**{'family':'serif','serif':['Computer Modern Roman']})
# %matplotlib inline
palette = sns.color_palette()
# +
import matplotlib.patches as mpatches
pallete = sns.color_palette()
fig, axes = plt.subplots(figsize=(26, 11), nrows=1, ncols=2)#, sharey='all')
plt.sca(axes[1])
plt.xlabel('')
plt.ylabel('')
ring = mpatches.Wedge((0.0, 0.0), 2.0, 0, 360, width=1.0, color=palette[1], alpha=0.4, figure=fig)
c_patch = mpatches.Patch(color=palette[1], alpha=0.4, label='$q(x)$')
plt.legend(handles=[c_patch], fontsize=70, bbox_to_anchor=(0.5, 1.1), loc='center',
frameon=False, handlelength=1.5)
plt.gca().add_patch(ring)
plt.xlim([-2.4, 2.4])
plt.ylim([-2.4, 2.4])
plt.gca().tick_params(axis='both', which='major', pad=15)
plt.xticks(fontsize=50)
plt.yticks(fontsize=50)
plt.grid(linestyle='--', dashes=(12, 12))
plt.sca(axes[0])
plt.xlabel('')
plt.ylabel('')
ball = mpatches.Circle((0.0, 0.0), 2.0, color=palette[0], alpha=0.4, figure=fig)
c_patch = mpatches.Patch(color=palette[0], alpha=0.4, label='$p(x)$')
plt.legend(handles=[c_patch], fontsize=70, bbox_to_anchor=(0.5, 1.1), loc='center',
frameon=False, handlelength=1.5)
plt.gca().add_patch(ball)
plt.xlim([-2.4, 2.4])
plt.ylim([-2.4, 2.4])
plt.gca().tick_params(axis='both', which='major', pad=15)
plt.xticks(fontsize=50)
plt.yticks(fontsize=50)
plt.grid(linestyle='--', dashes=(12, 12))
plt.subplots_adjust(wspace=0.4)
plt.savefig('./figures/support_slice.pdf', format='pdf', bbox_inches='tight')
plt.show()
# +
class DensityP(object):
    """Truncated-normal mixture density p(x) supported on [-2, 2].

    Three components; weights are normalized to sum to 1, so the mixture
    itself integrates to 1 over [-2, 2].
    """

    def __init__(self):
        self.weights = np.array([40, 30, 30]).astype(np.float64)
        self.weights /= np.sum(self.weights)
        self.locs = np.array([-1.3, -0.3, 1.5])
        self.scales = np.array([0.5, 1.2, 0.9])
        # truncnorm expects its clip points in standard-score units,
        # i.e. (x - loc) / scale.
        self.left_clips = (-2.0 - self.locs) / self.scales
        self.right_clips = (2 - self.locs) / self.scales

    def __call__(self, x):
        """Evaluate the mixture pdf at ``x``.

        ``x`` may be any scalar (int or float) or 1-d array-like; the
        original only special-cased ``float`` and crashed on ints/lists.
        Returns a 1-d array (shape (1,) for scalar input).
        """
        x = np.atleast_1d(np.asarray(x, dtype=np.float64))
        comp_pdf = scipy.stats.truncnorm.pdf(
            x[:, None],
            self.left_clips[None, :], self.right_clips[None, :],
            loc=self.locs[None, :], scale=self.scales[None, :])
        # Weighted sum over the mixture components.
        return np.sum(self.weights[None, :] * comp_pdf, axis=1)
density_p = DensityP()
class DensityQ(object):
    """Truncated-normal mixture density q(x) supported on [-2, 2].

    Four components; weights are normalized to sum to 1, so the mixture
    itself integrates to 1 over [-2, 2].
    """

    def __init__(self):
        self.weights = np.array([60, 15, 10, 15]).astype(np.float64)
        self.weights /= np.sum(self.weights)
        self.locs = np.array([0.2, -0.7, 1.2, -1.8])
        self.scales = np.array([0.7, 3.5, 2.0, 1.2])
        # truncnorm expects its clip points in standard-score units,
        # i.e. (x - loc) / scale.
        self.left_clips = (-2.0 - self.locs) / self.scales
        self.right_clips = (2 - self.locs) / self.scales

    def __call__(self, x):
        """Evaluate the mixture pdf at ``x``.

        ``x`` may be any scalar (int or float) or 1-d array-like; the
        original only special-cased ``float`` and crashed on ints/lists.
        Returns a 1-d array (shape (1,) for scalar input).
        """
        x = np.atleast_1d(np.asarray(x, dtype=np.float64))
        comp_pdf = scipy.stats.truncnorm.pdf(
            x[:, None],
            self.left_clips[None, :], self.right_clips[None, :],
            loc=self.locs[None, :], scale=self.scales[None, :])
        # Weighted sum over the mixture components.
        return np.sum(self.weights[None, :] * comp_pdf, axis=1)
density_q = DensityQ()
# -
x_grid = np.linspace(-2.0, 2.0, 250)
plt.plot(x_grid, density_p(x_grid))
plt.plot(x_grid, density_q(x_grid))
plt.show()
print(scipy.integrate.quad(density_p, -2.0, 2.0))
print(scipy.integrate.quad(density_q, -2.0, 2.0))
# +
f_val = density_p(x_grid) / (density_p(x_grid) + density_q(x_grid))
plt.plot(x_grid, f_val)
plt.show()
print(np.min(f_val), np.max(f_val))
print(f_val[0], f_val[-1])
# +
int_x_grid = np.linspace(-2.0, 2.0, 15000)
f_grid = np.linspace(0.0, 1.0, 150)
def f(x):
    """Optimal discriminator f*(x) = p(x) / (p(x) + q(x)).

    Evaluates density_p once per call instead of twice.
    """
    p = density_p(x)
    return p / (p + density_q(x))
c_pf_p = []
c_pf_q = []
for f_value in tqdm.notebook.tqdm(f_grid):
def indicator_f_p(x):
ind = np.less_equal(f(x), f_value).astype(np.float64)
return ind * density_p(x)
def indicator_f_q(x):
ind = np.less_equal(f(x), f_value).astype(np.float64)
return ind * density_q(x)
y_ind_f_p = indicator_f_p(int_x_grid)
c_pf_p.append(np.trapz(y_ind_f_p, int_x_grid))
y_ind_f_q = indicator_f_q(int_x_grid)
c_pf_q.append(np.trapz(y_ind_f_q, int_x_grid))
c_pf_p = np.array(c_pf_p)
c_pf_q = np.array(c_pf_q)
d_pf_p = np.diff(c_pf_p) / (f_grid[1] - f_grid[0])
d_pf_q = np.diff(c_pf_q) / (f_grid[1] - f_grid[0])
# -
plt.plot(f_grid, c_pf_p)
plt.plot(f_grid, c_pf_q)
plt.show()
# +
from scipy.ndimage import gaussian_filter1d
sigma = 1.2
d_pf_p_smooth = scipy.ndimage.gaussian_filter1d(d_pf_p, sigma=sigma)
d_pf_q_smooth = scipy.ndimage.gaussian_filter1d(d_pf_q, sigma=sigma)
f_min = np.min(f_val)
f_max = np.max(f_val)
fig = plt.figure(figsize=(12, 9))
plt.grid(linestyle='--', dashes=(5, 10))
plt.plot(f_grid[1:], d_pf_p, c=palette[0], linewidth=5, alpha=0.5)
plt.plot(f_grid[1:], d_pf_q, c=palette[1], linewidth=5, alpha=0.5)
plt.xlim([0.0, 1.0])
plt.xticks(np.linspace(0.0, 1.0, 11), fontsize=30)
plt.yticks(fontsize=30)
plt.show()
# +
x_1 = -1.0
x_2 = 1.57
px_1, px_2 = np.interp([x_1, x_2], x_grid, density_p(x_grid))
qx_1, qx_2 = np.interp([x_1, x_2], x_grid, density_q(x_grid))
fx_1 = px_1 / (px_1 + qx_1)
fx_2 = px_2 / (px_2 + qx_2)
fx_avg = 0.5 * (fx_1 + fx_2)
d_pf_p_a = np.interp(fx_avg, f_grid[1:], d_pf_p)
d_pf_q_a = np.interp(fx_avg, f_grid[1:], d_pf_q)
d_pf_p_smooth_a = np.interp(fx_avg, f_grid[1:], d_pf_p_smooth)
d_pf_q_smooth_a = np.interp(fx_avg, f_grid[1:], d_pf_q_smooth)
print(px_1, qx_1, fx_1)
print(px_2, qx_2, fx_2)
print(d_pf_p_a, d_pf_q_a, d_pf_p_a / (d_pf_p_a + d_pf_q_a))
print(d_pf_p_smooth_a, d_pf_q_smooth_a, d_pf_p_smooth_a / (d_pf_p_smooth_a + d_pf_q_smooth_a))
# +
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(13, 21))
plt.sca(axes[0])
plt.grid(linestyle='--', dashes=(5, 10))
plt.plot(x_grid, density_p(x_grid), c=palette[0], linewidth=3, alpha=0.9, label='$p(x)$')
plt.plot(x_grid, density_q(x_grid), c=palette[1], linewidth=3, alpha=0.9, label='$q(x)$')
plt.fill_between(x_grid, 0, density_p(x_grid), color=palette[0], alpha=0.15)
plt.fill_between(x_grid, 0, density_q(x_grid), color=palette[1], alpha=0.15)
plt.plot([x_1, x_1], [0.0, px_1], c='k', linewidth=2, ls=(0, (6, 4)))
plt.plot([x_2, x_2], [0.0, px_2], c='k', linewidth=2, ls=(0, (6, 4)))
plt.plot([x_1], [0.0], marker='o', c='k', markersize=10)
plt.plot([x_1], [px_1], marker='o', c=palette[0], markersize=10, alpha=0.95)
plt.plot([x_1], [qx_1], marker='o', c=palette[1], markersize=10, alpha=0.95)
plt.plot([x_2], [0.0], marker='o', c='k', markersize=10)
plt.plot([x_2], [px_2], marker='o', c=palette[0], markersize=10, alpha=0.95)
plt.plot([x_2], [qx_2], marker='o', c=palette[1], markersize=10, alpha=0.95)
plt.annotate('$x_1$', fontsize=36, xy=(x_1, 0.0), xycoords='data', xytext=(x_1 - 0.09, 0.00),
horizontalalignment='right', verticalalignment='bottom')
plt.annotate('$x_2$', fontsize=36, xy=(x_2, 0.0), xycoords='data', xytext=(x_2 - 0.09, 0.00),
horizontalalignment='right', verticalalignment='bottom')
plt.legend(fontsize=40, handlelength=1.2)
plt.xlabel('$x$', fontsize=40)
plt.ylabel('Density', fontsize=40)
plt.xlim([-2.1, 2.1])
plt.ylim([-0.009, 0.49])
plt.xticks(fontsize=30)
plt.yticks(fontsize=30)
plt.sca(axes[1])
plt.grid(linestyle='--', dashes=(5, 10))
plt.axhline(fx_avg, c='k', ls=(0, (6, 4)))
plt.plot([x_1, x_1], [0.0, fx_1], c='k', linewidth=2, ls=(0, (6, 4)))
plt.plot([x_2, x_2], [0.0, fx_2], c='k', linewidth=2, ls=(0, (6, 4)))
plt.plot([x_1], [0.0], marker='o', c='k', markersize=10)
plt.plot([x_1], [fx_1], marker='o', c=palette[3], markersize=10, alpha=0.95)
plt.plot([x_2], [0.0], marker='o', c='k', markersize=10)
plt.plot([x_2], [fx_2], marker='o', c=palette[3], markersize=10, alpha=0.95)
plt.plot([-2.1], [fx_avg], marker='o', c='k', markersize=10)
plt.plot(x_grid, f_val, c=palette[3], linewidth=3, label='$f^*(x) = \\frac{p(x)}{p(x) + q(x)}$')
plt.annotate('$x_1$', fontsize=36, xy=(x_1, 0.0), xycoords='data', xytext=(x_1 - 0.09, 0.00),
horizontalalignment='right', verticalalignment='bottom')
plt.annotate('$x_2$', fontsize=36, xy=(x_2, 0.0), xycoords='data', xytext=(x_2 - 0.09, 0.00),
horizontalalignment='right', verticalalignment='bottom')
plt.annotate('$t_{1,2}$', fontsize=36, xy=(-2.0, fx_avg), xycoords='data', xytext=(-2.05, fx_avg + 0.01),
horizontalalignment='left', verticalalignment='bottom')
plt.annotate('$f^*(x_1) = t_{1,2} = f^*(x_2)$', fontsize=32, xy=(0.27, fx_avg), xycoords='data', xytext=(0.27, fx_avg - 0.07),
horizontalalignment='center', verticalalignment='top')
plt.annotate('', xy=(x_1, fx_1), xycoords='data',
xytext=(-0.65, 0.57), textcoords='data',
arrowprops=dict(arrowstyle='->', facecolor='black', shrinkB=15, lw=2, mutation_scale=30),
horizontalalignment='right', verticalalignment='top')
plt.annotate('', xy=(x_2, fx_2), xycoords='data',
xytext=(1.15, 0.57), textcoords='data',
arrowprops=dict(arrowstyle='->', facecolor='black', shrinkB=15, lw=2, mutation_scale=30),
horizontalalignment='right', verticalalignment='top')
plt.xlim([-2.1, 2.1])
plt.ylim([-0.02, 1.02])
plt.legend(fontsize=37, handlelength=1.2, handleheight=2.2)
plt.xlabel('$x$', fontsize=40)
plt.ylabel('$f^*(x)$', fontsize=40)
plt.xticks(fontsize=30)
plt.yticks(np.linspace(0.0, 1.0, 11), fontsize=30)
plt.savefig('./figures/pushforward_part_1.pdf', format='pdf', bbox_inches='tight')
plt.show()
# -
np.where(np.abs(f_val - 0.636) <= 3e-3)[0]
x_grid[[9, 30, 218, 240]]
f(-1.0), f(1.5)
# +
from scipy.ndimage import gaussian_filter1d
f_min = np.min(f_val)
f_max = np.max(f_val)
sigma = 1.2
delta = -0.04
range_mask = np.logical_and(f_grid[1:] >= f_min + delta, f_grid[1:] <= f_max - delta)
def filter_in_range(x, mask, sigma=1.2):
    """Gaussian-smooth ``x`` only where ``mask`` is True.

    Entries outside the mask are returned unchanged. ``sigma`` defaults to
    the notebook-level smoothing bandwidth (1.2) so existing two-argument
    calls behave exactly as before; previously the function silently read
    the module-global ``sigma``.
    """
    out = x.copy()
    out[mask] = scipy.ndimage.gaussian_filter1d(out[mask], sigma=sigma)
    return out
d_pf_p_filt = filter_in_range(d_pf_p, range_mask)
d_pf_q_filt = filter_in_range(d_pf_q, range_mask)
fig = plt.figure(figsize=(13, 9))
plt.grid(linestyle='--', dashes=(5, 10))
plt.plot(f_grid[1:], d_pf_p_filt, c=palette[0], linewidth=3, alpha=0.9, label='$[{f^*}_\\sharp p](t)$')
plt.plot(f_grid[1:], d_pf_q_filt, c=palette[1], linewidth=3, alpha=0.9, label='$[{f^*}_\\sharp q](t)$')
plt.fill_between(f_grid[1:], 0, d_pf_p_filt, color=palette[0], alpha=0.15)
plt.fill_between(f_grid[1:], 0, d_pf_q_filt, color=palette[1], alpha=0.15)
plt.plot([fx_avg, fx_avg], [0.0, d_pf_p_smooth_a], c='k', linewidth=2, ls=(0, (4, 3)))
plt.plot([fx_avg], [0.0], marker='o', color='k', markersize=10)
plt.plot([fx_avg], [d_pf_p_smooth_a], marker='o', color=palette[0], markersize=10, alpha=0.95)
plt.plot([fx_avg], [d_pf_q_smooth_a], marker='o', color=palette[1], markersize=10, alpha=0.95)
plt.annotate('$t_{1,2}$', fontsize=32, xy=(fx_avg, 0.0), xycoords='data', xytext=(fx_avg - 0.02, 0.00),
horizontalalignment='right', verticalalignment='bottom')
plt.annotate('$\\frac{[{f^*}_\\sharp p](t)}{[{f^*}_\\sharp p](t) + [{f^*}_\\sharp q](t)} = t$', fontsize=37, xy=(0.5, 5.5), xycoords='data', xytext=(0.5, 8.5),
horizontalalignment='center', verticalalignment='top')
plt.annotate('$[{f^*}_\\sharp p](t_{1, 2})$', fontsize=28, xy=(fx_avg, d_pf_p_smooth_a),
xycoords='data', xytext=(fx_avg + 0.35, d_pf_p_smooth_a + 0.04),
horizontalalignment='right', verticalalignment='bottom',
arrowprops=dict(arrowstyle='->', facecolor='black', shrinkB=15, lw=2, mutation_scale=30))
plt.annotate('$[{f^*}_\\sharp q](t_{1,2})$', fontsize=28, xy=(fx_avg, d_pf_q_smooth_a),
xycoords='data', xytext=(fx_avg + 0.35, d_pf_q_smooth_a + 0.04),
horizontalalignment='right', verticalalignment='bottom',
arrowprops=dict(arrowstyle='->', facecolor='black', shrinkB=15, lw=2, mutation_scale=30))
plt.legend(fontsize=28, handlelength=1.2)
plt.xlabel('$t$', fontsize=35)
plt.ylabel('Pushforward density', fontsize=40)
plt.ylim([-0.15, 9.5])
plt.xticks(np.linspace(0.0, 1.0, 11), fontsize=30)
plt.yticks(fontsize=30)
plt.savefig('./figures/pushforward_part_2.pdf', format='pdf', bbox_inches='tight')
plt.show()
| support_alignment/notebooks/disc_pushforward.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JSJeong-me/KOSA-Big-Data_Vision/blob/main/Preprocessing/0915-regression_feature.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="olive-decrease"
# ### Regression Feature Selection:
# (Numerical Input, Numerical Output)
# + [markdown] id="saved-values"
# Feature selection is performed using Pearson’s Correlation Coefficient via the f_regression() function.
# + id="political-track"
# pearson's correlation feature selection for numeric input and numeric output
from sklearn.datasets import make_regression
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
# + id="adjacent-private"
# generate dataset
X, y = make_regression(n_samples=10000, n_features=500, n_informative=10)
# + id="o81zk1HseJQI" outputId="26cf6f77-322e-494c-915d-dd01ec6c8de1" colab={"base_uri": "https://localhost:8080/"}
X.shape
# + id="vRGDAW9ReMJL" outputId="d6f1f4b4-6033-4277-d12e-4707993d8f98" colab={"base_uri": "https://localhost:8080/"}
X[:3,:]
# + id="gcRwjPwIeMF_" outputId="1a396834-e058-4200-d970-2c85c9c76df9" colab={"base_uri": "https://localhost:8080/"}
y[:3]
# + id="ctXhRxFdeMCQ"
# + id="5e5wB46CdjUB"
# define feature selection
fs = SelectKBest(score_func=f_regression, k=50)
# apply feature selection
# + id="OL1v9R0ge4Ss" outputId="a9c8b7d7-dac7-4bd0-a96a-dbcacb2397d1" colab={"base_uri": "https://localhost:8080/"}
X_selected = fs.fit_transform(X, y)
print(X_selected.shape)
# + id="7b7BooQGfLaI" outputId="bf6af650-aa0d-4623-e88d-4b8a0b99273f" colab={"base_uri": "https://localhost:8080/"}
X_selected.shape
# + id="4-9tw8m8fixK" outputId="2e0835c1-4675-44f1-ab0b-c2534686f1b2" colab={"base_uri": "https://localhost:8080/"}
type(X_selected)
# + id="miQ1m6gofyMZ"
import pandas as pd
# + id="lFf0M0o5gDvh"
df_y = pd.DataFrame(y)
# + id="mmoP4dGSfisq"
df = pd.DataFrame(X_selected)
# + id="ddiRj6-rgSwm"
# + id="RprnxHQsfijI" outputId="70fdc7ef-7687-45db-d1a5-cad8dd22c9d2" colab={"base_uri": "https://localhost:8080/", "height": 516}
df.corr().round()
# + id="spjnlqq2fiWq"
| Preprocessing/0915-regression_feature.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Logistic Regression(3)
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(666)
X = np.random.normal(0, 1, size=(200, 2))
y = np.array(X[:, 0]**2 + X[:, 1]**2 < 1.5, dtype='int')
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
# ### 1. 使用逻辑回归
from LogisticReg.LogisticRegression import LogisticRegression
log_reg = LogisticRegression()
log_reg.fit(X, y)
log_reg.score(X, y)
def plot_decision_boundary(model, axis):
    """Plot a classifier's decision regions over axis = [xmin, xmax, ymin, ymax].

    Builds a dense grid (100 points per unit along each direction),
    predicts a class label for every grid point and draws filled contours
    of the predicted labels.
    """
    x0, x1 = np.meshgrid(
        np.linspace(axis[0], axis[1], int((axis[1] - axis[0])*100)).reshape(1, -1),
        np.linspace(axis[2], axis[3], int((axis[3] - axis[2])*100)).reshape(-1, 1)
    )
    # Flatten the grid into an (n_points, 2) feature matrix.
    X_new = np.c_[x0.ravel(), x1.ravel()]
    y_predic = model.predict(X_new)
    zz = y_predic.reshape(x0.shape)
    from matplotlib.colors import ListedColormap
    custom_cmap = ListedColormap(['#EF9A9A', '#FFF590', '#90CAF9'])
    # NOTE(review): ``linewidth`` is not a contourf parameter — it appears
    # to be ignored here; consider removing it.
    plt.contourf(x0, x1, zz, linewidth=5, cmap=custom_cmap)
# **绘制决策边界**
plot_decision_boundary(log_reg, axis=[-4, 4, -4, 4])
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
# ### 2. 使用多项式
# +
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import StandardScaler
def PolynomialLogisticRegression(degree):
    """Pipeline: polynomial feature expansion of the given degree, then
    standard scaling, then logistic regression.

    Note: ``LogisticRegression`` here is the custom implementation
    imported from LogisticReg earlier in this notebook.
    """
    return Pipeline([
        ('poly', PolynomialFeatures(degree=degree)),
        ('std_scaler', StandardScaler()),
        ('log_reg', LogisticRegression())
    ])
# -
poly_log_reg = PolynomialLogisticRegression(degree=2)
poly_log_reg.fit(X, y)
poly_log_reg.score(X, y)
plot_decision_boundary(poly_log_reg, axis=[-4, 4, -4, 4])
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
# ### 3. 使用正则化(scikit-learn)
# **scikit-learn中使用的正则化方式:**
# $$C \cdot J(\theta) + L_1$$
# $$C \cdot J(\theta) + L_2$$
np.random.seed(666)
X = np.random.normal(0, 1, size=(200, 2))
y = np.array(X[:, 0]**2 + X[:,1] < 1.5, dtype='int')
# 为样本添加噪音
for _ in range(20):
y[np.random.randint(200)] = 1
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=666)
# +
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(solver='lbfgs')
log_reg.fit(X_train, y_train)
# -
log_reg.score(X_train, y_train)
log_reg.score(X_test, y_test)
plot_decision_boundary(log_reg, axis=[-4, 4, -4, 4])
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
# #### scikit-learn中多项式逻辑回归
def PolynomialLogisticRegression(degree):
    """Pipeline: polynomial feature expansion of the given degree, then
    standard scaling, then logistic regression.

    Note: by this point ``LogisticRegression`` has been rebound to
    scikit-learn's implementation by the import in a previous cell.
    """
    return Pipeline([
        ('poly', PolynomialFeatures(degree=degree)),
        ('std_scaler', StandardScaler()),
        ('log_reg', LogisticRegression(solver='lbfgs'))
    ])
poly_log_reg = PolynomialLogisticRegression(degree=2)
poly_log_reg.fit(X_train, y_train)
poly_log_reg.score(X_train, y_train)
poly_log_reg.score(X_test, y_test)
plot_decision_boundary(poly_log_reg, axis=[-4, 4, -4, 4])
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
# **增加多项式**
poly_log_reg2 = PolynomialLogisticRegression(degree=30)
poly_log_reg2.fit(X_train, y_train)
plot_decision_boundary(poly_log_reg2, axis=[-4, 4, -4, 4])
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
poly_log_reg2.score(X_train, y_train)
poly_log_reg2.score(X_test, y_test)
# - 当 degree = 20 时,训练集分数提高,但是预测数据集分数下降,说明你模型泛化能力下降
# - 即模型发生了过拟合
# #### 加入超参数 C 对模型正则化
# - 减小损失函数的影响,增大正则化的影响
# C: coefficient on the data-fit loss — scikit-learn minimizes
# C * J(theta) + penalty, so a smaller C means stronger regularization.
def PolynomialLogisticRegression(degree, C):
    """Pipeline: polynomial features of the given degree, standard scaling,
    then an L2-regularized logistic regression with tunable inverse
    regularization strength ``C``."""
    return Pipeline([
        ('poly', PolynomialFeatures(degree=degree)),
        ('std_scaler', StandardScaler()),
        ('log_reg', LogisticRegression(solver='lbfgs', C=C))
    ])
poly_log_reg3 = PolynomialLogisticRegression(degree=20, C=0.1)
poly_log_reg3.fit(X_train, y_train)
plot_decision_boundary(poly_log_reg3, axis=[-4, 4, -4, 4])
plt.scatter(X[y==0, 0], X[y==0, 1])
plt.scatter(X[y==1, 0], X[y==1, 1])
poly_log_reg3.score(X_train, y_train)
poly_log_reg3.score(X_test, y_test)
# - 可以看出,此时模型泛化能力提高
| ML-Base-MOOC/chapt-7 Logistic Regression/03-Logistic Regression(3).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# df, data-frame
df = pd.read_csv('ceaps_dataset.csv', skiprows=1, delimiter=';', encoding="latin1")
# +
# Normalize DATA strings before parsing: unify separators and patch year
# typos known to occur in this dataset.
df['DATA'] = df['DATA'].str.replace('/', '-')
df['DATA'] = df['DATA'].str.replace('5017', '2017')
df['DATA'] = df['DATA'].str.replace('3016', '2017')
# NOTE(review): these are plain substring replacements, not anchored to the
# year field — '216'/'206'/'200' could also match inside otherwise-valid
# digit runs (e.g. '2004' -> '20164'); verify against the raw data.
df['DATA'] = df['DATA'].str.replace('216', '2016')
df['DATA'] = df['DATA'].str.replace('206', '2016')
df['DATA'] = df['DATA'].str.replace('200', '2016')
# NOTE(review): no dayfirst/format given, so pandas guesses the format;
# if dates are DD-MM-YYYY (Brazilian convention), pass dayfirst=True.
df['DATA'] = pd.to_datetime(df['DATA'])
df['DIA'] = df['DATA'].dt.day
# Decimal comma -> decimal point so the value can be parsed as numeric.
df['VALOR_REEMBOLSADO'] = df['VALOR_REEMBOLSADO'].str.replace(',', '.')
# -
df['VALOR_REEMBOLSADO'] = pd.to_numeric(df['VALOR_REEMBOLSADO'])
df.dtypes
df['MAIOR_QUE_100'] = 0
df.loc[df['VALOR_REEMBOLSADO'] > 100, 'MAIOR_QUE_100'] = 1
df.columns
df = df.filter(['ANO', 'MES', 'DIA', 'SENADOR', 'TIPO_DESPESA', 'CNPJ_CPF', 'FORNECEDOR',
'DOCUMENTO', 'DATA', 'DETALHAMENTO', 'VALOR_REEMBOLSADO',
'MAIOR_QUE_100'])
df['TIPO_DESPESA'].unique()
# +
reducer = {
'Aluguel de imóveis para escritório político, compreendendo despesas concernentes a eles.': 'Aluguel de imóvel',
'Aquisição de material de consumo para uso no escritório político, inclusive aquisição ou locação de software, despesas postais, aquisição de publicações, locação de móveis e de equipamentos. ': 'Aquisição de material',
'Contratação de consultorias, assessorias, pesquisas, trabalhos técnicos e outros serviços de apoio ao exercício do mandato parlamentar': 'Consultorias',
'Locomoção, hospedagem, alimentação, combustíveis e lubrificantes': 'Logística',
'Passagens aéreas, aquáticas e terrestres nacionais': 'Passagens',
'Divulgação da atividade parlamentar': 'Divulgação',
'Serviços de Segurança Privada': 'Serviços'
}
df['TIPO_DESPESA'] = df['TIPO_DESPESA'].map(reducer)
# +
# df.drop('')
# -
df.to_csv('dados_limpos.csv', index=False, encoding='latin1')
| projeto_gastos_politicos/.ipynb_checkpoints/Data cleaning-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (master thesis)
# language: python
# name: masterthesis
# ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import pickle
# handling colab problem
import os
if not 'data' in os.listdir('..'):
print('needs data folder. imports through git')
# !git clone https://github.com/JakartaLaw/speciale.git
print(os.listdir())
os.chdir('speciale//src')
np.random.seed(42)
from environments import EnvironmentModel1 as Environment
from environments import translate_action_model1 as translate_action
from environments import reward_scaler_model1 as reward_scaler
from environments import STATES_MU1, STATES_SIGMA1
from environments.model1 import scale_states
from agents import DQIterationAgent as Agent
size_multiplier = 0.7
FIGSIZE = (14*size_multiplier, 8*size_multiplier)
agent = Agent(0.005, 0.99 ,16, 8, 5, 4, STATES_MU1, STATES_SIGMA1)
agent.load_model("models//dqfi_model")
with open('..//data//women_hours_empirical', 'rb') as f:
women_hours = pickle.load(f)
plt.plot(women_hours)
# +
# Model parameters for the simulation environment.
# NOTE(review): the original dict listed 'sigma_epsilon' twice (0.1, then
# 15.11); Python keeps the last value, so only 15.11 is retained here.
parameters = {
    'beta_L': 3,
    'S_min': 120.0,
    'alpha': 4.609,
    'eta_G': 0.164,
    'eta_G_sq': 0.015,
    'delta': 0.209,
    'sigma_epsilon': 15.11,
    'omega': 3.5,
}

EPISODES = 30
# +
def simulate_observations(parameters, n_episodes):
    """Roll out the trained agent in the environment for ``n_episodes``.

    The agent is made greedy (epsilon forced to 0) and its Q-value scaler
    is pinned to fixed statistics before simulation.

    Returns
    -------
    (df_rewards, df_actions, df_states) : three DataFrames, each tagged
    with the episode number and the period index Q.
    """
    env = Environment(**parameters)
    rewards_history, action_history, states_history = list(), list(), list()

    # Greedy evaluation: disable exploration and fix the Q-scaler.
    agent.epsilon = 0.0
    agent.q_scaler.mu = 0.068
    agent.q_scaler.sigma = 0.16

    for e in range(n_episodes):
        env.reset()
        state = env.states
        # Periods start at 18; episodes normally end via ``done`` well
        # before the hard cap of 500.
        for time in range(18, 500):
            action, q_vals = agent.act(state)
            _action = translate_action(action)
            next_state, reward, done, _ = env.step(_action)
            rewards_history.append([reward, e, time])
            action_history.append([_action, e, time])
            states_history.append(np.append(state, [e, time, _action]))
            state = next_state
            if done:
                break

    df_DQAgent = pd.DataFrame(rewards_history, columns=['rewards', 'episode', 'Q'])
    df_DQAgent_action = pd.DataFrame(action_history, columns=['actions', 'episode', 'Q'])
    states_cols = ['Q', 'G', 'K', 'Z', 'beta_L', 'episode', 'time', 'action']
    df_DQAgent_states = pd.DataFrame(states_history, columns=states_cols)
    return df_DQAgent, df_DQAgent_action, df_DQAgent_states
def get_simulated_mean(df_action):
    """Average the positive (working) actions per period Q.

    Returns a length-90 vector whose entry ``ix`` is the mean positive
    action in period ``ix`` across episodes, or 0.0 for periods with no
    positive action.
    """
    sim_avg_actions = (df_action.drop('episode', axis=1)
                       .loc[df_action['actions'] > 0]
                       .groupby('Q').mean())
    sim_vals = np.zeros(shape=90)
    for ix in range(len(sim_vals)):
        obs = sim_avg_actions.loc[sim_avg_actions.index == ix]
        if len(obs) == 1:
            # .iloc[0] instead of float(Series): calling float() on a
            # one-element Series is deprecated in pandas.
            sim_vals[ix] = obs['actions'].iloc[0]
    return sim_vals
def trim_obs(array):
    """Return the slice [18:61) of the input sequence.

    Used to align simulated and empirical series to the same window.
    """
    start, stop = 18, 61
    return array[start:stop]
def calc_loss(true_vals, sim_vals):
    """Mean squared error between two equal-length arrays."""
    residuals = true_vals - sim_vals
    return np.mean(residuals ** 2)
# -
def objective(x):
    """MSE objective for calibrating beta_L against the empirical series.

    Simulates the model with beta_L = x and compares the mean simulated
    action profile with the module-level `women_hours` series over
    periods 18..60.
    """
    print('\n new iteration')
    np.random.seed(10)  # fixed seed so every evaluation sees the same shocks
    n_episodes = 300
    beta_L = x
    parameters = {
        'beta_L': beta_L,
        'S_min': 120.0,
        'alpha': 4.609,
        'eta_G': 0.164,
        'eta_G_sq': 0.015,
        'delta': 0.209,
        # The original dict listed 'sigma_epsilon' twice (0.1 then 15.11);
        # the later value wins in a dict literal, so only 15.11 is kept.
        'sigma_epsilon': 15.11,
        'omega': 3.5,
    }
    # BUG FIX: simulate_observations returns three frames; the original
    # two-name unpack raised "too many values to unpack" at runtime.
    df, df_action, _ = simulate_observations(parameters, n_episodes)
    sim_vals = get_simulated_mean(df_action)
    true_vals, sim_vals = trim_obs(women_hours), trim_obs(sim_vals)
    mse = calc_loss(true_vals, sim_vals)
    print('beta L:', beta_L, ' mse:', mse)
    return mse
# +
# Grid search over beta_L: np.linspace(0.0, 8.0) yields 50 evenly spaced values.
beta_Ls, mses = np.linspace(0.0, 8.0), list()
for beta_L in beta_Ls:
    mse = objective(beta_L)
    mses.append(mse)
# +
# Plot log-MSE against beta_L and save the figure for the report.
f, ax = plt.subplots(1, 1, figsize=FIGSIZE)
ax.set_xlabel('beta L')
ax.set_ylabel('log(mse)')
ax.set_title('Estimation of optimal Beta L value')
ax.plot(beta_Ls, np.log(mses))
f.savefig('..//figures//dqi_model1_estimation_Beta_L.png')
# +
# Final large simulation at the calibrated beta_L value.
parameters = {
    'beta_L': 3.4,
    'S_min': 120.0,
    'alpha': 4.609,
    'eta_G': 0.164,
    'eta_G_sq': 0.015,
    'delta': 0.209,
    # The original dict listed 'sigma_epsilon' twice (0.1 then 15.11); only
    # the last value takes effect in a dict literal, so 15.11 is kept.
    'sigma_epsilon': 15.11,
    'omega': 3.5,
}
EPISODES = 5000
# -
df, df_action, df_states = simulate_observations(parameters, EPISODES)
# Persist the simulated histories for the analysis notebooks.
df.to_pickle('..//data//dqi_model1_opt_beta_simulations.pkl')
df_action.to_pickle('..//data//dqi_actions_model1_opt_beta_simulations.pkl')
df_states.to_pickle('..//data//dqi_states_model1_opt_beta_simulations.pkl')
df_states
| src/DQI Estimation - extended.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Decision Trees
import numpy as np
def entropy_basic(p1, p2):
    """Shannon entropy (in bits) of a two-class distribution (p1, p2).

    Zero-probability terms contribute nothing (lim p->0 of -p*log2(p) = 0),
    so they are skipped to avoid log2(0).
    """
    total = 0
    for p in (p1, p2):
        if p != 0:
            total -= p * np.log2(p)
    return total
# Sanity checks: a 50/50 split has maximal entropy (1 bit); a pure node
# has zero entropy.
entropy_basic(.5,.5)
entropy_basic(1,0)
entropy_basic(1,0)  # NOTE(review): duplicate of the previous cell
entropy_basic(.8,0.2)
| 13. Decision Trees/Class - DT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Elasticity in 3D
#
# ## Introduction
#
# This example provides a demonstration of using PyMKS to compute the linear strain field for a two-phase composite material in 3D, and presents a comparison of the computational efficiency of MKS, when compared with the finite element method. The example first provides information on the boundary conditions, used in MKS. Next, delta microstructures are used to calibrate the first-order influence coefficients. The influence coefficients are then used to compute the strain field for a random microstructure. Lastly, the calibrated influence coefficients are scaled up and are used to compute the strain field for a larger microstructure and compared with results computed using finite element analysis.
# ### Elastostatics Equations and Boundary Conditions
#
# A review of the governing field equations for elastostatics can be found in the [Linear Elasticity in 2D](./elasticity.ipynb) example. The same equations are used in the example with the exception that the second lame parameter (shear modulus) $\mu$ is defined differently in 3D.
#
# $$ \mu = \frac{E}{2(1+\nu)} $$
#
#
# In general, generating the calibration data for the MKS requires boundary conditions that are both periodic and displaced, which are quite unusual boundary conditions. The ideal boundary conditions are given by:
#
# $$ u(L, y, z) = u(0, y, z) + L\bar{\varepsilon}_{xx} $$
# $$ u(0, L, L) = u(0, 0, L) = u(0, L, 0) = u(0, 0, 0) = 0 $$
# $$ u(x, 0, z) = u(x, L, z) $$
# $$ u(x, y, 0) = u(x, y, L) $$
#
# +
import numpy as np
from sklearn.pipeline import Pipeline
import dask.array as da
from pymks import (
generate_delta,
solve_fe,
plot_microstructures,
PrimitiveTransformer,
LocalizationRegressor,
coeff_to_real
)
# +
#PYTEST_VALIDATE_IGNORE_OUTPUT
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# ## Modeling with MKS
#
# ### Calibration Data and Delta Microstructures
#
# The first-order MKS influence coefficients are all that is needed to compute a strain field of a random microstructure, as long as the ratio between the elastic moduli (also known as the contrast) is less than 1.5. If this condition is met, we can expect a mean absolute error of 2% or less, when comparing the MKS results with those computed using finite element methods [[1]](#References).
#
# Because we are using distinct phases and the contrast is low enough to only need the first order coefficients, delta microstructures and their strain fields are all that we need to calibrate the first-order influence coefficients [[2]](#References).
#
# The `generate_delta` function can be used to create the two delta microstructures needed to calibrate the first-order influence coefficients for a two phase microstructure. This function uses the Python module [SfePy](http://sfepy.org/doc-devel/index.html) to compute the strain fields using finite element methods.
# +
# Build the two delta microstructures (one per phase) and show the
# midplane slice of each.
x_delta = generate_delta(n_phases=2, shape=(9, 9, 9)).persist()
plot_microstructures(
    x_delta[0, x_delta.shape[1] // 2],
    x_delta[1, x_delta.shape[1] // 2],
    titles=['[0]', '[1]'],
    cmap='gray'
)
# -
# Using delta microstructures for the calibration of the first-order influence coefficients is essentially the same as using a unit [impulse response](http://en.wikipedia.org/wiki/Impulse_response) to find the kernel of a system in signal processing. Delta microstructures are composed of only two phases. One phase is located only at the center cell of the microstructure, and the rest made up of the other phase.
# ### Generating Calibration Data
#
# This example models a two-phase microstructure with elastic moduli values of 80 and 120 and Poisson's ratio values of 0.3 and 0.3, respectively. The macroscopic imposed strain is set to 0.02. All of these parameters used in the simulation are used in the `solve_fe` function to calculate the elastic strain.
# +
def strain_xx(x):
    """Return the xx strain component of the FE solution for microstructure x.

    (Converted from a lambda assigned to a name — PEP 8 E731 — so it can
    carry a docstring and a usable traceback name.)
    """
    return solve_fe(
        x,
        elastic_modulus=(80, 120),
        poissons_ratio=(0.3, 0.3),
        macro_strain=0.02
    )['strain'][..., 0]
y_delta = strain_xx(x_delta).persist()
# -
# -
# Observe the strain field.
plot_microstructures(
    y_delta[0, x_delta.shape[1] // 2, :, :],
    titles=[r'$\mathbf{\varepsilon_{xx}}$']
)
# ### Calibrating First Order Influence Coefficients
#
# Calibrate the influence coefficients by creating a model pipeline using the `PrimitiveTransformer` and the `LocalizationRegressor`.
# Two-phase discretization (n_state=2) followed by the MKS localization fit.
model = Pipeline(steps=[
    ('discretize', PrimitiveTransformer(n_state=2, min_=0.0, max_=1.0)),
    ('regressor', LocalizationRegressor())
])
model.fit(x_delta, y_delta);
# Observe the influence coefficients.
# +
def to_real(pipeline):
    """Convert the fitted regressor's frequency-space coefficients to real space.

    (Converted from a lambda assigned to a name — PEP 8 E731.)
    """
    return coeff_to_real(pipeline.steps[1][1].coeff).real
coeff = to_real(model)
plot_microstructures(
    coeff[x_delta.shape[1] // 2, :, :, 0],
    coeff[x_delta.shape[1] // 2, :, :, 1],
    titles=['Influence coeff [0]', 'Influence coeff [1]']
)
# -
# The influence coefficients have a Gaussian-like shape.
# ### Predicting the Strain Field for a Random Microstructure
#
# Use the calibrated `model` to compute the strain field for a random two-phase microstructure and compare it with the results from a finite element simulation. The `strain_xx` helper function is used to generate the strain field.
# +
# NBVAL_IGNORE_OUTPUT
# Generate a random two-phase microstructure and solve it with FE (timed
# here as the baseline to compare against the MKS prediction below).
da.random.seed(99)
x_data = da.random.randint(2, size=(1,) + x_delta.shape[1:]).persist()
# %time y_data = strain_xx(x_data).persist()
# -
plot_microstructures(
    x_data[0, x_delta.shape[1] // 2, :, :],
    titles=['Microstructure']
)
plot_microstructures(
    y_data[0, x_delta.shape[1] // 2, :, :],
    titles=[r'$\mathbf{\varepsilon_{xx}}$']
)
# **Note that the calibrated influence coefficients can only be used to reproduce the simulation with the same boundary conditions that they were calibrated with.**
#
# Now to get the strain field from the model, pass the same microstructure to the `predict` method.
# +
# NBVAL_IGNORE_OUTPUT
# The MKS prediction is much faster than the FE solve timed above.
# %time y_predict = model.predict(x_data).persist()
# -
# Finally, compare the results from finite element simulation and the MKS model.
plot_microstructures(
    y_data[0, x_delta.shape[1] // 2, :, :],
    y_predict[0, x_delta.shape[1] // 2, :, :],
    titles=[
        r'$\mathbf{\varepsilon_{xx}}$ - FE',
        r'$\mathbf{\varepsilon_{xx}}$ - MKS'
    ]
)
# Observe the difference between the two plots.
plot_microstructures(
    (y_data - y_predict)[0, x_delta.shape[1] // 2, :, :],
    titles=['FE - MKS']
)
# The MKS model is able to capture the strain field for the random microstructure after being calibrated with delta microstructures.
# ## Resizing the Coefficients to Use on Larger Microstructures
#
# The influence coefficients that were calibrated on a smaller microstructure can be used to predict the strain field on a larger microstructure through spectral interpolation [[3]](#References), though the accuracy of the MKS model drops slightly. To demonstrate how this is done, let's generate a new larger $m$ by $m$ random microstructure and its strain field.
new_shape = tuple(np.array(x_delta.shape[1:]) * 3)  # 3x larger in every dimension
x_large = da.random.randint(2, size=(1,) + new_shape).persist()
# The influence coefficients that have already been calibrated need to be resized to match the shape of the new larger microstructure that we want to compute the strain field for. This can be done by passing the shape of the new larger microstructure into the `coeff_resize` method.
model.steps[1][1].coeff_resize(x_large[0].shape);
# Use the resized coefficients to calculate the strain field of the larger
# microstructure.  Note the coefficients can no longer be used for the
# smaller microstructures.
# +
# NBVAL_IGNORE_OUTPUT
# %time y_large = model.predict(x_large).persist()
# -
plot_microstructures(y_large[0, x_delta.shape[1] // 2], titles=[r'$\mathbf{\varepsilon_{xx}}$'])
#
# ## References
#
# <a id="ref1"></a>
# [1] <NAME>., <NAME>., <NAME>., *A new spectral framework for establishing localization relationships for elastic behavior of composites and their calibration to finite-element models*. Acta Materialia, 2008. 56 (10): p. 2272-2282 [doi:10.1016/j.actamat.2008.01.017](http://dx.doi.org/10.1016/j.actamat.2008.01.017).
#
# <a id="ref2"></a>
# [2] <NAME>., <NAME>, <NAME>, *Multi-scale modeling of elastic response of three-dimensional voxel-based microstructure datasets using novel DFT-based knowledge systems*. Acta Materialia, 2009. 58 (7): p. 2716-2725 [doi:10.1016/j.actamat.2010.01.007](http://dx.doi.org/10.1016/j.actamat.2010.01.007).
#
# <a id="ref3"></a>
# [3] <NAME>., <NAME>., <NAME>., *Computationally efficient database and spectral interpolation for fully plastic Taylor-type crystal plasticity calculations of face-centered cubic polycrystals*. International Journal of Plasticity 24 (2008) 1264–1276 [doi;10.1016/j.ijplas.2007.12.002](http://dx.doi.org/10.1016/j.ijplas.2007.12.002).
| notebooks/elasticity3d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="5qvrrMTrBVLj" colab_type="code" colab={}
# !pip install nk225op
# + id="uMwLFfDkC11J" colab_type="code" outputId="c361e178-544a-4e06-ec65-3e26ebeffca6" colab={"base_uri": "https://localhost:8080/", "height": 457}
# ex: fetch the latest Nikkei 225 option settlement-price data
# (February and March contract months, strike prices 20500 and 20750)
from nk225op import nk225op as nk
nk([201902,201903] , [20500,20750])
| doc/eg_nk225op.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [Ateliers: Technologies de l'intelligence Artificielle](https://github.com/wikistat/AI-Frameworks)
# <center>
# <a href="http://www.insa-toulouse.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/logo-insa.jpg" style="float:left; max-width: 120px; display: inline" alt="INSA"/></a>
# <a href="http://wikistat.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/wikistat.jpg" width=400, style="max-width: 150px; display: inline" alt="Wikistat"/></a>
# <a href="http://www.math.univ-toulouse.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/logo_imt.jpg" width=400, style="float:right; display: inline" alt="IMT"/> </a>
#
# </center>
# # Traitement Naturel du Langage (NLP) : Catégorisation de Produits Cdiscount
#
# Il s'agit d'une version simplifiée du concours proposé par Cdiscount et paru sur le site [datascience.net](https://www.datascience.net/fr/challenge). Les données d'apprentissage sont accessibles sur demande auprès de Cdiscount mais les solutions de l'échantillon test du concours ne sont pas et ne seront pas rendues publiques. Un échantillon test est donc construit pour l'usage de ce tutoriel. L'objectif est de prévoir la catégorie d'un produit à partir de son descriptif (*text mining*). Seule la catégorie principale (1er niveau, 47 classes) est prédite au lieu des trois niveaux demandés dans le concours. L'objectif est plutôt de comparer les performances des méthodes et technologies en fonction de la taille de la base d'apprentissage ainsi que d'illustrer sur un exemple complexe le prétraitement de données textuelles.
#
# Le jeux de données complet (15M produits) permet un test en vrai grandeur du **passage à l'échelle volume** des phases de préparation (*munging*), vectorisation (hashage, TF-IDF) et d'apprentissage en fonction de la technologie utilisée.
#
# La synthèse des résultats obtenus est développée par [Besse et al. 2016](https://hal.archives-ouvertes.fr/hal-01350099) (section 5).
# ## Partie 1-3 : Modèle d'apprentissage statistiques.
#
# Dans le calepin numéro 2, nous avons créé 2x7 matrices de features correspondant aux mêmes échantillons d'apprentissage et de validation des données textuelles de description d'objets de Cdiscount. Ces matrices ont été créées avec les méthodes suivantes.
#
# 1. `Count_Vectorizer`. `No hashing`.
# 2. `Count_Vectorizer`. `Hashing = 300`.
# 3. `TFIDF_vectorizer`. `No hashing`.
# 4. `TFIDF_vectorizer`. `Hashing = 300`.
# 5. `Word2Vec`. `CBOW`
# 6. `Word2Vec`. `Skip-Gram`
# 7. `Word2Vec`. `Pre-trained`
#
# Nous allons maintenant étudier les performances d'algorithmes de *machine learning* (`Régression logistique`, `Forêts aléatoires`, `Perceptron multicouche`) sur ces différents features.
# ## Librairies
# +
#Importation des librairies utilisées
import time
import numpy as np
import pandas as pd
import scipy as sc
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
import matplotlib.pyplot as plt
import seaborn as sb
sb.set_style("whitegrid")
DATA_DIR = "data/features"  # root directory of the precomputed feature matrices
# -
# ## Loading the data
# Load the response variable (level-1 product category) for the train and
# validation subsets.
Y_train = pd.read_csv("data/cdiscount_train_subset.csv").fillna("")["Categorie1"]
Y_valid = pd.read_csv("data/cdiscount_valid.csv").fillna("")["Categorie1"]
# Création d'un dictionnaire contenant les chemins ou des différents objets où sont stockés les matrices de features.
# +
# Map each feature-set name to the on-disk paths of its train/validation
# matrices and the loader needed: "npz" (sparse) for the count/tfidf
# vectorizers, "npy" (dense) for the word2vec embeddings.
features_path_dic = {}
parameters = [["count_no_hashing", None, "count"],
              ["count_300", 300, "count"],
              ["tfidf_no_hashing", None, "tfidf"],
              ["tfidf_300",300, "tfidf"]]
for name, nb_hash, vectorizer in parameters:
    x_train_path = DATA_DIR +"/vec_train_nb_hash_" + str(nb_hash) + "_vectorizer_" + str(vectorizer)+".npz"
    x_valid_path = DATA_DIR +"/vec_valid_nb_hash_" + str(nb_hash) + "_vectorizer_" + str(vectorizer)+".npz"
    dic = {"x_train_path" : x_train_path, "x_valid_path" : x_valid_path, "load" : "npz"}
    features_path_dic.update({name : dic})
parametersw2v = [["word2vec_cbow","cbow"],
                 ["word2vec_sg","sg"],
                 ["word2vec_online","online"]]
for name, mtype in parametersw2v:
    x_train_path = DATA_DIR +"/embedded_train_" + mtype+".npy"
    x_valid_path = DATA_DIR +"/embedded_valid_" + mtype+".npy"
    dic = {"x_train_path" : x_train_path, "x_valid_path" : x_valid_path, "load" : "npy"}
    features_path_dic.update({name : dic})
# -
# # Regression Logistique
# ## Apprentissage
#
# Le code suivant peut être très long. Vous pouvez dans un premier temps exécutez directement la cellule suivante ou les résultats de l'execution de cette cellules sont disponibles.
# +
# Grid-searched logistic regression on every feature set; timings and
# train/validation accuracies are collected into metadata_list_lr.
metadata_list_lr = []
param_grid = {"C" : [10,1,0.1]}  # inverse regularization strengths to search
#param_grid = {"C" : [1]}
for name, dic in features_path_dic.items():
    x_train_path = dic["x_train_path"]
    x_valid_path = dic["x_valid_path"]
    load = dic["load"]
    print("Load features : " + name)
    # Sparse .npz matrices come from the vectorizers, dense .npy arrays
    # from the word2vec embeddings.
    if load == "npz":
        X_train = sc.sparse.load_npz(x_train_path)
        X_valid = sc.sparse.load_npz(x_valid_path)
    else :
        X_train = np.load(x_train_path)
        X_valid = np.load(x_valid_path)
    print("start Learning :" + name)
    ts = time.time()
    gs = GridSearchCV(LogisticRegression(), param_grid=param_grid, verbose=15)
    gs.fit(X_train,Y_train.values)
    te=time.time()
    t_learning = te-ts
    print("start prediction :" + name)
    ts = time.time()
    score_train=gs.score(X_train,Y_train)
    score_valid=gs.score(X_valid,Y_valid)
    te=time.time()
    t_predict = te-ts
    metadata = {"name":name, "learning_time" : t_learning, "predict_time":t_predict, "score_train": score_train, "score_valid": score_valid}
    metadata_list_lr.append(metadata)
# Persist the collected metadata for the analysis cell below.
pickle.dump(metadata_list_lr, open("data/metadata_lr_part13.pkl","wb"))
# -
# ## Exploring the results
# +
metadata_list_lr = pickle.load(open("data/metadata_lr_part13.pkl","rb"))
metadata_list_lr_sorted = sorted(metadata_list_lr, key = lambda x : x["name"])
xlabelticks = [metadata["name"] for metadata in metadata_list_lr_sorted]
fig = plt.figure(figsize=(20,6))
# One subplot per metric (learning/predict time, train/valid accuracy).
key_plot = [key for key in metadata_list_lr[0].keys() if key !="name"]
for iplot, key in enumerate(key_plot):
    ax = fig.add_subplot(1,4,iplot+1)
    for i,metadata in enumerate(metadata_list_lr_sorted):
        # Per-metric scale and axis label (minutes / seconds / percent).
        if key=="learning_time":
            scale=60
            ylabel="Time(mn)"
        elif key=="predict_time":
            scale=1
            ylabel="Time(seconds)"
        else:
            scale=0.01
            ylabel = 'Accuracy (pcy)'
        ax.scatter(i,metadata[key]/scale, s=100)
        ax.text(i,metadata[key]/scale,"%.2f"%(metadata[key]/scale), ha="left", va="top")
    ax.set_xticks(np.arange(7))  # 7 feature sets
    ax.set_xticklabels(xlabelticks, rotation=45, fontsize=15, ha="right")
    ax.set_title(key, fontsize=20)
    ax.set_ylabel(ylabel, fontsize=15)
plt.tight_layout()
plt.show()
# -
# **Q** Comment expliquer le long de temps d'apprentissage de la regression logistique sur les modèles issues de Word2Vec?
#
# **Q** Comment expliquer la différence de qualité d'apprentissage en fonction du hashing ?
#
# # Random Forest
# +
# Same protocol as the logistic-regression cell above, with a random forest.
metadata_list_rf = []
param_grid = {"n_estimators" : [100,500]}
for name, dic in features_path_dic.items():
    x_train_path = dic["x_train_path"]
    x_valid_path = dic["x_valid_path"]
    load = dic["load"]
    print("Load features : " + name)
    if load == "npz":
        X_train = sc.sparse.load_npz(x_train_path)
        X_valid = sc.sparse.load_npz(x_valid_path)
    else :
        X_train = np.load(x_train_path)
        X_valid = np.load(x_valid_path)
    print("start Learning :" + name)
    ts = time.time()
    gs = GridSearchCV(RandomForestClassifier(), param_grid=param_grid, verbose=15)
    gs.fit(X_train,Y_train.values)
    te=time.time()
    t_learning = te-ts
    print("start prediction :" + name)
    ts = time.time()
    score_train=gs.score(X_train,Y_train)
    score_valid=gs.score(X_valid,Y_valid)
    te=time.time()
    t_predict = te-ts
    metadata = {"name":name, "learning_time" : t_learning, "predict_time":t_predict, "score_train": score_train, "score_valid": score_valid}
    metadata_list_rf.append(metadata)
pickle.dump(metadata_list_rf, open("data/metadata_rf_part13.pkl","wb"))
# -
# +
# Same visualisation as for the logistic-regression results above.
metadata_list_rf = pickle.load(open("data/metadata_rf_part13.pkl","rb"))
metadata_list_rf_sorted = sorted(metadata_list_rf, key = lambda x : x["name"])
xlabelticks = [metadata["name"] for metadata in metadata_list_rf_sorted]
fig = plt.figure(figsize=(20,6))
key_plot = [key for key in metadata_list_rf[0].keys() if key !="name"]
for iplot, key in enumerate(key_plot):
    ax = fig.add_subplot(1,4,iplot+1)
    for i,metadata in enumerate(metadata_list_rf_sorted):
        if key=="learning_time":
            scale=60
            ylabel="Time(mn)"
        elif key=="predict_time":
            scale=1
            ylabel="Time(seconds)"
        else:
            scale=0.01
            ylabel = 'Accuracy (pcy)'
        ax.scatter(i,metadata[key]/scale, s=100)
        ax.text(i,metadata[key]/scale,"%.2f"%(metadata[key]/scale), ha="left", va="top")
    ax.set_xticks(np.arange(7))
    ax.set_xticklabels(xlabelticks, rotation=45, fontsize=15, ha="right")
    ax.set_title(key, fontsize=20)
    ax.set_ylabel(ylabel, fontsize=15)
plt.tight_layout()
plt.show()
# -
# # 5 MLP
# +
# Same protocol as above with a one-hidden-layer perceptron; the grid
# searches over the hidden-layer width.
metadata_list_mlp = []
param_grid = {"hidden_layer_sizes" : [32,64,128, 256]}
for name, dic in features_path_dic.items():
    x_train_path = dic["x_train_path"]
    x_valid_path = dic["x_valid_path"]
    load = dic["load"]
    print("Load features : " + name)
    if load == "npz":
        X_train = sc.sparse.load_npz(x_train_path)
        X_valid = sc.sparse.load_npz(x_valid_path)
    else :
        X_train = np.load(x_train_path)
        X_valid = np.load(x_valid_path)
    print("start Learning :" + name)
    ts = time.time()
    gs = GridSearchCV(MLPClassifier(learning_rate = "adaptive", ), param_grid=param_grid, verbose=15)
    gs.fit(X_train,Y_train.values)
    te=time.time()
    t_learning = te-ts
    print("start prediction :" + name)
    ts = time.time()
    score_train=gs.score(X_train,Y_train)
    score_valid=gs.score(X_valid,Y_valid)
    te=time.time()
    t_predict = te-ts
    metadata = {"name":name, "learning_time" : t_learning, "predict_time":t_predict, "score_train": score_train, "score_valid": score_valid}
    metadata_list_mlp.append(metadata)
pickle.dump(metadata_list_mlp, open("data/metadata_mlp_part13.pkl","wb"))
# +
# Same visualisation as for the previous two classifiers.
metadata_list_mlp = pickle.load(open("data/metadata_mlp_part13.pkl","rb"))
metadata_list_mlp_sorted = sorted(metadata_list_mlp, key = lambda x : x["name"])
xlabelticks = [metadata["name"] for metadata in metadata_list_mlp_sorted]
fig = plt.figure(figsize=(20,6))
key_plot = [key for key in metadata_list_mlp[0].keys() if key !="name"]
for iplot, key in enumerate(key_plot):
    ax = fig.add_subplot(1,4,iplot+1)
    for i,metadata in enumerate(metadata_list_mlp_sorted):
        if key=="learning_time":
            scale=60
            ylabel="Time(mn)"
        elif key=="predict_time":
            scale=1
            ylabel="Time(seconds)"
        else:
            scale=0.01
            ylabel = 'Accuracy (pcy)'
        ax.scatter(i,metadata[key]/scale, s=100)
        ax.text(i,metadata[key]/scale,"%.2f"%(metadata[key]/scale), ha="left", va="top")
    ax.set_xticks(np.arange(7))
    ax.set_xticklabels(xlabelticks, rotation=45, fontsize=15, ha="right")
    ax.set_title(key, fontsize=20)
    ax.set_ylabel(ylabel, fontsize=15)
plt.tight_layout()
plt.show()
# -
2  # NOTE(review): stray literal left in the notebook; it only echoes "2" as cell output
| NatualLangageProcessing/Part1-3-AIF-PythonScikitLearn-Prediction-Cdiscount.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 07.03 Sentiment Analysis : NAVER MOVIE REVIEW
#
# * https://github.com/e9t/nsmc
# +
# Use the codecs package so the file is decoded from UTF-8 to Unicode
# while it is being read (a "streaming decoder"); Python must handle the
# review text as Unicode.
import codecs
with codecs.open("ratings_train.txt", encoding='utf-8') as f:
    data = [line.split('\t') for line in f.read().splitlines()]
data = data[1:]   # drop the header row
# +
from pprint import pprint
# Peek at the first record — presumably [id, review text, label] per the
# NSMC file format; TODO confirm against the source file.
pprint(data[0])
# -
# NOTE(review): numpy is never imported anywhere in this notebook, so the
# original `np.array(...)` raised NameError; import it here so the cell is
# self-contained.
import numpy as np

# Transpose the rows once instead of zipping the whole dataset twice.
columns = list(zip(*data))
X = columns[1]                          # review texts
y = np.array(columns[2], dtype=int)     # sentiment labels (0/1)
# # 1. Preprocessing: CountVectorizer, model: multinomial NB
# +
# Preprocessing: CountVectorizer ==>> produces multinomial count samples
# Model: MultinomialNB
# Built as a single pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
model1 = Pipeline([
    ('vect', CountVectorizer()),
    ('mb', MultinomialNB()),
])
# +
# Train the model
model1.fit(X, y)
# +
# Evaluate model performance on the held-out test set
# Load the test data used for evaluation
import codecs
with codecs.open("ratings_test.txt", encoding='utf-8') as f:
    data_test = [line.split('\t') for line in f.read().splitlines()]
data_test = data_test[1:]  # drop the header row
# +
# Print a classification report to check model performance.
# NOTE(review): numpy is never imported anywhere in this notebook; import it
# here so the cell is self-contained.
import numpy as np

columns_test = list(zip(*data_test))
X_test = columns_test[1]
y_test = np.array(columns_test[2], dtype=int)
print(classification_report(y_test, model1.predict(X_test)))
# +
# Spot-check some predictions
# negative word : 0 / positive word : 1
# -
model1.predict(['꽝이야'])
model1.predict(['짱이야'])
model1.predict(['죽인다'])
model1.predict(['우와'])
model1.predict(['우왕'])
model1.predict(['대박'])
model1.predict(['연기'])
model1.predict(['노잼'])
model1.predict(['못한다'])
# # 2. Preprocessing: TfidfVectorizer, model: multinomial NB
#
# - little performance difference compared with CountVectorizer preprocessing
#
# +
from sklearn.feature_extraction.text import TfidfVectorizer
model2 = Pipeline([
    ('vect', TfidfVectorizer()),
    ('mb', MultinomialNB()),
])
# -
model2.fit(X, y)
print(classification_report(y_test, model2.predict(X_test)))
# # 3. Preprocessing: CountVectorizer, model: multinomial NB, with a morphological analyzer
#
# - morphological analyzer: konlpy - Okt
#
# +
from konlpy.tag import Okt
pos_tagger = Okt()  # Okt (Open Korean Text) part-of-speech tagger
def tokenize_pos(doc):
    """Tokenize `doc` with the module-level Okt tagger as 'token/POS' strings."""
    return ['/'.join(pair) for pair in pos_tagger.pos(doc)]
# -
model3 = Pipeline([
    ('vect', CountVectorizer(tokenizer=tokenize_pos)),
    ('mb', MultinomialNB()),
])
# %%time
model3.fit(X, y)
print(classification_report(y_test, model3.predict(X_test)))
# # 4. Preprocessing: CountVectorizer, model: multinomial NB, morphological analyzer (modified grams)
#
# - morphological analyzer: konlpy - okt
# - grams: uses 1-2 grams
# NOTE(review): despite the heading mentioning CountVectorizer, model4
# actually uses TfidfVectorizer — confirm which was intended.
model4 = Pipeline([
    ('vect', TfidfVectorizer(tokenizer=tokenize_pos, ngram_range=(1, 2))),
    ('mb', MultinomialNB()),
])
# %%time
model4.fit(X, y)
print(classification_report(y_test, model4.predict(X_test)))
| 1.Study/2. with computer/2.Machine_Learning_code/1. Supervised_Learning/2. Classification/.ipynb_checkpoints/07.03 Sentiment_Analysis - Naive_bayes classification application [NAVER MOVIE REVIEW]-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# NOTE(review): hard-coded absolute Windows path — this breaks on any other
# machine; consider a relative path or a configurable constant.
tc_data=pd.read_csv('C:/Users/USER/Desktop/Assignment/ADS-Assignment-4/WA_Fn-UseC_-Telco-Customer-Churn.csv')
tc_data
# Count missing values per column.
tc_data.isna().sum()
tc_data.dtypes
# tc_data.describe()
# ### a. Which 2 demographics have proven to be the most popular with the telco brand?
#
# Column-wise mode: the most frequent value of every column.
tc_data.mode(axis=0,numeric_only=False,dropna=True)
# ### b) If the marketing team wanted to increase customer retention, to whom should they target their advertisements to?
#
# NOTE(review): typo "higer" -> "higher" in the printed message below.
print("The Gender with a higer population should be the target to be advertised to")
tc_data.gender.value_counts()
# ### 2. Services:
# ### a. Which 3 services are contributing to a higher monthly charge and resulting to customers churning? (Use visualizations in explaining your analysis)
# NOTE(review): reset_index() returns a new frame that is discarded here —
# this statement has no effect on tc_data.
tc_data.reset_index()
import matplotlib.pyplot as plt
# ## b. If the telco was to offer Phone as a standalone service, which type of contract would encourage customer retention?
#
# ## 3. Payment: (Use visualizations in explaining your analysis)
# ## a. If the company was to streamline all its services into 3 bouquet packages, what monthly prices will be appropriate for the following packages to keep customers from churning:
# ## b. Should the company strictly go paperless for their monthly billings as a technique of keeping their customers and why?
| Assignment 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Graphics and Visualization in Python
#
# Python has a bewildering number of graphics libraries, catering to different needs. If you want a better understanding of the Python visualization landscape, see the following series of blog posts:
#
# - [Python Data Visualization 2018: Why So Many Libraries?](https://www.anaconda.com/blog/developer-blog/python-data-visualization-2018-why-so-many-libraries/)
# - [Python Data Visualization 2018: Moving Toward Convergence](https://www.anaconda.com/blog/developer-blog/python-data-visualization-moving-toward-convergence/)
# - [Python Data Visualization 2018: Where Do We Go From Here?](https://www.anaconda.com/blog/developer-blog/python-data-visualization-2018-where-do-we-go-from-here/)
#
# However for simple statistical plots, the `matplotlib` and `seaborn` libraries suffice most of the time:
#
# - [Matplotlib tutorial](http://www.scipy-lectures.org/intro/matplotlib/matplotlib.html)
# - [Matplotlib gallery](http://matplotlib.org/1.2.1/gallery.html)
# - [Seaborn gallery](http://stanford.edu/~mwaskom/software/seaborn/examples/index.html#example-gallery)
#
# As you have seen, `pandas` also has useful plotting functionality.
# %matplotlib inline
import numpy as np
import numpy.random as rng
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
import warnings
# NOTE(review): this silences *all* warnings notebook-wide, including the
# deprecation warnings that would flag outdated API usage below.
warnings.filterwarnings("ignore")
# ## Matplotlib
#
# Matplotlib has a "functional" interface similar to Matlab via the `pyplot` module for simple interactive use, as well as an object-oriented interface that is useful for more complex graphic creations.
# ### Types of plots
# (The trailing `pass` in these cells just suppresses the echoed return
# value of the last plotting call.)
plt.hist(np.random.randn(1000), bins=np.linspace(-4,4,11))
pass
# Overlaid translucent histograms of five shifted normals.
xs = [np.random.normal(mu, 0.5, (100)) for mu in range(5)]
for x in xs:
    plt.hist(x, bins=15, alpha=0.4)
plt.boxplot(np.random.random((6,10)))
pass
plt.scatter(*np.random.uniform(0.1, 0.9, (2,100)),
            s=np.random.randint(10, 200, 100),
            c=np.random.random(100))
pass
# Filled contours of a paraboloid on a square grid.
x = y = np.linspace(-5, 5, 100)
X, Y = np.meshgrid(x, y)
Z = X**2 + Y**2
plt.contourf(X, Y, Z, cmap=plt.cm.RdPu)
plt.axis('square')
pass
plt.stem(np.random.random(8))
plt.margins(0.05)
pass
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
plt.plot(x, y)
plt.axis([0, 2*np.pi, -1.05, 1.05,])
pass
# Ten +/-1 random walks, one column per walk after transposing.
xs = np.c_[np.zeros(10), np.random.choice([-1,1], (10, 100)).cumsum(axis=1)]
plt.plot(xs.T)
plt.title('10 random walks', fontsize=14)
pass
# ### Displaying arrays
# An (80, 80, 3) array displays directly as an RGB image.
x = np.random.random((80, 80, 3))
plt.imshow(x)
pass
plt.imshow(x, interpolation='bicubic')
pass
# Averaging over the color axis gives a grayscale image to colormap.
plt.imshow(x.mean(axis=-1), cmap='bone')
pass
plt.imshow(x.mean(axis=-1), cmap='Reds')
plt.xticks(range(0, x.shape[1], 4))
plt.yticks(range(0, x.shape[0], 4))
plt.grid(color='white')
# Hide tick labels and marks, keeping only the white grid overlay.
ax = plt.gca()
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.xaxis.set_ticks_position('none')
ax.yaxis.set_ticks_position('none')
pass
# ### Colors
plt.scatter(*np.random.uniform(0.1, 0.9, (2,100)),
            s=np.random.randint(10, 200, 100),
            c=np.random.random(100))
pass
plt.scatter(*np.random.uniform(0.1, 0.9, (2,100)),
            s=np.random.randint(10, 200, 100),
            c=np.random.random(100), cmap='summer')
pass
plt.scatter(*np.random.uniform(0.1, 0.9, (2,100)),
            s=np.random.randint(10, 200, 100),
            c=np.random.random(100), cmap='hsv')
pass
# #### Getting a list of colors from a colormap
#
# Giving an argument of 0.0 < x < 1.0 to a `colormap` gives the appropriate interpolated color.
# find the bottom, middle and top colors of the winter colormap
colors = plt.cm.winter(np.linspace(0, 1, 3))
colors
# NOTE(review): `c=colors` supplies 3 RGBA colors for 100 points; newer
# Matplotlib versions raise here — confirm the intended behavior.
plt.scatter(*np.random.uniform(0.1, 0.9, (2,100)),
            s=np.random.randint(10, 200, 100),
            c=colors)
pass
# ### Styles
plt.style.available
x = np.linspace(0, 2*np.pi, 100)
y = np.sin(x)
# Each context manager applies the style only inside the `with` block.
with plt.style.context('classic'):
    plt.plot(x, y)
    plt.axis([0, 2*np.pi, -1.05, 1.05,])
with plt.style.context('fivethirtyeight'):
    plt.plot(x, y)
    plt.axis([0, 2*np.pi, -1.05, 1.05,])
with plt.style.context('ggplot'):
    plt.plot(x, y)
    plt.axis([0, 2*np.pi, -1.05, 1.05,])
# NOTE(review): 'seaborn-darkgrid' was renamed 'seaborn-v0_8-darkgrid' in
# Matplotlib >= 3.6 — confirm the installed version.
with plt.style.context('seaborn-darkgrid'):
    plt.plot(x, y)
    plt.axis([0, 2*np.pi, -1.05, 1.05,])
with plt.xkcd():
    plt.plot(x, y)
    plt.axis([0, 2*np.pi, -1.05, 1.05,])
# ### Creating your own style
#
# Many, many options can be configured.
plt.rcParams
# The %%file cell magic writes the following lines verbatim to foo.mplstyle.
# %%file foo.mplstyle
axes.grid: True
axes.titlesize : 24
axes.labelsize : 20
lines.linewidth : 3
lines.markersize : 10
xtick.labelsize : 16
ytick.labelsize : 16
# A style file on disk can be used by name inside a context manager too.
with plt.style.context('foo.mplstyle'):
    plt.plot(x, y)
    plt.axis([0, 2*np.pi, -1.05, 1.05,])
# ### Customizing plots
# +
plt.rcParams.update({'font.size': 22})
fig = plt.figure(figsize=(8,6))
ax = plt.subplot(1,1,1)
plt.plot(x, y, color='red', linewidth=2, linestyle='dashed', label='sine curve')
plt.plot(x, np.cos(x), 'b-', label='cosine curve')
plt.legend(loc='best', fontsize=14)
plt.axis([0, 2*np.pi, -1.05, 1.05,])
plt.xlabel('x')
plt.ylabel('sin(x)')
# LaTeX tick labels at the multiples of pi/2.
plt.xticks([0,0.5*np.pi,np.pi,1.5*np.pi,2*np.pi],
           [0, r'$\frac{\pi}{2}$', r'$\pi$', r'$\frac{3\pi}{2}$', r'$2\pi$'])
plt.title('Sine and Cosine Plots')
# Text placed in axes coordinates (0..1), not data coordinates.
plt.text(0.45, 0.9, 'Empty space', transform=ax.transAxes, ha='left', va='top')
pass
# -
x = np.random.randn(100)
# Histogram normalized to a probability density so the fitted pdf overlays it.
# BUG FIX: the `normed` keyword was deprecated in Matplotlib 2.1 and removed
# in 3.1; `density=True` is the supported equivalent.
plt.hist(x, bins=25, histtype='step', density=True)
mu, sigma = stats.norm.fit(x)
xp = np.linspace(*plt.xlim(), 100)
plt.plot(xp, stats.norm(mu, sigma).pdf(xp))
plt.xlabel('x')
plt.ylabel('Density')
plt.title('MLE fit for normal distribution', fontsize=14)
pass
# ### Layouts
# A regular 2x2 grid of axes sharing one figure.
fig, axes = plt.subplots(2,2,figsize=(8,8))
axes[0,0].plot(x,y, 'r')
axes[0,1].plot(x,y, 'g')
axes[1,0].plot(x,y, 'b')
axes[1,1].plot(x,y, 'k')
for ax in axes.ravel():
    ax.margins(0.05)
pass
# An irregular layout: axes spanning multiple cells of a 3x3 grid.
ax1 = plt.subplot2grid((3,3), (0,0), colspan=3)
ax2 = plt.subplot2grid((3,3), (1,0), colspan=2)
ax3 = plt.subplot2grid((3,3), (1,2), rowspan=2)
ax4 = plt.subplot2grid((3,3), (2,0), colspan=2)
axes = [ax1, ax2, ax3, ax4]
colors = ['r', 'g', 'b', 'k']
for ax, c in zip(axes, colors):
    ax.plot(x, y, c)
    ax.margins(0.05)
plt.tight_layout()
# ## Seaborn
sns.set_context("notebook", font_scale=1.5, rc={"lines.linewidth": 2.5})
# ### Density plots
# +
# `rng` here is the numpy.random module (imported as rng at the top).
# NOTE(review): sns.distplot was deprecated in seaborn 0.11 and later
# removed (use displot/histplot/kdeplot) — confirm the installed version.
xs = rng.normal(0,1,100)
fig, axes = plt.subplots(1, 2, figsize=(8,4))
sns.distplot(xs, hist=False, rug=True, ax=axes[0]);
sns.distplot(xs, hist=True, ax=axes[1])
pass
# -
# ### Kernel density estimate
# KDE of a bimodal sample built by concatenating two normals.
sns.kdeplot(np.r_[rng.normal(0,1,50), rng.normal(4,0.8,100)])
pass
iris = sns.load_dataset('iris')
iris.head()
# ### Joint distribution plot
# BUG FIX: the valid jointplot kind is 'kde'; 'kdeplot' only worked in old
# seaborn versions by accidental prefix matching and raises a ValueError in
# modern versions.
sns.jointplot(x='petal_length', y='petal_width', data=iris, kind='kde')
pass
# ### Box and violin plots
# +
fig, axes = plt.subplots(1, 2, figsize=(8,4))
sns.boxplot(x='species', y='petal_length', data=iris, ax=axes[0])
sns.violinplot(x='species', y='petal_length', data=iris, ax=axes[1])
pass
# -
# ### Composite plots
url = 'https://raw.githubusercontent.com/mwaskom/seaborn-data/master/titanic.csv'
titanic = pd.read_csv(url)
titanic.head()
# Logistic-regression fits of survival on fare, faceted by sex and alone.
sns.lmplot(x='fare', y='survived', col='alone', row='sex', data=titanic, logistic=True)
pass
# FIX: PairGrid's `size` parameter was renamed `height` in seaborn 0.9 and
# later removed; `height=5.5` keeps the same 5.5-inch facet height.
g = sns.PairGrid(titanic,
                 y_vars=['fare', 'age'],
                 x_vars=['sex', 'class', 'embark_town' ],
                 aspect=1, height=5.5)
g.map(sns.stripplot, jitter=True, palette="bright")
pass
# ### Seaborn styles
# The same pair of density plots rendered under three seaborn styles.
sns.set_style('ticks')
fig, axes = plt.subplots(1, 2, figsize=(8,4))
sns.distplot(xs, hist=False, rug=True, ax=axes[0]);
sns.distplot(xs, hist=True, ax=axes[1])
pass
sns.set_style('dark')
# +
xs = rng.normal(0,1,100)
fig, axes = plt.subplots(1, 2, figsize=(8,4))
sns.distplot(xs, hist=False, rug=True, ax=axes[0]);
sns.distplot(xs, hist=True, ax=axes[1])
pass
# -
sns.set_style('darkgrid')
fig, axes = plt.subplots(1, 2, figsize=(8,4))
sns.distplot(xs, hist=False, rug=True, ax=axes[0]);
sns.distplot(xs, hist=True, ax=axes[1])
pass
| notebook/S05_Graphics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
#default_exp labelsmoothing
# -
# This notebook is based on IssacFlath blog post https://isaac-flath.github.io/fastblog/neural%20networks/noisy%20data/2020/12/15/PseudoLabelingDataCleaning.html .
#
# Here we use the same way of injecting random noise to the data. But instead of performing cross validation, we treat the same train and use the testing as validation dataset.
#
# Here LabelSmoothing is extremely helpful in the presence of noise. See this paper https://arxiv.org/pdf/2003.02819.pdf - Can LabelSmoothing Mitigate Noise?
#
#hide
# %%capture
# !pip install fastai -q --upgrade
# !pip install nbdev -q --upgrade
#export
from fastai.vision.all import *
from numpy.random import default_rng
# Download MNIST in png form (label encoded in each file's parent directory,
# split encoded in the grandparent: 'training' / 'testing').
path = untar_data(URLs.MNIST)
path.ls()
# +
#export
# Sample 10% of all image files (across both splits) to receive a corrupted label.
x = get_image_files(path)
n = len(x)
rng = default_rng(seed=42)  # fixed seed so the noisy subset is reproducible
noise_idxs = rng.choice(n, size=round(n*0.1), replace=False)
len(noise_idxs),noise_idxs[:5]
# -
#export
from pathlib import Path
# Inject label noise: move each sampled image into a randomly chosen digit
# directory (the parent folder name encodes the label). Rewritten with
# pathlib because the original sliced the path string at hard-coded offsets
# (48/49), which silently corrupts paths whenever the dataset is installed
# at a different location, and shelled out to `mv`, which breaks on paths
# containing spaces and is not portable.
for i in range(0, len(noise_idxs)):
    old_path = Path(x[noise_idxs[i]])
    # Pick a replacement label 0-9 at random. As in the original code, the
    # random label may coincide with the true one.
    new_label = str(np.random.randint(0, 10))
    new_path = old_path.parent.parent / new_label / old_path.name
    # os.replace is portable and overwrites an existing target atomically.
    os.replace(old_path, new_path)
#export
# DataBlock: grayscale images -> category labels; label read from the parent
# directory name, train/valid split taken from the grandparent directory
# ('training' vs 'testing').
mnist = DataBlock(blocks=(ImageBlock(cls=PILImageBW), CategoryBlock),
                  get_items=get_image_files,
                  splitter=GrandparentSplitter(train_name='training', valid_name='testing'),
                  get_y=parent_label)
dls = mnist.dataloaders(path,bs=16)
dls.show_batch(max_n=36,figsize=(6,6))
# Inspect a few items from each split to sanity-check the noisy labels.
dls.train.items[:10]
dls.valid.items[:10]
dls.n_subsets
dls.train_ds, dls.valid_ds
#export
# Label smoothing is the noise-mitigation strategy under test (see the
# linked paper in the intro).
learn = cnn_learner(dls, resnet18, metrics=accuracy, loss_func=LabelSmoothingCrossEntropyFlat())
learn.loss_func
learn.lr_find()
#export
learn.fine_tune(1, 1e-3)
learn.show_results(ds_idx=1)
#export
# ds_idx=1 -> validation (testing) split; with_decoded adds argmax-decoded preds.
val_preds = learn.get_preds(ds_idx=1, with_decoded=True)
#hide
# !pip install cleanlab -q
#export
# NOTE(review): cleanlab.pruning.get_noise_indices is the pre-2.0 cleanlab
# API; newer versions expose find_label_issues instead — confirm the
# installed version before running.
from cleanlab.pruning import get_noise_indices
#export
# Rank validation samples by how likely their label is wrong.
val_ordered_label_errors = get_noise_indices(s=val_preds[1].numpy(), psx=val_preds[0].numpy(), sorted_index_method='normalized_margin')
len(val_ordered_label_errors)
val_ordered_label_errors
val_preds[0][0].argmax(0).item()
for i in val_ordered_label_errors:
    #target, preds, preds_proba
    print(val_preds[1][i], val_preds[0][i].argmax().item(), val_preds[0][i].max()) #0.9 is good threshold to treat as noisy labels
    show_at(dls.valid_ds, idx=i)
# Dataset Pruning ie removing the noisy labels reported is not effective as it removes the most complicated ones.
| 02_NoisyMitigation_using_LabelSmoothing_MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import glob
import pickle
# gp_drt_dir = '../comparisons/GP-DRT'
# if gp_drt_dir not in sys.path:
# sys.path.append(gp_drt_dir)
import GP_DRT as gp
from GP_utils import gp_fit
if '../../../bayes-drt' not in sys.path:
sys.path.append('../../../bayes-drt')
sys.path.append('../../../../misc_modules')
import eis_utils as gt
# %load_ext autoreload
# %autoreload 2
# +
def save_pickle(obj, file):
    """Serialize *obj* to *file* using the highest pickle protocol."""
    with open(file, 'wb') as out:
        pickle.dump(obj, out, pickle.HIGHEST_PROTOCOL)
    print('Dumped pickle to {}'.format(file))
def load_pickle(file):
    """Deserialize and return the object stored in *file*."""
    with open(file, 'rb') as source:
        return pickle.load(source)
# -
# # Simulated data
# +
# tau for plotting
tau_plot = np.logspace(-7,2,200)
f_plot = 1/(2*np.pi*tau_plot)
start = time.time()
# All simulated impedance files, excluding DDT variants.
files = glob.glob('../../../data/simulated/Z*.csv')
files = [f for f in files if f.find('DDT')==-1]
error_files = []  # files whose fit raised LinAlgError
# Initial hyperparameter guesses tried for every file.
theta0s = [[0.1,0.5,1],[0.1,1.5,2],[0.1,5,1]]
for file in files:
    print(file)
    print('-------------------------')
    # suffix: from the first underscore to just before '.csv'
    suffix = file[file.find('_'):-4]
    file_start = time.time()
    # Best result across all theta0s (lowest NMLL) is mirrored to for_figs.
    best_path = 'results/for_figs'
    Zoutbest = os.path.join(best_path,f'Zout{suffix}.csv')
    Goutbest = os.path.join(best_path,f'Gout{suffix}.csv')
    pklbest = os.path.join(best_path,f'obj_{suffix}.pkl')
    if os.path.exists(Zoutbest) and os.path.exists(Goutbest) and os.path.exists(pklbest):
        print('Already ran')
    else:
        best_fun = np.inf  # lowest objective ('fun') seen so far for this file
        for n,theta0 in enumerate(theta0s):
            print('theta0=',theta0)
            respath = 'results/theta0=({},{},{})'.format(theta0[0],theta0[1],theta0[2])
            Zoutfile = os.path.join(respath,f'Zout{suffix}.csv')
            Goutfile = os.path.join(respath,f'Gout{suffix}.csv')
            pkl = os.path.join(respath,f'obj_{suffix}.pkl')
            # if os.path.exists(Zoutfile) and os.path.exists(Goutfile):
            #     print('Already ran')
            # else:
            df = pd.read_csv(file)
            # sort ascending
            df = df.sort_values('Freq')
            Z = df['Zreal'].values + 1j*df['Zimag'].values
            try:
                result = gp_fit(df['Freq'].values,Z,theta0=np.array(theta0),freq_star=f_plot,max_iter=20)
                Z_res = pd.DataFrame(np.array([df['Freq'],result['Z_re_fit'],result['Z_im_fit'],result['sigma_Z_im_fit']]).T,
                                     columns=['freq','Zreal','Zimag','sigma_im'])
                g_res = pd.DataFrame(np.array([tau_plot,result['gamma_star'],result['sigma_gamma_star']]).T,
                                     columns=['tau','gamma','sigma_gamma'])
                Z_res.to_csv(Zoutfile,index=False)
                g_res.to_csv(Goutfile,index=False)
                save_pickle(result,pkl)
                # if this theta0 improved the NMLL, write it to for_figs (overwrite existing file)
                if result['min_result']['fun'] < best_fun:
                    best_fun = result['min_result']['fun']
                    Z_res.to_csv(Zoutbest,index=False)
                    g_res.to_csv(Goutbest,index=False)
                    save_pickle(result,pklbest)
            except np.linalg.LinAlgError:
                error_files.append(file)
                print('LinAlgError')
    print('File fit time:', time.time()-file_start)
print('Total fit time:',time.time()-start)
# +
def errfun():
    """Always raise an OverflowError (sanity check for the handler below)."""
    raise OverflowError('test')

# A tuple in the except clause catches either exception type with one handler.
try:
    errfun()
except (np.linalg.LinAlgError, OverflowError) as err:
    print(err)
    print('caught')
# -
# # Experimental data
# ## LIB
# +
lib_files = ['../../../data/experimental/DRTtools_LIB_data.txt',
             '../../../data/experimental/DRTtools_LIB_data_qtr.csv'
            ]
# Initial hyperparameter guesses tried for every file.
theta0s = [[1e-4,1e-3,1],[5e-4,1e-3,1],[1e-4,2.5e-3,1],[1e-4,1e-3,2],[1e-4,5e-4,2],[1e-4,5e-4,1],
           [1e-4,5e-4,0.5],[1e-4,1e-3,0.5]]
f_plot = np.logspace(4,-5,200)
tau_plot = 1/(2*np.pi*f_plot)
start = time.time()
error_files = []
for file in lib_files:
    print(file)
    print('-------------------------')
    # suffix: from the first underscore to just before the file extension
    suffix = file[file.find('_'):-4]
    best_path = 'results/for_figs'
    Zoutbest = os.path.join(best_path,f'Zout{suffix}.csv')
    Goutbest = os.path.join(best_path,f'Gout{suffix}.csv')
    # NOTE(review): 'obj{suffix}.pkl' here vs 'obj_{suffix}.pkl' in the
    # simulated-data section — confirm which naming downstream code expects.
    pklbest = os.path.join(best_path,f'obj{suffix}.pkl')
    # if os.path.exists(Zoutbest) and os.path.exists(Goutbest) and os.path.exists(pklbest):
    #     print('Already ran')
    # else:
    best_fun = np.inf  # lowest objective ('fun') seen so far for this file
    file_start = time.time()
    for n,theta0 in enumerate(theta0s):
        print('theta0=',theta0)
        tsuf = suffix + '_theta0=({},{},{})'.format(theta0[0],theta0[1],theta0[2])
        Zoutfile = os.path.join('results/exp',f'Zout{tsuf}.csv')
        Goutfile = os.path.join('results/exp',f'Gout{tsuf}.csv')
        pkl = os.path.join('results/exp',f'obj{tsuf}.pkl')
        # .txt files are tab-separated and headerless; .csv files have headers.
        if file[-3:]=='txt':
            df = pd.read_csv(file,sep='\t',header=None)
            df = pd.DataFrame(df.values,columns=['Freq','Zreal','Zimag'])
        else:
            df = pd.read_csv(file)
        # remove inductive tail
        df = df[df['Zimag']<0]
        # sort ascending
        df = df.sort_values('Freq')
        Z = df['Zreal'].values + 1j*df['Zimag'].values
        try:
            result = gp_fit(df['Freq'].values,Z,theta0=theta0,freq_star=f_plot)
            Z_res = pd.DataFrame(np.array([df['Freq'],result['Z_re_fit'],result['Z_im_fit'],result['sigma_Z_im_fit']]).T,
                                 columns=['freq','Zreal','Zimag','sigma_im'])
            # sort by descending freq
            Z_res = Z_res.sort_values('freq',ascending=False,ignore_index=True)
            g_res = pd.DataFrame(np.array([tau_plot,result['gamma_star'],result['sigma_gamma_star']]).T,
                                 columns=['tau','gamma','sigma_gamma'])
            # save results files
            Z_res.to_csv(Zoutfile,index=False)
            g_res.to_csv(Goutfile,index=False)
            save_pickle(result,pkl)
            # if this theta0 improved the NMLL, write it to for_figs (overwrite existing file)
            if result['min_result']['fun'] < best_fun:
                best_fun = result['min_result']['fun']
                Z_res.to_csv(Zoutbest,index=False)
                g_res.to_csv(Goutbest,index=False)
                save_pickle(result,pklbest)
        except np.linalg.LinAlgError as lae:
            # cholesky failed for theta0. Can't evaluate NMLL, so no use in fitting
            print(lae)
            print('Can\'t evaluate NMLL for theta0. Skipping')
    print('File fit time: {} min'.format((time.time()-file_start)/60))
print('Total fit time: {} min'.format((time.time()-start)/60))
# -
# ## Protonic ceramic microelectrode
# +
tco_file = '../../../data/experimental/PDAC_COM3_02109_Contact10_2065C_500C.txt'
tco_df = gt.read_eis_zdata(tco_file)
f_plot_tco = np.logspace(7,-3,200)
tau_plot_tco = np.logspace(-7,4,200)
# Initial hyperparameter guesses tried for this file.
theta0s = [[5e5,1e6,1],[1e6,1e6,1],[1e6,2.5e6,1],[5e5,2.5e6,1],
           [5e5,1e6,2],[5e5,2.5e6,2],[1e6,1e6,2],[5e5,2.5e6,0.5]]
file = tco_file
suffix = '_PDAC'
Zoutbest = os.path.join('results/for_figs',f'Zout{suffix}.csv')
Goutbest = os.path.join('results/for_figs',f'Gout{suffix}.csv')
# NOTE(review): this best-fit pickle is given a .csv extension (and no '_'
# separator), unlike the .pkl files elsewhere — likely a typo; confirm what
# downstream figure code reads before changing.
pklbest = os.path.join('results/for_figs',f'obj{suffix}.csv')
best_fun = np.inf  # lowest objective ('fun') seen so far
file_start = time.time()
for n,theta0 in enumerate(theta0s):
    print('theta0=',theta0)
    tsuf = suffix + '_theta0=({},{},{})'.format(theta0[0],theta0[1],theta0[2])
    Zoutfile = os.path.join('results/exp',f'Zout{tsuf}.csv')
    Goutfile = os.path.join('results/exp',f'Gout{tsuf}.csv')
    pkl = os.path.join('results/exp',f'obj{tsuf}.pkl')
    df = gt.read_eis_zdata(tco_file)
    # sort ascending
    df = df.sort_values('Freq')
    Z = df['Zreal'].values + 1j*df['Zimag'].values
    try:
        result = gp_fit(df['Freq'].values,Z,theta0=theta0,max_iter=10,freq_star=f_plot_tco)
        Z_res = pd.DataFrame(np.array([df['Freq'],result['Z_re_fit'],result['Z_im_fit'],result['sigma_Z_im_fit']]).T,
                             columns=['freq','Zreal','Zimag','sigma_im'])
        g_res = pd.DataFrame(np.array([tau_plot_tco,result['gamma_star'],result['sigma_gamma_star']]).T,
                             columns=['tau','gamma','sigma_gamma'])
        # sort by descending freq
        Z_res = Z_res.sort_values('freq',ascending=False,ignore_index=True)
        #save files
        Z_res.to_csv(Zoutfile,index=False)
        g_res.to_csv(Goutfile,index=False)
        save_pickle(result,pkl)
        # if this theta0 improved the NMLL, write it to for_figs (overwrite existing file)
        if result['min_result']['fun'] < best_fun:
            best_fun = result['min_result']['fun']
            Z_res.to_csv(Zoutbest,index=False)
            g_res.to_csv(Goutbest,index=False)
            save_pickle(result,pklbest)
    except np.linalg.LinAlgError as lae:
        # cholesky failed for theta0. Can't evaluate NMLL, so no use in fitting
        print(lae)
        print('Can\'t evaluate NMLL for theta0. Skipping')
print('File fit time:', time.time()-file_start)
# -
| code_EchemActa/comparisons/GP-DRT/GP_run_fits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
#
# How to Use TVM Pass Infra
# =========================
# **Author**: `<NAME> <https://github.com/zhiics>`_
#
# As the number of optimization passes increases in Relay/tir, it becomes intractable to
# execute them and maintain their dependencies manually. Therefore, we have
# introduced an infrastructure to manage the optimization passes and make it
# applicable to different layers of the IR in the TVM stack.
#
# The optimizations of a Relay/tir program could be applied at various granularity,
# namely function-level and module-level using :py:class:`tvm.relay.transform.FunctionPass`/
# :py:class:`tvm.tir.transform.PrimFuncPass` and :py:class:`tvm.transform.ModulePass`
# respectively. Or users can rely on :py:class:`tvm.transform.Sequential` to apply a sequence of passes
# on a Relay/tir program where the dependencies between passes can be resolved by the
# pass infra. For more details about each type of these passes, please refer to
# the `pass-infra`
#
# This tutorial mainly demonstrates how developers can use the pass infra to perform
# a certain optimization and create an optimization pipeline for a Relay program.
# The same approach can be used for tir as well.
#
#
import numpy as np
import tvm
from tvm import te
import tvm.relay as relay
# Create An Example Relay Program
# -------------------------------
# First of all, we create a simple Relay program for the tutorial. This program
# will be used by various optimizations of the examples in this tutorial.
# Similarly, users can write a tir primitive function and apply the tir passes.
#
#
def example():
    """Create the small Relay program used throughout these examples.

    It contains a conv2d, constant arithmetic that constant folding can
    collapse, and two identical additions that exercise common-subexpression
    elimination.
    """
    shape = (1, 64, 54, 54)
    const_input = relay.const(np.empty(shape).astype("float32"))
    kernel = relay.var("weight", shape=(64, 64, 3, 3))
    data = relay.var("x", relay.TensorType((1, 64, 56, 56), "float32"))
    conv_out = relay.nn.conv2d(data, kernel)
    folded = relay.add(const_input, const_input)
    folded = relay.multiply(folded, relay.const(2, "float32"))
    acc = relay.add(conv_out, folded)
    branch_a = relay.add(acc, const_input)
    branch_b = relay.add(acc, const_input)
    return relay.Function([data, kernel], relay.add(branch_a, branch_b))
# Optimize the Program
# --------------------
# Now we would like to optimize the program. Relay features a host of
# optimizations. We will select some of them to apply on this example program.
#
# There are multiple ways to optimize a Relay program. Below we will provide
# examples for each of them.
#
# Manually Apply Optimization Passes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#
# +
# Let's first create a relay Module which contains one or multiple Relay
# functions for optimization.
f = example()
mod = tvm.IRModule.from_expr(f)
# Now we can apply constant folding on the module.
# fold_const here is a callback that doesn't take any parameters.
fold_const = relay.transform.FoldConstant()
# Then, we can invoke the pass on the given module. Note that the constant
# folding pass works at the function-level. That being said, the optimization
# is applied to each function in the module; users don't need to iterate
# through individual functions manually to apply this pass.
mod = fold_const(mod)
# We can see from the updated program that the constants are folded.
print(mod)
# -
# -
# More optimizations can be applied in a similar manner. For instance, we can
# eliminate the common expressions that are used by `z` and `z1`.
#
#
mod = relay.transform.EliminateCommonSubexpr()(mod)
print(mod)
# Some optimizations, such as fusion, are parametric as well. For example,
# opt level 0 will not allow operators to be fused together. Users can pass the
# `fuse_opt_level` to enable this.
#
#
# +
mod = relay.transform.FuseOps(fuse_opt_level=0)(mod)
# We can observe that the optimized module contains functions that only have
# a single primitive op.
print(mod)
# -
# -
# Use Sequential to Apply a Sequence of Passes
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Applying passes as above is actually tedious and it may require users to have
# better understanding about the dependencies between them. For example, fusion
# currently doesn't work well on let bindings. Therefore, we would not be able
# to fuse operators that were fusable if :py:func:`relay.transform.ToANormalForm` is applied before
# fusion, as this pass generates let bindings for each expression to
# canonicalize a Relay program.
#
# Relay, hence, provides :py:class:`tvm.transform.Sequential` to alleviate developers from handling
# these issues explicitly by specifying the required passes of each pass and
# packing them as a whole to execute. For example, the same passes can now be
# applied using the sequential style as the following. :py:class:`tvm.transform.Sequential` is
# similar to `torch.nn.sequential <https://pytorch.org/docs/stable/nn.html#torch.nn.Sequential>`_
# and `mxnet.gluon.block <https://mxnet.apache.org/api/python/docs/_modules/mxnet/gluon/block.html>`_.
# For example, `torch.nn.sequential` is used to contain a sequence of PyTorch
# `Modules` that will be added to build a network. It focuses on the network
# layers. Instead, the :py:class:`tvm.transform.Sequential` in our pass infra works on the optimizing
# pass.
#
#
# Now let's execute some passes through :py:class:`tvm.transform.Sequential`
f = example()
mod = tvm.IRModule.from_expr(f)
# Glob the interested passes.
seq = tvm.transform.Sequential(
    [
        relay.transform.FoldConstant(),
        # Runs only under a PassContext whose opt_level covers this pass;
        # skipped at Sequential's default opt_level (see discussion below).
        relay.transform.EliminateCommonSubexpr(),
        relay.transform.FuseOps(fuse_opt_level=2),
    ]
)
mod1 = seq(mod)
print(mod1)
# From the transformed Relay program, we can see that there are still two
# identical addition operations. This is because ``EliminateCommonSubexpr``
# was not actually performed. The reason is because only the passes that have
# optimization level less or equal to 2 will be executed by default under
# :py:class:`tvm.transform.Sequential`. The pass infra,
# however, provides a configuration interface
# for users to customize the optimization level that they want to execute.
#
#
# Raise the optimization level so higher-level passes (EliminateCommonSubexpr)
# are executed as well.
with tvm.transform.PassContext(opt_level=3):
    mod2 = seq(mod)
print(mod2)
# Now we can see that only one of the two identical additions is kept.
#
# In addition, users can selectively disable some passes using the
# `disabled_pass` config, which is similar to the `-fno-xxx` option used in
# general purpose compilers, such as Clang and GCC. For example, we can disable
# EliminateCommonSubexpr as follows. The printed module will again show two
# identical addition operations.
#
#
with tvm.transform.PassContext(opt_level=3, disabled_pass=["EliminateCommonSubexpr"]):
    mod3 = seq(mod)
print(mod3)
# Implement a Pass Using Python Decorator
# ------------------------------------------
# The next example illustrates how we can orchestrate a customized optimization
# pipeline through the pass infra using Python decorators. This functionality
# greatly eases the implementation of passes. For example, users can simply
# define a decorated class to do function-level optimizations as the following
# example shows. `transform_function` wraps a class to replace all constants
# with a multiple of `c`. Later on, each function in a given module will be
# visited and each constant in the function will be replaced when we invoke the
# customized pass.
#
#
# +
@relay.transform.function_pass(opt_level=1)
class CustomPipeline:
    """Function-level pass that multiplies every constant by ``multiplier``."""

    def __init__(self, multiplier):
        self.multiplier = multiplier

    # This method defines the pass; it is invoked once per function in the module.
    def transform_function(self, func, mod, ctx):
        pipeline = self

        class ConstantScaler(tvm.relay.ExprMutator):
            """Rewrite each constant node as multiplier * constant."""

            def visit_constant(self, const_node):
                return relay.multiply(pipeline.multiplier, const_node)

        return ConstantScaler().visit(func)

f = example()
mod = tvm.IRModule.from_expr(f)
custom_pass = CustomPipeline(multiplier=relay.const(3, "float32"))
assert custom_pass.info.name == "CustomPipeline"
mod3 = custom_pass(mod)
print(mod3)
# -
# Debug a Pass
# ------------
# TVM provides users a plug-and-play style debugging pass that prints the IR
# after a certain pass is done through a special pass (``PrintIR``) to dump the IR of the
# whole module. A slightly modified version of the sequential pass example
# could be like the following to enable IR dumping for ``FoldConstant`` optimization.
#
#
f = example()
mod = tvm.IRModule.from_expr(f)
seq = tvm.transform.Sequential(
    [
        relay.transform.FoldConstant(),
        # PrintIR dumps the whole module right after FoldConstant finishes.
        tvm.transform.PrintIR(),
        relay.transform.EliminateCommonSubexpr(),
        relay.transform.FuseOps(),
    ]
)
# By inserting the ``PrintIR`` pass after ``FoldConstant``, the pass infra will
# dump out the module IR when ``FoldConstant`` is done. Users can plug in this
# pass after any pass they want to debug for viewing the optimization effect.
#
# There is a more flexible debugging mechanism. One can implement a ``PassInstrument``
# class to execute arbitrary code not only before and/or after each pass but also
# at entering/exiting ``PassContext``. See `pass_instrument_cpp_backend`
# for more details.
#
# Here we use the :py:func:`tvm.instrument.pass_instrument` decorator to implement
# a PassInstrument class printing IR before execution of each pass:
#
#
# +
@tvm.instrument.pass_instrument
class PrintIR:
    """Print the name of the pass, the IR, only before passes execute."""

    def run_before_pass(self, mod, info):
        # Bug fix: the original did print("Running pass: {}", info), passing
        # ``info`` as a second argument so the "{}" placeholder was printed
        # literally instead of being substituted. Format the string instead.
        print("Running pass: {}".format(info))
        print(mod)
# Run the pipeline with the PrintIR instrument attached; it fires before
# every pass in ``seq``. NOTE(review): the Target scope appears to be needed
# by some target-dependent passes — confirm against the TVM docs.
with tvm.transform.PassContext(opt_level=3, instruments=[PrintIR()]):
    with tvm.target.Target("llvm"):
        # Perform the optimizations.
        mod = seq(mod)
print(mod)
print("done")
# -
# Summary
# -------
# This tutorial has covered how we can write and invoke passes in TVM more
# conveniently using the pass infra. Different ways of invoking a pass are also
# discussed. Using :py:class:`tvm.transform.Sequential` can largely help
# users to ease the work of handling multiple optimization passes and their
# dependencies. In addition, an example is provided to illustrate
# how we can debug a pass using the ``PrintIR`` and tracing.
#
#
| _downloads/7ef14586a3b62fe120d97d5fedf72879/use_pass_infra.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
# # Matrix Indexing
# ## Col Vec vs. Row Vec
# Vectors are usually 1 dim.
#
#
# To access an attribute/element of a vector as $\vec v_i$:
# ```
# v[i]
# ```
#
# When having a stacked vector in matrix, to access the $i$-th vector stacked as **col vectors**:
# ```
# mat[:, i]
# ```
#
# To access the $i$-th vector stacked as **row vectors**:
# ```
# mat[i, :]
# # or simply
# mat[i]
# ```
# # Matrix Appending
# ## Initialization
# Initialize an empty matrix with given `dim` using `reshape`.
dim = 3
# An empty matrix with `dim` rows and zero columns, ready to grow column-wise.
X = np.empty((dim, 0))
X
# ## Append Vec
# Append a column vector to the matrix along the column axis.
vec = [1, 2, 3]
Y = np.column_stack((X, vec))
Y
Y = np.column_stack((Y, vec))
Y
# ## Append Mat
# Append a matrix along columns; this differs only in the size of axis=1.
#
# Promote vec to an (n, 1) matrix by adding a trailing axis.
mat = np.asarray(vec)[:, np.newaxis]
mat
np.hstack((Y, mat))
| Matrix.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
import re
import pandas as pd
from bs4 import BeautifulSoup
# Fetch one article and extract the href of its Chinese (zh) interlanguage link.
page = requests.get('https://en.wikipedia.org/wiki/Fantastic_Beasts_and_Where_to_Find_Them_(film)')
soup = BeautifulSoup(page.content, 'html.parser')
# The zh entry of the sidebar language list; soup.find returns None when
# the article has no Chinese version.
li = soup.find('li', class_='interlanguage-link interwiki-zh')
lin = li.find('a')
link = lin['href']
# Load the 2018 movie titles to look up (one per row in the 'Titles' column).
df = pd.read_excel('titles2018.xlsx')
df.head()
titles = df['Titles']
type(titles)
len(titles)
def _zh_interwiki_link(url):
    """Return the href of the Chinese interlanguage link on *url*.

    Raises an exception when the request fails or when the page has no zh
    interwiki entry (``soup.find`` returns None and ``.find('a')`` raises
    AttributeError).
    """
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    li = soup.find('li', class_='interlanguage-link interwiki-zh')
    return li.find('a')['href']

# For each title, try the disambiguated "<title>_(film)" article first, then
# the plain article; record '404' when neither yields a Chinese link. The
# duplicated fetch/parse code is factored into _zh_interwiki_link, and the
# original bare `except:` clauses are narrowed to `except Exception:` so
# KeyboardInterrupt/SystemExit still abort the scrape.
links = []
for idx, title in enumerate(titles):
    title = title.replace(' ','_')
    print('Now processing title number' + str(idx))
    try:
        try:
            link = _zh_interwiki_link('https://en.wikipedia.org/wiki/' + title + '_(film)')
        except Exception:
            link = _zh_interwiki_link('https://en.wikipedia.org/wiki/' + title)
        tmp = [link]
        print(len(tmp))
        links.append(tmp)
    except Exception:
        links.append('404')
        print('Not Found.')
len(links)
# Save links to csv
links = pd.DataFrame(data={"links": links})
links.to_csv("links.csv", sep=',')
# +
# Create a Pandas Excel writer using XlsxWriter as the engine. A context
# manager closes (and writes) the file even on error; ExcelWriter.save()
# was deprecated and removed in pandas 2.0 in favor of close().
with pd.ExcelWriter('links.xlsx', engine='xlsxwriter') as writer:
    # Convert the dataframe to an XlsxWriter Excel object.
    links.to_excel(writer, sheet_name='links')
| scrape_wiki_2018.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="UYmG9L15Bwxr" outputId="caa7776d-9ab1-4b00-cf47-a0ad99e70af2"
# !git clone https://github.com/slowwavesleep/NeuralMachineTranslation.git
# + colab={"base_uri": "https://localhost:8080/"} id="OLgamT-3CFhb" outputId="0a48ac21-2223-45e3-aa7f-a3e3830149ed"
# cd NeuralMachineTranslation
# + id="7o0QA7ZSCN7J" colab={"base_uri": "https://localhost:8080/"} outputId="1c9dd7ae-12c5-4f8e-f706-12ff3f8eaf4a"
# !bash colab_helper.sh
# + colab={"base_uri": "https://localhost:8080/"} id="w0QIUpFTDBuC" outputId="3fbb2d57-9dce-4410-b35c-898518e610d6"
# !python train.py colab_config.yml
# + colab={"base_uri": "https://localhost:8080/"} id="gZ0hg2U6N-gP" outputId="ecdcf772-1bf5-47bc-f0bb-ba55e4eaad0d"
# !python test.py results/baseline/translations.txt
# + colab={"base_uri": "https://localhost:8080/"} id="TM17PKKRN505" outputId="195fe369-c435-4ba3-d5fc-45fddfc6a79d"
# !python test.py results/main/translations.txt
# + colab={"base_uri": "https://localhost:8080/"} id="Ilq8glVVFjfN" outputId="6fbb5b44-0124-4416-ad17-3dd1ce2fe03a"
# !python test.py results/colab/translations.txt
# + id="kMbkKQ15QdK7"
| colab_train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import ipymaterialui as mui
import ipywidgets as widgets
# Shared demo strings reused as labels throughout this notebook.
text1 = "Jupyter"
text2 = "Jupyter Widgets"
text3 = "Material UI"
text4 = "React"
texts = [text1, text2, text3, text4]
# A single Material UI chip.
chip = mui.Chip(label=text1)
chip
# the baseclass is just a generic HTML element, so chips can be grouped in a div
chips = [mui.Chip(label=text) for text in texts]
chips_div = mui.Html(tag='div', children=chips)
chips_div
# Button whose click handler prints into an Output widget.
# Event callbacks receive (widget, event name, event data).
out = widgets.Output()
b = mui.Button(children=text2)
@out.capture()
def click(button, event, data):
    print(text3)
b.on_event('onClick', click)
widgets.VBox([b, out])
# seems like the checkbox does not support children
checkbox_mui = mui.Checkbox(checked=True, description=text4)
checkbox_classic = widgets.Checkbox(description=text1)
# Keep the Material UI and classic checkboxes in sync on the client side.
widgets.jslink((checkbox_mui, 'checked'), (checkbox_classic, 'value'))
widgets.VBox([checkbox_classic, checkbox_mui])
toggle_button = mui.ToggleButton(children=text2, selected=True, value='dummy')
widgets.jslink((toggle_button, 'selected'), (checkbox_classic, 'value'))
toggle_button
menuitems = [
    mui.MenuItem(children=text1, value='1'),
    mui.MenuItem(children=text2, value='2'),
    mui.MenuItem(children=text3, value='3')
]
menu = mui.Menu(children=menuitems)
# Nice looking lists, the 3rd acting like a button
list_items = [
    mui.ListItem(children=[mui.ListItemText(primary=text1, secondary=text3)], divider=True),
    mui.ListItem(children=[mui.ListItemText(primary=text2, secondary=text4)], divider=True),
    mui.ListItem(children=[mui.ListItemText(primary=text3, secondary=text1)], divider=True, button=True),
    mui.ListItem(children=[mui.ListItemText(primary=text4, secondary=text2)], divider=True)
]
mui.List(children=list_items)
# For the moment only list items can be used for popup menus
# This needs a more generic solution?
list_item_text = mui.ListItemText(primary=text4, secondary=text1, button=True)
list_item = mui.ListItem(children=[list_item_text], button=True, menu=menu)
list_item
# Unlike standard ipywidgets (controls), values can often be widgets.
toggle_buttons = [
    mui.ToggleButton(children='v1', value=chips[0]),
    mui.ToggleButton(children='v2', value=chips[1]),
    mui.ToggleButton(children='v3', value=chips[2]),
    mui.ToggleButton(children='v4', value=chips[3]),
]
# Non-exclusive group: value is a list of the selected buttons' values.
toggle_button_group = mui.ToggleButtonGroup(
    value=[chips[2]],
    children=toggle_buttons,
    exclusive=False)
toggle_button_group
# these values can be jslinked, to create a dynamic layout easily
div = mui.Html(tag='div')
widgets.jslink((toggle_button_group, 'value'), (div, 'children'))
div
div.children = [chips[2]]
# similar for an exclusive toggle button
toggle_buttons = [
    mui.ToggleButton(children='v1', value=chips[0]),
    mui.ToggleButton(children='v2', value=chips[1]),
    mui.ToggleButton(children='v3', value=chips[2]),
    mui.ToggleButton(children='v4', value=chips[3]),
]
# Exclusive group: value is a single widget, not a list.
toggle_button_group = mui.ToggleButtonGroup(
    value=chips[2],
    children=toggle_buttons,
    exclusive=True)
toggle_button_group
# here we use the convenience attribute 'child'
div = mui.Html(tag='div')
widgets.jslink((toggle_button_group, 'value'), (div, 'children'))
div
# Most controls, like select, come without a label
menu_items = [
    mui.MenuItem(children=text1, value=chips[0]),
    mui.MenuItem(children=text2, value=chips[1]),
    mui.MenuItem(children=text3, value=chips[2])
]
# No selection is equivalent to '' (idea: use None for Python and undefined in js)
select = mui.Select(value='', children=menu_items, multiple=False)
select
# Using it in combination with InputLabel and FormControl
input_label = mui.InputLabel(children='Text value', placeholder='lala')
# style is a dict with css key/values
form_control = mui.FormControl(children=[input_label, select], style_={'width': '198px'})
form_control
# Select can also be used to select multiple values
# values can be heterogeneous, ints, floats, strings, or widgets are supported
menu_items = [
    mui.MenuItem(children=text1, value=0),
    mui.MenuItem(children=text2, value='1'),
    mui.MenuItem(children=text3, value=3.14)
]
select_multiple = mui.Select(value=['1', 3.14], children=menu_items, multiple=True, style_={'width': '248px'})
menu = mui.Menu(children=menuitems)
input_label = mui.InputLabel(children='Selection')
form_control = mui.FormControl(children=[input_label, select_multiple])
form_control
# Traits can also be updated from the Python side.
select_multiple.value = []
input_label.description = 'New selection'
text_field = mui.TextField(placeholder='e.g. <NAME>', label='Name')
text_field
# Tabs/Tab in MaterialUI are only the 'header' of tabs, use the .value together with
# jslink and a Div widget to implement a real tab
tabs = mui.Tabs(children=[mui.Tab(label='Tab: ' +chip.label, value=chip) for chip in chips], value=chips[2])
tabs
tabDiv = mui.Html(tag='div')
widgets.jslink((tabs, 'value'), (tabDiv, 'children'))
tabDiv
# putting this together with some Div placeholders
divs = [mui.Html(tag='div', children=text) for text in texts]
tabs = mui.Tabs(children=[mui.Tab(label='Tab: ' +text, value=div) for div, text in zip(divs, texts)], value=divs[1])
div = mui.Html(tag='div')
widgets.jslink((tabs, 'value'),(div, 'children'))
mui.Html(tag='div', children=[tabs, div])
# again, styling with style dict
for k, color in enumerate('red green blue orange'.split()):
    divs[k].style_ = {'color': color}
#     tabs.children[k].style = {'backgroundColor': color}
# A Material UI switch; checked can be read and written from Python.
switch = mui.Switch(checked=False)
switch
switch.checked
switch.checked = False
# Wrap the switch with a label.
switch_form = mui.FormControlLabel(control=switch, label='Multiple selection')
switch_form
# Select can also be used to select multiple values
# values can be heterogeneous, ints, floats, strings, or widgets are supported
menu_items = [
    # first entry is the labeled switch itself, embedded as a menu row
    mui.MenuItem(children=switch_form, button=False),
    mui.MenuItem(children=text1, value=0),
    mui.MenuItem(children=text2, value='1'),
    mui.MenuItem(children=text3, value=3.14)
]
select_multiple = mui.Select(value=['1', 3.14], children=menu_items, multiple=True, style_={'width': '248px'})
menu = mui.Menu(children=menu_items)
input_label = mui.InputLabel(children='Selection')
form_control = mui.FormControl(children=[input_label, select_multiple])
form_control
menuitems = [
    mui.MenuItem(children=switch_form),
    mui.MenuItem(children=text1),
    mui.MenuItem(children=text2),
    mui.MenuItem(children=text3)
]
# +
# For the moment only list items can be used for popup menus
# This needs a more generic solution?
# Clicking the list item (or any menu entry, or closing the menu) toggles
# the popup menu's open state.
def toggle_menu(widget, event, data):
    menu2.open_ = not menu2.open_
for item in menuitems:
    item.on_event('onClick', toggle_menu)
list_item_text = mui.ListItemText(primary=text4, secondary=text1, button=True)
list_item = mui.ListItem(children=[list_item_text], button=True)
list_item.on_event('onClick', toggle_menu)
menu2 = mui.Menu(anchor_el=list_item, keep_mounted=True, open_=False, children=menuitems)
menu2.on_event('onClose', toggle_menu)
mui.List(children=[list_item, menu2])
# -
# -
| Core examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Forecasting II: state space models
#
# This tutorial covers state space modeling with the [pyro.contrib.forecast](http://docs.pyro.ai/en/latest/contrib.forecast.html) module. This tutorial assumes the reader is already familiar with [SVI](http://pyro.ai/examples/svi_part_ii.html), [tensor shapes](http://pyro.ai/examples/tensor_shapes.html), and [univariate forecasting](http://pyro.ai/examples/forecasting_i.html).
#
# See also:
#
# - [Forecasting I: univariate, heavy tailed](http://pyro.ai/examples/forecasting_i.html)
# - [Forecasting III: hierarchical models](http://pyro.ai/examples/forecasting_iii.html)
#
# #### Summary
#
# - Pyro's [ForecastingModel](http://docs.pyro.ai/en/latest/contrib.forecast.html#pyro.contrib.forecast.forecaster.ForecastingModel) can combine regression, variational inference, and exact inference.
# - To model a linear-Gaussian dynamical system, use a [GaussianHMM](http://docs.pyro.ai/en/latest/distributions.html#gaussianhmm) `noise_dist`.
# - To model a heavy-tailed linear dynamical system, use [LinearHMM](http://docs.pyro.ai/en/latest/distributions.html#linearhmm) with heavy-tailed distributions.
# - To enable inference with [LinearHMM](http://docs.pyro.ai/en/latest/distributions.html#linearhmm), use a [LinearHMMReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.hmm.LinearHMMReparam) reparameterizer.
# +
import math
import torch
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.contrib.examples.bart import load_bart_od
from pyro.contrib.forecast import ForecastingModel, Forecaster, eval_crps
from pyro.infer.reparam import LinearHMMReparam, StableReparam, SymmetricStableReparam
from pyro.ops.tensor_utils import periodic_repeat
from pyro.ops.stats import quantile
import matplotlib.pyplot as plt
# %matplotlib inline
# Pin the tutorial to the pyro release it was written against.
assert pyro.__version__.startswith('1.4.0')
pyro.enable_validation(True)
pyro.set_rng_seed(20200305)
# -
# ## Intro to state space models
#
# In the [univariate tutorial](http://pyro.ai/examples/forecasting_i.html) we saw how to model time series as regression plus a local level model, using variational inference. This tutorial covers a different way to model time series: state space models and exact inference. Pyro's forecasting module allows these two paradigms to be combined, for example modeling seasonality with regression, including a slow global trend, and using a state-space model for short-term local trend.
#
# Pyro implements a few state space models, but the most important are the [GaussianHMM](http://docs.pyro.ai/en/latest/distributions.html#gaussianhmm) distribution and its heavy-tailed generalization the [LinearHMM](http://docs.pyro.ai/en/latest/distributions.html#linearhmm) distribution. Both of these model a linear dynamical system with hidden state; both are multivariate, and both allow learning of all process parameters. On top of these the [pyro.contrib.timeseries](http://docs.pyro.ai/en/latest/contrib.timeseries.html) module implements a variety of multivariate Gaussian Process models that compile down to `GaussianHMM`s.
#
# Pyro's inference for `GaussianHMM` uses parallel-scan Kalman filtering, allowing fast analysis of very long time series. Similarly, Pyro's inference for `LinearHMM` uses entirely parallel auxiliary variable methods to reduce to a `GaussianHMM`, which then permits parallel-scan inference. Thus both methods allow parallelization of long time series analysis, even for a single univariate time series.
#
# Let's again look at the [BART train](https://www.bart.gov/about/reports/ridership) ridership dataset:
dataset = load_bart_od()
print(dataset.keys())
print(dataset["counts"].shape)
print(" ".join(dataset["stations"]))
# Aggregate origin x destination counts to a single hourly total, then
# log1p-transform so the heavy-tailed counts are closer to Gaussian.
data = dataset["counts"].sum([-1, -2]).unsqueeze(-1).log1p()
print(data.shape)
plt.figure(figsize=(9, 3))
plt.plot(data, 'b.', alpha=0.1, markeredgewidth=0)
plt.title("Total hourly ridership over nine years")
plt.ylabel("log(# rides)")
plt.xlabel("Hour after 2011-01-01")
plt.xlim(0, len(data));
plt.figure(figsize=(9, 3))
plt.plot(data)
plt.title("Total hourly ridership over one month")
plt.ylabel("log(# rides)")
plt.xlabel("Hour after 2011-01-01")
plt.xlim(len(data) - 24 * 30, len(data));
# ## GaussianHMM
#
# Let's start by modeling hourly seasonality together with a local linear trend, where we model seasonality via regression and local linear trend via a [GaussianHMM](http://docs.pyro.ai/en/latest/distributions.html#gaussianhmm). This noise model includes a mean-reverting hidden state (an [Ornstein-Uhlenbeck process](https://en.wikipedia.org/wiki/Ornstein%E2%80%93Uhlenbeck_process)) plus Gaussian observation noise.
T0 = 0  # beginning
T2 = data.size(-2)  # end
T1 = T2 - 24 * 7 * 2  # train/test split: hold out the last two weeks
# Mean weekly-hourly profile (length 24*7), computed over whole weeks of the
# training split only; reused by the Model classes below.
means = data[:T1 // (24 * 7) * 24 * 7].reshape(-1, 24 * 7).mean(0)
class Model1(ForecastingModel):
    """Hard-coded weekly seasonality plus a learned linear-Gaussian local model.

    The module-level `means` profile is the mean prediction; the residual is
    modeled by a GaussianHMM with a 1-d mean-reverting hidden state and
    homoskedastic observation noise.
    """
    def model(self, zero_data, covariates):
        duration = zero_data.size(-2)
        # We'll hard-code the periodic part of this model, learning only the local model.
        prediction = periodic_repeat(means, duration, dim=-1).unsqueeze(-1)
        # On top of this mean prediction, we'll learn a linear dynamical system.
        # This requires specifying five pieces of data, on which we will put structured priors.
        init_dist = dist.Normal(0, 10).expand([1]).to_event(1)
        timescale = pyro.sample("timescale", dist.LogNormal(math.log(24), 1))
        # Note timescale is a scalar but we need a 1x1 transition matrix (hidden_dim=1),
        # thus we unsqueeze twice using [..., None, None].
        trans_matrix = torch.exp(-1 / timescale)[..., None, None]
        trans_scale = pyro.sample("trans_scale", dist.LogNormal(-0.5 * math.log(24), 1))
        trans_dist = dist.Normal(0, trans_scale.unsqueeze(-1)).to_event(1)
        # Note the obs_matrix has shape hidden_dim x obs_dim = 1 x 1.
        obs_matrix = torch.tensor([[1.]])
        obs_scale = pyro.sample("obs_scale", dist.LogNormal(-2, 1))
        obs_dist = dist.Normal(0, obs_scale.unsqueeze(-1)).to_event(1)
        noise_dist = dist.GaussianHMM(
            init_dist, trans_matrix, trans_dist, obs_matrix, obs_dist, duration=duration)
        self.predict(noise_dist, prediction)
# We can then train the model on many years of data. Note that because we are being variational about only time-global variables, and exactly integrating out time-local variables (via `GaussianHMM`), stochastic gradients are very low variance; this allows us to use a large learning rate and few steps.
# %%time
pyro.set_rng_seed(1)
pyro.clear_param_store()
covariates = torch.zeros(len(data), 0)  # empty
forecaster = Forecaster(Model1(), data[:T1], covariates[:T1], learning_rate=0.1, num_steps=400)
# Print the posterior median of every scalar latent for a quick sanity check.
for name, value in forecaster.guide.median().items():
    if value.numel() == 1:
        print("{} = {:0.4g}".format(name, value.item()))
# Plotting forecasts of the next two weeks of data, we see mostly reasonable forecasts, but an anomaly on Christmas when rides were overpredicted. This is to be expected, as we have not modeled yearly seasonality or holidays.
# +
# Draw posterior-predictive samples over the held-out window and score with
# CRPS (lower is better).
samples = forecaster(data[:T1], covariates, num_samples=100)
samples.clamp_(min=0)  # apply domain knowledge: the samples must be positive
p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1)
crps = eval_crps(samples, data[T1:])
print(samples.shape, p10.shape)
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(torch.arange(T1 - 24 * 7, T2),
         data[T1 - 24 * 7: T2], 'k-', label='truth')
plt.title("Total hourly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Hour after 2011-01-01")
plt.xlim(T1 - 24 * 7, T2)
plt.text(78732, 3.5, "Christmas", rotation=90, color="green")
plt.legend(loc="best");
# -
# Next let's change the model to use heteroskedastic observation noise, depending on the hour of week.
class Model2(ForecastingModel):
    """Like Model1, but with heteroskedastic observation noise: one obs_scale
    per hour-of-week (24*7 values), tiled over the full duration.
    """
    def model(self, zero_data, covariates):
        duration = zero_data.size(-2)
        prediction = periodic_repeat(means, duration, dim=-1).unsqueeze(-1)
        init_dist = dist.Normal(0, 10).expand([1]).to_event(1)
        timescale = pyro.sample("timescale", dist.LogNormal(math.log(24), 1))
        trans_matrix = torch.exp(-1 / timescale)[..., None, None]
        trans_scale = pyro.sample("trans_scale", dist.LogNormal(-0.5 * math.log(24), 1))
        trans_dist = dist.Normal(0, trans_scale.unsqueeze(-1)).to_event(1)
        obs_matrix = torch.tensor([[1.]])
        # To model heteroskedastic observation noise, we'll sample obs_scale inside a plate,
        # then repeat to full duration. This is the only change from Model1.
        with pyro.plate("hour_of_week", 24 * 7, dim=-1):
            obs_scale = pyro.sample("obs_scale", dist.LogNormal(-2, 1))
        obs_scale = periodic_repeat(obs_scale, duration, dim=-1)
        obs_dist = dist.Normal(0, obs_scale.unsqueeze(-1)).to_event(1)
        noise_dist = dist.GaussianHMM(
            init_dist, trans_matrix, trans_dist, obs_matrix, obs_dist, duration=duration)
        self.predict(noise_dist, prediction)
# %%time
pyro.set_rng_seed(1)
pyro.clear_param_store()
covariates = torch.zeros(len(data), 0)  # empty
forecaster = Forecaster(Model2(), data[:T1], covariates[:T1], learning_rate=0.1, num_steps=400)
# Print the posterior median of every scalar latent for a quick sanity check.
for name, value in forecaster.guide.median().items():
    if value.numel() == 1:
        print("{} = {:0.4g}".format(name, value.item()))
# Note this gives us a much longer timescale and thereby more accurate short-term predictions:
# +
samples = forecaster(data[:T1], covariates, num_samples=100)
samples.clamp_(min=0)  # apply domain knowledge: the samples must be positive
p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1)
crps = eval_crps(samples, data[T1:])
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(torch.arange(T1 - 24 * 7, T2),
         data[T1 - 24 * 7: T2], 'k-', label='truth')
plt.title("Total hourly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Hour after 2011-01-01")
plt.xlim(T1 - 24 * 7, T2)
plt.text(78732, 3.5, "Christmas", rotation=90, color="green")
plt.legend(loc="best");
# -
# Zoomed view around the train/test boundary (2 days before, 4 after).
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(torch.arange(T1 - 24 * 7, T2),
         data[T1 - 24 * 7: T2], 'k-', label='truth')
plt.title("Total hourly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Hour after 2011-01-01")
plt.xlim(T1 - 24 * 2, T1 + 24 * 4)
plt.legend(loc="best");
# ## Heavy-tailed modeling with LinearHMM
#
# Next let's change our model to a linear-[Stable](http://docs.pyro.ai/en/latest/distributions.html#pyro.distributions.Stable) dynamical system, exhibiting learnable heavy tailed behavior in both the process noise and observation noise. As we've already seen in the [univariate tutorial](http://pyro.ai/examples/forecasting_i.html), this will require special handling of stable distributions by [poutine.reparam()](http://docs.pyro.ai/en/latest/poutine.html#pyro.poutine.handlers.reparam). For state space models, we combine [LinearHMMReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.hmm.LinearHMMReparam) with other reparameterizers like [StableReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.stable.StableReparam) and [SymmetricStableReparam](http://docs.pyro.ai/en/latest/infer.reparam.html#pyro.infer.reparam.stable.SymmetricStableReparam). All reparameterizers preserve behavior of the generative model, and only serve to enable inference via auxiliary variable methods.
class Model3(ForecastingModel):
    """Heavy-tailed variant: a linear-Stable dynamical system (LinearHMM)
    with a learned global stability (tail weight) and observation skew.
    Inference is enabled by reparameterizing the HMM's "residual" site.
    """
    def model(self, zero_data, covariates):
        duration = zero_data.size(-2)
        prediction = periodic_repeat(means, duration, dim=-1).unsqueeze(-1)
        # First sample the Gaussian-like parameters as in previous models.
        init_dist = dist.Normal(0, 10).expand([1]).to_event(1)
        timescale = pyro.sample("timescale", dist.LogNormal(math.log(24), 1))
        trans_matrix = torch.exp(-1 / timescale)[..., None, None]
        trans_scale = pyro.sample("trans_scale", dist.LogNormal(-0.5 * math.log(24), 1))
        obs_matrix = torch.tensor([[1.]])
        with pyro.plate("hour_of_week", 24 * 7, dim=-1):
            obs_scale = pyro.sample("obs_scale", dist.LogNormal(-2, 1))
        obs_scale = periodic_repeat(obs_scale, duration, dim=-1)
        # In addition to the Gaussian parameters, we will learn a global stability
        # parameter to determine tail weights, and an observation skew parameter.
        stability = pyro.sample("stability", dist.Uniform(1, 2).expand([1]).to_event(1))
        skew = pyro.sample("skew", dist.Uniform(-1, 1).expand([1]).to_event(1))
        # Next we construct stable distributions and a linear-stable HMM distribution.
        trans_dist = dist.Stable(stability, 0, trans_scale.unsqueeze(-1)).to_event(1)
        obs_dist = dist.Stable(stability, skew, obs_scale.unsqueeze(-1)).to_event(1)
        noise_dist = dist.LinearHMM(
            init_dist, trans_matrix, trans_dist, obs_matrix, obs_dist, duration=duration)
        # Finally we use a reparameterizer to enable inference.
        rep = LinearHMMReparam(None,  # init_dist is already Gaussian.
                               SymmetricStableReparam(),  # trans_dist is symmetric.
                               StableReparam())  # obs_dist is asymmetric.
        with poutine.reparam(config={"residual": rep}):
            self.predict(noise_dist, prediction)
# Note that since this model introduces auxiliary variables that are learned by variational inference, gradients are higher variance and we need to train for longer.
# %%time
pyro.set_rng_seed(1)
pyro.clear_param_store()
covariates = torch.zeros(len(data), 0)  # empty
# Note: default num_steps (no override) — auxiliary-variable gradients are
# noisier, so this trains longer than Model1/Model2.
forecaster = Forecaster(Model3(), data[:T1], covariates[:T1], learning_rate=0.1)
for name, value in forecaster.guide.median().items():
    if value.numel() == 1:
        print("{} = {:0.4g}".format(name, value.item()))
# +
samples = forecaster(data[:T1], covariates, num_samples=100)
samples.clamp_(min=0)  # apply domain knowledge: the samples must be positive
p10, p50, p90 = quantile(samples, (0.1, 0.5, 0.9)).squeeze(-1)
crps = eval_crps(samples, data[T1:])
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(torch.arange(T1 - 24 * 7, T2),
         data[T1 - 24 * 7: T2], 'k-', label='truth')
plt.title("Total hourly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Hour after 2011-01-01")
plt.xlim(T1 - 24 * 7, T2)
plt.text(78732, 3.5, "Christmas", rotation=90, color="green")
plt.legend(loc="best");
# -
# Zoomed view around the train/test boundary (2 days before, 4 after).
plt.figure(figsize=(9, 3))
plt.fill_between(torch.arange(T1, T2), p10, p90, color="red", alpha=0.3)
plt.plot(torch.arange(T1, T2), p50, 'r-', label='forecast')
plt.plot(torch.arange(T1 - 24 * 7, T2),
         data[T1 - 24 * 7: T2], 'k-', label='truth')
plt.title("Total hourly ridership (CRPS = {:0.3g})".format(crps))
plt.ylabel("log(# rides)")
plt.xlabel("Hour after 2011-01-01")
plt.xlim(T1 - 24 * 2, T1 + 24 * 4)
plt.legend(loc="best");
| tutorial/source/forecasting_ii.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="-Uky66-78LM7"
# The fraction 49/98 is a curious fraction, as an inexperienced mathematician in attempting to simplify it may incorrectly believe that 49/98 = 4/8, which is correct, is obtained by cancelling the 9s.
#
# We shall consider fractions like, 30/50 = 3/5, to be trivial examples.
#
# There are exactly four non-trivial examples of this type of fraction, less than one in value, and containing two digits in the numerator and denominator.
#
# If the product of these four fractions is given in its lowest common terms, find the value of the denominator.
#
# + id="Ownb35KU9OTU"
def constraint_is_met(numer, denom, cancelled=None):
    """Return True if naive digit-cancelling happens to be correct.

    Tests whether the two-digit fraction (10*numer + cancelled) /
    (10*cancelled + denom) equals numer/denom, using cross-multiplication
    so the comparison is exact integer arithmetic (no float error).

    Parameters
    ----------
    numer, denom : int
        Single digits: the "simplified" numerator and denominator.
    cancelled : int, optional
        The digit being cancelled.  Defaults to the notebook-global loop
        variable ``i`` for backward compatibility with the search cell.
    """
    if cancelled is None:
        cancelled = i  # fall back to the global loop variable, as before
    return ((numer * 10 + cancelled) * denom) == (numer * (cancelled * 10 + denom))
# + colab={"base_uri": "https://localhost:8080/"} id="cQ0Ckd648iC7" outputId="47161ce9-e719-4ea8-bd7c-d4db1e5c8c86"
import math
numer_prod = 1
denom_prod = 1
# Enumerate digits with numer < denom < i so every candidate fraction is
# less than one and non-trivial; `i` is the digit that gets "cancelled"
# (constraint_is_met reads it as a global).
for i in range(1, 10):
    for denom in range(1, i):
        for numer in range(1, denom):
            if constraint_is_met(numer, denom):
                numer_prod *= numer
                denom_prod *= denom
# Reduce the product of the four fractions to lowest terms; the answer is
# the resulting denominator.
denom_prod / math.gcd(numer_prod, denom_prod)
# + id="xHX_xRCt_0js"
| 33_Digit_Cancelling_Fractions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="C8YgKmi-9XFh"
# Download full list of PMID from Pubmed for articles with keywork 'Multiple sclerosis' published from 2000 onwards. For each PMID fetch the abstract
# + id="TpplSfmk9rqh"
# %%capture
# !pip install bertopic
# !pip install metapub
import pandas as pd
import numpy as np
from metapub import PubMedFetcher
fetch = PubMedFetcher()
from bertopic import BERTopic
from plotly.offline import init_notebook_mode
init_notebook_mode(connected=True)
# + id="edt1E_7B3nfC"
# Load the previously downloaded PMID list and add an empty Abstract column
# to be filled by the fetch loop below.
df = pd.read_csv("fulldata.csv")
# + id="9dFxU9v24Y3Q"
df['Abstract']=''
# + id="koM67TUw8P-S"
# Fetch the abstract for every PMID (best-effort: failures for a single
# PMID are skipped).  Bug fix: the original used a bare `except:`, which
# also swallows SystemExit/KeyboardInterrupt and makes the loop
# uninterruptible; catch Exception instead.
for index, row in df.iterrows():
    try:
        pmid = df.loc[index, 'PMID']
        abstract = fetch.article_by_pmid(pmid).abstract
        df.loc[index, 'Abstract'] = abstract
        print(index)  # progress indicator
    except Exception:
        # Network/parse failure for this PMID: leave its Abstract empty.
        pass
# + id="DKwImXxn5Tql"
# Persist the fetched abstracts so the slow download need not be repeated.
df.to_csv("fulldata.csv")
# + colab={"base_uri": "https://localhost:8080/", "height": 521} id="DPv-WE2_5C9h" outputId="81d1917e-73be-4978-b3da-6d667f18df57"
df.head()
# + id="RPgcUUfx6UcI"
df = df.drop_duplicates()
# + colab={"base_uri": "https://localhost:8080/"} id="65M1H4o56odJ" outputId="157b649c-576b-469b-ba9d-da3458cf45fe"
df.info()
# + id="LszkXR5y62Qy"
# Keep only rows whose abstract was actually fetched.
df = df[df['Abstract'].notna()]
# + colab={"base_uri": "https://localhost:8080/"} id="63rhG1hY65RQ" outputId="4b2378e9-4ff6-4efa-e334-ecc1a3c7f56a"
df.info()
# + id="5aBhYGOb_gYY"
df = df.reset_index(drop=True)
# + id="81ko9eI-Rl3n"
# Drop abstracts about look-alike conditions that are not multiple sclerosis.
# Bug fix: "(ALS)" was being interpreted as a regex (the parentheses form a
# capture group, so it matched any "ALS" substring and pandas warns about
# match groups); regex=False matches the literal parenthesized abbreviation.
df = df[~df.Abstract.str.contains("(ALS)", regex=False)]
df = df[~df.Abstract.str.contains("scleroderma")]
df = df[~df.Abstract.str.contains("angiomyolipoma")]
# + id="qlo-OFsQ85eP"
# Plain-list views of the columns BERTopic consumes below.
titles = list(df['Title'])
abstracts = list(df['Abstract'])
years = list(df['Publication Year'])
# + colab={"base_uri": "https://localhost:8080/"} id="XxfEK3LY7J6i" outputId="3a81b627-3c7e-4f66-dc2b-047ebcbe7cdd"
# Sanity check: the three lists must be aligned row-for-row.
len(titles), len(abstracts), len(years)
# + id="tV71WEmLqw_1"
# min_topic_size=50 merges small clusters; embeddings from a MiniLM model.
topic_model = BERTopic(verbose=True, embedding_model="paraphrase-MiniLM-L12-v2", min_topic_size=50)
# + colab={"base_uri": "https://localhost:8080/", "height": 104, "referenced_widgets": ["043145a5df764d8b9b2b636d7c4fd8ab", "45c98dcd6d0a43efa26b9eedf332f388", "06571d228df548aaba7a6f9f82eeb672", "fd657850ecfe49e88e546b825a575e1a", "<KEY>", "<KEY>", "03c63201553a40029d07097aa88ef861", "c1e1506ae4ef409aa241577db565f762", "<KEY>", "<KEY>", "022a291a8f1a42a996424f0bbf591904"]} id="xh0rCcJX-b4W" outputId="200a518f-26e6-4587-bfed-3d409b6b1a1e"
topics = topic_model.fit_transform(abstracts)
# + id="BsUDdHE4G_pf"
# fit_transform returns a (topic assignments, probabilities) pair.
a,b = topics
# + id="4prTJIhQbkEP"
topic_model.get_topic_info()
# + id="NLrPKhvybj8G"
topic_model.visualize_barchart(top_n_topics=9, height=700)
# + id="w7JjNbmgbjv_"
topic_model.visualize_term_rank()
# + id="LHdPSu07bjZM"
topic_model.visualize_term_rank(log_scale=True)
# + id="og-o7daccA51"
topic_model.visualize_topics(top_n_topics=50)
# + id="NhhTbRCLcGvZ"
topic_model.visualize_hierarchy(top_n_topics=50, width=800)
# + id="cPbQ8EP7cMD5"
topic_model.visualize_heatmap(n_clusters=20, top_n_topics=100)
# + id="ko59CFVGcQK2"
# NOTE(review): `topics` here is the (assignments, probs) tuple unpacked
# above — `a` looks like the intended second argument; confirm against the
# installed bertopic version's topics_over_time signature.
topics_over_time = topic_model.topics_over_time(abstracts, topics, years)
# + id="bEl5xCWwcU7H"
topic_model.visualize_topics_over_time(topics_over_time, top_n_topics=20, width=900, height=500)
| Trending_topic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Assignment: Count Most Frequently used words in Veidenbaums.txt
# For English let's use Alice in Wonderland
# +
# Open File - Get Data
# Read Text
# Split Text into word tokens
# Count these tokens (we need to figure out how to)
# Save/Print Results
# +
# File is under /data/Veidenbaums.txt
# Alice is under /data/Alice_Wonderland.txt
# we are under /TextProcessing/CountingWords.ipynb
# -
# This means one level up and then again down into data
# "../data/Veidenbaums.txt"
# So called relative path
# filePath = "../data/Veidenbaums.txt"
filePath = "../data/Alice_Wonderland.txt"
with open(filePath, encoding="utf-8") as fstream:  # opening a filestream
    mytext = fstream.read()  # here our textual data is read into memory
# here fstream is closed automatically for you
len(mytext)  # how many symbols are in our text
mytext[:120]  # first 120 symbols from our text
mytext[-200:]  # last two hundred symbols of our text
# so one issue is that pretty much any data source will need additional cleaning
# here we have header and footer sections with some legal data
# this meta-data would distort our analysis
# for one we would get mentions of Mr. Gutenberg
# which Mr. <NAME> did not intend
# so we need to do some cleaning first
# ideas on getting rid of header and footer sections
#
mytext.index("End of Project Gutenberg")
# this will return the index of the first mention of the string
cleanend = mytext[:mytext.index("End of Project Gutenberg")]
# so this will cut and save text to last character
# before the above text is found
len(cleanend)
cleanend[-200:]  # last 200 characters
start_index = cleanend.index("START OF THIS PROJECT GUTENBERG EBOOK")
start_index
skip = len("START OF THIS PROJECT GUTENBERG EBOOK")
skip
cleanall = cleanend[start_index+skip:]  # so we want everything from the index+skip
cleanall[:150]
with open('Alice_clean.txt', mode="w", encoding="utf-8") as file_out:
    file_out.write(cleanall)
# so this recipe will create/overwrite 'Alice_clean.txt'
# using utf-8 encoding
# will write ALL of the text that is in cleanall variable
with open('../data/Alice_clean.txt', mode="w", encoding="utf-8") as file_out:
    file_out.write(cleanall)
# so this recipe will create/overwrite 'Alice_clean.txt' in data
# using utf-8 encoding
# will write ALL of the text that is in cleanall variable
# lets pretend i do not have cleanall in memory
# i can load text from my clean file
with open('../data/Alice_clean.txt', mode="r", encoding="utf-8") as file:
    mytext = file.read()  # so reads everything
mytext[:100]
# +
# so let's see about tokenization
# one is to use split using white space
# -
mysentence = "A quick brown fox jumped \t over the \n \n sleepy dog"
print(mysentence)
mysentence.split()  # so we split by white space, including newlines and tabs
# we could try splitting already but we will get dirty data(words)
mywords = mytext.split(" ")  # split on literal spaces only — leaves \n attached
len(mywords)
mywords[:25]
# so we need to get rid of some \n which split did not
# so how could we do this?
# replace to rescue!
clean_text = mytext.replace("\n", " ")  # why not nothing ""?
# replacing with "" we run the risk of combining words
clean_text[:100]
words = clean_text.split()
words[:20]
# here we could do additional cleaning by checking for maybe bad words
# bad characters
# unneeded words such stop words , meaning words which do not contribute to the meaning
# +
# so how could we count the occurences of each word?
# we could use something like a dictionary with words being keys and value being
# number of occurences
# -
# Manual word-frequency count: dict mapping word -> number of occurrences.
word_dict = {}
for word in words:
    if word in word_dict.keys():
        word_dict[word] += 1  # increase count for each occurence
    else:
        word_dict[word] = 1  # so first occurence you count as 1
# below is just a shorter version of the above
# word_dict[word] = word_dict.get(word, 0) + 1
list(word_dict.items())[:20]
from collections import Counter  # python already provides counter!
word_count = Counter(words)  # give a list of tokens
word_count.most_common(10)  # and you can get results immediately
# so now that we have some prelimenary results in
# we could start thinking about stripping stopwords
# maybe getting rid of very short words
word_count["she"]  # turns out Counter is just a dictionary with some benefits
word_count["he"]
import csv
# Write the counts as a tab-separated file (header plus one row per word).
with open("word_count.tsv", mode="w", encoding="utf-8") as f:
    f.write("word\tcount\n")  # so we add newlines by hand
    for word,count in word_count.most_common():
        f.write(f"{word}\t{count}\n")
# +
# we can use libraries such as CSV and Pandas to take care of writing
#
# +
# to continue we will need to perform some additional cleaning
# maybe think about some visualization of some of this data
# +
# we will need to clean all lines which contain *** as ending characters
# so lets try reading lines
# filePath = "../data/Veidenbaums.txt"
# with open(filePath, encoding="utf-8") as fstream:
# mylines = fstream.readlines()
# len(mylines)
# -
# NOTE(review): `mylines` is only defined in the commented-out cell above —
# running this notebook top-to-bottom as-is raises NameError here; uncomment
# the readlines() cell first.
mylines[:15]
cleanlines = [line for line in mylines if line[0]!='\n']  # drop blank lines
len(cleanlines)
cleanlines[:5]
# we do not want the lines which end with ***\n
headlines = [line for line in cleanlines if line.endswith("***\n")]
headlines[:5]
# we do not need the headlines!
# we do not want the lines which end with ***\n
noheadlines = [line for line in cleanlines if not line.endswith("***\n")]
noheadlines[:5]
# we could save the results
savePath = "../data/noHeadVeidenbaums.txt"
with open(savePath, mode="w", encoding="utf-8") as fstream:
    fstream.writelines(noheadlines)
# May 6th lets start with noheadlines
myPath = "../data/noHeadVeidenbaums.txt"
with open(myPath, encoding="utf-8") as fstream:
    noheadlines = fstream.readlines()
len(noheadlines)
#
noheadlines = [line for line in noheadlines if not "Treimanim" in line]
len(noheadlines)
# Characters to map to a space vs. characters to delete outright.
spaceChars = "\n-"
stopChars = """!?.,"':;()…"""
for char in stopChars:
    print(char)
# One big text from many lines
textNoHead = "".join(noheadlines)  # we could have used fstream.read earlier
textNoHead[:55]
# replace spacing characters with a space (a space, not "", so words do not fuse)
for char in spaceChars:
    print(f"Replacing {char} with space")
    textNoHead = textNoHead.replace(char, " ")
# print(textNoHead[:75])
textNoHead[:75]
for char in stopChars:
    print(f"Replacing {char} with nothing")
    textNoHead = textNoHead.replace(char, "")
textNoHead[:55]
savePath = "../data/noHeadVeidenbaumsOneLine.txt"
with open(savePath, mode="w", encoding="utf-8") as fstream:
    fstream.write(textNoHead)
textNoHead.index("Vēstule")
# nothing found thats good
textNoHead[5400:5430]
# charSet that's Camelcase another style
char_set = set(textNoHead)  # the distinct characters remaining after cleaning
char_set
ord("…")
words = textNoHead.split()
words[:5]
# we need to convert to lower case
# for word in words:
words_lower = [word.lower() for word in words]
words_lower[:5]
len(words_lower)
# +
# if we want to do it ourselves
# we could store it in a dictionary word and count
# {'pēc':5, 'ideālie':1, 'cenšas':3}
# -
unique_words = set(words_lower)
len(unique_words)
# i create a dictionary of unique words and set counter to 0
my_counter_dict = {word:0 for word in list(unique_words)}
my_counter_dict['pēc']
for word in words_lower:
    my_counter_dict[word] += 1  # each time i add 1 to right box(key)
my_counter_dict['pēc']
# Convert to (word, count) tuples so the counts can be sorted.
my_list_tuples = [(key, value) for key,value in my_counter_dict.items()]
my_list_tuples[:5]
sorted(my_list_tuples)[:5]
# not quite what we need because it sorts by the first item alphabetically
# solution we pass a function to show how to sort
my_most_common = sorted(my_list_tuples,key=lambda mytuple: mytuple[1], reverse=True)
my_most_common[:10]
# so sorting is possible but my recommendation is to use Counter
# Batteries are included no need to write our own counter
from collections import Counter
mycounter = Counter(words_lower)
mycounter.most_common(10)
type(mycounter.most_common(10))
# how to get only words 4 chars or longer ? :)
long_words = [word for word in words_lower if len(word) >= 4 ]
len(long_words)
long_counter = Counter(long_words)
long_counter.most_common(10)
'alus' in long_counter
type(long_counter)
long_counter.get('alus'), long_counter['alus']  #2nd would throw error if no beer existed
# we only get 5 letter words here
word_counter_5 = [mytuple for mytuple in long_counter.most_common() if mytuple[1] == 5]
word_counter_5
import json
with open('most_common.json', mode='w', encoding='utf-8') as fstream:
    json.dump(mycounter.most_common(), fstream, indent=2)
# if we want to save our Latvian or other languages besides ENglish we set
# turn off ascii
# https://stackoverflow.com/questions/18337407/saving-utf-8-texts-in-json-dumps-as-utf8-not-as-u-escape-sequence
with open('most_common.json', mode='w', encoding='utf-8') as fstream:
    json.dump(mycounter.most_common(), fstream, indent=2, ensure_ascii=False)
| TextProcessing/CountingWords_final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# """
# # Definition for a Node.
# class Node:
# def __init__(self, val, next, random):
# self.val = val
# self.next = next
# self.random = random
# """
# -
# ## Solution using recursion
def copyRandomList(head):
    """Deep-copy a linked list whose nodes also carry a `random` pointer.

    Bug fix: the original memoized copies keyed by node *value*
    (`data_dict[node.val]`), which conflates distinct nodes that happen to
    share a value and produces a wrong (or tangled) copy for lists with
    duplicates.  Memoize by node identity instead.

    Still recursive like the original; O(n) time, O(n) extra space.
    Returns the head of the copied list, or None for an empty list.
    """
    if head is None:
        return None
    clones = {}  # id(original node) -> its copy

    def _clone(node):
        # Return the memoized copy of `node`, creating it on first visit.
        if node is None:
            return None
        key = id(node)
        if key not in clones:
            copy = Node(node.val, None, None)
            # Register before recursing so cycles through `random` terminate.
            clones[key] = copy
            copy.next = _clone(node.next)
            copy.random = _clone(node.random)
        return clones[key]

    return _clone(head)
# ## Solution using iteration
def copyRandomList(head: 'Node') -> 'Node':
    """Deep-copy a linked list with `random` pointers, iteratively.

    Bug fix: the original located each `random` target by scanning the new
    list and comparing node *values* — O(n^2) overall and wrong whenever
    values repeat.  Use an identity-keyed map instead: one pass clones the
    nodes, a second pass wires `next` and `random`.

    O(n) time, O(n) extra space.  Returns None for an empty list.
    """
    if head is None:
        return None
    clone_of = {}  # id(original node) -> its clone
    node = head
    while node:
        clone_of[id(node)] = Node(node.val, None, None)
        node = node.next
    node = head
    while node:
        copy = clone_of[id(node)]
        copy.next = clone_of[id(node.next)] if node.next else None
        copy.random = clone_of[id(node.random)] if node.random else None
        node = node.next
    return clone_of[id(head)]
| 138_Copy_List_with_Random_Pointer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''pytorch'': conda)'
# name: torch
# ---
# - JIT?? https://pytorch.org/blog/optimizing-cuda-rnn-with-torchscript/
# - faster datasetdataloader
# - c++?
# - real data?
# - https://gist.github.com/ZijiaLewisLu/eabdca955110833c0ce984d34eb7ff39
#
#
# %matplotlib ipympl
import numpy as np
import matplotlib.pyplot as plt
import os
#os.environ["OMP_NUM_THREADS"] = "1"
#os.environ["MKL_NUM_THREADS"] = "2"
# Synthetic light curve: 100 epochs over ~4 time units, with small jitter
# added to the sampling times (then re-sorted so t is monotonic).
t = np.linspace(0, 4, num=100, dtype=np.float32)
t += np.random.randn(len(t))*0.01
t = np.sort(t)
# True period and a three-harmonic sinusoidal signal plus Gaussian noise.
P = 1.1234
m = np.sin(2.0*np.pi*t/P) + 0.5*np.sin(2.0*np.pi*2*t/P) + 0.25*np.sin(2.0*np.pi*3*t/P)
m += np.random.randn(len(m))*0.2
# Top panel: raw light curve. Bottom: folded at the true period (phase in [0,1)).
fig, ax = plt.subplots(2)
ax[0].plot(t, m, '.')
ax[1].plot(np.mod(t, P)/P, m, '.')
# +
import torch
import torch.nn as nn
class PeriodFinder(nn.Module):
    """1-D CNN that scores a phase-folded light curve.

    Input: tensor of shape (batch, 1, n_samples) — a magnitude series
    sorted by phase (n_samples must be >= 13 to survive the three
    kernel-5 convolutions). Output: (batch, 1) logit; pass through a
    sigmoid for the probability that the folding period is correct.
    """

    def __init__(self):
        # Fix: the original called super(type(self), self).__init__(),
        # which recurses forever as soon as this class is subclassed.
        # Plain super() is the correct Python 3 idiom.
        super().__init__()
        self.conv1 = nn.Conv1d(1, 8, 5, stride=1)
        self.conv2 = nn.Conv1d(8, 16, 5, stride=1)
        self.conv3 = nn.Conv1d(16, 16, 5, stride=1)
        self.apool = nn.AdaptiveAvgPool1d(1)
        self.linear1 = nn.Linear(16, 1)
        self.activation = nn.ReLU()

    def forward(self, x):
        h = self.activation(self.conv1(x))
        h = self.activation(self.conv2(h))
        h = self.activation(self.conv3(h))
        # Global average over the time axis -> (batch, 16, 1), then flatten
        # to (batch, 16) for the final linear layer.
        h = self.apool(h)
        return self.linear1(h.view(-1, self.linear1.weight.shape[1]))
from torch.utils.data import Dataset, DataLoader
class lc_folder(Dataset):
    """Dataset of phase-folded views of one light curve.

    Item `idx` is the magnitude series reordered by phase when the curve
    is folded at trial frequency ``freq[idx]``; shape (1, n_samples).
    """

    def __init__(self, mjd, mag):
        self.mjd = torch.from_numpy(mjd.astype('float32'))
        self.mag = torch.from_numpy(mag.astype('float32')).unsqueeze(0)
        # Trial frequency grid: 1e-4 .. 5 in steps of 1e-4.
        self.freq = torch.arange(1e-4, 5, step=1e-4)

    def __getitem__(self, idx):
        period = 1 / self.freq[idx]
        phase = torch.remainder(self.mjd, period)
        order = torch.argsort(phase)
        return self.mag[:, order]

    def __len__(self):
        return len(self.freq)
class lc_trainer(Dataset):
    """Single-example training set for the period-detection network.

    Every access draws a fresh sample: with probability 0.5 the curve is
    folded at the true period ``P`` (label 1.0), otherwise at a random
    period in (1e-4, 5+1e-4) (label 0.0).
    """

    def __init__(self, mjd, mag, P):
        self.mjd = torch.from_numpy(mjd.astype('float32'))
        self.mag = torch.from_numpy(mag.astype('float32')).unsqueeze(0)
        self.P = P

    def __getitem__(self, idx):
        # Coin flip decides positive (true period) vs negative example.
        use_true_period = torch.rand(1) > 0.5
        if use_true_period:
            label = 1.
            phase = torch.remainder(self.mjd, self.P)
        else:
            label = 0.
            phase = torch.remainder(self.mjd, 5*torch.rand(1)+1e-4)
        return self.mag[:, torch.argsort(phase)], label

    def __len__(self):
        return 1
# One-sample training set (knows the true period P) and an evaluation set
# that folds the light curve at every trial frequency.
data_train = lc_trainer(t, m, P)
data_eval = lc_folder(t, m)
# TorchScript-compile the model (JIT experiment; see link at the top).
with torch.jit.optimized_execution(True):
    my_script_module = torch.jit.script(PeriodFinder())
# Pin PyTorch to one thread so later timings are comparable.
torch.set_num_threads(1)
# +
# NOTE(review): training uses a fresh eager model below, not my_script_module.
model = PeriodFinder()
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(1000):
    epoch_loss = 0.0
    # data_train has length 1, so each "epoch" is a single random sample.
    for folded_data, label in DataLoader(data_train, batch_size=1):
        optimizer.zero_grad()
        yhat = model(folded_data)
        loss = criterion(yhat.squeeze(0), label)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    print(epoch_loss)
# -
# Stray sanity cell: the Bernoulli draw used inside lc_trainer.__getitem__.
torch.rand(1) > 0.5
# #%%timeit -r3 -n1
# #%%prun
# Score every candidate frequency: fold, run the trained model, collect logits.
output = torch.tensor([])
with torch.no_grad():
    for folded_data in DataLoader(data_eval, batch_size=512, num_workers=1):
        output = torch.cat((output, model(folded_data)))
# Periodogram-style plot: detection probability vs trial frequency.
fig, ax = plt.subplots()
ax.plot(data_eval.freq.numpy(), nn.Sigmoid()(output).numpy()[:, 0])
# %%timeit -r3 -n1
# Baseline comparison: classical MHAOV periodogram from the P4J package.
import P4J
per = P4J.periodogram(method="MHAOV")
per.set_data(t, m, m)
per.frequency_grid_evaluation(fmin=0, fmax=5., fresolution=1e-4)
| src/period_det_nnet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Speeding up Python Programs
# + [markdown] toc=true
# <h1>Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#NumPy-and-Vectorization" data-toc-modified-id="NumPy-and-Vectorization-1"><span class="toc-item-num">1 </span>NumPy and Vectorization</a></span></li><li><span><a href="#Switch-to-PyPy" data-toc-modified-id="Switch-to-PyPy-2"><span class="toc-item-num">2 </span>Switch to PyPy</a></span></li><li><span><a href="#Testing" data-toc-modified-id="Testing-3"><span class="toc-item-num">3 </span>Testing</a></span></li><li><span><a href="#Compile-parts-of-the-Python-code" data-toc-modified-id="Compile-parts-of-the-Python-code-4"><span class="toc-item-num">4 </span>Compile parts of the Python code</a></span><ul class="toc-item"><li><span><a href="#Cython" data-toc-modified-id="Cython-4.1"><span class="toc-item-num">4.1 </span>Cython</a></span></li><li><span><a href="#Numba" data-toc-modified-id="Numba-4.2"><span class="toc-item-num">4.2 </span>Numba</a></span></li></ul></li><li><span><a href="#Parallel-Processing" data-toc-modified-id="Parallel-Processing-5"><span class="toc-item-num">5 </span>Parallel Processing</a></span><ul class="toc-item"><li><span><a href="#concurrent.futures-module" data-toc-modified-id="concurrent.futures-module-5.1"><span class="toc-item-num">5.1 </span>concurrent.futures module</a></span></li><li><span><a href="#multiprocessing-module" data-toc-modified-id="multiprocessing-module-5.2"><span class="toc-item-num">5.2 </span>multiprocessing module</a></span></li><li><span><a href="#Numba-(again)" data-toc-modified-id="Numba-(again)-5.3"><span class="toc-item-num">5.3 </span>Numba (again)</a></span></li><li><span><a href="#IPython-parallel" data-toc-modified-id="IPython-parallel-5.4"><span class="toc-item-num">5.4 </span>IPython parallel</a></span></li><li><span><a href="#Big-data-systems" data-toc-modified-id="Big-data-systems-5.5"><span class="toc-item-num">5.5 </span>Big data systems</a></span></li></ul></li><li><span><a href="#Use-the-GPU" data-toc-modified-id="Use-the-GPU-6"><span 
class="toc-item-num">6 </span>Use the GPU</a></span><ul class="toc-item"><li><span><a href="#Numba-(yet-again)" data-toc-modified-id="Numba-(yet-again)-6.1"><span class="toc-item-num">6.1 </span>Numba (yet again)</a></span></li><li><span><a href="#PyCUDA-and-PyOpenCL" data-toc-modified-id="PyCUDA-and-PyOpenCL-6.2"><span class="toc-item-num">6.2 </span>PyCUDA and PyOpenCL</a></span></li><li><span><a href="#Other-packages" data-toc-modified-id="Other-packages-6.3"><span class="toc-item-num">6.3 </span>Other packages</a></span></li></ul></li><li><span><a href="#Interface-to-C/C++/Fortran" data-toc-modified-id="Interface-to-C/C++/Fortran-7"><span class="toc-item-num">7 </span>Interface to C/C++/Fortran</a></span><ul class="toc-item"><li><span><a href="#C-extension-interface" data-toc-modified-id="C-extension-interface-7.1"><span class="toc-item-num">7.1 </span>C extension interface</a></span></li><li><span><a href="#ctypes" data-toc-modified-id="ctypes-7.2"><span class="toc-item-num">7.2 </span>ctypes</a></span></li><li><span><a href="#cffi" data-toc-modified-id="cffi-7.3"><span class="toc-item-num">7.3 </span>cffi</a></span></li><li><span><a href="#SWIG" data-toc-modified-id="SWIG-7.4"><span class="toc-item-num">7.4 </span>SWIG</a></span></li><li><span><a href="#cppyy" data-toc-modified-id="cppyy-7.5"><span class="toc-item-num">7.5 </span>cppyy</a></span></li><li><span><a href="#Boost.Python" data-toc-modified-id="Boost.Python-7.6"><span class="toc-item-num">7.6 </span>Boost.Python</a></span></li><li><span><a href="#pybind11" data-toc-modified-id="pybind11-7.7"><span class="toc-item-num">7.7 </span>pybind11</a></span></li><li><span><a href="#F2PY" data-toc-modified-id="F2PY-7.8"><span class="toc-item-num">7.8 </span>F2PY</a></span></li></ul></li><li><span><a href="#Consider-other-languages" data-toc-modified-id="Consider-other-languages-8"><span class="toc-item-num">8 </span>Consider other languages</a></span><ul class="toc-item"><li><span><a href="#Julia" 
data-toc-modified-id="Julia-8.1"><span class="toc-item-num">8.1 </span>Julia</a></span></li><li><span><a href="#C/C++" data-toc-modified-id="C/C++-8.2"><span class="toc-item-num">8.2 </span>C/C++</a></span></li><li><span><a href="#Fortran" data-toc-modified-id="Fortran-8.3"><span class="toc-item-num">8.3 </span>Fortran</a></span></li></ul></li></ul></div>
# -
# ## NumPy and Vectorization
#
# What's so good about NumPy? For starters:
# - Most of it written in C for speed
# - It adds strongly-typed arrays to Python. These are less flexible than lists but potentially much more efficient
# - Most numpy functions (called ufuncs) accept arrays as arguments and automatically operate on all the elements (vectorization). Don't write explicit loops unless you REALLY have to!
# - AstroPy is built on top of NumPy, making a good thing even better
#
# <NAME> is better than most of us at this sort of thing, and he helpfully wrote about it in the [Python Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/) (chapter 2 is specifically about NumPy, but all of it is well worth reading).
# ## Switch to PyPy
#
# Anyone reading this notebook is very probably using a CPython kernel. PyPy is a replacement implementation of Python, optimized for speed. It integrates cffi and a JIT compiler. Details at https://www.pypy.org/index.html
#
# The project aims to be code-compatible with a large subset of a non-quite-current version of CPython. However, the extension mechanism is very different and not all third-party packages are currently compatible with PyPy. A compatibility list is at http://packages.pypy.org. At present, numpy and astropy are shown as working, but scipy, matplotlib, pandas and several others are not.
#
# There are mixed reports about getting PyPy to work with Jupyter notebooks. If anyone wants to try it, I wish you luck.
# ## Testing
#
# Define some functions to play with later, one trivial and one working the CPU harder:
# +
import math
def sq(x):
    """Return *x* squared (trivial workload for the parallel-map demos)."""
    squared = x * x
    return squared
# Large primes used as a CPU-bound benchmark throughout this notebook.
PRIMES = [
    112272535095293,
    112582705942171,
    112272535095293,
    115280095190773,
    115797848077099,
    1099726899285419]

def is_prime(n):
    """Trial-division primality test.

    Fixes two edge cases in the original: n < 2 (including 0, 1 and
    negatives) is not prime, and 2 itself IS prime — the even-number
    shortcut used to reject it. Benchmark timings on PRIMES are unaffected.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # Only odd divisors up to floor(sqrt(n)) need checking.
    sqrt_n = int(math.floor(math.sqrt(n)))
    for i in range(3, sqrt_n + 1, 2):
        if n % i == 0:
            return False
    return True
# -
# Time this in unenhanced mode to get a baseline. Because `map()` does lazy evaluation it's important to do the list comprehension to force a full calculation.
# %%timeit -n2 -r4
results = [x for x in map(is_prime, PRIMES)]
# map() is lazy, so iterate it again here to actually print each verdict.
for number, prime in zip(PRIMES, map(is_prime, PRIMES)):
    print('%d is prime: %s' % (number, prime))
# To get a comparison with compiled code run from the command line, the `./C/` directory contains:
# - a version of `is_prime()` written in C
# - a test program in C++
# - a shared object library containing the `is_prime()` function
#
# Running the test program gives:
# ```
# $ ./testprime
# 112272535095293 is prime: False
# 112582705942171 is prime: False
# 112272535095293 is prime: False
# 115280095190773 is prime: False
# 115797848077099 is prime: False
# 1099726899285419 is prime: True
# 10 loops, average time taken: 251 milliseconds
# ```
#
# This compares with 2.35 seconds for the unoptimized Python version run on the same machine.
# ## Compile parts of the Python code
#
# Isolate the runtime bottleneck in a small block of Python code, auto-generate C code from it and compile to native binary. This can be surprisingly easy but requires some extra code (unlike Julia, which does this automatically - see later section).
#
# However the results compare with a carefully hand-optimized C/C++ program, this can be a substantial improvement on simple Python. <NAME> wrote some blog articles back in 2012/2013 that are still interesting to read: http://jakevdp.github.io/blog/2012/08/24/numba-vs-cython/ and http://jakevdp.github.io/blog/2013/06/15/numba-vs-cython-take-2/
# ### Cython
#
# This basically adds two extensions to Python syntax:
# - with `cdef`, variables can be declared with an explicit (C-compatible) type
# - functions can be declared with `def`, `cdef` or `cpdef` for Python-only, C-only or Python+C use.
#
# There are also some function decorators which can be used instead of cdef and to control various checks.
#
# Start by loading the Cython extension into the notebook:
# %load_ext Cython
# A simple example to show the syntax. the `-a` flag on the cython magic annotates the output so you can see the C code generated.
# + magic_args="-a" language="cython"
#
# # factorials
# cdef int a = 1
# for i in range(1,10):
# a *= i
# print(a)
# -
# Now run the primes-check example. This is a bit fiddly, because Cython environment can't see Python globals and other cells can't see the Cython function. Hence the extra Python function `run_primes_cython()` to give `%%timeit` something to work with.
# + language="cython"
# import math
# cimport cython # to get the decorators
#
# PRIMES = [
# 112272535095293,
# 112582705942171,
# 112272535095293,
# 115280095190773,
# 115797848077099,
# 1099726899285419]
#
# @cython.boundscheck(False)
# @cython.wraparound(False)
# def is_prime_cython(n):
# if n % 2 == 0:
# return False
#
# sqrt_n = int(math.floor(math.sqrt(n)))
# for i in range(3, sqrt_n + 1, 2):
# if n % i == 0:
# return False
# return True
#
# def run_primes_cython():
# return [x for x in map(is_prime_cython, PRIMES)]
# -
# %%timeit -n2 -r4
run_primes_cython()
# In this example, on my system, the speedup is unspectacular (23%) and nowhere near worth the effort.
# ### Numba
#
# From [their website](http://numba.pydata.org): Numba is an open source JIT compiler that translates a subset of Python and NumPy code into fast machine code.
#
# Numba has many capabilities but at its simplest it can be astonishingly easy to use: just add an `@jit` decorator to a standard Python function.
# +
# Fix: the `numba.decorators` module was removed in numba 0.54; @jit now
# lives at the package top level. nopython=True requests full native
# compilation (bare @jit's object-mode fallback is deprecated/removed).
from numba import jit

@jit(nopython=True)
def is_prime_numba(n):
    """Trial-division primality test, JIT-compiled by numba.

    Also fixes the n < 2 and n == 2 edge cases, matching the corrected
    plain-Python is_prime above.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    sqrt_n = int(math.floor(math.sqrt(n)))
    for i in range(3, sqrt_n + 1, 2):
        if n % i == 0:
            return False
    return True
# -
# %%timeit -n2 -r4
result = [x for x in map(is_prime_numba, PRIMES)]
# On my system, this is 6-fold faster than Cython for far less effort. It's virtually as fast as my C++ program run from the command line (296 ms for Numba vs 285 ms for C++ compiled with `gcc -g -O0` or 251 ms with `gcc -O3` optimization).
#
# I'm embarrassed that I'd been using Python for many years before I discovered Numba.
#
# There are limitations, meaning Numba can fail to compile a function. In particular, keep data structures simple: lists and Numpy arrays are good, dictionaries and Pandas dataframes are a problem. It's generally best to split out the slow, computationally intensive parts of the program for compilation, leaving most of the logic and complex data handling in Python.
# ## Parallel Processing
#
# How many processor cores are there in your laptop computer? For an affordable Core i5 CPU, this might be 4 physical cores and with multithreading the OS sees 8 virtual cores.
#
# By default, Python (more specifically CPython, which is what we're most likely using at present) is not thread-safe and the global interpreter lock (GIL) prevents multiple threads accessing Python objects simultaneously. So most of the time we have a single thread in a single process running on a single core. Spreading the load to the other cores takes a bit more work, but of course clever people have already written packages to help with this.
# ### concurrent.futures module
#
# Describes itself as "a high-level interface for asynchronously executing callables". A simple and useful feature is a parallel version of Python's `map(function, iterable)`.
import concurrent.futures as cf
# ProcessPoolExecutor.map fans the calls out across worker processes
# (sidestepping the GIL); results come back in input order.
with cf.ProcessPoolExecutor() as executor:
    print([x for x in executor.map(sq, range(100))])
# %%timeit -n2 -r4
with cf.ProcessPoolExecutor() as executor:
    results = [x for x in executor.map(is_prime, PRIMES)]
# ### multiprocessing module
#
# Part of the Python standard library, this supports spawning processes for both local and remote concurrency. Like `concurrent.futures` it provides a parallel map function, but also quite a lot more. See [the documentation](https://docs.python.org/3/library/multiprocessing.html) for more advanced usage.
# +
from multiprocessing import Pool, cpu_count
# One worker per (virtual) core reported by the OS.
nProc = cpu_count()
print(f"Number of cores: {nProc}\n")
with Pool(nProc) as p:
    print(p.map(sq, range(100)))
# -
# %%timeit -n2 -r4
with Pool(nProc) as p:
    results = [x for x in p.map(is_prime, PRIMES)]
# ### Numba (again)
#
# The @jit and @vectorize decorators can be modified with `parallel=True`, and Numba will try to generate multiprocessor code. This is easy in principle but needs some thought from the programmer: not all code can sensibly run in parallel, and avoiding race conditions is your responsibility.
# +
# Same fix as above: @jit moved to the numba package top level when
# `numba.decorators` was removed in numba 0.54.
from numba import jit

@jit(nopython=True, parallel=True)
def prime_parallel_numba(PRIMES):
    """Primality-test a list of integers inside one jitted function.

    parallel=True asks numba to parallelise the outer loop; as the text
    below notes, numba may report that it cannot for this code.
    """
    def is_prime_numba(n):
        # Inner helper, compiled as a numba closure.
        if n % 2 == 0:
            return False
        sqrt_n = int(math.floor(math.sqrt(n)))
        for i in range(3, sqrt_n + 1, 2):
            if n % i == 0:
                return False
        return True
    nTodo = len(PRIMES)
    result = [False] * nTodo
    for i in range(nTodo):
        result[i] = is_prime_numba(PRIMES[i])
    return result
# -
# This was more work than the non-parallel @jit example, because the compiler objected to `map()` and various other things I tried. At least it gives the right answer:
# Sanity-check the compiled function (also triggers JIT compilation
# outside the timing loop).
prime_parallel_numba(PRIMES)
# %%timeit -n2 -r4
result = prime_parallel_numba(PRIMES)
# Essentially the same speed as the previous (and much easier) @jit example, and on taking a closer look Numba reported that it couldn't make this code parallel. You win some, you lose some...
# ### IPython parallel
#
# Part of IPython/Jupyter rather than the Python language, this supports many types of parallelism, on a single machine or a cluster. It's a big, serious system, not a simple software drop-in like the previous examples.
#
# Docs: https://ipyparallel.readthedocs.io/en/stable/intro.html
#
# There's no demo here, because you need to start a controller and one or (preferably) more engines from the command line before starting Jupyter notebook.
# ### Big data systems
#
# Imagine data collections so big that they don't fit in memory and aren't all in one place, but you want to do calculations and machine learning on them. Several software billionaires built their businesses on precisely this scenario, so we can be sure that it's a well-funded area of development. The latest, coolest code may be hidden inside Google, but some very powerful systems are available as free software. Some examples:
# - Apache Spark
# - TensorFlow
# - PyTorch
# - Pythran
# ## Use the GPU
#
# We discussed above how to work with CPU cores. But there's probably also a graphics processor (GPU) in your machine, where "cores" (defined _very_ differently by each manufacturer) are simpler but much more numerous: hundreds, maybe thousands. Originally these just did graphics processing (obviously) but to make this computing power more widely useful, in 2007 Nvidia released the CUDA software layer to allow general purpose computing on the GPU. By an odd coincidence, they sold a lot more hardware and [made a great deal of money](https://en.wikipedia.org/wiki/Nvidia#Finances) in the years since.
#
# To prevent things getting boringly simple, AMD are now also big players in the GPU computing market, using different technology and very different terminology to describe things. In particular, AMD strongly support OpenCL as an open-source competitor to the proprietary CUDA technology. Intel CPUs with integrated graphics can also support OpenCL, and even Nvidia offer OpenCL support, though never with the same performance as CUDA on the same hardware.
#
# Recent supercomputers are stuffed with thousands of graphics cards which never do graphics. More affordably, CUDA ___may___ also work on your laptop, depending on what hardware you have. Mine doesn't (it uses Intel graphics integrated on the CPU). If uncertain, on Linux/Mac you might try `lspci | grep -i nvidia`; a blank response means no CUDA-capable GPU was detected. OpenCL is very likely to work, though (depending on hardware) there may not be much performance gain; at least an OpenCl program is less likely to crash on startup than CUDA.
#
# There's no point including sample code in this overview notebook, as it needs particular hardware and drivers to run. A few packages are mentioned briefly below, and I aim to make a separate notebook dedicated to CUDA and OpenCL.
# ### Numba (yet again)
#
# Sponsors of the Numba project include Intel, Nvidia and AMD, so it's no surprise that it has pretty good CUDA support.
#
# There is an easy way and a hard-but-flexible way to use CUDA in Numba:
# - Add `cuda=True` to function decorators like @jit and @vectorize
# - Write your own CUDA kernels; docs here: https://numba.pydata.org/numba-doc/latest/cuda/index.html
# ### PyCUDA and PyOpenCL
#
# Closely related packages from the same authors, these are for writing relatively low-level GPU code within Python: http://homepages.math.uic.edu/~jan/mcs572/mcs572notes/lec29.html
#
# On my low-power fanless system with no graphics card, PyCUDA has no chance of running but PyOpenCL was willing to work with what it could find, in this case an i5 CPU:
#
# ```
# In [1]: import pyopencl In [2]: from pyopencl.tools import get_test_platforms_and_devices In [3]: get_test_platforms_and_devices() Out[3]: [(<pyopencl.Platform 'Portable Computing Language' at 0x7f637bf7e020>,
# [<pyopencl.Device 'pthread-Intel(R) Core(TM) i5-7200U CPU @ 2.50GHz'
# on 'Portable Computing Language' at 0x55d53170fd10>])]
# ```
# ### Other packages
#
# There are lots.
#
# Anaconda provides an introduction to [working with GPU packages](https://docs.anaconda.com/anaconda/user-guide/tasks/gpu-packages/) in Python. The emphasis is on machine learning applications, reflecting a major driver for CUDA development.
#
# ___scikit-cuda___ is a CUDA equivalent of skikit-learn: https://scikit-cuda.readthedocs.io/en/latest/
#
# ___Rapids___ is open source but supported by Nvidia, aimed at large-scale data science in the Dask/Apache Arrow type of environment: https://rapids.ai/index.html. Includes various lower-level libraries: cuDF, cuML, cuGraph
#
# ___Anaconda accelerate___ is only in the paid-subscription versions of Anaconda so (of course) I'm unlikely to use it
# ## Interface to C/C++/Fortran
#
# There are lots of ways to do this, which gives flexibility plus a pretty strong hint that none of the current methods is perfect. The typical pattern is to take a shared library/DLL and put a "foreign function interface" (a software wrapper) around it so you can call its functions.
#
# These differ in which languages they support and whether the foreign code is embedded locally or pre-compiled in an existing library. Also, these projects come and go, so the list below is limited to projects that still seem to be active as of 2019.
# ### C extension interface
#
# The original approach built into CPython. For details see https://docs.python.org/3/extending/extending.html. Preferably look at some of the other options first, as they can make life easier.
# ### ctypes
#
# Part of the Python standard library, this is available without needing installation. It allows you to wrap existing libraries, including third-party objects that you have no control over and no source code. Docs: https://docs.python.org/3/library/ctypes.html
import ctypes
# The following example shows the syntax, importing a shared library containing the `is_prime()` function.
# +
testlib = ctypes.CDLL('C/libisprime.so')
# NOTE(review): no argtypes/restype are declared, so ctypes uses its
# default C int conversion for the arguments. These test values need
# ~47 bits, which is the likely cause of the wrong answers described
# below — TODO confirm; declaring
#     testlib.is_prime.argtypes = [ctypes.c_long]
#     testlib.is_prime.restype = ctypes.c_bool
# should fix it. Also suspicious: bools maps a truthy return (1) to
# "False" — the tuple looks inverted; probably meant ("False", "True").
bools = ("True", "False")
for number, prime in zip(PRIMES, map(testlib.is_prime, PRIMES)):
    print('%d is prime: %s' % (number, prime))
# -
# Disaster! The program ran without an error message, but gives the wrong answers (should be 5 True's then a False). A test program written in C suggests the shared library is basically working:
#
# ```
# $ ./testlib
# 112272535095293 is prime: true
# 112582705942171 is prime: true
# 112272535095293 is prime: true
# 115280095190773 is prime: true
# 115797848077099 is prime: true
# 1099726899285419 is prime: false
# ```
#
# I have no idea why this failed, and as ctypes has a reputation for being a nightmare to debug I'm going to quietly move on without worrying about it. Test your code!
# ### cffi
#
# As [the docs](https://cffi.readthedocs.io/en/latest/index.html) say: this is a "C Foreign Function Interface for Python. Interact with almost any C code from Python, based on C-like declarations that you can often copy-paste from header files or documentation".
#
# Though cffi provides a binary (ABI) mode, they [recommend](https://cffi.readthedocs.io/en/latest/overview.html#abi-versus-api) that non-Windows users avoid it. All the examples here use API mode and assume we can access a C compiler.
#
# More recent than ctypes, cffi needs an import:
from cffi import FFI
# One way to use cffi is as a wrapper round existing library code - like ctypes, but hoping for better results.
#
# The first step is to generate new files in a format that Python can use. At its simplest, give the calling signature, the header file and a path to the library file. The verbose output is gcc-style cryptic (turn it off by setting `verbose=False`), but we're mainly hoping for absence of error messages. A previous version of this code got the path the the library wrong and produced a LOT of error message, all totally unhelpful.
# +
# Build a cffi wrapper around the pre-built shared library (API mode).
ffibuilder = FFI()
# cdef() expects a single string declaring the C types, functions and
# globals needed to use the shared object. It must be in valid C syntax.
ffibuilder.cdef("""
bool is_prime(long n);
""")
# set_source() gives the name of the python extension module to
# produce, and some C source code as a string. This C code needs
# to make the declarated functions, types and globals available,
# so it is often just the "#include".
ffibuilder.set_source("_primes_cffi",
"""
#include "C/isprime.h" // the C header of the library
""",
libraries=['C/isprime']) # library name, for the linker
# Generates _primes_cffi.c, compiles and links it in the current directory.
ffibuilder.compile(verbose=True);
# -
# That generates new C code, compiles it to an object module and links it to a shared library, all in the current directory.
#
# ```
# $ ls _primes*
# _primes_cffi.c _primes_cffi.cpython-36m-x86_64-linux-gnu.so _primes_cffi.o
# ```
#
# Next we can get a `lib` object with callable Python functions, in this case just `is_prime()`:
# +
from _primes_cffi import ffi, lib
# `lib` exposes the wrapped C functions; call is_prime() straight from Python.
for number, prime in zip(PRIMES, map(lib.is_prime, PRIMES)):
    print('%d is prime: %s' % (number, prime))
# -
# The correct results! This supports the view that ctype's problems were not because of the `libisprime.so` file.
#
# What about performance?
# %%timeit -n2 -r4
results = [x for x in map(lib.is_prime, PRIMES)]
# As good as running the C++ version from the command line!
#
# We have the source code in this case, and cffi can work with this directly instead of needing the `.so` library. That's good, as generating this library was a (quite messy) extra step.
# +
# Second approach: let cffi compile the C source directly, instead of
# linking against the pre-built shared library.
ffibuilder2 = FFI()
# cdef() expects a single string declaring the C types, functions and
# globals needed to use the shared object. It must be in valid C syntax.
ffibuilder2.cdef("""
bool is_prime(long n);
""")
# set_source() gives the name of the python extension module to
# produce, and some C source code as a string. This C code needs
# to make the declarated functions, types and globals available,
# so it is often just the "#include".
ffibuilder2.set_source("_primes2_cffi",
"""
#include "C/isprime.h" // the C header of the library
""",
sources=['C/isprime.c'],
libraries=['m']) # we need to link with the math library
ffibuilder2.compile(verbose=True);
# -
# This version of the library works the same way as before, giving the same results and at least as good performance:
# +
import _primes2_cffi
# Same checks as before, now against the directly-compiled module.
for number, prime in zip(PRIMES, map(_primes2_cffi.lib.is_prime, PRIMES)):
    print('%d is prime: %s' % (number, prime))
# -
# %%timeit -n2 -r4
results = [x for x in map(_primes2_cffi.lib.is_prime, PRIMES)]
# My first impressions of cffi are very positive. It does more than ctypes, fairly easily, and (for whatever reason) it gave the correct answer when ctypes didn't.
# ### SWIG
#
# This involves roughly the same steps as cffi, but they are done at the command prompt and not within Python. All the files are in the `./swig/` directory.
#
# Start by writing an interface file:
#
# ```
# /* File : isprime.i */
# # %module isprime
# %{
# #include "isprime.h"
# %}
# # %include "isprime.h"
# ```
# Because SWIG supports many scripting languages, not just python, we need to tell it which bindings to generate:
#
# ```
# $ swig -python isprime.i
# ```
#
# This gives us two new files, `isprime.py` and `isprime_wrap.c`.
#
# Now we need to compile to get an object library. This is highly system dependent, but after some trial and error this worked for me (on Linux Mint 19.2):
#
# ```
# gcc -c isprime.c isprime_wrap.c -I/usr/include/python3.6 -fPIC
# ```
# The include path needs to be a directory containing `Python.h`.
#
# Finally use these object modules to create a shared library:
# ```
# ld -shared -fPIC isprime.o isprime_wrap.o -o isprime.so
# ```
#
# At last we have the two files we need: `isprime.py` is our interface, and `_isprime.so` the shared library.
#
# Import the necessary function and use it:
# +
# Same correctness check as the cffi version, now through the SWIG-generated wrapper.
from swig.isprime import is_prime as is_prime_swig
for number, prime in zip(PRIMES, map(is_prime_swig, PRIMES)):
    print('%d is prime: %s' % (number, prime))
# -
# %%timeit -n2 -r4
# Benchmark the SWIG wrapper on the same inputs as the other bindings.
results = list(map(is_prime_swig, PRIMES))
# Not bad: correct results, and performance only slightly worse than cffi. However, getting to this point was a fairly ugly and (in my novice hands) error-prone process.
#
# Why use SWIG? It's good if you have a lot of code to wrap, with regular updates needed: that can be automated. Also if you want to support multiple scripting languages from this list: C#, D, Go, Guile, Java, Javascript, Lua, MzScheme/Racket, OCaml, Octave, Perl, PHP, Python, R, Ruby, Scilab, Tcl.
#
# I still think cffi is easier to get started with.
# ### cppyy
#
# This lets you write arbitrary C++ code within Python, which is compiled on the fly by [Cling](https://root.cern.ch/cling) (which has CERN behind it). It supports modern code up to at least C++14 standards.
#
# TODO: working example
# ### Boost.Python
#
# This is something different: a C++ library that can be used in your code to expose functions and classes to Python. It's part of a much bigger and more complex Boost package, now quite old. Unfortunately, I think it's fair to say that the documentation is a mess.
#
# Adding the C++ code is simple enough. The challenge is figuring out how to compile it, and after reading various web pages on this topic I still have no idea.
#
# Conclusion: Boost.Python is only for serious C++ programmers who already use the Boost libraries for other reasons. It has nothing to offer someone like me.
# ### pybind11
#
# Conceptually similar to Boost.Python, but newer and much more lightweight. Targeted at C++11 or later, which makes this sort of thing much simpler than in older language versions.
#
# Various Github repos are at https://github.com/pybind, worth looking at to get the examples and tests. The documentation is at https://pybind11.readthedocs.io/en/stable/index.html: better than Boost.Python, but still a bit quirky.
#
# These sites carefully avoid saying anything at all about installing pybind11 itself, though there are some clues to other requirements. The main thing to know is that, although this is mainly a C++ library, what you need to install is a python package: `pybind11` in pip or conda, `python3-pybind11` on Debian-based systems.
#
# As with Boost.Python, creating the C++ code is simple, building it is at best confusing. Whatever your usual C++ build workflow, for pybind11 it is [strongly recommended to use CMake](https://stackoverflow.com/questions/54908007/how-to-properly-compile-c-code-with-pybind11). Alternatively, some people [prefer to use a `setup.py`](http://people.duke.edu/~ccc14/sta-663-2018/notebooks/S13C_pybind11.html) to control the build.
#
# TODO: working example
# ### F2PY
#
# An interface to Fortran 70/90/95. See https://www.numfys.net/howto/F2PY/ for an overview. That contains a broken link to the main F2PY website, which isn't encouraging.
#
# The SciPy pages may be more useful: https://docs.scipy.org/doc/numpy/f2py/. Apparently F2PY is now part of NumPy.
# ## Consider other languages
#
# Python is relatively quick and easy to write but slow(ish) to run. We've looked at ways to speed it up, successful enough to make this the most widely-used language in modern astronomy. But there's a limit to speedup and sometimes you hit it.
#
# Time to at least consider the alternatives.
# ### Julia
#
# A [fairly new open-source language](https://julialang.org/), under rapid development and growing in popularity among scientists and engineers. Development started in 2009 and version 1.0, with a more definitive and stable API, was released August 2018.
#
# The slogan is "walk like Python, run like C". The syntax is simple and familiar, largely a modernized cross between Python and Matlab, so the learning curve is fairly shallow.
#
# Most SciPy/NumPy equivalent functionality is built in as standard because this is what the language is designed for. There's a growing subset of AstroPy functionality available (and they'd love you to help expand this).
#
# Making mixed-language programming easy is a core objective, so several Python packages are simply imported and used as-is. Plotting can use Matplotlib (or Plotly or several others), symbolic math uses SciPy.
#
# What about "run like C"? The language implementation means that non-trivial Julia code can run many-fold faster than the equivalent Python, but this may need a [change of programming style](https://docs.julialang.org/en/v1/manual/performance-tips/). In particular:
# - Speed depends on putting performance-critical code into functions that an optimizing compiler can work on the first time they are called
# - Avoid global variables, avoid changing the type of variables
# - As a modern scientific language, multi-threading and distributed processing are core features
#
# Will Julia become common in astronomy? It deserves to, and I'll try to help. However, after several years of astronomers world-wide mostly converging on Python as their standard language and investing a lot of effort in its development, the timing of Julia's stable release is unfortunate.
# To test the is_prime() example in Julia, I ran the code below in a separate Julia-1.1 notebook. Unfortunately the `%%julia` magic is not currently working, so this won't run in a Python notebook even with a Julia kernel installed.
# + language="julia"
#
# PRIMES = [
# 112272535095293,
# 112582705942171,
# 112272535095293,
# 115280095190773,
# 115797848077099,
# 1099726899285419];
#
# function is_prime(n)
# if n % 2 == 0
# return false
# end
#
# sqrt_n = Int(floor(sqrt(n)))
# for i in 3:2:sqrt_n + 1
# if n % i == 0
# return false
# end
# end
# return true
# end
#
# print(map(is_prime, PRIMES))
#
# using BenchmarkTools
# @benchmark results = [x for x in map(is_prime, PRIMES)]
# -
# The Julia notebook produced this output:
#
# ```
# Bool[true, true, true, true, true, false]
#
# BenchmarkTools.Trial:
# memory estimate: 224 bytes
# allocs estimate: 4
# --------------
# minimum time: 263.463 ms (0.00% GC)
# median time: 264.104 ms (0.00% GC)
# mean time: 264.939 ms (0.00% GC)
# maximum time: 274.006 ms (0.00% GC)
# --------------
# samples: 19
# evals/sample: 1
# ```
#
# For comparison, this is intermediate between cffi and SWIG, and somewhat faster than Numba.
# ### C/C++
#
# We've talked about interfacing these languages to Python. Sometimes that's not worth the complications and it's better to use them directly. At least you get access to modern graphics (Qt, OpenGL, etc) in a way that's alien to Fortran.
# ### Fortran
#
# Hard core! Fortran is outdated, ugly, hard to write, harder to debug and an all-round pain. So why is it still widely used?
# - For the biggest simulations, especially highly parallel OpenMP code, Fortran programs still run fastest. Even C/C++ can't quite match it, and time on a big supercomputing cluster is a limited resource you may need to optimize.
# - Lots of scientists spent the last 60 years writing, debugging, optimizing and validating Fortran code. Those software libraries still exist and you probably use them regularly without realizing (hidden behind glue code for your favorite language). Sometimes only using them in the raw will get the job done.
#
# This was the first programming language I ever learned (in 1974): FORTRAN IV punched on to cards and fed to an IBM 370/165 mainframe. Then we hung around by the line printer waiting for the operator to tear off your pages of 14-inch fanfold paper to see that you missed a comma on line 15. It seemed wonderful at the time.
#
# The language has evolved since then (Hollerith strings were always dumb and no sane person misses them), but not as much as you'd think. Remember that in this world, machine time is precious but programmer time (and nervous energy) is expendable.
| genpython/performance/1 - Speeding up Python programs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="D0nnndnJ6Ynw"
# # Preparando Dados
# Nessa imersão nós vamos mergulhar no universo da biologia e da biotecnologia e explorar uma base de dados da área.
#
# Para a nossa análise, está faltando então os dados. Para conseguirmos esses dados vamos acessar o Github, nesse link:
#
# https://github.com/alura-cursos/imersaodados3/tree/main/dados
#
# Então, agora vamos importar essa base de dados para dentro do nosso notebook. Para juntar essas informações vamos utilizar nossa famosa biblioteca do "Pandas".
# Vamos importar essa biblioteca através do seguinte código:
# + id="tap3brCKoTbv" colab={"base_uri": "https://localhost:8080/"} outputId="a2a75a4f-aefe-4acc-a461-e281ef94c678"
import pandas as pd
# Load the zipped experiments CSV straight from GitHub (?raw=true serves the raw file).
url_dados = "https://github.com/alura-cursos/imersaodados3/blob/main/dados/dados_experimentos.zip?raw=true"
dados = pd.read_csv(url_dados, compression= "zip")
dados
# + id="5ME4Be0gpel0"
# Rename the 'droga' column to 'composto', in place.
mapa = {"droga": "composto"}
dados.rename(columns=mapa, inplace= True)
# + [markdown] id="jamZqqM1m9ce"
# #Aula 03
# + [markdown] id="xwzPjWpCnRCu"
# Olá Cientista!
#
# Seja bem vindo e bem vinda à aula 03!
#
# Na aula anterior, pudemos entender a importância da construção de gráficos na visualização dos dados.
# Discutimos principalmente os histogramas e boxplots e, além disso, também falamos sobre a função ```describe``` do Pandas.
#
# Você conseguiu fazer todos os desafios? Então vamos mergulhar juntos na aula 03!
#
# Hoje, nosso foco é nos aprofundar ainda mais na base de dados e entender mais sobre a relação entre as nossas variáveis.
#
# Olhando para o nosso conjunto de dados, percebemos que temos dados de tratamento, tempo, dosagem, composto químico usado e os dados genéticos.
# E os primeiros dados que vamos relacionar são aqueles ligados ao tratamento e, como já vimos anteriormente, estes são dados categóricos.
#
# Um dos vieses de análise está relacionado às proporções destas categorias e, como aprendemos na aula anterior, um gráfico de frequências pode ser muito útil nesse aspecto.
# + colab={"base_uri": "https://localhost:8080/"} id="enTo-CIfo1iH" outputId="8d95433e-d92f-486b-dcbc-b21951026ae5"
dados.head()  # first five rows, for a quick look at the columns
# + [markdown] id="hd1NQ_W6qZUj"
# Conforme nossa busca na [documentação do Pandas](https://pandas.pydata.org), encontramos uma maneira para construir uma tabela de frequências, a função ```crosstab```. Esta função recebe como argumentos os dados que gostaríamos de correlacionar de uma maneira bem simples: ```crosstab(dataframe['coluna1'], dataframe['coluna2'])``` e então, como retorno, temos uma matriz que relaciona essas variáveis a partir da frequência.
#
# Podemos ver que as categorias da variável ```dose``` transformaram-se em linhas e as categorias da variável ```tempo``` são colunas.
# + colab={"base_uri": "https://localhost:8080/"} id="GxNcZgooqifp" outputId="76cc6d84-a460-4000-cc94-db8bb643e031"
pd.crosstab(dados['dose'], dados['tempo'])  # frequency table: dose (rows) x tempo (columns)
# + [markdown] id="z2m8T-gSh1ah"
# Entretanto, na matriz acima não estamos considerando o tratamento usado, apesar desta variável ser de suma importância, visto que, decide se há ou não presença de um composto no evento.
# Por isso, vamos construir uma nova tabela com o ```crosstab``` considerando essa nova variável.
#
# Para isso, usaremos a mesma sintaxe anterior, adicionando ```dados['tratamento']```ao final e, adicionalmente, deixamos as duas primeiras colunas declaradas entre colchetes, pois assim garantimos que suas informações estarão distribuídas nas linhas, ou seja, o código final para este comando será: ```crosstab(dataframe[['coluna1'], dataframe['coluna2']], dataframe['coluna3'])```.
#
# O resultado será uma tabela de frequências multi-index (mais de um índice) sendo que o index da esquerda se refere aos dados da variável ```dose``` e o index da direita são as informações de ```tempo```, respeitando a ordem na qual declaramos as respectivas variáveis.
# + colab={"base_uri": "https://localhost:8080/"} id="ZAlTjcmQrb_M" outputId="13ce748c-ce7a-497e-b6ef-8d42f9b70262"
pd.crosstab([dados['dose'], dados['tempo']], dados['tratamento'])  # multi-index rows (dose, tempo) vs tratamento
# + [markdown] id="QDPC2fbHkqBO"
# Apesar de já termos construído uma tabela bem interessante através da frequência de algumas variáveis, podemos explorar a proporção destes dados entre si.
#
# Para fazer isso vamos, novamente, copiar o nosso comando acrescentando um novo parâmetro ao final, o ```normalize```. Então o código ficará: ```crosstab([dados['dose'], dados['tempo']], dados['tratamento'], normalize='index')```. Esse parâmetro normaliza a nossa tabela e escolhemos que ela faça isso a partir do índice, ou seja, ela fará a comparação entre as categorias, isto é, a soma de cada linha será igual a 1.
# Fazer este tipo de análise possibilita que façamos algumas suposições acerca do balanceamento entre as categorias e, analisando a nossa matriz, podemos concluir que há proporcionalidade na nossa base de dados.
#
# + colab={"base_uri": "https://localhost:8080/"} id="l11KiR6bsF-K" outputId="0b9a972c-b646-48c6-add2-d28b21ce8f24"
pd.crosstab([dados['dose'], dados['tempo']], dados['tratamento'], normalize='index')  # each row sums to 1: proportions per (dose, tempo)
# + [markdown] id="Xd_O2idasyfh"
# Podemos também agregar à nossa matriz uma métrica estatística associada a uma coluna.
# Para que isso seja cumprido, adicionamos mais dois parâmetros à nossa função ```crosstab```: o primeiro é o ```values = dataframe['variavel']``` e o segundo é o ```aggfunc``` que recebe como parâmetro alguma métrica estatística, como a média. Logo, ```aggfunc = 'mean'```. Isso quer dizer que queremos comparar entre as diferentes categorias (```com_controle``` e ```com_droga```) a média de valores associados a variável ```g-0```.
#
# Aqui, podemos perceber algumas diferenças entre essas médias e podemos traçar algumas hipóteses a serem verificadas. Lembrando que a média de uma variável, é uma conta feita a partir dos valores que aquela amostra apresenta e não é o valor que ela assume, de fato. Por isso, não podemos concluir nada somente olhando a média, mas entender seu comportamento nos dá indícios por quais caminhos podemos seguir.
# + colab={"base_uri": "https://localhost:8080/"} id="2LT5TeiKtxX3" outputId="9f21edbb-a77b-47c9-b1c9-ff6f828c9a28"
pd.crosstab([dados['dose'], dados['tempo']], dados['tratamento'], values=dados['g-0'], aggfunc='mean')  # mean g-0 per cell
# + [markdown] id="cKSKuOe9xwJA"
# Para variáveis contínuas, fazer tabelas de frequências não é a melhor estratégia para analisá-las.
# Mas, construir um novo tipo de gráfico pode ser muito interessante para o nosso processo.
#
# Então, para fins de visualização, o primeiro passo é filtrar a nossa base de dados com as colunas que queremos investigar.
# No nosso caso, vamos analisar a relação entre as colunas ```g-0``` e ```g-3``` e, por isso, definimos uma lista de arrays com os nomes dessas colunas (```dataframe[['coluna1', 'coluna2']]```) e, como retorno, teremos nosso conjunto somente com as variáveis alvo.
# + colab={"base_uri": "https://localhost:8080/"} id="5NLk3hfzxaex" outputId="77182565-29d6-4e72-808b-af5eefc3a254"
dados[['g-0', 'g-3']]  # keep only the two gene-expression columns under study
# + [markdown] id="v-pZDVyEzSze"
# O ```scatterplot``` é um tipo de gráfico pré programado da biblioteca Seaborn e recebe como parâmetros a variável que vai ser usada no eixo x, a variável do eixo y e, por fim, o conjunto de dados.
#
# O código ficará:
#
# ```sns.scatterplot(x = 'variavel para o eixo x', y = 'variavel para o eixo y', data = base de dados)```
#
# E, como queremos investigar as variáveis ```g-0``` e ```g-3```, atribuímos cada uma delas a um eixo.
#
# O gráfico de dispersão utiliza os dados como uma coleção de pontos cartesianos e ele é usado para apurar se há relação de causa e efeito entre duas variáveis quantitativas.
#
# No nosso caso, cada linha será um par ordenado de acordo com o que declaramos no código, ou seja, o valor de ```g-0``` será a cordenada x e o valor de ```g-3``` será a coordenada y.
#
# Por exemplo: para a linha 0 da base de dados teremos (1,0620 , -0,6208)
#
# Mas, por outro lado, a partir do gráfico de dispersão, não podemos dizer que uma variável afeta a outra, podemos apenas definir se há relação entre elas e qual a intensidade disso.
# + colab={"base_uri": "https://localhost:8080/"} id="Tu6cDP2RwuxJ" outputId="4c6b3bb3-b612-4a4b-b27d-9500b7c032bc"
import seaborn as sns
sns.scatterplot(x='g-0', y = 'g-3', data=dados)  # dispersion plot: one point per row (g-0, g-3)
# + [markdown] id="arC98j9T3H-N"
# Observando o gráfico que construímos acima, não parecemos encontrar nenhum padrão tão definido. Então, vamos confrontar mais duas colunas para verificar se encontramos algum padrão melhor definido.
#
# Aqui, vamos usar a variável ```g-0``` para o eixo x e a variável ```g-8``` para o eixo y para construir o nosso novo gráfico.
#
# Como retorno, recebemos um gráfico de dispersão onde a nuvem de pontos cartesianos parece desenhar melhor um padrão: conforme o ```g-0``` aumenta, o valor de ```g-8``` diminui. Aparentemente, a relação entre essas duas variáveis desenha uma curva com inclinação negativa.
# + colab={"base_uri": "https://localhost:8080/"} id="6pgpg0l2x_2w" outputId="105d63ed-6a5f-4d3f-e389-dbcc81abdb4b"
sns.scatterplot(x='g-0', y = 'g-8', data=dados)  # second pair of genes, to look for a clearer pattern
# + [markdown] id="L5U2qvqU4ikC"
# E, como parte do nosso trabalho é levantar hipóteses e confirmá-las (ou não), precisamos verificar se a nossa suspeita de que a relação entre as variáveis ```g-0``` e ```g-8```desenha uma curva com inclinação negativa.
#
# Para isso, vamos utilizar uma outra função do Seaborn, a ```lmplot```. A ```lmplot``` vai desenhar no nosso gráfico de dispersão uma linha de tendência e, assim, poderemos confirmar o padrão daquele conjunto de dados.
#
# Os parâmetros a serem recebidos, são muito parecidos com aqueles usados no ```scatterplot```. Então teremos
#
# ```sns.lmplot(data=base de dados, x='variavel para o eixo x', y='variavel para o eixo y', line_kws={'color': 'cor da linha de tendencia'})```
#
# Utilizamos o parâmetro ```line_kws = {'color': 'red'}``` para criar um bom contraste entre os pontos do gráfico de dispersão e a linha de tendência.
#
# Observando o nosso gráfico, podemos concluir a nossa hipótese inicial, mas ele ainda não é suficiente para finalizarmos a nossa análise.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="n50NzQRHyXmY" outputId="cc2ba9d7-ad84-4203-a287-3c289e6eeb03"
sns.lmplot(data=dados, x='g-0', y='g-8', line_kws={'color': 'red'})  # scatter plus a red regression (trend) line
# + [markdown] id="2hS8IyXv8kpV"
# Para uma análise mais real e completa, é interessante que separemos ainda mais o nosso conjunto de dados. Isso porque, na imagem acima, apesar de termos uma linha de tendência para a relação entre os dados ```g-0``` e ```g-8```, não há filtros para a dosagem, o tratamento e o tempo. E, pensando em drug discovery, é extremamente importante que façamos a separação desses conjuntos.
#
# Então, vamos acrescentar mais alguns parâmetros para executar a separação. Acrescentamos o parâmetro ```col = tramento``` para que sejam plotados gráficos de acordo com as categorias da variável em questão nas colunas (```com_droga``` e ```com_controle```) e também incluímos o parâmetro ```row = 'tempo'``` para que mais uma subdivisão seja feita e, as linhas apresentem novos gráficos com as diferentes categorias (```24```,```48``` e ```72```).
#
# Assim, podemos perceber as nuances de cada gráfico e o comportamento de determinado subconjunto.
# + colab={"base_uri": "https://localhost:8080/"} id="6im7H7kH0Cbf" outputId="6cd7d003-5419-432c-c49e-b000ccd96879"
sns.lmplot(data=dados, x='g-0', y='g-8', line_kws={'color': 'red'}, col='tratamento', row='tempo')  # one panel per (tratamento, tempo)
# + [markdown] id="9FVkdK0Gw4sL"
# Outra medida para analisar como as variáveis estão associadas é a correlação.
#
# Para isso, vamos usar uma função já conhecida do Pandas, o ```loc``` e, vamos agregar o ```.corr```. O ```loc``` serve para definirmos o intervalo em que a correlação vai ser calculada. Aqui, estamos calculando a correlação entre todos os genes.
#
# Como retorno, temos uma tabela bem grande que correlaciona a variável e apresenta valores entre 1 e -1.
# Por exemplo, o primeiro valor numérico apresentado na primeira linha é o resultado da correlação entre a variável que está nesta linha e nesta coluna, no nosso caso, o ```g-0``` em ambas as extremidades. No primeiro valor numérico apresentado na segunda linha, temos a correlação entre ```g-1``` e ```g-0``` e assim por diante.
#
# Mas, como interpretar esses valores? Bom, temos a seguinte divisão:
#
# - Valores muito próximos de 1 ou -1: variáveis altamente correlacionadas
# - Valores muito próximos de 0: variáveis pouco ou não correlacionadas
#
# E, o que diferencia se essa correlação será proporcional ou inversamente proporcional, será o sinal. Quer dizer:
#
# - Valores muito próximos de 1: variáveis proporcionalmente correlacionadas
# - Valores muito próximos de -1: variáveis correlacionadas inversamente proporcionais
#
# Agora que já sabemos como analisar essa tabela, podemos voltar para o nosso gráfico de dispersão construído com ```g-0``` e ```g-8``` e perceber que a nossa tabela confirma que ambas as variáveis estão correlacionadas e são inversamente proporcionais, visto que o valor apresentado na tabela é de -0,604212.
# + colab={"base_uri": "https://localhost:8080/"} id="kmEuqfjM2BuM" outputId="2fd6cb05-f664-485b-8094-44a81db3cc6c"
dados.loc[:,'g-0':'g-771'].corr()  # pairwise correlation matrix over all gene-expression columns
# + [markdown] id="r6M7igPh11gp"
# Analisar essa grande tabela é um desafio bem grande. Então, como auxílio visual costumamos plotar um mapa de calor para que possamos identificar com maior facilidade a correlação entre as variáveis.
#
# E, como esse código já está construído na própria documentação do Seaborn, vamos copiar o [código](https://seaborn.pydata.org/examples/many_pairwise_correlations.html) de lá, fazendo apenas algumas pequenas alterações.
#
# Então, de ```corr = d.corr()``` mudamos para ```corr = dados.loc[:,'g-0':'g-50'].corr()``` pois ajustamos o ```d``` para a nossa base de dados (```dados```) e decidimos incluir um ```loc``` para fazer o mapa de calor apenas do ```g-0``` ao ```g-50```.
# Também retiramos o parâmetro ```vmax=.3``` da última parte do código pois este era um limitador da correlação que não nos interessa no momento.
#
# Adicionalmente, também fizemos a importação da biblioteca Numpy que é usada para gerar este mapa de calor (```import numpy as np```).
#
#
#
#
# + id="BCtRrze55Pl1"
corr = dados.loc[:,'g-0':'g-50'].corr()  # restrict the heatmap below to the first 51 genes
# + [markdown] id="42c3Tarp36eB"
# O mapa de calor mostra uma escala de cores em sua lateral direita, a legenda e, para cada pontinho, podemos perceber a força da correlação sendo mostrada através de uma cor associada.
#
# Olhando para o nosso gráfico, percebemos que, em sua maioria, as expressões genicas não apresentam correlações tão altas entre si (podemos deduzir isso observando que o gráfico em grande parte é translúcido).
#
# É importante destacar que não podemos inferir causalidade a partir da correlação, como já descrevemos anteriormente no gráfico de dispersão.
# Exemplificando: vimos que ```g-0``` e ```g-8``` têm correlação inversamente proporcional entre si mas não podemos concluir que é o ```g-0``` que faz o ```g-8``` diminuir, ou seja, a causa.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="abmZOjJc3H5I" outputId="8611dd38-2f51-4446-c5b9-a525299ecd28"
import numpy as np
import matplotlib.pyplot as plt
# Generate a mask for the upper triangle (the matrix is symmetric, so show only one half)
mask = np.triu(np.ones_like(corr, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio; center=0 puts white at zero correlation
sns.heatmap(corr, mask=mask, cmap=cmap, center=0,
            square=True, linewidths=.5, cbar_kws={"shrink": .5})
# + [markdown] id="7x4DPQQW8wfU"
# Agora, vamos repetir o processo de construção do mapa de calor para a viabilidade celular (```c```).
#
# Definimos uma nova variável ```corr_celular``` e ajustamos os parâmetros de acordo com os nossos ```cs```.
#
# Observando o gráfico de saída, podemos perceber uma grande diferença entre os dois mapas de calor que construímos. A escala deste novo gráfico é bem diferente da escala anterior, temos valores apenas entre 0,65 e 0,90, correlações altamente proporcionais.
# + id="CHh2r7OG7DC6"
corr_celular = dados.loc[:,'c-0':'c-50'].corr()  # same analysis for the cell-viability (c-*) columns
# + colab={"base_uri": "https://localhost:8080/"} id="i0NRwzy56_e7" outputId="7ed600bc-2193-49b6-8276-cc54d30d37ee"
# Generate a mask for the upper triangle (symmetric matrix: show one half)
mask = np.triu(np.ones_like(corr_celular, dtype=bool))
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(230, 20, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr_celular, mask=mask, cmap=cmap, center=0,
            square=True, linewidths=.5, cbar_kws={"shrink": .5})
# + [markdown] id="ReBAzcTnnFOo"
# #Desafios
# + [markdown] id="D65E2G3Q7ZPd"
# ###Desafio 01: Criar tabelas de frequência com pandas.groupby()
# + colab={"base_uri": "https://localhost:8080/"} id="iM9g2q2S7a77" outputId="04a061a9-0b45-40ed-f864-ef21808ac385"
dados.groupby(["dose", "tempo"]).size().unstack()  # same counts as crosstab(dose, tempo), via groupby
# + colab={"base_uri": "https://localhost:8080/"} id="49NfdEsP9gIX" outputId="351a6983-136f-461e-a2ed-01749e628fee"
dados.groupby(['dose', 'tempo'])['tratamento'].value_counts().unstack()  # counts per tratamento within each (dose, tempo)
# + colab={"base_uri": "https://localhost:8080/"} id="edmaCkxW9xLf" outputId="1458217b-9413-41eb-8a95-6d3578e80fa1"
dados.groupby(['dose', 'tempo', 'tratamento'])['g-0'].mean().unstack()  # mean g-0, equivalent to the aggfunc='mean' crosstab
# + [markdown] id="IhYooo-C-Hev"
# ###Desafio 02: Normalizar o crosstab pela coluna.
#
# + colab={"base_uri": "https://localhost:8080/"} id="xf0uZuTR-OUK" outputId="15236655-c7dd-4dd7-befd-64f4ba4a0fa5"
pd.crosstab([dados["dose"], dados["tempo"]], dados["tratamento"], normalize="columns")  # each column sums to 1
# + [markdown] id="QUDBvkgk-nir"
# ###Desafio 03: Explorar outros agregadores.
# + colab={"base_uri": "https://localhost:8080/"} id="4URBjS_1-smA" outputId="96a48a6a-5434-46c7-8961-b45d8a5c9e05"
pd.crosstab([dados["dose"], dados["tempo"]], dados["tratamento"], values=dados["g-0"], aggfunc="max")  # max of g-0 per cell
# + colab={"base_uri": "https://localhost:8080/"} id="0pnHlFZ__v-N" outputId="2fa94739-7d9d-4182-9aa3-9446d72696f9"
pd.crosstab([dados["dose"], dados["tempo"]], dados["tratamento"], values=dados["g-0"], aggfunc="min")  # min of g-0 per cell
# + colab={"base_uri": "https://localhost:8080/"} id="vbtYPzoG_xIh" outputId="feda672a-a983-4802-cde6-b47470c3da69"
pd.crosstab([dados["dose"], dados["tempo"]], dados["tratamento"], values=dados["g-0"], aggfunc="std")  # standard deviation of g-0 per cell
# + [markdown] id="sXDO8-TAABmZ"
# ###Desafio 04: Explorar o melt.
# + colab={"base_uri": "https://localhost:8080/"} id="q_5KbZI7ADWL" outputId="584ee7c9-a13b-461b-c9f2-394e5488441d"
pd.melt(dados, id_vars=["tempo"], value_vars=[("g-0")])  # long format: one row per (tempo, g-0 value)
# + colab={"base_uri": "https://localhost:8080/"} id="Z3aQntvCAUGy" outputId="be15055d-18a2-4282-e0b6-b76525c0cbbb"
pd.melt(dados, id_vars=["dose", "tempo", "tratamento"], value_vars=[("g-0")])  # keep the three categorical ids alongside g-0
# + id="76Mg6_1xAahM"
# + [markdown] id="ZK-LLj1pAvuh"
# ###Desafio 05/06: Calcular e analisar a correlação entre G e C. Refletir sobre os efeitos biológicos e estudar o código que plota a matriz de correlação (heatmap)
# + colab={"base_uri": "https://localhost:8080/"} id="hEMjM9zqAxSN" outputId="f839c0bf-5008-488a-99b4-d1aa3e37f419"
# Cross-correlation between the first 31 genes (g-0..g-30) and the first 31
# viability columns (c-0..c-30).
columns_genes = [f'g-{x}' for x in range(0, 31)]
columns_celular = [f'c-{x}' for x in range(0, 31)]
# Full corr() over both groups, then .loc keeps only the (c rows x g columns) quadrant.
corr = dados[columns_genes+columns_celular].corr().loc[columns_celular,columns_genes]
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(15, 11))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(250, 20, as_cmap=True)
# Draw the heatmap with correct aspect ratio (no triangle mask: this matrix is not symmetric)
sns.heatmap(corr, cmap=cmap, center=0,
            square=True, linewidths=.5, cbar_kws={"shrink": .5})
plt.show()
# + [markdown] id="5OkPaD-dC_iD"
# ###Desafio 07: Resumo do que você aprendeu com os dados
# + [markdown] id="r7JldeDPFquF"
# * Aprendi a usar o pd.melt()
# * Aprendi a usar o pd.groupby() para agregar informações
# * Aprendi a usar o pd.crosstab() para criar tabelas de frequência
# * Aprendi a criar gráfico de dispersão usando o sns.scatterplot()
# * Aprendi a calcular correlação usando pd.corr()
# * Aprendi a usar o sns.heatmap() para criar mapa de calor
| Aula3/aula3_imersao_dados_alura.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
# %matplotlib inline
# Load the Kaggle Bike Sharing Demand files and peek at each.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
sampleSubmission = pd.read_csv('sampleSubmission.csv')
train.head()
test.head()
sampleSubmission.head()
msno.matrix(train)  # missingno matrix plot: visual check for missing values
msno.matrix(test)
# NOTE(review): drop() returns a new frame and the result is not assigned (and
# inplace is not set), so this line is a no-op — train keeps 'casual' and
# 'registered'. Those columns are used again in the correlation matrix below,
# so assigning the result here would break that cell. TODO confirm intent.
train.drop(['casual','registered' ], axis=1)
# Parse the timestamp column, then derive calendar features from it.
train.datetime = pd.to_datetime(train.datetime)
test.datetime = pd.to_datetime(test.datetime)
train.dtypes
train['year'] = train['datetime'].dt.year
train['month'] = train['datetime'].dt.month
train['weekday'] = train['datetime'].dt.dayofweek  # 0 = Monday per pandas convention
train['hour'] = train['datetime'].dt.hour
# Bar plots of rental count: overall, and split by season / month / hour.
fig, ax = plt.subplots(ncols=2, nrows=2)
fig.set_size_inches(12, 10)
sns.barplot(data=train, y='count', orient='v', ax=ax[0][0])
sns.barplot(data=train, y='count', x='season', ax=ax[0][1])
sns.barplot(data=train, y='count', x='month', ax=ax[1][0])
sns.barplot(data=train, y='count', x='hour', ax=ax[1][1])
# Same breakdowns as box plots, to see spread and outliers.
fig, ax = plt.subplots(ncols=2, nrows=2)
fig.set_size_inches(12, 10)
sns.boxplot(data=train, y='count', orient='v', ax=ax[0][0])
sns.boxplot(data=train, y='count', x='season', ax=ax[0][1])
sns.boxplot(data=train, y='count', x='month', ax=ax[1][0])
sns.boxplot(data=train, y='count', x='hour', ax=ax[1][1])
# ## Remove Outliers
# Keep only rows whose count is within 3 standard deviations of the mean.
trainWithoutOutliers = train[np.abs(train['count']-train['count'].mean())<=(3*train['count'].std())]
# Fixed message typo ("Befor") and added the missing colon for consistency.
print('Before shape of removing outliers:', train.shape)
print('After shape of removing outliers:', trainWithoutOutliers.shape)
# ## Correation
trainWithoutOutliers.columns
# Correlation matrix among the numeric variables, shown as an annotated heatmap.
corrmat = train[['temp', 'atemp', 'humidity', 'windspeed', 'casual', 'registered', 'count']].corr()
sns.heatmap(corrmat, square=True ,annot=True)
# ## modeling
# #### Using XGBoost and Optmizing parameters by GridSearchCV
from sklearn.model_selection import GridSearchCV
from xgboost import XGBRegressor  # was missing: XGBRegressor is used below but never imported

# +
# Hyper-parameter search around the best values found in a previous run.
params = {
    # best_params_ : {'max_depth': 9, 'min_child_weight': 9}
    # 'max_depth' : [1, 3, 5, 7, 9],
    # 'min_child_weight' : [1, 3, 5, 7, 9],
    'max_depth' : [9, 11, 13],
    'min_child_weight' : [9, 11, 13]
}
# The base estimator must exist before GridSearchCV wraps it; previously 'xgb'
# was referenced here but only defined after the search (NameError on a fresh run).
xgb = XGBRegressor()
gridsearch = GridSearchCV(xgb, param_grid=params, cv=5)
# NOTE(review): X_train / Y_train / X_test are not defined anywhere in this
# notebook — presumably built in a feature-engineering cell that is missing
# here. TODO confirm.
gridsearch.fit(X_train, Y_train)
# -
gridsearch.best_params_
# Refit with the chosen hyper-parameters and predict on the test set.
xgb = XGBRegressor(max_depth=9, min_child_weight=9)
xgb.fit(X_train, Y_train)
Y_pred = xgb.predict(X_test)
Y_pred = np.abs(Y_pred)  # demand counts cannot be negative
# Assemble the Kaggle submission: one row per test timestamp with its predicted count.
submission = pd.concat([test['datetime'], pd.Series(Y_pred)], axis=1)
submission.columns = ['datetime', 'count']
submission.head()
submission.to_csv('submission.csv', index=False)
# #### The score is RMSLE 0.4595, the rank is about 940 in 3251
| Bike _Sharing_Demand_pt1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="CuD1bYIBMY4L" colab_type="text"
# <style>h1{color:#0000ff;text-align:center;}</style>
# <h1>Numérique et Sciences Informatiques</h1>
#
# <h3>Lycée d'Enseignement Général | Terminale | Enseignement de Spécialité</h3>
# + [markdown] id="LqcZ_t-hNAJK" colab_type="text"
# <blockquote>
# <hr />
# <b><NAME></b> | <EMAIL> | <a href='https://nsi.sapiensjmh.top' target='_blank'>https://nsi.sapiensjmh.top</a>
# <hr />
# </blockquote>
# + [markdown] id="k8mlJHw8Dwk4" colab_type="text"
# <h3>
# <b>Thème 1 - Structure de données</b>
# <h3>
# <h4>
# <i>Module 1 - Programmation Orientée Objet (POO)</i>
# </h4>
#
# + [markdown] id="5-mSCuiVtarP" colab_type="text"
# <code><mark>Première implémentation en Python</mark></code>
# + id="KcqhXZp8rECO" colab_type="code" colab={}
"""
Algorithme knn
‘identificationIris'
version : 4 attributs (fichier de données labellisées = iris_complet.csv) et 3 classes
"""
# ===============================================================
# IMPORT DE BIBLIOTHEQUES
# ===============================================================
import csv
from math import sqrt
import time
# ===============================================================
# DECLARATION DE VARIABLES
# ===============================================================
# ---------------------------------------------------------------
# nom : le (liste des échantillons) | type : liste de listes
# ---------------------------------------------------------------
le=[]
with open('iris_complet.csv', newline='') as csvfile:
listecsv = csv.reader(csvfile, delimiter=',')
for row in listecsv:
le.append(row)
csvfile.close()
# ---------------------------------------------------------------
# nom : ce(codes des espèces) | type : liste de listes
# ---------------------------------------------------------------
ce = [ [0,'Iris setosa'],[1,'Iris virginica'],[2,'Iris versicolor'] ]
# ---------------------------------------------------------------
# nom : eai (échantillon à identifier) | type = liste
# ---------------------------------------------------------------
# Attention : il faut que l'ordre de saisie soit le même que l'ordre des attributs dans le fichier des données labellisées
eai = []
eai.append(float(input('Saisir la longueur du sépale de l\'iris inconnu : ')))
eai.append(float(input('Saisir la largeur du sépale de l\'iris inconnu : ')))
eai.append(float(input('Saisir la longueur du pétale de l\'iris inconnu : ')))
eai.append(float(input('Saisir la largeur du pétale de l\'iris inconnu : ')))
# ---------------------------------------------------------------
# nom : k (nombre de voisins) | type : entier
# ---------------------------------------------------------------
k = input('Saisir le nombre de voisins voulu : ')
k = int(k)
# ===============================================================
# CALCUL DES DISTANCES
# entre chaque échantillon de la liste ‘le'
# et l'échantillon inconnu ‘eai'
# ===============================================================
# ---------------------------------------------------------------
# déclaration d'une variable : type = liste ; nom = 'dist' ;
# ---------------------------------------------------------------
dist=[]
# ---------------------------------------------------------------
# déclaration d'une variable temporaire : type = float ; nom = 'd' ;
# ---------------------------------------------------------------
d=0.0
# ---------------------------------------------------------------
# affectation de valeurs de type 'liste' à 'dist' par une boucle bornée
# ---------------------------------------------------------------
for i in range(1,len(le)):
d =sqrt( pow(eai[0]-float(le[i][0]) ,2) + pow(eai[1]-float(le[i][1]) ,2) + pow(eai[2]-float(le[i][2]) ,2) + pow(eai[3]-float(le[i][3]) ,2))
dist.append( (d, int(le[i][4]) ) )
# ---------------------------------------------------------------
# ===============================================================
# TRI DES DISTANCES
# Tri de 'dist' par ordre croissant des distances
# ===============================================================
dist.sort(key=lambda x:x[0])
# ===============================================================
# PRÉDICTION DE LA ‘CLASSE' DE L'ÉCHANTILLON INCONNU
# ===============================================================
# ---------------------------------------------------------------
# 1 - SELECTION DES K PLUS COURTES DISTANCES
# déclaration de variable : nom = 'kcd' ;type = liste ;
kcd=[]
# affectation de valeurs de type 'liste' à 'kcd' par une boucle bornée
for i in range(0,k):
kcd.append( [dist[i][0], dist[i][1]] )
print()
print(kcd)
print()
# ---------------------------------------------------------------
# 2 - POUR CHAQUE CLASSE,
# COMPTAGE DU NOMBRE DE PLUS PROCHES VOISINS
# déclaration d'une variable : type = liste ; nom = 'npvc' ;
npvc=[]
# initialisation de la table ‘npvc'
for i in range(0, len(ce)):
npvc.append( [ce[i][0], 0 ] )
# comptage du nombre de plus proches voisins pour chaque classe
for i in range(0,k):
for j in range(0,len(ce)):
if kcd[i][1] == ce[j][0]:
npvc[j][1]= npvc[j][1] + 1
# tri de la table ‘npvc' par ordre croissant de nombre de plus proches voisins
npvc.sort(key=lambda x:x[1])
# ---------------------------------------------------------------
# 3 - AFFICHAGE D'UNE PREDICTION
# ---------------------------------------------------------------
print( 'Prédiction - L iris peut faire partie de l espèce : ',ce[ npvc[len(npvc)-1][0]][1] )
# ===============================================================
| LEG_Tle_SpeNSI_Th_01_Module_1_Notebook1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Teleoperation
#
# In this example we'll control the Jetbot remotely with a gamepad controller connected to our web browser machine.
# ### Create gamepad controller
#
# The first thing we want to do is create an instance of the ``Controller`` widget, which we'll use to drive our robot.
# The ``Controller`` widget takes a ``index`` parameter, which specifies the number of the controller. This is useful in case you
# have multiple controllers attached, or some gamepads *appear* as multiple controllers. To determine the index
# of the controller you're using,
#
# 1. Visit [http://html5gamepad.com](http://html5gamepad.com).
# 2. Press buttons on the gamepad you're using
# 3. Remember the ``index`` of the gamepad that is responding to the button presses
#
# Next, we'll create and display our controller using that index.
# +
import ipywidgets.widgets as widgets
# Gamepad widget; 'index' selects which attached controller to read
# (find yours at html5gamepad.com as described above).
controller = widgets.Controller(index=1)  # replace with index of your controller
display(controller)
# -
# Even if the index is correct, you may see the text ``Connect gamepad and press any button``. That's because the gamepad hasn't
# registered with this notebook yet. Press a button and you should see the gamepad widget appear above.
# ### Connect gamepad controller to robot motors
#
# Now, even though we've connected our gamepad, we haven't yet attached the controls to our robot! The first, and most simple control
# we want to attach is the motor control. We'll connect that to the left and right vertical axes using the ``dlink`` function. The
# ``dlink`` function, unlike the ``link`` function, allows us to attach a transform between the ``source`` and ``target``. Because
# the controller axes are flipped from what we think is intuitive for the motor control, we'll use a small *lambda* function to
# negate the value.
#
# > WARNING: This next cell will move the robot if you touch the gamepad controller axes!
# +
from jetbot import Robot
import traitlets
robot = Robot()
# Gamepad vertical axes -> motor values. The lambda negates the axis
# reading because pushing the stick forward reports a negative value.
left_link = traitlets.dlink((controller.axes[1], 'value'), (robot.left_motor, 'value'), transform=lambda x: -x)
right_link = traitlets.dlink((controller.axes[3], 'value'), (robot.right_motor, 'value'), transform=lambda x: -x)
# -
# Awesome! Our robot should now respond to our gamepad controller movements. Now we want to view the live video feed from the camera!
# ### Create and display Image widget
#
# First, let's display an ``Image`` widget that we'll use to show our live camera feed. We'll set the ``height`` and ``width``
# to just 300 pixels so it doesn't take up too much space.
#
# > FYI: The height and width only affect the rendering on the browser side, not the native image resolution before network transport from robot to browser.
# +
image = widgets.Image(format='jpeg', width=300, height=300)
display(image)
# -
# ### Create camera instance
#
# Well, right now there's no image presented, because we haven't set the value yet! We can do this by creating our ``Camera``
# class and attaching the ``value`` attribute of the camera to the ``value`` attribute of the image.
#
# First, let's create the camera instance, we call the ``instance`` method which will create a new camera
# if it hasn't been created yet. If one already exists, this method will return the existing camera.
# +
from jetbot import Camera
# Singleton accessor: reuses an existing camera object if one was already created.
camera = Camera.instance()
# -
# ### Connect Camera to Image widget
# Our camera class currently only produces values in BGR8 (blue, green, red, 8bit) format, while our image widget accepts values in compressed *JPEG*.
# To connect the camera to the image we need to insert the ``bgr8_to_jpeg`` function as a transform in the link. We do this below
# +
from jetbot import bgr8_to_jpeg
# Stream camera frames into the widget, converting BGR8 -> JPEG on the way.
camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)
# -
# You should now see the live video feed shown above!
#
# > REMINDER: You can right click the output of a cell and select ``Create New View for Output`` to display the cell in a separate window.
# ### Stop robot if network disconnects
#
# You can drive your robot around by looking through the video feed. But what if your robot disconnects from Wifi? Well, the motors would keep moving and it would keep trying to stream video and motor commands. Let's make it so that we stop the robot and unlink the camera and motors when a disconnect occurs.
# +
from jetbot import Heartbeat
def handle_heartbeat_status(change):
    """Safety callback: when the heartbeat reports a lost connection,
    tear down the camera/motor links and halt the robot."""
    if change['new'] == Heartbeat.Status.dead:
        camera_link.unlink()
        left_link.unlink()
        right_link.unlink()
        robot.stop()
heartbeat = Heartbeat(period=0.5)
# attach the callback function to heartbeat status
heartbeat.observe(handle_heartbeat_status, names='status')
# -
# If the robot disconnects from the internet you'll notice that it stops. You can then re-connect the camera and motors by re-creating the links with the cell below
# +
# only call this if your robot links were unlinked, otherwise we'll have redundant links which will double
# the commands transfered
left_link = traitlets.dlink((controller.axes[1], 'value'), (robot.left_motor, 'value'), transform=lambda x: -x)
right_link = traitlets.dlink((controller.axes[3], 'value'), (robot.right_motor, 'value'), transform=lambda x: -x)
camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)
# -
# ### Save snapshots with gamepad button
#
# Now, we'd like to be able to save some images from our robot. Let's make it so the right bumper (index 5) saves a snapshot of the current live image. We'll save the images in the ``snapshots/`` directory, with a name that is guaranteed to be unique using the ``uuid`` python package. We use the ``uuid1`` identifier, because this also encodes the date and MAC address which we might want to use later.
# +
import uuid
import os
# FIX: create the output directory with the standard library instead of
# spawning an external process. The original used
# subprocess.call(['mkdir', '-p', 'snapshots']), which only works where a
# 'mkdir' binary exists on PATH; os.makedirs is portable and equivalent.
os.makedirs('snapshots', exist_ok=True)
snapshot_image = widgets.Image(format='jpeg', width=300, height=300)
def save_snapshot(change):
    """Button callback: when the observed button is pressed (change['new']
    is truthy), write the current JPEG frame to a uniquely named file in
    snapshots/ and mirror it into the snapshot preview widget."""
    # save snapshot when button is pressed down
    if change['new']:
        # uuid1 embeds timestamp + MAC address, guaranteeing a unique name
        file_path = 'snapshots/' + str(uuid.uuid1()) + '.jpg'
        # write snapshot to file (we use image value instead of camera because it's already in JPEG format)
        with open(file_path, 'wb') as f:
            f.write(image.value)
        # display snapshot that was saved
        snapshot_image.value = image.value
# Right bumper (button index 5) triggers a snapshot.
controller.buttons[5].observe(save_snapshot, names='value')
display(widgets.HBox([image, snapshot_image]))
display(controller)
# -
# Before closing this notebook and shutting down its Python kernel, we want to properly close the camera connection so that we can use the camera in other notebooks.
# Release the camera device so other notebooks can open it.
camera.stop()
# ### Conclusion
#
# That's it for this example, have fun!
| notebooks/teleoperation/teleoperation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import math

def my_sine():
    """Return a lambda mapping an angle in degrees to its sine."""
    return lambda degrees: math.sin(math.radians(degrees))

def my_cosine():
    """Return a lambda mapping an angle in degrees to its cosine."""
    return lambda degrees: math.cos(math.radians(degrees))

sine = my_sine()
cosine = my_cosine()
# Pythagorean identity: sin²(x) + cos²(x) == 1 for any angle x.
math.pow(sine(30), 2) + math.pow(cosine(30), 2)
# + pycharm={"name": "#%%\n"}
# A list of (country, capital) pairs.
capitals = [("USA", "Washington"), ("India", "Delhi"), ("France", "Paris"), ("UK", "London")]
print(capitals)
# + pycharm={"name": "#%%\n"}
# Sort the pairs in place, alphabetically by capital city (second element).
capitals.sort(key=lambda pair: pair[1])
print(capitals)
# + pycharm={"name": "#%%\n"}
# True when every word in check_for occurs somewhere in list_of_words.
list_of_words = ["Hello", "there.", "How", "are", "you", "doing?"]
check_for = ["How", "are"]
all(word in list_of_words for word in check_for)
# + pycharm={"name": "#%%\n"}
| 02-advanced-data-structures-and-file-handling/02-Lambda-Expressions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## (Alternative) One-hot Encoding with Label Binarizer
# In the previous approach, we used Label Encoder to get numeric labels for our categorical values, and then added one-hot encoding for them using pandas.
#
# However, as we performed the transforms, we realized that the set of categories that appears in the test set is only a subset of all the possible categories in the training data. To tackle this, we removed those categories that never appeared in the test set. While this may work when you know those categories will never appear at test time, it breaks down if new test cases contain these unseen categorical variables.
#
# As a general rule, we must always try to use the full range of the features/variables in training our model whether or not they are tested in a given test-scenario. The more generic our model, the better its performance for unseen test cases.
#
# We use the following alternative approach to handle this case where future test cases where these categories might appear.
# This notebook is an attempt to build a baseline model with the given features (*i.e. no feature engineering or augmenting the training file with the other files*). The model explored is logistic regression.
# +
# for data manipulation
import numpy as np
import pandas as pd
# sklearn preprocessing for dealing with categorical variables
from sklearn.preprocessing import LabelEncoder, LabelBinarizer
# file system management
import os
# setting to suppress warnings
import warnings
warnings.filterwarnings('ignore')
# -
# ### Data files
# List all data files available from competition
raw_data_path = './../data/raw/'
# List the raw data files, skipping hidden files such as .DS_Store.
print('Raw data files', *[f for f in os.listdir(raw_data_path) if not f.startswith('.')], sep='\n- ')
# ### Data exploration
# Training data is **application_train.csv**.
# Testing data is **application_test.csv**
#
# Training & Testing data shape - number of records & number of features/columns provided
# +
# Load the competition CSVs into DataFrames.
train_data = pd.read_csv(os.path.join(raw_data_path, 'application_train.csv'))
test_data = pd.read_csv(os.path.join(raw_data_path, 'application_test.csv'))
print("Training data shape", train_data.shape)
print("Testing data shape", test_data.shape)
train_data.head()
# -
# Training data has 307511 records, each of which is a loan application. Each record has 122 features.
#
# Testing data is considerably smaller, it has all features except the target column which is the variable to be predicted.
# #### Feature Types
# It is important to know about the types of features available. Numerical variables (Integer and float) can be directly used for model building. Pandas reads in other types of variables as objects (string, character, etc) which are categorical variables that need to be converted to a form suited for model building.
# Distribution of column dtypes (int/float are model-ready; 'object' needs encoding).
train_data.dtypes.value_counts()
# One row per application; compare total rows to distinct applicant ids.
print("total applications ",train_data['SK_ID_CURR'].count())
print("unique applicants ",train_data['SK_ID_CURR'].unique().shape)
# this means each applicant has only one application in the dataset. this will be useful to know later when we select the type of cross validation
# #### Object type columns
# Number of unique values (potentially, classes or categories) in each object column
train_data.select_dtypes('object').apply(pd.Series.nunique, axis = 0)
# visualize what these categories look like
train_data.select_dtypes('object').head()
# +
print("shapes before transform")
print(train_data.shape)
print(test_data.shape)
# Label Binarizer creates a transformation for categorical variables to One-Hot Encoding
lb = LabelBinarizer()
lb_count = 0 #number of columns that are label encoded
# Iterate through all columns
transformed_train = train_data
transformed_test = test_data
for col in train_data:
if train_data[col].dtype == 'object':
print("Transforming column ", col ," with categories ", train_data[col].unique())
# train the label encoder on the training data
lb.fit(train_data[col].astype(str))
# transform the column on both training and testing data
transformed_col = lb.transform(train_data[col].astype(str))
# removing the original column from the dataframe and adding the new transformed columns
temp_df = pd.DataFrame(transformed_col)
transformed_train.drop([col],1,inplace=True)
transformed_train = transformed_train.join(temp_df, how='outer', rsuffix = col)
transformed_col = lb.transform(test_data[col].astype(str))
temp_df = pd.DataFrame(transformed_col)
transformed_test.drop([col],1,inplace=True)
transformed_test = transformed_test.join(temp_df, how='outer', rsuffix = col)
lb_count += 1
print('{} columns were label encoded.'.format(lb_count))
print("shapes after transform")
print(transformed_train.shape)
print(transformed_test.shape)
# -
train_data = transformed_train
test_data = transformed_test
train_labels = train_data['TARGET']
train_data.drop(['TARGET'], 1, inplace=True)
# ## Baseline Model
# # Logistic Regression
# ### Preprocess the data
# - Filling in missing values via imputation
# - Feature scaling / normalization
# +
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22 in favour of sklearn.impute.SimpleImputer — this cell requires the
# older sklearn this notebook was written against.
from sklearn.preprocessing import MinMaxScaler, Imputer
# Drop the target column from training data
# (TARGET was already dropped in the encoding cell above, so in this flow
# the else-branch runs and simply copies the frame.)
if 'TARGET' in train_data:
    train_set = train_data.drop(columns = ['TARGET'])
else:
    train_set = train_data.copy()
features = list(train_set.columns)
# Copy test data
test_set = test_data.copy()
# Impute missing values with median
imputer = Imputer(strategy = 'median')
# Scale each feature to 0-1
scaler = MinMaxScaler(feature_range=[0, 1])
# Fit on the training data
# (fit on train only, then apply to both, to avoid leaking test statistics)
imputer.fit(train_set)
# Transform both the training and testing data
train_set = imputer.transform(train_set)
test_set = imputer.transform(test_set)
# Repeat above 2 steps with scaler
scaler.fit(train_set)
train_set = scaler.transform(train_set)
test_set = scaler.transform(test_set)
print("Training data shape", train_set.shape)
print("Testing data shape", test_set.shape)
# -
# ### Validation testing
# hold out part of the training set to evaluate performance
# +
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
# creating a validation set that is 40% of the original training set.
# the remaining 60% would be used for model building
x_train, x_val, y_train, y_val = train_test_split(train_set, train_labels, test_size = 0.4, random_state = 0)
# -
print("Training set shape", x_train.shape)
print("Validation set shape", x_val.shape)
# ### Model Building
# +
from sklearn.linear_model import LogisticRegression
# sklearn built-in toolkit for cross-validation
from sklearn.model_selection import KFold,cross_validate, cross_val_score
# Create model with a specified regularization parameter
# (C is the inverse regularization strength: smaller C = stronger penalty)
log_reg = LogisticRegression(C = 0.001)
# Train on the training data
# 5-fold cross-validated ROC-AUC on the full preprocessed training set.
scores = cross_val_score(log_reg, train_set, train_labels, scoring = 'roc_auc',cv=5)
print(scores)
# -
print("Mean score:", scores.mean())
# ### Evaluate performance on validation set
# +
from sklearn.metrics import roc_auc_score
# We need to fit again, as cross_val_score clones the classifier internally.
log_reg.fit(x_train, y_train)
# make predictions for the validation set
# predict_proba column 1 = probability of the positive class (TARGET == 1)
pred_val = log_reg.predict_proba(x_val)[:, 1]
pred_train = log_reg.predict_proba(x_train)[:, 1]
print('Training AUC score : {}'.format(roc_auc_score(y_train, pred_train)))
print('Validation AUC score : {}'.format(roc_auc_score(y_val, pred_val)))
# -
# # Parameter tuning
#
# So far, we used the default parameter settings to train our LogisticRegression estimator. Now, let's change the parameter C in regular intervals & identify the one which gives the best esimator/classifier. This process of selecting the best model given a training set is called model selection
# +
# Manual grid over the regularization parameter C with 5-fold CV,
# recording mean train/validation ROC-AUC for each value.
params = []
train_scores = []
val_scores = []
for c in [0.0001,0.001,0.01,0.1,0.4,0.7,1.0]:
    clf = LogisticRegression(C = c)
    params.append(c)
    # per-fold train / test AUC scores for this C
    tss = []
    tes = []
    for train, test in KFold(n_splits=5).split(train_set):
        # NOTE(review): train_labels is indexed here with positional arrays;
        # this assumes it still carries the default RangeIndex — confirm.
        clf.fit(train_set[train], train_labels[train])
        pred_train = clf.predict_proba(train_set[train])[:,1]
        pred_test = clf.predict_proba(train_set[test])[:, 1]
        tss.append(roc_auc_score(train_labels[train], pred_train))
        tes.append(roc_auc_score(train_labels[test], pred_test))
    print("C :", c)
    print("training : ",sum(tss)/5)
    print("testing : ", sum(tes)/5)
    train_scores.append(sum(tss)/5)
    val_scores.append(sum(tes)/5)
# -
import matplotlib.pyplot as plt
# %matplotlib inline
# Plot mean training vs validation ROC-AUC for each regularization value C.
plt.plot(params, train_scores, color='blue', label='training score')
plt.plot(params, val_scores, color='red', label='validation score')
plt.legend(loc='best')
plt.ylim(0, 1)
# FIX: the x axis holds the C values tried above, not an estimator count —
# the original label 'estimators' was wrong.
plt.xlabel('C (inverse regularization strength)')
plt.ylabel('score');
print("C : ",params)
# ## Model Selection
#
# From the graph above we see that the only change in performance is brought when changing `C` from `0.0001` to `0.001`.
#
# Since we already set our `C = 0.001` in our first model generation stage, we do not need to re-do this step. But, if we had found a better set of parameters to use, at this stage, we will re-train the model with the new parameters and use that for our predictions.
# ### Predictions
# Target - value of 1 indicates client with payment difficulties
#
# Predict the probabilities of not repaying a loan.
#
# Model *predict_proba* method returns the probability of belonging to each of the target variable classes. Since we want the probability of not repaying a loan, we need to select the second column.
#
# (There are only 2 possible values to the Target column, so the sum of these probabilities would add to 1)
# Make predictions for the test data
# predict_proba returns [P(class 0), P(class 1)] per row; column 1 is the
# probability of payment difficulties (TARGET == 1), which is what we submit.
log_reg_pred = log_reg.predict_proba(test_set)[:, 1]
# #### Submission
# +
# Compose the submission csv
# FIX: take an explicit .copy() — assigning a new column to the result of
# test_data[['SK_ID_CURR']] (a derived frame) triggers pandas'
# SettingWithCopyWarning and risks chained-assignment surprises.
submission = test_data[['SK_ID_CURR']].copy()
submission['TARGET'] = log_reg_pred
submission.head()
# -
import time
# Timestamp used to give each submission file a unique, sortable name.
timestr = time.strftime("%Y%m%d-%H%M%S")
print(timestr)
# Save the submission to a csv file
submission.to_csv('./../data/output/submission_'+str(timestr)+'.csv', index=False)
| notebooks/baseline_model_with_labelbinarizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cp255] *
# language: python
# name: conda-env-cp255-py
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # CYPLAN255
# ### Urban Informatics and Visualization
# + [markdown] slideshow={"slide_type": "slide"}
# HIT RECORD and TRANSCRIBE
# + [markdown] slideshow={"slide_type": "slide"}
# # Lecture 17 -- Network Analysis Cont'd
# ******
# March 28, 2022
#
# <img src="https://i.redd.it/20h18mpvreg81.jpg" width=500 align='right' title='A chest X-ray in progress at Dr. <NAME>’s radiology department at the Cochin hospital in Paris, France. c 1914.'>
# + [markdown] slideshow={"slide_type": "slide"}
# # Agenda
# 1. Announcements
# 2. Isochrones w/ OSMnx
# 3. UrbanAccess
# 4. For next time
# 5. Questions
#
# + [markdown] slideshow={"slide_type": "slide"}
# # 1. Announcements
# + [markdown] slideshow={"slide_type": "fragment"}
# Solution for Windows users: fresh conda environments!
# 1. `conda create -n <name of env> python conda`
# 2. `conda config --add channels conda-forge`
# 3. `conda config --set channel_priority strict`
# 4. `conda install ipython notebook nb_conda_kernels`
# 5. `conda install <name of package that's giving you trouble>` (e.g. `geopandas` or `osmnx`)
# 6. launch Jupyter Notebook _from your new environment_
# + [markdown] slideshow={"slide_type": "slide"}
# # 2. The Isochrone Tutorial from OSMnx
# + [markdown] slideshow={"slide_type": "fragment"}
# 99% of the code in this section was copied from [here](https://github.com/gboeing/osmnx-examples/blob/main/notebooks/13-isolines-isochrones.ipynb)
# + slideshow={"slide_type": "fragment"}
import geopandas as gpd
import matplotlib.pyplot as plt
import networkx as nx
import osmnx as ox
from descartes import PolygonPatch
from shapely.geometry import LineString
from shapely.geometry import Point
from shapely.geometry import Polygon
# %matplotlib inline
ox.__version__
# + [markdown] slideshow={"slide_type": "slide"}
# Configure the place, network type, trip times, and travel speed for analysis
# + slideshow={"slide_type": "fragment"}
# Analysis parameters: walking network of Berkeley, isochrone bins in
# minutes, and an assumed constant walking speed.
place = {"city": "Berkeley", "state": "California"}
network_type = "walk"
trip_times = [5, 10, 15, 20, 25] # in minutes
travel_speed = 4.5 # walking speed in km/hour
# + [markdown] slideshow={"slide_type": "slide"}
# Download the OSM network and place boundary geometries
# + slideshow={"slide_type": "fragment"}
# Network I/O: download OSM data, then project the graph and the boundary
# polygon so buffer/distance operations use planar units.
G = ox.graph_from_place(place, network_type=network_type)
G = ox.project_graph(G)
bounds = ox.geocode_to_gdf(place)
bounds = ox.project_gdf(bounds)
# + [markdown] slideshow={"slide_type": "slide"}
# Extract the nodes as a geodataframe to get the center-most node
# + slideshow={"slide_type": "fragment"}
gdf_nodes = ox.graph_to_gdfs(G, edges=False)
# Centroid of all nodes; the analysis is anchored at the nearest graph node.
x, y = gdf_nodes["geometry"].unary_union.centroid.xy
center_node = ox.distance.nearest_nodes(G, x[0], y[0])
# + [markdown] slideshow={"slide_type": "slide"}
# Impute an estimated walk time for each edge
# + slideshow={"slide_type": "fragment"}
meters_per_minute = travel_speed * 1000 / 60 # km per hour to m per minute
# Annotate every edge with a 'time' attribute: minutes to traverse it.
for _, _, _, data in G.edges(data=True, keys=True):
    data["time"] = data["length"] / meters_per_minute
# + [markdown] slideshow={"slide_type": "slide"}
# ## 2.2. Isochrones by Node
# + [markdown] slideshow={"slide_type": "fragment"}
# Assign a color to each node
# -
import seaborn as sns
# Extend the isochrone bins out to 45 minutes for the node-colouring demo.
# NOTE(review): this mutates the module-level trip_times; it is reset to
# [5..25] further below before the convex-hull section.
trip_times = trip_times + [30, 35,40, 45]
sns.color_palette("plasma", 5)
# + slideshow={"slide_type": "fragment"}
# One hex colour per travel-time bin, sampled from the plasma colormap.
iso_colors = ox.plot.get_colors(n=len(trip_times), cmap="plasma", start=0, return_hex=True)
node_colors = {}
# Iterating largest trip time first means nodes reachable in less time get
# overwritten by the colour of the smaller (closer) bin.
for trip_time, color in zip(sorted(trip_times, reverse=True), iso_colors):
    subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance="time")
    for node in subgraph.nodes():
        node_colors[node] = color
# Per-node colour ('none' when unreachable) and size (0 hides the node).
nc = [node_colors[node] if node in node_colors else "none" for node in G.nodes()]
ns = [15 if node in node_colors else 0 for node in G.nodes()]
# + [markdown] slideshow={"slide_type": "fragment"}
# **Question:** why is this loop iterating through travel times in reverse order?
# + [markdown] slideshow={"slide_type": "slide"}
# Plot the graph
# + slideshow={"slide_type": "fragment"}
fig, ax = ox.plot_graph(
    G,
    bgcolor='none',
    node_color=nc,
    node_size=ns,
    node_alpha=0.8,
    edge_linewidth=0.2,
    edge_color="#999999",
)
# + [markdown] slideshow={"slide_type": "slide"}
# ## 2.3. Isochrones by Convex Hull
# + [markdown] slideshow={"slide_type": "fragment"}
# Generate a convex hull for each travel time bin
# -
trip_times = [5,10,15,20,25]
# + slideshow={"slide_type": "fragment"}
isochrone_polys = []
for trip_time in sorted(trip_times, reverse=True):
    subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance="time")
    node_points = [Point((data["x"], data["y"])) for node, data in subgraph.nodes(data=True)]
    # Convex hull of the reachable nodes, clipped to the city boundary.
    bounding_poly = gpd.GeoSeries(node_points).unary_union.convex_hull
    bounding_poly = gpd.GeoSeries(bounding_poly, crs=bounds.crs).clip(bounds, keep_geom_type=True).values[0]
    isochrone_polys.append(bounding_poly)
# + [markdown] slideshow={"slide_type": "slide"}
# Plot the graph and layer the convex hulls on top using matplotlib
# + slideshow={"slide_type": "fragment"}
fig, ax = ox.plot_graph(
    G, bgcolor='none', show=False, close=False, edge_color="#999999", edge_alpha=0.2, node_size=0
)
iso_colors = ox.plot.get_colors(n=len(trip_times), cmap="plasma", start=0, return_hex=True)
for polygon, fc in zip(isochrone_polys, iso_colors):
    patch = PolygonPatch(polygon, fc=fc, ec="none", alpha=0.6, zorder=-1)
    ax.add_patch(patch)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ## 2.4. Isochrones with Buffered Network Edges
# -
def make_iso_polys(G, edge_buff=25, node_buff=50, infill=False):
    """Build one isochrone polygon per travel-time bin by buffering the
    reachable nodes and edges and unioning the buffers.

    NOTE(review): relies on the module-level globals ``trip_times`` (the
    bins) and ``center_node`` (the origin of each ego-graph).

    Parameters
    ----------
    G : the projected graph to analyse (buffer distances are in its CRS units)
    edge_buff : buffer distance around each reachable edge
    node_buff : buffer distance around each reachable node
    infill : when True, fill interior holes so the polygons render solid

    Returns
    -------
    list of polygons, ordered from largest travel time to smallest
    """
    isochrone_polys = []
    for trip_time in sorted(trip_times, reverse=True):
        # Subgraph of everything reachable within trip_time minutes.
        subgraph = nx.ego_graph(G, center_node, radius=trip_time, distance="time")
        node_points = [Point((data["x"], data["y"])) for node, data in subgraph.nodes(data=True)]
        nodes_gdf = gpd.GeoDataFrame({"id": list(subgraph.nodes)}, geometry=node_points)
        nodes_gdf = nodes_gdf.set_index("id")
        edge_lines = []
        for n_fr, n_to in subgraph.edges():
            f = nodes_gdf.loc[n_fr].geometry
            t = nodes_gdf.loc[n_to].geometry
            # Use the edge's real geometry when present; otherwise a
            # straight line between its endpoint nodes.
            edge_lookup = G.get_edge_data(n_fr, n_to)[0].get("geometry", LineString([f, t]))
            edge_lines.append(edge_lookup)
        n = nodes_gdf.buffer(node_buff).geometry
        e = gpd.GeoSeries(edge_lines).buffer(edge_buff).geometry
        all_gs = list(n) + list(e)
        new_iso = gpd.GeoSeries(all_gs).unary_union
        # try to fill in surrounded areas so shapes will appear solid and
        # blocks without white space inside them
        if infill:
            new_iso = Polygon(new_iso.exterior)
        isochrone_polys.append(new_iso)
    return isochrone_polys

isochrone_polys = make_iso_polys(G, edge_buff=25, node_buff=0, infill=True)
fig, ax = plt.subplots(figsize=(15,15))
fig, ax = ox.plot_graph(
    G, bgcolor='none', show=False, close=False, edge_color="#999999", edge_alpha=0.2,
    node_size=0, ax=ax
)
# Overlay one filled patch per isochrone (colours computed earlier).
for polygon, fc in zip(isochrone_polys, iso_colors):
    patch = PolygonPatch(polygon, fc=fc, ec="none", alpha=0.7, zorder=-1)
    ax.add_patch(patch)
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercise 1: Drive-time Isochrones
#
# 1. Download a drive-network by specifying `network_type='drive` in `ox.graph_from_place()`
# 2. Use `ox.add_edge_speeds()` to add speed limit attributes to the edges.
# 3. Use `ox.add_edge_travel_times()` to impute the travel time along each edge based on the speed limit
# 4. Plot the isochrones using one of the methods we used earlier. Hint: you might want to update your travel time bins
# + [markdown] slideshow={"slide_type": "slide"}
# ## Exercise 2: OSMnx + Pandana
#
# 1. Extract edge and node GeoDataFrames from your OSMnx graph using `ox.graph_to_gdfs()`
# 2. Create a Pandana `Network()` object using `length` or `travel_time` as the impedance
# 3. Generate some accessibility queries!
# + [markdown] slideshow={"slide_type": "slide"}
# # 3. UrbanAccess (OSM + GTFS)
# + [markdown] slideshow={"slide_type": "fragment"}
# Code in this section taken directly from https://github.com/UDST/urbanaccess/blob/dev/demo/simple_example.ipynb
#
# This notebook provides a brief overview of the main functionality of UrbanAccess with examples using AC Transit and BART GTFS data and OpenStreetMap (OSM) pedestrian network data to create an integrated transit and pedestrian network for Oakland, CA for use in Pandana network accessibility queries.
#
# **UrbanAccess on UDST:** https://github.com/UDST/urbanaccess
#
# **UrbanAccess documentation:** https://udst.github.io/urbanaccess/index.html
#
# **UrbanAccess citation:**
#
# `<NAME> and <NAME>dell, 2017, "UrbanAccess: Generalized Methodology for Measuring Regional Accessibility with an Integrated Pedestrian and Transit Network" Transportation Research Record: Journal of the Transportation Research Board, 2653: 35–44.`
# -
# ### Outline
# 1. Imports
# 2. Working with GTFS data
# 3. Creating a transit network
# 4. Creating a pedestrian network
# 5. Creating an integrated transit and pedestrian network
# 6. Saving a network to disk
# 7. Visualizing the network
# 8. Adding average headways to network travel time
# 9. Using an UrbanAccess network with Pandana
# + [markdown] slideshow={"slide_type": "slide"}
# ## 3.1 Imports
# + slideshow={"slide_type": "fragment"}
import matplotlib
matplotlib.use('agg') # allows notebook to be tested in Travis
import pandas as pd
import cartopy.crs as ccrs
import cartopy
import matplotlib.pyplot as plt
import pandana as pdna
import time
import urbanaccess as ua
from urbanaccess.config import settings
from urbanaccess.gtfsfeeds import feeds
from urbanaccess import gtfsfeeds
from urbanaccess.gtfs.gtfsfeeds_dataframe import gtfsfeeds_dfs
from urbanaccess.network import ua_network, load_network
# %matplotlib inline
# + [markdown] slideshow={"slide_type": "slide"}
# ## 3.2. GTFS Data
# + [markdown] slideshow={"slide_type": "fragment"}
# ### 3.2.1. Search
# + [markdown] slideshow={"slide_type": "fragment"}
# You can use the search function to find feeds on the GTFS Data Exchange (Note: the GTFS Data Exchange is no longer being maintained as of Summer 2016 so feeds here may be out of date)
# + [markdown] slideshow={"slide_type": "fragment"}
# Let's search for feeds for transit agencies in the GTFS Data Exchange that we know serve Oakland, CA: 1) Bay Area Rapid Transit District (BART) which runs the metro rail service and 2) AC Transit which runs bus services.
#
# Let's start by finding the feed for the Bay Area Rapid Transit District (BART) by using the search term `Bay Area Rapid Transit`:
# + slideshow={"slide_type": "fragment"}
gtfsfeeds.search(search_text='Bay Area Rapid Transit',
search_field=None,
match='contains')
# + [markdown] slideshow={"slide_type": "slide"}
# Now that we have seen what can be found on the GTFS Data Exchange, let's run the search again, but this time add the feed from your search to the feed download list
# + slideshow={"slide_type": "fragment"}
gtfsfeeds.search(search_text='Bay Area Rapid Transit',
search_field=None,
match='contains',
add_feed=True)
# + [markdown] slideshow={"slide_type": "slide"}
# ### 3.2.2. Bring Your Own Data
# + [markdown] slideshow={"slide_type": "fragment"}
# If you know of a GTFS feed located elsewhere or one that is more up to date, you can add additional feeds located at custom URLs by adding a dictionary with the key as the name of the service/agency and the value as the URL.
# + [markdown] slideshow={"slide_type": "fragment"}
# Let's do this for AC Transit which also operates in Oakland, CA.
#
# The link to their feed is here: http://www.actransit.org/planning-focus/data-resource-center/ and let's get the latest version as of June 18, 2017
# + slideshow={"slide_type": "fragment"}
feeds.add_feed(add_dict={'ac transit': 'http://www.actransit.org/wp-content/uploads/GTFSJune182017B.zip'})
# + [markdown] slideshow={"slide_type": "fragment"}
# Note the two GTFS feeds now in your feeds object ready to download
# + slideshow={"slide_type": "fragment"}
feeds.to_dict()
# + [markdown] slideshow={"slide_type": "slide"}
# ### 3.2.3. Downloading GTFS data
# + [markdown] slideshow={"slide_type": "fragment"}
# Use the download function to download all the feeds in your feeds object at once. If no parameters are specified the existing feeds object will be used to acquire the data.
#
# By default, your data will be downloaded into the directory of this notebook in the folder: `data`
# + slideshow={"slide_type": "fragment"}
gtfsfeeds.download()
# + [markdown] slideshow={"slide_type": "slide"}
# ### 3.2.4 Load GTFS data into an UrbanAccess transit data object
# + [markdown] slideshow={"slide_type": "fragment"}
# Now that we have downloaded our data let's load our individual GTFS feeds (currently a series of text files stored on disk) into a combined network of Pandas DataFrames.
#
# - You can specify one feed or multiple feeds that are inside a root folder using the `gtfsfeed_path` parameter. If you want to aggregate multiple transit networks together, all the GTFS feeds you want to aggregate must be inside of a single root folder.
# - Turn on `validation` and set a bounding box with the `remove_stops_outsidebbox` parameter turned on to ensure all your GTFS feed data are within a specified area.
#
# Let's specify a bounding box of coordinates for the City of Oakland to subset the GTFS data to. You can generate a bounding box by going to http://boundingbox.klokantech.com/ and selecting the CSV format.
# + slideshow={"slide_type": "fragment"}
# Configuration for loading the downloaded GTFS feeds into DataFrames.
validation = True
verbose = True
# bbox for City of Oakland
# Format: (min lng, min lat, max lng, max lat), i.e. (west, south, east, north).
bbox = (-122.355881,37.632226,-122.114775,37.884725)
# Drop transit stops that fall outside the bounding box above.
remove_stops_outsidebbox = True
# Append human-readable definitions for coded GTFS values to the DataFrames.
append_definitions = True
# Load every feed found under the default root data folder
# (gtfsfeed_path=None) and aggregate them into one urbanaccess_gtfs_df
# object holding each GTFS file type as a separate DataFrame.
loaded_feeds = ua.gtfs.load.gtfsfeed_to_df(gtfsfeed_path=None,
                                           validation=validation,
                                           verbose=verbose,
                                           bbox=bbox,
                                           remove_stops_outsidebbox=remove_stops_outsidebbox,
                                           append_definitions=append_definitions)
# + [markdown] slideshow={"slide_type": "slide"}
# The output is a global `urbanaccess_gtfs_df` object that can be accessed with the specified variable `loaded_feeds`. This object holds all the individual GTFS feed files aggregated together with each GTFS feed file type in separate Pandas DataFrames to represent all the loaded transit feeds in a metropolitan area.
# + slideshow={"slide_type": "fragment"}
loaded_feeds.stops.head()
# + [markdown] slideshow={"slide_type": "fragment"}
# Note the two transit services we have aggregated into one regional table
# + slideshow={"slide_type": "fragment"}
loaded_feeds.stops.unique_agency_id.unique()
# + [markdown] slideshow={"slide_type": "fragment"}
# Quickly view the transit stop locations
# + slideshow={"slide_type": "fragment"}
loaded_feeds.stops.plot(kind='scatter', x='stop_lon', y='stop_lat', s=0.1)
# + slideshow={"slide_type": "fragment"}
loaded_feeds.routes.head()
# + slideshow={"slide_type": "fragment"}
loaded_feeds.stop_times.head()
# + slideshow={"slide_type": "fragment"}
loaded_feeds.trips.head()
# + slideshow={"slide_type": "fragment"}
loaded_feeds.calendar.head()
# + [markdown] slideshow={"slide_type": "slide"}
# ## 3.3. Create a transit network
# + [markdown] slideshow={"slide_type": "fragment"}
# Now that we have loaded and standardized our GTFS data, let's create a travel time weighted graph from the GTFS feeds we have loaded.
# + [markdown] slideshow={"slide_type": "fragment"}
# Create a network for weekday `monday` service between 7 am and 10 am (`['07:00:00', '10:00:00']`) to represent travel times during the AM Peak period.
#
# Assumptions: We are using the service ids in the `calendar` file to subset the day of week, however if your feed uses the `calendar_dates` file and not the `calendar` file then you can use the `calendar_dates_lookup` parameter. This is not required for AC Transit and BART.
# + slideshow={"slide_type": "fragment"}
# Build a travel-time-weighted transit graph for Monday AM-peak service
# (07:00-10:00). Results are stored on the global urbanaccess_network
# object as transit_edges / transit_nodes.
ua.gtfs.network.create_transit_net(gtfsfeeds_dfs=loaded_feeds,
                                   day='monday',
                                   timerange=['07:00:00', '10:00:00'],
                                   # AC Transit and BART both use the calendar
                                   # file, so no calendar_dates lookup is needed.
                                   calendar_dates_lookup=None)
# + [markdown] slideshow={"slide_type": "slide"}
# **The UrbanAccess network object**
# + [markdown] slideshow={"slide_type": "fragment"}
# The output is a global `urbanaccess_network` object. This object holds the resulting graph comprised of nodes and edges for the processed GTFS network data for services operating at the day and time you specified inside of `transit_edges` and `transit_nodes`.
# + [markdown] slideshow={"slide_type": "fragment"}
# Let's set the global network object to a variable called `urbanaccess_net` that we can then inspect:
# + slideshow={"slide_type": "fragment"}
urbanaccess_net = ua.network.ua_network
# + slideshow={"slide_type": "fragment"}
urbanaccess_net.transit_edges.head()
# + slideshow={"slide_type": "fragment"}
urbanaccess_net.transit_nodes.head()
# + slideshow={"slide_type": "fragment"}
urbanaccess_net.transit_nodes.plot(kind='scatter', x='x', y='y', s=0.1)
# + [markdown] slideshow={"slide_type": "slide"}
# ## 3.4. Create a pedestrian network
# + [markdown] slideshow={"slide_type": "fragment"}
# ### 3.4.1. OSM Data (the walk network)
# + [markdown] slideshow={"slide_type": "fragment"}
# Now let's download OpenStreetMap (OSM) pedestrian street network data to produce a graph network of nodes and edges for Oakland, CA. We will use the same bounding box as before.
# + slideshow={"slide_type": "fragment"}
nodes, edges = ua.osm.load.ua_network_from_bbox(bbox=bbox,
remove_lcn=True)
# + [markdown] slideshow={"slide_type": "slide"}
# Now that we have our pedestrian network data let's create a travel time weighted graph from the pedestrian network we have loaded and add it to our existing UrbanAccess network object. We will assume a pedestrian travels on average at 3 mph.
#
# The resulting weighted network will be added to your UrbanAccess network object inside `osm_nodes` and `osm_edges`
# + slideshow={"slide_type": "fragment"}
ua.osm.network.create_osm_net(osm_edges=edges,
osm_nodes=nodes,
travel_speed_mph=3)
# + [markdown] slideshow={"slide_type": "slide"}
# Let's inspect the results which we can access inside of the existing `urbanaccess_net` variable:
# + slideshow={"slide_type": "fragment"}
urbanaccess_net.osm_nodes.head()
# + slideshow={"slide_type": "fragment"}
urbanaccess_net.osm_edges.head()
# + slideshow={"slide_type": "fragment"}
urbanaccess_net.osm_nodes.plot(kind='scatter', x='x', y='y', s=0.1)
# + [markdown] slideshow={"slide_type": "slide"}
# ## 3.5. Create an integrated transit and pedestrian network
# + [markdown] slideshow={"slide_type": "fragment"}
# Now let's integrate the two networks together. The resulting graph will be added to your existing UrbanAccess network object. After running this step, your network will be ready to be used with Pandana.
#
# The resulting integrated network will be added to your UrbanAccess network object inside `net_nodes` and `net_edges`
# + slideshow={"slide_type": "fragment"}
ua.network.integrate_network(urbanaccess_network=urbanaccess_net,
headways=False)
# + [markdown] slideshow={"slide_type": "slide"}
# Let's inspect the results which we can access inside of the existing `urbanaccess_net` variable:
# + slideshow={"slide_type": "fragment"}
urbanaccess_net.net_nodes.head()
# + slideshow={"slide_type": "fragment"}
urbanaccess_net.net_edges.head()
# + slideshow={"slide_type": "fragment"}
urbanaccess_net.net_edges[urbanaccess_net.net_edges['net_type'] == 'transit'].head()
# + [markdown] slideshow={"slide_type": "slide"}
# ## 3.6 Reading/Writing from disk
# + [markdown] slideshow={"slide_type": "fragment"}
# You can save the final processed integrated network `net_nodes` and `net_edges` to disk inside of a HDF5 file. By default the file will be saved to the directory of this notebook in the folder `data`
# + slideshow={"slide_type": "fragment"}
ua.network.save_network(urbanaccess_network=urbanaccess_net,
filename='final_net.h5',
overwrite_key = True)
# + [markdown] slideshow={"slide_type": "fragment"}
# You can load an existing processed integrated network HDF5 file from disk into a UrbanAccess network object.
# + slideshow={"slide_type": "fragment"}
urbanaccess_net = ua.network.load_network(filename='final_net.h5')
# + [markdown] slideshow={"slide_type": "slide"}
# ## 3.7 Visualize the network
# + [markdown] slideshow={"slide_type": "fragment"}
# You can visualize the network you just created using basic UrbanAccess plot functions
# + [markdown] slideshow={"slide_type": "slide"}
# ### 3.7.1. Integrated network
# + slideshow={"slide_type": "fragment"}
ua.plot.plot_net(nodes=urbanaccess_net.net_nodes,
edges=urbanaccess_net.net_edges,
bbox=bbox,
fig_height=30, margin=0.02,
edge_color='#999999', edge_linewidth=1, edge_alpha=1,
node_color='black', node_size=1.1, node_alpha=1, node_edgecolor='none', node_zorder=3, nodes_only=False)
# + [markdown] slideshow={"slide_type": "slide"}
# ### 3.7.2. Integrated network by travel time
# + [markdown] slideshow={"slide_type": "fragment"}
# Use the `col_colors` function to color edges by travel time. In this case the darker red the higher the travel times.
# + [markdown] slideshow={"slide_type": "fragment"}
# Note the ability to see AC Transit's major bus arterial routes (in darker red) and transfer locations and BART rail network (rail stations are visible by the multiple bus connections at certain junctions in the network most visible in downtown Oakland at 19th, 12th Street, and Lake Merritt stations and Fruitvale and Coliseum stations) with the underlying pedestrian network. Downtown Oakland is located near the white cutout in the northeast middle section of the network which represents Lake Merritt.
# + slideshow={"slide_type": "fragment"}
edgecolor = ua.plot.col_colors(df=urbanaccess_net.net_edges, col='weight', cmap='gist_heat_r', num_bins=5)
ua.plot.plot_net(nodes=urbanaccess_net.net_nodes,
edges=urbanaccess_net.net_edges,
bbox=bbox,
fig_height=30, margin=0.02,
edge_color=edgecolor, edge_linewidth=1, edge_alpha=0.7,
node_color='black', node_size=0, node_alpha=1, node_edgecolor='none', node_zorder=3, nodes_only=False)
# + [markdown] slideshow={"slide_type": "slide"}
# Let's zoom in closer to downtown Oakland using a new smaller extent bbox. Note the bus routes on the major arterials and the BART routes from station to station.
# + slideshow={"slide_type": "fragment"}
edgecolor = ua.plot.col_colors(df=urbanaccess_net.net_edges, col='weight', cmap='gist_heat_r', num_bins=5)
ua.plot.plot_net(nodes=urbanaccess_net.net_nodes,
edges=urbanaccess_net.net_edges,
bbox=(-122.282295, 37.795, -122.258434, 37.816022),
fig_height=30, margin=0.02,
edge_color=edgecolor, edge_linewidth=1, edge_alpha=0.7,
node_color='black', node_size=0, node_alpha=1, node_edgecolor='none', node_zorder=3, nodes_only=False)
# + [markdown] slideshow={"slide_type": "slide"}
# ### 3.7.3. Transit network
# + [markdown] slideshow={"slide_type": "fragment"}
# You can also slice the network by network type
# + slideshow={"slide_type": "fragment"}
ua.plot.plot_net(nodes=urbanaccess_net.net_nodes,
edges=urbanaccess_net.net_edges[urbanaccess_net.net_edges['net_type']=='transit'],
bbox=None,
fig_height=30, margin=0.02,
edge_color='#999999', edge_linewidth=1, edge_alpha=1,
node_color='black', node_size=0, node_alpha=1, node_edgecolor='none', node_zorder=3, nodes_only=False)
# -
# ### 3.7.4. Pedestrian network
ua.plot.plot_net(nodes=urbanaccess_net.net_nodes,
edges=urbanaccess_net.net_edges[urbanaccess_net.net_edges['net_type']=='walk'],
bbox=None,
fig_height=30, margin=0.02,
edge_color='#999999', edge_linewidth=1, edge_alpha=1,
node_color='black', node_size=0, node_alpha=1, node_edgecolor='none', node_zorder=3, nodes_only=False)
# ### 3.7.5. Transit network: AC Transit Route 51A
# You can slice the network using any attribute in edges. In this case let's examine one route for AC Transit route 51A.
# Looking at what routes are in the network for 51A we see route id: `51A-141_ac_transit`
urbanaccess_net.net_edges['unique_route_id'].unique()
ua.plot.plot_net(nodes=urbanaccess_net.net_nodes,
edges=urbanaccess_net.net_edges[urbanaccess_net.net_edges['unique_route_id']=='51A-141_ac_transit'],
bbox=bbox,
fig_height=30, margin=0.02,
edge_color='#999999', edge_linewidth=1, edge_alpha=1,
node_color='black', node_size=0, node_alpha=1, node_edgecolor='none', node_zorder=3, nodes_only=False)
# ### 3.7.6. Transit network: BART network
# We can also slice the data by agency. In this case let's view all BART routes.
#
# Looking at what agencies are in the network for BART we see agency id: `bay_area_rapid_transit`
urbanaccess_net.net_edges['unique_agency_id'].unique()
ua.plot.plot_net(nodes=urbanaccess_net.net_nodes,
edges=urbanaccess_net.net_edges[urbanaccess_net.net_edges['unique_agency_id']=='bay_area_rapid_transit'],
bbox=bbox,
fig_height=30, margin=0.02,
edge_color='#999999', edge_linewidth=1, edge_alpha=1,
node_color='black', node_size=0, node_alpha=1, node_edgecolor='none', node_zorder=3, nodes_only=False)
# ## 3.8. Add average headways to network travel time
# ### 3.8.1. Calculate route stop level headways
# The network we have generated so far only contains pure travel times. UrbanAccess allows for the calculation of and addition of route stop level average headways to the network. This is used as a proxy for passenger wait times at stops and stations. The route stop level average headway are added to the pedestrian to transit connector edges.
# Let's calculate headways for the same AM Peak time period. Statistics on route stop level headways will be added to your GTFS transit data object inside of `headways`
ua.gtfs.headways.headways(gtfsfeeds_df=loaded_feeds,
headway_timerange=['07:00:00','10:00:00'])
loaded_feeds.headways.head()
# ### 3.8.2. Add the route stop level average headways to your integrated network
# Now that headways have been calculated and added to your GTFS transit feed object, you can use them to generate a new integrated network that incorporates the headways within the pedestrian to transit connector edge travel times.
ua.network.integrate_network(urbanaccess_network=urbanaccess_net,
headways=True,
urbanaccess_gtfsfeeds_df=loaded_feeds,
headway_statistic='mean')
# ### 3.8.3. Integrated network by travel time with average headways
edgecolor = ua.plot.col_colors(df=urbanaccess_net.net_edges, col='weight', cmap='gist_heat_r', num_bins=5)
ua.plot.plot_net(nodes=urbanaccess_net.net_nodes,
edges=urbanaccess_net.net_edges,
bbox=bbox,
fig_height=30, margin=0.02,
edge_color=edgecolor, edge_linewidth=1, edge_alpha=0.7,
node_color='black', node_size=0, node_alpha=1, node_edgecolor='none', node_zorder=3, nodes_only=False)
# ## 3.9. Using an UrbanAccess network with Pandana
# Pandana (Pandas Network Analysis) is a tool to compute network accessibility metrics.
#
# Now that we have an integrated transit and pedestrian network that has been formatted for use with Pandana, we can now use Pandana right away to compute accessibility metrics.
#
# There are a couple of things to remember about UrbanAccess and Pandana:
# - UrbanAccess generates by default a one way network. One way means there is an explicit edge for each direction in the edge table. Where applicable, it is important to set any Pandana `two_way` parameters to `False` (they are `True` by default) to indicate that the network is a one way network.
# - As of Pandana v0.3.0, `node ids` and `from` and `to` columns in your network must be integer type and not string. UrbanAccess automatically generates both string and integer types so use the `from_int` and `to_int` columns in edges and the index in nodes `id_int`.
# - UrbanAccess by default will generate edge weights that represent travel time in units of minutes.
#
# For more on Pandana see the:
#
# **Pandana repo:** https://github.com/UDST/pandana
#
# **Pandana documentation:** http://udst.github.io/pandana/
# ### 3.9.1. Load Census block data
# Let's load 2010 Census block data for the 9 county Bay Area. Note: These data have been processed from original Census and LEHD data.
#
# The data is located in the `demo` folder on the repo with this notebook.
blocks = pd.read_hdf('../data/bay_area_demo_data.h5','blocks')
# remove blocks that contain all water
blocks = blocks[blocks['square_meters_land'] != 0]
print('Total number of blocks: {:,}'.format(len(blocks)))
blocks.head()
# Let's subset the Census data to just be the bounding box for Oakland
# Unpack the Oakland bounding box. bbox is (min lng, min lat, max lng, max lat),
# i.e. (west, south, east, north) — name the components accordingly.
# (The original unpacking bound the minimum longitude to `lng_max` and the
# maximum to `lng_min`; the filter only worked because the comparison used
# the misnamed values consistently.)
lng_min, lat_min, lng_max, lat_max = bbox
# Flag blocks whose centroid falls outside the bounding box, then drop them,
# keeping only blocks strictly inside the Oakland extent.
outside_bbox = blocks.loc[~(((lng_min < blocks["x"]) & (blocks["x"] < lng_max)) & ((lat_min < blocks["y"]) & (blocks["y"] < lat_max)))]
blocks_subset = blocks.drop(outside_bbox.index)
print('Total number of subset blocks: {:,}'.format(len(blocks_subset)))
blocks_subset.plot(kind='scatter', x='x', y='y', s=0.1)
# ### 3.9.2. Initialize the Pandana network
# Let's initialize our Pandana network object using the transit and pedestrian network we created. Note the use of the `from_int` and `to_int` columns, as well as `twoway=False`, denoting that this is an explicit one-way network.
# Time the Pandana network construction.
s_time = time.time()
# Build the Pandana network from the integrated UrbanAccess graph.
# Integer node/edge ids (from_int / to_int) are required by Pandana >= 0.3,
# and twoway=False because UrbanAccess emits an explicit one-way edge list
# (one edge per direction).
transit_ped_net = pdna.Network(urbanaccess_net.net_nodes["x"],
                               urbanaccess_net.net_nodes["y"],
                               urbanaccess_net.net_edges["from_int"],
                               urbanaccess_net.net_edges["to_int"],
                               # Edge weights are travel times in minutes.
                               urbanaccess_net.net_edges[["weight"]],
                               twoway=False)
print('Took {:,.2f} seconds'.format(time.time() - s_time))
# Now let's set our blocks on to the network
# Snap each census block to its nearest network node for aggregation queries.
blocks_subset['node_id'] = transit_ped_net.get_node_ids(blocks_subset['x'], blocks_subset['y'])
# ### 3.9.3. Calculate cumulative accessibility
# Now let's compute an accessibility metric, in this case a cumulative accessibility metric. See Pandana for other metrics that can be calculated.
# Let's set the block variables we want to use as our accessibility metric on the Pandana network. In this case let's use `jobs`
transit_ped_net.set(blocks_subset.node_id, variable = blocks_subset.jobs, name='jobs')
# Now let's run a cumulative accessibility query using our network and the jobs variable for three different travel time thresholds: 15, 30, 45 minutes.
# Note: Depending on network size, radius threshold, computer processing power, and whether or not you are using multiple cores the compute process may take some time.
s_time = time.time()
jobs_45 = transit_ped_net.aggregate(45, type='sum', decay='linear', name='jobs')
jobs_30 = transit_ped_net.aggregate(30, type='sum', decay='linear', name='jobs')
jobs_15 = transit_ped_net.aggregate(15, type='sum', decay='linear', name='jobs')
print('Took {:,.2f} seconds'.format(time.time() - s_time))
# Quickly visualize the accessibility query results. As expected, note that a travel time of 15 minutes results in a lower number of jobs accessible at each network node.
print(jobs_45.head())
print(jobs_30.head())
print(jobs_15.head())
# ### 3.9.4. Jobs accessible within 15 minutes
# Note how the radius of the number of jobs accessible expands as the time threshold increases where high accessibility is indicated in dark red. You can easily see downtown Oakland has the highest accessibility due to a convergence of transit routes and because downtown is where the majority of jobs in the area are located. Other high accessibility areas are visible elsewhere directly adjacent to BART metro rail stations of West Oakland, Fruitvale, and Coliseum and AC Transit bus routes on the main arterial road corridors.
# +
s_time = time.time()
fig = plt.subplots(figsize=(20,20))
data_crs = ccrs.PlateCarree()
ax = plt.axes(projection=ccrs.epsg(26943))
ax.add_feature(cartopy.feature.GSHHSFeature(scale='full'), edgecolor='grey')
plt.scatter(transit_ped_net.nodes_df.x, transit_ped_net.nodes_df.y,
c=jobs_15, s=4, cmap='gist_heat_r', edgecolor='none', transform=data_crs)
cb = plt.colorbar()
print('Took {:,.2f} seconds'.format(time.time() - s_time))
# + [markdown] pycharm={"name": "#%% md\n"}
# ### 3.9.5. Jobs accessible within 30 minutes
# +
s_time = time.time()
fig = plt.subplots(figsize=(20,20))
data_crs = ccrs.PlateCarree()
ax = plt.axes(projection=ccrs.epsg(26943))
ax.add_feature(cartopy.feature.GSHHSFeature(scale='full'), edgecolor='grey')
plt.scatter(transit_ped_net.nodes_df.x, transit_ped_net.nodes_df.y,
c=jobs_30, s=4, cmap='gist_heat_r', edgecolor='none', transform=data_crs)
cb = plt.colorbar()
print('Took {:,.2f} seconds'.format(time.time() - s_time))
# -
# ### 3.9.6. Jobs accessible within 45 minutes
# +
s_time = time.time()
fig = plt.subplots(figsize=(20,20))
data_crs = ccrs.PlateCarree()
ax = plt.axes(projection=ccrs.epsg(26943))
ax.add_feature(cartopy.feature.GSHHSFeature(scale='full'), edgecolor='grey')
plt.scatter(transit_ped_net.nodes_df.x, transit_ped_net.nodes_df.y,
c=jobs_45, s=4, cmap='gist_heat_r', edgecolor='none', transform=data_crs)
cb = plt.colorbar()
print('Took {:,.2f} seconds'.format(time.time() - s_time))
# -
# # 4. Questions?
# # 5. For next time
# + [markdown] slideshow={"slide_type": "slide"}
# ### Readings
# 1. MAUP
# - https://sci-hubtw.hkvisa.net/10.1016/b978-008044910-4.00475-2
# - https://www.gislounge.com/modifiable-areal-unit-problem-gis/
# 2. How to lie with Maps
# - https://www.nytimes.com/interactive/2020/10/30/opinion/election-results-maps.html
# 3. Choropleth maps
# - https://geographicdata.science/book/notebooks/05_choropleth.html
# - https://www.axismaps.com/guide/choropleth
# 4. Dot Density Maps
# - https://www.axismaps.com/guide/dot-density
# - http://andrewgaidus.com/Dot_Density_County_Maps/
| notebooks/lecture_17_networks_contd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np  # was `import numpy as pd`, immediately shadowed by pandas; `np` is used below (np.sin, np.sqrt, np.mean)
import pandas as pd
import matplotlib.pyplot as plt
from pyearth import Earth
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from sklearn.model_selection import KFold
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import mean_squared_error
# -
cowpea = pd.read_excel('../data/prepared.xlsx', sheet_name='Cowpea') ; cowpea['crop'] = 'cowpea'
maize = pd.read_excel('../data/prepared.xlsx', sheet_name='Maize') ; maize['crop'] = 'maize'
rice = pd.read_excel('../data/prepared.xlsx', sheet_name='Rice') ; rice['crop'] = 'rice'
chickpea = pd.read_excel('../data/prepared.xlsx', sheet_name='Chickpea'); chickpea['crop'] = 'chickpea'
mustard = pd.read_excel('../data/prepared.xlsx', sheet_name='Mustard') ; mustard['crop'] = 'mustard'
# data = pd.concat([cowpea, rice, maize, chickpea, mustard], axis=0).reset_index(drop=True)
data = pd.concat([cowpea, rice, maize, chickpea, mustard], axis=0).reset_index(drop=True)
# +
# Drop rows flagged as outliers by Local Outlier Factor on the (GSR, CT)
# plane: fit_predict returns +1 for inliers and -1 for outliers, so a
# boolean mask keeps the inliers directly. The original row-by-row loop
# rebuilt the frame with pd.DataFrame(list_of_rows), which is O(n)
# Python-level appends and can coerce column dtypes to object.
clf = LocalOutlierFactor(n_neighbors=20)
inlier_mask = clf.fit_predict(data[['GSR', 'CT']]) == 1
data = data[inlier_mask]
# -
# Manual outlier pruning: drop physically implausible (GSR, Rn) combinations,
# with crop-specific thresholds chosen by inspection of the scatter plots.
data = data[(data['GSR']<300) | (data['Rn']>150)]
data = data[(data['Rn']<500) | (data['crop']!='cowpea')]
data = data[(data['Rn']<400) | (data['GSR']>500) | (data['crop']!='rice')]
data = data[(data['Rn']<300) | (data['GSR']>375)]
# Reduce timestamps to the hour, then encode the hour cyclically so that
# adjacent hours (and the wrap-around) stay close in feature space.
# NOTE(review): the 2*pi/12 period assumes a 12-hour cycle — confirm the
# observations only cover a 12-hour daylight window.
data.loc[:, 'Time'] = data.loc[:, 'Time'].apply(lambda x: x.hour)
data.loc[:, 'timesin'] = np.sin(data.loc[:, 'Time'] * (2 * np.pi) / 12)
data.loc[:, 'timecos'] = np.cos(data.loc[:, 'Time'] * (2 * np.pi) / 12)
# One-hot encode the crop label and prepend the dummy columns.
df = pd.get_dummies(data[['crop']])
data = pd.concat([df, data], axis=1)
# Robust (median/IQR) scaling; separate scalers for features and target so
# predictions can be inverse-transformed back to target units later.
scalerx = RobustScaler()
scalery = RobustScaler()
data[['GSR','CT',]] = scalerx.fit_transform(data[['GSR','CT']])
data[['Rn']] = scalery.fit_transform(data[['Rn']])
# Model matrix: everything except raw soil temps, date/time, the raw crop
# label, and the target Rn.
feature_cols = [c for c in data.columns if c not in ['ST_5cm','ST_10cm','ST_15cm','Date','Time','crop','Rn']]
X = data[feature_cols]
y = data['Rn']
#Fit an Earth model
# MARS (py-earth) regression: up to degree-5 interactions, smoothed basis
# functions; thresh=0. disables the forward-pass improvement threshold, and
# feature importances are computed from GCV.
model = Earth(max_degree=5, minspan_alpha=0.01, smooth=True, thresh=0., feature_importance_type='gcv')
model.fit(X,y)
#Print the model
print(model.trace())
print(model.summary())
# Bare expression: displays the importances when run as a notebook cell.
model.feature_importances_
# 10-fold CV without shuffling, so folds follow the current row order.
kfold = KFold(n_splits=10)
# +
all_stacked_mses = []
all_stacked_rmses = []
# Cross-validated error of the Earth (regression spline) model, reported in
# original target units by inverse-transforming predictions and truth.
for (t_, v_) in kfold.split(X, y):
    model.fit(X.iloc[t_], y.iloc[t_])
    # X already holds only feature_cols, so the extra [feature_cols] is a no-op.
    y_pred = scalery.inverse_transform(model.predict(X.iloc[v_][feature_cols]).reshape(-1,1))
    y_true = scalery.inverse_transform(y.iloc[v_].values.reshape(-1,1))
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    all_stacked_mses.append(mse)
    all_stacked_rmses.append(rmse)
    # print("Regression spline MSE:", mse)
    # print("Regression spline RMSE:", rmse)
    # print("\n")
print("Regression spline mean MSE:", np.mean(all_stacked_mses))
print("Regression spline mean RMSE:", np.mean(all_stacked_rmses))
# +
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
# Baseline tree ensembles for comparison (default hyperparameters).
reg_tree1 = RandomForestRegressor()
reg_tree2 = AdaBoostRegressor()
# +
all_stacked_mses = []
all_stacked_rmses = []
# Same 10-fold CV protocol as the spline model, for the random forest.
for (t_, v_) in kfold.split(X, y):
    reg_tree1.fit(X.iloc[t_], y.iloc[t_])
    y_pred = scalery.inverse_transform(reg_tree1.predict(X.iloc[v_][feature_cols]).reshape(-1,1))
    y_true = scalery.inverse_transform(y.iloc[v_].values.reshape(-1,1))
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    all_stacked_mses.append(mse)
    all_stacked_rmses.append(rmse)
    # print("RF MSE:", mse)
    # print("RF RMSE:", rmse)
    # print("\n")
print("RF mean MSE:", np.mean(all_stacked_mses))
print("RF mean RMSE:", np.mean(all_stacked_rmses))
# +
#Plot the model
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(30,10))
y_hat = model.predict(X)
axes[0].plot(X.iloc[:,5],y,'r.')
axes[0].plot(X.iloc[:,5],y_hat,'b.')
axes[0].set_xlabel('x_6')
axes[0].set_ylabel('y')
axes[0].set_title('Regression Spline')
y_hat_tree = reg_tree1.predict(X)
axes[1].plot(X.iloc[:,5],y,'r.')
axes[1].plot(X.iloc[:,5],y_hat_tree,'b.')
axes[1].set_xlabel('x_6')
axes[1].set_ylabel('y')
axes[1].set_title('Random Forest')
plt.show()
# +
all_stacked_mses = []
all_stacked_rmses = []
# Ensemble ("stacked") average of RF, Earth and AdaBoost predictions under
# the same 10-fold CV protocol.
for (t_, v_) in kfold.split(X, y):
    model.fit(X.iloc[t_], y.iloc[t_])
    reg_tree1.fit(X.iloc[t_], y.iloc[t_])
    reg_tree2.fit(X.iloc[t_], y.iloc[t_])
    y_pred1 = scalery.inverse_transform(reg_tree1.predict(X.iloc[v_][feature_cols]).reshape(-1,1))
    y_pred2 = scalery.inverse_transform(model.predict(X.iloc[v_][feature_cols]).reshape(-1,1))
    y_pred3 = scalery.inverse_transform(reg_tree2.predict(X.iloc[v_][feature_cols]).reshape(-1,1))
    # Weighted blend: (1*RF + 0.5*Earth + 0.5*AdaBoost)/2, i.e. weights
    # 0.5 / 0.25 / 0.25 (they sum to 1). NOTE(review): confirm these weights
    # are intended — the final plot cell uses a different 1/3 RF, 2/3 Earth
    # blend with AdaBoost omitted.
    y_pred = (y_pred1+y_pred2*0.5+y_pred3*0.5)/2
    y_true = scalery.inverse_transform(y.iloc[v_].values.reshape(-1,1))
    mse = mean_squared_error(y_true, y_pred)
    rmse = np.sqrt(mse)
    all_stacked_mses.append(mse)
    all_stacked_rmses.append(rmse)
    print("Stacked estimator MSE:", mse)
    print("Stacked estimator RMSE:", rmse)
    print("\n")
print("Stacked estimator mean MSE:", np.mean(all_stacked_mses))
print("Stacked estimator mean RMSE:", np.mean(all_stacked_rmses))
# -
#Plot the model
# Final blended prediction on the full dataset: weights 1/3 RF, 2/3 Earth,
# AdaBoost omitted. NOTE(review): inconsistent with the CV-loop blend above
# (0.5/0.25/0.25 over three models) — confirm which weighting is canonical.
# Predictions here stay in scaled units (no scalery.inverse_transform),
# matching the scaled y plotted alongside.
y_pred_combined = (reg_tree1.predict(X) + model.predict(X)*2.)/3.
plt.figure()
plt.plot(X.iloc[:,5],y,'r.')
plt.plot(X.iloc[:,5],y_pred_combined,'b.')
plt.xlabel('x_6')
plt.ylabel('y')
plt.title('Simple Earth Example')
plt.show()
| notebooks/fifth_wave_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Importing required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pickle
import re
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from imblearn.over_sampling import SMOTE
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.metrics import plot_confusion_matrix,accuracy_score,confusion_matrix
from skopt import BayesSearchCV
from IPython.display import display
# ## Reading Dataset
df=pd.read_csv("bbc-text.csv")
df
# ## Data preprocessing
# #### By using Label encoder encoding Output feature
encoder=LabelEncoder()
df["category_labels"]=encoder.fit_transform(df["category"])
df
encoder.classes_
# #### Removing stopwords, unwanted spaces and characters from data
def create_corpus(sent):
    """Normalize one document: strip non-letters, lowercase, drop English
    stopwords, and lemmatize each remaining word.

    Relies on the module-level WordNet lemmatizer ``le`` being defined
    before this is called. Returns the cleaned, space-joined string.
    """
    # [^a-zA-Z] keeps ASCII letters only. The original pattern was
    # "[^a-zA-z]": the A-z range also spans the characters [ \ ] ^ _ `
    # that sit between 'Z' and 'a' in ASCII, so those slipped through.
    sent = re.sub("[^a-zA-Z]", " ", sent)
    sent = sent.lower()
    words = sent.split()
    # Build the stopword set once per document instead of re-reading the
    # NLTK stopword corpus for every single word.
    stops = set(stopwords.words("english"))
    words = [le.lemmatize(word) for word in words if word not in stops]
    return " ".join(words)
# %%time
# Lemmatizer used inside create_corpus (looked up at module level by design).
le = WordNetLemmatizer()
# Clean every article in the 'text' column into the training corpus.
corpus = [create_corpus(document) for document in df["text"]]
# #### Converting text data into numerical data using TF-IDF vectorizer
# %%time
tfidf = TfidfVectorizer(max_features=5000,ngram_range=(1,3))
X = tfidf.fit_transform(corpus).toarray()
y = df["category_labels"]
print(X.shape)
print(y.shape)
# #### Handling imbalanced classes by upsampling with SMOTE (Synthetic Minority Over-sampling TEchnique)
df["category"].value_counts()
sm = SMOTE(random_state=753)
X_sampled, y_sampled = sm.fit_resample(X, y)
y_sampled.value_counts()
print(X_sampled.shape)
print(y_sampled.shape)
# ## Spliting Dataset into train and test data
X_train, X_test, y_train, y_test = train_test_split(X_sampled, y_sampled, test_size=0.20, random_state=951)
# ## Model creation
def create_model(model):
    """Fit ``model`` on the module-level train split and report train/test
    accuracy plus a confusion matrix for the test split.

    Parameters
    ----------
    model : a scikit-learn style estimator (fit / score / predict).
    """
    model.fit(X_train,y_train)
    print("Training Accuracy: ",model.score(X_train,y_train)*100)
    pred = model.predict(X_test)
    print(f"Testing Accuracy: {accuracy_score(y_test,pred)*100}")
    print("Confusion matrix of Testing dataset")
    # plot_confusion_matrix was removed in scikit-learn 1.2; the
    # ConfusionMatrixDisplay API produces the identical plot.
    from sklearn.metrics import ConfusionMatrixDisplay
    ConfusionMatrixDisplay.from_estimator(model,X_test,y_test,cmap="Blues",xticks_rotation=60)
# Train and evaluate each candidate classifier with default hyperparameters.
# #### K-Nearest Neighbors
knn=KNeighborsClassifier()
create_model(knn)
# #### Multinomial Naive Bayes
mnb=MultinomialNB()
create_model(mnb)
# #### Decision tree
dt=DecisionTreeClassifier()
create_model(dt)
# #### Random forest
rf=RandomForestClassifier()
create_model(rf)
# #### SVC (Support Vector Classification)
svc=SVC()
create_model(svc)
# #### Hyperparameter optimization using a Bayesian algorithm with cross-validation on Multinomial Naive Bayes
# +
# %%time
# BayesSearchCV samples `alpha` from the (0.01, 1.0) range, evaluating
# 16 candidates with 3-fold cross-validation each.
opt_mnb = BayesSearchCV(
    MultinomialNB(),
    {
        'alpha': (0.01, 1.0),
    },
    n_iter=16,
    cv=3
)
opt_mnb.fit(X_train, y_train)
print("Best validation score: %s" % opt_mnb.best_score_)
print("testdata score: %s" % opt_mnb.score(X_test, y_test))
# -
# #### Doing prediction on an example
def make_prediction():
    """Read one article from stdin, print the tuned Naive Bayes model's
    predicted category, then display the per-class probabilities."""
    text = input()
    vec = tfidf.transform([text])
    best = opt_mnb.best_estimator_
    predicted_label = best.predict(vec)[0]
    print(f"output: {encoder.classes_[predicted_label]}\n\n")
    probabilities = best.predict_proba(vec)[0].round(2)
    table = pd.DataFrame(probabilities, encoder.classes_, columns=["Probability"])
    display(table.sort_values(by="Probability", ascending=False))
make_prediction()
# Save the final model and vectorizer as .pkl files so the model does not
# have to be retrained before serving predictions.
# FIX: the original passed bare open(...) handles to pickle.dump and never
# closed them; `with` guarantees the files are flushed and closed.
with open("model.pkl", "wb") as model_file:
    pickle.dump(opt_mnb, model_file)
with open("vectorizer.pkl", "wb") as vectorizer_file:
    pickle.dump(tfidf, vectorizer_file)
| backend/model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.lib.deepreload import reload
# %load_ext autoreload
# %autoreload 2
# +
import re
import operator
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pydicom
from pymedphys_analysis.tpscompare import load_and_normalise_mephysto
from pymedphys_dicom.dicom import depth_dose, profile
# +
# Directory layout for the model-vs-measurement comparison study.
ROOT_DIR = Path(r"S:\Physics\Monaco\Model vs Measurement Comparisons")
MONACO_DICOM_DIR = ROOT_DIR.joinpath(r"Beam Models\CCA Monaco Collapsed Cone")
DOSECHECK_DICOM_DIR = ROOT_DIR.joinpath(r"Beam Models\DoseCHECK")
MEASUREMENTS_DIR = ROOT_DIR.joinpath(r"Measurements\RCCC\Photons")
RESULTS = ROOT_DIR.joinpath(r"Results\RCCC\dosecheck\Collapsed Cone")
MONACO_DOSE_DIR = MONACO_DICOM_DIR.joinpath("DICOM dose exports")
# -
# Absolute calibration dose per energy (the 'd10 @ 90 SSD' column).
calibrated_doses_table = pd.read_csv(MEASUREMENTS_DIR.joinpath('AbsoluteDose.csv'), index_col=0)
calibrated_doses = calibrated_doses_table['d10 @ 90 SSD']
calibrated_doses
# Wedge transmission factor per energy (first data column of the CSV).
wedge_transmission_table = pd.read_csv(MEASUREMENTS_DIR.joinpath('WedgeTransmissionFactors.csv'), index_col=0)
data_column_name = wedge_transmission_table.columns[0]
wedge_transmissions = wedge_transmission_table[data_column_name]
wedge_transmissions
# Output factors indexed by energy, one column per "<field> <block>"
# combination (see the lookup in the absolute-dose loop below).
output_factors = pd.read_csv(MEASUREMENTS_DIR.joinpath('OutputFactors.csv'), index_col=0)
output_factors
# +
# One key per Monaco dose export (filename without extension),
# e.g. "06MV 10x10 Open" — parsed by the regex in the next cell.
keys = [
    path.stem
    for path in MONACO_DOSE_DIR.glob('*.dcm')
]
keys
# +
# Filenames look like "06MV 10x10 Open" or "06MVFFF 10x10 Wedge".
# FIX: the original pattern wrapped FFF as (\bFFF\b)?, but there is no
# word boundary between "MV" and "FFF", so FFF keys could never match
# (re.match returned None and .group() raised AttributeError). Dropping
# the \b handles FFF keys while leaving non-FFF keys unchanged, and the
# group numbers used below are preserved (1=energy, 3=field, 4=block).
regex_string = r'(\d\dMV(FFF)?) (\d\dx\d\d) (Open|Wedge)'

def get_energy_field_block(key):
    """Split a dose-export key into (energy, field size, 'Open'/'Wedge')."""
    match = re.match(regex_string, key)
    return match.group(1), match.group(3), match.group(4)
# +
# Absolute dose per field = calibration dose x output factor x wedge
# transmission (wedge transmission only applied to wedged fields).
absolute_doses = {}
for key in keys:
    energy, field, block = get_energy_field_block(key)
    if block == 'Wedge':
        wtf = wedge_transmissions[energy]
    else:
        wtf = 1  # open fields: no wedge attenuation
    output_factor = output_factors[f'{field} {block}'][energy]
    calibrated_dose = calibrated_doses[energy]
    absolute_dose = calibrated_dose * output_factor * wtf
    absolute_doses[key] = absolute_dose
absolute_doses
# -
# Pulls the (displacement, dose) pair out of a mephysto scan dict.
getter = operator.itemgetter('displacement', 'dose')
# Load the measured scans, normalised to the absolute doses computed above
# (the trailing 100 is presumably the normalisation argument — TODO confirm
# against load_and_normalise_mephysto's signature).
absolute_scans_per_field = load_and_normalise_mephysto(
    MEASUREMENTS_DIR, r'(\d\dMV(\bFFF\b)? \d\dx\d\d ((\bOpen\b)|(\bWedge\b)))\.mcc', absolute_doses, 100)
new_keys = list(absolute_scans_per_field.keys())
new_keys
# The measurement files must cover exactly the same fields as the DICOM exports.
assert new_keys == keys
def load_dicom_files(directory, keys):
    """Read ``{key}.dcm`` from ``directory`` for every key.

    Returns a dict mapping key -> pydicom dataset. ``force=True`` lets
    pydicom parse exports that lack the standard 128-byte preamble.
    """
    # pydicom.read_file is a deprecated alias (removed in pydicom 3.x);
    # dcmread is the supported spelling of the same call. The original
    # also built an intermediate path dict that was only used once.
    return {
        key: pydicom.dcmread(str(directory.joinpath(f'{key}.dcm')), force=True)
        for key in keys
    }
# Dose grids from both systems, plus the plan they were computed for.
# NOTE(review): pydicom.read_file is a deprecated alias of dcmread.
monaco_dicom_dataset_map = load_dicom_files(MONACO_DOSE_DIR, keys)
dosecheck_dicom_dataset_map = load_dicom_files(DOSECHECK_DICOM_DIR, keys)
dicom_plan = pydicom.read_file(str(MONACO_DICOM_DIR.joinpath('plan.dcm')), force=True)
# +
def plot_one_axis(ax, displacement, meas_dose, model_dose):
    """Plot measured vs modelled dose on ``ax``, with percent residuals
    on a twinned right-hand axis. Returns the twin axis."""
    residual_pct = 100 * (model_dose - meas_dose) / meas_dose

    handles = ax.plot(displacement, meas_dose, label='Measured Dose')
    handles += ax.plot(displacement, model_dose, label='Model Dose')
    ax.set_ylabel('Dose (Gy / 100 MU)')

    x_bounds = [np.min(displacement), np.max(displacement)]
    ax.set_xlim(x_bounds)

    # Residuals share the x axis but get their own y scale on the right.
    ax_twin = ax.twinx()
    handles += ax_twin.plot(
        displacement, residual_pct, color='C3', alpha=0.5,
        label=r'% Residuals [100 $\times$ (Model - Meas) / Meas]')
    ax_twin.plot(x_bounds, [0, 0], '--', color='C3', lw=0.5)  # zero-residual guide
    ax_twin.set_ylabel(r'% Dose difference [100 $\times$ (Model - Meas) / Meas]')

    ax.legend(handles, [line.get_label() for line in handles], loc='lower left')
    return ax_twin
def plot_tps_meas_diff(displacement, meas_dose, internal_tps_dose, external_tps_dose):
    """Side-by-side comparison of measurement against two TPS doses
    (left: internal/Monaco, right: external/DoseCHECK).

    Returns (fig, ax) where ax is the pair of primary axes.
    """
    fig, ax = plt.subplots(1, 2, figsize=(16,6), sharey=True)
    # sharey=True hides the right plot's tick labels; re-enable them.
    ax[1].yaxis.set_tick_params(which='both', labelbottom=True)
    ax_twin = list()
    ax_twin.append(plot_one_axis(ax[0], displacement, meas_dose, internal_tps_dose))
    ax_twin.append(plot_one_axis(ax[1], displacement, meas_dose, external_tps_dose))
    # FIX: Axes.sharey replaces get_shared_y_axes().join(...), which was
    # deprecated in matplotlib 3.6 and removed from the public Grouper
    # API in 3.8. Setting the limits afterwards propagates to both twins.
    ax_twin[1].sharey(ax_twin[0])
    ax_twin[1].set_ylim([-5, 5])
    plt.tight_layout()
    plt.subplots_adjust(wspace=0.4, top=0.86)
    return fig, ax
def plot_pdd_diff(key, dicom_plan):
    """Build the depth-dose comparison figure (Monaco vs DoseCHECK) for
    one field key, titled with the key."""
    depth, measured = getter(absolute_scans_per_field[key]['depth_dose'])
    # /10 rescales the DICOM dose grid to the measurement units — confirm factor.
    monaco = depth_dose(depth, monaco_dicom_dataset_map[key], dicom_plan) / 10
    dosecheck = depth_dose(depth, dosecheck_dicom_dataset_map[key], dicom_plan) / 10

    fig, ax = plot_tps_meas_diff(depth, measured, monaco, dosecheck)
    fig.suptitle(f'Depth Dose Comparisons | {key}', fontsize="x-large")
    ax[0].set_title("Monaco Collapsed Cone")
    ax[1].set_title("DoseCHECK")
# Render and save a depth-dose comparison PNG for every field.
for key in keys:
    plot_pdd_diff(key, dicom_plan)
    filename = RESULTS.joinpath(f'{key}_pdd.png')
    plt.savefig(filename)
    plt.show()
# +
def plot_profile_diff(key, depth, direction):
    """Build the profile comparison figure (Monaco vs DoseCHECK) for one
    field key at one depth, in the given scan direction
    ('inplane' or 'crossplane')."""
    displacement, measured = getter(absolute_scans_per_field[key]['profiles'][depth][direction])
    # /10 rescales the DICOM dose grid to the measurement units — confirm factor.
    monaco = profile(displacement, depth, direction, monaco_dicom_dataset_map[key], dicom_plan) / 10
    dosecheck = profile(displacement, depth, direction, dosecheck_dicom_dataset_map[key], dicom_plan) / 10

    fig, ax = plot_tps_meas_diff(displacement, measured, monaco, dosecheck)
    fig.suptitle(f'{direction.capitalize()} Profile Comparisons | {key} | Depth: {depth} mm', fontsize="x-large")
    ax[0].set_title("Monaco Collapsed Cone")
    ax[1].set_title("DoseCHECK")
# Render and save every profile comparison (each field x depth x direction).
for key in keys:
    depths = absolute_scans_per_field[key]['profiles'].keys()
    for depth in depths:
        for direction in ['inplane', 'crossplane']:
            plot_profile_diff(key, depth, direction)
            filename = RESULTS.joinpath(f'{key}_profile_{depth}mm_{direction}.png')
            plt.savefig(filename)
            plt.show()
# -
| examples/archive/tpscompare/DoseCHECK/CollapsedCone.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import ipyvolume as ipv
# We will render a low resolution scan of a head, which will display quite quickly (since the data size is small). If we want to see a higher resolution, we can zoom in.
#
# Volume-render the head scan; more ray steps gives a finer (but slower) render.
fig = ipv.figure()
vol_head = ipv.examples.head(max_shape=128);
vol_head.ray_steps = 800
# # Zoom
# Zoom in by clicking the magnifying icon, or keep the alt/option key pressed. After zooming in, the higher resolution version cutout will be displayed.
ds = ipv.datasets.aquariusA2.fetch().data
# # Multivolume rendering
# Since version 0.5, ipyvolume supports multivolume rendering, so we can render two volumetric datasets at the same time.
vol_data = ipv.volshow(ds, extent=vol_head.extent, max_shape=128)
# v0.5 also supports maximum intensity projection as a rendering method
vol_data.rendering_method = 'MAX_INTENSITY'
| notebooks/demo-0.5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["remove-input", "active-ipynb", "remove-output"]
# try:
# from openmdao.utils.notebook_utils import notebook_mode
# except ImportError:
# !python -m pip install openmdao[notebooks]
# -
# # How OpenMDAO Represents Variables
#
# In general, a numerical model can be complex, multidisciplinary, and heterogeneous.
# It can be decomposed into a series of smaller computations that are chained together by passing variables from one to the next.
#
# In OpenMDAO, we perform all these numerical calculations inside a [Component](../../features/core_features/working_with_components/main), which represents the smallest unit of computational work the framework understands. Each component will output its own set of variables. Depending on which type of calculation you're trying to represent, OpenMDAO provides different kinds of components for you to work with.
#
# ## A Simple Numerical Model
#
# In order to understand the different kinds of components in OpenMDAO,
# let us consider the following numerical model that takes `x` as an input:
#
# $$
# \begin{array}{l l}
# y \quad \text{is computed by solving:} &
# \cos(x \cdot y) - z \cdot y = 0 \\
# z \quad \text{is computed by evaluating:} &
# z = \sin(y) .
# \end{array}
# $$
#
# ## The Three Types of Components
#
# In our numerical model, we have three variables: `x`, `y`, and `z`. Each of these variables needs to be defined as the output of a component. There are three basic types of components in OpenMDAO:
#
#
# 1. [IndepVarComp](../../features/core_features/working_with_components/indepvarcomp) : defines independent variables (e.g., x)
# 2. [ExplicitComponent](../../features/core_features/working_with_components/explicit_component) : defines dependent variables that are computed explicitly (e.g., z)
# 3. [ImplicitComponent](../../features/core_features/working_with_components/implicit_component) : defines dependent variables that are computed implicitly (e.g., y)
#
#
# The most straightforward way to implement the numerical model would be to assign each variable its own component, as below.
#
# | No. | Component Type | Inputs | Outputs |
# |-----|-------------------|--------|---------|
# | 1 | IndepVarComp | | x |
# | 2 | ImplicitComponent | x, z | y |
# | 3 | ExplicitComponent | y | z |
#
#
# Another way that is also valid would be to have one component compute both y and z explicitly,
# which would mean that this component solves the implicit equation for y internally.
#
# | No. | Component Type | Inputs | Outputs |
# |-----|-------------------|--------|---------|
# | 1 | IndepVarComp | | x |
# | 2 | ExplicitComponent | x | y, z |
#
# Both ways would be valid, but the first way is recommended.
# The second way requires the user to solve y and z together, and computing the derivatives of y and z with respect to x is non-trivial. The first way would also require implicitly solving for y, but an OpenMDAO solver could converge that for you. Moreover, for the first way, OpenMDAO would automatically combine and assemble the derivatives from components 2 and 3.
| openmdao/docs/openmdao_book/basic_user_guide/single_disciplinary_optimization/component_types.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Machine Learning Engineer Nanodegree
# ## Unsupervised Learning
# ## Project: Creating Customer Segments
# Welcome to the third project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `'TODO'` statement. Please be sure to read the instructions carefully!
#
# In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
#
# >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
# ## Getting Started
#
# In this project, you will analyze a dataset containing data on various customers' annual spending amounts (reported in *monetary units*) of diverse product categories for internal structure. One goal of this project is to best describe the variation in the different types of customers that a wholesale distributor interacts with. Doing so would equip the distributor with insight into how to best structure their delivery service to meet the needs of each customer.
#
# The dataset for this project can be found on the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers). For the purposes of this project, the features `'Channel'` and `'Region'` will be excluded in the analysis — with focus instead on the six product categories recorded for customers.
#
# Run the code block below to load the wholesale customers dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.
# +
# Import libraries necessary for this project
# NOTE(review): this notebook is Python 2 (print statements).
import numpy as np
import pandas as pd
from IPython.display import display # Allows the use of display() for DataFrames
# Import supplementary visualizations code visuals.py
import visuals as vs
# Pretty display for notebooks
# %matplotlib inline
# Load the wholesale customers dataset
try:
    data = pd.read_csv("customers.csv")
    # Removing features Region and Channel
    data.drop(['Region', 'Channel'], axis = 1, inplace = True)
    print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape)
except:
    # NOTE(review): a bare except hides any error, not just a missing file.
    print "Dataset could not be loaded. Is the dataset missing?"
# ## Data Exploration
# In this section, you will begin exploring the data through visualizations and code to understand how each feature is related to the others. You will observe a statistical description of the dataset, consider the relevance of each feature, and select a few sample data points from the dataset which you will track through the course of this project.
#
# Run the code block below to observe a statistical description of the dataset. Note that the dataset is composed of six important product categories: **'Fresh'**, **'Milk'**, **'Grocery'**, **'Frozen'**, **'Detergents_Paper'**, and **'Delicatessen'**. Consider what each category represents in terms of products you could purchase.
# Display a description of the dataset
# (count/mean/std/min/quartiles/max for each of the six categories).
display(data.describe())
# ### Implementation: Selecting Samples
# To get a better understanding of the customers and how their data will transform through the analysis, it would be best to select a few sample data points and explore them in more detail. In the code block below, add **three** indices of your choice to the `indices` list which will represent the customers to track. It is suggested to try different sets of samples until you obtain customers that vary significantly from one another.
# +
# TODO: Select three indices of your choice you wish to sample from the dataset
indices = [10, 131, 299]
# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print "Chosen samples of wholesale customers dataset:"
display(samples)
# Positive values below mean the sample spends more than a typical customer.
print "Comparing product to the mean"
display(samples - np.around(data.mean()))
print "Comparing product to the median"
display(samples - np.around(data.median()))
print "Quantiles"
display(samples.quantile([.1, .5]))
# -
# ### Question 1
# Consider the total purchase cost of each product category and the statistical description of the dataset above for your sample customers.
#
# * What kind of establishment (customer) could each of the three samples you've chosen represent?
#
# **Hint:** Examples of establishments include places like markets, cafes, delis, wholesale retailers, among many others. Avoid using names for establishments, such as saying *"McDonalds"* when describing a sample customer as a restaurant. You can use the mean values for reference to compare your samples with. The mean values are as follows:
#
# * Fresh: 12000.3
# * Milk: 5796.3
# * Grocery: 7951.3
# * Frozen: 3071.9
# * Detergents_paper: 2881.5
# * Delicatessen: 1524.9
#
# Knowing this, how do your samples compare? Does that help in driving your insight into what kind of establishments they might be?
#
# **Answer:**
#
# 1 - SuperMarket
# * Fresh: 3366
# * Milk: 5403
# * Grocery: 12974
# * Frozen: 4400
# * Detergents_paper: 5977
# * Delicatessen: 1744
# <br/>
# Given the large spending across most product categories, I believe this customer is a supermarket.
#
# 2 - Restaurant
# * Fresh: 2101
# * Milk: 589
# * Grocery: 314
# * Frozen: 346
# * Detergents_paper: 70
# * Delicatessen: 310
# <br/>
# The large amount of fresh produce, together with the lower spending on frozen goods, detergents and other grocery items, suggests a restaurant.
#
# 3 - Grocery
# * Fresh: 444
# * Milk: 879
# * Grocery: 2060
# * Frozen: 264
# * Detergents_paper: 290
# * Delicatessen: 259
# <br/>
# Since Grocery is by far the largest value while all the other categories are small, I believe this customer is a grocery store.
#
# ### Implementation: Feature Relevance
# One interesting thought to consider is if one (or more) of the six product categories is actually relevant for understanding customer purchasing. That is to say, is it possible to determine whether customers purchasing some amount of one category of products will necessarily purchase some proportional amount of another category of products? We can make this determination quite easily by training a supervised regression learner on a subset of the data with one feature removed, and then score how well that model can predict the removed feature.
#
# In the code block below, you will need to implement the following:
# - Assign `new_data` a copy of the data by removing a feature of your choice using the `DataFrame.drop` function.
# - Use `sklearn.cross_validation.train_test_split` to split the dataset into training and testing sets.
# - Use the removed feature as your target label. Set a `test_size` of `0.25` and set a `random_state`.
# - Import a decision tree regressor, set a `random_state`, and fit the learner to the training data.
# - Report the prediction score of the testing set using the regressor's `score` function.
# +
# TODO: Make a copy of the DataFrame, using the 'drop' function to drop the given feature
# NOTE(review): sklearn.cross_validation is the deprecated pre-0.18 module;
# it is mixed here with sklearn.model_selection (cross_val_score below).
from sklearn.cross_validation import ShuffleSplit, train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import r2_score
import sklearn.learning_curve as curves
from sklearn.model_selection import cross_val_score
Grocery_data = data['Grocery'] # target - y_all
# create new data without column Grocery
new_data = data.drop(['Grocery'], axis = 1) # features - X_all
# TODO: Split the data into training and testing sets(0.25) using the given feature as the target
# Set a random state.
X_train, X_test, y_train, y_test = train_test_split(new_data, Grocery_data,
                                                    test_size=0.25, random_state=42)
# TODO: Create a decision tree regressor and fit it to the training set
regressor = DecisionTreeRegressor(random_state=42)
regressor.fit(X_train, y_train)
# Yielding indices to split data into training and test sets.
cv = ShuffleSplit(X_train.shape[0], n_iter=10, test_size=0.25, random_state=42)
# NOTE(review): scoreTest is computed but never printed or used below.
scoreTest = cross_val_score(regressor, X_train, y_train, cv=cv)
# TODO: Report the score of the prediction using the testing set
# using the split data test X_test, y_test
print regressor.score(X_test, y_test)
# R^2 on the held-out set when predicting each feature from the others:
# score Fresh -> -0.385749710204
# score Milk -> 0.156275395017
# score Grocery -> 0.681884008544
# score Frozen -> -0.210135890125
# score Detergents_Paper -> 0.271666980627
# score Delicatessen -> -2.2547115372
# -
# ### Question 2
#
# * Which feature did you attempt to predict?
# * What was the reported prediction score?
# * Is this feature necessary for identifying customers' spending habits?
#
# **Hint:** The coefficient of determination, `R^2`, is scored between 0 and 1, with 1 being a perfect fit. A negative `R^2` implies the model fails to fit the data. If you get a low score for a particular feature, that leads us to believe that that feature point is hard to predict using the other features, thereby making it an important feature to consider when considering relevance.
# **Answer:**
#
# Selected feature: "Grocery"<br/>
# Score: "0.681884008544"<br/>
# Yes, through supervised regression training on a subset of the data removed, I chose a "Grocery" feature because it was one of the features with the best score compared to the others.
#
# With an R^2 of about 0.68, "Grocery" can largely be predicted from the other features, so it is not strictly necessary for identifying spending habits. Features such as Milk, Fresh, Delicatessen and Detergents_Paper are harder to predict from the rest of the data and are therefore more important to keep.
#
# ### Visualize Feature Distributions
# To get a better understanding of the dataset, we can construct a scatter matrix of each of the six product features present in the data. If you found that the feature you attempted to predict above is relevant for identifying a specific customer, then the scatter matrix below may not show any correlation between that feature and the others. Conversely, if you believe that feature is not relevant for identifying a specific customer, the scatter matrix might show a correlation between that feature and another feature in the data. Run the code block below to produce a scatter matrix.
# Produce a scatter matrix for each pair of features in the data
axes = pd.plotting.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde')
# FIX: DataFrame.as_matrix() was deprecated in pandas 0.23 and removed in
# 1.0; .values returns the same underlying ndarray on every version.
corr = data.corr().values
# Annotate the upper triangle of the matrix with the pairwise correlations.
for i, j in zip(*np.triu_indices_from(axes, k=1)):
    axes[i, j].annotate("%.3f" %corr[i,j], (0.8, 0.8), xycoords='axes fraction', ha='center', va='center')
# ### Question 3
# * Using the scatter matrix as a reference, discuss the distribution of the dataset, specifically talk about the normality, outliers, large number of data points near 0 among others. If you need to separate out some of the plots individually to further accentuate your point, you may do so as well.
# * Are there any pairs of features which exhibit some degree of correlation?
# * Does this confirm or deny your suspicions about the relevance of the feature you attempted to predict?
# * How is the data for those features distributed?
#
# **Hint:** Is the data normally distributed? Where do most of the data points lie? You can use [corr()](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.corr.html) to get the feature correlations and then visualize them using a [heatmap](http://seaborn.pydata.org/generated/seaborn.heatmap.html)(the data that would be fed into the heatmap would be the correlation values, for eg: `data.corr()`) to gain further insight.
# +
import numpy as np
import seaborn as sns
sns.set()
# Heatmap of pairwise feature correlations, annotated with the values.
ax = sns.heatmap(data.corr(), annot=True)
# -
# **Answer:**
#
# All categories have a large amount of data close to 0, and the ones that correlate best are Grocery, Detergents_Paper and Milk. This confirms that we can remove "Grocery or Detergents_Paper" and the others could reasonably be predicted.
# The data points aren't normally distributed
#
# ## Data Preprocessing
# In this section, you will preprocess the data to create a better representation of customers by performing a scaling on the data and detecting (and optionally removing) outliers. Preprocessing data is often times a critical step in assuring that results you obtain from your analysis are significant and meaningful.
# ### Implementation: Feature Scaling
# If data is not normally distributed, especially if the mean and median vary significantly (indicating a large skew), it is most [often appropriate](http://econbrowser.com/archives/2014/02/use-of-logarithms-in-economics) to apply a non-linear scaling — particularly for financial data. One way to achieve this scaling is by using a [Box-Cox test](http://scipy.github.io/devdocs/generated/scipy.stats.boxcox.html), which calculates the best power transformation of the data that reduces skewness. A simpler approach which can work in most cases would be applying the natural logarithm.
#
# In the code block below, you will need to implement the following:
# - Assign a copy of the data to `log_data` after applying logarithmic scaling. Use the `np.log` function for this.
# - Assign a copy of the sample data to `log_samples` after applying logarithmic scaling. Again, use `np.log`.
# +
# TODO: Scale the data using the natural logarithm
# Log-scaling reduces the heavy right skew of the spending features.
log_data = data.apply(np.log)
# TODO: Scale the sample data using the natural logarithm
log_samples = samples.apply(np.log)
# Produce a scatter matrix for each pair of newly-transformed features
pd.plotting.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
# -
# ### Observation
# After applying a natural logarithm scaling to the data, the distribution of each feature should appear much more normal. For any pairs of features you may have identified earlier as being correlated, observe here whether that correlation is still present (and whether it is now stronger or weaker than before).
#
# Run the code below to see how the sample data has changed after having the natural logarithm applied to it.
# Display the log-transformed sample data
display(log_samples)
# ### Implementation: Outlier Detection
# Detecting outliers in the data is extremely important in the data preprocessing step of any analysis. The presence of outliers can often skew results which take into consideration these data points. There are many "rules of thumb" for what constitutes an outlier in a dataset. Here, we will use [Tukey's Method for identfying outliers](http://datapigtechnologies.com/blog/index.php/highlighting-outliers-in-your-data-with-the-tukey-method/): An *outlier step* is calculated as 1.5 times the interquartile range (IQR). A data point with a feature that is beyond an outlier step outside of the IQR for that feature is considered abnormal.
#
# In the code block below, you will need to implement the following:
# - Assign the value of the 25th percentile for the given feature to `Q1`. Use `np.percentile` for this.
# - Assign the value of the 75th percentile for the given feature to `Q3`. Again, use `np.percentile`.
# - Assign the calculation of an outlier step for the given feature to `step`.
# - Optionally remove data points from the dataset by adding indices to the `outliers` list.
#
# **NOTE:** If you choose to remove any outliers, ensure that the sample data does not contain any of these points!
# Once you have performed this implementation, the dataset will be stored in the variable `good_data`.
# +
# Tukey outlier scan. An index flagged for ONE feature goes into
# outlierMap; an index flagged a second (or later) time goes into
# outlierRemove, so only multi-feature outliers are dropped below.
count = 0
outlierMap = []
outlierRemove = []
# For each feature find the data points with extreme high or low values
for feature in log_data.keys():
    # TODO: Calculate Q1 (25th percentile of the data) for the given feature
    # Q1 = np.percentile(log_data[feature], 25)
    # TODO: Calculate Q3 (75th percentile of the data) for the given feature
    # Q3 = np.percentile(log_data[feature], 75)
    Q3, Q1 = np.percentile(log_data[feature], [75 ,25])
    # TODO: Use the interquartile range to calculate an outlier step (1.5 times the interquartile range)
    step = (Q3 - Q1) * 1.5
    # Display the outliers
    print "Block:", count,"- Data points considered outliers for the feature '{}':".format(feature)
    outlier = log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]
    display(outlier)
    count += 1
    # Get outliers for remove
    indice = outlier.index.values.astype('int64')
    for word in indice:
        if word not in outlierMap: # first time this index is flagged
            outlierMap.append(word)
        else:
            outlierRemove.append(word)  # flagged before -> outlier in >= 2 features
# NOTE(review): list membership above is O(n) per lookup; sets would be
# faster, though n is tiny here.
# OPTIONAL: Select the indices for data points you wish to remove
outliers = outlierRemove
# Remove the outliers, if any were specified
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
# -
# ### Question 4
# * Are there any data points considered outliers for more than one feature based on the definition above?
# * Should these data points be removed from the dataset?
# * If any data points were added to the `outliers` list to be removed, explain why.
#
# ** Hint: ** If you have datapoints that are outliers in multiple categories think about why that may be and if they warrant removal. Also note how k-means is affected by outliers and whether or not this plays a factor in your analysis of whether or not to remove them.
# **Answer:**
# Yes<br/>
# Yes<br/>
# Because they represent uncommon points.
#
# 65 appears for block 3 and 0<br/>
# 66 appears for block 0 and 5<br/>
# 75 appears for block 2 and 4<br/>
# 128 appears for block 0 and 5<br/>
# 154 appears for block 1, 5 and 2<br/>
#
#
# Because data can be influenced with the removal of outliers, both to push the data to the desired results that is the dangerous part and where in fact there are data with problems that must be analyzed for its removal, anyway as an article suggests
# https://www.graphpad.com/guides/prism/6/statistics/index.htm?stat_checklist_identifying_outliers.htm
#
# It is always good to create policies on how to work with these types of sporadic data, as well as other techniques that help define an outlier and which can be seen below in question format!
# Has the outlier value been entered incorrectly on the computer?
# Is outlier value scientifically impossible?
#
# They are questions that many times are answered in just looking at the data we identify that hears a wrong inclusion of the data, diagramming, an inconsistency in the information.
# ## Feature Transformation
# In this section you will use principal component analysis (PCA) to draw conclusions about the underlying structure of the wholesale customer data. Since using PCA on a dataset calculates the dimensions which best maximize variance, we will find which compound combinations of features best describe customers.
# ### Implementation: PCA
#
# Now that the data has been scaled to a more normal distribution and has had any necessary outliers removed, we can now apply PCA to the `good_data` to discover which dimensions about the data best maximize the variance of features involved. In addition to finding these dimensions, PCA will also report the *explained variance ratio* of each dimension — how much variance within the data is explained by that dimension alone. Note that a component (dimension) from PCA can be considered a new "feature" of the space, however it is a composition of the original features present in the data.
#
# In the code block below, you will need to implement the following:
# - Import `sklearn.decomposition.PCA` and assign the results of fitting PCA in six dimensions with `good_data` to `pca`.
# - Apply a PCA transformation of `log_samples` using `pca.transform`, and assign the results to `pca_samples`.
# +
from sklearn.decomposition import PCA
# TODO: Apply PCA by fitting the good data with the same number of dimensions as features
pca = PCA(n_components=6)
pca.fit(good_data)
# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)
# Generate PCA results plot
pca_results = vs.pca_results(good_data, pca)
print ""
# Cumulative explained variance, once from the plotted results table...
print pca_results['Explained Variance'].cumsum()
print ""
# ...and once straight from the fitted PCA object (the two should agree).
print np.cumsum(pca.explained_variance_ratio_)
# -
# ### Question 5
#
# * How much variance in the data is explained* **in total** *by the first and second principal component?
# * How much variance in the data is explained by the first four principal components?
# * Using the visualization provided above, talk about each dimension and the cumulative variance explained by each, stressing upon which features are well represented by each dimension(both in terms of positive and negative variance explained). Discuss what the first four dimensions best represent in terms of customer spending.
#
# **Hint:** A positive increase in a specific dimension corresponds with an *increase* of the *positive-weighted* features and a *decrease* of the *negative-weighted* features. The rate of increase or decrease is based on the individual feature weights.
# **Answer:**<br/>
#
# An interpretation of the major components is based on the discovery of which variables are most strongly correlated with each component, that is, large numbers in magnitude, or farther from zero in the positive or negative direction.
#
# 70.68% of the variance is explained in total by the first and second major components.<br/>
# 93.11% of the variance is explained in total by the first four components.<br/>
#
# Dimension 1<br/>
# Significant negative weight is placed in Detergents_Paper, Milk and Groceries.<br/>
# This could represent a convenience store.<br/>
#
# Dimension 2<br/>
# Significant negative weight is placed on fresh, frozen products and delicatessen.<br/>
# That could be a restaurant.<br/>
#
# Dimension 3<br/>
# Significant positive weight is put in fresh, delicatessen and with a significant negative weight in frozen products.<br/>
#
# Dimension 4<br/>
# Significant positive weight is put on frozen with a significant negative weight in delicatessen.<br/>
# </br>
# That could be a cafeteria.
# ### Observation
# Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it in six dimensions. Observe the numerical value for the first four dimensions of the sample points. Consider if this is consistent with your initial interpretation of the sample points.
# Display sample log-data after having a PCA transformation applied
# (values rounded to 4 decimal places; column labels come from pca_results)
display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))
# ### Implementation: Dimensionality Reduction
# When using principal component analysis, one of the main goals is to reduce the dimensionality of the data — in effect, reducing the complexity of the problem. Dimensionality reduction comes at a cost: Fewer dimensions used implies less of the total variance in the data is being explained. Because of this, the *cumulative explained variance ratio* is extremely important for knowing how many dimensions are necessary for the problem. Additionally, if a signifiant amount of variance is explained by only two or three dimensions, the reduced data can be visualized afterwards.
#
# In the code block below, you will need to implement the following:
# - Assign the results of fitting PCA in two dimensions with `good_data` to `pca`.
# - Apply a PCA transformation of `good_data` using `pca.transform`, and assign the results to `reduced_data`.
# - Apply a PCA transformation of `log_samples` using `pca.transform`, and assign the results to `pca_samples`.
# +
# TODO: Apply PCA by fitting the good data with only two dimensions
pca = PCA(n_components=2)
pca.fit(good_data)

# TODO: Transform the good data using the PCA fit above
# FIX: use transform() rather than fit_transform() -- the model was just
# fitted on good_data above, so refitting here was redundant work.
reduced_data = pca.transform(good_data)

# TODO: Transform log_samples using the PCA fit above
pca_samples = pca.transform(log_samples)

# Create a DataFrame for the reduced data
reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])
# -
# ### Observation
# Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it using only two dimensions. Observe how the values for the first two dimensions remains unchanged when compared to a PCA transformation in six dimensions.
# Display sample log-data after applying PCA transformation in two dimensions
# (the first two dimensions should match the six-dimension transform above)
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
# ## Visualizing a Biplot
# A biplot is a scatterplot where each data point is represented by its scores along the principal components. The axes are the principal components (in this case `Dimension 1` and `Dimension 2`). In addition, the biplot shows the projection of the original features along the components. A biplot can help us interpret the reduced dimensions of the data, and discover relationships between the principal components and original features.
#
# Run the code cell below to produce a biplot of the reduced-dimension data.
# Create a biplot
# vs.biplot overlays the original feature axes on the 2-D PCA scatter of
# reduced_data, to help relate the components back to the original features.
vs.biplot(good_data, reduced_data, pca)
# ### Observation
#
# Once we have the original feature projections (in red), it is easier to interpret the relative position of each data point in the scatterplot. For instance, a point the lower right corner of the figure will likely correspond to a customer that spends a lot on `'Milk'`, `'Grocery'` and `'Detergents_Paper'`, but not so much on the other product categories.
#
# From the biplot, which of the original features are most strongly correlated with the first component? What about those that are associated with the second component? Do these observations agree with the pca_results plot you obtained earlier?
# **Answer:**
#
# Grocery, Milk, and Detergents_Paper are the most correlated features of the first component.
# Delicatessen, Fresh and Frozen are the most correlated features of the second component.
# Yes, they are in agreement with the portions of PCA that have already executed
# ## Clustering
#
# In this section, you will choose to use either a K-Means clustering algorithm or a Gaussian Mixture Model clustering algorithm to identify the various customer segments hidden in the data. You will then recover specific data points from the clusters to understand their significance by transforming them back into their original dimension and scale.
# ### Question 6
#
# * What are the advantages to using a K-Means clustering algorithm?
# * What are the advantages to using a Gaussian Mixture Model clustering algorithm?
# * Given your observations about the wholesale customer data so far, which of the two algorithms will you use and why?
#
# **Hint:** Think about the differences between hard clustering and soft clustering and which would be appropriate for our dataset.
# **Answer:**<br/>
#
# I believe the best model is used by K-Means, and it is one of the simplest unsupervised learning algorithms that solves the well-known clustering problem.<br/>
# **Advantages of using this technique**<br/>
# With a large number of variables, K-Means can be computationally faster than hierarchical grouping (if K is small).
# K-Means can produce clusters tighter than hierarchical clustering, especially if the clusters are globular.
# <br/>
# <br/>
# A Gaussian mixture model is a probabilistic model that assumes that all data points are generated from a mixture of a finite number of Gaussian distributions with unknown parameters. One can think of mixture models as generalizing k-means clustering to incorporate information about the covariance structure of the data, as well as the latent Gaussian centers.
# <br/>
# <br/>
# In statistics, a mixture model is a probabilistic model for representing the presence of subpopulations within a global population, without requiring that an observed set of data identify the subpopulations to which an individual observation belongs.
# <br/>
# <br/>
# K-means defines rigid clusters, samples must be associated with groups (subpopulations).
# Mixing templates allow you to determine these subpopulations without associating each sample to a cluster.
# <br/>
# <br/>
# We can think of mixing models as generalizing k-means grouping to incorporate information about the covariance structure of the data, as well as latent Gaussian centers.
# ### Implementation: Creating Clusters
# Depending on the problem, the number of clusters that you expect to be in the data may already be known. When the number of clusters is not known *a priori*, there is no guarantee that a given number of clusters best segments the data, since it is unclear what structure exists in the data — if any. However, we can quantify the "goodness" of a clustering by calculating each data point's *silhouette coefficient*. The [silhouette coefficient](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html) for a data point measures how similar it is to its assigned cluster from -1 (dissimilar) to 1 (similar). Calculating the *mean* silhouette coefficient provides for a simple scoring method of a given clustering.
#
# In the code block below, you will need to implement the following:
# - Fit a clustering algorithm to the `reduced_data` and assign it to `clusterer`.
# - Predict the cluster for each data point in `reduced_data` using `clusterer.predict` and assign them to `preds`.
# - Find the cluster centers using the algorithm's respective attribute and assign them to `centers`.
# - Predict the cluster for each sample data point in `pca_samples` and assign them `sample_preds`.
# - Import `sklearn.metrics.silhouette_score` and calculate the silhouette score of `reduced_data` against `preds`.
# - Assign the silhouette score to `score` and print the result.
# +
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.mixture import GaussianMixture

# Candidate cluster counts; extend this list (e.g. range(2, 9)) to compare
# the silhouette scores reported in Question 7.
ncluster = [2]
for n in ncluster:
    # TODO: Apply your clustering algorithm of choice to the reduced data
    # random_state fixed for reproducible centroid initialisation.
    clusterer = KMeans(n_clusters=n, random_state=42).fit(reduced_data)

    # TODO: Predict the cluster for each data point
    preds = clusterer.predict(reduced_data)

    # TODO: Find the cluster centers
    centers = clusterer.cluster_centers_

    # TODO: Predict the cluster for each transformed sample data point
    sample_preds = clusterer.predict(pca_samples)

    # TODO: Calculate the mean silhouette coefficient for the number of clusters chosen
    score = silhouette_score(reduced_data, clusterer.labels_, metric='euclidean')
    # BUG FIX: Python 2 print statements converted to Python 3 print() calls
    # (the notebook kernel is Python 3).
    print("score: (KMeans)", score, " number of clusters ", n)

    # Using a Gaussian mixture model for comparison (score print kept disabled,
    # as in the original).
    clusterer2 = GaussianMixture(n_components=n).fit(reduced_data)
    preds2 = clusterer2.predict(reduced_data)
    centers2 = clusterer2.means_
    sample_preds2 = clusterer2.predict(pca_samples)
    score2 = silhouette_score(reduced_data, preds2)
    # print("Another score test (GMM) ", score2)
# -
# ### Question 7
#
# * Report the silhouette score for several cluster numbers you tried.
# * Of these, which number of clusters has the best silhouette score?
# **Answer:**<br/>
#
# score: (KMeans) 0.426281015469 number of clusters 2<br/>
# score: (KMeans) 0.39689092645 number of clusters 3<br/>
# score: (KMeans) 0.331841276009 number of clusters 4<br/>
# score: (KMeans) 0.349997797526 number of clusters 5<br/>
# score: (KMeans) 0.365885220633 number of clusters 6<br/>
# score: (KMeans) 0.364801207979 number of clusters 7<br/>
# score: (KMeans) 0.367640756491 number of clusters 8<br/>
# <br/>
# The best score was 0.426281015469 with 2 clusters.<br/>
# ### Cluster Visualization
# Once you've chosen the optimal number of clusters for your clustering algorithm using the scoring metric above, you can now visualize the results by executing the code block below. Note that, for experimentation purposes, you are welcome to adjust the number of clusters for your clustering algorithm to see various visualizations. The final visualization provided should, however, correspond with the optimal number of clusters.
# Display the results of the clustering from implementation
# Plots reduced_data colored by cluster assignment (preds), with the cluster
# centers and the transformed sample points marked.
vs.cluster_results(reduced_data, preds, centers, pca_samples)
# ### Implementation: Data Recovery
# Each cluster present in the visualization above has a central point. These centers (or means) are not specifically data points from the data, but rather the *averages* of all the data points predicted in the respective clusters. For the problem of creating customer segments, a cluster's center point corresponds to *the average customer of that segment*. Since the data is currently reduced in dimension and scaled by a logarithm, we can recover the representative customer spending from these data points by applying the inverse transformations.
#
# In the code block below, you will need to implement the following:
# - Apply the inverse transform to `centers` using `pca.inverse_transform` and assign the new centers to `log_centers`.
# - Apply the inverse function of `np.log` to `log_centers` using `np.exp` and assign the true centers to `true_centers`.
#
# +
# TODO: Inverse transform the centers
# Undo the PCA projection to get back to the log-transformed feature space.
log_centers = pca.inverse_transform(centers)

# TODO: Exponentiate the centers
# Undo the log transform to recover spending on the original scale.
true_centers = np.exp(log_centers)

# Display the true centers, labelled by segment number.
segments = ['Segment {}'.format(idx) for idx in range(len(centers))]
rounded_centers = np.round(true_centers)
true_centers = pd.DataFrame(rounded_centers, columns = data.keys())
true_centers.index = segments
display(true_centers)

# Append the dataset mean as an extra row so the bar chart shows each
# segment's spending relative to the overall average.
true_centers = true_centers.append(data.describe().loc['mean'])
true_centers.plot(kind = 'bar', figsize = (16, 4))
# -
# ### Question 8
#
# * Consider the total purchase cost of each product category for the representative data points above, and reference the statistical description of the dataset at the beginning of this project(specifically looking at the mean values for the various feature points). What set of establishments could each of the customer segments represent?
#
# **Hint:** A customer who is assigned to `'Cluster X'` should best identify with the establishments represented by the feature set of `'Segment X'`. Think about what each segment represents in terms their values for the feature points chosen. Reference these values with the mean values to get some perspective into what kind of establishment they represent.
# +
# BUG FIX: Python 2 print statements converted to Python 3 print() calls
# (the notebook kernel is Python 3).
print("Comparing product to the mean")
display(true_centers - np.around(data.mean()))

print("Comparing product to the median")
display(true_centers - np.around(data.median()))

print("Quantiles")
display(true_centers.quantile([.1, .5]))
# -
# **Answer:**
#
# A customer who falls in cluster 0 is a customer who buys little amount of milk, groceries and detergent_paper, representing some kind of market / convenience.<br/>
# This can be accomplished based on your consistent purchasing costs across all milk, grocery stores and frozen product categories, and purchase costs higher than average for fresh ones.<br/>
#
# <br/>
# Segment 1 has a milk, grocery, laundry detergent and deli spend close to the mean value, however, spending on
# fresh and frozen products are low. Therefore, this customer can be a reseller.
# ### Question 9
#
# * For each sample point, which customer segment from* **Question 8** *best represents it?
# * Are the predictions for each sample point consistent with this?*
#
# Run the code block below to find which cluster each sample point is predicted to be.
# BUG FIX: Python 2 print statements converted to Python 3 print() calls
# (the notebook kernel is Python 3).
for i, pred in enumerate(sample_preds):
    print("Sample point", i, "predicted to be in Cluster", pred)
    print('The distance between sample point {} and center of cluster {}:'.format(i, pred))
    # Show how far each sample's spending is from its cluster's "average customer".
    display(samples.iloc[i] - true_centers.iloc[pred])
# **Answer:**
#
# The first and second sample points are associated with cluster 0 and the third sample point is associated with cluster 1. Therefore, this is consistent with the association between the sampling point and the customer segment.<br/>
# <br/>
# 0 is best represented by a market.<br/>
# Sample points 1 and 2 are best represented as some type of restaurant.<br/>
# <br/>
# The sample point 0 shows a higher proportion of Milk and Groceries than the other categories when compared to their relative percentiles.<br/>
# <br/>
# Sample point 1 shows a much larger proportion of Fresh and still a high proportion of Frozen relative to the other categories.<br/>
# <br/>
# Sample point 2 again shows a very high proportion of Milk and Groceries and additionally a high proportion of Detergents_Paper than the other categories.
# ## Conclusion
# In this final section, you will investigate ways that you can make use of the clustered data. First, you will consider how the different groups of customers, the ***customer segments***, may be affected differently by a specific delivery scheme. Next, you will consider how giving a label to each customer (which *segment* that customer belongs to) can provide for additional features about the customer data. Finally, you will compare the ***customer segments*** to a hidden variable present in the data, to see whether the clustering identified certain relationships.
# ### Question 10
# Companies will often run [A/B tests](https://en.wikipedia.org/wiki/A/B_testing) when making small changes to their products or services to determine whether making that change will affect its customers positively or negatively. The wholesale distributor is considering changing its delivery service from currently 5 days a week to 3 days a week. However, the distributor will only make this change in delivery service for customers that react positively.
#
# * How can the wholesale distributor use the customer segments to determine which customers, if any, would react positively to the change in delivery service?*
#
# **Hint:** Can we assume the change affects all customers equally? How can we determine which group of customers it affects the most?
# **Answer:**
#
# Taking into consideration the separation by group to be applied to change there will be no way to affect the customers, separating into Segment 0 that are customers who consume fresh products these should remain with frequent deliveries so in fact the customer is buying fresh products.<br/>
# <br/>
# The other segment 1 can be changed from 5 to 3 days a week because the product's motives are not perishable, and also have higher expiration values, the customer can buy in a larger quantity to have material in stock until the next delivery.<br/>
# <br/>
# But as we have two customer segments we can apply the A / B test on individual segments and get feedback from each customer identifying the schedule change as it will affect the customers.
# ### Question 11
# Additional structure is derived from originally unlabeled data when using clustering techniques. Since each customer has a ***customer segment*** it best identifies with (depending on the clustering algorithm applied), we can consider *'customer segment'* as an **engineered feature** for the data. Assume the wholesale distributor recently acquired ten new customers and each provided estimates for anticipated annual spending of each product category. Knowing these estimates, the wholesale distributor wants to classify each new customer to a ***customer segment*** to determine the most appropriate delivery service.
# * How can the wholesale distributor label the new customers using only their estimated product spending and the **customer segment** data?
#
# **Hint:** A supervised learner could be used to train on the original customers. What would be the target variable?
# **Answer:**
#
# The Target are the customer segments, we can use the predicted labels as an input resource using the ones for another model of supervised learning foreseeing something more.
# ### Visualizing Underlying Distributions
#
# At the beginning of this project, it was discussed that the `'Channel'` and `'Region'` features would be excluded from the dataset so that the customer product categories were emphasized in the analysis. By reintroducing the `'Channel'` feature to the dataset, an interesting structure emerges when considering the same PCA dimensionality reduction applied earlier to the original dataset.
#
# Run the code block below to see how each data point is labeled either `'HoReCa'` (Hotel/Restaurant/Cafe) or `'Retail'` the reduced space. In addition, you will find the sample points are circled in the plot, which will identify their labeling.
# +
# Display the clustering results based on 'Channel' data
# (the true HoReCa/Retail labels, for comparison against our clusters)
vs.channel_results(reduced_data, outliers, pca_samples)

# Display the results of the clustering from implementation
vs.cluster_results(reduced_data, preds, centers, pca_samples)
# -
# ### Question 12
#
# * How well does the clustering algorithm and number of clusters you've chosen compare to this underlying distribution of Hotel/Restaurant/Cafe customers to Retailer customers?
# * Are there customer segments that would be classified as purely 'Retailers' or 'Hotels/Restaurants/Cafes' by this distribution?
# * Would you consider these classifications as consistent with your previous definition of the customer segments?
# **Answer:**
#
# The results of clustering of the K-means algorithm are very similar to that of the Hotel / Restaurant / Cafe & Retailer distribution.<br/>
# I believe we can consider these ratings to be consistent with our earlier definition of customer segments;
# > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
# **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
| customer_segments/customer_segments.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/cassidyhanna/DS-Unit-2-Applied-Modeling/blob/master/Copy_of_assignment_applied_modeling_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="nCc3XZEyG3XV"
# Lambda School Data Science
#
# *Unit 2, Sprint 3, Module 1*
#
# ---
#
#
# # Define ML problems
#
# You will use your portfolio project dataset for all assignments this sprint.
#
# ## Assignment
#
# Complete these tasks for your project, and document your decisions.
#
# - [ ] Choose your target. Which column in your tabular dataset will you predict?
# - [ ] Is your problem regression or classification?
# - [ ] How is your target distributed?
# - Classification: How many classes? Are the classes imbalanced?
# - Regression: Is the target right-skewed? If so, you may want to log transform the target.
# - [ ] Choose which observations you will use to train, validate, and test your model.
# - Are some observations outliers? Will you exclude them?
# - Will you do a random split or a time-based split?
# - [ ] Choose your evaluation metric(s).
# - Classification: Is your majority class frequency > 50% and < 70% ? If so, you can just use accuracy if you want. Outside that range, accuracy could be misleading. What evaluation metric will you choose, in addition to or instead of accuracy?
# - [ ] Begin to clean and explore your data.
# - [ ] Begin to choose which features, if any, to exclude. Would some features "leak" future information?
# + id="zDVnOZVmq_VO" colab_type="code" colab={}
# %%capture
import sys

# When running on Google Colab, point DATA_PATH at the raw GitHub data
# directory and install the extra dependency.
if 'google.colab' in sys.modules:
    # BUG FIX: host was 'raw.githubuser.com' (a dead domain) and the org was
    # misspelled 'LamdaSchool'; corrected to the real raw.githubusercontent.com URL.
    DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
    # BUG FIX: package name was misspelled 'categeory_encoders'.
    # !pip install category_encoders==2.*
# + id="DLT2Rv76Hmix" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="0b7e2d78-be85-4201-dd80-6129c54a7030"
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import graphviz
from sklearn.tree import export_graphviz
from sklearn.tree import DecisionTreeClassifier

# Load the UCI "Drug consumption (quantified)" dataset; it ships without a
# header row, so column names are supplied explicitly via df_cols.
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00373/drug_consumption.data'
df_cols = ['ID','Age','Gender','Education','Country','Ethnicity','Nscore','Escore','Oscore','Ascore',
           'Cscore','Impulsive','SS','Alcohol','Amphet','Amyl','Benzos','Caff','Cannabis','Choc','Coke',
           'Crack','Esctasy','Heroin','Ketamine','Legalh','LSD','Meth','Mushrooms','Nicotine','Semer','VSA']
df = pd.read_csv(url, names = df_cols)
df.head()
# + id="XtDNCexLAYcI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="b85ebc7d-a8da-46f4-fb8b-b2a866d95d01"
# Class balance of the chosen target: normalized value counts show the
# fraction of rows in each Gender class.
df['Gender'].value_counts(normalize = True)
# + id="FAj6SeFl15rE" colab_type="code" colab={}
# Drop the row identifier -- it carries no predictive signal.
df = df.drop(columns=['ID'])
# + [markdown] id="nDZl7ZArlh71" colab_type="text"
# **Label encode** (note: `LabelEncoder` produces ordinal integer codes, not one-hot columns)
# + id="frjF-3uXjsPw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="d7e655c0-f56c-4c81-cbe2-34f1b83ed67b"
# Map every column's values to integer codes 0..k-1. df.apply refits the
# same LabelEncoder instance once per column, so each column gets its own
# independent encoding. NOTE(review): despite the section heading, this is
# ordinal label encoding, not one-hot encoding.
le = preprocessing.LabelEncoder()
df = df.apply(le.fit_transform)
df.head()
# + [markdown] id="px6iub08lnrF" colab_type="text"
# **Train Test Split**
# + id="D4JrYnvdlg2B" colab_type="code" colab={}
# Target variable: the Gender column (binary classification).
target = df.Gender
# + id="3Qr2PDKEn8bS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ab3b65ab-9bde-4d07-edf6-40f65b6a73f9"
# BUG FIX: the original passed the full df (which still contains the Gender
# column) as the feature matrix, so the model would train on its own label
# (data leakage). Drop the target column from the features before splitting.
features = df.drop(columns=['Gender'])

# 80/20 train/test split, then a further 80/20 split of train into
# train/validation (random splits; no time ordering in this dataset).
X_train, X_test, y_train, y_test = train_test_split(features, target, test_size=0.2)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2)
X_train.shape, X_val.shape, X_test.shape, y_train.shape, y_val.shape, y_test.shape
# + id="SFWqun0YyfsU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 317} outputId="6d946f39-22f8-442a-925f-6650dc66987f"
# Summary statistics of the (now all-integer) encoded dataframe.
df.describe()
# + id="QAMVgwsY0-it" colab_type="code" colab={}
# NOTE(review): after the LabelEncoder step above, every column is numeric,
# so select_dtypes(exclude='number') should yield no columns and
# categorical_features ends up empty; none of these three variables is used
# later in the visible code -- confirm whether this cell is still needed.
numeric_features = df.select_dtypes(include=['number'])
cardinality = df.select_dtypes(exclude='number').nunique()
categorical_features = cardinality[cardinality <= 40].index.tolist()
# + id="_52w1sK_RfYM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="edf9cd42-ae16-4020-b927-035683810d95"
# Baseline model: a one-step pipeline around logistic regression.
# NOTE(review): there is no scaler in the pipeline and LogisticRegression's
# default max_iter may not converge on unscaled integer codes -- confirm
# whether a StandardScaler step should be added.
pipeline = make_pipeline(
    LogisticRegression()
)
# Fit on the training split, then report accuracy on the validation split.
pipeline.fit(X_train , y_train)
pipeline.score(X_val, y_val)
# + id="zwtoAng3Rshu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="06ca63f5-3a24-4782-e663-f1a13b110a5d"
# BUG FIX: 'logisticRegr' was never defined anywhere in this notebook, so
# this cell raised NameError. Reuse the pipeline built and fitted above
# (Pipeline.fit returns the pipeline itself, so 'model' is the fitted
# pipeline), then predict on the held-out test split.
model = pipeline.fit(X_train, y_train)
y_pred = model.predict(X_test)
y_pred
| Copy_of_assignment_applied_modeling_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # BLAST an unknown sequence
#
#
# As stated in the introduction, we have an sequence from *D.yakuba*, but we don't know much about it. First, let's examine the [sequence](./files/yakuba.fa), which is saved in the same directory as this notebook.
# We will use Linux's `head` command to preview the first few lines of the file.
# > Tip: To execute a bash command we can place a `!` in front of the command to launch within this Python Jupyter notebook.
# !head ./files/yakuba.fa
# ## Starting with Biopython
#
# In these notebooks, we will be using [Biopython](http://biopython.org/) a set of free software tools for a variety of bioinformatics applications. While this tutorial will not teach Biopython comprehensively, you will learn some useful features and we will refer you to the [Biopython documentation](http://biopython.org/wiki/Documentation) to learn more.
# ### Load Biopython and check version
# First, let's check that Biopython is installed and check the version.
import Bio

# Report which Biopython release is installed.
print("Biopython version is {}".format(Bio.__version__))
# > tip: If you did not have Biopython installed, see their [installation instructions](http://biopython.org/wiki/Download)
# ### Load a fasta file for use in Biopython
#
# In this step, we want to load the yakuba.fa sequence into a variable that can be used in our blast search. To do this we create a variable called `fasta_file` and use Python's `open()` function to read the file. As shown above, the yakuba file is in a folder called `files` at `./files/yakuba.fa`
# + solution2="hidden" solution2_first=true
# Complete this code by entering the name of your file. The filename and
# filepath should be in quotes
# NOTE: open() is intentionally left without arguments -- this cell is the
# exercise; the hidden cell below holds the solution.
fasta_file = open().read()
# + solution2="hidden"
# NOTE(review): the file handle is never closed; a `with open(...)` block
# would be safer, but behavior is left unchanged here.
fasta_file = open('./files/yakuba.fa').read()
# -
# We can preview what was read into the fasta file by printing it:
# (prints the FASTA header line followed by the sequence text)
print(fasta_file)
# ### Perform a BLAST search using Biopython
#
# As mentioned in the introduction, BLAST is a tool for similarity searching. This is done by taking your **query** sequence (the sequence you want to find matches for), as well as **search parameters** (some optional adjustments to the way you wish to limit or expand your search) and searching a **database** (a repository of known DNA sequences).
#
# First, we will load the appropriate Biopython module for doing a BLAST search over the Internet. The [NCBIWWW module](http://biopython.org/DIST/docs/api/Bio.Blast.NCBIWWW-module.html) has a variety of features we will explore in a moment.
from Bio.Blast import NCBIWWW
# We will do our first BLAST using this piece of Biopython code.
# > tip: Since this is a real BLAST search, you will get an 'In [\*]' in the cell below for up to several minutes as the search is executed. Don't proceed in the notebook until the '\*' turns into a number.
blast_result_1 = NCBIWWW.qblast("blastn", "nt", fasta_file)
# The blast result returned by the NCBIWWW.qblast function is not easy to read as it is an [XML file](https://en.wikipedia.org/wiki/XML). We will use some additional code to examine.
#
# First, let's save the blast result as its own file. This
# Persist the raw BLAST XML to disk, then release the network handle.
xml_path = "./files/blast_output.xml"
result_text = blast_result_1.read()
blast_result_1.close()
with open(xml_path, "w") as xml_file:
    xml_file.write(result_text)
# We can preview the first few lines of the `blast_output.xml` file and then go on to extract the information we need.
# + solution2="hidden" solution2_first=true
# Use the `!head` command (using the -n argument to specify the
# number of lines) to preview the first 50 lines of the blast_output.xml file
### your code here
# + solution2="hidden"
# !head -n 50 ./files/blast_output.xml
# -
# ### Examining your BLAST result
#
# Next, we will use some additional Biopython tools to view the results of our BLAST search stored in the XML file.
#
# We will start by importing Biopython's [SearchIO module](http://biopython.org/DIST/docs/api/Bio.SearchIO-module.html):
from Bio import SearchIO
# you may get a warning that this feature is experimental; we can ignore it for now.
# Next, we will use SearchIO.read to read in the file to a variable and take a look.
blast_result_1_xml = SearchIO.read('./files/blast_output.xml', 'blast-xml')
print(blast_result_1_xml)
# First let's interpret what we are seeing in this output:
#
# The first three lines are giving us some information about BLAST and our search:
#
# - **Program: blastn (2.6.1+)**: This is the BLAST tool we are using (blastn) and the version of the software.
# - **Query: unknown_yakuba_sequence (11001)**: This is the name and length of our sequence
# - **Target: nt**: This is the database we are searching called - 'nt' (more on this later).
#
# The next section (hits) is useful information on which sequences in the 'nt' database were close matches to our query sequence.
#
# - **Column 1 (#)**: This is the hit number 1..n
# - **Column 2 (# HSPs)**: The number "[High-scoring Pairs](https://www.ncbi.nlm.nih.gov/books/NBK62051/); these are the number of places where there was a potentially valid match on a given sequence from the target database
# - **Column 3 (ID + description)**: [Genbank identifiers](https://www.ncbi.nlm.nih.gov/Sitemap/sequenceIDs.html) and description of the matching sequence
#
# In this form we are still looking at a lot of information, so let's look at just a single record. On our list of hits, hit 3 is the first D.melanogaster sequence, so let's look at that one.
# +
blast_hit = next(SearchIO.parse('./files/blast_output.xml', 'blast-xml'))
print(blast_hit[3])
# -
# We can also view the HSP alignments for the D.melanogaster sequence:
#
# +
blast_hit = next(SearchIO.parse('./files/blast_output.xml', 'blast-xml'))
for hsps in blast_hit[3].hsps:
print(hsps)
# -
# Each HSP record gives us some additional information including the location on our D.yakuba sequence (Query) and the D.melanogaster sequence (Hit). For example, the first HSP matches the coordinates [436867:439261] on chromosome 4 of D.melanogaster.
# ## Critical assessment of BLAST results
#
# To understand the meaning of our BLAST results, we have to define a few terms. At this point, you should review the [recommended reading](./files/reading1_nihms519883.pdf) to help you answer the following questions.
# + [markdown] solution="hidden" solution_first=true
# **Question**: What is an e-value?
# + [markdown] solution="hidden"
# **Answer**: e value: Expect value is the number of matches by chance to the provided sequence one can expect in a database of a given size. Lower e values indicate more “significant” or better alignments.
#
# + [markdown] solution="hidden" solution_first=true
# **Question**: What is a bitscore
# + [markdown] solution="hidden"
# **Answer**: is a normalized score expressed in bits that lets you estimate the search space you would have to look through before you would expect to find an score as good as or better than this one by chance.
# + [markdown] solution="hidden" solution_first=true
# **Question**: What is the difference between blast record IDs have an 'XM' identifier prefixes and others have NM?
# + [markdown] solution="hidden"
# **Answer**: 'X' denotes a predicted molecule and 'M' denotes a mRNA. 'NM' records are refseq mRNA molecules (i.e. experimentally verifies sequences)
#
| notebooks/.ipynb_checkpoints/Notebook_1_BLAST-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# add package source code to notebook path
import os
import sys
from pathlib import Path
# Repo root is assumed to be the parent of the directory this notebook runs in.
path_repo = Path().absolute().parent
# Make the repo's src/ package importable from inside the notebook.
# NOTE(review): `os` is imported but unused in this cell.
sys.path.append(str(path_repo.joinpath('src').absolute()))
| {{ cookiecutter.model_name.lower()}}/notebooks/0.0-kj-default-ml-cutter-notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Supervised Learning - Linear Regression
#
# Do you remember the recipe for Machine Learning? Let me remind you once again!
#
# * Define Problem : We start by defining the problem we are trying to solve. This can be as simple as prediction of your next semester's result based on your previous results.
# * Collect Data : Next step is to collect relevant data based on the problem definition. This can be your grades in different semesters.
# * Prepare Data : The data collected for our problem is preprocessed. This can be removing redundant grades and replacing the missing ones.
# * Select Model (Algorithm) : After the data is ready, we proceed to select the machine learning model. The selection is based on the problem type e.g. classification, regression etc and the data that is available to us. The model can be linear regression model in our case.
# * Train Model : The selected model is then trained to learn from the data we have collected.
# * Evaluate Model : Final step is to evaluate the model that we have trained for accuracy and view the results.
# This is exactly what we are going to do here.
#
# ## Step 1 - Define Problem
#
# Sometime you look at someone and wonder what is their weight? <br> <br>
# <img src="files/img.gif">
#
# It is indeed rude to ask someone their weight. What if I told you that we can use Machine Learning to predict the weight of an animal using the weight of their brain! <br>
# How cool is that? <br>
#
# The most intelligent Biologists at AwesomeLand have spent years collecting data about various animals, birds and insects. You have been provided with this data and now your job is to predict the weight of the body of an animal based on its weight of the brain. <br>
#
# ## Step 2 - Collect & Prepare Data
#
# Step 2.1 - Import Data & Primary Data Analysis
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set()
#Reading the dataset in a dataframe using Pandas
df = pd.read_csv("data.csv")
# -
# Now let us do some quick data analysis!
df.shape
df.head(10)
df.describe()
# Here are a few inferences you can draw by looking at the output of the describe() function:
#
# * Average weight of the brain is 198
# * There are 62 observations
# * Max body weight is around 5712
# * Min brain weight is 0.005
#
# Step 2.2 - Finding & Imputing Missing Values
#finding missing values
df.isnull().sum()
from sklearn.preprocessing import Imputer
# Imputer (sklearn < 0.22, matching this Python 2 kernel) fills numeric NaNs;
# here the first two columns (brain/body weights) are imputed with the column mean.
imputer = Imputer(missing_values = 'NaN', strategy = 'mean')
imputer = imputer.fit(df.iloc[:, 0:2])
df.iloc[:, 0:2] = imputer.transform(df.iloc[:, 0:2])
# Missing categorical labels default to 'Animal'.
df['Type'] = df['Type'].fillna('Animal')
# Re-check: every column should now report zero missing values.
df.isnull().sum()
# Awesome! Now we don't have any missing values.
#
# Step 2.3 - Data Visualization
plt.figure(figsize=(6,6))
sns.boxplot(x = 'Brain', y = 'Body', data = df)
plt.figure(figsize=(6,6))
sns.barplot(x = 'Brain', y = 'Body', data = df)
# ## Step 3 - Modeling
#
# +
from sklearn.preprocessing import LabelEncoder
categorical_variables = df.dtypes[df.dtypes == 'object'].index
categorical_variables
# +
le = LabelEncoder()
for var in categorical_variables:
df[var] = le.fit_transform(df[var])
df.head()
# -
# Features are every column except the first; the first column is the target.
X = df.iloc[:, 1:]
y = df.iloc[:, 0]
from sklearn.model_selection import train_test_split
# NOTE(review): test_size=.8 reserves 80% of the data for testing, leaving
# only ~20% (about 12 rows) for training — confirm this split is intentional.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .8, random_state = 0)
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
# Fit ordinary least squares on the training split.
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
from sklearn.metrics import r2_score
# Report R-squared on the held-out set as a percentage (Python 2 print).
print round(r2_score(y_test, y_pred)*100, 2) , '%'
| Linear Regression - Predicting Weight/Supervised Learning - Linear Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # This notebook shows how you can get synonyms of key words/phrases by web-crawling Thesaurus.com and/or adding them manually. This can be used to augment a downstream search operation. #
# + deletable=true editable=true
import pandas as pd
import os
import requests
from bs4 import BeautifulSoup
from collections import Counter
# + [markdown] deletable=true editable=true
# # Part 1: Get synonyms of keywords from thesaurus.com #
# ** Define a function to call and crawl thesearus.com. You pass in a word (which could be a phrase) and get back up to top N synonyms if they exist. You can also filter results by Part of Speech if you wish.
# Note: the advantage of crawling vs calling an API is that you can make unlimited free requests for getting synonyms.**
# + deletable=true editable=true
def get_web_syns(word, pos=None, n=5):
    """Return up to *n* synonyms of *word* scraped from thesaurus.com.

    Parameters
    ----------
    word : str
        Word or phrase to look up.
    pos : str, optional
        Part of speech (e.g. 'noun', 'verb') appended to the URL to
        filter results; when None, all parts of speech are searched.
    n : int, optional
        Maximum number of synonyms to return (default 5).

    Returns
    -------
    list of str
        Synonyms in relevancy order, or an empty list when the word is
        not found in the thesaurus.
    """
    # thesaurus.com accepts an optional trailing part-of-speech segment.
    if pos is None:
        req = requests.get('http://www.thesaurus.com/browse/%s' % word)
    else:
        req = requests.get('http://www.thesaurus.com/browse/%s/%s' % (word, pos))
    soup = BeautifulSoup(req.text, 'html.parser')
    all_syns = soup.find('div', {'class': 'relevancy-list'})
    syns = []
    # Missing relevancy section means the word has no thesaurus entry.
    if all_syns is None:
        return syns
    for ul in all_syns.findAll('ul'):
        for li in ul.findAll('span', {'class': 'text'}):
            # Keep only the synonym itself, dropping trailing annotations.
            syns.append(li.text.split(",")[0])
    return syns[:n]
# Example
get_web_syns('hello')
# + [markdown] deletable=true editable=true
# **Read in a sample input file, e.g. excel format. Show the raw raw text and keywords extracted columns: **
# + deletable=true editable=true
INPUT_FILE = "raw_text_enriched_with_keywords_sample.xlsx"
df = pd.read_excel(INPUT_FILE)
print(df[['ParaText','Keywords']])
# + [markdown] deletable=true editable=true
# ** We are going to extract all the keywords/phrases in the Keywords column, count frequency, and keep only keywords above a pre-defined threshold. Then, get the synonyms (if they exist) of each keyword, and save the resulting map to file: **
# + deletable=true editable=true
MIN_KEYWORD_COUNT = 1
keywords_list = df["Keywords"].tolist()
flattened_keywords_list = []
for sublist in keywords_list:
for val in sublist.split(","):
flattened_keywords_list.append(val)
keywords_count = Counter(flattened_keywords_list)
keywords_filtered = Counter(el for el in keywords_count.elements() if keywords_count[el] >=MIN_KEYWORD_COUNT)
keyword_synonym = {keyword:get_web_syns(keyword) for keyword in keywords_filtered}
#print(keyword_synonym)
print("Number of keywords-synonym pairs before cleaning:",len(keyword_synonym))
# a helper function to identify and filter out keywords containing a digit - normally, you cannot find synonyms
#for such words in thesaurus
def hasNumbers(inputString):
    """Return True when *inputString* contains at least one digit character."""
    for character in inputString:
        if character.isdigit():
            return True
    return False
keyword_synonym_clean = {}
for k,v in keyword_synonym.items():
if v!=[] and not hasNumbers(k):
keyword_synonym_clean[k]=v
print("Number of keywords-synonym pairs after cleaning:",len(keyword_synonym_clean))
# peek at a few keyword-synonyms pairs
print(dict(list(keyword_synonym_clean.items())[0:5]))
# + [markdown] deletable=true editable=true
# # Part 2: Manually adding synonym entries, typically for domain specific definitions #
# ** Any synonym service would most like not be able to retrieve domain specific synonyms to acronym words. If you have such a domain specific acronym map, you can add it manually to your synonym map. **
# -
# domain specific acronyms in the taxcode world
acronym_dict = """AAA, Accumulated Adjustment Account
Acq., Acquiescence
ACRS, Accelerated Cost Recovery System
ADR, Asset Depreciation Range
ADLs, Activities of Daily Living
ADS, Alternative Depreciation System
AFR, Applicable Federal Rate
AGI, Adjusted Gross Income
AIME, Average Indexed Monthly Earnings (Social Security)
AMT, Alternative Minimum Tax
AOD, Action on Decision
ARM, Adjustable Rate Mortgage
ATG, Audit Techniques Guide
CB, Cumulative Bulletin
CCA, Chief Council Advice
CC-ITA, Chief Council - Income Tax and Accounting
CCC, Commodity Credit Corporation
CCP, Counter-Cyclical Program (government farm program)
CDHP, Consumer-Driven Health Plan
CFR, Code of Federal Regulations
CLT, Charitable Lead Trust
COBRA, Consolidated Omnibus Budget Reconciliations Act of 1985
COGS, Cost of Goods Sold
COLA, Cost of Living Adjustment
CONUS, Continental United States
CPI, Consurmer Price Index
CRT, Charitable Remainder Trust
CSRA, Community Spouse Resource Allowance
CSRS, Civil Service Retirement System
DOD, Date of Death
DOI, Discharge of Indebtedness
DP, Direct Payment (government farm program)
DPAD, Domestic Production Activities Deduction
DPAI, Domestic Production Activities Income
DPAR, Domestic Production Activities Receipts
DPGR, Domestic Production Gross Receipts
EFIN, Electronic Filing Identification Number
EFT, Electronic Funds Transfer
EFTPS, Electronic Federal Tax Payment System
EIC, Earned Income Credit
EIN, Employer Identification Number
f/b/o, For Benefit Of or For and On Behalf Of
FICA, Federal Insurance Contribution Act
FIFO, First In First Out
FLP, Family Limited Partnership
FMV, Fair Market Value
FR, Federal Register
FS, IRS Fact Sheets (example: FS-2005-10)
FSA, Flexible Spending Account or Farm Service Agency
FTD, Federal Tax Deposit
FUTA, Federal Unemployment Tax Act
GCM, General Counsel Memorandum
GDS, General Depreciation System
HDHP, High Deductible Health Plan
HOH, Head of Household
HRA, Health Reimbursement Account
HSA, Health Savings Account
IDC, Intangible Drilling Costs
ILIT, Irrevocable Life Insurance Trust
IR, IRS News Releases (example: IR-2005-2)
IRA, Individual Retirement Arrangement
IRB, Internal Revenue Bulletin
IRC, Internal Revenue Code
IRD, Income In Respect of Decedent
IRP, Information Reporting Program
ITA, Income Tax and Accounting
ITIN, Individual Taxpayer Identification Number
LDP, Loan Deficiency Payment
LIFO, Last In First Out
LLC, Limited Liability Company
LLLP, Limited Liability Limited Partnership
LP, Limited Partnership
MACRS, Modified Accelerated Cost Recovery System
MAGI, Modified Adjusted Gross Income
MFJ, Married Filing Jointly
MMMNA, Minimum Monthly Maintenance Needs Allowance
MRD, Minimum Required Distribution
MSA, Medical Savings Account (Archer MSA)
MSSP, Market Segment Specialization Program
NAICS, North American Industry Classification System
NOL, Net Operating Loss
OASDI, Old Age Survivor and Disability Insurance
OIC, Offer in Compromise
OID, Original Issue Discount
PATR, Patronage Dividend
PBA, Principal Business Activity
PCP, Posted County Price, also referred to as AWP - adjusted world price
PHC, Personal Holding Company
PIA, Primary Insurance Amount (Social Security)
PLR, Private Letter Ruling
POD, Payable on Death
PSC, Public Service Corporation
QTIP, Qualified Terminable Interest Property
RBD, Required Beginning Date
REIT, Real Estate Investment Trust
RMD, Required Minimum Distribution
SCA, Service Center Advice
SCIN, Self-Canceling Installment Note
SE, Self Employment
SEP, Simplified Employee Pension
SIC, Service Industry Code
SIMPLE, Savings Incentive Match Plan for Employees
SL, Straight-Line Depreciation
SMLLC, Single Member LLC
SSA, Social Security Administration
SSI, Supplemental Security Income
SSN, Social Security Number
SUTA, State Unemployment Tax Act
TC, Tax Court
TCMP, Taxpayer Compliance Measurement Program
TD, Treasury Decision
TIN, Taxpayer Identification Number
TIR, Technical Information Release
TOD, Transfer on Death
USC, United States Code
U/D/T, Under Declaration of Trust
UNICAP, Uniform Capitalization Rules
UTMA, Uniform Transfers to Minors Act
VITA, Volunteer Income Tax Assistance
GO Zone, Gulf Opportunity Zone
Ct. D., Court Decision
Ltr. Rul., Letter Rulings
Prop. Reg., Proposed Treasury Regulations
Pub. L., Public Law
Rev. Proc., Revenue Procedure
Rev. Rul., Revenue Ruling
"""
# ** Add the thesaurus synonyms and the acronyms to a synonym map that can later be utilized by a search engine **
# + deletable=true editable=true
OUTPUT_FILE = "keywords_synonym.txt"
file = open(OUTPUT_FILE, 'w')
# 1. add the acronyms: comma separated to indicate both ways relationship, e.g. "<=>"
file.write(acronym_dict)
# 2. add the synonyms: "=>" separated to indicate a relationship from left to right only
for k,v in keyword_synonym_clean.items():
line = k.strip() + "=>" + ','.join(v) + "\n"
file.write(line)
file.close()
# + [markdown] deletable=true editable=true
# ** Peek at a few synonym map entries **
# + deletable=true editable=true language="bash"
# cat keywords_synonym.txt | head -5 | less -S
# cat keywords_synonym.txt | tail -5 | less -S
# + deletable=true editable=true
| JupyterNotebooks/AugmentingSearch_CreatingASynonymMap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/HasibAlMuzdadid/Data-Science-Projects/blob/main/sea%20level%20predictor/sea_level_predictor.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="5IKyybPYAK8n"
# **Sea Level Predictor**
# + id="_JpND-bXwvoa"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress
# + colab={"base_uri": "https://localhost:8080/", "height": 267} id="ZI0suDYtx4DN" outputId="b512417c-4fe9-4fd0-c6c5-c53ba48d234a"
# Import Dataset
df = pd.read_csv("epa-sea-level.csv")
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="NQbzPQXUyIOS" outputId="3749e989-98e1-4da4-ab26-8b07651f14ac"
df.shape
# + colab={"base_uri": "https://localhost:8080/"} id="WqsxQmfJyasc" outputId="d50d6ec8-0159-49a4-a7ad-53796c9c0ce5"
df.dtypes
# + colab={"base_uri": "https://localhost:8080/", "height": 362} id="FCumA3lCyfiz" outputId="75549c4e-30d7-48a5-b062-1e5853665ef5"
# Summary of Dataset
df.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="AlhSoCAtyxmA" outputId="b3b36c9d-95c6-4bfe-872a-f239dc5707bb"
df.info()
# + [markdown] id="coT4iuXKze5C"
# By using matplotlib create a scatter plot using the "Year" column as the x-axis and the "CSIRO Adjusted Sea Level" column as the y-axis.
# + colab={"base_uri": "https://localhost:8080/", "height": 384} id="f8azqMG4zC2G" outputId="a7ea56dd-4505-49aa-8f52-248a8da4d998"
# Create Scatter Plot
fig, ax = plt.subplots(figsize=(16,9),dpi=100)
plt.scatter(data=df, x="Year", y="CSIRO Adjusted Sea Level")
plt.show()
# + [markdown] id="sDdOET5E04pH"
# By using the linregress function from scipy.stats get the slope and y-intercept of the line of best fit. Plot the line of best fit over the top of the scatter plot. Make the line go through the year 2050 to predict the sea level rise in 2050.
# + colab={"base_uri": "https://localhost:8080/", "height": 382} id="3LJ-NuDw0rk9" outputId="ce7caf8f-bf2c-4a5b-f59e-72552f152697"
# Create first line of best fit
fig, ax = plt.subplots(figsize=(16,9),dpi=100)
ax.scatter(data=df, x="Year", y="CSIRO Adjusted Sea Level")
# Fit a linear trend to the full record (1880 onward).
slope, intercept, r_value, p_value, std_err = linregress(df["Year"],df["CSIRO Adjusted Sea Level"])
# Extend the x-range through 2049 so the fitted line projects toward 2050.
years = pd.Series(range(1880,2050))
ax.plot(years, intercept+slope*years, "red")
plt.show()
# + [markdown] id="K7pGZAhO6NuB"
# Plot a new line of best fit just using the data from year 2000 through the most recent year in the dataset. Make the line also go through the year 2050 to predict the sea level rise in 2050 if the rate of rise continues as it has since the year 2000.
#
# The x label should be "Year", the y label should be "Sea Level (inches)", and the title should be "Rise in Sea Level".
# + colab={"base_uri": "https://localhost:8080/", "height": 397} id="jr4c0O1y6VXd" outputId="fc69d0c3-a853-484d-d6d9-4ad3b6291a63"
# Create second line of best fit
fig, ax = plt.subplots(figsize=(16,9),dpi=100)
ax.scatter(data=df, x="Year", y="CSIRO Adjusted Sea Level")
# First line of best fit
slope, intercept, r_value, p_value, std_err = linregress(df["Year"],df["CSIRO Adjusted Sea Level"])
years = pd.Series(range(1880,2050))
ax.plot(years, intercept+slope*years, "red")
# Second line of best fit
#Using the data from year 2000
df_new = df[df["Year"]>=2000]
slope_new, intercept_new, r_value_new, p_value_new, std_err_new = linregress(df_new["Year"],df_new["CSIRO Adjusted Sea Level"])
years_new = pd.Series(range(2000,2050))
ax.plot(years_new, intercept_new+slope_new*years_new, "green")
#Set labels
ax.set_title("Rise in Sea Level")
ax.set_xlabel("Year")
ax.set_ylabel("Sea Level(inches)")
plt.show()
| sea level predictor/sea_level_predictor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Santander customer satisfaction: model selection
#
# Using hyperopt to define search spaces and optimize hyperparameters.
#
# ## Imports and code
#
# Importing the base code that contains many functions, wrappers for algorithms, cross validation and model selection frameworks.
# +
# starting up a console attached to this kernel
# %matplotlib inline
# %qtconsole
import os
# importing base code
os.chdir('/home/guilherme/Documents/Kaggle/santander-satisfaction/code')
from base import *
# changing to competition dir
os.chdir('/home/guilherme/Documents/Kaggle/santander-satisfaction')
# -
# # Model selection frameworks and search spaces
#
# Fitting algorithms on raw data. Hyperparameter optimization is done using hyperopt (Tree of Parzen Estimators).
#
# ## XGBoost
#
# * **1st-round:**
# * less expensive 10-fold CV
# * choosing best data
# * tweaking class weights
# * univariate feature selection
# * no feature expansion
#
# * **2nd-round:**
# * more expensive 7-fold with 5 repetitions CV
# * improve on first round
#
# * **3rd-round:**
# * over and undersampling -> worse! stopped test
# * stability selection
#
# * **4th-round:**
# * focus on feature engineering
# +
# search space for hyperparameter optimization
xgb_space = {'model': xgb.XGBClassifier,
'params': {'n_estimators' : hp.normal('xgb_n', 500, 100),
'learning_rate' : hp.uniform('xgb_eta', 0.01, 0.03),
'max_depth' : hp.quniform('xgb_max_depth', 2, 8, 1),
'min_child_weight' : hp.quniform('xgb_min_child_weight', 1, 6, 1),
'subsample' : hp.uniform('xgb_subsample', 0.8, 1),
'gamma' : hp.uniform('xgb_gamma', 0.0, 0.4),
'colsample_bytree' : hp.uniform('xgb_colsample_bytree', 0.2, 0.8),
'objective': hp.choice('xgb_obj', ['binary:logistic']),
'scale_pos_weight': hp.uniform('xgb_w', 1.0, 4.0)
},
'preproc': {'na_input': {'strategy': 'mean'},
'var_thres': {'threshold': 0.0},
'sel_perc': {'score_func': False,
'percentile': False}
},
'resmpl': hp.choice('resmpl', [{'method': False, 'params': False}]),
'data': hp.choice('dc',[{'real': 'data/selected/st-train.csv',
'cat': None,
'ground-truth': 'data/target.csv'}]),
'feat_exp': {'n': 0}, #hp.quniform('exp_n', 0, 100, 20)
'fit_params': {'eval_metric': 'auc'},
'y_transf': hp.choice('trf', [None]),
}
# model selection
eval_number = 0
trials = Trials()
best_xgb = optimize(framework, xgb_space, 20, trials)
# saving trials
save_obj(trials, 'trials/4th-round')
# -
# # Fit Analysis
#
# Checking evolution of trials, residuals and overall fit.
# ## Visualizing trials
# +
# loading trials
trials = load_obj('trials/3rd-round')
# reading R-squared
r2 = [trials.trials[i]['result']['auc_avg'] for i in range(len(trials.trials))]
r2_std = [trials.trials[i]['result']['auc_std'] for i in range(len(trials.trials))]
# plotting trials results
plt.figure(figsize=[15,10])
plt.plot(range(len(r2)), r2, 'ko')
plt.xlabel('Trial number')
plt.ylabel('AUC')
plt.errorbar(range(len(r2)), r2, yerr=r2_std, fmt='ko')
plt.show()
# -
# ## Sorted trials
# +
# loading trials
trials = load_obj('trials/3rd-round')
# getting top 10 trials
top = [get_best(trials, i) for i in range(len(trials.trials))]
r2 = [top[i]['result']['auc_avg'] for i in range(len(top))]
r2_std = [top[i]['result']['auc_std'] for i in range(len(top))]
# plotting trials results
plt.figure(figsize=[15,10])
plt.plot(range(len(r2)), r2, 'ko')
plt.axis([-1, len(trials.trials), 0.76, 0.86])
plt.errorbar(range(len(r2)), r2, yerr=r2_std, fmt='ko')
plt.show()
# -
# ## Hyperparameter importance
#
# Let us plot the influence of each hyperparameter.
# +
# loading trials
import seaborn as sns
trials = load_obj('trials/3rd-round')
top = [get_best(trials, i) for i in range(len(trials.trials))]
result = [top[i]['result']['auc_avg'] for i in range(len(top))]
hyper_df = hypersummary(top, result, 'auc')
hyper_df
# -
g = sns.PairGrid(hyper_df, hue='auc')
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter)
g.savefig('vis/hyperparameters/2nd-round.png')
# ## ROC Curves
#
# Let us check the results in more detail.
# +
# loading trials
trials = load_obj('trials/3rd-round')
top = [get_best(trials, i) for i in range(len(trials.trials))]
# ranges
rngs = get_plot_ranges(1, 40)
# plot ROC for the best models
for i in range(len(rngs)):
plt.figure(figsize=[18,10])
count = 1
for model in rngs[i]:
# getting ith best model (0 is the best)
space = get_best(trials, ind=model)
plt.subplot(1, 1, count)
plt.plot([0, 1], [0, 1], 'k--')
plt.title('Model {0} (mean AUC = {1:.3f}) '.format(model, space['result']['auc_avg']))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
for fold in range(10):
# computing roc
probs = space['result']['results']['folds']['probs'][fold]
gnd_truth = space['result']['results']['folds']['gnd_truth'][fold]
fpr, tpr, thresholds = metrics.roc_curve(gnd_truth, probs)
auc = metrics.roc_auc_score(gnd_truth, probs)
# plotting
plt.plot(fpr, tpr, label='Fold {0} (area = {1:.3f})'.format(fold, auc))
plt.legend(loc="lower right")
count += 1
# saving full plot
directory = 'vis/roc_curves/2nd-round'
if not os.path.exists(directory):
os.makedirs(directory)
print directory + '/roc-{}.png'.format(i)
plt.savefig(directory + '/roc-{}.png'.format(i))
plt.clf()
# -
# ## Ensembling
#
# Can we get better performance if we average model predictions?
#
# Loading trials and model data:
# +
# loading trials
trials = load_obj('trials/3rd-round')
trials = [get_best(trials, i) for i in range(len(trials.trials))]
# extracting target
target_m = []
for r in range(5):
fold_m = []
for f in range(7):
fold_m.append(np.array(trials[0]['result']['results'][r]['folds']['gnd_truth'][f]))
target_m.append(fold_m)
# as numpy array
target_m = np.array(target_m)
# extracting model data
model_dict = {}
for m in range(40):
model_dict[m] = {}
probs_m = []
auc = []
for r in range(5):
fold_m = []
for f in range(7):
fold_m.append(np.array(trials[m]['result']['results'][r]['folds']['probs'][f]))
probs_m.append(fold_m)
model_dict[m]['pm'] = probs_m
model_dict[m]['auc'] = trials[m]['result']['auc_avg']
# -
def eval_7_5_fold(target_m, models, weights=None):
    """Score a model ensemble over the 7-fold x 5-repeat CV predictions.

    Parameters
    ----------
    target_m : array-like, shape (5, 7, fold_size)
        Ground-truth labels per repeat/fold.
    models : list of dict
        Each dict stores per-repeat/per-fold probability arrays under 'pm'.
    weights : sequence of float or numpy array, optional
        Ensemble weights (one per model); when None a plain average is used.

    Returns
    -------
    float
        Mean AUC across repeats, after dropping the single best and single
        worst fold of each repeat (a trimmed mean to reduce variance).
    """
    # Zero-initialised accumulator with the same (repeat, fold) layout
    # as the targets.
    probs = []
    for r in range(5):
        folds = []
        for f in range(7):
            folds.append([0] * len(target_m[r][f]))
        probs.append(folds)
    # Blend model probabilities fold-by-fold.
    for i, model in enumerate(models):
        for r in range(5):
            for f in range(7):
                # BUGFIX: use `is None` — `weights == None` on a numpy
                # array is elementwise and its truth value raises.
                if weights is None:
                    probs[r][f] = probs[r][f] + model['pm'][r][f] * 1 / len(models)
                else:
                    probs[r][f] = probs[r][f] + model['pm'][r][f] * weights[i] / sum(weights)
    # Per-repeat AUCs with the best and worst fold trimmed out.
    aucs = []
    for r in range(5):
        folds = []
        for f in range(7):
            folds.append(metrics.roc_auc_score(target_m[r][f], probs[r][f]))
        best = folds.index(max(folds))
        worst = folds.index(min(folds))
        folds = [v for j, v in enumerate(folds) if j not in [best, worst]]
        aucs.append(folds)
    return(np.mean(aucs))
# ## Simple average: greedy
#
# Joining results of best models in a greedy manner.
# Greedy forward selection: try each model in rank order and keep it only
# if adding it improves the trimmed-mean CV AUC of the ensemble.
in_models = []
model_inds = []
max_score = 0
for m in range(40):
    score = eval_7_5_fold(target_m, in_models + [model_dict[m]])
    if score > max_score:
        max_score = score
        in_models.append(model_dict[m])
        model_inds.append(m)
        # Python 2 print: log each accepted model and the running lineup.
        print 'AUC {0:.6f} for combination: {1}'.format(score, model_inds)
# ## Feature importances
#
# Checking which ones are more important.
# # Predictions
#
# Predicting on test data.
# +
# Ensemble prediction on the test set: average the predicted probabilities of
# the best models found in the 2nd hyperopt round (indices chosen above by the
# greedy/weight-optimization search).
ens_preds = []
for i in [0, 1, 2, 4, 5, 10]:
    # Retrieve the i-th best search-space configuration from the saved trials.
    trials = load_obj('trials/2nd-round')
    space = get_best(trials, i)['result']['parameters']
    # Load the training data referenced by this configuration.
    train_real = pd.read_csv(space['data']['real'])
    y_train = pd.read_csv(space['data']['ground-truth'])['TARGET']
    # Same files, test split.
    test_real = pd.read_csv(space['data']['real'].replace('train', 'test'))
    # Feature expansion (log ops per the search space), then sparse conversion.
    feat_exp = FeatureExpansion()
    train_real = feat_exp.transform(train_real, space['feat_exp']['op_log'])
    train_real = csr_matrix(train_real)
    test_real = feat_exp.transform(test_real, space['feat_exp']['op_log'])
    test_real = csr_matrix(test_real)
    # Stack categorical features when this configuration uses them; fall back
    # to real-valued features only otherwise. Narrowed from a bare `except:`
    # so SystemExit/KeyboardInterrupt are no longer silently swallowed.
    try:
        train_cat = load_obj(space['data']['cat'])
        train = hstack([train_real, train_cat]).tocsr()
        test_cat = load_obj(space['data']['cat'].replace('train', 'test'))
        test = hstack([test_real, test_cat]).tocsr()
    except Exception:
        train = train_real.tocsr()
        test = test_real.tocsr()
    # Hyperopt samples floats; cast the integer-valued params back to int.
    space['params']['n_estimators'] = int(space['params']['n_estimators'])
    space['preproc']['sel_perc']['percentile'] = int(space['preproc']['sel_perc']['percentile'])
    # Build the model declared in the search space, fit, and predict.
    algo = space['model'](**space['params'])
    y_transform = TargetTransform(space['y_transf'])
    model = sklearn_wrapper(algo, space['preproc'], y_transform)
    model.fit(train, y_train, fit_params={'eval_metric': 'auc'})
    probs = model.predict_proba(test)
    ens_preds.append(probs[:, 1])
    sys.stdout.write('{}, '.format(i))
# Test-set IDs for the submission file.
ID_col = pd.read_csv('data/raw/test.csv')['ID']
# Unweighted average of the member predictions.
ens_preds = np.mean(ens_preds, axis=0)
# Weighted-average variant kept for reference (weights from the optimizer):
#weights = np.array([0.19339375, 0.92619747, 0.01661305, 0.39647386, 0.49869799, 0.31282152])
#new_preds = np.transpose(np.array(ens_preds)) * np.transpose(weights)
#new_preds = np.sum(new_preds, axis=1)/sum(weights)
# making a submission
sub = pd.DataFrame({'ID': np.array(ID_col).astype(int), 'TARGET': ens_preds})
sub.to_csv('submissions/sub11.csv', index=False)
# -
# ## Submission log
#
# * **sub7:** single xgb, 1st round best - CV: 0.842 | LB: 0.840010.
# * **sub8:** ensemble of 20 best models of 1st round - CV: ? | LB: 0.841072.
# * **sub9:** best model of 2nd round - CV: 0.842240 | LB: 0.840917
# * **sub10:** greedy optimization ensemble of 2nd round models (output average) - CV: 0.842503 | LB: 0.841378
# * **sub11:** optimization of weights of ensemble of 2nd round models (output weighted average) - CV: 0.842525 | LB:
# ## Rules of thumb
#
# Some rules of thumb can be added in the submission, such as everyone under age 23 is happy. Let us see what happens.
# Start from the sub10 ensemble predictions and the de-duplicated test features;
# the rules of thumb below will overwrite selected rows of `preds`.
preds = pd.read_csv('submissions/sub10.csv')['TARGET']
tc = pd.read_csv('data/no-duplicates/test.csv')
# +
# Hand-crafted "rules of thumb": force the predicted probability to 0 for
# customer segments whose feature values indicate they are (per training data)
# essentially always satisfied.
nv = tc['num_var33'] + tc['saldo_medio_var33_ult3'] + tc['saldo_medio_var44_hace2'] + tc['saldo_medio_var44_hace3'] + tc['saldo_medio_var33_ult1'] + tc['saldo_medio_var44_ult1']
# BUG FIX: the original `preds[nv > 0][1] = 0` chain-assigned into a temporary
# copy returned by the boolean mask, so it never modified `preds` (pandas
# SettingWithCopy). Assign through the mask directly, matching the lines below.
preds[nv > 0] = 0
preds[tc['var15'] < 23] = 0
preds[tc['saldo_medio_var5_hace2'] > 160000] = 0
preds[tc['saldo_var33'] > 0] = 0
preds[tc['var38'] > 3988596] = 0
preds[tc['var21'] > 7500] = 0
preds[tc['num_var30'] > 9] = 0
preds[tc['num_var13_0'] > 6] = 0
preds[tc['num_var33_0'] > 0] = 0
preds[tc['imp_ent_var16_ult1'] > 51003] = 0
preds[tc['imp_op_var39_comer_ult3'] > 13184] = 0
preds[tc['saldo_medio_var5_ult3'] > 108251] = 0
# Test-set IDs for the submission file.
ID_col = tc['ID']
# making a submission
sub = pd.DataFrame({'ID': np.array(ID_col).astype(int), 'TARGET': preds})
sub.to_csv('submissions/sub12.csv', index=False)
# -
| notebooks/model-selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ur8xi4C7S06n"
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="zFWqF3Y4Gilg"
# # Guide to Building End-to-End Reinforcement Learning Application Pipelines using Vertex AI
# + [markdown] id="JAPoU8Sm5E6e"
# <table align="left">
#
# <td>
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/community-content/tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/mlops_pipeline_tf_agents_bandits_movie_recommendation/mlops_pipeline_tf_agents_bandits_movie_recommendation.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/community-content/tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/mlops_pipeline_tf_agents_bandits_movie_recommendation/mlops_pipeline_tf_agents_bandits_movie_recommendation.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# </table>
# + [markdown] id="tvgnzT1CKxrO"
# ## Overview
#
# This demo showcases the use of [TF-Agents](https://www.tensorflow.org/agents), [Kubeflow Pipelines (KFP)](https://www.kubeflow.org/docs/components/pipelines/overview/pipelines-overview/) and [Vertex AI](https://cloud.google.com/vertex-ai), particularly [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines), in building an end-to-end reinforcement learning (RL) pipeline of a movie recommendation system. The demo is intended for developers who want to create RL applications using TensorFlow, TF-Agents and Vertex AI services, and those who want to build end-to-end production pipelines using KFP and Vertex Pipelines. It is recommended for developers to have familiarity with RL and the contextual bandits formulation, and the TF-Agents interface.
#
# ### Dataset
#
# This demo uses the [MovieLens 100K](https://www.kaggle.com/prajitdatta/movielens-100k-dataset) dataset to simulate an environment with users and their respective preferences. It is available at `gs://cloud-samples-data/vertex-ai/community-content/tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/u.data`.
#
# ### Objective
#
# In this notebook, you will learn how to build an end-to-end RL pipeline for a TF-Agents (particularly the bandits module) based movie recommendation system, using [KFP](https://www.kubeflow.org/docs/components/pipelines/overview/pipelines-overview/), [Vertex AI](https://cloud.google.com/vertex-ai) and particularly [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines) which is fully managed and highly scalable.
#
# This Vertex Pipeline includes the following components:
# 1. *Generator* to generate MovieLens simulation data
# 2. *Ingester* to ingest data
# 3. *Trainer* to train the RL policy
# 4. *Deployer* to deploy the trained policy to a Vertex AI endpoint
#
# After pipeline construction, you (1) create the *Simulator* (which utilizes Cloud Functions, Cloud Scheduler and Pub/Sub) to send simulated MovieLens prediction requests, (2) create the *Logger* to asynchronously log prediction inputs and results (which utilizes Cloud Functions, Pub/Sub and a hook in the prediction code), and (3) create the *Trigger* to trigger recurrent re-training.
#
# A more general ML pipeline is demonstrated in [MLOps on Vertex AI](https://github.com/ksalama/ucaip-labs).
#
# ### Costs
#
# This tutorial uses billable components of Google Cloud:
#
# * Vertex AI
# * BigQuery
# * Cloud Build
# * Cloud Functions
# * Cloud Scheduler
# * Cloud Storage
# * Pub/Sub
#
# Learn about [Vertex AI
# pricing](https://cloud.google.com/vertex-ai/pricing), [BigQuery pricing](https://cloud.google.com/bigquery/pricing), [Cloud Build](https://cloud.google.com/build/pricing), [Cloud Functions](https://cloud.google.com/functions/pricing), [Cloud Scheduler](https://cloud.google.com/scheduler/pricing), [Cloud Storage
# pricing](https://cloud.google.com/storage/pricing), and [Pub/Sub pricing](https://cloud.google.com/pubsub/pricing), and use the [Pricing
# Calculator](https://cloud.google.com/products/calculator/)
# to generate a cost estimate based on your projected usage.
# + [markdown] id="ze4-nDLfK4pw"
# ### Set up your local development environment
#
# **If you are using Colab or Google Cloud Notebooks**, your environment already meets
# all the requirements to run this notebook. You can skip this step.
# + [markdown] id="gCuSR8GkAgzl"
# **Otherwise**, make sure your environment meets this notebook's requirements.
# You need the following:
#
# * The Google Cloud SDK
# * Git
# * Python 3
# * virtualenv
# * Jupyter notebook running in a virtual environment with Python 3
#
# The Google Cloud guide to [Setting up a Python development
# environment](https://cloud.google.com/python/setup) and the [Jupyter
# installation guide](https://jupyter.org/install) provide detailed instructions
# for meeting these requirements. The following steps provide a condensed set of
# instructions:
#
# 1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)
#
# 1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)
#
# 1. [Install
# virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)
# and create a virtual environment that uses Python 3. Activate the virtual environment.
#
# 1. To install Jupyter, run `pip3 install jupyter` on the
# command-line in a terminal shell.
#
# 1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.
#
# 1. Open this notebook in the Jupyter Notebook Dashboard.
# + [markdown] id="i7EUnXsZhAGF"
# ### Install additional packages
#
# Install additional package dependencies not installed in your notebook environment, such as the Kubeflow Pipelines (KFP) SDK.
# + id="2b4ef9b72d43"
import os

# Google Cloud Notebooks expose this marker file; its presence tells us we are
# running on that managed product.
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")

# On Google Cloud Notebooks, pip dependencies must be installed with '--user'.
USER_FLAG = "--user" if IS_GOOGLE_CLOUD_NOTEBOOK else ""
# + id="wyy5Lbnzg5fi"
# ! pip3 install {USER_FLAG} google-cloud-aiplatform
# ! pip3 install {USER_FLAG} google-cloud-pipeline-components
# ! pip3 install {USER_FLAG} --upgrade kfp
# ! pip3 install {USER_FLAG} numpy
# ! pip3 install {USER_FLAG} --upgrade tensorflow
# ! pip3 install {USER_FLAG} --upgrade pillow
# ! pip3 install {USER_FLAG} --upgrade tf-agents
# ! pip3 install {USER_FLAG} --upgrade fastapi
# + [markdown] id="hhq5zEbGg0XX"
# ### Restart the kernel
#
# After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
# + id="EzrelQZ22IZj"
# Restart the kernel after installs so the newly installed packages become
# importable (skipped in CI when IS_TESTING is set).
import os
if not os.getenv("IS_TESTING"):
    # Automatically restart kernel after installs
    import IPython
    app = IPython.Application.instance()
    # do_shutdown(True) shuts the kernel down and asks the client to restart it.
    app.kernel.do_shutdown(True)
# + [markdown] id="lWEdiXsJg0XY"
# ## Before you begin
#
# ### Select a GPU runtime
#
# **Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select "Runtime --> Change runtime type > GPU"**
# + [markdown] id="BF1j6f9HApxa"
# ### Set up your Google Cloud project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
#
# 1. [Enable the Vertex AI API, BigQuery API, Cloud Build, Cloud Functions, Cloud Scheduler, Cloud Storage, and Pub/Sub API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,bigquery.googleapis.com,build.googleapis.com,functions.googleapis.com,scheduler.googleapis.com,storage.googleapis.com,pubsub.googleapis.com).
#
# 1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).
#
# 1. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
# + [markdown] id="WReHDGG5g0XY"
# #### Set your project ID
#
# **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
# + id="oM1iC_MfAts1"
import os
# Will hold the GCP project ID; detected from gcloud below or set manually in
# the next cell.
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
    # shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null
    # NOTE(review): `shell_output` is created by the Jupyter shell-magic line
    # above (rendered as a comment in .py form). This cell only works inside a
    # notebook where that magic executes; as plain Python it raises NameError.
    PROJECT_ID = shell_output[0]
    print("Project ID: ", PROJECT_ID)
# + [markdown] id="qJYoRfYng0XZ"
# Otherwise, set your project ID here.
# + id="riG_qUokg0XZ"
# Fall back to a placeholder when gcloud could not supply a project ID.
if PROJECT_ID in ("", None):
    PROJECT_ID = "[your-project-id]"  # @param {type:"string"}
# + [markdown] id="06571eb4063b"
# #### Timestamp
#
# If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
# + id="697568e92bd6"
from datetime import datetime

# Session timestamp (YYYYMMDDhhmmss) appended to resource names to avoid
# collisions between users sharing a test project.
TIMESTAMP = f"{datetime.now():%Y%m%d%H%M%S}"
# + [markdown] id="dr--iN2kAylZ"
# ### Authenticate your Google Cloud account
#
# **If you are using Google Cloud Notebooks**, your environment is already
# authenticated. Skip this step.
# + [markdown] id="sBCra4QMA2wR"
# **If you are using Colab**, run the cell below and follow the instructions
# when prompted to authenticate your account via oAuth.
#
# **Otherwise**, follow these steps:
#
# 1. In the Cloud Console, go to the [**Create service account key**
# page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).
#
# 2. Click **Create service account**.
#
# 3. In the **Service account name** field, enter a name, and
# click **Create**.
#
# 4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "Vertex AI"
# into the filter box, and select
# **Vertex AI Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
#
# 5. Click *Create*. A JSON file that contains your key downloads to your
# local environment.
#
# 6. Enter the path to your service account key as the
# `GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
# + id="PyQmSRbKA8r-"
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# The Google Cloud Notebook product has specific requirements
IS_GOOGLE_CLOUD_NOTEBOOK = os.path.exists("/opt/deeplearning/metadata/env_version")
# If on Google Cloud Notebooks, then don't execute this code (the environment
# is already authenticated).
if not IS_GOOGLE_CLOUD_NOTEBOOK:
    if "google.colab" in sys.modules:
        # Colab: interactive OAuth flow.
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    # NOTE(review): the %env magic below is the branch body; this cell is only
    # valid when executed as a notebook, not as a plain .py script.
    elif not os.getenv("IS_TESTING"):
        # %env GOOGLE_APPLICATION_CREDENTIALS ''
# + [markdown] id="zgPO1eR3CYjk"
# ### Create a Cloud Storage bucket
#
# **The following steps are required, regardless of your notebook environment.**
#
# In this tutorial, a Cloud Storage bucket holds the MovieLens dataset files to be used for model training. Vertex AI also saves the trained model that results from your training job in the same bucket. Using this model artifact, you can then create Vertex AI model and endpoint resources in order to serve online predictions.
#
# Set the name of your Cloud Storage bucket below. It must be unique across all
# Cloud Storage buckets.
#
# You may also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Make sure to [choose a region where Vertex AI services are
# available](https://cloud.google.com/vertex-ai/docs/general/locations#available_regions). You may
# not use a Multi-Regional Storage bucket for training with Vertex AI. Also note that Vertex
# Pipelines is currently only supported in select regions such as "us-central1" ([reference](https://cloud.google.com/vertex-ai/docs/general/locations)).
# + id="MzGDU7TWdts_"
# Cloud Storage bucket for data/artifacts (must be globally unique) and the
# region used for all Vertex AI operations (Vertex Pipelines requires a
# supported region such as "us-central1").
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}
REGION = "[your-region]"  # @param {type:"string"}
# + id="cf221059d072"
# Derive a default bucket name from the project ID and session timestamp when
# none was provided.
# NOTE(review): there is no separator between PROJECT_ID and "aip-", producing
# names like "gs://myprojectaip-20210101..." — confirm this is intended.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
# + [markdown] id="-EcIXiGsCePi"
# **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
# + id="NIq7R4HZCfIc"
# ! gsutil mb -l $REGION $BUCKET_NAME
# + [markdown] id="ucvCsknMCims"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
# + id="vhOb7YnwClBb"
# ! gsutil ls -al $BUCKET_NAME
# + [markdown] id="XoEqT2Y4DJmf"
# ### Import libraries and define constants
# + id="pRUOFELefqf1"
import os
import sys
from google.cloud import aiplatform
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import compiler, dsl
from kfp.v2.google.client import AIPlatformClient
# + [markdown] id="eIMBwJpVVaU3"
# #### Fill out the following configurations
# + id="PhWoA2uCVaU3"
# BigQuery parameters (used for the Generator, Ingester, Logger).
# The table holds the simulated MovieLens training records.
BIGQUERY_DATASET_ID = f"{PROJECT_ID}.movielens_dataset"  # @param {type:"string"} BigQuery dataset ID as `project_id.dataset_id`.
BIGQUERY_LOCATION = "us"  # @param {type:"string"} BigQuery dataset region.
BIGQUERY_TABLE_ID = f"{BIGQUERY_DATASET_ID}.training_dataset"  # @param {type:"string"} BigQuery table ID as `project_id.dataset_id.table_id`.
# + [markdown] id="fuN1uU27VaU3"
# #### Set additional configurations
#
# You may use the default values below as is.
# + id="895ac243c125"
# Dataset parameters: GCS destination for the MovieLens 100K "u.data" file
# (copied there by the gsutil command in the next cell). Update the bucket.
RAW_DATA_PATH = "gs://[your-bucket-name]/raw_data/u.data"  # @param {type:"string"}
# + id="62bfb9a820f6"
# Download the sample data into your RAW_DATA_PATH
# ! gsutil cp "gs://cloud-samples-data/vertex-ai/community-content/tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/u.data" $RAW_DATA_PATH
# + id="H3530hdGGilo"
# Pipeline parameters
PIPELINE_NAME = "movielens-pipeline"  # Pipeline display name.
ENABLE_CACHING = False  # Whether to enable execution caching for the pipeline.
PIPELINE_ROOT = f"{BUCKET_NAME}/pipeline"  # Root directory for pipeline artifacts.
PIPELINE_SPEC_PATH = "metadata_pipeline.json"  # Path to pipeline specification file.
OUTPUT_COMPONENT_SPEC = "output-component.yaml"  # Output component specification file.
# BigQuery parameters (used for the Generator, Ingester, Logger)
BIGQUERY_TMP_FILE = (
    "tmp.json"  # Temporary file for storing data to be loaded into BigQuery.
)
# NOTE(review): 5 rows is a very small ingestion cap — presumably a demo
# value; raise it for a real training run.
BIGQUERY_MAX_ROWS = 5  # Maximum number of rows of data in BigQuery to ingest.
# Dataset parameters
TFRECORD_FILE = (
    f"{BUCKET_NAME}/trainer_input_path/*"  # TFRecord file to be used for training.
)
# Logger parameters (also used for the Logger hook in the prediction container)
LOGGER_PUBSUB_TOPIC = "logger-pubsub-topic"  # Pub/Sub topic name for the Logger.
LOGGER_CLOUD_FUNCTION = "logger-cloud-function"  # Cloud Functions name for the Logger.
# + [markdown] id="E6ppE7imft-y"
# ## Create the RL pipeline components
#
# This section consists of the following steps:
# 1. Create the *Generator* to generate MovieLens simulation data
# 2. Create the *Ingester* to ingest data
# 3. Create the *Trainer* to train the RL policy
# 4. Create the *Deployer* to deploy the trained policy to a Vertex AI endpoint
#
# After pipeline construction, create the *Simulator* to send simulated MovieLens prediction requests, create the *Logger* to asynchronously log prediction inputs and results, and create the *Trigger* to trigger re-training.
#
# Here's the entire workflow:
# 1. The startup pipeline has the following components: Generator --> Ingester --> Trainer --> Deployer. This pipeline only runs once.
# 2. Then, the Simulator generates prediction requests (e.g. every 5 mins), and the Logger gets invoked immediately at each prediction request and logs each prediction request asynchronously into BigQuery. The Trigger runs the re-training pipeline (e.g. every 30 mins) with the following components: Ingester --> Trainer --> Deployer.
#
# You can find the KFP SDK documentation [here](https://www.kubeflow.org/docs/components/pipelines/sdk/sdk-overview/).
# + [markdown] id="dxTLuuWEGilo"
# ### Create the *Generator* to generate MovieLens simulation data
#
# Create the Generator component to generate the initial set of training data using a MovieLens simulation environment and a random data-collecting policy. Store the generated data in BigQuery.
#
# The Generator source code is [`src/generator/generator_component.py`](src/generator/generator_component.py).
# + [markdown] id="ay1ztxwIGilo"
# #### Run unit tests on the Generator component
#
# Before running the command, you should update the `RAW_DATA_PATH` in [`src/generator/test_generator_component.py`](src/generator/test_generator_component.py).
# + id="R9FQacKtGilo"
# ! python3 -m unittest src.generator.test_generator_component
# + [markdown] id="1gpYFPPBOWQP"
# ### Create the *Ingester* to ingest data
#
# Create the Ingester component to ingest data from BigQuery, package them as `tf.train.Example` objects, and output TFRecord files.
#
# Read more about `tf.train.Example` and TFRecord [here](https://www.tensorflow.org/tutorials/load_data/tfrecord).
#
# The Ingester component source code is in [`src/ingester/ingester_component.py`](src/ingester/ingester_component.py).
# + [markdown] id="ZQkLU7wyOWQP"
# #### Run unit tests on the Ingester component
# + id="Ej4rNnnEOWQP"
# ! python3 -m unittest src.ingester.test_ingester_component
# + [markdown] id="KFdSpGAWWkL9"
# ### Create the *Trainer* to train the RL policy
#
# Create the Trainer component to train a RL policy on the training dataset, and then submit a remote custom training job to Vertex AI. This component trains a policy using the TF-Agents LinUCB agent on the MovieLens simulation dataset, and saves the trained policy as a SavedModel.
#
# The Trainer component source code is in [`src/trainer/trainer_component.py`](src/trainer/trainer_component.py). You use additional Vertex AI platform code in pipeline construction to submit the training code defined in Trainer as a custom training job to Vertex AI. (The additional code is similar to what [`kfp.v2.google.experimental.run_as_aiplatform_custom_job`](https://github.com/kubeflow/pipelines/blob/master/sdk/python/kfp/v2/google/experimental/custom_job.py) does. You can find an example notebook [here](https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/official/pipelines/google_cloud_pipeline_components_model_train_upload_deploy.ipynb) for how to use that first-party Trainer component.)
#
# The Trainer performs off-policy training, where you train a policy on a static set of pre-collected data records containing information including observation, action and reward. For a data record, the policy in training might not output the same action given the observation in that data record.
#
# If you're interested in pipeline metrics, read about [KFP Pipeline Metrics](https://www.kubeflow.org/docs/components/pipelines/sdk/pipelines-metrics/) here.
# + id="fh5S-bcCcHn3"
# Trainer parameters for the Vertex AI custom training job.
TRAINING_ARTIFACTS_DIR = (
    f"{BUCKET_NAME}/artifacts"  # Root directory for training artifacts.
)
TRAINING_REPLICA_COUNT = 1  # Number of replicas for the custom training job.
TRAINING_MACHINE_TYPE = (
    "n1-standard-4"  # Type of machine to run the custom training job.
)
TRAINING_ACCELERATOR_TYPE = "ACCELERATOR_TYPE_UNSPECIFIED"  # Type of accelerators for the custom training job.
TRAINING_ACCELERATOR_COUNT = 0  # Number of accelerators for the custom training job.
# + [markdown] id="MH3UOVU8WkL9"
# #### Run unit tests on the Trainer component
# + id="CEJ7_ymvWkL9"
# ! python3 -m unittest src.trainer.test_trainer_component
# + [markdown] id="5cz7h6V4ibYb"
# ### Create the *Deployer* to deploy the trained policy to a Vertex AI endpoint
#
# Use [`google_cloud_pipeline_components.aiplatform`](https://cloud.google.com/vertex-ai/docs/pipelines/build-pipeline#google-cloud-components) components during pipeline construction to:
# 1. Upload the trained policy
# 2. Create a Vertex AI endpoint
# 3. Deploy the uploaded trained policy to the endpoint
#
# These 3 components formulate the Deployer. They support flexible configurations; for instance, if you want to set up traffic splitting for the endpoint to run A/B testing, you may pass in your configurations to [google_cloud_pipeline_components.aiplatform.ModelDeployOp](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.1.3/google_cloud_pipeline_components.aiplatform.html#google_cloud_pipeline_components.aiplatform.ModelDeployOp).
# + id="E7dbkbDMcR-m"
# Deployer parameters for uploading the trained policy and serving it.
TRAINED_POLICY_DISPLAY_NAME = (
    "movielens-trained-policy"  # Display name of the uploaded and deployed policy.
)
TRAFFIC_SPLIT = {"0": 100}  # Route 100% of traffic to the newly deployed policy.
ENDPOINT_DISPLAY_NAME = "movielens-endpoint"  # Display name of the prediction endpoint.
ENDPOINT_MACHINE_TYPE = "n1-standard-4"  # Type of machine of the prediction endpoint.
ENDPOINT_REPLICA_COUNT = 1  # Number of replicas of the prediction endpoint.
ENDPOINT_ACCELERATOR_TYPE = "ACCELERATOR_TYPE_UNSPECIFIED"  # Type of accelerators for the prediction endpoint.
ENDPOINT_ACCELERATOR_COUNT = 0  # Number of accelerators for the prediction endpoint.
# + [markdown] id="Ldr0yDs6ibYb"
# ### Create a custom prediction container using Cloud Build
#
# Before setting up the Deployer, define and build a custom prediction container that serves predictions using the trained policy. The source code, Cloud Build YAML configuration file and Dockerfile are in `src/prediction_container`.
#
# This prediction container is the serving container for the deployed, trained policy. See a more detailed guide on building prediction custom containers [here](https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/community-content/tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/step_by_step_sdk_tf_agents_bandits_movie_recommendation/step_by_step_sdk_tf_agents_bandits_movie_recommendation.ipynb).
# + id="cKZd3Q_Pcfzo"
# Prediction container parameters (custom serving image built by Cloud Build).
PREDICTION_CONTAINER = "prediction-container"  # Name of the container image.
PREDICTION_CONTAINER_DIR = "src/prediction_container"  # Source dir with Dockerfile, cloudbuild.yaml and prediction code.
# + [markdown] id="jkReLZUAibYb"
# #### Create a Cloud Build YAML file using Kaniko build
#
# Note: For this application, you are recommended to use E2_HIGHCPU_8 or other high-resource machine configurations instead of the standard machine type listed [here](https://cloud.google.com/build/docs/api/reference/rest/v1/projects.builds#Build.MachineType) to prevent out-of-memory errors.
# + id="RUvEWUucibYc"
# Cloud Build spec using the Kaniko builder: layer caching is enabled
# ("--cache=true", 99h TTL), and the artifacts dir / project ID / Logger
# Pub/Sub topic are baked into the image environment for the serving code.
cloudbuild_yaml = """steps:
- name: "gcr.io/kaniko-project/executor:latest"
  args: ["--destination=gcr.io/{PROJECT_ID}/{PREDICTION_CONTAINER}:latest",
         "--cache=true",
         "--cache-ttl=99h"]
  env: ["AIP_STORAGE_URI={ARTIFACTS_DIR}",
        "PROJECT_ID={PROJECT_ID}",
        "LOGGER_PUBSUB_TOPIC={LOGGER_PUBSUB_TOPIC}"]
options:
  machineType: "E2_HIGHCPU_8"
""".format(
    PROJECT_ID=PROJECT_ID,
    PREDICTION_CONTAINER=PREDICTION_CONTAINER,
    ARTIFACTS_DIR=TRAINING_ARTIFACTS_DIR,
    LOGGER_PUBSUB_TOPIC=LOGGER_PUBSUB_TOPIC,
)
# Write the rendered spec next to the Dockerfile for `gcloud builds submit`.
with open(f"{PREDICTION_CONTAINER_DIR}/cloudbuild.yaml", "w") as fp:
    fp.write(cloudbuild_yaml)
# + [markdown] id="kByjVm5yibYc"
# #### Run unit tests on the prediction code
# + id="tzLU1V6fibYc"
# ! python3 -m unittest src.prediction_container.test_main
# + [markdown] id="RelRBSFvibYc"
# #### Build custom prediction container
# + id="9uHbODeXibYd"
# ! gcloud builds submit --config $PREDICTION_CONTAINER_DIR/cloudbuild.yaml $PREDICTION_CONTAINER_DIR
# + [markdown] id="nW154IyqGilq"
# ## Author and run the RL pipeline
#
# You author the pipeline using custom KFP components built from the previous section, and [create a pipeline run](https://cloud.google.com/vertex-ai/docs/pipelines/run-pipeline#kubeflow-pipelines-sdk) using Vertex Pipelines. You can read more about whether to enable execution caching [here](https://cloud.google.com/vertex-ai/docs/pipelines/build-pipeline#caching). You can also specifically configure the worker pool spec for training if for instance you want to train at scale and/or at a higher speed; you can adjust the replica count, machine type, accelerator type and count, and many other specifications.
#
# Here, you build a "startup" pipeline that generates randomly sampled training data (with the Generator) as the first step. This pipeline runs only once.
# + id="cvXJzhSSGilq"
from google_cloud_pipeline_components.experimental.custom_job import utils
from kfp.components import load_component_from_url

# Load the Generator, Ingester and Trainer KFP components from their
# component.yaml specifications on GitHub (pinned to a specific commit, so the
# pipeline is reproducible). Requires network access.
generate_op = load_component_from_url(
    "https://raw.githubusercontent.com/GoogleCloudPlatform/vertex-ai-samples/62a2a7611499490b4b04d731d48a7ba87c2d636f/community-content/tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/mlops_pipeline_tf_agents_bandits_movie_recommendation/src/generator/component.yaml"
)
ingest_op = load_component_from_url(
    "https://raw.githubusercontent.com/GoogleCloudPlatform/vertex-ai-samples/62a2a7611499490b4b04d731d48a7ba87c2d636f/community-content/tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/mlops_pipeline_tf_agents_bandits_movie_recommendation/src/ingester/component.yaml"
)
train_op = load_component_from_url(
    "https://raw.githubusercontent.com/GoogleCloudPlatform/vertex-ai-samples/62a2a7611499490b4b04d731d48a7ba87c2d636f/community-content/tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/mlops_pipeline_tf_agents_bandits_movie_recommendation/src/trainer/component.yaml"
)
@dsl.pipeline(pipeline_root=PIPELINE_ROOT, name=f"{PIPELINE_NAME}-startup")
def pipeline(
    # Pipeline configs
    project_id: str,
    raw_data_path: str,
    training_artifacts_dir: str,
    # BigQuery configs
    bigquery_dataset_id: str,
    bigquery_location: str,
    bigquery_table_id: str,
    bigquery_max_rows: int = 10000,
    # TF-Agents RL configs
    batch_size: int = 8,
    rank_k: int = 20,
    num_actions: int = 20,
    driver_steps: int = 3,
    num_epochs: int = 5,
    tikhonov_weight: float = 0.01,
    agent_alpha: float = 10,
) -> None:
    """Authors a RL pipeline for MovieLens movie recommendation system.

    Integrates the Generator, Ingester, Trainer and Deployer components. This
    pipeline generates initial training data with a random policy and runs once
    as the initiation of the system.

    Args:
      project_id: GCP project ID. This is required because otherwise the BigQuery
        client will use the ID of the tenant GCP project created as a result of
        KFP, which doesn't have proper access to BigQuery.
      raw_data_path: Path to MovieLens 100K's "u.data" file.
      training_artifacts_dir: Path to store the Trainer artifacts (trained policy).
      bigquery_dataset_id: A string of the BigQuery dataset ID in the format of
        "project.dataset".
      bigquery_location: A string of the BigQuery dataset location.
      bigquery_table_id: A string of the BigQuery table ID in the format of
        "project.dataset.table".
      bigquery_max_rows: Optional; maximum number of rows to ingest.
      batch_size: Optional; batch size of environment generated quantities eg.
        rewards.
      rank_k: Optional; rank for matrix factorization in the MovieLens environment;
        also the observation dimension.
      num_actions: Optional; number of actions (movie items) to choose from.
      driver_steps: Optional; number of steps to run per batch.
      num_epochs: Optional; number of training epochs.
      tikhonov_weight: Optional; LinUCB Tikhonov regularization weight of the
        Trainer.
      agent_alpha: Optional; LinUCB exploration parameter that multiplies the
        confidence intervals of the Trainer.
    """
    # Run the Generator component: simulate MovieLens data with a random
    # policy and store it in BigQuery.
    generate_task = generate_op(
        project_id=project_id,
        raw_data_path=raw_data_path,
        batch_size=batch_size,
        rank_k=rank_k,
        num_actions=num_actions,
        driver_steps=driver_steps,
        bigquery_tmp_file=BIGQUERY_TMP_FILE,
        bigquery_dataset_id=bigquery_dataset_id,
        bigquery_location=bigquery_location,
        bigquery_table_id=bigquery_table_id,
    )
    # Run the Ingester component: BigQuery rows -> TFRecord files. Consuming
    # generate_task's output makes it run after the Generator.
    ingest_task = ingest_op(
        project_id=project_id,
        bigquery_table_id=generate_task.outputs["bigquery_table_id"],
        bigquery_max_rows=bigquery_max_rows,
        tfrecord_file=TFRECORD_FILE,
    )
    # Run the Trainer component and submit custom job to Vertex AI.
    # Convert the train_op component into a Vertex AI Custom Job pre-built component.
    custom_job_training_op = utils.create_custom_training_job_op_from_component(
        component_spec=train_op,
        replica_count=TRAINING_REPLICA_COUNT,
        machine_type=TRAINING_MACHINE_TYPE,
        accelerator_type=TRAINING_ACCELERATOR_TYPE,
        accelerator_count=TRAINING_ACCELERATOR_COUNT,
    )
    train_task = custom_job_training_op(
        training_artifacts_dir=training_artifacts_dir,
        tfrecord_file=ingest_task.outputs["tfrecord_file"],
        num_epochs=num_epochs,
        rank_k=rank_k,
        num_actions=num_actions,
        tikhonov_weight=tikhonov_weight,
        agent_alpha=agent_alpha,
        project=PROJECT_ID,
        location=REGION,
    )
    # Run the Deployer components.
    # Upload the trained policy as a model.
    model_upload_op = gcc_aip.ModelUploadOp(
        project=project_id,
        display_name=TRAINED_POLICY_DISPLAY_NAME,
        artifact_uri=train_task.outputs["training_artifacts_dir"],
        serving_container_image_uri=f"gcr.io/{PROJECT_ID}/{PREDICTION_CONTAINER}:latest",
    )
    # Create a Vertex AI endpoint. (This operation can occur in parallel with
    # the Generator, Ingester, Trainer components.)
    endpoint_create_op = gcc_aip.EndpointCreateOp(
        project=project_id, display_name=ENDPOINT_DISPLAY_NAME
    )
    # Deploy the uploaded, trained policy to the created endpoint. (This operation
    # has to occur after both model uploading and endpoint creation complete.)
    gcc_aip.ModelDeployOp(
        endpoint=endpoint_create_op.outputs["endpoint"],
        model=model_upload_op.outputs["model"],
        deployed_model_display_name=TRAINED_POLICY_DISPLAY_NAME,
        traffic_split=TRAFFIC_SPLIT,
        dedicated_resources_machine_type=ENDPOINT_MACHINE_TYPE,
        dedicated_resources_accelerator_type=ENDPOINT_ACCELERATOR_TYPE,
        dedicated_resources_accelerator_count=ENDPOINT_ACCELERATOR_COUNT,
        dedicated_resources_min_replica_count=ENDPOINT_REPLICA_COUNT,
    )
# + id="icYK0WoRGilr"
# Compile the authored pipeline into a JSON pipeline spec.
compiler.Compiler().compile(pipeline_func=pipeline, package_path=PIPELINE_SPEC_PATH)
# Create a pipeline run job on Vertex Pipelines; parameter_values fill the
# pipeline function's arguments that have no defaults.
job = aiplatform.PipelineJob(
    display_name=f"{PIPELINE_NAME}-startup",
    template_path=PIPELINE_SPEC_PATH,
    pipeline_root=PIPELINE_ROOT,
    parameter_values={
        # Pipeline configs
        "project_id": PROJECT_ID,
        "raw_data_path": RAW_DATA_PATH,
        "training_artifacts_dir": TRAINING_ARTIFACTS_DIR,
        # BigQuery configs
        "bigquery_dataset_id": BIGQUERY_DATASET_ID,
        "bigquery_location": BIGQUERY_LOCATION,
        "bigquery_table_id": BIGQUERY_TABLE_ID,
    },
    enable_caching=ENABLE_CACHING,
)
# Blocks until the pipeline run completes.
job.run()
# + [markdown] id="_YDDhAx5i1UL"
# ## Create the *Simulator* to send simulated MovieLens prediction requests
#
# Create the Simulator to [obtain observations](https://github.com/tensorflow/agents/blob/v0.8.0/tf_agents/bandits/environments/movielens_py_environment.py#L118-L125) from the MovieLens simulation environment, formats them, and sends prediction requests to the Vertex AI endpoint.
#
# The workflow is: Cloud Scheduler --> Pub/Sub --> Cloud Functions --> Endpoint
#
# In production, this Simulator logic can be modified to that of gathering real-world input features as observations, getting prediction results from the endpoint and communicating those results to real-world users.
#
# The Simulator source code is [`src/simulator/main.py`](src/simulator/main.py).
# + id="Sxz6T0yjcoX2"
# Simulator parameters
SIMULATOR_PUBSUB_TOPIC = (
    "simulator-pubsub-topic"  # Pub/Sub topic name for the Simulator.
)
SIMULATOR_CLOUD_FUNCTION = (
    "simulator-cloud-function"  # Cloud Functions name for the Simulator.
)
SIMULATOR_SCHEDULER_JOB = (
    "simulator-scheduler-job"  # Cloud Scheduler cron job name for the Simulator.
)
SIMULATOR_SCHEDULE = "*/5 * * * *"  # Cloud Scheduler cron job schedule for the Simulator. Eg. "*/5 * * * *" means every 5 mins.
SIMULATOR_SCHEDULER_MESSAGE = (
    "simulator-message"  # Cloud Scheduler message for the Simulator.
)
# TF-Agents RL configs, passed to the Simulator Cloud Function as env vars.
# NOTE(review): RANK_K and NUM_ACTIONS match the pipeline defaults
# (rank_k=20, num_actions=20); they must agree with the deployed policy.
BATCH_SIZE = 8
RANK_K = 20
NUM_ACTIONS = 20
# + [markdown] id="6JkcpQmpi1UL"
# ### Run unit tests on the Simulator
# + id="GoNH1VS_i1UL"
# ! python3 -m unittest src.simulator.test_main
# + [markdown] id="0g8bl_pCi1UL"
# ### Create a Pub/Sub topic
#
# - Read more about creating Pub/Sub topics [here](https://cloud.google.com/functions/docs/tutorials/pubsub)
# + id="5apj2cEri1UL"
# ! gcloud pubsub topics create $SIMULATOR_PUBSUB_TOPIC
# + [markdown] id="jA8gMvqXi1UM"
# ### Set up a recurrent Cloud Scheduler job for the Pub/Sub topic
#
# - Read more about possible ways to create cron jobs [here](https://cloud.google.com/scheduler/docs/creating#gcloud).
# - Read about the cron job schedule format [here](https://man7.org/linux/man-pages/man5/crontab.5.html).
# + id="snTyjFkDi1UM"
# Argument string for `gcloud scheduler jobs create pubsub` (used in the
# shell cell below): job name followed by its schedule/topic/message flags.
scheduler_job_args = (
    f"{SIMULATOR_SCHEDULER_JOB}"
    f" --schedule='{SIMULATOR_SCHEDULE}'"
    f" --topic={SIMULATOR_PUBSUB_TOPIC}"
    f" --message-body={SIMULATOR_SCHEDULER_MESSAGE}"
)
# ! echo $scheduler_job_args
# + id="4pY_Gs_Di1UM"
# ! gcloud scheduler jobs create pubsub $scheduler_job_args
# + [markdown] id="HjyV2Arei1UM"
# ### Define the *Simulator* logic in a Cloud Function to be triggered periodically, and deploy this Function
#
# - Specify dependencies of the Function in [`src/simulator/requirements.txt`](src/simulator/requirements.txt).
# - Read more about the available configurable arguments for deploying a Function [here](https://cloud.google.com/sdk/gcloud/reference/functions/deploy). For instance, based on the complexity of your Function, you may want to adjust its memory and timeout.
# - Note that the environment variables in `ENV_VARS` should be comma-separated; there should not be additional spaces, or other characters in between. Read more about setting/updating/deleting environment variables [here](https://cloud.google.com/functions/docs/env-var).
# - Read more about sending predictions to Vertex endpoints [here](https://cloud.google.com/vertex-ai/docs/predictions/online-predictions-custom-models).
# + id="0LJ2C_pdibYg"
# IPython "!" capture: run gcloud and collect its stdout lines into the
# `endpoints` list (one list item per output line).
endpoints = ! gcloud ai endpoints list \
  --region=$REGION \
  --filter=display_name=$ENDPOINT_DISPLAY_NAME
print("\n".join(endpoints), "\n")
# Rows 0-1 of the gcloud output are log/header lines; row 2 holds the first
# matching endpoint, whose first whitespace-separated token is its numeric ID.
# NOTE(review): this assumes the current gcloud output layout and exactly one
# matching endpoint -- verify if gcloud versions or filters change.
ENDPOINT_ID = endpoints[2].split(" ")[0]
print(f"ENDPOINT_ID={ENDPOINT_ID}")
# + id="V4xwXBBgi1UM"
# Environment variables for the Simulator Cloud Function, rendered as the
# comma-separated NAME=VALUE string `gcloud functions deploy` expects
# (no spaces between entries).
_simulator_env = {
    "PROJECT_ID": PROJECT_ID,
    "REGION": REGION,
    "ENDPOINT_ID": ENDPOINT_ID,
    "RAW_DATA_PATH": RAW_DATA_PATH,
    "BATCH_SIZE": BATCH_SIZE,
    "RANK_K": RANK_K,
    "NUM_ACTIONS": NUM_ACTIONS,
}
ENV_VARS = ",".join(f"{name}={value}" for name, value in _simulator_env.items())
# ! echo $ENV_VARS
# + id="0yiHxUMBi1UM"
# ! gcloud functions deploy $SIMULATOR_CLOUD_FUNCTION \
# --region=$REGION \
# --trigger-topic=$SIMULATOR_PUBSUB_TOPIC \
# --runtime=python37 \
# --memory=512MB \
# --timeout=200s \
# --source=src/simulator \
# --entry-point=simulate \
# --stage-bucket=$BUCKET_NAME \
# --update-env-vars=$ENV_VARS
# + [markdown] id="213JWEcLxAhN"
# ## Create the *Logger* to asynchronously log prediction inputs and results
#
# Create the Logger to get environment feedback as rewards from the MovieLens simulation environment based on prediction observations and predicted actions, formulate trajectory data, and store said data back to BigQuery. The Logger closes the RL feedback loop from prediction to training data, and allows re-training of the policy on new training data.
#
# The Logger is triggered by a hook in the prediction code. At each prediction request, the prediction code messages a Pub/Sub topic, which triggers the Logger code.
#
# The workflow is: prediction container code (at prediction request) --> Pub/Sub --> Cloud Functions (logging predictions back to BigQuery)
#
# In production, this Logger logic can be modified to that of gathering real-world feedback (rewards) based on observations and predicted actions.
#
# The Logger source code is [`src/logger/main.py`](src/logger/main.py).
# + [markdown] id="gIuPCKRjxAhN"
# ### Run unit tests on the Logger
# + id="-eVdF88gxAhN"
# ! python3 -m unittest src.logger.test_main
# + [markdown] id="hs56EW17xAhO"
# ### Create a Pub/Sub topic
#
# - Read more about creating Pub/Sub topics [here](https://cloud.google.com/functions/docs/tutorials/pubsub)
# + id="ydoCTizJxAhO"
# ! gcloud pubsub topics create $LOGGER_PUBSUB_TOPIC
# + [markdown] id="FnlsMxfjxAhO"
# ### Define the *Logger* logic in a Cloud Function to be triggered by a Pub/Sub topic, which is triggered by the prediction code at each prediction request.
#
# - Specify dependencies of the Function in [`src/logger/requirements.txt`](src/logger/requirements.txt).
# - Read more about the available configurable arguments for deploying a Function [here](https://cloud.google.com/sdk/gcloud/reference/functions/deploy). For instance, based on the complexity of your Function, you may want to adjust its memory and timeout.
# - Note that the environment variables in `ENV_VARS` should be comma-separated; there should not be additional spaces, or other characters in between. Read more about setting/updating/deleting environment variables [here](https://cloud.google.com/functions/docs/env-var).
# + id="DwrukBPHxAhO"
# Environment variables for the Logger Cloud Function, rendered as the
# comma-separated NAME=VALUE string `gcloud functions deploy` expects
# (no spaces between entries).
_logger_env = {
    "PROJECT_ID": PROJECT_ID,
    "RAW_DATA_PATH": RAW_DATA_PATH,
    "BATCH_SIZE": BATCH_SIZE,
    "RANK_K": RANK_K,
    "NUM_ACTIONS": NUM_ACTIONS,
    "BIGQUERY_TMP_FILE": BIGQUERY_TMP_FILE,
    "BIGQUERY_DATASET_ID": BIGQUERY_DATASET_ID,
    "BIGQUERY_LOCATION": BIGQUERY_LOCATION,
    "BIGQUERY_TABLE_ID": BIGQUERY_TABLE_ID,
}
ENV_VARS = ",".join(f"{name}={value}" for name, value in _logger_env.items())
# ! echo $ENV_VARS
# + id="OykKRkScxAhO"
# ! gcloud functions deploy $LOGGER_CLOUD_FUNCTION \
# --region=$REGION \
# --trigger-topic=$LOGGER_PUBSUB_TOPIC \
# --runtime=python37 \
# --memory=512MB \
# --timeout=200s \
# --source=src/logger \
# --entry-point=log \
# --stage-bucket=$BUCKET_NAME \
# --update-env-vars=$ENV_VARS
# + [markdown] id="n0YSz3xtJcci"
# ## Create the *Trigger* to trigger re-training
#
# Create the Trigger to recurrently re-run the pipeline to re-train the policy on new training data, using `kfp.v2.google.client.AIPlatformClient.create_schedule_from_job_spec`. You create a pipeline for orchestration on Vertex Pipelines, and a Cloud Scheduler job that recurrently triggers the pipeline. The method also automatically creates a Cloud Function that acts as an intermediary between the Scheduler and Pipelines. You can find the source code [here](https://github.com/kubeflow/pipelines/blob/v1.7.0-alpha.3/sdk/python/kfp/v2/google/client/client.py#L347-L391).
#
# When the Simulator sends prediction requests to the endpoint, the Logger is triggered by the hook in the prediction code to log prediction results to BigQuery, as new training data. As this pipeline has a recurrent schedule, it utilizes the new training data in training a new policy, therefore closing the feedback loop. Theoretically speaking, if you set the pipeline scheduler to be infinitely frequent, then you would be approaching real-time, continuous training.
# + id="-YmLQ-ykJcci"
# Cron-format schedule (crontab(5)) used by create_schedule_from_job_spec below.
TRIGGER_SCHEDULE = "*/30 * * * *"  # Schedule to trigger the pipeline. Eg. "*/30 * * * *" means every 30 mins.
# + id="ay1x-rgIJcci"
# Load the Ingester / Trainer KFP components, pinned to a specific commit of
# the vertex-ai-samples repository so the pipeline is reproducible.
_COMPONENT_BASE = (
    "https://raw.githubusercontent.com/GoogleCloudPlatform/vertex-ai-samples/"
    "62a2a7611499490b4b04d731d48a7ba87c2d636f/community-content/"
    "tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/"
    "mlops_pipeline_tf_agents_bandits_movie_recommendation/src"
)
ingest_op = load_component_from_url(f"{_COMPONENT_BASE}/ingester/component.yaml")
train_op = load_component_from_url(f"{_COMPONENT_BASE}/trainer/component.yaml")
@dsl.pipeline(pipeline_root=PIPELINE_ROOT, name=f"{PIPELINE_NAME}-retraining")
def pipeline(
    # Pipeline configs
    project_id: str,
    training_artifacts_dir: str,
    # BigQuery configs
    bigquery_table_id: str,
    bigquery_max_rows: int = 10000,
    # TF-Agents RL configs
    rank_k: int = 20,
    num_actions: int = 20,
    num_epochs: int = 5,
    tikhonov_weight: float = 0.01,
    agent_alpha: float = 10,
) -> None:
    """Authors a re-training pipeline for MovieLens movie recommendation system.

    Integrates the Ingester, Trainer and Deployer components. Unlike the
    startup pipeline, there is no Generator step: training data comes from the
    BigQuery table that the Logger keeps appending to.

    Args:
        project_id: GCP project ID. This is required because otherwise the
            BigQuery client will use the ID of the tenant GCP project created
            as a result of KFP, which doesn't have proper access to BigQuery.
        training_artifacts_dir: Path to store the Trainer artifacts (trained
            policy).
        bigquery_table_id: A string of the BigQuery table ID in the format of
            "project.dataset.table".
        bigquery_max_rows: Optional; maximum number of rows to ingest.
        rank_k: Optional; rank for matrix factorization in the MovieLens
            environment; also the observation dimension.
        num_actions: Optional; number of actions (movie items) to choose from.
        num_epochs: Optional; number of training epochs.
        tikhonov_weight: Optional; LinUCB Tikhonov regularization weight of
            the Trainer.
        agent_alpha: Optional; LinUCB exploration parameter that multiplies
            the confidence intervals of the Trainer.
    """
    # Run the Ingester component: read at most bigquery_max_rows training
    # records from BigQuery and materialize them as a TFRecord file.
    ingest_task = ingest_op(
        project_id=project_id,
        bigquery_table_id=bigquery_table_id,
        bigquery_max_rows=bigquery_max_rows,
        tfrecord_file=TFRECORD_FILE,
    )
    # Run the Trainer component and submit custom job to Vertex AI.
    # Convert the train_op component into a Vertex AI Custom Job pre-built
    # component with the configured machine shape and accelerators.
    custom_job_training_op = utils.create_custom_training_job_op_from_component(
        component_spec=train_op,
        replica_count=TRAINING_REPLICA_COUNT,
        machine_type=TRAINING_MACHINE_TYPE,
        accelerator_type=TRAINING_ACCELERATOR_TYPE,
        accelerator_count=TRAINING_ACCELERATOR_COUNT,
    )
    # Consuming ingest_task.outputs makes the Trainer depend on the Ingester.
    train_task = custom_job_training_op(
        training_artifacts_dir=training_artifacts_dir,
        tfrecord_file=ingest_task.outputs["tfrecord_file"],
        num_epochs=num_epochs,
        rank_k=rank_k,
        num_actions=num_actions,
        tikhonov_weight=tikhonov_weight,
        agent_alpha=agent_alpha,
        project=PROJECT_ID,
        location=REGION,
    )
    # Run the Deployer components.
    # Upload the trained policy as a model.
    model_upload_op = gcc_aip.ModelUploadOp(
        project=project_id,
        display_name=TRAINED_POLICY_DISPLAY_NAME,
        artifact_uri=train_task.outputs["training_artifacts_dir"],
        serving_container_image_uri=f"gcr.io/{PROJECT_ID}/{PREDICTION_CONTAINER}:latest",
    )
    # Create a Vertex AI endpoint. (This operation can occur in parallel with
    # the Generator, Ingester, Trainer components.)
    endpoint_create_op = gcc_aip.EndpointCreateOp(
        project=project_id, display_name=ENDPOINT_DISPLAY_NAME
    )
    # Deploy the uploaded, trained policy to the created endpoint. (This operation
    # has to occur after both model uploading and endpoint creation complete.)
    # NOTE(review): unlike the startup pipeline above, no traffic_split is
    # passed here, so ModelDeployOp's default applies -- confirm intended.
    gcc_aip.ModelDeployOp(
        endpoint=endpoint_create_op.outputs["endpoint"],
        model=model_upload_op.outputs["model"],
        deployed_model_display_name=TRAINED_POLICY_DISPLAY_NAME,
        dedicated_resources_machine_type=ENDPOINT_MACHINE_TYPE,
        dedicated_resources_accelerator_type=ENDPOINT_ACCELERATOR_TYPE,
        dedicated_resources_accelerator_count=ENDPOINT_ACCELERATOR_COUNT,
        dedicated_resources_min_replica_count=ENDPOINT_REPLICA_COUNT,
    )
# + id="9yPjcm75Jcci"
# Compile the authored pipeline.
compiler.Compiler().compile(pipeline_func=pipeline, package_path=PIPELINE_SPEC_PATH)
# Create a Vertex AI client.
api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION)
# Schedule a recurring pipeline. This also creates a Cloud Scheduler job and
# an intermediary Cloud Function that triggers the pipeline run.
response = api_client.create_schedule_from_job_spec(
    job_spec_path=PIPELINE_SPEC_PATH,
    schedule=TRIGGER_SCHEDULE,
    parameter_values={
        # Pipeline configs
        "project_id": PROJECT_ID,
        "training_artifacts_dir": TRAINING_ARTIFACTS_DIR,
        # BigQuery config
        "bigquery_table_id": BIGQUERY_TABLE_ID,
    },
)
# Display the resource name of the created schedule (notebook cell output).
response["name"]
# + [markdown] id="TpV-iwP9qw9c"
# ## Cleaning up
#
# To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial (you also need to clean up other resources that are difficult to delete here, such as all or part of the data in BigQuery, the recurring pipeline and its Scheduler job, the uploaded policy/model, etc.):
# + id="sx_vKniMq9ZX"
# Delete endpoint resource.
# ! gcloud ai endpoints delete $ENDPOINT_ID --quiet --region $REGION
# Delete Pub/Sub topics.
# ! gcloud pubsub topics delete $SIMULATOR_PUBSUB_TOPIC --quiet
# ! gcloud pubsub topics delete $LOGGER_PUBSUB_TOPIC --quiet
# Delete Cloud Functions.
# ! gcloud functions delete $SIMULATOR_CLOUD_FUNCTION --quiet
# ! gcloud functions delete $LOGGER_CLOUD_FUNCTION --quiet
# Delete Scheduler job.
# ! gcloud scheduler jobs delete $SIMULATOR_SCHEDULER_JOB --quiet
# Delete Cloud Storage objects that were created.
# ! gsutil -m rm -r $PIPELINE_ROOT
# ! gsutil -m rm -r $TRAINING_ARTIFACTS_DIR
| community-content/tf_agents_bandits_movie_recommendation_with_kfp_and_vertex_sdk/mlops_pipeline_tf_agents_bandits_movie_recommendation/mlops_pipeline_tf_agents_bandits_movie_recommendation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# name: python2
# ---
# Affect Account
#
# Script that causes affects to users when the minutes folder reaches a certain amount.
#
# Read minutes folder. If user has less than 10 mins warn them. At 5 mins warn every 5 mins. 10 seconds count down.
# Suggest they buy more time.
# Directory holding one sub-folder per registered signinlca user.
rdminz = ('/home/wcmckee/signinlca/usernames/')
rdminz
import os
# Names of the user folders currently registered under signinlca.
usrdir = os.listdir(rdminz)
# +
#Make folders of usersnames from /home in /signinlca/usernames
#make username-time etc files
# -
# Home directories on this machine; each should have a matching entry in usrdir.
holis = os.listdir('/home')
# +
for hol in holis:
#print hol
for usrd in usrdir:
if hol == usrd:
#print('its correct!')
print hol
else:
print hol
print('its not correct :(!')
#compare two lists - home and usernames. if home item isnt in
#username item - add it make folder/files etc.
# -
| .ipynb_checkpoints/affectaccount-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np

# 50 random integers drawn uniformly from [1, 100).
array = np.random.randint(1, 100, 50)
array
# Largest value in the array, via the ndarray method.
maximo = array.max()
maximo
posicion_maximo = np.argmax(array)  # index of the maximum within the array
posicion_maximo
# Smallest value in the array.
minimo = array.min()
minimo
posicion_minimo = np.argmin(array)  # index of the minimum within the array
posicion_minimo
| Machine Learning/numpy_max_min.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pyspark_env
# language: python
# name: pyspark_env
# ---
# ## Imports
#
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType
# One SparkSession per application; getOrCreate() reuses an existing session.
spark = (SparkSession.builder.appName("SalesAnalytics").getOrCreate())
# # Data Preparation
# All columns are read as strings first; proper types are applied later in
# the cleansing step, after null rows and repeated header rows are removed.
schema = StructType([
    StructField("Order ID", StringType(), True),
    StructField("Product", StringType(), True),
    StructField("Quantity Ordered", StringType(), True),
    StructField("Price Each", StringType(), True),
    StructField("Order Date", StringType(), True),
    StructField("Purchase Address", StringType(), True)
])
sales_data_fpath = "./data/salesdata"
# Load every CSV under the sales data folder using the explicit schema.
sales_raw_df = (spark.read.format("csv")
                .option("header",True)
                .schema(schema)
                .load(sales_data_fpath))
sales_raw_df.show(10)
sales_raw_df.printSchema()
# # Data Preparation and Cleansing
# ## Remove Null Rows and Bad Records
from pyspark.sql.functions import col
## show null values in dataframe
sales_raw_df.filter(col("Order ID").isNull() == True).show(10)
# Drop any row containing at least one null ("any", as opposed to "all").
sales_raw_df = sales_raw_df.na.drop("any")
## No more Null Values
sales_raw_df.filter(col("Order ID").isNull() == True).show(10)
sales_raw_df.describe().show()
# The source files repeat their header line; those bad rows carry the literal
# column name "Order ID" as a value.
sales_raw_df.filter(col("Order ID") == "Order ID").show(10)
# distinct() collapses exact duplicates (leaving one header row at most)...
sales_temp_df = sales_raw_df.distinct()
sales_temp_df.filter(col("Order ID") == "Order ID").show(10)
# ...then the surviving header row is filtered out.
sales_temp_df = sales_temp_df.filter(col("Order ID") != "Order ID")
sales_temp_df.filter(col("Order ID") == "Order ID").show(10)
sales_temp_df.show(10, truncate=False)
sales_temp_df.describe().show()
# ## Extract the city and State from Purchase Address
from pyspark.sql.functions import split
sales_temp_df.select("Purchase Address").show(10, False)
# Address format is "street, City, ST zip", so splitting on "," yields
# [street, " City", " ST zip"].
sales_temp_df.select("Purchase Address" ,split(col("purchase Address"), ",")).show(10, False)
## Get City
sales_temp_df.select("Purchase Address" ,split(col("purchase Address"), ",").getItem(1)).show(10, False)
## Get State
sales_temp_df.select("Purchase Address" ,split(col("purchase Address"), ",").getItem(2)).show(10, False)
## Get State--- We use an Extra Split function to drill into the first split
# The third comma-field is " ST zip"; splitting it on " " gives
# ["", "ST", "zip"], so getItem(1) below selects the state code.
sales_temp_df.select("Purchase Address" ,split(split(col("purchase Address"), ",").getItem(2), ' ')).show(10, False)
sales_temp_df = (sales_temp_df.withColumn("City", split(col("purchase Address"), ",").getItem(1))
                 .withColumn("State", split(split(col("purchase Address"), ",").getItem(2), ' ').getItem(1)))
sales_temp_df.show(10, False)
# ## Rename and Change DataTypes
from pyspark.sql.functions import to_timestamp , year , month
from pyspark.sql.types import IntegerType , FloatType
# Cast the string columns to proper types under cleaner names, then drop the
# original string-typed columns so only the typed ones remain.
sales_temp_df = (sales_temp_df.withColumn("OrderID", col("Order ID").cast(IntegerType()))
                 .withColumn("Quantity", col("Quantity Ordered").cast(IntegerType()))
                 .withColumn("Price", col("Price Each").cast(FloatType()))
                 .withColumn("OrderDate", to_timestamp(col("Order Date"), "MM/dd/yy HH:mm"))
                 .withColumnRenamed("Purchase Address", "StoreAddress")
                 .drop("order ID")
                 .drop("Quantity Ordered")
                 .drop("Price Each")
                 .drop("Purchase Address"))
sales_temp_df.show()
sales_temp_df.printSchema()
# ## Add New Columns:Month and Year
# Reporting columns derived from the parsed OrderDate timestamp.
sales_temp_df = (sales_temp_df.withColumn("ReportYear", year(col("OrderDate")))
                 .withColumn("Month", month(col("OrderDate"))))
sales_temp_df.show(10)
| sparkdf/SparkSalesAnalytics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.10.0 64-bit
# language: python
# name: python3
# ---
import logging
from handnfoot import cardtable
from handnfoot import handnfoot
# DEBUG level so the library's turn-by-turn logging shows in the notebook.
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
# Four players with differing traits, each using the default strategy.
# NOTE(review): precision/speed semantics are defined by the handnfoot
# package -- confirm against cardtable.Player before tuning these values.
g = handnfoot.HNFGame()
g.add_player(cardtable.Player("J", precision=5, speed=1.2), handnfoot.Strategy())
g.add_player(cardtable.Player("S", precision=10, speed=1), handnfoot.Strategy())
g.add_player(cardtable.Player("L", precision=7, speed=1), handnfoot.Strategy())
g.add_player(cardtable.Player("A", precision=15, speed=.9), handnfoot.Strategy())
#g.game_setup()
#g.round_setup()
g.start()
#g.display()
g.players[0].display()
# Let the first player take 20 turns, then show the resulting hand.
for i in range(20):
    g.play_turn(player = g.players[0])
g.players[0].display()
# `IPython.core.display` is an internal path whose display/HTML re-exports are
# deprecated; `IPython.display` is the supported public location.
from IPython.display import display, HTML

# Render the first player's hand as a row of card glyphs/markup.
s = ""
for card in g.players[0].get_hand().cards:
    s += (card.get_HTML())
display(HTML('<span style="font-size:90px;">'+s+'</span>'))

# Overlap two card images by shifting the second one left, fan-style.
s = '<span style="display:inline-block; background-color:white; border-radius: 5px; border: solid 1px black"><img src="handnfoot/pcassets/png/ace_of_spades.png" width=70></span>'
s += '<span style="position: relative; left: -50px; display:inline-block; background-color:white; border-radius: 5px; border: solid 1px black"><img src="handnfoot/pcassets/png/queen_of_hearts.png" width=70></span>'
display(HTML(s))

#from IPython.display import Image
#Image(filename='handnfoot/pcassets/png/ace_of_spades.png')
| handnfoot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Ipython Notebook
#
# <NAME>, el Colombiano que creo Ipython una de las interfaces Python mas importantes en el mundo en los ultimos 10 años.
#
# https://en.wikipedia.org/wiki/Fernando_P%C3%A9rez_(software_developer)
# https://pybonacci.es/2013/05/16/entrevista-a-fernando-perez-creador-de-ipython/
# http://fperez.org/personal.html
#
#
# ## Ejemplo de lo que puedes lograr
#
# http://nbviewer.jupyter.org/github/glue-viz/example_data_viewers/blob/master/mario/mario.ipynb
#
#
# # Opreaciones Matematicas
# ## **Suma** : $2+3$
23454+189203
# ## **Multiplicación**: $2x3$
2334*2134
# ## **División**: $\frac{2}{3}$
100/4
# ## **Potencia**: $ 2^{3}$
# NOTE(review): this cell multiplies (2*44); the section header advertises
# exponentiation, which in Python is the ** operator -- confirm intent.
2*44
# ## Funciones Trigonometricas
#
# En las siguientes celdas vamos a calcular los valores de funciones comunes en nuestras clases de matemáticas.
# Para esto, necesitamos importar la libreria numpy.
# Import a library in Python
import numpy as np  # "as np" binds a shorter alias, so calls are quicker to type
# Was "08": integer literals with a leading zero are a SyntaxError in Python 3
# (octal is spelled 0o10), which broke the whole cell. Written plainly instead.
8
np.sin(59)
(np.sin(3))*(np.sin(2))
# ## Logaritmo y Exponencial: $ln(3), e^{3}$
np.log(3)
np.exp(3)
# ## Reto de Programación
#
# - Encuentre la hipotenusa del triangulo con lados a = 4 y b = 5, $h^2=(a^2 + b^2)$
#
# - Resolver la ecuación
# $$x^2 - x - 12 = 0$$
#
# $$ a=1, b=-1, c=-12$$
# Sample string variables keyed by arbitrary numbers; printed further below.
nombre_9="felipe"
nombre_13="luisa"
nombre_3="tatiana"
nombre_32="karol"
nombre_18="pilar"
# Hypotenuse of the 4-5 right triangle: sqrt(4^2 + 5^2).
np.sqrt((4**2) + (5**2))
# +
a=4
b=5
# h^2 = a^2 + b^2. The original expression "a**+b**2" parsed as
# a ** (+(b ** 2)) = 4 ** 25, not the sum of squares.
h=np.sqrt(a**2 + b**2)
print (h)
# -
# Print the sample variables defined in an earlier cell.
print (nombre_9)
print (nombre_13)
print (nombre_3)
print (nombre_32)
print (nombre_18)
# # Variables
#
# ### Una variable es un espacio para guardar valores modificables o constantes.
#
# ----
# ```python
# nombre_de_la_variable = valor_de_la_variable
# ```
# ---
#
# **Los distintos tipos de variables son:**
#
# **Enteros (**`int`**): 1, 2, 3, -10, -103**
#
# ** Números continuos (**`float`**): 0.666, -10.678**
#
# ** Cadena de texto (**`str`**): 'clubes', 'clubes de ciencia', 'Roberto'**
#
# **Booleano (verdadero / Falso): `True`, `False` **
# Example: string concatenation with +
a = 'Aleajndro' + ' ' + 'Mahecha'  # NOTE(review): 'Aleajndro' looks like a typo for 'Alejandro' in this example string
print (a) # print my variable
# ## Variable tipo `int`
b = -15
print (b)
print ( type(b))
# ## Variable tipo `float`
c = 3.1416
print (c)
print (type(c))
# ## Variable tipo `str`
d = 'clubes de ciencia'
print (type(d))
# ## Variable tipo `bool`
# Python's boolean literals are capitalized: the original "true" raised a
# NameError because lowercase true is not defined.
e = True   # = verdadero
f = False  # = falso
print (f)
# ## Como averiguo el tipo de una variable ??
#
# Utilizando la función `type`:
#
# ```python
# type(nombre_de_la_variable)
# ```
# Show each variable's type; a..e were assigned in the preceding cells
# (so this cell depends on them having been run first).
print (type(a))
print (type(b))
print (type(c))
print (type(d))
print (type(e))
# ## Reto de Programación
# # Variables para guardar colecciones de datos
#
# >** Python tiene otro 3 tipos de variables mas complejas que pueden almacenar colecciones de datos
# >como los visotos anteriormente**
#
# - Listas
# - Tuplas
# - Diccionarios
# ## Listas
#
# Las listas permiten guardar colecciones de datos con diferentes tipos:
# ```python
# int; str; float; bool
# ```
# Una lista se crea de la siguiente forma:
#
# ```python
# nombre_de_la_lista = [valor_1, valor_2, valor_3]
# ```
# Los valores de la lista pueden ser modificados.
# +
# Example: lists may freely mix element types.
mi_lista = [1,2,3,5,6,-3.1416]
mi_lista_diversa = [1,2,'clubes', 'de', 'ciencia', 3.1416, False]
print (mi_lista)
print (mi_lista_diversa)
# +
nombre_edad = ['Laura', 18, 'Josie', 23, 'Alejandro', 30]
print (nombre_edad)
# -
numero_1 = '17'
palabra = 'clubes de ciencia'
numero_2 = '4'
#print (numero_1 + palabra) # primer caso
# Both operands are strings, so + concatenates: this prints "174", not 21.
print (numero_1 + numero_2) # segundo caso
# ### Como puedo mirar un elemento o elementos de mi lista??
# Para leer el elemento de la posición `n`, se usa:
#
# ```python
# mi_lista[n]
# ```
import numpy as np
# +
# Example: zero-based indexing into a list.
mi_lista = [1,2,3,4,5,'alejandro']
#print (mi_lista[0]) # read the first element, at position n=0
#print (mi_lista_diversa[0])
#print (type(mi_lista[5])) # read the type of the element at position n=5
print ('la longitud de mi lista es ' + str(len(mi_lista)) )
# -
# ** Como leer los elementos entre la posición n y m??**
# ```python
# mi_lista[n:m+1]
# ```
# Example: slice stop index is excluded, so [0:3] covers positions 0..2.
print (mi_lista[0:3]) # Leer entre n=0 y m=2
# ## Reto de Programación
# ## Tuplas
#
# Las tuplas permiten guardar colecciones de datos de diferentes tipos:
# ```python
# int; str; float; bool
# ```
# Una tupla se crea de la siguiente forma:
#
# ```python
# mi_tupla = ('cadena de texto', 15, 2.8, 'otro dato', 25)
# ```
# Los valores de una tupla no pueden ser modificados. Sus elementos se leen como en las listas
#
#
# Example. NOTE(review): the variable is named mi_lista but holds a tuple.
mi_lista = ('cadena de texto', 15, 2.8, 'otro dato', 25)
print (mi_lista)
print (mi_lista[2]) # read the third element of the tuple
print (mi_lista[2:4]) # elements at positions 2 and 3 -- NOT the last two; that would be mi_lista[3:5]
# ## Reto de Programación
# ## Diccionarios
#
# Mientras que en las listas y tuplas se accede a los elementos por un número de indice, en los diccionarios se utilizan claves(numericas ó de texto) para acceder a los elementos. Los elementos guardados en cada clave son de diferentes tipos, incluso listas u otros diccionarios.
#
# ```python
# int; str; float; bool, list, dict
# ```
# Una diccionario se crea de la siguiente forma:
#
# ```python
# mi_diccionario = {'grupo_1':4, 'grupo_2':6, 'grupo_3':7, 'grupo_4':3}
# ```
# Acceder al valor de la clave `grupo_2`:
# ```python
# print (mi_diccionario['grupo_2'])
# ```
#
# Example 1: dictionary lookup by string key.
mi_diccionario = {'grupo_1':4, 'grupo_2':6, 'grupo_3':7, 'grupo_4':3}
print (mi_diccionario['grupo_2'])
# +
# Example 2: values of mixed types, including lists.
informacion_persona = {'nombres':'Elon', 'apellidos':'Musk', 'edad':45, 'nacionalidad':'Sudafricano',
                       'educacion':['Administracion de empresas','Física'],'empresas':['Zip2','PyPal','SpaceX','SolarCity']}
print (informacion_persona['educacion'])
print (informacion_persona['empresas'])
# -
# ## Reto de Programación
# # Estructuras de control condicionales
#
# Las estructuras de control condicionales nos permiten evaluar si una o mas condiciones se cumplen, y respecto a esto
# ejecutar la siguiente accion.
#
# Primero usamos:
# ```python
# if
# ```
# Despues algun operador relacional para comparar
# ```python
# == igual que
# != diferente de
# < menor que
# > mayor que
# <= menor igual que
# >= mayor igual que
# ```
# Cuando se evalúa más de una condición:
#
# ```python
# and, & (y)
#
# or, | (ó)
# ```
# Example: single if/else.
color_semaforo = 'amarillo'
if color_semaforo == 'verde':
    print ("Cruzar la calle")
else:
    print ("Esperar")
# +
# Example: combining two conditions with `or`.
dia_semana = 'lunes'
if dia_semana == 'sabado' or dia_semana == 'domingo':
    print ('Me levanto a las 10 de la mañana')
else:
    print ('Me levanto antes de las 7am')
# -
# Example: if / elif / else chain; the first true branch wins.
costo_compra = 90
if costo_compra <= 100:
    print ("Pago en efectivo")
elif costo_compra > 100 and costo_compra < 300:
    print ("Pago con tarjeta de débito")
else:
    print ("Pago con tarjeta de crédito")
# ## Reto de Programación
# # Estructuras de control iterativas(cíclicas o bucles)
#
# Estas estructuras nos permiten ejecutar un mismo codigo, de manera repetida, mientras se cumpla una condición.
#
# ## Bucle While
#
# Este bucle ejecuta una misma acción mientras determinada condición se cumpla:
#
# ```python
# anio = 2001
# while anio <= 2012:
# print ("Informes del Año", str(anio))
# anio = anio + 1 # aumentamos anio en 1
# ```
# En este ejemplo la condición es menor que 2012
#
# Example: loop runs while the condition holds (prints years 2001..2012).
anio = 2001
while anio <= 2012:
    print ("Informes del Año", str(anio))
    anio = anio + 1 # advance to the next year so the loop terminates
# Example: countdown from 10 to 0 inclusive.
cuenta = 10
while cuenta >= 0:
    print ('faltan '+str(cuenta)+' minutos')
    cuenta += -1
# ## Reto de Programación
# ## Bucle for
# En Python el bucle for nos permite iterar sobre variables que guardan colecciones de datos, como : tuplas y listas.
#
# ```python
# mi_lista = ['Juan', 'Antonio', 'Pedro', 'Herminio']
# for nombre in mi_lista:
# print (nombre)
# ```
#
# En el codigo vemos que la orden es ir por cada uno de los elementos de la lista para imprimirlos.
# Example: iterate directly over a tuple's elements.
mi_tupla = ('rosa', 'verde', 'celeste', 'amarillo')
for color in mi_tupla:
    print (color)
# +
# Example: negative indices [-1] and [-2] are the last two entries of the
# list, i.e. the weekend days.
dias_semana = ['lunes','martes','miercoles','jueves','viernes','sabado','domingo']
for i in dias_semana:
    if (i == dias_semana[-1]) or (i == dias_semana[-2]):
        print ('Hoy seguire aprendiendo de programación')
    else:
        print ('Hoy tengo que ir al colegio')
# -
# ## Reto de Programación
| Dia_1/felipe arevalo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## WCS solutions
# ### Exercise: Refine a WCS using a list of detections and a reference catalog
#
# Refine a WCS for a science image exposure from the Zwicky Transient Facility from these ingredients:
# * An initial header
# * A detection list cut at 17th magnitude, in file `data/ztf_detections_17thmag.csv`
# * A reference catalog with coordinates and magnitudes from Gaia cut at 17 Gaia G magnitude, in `data/Gaia-gaia_dr2_source-ztf-20190606224213_000667_zr.csv`
#
# The exercise makes use of `astropy.wcs`, `astropy.coordinates` and the projection capabilities of WCSAxes.
#
# 1. Read in the detection list and the reference catalog with `astropy.table.Table.read`
# 2. Calculate starting RAs and Decs for the detection list using the initial WCS
# 3. Create SkyCoord instances for the initial detection coordinates and the Gaia coordinates
# 4. Plot the detection list and the Gaia list in a scatter plot
# 5. Match the detection list and the Gaia list
# 6. Refine the WCS using the `fit_wcs_from_points` function from `astropy.wcs.utils`
# Import everything we'll need for the exercise.
# +
import os
import matplotlib.pyplot as plt
import numpy as np
from astropy.coordinates import SkyCoord
from astropy.table import Table
from astropy.wcs import WCS
from astropy.wcs.utils import fit_wcs_from_points
import astropy.units as u
# %matplotlib inline
# -
# Create the initial WCS programatically.
# Build the initial (approximate) WCS by hand instead of reading a FITS header.
initial_wcs = WCS(naxis=2)
initial_wcs.wcs.ctype = ['RA---TAN', 'DEC--TAN']  # gnomonic (TAN) projection
initial_wcs.wcs.crval = [149.07662386535503, 33.32164150821777]  # reference RA/Dec in degrees
# Reference pixel is far outside the readout (negative) -- presumably this
# quadrant is a cutout of a larger focal plane; TODO confirm against the header.
initial_wcs.wcs.crpix = [-3305.678, -7136.481]
# CD matrix: ~2.8e-4 deg/pix (~1.01 arcsec/pix) with a small rotation/skew.
initial_wcs.wcs.cd = [[-0.0002817188, -1.554e-07],
                      [-1.998e-07, -0.0002819204]]
initial_wcs.array_shape = [3080, 3072] # NAXIS2, NAXIS1
initial_wcs
# ### 1. Read in the detection list and the reference catalog
#
# Read in the detections and the reference catalog using `astropy.table.Table` with `format='csv'`.
# The detections table is in `'data/ztf_detections_17thmag.csv'` and the reference catalog is `'data/Gaia-gaia_dr2_source-ztf-20190606224213_000667_zr.csv'`
# The detections file is read as explicit CSV; the Gaia file's format is left
# for Table.read to infer -- NOTE(review): confirm auto-detection works for it.
detections = Table.read(os.path.join('data', 'ztf_detections_17thmag.csv'), format='csv')
ref_catalog = Table.read(os.path.join('data', 'Gaia-gaia_dr2_source-ztf-20190606224213_000667_zr.csv'))
# ### 2. Calculate starting RAs and Decs for the detection list using the initial WCS
#
# Use the `initial_wcs.all_pix2world` function to calculate starting RA and Dec from the `detections['xpos']` and `detections['ypos']` columns. The pixel positions use the FITS numbering convention.
# origin=1: pixel coordinates follow the FITS (1-based) convention.
initial_ra, initial_dec = initial_wcs.all_pix2world(detections['xpos'], detections['ypos'], 1)
# ### 3. Create SkyCoord instances for the initial detection coordinates and the Gaia coordinates
initial_coords = SkyCoord(ra=initial_ra, dec=initial_dec, unit=u.deg)
gaia_coords = SkyCoord(ra=ref_catalog['ra'], dec=ref_catalog['dec'], unit=u.deg)
# ### 4. Plot the detection list and the Gaia list in a scatter plot
#
# Use `projection=initial_wcs` to make a scatter plot using `gaia_coords` and `initial_coords`. The open circles are sized according to magnitude.
# +
fig = plt.figure(figsize=(8,8))
# WCSAxes: the axes live in pixel space, so sky coordinates must be drawn
# through the 'world' transform below.
ax = plt.subplot(projection=initial_wcs)
# Gaia reference stars in green; marker area grows for brighter sources
# (18 minus magnitude, so a 17th-mag star gets s=20).
ax.scatter(gaia_coords.ra,
           gaia_coords.dec, c=None, marker='o',
           s=20*(18 - ref_catalog['phot_g_mean_mag']),
           facecolors='none', edgecolors='green',
           transform=ax.get_transform('world'))
# Detections (positions via the initial WCS) in blue, plotted the same way.
ax.scatter(initial_coords.ra,
           initial_coords.dec, c=None, marker='o',
           s=20*(18 - detections['mag']),
           facecolors='none', edgecolors='blue',
           transform=ax.get_transform('world'))
# -
# ### 5. Match the detection list and the Gaia list
#
# Use the `initial_coords.search_around_sky` method with a 15 arcsecond radius.
# Cross-match the two catalogs within 15 arcsec.  The unpacking assumes the
# first index array refers to the argument catalog (gaia_coords) and the second
# to initial_coords -- NOTE(review): confirm the index-order convention of
# SkyCoord.search_around_sky against the astropy documentation.
idxgaia, idxdet, d2d, d3d = initial_coords.search_around_sky(gaia_coords, 15*u.arcsec)
gaia_matched = gaia_coords[idxgaia]
detections_xpos_matched = detections['xpos'][idxdet]
detections_ypos_matched = detections['ypos'][idxdet]
# Sanity check: all three matched arrays must have one row per match pair.
print(len(gaia_matched), len(detections_xpos_matched), len(detections_ypos_matched))
# ### 6. Refine the WCS using the `fit_wcs_from_points` function
#
# Look at the help for `fit_wcs_from_points` and use it to fit a new WCS.
#
# Optionally, calculate new RAs and Decs for the matched pixel coordinates, and make another scatter plot.
# +
# fit_wcs_from_points?
# -
# Fit a refined WCS (TAN projection + 3rd-order SIP distortion) from the
# matched (pixel, sky) pairs.
fitted_wcs = fit_wcs_from_points((detections_xpos_matched, detections_ypos_matched),
                                 gaia_matched,
                                 projection='TAN', sip_degree=3)
fitted_wcs
# Examine the SIP distortion coefficients
fitted_wcs.sip.a  # polynomial coefficients for the first pixel axis
fitted_wcs.sip.b  # polynomial coefficients for the second pixel axis
# Optionally, calculate new RAs and Decs for the matched pixel coordinates, and make another scatter plot.
# origin=1 again: FITS 1-based pixel convention.
fitted_ra, fitted_dec = fitted_wcs.all_pix2world(detections_xpos_matched,
                                                 detections_ypos_matched, 1)
# +
fig = plt.figure(figsize=(8,8))
ax = plt.subplot(projection=fitted_wcs)
ax.scatter(gaia_matched.ra,
gaia_matched.dec, c=None, marker='o',
s=20*(18 - ref_catalog['phot_g_mean_mag'][idxgaia]),
facecolors='none', edgecolors='green',
transform=ax.get_transform('world'))
ax.scatter(fitted_ra,
fitted_dec, c=None, marker='o',
s=20*(18 - detections['mag'][idxdet]),
facecolors='none', edgecolors='blue',
transform=ax.get_transform('world'))
# -
| 08-WCS/WCS_solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
url ='https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
from bs4 import BeautifulSoup as bs
import pandas as pd
import requests
# Fetch and parse the NASA Mars news listing page.
response = requests.get(url)
soup = bs(response.text, 'html.parser')
print(soup.prettify())
# Latest headline: first div with class "content_title".
# NOTE(review): these pages are JavaScript-heavy; a plain requests fetch may
# not contain this markup, in which case find() returns None and the next
# line raises AttributeError -- confirm against the live site.
nt1 = soup.find('div', class_="content_title")
news_title=nt1.a.text.replace("\n","")
news_title
# Teaser paragraph accompanying the headline.
p1 = soup.find('div', class_="rollover_description_inner")
news_p=p1.text.replace("\n","")
news_p
# Featured image: parse the inline style attribute of the first carousel item.
url2="https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
response2 = requests.get(url2)
soup2 = bs(response2.text, 'html.parser')
pic1 = soup2.find('article', class_="carousel_item")
pic1
pic = pic1['style']
pic
# Slice [23:-3] assumes the style string has exactly the form
# "background-image: url('<path>');" -- TODO confirm against the live page.
featured_image_url = 'https://www.jpl.nasa.gov' + pic[23:-3]
featured_image_url
# Latest weather tweet.  NOTE(review): Twitter no longer serves this static
# markup to plain requests clients, so twt1 may be None here -- verify.
url3="https://twitter.com/marswxreport?lang=en"
response3 = requests.get(url3)
soup3 = bs(response3.text, 'html.parser')
twt1 = soup3.find('div', class_='js-tweet-text-container')
twt1
mars_weather = twt1.p.text
mars_weather
# Mars facts: pandas parses every HTML table on the page; keep the first one.
url4='https://space-facts.com/mars/'
tables = pd.read_html(url4)
tables
df = tables[0]
df.columns = ['Category','Value']
df
# Render the table as a single-line HTML string for embedding elsewhere.
html_table1 = df.to_html(index=False)
html_table = html_table1.replace('\n', '')
html_table
# Hemisphere titles and full-resolution image links from the USGS site.
url5="https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars"
response5 = requests.get(url5)
soup5 = bs(response5.text, 'html.parser')
titles = soup5.find_all('div', class_='description')
titles
tlst=[]
for title in titles:
    tlst.append(title.text)
tlst
refs = soup5.find_all('a', class_='itemLink product-item')
refs
urlroot = 'https://astrogeology.usgs.gov'
rlst=[]
# Follow each hemisphere's detail page and collect the first download link.
for ref in refs:
    url6= urlroot + ref['href']
    response6 = requests.get(url6)
    soup6 = bs(response6.text, 'html.parser')
    imgbig = soup6.find('div', class_='downloads')
    rlst.append(imgbig.li.a['href'])
rlst
tlst
# Pair the four hemisphere titles with their image URLs.
hemisphere_image_urls = []
for i in range(0,4):
    hemisphere_image_urls.append ({'title': tlst[i],'imgurl': rlst[i]})
hemisphere_image_urls
# Bundle every scraped artifact into a single dictionary.
bigscrape ={'news title': news_title,
            'news paragraph':news_p,
            'featured image url':featured_image_url,
            'html table':html_table,
            'hemisphere image urls':hemisphere_image_urls,
            'mars weather':mars_weather}
bigscrape
| .ipynb_checkpoints/mission_to_mars-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # K-Nearest Neighbors Classification
#
# The dataset was obtained from https://archive.ics.uci.edu/ml/datasets/Iris.
import numpy as np
import pandas as pd
import seaborn as sns
from heapq import heappush, nsmallest
from collections import Counter
# read data and parse it (the UCI iris file has no header row, so supply names)
data = pd.read_csv('../datasets/iris.csv',
                   names=["sepal_length","sepal_width","petal_length","petal_width","species"])
# Shuffle: sampling len(data) rows without replacement is a random permutation.
data = data.sample(len(data))
# Pair plots between features of different species
# NOTE(review): `size=` was renamed `height=` in seaborn >= 0.9 -- confirm the
# installed version still accepts this keyword.
sns.pairplot(data, hue="species", size=3, diag_kind='kde')
# Map the three class names onto integer labels 1..3 for the classifier.
label_id = {'Iris-setosa': 1, 'Iris-versicolor': 2, 'Iris-virginica': 3}
data['species'] = [label_id[x] for x in data['species']]
# +
y = np.array(data['species'])
x = np.array(data.loc[:, data.columns != 'species'])
def split_data(x, y, ratio, fold=0):
    """Split (x, y) into train/test arrays, holding out one contiguous slice.

    ratio is the fraction of samples in the test slice; fold selects which
    contiguous slice (0-based) is held out, enabling k-fold cross-validation.
    Returns (train_x, train_y, test_x, test_y).
    """
    lo = int(len(x) * ratio * fold)        # first held-out index
    hi = int(len(x) * ratio * (fold + 1))  # one past the last held-out index
    test_x, test_y = x[lo:hi], y[lo:hi]
    train_x = np.concatenate((x[:lo], x[hi:]))
    train_y = np.concatenate((y[:lo], y[hi:]))
    return train_x, train_y, test_x, test_y
# +
def cross_validate(x, y, folds, k):
    """Return the mean validation accuracy of a k-NN classifier over CV folds."""
    fold_ratio = 1. / folds
    scores = []
    for fold in range(folds):
        tr_x, tr_y, va_x, va_y = split_data(x, y, fold_ratio, fold)
        model = KNN(tr_x, tr_y, k)
        scores.append(calculate_accuracy(model.predict_batch(va_x), va_y))
    return np.mean(scores)

def calculate_accuracy(y_hat, y):
    """Fraction of positions where predictions y_hat agree with labels y."""
    hits = np.equal(y, y_hat)
    return hits.sum(dtype=float) / len(y)
# -
class KNN:
    """ Implementation of the kNN classification algorithm.

    X : sequence of training points (each an array-like feature vector)
    Y : sequence of training labels aligned with X
    k : number of nearest neighbors consulted per prediction
    """
    def __init__(self, X, Y, k):
        self.X = X
        self.Y = Y
        self.k = k
    def predict(self, input_x):
        """
        Predict the label of a single point by majority vote among its
        k nearest training neighbors.
        """
        neighbors = []
        for i in range(len(self.X)):
            heappush(neighbors, (self.distance(self.X[i], input_x), self.Y[i]))
        # BUG FIX: this previously read the module-level global `k`
        # (NameError, or a wrong neighbor count, whenever the surrounding
        # script's `k` differs); use the instance's own k.
        k_nearest = [x[1] for x in nsmallest(self.k, neighbors)]
        return Counter(k_nearest).most_common(1)[0][0]
    def predict_batch(self, X_test):
        """
        Predict labels using k-nearest neigbors for a batch.
        X_test : batch of inputs to predict
        """
        return [self.predict(point) for point in X_test]
    def distance(self,x1,x2):
        """ Squared L2 distance between x1 and x2 (monotonic in true distance,
        so the neighbor ranking is unchanged by skipping the sqrt). """
        # L2 distance
        return sum((x1 - x2) ** 2)
        # Manhattan distance
        # return sum(abs(x1 - x2))
        # negative Cosine similarity (cosine similarity is inversely proportional to distance)
        # return 1 - x1.dot(x2)/(np.linalg.norm(x1) * np.linalg.norm(x2))
# +
# Hold out the first 25% of the (already shuffled) data as the final test set
# (split_data defaults to fold=0, i.e. the leading slice).
train_x, train_y, test_x, test_y = split_data(x, y, ratio=0.25)
k_values = range(2, 80)
best_k = 1
best_acc = 0
# Grid-search k by 4-fold cross-validation on the training portion only.
# NOTE(review): this loop binds a module-level global `k`; make sure no other
# code in the notebook reads `k` implicitly.
for k in k_values:
    accuracy = cross_validate(train_x, train_y, folds=4, k=k)
    if k % 10 == 0:
        print("validation accuracy for k = {} is {:1.3f}".format(k, accuracy))
    if accuracy > best_acc:
        best_acc = accuracy
        best_k = k
# Use the best parameter
print("Checking accuracy on the test set using k = {}".format(best_k))
knn = KNN(train_x, train_y, best_k)
y_hat = knn.predict_batch(test_x)
test_acc = calculate_accuracy(test_y, y_hat)
print("Test set accuracy: {:1.3f}%".format(test_acc * 100))
| notebooks/knn_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## Scraping census code from american census' api in two lots (limit on how many fields can be extraced in a single api call)
## In addition, this code is scraping crime data from city's opendata and joining it to the census data.
## US Census source: Community 5 year Survey: ACS/ACS5/Profile.html
## https://api.census.gov/data/2018/acs/acs5/profile/groups.html
import requests
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
HOST = "https://api.census.gov/data"
year = "2018"
dataset = "acs/acs5/profile.html" ## has census tracts only; not census blocks
key = "27d6154de64a12d9b06727be7177fe46e7c4eee1"
base_url = "/".join([HOST, year, dataset])
## ACS Detailed table request
get_vars = ["DP05_0001E","DP02_0001E", "DP02_0002E","DP02_0008PE", "DP02_0010PE",
"DP02_0015E", "DP02_0016E", "DP02_0022PE", "DP02_0038E", "DP02_0046PE",
"DP02_0048PE","DP02_0060PE","DP02_0063PE","DP02_0079PE","DP02_0085PE",
"DP02_0088PE","DP02_0112PE","DP02_0134PE",
"DP03_0009PE","DP03_0027PE","DP03_0028PE","DP03_0037PE","DP03_0040PE",
"DP03_0041PE","DP03_0050PE","DP03_0052PE", "DP03_0054PE","DP03_0058PE",
"DP03_0061PE","DP03_0065E","DP03_0066PE","DP03_0072PE","DP03_0074PE",
"DP03_0085PE","DP03_0092E","DP03_0093E","DP03_0094E",
"DP03_0097PE","DP03_0097PE",
"DP03_0099PE","DP03_0119PE","DP03_0121PE","DP03_0126PE","DP04_0001PE",
"DP04_0002PE","DP04_0004E","DP04_0005E","DP04_0006E"] # total housing units
get_vars = ["NAME"] + get_vars
dfs = []
items = ['113', '085', '121', '183', '251', '201', '397', '439', '231',
'491', '257', '061', '139', '029', '309', '453', '213']
for item in items:
predicates = {}
predicates["get"] = ",".join(get_vars)
predicates["for"] = "tract:*"
predicates["in"] = "state:48;county:"+item ## Dallas city in Texas (48)
predicates["key"]="27d6154de64a12d9b06727be7177fe46e7c4eee1"
r8 = requests.get(base_url, params=predicates)
## field names
col_names = ["NAME", "DP05_0001E-T Popo","DP02_0001E-T HHs","DP02_0002E-T Fam HHs",
"DP02_0008PE-%F-led HHs No H","DP02_0010PE-% Non-Fam HHs", "DP02_0015E-Avg HH Size",
"DP02_0016E-Avg Fam Size", "DP02_0022PE-%Non Rel in Hs","DP02_0038E-per1000-F-U-RecentPreg",
"DP02_0046PE-%GPwParent&Gc1-2yrs", "DP02_0048PE--%GPwParent&Gc>5yrs", "DP02_0060PE-%NoSchDiploma",
"DP02_0063PE-%AssDeg","DP02_0079PE-% SameHouse_1YrAgo","DP02_0085PE-%LivAbroad1YrAgo",
"DP02_0088PE-%BornUS", "DP02_0112PE-HomeLangNotEng", "DP02_0134PE-%IrishAnc",
"DP03_0009PE-%Unemploy",
"DP03_0027PE-%WorkingMgt,Bus,Sc,Arts","DP03_0028PE%WorkingService","DP03_0037PE-Retail",
"DP03_0040PE-%Fin&Ins","DP03_0041PE-%ProfSc,Mgt,Admin","DP03_0050PE-%UnpaidFamilyWk",
"DP03_0052PE-Inc<10k", "DP03_0054PE-Inc15-24k", "DP03_0058PE-Inc75-99k", "DP03_0061PE-Inc200k>",
"DP03_0065E-HH-MeanEarnings","DP03_0066PE-%HHsWithSocSec","DP03_0072PE-%HH Inc with Cash public Ass",
"DP03_0074PE-%HH Inc with Food Stamps SNAP","DP03_0085PE-%Families Inc>200k",
"DP03_0092E-Median Earning Workers","DP03_0093E-Median Earnings Male FT Workers",
"DP03_0094E-Median Earnings Female FT Workers",
"DP03_0097PE-%PHI-Priv","DP03_0098PE-%PHI-Pub","DP03_0099PE-No PHI","DP03_0119PE-% Fam <Pov",
"DP03_0121PE-%Fam with child U18&U5 <pov","DP03_0126PE-%F-Led Families with u18 <pov",
"DP04_0001E-T H U", "DP04_0002PE-%Occ H U","DP04_0004E-%Ow Vac Rate", "DP04_0005E-%Rn Vac Rate",
"DP04_0006E-T HU",
"State","County", "Tract"]
print("county:", item)
df_collect = pd.DataFrame(columns=col_names, data=r8.json()[1:])
rows = len(df_collect)
print("rows in r", rows)
dfs.append(df_collect)
Dallas1 = pd.concat(dfs) # examine the response
Dallas1.shape
from datetime import date
print("Today's date is:", date.today())
Dallas1.head()
# +
## other variables same as for Dallas1
## ACS Detailed table request
get_vars = ["DP04_0013PE","DP04_0017PE","DP04_0029PE","DP04_0039PE","DP04_0045PE", "DP04_0046PE",
"DP04_0047E","DP04_0048E","DP04_0051PE", "DP04_0054PE", "DP04_0082PE","DP04_0083PE",
"DP04_0084PE","DP04_0087PE","DP04_0101E", "DP04_0134E","DP05_0005PE","DP05_0023PE",
"DP05_0059PE","DP05_0061PE","DP05_0064PE", "DP05_0070PE"]
get_vars = ["NAME"] + get_vars
dfs2 = []
for item in items:
predicates = {}
predicates["get"] = ",".join(get_vars)
predicates["for"] = "tract:*"
predicates["in"] = "state:48;county:"+item ## Dallas city in Texas (48)
predicates["key"]="27d6154de64a12d9b06727be7177fe46e7c4eee1"
r7 = requests.get(base_url, params=predicates)
# print(' \n ', r3.text)
col_names = ["NAME","DP04_0013PE-%20+HU","DP04_0017PE-%Built2014+", "DP04_0029PE-%2rooms_only_HU",
"DP04_0039PE-Zero bedrooms","DP04_0045PE-%Occup HU","DP04_0046PE-%Owner Occup HU",
"DP04_0048E-Owner Ave Household size","DP04_0049E-Renter Ave Household size",
"DP04_0051PE-%moved since 2017+","DP04_0054PE-%moved 2000 to 2009",
"DP04_0082PE-%Value 50-99k","DP04_0083PE-%Value 100-149k","DP04_0084PE-%Value 150-199k",
"DP04_0087PE-%Value 500k-1m","DP04_0101E-Med Onwer Monthly Mortgage",
"DP04_0134E-Median Gross Rent","DP05_0005PE-%under 5 yrs", "DP05_0023PE-%62 yrs+",
"DP05_0058PE-% w & B or AM","DP05_0061PE-%White & Asian","DP05_0064PE-%White with others",
"DP05_0070PE-%H&L race",
"State","County", "Tract"]
print("county:", item)
df_collect = pd.DataFrame(columns=col_names, data=r7.json()[1:])
rows = len(df_collect)
print("rows in r", rows)
dfs2.append(df_collect)
#print('\n 2018 - NYC County Results = \n ',df6)
Dallas2 = pd.concat(dfs2) # examine the response
Dallas2.shape
print("Today's date is:", date.today())
Dallas2.head()
# -
import pandas as pd
### join the two census pulls (Dallas1 and Dallas2) on tract identity
## Notes on joining - https://stackoverflow.com/questions/40468069/merge-two-dataframes-by-index
# BUG FIX: the merged census frame was previously stored in `Dallas_Crime_df3`,
# a name that the crime-download cell below overwrites before this result is
# ever used, which left the later reference to `Dallas_All` undefined.  Store
# the merge under the name the downstream census/crime join expects.
Dallas_All = pd.merge(left = Dallas1, right = Dallas2, how = 'right', left_on = ['NAME','State','County','Tract'], right_on = ['NAME','State','County','Tract'])
Dallas_All.head()
# +
## scraping Dallas city crime data
## Website: https://www.dallasopendata.com/Public-Safety/Police-Incidents/qv6i-rri7
## data title: 'Police Incidents' and lists incidents from 2014 to current
## 'signal' is the type of offence www.dallasopendata.com/resource/qv6i-rri7.json?$limit=1000000&$select=compname,geocoded_column')
import requests
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
#742k records retrieved, only capturing signal, compname, incident_address and geocoded_columns
data = requests.get('https://www.dallasopendata.com/resource/qv6i-rri7.json?$limit=750000&$select=signal,incident_address, geocoded_column')
## CRS, WGS 1984, decimal degrees (EPSG 4326)
#Mairead Manifold 39yg545d61h09tmg07z4hd39p $$app_token=<PASSWORD>
Dallas_Crime_df3 = pd.DataFrame(data=data.json())
Dallas_Crime_df3.to_csv("Dallas_crime_as_downloaded_Feb8th2021.csv")
Dallas_Crime_df3.head() ## 740k crime stats downloaded for Dallas Jan 31st 2021
# +
## Dallas_Crime_df3['Dallas_Crime_df3'].isnull().sum() ## there are 3,159 NAs in geocoded_column
##df = df[df['EPS'].notna()] to keep rows that have not got NAs
Dallas_Crime_df = Dallas_Crime_df3[Dallas_Crime_df3['geocoded_column'].notna()]
print("No of NAs in Dallas geocoded_column col", Dallas_Crime_df3['geocoded_column'].isnull().sum())
print("shape of Dallas_Crime_df", Dallas_Crime_df.shape)
Dallas_Crime_df.head()
# +
## Extracting longitude and latitude from the geocoded_column
## extraction example
##data['desig'] = data['Name'].str.extract(r'(\w+), (\w+). (\w+)')
## https://stackoverflow.com/questions/34867862/python-pandas-dataframe-regex-to-extract-substring-from-object
Dallas_Crime_df['Latitude'] = Dallas_Crime_df['geocoded_column'].astype(str).str.extract(r'\'latitude\': \'(.*?)longitude')
Dallas_Crime_df['Longitude'] = Dallas_Crime_df['geocoded_column'].astype(str).str.extract(r'\'longitude\': \'(.*?)human_address')
Dallas_Crime_df.head()
# -
# removing part of
#df[df["Date"].str.replace(to_replace="\s:00", value="")]
#'human_address': '{"address": "
Dallas_Crime_df['Latitude'] = Dallas_Crime_df['Latitude'].str.slice(0,9)
Dallas_Crime_df['Longitude'] = Dallas_Crime_df['Longitude'].str.slice(0,10)
Dallas_Crime_df.head()
Dallas_Crime_df[Dallas_Crime_df['Longitude'].isna()] ## see how many longitude NAs found - 25,980 found
Dallas_Crime_df[Dallas_Crime_df['Latitude'].isna()] ## see how many latitude NAs found - 25,980 found
##df = df[df['EPS'].notna()] to keep rows that have not got NAs
# BUG FIX: the two filters were previously applied independently to
# Dallas_Crime_df, so the second assignment silently discarded the longitude
# filter.  Require BOTH coordinates to be present in a single boolean mask.
Dallas_Crime_df_noNAs = Dallas_Crime_df[Dallas_Crime_df['Longitude'].notna()
                                        & Dallas_Crime_df['Latitude'].notna()]
Dallas_Crime_df_noNAs
## make longitude and latitude fields into floats.
import numpy as np
def f(x):
    """Best-effort numeric conversion: return float(x), or NaN on failure."""
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24; it
    # was only ever an alias for the builtin float.  The bare `except` is also
    # narrowed to the exceptions float() actually raises for bad input.
    try:
        return float(x)
    except (TypeError, ValueError):
        return np.nan
Dallas_Crime_df_noNAs['Latitude'] = Dallas_Crime_df_noNAs['Latitude'].apply(f)
Dallas_Crime_df_noNAs['Longitude'] = Dallas_Crime_df_noNAs['Longitude'].apply(f)
# +
import geopandas as gpd
## Creating a GeoDataframe file from Dallas and then using it
## to get tract points from the Texas census shape file (State ID: 48)
# Build point geometries from the parsed lon/lat columns; both layers are put
# in EPSG:4269 (NAD83) before the spatial join below.
Dallas_data_points = gpd.GeoDataFrame(Dallas_Crime_df_noNAs, geometry=gpd.points_from_xy(Dallas_Crime_df_noNAs.Longitude, Dallas_Crime_df_noNAs.Latitude), crs='EPSG:4269')
Dallas_data_points
Dallas_shape = "cb_2018_48_tract_500k.shp" # load state code "48" for Texas shape file
Dallas_map = gpd.read_file(Dallas_shape)
# Spatial join: keep only crime points that fall inside a Texas census tract.
# NOTE(review): `op=` was deprecated in favor of `predicate=` in GeoPandas 0.10
# -- confirm the installed version still accepts it.
Dallas_points_in_map = gpd.sjoin(Dallas_data_points.to_crs('EPSG:4269'),
                                 Dallas_map,
                                 how='inner',
                                 op='within') ## inner join: the tract layer covers all of Texas while the crime data covers only Dallas-area tracts
print("I am done")
print("Today's date is:", date.today())
# -
Dallas_points_in_map.head()
print(Dallas_points_in_map.shape)
Dallas_data_points.head()
print(Dallas_points_in_map.shape)
## Checking what county are listed in the crime data as this is what
## is needed when extracting from census
# Renamed from `list`, which shadowed the builtin list type.
county_codes = Dallas_points_in_map['COUNTYFP'].unique() ## checking if leading zeros needed
county_codes[0:10,] ## confirms extraction targeted the intended census counties
import pandas as pd
## saved the dcb_2018_48_tract_500k.dbf file as a csv file to get access to the tract ID
## the index_right column aligns with the tract ID in this file.
census_tract_Texas = pd.read_csv("cb_2018_48_tract_500k.csv", header=0)
print("census tract info from Texas dbf file saved as a csv file")
census_tract_Texas ## 5254 tracts listed
census_tract_Texas['TRACTCE'] = census_tract_Texas['TRACTCE'].apply(lambda x: '{0:0>6}'.format(x)).astype(str)
census_tract_Texas['TRACTCE'].head()
# +
census_tract_Texas['COUNTYFP'] = census_tract_Texas['COUNTYFP'].apply(lambda x: '{0:0>3}'.format(x)).astype(str)
census_tract_Texas['COUNTYFP'].head()
# -
##The points_in_map of crime data does not include the county or tract or land area, so using join to add these
## fields from Shape dbf file to the crime data.
#points_in_map = points_in_map.join(census_tract_df[['COUNTYFP']], on='index_right')
#points_in_map = points_in_map.join(census_tract_df[['TRACTCE']], on='index_right')
#points_in_map = points_in_map.join(census_tract_df[['ALAND']], on='index_right')
Dallas_points_in_map = Dallas_points_in_map.join(census_tract_Texas[['STATEFP','COUNTYFP','TRACTCE','ALAND']],on='index_right', lsuffix='_left', rsuffix='_right')
Dallas_points_in_map.head()
# +
import pandas as pd
#'COUNTYFP_right', 'TRACTCE_right',, 'ALAND_right'
Dallas_points_in_map.drop('STATEFP_right', inplace=True, axis=1)
Dallas_points_in_map.drop('COUNTYFP_right', inplace=True, axis=1)
Dallas_points_in_map.drop('TRACTCE_right', inplace=True, axis=1)
Dallas_points_in_map.drop('ALAND_right', inplace=True, axis=1)
#LA_points_in_map.drop('STATEFP_left', inplace=True, axis=1)
#LA_points_in_map.drop('COUNTYFP_left', inplace=True, axis=1)
#LA_points_in_map.drop('TRACTCE_left', inplace=True, axis=1)
#LA_points_in_map.drop('ALAND_left', inplace=True, axis=1)
# -
Dallas_points_in_map.rename(columns={'STATEFP_left':'State', 'COUNTYFP_left': 'County', 'TRACTCE_left':'Tract', 'ALAND_left':'AreaLand'}, inplace=True)
Dallas_points_in_map.head()
groupedTract_count = Dallas_points_in_map.groupby(['County','Tract','AreaLand']).size().reset_index(name='crimeStats')
#df2 = df.rename({'a': 'X', 'b': 'Y'}, axis=1)
groupedTract_count
groupedTract_mean = groupedTract_count["crimeStats"].mean()
print("mean",groupedTract_mean)
groupedTract_min = groupedTract_count["crimeStats"].min()
print("min", groupedTract_min)
groupedTract_max = groupedTract_count["crimeStats"].max()
print("max", groupedTract_max)
import numpy as np
groupedTract_count["crime"] = np.where(groupedTract_count.crimeStats < groupedTract_mean, "low", "high")
groupedTract_count
## checking there are no crimeStats zero values
####dc_NYCAll_withCrime.loc[dc_NYCAll_withCrime.crime == 0, 'crime'].count()
groupedTract_count.loc[groupedTract_count.crimeStats == 0, 'crimeStats'].count()
## checking there are no AreaLand zero values
groupedTract_count.loc[groupedTract_count.AreaLand == 0, 'AreaLand'].count()
## join crime data to census data
## https://kodlogs.com/38097/len-left_on-must-equal-the-number-of-levels-in-the-index-of-right#:~:text=pages%20in%20html-,Len(left_on)%20must%20equal%20the%20number%20of%20levels%20in,the%20index%20of%20%22right%22.
DallasAll_withCrime = pd.merge(left = Dallas_All, right = groupedTract_count, how = 'inner', on = ['County', 'Tract'])
DallasAll_withCrime.count()
## remove rows with NaNs for Crime as it will not be possible to run ML on crime outcome for these
## rows using df.dropna() but there were none as same no of rows after running code
Dallas_AllNoNAs = DallasAll_withCrime.dropna() ## returning to number of rows that agrees with groupedTract_count available crime data
Dallas_AllNoNAs.head() ## 516 so no NAs as of 8/2/2021
print(date.today())
## checking if any rows with Popo == zero; luckily none
Dallas_AllNoNAs[Dallas_AllNoNAs['DP05_0001E-T Popo']== 0]
Dallas_AllNoNAs['AreaLand'] = (Dallas_AllNoNAs['AreaLand']/10000).astype(int)
Dallas_AllNoNAs['AreaLand']
Dallas_AllNoNAs['Popo_Density_Per_Hector'] = Dallas_AllNoNAs['AreaLand']/(Dallas_AllNoNAs['DP05_0001E-T Popo'].astype(int))
Dallas_AllNoNAs['Popo_Density_Per_Hector'] = Dallas_AllNoNAs['Popo_Density_Per_Hector'].round(2)
Dallas_AllNoNAs['Popo_Density_Per_Hector']
## double checking that there are no rows with zero popo, and there are none
Dallas_AllNoNAs = Dallas_AllNoNAs[Dallas_AllNoNAs['DP05_0001E-T Popo'] != '0']
Dallas_AllNoNAs.head()
# +
## converting all data columns to numeric
for col in Dallas_AllNoNAs.filter(like='DP').columns:
Dallas_AllNoNAs[col] = pd.to_numeric(Dallas_AllNoNAs[col], errors='coerce')
#dc_NYCAll_withCrime[cols] = dc_NYCAll_withCrime[cols].apply(pd.to_numeric, errors='coerce', axis=1)
# -
## notice that some are float64 and some are int64
Dallas_AllNoNAs.dtypes
Dallas_AllNoNAs.describe()
Dallas_AllNoNAs.isna().sum(0)
Dallas_AllNoNAs.loc[Dallas_AllNoNAs.crimeStats == 0, 'crimeStats'].count()
## none without the zero in the T Popo column
##df['Correlation'].drop(df[df['Correlation'] < 0].index, inplace=True)
Dallas_AllNoNAs[Dallas_AllNoNAs['DP05_0001E-T Popo'] != 0]
# +
## Want to round decimal places
##rounded_df = df.round(decimals=2)
##df.columns.tolist()
##df.apply(lambda x: '%.5f' % x, axis=1)
#Dallas_AllNoNAss = Dallas_AllNoNAs.apply(lambda x: '%.2f' % x, axis=1)
Dallas_AllNoNAs.dtypes
# -
Dallas_AllNoNAs.groupby('crime').count()
## save file to local drive
##dc_NYCAll_withCrimeNoNAs.to_csv("dc_NYCAll_withCrimeNov30th.csv")
Dallas_AllNoNAs.to_csv("Dallas_CSV__Feb8th_before.csv") ## 245kB
Dallaslist_of_counties = ['113', '085', '121', '183', '251', '201', '397', '439', '231',
'491', '257', '061', '139', '029', '309', '453', '213']
census_tract_Texas['COUNTYFP'] = census_tract_Texas['COUNTYFP'].apply(lambda x: '{0:0>3}'.format(x)).astype(str)
census_tracts_Dallas = census_tract_Texas[census_tract_Texas['COUNTYFP'].isin(Dallaslist_of_counties)]
census_tracts_Dallas
## save Dallas city tract file to local drive
##dc_NYCAll_withCrimeNoNAs.to_csv("dc_NYCAll_withCrimeNov30th.csv")
census_tracts_Dallas.to_csv("census_tracts_DallasFeb7th.csv")
| Dallas census & crime data (718k rows) from ACS_ACS_Profile.html.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] graffitiCellId="id_d94vwl1"
# # Practicing Adding Deletion
# + [markdown] graffitiCellId="id_fr0841w"
# You have just added the ability to insert and search on the tree. Let's take it one step further by having you add the ability to delete from the tree as well.
# + [markdown] graffitiCellId="id_icxdmpp"
# #### Try it
# + graffitiCellId="id_u2x661f"
class Node(object):
    """A single BST node holding a value and left/right child links."""
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None

class BST(object):
    """Binary search tree: values < node go left, values >= node go right."""
    def __init__(self, root):
        self.root = Node(root)

    def insert(self, new_val):
        """Insert new_val by walking down to the first empty child slot."""
        current = self.root
        while True:
            if new_val < current.value:
                if current.left is None:
                    current.left = Node(new_val)
                    return
                current = current.left
            else:
                if current.right is None:
                    current.right = Node(new_val)
                    return
                current = current.right

    def search(self, find_val):
        """Return True if find_val is present in the tree, else False."""
        current = self.root
        while current is not None:
            if find_val == current.value:
                return True
            current = current.left if find_val < current.value else current.right
        return False

    def delete(self, del_val):
        """Remove one occurrence of del_val from the tree, if present."""
        self.root = self._delete(self.root, del_val)

    def _delete(self, node, val):
        """Recursive helper: return the subtree rooted at node with val removed."""
        if node is None:
            return None
        if val < node.value:
            node.left = self._delete(node.left, val)
        elif val > node.value:
            node.right = self._delete(node.right, val)
        else:
            # Zero or one child: splice the (possibly None) child into place.
            if node.left is None:
                return node.right
            if node.right is None:
                return node.left
            # Two children: copy the in-order successor's value here, then
            # delete the successor from the right subtree.
            successor = node.right
            while successor.left is not None:
                successor = successor.left
            node.value = successor.value
            node.right = self._delete(node.right, successor.value)
        return node
# + [markdown] graffitiCellId="id_brj73e9"
# #### Test Cases
# + graffitiCellId="id_qqo3el0"
# Set up tree
tree = BST(4)
# Insert elements
tree.insert(2)
tree.insert(1)
tree.insert(3)
tree.insert(5)
# Check search
print ("Pass" if tree.search(4) else "Fail")
print ("Pass" if not tree.search(6) else "Fail")
# Delete elements
tree.delete(5)
# Should be False
print ("Pass" if not tree.search(5) else "Fail")
# + [markdown] graffitiCellId="id_guivotb"
# <span class="graffiti-highlight graffiti-id_guivotb-id_z5v0zs0"><i></i><button>Show Solution</button></span>
| trees/Practice Adding Deletion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="f14f3768"
# # CSV 파일 다루기
# 쉼표로 구분된 텍스트 파일<br><br>
#
# # 절차: 파일 열기 -> 파일 읽기 -> 헤더 제거 -> 리스트 변환
# 열기: Open()으로 파일 열기<br>
# 읽기: csv.reader()에서 파일 저장한 변수 넣기<br>
# 헤더제거: next()으로 행 건너뛰기<br>
# 리스트 변환: list()<br>
# + id="5831ba72" outputId="016877cf-2cc6-4a35-be38-839bc56c2ac8"
import csv
# Open -> csv.reader -> collect rows as tuples -> print each row.
with open('olympic.csv', newline='', encoding='UTF8') as f:
    reader = csv.reader(f)
    data=[tuple(row) for row in reader]
    for i in data:
        print(i)
# The with-block already closed the file; this close() is a redundant no-op.
f.close()
# + id="3932ee13" outputId="c64b43a8-7e37-40b3-d943-bdc478c031fb"
import csv
# Same as above, but UTF-8-sig also strips a leading byte-order mark if present.
with open('olympic.csv', newline='', encoding='UTF-8-sig') as f:
    reader = csv.reader(f)
    data=[tuple(row) for row in reader]
    for i in data:
        print(i)
# Redundant: the file was already closed on exiting the with-block.
f.close()
# + colab={"base_uri": "https://localhost:8080/"} id="584027e5" outputId="1e9ec363-968f-4b49-ea8f-e9670bfe8f8f"
# Two equivalent traversals: first directly over the values, then via indices.
numbers = ['one', 'two', 'three', 'four', 'five']
for word in numbers:
    print(word)
for position, word in enumerate(numbers):
    print(word)
# + [markdown] id="uXpavXaHsFEB"
# 점프 투 파이썬, 코딩도장 참고하기!
# + colab={"base_uri": "https://localhost:8080/"} id="NFLDClUaqrSN" outputId="5d93204c-b49e-4d3a-fd89-63d826b6f367"
# Read the whole file into one string at once.
f = open("/content/crypto1.txt", 'r')
data=f.read()
print(data)
f.close()
# + colab={"base_uri": "https://localhost:8080/"} id="pzAJkbEpsv8Y" outputId="173deb24-ab3a-4ba2-9f92-6a88cd2e2941"
# Read line by line with readline(); an empty string signals end of file.
f = open("/content/crypto1.txt", 'r')
while True:
    line=f.readline()
    if not line: break
    print(line)
f.close()
# + colab={"base_uri": "https://localhost:8080/"} id="UkCdLsFvt1y4" outputId="df76291d-fb69-4e5a-a9ba-0a4812c02027"
# Read all lines into a list with readlines(), then iterate.
f = open("/content/crypto1.txt", 'r')
lines=f.readlines()
for line in lines:
    print(line)
f.close()
# + id="ZT24wKkdu2Ec"
# Append mode ('a'): repeated runs keep adding lines to the end of the file.
f = open("/content/text.txt", 'a')
for i in range(1, 10):
    data = "{}번째 줄입니다\n".format(i)
    f.write(data)
f.close()
# + id="Q79AJfs_vfl3"
# Write mode ('w'): truncates first, so only lines 1..9 remain afterwards.
f = open("/content/text.txt", 'w')
for i in range(1, 10):
    data = "{}번째 줄입니다\n".format(i)
    f.write(data)
f.close()
# + colab={"base_uri": "https://localhost:8080/"} id="umzg5J_Kwu75" outputId="7574c18a-18f0-4ae6-9a72-32316ab8f580"
# Save the typed mood to a file, then read it back and display it.
emo = input()
try:
    f = open("/content/now.txt", 'w')
# NOTE(review): opening in 'w' mode creates a missing file rather than raising
# FileNotFoundError (it only raises if the directory is absent), so this
# fallback branch is effectively dead code -- confirm the intended behavior.
except FileNotFoundError:
    f = open("/content/now.txt", 'a')
f.write(emo)
f.close()
f = open("/content/now.txt", 'r')
print("지금 내 기분: {}".format(f.read()))
# + id="awQdoXvKxVy1"
| 3-1/06-CSV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Conda Installs
#
# Presented by: <NAME> (HPCC)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Summary
#
# * Why Conda
# * Software Challenges
# * Solutions
# * Configure
# * Virtual Environments
# * Install Examples
# * Pandas
# * FastQC
# * Tensorflow
#
# <hr/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Why Conda: Software Challenges
#
# * Non-Root access
# * Dependency conflicts
# * Sharable
# * Easy to execute
#
# <hr/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Why Conda: Solutions
#
# * Use non-root user
# * Installs in separate environments
# * Installs are modular and easy to share
# * Easy to load/unload environments
#
# <hr/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Configure: Locations
#
# Create the file `~/.condarc` in your home, with the following content:
#
# ```
# channels:
# - defaults
# pkgs_dirs:
# - ~/bigdata/.conda/pkgs
# envs_dirs:
# - ~/bigdata/.conda/envs
# auto_activate_base: false
# ```
#
# https://hpcc.ucr.edu/manuals/hpc_cluster/package_manage/#virtual-environments
#
# <hr />
# + [markdown] slideshow={"slide_type": "slide"}
# ## Configure: Virtual Environment
#
# First, lets request a compute node, since running on the head nodes would be slow:
#
# ```
# srun -p short -c 2 --mem=10g --pty bash -l
# ```
# + [markdown] slideshow={"slide_type": "fragment"}
# Create conda virtual environment for `pandasEnv`, since pandas is a python pacakge, we will pre install Python:
# + slideshow={"slide_type": "-"}
conda create -y -n pandasEnv python=3.6.4
# + [markdown] slideshow={"slide_type": "fragment"}
# Activate pandasEnv virtual environment:
# + slideshow={"slide_type": "-"}
conda activate pandasEnv
# -
# <hr/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Install Examples: Pandas
#
# Now that we have a virtual environment, we can install packages within it. Visit the Anaconda website to search for a package:
#
# [https://anaconda.org/](https://anaconda.org/)
#
# + [markdown] slideshow={"slide_type": "fragment"}
# After you find the package, click on the name of the package to see install command.
# + slideshow={"slide_type": "-"}
#conda install -y -c anaconda pandas
conda install -y -n pandasEnv -c anaconda pandas
# -
# <hr/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Install Examples: FastQC
#
# Same procedure as before:
#
# 1. Create `fastqcEnv` virtual environment
# 2. Activate `fastqcEnv` virtual environment
# 3. Look up `fastqc` on Anaconda
# 4. Install `fastqc` package
# 5. Deactivate `fastqcEnv` virtual environment
# + slideshow={"slide_type": "fragment"}
# Create virtual environment
conda create -y -n fastqcEnv
# Activate virtual environment
conda activate fastqcEnv
# Install fastqc (from the bioconda channel)
conda install -y -c bioconda fastqc
# Deactivate virtual environment
conda deactivate
# -
# <hr/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Install Examples: Tensorflow
#
# Same procedure as before:
#
# 1. Create `tensorflowEnv` virtual environment
# 2. Activate `tensorflowEnv` virtual environment
# 3. Look up `tensorflow` on Anaconda
# 4. Install `tensorflow` package
# 5. Deactivate `tensorflowEnv` virtual environment
# + slideshow={"slide_type": "fragment"}
# Create virtual environment
conda create -y -n tensorflowEnv
# Activate virtual environment (name must match the env created above;
# `conda activate tensorflow` would target a nonexistent environment)
conda activate tensorflowEnv
# Install tensorflow
conda install -y -n tensorflowEnv -c conda-forge tensorflow
# Deactivate virtual environment
conda deactivate
# -
# <hr/>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Install Examples: Script
#
# After the installs have completed, you may want to use 1 (or more) conda virtual environments within a job/script:
# +
#!/bin/bash -l
#SBATCH -p short
#SBATCH -c 2
#SBATCH --mem=10gb
#SBATCH --time=10:00
#SBATCH -J "Conda Demo"
# Already loaded by default
module load miniconda2
#### pandasEnv ####
# Activate virtual environment
conda activate pandasEnv
# Show file path to where pandas is installed
python -c 'import pandas; print(pandas.__file__)'
# Deactivate virtual environment
conda deactivate
#### fastqcEnv ####
# Activate virtual environment
conda activate fastqcEnv
# Run help
fastqc --help
# Deactivate virtual environment
conda deactivate
#### tensorflowEnv ####
# Activate virtual environment
conda activate tensorflowEnv
# Add a scalar and a list
python -c 'import tensorflow as tf; x = [1, 2, 3, 4, 5]; y = 1; print(tf.add(x, y))'
# Deactivate virtual environment
conda deactivate
| static/presentations/coffeehour/conda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="Tq8bG1MNUhzi"
# # Capítol 3 - Algorismes i Nombres
# + [markdown] id="tCtf0jLsUhzy"
# ### 3.6 Aritmetica modular
# + id="_ZIPSE7YUhzy"
def validar_NIF(cadenaNIF):
    """
    Check whether the control letter of a Spanish NIF matches its number.

    Parameters
    ----------
    cadenaNIF: str
        Full NIF string: the DNI digits followed by one control letter.

    Returns
    -------
    bool
        True when the trailing letter equals the official check letter
        (DNI number modulo 23, indexed into the legal letter table).
    """
    # Official check-letter table: position i holds the letter for dni % 23 == i.
    taula = "TRWAGMYFPDXBNJZSQVHLCKE"
    numero, lletra = cadenaNIF[:-1], cadenaNIF[-1]
    return lletra == taula[int(numero) % 23]
validar_NIF('47892906Y')
# + id="ez-eMr8lUhzy"
# Sanity checks: a matching control letter validates, a non-matching one does not.
assert validar_NIF('56789123F') == True
assert validar_NIF('56789123H') == False
# + id="O_2SAkrsUhzy"
def conversio_fulla_calcul(num):
    """
    Translate a 1-based column number into its spreadsheet column name.

    Parameters
    ----------
    num: int
        Column number, starting at 1 (1 -> 'A', 26 -> 'Z', 27 -> 'AA', ...).
        Values < 1 yield the empty string.

    Returns
    -------
    columna: str
        Column label in bijective base-26 ('A'..'Z', 'AA', 'AB', ...).
    """
    abecedari = ['A', 'B', 'C', 'D','E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
    columna = ""
    # Bijective base-26 has no digit for zero, so subtract 1 before each
    # divmod to map the remainder onto 'A'..'Z'.  The previous float-division
    # version drifted for many multi-letter columns (e.g. 53 gave 'AA'
    # instead of the correct 'BA'); integer divmod is exact for all inputs.
    while num > 0:
        num, resta = divmod(num - 1, 26)
        columna = abecedari[resta] + columna
    return columna
conversio_fulla_calcul(729)
# + id="DIe-ltSWUhzy"
# Regression checks against the standard spreadsheet (bijective base-26)
# column naming: A..Z, AA..ZZ, AAA..
assert conversio_fulla_calcul(1) == 'A'
assert conversio_fulla_calcul(25) == 'Y'
assert conversio_fulla_calcul(26) == 'Z'
assert conversio_fulla_calcul(27) == 'AA'
assert conversio_fulla_calcul(28) == 'AB'
assert conversio_fulla_calcul(29) == 'AC'
assert conversio_fulla_calcul(107) == 'DC'
assert conversio_fulla_calcul(406) == 'OP'
assert conversio_fulla_calcul(407) == 'OQ'
assert conversio_fulla_calcul(408) == 'OR'
assert conversio_fulla_calcul(412) == 'OV'
assert conversio_fulla_calcul(702) == 'ZZ'
assert conversio_fulla_calcul(703) == 'AAA'
assert conversio_fulla_calcul(704) == 'AAB'
assert conversio_fulla_calcul(705) == 'AAC'
assert conversio_fulla_calcul(708) == 'AAF'
assert conversio_fulla_calcul(1000) == 'ALL'
| Algorismica/.ipynb_checkpoints/3.6-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.7 64-bit (''.dunex-venv'': venv)'
# language: python
# name: python3
# ---
# <h1>Exploring the Parameter Space Spanned during DUNEX</h1>
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import netCDF4 as nc
import matplotlib.patches as patches
import cftime
# %matplotlib widget
# +
# Get list of times in and out of water from Notes Spreadsheet
# Define Project Directory
project_dir = '../'
# Define Data Directory
data_dir = 'microSWIFT_data/'
# Define Metadata Excel sheet name
metadata_name = 'DUNEXMainExp_notes.xlsx'
# Combine file name and project Directory
metadata_filename = project_dir + metadata_name
# Create dataframe object from DUNEX MetaData SpreadSheet
dunex_xlsx = pd.read_excel(metadata_filename)
# Parse ISO-formatted start/end timestamps for every mission row
# (list comprehensions replace the previous index-based append loop)
start_times = [datetime.datetime.fromisoformat(t) for t in dunex_xlsx['Start Time']]
end_times = [datetime.datetime.fromisoformat(t) for t in dunex_xlsx['End Time']]
# +
# Load in CDIP buoy 433 data
cdip_433_url = 'https://thredds.cdip.ucsd.edu/thredds/dodsC/cdip/realtime/433p1_rt.nc'
cdip433_data = nc.Dataset(cdip_433_url)
# Get wave parameters from dataset
ncTime = cdip433_data.variables['waveTime'][:]
cdip_time = [datetime.datetime.fromtimestamp(t) for t in ncTime]  # Convert ncTime variable to datetime stamps
Hs = cdip433_data.variables['waveHs']
Tp = cdip433_data.variables['waveTp']
Dp = cdip433_data.variables['waveDp']
# Trim time series to be between October 3rd and October 30th
experiment_start = datetime.datetime(2021, 10, 3, 0, 0, 0)
experiment_end = datetime.datetime(2021, 10, 31, 0, 0, 0)
# Collect indices (and times) that fall within the experiment window
indices_in_experiment_waves = [ind for ind, t in enumerate(cdip_time)
                               if experiment_start <= t <= experiment_end]
cdip_time_inex = [cdip_time[ind] for ind in indices_in_experiment_waves]
# Sort all variables to be within the experiment window
Hs_inex = Hs[indices_in_experiment_waves]
Tp_inex = Tp[indices_in_experiment_waves]
Dp_inex = Dp[indices_in_experiment_waves]
# +
# Load in Wind data from FRF data portal
wind_url = 'https://chlthredds.erdc.dren.mil/thredds/dodsC/frf/meteorology/wind/derived/2021/FRF-met_wind_derived_202110.nc'
wind_dataset = nc.Dataset(wind_url)
# Get variables from wind data set
wind_time = cftime.num2pydate(wind_dataset.variables['time'][:], units=wind_dataset.variables['time'].units, calendar=wind_dataset.variables['time'].calendar)
# Need to remove last two values in list since they are not real values they are '--'
wind_time = wind_time[:-2]
windSpeed = wind_dataset['windSpeed'][:-2]
windDirection = wind_dataset['windDirection'][:-2]
# Collect indices (and times) after the experiment start
# (NOTE(review): unlike the wave sort above, there is no upper bound on time
# here — confirm that is intentional)
indices_in_experiment_wind = [ind for ind, t in enumerate(wind_time)
                              if t > experiment_start]
wind_time_inex = [wind_time[ind] for ind in indices_in_experiment_wind]
# Sort all variables to be within the experiment window
windSpeed_inex = windSpeed[indices_in_experiment_wind]
windDirection_inex = windDirection[indices_in_experiment_wind]
print(np.max(windSpeed_inex))
# +
# Plot time series of each Parameter
fig_params, (ax_hs, ax_tp, ax_dp, ax_ws, ax_wd) = plt.subplots(5, figsize=(12,10))
# Plot Hs
ax_hs.plot(cdip_time_inex, Hs_inex)
ax_hs.set_ylabel('Significant Wave Height, Hs [m]')
ax_hs.set_xlabel('Time')
ax_hs.set_ylim(0, 3.5)
# Plot Tp
ax_tp.plot(cdip_time_inex, Tp_inex)
ax_tp.set_ylabel('Peak Period, Tp [sec]')
ax_tp.set_xlabel('Time')
ax_tp.set_ylim(0, 25)
# Plot Dp
ax_dp.plot(cdip_time_inex, Dp_inex)
ax_dp.set_ylabel('Peak Direction, Dp [degrees]')
ax_dp.set_xlabel('Time')
ax_dp.set_ylim(0, 370)
# Plot Wind Speed
ax_ws.plot(wind_time_inex, windSpeed_inex)
ax_ws.set_ylabel('10-Minute Mean Wind Speed, [m/s]')
ax_ws.set_xlabel('Time')
ax_ws.set_ylim(0, 20)
# Plot Wind Direction
ax_wd.plot(wind_time_inex, windDirection_inex)
ax_wd.set_ylabel('Wind Direction, [degrees]')
ax_wd.set_xlabel('Time')
ax_wd.set_ylim(0, 370)
# Add Mission Time block patches (grey rectangles spanning each deployment)
for ind in np.arange(1,len(start_times)):
    if ind == 6:
        # skip mission 6 which was not a real mission but a separate offload for the micros that were lost then recovered later - see notes spreadsheet
        continue
    ax_hs.add_patch(patches.Rectangle((start_times[ind], 0), end_times[ind]-start_times[ind], 3.5, linewidth=1, edgecolor='0.7', facecolor='0.7'))
    ax_tp.add_patch(patches.Rectangle((start_times[ind], 0), end_times[ind]-start_times[ind], 25, linewidth=1, edgecolor='0.7', facecolor='0.7'))
    ax_dp.add_patch(patches.Rectangle((start_times[ind], 0), end_times[ind]-start_times[ind], 370, linewidth=1, edgecolor='0.7', facecolor='0.7'))
    ax_ws.add_patch(patches.Rectangle((start_times[ind], 0), end_times[ind]-start_times[ind], 20, linewidth=1, edgecolor='0.7', facecolor='0.7'))
    ax_wd.add_patch(patches.Rectangle((start_times[ind], 0), end_times[ind]-start_times[ind], 370, linewidth=1, edgecolor='0.7', facecolor='0.7'))
# Set Figure Properties
# BUG FIX: was `plt.tight_layout` (bare attribute access, a no-op) — call it.
plt.tight_layout()
# +
# Sort all parameters during microSWIFT deployments
# Collect indices of wave/wind samples that fall within any mission window.
# (NOTE(review): unlike the plot above, mission 6 is NOT skipped here — confirm.)
indices_in_mission_waves = []
indices_in_mission_wind = []
for mission_num in np.arange(1, len(start_times)):
    # Wave samples inside this mission's deployment window
    indices_in_mission_waves.extend(
        ind for ind, t in enumerate(cdip_time_inex)
        if start_times[mission_num] <= t <= end_times[mission_num])
    # Wind samples inside this mission's deployment window
    indices_in_mission_wind.extend(
        ind for ind, t in enumerate(wind_time_inex)
        if start_times[mission_num] <= t <= end_times[mission_num])
# Sort Wave Parameters
Hs_in_mission = Hs_inex[indices_in_mission_waves]
Tp_in_mission = Tp_inex[indices_in_mission_waves]
Dp_in_mission = Dp_inex[indices_in_mission_waves]
# Sort Wind Parameters
windSpeed_in_mission = windSpeed_inex[indices_in_mission_wind]
windDirection_in_mission = windDirection_inex[indices_in_mission_wind]
# +
# Plot scatter views of the sampled wave/wind parameter space
fig_hist, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(10,10))
# Scatter plot of Hs and Tp sampled
ax1.scatter(Tp_in_mission, Hs_in_mission)
ax1.set_xlabel('Peak Period, Tp [sec]')
ax1.set_ylabel('Significant Wave Height, Hs [m]')
ax1.set_xlim(0, 25) # limits for Tp - same as time series above
ax1.set_ylim(0, 3.5) # limits for Hs - same as time series above
# Scatter Plot of Dp and Tp sampled
ax2.scatter(Tp_in_mission, Dp_in_mission)
ax2.set_xlabel('Peak Period, Tp [sec]')
ax2.set_ylabel('Peak Direction, Dp [degrees]')
ax2.set_xlim(0, 25) # limits for Tp - same as time series above
ax2.set_ylim(0, 370) # limits for Dp - same as time series above
# Scatter plot of Hs and Dp sampled
ax3.scatter(Dp_in_mission, Hs_in_mission)
ax3.set_xlabel('Peak Direction, Dp [degrees]')
ax3.set_ylabel('Significant Wave Height, Hs [m]')
ax3.set_xlim(0, 370) # limits for Dp - same as time series above
# BUG FIX: this line previously set ax1's y-limits again instead of ax3's.
ax3.set_ylim(0, 3.5) # limits for Hs - same as time series above
# Scatter Wind Speed and Direction
ax4.scatter(windSpeed_in_mission, windDirection_in_mission)
ax4.set_xlabel('Wind Speed, [m/s]')
ax4.set_ylabel('Wind Direction, [degrees]')
ax4.set_xlim(0,20)
ax4.set_ylim(0,370)
# Figure Properties
plt.tight_layout()
# +
# Color Coded by peak direction scatter plot
fig_HsTpDp, ax = plt.subplots()
# Renamed from `map`, which shadowed the builtin of the same name.
dp_scatter = ax.scatter(Tp_in_mission, Hs_in_mission, c=Dp_in_mission, cmap='inferno')
ax.set_xlabel('Peak Period, tp [sec]')
ax.set_ylabel('Significant Wave Height, Hs [m]')
ax.set_xlim(0, 20)
ax.set_ylim(0, 3)
# Set Colorbar
cbar = plt.colorbar(dp_scatter, ax=ax)
cbar.ax.set_ylabel('Peak Direction, Dp [degrees]')
| analysis/parameterSpaceExploration.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.3
# language: julia
# name: julia-1.0
# ---
# # Various illustrations on IHT-poisson fitting
#
# In this notebook we illustrate the functionalities and behaviors of IHT used for high-dimensional poisson regressions. Specifically, we investigate:
# + What is a reasonable distribution for $\beta_{true}$?
# + Is initializing $\beta$ good in terms of speed and model selection?
# + Is debiasing good in terms of speed and model selection?
# + How does IHT compare to LASSO?
#
using Revise
using MendelIHT
using SnpArrays
using DataFrames
using Distributions
using BenchmarkTools
using Random
using LinearAlgebra
using StatsFuns: logistic
# # What is a reasonable distribution for $\beta_{true}$?
#
# Recall that when $\lambda \geq 20$, Poisson($\lambda)$ can be [approximated by the normal distributions with a continuity correction](http://wiki.stat.ucla.edu/socr/index.php/AP_Statistics_Curriculum_2007_Limits_Norm2Poisson). Thus in our data simulation, it is a good idea to make sure the mean of the generated $\lambda_i$ for each sample does not exceed this value, because otherwise it would be more appropriate to fit a normal, or log-normal distribution. This can be roughly ensured if we let $\beta \sim N(0, s)$ where $s$ is small, but obviously the smaller the $\beta$, the harder it is to find it. This section investigates what distribution we should choose for $\beta$ to approximately ensure that the median of $y$ lies in a reasonable range.
#
# ## "Bad" poisson reconstruction for extreme $\lambda$ values
#
# To convince the skeptical reader, first we show poisson reconstruction results that are "sometimes bad" as a result of a few extreme $\lambda_i$. These extreme values forces extremely large gradients in the first few iterations. This introduces numerical instability, but also brings the model estimate to disproportionately large values, rendering IHT powerless. For $k = 10$, this happens around $50\%$ of the time (i.e. when mean$(y) \leq 20$), and this probability tend to increase with larger $k$ values because $\lambda_i = \exp(x_i^T\beta)$.
# Runs one Poisson-IHT reconstruction with β ~ N(0, 1) — deliberately a BAD
# choice, since it produces extreme λ = exp(xᵢᵀβ) values — then prints the
# estimated vs true coefficients plus summary statistics of the simulated y.
function run_poisson_bad(n :: Int64, p :: Int64, k :: Int64)
    #set random seed
    Random.seed!(1111)
    #construct snpmatrix, covariate files, and true model b. Here we show N(0, 1) is a BAD MODEL
    x, maf = simulate_random_snparray(n, p)
    xbm = SnpBitMatrix{Float64}(x, model=ADDITIVE_MODEL, center=true, scale=true);
    z = ones(n, 1) #intercept
    true_b = zeros(p)
    true_b[1:k] = randn(k) #N(0, 1) is a BAD MODEL
    shuffle!(true_b)
    correct_position = findall(x -> x != 0, true_b)
    # Simulate poisson data: λᵢ = exp(xᵢᵀβ), yᵢ ~ Poisson(λᵢ)
    y_temp = xbm * true_b
    λ = exp.(y_temp)
    y = [rand(Poisson(x)) for x in λ]
    y = Float64.(y)
    #compute poisson IHT result
    result = L0_poisson_reg(x, z, y, 1, k, glm = "poisson", debias=false, convg=true, show_info=false, true_beta=true_b, scale=false, init=false)
    #check result: compare estimates against the truth at the true support
    estimated_models = result.beta[correct_position]
    true_model = true_b[correct_position]
    compare_model = DataFrame(
        true_β = true_model,
        estimated_β = estimated_models)
    #display results
    @show sort(compare_model, rev=true, by=abs)
    println("Total iteration number was " * string(result.iter))
    println("Total time was " * string(result.time))
    println("median of y = " * string(median(y)))
    println("maximum of y = " * string(maximum(y)))
    println("minimum of y = " * string(minimum(y)))
    println("skewness of y = " * string(skewness(y)))
    println("kurtosis of y = " * string(kurtosis(y)))
    println("mean of y = " * string(mean(y)))
    println("variance of y = " * string(var(y)))
    println("var / mean = " * string(var(y) / mean(y)) * "\n\n")
end
# Run 50 random (n, p) configurations to show how often β ~ N(0, 1) fails.
for i = 1:50
    n = rand(2000:5000)
    p = rand(1:10)n
    k = 10
    println("Running the $i th model where " * "n, p = " * string(n) * ", " * string(p))
    run_poisson_bad(n, p, k)
end
# ## What's the appropriate variance of our model?
#
# Below we have $s = 0.1, 0.2, ... 1.0$ and let $\beta \sim N(0, s)$. Then we construct 10 different SNP matrices for each model to simulate data. We compute some summary statistics at the end, but overall variance $= 0.3$ works the best for maximizing $\beta$ coefficients (i.e. making the problem easy) but minimizes extreme outliers.
# Simulates one Poisson GWAS data set with β ~ Normal(0, variance) and returns
# (n, p) plus summary statistics of the simulated response y.
# NOTE(review): Distributions.jl's Normal(μ, σ) takes the STANDARD DEVIATION,
# so despite its name, `variance` is used as σ here — confirm intent.
function compute_y_mean(variance :: Float64)
    #simulate data
    n = rand(2000:5000)
    p = 10n
    k = 10 # number of true predictors
    #construct snpmatrix, covariate files, and true model b
    x, maf = simulate_random_snparray(n, p)
    xbm = SnpBitMatrix{Float64}(x, model=ADDITIVE_MODEL, center=true, scale=true);
    z = ones(n, 1) # non-genetic covariates, just the intercept
    true_b = zeros(p)
    true_b[1:k] = rand(Normal(0, variance), k)
    shuffle!(true_b)
    correct_position = findall(x -> x != 0, true_b)
    #simulate data
    y_temp = xbm * true_b
    λ = exp.(y_temp) #inverse log link
    y = [rand(Poisson(x)) for x in λ]
    y = Float64.(y)
    return n, p, median(y), maximum(y), mean(y), var(y), skewness(y), kurtosis(y)
end
# Sweep s = 0.1..1.0 and tabulate response statistics over `repeats` draws each.
Random.seed!(2019)
s = collect(0.1:0.1:1) #beta ~ N(0, s) for s in {0.1, 0.2,..., 1}
repeats = 30
for i in 1:length(s)
    df = DataFrame(n = Int64[], p = Int64[], median = Float64[], max = Float64[], mean = Float64[],
                   var = Float64[], skew = Float64[], kur = Float64[])
    for j in 1:repeats
        n, p, med, max, μ, σ, skew, kur = compute_y_mean(s[i])
        push!(df, [n, p, med, max, μ, σ, skew, kur])
    end
    println("for variance = " * string(s[i]) * ", the result is as follows:")
    @show(df)
    println("\n\n")
end
# ## Examine reconstruction for variance = 0.4
#
# From the above, it seems like variance = 0.4 is a reasonable choice for $\beta$. Below simulate 50 different SNP matrices, with 50 different $\beta \sim N(0, 0.4)$, to examine reconstruction behavior. Overall, IHT struggles to find effect sizes $< 0.1$ but performs well for larger values.
# Runs one Poisson-IHT reconstruction with β ~ N(0, 0.4) (the recommended
# spread found above) and prints estimated vs true coefficients plus summary
# statistics of the simulated response y.
function run_poisson(n :: Int64, p :: Int64, k :: Int64)
    #set random seed
    Random.seed!(1111)
    #construct snpmatrix, covariate files, and true model b
    x, maf = simulate_random_snparray(n, p)
    xbm = SnpBitMatrix{Float64}(x, model=ADDITIVE_MODEL, center=true, scale=true);
    z = ones(n, 1) # non-genetic covariates, just the intercept
    true_b = zeros(p)
    true_b[1:k] = rand(Normal(0, 0.4), k)
    shuffle!(true_b)
    correct_position = findall(x -> x != 0, true_b)
    # Simulate poisson data
    y_temp = xbm * true_b
    λ = exp.(y_temp) #inverse log link
    y = [rand(Poisson(x)) for x in λ]
    y = Float64.(y)
    #compute poisson IHT result
    result = L0_poisson_reg(x, z, y, 1, k, glm = "poisson", debias=false, convg=false, show_info=false)
    #check result: compare estimates against the truth at the true support
    estimated_models = result.beta[correct_position]
    true_model = true_b[correct_position]
    compare_model = DataFrame(
        true_β = true_model,
        estimated_β = estimated_models)
    #display results
    @show sort(compare_model, rev=true, by=abs)
    println("Total iteration number was " * string(result.iter))
    println("Total time was " * string(result.time))
    println("median of y = " * string(median(y)))
    println("maximum of y = " * string(maximum(y)))
    println("minimum of y = " * string(minimum(y)))
    println("skewness of y = " * string(skewness(y)))
    println("kurtosis of y = " * string(kurtosis(y)))
    println("mean of y = " * string(mean(y)))
    println("variance of y = " * string(var(y)))
    println("var / mean = " * string(var(y) / mean(y)) * "\n\n")
end
# Run 50 random (n, p) configurations with the N(0, 0.4) effect-size model.
k = 10
for i = 1:50
    println("running the $i th model")
    n = rand(500:2000)
    p = rand(1:10)n
    println("n, p = " * string(n) * ", " * string(p))
    run_poisson(n, p, k)
end
# # Is initializing $\beta$ good in terms of speed and model selection?
#
# When initializing the model, we can fit a bunch of univariate regressions to find a good initial approximation to the model before starting the IHT algorithm. Doing so introduces extra computational cost, but could reduce total iteration number and/or improve model selection. This section examines whether this is useful (it's not).
# Fits the same simulated Poisson model twice — with and without univariate
# initialization of β — and returns (found-with-init, found-without,
# time-with, time-without) so the driver below can aggregate.
function test_poisson_init(n :: Int64, p :: Int64, k :: Int64)
    #set random seed
    Random.seed!(1111)
    #construct snpmatrix, covariate files, and true model b
    x, maf = simulate_random_snparray(n, p)
    xbm = SnpBitMatrix{Float64}(x, model=ADDITIVE_MODEL, center=true, scale=true);
    z = ones(n, 1) # non-genetic covariates, just the intercept
    true_b = zeros(p)
    true_b[1:k] = rand(Normal(0, 0.4), k)
    shuffle!(true_b)
    correct_position = findall(x -> x != 0, true_b)
    # Simulate poisson data
    y_temp = xbm * true_b
    λ = exp.(y_temp) #inverse log link
    y = [rand(Poisson(x)) for x in λ]
    y = Float64.(y)
    #compute poisson IHT result, with and without initialization
    yes_init = L0_poisson_reg(x, z, y, 1, k, glm = "poisson", debias=false, convg=false, show_info=false, init=true)
    no_init = L0_poisson_reg(x, z, y, 1, k, glm = "poisson", debias=false, convg=false, show_info=false, init=false)
    #check result
    est_model_init = yes_init.beta[correct_position]
    est_model_no_init = no_init.beta[correct_position]
    true_model = true_b[correct_position]
    compare_model = DataFrame(
        true_β = true_model,
        est_β_no_init = est_model_no_init,
        est_β_with_init = est_model_init)
    #display results
    @show sort(compare_model, rev=true, by=abs)
    println("No initilialization:")
    println(" Iter = " * string(no_init.iter))
    println(" Time = " * string(no_init.time))
    println("Yes initilialization:")
    println(" Iter = " * string(yes_init.iter))
    println(" Time = " * string(yes_init.time) * "\n\n")
    #return summary statistic
    yes_init_found = length(findall(!iszero, est_model_init))
    no_init_found = length(findall(!iszero, est_model_no_init))
    yes_init_time = yes_init.time
    no_init_time = no_init.time
    return yes_init_found, no_init_found, yes_init_time, no_init_time
end
# +
# Aggregate init vs no-init results over `iter` random (n, p) configurations.
k = 10
yes_init_total_found = 0
no_init_total_found = 0
yes_init_total_time = 0
no_init_total_time = 0
iter = 50
for i = 1:iter
    println("running the $i th model")
    n = rand(500:2000)
    p = rand(1:10)n
    println("n, p = " * string(n) * ", " * string(p))
    yif, nif, yit, nit = test_poisson_init(n, p, k)
    yes_init_total_found += yif
    no_init_total_found += nif
    yes_init_total_time += yit
    no_init_total_time += nit
end
println("With initialization, found $yes_init_total_found " * "predictors out of " * string(k*iter))
println("With initialization, average time was " * string(yes_init_total_time/iter))
println("Without initialization, found $no_init_total_found " * "predictors out of " * string(k*iter))
println("Without initialization, average time was " * string(no_init_total_time/iter))
# -
# # Is debiasing good in terms of speed and model selection?
#
# Within each IHT iteration, we can fit a GLM regression (using scoring algorithm) on just the support set of that iteration. This is known as debiasing. We now investigate whether this is a good idea, both in terms of speed and model selection performance. (ans: yes)
# Fits the same simulated Poisson model twice — with and without per-iteration
# debiasing — and returns (found-with, found-without, time-with, time-without).
function test_poisson_debias(n :: Int64, p :: Int64, k :: Int64)
    #set random seed
    Random.seed!(1111)
    #construct snpmatrix, covariate files, and true model b
    x, maf = simulate_random_snparray(n, p)
    xbm = SnpBitMatrix{Float64}(x, model=ADDITIVE_MODEL, center=true, scale=true);
    z = ones(n, 1) # non-genetic covariates, just the intercept
    true_b = zeros(p)
    true_b[1:k] = rand(Normal(0, 0.4), k)
    shuffle!(true_b)
    correct_position = findall(x -> x != 0, true_b)
    # Simulate poisson data
    y_temp = xbm * true_b
    λ = exp.(y_temp) #inverse log link
    y = [rand(Poisson(x)) for x in λ]
    y = Float64.(y)
    #compute poisson IHT result, with and without debiasing
    no_debias = L0_poisson_reg(x, z, y, 1, k, glm = "poisson", debias=false, convg=false, show_info=false, init=false)
    yes_debias = L0_poisson_reg(x, z, y, 1, k, glm = "poisson", debias=true, convg=false, show_info=false, init=false)
    #check result
    est_model_yes = yes_debias.beta[correct_position]
    est_model_no = no_debias.beta[correct_position]
    true_model = true_b[correct_position]
    compare_model = DataFrame(
        true_β = true_model,
        est_β_no_debias = est_model_no,
        est_β_yes_debias = est_model_yes)
    #display results
    @show sort(compare_model, rev=true, by=abs)
    println("No debiasing:")
    println(" Iter = " * string(no_debias.iter))
    println(" Time = " * string(no_debias.time))
    println("Yes debiasing:")
    println(" Iter = " * string(yes_debias.iter))
    println(" Time = " * string(yes_debias.time) * "\n\n")
    #return summary statistic
    yes_debias_found = length(findall(!iszero, est_model_yes))
    no_debias_found = length(findall(!iszero, est_model_no))
    yes_debias_time = yes_debias.time
    no_debias_time = no_debias.time
    return yes_debias_found, no_debias_found, yes_debias_time, no_debias_time
end
# +
# Aggregate debias vs no-debias results over `iter` random (n, p) configurations.
k = 10
yes_debias_total_found = 0
no_debias_total_found = 0
yes_debias_total_time = 0
no_debias_total_time = 0
iter = 50
for i = 1:iter
    println("running the $i th model")
    n = rand(500:2000)
    p = rand(1:10)n
    println("n, p = " * string(n) * ", " * string(p))
    ydf, ndf, ydt, ndt = test_poisson_debias(n, p, k)
    yes_debias_total_found += ydf
    no_debias_total_found += ndf
    yes_debias_total_time += ydt
    no_debias_total_time += ndt
end
println("With debiasing, found $yes_debias_total_found " * "predictors out of " * string(k*iter))
println("With debiasing, average time was " * string(yes_debias_total_time/iter))
println("Without debiasing, found $no_debias_total_found " * "predictors out of " * string(k*iter))
println("Without debiasing, average time was " * string(no_debias_total_time/iter))
# -
# # Comparison with LASSO: cross validation
using Revise
using GLMNet #julia wrapper for GLMNet package in R, which calls fortran
using GLM
using MendelIHT
using SnpArrays
using DataFrames
using Distributions
using StatsFuns: logistic
using Random
using LinearAlgebra
using DelimitedFiles
Threads.nthreads() #verify multiple threads are enabled
# Compares cross-validated IHT against cross-validated LASSO (glmnet) on one
# simulated Poisson data set, also fitting a plain GLM on the true support as
# a reference; returns per-method counts of correct predictors, false
# positives/negatives, and timings.
function iht_lasso_poisson(n :: Int64, p :: Int64, sim :: Int64)
    #define maf and true model size
    k = 10
    #set random seed
    Random.seed!(1111)
    #construct snpmatrix, covariate files, and true model b
    x, maf = simulate_random_snparray(n, p)
    xbm = SnpBitMatrix{Float64}(x, model=ADDITIVE_MODEL, center=true, scale=true);
    z = ones(n, 1) # non-genetic covariates, just the intercept
    true_b = zeros(p) # model vector
    true_b[1:k] = rand(Normal(0, 0.4), k) # k true response
    shuffle!(true_b) # Shuffle the entries
    correct_position = findall(x -> x != 0, true_b) # keep track of what the true entries are
    # Simulate poisson data
    y_temp = xbm * true_b
    λ = exp.(y_temp) #inverse log link
    y = [rand(Poisson(x)) for x in λ]
    y = Float64.(y)
    #compute poisson IHT result (sparsity level chosen by cross validation)
    cur_time = time()
    path = collect(1:20)
    num_folds = 3
    folds = rand(1:num_folds, size(x, 1))
    k_est_iht = cv_iht(x, z, y, 1, path, folds, num_folds, use_maf=false, glm="poisson", debias=true)
    iht_result = L0_poisson_reg(x, z, y, 1, k_est_iht, glm = "poisson", debias=true, convg=false, show_info=false, true_beta=true_b, init=false)
    iht_time = time() - cur_time
    #compute poisson lasso result (same folds, for a fair comparison)
    x_float = [convert(Matrix{Float64}, x, center=true, scale=true) z]
    cur_time = time()
    cv = glmnetcv(x_float, y, Poisson(), pmax=20, nfolds=3, folds=folds)
    best = argmin(cv.meanloss)
    lasso_result = cv.path.betas[:, best]
    k_est_lasso = length(findall(!iszero, lasso_result))
    lasso_time = time() - cur_time
    #compute regular poisson regression using only true predictors
    x_true = [x_float[:, correct_position] z]
    regular_result = glm(x_true, y, Poisson(), LogLink())
    regular_result = regular_result.pp.beta0
    #check result
    IHT_model = iht_result.beta[correct_position]
    lasso_model = lasso_result[correct_position]
    true_model = true_b[correct_position]
    compare_model = DataFrame(
        true_β = true_model,
        iht_β = IHT_model,
        lasso_β = lasso_model,
        regular_β = regular_result[1:10])
    @show compare_model
    #compute summary statistics
    lasso_num_correct_predictors = length(findall(!iszero, lasso_model))
    lasso_false_positives = k_est_lasso - lasso_num_correct_predictors
    lasso_false_negatives = k - lasso_num_correct_predictors
    iht_num_correct_predictors = length(findall(!iszero, IHT_model))
    iht_false_positives = k_est_iht - iht_num_correct_predictors
    iht_false_negatives = k - iht_num_correct_predictors
    println("IHT cv found $iht_false_positives" * " false positives and $iht_false_negatives" * " false negatives, and used $iht_time" * " seconds")
    println("lasso cv found $lasso_false_positives" * " false positives and $lasso_false_negatives" * " false negatives, and used $lasso_time" * " seconds \n\n")
    #write y to file to view distribution later
    #writedlm("./IHT_poisson_simulations_mean/data_simulation_$sim.txt", y)
    return lasso_num_correct_predictors, lasso_false_positives, lasso_false_negatives, lasso_time, iht_num_correct_predictors, iht_false_positives, iht_false_negatives, iht_time
end
# Runs `iter` IHT-vs-LASSO comparisons on random (n, p) configurations and
# prints aggregate correct-predictor counts, error counts, and average times.
function run_iht_lasso_poisson()
    lasso_total_found = 0
    lasso_false_positives = 0
    lasso_false_negatives = 0
    lasso_total_time = 0
    iht_total_found = 0
    iht_false_positives = 0
    iht_false_negatives = 0
    iht_total_time = 0
    iter = 10
    for i = 1:iter
        n = rand(1000:3000)
        p = rand(1:10)n
        println("Running the $i th model where " * "n, p = " * string(n) * ", " * string(p))
        ltf, lfp, lfn, ltt, itf, ifp, ifn, itt = iht_lasso_poisson(n, p, i)
        lasso_total_found += ltf
        lasso_false_positives += lfp
        lasso_false_negatives += lfn
        lasso_total_time += ltt
        iht_total_found += itf
        iht_false_positives += ifp
        iht_false_negatives += ifn
        iht_total_time += itt
    end
    println("IHT : Found $iht_total_found " * "correct predictors, out of " * string(10iter))
    println("IHT : False positives = $iht_false_positives")
    println("IHT : False negatives = $iht_false_negatives")
    println("IHT : Average time = " * string(iht_total_time / iter))
    println("Lasso: Found $lasso_total_found " * "correct predictors, out of " * string(10iter))
    println("Lasso: False positives = $lasso_false_positives")
    println("Lasso: False negatives = $lasso_false_negatives")
    println("Lasso: Average time = " * string(lasso_total_time / iter))
end
# Seed once so the whole comparison run is reproducible, then run it.
Random.seed!(2019)
run_iht_lasso_poisson()
# # Which convergence criteria should I use?
# Fits the same simulated Poisson model under two convergence criteria
# (convg=true, "Kevin's", vs convg=false, the stringent one) and returns
# (found-stringent, found-kevin, time-stringent, time-kevin).
function test_poisson_convg(n :: Int64, p :: Int64, k :: Int64)
    #set random seed
    Random.seed!(1111)
    #construct snpmatrix, covariate files, and true model b
    x, maf = simulate_random_snparray(n, p)
    xbm = SnpBitMatrix{Float64}(x, model=ADDITIVE_MODEL, center=true, scale=true);
    z = ones(n, 1) # non-genetic covariates, just the intercept
    true_b = zeros(p)
    true_b[1:k] = rand(Normal(0, 0.4), k)
    shuffle!(true_b)
    correct_position = findall(x -> x != 0, true_b)
    # Simulate poisson data
    y_temp = xbm * true_b
    λ = exp.(y_temp) #inverse log link
    y = [rand(Poisson(x)) for x in λ]
    y = Float64.(y)
    #compute poisson IHT result under each convergence criterion
    kevin = L0_poisson_reg(x, z, y, 1, k, glm = "poisson", debias=false, convg=true, show_info=false, init=false)
    stringent = L0_poisson_reg(x, z, y, 1, k, glm = "poisson", debias=false, convg=false, show_info=false, init=false)
    #check result
    est_model_stringent = stringent.beta[correct_position]
    est_model_kevin = kevin.beta[correct_position]
    true_model = true_b[correct_position]
    compare_model = DataFrame(
        true_β = true_model,
        est_β_kevin = est_model_kevin,
        est_β_stringent = est_model_stringent)
    #display results
    @show sort(compare_model, rev=true, by=abs)
    println("Kevin's criteria:")
    println(" Iter = " * string(kevin.iter))
    println(" Time = " * string(kevin.time))
    println("stringent criteria:")
    println(" Iter = " * string(stringent.iter))
    println(" Time = " * string(stringent.time) * "\n\n")
    #return summary statistic
    stringent_found = length(findall(!iszero, est_model_stringent))
    kevin_found = length(findall(!iszero, est_model_kevin))
    stringent_time = stringent.time
    kevin_time = kevin.time
    return stringent_found, kevin_found, stringent_time, kevin_time
end
# +
# Benchmark the two convergence criteria over `iter` randomly sized problems,
# accumulating how many causal predictors each recovers and the average time.
k = 10
kevin_total_found = 0
stringent_total_found = 0
kevin_total_time = 0
stringent_total_time = 0
iter = 50
for i = 1:iter
    println("running the $i th model")
    n = rand(500:2000)
    p = rand(1:10)n  # p is a random 1x-10x multiple of n
    println("n, p = " * string(n) * ", " * string(p))
    sf, kf, st, kt = test_poisson_convg(n, p, k)
    # NOTE(review): these update globals from inside a loop; this relies on the
    # notebook's soft global scope — a plain script would need `global` here.
    stringent_total_found += sf
    kevin_total_found += kf
    stringent_total_time += st
    kevin_total_time += kt
end
println("With stringent convergence, found $stringent_total_found " * "predictors out of " * string(k*iter))
println("With stringent convergence, average time was " * string(stringent_total_time/iter))
println("With kevin's convergence, found $kevin_total_found " * "predictors out of " * string(k*iter))
println("With kevin's convergence, average time was " * string(kevin_total_time/iter)* "\n\n")
# -
| notebooks/IHT_poisson.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Import necessary libraries
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
import numpy as np
from scipy.stats import chi2_contingency
#Load and inspect csv files
observations_df = pd.read_csv('observations.csv')
species_df = pd.read_csv('species_info.csv')
display(observations_df.head())
print(observations_df.info())
display(species_df.head())
display(species_df.info())
#How many species are in the observations dataset?
species_list = observations_df['scientific_name'].unique()
print('There are '+str(len(species_list))+' unique species in our observations.')
#How many parks are the observations from?
parks_list = observations_df['park_name'].unique()
print('There are '+str(len(parks_list))+' unique parks in our observations.')
print(parks_list)
#How many observations are from each park?
park_counts = observations_df['park_name'].value_counts()
print(park_counts)
#What categories do the species of the species dataset fall into?
categories = species_df['category'].unique()
print(categories)
#How many conservation statuses are there?
statuses = species_df['conservation_status'].unique()
print(statuses)
#Replace NaN values with something less ugly
# Missing status means the species is not under any conservation program.
species_df['conservation_status'] = species_df['conservation_status'].fillna('Not Specified')
statuses = species_df['conservation_status'].unique()
#Count the species falling into each status
# value_counts() returns statuses sorted by frequency, most common first.
species_counts = species_df['conservation_status'].value_counts()
print(species_counts)
#Create a pie chart of conservation statuses (Endangered, Threatened, and In Recovery condensed into "Other" for readability)
pie_labels = ['Not Specified', 'Species of Concern', 'Other']
# Use positional .iloc explicitly: integer keys on a string-labeled Series
# rely on a deprecated label-fallback and raise KeyError on pandas >= 2.x.
other_num = len(species_df)-(species_counts.iloc[0] + species_counts.iloc[1])
pie_list = [species_counts.iloc[0], species_counts.iloc[1], other_num]
plt.pie(pie_list, autopct='%0.2f%%', pctdistance=1.2)
plt.legend(pie_labels)
plt.title('Conservation Statuses')
plt.axis('equal')  # keep the pie circular
plt.show()
print('Other Percentages (Not Shown): \n Endangered: '+str(round(species_counts.iloc[2]*100/len(species_df), 2))+'%\n Threatened: '+str(round(species_counts.iloc[3]*100/len(species_df), 2))+'%\n In Recovery: '+str(round(species_counts.iloc[4]*100/len(species_df), 2))+'%')
#Create a bar chart of species observed by category
category_counts = species_df['category'].value_counts()
print(category_counts)
# NOTE(review): these abbreviated labels assume value_counts() returns the
# categories in exactly this frequency order — verify against the printout above.
bar_labels = ['Vasc Plant', 'Bird', 'NonV Plant', 'Mammal', 'Fish', 'Amphibian', 'Reptile'] #Labels abbreviated for readability
plt.bar(range(len(category_counts)), category_counts, tick_label=bar_labels)
plt.xticks(rotation=30)
plt.title('Species Observed by Category')
plt.xlabel('Category')
plt.ylabel('Frequency')
plt.show()
#Create contingency tables, perform a chi-square test, and interpret the results
category_conservation = pd.crosstab(species_df['category'], species_df['conservation_status'])
print('Category vs. Conservation Status Contingency Table')
display(category_conservation)
chi2, p, dof, expected = chi2_contingency(category_conservation)
# Let the data drive the conclusion: the original text claimed a "strong
# association" unconditionally, which would be wrong whenever p >= 0.05.
# (Also fixes the "signficance" typo.)
if p < 0.05:
    print('The p-value for a chi-square test conducted on this contingency table is '+str(p)+'.\nThis suggests a strong association between category and conservation status (significance threshold: p=0.05)')
else:
    print('The p-value for a chi-square test conducted on this contingency table is '+str(p)+'.\nThis does not suggest a significant association between category and conservation status (significance threshold: p=0.05)')
# +
#What are the n most observed species for a given park?
def n_most_observed(park, n):
    """Print name, common names, category, status and count for the n most
    frequently observed species at the given park."""
    in_park = observations_df[observations_df['park_name']==park]
    ranked = in_park.sort_values("observations", ascending=False).reset_index()
    detailed = ranked.merge(species_df, how='inner', on='scientific_name')
    for rank in range(n):
        row = detailed.loc[rank]
        print(f"The #{rank + 1} most frequently observed species at {park} is {row['scientific_name']}")
        print(f"Its other names are: {row['common_names']}")
        print(f"It is a {row['category']} with a conservation status of {row['conservation_status']}")
        print(f"It has been observed at this park {row['observations']} times.")
        print('\n')
#What are the n rarest species in a given park?
def n_least_observed(park, n):
    """Print name, common names, category, status and count for the n least
    frequently observed species at the given park."""
    in_park = observations_df[observations_df['park_name']==park]
    ranked = in_park.sort_values("observations", ascending=False).reset_index()
    detailed = ranked.merge(species_df, how='inner', on='scientific_name')
    for rank in range(n):
        # The table is sorted most-observed first, so the rarest sit at the tail.
        tail_idx = len(detailed) - (rank + 1)
        row = detailed.loc[tail_idx]
        print(f"The #{rank + 1} rarest species at {park} is {row['scientific_name']}")
        print(f"Its other names are: {row['common_names']}")
        print(f"It is a {row['category']} with a conservation status of {row['conservation_status']}")
        print(f"It has been observed at this park {row['observations']} times.")
        print('\n')
n_most_observed('Great Smoky Mountains National Park', 3) #describe the 3 most common species in Smoky Mts. National Park
n_least_observed('Bryce National Park', 4) #describe the 4 rarest species in Bryce National Park
| biodiversity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import dependencies
from scipy import ndimage
import sys
import os
from time import time
from time import sleep
from PIL import Image
import numpy as np
import pandas as pd
import plotly
from plotly.graph_objs import Bar, Line
from plotly.graph_objs import Scatter, Layout
from plotly.graph_objs.scatter import Marker
from plotly.graph_objs.layout import XAxis, YAxis
import seaborn as sns
from IPython.display import Image as _Imgdis
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
from sklearn import datasets as ds
import cv2
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import train_test_split
# +
import keras
from keras.models import Sequential
from keras.layers import Reshape
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping
from keras.regularizers import l2
from keras.layers import average
from keras.models import Input, Model
keras.__version__
# +
from sklearn import metrics as mt
from matplotlib import pyplot as plt
from skimage.io import imshow
import seaborn as sns
# %matplotlib inline
def summarize_net(net, X_test, y_test, title_text=''):
    """Plot the row-normalized confusion matrix for *net* on the test set,
    appending the overall accuracy to the plot title."""
    plt.figure(figsize=(15,5))
    yhat = np.argmax(net.predict(X_test), axis=1)  # predicted class index per sample
    acc = mt.accuracy_score(y_test,yhat)
    cm = mt.confusion_matrix(y_test,yhat)
    # Normalize each row so cells read as per-class recall fractions.
    # NOTE(review): a class absent from y_test makes its row sum 0 and divides
    # by zero here — confirm every class appears in the test split.
    cm = cm/np.sum(cm,axis=1)[:,np.newaxis]
    sns.heatmap(cm, annot=True, fmt='.2f')
    plt.title(title_text+'{:.4f}'.format(acc))
# +
# Less Data...
#asl = pd.read_csv("C:/temp/Datasets/aslalphabettrain50-test/asl_alphabet_train_50_test.csv")
# More Data...
# CSV layout: first column is the class label, remaining columns are the
# flattened 50x50 grayscale pixels — TODO confirm against the dataset.
asl = pd.read_csv("C:/temp/Datasets/aslalphabettrain50/asl_alphabet_train_50.csv")
# -
import copy
aslcopy = copy.deepcopy(asl)
X=np.asarray(aslcopy.drop(asl.columns[0], axis=1), dtype=np.int64)  # pixel columns only
y=asl.drop(asl.columns[1:], axis=1)  # keep only the label column
y = np.asarray(y, dtype=np.int16)
print(X)
print(y)
# +
# NOTE(review): test_size=0.8 trains on only 20% of the data — presumably
# deliberate to keep training fast / stress generalization; confirm.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.8)
#X_train = X_train/255.0 - 0.5
#X_test = X_test/255.0 - 0.5
print(X_train.shape)
# +
img_wh = 50
NUM_CLASSES=29  # 26 letters + Space + Del + Nothing
X_train = np.expand_dims(X_train.reshape((-1,img_wh,img_wh)), axis=3)
X_test = np.expand_dims(X_test.reshape((-1,img_wh,img_wh)), axis=3)
# the image data has been resized to (samples,image_rows,image_cols,image_channels)
# and one hot encoding the output values
y_train_ohe = keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test_ohe = keras.utils.to_categorical(y_test, NUM_CLASSES)
print('New Shape: Rows: %d, image size: (%d,%d,%d)' % (X_train.shape[0], X_train.shape[1], X_train.shape[2], X_train.shape[3] ))
# +
# Sanity-check: display two training images.
print(X_train.shape)
plt.subplot(1,1,1)
plt.imshow(X_train[0].squeeze(),cmap='bone')
plt.show()
print(X_train.shape)
plt.subplot(1,1,1)
plt.imshow(X_train[100].squeeze(),cmap='bone')
plt.show()
# +
classes = ['A','B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'Space', 'Del', 'Nothing']
# Data augmentation: small rotations and shifts plus horizontal flips.
# NOTE(review): horizontal_flip=True mirrors the hand — for sign-language
# letters a mirrored sign may not be the same class; confirm this is intended.
datagen = ImageDataGenerator(featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=5, # used, Int. Degree range for random rotations.
        width_shift_range=0.1, # used, Float (fraction of total width). Range for random horizontal shifts.
        height_shift_range=0.1, # used, Float (fraction of total height). Range for random vertical shifts.
        shear_range=0., # Float. Shear Intensity (Shear angle in counter-clockwise direction as radians)
        zoom_range=0.,
        channel_shift_range=0.,
        fill_mode='nearest',
        cval=0.,
        horizontal_flip=True,
        vertical_flip=False,
        rescale=None)
datagen.fit(X_train)
idx = 0
# +
# Preview a single augmented sample with its class label.
tmps = datagen.flow(X_train, y_train_ohe, batch_size=1)
for tmp in tmps:
    imshow(tmp[0].squeeze(),cmap='bone')
    plt.title(classes[np.argmax(tmp[1])])
    break
# +
# %%time
# Baseline model: two conv layers (32 then 64 filters), one max-pool,
# dropout, a 128-unit dense layer, and a softmax over the 29 classes.
cnn = Sequential()
# let's start with an AlexNet style convolutional phase
cnn.add(Conv2D(filters=32,
        input_shape = (img_wh,img_wh,1),
        kernel_size=(3,3),
        padding='same',
        activation='relu', data_format="channels_last")) # more compact syntax
# no max pool before next conv layer!!
cnn.add(Conv2D(filters=64,
        kernel_size=(3,3),
        padding='same',
        activation='relu')) # more compact syntax
cnn.add(MaxPooling2D(pool_size=(2, 2), data_format="channels_last"))
# add one layer on flattened output
cnn.add(Dropout(0.25)) # add some dropout for regularization after conv layers
cnn.add(Flatten())
cnn.add(Dense(128, activation='relu'))
cnn.add(Dropout(0.5)) # add some dropout for regularization, again!
cnn.add(Dense(NUM_CLASSES, activation='softmax'))
# Let's train the model
cnn.compile(loss='categorical_crossentropy', # 'categorical_crossentropy' 'mean_squared_error'
        optimizer='rmsprop', # 'adadelta' 'rmsprop'
        metrics=['accuracy'])
# the flow method yields batches of images indefinitely, with the given transformations
# NOTE(review): fit_generator matches the Keras version pinned here; modern
# Keras removed it in favor of fit().
cnn.fit_generator(datagen.flow(X_train, y_train_ohe, batch_size=128),
        steps_per_epoch=int(len(X_train)/128), # how many generators to go through per epoch
        epochs=5, verbose=1,
        validation_data=(X_test,y_test_ohe)
        )
# -
summarize_net(cnn, X_test, y_test, title_text='Using Expansion:')
# +
# what if we just want to use the validation data??
# Deeper variant: three conv/conv(/pool) stages with He initialization and L2
# weight decay on every layer, trained with early stopping.
from keras.callbacks import EarlyStopping
from keras.regularizers import l2
l2_lambda = 0.0001
# Use Kaiming He to regularize ReLU layers: https://arxiv.org/pdf/1502.01852.pdf
# Use Glorot/Bengio for linear/sigmoid/softmax: http://proceedings.mlr.press/v9/glorot10a/glorot10a.pdf
cnn = Sequential()
cnn.add(Conv2D(filters=32,
        input_shape = (img_wh,img_wh,1),
        kernel_size=(3,3),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu',
        data_format="channels_last")) # more compact syntax
cnn.add(Conv2D(filters=32,
        kernel_size=(3,3),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu',data_format="channels_last"))
cnn.add(MaxPooling2D(pool_size=(2, 2), data_format="channels_last"))
cnn.add(Conv2D(filters=64,
        input_shape = (img_wh,img_wh,1),
        kernel_size=(3,3),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu',data_format="channels_last")) # more compact syntax
cnn.add(Conv2D(filters=64,
        kernel_size=(3,3),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu'))
cnn.add(MaxPooling2D(pool_size=(2, 2), data_format="channels_last"))
cnn.add(Conv2D(filters=128,
        input_shape = (img_wh,img_wh,1),
        kernel_size=(3,3),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu',data_format="channels_last")) # more compact syntax
cnn.add(Conv2D(filters=128,
        kernel_size=(3,3),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu',data_format="channels_last"))
# add one layer on flattened output
cnn.add(Flatten())
cnn.add(Dropout(0.25)) # add some dropout for regularization after conv layers
cnn.add(Dense(128,
        activation='relu',
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda)
        ))
cnn.add(Dropout(0.5)) # add some dropout for regularization, again!
cnn.add(Dense(NUM_CLASSES,
        activation='softmax',
        kernel_initializer='glorot_uniform',
        kernel_regularizer=l2(l2_lambda)
        ))
# Let's train the model
cnn.compile(loss='categorical_crossentropy', # 'categorical_crossentropy' 'mean_squared_error'
        optimizer='rmsprop', # 'adadelta' 'rmsprop'
        metrics=['accuracy'])
# the flow method yields batches of images indefinitely, with the given transformations
cnn.fit_generator(datagen.flow(X_train, y_train_ohe, batch_size=128),
        steps_per_epoch=int(len(X_train)/128), # how many generators to go through per epoch
        epochs=50, verbose=1,
        validation_data=(X_test,y_test_ohe),
        callbacks=[EarlyStopping(monitor='val_loss', patience=2)]
        )
# -
summarize_net(cnn, X_test, y_test, title_text='Using Exp.+Reg.+Init.:')
# ___
# # Adding ResNet Style Blocks
# Okay, so now lets try a trick from 2016---ancient in terms of deep learning years (that's a joke 🤷, ¯\\_(ツ)_/¯).
#
# The ResNet-Style Bypass is described in the following:
# 
# +
# %%time
# now lets use the LeNet architecture with batch norm
# We will also use ReLU where approriate and drop out
# Functional-API model with one ResNet-style bypass: the 1x1 -> 3x3 -> 1x1
# bottleneck's output is added back onto x_split before the final pooling.
# NOTE(review): BatchNormalization is imported but never used below — confirm
# whether batch norm was meant to be added to this model.
from keras.layers.normalization import BatchNormalization
from keras.layers import Add
from keras.layers import average, concatenate
from keras.models import Input, Model
input_holder = Input(shape=(img_wh, img_wh, 1))
# start with a conv layer
x = Conv2D(filters=32,
        input_shape = (img_wh,img_wh,1),
        kernel_size=(3,3),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu',
        data_format="channels_last")(input_holder)
x = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(x)
x = Conv2D(filters=32,
        kernel_size=(3,3),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu',
        data_format="channels_last")(x)
x_split = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(x)
x = Conv2D(filters=64,
        kernel_size=(1,1),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu',
        data_format="channels_last")(x_split)
x = Conv2D(filters=64,
        kernel_size=(3,3),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu',
        data_format="channels_last")(x)
x = Conv2D(filters=32,
        kernel_size=(1,1),
        kernel_initializer='he_uniform',
        kernel_regularizer=l2(l2_lambda),
        padding='same',
        activation='relu',
        data_format="channels_last")(x)
# now add back in the split layer, x_split (residual added in)
x = Add()([x, x_split])
x = Activation("relu")(x)
x = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(x)
x = Flatten()(x)
x = Dropout(0.25)(x)
x = Dense(256)(x)
x = Activation("relu")(x)
x = Dropout(0.5)(x)
x = Dense(NUM_CLASSES)(x)
x = Activation('softmax')(x)
resnet = Model(inputs=input_holder,outputs=x)
resnet.summary()
# -
resnet.compile(loss='categorical_crossentropy', # 'categorical_crossentropy' 'mean_squared_error'
        optimizer='adam', # 'adadelta' 'rmsprop'
        metrics=['accuracy'])
# +
# the flow method yields batches of images indefinitely, with the given transformations
# resnet.fit_generator(datagen.flow(X_train, y_train_ohe, batch_size=128),
#                     steps_per_epoch=int(len(X_train)/128), # how many generators to go through per epoch
#                     epochs=50, verbose=1,
#                     validation_data=(X_test,y_test_ohe),
#                     callbacks=[EarlyStopping(monitor='val_loss', patience=4)]
#                     )
# Train on the raw (un-augmented) data with early stopping.
resnet.fit(X_train, y_train_ohe, batch_size=128,
        epochs=50, verbose=1,
        validation_data=(X_test,y_test_ohe),
        callbacks=[EarlyStopping(monitor='val_loss', patience=4)]
        )
# -
| project 6/project6_1-20190426.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="mG98ajLLNIjz"
# 
# + [markdown] colab_type="text" id="7Ch5KPRpui-Q"
#
# ## Exercícios de manipulação de dados - Parte 2
# + [markdown] colab_type="text" id="IscqV7Obui-R"
# Neste Jupyter notebook você irá resolver exercícios utilizando a biblioteca Pandas.\
# Todos os datasets utilizados nos exercícios estão salvos na pasta *datasets*.\
# Todo o seu código deve ser executado neste Jupyter Notebook. Por fim, se desejar, revise as respostas com o seu mentor.
# + [markdown] colab_type="text" id="Trmht8Dfui-S"
# #### Tarefa 1. Importe o dataset e salve os dados em um dataframe
#
# Os dados estão salvos no arquivo ***datasets/US_Crime_Rates_1960_2014.csv***.\
# Este dataset contém dados de crimes ocorridos nos Estados Unidos no período de 1960 até 2014.\
# Salve os dados em uma variável de nome *crime*.
# -
import pandas as pd
# + colab={} colab_type="code" id="LN4JDdSjui-T"
# Load the US crime rates dataset (1960-2014); sep=',' is already the default.
crime = pd.read_csv('US_Crime_Rates_1960_2014.csv', sep = ',')
# + [markdown] colab_type="text" id="8F6IEdZsui-X"
# #### Tarefa 2. Qual o tipo de dados em cada coluna?
# + colab={} colab_type="code" id="guR3et-zui-Y"
# Show the dtype of every column.
crime.dtypes
# + [markdown] colab_type="text" id="ZnhPRMhrui-c"
# #### Tarefa 3. Converta o tipo de dado da coluna Year para o tipo datetime
#
# *Dica: procure na documentação da biblioteca como fazer tal conversão*
# + colab={} colab_type="code" id="HAIM8iB3ui-d"
import datetime  # NOTE(review): unused — pd.to_datetime below needs no extra import
# Parse the 4-digit year into a datetime64 column.
crime['Year'] = pd.to_datetime(crime['Year'], format= '%Y')
crime['Year']
# + [markdown] colab_type="text" id="wUMLk49Wui-h"
# #### Tarefa 4. Configure a coluna Year como index do DataFrame.
#
# *Dica: use a função set_index() do Pandas*
# + colab={} colab_type="code" id="6n-r6P4jui-i"
# Use the Year column as the DataFrame index (in place).
crime.set_index('Year', inplace=True)
crime.head()
# + [markdown] colab_type="text" id="J-osqYpUui-l"
# #### Tarefa 5. Remova a coluna Total do DataFrame.
# + colab={} colab_type="code" id="td1aqCVrui-m"
# Remove the aggregate 'Total' column (in place).
crime.drop(columns='Total', inplace=True)
crime.head()
# + [markdown] colab_type="text" id="sNRpI3snDwZ7"
# #### Tarefa 6. Encontre o número de roubos de carro do ano de 1978.
# + colab={} colab_type="code" id="iCHYwMqLECpl"
# Partial string indexing on the DatetimeIndex selects the year 1978.
crime.loc['1978', 'Vehicle_Theft']
# + [markdown] colab_type="text" id="EAlbLVK2ui-q"
# #### Tarefa 7. Retorne a linha do ano em que houve o maior número de assassinatos.
#
# *Dica: use a coluna Murder como referência.*
# + colab={} colab_type="code" id="GpIqR-6vui-s"
# Row(s) of the year with the most murders.
crime[crime['Murder'] == crime['Murder'].max()]
# + [markdown] colab_type="text" id="L7bit-TcEGMm"
# #### Tarefa 8. Retorne o número de assassinatos do ano em que foi registrado o menor número de roubo de carros.
#
# + colab={} colab_type="code" id="KAz9sW1oEVeK"
# Murder count in the year with the fewest vehicle thefts.
crime['Murder'][crime['Vehicle_Theft'] == crime['Vehicle_Theft'].min()]
# + [markdown] colab_type="text" id="E0RnhuuMEWsw"
# **Parabéns! Você chegou ao fim**
| data-manipulation-exercises/Manipulacao_de_Dados_Ex_02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github"
# <a href="https://colab.research.google.com/github/ShepherdCode/ShepherdML/blob/master/Nasa2021/CNN_523embed_12K.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="xBDT3RYOakJO"
#
# + [markdown] id="ojm_6E9f9Kcf"
# # CNN 523
#
# 1. Four layers of CNNS and MaxPooling
# 2. Drop out at 0.2
# 3. Variable filters and dense neurons
#
#
#
#
#
#
#
#
#
#
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="hh6XplUvC0j0" outputId="5b9b5e32-3052-495e-d81b-4c0562d4dac3"
#NC_FILENAME='ncRNA.tiny50.fasta'
#PC_FILENAME='pcRNA.tiny50.fasta'
#NC_FILENAME='ncRNA.gc34.processed.fasta'
#PC_FILENAME='pcRNA.gc34.processed.fasta'
NC_FILENAME='noncod_12000.fasta' # CHANGE THIS TO 1000, 2000, 4000, etc.
PC_FILENAME='coding_12000.fasta'
NC_VAL_FILE='noncod_validation.fasta' # CHANGE THIS TO THE UNIFORM VALIDATION FILE
PC_VAL_FILE='coding_validation.fasta'
MODEL_FILE='JUNK3' # CHANGE THIS IF YOU WANT TO SAVE THE MODEL!
DATAPATH=''
# Detect Colab by attempting the google.colab import; fall back to local data/.
# (The bare except is deliberate: any failure means "not in Colab".)
try:
    from google.colab import drive
    IN_COLAB = True
    PATH='/content/drive/'
    drive.mount(PATH)
    DATAPATH=PATH+'My Drive/data/' # must end in "/"
except:
    IN_COLAB = False
    DATAPATH='data/' # must end in "/"
NC_FILENAME = DATAPATH+NC_FILENAME
PC_FILENAME = DATAPATH+PC_FILENAME
NC_VAL_FILE = DATAPATH+NC_VAL_FILE
PC_VAL_FILE = DATAPATH+PC_VAL_FILE
MODEL_FILE=DATAPATH+MODEL_FILE
# Hyperparameters shared by build_model() and do_cross_validation().
EPOCHS=20 # DECIDE ON SOME AMOUNT AND STICK WITH IT
SPLITS=5
K=1
VOCABULARY_SIZE=4**K+1 # e.g. K=3 => 64 DNA K-mers + 'NNN'
EMBED_DIMEN=2
FILTERS=32
KERNEL=3
NEURONS=24
DROP=0.2
MINLEN=200
MAXLEN=1000 # THIS HAS TO MATCH THE SIMULATION DATA
DENSE_LEN = 1000
ACT="tanh"
# + colab={"base_uri": "https://localhost:8080/"} id="e9TY3HK9ZklE" outputId="d9b7bb76-bd49-4d8a-fd5d-d62a313a7297"
# Load our own tools
# TO DO: don't go to GitHub if the file is already local.
GITHUB = True
if GITHUB:
    # #!pip install requests # Uncomment this if necessary. Seems to be pre-installed.
    import requests
    # Fetch the shared FASTA helpers from GitHub and save them next to the notebook.
    r = requests.get('https://raw.githubusercontent.com/ShepherdCode/ShepherdML/master/Strings/tools_fasta.py')
    with open('tools_fasta.py', 'w') as f:
        f.write(r.text)
    # TO DO: delete the file after import
import tools_fasta as tools
tools.yahoo() # If this prints "Yahoo!" then the import was successful.
TOOLS_CHANGED = False # set to True to re-run with a new version of tools
if TOOLS_CHANGED:
    from importlib import reload
    tools=reload(tools)
    print(dir(tools)) # run this to see EVERYTHING in the tools module
# + id="VQY7aTj29Kch"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import StratifiedKFold
import tensorflow as tf
from tensorflow import keras
import time
# Use 32-bit floats for every Keras layer (half the memory of float64).
dt='float32'
tf.keras.backend.set_floatx(dt)
# + [markdown] id="j7jcg6Wl9Kc2"
# Build model
# + id="qLFNO1Xa9Kc3"
def compile_model(model):
    """Compile *model* in place for binary classification and return it."""
    print("COMPILE...")
    loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=False)
    model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
    print("...COMPILED")
    return model
def build_model():
    """Assemble and compile the classifier: a 4-dim letter embedding followed
    by four conv/conv/pool stages, then a small dropout-regularized dense head
    ending in a single sigmoid unit."""
    SHAPE = (MAXLEN, 4)
    def conv():
        # All conv layers share the same configuration.
        return keras.layers.Conv1D(FILTERS, KERNEL, activation=ACT, padding="same")
    layer_stack = [
        # 4 input letters, 4 output dimensions, 1000 letters/RNA
        keras.layers.Embedding(4, 4, input_length=1000),
        keras.layers.Conv1D(FILTERS, KERNEL, activation=ACT, padding="same",
                            input_shape=SHAPE),
        conv(),
        keras.layers.MaxPooling1D(2),
        conv(),
        conv(),
        keras.layers.MaxPooling1D(2),
        conv(),
        conv(),
        keras.layers.MaxPooling1D(2),
        conv(),
        conv(),
        keras.layers.MaxPooling1D(2),
        keras.layers.Flatten(),
        keras.layers.Dense(NEURONS, activation=ACT, dtype=dt, input_shape=[DENSE_LEN]),
        keras.layers.Dropout(DROP),
        keras.layers.Dense(NEURONS, activation=ACT, dtype=dt),
        keras.layers.Dropout(DROP),
        keras.layers.Dense(1, activation="sigmoid", dtype=dt),
    ]
    cnn = keras.models.Sequential()
    for layer in layer_stack:
        cnn.add(layer)
    return compile_model(cnn)
# + [markdown] id="LdIS2utq9Kc9"
# Cross validation
# + id="BVo4tbB_9Kc-"
def do_cross_validation(X,y,given_model,X_VALID,Y_VALID):
    """Shuffle-split cross validation: train a fresh clone of *given_model* on
    each of SPLITS folds, checkpoint the best epoch per fold, then score that
    best model on the held-out uniform validation set (X_VALID, Y_VALID)."""
    cv_scores = []
    fold=0
    splitter = ShuffleSplit(n_splits=SPLITS, test_size=0.1, random_state=37863)
    for train_index,valid_index in splitter.split(X):
        fold += 1
        X_train=X[train_index] # use iloc[] for dataframe
        y_train=y[train_index]
        X_valid=X[valid_index]
        y_valid=y[valid_index]
        # Avoid continually improving the same model.
        model = compile_model(keras.models.clone_model(given_model))
        bestname=MODEL_FILE+".cv."+str(fold)+".best"
        # Keep only the epoch with the best within-fold validation accuracy.
        mycallbacks = [keras.callbacks.ModelCheckpoint(
            filepath=bestname, save_best_only=True,
            monitor='val_accuracy', mode='max')]
        print("FIT")
        start_time=time.time()
        history=model.fit(X_train, y_train,
                epochs=EPOCHS, verbose=1, callbacks=mycallbacks,
                validation_data=(X_valid,y_valid))
        # THE VALIDATION ABOVE IS JUST FOR SHOW
        end_time=time.time()
        elapsed_time=(end_time-start_time)
        print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
        pd.DataFrame(history.history).plot(figsize=(8,5))
        plt.grid(True)
        plt.gca().set_ylim(0,1)
        plt.show()
        best_model=keras.models.load_model(bestname)
        # THE VALIDATION BELOW IS FOR KEEPS
        scores = best_model.evaluate(X_VALID, Y_VALID, verbose=0)
        print("%s: %.2f%%" % (best_model.metrics_names[1], scores[1]*100))
        cv_scores.append(scores[1] * 100)
    print()
    print("%d-way Cross Validation mean %.2f%% (+/- %.2f%%)" % (fold, np.mean(cv_scores), np.std(cv_scores)))
# + [markdown] id="qd3Wj_vI9KdP"
# ## Train on RNA lengths 200-1Kb
# + colab={"base_uri": "https://localhost:8080/"} id="G1HuSs8ZbeL4" outputId="c11f8c8d-3434-4253-8909-c8f6a60cee93"
# Build the template model that each cross-validation fold will clone.
print ("Compile the model")
model=build_model()
print ("Summarize the model")
print(model.summary()) # Print this only once
#model.save(MODEL_FILE+'.model')
# + id="f8fNo6sn9KdH"
def load_data_from_files(nc_filename,pc_filename):
    """Load noncoding (label 0) and coding (label 1) FASTA files, keep
    sequences of length MINLEN..MAXLEN, and return (X_train, y_train) where
    X_train is an int array of A/C/G/T codes and y_train the label vector."""
    FREEMEM=True # use False for debugging, True for production
    print("Load data from files.")
    nc_seq = tools.load_fasta(nc_filename, 0)
    pc_seq = tools.load_fasta(pc_filename, 1)
    train_set = pd.concat((nc_seq, pc_seq), axis=0)
    print("Ready: train_set")
    subset = tools.make_slice(train_set, MINLEN, MAXLEN)  # One array to two: X and y
    if FREEMEM:
        # Drop the large intermediates as soon as they are no longer needed.
        nc_seq = pc_seq = train_set = None
    (X1, y_train) = tools.separate_X_and_y(subset)
    X2 = X1.to_numpy()                 # ndarray of one-element lists
    X3 = [row[0] for row in X2]        # unwrap each into its ACGT string
    X4 = X3  # no-op
    print("X4",type(X4))
    if FREEMEM:
        X1 = X2 = X3 = None
    # Encode every base as a small integer in one pass.
    dna_to_int = {'A':0,'C':1,'G':2,'T':3}
    X5 = np.asarray([[dna_to_int[base] for base in seq] for seq in X4])
    print("X5",type(X5))
    print(X5.shape)
    if FREEMEM:
        X4 = None
    X_train = X5
    if FREEMEM:
        X5 = None
    print("X_train",type(X_train))
    y_train = y_train.to_numpy()
    print(X_train.shape)
    print(X_train[0].shape)
    print(X_train[0])
    return X_train,y_train
# + colab={"base_uri": "https://localhost:8080/"} id="UmEls9oXRQON" outputId="e0ba3308-aa1b-4e76-b374-dcd6b45e0a00"
# Load both datasets, then run the 5-fold shuffle-split cross validation.
print("Loading training data...")
X_train,y_train = load_data_from_files(NC_FILENAME,PC_FILENAME)
# + colab={"base_uri": "https://localhost:8080/"} id="sfvgDZR1a_2j" outputId="44178aad-5c14-4a88-860e-f234bde08bf3"
print("Loading validation data...")
X_VALID,Y_VALID = load_data_from_files(NC_VAL_FILE,PC_VAL_FILE)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="mQ8eW5Rg9KdQ" outputId="2a46f125-a9be-4409-c118-8ae6e3316cb0"
print ("Cross validation")
do_cross_validation(X_train,y_train,model,X_VALID,Y_VALID)
print ("Done")
# + id="p4fh2GI8beMQ"
| Nasa2021/CNN_523embed_12K.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Gauravhulmukh/Chatbot-Development-for-Regional-language-using-Artificial-Intelligence/blob/master/Response.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="kP3vhOWE6DLm" colab_type="code" outputId="e116c937-3338-4c9e-9462-165ed7509530" colab={"base_uri": "https://localhost:8080/", "height": 89}
import nltk
from nltk.stem.lancaster import LancasterStemmer
# Shared stemmer instance used by clean_up_sentence().
stemmer = LancasterStemmer()
import numpy as np
import tflearn
import tensorflow as tf
import random
import pickle
import json
# + id="ZnHjVKnc6L4P" colab_type="code" colab={}
ERROR_THRESHOLD = 0.25
def clean_up_sentence(sentence):
    """Tokenize *sentence* and return the lower-cased stem of every token."""
    return [stemmer.stem(token.lower()) for token in nltk.word_tokenize(sentence)]
# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=False):
    """Encode *sentence* as a binary bag-of-words vector over *words*."""
    stems = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for token in stems:
        for idx, vocab_word in enumerate(words):
            if vocab_word != token:
                continue
            bag[idx] = 1
            if show_details:
                print("found in bag %s" % vocab_word)
    return np.array(bag)
# + id="jITeVoAx6M94" colab_type="code" colab={}
def classify(sentence):
    """Return (intent_tag, probability) pairs above ERROR_THRESHOLD, best first."""
    raw_scores = model.predict([bow(sentence, words)])[0]
    # keep only confident predictions
    candidates = [[idx, score] for idx, score in enumerate(raw_scores) if score > ERROR_THRESHOLD]
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    # map class indices back to their intent tags
    return [(classes[idx], score) for idx, score in candidates]
def response(sentence, userID='123', show_details=False):
    """Print a random canned response for the best-matching intent of *sentence*."""
    matches = classify(sentence)
    if not matches:
        return
    # Walk the ranked matches until one names a known intent tag.
    while matches:
        best_tag = matches[0][0]
        for intent in intents['intents']:
            if intent['tag'] == best_tag:
                print(random.choice(intent['responses']))
                return
        matches.pop(0)
# + id="ssV9uN2R6QkJ" colab_type="code" colab={}
# Restore the preprocessed vocabulary, class list and training matrices.
# NOTE(review): pickle.load on an untrusted file can execute arbitrary code —
# only load training_data from a trusted source.
data = pickle.load(open("training_data", "rb"))
words = data['words']
classes = data['classes']
train_x = data['train_x']
train_y = data['train_y']
# + id="5GM_eMfz6iir" colab_type="code" colab={}
import json  # already imported above; harmless re-import
with open('/content/drive/My Drive/Colab Notebooks/intents.json') as json_data:
    intents = json.load(json_data)
# + id="VVCdLLtl6j5Z" colab_type="code" outputId="7910fc2a-acba-42b8-c2e2-268c6b1694b2" colab={"base_uri": "https://localhost:8080/", "height": 211}
# Rebuild the same architecture used at training time (two 8-unit dense
# layers + softmax output), then restore the saved weights from disk.
network = tflearn.input_data(shape=[None, len(train_x[0])])
network = tflearn.fully_connected(network, 8)
network = tflearn.fully_connected(network, 8)
network = tflearn.fully_connected(network, len(train_y[0]), activation='softmax')
network = tflearn.regression(network)
model = tflearn.DNN(network, tensorboard_dir='tflearn_logs')
model.load('./model.tflearn')
# Smoke-test the restored chatbot on a handful of sample utterances; each
# call prints the randomly chosen canned reply for the predicted intent.
# + id="ehgihb--7jHV" colab_type="code" outputId="d2b48541-728c-4621-a1b4-1c21a59eb22e" colab={"base_uri": "https://localhost:8080/", "height": 34}
response('Hi')
# + id="Qw--ZPJe7lh2" colab_type="code" outputId="7468457c-2fb6-46c0-d882-ca8a55f4cada" colab={"base_uri": "https://localhost:8080/", "height": 34}
response('Good day')
# + id="018s82El78Jt" colab_type="code" outputId="cc3e86eb-c956-43ae-8b0e-1d5664f2734d" colab={"base_uri": "https://localhost:8080/", "height": 34}
response("How it works to pay?")
# + id="uukNTiEZ8dOR" colab_type="code" outputId="19e3e710-f25e-4946-f37e-faf74d4bacfa" colab={"base_uri": "https://localhost:8080/", "height": 34}
response("Thanks")
# + id="NEZ1Fyht8iAg" colab_type="code" outputId="3bc931b9-e81d-4812-e1e0-3b4c441d4051" colab={"base_uri": "https://localhost:8080/", "height": 34}
response("how to register payment?")
# + id="Xl7AB9Kg8nCq" colab_type="code" outputId="c838c677-1cdf-4e5f-ab69-44ec08e7de02" colab={"base_uri": "https://localhost:8080/", "height": 34}
response('Hi')
# + id="zO_Ymvn38x9w" colab_type="code" outputId="723d8d0c-698a-4a90-838b-10278faa7207" colab={"base_uri": "https://localhost:8080/", "height": 34}
response("what I need to need to create payment?")
# + id="pxiKTx5W895E" colab_type="code" outputId="cf499a7d-737e-46f0-b5c2-4b4c03282932" colab={"base_uri": "https://localhost:8080/", "height": 34}
response("How to pay invoice?")
# + id="wyJpv8FN9YYL" colab_type="code" outputId="f2cc90e0-d2f2-48b2-ebcf-f580400244dd" colab={"base_uri": "https://localhost:8080/", "height": 34}
response("Please help to validate payment?")
# + id="OQj8i4ma9tks" colab_type="code" outputId="998d1c3c-de11-41b8-d9c9-01e8ac880c12" colab={"base_uri": "https://localhost:8080/", "height": 34}
response("payment")
# + id="J9TSkOLk92yh" colab_type="code" outputId="498b3bb7-f0bb-4d48-ff93-5c2d583830db" colab={"base_uri": "https://localhost:8080/", "height": 34}
response("Thanks")
# + id="i6Re4sh29_bG" colab_type="code" outputId="5cb077c6-cc13-4428-dcfd-787c4104a9a8" colab={"base_uri": "https://localhost:8080/", "height": 35}
response("Bye")
| Response.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1. Import Relevant Packages
import pandas as pd
import requests
import os
# # 2. Load the Station File
desktop = r'C:\Users\hlchang4.DS039047\Desktop'
station_location = pd.read_csv(os.path.join(desktop, 'station_location.csv'))
# # 3. Set the API key and Get the Latitudes and Longitudes
# NOTE(review): keep real API keys out of source control — load from an
# environment variable or a config file instead of a literal.
api_key = "XXXXX"
# Vectorized string concatenation replaces the per-row apply(..., axis=1):
# same result for string names, one C-level pass instead of a Python call
# per row.
station_location['station_full_name'] = station_location['Name'] + ' station, Hong Kong'
station_location.head(10)
def get_lat_lon_from_api(address):
    """Geocode *address* with the Google Geocoding API.

    Parameters:
        address (str): free-form address, e.g. "Central station, Hong Kong".

    Returns:
        tuple[float, float]: (latitude, longitude) of the top-ranked result.

    Raises:
        requests.HTTPError: on a non-2xx API response.
        IndexError: if the API returns no results for the address.
    """
    # Let requests build the query string: addresses contain spaces and
    # commas, which the original format-string URL never percent-encoded.
    api_response = requests.get(
        'https://maps.googleapis.com/maps/api/geocode/json',
        params={'address': address, 'key': api_key},
        timeout=10,  # fail fast instead of hanging on a network stall
    )
    api_response.raise_for_status()
    location = api_response.json()['results'][0]['geometry']['location']
    return location['lat'], location['lng']
# +
# Geocode every station once, keyed by its short name (dict preserves the
# CSV row order, same as the original append loop).
station_location_dict = {
    row['Name']: get_lat_lon_from_api(row['station_full_name'])
    for _, row in station_location.iterrows()
}
# -
station_location_dict
# ## However, after checking each latitude and longitude pair, we see that the geoinformation of some stations is wrong. The geoinformation of these stations should be:
#
# 1. Kam Sheung Road: (22.43475, 114.06352)
# 2. Kowloon Bay: (22.32317, 114.21412)
# 3. Hong Kong: (22.28468, 114.15815)
# 4. <NAME>: (22.32463, 114.16826)
# 5. She<NAME>: (22.3321, 114.16879)
# 6. South Horizons: (22.24285, 114.14874)
# 7. <NAME>: (22.24797, 114.16805)
| Use_Google_Places_API_to_find_geoinformation_MTR_stations.ipynb |