code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Russian Corpora Study
# +
# Environment setup: locate the 'pygents' project root from the current
# working directory, put it on sys.path, and cd into it so relative data
# paths (data/models/...) resolve regardless of where the notebook starts.
import os, sys

cwd = os.getcwd()
_idx = cwd.find('pygents')
if _idx < 0:
    # fail fast instead of silently slicing a garbage path when the notebook
    # is launched outside the project tree (find() returns -1 in that case)
    raise RuntimeError("'pygents' not found in working directory: " + cwd)
project_path = cwd[:_idx + len('pygents')]
if project_path not in sys.path:
    sys.path.append(project_path)
os.chdir(project_path)

from os import listdir
from os.path import isfile, join
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle

# Force re-import of project modules so edits to them are picked up when the
# notebook is re-run (the star-imports below would otherwise keep stale code).
for _mod in ('pygents.util', 'pygents.text', 'pygents.plot', 'pygents.token'):
    if _mod in sys.modules:
        del sys.modules[_mod]
from pygents.util import *
from pygents.text import *
from pygents.plot import *
from pygents.token import *
# +
# Candidate Russian corpora:
# https://nlpub.ru/%D0%A0%D0%B5%D1%81%D1%83%D1%80%D1%81%D1%8B - Inventory
# http://study.mokoron.com/ - Twitter, need to extract from SQL
# https://linguatools.org/tools/corpora/wikipedia-monolingual-corpora/ - Wiki, need to extract from XML
# +
# RusAge: plain-text fiction books for age-based text classification
# https://www.kaggle.com/datasets/oldaandozerskaya/fiction-corpus-for-agebased-text-classification - just txt books
path = '../../nlp/corpora/Russian/rusage/archive/previews'
# collect the names of all regular files (book previews) in the corpus folder
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
print(len(onlyfiles))
onlyfiles  # notebook display: the list of preview file names
# -
# Load cached n-gram counters if present; otherwise build them from the
# RusAge preview files and persist them under data/models/.
n_counters1 = context_save_load(None,'russian_rusage',folder='data/models/')
if n_counters1 is None or len(n_counters1) < 1:
    # maximum n-gram order; rationale:
    # https://www.sciencedirect.com/science/article/abs/pii/0378375886901692
    max_n = 7
    n_counters1 = grams_init(max_n)
    cnt = 0
    for file in onlyfiles:
        with open(join(path, file),errors='ignore') as f:
            lines = f.readlines()
        cnt += 1
        print(cnt,file)  # progress: one line per processed book
        for text in lines:
            text = preprocess_text(text)
            text_grams_count(n_counters1,text,max_n)
    context_save_load(n_counters1,'russian_rusage',folder='data/models/')
# Fix: this display used to run BEFORE the None-check above, raising
# TypeError (len(None)) whenever the cache file was absent.
len(n_counters1)  # notebook display: number of n-gram orders available
# Build one frequency DataFrame per n-gram order and plot the 100 most
# frequent grams of each order as a horizontal bar chart.
dfs = []
for i in range(len(n_counters1)):
    counter = n_counters1[i]
    df = pd.DataFrame([(gram, counter[gram]) for gram in counter],columns=['gram','freq'])
    df['log'] = np.log10(df['freq'])
    df.sort_values('freq',ascending=False,inplace=True)
    # NOTE(review): setting an ad-hoc attribute on a DataFrame is fragile
    # (lost on copy, may warn); used only as the plot title below.
    df.title = str(1+i)
    dfs.append(df)
plt.rcParams["figure.figsize"] = (20,20)
for df in dfs:
    p = df[:100][['gram','freq']].plot.barh(x='gram'); p.invert_yaxis();
    p.set_title(df.title,fontsize = 32)
    plt.show()
# ## Load and explore full models
# +
# Train character-level and gram-level FreedomTokenizer models over the whole
# RusAge preview corpus, persist both, and report their parameter counts.
rusage_chars = FreedomTokenizer(max_n=7,mode='chars',debug=False)
rusage_grams = FreedomTokenizer(max_n=7,mode='grams',debug=False)
path = '../../nlp/corpora/Russian/rusage/archive/previews'
onlyfiles = [f for f in listdir(path) if isfile(join(path, f))]
cnt = 0
for file in onlyfiles:
    with open(join(path, file),errors='ignore') as f:
        lines = f.readlines()
    cnt += 1
    # progress report every 100 files
    if (cnt % 100) == 0:
        print(cnt,file)
    rusage_chars.train(lines)
    rusage_grams.train(lines)
rusage_chars.store('data/models/rusage_chars_7a')
rusage_grams.store('data/models/rusage_grams_7a')
print(rusage_chars.count_params())
print(rusage_grams.count_params())
# -
| notebooks/nlp/russian/ru_token_corpora_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## VRFT without measurement noise (no instrumental variables)
# +
# Copyright (c) [2021] <NAME> [<EMAIL>]. All rights reserved.
# This file is part of PythonVRFT.
# PythonVRFT is free software: you can redistribute it and/or modify
# it under the terms of the MIT License. You should have received a copy of
# the MIT License along with PythonVRFT.
# If not, see <https://opensource.org/licenses/MIT>.
#
# Code author: [<NAME> - <EMAIL>]
# Last update: 10th January 2021, by <EMAIL>
#
# Example 1
# ------------
# In this example we see how to apply VRFT to a simple SISO model
# without any measurement noise.
# Input data is generated using a square signal
#
# -
# ### Load libraries
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as scipysig
from vrft import *
# ### System, Reference Model and Control law
# +
# System: first-order discrete-time plant G(z) = 0.5 / (z - 0.9), sample time dt
dt = 1e-2
num = [0.5]
den = [1, -0.9]
# NOTE(review): 'sys' shadows the stdlib sys module name for the rest of this notebook
sys = ExtendedTF(num, den, dt=dt)
# Reference Model: desired closed-loop response M(z) = 0.6 / (z - 0.4)
refModel = ExtendedTF([0.6], [1, -0.4], dt=dt)
# Control law: controller basis {1/(z-1), z/(z-1)}; VRFT fits a weight for each
control = [ExtendedTF([1], [1, -1], dt=dt),
           ExtendedTF([1, 0], [1, -1], dt=dt)]
# -
# ### Generate signals
# +
# Generate input signal: a square wave built by zeroing two windows
# (samples 200-400 and 600-800) of a ones vector
t_start = 0
t_end = 10
t = np.arange(t_start, t_end, dt)
u = np.ones(len(t))
u[200:400] = np.zeros(200)
u[600:800] = np.zeros(200)
# Open loop experiment: simulate the plant's response to u
t, y = scipysig.dlsim(sys, u, t)
y = y.flatten()
# Save data into an IDDATA Object with 0 initial condition
# Length of the initial condition depends on the reference model
data = iddata(y, u, dt, [0])
# -
# ### VRFT
# +
# VRFT Pre-filter: M(z) * (1 - M(z)) is the standard noise-free prefilter choice
prefilter = refModel * (1 - refModel)
# VRFT method: returns fitted controller parameters theta, the virtual
# reference signal r, the identification loss, and the controller C itself
theta, r, loss, C = compute_vrft(data, refModel, control, prefilter)
# Obtained controller
print("Loss: {}\nTheta: {}\nController: {}".format(loss, theta, C))
# -
# ### Verify performance
# +
# Closed loop system: plant in unity feedback with the VRFT controller
closed_loop = (C * sys).feedback()
t = t[:len(r)]  # the virtual reference r may be shorter than the experiment
u = np.ones(len(t))  # unit-step input for the comparison runs
_, yr = scipysig.dlsim(refModel, u, t)     # desired (reference model) response
_, yc = scipysig.dlsim(closed_loop, u, t)  # achieved closed-loop response
_, ys = scipysig.dlsim(sys, u, t)          # open-loop plant response
yr = np.array(yr).flatten()
ys = np.array(ys).flatten()
yc = np.array(yc).flatten()
# Four stacked panels: CL vs reference, OL, experiment data, virtual reference
fig, ax = plt.subplots(4, sharex=True, figsize=(12,8), dpi= 100, facecolor='w', edgecolor='k')
ax[0].plot(t, yr,label='Reference System')
ax[0].plot(t, yc, label='CL System')
ax[0].set_title('Systems response')
ax[0].grid(True)
ax[1].plot(t, ys, label='OL System')
ax[1].set_title('OL Systems response')
ax[1].grid(True)
ax[2].plot(t, y[:len(r)])
ax[2].grid(True)
ax[2].set_title('Experiment data')
ax[3].plot(t, r)
ax[3].grid(True)
ax[3].set_title('Virtual Reference')
# Now add the legend with some customizations.
legend = ax[0].legend(loc='lower right', shadow=True)
# The frame is a matplotlib.patches.Rectangle instance surrounding the legend.
frame = legend.get_frame()
frame.set_facecolor('0.90')
# Set the fontsize
for label in legend.get_texts():
    label.set_fontsize('large')
for label in legend.get_lines():
    label.set_linewidth(1.5)  # the legend line width
plt.show()
# -
| examples/notebook_example_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbsphinx-toctree={"hidden": true}
# [Installation](installing.rst)
# [Tutorials](tutorials.rst)
# [Examples](examples.rst)
# [API](api.rst)
# [Datasets](datasets.rst)
# [FAQ](faq.rst)
# + raw_mimetype="text/html" active=""
# <style>
# pre {
# white-space: pre-wrap !important;
# }
# .table-striped > tbody > tr:nth-of-type(odd) {
# background-color: #f9f9f9;
# }
# .table-striped > tbody > tr:nth-of-type(even) {
# background-color: white;
# }
# .table-striped td, .table-striped th, .table-striped tr {
# border: 1px solid black;
# border-collapse: collapse;
# margin: 1em 2em;
# }
# .rendered_html td, .rendered_html th {
# text-align: left;
# vertical-align: middle;
# padding: 4px;
# }
# </style>
# -
# ## What is Vaex?
#
# Vaex is a python library for lazy **Out-of-Core DataFrames** (similar to Pandas), to visualize and explore big tabular datasets. It can calculate *statistics* such as mean, sum, count, standard deviation etc, on an *N-dimensional grid* up to **a billion** ($10^9$) objects/rows **per second**. Visualization is done using **histograms**, **density plots** and **3d volume rendering**, allowing interactive exploration of big data. Vaex uses memory mapping, a zero memory copy policy, and lazy computations for best performance (no memory wasted).
# # Why vaex
#
# * **Performance:** works with huge tabular data, processes $\gt 10^9$ rows/second
# * **Lazy / Virtual columns:** compute on the fly, without wasting ram
# * **Memory efficient** no memory copies when doing filtering/selections/subsets.
# * **Visualization:** directly supported, a one-liner is often enough.
# * **User friendly API:** you will only need to deal with the DataFrame object, and tab completion + docstring will help you out: `ds.mean<tab>`, feels very similar to Pandas.
# * **Lean:** separated into multiple packages
# * `vaex-core`: DataFrame and core algorithms, takes numpy arrays as input columns.
# * `vaex-hdf5`: Provides memory mapped numpy arrays to a DataFrame.
# * `vaex-arrow`: [Arrow](https://arrow.apache.org/) support for cross language data sharing.
# * `vaex-viz`: Visualization based on matplotlib.
# * `vaex-jupyter`: Interactive visualization based on Jupyter widgets / ipywidgets, bqplot, ipyvolume and ipyleaflet.
# * `vaex-astro`: Astronomy related transformations and FITS file support.
# * `vaex-server`: Provides a server to access a DataFrame remotely.
# * `vaex-distributed`: (Proof of concept) combined multiple servers / cluster into a single DataFrame for distributed computations.
# * `vaex-qt`: Program written using Qt GUI.
# * `vaex`: Meta package that installs all of the above.
# * `vaex-ml`: [Machine learning](ml.ipynb)
#
# * **Jupyter integration**: vaex-jupyter will give you interactive visualization and selection in the Jupyter notebook and Jupyter lab.
# ## Installation
#
# Using conda:
#
# * `conda install -c conda-forge vaex`
#
# Using pip:
#
# * `pip install --upgrade vaex`
#
# Or read the [detailed instructions](installing.ipynb)
# # Getting started
#
# We assume that you have installed vaex, and are running a [Jupyter notebook server](https://jupyter.readthedocs.io/en/latest/running.html). We start by importing vaex and asking it to give us an example dataset.
import vaex
df = vaex.example()  # open the example dataset provided with vaex
# Instead, you can [download some larger datasets](datasets.rst), or [read in your csv file](api.rst#vaex.from_csv).
df  # will pretty print the DataFrame
# Using [square brackets[]](api.rst#vaex.dataframe.DataFrame.__getitem__), we can easily filter or get different views on the DataFrame.
df_negative = df[df.x < 0]  # easily filter your DataFrame, without making a copy
df_negative[:5][['x', 'y']]  # take the first five rows, and only the 'x' and 'y' column (no memory copy!)
# When dealing with huge datasets, say a billion rows ($10^9$), computations with the data can waste memory, up to 8 GB for a new column. Instead, vaex uses lazy computation, storing only a representation of the computation, and computations are done on the fly when needed. You can just use many of the numpy functions, as if it was a normal array.
import numpy as np
# creates an expression (nothing is computed)
some_expression = df.x + df.z
some_expression  # for convenience, we print out some values
# These expressions can be added to a DataFrame, creating what we call a *virtual column*. These virtual columns are similar to normal columns, except they do not waste memory.
df['r'] = some_expression  # add a (virtual) column that will be computed on the fly
df.mean(df.x), df.mean(df.r)  # calculate statistics on normal and virtual columns
# One of the core features of vaex is its ability to calculate statistics on a regular (N-dimensional) grid. The dimensions of the grid are specified by the binby argument (analogous to SQL's groupby), and the shape and limits.
df.mean(df.r, binby=df.x, shape=32, limits=[-10, 10])  # create statistics on a regular grid (1d)
df.mean(df.r, binby=[df.x, df.y], shape=32, limits=[-10, 10])  # or 2d
df.count(df.r, binby=[df.x, df.y], shape=32, limits=[-10, 10])  # or 2d counts/histogram
# These one and two dimensional grids can be visualized using any plotting library, such as matplotlib, but the setup can be tedious. For convenience we can use [plot1d](api.rst#vaex.dataframe.DataFrame.plot1d), [plot](api.rst#vaex.dataframe.DataFrame.plot), or see the [list of plotting commands](api.rst#visualization)
df.plot(df.x, df.y, show=True);  # make a plot quickly
# ## Continue
# [Continue the tutorial here](tutorial.ipynb) or check the [examples](examples.rst)
| docs/source/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# __Kaggle competition - Titanic__
#
# 1. [Import](#Import)
# 1. [Tools](#Tools)
# 1. [Data](#Data)
# 1. [Initial EDA](#Initial-EDA)
# 1. [object feature EDA](#object-feature-EDA)
# 1. [Univariate & feature vs. target](#Univariate-&-feature-vs.-target)
# 1. [number feature EDA](#number-feature-EDA)
# 1. [Univariate & feature vs. target](#Univariate-&-feature-vs.-target2)
# 1. [Correlation](#Correlation)
# 1. [Pair plot](#Pair-plot)
# 1. [Faceting](#Faceting)
# 1. [Target variable evaluation](#Target-variable-evaluation)
# 1. [Data preparation](#Data-preparation)
# 1. [Outliers (preliminary)](#Outliers-preliminary)
# 1. [Evaluate](#Evaluate)
# 1. [Remove](#remove)
# 1. [Missing data](#Missing-data)
# 1. [Evaluate](#Evaluate1)
# 1. [Impute](#Impute)
# 1. [Engineering](#Engineering)
# 1. [Evaluate](#Evaluate3)
# 1. [Engineer](#Engineer)
# 1. [Encoding](#Encoding)
# 1. [Evaluate](#Evaluate2)
# 1. [Encode](#Encode)
# 1. [Transformation](#Transformation)
# 1. [Evaluate](#Evaluate4)
# 1. [Transform](#Transform)
# 1. [Outliers (final)](#Outliers-final)
# 1. [Evaluate](#Evaluate5)
# 1. [Remove](#remove1)
# 1. [Data evaluation](#Data-evaluation)
# 1. [Feature importance](#Feature-importance)
# 1. [Rationality](#Rationality)
# 1. [Value override](#Value-override)
# 1. [number feature EDA](#number-feature-EDA3)
# 1. [Correlation](#Correlation3)
# 1. [Modeling](#Modeling)
# 1. [Data preparation](#Data-preparation-1)
# 1. [Bayesian hyper-parameter optimization](#Bayesian-hyper-parameter-optimization)
# 1. [Model loss by iteration](#Model-loss-by-iteration)
# 1. [Parameter selection by iteration](#Parameter-selection-by-iteration)
# 1. [Model performance evaluation - standard models](#Model-performance-evaluation-standard-models)
# 1. [Model explanability](#Model-explanability)
# 1. [Permutation importance](#Permutation-importance)
# 1. [SHAP values](#SHAP-values)
# 1. [Submission - standard models](#Submission-standard-models)
# 1. [Stacking](#Stacking)
# 1. [Primary models](#Primary-models)
# 1. [Meta model](#Meta-model)
# 1. [Model performance evaluation - stacked models](#Model-performance-evaluation-stacked-models)
# 1. [Submission - stacked models](#Submission-stacked-models)
# # Import
# <a id = 'Import'></a>
# ## Tools
# <a id = 'Tools'></a>
# +
# standard library and settings
import os
import sys
import importlib
import itertools
import csv
import ast
from timeit import default_timer as timer
global ITERATION  # no-op at module scope; ITERATION is assigned during hyperopt runs
import time
from functools import reduce
rundate = time.strftime("%Y%m%d")
import warnings
warnings.simplefilter("ignore")
# NOTE(review): IPython.core.display is deprecated in newer IPython; the
# modern import path is IPython.display — confirm the installed version.
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:95% !important; }</style>"))
# data extensions and settings
import numpy as np
np.set_printoptions(threshold=np.inf, suppress=True)
import pandas as pd
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.options.display.float_format = "{:,.6f}".format
# modeling extensions
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.cluster import KMeans, DBSCAN, AgglomerativeClustering
# NOTE(review): load_boston was removed in scikit-learn >= 1.2 — drop it when upgrading
from sklearn.datasets import load_boston, load_wine, load_iris, load_breast_cancer, make_blobs, make_moons
from sklearn.decomposition import PCA, LatentDirichletAllocation
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, ExtraTreesClassifier, IsolationForest
# fix: the class is CountVectorizer; 'CounterVectorizer' raised ImportError
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer, TfidfVectorizer, HashingVectorizer
from sklearn.feature_selection import f_classif, f_regression, VarianceThreshold, SelectFromModel, SelectKBest
import sklearn.gaussian_process as gaussian_process
from sklearn.linear_model import Lasso, Ridge, ElasticNet, LinearRegression, LogisticRegression, SGDRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics import precision_score, recall_score, f1_score, explained_variance_score, mean_squared_log_error, mean_absolute_error, median_absolute_error, mean_squared_error, r2_score, confusion_matrix, roc_curve, accuracy_score, roc_auc_score, homogeneity_score, completeness_score, classification_report, silhouette_samples
from sklearn.model_selection import KFold, train_test_split, GridSearchCV, StratifiedKFold, cross_val_score, RandomizedSearchCV
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.pipeline import make_pipeline, Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler, RobustScaler, PolynomialFeatures, OrdinalEncoder, LabelEncoder, OneHotEncoder, KBinsDiscretizer, QuantileTransformer, PowerTransformer, MinMaxScaler
from sklearn.svm import SVC, SVR
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
import sklearn.utils as utils
import eif
import shap; shap.initjs()
import eli5
from eli5.sklearn import PermutationImportance
from pdpbox import pdp, get_dataset, info_plots
from scipy import stats, special
from xgboost import XGBClassifier, XGBRegressor
from lightgbm import LGBMClassifier, LGBMRegressor
import catboost
from hyperopt import hp, tpe, Trials, fmin, STATUS_OK
from hyperopt.pyll.stochastic import sample
# visualization extensions and settings
import seaborn as sns
import matplotlib.pyplot as plt
import missingno as msno
# %matplotlib inline
# fix: the original forced the except branch via 'import asdfasd' and put the
# install advice in an unreachable else clause; the sys.path membership tests
# also used a different directory depth than the appended paths. Now: import
# the installed packages, fall back to sibling source checkouts, and only
# advise (then re-raise) if both fail.
try:
    import mlmachine as mlm
    from prettierplot.plotter import PrettierPlot
    import prettierplot.style as style
except ModuleNotFoundError:
    for _local in ("../../../mlmachine", "../../../prettierplot"):
        if _local not in sys.path:
            sys.path.append(_local)
    try:
        import mlmachine as mlm
        from prettierplot.plotter import PrettierPlot
        import prettierplot.style as style
    except ModuleNotFoundError:
        print('This notebook relies on the libraries mlmachine and prettierplot. Please run:')
        print('\tpip install mlmachine')
        print('\tpip install prettierplot')
        raise
# -
# ## Data
# <a id = 'Data'></a>
# +
# load data and print dimensions
df_train = pd.read_csv("s3://tdp-ml-datasets/kaggle-titanic//train.csv")
df_valid = pd.read_csv("s3://tdp-ml-datasets/kaggle-titanic//test.csv")
print("Training data dimensions: {}".format(df_train.shape))
print("Validation data dimensions: {}".format(df_valid.shape))
# -
# display info and first 5 rows
df_train.info()
display(df_train[:5])
# review counts of different column types
df_train.dtypes.value_counts()
# load training data into mlmachine: drop identifier-like features and treat
# low-cardinality integer columns as categorical
train = mlm.Machine(
    data=df_train,
    target="Survived",
    remove_features=["PassengerId", "Ticket"],
    identify_as_object=["Pclass", "SibSp", "Parch"],
    target_type="object",
)
print(train.data.shape)
# load validation data into mlmachine (no target column in the test set)
valid = mlm.Machine(
    data=df_valid,
    remove_features=["PassengerId", "Ticket"],
    identify_as_object=["Pclass", "SibSp", "Parch"],
)
print(valid.data.shape)
# # Initial EDA
# <a id = 'Initial-EDA'></a>
# ## object feature EDA
# <a id = 'object-feature-EDA'></a>
# ### Univariate & feature vs. target
# <a id = 'Univariate-&-feature-vs.-target'></a>
# object features: univariate distribution plus survival breakdown per level
for feature in train.data.mlm_dtypes["object"]:
    train.eda_cat_target_cat_feat(feature=feature, level_count_cap=50)
# ## number feature EDA
# <a id = 'number-feature-EDA'></a>
# ### Univariate & feature vs. target
# <a id = 'Univariate-&-feature-vs.-target2'></a>
# number features: distribution and relationship to the target
for feature in train.data.mlm_dtypes["number"]:
    train.eda_cat_target_num_feat(feature=feature)
# ### Correlation
# <a id = 'Correlation'></a>
# ##### Correlation (all samples)
# correlation heat map
p = PrettierPlot()
ax = p.make_canvas()
p.corr_heatmap(df=train.data, annot=True, ax=ax)
# ##### Correlation (top vs. target)
# correlation heat map with most highly correlated features relative to the target
p = PrettierPlot(plot_orientation='tall',chart_scale=10)
ax = p.make_canvas()
p.corr_heatmap_target(
    df=train.data, target=train.target, thresh=0.01, annot=True, ax=ax
)
# > Remarks - There are three pairs of highly correlated features:
# - 'GarageArea' and 'GarageCars'
# - 'TotRmsAbvGrd' and 'GrLivArea'
# - '1stFlrSF' and 'TotalBsmtSF'
# This makes sense, given what each feature represents and how the items in each pair relate to each other. We likely only need one feature from each pair.
# NOTE(review): these feature names belong to the Ames house-prices dataset, not Titanic — this remark appears copy-pasted from another notebook; verify against the heatmap above.
# ### Pair plot
# <a id = 'Pair-plot'></a>
# pair plot across all features
p = PrettierPlot(chart_scale=12)
p.pair_plot(df=train.data, diag_kind="auto")
# pair plot of selected features, colored by survival outcome
p = PrettierPlot(chart_scale=12)
p.pair_plot(
    df=train.data.dropna(),
    diag_kind="kde",
    target=train.target,
    columns=["Age", "Fare", "Pclass", "Parch", "SibSp"],
    legend_labels=["Died", "Survived"],
    bbox=(2.0, 0.0),
)
# ## Faceting
# <a id = 'Faceting'></a>
# ##### object by object
# facet Embarked vs Pclass
p = PrettierPlot(chart_scale=12)
ax = p.make_canvas(title="Survivorship, embark location by passenger class", y_shift=0.7)
p.facet_two_cat_bar(
    df=train.recombine_data(train.data, train.target),
    x="Embarked",
    y=train.target.name,
    split="Pclass",
    y_units="ff",
    ax=ax,
)
# facet Pclass vs Sex
p = PrettierPlot(chart_scale=12)
ax = p.make_canvas(title="Survivorship, passenger class by gender", y_shift=0.7)
p.facet_two_cat_bar(
    df=train.recombine_data(train.data, train.target),
    x="Pclass",
    y=train.target.name,
    split="Sex",
    y_units="ff",
    ax=ax,
)
# facet Embarked vs Sex
p = PrettierPlot(chart_scale=12)
ax = p.make_canvas(title="Survivorship,embark location by gender", y_shift=0.7)
p.facet_two_cat_bar(
    df=train.recombine_data(train.data, train.target),
    x="Embarked",
    y=train.target.name,
    split="Sex",
    y_units="ff",
    ax=ax,
)
# point plot: survival by gender, split by class, one panel row per embark location
p = PrettierPlot()
p.facet_two_cat_point(
    df=train.recombine_data(train.data, train.target),
    x="Sex",
    y=train.target.name,
    split="Pclass",
    cat_row="Embarked",
    aspect=1.0,
    height=5,
    bbox=(1.3, 1.2),
    legend_labels=["1st class", "2nd class", "3rd class"],
)
# point plot: survival by embark location, split by class, one panel row per gender
p = PrettierPlot()
p.facet_two_cat_point(
    df=train.recombine_data(train.data, train.target).dropna(subset=["Embarked"]),
    x="Embarked",
    y=train.target.name,
    split="Pclass",
    cat_row="Sex",
    aspect=1.0,
    height=5,
    bbox=(1.5, 0.8),
    legend_labels=["1st class", "2nd class", "3rd class"],
)
# ##### object by number
# age histograms faceted by gender and embark location, split by outcome
p = PrettierPlot()
p.facet_cat_num_hist(
    df=train.recombine_data(train.data, train.target),
    split=train.target.name,
    legend_labels=["Died", "Lived"],
    cat_row="Sex",
    cat_col="Embarked",
    num_col="Age",
    bbox=(1.9, 1.0),
    height=4,
    aspect=1,
)
# fare vs. age scatter faceted by gender and embark location, split by outcome
p = PrettierPlot(chart_scale=15)
p.facet_cat_num_scatter(
    df=train.recombine_data(train.data, train.target),
    split=train.target.name,
    legend_labels=["Died", "Lived"],
    cat_row="Sex",
    cat_col="Embarked",
    xNum="Fare",
    yNum="Age",
    bbox=(1.9, 1.0),
    height=4,
    aspect=1,
)
# ## Target variable evaluation
# <a id = 'Target-variable-evaluation'></a>
# null score: class balance of the target, i.e. the majority-class baseline
pd.Series(train.target).value_counts(normalize=True)
# # Data preparation
# <a id = 'Data-preparation'></a>
# ## Outliers (preliminary)
#
# <a id = 'Outliers-preliminary'></a>
# ### Evaluate
# <a id = 'Evaluate'></a>
# ##### Training outliers
# +
# identify columns that have zero missing values
nonNull = train.data.columns[train.data.isnull().sum() == 0].values.tolist()
# identify intersection between non-null columns and number columns
nonNullnum_col = list(set(nonNull).intersection(train.data.mlm_dtypes["number"]))
print(nonNullnum_col)
# +
# identify outliers using IQR (flag rows that are outliers in >= 2 features)
train_pipe = Pipeline([
    ("outlier",train.OutlierIQR(
        outlier_count=2,
        iqr_step=1.5,
        features=["Age", "SibSp", "Parch", "Fare"],
        drop_outliers=False,))
])
train.data = train_pipe.transform(train.data)
# capture outliers
iqr_outliers = np.array(sorted(train_pipe.named_steps["outlier"].outliers_))
print(iqr_outliers)
# +
# identify outliers using Isolation Forest
# NOTE(review): behaviour="new" was deprecated and later removed in
# scikit-learn (>= 0.24) — drop this argument when upgrading.
clf = IsolationForest(
    behaviour="new", max_samples=train.data.shape[0], random_state=0, contamination=0.02
)
clf.fit(train.data[["SibSp", "Parch", "Fare"]])
preds = clf.predict(train.data[["SibSp", "Parch", "Fare"]])
# evaluate index values (predict returns -1 for outliers)
mask = np.isin(preds, -1)
if_outliers = np.array(train.data[mask].index)
print(if_outliers)
# +
# identify outliers using extended isolation forest
train_pipe = Pipeline([
    ("outlier",train.ExtendedIsoForest(
        columns=["SibSp", "Parch", "Fare"],
        n_trees=100,
        sample_size=256,
        ExtensionLevel=1,
        anomalies_ratio=0.03,
        drop_outliers=False,))
])
train.data = train_pipe.transform(train.data)
# capture outliers
eif_outliers = np.array(sorted(train_pipe.named_steps["outlier"].outliers_))
print(eif_outliers)
# -
# identify outliers that are identified in multiple algorithms
# reduce(np.intersect1d, (iqr_outliers, if_outliers, eif_outliers))
outliers = reduce(np.intersect1d, (if_outliers, eif_outliers))
print(outliers)
# review outlier identification summary
outlier_summary = train.outlier_summary(iqr_outliers=iqr_outliers,
    if_outliers=if_outliers,
    eif_outliers=eif_outliers
)
outlier_summary
# ##### Validation outliers
# ### Remove
# <a id = 'remove'></a>
# remove outliers from predictors and response
# NOTE(review): this hard-coded index array OVERRIDES the 'outliers' computed
# above — presumably frozen from an earlier run; confirm it is intentional.
outliers = np.array([27, 88, 258, 311, 341, 438, 679, 737, 742])
train.data = train.data.drop(outliers)
train.target = train.target.drop(index=outliers)
# ## Missing data
#
# <a id = 'Missing-data'></a>
# ### Evaluate
# <a id = 'Evaluate1'></a>
# ##### Training missingness
# evaluate missing data
train.eda_missing_summary()
# missingno matrix
msno.matrix(train.data)
# missingno bar
msno.bar(train.data)
# missingno heatmap
msno.heatmap(train.data)
# missingno dendrogram
msno.dendrogram(train.data)
# ##### Validation missingness
# evaluate missing data
valid.eda_missing_summary()
# missingno matrix
msno.matrix(valid.data)
# missingno bar
msno.bar(valid.data)
# missingno heatmap
msno.heatmap(valid.data)
# missingno dendrogram
msno.dendrogram(valid.data)
#
# ##### Training vs. validation missingness
#
# compare features with missing data between training and validation sets
train.missing_col_compare(train=train.data, validation=valid.data)
# ### Impute
# <a id = 'Impute'></a>
# ##### Impute training
# apply imputations to missing data in training dataset:
# Age by the median within its Parch group, Embarked by mode
train_pipe = Pipeline([
    ("imputeMedian",train.GroupbyImputer(null_column="Age", groupby_column="Parch", strategy="median")),
    ("imputeMode", train.ModeImputer(columns=["Embarked"])),
])
train.data = train_pipe.transform(train.data)
train.eda_missing_summary()
# ##### Impute validation
# apply imputations to missing data in validation dataset, reusing the
# group medians learned on the training data (no leakage from validation)
validPipe = Pipeline([
    ("imputeMedian",valid.GroupbyImputer(null_column="Age",groupby_column="Parch",train=False,trainValue=train_pipe.named_steps["imputeMedian"].trainValue_,)),
    # NOTE(review): 'numberalImputer' looks like a typo for the project's
    # numeric imputer class — verify against mlmachine's API.
    ("imputeMedian2",valid.numberalImputer(columns=["Fare", "Age"], strategy="median")),
])
valid.data = validPipe.transform(valid.data)
valid.eda_missing_summary()
# ## Engineering
# <a id = 'Engineering'></a>
# ### Evaluate
# <a id = 'Evaluate3'></a>
# ### Engineer
# <a id = 'Engineer'></a>
# ##### Engineer training
# +
# parse titles to learn gender, and identify rare titles which may convey status
title = [i.split(",")[1].split(".")[0].strip() for i in train.data["Name"]]
train.data["Title"] = pd.Series(title)
# collapse infrequent/honorific titles into a single 'Rare' level
train.data["Title"] = train.data["Title"].replace(
    [
        "Lady",
        "the Countess",
        "Countess",
        "Capt",
        "Col",
        "Don",
        "Dr",
        "Major",
        "Rev",
        "Sir",
        "Jonkheer",
        "Dona",
    ],
    "Rare",
)
# ordinal-encode titles: 0=Master, 1=female titles, 2=Mr, 3=Rare
train.data["Title"] = train.data["Title"].map(
    {"Master": 0, "Miss": 1, "Ms": 1, "Mme": 1, "Mlle": 1, "Mrs": 1, "Mr": 2, "Rare": 3}
)
# distill cabin feature: keep the deck letter, 'X' when cabin is missing
train.data["CabinQuarter"] = pd.Series(
    [i[0] if not pd.isnull(i) else "X" for i in train.data["Cabin"]]
)
# family size features and binning (custom edges plus quartile bins)
train.data["FamilySize"] = train.data["SibSp"] + train.data["Parch"] + 1
customBinDict = {"Age": [16, 32, 48, 64], "FamilySize": [1, 2, 4]}
train_pipe = Pipeline([
    ("customBin", train.CustomBinner(customBinDict=customBinDict)),
    ("percentileBin",train.PercentileBinner(columns=["Age", "Fare"], percs=[25, 50, 75])),
])
train.data = train_pipe.transform(train.data)
# drop features now superseded by the engineered columns
train.data, train.data.mlm_dtypes = train.featureDropper(
    columns=["Name", "Cabin"], data=train.data, mlm_dtypes=train.data.mlm_dtypes
)
# -
# print new columns not yet registered in the mlm dtype map
for col in train.data.columns:
    if (
        col not in train.data.mlm_dtypes["object"]
        and col not in train.data.mlm_dtypes["number"]
    ):
        print(col)
# +
# append new number features
for col in ["FamilySize"]:
    train.data.mlm_dtypes["number"].append(col)
# append new object features
for col in [
    "AgeCustomBin",
    "AgePercBin",
    "FarePercBin",
    "FamilySize",
    "FamilySizeCustomBin",
    "Title",
    "CabinQuarter",
]:
    train.data.mlm_dtypes["object"].append(col)
# -
# evaluate additional features
for feature in train.data.mlm_dtypes['object']:
    train.eda_cat_target_cat_feat(feature=feature)
# ##### Engineer validation
# +
# mirror the training-set feature engineering on the validation data
# parse titles to learn gender, and identify rare titles which may convey status
title = [i.split(",")[1].split(".")[0].strip() for i in valid.data["Name"]]
valid.data["Title"] = pd.Series(title)
# collapse infrequent/honorific titles into a single 'Rare' level
valid.data["Title"] = valid.data["Title"].replace(
    [
        "Lady",
        "the Countess",
        "Countess",
        "Capt",
        "Col",
        "Don",
        "Dr",
        "Major",
        "Rev",
        "Sir",
        "Jonkheer",
        "Dona",
    ],
    "Rare",
)
# ordinal-encode titles using the same mapping as the training data
valid.data["Title"] = valid.data["Title"].map(
    {"Master": 0, "Miss": 1, "Ms": 1, "Mme": 1, "Mlle": 1, "Mrs": 1, "Mr": 2, "Rare": 3}
)
# distill cabin feature: keep the deck letter, 'X' when cabin is missing
valid.data["CabinQuarter"] = pd.Series(
    [i[0] if not pd.isnull(i) else "X" for i in valid.data["Cabin"]]
)
# additional features; percentile bins reuse cut points learned on training data
valid.data["FamilySize"] = valid.data["SibSp"] + valid.data["Parch"] + 1
validPipe = Pipeline([
    ("customBin", valid.CustomBinner(customBinDict=customBinDict)),
    ("percentileBin",valid.PercentileBinner(train=False, trainValue=train_pipe.named_steps["percentileBin"].trainValue_)),
])
valid.data = validPipe.transform(valid.data)
# drop features now superseded by the engineered columns
valid.data, valid.data.mlm_dtypes = valid.featureDropper(
    columns=["Name", "Cabin"], data=valid.data, mlm_dtypes=valid.data.mlm_dtypes
)
# -
# print new columns not yet registered in the mlm dtype map
for col in valid.data.columns:
    if (
        col not in valid.data.mlm_dtypes["object"]
        and col not in valid.data.mlm_dtypes["number"]
    ):
        print(col)
# +
# append new number features
for col in ["FamilySize"]:
    valid.data.mlm_dtypes["number"].append(col)
# append new object features
for col in [
    "AgeCustomBin",
    "AgePercBin",
    "FarePercBin",
    "FamilySize",
    "FamilySizeCustomBin",
    "Title",
    "CabinQuarter",
]:
    valid.data.mlm_dtypes["object"].append(col)
# -
# ## Encoding
# <a id = 'Encoding'></a>
# ### Evaluate
# <a id = 'Evaluate2'></a>
# ##### Training feature evaluation
# counts of unique values in training data string columns
train.data[train.data.mlm_dtypes["object"]].apply(pd.Series.nunique, axis=0)
# print unique values in each object column
for col in train.data[train.data.mlm_dtypes["object"]]:
    try:
        print(col, np.unique(train.data[col]))
    # fix: was a bare 'except: pass', which also swallowed KeyboardInterrupt
    # and real bugs; np.unique raises TypeError/ValueError on object columns
    # with mixed types or NaN, which is the case being skipped here
    except (TypeError, ValueError):
        pass
# ##### Validation feature evaluation
# counts of unique values in validation data string columns
valid.data[valid.data.mlm_dtypes["object"]].apply(pd.Series.nunique, axis=0)
# print unique values in each object column
for col in valid.data[valid.data.mlm_dtypes["object"]]:
    if col not in ["Name", "Cabin"]:
        print(col, np.unique(valid.data[col]))
# ##### Training vs. validation
# identify values that are present in the training data but not the validation data, and vice versa
for col in train.data.mlm_dtypes["object"]:
    if col not in ["Name", "Cabin"]:
        train_values = train.data[col].unique()
        valid_values = valid.data[col].unique()
        train_diff = set(train_values) - set(valid_values)
        valid_diff = set(valid_values) - set(train_values)
        if len(train_diff) > 0 or len(valid_diff) > 0:
            print("\n\n*** " + col)
            print("Value present in training data, not in validation data")
            print(train_diff)
            print("Value present in validation data, not in training data")
            print(valid_diff)
# ### Encode
# <a id = 'Encode'></a>
# ##### Encode training
# +
# ordinal column encoding instructions
# Pclass already carries its ordinal meaning, so the mapping is identity
ordobject_columns = {"Pclass": {1: 1, 2: 2, 3: 3}}
# nominal columns
nomobject_columns = ["Embarked", "Sex", "CabinQuarter", "Title"]
# apply encodings to training data
# dropFirst=True avoids the dummy-variable trap on the training side
train_pipe = Pipeline(
    [
        ("encodeOrdinal", train.CustomOrdinalEncoder(encodings=ordobject_columns)),
        ("dummyNominal", train.Dummies(columns=nomobject_columns, dropFirst=True)),
    ]
)
train.data = train_pipe.transform(train.data)
train.data[:5]
# -
# ##### Encode validation
# apply encodings to validation data
# dropFirst=False keeps every dummy so FeatureSync can align the validation
# columns exactly to the training frame's column set
validPipe = Pipeline(
    [
        ("encodeOrdinal", valid.CustomOrdinalEncoder(encodings=ordobject_columns)),
        ("dummyNominal", valid.Dummies(columns=nomobject_columns, dropFirst=False)),
        ("sync", valid.FeatureSync(trainCols=train.data.columns)),
    ]
)
valid.data = validPipe.transform(valid.data)
valid.data[:5]
# ## Transformation
# <a id = 'Transformation'></a>
# ### Evaluate
# <a id = 'Evaluate4'></a>
# ##### Training feature transformation
# evaluate skew of number features - training data
train.skew_summary()
# ##### Validation feature transformation
# evaluate skew of number features - validation data
valid.skew_summary()
# ### Transform
# <a id = 'Transform'></a>
# ##### Transform training
# skew correct in training dataset, which also learns te best lambda value for each columns
# (columns with skew below 0.75 or too many zeros are left untouched)
train_pipe = Pipeline([
    ("skew",train.SkewTransform(columns=train.data.mlm_dtypes["number"], skewMin=0.75, pctZeroMax=1.0, verbose = True))
])
train.data = train_pipe.transform(train.data)
train.skew_summary()
# ##### Transform validation
# skew correction in validation dataset using lambdas learned on training data
# so both splits receive an identical transformation
validPipe = Pipeline([
    ("skew",valid.SkewTransform(train=False, trainValue=train_pipe.named_steps["skew"].trainValue_))
])
valid.data = validPipe.transform(valid.data)
valid.skew_summary()
# ## Outliers (final)
#
# <a id = 'Outliers-final'></a>
# ### Evaluate
# <a id = 'Evaluate5'></a>
# +
# identify outliers using IQR
# flags observations that fall outside 1.5*IQR in at least 5 features
train_pipe = Pipeline([
    ("outlier",train.OutlierIQR(
        outlier_count=5,
        iqr_step=1.5,
        features=train.data.columns,
        drop_outliers=False,))
])
train.data = train_pipe.transform(train.data)
# capture outliers
iqr_outliers = np.array(sorted(train_pipe.named_steps["outlier"].outliers_))
print(iqr_outliers)
# +
# identify outliers using Isolation Forest
# NOTE(review): the `behaviour` parameter was deprecated in sklearn 0.22 and
# removed in 0.24 -- this cell only runs on older sklearn versions; confirm
clf = IsolationForest(
    behaviour="new", max_samples=train.data.shape[0], random_state=0, contamination=0.01
)
clf.fit(train.data[train.data.columns])
preds = clf.predict(train.data[train.data.columns])
# evaluate index values
# IsolationForest marks anomalies with -1; collect their index labels
mask = np.isin(preds, -1)
if_outliers = np.array(train.data[mask].index)
print(if_outliers)
# +
# identify outliers using extended isolation forest
train_pipe = Pipeline([
    ("outlier",train.ExtendedIsoForest(
        columns=train.data.columns,
        n_trees=100,
        sample_size=256,
        ExtensionLevel=1,
        anomalies_ratio=0.03,
        drop_outliers=False,))
])
train.data = train_pipe.transform(train.data)
# capture outliers
eif_outliers = np.array(sorted(train_pipe.named_steps["outlier"].outliers_))
print(eif_outliers)
# -
# identify outliers that are identified in multiple algorithms
# keep only observations flagged by all three detectors
# NOTE(review): `reduce` must come from functools -- assumed imported earlier in the notebook
outliers = reduce(np.intersect1d, (iqr_outliers, if_outliers, eif_outliers))
# outliers = reduce(np.intersect1d, (if_outliers, eif_outliers))
print(outliers)
# review outlier identification summary
outlier_summary = train.outlier_summary(iqr_outliers=iqr_outliers,
                                      if_outliers=if_outliers,
                                      eif_outliers=eif_outliers
)
outlier_summary
# ### Remove
# <a id = 'remove1'></a>
# +
# # remove outlers from predictors and response
# outliers = np.array([59,121])
# train.data = train.data.drop(outliers)
# train.target = train.target.drop(index=outliers)
# -
# # Data evaluation
# <a id = 'Data evaluation'></a>
# ## Feature importance
# <a id = 'Feature-importance'></a>
# +
# generate feature importance summary
# rank features by importance across several tree-based classifiers
estimators = [
    "LGBMClassifier",
    "RandomForestClassifier",
    "GradientBoostingClassifier",
    "ExtraTreesClassifier",
    "AdaBoostClassifier",
    "XGBClassifier",
]
featureSummary = train.feature_selector_suite(estimators=estimators)
# +
# calculate cross-validation performance
# evaluate each estimator on progressively smaller feature subsets
estimators = [
    "SVC",
    "LGBMClassifier",
    "LogisticRegression",
    "XGBClassifier",
    "RandomForestClassifier",
    "GradientBoostingClassifier",
    "AdaBoostClassifier",
    "ExtraTreesClassifier",
    "KNeighborsClassifier",
]
cv_summary = train.feature_selector_cross_val(
    estimators=estimators,
    featureSummary=featureSummary,
    metrics=["accuracy","f1_macro","roc_auc"],
    n_folds=8,
)
# -
# visualize CV performance for diminishing feature set
train.feature_selector_results_plot(
    cv_summary=cv_summary,
    featureSummary=featureSummary,
    metric="accuracy",
    show_features=True,
)
# tabulate which features survive at the best-scoring subset size
df = train.features_used_summary(
    cv_summary=cv_summary, metric="accuracy", featureSummary=featureSummary
)
df
# ## Rationality
# <a id = 'Rationality'></a>
# percent difference summary
# absolute percent difference between validation and training summary stats;
# the +1 shift keeps the denominator non-zero for all-zero statistics.
# describe() is computed once per split instead of three times inline.
train_desc = train.data.describe() + 1
valid_desc = valid.data.describe() + 1
df_diff = abs((valid_desc - train_desc) / train_desc * 100)
# blank out exact matches so only real discrepancies are displayed
# (abs() guarantees non-negative values, so no negative-mask step is needed)
df_diff = df_diff.replace({0: np.nan})
df_diff = df_diff.fillna("")
display(df_diff)
display(train.data[df_diff.columns].describe())
display(valid.data[df_diff.columns].describe())
# ## Value override
# <a id = 'Value override'></a>
# +
# change clearly erroneous value to what it probably was
# exploreValid.data['GarageYrBlt'].replace({2207 : 2007}, inplace = True)
# -
# ## number feature EDA
# <a id = 'number-feature-EDA3'></a>
# ## Correlation
# <a id = 'Correlation3'></a>
# heat map of the features most strongly correlated with the target
# (only correlations with absolute value >= 0.2 are shown)
plot = PrettierPlot(chart_scale=15)
canvas = plot.make_canvas()
plot.corr_heatmap_target(df=train.data, target=train.target, thresh=0.2, ax=canvas)
# # Modeling
# <a id = 'Modeling'></a>
# ## Data preparation
# <a id = 'Data-preparation-1'></a>
# ##### Prepare training data
# + code_folding=[15]
# import training data
# NOTE(review): double slash in the S3 key ("kaggle-titanic//train.csv") --
# S3 treats "//" as part of the key; confirm the object actually lives there
df_train = pd.read_csv("s3://tdp-ml-datasets/kaggle-titanic//train.csv")
train = mlm.Machine(
    data=df_train,
    target="Survived",
    remove_features=["PassengerId", "Ticket"],
    identify_as_object=["Pclass", "SibSp", "Parch"],
    target_type="object",
)
### feature engineering
# parse titles to learn gender, and identify rare titles which may convey status
title = [i.split(",")[1].split(".")[0].strip() for i in train.data["Name"]]
# pd.Series alignment assumes train.data still has a default RangeIndex here
# (outlier rows are only dropped further below, so this holds)
train.data["Title"] = pd.Series(title)
train.data["Title"] = train.data["Title"].replace(
    [
        "Lady",
        "the Countess",
        "Countess",
        "Capt",
        "Col",
        "Don",
        "Dr",
        "Major",
        "Rev",
        "Sir",
        "Jonkheer",
        "Dona",
    ],
    "Rare",
)
# collapse titles into 4 ordinal groups; titles outside the map become NaN
train.data["Title"] = train.data["Title"].map(
    {"Master": 0, "Miss": 1, "Ms": 1, "Mme": 1, "Mlle": 1, "Mrs": 1, "Mr": 2, "Rare": 3}
)
# distill cabin feature
# first letter of the cabin code; "X" marks missing cabins
train.data["CabinQuarter"] = pd.Series(
    [i[0] if not pd.isnull(i) else "X" for i in train.data["Cabin"]]
)
# family size features
train.data["FamilySize"] = train.data["SibSp"] + train.data["Parch"] + 1
# custom bin specifications
customBinDict = {"Age": [16, 32, 48, 64], "FamilySize": [1, 2, 4]}
# object column specifications
ordobject_columns = {"Pclass": {1: 1, 2: 2, 3: 3}}
nomobject_columns = ["Embarked", "Sex", "CabinQuarter", "Title"]
# remove outliers
# index labels determined by the multi-detector consensus analysis above
outliers = np.array([27, 88, 258, 311, 341, 438, 679, 737, 742])
train.data = train.data.drop(outliers)
train.target = train.target.drop(index=outliers)
### pipeline
# consolidated version of the step-by-step preprocessing performed earlier
train_pipe = Pipeline([
    ('imputeMedian', train.GroupbyImputer(null_column = 'Age', groupby_column = 'Parch', strategy = 'median')),
    ('imputeMode', train.ModeImputer(columns = ['Embarked'])),
    ('customBin', train.CustomBinner(customBinDict = customBinDict)),
    ('percentileBin', train.PercentileBinner(columns = ['Age','Fare'], percs = [10, 25, 50, 75, 90])),
    ('encodeOrdinal', train.CustomOrdinalEncoder(encodings = ordobject_columns)),
    ('dummyNominal', train.Dummies(columns = nomobject_columns, dropFirst = True)),
    ('skew', train.SkewTransform(columns = train.data.mlm_dtypes['number'], skewMin = 0.75, pctZeroMax = 1.0)),
])
train.data = train_pipe.transform(train.data)
# drop features
train.data, train.data.mlm_dtypes = train.featureDropper(
    columns=["Name", "Cabin"], data=train.data, mlm_dtypes=train.data.mlm_dtypes
)
print('completed')
# -
# ##### Prepare validation data
# + code_folding=[13]
### import valid data
df_valid = pd.read_csv("s3://tdp-ml-datasets/kaggle-titanic//test.csv")
# no target on the validation side -- this is the Kaggle test set
valid = mlm.Machine(
    data=df_valid,
    remove_features=["PassengerId", "Ticket"],
    identify_as_object=["Pclass", "SibSp", "Parch"],
)
### feature engineering
# parse titles to learn gender, and identify rare titles which may convey status
# (mirrors the training-side feature engineering above)
title = [i.split(",")[1].split(".")[0].strip() for i in valid.data["Name"]]
valid.data["Title"] = pd.Series(title)
valid.data["Title"] = valid.data["Title"].replace(
    [
        "Lady",
        "the Countess",
        "Countess",
        "Capt",
        "Col",
        "Don",
        "Dr",
        "Major",
        "Rev",
        "Sir",
        "Jonkheer",
        "Dona",
    ],
    "Rare",
)
valid.data["Title"] = valid.data["Title"].map(
    {"Master": 0, "Miss": 1, "Ms": 1, "Mme": 1, "Mlle": 1, "Mrs": 1, "Mr": 2, "Rare": 3}
)
# distill cabin feature
valid.data["CabinQuarter"] = pd.Series(
    [i[0] if not pd.isnull(i) else "X" for i in valid.data["Cabin"]]
)
# additional features
valid.data["FamilySize"] = valid.data["SibSp"] + valid.data["Parch"] + 1
### pipeline
# every train=False step replays statistics learned on the training split
# NOTE(review): `numberalImputer` looks like a mechanical rename of
# "numericalImputer" -- confirm the method name against the mlm library
validPipe = Pipeline(
    [
        ("imputeMedian",valid.GroupbyImputer(null_column="Age",groupby_column="Parch",train=False,trainValue=train_pipe.named_steps["imputeMedian"].trainValue_)),
        ("imputeMedian2",valid.numberalImputer(columns=["Fare", "Age"], strategy="median",train=False,trainValue=train.data)),
        ("customBin", valid.CustomBinner(customBinDict=customBinDict)),
        ("percentileBin",valid.PercentileBinner(train=False, trainValue=train_pipe.named_steps["percentileBin"].trainValue_)),
        ("encodeOrdinal", valid.CustomOrdinalEncoder(encodings=ordobject_columns)),
        ("dummyNominal", valid.Dummies(columns=nomobject_columns, dropFirst=False)),
        ("sync", valid.FeatureSync(trainCols=train.data.columns)),
        ("skew",valid.SkewTransform(train=False, trainValue=train_pipe.named_steps["skew"].trainValue_)),
    ]
)
valid.data = validPipe.transform(valid.data)
print('completed')
# -
# ## Bayesian hyper-parameter optimization
# <a id = 'Bayesian-hyper-parameter-optimization'></a>
# + code_folding=[]
# parameter space
# hyperopt search space per estimator; hp.choice samples from a discrete set,
# hp.uniform from a continuous range, hp.quniform from a quantized range
all_space = {
    "LGBMClassifier": {
        "class_weight": hp.choice("class_weight", [None]),
        "colsample_bytree": hp.uniform("colsample_bytree", 0.4, 0.7),
        "boosting_type": hp.choice("boosting_type", ["dart"]),
        "subsample": hp.uniform("subsample", 0.5, 1),
        "learning_rate": hp.uniform("learning_rate", 0.15, 0.25),
        "max_depth": hp.choice("max_depth", np.arange(4, 20, dtype=int)),
        "min_child_samples": hp.quniform("min_child_samples", 50, 150, 5),
        "n_estimators": hp.choice("n_estimators", np.arange(100, 4000, 10, dtype=int)),
        "num_leaves": hp.quniform("num_leaves", 30, 70, 1),
        "reg_alpha": hp.uniform("reg_alpha", 0.75, 1.25),
        "reg_lambda": hp.uniform("reg_lambda", 0.0, 1.0),
        "subsample_for_bin": hp.quniform("subsample_for_bin", 100000, 350000, 20000),
    },
    "LogisticRegression": {
        "C": hp.uniform("C", 0.04, 0.1),
        # NOTE(review): penalty="l1" requires solver="liblinear" or "saga";
        # sklearn's default lbfgs solver rejects l1 -- confirm solver handling
        "penalty": hp.choice("penalty", ["l1"]),
    },
    "XGBClassifier": {
        "colsample_bytree": hp.uniform("colsample_bytree", 0.4, 0.7),
        "gamma": hp.quniform("gamma", 0.0, 10, 0.05),
        "learning_rate": hp.quniform("learning_rate", 0.01, 0.2, 0.01),
        "max_depth": hp.choice("max_depth", np.arange(2, 15, dtype=int)),
        "min_child_weight": hp.quniform("min_child_weight", 2.5, 7.5, 1),
        "n_estimators": hp.choice("n_estimators", np.arange(100, 4000, 10, dtype=int)),
        "subsample": hp.uniform("subsample", 0.4, 0.7),
    },
    "RandomForestClassifier": {
        "bootstrap": hp.choice("bootstrap", [True, False]),
        "max_depth": hp.choice("max_depth", np.arange(2, 10, dtype=int)),
        "n_estimators": hp.choice("n_estimators", np.arange(100, 8000, 10, dtype=int)),
        "max_features": hp.choice("max_features", ["sqrt"]),
        "min_samples_split": hp.choice(
            "min_samples_split", np.arange(15, 25, dtype=int)
        ),
        "min_samples_leaf": hp.choice("min_samples_leaf", np.arange(2, 20, dtype=int)),
    },
    "GradientBoostingClassifier": {
        "n_estimators": hp.choice("n_estimators", np.arange(100, 4000, 10, dtype=int)),
        "max_depth": hp.choice("max_depth", np.arange(2, 11, dtype=int)),
        "max_features": hp.choice("max_features", ["sqrt"]),
        "learning_rate": hp.quniform("learning_rate", 0.01, 0.09, 0.01),
        # NOTE(review): loss="deviance" was renamed "log_loss" in sklearn 1.1
        # and removed in 1.3 -- confirm against the pinned sklearn version
        "loss": hp.choice("loss", ["deviance", "exponential"]),
        "min_samples_split": hp.choice(
            "min_samples_split", np.arange(2, 40, dtype=int)
        ),
        "min_samples_leaf": hp.choice("min_samples_leaf", np.arange(2, 40, dtype=int)),
    },
    "AdaBoostClassifier": {
        "n_estimators": hp.choice("n_estimators", np.arange(100, 4000, 10, dtype=int)),
        "learning_rate": hp.quniform("learning_rate", 0.1, 0.25, 0.01),
        "algorithm": hp.choice("algorithm", ["SAMME"]),
    },
    "ExtraTreesClassifier": {
        "n_estimators": hp.choice("n_estimators", np.arange(100, 4000, 10, dtype=int)),
        "max_depth": hp.choice("max_depth", np.arange(2, 15, dtype=int)),
        "min_samples_split": hp.choice(
            "min_samples_split", np.arange(4, 30, dtype=int)
        ),
        "min_samples_leaf": hp.choice("min_samples_leaf", np.arange(2, 20, dtype=int)),
        # NOTE(review): max_features="auto" was removed in sklearn 1.3 -- verify
        "max_features": hp.choice("max_features", ["auto"]),
        "criterion": hp.choice("criterion", ["entropy"]),
    },
    "SVC": {
        "C": hp.uniform("C", 4, 15),
        "decision_function_shape": hp.choice("decision_function_shape", ["ovr"]),
        "gamma": hp.uniform("gamma", 0.00000001, 1.5),
    },
    "KNeighborsClassifier": {
        "algorithm": hp.choice("algorithm", ["ball_tree", "brute"]),
        "n_neighbors": hp.choice("n_neighbors", np.arange(1, 15, dtype=int)),
        "weights": hp.choice("weights", ["uniform"]),
    },
}
# -
# execute bayesian optimization grid search
# results for every trial are appended to a per-run CSV
# NOTE(review): `rundate` is used here but only assigned a few cells below --
# running the notebook top-to-bottom raises NameError; presumably it was set
# in an earlier session. Confirm and define it before this cell.
analysis = "titanic"
train.exec_bayes_optim_search(
    all_space=all_space,
    results_dir="{}_hyperopt_{}.csv".format(rundate, analysis),
    X=train.data,
    y=train.target,
    scoring="accuracy",
    n_folds=2,
    n_jobs=4,
    iters=8,
    verbose=0,
)
# ### Model loss by iteration
# <a id = 'Model-loss-by-iteration'></a>
# read scores summary table
# reload the per-trial results written by exec_bayes_optim_search
analysis = "titanic"
rundate = "20190807"
bayes_optim_summary = pd.read_csv(
    "{}_hyperopt_{}.csv".format(rundate, analysis), na_values="nan"
)
bayes_optim_summary[:5]
# model loss plot
# one loss-vs-iteration chart per estimator present in the log
for estimator in np.unique(bayes_optim_summary["estimator"]):
    train.model_loss_plot(bayes_optim_summary=bayes_optim_summary, estimator=estimator)
# ### Parameter selection by iteration
# <a id = 'Parameter-selection-by-iteration'></a>
# estimator parameter plots
# visualize how each sampled hyperparameter evolves across trials
for estimator in np.unique(bayes_optim_summary["estimator"]):
    train.modelParamPlot(
        bayes_optim_summary=bayes_optim_summary,
        estimator=estimator,
        all_space=all_space,
        n_iter=100,
        chart_scale=15,
    )
# +
# visualize 1000 draws from a candidate hyperopt distribution
# NOTE(review): hp.uniform over np.log bounds samples uniformly between the
# log values themselves -- the commented-out line suggests hp.loguniform was
# the intent; confirm which distribution is wanted
sample_space = {
    'param': hp.uniform('param', np.log(0.4), np.log(0.6))
    # "": 0.000001 + hp.uniform("gamma", 0.000001, 10)
    # 'param2': hp.loguniform('param2', np.log(0.001), np.log(0.01))
}
train.sample_plot(sample_space, 1000)
# -
# pair-wise comparison
# NOTE(review): `df` currently holds the features_used_summary table, but the
# columns requested here (colsample_bytree, learning_rate, iteration, iterLoss)
# belong to the hyperopt trial log -- verify the intended DataFrame
p = PrettierPlot(chart_scale=12)
p.pair_plot_custom(
    df=df,
    columns=["colsample_bytree", "learning_rate", "iteration","iterLoss"],
    gradient_col="iteration",
)
# ## Model performance evaluation - standard models
# <a id = 'Model-performance-evaluation-standard-models'></a>
# pick the single best trial per estimator from the hyperopt log
top_models = train.top_bayes_optim_models(bayes_optim_summary=bayes_optim_summary, num_models=1)
top_models
# +
# classification panel, single model
estimator = "SVC"
model_iter = 135
# estimator = 'GradientBoostingClassifier'; model_iter = 590
# estimator = 'XGBClassifier'; model_iter = 380
# rebuild the model from the logged hyperparameters of the chosen trial
model = train.BayesOptimModelBuilder(
    bayes_optim_summary=bayes_optim_summary, estimator=estimator, model_iter=model_iter
)
train.binary_classification_panel(
    model=model,
    X_train=train.data,
    y_train=train.target,
    cm_labels=["Dies", "Survives"],
    n_folds=5,
)
# -
# create classification reports for training data
# same panel, but for every top trial of every estimator
for estimator, model_iters in top_models.items():
    for model_iter in model_iters:
        model = train.BayesOptimModelBuilder(
            bayes_optim_summary=bayes_optim_summary,
            estimator=estimator,
            model_iter=model_iter,
        )
        train.binary_classification_panel(
            model=model, X_train=train.data, y_train=train.target, cm_labels=['Dies', 'Survives'], n_folds=4
        )
# ## Model explanability
# <a id = 'Feature-importance'></a>
# +
#
# choose one tuned model to explain in the sections below
# estimator = "ExtraTreesClassifier"; model_iter = 145
# estimator = "SVC"; model_iter = 135
estimator = "GradientBoostingClassifier"; model_iter = 490
model = train.BayesOptimModelBuilder(
    bayes_optim_summary=bayes_optim_summary, estimator=estimator, model_iter=model_iter
)
model.fit(train.data.values, train.target.values)
# -
# ### Permutation importance
# <a id = 'Permutation-importance'></a>
# permutation importance - how much does performance decrease when shuffling a certain feature?
perm = PermutationImportance(model.model, random_state=1).fit(train.data, train.target)
eli5.show_weights(perm, feature_names=train.data.columns.tolist())
# ### SHAP values
# <a id = 'SHAP-values'></a>
# ##### Training
# ###### Force plots - single observations
# SHAP force plots for individual observations
# explain the first two training rows one at a time
for i in train.data.index[:2]:
    train.single_shap_viz_tree(obsIx=i, model=model, data=train.data, target=train.target)
# ###### Force plots - multiple observations
# SHAP force plot a set of data
visual = train.multi_shap_viz_tree(obs_ixs=train.data.index, model=model, data=train.data)
visual
# ###### Dependence plots
# generate SHAP values for set of observations
# the ignored middle return value is unused here
obs_data, _, obs_shap_values = train.multi_shap_value_tree(
    obs_ixs=train.data.index, model=model, data=train.data
)
# +
# SHAP dependence plot grid
grid_features = ["Pclass", "Age", "Fare", "SibSp","Parch"]
train.shap_dependence_grid(
    obs_data=obs_data,
    obs_shap_values=obs_shap_values,
    grid_features=grid_features,
    all_features=train.data.columns,
    dot_size=35,
    alpha=0.5,
)
# +
# single SHAP dependence plot
train.shap_dependence_plot(
    obs_data=obs_data,
    obs_shap_values=obs_shap_values,
    scatter_feature="Age",
    color_feature="Parch",
    feature_names=train.data.columns,
    dot_size=50,
    alpha=0.5,
    ax=ax
)
# +
# SHAP dependence plots for all feature relative to an interaction feature
# iterate features in descending order of total absolute SHAP contribution
feature_names = train.data.columns.tolist()
top_shap = np.argsort(-np.sum(np.abs(obs_shap_values), 0))
for top_ix in top_shap:
    p = PrettierPlot()
    ax = p.make_canvas()
    train.shap_dependence_plot(
        obs_data=obs_data,
        obs_shap_values=obs_shap_values,
        scatter_feature=feature_names[top_ix],
        color_feature="Age",
        feature_names=feature_names,
        dot_size=50,
        alpha=0.5,
        ax=ax,
    )
# -
# ###### Summary plots
# SHAP summary plot
train.shap_summary_plot(
    obs_data=obs_data,
    obs_shap_values=obs_shap_values,
    feature_names=train.data.columns,
)
# ##### Validation
# ###### Force plots - single observations
# SHAP force plots for individual observations
# same explainability pass as above, applied to the validation split
# (no target argument here -- the validation set is unlabeled)
for i in valid.data.index[:2]:
    valid.single_shap_viz_tree(obsIx=i, model=model, data=valid.data)
# ###### Force plots - multiple observations
# SHAP force plot a set of data
visual = valid.multi_shap_viz_tree(obs_ixs=valid.data.index, model=model, data=valid.data)
visual
# ###### Dependence plots
# generate SHAP values for set of observations
obs_data, _, obs_shap_values = valid.multi_shap_value_tree(
    obs_ixs=valid.data.index, model=model, data=valid.data
)
# +
# SHAP dependence plot grid
grid_features = ["Pclass", "Age", "Fare", "SibSp","Parch"]
valid.shap_dependence_grid(
    obs_data=obs_data,
    obs_shap_values=obs_shap_values,
    grid_features=grid_features,
    all_features=valid.data.columns,
    dot_size=35,
    alpha=0.5,
)
# +
# single SHAP dependence plot
p = PrettierPlot()
ax = p.make_canvas()
valid.shap_dependence_plot(
    obs_data=obs_data,
    obs_shap_values=obs_shap_values,
    scatter_feature="Age",
    color_feature="Parch",
    feature_names=valid.data.columns,
    dot_size=50,
    alpha=0.5,
    ax=ax
)
# +
# SHAP dependence plots for all feature relative to an interaction feature
feature_names = valid.data.columns.tolist()
top_shap = np.argsort(-np.sum(np.abs(obs_shap_values), 0))
for top_ix in top_shap:
    p = PrettierPlot()
    ax = p.make_canvas()
    valid.shap_dependence_plot(
        obs_data=obs_data,
        obs_shap_values=obs_shap_values,
        scatter_feature=feature_names[top_ix],
        color_feature="Age",
        feature_names=feature_names,
        dot_size=50,
        alpha=0.5,
        ax=ax,
    )
# -
# ###### Summary plots
# SHAP summary plot
valid.shap_summary_plot(
    obs_data=obs_data,
    obs_shap_values=obs_shap_values,
    feature_names=valid.data.columns,
)
# ## Submission - standard models
# <a id = 'Submission-standard-models'></a>
# +
## standard model fit and predict
# select estimator and iteration
# (public-leaderboard scores of previous submissions recorded inline)
# estimator = "LGBMClassifier"; model_iter = 668 #142 survived, 0.77511
# estimator = "XGBClassifier"; model_iter = 380 #151 survived, 0.7655
# estimator = "RandomForestClassifier"; model_iter = 405 #148 survived, 0.79425
# estimator = "GradientBoostingClassifier"; model_iter = 590 #142 survived, 0.7655
estimator = "SVC"; model_iter = 135 #154 survived, 0.755
# extract params and instantiate model
model = train.BayesOptimModelBuilder(
    bayes_optim_summary=bayes_optim_summary, estimator=estimator, model_iter=model_iter
)
model.fit(train.data.values, train.target.values)
# fit model and make predictions
y_pred = model.predict(valid.data.values)
# sanity check: total number of predicted survivors
print(sum(y_pred))
# -
# generate prediction submission file
submit = pd.DataFrame({"PassengerId": df_valid.PassengerId, "Survived": y_pred})
submit.to_csv("submission.csv", index=False)
# + [markdown] heading_collapsed=true
# # Stacking
# + [markdown] hidden=true
# <a id = 'Stacking'></a>
# + [markdown] heading_collapsed=true hidden=true
# ## Primary models
# + [markdown] hidden=true
# <a id = 'Primary-models'></a>
# + hidden=true
# get out-of-fold predictions
# each top model contributes one OOF prediction column (level-1 features)
oof_train, oof_valid, columns = train.model_stacker(
    models=top_models,
    bayes_optim_summary=bayes_optim_summary,
    X_train=train.data.values,
    y_train=train.target.values,
    X_valid=valid.data.values,
    n_folds=10,
    n_jobs=10,
)
# + hidden=true
# view correlations of predictions
# low correlation between base models is what makes stacking worthwhile
p = PrettierPlot()
ax = p.make_canvas()
p.corr_heatmap(
    df=pd.DataFrame(oof_train, columns=columns), annot=True, ax=ax, vmin=0
)
# + [markdown] heading_collapsed=true hidden=true
# ## Meta model
# + [markdown] hidden=true
# <a id = 'Meta-model'></a>
# + code_folding=[1] hidden=true
# parameter space
# hyperopt search space for the level-2 (meta) model trained on OOF predictions
all_space = {
    "LGBMClassifier": {
        "class_weight": hp.choice("class_weight", [None]),
        "colsample_bytree": hp.uniform("colsample_bytree", 0.4, 0.7),
        "boosting_type": hp.choice("boosting_type", ["dart"]),
        "subsample": hp.uniform("subsample", 0.5, 1),
        "learning_rate": hp.uniform("learning_rate", 0.15, 0.25),
        "max_depth": hp.choice("max_depth", np.arange(4, 20, dtype=int)),
        "min_child_samples": hp.quniform("min_child_samples", 50, 150, 5),
        "n_estimators": hp.choice("n_estimators", np.arange(100, 4000, 10, dtype=int)),
        "num_leaves": hp.quniform("num_leaves", 30, 70, 1),
        "reg_alpha": hp.uniform("reg_alpha", 0.75, 1.25),
        "reg_lambda": hp.uniform("reg_lambda", 0.0, 1.0),
        "subsample_for_bin": hp.quniform("subsample_for_bin", 100000, 350000, 20000),
    },
    "XGBClassifier": {
        "colsample_bytree": hp.uniform("colsample_bytree", 0.4, 0.7),
        "gamma": hp.quniform("gamma", 0.0, 10, 0.05),
        "learning_rate": hp.quniform("learning_rate", 0.01, 0.2, 0.01),
        "max_depth": hp.choice("max_depth", np.arange(2, 15, dtype=int)),
        "min_child_weight": hp.quniform("min_child_weight", 2.5, 7.5, 1),
        "n_estimators": hp.choice("n_estimators", np.arange(100, 4000, 10, dtype=int)),
        "subsample": hp.uniform("subsample", 0.4, 0.7),
    },
    "RandomForestClassifier": {
        "bootstrap": hp.choice("bootstrap", [True, False]),
        "max_depth": hp.choice("max_depth", np.arange(2, 10, dtype=int)),
        "n_estimators": hp.choice("n_estimators", np.arange(100, 8000, 10, dtype=int)),
        "max_features": hp.choice("max_features", ["sqrt"]),
        "min_samples_split": hp.choice(
            "min_samples_split", np.arange(15, 25, dtype=int)
        ),
        "min_samples_leaf": hp.choice("min_samples_leaf", np.arange(2, 20, dtype=int)),
    },
    "GradientBoostingClassifier": {
        "n_estimators": hp.choice("n_estimators", np.arange(100, 4000, 10, dtype=int)),
        "max_depth": hp.choice("max_depth", np.arange(2, 11, dtype=int)),
        "max_features": hp.choice("max_features", ["sqrt"]),
        "learning_rate": hp.quniform("learning_rate", 0.01, 0.09, 0.01),
        # NOTE(review): loss="deviance" was renamed/removed in sklearn >=1.1 -- verify
        "loss": hp.choice("loss", ["deviance", "exponential"]),
        "min_samples_split": hp.choice(
            "min_samples_split", np.arange(2, 40, dtype=int)
        ),
        "min_samples_leaf": hp.choice("min_samples_leaf", np.arange(2, 40, dtype=int)),
    },
    "SVC": {
        "C": hp.uniform("C", 0.00000001, 15),
        "decision_function_shape": hp.choice("decision_function_shape", ["ovr", "ovo"]),
        "gamma": hp.uniform("gamma", 0.00000001, 1.5),
    },
}
# + hidden=true
# execute bayesian optimization grid search
# tune the meta model on the OOF prediction matrix rather than raw features
train.exec_bayes_optim_search(
    all_space=all_space,
    results_dir="{}_hyperopt_meta_{}.csv".format(rundate, analysis),
    X=oof_train,
    y=train.target,
    scoring="accuracy",
    n_folds=8,
    n_jobs=10,
    iters=1000,
    verbose=0,
)
# + hidden=true
# read scores summary table
# "titanic" (lowercase) must match the filename written by the search cell
# above, which formatted with analysis == "titanic"; the previous capitalized
# "Titanic" produced a missing-file error on case-sensitive filesystems.
# NOTE(review): unlike the standard-model read, no na_values="nan" is passed
# here -- confirm whether missing params should be parsed as NaN
analysis = "titanic"
rundate = "20190807"
bayes_optim_summary_meta = pd.read_csv("{}_hyperopt_meta_{}.csv".format(rundate, analysis))
bayes_optim_summary_meta[:5]
# + hidden=true
# model loss plot
# one loss-vs-iteration chart per meta estimator
for estimator in np.unique(bayes_optim_summary_meta["estimator"]):
    train.model_loss_plot(bayes_optim_summary=bayes_optim_summary_meta, estimator=estimator)
# + hidden=true
# estimator parameter plots
for estimator in np.unique(bayes_optim_summary_meta["estimator"]):
    train.modelParamPlot(
        bayes_optim_summary=bayes_optim_summary_meta,
        estimator=estimator,
        all_space=all_space,
        n_iter=100,
        chart_scale=15,
    )
# + [markdown] heading_collapsed=true hidden=true
# ## Model performance evaluation - stacked models
# + [markdown] hidden=true
# <a id = 'Model-performance-evaluation-stacked-models'></a>
# + hidden=true
# best meta-model trial per estimator
top_models = train.top_bayes_optim_models(
    bayes_optim_summary=bayes_optim_summary_meta, num_models=1
)
top_models
# + hidden=true
# classification panel, single model
estimator = "SVC"; model_iter = 135
# estimator = 'GradientBoostingClassifier'; model_iter = 590
# estimator = 'XGBClassifier'; model_iter = 380
model = train.BayesOptimModelBuilder(
    bayes_optim_summary=bayes_optim_summary_meta, estimator=estimator, model_iter=model_iter
)
# NOTE(review): this call uses `labels=` while the standard-model panel above
# uses `cm_labels=` -- confirm which keyword the panel API expects
train.binary_classification_panel(
    model=model, X_train=oof_train, y_train=train.target, labels=[0, 1], n_folds=4
)
# + hidden=true
# create classification reports for training data
for estimator, model_iters in top_models.items():
    for model_iter in model_iters:
        model = train.BayesOptimModelBuilder(
            bayes_optim_summary=bayes_optim_summary_meta,
            estimator=estimator,
            model_iter=model_iter,
        )
        train.binary_classification_panel(
            model=model, X_train=oof_train, y_train=train.target, labels=[0, 1], n_folds=4
        )
# + [markdown] heading_collapsed=true hidden=true
# ## Submission - stacked models
# + [markdown] hidden=true
# <a id = 'Submission-stacked-models'></a>
# + hidden=true
# best second level learning model
# (public-leaderboard scores of previous stacked submissions recorded inline)
# estimator = "LGBMClassifier"; model_iter = 876 #0.75119
# estimator = "XGBClassifier"; model_iter = 821, #0.779
# estimator = "RandomForestClassifier"; model_iter = 82
# estimator = "GradientBoostingClassifier"; model_iter = 673 #0.77511
estimator = "SVC"; model_iter = 538 # 0.77511
# extract params and instantiate model
model = train.BayesOptimModelBuilder(
    bayes_optim_summary=bayes_optim_summary_meta, estimator=estimator, model_iter=model_iter
)
# fit the meta model on OOF training predictions, predict from OOF valid
model.fit(oof_train, train.target.values)
y_pred = model.predict(oof_valid)
print(sum(y_pred))
# + hidden=true
# generate prediction submission file
# NOTE(review): overwrites the standard-model submission.csv written earlier
submit = pd.DataFrame({"PassengerId": df_valid.PassengerId, "Survived": y_pred})
submit.to_csv("submission.csv", index=False)
# source artifact: articles/kernel_orig.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Tce3stUlHN0L"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" id="tuOe1ymfHZPu"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="qFdPvlXBOdUN"
# # 그래디언트 및 자동 미분 소개
# + [markdown] id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td> <a target="_blank" href="https://www.tensorflow.org/guide/autodiff" class=""><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a> </td>
# <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/autodiff.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행</a></td>
# <td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/autodiff.ipynb" class=""><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서 소스 보기</a> </td>
# <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/guide/autodiff.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드</a></td>
# </table>
# + [markdown] id="r6P32iYYV27b"
# ## 자동 미분 및 그래디언트
#
# [자동 미분](https://en.wikipedia.org/wiki/Automatic_differentiation)은 신경망 학습을 위한 [역전파](https://en.wikipedia.org/wiki/Backpropagation)와 같은 머신러닝 알고리즘을 구현하는 데 유용합니다.
#
# 이 가이드에서는 특히 [즉시 실행](eager.ipynb)에서 TensorFlow로 그래디언트를 계산하는 방법을 알아봅니다.
# + [markdown] id="MUXex9ctTuDB"
# ## 설정
# + id="IqR2PQG4ZaZ0"
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# + [markdown] id="xHxb-dlhMIzW"
# ## 그래디언트 계산하기
#
# 자동으로 미분하기 위해 TensorFlow는 *정방향 패스* 동안 어떤 연산이 어떤 순서로 발생하는지 기억해야 합니다. 그런 다음 *역방향 패스* 동안 TensorFlow는 이 연산 목록을 역순으로 이동하여 그래디언트를 계산합니다.
# + [markdown] id="1CLWJl0QliB0"
# ## 그래디언트 테이프
#
# 텐서플로는 자동 미분(주어진 입력 변수에 대한 연산의 그래디언트(gradient)를 계산하는 것)을 위한 [tf.GradientTape](https://www.tensorflow.org/api_docs/python/tf/GradientTape) API를 제공합니다. `tf.GradientTape`는 컨텍스트(context) 안에서 실행된 모든 연산을 테이프(tape)에 "기록"합니다. 그 다음 텐서플로는 [후진 방식 자동 미분(reverse mode differentiation)](https://en.wikipedia.org/wiki/Automatic_differentiation)을 사용해 테이프에 "기록된" 연산의 그래디언트를 계산합니다.
#
# 예를 들면:
# + id="Xq9GgTCP7a4A"
# A trainable tf.Variable is watched by the tape automatically.
x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    # y = x**2 is recorded on the tape for later differentiation.
    y = x**2
# + [markdown] id="CR9tFAP_7cra"
# 일부 연산을 기록한 후에는 `GradientTape.gradient(target, sources)`를 사용하여 일부 소스(종종 모델 변수)에 상대적인 일부 대상(종종 손실)의 그래디언트를 계산합니다.
# + id="LsvrwF6bHroC"
# dy = 2x * dx
# gradient(target, sources): d(y)/d(x) = 2*x = 6 at x = 3.
dy_dx = tape.gradient(y, x)
dy_dx.numpy()  # -> 6.0
# + [markdown] id="Q2_aqsO25Vx1"
# 위의 예제는 스칼라를 사용하지만, `tf.GradientTape`는 모든 텐서에서 쉽게 작동합니다.
# + id="vacZ3-Ws5VdV"
# Tapes work on tensors of any shape, not just scalars.
w = tf.Variable(tf.random.normal((3, 2)), name='w')
b = tf.Variable(tf.zeros(2, dtype=tf.float32), name='b')
x = [[1., 2., 3.]]
# persistent=True keeps the tape's resources alive so gradient() can be
# called more than once (this tape is reused in the next few cells).
with tf.GradientTape(persistent=True) as tape:
    y = x @ w + b
    loss = tf.reduce_mean(y**2)
# + [markdown] id="i4eXOkrQ-9Pb"
# 두 변수 모두에 대해 `loss`의 그래디언트를 가져오려면, 두 변수를 `gradient` 메서드에 소스로 전달할 수 있습니다. 테이프는 소스가 전달되는 방식에 대해 융통성이 있으며 목록 또는 사전의 중첩된 조합을 허용하고 같은 방식으로 구조화된 그래디언트를 반환합니다(`tf.nest` 참조).
# + id="luOtK1Da_BR0"
# Gradients of the scalar loss with respect to both sources at once.
[dl_dw, dl_db] = tape.gradient(loss, [w, b])
# + [markdown] id="Ei4iVXi6qgM7"
# 각 소스에 대한 그래디언트는 소스의 형상을 갖습니다.
# + id="aYbWRFPZqk4U"
# Each gradient has the same shape as its source variable.
print(w.shape)
print(dl_dw.shape)
# + [markdown] id="dI_SzxHsvao1"
# 다음은 그래디언트 계산입니다. 이번에는 변수의 사전을 전달합니다.
# + id="d73cY6NOuaMd"
# Sources may also be passed as a (possibly nested) dict;
# the returned gradients mirror the same structure (see tf.nest).
my_vars = {
    'w': w,
    'b': b
}
grad = tape.gradient(loss, my_vars)
grad['b']
# + [markdown] id="HZ2LvHifEMgO"
# ## 모델에 대한 그래디언트
#
# [검사점 설정](checkpoint.ipynb) 및 [내보내기](saved_model.ipynb)를 위해 `tf.Variables`를 `tf.Module` 또는 해당 서브 클래스(<code>layers.Layer</code>와 <code>keras.Model</code>) 중 하나로 수집하는 것이 일반적입니다.
#
# 대부분의 경우 모델의 훈련 가능한 변수에 대한 그래디언트를 계산하려고 합니다. `tf.Module`의 모든 서브 클래스는 `Module.trainable_variables` 속성에서 변수를 집계하므로 몇 줄의 코드로 이러한 그래디언트를 계산할 수 있습니다.
# + id="JvesHtbQESc-"
# Gradients of a Keras layer: tf.Module subclasses collect their
# variables under .trainable_variables.
layer = tf.keras.layers.Dense(2, activation='relu')
x = tf.constant([[1., 2., 3.]])
with tf.GradientTape() as tape:
    # Forward pass
    y = layer(x)
    loss = tf.reduce_mean(y**2)
# Calculate gradients with respect to every trainable variable
grad = tape.gradient(loss, layer.trainable_variables)
# + id="PR_ezr6UFrpI"
# One gradient per trainable variable (kernel and bias), same shapes.
for var, g in zip(layer.trainable_variables, grad):
    print(f'{var.name}, shape: {g.shape}')
# + [markdown] id="f6Gx6LS714zR"
# <a id="watches"></a>
#
# ## 테이프의 감시 대상 제어하기
# + [markdown] id="N4VlqKFzzGaC"
# 기본 동작은 훈련 가능한 `tf.Variable`에 액세스한 후 모든 연산을 기록하는 것입니다. 그 이유는 다음과 같습니다.
#
# - 테이프는 역방향 패스의 그래디언트를 계산하기 위해 정방향 패스에 기록할 연산을 알아야 합니다.
# - 테이프는 중간 출력에 대한 참조를 보유하므로 불필요한 연산을 기록하지 않습니다.
# - 가장 일반적인 사용 사례는 모든 모델의 훈련 가능한 변수에 대해 손실의 그래디언트를 계산하는 것입니다.
#
# 예를 들어, 다음은 `tf.Tensor`가 기본적으로 "감시"되지 않고 `tf.Variable`을 훈련할 수 없기 때문에 그래디언트를 계산하지 못합니다.
# + id="Kj9gPckdB37a"
# A trainable variable
x0 = tf.Variable(3.0, name='x0')
# Not trainable
x1 = tf.Variable(3.0, name='x1', trainable=False)
# Not a Variable: A variable + tensor returns a tensor.
x2 = tf.Variable(2.0, name='x2') + 1.0
# Not a variable
x3 = tf.constant(3.0, name='x3')
with tf.GradientTape() as tape:
    y = (x0**2) + (x1**2) + (x2**2)
grad = tape.gradient(y, [x0, x1, x2, x3])
# Only x0 yields a gradient; the others were not watched, so they are None.
for g in grad:
    print(g)
# + [markdown] id="RkcpQnLgNxgi"
# `GradientTape.watched_variables` 메서드를 사용하여 테이프에서 감시 중인 변수를 나열할 수 있습니다.
# + id="hwNwjW1eAkib"
# List what the tape is actually watching (only the trainable variable x0).
[var.name for var in tape.watched_variables()]
# + [markdown] id="NB9I1uFvB4tf"
# `tf.GradientTape`는 사용자가 감시 대상 또는 감시 예외 대상을 제어할 수 있는 후크를 제공합니다.
#
# `tf.Tensor`에 대한 그래디언트를 기록하려면 `GradientTape.watch(x)`를 호출해야 합니다.
# + id="tVN1QqFRDHBK"
x = tf.constant(3.0)
with tf.GradientTape() as tape:
    # Constants are not watched by default; watch(x) records their ops.
    tape.watch(x)
    y = x**2
# dy = 2x * dx
dy_dx = tape.gradient(y, x)
print(dy_dx.numpy())
# + [markdown] id="qxsiYnf2DN8K"
# 반대로, 모든 `tf.Variables`을 감시하는 기본 동작을 비활성화하려면, 그래디언트 테이프를 만들 때 `watch_accessed_variables=False`를 설정합니다. 이 계산은 두 가지 변수를 사용하지만, 변수 중 하나의 그래디언트만 연결합니다.
# + id="7QPzwWvSEwIp"
x0 = tf.Variable(0.0)
x1 = tf.Variable(10.0)
# watch_accessed_variables=False disables automatic variable watching;
# only x1 is watched explicitly below.
with tf.GradientTape(watch_accessed_variables=False) as tape:
    tape.watch(x1)
    y0 = tf.math.sin(x0)
    y1 = tf.nn.softplus(x1)
    y = y0 + y1
    ys = tf.reduce_sum(y)
# + [markdown] id="TRduLbE1H2IJ"
# `x0`에서 `GradientTape.watch`가 호출되지 않았으므로 이에 대한 그래디언트가 계산되지 않습니다.
# + id="e6GM-3evH1Sz"
# dys/dx1 = exp(x1) / (1 + exp(x1)) = sigmoid(x1)
grad = tape.gradient(ys, {'x0': x0, 'x1': x1})
# x0 was never watched, so its gradient is None.
print('dy/dx0:', grad['x0'])
print('dy/dx1:', grad['x1'].numpy())
# + [markdown] id="2g1nKB6P-OnA"
# ## 중간 결과
#
# `tf.GradientTape` 컨텍스트 내에서 계산된 중간값과 관련하여 출력의 그래디언트를 요청할 수도 있습니다.
# + id="7XaPRAwUyYms"
x = tf.constant(3.0)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = x * x
    z = y * y
# Gradients can be taken with respect to intermediate values, not just inputs.
# Use the tape to compute the gradient of z with respect to the
# intermediate value y.
# dz_dy = 2 * y and y = x ** 2 = 9
print(tape.gradient(z, y).numpy())
# + [markdown] id="ISkXuY7YzIcS"
# 기본적으로, `GradientTape.gradient` 메서드가 호출되면 `GradientTape`가 보유한 리소스가 해제됩니다. 동일한 계산에 대해 여러 그래디언트를 계산하려면 `persistent=True`로 설정한 그래디언트 테이프를 만듭니다. 이렇게 하면 리소스는 테이프 객체가 가비지 수집될 때 해제되므로 `gradient` 메서드를 여러 번 호출할 수 있습니다. 예를 들면 다음과 같습니다.
# + id="zZaCm3-9zVCi"
x = tf.constant([1, 3.0])
# persistent=True: resources are NOT freed on the first gradient() call,
# so the tape can be queried multiple times.
with tf.GradientTape(persistent=True) as tape:
    tape.watch(x)
    y = x * x
    z = y * y
print(tape.gradient(z, x).numpy())  # 108.0 (4 * x**3 at x = 3)
print(tape.gradient(y, x).numpy())  # 6.0 (2 * x)
# + id="j8bv_jQFg6CN"
del tape   # Drop the reference to the tape
# + [markdown] id="O_ZY-9BUB7vX"
# ## 성능에 대한 참고 사항
#
# - 그래디언트 테이프 컨텍스트 내에서 연산을 수행하는 것과 관련하여 작은 오버헤드가 있습니다. 대부분의 Eager 실행에는 상당한 비용이 들지 않지만, 필요한 경우에만 테이프 컨텍스트를 사용해야 합니다.
#
# - 그래디언트 테이프는 메모리를 사용하여 역방향 패스 동안 사용하기 위해 입력 및 출력을 포함한 중간 결과를 저장합니다.
#
# 효율성을 위해 (`ReLU`와 같은) 일부 연산은 중간 결과를 유지할 필요가 없으며 정방향 패스 동안에 정리됩니다. 그러나 테이프에서 `persistent=True`를 사용하면 *아무것도 삭제되지 않으며* 최대 메모리 사용량이 높아집니다.
# + [markdown] id="9dLBpZsJebFq"
# ## 스칼라가 아닌 대상의 그래디언트
# + [markdown] id="7pldU9F5duP2"
# 그래디언트는 기본적으로 스칼라에 대한 연산입니다.
# + id="qI0sDV_WeXBb"
x = tf.Variable(2.0)
with tf.GradientTape(persistent=True) as tape:
    y0 = x**2
    y1 = 1 / x
# Each target is a scalar, so each gradient is well defined on its own.
print(tape.gradient(y0, x).numpy())
print(tape.gradient(y1, x).numpy())
# + [markdown] id="COEyYp34fxj4"
# 따라서, 여러 대상의 그래디언트를 요청하면 각 소스의 결과는 다음과 같습니다.
#
# - 대상의 합계 또는 그에 상응하는 그래디언트
# - 각 대상의 그래디언트의 합계
# + id="o4a6_YOcfWKS"
x = tf.Variable(2.0)
with tf.GradientTape() as tape:
    y0 = x**2
    y1 = 1 / x
# Multiple targets: the result is the gradient of the SUM of the targets,
# d(y0 + y1)/dx = 2x - 1/x**2 = 4 - 0.25 = 3.75.
print(tape.gradient({'y0': y0, 'y1': y1}, x).numpy())
# + [markdown] id="uvP-mkBMgbym"
# 마찬가지로, 대상이 스칼라가 아닌 경우 합계의 그래디언트가 계산됩니다.
# + id="DArPWqsSh5un"
x = tf.Variable(2.)
with tf.GradientTape() as tape:
    y = x * [3., 4.]
# Non-scalar target: the gradient of the sum of y's elements -> 3 + 4 = 7.
print(tape.gradient(y, x).numpy())
# + [markdown] id="flDbx68Zh5Lb"
# 이렇게 하면, 손실 컬렉션 합계의 그래디언트 또는 요소별 손실 계산 합계의 그래디언트를 간단하게 구할 수 있습니다.
#
# 각 항목에 대해 별도의 그래디언트가 필요한 경우, [야고비안](advanced_autodiff.ipynb#jacobians)을 참조하세요.
# + [markdown] id="iwFswok8RAly"
# 어떤 경우에는 야고비안을 건너뛸 수 있습니다. 요소별 계산의 경우, 각 요소가 독립적이므로 합의 그래디언트는 입력 요소와 관련하여 각 요소의 미분을 제공합니다.
# + id="JQvk_jnMmTDS"
x = tf.linspace(-10.0, 10.0, 200+1)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = tf.nn.sigmoid(x)
# Element-wise op: because each output depends only on its own input, the
# gradient of the sum equals the per-element derivative sigmoid'(x).
dy_dx = tape.gradient(y, x)
# + id="e_f2QgDPmcPE"
plt.plot(x, y, label='y')
plt.plot(x, dy_dx, label='dy/dx')
plt.legend()
_ = plt.xlabel('x')
# + [markdown] id="6kADybtQzYj4"
# ## 흐름 제어하기
#
# 그래디언트 테이프는 실행되는 연산을 기록하기 때문에 Python 제어 흐름이 자연스럽게 처리됩니다(예: `if` 및 `while` 구문).
#
# 여기서는 `if`의 각 분기에 서로 다른 변수가 사용됩니다. 그래디언트는 사용된 변수에만 연결됩니다.
# + id="ciFLizhrrjy7"
x = tf.constant(1.0)
v0 = tf.Variable(2.0)
v1 = tf.Variable(2.0)
with tf.GradientTape(persistent=True) as tape:
    tape.watch(x)
    # Python control flow is recorded naturally: only the branch that
    # actually executes (result = v0, since x > 0) ends up on the tape.
    if x > 0.0:
        result = v0
    else:
        result = v1**2
dv0, dv1 = tape.gradient(result, [v0, v1])
print(dv0)
# v1 was not used in the recorded computation, so its gradient is None.
print(dv1)
# + [markdown] id="HKnLaiapsjeP"
# 제어문 자체는 미분할 수 없으므로 그래디언트 기반 최적화 프로그램에는 보이지 않습니다.
#
# 위 예제에서 `x` 값에 따라 테이프는 `result = v0` 또는 `result = v1**2`를 기록합니다. `x`에 대한 그래디언트는 항상 `None`입니다.
# + id="8k05WmuAwPm7"
# The branch condition itself is not differentiable: d(result)/dx is None.
dx = tape.gradient(result, x)
print(dx)
# + [markdown] id="egypBxISAHhx"
# ## `None`의 그래디언트 구하기
#
# 대상이 소스에 연결되어 있지 않으면 그래디언트는 `None`입니다.
#
# + id="CU185WDM81Ut"
x = tf.Variable(2.)
y = tf.Variable(3.)
with tf.GradientTape() as tape:
    z = y * y
# z does not depend on x, so the gradient is None.
print(tape.gradient(z, x))
# + [markdown] id="sZbKpHfBRJym"
# 여기서 `z`는 명확하게 `x`에 연결되어 있지 않지만, 그래디언트의 연결을 끊을 수 있는 몇 가지 덜 명확한 방법이 있습니다.
# + [markdown] id="eHDzDOiQ8xmw"
# ### 1. 변수를 텐서로 대체했습니다.
#
# [테이프의 감시 대상 제어](#watches) 섹션에서 테이프가 자동으로 `tf.Variable`을 감시하지만 `tf.Tensor`는 감시하지 않는 것을 살펴보았습니다.
#
# 한 가지 일반적인 오류는 `Variable.assign`를 사용하여 `tf.Variable`를 업데이트하는 대신 실수로 `tf.Variable`을 `tf.Tensor`로 대체하는 것입니다. 예를 들면, 다음과 같습니다.
# + id="QPKY4Tn9zX7_"
x = tf.Variable(2.0)
for epoch in range(2):
    with tf.GradientTape() as tape:
        y = x+1
    # After the first iteration, x has been rebound to a tf.Tensor, which
    # the tape does not watch -- so the second gradient is None.
    print(type(x).__name__, ":", tape.gradient(y, x))
    x = x + 1   # This should be `x.assign_add(1)`
# + [markdown] id="3gwZKxgA97an"
# ### 2. TensorFlow 외부에서 계산했습니다.
#
# 계산에서 TensorFlow를 종료하면 테이프가 그래디언트 경로를 기록할 수 없습니다. 예를 들면, 다음과 같습니다.
# + id="jmoLCDJb_yw1"
x = tf.Variable([[1.0, 2.0],
                 [3.0, 4.0]], dtype=tf.float32)
with tf.GradientTape() as tape:
    x2 = x**2
    # This step is calculated with NumPy
    y = np.mean(x2, axis=0)
    # Like most ops, reduce_mean will cast the NumPy array to a constant tensor
    # using `tf.convert_to_tensor`.
    y = tf.reduce_mean(y, axis=0)
# The NumPy step happened outside TensorFlow, so the recorded graph is
# broken and the gradient is None.
print(tape.gradient(y, x))
# + [markdown] id="p3YVfP3R-tp7"
# ### 3. 정수 또는 문자열을 통해 그래디언트를 구했습니다.
#
# 정수와 문자열은 미분할 수 없습니다. 계산 경로에서 이러한 데이터 유형을 사용하면 그래디언트는 없습니다.
#
# 아무도 문자열을 미분할 것으로 기대하지는 않지만, `dtype`을 지정하지 않으면 실수로 `int` 상수 또는 변수를 만들기 쉽습니다.
# + id="9jlHXHqfASU3"
# tf.constant(10) has an integer dtype (no explicit float given).
x = tf.constant(10)
with tf.GradientTape() as g:
    g.watch(x)
    y = x * x
# Integers are not differentiable, so the gradient is None.
print(g.gradient(y, x))
# + [markdown] id="RsdP_mTHX9L1"
# TensorFlow는 유형 간에 자동으로 전송되지 않으므로 실제로 그래디언트가 누락되는 대신 유형 오류가 발생합니다.
# + [markdown] id="WyAZ7C8qCEs6"
# ### 4. 상태 저장 객체를 통해 그래디언트를 구했습니다.
#
# 상태가 그래디언트를 중지합니다. 상태 저장 객체에서 읽을 때 테이프는 현재 상태만 볼 수 있으며 현재 상태에 이르게 된 기록은 볼 수 없습니다.
#
# `tf.Tensor`는 변경할 수 없습니다. 텐서가 작성된 후에는 변경할 수 없습니다. *값*은 있지만 *상태는* 없습니다. 지금까지 논의된 모든 연산은 상태 비저장입니다. `tf.matmul`의 출력은 입력에만 의존합니다.
#
# `tf.Variable`은 내부 상태와 값을 갖습니다. 변수를 사용하면 상태를 읽습니다. 변수와 관련하여 그래디언트를 계산하는 것이 일반적이지만, 변수의 상태는 그래디언트 계산이 더 멀리 돌아가지 않도록 차단합니다. 예를 들면, 다음과 같습니다.
#
# + id="C1tLeeRFE479"
x0 = tf.Variable(3.0)
x1 = tf.Variable(0.0)
with tf.GradientTape() as tape:
    # Update x1 = x1 + x0.
    x1.assign_add(x0)
    # The tape starts recording from x1.
    y = x1**2   # y = (x1 + x0)**2
# This doesn't work.
# Reading a variable only sees its current state, not the history that
# produced it: the assign_add blocks the gradient back to x0.
print(tape.gradient(y, x0))   #dy/dx0 = 2*(x1 + x0)
# + [markdown] id="xKA92-dqF2r-"
# 마찬가지로, `tf.data.Dataset` 반복기(iterator)와 `tf.queue`는 상태 저장이며 이들을 통과하는 텐서의 모든 그래디언트를 중지합니다.
# + [markdown] id="HHvcDGIbOj2I"
# ## 그래디언트가 등록되지 않음
# + [markdown] id="aoc-A6AxVqry"
# 일부 `tf.Operation`는 **미분 불가능한 것으로 등록되어** `None`을 반환합니다. 다른 연산에는 **그래디언트가 등록되지 않았습니다**.
#
# `tf.raw_ops` 페이지에는 그래디언트가 등록된 저수준 op가 표시됩니다.
#
# 그래디언트가 등록되지 않은 float op를 통해 그래디언트를 얻고자 시도하면 테이프가 자동으로 `None`을 반환하는 대신 오류가 발생합니다. 이렇게 하면 무언가 잘못되었다는 것을 알 수 있습니다.
#
# 예를 들어, `tf.image.adjust_contrast` 함수는 그래디언트를 가질 수 있지만 그래디언트는 구현되지 않은 `raw_ops.AdjustContrastv2`를 래핑합니다.
#
# + id="HSb20FXc_V0U"
image = tf.Variable([[[0.5, 0.0, 0.0]]])
delta = tf.Variable(0.1)
with tf.GradientTape() as tape:
    new_image = tf.image.adjust_contrast(image, delta)
try:
    print(tape.gradient(new_image, [image, delta]))
    assert False   # This should not happen.
except LookupError as e:
    # The wrapped raw_ops.AdjustContrastv2 has no registered gradient, so
    # the tape raises LookupError instead of silently returning None.
    print(f'{type(e).__name__}: {e}')
# + [markdown] id="pDoutjzATiEm"
# 이 op를 통해 미분해야 하는 경우, 그래디언트를 구현하고 등록하거나(`tf.RegisterGradient` 사용) 다른 ops를 사용하여 함수를 다시 구현해야 합니다.
# + [markdown] id="GCTwc_dQXp2W"
# ## None 대신 0
# + [markdown] id="TYDrVogA89eA"
# 연결되지 않은 그래디언트의 경우 `None` 대신 0을 가져오는 것이 편리한 경우가 있습니다. 연결되지 않은 그래디언트가 있을 때 `unconnected_gradients` 인수를 사용하여 반환할 항목을 결정할 수 있습니다.
# + id="U6zxk1sf9Ixx"
x = tf.Variable([2., 2.])
y = tf.Variable(3.)
with tf.GradientTape() as tape:
    z = y**2
# unconnected_gradients=ZERO returns zeros shaped like x instead of None.
print(tape.gradient(z, x, unconnected_gradients=tf.UnconnectedGradients.ZERO))
| site/ko/guide/autodiff.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Step 1 Download and prepare data
# This example is for demonstration purposes
# Please refer to the corresponding NLP tutorial on NeMo documentation
# ! scripts/get_wkt2.sh
# verify data is there
# ! ls -l data/lm/wikitext-2
# Prepare tokenization model
# ! python scripts/create_vocab.py --train_path=data/lm/wikitext-2/train.txt
# ### Step 2 - import necessary packages, define hyperparameters, create tokenizer instance
# +
import os
import torch
import nemo
from nemo.utils.lr_policies import CosineAnnealing
import nemo_nlp
from nemo_nlp import NemoBertTokenizer, SentencePieceTokenizer
from nemo_nlp.utils.callbacks.bert_pretraining import eval_iter_callback, \
eval_epochs_done_callback
# Hyperparameters for the demo BERT pretraining run.
BATCHES_PER_STEP = 1  # gradient-accumulation batches per optimizer step
BATCH_SIZE = 64
BATCH_SIZE_EVAL = 16
D_MODEL = 768  # transformer hidden size
D_INNER = 3072  # feed-forward (intermediate) size
HIDDEN_ACT = "relu"
LEARNING_RATE = 0.0001
LR_WARMUP_PROPORTION = 0.05  # fraction of total steps used for LR warmup
MASK_PROBABILITY = 0.15  # MLM token-masking probability
MAX_SEQ_LENGTH = 128
NUM_EPOCHS = 1  # demo value; increase for real training
NUM_HEADS = 12
# Note that for Demo purposes this is set to just one epoch
NUM_LAYERS = 1  # demo value -- BERT-base uses 12 transformer layers
OPTIMIZER = "adam_w"
# -
# Instantiate neural factory with supported backend
neural_factory = nemo.core.NeuralModuleFactory(
    backend=nemo.core.Backend.PyTorch,
    # If you're training with multiple GPUs, you should handle this value with
    # something like argparse. See examples/nlp/bert_pretraining.py for an example.
    local_rank=None,
    # If you're training with mixed precision, this should be set to mxprO1 or mxprO2.
    # See https://nvidia.github.io/apex/amp.html#opt-levels for more details.
    optimization_level=nemo.core.Optimization.mxprO1,
    # If you're training with multiple GPUs, this should be set to
    # nemo.core.DeviceType.AllGpu
    placement=nemo.core.DeviceType.GPU)
# tokenizer.model file was created during Step 1
tokenizer = SentencePieceTokenizer(model_path="tokenizer.model")
# Register BERT's control tokens so they map to dedicated vocabulary ids.
tokenizer.add_special_tokens(["[MASK]", "[CLS]", "[SEP]"])
# #### Instantiate necessary neural modules
# BERT encoder (HuggingFace implementation wrapped as a NeMo module).
bert_model = nemo_nlp.huggingface.BERT(
    vocab_size=tokenizer.vocab_size,
    num_hidden_layers=NUM_LAYERS,
    hidden_size=D_MODEL,
    num_attention_heads=NUM_HEADS,
    intermediate_size=D_INNER,
    max_position_embeddings=MAX_SEQ_LENGTH,
    hidden_act=HIDDEN_ACT,
    factory=neural_factory)
# +
# Masked Language Modeling Loss
mlm_classifier = nemo_nlp.BertTokenClassifier(D_MODEL,
                                              num_classes=tokenizer.vocab_size,
                                              activation=HIDDEN_ACT,
                                              log_softmax=True)
mlm_loss = nemo_nlp.MaskedLanguageModelingLossNM()
# Next Sentence Prediction Loss
nsp_classifier = nemo_nlp.SequenceClassifier(D_MODEL,
                                             num_classes=2,
                                             num_layers=2,
                                             activation='tanh',
                                             log_softmax=False)
nsp_loss = nemo.backends.pytorch.common.CrossEntropyLoss()
# Sums the MLM and NSP losses into one training objective.
bert_loss = nemo_nlp.LossAggregatorNM(num_inputs=2)
# +
import os  # NOTE(review): redundant -- os is already imported at the top of the notebook
# MLM/NSP data layer over the WikiText-2 training split.
train_data_layer = nemo_nlp.BertPretrainingDataLayer(
    tokenizer=tokenizer,
    dataset=os.path.join("data/lm/wikitext-2", "train.txt"),
    max_seq_length=MAX_SEQ_LENGTH,
    mask_probability=MASK_PROBABILITY,
    batch_size=BATCH_SIZE,
    factory=neural_factory)
# Evaluation data layer over the validation split (smaller batch size).
eval_data_layer = nemo_nlp.BertPretrainingDataLayer(
    tokenizer=tokenizer,
    dataset=os.path.join("data/lm/wikitext-2", "valid.txt"),
    max_seq_length=MAX_SEQ_LENGTH,
    mask_probability=MASK_PROBABILITY,
    batch_size=BATCH_SIZE_EVAL,
    factory=neural_factory)
# -
# ### Step 3 - Describe training and evaluation DAGs
# +
# Training DAG
# NOTE: calling NeMo modules here only wires tensors into a graph; no
# computation runs until neural_factory.train() below.
input_ids, input_type_ids, input_mask, \
    output_ids, output_mask, nsp_labels = train_data_layer()
hidden_states = bert_model(input_ids=input_ids,
                           token_type_ids=input_type_ids,
                           attention_mask=input_mask)
mlm_logits = mlm_classifier(hidden_states=hidden_states)
t_mlm_loss = mlm_loss(logits=mlm_logits, output_ids=output_ids, output_mask=output_mask)
nsp_logits = nsp_classifier(hidden_states=hidden_states)
t_nsp_loss = nsp_loss(logits=nsp_logits, labels=nsp_labels)
loss = bert_loss(loss_1=t_mlm_loss, loss_2=t_nsp_loss)
# +
# Evaluation DAG
# Reuses the same modules (shared weights) on the validation data layer.
e_input_ids, e_input_type_ids, e_input_mask, \
    e_output_ids, e_output_mask, e_nsp_labels = eval_data_layer()
e_hidden_states = bert_model(input_ids=e_input_ids,
                             token_type_ids=e_input_type_ids,
                             attention_mask=e_input_mask)
e_mlm_logits = mlm_classifier(hidden_states=e_hidden_states)
e_mlm_loss = mlm_loss(logits=e_mlm_logits, output_ids=e_output_ids, output_mask=e_output_mask)
e_nsp_logits = nsp_classifier(hidden_states=e_hidden_states)
e_nsp_loss = nsp_loss(logits=e_nsp_logits, labels=e_nsp_labels)
e_loss = bert_loss(loss_1=e_mlm_loss, loss_2=e_nsp_loss)
# +
# Periodically log the scalar training loss.
callback_loss = nemo.core.SimpleLossLoggerCallback(
    tensors=[loss],
    print_func=lambda x: print("Loss: {:.3f}".format(x[0].item())))
train_data_size = len(train_data_layer)
# If you're training on multiple GPUs, this should be
# train_data_size / (batch_size * batches_per_step * num_gpus)
steps_per_epoch = int(train_data_size / (BATCHES_PER_STEP * BATCH_SIZE))
# Evaluate on the held-out split once per epoch (eval_step in optimizer steps).
callback_test = nemo.core.EvaluatorCallback(
    eval_tensors=[e_mlm_loss, e_nsp_loss],
    user_iter_callback=eval_iter_callback,
    user_epochs_done_callback=eval_epochs_done_callback,
    eval_step=steps_per_epoch)
# -
# Cosine-annealing LR schedule with linear warmup over the whole run.
lr_policy = CosineAnnealing(NUM_EPOCHS * steps_per_epoch,
                            warmup_ratio=LR_WARMUP_PROPORTION)
neural_factory.train(tensors_to_optimize=[loss],
                     lr_policy=lr_policy,
                     callbacks=[callback_loss, callback_test],
                     #callbacks=[callback_loss],
                     batches_per_step=BATCHES_PER_STEP,
                     optimizer=OPTIMIZER,
                     optimization_params={
                         "batch_size": BATCH_SIZE,
                         "num_epochs": NUM_EPOCHS,
                         "lr": LEARNING_RATE,
                         "betas": (0.95, 0.98),
                         "grad_norm_clip": None
                     })
| examples/nlp/BERTPretrainingTutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# ============================================================================
# Author : <NAME>, User Interfaces Group, Aalto University
# Init : August, 2017
# Project : ELEC-D7010 Engineering for Humans course materials
# Topic : Hick-Hyman Law
# ============================================================================
import tkinter as tk
# %matplotlib inline
import matplotlib.pyplot as plt
from IPython import display
import numpy as np
import random
import time as tm
class ExpAccurate(tk.Frame):
# initial parameters
gameAreaHeight = 300
stimuliBasePosition = [200, -30]
stimuliSideLength = 30
stimuliFontSize = 20
stimuliKey = ['<KEY> 'E', 'I', 'R', 'U', 'G', 'H']
stimuliTotalAmount = 8
timeStart = [] # list of timestamp when generating each stimulus
timeHit = [] # list of timestamp when hit each correct key
timeDelta = [] # list of reaction time in millisecond
plotTimeXList = [] # list of stimuli amount as abscissa in plot RT ~ f(N)
clips = 0
plotTimeXClips = [] # list of amount of valid reaction time logs with regard to each timuli set size
counterMax = 10 # to limit repeating times of hit-key experiment for a certain stimuli set size
# counterMax = 1
timerValidHit = 5
flagPause = False
flagTransit = False
flagStart = False
coefficient_determination = 0.0
def __init__(self, parent=None):
tk.Frame.__init__(self, parent)
self.pack()
self.makeWidgets()
self.defStimuliCollection()
self.stimuliAmount = 2 # initial stimuli amount
self.counter = 0 # to record repeating times of hit-key experiment so far
self.flagStart = False
self.flagPause = False
self.flagTransit = False
self.bind_all('<space>', self.start) # press <space> to start the experiment
def makeWidgets(self):
self.frameExplain = tk.Frame(self)
self.frameGameArea = tk.Canvas(self, bd=2, bg='white', relief=tk.GROOVE, width=500, height=self.gameAreaHeight)
self.frameControlPanel = tk.Frame(self)
self.frameExplain.grid(row=0, column=0, padx=10, pady=10)
self.frameGameArea.grid(row=1, column=0, padx=10, pady=10)
self.frameGameArea.grid_propagate(0)
self.frameControlPanel.grid(row=0, rowspan=2, column=1, padx=10, pady=10)
self.labelPause = tk.Label(self.frameExplain, text="Press <SPACE> key to START",font=("Arial", 12))
# stimuli hints
self.stimuliHints = [0 for x in range(8)]
for x in range(8):
self.stimuliHints[x] = tk.Label(self.frameExplain, text=self.stimuliKey[x], relief=tk.GROOVE, width=5)
self.labelRequirement = tk.Label(self.frameControlPanel, text="If checked, stimuli are dropping, otherwise they are still", wraplength=200)
# toggle stimuli dropping animation
self.varToggle = tk.IntVar()
self.btnToggleMode = tk.Checkbutton(self.frameControlPanel, text="Stimuli animation", variable=self.varToggle)
self.btnPreload = tk.Button(self.frameControlPanel, text="Preload Dataset")
self.btnFilter = tk.Button(self.frameControlPanel, text="Filter Outliers")
self.labelPause.grid(row=0, column=0, columnspan=8, padx=10, pady=10, sticky=tk.W)
self.stimuliHints[0].grid(row=2, column=0, sticky=tk.NSEW, padx=10, pady=10)
self.stimuliHints[1].grid(row=2, column=7, sticky=tk.NSEW, padx=10, pady=10)
self.stimuliHints[2].grid(row=1, column=1, sticky=tk.NSEW, padx=10, pady=10)
self.stimuliHints[3].grid(row=1, column=6, sticky=tk.NSEW, padx=10, pady=10)
self.stimuliHints[4].grid(row=1, column=2, sticky=tk.NSEW, padx=10, pady=10)
self.stimuliHints[5].grid(row=1, column=5, sticky=tk.NSEW, padx=10, pady=10)
self.stimuliHints[6].grid(row=2, column=3, sticky=tk.NSEW, padx=10, pady=10)
self.stimuliHints[7].grid(row=2, column=4, sticky=tk.NSEW, padx=10, pady=10)
self.labelRequirement.grid(row=0, column=0, padx=10, pady=10)
self.btnPreload.grid(row=2, column=0, sticky=tk.NSEW, padx=10, pady=10)
self.btnToggleMode.grid(row=1, column=0, sticky=tk.NSEW, padx=10, pady=10)
self.btnFilter.grid(row=3, column=0, sticky=tk.NSEW, padx=10, pady=10)
self.btnPreload.bind('<ButtonRelease-1>', self.preload)
self.btnFilter.bind('<ButtonRelease-1>', self.filter)
def defStimuliCollection(self):
self.stimuli = [0 for x in range(8)]
self.stimuliTexts = [0 for x in range(8)]
for x in range(8):
self.stimuli[x] = self.frameGameArea.create_rectangle(self.stimuliBasePosition[0], self.stimuliBasePosition[1], self.stimuliBasePosition[0] + self.stimuliSideLength, self.stimuliBasePosition[1] + self.stimuliSideLength, fill='#C0FFF8', activefill='#63B2A9', tags=self.stimuliKey[x], state=tk.HIDDEN)
self.stimuliTexts[x] = self.frameGameArea.create_text(self.stimuliBasePosition[0] + 0.5*self.stimuliSideLength, self.stimuliBasePosition[1] + 0.5*self.stimuliSideLength, text=self.stimuliKey[x], font=self.stimuliFontSize, tags=self.stimuliKey[x], state=tk.HIDDEN)
def start(self, event):
self.flagStart = True
self.generateStimuliSet(self.stimuliAmount)
self.bind_all('<KeyPress>', self.onKeyHit)
self.bind_all('<space>', self.pauseResume)
def generateStimuliSet(self, argAmount):
self.plotTimeXClips.append(self.clips)
self.flagTransit = True
self.indexList = [] # currently activated indices of stimuli
for i in range(argAmount):
self.indexList.append(i)
self.stimuliHints[i].config(relief=tk.RAISED, bg='#63B2A9', fg='white')
self.speed = 5
self.counterTransit = 0
self.transitStimuliSet()
def transitStimuliSet(self):
# blink updated stimuli set twice
if self.counterTransit < 4:
if self.stimuliHints[0].cget('state') == tk.NORMAL:
for i in range(self.stimuliAmount):
self.stimuliHints[i].config(state=tk.DISABLED, bg='SystemButtonFace', relief=tk.GROOVE)
else:
for i in range(self.stimuliAmount):
self.stimuliHints[i].config(state=tk.NORMAL, bg='#63B2A9', relief=tk.RAISED)
self.counterTransit += 1
self.frameGameArea.after(500, self.transitStimuliSet)
else:
self.frameGameArea.after(1500, self.generateRandomStimuli(self.indexList))
def generateRandomStimuli(self, argIndexList):
self.indexCurrent = random.sample(argIndexList, 1)
self.frameGameArea.itemconfigure(self.stimuli[self.indexCurrent[0]], state=tk.NORMAL)
self.frameGameArea.itemconfigure(self.stimuliTexts[self.indexCurrent[0]], state=tk.NORMAL)
self.frameGameArea.coords(self.stimuli[self.indexCurrent[0]], self.stimuliBasePosition[0], self.stimuliBasePosition[1], self.stimuliBasePosition[0] + self.stimuliSideLength, self.stimuliBasePosition[1] + self.stimuliSideLength)
self.frameGameArea.coords(self.stimuliTexts[self.indexCurrent[0]], self.stimuliBasePosition[0] + 0.5*self.stimuliSideLength, self.stimuliBasePosition[1] + 0.5*self.stimuliSideLength)
self.flagTransit = False
if self.flagStart == True:
self.animateStimuli()
self.flagStart = False
# record the timestamp when generating this stimulus
self.timeStart.append(tm.clock())
# animate the dropping of stimuli
def animateStimuli(self):
# press <space> can pause/resume animateStimuli()
if self.flagPause == False:
if self.flagTransit == False:
# if choose to drop stimuli
if self.varToggle.get() == 1:
if self.frameGameArea.coords(self.stimuli[self.indexCurrent[0]])[1] > self.gameAreaHeight + 3:
# the user doesn't hit the correct key until the stimuli drops out, so deleting invalid starting time log for this stimuli
del(self.timeStart[-1])
if self.counter < self.counterMax - 1:
self.generateRandomStimuli(self.indexList)
self.counter += 1
else:
if self.stimuliAmount < 8:
self.counter = 0
self.stimuliAmount += 1
self.generateStimuliSet(self.stimuliAmount)
else:
self.flagPause = True
win = tk.Toplevel()
win.wm_title("Message")
tk.Label(win, text="Experiment is done, please restart").grid(row=0, column=0, padx=20, pady=10, sticky=tk.NSEW)
tk.Button(win, text="Okay", command=win.destroy).grid(row=1, column=0, padx=20, pady=10, sticky=tk.NSEW)
self.expEnd()
else:
self.frameGameArea.move(self.stimuli[self.indexCurrent[0]], 0, self.speed)
self.frameGameArea.move(self.stimuliTexts[self.indexCurrent[0]], 0, self.speed)
# if choose to still stimuli
else:
self.frameGameArea.coords(self.stimuli[self.indexCurrent[0]], 250-0.5*self.stimuliSideLength,
0.5*(self.gameAreaHeight-self.stimuliSideLength), 250+0.5*self.stimuliSideLength, 0.5*(self.gameAreaHeight+self.stimuliSideLength))
self.frameGameArea.coords(self.stimuliTexts[self.indexCurrent[0]], 250, 0.5*self.gameAreaHeight)
if(len(self.timeStart)):
if(tm.clock()-self.timeStart[-1]) > self.timerValidHit:
self.disappearStimuli()
self.frameGameArea.after(20, self.animateStimuli)
def disappearStimuli(self):
self.frameGameArea.itemconfigure(self.stimuli[self.indexCurrent[-1]], state=tk.HIDDEN, fill='#C0FFF8')
self.frameGameArea.itemconfigure(self.stimuliTexts[self.indexCurrent[-1]], state=tk.HIDDEN)
if self.counter < self.counterMax - 1:
self.generateRandomStimuli(self.indexList)
self.counter += 1
else:
if self.stimuliAmount < 8:
self.counter = 0
self.stimuliAmount += 1
self.generateStimuliSet(self.stimuliAmount)
else:
self.flagPause = True
win = tk.Toplevel()
win.wm_title("Message")
tk.Label(win, text="Experiment is done, please restart").grid(row=0, column=0, padx=20, pady=10, sticky=tk.NSEW)
tk.Button(win, text="Okay", command=win.destroy).grid(row=1, column=0, padx=20, pady=10, sticky=tk.NSEW)
self.expEnd()
def onKeyHit(self, event):
if self.flagPause == False:
self.indexHit = []
if event.char == self.stimuliKey[self.indexCurrent[0]] or event.char == self.stimuliKey[self.indexCurrent[0]].lower():
self.indexHit.append(self.indexCurrent[0])
self.timeHit.append(tm.clock())
self.timeDelta.append((self.timeHit[-1] - self.timeStart[-1])*1000)
self.plotTimeXList.append(self.stimuliAmount)
self.clips += 1
self.frameGameArea.itemconfigure(self.stimuli[self.indexCurrent[0]], fill='#63B2A9')
self.flagPause = True
self.flagTransit = True
self.frameGameArea.after(300, self.afterCorrectKeyHit)
def afterCorrectKeyHit(self):
self.flagTransit = False
self.frameGameArea.itemconfigure(self.stimuli[self.indexCurrent[0]], state=tk.HIDDEN, fill='#C0FFF8')
self.frameGameArea.itemconfigure(self.stimuliTexts[self.indexCurrent[0]], state=tk.HIDDEN)
self.flagPause = False
self.frameGameArea.after(1000, self.animateStimuli)
# repeat generateRandomStimuli()
if self.counter < self.counterMax - 1:
self.generateRandomStimuli(self.indexList)
self.counter += 1
# plot reaction time against stimuli set size
fig = plt.figure()
graph = fig.add_subplot(1,1,1)
graph.set(title="Hick-Hyman Law empirical data fitting", ylabel="Reaction Time (ms)", xlabel="Degree of Choice, n")
graph.scatter(self.plotTimeXList, self.timeDelta, color='#4A857E', alpha=0.5)
plt.xticks(range(min(self.plotTimeXList)-1, max(self.plotTimeXList)+2, 1))
plt.grid()
display.clear_output(wait=True)
plt.show()
else:
# increase the size of stimuli set
if self.stimuliAmount < 8:
self.counter = 0
self.stimuliAmount += 1
self.generateStimuliSet(self.stimuliAmount)
else:
self.flagPause = True
win = tk.Toplevel()
win.wm_title("Message")
tk.Label(win, text="Experiment is done, please restart").grid(row=0, column=0, padx=20, pady=10, sticky=tk.NSEW)
tk.Button(win, text="Okay", command=win.destroy).grid(row=1, column=0, padx=20, pady=10, sticky=tk.NSEW)
self.expEnd()
def pauseResume(self, event):
if self.flagTransit == False:
self.flagPause = not self.flagPause
self.animateStimuli()
if self.flagPause == True:
self.labelPause.config(text="Press <SPACE> key to CONTINUE")
else:
self.labelPause.config(text="Press <SPACE> key to PAUSE at any time")
def expEnd(self):
self.unbind_all('<KeyPress>')
# plot reaction time against stimuli set size
fig, ax = plt.subplots()
ax.set(title="Hick-Hyman Law empirical data fitting", ylabel="Reaction Time (ms)", xlabel="Degree of Choice, n")
plt.scatter(self.plotTimeXList, self.timeDelta, color='#4A857E', alpha=0.5)
plt.xticks(range(min(self.plotTimeXList)-1, max(self.plotTimeXList)+2, 1))
plt.grid()
# empirical data fitting
plotTimeXLogList = np.log2(self.plotTimeXList)
z = np.polyfit(plotTimeXLogList, self.timeDelta, 1)
f = np.poly1d(z)
plotTimeXArray = np.array(self.plotTimeXList)
x = np.arange(plotTimeXArray.min()-0.3, plotTimeXArray.max()+0.3, 0.1)
xLog = np.log2(x)
y = f(xLog)
plt.plot(x, y)
# compute R-squared (coefficient of determination)
ybar = np.mean(self.timeDelta)
sstot, ssres = (0 for i in range(2))
for i in range(len(self.timeDelta)):
sstot += (self.timeDelta[i] - ybar)**2
ssres += (f(np.log2(self.plotTimeXList[i])) - self.timeDelta[i])**2
self.coefficient_determination = 1 - ssres/sstot
display.clear_output(wait=True)
plt.show()
self.btnFilter.config(state=tk.NORMAL)
print("Data fitting result: %.1f log(DOC) + %.1f\n(DOC: Degree Of Choice)\n" %(z[0],z[1]))
print("R-squared = %f" %self.coefficient_determination)
def preload(self, event):
self.timeDelta = [1315,1350,1340,1400,1320,2100,1350,1340,1400,1320,
1350,1400,1980,1450,1750,1350,1400,1390,1450,1370,
1400,1450,1430,1500,1420,1400,1467,1440,1490,1820,
1450,1500,1920,1550,1470,1450,1500,1480,1540,1470,
1497,1532,1520,1590,1510,1490,2240,1530,1590,1510,
1520,1570,1550,1520,1540,1508,1578,1560,1620,1440,
1543,1570,1570,1590,1560,1540,1590,1573,2800,1610,]
self.plotTimeXList = []
for i in range(2,9):
self.plotTimeXList += [i for j in range(10)]
self.expEnd()
    def filter(self, event):
        """Outlier rejection: within each degree-of-choice group, keep only
        trials whose reaction time lies within 0.8 standard deviations of the
        group mean, then re-run the analysis via expEnd().

        Groups are detected by value changes in self.plotTimeXList, so equal
        degrees of choice are assumed to be contiguous.

        :param event: Tk event (unused, required by the binding signature)
        """
        # filtered (kept) samples accumulated across all groups
        tmpTimeDelta, tmpPlotTimeXList = ([] for i in range(2))
        # degree-of-choice value of the group currently being collected
        tmpPlotTimeX_Ref = self.plotTimeXList[0]
        # samples of the current group, pending filtering
        tmpTimeDelta_filtering, tmpPlotTimeX_filtering = ([] for i in range(2))
        for i in range(len(self.timeDelta)):
            if self.plotTimeXList[i] == tmpPlotTimeX_Ref:
                # still inside the current group: just collect the sample
                tmpTimeDelta_filtering.append(self.timeDelta[i])
                tmpPlotTimeX_filtering.append(self.plotTimeXList[i])
            else:
                # group boundary: filter the completed group, then start a
                # new group seeded with the current sample
                tmpPlotTimeX_Ref = self.plotTimeXList[i]
                threshold = 0.8 * np.std(tmpTimeDelta_filtering)
                for j in range(len(tmpTimeDelta_filtering)):
                    if abs(tmpTimeDelta_filtering[j] - np.mean(tmpTimeDelta_filtering)) <= threshold:
                        tmpTimeDelta.append(tmpTimeDelta_filtering[j])
                        tmpPlotTimeXList.append(tmpPlotTimeX_filtering[j])
                tmpTimeDelta_filtering, tmpPlotTimeX_filtering = ([] for i in range(2))
                tmpTimeDelta_filtering.append(self.timeDelta[i])
                tmpPlotTimeX_filtering.append(self.plotTimeXList[i])
            # the last sample closes the final group, which would otherwise
            # never be flushed by the boundary branch above
            if i == len(self.timeDelta) - 1:
                threshold = 0.8 * np.std(tmpTimeDelta_filtering)
                for j in range(len(tmpTimeDelta_filtering)):
                    if abs(tmpTimeDelta_filtering[j] - np.mean(tmpTimeDelta_filtering)) <= threshold:
                        tmpTimeDelta.append(tmpTimeDelta_filtering[j])
                        tmpPlotTimeXList.append(tmpPlotTimeX_filtering[j])
        self.timeDelta = tmpTimeDelta
        self.plotTimeXList = tmpPlotTimeXList
        self.expEnd()
if __name__ == "__main__":
    root = tk.Tk()
    root.title("Hick-Hyman Law Experiment")
    # Schedule the focus grab BEFORE entering the event loop. The original
    # called root.after(...) after mainloop() returned, i.e. after the window
    # was already closed, so the callback never fired.
    root.after(5, lambda: root.focus_force())
    ExpAccurate(root).mainloop()
# -
| 2_HickHyman_Law/Hick_Hyman_Law.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ugolbck/CNN-code-mixed-sentiment/blob/master/convolutional_sentiment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="I6dD3j7nBicK" colab_type="text"
# # Enhanced convolutional NN for code-mixed sentiment analysis
# + [markdown] id="3q7uTz3Xx8RI" colab_type="text"
# # Data loading
# + [markdown] id="QyT9l_RLQAy3" colab_type="text"
# We load the data from a conll-formatted file and extract all tweets and their sentiment polarity.
#
# "#meta= 1 positive
#
# word1 lang1
#
# word2 lang1
#
# word3 lang2
#
# word4 other"
# + id="Juexqld-x-x5" colab_type="code" colab={}
# !pip install conllu
# # !pip install matplotlib==3.1.0 # In case bugged version that outputs cut confusion matrices
# conllu: parser for CoNLL-style token-per-line files.
from conllu import parse
from numpy import asarray, zeros, array, unique
# Training data lives on the mounted Google Drive (Colab environment).
data_path = '/content/drive/My Drive/RD/backup_train_conll_spanglish.txt'
with open(data_path, "r") as in_file:
    data = in_file.read()
# Each token line carries "form<TAB>tag"; the per-tweet "#meta" comment line
# is split on tabs by the custom metadata parser (index 1 = polarity label).
sentences = parse(data, fields=["form", "tag"], \
                  metadata_parsers={"meta": lambda key, value: (key, value.split("\t"))})
# + [markdown] id="Zn42ABxN7OGV" colab_type="text"
# # Pre-processing
# + [markdown] id="qaoJJQ_JQQL2" colab_type="text"
# We clean the data, use the TweetTokenizer from NLTK to tokenize and remove usernames. We remove URLs and add hashtags that had a dummy form in the original file
# + id="0mFHBWoEVDpn" colab_type="code" colab={}
import nltk
nltk.download('stopwords')  # stopword lists are a separate NLTK download
# + id="bNNIibvz7Q4R" colab_type="code" outputId="806f803c-6b9a-4ee5-f136-3b95fff50c26" colab={"base_uri": "https://localhost:8080/", "height": 285}
import re
from nltk.tokenize import casual_tokenize
from nltk.corpus import stopwords
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.utils import class_weight
import seaborn as sns
import pandas as pd

# Pre-processing toggles: each entry enables one cleanup step below.
tools = ['tweet_tokenize',
         'url_remove',
         ]

# Sentiment-bearing stopwords (English + Spanish) we deliberately keep.
whitelist = {'no', "doesn't", 'nor', 'couldn', "won't", "wouldn't", 'very', "you're", "isn't", 'off', 'why', 'muy', 'muchos', 'todo', 'poco',}

patterns = {
    # raw string avoids invalid-escape warnings; pattern value is unchanged
    'urls': r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*',
}

# NOTE: rebinding `stopwords` shadows the imported nltk corpus module of the
# same name; kept for backward compatibility with the rest of the notebook.
stopwords = (set(stopwords.words('english')) | set(stopwords.words('spanish')) | {'ke'}) - whitelist

# Build the cleaned tweet list X and the raw polarity labels y_tmp.
X, y_tmp = [], []
for sentence in sentences:
    # keep only non-stopword tokens, joined with single spaces
    phrase = ' '.join(word['form'] for word in sentence if word['form'].lower() not in stopwords)
    if 'tweet_tokenize' in tools:
        # Adapted tokenization, removes usernames and shortens character repetitions
        phrase = ' '.join(casual_tokenize(phrase, reduce_len=True, strip_handles=True))
    if 'url_remove' in tools:
        phrase = re.sub(re.compile(patterns['urls']), "", phrase)
    X.append(phrase)
    y_tmp.append(sentence.metadata["meta"][1])

# Map polarity strings to class ids; anything unexpected falls into class 2
# ("negative"), matching the original if/elif/else chain.
label_ids = {'positive': 0, 'neutral': 1}
y = [label_ids.get(label, 2) for label in y_tmp]

# Ploting the categorical distribution
df = pd.DataFrame()
df["tweet"] = X
df["sentiment"] = y
sns.set(style="darkgrid")
sns.countplot(x='sentiment', data=df, hue='sentiment')

# Computing the class weight penalties. Keyword arguments are required by
# modern scikit-learn (the positional form was removed).
class_weights = class_weight.compute_class_weight(class_weight='balanced', classes=unique(y), y=y)
#We need the class weights to be a dictionary
class_weights = dict(enumerate(class_weights))

# Multi-label classification: one-hot encode the three classes
y = to_categorical(y, 3)
X = asarray(X)

# Splitting the data sets
test_size = 0.1
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=42)
# + [markdown] id="O4iY5d0qOX34" colab_type="text"
# # Embedding
# + [markdown] id="kZeAR-WGP3Hb" colab_type="text"
# Helper function to create character embedding inputs. We start by creating a nested list of zeros, each representing a character in the sentence (+ padding until the maximum sentence/word length). Then we replace zeros with their corresponding index in the keras tokenizer object.
#
# This function is an equivalent to texts_to_sequences() + pad_sequences().
# + id="6wD6lT79Pj9k" colab_type="code" cellView="code" colab={}
def toCharInput(sentences, tokenizer, s_len, w_len):
"""
Converts a list of sentences into character input
with dimensions (batch_size, word_vocab, char_vocab)
:param sentences: list of strings
:param tokenizer: Tokenizer object fitted on train data
:param s_len: longest possible sentence size
:param w_len: longest possible word size
:return char_input: post-padded character-based input for character embedding layer
"""
sentences_len = len(sentences)
# 3-dimentional matrix where each 0 is a character in a word in a sentence
char_input = [[[0] * w_len for _ in range(s_len)] for _ in range(sentences_len)]
for sentence_i, sentence in enumerate(sentences):
for word_i, word in enumerate(sentence):
if word_i >= s_len:
break
for char_i, char in enumerate(word):
if char_i >= w_len:
break
char_input[sentence_i][word_i][char_i] = tokenizer.word_index.get(char)
return asarray(char_input)
# + [markdown] id="k7eGEg1KXuqi" colab_type="text"
# Build word and character inputs, get the size of word and character vocabularies.
# + id="_z9PZLT6OdTK" colab_type="code" colab={}
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# Maximum sentence length (in tokens) and word length (in characters).
s_length = 100
w_length = 50
# Characters stripped by the word tokenizer; '@' and '#' are absent from the
# filter so mentions/hashtags survive word-level tokenization.
filters = '"$%&*+,-./<=>[\\]`°{|}~\t\n'
# Tokenizer objects
word_tokenizer = Tokenizer(num_words=10000, filters=filters, lower=True, char_level=False)
char_tokenizer = Tokenizer(num_words=10000, char_level=True, lower=False)
# Adapt to the training data (vocabularies are built on the train split only)
word_tokenizer.fit_on_texts(X_train)
char_tokenizer.fit_on_texts(X_train)
# Creating inputs for the character embedding layer
C_train = toCharInput(X_train, char_tokenizer, s_length, w_length)
C_test = toCharInput(X_test, char_tokenizer, s_length, w_length)
# Each word becomes its index from the word->index frequency dictionary
X_train = word_tokenizer.texts_to_sequences(X_train)
X_test = word_tokenizer.texts_to_sequences(X_test)
# Get word/character vocabulary sizes (+1 reserves index 0 for padding)
word_vocab_size = len(word_tokenizer.word_index) + 1
char_vocab_size = len(char_tokenizer.word_index) + 1
# Add zeros to sequences until max sentence length
X_train = pad_sequences(X_train, padding='post', maxlen=s_length)
X_test = pad_sequences(X_test, padding='post', maxlen=s_length)
# + [markdown] id="CCpk_A1vXnYe" colab_type="text"
# Load GloVe or fastText word embeddings.
# + id="q-8EUimQVOke" colab_type="code" colab={}
##### DO NOT RERUN THIS CELL #####
from itertools import islice
# Candidate pre-trained embedding files on the mounted Drive.
es_embd = '/content/drive/My Drive/RD/wiki.es.align.vec'
en_embd = '/content/drive/My Drive/RD/wiki.en.align.vec'
glove = '/content/drive/My Drive/RD/glove.twitter.27B.200d.txt'
# Which embedding family the dispatch cell below loads: 'fasttext' or 'glove'.
EMBD = 'glove'
def load_words(embd_filepath, skip_header=True):
    """Parse a word-embedding text file into a {word: float32 vector} dict.

    Each line is expected to be "word v1 v2 ... vn", space-separated.

    :param embd_filepath: path to the embedding text file
    :param skip_header: skip the first line of the file. fastText ``.vec``
        files start with a "count dim" header line, so the default is True.
        GloVe files have NO header — pass skip_header=False for them,
        otherwise the first word vector is silently dropped.
    :return: dict mapping each word to its numpy float32 vector
    """
    embd_dict = dict()
    start = 1 if skip_header else 0
    with open(embd_filepath, encoding="utf8") as emb_file:
        for line in islice(emb_file, start, None):
            records = line.split(' ')
            word = records[0]
            vector = asarray(records[1:], dtype='float32')
            embd_dict[word] = vector
    return embd_dict
if EMBD == 'fasttext':
    # Aligned fastText: load both languages into a single dictionary.
    embd_dict = load_words(en_embd)
    words_es = load_words(es_embd)
    # Words present in both vocabularies get the element-wise average of
    # their two vectors; Spanish-only words are added as-is.
    for word, vec_es in words_es.items():
        vec_en = embd_dict.get(word)
        if vec_en is None:
            embd_dict[word] = vec_es
        else:
            embd_dict[word] = [(a + b) / 2 for a, b in zip(vec_es, vec_en)]
elif EMBD == 'glove':
    # Twitter-trained GloVe vectors (single file, single language mix).
    embd_dict = load_words(glove)
else:
    raise ValueError("Wrong embedding name.")
# + id="mnvl0rBbiNqa" colab_type="code" colab={}
# Creation of the word embedding matrix: row i holds the pre-trained vector
# of the word with tokenizer index i; rows stay all-zero for OOV words.
# NOTE(review): the width is hard-coded to 300, but glove.twitter.27B.200d
# vectors are 200-dimensional — the row assignment below would fail on a
# shape mismatch. Confirm the loaded embedding file really is 300-d.
word_embedding = zeros((word_vocab_size, 300))
for word, index in word_tokenizer.word_index.items():
    embedding_vector = embd_dict.get(word)
    if embedding_vector is not None:
        word_embedding[index] = embedding_vector
# + [markdown] id="fyS61wFCYmBo" colab_type="text"
# We print the rate of words in our vocabulary that are present in the embedding matrix.
# + id="pTvU1CVvYCGY" colab_type="code" outputId="b39351bd-784b-4a96-f8ce-a0ad506f285e" colab={"base_uri": "https://localhost:8080/", "height": 34}
from numpy import count_nonzero
# Share of vocabulary rows with at least one non-zero entry, i.e. words that
# received a pre-trained vector above.
rate = (count_nonzero(count_nonzero(word_embedding, axis=1)) / word_vocab_size) * 100
print(str(rate) + "%", "of the vocabulary was found in the pre-trained embedding.")
# + [markdown] id="rUm6sKM-ZJA3" colab_type="text"
# # Embedding layer
# + [markdown] id="ZYlSnTN0aiUK" colab_type="text"
# Concatenation of pre-trained word Embeddings (GloVe) and randomly initialized character embeddings.
# + id="io8IVxwpZML0" colab_type="code" colab={}
def embd_layer(vocab_word_size,
               vocab_char_size,
               word_embd_dim,
               char_embd_dim,
               word_embd_weights,
               char_embd_weights = None,
               word_embd_trainable = False,
               char_embd_trainable = True,
               max_word_len = None,
               char_hidden_type = 'lstm',
               char_hidden_dim = 25):
    """
    Returns both inputs and the embedding layer.

    Builds a joint embedding: a (frozen, pre-trained) word embedding
    concatenated with a character embedding summarized per word by a BiLSTM.

    :param vocab_word_size: word vocabulary size (rows of the word embedding)
    :param vocab_char_size: character vocabulary size
    :param word_embd_dim: word embedding dimension
    :param char_embd_dim: character embedding dimension
    :param word_embd_weights: list containing the pre-trained weight matrix
    :param char_embd_weights: optional pre-trained char weights (random init if None)
    :param word_embd_trainable: fine-tune word embeddings if True
    :param char_embd_trainable: fine-tune char embeddings if True
    :param max_word_len: maximum word length (chars per word in the input)
    :param char_hidden_type: NOTE(review): currently unused — a BiLSTM is
        always built regardless of this value
    :param char_hidden_dim: hidden units per direction of the char BiLSTM
    :return: ([word_input, char_input], concatenated embedding tensor)
    """
    # Two symbolic inputs: word indices and per-word character indices.
    word_input_layer = Input(shape=(None,), name='Input_Word')
    char_input_layer = Input(shape=(None, max_word_len), name='Input_Char')
    word_embd_layer = Embedding(
        input_dim=vocab_word_size,
        output_dim=word_embd_dim,
        weights=word_embd_weights,
        trainable=word_embd_trainable,
        name='Embedding_Word',
    )(word_input_layer)
    char_embd_layer = Embedding(
        input_dim=vocab_char_size,
        output_dim=char_embd_dim,
        weights=char_embd_weights,
        embeddings_initializer='random_uniform',
        trainable=char_embd_trainable,
        name='Embedding_Char_Pre',
    )(char_input_layer)
    # BiLSTM collapses each word's character sequence into one vector.
    char_hidden_layer = Bidirectional(LSTM(
        units=char_hidden_dim,
        input_shape=(max_word_len, vocab_char_size),
        return_sequences=False,
        return_state=False))
    # TimeDistributed applies the char BiLSTM independently to every word.
    char_embd_layer = TimeDistributed(layer=char_hidden_layer, name='Embedding_Char_Final',)(char_embd_layer)
    embd_layer = Concatenate(name='Embedding',)([word_embd_layer, char_embd_layer])
    return [word_input_layer, char_input_layer], embd_layer
# + [markdown] id="H4vpJlHRgrYb" colab_type="text"
# # Model
# + [markdown] id="kFdhrP8zPVNV" colab_type="text"
# Building the neural network.
# + id="ZfpUhH1Ygtlh" colab_type="code" colab={}
from keras.models import Model
from keras.layers.core import Dropout, Dense
from keras.layers import Input, TimeDistributed, LSTM, Bidirectional, Concatenate
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.layers.embeddings import Embedding
from keras.regularizers import l2
from keras.optimizers import Adam
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
""" Parameters """
joint_embd = True       # use the joint word+char embedding (False: word-only)
h_dropout = 0.5         # dropout rate after pooling and after the dense layer
l2_reg = 0.001          # L2 kernel penalty on conv and dense layers
epochs = 30             # upper bound; early stopping may end training sooner
batch_size = 64
lr = 0.0001             # NOTE(review): `lr=` is the legacy Adam kwarg; newer
                        # Keras versions expect `learning_rate=` — confirm.
val_size = 0.15         # fraction of the train split held out for validation
dense_units = 32
# Joint word and character embedding
if joint_embd:
    inputs, embedding_layer = embd_layer(word_vocab_size, char_vocab_size, 300, 50,
                                         [word_embedding], max_word_len=w_length,
                                         word_embd_trainable=False)
else:
    # Input
    inputs = Input(shape=(None,), name='Input')
    # Embedding
    embedding_layer = Embedding(word_vocab_size, 300, weights=[word_embedding],
                                input_length=s_length, trainable=False)(inputs)
# Convolution: 64 filters of width 3 over the embedded token sequence
convo_layer_1 = Conv1D(64, 3, activation='relu', name='Convolution_1', kernel_regularizer=l2(l2_reg))(embedding_layer)
pooling_layer_1 = GlobalMaxPooling1D(name='Pooling_1')(convo_layer_1)
# Activation
dropout_layer = (Dropout(h_dropout))(pooling_layer_1)
dense_layer = Dense(dense_units, activation='relu', name='Fully_conn', kernel_regularizer=l2(l2_reg))(dropout_layer)
dropout_layer = (Dropout(h_dropout))(dense_layer)
# Classification: softmax over the 3 sentiment classes
dense_layer = Dense(3, activation='softmax', name='Dense_classification')(dropout_layer)
# Model
model = Model(inputs=inputs, outputs=dense_layer)
model.compile(optimizer=Adam(lr=lr), loss='categorical_crossentropy', metrics=['acc'])
# + [markdown] id="7aXjlXWEVcCy" colab_type="text"
# Training the model with early stopping to speed up the process in case the model doesn't improve, and class weights to counter the class imbalance in the training data.
# + id="csi2hovVUHSg" colab_type="code" colab={}
from keras.callbacks import EarlyStopping
# Stop when validation loss fails to improve by >= 0.001 for 2 epochs in a row.
early_stop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=2, mode='min', verbose=1)
# Two inputs (word indices + char indices) matching the joint-embedding model;
# class_weights counteracts the label imbalance measured earlier.
history = model.fit([X_train, C_train],
                    y_train, batch_size=batch_size,
                    epochs=epochs, validation_split=val_size,
                    verbose=1, callbacks=[early_stop], class_weight=class_weights)
# + [markdown] id="lJ4DfxrBV7v7" colab_type="text"
# # Evaluation
# + [markdown] id="hfrVnhMzWGQg" colab_type="text"
# Keras metrics are not reliable as they measure the average accuracy. As we deal with multilabel classification, we also report scikit-learn's weighted precision, recall and F1 on the test set.
# + id="KgcZFj0MV-Ak" colab_type="code" outputId="34b6529c-c193-43c0-93ac-fc95d8a0a2e1" colab={"base_uri": "https://localhost:8080/", "height": 721}
from numpy import argmax
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
import matplotlib.pyplot as plt

# Predicted class = argmax over the 3 softmax outputs; gold labels are
# recovered the same way from the one-hot test targets.
y_pred = model.predict([X_test, C_test])
y_gold = [argmax(i) for i in y_test]
y_scores = [argmax(i) for i in y_pred]

print("scikit evaluation on test set...")
print("Accuracy: " + str(accuracy_score(y_gold, y_scores)))
print("Precision: " + str(precision_score(y_gold, y_scores, average='weighted')))
print("Recall: " + str(recall_score(y_gold, y_scores, average='weighted')))
print("F1 Score: " + str(f1_score(y_gold, y_scores, average='weighted')))

print("Keras evaluation on test set...")
results = model.evaluate([X_test, C_test], y_test)
print(results)

def _plot_history(metric, label):
    """Plot the train/validation curves of one keras history metric."""
    plt.plot(history.history[metric])
    plt.plot(history.history['val_' + metric])
    plt.title('model ' + label)
    plt.ylabel(label)
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()

# The two plots previously used duplicated code; one helper covers both.
_plot_history('acc', 'accuracy')
_plot_history('loss', 'loss')
# + [markdown] id="8U522yWQLq4_" colab_type="text"
# # Confusion matrix
# + id="06Xr5NvKLn9J" colab_type="code" colab={}
from sklearn.metrics import confusion_matrix
import numpy as np
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.YlOrRd):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    :param cm: square confusion matrix (e.g. from sklearn.metrics)
    :param classes: tick labels, in the same order as the matrix rows
    :param normalize: if True, scale each row to sum to 1
    :param title: figure title
    :param cmap: matplotlib colormap for the heat map
    """
    import itertools
    if normalize:
        # row-normalize: each row becomes the distribution over predictions
        # for that true class
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
        #print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    plt.grid(None)
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    # mid-scale threshold: white text on dark cells, black on light ones
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()
    # The `quality=` kwarg was removed from savefig in Matplotlib 3.6 (and was
    # ignored for PNG output anyway), so it is dropped here.
    plt.savefig('/content/drive/My Drive/RD/CNNenhanced.png', bbox_inches='tight')
# + id="XWTDi32MLwvH" colab_type="code" outputId="d2b0441a-a493-4be8-c613-9d7bb4c33ee9" colab={"base_uri": "https://localhost:8080/", "height": 293}
# Confusion matrix over the held-out test predictions; label order matches the
# class ids used throughout this notebook (0, 1, 2).
cnf_matrix = confusion_matrix(y_gold, y_scores, labels=[0, 1, 2])
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Positive', 'Neutral', 'Negative'], title='')
# + [markdown] id="ZxxfUcjtbrV_" colab_type="text"
# # Logs
# + [markdown] id="qk_miKAxfLok" colab_type="text"
# I choose to keep track of global model parameters, but it is also possible to use the Callback() class from keras to produce per-epoch logs of loss and accuracy.
#
# These logs are directly appended to a text file located on the Drive.
# + id="PjKmec8abs3A" colab_type="code" colab={}
import datetime

# Human-readable tag describing this model configuration in the log file.
version = '1.3.2 - 1 convolution, 2 dense with dropout between the 2. fasttext + class weights + charEmbd'

# Global hyper-parameters recorded alongside the per-epoch metrics.
log_hyperparam = {
    "validation": val_size,
    "s_length": s_length,
    "w_length": w_length,
    "epochs": epochs,
    "batch": batch_size,
    "hidden_dropout": h_dropout,
    "L2": l2_reg,
    "l_rate": lr,
}

log_filepath = '/content/drive/My Drive/RD/results_log.txt'

print("Writing logs...")
with open(log_filepath, "a+") as out_file:
    banner = "#" * 8
    out_file.write(banner + str(datetime.datetime.now()) + banner + "\n")
    out_file.write("Model version:" + version + "\n")
    out_file.write(str(log_hyperparam) + "\n")
    # One line per metric series, values formatted to three decimals.
    for label, series in (("Train accuracy", 'acc'),
                          ("Val accuracy", 'val_acc'),
                          ("Train loss", 'loss'),
                          ("Val loss", 'val_loss')):
        out_file.write(label + str(["%.3f" % a for a in history.history[series]]) + "\n")
    out_file.write("Keras test metrics: " + str(results) + "\n")
    out_file.write("\n")
print("Done.")
# + id="iAhMA2yihNkq" colab_type="code" colab={}
| convolutional_sentiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python3.6
# language: python
# name: python3.6
# ---
# # 方案:DQN + TFIDF + Cosine + tri-gram
#
# ---
#
# step 1. 构造QQ+A模式的多轮训练集D
#
# step 2. 在训练集D上面训练上sentence2vec模型、TFIDF-IR模型,以tri-gram作为文本特征
#
# step 3. 训练一个DQN模型从TFIDF-IR模型给出的候选答案中提取使得BLEU得分最大的答案
# +
import os
from tqdm import tqdm
import sys
import pprint
# Make the project package (jddc) importable from this notebook's runtime.
sys.path.insert(0, "/home/team55/notespace/zengbin")
from jddc.config import DQNConfig
from jddc.embedding import load_s2v_model
from jddc.tfidf import load_tfidf_ir_model
import jddc.utils as u
from jddc.dqn import *
from torch.autograd import Variable
# -
conf = DQNConfig()
# ## Load the data, the sentence2vec model and the TFIDF-IR model
# ---
# Multi-turn QA sessions deserialized from a pickle (presumably the
# 1000-session subset, judging by the attribute name — verify in DQNConfig).
sessions = u.read_from_pkl(conf.pkl_mqa_1000)
s2v_model = load_s2v_model()
ir_model = load_tfidf_ir_model()
# ## Train the DQN
#
# ---
trainer = DQNTrainer(sessions, s2v_model, ir_model, use_cuda=True)
trainer.run()
| final/ipynb/dqn_tfidf_ir.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ''
# name: sagemath
# ---
# + language="html"
# <link href="http://mathbook.pugetsound.edu/beta/mathbook-content.css" rel="stylesheet" type="text/css" />
# <link href="https://aimath.org/mathbook/mathbook-add-on.css" rel="stylesheet" type="text/css" />
# <style>.subtitle {font-size:medium; display:block}</style>
# <link href="https://fonts.googleapis.com/css?family=Open+Sans:400,400italic,600,600italic" rel="stylesheet" type="text/css" />
# <link href="https://fonts.googleapis.com/css?family=Inconsolata:400,700&subset=latin,latin-ext" rel="stylesheet" type="text/css" /><!-- Hide this cell. -->
# <script>
# var cell = $(".container .cell").eq(0), ia = cell.find(".input_area")
# if (cell.find(".toggle-button").length == 0) {
# ia.after(
# $('<button class="toggle-button">Toggle hidden code</button>').click(
# function (){ ia.toggle() }
# )
# )
# ia.hide()
# }
# </script>
#
# -
# **Important:** to view this notebook properly you will need to execute the cell above, which assumes you have an Internet connection. It should already be selected, or place your cursor anywhere above to select. Then press the "Run" button in the menu bar above (the right-pointing arrowhead), or press Shift-Enter on your keyboard.
# $\newcommand{\identity}{\mathrm{id}}
# \newcommand{\notdivide}{\nmid}
# \newcommand{\notsubset}{\not\subset}
# \newcommand{\lcm}{\operatorname{lcm}}
# \newcommand{\gf}{\operatorname{GF}}
# \newcommand{\inn}{\operatorname{Inn}}
# \newcommand{\aut}{\operatorname{Aut}}
# \newcommand{\Hom}{\operatorname{Hom}}
# \newcommand{\cis}{\operatorname{cis}}
# \newcommand{\chr}{\operatorname{char}}
# \newcommand{\Null}{\operatorname{Null}}
# \newcommand{\lt}{<}
# \newcommand{\gt}{>}
# \newcommand{\amp}{&}
# $
# <div class="mathbook-content"><h2 class="heading hide-type" alt="Exercises 2.4 Programming Exercises"><span class="type">Section</span><span class="codenumber">2.4</span><span class="title">Programming Exercises</span></h2><a href="integers-exercises-programming.ipynb" class="permalink">¶</a></div>
# <div class="mathbook-content"><article class="exercise-like" id="exercise-62"><h6 class="heading"><span class="codenumber">1</span><span class="title">The Sieve of Eratosthenes</span></h6><p id="p-324">One method of computing all of the prime numbers less than a certain fixed positive integer $N$ is to list all of the numbers $n$ such that $1 \lt n \lt N\text{.}$ Begin by eliminating all of the multiples of 2. Next eliminate all of the multiples of 3. Now eliminate all of the multiples of 5. Notice that 4 has already been crossed out. Continue in this manner, noticing that we do not have to go all the way to $N\text{;}$ it suffices to stop at $\sqrt{N}\text{.}$ Using this method, compute all of the prime numbers less than $N = 250\text{.}$ We can also use this method to find all of the integers that are relatively prime to an integer $N\text{.}$ Simply eliminate the prime factors of $N$ and all of their multiples. Using this method, find all of the numbers that are relatively prime to $N= 120\text{.}$ Using the Sieve of Eratosthenes, write a program that will compute all of the primes less than an integer $N\text{.}$</p></article></div>
# <div class="mathbook-content"><article class="exercise-like" id="exercise-63"><h6 class="heading"><span class="codenumber">2</span></h6><p id="p-325">Let ${\mathbb N}^0 = {\mathbb N} \cup \{ 0 \}\text{.}$ Ackermann's function is the function $A :{\mathbb N}^0 \times {\mathbb N}^0 \rightarrow {\mathbb N}^0$ defined by the equations</p><div class="displaymath">
# \begin{align*}
# A(0, y) & = y + 1,\\
# A(x + 1, 0) & = A(x, 1),\\
# A(x + 1, y + 1) & = A(x, A(x + 1, y)).
# \end{align*}
# </div><p>Use this definition to compute $A(3, 1)\text{.}$ Write a program to evaluate Ackermann's function. Modify the program to count the number of statements executed in the program when Ackermann's function is evaluated. How many statements are executed in the evaluation of $A(4, 1)\text{?}$ What about $A(5, 1)\text{?}$</p></article></div>
# <div class="mathbook-content"><article class="exercise-like" id="exercise-64"><h6 class="heading"><span class="codenumber">3</span></h6><p id="p-326">Write a computer program that will implement the Euclidean algorithm. The program should accept two positive integers $a$ and $b$ as input and should output $\gcd( a,b)$ as well as integers $r$ and $s$ such that</p><div class="displaymath">
# \begin{equation*}
# \gcd( a,b) = ra + sb.
# \end{equation*}
# </div></article></div>
| aata/integers-exercises-programming.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # From A2C to PPO
# ## Helper function
# +
import numpy as np
import gym
import time
import scipy.signal
from gym.spaces import Box, Discrete
import pathlib
import torch
import torch.nn as nn
from torch.distributions.normal import Normal
from torch.distributions.categorical import Categorical
from torch.optim import Adam
import wandb
# +
def combined_shape(length, shape=None):
    """Build a buffer shape tuple with `length` as the leading dimension.

    shape=None  -> (length,)
    scalar s    -> (length, s)
    tuple/list  -> (length, *shape)
    """
    if shape is None:
        return (length,)
    if np.isscalar(shape):
        return (length, shape)
    return (length, *shape)
def mlp(sizes, activation, output_activation=nn.Identity):
    """Build a fully connected network as an nn.Sequential.

    `activation` follows every hidden Linear layer; `output_activation`
    follows the final one.
    """
    layers = []
    last_hidden = len(sizes) - 2
    for j, (n_in, n_out) in enumerate(zip(sizes[:-1], sizes[1:])):
        layers.append(nn.Linear(n_in, n_out))
        layers.append(activation() if j < last_hidden else output_activation())
    return nn.Sequential(*layers)
def count_vars(module):
    """Total number of scalar parameters in a torch module."""
    return sum(np.prod(p.shape) for p in module.parameters())
def discount_cumsum(x, discount):
    """
    Discounted cumulative sums over a vector (rllab trick).

    For x = [x0, x1, x2] the result is
        [x0 + discount*x1 + discount^2*x2,
         x1 + discount*x2,
         x2]
    """
    # Run the IIR filter y[n] = x[n] + discount*y[n-1] over the reversed
    # input, then reverse back so sums accumulate from the trajectory's end.
    reversed_x = x[::-1]
    discounted = scipy.signal.lfilter([1], [1, float(-discount)], reversed_x, axis=0)
    return discounted[::-1]
# -
# ## Model
# +
class Actor(nn.Module):
    """Abstract policy base: subclasses provide the distribution builder and
    the log-probability evaluator."""

    def _distribution(self, obs):
        # Build the action distribution for `obs`.
        raise NotImplementedError

    def _log_prob_from_distribution(self, pi, act):
        # Log-likelihood of `act` under distribution `pi`.
        raise NotImplementedError

    def forward(self, obs, act=None):
        """Return (pi, logp_a): the action distribution for `obs` and,
        when `act` is given, its log-likelihood under that distribution
        (otherwise None)."""
        dist = self._distribution(obs)
        log_prob = None if act is None else self._log_prob_from_distribution(dist, act)
        return dist, log_prob
class MLPCategoricalActor(Actor):
    """Categorical policy for discrete action spaces, parameterized by an
    MLP that maps observations to per-action logits."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        layer_sizes = [obs_dim, *hidden_sizes, act_dim]
        self.logits_net = mlp(layer_sizes, activation)

    def _distribution(self, obs):
        return Categorical(logits=self.logits_net(obs))

    def _log_prob_from_distribution(self, pi, act):
        return pi.log_prob(act)
class MLPGaussianActor(Actor):
    """Diagonal-Gaussian policy for continuous action spaces: a
    state-dependent mean from an MLP plus a state-independent learnable
    log standard deviation."""

    def __init__(self, obs_dim, act_dim, hidden_sizes, activation):
        super().__init__()
        initial_log_std = -0.5 * np.ones(act_dim, dtype=np.float32)
        self.log_std = torch.nn.Parameter(torch.as_tensor(initial_log_std))
        self.mu_net = mlp([obs_dim, *hidden_sizes, act_dim], activation)

    def _distribution(self, obs):
        return Normal(self.mu_net(obs), torch.exp(self.log_std))

    def _log_prob_from_distribution(self, pi, act):
        # Normal gives per-component log-probs; sum over the last axis to get
        # the joint log-likelihood of the whole action vector.
        return pi.log_prob(act).sum(axis=-1)
class MLPCritic(nn.Module):
    """State-value function V(s) implemented as an MLP with a scalar head."""

    def __init__(self, obs_dim, hidden_sizes, activation):
        super().__init__()
        sizes = [obs_dim, *hidden_sizes, 1]
        self.v_net = mlp(sizes, activation)

    def forward(self, obs):
        # Squeeze the trailing singleton dim so values come out with shape
        # (batch,) rather than (batch, 1) — critical for downstream math.
        values = self.v_net(obs)
        return torch.squeeze(values, -1)
class MLPActorCritic(nn.Module):
    """
    Actor-critic pair: a policy ``pi`` matched to the action space type and
    a value function ``v``, sharing the same MLP architecture settings.

    =========== ================ ======================================
    Symbol      Shape            Description
    =========== ================ ======================================
    ``a``       (batch, act_dim) | Numpy array of actions for each
                                 | observation.
    ``v``       (batch,)         | Numpy array of value estimates
                                 | for the provided observations.
    ``logp_a``  (batch,)         | Numpy array of log probs for the
                                 | actions in ``a``.
    =========== ================ ======================================

    The ``pi`` module's forward call accepts a batch of observations and
    optionally a batch of actions, returning a distribution and (when
    actions are given) their log-likelihoods. The ``v`` module's forward
    call accepts a batch of observations and returns a flat (batch,)
    tensor of value estimates.
    """

    def __init__(self, observation_space, action_space,
                 hidden_sizes=(64,64), activation=nn.Tanh):
        super().__init__()

        obs_dim = observation_space.shape[0]

        # policy builder depends on action space
        if isinstance(action_space, Box):
            self.pi = MLPGaussianActor(obs_dim, action_space.shape[0], hidden_sizes, activation)
        elif isinstance(action_space, Discrete):
            self.pi = MLPCategoricalActor(obs_dim, action_space.n, hidden_sizes, activation)
        else:
            # Fail fast: the original left self.pi unset for unsupported
            # spaces and crashed later with a confusing AttributeError.
            raise TypeError("Unsupported action space type: %r" % type(action_space))

        # build value function
        self.v = MLPCritic(obs_dim, hidden_sizes, activation)

    def step(self, obs):
        """Sample an action for `obs` and return (action, value, logp)
        as numpy arrays, with gradient tracking disabled."""
        with torch.no_grad():
            pi = self.pi._distribution(obs)
            a = pi.sample()
            logp_a = self.pi._log_prob_from_distribution(pi, a)
            v = self.v(obs)
        return a.numpy(), v.numpy(), logp_a.numpy()

    def act(self, obs):
        """Sample an action only (value and logp are discarded)."""
        return self.step(obs)[0]
# -
# ## Agent
# $L^{CLIP}(\theta) = E_t[min(r_t(\theta)\hat A_t, clip(r_t(\theta), 1-\epsilon, 1+\epsilon)\hat A_t)]$
class PPOAgent():
    """ update model and take action

    Wraps an MLPActorCritic with the PPO-Clip update:
    L^CLIP = E[min(r*A, clip(r, 1-eps, 1+eps)*A)], r = pi_new/pi_old,
    with early stopping on approximate KL divergence.
    """

    def __init__(self, observation_space, action_space, train_pi_iters, train_v_iters, clip_ratio, pi_lr, vf_lr,
                 target_kl, hidden_sizes=(256, 256), activation=nn.ReLU):
        self.train_pi_iters = train_pi_iters   # max policy gradient steps per update
        self.train_v_iters = train_v_iters     # value-function gradient steps per update
        self.clip_ratio = clip_ratio           # epsilon in the PPO clip objective
        self.target_kl = target_kl             # early-stop when KL > 1.5 * target_kl

        self.ac = MLPActorCritic(observation_space, action_space, hidden_sizes=hidden_sizes, activation=activation)

        # Set up optimizers for policy and value function
        self.pi_optimizer = Adam(self.ac.pi.parameters(), lr=pi_lr)
        self.vf_optimizer = Adam(self.ac.v.parameters(), lr=vf_lr)

        # Count variables
        var_counts = tuple(count_vars(module) for module in [self.ac.pi, self.ac.v])
        print('\nNumber of parameters: \t pi: %d, \t v: %d\n'%var_counts)

    # Set up function for computing PPO policy loss
    def _compute_loss_pi(self, data):
        """Return (clipped surrogate loss, info dict with kl/ent/clipfrac)."""
        obs, act, adv, logp_old = data['obs'], data['act'], data['adv'], data['logp']

        # Policy loss
        pi, logp = self.ac.pi(obs, act) # logp: pi_new(a_t|s_t); logp_old: pi_old(a_t|s_t), calculated by policy with params when collecting data.
        ratio = torch.exp(logp - logp_old)
        clip_adv = torch.clamp(ratio, 1-self.clip_ratio, 1+self.clip_ratio) * adv
        loss_pi = -(torch.min(ratio * adv, clip_adv)).mean() # expectation under pi_old, since act is sample from pi_old

        # Useful extra info; this is the cheap first-order KL estimate
        approx_kl = (logp_old - logp).mean().item()
        ent = pi.entropy().mean().item()
        clipped = ratio.gt(1+self.clip_ratio) | ratio.lt(1-self.clip_ratio)
        clipfrac = torch.as_tensor(clipped, dtype=torch.float32).mean().item()
        pi_info = dict(kl=approx_kl, ent=ent, cf=clipfrac)

        return loss_pi, pi_info

    # Set up function for computing value loss
    def _compute_loss_v(self, data):
        """Mean squared error between V(obs) and the empirical returns."""
        obs, ret = data['obs'], data['ret']
        return ((self.ac.v(obs) - ret)**2).mean()

    def update(self, data):
        """Run one PPO update (policy + value) on a batch of collected data
        and log diagnostics to wandb."""
        # compute loss of pi and v before updating, used to calculate DeltaLossPi and DeltaLossV
        pi_l_old, pi_info_old = self._compute_loss_pi(data)
        pi_l_old = pi_l_old.item()
        v_l_old = self._compute_loss_v(data).item()

        # Train policy with multiple steps of gradient descent
        for i in range(self.train_pi_iters): # in vanilla PG, the policy is trained with a single step
            self.pi_optimizer.zero_grad()
            loss_pi, pi_info = self._compute_loss_pi(data)
            kl = pi_info['kl']
            if kl > 1.5 * self.target_kl:
                print('Early stopping at step %d due to reaching max kl.'%i)
                break
            loss_pi.backward()
            self.pi_optimizer.step()

        # Value function learning
        for i in range(self.train_v_iters):
            self.vf_optimizer.zero_grad()
            loss_v = self._compute_loss_v(data)
            loss_v.backward()
            self.vf_optimizer.step()

        # Log changes from update (entropy is logged from the pre-update policy)
        kl, ent, cf = pi_info['kl'], pi_info_old['ent'], pi_info['cf']
        wandb.log({"LossPi":pi_l_old, "LossV":v_l_old, "KL":kl, "Entropy":ent, "ClipFrac":cf,
                   "DeltaLossPi":(loss_pi.item() - pi_l_old),
                   "DeltaLossV":(loss_v.item() - v_l_old)})

    def get_action(self, obs):
        """Sample an action for `obs` from the current policy.

        Bug fix: the original signature was ``get_action(obs)`` — `self` was
        missing, so any call raised a TypeError and `self` inside the body
        was undefined.
        """
        return self.ac.act(obs)
# ## Buffer
# $\hat A_t = \delta_t + (\lambda \gamma)\delta_{t+1}+ \cdots + (\lambda \gamma)^{T-t+1}\delta_{T-1}$,
# where $\delta_t = r_t + \gamma V(s_{t+1}) - V(s_t)$
#
# $V(s_t) = \sum_{t'=t}^{t+n} \gamma^{t'-t} r(s_{t'}, a_{t'}) + V(s_{t+n})$
class PPOBuffer:
    """
    Fixed-size storage for one epoch of agent-environment interaction.
    On `finish_path` it computes GAE-Lambda advantage estimates and
    rewards-to-go; `get` hands everything to the PPO update as tensors.
    """

    def __init__(self, obs_dim, act_dim, size, gamma=0.99, lam=0.95):
        self.obs_buf = np.zeros(combined_shape(size, obs_dim), dtype=np.float32)
        self.act_buf = np.zeros(combined_shape(size, act_dim), dtype=np.float32)
        self.adv_buf = np.zeros(size, dtype=np.float32)
        self.rew_buf = np.zeros(size, dtype=np.float32)
        self.ret_buf = np.zeros(size, dtype=np.float32)
        self.val_buf = np.zeros(size, dtype=np.float32)
        self.logp_buf = np.zeros(size, dtype=np.float32)
        self.gamma, self.lam = gamma, lam
        self.ptr, self.path_start_idx, self.max_size = 0, 0, size

    def store(self, obs, act, rew, val, logp):
        """Record a single timestep at the current write position."""
        assert self.ptr < self.max_size  # buffer must have room left
        idx = self.ptr
        self.obs_buf[idx] = obs
        self.act_buf[idx] = act
        self.rew_buf[idx] = rew
        self.val_buf[idx] = val
        self.logp_buf[idx] = logp
        self.ptr = idx + 1

    def finish_path(self, last_val=0):
        """Close out the trajectory that started at `path_start_idx`.

        Computes GAE-Lambda advantages and rewards-to-go for the slice of
        the buffer covering the trajectory. Pass `last_val=0` if the episode
        ended in a terminal state; otherwise pass V(s_T) so the return is
        bootstrapped past the cutoff (timeout or epoch boundary).
        """
        traj = slice(self.path_start_idx, self.ptr)
        rewards = np.append(self.rew_buf[traj], last_val)
        values = np.append(self.val_buf[traj], last_val)
        # GAE-Lambda: delta_t = r_t + gamma*V(s_{t+1}) - V(s_t)
        deltas = rewards[:-1] + self.gamma * values[1:] - values[:-1]
        self.adv_buf[traj] = discount_cumsum(deltas, self.gamma * self.lam)
        # Rewards-to-go are the regression targets for the value function.
        self.ret_buf[traj] = discount_cumsum(rewards, self.gamma)[:-1]
        self.path_start_idx = self.ptr

    def get(self):
        """Return the whole epoch of data as float32 tensors and reset pointers.

        Must only be called when the buffer is full. (The advantage
        normalization trick from the reference implementation is left out.)
        """
        assert self.ptr == self.max_size  # buffer must be full
        self.ptr, self.path_start_idx = 0, 0
        raw = dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf,
                   adv=self.adv_buf, logp=self.logp_buf)
        return {key: torch.as_tensor(arr, dtype=torch.float32) for key, arr in raw.items()}
# ## Training and testing
# +
if __name__ == "__main__":
    # ---- Hyperparameters (tracked via wandb config) ----
    wandb.init(project="ppo")
    config = wandb.config
    config.logdir = pathlib.Path(".")
    config.env = "HalfCheetah-v2"
    config.seed = 0
    config.steps_per_epoch = 4000
    config.epochs = 50
    config.gamma = 0.99
    config.clip_ratio = 0.2
    config.pi_lr = 3e-4
    config.vf_lr = 1e-3
    config.train_pi_iters = 80
    config.train_v_iters = 80
    config.lam = 0.97  # Lambda for GAE-Lambda. (Always between 0 and 1, close to 1.)
    config.max_ep_len = 1000
    config.target_kl = 0.01
    config.save_freq = 10

    # ---- Random seed and num_threads ----
    torch.manual_seed(config.seed)
    np.random.seed(config.seed)
    torch.set_num_threads(torch.get_num_threads())

    # ---- Environment ----
    env, test_env = gym.make(config.env), gym.make(config.env)
    obs_dim = env.observation_space.shape
    act_dim = env.action_space.shape
    print(obs_dim, act_dim)

    # ---- Experience buffer ----
    buffer = PPOBuffer(obs_dim, act_dim, config.steps_per_epoch, config.gamma, config.lam)

    # Prepare for interaction with environment
    start_time = time.time()
    o, ep_ret, ep_len = env.reset(), 0, 0  # obs, episode total reward, episode length

    # ---- Agent ----
    agent = PPOAgent(env.observation_space,
                     env.action_space,
                     config.train_pi_iters,
                     config.train_v_iters,
                     config.clip_ratio,
                     config.pi_lr,
                     config.vf_lr,
                     config.target_kl)

    # Main loop: collect experience in env and update/log each epoch
    for epoch in range(config.epochs):
        for t in range(config.steps_per_epoch):
            a, v, logp = agent.ac.step(torch.as_tensor(o, dtype=torch.float32))
            next_o, r, d, _ = env.step(a)
            ep_ret += r
            ep_len += 1

            # save and log
            buffer.store(o, a, r, v, logp)
            wandb.log({"VVals": v})  # fixed typo: key was "VVlas"

            # Update obs (critical!)
            o = next_o

            timeout = ep_len == config.max_ep_len
            terminal = d or timeout
            epoch_ended = t == config.steps_per_epoch - 1
            if terminal or epoch_ended:
                if epoch_ended and not terminal:
                    print('Warning: trajectory cut off by epoch at %d steps.' % ep_len, flush=True)
                # If the trajectory didn't reach a terminal state,
                # bootstrap the value target with V(s_T).
                if timeout or epoch_ended:
                    _, v, _ = agent.ac.step(torch.as_tensor(o, dtype=torch.float32))
                else:
                    v = 0
                # Compute advantages and returns once an episode is finished.
                buffer.finish_path(v)
                if terminal:
                    # only save EpRet / EpLen if trajectory finished
                    wandb.log({"EpRet": ep_ret, "EpLen": ep_len})
                o, ep_ret, ep_len = env.reset(), 0, 0

        # Save model
        if (epoch % config.save_freq == 0) or (epoch == config.epochs - 1):
            torch.save(agent.ac.state_dict(), "model.h5")
            wandb.save("model.h5")

        # Perform PPO update! (Per-epoch metrics are logged inside agent.update
        # via wandb, replacing the old spinningup logger.)
        data = buffer.get()
        agent.update(data)
# -
| PPO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# # Ejercicios funciones
# ## Ejercicio 1
# Escribe una función que convierta números del 1 al 7, en nombres de los días de la semana. La función constará de un único argumento numérico y una salida de tipo string
# + tags=[]
def dias_semanas(numero):
    """Devuelve el nombre del día de la semana correspondiente a `numero`.

    Args:
        numero: entero entre 1 (lunes) y 7 (domingo).

    Returns:
        str: nombre del día, o "No es un número válido" fuera de rango.
    """
    nombres = ("lunes", "martes", "miércoles", "jueves", "viernes", "sábado", "domingo")
    if isinstance(numero, int) and 1 <= numero <= 7:
        return nombres[numero - 1]
    return "No es un número válido"

# Fixed: the original call passed no argument (TypeError: missing `numero`).
a = dias_semanas(5)
print(a)
# -
# ## Ejercicio 2
# En el ejercicio 8 de bucles, creábamos una pirámide invertida, cuyo número de pisos venía determinado por un input del usuario. Crear una función que replique el comportamiento de la pirámide. Utiliza un único parámetro de entrada de la función para determinar el número de filas de la pirámide, es decir, elimina la sentencia input.
# + tags=[]
def piramide_invertida(filas):
    """Devuelve la pirámide invertida de '*' con `filas` pisos como un string.

    Fila superior con `filas` asteriscos, decreciendo hasta 1.
    (El código original dejaba el cuerpo del bucle vacío: SyntaxError.)
    """
    return "\n".join("*" * i for i in range(filas, 0, -1))

n = 7
print(piramide_invertida(n))
# -
# ## Ejercicio 3
# Escribe una función que compare dos números. La función tiene dos argumentos y hay tres salidas posibles: que sean iguales, que el primero sea mayor que el segundo, o que el segundo sea mayor que el primero
# + tags=[]
def compara_numeros(n1, n2):
    """Compara dos números y devuelve la relación como texto, p.ej. "3 < 4"."""
    if n1 == n2:
        simbolo = "="
    elif n1 > n2:
        simbolo = ">"
    else:
        simbolo = "<"
    return f"{n1} {simbolo} {n2}"

print(compara_numeros(3, 4))
print(compara_numeros(3, 3))
print(compara_numeros(5, 4))
# -
# ## Ejercicio 4
# Escribe una función que sea un contador de letras. En el primer argumento tienes que introducir un texto, y en el segundo, la letra a contar. La función tiene que devolver un entero con el número de veces que aparece esa letra, tanto mayúscula, como minúscula
# ## Ejercicio 5
# Escribe una función que tenga un único argumento, un string. La salida de la función tiene que ser un diccionario con el conteo de todas las letras de ese string.
# +
def funcion(cadena):
    """Cuenta las apariciones de cada carácter de `cadena`.

    Devuelve un diccionario {carácter: nº de apariciones}. Una sola pasada
    O(n) en lugar del doble bucle O(n·m) original (mismo resultado).
    """
    dic = {}
    for letra in cadena:
        dic[letra] = dic.get(letra, 0) + 1
    return dic

funcion("palabra")
# +
# Demo: a dict key can be created, overwritten, and updated in place.
dic = {}
dic['a'] = 10
print(dic['a'])  # 10
dic['a'] = 20
print(dic['a'])  # 20 (overwritten)
dic['a'] += 1    # equivalent to dic['a'] = dic['a'] + 1
print(dic['a'])  # 21
# +
def cuenta_letras(cadena):
    """Devuelve {carácter: apariciones} usando str.count por cada carácter."""
    return {letra: cadena.count(letra) for letra in cadena}

cuenta_letras("Juan")
# +
def cuenta_letras(cadena):
    """Devuelve {carácter: apariciones} acumulando en una única pasada."""
    conteo = {}
    for c in cadena:
        conteo[c] = conteo.get(c, 0) + 1
    return conteo

cuenta_letras("palabra asjod asd adlk asdmasld as dl")
# -
# ## Ejercicio 6
# Escribir una función que añada o elimine elementos en una lista. La función necesita los siguientes argumentos:
# * lista: la lista donde se añadira o eliminarán los elementos
# * comando: "add" o "remove"
# * elemento: Por defecto es None.
# + tags=[]
# -
# ## Ejercicio 7
# Crea una función que reciba un número arbitrario de palabras, y devuelva una frase completa, separando las palabras con espacios.
# +
def concatena(*palabras):
    """Une un número arbitrario de palabras en una frase separada por espacios.

    Imprime la frase parcial tras incorporar cada palabra (igual que la
    versión original) y devuelve la frase completa.
    """
    frase = palabras[0]
    print(frase)
    for pal in palabras[1:]:
        frase = f"{frase} {pal}"
        print(frase)
    return frase

concatena("alskfn", "alsknd", "oilahsd", "lakns")
# -
# ## Ejercicio 8
# Escribe un programa que calcule la [serie de Fibonacci](https://es.wikipedia.org/wiki/Sucesi%C3%B3n_de_Fibonacci)
# + tags=[]
# -
# ## Ejercicio 9
# Define en una única celda las siguientes funciones:
# * Función que calcule el área de un cuadrado
# * Función que calcule el area de un triángulo
# * Función que calcule el área de un círculo
#
# En otra celda, calcular el area de:
# * Dos círculos de radio 10 + un triángulo de base 3 y altura 7
# * Un cuadrado de lado = 10 + 3 círculos (uno de radio = 4 y los otros dos de radio = 6) + 5 triángulos de base = 2 + altura = 4
# +
import math

# Fixed: the original evaluated `lado`, `base`, `altura` and `radio` without
# defining them first, raising NameError. Example values make the cell runnable.
lado = 4
base, altura = 3, 7
radio = 10
formula_area_cuadrado = lado**2
formula_area_triangulo = base*altura/2
formula_area_circulo = math.pi*radio**2
# +
def area_cuadrado(lado):
    """Área de un cuadrado de lado `lado`."""
    return lado**2

def area_triangulo(base, altura):
    """Área de un triángulo dados base y altura.

    Fixed: the original returned the undefined name `algo`.
    """
    return base * altura / 2

def area_circulo(radio):
    """Área de un círculo de radio `radio` (usa math.pi, importado arriba)."""
    return math.pi * radio**2
# +
# Fixed: `lado` was undefined here; use a concrete side length.
a = area_cuadrado(10)
| Bloque 1 - Ramp-Up/05_Python/03_Funciones/03_Ejercicios funciones.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Standard imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib notebook
# Load mavenn and check path
import mavenn
# Import helper functions
from helper_functions import my_rsquared, save_fig_with_date_stamp, set_xticks
# Fix the NumPy RNG so the figure is reproducible run-to-run
np.random.seed(0)

# Base name used for the style file and the saved figure
fig_name = 'figS5'
# +
# Write a matplotlib style sheet next to the notebook and activate it.
style_file_name = f'{fig_name}.style'
s = """
axes.linewidth: 0.5 # edge linewidth
font.size: 7.0
axes.labelsize: 7.0 # fontsize of the x any y labels
xtick.labelsize: 7.0 # fontsize of the tick labels
ytick.labelsize: 7.0 # fontsize of the tick labels
legend.fontsize: 7.0
legend.borderpad: 0.2 # border whitespace
legend.labelspacing: 0.2 # the vertical space between the legend entries
legend.borderaxespad: 0.2 # the border between the axes and legend edge
legend.framealpha: 1.0
"""
with open(style_file_name, 'w') as f:
    f.write(s)
plt.style.use(style_file_name)
plt.rc('font', family='sans-serif')  # fixed: this rc call was duplicated
# + code_folding=[0]
## Define OtwinowskiGPMapLayer
# Standard TensorFlow imports
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.initializers import Constant
# Import base class
from mavenn.src.layers.gpmap import GPMapLayer
# Define custom G-P map layer
class OtwinowskiGPMapLayer(GPMapLayer):
    """
    A G-P map representing the thermodynamic model described by
    Otwinowski (2018): a three-state (unfolded / folded / folded+bound)
    Boltzmann model with additive folding and binding energies.
    """

    def __init__(self, *args, **kwargs):
        """Construct layer instance and create trainable energy parameters."""
        # Call superclass constructor
        # Sets self.L, self.C, and self.regularizer
        super().__init__(*args, **kwargs)

        # Initialize constant parameter for folding energy
        self.theta_f_0 = self.add_weight(name='theta_f_0',
                                         shape=(1,),
                                         trainable=True,
                                         regularizer=self.regularizer)

        # Initialize constant parameter for binding energy
        self.theta_b_0 = self.add_weight(name='theta_b_0',
                                         shape=(1,),
                                         trainable=True,
                                         regularizer=self.regularizer)

        # Initialize additive parameter for folding energy (per position x character)
        self.theta_f_lc = self.add_weight(name='theta_f_lc',
                                          shape=(1, self.L, self.C),
                                          trainable=True,
                                          regularizer=self.regularizer)

        # Initialize additive parameter for binding energy (per position x character)
        self.theta_b_lc = self.add_weight(name='theta_b_lc',
                                          shape=(1, self.L, self.C),
                                          trainable=True,
                                          regularizer=self.regularizer)

    def call(self, x_lc):
        """Compute phi given one-hot sequences x: the Boltzmann fraction of
        protein that is both folded and bound."""
        # 1kT = 0.582 kcal/mol at room temperature
        kT = 0.582

        # Reshape input to samples x length x characters
        x_lc = tf.reshape(x_lc, [-1, self.L, self.C])

        # Compute Delta G for binding: constant term + additive one-hot contributions
        Delta_G_b = self.theta_b_0 + \
            tf.reshape(K.sum(self.theta_b_lc * x_lc, axis=[1, 2]),
                       shape=[-1, 1])

        # Compute Delta G for folding
        Delta_G_f = self.theta_f_0 + \
            tf.reshape(K.sum(self.theta_f_lc * x_lc, axis=[1, 2]),
                       shape=[-1, 1])

        # Compute and return fraction folded and bound
        Z = 1+K.exp(-Delta_G_f/kT)+K.exp(-(Delta_G_f+Delta_G_b)/kT)
        p_bf = (K.exp(-(Delta_G_f+Delta_G_b)/kT))/Z
        phi = p_bf  # alternative latent phenotype: K.log(p_bf)/np.log(2)
        return phi
# +
# Load the pretrained GB1 thermodynamic MAVE-NN model
model = mavenn.load('../models/gb1_thermodynamic_model_2021.12.30.21h.07m')

# Get wt sequence (consensus sequence of the training data)
wt_seq = model.x_stats['consensus_seq']
# One-hot encode the WT sequence as an (L, C) matrix (positions x characters)
wt_lc = mavenn.src.utils.x_to_ohe(wt_seq, alphabet=model.alphabet).reshape([model.L, model.C])

# Get the fitted G-P map parameters; theta_f_lc is the additive folding-energy matrix
theta_dict = model.layer_gpmap.get_params()
dG_f_mat_mavenn = theta_dict['theta_f_lc']

# Compute ddGs: subtract each position's WT entry so energies are relative to WT
ddG_f_mat_mavenn = dG_f_mat_mavenn - (dG_f_mat_mavenn*wt_lc).sum(axis=1)[:,np.newaxis]
# +
# Load the Nisthal folding-energy dataset, indexed by sequence
nisthal_df = pd.read_csv('../datasets/nisthal_data.csv.gz')
nisthal_df.set_index('x', inplace=True)

# Get Nisthal folding energies relative to WT (subtract the WT sequence's dG)
dG_f_nisthal = nisthal_df['y']
dG_f_wt_nisthal = dG_f_nisthal[wt_seq]
ddG_f_nisthal = dG_f_nisthal - dG_f_wt_nisthal
# -
# Load Otwinowski's published folding-energy matrix, reordered to this
# model's alphabet, and convert it to ddG values relative to WT.
dG_f_otwinowski_df = pd.read_csv('../datasets/otwinowski_gf_data.csv.gz', index_col=[0]).T.reset_index(drop=True)[model.alphabet]
ddG_f_mat_otwinowski = dG_f_otwinowski_df.values - \
    np.sum(wt_lc*dG_f_otwinowski_df.values, axis=1)[:,np.newaxis]
# +
# Get MAVE-NN folding energies relative to WT for every Nisthal sequence.
x_nisthal = nisthal_df.index.values
x_nisthal_ohe = mavenn.src.utils.x_to_ohe(x=x_nisthal,
                                          alphabet=model.alphabet)
# Flatten the (L, C) ddG matrix so a dot product with each one-hot encoding
# sums the per-position energy contributions of that sequence.
ddG_f_vec = ddG_f_mat_mavenn.ravel().reshape([1,-1])
ddG_f_mavenn = np.sum(ddG_f_vec*x_nisthal_ohe, axis=1)

# Same computation using Otwinowski's parameter matrix
ddG_f_vec_otwinowski = ddG_f_mat_otwinowski.ravel().reshape([1,-1])
ddG_f_otwinowski = np.sum(ddG_f_vec_otwinowski*x_nisthal_ohe, axis=1)
# Define plotting routine
def draw(ax, y, model_name, loc):
    """Scatter predicted vs. measured ddG_F values with a diagonal reference.

    Parameters:
        ax: matplotlib axes to draw on.
        y: predicted ddG_F values, aligned with the module-level
           `ddG_f_nisthal` series.
        model_name: label for the y-axis (e.g. 'MAVE-NN').
        loc: legend location string.
    """
    r2, dr2 = my_rsquared(ddG_f_nisthal, y)
    ax.scatter(ddG_f_nisthal, y, alpha=.2, label='data', s=20, linewidth=0)
    ax.scatter(0, 0, label='WT sequence', s=30, linewidth=0)
    xlim = [-3, 5]
    ylim = [-4, 8]
    ax.set_xlim(xlim)
    ax.set_ylim(ylim)
    ax.plot(xlim, xlim, color='k', alpha=.5, label='diagonal', zorder=-100)
    # Fixed: raw f-strings — '\D' is an invalid escape sequence in normal
    # strings (DeprecationWarning); the rendered TeX text is unchanged.
    ax.set_xlabel(rf'Nisthal $\Delta \Delta G_F$ (kcal/mol)')
    ax.set_ylabel(rf'{model_name} $\Delta \Delta G_F$ (kcal/mol)')
    ax.legend(loc=loc)
    # Display r^2 in the upper-left corner, offset by a fraction of the span
    yint = ylim[1] - ylim[0]
    xint = xlim[1] - xlim[0]
    ax.text(x=xlim[0] + .02 * xint,
            y=ylim[1] - .05 * yint,
            s=rf'$R^2 =$ {r2:.3f} $\pm$ {dr2:.3f}',
            ha='left', va='center')
# Make the two-panel comparison figure
fig = plt.figure(figsize=[6.5, 3])
plt.style.use(style_file_name)
gs = fig.add_gridspec(1, 2)

# Define panels: (a) Otwinowski vs Nisthal, (b) MAVE-NN vs Nisthal
ax_a = fig.add_subplot(gs[0, 0])
ax_b = fig.add_subplot(gs[0, 1])
draw(ax=ax_a,
     y=ddG_f_otwinowski,
     model_name='Otwinowski',
     loc='lower right')
draw(ax=ax_b,
     y=ddG_f_mavenn,
     model_name='MAVE-NN',
     loc='upper right')

# Add panel labels in figure coordinates
fig.text(0.02, 0.95, 'a', fontsize=11, fontweight='bold')
fig.text(0.51, 0.95, 'b', fontsize=11, fontweight='bold')

# Tight layout with extra horizontal padding between panels
fig.tight_layout(w_pad=5)

# Save figure with a date stamp (helper from helper_functions)
save_fig_with_date_stamp(fig, fig_name)
# -
| paper/figure_scripts/figS5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import open3d as o3d
import numpy as np
import matplotlib.pyplot as plt
from trajectory_io import *
import torch
from torch.autograd import Variable
import os
# %matplotlib inline
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# +
def squeeze_out_pc(depth, extrinsic, intrinsic):
    """Back-project a depth map into an (H*W, 3) global-frame point cloud.

    `depth` is an HxW Variable; `intrinsic` (3x3) and `extrinsic` (4x4)
    are numpy camera matrices. Gradients flow through the depth values.
    """
    assert isinstance(intrinsic, np.ndarray)
    assert intrinsic.shape == (3, 3)
    assert isinstance(depth, torch.autograd.Variable)
    assert len(depth.data.shape) == 2
    assert isinstance(extrinsic, np.ndarray)
    assert extrinsic.shape == (4, 4)
    rows, cols = depth.shape
    u_grid, v_grid = np.meshgrid(np.arange(cols), np.arange(rows))
    # Pixel coordinates scaled by depth: (u*z, v*z) per pixel
    scaled = [torch.autograd.Variable(torch.from_numpy(g.astype(np.float32))) * depth
              for g in (u_grid, v_grid)]
    pixels = torch.stack(scaled + [depth], dim=-1).reshape(-1, 3)
    assert pixels.shape == (cols * rows, 3)
    # Camera frame: K^-1 @ [u*z, v*z, z]
    K_inv = torch.from_numpy(np.linalg.inv(intrinsic).astype(np.float32))
    cam_pts = K_inv @ pixels.T
    # Global frame: T^-1 @ homogeneous camera points
    T_inv = torch.from_numpy(np.linalg.inv(extrinsic).astype(np.float32))
    homo = torch.cat([cam_pts, torch.ones([1, cols * rows])], dim=0)
    world = (T_inv @ homo).T
    return world[:, :3]
def voxel_down_sample(pc, voxel_length):
    """Average all points that fall into the same cubic voxel.

    `pc` is an (N, 3) Variable; returns one mean point per occupied voxel,
    in first-occupancy order.
    """
    assert isinstance(pc, torch.autograd.Variable)
    assert len(pc.shape) == 2 and pc.shape[-1] == 3
    buckets = {}
    for idx in range(pc.shape[0]):
        pt = pc[idx]
        key = tuple(np.floor((pt / voxel_length).numpy()))
        buckets.setdefault(key, []).append(pt)
    means = [torch.stack(members, dim=0).mean(dim=0) for members in buckets.values()]
    return torch.stack(means, dim=0)
def psi_discrete(nu, mu):
    """Truncated signed-distance value for a scalar tensor `nu`.

    Values below -mu carry no information (return 0); otherwise the
    distance is scaled by 1/mu and clamped to magnitude 1.
    """
    if nu < -mu:
        # Behind the truncation band: no surface information.
        return torch.autograd.Variable(torch.tensor(0)).double()
    with torch.no_grad():
        sgn = torch.sign(nu)
    if nu / mu > 1:
        return torch.autograd.Variable(torch.tensor(sgn)).double()
    # NOTE(review): for nu in (-mu, 0) this yields |nu|/mu (sign folded in) —
    # matches the original implementation.
    return ((nu / mu) * sgn).double()
def psi_vector(nu, mu):
    """Vectorized truncated SDF: elementwise psi over a 1-D tensor `nu`.

    Entries with nu < -mu map to 0; the rest are min(1, nu/mu).
    (Fixed: the original defined `zeros`/`ones` twice; the first, unused
    pair is removed. Debug prints are preserved.)
    """
    assert isinstance(nu, torch.autograd.Variable)
    assert len(nu.data.shape) == 1
    print(nu.data.dtype)
    with torch.no_grad():
        outside = (nu >= -mu).float()
        sgn = torch.sign(nu)
    print(np.logical_and(nu.numpy() > -mu, nu.numpy() < 0).astype(np.float32).mean())
    zeros = torch.zeros(nu.data.shape[0])
    ones = torch.ones(nu.data.shape[0])
    # It seems that the extra *sgn factor in the article is an error,
    # so it is intentionally omitted here.
    return outside * torch.min(ones, nu/mu) + (ones - outside) * zeros
class DifferentialTSDFVolume():
    """Dense TSDF volume stored as torch Variables, so the fused volume stays
    differentiable with respect to the input depth maps.

    Voxels are indexed (i, j, k) along the global x, y, z axes; the world
    position of voxel (0, 0, 0) is `gf_corner_coord`.
    """

    def __init__(self, voxel_length, mu, d, h, w, gf_corner_coord):
        # D, H, W, correspond to x, y, z axes in global frame
        assert isinstance(gf_corner_coord, np.ndarray)
        assert gf_corner_coord.shape == (3, )
        self.voxel_length = voxel_length  # voxel edge length (world units)
        self.mu = mu                      # TSDF truncation distance
        self.weight = 0.0                 # accumulated fusion weight
        self.gf_corner_coord = gf_corner_coord
        self.h = h
        self.w = w
        self.d = d
        # Voxel counts per axis (+1 so the far boundary is included)
        self.nh = int(np.floor(h / voxel_length)) + 1
        self.nw = int(np.floor(w / voxel_length)) + 1
        self.nd = int(np.floor(d / voxel_length)) + 1
        self.volume = torch.autograd.Variable(torch.zeros([self.nd, self.nh, self.nw]))
        assert tuple(self.volume.data.shape) == (self.nd, self.nh, self.nw)

    def get_current_pointcloud(self, eps=0.2):
        """Extract a point cloud from the currently fused volume."""
        return self.get_pointcloud(self.volume, eps)

    def get_pointcloud(self, tsdf_volume, eps=0.2):
        """Return an (N, 3) array of voxel centers whose TSDF value is
        non-zero and whose value*mu is below `eps` (near-surface voxels)."""
        pc = []
        for i in range(tsdf_volume.data.shape[0]):  # run along X axis
            for j in range(tsdf_volume.data.shape[1]):  # run along Y axis
                for k in range(tsdf_volume.data.shape[2]):  # run along Z axis
                    if tsdf_volume[i, j, k] != 0 and tsdf_volume[i, j, k] * self.mu < eps:
                        pc.append(np.array([self.gf_corner_coord[0] + i * self.voxel_length,
                                            self.gf_corner_coord[1] + j * self.voxel_length,
                                            self.gf_corner_coord[2] + k * self.voxel_length]))
        return np.stack(pc)

    def individual_tsdf(self, depth, extrinsic, intrinsic):
        """Compute the truncated SDF of a single depth frame over the whole
        voxel grid (slow per-voxel triple loop; the vectorized class in this
        file does the same with array operations)."""
        assert isinstance(depth, torch.autograd.Variable)
        assert len(tuple(depth.data.shape)) == 2
        assert depth.data.dtype == torch.float32
        assert isinstance(extrinsic, np.ndarray)
        assert extrinsic.shape == (4, 4)
        assert extrinsic.dtype == np.float32
        assert isinstance(intrinsic, np.ndarray)
        assert intrinsic.shape == (3, 3)
        assert intrinsic.dtype == np.float32
        tsdf = []
        # Coordinates of camera in the global frame
        t_g_k = (torch.from_numpy(np.linalg.inv(extrinsic)) @ torch.cat([torch.zeros([3]), torch.ones([1])]))[:3]
        active_pixels = {}
        debug_df = []
        for i in range(self.nd):  # run along X axis
            for j in range(self.nh):  # run along Y axis
                for k in range(self.nw):  # run along Z axis
                    # Point coordinates in the global frame
                    p = np.zeros([3])
                    p[0] = self.gf_corner_coord[0] + i * self.voxel_length
                    p[1] = self.gf_corner_coord[1] + j * self.voxel_length
                    p[2] = self.gf_corner_coord[2] + k * self.voxel_length
                    # Project the voxel center into the reference depth frame
                    x = (intrinsic @ (extrinsic @ np.concatenate([p, np.ones([1])])[:, None])[:3, :]).squeeze()
                    x = (x / x[2])[:2]
                    x_floor = np.floor(x).astype(np.int32)
                    # lambda: depth scaling factor along the viewing ray
                    lam = np.linalg.norm(np.linalg.inv(intrinsic) @ np.concatenate([x, np.ones([1])])[:, None])
                    # Truncated TSDF; voxels that project outside the image get 0
                    if x[0] > 0.0 and x[0] < depth.data.shape[1] and x[1] > 0.0 and x[1] < depth.data.shape[0]:
                        tsdf.append(psi_discrete(float(np.linalg.norm(t_g_k - p)/lam) - depth[x_floor[1], x_floor[0]],
                                                 self.mu))
                        debug_df.append(float(np.linalg.norm(t_g_k - p)/lam) - depth[x_floor[1], x_floor[0]])
                    else:
                        tsdf.append(torch.autograd.Variable(torch.tensor(0)).double())
        print(len(tsdf))
        return torch.stack(tsdf).reshape([self.nd, self.nh, self.nw])

    def integrate(self, depth, extrinsic, intrinsic, w):
        """Fuse one depth frame into the volume by weighted running average."""
        # Here we assume that weight does not depend on the point
        new_tsdf = self.individual_tsdf(depth, extrinsic, intrinsic)
        self.volume = (self.weight * self.volume + w * new_tsdf) / (self.weight + w)
        self.weight += w
class DifferentialVectorTSDFVolume():
    """Vectorized variant of DifferentialTSDFVolume: the TSDF is kept as a
    flat (nd*nh*nw,) Variable and whole frames are fused with array
    operations instead of a per-voxel Python loop."""

    def __init__(self, voxel_length, mu, d, h, w, gf_corner_coord):
        # D, H, W, correspond to x, y, z axes in global frame
        assert isinstance(gf_corner_coord, np.ndarray)
        assert gf_corner_coord.shape == (3, )
        self.voxel_length = voxel_length  # voxel edge length (world units)
        self.mu = mu                      # TSDF truncation distance
        self.weight = 0.0                 # accumulated fusion weight
        self.gf_corner_coord = gf_corner_coord
        self.h = h
        self.w = w
        self.d = d
        # Voxel counts per axis (+1 so the far boundary is included)
        self.nh = int(np.floor(h / voxel_length)) + 1
        self.nw = int(np.floor(w / voxel_length)) + 1
        self.nd = int(np.floor(d / voxel_length)) + 1
        # Flattened (nd*nh*nw,) TSDF volume, C-order over (i, j, k)
        self.volume = torch.autograd.Variable(torch.zeros([self.nd, self.nh, self.nw])).reshape(-1)
        print(self.volume.data.shape)
        assert tuple(self.volume.data.shape) == (self.nd * self.nh * self.nw, )

    def get_current_pointcloud(self, eps=0.2):
        """Extract a point cloud from the currently fused volume."""
        return self.get_pointcloud(self.volume, eps)

    def get_pointcloud(self, tsdf_volume, eps=0.2):
        """Return (N, 3) world coordinates of voxels with a negative,
        non-negligible TSDF value satisfying value*mu < eps."""
        tsdf_volume = tsdf_volume.data.numpy()
        flag = np.logical_and(np.logical_and(np.abs(tsdf_volume) > 0.0001, tsdf_volume < 0.0),
                              tsdf_volume * self.mu < eps).astype(np.int32)
        ind = np.nonzero(flag)[0]
        # World coordinates of every voxel center, flattened in the same
        # (i, j, k) order as the volume itself
        p = np.meshgrid(np.linspace(0, (self.nd-1)*self.voxel_length, num=self.nd),
                        np.linspace(0, (self.nh-1)*self.voxel_length, num=self.nh),
                        np.linspace(0, (self.nw-1)*self.voxel_length, num=self.nw),
                        indexing='ij')
        p = np.stack(p, axis=-1)
        p = p.reshape(-1, 3) + self.gf_corner_coord[None, :]
        return p[ind]

    def individual_tsdf(self, depth, extrinsic, intrinsic):
        """Vectorized truncated SDF of a single depth frame over the grid.

        Projects all voxel centers into the depth image at once; voxels whose
        projection falls outside the image keep a TSDF value of 0.
        """
        assert isinstance(depth, torch.autograd.Variable)
        assert len(tuple(depth.data.shape)) == 2
        assert depth.data.dtype == torch.float32
        assert isinstance(extrinsic, np.ndarray)
        assert extrinsic.shape == (4, 4)
        assert extrinsic.dtype == np.float32
        assert isinstance(intrinsic, np.ndarray)
        assert intrinsic.shape == (3, 3)
        assert intrinsic.dtype == np.float32
        tsdf = torch.autograd.Variable(torch.zeros([self.nd, self.nh, self.nw])).reshape(-1)
        # Coordinates of camera in the global frame
        t_g_k = (torch.from_numpy(np.linalg.inv(extrinsic)) @ torch.cat([torch.zeros([3]), torch.ones([1])]))[:3]
        active_pixels = {}
        # Creating pointcloud, corresponding to voxel_grid
        p = np.meshgrid(np.linspace(0, (self.nd - 1)*self.voxel_length, num=self.nd),
                        np.linspace(0, (self.nh - 1)*self.voxel_length, num=self.nh),
                        np.linspace(0, (self.nw - 1)*self.voxel_length, num=self.nw),
                        indexing='ij')
        p = np.stack(p, axis=-1)
        assert p.shape == (self.nd, self.nh, self.nw, 3)
        p = p.reshape(-1, 3) + self.gf_corner_coord[None, :]
        assert p.shape == (self.nd * self.nh * self.nw, 3)
        pc_size = p.shape[0]
        # Calculation projection to reference depth frame
        x = intrinsic @ (extrinsic @ np.concatenate([p, np.ones([pc_size, 1])], axis=1).T)[:3, :]
        x = (x / x[2:3, :])[:2, :].T
        assert x.shape == (pc_size, 2)
        x_floor = np.floor(x).astype(np.int32)
        # Calculating lambda - depth scaling factor
        lam = np.linalg.norm(np.linalg.inv(intrinsic) @ np.concatenate([x, np.ones([pc_size, 1])], axis=1).T, axis=0)
        assert lam.shape == (pc_size, )
        # Filter out points, which do not intersect with reference frame
        ray_intersect = np.logical_and(np.logical_and(x[:, 0] > 0, x[:, 0] < depth.data.shape[1]),
                                       np.logical_and(x[:, 1] > 0, x[:, 1] < depth.data.shape[0]))
        assert ray_intersect.shape == (pc_size, )
        ray_intersect = np.nonzero(ray_intersect.astype(np.int32))[0].astype(np.int32)
        active_x = x[ray_intersect]
        active_p = p[ray_intersect]
        active_lam = lam[ray_intersect]
        active_x_floor = x_floor[ray_intersect]
        active_depth = depth[active_x_floor[:, 1], active_x_floor[:, 0]]
        debug_df = torch.from_numpy(np.linalg.norm(t_g_k.reshape(1, 3) - active_p, axis=1)/active_lam).float() - active_depth
        # NOTE(review): the sign convention here (depth - ray distance) is
        # flipped relative to the commented alternative below and to the
        # scalar psi_discrete path — confirm which convention is intended.
        # active_tsdf = psi_vector(torch.from_numpy(np.linalg.norm(t_g_k.reshape(1, 3) - active_p, axis=1)/active_lam).float() - active_depth, self.mu)
        active_tsdf = psi_vector(active_depth - torch.from_numpy(np.linalg.norm(t_g_k.reshape(1, 3) - active_p, axis=1)/active_lam).float(), self.mu)
        tsdf[ray_intersect] = active_tsdf
        return tsdf

    def integrate(self, depth, extrinsic, intrinsic, w):
        """Fuse one depth frame into the volume by weighted running average."""
        # Here we assume that weight does not depend on the point
        new_tsdf = self.individual_tsdf(depth, extrinsic, intrinsic)
        self.volume = (self.weight * self.volume + w * new_tsdf) / (self.weight + w)
        self.weight += w
# +
def voxel_down_sample(pc, voxel_length):
    """Voxel-grid downsampling: one averaged point per occupied voxel.

    `pc` is an (N, 3) Variable; output order follows first occupancy.
    """
    assert isinstance(pc, torch.autograd.Variable)
    assert len(pc.shape) == 2 and pc.shape[-1] == 3
    voxel_points = dict()
    for point in pc:
        voxel = tuple(np.floor((point / voxel_length).numpy()))
        if voxel not in voxel_points:
            voxel_points[voxel] = []
        voxel_points[voxel].append(point)
    downsampled = []
    for members in voxel_points.values():
        downsampled.append(torch.stack(members, dim=0).mean(dim=0))
    return torch.stack(downsampled, dim=0)
# -
# Camera intrinsic matrix: fx = fy ~ 2666.67 px, principal point (960, 540)
# — presumably 1920x1080 Blender renders; confirm against the render config.
K = np.array([[2.666666666666666970e+03, 0.000000000000000000e+00, 9.600000000000000000e+02],
              [0.000000000000000000e+00, 2.666666666666666970e+03, 5.400000000000000000e+02],
              [0.000000000000000000e+00, 0.000000000000000000e+00, 1.000000000000000000e+00]])
# +
# Sanity check: back-project the central pixel of frame 0 into world coordinates.
npy = np.load('blender_render_depth4/{:01d}.npy'.format(0)).squeeze()
extrinsic = np.loadtxt('blender_render_depth4/RT_%01d.txt' % 0).astype(np.float32)
# extrinsic = np.identity(4).astype(np.float32)
intrinsic = K.astype(np.float32)
# Homogeneous pixel at the image center
x = np.array([npy.shape[1]//2, npy.shape[0]//2, 1.0])
# p = T^-1 @ [K^-1 @ (x * depth); 1] — camera ray scaled by depth, then to world frame
p = np.linalg.inv(extrinsic) @ np.concatenate([np.linalg.inv(K) @ (x * npy[npy.shape[0]//2, npy.shape[1]//2]), np.ones([1])])
p = p[:3]
# +
# volume = DifferentialTSDFVolume(voxel_length=0.1, mu=0.3, d=4.0, h=4.0, w=4.0, gf_corner_coord=np.array([-2, -4, -2]))
# Vectorized volume: 4m cube starting at (-2, -4, -2), 3cm voxels, 10cm truncation
vector_volume = DifferentialVectorTSDFVolume(voxel_length=0.03, mu=0.1, d=4.0, h=4.0, w=4.0, gf_corner_coord=np.array([-2, -4, -2]))
input_depth = []
individual_tsdf = []
# Fuse a few selected frames; other candidate indices: [5, 120, 276, 170, 50, 220]
for i in [0, 180, 220]:
    # for i in range(0, 220, 10):
    print("Integrating {:d} frame".format(i))
    npy = np.load('blender_render_depth4/{:01d}.npy'.format(i)).squeeze()
    extrinsic = np.loadtxt('blender_render_depth4/RT_%01d.txt' % i).astype(np.float32)
    # extrinsic = np.identity(4).astype(np.float32)
    intrinsic = K.astype(np.float32)
    input_depth.append(torch.autograd.Variable(torch.from_numpy(npy)))
    # Keep each frame's individual TSDF for inspection before fusing it
    individual_tsdf.append(vector_volume.individual_tsdf(input_depth[-1], extrinsic, intrinsic))
    # volume.integrate(input_depth[-1], extrinsic, intrinsic, w=0.1)
    vector_volume.integrate(input_depth[-1], extrinsic, intrinsic, w=0.1)
# -
# Visualize the near-surface voxels of the fused volume with Open3D
pc = o3d.geometry.PointCloud()
# pc.points = o3d.utility.Vector3dVector(vector_volume.get_pointcloud((individual_tsdf[0] + individual_tsdf[1])/2, eps=1.0))
pc.points = o3d.utility.Vector3dVector(vector_volume.get_current_pointcloud(eps=0.8))
# pc_0.paint_uniform_color(color=np.array([[1, 0, 0]]).T)
o3d.visualization.draw_geometries([pc])
| Fusion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Monte Carlo GPI
#
# The idea behind Monte Carlo methods for RL is very straightforward, we want to do this via GPI, where we try to estimate the value of state-action pairs based on sampling.
#
# Essentially we will run episodes by following a policy $\pi_i$ (which can start as random), $0\leq i <n$. Then we will update an estimate of our $q$ function by incrementing the state-action pairs that occurred in the episode:
#
# $$ q_{i+1}(s, a) = q_{i}(s, a) + \frac{G(s, a, i) - q_{i}(s, a)}{u(s, a) + 1}$$
#
# Where $u(s, a)$ is the count of updates for this state-action pair, and $G(s, a, i)$ is the return we got in episode $i$ after taking action $(s, a)$ for the first time (we will implement first-visit Monte Carlo). This is just averaging over all first-visit results.
#
# After that we update $\pi_{i+1}$ as an $\epsilon$-greedy policy over $q_{i}$. Note that we must have $\pi_i(s, a) > 0$ for all state-action pairs, else we can't guarantee enough exploration, and that's why we go with an $\epsilon$-greedy policy.
#
# In this notebook, we will explore how we can create a simple agent class and some functions to help it learn a good policy. For future use all the relevant functions here will be a part of the MonteCarloAgent class implemented in the codebase.
# +
import sys
sys.path.append('../..')
import numpy as np
import matplotlib.pyplot as plt
from grid_world.action import Action
from grid_world.grid_world import GridWorld
from grid_world.visualization.format_objects import get_police_rec_str, get_police_eval_str, get_world_str
from grid_world.utils.returns import returns_from_reward
from grid_world.utils.police import get_police_rec, get_random_police, sample_action
np.random.seed(21)
# -
# # World
#
# Monte Carlo is by no means an efficient method, so we will use a small deterministic world for this example.
# Small deterministic 4x5 grid world: one terminal state, three walls and
# a single trap (all coordinates are (row, col) tuples).
gworld = GridWorld(
    grid_shape=(4,5),
    terminal_states_coordinates=((0,4),),
    walls_coordinates=((0,1), (1,1), (2,3)),
    traps_coordinates=((1,3),),
)
print(get_world_str(gworld))
# # Agent
#
# Here we will define a simple Agent class to store relevant information and simple methods.
#
# The agent as we are defining isn't aware of the world it is in, which means that it doesn't know what are the possible states it can reach. This seems like a natural choice for me, as I don't see why we would require this knowledge, but this is somewhat different from some implementations described in the literature, although in practice this doesn't change.
#
# Important properties of our agent are its reward function and gamma value, which will dictate what he is trying to accomplish. The police and $q$ function are also naturally a part of the agent.
# +
def reward(e):
    """Map a world effect code to a scalar reward.

    Effect 1 (reaching the terminal state) is free, effect -1 (falling
    into a trap) costs -10, and every other step costs -1.
    """
    reward_table = {1: 0, -1: -10}
    return reward_table.get(e, -1)
class BasicAgent:
    """Minimal Monte Carlo agent: a policy, a q estimate and update counts.

    The agent is world-agnostic: it only knows its action set, its reward
    function and its discount factor gamma.
    """

    def __init__(
        self,
        reward_function,
        actions = None,
        police = None,
        gamma = 1,
    ):
        # Default to every Action when no explicit action set is given.
        self.actions = actions if actions is not None else tuple(Action)
        # BUG FIX: the original referenced the undefined name `Police`,
        # which raised NameError whenever a policy was actually passed in.
        self.police = police if police is not None else get_random_police(self.actions)
        self.reward_function = reward_function
        self.gamma = gamma
        # q_map: (state, action) -> estimated return; u: visit/update counts.
        # Annotations are quoted so they are NOT evaluated at runtime
        # (annotations on attribute targets are evaluated, and `State` is
        # not imported in this module).
        self.q_map: "dict[tuple[State, Action], float]" = {}
        self.u: "dict[tuple[State, Action], int]" = {}
        # q defaults to 0 for unseen state-action pairs.
        self.q = lambda s, a: self.q_map.get((s, a), 0)

    def get_visited_states(self) -> set:
        # BUG FIX: the original read the module-level `agent` instead of
        # `self`, so it only worked for that one global instance.
        return {x for x, y in self.q_map.keys()}
# Instantiate the agent with the four cardinal moves; the policy defaults
# to a uniform random one.
agent = BasicAgent(
    reward_function = reward,
    actions = [Action.up, Action.down, Action.left, Action.right]
)
# -
# # Episodes
#
# Alright in order to learn the agent needs to interact with the world, this is done through episodes.
#
# An episode consists of an agent starting in the initial position, selecting an action, going to a new position and receiving an 'effect' (which will be interpreted by the agent and transformed into a reward). Then it repeats the process until it reaches a terminal state or a certain threshold of steps is reached.
def run_episode(
    agent, world, initial_state = None, max_steps = 1000000
):
    """Roll out one episode of `agent` acting in `world`.

    Starts from `initial_state` (or the world's default start), samples
    actions from the agent's policy until a terminal state is reached or
    `max_steps` actions have been taken.

    Returns (terminated, states, actions, rewards); `states` includes the
    start state, so it has one more entry than `actions` and `rewards`.
    """
    current = initial_state if initial_state is not None else world.initial_state
    states, actions, rewards = [current], [], []
    terminated = False
    for _ in range(max_steps):
        chosen = sample_action(agent.police, current, agent.actions)
        current, effect = world.take_action(current, chosen)
        actions.append(chosen)
        states.append(current)
        rewards.append(agent.reward_function(effect))
        if current.kind == "terminal":
            terminated = True
            break
    return terminated, states, actions, rewards
# Roll out one sample episode under the (initially random) policy and
# turn its reward sequence into per-step returns.
episode_terminated, episode_states, episode_actions, episode_rewards = run_episode(agent, gworld)
episode_returns = returns_from_reward(episode_rewards)
# +
def get_sar_str(episode_states, episode_actions, episode_rewards, episode_returns, world):
    """Format an episode as one "state | action | reward | return" line per step.

    Stops at (and reports) the first terminal state. The `world` argument
    is accepted for API symmetry but is not used in the formatting.
    """
    pieces = []
    for idx, state in enumerate(episode_states):
        if state.kind == 'terminal':
            pieces.append(f'terminated at state: {state}')
            break
        pieces.append(
            f'state: {state} | action: {episode_actions[idx].unicode}'
            f' | reward: {episode_rewards[idx]:.2f}'
            f' | return: {episode_returns[idx]:.2f} \n'
        )
    return ''.join(pieces)
# Pretty-print the sampled episode step by step.
print(get_sar_str(episode_states, episode_actions, episode_rewards, episode_returns, gworld))
# -
# # Learning
#
# Monte Carlo agents are expected to 'learn' after each completed episode. By this I simply mean improving his expectations on how good taking an action in a state is, i.e. reestimating its q function.
# +
def first_visit_return(states, actions, returns):
    """Return {(state, action): G} keeping only the FIRST visit of each pair.

    `returns[i]` is the return observed after taking `actions[i]` in
    `states[i]`; later visits of an already-seen pair are ignored
    (first-visit Monte Carlo).
    """
    first_returns = {}
    for pair, g in zip(zip(states, actions), returns):
        first_returns.setdefault(pair, g)
    return first_returns
def update_q(agent, fvr):
    """Fold first-visit returns into the agent's running q averages.

    For each (state, action) pair the visit count is incremented and the
    stored q value moves toward the observed return by 1/count
    (incremental mean); pairs seen for the first time are initialised to
    the observed return.
    """
    for pair, observed in fvr.items():
        count = agent.u.get(pair, 0) + 1
        agent.u[pair] = count
        if pair in agent.q_map:
            s, a = pair
            agent.q_map[pair] += (observed - agent.q(s, a)) / count
        else:
            agent.q_map[pair] = observed
# Update the agent's q estimates with the first-visit returns of the
# episode sampled above.
fvr = first_visit_return(episode_states, episode_actions, episode_returns)
update_q(agent, fvr)
# -
# # Improving
#
# So learning is not necessarily useful by itself, we also want the agent to be able to take better decision, so we improve the police, by making it $\epsilon$-greedy over our improved q function.
# +
def get_best_action(s, q, actions):
    """Return the action maximising q(s, a) over `actions` (greedy pick).

    Ties are broken in favour of the earliest action in `actions`.

    BUG FIX: the original initialised best_score to -inf and only assigned
    best_action on a strict improvement, raising UnboundLocalError when
    every action scored exactly -inf; max() always returns the first
    maximiser. (An empty `actions` still raises -- now a clear ValueError.)
    """
    return max(actions, key=lambda a: q(s, a))
def get_e_greedy_police(q, states, actions, epsilon = 0.1):
    """Build an epsilon-greedy policy from a q function.

    For every state in `states` the greedy action gets probability
    (1 - epsilon) + epsilon/|A| and every other action epsilon/|A|.
    State-action pairs never tabulated fall back to the uniform 1/|A|.
    Returns a callable (state, action) -> probability.
    """
    base = epsilon / len(actions)
    uniform = 1 / len(actions)
    table = {}
    for state in states:
        greedy = get_best_action(state, q, actions)
        for action in actions:
            bonus = 1 - epsilon if action == greedy else 0
            table[(state, action)] = base + bonus

    def police(s, a):
        return table.get((s, a), uniform)

    return police
# Derive an epsilon-greedy policy from the current q estimate and print
# the greedy recommendation for every cell of the world.
pi0 = get_e_greedy_police(agent.q, agent.get_visited_states(), agent.actions, epsilon = 0.1) # review
# -
pi0r = get_police_rec(pi0, gworld, agent.actions)
print(get_police_rec_str(pi0r, gworld))
# # Looping
# Of course an agent isn't able to get very good over a single episode, so let's put this in a GPI loop where it will keep learning and improving.
# +
# GPI loop: sample an episode, learn (update q), improve (re-derive the
# epsilon-greedy policy), repeat.
total_episodes = 200
agent = BasicAgent(
    # world = gworld,
    reward_function = reward,
    actions = [Action.up, Action.down, Action.left, Action.right]
)
returns_history = []
lengths_history = []
for i in range(total_episodes):
    episode_terminated, episode_states, episode_actions, episode_rewards = run_episode(agent, gworld)
    episode_returns = returns_from_reward(episode_rewards)
    # Only learn from episodes that actually reached the terminal state.
    if episode_terminated:
        fvr = first_visit_return(episode_states, episode_actions, episode_returns)
        update_q(agent, fvr)
        agent.police = get_e_greedy_police(agent.q, agent.get_visited_states(), agent.actions, epsilon = 0.1)
    # NOTE(review): indentation reconstructed -- history bookkeeping is
    # assumed to run for every episode, not only terminated ones.
    returns_history.append(episode_returns[0])
    lengths_history.append(len(episode_returns))
pi_r = get_police_rec(agent.police, gworld, agent.actions)
print(get_police_rec_str(pi_r, gworld))
# +
# Episode length over time: a quick proxy for learning progress.
fig, ax = plt.subplots(figsize=(14, 6))
ax.plot(lengths_history)
ax.set(xlabel='# of episodes', ylabel='episode length', ylim=[0, 100])
ax.grid()
plt.show()
# -
# The agent improved a lot! However... this clearly isn't ideal.
#
# There are two main problems here, one is that by following this $\epsilon$-greedy policy the agent is very unlikely to reach certain states, like (0,2), and this makes learning good estimates for $q$ very hard in these states. This can be particularly problematic if we have 'stable' sub-optimal policies, where many exploration steps would be needed to get better estimates; this is somewhat the case here, on state (2,2) for instance, where down is a better action, but to learn this we would need to also select ((1,2), down), ((0,2), right) and ((0,3), right), which is unlikely to happen (fewer than $\epsilon^3(1-\epsilon)$ of the episodes that reach (2,2) will take these actions) under the current policy.
#
# The other problem is that the agent really wants to avoid getting near the trap, that's because its stochastic behavior has a chance to throw it in the trap at any step. This leads to a solution that is non-optimal (in the sense that the greedy police over the final estimate of $q$ isn't optimal), but very safe - it will in fact be the optimal $\epsilon$-greedy policy if we run this long enough, which is clearly as good as we can hope for this approach.
#
# In theory both problems could be dealt with by running this process a lot longer and choosing a small enough value for epsilon or decreasing it appropriately. In practice this is just too slow, that's because the two problems compete with each other, as small values of $\epsilon$ mean less exploration, which makes learning good actions in 'rare' states very difficult, but large values makes the agent more likely to fall into the trap by exploring, which makes it averse to states near the trap.
#
# The conclusion is that the agent learns, but it has some problems and we need better ways of dealing with them.
| notebooks/reinforcement_learning/agents/monte_carlo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 14장. 텐서플로의 구조 자세히 알아보기
# **아래 링크를 통해 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.**
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://nbviewer.jupyter.org/github/rickiepark/python-machine-learning-book-2nd-edition/blob/master/code/ch14/ch14.ipynb"><img src="https://jupyter.org/assets/main-logo.svg" width="28" />주피터 노트북 뷰어로 보기</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/rickiepark/python-machine-learning-book-2nd-edition/blob/master/code/ch14/ch14.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
# </td>
# </table>
# `watermark`는 주피터 노트북에 사용하는 파이썬 패키지를 출력하기 위한 유틸리티입니다. `watermark` 패키지를 설치하려면 다음 셀의 주석을 제거한 뒤 실행하세요.
# +
# #!pip install watermark
# -
# %load_ext watermark
# %watermark -u -d -v -p numpy,tensorflow,matplotlib
# **이 노트북을 실행하려면 텐서플로 2.0.0-alpha0 버전 이상이 필요합니다. 이전 버전의 텐서플로가 설치되어 있다면 다음 셀의 주석을 제거한 뒤 실행하세요.**
# +
# #!pip install tensorflow==2.0.0-alpha0
# -
# **코랩을 사용할 때는 다음 셀의 주석을 제거하고 GPU 버전의 텐서플로 2.0.0-alpha0 버전을 설치하세요.**
# +
# #!pip install tensorflow-gpu==2.0.0-alpha0
# -
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
# ## TensorFlow ranks and tensors
# **How to check a tensor's rank and shape**
# +
## Define tensors t1 (scalar), t2 (vector) and t3 (matrix).
t1 = tf.constant(np.pi)
t2 = tf.constant([1, 2, 3, 4])
t3 = tf.constant([[1, 2], [3, 4]])

## Get the ranks (0, 1 and 2 respectively).
r1 = tf.rank(t1)
r2 = tf.rank(t2)
r3 = tf.rank(t3)

## Get the shapes.
s1 = t1.get_shape()
s2 = t2.get_shape()
s3 = t3.get_shape()
print('크기:', s1, s2, s3)
print('랭크:',
      r1.numpy(),
      r2.numpy(),
      r3.numpy())
# -
# ## Converting tensors to and from multidimensional arrays
arr = np.array([[1., 2., 3., 3.5],
                [4., 5., 6., 6.5],
                [7., 8., 9., 9.5]])
T1 = tf.constant(arr)
print(T1)
s = T1.get_shape()
print('T1의 크기:', s)
print('T1의 크기:', T1.shape)
# Variables shaped like T1 (full shape) and like its first axis only.
T2 = tf.Variable(np.random.normal(size=s))
print(T2)
T3 = tf.Variable(np.random.normal(size=s[0]))
print(T3)
# Reshape with -1 letting TF infer the remaining dimension.
T4 = tf.reshape(T1, shape=[1, 1, -1])
print(T4)
T5 = tf.reshape(T1, shape=[1, 3, -1])
print(T5)
# Axis permutations of T5.
T6 = tf.transpose(T5, perm=[2, 1, 0])
print(T6)
T7 = tf.transpose(T5, perm=[0, 2, 1])
print(T7)
t5_splt = tf.split(T5,
                   num_or_size_splits=2,
                   axis=2)
print(t5_splt)
# +
# Concatenation along rows (axis=0) vs columns (axis=1).
t1 = tf.ones(shape=(5, 1), dtype=tf.float32)
t2 = tf.zeros(shape=(5, 1), dtype=tf.float32)
print(t1)
print(t2)
t3 = tf.concat([t1, t2], axis=0)
print(t3)
t4 = tf.concat([t1, t2], axis=1)
print(t4)
# -
# -
# ## Understanding TensorFlow's computation graph
# +
# Eager execution (TF 2.x default): evaluated immediately.
a = tf.constant(1)
b = tf.constant(2)
c = tf.constant(3)
z = 2*(a-b) + c
print('2*(a-b)+c => ', z.numpy())
# +
## TensorFlow 1.x style
g = tf.Graph()

## Add nodes to the graph.
with g.as_default():
    a = tf.constant(1, name='a')
    b = tf.constant(2, name='b')
    c = tf.constant(3, name='c')
    z = 2*(a-b) + c

## Run the graph inside a session.
with tf.compat.v1.Session(graph=g) as sess:
    print('2*(a-b)+c => ', sess.run(z))
# -
g.get_operations()
g.as_graph_def()
# +
# tf.function traces the Python function into a graph.
@tf.function
def simple_func():
    a = tf.constant(1)
    b = tf.constant(2)
    c = tf.constant(3)
    z = 2*(a-b) + c
    return z

print('2*(a-b)+c => ', simple_func().numpy())
# -
print(simple_func.__class__)
# +
def simple_func():
    a = tf.constant(1, name='a')
    b = tf.constant(2, name='b')
    c = tf.constant(3, name='c')
    z = 2*(a-b) + c
    return z

# Wrapping manually is equivalent to using the @tf.function decorator.
simple_func = tf.function(simple_func)
print('2*(a-b)+c => ', simple_func().numpy())
# -
# Inspect the traced graph through its concrete function.
con_func = simple_func.get_concrete_function()
con_func.graph.get_operations()
con_func.graph.as_graph_def()
# ## TensorFlow variables
# +
g1 = tf.Graph()

with g1.as_default():
    w1 = tf.Variable(np.array([[1, 2, 3, 4],
                               [5, 6, 7, 8]]), name='w1')
    print(w1)
# -
g1.get_operations()
with g1.as_default():
    init = tf.compat.v1.global_variables_initializer()
    print(init.node_def)
# +
with g1.as_default():
    # Rebinding w1 adds an add op to the graph; it does NOT mutate the
    # variable, so both sess.run(w1) calls print the same values.
    w1 = w1 + 1
    print(w1)

with tf.compat.v1.Session(graph=g1) as sess:
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)
    print(sess.run(w1))
    print(sess.run(w1))
# +
g2 = tf.Graph()
with g2.as_default():
    w1 = tf.Variable(np.array([[1, 2, 3, 4],
                               [5, 6, 7, 8]]), name='w1')
    # assign() mutates the variable each time it is run, so the two
    # sess.run(w1) calls below print different values.
    w1 = w1.assign(w1 + 1)

with tf.compat.v1.Session(graph=g2) as sess:
    init = tf.compat.v1.global_variables_initializer()
    sess.run(init)
    print(sess.run(w1))
    print(sess.run(w1))
# -
# Eager-mode variable: assign() takes effect immediately.
w2 = tf.Variable(np.array([[1, 2, 3, 4],
                           [5, 6, 7, 8]]), name='w2')
print(w2)
print(w2 + 1)
w2.assign(w2 + 1)
print(w2.numpy())
w2.assign(w2 + 1)
print(w2.numpy())
print(w2)
# ## Learning the tf.keras API in detail
# ### The Sequential model
# +
## Create a random regression example dataset
np.random.seed(0)
def make_random_data():
x = np.random.uniform(low=-2, high=2, size=200)
y = []
for t in x:
r = np.random.normal(loc=0.0,
scale=(0.5 + t*t/3),
size=None)
y.append(r)
return x, 1.726*x -0.84 + np.array(y)
x, y = make_random_data()

plt.plot(x, y, 'o')
plt.show()
# -
# 150/50 train/test split.
x_train, y_train = x[:150], y[:150]
x_test, y_test = x[150:], y[150:]
# +
# Single-unit dense layer == plain linear regression.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(units=1, input_dim=1))
# -
model.summary()
model.compile(optimizer='sgd', loss='mse')
history = model.fit(x_train, y_train, epochs=50,
                    validation_split=0.3)
epochs = np.arange(1, 50+1)
plt.plot(epochs, history.history['loss'], label='Training loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# ### The functional API
# +
# Same linear model expressed with explicit Input/Model objects.
input = tf.keras.Input(shape=(1,))
output = tf.keras.layers.Dense(1)(input)
model = tf.keras.Model(input, output)
model.summary()
# -
model.compile(optimizer='sgd', loss='mse')
history = model.fit(x_train, y_train, epochs=50,
                    validation_split=0.3)
plt.plot(epochs, history.history['loss'], label='Training loss')
plt.plot(epochs, history.history['val_loss'], label='Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# ## Saving and restoring tf.keras models
# Save the weights only, then reload them into a freshly built model.
model.save_weights('simple_weights.h5')
# +
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(units=1, input_dim=1))
model.compile(optimizer='sgd', loss='mse')
model.load_weights('simple_weights.h5')
# -
model.evaluate(x_test, y_test)
# Save/restore the whole model (architecture + weights + optimizer state).
model.save('simple_model.h5')
model = tf.keras.models.load_model('simple_model.h5')
model.evaluate(x_test, y_test)
# +
# Checkpoint the best validation loss and stop early after 5 stagnant epochs.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(units=1, input_dim=1))
model.compile(optimizer='sgd', loss='mse')
callback_list = [tf.keras.callbacks.ModelCheckpoint(filepath='my_model.h5',
                                                    monitor='val_loss', save_best_only=True),
                 tf.keras.callbacks.EarlyStopping(patience=5)]
history = model.fit(x_train, y_train, epochs=50,
                    validation_split=0.2, callbacks=callback_list)
# -
epochs = np.arange(1, len(history.history['loss'])+1)
plt.plot(epochs, history.history['loss'], label='Training loss')
plt.plot(epochs, history.history['val_loss'], label='Validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Restore the saved model and load the best checkpointed weights:
model = tf.keras.models.load_model('simple_model.h5')
model.load_weights('my_model.h5')
model.evaluate(x_test, y_test)
# +
# Plot the fitted line over train (solid) and test (faded) points.
x_arr = np.arange(-2, 2, 0.1)
y_arr = model.predict(x_arr)
plt.figure()
plt.plot(x_train, y_train, 'bo')
plt.plot(x_test, y_test, 'bo', alpha=0.3)
plt.plot(x_arr, y_arr, '-r', lw=3)
plt.show()
# -
# ## Visualizing the computation graph
tf.keras.backend.clear_session()
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(units=1, input_dim=1))
# TensorBoard callback writes graphs/metrics under ./logs.
callback_list = [tf.keras.callbacks.TensorBoard(log_dir='logs')]
model.compile(optimizer='sgd', loss='mse')
history = model.fit(x_train, y_train, epochs=50,
                    callbacks=callback_list, validation_split=0.3)
# If you are using Colab or running the notebook locally, uncomment the
# two magic cells below to embed TensorBoard in the notebook.
# %load_ext tensorboard.notebook
# %tensorboard --logdir logs --port 6006
# +
# A small two-layer MLP, just to have a graph worth drawing.
input = tf.keras.Input(shape=(784,))
hidden = tf.keras.layers.Dense(100)(input)
output = tf.keras.layers.Dense(10)(hidden)
model = tf.keras.Model(input, output)
# -
tf.keras.utils.plot_model(model, to_file='model_1.png')
tf.keras.utils.plot_model(model, show_shapes=True, to_file='model_2.png')
| Understand_Tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Goal of the ETL Lesson
#
# The main goal of this ETL pipelines lesson is to take the [World Bank Project data set](https://datacatalog.worldbank.org/dataset/world-bank-projects-operations) and merge this data with the [World Bank indicator data](https://data.worldbank.org/indicator/SP.POP.TOTL). Then you'll load the merged data into a database.
#
# In the process, you'll need to transform these data sets in different ways. And finally, you'll code an ETL pipeline to extract, transform, and load the data all in one step.
#
# # Extracting data from a csv file
#
# The first step in an ETL pipeline is extraction. Data comes in all types of different formats, and you'll practice extracting data from csv files, JSON files, XML files, SQL databases, and the web.
#
# In this first exercise, you'll practice extracting data from a CSV file and then navigating through the results. You'll see that extracting data is not always a straight-forward process.
#
# This exercise contains a series of coding questions for you to solve. If you get stuck, there is a solution file called 1_csv_exercise_solution.ipynb. You can find this solution file by going to File->Open and then clicking on the file name.
# # Part 1 projects_data.csv
#
# You'll be using the following csv files:
# * projects_data.csv
# * population_data.csv
#
# As a first step, try importing the projects data using the pandas [read_csv method](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html). The file path is just '../data/projects_data.csv'. You can see the file if you click on File->Open in the workspace and open the data folder.
# TODO: import the projects_data.csv file using the pandas library
# Store the results in the df_projects variable
import pandas as pd

# NOTE(review): df_project holds the CSV *path*, not a DataFrame; the
# actual read happens in a later cell.
df_project = '../data/projects_data.csv'
# Did you get a DType warning? Read about what this warning is in the [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.errors.DtypeWarning.html).
#
# Pandas tries to figure out programmatically the data type of each column (integer, float, boolean, string). In this case, pandas could not automatically figure out the data type. That is because some columns have more than one possible data type. In other words, this data is messy.
#
# You can use the dtype option to specify the data type of each column. Because there are so many columns in this data set, you can set all columns to be strings at least for now.
#
# Try reading in the data set again using the read_csv() method. This time, also use the option dtype=str so that pandas treats everything like a string.
# TODO: Read in the projects_data.csv file using the read_csv method
# and dtype = str option
# dtype=str loads every column as a string, silencing the DtypeWarning
# caused by mixed-type columns (as this exercise's instructions request);
# the original call omitted the option.
df_projects = pd.read_csv(df_project, dtype=str)
# Run the cell below to see what the data looks like
df_projects.head()
# TODO: count the number of null values in the data set
# HINT: use the isnull() and sum() methods
# Per-column null counts across the whole frame.
df_projects.isnull().sum()
# Notice that the number 18248 shows up multiple times. There is also a countryname column with 0 missing values and a Country column with 14045 missing values. This data set clearly has some issues that will need to be solved in the transform part of the pipeline.
#
# Next, output the shape of the data frame.
# TODO: output the shape of the data frame
# (rows, columns) -- expected (18248, 56).
df_projects.shape
# There are 18248 rows in this data set. Considering many columns had 18248 NaN values, many columns in the data set are filled completely with NaN values.
# # Part 2 population_data.csv
# Next, use the pandas read_csv method to read in the population_data.csv file. The path to this file is "../data/population_data.csv". When you try to read in this data set using pandas, you'll get an error because there is something wrong with the data.
# TODO: read in the population_data.csv file using the read_csv() method
# Put the results in a variable called df_population
# NOTE: this cell is EXPECTED to fail ("expected 3 fields in line 5,
# saw 63") -- the file starts with malformed non-CSV header rows.
df_population = pd.read_csv('population_data.csv')
# There is something wrong with this data set. You should see an error that says "expected 3 fields in line 5, saw 63". What might have happened? Try printing out the first few lines of the data file to see what the issue might be.
# TODO: Print out the first 10 lines of the data set, line by line.
# HINT: You can't use the read_csv method from pandas
# HINT: to do this manually, you'll need to use pure Python
# HINT: the open(), readline(), and close() methods should be helpful
# HINT: Use a for loop

# Use a context manager so the file is closed even if printing raises;
# the original open()/close() pair leaked the handle on error.
with open('population_data.csv') as f:
    for i in range(10):
        line = f.readline()
        print('line :', i, line)
# The first four lines in the file are not properly formatted and don't contain data. Next, read in the data using the read_csv method. But this time, use the skiprows option.
# +
# TODO: read in population data skipping the first four rows
# Put the results in a variable called df_population
# The first four lines are free-text headers, not CSV data.
df_population = pd.read_csv('population_data.csv',skiprows=4)
# -
# Run this cell to see what the data looks like
df_population.head()
# Make sure to scroll over to see what the last column looks like. That last column, called 'Unnamed: 62', doesn't look very useful and is filled with NaN values.
# TODO: Count the number of null values in each column
# Default sum() runs down each column (axis=0).
df_population.isnull().sum()
# It looks like every year column has at least one NaN value.
# TODO: Count the number of null values in each row
# HINT: In the sum method, use axis=1
# BUG FIX: axis=0 counted nulls per COLUMN; axis=1 counts per row as the
# TODO and the hint require.
df_population.isnull().sum(axis=1)
# And it looks like almost every row has only one null value. That is probably from the 'Unnamed: 62' column that doesn't have any relevant information in it. Next, drop the 'Unnamed: 62' column from the data frame.
# +
# TODO: drop the 'Unnamed: 62' column from the data frame,
# and save the results in the df_population variable
df_population = df_population.drop('Unnamed: 62',axis = 1)
# -
# Run this code cell.
# This code outputs any row that contains a null value
# The purpose is to see what rows contain null values now that
# 'Unnamed: 62' was dropped from the data.
df_population[df_population.isnull().any(axis=1)]
# # Conclusion
#
# This population data doesn't look too bad. Only six rows have missing values. In the transformation part of the lesson, you'll have to deal with these missing values somehow.
#
# If you would like to see the solution file for this exercise, go to File->Open and click on 1_csv_exercise_solution.ipynb.
#
# In the next exercise, you'll practice extracting data json and xml files.
| etl/home/1_csv_exercise/1_csv_exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab inline
# Pull the model definitions/constants (V, Xs, As, etc.) into this namespace.
from richLowCost_copy import *
# Precomputed value-function grid for the low-cost scenario.
Vgrid = np.load("richLow.npy")
matplotlib.rcParams['figure.figsize'] = [16, 8]
plt.rcParams.update({'font.size': 15})
# %%time
# Evaluate the Bellman step at age t over the whole state grid Xs.
t = 50
v,cbkha = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,:,t+1]))(Xs)
from jax.scipy.optimize import minimize
# + tags=[]
# pc*qc / (ph*qh) = alpha/(1-alpha)
@partial(jit, static_argnums=(0,))
def feasibleActions(t, x, consumptionRatio, stockInvestingRatio):
    """Build the (c, b, k, h, sell/buy) action matrix for state x at age t.

    Depends on many module-level constants (As, nA, H, pt, pr, alpha,
    kappa, Rl, Kc, c_s, c_h, c_k, tau_L, tau_R, rh, m, T_R) and the
    after-tax income helper yAT -- all defined elsewhere in the project.
    Owner and renter branches are combined via the ownership flag x[5].
    """
    # owner
    # NOTE(review): this first assignment of `sell` is dead -- it is
    # immediately overwritten two lines below. Presumably leftover code.
    sell = As[:,2]
    # last term is the tax deduction of the interest portion of mortgage payment
    payment = (x[2] > 0)*(((t<=T_R)*tau_L + (t>T_R)*tau_R)*x[2]*rh - m)
    # Forced sale when liquid resources plus payment go non-positive.
    sell = (yAT(t,x) + x[0] + payment > 0)*jnp.zeros(nA) + (yAT(t,x) + x[0] + payment <= 0)*jnp.ones(nA)
    budget1 = yAT(t,x) + x[0] + (1-sell)*payment + sell*(H*pt - x[2] - c_s)
    # last term is the tax deduction of the interest portion of mortgage payment
    # consumptionRatio = As[:,0]*((t>=0 and t<20)*(2.0/3)) + As[:,0]*((t>=20 and t<40)*(1.0/3)) + As[:,0]*((t>=40 and t<=60)*(2.0/3))
    # Housing consumption: keep the owned house, or rent (capped at Rl) after selling.
    h = jnp.ones(nA)*H*(1+kappa)*(1-sell) + sell*jnp.clip(budget1*consumptionRatio*(1-alpha)/pr, a_max = Rl)
    c = budget1*consumptionRatio*(1-sell) + sell*(budget1*consumptionRatio - h*pr)
    budget2 = budget1*(1-consumptionRatio)
    # stock investing Ratio
    # stockInvestingRatio = (As[:,1]*(100-(t+20))/100)
    k = budget2*stockInvestingRatio
    # c_k presumably is a stock-market participation cost waived if x[6] -- TODO confirm.
    k = (k - (1-x[6])*(k>0)*c_k)*(1-Kc)
    b = budget2*(1-stockInvestingRatio)
    owner_action = jnp.column_stack((c,b,k,h,sell))
    # renter: may buy (20% down plus closing cost c_h) only before t == 30.
    buy = As[:,2]*(t < 30)
    budget1 = yAT(t,x) + x[0] - buy*(H*pt*0.2 + c_h)
    h = jnp.clip(budget1*consumptionRatio*(1-alpha)/pr, a_max = Rl)*(1-buy) + buy*jnp.ones(nA)*H*(1+kappa)
    c = (budget1*consumptionRatio - h*pr)*(1-buy) + buy*budget1*consumptionRatio
    budget2 = budget1*(1-consumptionRatio)
    k = budget2*stockInvestingRatio
    k = (k - (1-x[6])*(k>0)*c_k)*(1-Kc)
    b = budget2*(1-stockInvestingRatio)
    renter_action = jnp.column_stack((c,b,k,h,buy))
    # Select the branch matching the ownership state.
    actions = x[5]*owner_action + (1-x[5])*renter_action
    return actions
@partial(jit, static_argnums=(0,))
def V(t,V_next,x):
    '''
    Bellman step at age t: optimize the action ratios by BFGS over the
    inner Q objective and return the scipy OptimizeResult (not the bare
    value).

    x = [w,n,m,s,e,o,z]
    x = [0,1,2,3,4,5]
    xp:
        w_next 0
        n_next 1
        m_next 2
        s_next 3
        e_next 4
        o_next 5
        z_next 6
        prob_next 7
    '''
    def Q(consumptionRatioStockInvestingRatio):
        # The two decision variables are optimized in log space; exp keeps
        # the ratios positive.
        actions = feasibleActions(t, x, jnp.exp(consumptionRatioStockInvestingRatio[0]), jnp.exp(consumptionRatioStockInvestingRatio[1]))
        xp = transition(t,actions,x)
        # bequeath utility
        TB = xp[:,0]+x[1]*(1+r_bar)+xp[:,5]*(H*pt-x[2]*(1+rh)-25)
        bequeathU = uB(TB)
        if t == T_max-1:
            QQ = R(x,actions) + beta * dotProduct(xp[:,7], bequeathU)
        else:
            # Mix continuation value (survival prob Pa[t]) with bequest.
            QQ = R(x,actions) + beta * dotProduct(xp[:,7], Pa[t]*fit(V_next, xp) + (1-Pa[t])*bequeathU)
        # Infeasible actions evaluate to NaN; map them to -inf so max ignores them.
        QQ = jnp.nan_to_num(QQ, nan = -jnp.inf)
        v = QQ.max()
        return v
    # NOTE(review): minimize MINIMIZES Q, which returns the max of QQ --
    # verify the intended sign convention with the project authors.
    return minimize(Q,jnp.array([0.5,0.5]),method = "BFGS")
# -
# Spot-check the optimizer on two individual grid states.
t = 50
partial(V,t,Vgrid[:,:,:,:,:,:,:,t+1])(Xs[0])
t = 50
partial(V,t,Vgrid[:,:,:,:,:,:,:,t+1])(Xs[2])
import numpy as np
import jax.numpy as jnp
from jax.scipy.ndimage import map_coordinates
import warnings
from jax import jit, partial, random, vmap
from tqdm import tqdm
warnings.filterwarnings("ignore")
detEarning = jnp.array(np.load("constant/detEarningHigh.npy"))
# rescale the deterministic income
# NOTE(review): this rebinding is a no-op; presumably a scaling factor
# was removed here at some point.
detEarning = detEarning
####################################################################################### low skill feature
# Low-skill profile: half the earnings before index 46, pension reduced by 45 after.
lowdetEarning = jnp.concatenate([detEarning[:46]*0.5, detEarning[46:]-45])
| 20210909/.ipynb_checkpoints/speedTest-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Image Classification with Caffe
#
# This tutorial demonstrates the steps required to prepare and deploy a trained Caffe model for FPGA acceleration using Xilinx MLSuite:
# 1. **Quantize the model** - The quantizer will generate scaling parameters for quantizing floats INT8. This is required, because FPGAs will take advantage of Fixed Point Precision, to achieve more parallelization at lower power.
# 2. **Compile the Model** - In this step, the network Graph (prototxt) and the Weights (caffemodel) are compiled, the compiler
# 3. **Subgraph Cutting** - In this step, the original graph is cut, and a custom FPGA accelerated python layer is inserted to be used for Inference.
# 4. **Classification** - In this step, the caffe model and the prototxt from the previous step are run on the FPGA to perform inference on an input image.
#
# For command line versions see: examples/caffe/
#
# ## Prerequisite Files
# 1. **Model files** - This notebook requires that model files are located in
# `$VAI_ALVEO_ROOT/examples/caffe/models/`
# 2. **Image files** - This notebook requires ilsvrc2012 image files are downloaded in
# `$HOME/CK-TOOLS/dataset-imagenet-ilsvrc2012-val-min/`
#
# ## Setup (Before Running Notebook)
# **Note:** User is responsible for the use of the downloaded content and compliance with any copyright licenses.
#
# ```
# conda activate vitis-ai-caffe
# python -m ck pull repo:ck-env
# python -m ck install package:imagenet-2012-val-min
# python -m ck install package:imagenet-2012-aux
# head -n 500 $HOME/CK-TOOLS/dataset-imagenet-ilsvrc2012-aux/val.txt > $HOME/CK-TOOLS/dataset-imagenet-ilsvrc2012-val-min/val_map.txt
# # cd $VAI_ALVEO_ROOT/examples/caffe
# python resize.py $HOME/CK-TOOLS/dataset-imagenet-ilsvrc2012-val-min 256 256
# python getModels.py
# source $VAI_ALVEO_ROOT/overlaybins/setup.sh
# python replace_mluser.py --modelsdir models
# ```
# ### Step 1. Import required packages
# +
from __future__ import print_function
import os
import shutil
import subprocess
from IPython.display import Image as display
from ipywidgets import interact
import numpy as np
from caffe import Classifier, io
from caffe.proto import caffe_pb2
from caffe.draw import draw_net_to_file
from google.protobuf import text_format
# Environment Variables ("source overlaybins/setup.sh")
# VAI_ALVEO_ROOT falls back to the parent of the current directory.
VAI_ALVEO_ROOT = os.getenv("VAI_ALVEO_ROOT",os.getcwd()+"/..")
XCLBIN = "/opt/xilinx/overlaybins/xdnnv3"

print("Running w/ VAI_ALVEO_ROOT: %s" % VAI_ALVEO_ROOT)
print("Running w/ XCLBIN: %s" % XCLBIN)
# Bring in SubGraph Cutter
from decent import CaffeFrontend as xfdnnQuantizer
from vai.dpuv1.rt.scripts.framework.caffe.xfdnn_subgraph import CaffeCutter as xfdnnCutter
# -
# Delete stale output directories left over from a previous run so the
# quantizer and compiler start from a clean slate.
for _stale_dir in ("quantize_results", "work"):
    if os.path.exists(_stale_dir):
        shutil.rmtree(_stale_dir)
# ### Step 2. Choose a model
# Choose a model using the drop down, or select custom, and enter your own.
# Interactive model picker: choosing "custom" reveals extra text boxes for
# the prototxt/caffemodel paths and a model name.
@interact(MODEL=["bvlc_googlenet","inception_v2","inception_v3","inception_v4",\
                 "resnet50_v1","resnet50_v2","squeezenet","vgg16","custom"])
def selectModel(MODEL):
    """Publish prototxt/caffemodel/name as module globals so later cells
    (Quantize/Compile/Cut) can read them."""
    global prototxt
    global caffemodel
    global name
    model_root = VAI_ALVEO_ROOT + "/examples/caffe/models/"
    if MODEL == "custom":
        prototxt = None
        caffemodel = None
        name = None
    else:
        prototxt = model_root + MODEL + "/" + MODEL + "_train_val.prototxt"
        caffemodel = model_root + MODEL + "/" + MODEL + ".caffemodel"
        name = MODEL

if not prototxt:
    @interact(PROTOTXT="Provide the path to your prototxt")
    def selectPrototxt(PROTOTXT):
        global prototxt
        prototxt = PROTOTXT
    @interact(CAFFEMODEL="Provide the path to your caffemodel")
    def selectCaffemodel(CAFFEMODEL):
        global caffemodel
        caffemodel = CAFFEMODEL
    # NOTE(review): this widget callback shadows the previous
    # selectCaffemodel definition -- presumably it was meant to be named
    # selectName. Behavior is unaffected because @interact registers each
    # function as it is defined.
    @interact(MODEL="Provide a name to your model")
    def selectCaffemodel(MODEL):
        global name
        name = MODEL

print("Currently running : %s" % name)
print("Running with prototxt: %s" % prototxt)
print("Running with caffemodel: %s" % caffemodel)
# ### Step 3. Run the Quantizer
#
# Here, we will quantize the model. The inputs are model prototxt, model weights, number of test iterations and calibration iterations. The output is quantized prototxt, weights, and quantize_info.txt and will be generated in the quantize_results/ directory.
#
# The Quantizer will generate a json file holding scaling parameters for quantizing floats to INT8
# This is required, because FPGAs will take advantage of Fixed Point Precision, to achieve accelerated inference
def Quantize(prototxt, caffemodel, calib_iter=1, output_dir="quantize_results"):
    """Run the Vitis AI Caffe quantizer (vai_q_caffe) on a float model.

    Produces the quantized deploy prototxt/caffemodel plus
    quantize_info.txt in `output_dir`.

    Args:
        prototxt: path to the float model definition.
        caffemodel: path to the float weights.
        calib_iter: number of calibration iterations.
        output_dir: directory where vai_q_caffe writes its results.
    """
    os.environ["DECENT_DEBUG"] = "1"
    # BUG FIX: output_dir was accepted but never forwarded, so results
    # always landed in the tool's default directory regardless of the
    # argument.
    subprocess.call(["vai_q_caffe", "quantize",
                     "--model", prototxt,
                     "--weights", caffemodel,
                     "--output_dir", output_dir,
                     "--calib_iter", str(calib_iter)])
# Quantize the model selected above (results go to ./quantize_results).
Quantize(prototxt,caffemodel)
# ### Step 4: Run the Compiler
#
# The compiler takes in the quantizer outputs from the previous step (prototxt, weights, quantize_info) and outputs a compiler.json and quantizer.json.
#
# * A Network Graph (prototxt) and a Weights Blob (caffemodel) are compiled
# * The network is optimized
# * FPGA Instructions are generated
# +
arch = "/opt/vitis_ai/compiler/arch/DPUCADX8G/ALVEO/arch.json" # Informs compiler what underlying hardware is capable of
def Compile(prototxt="quantize_results/deploy.prototxt",\
caffemodel="quantize_results/deploy.caffemodel",\
quantize_info="quantize_results/quantize_info.txt"):
subprocess.call(["vai_c_caffe",
"--prototxt", prototxt,
"--caffemodel", caffemodel,
"--net_name", name,
"--output_dir", "work",
"--arch", arch,
"--options", "{\"quant_cfgfile\":\"%s\"}" %(quantize_info)])
# -
Compile()
# ### Step 4: Run the Subgraph Cutter
#
# The subgraph cutter creates a custom python layer to be accelerated on the FPGA. The inputs are compiler.json, quantizer.json and model weights from the compiler step, as well as the FPGA xclbin. This outputs a cut prototxt file with FPGA references, to be used for inference.
def Cut(prototxt):
    """Cut the compiled graph so the FPGA-accelerated subgraph runs on the DPU.

    Inserts the FPGA subgraph after the "data" layer and writes
    xfdnn_auto_cut_deploy.prototxt (inference graph) and
    xfdnn_auto_cut_train_val.prototxt.

    Args:
        prototxt: the model's original train_val prototxt.
    """
    cutter = xfdnnCutter(
        inproto="quantize_results/deploy.prototxt",
        trainproto=prototxt,
        outproto="xfdnn_auto_cut_deploy.prototxt",
        outtrainproto="xfdnn_auto_cut_train_val.prototxt",
        cutAfter="data",
        xclbin=XCLBIN,                    # FPGA bitstream selected earlier
        netcfg="work/compiler.json",      # compiler outputs from Compile()
        quantizecfg="work/quantizer.json",
        weights="work/weights.h5"
    )
    cutter.cut()
Cut(prototxt)
# Lets visualize the new graph with the FPGA subgraph
# Parse the cut prototxt back into a NetParameter and render it as a PNG.
net = caffe_pb2.NetParameter()
text_format.Merge(open("xfdnn_auto_cut_deploy.prototxt").read(), net)
draw_net_to_file(net,"xfdnn_auto_cut_deploy.png")
display("xfdnn_auto_cut_deploy.png")
# ### Step 5: Inference
#
# The inputs are the FPGA prototxt file, caffemodel weights, a test image, and the labels
#
def Classify(prototxt,caffemodel,image,labels):
    """Run inference on one image and print the top-5 predicted classes.

    Args:
        prototxt:   deploy prototxt (here the xfdnn auto-cut graph).
        caffemodel: trained weights.
        image:      path to the input image file.
        labels:     path to a tab-separated label (synset) file.
    """
    # ImageNet-style preprocessing: BGR mean subtraction, 0-255 scale,
    # RGB->BGR channel swap.
    classifier = Classifier(prototxt,caffemodel,
        mean=np.array([104,117,123]),
        raw_scale=255, channel_swap=[2,1,0])
    predictions = classifier.predict([io.load_image(image)]).flatten()
    labels = np.loadtxt(labels, str, delimiter='\t')
    # Indices of the five highest scores, best first
    top_k = predictions.argsort()[-1:-6:-1]
    for l,p in zip(labels[top_k],predictions[top_k]):
        print (l," : ",p)
# Choose image to run, display it for reference
HOME = os.getenv("HOME")
image = HOME+"/CK-TOOLS/dataset-imagenet-ilsvrc2012-val-min/ILSVRC2012_val_00000002.JPEG"
display(filename=image)
Classify("xfdnn_auto_cut_deploy.prototxt","quantize_results/deploy.caffemodel",image,HOME+"/CK-TOOLS/dataset-imagenet-ilsvrc2012-aux/synset_words.txt")
# # Conclusion
# This notebook demonstrates how to target Xilinx FPGAs for inference using Caffe.
# When the time comes to take your application to production please look at examples in /workspace/alveo/examples/deployment_modes/
# Highest performance is achieved by creating multiprocess pipelines.
| alveo/notebooks/image_classification_caffe.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="6tDnOpUKcX_N"
# ### **(a) Bayesian Neural Network with limited features (4) and emotions (3):**
#
# + colab={"base_uri": "https://localhost:8080/"} id="y8xMegWp_QMj" outputId="f6900e7d-256d-49a3-9092-cedce9557955"
# # !pip3 install torch==1.2.0+cu92 torchvision==0.4.0+cu92 -f https://download.pytorch.org/whl/torch_stable.html
# + colab={"base_uri": "https://localhost:8080/"} id="ePNCiSndAwLd" outputId="678b0393-6d6b-4a63-9507-5c38c901260f"
# !pip3 install torchbnn
# + id="O-0nzNZq_BFi"
import pandas as pd
import numpy as np
import glob
import seaborn as sns
import matplotlib.pyplot as plt
# #%matplotlib.pylab as plt
from sklearn.linear_model import LinearRegression
import nltk
import os
import nltk.corpus
import datetime
import scipy.stats as sp
import tensorflow as tf
from sklearn import datasets
import torch
import torch.nn as nn
import torchbnn as bnn
import torch.optim as optim
import matplotlib.pyplot as plt
# + id="XcPjglay_wq1"
# dataset = datasets.load_iris()
# dataset
# + colab={"base_uri": "https://localhost:8080/"} id="JlVzgn2cj9gW" outputId="e9570527-2795-4ed1-d59f-501eb6aed29e"
from google.colab import drive
drive.mount("/content/drive/")
# + id="d277bxXekAx-"
data = pd.read_csv('/content/drive/MyDrive/Research/Bayesian deep neural network/imotion_feature_sorted.csv')
# + id="M_H7vN17OlOh" colab={"base_uri": "https://localhost:8080/", "height": 423} outputId="0ab47439-f5e3-4a4e-cd86-277b12c93806"
# BUG FIX: dropna() returns a new frame; the result was previously
# discarded, so NaN rows stayed in the data used for training below.
# Section (b) of this notebook assigns it -- do the same here.
data = data.dropna()
# + id="NT36dMvTkDIO" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="d6b8db60-0c61-4ccc-ae99-e5d320f76bea"
data.head()
# + id="RTuJ5f_Bk9vz" colab={"base_uri": "https://localhost:8080/", "height": 242} outputId="808d43c7-182d-4de6-e9d7-99cafb77b2a5"
np_array = data.to_numpy()
display(np_array)
# + id="oQwvoON7xOVX" colab={"base_uri": "https://localhost:8080/"} outputId="dd0d0e42-6952-449a-fa26-5b98732c9bc7"
# Four facial action-unit features used as model inputs
x = data[['Brow Furrow','Cheek Raise','Inner Brow Raise','Lip Suck']].to_numpy()
x
# + id="G2-aeOuXelyi" colab={"base_uri": "https://localhost:8080/"} outputId="020e81f8-f432-4235-bd09-287676478b1e"
# Class labels (stimulus ids), flattened to 1-D for CrossEntropyLoss
y = data[['SourceStimuliNameArray']].to_numpy()
y = y.ravel()
y
# + id="kvqDQW5q7_m0"
data = x
target = y
data_tensor=torch.from_numpy(data).float()
target_tensor=torch.from_numpy(target).long()
# + colab={"base_uri": "https://localhost:8080/"} id="uELAmJEVJtUs" outputId="36ee396e-af33-492a-d531-abc9600f36ad"
# Tensors actually used for training: float features, integer labels
x, y = torch.from_numpy(x).float(), torch.from_numpy(y).long()
x.shape, y.shape
# + [markdown] id="C8XmAhN7TRMP"
# 1. Define model
# + id="6lC47NMVKNbJ"
# Bayesian MLP: 4 facial-action features -> 100 hidden units -> 3 classes.
model = nn.Sequential(
    bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=4, out_features=100),
    nn.ReLU(),
    bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=100, out_features=3),
)
# + id="w7jpvMABKRWI"
# Total training loss = cross-entropy + kl_weight * KL(posterior || prior)
ce_loss = nn.CrossEntropyLoss()
kl_loss = bnn.BKLLoss(reduction='mean', last_layer_only=False)
kl_weight = 0.01
optimizer = optim.Adam(model.parameters(), lr=0.01)
# + [markdown] id="FmqJYdKDTYsf"
# ###2. Train Model
# + id="EdnJtRpRKTxY"
# NOTE(review): overrides the 0.01 set above -- 0.1 is the value actually
# used by the training loop below; confirm which was intended.
kl_weight = 0.1
# + colab={"base_uri": "https://localhost:8080/"} id="9tfCmXxMKVyY" outputId="74f11fa3-8e5a-448c-8616-2a043cf62c48"
# Full-batch training for 5000 steps; cost = CE + kl_weight * KL.
for step in range(5000):
    pre = model(x)          # forward pass (weights sampled stochastically)
    ce = ce_loss(pre, y)
    kl = kl_loss(model)
    cost = ce + kl_weight*kl
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()
# Training-set accuracy from the final forward pass
_, predicted = torch.max(pre.data, 1)
total = y.size(0)
correct = (predicted == y).sum()
print('- Accuracy: %f %%' % (100 * float(correct) / total))
print('- CE : %2.2f, KL : %2.2f' % (ce.item(), kl.item()))
# + [markdown] id="8Mrr2s_JThNX"
# ### 3. Test Model
# + id="6lR4H2GePyCa"
def draw_plot(predicted) :
    """Show ground-truth vs. predicted class colourings side by side.

    Scatter coordinates come from the first two columns of the
    module-level feature tensor ``x``; the left panel is coloured by the
    true labels ``y``, the right by ``predicted``.
    """
    figure = plt.figure(figsize=(16, 5))
    panels = [figure.add_subplot(1, 2, pos) for pos in (1, 2)]
    for panel, caption, colouring in zip(panels, ("REAL", "PREDICT"), (y, predicted)):
        points = panel.scatter(x[:, 0], x[:, 1], c=colouring)
        plt.colorbar(points, ax=panel)
        panel.set_title(caption)
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 336} id="g3BJshbEQP6R" outputId="38ae06ea-0e2b-4a63-ad5f-e142ea7ad980"
# Bayesian Neural Network will return different outputs even if inputs are same.
# In other words, different plots will be shown every time forward method is called.
pre = model(x)
_, predicted = torch.max(pre.data, 1)
draw_plot(predicted)
# + [markdown] id="GXuFxxHrTEAL"
# ### **(b) Different approach: Bayesian Neural Network with all the features (21) and emotions (6):**
# + colab={"base_uri": "https://localhost:8080/"} id="lhkX9JZ1TDUr" outputId="358ddaa0-bc8c-45a0-8922-873d04432b96"
pip install tensorflow-probability
# + id="9GKXsDTyT9Ww"
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn as skl
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
# + id="Ei--GU03UDOu"
# data = pd.read_csv('/content/drive/MyDrive/Research/Bayesian deep neural network/imotion_feature_sorted.csv')
# data1 = pd.read_csv('/content/drive/MyDrive/Research/Bayesian deep neural network/iMotions_sensor_data-sample.csv')
data1 = pd.read_csv('/content/drive/MyDrive/Research/Bayesian deep neural network/Copy of iMotions_sensor_data-sample_n.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 327} id="1rb9-Ipj-i5E" outputId="a4d4809d-ecc6-4e62-bafe-d56fe90068d1"
data1 = data1.dropna()
data1.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 225} id="Q6dsG0px--fN" outputId="899f4e69-7f73-4ee6-cbd8-c016783f7e4f"
np_array = data1.to_numpy()
display(np_array)
# + colab={"base_uri": "https://localhost:8080/"} id="TVrRJwuh_HC0" outputId="7c8cef51-da49-44b6-9fb5-9f683c9d8ad9"
# NOTE(review): this 21-column list repeats 'Lip Suck' and 'Smile' twice
# each, so only 19 distinct facial actions are used; the duplicates do
# keep the count at the 21 inputs the model below expects -- confirm
# whether the repetition is intentional.
x = data1[['Brow Furrow', 'Brow Raise', 'Cheek Raise','Chin Raise', 'Dimpler', 'Eye Closure', 'Eye Widen', 'Jaw Drop', 'Inner Brow Raise','Lip Suck', 'Lip Corner Depressor', 'Lip Press', 'Lip Pucker', 'Lip Stretch', 'Lid Tighten', 'Lip Suck',
        'Mouth Open', 'Nose Wrinkle', 'Smile', 'Smile', 'Upper Lip Raise']].to_numpy()
x
# + colab={"base_uri": "https://localhost:8080/"} id="CKfbe7n1_Ow0" outputId="3265efb2-462a-46a7-aca6-86d823dacc2f"
# Class labels, flattened to 1-D
y = data1[['SourceStimuliArray']].to_numpy()
y = y.ravel()
y
# + id="3WAXEAJzA4PE"
data = x
target = y
data_tensor=torch.from_numpy(data).float()
target_tensor=torch.from_numpy(target).long()
# + colab={"base_uri": "https://localhost:8080/"} id="C1zzCpjSBRp-" outputId="933e1cc5-8cb6-4657-fc1c-2bc8a5120ec7"
x, y = torch.from_numpy(x).float(), torch.from_numpy(y).long()
x.shape, y.shape
# + id="JBTLPam9Bgu0"
# Bayesian MLP: 21 features -> 100 hidden units -> 6 emotion classes.
model = nn.Sequential(
    bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=21, out_features=100),
    nn.ReLU(),
    bnn.BayesLinear(prior_mu=0, prior_sigma=0.1, in_features=100, out_features=6),
)
# + id="XL8vWT22Bm1E"
ce_loss = nn.CrossEntropyLoss()
kl_loss = bnn.BKLLoss(reduction='mean', last_layer_only=False)
kl_weight = 0.01
optimizer = optim.Adam(model.parameters(), lr=0.01)
# + id="WyX_OE25BfMG"
kl_weight = 0.9
# + colab={"base_uri": "https://localhost:8080/"} id="WjSRcHM9Bx5N" outputId="16313738-07da-4862-8c31-1d961b6480b6"
# Same full-batch training loop as section (a): cost = CE + kl_weight*KL.
for step in range(5000):
    pre = model(x)
    ce = ce_loss(pre, y)
    kl = kl_loss(model)
    cost = ce + kl_weight*kl
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()
# Training-set accuracy from the final forward pass
_, predicted = torch.max(pre.data, 1)
total = y.size(0)
correct = (predicted == y).sum()
print('- Accuracy: %f %%' % (100 * float(correct) / total))
print('- CE : %2.2f, KL : %2.2f' % (ce.item(), kl.item()))
# + id="eGE1c_WiBxds"
def draw_plot(predicted) :
    """Plot true labels (left) vs. model predictions (right).

    Uses the first two columns of the module-level feature tensor ``x``
    as scatter coordinates; colours encode class ids.
    """
    figure = plt.figure(figsize=(18, 7))
    panels = [figure.add_subplot(1, 2, idx) for idx in (1, 2)]
    for panel, caption, colouring in zip(panels, ("REAL", "PREDICT"), (y, predicted)):
        scatter = panel.scatter(x[:, 0], x[:, 1], c=colouring)
        plt.colorbar(scatter, ax=panel)
        panel.set_title(caption)
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 443} id="Cg_ULzxxIv5r" outputId="187fac5a-ded9-4488-f443-af3c3c919808"
# Bayesian Neural Network will return different outputs even if inputs are same.
# In other words, different plots will be shown every time forward method is called.
pre = model(x)
_, predicted = torch.max(pre.data, 1)
draw_plot(predicted)
# + [markdown] id="2juItHAw1d2q"
# ### **(c) Artificial Neural Network (ANN) with all the features and emotions:**
# + colab={"base_uri": "https://localhost:8080/"} id="gc1oIgVoBAMz" outputId="bc3bcdb8-9884-4b30-8619-4edf27585c25"
# data2['SourceStimuliArray'].sum() / data1.shape[0] # class rate
# + id="CBPTu03cfO3E"
# Importing Packages
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense
from sklearn import datasets
# + id="YgUtekLGfSan"
# # Loading Dataset
# data = datasets.load_iris()
# print(data)
# + id="DCEEbemYfZRh"
data2 = data1[['SourceStimuliArray', 'Brow Furrow', 'Brow Raise', 'Cheek Raise','Chin Raise', 'Dimpler', 'Eye Closure', 'Eye Widen', 'Jaw Drop', 'Inner Brow Raise','Lip Suck', 'Lip Corner Depressor', 'Lip Press', 'Lip Pucker', 'Lip Stretch', 'Lid Tighten', 'Lip Suck',
'Mouth Open', 'Nose Wrinkle', 'Smile', 'Smile', 'Upper Lip Raise' ]]
# + colab={"base_uri": "https://localhost:8080/"} id="bErWOiA3fipa" outputId="17c2fcc5-6d18-40d8-ddac-4e458a3574d6"
x = data1[['Brow Furrow', 'Brow Raise', 'Cheek Raise','Chin Raise', 'Dimpler', 'Eye Closure', 'Eye Widen', 'Jaw Drop', 'Inner Brow Raise','Lip Suck', 'Lip Corner Depressor', 'Lip Press', 'Lip Pucker', 'Lip Stretch', 'Lid Tighten', 'Lip Suck',
'Mouth Open', 'Nose Wrinkle', 'Smile', 'Smile', 'Upper Lip Raise']].to_numpy()
x
# + colab={"base_uri": "https://localhost:8080/"} id="36M7r2yEflqU" outputId="47a39228-f7e7-490a-d79f-389e841ef611"
y = data1[['SourceStimuliArray']].to_numpy()
y = y.ravel()
y
# + id="wuQ68Cr3gYAC"
# Split Dataset
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
# + colab={"base_uri": "https://localhost:8080/"} id="lj7Ns9DwgcW6" outputId="fe623c99-d7cf-45a9-b5b6-6bddbaeaff6e"
# Data Shape
print(x_train.shape)
print(x_test.shape)
# + id="hB6exeN4gfel"
# Building the Model
# Plain feed-forward baseline: 21 inputs -> 100 tanh units -> softmax.
# NOTE(review): the output layer has 21 units although the section header
# says 6 emotion classes; sparse_categorical_crossentropy still trains as
# long as every label is < 21, but confirm the intended class count.
model= Sequential()
model.add(Dense(100,input_shape=(21,), activation="tanh"))
model.add(Dense(21, activation='softmax'))
# + id="rCMsQByighDL"
# Compile the Model
model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
# + colab={"base_uri": "https://localhost:8080/"} id="dXwBQd6fgj66" outputId="f7f9e67a-1a10-4228-b4d9-65760e9d7b71"
# Fit the Model
model.fit(x_train,y_train, epochs=100)
# + colab={"base_uri": "https://localhost:8080/"} id="_NwUhcE_g6KT" outputId="feb5f4e1-efe4-4ab4-b92f-0f29da81eede"
# Evaluate the Model
model.evaluate(x_test, y_test)
# + id="hVJX2736g-Nx"
# Predict for the first 10 Observations
pred=model.predict(x_test[:10])
print(pred)
# + colab={"base_uri": "https://localhost:8080/"} id="nqTCPX9OhAHI" outputId="1b78f603-28fe-4dae-bb68-5b1ebbf93888"
p=np.argmax(pred, axis=1)
print(p)
print(y_test[:10])
| Bayesian deep neural network facial expression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Create Features for stock analysis
# + outputHidden=false inputHidden=false
# Libraries
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
import yfinance as yf
from pandas_datareader import data as pdr
yf.pdr_override()  # route pandas_datareader downloads through yfinance
# + outputHidden=false inputHidden=false
# Symbol and date range for the feature study
stock = 'MSFT'
start = '2014-01-01'
end = '2018-01-01'
df = pdr.get_data_yahoo(stock, start, end)
# + outputHidden=false inputHidden=false
# Move the DatetimeIndex into a regular 'Date' column
df = df.reset_index()
df.head()
# + outputHidden=false inputHidden=false
# Drop date variable
# df = df.drop(['Date'], 1)
# Daily Returns: day-over-day percentage change of the adjusted close
df['Returns'] = round(df['Adj Close'].pct_change(), 4)
df.head()
# + outputHidden=false inputHidden=false
# Log Returns
df['Log Returns'] = np.log(df['Adj Close']) - np.log(df['Adj Close'].shift(1))
df.head()
# + outputHidden=false inputHidden=false
# Calculate in Rows using axis=1
# Intraday dispersion of the four daily price points
df['Risk'] = round(df[['Open', 'High', 'Low','Adj Close']].std(axis=1), 4)
df.head()
# + outputHidden=false inputHidden=false
# VWAP: cumulative volume-weighted average of the high/low midpoint
df['VWAP'] = round(np.cumsum(df['Volume']*(df['High']+df['Low'])/2) / np.cumsum(df['Volume']), 2)
df.head()
# + outputHidden=false inputHidden=false
df['Mean'] = round(df[['Open', 'High', 'Low','Adj Close']].mean(axis=1), 2)
df.head()
# + outputHidden=false inputHidden=false
df['Median'] = round(df[['Open', 'High', 'Low','Adj Close']].median(axis=1), 2)
df.head()
# + outputHidden=false inputHidden=false
# BUG FIX: DataFrame.mode(axis=1) returns a DataFrame (a row can have
# several modes), which cannot be assigned to a single column; keep the
# first mode of each row.
df['Mode'] = round(df[['Open', 'High', 'Low','Adj Close']].mode(axis=1).iloc[:, 0], 2)
df.head()
# + outputHidden=false inputHidden=false
df['Variance'] = round(df[['Open', 'High', 'Low','Adj Close']].var(axis=1), 4)
df.head()
# + outputHidden=false inputHidden=false
df['Skew'] = round(df[['Open', 'High', 'Low','Adj Close']].skew(axis=1), 4)
df.head()
# + outputHidden=false inputHidden=false
# (a second, byte-identical Skew computation was removed here)
df['Kurt'] = round(df[['Open', 'High', 'Low','Adj Close']].kurt(axis=1), 4)
df.head()
# + outputHidden=false inputHidden=false
# Standard error of the mean
df['Error'] = df[['Open', 'High', 'Low','Adj Close']].sem(axis=1)
df.head()
# + outputHidden=false inputHidden=false
import talib as ta
# + outputHidden=false inputHidden=false
# Creating Indicators
n=5
# Inputs are shifted one day so each row only uses prior-day information.
df['RSI']=ta.RSI(np.array(df['Adj Close'].shift(1)), timeperiod=n)
# BUG FIX: pd.rolling_mean / pd.rolling_corr were removed from pandas
# (deprecated in 0.18, removed in 0.23); use the .rolling() API instead.
df['SMA'] = df['Adj Close'].shift(1).rolling(window=n).mean()
df['Corr'] = df['SMA'].rolling(window=n).corr(df['Adj Close'].shift(1))
df['SAR']=ta.SAR(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),0.2,0.2)
# + outputHidden=false inputHidden=false
# Momentum Indicator Functions
# NOTE(review): ADX below is fed Open where TA-Lib expects the close
# series, MACD is fed High, and ADXR/STOCH/STOCHF/ULTOSC use the
# *unshifted* Adj Close unlike the other indicators -- confirm each of
# these is intentional before relying on the features.
df['ADX']=ta.ADX(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)), np.array(df['Open'].shift(1)), timeperiod=n)
df['ADXR']=ta.ADXR(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),np.array(df['Adj Close']), timeperiod=n)
# df['APO']=ta.APO(np.array(df['Adj Close'].shift(1), fastperiod=12, slowperiod=26, matype=0))
df['AROON_DOWN'], df['AROON_UP']=ta.AROON(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)), timeperiod=n)
df['AROONOSC']=ta.AROONOSC(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),timeperiod=n)
df['BOP']=ta.BOP(np.array(df['Open'].shift(1)),np.array(df['High'].shift(1)),\
                 np.array(df['Low']),np.array(df['Adj Close'].shift(1)))
df['CCI']=ta.CCI(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),\
                 np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['CMO']=ta.CMO(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['DX']=ta.DX(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),\
               np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['MACD'], df['MACD_SIGNAL'], df['MACD_HIST'] =ta.MACD(np.array(df['High'].shift(1)),fastperiod=12, slowperiod=26, signalperiod=9)
# df['MACDEXT'], df['MACD_SIGNAL'], df['MACD_HIST'] =ta.MACDEXT(np.array(df['Adj Close'].shift(1)), fastperiod=12, fastmatype=0, slowperiod=26, slowmatype=0, signalperiod=9, signalmatype=0)
# df['MACDFIX'], df['MACD_SIGNAL'], df['MACD_HIST'] =ta.MACDFIX(np.array(df['Adj Close'].shift(1)), signalperiod=9)
df['MFI']=ta.MFI(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),\
                 np.array(df['Adj Close'].shift(1)),np.array(df['Volume'].shift(1)) , timeperiod=n)
df['MINUS_DI']=ta.MINUS_DI(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),\
                           np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['MINUS_DM']=ta.MINUS_DM(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)), timeperiod=n)
df['MOM']=ta.MOM(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['PLUS_DI']=ta.PLUS_DI(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),\
                         np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['PLUS_DM']=ta.PLUS_DM(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)), timeperiod=n)
df['PPO']=ta.PPO(np.array(df['Adj Close'].shift(1)), fastperiod=12, slowperiod=26, matype=0)
df['ROC']=ta.ROC(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['ROCP']=ta.ROCP(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['ROCR']=ta.ROCR(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['ROCR100']=ta.ROCR100(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['slowk'], df['slowd'] =ta.STOCH(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),np.array(df['Adj Close']), fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=3, slowd_matype=0)
df['fastk'], df['fastd'] =ta.STOCHF(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),np.array(df['Adj Close']), fastk_period=5, fastd_period=3, fastd_matype=0)
# df['fastk'], df['fastd'] =ta.STOCHRIS(np.array(df['Adj Close'].shift(1)), timeperiod=N, fastk_period=5, fastd_period=3, fastd_matype=0)
df['TRIX']=ta.TRIX(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['ULTOSC']=ta.ULTOSC(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),\
                       np.array(df['Adj Close']), timeperiod1=7, timeperiod2=14, timeperiod3=28)
df['WILLR']=ta.WILLR(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),\
                     np.array(df['Adj Close'].shift(1)), timeperiod=n)
# + outputHidden=false inputHidden=false
# Volatility Indicator Functions
df['ATR']=ta.ATR(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['NATR']=ta.NATR(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['TRANGE']=ta.TRANGE(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),np.array(df['Adj Close'].shift(1)))
# + outputHidden=false inputHidden=false
# Volume Indicator Functions
df['AD']=ta.AD(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),np.array(df['Adj Close'].shift(1)),np.array(df['Volume'].shift(1)))
df['ADOSC']=ta.ADOSC(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),np.array(df['Adj Close'].shift(1)),np.array(df['Volume'].shift(1)),fastperiod=3, slowperiod=10)
df['OBV']=ta.OBV(np.array(df['Adj Close'].shift(1)),np.array(df['Volume'].shift(1)))
# + outputHidden=false inputHidden=false
# Price Transform Functions (averages of the daily OHLC price points)
df['AVGPRICE']=ta.AVGPRICE(np.array(df['Open'].shift(1)),np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)), np.array(df['Adj Close'].shift(1)))
df['MEDPRICE']=ta.MEDPRICE(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)))
df['TYPPRICE']=ta.TYPPRICE(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),np.array(df['Adj Close'].shift(1)))
df['WCLPRICE']=ta.WCLPRICE(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)),np.array(df['Adj Close'].shift(1)))
# + outputHidden=false inputHidden=false
# Pattern Recognition Functions
# Every TA-Lib CDL* pattern takes the same (open, high, low, close)
# arrays, so build the arrays once and drive all 60 columns from a table
# instead of 60 near-identical lines.  The tuple order is preserved so
# the DataFrame columns appear exactly as before; 'Adj Close' stands in
# for close, as in the rest of this notebook.
_open = np.array(df['Open'])
_high = np.array(df['High'])
_low = np.array(df['Low'])
_close = np.array(df['Adj Close'])
# (column name, TA-Lib function, pass penetration=0 as the original did?)
_cdl_patterns = [
    ('Two_Crows', ta.CDL2CROWS, False),
    ('Three_Crows', ta.CDL3BLACKCROWS, False),
    ('Three_Inside_Up_Down', ta.CDL3INSIDE, False),
    ('Three_Line_Strike', ta.CDL3LINESTRIKE, False),
    ('Thre_Outside_Up_Down', ta.CDL3OUTSIDE, False),
    ('Thre_Stars_In_The_South', ta.CDL3STARSINSOUTH, False),
    ('Three_Advancing_White_Soldiers', ta.CDL3WHITESOLDIERS, False),
    ('Abandoned_Baby', ta.CDLABANDONEDBABY, True),
    ('Advanced_Block', ta.CDLADVANCEBLOCK, False),
    ('Belt_hold', ta.CDLBELTHOLD, False),
    ('Breakaway', ta.CDLBREAKAWAY, False),
    ('Closing_Marubozu', ta.CDLCLOSINGMARUBOZU, False),
    ('Concealing_Baby_Swallow', ta.CDLCONCEALBABYSWALL, False),
    ('Counterattack', ta.CDLCOUNTERATTACK, False),
    ('Dark_Cloud_Cover', ta.CDLDARKCLOUDCOVER, True),
    ('Doji', ta.CDLDOJI, False),
    ('Doji_Star', ta.CDLDOJISTAR, False),
    ('Dragonfly_Doji', ta.CDLDRAGONFLYDOJI, False),
    ('Engulfing_Pattern', ta.CDLENGULFING, False),
    ('Evening_Doji_Star', ta.CDLEVENINGDOJISTAR, True),
    ('Evening_Star', ta.CDLEVENINGSTAR, True),
    ('Up_Down_gap_side_by_side_white_lines', ta.CDLGAPSIDESIDEWHITE, False),
    ('Gravestone_Doji', ta.CDLGRAVESTONEDOJI, False),
    ('Hammer', ta.CDLHAMMER, False),
    ('Hanging_Man', ta.CDLHANGINGMAN, False),
    ('Harami_Pattern', ta.CDLHARAMI, False),
    ('Harami_Cross_Pattern', ta.CDLHARAMICROSS, False),
    ('High_Wave_Candle', ta.CDLHIGHWAVE, False),
    ('Hikkake_Pattern', ta.CDLHIKKAKE, False),
    ('Modified_Hikkake_Pattern', ta.CDLHIKKAKEMOD, False),
    ('Homing_Pigeon', ta.CDLHOMINGPIGEON, False),
    ('Identical_Three_Crows', ta.CDLIDENTICAL3CROWS, False),
    ('In_Neck_Pattern', ta.CDLINNECK, False),
    ('Inverted_Hammer', ta.CDLINVERTEDHAMMER, False),
    ('Kicking', ta.CDLKICKING, False),
    ('Kicking_Bull_Bear', ta.CDLKICKINGBYLENGTH, False),
    ('Ladder_Bottom', ta.CDLLADDERBOTTOM, False),
    ('Long_Legged_Doji', ta.CDLLONGLEGGEDDOJI, False),
    ('Long_line_Candle', ta.CDLLONGLINE, False),
    ('Marubozu', ta.CDLMARUBOZU, False),
    ('Matching_Low', ta.CDLMATCHINGLOW, False),
    ('Mat_Hold', ta.CDLMATHOLD, False),
    ('Morning_Star', ta.CDLMORNINGSTAR, False),
    ('On_Neck_Pattern', ta.CDLONNECK, False),
    ('Piercing_Pattern', ta.CDLPIERCING, False),
    ('Rickshaw_Man', ta.CDLRICKSHAWMAN, False),
    ('Rising_Falling_Three_Method', ta.CDLRISEFALL3METHODS, False),
    ('Separating_Lines', ta.CDLSEPARATINGLINES, False),
    ('Shooting_Star', ta.CDLSHOOTINGSTAR, False),
    ('Short_Line_Candle', ta.CDLSHORTLINE, False),
    ('Spinning_Top', ta.CDLSPINNINGTOP, False),
    ('Stalled_Pattern', ta.CDLSTALLEDPATTERN, False),
    ('Stick_Sandwich', ta.CDLSTICKSANDWICH, False),
    ('Takuri', ta.CDLTAKURI, False),
    ('Tasuki_Gap', ta.CDLTASUKIGAP, False),
    ('Thrusting_Pattern', ta.CDLTHRUSTING, False),
    ('Tristar_Pattern', ta.CDLTRISTAR, False),
    ('Unique_3_River', ta.CDLUNIQUE3RIVER, False),
    ('Upside_Gap_Two_Crows', ta.CDLUPSIDEGAP2CROWS, False),
    ('Upside_Downside_Gap_Three_Methods', ta.CDLXSIDEGAP3METHODS, False),
]
for _col, _fn, _with_penetration in _cdl_patterns:
    if _with_penetration:
        df[_col] = _fn(_open, _high, _low, _close, penetration=0)
    else:
        df[_col] = _fn(_open, _high, _low, _close)
# + outputHidden=false inputHidden=false
# Cycle Indicator Functions (Hilbert Transform family)
df['HT_DCPERIOD']=ta.HT_DCPERIOD(np.array(df['Adj Close'].shift(1)))
df['HT_DCPHASE']=ta.HT_DCPHASE(np.array(df['Adj Close'].shift(1)))
df['inphase'], df['quadrature']=ta.HT_PHASOR(np.array(df['Adj Close'].shift(1)))
df['sine'], df['leadsine']=ta.HT_SINE(np.array(df['Adj Close'].shift(1)))
df['HT_TRENDMODE']=ta.HT_TRENDMODE(np.array(df['Adj Close'].shift(1)))
# True-range components and their row-wise maximum
df['ATR1']=abs(np.array(df['High'].shift(1)) - np.array(df['Low'].shift(1)))
df['ATR2']=abs(np.array(df['High'].shift(1)) - np.array(df['Adj Close'].shift(1)))
df['ATR3']=abs(np.array(df['Low'].shift(1)) - np.array(df['Adj Close'].shift(1)))
df['AverageTrueRange'] = df[['ATR1', 'ATR2', 'ATR3']].max(axis=1)
# BUG FIX: pd.ewma() was removed from pandas (deprecated 0.18, removed
# 0.23); use Series.ewm().mean() instead.  NOTE: this 'EMA' column is
# overwritten by ta.EMA in a later cell.
df['EMA'] = df['Adj Close'].ewm(span=n, min_periods=n - 1).mean()
# + outputHidden=false inputHidden=false
# Statistic Functions (all on the 1-day-shifted series, so each row only
# uses prior-day information)
df['Beta']=ta.BETA(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)), timeperiod=n)
df['CORREL']=ta.CORREL(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)), timeperiod=n)
df['LINEARREG']=ta.LINEARREG(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['LINEARREG_ANGLE']=ta.LINEARREG_ANGLE(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['LINEARREG_INTERCEPT']=ta.LINEARREG_INTERCEPT(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['LINEARREG_SLOPE']=ta.LINEARREG_SLOPE(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['STDDEV']=ta.STDDEV(np.array(df['Adj Close'].shift(1)), timeperiod=n, nbdev=1)
df['Time Series Forecast']=ta.TSF(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['VAR']=ta.VAR(np.array(df['Adj Close'].shift(1)), timeperiod=n, nbdev=1)
# + outputHidden=false inputHidden=false
# Overlap Studies Functions
df['upperband'], df['middleband'], df['lowerband']=ta.BBANDS(np.array(df['Adj Close'].shift(1)), timeperiod=n, nbdevup=2, nbdevdn=2, matype=0)
df['DEMA']=ta.DEMA(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['EMA']=ta.EMA(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['HT_TRENDLINE']=ta.HT_TRENDLINE(np.array(df['Adj Close'].shift(1)))
df['KAMA']=ta.KAMA(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['MA']=ta.MA(np.array(df['Adj Close'].shift(1)), timeperiod=n, matype=0)
# NOTE(review): fastlimit=0 / slowlimit=0 are outside TA-Lib's documented
# (0, 1) open interval for MAMA -- confirm these values are intentional.
df['mama'],df['fama'] = ta.MAMA(np.array(df['Adj Close'].shift(1)), fastlimit=0, slowlimit=0)
# BUG FIX: `periods` was never defined, so the MAVP call raised a
# NameError.  MAVP needs a per-row period vector; use a constant n.
periods = np.full(len(df), float(n))
df['MAVP'] =ta.MAVP(np.array(df['Adj Close'].shift(1)),periods, minperiod=2, maxperiod=30, matype=0)
df['MIDPOINT']=ta.MIDPOINT(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['MIDPRICE']=ta.MIDPRICE(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)), timeperiod=n)
df['SAR']=ta.SAR(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)), acceleration=0, maximum=0)
df['SAREXT']=ta.SAREXT(np.array(df['High'].shift(1)),np.array(df['Low'].shift(1)), startvalue=0, offsetonreverse=0, accelerationinitlong=0, accelerationlong=0, accelerationmaxlong=0, accelerationinitshort=0, accelerationshort=0, accelerationmaxshort=0)
df['SMA']=ta.SMA(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['T3']=ta.T3(np.array(df['Adj Close'].shift(1)), timeperiod=n, vfactor=0)
df['TEMA']=ta.TEMA(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['TRIMA']=ta.TRIMA(np.array(df['Adj Close'].shift(1)), timeperiod=n)
df['WMA']=ta.WMA(np.array(df['Adj Close'].shift(1)), timeperiod=n)
# + outputHidden=false inputHidden=false
# Classic moving averages and Bollinger features, all computed on the
# 1-day-shifted adjusted close so they only use prior information.
df['20d_ma'] = df['Adj Close'].shift(1).rolling(window=20).mean()
df['50d_ma'] = df['Adj Close'].shift(1).rolling(window=50).mean()
df['Bol_upper'] = df['Adj Close'].shift(1).rolling(window=20).mean() + 2* df['Adj Close'].shift(1).rolling(window=20).std()
df['Bol_lower'] = df['Adj Close'].shift(1).rolling(window=20).mean() - 2* df['Adj Close'].shift(1).rolling(window=20).std()
# Band width as a percentage of the 20-day moving average
df['Bol_BW'] = ((df['Bol_upper'] - df['Bol_lower'])/df['20d_ma'])*100
# NOTE(review): column name says 200MA but the window is 50 -- confirm.
df['Bol_BW_200MA'] = df['Bol_BW'].shift(1).rolling(window=50).mean()
df['Bol_BW_200MA'] = df['Bol_BW_200MA'].fillna(method='backfill')
df['20d_ewma'] = df['Adj Close'].shift(1).ewm(span=20).mean()
df['50d_ewma'] = df['Adj Close'].shift(1).ewm(span=50).mean()
| Python_Stock/Stock_Columns.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.10.0 64-bit (''.venv'': poetry)'
# name: python3
# ---
base_url=""      # login page of the gym-booking site (fill in before running)
debug=True       # True keeps the browser window visible (non-headless)
booking_gym=""   # display name of the club to book at (fill in)
from selenium import webdriver
from selenium.webdriver.support.ui import Select
# +
# Launch Chrome via the local chromedriver and open the login page.
opts = webdriver.ChromeOptions()
if not debug:
    opts.add_argument("--headless")
service = webdriver.chrome.service.Service(executable_path="../webdriver/chromedriver")
driver = webdriver.Chrome(options = opts, service = service)
driver.get(base_url)
# -
# login
# Locate the login-form fields and the CAPTCHA image on the page.
elem_username = driver.find_element(by="id", value="username")
elem_password = driver.find_element(by="id", value="password")
elem_confirm_code = driver.find_element(by="id", value="confirm_code")
elem_login_btn = driver.find_element(by="id", value="login_btn")
elem_captcha_img = driver.find_element(by="xpath", value="//form[@id='login_form']//div[@class='list-group']//img")
image_url = elem_captcha_img.get_attribute("src")
# +
# Download the CAPTCHA image locally so it can be read and transcribed
# manually (no automatic solver is wired up yet).
import requests
img_data = requests.get(image_url).content
with open('captcha.png', 'wb') as handler:
    handler.write(img_data)
# +
# Credentials are blank placeholders — fill in before running.
elem_username.send_keys("")
elem_password.send_keys("")
elem_confirm_code.send_keys("") # TODO: transcribe code
elem_login_btn.click()
# -
# +
# go to gym
# bs4 was originally imported only further down this notebook (after its
# first use), which raises NameError when cells run top-to-bottom — import
# it before the first BeautifulSoup call.
import bs4

# Read the currently-selected gym from the page and switch to the desired
# one if it differs.
soup = bs4.BeautifulSoup(driver.page_source)
current_gym = soup.find(class_="current-club-name").get_text()
if current_gym != booking_gym:
    elem_club_selector = driver.find_element(by="class name", value="current-club-name")
    elem_club_selector.click()
    elem_desired_gym = driver.find_element(by="xpath", value=f"//a[text()='{booking_gym}']")
    elem_desired_gym.click()
# +
elem_schedule = driver.find_element(by="xpath", value="//*[@data-app_id='15']") # go to schedule
elem_schedule.click()
# -
driver.page_source
soup = bs4.BeautifulSoup(driver.page_source)
# + tags=[]
schedule = soup.find(id="week_schedule")
# -
# Scratch work: ISO week numbers for booking two days ahead.
from datetime import datetime, timedelta
import pytz
pytz.country_timezones["nz"]
# NOTE(review): the timezone string below is a blank placeholder — fill in
# e.g. "Pacific/Auckland" before running; pytz.timezone("") raises otherwise.
(datetime.now(tz=pytz.timezone("")) + timedelta(days=2)).strftime("%V")
(datetime(2022, 1, 1) + timedelta(days=2)).strftime("%V")
# datetime.now(tz=...) requires a tzinfo object, not a string — the original
# passed tz="" which raises TypeError.  Use a real tzinfo here.
datetime.now(tz=pytz.utc)
driver.close()
| notebooks/exploratory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.10 64-bit (''myenv'': conda)'
# name: python3
# ---
# +
#Import libraries
# %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.float_format', lambda x: '%.5f' % x)
import warnings
warnings.filterwarnings('ignore')
# -
#Import the data
train_data = pd.read_csv("train.csv")
# # **Exploratory Data Analysis**
# ## **Examining the Data**
train_data.head(25)
train_data.dtypes
train_data.shape
# Attrition target: employees with a recorded LastWorkingDate have left.
target = train_data[['Emp_ID','LastWorkingDate']]
target = target.dropna(subset = ['LastWorkingDate'])
target.shape
target.head()
# Parse the report month; unparseable values become NaT rather than raising.
train_data["Month_yr"] = pd.to_datetime(train_data['MMM-YY'],errors = 'coerce')
train_data['Age'].isna().sum()
# ## **Feature Engineering**
# One row per employee with their maximum recorded age.
age = train_data.groupby('Emp_ID')['Age'].max().reset_index()
age.head()
# Sort newest month first so cumcount() below numbers months backwards
# from each employee's final observed month.
df_train = train_data.sort_values(['Emp_ID',"Month_yr"],ascending=False)
df_train.isna().sum()
# 1 = the employee's last observed month, 2 = the month before that, ...
df_train['months_before_Leaving']=df_train.groupby('Emp_ID').cumcount()+ 1
# Per-employee aggregates of business value, quarterly rating and salary.
total_business = df_train.groupby('Emp_ID').agg({'Total Business Value': ['min', 'max','mean','median','sum'], 'Quarterly Rating': ['min','max'],'Salary':['min','max']}).reset_index()
total_business.head(5)
df_train.head(10)
df_train['Month_yr'].max()
df_train.columns
# Pivot each employee's monthly Total Business Value into one row per
# employee; columns count backwards from the employee's last month (1).
pivoted = df_train.pivot(index='Emp_ID', columns='months_before_Leaving',values=['Total Business Value']).reset_index()
pivoted
pivoted.columns
# Flatten the MultiIndex columns.  The original hand-wrote all 25 names;
# the {i:2} width-2 format reproduces them exactly ('..._ 1' ... '..._ 9',
# then '..._10' ... '..._24') without the duplication.
pivoted.columns = ['Emp_ID'] + [f'Total Business Value_{i:2}' for i in range(1, 25)]
pivoted[['Emp_ID'] + [f'Total Business Value_{i:2}' for i in range(1, 7)]].isna().sum()
df_train['Total Business Value'].describe()
# Keep only the four most recent months; employees with fewer months get 0.
pivoted_new = pivoted[['Emp_ID'] + [f'Total Business Value_{i:2}' for i in range(1, 5)]]
pivoted_new = pivoted_new.fillna(0)
# Month-over-month change features (computed from the un-filled frame, so
# missing months propagate NaN — same as the original behaviour).
pivoted_new['change_m1_2']=pivoted['Total Business Value_ 2']-pivoted['Total Business Value_ 1']
pivoted_new['change_m2_3']=pivoted['Total Business Value_ 3']-pivoted['Total Business Value_ 2']
pivoted_new['change_m3_4']=pivoted['Total Business Value_ 4']-pivoted['Total Business Value_ 3']
pivoted.shape
age.shape
# Join recent-month features with each employee's age, then left-join the
# attrition target so employees who never left keep NaN in LastWorkingDate.
merged_1 = pd.merge(pivoted_new,age, on='Emp_ID', how = 'inner')
merged_2 = pd.merge(merged_1,target, on ='Emp_ID', how='left')
merged_2.shape
merged_2.head()
merged_2.columns
# Binary label: 1 = employee has a LastWorkingDate (left), 0 = still employed.
merged_2['Left'] = np.where(pd.isna(merged_2['LastWorkingDate']),0,1)
merged_2.head()
merged_2.dtypes
merged_2.shape
df_train.columns
# Static per-employee attributes (one row per employee).
abc = df_train[['Emp_ID', 'Gender', 'City', 'Education_Level',
                'Dateofjoining','Joining Designation',
                'Designation']].drop_duplicates(subset='Emp_ID')
abc.shape
merged_2 = pd.merge(abc,merged_2,on='Emp_ID', how= 'inner')
merged_2.shape
merged_2['Dateofjoining']= pd.to_datetime(merged_2['Dateofjoining'],errors='coerce')
# Stand-in exit date for employees still active at the end of the data window.
max_date = pd.to_datetime('2018-01-01',errors='coerce')
merged_2['LWD'] = np.where(pd.isna(merged_2['LastWorkingDate']),max_date,merged_2['LastWorkingDate'])
merged_2.dtypes
# Tenure in (average-length) months between joining and last working date.
merged_2['tenure'] = (merged_2['LWD']-merged_2['Dateofjoining'])/np.timedelta64(1, 'M')
merged_2.isna().sum().values
merged_2.shape
merged_2
merged_2['LWD']
# ### **Transforming Date and Time**
import datetime
# Calendar features derived from the (possibly imputed) last working date.
# NOTE(review): LWD is built with np.where mixing Timestamps and raw
# LastWorkingDate values — confirm the column is datetime-typed before the
# .dt accessors below are relied on.
merged_2['day_of_year'] = merged_2.LWD.dt.dayofyear
merged_2['day'] = merged_2.LWD.dt.day
merged_2['month'] = merged_2.LWD.dt.month
#M2['year'] = M2.LWD.dt.year
merged_2['quarter'] = merged_2.LWD.dt.quarter
merged_2.head()
merged_2['Is_month_start'] = pd.to_datetime(merged_2['LWD']).dt.is_month_start
merged_2['Is_month_end'] = pd.to_datetime(merged_2['LWD']).dt.is_month_end
merged_2['Is_quarter_start'] = pd.to_datetime(merged_2['LWD']).dt.is_quarter_start
merged_2['Is_quarter_end'] = pd.to_datetime(merged_2['LWD']).dt.is_quarter_end
merged_2['Is_year_start'] = pd.to_datetime(merged_2['LWD']).dt.is_year_start
merged_2['Is_year_end'] = pd.to_datetime(merged_2['LWD']).dt.is_year_end
# Flatten the MultiIndex columns produced by the earlier groupby().agg().
total_business.columns=['Emp_ID', 'Total Business Value_min',
                        'Total Business Value_max',
                        'Total Business Value_mean',
                        'Total Business Value_median',
                        'Total Business Value_sum',
                        'Quarterly Rating_min',
                        'Quarterly Rating_max',
                        'Salary_min',
                        'Salary_max']
# Combine the aggregates with the engineered per-employee frame.
final_merge = pd.merge(total_business,merged_2,on='Emp_ID', how='inner')
final_merge.isna().sum()
# Drop raw date columns — already encoded as tenure / calendar features.
final_merge_new = final_merge.drop(labels=['LastWorkingDate','Dateofjoining','LWD'], axis=1)
final_merge_new = final_merge_new.fillna(0)
# One-hot encode the remaining categorical columns.
df2_train = pd.get_dummies(final_merge_new)
df2_train.shape
df2_train.isna().sum()
df2_train.columns
df2_train['tenure']=df2_train['tenure'].astype('int64')
df2_train.head()
# # **Building model with new data set**
# +
# Import train_test_split function
from sklearn.model_selection import train_test_split
# Features exclude the label and the employee identifier.
X = df2_train.drop(labels=['Left','Emp_ID'], axis=1)
y = df2_train['Left'].values
# Split dataset into training set and test set (60/40, fixed seed for
# reproducibility).
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.4, random_state=0)
# -
X.columns
# # **ML Application**
# +
# Outlier detection — IsolationForest flags anomalous rows with -1;
# the result is only reported here, not used to filter the data.
from sklearn.ensemble import IsolationForest
outliers = IsolationForest(random_state=0).fit_predict(X)
outliers_index = list(np.where(outliers == -1)[0])
print(f"Outlier Count: {len(outliers_index)} \nSample Count: {len(df2_train)} \nFraction: {round(len(outliers_index)/len(df2_train),3)}")
# -
# ## **Exploring different models**
# ### **Logistic Regression Model**
# +
#Logistic Regression Classifier
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
# NOTE(review): default max_iter=100 may trigger a convergence warning on
# this feature set — consider max_iter=1000 if it does.
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
print('Logistic regression accuracy: {:.3f}'.format(accuracy_score(y_test, logreg.predict(X_test))))
# -
# ### **Random Forest**
#Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
print('Random Forest Accuracy: {:.3f}'.format(accuracy_score(y_test, rf.predict(X_test))))
# ### **Support Vector Machine**
#SVM Classifier
from sklearn.svm import SVC
svc = SVC()
svc.fit(X_train, y_train)
print('Support vector machine accuracy: {:.3f}'.format(accuracy_score(y_test, svc.predict(X_test))))
# # **Hyper-Parameter Tuning with Cross-Validation**
# ## **10 Fold Cross Validation**
# Cross validation attempts to avoid overfitting while still producing a prediction for each observation dataset. We are using 10-fold Cross-Validation to train our Random Forest and SVM model.
# 10-fold cross-validated accuracy for each candidate model.  The three
# original cells were identical apart from the estimator and the printed
# label, so the shared logic is factored into one helper; printed output
# is byte-for-byte the same as before.
from sklearn import model_selection
from sklearn.model_selection import cross_val_score

def report_cv_accuracy(estimator, label):
    """Print mean 10-fold CV accuracy of *estimator* on the training split."""
    kfold = model_selection.KFold(n_splits=10, random_state=None)
    results = model_selection.cross_val_score(estimator, X_train, y_train, cv=kfold, scoring='accuracy')
    print("10-fold cross validation average accuracy for %s: %.3f" % (label, results.mean()))

#For Random Forest
report_cv_accuracy(RandomForestClassifier(), "Random Forest Classifier")
#For SVM
report_cv_accuracy(SVC(), "SVM Classifier")
#For Log Regression
report_cv_accuracy(LogisticRegression(), "Logistic Regression Classifier")
# From the Cross Validation results we observe that the average accuracy remains very close to the Random Forest & Logistics Regression model accuracy; hence, we can conclude that the models generalize well.
# ### **Precision and Recall**
# We construct confusion matrix to visualize predictions made by a classifier and evaluate the accuracy of a classification.
#Precison Recall Scores for Random Forest
from sklearn.metrics import classification_report
print(classification_report(y_test, rf.predict(X_test)))
#Confusion Matrix for Random Forest
y_pred = rf.predict(X_test)
from sklearn.metrics import confusion_matrix
import seaborn as sns
# confusion_matrix expects (y_true, y_pred) and, since scikit-learn 1.0,
# `labels` must be passed by keyword.  The original called it as
# (y_pred, y_test, [1, 0]), which transposed the matrix relative to the
# 'True class' / 'Predicted class' axis labels and breaks on modern sklearn.
forest_cm = metrics.confusion_matrix(y_test, y_pred, labels=[1, 0])
sns.heatmap(forest_cm, annot=True, fmt='.2f',xticklabels = ["Left", "Stayed"] , yticklabels = ["Left", "Stayed"] )
plt.ylabel('True class')
plt.xlabel('Predicted class')
plt.title('Random Forest')
plt.savefig('random_forest')
#PRScores for Logistic Regression
print(classification_report(y_test, logreg.predict(X_test)))
#Confusion Matrix for Logistic Regression
logreg_y_pred = logreg.predict(X_test)
logreg_cm = metrics.confusion_matrix(y_test, logreg_y_pred, labels=[1, 0])
sns.heatmap(logreg_cm, annot=True, fmt='.2f',xticklabels = ["Left", "Stayed"] , yticklabels = ["Left", "Stayed"] )
plt.ylabel('True class')
plt.xlabel('Predicted class')
plt.title('Logistic Regression')
plt.savefig('logistic_regression')
#PR scores for SVM
print(classification_report(y_test, svc.predict(X_test)))
#Confusion Matrix for SVM
svc_y_pred = svc.predict(X_test)
svc_cm = metrics.confusion_matrix(y_test, svc_y_pred, labels=[1, 0])
sns.heatmap(svc_cm, annot=True, fmt='.2f',xticklabels = ["Left", "Stayed"] , yticklabels = ["Left", "Stayed"] )
plt.ylabel('True class')
plt.xlabel('Predicted class')
plt.title('Support Vector Machine')
plt.savefig('support_vector_machine')
# # **ROC Curve**
# +
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
# ROC AUC must be computed from continuous scores, not hard 0/1 labels:
# the original passed predict() output to roc_auc_score, which collapses
# the curve to a single threshold and understates the true AUC.  Use the
# positive-class probability, consistent with the roc_curve calls below.
#ROC for logistic regression
logit_roc_auc = roc_auc_score(y_test, logreg.predict_proba(X_test)[:,1])
fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1])
#ROC for Random Forrest
rf_roc_auc = roc_auc_score(y_test, rf.predict_proba(X_test)[:,1])
rf_fpr, rf_tpr, rf_thresholds = roc_curve(y_test, rf.predict_proba(X_test)[:,1])
#ROC Curve for Random Forest & Logistic Regression
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot(rf_fpr, rf_tpr, label='Random Forest (area = %0.2f)' % rf_roc_auc)
# Diagonal reference line: a purely random classifier.
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.savefig('ROC')
plt.show()
# -
# The receiver operating characteristic (ROC) curve is another common tool used with binary classifiers. The dotted line represents the ROC curve of a purely random classifier; a good classifier stays as far away from that line as possible (toward the top-left corner)
# ## **Variable Importance for Random Forest Classifier**
# According to our Random Forest model, the the most important features which influence whether to leave the company, in ascending order are as follows:
X.columns
import pandas as pd
# Rank features by the random forest's impurity-based importances.
feature_imp = pd.Series(rf.feature_importances_,index=X.columns).sort_values(ascending=False)
feature_imp.head(25)
feature_imp.nlargest(25).plot(kind='bar')
# NOTE(review): the two lines below mutate X (the shared feature frame) by
# appending score columns — anything re-using X after this point sees them.
X['Score']=logreg.predict(X)
X['Score_Probs']=logreg.predict_proba(X.drop(columns='Score'))[:,1]
X.head(10)
# Re-attach Emp_ID (dropped from X earlier) for joining with the test set.
concat_empid = pd.concat([df2_train[['Emp_ID']],X],axis=1)
concat_empid.shape
concat_empid.head()
# Importing test data
test_data = pd.read_csv("test.csv")
test_data.head()
test_data.shape
df_test= pd.merge(concat_empid,test_data,on='Emp_ID', how='inner')
df_test[df_test['Score']==1].head(25)
df_test.shape
# # **Submission**
# Write the predicted attrition flags in the competition's submission format.
final_sub = pd.DataFrame({"Emp_ID":df_test['Emp_ID'],'Target':df_test['Score']})
final_sub.tail()
final_sub.to_csv("final_submission.csv",index=False)
| Notebooks/02_McKinsey Womenhack_Modelling & Hyperparemeter Tuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Creating a Vivado HLS Core
#
#
# This notebook will walk through the process of creating a Shared-Memory Vivado HLS core. This notebook assumes that you are familiar with Vivado HLS and have already used Vivado HLS to write, simulate, and debug a C/C++ core for hardware.
#
#
# If you have not already, add Vivado HLS to your executable path. In Cygwin, you can do this by running:
#
# ``` bash
# source C:/Xilinx/Vivado/2017.1/settings64.sh
# ```
#
# Or on Linux:
#
# ``` bash
# source /opt/Xilinx/Vivado/2017.1/settings64.sh
# ```
#
# These command assume that Vivado has been installed in `C:/Xilinx/Vivado` or `/opt/Xilinx/Vivado/`. If that is not the case, you should modify the commands above to match your installation path.
#
# This notebook assumes that you have cloned the [PYNQ-HLS repository](https://github.com/drichmond/PYNQ-HLS) to the home directory (`~`) on your computer. On our computer, this is the `/home/xilinx/` directory.
#
# To skip this notebook, run the following commands on your host computer:
#
# ``` bash
# cp ~/PYNQ-HLS/pynqhls/sharedmem/ip/mmult/* ~/PYNQ-HLS/tutorial/pynqhls/sharedmem/ip/mmult/
# make -C ~/PYNQ-HLS/pynqhls/sharedmem/ip/mmult/
# ```
# ## Objectives:
#
# We will be creating a Shared-Memory Matrix-Multiply accelerator as a High-Level Synthesis core. Unlike the Streaming Filter, this notebook will not use a DMA engine and read/write memory shared with the ARM Processor.
#
# This notebook will teach you how to :
# 1. Create an AXI-Lite Interface for HLS Core configuration
# 2. Create an AXI-Master Interface for reading and writing memory shared by the ARM PS
#
# The AXI Lite interface will be connected to the AXI-Master of the ARM processor. The AXI-Master Interface will be connected to the AXI-Slave ports of the ARM processor.
# ## Creating a Vivado HLS Project
#
#
# We will begin by creating a Vivado HLS project. On your host computer, navigate to the following folder of the PYNQ-HLS repository using your terminal:
#
# ```bash
# cd ~/PYNQ-HLS/tutorial/pynqhls/sharedmem/ip/mmult/
# ```
#
# In this directory we have provided a makefile that will:
#
# 1. Create a `mmult` directory with a Vivado HLS project
# 2. Add `mmult.cpp`, `mmult.hpp`, and `main.cpp` files to the project
# 3. Run tests for the `mmult.cpp` file (**These will fail initially**)
# 4. If the tests pass, synthesize the core.
#
# To run the makefile, run make from your current directory:
#
# ``` bash
# make
# ```
#
# This will build the Vivado HLS project, but the testbench will fail because the method `mmult` is not implemented. Open the project in the Vivado HLS tool by running the command:
#
# ```bash
# vivado_hls -p ~/PYNQ-HLS/tutorial/pynqhls/sharedmem/ip/mmult/
# ```
#
# This will open the following window:
#
# <img src="pictures/vivadohls_mmult_splash.png" alt="mmult Project in Vivado HLS" style="width: 768px;"/>
# ## Writing Your Core
#
# The next step is to implement the `mmult` core. Open the file `mmult.cpp`. You will see the following method body:
#
# ``` C++
#
# #include "mmult.hpp" // Defines mata_t, A_ROWS, B_ROWS, etc.
#
# // mmult()
# // Implements a simple matrix-multiply function in HLS
# // Parameters:
# // A - mata_t
# // A 2-dimensional array of mata_t values to be multiplied
# //
# // BT - matb_t
# // A 2-dimensional array of matb_t values to be multiplied
# // BT is the transpose of B
# //
# // C - matc_t
# // Matrix multiply output definition
# //
# // The dimensions of the arrays are defined in mmult.hpp.
# void mmult(const mata_t A [A_ROWS][A_COLS],
# const matb_t BT [B_COLS][B_ROWS],
# matc_t C [A_ROWS][B_COLS]){
#
# // Your code goes here!
#
# }
#
# ```
#
# As you can see, the body of the function `mmult` is blank - **this is okay**. To pass the testbench we have provided in `main.cpp` you will need to fill out the functionality.
#
# To pass the testbench, you will need to:
#
# - Multiply matrix A and matrix BT (Transpose of B) and write the result in Matrix C
#
# To implement the core in hardware you will need to create the following interfaces:
#
# - AXI-Lite Slave on the control registers (Also known as `return`)
# - AXI-Master for the matrix `A` argument
# - AXI-Master for the matrix `BT` argument
# - AXI-Master for the matrix `C` argument
# - Combine the three AXI-Master interfaces into a single interface
#
# Go ahead and try to implement your own `mmult` function!
# ### Our Implementation
#
# You can define your own implementation, or fill `mmult.cpp` with the implementation below:
#
# ``` C++
# #include "mmult.hpp"
#
# // mmult()
# // Implements a simple matrix-multiply function in HLS
# // Parameters:
# // A - mata_t
# // A 2-dimensional array of mata_t values to be multiplied
# //
# // BT - matb_t
# // A 2-dimensional array of matb_t values to be multiplied
# // BT is the transpose of B
# //
# // C - matc_t
# // Matrix multiply output definition
# //
# // The dimensions of the arrays are defined in mmult.hpp.
# void mmult(const mata_t A [A_ROWS][A_COLS],
# const matb_t BT [B_COLS][B_ROWS],
# matc_t C [A_ROWS][B_COLS]){
# /* Define a new AXI-Lite bus named CTRL for offset arguments, and HLS
# Status/Control registers (return)*/
# #pragma HLS INTERFACE s_axilite port=return bundle=CTRL
# /* Define a new AXI4 Master bus named DATA for memory ports A, BT, and C. The
#    argument offset=slave specifies that the pointers (offset) of A, BT, and
#    C can be set using register writes in the CTRL axi slave port */
# #pragma HLS INTERFACE m_axi port=A offset=slave bundle=DATA
# #pragma HLS INTERFACE m_axi port=BT offset=slave bundle=DATA
# #pragma HLS INTERFACE m_axi port=C offset=slave bundle=DATA
#
# // We use the log2 functions in mmult.hpp to determine the correct size
# // of the index variables i, j, and k. Typically, vivado will size these
# // correctly
# ap_uint<pynq::log2(A_ROWS) + 1> i = 0;
# ap_uint<pynq::log2(B_COLS) + 1> j = 0;
# ap_uint<pynq::log2(A_COLS) + 1> k = 0;
#
# // Perform a simple matrix-multiply with three nested for-loops
# for(i = 0; i < A_ROWS; ++i){
# for(j = 0; j < B_COLS; ++j){
# matc_t sum = 0;
#             for(k = 0; k < A_COLS; ++k){
# #pragma HLS PIPELINE
# sum += A[i][k]*BT[j][k];
# }
# C[i][j] = sum;
# }
# }
# }
# ```
# ## Compiling
#
# Once you have filled the implementation, click the **Run C Simulation ** and then **Synthesize** button. This will produce the window shown below:
#
# <img src="pictures/vivadohls_mmult_synth.png" alt="Synthesized mmult function in Vivado HLS" style="width: 768px;"/>
#
# ## Interfaces
#
# In the center window scroll down to view the ports. The ports list shows us the interface signals for our HLS core. The port names are unimportant in this example - what matters is the protocol field. The protocol field has three types: **s_axi_lite (AXI Lite, Slave Interface)**, **m_axi (AXI Master Interface)**, and **ap_ctrl_hs (Control Signals)**.
#
# For best results in Vivado (and PYNQ) your core should provide **s_axi_lite**, **axis**, or **m_axi** interfaces for data transfer. These ports are automagically recognized by Vivado and can be used in the Block Diagram editor in the **[Building a Bitstream](3-Building-A-Bitstream.ipynb)** notebook.
#
#
# ### AXI-Lite Interface
#
# The AXI-Lite interface is declared using the following pragma:
#
# ``` C
# #pragma HLS INTERFACE s_axilite port=return bundle=CTRL
# ```
#
# This defines a AXI-Lite interface that will be named `s_axi_CTRL` in Vivado. AXI Lite is used for configuration data. It is low-performance and uses few resources. The `pragma` in the first line above defines an AXI-Lite interface for the control registers, called the `return` argument.
#
# <img src="pictures/vivadohls_mmult_axilite.png" alt="AXI Lite Control Port in Vivado HLS" style="width: 512px;"/>
#
# This AXI-Lite interface will allow the ARM PS to specify the address offset of the `A` `BT` and `C` arrays described below.
#
# ### AXI-Master Interface
#
# The AXI-Master interface on matrices `A` `BT` and `C` are declared using the pragmas below:
#
# ``` C
# #pragma HLS INTERFACE m_axi port=A offset=slave bundle=DATA
# #pragma HLS INTERFACE m_axi port=BT offset=slave bundle=DATA
# #pragma HLS INTERFACE m_axi port=C offset=slave bundle=DATA
# ```
#
# Each pragma defines an AXI-Master interface for `A`, `BT`, and `C`. The interfaces are combined using the `bundle=DATA` argument, which defines a single AXI-Master interface that will be named `m_axi_DATA` in Vivado.
# AXI-Master is a memory-mapped bus interface driven by the HLS Core. Accesses on the array will appear as reads/writes byte addresses on the `m_axi_DATA` bus interface of the hardware core.
#
# <img src="pictures/vivadohls_mmult_aximaster.png" alt="AXI Master Port in Vivado HLS" style="width: 512px;"/>
#
# Each array is associated with an offset address that is programmed before the HLS core starts computation. The read/write address of any access on `A`, `BT`, or `C` is computed by adding each array's offset to the byte-offset of the index. The argument `offset=slave` specifies that the offset is a register that can be accessed by the AXI-Slave interface. The location of each offset register can be found in the header files generated by High-Level Synthesis. For example, here is the header file `xmmult_hw.h` produced when this file is compiled:
#
#
# ``` C
# // CTRL
# // 0x00 : Control signals
# // bit 0 - ap_start (Read/Write/COH)
# // bit 1 - ap_done (Read/COR)
# // bit 2 - ap_idle (Read)
# // bit 3 - ap_ready (Read)
# // bit 7 - auto_restart (Read/Write)
# // others - reserved
# // 0x04 : Global Interrupt Enable Register
# // bit 0 - Global Interrupt Enable (Read/Write)
# // others - reserved
# // 0x08 : IP Interrupt Enable Register (Read/Write)
# // bit 0 - Channel 0 (ap_done)
# // bit 1 - Channel 1 (ap_ready)
# // others - reserved
# // 0x0c : IP Interrupt Status Register (Read/TOW)
# // bit 0 - Channel 0 (ap_done)
# // bit 1 - Channel 1 (ap_ready)
# // others - reserved
# // 0x10 : Data signal of A_V
# // bit 31~0 - A_V[31:0] (Read/Write)
# // 0x14 : reserved
# // 0x18 : Data signal of BT_V
# // bit 31~0 - BT_V[31:0] (Read/Write)
# // 0x1c : reserved
# // 0x20 : Data signal of C_V
# // bit 31~0 - C_V[31:0] (Read/Write)
# // 0x24 : reserved
# // (SC = Self Clear, COR = Clear on Read, TOW = Toggle on Write, COH = Clear on Handshake)
# #define XMMULT_CTRL_ADDR_AP_CTRL 0x00
# #define XMMULT_CTRL_ADDR_GIE 0x04
# #define XMMULT_CTRL_ADDR_IER 0x08
# #define XMMULT_CTRL_ADDR_ISR 0x0c
# #define XMMULT_CTRL_ADDR_A_V_DATA 0x10
# #define XMMULT_CTRL_BITS_A_V_DATA 32
# #define XMMULT_CTRL_ADDR_BT_V_DATA 0x18
# #define XMMULT_CTRL_BITS_BT_V_DATA 32
# #define XMMULT_CTRL_ADDR_C_V_DATA 0x20
# #define XMMULT_CTRL_BITS_C_V_DATA 32
# ```
#
# ### ap_ctrl_hs ports
#
# **ap_ctrl_hs** signals provide clock, reset, and interrupt ports.
#
# <img src="pictures/vivadohls_mmult_ap_ctrl.png" alt="AP Ctrl Port in Vivado HLS" style="width: 512px;"/>
#
# ## Testing and Recompiling the Core
#
# Once this process has been completed, you can re-compile the HLS core and run the tests by executing the following commands:
#
# ```bash
# cd ~/PYNQ-HLS/pynqhls/sharedmem/ip/mmult
# make clean mmult
# ```
#
# If the tests pass, proceed to the **[Building a Bitstream](3-Building-A-Bitstream.ipynb)** notebook.
| tutorial/notebooks/sharedmem/2-Creating-A-Vivado-HLS-Core.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### Feature Classifier 2022-03
# The point of this file is to see how well a model can classify road segments given limited, proximal information from the cities centreline data.
# Some of this data is already used for the classification of LTS; however, this model tries to use limited, easy to access datasets that may be transferrable across all cities in order to speed up the classification process without using all of the features used in the 2016 paper.
#Import all packages from model functions
from model_functions import *
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
#plt.style.use('dark_background')
plt.style.use('seaborn-colorblind')
# Import train centreline data
# Spatially-split training extract of the city centreline dataset (the
# commented path is the non-bike variant kept for reference).
train_path = 'C:/Users/<NAME>/Documents/PycharmProj/data/training_data/centrelinebike_train_spatial.csv'
#'C:/Users/<NAME>/Documents/PycharmProj/data/training_data/centreline_train_spatial.csv'
train_data = pd.read_csv(train_path)
print(train_data.shape)
train_data.head()
# #### Clean and feature engineer using model_functions.py
# After evaluating the features in the model, many of them are similar. Ex. Arterial vs. Arterial Ramp. <br>
# Before doing any other feature engineering, create a function that combines these similar features. Convert to the major road types: <br>
# Local, Collector, Arterial, Highway. Keep paths separate. Do we lose info when we do this?
# +
train_data['FEATURE_CODE_DESC'].unique()
def map_centreline_features(data):
    """Collapse fine-grained centreline feature codes into parent road classes.

    Returns a copy of *data* whose FEATURE_CODE_DESC column has every
    arterial variant mapped to 'Arterial', laneways to 'Local', walkways to
    'Trail' and collector ramps to 'Collector'; the input frame is untouched.
    """
    road_class = {
        'Laneway': 'Local',
        'Minor Arterial': 'Arterial',
        'Major Arterial': 'Arterial',
        'Major Arterial Ramp': 'Arterial',
        'Walkway': 'Trail',
        'Collector Ramp': 'Collector',
        'Minor Arterial Ramp': 'Arterial',
    }
    remapped = data.copy()
    remapped['FEATURE_CODE_DESC'] = remapped['FEATURE_CODE_DESC'].replace(road_class)
    return remapped
train_data = map_centreline_features(train_data)
train_data['FEATURE_CODE_DESC']
# -
# Split features/target
# Two candidate targets: the 4-level LTS score and the binary high/low
# access grouping; the rest of the columns form the feature frame.
y_train_lts = train_data['LTS']
y_train_access = train_data['high access']
X_train = train_data.drop(['LTS','high access'], axis=1)
# Engineer features using model functions.
keep_rows = ['FEATURE_CODE_DESC','geometry', 'AREA_ID', 'bikelane']
#X_train['LINEAR_NAME_TYPE'] = X_train['LINEAR_NAME_TYPE'].fillna('')
X_train = droprows(X_train, keep_rows)
# add_regions buckets segments into a 2x3 spatial grid; dummy() one-hot
# encodes the road class and the region columns.
X_train = add_regions(X_train, 2, 3)
X_train = dummy(X_train, dummy_feats=['FEATURE_CODE_DESC','x_region','y_region'])
X_train.head()
X_train.shape
# #### Feature Selection
# Preprocessing and Cross Validation to choose the best features for the model
# Define some models that we would like to test
model_log = LogisticRegression()
model_svm = svm.SVC()
model_rf = RandomForestClassifier()
# Use the normal feature selector function to see CV results without spatial autocorrelation handling
feature_selector(model_log, splits=10, X=X_train.iloc[:,2:], y=y_train_access, i=8)
# <br>Use the spatial cross validation function to get a better look at the feature performance
# +
# Candidate feature set: road-class dummies, coarse spatial-region dummies,
# and the bikelane indicator (street-name-type dummies kept for reference).
features = ['FEATURE_CODE_DESC_Arterial', 'FEATURE_CODE_DESC_Collector', 'FEATURE_CODE_DESC_Local', 'FEATURE_CODE_DESC_Trail',
            'x_region_2', 'x_region_3', 'y_region_2', 'y_region_3',
            'bikelane',
            #'LINEAR_NAME_TYPE_Ave', 'LINEAR_NAME_TYPE_Cres', 'LINEAR_NAME_TYPE_Ramp', 'LINEAR_NAME_TYPE_Rd', 'LINEAR_NAME_TYPE_St',
           ]
features
# -
# Group folds by neighbourhood (AREA_ID) so spatially adjacent segments do
# not leak between train and validation splits.
spatial_cv(model_log, grouper=X_train['AREA_ID'], splits=141, X=X_train[features], y=y_train_access)
spatial_cv(model_rf, grouper=X_train['AREA_ID'], splits=141, X=X_train[features], y=y_train_access)
# Finally, select Features to be used in hyper parameter tuning
# #### Tune hyperparameters
# Use random search, and try using groupkfold for the cv in the tuning <br>
# look at logistic regression first
# +
# Get a set of group kfold using the neighbourhood grouper.
# Unfortunately, the 141 cross validations massively increase runtime of the search.
group_kfold = GroupKFold(n_splits=10)
grouper = X_train['AREA_ID']
neighbour_kfold = group_kfold.split(X_train, y_train_access, grouper)
# Create list to be passed to search (split() returns a single-use generator).
train_index, val_index = [list(tt) for tt in zip(*neighbour_kfold)]
neighbour_cv = [*zip(train_index, val_index)]
# +
cv = StratifiedKFold(n_splits=10)
# Choose hyper-params.  Two fixes versus the original search space:
#  * class_weight was ('balanced') — a plain string, not a tuple — so
#    RandomizedSearchCV sampled individual characters ('b', 'a', ...) and
#    every candidate fit failed; ('balanced',) is the intended single option.
#  * penalty 'l1' is unsupported by LogisticRegression's default lbfgs
#    solver, so the solver is pinned to liblinear, which handles both
#    'l1' and 'l2'.
params_log = {'penalty': ('l1', 'l2'),
              'solver': ('liblinear',),
              'fit_intercept': (True, False),
              'C': loguniform(1e-3, 1e2),
              'class_weight': ('balanced',)
             }
# Do the search (might take awhile)
r_search_log = RandomizedSearchCV(estimator=model_log,
                                  param_distributions=params_log,
                                  n_iter=200,
                                  scoring='f1_weighted',
                                  cv=neighbour_cv,
                                  )
r_search_log.fit(X_train[features], y_train_access)
# -
print('best weighted score', r_search_log.best_score_)
print('best estimator: ',r_search_log.best_estimator_)
# <br>
# Above we tuned the hyperparameters for the logistic function. Here, We try a random forest. Both models were the best performers in feature selection.
# +
# Hyper-parameter space for the random forest.
# scipy's uniform(loc, scale) samples from [loc, loc + scale], so the
# original uniform(0.5, 1) drew fractions up to 1.5 — invalid for
# max_samples / max_features, which must lie in (0, 1].  uniform(0.5, 0.5)
# keeps the draws in the intended [0.5, 1.0] range.
params_rf = {'criterion': ('gini', 'entropy'),
             'class_weight': (None, 'balanced'),
             'max_samples': uniform(0.5, 0.5),
             'max_features': uniform(0.5, 0.5)}
r_search_rf = RandomizedSearchCV(estimator=model_rf,
                                 param_distributions=params_rf,
                                 n_iter=20,
                                 scoring='f1_weighted',
                                 cv=neighbour_cv,
                                 )
r_search_rf.fit(X_train[features], y_train_access)
# -
print('best weighted score', r_search_rf.best_score_)
print('best estimator: ',r_search_rf.best_estimator_)
# <br>Train the logistic model:
# +
# Final logistic model using the C value found by the randomized search.
model_features = LogisticRegression(C=0.9056753857212789, class_weight='balanced') #
model_features.fit(X_train[features], y_train_access)
y_pred = model_features.predict(X_train[features])
y_prob = model_features.predict_proba(X_train[features])
# -
# #### Model Evaluation - training data
# Metrics, visualizations included
# Spatial CV
spatial_cv(model_features, grouper=X_train['AREA_ID'], splits=141, X=X_train[features], y=y_train_access)
# +
# Confusion matrix
f, ax = plt.subplots(figsize=(10, 10))
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2+
# (replaced by ConfusionMatrixDisplay.from_estimator) — confirm which
# implementation model_functions re-exports here.
plot_confusion_matrix(model_features, X_train[features], y_train_access, ax=ax)
ax.grid(False)
#Weighted F1 score with optimal threshold if relevant
plot_f1_threshold(X_train[features], y_train_access, model_features)
#roc curve
plot_roc(y_train_access, model_features.predict_proba(X_train[features]))
# -
# Plot the classifications geospatially
predicted = pd.Series(y_pred)
predicted = pd.concat((train_data, predicted), axis=1)
lts_gpd = gpd.read_file('C:/Users/<NAME>/Documents/PycharmProj/data/raw_data/Bo Github Data/centerline_LTS_July2021.shp')
lts_gpd = lts_gpd[lts_gpd['LTS'] != 0]
lts_gpd['high access'] = lts_gpd['LTS'].apply(lambda x: 1 if x <= 2 else 0)
# +
# Map the logistic model's results on the street network: ground-truth
# high/low access, the predicted class, and the misclassified segments.
# Join predictions onto the geometry by segment id.
geo_preds = pd.merge(lts_gpd, predicted[['GEO_ID', 0]],
                     how='left', left_on='GEO_ID', right_on='GEO_ID')
geo_preds = geo_preds.dropna()
# diff == 0 -> correct; +/-1 -> misclassified in either direction.
geo_preds['diff'] = geo_preds['high access'] - geo_preds[0]
geo_preds['i or c'] = geo_preds['diff'].apply(lambda d: 'correct' if d == 0 else 'incorrect')
misclassified = geo_preds[geo_preds['diff'] != 0]
# Three-panel map: label / prediction / errors.
fig, axs = plt.subplots(figsize=(20,12), ncols=3)
geo_preds.plot(ax=axs[0], column='high access', cmap='cividis')
geo_preds.plot(ax=axs[1], column=0, cmap='cividis')
misclassified.plot(ax=axs[2], column='diff', cmap='coolwarm')
for panel, heading in zip(axs, ('Label', 'Predicted', 'Incorrectly Classified')):
    panel.title.set_text(heading)
    panel.grid(False)
plt.show()
# -
# <br>
# Train the random forest classifier and see the results on the test set
# NOTE(review): despite the line above, everything below is evaluated on
# X_train — the test set is never touched here.
# +
# Hyperparameters presumably copied from r_search_rf.best_params_ — TODO confirm.
model_features_rf = RandomForestClassifier(criterion='entropy', max_features=0.5384874461913766,
                                           max_samples=0.5862816454781242, class_weight='balanced')
model_features_rf.fit(X_train[features], y_train_access)
y_pred = model_features_rf.predict(X_train[features])
#y_prob = model_features.predict_proba(X_train[features])
# -
# Spatial CV
spatial_cv(model_features_rf, grouper=X_train['AREA_ID'], splits=141, X=X_train[features], y=y_train_access)
# +
# Confusion matrix
f, ax = plt.subplots(figsize=(10, 10))
# NOTE(review): plot_confusion_matrix was removed in scikit-learn 1.2 — confirm pinned version.
plot_confusion_matrix(model_features_rf, X_train[features], y_train_access, ax=ax)
ax.grid(False)
#Weighted F1 score with optimal threshold if relevant
plot_f1_threshold(X_train[features], y_train_access, model_features_rf)
#roc curve
plot_roc(y_train_access, model_features_rf.predict_proba(X_train[features]))
# -
# Align RF predictions with the original rows (positional alignment — assumes
# train_data has a default RangeIndex; TODO confirm).
predicted = pd.Series(y_pred)
predicted = pd.concat((train_data, predicted), axis=1)
lts_gpd = gpd.read_file('C:/Users/<NAME>/Documents/PycharmProj/data/raw_data/Bo Github Data/centerline_LTS_July2021.shp')
lts_gpd = lts_gpd[lts_gpd['LTS'] != 0]
# Binary target: LTS 1-2 -> high access (1), LTS 3-4 -> low access (0).
lts_gpd['high access'] = lts_gpd['LTS'].apply(lambda x: 1 if x <= 2 else 0)
# +
# Map the random-forest results on the street network: ground truth,
# predicted class, and the segments the model got wrong.
rf_map = pd.merge(lts_gpd, predicted[['GEO_ID', 0]],
                  how='left', left_on='GEO_ID', right_on='GEO_ID')
rf_map = rf_map.dropna()
# diff == 0 -> correct; +/-1 -> misclassified in either direction.
rf_map['diff'] = rf_map['high access'] - rf_map[0]
rf_map['i or c'] = rf_map['diff'].apply(lambda d: 'correct' if d == 0 else 'incorrect')
rf_errors = rf_map[rf_map['diff'] != 0]
# Three-panel map: label / prediction / errors.
fig, axs = plt.subplots(figsize=(20,12), ncols=3)
rf_map.plot(ax=axs[0], column='high access', cmap='cividis')
rf_map.plot(ax=axs[1], column=0, cmap='cividis')
rf_errors.plot(ax=axs[2], column='diff', cmap='coolwarm')
for panel, heading in zip(axs, ('Label', 'Predicted', 'Incorrectly Classified')):
    panel.title.set_text(heading)
    panel.grid(False)
plt.show()
# -
| model/road_feature_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="WAJ7lA2wuvR8"
# # Set Up
#
# 1. click “Edit” -> "Notebook Settings" -> "Hardware accelerator, GPU" -> Save
# <img src="https://z3.ax1x.com/2021/03/30/ciQWWV.png">
#
# 2. Click the folder icon on the left, upload your video file, and copy the uploaded video path
#
# <img src="https://z3.ax1x.com/2021/03/30/cilvhq.png">
#
# 3. Run the code, enter the pasted video path
#
#
#
# + [markdown] id="_jPi_FBwyZyr"
# check whether GPU works
# + id="eHPHc_Bheo-j"
# !nvidia-smi
# + id="TkQKKGKZkkT2"
# !nvcc -V
# + [markdown] id="_85O6zgPyhto"
# # Install Dependencies
# + id="ICeq0T1FeqjT"
# !git clone https://github.com/YaoFANGUK/video-subtitle-extractor.git
# + id="GHutEWynkMKR"
# cd video-subtitle-extractor
# + id="ynJydzo1kMKR"
# !pip install -r requirements_gpu.txt
# + id="3-GdvmaGl-aF"
# !pip install paddlepaddle-gpu==2.1.0
# + [markdown] id="SGb0i3tPyw9Q"
# # Run
# -
# Here is an example:
#
# input video path: /content/test.mp4
#
# input subtitle area: 894 1052 152 1264
# + id="B2MPjMOOgGbD"
# !python main.py
| google_colab_en.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="rngxh6Kcmi64"
# ## **Agrupando Clientes por Padrão de Consumo**
#
# 
# -
# ### **INTRODUÇÃO**
#
# As empresas sempre estão em busca de encontrar meios para promover seus produtos, aumentar o número de clientes e, consequentemente, obter maiores lucros. É fato que cada pessoa tem um gosto singular, em sua essência, em relação aos produtos e serviços que lhes são oferecidos. Definir estratégias para alcançar cada tipo de cliente é algo que mudaria profundamente a dinâmica de um negócio e um passo além dos concorrentes.
#
# A segmentação, ou agrupamento, é um processo estratégico em que clientes e potenciais clientes passam a fazer parte de determinados grupos com características semelhantes de comportamento/gostos. Por exemplo, grupo de mulheres que gostam de ir à academia; dessas que vão à academia, as que têm entre 18 e 25 anos; pessoas que andam de bicicleta na orla de Boa Viagem (PE); das que andam de bicicleta, as que vão pela manhã; etc. E assim criar algo personalizado a oferecer.
#
# As possibilidades são inúmeras. Os objetivos estratégicos para atingir esses grupos também. De quê adianta a *Companhia de Leitores de Ficção* querer vender seus livros para grupos de pessoas que leem APENAS romance? Ninguém compraria. O foco seria divergente e haveria desperdício de recursos. Não há empresa no mundo que goste de um prejuízo, não é verdade?
#
# Essa é a ideia geral de um agrupamento.
#
# As soluções não se limitam a vendas de produtos e personalização de serviços para determinados grupos de clientes. Mas essa solução também pode ser aplicada em estudos para entender, por exemplo, o consumo de energia em determinados horários, sem a necessidade de oferecer serviços - mas (por que não?) campanhas de conscientização.
#
# ### **OBJETIVO**
#
# Uma Cia Elétrica detém um grande número de dados sobre o consumo de energia dos cidadãos de uma região. Com base nesses dados, a empresa pediu para <b>agrupar os consumidores por similaridade a fim de compreender o comportamento dos clientes e sua relação com o consumo de energia</b>.
#
# **Dicionário de Atributos**
# 1. `date`: Data no formato dd/mm/yyyy
# 2. `time`: Tempo no formato hh:mm:ss
# 3. `global_active_power`: Energia ativa média global por família (em kilowatt)
# 4. `global_reactive_power`: Energia reativa média global por família (em kilowatt)
# 5. `voltage`: Voltagem média (em volt)
# 6. `global_intensity`: Intensidade global da corrente por família (em ampere)
# 7. `sub_metering_1`: Sub-divisão da energia No. 1 (em watt-hora por energia ativa). Corresponde à cozinha, contendo principalmente lava-louça, fogão e microondas.
# 8. `sub_metering_2`: Sub-divisão da energia No. 2 (em watt-hora por energia ativa). Corresponde à lavanderia, contendo principalmente máquina de lavar, secadora, refrigerador e iluminação.
# 9. `sub_metering_3`: Sub-divisão da energia No. 3 (em watt-hora por energia ativa). Corresponde ao aquecedor de água e ar-condicionado.
# ### **IMPORTANDO PACOTES E CARREGANDO DATASET**
# + colab={} colab_type="code" id="luozcVKfoIBX"
# Importação dos pacotes
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import pylab
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist, pdist
import warnings
warnings.filterwarnings("ignore")
# %matplotlib inline
# + colab={} colab_type="code" id="ip_ooSlnpatl"
# Raw household power-consumption readings; the file is ';'-separated despite
# the .txt extension. Values are loaded as strings and converted later.
consumo_energia = pd.read_table('~/Mega/Portfolio/dataset/cluster/dataset_power_consumption.txt', delimiter=';')
# -
# ### **ANÁLISE EXPLORATÓRIA E MANIPULAÇÃO DE DADOS**
# > **1. Verificar um exemplo do conjunto de dados**
# + [markdown] colab_type="text" id="KWgFk6nHqlh3"
# Observar uma amostra contendo cinco linhas do dataset para saber como estão dispostos os dados.
#
# Em seguida analisar os tipos e a quantidade de dados carregados.
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="yZ55s9MopvvQ" outputId="07e4ea62-2138-4a33-a2b1-3950f5e742f4"
# Five random sample rows to eyeball the raw format.
consumo_energia.sample(5)
# + colab={"base_uri": "https://localhost:8080/", "height": 280} colab_type="code" id="YnJDpaeJtb8X" outputId="742479f6-9d0a-472a-ca38-19887590beb1"
# Column dtypes and non-null counts.
# NOTE(review): info(null_counts=True) was deprecated in pandas 1.1 and removed
# in 2.0 (use show_counts=) — confirm the pinned pandas version.
consumo_energia.info(null_counts=True)
# + [markdown] colab_type="text" id="WhQFBMjiqzYB"
# **Observação 1**: Aparentemente, olhando apenas para as cinco amostras, notamos que não existem valores que se mostrem estranhos/formatados de forma irregular.
#
# **Observação 2**: São 2075259 linhas e 9 colunas, onde todos os atributos estão no formato 'object' *(salvo `['Sub_metering_3']` - tipo float).*
#
# **Observação 3**: O atributo `['Sub_metering_3']` possui valores missing pois a quantidade de valores non-null *(não nulos)* é inferior ao restante dos atributos.
#
# É importante saber que, para o processamento, os dados devem estar todos em formato numérico pois algoritmos de machine learning se dão melhor com esse formato.
# + [markdown] colab_type="text" id="LqUo-U5i2PyF"
# > **2. Missing Values**
#
# Encontrar valores NaN, sua proporção e tratar da forma mais adequada.
# + colab={"base_uri": "https://localhost:8080/", "height": 486} colab_type="code" id="ueBDdLKGsW7n" outputId="5ae6e3fa-2e9c-4780-a0d4-89392b57ce2d"
# Report missing data: absolute NaN counts per column, then each column's
# percentage share of missing values.
na_mask = consumo_energia.isna()
print('QUANTIDADE NAN')
print('--------------')
print(na_mask.sum())
print('\nPROPORÇÃO NAN')
print('-------------')
print(na_mask.mean().sort_values(ascending=False)*100)
# + [markdown] colab_type="text" id="xm9CML_7s_dA"
# Encontramos *25979* registros com valores NaN no atributo `['Sub_metering_3']`, o que corresponde a *1,25%* do total de linhas.
#
# Vamos imprimir as linhas onde existem valores NaN:
# + colab={"base_uri": "https://localhost:8080/", "height": 424} colab_type="code" id="Lb17MvssstZD" outputId="f9d20851-21bf-40d3-bc5e-43564ea9d5f7"
# Rows where Sub_metering_3 is NaN — inspecting these also reveals the '?'
# placeholder used in the other columns.
consumo_energia[consumo_energia['Sub_metering_3'].isna()]
# + [markdown] colab_type="text" id="SYlU_yARxymV"
# Como observado, além dos valores NaN, apareceu o caracter '?' em diversos registros em quase todos os atributos - o que indica a mesma coisa de valor NaN. Vamos trocar esse caracter por NaN, assim podemos analisar melhor a quantidade de NaN no dataset completo.
# + colab={"base_uri": "https://localhost:8080/", "height": 205} colab_type="code" id="oSXMSH07yZkS" outputId="390c6f41-f580-4562-9594-c7fbe05a3f90"
# Normalise the '?' placeholder to a real NaN so missing data can be handled uniformly.
consumo_energia.replace('?', np.nan, inplace=True)
# Fraction of NaN per column, ascending.
consumo_energia.isna().mean().sort_values(ascending=True)
# + [markdown] colab_type="text" id="VtPOeDL6zr4T"
# Há valores missing em todos os registros de cada atributo *(salvo `['Date', 'Time']`)*. Vamos removê-los.
# + colab={"base_uri": "https://localhost:8080/", "height": 280} colab_type="code" id="1yxUcP7w0jpa" outputId="3e8e6a6f-3f94-4fe1-c88c-c095d1580f52"
# Drop every row containing at least one NaN.
consumo_energia.dropna(axis=0, how='any', inplace=True)
# Sanity check: no NaN should remain.
print('Quantidade de valores NaN')
print('-------------------------')
print(consumo_energia.isna().sum())
# Remaining dataset dimensions after cleaning.
print('\nShape', consumo_energia.shape)
# + [markdown] colab_type="text" id="0G4r6gb_09HC"
# Com isso podemos verificar que existem 2049280 linhas disponíveis para trabalharmos rumo à resolução do problema definido, mas...
# + [markdown] colab_type="text" id="y_v0RAoJ22in"
# > **3. Coletar 2% do conjunto de dados**
#
# Essa coleta percentual é devido ao peso computacional exigido e muitos de nós não detêm máquinas com grande capacidade de processamento. Esse 2% facilitará nossas vidas *(onde temos que ter, pelo menos, o mínimo de hardware ou utilização da computação em nuvem)* e teremos uma amostra significativa de dados. De agora em diante, será instanciado o objeto `power_consumption`.
# + colab={"base_uri": "https://localhost:8080/", "height": 55} colab_type="code" id="SkPKZc2G1eZY" outputId="664dd829-403b-48e3-b800-d5904f30f4a5"
# Draw a reproducible 2% sample (without replacement) to keep processing tractable.
power_consumption = consumo_energia.sample(frac=0.02, replace=False, random_state=42)
power_consumption.reset_index(inplace=True)
power_consumption.drop(columns='index', inplace=True)
# Dimensions of the sampled data.
print('SHAPE', power_consumption.shape)
# Share of the sample relative to the cleaned dataset — expected ~2%.
proportion = power_consumption.shape[0] / consumo_energia.shape[0]
print('PROPORTION: {0:.1f}%'.format(proportion*100))
# + [markdown] colab_type="text" id="A-w5EJGEDYFD"
# A proporção de 2% nos retornou uma quantidade de linhas igual a 40986. O suficiente para continuarmos o trabalho.
#
# Vamos rever as informações do dataset coletado. Vamos observar que os non-null estão todos iguais ao número de linhas. Então, sem valores missing.
# + colab={"base_uri": "https://localhost:8080/", "height": 280} colab_type="code" id="KZHCj73x6ydc" outputId="36c30092-69bd-48ff-edcb-22340779005c"
# Recheck dtypes/non-null counts on the sample (should show no missing values).
# NOTE(review): null_counts= was removed in pandas 2.0 (use show_counts=).
power_consumption.info(null_counts=True)
# -
# > **4. Remoção de atributos**
#
# Ok. Nesse momento devemos perceber que, para a resolução do problema, não necessitamos dos dados de `['Date', Time']`. Vamos removê-los. Em seguida devemos transformar os tipos de dados para valores float.
# + colab={"base_uri": "https://localhost:8080/", "height": 167} colab_type="code" id="22qXCPmIBnDe" outputId="d1f72a85-044a-4e89-d7a3-42cfc4d7156c"
# Date/Time are not needed for consumption-pattern clustering; drop them.
power_consumption.drop(columns=['Date', 'Time'], inplace=True)
# Remaining columns are numeric strings — convert everything to float for modelling.
power_consumption = power_consumption.astype('float64')
print(power_consumption.dtypes)
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="e0J7LoduF_d7" outputId="494bffb9-7f64-4796-b7fd-6b443425ad76"
# First five rows after cleaning and type conversion.
power_consumption.head()
# + [markdown] colab_type="text" id="QvS0YMsknPez"
# > **7. Estatísticas, Gráficos e Outliers**
# + [markdown] colab_type="text" id="NdUOwV1vHVaM"
# 1. Estatística
# 2. Histogramas de atributos Global Active Power, Reactive Power e Global Intensity
# 3. Boxplot dos atributos
# -
# > *7.a. Estatística básica*
# + [markdown] colab_type="text" id="Mz5J3EH6VBtX"
# Abaixo segue uma tabela estatística básica com `contagem`, `média`, `desvio-padrão`, `valor mínimo`, `valor máximo` e `quartis`.
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 300} colab_type="code" id="LO22e--TOduo" outputId="02865c65-c379-446f-95c0-37632fc347bb"
# Basic descriptive statistics (count, mean, std, min/max, quartiles), 3 decimals.
power_consumption.describe().round(3)
# + [markdown] colab_type="text" id="YHrrzpeHmhnh"
# Da Estatística impressa acima, podemos observar alguns valores aparentemente fora do comum.
#
# Observe que os valores correspondentes a 75% são bem inferiores, em alguns casos, aos valores máximos. Isso pode representar um *outlier*.
#
# Para concluir melhor, uma representação gráfica é interessante. Porém se pararmos para pensar, os valores 0 nos atributos de `['Sub_metering']` podem não ser valores discrepantes, pode apenas ser que não tenha tais equipamentos englobados em cada `['Sub_metering']`.
#
# Relembrando,<br>
#
# * `['Sub_metering_1']` corresponde à cozinha (lava louças, fogão e microondas). Qual a casa que não tem pelo menos um fogão elétrico???? Nesse caso devem utilizar a lenha ou a gás??
#
# * `['Sub_metering_2']` corresponde à lavanderia (máquina de lavar, de secar, refrigerador e iluminação). Creio que alguém viveria sem lavanderia 'elétrica'!
#
# * `['Sub_metering_3']` corresponde ao aquecedor de água e arcondicionado. Com certeza existem lugares sem esses equipamentos!
#
# Mas vamos olhar uns gráficos para ver a distribuição/intensidade desses valores.
#
#
# -
# > *7.b. Histograma e Density Plot*
# + colab={"base_uri": "https://localhost:8080/", "height": 606} colab_type="code" id="S6pNMH08K7TG" outputId="eafaad5e-8420-4dd0-920a-4acb963c946d"
# Histograms with kernel-density overlays for the four global measurements.
# NOTE(review): sns.distplot is deprecated (histplot/displot are the modern
# replacements) — confirm the pinned seaborn version still provides it.
GAP = power_consumption.Global_active_power
GRP = power_consumption.Global_reactive_power
GI = power_consumption.Global_intensity
VOLT = power_consumption.Voltage
fig, axes = plt.subplots(2,2, figsize=(15,10))
sns.despine(left=True)
sns.distplot(GAP, hist=True, ax=axes[0,0], bins=20, axlabel='Global Active Power')
sns.distplot(GRP, hist=True, ax=axes[0,1], axlabel='Global Reactive Power')
sns.distplot(GI, hist=True, ax=axes[1,0], axlabel='Global Intensity')
sns.distplot(VOLT, hist=True, ax=axes[1,1], axlabel='Voltage')
plt.show()
# -
# > *7.c. Boxplot*
# + colab={"base_uri": "https://localhost:8080/", "height": 611} colab_type="code" id="3h9budGzc-S2" outputId="5d6c3909-db5d-4945-a134-75e137d7c493"
# BoxPlot 1
# Box plots of the three global measurements, mainly to surface outliers.
Globais = power_consumption[['Global_active_power', 'Global_reactive_power', 'Global_intensity']]
plt.figure(figsize=(10,10))
plt.title('BoxPlot of Globals Power and Intensity')
sns.set(style="whitegrid", color_codes=True)
sns.boxplot(data=Globais);
# + colab={"base_uri": "https://localhost:8080/", "height": 611} colab_type="code" id="dFFORkRRZ8wA" outputId="4b610ee5-85a0-4ebc-e77d-c93b932a3067"
# BoxPlot 2
# Box plots of the three sub-metering channels (kitchen / laundry / heater-AC).
Sub_meterings = power_consumption[['Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3']]
plt.figure(figsize=(10,10))
plt.title('BoxPlot of Sub Metering Consumption')
sns.set(style="whitegrid", color_codes=True)
sns.boxplot(data=Sub_meterings);
# + [markdown] colab_type="text" id="WWKvdqjzn83h"
# **Interessante**. Existem muitos outliers (esses pontos 'infinitos'). Vamos mantê-los para visualizarmos no gráfico.
# + [markdown] colab_type="text" id="sOwH05h-pl4L"
# ### **MODELAGEM**
#
# Para a resolução do negócio, será utilizado o modelo de machine learning **K-Means**.
#
# K-Means é um método de clusterização que utiliza a distância dos diversos pontos com o pontos-centro (centróide) como métrica para separar os dados em grupos. A quantidade de grupos, e seus centróides, é definido pelo parâmetro "k". Os grupos são formados pelos dados que apresentam a menor distância com um dos centros.
# + [markdown] colab_type="text" id="aX-nVLWgm7IK"
# Para definir o melhor "k", podemos utilizar o *elbow method* (método do cotovelo). Esse método irá testar vários clusters e encontrar a melhor quantidade de grupos.
#
# Mas primeiro...
# -
# > **7. Principal Component Analysis**
# + [markdown] colab_type="text" id="Oa1YsDj5RRQl"
# A aplicação do **PCA** (Principal Component Analysis) é muito comum quando se tem uma alta dimensinalidade, com um extensível número de atributos. O PCA pega o largo dataset e, através das dependência entre os atributos, reduz o tamanho pela compressão do dataset sem remover de fato quaisquer atributos. Vamos determinar 2 componentes, assim poderemos visualizar graficamente a disposição dos grupos.
# + colab={} colab_type="code" id="1-_V1GsVRRQh" outputId="2854e51a-438f-4d05-e7ac-48d36c2dca4c"
# Project the features onto 2 principal components so clusters can be plotted in 2D.
# NOTE(review): the columns are on very different scales and are not standardised
# before PCA, so the components will be dominated by the largest-variance
# columns — confirm this is intentional.
pca = PCA(n_components=2, random_state=42, ).fit_transform(power_consumption)
pca
# -
# > **8. K-Means**
# > *8.a. Encontrando o melhor K com o Método Elbow*
# + [markdown] colab_type="text" id="ULJOTpfPpAw4"
# Inicialmente foi definido um intervalo para "k" de 1 a 9. Como demonstrado no gráfico abaixo, a partir de K = 1 a inércia cai bruscamente até o K = 3. Daí em diante a tendência cai com pouca variação.
# + colab={} colab_type="code" id="ppoPyj_URRQn" outputId="0b8e93b8-006f-4cae-d052-2a5221a3bc96"
# Elbow method: fit K-Means for k in 1..9 and record each model's inertia;
# the "elbow" of the resulting curve suggests a good cluster count.
inertia = []
for n_clusters in range(1, 10):
    model = KMeans(n_clusters=n_clusters, random_state=42)
    model.fit(pca)
    inertia.append((n_clusters, model.inertia_))
# Plot inertia against k.
ks, inertias = zip(*inertia)
plt.figure(figsize=(8,6))
plt.plot(ks, inertias, marker='o')
plt.title('Finding the best K')
plt.xlabel('K-Value')
plt.ylabel('Inertia')
plt.show()
# -
# Podemos utilizar K = 3. Mas para determinando o melhor agrupamento vamos analisar as métricas de acordo com o **Silhouette Score**.
# + [markdown] colab_type="text" id="3w_IBJdGRRRB"
# > *8.b. Silhouette Score*
#
# O coeficiente da silhueta é calculado usando a **distância média intra-cluster** e a **distância média mais próxima do cluster para cada amostra** *(a amostra, nesse, não faz parte do cluster)*. O coeficiente para uma amostra é `(b - a) / máximo (a, b)`. Quanto mais próximo de 1, melhor. Quanto mais próximo de -1, pior *(indicando que uma amostra foi disposta em um cluster errado)*. O valor próximo a ZERO indica clusters sobrepostos.
# +
# Candidate cluster counts to score.
ranges = [2,3,4,5,6,7]
silhouettes = []
# Fit K-Means for each candidate k and record its silhouette score.
for k in ranges:
    kmeans = KMeans(n_clusters=k, random_state=0).fit(pca)
    silhouettes_k = silhouette_score(pca, kmeans.labels_,metric='euclidean')
    silhouettes.append({'cluster': k, 'silhouette_score':round(silhouettes_k,4)})
k_silh = pd.DataFrame(silhouettes)
# Pick the k with the highest silhouette score.
# (argmax is positional; with the default RangeIndex it is also a valid label
# for .cluster[] — would break if the frame were reindexed.)
k_max_silh = k_silh.silhouette_score.max()
k_by_silh = k_silh.cluster[k_silh.silhouette_score.argmax()]
print(k_silh)
print(f'\nA quantidade de k-grupos escolhido foi {k_by_silh}, que retornou um Silhouette Score de {k_max_silh}.')
# -
# > *8.c. Criando o modelo K-Means*
# + colab={} colab_type="code" id="wTiz_QJ4RRQs" outputId="beef262c-5fa7-4336-8154-87881fe7befc"
# Final clustering model with the silhouette-selected k.
kmeans = KMeans(n_clusters=k_by_silh, random_state=42)
kmeans.fit(pca)
# + [markdown] colab_type="text" id="-Ob4jrzcRRQ7"
# ### **GRÁFICO**
#
# Agora irei plotar um scatterplot para visualizarmos os 4 clusters criados e seus centróides.
# + colab={} colab_type="code" id="xBlo-uooRRQ8" outputId="d2298507-6a0f-466f-eb26-d270763a42aa"
# Scatter of the 2 principal components coloured by cluster assignment,
# with the cluster centroids overlaid in red.
y_kmeans = kmeans.predict(pca)
plt.scatter(pca[:, 0], pca[:, 1], c=y_kmeans, s=5, cmap='viridis')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], marker='o',c='red', s=50)
plt.show()
# + [markdown] colab_type="text" id="TfyvxhLJRRRH"
# ### **ANALISANDO OS GRUPOS**
# + colab={} colab_type="code" id="xkpuypIURRRH"
# Attach the cluster label to each row, then split the frame per cluster.
# NOTE(review): five groups are materialised but k is chosen dynamically —
# if k_by_silh < 5 the trailing groups are empty frames; confirm k == 5 here.
power_consumption['Labels'] = kmeans.labels_
grupo_um = power_consumption[power_consumption.Labels == 0]
grupo_dois = power_consumption[power_consumption.Labels == 1]
grupo_tres = power_consumption[power_consumption.Labels == 2]
grupo_quatro = power_consumption[power_consumption.Labels == 3]
grupo_cinco = power_consumption[power_consumption.Labels == 4]
groups = [grupo_um, grupo_dois, grupo_tres, grupo_quatro, grupo_cinco]
# + colab={} colab_type="code" id="WGpeeDQ9RRRK" outputId="ea771f98-ed8d-4f63-8374-2a14d34049c3"
# Per-group distribution of Global_active_power; last panel shows group sizes.
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10,6))
ax0, ax1, ax2, ax3, ax4, ax5 = axes.flatten()
ax0.hist(x=grupo_um.Global_active_power, bins=20)
ax0.legend(prop={'size': 10})  # NOTE(review): no labelled artists — emits a "no handles" warning
ax0.set_title('Global Active Power - Group 1')
ax1.hist(x=grupo_dois.Global_active_power, bins=20)
ax1.set_title('Global Active Power - Group 2')
ax2.hist(x=grupo_tres.Global_active_power, bins=20)
ax2.set_title('Global Active Power - Group 3')
ax3.hist(x=grupo_quatro.Global_active_power, bins=20)
ax3.set_title('Global Active Power - Group 4')
# BUG FIX: the Group 5 panel previously re-plotted grupo_quatro's data.
ax4.hist(x=grupo_cinco.Global_active_power, bins=20)
ax4.set_title('Global Active Power - Group 5')
ax5.hist(x=power_consumption.Labels, color= 'green')
ax5.set_title('Groups')
fig.tight_layout()
plt.show()
# + colab={} colab_type="code" id="H9pzxEvZRRRN" outputId="99a8a61c-5f1d-408c-9bce-3c9ef98c6571"
# Per-group distribution of Global_reactive_power; last panel shows group sizes.
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10,6))
ax0, ax1, ax2, ax3, ax4, ax5 = axes.flatten()
ax0.hist(x=grupo_um.Global_reactive_power, bins=20)
ax0.legend(prop={'size': 10})  # NOTE(review): no labelled artists — emits a "no handles" warning
ax0.set_title('Global Reactive Power - Group 1')
ax1.hist(x=grupo_dois.Global_reactive_power, bins=20)
ax1.set_title('Global Reactive Power - Group 2')
ax2.hist(x=grupo_tres.Global_reactive_power, bins=20)
ax2.set_title('Global Reactive Power - Group 3')
ax3.hist(x=grupo_quatro.Global_reactive_power, bins=20)
ax3.set_title('Global Reactive Power - Group 4')
# BUG FIX: the Group 5 panel previously re-plotted grupo_quatro's data.
ax4.hist(x=grupo_cinco.Global_reactive_power, bins=20)
ax4.set_title('Global Reactive Power - Group 5')
ax5.hist(x=power_consumption.Labels, color= 'green')
ax5.set_title('Groups')
fig.tight_layout()
plt.show()
# + colab={} colab_type="code" id="hxF7i6PmRRRQ" outputId="63da7176-822c-4230-eb87-ae88345d224c"
# Per-group distribution of Global_intensity; last panel shows group sizes.
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10,6))
ax0, ax1, ax2, ax3, ax4, ax5 = axes.flatten()
ax0.hist(x=grupo_um.Global_intensity, bins=20)
ax0.legend(prop={'size': 10})  # NOTE(review): no labelled artists — emits a "no handles" warning
ax0.set_title('Global Intensity Power - Group 1')
ax1.hist(x=grupo_dois.Global_intensity, bins=20)
ax1.set_title('Global Intensity Power - Group 2')
ax2.hist(x=grupo_tres.Global_intensity, bins=20)
ax2.set_title('Global Intensity Power - Group 3')
ax3.hist(x=grupo_quatro.Global_intensity, bins=20)
ax3.set_title('Global Intensity Power - Group 4')
# BUG FIX: the Group 5 panel previously re-plotted grupo_quatro's data.
ax4.hist(x=grupo_cinco.Global_intensity, bins=20)
ax4.set_title('Global Intensity Power - Group 5')
ax5.hist(x=power_consumption.Labels, color= 'green')
ax5.set_title('Groups')
fig.tight_layout()
plt.show()
# + colab={} colab_type="code" id="2Cq-oFDHRRRS" outputId="86c797d2-c677-473d-c7a4-1efcf2e0772d"
# Per-group distribution of Sub_metering_1 (kitchen); last panel shows group sizes.
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10,6))
ax0, ax1, ax2, ax3, ax4, ax5 = axes.flatten()
ax0.hist(x=grupo_um.Sub_metering_1, bins=20)
ax0.legend(prop={'size': 10})  # NOTE(review): no labelled artists — emits a "no handles" warning
ax0.set_title('Kitchen Sub Metering - Group 1')
ax1.hist(x=grupo_dois.Sub_metering_1, bins=20)
ax1.set_title('Kitchen Sub Metering - Group 2')
ax2.hist(x=grupo_tres.Sub_metering_1, bins=20)
ax2.set_title('Kitchen Sub Metering - Group 3')
ax3.hist(x=grupo_quatro.Sub_metering_1, bins=20)
ax3.set_title('Kitchen Sub Metering - Group 4')
# BUG FIX: the Group 5 panel previously re-plotted grupo_quatro's data.
ax4.hist(x=grupo_cinco.Sub_metering_1, bins=20)
ax4.set_title('Kitchen Sub Metering - Group 5')
ax5.hist(x=power_consumption.Labels, color= 'green')
ax5.set_title('Groups')
fig.tight_layout()
plt.show()
# + colab={} colab_type="code" id="4DF7fUj4RRRV" outputId="ed74ece6-ab4b-4f8b-a697-b35d91674c3d"
# Per-group distribution of Sub_metering_2 (laundry); last panel shows group sizes.
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10,6))
ax0, ax1, ax2, ax3, ax4, ax5 = axes.flatten()
ax0.hist(x=grupo_um.Sub_metering_2, bins=20)
ax0.legend(prop={'size': 10})  # NOTE(review): no labelled artists — emits a "no handles" warning
ax0.set_title('Laundry Sub Metering - Group 1')
ax1.hist(x=grupo_dois.Sub_metering_2, bins=20)
ax1.set_title('Laundry Sub Metering - Group 2')
ax2.hist(x=grupo_tres.Sub_metering_2, bins=20)
ax2.set_title('Laundry Sub Metering - Group 3')
ax3.hist(x=grupo_quatro.Sub_metering_2, bins=20)
ax3.set_title('Laundry Sub Metering - Group 4')
# BUG FIX: the Group 5 panel previously re-plotted grupo_quatro's data.
ax4.hist(x=grupo_cinco.Sub_metering_2, bins=20)
ax4.set_title('Laundry Sub Metering - Group 5')
ax5.hist(x=power_consumption.Labels, color= 'green')
ax5.set_title('Groups')
fig.tight_layout()
plt.show()
# + colab={} colab_type="code" id="w_54uLYdRRRY" outputId="82956cde-3998-4f65-e704-81ed972e66c2"
# Per-group distribution of Sub_metering_3 (heater/AC); last panel shows group sizes.
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(10,6))
ax0, ax1, ax2, ax3, ax4, ax5 = axes.flatten()
ax0.hist(x=grupo_um.Sub_metering_3, bins=20)
ax0.legend(prop={'size': 10})  # NOTE(review): no labelled artists — emits a "no handles" warning
ax0.set_title('Heater/AC Sub Metering - Group 1')
ax1.hist(x=grupo_dois.Sub_metering_3, bins=20)
ax1.set_title('Heater/AC Sub Metering - Group 2')
ax2.hist(x=grupo_tres.Sub_metering_3, bins=20)
ax2.set_title('Heater/AC Sub Metering - Group 3')
ax3.hist(x=grupo_quatro.Sub_metering_3, bins=20)
ax3.set_title('Heater/AC Sub Metering - Group 4')
# BUG FIX: the Group 5 panel previously re-plotted grupo_quatro's data.
ax4.hist(x=grupo_cinco.Sub_metering_3, bins=20)
ax4.set_title('Heater/AC Sub Metering - Group 5')
ax5.hist(x=power_consumption.Labels, color= 'green')
ax5.set_title('Groups')
fig.tight_layout()
plt.show()
# -
# Total metered consumption per row, then the mean total for each cluster label.
power_consumption['Total_Consumed'] = power_consumption['Sub_metering_1'] + power_consumption['Sub_metering_2'] + power_consumption['Sub_metering_3']
print('MEAN OF TOTAL ENERGY CONSUMED BY GROUP')
print('--------------------------------------')
n_groups = len(power_consumption.Labels.unique())
for label in range(n_groups):
    group_mean = power_consumption.Total_Consumed[power_consumption.Labels == label].mean()
    print(f'Mean of Group {label} =', round(group_mean, 2))
| cluster/energy_consumption_groups/Pt_BR_agrupando_clientes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
from matplotlib.colors import PowerNorm, LogNorm
from scipy.spatial.distance import pdist
from matplotlib import animation
from IPython.display import HTML
# +
def get_params_posits(fname):
    """Load run parameters and filament trajectories from a '*.msd.analysis' file.

    File layout: line 1 = parameter header (must include 'n_filaments'),
    line 2 = parameter values, line 3 = position header, lines 4+ = one row
    per frame with 6 whitespace-separated values (x, y, z, ux, uy, uz) per
    filament, preceded by an index column.

    Parameters
    ----------
    fname : str or Path
        Path to the MSD analysis file.

    Returns
    -------
    tuple(pd.DataFrame, pd.DataFrame)
        (params, posits); posits carries a two-level (filament, coord)
        column MultiIndex.

    Raises
    ------
    RuntimeError
        If the file does not exist or is not named '<name>.msd.analysis'.
    """
    fname = Path(fname)
    if not fname.exists():
        raise RuntimeError("File not found")
    # BUG FIX: the original condition was `not A and B`, which rejected a file
    # only when the last suffix was wrong AND the '.msd' part was right (and
    # raised IndexError for names without two dots). Reject anything that does
    # not match '*.msd.analysis'.
    name_parts = fname.name.split('.')
    if len(name_parts) < 3 or name_parts[-1] != 'analysis' or name_parts[-2] != 'msd':
        raise RuntimeError("File must be a MSD analysis file with the format "
                           "'<file name>.msd.analysis'")
    # sep=r'\s+' replaces the deprecated delim_whitespace=True (removed in pandas 3.0).
    params = pd.read_csv(fname, sep=r'\s+', nrows=1)
    posits = pd.read_csv(fname, index_col=0, sep=r'\s+', skiprows=2)
    n_fils = params.n_filaments[0]
    # Two-level columns: fil0000..filNNNN crossed with (x, y, z, ux, uy, uz).
    fil_labels = [lab for sub in [["fil{:04d}".format(i)] * 6 for i in range(n_fils)] for lab in sub]
    arrays = [fil_labels, ["x", "y", "z", "ux", "uy", "uz"] * n_fils]
    columns = pd.MultiIndex.from_arrays(arrays, names=["filament", "coord"])
    posits.columns = columns
    return params, posits
def get_msd_vcf_from_posits(posits):
    """Get MSD and VCF from posits.

    Averages squared displacements over overlapping windows of length
    posits.shape[0] // 4 (window starts staggered by a quarter window), then
    over filaments.

    Parameters
    ----------
    posits : pd.DataFrame
        Frame-indexed positions/orientations with a (filament, coord) column
        MultiIndex as produced by get_params_posits.

    Returns
    -------
    ((msd_mean, msd_stderr), (vcf_mean, vcf_stderr))
        Each element is a Series indexed by the last window's frame index.
    """
    time_len = posits.shape[0]//4
    # BUG FIX: the filament count is derived from the column index instead of
    # reading a module-level global `params`, which made the function fail (or
    # silently use a stale value) when that global was absent or out of date.
    n_fils = len(posits.columns.get_level_values('filament').unique())
    dr2 = np.zeros(time_len * n_fils)
    du2 = np.zeros(time_len * n_fils)
    start_times = range(0, posits.shape[0] - time_len, time_len//4)
    for start in start_times:
        # NOTE(review): .stack('filament') reorders the remaining coord
        # columns, so iloc[:, 3:] / iloc[:, :3] select position vs orientation
        # components *positionally* in that reordered frame — confirm the
        # split matches the intended (x,y,z) vs (ux,uy,uz) grouping.
        pos = (posits.iloc[start:start+time_len] - posits.iloc[start]).stack('filament').iloc[:, 3:]
        u = (posits.iloc[start:start+time_len] - posits.iloc[start]).stack('filament').iloc[:, :3]
        dr2 = dr2 + np.sum(pos.values**2, axis=1)
        du2 = du2 + np.sum(u.values**2, axis=1)
    dr2 /= len(start_times)
    du2 /= len(start_times)
    pos = pd.DataFrame(dr2, columns=['dr2'], index=pos.index).unstack('filament')
    u = pd.DataFrame(du2, columns=['du2'], index=u.index).unstack('filament')
    pos.columns = list(range(pos.shape[1]))
    u.columns = list(range(u.shape[1]))
    pos_mean = pos.mean(axis=1)
    # Standard error of the mean across filaments.
    pos_stderr = pos.std(axis=1)/np.sqrt(pos.shape[1])
    u_mean = u.mean(axis=1)
    u_stderr = u.std(axis=1)/np.sqrt(u.shape[1])
    return (pos_mean, pos_stderr), (u_mean, u_stderr)
def run_msd_analysis(fname, late_time_percentage, show_plots=False, save_plots=False,
                     dist_lag_times=(10, 100, 1000), dist_xlims=None):
    """Run the full MSD/VCF analysis for one '<name>.msd.analysis' file.

    Parameters
    ----------
    fname : Path
        Analysis file; must be a ``pathlib.Path`` when ``save_plots`` is True
        (``fname.parent``/``fname.name`` build the output image names).
    late_time_percentage : float in (0, 1]
        Fraction of the trajectory discarded from the start (equilibration).
    show_plots, save_plots : bool
        Display and/or write the MSD/VCF figure and the lag-time histograms.
    dist_lag_times : sequence of int
        Frame lags for the displacement histograms.  (Default is a tuple --
        the original mutable list default would be shared between calls.)
    dist_xlims : (float, float) or None
        Histogram x-limits; None lets matplotlib choose.

    Returns
    -------
    msd, vcf : ((mean, stderr), (mean, stderr)) series pairs.

    NOTE(review): ``get_msd_vcf_from_posits`` reads the module-level
    ``params`` global, not the local one assigned here -- confirm a global
    ``params`` exists before the first call.
    """
    params, posits = get_params_posits(fname)
    assert (late_time_percentage > 0 and late_time_percentage <= 1), (
        "Late time percentage must be a value between 0 and 1"
    )
    # Keep only the late-time (post-equilibration) part of the trajectory.
    posit_start = int(late_time_percentage * posits.shape[0])
    msd, vcf = get_msd_vcf_from_posits(posits.iloc[posit_start:, :])
    if save_plots or show_plots:
        fig, ax = plt.subplots(1, 2, figsize=(14, 6))
        time = msd[0].index - msd[0].index[0]
        ax[0].plot(time, msd[0], label='MSD')
        ax[0].fill_between(time, msd[0]-msd[1], msd[0]+msd[1], alpha=0.5, label='s.e.m.')
        ax[1].plot(time, vcf[0], label='VCF')
        ax[1].fill_between(time, vcf[0]-vcf[1], vcf[0]+vcf[1], alpha=0.5, label='s.e.m.')
        ax[0].legend(loc='upper left', fontsize=15)
        ax[1].legend(loc='upper left', fontsize=15)
        ax[0].set_xlabel(r'$\tau$', fontsize=18)
        ax[1].set_xlabel(r'$\tau$', fontsize=18)
        ax[0].set_ylabel(r'$\langle (\mathbf{r}(0) - \mathbf{r}(t))^2 \rangle$', fontsize=18)
        ax[1].set_ylabel(r'$\langle (\mathbf{u}(0) - \mathbf{u}(t))^2 \rangle$', fontsize=18)
        fig.tight_layout()
        ax[0].tick_params(labelsize=15)
        ax[1].tick_params(labelsize=15)
        if show_plots:
            plt.show()
        if save_plots:
            fig.savefig(Path(fname.parent, fname.name + '.png'), dpi=200, bbox_inches='tight')
        plt.close()
    plot_lag_time_distributions(fname, posits, lag_times=dist_lag_times, save=save_plots,
                                show=show_plots, dist_xlims=dist_xlims)
    return msd, vcf
def plot_lag_time_distributions(fname, posits, lag_times=(10, 100, 1000), save=False,
                                show=True, dist_xlims=None):
    """Plot (and optionally save) x and y displacement histograms for several lag times.

    ``fname`` must be a ``pathlib.Path`` when ``save`` is True (it is used to
    build the output image name).  The default ``lag_times`` is a tuple -- the
    original list default was mutable and shared across calls.
    """
    fig, ax = plt.subplots(1, 2, figsize=(12, 6))
    plot_lag_time_dists(posits, ax[0], lag_times=lag_times, dimension='x', xlims=dist_xlims)
    plot_lag_time_dists(posits, ax[1], lag_times=lag_times, dimension='y', xlims=dist_xlims)
    fig.tight_layout()
    if show:
        plt.show()
    if save:
        fig.savefig(Path(fname.parent, fname.name + '_lag_time_dists.png'), dpi=300, bbox_inches='tight')
    plt.close()
def plot_lag_time_dists(posits, ax, lag_times=(10, 100, 1000), dimension='x', xlims=None):
    """Histogram the per-filament displacement along one axis for each lag time.

    Parameters
    ----------
    posits : pd.DataFrame with a (filament, coord) column MultiIndex.
    ax : matplotlib Axes to draw on.
    lag_times : sequence of int frame lags (tuple default -- the original
        mutable list default was shared between calls).
    dimension : 'x' or 'y'.
    xlims : optional (low, high) histogram range; None lets matplotlib choose.

    Raises
    ------
    RuntimeError
        If ``dimension`` is neither 'x' nor 'y'.
    """
    # After stack('filament') the code treats column 3 as x and column 4 as y
    # (assumes the stacked coord columns come out as ux, uy, uz, x, y, z --
    # TODO confirm).
    column_by_dim = {'x': 3, 'y': 4}
    if dimension not in column_by_dim:
        raise RuntimeError("Dimension should be 'x' or 'y'")
    index = column_by_dim[dimension]
    bins = 35 if xlims is None else np.linspace(xlims[0], xlims[1], 36)
    for T in lag_times:
        ax.hist(posits.diff(periods=T).dropna().stack('filament').iloc[:, index].values,
                bins=bins, density=True, histtype='step', linewidth=2, label=T)
    ax.set_xlabel(r'$\Delta$' + dimension + r'($\tau$)', fontsize=18)
    ax.set_ylabel(r'P($\Delta$' + dimension + r'($\tau$))', fontsize=18)
    legend = ax.legend(loc='upper right', title=r'$\tau$', fontsize=15)
    ax.tick_params(labelsize=15)
    legend.get_title().set_fontsize('18')
def run_cluster_msd_analysis(fname, lifetime_min, show_plots=True, save_plots=False, drop_nans=True):
    """Compute and plot the MSD of long-lived filament clusters.

    Parameters
    ----------
    fname : Path
        Whitespace-delimited cluster-track table with (at least) 'time' and
        'cluster_label' columns; positional columns 3:5 are used downstream.
        Must be a ``pathlib.Path`` when ``save_plots`` is True.
    lifetime_min : int
        Minimum number of frames a cluster must persist to be included.
    show_plots, save_plots : bool
        Display / write the MSD figure.
    drop_nans : bool
        Drop lag rows where shorter-lived clusters contribute no data.

    Returns
    -------
    dr2_df : pd.DataFrame
        Squared displacements, one column per (cluster, window) sample.
    """
    df = pd.read_csv(fname, delim_whitespace=True)
    # Cluster lifetime = number of frames each label appears in, longest first.
    lifetimes = df.groupby('cluster_label').count().sort_values(by='time', ascending=False).time
    long_lived_labels = lifetimes[lifetimes > lifetime_min].index
    dr2_df = None
    # Accumulate one set of windowed-MSD columns per long-lived cluster.
    for label in long_lived_labels:
        dr2_df = get_msd_vcf_from_cluster(df.loc[df['cluster_label'] == label], dr2_df)
    if drop_nans:
        dr2_df = dr2_df.dropna()
    # Mean and standard error across all (cluster, window) samples per lag.
    dr2_mean = dr2_df.mean(axis=1)
    dr2_std = dr2_df.std(axis=1)/np.sqrt(dr2_df.shape[1])
    if save_plots or show_plots:
        fig = plt.figure(figsize=(6, 4))
        ax = fig.gca()
        time = dr2_df.index
        ax.plot(time, dr2_mean, label='MSD')
        ax.fill_between(time, dr2_mean-dr2_std, dr2_mean+dr2_std, alpha=0.5, label='s.e.m.')
        ax.legend(loc='upper left', fontsize=15)
        ax.set_xlabel(r'$\tau$', fontsize=18)
        ax.set_ylabel(r'$\langle (\mathbf{r}(0) - \mathbf{r}(t))^2 \rangle$', fontsize=18)
        fig.tight_layout()
        ax.tick_params(labelsize=15)
        # N = clusters used, n = total (cluster, window) sample columns.
        ax.set_title(r"Filament cluster MSD, $N$ = {}, $n$ = {}".format(long_lived_labels.shape[0],
                                                                       dr2_df.shape[1]), fontsize=20)
        if show_plots:
            plt.show()
        if save_plots:
            fig.savefig(Path(fname.parent, fname.name + '.msd.png'), dpi=200, bbox_inches='tight')
        plt.close()
    return dr2_df
def get_msd_vcf_from_cluster(cluster_df, dr2_df = None):
    """Append squared-displacement columns for one cluster to ``dr2_df``.

    The cluster trajectory is split into overlapping windows a quarter of its
    length (window starts every time_len//4 frames).  Each window contributes
    one column: the very first column created is named after the cluster
    label, subsequent ones '<label>.<window index>'.

    NOTE(review): ``dr2`` is a single buffer reused for every window, and the
    first DataFrame column is constructed directly from it.  With pandas
    versions that do not copy on ndarray construction, that column may alias
    the buffer and get overwritten by later windows -- verify against the
    pinned pandas version.
    """
    assert cluster_df.cluster_label.nunique() == 1, "Found multiple cluster labels in cluster dataframe"
    time_len = cluster_df.shape[0]//4
    if dr2_df is None:
        dr2 = np.zeros(time_len)
    else:
        # Pad shorter clusters with NaN so they align with the existing index.
        dr2 = np.zeros(dr2_df.shape[0])
        dr2[time_len:] = np.nan
    start_times = range(0, cluster_df.shape[0] - time_len, time_len//4)
    # Columns 3:5 are taken as the x/y positions -- TODO confirm the schema.
    posits = cluster_df.iloc[:, 3:5]
    for i, start in enumerate(start_times):
        pos = (posits.iloc[start:start+time_len] - posits.iloc[start])
        dr2[:time_len] = np.sum(pos.values**2, axis=1)
        if dr2_df is None:
            dr2_df = pd.DataFrame(dr2, columns=[cluster_df.cluster_label.iloc[0]],
                                  index=cluster_df.time.iloc[:time_len] - cluster_df.time.iloc[0])
        else:
            dr2_df['{}.{}'.format(cluster_df.cluster_label.iloc[0], i)] = dr2
    return dr2_df
# -
# Frame lags (shared by all runs) for the displacement histograms.
lag_times = [25, 100, 400, 800, 1600, 3200]
# One analysis per simulation; late_time_percentage discards the run-specific
# equilibration fraction of the trajectory.
fname = Path("ic_nodr_v020_filament_mt.msd.analysis")
msd, vcf = run_msd_analysis(fname, late_time_percentage=0.1,
                            save_plots=True, show_plots=True,
                            dist_lag_times=lag_times,
                            dist_xlims = (-15, 15))
fname = Path("ic_v020_filament_mt.msd.analysis")
msd, vcf = run_msd_analysis(fname, late_time_percentage=0.4,
                            save_plots=True, show_plots=True,
                            dist_lag_times=lag_times,
                            dist_xlims=(-15, 15))
fname = Path("ic_nodr_v037_filament_mt.msd.analysis")
msd, vcf = run_msd_analysis(fname, late_time_percentage=0.2,
                            save_plots=True, show_plots=True,
                            dist_lag_times=lag_times,
                            dist_xlims=None)
fname = Path("ic_v037_filament_mt.msd.analysis")
msd, vcf = run_msd_analysis(fname, late_time_percentage=0.4,
                            save_plots=True, show_plots=True,
                            dist_lag_times=lag_times,
                            dist_xlims=(-30, 30))
# Reload one run for the van Hove / pair-distribution analysis below.
params, posits = get_params_posits(Path("ic_v037_filament_mt.msd.analysis"))
posits.head()
# Drop incomplete rows and the first quarter (equilibration) of the run.
posits = posits.dropna()
posits = posits.iloc[posits.shape[0]//4:]
# Keep only the x/y position columns, re-keyed by filament.
posits_only = posits.stack('filament').iloc[:, 3:5].unstack('filament').reorder_levels(
    ['filament', 'coord'], axis=1).sort_index(axis=1)
def get_lag_diff(posits, lag_time):
    """Per-filament coordinate differences at the given frame lag.

    Differences each row against the row ``lag_time`` frames earlier, drops
    the leading NaN rows plus one extra frame, and stacks the filament level
    so each output row is one (frame, filament) displacement.
    """
    lagged = posits.diff(periods=lag_time)
    trimmed = lagged.dropna().iloc[1:]
    return trimmed.stack('filament')
# Logarithmically spaced integer lag times (first forced to 0), de-duplicated.
lag_times = np.exp(np.linspace(0, 9, 40))
lag_times[0] = 0
lag_times = np.unique([int(t) for t in lag_times])
# One normalized 2D histogram of (dx, dy) displacements per lag time.
hists = np.array([np.histogram2d(diff.x, diff.y, bins=np.linspace(-20, 20, 100), density=True)[0]
                  for diff in
                  [get_lag_diff(posits_only, int(T)) for T in lag_times]])
font = {'family': 'DejaVu Sans Mono',
        'color': 'black',
        'weight': 'normal',
        'size': 16,
        }
fig = plt.figure(figsize=(8, 8))
ax = fig.gca()
cax = ax.imshow(hists[0], cmap=plt.cm.coolwarm, vmin=0, vmax=1, norm=LogNorm(), animated=True, origin='lower')
txt = ax.text(0.7, 0.9, r"$\tau$ = {:06.02f}".format(0), fontdict=font,transform=ax.transAxes,
              bbox=dict(facecolor='white', alpha=0.8))
cbar = fig.colorbar(cax, shrink=0.8, )
ax.tick_params(labelsize=13)
# Relabel the 99 bin indices with the underlying [-20, 20] coordinate range.
ax.set_xticks(np.linspace(0, 98, 5))
ax.set_xticklabels([str(i) for i in np.linspace(-20, 20, 5)])
ax.set_yticks(np.linspace(0, 98, 5))
ax.set_yticklabels([str(i) for i in np.linspace(-20, 20, 5)])
ax.set_xlabel('x', fontsize=18)
ax.set_ylabel('y', fontsize=18)
ax.set_title('2D Autocorrelation Distribution Function', fontsize=20)
cbar.ax.tick_params(labelsize=14)
cbar.ax.set_title(r'$\rho$', fontsize=20)
times = posits.index - posits.index[0]
def animate(i):
    # Swap in the histogram for lag_times[i] and update the tau label.
    cax.set_array(hists[i]);
    txt.set_text(r"$\tau$ = {:06.02f}".format(times[int(lag_times[i])]))
ani = animation.FuncAnimation(
    fig, animate, interval=100, frames=range(len(hists)))
ani.save("ic_v037_vh_self.mp4")
HTML(ani.to_html5_video())
# Rebuild the position tables, this time also keyed by time for pdist below.
params, posits = get_params_posits(fname)
pd.DataFrame().unstack()
posits.head().stack('filament').iloc[:, 3:5].unstack('time').reorder_levels(
    ['time', 'coord'], axis=1).sort_index(axis=1)
posits_only = posits.stack('filament').iloc[:, 3:5].unstack('filament').reorder_levels(
    ['filament', 'coord'], axis=1).sort_index(axis=1)
# Rows = filaments, columns = (time, coord): one row per particle trajectory.
posits_by_time = posits_only.stack('filament').unstack('time').reorder_levels(
    ['time', 'coord'], axis=1).sort_index(axis=1)
# Periodic box length used by the minimum-image wraps below.
xperiodic = 50
def euclidean_pbc_1d(u, v):
    """Raw 1-D separation between two points (first components only).

    The periodic-boundary wrap that used to live in this metric is now
    applied vectorised to the whole pdist output afterwards, so this
    callback just returns the unwrapped difference.
    """
    return u[0] - v[0]
def my_func(array, xperiodic):
    """Pairwise 1-D separations with a sign-preserving periodic fold.

    For every pair (i, j) with i < j, computes array[i] - array[j] and folds
    it by the box length ``xperiodic``: non-negative differences map into
    [0, xperiodic), negative ones stay negative.  Pair ordering matches
    ``scipy.spatial.distance.pdist``.

    Returns
    -------
    np.ndarray of length N*(N-1)/2.

    Fixes the original implementation, which filled ``result`` but never
    returned it (the function always yielded None).
    """
    N = array.shape[0]
    result = np.zeros(int(N*(N-1)/2))
    k = 0
    for i in range(N-1):
        for j in range(i+1, N):
            x = array[i] - array[j]
            if (x < 0):
                # x - (x//xp)*xp - xp: fold while keeping the negative sign.
                result[k] = (x / xperiodic - x // xperiodic - 1) * xperiodic
            else:
                # x - (x//xp)*xp: fold into [0, xperiodic).
                result[k] = (x / xperiodic - x // xperiodic) * xperiodic
            k += 1
    return result
# +
# %%timeit -n1 -r1
# Time-averaged 2D pair-distribution histogram.  The pdist metric returns the
# raw 1-D difference; the periodic minimum-image fold is applied to the whole
# distance vector afterwards (vectorised), which is far faster than folding
# inside the metric callback.
bins = np.linspace(-15, 15, 100)
lag_times = range(10, 10000, 100)
x0 = pdist(posits_by_time.iloc[:, 0:2], euclidean_pbc_1d)
y0 = pdist(posits_by_time.iloc[:, 1:3], euclidean_pbc_1d)
# Drop exactly-zero separations (self pairs / identical coordinates).
mask = (abs(x0) > 0)
# NOTE(review): this first frame is accumulated without the periodic fold
# applied to x0/y0 -- confirm that is intended.
hist = np.histogram2d(x0[mask], y0[mask], bins=bins)[0]
for i in lag_times:
    x0 = pdist(posits_by_time.iloc[:, i:i+2], euclidean_pbc_1d)
    y0 = pdist(posits_by_time.iloc[:, i+1:i+3], euclidean_pbc_1d)
    # Fold each separation by the box length (negative/positive branches).
    x0[x0 < 0] = (x0[x0 < 0] / xperiodic - x0[x0 < 0] // xperiodic) * xperiodic
    x0[x0 > 0] = (x0[x0 > 0] / xperiodic - x0[x0 > 0] // xperiodic) * xperiodic
    y0[y0 < 0] = (y0[y0 < 0] / xperiodic - y0[y0 < 0] // xperiodic) * xperiodic
    y0[y0 > 0] = (y0[y0 > 0] / xperiodic - y0[y0 > 0] // xperiodic) * xperiodic
    mask = (abs(x0) > 0)
    hist += np.histogram2d(x0[mask], y0[mask], bins=bins)[0]
# Average over the initial frame plus all lagged frames.
hist /= len(lag_times)+1
# -
# Render the averaged pair-distribution function.
# NOTE(review): the tick labels below span [-20, 20] while the histogram bins
# span [-15, 15] -- confirm the intended axis range.
fig = plt.figure(figsize=(8, 8))
ax = fig.gca()
cax = ax.imshow(hist, cmap=plt.cm.coolwarm, vmin=0, vmax=1, norm=PowerNorm(1), animated=True, origin='lower')
cbar = fig.colorbar(cax, shrink=0.8, )
ax.tick_params(labelsize=13)
ax.set_xticks(np.linspace(0, 98, 5))
ax.set_xticklabels([str(i) for i in np.linspace(-20, 20, 5)])
ax.set_yticks(np.linspace(0, 98, 5))
ax.set_yticklabels([str(i) for i in np.linspace(-20, 20, 5)])
ax.set_xlabel('x', fontsize=18)
ax.set_ylabel('y', fontsize=18)
cbar.ax.tick_params(labelsize=14)
cbar.ax.set_title(r'$\rho$', fontsize=20)
ax.set_title('2D Pair Distribution Function', fontsize=18)
#fig.savefig('ic_v037_pdf.png', dpi=300)
plt.show()
# Quick sanity checks on the last distance vectors and the run parameters.
x0[abs(x0)>0].shape
y0[abs(y0)>0].shape
params
x0[x0>0].shape
x0.shape
# Load the distinct-part van Hove analysis: n_frames stacked
# (n_bins_1d x n_bins_1d) histograms, one per lag time.
fname = Path('ic_v037_filament_mt.van_hove_distinct.analysis')
vh_params = pd.read_csv(fname, delim_whitespace=True, nrows=1)
lag_times = pd.read_csv(fname, delim_whitespace=True, skiprows=3, header=None, nrows=1)
n_samples = pd.read_csv(fname, delim_whitespace=True, skiprows=5, header=None, nrows=1)
data = pd.read_csv(fname, delim_whitespace=True, skiprows=6, header=None)
n_bins_1d = vh_params.n_bins_1d.iloc[0]
n_frames = vh_params.n_frames.iloc[0]
lag_times = lag_times.iloc[0].values
n_samples = n_samples.iloc[0].values
n_fil = 232
data = data.values
# Split the flat table into one 2D histogram per frame.
data_distinct = []
for i in range(n_frames):
    data_distinct.append(data[i*n_bins_1d:i*n_bins_1d+n_bins_1d])
data_distinct = np.array(data_distinct)
# Same layout for the self part.
fname = Path('ic_v037_filament_mt.van_hove_self.analysis')
vh_params = pd.read_csv(fname, delim_whitespace=True, nrows=1)
lag_times = pd.read_csv(fname, delim_whitespace=True, skiprows=3, header=None, nrows=1)
n_samples = pd.read_csv(fname, delim_whitespace=True, skiprows=5, header=None, nrows=1)
data = pd.read_csv(fname, delim_whitespace=True, skiprows=6, header=None)
n_bins_1d = vh_params.n_bins_1d.iloc[0]
n_frames = vh_params.n_frames.iloc[0]
lag_times = lag_times.iloc[0].values
n_samples = n_samples.iloc[0].values
n_fil = 232
data = data.values
data_self = []
for i in range(n_frames):
    data_self.append(data[i*n_bins_1d:i*n_bins_1d+n_bins_1d])
data_self = np.array(data_self)
# Full van Hove function = self + distinct parts; then a spatial FFT per
# frame followed by a temporal FFT across frames (magnitudes only).
data = data_self + data_distinct
data_F = []
for i in range(n_frames):
    data_F.append(np.fft.fftshift(np.fft.fft2(data[i])))
data_F = np.absolute(data_F)
data_S = np.fft.fft(data_F, axis=0)
data_S = np.absolute(data_S)
data = data_S
# BUGFIX: the adaptive colour scale referenced an undefined name (`dat`) and
# raised NameError before the manual value below could take effect; compute
# it from `data` instead, then keep the manual override used for the figures.
vmax = (1.05*data.max() if data.max() < 0.9 else 1)
vmax = 5
offset = 0
# Animate the transformed van Hove frames computed above.
font = {'family': 'DejaVu Sans Mono',
        'color': 'black',
        'weight': 'normal',
        'size': 16,
        }
fig = plt.figure(figsize=(8, 8))
ax = fig.gca()
# `offset` crops the histogram edges symmetrically (0 = no crop).
cax = ax.imshow(data[0, offset:n_bins_1d-offset, offset:n_bins_1d-offset],
                cmap=plt.cm.coolwarm, vmin=0, vmax=vmax, norm=PowerNorm(1),
                animated=True, origin='lower')
txt = ax.text(0.7, 0.9, r"$\tau$ = {:06.02f}".format(0),
              fontdict=font,transform=ax.transAxes,
              bbox=dict(facecolor='white', alpha=0.8))
cbar = fig.colorbar(cax, shrink=0.8, )
ax.tick_params(labelsize=13)
# Map the cropped bin indices onto a symmetric tick range.
ticks = np.linspace(-(n_bins_1d-2*offset-1)/4, (n_bins_1d-2*offset-1)/4, 5)
ax.set_xticks(np.linspace(0, n_bins_1d-2*offset-1, 5))
ax.set_xticklabels([str(i) for i in ticks])
ax.set_yticks(np.linspace(0, n_bins_1d-2*offset-1, 5))
ax.set_yticklabels([str(i) for i in ticks])
ax.set_xlabel('x', fontsize=18)
ax.set_ylabel('y', fontsize=18)
ax.set_title('2D Autocorrelation Distribution Function', fontsize=20)
cbar.ax.tick_params(labelsize=14)
cbar.ax.set_title(r'$\rho$', fontsize=20)
times = posits.index - posits.index[0]
def animate(i):
    # Frame i shows the transform at lag_times[i].
    cax.set_array(data[i, offset:n_bins_1d-offset, offset:n_bins_1d-offset])
    txt.set_text(r"$\tau$ = {:06.02f}".format(lag_times[i]))
ani = animation.FuncAnimation(
    fig, animate, interval=100, frames=n_frames-2)
#ani.save("ic_v037_vh_self.mp4")
HTML(ani.to_html5_video())
| analysis/msd_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bokeh Visualizations Topics
# This notebook is for exploratory visual analysis using Bokeh of the topic models
# +
import pandas as pd
from bokeh.plotting import figure, output_file, show
from bokeh.models import ColumnDataSource, DataTable, TableColumn
from bokeh.models.tools import HoverTool
from bokeh.transform import factor_cmap, factor_mark, linear_cmap, jitter
from bokeh.io import output_notebook
import colorcet as cc
import numpy as np
import xlsxwriter
from bokeh.palettes import viridis
import csv
from pathlib import Path
# -
# ## Set Up Spaces and Load Data
# These set the working folder, name the topic files, and name the key file. It also sets up the way in which Bokeh will display the data, in this case it is outputting into this notebook. This run of the topic modeling created 50 topics from the invention keyword search.
# +
# Working locations: the topic-model output and its human-readable key file.
working_folder = Path.home() / ("syncthing/Dissertation/dissertation_data/dissertation_data_working_folder/topic_modeling")
topic_file = Path(working_folder / ('invention_by_year.csv'))
key_file = Path(working_folder / ('invention_keys_filtered.csv'))
# Render Bokeh output inline in this notebook.
output_notebook()
topics = pd.read_csv(topic_file)
keys = pd.read_csv(key_file)
# Documents mentioning "invention", counted per publication year.
inv_mentions = topics.groupby("year")["filename"].count().reset_index(name="count")
#keys_list = keys['topic'].tolist()
# -
# ## Data Tables
# This is a representation of data tables from the dataset.
# +
# Bar chart of document counts per year, coloured on a viridis ramp.
table_source = ColumnDataSource(inv_mentions)
table = figure(width=720, x_minor_ticks=2)
colors=viridis(10)
color_map = linear_cmap(field_name='year', palette=colors, low=1990, high=1999)
table.vbar(x='year', top='count',source=table_source, width=0.70, color=color_map)
table.title.text = 'Documents Containing "Invention" per year'
table.xaxis.axis_label = 'Year'
table.yaxis.axis_label = 'Article Count'
table.xaxis[0].ticker.desired_num_ticks = 10
table.xaxis[0].ticker.num_minor_ticks = 0
# Hover shows the exact year/count for each bar.
hover = HoverTool()
hover.tooltips=[
    ('Year', '@year'),
    ('Count', '@count')
]
table.add_tools(hover)
show(table)
# -
# ## Prepare data for visualizations
# - This block makes a list of the entires within the dataframe available to Bokeh for visualizations
# - This block removes all low significance match values from the topic models (less than 5%)
# +
# Topics kept for analysis (hand-picked subset of the 50-topic model).
column_list=['1','2','9','10','11','12','13','14','16','17','22','25','26','30','38','40','42','45','47']
#column_list = []
#for i in range(50):
#    i = str(i)
#    column_list.append(i)
# Blank out weak matches (< 5%) so they do not drag the yearly means down.
for column in column_list:
    values = topics[column].values
    mask = values < 0.05
    values[mask] = np.nan
    topics[column] = values
# -
# This block groups the means together an exports an Excel file which contains the means, minumum, and maximum weight values for each topic for each year
# +
# Per-year mean/min/max topic weights, exported into one Excel workbook.
column_list.append('year')
topics_min = topics[column_list].groupby(['year']).min()
topics_max = topics[column_list].groupby(['year']).max()
topics_mean = topics[column_list].groupby(['year']).mean()
output_excel_file = Path(working_folder / ('invention_filtered_topics.xlsx'))
writer = pd.ExcelWriter(output_excel_file, engine = 'xlsxwriter')
topics_mean.to_excel(writer, sheet_name="Mean")
topics_min.to_excel(writer, sheet_name="Min")
topics_max.to_excel(writer, sheet_name="Max")
# NOTE(review): ExcelWriter.save() is deprecated (removed in pandas 2.0;
# use close() or a context manager) -- confirm the pinned pandas version.
writer.save()
column_list.remove('year')
# -
# These next two blocks set up the scatter plot and tabular data
# Collapse to per-year topic means for the scatter/line plot below.
topics = topics.groupby(['year']).mean()
topics = topics.reset_index()
source = ColumnDataSource(topics)
# +
# Tabular view of the topic keys and their subjective readings.
keys_source = ColumnDataSource(keys)
columns = [
    TableColumn(field="key", title="Key"),
    TableColumn(field="interpretation", title="Subjective Reading"),
    TableColumn(field="topic", title="Topics"),
]
data_table = DataTable(source=keys_source, index_position=None, columns=columns, autosize_mode='fit_viewport')
# -
# ## Visualizations
# Below are interactive visualizations from the dataset
# +
# Scatter + line per topic, jittered on year; click legend entries to mute.
p = figure(output_backend="webgl", width=800, height=1000)
color = cc.glasbey
i = 0
for column in column_list:
    x=jitter('year', 0.5)
    # NOTE(review): the draw colour indexes by loop position (color[i]) but
    # the muted colour indexes by topic number (color[int(column)]) --
    # confirm the mismatch is intentional.
    p.scatter(x=x, y=column, source=source, color=color[i], size=10, name=column, muted_color=color[int(column)], muted_alpha=0.2, legend_label=column)
    p.line(x=x, y=column, source=source, color=color[i], line_width=2, name=column, muted_color=color[int(column)], muted_alpha=0.2, legend_label=column)
    i +=1
p.xaxis.axis_label = "Year Published"
p.yaxis.axis_label = "Topic Mean"
p.xaxis[0].ticker.desired_num_ticks = 10
p.xaxis[0].ticker.num_minor_ticks = 0
p.title = "Invention Topic Means per Year > .05"
p.legend.location = "right"
p.legend.click_policy="mute"
# Detach the legend and re-add it outside the plot area on the right.
legend = p.legend[0]
hover = HoverTool()
hover.tooltips=[
    ('Year', '@year'),
    ('Topic Key', '$name'),
    ('Topic Match', '@$name{0.0000}')
]
p.add_tools(hover)
p.add_layout(legend, 'right')
# NOTE(review): 'invetion' looks like a typo for 'invention', but renaming
# would change the published output path -- left as-is.
output_graph = Path(working_folder / ('invetion_techne_visualization.html'))
output_file(output_graph)
show(p)
output_table = Path(working_folder / ('invention_techne_datatable.html'))
output_file(output_table)
show(data_table)
# -
| notebooks/bokeh visualizations- invention.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:s2s-future-dragonstone]
# language: python
# name: conda-env-s2s-future-dragonstone-py
# ---
# # Modeling Source-to-Sink systems using FastScape: 10. Cyclic variations in tectonic flux
# 
import xsimlab as xs
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
#plt.style.use('dark_background')
# %load_ext xsimlab.ipython
import hvplot.xarray
# In this last exercise, we compute the response of the basin to changes in tectonic flux in the source area that we model using a variable uplift function.
# +
from fastscape.models import marine_model
from fastscape.processes import (BlockUplift)
# Strip the marine/diffusion components from the stock marine model and swap
# in block uplift: a purely terrestrial source-to-sink configuration.
transit_model = (marine_model.
                 drop_processes('diffusion').
                 drop_processes('init_topography').
                 drop_processes('uplift').
                 drop_processes('marine').
                 drop_processes('sea').
                 update_processes({'uplift': BlockUplift}))
#transit_model.visualize(show_inputs=True)
#transit_model.visualize(show_inputs=True)
# +
# 100 km x 100 km domain on a 101 x 101 grid.
xl = 100e3
yl = 100e3
nx = 101
ny = 101
X = np.linspace(0,xl,nx)
Y = np.linspace(0,yl,ny)
x,y = np.meshgrid(X, Y)
# Uplift u0 in a narrow strip near y = 0 (the source area); gentle
# subsidence u1 tapering to zero across the rest of the domain (the basin).
u0 = 3e-2
u1 = -1e-4
u = np.zeros((ny,nx))
ylim = 2*yl/(nx-1)
u = np.where(y<ylim, u0, u1*(yl-y)/(yl-ylim))
# +
nstep = 201  # total number of steps
neq = 101  # number of steps to reach steady-state
teq = 1e7  # time to reach steady-state
period = 1e6  # period of the cyclic tectonic forcing
tfinal = teq + 5*period  # final time
# Here we build the time array (note that not all time steps are of the same length)
tim1 = np.linspace(0,teq,101)
tim2 = np.linspace(teq + period/10, tfinal, 100)
tim = np.concatenate((tim1,tim2))
# Build the uplift modulation: constant until steady state, then a +/-50%
# sinusoid with the chosen period.
uplift = np.where(tim>teq, 1 + 0.5*np.sin(2*np.pi*(tim-teq)/period), 1)
# Broadcast the static uplift map over time; modulate only where u > 0
# (the source area) so the subsiding basin is left untouched.
u_tim_space = np.broadcast_to(u,(len(tim),ny,nx)).copy().transpose()
u_tim_space = np.where(u_tim_space>0,u_tim_space*uplift,u_tim_space).transpose()
u_xr = xr.DataArray(data=u_tim_space, dims=['time','y', 'x'])
fig, ax = plt.subplots(nrows = 1, ncols = 1, sharex=False, sharey=True, figsize=(12,7))
ax.plot(tim, uplift)
# +
# # %create_setup transit_model --default --verbose
import xsimlab as xs
# Simulation setup: 'time' is the master clock; 'strati' samples every 10th
# step for stratigraphic horizon output.
ds_in = xs.create_setup(
    model=transit_model,
    clocks={'time': tim,
            'strati': tim[::10]},
    master_clock='time',
    input_vars={
        # nb. of grid nodes in (y, x)
        'grid__shape': [ny,nx],
        # total grid length in (y, x)
        'grid__length': [yl,xl],
        # node status at borders
        'boundary__status': ['looped','looped','fixed_value','core'],
        # time-varying uplift field built above
        'uplift__rate': u_xr,
        # MFD partioner slope exponent
        'flow__slope_exp': 1,
        # drainage area exponent
        'spl__area_exp': 0.4,
        # slope exponent
        'spl__slope_exp': 1,
        # bedrock channel incision coefficient
        'spl__k_coef_bedrock': 1e-5,
        # soil (sediment) channel incision coefficient
        'spl__k_coef_soil': 1e-5,
        # detached bedrock transport/deposition coefficient
        'spl__g_coef_bedrock': 1,
        # soil (sediment) transport/deposition coefficient
        'spl__g_coef_soil': 1,
        # surface topography elevation (random initial noise)
        'topography__elevation': np.random.random((ny,nx)),
        # horizon freezing (deactivation) time
        'strati__freeze_time': tim,
    },
    output_vars={'topography__elevation': 'time',
                 'drainage__area': 'time',
                 'strati__elevation': 'strati'}
)
# -
# Run the model with a progress bar.
with xs.monitoring.ProgressBar():
    ds_out = ds_in.xsimlab.run(model=transit_model)
# +
from ipyfastscape import TopoViz3d
# Interactive 3-D viewer of the simulated topography.
app = TopoViz3d(ds_out, canvas_height=600, time_dim="time")
app.components['background_color'].set_color('lightgray')
app.components['vertical_exaggeration'].set_factor(5)
app.components['timestepper'].go_to_time(ds_out.time[99])
app.show()
# +
# Stratigraphic cross-section along x = xl/2 (basin side only).
fig, ax = plt.subplots(figsize=(12,8))
nout = 101
for iout in range(nout-1, -1, -1):
    ds_out.strati__elevation.isel(strati=-1).isel(horizon=iout).sel(x=xl/2)[ds_out.y>ylim].plot()
# +
# Stratigraphic cross-section along y = 3*ylim.
fig, ax = plt.subplots(figsize=(12,8))
nout = 101
for iout in range(nout-1, -1, -1):
    ds_out.strati__elevation.isel(strati=-1).isel(horizon=iout).sel(y=ylim*3).plot()
# +
# Sediment flux out of the basin: rate of change of the summed basin
# topography between consecutive outputs, normalised by a reference flux.
nstep = len(ds_out.time)
flux = [0]
sumtop0 = ds_out.topography__elevation.isel(time=0).where(ds_out.y>=ylim).sum()
for step in range(1,nstep):
    sumtop = ds_out.topography__elevation.isel(time=step).where(ds_out.y>=ylim).sum()
    flux.append(
        (sumtop0 - sumtop)/
        (ds_out.time.values[step] - ds_out.time.values[step-1])
    )
    sumtop0 = sumtop
total_area = ds_out.grid__shape[0].values*ds_out.grid__shape[1].values
# NOTE(review): the reference flux uses the uplift rate averaged over both
# space and time -- confirm this is the intended normalisation.
flux0 = ds_out.uplift__rate.mean().values*total_area
flux = flux/flux0
# +
fig, ax = plt.subplots(nrows = 1, ncols = 1, sharex=False, sharey=True, figsize=(12,7))
ax.plot(tim, flux, label='flux')
# NOTE(review): the label 'precip' is left over from the precipitation
# exercise; this curve is the uplift modulation.
ax.plot(tim, uplift, label='precip')
ax.legend()
# +
# Amplitude of the response vs the forcing over the oscillating interval.
mid = 101
amp_flux = flux[mid:].max() - flux[mid:].min()
amp_forcing = uplift[mid:].max() - uplift[mid:].min()
print('forcing:',amp_forcing,'response:', amp_flux)
# -
# Phase lag between forcing and response, in forcing periods.
print('time lag:',(tim[np.argmax(uplift[180:])+180] - tim[np.argmax(flux[180:])+180])/period)
| notebooks/BasinTransit/FastScape_S2S_Transit_5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 (wustl)
# language: python
# name: wustl
# ---
# # T81-558: Applications of Deep Neural Networks
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), School of Engineering and Applied Science, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
#
# **Module 1 Assignment: How to Submit an Assignment**
#
# **Student Name: <NAME>**
# # Assignment Instructions
#
# Assignments are submitted using the **submit** function that is defined earlier in this document. When you submit an assignment, you are sending both your source code and data. Your data will automatically be checked and you will be informed
# of how closely your data matches up with my solution file. You are allowed to submit as many times as you like, so if you see some issues with your first submission, you are allowed to make changes and resubmit. You may resubmit as many times as you like, only your final submission will be counted towards your grade.
#
# When you first signed up for the course you were emailed a student key. You can see a sample key below. If you use this key, you will get an error. It is not a current key. **Use YOUR student key, that I provided in email.**
#
# You must also provide a filename and assignment number. The filename is simply your source file that you wish to submit. Your data is a Pandas dataframe.
#
# **Assignment 1 is very easy!** To complete assignment one, all you have to do is add your student key and make sure that the **file** variable contains the path to your source file. Your source file will most likly end in **.pynb** if you are using a juputer notebook; however, it might also end in **.py** if you are simply using a Python script.
#
# Run the code below, and if you are successful, you should see something similar to:
#
# ```
# Success: Submitted assignment 1 for jheaton:
# You have submitted this assignment 2 times. (this is fine)
# No warnings on your data. You will probably do well, but no guarantee. :-)
# ```
#
# If there is an issue with your data, you will get a warning.
#
#
# **Common Problem #1: Bad student key**
#
# If you use an invalid student key, you will see:
#
# ```
# Failure: {"message":"Forbidden"}
# ```
#
# You should also make sure that **_class#** appears somewhere in your filename. For example, for assignment 1, you should have **_class1** somewhere in your filename. If not, you will get an error. This is a check to make sure you do not submit the wrong assignment, with the wrong file. If you do have a mismatch, you will get an error such as:
#
#
# **Common Problem #2: Must have class1 (or other number) as part of the filename**
# ```
# Exception: _class1 must be part of the filename.
# ```
#
# The following video covers assignment submission: [assignment submission video](http://www.yahoo.com).
#
# **Common Problem #3: Can't find source file**
#
# You might get an error similar to this:
#
# ```
# FileNotFoundError: [Errno 2] No such file or directory: '/Users/jeffh/projects/t81_558_deep_learning/t81_558_class1_intro_python.ipynb'
# ```
#
# This means your **file** path is wrong. Make sure the path matches where your file actually is at. See my hints below in the comments for paths in different environments.
#
# **Common Problem #4: ??? **
#
# If you run into a problem not listed here, just let me know.
# # Helpful Functions
#
# You will see these at the top of every module and assignment. These are simply a set of reusable functions that we will make use of. Each of them will be explained as the semester progresses. They are explained in greater detail as the course progresses. Class 4 contains a complete overview of these functions.
# +
import base64
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from sklearn import preprocessing
# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)
def encode_text_dummy(df, name):
    """One-hot encode column *name* of *df* in place.

    Adds a "<name>-<value>" indicator column for each distinct value and
    removes the original column.
    """
    indicators = pd.get_dummies(df[name])
    for value in indicators.columns:
        df[f"{name}-{value}"] = indicators[value]
    df.drop(name, axis=1, inplace=True)
# Encode text values to a single dummy variable. The new columns (which do not replace the old) will have a 1
# at every location where the original column (name) matches each of the target_values. One column is added for
# each target value.
def encode_text_single_dummy(df, name, target_values):
    """Add one 0/1 indicator column per entry of *target_values*.

    Each new column "<name>-<tv>" is 1 where str(df[name]) equals str(tv).
    The original column is left in place.
    """
    as_text = df[name].astype(str)
    for tv in target_values:
        wanted = str(tv)
        df[f"{name}-{tv}"] = [1 if value == wanted else 0 for value in as_text]
# Encode text values to indexes(i.e. [1],[2],[3] for red,green,blue).
def encode_text_index(df, name):
    """Replace column *name* with integer codes via sklearn's LabelEncoder.

    Returns the array of original class labels (position == code).
    """
    encoder = preprocessing.LabelEncoder()
    df[name] = encoder.fit_transform(df[name])
    return encoder.classes_
# Encode a numeric column as zscores
def encode_numeric_zscore(df, name, mean=None, sd=None):
if mean is None:
mean = df[name].mean()
if sd is None:
sd = df[name].std()
df[name] = (df[name] - mean) / sd
# Convert all missing values in the specified column to the median
def missing_median(df, name):
    """Fill missing values in column *name* with the column median, in place."""
    df[name] = df[name].fillna(df[name].median())
# Convert all missing values in the specified column to the default
def missing_default(df, name, default_value):
    """Fill missing values in column *name* with *default_value*, in place."""
    df[name] = df[name].fillna(value=default_value)
# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs
def to_xy(df, target):
    """Split *df* into the (x, y) float32 arrays TensorFlow expects.

    x holds every column except *target*.  Integer targets are treated as
    class labels and one-hot encoded; anything else is returned as a
    single-column regression target.
    """
    feature_cols = [c for c in df.columns if c != target]
    # Resolve the target dtype, unwrapping it if it happens to be iterable
    # (mirrors the original defensive check).
    target_type = df[target].dtypes
    if hasattr(target_type, '__iter__'):
        target_type = target_type[0]
    x = df[feature_cols].values.astype(np.float32)
    if target_type in (np.int64, np.int32):
        # Classification: one-hot encode the labels.
        return x, pd.get_dummies(df[target]).values.astype(np.float32)
    # Regression: keep the raw values as a column vector.
    return x, df[[target]].values.astype(np.float32)
# Nicely formatted time string
def hms_string(sec_elapsed):
    """Format elapsed seconds as 'H:MM:SS.ss'."""
    hours, rem = divmod(sec_elapsed, 60 * 60)
    minutes = int(rem // 60)
    seconds = sec_elapsed % 60
    return f"{int(hours)}:{minutes:>02}:{seconds:>05.2f}"
# Regression chart.
def chart_regression(pred, y, sort=True):
    """Plot predicted vs expected values for a regression model.

    *pred* is a 1-D prediction sequence, *y* the expected values (flattened).
    When *sort* is True both series are ordered by the expected value.
    """
    frame = pd.DataFrame({'pred': pred, 'y': y.flatten()})
    if sort:
        frame.sort_values(by=['y'], inplace=True)
    plt.plot(frame['y'].tolist(), label='expected')
    plt.plot(frame['pred'].tolist(), label='prediction')
    plt.ylabel('output')
    plt.legend()
    plt.show()
# Remove all rows where the specified column is +/- sd standard deviations
def remove_outliers(df, name, sd):
    """Drop rows (in place) where column *name* lies >= *sd* standard
    deviations from its mean."""
    deviation = (df[name] - df[name].mean()).abs()
    outlier_index = df.index[deviation >= sd * df[name].std()]
    df.drop(outlier_index, axis=0, inplace=True)
# Encode a column to a range between normalized_low and normalized_high.
def encode_numeric_range(df, name, normalized_low=-1, normalized_high=1,
                         data_low=None, data_high=None):
    """Linearly rescale column *name* in place.

    Maps [data_low, data_high] onto [normalized_low, normalized_high].
    data_low / data_high default independently to the column min / max.
    (The original only filled data_high when data_low was also omitted, so
    passing one bound without the other raised a TypeError.)
    """
    if data_low is None:
        data_low = min(df[name])
    if data_high is None:
        data_high = max(df[name])
    df[name] = ((df[name] - data_low) / (data_high - data_low)) \
        * (normalized_high - normalized_low) + normalized_low
# This function submits an assignment. You can submit an assignment as much as you like, only the final
# submission counts. The paramaters are as follows:
# data - Pandas dataframe output.
# key - Your student key that was emailed to you.
# no - The assignment class number, should be 1 through 1.
# source_file - The full path to your Python or IPYNB file. This must have "_class1" as part of its name.
# . The number must match your assignment number. For example "_class2" for class assignment #2.
def submit(data,key,no,source_file=None):
    """Submit an assignment (CSV of *data* plus the source file) to the grading API.

    data : pandas DataFrame with the assignment output.
    key : student API key.
    no : assignment number; '_class<no>' must appear in the filename.
    source_file : path to the .py/.ipynb being submitted; defaults to
        __file__ when running as a script.
    """
    # In a notebook __file__ is undefined, so the caller must pass the path.
    if source_file is None and '__file__' not in globals(): raise Exception('Must specify a filename when a Jupyter notebook.')
    if source_file is None: source_file = __file__
    suffix = '_class{}'.format(no)
    # Guard against submitting the wrong assignment's file.
    if suffix not in source_file: raise Exception('{} must be part of the filename.'.format(suffix))
    with open(source_file, "rb") as image_file:
        encoded_python = base64.b64encode(image_file.read()).decode('ascii')
    ext = os.path.splitext(source_file)[-1].lower()
    if ext not in ['.ipynb','.py']: raise Exception("Source file is {} must be .py or .ipynb".format(ext))
    # Both the CSV payload and the source are base64-encoded into the JSON body.
    r = requests.post("https://api.heatonresearch.com/assignment-submit",
                      headers={'x-api-key':key}, json={'csv':base64.b64encode(data.to_csv(index=False).encode('ascii')).decode("ascii"),
                      'assignment': no, 'ext':ext, 'py':encoded_python})
    if r.status_code == 200:
        print("Success: {}".format(r.text))
    else: print("Failure: {}".format(r.text))
# -
# # Assignment #1 Sample Code
#
# For assignment #1, you only must change two things. The key must be modified to be your key and the file path most be modified to be the path to your local file. Once you have that, just run it and your assignment is submitted.
# +
# This is your student key that I emailed to you at the beginning of the semester.
key = "<KEY>"  # This is an example key and will not work.
# You must also identify your source file. (modify for your local setup)
# file='/resources/t81_558_deep_learning/assignment_yourname_class1.ipynb' # IBM Data Science Workbench
#file='C:\\Users\\jeffh\\projects\\t81_558_deep_learning\\assignments\\assignment_yourname_class1.ipynb' # Windows
file='/Users/jheaton/projects/t81_558_deep_learning/assignments/assignment_yourname_class1.ipynb' # Mac/Linux
# Toy truth-table dataframe used as the assignment-1 payload.
df = pd.DataFrame({'a' : [0, 0, 0, 1], 'b' : [0, 1, 0, 1], 'c' : [0, 1, 1, 0]})
submit(source_file=file,data=df,key=key,no=1)
# -
# # Checking Your Submission
#
# You can always double check to make sure your submission actually happened. The following utility code will help with that.
# +
import requests
import pandas as pd
import base64
import os
def list_submits(key):
    """Print every assignment submission recorded for the given student key."""
    # An empty JSON body asks the endpoint for the full submission listing.
    response = requests.post("https://api.heatonresearch.com/assignment-submit",
                             headers={'x-api-key': key},
                             json={})
    if response.status_code == 200:
        print("Success: \n{}".format(response.text))
    else:
        print("Failure: {}".format(response.text))
def display_submit(key,no):
    """Print the recorded submission for one assignment number `no`."""
    response = requests.post("https://api.heatonresearch.com/assignment-submit",
                             headers={'x-api-key': key},
                             json={'assignment': no})
    if response.status_code == 200:
        print("Success: \n{}".format(response.text))
    else:
        print("Failure: {}".format(response.text))
# +
# Show a listing of all submitted assignments.
# NOTE(review): replace <KEY> with your actual student key before running.
key = "<KEY>"
list_submits(key)
# +
# Show one assignment, by number.
display_submit(key,2)
# -
| assignments/assignment_yourname_class1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Bitwise AND of two equal-length binary strings from stdin:
# emit "1" only where both inputs have a "1".
x=input();y=input()
print("".join(["1"if x[i]==y[i]=="1"else"0"for i in range(len(x))]))
# Print the input string inside a box of asterisks.
a=input();print("*"*(len(a)+4)+"\n"+"*",a,"*"+"\n"+"*"*(len(a)+4))
# Print the first n terms of the geometric progression r**0 .. r**(n-1).
n,r=map(int,input().split())
print(*[r**i for i in range(n)])
# +
## chemical reaction program
def gcd(a, b):
    """Greatest common divisor of a and b (iterative Euclidean algorithm)."""
    while b != 0:
        a, b = b, a % b
    return a
# Read "element valency" pairs; a compound can form only between ions of
# opposite charge (one positive, one negative valency).
element1, valency1 = input().split(); valency1 = int(valency1)
element2, valency2 = input().split(); valency2 = int(valency2)
if valency2 * valency1 >= 0:
    # Same sign (or a zero valency): no neutral compound can be formed.
    print("IMPOSSIBLE")
else:
    # Work with magnitudes; keep the originals to compute the gcd reduction.
    valency1 = abs(valency1); o1 = valency1
    valency2 = abs(valency2); o2 = valency2
    # Reduce the subscripts by their gcd so the formula is in lowest terms.
    valency1 = int(valency1 / gcd(o1, o2))
    valency2 = int(valency2 / gcd(o1, o2))
    # A subscript of 1 is omitted by chemical convention.
    if valency1 == 1 and valency2 != 1: print(element1 + str(valency2) + element2)
    elif valency2 == 1 and valency1 != 1: print(element1 + element2 + str(valency1))
    elif valency1 == 1 and valency2 == 1: print((element1 + element2))
    else: print(element1 + str(valency2) + element2 + str(valency1))
# -
| Codingame.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: MindSpore-1.1.1
# language: python
# name: mindspore-1.1.1
# ---
# # Implementing Simple Linear Function Fitting
#
# Author: [<NAME>](https://github.com/helloyesterday) Editor: [Mingfu Lv](https://gitee.com/lvmingfu)
#
# `Linux` `Windows` `Ascend` `GPU` `CPU` `Whole Process` `Beginner` `Intermediate` `Expert`
#
# [](https://gitee.com/mindspore/docs/blob/master/docs/mindspore/programming_guide/source_en/quick_start/linear_regression.ipynb) [](https://mindspore-website.obs.cn-north-4.myhuaweicloud.com/notebook/master/programming_guide/en/mindspore_linear_regression.ipynb) [](https://authoring-modelarts-cnnorth4.huaweicloud.com/console/lab?share-url-b64=aHR0cHM6Ly9taW5kc3BvcmUtd2Vic2l0ZS5vYnMuY24tbm9ydG<KEY>ZXNzaW9uLmlweW5i&imageid=65f636a0-56cf-49df-b941-7d2a07ba8c8c)
# ## Overview
#
# Regression algorithms usually use a series of properties to predict a value, and the predicted values are consecutive. For example, the price of a house is predicted based on some given feature data of the house, such as area and the number of bedrooms; or future temperature conditions are predicted by using the temperature change data and satellite cloud images in the last week. If the actual price of the house is CNY5 million, and the value predicted through regression analysis is CNY4.99 million, the regression analysis is considered accurate. For machine learning problems, common regression analysis includes linear regression, polynomial regression, and logistic regression. This example describes the linear regression algorithms and how to use MindSpore to perform linear regression AI training.
#
# The whole process is as follows:
#
# 1. Generate datasets.
# 2. Define a training network.
# 3. Define and associate the forward and backward propagation networks.
# 4. Prepare for fitting process visualization.
# 5. Perform training.
#
# > This document is applicable to CPU, GPU and Ascend environments. The source code address of this example: <https://gitee.com/mindspore/docs/blob/master/docs/sample_code/linear_regression.py>.
#
# ## Environment Preparation
#
# Complete MindSpore running configuration.
# +
from mindspore import context
# Run in static graph mode on the CPU backend for this tutorial.
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
# -
# `GRAPH_MODE`: graph mode.
#
# `device_target`: sets the MindSpore training hardware to CPU.
#
# > Third-party support package: `matplotlib`. If this package is not installed, run the `pip install matplotlib` command to install it first.
#
# ## Generating Datasets
#
# ### Defining the Dataset Generation Function
#
# `get_data` is used to generate training and test datasets. Since linear data is fitted, the required training datasets should be randomly distributed around the objective function. Assume that the objective function to be fitted is $f(x)=2x+3$. $f(x)=2x+3+noise$ is used to generate training datasets, and `noise` is a random value that complies with standard normal distribution rules.
# +
import numpy as np
def get_data(num, w=2.0, b=3.0):
    """Yield `num` noisy samples of the line y = w*x + b.

    Each sample is a pair of float32 arrays of shape (1,): the x value drawn
    uniformly from [-10, 10) and y = w*x + b plus standard-normal noise.
    """
    for _ in range(num):
        sample_x = np.random.uniform(-10.0, 10.0)
        sample_y = sample_x * w + b + np.random.normal(0, 1)
        yield np.array([sample_x]).astype(np.float32), np.array([sample_y]).astype(np.float32)
# -
# Use `get_data` to generate 50 groups of test data and visualize them.
# +
import matplotlib.pyplot as plt
# Draw 50 random samples and plot them against the target line y = 2x + 3.
eval_data = list(get_data(50))
# NOTE(review): likely intended np.arange(-10, 10, 0.1); the three points
# (-10, 10, 0.1) are collinear on the target line, so the plot looks the same.
x_target_label = np.array([-10, 10, 0.1])
y_target_label = x_target_label * 2 + 3
x_eval_label,y_eval_label = zip(*eval_data)
plt.scatter(x_eval_label, y_eval_label, color="red", s=5)
plt.plot(x_target_label, y_target_label, color="green")
plt.title("Eval data")
plt.show()
# -
# In the preceding figure, the green line indicates the objective function, and the red points indicate the verification data `eval_data`.
#
# ### Defining the Data Augmentation Function
#
# Use the MindSpore data conversion function `GeneratorDataset` to convert the data type to that suitable for MindSpore training, and then use `batch` and `repeat` to perform data augmentation. The operation is described as follows:
#
# - `ds.GeneratorDataset`: converts the generated data into a MindSpore dataset and saves the x and y values of the generated data to arrays of `data` and `label`.
# - `batch`: combines `batch_size` pieces of data into a batch.
# - `repeat`: multiplies the number of datasets.
# +
from mindspore import dataset as ds
def create_dataset(num_data, batch_size=16, repeat_size=1):
    """Wrap `num_data` generated samples in a batched, repeated MindSpore dataset."""
    samples = list(get_data(num_data))
    dataset = ds.GeneratorDataset(samples, column_names=['data','label'])
    # Batch then repeat, mirroring the tutorial's augmentation pipeline.
    return dataset.batch(batch_size).repeat(repeat_size)
# -
# Use the dataset augmentation function to generate training data and view the training data format.
# +
# Hyperparameters for the synthetic training set.
data_number = 1600
batch_number = 16
repeat_number = 1
ds_train = create_dataset(data_number, batch_size=batch_number, repeat_size=repeat_number)
print("The dataset size of ds_train:", ds_train.get_dataset_size())
# Peek at one batch to confirm the (16, 1) data/label shapes.
dict_datasets = next(ds_train.create_dict_iterator())
print(dict_datasets.keys())
print("The x label value shape:", dict_datasets["data"].shape)
print("The y label value shape:", dict_datasets["label"].shape)
# -
# Use the defined `create_dataset` to perform augmentation on the generated 1600 data records and set them into 100 datasets with the shape of 16 x 1.
#
# ## Defining the Training Network
#
# In MindSpore, use `nn.Dense` to generate a linear function model of single data input and single data output.
#
# $$f(x)=wx+b\tag{1}$$
#
# Use the Normal operator to randomly initialize the weights $w$ and $b$.
# +
from mindspore.common.initializer import Normal
from mindspore import nn
class LinearNet(nn.Cell):
    """Single-input, single-output linear model f(x) = w*x + b."""
    def __init__(self):
        super(LinearNet, self).__init__()
        # Dense(1, 1): one weight and one bias, both initialised from N(0, 0.02).
        self.fc = nn.Dense(1, 1, Normal(0.02), Normal(0.02))
    def construct(self, x):
        # Forward pass: apply the single linear layer.
        x = self.fc(x)
        return x
# -
# Call the network to view the initialized model parameters.
# Instantiate the model and print its randomly initialised weight and bias.
net = LinearNet()
model_params = net.trainable_params()
for param in model_params:
    print(param, param.asnumpy())
# After initializing the network model, visualize the initialized network function and training dataset to understand the model function before fitting.
# +
from mindspore import Tensor
# Evaluate the untrained model at the target-line x positions to visualise
# the initial (pre-training) fit against the evaluation data.
x_model_label = np.array([-10, 10, 0.1])
y_model_label = (x_model_label * Tensor(model_params[0]).asnumpy()[0][0] +
                 Tensor(model_params[1]).asnumpy()[0])
plt.axis([-10, 10, -20, 25])
plt.scatter(x_eval_label, y_eval_label, color="red", s=5)
plt.plot(x_model_label, y_model_label, color="blue")
plt.plot(x_target_label, y_target_label, color="green")
plt.show()
# -
# As shown in the preceding figure, the initialized model function in blue differs greatly from the objective function in green.
#
# ## Defining and Associating the Forward and Backward Propagation Networks
#
# Define the loss function of the model. The mean squared error (MSE) method is used to determine the fitting effect. The smaller the MSE value difference, the better the fitting effect. The loss function formula is as follows:
#
# $$J(w)=\frac{1}{2m}\sum_{i=1}^m(h(x_i)-y^{(i)})^2\tag{2}$$
#
# Assuming that the $i$th data record in the training data is $(x_i,y^{(i)})$, parameters in formula 2 are described as follows:
#
# - $J(w)$ specifies the loss value.
#
# - $m$ specifies the amount of sample data. In this example, the value of $m$ is `batch_number`.
#
# - $h(x_i)$ is a predicted value obtained after the $x_i$ value of the $i$th data record is substituted into the model network (formula 1).
#
# - $y^{(i)}$ is the $y^{(i)}$ value (label value) of the $i$th data record.
#
# ### Defining the Forward Propagation Network
#
# A forward propagation network consists of two parts:
#
# 1. Bring parameters into the model network to obtain the predicted value.
# 2. Use the predicted value and training data to compute the loss value.
#
# The following method is used in MindSpore:
# Forward propagation: the linear network plus an MSE loss to score predictions.
net = LinearNet()
net_loss = nn.loss.MSELoss()
# ### Defining the Backward Propagation Network
#
# The objective of the backward propagation network is to continuously change the weight value to obtain the minimum loss value. Generally, the weight update formula is used in the linear network:
#
# $$w_{t}=w_{t-1}-\alpha\frac{\partial{J(w_{t-1})}}{\partial{w}}\tag{3}$$
#
# Parameters in formula 3 are described as follows:
#
# - $w_{t}$ indicates the weight after training steps.
# - $w_{t-1}$ indicates the weight before training steps.
# - $\alpha$ indicates the learning rate.
# - $\frac{\partial{J(w_{t-1}\ )}}{\partial{w}}$ is the differentiation of the loss function to the weight $w_{t-1}$.
#
# After all weight values in the function are updated, transfer the values to the model function. This process is the backward propagation. To implement this process, the optimizer function in MindSpore is required.
# Momentum SGD updates the weight/bias along the loss gradient (formula 3).
opt = nn.Momentum(net.trainable_params(), learning_rate=0.005, momentum=0.9)
# ### Associating the Forward and Backward Propagation Networks
#
# After forward propagation and backward propagation are defined, call the `Model` function in MindSpore to associate the previously defined networks, loss functions, and optimizer function to form a complete computing network.
# +
from mindspore import Model
# Bundle network, loss and optimizer into one trainable Model.
model = Model(net, net_loss, opt)
# -
# ## Preparation for Fitting Process Visualization
#
# ### Defining the Visualization Function
#
# To make the entire training process easier to understand, the test data, objective function, and model network of the training process need to be visualized. The following defines a visualization function which is called after each training step to display a fitting process of the model network.
# +
import matplotlib.pyplot as plt
import time
def plot_model_and_datasets(net, eval_data):
    """Plot the fitted line (blue), target line (green) and eval points (red)."""
    weight = net.trainable_params()[0]
    bias = net.trainable_params()[1]
    xs = np.arange(-10, 10, 0.1)
    # Current model prediction over the plotting range.
    fitted = xs * Tensor(weight).asnumpy()[0][0] + Tensor(bias).asnumpy()[0]
    eval_x, eval_y = zip(*eval_data)
    target = xs * 2 + 3
    plt.axis([-11, 11, -20, 25])
    plt.scatter(eval_x, eval_y, color="red", s=5)
    plt.plot(xs, fitted, color="blue")
    plt.plot(xs, target, color="green")
    plt.show()
    # Short pause so successive frames are visible during live training.
    time.sleep(0.2)
# -
# ### Defining the Callback Function
#
# MindSpore provides tools to customize the model training process. The following calls the visualization function in `step_end` to display the fitting process. For more information, see [Customized Debugging Information](https://www.mindspore.cn/docs/programming_guide/en/master/custom_debugging_info.html#callback).
#
# - `display.clear_output`:Clear the printed content to achieve dynamic fitting effect.
# +
from IPython import display
from mindspore.train.callback import Callback
class ImageShowCallback(Callback):
    """Redraw the fit after every training step for a live animation effect."""
    def __init__(self, net, eval_data):
        # Network whose parameters are plotted, and fixed evaluation points.
        self.net = net
        self.eval_data = eval_data
    def step_end(self, run_context):
        # Plot first, then clear the previous output so frames replace each
        # other instead of stacking up in the notebook.
        plot_model_and_datasets(self.net, self.eval_data)
        display.clear_output(wait=True)
# -
# ## Performing Training
#
# After the preceding process is complete, use the training parameter `ds_train` to train the model. In this example, `model.train` is called. The parameters are described as follows:
#
# - `epoch`: Number of times that the entire dataset is trained.
# - `ds_train`: Training dataset.
# - `callbacks`: Required callback function during training.
# - `dataset_sink_mode`: Dataset offload mode, which supports the Ascend and GPU computing platforms. In this example, this parameter is set to False for the CPU computing platform.
# +
# NOTE(review): LossMonitor is imported but not used below.
from mindspore.train.callback import LossMonitor
# Train for one epoch; dataset_sink_mode=False is required on CPU.
epoch = 1
imageshow_cb = ImageShowCallback(net, eval_data)
model.train(epoch, ds_train, callbacks=[imageshow_cb], dataset_sink_mode=False)
# Final visualisation and learned parameters (should approach w=2, b=3).
plot_model_and_datasets(net, eval_data)
for param in net.trainable_params():
    print(param, param.asnumpy())
# -
# After the training is complete, the weight parameters of the final model are printed. The value of weight is close to 2.0 and the value of bias is close to 3.0. As a result, the model training meets the expectation.
#
# ## Summary
#
# We have learned the principles of the linear fitting algorithm, defined the corresponding algorithms in the MindSpore framework, understood the training process of such linear fitting models in MindSpore, and finally fitted a model function close to the objective function. In addition, you can adjust the dataset generation interval from (-10,10) to (-100,100) to check whether the weight values are closer to those of the objective function; adjust the learning rate to check whether the fitting efficiency changes; or explore how to use MindSpore to fit quadratic functions, such as $f(x)=ax^2+bx+c$, or higher-order functions.
| docs/mindspore/programming_guide/source_en/quick_start/linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="okdyN_8MdBrF" colab_type="code" colab={}
from keras.datasets import mnist
from keras.layers import LSTM, Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils
import cv2
import numpy as np
# + id="iJ4d-pOtdUQy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="a803f9ce-48d3-453c-e5c0-108268505f86"
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# + id="hAUCvrN6dZvh" colab_type="code" colab={}
# Image generation for a given digit: keep only training images labelled 3.
X = []
for i in range(len(y_train)):
    if y_train[i] == 3:
        X.append(x_train[i])
# + id="DosDf9YalL8w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3c3144ab-953a-45a0-8899-8e6106d38ec0"
len(X)
# + id="xIyK-4VwdiM8" colab_type="code" colab={}
X= np.array(X)
# + id="qSLMElYZdzdd" colab_type="code" colab={}
# Shape (1, num_images, 28, 28): one "batch" of every image of the digit.
X = np.reshape(X, (1, len(X), 28, 28))
# + id="GhxCm5_Ed-sW" colab_type="code" colab={}
X_train=[]
Y_train =[]
# Row-by-row, pixel-by-pixel: the three pixels directly above a pixel form
# the input sequence, the pixel itself is the target value.
for it in range(X.shape[1]):
    for i in range(1,X.shape[2] -1):
        for j in range(1,X.shape[3] -1):
            X_train.append([X[0][it][i-1][j-1], X[0][it][i-1][j], X[0][it][i-1][j+1]])
            Y_train.append(X[0][it][i][j])
# + id="PL0Q9eIn67J9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="31bd7289-ee3a-4719-dea8-923cbb539fe3"
# Sanity check: expected number of (input, target) pairs from the loop above.
X.shape[1]*(X.shape[2]-2)*(X.shape[3] -2)
# + id="AG23esykeEac" colab_type="code" colab={}
# LSTM input shape: (samples, timesteps=1, features=3).
X_train = np.reshape(X_train,(((X.shape[1])*(X.shape[2]-2)*(X.shape[3]-2)),1,3))
# + id="WS_6xhtcHBSo" colab_type="code" colab={}
Y_train = np.array(Y_train)
# + id="rqH9Jv5jH8kw" colab_type="code" colab={}
Y_train = np.reshape(Y_train,(((X.shape[1])*(X.shape[2]-2)*(X.shape[3]-2)),1))
# + id="NCQlKJTdIOmG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="74b0a29c-5f54-4a8a-cbb9-6ecb6605133e"
# Inspect the sample/label shapes before one-hot encoding.
X_train.shape, Y_train.shape #problem look here
# + id="1kvvZvyGJHGe" colab_type="code" colab={}
# One-hot encode pixel intensities. Pass num_classes=256 explicitly: without
# it, to_categorical sizes the encoding to max(Y_train)+1, which mismatches
# the Dense(256) output layer whenever the brightest pixel here is below 255.
Y_train = np_utils.to_categorical(Y_train, num_classes=256)
# + id="RNAGk8d8hP0U" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a0071214-2c3a-42f4-857e-d41bc546004d"
Y_train.shape
# + id="JhuUu4y6hQRt" colab_type="code" colab={}
# + id="HzxI3RmseI0h" colab_type="code" colab={}
# LSTM(256) over the 3-pixel context, then a 256-way softmax that predicts
# the next pixel's intensity as a class.
model = Sequential()
model.add(LSTM(256, input_shape=(X_train.shape[1],X_train.shape[2])))
#model.add(LSTM(128, activation='softmax'))
#model.add(Dropout(0.2))
model.add(Dense(256, activation = 'softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics=['accuracy'])
# + id="8NDLzE4EkkQB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="8cb9e2dc-cf72-4a9c-ff37-79694e904a3e"
# Very large batch size keeps epochs manageable on this many samples.
model.fit(X_train, Y_train, epochs=4, batch_size = 32768)
# + id="85cUUN2El7b1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 208} outputId="8244f58a-f0f4-4991-f377-baf9fb26425a"
model.summary()
# + id="vXsbKmkdZh74" colab_type="code" colab={}
model.save("mp_3.h5")
# + id="Dr7VGoC1HTGw" colab_type="code" colab={}
# Fraction of zero-valued (background) pixels among all input values.
unique, counts = np.unique(X_train, return_counts=True)
# + id="ZaVFMsc2HrL4" colab_type="code" colab={}
d = dict(zip(unique, counts))
# + id="DSsoIlFzHyY6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="04dee821-81c2-493a-a7e0-dae183588a60"
# Use .get and the total count so intensities absent from X_train do not
# raise KeyError (the previous d[i] lookup assumed every value 0..255 occurs).
d.get(0, 0) / counts.sum()
# + id="nH676uCBIIuQ" colab_type="code" colab={}
# Build a test set from the first image: the 3-pixel context above each
# interior pixel of a 26x26 grid.
X_test = []
Y_test = []
for i in range(1,27):
    for j in range(1,27):
        X_test.append([X[0][0][i-1][j-1], X[0][0][i-1][j], X[0][0][i-1][j+1]])
# + id="avFeGY_YKSR7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 657} outputId="15dad101-b68a-4ae0-eb48-dfb6bb08f0bb"
from keras.models import load_model
test_model = load_model('mp_3.h5')
# + id="SxSciNufKn-n" colab_type="code" colab={}
X_test = np.reshape(X_test,(len(X_test), 1, 3))
# + id="-O5Ha8C1KgPa" colab_type="code" colab={}
prediction = test_model.predict(X_test)
# + id="ABYsF0snPhlu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 243} outputId="ce5c4ce0-67bd-40e8-d36b-7c49e6e422c4"
prediction
# + id="chK_SU7tK7U2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="86541742-a9b2-4114-a483-35a0d4e2f152"
# Class (intensity) with the highest probability for the first test pixel.
list(prediction[0]).index(max(prediction[0]))
# + id="cA0TxsXBOWvJ" colab_type="code" colab={}
# Predicted intensity per pixel: index of the max probability. np.argmax is
# the vectorised equivalent of the previous per-row list(...).index(max(...))
# scan (both pick the first maximum on ties).
Y_test = np.argmax(prediction, axis=1).tolist()
# + id="AFb9edN6OgRy" colab_type="code" colab={}
# Distribution of predicted intensities.
unique, count = np.unique(np.array(Y_test),return_counts = True )
# + id="3SBpegWYcmFL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9fc48851-63bb-4322-9483-2d1936cd9ecc"
dict(zip(unique, count))
# + id="cpxHq_H6gCBH" colab_type="code" colab={}
# Reassemble the 676 predictions into the 26x26 generated image and save it.
Y_test = np.reshape(Y_test,(26, 26))
# + id="MwVnADjwgM6A" colab_type="code" colab={}
np.save("mp.npy",Y_test)
# + id="fvLNKZKpg4xS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3fb0d7fc-78f0-4a04-9902-708427a0e0a1"
# !ls
# + id="POzYvArEjDDr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2de9bd33-2b0d-4516-fdca-10044439dc25"
# !ls
# + id="e6BJ7Or7jE2B" colab_type="code" colab={}
import matplotlib.pyplot as plt
# + id="6kS1CcWDjcXj" colab_type="code" colab={}
import numpy as np
# Reload and display the generated image.
mp = np.load("mp.npy")
# + id="z5Stj5Hejsbw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 364} outputId="c094187a-1085-4109-917f-9ed8af42408e"
plt.imshow(mp)
# + id="vn-D95sqjvpw" colab_type="code" colab={}
| minor_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import sys
# Make the parent folder importable so the project-local `utils` can be loaded.
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import display
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
import seaborn as sns
from utils import get_dfs_full_prop
TREATMENT = "hcomp.txx.resp"
export_folder = f"../data/output/diagrams/{TREATMENT}"
os.makedirs(export_folder, exist_ok=True)
dfs_full_prop = get_dfs_full_prop()
dfs = {}
dfs_full = {}
result_df = pd.DataFrame(index=range(105))
stats = pd.DataFrame(index=["min_offer", "min_offer_final"])
TREATMENTS = {"t00", "t10a", "t11a", "t12", "t13", "t20", "t30", "t31"}
# Raw treatment codes -> display names; commented entries are excluded runs.
TREATMENTS_MAPPING = {
    "t00": "T0",
    # "t10a": "TAI.R",
    # "t10b": "TAI.R",
    "t12": "T2.0",
    "t13": "T2.1",
    #"t20": "TAIAuto.R",
    "t30": "T3.0",
    "t31": "T3.1",
}
TREATMENTS = sorted(TREATMENTS_MAPPING.values())
for treatment, new_treatment in TREATMENTS_MAPPING.items():
    # Read and sanitize the data
    df = pd.read_csv(f"../data/{treatment}/export/result__{treatment}_resp.csv")
    df_full = df.copy()
    # Earlier variant of drop_cols kept prop_time_spent:
    # drop_cols = ["worker_id", "resp_worker_id", "prop_worker_id", "updated", "status", "job_id", "status", "timestamp", "rowid", "offer_dss", "offer", "offer_final", "completion_code"]
    drop_cols = ["worker_id", "resp_worker_id", "prop_worker_id", "updated", "status", "job_id", "status", "timestamp", "rowid", "offer_dss", "offer", "offer_final", "completion_code", "prop_time_spent"]
    df = df[[col for col in df.columns if col not in drop_cols]]
    if "min_offer_final" not in df_full:
        # Treatments without a DSS phase: final decision equals the initial one.
        df_full["min_offer_final"] = df_full["min_offer"]
    treatment = new_treatment
    dfs[treatment] = df
    dfs_full[treatment] = df_full
    result_df[treatment+"."+"min_offer"] = df_full["min_offer"]
    result_df[treatment+"."+"min_offer_final"] = df_full["min_offer_final"]
    stats[treatment] = [df_full["min_offer"].mean(), df_full["min_offer_final"].mean()]
    cols = [col for col in df.columns if col != "min_offer"] + ["min_offer"]
# -
# Summary statistics of the min_offer columns across treatments.
result_df.describe()
#sns.set()
# Mean initial vs. final minimum offers per treatment.
stats.T.plot.bar()
# +
import seaborn as sns
result_df.columns
sns.barplot(data=result_df)
# -
# **Correlation to the target value**
# **Responder's min_offer / Proposer's over and final_offer distribution**
# +
# Histogram of responder min_offer (and post-DSS min_offer_final) per treatment.
# NOTE(review): sns.distplot is deprecated in newer seaborn (use histplot).
bins = list(range(0, 105, 5))
idx = 0
f, axes = plt.subplots(3, 2, figsize=(7, 7))
nb_rows = 3
nb_cols = 2
for treatment in sorted(TREATMENTS):
    df = dfs_full[treatment]
    fig = plt.figure(figsize=(5, 4))
    ax = sns.distplot(df["min_offer"], hist=True, kde=False, bins=bins, ax=axes[idx//nb_cols, idx%nb_cols], axlabel=treatment, label="Responder")
    _ = ax.legend()
    if treatment.upper() not in {"T0.R"}:
        ax = sns.distplot(df["min_offer_final"], hist=True, kde=False, bins=bins, ax=axes[idx//nb_cols, idx%nb_cols], axlabel=treatment, label="Responder + DSS_info")
        _ = ax.legend(loc="center")
    idx += 1
_ = plt.tight_layout()
f.savefig(os.path.join(export_folder, "min_offers.pdf"), bbox_inches='tight')
print(export_folder)
# +
# Cumulative distribution of the change (final - initial) in min_offer.
bins = np.array(list(range(-55, 55, 5)))
idx = 0
f, axes = plt.subplots(1, 1, figsize=(10, 7))
kwargs = {'cumulative': True}
ax = axes
for treatment in TREATMENTS:
    #ax = axes[idx//3, idx%3]
    df = dfs_full[treatment]
    fig = plt.figure(figsize=(5, 4))
    #ax = sns.distplot(df["offer"], hist=True, kde=False, hist_kws=kwargs, bins=bins, ax=axes[idx//3, idx%3], axlabel=treatment)
    #_ = ax.legend()
    if treatment.upper() != "T0":
        x = bins[:-1]
        y = np.cumsum(np.histogram(df["min_offer_final"] - df["min_offer"], bins=bins)[0])
        print(x.shape, y.shape)
        # Express counts as a percentage of participants in the treatment.
        y = y * 100 / df.shape[0]
        ax = sns.lineplot(x, y, label=treatment, ax=ax)
        ax.set(xlabel ="min_offer_final - min_offer")
    #ax.legend()
    #_ = ax.legend(loc="center")
    idx += 1
plt.tight_layout()
f.savefig(os.path.join(export_folder, "final-min_offer_diff_hist_cumul.pdf"), bbox_inches='tight')
print(export_folder)
# +
# Same cumulative view, split into negative (decreased) and positive
# (increased) offer changes on two separate axes.
bins = np.array(list(range(-55, 55, 5)))
bins_neg = np.array(list(range(-55, 0)))
bins_pos = np.array(list(range(5, 55)))
idx = 0
f, axes = plt.subplots(1, 2, figsize=(8, 5))
kwargs = {'cumulative': True}
ax = axes
for treatment in TREATMENTS:
    #ax = axes[idx//3, idx%3]
    df = dfs_full[treatment]
    fig = plt.figure(figsize=(5, 4))
    #ax = sns.distplot(df["offer"], hist=True, kde=False, hist_kws=kwargs, bins=bins, ax=axes[idx//3, idx%3], axlabel=treatment)
    #_ = ax.legend()
    if treatment.upper() != "T0":
        x = bins[:-1]
        y_neg = np.cumsum(np.histogram(df["min_offer_final"] - df["min_offer"], bins=bins_neg)[0])
        y_neg = y_neg * 100 / df.shape[0]
        y_pos = np.cumsum(np.histogram(df["min_offer_final"] - df["min_offer"], bins=bins_pos)[0])
        y_pos = y_pos * 100 / df.shape[0]
        #print(x.shape, y.shape)
        ax = axes[0]
        ax = sns.lineplot(x=bins_neg[:-1], y=y_neg, label=treatment, ax=ax)
        ax.set(xlabel ="$min\_offer_{final} - min\_offer$", ylabel="fraction of participants (%)")
        ax.set_ylim(0, 30)
        ax = axes[1]
        ax = sns.lineplot(x=bins_pos[:-1], y=y_pos.astype(int), label=treatment, ax=ax)
        ax.set(xlabel ="$min\_offer_{final} - min\_offer$")
        ax.set_ylim(0, 30)
    #ax.legend()
    #_ = ax.legend(loc="center")
    idx += 1
plt.tight_layout()
f.savefig(os.path.join(export_folder, "final-min_offer_diff_hist_cumul_split.pdf"), bbox_inches='tight')
print(export_folder)
| notebooks_hcomp/Data - Responder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 64-bit
# name: python3
# ---
# # We will be creating the folds for one-point tracking, six-point tracking, and all-points tracking!
# +
# first track all points
K = 5
import mediapipe as mp
from PIL import Image as im
import mediapipe as mp
def hand_locations(frame, min_detection_confidence = 0.5, min_tracking_confidence = 0.5):
    """Return a flat vector of 126 hand-landmark coordinates for one frame.

    Layout: 42 x-values, then 42 y-values, then 42 z-values (21 landmarks
    per hand, up to two hands); slots stay 0 when no hand is detected.
    """
    detector = mp.solutions.hands.Hands(
        min_detection_confidence=min_detection_confidence,
        min_tracking_confidence=min_tracking_confidence)
    results = detector.process(frame.astype('uint8'))
    xs = [0] * 42  # 0 marks "no landmark detected" for that slot
    ys = [0] * 42
    zs = [0] * 42
    if results.multi_hand_landmarks:
        for hand_idx, hand_landmark in enumerate(results.multi_hand_landmarks):
            for lm_idx in range(21):
                # Consecutive slots: hand 0 fills 0..20, hand 1 fills 21..41.
                slot = hand_idx * 21 + lm_idx
                point = hand_landmark.landmark[lm_idx]
                xs[slot] = point.x
                ys[slot] = point.y
                zs[slot] = point.z
    coords = np.concatenate([xs, ys, zs])
    detector.close()
    return coords
# time to actually do calibration.
SECONDS_TO_DETECT = 2 # in seconds
import os, cv2
from tqdm import tqdm
import numpy as np
ARMFLAPPING_VIDEOS = []
CONTROL_VIDEOS = []
ARMFLAPPING_FPS = [] # store the FPS of all armflapping videos
CONTROL_FPS = [] # store the FPS of all control videos
# load these same file names
import pickle
with open("FILE_NAMES.pkl", 'rb') as f:
    ARMFLAPPING_FILE_NAMES, CONTROL_FILE_NAMES = pickle.load(f)
# Decode every armflapping video into RGB frames, dump each frame to disk,
# and skip clips shorter than SECONDS_TO_DETECT.
for video_name in tqdm(ARMFLAPPING_FILE_NAMES, desc = "armflapping_videos"):
    try:
        # video_name[1:] strips the leading character of the stored name
        # (NOTE(review): presumably a path prefix character — confirm).
        os.mkdir("behavior_data/armflapping/" + video_name[1:])
        cap = cv2.VideoCapture('behavior_data/armflapping/' + video_name)
        frame_rate = cap.get(cv2.CAP_PROP_FPS)
        if cap.get(cv2.CAP_PROP_FRAME_COUNT) / frame_rate < SECONDS_TO_DETECT: continue # too short!
        FRAMES = [] # frames for this video
        i = 0
        while cap.isOpened():
            _, image = cap.read()
            if not _ :
                break
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to RGB.
            FRAMES.append(image)
            cv2.imwrite("behavior_data/armflapping/" + video_name[1:] + "/" + str(i + 1) + ".jpg", image)
            i += 1
        ARMFLAPPING_VIDEOS.append(FRAMES)
        ARMFLAPPING_FPS.append(frame_rate)
    except Exception as e:
        print(e)
        print(f"failed on {video_name}")
# Same decoding pass for the control videos.
for video_name in tqdm(CONTROL_FILE_NAMES, desc = "control_videos"):
    try:
        os.mkdir("behavior_data/control/" + video_name[1:])
        cap = cv2.VideoCapture('behavior_data/control/' + video_name)
        frame_rate = cap.get(cv2.CAP_PROP_FPS)
        if cap.get(cv2.CAP_PROP_FRAME_COUNT) / frame_rate < SECONDS_TO_DETECT: continue # too short!
        FRAMES = [] # frames for this video
        i = 0
        while cap.isOpened():
            _, image = cap.read()
            if not _ :
                break
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to RGB.
            FRAMES.append(image)
            cv2.imwrite("behavior_data/control/" + video_name[1:] + "/" + str(i + 1) + ".jpg", image)
            i += 1
        CONTROL_VIDEOS.append(FRAMES)
        CONTROL_FPS.append(frame_rate)
    except Exception as e:
        print(e)
        print(f"failed on {video_name}")
# Extract the 126-value landmark vector for every frame of every video.
ARMFLAPPING_LOCATIONS, CONTROL_LOCATIONS = [], []
for FRAMES, file_name in tqdm(zip(CONTROL_VIDEOS, CONTROL_FILE_NAMES)):
    locs = []
    for i, frame in enumerate(FRAMES):
        locs.append(hand_locations(np.array(frame)))
        #FRAME = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        #cv2.imwrite("behavior_data/control/" + file_name[1:] + "/" + str(i + 1) + ".jpg", frame)
    CONTROL_LOCATIONS.append(locs)
# get the locations of all of the videos
for FRAMES, file_name in tqdm(zip(ARMFLAPPING_VIDEOS,ARMFLAPPING_FILE_NAMES)) :
    locs = []
    for i, frame in enumerate(FRAMES):
        locs.append(hand_locations(np.array(frame)))
        #FRAME = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        #cv2.imwrite("behavior_data/armflapping/" + file_name[1:] + "/" + str(i + 1) + ".jpg", frame)
    ARMFLAPPING_LOCATIONS.append(locs)
# Truncate both classes to the same number of videos so folds stay balanced.
N = min([len(locs) for locs in [ARMFLAPPING_LOCATIONS, CONTROL_LOCATIONS]])
ARMFLAPPING_LOCATIONS = ARMFLAPPING_LOCATIONS[:N]
CONTROL_LOCATIONS = CONTROL_LOCATIONS[:N]
ARMFLAPPING_LOCATIONS = np.array(ARMFLAPPING_LOCATIONS)
CONTROL_LOCATIONS = np.array(CONTROL_LOCATIONS)
ARMFLAPPING_FILE_NAMES = np.array(ARMFLAPPING_FILE_NAMES[:N])
CONTROL_FILE_NAMES = np.array(CONTROL_FILE_NAMES[:N])
def pad(locations, maxlen = 90, padding = "post", truncating = "post"):
    """Pad or truncate every video's frame sequence to exactly `maxlen` frames.

    Parameters
    ----------
    locations : np.ndarray
        Array of videos; each video is a sequence of equal-length feature rows
        (126 values per frame from `hand_locations`).
    maxlen : int
        Target number of frames per video.
    padding : {"post", "pre"}
        Side on which zero rows are added for short videos.
    truncating : {"post", "pre"}
        Side from which frames are dropped for long videos ("post" keeps the
        beginning, "pre" keeps the end).

    Returns
    -------
    np.ndarray of shape (num_videos, maxlen, num_features).
    """
    padded = []
    for video in locations.tolist():
        frames = np.asarray(video)
        n = len(frames)
        if n > maxlen:
            # Drop surplus frames with one slice (the previous version stayed
            # row-by-row).
            frames = frames[:maxlen] if truncating == "post" else frames[n - maxlen:]
        elif n < maxlen:
            # Build the whole zero block at once: the previous implementation
            # concatenated one empty row per missing frame (O(n^2) copies) and
            # hard-coded a width of 126; infer the width from the data instead.
            width = frames.shape[1] if frames.ndim == 2 else 126
            filler = np.zeros((maxlen - n, width))
            if n == 0:
                frames = filler
            elif padding == "post":
                frames = np.concatenate([frames, filler])
            else:
                frames = np.concatenate([filler, frames])
        padded.append(frames)
    return np.array(padded)
padded_armflapping_locations = ARMFLAPPING_LOCATIONS
padded_control_locations = CONTROL_LOCATIONS
# Fix every video to 90 frames so the sequences stack into one dense array.
padded_armflapping_locations = pad(padded_armflapping_locations, maxlen = 90)
padded_control_locations = pad(padded_control_locations, maxlen = 90)
print(padded_control_locations.shape, padded_armflapping_locations.shape)
assert padded_armflapping_locations.shape == padded_control_locations.shape
ARMFLAPPING_FILE_NAMES = np.array(ARMFLAPPING_FILE_NAMES)
CONTROL_FILE_NAMES = np.array(CONTROL_FILE_NAMES)
# Shuffling intentionally disabled (kept for reference as a string literal).
"""
# first shuffle
N = padded_armflapping_locations.shape[0]
perm = np.random.permutation(N)
padded_armflapping_locations = padded_armflapping_locations[perm]
padded_control_locations = padded_control_locations[perm]
ARMFLAPPING_FILE_NAMES = ARMFLAPPING_FILE_NAMES[perm]
CONTROL_FILE_NAMES = CONTROL_FILE_NAMES[perm]
"""
# then balance each fold (still should do this)
# Split each class into K equal folds, then pair them up per fold with
# label 1 = armflapping and label 0 = control.
padded_armflapping_locations = np.array_split(padded_armflapping_locations, K)
padded_control_locations = np.array_split(padded_control_locations, K)
ARMFLAPPING_FILE_NAMES = np.array_split(ARMFLAPPING_FILE_NAMES, K)
CONTROL_FILE_NAMES = np.array_split(CONTROL_FILE_NAMES, K)
X_splits = []
y_splits = []
for i in range(K):
    X_splits.append(np.concatenate([padded_armflapping_locations[i], padded_control_locations[i]]))
    y_splits.append(np.concatenate([np.ones((len(padded_armflapping_locations[i]), 1)), np.zeros((len(padded_control_locations[i]), 1))]))
import pickle
# Persist each fold plus the raw per-class "seed" arrays for reuse.
for i, (X_split, y_split, A_FILE_NAMES, C_FILE_NAMES) in enumerate(zip(X_splits, y_splits, ARMFLAPPING_FILE_NAMES, CONTROL_FILE_NAMES)):
    with open(f"all_points_folds/split{i+1}", 'wb') as f:
        pickle.dump((X_split, y_split), f)
with open(f"all_point_folds_seeds", "wb") as f:
    pickle.dump((padded_armflapping_locations, ARMFLAPPING_FILE_NAMES, padded_control_locations, CONTROL_FILE_NAMES), f)
# +
# next, track six points
text here to make sure you don't run this cell again. just look in six_point_folds_seeds for all data required.
K = 5
import mediapipe as mp
from PIL import Image as im
import numpy as np
np.random.seed(42) # set a random seed
def hand_locations(frame, min_detection_confidence = 0.5, min_tracking_confidence = 0.5):
    """Detect hands in one frame and return six landmarks per hand.

    Returns a flat vector of 12 x-values, 12 y-values and 12 z-values
    (up to two hands x six landmarks); slots stay 0 when nothing is detected.
    """
    detector = mp.solutions.hands.Hands(min_detection_confidence=min_detection_confidence, min_tracking_confidence=min_tracking_confidence)
    results = detector.process(frame.astype('uint8'))
    xs = [0] * 12
    ys = [0] * 12
    zs = [0] * 12
    if results.multi_hand_landmarks:
        # Landmark indices 0, 4, 8, 12, 16, 20 (wrist + fingertips in
        # MediaPipe's numbering); one running slot replaces the original's
        # three always-equal counters.
        kept_indices = (0, 4, 8, 12, 16, 20)
        slot = 0
        for hand_landmark in results.multi_hand_landmarks:
            for idx in kept_indices:
                point = hand_landmark.landmark[idx]
                xs[slot] = point.x
                ys[slot] = point.y
                zs[slot] = point.z
                slot += 1
    detector.close()
    return np.concatenate([xs, ys, zs])
# time to actually do calibration.
SECONDS_TO_DETECT = 2 # in seconds
import os, cv2
from tqdm import tqdm
import numpy as np
ARMFLAPPING_VIDEOS = []
CONTROL_VIDEOS = []
ARMFLAPPING_FPS = [] # store the FPS of all armflapping videos
CONTROL_FPS = [] # store the FPS of all control videos
import pickle
with open("FILE_NAMES.pkl", 'rb') as f:
ARMFLAPPING_FILE_NAMES, CONTROL_FILE_NAMES = pickle.load(f)
# Extract and cache every frame of every armflapping video.
for video_name in tqdm(ARMFLAPPING_FILE_NAMES, desc = "armflapping_videos"):
    try:
        # NOTE(review): video_name[1:] drops the first character of the name —
        # presumably a leading marker in the file list; confirm.
        # If the frame folder already exists, mkdir raises and the whole video
        # is skipped via the except below.
        os.mkdir("behavior_data/armflapping/" + video_name[1:])
        cap = cv2.VideoCapture('behavior_data/armflapping/' + video_name)
        frame_rate = cap.get(cv2.CAP_PROP_FPS)
        if cap.get(cv2.CAP_PROP_FRAME_COUNT) / frame_rate < SECONDS_TO_DETECT: continue # too short!
        FRAMES = [] # frames for this video
        i = 0
        while cap.isOpened():
            _, image = cap.read()
            if not _ :
                break
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to RGB.
            FRAMES.append(image)
            # Also dump each frame as a JPEG for inspection/reuse.
            cv2.imwrite("behavior_data/armflapping/" + video_name[1:] + "/" + str(i + 1) + ".jpg", image)
            i += 1
        ARMFLAPPING_VIDEOS.append(FRAMES)
        ARMFLAPPING_FPS.append(frame_rate)
    except Exception as e:
        print(e)
        print(f"failed on {video_name}")
# Same extraction pass for the control videos.
for video_name in tqdm(CONTROL_FILE_NAMES, desc = "control_videos"):
    try:
        os.mkdir("behavior_data/control/" + video_name[1:])
        cap = cv2.VideoCapture('behavior_data/control/' + video_name)
        frame_rate = cap.get(cv2.CAP_PROP_FPS)
        if cap.get(cv2.CAP_PROP_FRAME_COUNT) / frame_rate < SECONDS_TO_DETECT: continue # too short!
        FRAMES = [] # frames for this video
        i = 0
        while cap.isOpened():
            _, image = cap.read()
            if not _ :
                break
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to RGB.
            FRAMES.append(image)
            cv2.imwrite("behavior_data/control/" + video_name[1:] + "/" + str(i + 1) + ".jpg", image)
            i += 1
        CONTROL_VIDEOS.append(FRAMES)
        CONTROL_FPS.append(frame_rate)
    except Exception as e:
        print(e)
        print(f"failed on {video_name}")
ARMFLAPPING_LOCATIONS, CONTROL_LOCATIONS = [], []
# Run hand detection on every frame of every control video.
# NOTE(review): CONTROL_VIDEOS only holds videos that were read successfully,
# while CONTROL_FILE_NAMES is the full list — if any video failed above, this
# zip pairs videos with the wrong file names. Confirm.
for FRAMES, file_name in tqdm(zip(CONTROL_VIDEOS, CONTROL_FILE_NAMES)):
    locs = []
    for i, frame in enumerate(FRAMES):
        locs.append(hand_locations(np.array(frame)))
        #FRAME = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        #cv2.imwrite("behavior_data/control/" + file_name[1:] + "/" + str(i + 1) + ".jpg", frame)
    CONTROL_LOCATIONS.append(locs)
# get the locations of all of the videos
for FRAMES, file_name in tqdm(zip(ARMFLAPPING_VIDEOS,ARMFLAPPING_FILE_NAMES)) :
    locs = []
    for i, frame in enumerate(FRAMES):
        locs.append(hand_locations(np.array(frame)))
        #FRAME = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        #cv2.imwrite("behavior_data/armflapping/" + file_name[1:] + "/" + str(i + 1) + ".jpg", frame)
    ARMFLAPPING_LOCATIONS.append(locs)
# Truncate both classes to the same number of videos (class balance).
N = min([len(locs) for locs in [ARMFLAPPING_LOCATIONS, CONTROL_LOCATIONS]])
ARMFLAPPING_LOCATIONS = ARMFLAPPING_LOCATIONS[:N]
CONTROL_LOCATIONS = CONTROL_LOCATIONS[:N]
# NOTE(review): videos have different frame counts at this point, so these
# are ragged until pad() below fixes the length — confirm the numpy version
# in use still accepts ragged input here.
ARMFLAPPING_LOCATIONS = np.array(ARMFLAPPING_LOCATIONS)
CONTROL_LOCATIONS = np.array(CONTROL_LOCATIONS)
ARMFLAPPING_FILE_NAMES = np.array(ARMFLAPPING_FILE_NAMES[:N])
CONTROL_FILE_NAMES = np.array(CONTROL_FILE_NAMES[:N])
# we can create a padding function in order to pad
def pad(locations, maxlen = 90, padding = "post", truncating = "post"):
    """Pad or truncate every video's frame sequence to exactly `maxlen` rows.

    Args:
        locations: array-like of videos; each video is a sequence of 36-dim
            per-frame location vectors. Must support `.tolist()`.
        maxlen: target number of frames per video.
        padding: where zero rows go for short videos — "post" appends,
            "pre" prepends; any other value leaves the video unchanged.
        truncating: which frames survive for long videos — "post" keeps the
            first `maxlen`, "pre" keeps the last `maxlen`.

    Returns:
        np.ndarray of shape (n_videos, maxlen, 36).
    """
    new_locations = locations.tolist()
    for i, video in tqdm(enumerate(new_locations)):
        n_frames = len(video)
        if n_frames < maxlen:
            # Build the whole zero block once instead of concatenating one
            # row per missing frame (the old loop was O(maxlen^2) and
            # re-wrapped the array on every iteration).
            zeros = np.zeros((maxlen - n_frames, 36))
            if padding == "post":
                new_locations[i] = np.concatenate([np.array(video), zeros])
            elif padding == "pre":
                new_locations[i] = np.concatenate([zeros, np.array(video)])
        elif n_frames > maxlen:
            if truncating == "post":
                new_locations[i] = new_locations[i][:maxlen]
            elif truncating == "pre":
                new_locations[i] = new_locations[i][n_frames - maxlen : ]
    return np.array(new_locations)
padded_armflapping_locations = ARMFLAPPING_LOCATIONS
padded_control_locations = CONTROL_LOCATIONS
# Fix every video at 90 frames so the folds stack into tensors.
padded_armflapping_locations = pad(padded_armflapping_locations, maxlen = 90)
padded_control_locations = pad(padded_control_locations, maxlen = 90)
print(padded_control_locations.shape, padded_armflapping_locations.shape)
assert padded_armflapping_locations.shape == padded_control_locations.shape
ARMFLAPPING_FILE_NAMES = np.array(ARMFLAPPING_FILE_NAMES)
CONTROL_FILE_NAMES = np.array(CONTROL_FILE_NAMES)
# first shuffle
# File names are permuted together with their location tensors, so
# (video, name) pairs stay aligned. The same perm is reused for both classes.
N = padded_armflapping_locations.shape[0]
perm = np.random.permutation(N)
padded_armflapping_locations = padded_armflapping_locations[perm]
padded_control_locations = padded_control_locations[perm]
ARMFLAPPING_FILE_NAMES = ARMFLAPPING_FILE_NAMES[perm]
CONTROL_FILE_NAMES = CONTROL_FILE_NAMES[perm]
# then balance each fold
# K chunks per class; fold i = armflapping chunk i + control chunk i.
padded_armflapping_locations = np.array_split(padded_armflapping_locations, K)
padded_control_locations = np.array_split(padded_control_locations, K)
ARMFLAPPING_FILE_NAMES = np.array_split(ARMFLAPPING_FILE_NAMES, K)
CONTROL_FILE_NAMES = np.array_split(CONTROL_FILE_NAMES, K)
X_splits = []
y_splits = []
for i in range(K):
    X_splits.append(np.concatenate([padded_armflapping_locations[i], padded_control_locations[i]]))
    # Labels: 1 = armflapping, 0 = control, shaped (n, 1).
    y_splits.append(np.concatenate([np.ones((len(padded_armflapping_locations[i]), 1)), np.zeros((len(padded_control_locations[i]), 1))]))
import pickle
# Persist each fold, plus one "seeds" file holding everything for reproducibility.
for i, (X_split, y_split, A_FILE_NAMES, C_FILE_NAMES) in enumerate(zip(X_splits, y_splits, ARMFLAPPING_FILE_NAMES, CONTROL_FILE_NAMES)):
    with open(f"six_point_folds/split{i+1}", 'wb') as f:
        pickle.dump((X_split, y_split), f)
with open(f"six_point_folds_seeds", "wb") as f:
    pickle.dump((padded_armflapping_locations, ARMFLAPPING_FILE_NAMES, padded_control_locations, CONTROL_FILE_NAMES), f)
# +
# finally, only track one point
K = 5
import mediapipe as mp
from PIL import Image as im
import mediapipe as mp
def hand_locations(frame, min_detection_confidence = 0.5, min_tracking_confidence = 0.5):
    """Detect hands in one frame and return only landmark 0 per hand.

    Result is a flat vector [x0, x1, y0, y1, z0, z1] for up to two hands;
    slots stay 0 when nothing is detected.
    """
    detector = mp.solutions.hands.Hands(min_detection_confidence=min_detection_confidence, min_tracking_confidence=min_tracking_confidence)
    results = detector.process(frame.astype('uint8'))
    xs = [0] * 2
    ys = [0] * 2
    zs = [0] * 2
    if results.multi_hand_landmarks:
        for hand, hand_landmark in enumerate(results.multi_hand_landmarks):
            # The original looped over all 21 landmarks and broke after the
            # first iteration, which is equivalent to reading landmark 0 directly.
            first = hand_landmark.landmark[0]
            xs[hand], ys[hand], zs[hand] = first.x, first.y, first.z
    detector.close()
    return np.concatenate([xs, ys, zs])
# time to actually do calibration.
SECONDS_TO_DETECT = 2 # in seconds
import os, cv2
from tqdm import tqdm
import numpy as np
ARMFLAPPING_VIDEOS = []
CONTROL_VIDEOS = []
ARMFLAPPING_FPS = [] # store the FPS of all armflapping videos
CONTROL_FPS = [] # store the FPS of all control videos
# load these same file names
import pickle
with open("FILE_NAMES.pkl", 'rb') as f:
ARMFLAPPING_FILE_NAMES, CONTROL_FILE_NAMES = pickle.load(f)
# Same frame-extraction pass as the six-point pipeline: cache frames and FPS.
for video_name in tqdm(ARMFLAPPING_FILE_NAMES, desc = "armflapping_videos"):
    try:
        # NOTE(review): mkdir raises if the folder already exists (it will,
        # after the earlier passes), skipping the video via the except below —
        # confirm this re-run behavior is intended.
        os.mkdir("behavior_data/armflapping/" + video_name[1:])
        cap = cv2.VideoCapture('behavior_data/armflapping/' + video_name)
        frame_rate = cap.get(cv2.CAP_PROP_FPS)
        if cap.get(cv2.CAP_PROP_FRAME_COUNT) / frame_rate < SECONDS_TO_DETECT: continue # too short!
        FRAMES = [] # frames for this video
        i = 0
        while cap.isOpened():
            _, image = cap.read()
            if not _ :
                break
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to RGB.
            FRAMES.append(image)
            cv2.imwrite("behavior_data/armflapping/" + video_name[1:] + "/" + str(i + 1) + ".jpg", image)
            i += 1
        ARMFLAPPING_VIDEOS.append(FRAMES)
        ARMFLAPPING_FPS.append(frame_rate)
    except Exception as e:
        print(e)
        print(f"failed on {video_name}")
# Same extraction pass for the control videos.
for video_name in tqdm(CONTROL_FILE_NAMES, desc = "control_videos"):
    try:
        os.mkdir("behavior_data/control/" + video_name[1:])
        cap = cv2.VideoCapture('behavior_data/control/' + video_name)
        frame_rate = cap.get(cv2.CAP_PROP_FPS)
        if cap.get(cv2.CAP_PROP_FRAME_COUNT) / frame_rate < SECONDS_TO_DETECT: continue # too short!
        FRAMES = [] # frames for this video
        i = 0
        while cap.isOpened():
            _, image = cap.read()
            if not _ :
                break
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # convert to RGB.
            FRAMES.append(image)
            cv2.imwrite("behavior_data/control/" + video_name[1:] + "/" + str(i + 1) + ".jpg", image)
            i += 1
        CONTROL_VIDEOS.append(FRAMES)
        CONTROL_FPS.append(frame_rate)
    except Exception as e:
        print(e)
        print(f"failed on {video_name}")
ARMFLAPPING_LOCATIONS, CONTROL_LOCATIONS = [], []
# Per-frame landmark-0 coordinates for every control video.
# NOTE(review): as in the six-point pass, zip pairs the successfully-read
# videos with the full file-name list — misaligned if any video failed.
for FRAMES, file_name in tqdm(zip(CONTROL_VIDEOS, CONTROL_FILE_NAMES)):
    locs = []
    for i, frame in enumerate(FRAMES):
        locs.append(hand_locations(np.array(frame)))
        #FRAME = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        #cv2.imwrite("behavior_data/control/" + file_name[1:] + "/" + str(i + 1) + ".jpg", frame)
    CONTROL_LOCATIONS.append(locs)
# get the locations of all of the videos
for FRAMES, file_name in tqdm(zip(ARMFLAPPING_VIDEOS,ARMFLAPPING_FILE_NAMES)) :
    locs = []
    for i, frame in enumerate(FRAMES):
        locs.append(hand_locations(np.array(frame)))
        #FRAME = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        #cv2.imwrite("behavior_data/armflapping/" + file_name[1:] + "/" + str(i + 1) + ".jpg", frame)
    ARMFLAPPING_LOCATIONS.append(locs)
# Balance the classes by truncating both to the smaller video count.
N = min([len(locs) for locs in [ARMFLAPPING_LOCATIONS, CONTROL_LOCATIONS]])
ARMFLAPPING_LOCATIONS = ARMFLAPPING_LOCATIONS[:N]
CONTROL_LOCATIONS = CONTROL_LOCATIONS[:N]
ARMFLAPPING_LOCATIONS = np.array(ARMFLAPPING_LOCATIONS)
CONTROL_LOCATIONS = np.array(CONTROL_LOCATIONS)
ARMFLAPPING_FILE_NAMES = np.array(ARMFLAPPING_FILE_NAMES[:N])
CONTROL_FILE_NAMES = np.array(CONTROL_FILE_NAMES[:N])
# we can create a padding function in order to pad
def pad(locations, maxlen = 90, padding = "post", truncating = "post"):
    """Pad or truncate every video's frame sequence to exactly `maxlen` rows.

    One-point variant: frames are 6-dim vectors (x/y/z for up to two hands).

    Args:
        locations: array-like of videos; each video is a sequence of 6-dim
            per-frame location vectors. Must support `.tolist()`.
        maxlen: target number of frames per video.
        padding: "post" appends zero rows, "pre" prepends; any other value
            leaves a short video unchanged.
        truncating: "post" keeps the first `maxlen` frames, "pre" the last.

    Returns:
        np.ndarray of shape (n_videos, maxlen, 6).
    """
    new_locations = locations.tolist()
    for i, video in tqdm(enumerate(new_locations)):
        n_frames = len(video)
        if n_frames < maxlen:
            # Concatenate the whole zero block once instead of one row per
            # missing frame (the old loop was O(maxlen^2)).
            zeros = np.zeros((maxlen - n_frames, 6))
            if padding == "post":
                new_locations[i] = np.concatenate([np.array(video), zeros])
            elif padding == "pre":
                new_locations[i] = np.concatenate([zeros, np.array(video)])
        elif n_frames > maxlen:
            if truncating == "post":
                new_locations[i] = new_locations[i][:maxlen]
            elif truncating == "pre":
                new_locations[i] = new_locations[i][n_frames - maxlen : ]
    return np.array(new_locations)
padded_armflapping_locations = ARMFLAPPING_LOCATIONS
padded_control_locations = CONTROL_LOCATIONS
# Fix every video at 90 frames so each class stacks into a single tensor.
padded_armflapping_locations = pad(padded_armflapping_locations, maxlen = 90)
padded_control_locations = pad(padded_control_locations, maxlen = 90)
print(padded_control_locations.shape, padded_armflapping_locations.shape)
assert padded_armflapping_locations.shape == padded_control_locations.shape
ARMFLAPPING_FILE_NAMES = np.array(ARMFLAPPING_FILE_NAMES)
CONTROL_FILE_NAMES = np.array(CONTROL_FILE_NAMES)
"""
# first shuffle
N = padded_armflapping_locations.shape[0]
perm = np.random.permutation(N)
padded_armflapping_locations = padded_armflapping_locations[perm]
padded_control_locations = padded_control_locations[perm]
ARMFLAPPING_FILE_NAMES = ARMFLAPPING_FILE_NAMES[perm]
CONTROL_FILE_NAMES = CONTROL_FILE_NAMES[perm]
"""
# NOTE(review): the shuffle is commented out in this (one-point) variant but
# active in the six-point variant — confirm the folds are meant to differ.
# then balance each fold (still should do this)
padded_armflapping_locations = np.array_split(padded_armflapping_locations, K)
padded_control_locations = np.array_split(padded_control_locations, K)
ARMFLAPPING_FILE_NAMES = np.array_split(ARMFLAPPING_FILE_NAMES, K)
CONTROL_FILE_NAMES = np.array_split(CONTROL_FILE_NAMES, K)
X_splits = []
y_splits = []
for i in range(K):
    X_splits.append(np.concatenate([padded_armflapping_locations[i], padded_control_locations[i]]))
    # Labels: 1 = armflapping, 0 = control, shaped (n, 1).
    y_splits.append(np.concatenate([np.ones((len(padded_armflapping_locations[i]), 1)), np.zeros((len(padded_control_locations[i]), 1))]))
import pickle
for i, (X_split, y_split, A_FILE_NAMES, C_FILE_NAMES) in enumerate(zip(X_splits, y_splits, ARMFLAPPING_FILE_NAMES, CONTROL_FILE_NAMES)):
    with open(f"one_point_folds/split{i+1}", 'wb') as f:
        pickle.dump((X_split, y_split), f)
with open(f"one_point_folds_seeds", "wb") as f:
    pickle.dump((padded_armflapping_locations, ARMFLAPPING_FILE_NAMES, padded_control_locations, CONTROL_FILE_NAMES), f)
# +
def mean_locs(frame):
    """Collapse a 126-value frame to six means: the nonzero-mean of each
    21-value segment (x/y/z per hand, two hands)."""
    assert len(frame) == 126, len(frame)  # 2 hands * 3 axes * 21 landmarks
    segments = [frame[21 * k : 21 * (k + 1)] for k in range(6)]
    # Average only the nonzero entries; an all-zero segment yields NaN
    # (the notebook cleans those up later with np.nan_to_num).
    return [seg[np.nonzero(seg)].mean() for seg in segments]
new_X = []
# Collapse each frame's 126 raw coordinates to 6 per-segment means.
# NOTE(review): X, y and K are presumably defined in an earlier cell — confirm.
for video in range(X.shape[0]):
    matrix_video = X[video]
    # Apply mean_locs to each row (frame) of the (frames, 126) matrix.
    new_X.append(np.apply_along_axis(mean_locs, 1, matrix_video))
X = np.array(new_X, copy = True)
assert len(X.shape) == 3
splits = [] # stores k (X_i, y_i) splits
X_splits, y_splits = np.array_split(X, K), np.array_split(y, K)
for X_split, y_split in zip(X_splits, y_splits):
    splits.append((X_split, y_split))
# NOTE(review): X may still contain NaNs here (all-zero segments in mean_locs);
# the nan_to_num pass happens in a later cell, after this dump.
for i, split in enumerate(splits):
    with open(f"mean_point_folds/split{i+1}", 'wb') as f:
        pickle.dump(split, f)
# +
with open("temp_X.pkl", 'wb') as f:
pickle.dump(X, f)
with open("temp_X.pkl", 'rb') as f:
X = pickle.load(f)
# +
X = np.nan_to_num(X)
splits = [] # stores k (X_i, y_i) splits
X_splits, y_splits = np.array_split(X, K), np.array_split(y, K)
for X_split, y_split in zip(X_splits, y_splits):
splits.append((X_split, y_split))
for i, split in enumerate(splits):
with open(f"mean_point_folds/split{i+1}", 'wb') as f:
pickle.dump(split, f)
# -
# One-off cleanup: re-encode every cached control frame from BGR to RGB on disk.
# NOTE(review): cv2/os are imported only on the line after this loop in the
# linearized notebook — this cell relies on them already being in scope.
for file in os.listdir("/Users/anish/Documents/Machine Learning Env/ActivRecognition-Autism-Diagnosis/behavior_data/control"):
    # Plain files are the raw videos; only the per-video frame folders matter here.
    if os.path.isfile("/Users/anish/Documents/Machine Learning Env/ActivRecognition-Autism-Diagnosis/behavior_data/control/" + file):
        continue
    else:
        #shutil.rmtree("/Users/anish/Documents/Machine Learning Env/ActivRecognition-Autism-Diagnosis/behavior_data/armflapping/" + file)
        for vid_name in os.listdir("/Users/anish/Documents/Machine Learning Env/ActivRecognition-Autism-Diagnosis/behavior_data/control/" + file):
            # Read, swap channels, then overwrite the image in place.
            image = cv2.imread("/Users/anish/Documents/Machine Learning Env/ActivRecognition-Autism-Diagnosis/behavior_data/control/" + file + "/" + vid_name)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            os.remove("/Users/anish/Documents/Machine Learning Env/ActivRecognition-Autism-Diagnosis/behavior_data/control/" + file + "/" + vid_name)
            cv2.imwrite("/Users/anish/Documents/Machine Learning Env/ActivRecognition-Autism-Diagnosis/behavior_data/control/" + file + "/" + vid_name, image)
import cv2, os
| create_folds.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [Oregon Curriculum Network](http://www.4dsolutions.net/ocn) <br />
# [Discovering Math with Python](Introduction.ipynb)
#
# # DISCOVERING MATH WITH PYTHON
#
#
# # Introduction
#
# A primary intended audience for the chapters that follow is a teacher looking for another better way to encapsulate a lesson plan in the form of an interactive worksheet.
#
# Jupyter Notebooks, an outgrowth of the I-Python Notebook project, are designed as a front end whereby the reader is not passive, but welcome and invited to change both the code and surrounding text. Add more pictures, more examples. Modify whole sections.
#
# This particular set of chapters does not aim to teach Jupyter Notebook skills so much as to demonstrate how they might be deployed, as worksheets subject to change, by students, by other teachers. A given Notebook has the potential to "go viral" as we say, especially given a Creative Commons type license.
#
# When sharing these Notebooks with other teachers, I recommend keeping these chapters together as a set. That's not a commandment or edict however. Use these materials as you see fit.
#
# You may be wondering at my choice of topics. What drives me to introduce such an exotic approach to polyhedra for example? Rather than go over all that here (it's a long story), let me refer you to my blogs, and to [Focal Points](http://mybizmo.blogspot.com/2006/09/focal-points.html) as a good starting place.
#
# The original home of these chapters is/was a Github account maintained by [4Dsolutions.net](http://www.4dsolutions.net), the domain behind what I call the *Oregon Curriculum Network*. Other Jupyter Notebooks, as well as ordinary Python source code files, made up [the original repository](https://github.com/4dsolutions/Python5/).
#
# I invite you to journey back to these primal roots. Explore!
#
# # Table of Contents
#
# * [Chapter 1: Welcome to Python](Welcome%20to%20Python.ipynb)
# * [Chapter 2: Functions At Work](Functions%20At%20Work.ipynb)
# * [Chapter 3: A First Class](A%20First%20Class.ipynb)
# * [Chapter 4: Clock Arithmetic](Clock%20Arithmetic.ipynb)
# * [Chapter 5: Public Key Cryptography](Public%20Key%20Cryptography.ipynb)
# * [Chapter 6: Vectors in Space](Vectors%20in%20Space.ipynb)
# * [Chapter 7: Polyhedrons](Polyhedrons.ipynb)
# * [Chapter 8: Transformations](Transformations.ipynb)
# * Chapter 9: Symmetry Groups
# * Chapter 10: Complex Numbers
# * [Chapter 11: Quaternions](Quaternions.ipynb)
# * [Chapter 12: The Mandelbrot Set](Mandelbrot%20Set.ipynb)
| Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="figures/k2_pix_small.png">
# *This notebook contains an excerpt instructional material from [gully](https://twitter.com/gully_) and the [K2 Guest Observer Office](https://keplerscience.arc.nasa.gov/); the content is available [on GitHub](https://github.com/gully/goldenrod).*
#
# <!--NAVIGATION-->
# < [KEGS metadata and sample overview](01.01-KEGS-sample-overview.ipynb) | [Contents](Index.ipynb) | [Fetch all the KEGS data with `wget`](01.03-wget-all-KEGS-data.ipynb) >
# # Custom tile target apertures on galaxies
#
# The Kepler spacecraft is over 1 AU away from the Earth, and relies on low-bandwidth communication to telemeter data back to Earth. Accordingly, only a $\sim5$% portion of the pixels can be telemetered, so we only download small postage stamp windows rather than the full CCD mosaic. The sizes of these *target apertures* are assigned programmatically based on software designed for the original Kepler prime mission and now retrofitted for K2. The scientifically needed target aperture sizes sometimes exceed the programmatically assigned target aperture size. In these cases, manual overrides called "tiles" are placed on the desired target. These tiles sometimes violate the simplistic assumptions of the Kepler pipeline, complicating the extraction of lightcurves. See the [K2 Handbook](http://archive.stsci.edu/k2/manuals/k2_handbook.pdf) Section 2.2 for more details on K2 target management.
#
# Because of their added complexity, it is worth looking into how many of the KEGS targets were assigned custom apertures, these so-called "tiles".
import pandas as pd
import numpy as np
# Change the path below to wherever you have the [K2-metadata repository](https://github.com/gully/k2-metadata)
K2_GO_metadata_path = '../../K2-metadata/metadata/tidy/GO_proposal_metadata.csv'
df_GO = pd.read_csv(K2_GO_metadata_path, dtype={'campaign':str}, usecols=['EPIC ID','Investigation IDs', 'campaign'])
df_GO.shape
# We'll find all the KEGS targets in the same way as we did in a previous notebook.
# +
KEGS_ids = ['GO14078','GO14079','GO12068','GO12116','GO10053','GO10070',
'GO8053','GO8070','GO6077','GO5096','GO4096','GO3048','GO1074','GO0103']
search_pattern = '|'.join(KEGS_ids)
df_GO['KEGS_target'] = df_GO['Investigation IDs'].str.contains(search_pattern)
df_GO = df_GO[df_GO['KEGS_target']]
df_GO.head()
# -
df_GO['Is_TILE'] = df_GO['Investigation IDs'].str.contains('TILE|SQUARE_GALAXY')
df_GO.head()
df_GO.Is_TILE.value_counts()
# There are 108 instances in which a KEGS galaxy target is on a tile. The vast majority of KEGS targets are on "regular", programmatically-assigned apertures.
# Let's take a look at those 108.
df_GO[df_GO.Is_TILE].tail()
# Indeed, it looks like galaxies with large solid-angles on the sky, like NGC3412, were assigned custom masks. Let's find the unique entries only.
# We will have to perform some slightly advanced pandas methods: data cleaning, string manipulation, aggregation and filtering.
df_GO['Investigation IDs'] = df_GO['Investigation IDs'].str.strip(' ')
df_GO[df_GO.Is_TILE]['Investigation IDs'].str.split('|').tail()
df_GO['Investigator_list'] = df_GO['Investigation IDs'].str.split('|')
tile_targets = df_GO.Investigator_list[df_GO.Is_TILE].reset_index(drop=True)
tile_targets.tail()
KEGS_targs_on_tiles = tile_targets.aggregate(np.concatenate)
np.unique(KEGS_targs_on_tiles)
set(KEGS_targs_on_tiles) - set(KEGS_ids)
# OK so there were six named galaxies observed by KEGS that were so large on the sky as to require custom `TILE` apertures, and another `SQUARE_GALAXY` category presumably for a cluster of a few galaxies.
# These targets all originate from the [GO14078 proposal](https://keplerscience.arc.nasa.gov/data/k2-programs/GO14078.txt).
# As noted in the previous notebook, these tiles can cause over-counting of targets. For example if you simply counted all the unique EPIC IDs associated with a proposal you would get a higher number than the number of unique targets.
in_GO14078 = df_GO['Investigation IDs'].str.contains('GO14078')
in_GO14078.sum()
# Match these sources with the K2 target index.
targ_index_path = '../../k2-target-index/k2-target-pixel-files.csv.gz'
# %time df_targ = pd.read_csv(targ_index_path)
df_comb = pd.merge(df_GO[ in_GO14078 ], df_targ, how='left', left_on='EPIC ID', right_on='keplerid')
df_comb[df_comb.columns[0:10]].head()
df_comb.filename.nunique()
# Campaign 14 is not yet in the K2-target index!
# <!--NAVIGATION-->
# < [KEGS metadata and sample overview](01.01-KEGS-sample-overview.ipynb) | [Contents](Index.ipynb) | [Fetch all the KEGS data with `wget`](01.03-wget-all-KEGS-data.ipynb) >
| notebooks/01.02-Custom-tile-apertures-on-galaxies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
data=pd.read_csv("F:\\ML_DATASETS\\diabetes2.csv")
data.shape
data.head()
data.isnull().values.any() # to check if null value is present or not
data.dropna(inplace=True)
import seaborn as sns
import matplotlib.pyplot as plt
# get correlations of each features in dataset
corl=data.corr()
top_corr_features=corl.index
plt.figure(figsize=(20,20))
# plot heat map
r=sns.heatmap(data[top_corr_features].corr(),annot=True,cmap='RdYlGn')
data.corr()
sns.countplot(data['Outcome'],label='count')
data.head(5)
# Train Test split using sklearn
from sklearn.model_selection import train_test_split
x=data.loc[:,data.columns!='Outcome'] # data
y=data['Outcome'] # target
X_train,X_test,y_train,y_test=train_test_split(x,y,random_state=66)
# # Training and Testing the model
from sklearn.neighbors import KNeighborsClassifier
training_accuracy=[]
test_accuracy=[]
# Sweep k from 1 to 10 and record train/test accuracy for each setting.
neighbors_no=range(1,11)
for n_neighbors in neighbors_no:
    # build the model
    knn=KNeighborsClassifier(n_neighbors=n_neighbors)
    knn.fit(X_train,y_train)
    training_accuracy.append(knn.score(X_train,y_train))
    test_accuracy.append(knn.score(X_test,y_test))
# Plot both curves to eyeball the bias/variance trade-off over k.
plt.plot(neighbors_no,training_accuracy,label="Training accuracy")
plt.plot(neighbors_no,test_accuracy,label="Test accuracy")
plt.xlabel("n_neighbors")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
knn=KNeighborsClassifier(n_neighbors=9)
knn.fit(X_train,y_train)
X_new=np.array([[5,147,70,35,0,33.6,0.657,54,2]])
print("Xnew_shape:",X_new.shape)
prediction=knn.predict(X_new)
print("prediction: ",prediction)
y_pred=knn.predict(X_test)
print('Test set prediction : ',y_pred)
print("Test set score:",np.mean(y_pred==y_test))
print('Accuracy of K-NN classifier on training set:',knn.score(X_train,y_train))
print('Accuracy of K-NN classifier on test set:',knn.score(X_test,y_test))
from sklearn.metrics import accuracy_score,confusion_matrix,roc_curve,roc_auc_score
# AUC computed from hard class predictions; for a probability-based AUC use
# knn.predict_proba(X_test)[:, 1] instead of y_pred.
auc = roc_auc_score(y_test, y_pred)
auc
# Fix: `sklearn.externals.joblib` was deprecated in scikit-learn 0.21 and
# removed in 0.23 — import joblib directly.
import joblib
# Pass file paths so joblib opens and closes the files itself; the previous
# open(...) handles were never closed.
joblib.dump(knn, 'model.pkl')
model = joblib.load('model.pkl')
# Sanity-check the round-tripped model on one sample.
prediction = model.predict([[5,147,70,35,0,33.6,0.657,54,2]])
print(prediction)
| Diabetes_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pylab inline
# %load_ext autoreload
# %autoreload 2
import os
sys.path.append('..')
from dataset import DatasetGenerator, DatasetFromFile
import matplotlib.pyplot as pylab
import pprint
import numpy as np
# +
data_class_list_1 = [
'position_non_aligned_scale',
'position_common_scale',
'angle',
'length',
'direction',
'curvature'
]
data_class_list_2 = [
'area',
'volume'
]
# Generate the six data classes in list 1 with 100k total samples each.
for data_class in data_class_list_1:
    dataset_generator = DatasetGenerator(
        data_class=data_class,
        counts={"train": 60000, "val": 20000, "test": 20000}
    )
    dataset_generator.generate()
# Area and volume use 10x smaller splits — NOTE(review): presumably because
# they are more expensive to generate; confirm.
for data_class in data_class_list_2:
    dataset_generator = DatasetGenerator(
        data_class=data_class,
        counts={"train": 6000, "val": 2000, "test": 2000}
    )
    dataset_generator.generate()
# -
import pickle

def _plot_label_distribution(label):
    """Load the pickled label-distribution dict for `label` and bar-plot it.

    Prints the number of distinct label values and draws one figure, exactly
    as the four copy-pasted cells this replaces did.
    """
    with open("output/" + label + "/distributions_label_distribution_0.p", "rb") as file:
        data = pickle.load(file)
    pyplot.figure()
    # Keys are stringified ints; sort numerically so the bars come out ordered.
    x = sorted(list(map(int, data[label].keys())))
    print(len(x))
    y = [data[label][str(key)] for key in x]
    pyplot.bar(x=x,height=y)
    pyplot.xlabel(label)

# The original four cells were identical up to the label name — run as a loop,
# preserving the original order of figures and prints.
for label in ["area", "volume", "curvature", "position_common_scale"]:
    _plot_label_distribution(label)
| IPY/full_generation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from stat_helper import *
from hdf_helper import *
from data_cleaning import *
from scipy.ndimage import gaussian_filter1d
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns
from dateutil.parser import parse
from datetime import datetime, date, time, timedelta
import os
import re
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import IsolationForest
from scipy import stats
# %matplotlib inline
# -
# Load one HDF5 file, fix its big-endian dtypes, and compute channel stats.
# NOTE(review): h5_to_df / big_endian_problem / get_stats_list are
# project-local helpers imported above.
path = 'competitionfiles/COOLCAT_20091219_074253_24_20091219_074253_240.hdf'
df = big_endian_problem(h5_to_df(path))
stats_df = get_stats_list(df)
# +
df_arr = []
len_ch_arr = []  # number of channels (columns) per file
len_in_arr = []  # number of datapoints (rows) per file
files = os.listdir('./competitionfiles')
# Sample the first 50 files.
# NOTE(review): each file is parsed twice (once for columns, once for the
# index); reading it once into a local would halve the I/O.
for file in files[:50]:
    len_ch_arr.append(len(h5_to_df('competitionfiles/' + file).columns))
    len_in_arr.append(len(h5_to_df('competitionfiles/' + file).index))
# -
import seaborn as sns
# +
plt.title('Number of Channels Across Data')
plt.xlabel('Number of Channels')
plt.ylabel('Number of files')
plt.hist(ch_arr, bins=10, alpha=0.5)
# -
in_arr_srs = pd.Series(in_arr)
plt.title('Number of Datapoints Across Data')
plt.xlabel('Number of Datapoints')
plt.ylabel('Number of Files')
plt.hist(in_arr_srs, bins=30, alpha=0.5)
# +
files = os.listdir('./competitionfiles')
stats_list = []
for file in files[:100]:
stats_list.append(get_stats_list(h5_to_df('competitionfiles/' + file)))
# +
# Change the second index to change between stats
# order of [calc_mean(df),calc_median(df),calc_std(df),calc_max(df),calc_min(df)]
total_mean = stats_list[0][0][:]
for i in range(1,len(stats_list)):
total_mean += stats_list[i][0][:]
total_mean /= len(stats_list)
plt.title('Channel Data Mean')
plt.hist(total_mean, bins=10, alpha=0.5)
# -
total_mean
| Presentation_Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Basic walkthrough
#
# Here is a basic walkthrough of how to run the whole pipeline from preprocessing to training the GAN. The walkthrough is only one example; to see what one can do to tune and tweak different datasets, hyperparameters, and to switch between models, see `Tuning.ipynb`. Every script should be run (```# RUN```) from the repository folder.
#
# -------
# ### Datasets
# The combined dataset with CNN/Daily mail and Exabel data is used. It has 255157 articles with summaries.
#
#
# ### Preprocess datasets
# #### CNN dataset
# - The tokenized version of the CNN/Daily Mail dataset can be downloaded from [Dataset Link](https://github.com/JafferWilson/Process-Data-of-CNN-DailyMail).
# - Create a folder `data` in the parent folder of the repo, on the same level as the repo. There is a datafolder in the repo with dummyfiles in the leafnodes you can move.
# - Place the tokenized data in the `cnn_clean` folder
#
#
# #### Exabel Dataset:
# - Get from Erlend Aune in json-format
# - Place in `data` folder
#
# **save the exabel data in the same format as CNN/DM**
# ```sh
# # RUN
# python3 preprocess/clean_exa.py
# ```
#
# ```sh
# # RUN
# python3 preprocess/combined_preprocess.py
# ```
#
# **create vocabulary and preprocess all the articles from both the CNN, daily mail and exabel and save as pickle**
# ```sh
# # RUN
# python3 preprocess/preprocess_pointer.py
# ```
# -------
# ### Generator pretraining
# ```sh
# # RUN
# python3 training/seq2seq/run_experiment.py training/seq2seq/experiments/combined_test_1 0
# ```
# PS: it prints after every epoch. But logs more frequently in output.log in ```training/seq2seq/experiments/combined_test_1```
#
#
# **time to run: **
# 40h ++
#
# ----------
# ### Generate fake data to train discriminator:
#
# Creates samples using the generator as fake ones.
# ```sh
# # RUN
# python3 evaluation/seq2seq/generate_fake_sampled_data.py 0
# ```
#
# - The data is saved in ```../data/cnn_validation_sampled_data```.
# - Move the data over to the sibling folder: ```cnn_fake_data```.
# - it is possible to have several fake_data files from different generators. Then evaluate a discriminator on different datasets.
# ---
# ### Generate real data to train discriminator:
# ```sh
# # RUN
# python3 evaluation/seq2seq/generate_real_data.py
# ```
#
# - the real data is then saved in ```../data/cnn_real_data```
# ---
# ### Discriminator pretraining
# - first specify name of real_data_file and fake_data_directory in ```training/classifier/experiments/cnn_test_1/config.json```
#
# ```sh
# # RUN
# python3 training/classifier/run_experiment.py training/classifier/experiments/cnn_test_1 0
# ```
#
# - pretrains the Discriminator on the fake data and real data generated
# - Saves the model in the same folder as the config.json file
# ---
# ### GAN training
# ```sh
# # RUN
# python3 training/GAN/run_experiment.py training/GAN/experiments/hb_cnn_test_1 0
# ```
#
# ---
# ### Evaluating
#
# To evaluate generator models you yourself have to manually copy the models you want evaluated into a fitting ```models_first```-folder
#
# #### Evaluate generator
# To evaluate all the models through all epochs put them in a folder and call this:
# ```sh
# # RUN
# python3 evaluation/seq2seq/evaluate_multiple_generators.py output_for_eval/seq2seq/cnn/models_first 0
# # or
# python3 evaluation/seq2seq/evaluate_multiple_generators.py output_for_eval/gan/cnn/models_first 0
# ```
# **time to run:** overnight
#
# To calculate rouge for results of all the models:
# ```sh
# # RUN
# python3 evaluation/seq2seq/calculate_rouge_in_folder.py output_for_eval/seq2seq/cnn/models_first_eval
# # or
# python3 evaluation/seq2seq/calculate_rouge_in_folder.py output_for_eval/gan/cnn/models_first_eval
# ```
#
# #### Evaluate Discriminator
# ```sh
# # RUN
# python3 evaluation/classifier/test_pretrained_classifier.py 0
# ```
#
# Evaluates the pretrained discriminator (classifier)
#
# **time to run:** seconds
# ---------
# ### Tensorboard
# To see the statistics that are saved in ```log/``` during training.
#
# ```sh
# tensorboard --logdir log
# ```
#
| jupyter/Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pre-processing
from utils import *
Subjects = ['A','B']
Subject = Subjects[1]
samps = 64
Target,Target_label,NonTarget,NonTarget_label = train_data_and_label(Subject,samps)
Target, Target_label = shuffled_data(Target, Target_label)
NonTarget, NonTarget_label = shuffled_data(NonTarget, NonTarget_label)
train_P300_dataset = Target
train_P300_label = Target_label
train_non_P300_dataset = NonTarget
train_non_P300_label = NonTarget_label
print ('train_P300_dataset:' + str(train_P300_dataset.shape))
print ('train_P300_label:' + str(train_P300_label.shape))
print ('train_non_P300_dataset:'+ str(train_non_P300_dataset.shape))
print ('train_non_P300_label:' + str(train_non_P300_label.shape))
Target,Target_label,NonTarget,NonTarget_label = test_data_and_label(Subject,samps)
Target, Target_label = shuffled_data(Target, Target_label)
NonTarget, NonTarget_label = shuffled_data(NonTarget, NonTarget_label)
test_P300_dataset = Target
test_P300_label = Target_label
test_non_P300_dataset = NonTarget
test_non_P300_label = NonTarget_label
print ('test_P300_dataset:' + str(test_P300_dataset.shape))
print ('test_P300_label:' + str(test_P300_label.shape))
print ('test_non_P300_dataset:'+ str(test_non_P300_dataset.shape))
print ('test_non_P300_label:' + str(test_non_P300_label.shape))
# # Experiment
import sklearn.metrics as skm
from sklearn import preprocessing
from torch.utils.data import Dataset,DataLoader
import torch
import torch.optim as optim
from torch.autograd import Variable
import matplotlib.pyplot as plt
def data_prepocessing(data):
    """Return a copy of *data* with each trial L2-normalized.

    Each slice data[i, :, :] (one trial) is row-normalized with sklearn's
    preprocessing.normalize using the L2 norm; the input is left untouched.
    """
    normalized = np.zeros(data.shape)
    for trial_index, trial in enumerate(data):
        normalized[trial_index, :, :] = preprocessing.normalize(trial, norm='l2')
    return normalized
def add_dimension(x, y):
    """Reshape *x* to (n, d2, d1) and *y* to a column vector, both as float32.

    NOTE(review): this is a reshape, not a transpose -- element order is
    preserved while the last two axis lengths are swapped; confirm that is
    the intended channel/time layout.
    """
    reshaped_x = x.reshape(x.shape[0], x.shape[2], x.shape[1]).astype('float32')
    reshaped_y = y.reshape(y.shape[0], 1).astype('float32')
    return reshaped_x, reshaped_y
# +
train_data = np.vstack((train_P300_dataset,train_non_P300_dataset))
train_label = np.vstack((train_P300_label,train_non_P300_label))
train_data, train_label = shuffled_data(train_data, train_label)
test_data = np.vstack((test_P300_dataset,test_non_P300_dataset))
test_label = np.vstack((test_P300_label,test_non_P300_label))
test_data, test_label = shuffled_data(test_data, test_label)
train_data, train_label = add_dimension(train_data,train_label)
test_data, test_label = add_dimension(test_data, test_label)
print('train_data:',train_data.shape)
print('train_label:',train_label.shape)
print('test_data:',test_data.shape)
print('test_label:',test_label.shape)
# -
class Dataset(Dataset):
    """Minimal torch Dataset over a pair of parallel numpy arrays."""

    def __init__(self, Data, Label):
        # torch.from_numpy shares memory with the source numpy arrays
        self.Data = torch.from_numpy(Data)
        self.Label = torch.from_numpy(Label)

    def __len__(self):
        return len(self.Data)

    def __getitem__(self, index):
        # returns (sample, label) for one trial
        return self.Data[index], self.Label[index]
input_channel = 64
input_length = 64
output_channel = 1
channel_sizes = [64,64,64,64]
kernel_size = 7
input = torch.randn(1, 64, 64)
net = STNN_net(input_channel, input_length,output_channel, channel_sizes,kernel_size)
net(input).shape
# +
LR = 0.001
epochs = 15
b_size = 32
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=LR)
Dataset = Dataset(train_data,train_label)
Train_loader = DataLoader(Dataset,batch_size = b_size ,shuffle = True)
# -
def train(epoch,train_data,train_label):
    """Run one epoch over Train_loader and return the per-batch loss history.

    NOTE(review): train_data/train_label are unused -- batches come from the
    module-level Train_loader, and net/optimizer/criterion are also globals.
    """
    epoch_loss = 0.0
    batch_losses = []
    for inputs, labels in Train_loader:
        inputs, labels = Variable(inputs), Variable(labels)
        optimizer.zero_grad()
        predictions = net(inputs)
        batch_loss = criterion(predictions, labels)
        batch_loss.backward()
        optimizer.step()
        epoch_loss += batch_loss.item()
        batch_losses.append(batch_loss.item())
    return batch_losses
def test(net,epoch,vaild_data,vaild_label):
    """Evaluate *net* on the given numpy arrays and return the accuracy.

    Network outputs are rounded to {0, 1} before scoring with sklearn;
    *epoch* is unused and kept only for signature compatibility.
    """
    inputs = Variable(torch.from_numpy(vaild_data))
    targets = Variable(torch.from_numpy(vaild_label))
    outputs = net(inputs).data.cpu().numpy()
    true_labels = targets.data.numpy()
    return skm.accuracy_score(true_labels, np.round(outputs))
for epoch in range(1, epochs+1):
print ("\nEpoch ", epoch)
train_loss=train(epoch,train_data,train_label)
accuracy_score = test(net,epoch,test_data, test_label)
print (' accuracy_score', accuracy_score)
| demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.10.2 64-bit
# language: python
# name: python3
# ---
# ## Condicional IF
# +
# Condicional IF
if 5 > 2:
print(True)
# -
# Statement If ...Else
if 5 < 2:
print(True)
else:
print(False)
6 > 3
3 > 7
4 < 8
4 >= 4
if 5 == 5:
print(True)
if True:
print(True)
# Atenção com a sintaxe
if 4 > 3
print('Faltou informar os dois pontos.')
# Atenção com a sintaxe
if 4 > 3:
print('expected an indented block after 'if' statement on line 2')
# ## Condicionais Aninhadas
idade = 18
if idade > 17:
print('Você pode dirigir!')
# +
nome = 'Bob'
if idade > 13:
if nome == 'Bob':
print('Ok Bob, você está autorozado a entrar!')
else:
print('Desculpe, mas você não pode entrar!')
# -
idade = 13
nome = 'Bob'
if idade >= 13 and nome == 'Bob':
print('Ok Bob, você está autorozado a entrar!')
idade = 13
nome = 'Bob'
if idade >= 13 or nome == 'Bob':
print('Ok Bob, você está autorozado a entrar!')
# ## Elif
dia = 'Terça'
if dia == 'Segunda':
print(True)
else:
print(False)
if dia == 'Segunda':
print(True, '1')
elif dia == 'Terça':
print(True, '2')
else:
print(False)
# ## Operadores Lógicos
idade = 18
nome = 'Bob'
if idade > 17:
print('Voce pode dirigir!')
idade = 18
if idade > 17 and nome == 'Bob':
print('Autorizado!')
# +
# Using more than one condition in the if clause
disciplina = input('Digite o nome da disciplina: ')
# BUG FIX: input() returns str, and comparing str >= int raises TypeError in
# Python 3 -- convert the grade to int before the numeric comparison.
nota_final = int(input('Digite a nota final (entre 0 e 100): '))
if disciplina == 'Geografia' and nota_final >= 70:
    print('Você foi aprovado!')
else:
    print('Lamento, acho que você precisa estudar mais!')
# +
# Using more than one condition
disciplina = input('Digite o nome da disciplina:')
# BUG FIX: the grade was compared as a string ('100' < '50' lexicographically),
# so convert it to int for a genuine numeric comparison.
nota_final = int(input('Digite a nota final (entre 0 e 100): '))
semestre = input('Digite o semestre (1 a 4): ')
if disciplina == 'Geografia' and nota_final >= 50 and int(semestre) != 1:
    print('Voce foi aprovado em %s com média final de %r!' % (disciplina, nota_final))
else:
    print('Lamento, acho que você precisa estudar mais!')
# -
# # FIM
| 001-Curso-De-Python/001-DSA/002-Loops-Condicionais-Metodos-Funcoes/001-Condicional IF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# ## Problem 2
ls -lsh
# The command ls lists everything inside the directory. -l flag shows them in a list (so we are able to see ownership), -s shows the size, and -h shows the results in a way is easy to read for humans.
#
# Regarding ownership, we can see there is only one directory, which is the one starting with "d", and there are 10 files, starting with "-". Then we can see every element in this directory is readable and writable by the user (first r and first w), and only the directory is executable by its owner (first x in the directory's permissions); for the files this can be noted by the absence of x in that position, where we find a "-" instead.
#
# The second r, w and x refer to the group, and the third ones refer to everyone.
# ## Problem 3
head -1 Locus_Germplasm_Phenotype_20130122.txt
# As the header line is the first line of the document, we can use the command head, and specify to only include the first line of the file we are interested in.
# ## Problem 4
wc -l Locus_Germplasm_Phenotype_20130122.txt
# For this task, we can use the command wc (word count), and to specify to count the lines and not the words with the -l flag.
# ## Problem 5
tail -7215 Locus_Germplasm_Phenotype_20130122.txt > Data_only.csv
wc -l Data_only.csv
# As we know from the last Problem that our file has 7216 lines, we can take the last 7215 and direct the output to the new file. Then we use wc again to check that it has the expected number of lines
# ## Problem 6
grep -nai "\Wroot\W" Locus_Germplasm_Phenotype_20130122.txt
# We use the command grep in order to find all places where the word root is. Then, we use the flags -a because we dont know the source, and the txt file could be comming from windows, so it is better to work in binary; and -n, because we want the lines; and -i, because it could be the first word of a sentence. Also, we use the word root surrounded by something that is not a word character, as it could be preceded or followed by a "." or a "-".
# ## Problem 7
grep -ai "\Wroot\W" Locus_Germplasm_Phenotype_20130122.txt | grep -E -oia '^at[1-5]g[0-9]+\s' > Root-associated-Loci.txt
# We take the same command we use for Problem 6, but not including the n, as we will need the line to start by the AGI locus code and not by the name of the line. Then, we use the pipe to send the output of the first grep to another grep, which is going to pick only the locus code (starting by a, then t, then a number from 1 to five, a g, and then 5 numbers from 0 to 9 and a tab) and redirect it to the file specified in the Problem 7.
# ## Problem 8
grep -ai "\Wroot\W" Locus_Germplasm_Phenotype_20130122.txt | grep -E -oia '\s[0-9]+$' > Root-associated-Publications.txt
# We use the same strategy we used before, but in the second grep we find the last characters of a line that contains only numbers and are preceded by a space (a tab in this case).
# ## Problem 9
grep -E -aio "^at[1]" Root-associated-Loci.txt | head -1
grep -E -aio "^at[2]" Root-associated-Loci.txt | head -1
grep -E -aio "^at[3]" Root-associated-Loci.txt | head -1
grep -E -aio "^at[4]" Root-associated-Loci.txt | head -1
grep -E -aio "^at[5]" Root-associated-Loci.txt | head -1
# We will grep only the ones for the first chromosome, and only the part that specify which chromosome, then for the second chromosome, and so on. Then, we will display them using head -1, so we can clearly see 1 the first result for each chromosome, and only the first part, in order to not overwhelm the the screen.
# ## Problem 10
# Seems I did correctly last exercise, so I am skipping this part (wohoo!).
| Exam_1_Answers/.ipynb_checkpoints/Exam_1_Answers_JCCS-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.svm import SVC
from sklearn.metrics import roc_auc_score, roc_curve
from sklearn import preprocessing
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
import warnings
import numpy as np
from collections import OrderedDict
import os
from lob_data_utils import lob, db_result, gdf_pca, model
from lob_data_utils.svm_calculation import lob_svm
sns.set_style('whitegrid')
warnings.filterwarnings('ignore')
# -
data_length = 15000
stock = '9061'
gdf_filename_pattern = 'gdf_{}_' + 'len{}'.format(data_length) + '_r{}_s{}_K50'
gdf_parameters = [(1.0, 1.0), (0.1, 0.1), (0.1, 1.0), (1.0, 0.1)]
gdf_dfs = []
for r, s in gdf_parameters:
gdf_dfs.append(gdf_pca.SvmGdfResults(
stock, r=r, s=s, data_length=data_length,
gdf_filename_pattern=gdf_filename_pattern))
# ## Visualization
#
# What do I want to say here?
#
# * that data is quite random
# * distribution of queue imbalance
# * that there is some correlation between queue imbalance and mid price indicator
# * show some gdf values
df = gdf_dfs[2].df
# ### Mid Price
#
# Data is quite random, there are outliers
# +
fig, ax = plt.subplots(figsize=(16, 8))
df[['bid_price']].plot(ax=ax, alpha=0.75, linestyle='--')
df[['ask_price']].plot(ax=ax, alpha=0.75, linestyle='--')
df[['mid_price']].plot(ax=ax, alpha=0.5)
plt.legend(['Bid-Price', 'Ask-Price', 'Mid-Price',])
plt.ylabel('Price')
plt.xlabel('')
plt.title('Mid-Price for 9061 for training dataset')
# -
# We can observe outliers, lets look into them - both are around 11 Fridays
fig, ax = plt.subplots(figsize=(16, 8))
df[['bid_price']].loc['2013-09-20'].plot(ax=ax, alpha=0.75, linestyle='--')
df[['ask_price']].loc['2013-09-20'].plot(ax=ax, alpha=0.75, linestyle='--')
df[['mid_price']].loc['2013-09-20'].plot(ax=ax, alpha=0.5)
fig, ax = plt.subplots(figsize=(16, 8))
df[['bid_price']].loc['2013-10-18'].plot(ax=ax, alpha=0.75, linestyle='--')
df[['ask_price']].loc['2013-10-18'].plot(ax=ax, alpha=0.75, linestyle='--')
df[['mid_price']].loc['2013-10-18'].plot(ax=ax, alpha=0.5)
# ### Distributions
fig, ax = plt.subplots(1, 3, figsize=(16, 4))
sns.distplot(df['mid_price_indicator'], ax=ax[0])
sns.distplot(df['queue_imbalance'], ax=ax[1])
sns.distplot(df['mid_price'], ax=ax[2])
sns.distplot(df['ask_price'], ax=ax[2])
sns.distplot(df['bid_price'], ax=ax[2])
df['mid_price'].mean(), df['mid_price'].median(), df['mid_price'].mode()
# Distribution of Mid-Price Indidcator is balanced (classes are balanced). Mid-Price distribution is slightly Right-Skewed.
plt.figure(figsize=(8, 8))
sns.kdeplot(df['mid_price'], df['queue_imbalance'], cbar=True, shade=True)
df['mid_price'].mean(), df['mid_price'].median(), df['mid_price'].mode()
# We can see that for queue imbalance -1 or 1 the density is the smallest. The highest density for queue imbalance in between -0.5 and 0.5 which happens to be around the mean mid price
# ### Correlations
cols = ['gdf_1', 'gdf_23', 'gdf_25', 'queue_imbalance', 'prev_queue_imbalance',
'mid_price', 'mid_price_indicator']
sns.heatmap(df[cols].corr(), annot=True)
# TODO: ?
# +
### GDF
# +
n_components = 2
pca = PCA(n_components=n_components)
gdf_columns = ['gdf_' + str(i) for i in range(0, 50)]
p = pca.fit_transform(df[gdf_columns])
for i in range(n_components):
df['pca' + str(i)] = p[:, i]
pca_cols = ['pca' + str(i) for i in range(n_components)]
cols = ['gdf_23', 'gdf_25', 'queue_imbalance',
'mid_price', 'mid_price_indicator']
df[pca_cols].plot()
# +
sns.heatmap(df[pca_cols + [
'mid_price_indicator', 'queue_imbalance', 'prev_queue_imbalance']].corr(), annot=True)
# -
df[['gdf_0', 'gdf_24', 'gdf_49']].plot()
fig, ax = plt.subplots(2, 1, figsize=(16, 8), sharex=True)
df[gdf_columns].iloc[0].plot('bar', color=['b'], alpha=0.5, ax=ax[0])
df[gdf_columns].iloc[2003].plot('bar', color=['b'], alpha=0.5, ax=ax[1])
ax[0].set_title(df['mid_price_indicator'].iloc[0])
ax[1].set_title(df['mid_price_indicator'].iloc[2003])
fig, ax = plt.subplots(2, 1, figsize=(16, 8), sharex=True)
df[pca_cols].iloc[0].plot('bar', color=['b'], alpha=0.5, ax=ax[0])
df[pca_cols].iloc[2003].plot('bar', color=['b'], alpha=0.5, ax=ax[1])
ax[0].set_title(df['mid_price_indicator'].iloc[0])
ax[1].set_title(df['mid_price_indicator'].iloc[2003])
df.head()
# +
gdf_columns = ['gdf_' + str(i) for i in range(0, 50)]
plt.figure(figsize=(16, 8))
ax = sns.boxplot(data=df[gdf_columns])
_ = ax.set_xticklabels([g.replace('_', ' ').upper() for g in gdf_columns], rotation=45)
plt.title('Distribution of gdf features for stock ' + str(stock))
plt.xlabel('GDF feature')
plt.ylabel('')
# +
gdf_columns = ['gdf_' + str(i) for i in range(0, 50)]
plt.figure(figsize=(16, 8))
ax = sns.boxplot(data=df[pca_cols])
_ = ax.set_xticklabels([g.replace('_', ' ').upper() for g in pca_cols], rotation=45)
plt.title('Distribution of gdf features for stock ' + str(stock))
plt.xlabel('GDF feature')
plt.ylabel('')
# -
for g in gdf_columns[::9]:
sns.distplot(df[g], label=g)
plt.legend()
# ## Predictions
# +
def get_kernel_from_method(method):
    """Return the kernel name embedded in *method* (e.g. 'svm_rbf' -> 'rbf')."""
    tokens = method.split('_')
    return tokens[1]
class CFR():
    """Bundle a classifier with the feature set it should be trained on.

    When is_svm is true, get_result routes through the SvmGdfResults.train_svm
    path with hyperparameters pulled off the wrapped SVC; otherwise it uses the
    generic train_clf path and optionally tags the result with a method name.
    """

    def __init__(self, clf, feature_name, is_svm=True):
        self.clf = clf
        self.feature_name = feature_name
        self.is_svm = is_svm

    def get_result(self, gdf_df, should_validate=False, method=None):
        if self.is_svm:
            return gdf_df.train_svm(
                C=self.clf.C, gamma=self.clf.gamma, coef0=self.clf.coef0,
                kernel=self.clf.kernel, feature_name=self.feature_name,
                should_validate=should_validate)
        res = gdf_df.train_clf(self.clf, self.feature_name, should_validate=should_validate)
        if method:
            res['kernel'] = method
        return res
# -
# ### Queue Imbalance - LOGIT, SVM (linear, rbf, sigmoid)
def get_best_svm_results_for_queue_imbalance(s, features):
    """Load saved SVM results for stock *s*, average per-fold metrics, and
    return the best row per method (ranked by Matthews correlation).

    Each metric column in the CSV holds a stringified list of per-fold scores;
    it is replaced in place by the mean of those scores.
    """
    from ast import literal_eval  # hoisted: was re-imported on every loop iteration

    df_res = pd.read_csv('res_{}_prev_queue_imbalance.csv'.format(s))
    df_res = df_res[df_res['features'] == features]
    df_res = df_res[df_res['method'] != 'logistic']
    df_res['stock'] = [s for i in range(len(df_res))]
    columns_to_mean = ['f1', 'kappa', 'matthews',
                       'precision', 'recall', 'roc_auc', 'train_f1', 'train_kappa',
                       'train_matthews', 'train_precision', 'train_recall', 'train_roc_auc']
    for c in columns_to_mean:
        df_res[c] = [np.array(literal_eval(row[c])).mean() for _, row in df_res.iterrows()]
    return df_res.sort_values(by='matthews', ascending=False).groupby('method').head(1)
df_best_svm_que = get_best_svm_results_for_queue_imbalance(stock, features='queue_imbalance')
# +
log_que_clfs = [
CFR(clf=LogisticRegression(), feature_name='que', is_svm=False),
]
svm_que_clfs = [
CFR(
SVC(kernel=get_kernel_from_method(row['method']),
C=row['c'],
gamma=row['gamma'],
coef0=row['coef0']), feature_name='que')
for i, row in df_best_svm_que.iterrows()]
# -
res = []
for clf in log_que_clfs + svm_que_clfs:
r = clf.get_result(gdf_dfs[0])
res.append(r)
df_res_que = pd.DataFrame(res)
df_res_que
# ### Queue Imbalance and Prev Queue Imbalance
df_best_svm_prev_que = get_best_svm_results_for_queue_imbalance(
stock, features='queue_imbalance, prev_queue_imbalance')
# +
prev_que_log_clfs = [
CFR(clf=LogisticRegression(), feature_name='que_prev', is_svm=False),
]
svm_prev_que_clfs = [
CFR(
SVC(kernel=get_kernel_from_method(row['method']),
C=row['c'],
gamma=row['gamma'],
coef0=row['coef0']), feature_name='que_prev')
for i, row in df_best_svm_prev_que.iterrows()]
# -
res = []
for clf in prev_que_log_clfs + svm_prev_que_clfs:
r = clf.get_result(gdf_dfs[0])
res.append(r)
df_res_que_prev = pd.DataFrame(res)
df_res_que_prev
# ### GDF Plain
gdf_results_dir = '../gaussian_filter/data_res_9061_len15000'
results_files = [f for f in os.listdir(gdf_results_dir)
                 if '9061' in f and 'partial' not in f]
# pd.concat once instead of DataFrame.append in a loop: .append was removed in
# pandas 2.0 and re-copied the accumulated frame on every iteration
df_gdf_plain_res = (pd.concat([pd.read_csv(os.path.join(gdf_results_dir, f))
                               for f in results_files])
                    if results_files else pd.DataFrame())
print(df_gdf_plain_res.columns)
df_gdf_plain_res.sort_values(by='matthews', ascending=False).groupby('kernel').head(1)
# +
from sklearn.neural_network import MLPClassifier
alphas = [0.0001, 0.001, 0.1, 1.0]
hidden_layer_sizes = [(14, 6)]
res = []
for hidden_layer_size in hidden_layer_sizes:
for alpha in alphas:
solver = 'adam'
activation = 'tanh'
clf = MLPClassifier(solver=solver, alpha=alpha, activation=activation,
hidden_layer_sizes=hidden_layer_size, random_state=1)
c = CFR(clf=clf, feature_name='gdf_0_50', is_svm=False)
r = c.get_result(gdf_dfs[0], should_validate=True, method='mlp')
res.append({**r, 'hidden_layer_size': hidden_layer_size,
'alpha': alpha, 'activation': activation, 'solver': solver})
df_res1 = pd.DataFrame(res).sort_values(by='matthews', ascending=False)
# -
# ### GDF with PCA
# +
def _load_gdf_results(gdf_results_dir, stock_substr):
    """Concatenate all non-partial result CSVs for the given stock substring.

    Uses a single pd.concat instead of the removed-in-pandas-2.0
    DataFrame.append-in-a-loop pattern the three original cells repeated.
    """
    names = [f for f in os.listdir(gdf_results_dir)
             if stock_substr in f and 'partial' not in f]
    frames = [pd.read_csv(os.path.join(gdf_results_dir, f)) for f in names]
    return pd.concat(frames) if frames else pd.DataFrame()


df_gdf_res = _load_gdf_results('../gdf_pca/res_pca_gdf3_15000', stock)
df_gdf_res.sort_values(by='matthews', ascending=False).groupby('kernel').head(2)
# +
df_gdf_res = _load_gdf_results('../gdf_pca/res_pca1_gdf_que_15000', '9061')
df_gdf_res.sort_values(by='matthews', ascending=False).groupby('kernel').head(2)
# +
df_gdf_res = _load_gdf_results('../gdf_pca/res_pca2_gdf_que_15000', '9061')
df_gdf_res.sort_values(by='matthews', ascending=False).groupby('kernel').head(2)
# -
# +
from sklearn.neural_network import MLPClassifier
alphas = [0.0001, 0.001, 0.1, 1.0]
learning_rates = [0.000001, 0.00001, 0.001, 0.1, 1.0]
hidden_layer_sizes = [(14, 6)]
res = []
for i in range(len(gdf_dfs)):
for learning_rate in learning_rates:
for alpha in alphas:
hidden_layer_size = (14, 6)
solver = 'adam'
activation = 'tanh'
clf = MLPClassifier(solver=solver, alpha=alpha, activation=activation,
learning_rate_init=learning_rate,
hidden_layer_sizes=hidden_layer_size, random_state=1, shuffle=False)
c = CFR(clf=clf, feature_name='pca_gdf_que_prev10', is_svm=False)
r = c.get_result(gdf_dfs[i], should_validate=True, method='mlp')
res.append({**r, 'hidden_layer_size': hidden_layer_size, 'gdf_dfs': i,
'alpha': alpha, 'activation': activation, 'solver': solver,
'learning_rate': learning_rate})
df_res = pd.DataFrame(res).sort_values(by='matthews', ascending=False)
# -
columns = ['solver', 'matthews', 'test_matthews', 'stock', 'roc_auc', 'test_roc_auc', 'learning_rate']
df_res[columns].sort_values(by='matthews', ascending=False).groupby('learning_rate').head(2)
| overview_val10/overview_junk/overview_9061_full.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/KristineYW/Coding-Challenges/blob/main/hr_library_fines.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="x9na3C6_UTvR"
# URL: https://www.hackerrank.com/challenges/library-fine/problem
# + [markdown] id="r7vSf71UUVfu"
# Your local library needs your help! Given the expected and actual return dates for a library book, create a program that calculates the fine (if any). The fee structure is as follows:
#
# If the book is returned on or before the expected return date, no fine will be charged (i.e.: fine = 0).
# If the book is returned after the expected return day but still within the same calendar month and year as the expected return date, fine = 15 Hackos x (the number of days late).
# If the book is returned after the expected return month but still within the same calendar year as the expected return date, the fine = 500 Hackos x (the number of months late).
# If the book is returned after the calendar year in which it was expected, there is a fixed fine of 10,000 Hackos.
# Charges are based only on the least precise measure of lateness. For example, whether a book is due January 1, 2017 or December 31, 2017, if it is returned January 1, 2018, that is a year late and the fine would be 10,000 Hackos.
#
# Example
# d1, m1, y1 = 14, 7, 2018
# d2, m2, y2 = 5, 7, 2018
#
# The first values are the return date and the second are the due date. The years are the same and the months are the same. The book is 14-5=9 days late. Return 9*15=135.
#
# Function Description
#
# Complete the libraryFine function in the editor below.
#
# libraryFine has the following parameter(s):
#
# d1, m1, y1: returned date day, month and year, each an integer
# d2, m2, y2: due date day, month and year, each an integer
#
#
# Returns
#
# int: the amount of the fine or 0 if there is none
#
#
# Input Format
#
# The first line contains space-separated integers, , denoting the respective , , and on which the book was returned.
# The second line contains space-separated integers, , denoting the respective , , and on which the book was due to be returned.
#
# Constraints
# 1<= d1, d2<= 31
# 1<=m1, m2<=12
# 1<=y1, y2<=3000
#
# Sample Input
#
# 9 6 2015
# 6 6 2015
#
#
# Sample Output
#
# 45
#
# Explanation
#
# Given the following dates:
# Returned: d1=9, m1=6, y1=2015
# Due: d2=6, m2=6, y2=2015
#
# Because y2=y1, we know it is less than a year late.
# Because m2=m1, we know it's less than a month late.
# Because d2<d1, we know that it was returned late (but still within the same month and year).
#
# Per the library's fee structure, we know that our fine will be 15 Hackos x (# of days late). We then print the result of 15 x (d1-d2) = 15 x (9 - 6) = 45 as our output.
# + id="3H5n_7-0USwR"
# #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the libraryFine function below.
"""
1. If the book is returned on or before the expected return date, no fine will be charged (i.e.: fine = 0).
2. If the book is returned after the expected return day but still within the same calendar month and year as the expected return date, fine = 15 Hackos x (the number of days late).
3. If the book is returned after the expected return month but still within the same calendar year as the expected return date, the fine = 500 Hackos x (the number of months late).
4. If the book is returned after the calendar year in which it was expected, there is a fixed fine of 10,000 Hackos.
* Charges are based only on the least precise measure of lateness. For example, whether a book is due January 1, 2017 or December 31, 2017, if it is returned January 1, 2018, that is a year late and the fine would be 10,000 Hackos.
"""
def libraryFine(d1, m1, y1, d2, m2, y2):
    """Return the fine for a book returned on d1/m1/y1 that was due on d2/m2/y2.

    Charges use only the least precise measure of lateness:
    - returned in a later year:  fixed 10000
    - same year, later month:    500 per month late
    - same month, later day:     15 per day late
    - on time or early:          0
    """
    if y1 > y2:
        return 10000
    if y1 == y2 and m1 > m2:
        return 500 * (m1 - m2)
    if y1 == y2 and m1 == m2 and d1 > d2:
        # dropped an unused intermediate variable from the original
        return 15 * (d1 - d2)
    return 0
if __name__ == '__main__':
    # HackerRank harness: the result is written to the file named by OUTPUT_PATH
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # first stdin line: return date as "day month year"
    d1M1Y1 = input().split()
    d1 = int(d1M1Y1[0])
    m1 = int(d1M1Y1[1])
    y1 = int(d1M1Y1[2])
    # second stdin line: due date as "day month year"
    d2M2Y2 = input().split()
    d2 = int(d2M2Y2[0])
    m2 = int(d2M2Y2[1])
    y2 = int(d2M2Y2[2])
    result = libraryFine(d1, m1, y1, d2, m2, y2)
    fptr.write(str(result) + '\n')
    fptr.close()
| hr_library_fines.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/attackgnome/pytorch_project/blob/master/Image_Classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="bsXoHHyJOPil" colab_type="text"
# # Project to classify images using pytorch
#
# The goal of this project is to gain familiarity with using pytorch for image classification and using gpu acceleration.
# + id="xNP2MlMkNoct" colab_type="code" colab={}
import torch
import torchvision
import torchvision.transforms as transforms
# + [markdown] id="LIvIAe0KOz9Q" colab_type="text"
# Make sure to change runtime options to allow use of gpu acceleration.
# + id="J4v1WlCTTq5C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b62bc612-5a4a-4b0a-d79a-494a2ff3ffda"
# Select the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")  # check if gpu is enabled
# Assuming that we are on a CUDA machine, this should print a CUDA device:
print(device)
# + [markdown] id="MlSKoCuZVOKZ" colab_type="text"
# The output of torchvision datasets are PILImage images of range [0, 1]. Transform them to Tensors of normalized range [-1, 1].
# + id="VB5c7iWsOy90" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 100, "referenced_widgets": ["a460084077874dd8a49533d6ac29757d", "da7404be7871495daa512bd7402e8ad2", "59aacafec30f4579a6d715046e98c190", "e03499dffab549b286678846a1a3e4fa", "369734cb5438480e93a06fbe55bfacdd", "b8f07535dcda4198b6ddcc11b7489438", "172d9d6361bf4d77a57fb2de877c71ff", "29f8e8d076394f19b050f83d1eaefb02"]} outputId="7c6d7719-9780-4602-9b15-16e7833acec1"
# Convert PIL images to tensors and normalize each RGB channel from
# [0, 1] to [-1, 1] (mean 0.5, std 0.5 per channel).
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# CIFAR-10 train/test splits, downloaded to ./data on first use.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)
# Human-readable names for the ten CIFAR-10 class indices, in label order.
classes = ('plane', 'car', 'bird', 'cat',
           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# + [markdown] id="1BBKQV-KQXmy" colab_type="text"
# Display training images to get an idea of what is in the dataset and print their labels.
# + id="Q3FQc9FgQV5C" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 155} outputId="a9b68c78-84a4-454a-a085-900570c70419"
import matplotlib.pyplot as plt
import numpy as np
# functions to show an image
def imshow(img):
    """Undo the (0.5, 0.5) normalization and display a CHW image tensor."""
    unnormalized = img / 2 + 0.5
    as_array = unnormalized.numpy()
    # pyplot expects HWC layout, so move the channel axis last.
    plt.imshow(np.transpose(as_array, (1, 2, 0)))
    plt.show()
# get some random training images
dataiter = iter(trainloader)
# FIX: `next(dataiter)` is the supported spelling; the `.next()` iterator
# method was removed from DataLoader iterators in newer PyTorch releases.
images, labels = next(dataiter)
# show images
imshow(torchvision.utils.make_grid(images))
# print the labels of the four images in the batch
print(' '.join('%5s' % classes[labels[j]] for j in range(4)))
# + [markdown] id="1QXwKRlzVksi" colab_type="text"
# Now let's jump into defining the convolutional neural network.
# + id="lT2BNtygQfZS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="898d5bea-c025-47df-f330-fbe9e79ca98f"
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small CIFAR-10 CNN: two conv+pool stages followed by three fully
    connected layers producing 10 class logits for a 3x32x32 input."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        # FIX: the original assigned self.drop_layer twice, silently
        # discarding the first Dropout instance. A single module is kept;
        # it is still unused in forward() — wire it in to experiment.
        self.drop_layer = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # 3x32x32 -> conv(5) -> 6x28x28 -> pool -> 6x14x14
        x = self.pool(F.relu(self.conv1(x)))
        # 6x14x14 -> conv(5) -> 16x10x10 -> pool -> 16x5x5
        x = self.pool(F.relu(self.conv2(x)))
        # Flatten to (batch, 16*5*5) for the fully connected head.
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
net = Net()
# Move model parameters to the GPU when one is available.
net.to(device)
# + [markdown] id="aZVChqgiWNYR" colab_type="text"
# Define the optimizer. Since we have multiple classes we will use cross-entropy loss and SGD with momentum since that is what the tutorial suggests.
# + id="kMgT7rsqQ4Ub" colab_type="code" colab={}
import torch.optim as optim

# Multi-class classification: cross-entropy loss, optimized with SGD +
# momentum as the tutorial suggests.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
#optimizer = optim.ASGD(net.parameters(), lr=0.001, lambd=0.0001, alpha=0.75, t0=1000000.0, weight_decay=0.0001) # try a different optimizer
# + [markdown] id="QimSqsgjWpfB" colab_type="text"
# Alright, lets go ahead and train the model. We will set a few parameters in the front end that differentiate it from the example in the tutorial. Specifically we will define the number of epochs we want to use in training to see if we can improve on the results from the tutorial.
# + id="ffG6rDe-Q-I7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b152cece-51f9-4a18-daad-867e4eb69bcf"
epoch_choice = 16  # number of epochs to select for training.
for epoch in range(epoch_choice):  # loop over the dataset multiple times
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data
        if torch.cuda.is_available():
            inputs, labels = inputs.cuda(), labels.cuda()  # Send input tensors to gpu if available.
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics: running average of loss every 2000 mini-batches
        running_loss += loss.item()
        if i % 2000 == 1999:  # print every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
# + [markdown] id="71FbhxqTa0ur" colab_type="text"
# Let's save our trained model.
# + id="aZePGzUdRQ29" colab_type="code" colab={}
# Persist only the learned weights (state_dict), not the full module object.
PATH = './cifar_net.pth'
torch.save(net.state_dict(), PATH)
# + id="oiz5s6phRfSD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 155} outputId="68c34f04-af30-4fb8-85ea-073e225cc705"
dataiter = iter(testloader)
# FIX: `next(dataiter)` replaces the removed `.next()` iterator method.
images, labels = next(dataiter)
# print images
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4)))
# + id="hlQk2zDPR3xZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bf92766a-e8ab-4f3f-f0f3-f9cb4f24b4d3"
# BUG FIX: the original reused `outputs` left over from the *training* loop,
# so the printed predictions did not correspond to the test images displayed
# above. Run the displayed batch through the network first.
outputs = net(images.to(device))
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
                              for j in range(4)))
# + id="xTcwHd27SkP8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="2b87d6d1-7bc2-4e64-8e09-a0f5eaf910fc"
correct = 0
total = 0
# Gradients are not needed for evaluation; no_grad() saves memory and time.
with torch.no_grad():
    for data in testloader:
        images, labels = data
        if torch.cuda.is_available():
            images, labels = images.cuda(), labels.cuda()  # Send input images to gpu if available.
        outputs = net(images)
        # The class with the highest logit is the prediction.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
# + [markdown] id="TecHBS_-dADY" colab_type="text"
# Compared to the 52% accuracy in the default tutorial, fine tuning a few of the parameters has significantly increased the accuracy.
# + id="8DAXfULOTZtT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="f7cafe87-76dc-4139-c312-f9602bcafbd7"
# Per-class accuracy over the test set; range(4) matches the batch size of 4.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
    for data in testloader:
        images, labels = data
        if torch.cuda.is_available():
            images, labels = images.cuda(), labels.cuda()  # Send input images to gpu if available.
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        c = (predicted == labels).squeeze()
        for i in range(4):
            label = labels[i]
            class_correct[label] += c[i].item()
            class_total[label] += 1
for i in range(10):
    print('Accuracy of %5s : %2d %%' % (
        classes[i], 100 * class_correct[i] / class_total[i]))
| Image_Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="7uL7ALJfaHAq"
# Load libs and utilities.
# + id="xk7fUOkRQZdJ" colab={"base_uri": "https://localhost:8080/"} outputId="95f52567-d88c-44b5-ef7e-8fdc2a3f3abd"
# !pip install -U -q PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
from google.colab import drive
drive.mount('/content/drive')
# %cd "drive/MyDrive/Projects/Fourier"
# !pip install import-ipynb
import import_ipynb
# + id="Pg-U8VI9MgKR" colab={"base_uri": "https://localhost:8080/"} outputId="a0df45d4-2ca9-4455-d322-a666cc84a955"
import os
import tensorflow as tf
print("Tensorflow version: " + tf.__version__)
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.utils import plot_model
from pathlib import Path
from utils import *
# + id="XdAxWKZr9Umn"
class UnitGaussianNormalizer:
    """Standardizes tensors to zero mean / unit variance along axis 0,
    with a small epsilon guarding the division."""

    def __init__(self, x, eps=0.00001):
        super(UnitGaussianNormalizer, self).__init__()
        # Per-position statistics computed over the batch dimension.
        self.mean = tf.math.reduce_mean(x, 0)
        self.std = tf.math.reduce_std(x, 0)
        self.eps = eps

    def encode(self, x):
        # Standardize: subtract the mean, divide by (std + eps).
        return (x - self.mean) / (self.std + self.eps)

    def decode(self, x):
        # Invert encode(): scale back up and re-add the mean.
        return x * (self.std + self.eps) + self.mean
# + id="p8IKhRIycrn3" colab={"base_uri": "https://localhost:8080/"} outputId="263587dc-73e9-4df5-cb04-eb05c3e5ec09"
# Dataset locations relative to the project root (two levels up).
PROJECT_PATH = Path(os.path.abspath('')).parent.parent.resolve().__str__()
TRAIN_PATH = PROJECT_PATH + '/Datasets/Fourier/piececonst_r241_N1024_smooth1.mat'
# NOTE(review): TEST_PATH is defined but never loaded in this notebook.
TEST_PATH = PROJECT_PATH + '/Datasets/Fourier/piececonst_r241_N1024_smooth2.mat'
N_TRAIN = 1000
W = 49  # width
FTS = 32  # features
R = 5  # refinement (subsampling stride)
MODES = 12
# ...
# Load the .mat file only once per notebook runtime: DATA_IS_LOADED is
# undefined on the first run, so the except path performs the load.
try:
    if DATA_IS_LOADED:
        print("Not reloading data!")
except:
    reader = MatReader()
    if reader.is_not_loaded():
        reader.load_file(TRAIN_PATH)
    DATA_IS_LOADED = True
# ...
# Subsample the coefficient and solution fields by stride R
# (presumably 241 -> 49 samples per axis — TODO confirm).
x_train = reader.read_field('coeff')[:N_TRAIN,::R,::R]
y_train = reader.read_field('sol')[:N_TRAIN,::R,::R]
S_ = x_train.shape[1]
grids = []
grids.append(np.linspace(0, 1, S_))
grids.append(np.linspace(0, 1, S_))
# Cartesian grid of (x, y) coordinates, reshaped to (1, S_, S_, 2) for use
# as a positional-encoding input channel pair.
grid = np.vstack([xx.ravel() for xx in np.meshgrid(*grids)]).T
grid = grid.reshape(1,S_,S_,2)
print(x_train.shape)
x_train = tf.convert_to_tensor(x_train, dtype=tf.float32)
y_train = tf.convert_to_tensor(y_train, dtype=tf.float32)
grid = tf.convert_to_tensor(grid, dtype=tf.float32)
# Append the coordinate grid as two extra input channels.
x_train = tf.expand_dims(x_train, axis=3)
grid = tf.repeat(grid, repeats = N_TRAIN, axis = 0)
x_train = tf.concat([x_train, grid], axis=3)
y_train = tf.expand_dims(y_train, axis=3)
# Standardize both inputs and targets to zero mean / unit variance.
x_normalizer = UnitGaussianNormalizer(x_train)
x_train = x_normalizer.encode(x_train)
y_normalizer = UnitGaussianNormalizer(y_train)
y_train = y_normalizer.encode(y_train)
print("x_train dims: " + str(x_train.shape))
print("y_train dims: " + str(y_train.shape))
# + id="-MQFoq1YRkyP"
class FourierLayer(layers.Layer):
    """Spectral convolution: 2-D real FFT, multiply the lowest MODES x MODES
    frequency modes by learned complex weights, inverse FFT back to 49x49."""

    def __init__(self):
        super(FourierLayer, self).__init__()
        # Real and imaginary parts of the complex spectral weights, shaped
        # (in_features, out_features, modes, modes).
        self.weight_fft1 = tf.Variable(tf.random.uniform([FTS, FTS, MODES, MODES], minval=0, maxval=1),name="Wfft1", trainable=True)
        self.weight_fft2 = tf.Variable(tf.random.uniform([FTS, FTS, MODES, MODES], minval=0, maxval=1),name="Wfft2", trainable=True)

    def call(self, input, training=True):
        weight_fft_complex = tf.complex(self.weight_fft1, self.weight_fft2)
        x = input
        # Forward real FFT over the last two (spatial) axes, padded/cropped to 49x49.
        x = keras.layers.Lambda(lambda v: tf.signal.rfft2d(v, tf.constant([49, 49])))(x)
        # Keep only the low-frequency modes.
        x = x[:,:,:MODES, :MODES]
        # Per-mode linear map across feature channels (i -> o).
        x = keras.layers.Lambda(lambda v: tf.einsum('ioxy,bixy->boxy', weight_fft_complex, v))(x)
        # Inverse FFT back to the 49x49 spatial domain.
        x = keras.layers.Lambda(lambda v: tf.signal.irfft2d(v, tf.constant([49, 49])))(x)
        return x
# + id="Km7v54ZzpczG"
class FourierUnit(layers.Layer):
    """Fourier block: spectral path plus a pointwise Conv1D bypass, summed
    and then batch-normalized."""

    def __init__(self):
        super(FourierUnit, self).__init__()
        # Kernel-size-1 convolution acting as the non-spectral bypass path.
        self.W = tf.keras.layers.Conv1D(W, 1)
        self.fourier = FourierLayer()
        self.add = tf.keras.layers.Add()
        self.bn = tf.keras.layers.BatchNormalization()

    def call(self, input, training=True):
        x = input
        x1 = self.fourier(x)  # spectral convolution path
        x2 = self.W(x)        # bypass path
        x = self.add([x1, x2])
        x = self.bn(x)
        return x
# + id="f75b5clwcDbC"
class MyModel(keras.Model):
    """Fourier-neural-operator style model: lift inputs to FTS channels,
    apply two Fourier units, then project back to one output channel with
    a pointwise MLP."""

    def __init__(self):
        super(MyModel, self).__init__()
        self.fc0 = tf.keras.layers.Dense(FTS)  # lift to FTS features
        # Channels-first so the Fourier units see (batch, FTS, H, W).
        self.perm_pre = tf.keras.layers.Permute((3, 1, 2))
        self.fourier_unit_1 = FourierUnit()
        self.relu_1 = tf.keras.layers.ReLU()
        self.fourier_unit_2 = FourierUnit()
        # NOTE(review): self.relu is never used in call(); relu_1/relu2 are.
        self.relu = tf.keras.layers.ReLU()
        self.perm_post = tf.keras.layers.Permute((2, 3, 1))
        self.fc1 = tf.keras.layers.Dense(128)
        self.relu2 = tf.keras.layers.ReLU()
        self.fc2 = tf.keras.layers.Dense(1)

    def call(self, input):
        x = self.fc0(input)
        x = self.perm_pre(x)
        x = self.fourier_unit_1(x)
        x = self.relu_1(x)
        x = self.fourier_unit_2(x)
        x = self.perm_post(x)
        x = self.fc1(x)
        x = self.relu2(x)
        x = self.fc2(x)
        return x

    def model(self):
        # Functional-API wrapper so summary()/plot_model can report shapes.
        x = keras.Input(shape=(W, W, 3))
        return keras.Model(inputs=[x], outputs=self.call(x))
# + id="b34VyLimesac" colab={"base_uri": "https://localhost:8080/"} outputId="5d7139bf-5778-4894-a919-f58bd35f04b0"
model = MyModel()
mse = tf.keras.losses.MeanSquaredError()
# NOTE(review): `lr=` is deprecated in newer Keras; `learning_rate=` is the
# current parameter name — confirm against the installed TF version.
model.compile(
    loss=mse,
    optimizer=keras.optimizers.Adam(lr=3e-4),
    metrics=[tf.keras.metrics.RootMeanSquaredError()],
)
model.fit(x_train, y_train, batch_size=64, epochs=2, verbose=2)
model.model().summary()
| FourrierNeuralNet2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
sns.set_theme()
# +
import os
# Directory where generated example figures are written.
PLOT_DIR = "example-plots"
# FIX: makedirs(exist_ok=True) avoids the check-then-create race of the
# original exists()/mkdir() pair and is the idiomatic form.
os.makedirs(PLOT_DIR, exist_ok=True)
# +
# Location of the thesis sources that figures/tables are exported into.
THESIS_DIR = os.path.join(os.environ["HOME"], "Documents", "Studie", "Thesis", "Report", "thesis", "src")
if not(os.path.exists(THESIS_DIR)):
    # BUG FIX: the original raised `Error(...)`, but `Error` is not a defined
    # name — it would fail with NameError instead of the intended message.
    raise FileNotFoundError("Set the correct thesis root dir to save results to!")
THESIS_FIGURE_DIR = os.path.join(THESIS_DIR, "fig")
THESIS_TABLE_DIR = os.path.join(THESIS_DIR, "tab")
os.makedirs(THESIS_FIGURE_DIR, exist_ok=True)
os.makedirs(THESIS_TABLE_DIR, exist_ok=True)
# +
# Load data
# Benchmark-result CSVs to analyse; older runs are kept commented out for
# easy switching between datasets.
results_files = [
    "20211117-210600 chocopy.csv"
    # "20211014-124146 -p optimisationLevel=4 -f 2 -wi 2 -i 3 run.csv",
    # "20211013-114213_results-compilespace.csv"
    # "20211013-114213_results-compilespace.csv"
    # "20211013-124531-benchnullary.csv"
    # "20211012-102647.csv"
    # "20211007-135249.csv",
    # "20211007-084741.csv", # benchsym switch backend
    # "20211006-143434.csv", # more problem swtich backend
    # "20210915-111700.csv",
    # "results-compilespace.csv",
]
df_raw = pd.concat(map(lambda f: pd.read_csv(f"results/{f}"), results_files), ignore_index=True)
df_raw.info()
# +
# Some locales export decimal commas; convert those columns back to float.
for col in ["Score", "Score Error (99.9%)"]:
    if col in df_raw and df_raw[col].dtype == object:
        df_raw[col] = df_raw[col].map(lambda s: s.replace(',', '.'), na_action="ignore").astype(float)
# df_raw
# -
df_raw["Param: optimisationLevel"] = df_raw["Param: optimisationLevel"].astype("string")
# Drop the JFR-profiled benchmark variants; keep an independent copy.
df = df_raw[df_raw.Benchmark.str.endswith("jfr") == False].copy()
df
# Derive Stage (last dotted component) and Problem (second-to-last) from
# the fully qualified benchmark name.
df["Stage"] = df["Benchmark"].str.rpartition('.')[2]
df["Problem"] = df["Benchmark"].str.split('.').map(lambda l: l[-2])
df
program_text = """
module calls
imports libstrategolib
signature
sorts
S
constructors
nullary_constructor : S
unary_constructor : S -> S
nary_constructor : S * S * S -> S
nullary_function : S
unary_function : S -> S
nary_function : S * S * S -> S
a : S
b : S
c : S
d : S
e : S
f : S
rules
REC_E: nullary_function() -> nullary_constructor()
REC_E: unary_function (x) -> unary_constructor (x)
REC_E: nary_function (x, y, z) -> nary_constructor (x, y, z)
REC_E: a() -> nullary_constructor()
REC_E: b() -> unary_constructor (nullary_constructor())
REC_E: c() -> nary_constructor (nullary_constructor(), nullary_constructor(), nullary_constructor())
REC_E: d() -> nullary_function()
REC_E: e() -> unary_function (nullary_function())
REC_E: f() -> nary_function (nullary_function(), nullary_function(), nullary_function())
strategies
eval = memo (innermost (REC_E))
eval-and-print = eval ; debug (!"result = ")
main =
(<eval-and-print> nullary_constructor()) ;
(<eval-and-print> unary_constructor (nullary_constructor())) ;
(<eval-and-print> nary_constructor (nullary_constructor(), nullary_constructor(), nullary_constructor())) ;
(<eval-and-print> nullary_function()) ;
(<eval-and-print> unary_function (nullary_function())) ;
(<eval-and-print> nary_function (nullary_function(), nullary_function(), nullary_function())) ;
<exit> 0
"""
# Render problemSize for display: -1 (an unscaled problem) becomes "".
df["Param: problemSize"].astype("str").mask(df["Param: problemSize"]==-1, "")
# +
import re
import os

# Add number of unique constructors to problems
# cons_regex_pat = r"\bconstructors\b\W*(?:\s*(\w*)\s*:.*\s*)*\W*\brules\b"
# Matches everything between a `constructors` line and the next `rules` line.
cons_area_regex = r"^\s*constructors\s*$((?:.*\n)*)^\s*rules\s*$"
cons_area_pat = re.compile(cons_area_regex, re.MULTILINE)
# A single constructor declaration: "name : signature".
cons_regex = r"\w+\s*:.*\S"
cons_pat = re.compile(cons_regex)

def find_constructors(p):
    """Return the set of constructor declarations ("name : signature")
    found in the `constructors` ... `rules` section(s) of a Stratego
    program text `p`."""
    cons_areas = cons_area_pat.findall(p)
    cons_groups = list(map(cons_pat.findall, cons_areas))
    conses = set().union(*cons_groups)
    return conses

def count_constructors_in_file(program_name):
    """Count distinct constructor declarations in
    ../src/main/resources/<program_name>.str2, or NaN if unreadable."""
    p_program = os.path.join("..", "src", "main", "resources", program_name + ".str2")
    try:
        with open(p_program, 'r') as f_program:
            contents = f_program.read()
        conses = find_constructors(contents)
        return len(conses)
    except OSError:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt and genuine programming errors; only I/O
        # failures should map to NaN.
        return np.nan
# Derive "Number of constructors": map each problem name (lowercased, plus
# its size suffix — empty for unscaled -1) to a count read from its source.
df["Number of constructors"] = (df["Problem"].str.lower() + df["Param: problemSize"].astype("str").mask(df["Param: problemSize"]==-1, "")).map(count_constructors_in_file)
# -
## Plot-specific settings
# Per-(problem, stage) axis settings applied by configure_grid; entries are
# applied in order, so later matching entries override earlier ones.
configs = [
    {
        "problems": {"Benchexpr", "Benchsym", "Benchtree", "Bubblesort", "Factorial", "Fibonacci", "Hanoi", "Mergesort", "Quicksort", "Sieve"},
        "stages": {"run"},
        "settings": {"yscale": "log"}
    },
    {
        # Apply to all problems
        "stages": {"compileStratego", "compileJava", "Java space", "Class space"},
        "settings": {"ylim": (0, None)}
    },
    {
        "stages": {"Java space", "Class space"},
        "settings": {"ylabel": "Size (bytes)"}
    }
]
# +
from pandas.core.common import flatten

def configure_grid(g: sns.FacetGrid, problem=None):
    """Apply global and per-(problem, stage) axis settings from `configs`
    to a seaborn FacetGrid; `problem` supplies the problem name when the
    grid is faceted by column only."""
    ## Global settings
    g.set_ylabels("Time (s)")
    g.set_xlabels("Input size")
    for ax in flatten(g.axes):
        ax.get_xaxis().set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
    # Specific settings
    # Grids faceted by (row, col) have 2-tuple keys; unpacking a col-only
    # grid's scalar keys raises ValueError, handled below.
    try:
        for (row_val, col_val), ax in g.axes_dict.items():
            for config in configs:
                if "problems" not in config or row_val in config["problems"]:
                    if "stages" not in config or col_val in config["stages"]:
                        ax.set(**config["settings"])
    except (ValueError):
        for col_val, ax in g.axes_dict.items():
            for config in configs:
                if "problems" not in config or problem in config["problems"]:
                    if "stages" not in config or col_val in config["stages"]:
                        ax.set(**config["settings"])
    finally:
        # NOTE(review): returning from `finally` swallows any exception
        # raised inside the except branch above.
        return g
# -
# Scaled problems only: exclude the unscaled (-1) runs and BenchNullary.
df_scaledproblem = df[(df["Param: problemSize"] != -1) & (df["Problem"] != "BenchNullary")]
# +
# Score vs. problem size, faceted by (problem row, stage column).
g = configure_grid(sns.relplot(
    data=df_scaledproblem,
    x="Param: problemSize",
    y="Score",
    row="Problem",
    row_order=np.sort(np.unique(df_scaledproblem["Problem"].values)),
    col="Stage",
    col_order=[s for s in ["compileStratego", "Java space", "compileJava", "Class space", "run"] if s in df_scaledproblem["Stage"].values],
    style="Param: switchImplementation",
    hue="Param: optimisationLevel",
    hue_order=["2", "3", "4"],
    kind="line",
    # err_style="band",
    markers=True,
    facet_kws=dict(
        sharex=False,
        sharey=False,
    ),
))
# TODO Include strj runtime -> with and without fusion
# Save figure to file
plt.savefig(f"{PLOT_DIR}/{'_'.join(results_files)}-allstages.png")
# +
# Runtimes for DFA switch backends (per problem)
cols = np.sort(np.unique(df_scaledproblem["Problem"].values))
g = configure_grid(sns.relplot(
    data=df_scaledproblem[df_scaledproblem["Stage"] == "run"],
    x="Param: problemSize",
    y="Score",
    col="Problem",
    col_order=cols,
    col_wrap=min(3, len(cols)),
    # style="Param: switchImplementation",
    hue="Param: switchImplementation",
    # hue_order=["2", "3", "4"],
    kind="line",
    # err_style="band",
    markers=True,
    facet_kws=dict(
        sharex=False,
        sharey=False,
    ),
))
g.set(yscale="log")
# TODO Include strj runtime -> with and without fusion
# Save figure to file
plt.savefig(f"{PLOT_DIR}/{'_'.join(results_files)}-runtime.png")
# +
# Pivot to one column per switch implementation so runtimes can be compared
# directly; the index becomes (Benchmark, problemSize, #constructors, Problem).
df_ratios = df[df["Stage"] == "run"].pivot(index=["Param: switchImplementation"], columns=["Benchmark", "Param: problemSize", "Number of constructors", "Problem"], values="Score").T
# df_ratios["Ratio"] = df_ratios["elseif"] / df_ratios["switch"]
# Slowdown/speedup of each switch backend relative to the elseif baseline.
df_ratios["Ratio [nested]"] = df_ratios["nested-switch"] / df_ratios["elseif"]
df_ratios["Ratio [hash]"] = df_ratios["hash-switch"] / df_ratios["elseif"]
df_ratios.sort_values("Ratio [hash]")
# df_ratios.columns.to_series().apply(''.join)
# df[df[""]]
# -
df_ratios_flat = df_ratios.melt(value_vars=["Ratio [hash]"], value_name="Ratio", ignore_index=False)
# df_ratios_flat.index = df_ratios_flat.index.to_flat_index()
# df_ratios_flat.reset_index()
df_ratios_flat.index
# +
# Runtimes for DFA switch backends (against number of constructors)
g = configure_grid(sns.relplot(
    data=df_ratios,
    x="Number of constructors",
    y="Ratio [hash]",
    style="Problem",
    hue="Problem",
    size="Param: problemSize",
    kind="scatter",
    markers=True,
    facet_kws=dict(
        sharex=False,
        sharey=False,
    ),
))
# g.set(yscale="log")
g.set_ylabels("Ratio of runtime [hash-switch] over runtime [elseif]")
g.set_xlabels("# of constructors in program")
# TODO Include strj runtime -> with and without fusion
# Save figure to file
plt.savefig(f"{PLOT_DIR}/{'_'.join(results_files)}-backend-constructors.png")
# +
# g = sns.boxplot(
#     data=df_ratios,
#     x="Number of constructors",
#     y="Ratio",
#     hue="Problem"
# )
df_ratios.columns#.to_flat_index()
# df_ratios.columns.get_level_values(0) + '_' + df_ratios.columns.get_level_values(1)
# -
# Detailed per-problem grids for three representative problems.
for problem in ["Benchexpr", "Bubblesort", "Hanoi"]:
    df_problem = df_scaledproblem[df_scaledproblem["Problem"] == problem]
    cols = [s for s in ["compileStratego", "compileJava", "run", "Java space", "Class space"] if s in df_problem["Stage"].values]
    g = configure_grid(sns.relplot(
        data=df_problem,
        x="Param: problemSize",
        y="Score",
        # row="Problem",
        # row_order=np.sort(np.unique(df_problem["Problem"].values)),
        col_wrap=min(3, len(cols)),
        col="Stage",
        col_order=cols,
        style="Param: switchImplementation",
        hue="Param: optimisationLevel",
        hue_order=[l for l in ["2", "3", "4"] if l in df_problem["Param: optimisationLevel"].values],
        kind="line",
        # err_style="band",
        markers=True,
        facet_kws=dict(
            sharex=False,
            sharey=False,
        ),
    ), problem)
    # Save figure to file
    plt.savefig(f"{PLOT_DIR}/{'_'.join(results_files)}-{problem}.png")
# +
# Unscaled (fixed-size) problems: emit a LaTeX table and show the frame.
df_unscaledproblem = df[df["Param: problemSize"] == -1]
print(df_unscaledproblem.to_latex(
    columns=["Problem", "Stage", "Samples", "Score", "Score Error (99.9%)", "Param: optimisationLevel", "Param: switchImplementation"]
))
df_unscaledproblem
# +
# Bar chart of unscaled problems, restricted to O2 or the hash-switch backend.
g = sns.catplot(
    data = df_unscaledproblem.loc[(df_unscaledproblem["Param: optimisationLevel"] == "2") | (df_unscaledproblem["Param: switchImplementation"] == "hash-switch")],
    x = "Problem",
    y = "Score",
    row = "Stage",
    hue = "Param: optimisationLevel",
    kind = "bar",
    sharey=False,
)
g.set_ylabels("Time (s)")
plt.xticks(rotation=90)
plt.savefig(f"{PLOT_DIR}/{'_'.join(results_files)}-unscaled.png")
# -
| stratego.build.spoofax2.benchmark/analysis/Example plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/parekhakhil/pyImageSearch/blob/main/1101_contrastive_loss_keras.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="bSIAjNLqI5LI"
# # Contrastive Loss for Siamese Networks with Keras and TensorFlow
#
#
# + [markdown] id="viKSbM-CJDkK"
#
# This notebook is associated with the [Contrastive Loss for Siamese Networks with Keras and TensorFlow](https://www.pyimagesearch.com/2021/01/18/contrastive-loss-for-siamese-networks-with-keras-and-tensorflow/) blog post published on 01-18-21.
#
# Only the code for the blog post is here. Most codeblocks have a 1:1 relationship with what you find in the blog post with two exceptions: (1) Python classes are not separate files as they are typically organized with PyImageSearch projects, and (2) Command Line Argument parsing is replaced with an `args` dictionary that you can manipulate as needed.
#
# We recommend that you execute (press ▶️) the code block-by-block, as-is, before adjusting parameters and `args` inputs. Once you've verified that the code is working, you are welcome to hack with it and learn from manipulating inputs, settings, and parameters. For more information on using Jupyter and Colab, please refer to these resources:
#
# * [Jupyter Notebook User Interface](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html#notebook-user-interface)
# * [Overview of Google Colaboratory Features](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)
#
#
#
# Happy hacking!
#
#
# <hr>
#
# + [markdown] id="uX9B9dPlJO9k"
# ### Download the code zip file
# + id="dwa3GuDLIZA3"
# !wget https://pyimagesearch-code-downloads.s3-us-west-2.amazonaws.com/contrastive-loss-keras/contrastive-loss-keras.zip
# !unzip -qq contrastive-loss-keras.zip
# %cd contrastive-loss-keras
# + [markdown] id="C5V8gTAZJhhn"
# ## Blog Post Code
# + [markdown] id="PAxelcDxJjie"
# ### Import Packages
# + id="FkyV3l7NJbfE"
# import the necessary packages
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import MaxPooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Lambda
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import load_model
from imutils.paths import list_images
import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
import cv2
import os
# + [markdown] id="uiKV4G4oN7pU"
# ### Our `Config` class
# + id="D3AADGAoN9nm"
class config:
    """Hyper-parameters and output paths for the contrastive-loss siamese run."""
    # specify the shape of the inputs for our network (MNIST: 28x28 grayscale)
    IMG_SHAPE = (28, 28, 1)
    # specify the batch size and number of epochs
    BATCH_SIZE = 64
    EPOCHS = 100
    # define the path to the base output directory
    BASE_OUTPUT = "output"
    # use the base output path to derive the path to the serialized
    # model along with training history plot
    MODEL_PATH = os.path.sep.join([BASE_OUTPUT,
        "contrastive_siamese_model"])
    PLOT_PATH = os.path.sep.join([BASE_OUTPUT,
        "contrastive_plot.png"])
# + [markdown] id="OHHo29PxOXXX"
# ### Creating our helper utility functions
# + id="Ku0MVG5jOJ5H"
def make_pairs(images, labels):
    """Build (anchor, other) image pairs for siamese training.

    For every image, emit one positive pair (same class, pair label 1)
    immediately followed by one negative pair (different class, label 0).
    Returns a 2-tuple (pairImages, pairLabels) of numpy arrays.
    """
    pair_images = []
    pair_labels = []

    # Per-class index lists: per_class[c] holds every position with label c.
    n_classes = len(np.unique(labels))
    per_class = [np.where(labels == c)[0] for c in range(0, n_classes)]

    for anchor_idx, anchor in enumerate(images):
        anchor_label = labels[anchor_idx]

        # Positive partner: random draw from the anchor's own class.
        positive = images[np.random.choice(per_class[anchor_label])]
        pair_images.append([anchor, positive])
        pair_labels.append([1])

        # Negative partner: random draw from any other class.
        other_indices = np.where(labels != anchor_label)[0]
        negative = images[np.random.choice(other_indices)]
        pair_images.append([anchor, negative])
        pair_labels.append([0])

    return (np.array(pair_images), np.array(pair_labels))
# + id="SH_pZEYdOpDr"
def euclidean_distance(vectors):
    """Batched Euclidean distance between two feature tensors, clamped away
    from zero with K.epsilon() so the sqrt stays numerically stable."""
    featsA, featsB = vectors
    # Sum of squared component differences, keeping the batch dimension.
    squared_sum = K.sum(K.square(featsA - featsB), axis=1,
        keepdims=True)
    return K.sqrt(K.maximum(squared_sum, K.epsilon()))
# + id="pnPq25r4OsSF"
def plot_training(H, plotPath):
    """Plot train/validation loss curves from a Keras History object and
    save the figure to plotPath."""
    plt.style.use("ggplot")
    plt.figure()
    # Draw both loss series from the fit() history dictionary.
    for series, tag in (("loss", "train_loss"), ("val_loss", "val_loss")):
        plt.plot(H.history[series], label=tag)
    plt.title("Training Loss")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss")
    plt.legend(loc="lower left")
    plt.savefig(plotPath)
# + [markdown] id="BNxC3f7uOy7U"
# ### Implementing our siamese network architecture
# + id="RoLsUgB8Ou97"
def build_siamese_model(inputShape, embeddingDim=48):
    """Build the shared feature-extractor CNN for the siamese network.

    inputShape -- shape of a single input image, e.g. (28, 28, 1).
    embeddingDim -- dimensionality of the output embedding vector.
    Returns a keras Model mapping an image to its embedding.
    """
    # specify the inputs for the feature extractor network
    inputs = Input(inputShape)
    # define the first set of CONV => RELU => POOL => DROPOUT layers
    x = Conv2D(64, (2, 2), padding="same", activation="relu")(inputs)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.3)(x)
    # second set of CONV => RELU => POOL => DROPOUT layers
    x = Conv2D(64, (2, 2), padding="same", activation="relu")(x)
    x = MaxPooling2D(pool_size=2)(x)
    x = Dropout(0.3)(x)
    # prepare the final outputs: global pooling then a linear embedding
    pooledOutput = GlobalAveragePooling2D()(x)
    outputs = Dense(embeddingDim)(pooledOutput)
    # build the model
    model = Model(inputs, outputs)
    # return the model to the calling function
    return model
# + [markdown] id="qR4xZ4yIPBLc"
# ### Implementing contrastive loss with Keras and TensorFlow
# + id="YpnwyEG7O9rL"
def contrastive_loss(y, preds, margin=1):
    """Contrastive loss for siamese distance outputs.

    y      : pair labels (1 = positive pair, 0 = negative pair, per make_pairs).
    preds  : predicted distances between the paired embeddings.
    margin : distance beyond which negative pairs contribute no loss.
    """
    # cast labels to the prediction dtype so TensorFlow does not error out on
    # mixed-dtype arithmetic
    y = tf.cast(y, preds.dtype)
    # positive pairs are pulled together, negatives pushed past the margin
    positive_term = y * K.square(preds)
    negative_term = (1 - y) * K.square(K.maximum(margin - preds, 0))
    return K.mean(positive_term + negative_term)
# + [markdown] id="VJXznmckPO6o"
# ### Creating our contrastive loss training script
# + id="JHE95JUpPMQV"
# load MNIST dataset and scale the pixel values to the range of [0, 1]
print("[INFO] loading MNIST dataset...")
(trainX, trainY), (testX, testY) = mnist.load_data()
trainX = trainX / 255.0
testX = testX / 255.0
# add a channel dimension to the images
# (MNIST loads as (N, 28, 28); Conv2D expects (N, H, W, C))
trainX = np.expand_dims(trainX, axis=-1)
testX = np.expand_dims(testX, axis=-1)
# prepare the positive and negative pairs
print("[INFO] preparing positive and negative pairs...")
(pairTrain, labelTrain) = make_pairs(trainX, trainY)
(pairTest, labelTest) = make_pairs(testX, testY)
# + id="AESdW43tPc3J"
# configure the siamese network
print("[INFO] building siamese network...")
imgA = Input(shape=config.IMG_SHAPE)
imgB = Input(shape=config.IMG_SHAPE)
# both branches run through the SAME feature extractor instance, so the
# weights are shared (the defining property of a siamese network)
featureExtractor = build_siamese_model(config.IMG_SHAPE)
featsA = featureExtractor(imgA)
featsB = featureExtractor(imgB)
# finally, construct the siamese network: the output is the Euclidean
# distance between the two embeddings
distance = Lambda(euclidean_distance)([featsA, featsB])
model = Model(inputs=[imgA, imgB], outputs=distance)
# + id="xTHOBs1JPpF0"
# compile the model
# (contrastive loss needs only an optimizer; no accuracy-style metric applies
# because the model outputs a distance, not a class score)
print("[INFO] compiling model...")
model.compile(loss=contrastive_loss, optimizer="adam")
# train the model on the (imageA, imageB) -> pair-label data
print("[INFO] training model...")
history = model.fit(
    [pairTrain[:, 0], pairTrain[:, 1]], labelTrain[:],
    validation_data=([pairTest[:, 0], pairTest[:, 1]], labelTest[:]),
    batch_size=config.BATCH_SIZE,
    epochs=config.EPOCHS)
# serialize the model to disk
print("[INFO] saving siamese model...")
model.save(config.MODEL_PATH)
# plot the training history
print("[INFO] plotting training history...")
plot_training(history, config.PLOT_PATH)
# + [markdown] id="XZc9P9qpQJN0"
# ### Implementing our contrastive loss test script
#
# + id="IcvtheutPxPD"
# # construct the argument parser and parse the arguments
# ap = argparse.ArgumentParser()
# ap.add_argument("-i", "--input", required=True,
# 	help="path to input directory of testing images")
# args = vars(ap.parse_args())
# since we are using Jupyter Notebooks we can replace our argument
# parsing code with *hard coded* arguments and values
# "input": directory containing the example test images
args = {
    "input": "examples"
}
# + id="Oc4ittsRQnoI"
# grab the test dataset image paths and then randomly generate a
# total of 10 image pairs
print("[INFO] loading test dataset...")
testImagePaths = list(list_images(args["input"]))
# fixed seed so the same 10 pairs are drawn on every run
np.random.seed(42)
# NOTE(review): np.random.choice samples with replacement, so a pair may
# occasionally contain the same image twice — confirm this is acceptable
pairs = np.random.choice(testImagePaths, size=(10, 2))
# load the model from disk
# (compile=False: inference only, so the custom loss need not be restored)
print("[INFO] loading siamese model...")
model = load_model(config.MODEL_PATH, compile=False)
# + id="2tUZMx1SQvaJ"
# loop over all image pairs
# loop over all image pairs
for (i, (pathA, pathB)) in enumerate(pairs):
    # read both images as grayscale and keep untouched copies for display
    imageA = cv2.imread(pathA, 0)
    imageB = cv2.imread(pathB, 0)
    origA, origB = imageA.copy(), imageB.copy()
    # reshape to (1, H, W, 1) — batch and channel dims — and scale to [0, 1]
    imageA = imageA[np.newaxis, :, :, np.newaxis] / 255.0
    imageB = imageB[np.newaxis, :, :, np.newaxis] / 255.0
    # use our siamese model to predict the distance between the embeddings;
    # smaller distance means the images more likely share a class
    preds = model.predict([imageA, imageB])
    proba = preds[0][0]
    # show the two images side by side with the distance as the title
    fig = plt.figure("Pair #{}".format(i + 1), figsize=(4, 2))
    plt.suptitle("Distance: {:.2f}".format(proba))
    for panel, img in enumerate((origA, origB), start=1):
        fig.add_subplot(1, 2, panel)
        plt.imshow(img, cmap=plt.cm.gray)
        plt.axis("off")
    plt.show()
# + [markdown] id="0q1x1gzOLEHB"
# For a detailed walkthrough of the concepts and code, be sure to refer to the full tutorial, [*Contrastive Loss for Siamese Networks with Keras and TensorFlow*](https://www.pyimagesearch.com/2021/01/18/contrastive-loss-for-siamese-networks-with-keras-and-tensorflow/) blog post published on 01-18-21.
| 1101_contrastive_loss_keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # pre processing all current raw data for action id process (Step 4, 5,6)
# steps:
# 1 - Organize all files by user (with related information)
# 2 - merge all related files per user
# 3 - sort by timestamp
# 4 - **decide on the "action key" mapping (e.g. 1: logon, 2: logoff)**
# 5 - **break the log into action keys (originally done with auto-generated non-repeating numeric IDs from the parsed log type)**
# 6 - **start a new line on "logoff" (originally done with block_id, which does not exist here)**
# + pycharm={"name": "#%%\n"}
import numpy as np
import pandas as pd
import os
from tqdm import tqdm

# input directory with the r5.2 dataset's LDAP listings
# (presumably the CERT insider-threat r5.2 release — confirm)
LDAP_dir = "./r5.2/LDAP"
# intermediate per-user files produced by the earlier pre-processing steps
Processed_dir = "./pre_processed"
# destination directory for the final per-user log-key sequences
result_dir = "./processed_log"
# merged, per-user raw event file used for the single-user test below
temp_test = "./test_final_raw.csv"
# + pycharm={"name": "#%%\n"}
#"action key" the element of LogKey
# Mapping from a raw event's action name to the single-character action ID
# used to build the log-key sequences (pipeline step 4).
ACTION_DIR = {"Logon": "1",
              "Logoff": "2",
              "Connect": "3",
              "Disconnect": "4",
              "http": "5",
              "file": "6",
              "email": "7"}
# + pycharm={"name": "#%%\n"}
# quick sanity check of the mapping
ACTION_DIR["email"]
# + [markdown] pycharm={"name": "#%% md\n"}
# test with one user
# + pycharm={"name": "#%%\n"}
# load the merged raw event log for a single user
test_file = pd.read_csv(temp_test)
test_file
# test_file.shape()
# + pycharm={"name": "#%%\n"}
# drop the unnamed index column left over from the earlier CSV export
test_file = test_file.drop(columns=["Unnamed: 0"])
test_file
# + pycharm={"name": "#%%\n"}
# order events chronologically before emitting the action-key sequence
test_file = test_file.sort_values(by=['date'])
# + pycharm={"name": "#%%\n"}
# Emit the log-key sequence: one space-separated action ID per event, with a
# newline after every "Logoff" so each session occupies its own line
# (pipeline steps 5 and 6).
with open("./test_logKey.txt","w") as out_file:
    for _, row in tqdm(test_file.iterrows()):
        # write() instead of writelines(): writelines() expects an iterable of
        # strings, and passing a plain string only worked by accident because
        # a string iterates character-by-character
        out_file.write(ACTION_DIR[row.action] + ' ')
        if row.action == "Logoff":
            out_file.write('\n')
# + pycharm={"name": "#%%\n"}
| action_id_finalstep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
import pandas as pd
import datetime as dt
from IPython.display import HTML

#importing the CSV file
# load the city weather data and index it by the City_ID column
cities= pd.read_csv ("Resources/cities.csv")
cities.set_index("City_ID", inplace = True)
cities
# NOTE(review): `cities` is already a DataFrame, so this copy is redundant
cities_df = pd.DataFrame(data= cities)
cities_df
cities_df
# Converting Data Frame to HTML
# Bootstrap table classes so the generated table matches the site styling
cities_new = cities_df.to_html(classes=["table", "table-bordered", "table-striped", "table-hover"])
cities_new
#exporting the html file
cities_df.to_html('Resources/cities_data.html', index= False)
| WebVisualizations/cities.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 20 - Example Simulation - I Beams
#
# This Journal shows how to model a site including I beams. While the I Beams can be added to the module unit, doing it for the whole row reduces the amount of geometries in the scene and makes raytracing faster.
#
# This particular geometry corresponds to the fixed tilt array in Bethesda, Maryland, studied for degradation in <i> <NAME>, <NAME> , <NAME> , <NAME> , <NAME> , <NAME> , <NAME> , <NAME> , <NAME> , <NAME>, Differential degradation patterns of photovoltaic backsheets at the array level, Solar Energy 63 (2018) 62-69. </i>
#
# It is a 5 row, 5-up landscape site (except for the last row which is 4-up, but we are not modeling that here). There are 48 5-up collectors or 'columns' (as bifacial_radiacne and the journal calls them, respectively). Tilt is 20 degrees, ground clearance is 0.9m.
#
#
# 
# +
import os
from pathlib import Path

# all simulation artifacts are written under bifacial_radiance/TEMP/Tutorial_20
testfolder = Path().resolve().parent.parent / 'bifacial_radiance' / 'TEMP' / 'Tutorial_20'

if not os.path.exists(testfolder):
    os.makedirs(testfolder)

# Another option using relative address; for some operative systems you might need '/' instead of '\'
# testfolder = os.path.abspath(r'..\..\bifacial_radiance\TEMP')
print ("Your simulation will be stored in %s" % testfolder)
# +
import bifacial_radiance as br
import numpy as np

br.__version__
# -
# create the Radiance scene object rooted at the temp folder
radObj = br.RadianceObj('tutorial_20', path=str(testfolder))
# download an EPW weather file for the nearest station to the given coordinates
epwfile = radObj.getEPW(lat = 37.5, lon = -77.6) # This location corresponds to Richmond, VA.
radObj.readWeatherFile(epwfile)
# ground albedo of 0.14
radObj.setGround(0.14)
# generate the sky for a single timestamp (index 2819 of the weather data)
radObj.gendaylit(2819)
# +
# module dimensions below correspond to the Sharp NU-U235F2 panel
moduletype='test-module' # Data sizes below are for 'Sharp_NU-U235F2'
x=1.64       # module long side (m)
y=0.994      # module short side (m)
xgap = 0.046 # gap between modules along the row (m)
zgap = 0
ygap = 0
numpanels=5  # 5-up landscape collector

NIST_Collector = radObj.makeModule(name=moduletype,x=x, y=y, numpanels=numpanels,
                                   xgap=xgap, ygap = ygap, zgap=zgap)
# +
# 20-degree fixed tilt, 0.9 m clearance, 48 collectors per row, 5 rows
sceneDict = {'tilt':20, 'pitch':9.4488, 'clearance_height':0.9,
             'azimuth':180, 'nMods':48, 'nRows':5}

sceneObj = radObj.makeScene(NIST_Collector, sceneDict=sceneDict)
# -
# # Adding the I-Beams
#
# Here is where the magic happens. We will calculate the row length (number of modules times the collector x value plus the xgaps between), and we will also calculate the spacing between the beams across the collector width so that the beams are placed at the start of the collector and then between each module, just like in the image (5 modules = 6 beams).
# +
beam_count = 6          # one beam per module edge on a 5-up collector => 6 beams
beam_mat = 'Metal_Grey'
# full row length: module widths plus the x-gaps between them
beam_len = sceneDict['nMods']*NIST_Collector.x + (sceneDict['nMods']-1)*NIST_Collector.xgap
beam_len = round(beam_len,0)
# evenly spaced beam positions across the collector width (top edge to bottom
# edge); use beam_count here instead of the previously hard-coded 6 so the
# variable and the geometry cannot drift apart
beam_ydist = np.linspace(NIST_Collector.sceney/2, 0-NIST_Collector.sceney/2, beam_count)

# by photograph approximation
beam_cap = {'len':beam_len, 'height':0.02, 'width':0.12}  # I-beam flange
beam_ctr = {'len':beam_len, 'height':0.30, 'width':0.02}  # I-beam web

print(f'Beam Length: {beam_len} meters')
print(f'Vertical Distribution: {beam_ydist}')
# -
# We will use makeCustomObject like in previous journal examples and appendtoScene the IBeams.
#
# Note that the IBeams geometry is being generated:
# <ol>
# <li> Generate the geometry (genbox)</li>
# <li> Translate the beam so that the center of the world (0,0,0) is positioned at the beam's center</li>
# <li> Tilt by the angle of the array,</li>
# <li> Then move to the correct clearance height and position accross the collector width calculated above.</li>
# </ol>
# Place the I-beams: for every row, build each beam from three genbox
# primitives (top cap, bottom cap, center web), tilt them with the array and
# append them to the scene description.
rows = sceneDict['nRows']
# symmetric row offsets around the array center, in multiples of the pitch
offsetMultiplier = np.linspace(-(rows//2),(rows//2),rows)

for row in range(0,sceneDict['nRows']):
    offset = offsetMultiplier[row]*sceneDict['pitch']
    customObjects = []  # NOTE(review): collected but never used after the loop
    for pos in beam_ydist:
        count = list(beam_ydist).index(pos)
        name = f'BEAM_r{row}_c{count}'
        # ground-plane (y) and height (z) displacement of this beam once the
        # collector tilt is applied
        ydisp = pos * np.cos(sceneDict['tilt']*np.pi/180.0) + offset
        zdisp = np.sin(sceneDict['tilt']*np.pi/180.0) * (pos-beam_ydist[-1]) + sceneDict['clearance_height'] - .05
        # top cap: genbox, translate so (0,0,0) is the beam center, tilt by
        # the array angle, then move to the final y/z position
        text = '! genbox {} beamTop{} {} {} {} | xform -t {} {} 0 | xform -rx {} | xform -t 0 {} {}'.format(
            beam_mat, count,
            beam_cap['len'], beam_cap['width'], beam_cap['height'],
            -beam_cap['len']/2+.8, -beam_cap['width']/2,
            sceneDict['tilt'],
            ydisp, zdisp)
        # bottom cap, shifted down along the tilted normal by the web height
        # NOTE(review): beamBot reuses the beam_cap dimensions — confirm the
        # bottom flange is meant to match the top flange exactly
        text+= '\r\n! genbox {} beamBot{} {} {} {} | xform -t {} {} 0 | xform -rx {} | xform -t 0 {} {}'.format(
            beam_mat, count,
            beam_cap['len'], beam_cap['width'], beam_cap['height'],
            -beam_cap['len']/2+.8, -beam_cap['width']/2,
            sceneDict['tilt'],
            ydisp + beam_ctr['height']*np.cos(np.pi/2 - np.pi*sceneDict['tilt']/180.0), zdisp - beam_ctr['height'])
        # center web connecting the two caps
        text+= '\r\n! genbox {} beamCtr{} {} {} {} | xform -t {} {} {} | xform -rx {} | xform -t 0 {} {}'.format(
            beam_mat, count,
            beam_ctr['len'], beam_ctr['width'], beam_ctr['height'],
            -beam_ctr['len']/2+.8, -beam_ctr['width']/2, beam_cap['height'],
            sceneDict['tilt'],
            ydisp + beam_ctr['height']*np.cos(np.pi/2 - np.pi*sceneDict['tilt']/180.0), zdisp - beam_ctr['height'])
        customObj = radObj.makeCustomObject(name,text)
        customObjects.append(customObj)
        radObj.appendtoScene(radfile=sceneObj.radfiles, customObject=customObj, text="!xform -rz 0")

# compile the full scene (modules + beams) into an octree for raytracing
radObj.makeOct()
# ## View your geometry
#
# You can view your geometry by uncommenting the line below (or copy-pasting it into a terminal after you navigate to the folder that contains your simulation)
# +
# #!rvu -vf views\front.vp -e .01 -vp -49 -10 0.9 -vd 0.6879 0.7119 -0.1411 IBeam.oct
| docs/tutorials/20 - Example Simulation - I Beams.ipynb |
# # Full configuration interaction theory
#
#
#
#
# ## Slater determinants as basis states, Repetition
# The simplest possible choice for many-body wavefunctions are **product** wavefunctions.
# That is
# $$
# \Psi(x_1, x_2, x_3, \ldots, x_A) \approx \phi_1(x_1) \phi_2(x_2) \phi_3(x_3) \ldots
# $$
# because we are really only good at thinking about one particle at a time. Such
# product wavefunctions, without correlations, are easy to
# work with; for example, if the single-particle states $\phi_i(x)$ are orthonormal, then
# the product wavefunctions are easy to orthonormalize.
#
# Similarly, computing matrix elements of operators are relatively easy, because the
# integrals factorize.
#
#
# The price we pay is the lack of correlations, which we must build up by using many, many product
# wavefunctions. (Thus we have a trade-off: compact representation of correlations but
# difficult integrals versus easy integrals but many states required.)
#
#
#
#
# ## Slater determinants as basis states, repetition
# Because we have fermions, we are required to have antisymmetric wavefunctions, e.g.
# $$
# \Psi(x_1, x_2, x_3, \ldots, x_A) = - \Psi(x_2, x_1, x_3, \ldots, x_A)
# $$
# etc. This is accomplished formally by using the determinantal formalism
# $$
# \Psi(x_1, x_2, \ldots, x_A)
# = \frac{1}{\sqrt{A!}}
# \det \left |
# \begin{array}{cccc}
# \phi_1(x_1) & \phi_1(x_2) & \ldots & \phi_1(x_A) \\
# \phi_2(x_1) & \phi_2(x_2) & \ldots & \phi_2(x_A) \\
# \vdots & & & \\
# \phi_A(x_1) & \phi_A(x_2) & \ldots & \phi_A(x_A)
# \end{array}
# \right |
# $$
# Product wavefunction + antisymmetry = Slater determinant.
#
#
#
#
# ## Slater determinants as basis states
# $$
# \Psi(x_1, x_2, \ldots, x_A)
# = \frac{1}{\sqrt{A!}}
# \det \left |
# \begin{array}{cccc}
# \phi_1(x_1) & \phi_1(x_2) & \ldots & \phi_1(x_A) \\
# \phi_2(x_1) & \phi_2(x_2) & \ldots & \phi_2(x_A) \\
# \vdots & & & \\
# \phi_A(x_1) & \phi_A(x_2) & \ldots & \phi_A(x_A)
# \end{array}
# \right |
# $$
# Properties of the determinant (interchange of any two rows or
# any two columns yields a change in sign; thus no two rows and no
# two columns can be the same) lead to the Pauli principle:
#
# * No two particles can be at the same place (two columns the same); and
#
# * No two particles can be in the same state (two rows the same).
#
#
#
#
#
# ## Slater determinants as basis states
# As a practical matter, however, Slater determinants beyond $N=4$ quickly become
# unwieldy. Thus we turn to the **occupation representation** or **second quantization** to simplify calculations.
#
# The occupation representation or number representation, using fermion **creation** and **annihilation**
# operators, is compact and efficient. It is also abstract and, at first encounter, not easy to
# internalize. It is inspired by other operator formalism, such as the ladder operators for
# the harmonic oscillator or for angular momentum, but unlike those cases, the operators **do not have coordinate space representations**.
#
# Instead, one can think of fermion creation/annihilation operators as a game of symbols that
# compactly reproduces what one would do, albeit clumsily, with full coordinate-space Slater
# determinants.
#
#
#
#
# ## Quick repetition of the occupation representation
# We start with a set of orthonormal single-particle states $\{ \phi_i(x) \}$.
# (Note: this requirement, and others, can be relaxed, but leads to a
# more involved formalism.) **Any** orthonormal set will do.
#
# To each single-particle state $\phi_i(x)$ we associate a creation operator
# $\hat{a}^\dagger_i$ and an annihilation operator $\hat{a}_i$.
#
# When acting on the vacuum state $| 0 \rangle$, the creation operator $\hat{a}^\dagger_i$ causes
# a particle to occupy the single-particle state $\phi_i(x)$:
# $$
# \phi_i(x) \rightarrow \hat{a}^\dagger_i |0 \rangle
# $$
# ## Quick repetition of the occupation representation
# But with multiple creation operators we can occupy multiple states:
# $$
# \phi_i(x) \phi_j(x^\prime) \phi_k(x^{\prime \prime})
# \rightarrow \hat{a}^\dagger_i \hat{a}^\dagger_j \hat{a}^\dagger_k |0 \rangle.
# $$
# Now we impose antisymmetry, by having the fermion operators satisfy **anticommutation relations**:
# $$
# \hat{a}^\dagger_i \hat{a}^\dagger_j + \hat{a}^\dagger_j \hat{a}^\dagger_i
# = [ \hat{a}^\dagger_i ,\hat{a}^\dagger_j ]_+
# = \{ \hat{a}^\dagger_i ,\hat{a}^\dagger_j \} = 0
# $$
# so that
# $$
# \hat{a}^\dagger_i \hat{a}^\dagger_j = - \hat{a}^\dagger_j \hat{a}^\dagger_i
# $$
# ## Quick repetition of the occupation representation
# Because of this property, automatically $\hat{a}^\dagger_i \hat{a}^\dagger_i = 0$,
# enforcing the Pauli exclusion principle. Thus when writing a Slater determinant
# using creation operators,
# $$
# \hat{a}^\dagger_i \hat{a}^\dagger_j \hat{a}^\dagger_k \ldots |0 \rangle
# $$
# each index $i,j,k, \ldots$ must be unique.
#
# For some relevant exercises with solutions see chapter 8 of [Lecture Notes in Physics, volume 936](http://www.springer.com/us/book/9783319533353).
#
#
#
#
#
# ## Full Configuration Interaction Theory
# We have defined the ansatz for the ground state as
# $$
# |\Phi_0\rangle = \left(\prod_{i\le F}\hat{a}_{i}^{\dagger}\right)|0\rangle,
# $$
# where the index $i$ defines different single-particle states up to the Fermi level. We have assumed that we have $N$ fermions.
# A given one-particle-one-hole ($1p1h$) state can be written as
# $$
# |\Phi_i^a\rangle = \hat{a}_{a}^{\dagger}\hat{a}_i|\Phi_0\rangle,
# $$
# while a $2p2h$ state can be written as
# $$
# |\Phi_{ij}^{ab}\rangle = \hat{a}_{a}^{\dagger}\hat{a}_{b}^{\dagger}\hat{a}_j\hat{a}_i|\Phi_0\rangle,
# $$
# and a general $NpNh$ state as
# $$
# |\Phi_{ijk\dots}^{abc\dots}\rangle = \hat{a}_{a}^{\dagger}\hat{a}_{b}^{\dagger}\hat{a}_{c}^{\dagger}\dots\hat{a}_k\hat{a}_j\hat{a}_i|\Phi_0\rangle.
# $$
# ## Full Configuration Interaction Theory
# We can then expand our exact state function for the ground state
# as
# $$
# |\Psi_0\rangle=C_0|\Phi_0\rangle+\sum_{ai}C_i^a|\Phi_i^a\rangle+\sum_{abij}C_{ij}^{ab}|\Phi_{ij}^{ab}\rangle+\dots
# =(C_0+\hat{C})|\Phi_0\rangle,
# $$
# where we have introduced the so-called correlation operator
# $$
# \hat{C}=\sum_{ai}C_i^a\hat{a}_{a}^{\dagger}\hat{a}_i +\sum_{abij}C_{ij}^{ab}\hat{a}_{a}^{\dagger}\hat{a}_{b}^{\dagger}\hat{a}_j\hat{a}_i+\dots
# $$
# Since the normalization of $\Psi_0$ is at our disposal and since $C_0$ is by hypothesis non-zero, we may arbitrarily set $C_0=1$ with
# corresponding proportional changes in all other coefficients. Using this so-called intermediate normalization we have
# $$
# \langle \Psi_0 | \Phi_0 \rangle = \langle \Phi_0 | \Phi_0 \rangle = 1,
# $$
# resulting in
# $$
# |\Psi_0\rangle=(1+\hat{C})|\Phi_0\rangle.
# $$
# ## Full Configuration Interaction Theory
# We rewrite
# $$
# |\Psi_0\rangle=C_0|\Phi_0\rangle+\sum_{ai}C_i^a|\Phi_i^a\rangle+\sum_{abij}C_{ij}^{ab}|\Phi_{ij}^{ab}\rangle+\dots,
# $$
# in a more compact form as
# $$
# |\Psi_0\rangle=\sum_{PH}C_H^P\Phi_H^P=\left(\sum_{PH}C_H^P\hat{A}_H^P\right)|\Phi_0\rangle,
# $$
# where $H$ stands for $0,1,\dots,n$ hole states and $P$ for $0,1,\dots,n$ particle states.
# Our requirement of unit normalization gives
# $$
# \langle \Psi_0 | \Psi_0 \rangle = \sum_{PH}|C_H^P|^2= 1,
# $$
# and the energy can be written as
# $$
# E= \langle \Psi_0 | \hat{H} |\Psi_0 \rangle= \sum_{PP'HH'}C_H^{*P}\langle \Phi_H^P | \hat{H} |\Phi_{H'}^{P'} \rangle C_{H'}^{P'}.
# $$
# ## Full Configuration Interaction Theory
# Normally
# $$
# E= \langle \Psi_0 | \hat{H} |\Psi_0 \rangle= \sum_{PP'HH'}C_H^{*P}\langle \Phi_H^P | \hat{H} |\Phi_{H'}^{P'} \rangle C_{H'}^{P'},
# $$
# is solved by diagonalization setting up the Hamiltonian matrix defined by the basis of all possible Slater determinants. A diagonalization
# <!-- to do: add text about Rayleigh-Ritz -->
# is equivalent to finding the variational minimum of
# $$
# \langle \Psi_0 | \hat{H} |\Psi_0 \rangle-\lambda \langle \Psi_0 |\Psi_0 \rangle,
# $$
# where $\lambda$ is a variational multiplier to be identified with the energy of the system.
# The minimization process results in
# $$
# \sum_{P'H'}\left\{\delta[C_H^{*P}]\langle \Phi_H^P | \hat{H} |\Phi_{H'}^{P'} \rangle C_{H'}^{P'}+
# C_H^{*P}\langle \Phi_H^P | \hat{H} |\Phi_{H'}^{P'} \rangle \delta[C_{H'}^{P'}]-
# \lambda\left( \delta[C_H^{*P}]C_{H'}^{P'}+C_H^{*P}\delta[C_{H'}^{P'}]\right)\right\} = 0.
# $$
# Since the coefficients $\delta[C_H^{*P}]$ and $\delta[C_{H'}^{P'}]$ are complex conjugates it is necessary and sufficient to require the quantities that multiply with $\delta[C_H^{*P}]$ to vanish.
#
#
#
#
#
#
# ## Full Configuration Interaction Theory
#
# This leads to
# $$
# \sum_{P'H'}\langle \Phi_H^P | \hat{H} |\Phi_{H'}^{P'} \rangle C_{H'}^{P'}-\lambda C_H^{P}=0,
# $$
# for all sets of $P$ and $H$.
#
# If we then multiply by the corresponding $C_H^{*P}$ and sum over $PH$ we obtain
# $$
# \sum_{PP'HH'}C_H^{*P}\langle \Phi_H^P | \hat{H} |\Phi_{H'}^{P'} \rangle C_{H'}^{P'}-\lambda\sum_{PH}|C_H^P|^2=0,
# $$
# leading to the identification $\lambda = E$. This means that we have for all $PH$ sets
# <!-- Equation labels as ordinary links -->
# <div id="eq:fullci"></div>
#
# $$
# \begin{equation}
# \sum_{P'H'}\langle \Phi_H^P | \hat{H} -E|\Phi_{H'}^{P'} \rangle = 0. \label{eq:fullci} \tag{1}
# \end{equation}
# $$
# ## Full Configuration Interaction Theory
# An alternative way to derive the last equation is to start from
# $$
# (\hat{H} -E)|\Psi_0\rangle = (\hat{H} -E)\sum_{P'H'}C_{H'}^{P'}|\Phi_{H'}^{P'} \rangle=0,
# $$
# and if this equation is successively projected against all $\Phi_H^P$ in the expansion of $\Psi$, then the last equation on the previous slide
# results. As stated previously, one solves this equation normally by diagonalization. If we are able to solve this equation exactly (that is
# numerically exactly) in a large Hilbert space (it will be truncated in terms of the number of single-particle states included in the definition
# of Slater determinants), it can then serve as a benchmark for other many-body methods which approximate the correlation operator
# $\hat{C}$.
#
#
#
#
#
# ## Example of a Hamiltonian matrix
# Suppose, as an example, that we have six fermions below the Fermi level.
# This means that we can make at most $6p-6h$ excitations. If we have an infinity of single particle states above the Fermi level, we will obviously have an infinity of say $2p-2h$ excitations. Each such way to configure the particles is called a **configuration**. We will always have to truncate in the basis of single-particle states.
# This gives us a finite number of possible Slater determinants. Our Hamiltonian matrix would then look like (where each block can have a large dimensionalities):
#
# <table border="1">
# <thead>
# <tr><th align="center"> </th> <th align="center">$0p-0h$</th> <th align="center">$1p-1h$</th> <th align="center">$2p-2h$</th> <th align="center">$3p-3h$</th> <th align="center">$4p-4h$</th> <th align="center">$5p-5h$</th> <th align="center">$6p-6h$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> $0p-0h$ </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> $1p-1h$ </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> $2p-2h$ </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> 0 </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> $3p-3h$ </td> <td align="center"> 0 </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> $4p-4h$ </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> </tr>
# <tr><td align="center"> $5p-5h$ </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> </tr>
# <tr><td align="center"> $6p-6h$ </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> x </td> <td align="center"> x </td> <td align="center"> x </td> </tr>
# </tbody>
# </table>
# with a two-body force. Why are there non-zero blocks of elements?
#
#
#
#
# ## Example of a Hamiltonian matrix with a Hartree-Fock basis
# If we use a Hartree-Fock basis, this corresponds to a particular unitary transformation where matrix elements of the type $\langle 0p-0h \vert \hat{H} \vert 1p-1h\rangle =\langle \Phi_0 | \hat{H}|\Phi_{i}^{a}\rangle=0$ and our Hamiltonian matrix becomes
#
# <table border="1">
# <thead>
# <tr><th align="center"> </th> <th align="center"> $0p-0h$ </th> <th align="center"> $1p-1h$ </th> <th align="center"> $2p-2h$ </th> <th align="center"> $3p-3h$ </th> <th align="center"> $4p-4h$ </th> <th align="center"> $5p-5h$ </th> <th align="center"> $6p-6h$ </th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> $0p-0h$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> 0 </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> $1p-1h$ </td> <td align="center"> 0 </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> $2p-2h$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> 0 </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> $3p-3h$ </td> <td align="center"> 0 </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> $4p-4h$ </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> </tr>
# <tr><td align="center"> $5p-5h$ </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> </tr>
# <tr><td align="center"> $6p-6h$ </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> <td align="center"> $\tilde{x}$ </td> </tr>
# </tbody>
# </table>
#
#
#
#
#
# ## Shell-model jargon
# If we do not make any truncations in the possible sets of Slater determinants (many-body states) we can make by distributing $A$ nucleons among $n$ single-particle states, we call such a calculation for **Full configuration interaction theory**
#
# If we make truncations, we have different possibilities
#
# * The standard nuclear shell-model. Here we define an effective Hilbert space with respect to a given core. The calculations are normally then performed for all many-body states that can be constructed from the effective Hilbert spaces. This approach requires a properly defined effective Hamiltonian
#
# * We can truncate in the number of excitations. For example, we can limit the possible Slater determinants to only $1p-1h$ and $2p-2h$ excitations. This is called a configuration interaction calculation at the level of singles and doubles excitations, or just CISD.
#
# * We can limit the number of excitations in terms of the excitation energies. If we do not define a core, this defines normally what is called the no-core shell-model approach.
#
# What happens if we have a three-body interaction and a Hartree-Fock basis?
#
#
#
#
# ## FCI and the exponential growth
# Full configuration interaction theory calculations provide in principle, if we can diagonalize numerically, all states of interest. The dimensionality of the problem explodes however quickly.
#
# The total number of Slater determinants which can be built with say $N$ neutrons distributed among $n$ single particle states is
# $$
# \left (\begin{array}{c} n \\ N\end{array} \right) =\frac{n!}{(n-N)!N!}.
# $$
# For a model space which comprises the first four major shells only, $0s$, $0p$, $1s0d$ and $1p0f$, we have $40$ single particle states for neutrons and protons. For the eight neutrons of oxygen-16 we would then have
# $$
# \left (\begin{array}{c} 40 \\ 8\end{array} \right) =\frac{40!}{(32)!8!}\sim 10^{9},
# $$
# and multiplying this with the number of proton Slater determinants we end up with approximately with a dimensionality $d$ of $d\sim 10^{18}$.
#
#
#
#
#
# ## Exponential wall
# This number can be reduced if we look at specific symmetries only. However, the dimensionality explodes quickly!
#
# * For Hamiltonian matrices of dimensionalities which are smaller than $d\sim 10^5$, we would use so-called direct methods for diagonalizing the Hamiltonian matrix
#
# * For larger dimensionalities iterative eigenvalue solvers like Lanczos' method are used. The most efficient codes at present can handle matrices of $d\sim 10^{10}$.
#
#
#
#
#
# ## A non-practical way of solving the eigenvalue problem
# To see this, we look at the contributions arising from
# $$
# \langle \Phi_H^P | = \langle \Phi_0|
# $$
# in Eq. ([1](#eq:fullci)), that is we multiply with $\langle \Phi_0 |$
# from the left in
# $$
# (\hat{H} -E)\sum_{P'H'}C_{H'}^{P'}|\Phi_{H'}^{P'} \rangle=0.
# $$
# If we assume that we have a two-body operator at most, Slater's rule gives then an equation for the
# correlation energy in terms of $C_i^a$ and $C_{ij}^{ab}$ only. We get then
# $$
# \langle \Phi_0 | \hat{H} -E| \Phi_0\rangle + \sum_{ai}\langle \Phi_0 | \hat{H} -E|\Phi_{i}^{a} \rangle C_{i}^{a}+
# \sum_{abij}\langle \Phi_0 | \hat{H} -E|\Phi_{ij}^{ab} \rangle C_{ij}^{ab}=0,
# $$
# or
# $$
# E-E_0 =\Delta E=\sum_{ai}\langle \Phi_0 | \hat{H}|\Phi_{i}^{a} \rangle C_{i}^{a}+
# \sum_{abij}\langle \Phi_0 | \hat{H}|\Phi_{ij}^{ab} \rangle C_{ij}^{ab},
# $$
# where the energy $E_0$ is the reference energy and $\Delta E$ defines the so-called correlation energy.
# The single-particle basis functions could be the results of a Hartree-Fock calculation or just the eigenstates of the non-interacting part of the Hamiltonian.
#
#
#
#
#
# ## A non-practical way of solving the eigenvalue problem
# To see this, we look at the contributions arising from
# $$
# \langle \Phi_H^P | = \langle \Phi_0|
# $$
# in Eq. ([1](#eq:fullci)), that is we multiply with $\langle \Phi_0 |$
# from the left in
# $$
# (\hat{H} -E)\sum_{P'H'}C_{H'}^{P'}|\Phi_{H'}^{P'} \rangle=0.
# $$
# ## A non-practical way of solving the eigenvalue problem
# If we assume that we have a two-body operator at most, Slater's rule gives then an equation for the
# correlation energy in terms of $C_i^a$ and $C_{ij}^{ab}$ only. We get then
# $$
# \langle \Phi_0 | \hat{H} -E| \Phi_0\rangle + \sum_{ai}\langle \Phi_0 | \hat{H} -E|\Phi_{i}^{a} \rangle C_{i}^{a}+
# \sum_{abij}\langle \Phi_0 | \hat{H} -E|\Phi_{ij}^{ab} \rangle C_{ij}^{ab}=0,
# $$
# or
# $$
# E-E_0 =\Delta E=\sum_{ai}\langle \Phi_0 | \hat{H}|\Phi_{i}^{a} \rangle C_{i}^{a}+
# \sum_{abij}\langle \Phi_0 | \hat{H}|\Phi_{ij}^{ab} \rangle C_{ij}^{ab},
# $$
# where the energy $E_0$ is the reference energy and $\Delta E$ defines the so-called correlation energy.
# The single-particle basis functions could be the results of a Hartree-Fock calculation or just the eigenstates of the non-interacting part of the Hamiltonian.
#
#
#
#
#
#
# ## Rewriting the FCI equation
# In our notes on Hartree-Fock calculations,
# we have already computed the matrix $\langle \Phi_0 | \hat{H}|\Phi_{i}^{a}\rangle $ and $\langle \Phi_0 | \hat{H}|\Phi_{ij}^{ab}\rangle$. If we are using a Hartree-Fock basis, then the matrix elements
# $\langle \Phi_0 | \hat{H}|\Phi_{i}^{a}\rangle=0$ and we are left with a *correlation energy* given by
# $$
# E-E_0 =\Delta E^{HF}=\sum_{abij}\langle \Phi_0 | \hat{H}|\Phi_{ij}^{ab} \rangle C_{ij}^{ab}.
# $$
# ## Rewriting the FCI equation
# Inserting the various matrix elements we can rewrite the previous equation as
# $$
# \Delta E=\sum_{ai}\langle i| \hat{f}|a \rangle C_{i}^{a}+
# \sum_{abij}\langle ij | \hat{v}| ab \rangle C_{ij}^{ab}.
# $$
# This equation determines the correlation energy but not the coefficients $C$.
#
#
#
#
# ## Rewriting the FCI equation, does not stop here
# We need more equations. Our next step is to set up
# $$
# \langle \Phi_i^a | \hat{H} -E| \Phi_0\rangle + \sum_{bj}\langle \Phi_i^a | \hat{H} -E|\Phi_{j}^{b} \rangle C_{j}^{b}+
# \sum_{bcjk}\langle \Phi_i^a | \hat{H} -E|\Phi_{jk}^{bc} \rangle C_{jk}^{bc}+
# \sum_{bcdjkl}\langle \Phi_i^a | \hat{H} -E|\Phi_{jkl}^{bcd} \rangle C_{jkl}^{bcd}=0,
# $$
# as this equation will allow us to find an expression for the coefficients $C_i^a$ since we can rewrite this equation as
# $$
# \langle i | \hat{f}| a\rangle +\langle \Phi_i^a | \hat{H}|\Phi_{i}^{a} \rangle C_{i}^{a}+ \sum_{bj\ne ai}\langle \Phi_i^a | \hat{H}|\Phi_{j}^{b} \rangle C_{j}^{b}+
# \sum_{bcjk}\langle \Phi_i^a | \hat{H}|\Phi_{jk}^{bc} \rangle C_{jk}^{bc}+
# \sum_{bcdjkl}\langle \Phi_i^a | \hat{H}|\Phi_{jkl}^{bcd} \rangle C_{jkl}^{bcd}=EC_i^a.
# $$
# ## Rewriting the FCI equation, please stop here
# We see that on the right-hand side we have the energy $E$. This leads to a non-linear equation in the unknown coefficients.
# These equations are normally solved iteratively ( that is we can start with a guess for the coefficients $C_i^a$). A common choice is to use perturbation theory for the first guess, setting thereby
# $$
# C_{i}^{a}=\frac{\langle i | \hat{f}| a\rangle}{\epsilon_i-\epsilon_a}.
# $$
# ## Rewriting the FCI equation, more to add
# The observant reader will however see that we need an equation for $C_{jk}^{bc}$ and $C_{jkl}^{bcd}$ as well.
# To find equations for these coefficients we need then to continue our multiplications from the left with the various
# $\Phi_{H}^P$ terms.
#
#
# For $C_{jk}^{bc}$ we need then
# $$
# \langle \Phi_{ij}^{ab} | \hat{H} -E| \Phi_0\rangle + \sum_{ck}\langle \Phi_{ij}^{ab} | \hat{H} -E|\Phi_{k}^{c} \rangle C_{k}^{c}+
# \sum_{cdkl}\langle \Phi_{ij}^{ab} | \hat{H} -E|\Phi_{kl}^{cd} \rangle C_{kl}^{cd}+\sum_{cdeklm}\langle \Phi_{ij}^{ab} | \hat{H} -E|\Phi_{klm}^{cde} \rangle C_{klm}^{cde}+\sum_{cdefklmn}\langle \Phi_{ij}^{ab} | \hat{H} -E|\Phi_{klmn}^{cdef} \rangle C_{klmn}^{cdef}=0,
# $$
# and we can isolate the coefficients $C_{kl}^{cd}$ in a similar way as we did for the coefficients $C_{i}^{a}$.
# and we can isolate the coefficients $C_{kl}^{cd}$ in a similar way as we did for the coefficients $C_{i}^{a}$.
#
#
#
#
#
#
# ## Rewriting the FCI equation, more to add
# A standard choice for the first iteration is to set
# $$
# C_{ij}^{ab} =\frac{\langle ij \vert \hat{v} \vert ab \rangle}{\epsilon_i+\epsilon_j-\epsilon_a-\epsilon_b}.
# $$
# At the end we can rewrite our solution of the Schroedinger equation in terms of $n$ coupled equations for the coefficients $C_H^P$.
# This is a very cumbersome way of solving the equation. However, by using this iterative scheme we can illustrate how we can compute the
# various terms in the wave operator or correlation operator $\hat{C}$. We will later identify the calculation of the various terms $C_H^P$
# as parts of different many-body approximations to full CI. In particular, we can relate this non-linear scheme with Coupled Cluster theory and
# many-body perturbation theory.
#
#
#
#
#
# ## Summarizing FCI and bringing in approximative methods
#
# If we can diagonalize large matrices, FCI is the method of choice since:
# * It gives all eigenvalues, ground state and excited states
#
# * The eigenvectors are obtained directly from the coefficients $C_H^P$ which result from the diagonalization
#
# * We can compute easily expectation values of other operators, as well as transition probabilities
#
# * Correlations are easy to understand in terms of contributions to a given operator beyond the Hartree-Fock contribution. This is the standard approach in many-body theory.
#
#
#
#
# ## Definition of the correlation energy
# The correlation energy is defined as, with a two-body Hamiltonian,
# $$
# \Delta E=\sum_{ai}\langle i| \hat{f}|a \rangle C_{i}^{a}+
# \sum_{abij}\langle ij | \hat{v}| ab \rangle C_{ij}^{ab}.
# $$
# The coefficients $C$ result from the solution of the eigenvalue problem.
# The energy of say the ground state is then
# $$
# E=E_{ref}+\Delta E,
# $$
# where the so-called reference energy is the energy we obtain from a Hartree-Fock calculation, that is
# $$
# E_{ref}=\langle \Phi_0 \vert \hat{H} \vert \Phi_0 \rangle.
# $$
# ## FCI equation and the coefficients
#
# However, as we have seen, even for a small case like the four first major shells and a nucleus like oxygen-16, the dimensionality becomes quickly intractable. If we wish to include single-particle states that reflect weakly bound systems, we need a much larger single-particle basis. We need thus approximative methods that sum specific correlations to infinite order.
#
# Popular methods are
# * [Many-body perturbation theory (in essence a Taylor expansion)](http://www.sciencedirect.com/science/article/pii/0370157395000126)
#
# * [Coupled cluster theory (coupled non-linear equations)](http://iopscience.iop.org/article/10.1088/0034-4885/77/9/096302/meta)
#
# * Green's function approaches (matrix inversion)
#
# * [Similarity group transformation methods (coupled ordinary differential equations)](http://journals.aps.org/prl/abstract/10.1103/PhysRevLett.106.222502)
#
# All these methods start normally with a Hartree-Fock basis as the calculational basis.
#
#
#
#
# ## Important ingredients to have in codes
#
# * Be able to validate and verify the algorithms.
#
# * Include concepts like unit testing. Gives the possibility to test and validate several or all parts of the code.
#
# * Validation and verification are then included *naturally* and one can develop a better attitude to what is meant with an ethically sound scientific approach.
#
#
#
#
#
#
# ## A structured approach to solving problems
# In the steps that lead to the development of clean code you should think of
# 1. How to structure a code in terms of functions (use IDEs or advanced text editors like sublime or atom)
#
# 2. How to make a module
#
# 3. How to read input data flexibly from the command line or files
#
# 4. How to create graphical/web user interfaces
#
# 5. How to write unit tests
#
# 6. How to refactor code in terms of classes (instead of functions only)
#
# 7. How to conduct and automate large-scale numerical experiments
#
# 8. How to write scientific reports in various formats (LaTeX, HTML, doconce)
#
#
#
#
#
# ## Additional benefits
# Many of the above aspects will save you a lot of time when you incrementally extend software over time from simpler to more complicated problems. In particular, you will benefit from many good habits:
# 1. New code is added in a modular fashion to a library (modules)
#
# 2. Programs are run through convenient user interfaces
#
# 3. It takes one quick command to let all your code undergo heavy testing
#
# 4. Tedious manual work with running programs is automated,
#
# 5. Your scientific investigations are reproducible, scientific reports with top quality typesetting are produced both for paper and electronic devices. Use version control software like [git](https://git-scm.com/) and repositories like [github](https://github.com/)
#
#
#
#
#
#
# ## Unit Testing
# Unit Testing is the practice of testing the smallest testable parts,
# called units, of an application individually and independently to
# determine if they behave exactly as expected.
#
# Unit tests (short code
# fragments) are usually written such that they can be performed at any
# time during the development to continually verify the behavior of the
# code.
#
# In this way, possible bugs will be identified early in the
# development cycle, making the debugging at later stages much
# easier.
#
#
#
#
# ## Unit Testing, benefits
# There are many benefits associated with Unit Testing, such as
# * It increases confidence in changing and maintaining code. Big changes can be made to the code quickly, since the tests will ensure that everything still is working properly.
#
# * Since the code needs to be modular to make Unit Testing possible, the code will be easier to reuse. This improves the code design.
#
# * Debugging is easier, since when a test fails, only the latest changes need to be debugged.
#
# * Different parts of a project can be tested without the need to wait for the other parts to be available.
#
#
# * A unit test can serve as a documentation on the functionality of a unit of the code.
#
#
#
#
#
# ## Simple example of unit test
# Look up the guide on how to install unit tests for c++ at course webpage. This is the version with classes.
# #include <unittest++/UnitTest++.h>
#
# class MyMultiplyClass{
# public:
# double multiply(double x, double y) {
# return x * y;
# }
# };
#
# TEST(MyMath) {
# MyMultiplyClass my;
# CHECK_EQUAL(56, my.multiply(7,8));
# }
#
# int main()
# {
# return UnitTest::RunAllTests();
# }
#
# ## Simple example of unit test
# And without classes
# #include <unittest++/UnitTest++.h>
#
#
# double multiply(double x, double y) {
# return x * y;
# }
#
# TEST(MyMath) {
# CHECK_EQUAL(56, multiply(7,8));
# }
#
# int main()
# {
# return UnitTest::RunAllTests();
# }
#
# For Fortran users, the link at <http://sourceforge.net/projects/fortranxunit/> contains a similar
# software for unit testing. For Python go to <https://docs.python.org/3/library/unittest.html>.
#
#
#
#
#
#
#
#
# ## [Unit tests](https://github.com/philsquared/Catch/blob/master/docs/tutorial.md)
# There are many types of **unit test** libraries. One which is very popular with C++ programmers is [Catch](https://github.com/philsquared/Catch/blob/master/docs/tutorial.md)
#
# Catch is header only. All you need to do is drop the file(s) somewhere reachable from your project - either in some central location you can set your header search path to find, or directly into your project tree itself!
#
# This is a particularly good option for other Open-Source projects that want to use Catch for their test suite.
#
#
#
#
# ## Examples
#
# Computing factorials
# inline unsigned int Factorial( unsigned int number ) {
# return number > 1 ? Factorial(number-1)*number : 1;
# }
#
# ## Factorial Example
#
# Simple test where we put everything in a single file
# #define CATCH_CONFIG_MAIN // This tells Catch to provide a main()
# #include "catch.hpp"
# inline unsigned int Factorial( unsigned int number ) {
# return number > 1 ? Factorial(number-1)*number : 1;
# }
#
# TEST_CASE( "Factorials are computed", "[factorial]" ) {
# REQUIRE( Factorial(0) == 1 );
# REQUIRE( Factorial(1) == 1 );
# REQUIRE( Factorial(2) == 2 );
# REQUIRE( Factorial(3) == 6 );
# REQUIRE( Factorial(10) == 3628800 );
# }
#
#
# This will compile to a complete executable which responds to command line arguments. If you just run it with no arguments it will execute all test cases (in this case there is just one), report any failures, report a summary of how many tests passed and failed and return the number of failed tests.
#
#
# ## What did we do (1)?
# All we did was
# #define
#
# one identifier and
# #include
#
# one header and we got everything - even an implementation of main() that will respond to command line arguments.
# Once you have more than one file with unit tests in you'll just need to
# #include "catch.hpp"
#
# and go. Usually it's a good idea to have a dedicated implementation file that just has
# #define CATCH_CONFIG_MAIN
# #include "catch.hpp".
#
# You can also provide your own implementation of main and drive Catch yourself.
#
#
#
# ## What did we do (2)?
# We introduce test cases with the
# TEST_CASE
#
# macro.
#
# The test name must be unique. You can run sets of tests by specifying a wildcarded test name or a tag expression.
# All we did was **define** one identifier and **include** one header and we got everything.
#
# We write our individual test assertions using the
# REQUIRE
#
# macro.
#
#
#
# ## Unit test summary and testing approach
# Three levels of tests
# 1. Microscopic level: testing small parts of code, use often unit test libraries
#
# 2. Mesoscopic level: testing the integration of various parts of your code
#
# 3. Macroscopic level: testing that the final result is ok
#
#
#
#
#
#
# ## Coding Recommendations
# Writing clean and clear code is an art and reflects
# your understanding of
#
# 1. derivation, verification, and implementation of algorithms
#
# 2. what can go wrong with algorithms
#
# 3. overview of important, known algorithms
#
# 4. how algorithms are used to solve mathematical problems
#
# 5. reproducible science and ethics
#
# 6. algorithmic thinking for gaining deeper insights about scientific problems
#
# Computing is understanding and your understanding is reflected in your abilities to
# write clear and clean code.
#
#
#
# ## Summary and recommendations
# Some simple hints and tips in order to write clean and clear code
# 1. Spell out the algorithm and have a top-down approach to the flow of data
#
# 2. Start with coding as close as possible to eventual mathematical expressions
#
# 3. Use meaningful names for variables
#
# 4. Split tasks in simple functions and modules/classes
#
# 5. Functions should return as few as possible variables
#
# 6. Use unit tests and make sure your codes are producing the correct results
#
# 7. Where possible use symbolic coding to autogenerate code and check results
#
# 8. Make a proper timing of your algorithms
#
# 9. Use version control and make your science reproducible
#
# 10. Use IDEs or smart editors with debugging and analysis tools.
#
# 11. Automatize your computations interfacing high-level and compiled languages like C++ and Fortran.
#
# 12. .....
#
# ## Building a many-body basis
# Here we will discuss how we can set up a single-particle basis which we can use in the various parts of our projects, from the simple pairing model to infinite nuclear matter. We will use here the simple pairing model to illustrate in particular how to set up a single-particle basis. We will also use this to discuss standard FCI approaches like:
# 1. Standard shell-model basis in one or two major shells
#
# 2. Full CI in a given basis and no truncations
#
# 3. CISD and CISDT approximations
#
# 4. No-core shell model and truncation in excitation energy
#
#
#
#
# ## Building a many-body basis
# An important step in an FCI code is to construct the many-body basis.
#
# While the formalism is independent of the choice of basis, the **effectiveness** of a calculation
# will certainly be basis dependent.
#
# Furthermore there are common conventions useful to know.
#
# First, the single-particle basis has angular momentum as a good quantum number. You can
# imagine the single-particle wavefunctions being generated by a one-body Hamiltonian,
# for example a harmonic oscillator. Modifications include harmonic oscillator plus
# spin-orbit splitting, or self-consistent mean-field potentials, or the Woods-Saxon potential which mocks
# up the self-consistent mean-field.
# For nuclei, the harmonic oscillator, modified by spin-orbit splitting, provides a useful language
# for describing single-particle states.
#
#
#
#
# ## Building a many-body basis
# Each single-particle state is labeled by the following quantum numbers:
#
# * Orbital angular momentum $l$
#
# * Intrinsic spin $s$ = 1/2 for protons and neutrons
#
# * Angular momentum $j = l \pm 1/2$
#
# * $z$-component $j_z$ (or $m$)
#
# * Some labeling of the radial wavefunction, typically $n$ the number of nodes in the radial wavefunction, but in the case of harmonic oscillator one can also use the principal quantum number $N$, where the harmonic oscillator energy is $(N+3/2)\hbar \omega$.
#
# In this format one labels states by $n(l)_j$, with $(l)$ replaced by a letter:
# $s$ for $l=0$, $p$ for $l=1$, $d$ for $l=2$, $f$ for $l=3$, and thenceforth alphabetical.
#
#
#
#
# ## Building a many-body basis
# In practice the single-particle space has to be severely truncated. This truncation is
# typically based upon the single-particle energies, which is the effective energy
# from a mean-field potential.
#
# Sometimes we freeze the core and only consider a valence space. For example, one
# may assume a frozen $^{4}\mbox{He}$ core, with two protons and two neutrons in the $0s_{1/2}$
# shell, and then only allow active particles in the $0p_{1/2}$ and $0p_{3/2}$ orbits.
#
#
# Another example is a frozen $^{16}\mbox{O}$ core, with eight protons and eight neutrons filling the
# $0s_{1/2}$, $0p_{1/2}$ and $0p_{3/2}$ orbits, with valence particles in the
# $0d_{5/2}, 1s_{1/2}$ and $0d_{3/2}$ orbits.
#
#
# Sometimes we refer to nuclei by the valence space where their last nucleons go.
# So, for example, we call $^{12}\mbox{C}$ a $p$-shell nucleus, while $^{26}\mbox{Al}$ is an
# $sd$-shell nucleus and $^{56}\mbox{Fe}$ is a $pf$-shell nucleus.
#
#
#
#
#
# ## Building a many-body basis
# There are different kinds of truncations.
#
# * For example, one can start with "filled" orbits (almost always the lowest), and then allow one, two, three... particles excited out of those filled orbits. These are called 1p-1h, 2p-2h, 3p-3h excitations.
#
# * Alternately, one can state a maximal orbit and allow all possible configurations with particles occupying states up to that maximum. This is called *full configuration*.
#
# * Finally, for particular use in nuclear physics, there is the *energy* truncation, also called the $N\hbar\Omega$ or $N_{max}$ truncation.
#
#
#
#
# ## Building a many-body basis
# Here one works in a harmonic oscillator basis, with each major oscillator shell assigned a principal quantum number $N=0,1,2,3,...$.
# The $N\hbar\Omega$ or $N_{max}$ truncation: Any configuration is given a noninteracting energy, which is the sum
# of the single-particle harmonic oscillator energies. (Thus this ignores
# spin-orbit splitting.)
#
# Excited states are labeled relative to the lowest configuration by the
# number of harmonic oscillator quanta.
#
# This truncation is useful because if one includes *all* configurations up to
# some $N_{max}$, and has a translationally invariant interaction, then the intrinsic
# motion and the center-of-mass motion factor. In other words, we can know exactly
# the center-of-mass wavefunction.
#
# In almost all cases, the many-body Hamiltonian is rotationally invariant. This means
# it commutes with the operators $\hat{J}^2, \hat{J}_z$ and so eigenstates will have
# good $J,M$. Furthermore, the eigenenergies do not depend upon the orientation $M$.
#
#
# Therefore we can choose to construct a many-body basis which has fixed $M$; this is
# called an $M$-scheme basis.
#
#
# Alternately, one can construct a many-body basis which has fixed $J$, or a $J$-scheme
# basis.
#
#
#
#
# ## Building a many-body basis
# The Hamiltonian matrix will have smaller dimensions (a factor of 10 or more) in the $J$-scheme than in the $M$-scheme.
# On the other hand, as we'll show in the next slide, the $M$-scheme is very easy to
# construct with Slater determinants, while the $J$-scheme basis states, and thus the
# matrix elements, are more complicated, almost always being linear combinations of
# $M$-scheme states. $J$-scheme bases are important and useful, but we'll focus on the
# simpler $M$-scheme.
#
# The quantum number $m$ is additive (because the underlying group is Abelian):
# if a Slater determinant $\hat{a}_i^\dagger \hat{a}^\dagger_j \hat{a}^\dagger_k \ldots | 0 \rangle$
# is built from single-particle states all with good $m$, then the total
# $$
# M = m_i + m_j + m_k + \ldots
# $$
# This is *not* true of $J$, because the angular momentum group SU(2) is not Abelian.
#
#
#
#
#
# ## Building a many-body basis
#
# The upshot is that
# * It is easy to construct a Slater determinant with good total $M$;
#
# * It is trivial to calculate $M$ for each Slater determinant;
#
# * So it is easy to construct an $M$-scheme basis with fixed total $M$.
#
# Note that the individual $M$-scheme basis states will *not*, in general,
# have good total $J$.
# Because the Hamiltonian is rotationally invariant, however, the eigenstates will
# have good $J$. (The situation is muddied when one has states of different $J$ that are
# nonetheless degenerate.)
#
#
#
#
#
#
#
# ## Building a many-body basis
# Example: two $j=1/2$ orbits
#
# <table border="1">
# <thead>
# <tr><th align="center">Index</th> <th align="center">$n$</th> <th align="center">$l$</th> <th align="center">$j$</th> <th align="center">$m_j$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 2 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 3 </td> <td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 4 </td> <td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# </tbody>
# </table>
# Note that the order is arbitrary.
#
#
#
#
# ## Building a many-body basis
# There are $\left ( \begin{array}{c} 4 \\ 2 \end{array} \right) = 6$ two-particle states,
# which we list with the total $M$:
#
# <table border="1">
# <thead>
# <tr><th align="center">Occupied</th> <th align="center">$M$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1,2 </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> 1,3 </td> <td align="center"> -1 </td> </tr>
# <tr><td align="center"> 1,4 </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> 2,3 </td> <td align="center"> 0 </td> </tr>
# <tr><td align="center"> 2,4 </td> <td align="center"> 1 </td> </tr>
# <tr><td align="center"> 3,4 </td> <td align="center"> 0 </td> </tr>
# </tbody>
# </table>
# There are 4 states with $M= 0$,
# and 1 each with $M = \pm 1$.
#
#
#
#
#
# ## Building a many-body basis
# As another example, consider using only single particle states from the $0d_{5/2}$ space.
# They have the following quantum numbers
#
# <table border="1">
# <thead>
# <tr><th align="center">Index</th> <th align="center">$n$</th> <th align="center">$l$</th> <th align="center">$j$</th> <th align="center">$m_j$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> -5/2 </td> </tr>
# <tr><td align="center"> 2 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> -3/2 </td> </tr>
# <tr><td align="center"> 3 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 4 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 5 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> 3/2 </td> </tr>
# <tr><td align="center"> 6 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> 5/2 </td> </tr>
# </tbody>
# </table>
#
#
#
#
# ## Building a many-body basis
# There are $\left ( \begin{array}{c} 6 \\ 2 \end{array} \right) = 15$ two-particle states,
# which we list with the total $M$:
#
# <table border="1">
# <thead>
# <tr><th align="center">Occupied</th> <th align="center">$M$</th> <th align="center">Occupied</th> <th align="center">$M$</th> <th align="center">Occupied</th> <th align="center">$M$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1,2 </td> <td align="center"> -4 </td> <td align="center"> 2,3 </td> <td align="center"> -2 </td> <td align="center"> 3,5 </td> <td align="center"> 1 </td> </tr>
# <tr><td align="center"> 1,3 </td> <td align="center"> -3 </td> <td align="center"> 2,4 </td> <td align="center"> -1 </td> <td align="center"> 3,6 </td> <td align="center"> 2 </td> </tr>
# <tr><td align="center"> 1,4 </td> <td align="center"> -2 </td> <td align="center"> 2,5 </td> <td align="center"> 0 </td> <td align="center"> 4,5 </td> <td align="center"> 2 </td> </tr>
# <tr><td align="center"> 1,5 </td> <td align="center"> -1 </td> <td align="center"> 2,6 </td> <td align="center"> 1 </td> <td align="center"> 4,6 </td> <td align="center"> 3 </td> </tr>
# <tr><td align="center"> 1,6 </td> <td align="center"> 0 </td> <td align="center"> 3,4 </td> <td align="center"> 0 </td> <td align="center"> 5,6 </td> <td align="center"> 4 </td> </tr>
# </tbody>
# </table>
# There are 3 states with $M= 0$, 2 with $M = 1$, and so on.
#
#
#
#
#
#
#
#
#
#
#
#
#
# ## Shell-model project
#
# The first step is to construct the $M$-scheme basis of Slater determinants.
# Here $M$-scheme means the total $J_z$ of the many-body states is fixed.
#
# The steps could be:
#
# * Read in a user-supplied file of single-particle states (examples can be given) or just code these internally;
#
# * Ask for the total $M$ of the system and the number of particles $N$;
#
# * Construct all the $N$-particle states with given $M$. You will validate the code by comparing both the number of states and specific states.
#
#
#
#
# ## Shell-model project
# The format of a possible input file could be
# <table border="1">
# <thead>
# <tr><th align="center">Index</th> <th align="center">$n$</th> <th align="center">$l$</th> <th align="center">$2j$</th> <th align="center">$2m_j$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1 </td> <td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 1 </td> <td align="center"> -1 </td> </tr>
# <tr><td align="center"> 2 </td> <td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 1 </td> <td align="center"> 1 </td> </tr>
# <tr><td align="center"> 3 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 3 </td> <td align="center"> -3 </td> </tr>
# <tr><td align="center"> 4 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 3 </td> <td align="center"> -1 </td> </tr>
# <tr><td align="center"> 5 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 3 </td> <td align="center"> 1 </td> </tr>
# <tr><td align="center"> 6 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 3 </td> <td align="center"> 3 </td> </tr>
# <tr><td align="center"> 7 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5 </td> <td align="center"> -5 </td> </tr>
# <tr><td align="center"> 8 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5 </td> <td align="center"> -3 </td> </tr>
# <tr><td align="center"> 9 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5 </td> <td align="center"> -1 </td> </tr>
# <tr><td align="center"> 10 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5 </td> <td align="center"> 1 </td> </tr>
# <tr><td align="center"> 11 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5 </td> <td align="center"> 3 </td> </tr>
# <tr><td align="center"> 12 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5 </td> <td align="center"> 5 </td> </tr>
# </tbody>
# </table>
# This represents the $1s_{1/2}0d_{3/2}0d_{5/2}$ valence space, or just the $sd$-space. There are
# twelve single-particle states, labeled by an overall index, and which have associated quantum
# numbers the number of radial nodes, the orbital angular momentum $l$, and the
# angular momentum $j$ and third component $j_z$. To keep everything as integers, we could store $2 \times j$ and
# $2 \times j_z$.
#
#
#
#
# ## Shell-model project
# To read in the single-particle states you need to:
# * Open the file
#
# * Read the number of single-particle states (in the above example, 12); allocate memory; all you need is a single array storing $2\times j_z$ for each state, labeled by the index.
#
#
# * Read in the quantum numbers and store $2 \times j_z$ (and anything else you happen to want).
#
#
#
#
# ## Shell-model project
#
# The next step is to read in the number of particles $N$ and the fixed total $M$ (or, actually, $2 \times M$).
# For this project we assume only a single species of particles, say neutrons, although this can be
# relaxed. **Note**: Although it is often a good idea to try to write a more general code, given the
# short time allotted we would suggest you keep your ambition in check, at least in the initial phases of the
# project.
#
#
# You should probably write an error trap to make sure $N$ and $M$ are congruent; if $N$ is even, then
# $2 \times M$ should be even, and if $N$ is odd then $2\times M$ should be odd.
#
#
#
#
# ## Shell-model project
# The final step is to generate the set of $N$-particle Slater determinants with fixed $M$.
# The Slater determinants will be stored in occupation representation. Although in many codes
# this representation is done compactly in bit notation with ones and zeros, but for
# greater transparency and simplicity we will list the occupied single particle states.
#
# Hence we can
# store the Slater determinant basis states as $sd(i,j)$, that is an
# array of dimension $N_{SD}$, the number of Slater determinants, by $N$, the number of occupied
# states. So if for the 7th Slater determinant the 2nd, 3rd, and 9th single-particle states are occupied,
# then $sd(7,1) = 2$, $sd(7,2) = 3$, and $sd(7,3) = 9$.
#
#
#
#
# ## Shell-model project
#
# We can construct an occupation representation of Slater determinants by the *odometer*
# method. Consider $N_{sp} = 12$ and $N=4$.
# Start with the first 4 states occupied, that is:
#
# * $sd(1,:)= 1,2,3,4$ (also written as $|1,2,3,4 \rangle$)
#
# Now increase the last occupancy recursively:
# * $sd(2,:)= 1,2,3,5$
#
# * $sd(3,:)= 1,2,3,6$
#
# * $sd(4,:)= 1,2,3,7$
#
# * $\ldots$
#
# * $sd(9,:)= 1,2,3,12$
#
# Then start over with
# * $sd(10,:)= 1,2,4,5$
#
# and again increase the rightmost digit
#
# * $sd(11,:)= 1,2,4,6$
#
# * $sd(12,:)= 1,2,4,7$
#
# * $\ldots$
#
# * $sd(17,:)= 1,2,4,12$
#
#
#
#
# ## Shell-model project
# When we restrict ourselves to an $M$-scheme basis, we could choose two paths.
# The first is simplest (and simplest is often best, at
# least in the first draft of a code): generate all possible Slater determinants,
# and then extract from this initial list a list of those Slater determinants with a given
# $M$. (You will need to write a short function or routine that computes $M$ for any
# given occupation.)
#
#
# Alternately, and not too difficult, is to run the odometer routine twice: each time, as
# as a Slater determinant is calculated, compute $M$, but do not store the Slater determinants
# except the current one. You can then count up the number of Slater determinants with a
# chosen $M$. Then allocated storage for the Slater determinants, and run the odometer
# algorithm again, this time storing Slater determinants with the desired $M$ (this can be
# done with a simple logical flag).
#
#
#
#
#
# ## Shell-model project
#
# *Some example solutions*: Let's begin with a simple case, the $0d_{5/2}$ space containing six single-particle states
#
# <table border="1">
# <thead>
# <tr><th align="center">Index</th> <th align="center">$n$</th> <th align="center">$l$</th> <th align="center">$j$</th> <th align="center">$m_j$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> -5/2 </td> </tr>
# <tr><td align="center"> 2 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> -3/2 </td> </tr>
# <tr><td align="center"> 3 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 4 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 5 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> 3/2 </td> </tr>
# <tr><td align="center"> 6 </td> <td align="center"> 0 </td> <td align="center"> 2 </td> <td align="center"> 5/2 </td> <td align="center"> 5/2 </td> </tr>
# </tbody>
# </table>
# For two particles, there are a total of 15 states, which we list here with the total $M$:
# * $\vert 1,2 \rangle$, $M= -4$, $\vert 1,3 \rangle$, $M= -3$
#
# * $\vert 1,4 \rangle$, $M= -2$, $\vert 1,5 \rangle$, $M= -1$
#
# * $\vert 1,6 \rangle$, $M= 0$, $\vert 2,3 \rangle$, $M= -2$
#
# * $\vert 2,4 \rangle$, $M= -1$, $\vert 2,5 \rangle$, $M= 0$
#
# * $\vert 2,6 \rangle$, $M= 1$, $\vert 3,4 \rangle$, $M= 0$
#
# * $\vert 3,5 \rangle$, $M= 1$, $\vert 3,6 \rangle$, $M= 2$
#
# * $\vert 4,5 \rangle$, $M= 2$, $\vert 4,6 \rangle$, $M= 3$
#
# * $\vert 5,6 \rangle$, $M= 4$
#
# Of these, there are only 3 states with $M=0$.
#
#
#
#
# ## Shell-model project
# *You should try* by hand to show that in this same single-particle space, that for
# $N=3$ there are 3 states with $M=1/2$ and for $N= 4$ there are also only 3 states with $M=0$.
#
# *To test your code*, confirm the above.
#
# Also,
# for the $sd$-space given above, for $N=2$ there are 14 states with $M=0$, for $N=3$ there are 37
# states with $M=1/2$, for $N=4$ there are 81 states with $M=0$.
#
#
#
#
# ## Shell-model project
# For our project, we will only consider the pairing model.
# A simple space is the $(1/2)^2$ space with four single-particle states
#
# <table border="1">
# <thead>
# <tr><th align="center">Index</th> <th align="center">$n$</th> <th align="center">$l$</th> <th align="center">$s$</th> <th align="center">$m_s$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 2 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 3 </td> <td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 4 </td> <td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# </tbody>
# </table>
# For $N=2$ there are 4 states with $M=0$; show this by hand and confirm your code reproduces it.
#
#
#
#
# ## Shell-model project
# Another, slightly more challenging space is the $(1/2)^4$ space, that is,
# with eight single-particle states we have
#
# <table border="1">
# <thead>
# <tr><th align="center">Index</th> <th align="center">$n$</th> <th align="center">$l$</th> <th align="center">$s$</th> <th align="center">$m_s$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 2 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 3 </td> <td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 4 </td> <td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 5 </td> <td align="center"> 2 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 6 </td> <td align="center"> 2 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 7 </td> <td align="center"> 3 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 8 </td> <td align="center"> 3 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# </tbody>
# </table>
# For $N=2$ there are 16 states with $M=0$; for $N=3$ there are 24 states with $M=1/2$, and for
# $N=4$ there are 36 states with $M=0$.
#
#
#
#
# ## Shell-model project
# In the shell-model context we can interpret this as 4 $s_{1/2}$ levels, with $m = \pm 1/2$; we can also think of these as simply four pairs, $\pm k, k = 1,2,3,4$. Later on we will
# assign single-particle energies, depending on the radial quantum number $n$, that is,
# $\epsilon_k = |k| \delta$ so that they are equally spaced.
#
#
#
#
# ## Shell-model project
#
# For application in the pairing model we can go further and consider only states with
# no "broken pairs," that is, if $+k$ is filled ($m = +1/2$), so is $-k$ ($m=-1/2$).
# If you want, you can write your code to accept only these, and obtain the following
# six states:
#
# * $| 1, 2 , 3 , 4 \rangle , $
#
# * $| 1 , 2 , 5 , 6 \rangle , $
#
# * $| 1 , 2 , 7 , 8 \rangle , $
#
# * $| 3 , 4 , 5 , 6 \rangle , $
#
# * $| 3 , 4 , 7 , 8 \rangle , $
#
# * $| 5 , 6 , 7 , 8 \rangle $
#
#
#
#
#
#
# ## Shell-model project
# **Hints for coding.**
#
#
#
# * Write small modules (routines/functions) ; avoid big functions that do everything. (But not too small.)
#
# * Use Unit tests! Write lots of error traps, even for things that are `obvious.'
#
# * Document as you go along. The Unit tests serve as documentation. For each function write a header that includes:
#
# a. Main purpose of function and/or unit test
#
# b. names and brief explanation of input variables, if any
#
# c. names and brief explanation of output variables, if any
#
# d. functions called by this function
#
# e. called by which functions
#
#
#
#
# ## Shell-model project
#
# Hints for coding
#
# * Unit tests will save time. Use also IDEs for debugging. If you insist on brute force debugging, print out intermediate values. It's almost impossible to debug a code by looking at it - the code will almost always win a `staring contest.'
#
# * Validate code with SIMPLE CASES. Validate early and often. Unit tests!!
#
# The number one mistake is using too complex a system to test. For example,
# if you are computing particles in a potential in a box, try removing the potential - you should get
# particles in a box. And start with one particle, then two, then three... Don't start with
# eight particles.
#
#
#
#
# ## Shell-model project
#
# Our recommended occupation representation, e.g. $| 1,2,4,8 \rangle$, is
# easy to code, but numerically inefficient when one has hundreds of
# millions of Slater determinants.
#
#
# In state-of-the-art shell-model codes, one generally uses bit
# representation, i.e. $|1101000100... \rangle$ where one stores
# the Slater determinant as a single (or a small number of) integer.
#
#
# This is much more compact, but more intricate to code with considerable
# more overhead. There exist
# bit-manipulation functions. We will discuss these in more detail at the beginning of the third week.
#
#
#
#
# ## Example case: pairing Hamiltonian
#
# We consider a space with $2\Omega$ single-particle states, with each
# state labeled by
# $k = 1, 2, 3, \ldots, \Omega$ and $m = \pm 1/2$. The convention is that
# the state with $k>0$ has $m = + 1/2$ while $-k$ has $m = -1/2$.
#
#
# The Hamiltonian we consider is
# $$
# \hat{H} = -G \hat{P}_+ \hat{P}_-,
# $$
# where
# $$
# \hat{P}_+ = \sum_{k > 0} \hat{a}^\dagger_k \hat{a}^\dagger_{-{k}}.
# $$
# and $\hat{P}_- = ( \hat{P}_+)^\dagger$.
#
# This problem can be solved using what is called the quasi-spin formalism to obtain the
# exact results. Thereafter we will try again using the explicit Slater determinant formalism.
#
#
#
#
#
#
# ## Example case: pairing Hamiltonian
#
# One can show (and this is part of the project) that
# $$
# \left [ \hat{P}_+, \hat{P}_- \right ] = \sum_{k> 0} \left( \hat{a}^\dagger_k \hat{a}_k
# + \hat{a}^\dagger_{-{k}} \hat{a}_{-{k}} - 1 \right) = \hat{N} - \Omega.
# $$
# Now define
# $$
# \hat{P}_z = \frac{1}{2} ( \hat{N} -\Omega).
# $$
# Finally you can show
# $$
# \left [ \hat{P}_z , \hat{P}_\pm \right ] = \pm \hat{P}_\pm.
# $$
# This means the operators $\hat{P}_\pm, \hat{P}_z$ form a so-called $SU(2)$ algebra, and we can
# use all our insights about angular momentum, even though there is no actual
# angular momentum involved.
#
# So we rewrite the Hamiltonian to make this explicit:
# $$
# \hat{H} = -G \hat{P}_+ \hat{P}_-
# = -G \left( \hat{P}^2 - \hat{P}_z^2 + \hat{P}_z\right)
# $$
# ## Example case: pairing Hamiltonian
#
# Because of the SU(2) algebra, we know that the eigenvalues of
# $\hat{P}^2$ must be of the form $p(p+1)$, with $p$ either integer or half-integer, and the eigenvalues of $\hat{P}_z$
# are $m_p$ with $p \geq | m_p|$, with $m_p$ also integer or half-integer.
#
#
# But because $\hat{P}_z = (1/2)(\hat{N}-\Omega)$, we know that for $N$ particles
# the value $m_p = (N-\Omega)/2$. Furthermore, the values of $m_p$ range from
# $-\Omega/2$ (for $N=0$) to $+\Omega/2$ (for $N=2\Omega$, with all states filled).
#
# We deduce the maximal $p = \Omega/2$ and for a given $n$ the
# values range of $p$ range from $|N-\Omega|/2$ to $\Omega/2$ in steps of 1
# (for an even number of particles)
#
#
# Following Racah we introduce the notation
# $p = (\Omega - v)/2$
# where $v = 0, 2, 4,..., \Omega - |N-\Omega|$
# With this it is easy to deduce that the eigenvalues of the pairing Hamiltonian are
# $$
# -G(N-v)(2\Omega +2-N-v)/4
# $$
# This also works for $N$ odd, with $v= 1,3,5, \dots$.
#
#
#
#
#
# ## Example case: pairing Hamiltonian
#
# Let's take a specific example: $\Omega = 3$ so there are 6 single-particle states,
# and $N = 3$, with $v= 1,3$. Therefore there are two distinct eigenvalues,
# $$
# E = -2G, 0
# $$
# Now let's work this out explicitly. The single particle degrees of freedom are defined as
#
# <table border="1">
# <thead>
# <tr><th align="center">Index</th> <th align="center">$k$</th> <th align="center">$m$ </th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1 </td> <td align="center"> 1 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 2 </td> <td align="center"> -1 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 3 </td> <td align="center"> 2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 4 </td> <td align="center"> -2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 5 </td> <td align="center"> 3 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 6 </td> <td align="center"> -3 </td> <td align="center"> 1/2 </td> </tr>
# </tbody>
# </table>
# There are $\left( \begin{array}{c}6 \\ 3 \end{array} \right) = 20$ three-particle states, but there
# are 9 states with $M = +1/2$, namely
# $| 1,2,4 \rangle, |1,2,6\rangle, | 1,4,6 \rangle, | 2,3,4 \rangle, |2,3,6 \rangle, | 2,4,5 \rangle, | 2, 5, 6 \rangle, |3,4,6 \rangle, | 4,5,6 \rangle$.
#
#
#
#
#
#
#
#
# ## Example case: pairing Hamiltonian
#
# In this basis, the operator
# $$
# \hat{P}_+
# = \hat{a}^\dagger_1 \hat{a}^\dagger_2 + \hat{a}^\dagger_3 \hat{a}^\dagger_4 +
# \hat{a}^\dagger_5 \hat{a}^\dagger_6
# $$
# From this we can determine that
# $$
# \hat{P}_- | 1, 4, 6 \rangle = \hat{P}_- | 2, 3, 6 \rangle
# = \hat{P}_- | 2, 4, 5 \rangle = 0
# $$
# so those states all have eigenvalue 0.
#
#
#
#
#
# ## Example case: pairing Hamiltonian
# Now for further example,
# $$
# \hat{P}_- | 1,2,4 \rangle = | 4 \rangle
# $$
# so
# $$
# \hat{P}_+ \hat{P}_- | 1,2,4\rangle = | 1,2,4\rangle+ | 3,4,4\rangle + | 5,6,4\rangle
# $$
# The second term vanishes because state 4 is occupied twice, and reordering the last
# term we
# get
# $$
# \hat{P}_+ \hat{P}_- | 1,2,4\rangle = | 1,2,4\rangle+ |4, 5,6\rangle
# $$
# without picking up a phase.
#
#
#
#
# ## Example case: pairing Hamiltonian
#
# Continuing in this fashion, with the previous ordering of the many-body states
# ( $| 1,2,4 \rangle, |1,2,6\rangle, | 1,4,6 \rangle, | 2,3,4 \rangle, |2,3,6 \rangle, | 2,4,5 \rangle, | 2, 5, 6 \rangle, |3,4,6 \rangle, | 4,5,6 \rangle$) the
# Hamiltonian matrix of this system is
# $$
# H = -G\left(
# \begin{array}{ccccccccc}
# 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \\
# 0 & 1 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 1 & 0 & 0 & 1 & 0 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 1 & 0 & 0 & 1 & 0 & 0 \\
# 0 & 1 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
# 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1
# \end{array} \right )
# $$
# This is useful for our project. One can by hand confirm
# that there are 3 eigenvalues $-2G$ and 6 with value zero.
#
#
#
#
# ## Example case: pairing Hamiltonian
#
# Another example
# Using the $(1/2)^4$ single-particle space, resulting in eight single-particle states
#
# <table border="1">
# <thead>
# <tr><th align="center">Index</th> <th align="center">$n$</th> <th align="center">$l$</th> <th align="center">$s$</th> <th align="center">$m_s$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 2 </td> <td align="center"> 0 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 3 </td> <td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 4 </td> <td align="center"> 1 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 5 </td> <td align="center"> 2 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 6 </td> <td align="center"> 2 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# <tr><td align="center"> 7 </td> <td align="center"> 3 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> -1/2 </td> </tr>
# <tr><td align="center"> 8 </td> <td align="center"> 3 </td> <td align="center"> 0 </td> <td align="center"> 1/2 </td> <td align="center"> 1/2 </td> </tr>
# </tbody>
# </table>
# and then taking only 4-particle, $M=0$ states that have no `broken pairs', there are six basis Slater
# determinants:
#
# * $| 1, 2 , 3 , 4 \rangle , $
#
# * $| 1 , 2 , 5 , 6 \rangle , $
#
# * $| 1 , 2 , 7 , 8 \rangle , $
#
# * $| 3 , 4 , 5 , 6 \rangle , $
#
# * $| 3 , 4 , 7 , 8 \rangle , $
#
# * $| 5 , 6 , 7 , 8 \rangle $
#
#
#
#
# ## Example case: pairing Hamiltonian
#
# Now we take the following Hamiltonian
# $$
# \hat{H} = \sum_n n \delta \hat{N}_n - G \hat{P}^\dagger \hat{P}
# $$
# where
# $$
# \hat{N}_n = \hat{a}^\dagger_{n, m=+1/2} \hat{a}_{n, m=+1/2} +
# \hat{a}^\dagger_{n, m=-1/2} \hat{a}_{n, m=-1/2}
# $$
# and
# $$
# \hat{P}^\dagger = \sum_{n} \hat{a}^\dagger_{n, m=+1/2} \hat{a}^\dagger_{n, m=-1/2}
# $$
# We can write down the $ 6 \times 6$ Hamiltonian in the basis from the prior slide:
# $$
# H = \left (
# \begin{array}{cccccc}
# 2\delta -2G & -G & -G & -G & -G & 0 \\
# -G & 4\delta -2G & -G & -G & 0 & -G \\
# -G & -G & 6\delta -2G & 0 & -G & -G \\
# -G & -G & 0 & 6\delta-2G & -G & -G \\
# -G & 0 & -G & -G & 8\delta-2G & -G \\
# 0 & -G & -G & -G & -G & 10\delta -2G
# \end{array} \right )
# $$
# (You should check by hand that this is correct.)
#
# For $\delta = 0$ we have the closed form solution of the g.s. energy given by $-6G$.
#
#
#
#
#
# ## Building a Hamiltonian matrix
# The goal is to compute the matrix elements of the Hamiltonian, specifically
# matrix elements between many-body states (Slater determinants) of two-body
# operators
# $$
# \sum_{p < q, r < s}V_{pqrs} \hat{a}^\dagger_p \hat{a}^\dagger_q\hat{a}_s \hat{a}_r
# $$
# In particular we will need to compute
# $$
# \langle \beta | \hat{a}^\dagger_p \hat{a}^\dagger_q\hat{a}_s \hat{a}_r |\alpha \rangle
# $$
# where $\alpha, \beta$ are indices labeling Slater determinants and $p,q,r,s$ label
# single-particle states.
#
#
#
#
#
# ## Building a Hamiltonian matrix
# Note: there are other, more efficient ways to do this than the method we describe,
# but you will
# be able to produce a working code quickly.
#
# As we coded in the first step,
# a Slater determinant $| \alpha \rangle$ with index $\alpha$ is a
# list of $N$ occupied single-particle states $i_1 < i_2 < i_3 \ldots i_N$.
#
#
# Furthermore, for the two-body matrix elements $V_{pqrs}$ we normally assume
# $p < q$ and $r < s$. For our specific project, the interaction is much simpler and you can use this to simplify considerably the setup of a shell-model code for project 2.
#
# What follows here is a more general, but still brute force, approach.
#
#
#
#
#
# ## Building a Hamiltonian matrix
# Write a function that:
# 1. Has as input the single-particle indices $p,q,r,s$ for the two-body operator and the index $\alpha$ for the ket Slater determinant;
#
# 2. Returns the index $\beta$ of the unique (if any) Slater determinant such that
# $$
# | \beta \rangle = \pm \hat{a}^\dagger_p \hat{a}^\dagger_q\hat{a}_s \hat{a}_r |\alpha \rangle
# $$
# as well as the phase
#
# This is equivalent to computing
# $$
# \langle \beta | \hat{a}^\dagger_p \hat{a}^\dagger_q\hat{a}_s \hat{a}_r |\alpha \rangle
# $$
# ## Building a Hamiltonian matrix, first step
# The first step can take as input an initial Slater determinant
# (whose position in the list of basis Slater determinants is $\alpha$) written as an
# ordered listed of occupied single-particle states, e.g. $1,2,5,8$, and the
# indices $p,q,r,s$ from the two-body operator.
#
# It will return another final Slater determinant if the single-particle states $r$ and $s$ are occupied, else it will return an
# empty Slater determinant
# (all zeroes).
#
# If $r$ and $s$ are in the list of occupied single particle states, then
# replace the initial single-particle states as $r \rightarrow p$ and $s \rightarrow q$.
#
#
#
#
#
# ## Building a Hamiltonian matrix, second step
# The second step will take the final Slater determinant
# from the first step (if not empty),
# and then order by pairwise permutations (i.e., if the Slater determinant is
# $i_1, i_2, i_3, \ldots$, then if $i_n > i_{n+1}$, interchange
# $i_n \leftrightarrow i_{n+1}$.
#
#
#
#
#
#
# ## Building a Hamiltonian matrix
#
# It will also output a phase. If any two single-particle occupancies are repeated,
# the phase is
# 0. Otherwise it is +1 for an even permutation and -1 for an odd permutation to
# bring the final
# Slater determinant into ascending order, $j_1 < j_2 < j_3 \ldots$.
#
#
#
#
#
# ## Building a Hamiltonian matrix
# **Example**: Suppose in the $sd$ single-particle space that the initial
# Slater determinant
# is $1,3,9,12$. If $p,q,r,s = 2,8,1,12$, then after the first step the final Slater determinant
# is $2,3,9,8$. The second step will return $2,3,8,9$ and a phase of -1,
# because an odd number of interchanges is required.
#
#
#
#
#
# ## Building a Hamiltonian matrix
#
# **Example**: Suppose in the $sd$ single-particle space that the initial
# Slater determinant
# is $1,3,9,12$. If $p,q,r,s = 3,8,1,12$, then after the first step the
# final Slater determinant
# is $3,3,9,8$, but after the second step the phase is 0
# because the single-particle state 3 is
# occupied twice.
#
# Lastly, the final step takes the ordered final Slater determinant and
# we search through the basis list to
# determine its index in the many-body basis, that is, $\beta$.
#
#
#
#
#
# ## Building a Hamiltonian matrix
#
# The Hamiltonian is then stored as an $N_{SD} \times N_{SD}$ array of real numbers, which
# can be allocated once you have created the many-body basis and know $N_{SD}$.
#
#
#
#
#
# ## Building a Hamiltonian matrix
#
# 1. Initialize $H(\alpha,\beta)=0.0$
#
# 2. Set up an outer loop over $\beta$
#
# 3. Loop over $\alpha = 1, NSD$
#
# 4. For each $\alpha$, loop over $a=1,ntbme$ and fetch $V(a)$ and the single-particle indices $p,q,r,s$
#
# 5. If $V(a) = 0$ skip. Otherwise, apply $\hat{a}^\dagger_p\hat{a}^\dagger_q \hat{a}_s \hat{a}_r$ to the Slater determinant labeled by $\alpha$.
#
# 6. Find, if any, the label $\beta$ of the resulting Slater determinant and the phase (which is 0, +1, -1).
#
# 7. If phase $\neq 0$, then update $H(\alpha,\beta)$ as $H(\alpha,\beta) + phase*V(a)$. The sum is important because multiple operators might contribute to the same matrix element.
#
# 8. Continue loop over $a$
#
# 9. Continue loop over $\alpha$.
#
# 10. End the outer loop over $\beta$.
#
# You should force the resulting matrix $H$ to be symmetric. To do this, when
# updating $H(\alpha,\beta)$, if $\alpha \neq \beta$, also update $H(\beta,\alpha)$.
#
#
#
#
#
# ## Building a Hamiltonian matrix
#
# You will also need to include the single-particle energies. This is easy: they only
# contribute to diagonal matrix elements, that is, $H(\alpha,\alpha)$.
# Simply find the occupied single-particle states $i$ and add the corresponding $\epsilon(i)$.
#
#
#
#
#
# ## Hamiltonian matrix without the bit representation
#
# Consider the many-body state $\Psi_{\lambda}$ expressed as linear combinations of
# Slater determinants ($SD$) of orthonormal single-particle states $\phi({\bf r})$:
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# \Psi_{\lambda} = \sum_i C_{\lambda i} SD_i
# \label{_auto1} \tag{2}
# \end{equation}
# $$
# Using the Slater-Condon rules the matrix elements of any one-body
# ($\cal{O}_1$) or two-body ($\cal{O}_2$) operator expressed in the
# determinant space have simple expressions involving one- and two-fermion
# integrals in our given single-particle basis.
# The diagonal elements are given by:
# $$
# \begin{eqnarray}
# \langle SD | \cal{O}_1 | SD \rangle & = & \sum_{i \in SD} \langle \phi_i | \cal{O}_1 | \phi_i \rangle \\
# \langle SD | \cal{O}_2 | SD \rangle & = & \frac{1}{2} \sum_{(i,j) \in SD}
# \langle \phi_i \phi_j | \cal{O}_2 | \phi_i \phi_j \rangle - \nonumber \\
# & &
# \langle \phi_i \phi_j | \cal{O}_2 | \phi_j \phi_i \rangle \nonumber
# \end{eqnarray}
# $$
# ## Hamiltonian matrix without the bit representation, one and two-body operators
#
# For two determinants which differ only by the substitution of single-particle states $i$ with
# a single-particle state $j$:
# $$
# \begin{eqnarray}
# \langle SD | \cal{O}_1 | SD_i^j \rangle & = & \langle \phi_i | \cal{O}_1 | \phi_j \rangle \\
# \langle SD | \cal{O}_2 | SD_i^j \rangle & = & \sum_{k \in SD}
# \langle \phi_i \phi_k | \cal{O}_2 | \phi_j \phi_k \rangle -
# \langle \phi_i \phi_k | \cal{O}_2 | \phi_k \phi_j \rangle \nonumber
# \end{eqnarray}
# $$
# For two determinants which differ by two single-particle states
# $$
# \begin{eqnarray}
# \langle SD | \cal{O}_1 | SD_{ik}^{jl} \rangle & = & 0 \\
# \langle SD | \cal{O}_2 | SD_{ik}^{jl} \rangle & = &
# \langle \phi_i \phi_k | \cal{O}_2 | \phi_j \phi_l \rangle -
# \langle \phi_i \phi_k | \cal{O}_2 | \phi_l \phi_j \rangle \nonumber
# \end{eqnarray}
# $$
# All other matrix elements involving determinants with more than two
# substitutions are zero.
#
#
#
#
# ## Strategies for setting up an algorithm
#
#
# An efficient implementation of these rules requires
#
# * to find the number of single-particle state substitutions between two determinants
#
# * to find which single-particle states are involved in the substitution
#
# * to compute the phase factor if a reordering of the single-particle states has occurred
#
# We can solve this problem using our odometric approach or alternatively using a bit representation as discussed below and in more detail in
#
# * [Scemama and Gimer's article (Fortran codes)](https://github.com/scemama/slater_condon)
#
# * [Simen Kvaal's article on how to build an FCI code (C++ code)](https://arxiv.org/abs/0810.2644)
#
# We recommend in particular the article by Simen Kvaal. It contains nice general classes for creation and annihilation operators as well as the calculation of the phase (see below).
#
#
#
#
#
# ## Computing expectation values and transitions in the shell-model
# When we diagonalize the Hamiltonian matrix, the eigenvectors are the coefficients $C_{\lambda i}$ used
# to express the many-body state $\Psi_{\lambda}$ in terms of a linear combinations of
# Slater determinants ($SD$) of orthonormal single-particle states $\phi({\bf r})$.
#
# With these eigenvectors we can compute say the transition likelihood of a one-body operator as
# $$
# \langle \Psi_{\lambda} \vert \cal{O}_1 \vert \Psi_{\sigma} \rangle =
# \sum_{ij}C_{\lambda i}^*C_{\sigma j} \langle SD_i | \cal{O}_1 | SD_j \rangle .
# $$
# Writing the one-body operator in second quantization as
# $$
# \cal{O}_1 = \sum_{pq} \langle p \vert \cal{o}_1 \vert q\rangle a_p^{\dagger} a_q,
# $$
# we have
# $$
# \langle \Psi_{\lambda} \vert \cal{O}_1 \vert \Psi_{\sigma} \rangle =
# \sum_{pq}\langle p \vert \cal{o}_1 \vert q\rangle \sum_{ij}C_{\lambda i}^*C_{\sigma j} \langle SD_i |a_p^{\dagger} a_q | SD_j \rangle .
# $$
# ## Computing expectation values and transitions in the shell-model and spectroscopic factors
# The terms we need to evalute then are just the elements
# $$
# \langle SD_i |a_p^{\dagger} a_q | SD_j \rangle,
# $$
# which can be rewritten in terms of spectroscopic factors by inserting a complete set of Slater determinats as
# $$
# \langle SD_i |a_p^{\dagger} a_q | SD_j \rangle = \sum_{l}\langle SD_i \vert a_p^{\dagger}\vert SD_l\rangle \langle SD_l \vert a_q \vert SD_j \rangle,
# $$
# where $\langle SD_l\vert a_q(a_p^{\dagger})\vert SD_j\rangle$ are the spectroscopic factors. These can be easily evaluated in $m$-scheme. Using the Wigner-Eckart theorem we can transform these to a $J$-coupled scheme through so-called reduced matrix elements.
#
#
#
#
#
#
#
# ## Operators in second quantization
# In the build-up of a shell-model or FCI code that is meant to tackle large dimensionalities
# we need to deal with the action of the Hamiltonian $\hat{H}$ on a
# Slater determinant represented in second quantization as
# $$
# |\alpha_1\dots \alpha_n\rangle = a_{\alpha_1}^{\dagger} a_{\alpha_2}^{\dagger} \dots a_{\alpha_n}^{\dagger} |0\rangle.
# $$
# The time consuming part stems from the action of the Hamiltonian
# on the above determinant,
# $$
# \left(\sum_{\alpha\beta} \langle \alpha|t+u|\beta\rangle a_\alpha^{\dagger} a_\beta + \frac{1}{4} \sum_{\alpha\beta\gamma\delta}
# \langle \alpha \beta|\hat{v}|\gamma \delta\rangle a_\alpha^{\dagger} a_\beta^{\dagger} a_\delta a_\gamma\right)a_{\alpha_1}^{\dagger} a_{\alpha_2}^{\dagger} \dots a_{\alpha_n}^{\dagger} |0\rangle.
# $$
# A practically useful way to implement this action is to encode a Slater determinant as a bit pattern.
#
#
#
#
#
# ## Operators in second quantization
# Assume that we have at our disposal $n$ different single-particle states
# $\alpha_0,\alpha_2,\dots,\alpha_{n-1}$ and that we can distribute among these states $N\le n$ particles.
#
# A Slater determinant can then be coded as an integer of $n$ bits. As an example, if we have $n=16$ single-particle states
# $\alpha_0,\alpha_1,\dots,\alpha_{15}$ and $N=4$ fermions occupying the states $\alpha_3$, $\alpha_6$, $\alpha_{10}$ and $\alpha_{13}$
# we could write this Slater determinant as
# $$
# \Phi_{\Lambda} = a_{\alpha_3}^{\dagger} a_{\alpha_6}^{\dagger} a_{\alpha_{10}}^{\dagger} a_{\alpha_{13}}^{\dagger} |0\rangle.
# $$
# The unoccupied single-particle states have bit value $0$ while the occupied ones are represented by bit state $1$.
# In the binary notation we would write this 16 bits long integer as
# $$
# \begin{array}{cccccccccccccccc}
# {\alpha_0}&{\alpha_1}&{\alpha_2}&{\alpha_3}&{\alpha_4}&{\alpha_5}&{\alpha_6}&{\alpha_7} & {\alpha_8} &{\alpha_9} & {\alpha_{10}} &{\alpha_{11}} &{\alpha_{12}} &{\alpha_{13}} &{\alpha_{14}} & {\alpha_{15}} \\
# {0} & {0} &{0} &{1} &{0} &{0} &{1} &{0} &{0} &{0} &{1} &{0} &{0} &{1} &{0} & {0} \\
# \end{array}
# $$
# which translates into the decimal number
# $$
# 2^3+2^6+2^{10}+2^{13}=9288.
# $$
# We can thus encode a Slater determinant as a bit pattern.
#
#
#
#
#
# ## Operators in second quantization
# With $N$ particles that can be distributed over $n$ single-particle states, the total number of Slater determinats (and defining thereby the dimensionality of the system) is
# $$
# \mathrm{dim}(\mathcal{H}) = \left(\begin{array}{c} n \\N\end{array}\right).
# $$
# The total number of bit patterns is $2^n$.
#
#
#
#
# ## Operators in second quantization
# We assume again that we have at our disposal $n$ different single-particle orbits
# $\alpha_0,\alpha_2,\dots,\alpha_{n-1}$ and that we can distribute among these orbits $N\le n$ particles.
# The ordering among these states is important as it defines the order of the creation operators.
# We will write the determinant
# $$
# \Phi_{\Lambda} = a_{\alpha_3}^{\dagger} a_{\alpha_6}^{\dagger} a_{\alpha_{10}}^{\dagger} a_{\alpha_{13}}^{\dagger} |0\rangle,
# $$
# in a more compact way as
# $$
# \Phi_{3,6,10,13} = |0001001000100100\rangle.
# $$
# The action of a creation operator is thus
# $$
# a^{\dagger}_{\alpha_4}\Phi_{3,6,10,13} = a^{\dagger}_{\alpha_4}|0001001000100100\rangle=a^{\dagger}_{\alpha_4}a_{\alpha_3}^{\dagger} a_{\alpha_6}^{\dagger} a_{\alpha_{10}}^{\dagger} a_{\alpha_{13}}^{\dagger} |0\rangle,
# $$
# which becomes
# $$
# -a_{\alpha_3}^{\dagger} a^{\dagger}_{\alpha_4} a_{\alpha_6}^{\dagger} a_{\alpha_{10}}^{\dagger} a_{\alpha_{13}}^{\dagger} |0\rangle=-|0001101000100100\rangle.
# $$
# ## Operators in second quantization
# Similarly
# $$
# a^{\dagger}_{\alpha_6}\Phi_{3,6,10,13} = a^{\dagger}_{\alpha_6}|0001001000100100\rangle=a^{\dagger}_{\alpha_6}a_{\alpha_3}^{\dagger} a_{\alpha_6}^{\dagger} a_{\alpha_{10}}^{\dagger} a_{\alpha_{13}}^{\dagger} |0\rangle,
# $$
# which becomes
# $$
# -a_{\alpha_3}^{\dagger} (a_{\alpha_6}^{\dagger})^2 a_{\alpha_{10}}^{\dagger} a_{\alpha_{13}}^{\dagger} |0\rangle=0!
# $$
# This gives a simple recipe:
# * If one of the bits $b_j$ is $1$ and we act with a creation operator on this bit, we return a null vector
#
# * If $b_j=0$, we set it to $1$ and return a sign factor $(-1)^l$, where $l$ is the number of bits set before bit $j$.
#
#
#
#
#
# ## Operators in second quantization
# Consider the action of $a^{\dagger}_{\alpha_2}$ on various slater determinants:
# $$
# \begin{array}{ccc}
# a^{\dagger}_{\alpha_2}\Phi_{00111}& = a^{\dagger}_{\alpha_2}|00111\rangle&=0\times |00111\rangle\\
# a^{\dagger}_{\alpha_2}\Phi_{01011}& = a^{\dagger}_{\alpha_2}|01011\rangle&=(-1)\times |01111\rangle\\
# a^{\dagger}_{\alpha_2}\Phi_{01101}& = a^{\dagger}_{\alpha_2}|01101\rangle&=0\times |01101\rangle\\
# a^{\dagger}_{\alpha_2}\Phi_{01110}& = a^{\dagger}_{\alpha_2}|01110\rangle&=0\times |01110\rangle\\
# a^{\dagger}_{\alpha_2}\Phi_{10011}& = a^{\dagger}_{\alpha_2}|10011\rangle&=(-1)\times |10111\rangle\\
# a^{\dagger}_{\alpha_2}\Phi_{10101}& = a^{\dagger}_{\alpha_2}|10101\rangle&=0\times |10101\rangle\\
# a^{\dagger}_{\alpha_2}\Phi_{10110}& = a^{\dagger}_{\alpha_2}|10110\rangle&=0\times |10110\rangle\\
# a^{\dagger}_{\alpha_2}\Phi_{11001}& = a^{\dagger}_{\alpha_2}|11001\rangle&=(+1)\times |11101\rangle\\
# a^{\dagger}_{\alpha_2}\Phi_{11010}& = a^{\dagger}_{\alpha_2}|11010\rangle&=(+1)\times |11110\rangle\\
# \end{array}
# $$
# What is the simplest way to obtain the phase when we act with one annihilation(creation) operator
# on the given Slater determinant representation?
#
#
#
#
#
# ## Operators in second quantization
# We have an SD representation
# $$
# \Phi_{\Lambda} = a_{\alpha_0}^{\dagger} a_{\alpha_3}^{\dagger} a_{\alpha_6}^{\dagger} a_{\alpha_{10}}^{\dagger} a_{\alpha_{13}}^{\dagger} |0\rangle,
# $$
# in a more compact way as
# $$
# \Phi_{0,3,6,10,13} = |1001001000100100\rangle.
# $$
# The action of
# $$
# a^{\dagger}_{\alpha_4}a_{\alpha_0}\Phi_{0,3,6,10,13} = a^{\dagger}_{\alpha_4}|0001001000100100\rangle=a^{\dagger}_{\alpha_4}a_{\alpha_3}^{\dagger} a_{\alpha_6}^{\dagger} a_{\alpha_{10}}^{\dagger} a_{\alpha_{13}}^{\dagger} |0\rangle,
# $$
# which becomes
# $$
# -a_{\alpha_3}^{\dagger} a^{\dagger}_{\alpha_4} a_{\alpha_6}^{\dagger} a_{\alpha_{10}}^{\dagger} a_{\alpha_{13}}^{\dagger} |0\rangle=-|0001101000100100\rangle.
# $$
# ## Operators in second quantization
# The action
# $$
# a_{\alpha_0}\Phi_{0,3,6,10,13} = |0001001000100100\rangle,
# $$
# can be obtained by subtracting the logical sum (AND operation) of $\Phi_{0,3,6,10,13}$ and
# a word which represents only $\alpha_0$, that is
# $$
# |1000000000000000\rangle,
# $$
# from $\Phi_{0,3,6,10,13}= |1001001000100100\rangle$.
#
# This operation gives $|0001001000100100\rangle$.
#
# Similarly, we can form $a^{\dagger}_{\alpha_4}a_{\alpha_0}\Phi_{0,3,6,10,13}$, say, by adding
# $|0000100000000000\rangle$ to $a_{\alpha_0}\Phi_{0,3,6,10,13}$, first checking that their logical sum
# is zero in order to make sure that the state $\alpha_4$ is not already occupied.
#
#
#
#
#
#
#
# ## Operators in second quantization
# It is trickier however to get the phase $(-1)^l$.
# One possibility is as follows
# * Let $S_1$ be a word that represents the 1-bit to be removed and all others set to zero.
#
# In the previous example $S_1=|1000000000000000\rangle$
# * Define $S_2$ as the similar word that represents the bit to be added, that is in our case
#
# $S_2=|0000100000000000\rangle$.
# * Compute then $S=S_1-S_2$, which here becomes
# $$
# S=|0111100000000000\rangle
# $$
# * Perform then the logical AND operation of $S$ with the word containing
# $$
# \Phi_{0,3,6,10,13} = |1001001000100100\rangle,
# $$
# which results in $|0001000000000000\rangle$. Counting the number of 1-bits gives the phase. Here you need however an algorithm for bitcounting.
#
#
#
#
#
#
#
#
#
# ## Bit counting
#
#
# We include here a python program which may aid in this direction. It uses bit manipulation functions from <http://wiki.python.org/moin/BitManipulation>.
# + editable=true
import math
"""
A simple Python class for Slater determinant manipulation
Bit-manipulation stolen from:
http://wiki.python.org/moin/BitManipulation
"""
# Count set bits via Kernighan's trick: each AND with (n - 1) clears the lowest 1-bit.
def bitCount(int_type):
    """Return the number of 1-bits in a non-negative integer."""
    remaining = int_type
    total = 0
    while remaining:
        remaining &= remaining - 1
        total += 1
    return total
# testBit() returns a nonzero result, 2**offset, if the bit at 'offset' is one.
def testBit(int_type, offset):
mask = 1 << offset
return(int_type & mask) >> offset
# OR in a one-bit mask to force the target bit on.
def setBit(int_type, offset):
    """Return a copy of the integer with the bit at 'offset' set to 1."""
    return int_type | (1 << offset)
# AND with the inverted one-bit mask to force the target bit off.
def clearBit(int_type, offset):
    """Return a copy of the integer with the bit at 'offset' cleared."""
    return int_type & ~(1 << offset)
# XOR with a one-bit mask flips exactly that bit.
def toggleBit(int_type, offset):
    """Return a copy of the integer with the bit at 'offset' inverted."""
    return int_type ^ (1 << offset)
# Binary-string helpers (no '0b' prefix).
def bin0(s):
    """Binary digits of a non-negative integer, most significant bit first."""
    if s <= 1:
        return str(s)
    return bin0(s >> 1) + str(s & 1)
def bin(s, L = 0):
    """Binary string of s, zero-padded on the left to width L when L > 0.

    NOTE: intentionally shadows the builtin bin(); kept for backward compatibility.
    """
    digits = bin0(s)
    if L > 0:
        return digits.rjust(L, '0')
    return digits
class Slater:
    """Slater determinant stored as a bit word; bit j set means orbital j is occupied."""
    def __init__(self):
        self.word = int(0)
    def create(self, j):
        """Apply the creation operator c^+_j; return the phase (+1/-1), or 0 if occupied.

        BUG FIX: converted Python 2 print statements (syntax errors under the
        notebook's Python 3 kernel) to print() calls; end="" keeps the two
        prints on one output line as the trailing comma did.
        """
        print("c^+_" + str(j) + " |" + bin(self.word) + "> = ", end="")
        # Assume bit j is set, then we return zero.
        s = 0
        # Check if bit j is set.
        isset = testBit(self.word, j)
        if isset == 0:
            # Phase = (-1)^(number of occupied orbitals below j).
            bits = bitCount(self.word & ((1 << j) - 1))
            s = pow(-1, bits)
            self.word = setBit(self.word, j)
        print(str(s) + " x |" + bin(self.word) + ">")
        return s
    def annihilate(self, j):
        """Apply the annihilation operator c_j; return the phase (+1/-1), or 0 if empty."""
        print("c_" + str(j) + " |" + bin(self.word) + "> = ", end="")
        # Assume bit j is not set, then we return zero.
        s = 0
        # Check if bit j is set.
        isset = testBit(self.word, j)
        if isset == 1:
            bits = bitCount(self.word & ((1 << j) - 1))
            s = pow(-1, bits)
            self.word = clearBit(self.word, j)
        print(str(s) + " x |" + bin(self.word) + ">")
        return s
# Do some testing (BUG FIX: bare Python 2 `print` converted to print()):
phi = Slater()
phi.create(0)
phi.create(1)
phi.create(2)
phi.create(3)
print()
s = phi.annihilate(2)
s = phi.create(7)
s = phi.annihilate(0)
s = phi.create(200)
# -
# ## Eigenvalue problems, basic definitions
# Let us consider the matrix $\mathbf{A}$ of dimension $n$. The eigenvalues of
# $\mathbf{A}$ are defined through the matrix equation
# $$
# \mathbf{A}\mathbf{x}^{(\nu)} = \lambda^{(\nu)}\mathbf{x}^{(\nu)},
# $$
# where $\lambda^{(\nu)}$ are the eigenvalues and $\mathbf{x}^{(\nu)}$ the
# corresponding eigenvectors.
# Unless otherwise stated, when we use the wording eigenvector we mean the
# right eigenvector. The left eigenvalue problem is defined as
# $$
# \mathbf{x}^{(\nu)}_L\mathbf{A} = \lambda^{(\nu)}\mathbf{x}^{(\nu)}_L
# $$
# The above right eigenvector problem is equivalent to a set of $n$ equations with $n$ unknowns
# $x_i$.
#
#
#
#
#
# ## Eigenvalue problems, basic definitions
# The eigenvalue problem can be rewritten as
# $$
# \left( \mathbf{A}-\lambda^{(\nu)} \mathbf{I} \right) \mathbf{x}^{(\nu)} = 0,
# $$
# with $\mathbf{I}$ being the unity matrix. This equation provides
# a solution to the problem if and only if the determinant
# is zero, namely
# $$
# \left| \mathbf{A}-\lambda^{(\nu)}\mathbf{I}\right| = 0,
# $$
# which in turn means that the determinant is a polynomial
# of degree $n$ in $\lambda$ and in general we will have
# $n$ distinct zeros.
#
#
#
#
#
# ## Eigenvalue problems, basic definitions
# The eigenvalues of a matrix
# $\mathbf{A}\in {\mathbb{C}}^{n\times n}$
# are thus the $n$ roots of its characteristic polynomial
# $$
# P(\lambda) = det(\lambda\mathbf{I}-\mathbf{A}),
# $$
# or
# $$
# P(\lambda)= \prod_{i=1}^{n}\left(\lambda_i-\lambda\right).
# $$
# The set of these roots is called the spectrum and is denoted as
# $\lambda(\mathbf{A})$.
# If $\lambda(\mathbf{A})=\left\{\lambda_1,\lambda_2,\dots ,\lambda_n\right\}$ then we have
# $$
# det(\mathbf{A})= \lambda_1\lambda_2\dots\lambda_n,
# $$
# and if we define the trace of $\mathbf{A}$ as
# $$
# Tr(\mathbf{A})=\sum_{i=1}^n a_{ii}
# $$
# then
# $$
# Tr(\mathbf{A})=\lambda_1+\lambda_2+\dots+\lambda_n.
# $$
# ## Abel-Ruffini Impossibility Theorem
# The *Abel-Ruffini* theorem (also known as Abel's impossibility theorem)
# states that there is no general solution in radicals to polynomial equations of degree five or higher.
#
# The content of this theorem is frequently misunderstood. It does not assert that higher-degree polynomial equations are unsolvable.
# In fact, if the polynomial has real or complex coefficients, and we allow complex solutions, then every polynomial equation has solutions; this is the fundamental theorem of algebra. Although these solutions cannot always be computed exactly with radicals, they can be computed to any desired degree of accuracy using numerical methods such as the Newton-Raphson method or Laguerre method, and in this way they are no different from solutions to polynomial equations of the second, third, or fourth degrees.
#
# The theorem only concerns the form that such a solution must take. The content of the theorem is
# that the solution of a higher-degree equation cannot in all cases be expressed in terms of the polynomial coefficients with a finite number of operations of addition, subtraction, multiplication, division and root extraction. Some polynomials of arbitrary degree, of which the simplest nontrivial example is the monomial equation $ax^n = b$, are always solvable with a radical.
#
#
#
#
#
# ## Abel-Ruffini Impossibility Theorem
#
# The *Abel-Ruffini* theorem says that there are some fifth-degree equations whose solution cannot be so expressed.
# The equation $x^5 - x + 1 = 0$ is an example. Some other fifth degree equations can be solved by radicals,
# for example $x^5 - x^4 - x + 1 = 0$. The precise criterion that distinguishes between those equations that can be solved
# by radicals and those that cannot was given by Galois and is now part of Galois theory:
# a polynomial equation can be solved by radicals if and only if its Galois group is a solvable group.
#
# Today, in the modern algebraic context, we say that second, third and fourth degree polynomial
# equations can always be solved by radicals because the symmetric groups $S_2, S_3$ and $S_4$ are solvable groups,
# whereas $S_n$ is not solvable for $n \ge 5$.
#
#
#
#
#
# ## Eigenvalue problems, basic definitions
# In the present discussion we assume that our matrix is real and symmetric, that is
# $\mathbf{A}\in {\mathbb{R}}^{n\times n}$.
# The matrix $\mathbf{A}$ has $n$ eigenvalues
# $\lambda_1\dots \lambda_n$ (distinct or not). Let $\mathbf{D}$ be the
# diagonal matrix with the eigenvalues on the diagonal
# $$
# \mathbf{D}= \left( \begin{array}{ccccccc} \lambda_1 & 0 & 0 & 0 & \dots &0 & 0 \\
# 0 & \lambda_2 & 0 & 0 & \dots &0 &0 \\
# 0 & 0 & \lambda_3 & 0 &0 &\dots & 0\\
# \dots & \dots & \dots & \dots &\dots &\dots & \dots\\
# 0 & \dots & \dots & \dots &\dots &\lambda_{n-1} & \\
# 0 & \dots & \dots & \dots &\dots &0 & \lambda_n
# \end{array} \right).
# $$
# If $\mathbf{A}$ is real and symmetric then there exists a real orthogonal matrix $\mathbf{S}$ such that
# $$
# \mathbf{S}^T \mathbf{A}\mathbf{S}= \mathrm{diag}(\lambda_1,\lambda_2,\dots ,\lambda_n),
# $$
# and for $j=1:n$ we have $\mathbf{A}\mathbf{S}(:,j) = \lambda_j \mathbf{S}(:,j)$.
#
#
#
#
#
# ## Eigenvalue problems, basic definitions
# To obtain the eigenvalues of $\mathbf{A}\in {\mathbb{R}}^{n\times n}$,
# the strategy is to
# perform a series of similarity transformations on the original
# matrix $\mathbf{A}$, in order to reduce it either into a diagonal form as above
# or into a tridiagonal form.
#
# We say that a matrix $\mathbf{B}$ is a similarity
# transform of $\mathbf{A}$ if
# $$
# \mathbf{B}= \mathbf{S}^T \mathbf{A}\mathbf{S}, \hspace{1cm} \mathrm{where} \hspace{1cm} \mathbf{S}^T\mathbf{S}=\mathbf{S}^{-1}\mathbf{S} =\mathbf{I}.
# $$
# The importance of a similarity transformation lies in the fact that
# the resulting matrix has the same
# eigenvalues, but the eigenvectors are in general different.
#
#
#
#
#
# ## Eigenvalue problems, basic definitions
# To prove this we
# start with the eigenvalue problem and a similarity transformed matrix $\mathbf{B}$.
# $$
# \mathbf{A}\mathbf{x}=\lambda\mathbf{x} \hspace{1cm} \mathrm{and}\hspace{1cm}
# \mathbf{B}= \mathbf{S}^T \mathbf{A}\mathbf{S}.
# $$
# We multiply the first equation on the left by $\mathbf{S}^T$ and insert
# $\mathbf{S}^{T}\mathbf{S} = \mathbf{I}$ between $\mathbf{A}$ and $\mathbf{x}$. Then we get
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# (\mathbf{S}^T\mathbf{A}\mathbf{S})(\mathbf{S}^T\mathbf{x})=\lambda\mathbf{S}^T\mathbf{x} ,
# \label{_auto2} \tag{3}
# \end{equation}
# $$
# which is the same as
# $$
# \mathbf{B} \left ( \mathbf{S}^T\mathbf{x} \right ) = \lambda \left (\mathbf{S}^T\mathbf{x}\right ).
# $$
# The variable $\lambda$ is an eigenvalue of $\mathbf{B}$ as well, but with
# eigenvector $\mathbf{S}^T\mathbf{x}$.
#
#
#
#
#
# ## Eigenvalue problems, basic definitions
# The basic philosophy is to
# * Either apply subsequent similarity transformations (direct method) so that
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# \mathbf{S}_N^T\dots \mathbf{S}_1^T\mathbf{A}\mathbf{S}_1\dots \mathbf{S}_N=\mathbf{D} ,
# \label{_auto3} \tag{4}
# \end{equation}
# $$
# * Or apply subsequent similarity transformations so that $\mathbf{A}$ becomes tridiagonal (Householder) or upper/lower triangular (the *QR* method to be discussed later).
#
# * Thereafter, techniques for obtaining eigenvalues from tridiagonal matrices can be used.
#
# * Or use so-called power methods
#
# * Or use iterative methods (Krylov, Lanczos, Arnoldi). These methods are popular for huge matrix problems.
#
#
#
#
#
#
# ## Discussion of methods for eigenvalues
# **The general overview.**
#
#
# One speaks normally of two main approaches to solving the eigenvalue problem.
# * The first is the formal method, involving determinants and the characteristic polynomial. This proves how many eigenvalues there are, and is the way most of you learned about how to solve the eigenvalue problem, but for matrices of dimensions greater than 2 or 3, it is rather impractical.
#
# * The other general approach is to use similarity or unitary tranformations to reduce a matrix to diagonal form. This is normally done in two steps: first reduce to for example a *tridiagonal* form, and then to diagonal form. The main algorithms we will discuss in detail, Jacobi's and Householder's (so-called direct method) and Lanczos algorithms (an iterative method), follow this methodology.
#
#
#
#
#
# ## Eigenvalues methods
# Direct or non-iterative methods require for matrices of dimensionality $n\times n$ typically $O(n^3)$ operations. These methods are normally called standard methods and are used for dimensionalities
# $n \sim 10^5$ or smaller. A brief historical overview
#
# <table border="1">
# <thead>
# <tr><th align="center"> Year </th> <th align="center"> $n$ </th> <th align="center"> </th> </tr>
# </thead>
# <tbody>
# <tr><td align="center"> 1950 </td> <td align="center"> $n=20$ </td> <td align="center"> (Wilkinson) </td> </tr>
# <tr><td align="center"> 1965 </td> <td align="center"> $n=200$ </td> <td align="center"> (Forsythe et al.) </td> </tr>
# <tr><td align="center"> 1980 </td> <td align="center"> $n=2000$ </td> <td align="center"> Linpack </td> </tr>
# <tr><td align="center"> 1995 </td> <td align="center"> $n=20000$ </td> <td align="center"> Lapack </td> </tr>
# <tr><td align="center"> This decade </td> <td align="center"> $n\sim 10^5$ </td> <td align="center"> Lapack </td> </tr>
# </tbody>
# </table>
# shows that in the course of 60 years the dimension that direct diagonalization methods can handle has increased by almost a factor of
# $10^4$ (note this is for serial versions). However, it pales beside the progress achieved by computer hardware, from flops to petaflops, a factor of almost $10^{15}$. We see clearly played out in history the $O(n^3)$ bottleneck of direct matrix algorithms.
#
# Sloppily speaking, when $n\sim 10^4$ is cubed we have $O(10^{12})$ operations, which is smaller than the $10^{15}$ increase in flops.
#
#
#
#
#
# ## Discussion of methods for eigenvalues
# If the matrix to diagonalize is large and sparse, direct methods simply become impractical,
# also because
# many of the direct methods tend to destroy sparsity. As a result large dense matrices may arise during the diagonalization procedure. The idea behind iterative methods is to project the
# $n-$dimensional problem in smaller spaces, so-called Krylov subspaces.
# Given a matrix $\mathbf{A}$ and a vector $\mathbf{v}$, the associated Krylov sequences of vectors
# (and thereby subspaces)
# $\mathbf{v}$, $\mathbf{A}\mathbf{v}$, $\mathbf{A}^2\mathbf{v}$, $\mathbf{A}^3\mathbf{v},\dots$, represent
# successively larger Krylov subspaces.
#
# <table border="1">
# <thead>
# <tr><th align="center"> Matrix </th> <th align="center">$\mathbf{A}\mathbf{x}=\mathbf{b}$</th> <th align="center">$\mathbf{A}\mathbf{x}=\lambda\mathbf{x}$</th> </tr>
# </thead>
# <tbody>
# <tr><td align="left"> $\mathbf{A}=\mathbf{A}^*$ </td> <td align="left"> Conjugate gradient </td> <td align="left"> Lanczos </td> </tr>
# <tr><td align="left"> $\mathbf{A}\ne \mathbf{A}^*$ </td> <td align="left"> GMRES etc </td> <td align="left"> Arnoldi </td> </tr>
# </tbody>
# </table>
#
#
#
#
#
# ## Eigenvalues and Lanczos' method
# Basic features with a real symmetric matrix (and normally huge $n> 10^6$ and sparse)
# $\hat{A}$ of dimension $n\times n$:
#
# * Lanczos' algorithm generates a sequence of real tridiagonal matrices $T_k$ of dimension $k\times k$ with $k\le n$, with the property that the extremal eigenvalues of $T_k$ are progressively better estimates of $\hat{A}$'s extremal eigenvalues. The method converges to the extremal eigenvalues.
#
# * The similarity transformation is
# $$
# \hat{T}= \hat{Q}^{T}\hat{A}\hat{Q},
# $$
# with the first vector $\hat{Q}\hat{e}_1=\hat{q}_1$.
#
# We are going to solve iteratively
# $$
# \hat{T}= \hat{Q}^{T}\hat{A}\hat{Q},
# $$
# with the first vector $\hat{Q}\hat{e}_1=\hat{q}_1$.
# We can write out the matrix $\hat{Q}$ in terms of its column vectors
# $$
# \hat{Q}=\left[\hat{q}_1\hat{q}_2\dots\hat{q}_n\right].
# $$
# ## Eigenvalues and Lanczos' method, tridiagonal matrix
# The matrix
# $$
# \hat{T}= \hat{Q}^{T}\hat{A}\hat{Q},
# $$
# can be written as
# $$
# \hat{T} = \left(\begin{array}{cccccc}
# \alpha_1& \beta_1 & 0 &\dots & \dots &0 \\
# \beta_1 & \alpha_2 & \beta_2 &0 &\dots &0 \\
# 0& \beta_2 & \alpha_3 & \beta_3 & \dots &0 \\
# \dots& \dots & \dots &\dots &\dots & 0 \\
# \dots& & &\beta_{n-2} &\alpha_{n-1}& \beta_{n-1} \\
# 0& \dots &\dots &0 &\beta_{n-1} & \alpha_{n} \\
# \end{array} \right)
# $$
# ## Eigenvalues and Lanczos' method, tridiagonal and orthogonal matrices
# Using the fact that
# $$
# \hat{Q}\hat{Q}^T=\hat{I},
# $$
# we can rewrite
# $$
# \hat{T}= \hat{Q}^{T}\hat{A}\hat{Q},
# $$
# as
# $$
# \hat{Q}\hat{T}= \hat{A}\hat{Q}.
# $$
# ## Eigenvalues and Lanczos' method
# If we equate columns
# $$
# \hat{T} = \left(\begin{array}{cccccc}
# \alpha_1& \beta_1 & 0 &\dots & \dots &0 \\
# \beta_1 & \alpha_2 & \beta_2 &0 &\dots &0 \\
# 0& \beta_2 & \alpha_3 & \beta_3 & \dots &0 \\
# \dots& \dots & \dots &\dots &\dots & 0 \\
# \dots& & &\beta_{n-2} &\alpha_{n-1}& \beta_{n-1} \\
# 0& \dots &\dots &0 &\beta_{n-1} & \alpha_{n} \\
# \end{array} \right)
# $$
# we obtain
# $$
# \hat{A}\hat{q}_k=\beta_{k-1}\hat{q}_{k-1}+\alpha_k\hat{q}_k+\beta_k\hat{q}_{k+1}.
# $$
# ## Eigenvalues and Lanczos' method, defining the Lanczos' vectors
# We have thus
# $$
# \hat{A}\hat{q}_k=\beta_{k-1}\hat{q}_{k-1}+\alpha_k\hat{q}_k+\beta_k\hat{q}_{k+1},
# $$
# with $\beta_0\hat{q}_0=0$ for $k=1:n-1$. Remember that the vectors $\hat{q}_k$ are orthonormal and this implies
# $$
# \alpha_k=\hat{q}_k^T\hat{A}\hat{q}_k,
# $$
# and these vectors are called Lanczos vectors.
#
#
#
#
# ## Eigenvalues and Lanczos' method, basic steps
# We have thus
# $$
# \hat{A}\hat{q}_k=\beta_{k-1}\hat{q}_{k-1}+\alpha_k\hat{q}_k+\beta_k\hat{q}_{k+1},
# $$
# with $\beta_0\hat{q}_0=0$ for $k=1:n-1$ and
# $$
# \alpha_k=\hat{q}_k^T\hat{A}\hat{q}_k.
# $$
# If
# $$
# \hat{r}_k=(\hat{A}-\alpha_k\hat{I})\hat{q}_k-\beta_{k-1}\hat{q}_{k-1},
# $$
# is non-zero, then
# $$
# \hat{q}_{k+1}=\hat{r}_{k}/\beta_k,
# $$
# with $\beta_k=\pm ||\hat{r}_{k}||_2$.
| doc/LectureNotes/fcitheory.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="LQrfanTv0HcB"
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
# + id="Dz1pWIVW2sdz"
# Accumulator for the spectrometer readings: one entry per tritium-vial colour.
results = {}
# + id="GO_saAT60L__"
# Measured spectrum for the Green vial: eight wavelength bands plus the
# clear-channel and near-IR readings, at the recorded lux level.
results['Green'] = {
    'lux': 0.9216,
    'target_wavelengths': {
        wl: {'color_name': band, 'color_intensity': level}
        for wl, band, level in [
            ('415', 'Violet', 0),
            ('445', 'Indigo', 0),
            ('480', 'Blue', 6),
            ('515', 'Cyan', 28),
            ('555', 'Green', 25),
            ('590', 'Yellow', 8),
            ('630', 'Orange', 2),
            ('680', 'Red', 0),
        ]
    },
    'special_measurements': {
        channel: {'color_name': band, 'color_intensity': level}
        for channel, band, level in [
            ('Clear', 'Clear', 38),
            ('NIR', 'Near-IR', 0),
        ]
    },
}
# + id="2KRculDg27RP"
# Measured spectrum for the Ice Blue vial (same layout as the other entries).
results['Ice Blue'] = {
    'lux': 0.4032,
    'target_wavelengths': {
        wl: {'color_name': band, 'color_intensity': level}
        for wl, band, level in [
            ('415', 'Violet', 2),
            ('445', 'Indigo', 12),
            ('480', 'Blue', 18),
            ('515', 'Cyan', 10),
            ('555', 'Green', 2),
            ('590', 'Yellow', 0),
            ('630', 'Orange', 0),
            ('680', 'Red', 0),
        ]
    },
    'special_measurements': {
        channel: {'color_name': band, 'color_intensity': level}
        for channel, band, level in [
            ('Clear', 'Clear', 29),
            ('NIR', 'Near-IR', 0),
        ]
    },
}
# + id="1YY6s4V33KOk"
# Measured spectrum for the White vial (same layout as the other entries).
results['White'] = {
    'lux': 0.1296,
    'target_wavelengths': {
        wl: {'color_name': band, 'color_intensity': level}
        for wl, band, level in [
            ('415', 'Violet', 0),
            ('445', 'Indigo', 2),
            ('480', 'Blue', 2),
            ('515', 'Cyan', 2),
            ('555', 'Green', 5),
            ('590', 'Yellow', 4),
            ('630', 'Orange', 2),
            ('680', 'Red', 0),
        ]
    },
    'special_measurements': {
        channel: {'color_name': band, 'color_intensity': level}
        for channel, band, level in [
            ('Clear', 'Clear', 13),
            ('NIR', 'Near-IR', 0),
        ]
    },
}
# + id="bUc1_4luE9E9"
# Measured spectrum for the YellowGreen vial (same layout as the other entries).
results['YellowGreen'] = {
    'lux': 0.6048,
    'target_wavelengths': {
        wl: {'color_name': band, 'color_intensity': level}
        for wl, band, level in [
            ('415', 'Violet', 0),
            ('445', 'Indigo', 0),
            ('480', 'Blue', 0),
            ('515', 'Cyan', 11),
            ('555', 'Green', 27),
            ('590', 'Yellow', 21),
            ('630', 'Orange', 9),
            ('680', 'Red', 1),
        ]
    },
    'special_measurements': {
        channel: {'color_name': band, 'color_intensity': level}
        for channel, band, level in [
            ('Clear', 'Clear', 37),
            ('NIR', 'Near-IR', 0),
        ]
    },
}
# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="BFMRhkYB6vMU" outputId="5c252ef5-ba9f-4bbc-e624-df50cb24af1d"
# One bar-chart spectrogram per vial colour, side by side in a 1x4 grid.
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(27, 6))
colors = ['White', 'Green', 'Ice Blue', 'YellowGreen']
for i in range(4):
    # Per-panel series: wavelength labels (strings, so bars are categorical),
    # intensities, and a matplotlib colour name per bar.
    y = []
    x = []
    color = []
    target_color = colors[i]
    for wavelength in results[target_color]['target_wavelengths']:
        x.append(wavelength)
        y.append(results[target_color]['target_wavelengths'][wavelength]['color_intensity'])
        color.append(results[target_color]['target_wavelengths'][wavelength]['color_name'])
    axes[i].set_ylabel('Intensity')
    axes[i].set_xlabel('Wavelength')
    axes[i].set_title(f'Spectrogram of a {target_color} Tritium Vial at {results[target_color]["lux"]} Lux')
    axes[i].bar(x, y, color=color)
# + colab={"base_uri": "https://localhost:8080/", "height": 398} id="1FwIE__fvLIU" outputId="3becc86f-efcb-4c27-c214-96040511a955"
# Smooth rendering: draw each measured band as a Gaussian (sd = 10 nm)
# normalised to unit height, then scaled by the band's intensity.
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=(27, 6))
colors = ['White', 'Green', 'Ice Blue', 'YellowGreen']
for i in range(4):
    # NOTE(review): `y = []` and `color = []` are immediately overwritten /
    # never used in this cell.
    y = []
    x = np.arange(300, 700, 1)
    color = []
    target_color = colors[i]
    for wavelength in results[target_color]['target_wavelengths']:
        y = norm.pdf(x,int(wavelength),10)
        # Normalise the pdf to a peak of 1 before scaling by intensity.
        y = (y * 1) / np.max(y)
        y = y * results[target_color]['target_wavelengths'][wavelength]['color_intensity']
        axes[i].plot(x,y, color=results[target_color]['target_wavelengths'][wavelength]['color_name'])
#choose plot style and display the bell curve
plt.style.use('fivethirtyeight')
plt.show()
# + id="Ub2NQrmJvLK4"
# Sanity check: peak value of an unscaled Gaussian pdf with sd = 10.
x = np.arange(300, 700, 1)
y = norm.pdf(x,500,10)
# + colab={"base_uri": "https://localhost:8080/"} id="sQ4TQspBvLNf" outputId="3849074f-62f7-4f2c-8ba1-82b1035acffc"
# Notebook echo: 1 / (10 * sqrt(2*pi)) ~= 0.0399.
np.max(y)
# + id="ZBImlcUwvLP3"
# + id="1CRJcN_fvLSC"
# + id="Jf5qddXcvLUJ"
# + id="RBlfv0hWvLYA"
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="5bVlMNE-2oOj" outputId="56fc35b4-410d-4d54-ed33-1e2fedc8333b"
# Single bar-chart spectrogram for the Green vial.
y = []
x = []
color = []
target_color = 'Green'
for wavelength in results[target_color]['target_wavelengths']:
    x.append(wavelength)
    y.append(results[target_color]['target_wavelengths'][wavelength]['color_intensity'])
    color.append(results[target_color]['target_wavelengths'][wavelength]['color_name'])
fig, ax = plt.subplots()
ax.set_ylabel('Intensity')
ax.set_xlabel('Wavelength')
ax.set_title(f'Spectrogram of a {target_color} Tritium Vial at {results[target_color]["lux"]} Lux')
ax.bar(x, y, color=color)
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="lmkzMf1j6GU_" outputId="2f65fc8a-a473-4784-8b8e-1d3e017d677f"
# Same plot for the White vial.
y = []
x = []
color = []
target_color = 'White'
for wavelength in results[target_color]['target_wavelengths']:
    x.append(wavelength)
    y.append(results[target_color]['target_wavelengths'][wavelength]['color_intensity'])
    color.append(results[target_color]['target_wavelengths'][wavelength]['color_name'])
fig, ax = plt.subplots()
ax.set_ylabel('Intensity')
ax.set_xlabel('Wavelength')
ax.set_title(f'Spectrogram of a {target_color} Tritium Vial at {results[target_color]["lux"]} Lux')
ax.bar(x, y, color=color)
# + colab={"base_uri": "https://localhost:8080/", "height": 313} id="JkW11I9w6Gdg" outputId="78549178-9d85-426a-a5b2-e3f4fc0d5295"
# Same plot for the Ice Blue vial.
y = []
x = []
color = []
target_color = 'Ice Blue'
for wavelength in results[target_color]['target_wavelengths']:
    x.append(wavelength)
    y.append(results[target_color]['target_wavelengths'][wavelength]['color_intensity'])
    color.append(results[target_color]['target_wavelengths'][wavelength]['color_name'])
fig, ax = plt.subplots()
ax.set_ylabel('Intensity')
ax.set_xlabel('Wavelength')
ax.set_title(f'Spectrogram of a {target_color} Tritium Vial at {results[target_color]["lux"]} Lux')
ax.bar(x, y, color=color)
# + colab={"base_uri": "https://localhost:8080/"} id="0sM1U0oW5AeQ" outputId="b4e6e5dd-463a-4165-94fa-d7aca40670db"
# Notebook echo: display the colour-name list built by the previous cell.
color
# + id="2iIRT6_84Jwo" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="a0db7b5c-6175-4de3-fd74-41ba9d034de2"
# importing modules
import numpy
from matplotlib import pyplot
# Sample points covering the sensor band range 415-680 nm.
# NOTE(review): numpy.sin over integer sample points gives a rapidly
# oscillating, aliased signal; presumably a placeholder experiment rather
# than real sensor data -- confirm intent.
signalTime = numpy.arange(415, 681, 1);
# getting the amplitude of the signal
signalAmplitude = numpy.sin(signalTime)
# plotting the magnitude spectrum of the signal
pyplot.magnitude_spectrum(signalAmplitude, color ='green')
pyplot.title("Magnitude Spectrum of the Signal")
pyplot.show()
# + colab={"base_uri": "https://localhost:8080/"} id="aQ7T9t4Csw69" outputId="4a9fd8c6-ecf8-436d-c1f4-5abcb9c9f981"
# Notebook echo: display the sample-point array from the previous cell.
signalTime
# + colab={"base_uri": "https://localhost:8080/", "height": 398} id="ezzDIDNWuOPg" outputId="27daa5f1-9614-4d88-8218-30e23a1cf396"
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
# x-values spanning 415-680 nm in 1 nm steps
# (stale template comment said "-4 to 4 in increments of .001")
x = np.arange(415, 681, 1)
# normal pdf centred at 500 nm with sd = 10 nm
# (stale template comment said "mean=0 and sd=1")
y = norm.pdf(x,500,10)
#define plot
fig, ax = plt.subplots(figsize=(9,6))
ax.plot(x,y)
#choose plot style and display the bell curve
plt.style.use('fivethirtyeight')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="r41h9ygBukFB" outputId="f744c2e6-8301-4f95-aebd-90c34474fada"
# Notebook echo: display the pdf values from the previous cell.
y
# + id="KJLGkhd2vHXA"
| measurement_techniques/analiyze_measurements/spectrogram_v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# +
import sys
sys.path.append('../')
sys.path.append('../support/')
sys.path.append('../lung_segmentation/')
from preprocessing import *
from ct_reader import *
import pandas as pd
from os.path import join, basename, isfile
from scipy.ndimage.interpolation import zoom
from glob import glob
from multiprocessing import Pool
from scipy.ndimage import morphology
from scipy.ndimage import label
from skimage import measure
from tqdm import tqdm
from multiprocessing import Pool
import pickle
# %pylab inline
from paths import *
from skimage.transform import resize
import warnings
import seaborn as sns
sns.set_style('ticks')
warnings.filterwarnings('ignore')
# -
def read_ct(path, ret_xy_spacing=False, ret_original_format=True):
    """Read a CT scan and convert its voxels to Hounsfield units.

    Returns (image, scan_object) by default, (image, xy_spacing) when
    ret_xy_spacing is true and ret_original_format is false, otherwise
    just the image.
    """
    scan = read_ct_scan(path)
    hu_volume = get_pixels_hu(scan)
    # image[image == image[0,0,0]] = 0  (background zeroing, left disabled)
    if ret_original_format:
        return hu_volume, scan
    if ret_xy_spacing:
        return hu_volume, scan.GetSpacing()[0]
    return hu_volume
def region_growing(img, seed, maxthr, structure=None):
    """Grow a region from *seed*: the union of all connected components
    (under *structure*) of voxels with intensity below *maxthr* that the
    seed touches.

    Adapted from https://github.com/loli/medpy/wiki/Basic-image-manipulation
    (docstring typo "tekan" fixed).

    Parameters
    ----------
    img : ndarray of intensities
    seed : index tuple or boolean mask selecting the seed voxel(s)
    maxthr : scalar; only voxels with img < maxthr can join the region
    structure : optional connectivity structure for scipy.ndimage.label

    Returns
    -------
    Boolean ndarray, True inside the grown region.
    """
    thrimg = img < maxthr
    lmap, _ = label(thrimg, structure=structure)
    lids = unique(lmap[seed])
    # BUG FIX: numpy.bool was deprecated and removed (NumPy >= 1.24);
    # use the builtin bool dtype instead.
    region = zeros(img.shape, bool)
    for lid in lids:
        region |= lmap == lid
    return region
def segment_nodules(patch, mask, is_nodule=True, magic_const=50):
    """Split *patch* into two KMeans intensity clusters and return a boolean
    mask of the brighter connected component (the nodule candidate).

    Parameters
    ----------
    patch : ndarray of intensities
    mask : ndarray; value 2 marks the nodule seed region
    is_nodule : cluster only mask == 2 voxels when True, else the whole
        patch (mask >= 0)
    magic_const : offset keeping cluster ids clear of the background value 0

    Returns
    -------
    Boolean component mask, or the input *mask* unchanged when there are
    too few voxels to cluster.
    """
    # Min-max normalise so KMeans sees a [0, 1] intensity range.
    prepared = (patch - patch.min()) / (patch.max() - patch.min())
    kmeans = KMeans(n_clusters=2)
    # BUG FIX: the original referenced undefined IS_NODULE (NameError);
    # the parameter is spelled is_nodule.
    if is_nodule:
        coords = where(mask == 2)
    else:
        coords = where(mask >= 0)
    data = prepared[coords]
    if data.shape[0] <= 2:
        # Not enough voxels for a two-cluster fit.
        return mask
    data = kmeans.fit_predict(expand_dims(data, 1))
    kmean = zeros(mask.shape)
    kmean[coords] = data + magic_const
    # BUG FIX: use skimage's measure.label -- the bare `label` imported in
    # this file is scipy.ndimage.label, which accepts neither return_num
    # nor background (this call raised TypeError).
    labels, num = measure.label(kmean, return_num=True, background=0)
    # Largest connected component overall.
    nodule_a = argmax([sum(labels == i) for i in range(1, num + 1)]) + 1
    init = kmeans.predict(expand_dims(prepared[labels == nodule_a], 1)).min()
    # Largest component belonging to the *other* KMeans cluster.
    nodule_b = list()
    for i in range(1, num + 1):
        if i != nodule_a:
            if kmeans.predict(expand_dims(prepared[where(labels == i)], 1)).min() != init:
                nodule_b.append((sum(labels == i), i))
    nodule_b = max(nodule_b)[1]
    A = prepared[labels == nodule_a]
    B = prepared[labels == nodule_b]
    # Keep whichever of the two components is brighter on average.
    if mean(A.reshape(-1)) > mean(B.reshape(-1)):
        labels = labels == nodule_a
    else:
        labels = labels == nodule_b
    return labels
# Half the side length of each cropped cube (cubes are 2 * BORDER = 64 voxels).
BORDER = 32
# Target voxel spacing for resampling; presumably (z, y, x) in mm -- TODO confirm.
SPACING = array([.9, .7, .7])
# Worker count for the multiprocessing pools below.
CPU = 24
# LUNA candidate and annotation tables.
candidates = pd.read_csv(join(PATH['LUNA_CSV'], 'candidates.csv'))
annotations = pd.read_csv(join(PATH['LUNA_CSV'], 'annotations.csv'))
candidates.head()
# Precomputed test/valid/train path splits.
test = load(join(PATH['WEIGHTS'], 'test.npy'))
valid = load(join(PATH['WEIGHTS'], 'valid.npy'))
train = load(join(PATH['WEIGHTS'], 'train.npy'))
# +
def get_remind_files():
    """Locate the .mhd file (and its subset index) for every candidate series id.

    Returns (file_list, folders): the resolved paths and their subset numbers,
    in matching order.
    """
    series_ids = set(sid for sid in candidates.seriesuid)
    located = []
    for sid in series_ids:
        # At most one subset folder contains each series.
        matches = [(join(PATH['LUNA_DATA'], 'subset' + str(k), sid + '.mhd'), k)
                   for k in range(10)
                   if isfile(join(PATH['LUNA_DATA'], 'subset' + str(k), sid + '.mhd'))]
        located.append(matches)
    folders = [entry[0][1] for entry in located]
    file_list = [entry[0][0] for entry in located]
    return file_list, folders
# -
def overlap(lung, mask):
    """Crop a (2*BORDER)^3 patch around the centroid of every connected
    component in *mask*.

    Returns (patches, coords) where coords is a 3 x N array of component
    centroids rescaled by the module-level SPACING.
    """
    # iso = binary_dilation(imresize(isolated[163], (512, 512)))
    # Label connected components; `num` is unused, labeled.max() is used instead.
    labeled, num = label(mask)
    coords = list()
    for colour in range(1, labeled.max() + 1):
        coords.append(where(labeled == colour))
    # Component centroids, divided by SPACING to land in resampled voxel
    # coordinates -- assumes the mask is in pre-resampling space; TODO confirm.
    coords = array([[int(coord[0].mean() / SPACING[0])
              for coord in coords],
             [int(coord[1].mean() / SPACING[1])
              for coord in coords],
             [int(coord[2].mean() / SPACING[2])
              for coord in coords]])
    # Edge-pad so patches near the volume border keep their full size.
    lung = pad(lung,
               ((BORDER, BORDER),
                (BORDER, BORDER),
                (BORDER, BORDER)),
               mode='edge')
    patches = list()
    for coord in coords.T:
        # In the padded volume this slice is centred on the original centroid.
        patch = lung[coord[0]: coord[0] + 2 * BORDER,
                     coord[1]: coord[1] + 2 * BORDER,
                     coord[2]: coord[2] + 2 * BORDER]
        patches.append(patch)
    return patches, coords
# +
def operate(path, upsides):
    """Crop enhanced-mask patches for one scan and save them to disk.

    *upsides* lists scan names stored upside down that must be flipped.
    Returns a list of problems found (-1 sentinel for a flip, or
    (path, coord) for patches with the wrong shape).
    """
    lung, ct_lung = read_ct(path, ret_original_format=True)
    lung, spacing = resample(lung, ct_lung, SPACING)
    name = basename(path)
    mask = load(join(PATH['DATA_ENHANCED'],
                     name + '.npy'))
    # mask, spacing = resample(mask, (1, 1, 1), SPACING)
    incorrects = list()
    if name in upsides:
        lung = flipud(lung)
        mask = flipud(mask)
        # Sentinel marking that this scan was flipped.
        incorrects.append(-1)
    batch, coords = overlap(lung, mask)
    for patch, coord in zip(batch, coords.T):
        # Record and skip patches clipped at the volume border.
        if patch.shape != (2 * BORDER,
                           2 * BORDER,
                           2 * BORDER):
            incorrects.append((path, coord))
            continue
        # NOTE(review): there is no separator between `name` and the first
        # coordinate in the output filename -- confirm the naming scheme.
        save(join(PATH['ENHANCED_CROPPED'],
                  name + '_'.join([str(coord[0]),
                                   str(coord[1]),
                                   str(coord[2])])),
             patch.astype(int16))
    return incorrects
# -
def get_remind_files():
    """Return sorted data paths that do not yet have cropped output."""
    available = set(glob(join(PATH['DATA'], '*')))
    # Already-processed sources, reconstructed from the cropped filenames.
    done = set(join(PATH['DATA'], basename(p).split('.npy')[0][:32])
               for p in glob(join(PATH['ENHANCED_CROPPED'], '*.npy')))
    return sorted(available.difference(done))
def overlap(lung, name, candidates, origin, spacing=None):
    """Cut one cube of side 2*BORDER around every candidate of series *name*.

    Parameters
    ----------
    lung : 3D ndarray in resampled voxel space
    name : seriesuid string used to filter *candidates*
    candidates : DataFrame with seriesuid/coordX/coordY/coordZ/class columns
    origin : world-coordinate origin of the scan
    spacing : voxel spacing; defaults to the module-level SPACING

    Returns
    -------
    (patches, cancer, rows): cube list, class labels, and source rows.
    """
    if spacing is None:
        # Resolve the module default at call time rather than at def time.
        spacing = SPACING
    nodules = candidates[candidates.seriesuid == name]
    # Edge-pad so cubes near the border keep their full size.
    lung = pad(lung,
               ((BORDER, BORDER),
                (BORDER, BORDER),
                (BORDER, BORDER)),
               mode='edge')
    patches = list()
    cancer = list()
    rows = list()
    for i, row in nodules.iterrows():
        # BUG FIX: int() cast -- numpy's ceil returns a float, which is not
        # a valid slice index on modern NumPy.
        # NOTE(review): the spacing[1]/spacing[2]/spacing[0] pairing with
        # X/Y/Z looks inconsistent with (z, y, x) ordering -- confirm.
        X = int(ceil((row.coordX - origin[0]) / spacing[1] + BORDER))
        Y = int(ceil((row.coordY - origin[1]) / spacing[2] + BORDER))
        Z = int(ceil((row.coordZ - origin[2]) / spacing[0] + BORDER))
        patches.append(lung[Z - BORDER: Z + BORDER,
                            Y - BORDER: Y + BORDER,
                            X - BORDER: X + BORDER])
        rows.append([row])
        cancer.append(row['class'])
    return patches, cancer, rows
def operate(path_and_folder):
    """Crop every candidate cube of one scan and save it under its subset folder.

    Returns a dict mapping saved patch names to their candidate rows.
    """
    resampling = True
    # NOTE(review): `bad` is never used in this function.
    bad = list()
    path, folder = path_and_folder
    lung, ct_lung = read_ct(path, ret_original_format=True)
    # Reader spacing is reversed here to match the (z, y, x) volume order --
    # presumably the reader returns (x, y, z); confirm against ct_reader.
    spacing = list(reversed(ct_lung.GetSpacing()))
    if resampling:
        lung, spacing = resample(lung, ct_lung, SPACING)
    name = basename(path).split('.mhd')[0]
    batch, cancers, rows = overlap(lung,
                                   name,
                                   candidates,
                                   ct_lung.GetOrigin(),
                                   spacing)
    table = dict()
    for i, patch, cancer, row in zip(arange(len(cancers)),
                                     batch,
                                     cancers,
                                     rows):
        # Drop patches clipped at the volume border.
        if patch.shape != (2 * BORDER,
                           2 * BORDER,
                           2 * BORDER):
            continue
        # class == 1 candidates go to the nodule folder, others to vessels.
        fold = 'LUNA_VESSELS'
        if cancer:
            fold = 'LUNA_NODULES'
        save(join(PATH[fold],
                  'subset' + str(folder),
                  name + str(i)), patch.astype(int16))
        table[name + str(i)] = row
    return table
# +
# Crop candidate cubes in parallel batches and checkpoint the metadata table
# after every batch.
# NOTE(review): `file_list` and `folders` are presumably the return values of
# get_remind_files(), which is never called in this file -- confirm.
batch_size = 100
patches = list(zip(file_list, folders))
table = dict()
for counter in tqdm(range(len(patches) // batch_size + 1)):
    batch_files = patches[batch_size * counter:
                          batch_size * (counter + 1)]
    with Pool(CPU) as pool:
        tables = pool.map(operate, batch_files)
    for t in tables:
        table.update(t)
    # BUG FIX: use a context manager so the checkpoint file handle is closed;
    # the original pickle.dump(..., open(...)) leaked the handle.
    with open(join(PATH['WEIGHTS'], 'table_nodules'), 'wb') as checkpoint:
        pickle.dump(table, checkpoint)
# -
def overlap(lung, name, candidates, origin, spacing=None):
    """Rasterize every candidate of series *name* into a voxel mask.

    Voxels get value 1 for ordinary candidates and 16 (= 1 + 15) for true
    nodules (class == 1); everything else stays 0.

    *spacing* defaults to the module-level SPACING, resolved at call time.
    """
    if spacing is None:
        spacing = SPACING
    nodules = candidates[candidates.seriesuid == name]
    mask = zeros(lung.shape)
    for i, row in nodules.iterrows():
        # BUG FIX: int() cast -- numpy's ceil yields a float, which cannot
        # index an ndarray on modern NumPy.
        # NOTE(review): spacing[1]/[2]/[0] pairing with X/Y/Z mirrors the
        # other overlap() variants -- confirm the intended axis order.
        X = int(ceil((row.coordX - origin[0]) / spacing[1]))
        Y = int(ceil((row.coordY - origin[1]) / spacing[2]))
        Z = int(ceil((row.coordZ - origin[2]) / spacing[0]))
        mask[Z, Y, X] = 1 + 15 * row['class']
    return mask
def operate(path):
    """Load one CT scan, resample it, and return (lung volume, candidate mask)."""
    lung, ct_lung = read_ct(path, ret_original_format=True)
    # Initial spacing from the scanner header (reversed to z, y, x order);
    # immediately replaced by the spacing that resample() reports.
    voxel_spacing = list(reversed(ct_lung.GetSpacing()))
    lung, voxel_spacing = resample(lung, ct_lung, SPACING)
    scan_name = basename(path).split('.mhd')[0]
    candidate_mask = overlap(lung, scan_name,
                             candidates,
                             ct_lung.GetOrigin(),
                             voxel_spacing)
    return lung, candidate_mask
def select_nodules(some_z, some_x, some_y,
                   best_z,
                   tolerance = 3):
    """Keep only the points whose z lies within [best_z, best_z + tolerance].

    Returns three parallel lists (z, x, y) of the surviving points.
    """
    kept = [(z, x, y)
            for z, x, y in zip(some_z, some_x, some_y)
            if best_z <= z <= best_z + tolerance]
    kept_z = [point[0] for point in kept]
    kept_x = [point[1] for point in kept]
    kept_y = [point[2] for point in kept]
    return kept_z, kept_x, kept_y
def plot_suspicies(path):
    """Plot candidate (gray circles) vs. true-nodule (white circles) markers
    over max-projected lung slices around each true-nodule z position."""
    lung, mask = operate(path)
    # Clip to the [-1000, 400] HU range used throughout this notebook.
    lung = clip(lung, -1000, 400)
    fp = mask.copy() == 1   # class-0 candidates (mask value 1 + 15 * 0)
    tp = mask.copy() == 16  # class-1 candidates (mask value 1 + 15 * 1)
    fp_z, fp_x, fp_y = where(fp)
    tp_z, tp_x, tp_y = where(tp)
    print(tp_z)
    for best_z in tp_z:
        t_z, t_x, t_y = select_nodules(tp_z, tp_x, tp_y, best_z)
        f_z, f_x, f_y = select_nodules(fp_z, fp_x, fp_y, best_z)
        figure(figsize=(10, 10))
        # x/y are swapped for imshow's (row, col) convention
        scatter(f_y, f_x, s=300, facecolors='none', edgecolors='gray', linewidths=1)
        scatter(t_y, t_x, s=300, facecolors='none', edgecolors='w', linewidths=1)
        # NOTE(review): `tolerance` is not defined in this function; it relies
        # on a module-level global (select_nodules defaults to 3) -- confirm
        # one exists at call time or this line raises NameError.
        imshow(-lung[best_z: best_z + tolerance].max(0));
        show()
# Plot the suspicious regions of every patient that has at least one
# true (class == 1) nodule candidate.
path = '/home/a.dobrenkii/Projects/Kaggle/DataScienceBowl2K17/data/LUNA/DATA/subset*'
positives = candidates[candidates['class'] == 1]
patients = positives.seriesuid.value_counts().index
for patient in tqdm(patients):
    scan_path = glob(join(path, patient + '.mhd'))[0]
    plot_suspicies(scan_path)
# Collect the training files named in the first "bad" record and order them
# by the numeric suffix that follows the 64-character series id.
nodules = [name for name in train if basename(name)[:64] in bads[0][0][0]]
nodules.sort(key=lambda p: int(basename(p)[64:-4]))
imshow(bads[0][2][100][32])
# Flatten all bad records and count how many entries mention a vessel.
combined = []
for record in bads:
    combined.extend(record)
candidate_type = sum(1 for entry in combined if 'vessel' in entry[-1].lower())
def extract_patches_vessels(lung, sobel, diff, amount=2):
    """Cut up to `amount` cubic patches centred on vessel components.

    Connected components of `diff == VESSEL_LABEL` are visited in random
    order; components whose voxel count falls outside [LOWER, UPPER] are
    skipped.  For each accepted component, a window of roughly
    2 * BORDER_VESSEL per axis is placed around the component's median
    coordinate, clipped to the volume, and widened back toward 2 * BORDER
    when clipping shrank it.  Returns parallel lists of (lung, sobel, diff)
    sub-volumes.
    """
    candidate = diff == VESSEL_LABEL
    if not candidate.sum():
        return [], [], []
    flag = 0   # number of patches emitted so far
    start = 1  # skip label 0 (background)
    labels, num = measure.label(candidate, background=0, return_num=True)
    marks = arange(start, num + 1)
    # Shuffle so repeated runs sample different components first.
    random.shuffle(marks)
    patches = list()
    sobels = list()
    masks = list()
    for k, label in enumerate(marks):
        overlaped = labels == label
        area = overlaped.sum()
        if area < LOWER or area > UPPER:
            continue
        coords = where(labels == label)
        medians = list()
        deltas = list()
        # Per axis j: centre the window on the median coordinate, clip it to
        # the volume, then widen back when clipping made it narrower than
        # 2 * BORDER.
        for j, coord in enumerate(coords):
            medians.append(median(coord))
            deltas.append((clip(int(medians[-1] - BORDER_VESSEL), 0, lung.shape[j]),
                           clip(int(medians[-1] + BORDER_VESSEL), 0, lung.shape[j])))
            delta = (deltas[-1][1] - deltas[-1][0]) // 2  # current half-width
            if delta < BORDER:
                # Room left above and below the clipped window on this axis.
                top = lung.shape[j] - deltas[-1][1]
                bottom = deltas[-1][0]
                if top > BORDER - delta and bottom > BORDER - delta:
                    # Enough slack on both sides: widen symmetrically.
                    deltas[-1] = (deltas[-1][0] - BORDER + delta,
                                  deltas[-1][1] + BORDER - delta)
                else:
                    # One side is tight: take what it has, push the
                    # remainder onto the roomier side.
                    bottom_top = [bottom, top]
                    min_shift = argmin(bottom_top)
                    max_shift = argmax(bottom_top)
                    remain = [0, 0]
                    remain[max_shift] = 2 * BORDER - (deltas[-1][1] - deltas[-1][0]) - 2 * bottom_top[min_shift]
                    deltas[-1] = (deltas[-1][0] - bottom_top[min_shift] - remain[0],
                                  deltas[-1][1] + bottom_top[min_shift] + remain[1])
        patches.append(lung[deltas[0][0]:deltas[0][1], deltas[1][0]:deltas[1][1], deltas[2][0]:deltas[2][1]])
        sobels.append(sobel[deltas[0][0]:deltas[0][1], deltas[1][0]:deltas[1][1], deltas[2][0]:deltas[2][1]])
        masks.append(diff[deltas[0][0]:deltas[0][1], deltas[1][0]:deltas[1][1], deltas[2][0]:deltas[2][1]])
        flag += 1
        if flag >= amount:
            return patches, sobels, masks
    return patches, sobels, masks
# +
# For every preprocessed scan, cut up to 10 vessel patches per lung side and
# save (patch, sobel, mask) triplets for the convolutional autoencoder.
for name in tqdm(preprocessed_files_pure):
    for side in ['left', 'right']:
        lung = load(join(PATH['LUNA_LUNGS'], name + 'lungs_' + side + '.npy'))
        sobel = load(join(PATH['LUNA_SOBEL'], name + 'sobel_' + side + '.npy'))
        diff = load(join(PATH['LUNA_MASKS'], name + 'diff_' + side + '.npy'))
        # NOTE: `sobel` is rebound here from the loaded gradient volume to the
        # list of extracted sobel patches (likewise `mask` replaces `diff`).
        patch, sobel, mask = extract_patches_vessels(lung, sobel, diff, 10)
        for p in mask:
            if p.shape != (90, 90, 90):
                # Report patches that are not full cubes; they are still saved
                # below.  Presumably (90, 90, 90) == (2*BORDER_VESSEL,)*3 --
                # TODO confirm against the constants' definitions.
                print(p.shape)
        # NOTE(review): the random 0-9 offset added to the index varies the
        # saved file names between runs; purpose unclear -- confirm intent.
        add = random.randint(0, 9)
        for i in range(len(mask)):
            save(join(PATH['LUNA_CAE'], name + side + str(i + add) + '_patch'), patch[i])
            save(join(PATH['LUNA_CAE'], name + side + str(i + add) + '_sobel'), sobel[i])
            save(join(PATH['LUNA_CAE'], name + side + str(i + add) + '_mask'), mask[i])
| IPython/Cat Patches.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Natural Language Inference and the Dataset
# :label:`sec_natural-language-inference-and-dataset`
#
# In :numref:`sec_sentiment`, we discussed the problem of sentiment analysis.
# This task aims to classify a single text sequence into predefined categories,
# such as a set of sentiment polarities.
# However, when there is a need to decide whether one sentence can be inferred from another,
# or eliminate redundancy by identifying sentences that are semantically equivalent,
# knowing how to classify one text sequence is insufficient.
# Instead, we need to be able to reason over pairs of text sequences.
#
#
# ## Natural Language Inference
#
# *Natural language inference* studies whether a *hypothesis*
# can be inferred from a *premise*, where both are a text sequence.
# In other words, natural language inference determines the logical relationship between a pair of text sequences.
# Such relationships usually fall into three types:
#
# * *Entailment*: the hypothesis can be inferred from the premise.
# * *Contradiction*: the negation of the hypothesis can be inferred from the premise.
# * *Neutral*: all the other cases.
#
# Natural language inference is also known as the recognizing textual entailment task.
# For example, the following pair will be labeled as *entailment* because "showing affection" in the hypothesis can be inferred from "hugging one another" in the premise.
#
# > Premise: Two women are hugging each other.
#
# > Hypothesis: Two women are showing affection.
#
# The following is an example of *contradiction* as "running the coding example" indicates "not sleeping" rather than "sleeping".
#
# > Premise: A man is running the coding example from Dive into Deep Learning.
#
# > Hypothesis: The man is sleeping.
#
# The third example shows a *neutrality* relationship because neither "famous" nor "not famous" can be inferred from the fact that "are performing for us".
#
# > Premise: The musicians are performing for us.
#
# > Hypothesis: The musicians are famous.
#
# Natural language inference has been a central topic for understanding natural language.
# It enjoys wide applications ranging from
# information retrieval to open-domain question answering.
# To study this problem, we will begin by investigating a popular natural language inference benchmark dataset.
#
#
# ## The Stanford Natural Language Inference (SNLI) Dataset
#
# Stanford Natural Language Inference (SNLI) Corpus is a collection of over 500000 labeled English sentence pairs :cite:`Bowman.Angeli.Potts.ea.2015`.
# We download and store the extracted SNLI dataset in the path `../data/snli_1.0`.
#
# + origin_pos=1 tab=["mxnet"]
import os
import re
from mxnet import gluon, np, npx
from d2l import mxnet as d2l
npx.set_np()
#@save
d2l.DATA_HUB['SNLI'] = (
'https://nlp.stanford.edu/projects/snli/snli_1.0.zip',
'9fcde07509c7e87ec61c640c1b2753d9041758e4')
data_dir = d2l.download_extract('SNLI')
# + [markdown] origin_pos=3
# ### Reading the Dataset
#
# The original SNLI dataset contains much richer information than what we really need in our experiments. Thus, we define a function `read_snli` to only extract part of the dataset, then return lists of premises, hypotheses, and their labels.
#
# + origin_pos=4 tab=["mxnet"]
#@save
def read_snli(data_dir, is_train):
    """Read the SNLI dataset into premises, hypotheses, and labels.

    Parameters
    ----------
    data_dir : directory containing the extracted snli_1.0_*.txt files.
    is_train : True for the training split, False for the test split.

    Returns
    -------
    (premises, hypotheses, labels) -- three parallel lists; labels are
    0 = entailment, 1 = contradiction, 2 = neutral.
    """
    def extract_text(s):
        # Drop the parse-tree parentheses, then collapse runs of whitespace.
        s = re.sub(r'\(|\)', '', s)
        s = re.sub(r'\s{2,}', ' ', s)
        return s.strip()
    label_set = {'entailment': 0, 'contradiction': 1, 'neutral': 2}
    file_name = os.path.join(data_dir, 'snli_1.0_train.txt'
                             if is_train else 'snli_1.0_test.txt')
    # Explicit encoding so reading does not depend on the platform locale.
    with open(file_name, 'r', encoding='utf-8') as f:
        rows = [row.split('\t') for row in f.readlines()[1:]]
    # Filter once (rows with gold label '-' have no annotator consensus)
    # instead of re-scanning `rows` three times as the original did.
    labeled = [row for row in rows if row[0] in label_set]
    premises = [extract_text(row[1]) for row in labeled]
    hypotheses = [extract_text(row[2]) for row in labeled]
    labels = [label_set[row[0]] for row in labeled]
    return premises, hypotheses, labels
# + [markdown] origin_pos=5
# Now let us print the first 3 pairs of premise and hypothesis, as well as their labels ("0", "1", and "2" correspond to "entailment", "contradiction", and "neutral", respectively ).
#
# + origin_pos=6 tab=["mxnet"]
# Show the first three (premise, hypothesis, label) triples of the train split.
train_data = read_snli(data_dir, is_train=True)
for sample in zip(train_data[0][:3], train_data[1][:3], train_data[2][:3]):
    print('premise:', sample[0])
    print('hypothesis:', sample[1])
    print('label:', sample[2])
# + [markdown] origin_pos=7
# The training set has about 550000 pairs,
# and the testing set has about 10000 pairs.
# The following shows that
# the three labels "entailment", "contradiction", and "neutral" are balanced in
# both the training set and the testing set.
#
# + origin_pos=8 tab=["mxnet"]
test_data = read_snli(data_dir, is_train=False)
# Print the per-class label counts for both splits.  `data[2]` is already a
# list, so count on it directly instead of copying it once per class as the
# original `[row for row in data[2]].count(i)` did.
for data in [train_data, test_data]:
    print([data[2].count(i) for i in range(3)])
# + [markdown] origin_pos=9
# ### Defining a Class for Loading the Dataset
#
# Below we define a class for loading the SNLI dataset by inheriting from the `Dataset` class in Gluon. The argument `num_steps` in the class constructor specifies the length of a text sequence so that each minibatch of sequences will have the same shape.
# In other words,
# tokens after the first `num_steps` ones in longer sequence are trimmed, while special tokens “<pad>” will be appended to shorter sequences until their length becomes `num_steps`.
# By implementing the `__getitem__` function, we can arbitrarily access the premise, hypothesis, and label with the index `idx`.
#
# + origin_pos=10 tab=["mxnet"]
#@save
class SNLIDataset(gluon.data.Dataset):
    """A customized dataset to load the SNLI dataset."""
    def __init__(self, dataset, num_steps, vocab=None):
        self.num_steps = num_steps
        premise_tokens = d2l.tokenize(dataset[0])
        hypothesis_tokens = d2l.tokenize(dataset[1])
        # Build a vocabulary over both sides unless one was handed in
        # (the test split must reuse the training vocabulary).
        self.vocab = vocab if vocab is not None else d2l.Vocab(
            premise_tokens + hypothesis_tokens,
            min_freq=5, reserved_tokens=['<pad>'])
        self.premises = self._pad(premise_tokens)
        self.hypotheses = self._pad(hypothesis_tokens)
        self.labels = np.array(dataset[2])
        print('read ' + str(len(self.premises)) + ' examples')
    def _pad(self, lines):
        # Map tokens to ids, then truncate/pad every row to `num_steps`.
        padded = [d2l.truncate_pad(self.vocab[line], self.num_steps,
                                   self.vocab['<pad>'])
                  for line in lines]
        return np.array(padded)
    def __getitem__(self, idx):
        return (self.premises[idx], self.hypotheses[idx]), self.labels[idx]
    def __len__(self):
        return len(self.premises)
# + [markdown] origin_pos=12
# ### Putting All Things Together
#
# Now we can invoke the `read_snli` function and the `SNLIDataset` class to download the SNLI dataset and return `DataLoader` instances for both training and testing sets, together with the vocabulary of the training set.
# It is noteworthy that we must use the vocabulary constructed from the training set
# as that of the testing set.
# As a result, any new token from the testing set will be unknown to the model trained on the training set.
#
# + origin_pos=13 tab=["mxnet"]
#@save
def load_data_snli(batch_size, num_steps=50):
    """Download the SNLI dataset and return data iterators and vocabulary."""
    num_workers = d2l.get_dataloader_workers()
    data_dir = d2l.download_extract('SNLI')
    # The test set reuses the training vocabulary so unseen tokens map to <unk>.
    train_set = SNLIDataset(read_snli(data_dir, True), num_steps)
    test_set = SNLIDataset(read_snli(data_dir, False), num_steps,
                           train_set.vocab)
    make_loader = lambda split, shuffle: gluon.data.DataLoader(
        split, batch_size, shuffle=shuffle, num_workers=num_workers)
    return (make_loader(train_set, True),
            make_loader(test_set, False),
            train_set.vocab)
# + [markdown] origin_pos=15
# Here we set the batch size to 128 and sequence length to 50,
# and invoke the `load_data_snli` function to get the data iterators and vocabulary.
# Then we print the vocabulary size.
#
# + origin_pos=16 tab=["mxnet"]
train_iter, test_iter, vocab = load_data_snli(128, 50)
len(vocab)
# + [markdown] origin_pos=17
# Now we print the shape of the first minibatch.
# Contrary to sentiment analysis,
# we have two inputs `X[0]` and `X[1]` representing pairs of premises and hypotheses.
#
# + origin_pos=18 tab=["mxnet"]
for X, Y in train_iter:
print(X[0].shape)
print(X[1].shape)
print(Y.shape)
break
# + [markdown] origin_pos=19
# ## Summary
#
# * Natural language inference studies whether a hypothesis can be inferred from a premise, where both are a text sequence.
# * In natural language inference, relationships between premises and hypotheses include entailment, contradiction, and neutral.
# * Stanford Natural Language Inference (SNLI) Corpus is a popular benchmark dataset of natural language inference.
#
#
# ## Exercises
#
# 1. Machine translation has long been evaluated based on superficial $n$-gram matching between an output translation and a ground-truth translation. Can you design a measure for evaluating machine translation results by using natural language inference?
# 1. How can we change hyperparameters to reduce the vocabulary size?
#
# + [markdown] origin_pos=20 tab=["mxnet"]
# [Discussions](https://discuss.d2l.ai/t/394)
#
| python/d2l-en/mxnet/chapter_natural-language-processing-applications/natural-language-inference-and-dataset.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.0-rc2
# language: julia
# name: julia-1.7
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Boundary Value Problems
#
# Another class of ODEs are *boundary value problems* (BVPs), where conditions on the solution are given at two different values for the independent variable (instead of at only an initial time, as for the IVPs). In these problems the independent variable is often a spatial coordinate, so we denote it by $x$. A model BVP problem is the Poisson equation with Dirichlet conditions at the endpoints, for example:
#
# $$
# \begin{align*}
# u''(x) &= f(x), \quad 0<x<1 \\
# u(0) &= \alpha \\
# u(1) &= \beta
# \end{align*}
# $$
#
# This problem can be solved using finite differences. Introduce $n+2$ points between 0 and 1, a grid spacing $h = 1/(n+1)$ and a corresponding grid of points $x_j = jh$, $j=0,1,\ldots,n+1$. At each of these grid points, we will approximate the solution numerically, that is, $u_j \approx u(x_j)$. To impose the differential equations, we need to estimate the second derivative $u''(x)$ at each grid point. We can do this using finite difference approximations, for example the second-order accurate formula
#
# $$
# u''(x_j) \approx \frac{1}{h^2}(u_{j+1} - 2u_j + u_{j-1})
# $$
#
# Using this we can approximate the differential equation at all interior points:
#
# $$
# u_{j+1} - 2u_j + u_{j-1} = h^2 f(x_j), \quad j = 1,\ldots, n
# $$
#
# The boundary values are simply imposed by setting $u_0 = \alpha$ and $u_{n+1} = \beta$. This leads to a tridiagonal linear system of equations $Au=b$ where
#
# $$
# A=
# \begin{bmatrix}
# -2 & 1 & & \\
# 1 & -2 & 1 & \\
# & & \ddots & \\
# & & 1 & -2
# \end{bmatrix}
# \quad u=
# \begin{bmatrix}
# u_1 \\ u_2 \\ \vdots \\ u_n
# \end{bmatrix}
# \quad b=
# \begin{bmatrix}
# h^2f(x_1) - \alpha \\
# h^2f(x_2) \\
# \vdots \\
# h^2f(x_n)-\beta
# \end{bmatrix}
# $$
# + [markdown] slideshow={"slide_type": "slide"}
# ## Example BVP
#
# For example we consider the boundary value problem
#
#
# $$
# \begin{align*}
# u''(x) &= 10e^{2x}\sin(2\pi x), \quad 0<x<1 \\
# u(0) &= -1 \\
# u(1) &= 1
# \end{align*}
# $$
#
# We use a grid with $n=49$ interior points (50 intervals).
# -
using PyPlot, PyCall, LinearAlgebra
# + slideshow={"slide_type": "subslide"}
# Finite-difference solve of u''(x) = f(x) on (0, 1) with u(0) = -1, u(1) = 1.
n = 49            # number of interior grid points
h = 1 / (n + 1)   # grid spacing
x = h * (1:n)     # interior nodes x_j = j * h
f(x) = 10exp(2x) * sin(2π * x)
# Symmetric tridiagonal second-difference operator; the h^2 factor is kept
# on the right-hand side instead of scaling A.
A = SymTridiagonal(-2ones(n), ones(n))
b = h^2 * f.(x)
b[1] += 1     # move the boundary value u(0) = -1 to the RHS
b[end] -= 1   # move the boundary value u(1) =  1 to the RHS
u = A \ b
plot(x, u)
grid(true)
# -
| textbook/content/Differential_Equations/Boundary_Value_Problems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="h9Yy6byI53UA"
# ## Imports
# + colab={"base_uri": "https://localhost:8080/"} id="KfcOkXAQbgZW" executionInfo={"status": "ok", "timestamp": 1640020612703, "user_tz": -330, "elapsed": 23490, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="f18a5097-2d9f-463e-9198-422e4717e1cd"
from google.colab import drive
drive.mount('/content/drive')
path ='drive/MyDrive/Code/Scripts/'
path_data ='drive/MyDrive/Data/'
import sys, os, random
sys.path.append(path)
import h5py
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.special import logsumexp
import tensorflow as tf
from Model import CO2_regressor
from Visualization import train_test_plot_CO2
from Utils import pre_process_CO2
seed=42
# + [markdown] id="mv3PhfY257FF"
# ## Setting Seeds
# + id="0StE2Sw2sgYY"
def seed_everything(seed):
    """Seed every source of randomness (hashing, stdlib, NumPy, TF)."""
    os.environ['PYTHONHASHSEED'] = str(seed)
    os.environ['TF_CUDNN_DETERMINISTIC'] = '1'  # new flag present in tf 2.0+
    for seeder in (random.seed, np.random.seed, tf.random.set_seed):
        seeder(seed)
def set_global_determinism(seed):
    """Seed all RNGs and force deterministic TF execution (slower but reproducible)."""
    seed_everything(seed=seed)
    for flag in ('TF_DETERMINISTIC_OPS', 'TF_CUDNN_DETERMINISTIC'):
        os.environ[flag] = '1'
    # Single-threaded op scheduling removes nondeterministic reduction order.
    tf.config.threading.set_inter_op_parallelism_threads(1)
    tf.config.threading.set_intra_op_parallelism_threads(1)
set_global_determinism(seed)
# + [markdown] id="IeGtezdk5_BE"
# ## Data
# + [markdown] id="y8KqX-aZ6R9G"
# ### Loading
# + id="z3itXUMnchEF"
# Load the CO2 regression data; column 0 is the feature, column 1 the label.
with h5py.File(path_data+'CO2/train.h5', 'r') as f:
    data_train = np.concatenate((f['data'][:], f['label'][:]), axis=1)
with h5py.File(path_data+'CO2/test.h5', 'r') as f:
    data_test = np.concatenate((f['data'][:], f['label'][:]), axis=1)
X_train = data_train[:, 0].reshape(-1, 1)
y_train = data_train[:, 1].reshape(-1, 1)
# Taking test data as complete train-data and extrapolated points
X_test = np.arange(-1.72, 3.51, 0.01).reshape(-1, 1)
# + [markdown] id="hX2D8dI86Ub1"
# ### Visualising
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="9BKHUvVbeRWu" executionInfo={"status": "ok", "timestamp": 1638443723572, "user_tz": -330, "elapsed": 717, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="31ed088b-c0e3-45d7-dd46-5e3b5b602ff4"
plt.plot(X_train, y_train)
# + [markdown] id="owfEnegi6W_o"
# ### Pre-processing
# + id="anZ1ha5CyHCS"
X_train, y_train=pre_process_CO2(X_train,y_train, normalize=False)
# + [markdown] id="uUSY6fvI6a7T"
# ## Training
# + [markdown] id="VKhmux9FLyox"
# ### Normal Dropout
# + colab={"base_uri": "https://localhost:8080/"} id="1g5ttZLtBvfQ" executionInfo={"status": "ok", "timestamp": 1638439897391, "user_tz": -330, "elapsed": 8166559, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="df1e7b1d-9224-421e-f82a-52a056a58de5"
# %%time
my_model_normal=CO2_regressor(hidden=[1024, 1024, 1024, 1024, 1024], drop_rate=0.1, activation='relu', shape=X_train.shape, gaussian=False)
history= my_model_normal.train(X_train, y_train, batch_size=128, lr=0.0001, max_epoch=100000, verbose=1)
# + colab={"base_uri": "https://localhost:8080/"} id="inr0JN3B7__I" executionInfo={"status": "ok", "timestamp": 1638439897393, "user_tz": -330, "elapsed": 88, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="b2101ad4-9f37-469d-f159-b3ac718f71fc"
# Saving model and training history
history_df = pd.DataFrame(history.history)
history_df.to_csv(path_data+"History/history_CO2_reg_100000ep_normal_drop")
my_model_normal.save(path_data+'Saved_model/model_CO2_reg_100000ep_normal_drop')
# + [markdown] id="-xT81_2Z2c22"
# ### Gaussian Dropout
# + colab={"base_uri": "https://localhost:8080/"} id="ighVnnbn2gbW" executionInfo={"status": "ok", "timestamp": 1638451884492, "user_tz": -330, "elapsed": 7940605, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="85ddf54a-9de9-444d-8e78-7273634e454a"
# %%time
my_model_gaussian=CO2_regressor(hidden=[1024, 1024, 1024, 1024, 1024], drop_rate=0.1, activation='relu', shape=X_train.shape, gaussian=True)
history= my_model_gaussian.train(X_train, y_train, batch_size=128, lr=0.0001, max_epoch=100000, verbose=1)
# + colab={"base_uri": "https://localhost:8080/"} id="Kzf8P3Kj3Fbd" executionInfo={"status": "ok", "timestamp": 1638451884494, "user_tz": -330, "elapsed": 90, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="f448c1d1-a2a7-4453-e7fb-05dd02974fad"
# Saving model and training history
history_df = pd.DataFrame(history.history)
history_df.to_csv(path_data+"History/history_CO2_reg_100000ep_gaussian_drop")
my_model_gaussian.save(path_data+'Saved_model/model_CO2_reg_100000ep_gaussian_drop')
# + [markdown] id="OcPNXV417l7t"
# ## Inference
# + id="nep3BOdVLRGk"
X_test= pre_process_CO2(X_test, normalize=False)
# + [markdown] id="qTk7D1wv7sN8"
# ### Normal Dropout
# + id="UX8s9N-Ve8YY" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1638439897395, "user_tz": -330, "elapsed": 15, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="ba50a42f-70d5-4357-b16e-91ea99858c31"
# %%time
yt = my_model_normal.get_predictions(X_test, T=5000)
y_mc = yt.mean(axis=0)
y_mc_std = yt.std(axis=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="4MNoNOXeZ5Uo" executionInfo={"status": "ok", "timestamp": 1638439897396, "user_tz": -330, "elapsed": 14, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="f1428b97-8a19-4a52-983d-02bc8e6ec2e8"
with open(path_data+'Pickle_Files/yt_CO2_reg_100000ep_normal_drop.pkl', 'wb') as f:
pickle.dump(yt, f)
train_test_plot_CO2(X_train=X_train, y_train = y_train,X_test = X_test,y_mc=y_mc,y_mc_std=y_mc_std)
# + [markdown] id="E9EL7n1u79rJ"
# ### Gaussian Dropout
# + colab={"base_uri": "https://localhost:8080/"} id="Kudtfg983Q40" executionInfo={"status": "ok", "timestamp": 1638451884495, "user_tz": -330, "elapsed": 15, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="066b8f22-3e13-4617-b961-c07ffffddd6e"
# %%time
# BUG FIX: this is the *Gaussian* dropout section, but the original sampled
# from `my_model`, which is never defined in this notebook (the normal
# section uses `my_model_normal`); sample from `my_model_gaussian` instead.
yt = my_model_gaussian.get_predictions(X_test, T=5000)
y_mc = yt.mean(axis=0)
y_mc_std = yt.std(axis=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="yx5-DSTk3SzE" executionInfo={"status": "ok", "timestamp": 1638451884496, "user_tz": -330, "elapsed": 14, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="3b81051b-5750-494c-fe1d-638f8e85bac5"
with open(path_data+'Pickle_Files/yt_CO2_reg_100000ep_gaussian_drop.pkl', 'wb') as f:
pickle.dump(yt, f)
train_test_plot_CO2(X_train=X_train, y_train = y_train,X_test = X_test,y_mc=y_mc,y_mc_std=y_mc_std)
# + [markdown] id="OB-wtJMq8Xj8"
# ## Results
# + [markdown] id="1-XARSxK8alZ"
# ### Training History
# + colab={"base_uri": "https://localhost:8080/", "height": 418} id="53mxOZGWbKjk" executionInfo={"status": "ok", "timestamp": 1639997065918, "user_tz": -330, "elapsed": 1582, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="52b3a5c3-f3f7-408b-fa22-c7efa114f51c"
# Reading training histories
normal_df=pd.read_csv(path_data+"History/history_CO2_reg_100000ep_normal_drop", index_col=0)
gaussian_df= pd.read_csv(path_data+"History/history_CO2_reg_100000ep_gaussian_drop", index_col=0)
# Plotting
plt.figure(figsize=(18,7))
plt.plot(normal_df['mse'])
plt.plot(gaussian_df['mse'])
plt.ylabel('MSE',fontsize=12)
plt.xlabel('epoch',fontsize=12)
plt.legend(['Normal Dropout', 'Gaussian Dropout'], loc='upper right', fontsize=18)
plt.savefig('CO2_history.png')
# + [markdown] id="vAkcszYw82d8"
# ### RMSE and Log-likelihood
#
# Taking $\tau=0.427114830213$
# + [markdown] id="_hOlM3rO86CX"
# #### Gaussian Dropout
# + colab={"base_uri": "https://localhost:8080/"} id="VHOOXJxY9V4F" executionInfo={"status": "ok", "timestamp": 1640022579667, "user_tz": -330, "elapsed": 22, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="ab793190-4729-4830-a651-5ed72f0a512d"
model_1= tf.keras.models.load_model(path_data+'Saved_model/model_CO2_reg_100000ep_gaussian_drop')
X_test = data_test[:, 0].reshape(-1, 1)
y_test = data_test[:, 1].reshape(-1, 1)
# BUG FIX: the original called pre_process_CO2(X_train, y_train, ...) here,
# discarding the test split built just above and silently evaluating on the
# training data; preprocess the test split instead.
X_test, y_test = pre_process_CO2(X_test, y_test, normalize=False)
yt = np.array([model_1.predict(X_test) for _ in range(5000)]).squeeze()
y_mc = yt.mean(axis=0)
rmse = np.mean((y_test.squeeze() - y_mc.squeeze())**2.)**0.5
# NOTE(review): the log-likelihood folds over the MC *mean* y_mc rather than
# the individual samples yt -- confirm this matches the intended estimator.
ll = (logsumexp(-0.5 * 0.427114830213 * (y_test[None] - y_mc)**2., 0) - np.log(5000) - 0.5*np.log(2*np.pi) + 0.5*np.log(0.427114830213))
test_ll = np.mean(ll)
test_ll, rmse
# + [markdown] id="4pD3HlzA88iE"
# #### Normal Dropout
# + colab={"base_uri": "https://localhost:8080/"} id="kylw0Q8D9uDE" executionInfo={"status": "ok", "timestamp": 1640024486091, "user_tz": -330, "elapsed": 878529, "user": {"displayName": "project bayesianNN", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "08311078184634888367"}} outputId="5db88186-007e-462a-f6af-a064473fc91f"
model_2= tf.keras.models.load_model(path_data+'Saved_model/model_CO2_reg_100000ep_normal_drop')
X_test = data_test[:, 0].reshape(-1, 1)
y_test = data_test[:, 1].reshape(-1, 1)
# BUG FIX: the original called pre_process_CO2(X_train, y_train, ...) here,
# discarding the test split built just above and silently evaluating on the
# training data; preprocess the test split instead.
X_test, y_test = pre_process_CO2(X_test, y_test, normalize=False)
yt = np.array([model_2.predict(X_test) for _ in range(5000)]).squeeze()
y_mc = yt.mean(axis=0)
rmse = np.mean((y_test.squeeze() - y_mc.squeeze())**2.)**0.5
# NOTE(review): the log-likelihood folds over the MC *mean* y_mc rather than
# the individual samples yt -- confirm this matches the intended estimator.
ll = (logsumexp(-0.5 * 0.427114830213 * (y_test[None] - y_mc)**2., 0) - np.log(5000) - 0.5*np.log(2*np.pi) + 0.5*np.log(0.427114830213))
test_ll = np.mean(ll)
test_ll, rmse
| CO2_Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 6: Test imports
import numpy as np
import matplotlib.pyplot as plt
# +
x = np.arange(100) # see HW
for i in range(3):
y = np.random.random(100)*5 # random numbers from 0-5
plt.plot(x,y)
plt.show()
# -
import pandas as pd # another often used shorthand
gdp = pd.read_csv("https://raw.githubusercontent.com/UIUC-iSchool-DataViz/spring2020/master/week01/data/GDP.csv")
gdp
# TahDah!
import scipy
import scipy.misc
import scipy.cluster
import PIL.Image as Image
import bqplot
# +
x = np.arange(100)
y = np.random.random(100) + 5
x_sc = bqplot.LinearScale()
y_sc = bqplot.LinearScale()
lines = bqplot.Lines(x = x, y = y, scales = {'x': x_sc, 'y': y_sc})
ax_x = bqplot.Axis(scale = x_sc, label = 'x value')
ax_y = bqplot.Axis(scale = y_sc, label = 'y value', orientation = 'vertical')
pz = bqplot.interacts.PanZoom( scales = {'x': [x_sc], 'y': [y_sc]} )
bqplot.Figure(marks = [lines], axes = [ax_x, ax_y], interaction = pz)
# -
import ipywidgets
ipywidgets.IntSlider()
# #!pip install ipyvolume
import ipyvolume
import numpy as np
import ipyvolume as ipv
V = np.zeros((128,128,128)) # our 3d array
# outer box
V[30:-30,30:-30,30:-30] = 0.75
V[35:-35,35:-35,35:-35] = 0.0
# inner box
V[50:-50,50:-50,50:-50] = 0.25
V[55:-55,55:-55,55:-55] = 0.0
ipv.quickvolshow(V, level=[0.25, 0.75], opacity=0.03, level_width=0.1, data_min=0, data_max=1)
| lesson06/.ipynb_checkpoints/test_imports_lesson06-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Tacotron 2 inference code
# Edit the variables **checkpoint_path** and **text** to match yours and run the entire code to generate plots of mel outputs, alignments and audio synthesis from the generated mel-spectrogram using Griffin-Lim.
# #### Import libraries and setup matplotlib
# +
import os
import sys
import time
import numpy as np
from soundfile import write
import torch
import matplotlib
import matplotlib.pylab as plt
import_path = os.path.join(os.getcwd(), "..")
sys.path.insert(0, import_path)
from hparams import create_hparams
from model import Tacotron2
from layers import TacotronSTFT, STFT
from audio_processing import griffin_lim
from train import load_model
from text import text_to_sequence
from waveglow.denoiser import Denoiser
sys.path.pop(0)
sys.path.append(os.path.join(import_path, "waveglow"))
checkpoint_path = "../outdir_ru/checkpoint_41000"
waveglow_path = '../data/waveglow_256channels.pt'
output_folder = "../data/"
# -
import IPython.display as ipd
# %matplotlib inline
def plot_data(data, figsize=(16, 4)):
    """Show each 2-D array in `data` side by side (mel outputs, alignments).

    Parameters
    ----------
    data : sequence of 2-D arrays to display.
    figsize : matplotlib figure size.
    """
    # NOTE(review): with len(data) == 1, plt.subplots returns a single Axes
    # and axes[i] would fail; all call sites here pass 3 arrays.
    fig, axes = plt.subplots(1, len(data), figsize=figsize)
    for i in range(len(data)):
        # 'lower' puts row 0 at the bottom; the old value 'bottom' is not a
        # valid `origin` in current matplotlib and raises an error.
        axes[i].imshow(data[i], aspect='auto', origin='lower',
                       interpolation='none')
# #### Setup hparams
hparams = create_hparams()
hparams.sampling_rate = 22050
# #### Load model from checkpoint
model = load_model(hparams)
model.load_state_dict(torch.load(checkpoint_path)['state_dict'])
_ = model.cuda().eval().half()
# #### Load WaveGlow for mel2audio synthesis and denoiser
# +
waveglow = torch.load(waveglow_path)['model']
for m in waveglow.modules():
if 'Conv' in str(type(m)):
setattr(m, 'padding_mode', 'zeros')
waveglow.cuda().eval().half()
for k in waveglow.convinv:
k.float()
denoiser = Denoiser(waveglow)
# -
# #### Prepare text input
text = "Чат с техподдержкой — очевидное, но довольно дорогое решение. Но если применить технологии машинного обучения, то можно неплохо сэкономить."
# text = "Корабли лавировали лавировали да не вылавировали"
# text = "<NAME>"
# text = "привет, семён!"
# text = "он, я так понимаю, определяет знаки вопроса?"
# text = "ты @ я @ ты @ я @ ты @ я @ ты @ я @"
# text = "но"
sequence = np.array(text_to_sequence(text, ['english_cleaners']))[None, :]
sequence = torch.autograd.Variable(
torch.from_numpy(sequence)).cuda().long()
# #### Decode text input and plot results
mel_outputs, mel_outputs_postnet, _, alignments = model.inference(sequence)
plot_data((mel_outputs.float().data.cpu().numpy()[0],
mel_outputs_postnet.float().data.cpu().numpy()[0],
alignments.float().data.cpu().numpy()[0].T))
# #### Synthesize audio from spectrogram using WaveGlow
with torch.no_grad():
audio = waveglow.infer(mel_outputs_postnet, sigma=0.666)
ipd.Audio(audio[0].data.cpu().numpy(), rate=hparams.sampling_rate)
# #### (Optional) Remove WaveGlow bias
audio_denoised = denoiser(audio, strength=0.1)[:, 0]
audio = audio_denoised.cpu().numpy()
ipd.Audio(audio, rate=hparams.sampling_rate)
audio.shape
np.min(audio), np.max(audio)
| tacotron2_notebooks/inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import re, string
import pandas as pd
from time import time
from collections import defaultdict
import spacy
from sklearn.manifold import TSNE
from nltk.corpus import stopwords
STOPWORDS = set(stopwords.words('english'))
from gensim.models import Word2Vec
from gensim.test.utils import get_tmpfile
from gensim.models import KeyedVectors
import matplotlib.pyplot as plt
from nltk.stem import WordNetLemmatizer
from nltk import sent_tokenize
import matplotlib.pyplot as plt
# %matplotlib inline
import json
import torch
from transformers import BertModel, BertConfig, BertTokenizer, PreTrainedTokenizer
import csv
import logging # Setting up the loggings to monitor gensim
logging.basicConfig(format="%(levelname)s - %(asctime)s: %(message)s", datefmt= '%H:%M:%S', level=logging.INFO)
def clean_text(text):
    '''Lowercase *text*, remove bracketed spans, punctuation, digit-words,
    unicode-escape residue and stopwords.

    Returns the cleaned string, or None when the cleaned text is 2
    characters or fewer (treated as too short to be a useful sentence).
    '''
    text = text.lower().strip()
    # BUGFIX: remove text in square brackets. The old pattern r'\[*\]' only
    # matched runs of '[' immediately followed by ']' (never the enclosed
    # text); non-greedy r'\[.*?\]' removes each [..] span including content.
    text = re.sub(r'\[.*?\]', '', text)
    # Strip every punctuation character.
    text = re.sub(r'[%s]' % re.escape(string.punctuation), '', text)
    # Drop any word containing a digit.
    text = re.sub(r'\w*\d\w*', '', text)
    # Remove unicode escapes left in text so the model does not learn them,
    # plus a few stray symbols (hyphen, sigma, arrow, soft hyphen escape).
    text = re.sub('^\\\\u[\d\D]{4}|-|σ|→|\\\\xad', '', text)
    # Discard near-empty results (e.g. a single very short word).
    if len(text) > 2:
        return ' '.join(word for word in text.split() if word not in STOPWORDS)
    return None
def tokenizer(text):
    """Tokenize *text* with the bert-base-uncased WordPiece tokenizer,
    including special tokens. Returns a list of token strings."""
    # PERF: loading BertTokenizer from the hub/disk is expensive; cache the
    # instance on the function object so repeated calls (one per sentence
    # in feed2vec) reuse it. Output is identical to reloading each call.
    bert = getattr(tokenizer, '_bert', None)
    if bert is None:
        bert = BertTokenizer.from_pretrained('bert-base-uncased')
        tokenizer._bert = bert
    return bert.tokenize(str(text), add_special_tokens=True)
# +
nlp = spacy.load('en_core_web_sm')
def lemmatizer(text):
    """Return *text* with every token replaced by its spaCy lemma,
    joined by single spaces.

    None input (produced by clean_text for very short sentences)
    yields '' after printing a warning.
    """
    if text is None:  # identity check instead of `== None` (PEP 8)
        print("Excuse me but there is an unexpected None value from cleaning the text!")
        return ""
    # Uses the module-level spaCy pipeline `nlp` loaded above.
    doc = nlp(text)
    # (removed an unused `tokens` list that was never read)
    sent = []
    for word in doc:
        sent.append(word.lemma_)
    return " ".join(sent)
# +
def feed2vec(filepath, tokenize=None):
    """Load a JSON text corpus and prepare the module-level `sentences` /
    `tokens` globals consumed by w2v_train().

    filepath: path to a JSON file containing one text column.
    tokenize: None -> clean + lemmatize, sentences = lists of words,
              tokens = None; anything else -> sentence-tokenize and
              BERT-tokenize, tokens = list of token lists.
    Returns `tokens` (None in the lemmatizing mode).
    """
    global sentences, tokens
    print("filepath is ", str(filepath))
    #Creates a pandas dataframe for the text data from a json file
    if ".json" in str(filepath):
        df = pd.read_json(filepath)
    else:
        #FIXING THE READ JSON THING HERE W LOADING IT THEN WRITING? BC ABOVE IS GIVING ME A VALUE ERROR THAT'S HARD TO FIX
        with open(filepath, "r") as read_file:
            dataInit = json.load(read_file)
        df = pd.DataFrame(data = dataInit)
        #print("data type loaded from json is ", type(dataInit))
    print(df.head(n = 10))
    #Adds the column label text
    df.columns = ['text']
    #Text clean function is applied to the pandas dataframe
    df_clean = pd.DataFrame(df.text.apply(lambda x: clean_text(x)))
    #filters out all the None values in the cleaned dataset
    #loc combines the operations in brackets into one single operation to avoid chaining indexes operations together
    #copy explicitly tells pandas to make copy when creating master_of_none
    #this is so later on only the copy is modified and there is no confusion between the copy and the original
    master_of_none = df_clean.loc[df_clean.text.notnull()].copy()
    if tokenize == None: #no tokenizing at all
        tokens = None
        #Lemmatizer function is applied to cleaned text with the none values removed
        master_of_none["text_lemmatized"] = master_of_none.apply(lambda x: lemmatizer(x['text']), axis=1)
        master_of_none['text_lemmatize_clean'] = master_of_none['text_lemmatized'].str.replace('-PRON-', '')
        vocab = master_of_none['text_lemmatize_clean']
        sentences = [row.split() for row in vocab]
        # word_freq is a notebook-style size check only: its len() below is
        # computed and discarded.
        word_freq = defaultdict(int)
        for sent in sentences:
            for i in sent:
                word_freq[i] += 1
        len(word_freq)
    else: #returns sentences as tokens
        tokens = []
        # NOTE(review): sent_tokenize(str(Series)) tokenizes the *printed*
        # Series representation (index values, truncation marks) rather than
        # the raw documents — confirm this is intended.
        sentences = sent_tokenize(str(master_of_none["text"]))
        word_freq = defaultdict(int)
        for sent in sentences:
            tokens.append(tokenizer(sent))
            # `i` iterates characters of the sentence string here, so
            # word_freq counts characters, not words (inspection only).
            for i in sent:
                word_freq[i] += 1
        len(word_freq)
    return tokens #removed the print from here
# -
def w2v_train(w2vmodel, last_model = False, min_count = 20, window = 5, size = 500):
    """Train (or incrementally update) a gensim Word2Vec model and save it.

    Uses the module-level `sentences` / `tokens` prepared by feed2vec().

    Parameters:
        w2vmodel: path the trained model is saved to.
        last_model: False to build a brand-new model, or the path of a
            previously saved .model file to load and update with the new
            vocabulary before training.
        min_count: minimum corpus frequency for a word to enter the vocab.
        window: maximum distance between the current and predicted word.
        size: dimensionality of the feature vectors (gensim < 4.0 keyword;
            gensim 4+ renamed it `vector_size` — confirm pinned version).

    Returns:
        list of all word vectors in the trained model's vocabulary.
    """
    global sentences, tokens  # produced by feed2vec()
    # Prefer BERT tokens when feed2vec ran with tokenize=True.
    words = tokens if tokens is not None else sentences
    if last_model:
        # BUGFIX: the old test `last_model == True` is never true for a path
        # string, so the "update an existing model" branch was unreachable —
        # and passing a path fell through both branches, leaving w2v_model
        # undefined (NameError). Truthiness handles False and a path.
        w2v_model = Word2Vec.load(last_model)
        # model vocabulary is updated with the new corpus
        w2v_model.build_vocab(words, update=True)
    else:
        w2v_model = Word2Vec(min_count = min_count,
                             window = window,
                             size = size)
        # the new model's vocabulary is built from scratch
        w2v_model.build_vocab(words)
    # Train; gensim raises RuntimeError when the vocabulary is empty.
    try:
        t = time()
        w2v_model.train(words, total_examples=w2v_model.corpus_count, epochs= 30, report_delay = 1)
    except RuntimeError:
        print("Vocab was not built. Check your w2v parameters and try again!")
    print("made it here!")
    print('Time to train the model: {} mins'.format(round((time() - t) / 60, 2)))
    # Collect every vocabulary word and its trained vector for the return
    # value / inspection, then persist the model.
    all_vectors = []
    all_words = []
    print("corpus count is ", w2v_model.corpus_count)
    print("epochs is ", w2v_model.epochs)
    for word in w2v_model.wv.vocab:
        all_words.append(word)
        all_vectors.append(w2v_model.wv[word])
    w2v_model.save(w2vmodel)
    print("all_vectors(done via vocab) ", all_vectors)
    print("all_vectors type is ", type(all_vectors))
    print("all words in wv.vocab are ", all_words)
    print("number of words in wv.vocab is ", len(all_words))
    return all_vectors
# +
# Word pairs probed for cosine similarity: first_word[i] is compared with
# second_word[i] (synonym / antonym / neutral chemistry pairs).
first_word = ['alcohol', 'ketone', 'alkene', 'carbon', 'proton', 'polymer', 'acid', 'oxidize', 'anion', 'electrophile', 'polar', 'positive', 'mechanism', 'resonance', 'synthesis', 'isomer', 'heat', 'aromatic', ]
second_word = ['hydroxyl', 'carbonyl', 'alkyne', 'nitrogen', 'hydrogen', 'chain', 'base', 'reduce', 'cation', 'nucleophile', 'nonpolar', 'negative', 'atom', 'solvent', 'electron', 'reaction', 'bond', 'equilibrium']
# CSV header row for data_saver(): model name, one column per word pair,
# plus the top-10 neighbours of 'carbon'.
# NOTE(review): 'alkyyne' looks like a typo for 'alkyne'; changing it would
# alter the written CSV header, so it is left as-is here.
my_headers = ['model_name', 'alcohol + hydroxyl', 'ketone + carbonyl', 'alkene + alkyyne', 'carbon + nitrogen', 'proton + hydrogen', 'polymer + chain', 'acid + base', 'oxidize + reduce', 'anion + cation', 'electrophile + nucleophile', 'polar + nonpolar', 'positive + negative', 'mechanism + atom', 'resonance + solvent', 'synthesis + electron', 'isomer + reaction', 'heat + bond', 'aromatic + equilibrium', 'Top 10 Carbon']
def cosine_sim(w2vmodel, first_word, second_word):
    """Fill the module-level `w2v_data` row with pairwise cosine
    similarities and the top-10 neighbours of 'carbon'.

    Loads the saved model at *w2vmodel*. The resulting row is
    [model_name, sim(pair_1), ..., sim(pair_n), top10] and always has one
    entry per column of `my_headers`; out-of-vocabulary pairs contribute 0.
    """
    global w2v_data
    model_name = w2vmodel
    w2v_model = Word2Vec.load(w2vmodel)
    w2v_data = []
    w2v_data.append(model_name)
    for word1, word2 in zip(first_word, second_word):
        try:
            # synonym, antonym, or neutral pairs
            cos_sim = w2v_model.wv.similarity(word1, word2)
        except KeyError:
            cos_sim = 0
            print(f"{word1} or {word2} was not in the vocabulary")
        w2v_data.append(cos_sim)
    try:
        top10 = w2v_model.wv.most_similar(positive=['carbon'])
    except KeyError:
        # BUGFIX: previously nothing was appended on KeyError, leaving the
        # row one column short of my_headers ('Top 10 Carbon').
        top10 = 0
        print("carbon is not in the vocabulary")
    w2v_data.append(top10)
    return print("Data collection and saving complete!")
# -
def data_saver(excel_file, my_headers, new_file = None):
    """Write the collected `w2v_data` row to *excel_file* as CSV.

    When *new_file* is given, the file is (re)created and *my_headers* is
    written first; otherwise the data row is appended to the existing file.
    A PermissionError (typically the file being open in Excel) is reported
    instead of raised.
    """
    creating = new_file is not None
    mode = 'w' if creating else 'a'
    try:
        with open(excel_file, mode, newline='') as csvfile:
            csvwriter = csv.writer(csvfile)
            if creating:
                csvwriter.writerow(my_headers)
            csvwriter.writerow(w2v_data)
    except PermissionError:
        print("Excel file is most likely open. Close it before running program")
    return print("Data saved!!!")
def tsne_grapher(w2vmodel):
    """Project every word vector of the saved model at *w2vmodel* into 2-D
    with t-SNE and show a labelled scatter plot."""
    labels = []
    tokens = []
    model = Word2Vec.load(w2vmodel)
    # Gather each vocabulary word and its vector.
    # NOTE(review): `model.wv.vocab` and `model[word]` are gensim < 4.0
    # APIs (gensim 4+ uses wv.key_to_index / wv[word]) — confirm the
    # pinned gensim version.
    for word in model.wv.vocab:
        tokens.append(model[word])
        labels.append(word)
    # Fixed random_state keeps the layout reproducible across runs.
    tsne_model = TSNE(perplexity=40, n_components=2, init='pca', n_iter=2500, random_state=23)
    new_values = tsne_model.fit_transform(tokens)
    # Split the 2-D embedding into x / y coordinate lists for plotting.
    x = []
    y = []
    for value in new_values:
        x.append(value[0])
        y.append(value[1])
    plt.figure(figsize=(18, 18))
    # One scatter point per word, annotated with the word itself.
    for i in range(len(x)):
        plt.scatter(x[i],y[i])
        plt.annotate(labels[i],
                     xy=(x[i], y[i]),
                     xytext=(5, 2),
                     textcoords='offset points',
                     ha='right',
                     va='bottom')
    plt.show()
    return
# Source corpora (JSON per textbook) and the Word2Vec model file each one
# is trained into (fileN pairs with modelN).
# NOTE(review): file7 contains a space in 'Organic_Chemistry_A _Carbonyl' —
# verify the actual filename on disk matches before fixing.
file1 = 'ChemLibre_JSONS/Basic_Principles_of_Organic_Chemistry_Roberts_and_Caserio.json'
file2 = 'ChemLibre_JSONS/Bruice_Map.json'
file3 = 'ChemLibre_JSONS/Catalytic_Asymmetric_Synthesis_Punniyamurthy.json'
file4 = 'ChemLibre_JSONS/Environmental_Chemistry.json'
file5 = 'ChemLibre_JSONS/How_to_be_a_Successful_Organic_Chemist_Sandtorv.json'
file6 = 'ChemLibre_JSONS/Logic_of_Organic_Synthesis_Rao.json'
file7 = 'ChemLibre_JSONS/Organic_Chemistry_A _Carbonyl_Early_Approach_McMichael.json'
file8 = 'ChemLibre_JSONS/Organic_Chemistry_Lab_Techniques_Nichols.json'
file9 = 'ChemLibre_JSONS/Organic_Chemistry_with_a_Biological_Emphasis_Soderberg.json'
file10 = 'ChemLibre_JSONS/Polymer_Chemistry.json'
file11 = 'ChemLibre_JSONS/Radical_Reactions_of_Carbohydrates_Binkley.json'
file12 = 'ChemLibre_JSONS/Schaller_Polymer.json'
file13 = 'ChemLibre_JSONS/Supplemental_Modules.json'
file14 = 'ChemLibre_JSONS/Wade_Map.json'
file15 = 'Springer_PDF/Brewing_Science_A_Multidisciplinary_Approach_by_Mosher_and_Trantham.json'
file16 = 'Springer_PDF/Advanced_Organic_Chemistry_Part_A_Structure_and_Mechanisms_by_Carey_and_Sundberg.json'
file17 = 'Springer_PDF/Advanced_Organic_Chemistry_Part_B_Reactions_and_Synthesis_by_Carey_and_Sundberg.json'
file18 = 'Springer_PDF/Principles_of_Polymer_Chemistry_by_Ravve.json'
file19 = 'Springer_PDF/Polymer_Synthesis_Theory_and_Practice_by_Braun_Cherdron_Rehahn_Ritter_and_Voit.json'
file20 = 'Springer_PDF/Polymer_Chemistry_by_Koltzsenburg_Maskos_and_Nuyken.json'
model1 = 'robers_and_caserio.model'
model2 = 'bruice.model'
model3 = 'punniyamurthy.model'
model4 = 'environmental.model'
model5 = 'sandtorv.model'
model6 = 'rao.model'
model7 = 'mcmichael.model'
model8 = 'nichols.model'
model9 = 'soderberg.model'
model10 = 'polymer.model'
model11 = 'binkley.model'
model12 = 'schaller.model'
model13 = 'supplemental.model'
model14 = 'wade.model'
model15 = 'mosher_and_trantham.model'
model16 = 'a_carey_and_sundberg.model'
model17 = 'b_carey_and_sundberg.model'
model18 = 'ravve.model'
model19 = 'braun_chedron_rehahn_ritter_and_voit.model'
model20 = 'koltzsenburg_maskos_and_nuyken.model'
# +
#feed2vec(file1)
#w2v_train(model1)
# +
#feed2vec(file2, tokenize=True)
#w2v_train(model2, last_model = False)
# -
| Bowman/Louisa_w2v_functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # VOLTTRON Simulation Collector Notebook
# This notebook sets up a simulation and forwards data
# from one VOLTTRON instance (this Collector) to another instance (the Aggregator).
#
# Most of the notebook's setup and execution is done with shell commands, called from Python.
# # Setup: Prepare the Volttron Environment
# VOLTTRON must be installed before using this notebook. For detailed instructions on
# installing and configuring a VOLTTRON/Jupyter server environment, see [Jupyter Notebooks](http://volttron.readthedocs.io/en/devguides/supporting/utilities/JupyterNotebooks.html)
# in VOLTTRON ReadTheDocs.
#
# As is described in that guide, environment variables should have been defined before starting
# the Jupyter server:
#
# ````
# $ export VOLTTRON_ROOT=~/repos/volttron
# ````
# (path of the VOLTTRON repository, installed prior to running bootstrap)
#
# ````
# $ export VOLTTRON_HOME=~/.volttron
# ````
# (directory in which the VOLTTRON instance runs)
#
# The first VOLTTRON instance on a server usually runs, by convention, in ~/.volttron.
# If multiple VOLTTRON instances are to be run on a single host, each must have its own VOLTTRON_HOME.
#
# Also before starting the Jupyter server, a VOLTTRON virtual environment should have been
# activated by executing the following in $VOLTTRON_ROOT:
#
# ````
# $ source env/bin/activate
# ````
#
# The Python code below does some initialization to prepare for the steps that follow.
# +
import datetime
import json
import os
import pprint
import sqlite3
import subprocess
import sys
import time
# Define a "run this shell command" method, wrapping subprocess.check_output()
def _sh(shell_command, shell=True, stderr=None):
try:
return_value = subprocess.check_output(shell_command, shell=shell, stderr=stderr)
except Exception, err:
print('Shell command failed: {}', shell_command)
print(err)
return_value = 'Error'
return return_value
# Same as _sh(), except that this also prints the command output, preceded by an optional label.
def _print_sh(shell_command, label=None, **kwargs):
    """Run *shell_command* via _sh() and print its output, preceded by an
    optional *label*.

    BUGFIX: the old format string contained ': ' while the label expression
    also appended ':', printing a doubled 'Label:: ' prefix.
    """
    print('{0} {1}\n'.format(label + ':' if label else '', _sh(shell_command, **kwargs)))
# Set up local variables vhome and vroot.
# The environment variables VOLTTRON_ROOT and VOLTTRON_HOME should already be defined -- see above.
# NOTE(review): the two `%env` IPython magics below were commented out by
# the jupytext conversion, so `vroot` / `vhome` are undefined when this is
# run as a plain Python script — restore the magics (or read os.environ)
# before executing outside Jupyter.
# vroot = %env VOLTTRON_ROOT
# vhome = %env VOLTTRON_HOME
print("VOLTTRON_ROOT={}".format(vroot))
print("VOLTTRON_HOME={}".format(vhome))
# Define a VIP_SOCKET environment variable for use while installing and running agents.
socket_name = 'ipc://' + vhome + '/run/vip.socket'
# %env VIP_SOCKET=$socket_name
# Run from the VOLTTRON root directory.
os.chdir(vroot)
print("Initialization complete")
# -
# # Setup: Prepare the Simulation Environment
# The simulation software resides in a separate repository, volttron-applications.
#
# It must be downloaded from github, creating a new directory parallel to $VOLTTRON_ROOT, as follows:
#
# ````
# $ cd $VOLTTRON_ROOT
# $ cd ..
# $ git clone git://github.com/VOLTTRON/volttron-applications.git
# ````
#
# Then a symbolic link to it, named "applications", should be added under $VOLTTRON_ROOT:
#
# ````
# $ cd $VOLTTRON_ROOT
# $ ln -s ../volttron-applications/ applications
# ````
# # Setup: Shut Down All Agents
# This ensures a clean agent installation process by the notebook.
# +
print('Wait for the list to be displayed, and confirm that no agents are listed as running...\n')
# Shut down all agents.
_sh('volttron-ctl shutdown')
# List agent status to verify that the status of each agent is 0 or blank.
_print_sh('volttron-ctl status', stderr=subprocess.STDOUT)
# -
# # Setup: Discover the Collector's Network Parameters
# In order for this Collector to forward data to an Aggregator, the Aggregator
# must know the Collector's network parameters, storing them in its known_hosts file.
# Discover those parameters now.
#
# Copy the vip-address's IP and port, and the serverkey,
# to the Aggregator notebook under
# 'Setup: Add Each Collector to the known_hosts File',
# and execute that notebook's code to add this Collector to known_hosts.
# Obtain this server's IP address, volttron port number (usually 22916), and server key:
print('Obtaining network parameters and server key; please wait...\n')
_print_sh('curl ifconfig.me', label='Public IP address')
_print_sh('volttron-ctl auth serverkey', label='Serverkey')
_print_sh('cat {}/config'.format(vhome), label='Config file')
# # Setup: Configure the Aggregator's Network Parameters
# This Collector forwards data to an Aggregator, so it must be
# configured with the Aggregator's IP address, port number and server key.
#
# Define those parameters here.
#
# Obtain them from the Aggregator notebook,
# 'Setup: Discover the Aggregator's Network Parameters'.
# +
aggregator_vip_address = '172.16.58.3'
aggregator_vip_port = '22916'
aggregator_server_key = '<KEY>'
aggregator_vip = "tcp://{0}:{1}".format(aggregator_vip_address, aggregator_vip_port)
print('vip = {0}'.format(aggregator_vip))
print('aggregator_server_key = {0}'.format(aggregator_server_key))
# -
# # Setup: Test the TCP Connection
# The ForwardHistorian will send requests to the VOLTTRON Aggregator instance
# via TCP commands. Test that the Aggregator instance is capable of receiving
# TCP requests on the designated IP address and port.
#
# If this test fails, the port may not be open on the other server (firewall issue?),
# the request may be for the wrong IP address and/or port ID,
# or the other server's VOLTTRON instance may be down or incorrectly configured.
# Use an 'nc' (netcat) command to test the TCP connection
shell_command = 'nc -z -vv -w5 {0} {1}'.format(aggregator_vip_address, aggregator_vip_port)
_print_sh(shell_command, label='Network connection test result', stderr=subprocess.STDOUT)
# # Setup: Configure a SimulationAgent
# Simulations can be set up to use a wide range of varying parameters, or
# at the other extreme they can be run with "out of the box" parameters.
#
# For this simulation, run with an empty config file,
# using all default values.
config = """{}"""
print("config = {}".format(config))
config_path = vhome + '/my_simulation.config'
with open(config_path, 'w') as file:
file.write(config)
print('SimulationAgent configuration written to {}'.format(config_path))
# # Setup: Configure a ForwardHistorian
# Create a configuration file for this collector's ForwardHistorian.
#
# The file specifies the Aggregator's IP address, port and server key,
# and indicates which topics should be forwarded.
config = """{{
"destination-vip": "{0}",
"destination-serverkey": "{1}",
"required_target_agents": [],
"custom_topic_list": ["simstorage"],
"services_topic_list": ["devices"],
"topic_replace_list": [
{{
"from": "FromString",
"to": "ToString"
}}
]
}}""".format(aggregator_vip, aggregator_server_key)
print("config = {}".format(config))
config_path = vhome + '/my_simulation_forwarder.config'
with open(config_path, 'w') as file:
file.write(config)
print('Forwarder configuration written to {}'.format(config_path))
# # Setup: Install Agents
# Install each agent employed by the Collector: 3 simulation agents, a ForwardHistorian, and 2 Volttron Central agents.
# +
print('Wait for the list to be displayed, then confirm that all of these agents appear in it...')
def install_agent(dir=None, id=None, config=None, tag=None):
    """Install one VOLTTRON agent with scripts/install-agent.py, then log its tag."""
    command = 'python scripts/install-agent.py -s {0} -i {1} -c {2} -t {3} -f'.format(
        dir, id, config, tag)
    _sh(command)
    print('Installed {}'.format(tag))
# Install the three Simulation agents
install_agent(dir=vroot+'/applications/kisensum/Simulation/SimulationDriverAgent',
id='simulation.driver',
config=vroot+'/applications/kisensum/Simulation/SimulationDriverAgent/simulationdriver.config',
tag='simulation.driver')
install_agent(dir=vroot+'/applications/kisensum/Simulation/SimulationClockAgent',
id='simulationclock',
config=vroot+'/applications/kisensum/Simulation/SimulationClockAgent/simulationclock.config',
tag='simulationclock')
install_agent(dir=vroot+'/applications/kisensum/Simulation/SimulationAgent',
id='simulationagent',
config=vhome+'/my_simulation.config',
tag='simulationagent')
# Install a ForwardHistorian agent that forwards metrics to another VOLTTRON instance
install_agent(dir=vroot+'/services/core/ForwardHistorian',
id='forward_historian',
config=vhome+'/my_simulation_forwarder.config',
tag='forward_historian')
# Install a Platform Agent
install_agent(dir=vroot+'/services/core/VolttronCentralPlatform',
id='platform.agent',
config=vroot+'/services/core/VolttronCentralPlatform/config',
tag='vcp')
# Install a Volttron Central Agent
install_agent(dir=vroot+'/services/core/VolttronCentral',
id='volttron.central',
config=vroot+'/services/core/VolttronCentral/config',
tag='vc')
# List agent status to verify that the agents were installed successfully.
_print_sh('volttron-ctl status', stderr=subprocess.STDOUT)
# -
# # Setup: Install the Simulation Device Drivers
# +
print('Wait for the simulation driver configs to be displayed, then confirm that all of these configs appear in it...')
driver_root = vroot + '/applications/kisensum/Simulation/SimulationDriverAgent/'
def install_driver_csv(name=None, csv=None):
    """Store a driver registry CSV under *name* in the simulation.driver config store."""
    command_template = 'volttron-ctl config store simulation.driver {0} {1} --csv'
    _sh(command_template.format(name, driver_root + csv))
def install_driver_config(name=None, config=None):
    """Store a driver config file under *name* in the simulation.driver config store."""
    command_template = 'volttron-ctl config store simulation.driver {0} {1}'
    _sh(command_template.format(name, driver_root + config))
# Install simload, the simulated load driver
install_driver_csv(name='simload.csv', csv='simload.csv')
install_driver_config(name='devices/simload', config='simload.config')
# Install simmeter, the simulated meter driver
install_driver_csv(name='simmeter.csv', csv='simmeter.csv')
install_driver_config(name='devices/simmeter', config='simmeter.config')
# Install simpv, the simulated PV driver
install_driver_csv(name='simpv.csv', csv='simpv.csv')
install_driver_config(name='devices/simpv', config='simpv.config')
# Install simstorage, the simulated storage driver
install_driver_csv(name='simstorage.csv', csv='simstorage.csv')
install_driver_config(name='devices/simstorage', config='simstorage.config')
# List the Simulation Driver configuration to confirm that the drivers were installed successfully.
_print_sh('volttron-ctl config list simulation.driver')
# -
# # Setup: Get the Collector's forward_historian Credentials
# The Collector's ForwardHistorian agent needs to authenticate to the Aggregator. Authentication is facilitated by adding the agent's credentials to the Aggregator's auth.json file.
#
# Copy the PUBLICKEY from the command output below. On the Aggregator, run `volttron-ctl auth add` from the command line. When prompted for credentials, paste the key.
_print_sh('volttron-ctl auth publickey --tag forward_historian')
# # Execution: Refresh Variables and Stop Agents
# Before starting up the agents, refresh all variables and make sure that all agents are stopped.
# +
print('Make a fresh start - refresh variable definitions, shut down any running agents, refresh the database')
import datetime
import json
import os
import pprint
import sqlite3
import subprocess
import sys
import time
# Define a "run this shell command" method, wrapping subprocess.check_output()
def _sh(shell_command, shell=True, stderr=None):
try:
return_value = subprocess.check_output(shell_command, shell=shell, stderr=stderr)
except Exception, err:
print('Shell command failed: {}', shell_command)
print(err)
return_value = 'Error'
return return_value
# Same as _sh(), except that this also prints the command output, preceded by an optional label.
def _print_sh(shell_command, label=None, **kwargs):
    """Run *shell_command* via _sh() and print its output, preceded by an
    optional *label*.

    BUGFIX: the old format string contained ': ' while the label expression
    also appended ':', printing a doubled 'Label:: ' prefix.
    """
    print('{0} {1}\n'.format(label + ':' if label else '', _sh(shell_command, **kwargs)))
# Set up local variables vhome and vroot.
# The environment variables VOLTTRON_ROOT and VOLTTRON_HOME should already be defined -- see above.
# vroot = %env VOLTTRON_ROOT
# vhome = %env VOLTTRON_HOME
print("VOLTTRON_ROOT={}".format(vroot))
print("VOLTTRON_HOME={}".format(vhome))
# Define a VIP_SOCKET environment variable for use while installing and running agents.
socket_name = 'ipc://' + vhome + '/run/vip.socket'
# %env VIP_SOCKET=$socket_name
# Run from the VOLTTRON root directory.
os.chdir(vroot)
# Shut down all agents.
_sh('volttron-ctl shutdown')
# List agent status to verify that the status of each agent is 0 or blank.
_print_sh('volttron-ctl status', stderr=subprocess.STDOUT)
# -
# # Execution: Start the agents
# When ready to start collecting metrics and forwarding them to the Aggregator, start the agents.
# +
print('Wait for the list to be displayed, then confirm that each started agent is running...')
_sh('volttron-ctl start --tag simulationclock')
_sh('volttron-ctl start --tag simulation.driver')
_sh('volttron-ctl start --tag simulationagent')
_sh('volttron-ctl start --tag forward_historian')
_sh('volttron-ctl start --tag vcp')
_sh('volttron-ctl start --tag vc')
# List agent status to verify that the started agents have status "running".
_print_sh('volttron-ctl status', stderr=subprocess.STDOUT)
# -
# # Shutdown: Stop all agents
# When finished, stop all VOLTTRON agents.
# +
# Stop all agents.
_sh('volttron-ctl shutdown')
# Verify that all agents have been stopped.
_print_sh('volttron-ctl status', stderr=subprocess.STDOUT)
| examples/JupyterNotebooks/SimulationCollector.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## VIF-Variance inflation factor
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# NOTE(review): hardcoded, machine-specific Windows path — parameterize
# before sharing this notebook.
data=pd.read_csv("C:/Users/sachi/Downloads/student.csv")
# NOTE(review): read_csv already returns a DataFrame, so this wrap is a
# no-op copy.
df=pd.DataFrame(data)
# add_constant prepends the intercept column required for meaningful VIFs.
X=add_constant(df)
X.head()
# -
# One VIF per column (including the constant); values well above ~5-10
# indicate multicollinearity.
pd.Series([variance_inflation_factor(X.values, i)
           for i in range(X.shape[1])],
          index=X.columns)
| VIF .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: MyPy397
# language: python
# name: mypy397
# ---
import pandas as pd
df = pd.read_csv("../results/small_osmotic_models_9_agent.csv")
# Drop the unnamed index column written by to_csv.
df = df.drop([df.columns[0]],axis=1)
df.head()
# # Evaluating state : s,a - s',fit
# We need to implement a function that applies the action to a state and generates the next state
sample = df.loc[0]
print(sample)
# +
# Integer encodings for the categorical columns.
# NOTE(review): "None" maps to the *string* '0' while the other values are
# ints — confirm this mixed typing is intentional before model training.
dummies_action={"None":'0',"adapt":1,"migrate":2,"replicate":3}
dummies_flavour={"None":'0',"small":1,"medium":2,"large":3}
map_services = {"s":1,"m":2,"l":3}
# Per-service resource descriptors.
# NOTE(review): the layout of these 10-element lists is undocumented here —
# confirm field meaning against the simulator that produced the CSV.
services = dict()
services[1]=[1,1,10,2,2,20,3,5,50,10]
services[2]=[1,1,10,2,2,20,3,5,50,10]
services[3]=[1,0,0,2,4,20,3,0,0,10]
sout = sample.copy()
if sample.action == "adapt":
    # Adapt: switch to the new flavour and adjust available hardware by the
    # difference between the old and new flavour HW columns.
    sout.cf = dummies_flavour[sample.flavour]
    ixHWold = "f%i_HW"%sample.cf
    ixHWnew = "f%i_HW"%sout.cf
    sout.avaiHW = sample.avaiHW+sample[ixHWold]-sample[ixHWnew]
if sample.action == "migrate":
    # How the new state looks after a migration
    sout.max_lat = sample.max_lat-1
if sample.action == "replicate":
    # How the new state looks after a replication (not implemented yet)
    pass
print(sout)
# -
# ## Feeding the model
df = df.drop([df.columns[0],"action","flavour","file"],axis=1)
print(df.columns)
| notebooks/.ipynb_checkpoints/DeepMario-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Thinking in tensors, writing in PyTorch
#
# A hands-on course by [<NAME>](https://p.migdal.pl) (2019). Version 0.2.
#
# []( https://colab.research.google.com/github/stared/thinking-in-tensors-writing-in-pytorch/blob/master/extra/Word%20vectors.ipynb)
#
#
# ## Extra: Word vectors
#
# **VERY WORK IN PROGRESS**
#
# ### Reading
#
# For a general reading, see:
#
# * [king - man + woman is queen; but why?](https://p.migdal.pl/2017/01/06/king-man-woman-queen-why.html)
# * [Word2vec in PyTorch](https://adoni.github.io/2017/11/08/word2vec-pytorch/)
#
# ### Notes
#
# We use the smallest, 50-dimensional, uncased GloVe word embedding:
#
# * [GloVe: Global Vectors for Word Representation by Stanford](https://nlp.stanford.edu/projects/glove/)
#
# Other popular pre-trained word embeddings:
#
# * [word2vec by Google](https://code.google.com/archive/p/word2vec/)
# * [fastText by Facebook](https://github.com/facebookresearch/fastText/blob/master/pretrained-vectors.md) (multilingual)
#
# See also:
#
# * [Aligning the fastText vectors of 78 languages](https://github.com/Babylonpartners/fastText_multilingual)
# * [gensim-data](https://github.com/RaRe-Technologies/gensim-data) - data repository for pretrained NLP models and NLP corpora.
#
#
# +
# %matplotlib inline
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
# -
wv = pd.read_table("./data/glove.6B.50d.txt",
delimiter=" ", header=None, index_col=0, quoting=3)
wv.head()
#
#
# $$\vec{v}_{\text{julia}} = [0.36, 1.18, -0.10, \ldots, 0.94]$$
wv.loc["julia"].values
def latex_vector(series, first=3, last=1):
    """Render a pandas Series as a LaTeX vector, showing the *first* and
    *last* entries and eliding the middle with an ellipsis.

    Raises when the series is shorter than first + last.
    """
    from IPython.display import Math
    if len(series) < first + last:
        raise Exception("len(series) < first + last")
    parts = ["{:.2f}".format(v) for v in series.values[:first]]
    if len(series) > first + last:
        parts.append(r"\ldots")
    parts.extend("{:.2f}".format(v) for v in series.values[-last:])
    rendered = r"\vec{v}_{\text{" + series.name + r"}} = [" + ", ".join(parts) + "]"
    return Math(rendered)
latex_vector(wv.loc["julia"])
words = set(wv.index)
"daniel" in words
correlations = wv.loc[["cat", "dog", "bar", "pub", "beer", "tea", "coffee", "talked", "nicely"]].transpose().corr()
sns.clustermap(correlations, vmin=-1., vmax=1., cmap="coolwarm")
correlations = wv.loc[["hotel", "motel", "guesthouse", "bar", "pub", "party"]].transpose().corr()
sns.clustermap(correlations, vmin=-1., vmax=1., cmap="coolwarm")
np.dot(wv.loc["kate"], wv.loc["he"] - wv.loc["she"])
np.dot(wv.loc["john"], wv.loc["he"] - wv.loc["she"])
names = ["kate", "catherine", "john", "mark", "peter", "anna", "julia", "jacob", "jake",
"richard", "ted", "theodore", "sue", "susanne", "suzanne", "susan", "mary",
"leo", "leonard", "alexander", "alexandra", "alex", "sasha"]
all([name in words for name in names])
gender = wv.loc["he"] - wv.loc["she"]
wv.loc[names].dot(gender).sort_values()
wv.loc[names].dot(gender).sort_values().plot.barh()
diminutive = wv.loc["kate"] - wv.loc["catherine"]
proj = pd.DataFrame([gender, diminutive], index=["gender", "diminutive"]).transpose()
df_plot = wv.loc[names].dot(proj).sort_values(by="diminutive")
df_plot
# +
some_words = ["good", "bad", "ok", "not", "ugly", "beautiful", "awesome", "!", "?"]
# Sanity check: every probe word must exist in the embedding vocabulary.
assert(all([word in words for word in some_words]))
# NOTE(review): `wvn` (the length-normalized vectors) is only defined
# further down in this notebook ("let's normalize data" cell); running
# top-to-bottom raises NameError here. Run the normalization cell first.
awesomeness = wvn.loc["awesome"] - wvn.loc["awful"]
wvn.loc[some_words].dot(awesomeness).sort_values()
# -
# ## Plots
pca = PCA(n_components=2)
X_pca = pca.fit_transform(wv.loc[names])
plt.plot(X_pca[:, 0], X_pca[:, 1], '.')
for i, name in enumerate(names):
plt.annotate(name, X_pca[i])
tsne = TSNE(n_components=2, perplexity=3.)
X_tsne = tsne.fit_transform(wv.loc[names])
plt.plot(X_tsne[:, 0], X_tsne[:, 1], '.')
for i, name in enumerate(names):
plt.annotate(name, X_tsne[i])
# ## Close
# normalize your data
wv.dot(wv.loc["dog"]).sort_values(ascending=False).head(10)
# let's normalize data
lens = (wv**2).sum(axis=1)
wvn = wv.div(np.sqrt(lens), axis='index')
wvn.dot(wvn.loc["dog"]).sort_values(ascending=False).head(20)
wvn.dot(wvn.loc["dog"]).sort_values(ascending=False).tail(20)
wvn.dot(wvn.loc["king"] - wvn.loc["man"] + wvn.loc["woman"]).sort_values(ascending=False).head(20)
wvn.dot(wvn.loc["kissed"] - wvn.loc["kiss"] + wvn.loc["eat"]).sort_values(ascending=False).head(20)
# ## Extremes
temp_diff = wvn.loc["hot"] - wvn.loc["cold"]
temp_avg = (wvn.loc["hot"] + wvn.loc["cold"]) / 2.
proj = pd.DataFrame([temp_diff, temp_avg], index=["temp_diff", "temp_avg"]).transpose()
temp_all = wvn.dot(proj).sort_values(by="temp_avg", ascending=False)
temp_all.head(20)
temp_all.head(200).sort_values(by="temp_diff", ascending=False)
| extra/Word vectors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="4ts0mmL3waKf" colab_type="code" colab={}
# # !pip install --upgrade tables
# # !pip install eli5
# # !pip install xgboost
# + id="YKUG5JDfwwRZ" colab_type="code" colab={}
import pandas as pd
import numpy as np
import eli5
import xgboost as xgb
from eli5.sklearn import PermutationImportance
from sklearn.metrics import mean_absolute_error as mean_absolute_error
from sklearn.model_selection import cross_val_score, KFold
from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# + id="Z88uoEbQx3eq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="1499c185-561e-4266-9986-ee331c7a72e9" executionInfo={"status": "ok", "timestamp": 1583396421602, "user_tz": -60, "elapsed": 846, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car"
# + id="5OWblI40x7Bn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="452f2029-b827-4d15-c302-53b269910927" executionInfo={"status": "ok", "timestamp": 1583396661749, "user_tz": -60, "elapsed": 2465, "user": {"displayName": "Rafa\u014<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
df = pd.read_hdf('data/car.h5')
df.shape
# + [markdown] id="vSqA4xYry8E2" colab_type="text"
# ## Feature Engineering
# + id="GJXf52QMyCQK" colab_type="code" colab={}
# Integer-encode every column with pandas factorize; encoded copies get the
# '__cat' suffix so the raw columns are kept alongside them
SUFFIX_CAT = '__cat'
for feat in df.columns:
    # list-valued cells cannot be factorized — skip those columns
    if isinstance( df[feat][0], list): continue
    factorized_value = df[feat].factorize()[0]
    if SUFFIX_CAT in feat:
        # already an encoded column (re-run of this cell) — overwrite in place
        df[feat] = factorized_value
    else:
        df[feat + SUFFIX_CAT] = factorized_value
# + id="NcQDAf4S0uIC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="e001606d-32a9-497b-abb4-44a7b3667a0c" executionInfo={"status": "ok", "timestamp": 1583397255857, "user_tz": -60, "elapsed": 649, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
# Model features = all factorized ('__cat') columns, excluding anything
# price-related to avoid target leakage
cat_feats = [x for x in df.columns if SUFFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x ]
len(cat_feats)
# + [markdown] id="6PAdmHBB2Xv-" colab_type="text"
# ## Function
# + id="BeF3DfTWzv4B" colab_type="code" colab={}
def run_model(model, feats):
    """Cross-validate `model` on the given feature columns of the global `df`.

    Runs 3-fold CV scored by negative mean absolute error and returns
    (mean, std) of the fold scores.
    """
    feature_matrix = df[feats].values
    target = df['price_value'].values
    fold_scores = cross_val_score(model, feature_matrix, target, cv=3, scoring='neg_mean_absolute_error')
    return np.mean(fold_scores), np.std(fold_scores)
# + [markdown] id="cC6jpO3M2f-R" colab_type="text"
# ### DecisionTree
# + id="2GZVe-eB0Ywq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="c56eae5e-60f6-4206-e0f5-72be857b0f76" executionInfo={"status": "ok", "timestamp": 1583397654229, "user_tz": -60, "elapsed": 4799, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
run_model(DecisionTreeRegressor(max_depth=5), cat_feats)
# + [markdown] id="YWYzzo3Y2kPl" colab_type="text"
# ### Random Forest
# + id="EMxPLCnB2H25" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="737f2215-dedb-4e6e-e9cd-4eedffcc5b5e" executionInfo={"status": "ok", "timestamp": 1583397841759, "user_tz": -60, "elapsed": 124224, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
model = RandomForestRegressor(max_depth=5, n_estimators=50, random_state=0)
run_model(model, cat_feats)
# + [markdown] id="FpN8em4229_y" colab_type="text"
# ### XGBoost
# + id="94GKhGdy28V4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 92} outputId="1fc3cd29-bc41-4e6b-ccdf-c9794b0e3261" executionInfo={"status": "ok", "timestamp": 1583404986824, "user_tz": -60, "elapsed": 61141, "user": {"displayName": "Rafa\u01<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
xgb_params = {
'max_depth':5,
'n_estimators':50,
'learning_rate':0.1,
'seed':0
}
run_model(xgb.XGBRegressor(**xgb_params), cat_feats)
# + id="3vHdCtNz4Ml-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 418} outputId="c65d228c-6c0a-4d99-f628-c24bd8b027bc" executionInfo={"status": "ok", "timestamp": 1583399181616, "user_tz": -60, "elapsed": 262277, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
# Bug fix: X and y were never defined at this scope (they exist only as
# locals inside run_model), so this cell raised NameError. Build them from
# the categorical feature set, matching what run_model does internally.
X = df[cat_feats].values
y = df['price_value'].values
m = xgb.XGBRegressor(**xgb_params)
m.fit( X, y )
# Permutation importance: how much the score degrades when each feature is shuffled
imp = PermutationImportance(m, random_state=0, ).fit(X, y)
eli5.show_weights(imp, feature_names= cat_feats)
# + id="8VRkiOiH4-BE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 92} outputId="c5c49964-b596-43ad-ca16-4ea8f3e30f28" executionInfo={"status": "ok", "timestamp": 1583405012542, "user_tz": -60, "elapsed": 13547, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
feats = ['param_napęd__cat','param_stan__cat','param_faktura-vat__cat','param_rok-produkcji__cat','param_skrzynia-biegów__cat','param_moc__cat','feature_kamera-cofania__cat','param_typ__cat','seller_name__cat','feature_światła-led__cat','feature_wspomaganie-kierownicy__cat','feature_system-start-stop__cat','param_pojemność-skokowa__cat','feature_regulowane-zawieszenie__cat','feature_łopatki-zmiany-biegów__cat','feature_asystent-pasa-ruchu__cat','feature_hud-(wyświetlacz-przezierny)__cat','feature_czujniki-parkowania-przednie__cat','param_marka-pojazdu__cat','param_kod-silnika__cat' ]
#len(feats)
run_model(xgb.XGBRegressor(**xgb_params), feats)
# + id="cneNwItiA566" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 92} outputId="2bb5cfd4-f413-4c83-8e9e-9f40c84a2bbc" executionInfo={"status": "ok", "timestamp": 1583405038060, "user_tz": -60, "elapsed": 13487, "user": {"displayName": "Rafa\u0142 Ziemianek", "photoUrl": "", "userId": "04790712274279300887"}}
df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) =='None' else int(x) )
feats = ['param_napęd__cat','param_stan__cat','param_faktura-vat__cat','param_rok-produkcji','param_skrzynia-biegów__cat','param_moc__cat','feature_kamera-cofania__cat','param_typ__cat','seller_name__cat','feature_światła-led__cat','feature_wspomaganie-kierownicy__cat','feature_system-start-stop__cat','param_pojemność-skokowa__cat','feature_regulowane-zawieszenie__cat','feature_łopatki-zmiany-biegów__cat','feature_asystent-pasa-ruchu__cat','feature_hud-(wyświetlacz-przezierny)__cat','feature_czujniki-parkowania-przednie__cat','param_marka-pojazdu__cat','param_kod-silnika__cat' ]
run_model(xgb.XGBRegressor(**xgb_params), feats)
# + id="6dAGVmOQBoFk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 92} outputId="33db9fc6-901b-44ed-84de-12f79851d269" executionInfo={"status": "ok", "timestamp": 1583405100243, "user_tz": -60, "elapsed": 13324, "user": {"displayName": "Rafa\u0142 Ziemianek", "photoUrl": "", "userId": "04790712274279300887"}}
#df['param_rok-produkcji'] = df['param_rok-produkcji'].map(lambda x: -1 if str(x) =='None' else int(x) )
df['param_moc'] = df['param_moc'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split(' ')[0]) )
feats = ['param_napęd__cat','param_stan__cat','param_faktura-vat__cat','param_rok-produkcji','param_skrzynia-biegów__cat','param_moc','feature_kamera-cofania__cat','param_typ__cat','seller_name__cat','feature_światła-led__cat','feature_wspomaganie-kierownicy__cat','feature_system-start-stop__cat','param_pojemność-skokowa__cat','feature_regulowane-zawieszenie__cat','feature_łopatki-zmiany-biegów__cat','feature_asystent-pasa-ruchu__cat','feature_hud-(wyświetlacz-przezierny)__cat','feature_czujniki-parkowania-przednie__cat','param_marka-pojazdu__cat','param_kod-silnika__cat' ]
run_model(xgb.XGBRegressor(**xgb_params), feats)
# + id="sEyJwE0_Cum5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="0fe99351-ea12-4996-ee03-ddc48a880836" executionInfo={"status": "ok", "timestamp": 1583404005110, "user_tz": -60, "elapsed": 525, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
df['param_pojemność-skokowa'].unique()
# + id="oqMpIpYfO7QD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 92} outputId="5c4b6f91-e449-4e03-dc61-ab6c73c5d251" executionInfo={"status": "ok", "timestamp": 1583405782695, "user_tz": -60, "elapsed": 13515, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
df['param_pojemność-skokowa'] = df['param_pojemność-skokowa'].map(lambda x: -1 if str(x) == 'None' else int(str(x).split('cm')[0].replace(' ','')) )
feats = ['param_napęd__cat','param_stan__cat','param_faktura-vat__cat','param_rok-produkcji','param_skrzynia-biegów__cat','param_moc','feature_kamera-cofania__cat','param_typ__cat','seller_name__cat','feature_światła-led__cat','feature_wspomaganie-kierownicy__cat','feature_system-start-stop__cat','param_pojemność-skokowa','feature_regulowane-zawieszenie__cat','feature_łopatki-zmiany-biegów__cat','feature_asystent-pasa-ruchu__cat','feature_hud-(wyświetlacz-przezierny)__cat','feature_czujniki-parkowania-przednie__cat','param_marka-pojazdu__cat','param_kod-silnika__cat' ]
run_model(xgb.XGBRegressor(**xgb_params), feats)
# + id="PdLb6iFsPepI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="d50046f8-6ba2-4ffe-8243-4523072595b1" executionInfo={"status": "ok", "timestamp": 1583406240991, "user_tz": -60, "elapsed": 1142, "user": {"displayName": "Rafa\u014<NAME>", "photoUrl": "", "userId": "04790712274279300887"}}
df['param_pojemność-skokowa'].unique()
# + id="c_25QwGIUE1h" colab_type="code" colab={}
# + id="1MiclHySYDe0" colab_type="code" colab={}
| day4_car_modelXGBoost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1. Indução e Projeto de Algoritmos
#
# ## 1.1. Prova por Indução
#
# Técnicas de demonstração por indução possuem grande importância na área de Ciência da Computação, pois permitem o desenvolvimento de um raciocínio construtivo que evidencia os passos necessários para a construção da solução de determinados problemas. Desta forma, além de serem úteis para provarmos teoremas, podem ser utilizadas para formularmos procedimentos **recursivos** e **indutivos**.
#
# Existem, basicamente, dois tipos de indução: **fraca** e **forte**. Ambos tipos seguem os seguintes passos durante o desenvolvimento de uma prova:
# 1. Base
# 2. Hipótese de indução
# 3. Passo de indução
#
#
# - <u>Príncipio da indução fraca</u>: sejam $P_n$ afirmações que podem ser verdadeiras ou falsas associadas à cada inteiro positivo $n\geq k$. Se $P_k$ é verdadeira e, para cada inteiro positivo $j\geq k$, se $P_j$ é verdadeira, então $P_{j+1}$ também o é. Assim sendo, $P_n$ é verdadeira para todo inteiro positivo $n\geq k$.
# - <u>Princípio da indução forte</u>: para alguns problemas, para provarmos que $P_{j+1}$ é verdadeira, temos que assumir a veracidade de toda $P_i$, $k\leq i\leq j$. Assim, novamente, vamos assumir que tenhamos $P_n$ afirmações que podem ser verdadeiras ou falsas associadas a cada inteiro positivo $n\geq k$. Se $P_k$ é verdadeira e, para cada inteiro positivo $j\geq k$, se $P_k,P_{k+1}\ldots,P_j$ são verdadeiras, então $P_{j+1}$ também o é. Assim sendo, $P_n$ é verdadeira para todo inteiro positivo $n\geq k$.
#
# Vejamos, agora, um exemplo de prova por **indução fraca**. Prove que a seguinte inequalidade é verdadeira:
#
# \begin{equation}
# \tag{1}
# \frac{1}{2}+\frac{1}{4}+\frac{1}{8}+\ldots+\frac{1}{2^n}<1\ \ \ \ \ \text{ para }n\geq1.
# \end{equation}
#
# 1. Primeiro, vamos provar a **base**, ou seja, vamos provar que a inequalidade é verdadeira para $n=1$. Neste caso, temos que $\frac{1}{2}<1$, ou seja, a afirmação é verdadeira.
# 2. O segundo passo consiste em assumir que a **hipótese** é verdadeira, ou seja, a inequalidade dada pela Equação 1 é verdadeira.
# 3. Em seguida, utilizando a hipótese de indução, provamos que a inequalidade acima é valida para $n+1$, ou seja, temos que provar que $\frac{1}{2}+\frac{1}{4}+\frac{1}{8}+\ldots+\frac{1}{2^{n+1}}<1$. Temos, então, que:
#
# \begin{equation}
# \tag{2}
# \frac{1}{2}+\frac{1}{4}+\frac{1}{8}+\ldots+\frac{1}{2^{n+1}} < 1,
# \end{equation}
# que pode ser também escrita da seguinte forma:
#
# \begin{equation}
# \tag{3}
# \frac{1}{2}+\frac{1}{4}+\frac{1}{8}+\ldots+\frac{1}{2^n}+\frac{1}{2^{n+1}} < 1.
# \end{equation}
#
# Reescrevendo a Equação 3, temos que:
# \begin{align}
# \tag{4}
# \frac{1}{2}+\frac{1}{2}\left(\frac{1}{2}+\frac{1}{4}+\ldots+\frac{1}{2^n}\right) &< 1 \\
# \frac{1}{2}\left(\frac{1}{2}+\frac{1}{4}+\ldots+\frac{1}{2^n}\right) &< 1-\frac{1}{2} \\
# \frac{1}{2}\left(\frac{1}{2}+\frac{1}{4}+\ldots+\frac{1}{2^n}\right) &< \frac{1}{2} \\
# \end{align}
#
# Agora, suponha $x = \frac{1}{2}+\frac{1}{4}+\ldots+\frac{1}{2^n}$ de tal forma que a Equação 4 possa ser escrita da seguinte forma:
#
# \begin{equation}
# \tag{5}
# \frac{1}{2}x < \frac{1}{2}.
# \end{equation}
# Por hipótese de indução, temos que $x<1$. Desta forma, a Equação 5 é verdadeira, como queríamos demonstrar.
#
# Vejamos um outro exemplo de prova por **indução fraca**. Vamos provar que a seguinte equação é verdadeira:
#
# \begin{equation}
# \tag{6}
# 1+2+3+\ldots+n = \frac{n(n+1)}{2}\ \ \ \ \ \text{ para }n\geq1.
# \end{equation}
#
# 1. Base: para $n=1$, temos que $1 = \frac{1+1}{2} = \frac{2}{2}$. Portanto, a base é verdadeira.
# 2. Hipótese de indução: vamos assumir que a Equação 6 é verdadeira para $n\geq 1$.
# 3. Passo de indução: vamos provar que a Equação 6 é válida para $n+1$. Assim sendo, temos que:
#
# \begin{equation}
# \tag{7}
# 1+2+3+\ldots+(n+1) = \frac{(n+1)((n+1)+1)}{2}\ \ \ \ \ \text{ para }n\geq1.
# \end{equation}
#
# A Equação 7 pode ser reescrita da seguinte maneira:
#
# \begin{align}
# \tag{8}
# 1+2+3+\ldots+n+(n+1) &= \frac{n(n+1)}{2}+(n+1)\\
# &=\frac{n(n+1)+2(n+1)}{2}\\
# &=\frac{(n+1)(n+2)}{2}\\
# &=\frac{(n+1)((n+1)+1)}{2}.
# \end{align}
# Assim sendo, provamos o passo de indução.
#
# Vejamos, agora, um exemplo de prova por **indução forte**. Seja a sequência $a_1,a_2,a_3,\ldots,a_n$ tal que $a_1 = 0$, $a_2 = 0$ e $a_k = 3a_{\lfloor k/2\rfloor}+2$ para $k\geq 3$. Prove que $a_n$ é par para $n \geq 1$.
#
# 1. Base: para $n=1$ e $n=2$ a base é verdadeira já que $a_1 = 0$ e $a_2 = 0$.
# 2. Hipótese de indução: vamos supor que $a_k$ é par para $1\leq k<n$.
# 3. Vamos provar que $a_n$ é par. Pela definição acima, temos que $a_n=3a_{\lfloor n/2\rfloor}+2$. Pela hipótese de indução, temos que o termo $a_{\lfloor n/2\rfloor}$ é par. Assim, o termo $3a_{\lfloor n/2\rfloor}$ também é par, ou seja, a multiplicação de um número ímpar por um número par resulta em um outro número par. Dado que o termo $3a_{\lfloor n/2\rfloor}$ é par, temos que $3a_{\lfloor n/2\rfloor}+2$ também é par. Assim sendo, provamos que $a_n$ é par para $n\geq 3$.
#
# ## 1.2. Projeto de Algoritmos por Indução
#
# Podemos fazer uso das técnicas de prova por indução para projetar algoritmos visando a resolução de diversos problemas, isto é, o desenvolvimento do algoritmo será dado de maneira análoga ao desenvolvimento de uma demonstração por indução. A complexidade final é dada por meio de uma análise de recorrência.
#
# O projeto de algoritmos por indução resulta em soluções recursivas, em que:
#
# - A base de indução corresponde ao critério de parada do algoritmo.
# - A aplicação da hipótese de indução corresponde às chamadas recursivas.
# - O passo de indução corresponde ao processo de obtenção da resposta final, ou seja, solução do algoritmo.
# Como benefício imediato, temos que o uso correto da técnica nos dá uma prova de corretude do algoritmo.
#
# Vejamos um exemplo de projeto de algoritmos por indução. Suponha o seguinte problema: dada uma sequência de números reais $a_n,a_{n-1},a_{n-2},\ldots,a_1,a_0$ e um número real $x$, calcular o valor do seguinte polinômio:
#
# \begin{equation}
# \tag{9}
# P_n(x) = a_nx^n+a_{n-1}x^{n-1}+\ldots+a_1x+a_0.
# \end{equation}
#
# Como vimos anteriormente, podemos demonstrar que conseguimos resolver o problema acima da seguinte maneira:
#
# 1. Base: Neste caso, temos que $n=0$ e a solução do problema seria $a_0$.
# 2. Hipótese de indução: suponha que consigamos calcular $P_{n-1}(x)$.
# 3. Passo de indução: para calcular $P_n(x)$ basta realizarmos a seguinte operação: $P_n(x) = P_{n-1}(x)+a_nx^n$.
# Pronto, o algoritmo está projetado conforme os passos que aprendemos em uma demonstração por indução. Basta, agora, implementarmos a solução acima conforme o seguinte algoritmo:
import numpy
import math
def P_n(A, n, x):
    """Evaluate the polynomial P_n(x) = A[0] + A[1]*x + ... + A[n]*x^n.

    Naive recursive version: each level recomputes x^n from scratch,
    giving O(n^2) multiplications overall (Equation 12 in the text).
    """
    if n == 0:  # caso base da indução
        return A[0]
    # passo de indução: P_n(x) = a_n * x^n + P_{n-1}(x)
    return A[n] * math.pow(x, n) + P_n(A, n - 1, x)
# Exemplo de funcionamento:
n = 4
x = 2
A = numpy.random.randint(0, 10, n+1)
print('Vetor de entrada: '+ str(A))
out = P_n(A, n, x)
print('Resultado: '+ str(out))
# Vamos agora, calcular a complexidade de nossa solução acima. Como temos um algoritmos recursivo, podemos fazer uso da análise de recorrência, como apresentado abaixo:
#
# \begin{equation}
# \tag{10}
# T(n) =
# \begin{cases}
# 1 & \text{se $n=0$}\\
# T(n-1) + f(n)& \text{caso contrário,}
# \end{cases}
# \end{equation}
# em que $f(n)$ corresponde ao número de multiplicações e adições. Em nosso caso, o residual $f(n)$ corresponde à linha do algoritmo acima ``tmp = A[n]*math.pow(x, n)``, em que temos $n+1$ multiplicações. Ademais, devemos considerar a adição realizada na hipótese de indução na linha ``return tmp + P_n(A, n-1, x)``. Em suma, temos $n+1$ multiplicações e uma operação de adição.
#
# Podemos, então, reescrever a Equação 10 da seguinte forma:
#
# \begin{equation}
# \tag{11}
# T(n) =
# \begin{cases}
# 1 & \text{se $n=0$}\\
# T(n-1) + [(n+1) \text{ multiplicações} + 1 \text{ adição}]& \text{caso contrário.}
# \end{cases}
# \end{equation}
#
# Mais especificamente, temos que:
#
# \begin{align}
# \tag{12}
# T(n) & = \sum_{i=1}^n [(n+1) \text{ multiplicações} + 1 \text{ adição}]\\
# &= \sum_{i=1}^n [(n+1) \text{ multiplicações}] + \sum_{i=1}^n 1 \text{ adição}\\
# &= \sum_{i=1}^n [(n+1) \text{ multiplicações}] + n\text{ adições}\\
# &= \sum_{i=1}^n [n \text{ multiplicações}] + \sum_{i=1}^n [1 \text{ multiplicação}] + n\text{ adições}\\
# &= \sum_{i=1}^n [n \text{ multiplicações}] + n \text{ multiplicações} + n\text{ adições}\\
# &=\frac{n(n+1)}{2}+n \text{ multiplicações} + n\text{ adições}.
# \end{align}
#
# No entanto, conseguimos melhorar essa complexidade assumindo uma hipótese de indução "mais forte". Suponha, agora, que saibamos calcular $P_{n-1}(x)$ e também o valor de $x^{n-1}$. Neste caso, o caso base $n=0$ possui como solução o par $(a_0,1)$. Desta forma, temos uma segunda versão de nosso algoritmo:
def P_n2(A, n, x):
    """Evaluate P_n(x) returning the pair (P_n(x), x^n).

    Stronger induction hypothesis: carrying x^(n-1) up the recursion avoids
    recomputing powers, costing only 2 multiplications + 1 addition per level.
    """
    if n == 0:  # caso base: (a_0, x^0)
        return (A[0], 1)
    # hipótese de indução: we already know (P_{n-1}(x), x^(n-1))
    prev_value, prev_power = P_n2(A, n - 1, x)
    power = prev_power * x
    return (prev_value + A[n] * power, power)
# Exemplo de funcionamento:
print('Vetor de entrada: '+ str(A))
(out, tmp) = P_n2(A, n, x)
print('Resultado: '+ str(out))
# O próximo passo consiste na análise da complexidade de nossa segunda solução. Temos, agora, a seguinte relação de recorrência:
#
# \begin{equation}
# \tag{13}
# T(n) =
# \begin{cases}
# 1 & \text{se $n=0$}\\
# T(n-1) + [2\text{ multiplicações} + 1\text{ adição}]& \text{caso contrário.}
# \end{cases}
# \end{equation}
#
# A solução da recorrência é dada por:
#
# \begin{align}
# \tag{14}
# T(n) &= \sum_{i=1}^n [2\text{ multiplicações} + 1\text{ adição}]\\
# &= 2\sum_{i=1}^n [1\text{ multiplicação}] + \sum_{i=1}^n [1\text{ adição}]\\
# &= 2n \text{ multiplicações} + n \text{ adições}.
# \end{align}
#
# Temos, ainda, uma terceira solução, que envolve uma hipótese de indução **ainda mais forte**. Suponha, agora, que consigamos calcular $P^\prime_{n-1}(x)=a_nx^{n-1}+a_{n-1}x^{n-2}+\ldots+a_1$. Note que $P_n(x) = xP^\prime_{n-1}(x)+a_0$.
#
# Temos que o caso base $n=0$ para a abordagem acima é trivial, ou seja, $a_0 = 0$. O algoritmo para essa terceira versão é dado como segue:
def P_n3(A, n, x):
    """Evaluate P_n(x) = A[0] + A[1]*x + ... + A[n]*x^n by Horner's rule.

    Bug fix: the original recursed on the same coefficient array and added
    A[0] at every level, computing a0*(x^n + ... + x + 1) instead of the
    polynomial. Horner's identity used in the text is
    P_n(x) = x * P'_{n-1}(x) + a_0, where P'_{n-1} has coefficients
    a_1..a_n — so each recursive call must drop the leading coefficient.
    Costs 1 multiplication + 1 addition per level (Equation 16).
    """
    if n == 0:  # Caso base
        return A[0]
    else:
        tmp_P = P_n3(A[1:], n-1, x)  # P'_{n-1}(x) over coefficients a_1..a_n
        return x*tmp_P + A[0]        # Hipótese de indução
# Exemplo de funcionamento:
print('Vetor de entrada: '+ str(A))
out = P_n3(A, n, x)
print('Resultado: '+ str(out))
# Neste caso, temos a seguinte relação de recorrência:
#
# \begin{equation}
# \tag{15}
# T(n) =
# \begin{cases}
# 1 & \text{se $n=0$}\\
# T(n-1) + [1\text{ multiplicação} + 1\text{ adição}]& \text{caso contrário.}
# \end{cases}
# \end{equation}
#
# A solução da recorrência é dada por:
#
# \begin{align}
# \tag{16}
# T(n) &= \sum_{i=1}^n [1\text{ multiplicação} + 1\text{ adição}]\\
# &= \sum_{i=1}^n [1\text{ multiplicação}] + \sum_{i=1}^n [1\text{ adição}]\\
# &= n \text{ multiplicações} + n \text{ adições}.
# \end{align}
#
# Assim sendo, podemos perceber que, dependendo da "força" da hipótese de indução, conseguimos implementar algoritmos mais eficientes.
#
# <font size="1">Este conteúdo foi elaborado com base em notas de aulas dos Profs. <NAME> e <NAME> (Instiuto de Computação, Unicamp).</font>
| complexidade_algoritmos/aula_inducao_projeto_algoritmos/induction_design_algorithm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# HIDDEN
from datascience import *
from prob140 import *
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# %matplotlib inline
from scipy import stats
# ## Expectation ##
# Let $X$ have density $f$. Let $g$ be a real valued function on the real line, and suppose you want to find $E(g(X))$. Then you can follow a procedure analogous to the non-linear function rule we developed for finding expectations of functions of discrete random variables.
#
# - Write a generic value of $X$: that's $x$.
# - Apply the function $g$ to get $g(x)$.
# - Weight $g(x)$ by the chance that $X$ is *just around $x$*, resulting in the product $g(x) \cdot f(x)dx$.
# - "Sum" over all $x$, that is, integrate.
#
# The expectation is
# $$
# E(g(X)) ~ = ~ \int_{-\infty}^{\infty} g(x)\cdot f(x)dx
# $$
#
# **Technical Note:** We must be careful here as $g$ is an arbitrary function and the integral above need not exist. If $g$ is non-negative, then the integral is either finite or diverges to $+\infty$, but it doesn't oscillate. So if $g$ is non-negative, define
#
# $$
# E(g(X)) ~ = ~ \int_{-\infty}^{\infty} g(x)\cdot f(x)dx ~~~
# \text{provided the integral is finite.}
# $$
#
# For a general $g$, first check whether $E(\lvert g(X) \rvert )$ is finite, that is, whether
#
# $$
# \int_{-\infty}^{\infty} \lvert g(x) \rvert \cdot f(x)dx ~ < ~ \infty
# $$
#
# If it is finite then there is a theorem that says $\int_{-\infty}^{\infty} g(x)\cdot f(x)dx $ exists, so it makes sense to define
#
# $$
# E(g(X)) ~ = ~ \int_{-\infty}^{\infty} g(x)\cdot f(x)dx
# $$
#
# **Non-technical Note:** In almost all of our examples, we will not be faced with questions about the existence of integrals. For example, if the set of possible values of $g(X)$ is bounded, then its expectation exists. But we will see a few examples of random variables that don't have expectations. Such random variables have "heavy tails" and are important in many applications.
#
# All the properties of means, variances, and covariances that we proved for discrete variables are still true. The proofs need to be rewritten for random variables with densities, but we won't take the time to do that. Just use the properties as you did before. The Central Limit Theorem holds as well.
# ### Uniform $(0, 1)$ ###
# The random variable $U$ is *uniform on the unit interval* if its density is flat over that interval and zero everywhere else:
#
# $$
# f_U(u) =
# \begin{cases}
# 1 ~~~~~~ \text{if } 0 < u < 1 \\
# 0 ~~~~~~ \text{otherwise}
# \end{cases}
# $$
# NO CODE
# Draw the uniform(0,1) density: height 1 on (0, 1), zero outside
plt.axes().set_aspect('equal','datalim')
plt.plot([0, 1], [1, 1], color='darkblue', lw=2)
# flat zero tails on either side of the unit interval
plt.plot([-0.5, 0], [0, 0], color='darkblue', lw=2)
plt.plot([1, 1.5], [0, 0], color='darkblue', lw=2)
plt.xlabel('$u$')
plt.ylabel('$f_U(u)$', rotation=0)
plt.title('Density');
# The area under $f_U$ over an interval is a rectangle. So it follows easily that the probability of an interval is its length relative to the total length of the unit interval, which is 1. For example, for every pair $u_1$ and $u_2$ with $u_1 < u_2$,
#
# $$
# P(u_1 < U < u_2) ~ = ~ u_2 - u_1
# $$
#
# Equivalently, the cdf of $U$ is
#
# $$
# F_U(u) =
# \begin{cases}
# 0 ~~~ \text{if } u \le 0 \\
# u ~~~ \text{if } 0 < u < 1 \\
# 1 ~~~ \text{if } u \ge 1
# \end{cases}
# $$
# NO CODE
plt.axes().set_aspect('equal','datalim')
plt.plot([0, 1], [0, 1], color='darkblue', lw=2)
plt.plot([-0.5, 0], [0, 0], color='darkblue', lw=2)
plt.plot([1, 1.5], [1, 1], color='darkblue', lw=2)
plt.xlabel('$u$')
plt.ylabel('$F_U(u)$', rotation=0)
plt.title('CDF of $U$');
# The expectation $E(U)$ doesn't require an integral either. It's the balance point of the density "curve", which is 1/2. But if you insist, you can integrate:
#
# $$
# E(U) ~ = ~ \int_0^1 u\cdot 1du ~ = ~ \frac{1}{2}
# $$
#
# For the variance, you do have to integrate. By the formula for expectation given at the start of this section,
#
# $$
# E(U^2) ~ = ~ \int_0^1 u^2\cdot 1du ~ = ~ \frac{1}{3}
# ~~~~~~~~~~~~~~~
# Var(U) ~ = ~ \frac{1}{3} - \big{(}\frac{1}{2}\big{)}^2 ~ = ~ \frac{1}{12}
# $$
# ### Uniform $(a, b)$ ###
# Fix $a < b$. The uniform distribution on $(a, b)$ is flat over the interval $(a, b)$ and 0 elsewhere. Since its graph is a rectangle and the total area must be 1, the height of the rectangle is $\frac{1}{b-a}$.
#
# So if $X$ has the uniform $(a, b)$ distribution, then the density of $X$ is
#
# $$
# f_X(x) ~ = ~ \frac{1}{b-a}, ~~~~ a < x < b
# $$
#
# and 0 elsewhere. Probabilities are still relative lengths, so the cdf of $X$ is
#
# $$
# F_X(x) ~ = ~ \frac{x - a}{b - a}, ~~~~ a < x < b
# $$
#
# The expectation and variance of $X$ can be derived with little calculation once you notice that $X$ can be created by starting with a uniform $(0, 1)$ random variable $U$.
#
# - **Step 1:** $U$ is uniform on $(0, 1)$
# - **Step 2:** $(b-a)U$ is uniform on $(0, (b-a))$
# - **Step 3:** $X = a + (b-a)U$ is uniform on $(a, b)$.
#
# Now $X$ is a linear transformation of $U$, so
#
# $$
# E(X) ~ = ~ a + (b-a)E(U) ~ = ~ a + \frac{b-a}{2} ~ = ~ \frac{a+b}{2}
# $$
#
# which is the midpoint of $(a, b)$. Also,
#
# $$
# Var(X) ~ = ~ \frac{(b-a)^2}{12}
# $$
# ### Example: Random Discs ###
# A screen saver chooses a random radius uniformly in the interval $(0, 2)$ centimeters and draws a disc with that radius. Then it chooses another radius in the same way, independently of the first, and draws another disc. And so on.
#
# **Question 1.** Let $S$ be the area of the first disc. Find $E(S)$.
#
# **Answer.** Let $R$ be the radius of the first disc. Then $S = \pi R^2$. So
#
# $$
# E(S) ~ = ~ \pi E(R^2) ~ = ~ \pi\big{(}Var(R) + (E(R))^2\big{)} ~ = ~
# \pi\big{(} \frac{4}{12} + 1^2\big{)} ~ = ~ 4.19 ~ cm^2
# $$
#
#
np.pi * (4/12 + 1)
# **Question 2.** Let $\bar{R}$ be the average radius of the first 100 discs. Find a number $c$ so that $P(\lvert \bar{R} - 1 \rvert < c) \approx 99\%$.
#
# **Answer.** Let $R_1, R_2, \ldots , R_{100}$ be the first 100 radii. These are i.i.d. random variables, each with mean 1 and variance $4/12$. So $E(\bar{R}) = 1$ and
#
# $$
# SD(\bar{R}) = \frac{\sqrt{4/12}}{\sqrt{100}} ~ = ~ 0.0577 ~ \mbox{cm}
# $$
# SD of the sample mean: SD(R)/sqrt(n), with Var(R) = (b-a)^2/12 = 4/12 and n = 100
sd_rbar = ((4/12)**0.5)/(100**0.5)
sd_rbar
# By the Central Limit Theorem, the distribution of $\bar{R}$ is approximately normal. Let's draw it using `Plot_norm`.
Plot_norm((0.8, 1.2), 1, sd_rbar)
plt.xlabel('Radius in Centimeters')
plt.title('Approximate Distribution of Sample Mean Radius');
# We are looking for $c$ such that there is about 99% chance that $\bar{R}$ is in the interval $(1-c, 1+c)$. Therefore $1 + c$ is the 99.5th (not 99th) percent point of the curve above, from which you can find $c$.
# 1 + c is the 99.5th percent point of the normal(1, sd_rbar) curve,
# leaving 0.5% in each tail for a central 99% interval
c = stats.norm.ppf(0.995, 1, sd_rbar) - 1
c
# There is another way to find $c$. Since $c$ is a distance from the mean, $c = zSD(\bar{R})$ where $z$ is such that the area between $-z$ and $z$ under the standard normal curve is about 99%. This $z$ is the 99.5th percent point of the standard normal curve.
# Equivalent route via the standard normal: c = z * SD(R-bar)
z = stats.norm.ppf(0.995)
z
c = z*sd_rbar
c
# That's the same value of $c$ that we got by the previous method. The graph below shows the corresponding area of 99%.
Plot_norm((0.8, 1.2), 1, sd_rbar, left_end = 1-c, right_end = 1+c)
plt.xticks([1-c, 1, 1+c])
plt.xlabel('Radius in Centimeters')
plt.title('Gold Area is Approximately 99%');
| content/Chapter_15/03_Expectation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="UZcM9OUWn9UL"
# # RadiusNeighborsClassifier with MinMaxScaler & Polynomial Features
# + [markdown] id="kkBBu5jOn9UN"
# This Code template is for the Classification task using a simple Radius Neighbor Classifier using MinMax Scaler with pipeline and Polynomial Feature Transformation. It implements learning based on the number of neighbors within a fixed radius r of each training point, where r is a floating-point value specified by the user.
# + [markdown] id="sV-wW21on9UO"
# ## Required Packages
# + id="ub8SkpTMn9UO"
# !pip install imblearn
# + id="P9p65l2on9UP"
import numpy as np
import pandas as pd
import seaborn as se
import warnings
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.neighbors import RadiusNeighborsClassifier
from imblearn.over_sampling import RandomOverSampler
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder, MinMaxScaler, PolynomialFeatures
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
# + [markdown] id="gZ4zLaQwn9UP"
# ## Initialization
#
# Filepath of CSV file
# + id="UgKSOoBin9UQ"
#filepath
file_path= ""
# + [markdown] id="OnrRQqGmn9UR"
# List of features which are required for model training
# + id="1vX2W3Bbn9UR"
#x_values
features=[]
# + [markdown] id="ypjIdVmWn9US"
# Target feature for prediction
# + id="IBQ_0BcMn9US"
#y_value
target=''
# + [markdown] id="DEi4M31fn9UT"
# ## Data Fetching
#
# Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
#
# We will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.
# + colab={"base_uri": "https://localhost:8080/", "height": 203} id="etwU7FtSn9UV" outputId="d4801563-ac8c-4e1a-f6d9-a2f6d9a0b88a"
df=pd.read_csv(file_path)
df.head()
# + [markdown] id="zD1h5Fa5n9UV"
# ## Feature Selections
#
# It is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
#
# We will assign all the required input features to X and target/outcome to Y.
# + id="oG_XMBiRn9UW"
X=df[features]
Y=df[target]
# + [markdown] id="ZiiEIb_4n9UW"
# ## Data Preprocessing
#
# Since the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes
# + id="zLKbvBqjn9UW"
def NullClearner(df):
    """Impute missing values in a Series in place and return it.

    Numeric (float64/int64) Series are filled with the mean, any other
    Series with the mode; non-Series inputs pass through unchanged.
    """
    if not isinstance(df, pd.Series):
        return df
    if df.dtype in ["float64", "int64"]:
        df.fillna(df.mean(), inplace=True)
    else:
        df.fillna(df.mode()[0], inplace=True)
    return df
def EncodeX(df):
    """One-hot encode categorical columns with pd.get_dummies; numeric columns pass through unchanged."""
    return pd.get_dummies(df)
def EncodeY(df):
    """Integer-encode the target when it has more than two classes.

    Binary (or single-class) targets are returned unchanged. Otherwise the
    classes are encoded with sklearn's LabelEncoder and the class-to-integer
    mapping is printed for reference.
    """
    if len(df.unique()) <= 2:
        return df
    original_labels = np.sort(pd.unique(df), axis=-1, kind='mergesort')
    df = LabelEncoder().fit_transform(df)
    encoded_labels = list(range(len(original_labels)))
    print("Encoded Target: {} to {}".format(original_labels, encoded_labels))
    return df
# + [markdown] id="fyDXl00Xn9UW"
# Calling preprocessing functions on the feature and target set.
# + colab={"base_uri": "https://localhost:8080/", "height": 220} id="1kofl-esn9UW" outputId="5c788db5-b529-4bba-941c-e5513538a5d1"
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
# + [markdown] id="vpkV_q0Bn9UX"
# ## Correlation Map
#
# In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="zydLJ1gpn9UX" outputId="8c4aea50-3195-4c43-ba1d-07e87f189bc8"
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
# + [markdown] id="vrPwBoz-n9UX"
# ## Distribution Of Target Variable
# + colab={"base_uri": "https://localhost:8080/", "height": 391} id="edP-aLegn9UX" outputId="efc6c164-8a4a-42b0-8a9a-3a53d6ad9ff3"
plt.figure(figsize = (10,6))
se.countplot(Y)
# + [markdown] id="LWW-qhnXn9UY"
# ## Data Splitting
#
# The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
# + id="LqdMb7Wln9UY"
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
# + [markdown] id="wOYj_nNin9UY"
# ## Handling Target Imbalance
#
# The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important.
#
# One approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class.We will perform overspampling using imblearn library.
# + id="XwHzWWTTn9UY"
x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train)
# + [markdown] id="etd4iD3in9UY"
# ## Data Rescaling
# **MinMaxScaler**
#
# Transform features by scaling each feature to a given range.
#
# This estimator scales and translates each feature individually such that it is in the given range on the training set, e.g. between zero and one.
#
# The transformation is given by:
#
# X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0)) <br>
# X_scaled = X_std * (max - min) + min
#
# <a href="https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html">More about MinMaxScaler</a>
# + [markdown] id="k9rf5B0un9UZ"
# ## Feature Transformation
#
# **Polynomial Features**
#
# Generate polynomial and interaction features.
#
# Generate a new feature matrix consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. For example, if an input sample is two dimensional and of the form [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
#
# [More on Polynomial Features](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PolynomialFeatures.html)
# + [markdown] id="tWeX6bMOn9UZ"
# ## Model
# RadiusNeighborsClassifier implements learning based on the number of neighbors within a fixed radius of each training point, where is a floating-point value specified by the user. In cases where the data is not uniformly sampled, radius-based neighbors classification can be a better choice.
#
# ### Tuning parameters
# **radius:** Range of parameter space to use by default for radius_neighbors queries.
#
# **algorithm:** Algorithm used to compute the nearest neighbors:
#
# **leaf_size:** Leaf size passed to BallTree or KDTree.
#
# **p:** Power parameter for the Minkowski metric.
#
# **metric:** the distance metric to use for the tree.
#
# **outlier_label:** label for outlier samples
#
# **weights:** weight function used in prediction.
# <br><br>FOR MORE INFO : <a href="https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.RadiusNeighborsClassifier.html">API</a>
# + colab={"base_uri": "https://localhost:8080/"} id="CQpc6yiwn9UZ" outputId="7fb0c35b-06d9-464d-c7e1-fca164e9f1bb"
model = make_pipeline(MinMaxScaler(),PolynomialFeatures(),RadiusNeighborsClassifier())
model.fit(x_train, y_train)
# + [markdown] id="Ieazeldon9UZ"
# ## Model Accuracy
#
# score() method return the mean accuracy on the given test data and labels.
#
# In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.
# + colab={"base_uri": "https://localhost:8080/"} id="GBkGVi2Bn9Ua" outputId="9f19e112-2df4-4f56-feae-1f4f873a9403"
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
# + [markdown] id="OvWnRMcCn9Ua"
# ## Confusion Matrix
#
# A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="3iqTq8rCn9Ua" outputId="05acd490-3cbe-4752-e84c-d397a4d69554"
plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)
# + [markdown] id="s8MeEA5un9Ua"
# ## Classification Report
# A Classification report is used to measure the quality of predictions from a classification algorithm. How many predictions are True, how many are False.
#
# where:
# - Precision:- Accuracy of positive predictions.
# - Recall:- Fraction of positives that were correctly identified.
# - f1-score:- percent of positive predictions were correct
# - support:- Support is the number of actual occurrences of the class in the specified dataset.
# + colab={"base_uri": "https://localhost:8080/"} id="WKJd2p2zn9Ua" outputId="f37f1768-6bb4-4d4d-85aa-4219a1290384"
print(classification_report(y_test,model.predict(x_test)))
# + [markdown] id="DFcUtcFxZiSM"
# #### Creator: <NAME> , Github: [Profile - Iamgrootsh7](https://github.com/iamgrootsh7)
| Classification/Radius Neighbors/RadiusNeighborsClassifier_MinMaxScaler_PolynomialFeatures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### Importing required packages into python:
# +
# Required dependencies
# 1. NLTK
# 2. Gensim for word2vec
# 3. Keras with tensorflow/theano backend
# NOTE(review): this notebook targets Python 2 (print statements, unicode()).
import numpy as np
np.random.seed(1337)  # fixed seed for reproducibility
import json, re, nltk, string, csv, sys, codecs
from nltk.corpus import wordnet
from gensim.models import Word2Vec
from keras.preprocessing import sequence
from keras.models import Model
from keras.layers import Dense, Dropout, Embedding, LSTM, Input, merge
from keras import layers
from keras.optimizers import RMSprop
from keras.utils import np_utils
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics.pairwise import cosine_similarity
# Hack to increase size due to Error: field larger than field limit (131072)
# Try ever-smaller limits until csv accepts one (sys.maxsize can overflow
# the underlying C long on some platforms).
maxInt = sys.maxsize
decrement = True
while decrement:
    # decrease the maxInt value by factor 10
    # as long as the OverflowError occurs.
    decrement = False
    try:
        csv.field_size_limit(maxInt)
    except OverflowError:
        maxInt = int(maxInt / 10)
        decrement = True
# Input CSVs: open bugs (for vocabulary/word2vec) and closed bugs (labeled).
open_bugs_csv = 'e1_open.csv'
closed_bugs_csv = 'm15_closed.csv'
#========================================================================================
# Initializing Hyper parameter
#========================================================================================
#1. Word2vec parameters
min_word_frequency_word2vec = 5
embed_size_word2vec = 200
context_window_word2vec = 5
#2. Classifier hyperparameters
numCV = 10  # number of chronological cross-validation folds
max_sentence_len = 50  # max tokens kept per bug report
min_sentence_length = 15  # reports shorter than this are dropped
rankK = 10  # report top-1 .. top-K accuracy
batch_size = 32
#========================================================================================
# Preprocess the open bugs, extract the vocabulary and learn the word2vec representation
#========================================================================================
with open(open_bugs_csv) as data_file:
    data = csv.reader(data_file, delimiter=';')
    all_data = []
    for item in data:
        #1. Remove \r
        current_title = unicode(item[1], errors='ignore').replace('\r', ' ')
        #print current_title
        current_desc = unicode(item[3], errors='ignore').replace('\r', ' ')
        #print current_desc
        #2. Remove URLs
        current_desc = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', current_desc)
        #3. Remove Stack Trace
        # NOTE(review): if "Stack trace:" is absent, find() returns -1 and the
        # slice below silently drops the final character of the description.
        start_loc = current_desc.find("Stack trace:")
        current_desc = current_desc[:start_loc]
        #4. Remove hex code
        current_desc = re.sub(r'(\w+)0x\w+', '', current_desc)
        current_title= re.sub(r'(\w+)0x\w+', '', current_title)
        #5. Change to lower case
        current_desc = current_desc.lower()
        current_title = current_title.lower()
        #6. Tokenize
        current_desc_tokens = nltk.word_tokenize(current_desc)
        current_title_tokens = nltk.word_tokenize(current_title)
        #7. Strip trailing punctuation marks
        current_desc_filter = [word.strip(string.punctuation) for word in current_desc_tokens]
        current_title_filter = [word.strip(string.punctuation) for word in current_title_tokens]
        #8. Join the lists (title tokens first, then description tokens)
        current_data = current_title_filter + current_desc_filter
        current_data = filter(None, current_data)
        all_data.append(current_data)
#print(len(all_data))
# Learn the word2vec model and extract vocabulary
wordvec_model = Word2Vec(all_data, min_count=min_word_frequency_word2vec, size=embed_size_word2vec, window=context_window_word2vec)
vocabulary = wordvec_model.wv.vocab
#print vocabulary
vocab_size = len(vocabulary)
#========================================================================================
# Preprocess the closed bugs, using the extracted the vocabulary
#========================================================================================
# Same cleaning pipeline as for the open bugs; additionally collects the
# assignee (column 4) as the classification label.
with open(closed_bugs_csv) as data_file:
    data = csv.reader(data_file, delimiter=';')
    all_data = []
    all_owner = []
    for item in data:
        #1. Remove \r
        current_title = unicode(item[1], errors='ignore').replace('\r', ' ')
        current_desc = unicode(item[3], errors='ignore').replace('\r', ' ')
        #2. Remove URLs
        current_desc = re.sub(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', '', current_desc)
        #3. Remove Stack Trace
        # NOTE(review): as above — a missing "Stack trace:" marker makes find()
        # return -1 and the slice drops the last character.
        start_loc = current_desc.find("Stack trace:")
        current_desc = current_desc[:start_loc]
        #4. Remove hex code
        current_desc = re.sub(r'(\w+)0x\w+', '', current_desc)
        current_title= re.sub(r'(\w+)0x\w+', '', current_title)
        #5. Change to lower case
        current_desc = current_desc.lower()
        current_title = current_title.lower()
        #6. Tokenize
        current_desc_tokens = nltk.word_tokenize(current_desc)
        current_title_tokens = nltk.word_tokenize(current_title)
        #7. Strip punctuation marks
        current_desc_filter = [word.strip(string.punctuation) for word in current_desc_tokens]
        current_title_filter = [word.strip(string.punctuation) for word in current_title_tokens]
        #8. Join the lists
        current_data = current_title_filter + current_desc_filter
        current_data = filter(None, current_data)
        all_data.append(current_data)
        all_owner.append(item[4])
#========================================================================================
# Split cross validation sets and perform deep learning + softmax based classification
#========================================================================================
totalLength = len(all_data)
splitLength = int(totalLength / (numCV + 1))
# Chronological CV: fold i trains on the first i chunks, tests on chunk i+1.
for i in range(1, numCV + 1):
    # Split cross validation set
    print("Starting work on cross validation set {0}".format(i))
    train_data = all_data[:i*splitLength-1]
    test_data = all_data[i*splitLength:(i+1)*splitLength-1]
    train_owner = all_owner[:i*splitLength-1]
    test_owner = all_owner[i*splitLength:(i+1)*splitLength-1]
    # Remove words outside the vocabulary
    updated_train_data = []
    updated_train_data_length = []
    updated_train_owner = []
    final_test_data = []
    final_test_owner = []
    for j, item in enumerate(train_data):
        current_train_filter = [word for word in item if word in vocabulary]
        if len(current_train_filter) >= min_sentence_length:
            updated_train_data.append(current_train_filter)
            updated_train_owner.append(train_owner[j])
    for j, item in enumerate(test_data):
        current_test_filter = [word for word in item if word in vocabulary]
        if len(current_test_filter) >= min_sentence_length:
            final_test_data.append(current_test_filter)
            final_test_owner.append(test_owner[j])
    # Remove data from test set that is not there in train set
    train_owner_unique = set(updated_train_owner)
    test_owner_unique = set(final_test_owner)
    unwanted_owner = list(test_owner_unique - train_owner_unique)
    updated_test_data = []
    updated_test_owner = []
    updated_test_data_length = []
    for j in range(len(final_test_owner)):
        if final_test_owner[j] not in unwanted_owner:
            updated_test_data.append(final_test_data[j])
            updated_test_owner.append(final_test_owner[j])
    unique_train_label = list(set(updated_train_owner))
    classes = np.array(unique_train_label)
    # Create train and test data for deep learning + softmax
    X_train = np.empty(shape=[len(updated_train_data), max_sentence_len, embed_size_word2vec], dtype='float32')
    Y_train = np.empty(shape=[len(updated_train_owner), 1], dtype='int32')
    # 1 - start of sentence, # 2 - end of sentence, # 0 - zero padding. Hence, word indices start with 3
    for j, curr_row in enumerate(updated_train_data):
        sequence_cnt = 0
        for item in curr_row:
            if item in vocabulary:
                X_train[j, sequence_cnt, :] = wordvec_model[item]
                sequence_cnt = sequence_cnt + 1
                if sequence_cnt == max_sentence_len-1:
                    break
        # Zero-pad the remainder of the sequence.
        for k in range(sequence_cnt, max_sentence_len):
            X_train[j, k, :] = np.zeros((1, embed_size_word2vec))
        Y_train[j, 0] = unique_train_label.index(updated_train_owner[j])
    X_test = np.empty(shape=[len(updated_test_data), max_sentence_len, embed_size_word2vec], dtype='float32')
    Y_test = np.empty(shape=[len(updated_test_owner),1], dtype='int32')
    # 1 - start of sentence, # 2 - end of sentence, # 0 - zero padding. Hence, word indices start with 3
    for j, curr_row in enumerate(updated_test_data):
        sequence_cnt = 0
        for item in curr_row:
            if item in vocabulary:
                X_test[j, sequence_cnt, :] = wordvec_model[item]
                sequence_cnt = sequence_cnt + 1
                if sequence_cnt == max_sentence_len-1:
                    break
        for k in range(sequence_cnt, max_sentence_len):
            X_test[j, k, :] = np.zeros((1, embed_size_word2vec))
        Y_test[j, 0] = unique_train_label.index(updated_test_owner[j])
    y_train = np_utils.to_categorical(Y_train, len(unique_train_label))
    y_test = np_utils.to_categorical(Y_test, len(unique_train_label))
    # TODO: Add x_train and x_test
    # Construct the deep learning model: bidirectional LSTM built from a
    # forward and a backward LSTM whose outputs are concatenated.
    print("Creating Model")
    sequence = Input(shape=(max_sentence_len, embed_size_word2vec), dtype='float32')
    forwards_1 = LSTM(1024)(sequence)
    after_dp_forward_4 = Dropout(0.20)(forwards_1)
    backwards_1 = LSTM(1024, go_backwards=True)(sequence)
    after_dp_backward_4 = Dropout(0.20)(backwards_1)
    #merged = merge([after_dp_forward_4, after_dp_backward_4], mode='concat', concat_axis=-1)
    merged = layers.concatenate([after_dp_forward_4, after_dp_backward_4], axis=-1)
    after_dp = Dropout(0.5)(merged)
    output = Dense(len(unique_train_label), activation='softmax')(after_dp)
    model = Model(input=sequence, output=output)
    rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
    hist = model.fit(X_train, y_train, batch_size=batch_size, epochs=20) # Rename nb_epochs to epochs // Value original: 200
    predict = model.predict(X_test)
    accuracy = []
    sortedIndices = []
    pred_classes = []
    if len(predict) == 0:
        exit(1) # Avoid divide by zero
    # Rank predicted classes per sample by descending probability.
    for ll in predict:
        sortedIndices.append(sorted(range(len(ll)), key=lambda ii: ll[ii], reverse=True))
    for k in range(1, rankK + 1):
        id = 0
        trueNum = 0
        for sortedInd in sortedIndices:
            pred_classes.append(classes[sortedInd[:k]])
            # NOTE(review): y_test[id] is a one-hot row while classes[...] holds
            # owner labels — this membership test looks wrong; the baseline loop
            # below uses updated_test_owner[id] instead. Confirm intent.
            if y_test[id] in classes[sortedInd[:k]]:
                trueNum += 1
            id += 1
        accuracy.append((float(trueNum) / len(predict)) * 100)
    print("Test accuracy: ", accuracy)
    train_result = hist.history
    print(train_result)
    del model
#========================================================================================
# Split cross validation sets and perform baseline classifiers
#========================================================================================
totalLength = len(all_data)
# Integer division under Python 2; the DL loop above wraps it in int() instead.
splitLength = totalLength / (numCV + 1)
for i in range(1, numCV+1):
    # Split cross validation set
    print("Starting cross validation {0}".format(i))
    train_data = all_data[:i*splitLength-1]
    test_data = all_data[i*splitLength:(i+1)*splitLength-1]
    train_owner = all_owner[:i*splitLength-1]
    test_owner = all_owner[i*splitLength:(i+1)*splitLength-1]
    # Remove words outside the vocabulary
    updated_train_data = []
    updated_train_data_length = []
    updated_train_owner = []
    final_test_data = []
    final_test_owner = []
    for j, item in enumerate(train_data):
        current_train_filter = [word for word in item if word in vocabulary]
        if len(current_train_filter)>=min_sentence_length:
            updated_train_data.append(current_train_filter)
            updated_train_owner.append(train_owner[j])
    for j, item in enumerate(test_data):
        current_test_filter = [word for word in item if word in vocabulary]
        if len(current_test_filter)>=min_sentence_length:
            final_test_data.append(current_test_filter)
            final_test_owner.append(test_owner[j])
    # Remove data from test set that is not there in train set
    train_owner_unique = set(updated_train_owner)
    test_owner_unique = set(final_test_owner)
    unwanted_owner = list(test_owner_unique - train_owner_unique)
    updated_test_data = []
    updated_test_owner = []
    updated_test_data_length = []
    for j in range(len(final_test_owner)):
        if final_test_owner[j] not in unwanted_owner:
            updated_test_data.append(final_test_data[j])
            updated_test_owner.append(final_test_owner[j])
    # Re-join tokens into whitespace-separated strings for CountVectorizer.
    train_data = []
    for item in updated_train_data:
        train_data.append(' '.join(item))
    test_data = []
    for item in updated_test_data:
        test_data.append(' '.join(item))
    vocab_data = []
    for item in vocabulary:
        vocab_data.append(item)
    # Extract tf based bag of words representation
    tfidf_transformer = TfidfTransformer(use_idf=False)
    count_vect = CountVectorizer(min_df=1, vocabulary= vocab_data,dtype=np.int32)
    train_counts = count_vect.fit_transform(train_data)
    train_feats = tfidf_transformer.fit_transform(train_counts)
    print(train_feats.shape)
    test_counts = count_vect.transform(test_data)
    test_feats = tfidf_transformer.transform(test_counts)
    print(test_feats.shape)
    print("=" * 20)
    # perform classification with each baseline in turn
    for classifier in range(1,5):
        #classifier = 3 # 1 - Niave Bayes, 2 - Softmax, 3 - cosine distance, 4 - SVM
        print classifier
        if classifier == 1:
            classifierModel = MultinomialNB(alpha=0.01)
            classifierModel = OneVsRestClassifier(classifierModel).fit(train_feats, updated_train_owner)
            predict = classifierModel.predict_proba(test_feats)
            classes = classifierModel.classes_
            accuracy = []
            sortedIndices = []
            pred_classes = []
            for ll in predict:
                sortedIndices.append(sorted(range(len(ll)), key=lambda ii: ll[ii], reverse=True))
            for k in range(1, rankK+1):
                id = 0
                trueNum = 0
                for sortedInd in sortedIndices:
                    if updated_test_owner[id] in classes[sortedInd[:k]]:
                        trueNum += 1
                    pred_classes.append(classes[sortedInd[:k]])
                    id += 1
                accuracy.append((float(trueNum) / len(predict)) * 100)
            print accuracy
        elif classifier == 2:
            classifierModel = LogisticRegression(solver='lbfgs', penalty='l2', tol=0.01)
            classifierModel = OneVsRestClassifier(classifierModel).fit(train_feats, updated_train_owner)
            # NOTE(review): predict() returns labels, not per-class scores; the
            # ranking loop below treats each row as a score vector. The NB branch
            # uses predict_proba — this one probably should too. Confirm.
            predict = classifierModel.predict(test_feats)
            classes = classifierModel.classes_
            accuracy = []
            sortedIndices = []
            pred_classes = []
            for ll in predict:
                sortedIndices.append(sorted(range(len(ll)), key=lambda ii: ll[ii], reverse=True))
            for k in range(1, rankK+1):
                id = 0
                trueNum = 0
                for sortedInd in sortedIndices:
                    if updated_test_owner[id] in classes[sortedInd[:k]]:
                        trueNum += 1
                    pred_classes.append(classes[sortedInd[:k]])
                    id += 1
                accuracy.append((float(trueNum) / len(predict)) * 100)
            print accuracy
        elif classifier == 3:
            # Nearest-neighbour baseline: rank training docs by cosine similarity.
            predict = cosine_similarity(test_feats, train_feats)
            classes = np.array(updated_train_owner)
            classifierModel = []
            accuracy = []
            sortedIndices = []
            pred_classes = []
            for ll in predict:
                sortedIndices.append(sorted(range(len(ll)), key=lambda ii: ll[ii], reverse=True))
            for k in range(1, rankK+1):
                id = 0
                trueNum = 0
                for sortedInd in sortedIndices:
                    if updated_test_owner[id] in classes[sortedInd[:k]]:
                        trueNum += 1
                    pred_classes.append(classes[sortedInd[:k]])
                    id += 1
                accuracy.append((float(trueNum) / len(predict)) * 100)
            print accuracy
        elif classifier == 4:
            classifierModel = svm.SVC(probability=True, verbose=False, decision_function_shape='ovr', random_state=42)
            classifierModel.fit(train_feats, updated_train_owner)
            # NOTE(review): same issue as the LogisticRegression branch —
            # predict() returns labels; predict_proba/decision_function would
            # give the score rows the ranking loop expects. Confirm.
            predict = classifierModel.predict(test_feats)
            classes = classifierModel.classes_
            accuracy = []
            sortedIndices = []
            pred_classes = []
            for ll in predict:
                sortedIndices.append(sorted(range(len(ll)), key=lambda ii: ll[ii], reverse=True))
            for k in range(1, rankK+1):
                id = 0
                trueNum = 0
                for sortedInd in sortedIndices:
                    if updated_test_owner[id] in classes[sortedInd[:k]]:
                        trueNum += 1
                    pred_classes.append(classes[sortedInd[:k]])
                    id += 1
                accuracy.append((float(trueNum) / len(predict)) * 100)
            print accuracy
# -
| Triage-MHWEB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="WwY6TGlJbTmu"
# # **GIT_COMMITS_CHANGES**
#
# This notebook the selection of the rellevant attributes of the table `GIT_COMMITS_CHANGES`.
#
# First, we import the libraries we need and, then, we read the corresponding csv.
# + id="w16KbbRmbLjJ"
import pandas as pd
# + id="OtC_dc72bLjT" outputId="d4c3c04e-02b3-4df1-b4a9-174950137178"
gitCommitsChanges = pd.read_csv("../../../data/raw/GIT_COMMITS_CHANGES.csv")
print(gitCommitsChanges.shape)
list(gitCommitsChanges)
# + [markdown] id="zAZQil0LbkPf"
# We select the desired attributes of the table.
# + id="lMro6YsYbLjd"
attributes = ['projectID', 'commitHash', 'changeType', 'linesAdded', 'linesRemoved']
gitCommitsChanges = gitCommitsChanges[attributes]
# + id="9IRkiFcBbLjk" outputId="18284ef9-7efe-4ae7-bf43-e405e21050ac"
print(gitCommitsChanges.shape)
gitCommitsChanges.head()
# + [markdown] id="-ps73UHbbvs7"
# We save this new table into a csv.
# + id="qpjKBEaxbLjp"
gitCommitsChanges.to_csv('../../../data/interim/DataPreparation/SelectData/GIT_COMMITS_CHANGES_select.csv', header=True)
| notebooks/2-DataPreparation/1-SelectData/6-DB-GIT-COMMITS-CHANGES.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
# # Defining a custom corpus for plotting text
#
#
# By default, the text samples will be transformed into a vector of word counts
# and then modeled using Latent Dirichlet Allocation (# of topics = 100) using a
# model fit to a large sample of wikipedia pages. However, you can optionally
# pass your own text to fit the semantic model. To do this define corpus as a
# list of documents (strings). A topic model will be fit on the fly and the text
# will be plotted.
#
#
# +
# Code source: <NAME>
# License: MIT
# load hypertools
import hypertools as hyp
# load the data: tiny toy corpus of short sentences
text_samples = ['i like cats alot', 'cats r pretty cool', 'cats are better than dogs',
                'dogs rule the haus', 'dogs are my jam', 'dogs are a mans best friend',
                'i haz a cheezeburger?']
# plot it — passing the samples as their own corpus fits the topic model on them
hyp.plot(text_samples, 'o', corpus=text_samples)
| docs/auto_examples/plot_corpus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.naive_bayes import BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from itertools import product
# -
# Load the weather data; the last column is the target (RainTomorrow).
dataset = pd.read_csv('weather.csv')
flabels = list(dataset)
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, -1].values
#Splitting the dataset into the Training Set and Test Set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size =0.33, random_state=1234)
print(X_train.shape, X_test.shape, Y_train.shape, Y_test.shape)
# ## Question 2: Naïve Bayes
# Train a Naïve Bayes classifier to predict RainTomorrow.
# As all attributes are binary vectors, use the BernoulliNB classifier provided by scikit-learn.
clf2 = BernoulliNB()
clf2.fit(X_train, Y_train)
print(clf2.classes_, clf2.class_count_)
Y_train_pred = clf2.predict(X_train)
Y_test_pred = clf2.predict(X_test)
print('Correctly predicted on TRAINING SET: {}, errors:{}'.format(sum(Y_train==Y_train_pred), sum(Y_train!=Y_train_pred)))
print('Correctly predicted on TEST set: {}, errors:{}'.format(sum(Y_test==Y_test_pred), sum(Y_test!=Y_test_pred)))
print(classification_report(Y_train,Y_train_pred))
print('Accuracy on TRAINING set: {:.2f}'.format(accuracy_score(Y_train, Y_train_pred)))
print(classification_report(Y_test, Y_test_pred))
print('Accuracy on TEST set: {:.2f}'.format(accuracy_score(Y_test, Y_test_pred)))
print("Confused Matrix:[ TRAINING ] \n"," ".join(["{:3d}".format(d) for d in clf2.classes_]),"<-- PREDICTED LABEL")
print(confusion_matrix(Y_train, Y_train_pred, labels=clf2.classes_))
print("Confused Matrix:[ TEST ] \n"," ".join(["{:3d}".format(d) for d in clf2.classes_]),"<-- PREDICTED LABEL")
print(confusion_matrix(Y_test, Y_test_pred, labels=clf2.classes_))
Y_pred_proba = clf2.predict_proba(X_test)
print(Y_pred_proba[0])
# # Question 3: Decision Tree
# Train a DecisionTreeClassifier to predict RainTomorrow. Use argument class_weight=’balanced’ when
# constructing the classifier, as the target variable RainTomorrow is not equally distributed in the data set.
dtclf = DecisionTreeClassifier(class_weight = "balanced",)
dtclf.fit(X_train, Y_train)
print(dtclf.classes_)
dt_Y_train_pred = dtclf.predict(X_train)
dt_Y_test_pred = dtclf.predict(X_test)
print('Correctly predicted on TRAINING SET: {}, errors:{}'.
      format(sum(Y_train==dt_Y_train_pred), sum(Y_train!=dt_Y_train_pred)))
print('Correctly predicted on TEST set: {}, errors:{}'.format(sum(Y_test==dt_Y_test_pred), sum(Y_test!=dt_Y_test_pred)))
print(classification_report(Y_train,dt_Y_train_pred))
print('Accuracy on TRAINING set: {:.2f}'.format(accuracy_score(Y_train, dt_Y_train_pred)))
print(classification_report(Y_test, dt_Y_test_pred))
print('Accuracy on TEST set: {:.2f}'.format(accuracy_score(Y_test, dt_Y_test_pred)))
print("Confused Matrix:[ TRAINING ] \n"," ".join(["{:3d}".format(d) for d in dtclf.classes_]),"<-- PREDICTED LABEL")
print(confusion_matrix(Y_train, dt_Y_train_pred, labels=dtclf.classes_))
print("Confused Matrix:[ TEST ] \n"," ".join(["{:3d}".format(d) for d in dtclf.classes_]),"<-- PREDICTED LABEL")
print(confusion_matrix(Y_test, dt_Y_test_pred, labels=dtclf.classes_))
dt_Y_pred_proba = dtclf.predict_proba(X_test)
print(dt_Y_pred_proba[0])
# # Question 4: Diagnosis
# Does the Decision Tree model suffer from overfitting or underfitting? Justify why/why not.
# If the model exhibits overfitting or underfitting, revise your training procedure to remedy the problem, and
# re-evaluate the improved model. The DecisionTreeClassifier has a number of parameters that you can
# consider for tuning the model:
#
# * max_depth: maximum depth of the tree
# * min_samples_leaf: minimum number of samples in each leaf node
# * max_leaf_nodes: maximum number of leaf nodes
#
# ## Answer : Our Decision Tree model Suffers from ovefitting.
# First, we try to find out the best parameters for our decision tree:
# +
# Grid search over leaf size, depth and leaf count.
# NOTE(review): parameters are selected by TEST-set accuracy, which leaks the
# test set into model selection — a validation split or CV would be cleaner.
# Also `max_acc == None` would idiomatically be `max_acc is None`.
max_msl = max_md = max_mnl = max_acc = None
for msl, md, mnl in product(range(2,15), range(5, 50, 5), range(5, 100, 10)):
    options = dict(min_samples_leaf=msl, max_depth=md, max_leaf_nodes=mnl)
    clf3 = DecisionTreeClassifier(class_weight = "balanced", **options)
    clf3.fit(X_train, Y_train)
    Y_test_pred = clf3.predict(X_test)
    curr_acc = accuracy_score(Y_test, Y_test_pred)
    if max_acc == None or max_acc < curr_acc:
        max_msl, max_md, max_mnl, max_acc = msl, md, mnl, curr_acc
# -
# Then we use those parameters to evaluate the performance of our new model with best params:
# +
best_params = dict(min_samples_leaf=max_msl, max_depth=max_md, max_leaf_nodes=max_mnl)
clf3 = DecisionTreeClassifier(class_weight = "balanced", **best_params)
clf3.fit(X_train, Y_train)
dtb_Y_train_pred = clf3.predict(X_train)
dtb_Y_test_pred = clf3.predict(X_test)
print(classification_report(Y_train,dtb_Y_train_pred))
print('Accuracy on TRAINING set: {:.2f}'.format(accuracy_score(Y_train,dtb_Y_train_pred))+"\n")
print(classification_report(Y_test, dtb_Y_test_pred))
print('Accuracy on TEST set: {:.2f}'.format(accuracy_score(Y_test, dtb_Y_test_pred))+"\n")
print("Confused Matrix[ TRAINING ]: \n"," ".join(["{:3d}".format(d) for d in clf3.classes_]),"<-- PREDICTED LABEL")
print(confusion_matrix(Y_train, dtb_Y_train_pred, labels=clf3.classes_))
print("Confused Matrix[ TEST ]: \n"," ".join(["{:3d}".format(d) for d in clf3.classes_]),"<-- PREDICTED LABEL")
print(confusion_matrix(Y_test, dtb_Y_test_pred, labels=clf3.classes_))
# -
dt_Y_pred_proba = clf3.predict_proba(X_test)
print(dt_Y_pred_proba[0])
# ### Random Forest
rclf = RandomForestClassifier(max_depth=2, random_state=0)
rclf.fit(X_train, Y_train)
print(rclf.classes_)
r_Y_train_pred = rclf.predict(X_train)
r_Y_test_pred = rclf.predict(X_test)
print('Correctly predicted on TRAINING SET: {}, errors:{}'.
      format(sum(Y_train==r_Y_train_pred), sum(Y_train!=r_Y_train_pred)))
print('Correctly predicted on TEST set: {}, errors:{}'.format(sum(Y_test==r_Y_test_pred), sum(Y_test!=r_Y_test_pred)))
print(classification_report(Y_train,r_Y_train_pred))
print('Accuracy on TRAINING set: {:.2f}'.format(accuracy_score(Y_train, r_Y_train_pred)))
print(classification_report(Y_test, r_Y_test_pred))
print('Accuracy on TEST set: {:.2f}'.format(accuracy_score(Y_test, r_Y_test_pred)))
print("Confused Matrix:[ TRAINING ] \n"," ".join(["{:3d}".format(d) for d in rclf.classes_]),"<-- PREDICTED LABEL")
print(confusion_matrix(Y_train, r_Y_train_pred, labels=rclf.classes_))
print("Confused Matrix:[ TEST ] \n"," ".join(["{:3d}".format(d) for d in rclf.classes_]),"<-- PREDICTED LABEL")
print(confusion_matrix(Y_test, r_Y_test_pred, labels=rclf.classes_))
r_Y_pred_proba = rclf.predict_proba(X_test)
print(r_Y_pred_proba[0])
| Weather_RainTomorrow_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Chiebukar/Machine-Learning/blob/main/regression/disaster_tweets_classification.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="76a1DDBK6Lr8"
from google.colab import files
files.upload()
# + id="UXZx0g-l7ud5"
# create directory and move kaggle.json to directory
# !mkdir -p ~/.kaggle
# !cp kaggle.json ~/.kaggle/
# !chmod 600 ~/.kaggle/kaggle.json
# + id="sXIWEU1S8THp" colab={"base_uri": "https://localhost:8080/"} outputId="e08d71e8-444e-4242-8b60-23b9c64f2428"
# download dataset
# !kaggle competitions download -c nlp-getting-started
# + colab={"base_uri": "https://localhost:8080/"} id="65sUKOhP8e1X" outputId="50cf6608-167f-4dec-ce2e-9b34e2b6cd2c"
# View downloaded files
# !ls -d $PWD/*
# + id="RL3UZMKF8zp7"
# import required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# + id="5JmBpeot9cA_"
train_data = pd.read_csv('/content/train.csv')
test_data = pd.read_csv('/content/test.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="K7I7YAyf903c" outputId="318dfa7b-6373-4ab4-b0e4-7de8659ee651"
train_data.head()
# + colab={"base_uri": "https://localhost:8080/"} id="D9Ze1VCY-GHQ" outputId="df24b098-7a2a-40ba-ffcb-2fb0445e0bc1"
train_data.shape, test_data.shape
# + colab={"base_uri": "https://localhost:8080/"} id="waTw9h76_pZN" outputId="d31e59b0-e399-45b0-e69a-bce888aab7ee"
train_data.isna().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="HzmYW-rJFb9G" outputId="c89c42c7-4d14-470b-d012-7f06f584ac2e"
train_data['keyword'].unique()
# + id="KnPUhNbXH0a4" colab={"base_uri": "https://localhost:8080/"} outputId="74878b85-b15e-4dc8-94c7-442164dfbd24"
train_data['target'].unique()
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="uCPaBUYR_AdF" outputId="b481a548-21d1-4999-9111-165383e2fb21"
train_data['target'].value_counts().plot(kind='bar')
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="5U3-aYiMG_cq" outputId="f00d3a40-70ad-4d2b-d395-1525a1618d3d"
test_data.head()
# + id="ZSWENSdGGWdl"
train_features = train_data['text']
train_label = train_data['target']
test_features = test_data['text']
# + id="l744AvalKK6j"
# get count of unique words in input
def get_num_of_words(data, text_column= 'input'):
    """Return the number of distinct whitespace-separated, lower-cased tokens in *text_column*."""
    vocabulary = set()
    # Lower-case and split each row, folding every token into one vocabulary set.
    data[text_column].str.lower().str.split().apply(vocabulary.update)
    return len(vocabulary)
# + id="995N33Rq7H4C"
from collections import Counter
# + colab={"base_uri": "https://localhost:8080/"} id="UFE51FJLKdoO" outputId="779e210c-4b7b-4c19-b4e4-aed864e71286"
NUM_WORDS = get_num_of_words(train_data, 'text')
NUM_WORDS
# + id="Y6ARnBH2KSkh"
# get maximum length of text
def get_max_len(data, text_column='input'):
    """Return the character length of the longest entry in *text_column*."""
    return data[text_column].str.len().max()
# + colab={"base_uri": "https://localhost:8080/"} id="1YQTuFF8KmgV" outputId="e22c82b1-fed4-4770-bddf-5b891e4fe61a"
MAX_LEN = get_max_len(train_data, 'text')
MAX_LEN
# + id="QbeLz-KcHag2"
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# + id="AqCAS5hgJxbD"
def tokenize(data, NUM_WORDS, MAX_LEN, fit_data=train_features):
    """Fit a Keras Tokenizer on *fit_data* and return *data* as padded integer sequences.

    NOTE(review): the default ``fit_data=train_features`` is evaluated once, at
    function-definition time, so it keeps pointing at the original text Series
    even after the global ``train_features`` is later rebound to the tokenized
    array — confirm this early binding is intentional before refactoring.
    """
    tokenizer = Tokenizer(num_words=NUM_WORDS, oov_token='UNK')  # 'UNK' stands in for out-of-vocabulary words
    tokenizer.fit_on_texts(fit_data)
    sequences = tokenizer.texts_to_sequences(data)
    # Pad (or truncate) each sequence at the end to a fixed length of MAX_LEN.
    data = pad_sequences(sequences,maxlen=MAX_LEN, padding='post')
    return np.array(data)
# + id="X-zjSH4SUDOe"
train_features = tokenize(train_features, 25000, 150)
test_features = tokenize(test_features, 25000, 150)
# + colab={"base_uri": "https://localhost:8080/"} id="pRjNYxQ8VSx5" outputId="dad63015-b9f0-4694-e5ae-18ee8ad3505a"
test_features[0]
# + colab={"base_uri": "https://localhost:8080/"} id="6QBStgjpa-o6" outputId="f66de06a-48cc-4d6c-d3f5-8301b61ab671"
train_label[:5]
# + id="26ADe-zvbbJg"
from sklearn.model_selection import train_test_split
# + colab={"base_uri": "https://localhost:8080/"} id="lQnhT4FqbSbv" outputId="39470a2b-246f-41c2-f5b1-41d1594169d6"
x_train, x_val, y_train, y_val = train_test_split(train_features, train_label, stratify=train_label, test_size=0.2)
y_train[:5], y_val[:5]
# + id="cslWgRWRWDIS"
from keras.models import Sequential
from keras.layers import Embedding, Conv1D, MaxPooling1D, GlobalMaxPooling1D, Dropout, Dense, BatchNormalization
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
from keras import regularizers
# + id="n0EPHyDNVa_5"
def build_model():
    """Assemble and compile the 1-D CNN text classifier.

    The network embeds a 25k-word vocabulary into 8 dimensions, applies two
    Conv1D stages with pooling, then an L2-regularised dense layer with
    dropout, and ends in a sigmoid unit for binary classification. It is
    compiled with binary cross-entropy loss and the RMSprop optimizer.
    """
    model = Sequential([
        Embedding(25000, 8),
        Conv1D(4, 3, activation='relu'),
        MaxPooling1D(),
        Conv1D(8, 3, activation='relu', kernel_regularizer=regularizers.l2(0.001)),
        GlobalMaxPooling1D(),
        Dense(4, activation='relu', kernel_regularizer=regularizers.l2(0.001)),
        Dropout(0.25),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['acc'])
    return model
# + colab={"base_uri": "https://localhost:8080/"} id="k9VGLw7BfRWS" outputId="0b0db681-0278-41d9-d397-ab72fcc335ad"
cnn_model = build_model()
cnn_model.summary()
# + id="ZBAY5OWCfqYE"
learn_control = ReduceLROnPlateau(monitor='val_acc', patience=5, verbose = 1, factor=0.2, min_lr= 1e-7)
filepath = 'weights.best.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor ='val_acc', verbose = 1, save_best_only = True, mode='max')
# + id="VBPqSm1jjWFg"
history = cnn_model.fit(x_train, y_train, epochs= 25, validation_data = (x_val, y_val),
batch_size = 8, callbacks = [learn_control, checkpoint])
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="v5Mx0HBwjx05" outputId="80d0769e-c8bd-40f2-bcb1-ac36bc632f4d"
history_df = pd.DataFrame(history.history)
history_df[['loss', 'val_loss']].plot()
# + id="48cqwfXBl_mB"
cnn_model.load_weights('weights.best.hdf5')
# + id="c2y2ss6g9f6E"
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors.classification import KNeighborsClassifier
from sklearn.ensemble.weight_boosting import AdaBoostClassifier
from sklearn.ensemble.gradient_boosting import GradientBoostingClassifier
from sklearn.ensemble.bagging import BaggingClassifier
from sklearn.ensemble.forest import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from xgboost import XGBClassifier
from catboost import CatBoostClassifier
from sklearn.model_selection import GridSearchCV
from mlxtend.classifier import StackingClassifier
# + id="-7f_4TyB697a"
config = {
'support vector machine' : {
'model' : SVC(),
'params': {
'C': [1, 2, 4, 6, 8],
'kernel': ['linear', 'poly', 'rbf', 'sigmoid', 'precomputed']
}
},
'K neighbors classifier' : {
'model' : KNeighborsClassifier(),
'params': {
'kneighborsclassifier__algorithm' : ['auto', 'ball_tree', 'kd_tree', 'brute'],
'kneighborsclassifier__metric': ['minkowski', 'precomputed'],
'kneighborsclassifier__n_neighbors': [1, 2, 5, 7, 10],
'kneighborsclassifier__weights':['uniform', 'distance']
}
},
'random forest classifier' : {
'model' : RandomForestClassifier(),
'params': {
'randomforestclassifier__criterion' : ['gini', 'entropy'],
'randomforestclassifier__n_estimators': [1,5,10],
'randomforestclassifier__warm_start' : [True, False]
}
},
'logistic regression' : {
'model' : LogisticRegression(),
'params': {
'logisticregression__penalty' : ['l1', 'l2', 'elasticnet', 'none'],
'logisticregression__C' : [1, 5, 10],
'logisticregression__fit_intercept' : [True, False],
'logisticregression__solver' : ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
'logisticregression__warm_start' : [True, False]
}
},
}
# + id="h6HV2TvuHfF2"
def get_best_model():
    """Grid-search every entry of the global ``config`` on the global training split.

    Returns a 2-tuple: a dict mapping model name -> fitted best estimator, and
    a DataFrame with one row per model (name, best CV score, best params).

    NOTE(review): several param grids in ``config`` use pipeline-style
    double-underscore prefixes (e.g. ``kneighborsclassifier__n_neighbors``)
    while the bare estimator is passed to GridSearchCV — verify these names
    match what the estimator actually accepts.
    """
    records = []
    best_estimator = {}
    for name, spec in config.items():
        print('processing data with {}'.format(name))
        search = GridSearchCV(spec['model'], spec['params'], cv = 4, return_train_score= False)
        search.fit(x_train,y_train)
        records.append({
            'model' : name,
            'best_score' : search.best_score_,
            'best_params' : search.best_params_
        })
        best_estimator[name] = search.best_estimator_
    return best_estimator, pd.DataFrame(records)
# + [markdown] id="TvVCTDJ_SQOL"
# Blending Ensembling
# + id="BvHshyGuWbJN"
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score, KFold
# + id="5ZsowjXZZtxY"
kf = KFold(n_splits = 10, random_state = 0, shuffle = True)
# + id="hl76H0bHXFMA"
def get_score(model):
    """Return the mean cross-validated accuracy of *model* on the global training split (uses global ``kf``)."""
    fold_scores = cross_val_score(model, x_train, y_train, scoring= 'accuracy', cv= kf)
    return fold_scores.mean()
# + id="_-VeEclsaq_T"
baseline_models = {'adaboost' :AdaBoostClassifier(),
'gdboost': GradientBoostingClassifier(),
'baging_clf': BaggingClassifier(),
'xgboost': XGBClassifier() ,
'catboost': CatBoostClassifier(),
'SVM' : SVC(),
'random_forest': RandomForestClassifier(),
'KNN': KNeighborsClassifier(),
'Log_Regression': LogisticRegression(),
'stacking_clf': StackingClassifier(classifiers= (SVC(), LogisticRegression(), KNeighborsClassifier()),
meta_classifier = LogisticRegression(),
use_features_in_secondary = True)
}
# + id="XEezJn8eSees"
score = []
for model_name, model in baseline_models.items():
print('getting score for {}'.format(model_name))
model_score = get_score(model)
score.append({
'model' : model_name,
'base_score': model_score
})
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="N4m9e5H4e7mg" outputId="ce6e6cfd-8383-4ea0-a6b7-f6c257d73fd4"
scores_df = pd.DataFrame(score)
scores_df
# + colab={"base_uri": "https://localhost:8080/", "height": 166} id="eyOE8U40oY-X" outputId="9687473b-0348-412a-df6b-2ef297cb3604"
scores_df.sort_values(by= 'base_score', ascending=False, ignore_index=True).head(4)
# + id="7iTtQQ6uR4ve"
model1 = cnn_model
val_pred1 = model1.predict(x_val).round()
test_pred1 = model1.predict(test_features).round()
df_val_pred1 = pd.DataFrame(val_pred1).astype('int')
df_test_pred1 = pd.DataFrame(test_pred1).astype('int')
model2 = RandomForestClassifier()
model2.fit(x_train, y_train)
val_pred2 = model2.predict(x_val)
test_pred2 = model2.predict(test_features)
df_val_pred2 = pd.DataFrame(val_pred2)
df_test_pred2 = pd.DataFrame(test_pred2)
model3 = CatBoostClassifier()
model3.fit(x_train, y_train)
val_pred3 = model3.predict(x_val)
test_pred3 = model3.predict(test_features)
df_val_pred3 = pd.DataFrame(val_pred3)
df_test_pred3 = pd.DataFrame(test_pred3)
model4 = GradientBoostingClassifier()
model4.fit(x_train, y_train)
val_pred4 = model4.predict(x_val)
test_pred4 = model4.predict(test_features)
df_val_pred4 = pd.DataFrame(val_pred4)
df_test_pred4 = pd.DataFrame(test_pred4)
model5 = XGBClassifier()
model5.fit(x_train, y_train)
val_pred5 = model5.predict(x_val)
test_pred5 = model5.predict(test_features)
df_val_pred5 = pd.DataFrame(val_pred5)
df_test_pred5 = pd.DataFrame(test_pred5)
# + id="HbB8AgKTTgkD" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="724684c4-c012-415f-e3fa-90545da95bf6"
val_df= pd.concat([df_val_pred1, df_val_pred2, df_val_pred3, df_val_pred4, df_val_pred5], axis= 1)
val_df.columns = ['CNN', 'RD_forest', 'Catboost', 'GDboost', 'XGB']
val_df.head(5)
# + id="aD9S1ktEZXbf" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="3da31c7b-819c-4e69-d9e0-4ef8b4102e3b"
test_df= pd.concat([df_test_pred1, df_test_pred2, df_test_pred3, df_test_pred4, df_test_pred5], axis= 1)
test_df.columns = ['CNN', 'RD_forest', 'Catboost', 'GDboost', 'XGB']
test_df.head(5)
# + id="xxW9ZmA2VvVM"
from sklearn.linear_model import LogisticRegression
# + id="k1M_-c9vV43M" colab={"base_uri": "https://localhost:8080/"} outputId="713324d8-ef66-47da-98d3-1ecc0abf6375"
lr = LogisticRegression()
lr.fit(val_df, y_val)
lr.score(val_df, y_val)
# + id="0kbHzvxpZmKf" colab={"base_uri": "https://localhost:8080/"} outputId="d6e04be8-dba5-4bff-da2e-1bea94f380d2"
test_prediction = lr.predict(test_df)
test_prediction[:5]
# + id="geGavANXi480" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="169cbf62-f4ab-481c-a2db-07b0547420a4"
submission = pd.DataFrame(test_data['id'], columns= ['id'])
submission['target'] = test_prediction
submission.head()
# + id="PAqJPSRIm2fX"
submission.to_csv('submission.csv', index= False, header = True)
# + id="DwVE_z2kmnWT" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="53a386e7-6ce7-47d2-99b5-6e4a80191088"
files.download('submission.csv')
# + id="B5Ji4dHOMoWM"
from keras.layers import LSTM, Bidirectional
# + colab={"base_uri": "https://localhost:8080/"} id="tcJ0VHBmPlLt" outputId="8e77701a-6d6f-4eee-a475-922d129666a9"
x_train.shape
# + id="q5Vk5LptPO-F"
learn_control = ReduceLROnPlateau(monitor='val_acc', patience=5, verbose = 1, factor=0.2, min_lr= 1e-7)
filepath = 'weights.best.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor ='val_acc', verbose = 1, save_best_only = True, mode='max')
# + id="_vYAtz8OPO-R"
history = cnn_model.fit(x_train, y_train, epochs= 25, validation_data = (x_val, y_val),
batch_size = 8, callbacks = [learn_control, checkpoint])
# + id="XQpeToELPM8c"
| regression/disaster_tweets_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="rNdPrjzezI_n"
# #Install and Config LingX
# + id="hbVhjztiYmIR"
# !pip install lingx
# + [markdown] id="qnCEO8BwzQrZ"
# #Import Libraries
# + id="lC5czLQljHVA"
import lingx.utils.download_lang_models
from lingx.core.lang_model import get_nlp_object
nlp_en = get_nlp_object("en", use_critt_tokenization = True, package="partut")
nlp_zh = get_nlp_object("zh", use_critt_tokenization = True)
from lingx.utils.critt.aligner import generate_alignment_pipelines
from lingx.utils.critt.tables import readTPRDBtables
from lingx.utils.critt.tables import convert_st2segment , convert_tt2segment
from lingx.utils.critt.tables import expand_table_psycholingual , expand_table_monolingual , expand_table_bilingual
from lingx.utils.critt.tables import merge_st_tt , expand_table_error
# + [markdown] id="pe3DKyw2T8ri"
# # Convert ST and TT to Segment DataFrames
# + id="y_E-PH1NT8EB"
# !git clone https://github.com/ContentSide/lingx.git
path_to_tprdb = "/content/lingx/resources/TPRDB/EN-ZH_IMBst18/"
df_sg = readTPRDBtables(['Tables/'], "*sg", verbose=1, path=path_to_tprdb)
df_st = readTPRDBtables(['Tables/'], "*st", verbose=1, path=path_to_tprdb)
df_tt = readTPRDBtables(['Tables/'], "*tt", verbose=1, path=path_to_tprdb)
alignments_offset = generate_alignment_pipelines(df_st, df_tt)
analysis_st = convert_st2segment(df_st)
analysis_tt = convert_tt2segment(df_tt)
analysis_st
# + [markdown] id="rW56vRvDzYpN"
# #Calculate the Complexities
# + id="-6DTfW20abRt"
import numpy
import time
start_time = time.time()
analysis_st = expand_table_psycholingual(analysis_st, nlp_en, token_column="SToken")
analysis_tt = expand_table_psycholingual(analysis_tt, nlp_zh, token_column="TToken")
analysis_st = expand_table_monolingual(analysis_st, nlp_en, token_column="SToken")
analysis_tt = expand_table_monolingual(analysis_tt, nlp_zh, token_column="TToken")
analysis_st_tt = merge_st_tt(analysis_st, analysis_tt, alignments_offset)
analysis_st_tt = expand_table_bilingual(analysis_st_tt, nlp_en, nlp_zh, robust=True, bcr_error_value=numpy.nan)
print(analysis_st_tt)
print(analysis_st_tt.columns)
print("Running Time (Min): ",round((time.time() - start_time)/60,0))
# + [markdown] id="VIVoEEU_zk0U"
# #Connect the Metric Results to Human-level Analysis
# + id="8ebQMuKRmpze"
error_file_path = "/content/lingx/resources/TPRDB/EN-ZH_IMBst18/HumanEvaluations/errors.csv"
analysis_st_tt = expand_table_error(analysis_st_tt, error_file_path)
analysis_st_tt.columns
# + [markdown] id="FdhmYkqoWd9O"
# #Filter Table on Numeric Measures
# + id="mNbMQgrHfi-_"
table = analysis_st_tt[[
'IDT_MAX_TT', 'IDT_MEAN_TT', 'IDT_SUM_TT', 'DLT_MAX_TT', 'DLT_MEAN_TT',
'DLT_SUM_TT', 'IDT_DLT_MAX_TT', 'IDT_DLT_MEAN_TT', 'IDT_DLT_SUM_TT', 'LE_MEAN_TT',
'LE_MAX_TT', 'LE_SUM_TT', 'MBN_MEAN_TT', 'MBN_MAX_TT', 'MBN_SUM_TT',
'SToken', 'IDT_MAX_ST', 'IDT_MEAN_ST', 'IDT_SUM_ST', 'DLT_MAX_ST',
'DLT_MEAN_ST', 'DLT_SUM_ST', 'IDT_DLT_MAX_ST', 'IDT_DLT_MEAN_ST',
'IDT_DLT_SUM_ST', 'LE_MEAN_ST', 'LE_MAX_ST', 'LE_SUM_ST', 'MBN_MEAN_ST',
'MBN_MAX_ST', 'MBN_SUM_ST', 'Alignment', 'BCR_SUM_SUM_SUM',
'BCR_SUM_SUM_MAX', 'BCR_SUM_SUM_MEAN', 'BCR_SUM_MAX_SUM',
'BCR_SUM_MAX_MAX', 'BCR_SUM_MAX_MEAN', 'BCR_SUM_MEAN_SUM',
'BCR_SUM_MEAN_MAX', 'BCR_SUM_MEAN_MEAN',
'Any', 'Accuracy', 'Fluency', 'Style', 'Critical', 'Minor'
]]
# + [markdown] id="2d6PVsNDz0uY"
# #Correlation Matrix
# + id="Wi8SOvzVsOeZ"
report = table.corr(method="spearman")
# report.to_csv("report.csv")
report[['Any','Accuracy','Fluency','Style','Critical','Minor']]
| resources/ALAPP2021/ALAPP_2021_Paper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import warnings
warnings.simplefilter('ignore')
# -
df = pd.read_csv("Latest Covid-19 India Status.csv")
df.head()
# ## Highest Active Cases
#
act = df.groupby(['Active']).max().reset_index()
act.sort_values('Active' ,ascending=False)
df.describe().T
total_case_sort = df.sort_values(by='Total Cases',ascending = False)
plt.figure(figsize= (10,10))
sns.barplot(data = total_case_sort,x ='State/UTs',y='Total Cases')
plt.xticks(rotation = 90)
plt.show()
sns.catplot(data = df,x ='State/UTs',y='Active',kind="bar",height = 4,aspect =3)
plt.xticks(rotation = 90)
plt.show()
# +
# Label each state 'high' when it has more than 10,000 active cases, else 'low'.
status = ['high' if active_count > 10000 else 'low' for active_count in df['Active']]
df['Case Status'] = status
# -
df.head()
figure = px.bar(df,x = 'State/UTs',y ='Total Cases',color='Case Status')
figure.show()
# +
sorted_data = df.sort_values(by='Death Ratio (%)', ascending=False).head(10)
plt.bar(sorted_data['State/UTs'],sorted_data['Death Ratio (%)'])
plt.xlabel('State/UTs')
plt.ylabel('Percentage(death ratio)')
plt.title('highest number of deaths')
plt.xticks(rotation=90)
plt.show()
| Covid_Analysis_India.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import os
import h2o
from h2o.automl import H2OAutoML
h2o.init()
train_path = '../autox/data/ventilator/train.csv'
test_path = '../autox/data/ventilator/test.csv'
# Load data into H2O
train = h2o.import_file(train_path)
test = h2o.import_file(test_path)
train.head()
y = "pressure"
aml2 = H2OAutoML(max_runtime_secs = 7200, seed = 1, project_name = "kaggle_ventilator")
aml2.train(y = y, training_frame = train)
pred = aml2.predict(test)
sub = pd.read_csv(test_path)
sub = sub[['id']]
pred = h2o.as_list(pred)
sub[y] = pred['predict']
sub.to_csv("./h2o_sub_kaggle_ventilator.csv", index = False)
| demo/ventilator/h2o_kaggle_ventilator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
def loss(y_pred, y_true):
    """Composite pose loss: MSE plus two penalties on the quaternion part of *y_pred*.

    Returns a 4-tuple ``(total_loss, mse_loss, penalty_norm, penalty_negative)``.

    NOTE(review): ``y_pred[:4]`` slices the FIRST axis, so this assumes an
    unbatched 1-D prediction vector whose first four components are the
    quaternion — confirm it is never called with a (batch, 7) tensor.
    """
    # Lagrange-style multiplier weighting the constraint penalties.
    lag_mult = 1
    #mse loss
    mse_loss = tf.math.reduce_mean(tf.math.squared_difference(y_pred, y_true))
    # penalty for non-unit quaternions
    # penalty_norm = lag_mult * tf.math.abs(tf.linalg.norm(y_pred[:4]) - 1)
    # exp(|norm - 1|) - 1 rises smoothly from 0 as the quaternion norm deviates from unity.
    penalty_norm = tf.reduce_sum(tf.exp(tf.math.abs(tf.linalg.norm(y_pred[:4], axis=-1) - 1.)) -1.)
    # penalty for negative quaternion
    # Counts how many of the first 4 components are negative (a non-differentiable step penalty).
    penalty_negative = lag_mult * tf.math.reduce_sum( tf.cast(y_pred[:4]<0, dtype=tf.float32) )
    return mse_loss + penalty_norm + penalty_negative, mse_loss, penalty_norm, penalty_negative
# +
# y_pred = tf.constant([-1, 0., 0., 0., 2., 2., 2.])
y_pred = tf.constant([1, 0., 2., 0., 2., 2., 2.])
y_true = tf.constant([0.5, 0.5, 0.5, 0.5, 2., 2., 2.])
st = ["total_loss : %f\n", "mse_loss : %f\n", "penalty_norm : %f\n", "penalty_unitary : %f\n"]
for i in range(4):
print(st[i] %loss(y_pred, y_true)[i])
# -
| notebooks/loss_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (reco_gpu)
# language: python
# name: reco_gpu
# ---
# <i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
#
# <i>Licensed under the MIT License.</i>
# # NRMS: Neural News Recommendation with Multi-Head Self-Attention
# NRMS \[1\] is a neural news recommendation approach with multi-head self-attention. The core of NRMS is a news encoder and a user encoder. In the news encoder, multi-head self-attention is used to learn news representations from news titles by modeling the interactions between words. In the user encoder, we learn representations of users from their browsed news and use multi-head self-attention to capture the relatedness between the news. Besides, we apply additive
# attention to learn more informative news and user representations by selecting important words and news.
#
# ## Properties of NRMS:
# - NRMS is a content-based neural news recommendation approach.
# - It uses multi-self attention to learn news representations by modeling the iteractions between words and learn user representations by capturing the relationship between user browsed news.
# - NRMS uses additive attentions to learn informative news and user representations by selecting important words and news.
#
# ## Data format:
# For quicker training and evaluation, we sample the MINDdemo dataset of 5k users from [MIND small dataset](https://msnews.github.io/). The MINDdemo dataset has the same file format as MINDsmall and MINDlarge. If you want to try experiments on MINDsmall and MINDlarge, please change the download source. Select the MIND_type parameter from ['large', 'small', 'demo'] to choose dataset.
#
# **MINDdemo_train** is used for training, and **MINDdemo_dev** is used for evaluation. Training data and evaluation data are composed of a news file and a behaviors file. You can find more detailed data description in [MIND repo](https://github.com/msnews/msnews.github.io/blob/master/assets/doc/introduction.md)
#
# ### news data
# This file contains news information including news ID, category, subcategory, news title, news abstract, news url and entities in news title, entities in news abstract.
# One simple example: <br>
#
# `N46466 lifestyle lifestyleroyals The Brands Queen Elizabeth, Prince Charles, and Prince Philip Swear By Shop the notebooks, jackets, and more that the royals can't live without. https://www.msn.com/en-us/lifestyle/lifestyleroyals/the-brands-queen-elizabeth,-prince-charles,-and-prince-philip-swear-by/ss-AAGH0ET?ocid=chopendata [{"Label": "Prince Philip, Duke of Edinburgh", "Type": "P", "WikidataId": "Q80976", "Confidence": 1.0, "OccurrenceOffsets": [48], "SurfaceForms": ["Prince Philip"]}, {"Label": "Charles, Prince of Wales", "Type": "P", "WikidataId": "Q43274", "Confidence": 1.0, "OccurrenceOffsets": [28], "SurfaceForms": ["Prince Charles"]}, {"Label": "Elizabeth II", "Type": "P", "WikidataId": "Q9682", "Confidence": 0.97, "OccurrenceOffsets": [11], "SurfaceForms": ["Queen Elizabeth"]}] []`
# <br>
#
# In general, each line in data file represents information of one piece of news: <br>
#
# `[News ID] [Category] [Subcategory] [News Title] [News Abstrct] [News Url] [Entities in News Title] [Entities in News Abstract] ...`
#
# <br>
#
# We generate a word_dict file to transform words in news titles to word indexes, and an embedding matrix is initialized from pretrained GloVe embeddings.
#
# ### behaviors data
# One simple example: <br>
# `1 U82271 11/11/2019 3:28:58 PM N3130 N11621 N12917 N4574 N12140 N9748 N13390-0 N7180-0 N20785-0 N6937-0 N15776-0 N25810-0 N20820-0 N6885-0 N27294-0 N18835-0 N16945-0 N7410-0 N23967-0 N22679-0 N20532-0 N26651-0 N22078-0 N4098-0 N16473-0 N13841-0 N15660-0 N25787-0 N2315-0 N1615-0 N9087-0 N23880-0 N3600-0 N24479-0 N22882-0 N26308-0 N13594-0 N2220-0 N28356-0 N17083-0 N21415-0 N18671-0 N9440-0 N17759-0 N10861-0 N21830-0 N8064-0 N5675-0 N15037-0 N26154-0 N15368-1 N481-0 N3256-0 N20663-0 N23940-0 N7654-0 N10729-0 N7090-0 N23596-0 N15901-0 N16348-0 N13645-0 N8124-0 N20094-0 N27774-0 N23011-0 N14832-0 N15971-0 N27729-0 N2167-0 N11186-0 N18390-0 N21328-0 N10992-0 N20122-0 N1958-0 N2004-0 N26156-0 N17632-0 N26146-0 N17322-0 N18403-0 N17397-0 N18215-0 N14475-0 N9781-0 N17958-0 N3370-0 N1127-0 N15525-0 N12657-0 N10537-0 N18224-0`
# <br>
#
# In general, each line in data file represents one instance of an impression. The format is like: <br>
#
# `[Impression ID] [User ID] [Impression Time] [User Click History] [Impression News]`
#
# <br>
#
# User Click History is the user historical clicked news before Impression Time. Impression News is the displayed news in an impression, which format is:<br>
#
# `[News ID 1]-[label1] ... [News ID n]-[labeln]`
#
# <br>
# Label represents whether the news is clicked by the user. All information of news in User Click History and Impression News can be found in news data file.
# ## Global settings and imports
# +
import sys
import os
import numpy as np
import zipfile
from tqdm import tqdm
import scrapbook as sb
from tempfile import TemporaryDirectory
import tensorflow as tf
tf.get_logger().setLevel('ERROR') # only show error messages
from reco_utils.recommender.deeprec.deeprec_utils import download_deeprec_resources
from reco_utils.recommender.newsrec.newsrec_utils import prepare_hparams
from reco_utils.recommender.newsrec.models.nrms import NRMSModel
from reco_utils.recommender.newsrec.io.mind_iterator import MINDIterator
from reco_utils.recommender.newsrec.newsrec_utils import get_mind_data_set
print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
# -
# ## Prepare parameters
# + tags=["parameters"]
epochs = 5
seed = 42
batch_size = 32
# Options: demo, small, large
MIND_type = 'demo'
# -
# ## Download and load data
# +
tmpdir = TemporaryDirectory()
data_path = tmpdir.name
train_news_file = os.path.join(data_path, 'train', r'news.tsv')
train_behaviors_file = os.path.join(data_path, 'train', r'behaviors.tsv')
valid_news_file = os.path.join(data_path, 'valid', r'news.tsv')
valid_behaviors_file = os.path.join(data_path, 'valid', r'behaviors.tsv')
wordEmb_file = os.path.join(data_path, "utils", "embedding.npy")
userDict_file = os.path.join(data_path, "utils", "uid2index.pkl")
wordDict_file = os.path.join(data_path, "utils", "word_dict.pkl")
yaml_file = os.path.join(data_path, "utils", r'nrms.yaml')
mind_url, mind_train_dataset, mind_dev_dataset, mind_utils = get_mind_data_set(MIND_type)
if not os.path.exists(train_news_file):
download_deeprec_resources(mind_url, os.path.join(data_path, 'train'), mind_train_dataset)
if not os.path.exists(valid_news_file):
download_deeprec_resources(mind_url, \
os.path.join(data_path, 'valid'), mind_dev_dataset)
if not os.path.exists(yaml_file):
download_deeprec_resources(r'https://recodatasets.z20.web.core.windows.net/newsrec/', \
os.path.join(data_path, 'utils'), mind_utils)
# -
# ## Create hyper-parameters
hparams = prepare_hparams(yaml_file,
wordEmb_file=wordEmb_file,
wordDict_file=wordDict_file,
userDict_file=userDict_file,
batch_size=batch_size,
epochs=epochs,
show_step=10)
print(hparams)
# ## Train the NRMS model
iterator = MINDIterator
model = NRMSModel(hparams, iterator, seed=seed)
print(model.run_eval(valid_news_file, valid_behaviors_file))
# %%time
model.fit(train_news_file, train_behaviors_file, valid_news_file, valid_behaviors_file)
# %%time
res_syn = model.run_eval(valid_news_file, valid_behaviors_file)
print(res_syn)
sb.glue("res_syn", res_syn)
# ## Save the model
# +
model_path = os.path.join(data_path, "model")
os.makedirs(model_path, exist_ok=True)
model.model.save_weights(os.path.join(model_path, "nrms_ckpt"))
# -
# ## Output Prediction File
# This code segment is used to generate the prediction.zip file, which is in the same format in [MIND Competition Submission Tutorial](https://competitions.codalab.org/competitions/24122#learn_the_details-submission-guidelines).
#
# Please change the `MIND_type` parameter to `large` if you want to submit your prediction to [MIND Competition](https://msnews.github.io/competition.html).
group_impr_indexes, group_labels, group_preds = model.run_fast_eval(valid_news_file, valid_behaviors_file)
with open(os.path.join(data_path, 'prediction.txt'), 'w') as f:
for impr_index, preds in tqdm(zip(group_impr_indexes, group_preds)):
impr_index += 1
pred_rank = (np.argsort(np.argsort(preds)[::-1]) + 1).tolist()
pred_rank = '[' + ','.join([str(i) for i in pred_rank]) + ']'
f.write(' '.join([str(impr_index), pred_rank])+ '\n')
f = zipfile.ZipFile(os.path.join(data_path, 'prediction.zip'), 'w', zipfile.ZIP_DEFLATED)
f.write(os.path.join(data_path, 'prediction.txt'), arcname='prediction.txt')
f.close()
# ## Reference
# \[1\] Wu et al. "Neural News Recommendation with Multi-Head Self-Attention." in Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)<br>
# \[2\] Wu, Fangzhao, et al. "MIND: A Large-scale Dataset for News Recommendation" Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics. https://msnews.github.io/competition.html <br>
# \[3\] GloVe: Global Vectors for Word Representation. https://nlp.stanford.edu/projects/glove/
| examples/00_quick_start/nrms_MIND.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Rescore docking poses with predicted binding affinity
#
# This notebook analyzes docking pose rescoring. It contains the same analyzes conducted by Chachulski & Windshügel in their [LEADS-FRAG paper](https://doi.org/10.1021/acs.jcim.0c00693), where they benchmark different molecular docking programs with and without rescoring:
#
# **Analysis 1**: To evaluate the rescoring based on the entire set of dockings, we count the number of top poses with an RMSD below several thresholds. We compare these counts before and after rescoring.
#
# **Analysis 2**: To directly compare rescoring with the scoring function used by the docking program, we count dockings where the best pose (RMSD-wise) is also the top ranked pose.
#
# Input are three CSV files:
# 1. docking poses with rank and RMSD
# 2. binding affinities predicted for each pose and its receptor
# 3. binding affinities predicted for each reference ligand and its receptor
#
# Some assumptions are made regarding format:
# - columns are separated by ";"
# - the column "Receptor" contains some sort of ID to link the values from different files
# - the column "PoseRank" contains each pose's rank according to the docking program (top pose ranked first)
# - the column "RMSD" contains RMSD between a pose and some reference
# - the column "PredictedBindingAffinity" contains the predicted binding affinities
#
# With the predicted affinity, we can re-rank the docking poses. To evaluate, if this brings any improvement for a specific docking, we compare the RMSD of the pose ranked highest by the docking program, and the pose with the highest affinity. If the top pose based on affinity has a lower RMSD, the re-ranking did improve the docking result.
#
# The predicted binding affinity for reference ligands opens another way of evaluating the rescoring: if it works well, the reference ligands should be assigned an affinity higher than all their docking poses.
#
# ## Load and prepare RMSD and affinity data
# +
# SELECT DATA TO LOAD
import pandas as pd
# Docking poses: one row per pose with its rank and RMSD to the reference.
# All three files use ';' as the column separator (see assumptions above).
docking = pd.read_csv('../example_data/affinity_predictions/docking_poses.csv', sep=';')
# predicted affinity of each pose
predictions = pd.read_csv('../example_data/affinity_predictions/affinity_predictions.csv', sep=';')
# predicted affinity of the reference
refligs = pd.read_csv('../example_data/affinity_predictions/affinity_predictions_refligands.csv', sep=';')
# -
# The data frames look as follows:
docking
predictions
# To make life easier, make column names match between both frames.
predictions.rename(columns={'Ligand': 'PoseRank'}, inplace=True)
# Now the frames can be easily combined into a single frame.
# With no 'on' argument, merge joins on all columns shared by both frames
# (here: the receptor ID and the pose rank).
combined = docking.merge(predictions, sort=True)
combined
# ## Rescoring ##
#
# Rescoring means assigning a new ranking order to the poses, in this case based on the predicted binding affinity. The higher the affinity, the better. We use RMSD to break ties in the affinity rank (lower RMSD means better rank).
# Rank poses per receptor by predicted affinity (highest affinity -> rank 1,
# hence ascending=False). Sorting by RMSD first makes method='first' break
# affinity ties in favour of the lower-RMSD pose.
affinity_rank = combined.sort_values(by='RMSD').groupby(by='Receptor').rank(ascending=False, method='first').rename(columns={'PredictedBindingAffinity': 'AffinityRank'}).AffinityRank
# Merging on the index re-aligns the rank values with the original row order.
combined = combined.merge(affinity_rank, left_index=True, right_index=True, sort=True)
combined
# ## Did rescoring improve individual dockings?
# To compare the old and new rankings for individual dockings, look at the RMSD difference between old and new top pose. When subtracting the new from the old top pose RMSD, a difference > 0 shows an improvement after rescoring. For this analysis, we only use dockings with potential for improvement, i.e. where top pose RMSD is not the lowest RMSD.
# +
# Rank poses according to RMSD
combined['RMSDRank'] = combined[['Receptor', 'RMSD']].groupby(by='Receptor').rank(ascending=True)
# Construct new data frame with minimum RMSD, top pose RMSD and top affinity RMSD for each receptor.
# .min() is taken per receptor group; only the MinRMSD column is kept below.
stats = combined.groupby(by='Receptor').min().rename(columns={'RMSD': 'MinRMSD'})
stats = stats.reset_index()[['Receptor', 'MinRMSD']]
# RMSD of the pose the docking program ranked first ...
stats = stats.merge(combined[combined.PoseRank == 1].rename(columns={'RMSD': 'TopPoseRMSD'})[['Receptor', 'TopPoseRMSD']])
# ... and RMSD of the pose with the highest predicted affinity.
stats = stats.merge(combined[combined.AffinityRank == 1][['Receptor', 'RMSD']].rename(columns={'RMSD': 'TopAffinityRMSD'}))
# All dockings with top pose RMSD > minimum RMSD can be improved through rescoring
improvable_dockings = stats[stats.TopPoseRMSD != stats.MinRMSD][['Receptor', 'TopPoseRMSD', 'TopAffinityRMSD', 'MinRMSD']]
print(f'There are {len(improvable_dockings)} dockings with potential for improvement:')
improvable_dockings[['Receptor', 'TopPoseRMSD', 'MinRMSD']]
# -
# Now we can use the difference of top pose RMSD and top affinity RMSD to see the effect of rescoring.
# Positive DeltaRMSD: the affinity-ranked top pose sits closer to the
# reference than the docking program's own top pose, i.e. rescoring helped.
improvable_dockings['DeltaRMSD'] = improvable_dockings.TopPoseRMSD - improvable_dockings.TopAffinityRMSD
improved_dockings = improvable_dockings[improvable_dockings.DeltaRMSD > 0]
worse_dockings = improvable_dockings[improvable_dockings.DeltaRMSD < 0]
print(round(len(improved_dockings)/len(improvable_dockings) * 100, 1), '% of which improved.')
print(round(len(worse_dockings)/len(improvable_dockings) * 100, 1), '% of which degraded.')
print('Improved dockings:')
improved_dockings.drop(columns='MinRMSD').sort_values(by='DeltaRMSD', ascending=False, ignore_index=True)
# ## The big picture: Did rescoring improve overall docking results (Analysis 1)?
# To further compare the rankings, count top poses with an RMSD in several intervals. This is based on the premise, that the docking program is able to produce good poses, but the scoring function fails to rank them first. If rescoring would do better than the prior used scoring function, the amount of poses with low RMSD would increase after rescoring.
# +
# Top-ranked poses according to the docking program vs. the rescoring.
top_docking_poses = docking[docking.PoseRank == 1]
n_dockings = len(top_docking_poses)
top_rescoring_poses = combined[combined.AffinityRank == 1]
# Upper bounds of the half-open RMSD bins [threshold - 0.5, threshold).
thresholds = [0.5, 1.0, 1.5, 2.0, 2.5]
counts_docking = []
counts_rescoring = []
for threshold in thresholds:
    # Count top poses whose RMSD falls into the current bin. A single
    # combined boolean mask replaces the original chained
    # frame[mask].loc[other_mask] indexing (which relied on implicit index
    # alignment between differently-sized objects) and the positional
    # .count()[0] lookup.
    docking_mask = (top_docking_poses.RMSD >= threshold - 0.5) & (top_docking_poses.RMSD < threshold)
    counts_docking.append(int(docking_mask.sum()))
    rescoring_mask = (top_rescoring_poses.RMSD >= threshold - 0.5) & (top_rescoring_poses.RMSD < threshold)
    counts_rescoring.append(int(rescoring_mask.sum()))
pd.DataFrame({'RMSDThreshold': thresholds, 'TopPoseCountDocking': counts_docking, 'TopPoseCountRescoring': counts_rescoring})
# -
print(f'Before rescoring, {round(sum(counts_docking)/n_dockings * 100, 1)} % of dockings have a top pose with RMSD<2.5 A.')
print(f'After rescoring, {round(sum(counts_rescoring)/n_dockings * 100, 1)} % of dockings have a top pose with RMSD<2.5 A.')
# ## A closer look: Ranking of pose with lowest RMSD (Analysis 2)
# This is an analysis of the entire docking pipeline. Here we count what could be called correctly reproduced binding modes: the best pose with an RMSD<2.5 A ranked first.
# +
# Dockings where the docking program's top pose is also the lowest-RMSD pose ...
top_docking_poses = combined[(combined.PoseRank == 1) & (combined.RMSDRank == 1)]
# ... and where the rescoring's top pose is the lowest-RMSD pose.
top_rescoring_poses = combined[(combined.AffinityRank == 1) & (combined.RMSDRank == 1)]
thresholds = [0.5, 1.0, 1.5, 2.0, 2.5]
counts_docking = []
counts_rescoring = []
for threshold in thresholds:
    # Same binning as in Analysis 1: count poses with RMSD in
    # [threshold - 0.5, threshold), using one combined mask instead of
    # chained boolean indexing and positional .count()[0].
    docking_mask = (top_docking_poses.RMSD >= threshold - 0.5) & (top_docking_poses.RMSD < threshold)
    counts_docking.append(int(docking_mask.sum()))
    rescoring_mask = (top_rescoring_poses.RMSD >= threshold - 0.5) & (top_rescoring_poses.RMSD < threshold)
    counts_rescoring.append(int(rescoring_mask.sum()))
pd.DataFrame({'RMSDThreshold': thresholds, 'LowestRMSDTopPoseCountDocking': counts_docking, 'LowestRMSDTopPoseCountRescoring': counts_rescoring})
# -
print(f'Before rescoring, {round(sum(counts_docking)/n_dockings * 100, 1)} % of dockings correctly reproduced the binding mode.')
print(f'After rescoring, {round(sum(counts_rescoring)/n_dockings * 100, 1)} % of dockings correctly reproduced the binding mode.')
# ## Validate prediction with reference ligands
# By including the reference ligand into the affinity ranking, we can validate the affinity prediction. If it works well, the reference ligand should rank first. To make the validation a little less strict, exclude those dockings, where the top pose (affinity-wise) already has an RMSD<1 A. Below all reference ligands with a higher predicted affinity than the top affinity pose are shown.
# +
refligs.rename(columns={'PredictedBindingAffinity': 'RefligBindingAffinity'}, inplace=True)
# Keep only dockings whose top affinity pose is not already close (RMSD >= 1 A).
refligs = refligs.merge(stats[stats.TopAffinityRMSD >= 1])
# Compare affinity of top rescored pose with ref. ligand binding affinity
refligs = refligs.merge(combined[['Receptor', 'PredictedBindingAffinity']][combined.AffinityRank == 1].rename(columns={'PredictedBindingAffinity': 'MaxPoseAffinity'}))
# Reference ligands that outscore every docking pose of their receptor.
top_ranked_refligs = refligs.loc[refligs.RefligBindingAffinity > refligs.MaxPoseAffinity].reset_index(drop=True)
n_refligs_should_rank_first = len(refligs)
print(n_refligs_should_rank_first, 'dockings have a top affinity pose with an RMSD>=1 A.')
print(f'{round(len(top_ranked_refligs)/n_refligs_should_rank_first * 100, 1)} % of their reference ligands have a higher predicted binding affinity than all corresponding docking poses:')
top_ranked_refligs[['Receptor', 'RefligBindingAffinity', 'MaxPoseAffinity']]
| notebooks/rescoring.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="joJV4qgvanUI"
# # ConfusionMatrix, ROC, AUC, MAE, RMSE
# ##By <NAME>, CE18
#
# ###I have taken the example of symptomatic and asymptomatic patients, and their COVID-19 results, and analysed the data accordingly
# + id="N7c9eLS73mqd" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="c1f329e2-7c63-4f7f-a242-2a7ac9cc290d"
# __future__ imports must be the first statement of the compiled unit,
# so it is moved to the top (a no-op on Python 3 but kept for fidelity).
from __future__ import absolute_import
try:
    import pycm
except ImportError:  # only catch a missing package, not every error
    # !pip install -q pycm
    import pycm
from pycm import *
# !pip install scikit-plot
import matplotlib.pyplot as plt
# numpy and pandas are used in later cells (np.arange, pd.Series,
# pd.crosstab) but were previously never imported in this notebook.
import numpy as np
import pandas as pd
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import load_digits as load_data
import scikitplot as skplt
from sklearn import metrics
from sklearn.metrics import mean_squared_error
from math import sqrt
from sklearn.metrics import mean_absolute_error
# + [markdown] id="3iRSrzM_389V"
# ## 1 Confusion Matrix
# Description:
#
#
# I have taken COVID19 testing example as it seems fairly relevant at this point of time, here we can visualize and analyze the data of the patients who were asymtomatic and still got Corona.
#
# P.S. PyCM is a gem for this type of data visualization
# + id="E5oVoILd6x_U" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="46312b24-5125-4511-a1bf-f1788b5c198d"
# Actual COVID-19 test outcome (1 = positive) for 24 patients ...
COVID19_TestResult = [1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1]
# ... and whether each patient was symptomatic, used here as the "prediction".
COVID19_Symtomatic = [1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0]
ConMat = ConfusionMatrix(actual_vector=COVID19_TestResult, predict_vector=COVID19_Symtomatic) # Create CM From Data
print(ConMat)
# + id="bshIQ03y7kuI" colab={"base_uri": "https://localhost:8080/", "height": 345} outputId="0e37a4a1-833c-4eb1-a7bf-e41bf2057089"
def plot_confusion_matrix(df_confusion, title='Confusion matrix', cmap='Blues'):
    """Render a confusion-matrix DataFrame as a colour-coded image.

    Parameters
    ----------
    df_confusion : pandas.DataFrame
        Cross-tabulated counts (e.g. from ``pd.crosstab``); row/column
        labels are used for the axis ticks.
    title : str
        Currently unused — title drawing is intentionally commented out.
    cmap : str or matplotlib colormap
        Colormap for the cells. The default 'Blues' matches the value that
        was previously hard-coded; the parameter is now actually honoured
        instead of being silently ignored.
    """
    plt.matshow(df_confusion, cmap=cmap)  # imshow
    #plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(df_confusion.columns))
    plt.xticks(tick_marks, df_confusion.columns, rotation=45)
    plt.yticks(tick_marks, df_confusion.index)
    #plt.tight_layout()
    plt.ylabel(df_confusion.index.name)
    plt.xlabel(df_confusion.columns.name)
# Same data as above, as named Series so crosstab can label the axes.
COVID19_TestResult = pd.Series([1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1],name='Result')
COVID19_Symtomatic = pd.Series([1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0],name='Symtomatic')
# margins=True appends an 'All' row/column holding the totals.
COVID19 = pd.crosstab(COVID19_TestResult, COVID19_Symtomatic, rownames=['Result'], colnames=['Symptomatic'], margins=True)
print(COVID19)
plot_confusion_matrix(COVID19)
# + [markdown] id="yOBDJ4FoKbu3"
# ## 2 ROC curve, AUC score
# Description:
#
# 1 In the first example, I have used my own dataset as given in the above task.
# The AUC score is around 87 which is not that bad.
#
# 2 In the second example, I have taken the load_digits datasets from sklearn
# + id="GYAzKjGhK35Z" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="60a9d32b-3c3e-49bb-a5db-0338d547db7f"
# Larger synthetic cohort: test outcomes (used as the score) ...
COVID19_TestResult = [1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0,1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0,1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1,1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1,]
# ... and symptom status (used as the ground truth).
COVID19_Symtomatic = [1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0,1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0,1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0,1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0,]
# roc_curve signature is (y_true, y_score): here symptoms play the role of
# the true label and the test result that of the predicted score.
fpr, tpr, thresholds = metrics.roc_curve(COVID19_Symtomatic,COVID19_TestResult)
plt.plot(fpr, tpr)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.title('ROC curve for COVID19')
plt.xlabel('False Positive Rate (1 - Specificity)')
plt.ylabel('True Positive Rate (Sensitivity)')
plt.grid(True)
print("AUC SCORE :",metrics.roc_auc_score(COVID19_Symtomatic,COVID19_TestResult))
# + id="x83JSbZWMsHU" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="cf6551af-80eb-45fd-d0b1-df2107f79847"
# Second example: fit a Gaussian naive Bayes on the sklearn digits data set
# and plot one-vs-rest ROC curves from the predicted class probabilities.
# NOTE(review): the model is evaluated on its own training data here.
X, y = load_data(return_X_y=True)
nb = GaussianNB()
nb.fit(X, y)
probas = nb.predict_proba(X)
skplt.metrics.plot_roc(y_true=y, y_probas=probas)
plt.show()
#print(metrics.roc_auc_score(y, probas))
# + [markdown] id="PaikGUIBRpO2"
# ## 3 Mean Absolute Error
# Description:
#
# Nothing much to discuss, just applied MAE to my dataset
# + id="i2eHymA6Ry8M" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c33dbe45-e025-4acf-d0f8-064cf7c6f91a"
# Mean absolute error between actual results and symptom-based predictions.
mean_absolute_error(COVID19_TestResult,COVID19_Symtomatic)
# + [markdown] id="z5EopLlXWtHA"
# ## 4 Root Mean Squared Error
# Description:
#
# Nothing much to discuss, just applied RMSE to my dataset
# + id="f2wQhLWjWMxX" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b0b6dc37-4ff0-46a2-93c0-97518453bb52"
# Root mean squared error between symptom status and test results.
# Bug fix: the original called sqrt() on top of squared=False, which
# already returns the RMSE — i.e. it computed sqrt(RMSE). Taking the
# square root of the (default) MSE yields the RMSE exactly once.
sqrt(mean_squared_error(COVID19_Symtomatic,COVID19_TestResult))
| confusionMatrix_ROC_AUC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 阅读本文大概需要 3.6 分钟。
#
# 原文:https://fastapi.tiangolo.com/alternatives/
#
# 作者:tiangolo,FastAPI 的创作者,github star 10.1k
#
# 翻译:somenzz
#
# 侵删
#
# 本文讲述了什么启发了 FastAPI 的诞生,它与其他替代框架的对比,以及从中汲取的经验。
#
# **简介**
#
# -----------
#
# 如果不是基于前人的成果,FastAPI将不会存在。在 FastAPI之前,前人已经创建了许多工具 。
#
# 几年来,我一直在避免创建新框架。首先,我尝试使用许多不同的框架,插件和工具来解决 **FastAPI**涵盖的所有功能。
#
# 但是有时候,没有更好的办法,除了创建具有所有这些功能的东西,从以前的工具中汲取最佳创意,并以最佳方式将它们组合起来,使用以前甚至没有的语言功能(Python 3.6 + 类型提示)。
#
# **在此之前的一些框架:**
#
# ------------------
#
# **Django**
#
# Django 是最流行的 Python 框架,受到广泛信任。它用于构建 Instagram 之类的系统。
#
# 它与关系数据库(例如 MySQL 或 PostgreSQL)相对紧密地结合在一起,因此,以 NoSQL 数据库(例如 Couchbase,MongoDB,Cassandra 等)作为 django 的主存储引擎并不是一件容易的事。
#
# 创建它是为了在后端生成 HTML,而不是创建现代前端(例如 React,Vue.js 和 Angular)或与其通信的其他系统(例如 IoT 设备)使用的 API 。
#
# **Django REST Framework**
#
# Django REST Framework 是一个非常灵活的框架,用于构建 Web API,以改善 Django 的 API 功能。
#
# Mozilla,Red Hat 和 Eventbrite 等许多公司都使用它。
#
# Django REST Framework 是第一个自动生成 API 文档的框架,自动生成 API 的接口文档是 FastAPI 框架诞生的缘由之一。
#
# **注意**
#
# Django REST Framework 框架的作者是 <NAME> ,<NAME> 也创造了 Starlette 和 Uvicorn。FastAPI 正是建立在 Starlette 和 Uvicorn 的基础之上。
#
# **启发 FastAPI 地方**:有一个自动 API 文档,Web 用户界面可供用户测试。
#
# **Flask**
#
# Flask 是一种轻量级的框架,它不包括数据库集成,也没有很多附带的功能,虽然这些在 Django 那里是默认提供的。
#
# 这个简单性和灵活的特性允许使用 NoSQL 数据库作为主数据存储。尽管文档在某些方面有所技术性,但它非常简单,因此学习起来相对直观。
#
# 它还常用于其他不需要数据库,用户管理或 Django 中预建功能的应用程序。尽管其中许多功能都可以通过添加插件来实现。
#
# 各个模块之间的解耦,使之成为一个 “微框架”,可以通过扩展来精确地提供所需的东西,这是我想要保留的一项关键功能。
#
# 考虑到 Flask 的简单性,它似乎很适合构建 API。接下来要找到的是 Flask 的 “Django REST Framework”。
#
# **启发 FastAPI 地方:**成为一个微框架。易于混合和匹配所需的工具和零件。拥有一个简单易用的路由系统。
#
# **Requests**
#
# FastAPI 实际上不是 Requests 的替代工具。它们的适用范围非常不同。实际上,在 FastAPI 应用程序内部使用 Requests 是很常见的。
#
# 但是,FastAPI 从 Requests 中获得了很多启发。Requests 是一个与 API(作为客户端)进行交互的库,而 FastAPI 是一个用于构建 API(作为服务器)的库。它们或多或少地处于相反的末端,彼此互补。Requests 具有非常简单直观的设计,非常易于使用,并具有合理的默认值。但同时,它非常强大且可自定义。
#
# 这就是为什么,如官方网站所述:
#
# > Requests 是有史以来下载次数最多的 Python 软件包之一
#
# 您的使用方式非常简单。例如,要发出 GET 请求,您可以编写:
#
# >response = requests.get("http://example.com/some/url")
#
# FastAPI 对应的 API 路径操作如下所示:
#
# ```py
# @app.get("/some/url")
# def read_url():
# return {"message": "Hello World"}
# ```
#
# 它们使用起来的相似之处如 `requests.get(...)` 和 `@app.get(...)`。
#
# **启发 FastAPI 地方:**
#
# * 拥有简单直观的 API。
#
# * 直接,直观地使用 HTTP 方法名称(操作)。
#
# * 具有合理的默认值,功能强大的自定义。
#
# **Swagger****/****OpenAPI**
#
#
#
# 我想要 Django REST Framework 的主要功能是自动 API 文档。然后我发现 API 文档有一个标准叫 Swagger ,它使用 JSON 或 YAML 来描述。
#
# 并且 Swagger API 的 Web 用户界面已经被人创建出来了。因此,能够为 API 生成 Swagger 文档将允许自动使用此 Web 用户界面。
#
# 在某个时候,Swagger 被授予 Linux Foundation,将其重命名为 OpenAPI。这就是为什么在谈论版本 2.0 时通常会说 “Swagger”,对于版本 3 + 来说是 “ OpenAPI”。
#
# **启发 FastAPI 地方:**
#
# 为 API 规范采用开放标准,而不是使用自定义架构。并集成基于标准的用户界面工具:
#
# * Swagger UI
#
# * ReDoc
#
#
# 选择这两个是因为它们相当受欢迎且稳定,但是通过快速搜索,您可以找到数十个 OpenAPI 的其他替代用户界面(可以与 FastAPI 一起使用)。
#
# **Flask REST frameworks**
#
# 有几个 Flask REST frameworks ,但经过调查和试用,我发现,不少项目都停产或放弃,还存在有一些长期的问题,使得它们并不适合解决前面的问题。
#
# **Marshmallow**
#
# 一个由 API 系统所需的主要功能是数据的序列化,就是把数据从编程语言中的对象转称成可以在网络上传输的对象,比如数据库中的数据转换为 JSON 对象。将 Python 中的 datetime 对象转为字符串,等等。
#
# 另外一个功能就是数据的验证,确保传入的参数是有效的,例如,有些字段是一个 int,类型而不是字符串,这在检测输入数据是非常有用的。
#
# 如果没有数据验证,你就必须用手工写代码来完成所有的检查。
#
# 这两点功能就是 Marshmallow 所提供的,这些是一个伟大的库,之前我经常使用它。
#
# Marshmallow 产生之前 Python 还没有加入类型提示。因此,定义一个 schema 你需要引入 Marshmallow 特定的 utils 的和类。
#
# **启发 FastAPI 地方:**
#
# 使用代码来定义提供的数据类型和验证的 schema,验证都是自动化的。
#
# **Webargs**
#
# API 框架需要的另一大功能点是解析从前端发送的请求数据。Webargs (包括 Flask) 是提供这一功能的工具,它采用 Marshmallow 做数据验证。Webargs 和 Marshmallow 的作者是同一个开发人员。这是一个伟大的工具,在 FastAPI 诞生之前,我一直在用它。
#
# **启发 FastAPI 地方:**
#
# 对输入的请求数据的自动验证。
#
# (未完待续)
| 框架/FastAPI/FastAPI框架诞生的缘由(上).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # **Traffic Sign Recognition**
#
# **Objective : **
# The goals / steps of this project are the following:
# * Load the data set (see below for links to the project data set)
# * Explore, summarize and visualize the data set
# * Design, train and test a model architecture
# * Use the model to make predictions on new images
# * Analyze the softmax probabilities of the new images
# * Summarizing the results
#
# [//]: # (Image References)
#
# [image1]: ./examples/visualization.jpg "Visualization"
# [image2]: ./examples/grayscale.jpg "Grayscaling"
# [image3]: ./examples/random_noise.jpg "Random Noise"
# [image4]: ./examples/RGB_Image.jpg "RGB_Image"
# [image5]: ./examples/Gray_Image.jpg "Gray_Image"
# [image6]: ./examples/Data_Visualization_1.jpg "Data_Visualization_1"
# [image7]: ./examples/Data_Visualization_2.jpg "Data_Visualization_2"
#
# [Test1]: ./Test_Images/3.jpg "1"
# [Test2]: ./Test_Images/4.jpg "2"
# [Test3]: ./Test_Images/11.jpg "3"
# [Test4]: ./Test_Images/12.jpg "4"
# [Test5]: ./Test_Images/13.jpg "5"
# [Test6]: ./Test_Images/14.jpg "6"
# [Test7]: ./Test_Images/25.jpg "7"
# [Test8]: ./Test_Images/33.jpg "8"
# [Test9]: ./Test_Images/38.jpg "9"
#
# [Result1]: ./examples/Prediction_1.jpg "P_1"
# [Result2]: ./examples/Prediction_2.jpg "P_2"
# [Result3]: ./examples/Prediction_3.jpg "P_3"
# [Result4]: ./examples/Prediction_4.jpg "P_4"
# [Result5]: ./examples/Prediction_5.jpg "P_5"
#
#
# ## Rubric Points
# ### Here I will consider the [rubric points](https://review.udacity.com/#!/rubrics/481/view) individually and describe how I addressed each point in my implementation.
#
# ---
#
# ### Data Set Summary & Exploration
#
# #### 1. Provide a basic summary of the data set. In the code, the analysis should be done using python, numpy and/or pandas methods rather than hardcoding results manually.
#
# I used the pandas library to calculate summary statistics of the traffic
# signs data set:
#
# * The size of training set is ?
# > *Number of training examples = 34799*
# * The size of the validation set is ?
# >*Number of testing examples = 4410*
# * The size of test set is ?
# >*Number of testing examples = 12630*
# * The shape of a traffic sign image is ?
# >*Image data shape = (32, 32, 3)*
# * The number of unique classes/labels in the data set is ?
# >*Number of classes = 43*
#
# #### 2. Include an exploratory visualization of the dataset.
#
# * Visualizing the distribution of data by plotting output classes against the total number of data present in that class
#
# ![alt text][image6]
#
# > From the above plot, It is clear that the output classes are not equally distributed in the training data set.
#
# >> ***Solution*** : By performing `Data Augmentation` (example: rotating the image, changing brightness), we can achieve equal distribution
#
# * Viewing the image and understanding the relationship with the output class
#
# ![alt text][image7]
#
#
# ### Design and Test a Model Architecture
#
# #### 1. Describe how you preprocessed the image data. What techniques were chosen and why did you choose these techniques? Consider including images showing the output of each preprocessing technique. Pre-processing refers to techniques such as converting to grayscale, normalization, etc. (OPTIONAL: As described in the "Stand Out Suggestions" part of the rubric, if you generated additional data for training, describe why you decided to generate additional data, how you generated the data, and provide example images of the additional data. Then describe the characteristics of the augmented training set like number of images in the set, number of images for each class, etc.)
#
# Since the images are taken in different environments, we `pre-process` our data set before feeding it into the neural network. In this project, the two pre-processing techniques below are followed.
# > 1. Normalization
# >>By performing normalization, we can achieve zero mean and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project.
# > 2. RGB to Gray
# >> By reducting the color channel, we can reduce the computational cost and increase the accuracy in detecting the shape rather than color.
# >
# > RGB Image
# >> ![alt text][image4]
# >
# > Gray Image
# >> ![alt text][image5]
#
#
# #### 2. Describe what your final model architecture looks like including model type, layers, layer sizes, connectivity, etc.) Consider including a diagram and/or table describing the final model.
#
# I took LeNet architecture as base and implemented the Traffic sign classifier
#
# | Layer | Description |
# |:---------------------:|:---------------------------------------------:|
# | Input | 32x32x1 Gray image |
# | Convolution 5x5 | 1x1 stride, valid padding, outputs 28x28x6 |
# | ReLU Activation |
# | Max pooling | 2x2 stride, 2x2 filter, outputs 14x14x6 |
# | Convolution 5x5 | 1x1 stride, valid padding, outputs 10x10x16 |
# | ReLU Activation |
# | Max pooling | 2x2 stride, 2x2 filter, outputs 5x5x16 |
# | Fully connected#1 | 400x120 input, 120x84 output |
# | ReLU Activation |
# | Drop out | Drop out percentage 65% |
# | Fully connected#2 | 120x84 input, 84x43 output |
# | ReLU Activation |
# | Drop out | Drop out percentage 85% |
# | Fully connected#3 | 120x84 input, 84x43 output |
#
# #### 3. Describe how you trained your model. The discussion can include the type of optimizer, the batch size, number of epochs and any hyperparameters such as learning rate.
#
# **Model 1:** Epochs: 20, Learning rate: 0.001
#
# | Layer | Description |
# |:---------------------:|:---------------------------------------------:|
# | Input | 32x32x1 Gray image |
# | Convolution 5x5 | 1x1 stride, valid padding, outputs 28x28x6 |
# | SoftMax Activation |
# | Average pooling | 2x2 stride, 2x2 filter, outputs 14x14x6 |
# | Convolution 5x5 | 1x1 stride, valid padding, outputs 10x10x16 |
# | SoftMax Activation |
# | Average pooling | 2x2 stride, 2x2 filter, outputs 5x5x16 |
# | Fully connected#1 | 400x120 input, 120x84 output |
# | SoftMax Activation |
# | Fully connected#2 | 120x84 input, 84x43 output |
# | SoftMax Activation |
# | Fully connected#3 | 120x84 input, 84x43 output |
#
# In this model, the validation accuracy rises only to ~9%. The accuracy level is too low. We could increase accuracy by increasing the number of epochs, but I believe that before tuning the epochs, we can change the model. As a first step, I have changed the following
# > 1. Activation function as "ReLU"
# > 2. Changed the pooling method from Average to Max
#
# **Model 2:** Epochs: 20, Learning rate: 0.001
#
# | Layer | Description |
# |:---------------------:|:---------------------------------------------:|
# | Input | 32x32x1 Gray image |
# | Convolution 5x5 | 1x1 stride, valid padding, outputs 28x28x6 |
# | ReLU Activation |
# | Max pooling | 2x2 stride, 2x2 filter, outputs 14x14x6 |
# | Convolution 5x5 | 1x1 stride, valid padding, outputs 10x10x16 |
# | ReLU Activation |
# | Max pooling | 2x2 stride, 2x2 filter, outputs 5x5x16 |
# | Fully connected#1 | 400x120 input, 120x84 output |
# | ReLU Activation |
# | Fully connected#2 | 120x84 input, 84x43 output |
# | ReLU Activation |
# | Fully connected#3 | 120x84 input, 84x43 output |
#
# In this model, The validation accuracy is raised upto ~70. To further increase the accuracy, Drop out has been introduced after the fully connected layer.
#
#
# **Model 3:** Epochs: 20, Learning rate: 0.001
#
# | Layer | Description |
# |:---------------------:|:---------------------------------------------:|
# | Input | 32x32x1 Gray image |
# | Convolution 5x5 | 1x1 stride, valid padding, outputs 28x28x6 |
# | ReLU Activation |
# | Max pooling | 2x2 stride, 2x2 filter, outputs 14x14x6 |
# | Convolution 5x5 | 1x1 stride, valid padding, outputs 10x10x16 |
# | ReLU Activation |
# | Max pooling | 2x2 stride, 2x2 filter, outputs 5x5x16 |
# | Fully connected#1 | 400x120 input, 120x84 output |
# | ReLU Activation |
# | Drop out | Drop out percentage 65% |
# | Fully connected#2 | 120x84 input, 84x43 output |
# | ReLU Activation |
# | Drop out | Drop out percentage 85% |
# | Fully connected#3 | 120x84 input, 84x43 output |
# With this model, I got around 89%. By tuning the hyper parameters, we can acheive the desired output. So, this model is selected for training.
#
# #### 4. Describe the approach taken for finding a solution and getting the validation set accuracy to be at least 0.93. Include in the discussion the results on the training, validation and test sets and where in the code these were calculated. Your approach may have been an iterative process, in which case, outline the steps you took to get to the final solution and why you chose those steps. Perhaps your solution involved an already well known implementation or architecture. In this case, discuss why you think the architecture is suitable for the current problem.
#
# **Type 1 :** Epochs: 20, Learning rate: 0.001
# The validation accuracy is starts in ~65% and goes upto ~90%. To achieve the good accuracy, we can perform the following ***TWO ways***. They are,
# > 1. Increase the Epochs
# > 2. Incraese the Learning Rate
#
# **Type 2:** Epochs: 50, Learning rate: 0.001
# The validation accuracy saturates at ~93% after 20 epochs. The accuracy fluctuates after 30 epochs, which may be due to overfitting. By reducing the epochs and increasing the learning rate, we can further improve the accuracy.
#
#
# **Type 3:** Epochs: 20, Learning rate: 0.004
# After trying different combinations of epochs & learning rate, the following combination best fits my training data.
# > 1. Epochs : 20
# > 2. learning rate : 0.004
#
# My final model results were:
# * training set accuracy of ?
# >* `Train Accuracy = 99.569`
# * validation set accuracy of ?
# >* `Valid Accuracy = 95.215`
# * test set accuracy of ?
# >* `Test Accuracy = 93.658`
#
# If an iterative approach was chosen:
# * What was the first architecture that was tried and why was it chosen?
# > I started with LeNet Architecture. Because I have work experience in that and `LeNet architecture` worked well on MNIST data set. Hence I adapted the same to classify the traffic sign images.
#
# * What were some problems with the initial architecture?
# > I found difficulty in finding the activation function and pooling methods. By experimenting different combination, I solved this issue.
#
# * How was the architecture adjusted and why was it adjusted?
# > There were 2 major adjustment done while building the model.
# > 1. Finding the optimal activation function - To improve the `train_accuracy`
# > 2. Introducing drop out function after the fully connected layer - To make neural network less depend on train data. In turn, this will increase the `Validation_accuracy`
#
# * Which parameters were tuned? How were they adjusted and why?
# > I have changed the learning rate and epochs. They are inversely propotional to each other. I chose large the learning rate with small epochs.
#
# * What are some of the important design choices and why were they chosen? For example, why might a convolution layer work well with this problem? How might a dropout layer help with creating a successful model?
# >1. **Convolution layer** : This uses the weight sharing which helps in improving translation invariance.
# >2. **Drop Out** : This will make our neural network depends on the features of the data rather than the pixel value.
#
#
# ### Test a Model on New Images
#
# #### 1. Choose five German traffic signs found on the web and provide them in the report. For each image, discuss what quality or qualities might be difficult to classify.
#
# Here are Nine German traffic signs that I found on the web:
#
# ![alt text][Test1] ![alt text][Test2] ![alt text][Test3]
# ![alt text][Test4] ![alt text][Test5] ![alt text][Test6]
# ![alt text][Test7] ![alt text][Test8] ![alt text][Test9]
#
# The first image might be difficult to classify because ...
#
# #### 2. Discuss the model's predictions on these new traffic signs and compare the results to predicting on the test set. At a minimum, discuss what the predictions were, the accuracy on these new predictions, and compare the accuracy to the accuracy on the test set (OPTIONAL: Discuss the results in more detail as described in the "Stand Out Suggestions" part of the rubric).
#
# Here are the results of the prediction:
#
# | Image | Prediction |
# |:---------------------:|:---------------------------------------------:|
# | Right-of-way at the next intersection| Right-of-way at the next intersection|
# | Priority road | Ahead only |
# | Yield | Yield |
# | Stop | Priority road |
# | Road work | Ahead only |
# | Speed limit (60km/h) | Speed limit (60km/h) |
# | Turn right ahead | Speed limit (30km/h) |
# | Keep right | Keep right |
# | Speed limit (70km/h) | Speed limit (30km/h) |
#
#
# The model was able to correctly guess 4 of the 9 traffic signs, which gives an accuracy of 44.44%.
#
#
# #### 3. Describe how certain the model is when predicting on each of the five new images by looking at the softmax probabilities for each prediction. Provide the top 5 softmax probabilities for each image along with the sign type of each probability. (OPTIONAL: as described in the "Stand Out Suggestions" part of the rubric, visualizations can also be provided such as bar charts)
#
# The code for making predictions on my final model is located in the 21th cell of the Ipython notebook.
#
# The following image will depicts the top 5 prediction of each input.
# ![alt text][Result1]
# ![alt text][Result2]
# ![alt text][Result3]
# ![alt text][Result4]
# ![alt text][Result5]
# ### Analysis of output
# The following `Five` images are not classified correctly
# > 1. Priority Road
# > 2. Road Work
# > 3. Stop
# > 4. Turn Right Ahead
# > 5. Speed Limit(70Km/h)
#
# ##### Major Reasons for failure:
# 1. Our training data set has traffic sign at the center part of the image where as our chosen set has traffic sign in different part of the image.
# 2. Image quality is not good in some photos.(Road Work)
# 3. Input images are rotated(i.e., the photos taken in different angle)
# 4. Brightness level of image set is varied
# 5. Data is not uniformly spread across the training data set(i.e., if the count of the a particular label/class is small compare to other label in our training data, then there is possibility of mis-classification of that label)
#
| WriteUp_MD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Lasso Demo
#
#
# Show how to use a lasso to select a set of points and get the indices
# of the selected points. A callback is used to change the color of the
# selected points
#
# This is currently a proof-of-concept implementation (though it is
# usable as is). There will be some refinement of the API.
#
# +
from matplotlib import colors as mcolors, path
from matplotlib.collections import RegularPolyCollection
import matplotlib.pyplot as plt
from matplotlib.widgets import Lasso
import numpy as np
class Datum:
    """A 2-D point that remembers whether it is part of the lasso selection."""

    # Face colours for selected / unselected points.
    colorin = mcolors.to_rgba("red")
    colorout = mcolors.to_rgba("blue")

    def __init__(self, x, y, include=False):
        self.x = x
        self.y = y
        # Selected points are drawn with `colorin`, the rest with `colorout`.
        self.color = self.colorin if include else self.colorout
class LassoManager:
    """Draw the data as a polygon collection in `ax` and manage an
    interactive lasso selection over it: pressing the mouse starts a
    Lasso widget, releasing it recolours the enclosed points."""

    def __init__(self, ax, data):
        self.axes = ax
        self.canvas = ax.figure.canvas
        self.data = data
        self.Nxy = len(data)  # number of points
        facecolors = [d.color for d in data]
        self.xys = [(d.x, d.y) for d in data]
        # One hexagon per datum; `offsets` positions them in data coordinates.
        self.collection = RegularPolyCollection(
            6, sizes=(100,),
            facecolors=facecolors,
            offsets=self.xys,
            transOffset=ax.transData)
        ax.add_collection(self.collection)
        # Start a lasso whenever a mouse button is pressed inside the figure.
        self.cid = self.canvas.mpl_connect('button_press_event', self.onpress)

    def callback(self, verts):
        """Invoked by the Lasso widget with the polygon vertices when the
        mouse button is released; recolour points inside the polygon."""
        facecolors = self.collection.get_facecolors()
        p = path.Path(verts)
        ind = p.contains_points(self.xys)
        for i in range(len(self.xys)):
            if ind[i]:
                facecolors[i] = Datum.colorin
            else:
                facecolors[i] = Datum.colorout
        self.canvas.draw_idle()
        # Release the drawing lock acquired in onpress and drop the widget.
        self.canvas.widgetlock.release(self.lasso)
        del self.lasso

    def onpress(self, event):
        """Begin a lasso on mouse press, unless another widget holds the
        drawing lock or the click happened outside the axes."""
        if self.canvas.widgetlock.locked():
            return
        if event.inaxes is None:
            return
        self.lasso = Lasso(event.inaxes,
                           (event.xdata, event.ydata),
                           self.callback)
        # acquire a lock on the widget drawing
        self.canvas.widgetlock(self.lasso)
if __name__ == '__main__':
    # Fixed seed so the scattered demo points are reproducible between runs
    np.random.seed(19680801)
    data = [Datum(*xy) for xy in np.random.rand(100, 2)]
    ax = plt.axes(xlim=(0, 1), ylim=(0, 1), autoscale_on=False)
    ax.set_title('Lasso points using left mouse button')
    # Keep a reference so the manager's event handlers are not garbage-collected
    lman = LassoManager(ax, data)
    plt.show()
| matplotlib/gallery_jupyter/event_handling/lasso_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D4_DeepLearning1/student/W3D4_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text"
# # Neuromatch Academy: Week 3, Day 4, Tutorial 2
# # Deep Learning: Encoding Neural Responses
#
# **Content creators**: <NAME>, <NAME>
#
# **Content reviewers**: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
#
#
#
# + [markdown] colab_type="text"
# ---
# #Tutorial Objectives
#
# In this tutorial, we'll use deep learning to build an encoding model from stimuli to neural activity. Specifically, we'll be looking at the activity of ~20,000 neurons in mouse primary visual cortex responding to oriented gratings recorded in [this study](https://www.biorxiv.org/content/10.1101/679324v2.abstract).
#
# Because the stimuli are 1D and the neurons respond with smooth tuning curves, we will model the neural responses as a 1D convolutional operation on the stimulus.
#
# In this tutorial, we will
# * Understand the basics of convolution
# * Build and train a convolutional neural network to predict neural responses using PyTorch
# * Visualize and analyze its internal representations
# + [markdown] colab_type="text"
# ---
# # Setup
#
# + cellView="both" colab={} colab_type="code"
import os
import numpy as np
import torch
from torch import nn
from torch import optim
from matplotlib import pyplot as plt
# + cellView="form" colab={} colab_type="code"
#@title Figure settings
# %config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# + cellView="form" colab={} colab_type="code"
#@title Data retrieval and loading
import hashlib
import requests
# Dataset: binned mouse V1 responses (Stringer et al., 2019), hosted on OSF
fname = "W3D4_stringer_oribinned6_split.npz"
url = "https://osf.io/p3aeb/download"
expected_md5 = "b3f7245c6221234a676b71a1f43c3bb5"
# Download once; verify integrity with an MD5 checksum before writing to disk
if not os.path.isfile(fname):
  try:
    r = requests.get(url)
  except requests.ConnectionError:
    print("!!! Failed to download data !!!")
  else:
    if r.status_code != requests.codes.ok:
      print("!!! Failed to download data !!!")
    elif hashlib.md5(r.content).hexdigest() != expected_md5:
      print("!!! Data download appears corrupted !!!")
    else:
      with open(fname, "wb") as fid:
        fid.write(r.content)
# + cellView="form" colab={} colab_type="code"
#@title Helper Functions
# Some helper functions
def load_data_split(data_name=fname):
  """Load mouse V1 data from Stringer et al. (2019)

  Data from study reported in this preprint:
  https://www.biorxiv.org/content/10.1101/679324v2.abstract

  These data comprise time-averaged responses of ~20,000 neurons
  to ~4,000 stimulus gratings of different orientations, recorded
  through calcium imaging. The responses have been normalized by
  spontaneous levels of activity and then z-scored over stimuli, so
  expect negative numbers. The responses were split into train and
  test and then each set were averaged in bins of 6 degrees.

  This function returns the relevant data (neural responses and
  stimulus orientations) in a torch.Tensor of data type torch.float32
  in order to match the default data type for nn.Parameters in
  Google Colab.

  It will hold out some of the trials when averaging to allow us to have test
  tuning curves.

  Args:
    data_name (str): filename to load

  Returns:
    resp_train (torch.Tensor): n_stimuli x n_neurons matrix of neural responses,
      each row contains the responses of each neuron to a given stimulus.
      As mentioned above, neural "response" is actually an average over
      responses to stimuli with similar angles falling within specified bins.
    resp_test (torch.Tensor): n_stimuli x n_neurons matrix of neural responses,
      each row contains the responses of each neuron to a given stimulus.
      As mentioned above, neural "response" is actually an average over
      responses to stimuli with similar angles falling within specified bins
    stimuli (torch.Tensor): n_stimuli x 1 column vector with orientation
      of each stimulus, in degrees. This is actually the mean orientation
      of all stimuli in each bin.
  """
  with np.load(data_name) as dobj:
    data = dict(**dobj)
  # Pull the train/test response matrices and the binned stimulus orientations
  resp_train = data['resp_train']
  resp_test = data['resp_test']
  stimuli = data['stimuli']
  # Return as torch.Tensor
  resp_train_tensor = torch.tensor(resp_train, dtype=torch.float32)
  resp_test_tensor = torch.tensor(resp_test, dtype=torch.float32)
  stimuli_tensor = torch.tensor(stimuli, dtype=torch.float32)
  return resp_train_tensor, resp_test_tensor, stimuli_tensor
def plot_tuning(ax, stimuli, respi_train, respi_test, neuron_index, linewidth=2):
  """Draw one neuron's tuning curve on *ax*: train in yellow, test in magenta.

  Args:
    ax: matplotlib Axes to draw into
    stimuli: stimulus orientations (degrees), the shared x-axis
    respi_train: the neuron's train-set responses per orientation bin
    respi_test: the neuron's test-set responses per orientation bin
    neuron_index (int): neuron id shown in the panel title
    linewidth (int): line width for both curves
  """
  # Train curve first, then test, so both splits share the orientation axis
  for responses, color in ((respi_train, 'y'), (respi_test, 'm')):
    ax.plot(stimuli, responses, color, linewidth=linewidth)
  ax.set_title('neuron %i' % neuron_index)
  ax.set_xlabel('stimulus orientation ($^o$)')
  ax.set_ylabel('neural response')
  ax.set_xticks(np.linspace(0, 360, 5))
  ax.set_ylim([-0.5, 2.4])
# from bayes day!
def my_gaussian(x_points, mu, sigma):
  """Evaluate a Gaussian at `x_points` and normalize it to sum to one.

  Args:
    x_points (numpy array of floats): points at which the gaussian is evaluated
    mu (scalar): mean of the Gaussian
    sigma (scalar): std of the gaussian

  Returns:
    (numpy array of floats): Gaussian values at `x_points`, scaled so the
    returned array sums to 1 (a discrete probability mass over `x_points`)
  """
  deviations = x_points - mu
  unnormalized = np.exp(-(deviations ** 2) / (2.0 * sigma ** 2))
  return unnormalized / unnormalized.sum()
def plot_conv(pad, stimulus, filter, conv_out):
  """ plot 1D convolution

  Draws three panels — the stimulus, the convolutional filter, and the
  convolutional unit activations — into a pre-existing figure.
  NOTE(review): relies on a module-level `fig` created by the caller
  before this function is invoked — confirm callers always do so.

  Args:
    pad: half-width of the filter (filter x-axis spans -pad..pad)
    stimulus: 1D stimulus over 360 orientation bins
    filter: 1D convolutional filter
    conv_out: convolution output; NaN entries (positions skipped by the
      stride) are excluded from the scatter panel
  """
  # plot stimulus
  ax = fig.add_subplot(1,3,1)
  ax.plot(np.arange(0, 360), stimulus, 'k')
  ax.set_title('stimulus')
  ax.set_xlabel('orientation ($^o$)')
  ax.set_ylabel('stimulus')
  # plot convolutional filter
  ax = fig.add_subplot(1,3,2)
  ax.plot(np.arange(-pad, pad), filter)
  ax.set_xlabel('orientation ($^o$)')
  ax.set_ylabel('magnitude')
  ax.set_title('convolutional filter')
  # plot convolutional output (only positions actually computed, i.e. non-NaN)
  ax = fig.add_subplot(1,3,3)
  n_units = (~np.isnan(conv_out)).sum()
  ax.scatter(np.arange(0,n_units),
             conv_out[~np.isnan(conv_out)], s=30,
             cmap='hsv', c=np.arange(0,n_units))
  ax.set_xlabel('convolutional unit')
  ax.set_ylabel('activation')
  ax.set_title('activations of\nconvolutional units')
def plot_example_activations(act):
  """ plot activations act and corresponding stimulus

  Three panels, one per example stimulus: a vertical black bar marks the
  stimulus bin, and each convolutional channel's activations are drawn as
  dots in a distinct color.

  Args:
    act: activations of convolutional layer (n_bins x conv_channels x n_bins)
  """
  ns = [10,25,40]  # example stimulus indices to display
  fig, axs = plt.subplots(1,3,figsize=(12,4))
  for k, (n, ax) in enumerate(zip(ns, axs.flatten())):
    # vertical bar spanning the activation range, at the stimulus position
    ax.plot(n * np.ones(2), [act.min()*1.15, act.max()*1.15], 'k', linewidth=4)
    ax.plot(act[n].T, '.', linewidth=2)
    ax.set_xlabel('convolutional unit')
    ax.set_ylabel('activation')
    ax.set_title('stim id %d'%n)
    leg = ['chan%d'%i for i in range(act.shape[1])]
    leg.insert(0, 'stim')
    n_units = act.shape[0]
    # NOTE(review): this inner loop reuses the name `k`, shadowing the outer
    # loop variable; harmless here because enumerate reassigns it each panel
    for k,s in enumerate(leg):
      if k==0:
        ax.text(((n+15)%n_units)/n_units, .9-k*.1, s, transform=ax.transAxes, color='k', ha='center')
      else:
        ax.text(((n+15)%n_units)/n_units, .9-k*.1, s, transform=ax.transAxes, color='C%d'%(k-1), ha='center')
def train(net, custom_loss, train_data, train_labels,
          test_data=None, test_labels=None,
          learning_rate=10, n_iter=500, L2_penalty=0., L1_penalty=0.):
  """Fit *net* with full-batch SGD (momentum 0.5), tracking train/test loss.

  Args:
    net (nn.Module): deep network whose parameters to optimize with SGD
    custom_loss: loss function; called as custom_loss(pred, target) or, when
      either regularization penalty is positive,
      custom_loss(pred, target, weights, L2_penalty, L1_penalty)
    train_data: training data (n_train x input features)
    train_labels: training labels (n_train x output features)
    test_data: optional test data (n_test x input features)
    test_labels: optional test labels (n_test x output features)
    learning_rate (float): learning rate for gradient descent
    n_iter (int): number of gradient descent iterations
    L2_penalty (float): magnitude of L2 penalty
    L1_penalty (float): magnitude of L1 penalty

  Returns:
    (np.ndarray, np.ndarray): per-iteration train and test loss; entries are
    NaN where the loss was not evaluated (test loss is only computed on the
    first iteration and every n_iter // 10 iterations thereafter).
  """
  optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.5)
  # NaN placeholders: only iterations that are actually evaluated get filled
  train_loss = np.nan * np.zeros(n_iter)
  test_loss = np.nan * np.zeros(n_iter)
  # Regularized losses take the output-layer weights as extra arguments
  regularized = L2_penalty > 0 or L1_penalty > 0
  for step in range(n_iter):
    # Forward pass on the full training set
    predictions = net(train_data)
    if regularized:
      weights = net.out_layer.weight
      loss = custom_loss(predictions, train_labels, weights, L2_penalty, L1_penalty)
    else:
      loss = custom_loss(predictions, train_labels)
    # Gradient step
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    train_loss[step] = loss.item()  # .item() detaches and yields a Python scalar
    # Report progress on the first iteration and then every n_iter // 10 steps
    if (step + 1) % (n_iter // 10) == 0 or step == 0:
      if test_data is not None and test_labels is not None:
        predictions = net(test_data)
        if regularized:
          loss = custom_loss(predictions, test_labels, weights, L2_penalty, L1_penalty)
        else:
          loss = custom_loss(predictions, test_labels)
        test_loss[step] = loss.item()
        print(f'iteration {step+1}/{n_iter} | train loss: {train_loss[step]:.4f} | test loss: {test_loss[step]:.4f}')
      else:
        print(f'iteration {step+1}/{n_iter} | train loss: {train_loss[step]:.4f}')
  return train_loss, test_loss
def plot_pred_weights(y_pred, y_train, y_test, weights):
  """ plot example neural response prediction + weights

  Three panels: (1) train/test/predicted responses across stimulus bins,
  (2) predicted vs. measured responses with an identity line, and
  (3) a heatmap of the fully-connected layer weights passed in.

  Args:
    y_pred: predicted response per stimulus bin for one neuron
    y_train: training responses for the same neuron
    y_test: test responses for the same neuron
    weights: slice of the fully-connected layer weight matrix to display
  """
  fig = plt.figure(figsize=(12,4))
  ax =fig.add_subplot(1,3,1)
  ax.plot(y_train, 'y', linewidth=1)
  ax.plot(y_test, 'm', linewidth=1)
  ax.plot(y_pred, 'g', linestyle='-', linewidth=3)
  ax.set_xlabel('stimulus bin')
  ax.set_ylabel('response')
  ax.text(0.1, 1.0, 'train', color='y', transform=ax.transAxes)
  ax.text(0.1, 0.9, 'test', color='m', transform=ax.transAxes)
  ax.text(0.1, 0.8, 'pred', color='g', transform=ax.transAxes)
  ax=fig.add_subplot(1,3,2)
  # identity line: a perfect prediction would fall on y = x
  ax.plot(y_train, y_train, 'k', lw=1)
  ax.scatter(y_train, y_pred, s=8, color='y')
  ax.scatter(y_test, y_pred, s=8, color='m')
  ax.set_xlabel('neural response')
  ax.set_ylabel('predicted response', color='g')
  ax.text(0.1, 1.0, 'train', color='y', transform=ax.transAxes)
  ax.text(0.1, 0.9, 'test', color='m', transform=ax.transAxes)
  plt.axis('square')
  ### plot weights of fully-connected layer for first 300 neurons
  plt.subplot(1,3,3)
  plt.imshow(weights, aspect='auto', cmap='bwr', vmin=-0.01,vmax=0.01)
  plt.title('out_layer weights')
  plt.ylabel('neurons')
  plt.xlabel('convolutional units')
  plt.colorbar()
  plt.tight_layout()
  plt.show()
def plot_prediction(ax, y_pred, y_test):
  """Draw a predicted neural response (green, thick) over the test response (magenta).

  Args:
    ax: matplotlib Axes to draw into
    y_pred: predicted response per stimulus bin
    y_test: held-out (test) response per stimulus bin
  """
  # Test trace first so the thicker prediction is drawn on top of it
  for trace, kwargs in ((y_test, {'color': 'm'}), (y_pred, {'color': 'g', 'linewidth': 3})):
    ax.plot(trace, **kwargs)
  ax.set_xlabel('stimulus bin')
  ax.set_ylabel('response')
def plot_training_curves(train_loss, test_loss):
  """Plot training loss (yellow line) and test loss (magenta dots) vs. iteration."""
  figure, axis = plt.subplots()
  axis.plot(train_loss, 'y', label="Train loss")
  axis.plot(test_loss, '.', markersize=10, color='m', label="Test loss")
  axis.set(xlabel="Gradient descent iteration", ylabel="Mean squared error")
  plt.legend()
# + [markdown] colab_type="text"
# ---
# # Section 1: Neural Tuning Curves
#
# In the next cell, we plot the tuning curves of a random subset of neurons. We have binned the stimuli orientations more than in Tutorial 1.
#
# Rerun the cell to look at different example neurons and observe the diversity of tuning curves in the population. How can we fit these neural responses with an encoding model?
#
#
#
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 520} colab_type="code" outputId="477a3fec-5088-4b8d-b235-c07a962144df"
#@title
#@markdown Execute this cell to load data and plot neural tuning curves
### Load data (averaged within orientation bins; the filename indicates 6-degree bins)
# responses are split into test and train
resp_train, resp_test, stimuli = load_data_split()
n_stimuli, n_neurons = resp_train.shape
print('resp_train contains averaged responses of %i neurons to %i binned stimuli' % (n_neurons, n_stimuli))
#print(resp_train.shape)
# also make stimuli into array of 0's and 1's
n_bins = len(stimuli)
stim_binary = torch.eye(n_bins, dtype=torch.float32)  # row k one-hot codes orientation bin k
# Visualize tuning curves
fig, axs = plt.subplots(3, 5, figsize=(15,7))
for k, ax in enumerate(axs.flatten()):
  neuron_index = np.random.choice(n_neurons) # pick random neuron
  plot_tuning(ax, stimuli, resp_train[:, neuron_index], resp_test[:, neuron_index], neuron_index, linewidth=2)
  if k==0:
    # label the color code once, on the first panel only
    ax.text(1.0, 0.9, 'train', color='y', transform=ax.transAxes)
    ax.text(1.0, 0.65, 'test', color='m', transform=ax.transAxes)
fig.tight_layout()
plt.show()
# + [markdown] colab_type="text"
# ---
# # Section 2: Introduction to convolutions
#
#
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 537} colab_type="code" outputId="f445fbd3-c589-4f99-a8f8-53660f00a5a9"
#@title Video 1: Intro to convolutions
from IPython.display import YouTubeVideo
# Embed the lecture video; also print a plain URL for viewers outside the notebook
video = YouTubeVideo(id="vPNu8CNg9i4", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
# + [markdown] colab_type="text"
# ## Section 2.1: 1d convolution in numpy
#
# We provide an example function below, described in the video, which performs a 1D convolution of a stimulus input $s$ with **filter** $f$ of size $K$ (these filters are also called *kernels*). In particular, it computes:
#
# $$a_x = \sum^{K/2}_{i=-K/2} f_i \, s_{x-i}$$
#
# where $a_x$ is the convolutional output at position $x$.
#
# There is no exercise in this section but make sure you understand what is happening in the function below (i.e. what a convolution is). It can be helpful to write or draw this out on paper to clarify! You could even make your own short stimulus and filter and calculate what you think the convolutional output should be by hand, and then compare to the function output.
# + colab={} colab_type="code"
def convolve1d(stimulus, f, pad, stride):
  """ Pads stimulus and performs 1d convolution

  The filter is assumed to span offsets -pad .. pad-1 (length 2*pad), as
  produced by e.g. my_gaussian(np.arange(-pad, pad), ...). The output has
  the same length as the stimulus; positions skipped by the stride stay NaN.

  Args:
    stimulus (ndarray): the 1D input for the convolution (any length;
      generalizes the original hard-coded 360-bin stimulus)
    f (ndarray): the 1D filter for the convolution, length 2*pad
    pad (scalar): the amount of zero padding for the stimulus
    stride (scalar): how far the filter moves every step

  Returns:
    (ndarray): convolutional output, same size as stimulus
  """
  n = len(stimulus)  # was a hard-coded 360; len() keeps old behavior for 360-bin inputs
  # Pad the stimulus with zeros on both ends
  zero_pads = np.zeros(pad)
  padded_stimulus = np.concatenate((zero_pads, stimulus, zero_pads))
  # Initialize convolutional output with NaN (stride may leave gaps)
  a = np.nan * np.zeros(n)
  # Compute the convolution
  for x in np.arange(pad, n + pad, stride, int):  # loop over positions x
    # Element-wise product of the filter and the stimulus window around x
    a[x - pad] = (f * padded_stimulus[x - pad:x + pad]).sum()
  return a
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" outputId="379c97e1-00e4-4f46-d62d-030b0fce90a5"
#@title
#@markdown Execute this cell to call convolve1d with a Gaussian filter and plot results
# Convolutional parameters
K = 49 # size of convolutional filter
stride = 1 # how often to compute the convolution along the stim axis
pad = K // 2 # we will need to pad stimulus with zeros to perform convolution
# Create stimulus: a single active orientation bin (a delta at `ori` degrees)
ori = 135
stimulus = np.zeros(360)
stimulus[ori] = 1.0
# Create Gaussian filter
# we will use the code from W2D1 (bayes day) to create this!
# mean of gaussian mu=0
i = np.arange(-pad, pad)
f = my_gaussian(i, 0.0, sigma=10)
# Call function
a = convolve1d(stimulus, f, pad, stride)
# Plot results (plot_conv draws into this module-level `fig`)
fig = plt.figure(figsize=(15,4))
plot_conv(pad, stimulus, f, a)
plt.show()
# + [markdown] colab_type="text"
# ## Section 2.2: Convolutional layer
#
# You have just learned how to compute what is called a single convolutional **channel**: a single filter applied to the input resulting in several units, where the number of units depends on the *stride* you set.
#
# (Note if filter size *K* is odd and you set the *pad=K//2* and *stride=1* (as is the default above), you get a **channel** of units that is the same size as the input.)
#
# *Contemplation:* How does a neuron potentially combine those activation units and create the tuning curves they have? Will we need more than one convolutional filter to recreate all the responses we see?
#
#
# + [markdown] colab={} colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial2_Solution_185efdad.py)
#
#
# + [markdown] colab_type="text"
# Let's add more convolutional channels and implement this operation efficiently using pytorch. A *layer* of convolutional channels can be implemented with one line of code using the PyTorch class `nn.Conv1d()`, which requires the following arguments for initialization:
# * $C^{in}$: the number of input channels
# * $C^{out}$: the number of output channels (number of different convolutional filters)
# * $K$: the size of the $C^{out}$ different convolutional filters
#
# When you run the network, you can input a stimulus of arbitrary length ($H^{in}$), but it needs to be shaped as a 2D input $C^{in} \times H^{in}$. In our case, $C^{in}=1$ because there is only one orientation input and $H^{in}$ is the number of stimulus bins $B$.
#
# <p align="center">
# <img src="https://github.com/NeuromatchAcademy/course-content/blob/master/tutorials/static/convolutional_layer.PNG?raw=true" width="600" />
# </p>
# + colab={} colab_type="code"
class ConvolutionalLayer(nn.Module):
  """Network consisting of a single 1D convolutional layer.

  Attributes:
    conv (nn.Conv1d): the convolutional layer applied to the stimulus
  """
  def __init__(self, c_in=1, c_out=8, K=9):
    """Create the convolutional layer.

    Args:
      c_in: number of input stimulus channels
      c_out: number of output convolutional channels
      K: size of each convolutional filter
    """
    super().__init__()
    # padding=K//2 with stride 1 keeps the output the same length as the input
    self.conv = nn.Conv1d(c_in, c_out, kernel_size=K,
                          padding=K // 2, stride=1)
  def forward(self, s):
    """Apply the convolutional layer to a batch of stimuli.

    Args:
      s (torch.Tensor): n_stimuli x h tensor with stimuli

    Returns:
      (torch.Tensor): n_stimuli x c_out x h tensor with convolutional
      layer unit activations.
    """
    with_channel = s.unsqueeze(1)  # insert the single input-channel dimension
    return self.conv(with_channel)
# + [markdown] colab_type="text"
# ### Exercise 1: 1D convolution in pytorch
#
# We will now run the convolutional layer on our stimulus. In particular, we will use the binary stimuli (`stim_binary`), which is a 60 x 60 tensor where each row contains the binary stimuli for one orientation (all zeros except for a one at that orientation). This tensor is size 60 instead of 360 because we have binned the orientations. Each row of this matrix is a different example orientation that we want to convolve - see cell below to visualize three rows of this tensor.
#
#
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" outputId="a4adbbcd-12c5-4793-d9ed-deab66557feb"
# @markdown Execute this cell to visualize stim_binary
row_inds = [10, 25, 40]  # example orientation bins to display
fig = plt.figure(figsize=(15,4))
for j, row_ind in enumerate(row_inds):
  ax = fig.add_subplot(1, 3, j+1)
  # each row of stim_binary is a one-hot vector over the 60 orientation bins
  ax.plot(np.arange(0, 60), stim_binary[row_ind,:], 'k')
  ax.set_title('stim_binary row '+str(row_ind))
  ax.set_xlabel('orientation bin')
  ax.set_ylabel('stimulus')
plt.show()
# + [markdown] colab_type="text"
# `nn.Conv1d` takes in a tensor of size $(N, C^{in}, H^{in}$) where $N$ is the number of examples, $C^{in}$ is the number of input channels, and $H^{in}$ is the number of stimulus bins $B$. Since our stimulus has only one input channel, the `ConvolutionalLayer` class adds the $C^{in}$ dimension for us: we need to input an $(N, H^{in})$ stimulus, which `stim_binary` is!
#
#
# We will plot the outputs of the convolution. `convout` is a tensor of size $(N, C^{out}, H^{in})$ where $N$ is the number of examples and $C^{out}$ are the number of convolutional channels. In the plot, the activations for a single channel are shown in one color.
# + colab={} colab_type="code"
# Convolution layer parameters
K = 9 # filter size, now that we've binned let's make this smaller than for the numpy conv
conv_channels = 8 # how many convolutional channels to have in our layer
convout = np.zeros(0) # assign convolutional activations to convout
################################################################################
## TODO for students: compute convolution activations from stim_binary using pytorch
# Complete and uncomment
# (this is deliberate exercise scaffolding; the solution link is below the cell)
################################################################################
# Initialize conv layer
# convLayer = ConvolutionalLayer(...)
# Call conv layer on stimulus
# convout = convLayer(...)
# convout = convout.detach() # detach gradients
# print(convout.shape) # can you identify what each of these dimensions are?
# Plot results
# plot_example_activations(convout)
# + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 358} colab_type="text" outputId="158c9b20-a905-4e1b-eb11-16b898d4454c"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial2_Solution_44e9267a.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=851 height=270 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D4_DeepLearning1/static/W3D4_Tutorial2_Solution_44e9267a_1.png>
#
#
# + [markdown] colab_type="text"
# #### Think!
# - Why are the convolutional activations for a given channel the same for many units?
# - What is the width of the non-constant activations (i.e. how many units in a given channel would differ from the constant)?
# - How many weights does this convLayer have?
# - How many would it have if it were a fully connected layer?
# + [markdown] colab={} colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial2_Solution_32703d8b.py)
#
#
# + [markdown] colab_type="text"
# ---
# # Section 3: Encoding model using convolutions
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 537} colab_type="code" outputId="f1b29908-9e8d-4654-814c-c13fa77d6f64"
#@title Video 2: Encoding model using convolutions
from IPython.display import YouTubeVideo
# Embed the lecture video; also print a plain URL for viewers outside the notebook
video = YouTubeVideo(id="Me8X3Kro0EE", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
# + [markdown] colab_type="text"
# ## Section 3.1: Convolutional layer & fully connected layer
#
# We will now build an encoding model by hooking this convolutional layer up to a fully connected layer, like the one that we used in Tutorial 1 (`nn.Linear`). We will use this model to predict neural responses.
#
# <p align="center">
# <img src="https://github.com/NeuromatchAcademy/course-content/blob/master/tutorials/static/conv_fc.PNG?raw=true" width="800" />
# </p>
#
# This linear layer will have weights $W^{out}$ and we will get an output vector $\mathbf{y}$ of predicted neural responses.
# + [markdown] colab_type="text"
# ### Exercise 2: Implement encoding model
#
# In this exercise, you will create the encoding model described above. In particular, you will:
#
# * Add a fully connected layer to `__init__` method of network.
# * Add a fully connected layer to `forward` method of network.
#
# We will then train the network using the helper function `train`. Full training will take a few minutes: if you want to train for just a few steps to speed up the code while iterating on your code, you can decrease the `n_iter` input from 500.
# + colab={} colab_type="code"
class ConvFC(nn.Module):
  """Deep network with one convolutional layer + one fully connected layer

  NOTE: this is tutorial scaffolding — students must add the fully connected
  layer where marked before the class can be instantiated (the raise
  statements below fire until the exercise is completed).

  Attributes:
    conv (nn.Conv1d): convolutional layer
    dims (tuple): shape of convolutional layer output
    out_layer (nn.Linear): linear layer
  """
  def __init__(self, n_neurons, c_in=1, c_out=8, K=9, b=60):
    """ initialize layer

    Args:
      n_neurons: number of neurons whose responses to predict (output size)
      c_in: number of input stimulus channels
      c_out: number of convolutional channels
      K: size of each convolutional filter
      b: number of stimulus bins, n_bins
    """
    super().__init__()
    self.conv = nn.Conv1d(c_in, c_out, kernel_size=K, padding=K//2)
    self.dims = (c_out, b) # dimensions of conv layer output
    M = np.prod(self.dims) # number of hidden units
    ################################################################################
    ## TO DO for students: add fully connected layer to network (self.out_layer)
    # Fill out function and remove
    raise NotImplementedError("Student exercise: add fully connected layer to initialize network")
    ################################################################################
    self.out_layer = nn.Linear(M, ...)
    nn.init.normal_(self.out_layer.weight, std=0.01) # initialize weights to be small
  def forward(self, s):
    """ Predict neural responses to stimuli s

    Args:
      s (torch.Tensor): p x L tensor with stimuli

    Returns:
      torch.Tensor: p x n_neurons tensor with predicted neural responses.
    """
    s = s.unsqueeze(1) # p x 1 x L, add a singleton dimension for the single channel
    a = self.conv(s) # output of convolutional layer
    a = a.view(-1, np.prod(self.dims)) # flatten each convolutional layer output into a vector
    ################################################################################
    ## TO DO for students: add fully connected layer to forward pass of network (self.out_layer)
    # Fill out function and remove
    raise NotImplementedError("Student exercise: add fully connected layer to network")
    ################################################################################
    y = ...
    return y
# Choose loss function
MSE_loss = nn.MSELoss()  # mean squared error between predicted and measured responses
## Initialize network (uncomment once ConvFC's student exercise is completed)
# net = ConvFC(n_neurons)
## Run GD on training set data
## ** this time we are also providing the test data to estimate the test loss
# train_loss, test_loss = train(net, MSE_loss, stim_binary, resp_train,
#                               test_data=stim_binary, test_labels=resp_test,
#                               n_iter=500, learning_rate=20)
## Plot the training loss over iterations of GD
# plot_training_curves(train_loss, test_loss)
# + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 625} colab_type="text" outputId="f2e0daf0-ae57-45b9-c82a-6ce98087a5e4"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial2_Solution_fda4c007.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=558 height=414 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D4_DeepLearning1/static/W3D4_Tutorial2_Solution_fda4c007_11.png>
#
#
# + [markdown] colab_type="text"
# We trained this network to predict the neural responses -- see the yellow curve for the training loss. We also computed the test loss every 50 iterations. The training loss goes down throughout training but the testing loss doesn’t -- why is this? *We are overfitting to the NOISE in the training set.*
#
# Let’s look at a prediction for a single neuron (below). The yellow curve is the training data, the pink curve is the testing data and the prediction is in green. You can barely see the yellow curve because the prediction has fit so well to the training data. However, some of what it has fit is noise.
#
# If we look at the weight matrix, we see that the weights are all positive or negative. Did we expect this to happen? Or did we think that this tuning curve is the sum of only a few filters and positions?
#
#
# + [markdown] colab={} colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial2_Solution_5d5c2c4f.py)
#
#
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 322} colab_type="code" outputId="42817544-0db2-4aee-a81a-29938819d3c5"
#@title
#@markdown Execute this cell to examine prediction for example neuron and see weights
# Input stimuli to network
y_pred = net(stim_binary)
print('output shape: ', y_pred.shape) # what are the two dimensions of this network output?
# Plot example neural response prediction and some fully-connected layer weights
# Look at the weights of the out_layer of the network
weights = net.out_layer.weight.detach()
print('output weights shape: ', weights.shape) # what are these two dimensions of the fully connected layer weights?
# Plot prediction + neuron + weights
neuron_index = np.random.choice(n_neurons)  # pick a random neuron to display
plot_pred_weights(y_pred[:,neuron_index].detach(), resp_train[:,neuron_index],
                  resp_test[:,neuron_index], weights[:20])
# + [markdown] colab_type="text"
# *Comprehension check*: what does each dimension of the output and output weights correspond to?
#
# + [markdown] colab={} colab_type="text"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial2_Solution_fa083d69.py)
#
#
# + [markdown] colab_type="text"
# We can reduce overfitting using L2 regularization as we learned on W1D4.
#
# Additionally there is another type of regularization you might want... If we think of a neuron as a sum of a few convolutional filters, we might expect the weight matrix of the fully-connected layer to be sparse. Therefore, we can also apply an L1 regularization penalty to enforce sparsity.
#
#
# + [markdown] colab_type="text"
# ---
# # (Bonus) Section 4: Regularization
#
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 537} colab_type="code" outputId="a7a229ff-454c-4054-c73b-ff34035fea71"
#@title Video 3: Regularization
# Embed the lecture video; fs=1 enables the fullscreen control.
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="Qnn5OPHKo5w", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video  # last expression in the cell: renders the embedded player
# + [markdown] colab_type="text"
#
# As discussed in the lecture, it is often important to incorporate regularization terms into the loss function to avoid overfitting. In particular, in this case, we want to use these terms to enforce sparsity in the output layer.
#
# Here we'll consider the classic L2 regularization penalty $\mathcal{R}_{L2}$, which is the sum of squares of each weight in the network $\sum_{ij} {\mathbf{W}^{out}_{ij}}^2$ times a constant that we call `L2_penalty`.
#
# We will also add an L1 regularization penalty $\mathcal{R}_{L1}$ to enforce sparsity of the weights, which is the sum of the absolute values of the weights $\sum_{ij} |{\mathbf{W}^{out}_{ij}}|$ times a constant that we call `L1_penalty`.
#
# We will add both of these to the loss function:
# \begin{equation}
# L = (y - \tilde{y})^2 + \mathcal{R}_{L2} + \mathcal{R}_{L1}
# \end{equation}
#
# The parameters `L2_penalty` and `L1_penalty` are inputs to the train function.
# + [markdown] colab_type="text"
# ### (Bonus) Exercise 3: Add regularization to training
#
# We will create a new loss function that adds L1 and L2 regularization.
# In particular, you will:
# * add L2 loss penalty to the weights
# * add L1 loss penalty to the weights
#
#
# We will then train the network using this loss function. Full training will take a few minutes: if you want to train for just a few steps to speed up the code while iterating on your code, you can decrease the n_iter input from 500.
#
# Hint: since we are using `torch` instead of `np`, we will use `torch.abs` instead of `np.absolute`. You can use `torch.sum` or `.sum()` to sum over a tensor.
#
#
#
#
# + colab={} colab_type="code"
def regularized_MSE_loss(output, target, weights=None, L2_penalty=0, L1_penalty=0):
    """Mean-squared-error loss with optional L1 and L2 weight penalties.

    Args:
        output (torch.Tensor): output of network
        target (torch.Tensor): neural response network is trying to predict
        weights (torch.Tensor): fully-connected layer weights (net.out_layer.weight)
        L2_penalty: scaling factor for the sum of squared weights
        L1_penalty: scaling factor for the sum of absolute weights

    Returns:
        (torch.Tensor) mean-squared error with L1 and L2 penalties added
    """
    loss_fn = nn.MSELoss()
    loss = loss_fn(output, target)
    if weights is not None:
        # L2 penalty: sum of squared weights (discourages large weights).
        L2 = L2_penalty * (weights ** 2).sum()
        # L1 penalty: sum of absolute weights (encourages sparsity).
        L1 = L1_penalty * weights.abs().sum()
        loss += L1 + L2
    return loss
# Initialize network
# NOTE(review): ConvFC and n_neurons come from earlier notebook cells.
net = ConvFC(n_neurons)
# Uncomment below to test your function
# Train network
# train_loss, test_loss = train(net, regularized_MSE_loss, stim_binary, resp_train,
#                               test_data=stim_binary, test_labels=resp_test,
#                               learning_rate=10, n_iter=500,
#                               L2_penalty=1e-4, L1_penalty=1e-6)
# Plot the training loss over iterations of GD
# plot_training_curves(train_loss, test_loss)
# + [markdown] colab={"base_uri": "https://localhost:8080/", "height": 626} colab_type="text" outputId="d525e1c6-2d68-4315-9566-1daff87c96ca"
# [*Click for solution*](https://github.com/NeuromatchAcademy/course-content/tree/master//tutorials/W3D4_DeepLearning1/solutions/W3D4_Tutorial2_Solution_c4027856.py)
#
# *Example output:*
#
# <img alt='Solution hint' align='left' width=558 height=413 src=https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/tutorials/W3D4_DeepLearning1/static/W3D4_Tutorial2_Solution_c4027856_11.png>
#
#
# + [markdown] colab_type="text"
# If we now train the network with these regularization penalties we find that the train and test loss are similar throughout training: both continue decreasing.
#
# We will now look at the predictions after using the regularized loss function. We can see below that the prediction is much smoother than before! This is because the weight matrix is in fact sparser (zero is represented by white in this color map)
#
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 286} colab_type="code" outputId="15e6aedc-ade3-45a6-d844-88e3c05de82d"
#@title
#@markdown Execute this cell to examine prediction for example neuron and see weights
# Plot prediction + neuron + weights for the regularization-trained network
weights = net.out_layer.weight.detach()
y_pred = net(stim_binary)
neuron_index = np.random.choice(n_neurons)  # pick one neuron at random
plot_pred_weights(y_pred[:,neuron_index].detach(), resp_train[:,neuron_index],
                  resp_test[:,neuron_index], weights[:20])
# + [markdown] colab_type="text"
#
# Now let's look at what the predictions look like for many neurons
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 430} colab_type="code" outputId="44be92e2-ea32-4bc2-e6ea-caf2e2c4fa9e"
#@title
#@markdown Execute this cell to examine predictions for random subsets of neurons
# Visualize tuning curves & plot neural predictions for 10 randomly chosen neurons
fig, axs = plt.subplots(2, 5, figsize=(15,6))
for k, ax in enumerate(axs.flatten()):
    ineur = np.random.choice(n_neurons)  # random neuron per panel (may repeat)
    plot_prediction(ax, y_pred[:,ineur].detach(), resp_test[:,ineur])
    if k==0:
        # Legend text on the first panel only, in axes coordinates
        ax.text(.1, 1., 'test', color='m', transform=ax.transAxes)
        ax.text(.1, .9, 'prediction', color='g', transform=ax.transAxes)
fig.tight_layout()
plt.show()
# + [markdown] colab_type="text"
# ---
# # Summary
#
# In this notebook, we built and evaluated a neural network based encoding model to predict neural responses from stimuli. To do so we :
# * implemented a basic convolution filter
# * implemented and trained a convolutional neural network with multiple filters to predict neural responses using PyTorch
# * learned about and implemented L2/L1 regularization to avoid overfitting
#
# What can this tell us about the representation of oriented gratings in mouse visual cortex? Maybe we can think of interpreting each of the convolutional channels as a computation performed by a single group of neurons in thalamus, and each visual cortical neuron combines various groups of thalamic neurons. But we'd have to test hypotheses like these by, for instance, recording thalamic neurons.
#
#
# Tutorial 3 is bonus, although we recommend watching the videos if possible!
# + [markdown] colab_type="text"
# ---
# # Appendix
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 537} colab_type="code" outputId="b2d2b3b3-b4d4-4828-bd06-d37918636e66"
#@title Video 4: Some practical advice for fitting neural networks
# Embed the lecture video; fs=1 enables the fullscreen control.
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="eU74NFroIHk", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video  # renders the embedded player
# + [markdown] colab_type="text"
# ## Why CNN's?
#
# CNN models are particularly [well-suited](https://www.nature.com/articles/nn.4244) to modeling the visual system for a number of reasons:
#
# 1. **Distributed computation**: like any other neural network, CNN's use distributed representations to compute -- much like the brain seems to do. Such models, therefore, provide us with a vocabulary with which to talk and think about such distributed representations. Because we know the exact function the model is built to perform (e.g. orientation discrimination), we can analyze its internal representations with respect to this function and begin to interpret why the representations look the way they do. Most importantly, we can then use these insights to analyze the structure of neural representations observed in recorded population activity. We can qualitatively and quantitatively compare the representations we see in the model and in a given population of real neurons to hopefully tease out the computations it performs.
#
# 2. **Hierarchical architecture**: like in any other deep learning architecture, each layer of a deep CNN comprises a non-linear transformation of the previous layer. Thus, there is a natural hierarchy whereby layers closer to the network output represent increasingly more abstract information about the input image. For example, in a network trained to do object recognition, the early layers might represent information about edges in the image, whereas later layers closer to the output might represent various object categories. This resembles the [hierarchical structure of the visual system](https://pubmed.ncbi.nlm.nih.gov/1822724/), where [lower-level areas](https://www.jneurosci.org/content/25/46/10577.short) (e.g. retina, V1) represent visual features of the sensory input and [higher-level areas](https://www.sciencedirect.com/science/article/pii/S089662731200092X) (e.g. V4, IT) represent properties of objects in the visual scene. We can then naturally use a single CNN to model multiple visual areas, using early CNN layers to model lower-level visual areas and late CNN layers to model higher-level visual areas.
#
# Relative to fully connected networks, CNN's, in fact, have further hierarchical structure built-in through the max pooling layers. Recall that each output of a convolution + pooling block is the result of processing a local patch of the inputs to that block. If we stack such blocks in a sequence, then the outputs of each block will be sensitive to increasingly larger regions of the initial raw input to the network: an output from the first block is sensitive to a single patch of these inputs, corresponding to its "receptive field"; an output from the second block is sensitive to a patch of outputs from the first block, which together are sensitive to a larger patch of raw inputs comprising the union of their receptive fields. Receptive fields thus get larger for deeper layers (see [here](http://colah.github.io/posts/2014-07-Conv-Nets-Modular/) for a nice visual depiction of this). This resembles primate visual systems, where neurons in higher-level visual areas respond to stimuli in wider regions of the visual field than neurons in lower-level visual areas.
#
# 3. **Convolutional layers**: through the weight sharing constraint, the outputs of each channel of a convolutional layer process different parts of the input image in exactly the same way. This architectural constraint effectively builds into the network the assumption that objects in the world typically look the same regardless of where they are in space. This is useful for modeling the visual system for two (largely separate) reasons:
# * Firstly, this assumption is generally valid in mammalian visual systems, since mammals tend to view the same object from many perspectives. Two neurons at a similar hierarchy in the visual system with different receptive fields could thus end up receiving statistically similar synaptic inputs, so that the synaptic weights developed over time may end up being similar as well.
# * Secondly, this architecture significantly improves object recognition ability. Object recognition was essentially an unsolved problem in machine learning until the [advent](https://en.wikipedia.org/wiki/AlexNet) of techniques for effectively training *deep* convolutional neural networks. Fully connected networks on their own can't achieve object recognition abilities anywhere close to human levels, making them bad models of human object recognition. Indeed, it is generally the case that [the better a neural network model is at object recognition, the closer the match between its representations and those observed in the brain](https://www.pnas.org/content/111/23/8619.short). That said, it is worth noting that our much simpler orientation discrimination task (in Tutorial 3) can be solved by relatively simple networks.
| tutorials/W3D4_DeepLearning1/student/W3D4_Tutorial2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
import datetime, os
# hide tf logs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # or any {'0', '1', '2'},
# 0 (default) shows all, 1 to filter out INFO logs, 2 to additionally filter out WARNING logs, and 3 to additionally filter out ERROR logs
import scipy.optimize
import scipy.io
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker
import time
from pyDOE import lhs  # Latin Hypercube Sampling
import pandas as pd
import seaborn as sns
import codecs, json
# generates same random numbers each time (reproducible weight init and sampling)
np.random.seed(1234)
tf.random.set_seed(1234)
print("TensorFlow version: {}".format(tf.__version__))
# -
# # Data Prep
#
# Training and Testing data is prepared from the solution file
# +
x_1 = np.linspace(-1,1,256) # 256 points between -1 and 1 [256x1]
x_2 = np.linspace(1,-1,256) # 256 points between 1 and -1 [256x1]
# 2D coordinate grid over the square domain; X varies along columns, Y along rows
X, Y = np.meshgrid(x_1,x_2)
# -
# # Test Data
#
# We prepare the test data to compare against the solution produced by the PINN.
# +
# Flatten the grid column-wise (Fortran order) into an (N, 2) array of test points
X_u_test = np.hstack((X.flatten(order='F')[:,None], Y.flatten(order='F')[:,None]))
# Domain bounds
lb = np.array([-1, -1]) #lower bound
ub = np.array([1, 1]) #upper bound
# Frequencies of the manufactured solution along x and y
a_1 = 1
a_2 = 4
usol = np.sin(a_1 * np.pi * X) * np.sin(a_2 * np.pi * Y) #solution chosen for convenience
u = usol.flatten('F')[:,None]  # exact solution flattened the same way as X_u_test
# -
# # Training Data
def trainingdata(N_u, N_f):
    """Assemble boundary training data and collocation points.

    Returns (X_f_train, X_u_train, u_train): collocation points (with the
    sampled boundary points appended), sampled boundary coordinates, and the
    exact solution values at those boundary points.
    Uses notebook globals X, Y, usol, lb, ub and pyDOE's lhs.
    """
    # Coordinates and exact solution values on each of the four domain edges.
    left_xy = np.hstack((X[:, 0][:, None], Y[:, 0][:, None]))
    left_u = usol[:, 0][:, None]
    right_xy = np.hstack((X[:, -1][:, None], Y[:, -1][:, None]))
    right_u = usol[:, -1][:, None]
    top_xy = np.hstack((X[0, :][:, None], Y[0, :][:, None]))
    top_u = usol[0, :][:, None]
    bottom_xy = np.hstack((X[-1, :][:, None], Y[-1, :][:, None]))
    bottom_u = usol[-1, :][:, None]

    boundary_xy = np.vstack([left_xy, right_xy, bottom_xy, top_xy])
    boundary_u = np.vstack([left_u, right_u, bottom_u, top_u])

    # Draw N_u boundary points at random, without replacement.
    idx = np.random.choice(boundary_xy.shape[0], N_u, replace=False)
    X_u_train = boundary_xy[idx, :]
    u_train = boundary_u[idx, :]

    # Latin Hypercube Sampling for N_f collocation points inside the domain,
    # then append the boundary training points to the collocation set.
    X_f = lb + (ub - lb) * lhs(2, N_f)
    X_f_train = np.vstack((X_f, X_u_train))

    return X_f_train, X_u_train, u_train
# # PINN
#
# $W \in \mathcal{R}^{n_{l-1}\times{n_l}}$
#
# Creating sequential layers using the $\textit{class}$ tf.Module
class Sequentialmodel(tf.Module):
    """Fully-connected feed-forward PINN for the 2D Helmholtz equation.

    Weights and biases are stored interleaved in ``self.W`` as
    [w1, b1, w2, b2, ...] so the whole parameter set can be flattened to /
    restored from a single 1D vector, as required by scipy.optimize.
    NOTE(review): relies on notebook globals `layers`, `lb`, `ub`, `a_1`,
    `a_2`, `X_u_train`, `u_train`, `X_f_train`, `X_u_test`, `u` — defined in
    other cells; confirm they exist before instantiating.
    """
    def __init__(self, layers, name=None):
        # layers: array of layer widths, e.g. [2, 3, 1]
        self.W = []  # Weights and biases, interleaved [w1, b1, w2, b2, ...]
        self.parameters = 0  # total number of scalar parameters
        for i in range(len(layers)-1):
            input_dim = layers[i]
            output_dim = layers[i+1]
            #Xavier standard deviation
            std_dv = np.sqrt((2.0/(input_dim + output_dim)))
            #weights = normal distribution * Xavier standard deviation + 0
            w = tf.random.normal([input_dim, output_dim], dtype = 'float64') * std_dv
            w = tf.Variable(w, trainable=True, name = 'w' + str(i+1))
            b = tf.Variable(tf.cast(tf.zeros([output_dim]), dtype = 'float64'), trainable = True, name = 'b' + str(i+1))
            self.W.append(w)
            self.W.append(b)
            self.parameters += input_dim * output_dim + output_dim
    def evaluate(self,x):
        """Forward pass: scale inputs, tanh hidden layers, linear output."""
        # pre-processing input
        x = (x - lb)/(ub - lb) #feature scaling to the unit square
        a = x
        for i in range(len(layers)-2):
            z = tf.add(tf.matmul(a, self.W[2*i]), self.W[2*i+1])
            a = tf.nn.tanh(z)
        a = tf.add(tf.matmul(a, self.W[-2]), self.W[-1]) # For regression, no activation to last layer
        return a
    def get_weights(self):
        """Return all weights and biases flattened into one 1D tensor."""
        parameters_1d = []  # [.... W_i,b_i..... ] 1d array
        for i in range (len(layers)-1):
            w_1d = tf.reshape(self.W[2*i],[-1]) #flatten weights
            b_1d = tf.reshape(self.W[2*i+1],[-1]) #flatten biases
            parameters_1d = tf.concat([parameters_1d, w_1d], 0) #concat weights
            parameters_1d = tf.concat([parameters_1d, b_1d], 0) #concat biases
        return parameters_1d
    def set_weights(self,parameters):
        """Load a flat 1D parameter vector back into the weight/bias tensors."""
        for i in range (len(layers)-1):
            shape_w = tf.shape(self.W[2*i]).numpy() # shape of the weight tensor
            size_w = tf.size(self.W[2*i]).numpy() #size of the weight tensor
            shape_b = tf.shape(self.W[2*i+1]).numpy() # shape of the bias tensor
            size_b = tf.size(self.W[2*i+1]).numpy() #size of the bias tensor
            pick_w = parameters[0:size_w] #pick the weights
            self.W[2*i].assign(tf.reshape(pick_w,shape_w)) # assign
            parameters = np.delete(parameters,np.arange(size_w),0) #delete consumed entries
            pick_b = parameters[0:size_b] #pick the biases
            self.W[2*i+1].assign(tf.reshape(pick_b,shape_b)) # assign
            parameters = np.delete(parameters,np.arange(size_b),0) #delete consumed entries
    def loss_BC(self,x,y):
        """Mean-squared error on the boundary (Dirichlet) data."""
        loss_u = tf.reduce_mean(tf.square(y-self.evaluate(x)))
        return loss_u
    def loss_PDE(self, x_to_train_f):
        """Helmholtz PDE residual loss at the collocation points.

        Residual: u_xx + u_yy + k^2 u - q, with the source term q manufactured
        so that sin(a_1*pi*x)*sin(a_2*pi*y) is the exact solution.
        """
        g = tf.Variable(x_to_train_f, dtype = 'float64', trainable = False)
        k = 1  # wavenumber of the Helmholtz operator
        x_1_f = g[:,0:1]
        x_2_f = g[:,1:2]
        with tf.GradientTape(persistent=True) as tape:
            tape.watch(x_1_f)
            tape.watch(x_2_f)
            g = tf.stack([x_1_f[:,0], x_2_f[:,0]], axis=1)
            u = self.evaluate(g)
            # first derivatives taken inside the tape so the tape also
            # records them, enabling the second derivatives below
            u_x_1 = tape.gradient(u,x_1_f)
            u_x_2 = tape.gradient(u,x_2_f)
        u_xx_1 = tape.gradient(u_x_1,x_1_f)
        u_xx_2 = tape.gradient(u_x_2,x_2_f)
        del tape  # persistent tapes must be released explicitly
        q = -( (a_1*np.pi)**2 + (a_2*np.pi)**2 - k**2 ) * np.sin(a_1*np.pi*x_1_f) * np.sin(a_2*np.pi*x_2_f)
        f = u_xx_1 + u_xx_2 + k**2 * u - q #residual
        loss_f = tf.reduce_mean(tf.square(f))
        return loss_f, f
    def loss(self,x,y,g):
        """Total loss = boundary-data loss + PDE residual loss."""
        loss_u = self.loss_BC(x,y)
        loss_f, f = self.loss_PDE(g)
        loss = loss_u + loss_f
        return loss, loss_u, loss_f
    def optimizerfunc(self,parameters):
        """Objective for scipy L-BFGS: returns (loss, flat gradient) as numpy."""
        self.set_weights(parameters)
        with tf.GradientTape() as tape:
            tape.watch(self.trainable_variables)
            loss_val, loss_u, loss_f = self.loss(X_u_train, u_train, X_f_train)
        grads = tape.gradient(loss_val,self.trainable_variables)
        del tape
        grads_1d = [ ] #store 1d grads
        for i in range (len(layers)-1):
            grads_w_1d = tf.reshape(grads[2*i],[-1]) #flatten weights
            grads_b_1d = tf.reshape(grads[2*i+1],[-1]) #flatten biases
            grads_1d = tf.concat([grads_1d, grads_w_1d], 0) #concat grad_weights
            grads_1d = tf.concat([grads_1d, grads_b_1d], 0) #concat grad_biases
        return loss_val.numpy(), grads_1d.numpy()
    def optimizer_callback(self,parameters):
        """Per-iteration logging: losses and relative L2 test error."""
        loss_value, loss_u, loss_f = self.loss(X_u_train, u_train, X_f_train)
        u_pred = self.evaluate(X_u_test)
        error_vec = np.linalg.norm((u-u_pred),2)/np.linalg.norm(u,2)
        tf.print(loss_value, loss_u, loss_f, error_vec)
# # Main
# +
N_u = 400 #Total number of data points for 'u'
N_f = 10000 #Total number of collocation points
# Training data
X_f_train, X_u_train, u_train = trainingdata(N_u,N_f)
layers = np.array([2, 3, 1]) #1 hidden layer
PINN = Sequentialmodel(layers)
init_params = PINN.get_weights().numpy()
start_time = time.time()
# train the model with Scipy L-BFGS optimizer
results = scipy.optimize.minimize(fun = PINN.optimizerfunc,
                                  x0 = init_params,
                                  args=(),
                                  method='L-BFGS-B',
                                  jac= True, # If jac is True, fun is assumed to return the gradient along with the objective function
                                  callback = PINN.optimizer_callback,
                                  options = {'disp': None,
                                             'maxcor': 200,
                                             'ftol': 1 * np.finfo(float).eps, #The iteration stops when (f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol
                                             'gtol': 5e-10,
                                             'maxfun': 50000,
                                             'maxiter': 10000,
                                             'iprint': -1, # no iteration updates
                                             'maxls': 50})
elapsed = time.time() - start_time
print('Training time: %.2f' % (elapsed))
print(results)
# Load the optimized parameter vector back into the network
PINN.set_weights(results.x)
''' Model Accuracy '''
u_pred = PINN.evaluate(X_u_test)
error_vec = np.linalg.norm((u-u_pred),2)/np.linalg.norm(u,2) # Relative L2 Norm of the error (Vector)
print('Test Error: %.5f' % (error_vec))
# -
# # Building the Hessian matrix
#
# [Refer TensorFlow 2.3 Documentation Example](https://www.tensorflow.org/guide/advanced_autodiff#example_hessian)
#
# What is the 'block' matrix?
#
# A matrix of tuples representing the dimensions of sub-hessians and to track indices while assembling the Hessian using sub-hessians
#
# Sub-hessians: $ \frac{\partial}{\partial W^{[l]}} \big( \frac{\partial loss}{\partial W^{[l]}} \big )$
#
# Each column contains the matrix $\frac{\partial loss}{\partial W^{[l]}}$
#
# Each row contains kernel $W^{[l]}$
#
# Each matrix element is a tensor of size (kernel[i].shape, kernel[j].shape)
#
# Example: kernel shape:(5,4), shape of matrix $\frac{\partial loss}{\partial W^{[l]}}$: (2,3)
#
# shape of sub-Hessian: (5,4,2,3)
#
# We reduce the shape of these higher order tensors into 2D tensors using tf.reshape
#
# +
num_kernels = (len(layers)-1)*2 # total number of weight and bias tensors
# 'block' stores, for each pair of kernels (i, j), the 2D shape of the
# corresponding sub-hessian once each kernel is flattened to a vector
block = np.zeros((num_kernels,num_kernels),object)
for j in range(num_kernels):
    for i in range(j+1):
        if i == j:
            # diagonal entry: square block of size (numel(W_i), numel(W_i))
            s = tf.reduce_prod(PINN.W[i].shape)
            block[i,j] = (s.numpy(),s.numpy())
        else:
            block[j,i] = (tf.reduce_prod(PINN.W[j].shape).numpy(), tf.reduce_prod(PINN.W[i].shape).numpy())
            # symmetric counterpart shares the same shape tuple
            block[i,j] = block[j,i]
print(block)
# -
# Computation and assembly of sub-hessians
# +
# Initialise Hessian of the boundary-data loss (loss_u)
# N x N square matrix , N = total number of parameters
H_u = np.zeros((PINN.parameters,PINN.parameters))
# pointer to mark position of sub-hessian assembly
pointer = np.array([0, 0]) # coordinates of the upper-left corner of current block
# Loop over the lower triangle of kernel pairs; symmetry of the Hessian
# provides the upper triangle via transposition.
for j in range(num_kernels):
    for i in range(j+1):
        with tf.GradientTape() as tape2:
            with tf.GradientTape() as tape1:
                loss_value, loss_u, loss_f = PINN.loss(X_u_train, u_train, X_f_train)
            g = tape1.gradient(loss_u, PINN.W[i]) #sub-gradient , n_in * n_out
        h = tape2.jacobian(g, PINN.W[j]) # sub-hessian
        #diagonal term
        if i == j:
            # reshape higher order tensor into 2D tensor
            h_mat = tf.reshape(h, block[j,i])
            # shape of block, block is square for diagonal terms
            block_shape = h_mat.shape
            # Assemble block in H matrix
            # position of assembly determined by 'pointer' and size of block
            H_u[pointer[0]:pointer[0]+block_shape[0], pointer[1]:pointer[1]+block_shape[1]] = h_mat
            # move pointer to new position
            # move to next row ---> determined by number of rows in current block
            pointer[0] = pointer[0] + block_shape[0]
            pointer[1] = 0
        #non-diagonal term
        else:
            # reshape higher order tensor into 2D tensor
            # (removed leftover debug print of h.shape)
            h_mat = tf.reshape(h, block[j,i])
            # shape of block
            block_shape = h_mat.shape
            # Assemble block in H matrix
            # position of assembly determined by 'pointer' and size of block
            H_u[pointer[0]:pointer[0]+block_shape[0], pointer[1]:pointer[1]+block_shape[1]] = h_mat
            # Assemble symmetric part by switching indices and transposing the block
            H_u[pointer[1]:pointer[1]+block_shape[1], pointer[0]:pointer[0]+block_shape[0]] = tf.transpose(h_mat)
            # move pointer to new position
            # move to next column ---> determined by number of columns in current block
            pointer[1] = pointer[1] + block_shape[1]
# +
# Initialise Hessian of the PDE residual loss (loss_f)
# N x N square matrix , N = total number of parameters
H_f = np.zeros((PINN.parameters,PINN.parameters))
# pointer to mark position of sub-hessian assembly
pointer = np.array([0, 0]) # coordinates of the upper-left corner of current block
for j in range(num_kernels):
    for i in range(j+1):
        with tf.GradientTape() as tape2:
            with tf.GradientTape() as tape1:
                loss_value, loss_u, loss_f = PINN.loss(X_u_train, u_train, X_f_train)
            g = tape1.gradient(loss_f, PINN.W[i]) #sub-gradient , n_in * n_out
        h = tape2.jacobian(g, PINN.W[j]) # sub-hessian
        #diagonal term
        if i == j:
            # reshape higher order tensor into 2D tensor
            h_mat = tf.reshape(h, block[j,i])
            # shape of block, block is square for diagonal terms
            block_shape = h_mat.shape
            # Assemble block in H matrix
            # position of assembly determined by 'pointer' and size of block
            H_f[pointer[0]:pointer[0]+block_shape[0], pointer[1]:pointer[1]+block_shape[1]] = h_mat
            # move pointer to new position
            # move to next row ---> determined by number of rows in current block
            pointer[0] = pointer[0] + block_shape[0]
            pointer[1] = 0
        #non-diagonal term
        else:
            # reshape higher order tensor into 2D tensor
            h_mat = tf.reshape(h, block[j,i])
            # shape of block
            block_shape = h_mat.shape
            # BUG FIX: this assignment previously wrote into H_u (copy-paste
            # from the loss_u assembly loop), corrupting H_u and leaving the
            # lower triangle of H_f empty. It must target H_f.
            H_f[pointer[0]:pointer[0]+block_shape[0], pointer[1]:pointer[1]+block_shape[1]] = h_mat
            # Assemble symmetric part by switching indices and transposing the block
            H_f[pointer[1]:pointer[1]+block_shape[1], pointer[0]:pointer[0]+block_shape[0]] = tf.transpose(h_mat)
            # move pointer to new position
            # move to next column ---> determined by number of columns in current block
            pointer[1] = pointer[1] + block_shape[1]
# -
# # Compute Eigenvalues
#
# In this section we use the hermitian property of the Hessian and the approx Hessian inverse matrices and compute their eigenvalues using the 'tf.linalg.eigvalsh()' method and plot them for comparison
# Eigenvalues of Hessian (H)
# +
# The Hessians are symmetric, so the Hermitian eigensolver applies
H_u = tf.convert_to_tensor(H_u, dtype = tf.float32)
v_hess_u = tf.linalg.eigvalsh(H_u)
H_f = tf.convert_to_tensor(H_f, dtype = tf.float32)
v_hess_f = tf.linalg.eigvalsh(H_f)
# Save both spectra side by side (column 0: BC loss, column 1: PDE loss)
np.savetxt("hess_eigvals.txt", np.array([v_hess_u, v_hess_f]).T)
# -
# NOTE(review): assumes these result files exist from earlier runs — verify paths
s1 = np.loadtxt("prove_stiffness/split/hess_eigvals.txt")
s2 = np.loadtxt("prove_stiffness/split/hess_eigvals_stiff.txt")
# +
### Plotting ###
fig,ax = plt.subplots()
plt.plot(s1[:,0], '--', color = 'orange', label = r'$\nabla_{\theta}^2 \hat J_{BC}$ (Non-Stiff)')
plt.plot(s1[:,1], color = 'orange', label = r'$\nabla_{\theta}^2 \hat J_{PDE}$ ')
plt.plot(s2[:,0], '--', color = 'green', label = r'$\nabla_{\theta}^2 \hat J_{BC}$ (Stiff)')
plt.plot(s2[:,1], color = 'green', label = r'$\nabla_{\theta}^2 \hat J_{PDE}$')
plt.yscale('symlog')  # symmetric log scale handles positive and negative eigenvalues
plt.legend()
plt.savefig('prove_stiffness/split/split_200.png', dpi = 500)
| TensorFlow/Helmholtz Equation/Experiments/Helmholtz_Equation_custom-Hessian_split.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## PHYS 105A: Introduction to Scientific Computing
#
# # Overview
#
# <NAME>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Modern Science
#
# 1. Empirical evidence
# 2. Scientific theory
# 3. Computational science
# 4. Data science
# + [markdown] slideshow={"slide_type": "fragment"}
# Modern science is complex!
#
# The last two require computing!
# + [markdown] slideshow={"slide_type": "slide"}
# ### Computational Science
#
# Physical Models $\rightarrow$ Data and Results
#
# * Nonlinear and many degrees of freedom (DoF) systems:
# * Kelvin--Helmholtz instability: https://youtu.be/Bqg31OwAqEQ
# * Kolmogorov forced turbulence: https://youtu.be/Q2HFwsThk0U
#
# * Multi-physics problems:
# * Black hole accretion flow: https://youtu.be/-GQ1aUHwx-k
# * Multi-wavelength black hole images: https://youtu.be/G6sSwRf_9ME
# * Galactic center black hole: https://youtu.be/2bnP6UtfY2k
#
# * Large scale simulation libraries:
# * Illustris: https://www.illustris-project.org
# * TNG Project: https://www.tng-project.org
# + [markdown] slideshow={"slide_type": "slide"}
# ### Data Science
#
# Data and ML $\rightarrow$ Result
#
# * Experiments with very large data sets:
# * Event Horizon Telescope: https://eventhorizontelescope.org/science
# * <NAME> Observatory (LSST): https://www.lsst.org/
#
# * Let computer learn algorithms from data:
# * Computer vision: http://www.image-net.org
# * Self-driving cars: https://en.wikipedia.org/wiki/Self-driving_car
#
# * Knowledge discovery from data:
# * UA Data Science Institute: https://datascience.arizona.edu/
# * Machine Learning (ML): https://scikit-learn.org/
#
# * Artificial Intelligence (AI):
# * AlphaGo: https://deepmind.com/research/case-studies/alphago-the-story-so-far
# * AlphaFold: https://deepmind.com/blog/article/AlphaFold-Using-AI-for-scientific-discovery
# + [markdown] slideshow={"slide_type": "slide"}
# ## Expected Learning Outcomes
#
# Upon completion of this course, students will be able to:
#
# * understand the nature and application of computation methods in physical science;
# * use popular development tools (shell, `git`, Jupyter, etc);
# * use good software development practices (version control, documentation, and automation);
# * use computational thinking to break down complex physics problems;
# * solve these problems by writing programs in the `python` and `C` programming languages;
# * speak and write about scientific knowledge;
# * appreciate computation complexity and have a basic awareness of numerical errors;
# * use data analysis and numerical methods properly, and be aware of their common pitfalls;
# * critically analyze and interpret data and results presented in tables, graphs and charts as well as perform appropriate computations;
# * read and understand scientific literature from popular sources such as magazines and newspapers;
# * aware of a wide range of science use cases, and develop the skill to self-learn computation tools and methods.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Topics
#
# We plan to cover the following topics in PHYS 105A:
#
# \# | Lecture | Hands-on
# --- | --- | ---
# 1 | Overview | Sign up for accounts and set up development environment
# 2 | Essential tools for scientific computing | Unix shells, remote login, version control, etc
# 3 | The `python` programming language | Jupyter Lab and `python` programming (assignment 10pts)
# 4 | Random numbers and Monte Carlo methods | Monte Carlo in `python` (assignment 10pts)
# 5 | The `C` programming language | Monte Carlo in `C` (assignment 10pts)
# 6 | Statistics and data processing methods | Compute statistics in `python` (assignment 10pts); project planning
# 7 | Array programming and `numpy` | Compare code performance; project helpout
# | Project presentations | (project 20pts)
# ... | ... | ...
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Schedule
#
# Week of | Monday | Tuesday<br/>(Session 1) | Wednesday | Thursday<br/>(Session 2) | Friday
# --- | --- | --- | --- | --- | ---
# Jan 10 | | | | #1
# Jan 17 | MLK Day | #1 | | #2
# Jan 24 | | #2 | | #3
# Jan 31 | | #3 | HO #3 due | #4
# Feb 7 | HO #3 due | #4 | HO #4 due | #5
# Feb 14 | HO #4 due | #5 | HO #5 due | #6; project planning
# Feb 21 | HO #5 due | #6; project planning | HO #6 due | Reading Day
# Feb 28 | HO #6 due | #7; project helpout | | #7; project helpout
# Mar 7 | | Reading Day | Reading Day;<br/>projects due | Project presentations
# ... |
# + [markdown] slideshow={"slide_type": "slide"}
# ## Grading Scale and Policies
#
# There are 8 assignments and 2 projects in total.
# Each assignment worth 10 points and each project worth 20 points.
#
# This course provides regular letter grades (A–E), which are based on a
# simple point system:
#
# * A: 80–100 points
# * B: 70–79.9 points
# * C: 60–69.9 points
# * D: 50–59.9 points
# * E: 0–49.9 points
#
# No scaling will be applied.
# Nevertheless, the points for a student's worst two assignments or one
# project will not be counted.
#
# -
| 01/Overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def my_first_function():
    """Demonstrate the simplest possible function: print a greeting."""
    print("Hello Python")


# Invoke it once so the greeting appears when the cell runs.
my_first_function()
def my_sum(x, y):
    """Return the sum of x and y."""
    # Avoid naming the local 'sum' — that would shadow the builtin.
    total = x + y
    return total


my_sum(1, 2)
def my_sum(x, y, z):
    """Return (x + y * z) squared; note * binds tighter than +."""
    return (x + y * z) ** 2


my_sum(1, 2, 3)
def function1(x, *args):
    """Print only the first argument; extra positional args are accepted but ignored."""
    print(x)
function1('hello')
def function1(x, *args):
    """Print the first argument, then the tuple of remaining args."""
    print(x)
    print(args)
function1('hello')  # args is the empty tuple () here
def function1(x, *args):
    """Print the first argument, then each remaining arg on its own line."""
    print(x)
    for argument in args:
        print(argument)
function1(1, 2, 3)
print(list(range(10)))  # 0 through 9
my_var= 10  # module-level (global) variable
print(my_var*10)
# +
def my_var_func():
    # This my_var is local — it shadows any module-level my_var.
    my_var = 10
    print(my_var)
my_var_func()
# +
my_var = 5
def my_var_func():
    # 'global' lets this function rebind the module-level my_var.
    global my_var
    print(my_var)  # current global value (5)
    my_var = 10
    print(my_var)  # rebound value (10)
my_var_func()
# -
import math
mystr = "This is my string the contains words for regex work"
import re
# re.match anchors at the start of the string; returns a Match object or None
a = re.match("This", mystr)
a
type(a)
a.group()  # the matched text
# re.I makes the match case-insensitive
a = re.match("This", mystr, re.I)
a
a.group()
arp = "22.22.22.1 0 b4:a9:5a:ff:c8:45 VLAN#222"
# re.search scans the whole string; groups capture the separated fields
a = re.search(r"(.+?) +(\d) +(.+?)\s{2,}(\w)*", arp)
a
a.group(1)
a.group(2, 3, 4)
# findall returns a list of group tuples instead of a Match object
a = re.findall(r"(.+?) +(\d) +(.+?)\s{2,}(\w)*", arp)
a
# Equivalent digit-quantifier spellings: \d\d, \d{2}, [0-9][0-9], [0-9]{1,3}
a = re.findall(r"\d\d\.\d{2}\.[0-9][0-9]\.[0-9]{1,3}", arp)
a
b = re.sub(r"\d", "7", arp)  # replace every digit with '7'
b
class MyRouter(object):
    """A class that describes the characteristics of a router."""

    # BUG FIX: the constructor was written as "def_init_(self, ...):",
    # which is a SyntaxError inside a class body; the dunder method must
    # be spelled __init__.
    def __init__(self, routername, model, serialno, ios):
        self.routername = routername
        self.model = model
        self.serialno = serialno
        self.ios = ios
list2 = [x ** 2 for x in range(10)]
list2
list3 = [x ** 2 for x in range(10) if x > 5]
list3
dict1 = {x: x * 2 for x in range(10)}
dict1
a = lambda x, y: x * y
type(a)
a(2, 10)
def myfunc(mylist):
    """Return the 50 products x*y (x in 0..9, y in 0..4) followed by *mylist*.

    The products appear in nested-loop order (all y for x=0, then x=1, ...).
    """
    products = [x * y for x in range(10) for y in range(5)]
    return products + mylist
myfunc([100, 200, 300, 400])
import threading
import time
# +
# Launch five worker threads, then wait for all of them with join();
# the 3-second sleeps overlap, so total wall time is ~3s.
def myfunc():
    print("Start")
    time.sleep(3)
    print('End')
threads = []
for i in range(5):
    th = threading.Thread(target = myfunc)
    th.start()
    threads.append(th)
for th in threads:
    th.join()
# +
# Same setup as the previous cell, but the final loop calls myfunc()
# directly in the main thread instead of join()-ing the workers.
# NOTE(review): this runs the function five MORE times serially and never
# waits on the started threads — presumably an intentional contrast with
# the join() version; confirm the intent.
def myfunc():
    print("Start")
    time.sleep(3)
    print('End')
threads = []
for i in range(5):
    th = threading.Thread(target = myfunc)
    th.start()
    threads.append(th)
for th in threads:
    myfunc()
# -
r1 = range(10)
list(map((lambda a: a * 10), r1))[-1]
# +
import math

# Interactive calculator menu. Options (entered as strings):
#   0: add      1: subtract   2: multiply   3: divide   4: modulo
#   5: power    6: sqrt       7: log base 2 8: sin(deg) 9: cos(deg)
#  10: log of val1 in base val2
# Each operation asks for its operand(s), prints the result, then asks
# whether to return to the menu; any answer other than "y" exits.
#
# BUG FIXES vs. the original:
#  * the prompts started with "\C" and "/Y" — broken "\n" escapes;
#  * "opperation" typo in the menu prompt;
#  * option "10" read a second value but ignored it and always computed
#    log base 2; it now computes log(val1, val2) as intended.
# The per-option if/elif blocks were identical except for the operation,
# so they are collapsed into one dispatch table.

_OPERATIONS = {
    "0": (2, lambda a, b: a + b),
    "1": (2, lambda a, b: a - b),
    "2": (2, lambda a, b: a * b),
    "3": (2, lambda a, b: a / b),
    "4": (2, lambda a, b: a % b),
    "5": (2, math.pow),
    "6": (1, math.sqrt),
    "7": (1, lambda a: math.log(a, 2)),
    "8": (1, lambda a: math.sin(math.radians(a))),
    "9": (1, lambda a: math.cos(math.radians(a))),
    "10": (2, lambda a, b: math.log(a, b)),
}

while True:
    print("\nChoose the math operation.\n")
    oper = input("\nYour option from the menu: ")
    if oper not in _OPERATIONS:
        print("\nInvalid option\n")
        continue
    arity, func = _OPERATIONS[oper]
    val1 = float(input("\nFirst value: "))
    if arity == 2:
        val2 = float(input("\nSecond value: "))
        result = func(val1, val2)
    else:
        result = func(val1)
    print("\nThe result is: " + str(result) + "\n")
    back = input('\nGo back to the main menu? (y/n) ')
    if back != "y":
        break
# END
# +
import mysql.connector
# NOTE(review): placeholder credentials — replace user/password with real
# values before running; connect() raises mysql.connector.Error on failure.
mydb = mysql.connector.connect(
    host="localhost",
    user="yourusername",
    password="<PASSWORD>"
)
print(mydb)
# -
False = 't'  # NOTE(review): SyntaxError in Python 3 — False is a keyword; looks like a deliberate error demo
a = (1,2,3)
split(a[:])  # NOTE(review): NameError — there is no built-in split(); presumably another error demo
5%2  # modulo -> 1
5//2  # floor division -> 2
5/2  # true division -> 2.5
# +
i = ('i=5')
f = ('f=2.4')
print(i,f)
# -
i = 5
f = 2.4
ival = str(i)
fval = str(f)
print (ival, fval)
i = 'i = 5'
f = 'f = 2.6'
ival = str(i)
fval = str(f)
print (ival, fval)
Budgetmonth = int(input("input your budget"))
# +
def budget(Budgetmonth):
    """Ask the user for four expenses and return the remaining budget.

    BUG FIXES: the original definition had an arithmetic expression in
    place of its parameter list (a SyntaxError), compared an undefined
    name ``Expenses``, and returned None. It now subtracts the total of
    the four entered expenses from *Budgetmonth* and returns the result.
    """
    Expenses1 = int(input("input your first expense"))
    Expenses2 = int(input("input your second expenses"))
    Expenses3 = int(input("input your third expenses"))
    Expenses4 = int(input("input your fourth expenses"))
    total = Expenses1 + Expenses2 + Expenses3 + Expenses4
    if total >= 0:
        Budgetmonth = Budgetmonth - total
    return Budgetmonth
# -
budget(Budgetmonth)
budget(Budgetmonth)
| GathiPythonSection12-20.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Write a Python program for binary search.
#
# In computer science, binary search, also known as half-interval search, is a search algorithm that finds the position of a target value within a sorted array. Binary search compares the target value to the middle element of the array. If they are not equal, the half in which the target cannot lie is eliminated and the search continues on the remaining half, again taking the middle element to compare to the target value, and repeating this until the target value is found. If the search ends with the remaining half being empty, the target is not in the array.
def binary_search(item_list, item):
    """Return True if *item* occurs in the sorted list *item_list*.

    Classic half-interval search: compare *item* with the middle element
    and discard the half that cannot contain it until the range is empty.
    """
    lo, hi = 0, len(item_list) - 1
    while lo <= hi:
        middle = (lo + hi) // 2
        candidate = item_list[middle]
        if candidate == item:
            return True
        if item < candidate:
            hi = middle - 1
        else:
            lo = middle + 1
    return False
mylist = [2,3,1,12,34]
mylist
mylist.sort()
mylist
print(binary_search(mylist, 12))
print(binary_search(mylist, 13))
## Giving unsorted array
def binary(item_list, item):
    """Binary search that accepts an unsorted list.

    Sorts a copy of *item_list* (BUG FIX: the original called
    ``item_list.sort()``, mutating the caller's list in place), prints
    the sorted list as before, then runs a standard binary search.

    Returns True if *item* is present, False otherwise.
    """
    ordered = sorted(item_list)
    print(ordered)
    first = 0
    last = len(ordered) - 1
    while first <= last:
        mid = (first + last) // 2
        if ordered[mid] == item:
            return True
        if item < ordered[mid]:
            last = mid - 1
        else:
            first = mid + 1
    return False
mylist = [2,3,1,12,34]
print(binary(mylist, 12))
print(binary(mylist, 13))
| docs/Notebooks/01.binary-search.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="AhDwkoat1H5Z" outputId="7cba3fe2-3309-4b15-ccf0-ce5c1a3a1c66"
from google.colab import drive
drive.mount('/content/drive')
# + id="sWYbuVI3G90Q"
# %cd '/content/drive/MyDrive/VOC2007'
# !unzip data_object_image_2.zip
# !unzip data_object_label_2.zip
# + colab={"base_uri": "https://localhost:8080/"} id="_Tifh44Y1Jt8" outputId="ee736392-11d6-4cbc-fbc0-4b4df68dd149"
# %cd '/content/drive/MyDrive'
# !git clone https://github.com/oliver0922/SSD.Pytorch.git
# + id="h4Y9D3AjHE04"
# %cd '/content/drive/MyDrive/SSD.Pytorch'
# !python class_name_chane.py
# + id="FEDAmP9xHHPc"
# !python txt_to_xml.py
# + id="pBjQprZqHI6v"
# !python create_train_test_txt.py
# + id="cEU0IUNdQOYS" colab={"base_uri": "https://localhost:8080/"} outputId="397c8365-2eeb-42ff-c547-869bb2ad5468"
# !pip install visdom
# + id="KCkIohhCHLOI"
# %cd '/content/drive/MyDrive/SSD.Pytorch'
# !python create_train_test_txt.py
# + colab={"base_uri": "https://localhost:8080/"} id="fVljMCi61xnq" outputId="55ff2a4a-c21d-42cd-d289-9caaa924abd2"
# %cd '/content/drive/MyDrive/SSD.Pytorch/weights'
# !wget https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth
# + colab={"base_uri": "https://localhost:8080/"} id="IzirRpYSZbFE" outputId="45452793-0071-4dbb-8cbd-6a7ec323d6af"
# %cd '/content/drive/MyDrive/SSD.Pytorch'
# !python train.py --input 512 --num_class 6 --num_epoch 400 --lr 0.001 --batch_size 16
# + colab={"base_uri": "https://localhost:8080/"} id="5lpBi67GWyQp" outputId="08b1c781-a42d-4df2-e417-accab3b5a492"
# %cd '/content/drive/MyDrive/SSD.Pytorch'
# !python eval.py --input 512
# + id="zuz15cTsSsIi"
y
| ssd_kitti_pytorch_efficientnet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Material informatics sample project
#
# > Sample project covering various steps necessary for developing a model for material property prediction
#
# - toc: true
# - badges: true
# - comments: true
# - author: <NAME>
# - categories: [chemical-science, python, machine-learning]
# A random forest regression model is built to predict the heat capacity ($C_p$) of solid inorganic materials at different temperatures. The dataset is collected from the [NIST JANAF Thermochemical Table](https://doi.org/10.18434/T42S31)
#
# This project is adapted from a recent publication looking at best practices for setting up a material informatics task.
# * [<NAME> et al., “Machine Learning for Materials Scientists: An Introductory Guide toward Best Practices,” Chem. Mater., vol. 32, no. 12, pp. 4954–4965, 2020](https://doi.org/10.1021/acs.chemmater.0c01907).
# +
import os
import pandas as pd
import numpy as np
np.random.seed(42)
# +
#----- PLOTTING PARAMS ----#
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
# High DPI rendering for mac
# %config InlineBackend.figure_format = 'retina'
# Plot matplotlib plots with white background:
# %config InlineBackend.print_figure_kwargs={'facecolor' : "w"}
plot_params = {
'font.size' : 15,
'axes.titlesize' : 15,
'axes.labelsize' : 15,
'axes.labelweight' : 'bold',
'xtick.labelsize' : 12,
'ytick.labelsize' : 12,
}
plt.rcParams.update(plot_params)
# -
# # Loading and cleaning the data
# +
root_dir = os.getcwd()
csv_file_path = os.path.join(root_dir, 'material_cp.csv')
df = pd.read_csv(csv_file_path)
# -
df.sample(5)
print(df.shape)
df.describe().round(2)
# Rename columns for better data handling
rename_dict = {'FORMULA':'formula', 'CONDITION: Temperature (K)':'T', 'PROPERTY: Heat Capacity (J/mol K)':'Cp'}
df = df.rename(columns=rename_dict)
df
# Check for null entries in the dataset
columns_has_NaN = df.columns[df.isnull().any()]
df[columns_has_NaN].isnull().sum()
# Show the null entries in the dataframe
is_NaN = df.isnull()
row_has_NaN = is_NaN.any(axis=1)
df[row_has_NaN]
df_remove_NaN = df.dropna(subset=['formula','Cp','T'])
df_remove_NaN.isnull().sum()
# Remove unrealistic values from the dataset
df_remove_NaN.describe()
# +
T_filter = (df_remove_NaN['T'] < 0)
Cp_filter = (df_remove_NaN['Cp'] < 0)
df_remove_NaN_neg_values = df_remove_NaN.loc[(~T_filter) & (~Cp_filter)]
print(df_remove_NaN_neg_values.shape)
# -
# # Splitting data
#
# The dataset in this exercise contains different formulae, Cp and T for that entry as a function of T. There are lot of repeated formulae and there is a chance that randomly splitting the dataset in train/val/test would lead to leaks of material entries between 3 sets.
#
# To avoid this the idea is to generate train/val/test such that all material entries belonging a particular type are included in only that set. Eg: B2O3 entries are only in either train/val/test set. To do so let's first find the unique material entries in the set and sample those without replacement when making the new train/val/test set
df = df_remove_NaN_neg_values.copy()
df
# Quick and definitely dirty
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(df, test_size=0.4, random_state=42)
# There are going to be couple of materials which are going to be present in training and test both
# +
# check for intersection
train_set = set(train_df['formula'].unique())
test_set = set(test_df['formula'].unique())
# Check for intersection with val and test
len(train_set.intersection(test_set))
# -
# Start with unique splitting task
len(df['formula'].unique())
# Out of 244 unique materials entries, 233 are present in both training and test. This is problematic for model building especially since we're going to featurize the materials using solely the composition-based features.
# +
f_entries = df['formula'].value_counts()[:50]
fig, ax = plt.subplots(1,1, figsize=(5,20))
ax.barh(f_entries.index, f_entries.values)
ax.tick_params(axis='x', rotation=90);
# -
df['formula'].unique()[:10]
# Creating train/val/test manually
unique_entries = df['formula'].unique()
# Set size for train/val/test set
train_set = 0.7
val_set = 0.2
test_set = 1 - train_set - val_set
num_entries_train = int( train_set * len(unique_entries) )
num_entries_val = int( val_set * len(unique_entries) )
num_entries_test = int( test_set * len(unique_entries) )
print(num_entries_train, num_entries_val, num_entries_test)
# +
# Train formula
train_formulae = np.random.choice(unique_entries, num_entries_train, replace=False)
unique_entries_minus_train = [i for i in unique_entries if i not in train_formulae]
# Val formula
val_formulae = np.random.choice(unique_entries_minus_train, num_entries_val, replace=False)
unique_entries_minus_train_val = [i for i in unique_entries_minus_train if i not in val_formulae]
# Test formula
test_formulae = unique_entries_minus_train_val.copy()
# -
print(len(train_formulae), len(val_formulae), len(test_formulae))
train_points = df.loc[ df['formula'].isin(train_formulae) ]
val_points = df.loc[ df['formula'].isin(val_formulae) ]
test_points = df.loc[ df['formula'].isin(test_formulae) ]
print(train_points.shape, val_points.shape, test_points.shape)
# +
# Quick sanity check of the method
train_set = set(train_points['formula'].unique())
val_set = set(val_points['formula'].unique())
test_set = set(test_points['formula'].unique())
# Check for intersection with val and test
print(len(train_set.intersection(val_set)), len(train_set.intersection(test_set)))
# -
# # Model fitting
#
# ## Featurization
#
# Composition-based feature vector (CBFV) is used to describe each mateiral entry (eg: Cr<sub>2</sub>O<sub>3</sub>) with set of elemental and composition based numbers.
# Import the package and the generate_features function
from cbfv.composition import generate_features
# +
rename_columns = {'Cp':'target'}
train_points['Type'] = 'Train'
val_points['Type'] = 'Val'
test_points['Type'] = 'Test'
total_data = pd.concat([train_points, val_points, test_points], ignore_index=True);
total_data = total_data.rename(columns=rename_columns)
# -
total_data.sample(5)
train_df = total_data.loc[ total_data['Type'] == 'Train' ].drop(columns=['Type']).reset_index(drop=True)
val_df = total_data.loc[ total_data['Type'] == 'Val' ].drop(columns=['Type']).reset_index(drop=True)
test_df = total_data.loc[ total_data['Type'] == 'Test' ].drop(columns=['Type']).reset_index(drop=True)
# ### Sub-sampling
#
# Only some points from the original training data `train_df` are used to ensure the analysis is tractable
train_df.shape
train_df = train_df.sample(n=1000, random_state=42)
train_df.shape
# Generate features
X_unscaled_train, y_train, formulae_entry_train, skipped_entry = generate_features(train_df, elem_prop='oliynyk', drop_duplicates=False, extend_features=True, sum_feat=True)
X_unscaled_val, y_val, formulae_entry_val, skipped_entry = generate_features(val_df, elem_prop='oliynyk', drop_duplicates=False, extend_features=True, sum_feat=True)
X_unscaled_test, y_test, formulae_entry_test, skipped_entry = generate_features(test_df, elem_prop='oliynyk', drop_duplicates=False, extend_features=True, sum_feat=True)
X_unscaled_train.head(5)
formulae_entry_train.head(5)
X_unscaled_train.shape
# ## Feature scaling
X_unscaled_train.columns
X_unscaled_train.describe().round(2)
X_unscaled_train['range_heat_of_vaporization_(kJ/mol)_'].hist();
from sklearn.preprocessing import StandardScaler, normalize
stdscaler = StandardScaler()
X_train = stdscaler.fit_transform(X_unscaled_train)
X_val = stdscaler.transform(X_unscaled_val)
X_test = stdscaler.transform(X_unscaled_test)
pd.DataFrame(X_train, columns=X_unscaled_train.columns).describe().round(2)
pd.DataFrame(X_train, columns=X_unscaled_train.columns)['range_heat_of_vaporization_(kJ/mol)_'].hist()
X_train = normalize(X_train)
X_val = normalize(X_val)
X_test = normalize(X_test)
pd.DataFrame(X_train, columns=X_unscaled_train.columns).describe().round(2)
pd.DataFrame(X_train, columns=X_unscaled_train.columns)['range_heat_of_vaporization_(kJ/mol)_'].hist()
# ## Model fitting
# +
from time import time
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
# -
model = RandomForestRegressor(random_state=42)
# %%time
model.fit(X_train, y_train)
def display_performance(y_true, y_pred):
    """Print R2, MAE and RMSE for *y_pred* against *y_true*.

    Returns the three scores as an (r2, mae, rmse) tuple.
    """
    scores = (
        r2_score(y_true, y_pred),
        mean_absolute_error(y_true, y_pred),
        np.sqrt(mean_squared_error(y_true, y_pred)),
    )
    print('R2: {0:0.2f}\n'
          'MAE: {1:0.2f}\n'
          'RMSE: {2:0.2f}'.format(*scores))
    return scores
y_pred = model.predict(X_val)
display_performance(y_val,y_pred);
# +
fig, ax = plt.subplots(1,1, figsize=(5,5))
ax.scatter(y_val, y_pred, alpha=0.6, label='Random Forest')
lims = [np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
# Linear fit
reg = np.polyfit(y_val, y_pred, deg=1)
ax.plot(lims, reg[0] * np.array(lims) + reg[1], 'r--', linewidth=1.5, label='Linear Fit')
ax.plot(lims, lims, 'k--', alpha=0.75, zorder=0, label='Parity Line')
ax.set_aspect('equal')
ax.set_xlabel('True value')
ax.set_ylabel('Model predicted')
ax.legend(loc='best')
# -
# ## Feature Importance
feature_name = [i for i in X_unscaled_train.columns]
len(feature_name)
X_train.shape
len(model.estimators_)
mean_feature_importance = model.feature_importances_
std_feature_importance = np.std([ tree.feature_importances_ for tree in model.estimators_ ], axis=0)
feat_imp_df = pd.DataFrame({'name':feature_name, 'mean_imp':mean_feature_importance, 'std_dev':std_feature_importance})
feat_imp_df_top = feat_imp_df.sort_values('mean_imp', ascending=False)[:20]
feat_imp_df_top[:5]
fig, ax = plt.subplots(1,1, figsize=(30,3))
ax.bar(feat_imp_df_top['name'], feat_imp_df_top['mean_imp'], yerr=feat_imp_df_top['std_dev'])
ax.tick_params(axis='x', rotation=90)
ax.set_title('Feature Importance');
top_feature_list = feat_imp_df.loc[ feat_imp_df['mean_imp'] > 0.001 ]['name']
len(top_feature_list)
# +
X_train_df = pd.DataFrame(X_train, columns=feature_name)
X_val_df = pd.DataFrame(X_val, columns=feature_name)
X_train_short = X_train_df[list(top_feature_list)]
X_val_short = X_val_df[list(top_feature_list)]
# -
print(X_train_short.shape, X_train.shape)
# ### Refit a new model on small feature set
model_small = RandomForestRegressor(random_state=42)
# %%time
model_small.fit(X_train_short, y_train)
y_pred = model_small.predict(X_val_short)
display_performance(y_val, y_pred);
# +
fig, ax = plt.subplots(1,1, figsize=(5,5))
ax.scatter(y_val, y_pred, alpha=0.6, label='Random Forest')
lims = [np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes
np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes
]
# Linear fit
reg = np.polyfit(y_val, y_pred, deg=1)
ax.plot(lims, reg[0] * np.array(lims) + reg[1], 'r--', linewidth=1.5, label='Linear Fit')
ax.plot(lims, lims, 'k--', alpha=0.75, zorder=0, label='Parity Line')
ax.set_aspect('equal')
ax.set_xlabel('True value')
ax.set_ylabel('Model predicted')
ax.legend(loc='best')
# -
# ## Cross-validation
# Combine train and validation set to generate one train - test set for cross-validation
# Train stack
X_y_train = np.c_[X_train_short, y_train]
X_y_train.shape
np.unique(X_y_train[:,-1] - y_train)
# Validation stack
X_y_val = np.c_[X_val_short, y_val]
X_Y_TRAIN = np.vstack((X_y_train, X_y_val))
# +
X_TRAIN = X_Y_TRAIN[:,0:-1].copy()
Y_TRAIN = X_Y_TRAIN[:,-1].copy()
print(X_TRAIN.shape, Y_TRAIN.shape)
# +
from sklearn.model_selection import cross_validate
def display_score(scores, metric):
    """Print the mean and standard deviation of one cross-validation metric.

    *scores* is the dict returned by sklearn's cross_validate; *metric*
    is the scoring name without the 'test_' prefix.
    """
    values = scores['test_{}'.format(metric)]
    print(metric)
    print('Mean: {}'.format(values.mean()))
    print('Std dev: {}'.format(values.std()))
# -
# %%time
_scoring = ['neg_root_mean_squared_error', 'neg_mean_absolute_error']
forest_scores = cross_validate(model, X_TRAIN, Y_TRAIN,
scoring = _scoring, cv=5)
display_score(forest_scores, _scoring[0])
display_score(forest_scores, _scoring[1])
# ## Hyperparameter Optimization
import joblib
from sklearn.model_selection import RandomizedSearchCV
# +
random_forest_base_model = RandomForestRegressor(random_state=42)
param_grid = {
'bootstrap':[True],
'min_samples_leaf':[5,10,100,200,500],
'min_samples_split':[5,10,100,200,500],
'n_estimators':[100,200,400,500],
'max_features':['auto','sqrt','log2'],
'max_depth':[5,10,15,20]
}
# -
CV_rf = RandomizedSearchCV(estimator=random_forest_base_model,
n_iter=50,
param_distributions=param_grid,
scoring='neg_root_mean_squared_error',
cv = 5, verbose = 1, n_jobs=-1, refit=True)
# %%time
with joblib.parallel_backend('multiprocessing'):
CV_rf.fit(X_TRAIN, Y_TRAIN)
print(CV_rf.best_params_, CV_rf.best_score_)
pd.DataFrame(CV_rf.cv_results_).sort_values('rank_test_score')[:5]
best_model = CV_rf.best_estimator_
best_model
| _notebooks/2021-04-28-material_prop_walkthrough.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# This tool converts a folder of samples to a big rectangular matrix with one mono sample per row.
#
# Samples should be placed in `data/mydataset/samples/`. They could be `.mp3`, `.wav`, or anything else that ffmpeg can work with. They may be all in one folder, or in nested sub-folders.
#
# Change the path below to point to the root directory, e.g., `data/mydataset/`.
#
# The samplerate `sr` is not necessarily the native samplerate of the samples, it's the samplerate you want to load them at.
#
# The output of this notebook is:
# * `data/mydataset/durations.txt`
# * `data/mydataset/filenames.txt`
# * `data/mydataset/samples.npy`
data_root = '/Users/zebra/Developer/Python/AudioNotebooks/data/drums'
sr = 44000
max_length = sr*4 # ignore samples longer than 4 seconds
fixed_length = sr/4 # trim all samples to 250 milliseconds
limit = None # set this to 100 to only load the first 100 samples
import numpy as np
from os.path import join
from utils import *
from multiprocessing import Pool
from list_all_files import list_all_files_with_subdirs
import list_all_files
files = list_all_files.list_all_files_within_subdirs(join(data_root, 'samples'), ['.wav'])
len(files)
def load_sample(fn, sr=None,
                max_length=None, fixed_length=None, normalize=True):
    """Load one audio file as a mono array at samplerate *sr*.

    Returns (filename, audio, original_length_in_samples), or None when
    the sample should be skipped: empty filename, zero-length audio,
    audio at least *max_length* samples long, or completely silent.
    When *fixed_length* is given the audio is zero-padded / truncated in
    place to that length; when *normalize* is true it is scaled so the
    peak absolute value is 1.
    """
    if fn == '':  # ignore empty filenames
        return None
    audio, _ = ffmpeg_load_audio(fn, sr, mono=True)
    duration = len(audio)
    if duration == 0:  # nothing decoded
        return None
    if max_length and duration >= max_length:  # too long
        return None
    if fixed_length:
        audio.resize(fixed_length)
    peak = np.abs(audio).max()
    if peak == 0:  # completely silent
        return None
    if normalize:
        audio /= peak
    return (fn, audio, duration)
# Thin wrapper so Pool.map can pass just the filename while the
# loading parameters (sr, max_length, fixed_length) come from the
# notebook globals defined above.
def job(fn):
    return load_sample(fn, sr=sr,
        max_length=max_length, fixed_length=fixed_length)
pool = Pool()
# %time results = pool.map(job, files[:limit])
print 'Processed', len(results), 'samples'
valid = filter(None, results)
filenames = [x[0] for x in valid]
samples = [x[1] for x in valid]
durations = [x[2] for x in valid]
samples = np.asarray(samples)
np.savetxt(join(data_root, 'filenames.txt'), filenames, fmt='%s')
np.savetxt(join(data_root, 'durations.txt'), durations, fmt='%i')
# %time np.save(join(data_root, 'samples.npy'), samples)
print 'Saved', len(valid), 'samples'
| Collect Samples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### notebook purpose
#
# evaluate variations of qdess experiment
# +
import os, sys
import numpy as np
import matplotlib.pyplot as plt
import torch
sys.path.insert(0, '../')
from utils.evaluate import calc_metrics_imgs, plot_row_qdess, scale_0_1, get_mu_diff
from utils.data_io import get_mtr_ids_and, load_imgs, load_imgs_many_inits
# +
accel = 4
path_base = '/bmrNAS/people/dvv/out_qdess/accel_{}x/'.format(accel)
path_gt = path_base + 'gt/'
path_bl = path_base + 'calib/24/'
path_new = path_base + 'calib/max/'
# -
# ### load imgs, calc metrics
#
# - alternative image loader if comparing across multiple initializations via `many_inits/`
# +
mtr_id_list = get_mtr_ids_and(path_bl, path_new)
mtr_id_list = mtr_id_list[:6]
imgs_gt = load_imgs(mtr_id_list, path=path_gt)
imgs_bl = load_imgs(mtr_id_list, path=path_bl)
imgs_new = load_imgs(mtr_id_list, path=path_new)
# inits_bl, inits_new = 1, 4
# imgs_bl = load_imgs_many_inits(mtr_id_list, path_bl,
# num_inits=inits_bl)
# imgs_new = load_imgs_many_inits(mtr_id_list, path_new,
# num_inits=inits_new)
metrics_bl = calc_metrics_imgs(imgs_gt, imgs_bl)
metrics_new = calc_metrics_imgs(imgs_gt, imgs_new)
# -
# ### bonus (delete later)
#
# - plot quant metrics v. calibration region size
def get_mu(imgs_gt, mtr_id_list, path_eval):
    """Load reconstructions from *path_eval* and return their mean metrics.

    Metrics are computed against *imgs_gt*, averaged over samples and
    rounded to 4 decimals.
    """
    imgs_evaluated = load_imgs(mtr_id_list, path=path_eval)
    scores = calc_metrics_imgs(imgs_gt, imgs_evaluated)
    return np.around(np.mean(scores, 0), 4)
# +
accel = 8
path_base = '/bmrNAS/people/dvv/out_qdess/accel_{}x/'.format(accel)
path_gt = path_base + 'gt/'
path_bl = path_base + 'calib/24/'
mtr_id_list = get_mtr_ids_and(path_bl, path_gt)
imgs_gt = load_imgs(mtr_id_list, path=path_gt)
calib_regions = ['24', '32', '40', '64', 'max']
num_cal, num_echos, num_metrics = len(calib_regions), 2, 5
mu_all = np.empty((num_cal, num_echos, num_metrics))
for idx_c, calib in enumerate(calib_regions):
path_eval = '{}calib/{}/'.format(path_base, calib)
mu_all[idx_c] = get_mu(imgs_gt, mtr_id_list, path_eval)
# print(mu)
# +
calib_regions[-1] = '72'
calib_regions = list(map(int, calib_regions))
metric_list = ['vif', 'msssim', 'ssim', 'psnr', 'hfen']
mu_all_ = np.transpose(mu_all, (1, 2, 0))# (2, 0, 1))
for idx_e in np.arange(num_echos):
for idx_m in np.arange(num_metrics):
if idx_m == 3:
continue
line = mu_all_[idx_e, idx_m, :]
plt.plot(calib_regions, line, label=metric_list[idx_m])
plt.legend()
plt.title('accel 8x echo{} quant scores v. cal region size'.format(idx_e+1))
plt.show()
# sys.exit()
# -
# ### avg metrics across all samples
# +
print(mtr_id_list)
mu_bl = np.around(np.mean(metrics_bl, 0), 4)
mu_new = np.around(np.mean(metrics_new, 0), 4)
mu_diff = mu_new - mu_bl
# print('\n', mu_bl, '\n', mu_new, '\n\n', mu_diff, '\n')
print(mu_new/mu_bl-1)
# -
# ### indiv sample metrics
for idx_s, mtr_id in enumerate(mtr_id_list):
print(mtr_id)
print(metrics_bl[idx_s])
print(metrics_new[idx_s])
# print(np.around(metrics_new[idx_s] / metrics_bl[idx_s] - 1, 4))
sys.exit()
# ### plot
# +
title_list = ['gt', 'bl', 'new']
clim_list = [None] * len(title_list)
DIFF_MAP = True
DIFF_MAP_ALL = False
for idx_s in np.arange(imgs_gt.shape[0]):
for idx_e in np.arange(imgs_gt.shape[1]):
im_gt, im1, im2 = imgs_gt[idx_s, idx_e], \
imgs_bl[idx_s, idx_e], \
imgs_new[idx_s, idx_e]
# im_gt, im1, im2 = scale_0_1(im_gt), scale_0_1(im1), scale_0_1(im2)
arr_list = [im_gt, im1, im2]
if DIFF_MAP:
C = 4 # constant for plotting on clim=(0,1)
im_diff = C * np.abs(scale_0_1(im1) - scale_0_1(im2))
# im_diff = C * np.abs(scale_0_1(im2) - scale_0_1(im_gt))
arr_list.append(im_diff)
title_list.append('diff')
clim_list.append((0,1))
if DIFF_MAP_ALL: # plot all diffs
C = 2
title_list = ['gt', 'bl', 'diff(gt, bl)', 'new', \
'diff(gt, new)']
arr_list[2] = C * np.abs(scale_0_1(im1) - scale_0_1(im_gt))
arr_list.append(im2)
arr_list.append(C * np.abs(scale_0_1(im2) - scale_0_1(im_gt)))
clim_list = [None, None, (0,1), None, (0,1)]
plot_row_qdess(arr_list, title_list, clim_list)
sys.exit()
| ipynb/20210301_eval.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using distorter
#
# The concept of cation mutation (ion substitution) to tune the properties of compounds has been around at least [since the 1950s](https://www.sciencedirect.com/science/article/pii/0022369758900507) and has proved a useful concept in [more recent studies](https://journals.aps.org/prb/abstract/10.1103/PhysRevB.79.165211).
#
# 
#
# This example generates all possible (symmetry inequivalent) substitutions of Sr on Ba sites in the cubic perovskite
# BaTiO3; single and double substitutions.
# The distorter module uses the [ASE Python library](https://wiki.fysik.dtu.dk/ase/) to achieve this.
import smact.builder as builder
import smact.distorter as distort
# +
def pretty_print_atoms(atoms, linewrap=15):
    """Print atom positions and symbols wrapped into *linewrap* rows.

    Row r collects every atom whose index i satisfies i % linewrap == r,
    so the listing reads down the columns. Always prints *linewrap* rows;
    rows with no atoms come out empty.
    """
    labels = ["{0:5.3f} {1:5.3f} {2:5.3f} {symbol}".format(*pos, symbol=sym)
              for pos, sym in zip(atoms.get_positions(),
                                  atoms.get_chemical_symbols())]
    for row in range(linewrap):
        pieces = [label + '\t' for idx, label in enumerate(labels)
                  if idx % linewrap == row]
        print(''.join(pieces))
# Build the input
smact_lattice, test_case = builder.cubic_perovskite(['Ba', 'Ti', 'O'],
repetitions=[2, 2, 2])
hlinewidth = 68
print('-' * hlinewidth)
print("Original coordinates: ")
pretty_print_atoms(test_case)
print('-' * hlinewidth)
# Do the single substitution first, it is trivial as all Ba
# sites are equivalent we will choose the first Ba
subs_site = [0.0, 0.0, 0.0]
single_substitution = distort.make_substitution(test_case, subs_site, "Sr")
print("Single: ")
pretty_print_atoms(single_substitution)
# Build a sub-lattice you wish to disorder [test case do the Ba sub-lattice]
sub_lattice = distort.build_sub_lattice(single_substitution, "Ba")
# Enumerate the inequivalent sites
inequivalent_sites = distort.get_inequivalent_sites(sub_lattice,
single_substitution)
# Replace Ba at inequivalent sites with Sr
for i, inequivalent_site in enumerate(inequivalent_sites):
print('-' * hlinewidth)
print("Substituted coordinates {0}".format(i))
distorted = distort.make_substitution(single_substitution,
inequivalent_site,
"Sr")
pretty_print_atoms(distorted)
print('='*hlinewidth)
# -
# The distorted objects are of the ase atoms class,
# so can be written out to a crystal structure file like a cif
distorted.write('example_output.cif')
| examples/Cation_mutation/cation_mutation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 05 Metrics for evaluating regression algorithms: MSE vs MAE
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
# ### The Boston housing dataset
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed
# in 1.2 -- this cell only runs on older scikit-learn versions.
boston = datasets.load_boston()
boston.keys()
print(boston.DESCR)
boston.feature_names
x = boston.data[:,5] # use only the RM feature (average number of rooms)
x.shape
y = boston.target
y.shape
# Visualize price vs. number of rooms.
plt.scatter(x, y)
plt.show()
np.max(y)
# Target prices are capped at 50.0; drop those censored points before fitting.
x = x[y < 50.0]
y = y[y < 50.0]
x.shape
y.shape
plt.scatter(x, y)
plt.show()
# ### Fit with simple (single-feature) linear regression
# +
from playML.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, seed=666)
# -
x_train.shape
y_train.shape
x_test.shape
y_test.shape
from playML.SimpleLinearRegression import SimpleLinearRegression
reg = SimpleLinearRegression()
reg.fit(x_train, y_train)
reg.a_  # fitted slope
reg.b_  # fitted intercept
# Training points with the fitted line.
plt.scatter(x_train, y_train)
plt.plot(x_train, reg.predict(x_train), color='r')
plt.show()
# Same plot with the held-out test points overlaid in cyan.
plt.scatter(x_train, y_train)
plt.scatter(x_test, y_test, color="c")
plt.plot(x_train, reg.predict(x_train), color='r')
plt.show()
y_predict = reg.predict(x_test)
# ### MSE (mean squared error)
mse_test = np.sum((y_predict - y_test)**2) / len(y_test)
mse_test
# ### RMSE (root of MSE, same units as the target)
# +
from math import sqrt
rmse_test = sqrt(mse_test)
rmse_test
# -
# ### MAE (mean absolute error, less sensitive to outliers than MSE)
mae_test = np.sum(np.absolute(y_predict - y_test))/len(y_test)
mae_test
# ### Wrapping our own metric functions
#
# See the code [here](playML/metrics.py)
from playML.metrics import mean_squared_error
from playML.metrics import root_mean_squared_error
from playML.metrics import mean_absolute_error
mean_squared_error(y_test, y_predict)
root_mean_squared_error(y_test, y_predict)
mean_absolute_error(y_test, y_predict)
# ### MSE and MAE in scikit-learn
# NOTE: these imports shadow the playML functions of the same names above.
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
mean_squared_error(y_test, y_predict)
mean_absolute_error(y_test, y_predict)
# ### MSE v.s. MAE
| 05-Linear-Regression/05-Regression-Metrics-MSE-vs-MAE/05-Regression-Metrics-MSE-vs-MAE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Expectation Reflection for Breast Cancer Diagnosis
# In this work, we apply our method, Expectation Reflection (ER), to predict Breast Cancer. We compare the performance of ER with other existing methods such as Logistic Regression, Naive Bayes, Decision Tree, Random Forest.
# +
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
import expectation_reflection as ER
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Fix the RNG seed so the random train subsampling below is reproducible.
np.random.seed(1)
# load data
df = pd.read_csv('../breast_cancer_data.txt',sep= ',')
df.head()
# We will drop out the first column `id` and move the target `diagnosis` to the last column, just for convenience.
df = df.drop('id', axis=1) # remove id column
df1 = df.pop('diagnosis') # remove column diagnosis and store it in df1
df['diagnosis'] = df1 # add df1 to df as a 'new' column
# convert B to 0, M to 1 (remapped to -1/+1 further below):
df.diagnosis = [0 if t == "B" else 1 for t in df.diagnosis]
# +
# select features and target:
ds = np.array(df).astype(float)
# features: every column except the last (diagnosis)
X = ds[:,:-1]
l,n = X.shape
print(l,n)
# target: last column
y = ds[:,-1]
# convert 1,0 to 1,-1:
y = 2*y - 1
# -
# ### Shuffle data
from sklearn.utils import shuffle
X, y = shuffle(X, y)
# +
#from sklearn.preprocessing import StandardScaler
#X = StandardScaler().fit_transform(X)
# Scale every feature into [0, 1].
from sklearn.preprocessing import MinMaxScaler
X = MinMaxScaler().fit_transform(X)
# -
# ### Prediction
# +
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
def ML_inference(X_train,y_train,X_test,y_test,method='expectation_reflection'):
    """Train the chosen classifier and return its test-set accuracy.

    Parameters
    ----------
    X_train, y_train : training features / labels
    X_test, y_test : held-out features / labels
    method : one of 'expectation_reflection', 'logistic_regression',
        'naive_bayes', 'random_forest', 'decision_tree'

    Returns
    -------
    float : accuracy_score on (X_test, y_test)

    Raises
    ------
    ValueError : if `method` is not recognized.  (The original if-chain
        silently fell through and crashed later with a NameError on the
        undefined `model`.)
    """
    if method == 'expectation_reflection':
        # ER exposes a functional fit/predict API instead of an estimator object.
        h0, w = ER.fit(X_train, y_train, niter_max=20, regu=0.001)
        y_pred = ER.predict(X_test, h0, w)
        return accuracy_score(y_test, y_pred)

    # Dispatch table for the scikit-learn estimators: a typo in `method`
    # now fails loudly and immediately.
    makers = {
        'logistic_regression': lambda: LogisticRegression(solver='liblinear'),
        'naive_bayes': GaussianNB,
        'random_forest': lambda: RandomForestClassifier(
            criterion="gini", random_state=1,
            max_depth=3, min_samples_leaf=5, n_estimators=100),
        'decision_tree': DecisionTreeClassifier,
    }
    if method not in makers:
        raise ValueError("unknown method: {}".format(method))
    model = makers[method]()
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    return accuracy_score(y_test, y_pred)
# -
# Methods compared below; this order fixes the row order of the accuracy arrays.
list_methods=['expectation_reflection','naive_bayes','logistic_regression','decision_tree','random_forest']
def compare_ML_inference(X,y,train_size,npred=100):
    """Average test accuracy of every method in `list_methods`.

    For each of `npred` repetitions: shuffle (X, y), hold out 20% for
    testing, then train on a random `train_size` fraction of the
    remaining 80%.

    Parameters
    ----------
    X, y : full feature matrix / label vector
    train_size : float in (0, 1], fraction of the training split to keep
    npred : number of random repetitions (new keyword; the default of 100
        preserves the original behaviour)

    Returns
    -------
    (mean, std) : two 1-D arrays of length len(list_methods)
    """
    accuracy = np.zeros((len(list_methods),npred))
    for ipred in range(npred):
        X, y = shuffle(X, y)
        X_train0,X_test,y_train0,y_test = train_test_split(X,y,test_size=0.2,random_state = ipred)
        # Sub-sample the training split down to the requested fraction.
        idx_train = np.random.choice(len(y_train0),size=int(train_size*len(y_train0)),replace=False)
        X_train,y_train = X_train0[idx_train],y_train0[idx_train]
        for i,method in enumerate(list_methods):
            accuracy[i,ipred] = ML_inference(X_train,y_train,X_test,y_test,method)
    return accuracy.mean(axis=1),accuracy.std(axis=1)
# +
# Sweep decreasing training-set fractions and collect mean/std accuracy
# per method.
list_train_size = [1.,0.8,0.6,0.4,0.2,0.1,0.05]
acc = np.zeros((len(list_train_size),len(list_methods)))
acc_std = np.zeros((len(list_train_size),len(list_methods)))
for i,train_size in enumerate(list_train_size):
    acc[i,:],acc_std[i,:] = compare_ML_inference(X,y,train_size)
    print(train_size,acc[i,:])
# -
# Accuracy vs training size, one curve per method (column order follows
# list_methods).
plt.figure(figsize=(4,3))
plt.plot(list_train_size,acc[:,0],'k-',label='Expectation Reflection')
plt.plot(list_train_size,acc[:,1],'b-',label='Naive Bayes')
plt.plot(list_train_size,acc[:,2],'r-',label='Logistic Regression')
plt.plot(list_train_size,acc[:,3],'b--',label='Decision Tree')
plt.plot(list_train_size,acc[:,4],'r--',label='Random Forest')
plt.xlabel('train-size')
plt.ylabel('acc')
plt.legend()
| .ipynb_checkpoints/1main_breast_cancer-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.10.0 64-bit (''adventofcode-mOkh6lsX'': pipenv)'
# language: python
# name: python3
# ---
# # A\* search
#
# - <https://adventofcode.com/2021/day/15>
#
# Part 1 is a straight-forward A\* search problem.
#
# +
from __future__ import annotations
from dataclasses import dataclass, replace
from heapq import heappop, heappush
from itertools import count
from typing import Iterator, TypeAlias
Pos: TypeAlias = tuple[int, int]  # (x, y) grid coordinate
@dataclass(frozen=True)
class Node:
    """Node on the A* search graph.

    Frozen so instances are hashable and usable in the open/closed sets.
    """

    x: int = 0
    y: int = 0
    risk: int = 0  # total risk accumulated to reach this position

    @property
    def pos(self) -> Pos:
        """The (x, y) grid position of this node."""
        return self.x, self.y

    def cost(self, target: Pos) -> int:
        """Calculate the cost for this node, f(n) = g(n) + h(n).

        The cost of this node is the total risk encountered so far (g)
        plus the estimated cost to get to the end goal (h).  The
        Manhattan distance to the target is used as the estimate; it is
        admissible because every step costs at least 1.
        """
        return self.risk + abs(target[0] - self.x) + abs(target[1] - self.y)

    def transitions(self, cavern: Cavern) -> Iterator[Node]:
        """Yield the in-bounds neighbours (left, up, down, right order)."""
        positions = (
            (self.x + dx, self.y + dy) for dx, dy in ((-1, 0), (0, -1), (0, 1), (1, 0))
        )
        yield from (
            replace(self, x=x, y=y, risk=self.risk + cavern[x, y])
            for x, y in positions
            if (x, y) in cavern
        )


class Cavern:
    """Rectangular grid of single-digit risk levels with A* pathfinding."""

    def __init__(self, map: list[str]) -> None:
        # `map` (shadowing the builtin) kept for interface compatibility.
        self._height = len(map)
        self._width = len(map[0])
        self._matrix = [[int(c) for c in row] for row in map]
        self.target = (self._width - 1, self._height - 1)

    def __getitem__(self, pos: Pos) -> int:
        """Risk level at (x, y)."""
        x, y = pos
        return self._matrix[y][x]

    def __contains__(self, pos: Pos) -> bool:
        """True when (x, y) lies inside the grid."""
        x, y = pos
        return 0 <= x < self._width and 0 <= y < self._height

    def __str__(self) -> str:
        return "\n".join(["".join([str(r) for r in row]) for row in self._matrix])

    def lowest_total_risk(self) -> int:
        """A* search from (0, 0) to self.target; return the minimal total
        risk (the start cell's risk is not counted).

        Implicitly returns None if the target is unreachable, which
        cannot happen on a full rectangular grid.
        """
        start = Node()
        # Renamed from `open` so the builtin open() is not shadowed.
        frontier = {start}
        unique = count()  # tie breaker when costs are equal
        pqueue = [(start.cost(self.target), next(unique), start)]
        closed = set()
        risks = {start.pos: start.risk}  # pos -> lowest risk seen so far
        while frontier:
            node = heappop(pqueue)[-1]
            if node.pos == self.target:
                return node.risk
            frontier.remove(node)
            closed.add(node)
            for new in node.transitions(self):
                if new in closed or new in frontier:
                    continue
                if risks.get(new.pos, float("inf")) < new.risk:
                    continue  # already reached this position more cheaply
                risks[new.pos] = new.risk
                frontier.add(new)
                heappush(pqueue, (new.cost(self.target), next(unique), new))
# Worked example from the puzzle description; its known answer is 40.
test_cavern_map = """\
1163751742
1381373672
2136511328
3694931569
7463417111
1319128137
1359912421
3125421639
1293138521
2311944581
""".splitlines()
test_cavern = Cavern(test_cavern_map)
assert test_cavern.lowest_total_risk() == 40
# +
import aocd
# Fetch the personal puzzle input (network call; needs an AoC session token).
cavern_map = aocd.get_data(day=15, year=2021).splitlines()
cavern = Cavern(cavern_map)
print("Part 1:", cavern.lowest_total_risk())
# -
# # Part 2: scale up the map
#
# Part two tests if your A\* search can handle a larger map.
# +
from itertools import product
class LargeCavern(Cavern):
    """Cavern tiled 5x5; each tile's risks grow by its tile distance,
    wrapping from 9 back to 1."""

    def __init__(self, map: list[str]) -> None:
        super().__init__(map)
        base = self._matrix
        tiled: list[list[int]] = []
        # Build the enlarged matrix row by row; (tile_x, tile_y) index the
        # 5x5 tiling and add to each base cell, wrapping 9 -> 1.
        for tile_y, y in product(range(5), range(self._height)):
            row = [
                (base[y][x] + tile_x + tile_y - 1) % 9 + 1
                for tile_x, x in product(range(5), range(self._width))
            ]
            tiled.append(row)
        self._matrix = tiled
        self._width *= 5
        self._height *= 5
        self.target = (self._width - 1, self._height - 1)
# The puzzle's known answer for the enlarged example map is 315.
test_large_cavern = LargeCavern(test_cavern_map)
assert test_large_cavern.lowest_total_risk() == 315
# -
larger_cavern = LargeCavern(cavern_map)
print("Part 2:", larger_cavern.lowest_total_risk())
| 2021/Day 15.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
# +
from pathlib import Path
import sys
# NOTE(review): hard-coded absolute Windows paths -- this notebook only runs
# on the author's machine; consider relative paths or an installable package.
sys.path.append("c:\\Users\\kpdav\\machine_learning\\projects\\PGA-portfolio-optimizer\\config")
sys.path.append("c:\\Users\\kpdav\\machine_learning\\projects\\PGA-portfolio-optimizer\\src\\data")
import config
import pandas as pd
pd.options.display.max_columns = 999
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
# -
# Load the engineered time-series feature store; parse `date` as datetime.
feature_store_path = str(Path(config.TIMESERIES_FRAMEWORK_DIR, "ts_feature_store.csv"))
feature_df = pd.read_csv(feature_store_path, parse_dates=["date"])
# ## __Return Label__
#
#
# __Below is the target variable ("fantasy_total_points") plotted for each point in the training set.__
#
# +
plt.plot(feature_df["fantasy_total_points"])
plt.xlabel("Player Instance")
plt.ylabel("fantasy_total_points")
plt.title("Fantasy total points over training set");
# -
# __Next I standardize the target variable and plot a similar figure__
# z-score standardization; np.std is the population (ddof=0) deviation.
feature_df["standardized_fantasy_total_points"] = feature_df[["fantasy_total_points"]].apply(lambda x: (x - np.mean(x)) / np.std(x))
# +
plt.plot(feature_df["standardized_fantasy_total_points"])
plt.xlabel("Player Instance")
plt.ylabel("standardized_fantasy_total_points")
plt.title("Standardized fantasy total points over training set");
# +
# standardized fantasy total points distribution, split by whether the
# player made the cut (`made_cut` column; presumably boolean -- verify).
sns.displot(feature_df, x="standardized_fantasy_total_points", hue="made_cut").set(title="Distribution Comparison")
plt.show();
| notebooks/return_label.ipynb |