code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import clustertools as ctools
import numpy as np
# # Setup
# As discussed in the documentation, a wrapper has been written around the LIMEPY code (<NAME>. & <NAME>. 2015, MNRAS, 454, 576 & <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. 2019, MNRAS, 487, 147) to automatically setup clusters from a pre-defined distribution function. If one is familiar with LIMEPY, simply give the ``setup_cluster`` the same commands you would give ``limepy.sample``. For example, to setup a King (1966) cluster with W0=5.0, a mass of 1000 Msun and an effective radius of 3 pc using 1000 stars:
# Sample a King (g=1) LIMEPY model: W0 (phi0)=5, total mass 1000 Msun, half-mass radius 3 pc, 1000 stars.
cluster=ctools.setup_cluster('limepy',g=1,phi0=5.0,M=1000.,rm=3,N=1000)
# Sanity check: number of stars, half-mass radius, and total mass of the sampled cluster.
print(cluster.ntot,cluster.rm,cluster.mtot)
# Alternatively in ``clustertools`` one can simply use ``'king'`` and ``'W0'``:
# Same model via the 'king' shorthand; W0 plays the role of LIMEPY's phi0.
cluster=ctools.setup_cluster('king',W0=5.0,M=1000.,rm=3,N=1000)
print(cluster.ntot,cluster.rm,cluster.mtot)
# It is also possible, for King (1966) clusters, to specify ``c`` instead of ``W0``, as I have included for convenience the conversion functions ``c_to_w0`` and ``w0_to_c``, since both values are quoted throughout the literature.
# # Galactic Globular Clusters
# It is possible to set up a StarCluster that represents a Galactic Globular Cluster, where the structural information is taken from either <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>. 2019, MNRAS, 485, 4906 (default) or <NAME>. 1996 (2010 Edition), AJ, 112, 1487. Orbital information is taken from <NAME>., 2019, MNRAS, 484,2832. To setup Pal 5, for example:
#
cluster=ctools.setup_cluster('Pal5')  # structural and orbital parameters come from the built-in catalogues
print(cluster.ntot,cluster.rm,cluster.mtot)
# Galactocentric position and velocity of the cluster's orbit.
print(cluster.xgc,cluster.ygc,cluster.zgc,cluster.vxgc,cluster.vygc,cluster.vzgc)
# Unless otherwise specified, the cluster is setup to be in ``pckms`` units in clustercentric coordinates. The number of stars is set using ``mbar`` variable which has a default of 0.3 solar masses.
# Clusters can easily be viewed as they would be in the sky using the ``skyplot`` command:
ctools.skyplot(cluster)
| docs/source/notebooks/setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.6 64-bit
# metadata:
# interpreter:
# hash: 0cf725079c7d16f2cba0a185a776402bb287255802a557bed9d05e4eed5bfa43
# name: python3
# ---
# # Providers and products
from eodag import EODataAccessGateway
# Entry point for all eodag operations; loads the pre-configured providers.
dag = EODataAccessGateway()
# ## Providers available
# The method [available_providers()](../../api_reference/core.rst#eodag.api.core.EODataAccessGateway.available_providers) returns a list of the pre-configured providers.
available_providers = dag.available_providers()
available_providers
print(f"eodag has {len(available_providers)} providers already configured.")
# It can take a product type as an argument and will return the providers known to `eodag` that offer this product.
dag.available_providers("S2_MSI_L1C")
# ## Product types available
# The method [list_product_types()](../../api_reference/core.rst#eodag.api.core.EODataAccessGateway.list_product_types) returns a dictionary that represents `eodag`'s internal product type catalog.
catalog = dag.list_product_types()
catalog[0]
# Each catalog entry is a dict; "ID" is the product type identifier.
products_id = [p["ID"] for p in catalog]
products_id
print(f"EODAG has {len(products_id)} product types stored in its internal catalog.")
# The method can take a provider name as an argument and will return the product types known to `eodag` that are offered by this provider.
peps_products = dag.list_product_types("peps")
[p["ID"] for p in peps_products]
# ## Combine these two methods
# These two methods can be combined to find which product type is the most common in `eodag`'s catalog among all the providers.
# Count, for every product type, how many providers offer it, then rank descending.
availability_per_product = [
    (p_type, len(dag.available_providers(p_type))) for p_type in products_id
]
availability_per_product = sorted(
    availability_per_product, key=lambda entry: entry[1], reverse=True
)
most_common_p_type, nb_providers = availability_per_product[0]
print(f"The most common product type is '{most_common_p_type}' with {nb_providers} providers offering it.")
# These can also be used to find out which provider (as configured by `eodag`) offers the highest number of different product types.
# For every provider, count the product types it offers, then rank descending.
availability_per_provider = [
    (prov, len([p["ID"] for p in dag.list_product_types(prov)]))
    for prov in dag.available_providers()
]
availability_per_provider = sorted(
    availability_per_provider, key=lambda entry: entry[1], reverse=True
)
provider, nb_p_types = availability_per_provider[0]
print(f"The provider with the largest number of product types is '{provider}' with {nb_p_types}.")
| docs/notebooks/api_user_guide/2_providers_products_available.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/nathamsr11/Project-14.-Parkinson-s-Disease-Detection.ipynb/blob/main/Project_14_Parkinson's_Disease_Detection.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="9B5Zl1UOBMAJ"
# Importing the Dependencies
# + id="YOCpZ1Vm6cfW"
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.metrics import accuracy_score
# + [markdown] id="PZm-USrtB_q4"
# Data Collection & Analysis
# + id="5YC2lGuVBiZA"
# loading the data from csv file to a Pandas DataFrame
parkinsons_data = pd.read_csv('/content/parkinsons.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 216} id="Iw8z6w60Djd2" outputId="7c38d6f8-9794-42ee-c546-a3c479d442a1"
# printing the first 5 rows of the dataframe
parkinsons_data.head()
# + colab={"base_uri": "https://localhost:8080/"} id="cK7L_o2TDuZb" outputId="bc2d89a6-5979-41cd-bded-0b0ee44f0008"
# number of rows and columns in the dataframe
parkinsons_data.shape
# + colab={"base_uri": "https://localhost:8080/"} id="NLmzHIgnEGi4" outputId="80d5c4e1-8ae7-4686-b26f-ba73eb8f499d"
# getting more information about the dataset (column dtypes, non-null counts)
parkinsons_data.info()
# + colab={"base_uri": "https://localhost:8080/"} id="70rgu_k4ET9F" outputId="bdbf9fe7-a6cb-4389-dee4-e69b8afb1932"
# checking for missing values in each column
parkinsons_data.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 306} id="1AxFu0-nEhSA" outputId="ac7e78a5-45ad-4e2a-e0fc-89c90367d5fe"
# getting some statistical measures about the data
parkinsons_data.describe()
# + colab={"base_uri": "https://localhost:8080/"} id="3O8AclzwExyH" outputId="b63cd18d-477c-4f09-877c-93242e082cf2"
# distribution of the target variable 'status' (class balance check)
parkinsons_data['status'].value_counts()
# + [markdown] id="L1srlxtEFYfN"
# 1 --> Parkinson's Positive
#
# 0 --> Healthy
#
# + colab={"base_uri": "https://localhost:8080/", "height": 157} id="zUrPan7CFTMq" outputId="eec7e87b-eb34-46f7-e644-6a1536eeb036"
# grouping the data based on the target variable (per-class feature means)
parkinsons_data.groupby('status').mean()
# + [markdown] id="8RY6c0waGSs7"
# Data Pre-Processing
# + [markdown] id="We7sRYu7Gc4q"
# Separating the features & Target
# + id="UAcz8jFnFuzH"
# Features: all numeric voice measures; drop the non-numeric 'name' column and the target 'status'.
X = parkinsons_data.drop(columns=['name','status'], axis=1)
Y = parkinsons_data['status']
# + colab={"base_uri": "https://localhost:8080/"} id="guRof_8WG1Yn" outputId="61c562ee-4886-4d7f-cdfe-052890d1eda8"
print(X)
# + colab={"base_uri": "https://localhost:8080/"} id="xSNrvkJoG3cY" outputId="a9962d49-037d-495b-c934-809567955862"
print(Y)
# + [markdown] id="WDeqEaaHHBAS"
# Splitting the data to training data & Test data
# + id="4c6nrCiVG6NB"
# 80/20 split; a fixed random_state makes the split reproducible.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=2)
# + colab={"base_uri": "https://localhost:8080/"} id="6OqUka96H35c" outputId="d770b48c-ebae-48c8-e637-9df61230c03c"
print(X.shape, X_train.shape, X_test.shape)
# + [markdown] id="ACsXtFTGIFU-"
# Data Standardization
# + id="DbpeUHeUH-4A"
scaler = StandardScaler()
# + colab={"base_uri": "https://localhost:8080/"} id="MVkVqUbhIdBs" outputId="bddcf5c3-842a-4f92-cc63-813e13ad3b5d"
# Fit the scaler on the training split only, so no test-set statistics leak in.
scaler.fit(X_train)
# + id="1FeONzpiInv5"
# Apply the training-set scaling to both splits.
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="OS2_4yaVJAiH" outputId="0c515226-8b53-4b03-928e-58edf30cc465"
print(X_train)
# + [markdown] id="QIOAtx35JUMg"
# Model Training
# + [markdown] id="fWlsaBNuJV5g"
# Support Vector Machine Model
# + id="IDInA1u5JCZ9"
# Linear-kernel support vector classifier.
model = svm.SVC(kernel='linear')
# + colab={"base_uri": "https://localhost:8080/"} id="F01DNpqWKmaW" outputId="7b78fa09-3465-4765-d508-eb379733db15"
# training the SVM model with training data
model.fit(X_train, Y_train)
# + [markdown] id="1z_-nZfuLJrH"
# Model Evaluation
# + [markdown] id="Rj3XAnF8LMF4"
# Accuracy Score
# + id="5LwxNgnqK1Za"
# accuracy score on training data
X_train_prediction = model.predict(X_train)
training_data_accuracy = accuracy_score(Y_train, X_train_prediction)
# + colab={"base_uri": "https://localhost:8080/"} id="-dS9tcGdLm41" outputId="25767495-13bc-4040-c7ae-be89443cfe11"
print('Accuracy score of training data : ', training_data_accuracy)
# + id="rNUO2uHmLtjY"
# accuracy score on test data (the original comment wrongly said "training data")
X_test_prediction = model.predict(X_test)
test_data_accuracy = accuracy_score(Y_test, X_test_prediction)
# + colab={"base_uri": "https://localhost:8080/"} id="BsF3UnQ2L_aR" outputId="b6d0d040-949e-4a71-ad1f-e4a7ea6102db"
print('Accuracy score of test data : ', test_data_accuracy)
# + [markdown] id="QlR4JG4YMfOR"
# Building a Predictive System
# + colab={"base_uri": "https://localhost:8080/"} id="w0FjSoO1MGBU" outputId="f2677b3a-c5c6-4719-b9ee-c361ec33dec9"
# One raw measurement vector; assumed to follow the column order of X
# ('name' and 'status' excluded) — TODO confirm against the CSV header.
input_data = (197.07600,206.89600,192.05500,0.00289,0.00001,0.00166,0.00168,0.00498,0.01098,0.09700,0.00563,0.00680,0.00802,0.01689,0.00339,26.77500,0.422229,0.741367,-7.348300,0.177551,1.743867,0.085569)
# changing input data to a numpy array
input_data_as_numpy_array = np.asarray(input_data)
# reshape the numpy array to a single-sample 2D array expected by sklearn
input_data_reshaped = input_data_as_numpy_array.reshape(1,-1)
# standardize the data with the scaler fitted on the training split
std_data = scaler.transform(input_data_reshaped)
prediction = model.predict(std_data)
print(prediction)
if (prediction[0] == 0):
    print("The Person does not have Parkinsons Disease")
else:
    print("The Person has Parkinsons")
| Project_14_Parkinson's_Disease_Detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lineáris és homogén transzformációk
#
# _Tartalom_: lineáris és homogén transzformációk pythonban
#
# _Szükséges előismeretek_: python, matplotlib, numpy, opencv, lineáris algebra
#
#
# Érdemes megnézni témában a következő videót:
# https://www.youtube.com/watch?v=kYB8IZa5AuE (3blue1brown Linear transformations and matrices | Essence of linear algebra, chapter 3) a lecke példáit követik a feldolgozást.
#
#
# A lineáris transzformációkat 2x2-es, míg a 2D-s geometriai transzformációkat 3x3-as mátrixok segítségével írhatjuk fel. Egy pont transzformáció utáni koordinátáit a pont homogén koordinátás vektora és a transzformációs mátrix szorzata adja. De nézzük ezt inkább példán.
#
# Kiindulásképp rajzoljunk ki pár koordinátát:
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# Vertex list (row vectors) of a simple "house" polyline used to visualize
# each transformation in the rest of the notebook.
a0 = np.array([[0, 2], [0, 5], [3, 5], [3, 2], [0, 2], [0, 5], [1.5, 7],
               [0.45, 5.6], [0.45, 6.5], [0.6, 6.5], [0.6, 5.8], [1.5, 7], [3, 5]])
plt.plot(a0[:, 0], a0[:, 1], "*-", label="a0")
plt.grid(True)
plt.axis("equal")
plt.legend()
plt.show()
# -
# Vegyük a következő `t1` transzformációs mátrixot.
#
# $t_1 = \left[ \begin{array}{cccc}
# 0 & -1 \\
# 1 & 0 \\\end{array} \right]$
#
# Szorozzuk össze `a0`-val, így `a1`-et kapjuk, figyeljük meg az eredményt.
# +
# Points are stored as row vectors, so the code keeps the transpose of the
# matrix shown in the text and multiplies as a0 @ t1; the print below shows
# np.transpose(t1), which recovers the textbook (column-vector) form.
t1 = np.array([[0, 1], [-1, 0]])
a1 = np.dot(a0, t1)
plt.plot(a0[:, 0], a0[:, 1], "*-", label="a0")
plt.plot(a1[:, 0], a1[:, 1], "*-", label="a1")
plt.grid(True)
plt.axis("equal")
plt.legend()
print("t1 = \n", np.transpose(t1))
plt.show()
# -
# Vegyük a következő `t2` transzformációs mátrixot.
#
# $t_2 = \left[ \begin{array}{cccc}
# 1 & 1 \\
# 0 & 1 \\\end{array} \right]$
# Shear transformation (stored transposed for row-vector points).
t2 = np.array([[1, 0], [1, 1]])
a2 = np.dot(a0, t2)
plt.plot(a0[:, 0], a0[:, 1], "*-", label="a0")
plt.plot(a2[:, 0], a2[:, 1], "*-", label="a2")
plt.grid(True)
plt.axis("equal")
plt.legend()
print("t2 = \n", np.transpose(t2))
plt.show()
# A `t4` transzformációs mátrix a vízszintes tengelyre tükröz:
#
# $t_4 = \left[ \begin{array}{cccc}
# 1 & 0 \\
# 0 & -1 \\\end{array} \right]$
#
# A `t5` transzformációs mátrix 2x-es méretűre nagyítja az alakzatot:
#
# $t_5 = \left[ \begin{array}{cccc}
# 2 & 0 \\
# 0 & 2 \\\end{array} \right]$
# +
# t3: a general linear map; t4: mirror across the horizontal axis;
# t5: uniform 2x scaling (all stored transposed for row-vector points).
t3 = np.array([[1, 2], [3, 1]])
t4 = np.array([[1, 0], [0, -1]])
t5 = np.array([[2, 0], [0, 2]])
a3 = np.dot(a0, t3)
a4 = np.dot(a0, t4)
a5 = np.dot(a0, t5)
plt.plot(a0[:, 0], a0[:, 1], "*-", label="a0")
plt.plot(a3[:, 0], a3[:, 1], "*-", label="a3")
plt.plot(a4[:, 0], a4[:, 1], "*-", label="a4")
plt.plot(a5[:, 0], a5[:, 1], "*-", label="a5")
plt.grid(True)
plt.axis("equal")
plt.legend()
print("\nt3 = \n", np.transpose(t3))
print("\nt4 = \n", np.transpose(t4))
print("\nt5 = \n", np.transpose(t5))
plt.show()
# -
# Az origó körüli α szöggel történő forgatás mátrixa:
#
# $t_r = \left[ \begin{array}{cccc}
# cos(\alpha) & -sin(\alpha) \\
# sin(\alpha) & cos(\alpha) \\\end{array} \right]$
plt.plot(a0[:, 0], a0[:, 1], "*-", label="a0")
# Rotations about the origin in 0.5-radian steps (angles 0 to 5.5 rad);
# the matrix is the transpose of the textbook rotation, for row-vector points.
for rot in range(12):
    rot /= 2
    t6 = np.array([[np.cos(rot), np.sin(rot)], [-np.sin(rot), np.cos(rot)]])
    a6 = np.dot(a0, t6)
    plt.plot(a6[:, 0], a6[:, 1], "*-", label=str(rot))
plt.grid(True)
plt.axis("equal")
plt.legend()
plt.show()
# További olvasnivaló:
# - https://upload.wikimedia.org/wikipedia/commons/2/2c/2D_affine_transformation_matrix.svg
# - https://en.wikipedia.org/wiki/Transformation_matrix
# - https://en.wikipedia.org/wiki/Homogeneous_coordinates#Use_in_computer_graphics
| eload/ealeshtranszfromaciok.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="JyDm1XaNOSDp" colab_type="text"
# ## Normal transformations
#
# - Some models assume that the data is normally distributed
#
# - We can transform variables to show a normal distribution
#
#
# ## Examples
#
# - Reciprocal or inverse transformations
#
# - Logarithmic
#
# - Square root transformation
#
# - Exponential
#
# - Box-Cox
#
# + id="42hbGwCeDd8-" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="a1e3967b-ce15-428e-d1ac-a2b0000f764a"
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# + id="Ds9gl_oFEATI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="c811684d-096f-45e2-e6de-194b61e48d0e"
from google.colab import drive
drive.mount('/content/gdrive')
# Titanic training set stored on Google Drive.
data = pd.read_csv("gdrive/My Drive/Colab Notebooks/FeatureEngineering/train.csv")
# + id="rnhovydPdtY6" colab_type="code" colab={}
# Columns of interest: two numeric features plus the target.
cats = ['Age', 'Fare', 'Survived']
# + id="m6VLaQQCOSDx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="8c3413e4-2929-4dab-fea8-c87b577a1958"
data = data[cats]
data.head()
# + id="Hxi8MZ7Eddw8" colab_type="code" colab={}
sns.set()
def distro(data, columns):
    """Draw, for each column, a side-by-side QQ-plot and distribution plot."""
    import scipy.stats as stats
    for name in columns:
        figure, axes = plt.subplots(1, 2, figsize=(15, 6))
        # Left panel: sample quantiles against a normal distribution.
        stats.probplot(data[name].dropna(), dist="norm", plot=axes[0])
        axes[0].set_title("QQPlot")
        # Right panel: empirical distribution of the raw values.
        sns.distplot(data[name], ax=axes[1])
        axes[1].set_title("Distribution")
# + id="rYiMvI5deHEy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="f1cae339-1646-4a27-d3ed-ec4cd3ad31cd"
from sklearn.model_selection import train_test_split
# Impute missing values with the column means, then hold out 20% for testing.
X_train, X_test, y_train, y_test = train_test_split(data[['Age', 'Fare']].fillna(data.mean()),
                                                    data['Survived'], test_size=0.2)
X_train.shape, X_test.shape
# + id="PErortzZeP6J" colab_type="code" colab={}
# Feature columns only (drop the trailing 'Survived' target).
cols = cats[:-1]
# + id="3ubzBSFeeLNZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 803} outputId="78ff4719-6e53-4e7b-9ab2-d7de2ca34a38"
distro(X_train, cols)
# + id="2Lfa4rdAe95I" colab_type="code" colab={}
def boxcox(X_train, X_test, cols):
    """Box-Cox-transform the given columns of both splits, in place.

    The Box-Cox lambda is estimated on the training data only and then
    re-used for the test data, so both splits share one consistent
    transform and no information leaks from the test set. (The original
    re-fitted lambda on the test split, which leaks information and makes
    train/test incomparable.) A +1 shift keeps the inputs strictly positive.
    """
    from scipy import stats
    for col in cols:
        # Guard against exact zeros before the shift (kept from the original).
        X_train.loc[X_train[col] == 0, col] = 0.0001
        # Fit lambda on the training column...
        X_train[col], lam = stats.boxcox(X_train[col] + 1)
        # ...and apply that same lambda to the test column.
        X_test[col] = stats.boxcox(X_test[col] + 1, lmbda=lam)
# + id="L94ROSZjgpxK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="2d7d891a-6ee0-481e-b1f6-90dc2bd8e975"
# Pre-transform summary statistics ('Fare' is heavily right-skewed).
X_train.describe()
# + id="cEUwXezFhkC8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 294} outputId="2f694055-328c-4f52-9c56-101852cfb13d"
boxcox(X_train, X_test, ['Fare'])
X_train.describe()
# + id="dmkVgM4Yfw42" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 410} outputId="1de5bdee-6163-459f-ef8d-97a4613676ec"
# Re-check normality of 'Fare' after the transform.
distro(X_train, ['Fare'])
# + id="RRQO20xQhz--" colab_type="code" colab={}
| FeatureEngineering_DataScience/Demo200_NormalTransformations_BoxCox.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from utils import *
import importlib
import utils
importlib.reload(utils)
import os.path
from os import path
import pandas as pd
import utils_optimization as opt
importlib.reload(opt)
import data_association as da
importlib.reload(da)
import time
import numpy.linalg as LA
# +
# read & rectify each camera df individually
data_path = pathlib.Path().absolute().joinpath('../3D tracking')
tform_path = pathlib.Path().absolute().joinpath('../tform')
# -
# Cameras to pre-process (p1c2 is handled separately further below).
names = ['p1c1','p1c3','p1c4','p1c5','p1c6']
for name in names:
    file = 'record_51_'+name+'_00000_3D_track_outputs.csv'
    camera = utils.find_camera_name(file)
    file_path = data_path.joinpath(file)
    # NOTE(review): this read_data result is immediately overwritten by preprocess below.
    df = utils.read_data(file_path)
    df = utils.preprocess(file_path, tform_path, skip_row = 0)
    df = utils.preprocess_data_association(df, file,tform_path)
    # Save the data-association-ready table for this camera.
    df.to_csv(data_path.joinpath('DA/'+camera+'.csv'), index=False)
# df.to_csv('p1c2_pre.csv')
# df = df[df['Frame #']<1800]
# Reload the project modules so on-disk edits take effect, then redo the
# data-association step for the LAST camera processed above (df/file/camera
# still hold the values from the final loop iteration).
import utils
importlib.reload(utils)
import utils_optimization as opt
importlib.reload(opt)
import data_association as da
importlib.reload(da)
df = utils.preprocess_data_association(df, file,tform_path)
df.to_csv(data_path.joinpath('DA/'+camera+'.csv'), index=False)
# +
# ... rectifying ...
camera = 'p1c6'
df = utils.read_data(data_path.joinpath('DA/'+camera+'.csv'))
df = opt.rectify(df)
# ... post processing ...
df = utils.post_process(df)
# ... saving ...
df.to_csv(data_path.joinpath('rectified/'+camera+'_noextend.csv'), index=False)
print('saved.')
# -
print('extending tracks to edges of the frame...')
# camera = 'p1c4'
df = utils.read_data(data_path.joinpath('rectified/'+camera+'_noextend.csv'))
# Frame-coordinate range of this camera; ymin/ymax are unused here.
xmin, xmax, ymin, ymax = utils.get_camera_range(camera)
maxFrame = max(df['Frame #'])
print(xmin, xmax)
args = (xmin, xmax, maxFrame)
tqdm.pandas()
# df = df.groupby('ID').apply(extend_prediction, args=args).reset_index(drop=True)
# Per-track extrapolation to the frame edges, run in parallel over track IDs.
df = utils.applyParallel(df.groupby("ID"), utils.extend_prediction, args=args).reset_index(drop=True)
df.to_csv(data_path.joinpath('rectified/'+camera+'.csv'), index=False)
# +
# combine all modifyID dataframes into one and plot them all
names = ['p1c1','p1c2','p1c3','p1c5','p1c6']
li = []
for name in names:
    df = utils.read_data(data_path.joinpath('rectified/modifyID/'+name+'.csv'))
    # if name == 'p1c3': # keep the upper half
    li.append(df)
dfall = pd.concat(li, axis=0, ignore_index=True)
# try keeping only one bbox per object per frame
dfall = utils.applyParallel(dfall.groupby("Frame #"), utils.del_repeat_meas_per_frame).reset_index(drop=True)
# +
# make an animation based on LMCS
import os
import glob
import importlib
import animation_utils as an
importlib.reload(an)
image_folder = '../FramePic'
camera = 'p1c2'
df= utils.read_data(data_path.joinpath('record_51_p1c2_00000_3D_track_outputs.csv'))
df = utils.img_to_road(df, tform_path, camera)
# df = df[(df['Frame #']>1800)&(df['Frame #']<3600)]
# Clear any frame images left over from a previous run.
filelist = glob.glob(os.path.join(image_folder, "*"))
for f in filelist:
    os.remove(f)
# Plot extent: use the single camera's range if only one camera is present,
# otherwise fall back to a fixed whole-scene extent.
if len(df['camera'].dropna().unique())==1:
    dim0 = utils.get_camera_range((df['camera'].dropna().unique()[0]))
else:
    # dim0 = utils.get_camera_range('all')
    dim0 = (0.0, 365, -5, 45)
print(dim0)
dim = [d * 3.281 for d in dim0] # convert meter to feet
an.generate_frames(df, dim, skip_frame=1, image_folder=image_folder)
# -
# Fetch image files from the folder, and create an animation.
importlib.reload(an)
video_name = '../'+camera+'_raw.mp4'
an.write_video(image_folder, video_name, fps=30)
# +
# visualize footprint on the camera video
import utils
importlib.reload(utils)
import plot_rectified_objects
importlib.reload(plot_rectified_objects)
video = str(data_path.joinpath('raw video/record_51_p1c4_00000.mp4'))
label_file = str(data_path.joinpath('rectified/p1c4.csv'))
plot_rectified_objects.plot_vehicle_csv(video,label_file, frame_rate = 0,show_2d = False,show_3d = True,show_LMCS = True,show_rectified = False, ds=True)
# -
# replace RAV4's IDs
# names = ['p1c1'],'p1c3','p1c2','p1c5','p1c6'
camera = 'p1c4'
df= utils.read_data(data_path.joinpath('rectified/'+camera+'.csv'))
print(len(df['ID'].unique()))
# Manually remap selected track IDs into the 99xx range; the before/after
# unique-ID counts should match if no remapped ID collides with an existing one.
df["ID"].replace({535:9999, 549:9998, 589:9997, 622:9996,635:9995,656:9994, 706:9993, 713:9992,721:9991,725:9990,759:9989}, inplace=True)
print(len(df['ID'].unique()))
df.to_csv('../3D tracking/rectified/modifyID/'+camera+'.csv')
# +
import utils_optimization as opt
importlib.reload(opt)
dfda = utils.read_data(data_path.joinpath('DA/p1c2.csv'))
# Compare a single track (ID 238) before and after single-camera rectification.
pre = dfda[dfda['ID']==238]
# pre = car.copy()
# post = df[df['ID']==240]
post = pre.copy()
# Weights for the rectification objective — presumably regularization terms; confirm in utils_optimization.
lams = (1,0,0,0.0,0.0)
# lams = (1,0.2,0.2,0.05,0.02)
post = opt.rectify_single_camera(post, args = lams)
utils.plot_track_compare(pre,post)
# -
print(len(pre))
print(len(pre['Frame #'].unique()))
utils.plot_track_df(post)
import utils
importlib.reload(utils)
utils.dashboard([pre,post])
plt.scatter(pre['Frame #'].values, pre.fbr_x.values)
pre.direction
| ipynb_files/I24-vandertest-3D-tracking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Conditionals
# ## Testing truth value
# ### `bool`
# Empty containers and zero are falsy; non-empty / non-zero values are truthy.
print('type of True and False: {}'.format(type(True)))
print('0: {}, 1: {}'.format(bool(0), bool(1)))
print('empty list: {}, list with values: {}'.format(bool([]), bool(['woop'])))
print('empty dict: {}, dict with values: {}'.format(bool({}), bool({'Python': 'cool'})))
# ### `==, !=, >, <, >=, <=`
print('1 == 0: {}'.format(1 == 0))
print('1 != 0: {}'.format(1 != 0))
print('1 > 0: {}'.format(1 > 0))
print('1 > 1: {}'.format(1 > 1))
print('1 < 0: {}'.format(1 < 0))
print('1 < 1: {}'.format(1 < 1))
print('1 >= 0: {}'.format(1 >= 0))
print('1 >= 1: {}'.format(1 >= 1))
print('1 <= 0: {}'.format(1 <= 0))
print('1 <= 1: {}'.format(1 <= 1))
# You can combine these:
print('1 <= 2 <= 3: {}'.format(1 <= 2 <= 3))
# ### `and, or, not`
python_is_cool = True
java_is_cool = False
empty_list = []
secret_value = 3.14
# `and`/`or` short-circuit and return one of their operands, not necessarily a bool.
print('Python and java are both cool: {}'.format(python_is_cool and java_is_cool))
print('secret_value and python_is_cool: {}'.format(secret_value and python_is_cool))
print('Python or java is cool: {}'.format(python_is_cool or java_is_cool))
print('1 >= 1.1 or 2 < float("1.4"): {}'.format(1 >= 1.1 or 2 < float('1.4')))
print('Java is not cool: {}'.format(not java_is_cool))
# You can combine multiple statements, execution order is from left to right. You can control the execution order by using brackets.
print(bool(not java_is_cool or secret_value and python_is_cool or empty_list))
print(bool(not (java_is_cool or secret_value and python_is_cool or empty_list)))
# ## `if`
# +
statement = True
if statement:
    print('statement is True')
if not statement:
    print('statement is not True')
# -
empty_list = []
# With if and elif, conversion to `bool` is implicit
if empty_list:
    print('empty list will not evaluate to True') # this won't be executed
val = 3
# Comparisons can be chained (0 <= val < 1) and combined with `or`.
if 0 <= val < 1 or val == 3:
    print('Value is positive and less than one or value is three')
# ## `if-else`
my_dict = {}
if my_dict:
    print('there is something in my dict')
else:
    print('my dict is empty :(')
# ## `if-elif-else`
val = 88
if val >= 100:
    print('value is equal or greater than 100')
elif val > 10:
    print('value is greater than 10 but less than 100')
else:
    print('value is equal or less than 10')
# You can have as many `elif` statements as you need. In addition, `else` at the end is not mandatory.
# +
greeting = 'Hello fellow Pythonista!'
language = 'Italian'
# Only the first matching branch runs; no branch matches 'Italian', so the default greeting is kept.
if language == 'Swedish':
    greeting = 'Hejsan!'
elif language == 'Finnish':
    greeting = 'Latua perkele!'
elif language == 'Spanish':
    greeting = 'Hola!'
elif language == 'German':
    greeting = '<NAME>!'
print(greeting)
# -
# For more detailed overview about conditionals, check this [tutorial from Real Python](https://realpython.com/python-conditional-statements/).
| intro_python/python_tutorials/basics_of_python/notebooks/beginner/notebooks/conditionals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
Participants = ['John', 'Leila', 'Maria', 'Dwayne', 'George', 'Catherine']
Participants
# Elements at indices 1 and 2 (the stop index is exclusive).
Participants[1:3]
# First two elements.
Participants[:2]
# From index 4 to the end.
Participants[4:]
# Last two elements (negative indices count from the end).
Participants[-2:]
# ********
# index() returns the position of the first matching element.
Maria_ind = Participants.index("Maria")
Maria_ind
# *********
Newcomers = ['Joshua', 'Brittany']
Newcomers
# A nested list containing both lists as elements.
Bigger_List = [Participants, Newcomers]
Bigger_List
# sort() reorders the list in place (alphabetically for strings) and returns None.
Participants.sort()
Participants
Participants.sort(reverse=True)
Participants
Numbers = [1,2,3,4,5]
Numbers.sort()
Numbers
Numbers.sort(reverse = True)
Numbers
| 11 - Introduction to Python/7_Sequences/3_List Slicing (4:30)/List Slicing - Lecture_Py3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/suhkisoo/course-v3/blob/master/Example%20of%20Book%20Revised.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="P0oi5nujci0T"
# # Download
# + id="4zwo857ncfYE"
# !pip install tensorflow
# + id="RYuqr8uocfCQ"
# !pip install keras
# + [markdown] id="iOmpBUDRbC6S"
# # Import
# + id="N2lVRPCXahtP"
# train a generative adversarial network on a one-dimensional function
from numpy import hstack
from numpy import zeros
from numpy import ones
from numpy.random import rand
from numpy.random import randn
from keras.models import Sequential
from keras.layers import Dense
from matplotlib import pyplot
# + [markdown] id="tD4tQj9KbIVb"
# # Define
# + id="o_a1LDh6ah8X"
# define the standalone discriminator model
def define_discriminator(n_inputs=2):
    """Build and compile the discriminator: n_inputs -> 25 (ReLU) -> 1 (sigmoid)."""
    net = Sequential([
        Dense(25, activation='relu', kernel_initializer='he_uniform', input_dim=n_inputs),
        # Single sigmoid unit: probability that the input sample is real.
        Dense(1, activation='sigmoid'),
    ])
    # Binary real-vs-fake decision, hence binary cross-entropy.
    net.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
# + id="rRWZti8GaiDd"
# define the standalone generator model
def define_generator(latent_dim, n_outputs=2):
    """Build the generator: latent_dim -> 15 (ReLU) -> n_outputs (linear)."""
    net = Sequential()
    net.add(Dense(15, activation='relu', kernel_initializer='he_uniform', input_dim=latent_dim))
    # Linear output so the generator can emit arbitrary (x, y) coordinates.
    net.add(Dense(n_outputs, activation='linear'))
    return net
# + id="FjWtTm7kaiGW"
# define the combined generator and discriminator model, for updating the generator
def define_gan(generator, discriminator):
    """Stack generator + (frozen) discriminator into the composite GAN model."""
    # Freeze the discriminator inside the composite so that generator updates
    # do not also move the discriminator's weights; it stays trainable when
    # trained standalone, because `trainable` takes effect at compile time.
    discriminator.trainable = False
    gan = Sequential([generator, discriminator])
    gan.compile(loss='binary_crossentropy', optimizer='adam')
    return gan
# + id="XQ6WeZGraiJb"
# Making the discriminator not trainable is a clever trick in the Keras API. The trainable
# property impacts the model when it is compiled. The discriminator model was compiled with
# trainable layers, therefore the model weights in those layers will be updated when the standalone
# model is updated via calls to train on batch().
# + id="0fhZcsQdaiM9"
# generate n real samples with class labels
def generate_real_samples(n):
    """Sample n 'real' points (x, x^2) with x uniform in [-0.5, 0.5], labeled 1.

    Returns an (n, 2) array of points and an (n, 1) array of class labels.
    """
    xs = rand(n) - 0.5
    # Column-stack x and x^2 into an (n, 2) array of 2-D points.
    X = hstack((xs.reshape(n, 1), (xs * xs).reshape(n, 1)))
    # Real samples carry class label 1 for the discriminator.
    return X, ones((n, 1))
# + id="HOxu8xiDayBE"
# generate points in latent space as input for the generator
def generate_latent_points(latent_dim, n):
    """Draw n standard-normal latent vectors, shaped (n, latent_dim)."""
    return randn(latent_dim * n).reshape(n, latent_dim)
# + id="BdcFBdJTayIq"
# use the generator to generate n fake examples, with class labels
def generate_fake_samples(generator, latent_dim, n):
    """Run the generator on n latent points; fake samples are labeled 0.

    Returns the generator's (n, 2) output and an (n, 1) array of zeros.
    """
    latent = generate_latent_points(latent_dim, n)
    fakes = generator.predict(latent)
    return fakes, zeros((n, 1))
# + id="725wIROIa2lk"
# evaluate the discriminator and plot real and fake points
def summarize_performance(epoch, generator, discriminator, latent_dim, n=100):
    """Report discriminator accuracy on real vs. fake data and save a scatter plot.

    Prints (epoch, accuracy-on-real, accuracy-on-fake) and writes a PNG
    contrasting real points (red) with generated points (blue).
    """
    # How often does the discriminator correctly call genuine samples "real"?
    real_x, real_y = generate_real_samples(n)
    _, real_acc = discriminator.evaluate(real_x, real_y, verbose=0)
    # How often does it correctly call the current generator's output "fake"?
    fake_x, fake_y = generate_fake_samples(generator, latent_dim, n)
    _, fake_acc = discriminator.evaluate(fake_x, fake_y, verbose=0)
    print(epoch, real_acc, fake_acc)
    # Visual check: real curve in red, generated points in blue.
    pyplot.scatter(real_x[:, 0], real_x[:, 1], color='red')
    pyplot.scatter(fake_x[:, 0], fake_x[:, 1], color='blue')
    pyplot.savefig('generated_plot_e%03d.png' % (epoch + 1))
    pyplot.close()
# + id="shjRYbYFa2rp"
# train the generator and discriminator
def train(g_model, d_model, gan_model, latent_dim, n_epochs=10000, n_batch=128, n_eval=2000):
    """Alternate discriminator and generator updates for n_epochs batches.

    Args:
        g_model: the generator (trained only through gan_model).
        d_model: the standalone, trainable discriminator.
        gan_model: generator + frozen discriminator, compiled for generator updates.
        latent_dim: size of the generator's latent input.
        n_epochs / n_batch / n_eval: iteration count, batch size, eval interval.
    """
    # The discriminator sees half a batch of real data and half a batch of fakes.
    half_batch = n_batch // 2
    for epoch in range(n_epochs):
        real_x, real_y = generate_real_samples(half_batch)
        fake_x, fake_y = generate_fake_samples(g_model, latent_dim, half_batch)
        # Single gradient step on each half-batch.
        d_model.train_on_batch(real_x, real_y)
        d_model.train_on_batch(fake_x, fake_y)
        # Generator update: feed latent points through the combined model with
        # inverted ("real") labels so the generator's gradient pushes its
        # outputs toward what the discriminator accepts as real.
        latent = generate_latent_points(latent_dim, n_batch)
        gan_model.train_on_batch(latent, ones((n_batch, 1)))
        # Periodically report accuracies and save a diagnostic plot.
        if (epoch + 1) % n_eval == 0:
            summarize_performance(epoch, g_model, d_model, latent_dim)
# + [markdown] id="LPKqrK8sbPOT"
# # Main Code
# + id="I2Ob3t6na8VB" outputId="14dfaf50-eed8-4bac-be30-e700a81a33df" colab={"base_uri": "https://localhost:8080/"}
# size of the latent space (dimension of the generator's random input vector)
latent_dim = 5
# create the discriminator (compiled standalone, so its layers train here)
discriminator = define_discriminator()
# discriminator is a reference to the instance of the Sequential class
# discriminator defines the loss function and the optimization method
# create the generator
generator = define_generator(latent_dim) # generator does not define the loss function and the optimization method
# create the gan: generator stacked on the (frozen-inside-GAN) discriminator
gan_model = define_gan(generator, discriminator)
# train model
train(generator, discriminator, gan_model, latent_dim)
# train the discriminator on real samples and the fake samples generated by the current generator net
# Then, train the generator with the discriminator set frozen (not trainable)
| Example of Book Revised.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the diabetes data set from the working directory.
df = pd.read_csv("diabetes.csv")
df.head(5)
df.shape  # number of records
df.describe()
df.corr()  # if there is a 1-1 correlation, this is redundant
# Separate the predictors from the target column.
X = df.drop(["Outcome"], axis=1)
y = df["Outcome"]
from sklearn.model_selection import train_test_split
# Hold out 25% of the rows for evaluation.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
from sklearn.neighbors import KNeighborsClassifier
# Default k-nearest-neighbours classifier.
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
predictions = knn.predict(X_test)
from sklearn.metrics import accuracy_score
score = accuracy_score(y_test, predictions)
print(score)
| snippets/ML Sketches/knear_diabetes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="XioqwXsDe3_T"
# #### In this document we will see how to calculate the FLOPs for a Keras model
# + id="cQBckzYjexT2"
#Install package
#Requirements Python 3.6+ & Tensorflow 2.2+
# !pip install keras-flops
# + id="TKwl0X2QfQGT"
#import packages
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPooling2D, Dropout, Conv2DTranspose, BatchNormalization, Activation
# + id="qH3vsdmgfWgc"
# Small conv / transposed-conv model (512x512x1 in and out) used as the
# FLOPs-measurement subject below.
# NOTE(review): every Conv2D/Conv2DTranspose here already applies
# activation='relu', so each following Activation('relu') layer is redundant
# (ReLU is idempotent, so outputs are unchanged, but the layer count is inflated).
# NOTE(review): the input_shape kwargs are ignored in the functional API —
# the shape comes from `inp`. Left as-is so the reported FLOPs stay comparable.
inp = Input((512, 512, 1))
x = BatchNormalization(input_shape=(512, 512, 1),name='input0',)(inp)
x = Conv2D(8, (3, 3), input_shape=(512, 512, 1), padding='same',strides=(1, 1), activation='relu')(x)
x = Activation('relu')(x)
x = Conv2D(16, (3, 3), input_shape=(512, 512, 1), padding='same',strides=(1, 1), activation='relu')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(32, (3, 3), input_shape=(512, 512, 1), padding='same',strides=(1, 1), activation='relu')(x)
x = Activation('relu')(x)
x = Conv2DTranspose(16, (5, 5), input_shape=(512, 512, 1), padding='same',strides=(1, 1), activation='relu')(x)
x = Activation('relu')(x)
out = Conv2DTranspose(1, (5, 5), input_shape=(512, 512, 1), padding='same',strides=(1, 1), activation='relu',name='output0')(x)
model = Model(inp, out)
# + colab={"base_uri": "https://localhost:8080/"} id="et2o6tX1fnwH" outputId="b513738e-c2d1-4a0b-a5ff-554b2d3ee451"
# Print the layer-by-layer architecture and parameter counts.
model.summary()
# + [markdown] id="L3iSiNn7ibf4"
# #### Note: 1 MACC = 2 FLOPs (1 multiplication + 1 addition)
# + colab={"base_uri": "https://localhost:8080/"} id="j8yjbw7afqPp" outputId="b84a3458-6c2a-4a52-d182-0257556e2006"
from keras_flops import get_flops
# keras-flops profiles the model graph; batch_size=1 gives per-sample FLOPs.
flops = get_flops(model, batch_size=1)
print(f"FLOPs: {flops / 10 ** 9:.7} GFlops or BFlops")
print(f"MACCs: {0.5*flops / 10 ** 9:.7} MACCs")
# + [markdown] id="EoO-xNculFfZ"
# # Tried and not working properly
# + [markdown] id="uM-42rFrltmZ"
# ##### None of the get_flops functions below worked for the custom model developed above
#
#
#
# + id="Kn-RRKb_f8CK"
def get_flops(model_h5_path: str) -> int:
    """Load a saved Keras model (.h5) and return its total float operations.

    Uses the TF1-compat profiler: the model is loaded into the default graph
    inside an explicit session so the profiler can walk that graph.
    NOTE(review): the author reports this did not work for the custom model
    above — possibly because it profiles the *default* graph rather than a
    fresh one; verify before reuse.
    """
    session = tf.compat.v1.Session()
    graph = tf.compat.v1.get_default_graph()
    with graph.as_default():
        with session.as_default():
            # Loading under graph/session context registers the model's ops there.
            model = tf.compat.v1.keras.models.load_model(model_h5_path) if False else tf.keras.models.load_model(model_h5_path)
            run_meta = tf.compat.v1.RunMetadata()
            opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
            # Optional: save printed results to file
            # flops_log_path = os.path.join(tempfile.gettempdir(), 'tf_flops_log.txt')
            # opts['output'] = 'file:outfile={}'.format(flops_log_path)
            # We use the Keras session graph in the call to the profiler.
            flops = tf.compat.v1.profiler.profile(graph=graph,
                                                  run_meta=run_meta, cmd='op', options=opts)
            return flops.total_float_ops
# + id="JJqIdjd9lQqg"
def get_flops2() -> int:
    """Profile a pre-built model with the TF1-compat profiler and return FLOPs.

    NOTE(review): references `model1`, which is not defined anywhere in this
    notebook — calling this as-is raises NameError. Listed by the author as a
    non-working attempt; kept for reference only.
    """
    session = tf.compat.v1.Session()
    graph = tf.compat.v1.get_default_graph()
    with graph.as_default():
        with session.as_default():
            #model = tf.keras.applications.MobileNet(
            #    alpha=1, weights=None, input_tensor=tf.compat.v1.placeholder('float32', shape=(1, 224, 224, 3)))
            model = model1
            run_meta = tf.compat.v1.RunMetadata()
            opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
            # Optional: save printed results to file
            # flops_log_path = os.path.join(tempfile.gettempdir(), 'tf_flops_log.txt')
            # opts['output'] = 'file:outfile={}'.format(flops_log_path)
            # We use the Keras session graph in the call to the profiler.
            flops = tf.compat.v1.profiler.profile(graph=graph,
                                                  run_meta=run_meta, cmd='op', options=opts)
            # Clear the default graph so repeated calls do not accumulate ops.
            tf.compat.v1.reset_default_graph()
            return flops.total_float_ops
# + id="cCDDoCh5lSZK"
# This code is for older version of tensorflow (TF1-style graph/session API)
import tensorflow as tf
import keras.backend as K
def get_flops_0() -> int:
    """Profile the current Keras session graph and return its total float ops.

    NOTE(review): relies on TF1 `tf.RunMetadata`/`tf.profiler`, which do not
    exist under TF2 without the compat.v1 shim — listed by the author as a
    non-working attempt.
    """
    run_meta = tf.RunMetadata()
    opts = tf.profiler.ProfileOptionBuilder.float_operation()
    # We use the Keras session graph in the call to the profiler.
    flops = tf.profiler.profile(graph=K.get_session().graph,
                                run_meta=run_meta, cmd='op', options=opts)
    return flops.total_float_ops  # Prints the "flops" of the model.
# .... Define your model here ....
# You need to have compiled your model before calling this.
print(get_flops_0())
# + id="PlDCuzW1lbWN"
| Calculate_FLOPs_MACCs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.023137, "end_time": "2021-02-12T00:03:08.989368", "exception": false, "start_time": "2021-02-12T00:03:08.966231", "status": "completed"} tags=[]
# ## Dependencies
# + _kg_hide-input=true _kg_hide-output=true papermill={"duration": 9.835954, "end_time": "2021-02-12T00:03:18.847797", "exception": false, "start_time": "2021-02-12T00:03:09.011843", "status": "completed"} tags=[]
# !pip install --quiet efficientnet
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _kg_hide-input=true _kg_hide-output=true _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 13.345713, "end_time": "2021-02-12T00:03:32.218850", "exception": false, "start_time": "2021-02-12T00:03:18.873137", "status": "completed"} tags=[]
import warnings, time
from kaggle_datasets import KaggleDatasets
from sklearn.model_selection import KFold
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from tensorflow.keras import optimizers, Sequential, losses, metrics, Model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import efficientnet.tfkeras as efn
from cassava_scripts import *
from scripts_step_lr_schedulers import *
import tensorflow_addons as tfa
seed = 0
seed_everything(seed)
warnings.filterwarnings('ignore')
# + [markdown] papermill={"duration": 0.022105, "end_time": "2021-02-12T00:03:32.263240", "exception": false, "start_time": "2021-02-12T00:03:32.241135", "status": "completed"} tags=[]
# ### Hardware configuration
# + _kg_hide-input=true papermill={"duration": 5.877159, "end_time": "2021-02-12T00:03:38.164157", "exception": false, "start_time": "2021-02-12T00:03:32.286998", "status": "completed"} tags=[]
# TPU or GPU detection
# Detect hardware, return appropriate distribution strategy
strategy, tpu = set_up_strategy()
AUTO = tf.data.experimental.AUTOTUNE  # let tf.data tune pipeline parallelism
REPLICAS = strategy.num_replicas_in_sync  # accelerator cores in the strategy
print(f'REPLICAS: {REPLICAS}')
# + [markdown] papermill={"duration": 0.024513, "end_time": "2021-02-12T00:03:38.212525", "exception": false, "start_time": "2021-02-12T00:03:38.188012", "status": "completed"} tags=[]
# # Model parameters
# + papermill={"duration": 0.033723, "end_time": "2021-02-12T00:03:38.267915", "exception": false, "start_time": "2021-02-12T00:03:38.234192", "status": "completed"} tags=[]
# Global and per-replica-scaled hyperparameters. The *_CL variants configure
# the classifier-training stage; the plain ones configure the contrastive
# encoder pre-training stage.
BATCH_SIZE = 8 * REPLICAS          # encoder pre-training batch size
BATCH_SIZE_CL = 8 * REPLICAS       # classifier batch size
LEARNING_RATE = 1e-5 * REPLICAS    # encoder peak LR (scaled with replicas)
LEARNING_RATE_CL = 1e-5 * REPLICAS # classifier peak LR
EPOCHS_CL = 15                     # classifier epochs
EPOCHS = 20                        # encoder pre-training epochs
HEIGHT = 512                       # model input height
WIDTH = 512                        # model input width
HEIGHT_DT = 512                    # TFRecord (stored) image height
WIDTH_DT = 512                     # TFRecord (stored) image width
CHANNELS = 3
N_CLASSES = 5                      # CBB, CBSD, CGM, CMD, Healthy
N_FOLDS = 5                        # K-fold splits
FOLDS_USED = 1                     # only the first fold is actually trained
ES_PATIENCE = 5                    # early-stopping patience (epochs)
# + [markdown] papermill={"duration": 0.025041, "end_time": "2021-02-12T00:03:38.314869", "exception": false, "start_time": "2021-02-12T00:03:38.289828", "status": "completed"} tags=[]
# # Load data
# + _kg_hide-input=true papermill={"duration": 0.627925, "end_time": "2021-02-12T00:03:38.967063", "exception": false, "start_time": "2021-02-12T00:03:38.339138", "status": "completed"} tags=[]
# Read the competition labels CSV and resolve the TFRecord files on GCS.
database_base_path = '/kaggle/input/cassava-leaf-disease-classification/'
train = pd.read_csv(f'{database_base_path}train.csv')
print(f'Train samples: {len(train)}')
# Only the center-cropped competition TFRecords are used for this run; the
# commented alternatives are external / per-class datasets kept for reference.
GCS_PATH = KaggleDatasets().get_gcs_path(f'cassava-leaf-disease-tfrecords-center-{HEIGHT_DT}x{WIDTH_DT}') # Center croped and resized (50 TFRecord)
# GCS_PATH_EXT = KaggleDatasets().get_gcs_path(f'cassava-leaf-disease-tfrecords-external-{HEIGHT_DT}x{WIDTH_DT}') # Center croped and resized (50 TFRecord) (External)
# GCS_PATH_CLASSES = KaggleDatasets().get_gcs_path(f'cassava-leaf-disease-tfrecords-classes-{HEIGHT_DT}x{WIDTH_DT}') # Center croped and resized (50 TFRecord) by classes
# GCS_PATH_EXT_CLASSES = KaggleDatasets().get_gcs_path(f'cassava-leaf-disease-tfrecords-classes-ext-{HEIGHT_DT}x{WIDTH_DT}') # Center croped and resized (50 TFRecord) (External) by classes
FILENAMES_COMP = tf.io.gfile.glob(GCS_PATH + '/*.tfrec')
# FILENAMES_2019 = tf.io.gfile.glob(GCS_PATH_EXT + '/*.tfrec')
# FILENAMES_COMP_CBB = tf.io.gfile.glob(GCS_PATH_CLASSES + '/CBB*.tfrec')
# FILENAMES_COMP_CBSD = tf.io.gfile.glob(GCS_PATH_CLASSES + '/CBSD*.tfrec')
# FILENAMES_COMP_CGM = tf.io.gfile.glob(GCS_PATH_CLASSES + '/CGM*.tfrec')
# FILENAMES_COMP_CMD = tf.io.gfile.glob(GCS_PATH_CLASSES + '/CMD*.tfrec')
# FILENAMES_COMP_Healthy = tf.io.gfile.glob(GCS_PATH_CLASSES + '/Healthy*.tfrec')
# FILENAMES_2019_CBB = tf.io.gfile.glob(GCS_PATH_EXT_CLASSES + '/CBB*.tfrec')
# FILENAMES_2019_CBSD = tf.io.gfile.glob(GCS_PATH_EXT_CLASSES + '/CBSD*.tfrec')
# FILENAMES_2019_CGM = tf.io.gfile.glob(GCS_PATH_EXT_CLASSES + '/CGM*.tfrec')
# FILENAMES_2019_CMD = tf.io.gfile.glob(GCS_PATH_EXT_CLASSES + '/CMD*.tfrec')
# FILENAMES_2019_Healthy = tf.io.gfile.glob(GCS_PATH_EXT_CLASSES + '/Healthy*.tfrec')
TRAINING_FILENAMES = FILENAMES_COMP
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
print(f'GCS: train images: {NUM_TRAINING_IMAGES}')
display(train.head())
# + [markdown] papermill={"duration": 0.035478, "end_time": "2021-02-12T00:03:39.029860", "exception": false, "start_time": "2021-02-12T00:03:38.994382", "status": "completed"} tags=[]
# # Augmentation
# + papermill={"duration": 0.052252, "end_time": "2021-02-12T00:03:39.106326", "exception": false, "start_time": "2021-02-12T00:03:39.054074", "status": "completed"} tags=[]
def data_augment(image, label):
    """Randomly augment one (image, label) pair.

    Applies, each with its own probability: horizontal/vertical flips,
    transpose, 90/180/270-degree rotations, saturation/contrast/brightness
    jitter, central or random crops (resized back to HEIGHT x WIDTH), and
    cutout. All probabilities are drawn up front so each transform's chance
    is independent. The label passes through unchanged.
    """
    # Independent uniform draws deciding which transforms fire below.
    # p_rotation = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_spatial = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_rotate = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_pixel_1 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_pixel_2 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_pixel_3 = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    # p_shear = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_crop = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    p_cutout = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    # # Shear
    # if p_shear > .2:
    #     if p_shear > .6:
    #         image = transform_shear(image, HEIGHT, shear=20.)
    #     else:
    #         image = transform_shear(image, HEIGHT, shear=-20.)
    # # Rotation
    # if p_rotation > .2:
    #     if p_rotation > .6:
    #         image = transform_rotation(image, HEIGHT, rotation=45.)
    #     else:
    #         image = transform_rotation(image, HEIGHT, rotation=-45.)
    # Flips (each applied independently with probability 0.5)
    image = tf.image.random_flip_left_right(image)
    image = tf.image.random_flip_up_down(image)
    if p_spatial > .75:
        image = tf.image.transpose(image)
    # Rotates (25% chance each of 90/180/270 degrees, 25% chance of none)
    if p_rotate > .75:
        image = tf.image.rot90(image, k=3) # rotate 270º
    elif p_rotate > .5:
        image = tf.image.rot90(image, k=2) # rotate 180º
    elif p_rotate > .25:
        image = tf.image.rot90(image, k=1) # rotate 90º
    # Pixel-level transforms (each with independent 60% chance)
    if p_pixel_1 >= .4:
        image = tf.image.random_saturation(image, lower=.7, upper=1.3)
    if p_pixel_2 >= .4:
        image = tf.image.random_contrast(image, lower=.8, upper=1.2)
    if p_pixel_3 >= .4:
        image = tf.image.random_brightness(image, max_delta=.1)
    # Crops: 40% central crop (fraction .5-.8), 30% random square crop
    # resized back, 30% no crop. Central crops are resized later by
    # prepare_image in the dataset pipeline.
    if p_crop > .6:
        if p_crop > .9:
            image = tf.image.central_crop(image, central_fraction=.5)
        elif p_crop > .8:
            image = tf.image.central_crop(image, central_fraction=.6)
        elif p_crop > .7:
            image = tf.image.central_crop(image, central_fraction=.7)
        else:
            image = tf.image.central_crop(image, central_fraction=.8)
    elif p_crop > .3:
        crop_size = tf.random.uniform([], int(HEIGHT*.6), HEIGHT, dtype=tf.int32)
        image = tf.image.random_crop(image, size=[crop_size, crop_size, CHANNELS])
        image = tf.image.resize(image, size=[HEIGHT, WIDTH])
    if p_cutout > .5:
        image = data_augment_cutout(image)
    return image, label
# + [markdown] papermill={"duration": 0.026172, "end_time": "2021-02-12T00:03:39.161538", "exception": false, "start_time": "2021-02-12T00:03:39.135366", "status": "completed"} tags=[]
# ## Auxiliary functions
# + _kg_hide-input=true papermill={"duration": 0.051197, "end_time": "2021-02-12T00:03:39.245488", "exception": false, "start_time": "2021-02-12T00:03:39.194291", "status": "completed"} tags=[]
# CutOut
# CutOut
def data_augment_cutout(image, min_mask_size=(int(HEIGHT * .1), int(HEIGHT * .1)),
                        max_mask_size=(int(HEIGHT * .125), int(HEIGHT * .125))):
    """Apply a random number of cutout masks to the image.

    The cutout count is tiered by a single uniform draw: 15% chance of 10-15
    masks, 25% chance of 5-10, 35% chance of 2-5, otherwise exactly 1. Mask
    sizes are bounded by min/max_mask_size (fractions of HEIGHT). Delegates
    the actual masking to the project helper `random_cutout`.
    """
    p_cutout = tf.random.uniform([], 0, 1.0, dtype=tf.float32)
    if p_cutout > .85:  # 10~15 cut outs
        n_cutout = tf.random.uniform([], 10, 15, dtype=tf.int32)
        image = random_cutout(image, HEIGHT, WIDTH,
                              min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout)
    elif p_cutout > .6:  # 5~10 cut outs
        n_cutout = tf.random.uniform([], 5, 10, dtype=tf.int32)
        image = random_cutout(image, HEIGHT, WIDTH,
                              min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout)
    elif p_cutout > .25:  # 2~5 cut outs
        n_cutout = tf.random.uniform([], 2, 5, dtype=tf.int32)
        image = random_cutout(image, HEIGHT, WIDTH,
                              min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=n_cutout)
    else:  # 1 cut out
        image = random_cutout(image, HEIGHT, WIDTH,
                              min_mask_size=min_mask_size, max_mask_size=max_mask_size, k=1)
    return image
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _kg_hide-input=true _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 0.071811, "end_time": "2021-02-12T00:03:39.348403", "exception": false, "start_time": "2021-02-12T00:03:39.276592", "status": "completed"} tags=[]
# Datasets utility functions
def random_crop(image, label):
    """
    Randomly crop the image to the expected (HEIGHT, WIDTH, CHANNELS) size.
    (The previous docstring was copy-pasted from prepare_image.)
    """
    image = tf.image.random_crop(image, size=[HEIGHT, WIDTH, CHANNELS])
    return image, label
def prepare_image(image, label):
    """Resize the image to (HEIGHT, WIDTH) and pin its static shape."""
    resized = tf.image.resize(image, [HEIGHT, WIDTH])
    # reshape fixes the static shape so downstream batching knows the dims.
    return tf.reshape(resized, [HEIGHT, WIDTH, CHANNELS]), label
def center_crop_(image, label, height_rs, width_rs, height=HEIGHT_DT, width=WIDTH_DT, channels=3):
    """Center-crop the image to a square on its shorter side, then resize.

    height/width/channels describe the stored image; height_rs/width_rs are
    the desired output size.
    """
    image = tf.reshape(image, [height, width, channels])  # restore original static shape
    h, w = image.shape[0], image.shape[1]
    # A centered side x side square: the offset is zero along the shorter axis.
    side = min(h, w)
    image = tf.image.crop_to_bounding_box(image, (h - side) // 2, (w - side) // 2, side, side)
    return tf.image.resize(image, [height_rs, width_rs]), label
def read_tfrecord_(example, labeled=True, sparse=True, n_classes=5):
    """Parse one serialized TFRecord example.

    1. Parse data based on the 'TFREC_FORMAT' map (labeled records carry an
       int64 'target'; unlabeled ones carry a string 'image_name').
    2. Decode the image bytes via the project helper `decode_image`.
    3. Return (image, label) when `labeled` — one-hot encoded with n_classes
       when `sparse` is False — otherwise (image, image_name).
    """
    if labeled:
        TFREC_FORMAT = {
            'image': tf.io.FixedLenFeature([], tf.string),
            'target': tf.io.FixedLenFeature([], tf.int64),
        }
    else:
        TFREC_FORMAT = {
            'image': tf.io.FixedLenFeature([], tf.string),
            'image_name': tf.io.FixedLenFeature([], tf.string),
        }
    example = tf.io.parse_single_example(example, TFREC_FORMAT)
    image = decode_image(example['image'])
    if labeled:
        label_or_name = tf.cast(example['target'], tf.int32)
        if not sparse: # One-Hot Encoding needed to use "categorical_crossentropy" loss
            label_or_name = tf.one_hot(tf.cast(label_or_name, tf.int32), n_classes)
    else:
        label_or_name = example['image_name']
    return image, label_or_name
def get_dataset(filenames, labeled=True, ordered=False, repeated=False,
                cached=False, augment=False, batch_size=BATCH_SIZE, sparse=True):
    """
    Return a Tensorflow dataset ready for training or inference.

    Args:
        filenames: list of TFRecord file paths.
        labeled: parse (image, target) records if True, else (image, image_name).
        ordered: keep file/record order (for inference); when False, files are
            read in parallel with non-deterministic interleaving and examples
            are shuffled.
        repeated: repeat the dataset indefinitely (training by steps).
        cached: cache the batched dataset in memory.
        augment: apply `data_augment` to each example.
        batch_size: examples per batch.
        sparse: keep integer labels (True) or one-hot encode them (False).
    """
    ignore_order = tf.data.Options()
    if not ordered:
        # Parallel, non-deterministic reads: record order does not matter here.
        ignore_order.experimental_deterministic = False
        dataset = tf.data.Dataset.list_files(filenames)
        dataset = dataset.interleave(tf.data.TFRecordDataset, num_parallel_calls=AUTO)
    else:
        dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTO)
    # BUG FIX: removed a stray duplicate `dataset = tf.data.TFRecordDataset(...)`
    # after this if/else that unconditionally overwrote the dataset, making the
    # interleaved (unordered) read path dead code.
    dataset = dataset.with_options(ignore_order)
    dataset = dataset.map(lambda x: read_tfrecord_(x, labeled=labeled, sparse=sparse), num_parallel_calls=AUTO)
    if augment:
        dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
    dataset = dataset.map(scale_image, num_parallel_calls=AUTO)
    dataset = dataset.map(prepare_image, num_parallel_calls=AUTO)
    if labeled:
        # Attach the auxiliary (healthy / CMD) binary targets.
        dataset = dataset.map(lambda x, y: conf_output(x, y, sparse=sparse), num_parallel_calls=AUTO)
    if not ordered:
        dataset = dataset.shuffle(2048)
    if repeated:
        dataset = dataset.repeat()
    dataset = dataset.batch(batch_size)
    if cached:
        dataset = dataset.cache()
    dataset = dataset.prefetch(AUTO)
    return dataset
def conf_output(image, label, sparse=False):
    """
    Configure the output of the dataset: pair the image with the main label
    plus two auxiliary binary targets — [1.] when the class is Healthy (4)
    and [1.] when the class is CMD (3), else [0.]. `sparse` tells whether
    `label` is an integer class id or a one-hot vector.
    """
    aux_label = [0.]    # healthy indicator
    aux_2_label = [0.]  # CMD indicator
    if sparse:
        if label == 4:  # Healthy
            aux_label = [1.]
        if label == 3:  # CMD
            aux_2_label = [1.]
    else:
        # One-hot labels: recover the class id via argmax.
        if tf.math.argmax(label, axis=-1) == 4:  # Healthy
            aux_label = [1.]
        if tf.math.argmax(label, axis=-1) == 3:  # CMD
            aux_2_label = [1.]
    return (image, (label, aux_label, aux_2_label))
# + [markdown] papermill={"duration": 0.023362, "end_time": "2021-02-12T00:03:39.396005", "exception": false, "start_time": "2021-02-12T00:03:39.372643", "status": "completed"} tags=[]
# # Training data samples (with augmentation)
# + _kg_hide-input=true papermill={"duration": 0.031354, "end_time": "2021-02-12T00:03:39.450695", "exception": false, "start_time": "2021-02-12T00:03:39.419341", "status": "completed"} tags=[]
# train_dataset = get_dataset(FILENAMES_COMP, ordered=True, augment=True)
# train_iter = iter(train_dataset.unbatch().batch(20))
# display_batch_of_images(next(train_iter))
# display_batch_of_images(next(train_iter))
# + [markdown] papermill={"duration": 0.024839, "end_time": "2021-02-12T00:03:39.498891", "exception": false, "start_time": "2021-02-12T00:03:39.474052", "status": "completed"} tags=[]
# # Model
# + papermill={"duration": 0.046823, "end_time": "2021-02-12T00:03:39.569257", "exception": false, "start_time": "2021-02-12T00:03:39.522434", "status": "completed"} tags=[]
class UnitNormLayer(L.Layer):
    """
    Project each row vector of a batch onto the unit hypersphere
    (divide by its Euclidean norm).
    """
    def __init__(self, **kwargs):
        super(UnitNormLayer, self).__init__(**kwargs)

    def call(self, input_tensor):
        # Reshape the per-row norms to a column so division broadcasts row-wise.
        row_norms = tf.reshape(tf.norm(input_tensor, axis=1), [-1, 1])
        return input_tensor / row_norms
def encoder_fn(input_shape):
    """Build an EfficientNetB3 backbone that outputs unit-normalized embeddings.

    Global-average pooling reduces the feature map to one vector per image,
    which is then projected onto the unit hypersphere.
    """
    image_in = L.Input(shape=input_shape, name='input_image')
    backbone = efn.EfficientNetB3(input_tensor=image_in,
                                  include_top=False,
                                  weights='imagenet',
                                  pooling='avg')
    embeddings = UnitNormLayer()(backbone.output)
    return Model(inputs=image_in, outputs=embeddings)
def add_projection_head(input_shape, encoder, num_projection_layers=1):
    """Wrap the encoder with three projection heads for contrastive pre-training.

    Heads: main ('output'), healthy-vs-not ('output_healthy') and CMD-vs-not
    ('output_cmd'); each is a 128-unit ReLU dense layer followed by unit
    normalization. NOTE(review): `num_projection_layers` is currently unused —
    kept for interface compatibility.
    """
    image_in = L.Input(shape=input_shape, name='input_image')
    embeddings = encoder(image_in)

    def _head(dense_name, norm_name):
        # One dense projection followed by unit normalization.
        projected = L.Dense(128, activation='relu', name=dense_name)(embeddings)
        return UnitNormLayer(name=norm_name)(projected)

    main_out = _head('projection_head', 'output')
    healthy_out = _head('projection_head_aux1', 'output_healthy')
    cmd_out = _head('projection_head_aux2', 'output_cmd')
    return Model(inputs=image_in, outputs=[main_out, healthy_out, cmd_out])
def classifier_fn(input_shape, N_CLASSES, encoder, trainable=True):
    """Build the classification model on top of the (pre-trained) encoder.

    Outputs a softmax over N_CLASSES plus two auxiliary sigmoid heads
    (healthy, CMD). `trainable` freezes/unfreezes the encoder layers;
    `unfreeze_model` then re-opens everything except batch-norm layers, so
    with trainable=True the first loop is effectively redundant — TODO confirm
    against the helper's implementation.
    """
    for layer in encoder.layers:
        layer.trainable = trainable
    unfreeze_model(encoder) # unfreeze all layers except "batch normalization"
    inputs = L.Input(shape=input_shape, name='input_image')
    features = encoder(inputs)
    # Classification head: dropout-regularized 512-unit dense layer.
    features = L.Dropout(.5)(features)
    features = L.Dense(512, activation='relu')(features)
    features = L.Dropout(.5)(features)
    output = L.Dense(N_CLASSES, activation='softmax', name='output')(features)
    output_healthy = L.Dense(1, activation='sigmoid', name='output_healthy')(features)
    output_cmd = L.Dense(1, activation='sigmoid', name='output_cmd')(features)
    model = Model(inputs=inputs, outputs=[output, output_healthy, output_cmd])
    return model
# + papermill={"duration": 0.038799, "end_time": "2021-02-12T00:03:39.633840", "exception": false, "start_time": "2021-02-12T00:03:39.595041", "status": "completed"} tags=[]
# Default temperature used when instantiating the loss below.
temperature = 0.1

class SupervisedContrastiveLoss(losses.Loss):
    """Supervised contrastive loss: N-pairs loss over temperature-scaled
    cosine-similarity logits between all pairs in the batch.
    """
    def __init__(self, temperature=0.1, name=None):
        super(SupervisedContrastiveLoss, self).__init__(name=name)
        self.temperature = temperature

    def __call__(self, labels, feature_vectors, sample_weight=None):
        # Normalize feature vectors so the matmul below yields cosine similarities.
        feature_vectors_normalized = tf.math.l2_normalize(feature_vectors, axis=1)
        # Compute pairwise similarity logits, scaled by this instance's temperature.
        # BUG FIX: previously divided by the module-level `temperature` global,
        # silently ignoring the value passed to __init__.
        logits = tf.divide(
            tf.matmul(
                feature_vectors_normalized, tf.transpose(feature_vectors_normalized)
            ),
            self.temperature,
        )
        return tfa.losses.npairs_loss(tf.squeeze(labels), logits)
# + [markdown] papermill={"duration": 0.023603, "end_time": "2021-02-12T00:03:39.682059", "exception": false, "start_time": "2021-02-12T00:03:39.658456", "status": "completed"} tags=[]
# ### Learning rate schedule
# + _kg_hide-input=true papermill={"duration": 38.267957, "end_time": "2021-02-12T00:04:17.975607", "exception": false, "start_time": "2021-02-12T00:03:39.707650", "status": "completed"} tags=[]
# Cosine-with-warmup schedule parameters for the encoder pre-training stage.
lr_start = 1e-8           # LR at step 0 of the linear warmup
lr_min = 1e-6             # floor applied after the cosine decay
lr_max = LEARNING_RATE    # peak LR, reached at the end of warmup
num_cycles = 1            # cosine hard restarts over the run
warmup_epochs = 3
hold_max_epochs = 0
total_epochs = EPOCHS
step_size = (NUM_TRAINING_IMAGES//BATCH_SIZE)   # optimizer steps per epoch
hold_max_steps = hold_max_epochs * step_size
total_steps = total_epochs * step_size
warmup_steps = warmup_epochs * step_size
def lrfn(total_steps, warmup_steps=0, lr_start=1e-4, lr_max=1e-3, lr_min=1e-4, num_cycles=1.):
    """Return a step -> learning-rate schedule function (see inner docstring)."""
    @tf.function
    def cosine_with_hard_restarts_schedule_with_warmup_(step):
        """ Create a schedule with a learning rate that decreases following the
        values of the cosine function with several hard restarts, after a warmup
        period during which it increases linearly between 0 and 1.
        """
        if step < warmup_steps:
            # Linear warmup from lr_start to lr_max.
            lr = (lr_max - lr_start) / warmup_steps * step + lr_start
        else:
            # Cosine decay; the modulo restarts the cycle num_cycles times.
            progress = (step - warmup_steps) / (total_steps - warmup_steps)
            lr = lr_max * (0.5 * (1.0 + tf.math.cos(np.pi * ((num_cycles * progress) % 1.0))))
        # Clamp to the floor (applied to the warmup branch too).
        if lr_min is not None:
            lr = tf.math.maximum(lr_min, float(lr))
        return lr
    return cosine_with_hard_restarts_schedule_with_warmup_
# Visualize the encoder pre-training LR schedule over all steps.
lrfn_fn = lrfn(total_steps, warmup_steps, lr_start, lr_max, lr_min, num_cycles)
rng = [i for i in range(total_steps)]
y = [lrfn_fn(tf.cast(x, tf.float32)) for x in rng]
sns.set(style='whitegrid')
fig, ax = plt.subplots(figsize=(20, 6))
plt.plot(rng, y)
print(f'{total_steps} total steps and {step_size} steps per epoch')
print(f'Learning rate schedule: {y[0]:.3g} to {max(y):.3g} to {y[-1]:.3g}')
# + _kg_hide-input=true papermill={"duration": 28.376492, "end_time": "2021-02-12T00:04:46.381140", "exception": false, "start_time": "2021-02-12T00:04:18.004648", "status": "completed"} tags=[]
# Visualize the classifier-stage LR schedule (same shape, classifier params).
print('Classifier Learning rate scheduler')
step_size_cl = (NUM_TRAINING_IMAGES//BATCH_SIZE_CL)
total_steps_cl = (EPOCHS_CL * step_size_cl)
warmup_steps_cl = warmup_epochs * step_size_cl
num_cycles_cl = 1
lrfn_fn = lrfn(total_steps_cl, warmup_steps_cl, lr_start, LEARNING_RATE_CL, lr_min, num_cycles_cl)
rng = [i for i in range(total_steps_cl)]
y = [lrfn_fn(tf.cast(x, tf.float32)) for x in rng]
sns.set(style='whitegrid')
fig, ax = plt.subplots(figsize=(20, 6))
plt.plot(rng, y)
print(f'{total_steps_cl} total steps and {step_size_cl} steps per epoch')
print(f'Learning rate schedule: {y[0]:.3g} to {max(y):.3g} to {y[-1]:.3g}')
# + [markdown] papermill={"duration": 0.029533, "end_time": "2021-02-12T00:04:46.440199", "exception": false, "start_time": "2021-02-12T00:04:46.410666", "status": "completed"} tags=[]
# # Training
# + _kg_hide-input=true _kg_hide-output=true papermill={"duration": 1843.460881, "end_time": "2021-02-12T00:35:29.933422", "exception": false, "start_time": "2021-02-12T00:04:46.472541", "status": "completed"} tags=[]
skf = KFold(n_splits=N_FOLDS, shuffle=True, random_state=seed)
oof_pred = []; oof_labels = []; oof_names = []; oof_folds = []; history_list = []; oof_embed = []
for fold,(idxT, idxV) in enumerate(skf.split(np.arange(15))):
if fold >= FOLDS_USED:
break
if tpu: tf.tpu.experimental.initialize_tpu_system(tpu)
K.clear_session()
print(f'\nFOLD: {fold+1}')
print(f'TRAIN: {idxT} VALID: {idxV}')
# Create train and validation sets
TRAIN_FILENAMES = tf.io.gfile.glob([GCS_PATH + '/Id_train%.2i*.tfrec' % x for x in idxT])
# FILENAMES_COMP_CBB = tf.io.gfile.glob([GCS_PATH_CLASSES + '/CBB%.2i*.tfrec' % x for x in idxT])
# FILENAMES_COMP_CBSD = tf.io.gfile.glob([GCS_PATH_CLASSES + '/CBSD%.2i*.tfrec' % x for x in idxT])
# FILENAMES_COMP_CGM = tf.io.gfile.glob([GCS_PATH_CLASSES + '/CGM%.2i*.tfrec' % x for x in idxT])
# FILENAMES_COMP_Healthy = tf.io.gfile.glob([GCS_PATH_CLASSES + '/Healthy%.2i*.tfrec' % x for x in idxT])
VALID_FILENAMES = tf.io.gfile.glob([GCS_PATH + '/Id_train%.2i*.tfrec' % x for x in idxV])
np.random.shuffle(TRAIN_FILENAMES)
ct_train = count_data_items(TRAIN_FILENAMES)
ct_valid = count_data_items(VALID_FILENAMES)
step_size = (ct_train // BATCH_SIZE)
warmup_steps = (warmup_epochs * step_size)
total_steps = (total_epochs * step_size)
### Pre-train the encoder
print('Pre-training the encoder using "Supervised Contrastive" Loss')
with strategy.scope():
encoder = encoder_fn((None, None, CHANNELS))
unfreeze_model(encoder) # unfreeze all layers except "batch normalization"
encoder_proj = add_projection_head((None, None, CHANNELS), encoder)
encoder_proj.summary()
lrfn_fn = lrfn(total_steps, warmup_steps, lr_start, LEARNING_RATE, lr_min, num_cycles)
optimizer = optimizers.Adam(learning_rate=lambda: lrfn_fn(tf.cast(optimizer.iterations, tf.float32)))
encoder_proj.compile(optimizer=optimizer,
loss={'output': SupervisedContrastiveLoss(temperature),
'output_healthy': SupervisedContrastiveLoss(temperature),
'output_cmd': SupervisedContrastiveLoss(temperature)},
loss_weights={'output': 1.,
'output_healthy': .1,
'output_cmd': .1})
history_enc = encoder_proj.fit(x=get_dataset(TRAIN_FILENAMES, repeated=True),
# validation_data=get_dataset(VALID_FILENAMES, ordered=True),
steps_per_epoch=step_size,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
verbose=2).history
### Train the classifier with the frozen encoder
print('Training the classifier with the frozen encoder')
step_size_cl = (ct_train // BATCH_SIZE_CL)
total_steps_cl = (EPOCHS_CL * step_size_cl)
with strategy.scope():
model = classifier_fn((None, None, CHANNELS), N_CLASSES, encoder, trainable=True) #, trainable=False)
model.summary()
lrfn_fn = lrfn(total_steps_cl, warmup_steps_cl, lr_start, LEARNING_RATE_CL, lr_min, num_cycles_cl)
optimizer = optimizers.Adam(learning_rate=lambda: lrfn_fn(tf.cast(optimizer.iterations, tf.float32)))
model.compile(optimizer=optimizer,
loss={'output': losses.CategoricalCrossentropy(label_smoothing=.3),
'output_healthy': losses.BinaryCrossentropy(label_smoothing=.1),
'output_cmd': losses.BinaryCrossentropy(label_smoothing=.1)},
loss_weights={'output': 1.,
'output_healthy': .1,
'output_cmd': .1},
metrics={'output': metrics.CategoricalAccuracy(),
'output_healthy': metrics.BinaryAccuracy(),
'output_cmd': metrics.BinaryAccuracy()})
model_path = f'model_{fold}.h5'
ckpoint = ModelCheckpoint(model_path, mode='max', verbose=0,
save_best_only=True, save_weights_only=True,
monitor='val_output_categorical_accuracy')
es = EarlyStopping(patience=ES_PATIENCE, mode='max',
restore_best_weights=True, verbose=1,
monitor='val_output_categorical_accuracy')
# Train the classifier.
# BUG FIX: the original passed steps_per_epoch=step_size, which was computed
# for the contrastive phase with BATCH_SIZE; these datasets are batched with
# BATCH_SIZE_CL, so step_size_cl (computed above, previously unused) is the
# matching step count.
history = model.fit(x=get_dataset(TRAIN_FILENAMES, repeated=True, augment=True, batch_size=BATCH_SIZE_CL, sparse=False),
                    validation_data=get_dataset(VALID_FILENAMES, ordered=True, batch_size=BATCH_SIZE_CL, sparse=False),
                    steps_per_epoch=step_size_cl,
                    epochs=EPOCHS_CL,
                    callbacks=[ckpoint, es],
                    verbose=2).history
### RESULTS
# Best out-of-fold accuracy reached on this fold.
print(f"#### FOLD {fold+1} OOF Accuracy = {np.max(history['val_output_categorical_accuracy']):.3f}")
history_list.append(history)
# Load best model weights
model.load_weights(model_path)
# OOF predictions
ds_valid = get_dataset(VALID_FILENAMES, ordered=True)
oof_folds.append(np.full((ct_valid), fold, dtype='int8'))
# target[0]: first element of each label tuple — assumed to be the main
# 'output' head's label; TODO confirm against get_dataset.
oof_labels.append([target[0].numpy() for img, target in iter(ds_valid.unbatch())])
x_oof = ds_valid.map(lambda image, target: image)
# model.predict returns one array per head; [0] is the main 'output' head.
oof_pred.append(model.predict(x_oof)[0])
# OOF names
ds_valid_names = get_dataset(VALID_FILENAMES, labeled=False, ordered=True)
oof_names.append(np.array([img_name.numpy().decode('utf-8') for img, img_name in iter(ds_valid_names.unbatch())]))
oof_embed.append(encoder.predict(x_oof)) # OOF embeddings
# + [markdown] papermill={"duration": 0.052367, "end_time": "2021-02-12T00:35:30.038230", "exception": false, "start_time": "2021-02-12T00:35:29.985863", "status": "completed"} tags=[]
# ## Model loss graph
# + _kg_hide-input=true papermill={"duration": 0.486178, "end_time": "2021-02-12T00:35:30.576501", "exception": false, "start_time": "2021-02-12T00:35:30.090323", "status": "completed"} tags=[]
# Plot the training curves of every fold.
for fold, history in enumerate(history_list):
    print(f'\nFOLD: {fold+1}')
    plot_metrics(history, acc_name='output_categorical_accuracy')
# + [markdown] papermill={"duration": 0.055929, "end_time": "2021-02-12T00:35:30.688802", "exception": false, "start_time": "2021-02-12T00:35:30.632873", "status": "completed"} tags=[]
# # Model evaluation
# + _kg_hide-input=true papermill={"duration": 0.668235, "end_time": "2021-02-12T00:35:31.412220", "exception": false, "start_time": "2021-02-12T00:35:30.743985", "status": "completed"} tags=[]
# Aggregate the out-of-fold (OOF) predictions across folds and report metrics.
y_true = np.concatenate(oof_labels)
# y_true = np.argmax(y_true, axis=-1)
y_prob = np.concatenate(oof_pred)
y_pred = np.argmax(y_prob, axis=-1)
folds = np.concatenate(oof_folds)
names = np.concatenate(oof_names)
acc = accuracy_score(y_true, y_pred)
print(f'Overall OOF Accuracy = {acc:.3f}')
# BUG FIX: the original wrote 'fold': fold — the scalar left over from the
# last plotting loop — tagging every row with the same fold number. Use the
# per-row 'folds' array (which was computed above but never used).
df_oof = pd.DataFrame({'image_id': names, 'fold': folds,
                       'target': y_true, 'pred': y_pred})
df_oof = df_oof.assign(probs=[prob for prob in y_prob])
df_oof.to_csv('oof.csv', index=False)
display(df_oof.head())
print(classification_report(y_true, y_pred, target_names=CLASSES))
# + [markdown] papermill={"duration": 0.056716, "end_time": "2021-02-12T00:35:31.525209", "exception": false, "start_time": "2021-02-12T00:35:31.468493", "status": "completed"} tags=[]
# # Confusion matrix
# + _kg_hide-input=true papermill={"duration": 0.445247, "end_time": "2021-02-12T00:35:32.026607", "exception": false, "start_time": "2021-02-12T00:35:31.581360", "status": "completed"} tags=[]
# Row-normalized confusion matrix over the OOF predictions.
fig, ax = plt.subplots(1, 1, figsize=(20, 12))
cfn_matrix = confusion_matrix(y_true, y_pred, labels=range(len(CLASSES)))
# Divide each row by its total so entries are per-class recall fractions.
cfn_matrix = (cfn_matrix.T / cfn_matrix.sum(axis=1)).T
df_cm = pd.DataFrame(cfn_matrix, index=CLASSES, columns=CLASSES)
ax = sns.heatmap(df_cm, cmap='Blues', annot=True, fmt='.2f', linewidths=.5).set_title('Train', fontsize=30)
plt.show()
# + [markdown] papermill={"duration": 0.057806, "end_time": "2021-02-12T00:35:32.142434", "exception": false, "start_time": "2021-02-12T00:35:32.084628", "status": "completed"} tags=[]
# # Visualize embeddings outputs
# + _kg_hide-input=true papermill={"duration": 40.72914, "end_time": "2021-02-12T00:36:12.929480", "exception": false, "start_time": "2021-02-12T00:35:32.200340", "status": "completed"} tags=[]
# Visualize the concatenated OOF encoder embeddings, colored by true label.
y_embeddings = np.concatenate(oof_embed)
visualize_embeddings(y_embeddings, y_true)
# + [markdown] papermill={"duration": 0.074453, "end_time": "2021-02-12T00:36:13.078930", "exception": false, "start_time": "2021-02-12T00:36:13.004477", "status": "completed"} tags=[]
# # Visualize predictions
# + _kg_hide-input=true papermill={"duration": 0.084343, "end_time": "2021-02-12T00:36:13.238577", "exception": false, "start_time": "2021-02-12T00:36:13.154234", "status": "completed"} tags=[]
# train_dataset = get_dataset(TRAINING_FILENAMES, ordered=True)
# x_samp, y_samp = dataset_to_numpy_util(train_dataset, 18)
# y_samp = np.argmax(y_samp, axis=-1)
# x_samp_1, y_samp_1 = x_samp[:9,:,:,:], y_samp[:9]
# samp_preds_1 = model.predict(x_samp_1, batch_size=9)
# display_9_images_with_predictions(x_samp_1, samp_preds_1, y_samp_1)
# x_samp_2, y_samp_2 = x_samp[9:,:,:,:], y_samp[9:]
# samp_preds_2 = model.predict(x_samp_2, batch_size=9)
# display_9_images_with_predictions(x_samp_2, samp_preds_2, y_samp_2)
| Model backlog/Models/Train/125-cassava-leaf-effnetb3-scl-imagenet-512x512.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from scipy.stats.mstats import gmean
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
p=[]
subs=[]
# -
# Geometric-mean blend of several submission files. Listing a file more than
# once weights it more heavily in the blend; the comment next to each source
# is its public-leaderboard score.
_sources = [
    ('results/sub_p0_epoch15.csv', 1),  # 0.12
    ('results/sub_p3_epoch20.csv', 2),  # 0.099
    ('results/sub_p4_epoch19.csv', 3),  # 0.096
    ('results/sub_p5_epoch20.csv', 1),  # 0.108
    ('results/sub_p6_epoch20.csv', 2),  # 0.104
    ('results/sub_p1_epoch15.csv', 1),  # 0.1415
    ('results/sub_p2_epoch15.csv', 1),  # 0.1628
]
for _path, _copies in _sources:
    for _ in range(_copies):
        subs.append(pd.read_csv(_path))
l = len(subs)
# Probability columns only (column 0 is the id); the tiny epsilon keeps the
# geometric mean away from exact zeros.
predictions = [np.array(sub.iloc[:, 1:]) + 1e-50 for sub in subs]
predictions[0]
# gmean over the list collapses along axis 0, i.e. across submissions.
final_res = gmean(predictions)
final_res.shape
subs[0].shape
# Re-use the first submission as a template and overwrite its probabilities.
final_sub = subs[0].copy()
final_sub.iloc[:, 1:] = final_res
final_sub.head()
final_sub.to_csv("final_submission.csv", index=False)
| .ipynb_checkpoints/blend-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Explore previous_application.csv: list columns grouped by dtype.
prev = pd.read_csv('../../input/previous_application.csv')
dtype_df = prev.dtypes.reset_index()
# NOTE(review): the first column actually holds the column NAMES and the
# second the dtypes — the labels "Count"/"Type" look misleading; confirm
# whether "Column" was intended.
dtype_df.columns = ["Count", "Type"]
dtype_df[dtype_df['Type']=='int64']
dtype_df = prev.dtypes.reset_index()
dtype_df.columns = ["Count", "Type"]
dtype_df[dtype_df['Type']=='object']
dtype_df = prev.dtypes.reset_index()
dtype_df.columns = ["Count", "Type"]
dtype_df[dtype_df['Type']=='float64']
prev.head()
| Feature_Eng/Explore_Prev.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style="width: 100%; overflow: hidden;">
# <div style="width: 150px; float: left;"> <img src="https://raw.githubusercontent.com/DataForScience/Networks/master/data/D4Sci_logo_ball.png" alt="Data For Science, Inc" align="left" border="0" width=160px> </div>
# <div style="float: left; margin-left: 10px;"> <h1>Graphs and Networks</h1>
# <h1>Lesson II - Graph Properties</h1>
# <p><NAME><br/>
# <a href="http://www.data4sci.com/">www.data4sci.com</a><br/>
# @bgoncalves, @data4sci</p></div>
# </div>
# +
from collections import Counter
from pprint import pprint
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import watermark
# %load_ext watermark
# %matplotlib inline
# -
# We start by print out the versions of the libraries we're using for future reference
# %watermark -n -v -m -g -iv
# Load default figure style
plt.style.use('./d4sci.mplstyle')
# ## Graph class
# We now integrate our prefered graph representation into a class that we can build on. For now we provide it with just placeholders for our data
class Graph:
    """Minimal adjacency-dict graph.

    Nodes live in ``_nodes`` as {node: attribute-dict}; edges live in
    ``_edges`` as {node_i: {node_j: attribute-dict}}. For undirected graphs
    each edge is stored in both directions. Methods are attached
    incrementally below via the ``add_method`` decorator.
    """

    def __init__(self, directed=False):
        self._nodes = {}      # node -> attributes
        self._edges = {}      # node -> {neighbour -> edge attributes}
        self._directed = directed
# For ease of explanation, we will be adding methods to this class as we progress. To allow for this in a convenient way, we must declare a Python decorator that will be in charge of modifying the class as we implement further functionality
# Understanding this function is not important for the scope of the lecture, but if you are curious, you can find more information on [Decorators](https://www.python.org/dev/peps/pep-0318/) and [setattr](https://docs.python.org/3/library/functions.html#setattr) in the official Python documentation
def add_method(cls):
    """Return a decorator that attaches the decorated function to *cls*.

    The function is bound onto the class under its own ``__name__`` and is
    also returned unchanged, so it remains callable as a plain function.
    """
    def decorator(fn):
        setattr(cls, fn.__name__, fn)
        return fn
    return decorator
# We can already instantiate our skeleton class
G = Graph()
# and verify that it has nothing hiding inside other than the default Python methods and the fields we defined
dir(G)
# ## Nodes
# Now we add our first utility methods. *add_node* will be responsible for adding a single node to the Graph, while *add_nodes_from* will prove useful to add nodes in bulk. We can also add node attributes by passing keyword arguments to any of these two functions
@add_method(Graph)
def add_node(self, node, **kwargs):
    """Add *node* to the graph; keyword arguments become its attributes.

    Re-adding an existing node replaces its attribute dict.
    """
    self._nodes[node] = kwargs
@add_method(Graph)
def add_nodes_from(self, nodes, **kwargs):
    """Add every node in iterable *nodes*, giving each the same attributes.

    NOTE(review): the tuple branch stores the trailing items as a plain
    tuple (not a dict) and ignores **kwargs, which is inconsistent with
    add_node — confirm intent before relying on tuple inputs.
    """
    for node in nodes:
        if isinstance(node, tuple):
            self._nodes[node[0]] = node[1:]
        else:
            self._nodes[node] = kwargs
@add_method(Graph)
def nodes(self):
    """Return the graph's nodes as a list."""
    return [*self._nodes]
# And we can now check that this added functionality is now available to our Graph
dir(G)
# And that they work as promised
G.add_node("A", color="blue")
# And naturally
G._nodes
# Or, for a more complex example:
G.add_node("Z", color="green", size=14)
G._nodes
# *add_nodes_from* treats the first parameter as an iterable. This means that we can pass a string and it will add a node for each character.
G.add_nodes_from("ABC", color='red')
G._nodes
# Here it is important to note 2 things:
#
# - Since add_nodes_from expects the first argument to be a list of nodes, it treated each character of the string as an individual node
# - By adding the same node twice we overwrite the previous version.
# ## Edges
# Now we add the equivalent functionality for edges.
# +
@add_method(Graph)
def add_edge(self, node_i, node_j, **kwargs):
    """Add an edge from *node_i* to *node_j*; **kwargs become its attributes.

    Missing endpoints are created automatically (with empty attributes).
    Undirected graphs store the reverse direction too, sharing the same
    attribute dict for both orientations.
    """
    for endpoint in (node_i, node_j):
        if endpoint not in self._nodes:
            self.add_node(endpoint)

    directions = [(node_i, node_j)]
    if not self._directed:
        directions.append((node_j, node_i))

    for src, dst in directions:
        self._edges.setdefault(src, {})
        # Both orientations intentionally reference the same kwargs dict.
        self._edges[src][dst] = kwargs
@add_method(Graph)
def add_edges_from(self, edges, **kwargs):
    """Add every edge in *edges* (iterable of (node_i, node_j, ...) tuples),
    applying the same **kwargs attributes to each one."""
    for edge in edges:
        self.add_edge(*edge, **kwargs)
# -
# Before we proceed, let us create a new Graph object
G = Graph()
G._directed
# And add the edges from the edge list we considered before
edge_list = [
('A', 'B'),
('A', 'C'),
('A', 'E'),
('B', 'C'),
('C', 'D'),
('C', 'E'),
('D', 'E')]
G.add_edges_from(edge_list)
# And we can easily check that it looks correct, both for nodes and edges
G._nodes
G._edges
# For Completeness, we add a function to return a list of all the edges and their attributes (if any)
@add_method(Graph)
def edges(self, node_i=None):
    """Return [source, target, attributes] triples for the graph's edges.

    With *node_i* given, only that node's outgoing edges are listed
    (a node absent from the edge table raises KeyError, as before).
    """
    sources = self._edges if node_i is None else [node_i]
    return [[src, dst, self._edges[src][dst]]
            for src in sources
            for dst in self._edges[src]]
# So we recover the undirected version of the edge list we started with
G.edges()
# ## Graph properties
# Now that we have a minimally functional Graph object, we can start implementing functionality to retrieve information about the Graph.
# ### Node information
# Obtaining the number of nodes is simple enough:
@add_method(Graph)
def number_of_nodes(self):
    """Return the number of nodes in the graph."""
    return len(self._nodes)
# So we confirm that we have 5 nodes as expected
G.number_of_nodes()
# And to retrieve the degree of each node one must simply check the number of corresponding entries in the edge dictionary
@add_method(Graph)
def degrees(self):
    """Return {node: degree} for every node (0 for isolated nodes)."""
    return {node: len(self._edges.get(node, ())) for node in self._nodes}
# With the expected results
G.degrees()
# ### Edge Information
# The number of edges is simply given by:
@add_method(Graph)
def number_of_edges(self):
    """Return the number of edges in the graph.

    Undirected graphs store every edge twice (once per endpoint), so the
    raw count is halved. True division is kept deliberately, so the result
    is a float for undirected graphs — matching the original behavior.
    """
    total = sum(len(neighbours) for neighbours in self._edges.values())
    return total / 2 if not self._directed else total
# And so we find, as expected
G.number_of_edges()
# We also add a convenience method to check if the graph is directed
@add_method(Graph)
def is_directed(self):
    """Return True when the graph stores directed edges."""
    return self._directed
G.is_directed()
# ### Weights
# As we saw, each edge can potentially have a weight associated with it (it defaults to 1). We also provide a function to recover a dictionary mapping edges to weights
@add_method(Graph)
def weights(self, weight="weight"):
    """Return {(source, target): weight} for every stored edge direction.

    The value is taken from the edge attribute named *weight*; edges
    without that attribute default to 1.
    """
    return {(src, dst): attrs.get(weight, 1)
            for src, neighbours in self._edges.items()
            for dst, attrs in neighbours.items()}
# Edges without an explicit weight attribute default to 1; here we set one explicit weight and then inspect the result
G._edges['A']['B']['weight']=4
G._edges
G.weights()
# ### Topology and Correlations
# One particularly useful property of a graph is the list of nearest neighbors of a given node. With our formulation, this is particularly simple to implement
@add_method(Graph)
def neighbours(self, node):
    """Return the list of *node*'s neighbours ([] when it has no edges)."""
    return list(self._edges.get(node, ()))
# So we find that node $C$ has as nearest neighbours nodes $A$, $B$, $D$, $E$
G.neighbours('C')
# We are also interested in the degree and weight distributions. Before we can compute them, we define a utility function to generate a probability distribution from a dictionary of values
@add_method(Graph)
def _build_distribution(data, normalize=True):
    """Turn a {key: value} mapping into a sorted (value, count) float array.

    When *normalize* is True the counts column is rescaled to probabilities.
    Note: this takes no ``self`` and is used as a plain function via
    ``Graph._build_distribution`` — attaching it to the class is only for
    namespacing.
    """
    pairs = sorted(Counter(data.values()).items(), key=lambda pair: pair[0])
    dist = np.array(pairs, dtype='float')
    if normalize:
        dist.T[1] /= dist.T[1].sum()
    return dist
# By default the probability distribution is normalized such that the sum of all values is 1. Using this utility function it is now easy to calculate the degree distribution
@add_method(Graph)
def degree_distribution(self, normalize=True):
    """Return the degree distribution as rows of (degree, P[degree])
    ((degree, count) when normalize=False)."""
    deg = self.degrees()
    dist = Graph._build_distribution(deg, normalize)
    return dist
# The degree distribution for our Graph is then:
G.degree_distribution(normalize=False)
# Where we can see that we have 2 nodes of both degree 2 and 3 and 1 of degree 4.
# Similarly, for the weight distribution
@add_method(Graph)
def weight_distribution(self, normalize=True):
    """Return the edge-weight distribution as rows of (weight, P[weight])
    ((weight, count) when normalize=False)."""
    deg = self.weights()
    dist = Graph._build_distribution(deg, normalize)
    return dist
# And we find that all of our edges have weight 1, except the A–B edge we explicitly set to 4.
G.weight_distribution(normalize = False)
# We now calculate the average degree of the nearest neighbours for each node.
@add_method(Graph)
def neighbour_degree(self):
    """Return {node: mean degree of its neighbours} for each node that has
    at least one edge."""
    deg = self.degrees()
    return {node: np.mean([deg[nbr] for nbr in self.neighbours(node)])
            for node in self._edges}
G.neighbour_degree()
# And the distribution by degree:
@add_method(Graph)
def neighbour_degree_function(self):
    """Return knn(k): the average neighbour degree as a function of degree.

    Rows of the returned array are (k, <k_nn>(k)), sorted by k.
    """
    knn = {}
    count = {}
    deg = self.degrees()
    for node_i in self._edges:
        NN = self.neighbours(node_i)
        total = [deg[node_j] for node_j in NN]
        curr_k = deg[node_i]
        # Accumulate each node's mean neighbour degree into its degree class.
        knn[curr_k] = knn.get(curr_k, 0) + np.mean(total)
        count[curr_k] = count.get(curr_k, 0) + 1
    # Average within each degree class.
    for curr_k in knn:
        knn[curr_k] /= count[curr_k]
    knn = list(knn.items())
    knn.sort(key=lambda x: x[0])
    return np.array(knn)
# From which we obtain:
G.neighbour_degree_function()
# ## Zachary's Karate Club
# W. W. Zachary, J. Anthropol. Res. 33, 452 (1977)
# Let's now look at an empirical Graph
# For convenience, we load the data from a file using numpy
# Edge list file: one whitespace-separated "i j" pair per line.
edges = np.loadtxt('data/karate.txt')
edges[:10]
# Now we can use the functions defined above to generate the corresponding graph
Karate = Graph()
Karate.add_edges_from(edges)
# Our graph has 34 nodes
Karate.number_of_nodes()
# And 78 edges
Karate.number_of_edges()
# The degree distribution is:
Pk = Karate.degree_distribution()
Pk
# Which we can plot easily (column 0 = degree, column 1 = probability)
plt.plot(Pk.T[0], Pk.T[1])
plt.xlabel('k')
plt.ylabel('P[k]')
# The average degree of the nearest neighbours as a function of the degree is:
knn = Karate.neighbour_degree_function()
# Which we plot as well
plt.plot(knn.T[0], knn.T[1])
plt.xlabel('k')
plt.ylabel(r'$\langle K_{nn}[k] \rangle$')
# Finally, before we proceed to the next notebook, we save the current state of our Graph class. For this we use some Jupyter Notebook magic. It's not important to understand this, but you can read about it in the [Jupyter notebook](https://jupyter-notebook.readthedocs.io/en/stable/examples/Notebook/Importing%20Notebooks.html) documentation.
def export_class(path, filename):
    """Extract class/add_method/import cells from the notebook at *path* and
    write them to *filename* as a plain Python module.

    Code cells whose first line contains "class " or "add_method" are copied
    verbatim; cells whose first line contains "import" are copied minus any
    %-magic lines. All other cells are skipped.
    """
    import io
    from nbformat import read
    with io.open(path, 'r', encoding='utf-8') as f:
        nb = read(f, 4)
    # Context manager replaces the original open()/close() pair, which
    # leaked the output handle if an exception occurred mid-loop.
    with open(filename, "wt") as fp:
        for cell in nb.cells:
            if cell.cell_type != 'code':
                continue
            first_line = cell.source.split('\n')[0]
            if "class " in first_line or "add_method" in first_line:
                print(cell.source, file=fp)
                print("\n", file=fp)
            elif "import" in first_line:
                for line in cell.source.split('\n'):
                    # Drop Jupyter %-magics, which are not valid Python.
                    if not line.startswith("%"):
                        print(line.strip(), file=fp)
                print("\n", file=fp)
# Suffice it to say, that after this line, we'll have a Python module called "Graph.py" containing all the methods in our Graph class
export_class('2. Graph Properties.ipynb', 'Graph.py')
# <div style="width: 100%; overflow: hidden;">
# <img src="data/D4Sci_logo_full.png" alt="Data For Science, Inc" align="center" border="0" width=300px>
# </div>
| 2. Graph Properties.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Programming with Python
#
# ## Episode 2 - Analyzing Data from Multiple Files
#
# Teaching: 20 min,
# Exercises: 20 min
# ## Objectives
# - Use a library function to get a list of filenames that match a wildcard pattern.
# - How can I do the same operations on many different files?
# - Write a for loop to process multiple files.
# We now have almost everything we need to process all our data files. The only thing that’s missing is a library with a rather unpleasant name `glob`, let's import it.
# ```
# import glob
# ```
import glob
# The `glob` library contains a function, also called `glob`, that finds files and directories whose names match a pattern. We provide those patterns as strings which may include *wildcards*:
#
# - the wildcard character `*` matches zero or more of any character
# - the wildcard character `?` matches a single character.
#
# We can use this to get the names of all the CSV files in the `data` directory:
# ```
# print(glob.glob('data/inflammation*.csv'))
# ```
print(glob.glob('data/i*.csv')) #*i, in, inflamm, whatever, just has to be similar to the file names for glob to find it
print()
filelist = glob.glob('data/i*.csv')
print(sorted(filelist))
# `glob.glob`’s result is a list of matching filenames and directory paths (in arbitrary order).
#
# This means we can loop over it to do something with each filename in turn. In our case, the “something” we want to do is generate a set of plots for each file in our complete inflammation dataset.
#
# If we want to start by analyzing just the first three files in alphabetical order, we can use the built-in `sorted` function to generate a new sorted list from the `glob.glob` output:
#
# ```
# import numpy
# import matplotlib.pyplot
#
# filenames = sorted(glob.glob('data/inflammation*.csv'))
# filenames = filenames[0:3]
#
# print(filenames)
# ```
# +
import numpy
import matplotlib.pyplot
import glob

# Take the first three inflammation files in alphabetical order.
filenames = sorted(glob.glob('data/inflammation*.csv'))
filenames = filenames[0:3]
print(filenames)
# For each file: total inflammation and the per-day (column) sums.
for filename in filenames:
    # print(filename)
    data = numpy.loadtxt(fname=filename, delimiter=',')
    print()
    print(filename, 'total sum:', numpy.sum(data))
    print(filename, 'sum per day:\n', numpy.sum(data, axis=0))
    # print(numpy.sum(data, axis=0).shape) ## len() below looks nicer when printed
    print(len(numpy.sum(data, axis=0)))
    # print(data)
print()
# After the loop, `data` still holds the contents of the last file processed.
print('final data variable:\n', data)
# -
# and now we can loop over each filename in turn using the code from an earlier episode to product a set of plots for each.
# ```
# for f in filenames:
# print(f)
#
# data = numpy.loadtxt(fname=f, delimiter=',')
#
# fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))
#
# axes1 = fig.add_subplot(1, 3, 1)
# axes2 = fig.add_subplot(1, 3, 2)
# axes3 = fig.add_subplot(1, 3, 3)
#
# axes1.set_ylabel('average')
# axes1.plot(numpy.mean(data, axis=0))
#
# axes2.set_ylabel('max')
# axes2.plot(numpy.max(data, axis=0))
#
# axes3.set_ylabel('min')
# axes3.plot(numpy.min(data, axis=0))
#
# fig.tight_layout()
# matplotlib.pyplot.show()
# ```
# One figure per file: per-day average, max, and min across all patients.
for f in filenames:
    print(f)
    data = numpy.loadtxt(fname=f, delimiter=',')
    fig = matplotlib.pyplot.figure(figsize=(10.0, 3.0))
    subplot1 = fig.add_subplot(1, 3, 1)
    subplot2 = fig.add_subplot(1, 3, 2)
    subplot3 = fig.add_subplot(1, 3, 3)
    subplot1.set_ylabel('average'), subplot1.set_xlabel('Days')
    subplot1.plot(numpy.mean(data, axis=0))
    subplot2.set_ylabel('max'), subplot2.set_xlabel('Days')
    subplot2.plot(numpy.max(data, axis=0))
    subplot3.set_ylabel('min'), subplot3.set_xlabel('Days')
    subplot3.plot(numpy.min(data, axis=0))
    fig.tight_layout()
    matplotlib.pyplot.show() #see Introduction lesson for figure code explanation
# Sure enough, the maxima of the first two data sets show exactly the same ramp as the first, and their minima show the same staircase structure; a different situation has been revealed in the third dataset, where the maxima are a bit less regular, but the minima are consistently zero - probably indicating an issue in our data.
# ## Excercises
# #### Plotting differences between data files
# Plot the difference between the average of the first dataset and the average of the second dataset, i.e., the difference between the leftmost plot of the first two figures.
# +
import glob
import numpy as np
import matplotlib.pyplot as plt
# Grab all the filenames and sort them
filenames = sorted(glob.glob('data/inflammation*.csv'))
# load the first 2 data files
data0 = np.loadtxt(fname=filenames[0], delimiter=',')
data1 = np.loadtxt(fname=filenames[1], delimiter=',')
# create a figure
fig = plt.figure(figsize=(10.0, 3.0))
data0_mean = np.mean(data0, axis=0)
data1_mean = np.mean(data1, axis=0)
data_mean_diff = data0_mean - data1_mean
plt.ylabel('difference in average (file 0 minus file 1)'), plt.xlabel('Days')
plt.plot(data_mean_diff)
plt.ylim(-2,2)
plt.xlim(0,39)
plt.show()
# -
# #### Generate Composite Statistics
# Use each of the files once, to generate a dataset containing values averaged over all patients:
#
# +
import glob
import numpy as np
import matplotlib.pyplot as plt

filenames = glob.glob('data/inflammation*.csv')
# Accumulator sized for the 60-patient x 40-day inflammation files.
composite_data = np.zeros((60,40))
# Sum every file element-wise, then rescale to get the average over all
# datasets. BUG FIX: the original loop body contained only placeholder
# comments, which is not valid Python; this completes the exercise as the
# surrounding text describes.
for f in filenames:
    composite_data += np.loadtxt(fname=f, delimiter=',')
# and rescale it
composite_data/=len(filenames)
print(composite_data)
# -
# -
# and now plot the stats for the composite data using `matplotlib`
# +
# +
# Per-day average/max/min of the composite (all-files averaged) dataset.
fig = plt.figure(figsize=(10.0, 3.0))
axes1 = fig.add_subplot(1, 3, 1)
axes2 = fig.add_subplot(1, 3, 2)
axes3 = fig.add_subplot(1, 3, 3)
axes1.set_ylabel('average')
axes1.plot(np.mean(composite_data, axis=0))
axes2.set_ylabel('max')
axes2.plot(np.max(composite_data, axis=0))
axes3.set_ylabel('min')
axes3.plot(np.min(composite_data, axis=0))
fig.tight_layout()
plt.show()
# -
# -
# ## Key Points
# Use `glob.glob(pattern)` to create a list of files whose names match a pattern.
#
# Use `*` in a pattern to match zero or more characters, and ? to match any single character.
# ### Save, and version control your changes
#
# - save your work: `File -> Save`
# - add all your changes to your local repository: `Terminal -> git add .`
# - commit your updates a new Git version: `Terminal -> git commit -m "End of Episode 2"`
# - push your lastest commits to GitHub: `Terminal -> git push`
| lessons/python/ep4-multiple-files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WBO Interpolation
#
# Openforcefield is interested in implementing WBO-interpolated parameters. These torsion parameters use the WBO between the two central atoms involved in a torsion. WBO correlates with the torsion barrier height and also captures the behavior of remote chemical substituents, allowing for a potential reduction in the number of parameters and improved accuracy.
#
# ### Calculation of k
# - For each molecule the torsion is checked to match TIG-fit0, and then checks if the application of the torsion is 4-fold around the torsion match
# - The script sets up a forcebalance optimization for a single torsion targets and performs the optimization
# - The k value is parsed from the .offxml optimized torsion TIG-fit0
#
#
#imports
import os
import json
import tempfile
from openforcefield.typing.engines.smirnoff import (ForceField,
UnassignedValenceParameterException, BondHandler, AngleHandler,
ProperTorsionHandler, ImproperTorsionHandler,
vdWHandler)
from plot_td_energies import collect_td_targets_data, plot_td_targets_data
from fragmenter.utils import HARTREE_2_KJMOL
import pickle
import shutil
from openforcefield.topology import Molecule, Topology
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
force_balance_file = 'optimize.in'
from make_torsion_target_new import *
from openforcefield.topology import Molecule, Topology
import pickle
import matplotlib.pyplot as plt
import numpy as np
import matplotlib
import qcportal as ptl
import gzip
# +
def genData(ff, ff2):
    """
    Definition: Generate the kvalue from forcebalance runs for each molecule in the target directory
    ff: The force field used for forcebalance runs, includes "TIG-fit0" parameter
    ff2: The force field used for labeling torsion parameters for analysis, for example force field with full wbo interpolated parameters
    """
    #clear out target directory to create new targets based on dataset specifications
    #shutil.rmtree(target_directory)
    #os.mkdir(target_directory)
    # NOTE(review): hard-coded absolute path — only valid on the author's machine.
    ff_path = '/Users/jessica/Documents/Grad_research/wbointerpolation/by_molecule_experiment/fb-fit0/openff-1.3.0.offxml'
    directory = './targets'
    # Every torsion-drive target sub-directory (names starting with 'td').
    sub_dir = [x[0] for x in os.walk(directory) if os.path.basename(x[0]).startswith('td')]
    forcefield = ForceField(ff_path, allow_cosmetic_attributes=True)
    #Create targets folder in subdirectory
    force_balance_file = 'optimize.in'
    # Find which line of optimize.in the targets specification begins at
    with open(force_balance_file, 'r') as f:
        replace_index = None
        force_balance_lines = f.readlines()
        for index,line in enumerate(force_balance_lines):
            try:
                #if 'name' in line[:4]:
                if line.startswith('name'):
                    replace_index = index
                    break
            except:
                pass
    #print(replace_index)
    count = 0
    plot_dict = {}
    kwbo={}
    kwbo_dict={}
    for asub in sub_dir:
        #do somehting with metadata
        metadata = os.path.join(asub,'metadata.json')
        #do something with mol2 file
        mol2 = os.path.join(asub, 'input.mol2')
        #get indices from json
        with open(metadata, 'r') as f:
            data = json.load(f)
        #print(data['attributes']['canonical_isomeric_explicit_hydrogen_mapped_smiles'])
        cmiles=data['attributes']['canonical_isomeric_explicit_hydrogen_mapped_smiles']
        dihedral=tuple(data['dihedrals'][0])
        entry_value = data['entry_label']
        dataset = data['dataset_name']
        #print(dihedral)
        # Pull this target's torsion-drive record from QCArchive.
        client = ptl.FractalClient()
        ds = client.get_collection("TorsionDriveDataset", dataset)
        ds.status(status="COMPLETE")
        entry = ds.get_entry(entry_value)
        TID = entry.object_map['default']
        print("entry:")
        print(TID)
        #molecules = Molecule.from_file(mol2, allow_undefined_stereo=True)
        print(cmiles)
        molecules=Molecule.from_mapped_smiles(cmiles)
        topology = Topology.from_molecules([molecules])
        molecules.visualize()
        # Let's label using the Parsley force field
        forcefield = ForceField(ff, allow_cosmetic_attributes=True)
        # Run the molecule labeling
        molecule_force_list = forcefield.label_molecules(topology)
        #print(dict(molecule_force_list[0]['ProperTorsions']))
        # Print out a formatted description of the torsion parameters applied to this molecule
        plot_dict = {}
        for mol_idx, mol_forces in enumerate(molecule_force_list):
            for force_tag, force_dict in mol_forces.items():
                if force_tag == 'ProperTorsions':
                    # Count how many torsions in this molecule were assigned TIG-fit0.
                    tigcount=0
                    for (atom_indices, parameter) in force_dict.items():
                        #print('param id', parameter.id)
                        if (parameter.id == "TIG-fit0"):
                            tigcount+=1
                    print(tigcount)
                    #if tigcount==4:
                    if tigcount>0:
                        for (atom_indices, parameter) in force_dict.items():
                            #print('param id', parameter.id)
                            if (parameter.id == "TIG-fit0"):
                                #print('check1', parameter.id)
                                #check dihedral in forward or reverse order
                                if (atom_indices == dihedral) or (atom_indices == dihedral[::-1]):
                                    #print(atom_indices, dihedral)
                                    #print(asub.split('/')[-1])
                                    #paste
                                    print('check2')
                                    #run forcebalance
                                    # Rewrite optimize.in so its target name points at this sub-dir.
                                    tmp_file = tempfile.NamedTemporaryFile(suffix='.in')
                                    with open('debug.in','w') as f1:
                                        for index,line in enumerate(force_balance_lines):
                                            if index != replace_index:
                                                print(line)
                                                f1.write(line)
                                            else:
                                                f1.write(line.split()[0] + ' ' + os.path.basename(asub)+"\n")
                                    print('ForceBalance ' + 'debug.in')
                                    print(os.system('ForceBalance ' + 'debug.in'))
                                    print('done')
                                    #getwbo
                                    wborder= getWBO(cmiles, atom_indices)
                                    print(wborder)
                                    #get kval
                                    #data stored into kval.pickle
                                    kval = readKval(name=os.path.basename(asub))
                                    print(kval)
                                    print('kval', kval[os.path.basename(asub)])
                                    #get qdata and store energies
                                    elist = []
                                    with open(os.path.join(asub,'qdata.txt')) as f:
                                        lines = f.readlines()
                                        elist = [float(line.split()[1]) for line in lines if 'ENERGY' in line]
                                    #get the alternative TIG ID
                                    #added
                                    # Let's label using the Parsley force field
                                    forcefield2 = ForceField(ff2, allow_cosmetic_attributes=True)
                                    # Run the molecule labeling
                                    molecule_force_list = forcefield2.label_molecules(topology)
                                    #print(dict(molecule_force_list[0]['ProperTorsions']))
                                    # Print out a formatted description of the torsion parameters applied to this molecule
                                    #plot_dict = {}
                                    # NOTE(review): this inner loop shadows the outer loop
                                    # variables (mol_forces, force_dict, atom_indices).
                                    for mol_idx, mol_forces in enumerate(molecule_force_list):
                                        for force_tag, force_dict in mol_forces.items():
                                            if force_tag == 'ProperTorsions':
                                                for (atom_indices, parameter) in force_dict.items():
                                                    if (atom_indices == dihedral) or (atom_indices == dihedral[::-1]):
                                                        parameter_match=parameter.id
                                                        print(parameter_match)
                                    #store info into dictionary for plotting later
                                    kwbo[cmiles]=[wborder,kval[os.path.basename(asub)], parameter_match, getTB(elist), TID]
                                    kwbo_dict[cmiles]={'wbo': wborder, 'kval': kval[os.path.basename(asub)], 'id': parameter_match, 'tb':getTB(elist), 'tid':TID}
    #make plot
    # NOTE(review): indentation reconstructed — plotting/pickling assumed to run
    # once after all targets are processed; confirm against the original notebook.
    plot_data = collect_td_targets_data('debug.tmp', 'targets')
    plot_dict.update(plot_data)
    plot_td_targets_data(plot_data)
    with open('plot_data_plot_nonring.pk', 'wb') as pk:
        pickle.dump(plot_dict,pk)
    with open('plot_k_wbo_list_nonring.pk', 'wb') as pk:
        pickle.dump(kwbo,pk)
    with open('plot_k_wbo_dict_nonring.pk', 'wb') as pk:
        pickle.dump(kwbo_dict,pk)
    return
def readKval(read_dir='debug.tmp', modification='TIG-fit0', parameter='k1', name='*'):
    """Parse optimized force-field parameter values out of ForceBalance output.

    Scans ``read_dir/<name>/iter*/test.offxml``, takes the LAST iteration,
    and extracts the float value of *parameter* (e.g. ``k1``) from the line
    containing *modification* (the parameter id, e.g. ``TIG-fit0``).

    Returns {target-dir-basename: value}; the dict is also pickled to
    ``kval.pickle`` in the current directory.
    """
    import glob
    import pickle
    storage_dict = {}
    for adir in glob.glob(os.path.join(read_dir, name)):
        print('adir', adir)
        # BUG FIX: glob.glob returns paths in arbitrary order, so [-1] was not
        # guaranteed to be the final iteration; sort first (ForceBalance
        # zero-pads iteration directory names, so lexical order is correct).
        iterations = sorted(glob.glob(os.path.join(adir, 'iter*')))
        if not iterations:
            # No completed iterations for this target — skip it (the original
            # achieved this with a bare try/except around the indexing).
            continue
        xml = os.path.join(iterations[-1], 'test.offxml')
        with open(xml, 'r') as f:
            lines = f.readlines()
        for line in lines:
            if modification in line:
                for asplit in line.split():
                    if parameter + '=' in asplit:
                        # Attribute looks like k1="<value>..." — take the
                        # quoted number.
                        value = float(asplit.split('"')[1])
                        print(os.path.basename(adir))
                        storage_dict[os.path.basename(adir)] = value
    with open('kval.pickle', 'wb') as f:
        pickle.dump(storage_dict, f)
    return storage_dict
def getWBO(cmiles, dihedral_indices):
    """
    Description: Get the Wiberg bond order (WBO) of a torsion's central bond.
    input:
        cmiles: String, a mapped CMILES/SMILES for a particular molecule
        dihedral_indices: List of the four dihedral atom indices from a torsion scan
    return:
        wbo: The WBO of the central bond (between atoms 1 and 2 of the dihedral)
    """
    from openforcefield.topology import Molecule
    offmol = Molecule.from_mapped_smiles(cmiles)
    # Fractional bond orders must be computed before they can be read off a bond
    offmol.assign_fractional_bond_orders()
    # The central bond of a dihedral (i, j, k, l) is the j-k bond
    bond = offmol.get_bond_between(dihedral_indices[1], dihedral_indices[2])
    return bond.fractional_bond_order
def getTB(energies):
    """
    Definition: Calculate the torsion barrier from the list of final energies
    of a torsion scan.
    input:
        energies: List of final energies, ordered by scan angle
    return:
        torsion barrier: the largest |energy difference| between consecutive
        stationary points of the periodic profile, scaled by HARTREE_2_KJMOL
        (module-level constant).
    """
    # Evenly spaced pseudo-angles; only their ordering/monotonicity matters for
    # locating extrema below.
    # NOTE(review): spacing is 1 degree per point regardless of the real scan
    # resolution -- confirm this is intentional.
    angles = [i * np.pi / 180 for i in range(len(energies))]
    angles, energies = zip(*sorted(zip(angles, energies)))
    angles = np.array(angles)
    energies = np.array(energies)
    # Pad three points on each side, shifted by one period, so extrema at the
    # boundaries of the scan are also detected.
    angles = np.append(
        angles[-3:] - 2 * np.pi, np.append(angles, angles[:3] + 2 * np.pi)
    )
    energies = np.append(energies[-3:], np.append(energies, energies[:3]))
    # A stationary point sits where the finite-difference slope changes sign.
    idx = []
    for i in range(len(angles) - 2):
        m1 = (energies[i + 1] - energies[i]) / (angles[i + 1] - angles[i])
        m2 = (energies[i + 2] - energies[i + 1]) / (angles[i + 2] - angles[i + 1])
        if np.sign(m1) != np.sign(m2):
            idx.append(i + 1)
    # Barrier = largest energy gap between consecutive stationary points.
    torsion_barriers = []
    for i in range(len(idx) - 1):
        torsion_barriers.append(
            abs(HARTREE_2_KJMOL * abs(energies[idx[i]] - energies[idx[i + 1]]))
        )
    torsion_barriers = np.array(torsion_barriers)
    return max(torsion_barriers)
def makeTDTargets(ds, ff):
    """Build a torsiondrive fitting target for every dataset name in *ds*.

    ds: iterable of torsiondrive dataset names to download
    ff: path to the force-field .offxml loaded (with cosmetic attributes)
        and passed through as the test force field
    """
    test_forcefield = ForceField(ff, allow_cosmetic_attributes=True)
    for dataset_name in ds:
        dataset_payload = download_torsiondrive_data(dataset_name)
        make_torsiondrive_target(dataset_name, dataset_payload, test_ff=test_forcefield)
# +
# Notebook scratch cell: paths and dataset selections for the fitting run.
#directory = '/Users/jessica/Downloads/release_1.2.0/fb-fit/targets/'
#ff = '/Users/jessica/Documents/Grad_research/WBO_Torsions_ChayaPaper/release_1.3.0_2/fb-fit/fb-fit0/forcefield/test.offxml'
#be very careful when specifying this
target_directory='/Users/jessica/Documents/Grad_research/wbointerpolation/by_molecule_experiment/fb-fit0/targets'
#all of the molecules there is torsions
#break them out into subplots for the TIG parameters
#Plot for molecules that use TIG5a, but use the TIG-fit0 plots
#Plot for TIG3
#run the same experiments with Carbon-Nitrogen central bonds parameters
#makePlots(ds, ff, param_fit, ff_analysis, param_analysis, target_directory)
ds=['SMIRNOFF Coverage Torsion Set 1']
param_fit=['TIG-fit0']
param_analysis=[]
ff_analysis=''
ff='/Users/jessica/Documents/Grad_research/wbointerpolation/by_molecule_experiment/fb-fit0/forcefield/test.offxml'
# +
# Generate the k-vs-WBO data pickles from the fit (genData defined above).
ff='/Users/jessica/Documents/Grad_research/wbointerpolation/by_molecule_experiment/fb-fit0/forcefield/test.offxml'
ff2='/Users/jessica/Documents/Grad_research/wbointerpolation/by_molecule_experiment/fb-fit0/forcefield/fit0_optimized.offxml'
genData(ff, ff2)
# +
ds = ['OpenFF Substituted Phenyl Set 1']
#makeTDTargets(ds, ff, param_fit, ff_analysis, param_analysis, target_directory)
#'Fragment Stability Benchmark',
#'OpenFF Fragmenter Validation 1.0',
# NOTE(review): the next seven bare string lines look like remnants of an edited
# list literal; they evaluate as harmless one-element tuple expressions.
'Fragment Stability Benchmark',
'OpenFF Fragmenter Validation 1.0',
'OpenFF Gen 2 Torsion Set 1 Roche 2',
'OpenFF Gen 2 Torsion Set 2 Coverage 2',
#problem with this dataset
#'OpenFF Full TorsionDrive Benchmark 1'
'OpenFF Gen 2 Torsion Set 3 Pfizer Discrepancy 2',
ds = [
'OpenFF Gen 2 Torsion Set 4 eMolecules Discrepancy 2',
'OpenFF Gen 2 Torsion Set 5 Bayer 2',
'OpenFF Gen 2 Torsion Set 6 Supplemental 2',
'OpenFF Group1 Torsions',
'OpenFF Group1 Torsions 2',
'OpenFF Group1 Torsions 3',
'OpenFF Primary Benchmark 1 Torsion Set',
'OpenFF Primary Benchmark 2 Torsion Set',
'OpenFF Primary TorsionDrive Benchmark 1',
'OpenFF Rowley Biaryl v1.0',
'OpenFF Substituted Phenyl Set 1',
'OpenFF-benchmark-ligand-fragments-v1.0',
'Pfizer Discrepancy Torsion Dataset 1',
'SMIRNOFF Coverage Torsion Set 1',
'TorsionDrive Paper']
# The reassignment below overrides the big list: only the phenyl set is used.
ds = ['OpenFF Substituted Phenyl Set 1']
#ds = ['OpenFF Fragmenter Validation 1.0']
#ds=['OpenFF Full TorsionDrive Benchmark 1']
ff= '/Users/jessica/Documents/Grad_research/wbointerpolation/by_molecule_experiment/fb-fit0/forcefield/test.offxml'
#makeTDTargets(ds, ff)
# -
makePlots(ds, ff, param_fit, ff_analysis, param_analysis, target_directory, ff2)
ff2='/Users/jessica/Documents/Grad_research/wbointerpolation/by_molecule_experiment/fb-fit0/forcefield/fit0_optimized.offxml'
ff_path = '/Users/jessica/Documents/Grad_research/wbointerpolation/by_molecule_experiment/fb-fit0/openff-1.3.0.offxml'
#ff_path = 'openff-1.3.0.offxml'
#ff_path ='/Users/jessica/Documents/Grad_research/wbointerpolation/by_molecule_experiment/fb-fit0/forcefield/test.offxml'
forcefield = ForceField(ff_path, allow_cosmetic_attributes=True)
# +
def plot_wbo_vs_k(filename):
    """Scatter-plot fitted k values against WBO, one series per parameter id.

    filename: pickle of {cmiles: [wbo, kval, parameter_id, ...]} as written
              by genData.
    Side effects: saves 'initialplot_energycompare.pdf' and shows the figure.
    """
    with open(filename, 'rb') as f:
        x = pickle.load(f)
    # Group WBO / k values by torsion parameter id in a single pass
    # (replaces the original O(ids * items) double loop).
    dataDictWBO = {}
    dataDictKval = {}
    for item in x.values():
        dataDictWBO.setdefault(item[2], []).append(item[0])
        dataDictKval.setdefault(item[2], []).append(item[1])
    for param_id in dataDictWBO:
        plt.scatter(dataDictWBO[param_id], dataDictKval[param_id], label=param_id)
    plt.xlabel("WBO")
    plt.legend()
    plt.ylabel("kval (kcal/mol)")
    plt.title("Substituted Phenyl Set: [*:1]~[#6X3:2]~!@[#6X3:3]~[*:4]")
    plt.savefig('initialplot_energycompare.pdf')
    plt.show()
def plot_wbo_tb_compare(filename):
    """Twin-axis scatter of fitted k (circles) and torsion barrier (triangles) vs WBO.

    filename: pickle of {cmiles: [wbo, kval, parameter_id, tb, ...]} from genData.
    The stored torsion barrier is converted from kJ/mol to kcal/mol (x 0.239006).
    Side effects: saves 'tbvsk.pdf' and shows the figure.
    """
    with open(filename, 'rb') as f:
        x = pickle.load(f)
    wbo = []
    kval = []
    tb = []
    for item in x.values():
        wbo.append(item[0])
        kval.append(item[1])
        tb.append(item[3] * 0.239006)  # kJ/mol -> kcal/mol
    fig, ax1 = plt.subplots()
    # Fixed palette so each molecule keeps the same colour on both axes.
    colors = ['#D3D3D3', '#90EE90', '#D3D3D3', '#FFB6C1', '#FFA07A', '#20B2AA', '#87CEFA', '#778899', '#778899', '#B0C4DE', '#FFFFE0', '#00FF00', '#32CD32', '#FAF0E6', '#FF00FF', '#800000', '#66CDAA', '#0000CD', '#BA55D3', '#9370DB', '#3CB371', '#7B68EE', '#00FA9A', '#48D1CC', '#C71585', '#191970', '#F5FFFA', '#FFE4E1', '#FFE4B5', '#FFDEAD', '#000080', '#FDF5E6', '#808000', '#6B8E23', '#FFA500', '#FF4500', '#DA70D6', '#EEE8AA', '#98FB98', '#AFEEEE', '#DB7093', '#FFEFD5', '#FFDAB9', '#CD853F', '#FFC0CB', '#DDA0DD', '#B0E0E6', '#800080', '#663399', '#FF0000', '#BC8F8F', '#4169E1', '#8B4513', '#FA8072', '#F4A460', '#2E8B57', '#FFF5EE', '#A0522D', '#C0C0C0', '#87CEEB', '#6A5ACD', '#708090', '#708090', '#FFFAFA', '#00FF7F', '#4682B4', '#D2B48C', '#008080', '#D8BFD8', '#FF6347', '#40E0D0', '#EE82EE', '#F5DEB3', '#FFFFFF', '#F5F5F5', '#FFFF00', '#9ACD32']
    for i in range(len(wbo)):
        ax1.scatter(wbo[i], kval[i], color=colors[i], marker='o', s=80, alpha=.5)
    ax1.set_xlabel("WBO")
    ax1.set_ylabel("k (kcal/mol)")
    ax1.tick_params(axis='y')
    ax2 = ax1.twinx()  # second y-axis sharing the same x-axis
    for i in range(len(wbo)):
        ax2.scatter(wbo[i], tb[i], color=colors[i], marker='^', s=80, alpha=.5)
    ax2.tick_params(axis='y')
    ax2.set_ylabel('Torsion Barrier (kcal/mol)')
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    # Grey proxy markers: colour encodes molecule, marker shape encodes series.
    marks = [
        Line2D([0], [0], marker='o', color='w', label='k',
               markerfacecolor='grey', markersize=12, alpha=.5),
        Line2D([0], [0], marker='^', color='w', label='TB',
               markerfacecolor='grey', markersize=12, alpha=.5)]
    plt.legend(handles=marks)
    plt.title("Torsion Barrier vs k: [*:1]~[#6X3:2]~!@[#6X3:3]~[*:4] \n Substituted Phenyl Dataset")
    plt.savefig('tbvsk.pdf', bbox_inches='tight')
    plt.show()
def plot_wbo_tb_compare_solid(filename):
    """Twin-axis scatter of k vs WBO and torsion barrier vs WBO, one solid colour per series.

    filename: pickle of {cmiles: [wbo, kval, parameter_id, tb, ...]} from genData.
    The stored torsion barrier is converted from kJ/mol to kcal/mol (x 0.239006).
    Side effects: saves 'tbvsk_solidcolor.pdf' and shows the figure.
    """
    with open(filename, 'rb') as f:
        x = pickle.load(f)
    wbo = []
    kval = []
    tb = []
    for item in x.values():
        wbo.append(item[0])
        kval.append(item[1])
        tb.append(item[3] * 0.239006)  # kJ/mol -> kcal/mol
    fig, ax1 = plt.subplots()
    color = 'tab:red'
    ax1.scatter(wbo, kval, color=color, marker='o', s=80, alpha=.5, label="k")
    ax1.set_xlabel("WBO")
    ax1.set_ylabel("k (kcal/mol)", color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    ax2 = ax1.twinx()  # second y-axis sharing the same x-axis
    color = 'tab:blue'
    ax2.scatter(wbo, tb, color=color, marker='^', s=80, alpha=.5, label="TB")
    ax2.tick_params(axis='y', labelcolor=color)
    ax2.set_ylabel('Torsion Barrier (kcal/mol)', color=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    elements=[Line2D([0], [0], color='red', label='k',markerfacecolor='red', markersize=15, alpha=.5),Line2D([0], [0], color=color, label='TB', markersize=15, alpha=.5) ]
    plt.legend(handles=elements)
    plt.title("Torsion Barrier vs k: [*:1]~[#6X3:2]~!@[#6X3:3]~[*:4] \n Substituted Phenyl Dataset")
    plt.savefig('tbvsk_solidcolor.pdf', bbox_inches='tight')
    plt.show()
# Generate all three comparison plots from the list-style pickle written by genData.
plot_wbo_vs_k('plot_k_wbo.pk')
plot_wbo_tb_compare('plot_k_wbo.pk')
plot_wbo_tb_compare_solid('plot_k_wbo.pk')
# +
# Load the per-torsion LJ (steric) energy differences computed elsewhere; `lj`
# is used below by the steric-filtered plotting functions.
with gzip.open('sterics_data.pkl', 'rb') as f:
    lj = pickle.load(f)
print(lj.columns)
print(lj['LJ energy diff kcal/mol'])
print('keys',lj['LJ energy diff kcal/mol'].keys())
print('energy',lj['LJ energy diff kcal/mol']['1762227'])
# Torsion record ids expected in the sterics table
ID=['18537025', '3745392', '3745370', '18886248', '18536199', '18045331', '18535845', '18886245', '18535848', '18537010', '4269710', '18537009', '18537022', '4269708', '4269709', '19953582', '18045328', '18537012', '18537008', '18537018', '4269707', '3745476', '18536057', '18536192', '18045333', '4269712', '18535844', '18886247', '3745617', '18045330', '18537019', '6098536', '18886246', '18886237', '4269703', '18536064', '18535839', '19953583', '6098519', '18886249', '18536061', '19953581', '3745367', '18536065', '4269711', '3745366', '18537017', '4269706', '3745658', '3745390', '18535843', '18537023', '18535849', '4269704', '3745541', '18535838', '19953579', '3745391', '19953580', '18537021', '18886244', '18886238', '18537027', '18537026', '18535846', '18535842', '18886243', '18537020', '18536062', '18536201', '4269705', '18535847', '18536063', '18536056']
#for i in ID:
#    print(lj['LJ energy diff kcal/mol'][i])
# Partition the ids into those present in the sterics data and those missing.
pk_keys = lj['LJ energy diff kcal/mol'].keys()
append_list = []
nomatch=[]
for i in ID:
    if i in pk_keys:
        append_list.append(i)
    else:
        nomatch.append(i)
print('match', append_list)
print('no match', nomatch)
#print(lj['LJ energy diff kcal/mol']['18537025'])
#kwbo_dict[cmiles]={'wbo': wborder, 'kval': kval[os.path.basename(asub)], 'id': parameter_match, 'tb':getTB(elist), 'tid':TID}
def plot_wbo_tb_compare_solid(filename):
    # Twin-axis plot of fitted k and torsion barrier (TB) vs WBO, restricted to
    # torsion ids that pass a steric filter.  Relies on the module-level `lj`
    # DataFrame loaded from sterics_data.pkl above.
    # NOTE(review): this redefines the earlier plot_wbo_tb_compare_solid and
    # expects the dict-style pickle ({'wbo','kval','id','tb','tid'} per entry).
    with open(filename, 'rb') as f:
        x = pickle.load(f)
    #print(x)
    wbo=[]
    kval=[]
    tb=[]
    ids=set()
    tid=[]
    #print('debug', [(key,item) for key,item in x.items()])
    #print(x)
    # Torsion record ids to include in the plot
    tids=['3745392', '3745370', '18535845', '18535848', '4269710', '4269708', '4269709', '19953582', '18045328', '4269707', '3745476', '18536057', '18045333', '4269712', '18886247', '3745617', '18045330', '18537019', '18886237', '4269703', '18536064', '18535839', '19953583', '18886249', '18536061', '19953581', '3745367', '4269711', '3745366', '4269706', '3745658', '3745390', '18535849', '4269704', '3745541', '18535838', '19953579', '3745391', '19953580', '18886244', '18886238', '18535846', '18535842', '18886243', '18536062', '18536201', '4269705', '18535847', '18536063', '18536056']
    print(x)
    for key, item in x.items():
        if item['tid'] in tids:
            # Steric filter: drop torsions whose LJ energy difference >= 3.4
            if lj['LJ energy diff kcal/mol'][item['tid']]<3.4:
                #print("Not filtered")
                wbo.append(item['wbo'])
                kval.append(item['kval'])
                ids.add(item['id'])
                tid.append(item['tid'])
                tb.append(item['tb']*0.239006)  # kJ/mol -> kcal/mol
    #print(ids)
    #print(tid)
    #print(tb)
    #print(kval)
    # Per-parameter-id grouping (computed but not used by this plot)
    dataDictWBO={}
    dataDictKval={}
    #print(ids)
    for i in ids:
        dataDictWBO[i]=[]
        dataDictKval[i]=[]
    #print(x)
    for i in ids:
        for key, item in x.items():
            #print(item)
            #print(key)
            #print(item['id'])
            #print(i)
            if item['id']==i:
                dataDictWBO[i].append(item['wbo'])
                dataDictKval[i].append(item['kval'])
    fig, ax1 = plt.subplots()
    colors = list(matplotlib.colors.cnames.values())
    #for i, val in enumerate(wbo):
    #    ax1.scatter(wbo[i], kval[i], color=colors[i], marker='o', s=50, alpha=.5)
    color = 'tab:red'
    ax1.scatter(wbo, kval, color=color, marker='o', s=80, alpha=.5, label="k")
    # Error bar = k - TB/4.
    # NOTE(review): this assumes k ~ TB/4; confirm the intended heuristic.
    error=[]
    for k, t in zip(kval, tb):
        error.append(k-(t/4))
    print(error)
    ax1.errorbar(wbo, kval, yerr=error, fmt='none', color=color, alpha=.5)
    ax1.set_xlabel("WBO")
    ax1.set_ylabel("k (kcal/mol)", color=color)
    #ax1.scatter(wbo, kval, color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
    color = 'tab:blue'
    #color=['#ffb3ba']
    #ax2.set_ylabel('sin', color=color)  # we already handled the x-label with ax1
    #ax1.scatter(wbo, tb, color=color, marker='^', s=50, alpha=.5)
    ax2.scatter(wbo, tb, color=color, marker='^', s=80, alpha=.5, label="TB")
    ax2.tick_params(axis='y', labelcolor=color)
    ax2.set_ylabel('Torsion Barrier (kcal/mol)', color=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    elements=[Line2D([0], [0], color='red', label='k',markerfacecolor='red', markersize=15, alpha=.5),Line2D([0], [0], color=color, label='TB', markersize=15, alpha=.5) ]
    plt.legend(handles=elements)
    plt.title("Torsion Barrier vs k: [*:1]~[#6X3:2]~!@[#6X3:3]~[*:4] \n 1.2.0 training & subtituted phenyl datasets")
    plt.savefig('tbvsk_solidcolor.pdf', bbox_inches='tight')
    plt.show()
def plot_wbo_tb_compare_solid_noerrorbar(filename):
    # Same as plot_wbo_tb_compare_solid above, but without the k - TB/4 error
    # bars.  Relies on the module-level `lj` DataFrame (steric filter).
    with open(filename, 'rb') as f:
        x = pickle.load(f)
    #print(x)
    wbo=[]
    kval=[]
    tb=[]
    ids=set()
    tid=[]
    #print('debug', [(key,item) for key,item in x.items()])
    #print(x)
    # Torsion record ids to include in the plot
    tids=['3745392', '3745370', '18535845', '18535848', '4269710', '4269708', '4269709', '19953582', '18045328', '4269707', '3745476', '18536057', '18045333', '4269712', '18886247', '3745617', '18045330', '18537019', '18886237', '4269703', '18536064', '18535839', '19953583', '18886249', '18536061', '19953581', '3745367', '4269711', '3745366', '4269706', '3745658', '3745390', '18535849', '4269704', '3745541', '18535838', '19953579', '3745391', '19953580', '18886244', '18886238', '18535846', '18535842', '18886243', '18536062', '18536201', '4269705', '18535847', '18536063', '18536056']
    for key, item in x.items():
        if item['tid'] in tids:
            # Steric filter: drop torsions whose LJ energy difference >= 3.4
            if lj['LJ energy diff kcal/mol'][item['tid']]<3.4:
                #print("Not filtered")
                wbo.append(item['wbo'])
                kval.append(item['kval'])
                ids.add(item['id'])
                tid.append(item['tid'])
                tb.append(item['tb']*0.239006)  # kJ/mol -> kcal/mol
    #print(ids)
    #print(tid)
    #print(tb)
    #print(kval)
    # Per-parameter-id grouping (computed but not used by this plot)
    dataDictWBO={}
    dataDictKval={}
    #print(ids)
    for i in ids:
        dataDictWBO[i]=[]
        dataDictKval[i]=[]
    #print(x)
    for i in ids:
        for key, item in x.items():
            #print(item)
            #print(key)
            #print(item['id'])
            #print(i)
            if item['id']==i:
                dataDictWBO[i].append(item['wbo'])
                dataDictKval[i].append(item['kval'])
    fig, ax1 = plt.subplots()
    colors = list(matplotlib.colors.cnames.values())
    #for i, val in enumerate(wbo):
    #    ax1.scatter(wbo[i], kval[i], color=colors[i], marker='o', s=50, alpha=.5)
    color = 'tab:red'
    ax1.scatter(wbo, kval, color=color, marker='o', s=80, alpha=.5, label="k")
    #ax1.errorbar(wbo, kval, yerr=error, fmt='none', color=color, alpha=.5)
    ax1.set_xlabel("WBO")
    ax1.set_ylabel("k (kcal/mol)", color=color)
    #ax1.scatter(wbo, kval, color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
    color = 'tab:blue'
    #color=['#ffb3ba']
    #ax2.set_ylabel('sin', color=color)  # we already handled the x-label with ax1
    #ax1.scatter(wbo, tb, color=color, marker='^', s=50, alpha=.5)
    ax2.scatter(wbo, tb, color=color, marker='^', s=80, alpha=.5, label="TB")
    ax2.tick_params(axis='y', labelcolor=color)
    ax2.set_ylabel('Torsion Barrier (kcal/mol)', color=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    elements=[Line2D([0], [0], color='red', label='k',markerfacecolor='red', markersize=15, alpha=.5),Line2D([0], [0], color=color, label='TB', markersize=15, alpha=.5) ]
    plt.legend(handles=elements)
    plt.title("Torsion Barrier vs k: [*:1]~[#6X3:2]~!@[#6X3:3]~[*:4] \n 1.2.0 training & subtituted phenyl datasets")
    plt.savefig('tbvsk_solidcolor_noerror_bar_steric_filter.pdf', bbox_inches='tight')
    plt.show()
# Generate the steric-filtered plots from the dict-style pickle written by genData.
plot_wbo_tb_compare_solid('plot_k_wbo_dict.pk')
plot_wbo_tb_compare_solid_noerrorbar('plot_k_wbo_dict.pk')
# +
# Duplicate of the sterics-filter cell above (kept as-is from the notebook):
# reloads `lj` and re-checks which torsion ids are present in the table.
with gzip.open('sterics_data.pkl', 'rb') as f:
    lj = pickle.load(f)
print(lj.columns)
print(lj['LJ energy diff kcal/mol'])
print('keys',lj['LJ energy diff kcal/mol'].keys())
print('energy',lj['LJ energy diff kcal/mol']['1762227'])
# Torsion record ids expected in the sterics table
ID=['18537025', '3745392', '3745370', '18886248', '18536199', '18045331', '18535845', '18886245', '18535848', '18537010', '4269710', '18537009', '18537022', '4269708', '4269709', '19953582', '18045328', '18537012', '18537008', '18537018', '4269707', '3745476', '18536057', '18536192', '18045333', '4269712', '18535844', '18886247', '3745617', '18045330', '18537019', '6098536', '18886246', '18886237', '4269703', '18536064', '18535839', '19953583', '6098519', '18886249', '18536061', '19953581', '3745367', '18536065', '4269711', '3745366', '18537017', '4269706', '3745658', '3745390', '18535843', '18537023', '18535849', '4269704', '3745541', '18535838', '19953579', '3745391', '19953580', '18537021', '18886244', '18886238', '18537027', '18537026', '18535846', '18535842', '18886243', '18537020', '18536062', '18536201', '4269705', '18535847', '18536063', '18536056']
#for i in ID:
#    print(lj['LJ energy diff kcal/mol'][i])
# Partition the ids into those present in the sterics data and those missing.
pk_keys = lj['LJ energy diff kcal/mol'].keys()
append_list = []
nomatch=[]
for i in ID:
    if i in pk_keys:
        append_list.append(i)
    else:
        nomatch.append(i)
print('match', append_list)
print('no match', nomatch)
#print(lj['LJ energy diff kcal/mol']['18537025'])
#kwbo_dict[cmiles]={'wbo': wborder, 'kval': kval[os.path.basename(asub)], 'id': parameter_match, 'tb':getTB(elist), 'tid':TID}
def plot_wbo_tb_compare_solid(filename):
    # Duplicate redefinition of the steric-filtered twin-axis k / TB vs WBO
    # plot (identical to the version above except for one commented print).
    # Relies on the module-level `lj` DataFrame loaded from sterics_data.pkl.
    with open(filename, 'rb') as f:
        x = pickle.load(f)
    #print(x)
    wbo=[]
    kval=[]
    tb=[]
    ids=set()
    tid=[]
    #print('debug', [(key,item) for key,item in x.items()])
    #print(x)
    # Torsion record ids to include in the plot
    tids=['3745392', '3745370', '18535845', '18535848', '4269710', '4269708', '4269709', '19953582', '18045328', '4269707', '3745476', '18536057', '18045333', '4269712', '18886247', '3745617', '18045330', '18537019', '18886237', '4269703', '18536064', '18535839', '19953583', '18886249', '18536061', '19953581', '3745367', '4269711', '3745366', '4269706', '3745658', '3745390', '18535849', '4269704', '3745541', '18535838', '19953579', '3745391', '19953580', '18886244', '18886238', '18535846', '18535842', '18886243', '18536062', '18536201', '4269705', '18535847', '18536063', '18536056']
    #print(x)
    for key, item in x.items():
        if item['tid'] in tids:
            # Steric filter: drop torsions whose LJ energy difference >= 3.4
            if lj['LJ energy diff kcal/mol'][item['tid']]<3.4:
                #print("Not filtered")
                wbo.append(item['wbo'])
                kval.append(item['kval'])
                ids.add(item['id'])
                tid.append(item['tid'])
                tb.append(item['tb']*0.239006)  # kJ/mol -> kcal/mol
    #print(ids)
    #print(tid)
    #print(tb)
    #print(kval)
    # Per-parameter-id grouping (computed but not used by this plot)
    dataDictWBO={}
    dataDictKval={}
    #print(ids)
    for i in ids:
        dataDictWBO[i]=[]
        dataDictKval[i]=[]
    #print(x)
    for i in ids:
        for key, item in x.items():
            #print(item)
            #print(key)
            #print(item['id'])
            #print(i)
            if item['id']==i:
                dataDictWBO[i].append(item['wbo'])
                dataDictKval[i].append(item['kval'])
    fig, ax1 = plt.subplots()
    colors = list(matplotlib.colors.cnames.values())
    #for i, val in enumerate(wbo):
    #    ax1.scatter(wbo[i], kval[i], color=colors[i], marker='o', s=50, alpha=.5)
    color = 'tab:red'
    ax1.scatter(wbo, kval, color=color, marker='o', s=80, alpha=.5, label="k")
    # Error bar = k - TB/4.
    # NOTE(review): this assumes k ~ TB/4; confirm the intended heuristic.
    error=[]
    for k, t in zip(kval, tb):
        error.append(k-(t/4))
    print(error)
    ax1.errorbar(wbo, kval, yerr=error, fmt='none', color=color, alpha=.5)
    ax1.set_xlabel("WBO")
    ax1.set_ylabel("k (kcal/mol)", color=color)
    #ax1.scatter(wbo, kval, color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
    color = 'tab:blue'
    #color=['#ffb3ba']
    #ax2.set_ylabel('sin', color=color)  # we already handled the x-label with ax1
    #ax1.scatter(wbo, tb, color=color, marker='^', s=50, alpha=.5)
    ax2.scatter(wbo, tb, color=color, marker='^', s=80, alpha=.5, label="TB")
    ax2.tick_params(axis='y', labelcolor=color)
    ax2.set_ylabel('Torsion Barrier (kcal/mol)', color=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    elements=[Line2D([0], [0], color='red', label='k',markerfacecolor='red', markersize=15, alpha=.5),Line2D([0], [0], color=color, label='TB', markersize=15, alpha=.5) ]
    plt.legend(handles=elements)
    plt.title("Torsion Barrier vs k: [*:1]~[#6X3:2]~!@[#6X3:3]~[*:4] \n 1.2.0 training & subtituted phenyl datasets")
    plt.savefig('tbvsk_solidcolor.pdf', bbox_inches='tight')
    plt.show()
def plot_wbo_tb_compare_solid_noerrorbar(filename):
    # Duplicate redefinition of the no-error-bar variant of the steric-filtered
    # twin-axis k / TB vs WBO plot.  Relies on the module-level `lj` DataFrame.
    with open(filename, 'rb') as f:
        x = pickle.load(f)
    #print(x)
    wbo=[]
    kval=[]
    tb=[]
    ids=set()
    tid=[]
    #print('debug', [(key,item) for key,item in x.items()])
    #print(x)
    # Torsion record ids to include in the plot
    tids=['3745392', '3745370', '18535845', '18535848', '4269710', '4269708', '4269709', '19953582', '18045328', '4269707', '3745476', '18536057', '18045333', '4269712', '18886247', '3745617', '18045330', '18537019', '18886237', '4269703', '18536064', '18535839', '19953583', '18886249', '18536061', '19953581', '3745367', '4269711', '3745366', '4269706', '3745658', '3745390', '18535849', '4269704', '3745541', '18535838', '19953579', '3745391', '19953580', '18886244', '18886238', '18535846', '18535842', '18886243', '18536062', '18536201', '4269705', '18535847', '18536063', '18536056']
    for key, item in x.items():
        if item['tid'] in tids:
            # Steric filter: drop torsions whose LJ energy difference >= 3.4
            if lj['LJ energy diff kcal/mol'][item['tid']]<3.4:
                #print("Not filtered")
                wbo.append(item['wbo'])
                kval.append(item['kval'])
                ids.add(item['id'])
                tid.append(item['tid'])
                tb.append(item['tb']*0.239006)  # kJ/mol -> kcal/mol
    #print(ids)
    #print(tid)
    #print(tb)
    #print(kval)
    # Per-parameter-id grouping (computed but not used by this plot)
    dataDictWBO={}
    dataDictKval={}
    #print(ids)
    for i in ids:
        dataDictWBO[i]=[]
        dataDictKval[i]=[]
    #print(x)
    for i in ids:
        for key, item in x.items():
            #print(item)
            #print(key)
            #print(item['id'])
            #print(i)
            if item['id']==i:
                dataDictWBO[i].append(item['wbo'])
                dataDictKval[i].append(item['kval'])
    fig, ax1 = plt.subplots()
    colors = list(matplotlib.colors.cnames.values())
    #for i, val in enumerate(wbo):
    #    ax1.scatter(wbo[i], kval[i], color=colors[i], marker='o', s=50, alpha=.5)
    color = 'tab:red'
    ax1.scatter(wbo, kval, color=color, marker='o', s=80, alpha=.5, label="k")
    #ax1.errorbar(wbo, kval, yerr=error, fmt='none', color=color, alpha=.5)
    ax1.set_xlabel("WBO")
    ax1.set_ylabel("k (kcal/mol)", color=color)
    #ax1.scatter(wbo, kval, color=color)
    ax1.tick_params(axis='y', labelcolor=color)
    ax2 = ax1.twinx()  # instantiate a second axes that shares the same x-axis
    color = 'tab:blue'
    #color=['#ffb3ba']
    #ax2.set_ylabel('sin', color=color)  # we already handled the x-label with ax1
    #ax1.scatter(wbo, tb, color=color, marker='^', s=50, alpha=.5)
    ax2.scatter(wbo, tb, color=color, marker='^', s=80, alpha=.5, label="TB")
    ax2.tick_params(axis='y', labelcolor=color)
    ax2.set_ylabel('Torsion Barrier (kcal/mol)', color=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    elements=[Line2D([0], [0], color='red', label='k',markerfacecolor='red', markersize=15, alpha=.5),Line2D([0], [0], color=color, label='TB', markersize=15, alpha=.5) ]
    plt.legend(handles=elements)
    plt.title("Torsion Barrier vs k: [*:1]~[#6X3:2]~!@[#6X3:3]~[*:4] \n 1.2.0 training & subtituted phenyl datasets")
    plt.savefig('tbvsk_solidcolor_noerror_bar_steric_filter.pdf', bbox_inches='tight')
    plt.show()
# Re-run the steric-filtered plots with the (re)defined functions above.
plot_wbo_tb_compare_solid('plot_k_wbo_dict.pk')
plot_wbo_tb_compare_solid_noerrorbar('plot_k_wbo_dict.pk')
# +
# Render every molecule in optimization_inputs.smi into a paginated PDF using
# OpenEye oedepict, then print heavy-atom-count statistics for the set.
import gzip
from openeye import oechem
# Highlight element of interest
#subs = oechem.OESubSearch("[#6]") # carbon
subs = None
# Read the input SMILES
with open('optimization_inputs.smi', 'r') as infile:
    smiles_list = infile.readlines()
smiles_list = [ smiles.strip() for smiles in smiles_list ]
# NOTE(review): `filename` is not defined in this cell -- this load raises
# NameError unless a previous cell set it.
with open(filename, 'rb') as f:
    x = pickle.load(f)
print(x)
# Build OEMols
oemols = list()
for smiles in smiles_list:
    oemol = oechem.OEMol()
    oechem.OESmilesToMol(oemol, smiles)
    oemols.append(oemol)
# Generate a PDF of all molecules in the set
pdf_filename = 'optimization_inputs.pdf'
from openeye import oedepict
itf = oechem.OEInterface()
PageByPage = True
suppress_h = True
rows = 10
cols = 6
ropts = oedepict.OEReportOptions(rows, cols)
ropts.SetHeaderHeight(25)
ropts.SetFooterHeight(25)
ropts.SetCellGap(2)
ropts.SetPageMargins(10)
report = oedepict.OEReport(ropts)
cellwidth, cellheight = report.GetCellWidth(), report.GetCellHeight()
opts = oedepict.OE2DMolDisplayOptions(cellwidth, cellheight, oedepict.OEScale_Default * 0.5)
opts.SetAromaticStyle(oedepict.OEAromaticStyle_Circle)
pen = oedepict.OEPen(oechem.OEBlack, oechem.OEBlack, oedepict.OEFill_On, 1.0)
opts.SetDefaultBondPen(pen)
oedepict.OESetup2DMolDisplayOptions(opts, itf)
for i, mol in enumerate(oemols):
    cell = report.NewCell()
    # Depict a copy so the stored molecule is left untouched
    mol_copy = oechem.OEMol(mol)
    oedepict.OEPrepareDepiction(mol_copy, False, suppress_h)
    disp = oedepict.OE2DMolDisplay(mol_copy, opts)
    # Highlight element of interest
    unique = False
    if subs is not None:
        for match in subs.Match(mol_copy, unique):
            oedepict.OEAddHighlighting(disp, oechem.OEColor(oechem.OEYellow), oedepict.OEHighlightStyle_BallAndStick, match)
    oedepict.OERenderMolecule(cell, disp)
    #oedepict.OEDrawCurvedBorder(cell, oedepict.OELightGreyPen, 10.0)
oedepict.OEWriteReport(pdf_filename, report)
# Heavy-atom statistics over the whole set
heavyAtomCount=[]
atoms=[]
for mol in oemols:
    heavyAtomCount.append(oechem.OECount(mol, oechem.OEIsHeavy()))
#    atoms.append(oechem.OECount(mol, oechem.GetExplicitDegree()))
print(heavyAtomCount)
print(min(heavyAtomCount))
print(max(heavyAtomCount))
#print(atoms)
#print(min(atoms))
#print(max(atoms))
# -
# Scratch: inspect readKval output and poke at a fitted offxml with ElementTree.
adict = readKval()
print(adict)
import xml
print(dir(xml.etree))
import xml.etree.ElementTree as ET
root = ET.parse('debug.tmp/td_OpenFF_Substituted_Phenyl_Set_1_152_C11ClH8NO/iter_0001/test.offxml').getroot()
# NOTE(review): `tree` is referenced here before it is assigned on the next
# line -- this raises NameError on a fresh run.
all_name_elements = tree.findall('*/name')
tree = ET.parse('debug.tmp/td_OpenFF_Substituted_Phenyl_Set_1_152_C11ClH8NO/iter_0001/test.offxml').getroot()
all_name_elements = tree.findall('*/TIG-fit0')
print(all_name_elements)
print(dir(tree))
# cd ../../
plot_data = collect_td_targets_data('debug.tmp', 'targets')
# | by_molecule_fits/kversusWBOplots_cleanup.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from db_schema import engine, Incident, Category, Participant
import pandas as pd
from datetime import datetime as dt
from sqlalchemy.orm import sessionmaker
from us_state_abbrev import us_state_abbrev as abr
import time
# Import the cleaned gun-incident CSV into the database defined in db_schema:
# one Incident per row, with linked Category and Participant records.
Session = sessionmaker(bind=engine)
session = Session()
csv_data = pd.read_csv('data/data-clean.csv', parse_dates=['date'])
# Normalise the pipe-delimited text columns: empty string for missing values,
# lower-case for the fields whose values are matched by substring below.
csv_data['incident_characteristics'] = csv_data['incident_characteristics'].fillna('')
csv_data['participant_age'] = csv_data['participant_age'].fillna('')
csv_data['participant_gender'] = csv_data['participant_gender'].fillna('').str.lower()
csv_data['participant_status'] = csv_data['participant_status'].fillna('').str.lower()
csv_data.head()
all_categories = {}  # cache: category name -> Category row (dedup across incidents)
incidents = []
now = time.time()  # wall-clock start, reused below for the elapsed-time report
for _,row in csv_data.iterrows():
    # `_` is the DataFrame index; used only for progress reporting
    if _ > 0 and _ % 10000 == 0:
        print(f'{_} records processed')
    incident = Incident(date=row['date'],
                        state=abr[row['state']],
                        n_killed=row['n_killed'],
                        n_injured=row['n_injured'])
    # Categories
    categories = [cat for cat in row['incident_characteristics'].split('|') if cat]
    for cat in categories:
        category = all_categories.get(cat)
        if not category:
            category = Category(name=cat)
            all_categories[cat] = category
        incident.categories.append(category)
    # Participants
    # Each participant_* column encodes "index:value" pairs separated by '|';
    # the shared index links the four attributes to one Participant record.
    participants = {}
    # - Age
    parts = [p for p in row['participant_age'].split('|') if p]
    for part in parts:
        if not part:
            continue
        i,raw_age = [p for p in part.split(':') if p]
        age = int(raw_age) if raw_age.isdigit() else None
        participant = participants.get(i)
        if not participant:
            participant = Participant(age=age)
            incident.participants.append(participant)
            participants[i] = participant
    # - Gender
    parts = [p for p in row['participant_gender'].split('|') if p]
    for part in parts:
        if not part:
            continue
        i,raw_gender = [p for p in part.split(':') if p]
        is_male=True if raw_gender == 'male' else False if raw_gender == 'female' else None
        participant = participants.get(i)
        if not participant:
            participant = Participant(is_male=is_male)
            incident.participants.append(participant)
            participants[i] = participant
        else:
            participant.is_male = is_male
    # - Status
    parts = [p for p in row['participant_status'].split('|') if p]
    for part in parts:
        if not part:
            continue
        i,raw_status = [p for p in part.split(':') if p]
        is_killed = True if 'killed' in raw_status else False if 'injured' in raw_status else None
        participant = participants.get(i)
        if not participant:
            participant = Participant(is_killed=is_killed)
            incident.participants.append(participant)
            participants[i] = participant
        else:
            participant.is_killed = is_killed
    # - Type
    parts = [p for p in row['participant_type'].split('|') if p]
    for part in parts:
        if not part:
            continue
        i,raw_type = [p for p in part.split(':') if p]
        is_victim = True if 'victim' in raw_type else \
            False if 'suspect' in raw_type or 'subject' in raw_type else None
        participant = participants.get(i)
        if not participant:
            participant = Participant(is_victim=is_victim)
            incident.participants.append(participant)
            participants[i] = participant
        else:
            participant.is_victim = is_victim
    incidents.append(incident)
# Single bulk add + commit at the end keeps the import to one transaction.
print('Created all incidents. Commiting...')
session.add_all(incidents)
session.commit()
now = int(time.time() - now)
print(f'Importing {len(csv_data)} incidents took {now // 60}:{now % 60:02d}')
# +
# #!jupyter nbconvert --to Script db_import
# | db_import.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Question 3
# (a) Finding max and min elements in a list
# +
#using inbuilt function
# Locate the extremes of the sample list with the builtin helpers.
list1 = [261, 157, 89, 143, 67, 90, 135, 78, 99, 378]
max_list1 = max(list1)
min_list1 = min(list1)
print("Maximum element in the list is: ", max_list1)
print("Minimum element in the list is: ", min_list1)
#without using inbuilt function
def Maximum(x):
    """Return the largest element of the non-empty sequence ``x``."""
    best = x[0]
    for candidate in x[1:]:
        if candidate > best:
            best = candidate
    return best
# Sanity check: should agree with the builtin max() result printed above.
print("\nMaximum number is: ",Maximum(list1))
def Minimum(y):
    """Return the smallest element of the non-empty sequence ``y``."""
    best = y[0]
    for candidate in y[1:]:
        if candidate < best:
            best = candidate
    return best
# Sanity check: should agree with the builtin min() result printed above.
print("Minimum number is: ",Minimum(list1))
# -
# (b) Finding an element of a given rank in a list (using partition)
# +
def rank(A):
    """Return the fractional (mid) rank of every element of ``A``.

    The rank of A[i] is (number of strictly smaller elements + 1); tied
    values share the average of their positions, e.g. two tied smallest
    elements both get rank 1.5.
    """
    R = [0] * len(A)
    for i, value in enumerate(A):
        smaller = sum(1 for other in A if other < value)
        equal = sum(1 for other in A if other == value)  # counts A[i] itself
        R[i] = (smaller + 1) + (equal - 1) / 2
    return R
# Demo: compute the fractional ranks of a sample list, then let the user look
# up which element carries a given rank.
A = [261, 157, 89, 143, 67, 90, 135, 78, 99, 378]
B = rank(A)
print(B)
# Ranks can be fractional (ties), so read the query as a float.
n = float(input("Enter a rank from above list to find number corresponding to it: "))
for i in range(len(B)):
    if B[i] == n:
        # Typo fixed: "c orresponding" -> "corresponding".
        print('The number corresponding to given rank is: ', A[i])
        break
else:
    # Robustness: tell the user when no element carries the requested rank.
    print('No element in the list has that rank.')
# -
# (c) Finding the rank of a given item in a list
def rank_list(lst, num):
    """Return the 1-based rank of ``num`` within ``lst``.

    The rank is the count of *distinct* values smaller than ``num`` plus one.
    When ``num`` does not occur in ``lst`` an error string is returned
    (mirroring the original contract).
    """
    if num not in lst:
        return "Item not found in given list!"
    distinct_smaller = set()
    for value in lst:
        if value < num:
            distinct_smaller.add(value)
    return len(distinct_smaller) + 1
# Demo: 143 is the 7th smallest value of the sample list.
A=[261,157,89,143,67,90,135,78,99,378]
target=143
print("Rank of element in given list is: ",rank_list(A,target))
# # Question 5
# Priority queue operations:
# +
class Node:
    """A prioritized payload for the queue below.

    ``info`` is the stored value; ``priority`` decides queue position
    (smaller priority sits nearer the front).
    """

    def __init__(self, info, priority):
        self.priority = priority
        self.info = info
class PriorityQueue:
    """Ascending-priority queue backed by a Python list kept sorted.

    Nodes are ordered by ``priority`` (smallest first), so the
    lowest-priority node is always at index 0 and ``delete`` removes it.
    """

    def __init__(self):
        # Underlying storage, always sorted by ascending priority.
        self.queue = list()

    def insert(self, node):
        """Insert *node*, keeping the queue sorted by ascending priority.

        Bug fix: the original kept scanning after inserting, so the same node
        was re-inserted once at every remaining position (inserting priorities
        3, 2, 1 produced a queue of four entries). We now stop right after the
        single insertion. Ties go after existing equal-priority nodes, as
        before. Returns True (now also for the empty-queue case).
        """
        for x in range(self.size()):
            if node.priority < self.queue[x].priority:
                self.queue.insert(x, node)
                return True
        # Empty queue, or node has the highest priority seen so far.
        self.queue.append(node)
        return True

    def show(self):
        """Print every node as "info - priority", front of queue first."""
        for x in self.queue:
            print(str(x.info) + " - " + str(x.priority))

    def size(self):
        """Return the number of queued nodes."""
        return len(self.queue)

    def delete(self):
        """Remove and return the front node (lowest priority)."""
        return self.queue.pop(0)

    def extractMin(self):
        """Remove the node with the smallest ``info`` value (not priority).

        Announces the node, deletes it, and returns the remaining queue list
        (same contract as the original implementation).
        """
        temp = 0
        for i in range(0, self.size()):
            if self.queue[temp].info > self.queue[i].info:
                temp = i
        print("Minimum element is", self.queue[temp])
        del self.queue[temp]
        return self.queue
def extractMin(self):
temp = 0
for i in range(0, self.size()):
if self.queue[temp].info > self.queue[i].info:
temp = i
else:
pass
print("Minimum element is", self.queue[temp])
del self.queue[temp]
return self.queue
# -
# (a) Delete
# (b) Insert
# +
# Build the queue from six sample nodes (info value first, priority second).
p = PriorityQueue()
node1, node2, node3, node4, node5, node6 = (
    Node(10, 3), Node(22, 2), Node(3, 1),
    Node(12, 26), Node(14, 25), Node(20, 12),
)
for node in (node1, node2, node3, node4, node5, node6):
    p.insert(node)
# -
p.show()
p.delete()
p.show()
# (c) Extract Min
# (d) Heap Key increase/decrease
# +
def buildHeap(A):
    """Rearrange list ``A`` in place into a min-heap and return it.

    Sifts down every internal node, starting from the last parent and
    walking back to the root.
    """
    for root in range(len(A) // 2, -1, -1):
        heapifyMin(A, root)
    return A

def heapifyMin(A, i):
    """Sift ``A[i]`` down until the subtree rooted at ``i`` is a min-heap."""
    left, right = 2 * i + 1, 2 * i + 2
    smallest = i
    if left < len(A) and A[left] < A[smallest]:
        smallest = left
    if right < len(A) and A[right] < A[smallest]:
        smallest = right
    if smallest != i:
        # Promote the smaller child and keep sifting from its old slot.
        A[i], A[smallest] = A[smallest], A[i]
        heapifyMin(A, smallest)
def insert(A, value):
    """Append ``value`` to heap ``A``, restore the heap property, return A."""
    A.append(value)
    return buildHeap(A)
def delete(A):
    """Drop the root (minimum) of heap ``A``, re-heapify, and return A."""
    del A[0]
    buildHeap(A)
    return A
def extractMin(A):
    """Pop and print the heap minimum, then return the re-heapified list."""
    root = A.pop(0)
    print(root)
    return buildHeap(A)
def increaseKey(A, old, new):
    """Raise one occurrence of ``old`` in heap ``A`` to the larger ``new``.

    Returns the re-heapified list on success; False when ``new < old`` or
    ``old`` is absent (an absence also prints a notice, as before).
    The last occurrence of ``old`` is the one replaced, matching the
    original forward scan that kept overwriting its match index.
    """
    if new < old:
        return False
    for idx in range(len(A) - 1, -1, -1):
        if A[idx] == old:
            A[idx] = new
            return buildHeap(A)
    print("Not present in data structure")
    return False
def decreaseKey(A, old, new):
    """Lower one occurrence of ``old`` in heap ``A`` to the smaller ``new``.

    Returns the re-heapified list on success; False when ``new > old`` or
    ``old`` is absent (an absence also prints a notice, as before).
    The last occurrence of ``old`` is the one replaced, matching the
    original forward scan that kept overwriting its match index.
    """
    if new > old:
        return False
    for idx in range(len(A) - 1, -1, -1):
        if A[idx] == old:
            A[idx] = new
            return buildHeap(A)
    print("Not present in data structure")
    return False
# -
# Build a min-heap from sample data, then exercise the update helpers.
A = [4,2,6,1,5,7,9,11,12,3]
minH = buildHeap(A)
#extractMin(minH)
print(minH)
# Lower key 12 to 10, then grow and shrink the heap once each.
print(decreaseKey(minH,12,10))
insert(A, 17)
delete(A)
| Question 3 and 5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kriaz100/deep-learning-with-python-notebooks/blob/master/chapter04_getting-started-with-neural-networks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="qUDWMlH5dHmM"
# This is a companion notebook for the book [Deep Learning with Python, Second Edition](https://www.manning.com/books/deep-learning-with-python-second-edition?a_aid=keras&a_bid=76564dff). For readability, it only contains runnable code blocks and section titles, and omits everything else in the book: text paragraphs, figures, and pseudocode.
#
# **If you want to be able to follow what's going on, I recommend reading the notebook side by side with your copy of the book.**
#
# This notebook was generated for TensorFlow 2.6.
# + [markdown] id="ZDxT_1IjdHmk"
# # Getting started with neural networks: Classification and regression
# + [markdown] id="oLh5pZhEdHmp"
# ## Classifying movie reviews: A binary classification example
# + [markdown] id="J5iOn0nhdHms"
# ### The IMDB dataset
# + [markdown] id="-bjRLq57dHmt"
# **Loading the IMDB dataset**
#
# Read about IMDB data on p 115 in the book (Chap 4).
#
# Contains 50,000 movie reviews that are highly polarized. It is split into training data set with 25,000 reviews, and a test dataset which also has 25,000 reviews. In both datasets, 50% reviews are negative and 50% positive.
#
# Note that this is a binary classification problem where the output is a negative (0) or positive (1) review. We specify that the 10,000 most frequently used words should be included (num_words=10,000).
# + id="X9GQFRhTdHmw" outputId="f02fdf64-eeb3-4087-821d-f9f5023210e7" colab={"base_uri": "https://localhost:8080/"}
from tensorflow.keras.datasets import imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(
num_words=10000)
# + id="iY_vyxwxdHm8"
# Printing the first review
# -- its just list of indexed words, with no word index exceeding 10,000.
train_data[0]
# + id="cKhETZAfdHm_" outputId="732d2e1e-c96b-4295-e88e-ec0a0d47a658" colab={"base_uri": "https://localhost:8080/"}
# printing label of first review
# --- 1=positive, 0=negative
train_labels[0]
# + id="F6Upxp-2dHnC" outputId="6e875c72-f52c-4014-ffe3-6afe1357c669" colab={"base_uri": "https://localhost:8080/"}
# No index exceeds 10,000.
max([max(sequence) for sequence in train_data])
# + [markdown] id="z8F-ZUJ1dHnL"
# **Decoding reviews back to text**
#
# Note: <font color='steelblue'>imdb.get_word_index</font> returns a word index dictionary where Keys are word strings and values are their index.
# + id="oqpuud-mKR73"
# Retrieve the word index file mapping words to indices
word_index = imdb.get_word_index()
# Reverse the word index to obtain a dict mapping indices to words
reverse_word_index = dict(
[(value, key) for (key, value) in word_index.items()])
# Decode the first sequence in the dataset
decoded_review = " ".join(
[reverse_word_index.get(i - 3, "?") for i in train_data[0]])
# + id="EYGOyczfKmVf" outputId="227b7763-bb11-4663-fe76-02ae2323d0b5" colab={"base_uri": "https://localhost:8080/", "height": 122}
decoded_review
# + [markdown] id="k_8PzGHpdHnQ"
# ### Preparing the data
# + [markdown] id="zkaj0AM2Jr2w"
#
# + [markdown] id="fAPZm_d4dHnR"
# **Encoding the integer sequences via multi-hot encoding**
# + id="oCwbe-uhdHnU"
import numpy as np
def vectorize_sequences(sequences, dimension=10000):
    """Multi-hot encode integer sequences.

    Returns a float matrix of shape (len(sequences), dimension) where row i
    holds 1.0 at every column index that occurs in sequences[i], 0.0 elsewhere.
    """
    results = np.zeros((len(sequences), dimension))
    for row, sequence in enumerate(sequences):
        # Fancy indexing sets all listed columns at once; duplicates are harmless.
        results[row, list(sequence)] = 1.
    return results
# Multi-hot encode the train and test review sequences into 10,000-dim vectors.
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
# + [markdown] id="K-Mu-FnylQjb"
# In above cell we defined function vectorize_sequences. This function performed multi-hot encoding (can also be done w built-in CategoryEncoding layer in Keras)
#
# First, created an all-zero matrix of shape (len(sequences), dimension) where the dimension=10,000
#
# Next set specific indices of results[i] to 1s.
# + id="a7v5esADdHnZ" outputId="d088bc82-e335-4f88-da7e-7f1619593fec" colab={"base_uri": "https://localhost:8080/"}
x_train[0]
# + id="Hq_f1DbPdHnb"
y_train = np.asarray(train_labels).astype("float32")
y_test = np.asarray(test_labels).astype("float32")
# + [markdown] id="lwLTU1r4dHnf"
# ### Building your model
#
# We define model architecture with keras sequential API.
# The model has 2 hidden layers (16 neurons each) with relu activations. The output layer is defined with sigmoid activation (because the output has to be a probability).
# + [markdown] id="SDwpRcR3dHnh"
# **Model definition**
# + id="9BhVrrtvdHnj"
from tensorflow import keras
from tensorflow.keras import layers
model = keras.Sequential([
layers.Dense(16, activation="relu"),
layers.Dense(16, activation="relu"),
layers.Dense(1, activation="sigmoid")
])
# + [markdown] id="cK0O7uEZdHnl"
# **Compiling the model**
#
# We compile the model with rmsprop **optimize**r, binary-crossentropy **loss function**, and 'accuracy' as the **performance metric**
# + id="G3fDhiBodHnn"
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
# + [markdown] id="F2OD8CiIdHnr"
# ### Validating your approach
# + [markdown] id="tRVQWTCHdHnt"
# **Setting aside a validation set**
#
# - Deep learning models are never evaluated on the training data. It is a standard practice to set aside some *validation data* for monitoring the accuracy of the model during training. Our validation data is 10000 examples.
#
# - The remaining samples i.e. partial_x_train and partial_y_train are for actual training.
# + id="rkeFNl2BdHnw"
x_val = x_train[:10000]
partial_x_train = x_train[10000:]
y_val = y_train[:10000]
partial_y_train = y_train[10000:]
# + [markdown] id="EZd4wqVxdHnz"
# **Training your model**
#
# - The fit method performs model training.
# - To train the model, we will use 20 epochs and mini-batches of 512 samples.
# - Model will be trained on partial_x_train, partial_y_train but evaluated on validation data x-val and y_val of 10000 samples already set aside
# - The fit method creates an object 'History'. This object has a member 'history', which is a dictionary that contains data about everything that happened during training.
#
# + id="tW7ICAZVdHn1" outputId="d1a2a7bd-0f0d-46a7-d6e1-fb8f395ce382" colab={"base_uri": "https://localhost:8080/"}
history = model.fit(partial_x_train,
partial_y_train,
epochs=20,
batch_size=512,
validation_data=(x_val, y_val))
# + [markdown] id="rCqGb52tyWBJ"
# The output above shows that model accuracy on the validation data set is 87 percent (although on the training data set it is 99 percent!). This is over-fitting the training data. Hence the importance of setting aside validation data.
# + [markdown] id="kgXAd7K5z3sO"
# The dictionary history has four entries, one for each metric we were monitoring: "loss", "accuracy", "val_loss", "val_accuracy"
# + id="tNHL3k56dHn3" outputId="a6a641ac-ce60-4db9-a8f0-549b3599c984" colab={"base_uri": "https://localhost:8080/"}
history_dict = history.history
history_dict.keys()
# + [markdown] id="suOb7Ii9dHn8"
# **Plotting the training and validation loss**
#
# - We use matplotlib to plot validation loss and training loss against epochs
# - "bo" is for "blue dot" (represents training loss)
# - "b" is for "solid blue line" (represents validation loss)
# + id="sCk8qRzCdHn-" outputId="a11944f6-29bd-4976-8e6d-08a5a8905f21" colab={"base_uri": "https://localhost:8080/", "height": 295}
import matplotlib.pyplot as plt
history_dict = history.history
loss_values = history_dict["loss"]
val_loss_values = history_dict["val_loss"]
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, "bo", label="Training loss")
plt.plot(epochs, val_loss_values, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
# + [markdown] id="jrxZDHCn3uaG"
# The graph shows that while the training loss continues to fall, the validation loss first falls and then starts rising again after the 4th epoch.
# + [markdown] id="AacHXx4BdHoA"
# **Plotting the training and validation accuracy**
# + id="WSXJBL48dHoB" outputId="61fcd396-3816-412f-9e52-296024b22221" colab={"base_uri": "https://localhost:8080/", "height": 295}
plt.clf()
acc = history_dict["accuracy"]
val_acc = history_dict["val_accuracy"]
plt.plot(epochs, acc, "bo", label="Training acc")
plt.plot(epochs, val_acc, "b", label="Validation acc")
plt.title("Training and validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
# + [markdown] id="kf6fofiQ8ImV"
# The above plot shows the same behavior for accuracy. The training accuracy keeps increasing but validation accuracy peaks near the 4th epoch.
# + [markdown] id="TXUqHcwSdHoD"
# **Retraining a model from scratch**
#
# - We noted from the plots of validation loss and accuracy that both of them are at their optima near the 4th epoch. We don't need to continue training beyond that.
# - So we re-train the model on full training data (x_train and y_train), *with number of epochs = 4*
# - <font color='blue'>However, we evaluate the model on test data (x_test, and y_test)</font>
# + id="trG8uvBtdHoE" outputId="184d9e37-f6bb-42ab-aa40-53e5a3761630" colab={"base_uri": "https://localhost:8080/"}
model = keras.Sequential([
layers.Dense(16, activation="relu"),
layers.Dense(16, activation="relu"),
layers.Dense(1, activation="sigmoid")
])
model.compile(optimizer="rmsprop",
loss="binary_crossentropy",
metrics=["accuracy"])
model.fit(x_train, y_train, epochs=4, batch_size=512)
results = model.evaluate(x_test, y_test)
# + id="zBJzEI3YdHoI" outputId="22cd759c-c7cb-4f8e-bcd3-5b993d8b588e" colab={"base_uri": "https://localhost:8080/"}
results
# + [markdown] id="usubchM--Vpp"
# <font color='blue'>We get 88 percent accuracy on test data.</font>
# + [markdown] id="DBucnL76dHoL"
# ### Using a trained model to generate predictions on new data
# The predict method returns predicted probability of review being positive (i.e. probability of class label being 1).
# + id="J8ZulkwydHoO" outputId="cad128d8-810a-4e5e-beb1-913d121e96be" colab={"base_uri": "https://localhost:8080/"}
model.predict(x_test)
# + [markdown] id="88PSs5IidHoP"
# ### Further experiments
# + [markdown] id="oekovP7bdHoR"
# ### Wrapping up
# + [markdown] id="RLzkCLScdHoU"
# ## Classifying newswires: A multiclass classification example
# + [markdown] id="GD71Kx2hdHoW"
# ### The Reuters dataset
# + [markdown] id="n1pn4bJidHoe"
# **Loading the Reuters dataset**
# + id="hsiyNok_dHog"
from tensorflow.keras.datasets import reuters
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(
num_words=10000)
# + id="Ro2MKCs1dHoi"
len(train_data)
# + id="9F1UGZTJdHoj"
len(test_data)
# + id="Ai1oYY5IdHok"
train_data[10]
# + [markdown] id="lmlhxHMJdHol"
# **Decoding newswires back to text**
# + id="Vk8273cLdHom"
word_index = reuters.get_word_index()
reverse_word_index = dict([(value, key) for (key, value) in word_index.items()])
decoded_newswire = " ".join([reverse_word_index.get(i - 3, "?") for i in
train_data[0]])
# + id="oAemUAS8dHon"
train_labels[10]
# + [markdown] id="egSyv3f0dHoo"
# ### Preparing the data
# + [markdown] id="RgCsdh6gdHop"
# **Encoding the input data**
# + id="_UcjqRiNdHoq"
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
# + [markdown] id="mEJQHclhdHow"
# **Encoding the labels**
# + id="pNNSWW_odHoy"
def to_one_hot(labels, dimension=46):
    """One-hot encode integer ``labels``.

    Returns a float64 matrix of shape (len(labels), dimension): row i is all
    zeros except for a 1.0 at column labels[i].
    """
    # Selecting rows of the identity matrix is equivalent to the manual loop.
    return np.eye(dimension)[np.asarray(labels, dtype=int)]
# One-hot encode the 46 topic labels (equivalent to keras' to_categorical).
y_train = to_one_hot(train_labels)
y_test = to_one_hot(test_labels)
# + id="EPwWiFeGdHo0"
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(train_labels)
y_test = to_categorical(test_labels)
# + [markdown] id="4Nj4N28idHo3"
# ### Building your model
# + [markdown] id="oKJD2KFhdHo4"
# **Model definition**
# + id="WOJvK3WadHo5"
model = keras.Sequential([
layers.Dense(64, activation="relu"),
layers.Dense(64, activation="relu"),
layers.Dense(46, activation="softmax")
])
# + [markdown] id="CPI7H1ledHo6"
# **Compiling the model**
# + id="2jl_uddTdHo9"
model.compile(optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"])
# + [markdown] id="wPGNCjO6dHpB"
# ### Validating your approach
# + [markdown] id="l0ej1RkrdHpC"
# **Setting aside a validation set**
# + id="-l84mRyNdHpD"
x_val = x_train[:1000]
partial_x_train = x_train[1000:]
y_val = y_train[:1000]
partial_y_train = y_train[1000:]
# + [markdown] id="t7dfvq3UdHpE"
# **Training the model**
# + id="hs5ZMFg9dHpF"
history = model.fit(partial_x_train,
partial_y_train,
epochs=20,
batch_size=512,
validation_data=(x_val, y_val))
# + [markdown] id="o4ijYyZBdHpH"
# **Plotting the training and validation loss**
# + id="DELcw1NEdHpK"
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
# + [markdown] id="catgo4IUdHpN"
# **Plotting the training and validation accuracy**
# + id="EH3L7ne8dHpR"
plt.clf()
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
plt.plot(epochs, acc, "bo", label="Training accuracy")
plt.plot(epochs, val_acc, "b", label="Validation accuracy")
plt.title("Training and validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
# + [markdown] id="qBIEQ9rSdHpS"
# **Retraining a model from scratch**
# + id="jaSK4R-ddHpS"
model = keras.Sequential([
layers.Dense(64, activation="relu"),
layers.Dense(64, activation="relu"),
layers.Dense(46, activation="softmax")
])
model.compile(optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"])
model.fit(x_train,
y_train,
epochs=9,
batch_size=512)
results = model.evaluate(x_test, y_test)
# + id="Mc97o8S6dHpV"
results
# + id="skxVQsGsdHpW"
import copy
test_labels_copy = copy.copy(test_labels)
np.random.shuffle(test_labels_copy)
hits_array = np.array(test_labels) == np.array(test_labels_copy)
hits_array.mean()
# + [markdown] id="ZSw74xiQdHpY"
# ### Generating predictions on new data
# + id="ckZ3fNBgdHpZ"
predictions = model.predict(x_test)
# + id="DA3-M0EqdHpa"
predictions[0].shape
# + id="m40J2fpfdHpb"
np.sum(predictions[0])
# + id="bOS0B5q1dHpb"
np.argmax(predictions[0])
# + [markdown] id="pYK_fYR5dHpg"
# ### A different way to handle the labels and the loss
# + id="oTuToWhldHph"
y_train = np.array(train_labels)
y_test = np.array(test_labels)
# + id="h7gZDxGqdHpi"
model.compile(optimizer="rmsprop",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
# + [markdown] id="hkm9phYWdHpj"
# ### The importance of having sufficiently large intermediate layers
# + [markdown] id="PkrLaZBPdHpk"
# **A model with an information bottleneck**
# + id="ChN8vA1mdHpk"
model = keras.Sequential([
layers.Dense(64, activation="relu"),
layers.Dense(4, activation="relu"),
layers.Dense(46, activation="softmax")
])
model.compile(optimizer="rmsprop",
loss="categorical_crossentropy",
metrics=["accuracy"])
model.fit(partial_x_train,
partial_y_train,
epochs=20,
batch_size=128,
validation_data=(x_val, y_val))
# + [markdown] id="JfZ96xo4dHpl"
# ### Further experiments
# + [markdown] id="RDiEa2-JdHpm"
# ### Wrapping up
# + [markdown] id="QwkYhaOwdHpm"
# ## Predicting house prices: A regression example
# + [markdown] id="wzW1AHS4dHpn"
# ### The Boston Housing Price dataset
# + [markdown] id="WsX8UUHodHpo"
# **Loading the Boston housing dataset**
# + id="YFhB57RydHpq"
from tensorflow.keras.datasets import boston_housing
(train_data, train_targets), (test_data, test_targets) = boston_housing.load_data()
# + id="-8J6f1DSdHpr"
train_data.shape
# + id="QmdIGqJydHpr"
test_data.shape
# + id="RaBRhCZXdHps"
train_targets
# + [markdown] id="V5o7bN-wdHpv"
# ### Preparing the data
# + [markdown] id="TK62fWT-dHpw"
# **Normalizing the data**
# + id="eJTsrlL4dHpw"
# Feature-wise standardization: center each column and scale to unit variance.
mean = train_data.mean(axis=0)
train_data -= mean
std = train_data.std(axis=0)
train_data /= std
# NOTE: the test set is deliberately normalized with the *training* statistics
# so that no information from the test data leaks into preprocessing.
test_data -= mean
test_data /= std
# + [markdown] id="LfmeuH_-dHpy"
# ### Building your model
# + [markdown] id="z8KOiP_ydHpz"
# **Model definition**
# + id="I_A_0Z3NdHp0"
def build_model():
    """Return a fresh, compiled regression network.

    Two 64-unit relu hidden layers, one linear output unit; compiled with
    rmsprop, MSE loss, and MAE as the monitored metric.
    """
    net = keras.Sequential([
        layers.Dense(64, activation="relu"),
        layers.Dense(64, activation="relu"),
        layers.Dense(1)
    ])
    net.compile(optimizer="rmsprop", loss="mse", metrics=["mae"])
    return net
# + [markdown] id="3dYSzQqkdHp1"
# ### Validating your approach using K-fold validation
# + [markdown] id="KkhBFgKrdHp2"
# **K-fold validation**
# + id="IAAnp63PdHp2"
# K-fold cross-validation: each pass holds out fold i for validation and
# trains a fresh model on the remaining k-1 folds.
k = 4
num_val_samples = len(train_data) // k
num_epochs = 100
all_scores = []
for i in range(k):
    print(f"Processing fold #{i}")
    # Slice out the i-th fold as the validation set.
    val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
    val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
    # Everything before and after the fold becomes the training set.
    partial_train_data = np.concatenate(
        [train_data[:i * num_val_samples],
         train_data[(i + 1) * num_val_samples:]],
        axis=0)
    partial_train_targets = np.concatenate(
        [train_targets[:i * num_val_samples],
         train_targets[(i + 1) * num_val_samples:]],
        axis=0)
    # A brand-new model per fold so no weights leak between folds.
    model = build_model()
    model.fit(partial_train_data, partial_train_targets,
              epochs=num_epochs, batch_size=16, verbose=0)
    val_mse, val_mae = model.evaluate(val_data, val_targets, verbose=0)
    all_scores.append(val_mae)
# + id="Q8Ndct8ydHp3"
all_scores
# + id="3EFPj54ZdHp4"
np.mean(all_scores)
# + [markdown] id="0raMYvlKdHp5"
# **Saving the validation logs at each fold**
# + id="aWroNAaodHp6"
num_epochs = 500
all_mae_histories = []
for i in range(k):
print(f"Processing fold #{i}")
val_data = train_data[i * num_val_samples: (i + 1) * num_val_samples]
val_targets = train_targets[i * num_val_samples: (i + 1) * num_val_samples]
partial_train_data = np.concatenate(
[train_data[:i * num_val_samples],
train_data[(i + 1) * num_val_samples:]],
axis=0)
partial_train_targets = np.concatenate(
[train_targets[:i * num_val_samples],
train_targets[(i + 1) * num_val_samples:]],
axis=0)
model = build_model()
history = model.fit(partial_train_data, partial_train_targets,
validation_data=(val_data, val_targets),
epochs=num_epochs, batch_size=16, verbose=0)
mae_history = history.history["val_mae"]
all_mae_histories.append(mae_history)
# + [markdown] id="-LZgmrq-dHp7"
# **Building the history of successive mean K-fold validation scores**
# + id="_dqZsEeldHp8"
average_mae_history = [
np.mean([x[i] for x in all_mae_histories]) for i in range(num_epochs)]
# + [markdown] id="4WiVph8HdHp9"
# **Plotting validation scores**
# + id="WCjwvF_KdHp_"
plt.plot(range(1, len(average_mae_history) + 1), average_mae_history)
plt.xlabel("Epochs")
plt.ylabel("Validation MAE")
plt.show()
# + [markdown] id="en5EP9lxdHqB"
# **Plotting validation scores, excluding the first 10 data points**
# + id="aAJUA0MldHqC"
truncated_mae_history = average_mae_history[10:]
plt.plot(range(1, len(truncated_mae_history) + 1), truncated_mae_history)
plt.xlabel("Epochs")
plt.ylabel("Validation MAE")
plt.show()
# + [markdown] id="h-TnhK0YdHqE"
# **Training the final model**
# + id="Vy1BcvgFdHqF"
model = build_model()
model.fit(train_data, train_targets,
epochs=130, batch_size=16, verbose=0)
test_mse_score, test_mae_score = model.evaluate(test_data, test_targets)
# + id="bn-nor83dHqH"
test_mae_score
# + [markdown] id="QfPTR1ltdHqI"
# ### Generating predictions on new data
# + id="l6ZDaVeJdHqJ"
predictions = model.predict(test_data)
predictions[0]
# + [markdown] id="T55IZAYWdHqL"
# ### Wrapping up
# + [markdown] id="dBVtg__udHqM"
# ## Summary
| chapter04_getting-started-with-neural-networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import community
import numpy as np
import networkx as nx
import matplotlib as mpl
from matplotlib.pyplot import imshow
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import graphviz
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
import random
import pydoc
import sys
sys.path.append("..")
from ds_navin import McmcTree as Tree
from utils import ColorPrint as _
font = {'weight' : 'normal',
'size' : 15}
mpl.rc('font', **font)
# +
### load Navin's data
D = np.loadtxt('../datasets/real/dataNavin.csv', delimiter=' ')
gensNames = np.loadtxt('../datasets/real/dataNavin.geneNames', dtype=str)
D.shape
C_num = D.shape[1]
G_num = D.shape[0]
_.print_warn( 'There is {} cells and {} mutations at {} genes in this dataset.'.format(C_num, G_num, len(gensNames)) )
### fill missed data
def tf(m, c):
    """Impute a missing genotype call for cell column ``c`` of global ``D``.

    Draws 1.0 with probability equal to the observed fraction of 1s among the
    non-missing {0, 1} calls in that column, else 0.0.

    Bug fix: the original used ``len(np.where(...))``, which is the length of
    the tuple returned by np.where (always 1 for a 1-D condition), so every
    missing entry was imputed with probability 0.5 regardless of the data.
    We count the matching entries instead. ``m`` is unused but kept for
    call-site compatibility.
    """
    ones = float(np.count_nonzero(D[:, c] == 1.))
    zeros = float(np.count_nonzero(D[:, c] == 0.))
    if ones + zeros == 0:
        # Column has no informative calls; fall back to an unbiased coin.
        return 1. if np.random.rand() < 0.5 else 0.
    return 1. if np.random.rand() < ones / (ones + zeros) else 0.
for m in range(G_num):
for c in range(C_num):
if D[m,c] == 3.:
D[m,c] = tf(m,c)
### SCITE Tree with Navin's data
SCITE_Navin_Tree = nx.DiGraph()
edges = [
('DNM3','ITGAD'), ('ITGAD','BTLA'), ('BTLA','PAMK3'), ('PAMK3', 'FCHSD2'), ('FCHSD2','LSG1'),
('LSG1','DCAF8L1'), ('DCAF8L1','PIK3CA'), ('PIK3CA','CASP3'), ('CASP3','TRIM58'), ('TRIM58','TCP11'),
('TCP11','MARCH11'), ('MARCH11','DUSP12'), ('DUSP12','PPP2RE'), ('PPP2RE','ROPN1B'), ('ROPN1B','PITRM1'),
('PITRM1','FBN2'), ('FBN2','PRDM9'), ('FBN2','GPR64'), ('PRDM9','CABP2'), ('PRDM9','ZEHX4'),
('PRDM9','H1ENT'), ('PRDM9', 'WDR16'), ('CABP2', 'TRIB2'), ('ZEHX4','DKEZ'), ('WDR16','GLCE'),
('GLCE','CALD1'), ('CABP2','C15orf23'), ('CABP2','CNDP1'), ('CNDP1','CXXX1'), ('CNDP1','c1orf223'),
('CXXX1','FUBP3'), ('c1orf223','TECTA'), ('GPR64','MUTHY'), ('MUTHY','SEC11A'), ('SEC11A','KIAA1539'),
('SEC11A','RABGAP1L'), ('RABGAP1L','ZNE318'), ('KIAA1539','FGFR2'), ('FGFR2','PLXNA2')
]
dl = list(d for d in D)
SNT = Tree(gensNames, D=D, data_list=dl, name='Paper Tree')
SNT.set_edges(edges, remove_edges=True)
_.print_bold( 'SCITE Navis\'s Tree Error:', SNT.get_best_error() )
SNT.plot_best_T('paper_tree')
# +
### Run
alpha = 0.001
beta = 0.01
### Run
dl = list(d for d in D)
T = Tree(gensNames, D, data_list=dl)
T.set_edges(edges, remove_edges=True)
# T.randomize()
# T.plot_best_T('sn_initial_tree')
T.set_rho(150)
# T.set_edges(edges, remove_edges=True)
for i in range(300):
if T.next():
break
# T.plot_best_T('sn_best_tree')
# T.plot_all_results()
# T.plot_best_T()
# -
T.calc_curr_t_error()
SNT.calc_curr_t_error()
# T.plot_best_T('sn_best_tree')
T.plot_all_results()
np.log(0.01)
# +
filename='pure_best_T.png'
pdot = nx.drawing.nx_pydot.to_pydot(T.get_tree())
pdot.write_png(filename)
img = mpimg.imread(filename)
plt.figure(figsize=(10,20))
plt.imshow(img)
plt.title('PM best tree with error:{:0.3f}'.format(1.697))
plt.axis('off')
plt.savefig(filename)
# +
filename='navin_best_T.png'
pdot = nx.drawing.nx_pydot.to_pydot(SNT.get_best_tree())
pdot.write_png(filename)
img = mpimg.imread(filename)
plt.figure(figsize=(10,20))
plt.imshow(img)
plt.title('Navin best tree with error:{:0.3f}'.format(1.823))
plt.axis('off')
plt.savefig(filename)
# -
# +
# ### Benchmarking
# best_T = T.get_best_tree()
# best_g = best_T.to_undirected()
# gt_T = T.gt_T
# gt_T = gt_T.to_undirected()
# best_pair_dists = dict(nx.all_pairs_shortest_path_length(best_g))
# gt_pair_dists = dict(nx.all_pairs_shortest_path_length(gt_T ))
# diffs = []
# for i in range(M-1):
# for j in range(i+1, M):
# best_dis = best_pair_dists[str(i)][str(j)]
# gt_dis = gt_pair_dists [i][j]
# diff = abs(best_dis - gt_dis)
# diffs.append(diff)
# means_pwd.append(np.mean(diffs))
# varia_pwd.append(np.var(diffs))
# best_engs.append(T.get_best_error())
# print(f'\trond={rond}, mean_pwd={means_pwd[-1]:0.4f}, varia_pwd={varia_pwd[-1]:0.4f}, best_eng={best_engs[-1]:0.4f}')
| src/navin_res.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import csv

# Read the budget CSV (Date, Profit/Losses) into parallel lists.
path = os.path.join("Resources", "budget_data.csv")

months = []
revenue = []
with open(path, newline="") as infile:
    # Fix: the original bound the reader to the name `csv`, shadowing the
    # module and breaking any further csv.* use.
    reader = csv.reader(infile, delimiter=",")
    next(reader)  # skip the header row
    for row in reader:
        months.append(row[0])
        revenue.append(int(row[1]))

# Aggregates (the original re-summed revenue on every loop iteration and
# counted months with a manual loop; len()/one sum() suffice).
count_months = len(months)
sum_revenue = round(sum(revenue), 2)

# Month-over-month deltas; changes[i] is the change arriving at months[i + 1].
changes = [later - earlier for earlier, later in zip(revenue[:-1], revenue[1:])]
total_avg = round(sum(changes) / len(changes), 2)

# Greatest single-month increase and decrease, with the months they landed on.
max_change = max(changes)
max_month = months[changes.index(max_change) + 1]
min_change = min(changes)
min_month = months[changes.index(min_change) + 1]

# Report (typo fixed: "Greates" -> "Greatest").
final = (f"Financial Analysis\n"
         f"----------------------\n"
         f"Total Months: {count_months}\n"
         f"Total: $ {sum_revenue}\n"
         f"Average Change: $ {total_avg}\n"
         f"Greatest Increase in Profits: {max_month} (${max_change})\n"
         f"Greatest Decrease in Profits: {min_month} (${min_change})\n")
print(final)
# -
| PyBank/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## Spark Lib
import findspark
findspark.init()
import pyspark
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.feature import StringIndexer, VectorIndexer
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.mllib.util import MLUtils
from pyspark.ml.feature import StringIndexer, IndexToString
from pyspark.ml.feature import VectorAssembler, VectorIndexer
from pyspark.ml.classification import MultilayerPerceptronClassifier
from pyspark.ml.classification import NaiveBayes
from pyspark.ml.classification import LogisticRegression
from pyspark.ml.classification import LinearSVC, OneVsRest
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.classification import DecisionTreeClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
from pyspark.ml.linalg import Vectors
from pyspark.mllib.util import MLUtils
#import pyarrow
## SKLearn Lib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.model_selection import cross_val_score
import time
start_time = time.time()
# %matplotlib inline
# -
# ## Configure parameters
# +
# Path to dataset file
#data_path='/data/biodata/Iris/'
# %store -r path
# Sample of train and test dataset
train_sample = 0.7
test_sample = 0.3
# +
# Create Spark Session
spark = SparkSession.builder \
.master("local[8]") \
.appName("MachineLearningIris") \
.getOrCreate()
# Enable Arrow-based columnar data transfers
#spark.conf.set("spark.sql.execution.arrow.enabled", "true")
# +
# Load Iris CSV dataset to Spark Dataframe
orig_data = spark.read.format("csv").options(sep=',',header='true',inferschema='true').\
load(path)
print("Original Dataframe read from CSV file")
#orig_data.dtypes
orig_data.show(5)
# +
# ML algorithms don't accept string columns => everything must be numeric!
# Create a numeric column "label" based on the string column "class".
indexer = StringIndexer(inputCol="class", outputCol="label").fit(orig_data)
label_data = indexer.transform(orig_data)
# Save the inverse map from numeric "label" to string "class" to be used later in the response.
labelReverse = IndexToString().setInputCol("label")
# Show labeled dataframe with numeric lable
print("Dataframe with numeric lable")
label_data.show(5)
# +
# Drop the string column "class" so no string column remains.
label_data = label_data.drop("class")
# Most Machine Learning libs expect 2 columns: label (output) and features (input).
# The label column is the target used to train the ML algorithm.
# The features column joins all input parameters into a single Vector.
# Columns that are NOT part of the features list:
ignore = ['label']
# Fixed: renamed `list` -> `feature_cols`; the original shadowed the builtin `list`.
feature_cols = [x for x in label_data.columns if x not in ignore]
# VectorAssembler builds the vector of features from the remaining columns.
assembler = VectorAssembler(
    inputCols=feature_cols,
    outputCol='features')
# Create the final dataframe composed of a label and a features-vector column.
data = (assembler.transform(label_data).select("label","features"))
print("Final Dataframe suitable to classifier input format")
#data.printSchema()
data.show(5)
# +
# Randomly split the dataset into train and test subsets.
# [0.7, 0.3]  => 70% for train and 30% for test
# [1.0, 0.2]  => 100% for train and 20% for test -- not good, accuracy is always 100%
# [0.1, 0.02] => 10% for train and 2% for test, useful for big datasets
# 1234 is the random seed (fixed for reproducibility)
(train, test) = data.randomSplit([train_sample, test_sample], 1234)
# +
start_time_svm = time.time()
# Create the linear SVM trainer and set its parameters.
trainer = LinearSVC(featuresCol='features', labelCol='label',\
                    maxIter=100, regParam=0.1)
# LinearSVC classifies into ONLY two classes.
# To classify into more than 2 classes, OneVsRest must be used
# (it can wrap any kind of binary classifier).
# Instantiate the One-Vs-Rest classifier around the binary SVM.
ovr_trainer = OneVsRest(classifier=trainer)
# Train the multiclass model.
model = ovr_trainer.fit(train)
# Score the model on the held-out test data.
result_svm = model.transform(test)
# +
# Compute accuracy of the predictions on the test set.
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction",\
                                              metricName="accuracy")
accuracy_svm = evaluator.evaluate(result_svm) * 100
time_svm = time.time() - start_time_svm
print("Suport Vector Machines (SVM): accuracy = %3.1f %%" % accuracy_svm)
print("Suport Vector Machines (SVM): time = %3.3f s" % time_svm)
# -
print("Suport Vector Machines (SVM) Final Result")
result_svm.show()
| Workloads/machine-learning/SVM_Iris.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/julianovale/pythonparatodos/blob/main/M%C3%B3dulo04Aula06.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="df3uqhCHUNMM"
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 111} id="ETyrfctcUfjQ" outputId="81227fc6-3664-4dc4-b636-3adb78e4344c"
# 2x2 frame indexed by 'H'/'M' with columns 'F'/'C'
# (presumably category labels from the course material -- TODO confirm).
df = pd.DataFrame(data = [[4,5],[15,3]],index=['H','M'],columns=['F','C'])
df
# + colab={"base_uri": "https://localhost:8080/"} id="EVnk0sgEU_qQ" outputId="5aab7abd-e405-46ba-e1f8-0afbee8cf904"
# axis=1: sum across columns, i.e. one total per row
soma_linha = df.sum(axis = 1)
print(soma_linha)
# + colab={"base_uri": "https://localhost:8080/"} id="V5_fhkpnVrmQ" outputId="566aab15-80ec-4a18-a84c-0a557d4be9f5"
# axis=0: sum down the rows, i.e. one total per column
soma_coluna = df.sum(axis = 0)
print(soma_coluna)
| Módulo04Aula06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Various torch packages
import torch
import torch.nn as nn
import torch.nn.functional as F
# torchvision
from torchvision import datasets, transforms
# ------------------------
# get up one directory
import sys, os
sys.path.append(os.path.abspath('../'))
# ------------------------
# custom packages
import models.aux_funs as maf
import optimizers as op
import regularizers as reg
import train
import math
import utils.configuration as cf
import utils.datasets as ud
from utils.datasets import get_data_set, GaussianSmoothing
from models.fully_connected import fully_connected
# -
# -----------------------------------------------------------------------------------
# Fix random seed
# -----------------------------------------------------------------------------------
random_seed = 2
cf.seed_torch(random_seed)
# # Parameters
# Hyper-parameter configuration for this training run.
# NOTE(review): the keys are consumed by utils.configuration.Conf; the meanings
# annotated below are inferred from usage in this notebook -- confirm against cf.Conf.
conf_args = {#
    # data specification
    'data_file':"../../Data", 'train_split':0.95, 'data_set':"Fashion-MNIST", 'download':False,
    # cuda
    'use_cuda':True, 'num_workers':4, 'cuda_device':0, 'pin_memory':True,
    #
    'epochs':100,
    # optimizer (lamda_0 / lamda_1 feed the regularizers built in init_opt)
    'delta':1.0, 'lr':0.001, 'lamda_0':0.05, 'lamda_1':0.05, 'optim':"AdaBreg", 'row_group':True,
    'reg':reg.reg_l1_l2, 'beta':0.0,
    # model
    'model_size':7*[28*28], 'act_fun':torch.nn.ReLU(),
    # initialization ('r' is indexed as r[0], r[1] by the sparsifiers below)
    'sparse_init':0.03, 'r':[1,5,1],
    # misc
    'random_seed':random_seed, 'eval_acc':True, 'name':'---', 'super_type':'---'
}
conf = cf.Conf(**conf_args)
# # Define DenseNet model
# +
class BasicBlock(nn.Module):
    """DenseNet basic block: 3x3 conv + ReLU whose output is concatenated to the input.

    Forward maps (N, in_planes, H, W) -> (N, in_planes + out_planes, H, W).
    """

    def __init__(self, in_planes, out_planes):
        super(BasicBlock, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.out_planes = out_planes
        self.in_planes = in_planes
        # NOTE(review): bn1 is registered but never applied in forward(); it is kept
        # because removing it would change the module's parameter list and
        # state_dict keys, which the sparsification/optimizer setup may rely on.
        self.bn1 = nn.BatchNorm2d(out_planes)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=1, padding=1)
        # Fixed: removed dead local `s = torch.zeros((self.in_planes,))`
        # (it was never stored on self nor used).

    def forward(self, x):
        out = self.relu(self.conv1(x))
        # dense connectivity: concatenate input and new features along the channel dim
        return torch.cat([x, out], 1)
class DenseBlock(nn.Module):
    """A stack of `num_layers` BasicBlocks with dense (concatenating) connectivity.

    Each block adds `out_planes` channels, so block i receives
    in_planes + i * out_planes input channels.
    """

    def __init__(self, num_layers, in_planes, out_planes):
        super(DenseBlock, self).__init__()
        blocks = [
            BasicBlock(in_planes + idx * out_planes, out_planes)
            for idx in range(num_layers)
        ]
        self.layer = nn.Sequential(*blocks)

    def forward(self, x):
        return self.layer(x)
class LinearBlock(nn.Module):
    """ReLU-activated linear head applied to the LAST channel slice of the input.

    forward: (N, C, H, W) -> take x[:, -1, :] -> flatten -> Linear -> ReLU.
    """

    def __init__(self, in_size, out_size):
        super(LinearBlock, self).__init__()
        self.linear = nn.Linear(in_size, out_size)
        self.act = nn.ReLU()

    def forward(self, x):
        # keep only the last channel, then flatten the spatial dims
        last_channel = x[:, -1, :]
        flat = nn.Flatten()(last_channel)
        return self.act(self.linear(flat))
class DenseNet(nn.Module):
    """Small DenseNet: conv stem -> one dense block -> transition -> linear head.

    The input is normalised with (x - mean) / std before the stem.
    """

    def __init__(self, depth, planes, num_classes, mean=0.0, std=1.0, im_channels=3, im_size=32):
        super(DenseNet, self).__init__()
        # normalisation constants applied to the raw input
        self.mean = mean
        self.std = std
        self.depth = depth
        self.planes = planes
        self.num_classes = num_classes
        # attribute names (conv1/block1/trans1/fc) are kept stable for state_dict
        self.conv1 = nn.Conv2d(im_channels, planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = DenseBlock(depth, planes, planes)
        self.trans1 = BasicBlock((depth + 1) * planes, 1)
        self.fc = LinearBlock(im_size * im_size, num_classes)

    def forward(self, x):
        h = (x - self.mean) / self.std
        h = self.conv1(h)
        h = self.block1(h)
        h = self.trans1(h)
        return self.fc(h)
# +
# -----------------------------------------------------------------------------------
# define the model and an instance of the best model class
# -----------------------------------------------------------------------------------
# dataset statistics and image shape come from the Conf object
model_kwargs = {'mean':conf.data_set_mean, 'std':conf.data_set_std, 'im_size':conf.im_shape[1],
                'im_channels':conf.im_shape[0]}
# DenseNet(depth=5, planes=12, num_classes=10)
model = DenseNet(5, 12, 10, **model_kwargs)
# sparsify the freshly initialised weights
# (maf helpers are project-local; see models.aux_funs)
maf.sparse_bias_uniform_(model, 0,conf.r[0])
maf.sparse_weight_normal_(model, conf.r[1])
maf.sparsify_(model, conf.sparse_init, row_group = conf.row_group)
model = model.to(conf.device)
# +
# -----------------------------------------------------------------------------------
# weight re-initialisation helper (used to reset the model before training)
# -----------------------------------------------------------------------------------
model_kwargs = {'mean':conf.data_set_mean, 'std':conf.data_set_std}
def init_weights(conf, model):
    """Re-initialise and sparsify `model` in place, then move it to conf.device.

    Returns the (same) model object for convenience.
    """
    # sparsify linear and conv layers with the project's aux functions
    maf.sparse_bias_uniform_(model, 0, conf.r[0])
    maf.sparse_weight_normal_(model, conf.r[1])
    maf.sparse_weight_normal_(model, conf.r[1],ltype=nn.Conv2d)
    maf.sparsify_(model, conf.sparse_init, ltype = nn.Conv2d, row_group = conf.row_group)
    model = model.to(conf.device)
    return model
# +
# -----------------------------------------------------------------------------------
# Optimizer
# -----------------------------------------------------------------------------------
def get_skips(model):
    """Yield the `skips` tensor of every submodule that defines one."""
    return (sub.skips for sub in model.modules() if hasattr(sub, 'skips'))
def print_skips(model):
    """Print each submodule's `skips` values rounded to three decimals (CPU copy)."""
    for sub in model.modules():
        if not hasattr(sub, 'skips'):
            continue
        rounded = 0.001 * torch.round(1000 * sub.skips.data).cpu()
        print(rounded)
def skips_to_list(model):
    """Collect every submodule's `skips` values as nested Python lists."""
    return [sub.skips.data.tolist() for sub in model.modules() if hasattr(sub, 'skips')]
def init_opt(conf, model):
    """Build the optimizer and LR scheduler for `model` according to `conf`.

    Returns (opt, scheduler).
    """
    # Get access to the different parameter groups of the model (project helpers)
    weights_linear = maf.get_weights_linear(model)
    weights_conv = maf.get_weights_conv(model)
    biases = maf.get_bias(model)
    skips = get_skips(model)
    # -----------------------------------------------------------------------------------
    # Initialize optimizer
    # -----------------------------------------------------------------------------------
    # regularizers; NOTE(review): reg1 is built but never used below -- confirm it can go
    reg1 = conf.reg(lamda=conf.lamda_0)
    reg2 = reg.reg_l1(lamda=conf.lamda_1)
    reg3 = reg.reg_l1_l2_conv(lamda=conf.lamda_0)
    if conf.optim == "SGD":
        opt = torch.optim.SGD(model.parameters(), lr=conf.lr, momentum=conf.beta)
    elif conf.optim == "AdaBreg":
        # separate parameter groups so conv weights and skips get their own regularizer
        opt = op.AdaBreg([{'params': weights_linear, 'lr' : conf.lr},
                          {'params': weights_conv, 'lr' : conf.lr, 'reg' : reg3},
                          {'params': biases, 'lr': conf.lr},
                          {'params': skips, 'lr':conf.lr, 'reg':reg2}])
    # NOTE(review): if conf.optim is neither "SGD" nor "AdaBreg", `opt` is unbound
    # and the line below raises UnboundLocalError.
    # learning rate scheduler: halve the LR when the training loss plateaus
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, factor=0.5, patience=5,threshold=0.01)
    return opt, scheduler
# -
save_params = False
if save_params:
conf.write_to_csv()
# # Dataset
train_loader, valid_loader, test_loader = ud.get_data_set(conf)
# # History and Run Specification
# -----------------------------------------------------------------------------------
# initalize history
# -----------------------------------------------------------------------------------
tracked = ['loss', 'node_sparse']
train_hist = {}
val_hist = {}
# # Training
# +
# -----------------------------------------------------------------------------------
# Reinit weights and the corresponding optimizer
# -----------------------------------------------------------------------------------
model = init_weights(conf, model)
opt, scheduler = init_opt(conf, model)
# -----------------------------------------------------------------------------------
# train the model
# -----------------------------------------------------------------------------------
for epoch in range(conf.epochs):
    # visual separator between epochs in the log
    print(25*"<>")
    print(50*"|")
    print(25*"<>")
    print('Epoch:', epoch)
    # ------------------------------------------------------------------------
    # train step, log the accuracy and loss
    # ------------------------------------------------------------------------
    train_data = train.train_step(conf, model, opt, train_loader)
    # update training history for every tracked metric present in the result
    for key in tracked:
        if key in train_data:
            var_list = train_hist.setdefault(key, [])
            var_list.append(train_data[key])
    # ------------------------------------------------------------------------
    # validation step
    val_data = train.validation_step(conf, model, opt, valid_loader)
    print_skips(model)
    # update validation history; list-valued metrics are split into key_0, key_1, ...
    for key in tracked:
        if key in val_data:
            var = val_data[key]
            if isinstance(var, list):
                for i, var_loc in enumerate(var):
                    key_loc = key+"_" + str(i)
                    var_list = val_hist.setdefault(key_loc, [])
                    val_hist[key_loc].append(var_loc)
            else:
                var_list = val_hist.setdefault(key, [])
                var_list.append(var)
    # plateau scheduler is driven by the TRAINING loss
    scheduler.step(train_data['loss'])
    print("Learning rate:",opt.param_groups[0]['lr'])
    #best_model(train_data['acc'], val_data['acc'], model=model)
| notebooks/DenseNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
plt.rc('text', usetex=True)
plt.rcParams['text.latex.preamble'] = r"\usepackage{amsmath}"
plt.rcParams['font.family'] = 'monospace'
from scipy.interpolate import Rbf, InterpolatedUnivariateSpline
import pickle
import matplotlib.image as mpimg
from matplotlib.lines import Line2D
# + [markdown] tags=[]
# ________________
# ## Function for making the plots
# +
def TErr(err=()):
    """Total error: combine independent errors in quadrature, sqrt(sum(err_i^2)).

    Fixed: the mutable default argument `err=[]` was replaced with an immutable
    tuple; behaviour is unchanged (TErr() still returns 0.0).
    """
    return np.sqrt(np.sum([x**2 for x in err]))
def formatter(data):
    """Turn rows of [value, error, q2_low, q2_high] into errorbar arrays.

    Returns (x, y, xerr, yerr) where x is the bin centre and xerr the half-width.
    """
    x, y, xerr, yerr = [], [], [], []
    for value, error, lo, hi in data:
        y.append(value)
        yerr.append(error)
        x.append((lo + hi) / 2)
        xerr.append((hi - lo) / 2)
    return x, y, xerr, yerr
def placeLogo(bottom, left):
    """Overlay the HEPfit logo on the current figure at the given position.

    `bottom` and `left` are data coordinates of the logo's lower-left corner
    on a hidden secondary axis with y in [0, 1].
    """
    # secondary axis so the logo does not disturb the data limits
    overlay = plt.twinx()
    overlay.axis('off')
    overlay.set_ylim(bottom=0, top=1)
    logo = mpimg.imread('../../plots/logo/HEPfit-logo.png')
    size = 0.5
    # 2.3398 is the logo's width/height aspect ratio
    top = bottom + size
    right = left + size*2.3398
    plt.imshow(logo, extent=(left, right, bottom, top), alpha=0.85)
# -
# __________________________
# ## Experimental Data
# +
# Experimental Data
### arXiv:1612.05014
P5p_BELLE = [[0.42, 0.414367, 0.1, 4.], [-0.025, 0.318002, 4., 8.]]
### CMS-PAS-BPH-15-008
P5p_CMS = [[0.105, 0.33708, 1., 2.], [-0.555, 0.35795, 2., 4.3],
[-0.955, 0.268, 4.3, 6.], [-0.66, 0.22023, 6., 8.68]]
### arXiv:1805.04000
P5p_ATLAS = [[0.67, TErr([0.26, 0.16]), 0.04, 2.], [-0.33, TErr([0.31, 0.13]), 2., 4.],
[0.26, TErr([0.35, 0.18]), 4., 6.]]
### arXiv:2003.04831
P5p_LHCb = [[0.521, TErr([0.095, 0.024]), 0.10, 0.98], [0.365, TErr([0.122, 0.013]), 1.1, 2.5],
[-0.150, TErr([0.144, 0.032]), 2.5, 4.], [-0.439, TErr([0.111, 0.036]), 4., 6.],
[-0.583, TErr([0.090, 0.030]), 6., 8.]]
data_d = {}
data_d['Belle'] = P5p_BELLE
data_d['CMS'] = P5p_CMS
data_d['ATLAS'] = P5p_ATLAS
data_d['LHCb'] = P5p_LHCb
# -
# __________________
# ## Dump data for $P_5^\prime$
#
# __NOTE:__ Do not run this unless you have the data. You can load data below.
# +
# set dump to True to dump data
dump = False
if dump:
FDD_path = '../../../TheNewHope/PSR3/SM/FDD/SM/p5p.txt'
PDD_path = '../../../TheNewHope/PSR3/SM/PDD/SM/p5p.txt'
PMD_path = '../../../TheNewHope/PSR3/SM/PMD/SM/p5p.txt'
LHCb_bins = [[x[2], x[3]] for x in P5p_LHCb]
# data for P5p FDD
P5p_FDD = pd.read_csv(FDD_path, header=None)
P5p_FDD.columns = ['mean', 'sd']
P5p_FDD['upper'] = P5p_FDD['mean'] + P5p_FDD['sd']
P5p_FDD['lower'] = P5p_FDD['mean'] - P5p_FDD['sd']
P5p_FDD['bins'] = LHCb_bins
# data for P5p PDD
P5p_PDD = pd.read_csv(PDD_path, header=None)
P5p_PDD.columns = ['mean', 'sd']
P5p_PDD['upper'] = P5p_PDD['mean'] + P5p_PDD['sd']
P5p_PDD['lower'] = P5p_PDD['mean'] - P5p_PDD['sd']
P5p_PDD['bins'] = LHCb_bins
# data for P5p PMD
P5p_PMD = pd.read_csv(PMD_path, header=None)
P5p_PMD.columns = ['mean', 'sd']
P5p_PMD['upper'] = P5p_PMD['mean'] + P5p_PMD['sd']
P5p_PMD['lower'] = P5p_PMD['mean'] - P5p_PMD['sd']
P5p_PMD['bins'] = LHCb_bins
data = {}
data['FDD'] = P5p_FDD
data['PDD'] = P5p_PDD
data['PMD'] = P5p_PMD
with open('../../data/bsll_2021/P5p_SM.data', 'wb') as f:
pickle.dump(data, f)
# + [markdown] tags=[]
# ______________
# ## Load Data
# -
with open('../../data/bsll_2021/P5p_SM.data', 'rb') as f:
data = pickle.load(f)
# ______________________
# ## $P_5^\prime$ plot
# +
plt.figure(figsize=(6,4))
colors = ['#E18AD4', '#63a088', '#6699CC', '#56203d']
style = ['dashdot', '--', ':', '-']
bands = ['orange', 'crimson', 'limegreen']
for i, had in enumerate(['FDD', 'PDD', 'PMD']):
for row in data[had].iterrows():
item = row[1]
plt.fill_between(item['bins'], [item.upper, item.upper], [item.lower, item.lower], alpha=0.5, color=bands[i])
for i, key in enumerate(['Belle', 'CMS', 'ATLAS', 'LHCb']):
x, y, xerr, yerr = formatter(data_d[key])
eb = plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='o', color=colors[i], ecolor=colors[i], elinewidth=2, capsize=8, markersize=8)
eb[-1][0].set_linestyle(style[i])
eb[-1][1].set_linestyle(style[i])
# settings for the plot
plt.xlim(0,9)
plt.ylim(-1.,1.)
plt.grid(':', alpha=0.4)
plt.xlabel(r'$q^2\ [\rm{GeV}^2]$', fontsize=16)
plt.ylabel(r'$P_5^\prime$', fontsize=16)
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=14)
# add the logo
# placeLogo(2.5, 3.8)
# make the legend
size = 10
line0 = Line2D([0], [0], color=colors[0], linewidth=2, linestyle=style[0], solid_capstyle='butt', alpha=0.8)
line1 = Line2D([0], [0], color=colors[1], linewidth=2, linestyle=style[1], solid_capstyle='butt', alpha=0.8)
line2 = Line2D([0], [0], color=colors[2], linewidth=2, linestyle=style[2], solid_capstyle='butt', alpha=0.8)
line3 = Line2D([0], [0], color=colors[3], linewidth=2, linestyle=style[3], solid_capstyle='butt', alpha=0.8)
line4 = Line2D([0], [0], color=bands[0], linewidth=6, linestyle='-', solid_capstyle='butt', alpha=0.5)
line5 = Line2D([0], [0], color=bands[1], linewidth=6, linestyle='-', solid_capstyle='butt', alpha=0.5)
line6 = Line2D([0], [0], color=bands[2], linewidth=6, linestyle='-', solid_capstyle='butt', alpha=0.5)
labels = [r'$\rm{Belle}$', r'$\rm{CMS}$', r'$\rm{ATLAS}$', r'$\rm{LHCb}$', r'$\rm{Data\ Driven}$', r'$\rm{LCSR\ @\ q^2\le1}$', r'$\rm{LCSR}$']
leg = plt.figlegend(handles=[line0, line1, line2, line3, line4, line5, line6], labels=labels, handlelength=2., labelspacing=0.15, bbox_to_anchor=[0.975, 0.95],
loc='upper right', prop={'size': size}, ncol=1, fancybox=True, framealpha=1, columnspacing=1)
plt.tight_layout()
plt.savefig('../../plots/bsll_2021/P5p.pdf', dpi=300)
plt.show()
| notebooks/bsll_2021/P5p.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting started
#
# ## Interactive usage
#
# Niche Vlaanderen can be used interactively in Python. For interactive use, we recommend using the [Jupyter notebook](https://inbo.github.io/niche_vlaanderen/installation.html#running-niche). This allows one to save documentation, code and results in the same file.
#
# This file itself is a notebook, and if you want to reproduce our result this is possible:
#
# * Download the [niche sourcecode](https://github.com/inbo/niche_vlaanderen/releases) (use the zip file).
# * Extract the file and navigate to the `docs` folder from the anaconda prompt. Make sure you also extract the testcases directory, as this contains the data we will be using.
# * Activate the niche environment: `activate niche` (not necessary if you used the alternative install).
# * Run Jupyter notebook: `jupyter notebook`. This should open a web page (similar or equal to http://localhost:8888 ) - check your anaconda prompt for a link if this is not the case.
# * Navigate your web browser to the `getting_started.ipynb` file (in the Files tab, which should be opened by default).
# * Any cell with code can be run by pressing Ctrl+Enter. If you are unfamiliar with notebooks, you can take some time familiarizing yourself by taking the User interface tour from the `Help` menu.
#
# ## Steps to create a Niche model
# To calculate a Niche model one has to take the following steps:
#
# * Initialize the model (create a ``Niche`` object)
# * Add the different input grids (or constant values) to the model (``set_input`` method)
# * Run the model (``run`` method)
# * Optionally inspect the results using ``plot()`` and ``table``.
# * Save the results (``write`` method).
#
# These steps are mirrored in the design of the ``Niche`` class which is given below.
#
# Optionally the user can also create difference maps showing how much MHW/MLW has
# to change to allow a certain vegetation type. This is done using the ``deviation`` parameter of the ``run`` method.
#
#
#
# ## Creating a simple NICHE model
#
# For our first example, we will be creating a [simple model](https://inbo.github.io/niche_vlaanderen/vegetatie.html#eenvoudig-niche-model), using only MHW, MLW and soil for the predictions.
#
# The first step is importing the `niche_vlaanderen` module. For convenience, we will be importing as `nv`.
import niche_vlaanderen as nv
# ### Creating a niche object
# Next we create a `Niche` object. This object will hold all the data and results for an analysis.
simple = nv.Niche()
# ### Adding input files
#
# After initialization, we can add input layers to this object, using the `set_input` method.
simple.set_input("mhw","../testcase/zwarte_beek/input/mhw.asc")
simple.set_input("mlw","../testcase/zwarte_beek/input/mlw.asc")
simple.set_input("soil_code","../testcase/zwarte_beek/input/soil_code.asc")
# ### Running the model
# These three input files are the only ones required for running a simple NICHE model. This means we can already run our model.
simple.run(full_model=False)
# ### Inspecting the model
# After a model is run, we can inspect the results using the `table` method. Note that the values are given in ha. In the example below we also use the [head](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html) function to show only the first five rows.
simple.table.head()
# The returned object is a [Pandas dataframe](https://pandas.pydata.org/pandas-docs/stable/10min.html) which makes it easy to manipulate (for example calculating a crosstabulation, filtering, ...) or save it to a file. Below we present two examples which can be useful when working with these data. The first is saving the data to a csv file.
simple.table.to_csv("demo.csv")
# By using the pandas [pivot_table](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.pivot_table.html#pandas.DataFrame.pivot_table) method, we can create a summarized table. Note that only the first 5 rows are shown because we use the [head](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.head.html) function
# +
result = simple.table.pivot_table(index="vegetation",
values="area_ha",
columns="presence",
fill_value=0).head()
result
# -
# It is also possible to show actual grids using the `plot` method.
simple.plot(11)
import matplotlib.pyplot as plt
# %matplotlib inline
plt.show()
# It is possible to give your model a `name` - this will be shown when plotting and will be used when writing the files.
# +
simple.name="scen_1"
simple.plot(11)
plt.show()
# -
# ### Saving the model
# The model results can be saved to disk using the ``write`` function. As an argument, this takes the directory to which you want to save, \_output_scen1 in our example. When saving a model, the log file, containing the model configuration, a summary table and all 28 vegetation grids will be saved.
# Note we specify the `overwrite_files` option so subsequent calls of the example will not raise an error
simple.write("_output_scen1", overwrite_files=True)
# Below you can see the list of files that is generated by this operation: There are the 28 vegetation grids (`*.tif` files), the summary table (`summary.csv`) and a logfile (`log.txt`). All files are prepended with the model `name` we set earlier.
import os
os.listdir("_output_scen1")
# ### Showing the model configuration
# While using niche, it is always possible to view the configuration by typing the object name.
simple
# <div class="alert alert-info">
# Note that this overview contains the same information as the logfile which we wrote before. Later on, we will show that this can be used as input when running Niche with a configuration file (either from [within Python](advanced_usage.ipynb#Using-config-files) or from the [command line](cli.rst)).
# </div>
# ## Running a full Niche model
#
# A full Niche model requires more inputs than only mhw, mlw and soil_code. The full list can be found in the [documentation](cli.rst#full-model). It is also possible to look at the `minimal_input` set. When trying to run a model without sufficient inputs, a warning will be generated.
nv.niche.minimal_input()
# If we add all the required values, we can run the full model. Note that it is possible to set a constant value instead of a complete grid
# Full model: instantiate Niche and register every required input layer.
full = nv.Niche()
path = "../testcase/zwarte_beek/input/"
# Grid inputs are ASCII rasters; scalar inputs (0) are constants applied everywhere.
full.set_input("mhw", path + "mhw.asc")
full.set_input("mlw", path + "mlw.asc")
full.set_input("soil_code", path + "soil_code.asc")
full.set_input("nitrogen_animal", 0)
# the same inundation grid feeds both the acidity and the nutrient sub-models
full.set_input("inundation_acidity", path + "inundation.asc")
full.set_input("inundation_nutrient", path + "inundation.asc")
full.set_input("nitrogen_fertilizer",0)
full.set_input("minerality", path + "minerality.asc")
full.set_input("management", path + "management.asc")
full.set_input("nitrogen_atmospheric", 0)
full.set_input("msw", path + "msw.asc")
full.set_input("rainwater", 0)
full.set_input("seepage", path + "seepage.asc")
# run with the default full_model=True now that all inputs are present
full.run()
# We can look at the full model using the same `table` and `plot` functions as we used for the simple model.
full.table.head()
# Comparing to the simple model, one can observe that the area where a vegetation type can be present is always smaller than in the simple model.
simple.table.head()
# In the next tutorial, we will focus on more [advanced methods for using the package](https://inbo.github.io/niche_vlaanderen/advanced_usage.html), starting with the comparison of these two models.
| docs/getting_started.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pystilts import tpipe, addcol
# +
# from the pystilts sourcecode
def __checkq__(expression):
    """Strip any single/double quotes from *expression* and wrap it in double quotes."""
    cleaned = expression.replace('"', '').replace("'", '')
    return '"{0}"'.format(cleaned)
# -
mocLocation="ChandraMOC13_nograting.fits"
ztfobjects_path='/Users/cxc/CDAAnnotation/example_notebooks/VOTables/ztf_api_stamp_objects.xml'
outfile='test.xml'
exp="""nearMoc(\\"%s\\", meanra, meandec, 0.02)""" % mocLocation
exp='"{0}"'.format(exp)
print(exp)
#adds column with nearMoc boolean
addcol(name='nearMoc', expression=exp,
before=None, after=None, units=None, ucd=None,
desc=None, infile=ztfobjects_path, outfile=outfile)
| FITS_handling/junk_notes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/brendontj/CC-Fraud-Detection/blob/master/cc_fraud_detector.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="yNc2f7lJTRQn"
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score, StratifiedKFold
from sklearn.feature_selection import mutual_info_classif, SelectKBest
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, roc_curve, roc_auc_score, mean_absolute_error, accuracy_score, plot_roc_curve, auc
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from math import sqrt
# + [markdown] id="JaBZqp3BOa-Y"
# O conjunto de dados apresenta em suma dados numéricos obtidos após transformação PCA (Principal Component Analysis). Não foi possível obter os dados previamente a esta transformação.
# + [markdown] id="VWXQdTqzY2NR"
# Leitura do dataset de entrada com informações referentes a transações de cartão de crédito.
# + colab={"base_uri": "https://localhost:8080/", "height": 241} id="eZUVuwvxffVP" outputId="2a4d4ec1-de45-43fc-dfd7-c23c60ab1fe6"
data = pd.read_csv('creditcard.csv')
print('Quantidade de linhas do dataset {}'.format(data.shape[0]))
data.head()
# + [markdown] id="w7jDIwMPOrus"
# - Removemos os registros que apresentam features com valores faltantes
# - Removemos a feature "time" por achar que a mesma não é relevante para predizer se uma transação é ou não fraudulenta
# + colab={"base_uri": "https://localhost:8080/", "height": 241} id="CfTeMAhsg_MW" outputId="6c25a120-9238-4ba9-b657-a6fcc93dcbfe"
df = data.dropna()
df = df.drop(columns="Time")
df['ID']= np.arange(1,len(df.Class)+1)
print('Quantidade de linhas do dataset sem valor Null/NaN/NaT {}'.format(df.shape[0]))
df.head()
# + [markdown] id="TqCDFMkhQM_E"
# Particionamos o dataset de entrada em 80% para o conjunto de treino e 20% para o conjunto de teste.
# + colab={"base_uri": "https://localhost:8080/"} id="GcqF7KN6igzQ" outputId="510bee01-d9d1-40dd-994b-4f846d52e050"
x_train, x_test, y_train, y_test = train_test_split(df.drop(['ID', 'Class'], axis=1), df['Class'], test_size=0.20, random_state = 0)
print('Dados de treino {}\n'.format(x_train.shape))
print('Dados de teste {}\n'.format(x_test.shape))
# + id="OLscTbRRBcLL"
df_train = x_train.copy()
df_train['Class'] = y_train
df_test = x_test.copy()
df_test['Class'] = y_test
# + [markdown] id="5wEvSRSBQd4f"
# Descrição estatística do conjunto de treino
# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="1gaKurr2lN-V" outputId="c8d75992-f5eb-4e37-c4f6-f7cce4f65b29"
df_train.describe()
# + [markdown] id="-PTYipQtQoAB"
# Descrição estatística do conjunto de teste
# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="1VR9euGZlQgg" outputId="b1987be0-41ae-4a07-bf4a-f85f38fb1f6e"
df_test.describe()
# + [markdown] id="vWOs48cfSpOU"
# Contagem dos valores de cada classe. 0 indicando uma transação onde não há fraude e 1 indicando uma fraude.
# + colab={"base_uri": "https://localhost:8080/"} id="RC10tOWjIU8t" outputId="be48cb76-4afe-4d47-9e8e-c12b7a4c7404"
df_train['Class'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="Ho9sDYS_IZ88" outputId="83db54d3-0493-458f-e9d5-164aeb033c97"
df_test['Class'].value_counts()
# + [markdown] id="vs-IeSx3S2ej"
# Gráficos com a quantidade de cada classe nos conjuntos de dados
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="2rsmquWsJUEb" outputId="a15acbe8-2176-45bf-8eef-3ebb553ef6ef"
ax = sns.countplot(x="Class", data=df_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="-OJgW1w0Jk_-" outputId="4a38ad3e-2d61-4c6b-93ad-6c33343b28f1"
bx = sns.countplot(x="Class", data=df_test)
# + colab={"base_uri": "https://localhost:8080/"} id="tsoWBnd_GnzL" outputId="21dfb0a6-429d-4434-e19c-9e67dbc0ae6b"
n_fraudulent_transactions = df_train['Class'].value_counts()[1]
print('Quantidade de transações fraudulentas no dataset de treino ({}) representando um total de ({})% do dataset'.format(n_fraudulent_transactions, (n_fraudulent_transactions/df_train.shape[0])*100))
n_fraudulent_transactions = df_test['Class'].value_counts()[1]
print('Quantidade de transações fraudulentas no dataset de teste ({}) representando um total de ({})% do dataset'.format(n_fraudulent_transactions, (n_fraudulent_transactions/df_test.shape[0])*100))
# + [markdown] id="gROHtou2TStE"
# Utilizamos o `mutual_info_classif` para estimar informações através de testes estatísticos, auxiliando na seleção de atributos que possuem forte relacionamento com a variável que estamos tentando prever.
# + colab={"base_uri": "https://localhost:8080/"} id="UgGdYx7flG_D" outputId="35d9a61d-3c2d-476e-85bd-ddf5e4cad71a"
mic = mutual_info_classif(x_train, y_train)
mic
# + colab={"base_uri": "https://localhost:8080/", "height": 638} id="l9fETRgXl-c8" outputId="5635f342-8d31-44e2-9fcf-0caff73e0171"
mic = pd.Series(mic)
mic.index = x_train.columns
mic = mic.sort_values(ascending = True)
mic.plot.bar(figsize=(22,10))
# + [markdown] id="_fAwqTgKUcdX"
# Selecionamos as K variáveis que mais se relacionam com a coluna que indica a classificação da transação. `k=22`
# + id="NdBDaW-HnABb"
selection = SelectKBest(mutual_info_classif, k= 22).fit(x_train, y_train)
X_train = x_train[x_train.columns[selection.get_support()]]
X_test = x_test[x_test.columns[selection.get_support()]]
# + [markdown] id="UGLg7-cjVC7g"
# Função utilizada para gerar as curvas do K fold cross validation
# + id="pZWsc5L893EQ"
def plot_Kfold_cross_validation_curves(md, x_data, y_data):
    """Plot per-fold ROC curves plus the mean ROC for 5-fold stratified CV.

    Adapted from the scikit-learn "ROC with cross validation" example.
    NOTE(review): `plot_roc_curve` was deprecated in scikit-learn 1.0 and
    removed in 1.2 (replaced by `RocCurveDisplay.from_estimator`) — confirm
    the pinned sklearn version before reusing this notebook.

    Parameters:
        md: classifier instance; it is re-fit in place on every fold.
        x_data, y_data: feature matrix and labels (pandas objects; `.iloc`
            positional indexing is used on both).
    """
    cv = StratifiedKFold(n_splits=5)
    tprs = []  # per-fold TPR curves interpolated onto a common FPR grid
    aucs = []  # per-fold AUC values
    mean_fpr = np.linspace(0, 1, 100)
    fig, ax = plt.subplots()
    for i, (train, test) in enumerate(cv.split(x_data, y_data)):
        md.fit(x_data.iloc[train], y_data.iloc[train])
        viz = plot_roc_curve(md, x_data.iloc[test], y_data.iloc[test],
                         name='ROC fold {}'.format(i),
                         alpha=0.3, lw=1, ax=ax)
        # Interpolate this fold's ROC onto the shared FPR grid so the folds
        # can be averaged point-wise; force the curve through (0, 0).
        interp_tpr = np.interp(mean_fpr, viz.fpr, viz.tpr)
        interp_tpr[0] = 0.0
        tprs.append(interp_tpr)
        aucs.append(viz.roc_auc)
    # Chance (random classifier) diagonal for reference.
    ax.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
            label='Chance', alpha=.8)
    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0  # force the mean curve through (1, 1)
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(aucs)
    ax.plot(mean_fpr, mean_tpr, color='b',
            label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
            lw=2, alpha=.8)
    # Shade +/- one standard deviation around the mean curve.
    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
                    label=r'$\pm$ 1 std. dev.')
    ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05],
           title="ROC for K fold cross-validation curves")
    ax.legend(loc="lower right")
    plt.show()
# + [markdown] id="owxa6ZjPOKhh"
# # Random Forest
# + [markdown] id="e1ixcAFfjL3S"
# Utilizaremos a classe padrão do classificador Random Forest, não utilizamos variações na parametrização da classe devido a obtenção de um resultado satisfatório com os parâmetros padrões.
# + colab={"base_uri": "https://localhost:8080/"} id="aFymemqQq2JH" outputId="a910eead-4de3-4494-bc92-368bb801c803"
# Default Random Forest; no hyper-parameter tuning was needed since the
# default parameters already gave satisfactory results.
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
# + [markdown] id="5cCP7fsx5_58"
# ## Train
# + [markdown] id="lGL1A2ixjbRT"
# Classification report of the Random Forest predictions on the training sample
# + colab={"base_uri": "https://localhost:8080/"} id="5S5NGcYljXTS" outputId="c81ff4dc-a144-43e1-9a75-21c44e4a20f4"
predictions = rf.predict(X_train)
print(classification_report(y_train, predictions))
# + [markdown] id="4Bi71KcCjn7r"
# Confusion matrix of the values predicted on the training set
# + id="njyVMGnunwwp" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="3be018e3-ef24-48dc-cfec-cfe70960e33b"
pd.crosstab(y_train, predictions, rownames=['Real'],colnames=['Predito'],margins=True)
# + [markdown] id="sE7DnFY-jzJv"
# Cross-validation scores
# + id="0ct-yzB2fCPM" colab={"base_uri": "https://localhost:8080/"} outputId="5b135f26-583d-4184-de48-2efcbfe498d2"
scores = cross_val_score(rf, X_train, y_train, cv=5, scoring='accuracy')
scores
# + [markdown] id="eKTu5VKtj_Sa"
# Mean of the cross-validation scores
# + colab={"base_uri": "https://localhost:8080/"} id="-mUN-alkjv_k" outputId="ff3c4d10-65df-4397-a3b8-393160fcc36d"
scores.mean()
# + [markdown] id="knVoURIF9sWn"
# Accuracy of the predictions on the training set
# + colab={"base_uri": "https://localhost:8080/"} id="1otoQtsp9rWH" outputId="6faa94e7-f3ce-44a3-bb18-8392d139641c"
accuracy_score(y_train, predictions)
# + [markdown] id="Vg6h_dD98MqW"
# Mean absolute error on the training set
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="hJQvPDMd8ItB" outputId="e55a2c33-2426-4a57-8ce4-456eb0e9851c"
e = mean_absolute_error(y_train, predictions)
e
# + [markdown] id="mGAT9y95nCOj"
# ROC curve
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="3KTJsdBMV2Ze" outputId="9dcd8e11-6319-4e60-b458-10652557cec2"
# NOTE(review): the ROC here is computed from hard 0/1 predictions rather
# than predict_proba scores, so the "curve" has a single operating point.
fpr, tpr, _ = roc_curve(y_train, predictions)
roc_auc_scr = roc_auc_score(y_train, predictions)
plt.plot(fpr,tpr,label="data, auc="+str(roc_auc_scr))
plt.legend(loc=4)
plt.show()
# + [markdown] id="eg2wN52vVvGf"
# K fold cross-validation ROC curves
# + id="IpLo2KtjmIAK" outputId="1396b4ba-e508-4629-d256-a45c23963f3d" colab={"base_uri": "https://localhost:8080/", "height": 295}
plot_Kfold_cross_validation_curves(rf, X_train, y_train)
# + [markdown] id="-4_DNbB_6Dp2"
# ## Teste
# + [markdown] id="BLAB8P6AkG2L"
# Predição com o sample de teste
# + id="2NP6MyeutU7D"
# Predictions of the fitted Random Forest on the held-out test sample.
predictions_test = rf.predict(X_test)
# + [markdown] id="xyaMdfoAkUyW"
# Classification report of the Random Forest predictions on the test sample
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="8dP_bcm5kQ3C" outputId="0741c7f6-0c10-48cf-8591-e4780dbcce67"
print(classification_report(y_test, predictions_test))
# + [markdown] id="JQ6w9D7SkkMo"
# Confusion matrix of the values predicted on the test set
# + id="uJIkYyI1oKzr" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="d1950c34-4740-44fa-8fdc-4943eec3c857"
pd.crosstab(y_test, predictions_test, rownames=['Real'],colnames=['Predito'],margins=True)
# + [markdown] id="nNenluCDksUE"
# Validação cruzada utilizando 5 pastas com conjunto de teste
# + id="7SyA2FdfnlJk" colab={"base_uri": "https://localhost:8080/"} outputId="17790ef5-d019-4583-e562-d2472f7929db"
# Cross-validate on the true test labels. The original code passed the
# model's own predictions (`predictions_test`) as the ground truth, which
# measures how well CV folds reproduce the model's output — an inflated,
# meaningless score — rather than accuracy against the real labels.
scores = cross_val_score(rf, X_test, y_test, cv=5, scoring='accuracy')
scores
# + [markdown] id="54zV1n_mk1ma"
# Media dos scores obtidos com o conjunto de teste
# + colab={"base_uri": "https://localhost:8080/"} id="p__RCkAdkzPE" outputId="82876f26-589a-46df-fc65-84c36ea8c083"
# Mean of the cross-validation scores on the test set.
scores.mean()
# + [markdown] id="4B5va7gw9-mR"
# Model accuracy
# + colab={"base_uri": "https://localhost:8080/"} id="wePT4YrX9930" outputId="337652d1-5010-4ca4-c609-fb81dee03bac"
accuracy_score(y_test, predictions_test)
# + [markdown] id="6-CjgM258h6e"
# Mean Absolute Error
# + colab={"base_uri": "https://localhost:8080/"} id="0sdttlBo8hA7" outputId="f9eee546-b1f8-40cd-8a5e-fb73859c17ee"
e = mean_absolute_error(y_test, predictions_test)
e
# + [markdown] id="U42RdVNMtJ37"
# ROC curve on the test set
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="3spn8Gf2mEt5" outputId="82ce286a-ffea-4b5e-8e5f-a8a82abfe204"
# NOTE(review): computed from hard predictions, not probability scores.
fpr, tpr, _ = roc_curve(y_test, predictions_test)
roc_auc_scr = roc_auc_score(y_test, predictions_test)
plt.plot(fpr,tpr,label="data, auc="+str(roc_auc_scr))
plt.legend(loc=4)
plt.show()
# + [markdown] id="boChx6PpWFps"
# K fold cross-validation ROC curves on the test data
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="eeo98JS4WL2e" outputId="b535e645-4730-4035-9334-957f8b109236"
plot_Kfold_cross_validation_curves(rf, X_test, y_test)
# + [markdown] id="1sZhJG0MOQw1"
# # KNN
# + [markdown] id="klIFQi1jwtHx"
# Utilizaremos a classe padrão do K Neighbors Classifier, utilizaremos apenas o parâmetro `n_neighbors=3` pois o mesmo demonstrou um aumento na acurácia do modelo. Para descobrir isso executamos `i` execuções com `i` variando de 1 até 25 e a execução com 3 vizinhos mostrou a melhor acurácia.
# + colab={"base_uri": "https://localhost:8080/"} id="Q7IW0wPwjux3" outputId="5bcc1d0d-4c9e-4409-9e8e-fecef0660d3a"
# KNN with n_neighbors=3, chosen by sweeping i = 1..25 and keeping the best
# accuracy; all other parameters left at their defaults.
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
# + [markdown] id="Lv7zuKEW5xbu"
# ## Train
# + [markdown] id="g-V04aOB-jlH"
# Predict the training-set classes with the fitted model
# + id="l8Ja5V0sBwrB"
y_pred = knn.predict(X_train)
# + [markdown] id="cR26Z8xQujoo"
# Classification report of the K Neighbors classifier on the training sample
# + id="WeXgWfnJwM7j" colab={"base_uri": "https://localhost:8080/"} outputId="5916ddd9-9299-4701-c0cb-65fd1f0f838e"
print(classification_report(y_train, y_pred))
# + [markdown] id="2nkSZEpsu9kI"
# Confusion matrix of the values predicted on the training set
# + id="O8LDjwVXxbv1" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="366177de-8574-43be-aeac-1caf2916a586"
pd.crosstab(y_train, y_pred, rownames=['Real'],colnames=['Predito'],margins=True)
# + [markdown] id="Nz3MN_49vL9V"
# Cross-validation scores
# + id="Nim0I8PMxvCO" colab={"base_uri": "https://localhost:8080/"} outputId="8a9f2a2b-e1c6-49c0-b850-5e0c5343a5bf"
scores = cross_val_score(knn, X_train, y_train, cv=5, scoring='accuracy')
scores
# + [markdown] id="7Vb_nM5vvR_X"
# Mean of the cross-validation scores
# + id="eDIZWLplxyRU" colab={"base_uri": "https://localhost:8080/"} outputId="5dddc96a-a53a-474a-fca8-46d9224e4311"
scores.mean()
# + [markdown] id="b6wNjhh1vWKX"
# Accuracy of the predictions on the training set
# + id="kogl1nyfx3HQ" colab={"base_uri": "https://localhost:8080/"} outputId="ad4ff247-2187-4727-eac5-b6df3d760df5"
accuracy_score(y_train, y_pred)
# + [markdown] id="WKasHT03lkUC"
# Mean absolute error
# + id="4NqqkFI-yAkn" colab={"base_uri": "https://localhost:8080/"} outputId="f6789edd-06f9-4b3a-e461-f49c791236e8"
e = mean_absolute_error(y_train, y_pred)
e
# + [markdown] id="RQr1EFuPvjCF"
# ROC curve on the training data
# + id="hpsP9EKoz0f_" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="78de1362-0ff4-44db-ff0a-8e844258d509"
# NOTE(review): computed from hard predictions, not probability scores.
fpr, tpr, _ = roc_curve(y_train, y_pred)
roc_auc_scr = roc_auc_score(y_train, y_pred)
plt.plot(fpr,tpr,label="data, auc="+str(roc_auc_scr))
plt.legend(loc=4)
plt.show()
# + [markdown] id="AALgecBhvp6V"
# K fold cross-validation ROC curves
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="LcHPDaSYkQGH" outputId="cbb22792-aa21-479a-f1aa-1a331c0b0ca1"
plot_Kfold_cross_validation_curves(knn, X_train, y_train)
# + [markdown] id="1e3GXDKx5sKD"
# ## Teste
# + [markdown] id="zEvlMg5dv7Lx"
# Predição com base no modelo treinado utilizando o sample de teste
# + id="D29TGihY3uM6"
# Predict the test-set classes with the fitted KNN model.
y_pred = knn.predict(X_test)
# + [markdown] id="ifPexNQxwCxI"
# Classification report of the KNN model on the test sample
# + colab={"base_uri": "https://localhost:8080/"} id="01Hb5MHF33Tg" outputId="b11ed758-48f4-475c-dbc7-fed635ea8605"
print(classification_report(y_test, y_pred))
# + [markdown] id="BR8h1QOVwIlC"
# Confusion matrix of the values predicted on the test set
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="tybBBEAh35Pv" outputId="a22cc6a5-87fe-453f-9ecb-30436751cec7"
pd.crosstab(y_test, y_pred, rownames=['Real'],colnames=['Predito'],margins=True)
# + [markdown] id="YbDWhkKGwL1K"
# Cross-validation scores
# + colab={"base_uri": "https://localhost:8080/"} id="fMHYao9y39w4" outputId="508d2d2e-42fd-4333-a4d5-b8f6cb4dad56"
scores = cross_val_score(knn, X_test, y_test, cv=5, scoring='accuracy')
scores
# + [markdown] id="IUObozPcwPH-"
# Mean of the scores obtained on the test set
# + colab={"base_uri": "https://localhost:8080/"} id="UpaadsvS4DV2" outputId="f6d46fe5-8c9b-4846-f695-741e3e9dcd8e"
scores.mean()
# + [markdown] id="thT0iXd7wS-1"
# Accuracy of the model on the predicted test-set values
# + colab={"base_uri": "https://localhost:8080/"} id="FpRovKRa4ICh" outputId="00d1e019-a0ef-4f05-9152-f5b94de1c8df"
accuracy_score(y_test, y_pred)
# + [markdown] id="VqzRyyBdwZ-Q"
# Mean absolute error of the model on the test set
# + colab={"base_uri": "https://localhost:8080/"} id="6q9YEHuO4KJX" outputId="f6b34093-5949-4b32-ade5-0b3e41f63357"
e = mean_absolute_error(y_test, y_pred)
e
# + [markdown] id="NRqPJIf6wgRa"
# ROC curve on the test set
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="_tk57NUV4LjN" outputId="17fd008c-f653-4fbf-d701-95d25b986957"
fpr, tpr, _ = roc_curve(y_test, y_pred)
roc_auc_scr = roc_auc_score(y_test, y_pred)
plt.plot(fpr,tpr,label="data, auc="+str(roc_auc_scr))
plt.legend(loc=4)
plt.show()
# + [markdown] id="gg1pAZrrwkHW"
# K fold cross-validation ROC curves on the test data
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="yF24AvUS4QlH" outputId="359a387d-f57b-4172-a845-91d93216b916"
plot_Kfold_cross_validation_curves(knn, X_test, y_test)
# + [markdown] id="giZi9VFEPbfG"
# # MLPClassifier
# + [markdown] id="u9DwQbkxxoRG"
# Utilizaremos a classe do MLP Classifier com algumas alterações dos parâmetros default, pois principalmente relacionado ao número de iterações acaba fazendo com que o tempo de execução se torne algo muito custoso principalmente para executar as k validações cruzadas. Optamos por diminuir o número de layers como o número de neurônios da rede neural para 2 camadas com 50 neurônios cada e um número máximo de iterações igual a 5. Importante deixar claro que o modelo apresenta uma acurácia superior utilizando a parametrização padrão da classe.
# + id="TCP1wGp0PdwJ"
# MLP deliberately undersized (2 hidden layers of 50 units, max_iter=5) to
# keep cross-validation runtime manageable; the default parametrisation
# scores higher but is far more expensive to train.
clf = MLPClassifier(hidden_layer_sizes=(50,50), max_iter=5, alpha=0.0001,
                    solver='sgd', verbose=10, random_state=21,tol=0.000000001)
# + [markdown] id="cQ9wZSqs5fQw"
# ## Train
# + [markdown] id="n8yuJ2YKoIBL"
# Fit the MLP classifier and predict on the training sample
# + id="9g-Gk6rq2FIT" colab={"base_uri": "https://localhost:8080/"} outputId="329e348a-1c9c-41f2-c749-6afacd<PASSWORD>"
clf.fit(X_train, y_train)
y_pred = clf.predict(X_train)
# + [markdown] id="29BeP_pxo2Se"
# Classification report of the MLPClassifier on the training sample
# + id="ZL0h0Lx32Nyt" colab={"base_uri": "https://localhost:8080/"} outputId="7d7b42ce-65e1-4323-be18-6e24b93c1ef1"
print(classification_report(y_train, y_pred))
# + [markdown] id="NJt7rGaAqPzM"
# Confusion matrix of the values predicted on the training set
# + id="AUGrMZhT2Srz" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="a4c9fa94-b01a-4a17-ec28-e2cb0013e0e2"
pd.crosstab(y_train, y_pred, rownames=['Real'],colnames=['Predito'],margins=True)
# + [markdown] id="nc0KrzlDqVkO"
# Cross-validation scores on the training set
#
# + id="jJEpukK92ZED" colab={"base_uri": "https://localhost:8080/"} outputId="9dafc25d-224f-472d-b1e7-853fcb93a62c"
scores = cross_val_score(clf, X_train, y_train, cv=5, scoring='accuracy')
scores
# + [markdown] id="cJSUAaXAqon-"
# Mean of the cross-validation scores
# + id="xKIgmpAo2bpy" colab={"base_uri": "https://localhost:8080/"} outputId="116464aa-bac1-4985-b548-b3cdc1e420f6"
scores.mean()
# + [markdown] id="u6IzlY3JruWI"
# Accuracy of the predictions on the training set
# + id="9CObHkwe2NEz" colab={"base_uri": "https://localhost:8080/"} outputId="0b9d6158-2b43-4271-cfd2-26d691f5fc17"
accuracy_score(y_train, y_pred)
# + [markdown] id="MMSaYQWPrvxv"
# Mean absolute error on the training set
# + id="V-7innmQ2d0F" colab={"base_uri": "https://localhost:8080/"} outputId="f99679e2-5404-4ee0-a1ca-d612940a6cdb"
e = mean_absolute_error(y_train, y_pred)
e
# + [markdown] id="qpvzt34sr567"
# ROC curve on the training data
# + id="e7VLXhbi3XCI" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="37fd7d60-6df5-4df4-bcb3-4fabd508dcac"
# NOTE(review): computed from hard predictions, not probability scores.
fpr, tpr, _ = roc_curve(y_train, y_pred)
roc_auc_scr = roc_auc_score(y_train, y_pred)
plt.plot(fpr,tpr,label="data, auc="+str(roc_auc_scr))
plt.legend(loc=4)
plt.show()
# + [markdown] id="Ndz9FKl9r91L"
# K fold cross-validation ROC curves on the training data
# + id="ZW6DtnbJ4hw9" colab={"base_uri": "https://localhost:8080/", "height": 910} outputId="26676b64-702e-4c10-c732-8837c142fca2"
plot_Kfold_cross_validation_curves(clf, X_train, y_train)
# + [markdown] id="Wnv9olx35m6Q"
# ## Teste
# + [markdown] id="aFWWnqfIsFE3"
# Predição dos dados de teste com o modelo treinado utilizando o MLP classifier
# + id="bB7rrxM241Yr"
# Predict the test-set classes with the fitted MLP classifier.
y_pred = clf.predict(X_test)
# + [markdown] id="ZwWYvdNysR2V"
# Classification report of the MLPClassifier on the test sample
# + id="RUVaLQc746WR" colab={"base_uri": "https://localhost:8080/"} outputId="803e6da2-7932-4ff5-83e6-7da1919c9169"
print(classification_report(y_test, y_pred))
# + [markdown] id="mM6NcLFvsZhu"
#
# Confusion matrix of the values predicted on the test set
# + id="bDmSabqj4-YW" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="cf033ae8-2e8d-4c45-8dcf-dc548e62f523"
pd.crosstab(y_test, y_pred, rownames=['Real'],colnames=['Predito'],margins=True)
# + [markdown] id="8oUaUSc8sfDW"
# Cross-validation scores on the test set
# + id="bgp98eI55Cgl" colab={"base_uri": "https://localhost:8080/"} outputId="3853bd0e-badb-4f8d-fc9e-3b7814e6f566"
scores = cross_val_score(clf, X_test, y_test, cv=5, scoring='accuracy')
scores
# + [markdown] id="RyswG7Qjsha0"
# Mean of the k cross-validation scores
# + id="3duu5Rxy5F3a" colab={"base_uri": "https://localhost:8080/"} outputId="53790f36-0e9b-47ef-f09b-5a8b6a9d6856"
scores.mean()
# + [markdown] id="uytkE_CBspwW"
# Accuracy of the predictions on the test set
# + id="KQYudM9b5H38" colab={"base_uri": "https://localhost:8080/"} outputId="4623108b-abe7-405f-8f3e-b4f5aba1db08"
accuracy_score(y_test, y_pred)
# + [markdown] id="uH_3qEecsuPq"
# Mean absolute error on the test set
# + id="SRvKrjsK5ObE" colab={"base_uri": "https://localhost:8080/"} outputId="5eb14678-ddae-4ab6-94a6-61748807945b"
e = mean_absolute_error(y_test, y_pred)
e
# + [markdown] id="N0EHpEnQswBq"
# ROC curve on the test data
# + id="uCOF_vtk5RPb" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="92a12571-d992-4f58-8b14-4cba41819812"
fpr, tpr, _ = roc_curve(y_test, y_pred)
roc_auc_scr = roc_auc_score(y_test, y_pred)
plt.plot(fpr,tpr,label="data, auc="+str(roc_auc_scr))
plt.legend(loc=4)
plt.show()
# + [markdown] id="s9N2xNcts-Eg"
# K fold cross-validation ROC curves on the test data
# + id="NUMi0WOV5UAM" colab={"base_uri": "https://localhost:8080/", "height": 910} outputId="6a555738-96f1-4a18-d638-7fbf16a5202b"
plot_Kfold_cross_validation_curves(clf, X_test, y_test)
# + [markdown] id="NTCGM1oI80Z0"
# É uma pena para a execução do trabalho com base nesse tema não ter o dataset anterior ao pré-processamento para identificar um possível overfit nos modelos criados, haja vista a grande acurácia apresentada. Apesar da incógnita pairando sobre as features dos dados antes do pré-processamento, podemos concluir que o objetivo foi alcançado com sucesso. Podemos fazer esta afirmação olhando para as taxas de falso positivo e falso negativo, já que os dados em si apresentam em ampla maioria registros de transações não fraudulentas, logo o peso de marcar uma transação como fraudulenta ou não sem que a mesma tenha realmente esta classificação adquire um peso maior.
# | cc_fraud_detector.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="tDnwEv8FtJm7"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="JlknJBWQtKkI"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="60RdWsg1tETW"
# # Custom layers
# + [markdown] colab_type="text" id="BcJg7Enms86w"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/eager/custom_layers"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/eager/custom_layers.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/eager/custom_layers.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="UEu3q4jmpKVT"
# We recommend using `tf.keras` as a high-level API for building neural networks. That said, most TensorFlow APIs are usable with eager execution.
#
# + colab={} colab_type="code" id="pwX7Fii1rwsJ"
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
tf.enable_eager_execution()
# + [markdown] colab_type="text" id="zSFfVVjkrrsI"
# ## Layers: common sets of useful operations
#
# Most of the time when writing code for machine learning models you want to operate at a higher level of abstraction than individual operations and manipulation of individual variables.
#
# Many machine learning models are expressible as the composition and stacking of relatively simple layers, and TensorFlow provides both a set of many common layers as a well as easy ways for you to write your own application-specific layers either from scratch or as the composition of existing layers.
#
# TensorFlow includes the full [Keras](https://keras.io) API in the tf.keras package, and the Keras layers are very useful when building your own models.
#
# + colab={} colab_type="code" id="8PyXlPl-4TzQ"
# In the tf.keras.layers package, layers are objects. To construct a layer,
# simply construct the object. Most layers take as a first argument the number
# of output dimensions / channels.
layer = tf.keras.layers.Dense(100)
# The number of input dimensions is often unnecessary, as it can be inferred
# the first time the layer is used, but it can be provided if you want to
# specify it manually, which is useful in some complex models.
layer = tf.keras.layers.Dense(10, input_shape=(None, 5))
# + [markdown] colab_type="text" id="Fn69xxPO5Psr"
# The full list of pre-existing layers can be seen in [the documentation](https://www.tensorflow.org/api_docs/python/tf/keras/layers). It includes Dense (a fully-connected layer),
# Conv2D, LSTM, BatchNormalization, Dropout, and many others.
# + colab={} colab_type="code" id="E3XKNknP5Mhb"
# To use a layer, simply call it. The first call also builds the layer,
# creating its weight variables from the input shape.
layer(tf.zeros([10, 5]))
# + colab={} colab_type="code" id="Wt_Nsv-L5t2s"
# Layers have many useful methods. For example, you can inspect all variables
# in a layer using `layer.variables` and trainable variables using
# `layer.trainable_variables`. In this case a fully-connected layer
# will have variables for weights and biases.
layer.variables
# + colab={} colab_type="code" id="6ilvKjz8_4MQ"
# The variables are also accessible through nice accessors
layer.kernel, layer.bias
# + [markdown] colab_type="text" id="O0kDbE54-5VS"
# ## Implementing custom layers
# The best way to implement your own layer is extending the tf.keras.Layer class and implementing:
# * `__init__` , where you can do all input-independent initialization
# * `build`, where you know the shapes of the input tensors and can do the rest of the initialization
# * `call`, where you do the forward computation
#
# Note that you don't have to wait until `build` is called to create your variables, you can also create them in `__init__`. However, the advantage of creating them in `build` is that it enables late variable creation based on the shape of the inputs the layer will operate on. On the other hand, creating variables in `__init__` would mean that shapes required to create the variables will need to be explicitly specified.
# + colab={} colab_type="code" id="5Byl3n1k5kIy"
class MyDenseLayer(tf.keras.layers.Layer):
  """A minimal fully-connected layer: output = input @ kernel (no bias)."""

  def __init__(self, num_outputs):
    super(MyDenseLayer, self).__init__()
    self.num_outputs = num_outputs

  def build(self, input_shape):
    # Create the weight matrix lazily, once the input feature size is known.
    # `add_weight` replaces the deprecated `add_variable` alias.
    self.kernel = self.add_weight("kernel",
                                  shape=[int(input_shape[-1]),
                                         self.num_outputs])

  def call(self, input):
    # Forward pass: plain matrix multiplication with the learned kernel.
    return tf.matmul(input, self.kernel)
# Calling the layer triggers build() (creating the [5, 10] kernel) and call().
layer = MyDenseLayer(10)
print(layer(tf.zeros([10, 5])))
print(layer.trainable_variables)
# + [markdown] colab_type="text" id="tk8E2vY0-z4Z"
# Overall code is easier to read and maintain if it uses standard layers whenever possible, as other readers will be familiar with the behavior of standard layers. If you want to use a layer which is not present in tf.keras.layers or tf.contrib.layers, consider filing a [github issue](http://github.com/tensorflow/tensorflow/issues/new) or, even better, sending us a pull request!
# + [markdown] colab_type="text" id="Qhg4KlbKrs3G"
# ## Models: composing layers
#
# Many interesting layer-like things in machine learning models are implemented by composing existing layers. For example, each residual block in a resnet is a composition of convolutions, batch normalizations, and a shortcut.
#
# The main class used when creating a layer-like thing which contains other layers is tf.keras.Model. Implementing one is done by inheriting from tf.keras.Model.
# + colab={} colab_type="code" id="N30DTXiRASlb"
class ResnetIdentityBlock(tf.keras.Model):
  """A resnet identity block: three conv + batch-norm stages with a shortcut.

  The attribute names (conv2a, bn2a, ...) are kept as-is because they
  determine the names of the created TensorFlow variables.
  """

  def __init__(self, kernel_size, filters):
    super(ResnetIdentityBlock, self).__init__(name='')
    f1, f2, f3 = filters
    # 1x1 conv -> kernel_size conv (same padding) -> 1x1 conv, each
    # followed by its own batch normalization.
    self.conv2a = tf.keras.layers.Conv2D(f1, (1, 1))
    self.bn2a = tf.keras.layers.BatchNormalization()
    self.conv2b = tf.keras.layers.Conv2D(f2, kernel_size, padding='same')
    self.bn2b = tf.keras.layers.BatchNormalization()
    self.conv2c = tf.keras.layers.Conv2D(f3, (1, 1))
    self.bn2c = tf.keras.layers.BatchNormalization()

  def call(self, input_tensor, training=False):
    out = self.bn2a(self.conv2a(input_tensor), training=training)
    out = tf.nn.relu(out)
    out = self.bn2b(self.conv2b(out), training=training)
    out = tf.nn.relu(out)
    out = self.bn2c(self.conv2c(out), training=training)
    # Shortcut connection: add the block input before the final activation.
    return tf.nn.relu(out + input_tensor)
# Instantiate the block and run a dummy batch through it; the conv/bn weights
# are created on this first call, then listed by variable name.
block = ResnetIdentityBlock(1, [1, 2, 3])
print(block(tf.zeros([1, 2, 3, 3])))
print([x.name for x in block.trainable_variables])
# + [markdown] colab_type="text" id="wYfucVw65PMj"
# Much of the time, however, models which compose many layers simply call one layer after the other. This can be done in very little code using tf.keras.Sequential
# + colab={} colab_type="code" id="L9frk7Ur4uvJ"
# The same layer stack expressed with tf.keras.Sequential. Note this linear
# stack has no shortcut connection, so it is not functionally identical to
# the ResnetIdentityBlock class above.
my_seq = tf.keras.Sequential([tf.keras.layers.Conv2D(1, (1, 1)),
                              tf.keras.layers.BatchNormalization(),
                              tf.keras.layers.Conv2D(2, 1,
                                                     padding='same'),
                              tf.keras.layers.BatchNormalization(),
                              tf.keras.layers.Conv2D(3, (1, 1)),
                              tf.keras.layers.BatchNormalization()])
my_seq(tf.zeros([1, 2, 3, 3]))
# + [markdown] colab_type="text" id="c5YwYcnuK-wc"
# # Next steps
#
# Now you can go back to the previous notebook and adapt the linear regression example to use layers and models to be better structured.
# | site/en/tutorials/eager/custom_layers.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Comparison to iPRG2012 consensus
# +
import os
import sys
# Make the project's ../src directory importable so that
# `from ann_solo import reader` below resolves; idempotent across reruns.
src_dir = os.path.abspath('../src')
if src_dir not in sys.path:
    sys.path.append(src_dir)
# +
# %matplotlib inline
import math
import Levenshtein
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import squarify
from matplotlib_venn import venn2, venn2_circles
from ann_solo import reader
# -
# plot styling
# NOTE(review): these seaborn style names were renamed to 'seaborn-v0_8-*'
# in matplotlib 3.6 — confirm the pinned matplotlib version.
plt.style.use(['seaborn-white', 'seaborn-paper'])
plt.rc('font', family='serif')
sns.set_palette('Set1')
sns.set_context('paper', font_scale=1.)    # two-column figure
# +
# Load the iPRG2012 consensus identifications; the last 4 footer rows of the
# TSV are metadata and skipped. Columns are renamed for clarity downstream.
psms_consensus = pd.read_csv(
    '../../data/external/iprg2012/iprg2012ConsensusSpectrumIDcomparison.tsv',
    sep='\t', header=0, skipfooter=4, engine='python').rename(
        columns={'bestSequence': 'sequence_consensus',
                 'Precursor_z': 'charge_consensus'})
# get the same PSM identifiers (consensus uses 1-based spectrum indices)
psms_consensus = psms_consensus.set_index(psms_consensus['Index1_de'] - 1)
# ANN-SoLo identifications from the shifted open modification search.
psms_annsolo = reader.read_mztab_ssms(
    '../../data/processed/iprg2012/brute_force/bf_oms_shifted.mztab')
# +
# Observed minus theoretical precursor mass difference per SSM (in Da).
psms_annsolo['mass_diff'] = (
    (psms_annsolo['exp_mass_to_charge'] - psms_annsolo['calc_mass_to_charge'])
    * psms_annsolo['charge'])
# Outer join keeps spectra identified by only one of the two methods.
psms = psms_annsolo[['sequence', 'charge', 'search_engine_score[1]', 'mass_diff']].join(
    psms_consensus[['sequence_consensus', 'charge_consensus']], how='outer')
# don't disambiguate between I and L (isobaric residues)
for label in ['sequence', 'sequence_consensus']:
    psms[label] = psms[label].str.replace('I', 'L')
# Remove SpectraST modification masses such as "n[43]" / "[160]".
# `regex=True` must be explicit: pandas >= 2.0 treats the pattern as a
# literal string by default, which silently breaks this cleanup.
psms['sequence'] = psms['sequence'].str.replace(r'n?\[\d+\]', '', regex=True)
# +
def edit_distance(seq1, seq2, normed=False):
    """Levenshtein distance between two peptide sequences.

    Returns math.inf when either sequence is missing (NaN/None). When
    `normed` is True the distance is divided by the longer sequence length,
    yielding a value in [0, 1].
    """
    # Guard clause: any missing sequence means the distance is undefined.
    if pd.isnull(seq1) or pd.isnull(seq2):
        return math.inf
    dist = Levenshtein.distance(seq1, seq2)
    if normed:
        dist /= max(len(seq1), len(seq2))
    return dist
# Absolute and length-normalised edit distances between the ANN-SoLo and
# consensus peptide assignments for every spectrum (inf when either is missing).
psms['edit_dist'] = psms.apply(
    lambda psm: edit_distance(psm['sequence'], psm['sequence_consensus']),
    axis=1)
psms['edit_dist_norm'] = psms.apply(
    lambda psm: edit_distance(psm['sequence'], psm['sequence_consensus'], True),
    axis=1)
# -
# get unique keys for spectrum ID - peptide assignment combinations
# (.items() yields (index, sequence) pairs, so identical peptides assigned
# to different spectra remain distinct set elements)
set_consensus = set(psms.loc[
    psms['sequence_consensus'].notnull(), 'sequence_consensus'].items())
set_annsolo = set(psms.loc[psms['sequence'].notnull(), 'sequence'].items())
# +
width = 7
height = width / 1.618    # golden ratio
fig, ax = plt.subplots(figsize=(width, height))
# Venn diagram of (spectrum, peptide) assignments shared between / unique to
# ANN-SoLo and the iPRG2012 consensus.
v = venn2([set_annsolo, set_consensus],
          set_labels=['ANN-SoLo', 'iPRG2012 consensus'],
          set_colors=[next(ax._get_lines.prop_cycler)['color'],
                      next(ax._get_lines.prop_cycler)['color']],
          alpha=1., ax=ax)
c = venn2_circles([set_annsolo, set_consensus], linewidth=1.0, ax=ax)
# plt.savefig('iprg2012_consensus_venn.pdf', dpi=300, bbox_inches='tight')
plt.show()
plt.close()
# -
# Partition all spectra into four disjoint categories: identical assignments,
# conflicting assignments, and assignments unique to either method.
psms_match = psms[psms['sequence_consensus'].notnull() &
                  psms['sequence'].notnull() &
                  (psms['sequence_consensus'] == psms['sequence'])]
psms_different = psms[psms['sequence_consensus'].notnull() &
                      psms['sequence'].notnull() &
                      (psms['sequence_consensus'] != psms['sequence'])]
psms_unique_consensus = (psms[psms['sequence_consensus'].notnull()]
                         .drop(psms_match.index, errors='ignore')
                         .drop(psms_different.index, errors='ignore'))
psms_unique_annsolo = (psms[psms['sequence'].notnull()]
                       .drop(psms_match.index, errors='ignore')
                       .drop(psms_different.index, errors='ignore'))
print(f'# identical PSMs: {len(psms_match)}\n'
      f'# conflicting PSMs: {len(psms_different)}\n'
      f'# unique PSMs ANN-SoLo: {len(psms_unique_annsolo)}\n'
      f'# unique PSMs iPRG2012 consensus: {len(psms_unique_consensus)}')
# +
width = 7
height = width / 1.618    # golden ratio
fig, ax = plt.subplots(figsize=(width, height))
# Distribution of edit distances for the conflicting identifications.
# ax.hist(psms_different['edit_dist_norm'], bins=np.arange(0, 1.05, 0.05))
ax.hist(psms_different['edit_dist'], bins=np.arange(0, 25, 1))
ax.set_xlabel('Edit distance')
ax.set_ylabel('Number of conflicting SSMs')
sns.despine()
plt.savefig('iprg2012_consensus_distance.pdf', dpi=300, bbox_inches='tight')
plt.show()
plt.close()
# -
# Conflicts within `threshold_different` residues count as highly similar.
threshold_different = 3
num_high_sim = len(psms_different[psms_different['edit_dist'] <= threshold_different])
num_low_sim = len(psms_different[psms_different['edit_dist'] > threshold_different])
print(f'# conflicting PSMs with high sequence similarity: {num_high_sim}\n'
      f'# conflicting PSMs with low sequence similarity: {num_low_sim}\n'
      f' (sequence similarity threshold = {threshold_different} amino acids)')
# +
width = 7
height = width / 1.618    # golden ratio
fig, ax = plt.subplots(figsize=(width, height))
# Treemap summarising how all SSMs split across the five categories.
squares = {'identical': len(psms_match),
           'conflicting\nsimilar': num_high_sim,
           'conflicting\ndifferent': num_low_sim,
           'unique ANN-SoLo': len(psms_unique_annsolo),
           'unique iPRG2012 consensus': len(psms_unique_consensus)}
squares = {f'{key}\n({value} SSMs)': value
           for (key, value) in squares.items()}
colors = sns.color_palette('Set1', len(squares))
squarify.plot(sizes=squares.values(), color=colors, label=squares.keys(),
              ax=ax, alpha=0.8, linewidth=1, edgecolor='black')
ax.set_xticks([])
ax.set_yticks([])
plt.savefig('iprg2012_consensus_treemap.pdf', dpi=300, bbox_inches='tight')
plt.show()
plt.close()
# -
# Load every (peptide, charge) precursor present in the spectral library.
peptides_library = set()
filename_pepidx = '../../data/interim/iprg2012/human_yeast_targetdecoy.pepidx'
with open(filename_pepidx) as f_in:
    for line in f_in:
        if not line.startswith('#'):
            peptide, info, _ = line.split()
            # NOTE(review): assumes the first '|'-separated field of the
            # second column is the precursor charge — verify pepidx format.
            charge = info.split('|')[0]
            peptides_library.add((peptide, float(charge)))
psms_notfound = psms_different # pd.concat([psms_different, psms_unique_consensus])
peptides_notfound = set(zip(psms_notfound['sequence_consensus'],
                            psms_notfound['charge_consensus']))
peptides_notfound_library = peptides_notfound & peptides_library
# Build "<sequence><charge>" ion keys so PSM rows can be matched against the
# peptide sets (charge_consensus is float, so str() gives e.g. "2.0").
ions_consensus = psms['sequence_consensus'] + psms['charge_consensus'].map(str)
psms_notfound_library = (psms[ions_consensus.isin(
    [f'{seq}{charge}' for seq, charge in peptides_notfound_library])]
    .merge(psms_notfound))
peptides_notfound_notlibrary = peptides_notfound - peptides_library
psms_notfound_notlibrary = (psms[ions_consensus.isin(
    [f'{seq}{charge}' for seq, charge in peptides_notfound_notlibrary])]
    .merge(psms_notfound))
print(f'# conflicting iPRG consensus identification ions not found IN library: '
      f'{len(peptides_notfound_library)} '
      f'({len(psms_notfound_library)} spectra)\n'
      f'# conflicting iPRG consensus identification ions not found NOT IN library: '
      f'{len(peptides_notfound_notlibrary)} '
      f'({len(psms_notfound_notlibrary)} spectra)\n')
# | notebooks/iprg2012_consensus.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import re
import nltk
from sklearn.datasets import load_files
from nltk.corpus import stopwords
# #!conda install -y imbalanced-learn
# Corpus selector: 1 analyses the larger 'big' corpus of reviews, 0 the
# cleaner 5-core set.
dobig=0
# Only one of the next two loading cells will run for a given value of dobig;
# the other raises SystemExit.  The notebook is therefore safe to "run all",
# although one loading cell will intentionally stop with an error.
# +
# Review the LARGE corpus.
# This cell intentionally fails when dobig is false.  It writes one text file
# per review so that sklearn.datasets.load_files can read the corpus later.
if not dobig:
    raise SystemExit("Cell not required if dobig=", dobig)
import gzip
import simplejson
def parse(filename):
    """Yield review records from a colon-separated "key: value" dump.

    Records are separated by lines without a colon (e.g. blank lines).  Each
    yielded record is a dict mapping the field name (text before the first
    colon) to its value (text after the colon and the following space).  The
    final record is yielded when the file ends.

    The original implementation opened the file without ever closing it;
    a ``with`` block now releases the handle even if iteration is abandoned.
    """
    entry = {}
    with open(filename, 'r') as f:
        for l in f:
            l = l.strip()
            colonPos = l.find(':')
            if colonPos == -1:
                # separator line: emit the record accumulated so far
                yield entry
                entry = {}
                continue
            eName = l[:colonPos]
            # skip the colon and the single space that follows it
            rest = l[colonPos+2:]
            entry[eName] = rest
        # emit the trailing record (possibly empty, matching prior behavior)
        yield entry
#Build the amazon json data for instruments into the format for sklearn.datasets.load_files
import json
import sys, os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Hard-coded local working directory for the large corpus.
datadir="C:\\Users\\DellAdmin\\Documents\\TUD course\\Data Mining\\Assignment2\\Text Mining\\databig"
all_reviews=[]
# Create the out/<rating> folder hierarchy expected by load_files.
path=os.path.join(datadir, "out")
if not os.path.exists(path):
    os.mkdir(path)
for rate in ['1.0','2.0', '3.0', '4.0', '5.0']:
    path2 = os.path.join(datadir, "out", rate)
    if not os.path.exists(path2):
        os.mkdir(path2)
revcount=0
df = pd.DataFrame({'reviewNo':[], 'rating': [], 'reviewText':[]})
all_reviews=[]
# Stream the raw Amazon dump and write one .txt file per review into the
# folder named after its star rating; also collect everything into df.
for rev in parse('C:\\Users\\DellAdmin\\Documents\\TUD course\\Data Mining\\Assignment2\\Text Mining\\data\\Musical_Instruments.txt'):
    # print (simplejson.dumps(e))
    try:
        rate=rev['review/score']
        text=rev['review/text']
    except:
        # NOTE(review): this bare except silently keeps the PREVIOUS record's
        # rate/text when a record lacks these keys, so that review may be
        # written twice - confirm whether malformed records should be skipped.
        pass
    # NOTE(review): df.loc[len(df.index)] append is O(n) per row; acceptable
    # for a one-off conversion but slow on very large dumps.
    df.loc[len(df.index)] = [revcount, float(rate), text]
    revcount+=1
    fname= '.'.join([ str(revcount),'txt'] )
    all_reviews.append(rev)
    path2 = os.path.join(datadir, "out",rate, fname )
    if not os.path.exists(path2):
        with open(path2, "w") as f:
            f.write(text)
# +
#Build the amazon json data for instruments into the format for sklearn.datasets.load_files
# Review the SMALL (5-core) corpus: write one text file per review.
# This cell intentionally fails when dobig is set; the big-corpus cell above
# handles that case instead.
if dobig:
    raise SystemExit("Cell not required if dobig=", dobig)
import json
import sys, os
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Hard-coded local paths to the 5-core corpus.
datadir="C:\\Users\\DellAdmin\\Documents\\TUD course\\Data Mining\\Assignment2\\Text Mining\\data"
f=open("C:\\Users\\DellAdmin\\Documents\\TUD course\\Data Mining\\Assignment2\\Text Mining\\Musical_Instruments_5.json", encoding='utf-8')
reviews=f.readlines()
f.close()
all_reviews=[]
# Create the out/<rating> folder hierarchy expected by load_files.
path=os.path.join(datadir, "out")
if not os.path.exists(path):
    os.mkdir(path)
for rate in ['1.0','2.0', '3.0', '4.0', '5.0']:
    path2 = os.path.join(datadir, "out", rate)
    if not os.path.exists(path2):
        os.mkdir(path2)
revcount=0
df = pd.DataFrame({'reviewNo':[], 'rating': [], 'reviewText':[]})
# One JSON document per line; write each review into the folder named after
# its star rating and collect it into df as well.
for revstr in reviews:
    rev=json.loads(revstr)
    all_reviews.append(rev)
    rate=str(rev['overall'])
    # skip (near-)blank reviews, i.e. shorter than 3 characters
    # (shortest meaningful review seen is just "excellent")
    if len(rev['reviewText']) <3:
        print (f"no review data for {revcount} {rate}")
        print (rev['reviewText'])
        continue
    df.loc[len(df.index)] = [revcount, float(rate), rev['reviewText']]
    revcount+=1
    fname= '.'.join([ str(revcount),'txt'] )
    path2 = os.path.join(datadir, "out",rate, fname )
    if not os.path.exists(path2):
        with open(path2, "w") as f:
            f.write(rev['reviewText'])
# Quick look at the rating distribution of the loaded corpus.
print("number of reviews is:", len(all_reviews))
# NOTE(review): sns.distplot is deprecated in recent seaborn releases
# (replaced by histplot/displot) - confirm the installed version.
sns.distplot(df['rating'], kde=False, bins=5)
print(df.skew(axis=0))
plt.savefig('rating_chart.png')
df.describe()
# Load the NLTK stop words for the English language as a set.
from nltk.corpus import stopwords
#nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
# +
from wordcloud import WordCloud
import matplotlib.pyplot as plt
from nltk.tokenize import sent_tokenize, word_tokenize

# Build the stop-word set ONCE.  The original recomputed (and linearly
# scanned) stopwords.words('english') for every single token, making the
# filtering step O(tokens * stopwords) instead of O(tokens).
_english_stopwords = set(stopwords.words('english'))


def make_wordcloud(reviews_df, rating, outfile):
    """Display and save a word cloud for reviews with the given star rating.

    reviews_df: dataframe with 'rating' and 'reviewText' columns.
    rating: star rating (float) selecting the reviews to visualise.
    outfile: path of the PNG file the cloud is written to.
    """
    text = "\n".join(reviews_df.loc[reviews_df['rating'] == rating]['reviewText'])
    # tokenise the selected reviews
    words = word_tokenize(text)
    # keep alphabetic tokens only (removes punctuation and numbers), lower-cased
    words_filtered = [word.lower() for word in words if word.isalpha()]
    # remove English stop words
    filtered_words = [word for word in words_filtered
                      if word not in _english_stopwords]
    wc = WordCloud(max_words=100, margin=10, background_color='white',
                   scale=3, relative_scaling=0.5, width=500, height=400,
                   random_state=1).generate(' '.join(filtered_words))
    plt.figure(figsize=(20, 10))
    plt.imshow(wc)
    plt.axis("off")
    plt.show()
    wc.to_file(outfile)


# Word clouds for bad (1-star) and good (5-star) reviews; these were
# previously produced by two near-identical copies of the same code.
make_wordcloud(df, 1.0, "wordcloud_bad.png")
make_wordcloud(df, 5.0, "wordcloud_good.png")
# +
# This dataset will allow us to perform a type of sentiment-analysis
# classification (predicting the star rating from the review text).
#source_file_dir = r"C:\Users\DellAdmin\Documents\TUD course\Data Mining\Assignment2\Text Mining\aclImdb\train"
source_file_dir = r"C:\Users\DellAdmin\Documents\TUD course\Data Mining\Assignment2\Text Mining\data\out"
if dobig:
    source_file_dir = r"C:\Users\DellAdmin\Documents\TUD course\Data Mining\Assignment2\Text Mining\databig\out"
# The load_files function automatically divides the dataset into data and
# target sets: each folder inside source_file_dir is treated as one category
# and every document inside that folder gets that category label.
movie_data = load_files(source_file_dir)
X, y = movie_data.data, movie_data.target
# X holds the raw review bytes from every rating folder, while y holds the
# integer-encoded category (rating folder) of each document.
# +
from nltk.stem import WordNetLemmatizer

# Normalise the raw documents in X into a cleaned, lemmatised corpus.
# The resulting `documents` list has the same length and order as X; the
# category labels remain in y.
lemmatizer = WordNetLemmatizer()


def _clean_document(raw):
    """Return a lower-cased, lemmatised version of one raw document string."""
    # remove all special characters, numbers and punctuation
    doc = re.sub(r'[\d\W]+', ' ', raw)
    # remove single characters surrounded by whitespace
    doc = re.sub(r'\s+[a-zA-Z]\s+', ' ', doc)
    # remove a single character at the start of the document
    doc = re.sub(r'^[a-zA-Z]\s+', ' ', doc)
    # collapse runs of whitespace into a single space
    doc = re.sub(r'\s+', ' ', doc, flags=re.I)
    # strip the leading 'b' left over from the bytes repr
    doc = re.sub(r'^b\s+', '', doc)
    # lower-case, split into tokens and lemmatise each one
    tokens = doc.lower().split()
    return ' '.join(lemmatizer.lemmatize(token) for token in tokens)


documents = [_clean_document(str(raw)) for raw in X]
# -
# -
# Function to run generic classifier - for models which use a classifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from numpy import sqrt
import sklearn
import sklearn.naive_bayes
def classify(classifier, X_train, X_test, y_train, y_test):
    """Fit *classifier* on the training split and report test-set metrics.

    Prints the accuracy, the confusion matrix and the RMS rating error
    (ratings are ordinal, so the squared prediction error is meaningful).
    Returns the fitted classifier so callers can inspect it further.
    """
    classifier.fit(X_train, y_train)
    # Now label/classify the test split
    y_pred = classifier.predict(X_test)
    # Evaluate the model
    print("Accuracy:", accuracy_score(y_test, y_pred))
    print(confusion_matrix(y_test,y_pred))
    # RMS of the rating error; sum() replaces the original manual
    # accumulation loop over the squared differences
    squared_errors = [x*x for x in y_pred-y_test]
    print( "average rms error is: %f" % sqrt(sum(squared_errors)/len(squared_errors)))
    return classifier
# +
# Feature selection / vectorisation (3000 features works best for the small
# data set, 1500 for the medium one due to memory constraints).
from sklearn.feature_extraction.text import CountVectorizer
# Experiment with different models by (un)commenting one of the variants
# below.  The vocabulary keeps any words occurring in 5 or more reviews but
# in less than 70% of them, truncated to the N most frequent terms.
#vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7,stop_words=stopwords.words('english'),
#vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7, ngram_range=(1, 1), stop_words=stopwords.words('english'),
#vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7, ngram_range=(1, 4),stop_words=[],
#vectorizer = CountVectorizer(max_features=500, min_df=5, max_df=0.7, ngram_range=(1, 2),stop_words=[],
#vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7,stop_words=[],
#vectorizer = CountVectorizer(max_features=2000, min_df=5, max_df=0.7, ngram_range=(1, 2),stop_words=[],
#vectorizer = CountVectorizer(max_features=8192, min_df=5, max_df=0.7, ngram_range=(1, 2),stop_words=[],
#vectorizer = CountVectorizer(max_features=200, min_df=5, max_df=0.7, ngram_range=(1, 2),stop_words=[],
#vectorizer = CountVectorizer(max_features=8192, min_df=5, max_df=0.7, ngram_range=(1, 2),stop_words=[],
#vectorizer = CountVectorizer(max_features=3000, min_df=5, max_df=0.7, ngram_range=(1, 1),stop_words=[],
#vectorizer = CountVectorizer(max_features=100, min_df=5, max_df=0.7, stop_words=stopwords.words('english') ,
#vectorizer = CountVectorizer(max_features=200, min_df=5, max_df=0.7, stop_words=stopwords.words('english') ,
#vectorizer = CountVectorizer(max_features=1000, min_df=5, max_df=0.7, ngram_range=(1, 2),#stop_words=stopwords.words('english') ,
vectorizer = CountVectorizer(max_features=1500, min_df=5, max_df=0.7, ngram_range=(1, 2),stop_words=[],
                             strip_accents='unicode'
                            )
X = vectorizer.fit_transform(documents).toarray()
from sklearn.feature_extraction.text import TfidfTransformer
# Re-weight the raw term counts by TF-IDF.
tfidfconverter = TfidfTransformer()
X = tfidfconverter.fit_transform(X).toarray()
# Quick look at RMS error versus vocabulary size; the numbers were copied
# manually from earlier runs on the small data set.
_feature_counts = [200, 500, 1000, 1500, 2000, 3000, 5000, 8192]
_rms_errors = [1.63, 1.44, 1.30, 1.20, 1.12, 1.02, 0.94, 0.88]
quickResults = pd.DataFrame({'features': _feature_counts, 'RMS': _rms_errors})
quickResults.plot(x="features", y="RMS")
# +
# Split into train and test, then oversample the rating classes with fewer
# examples so every rating is equally represented in the training set.
from sklearn.model_selection import train_test_split
# #!conda install -y imbalanced-learn
from imblearn.over_sampling import RandomOverSampler
# Train DS = 70%, Test DS = 30%
X_train_in, X_test, y_train_in, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
# define oversampling strategy: duplicate samples of all non-majority classes
oversample = RandomOverSampler(sampling_strategy='not majority', random_state=2)
# fit and apply the transform (only on the training split, so the test split
# keeps the original class distribution)
X_train, y_train = oversample.fit_resample(X_train_in,y_train_in)
# +
# The number of features and the transforms (stop-word removal, n-grams) are
# all evaluated with this multinomial NB model.
c=sklearn.naive_bayes.MultinomialNB()
# with small numbers of features (200 for example) ngrams hinder the
# selection, as does leaving stopwords in;
# with large numbers of features, ngrams and stopwords being left in help
c=classify(c , X_train, X_test, y_train, y_test)
# NOTE(review): MultinomialNB.coef_ was deprecated and later removed in
# scikit-learn (use feature_log_prob_), and get_feature_names() was replaced
# by get_feature_names_out() - confirm against the installed version.
features=abs(c.coef_[0])+abs(c.coef_[1])+abs(c.coef_[2])+abs(c.coef_[3])+abs(c.coef_[4])
pd.Series(features, index=vectorizer.get_feature_names()).nlargest(20).plot(kind='barh')
# +
# First advanced model: linear SVC (too slow with the medium data set).
from sklearn import svm
if not dobig:
    # Bind the fitted model to its own name: the original reassigned `svm`,
    # shadowing the sklearn.svm module imported just above.
    svm_model = svm.SVC(gamma=0.001, C=100., kernel = 'linear')
    c=classify(svm_model , X_train, X_test, y_train, y_test)
# +
# Random forest model.
# n_estimators = number of trees in the forest
from sklearn.ensemble import RandomForestClassifier
classifier_rf = RandomForestClassifier(n_estimators=40, random_state=0)
classifier_rf.fit(X_train, y_train)
# -
# Gaussian naive Bayes baseline (evaluated in detail in the next cell).
from sklearn.naive_bayes import GaussianNB
classifier_nb = GaussianNB()
classifier_nb.fit(X_train, y_train)
classifier_nb.predict(X_test)
# +
# Label/classify the test split with the fitted Gaussian NB model
y_pred = classifier_nb.predict(X_test)
# Evaluate the model: accuracy, confusion matrix and per-class report
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
print("Accuracy:", accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
from numpy import sqrt
# RMS of the rating error; sum() replaces the original manual
# accumulation loop over the squared differences
l=[ x*x for x in y_pred-y_test]
print( "average rms error is: %f" % sqrt(sum(l)/len(l)))
# +
#Cant do multi-class ROC
#from sklearn.metrics import roc_curve
#y_score=classifier_nb.predict_proba(X_test)
#print(y_score[0:10])##
#
#for i in range(0,4):
# y_true=y_test
# roc_curve(y_true, y_score)
# print(y_true[0:10])
# +
# Label/classify the test split with the fitted random forest model
y_pred = classifier_rf.predict(X_test)
# Evaluate the model: accuracy and confusion matrix
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
print("Accuracy:", accuracy_score(y_test, y_pred))
print(confusion_matrix(y_test,y_pred))
from numpy import sqrt
# RMS of the rating error; sum() replaces the original manual
# accumulation loop over the squared differences
l=[ x*x for x in y_pred-y_test]
print( "average rms error is: %f" % sqrt(sum(l)/len(l)))
# -
# sanity check: size of the (oversampled) training set
len(X_train)
# Re-evaluate the Gaussian NB model through the shared helper.
classify(classifier_nb, X_train, X_test, y_train, y_test)
# K-nearest-neighbours baseline with default parameters.
from sklearn.neighbors import KNeighborsClassifier
neigh = KNeighborsClassifier()
classify(neigh, X_train, X_test, y_train, y_test)
# Gradient boosting baseline.
from sklearn.ensemble import GradientBoostingClassifier
clf = GradientBoostingClassifier(random_state=0)
classify(clf, X_train, X_test, y_train, y_test)
# print the installed scikit-learn version (relevant for API differences)
sklearn.__version__
# +
c=sklearn.naive_bayes.MultinomialNB()
# Recorded results for this configuration:
#   without stop-word removal: average rms error 1.225739
#   with stop-word removal:    average rms error 1.236424
c=classify(c , X_train, X_test, y_train, y_test)
# -
# Logistic regression baseline.  `import sklearn` alone does not guarantee
# the sklearn.linear_model submodule is loaded, so import it explicitly.
import sklearn.linear_model
c=sklearn.linear_model.LogisticRegression(max_iter=500)
c=classify(c , X_train, X_test, y_train, y_test)
| textclassification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # TLT Classification example usecase
#
# #### This notebook shows an example use case for classification using the Transfer Learning Toolkit. **_It is not optimized for accuracy._**
#
# 0. [Set up env variables](#head-0)
# 1. [Prepare dataset and pretrained model](#head-1)<br>
# 1.1 [Split the dataset into train/test/val](#head-1-1)<br>
# 1.2 [Download pre-trained model](#head-1-2)<br>
# 2. [Provide training specfication](#head-2)
# 3. [Run TLT training](#head-3)
# 4. [Evaluate trained models](#head-4)
# 5. [Prune trained models](#head-5)
# 6. [Retrain pruned models](#head-6)
# 7. [Testing the model](#head-7)
# 8. [Visualize inferences](#head-8)
# 9. [Export and Deploy!](#head-9)
# ## 0. Setup env variables <a class="anchor" id="head-0"></a>
# Please replace the **$API_KEY** with your api key on **ngc.nvidia.com**
# %env USER_EXPERIMENT_DIR=/workspace/tlt-experiments
# %env DATA_DOWNLOAD_DIR=/workspace/tlt-experiments/data
# %env SPECS_DIR=/workspace/examples/specs
# %env API_KEY=$API_KEY
# ## 1. Prepare datasets and pre-trained model <a class="anchor" id="head-1"></a>
# We will be using the pascal VOC dataset for the tutorial. To find more details please visit
# http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit. Please download the dataset present at http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar to $DATA_DOWNLOAD_DIR.
# Check that file is present
# Check that the dataset tarball is present before continuing.
import os
DATA_DIR = os.environ.get('DATA_DOWNLOAD_DIR')
# Guard against DATA_DOWNLOAD_DIR being unset: os.path.join(None, ...) would
# raise TypeError, so treat a missing env var the same as a missing file.
if DATA_DIR is None or not os.path.isfile(os.path.join(DATA_DIR, 'VOCtrainval_11-May-2012.tar')):
    print('tar file for dataset not found. Please download.')
else:
    print('Found dataset.')
# unpack
# !tar -xvf $DATA_DOWNLOAD_DIR/VOCtrainval_11-May-2012.tar -C $DATA_DOWNLOAD_DIR
# verify
# !ls $DATA_DOWNLOAD_DIR/VOCdevkit/VOC2012
# ### 1.1 Split the dataset into train/val/test <a class="anchor" id="head-1-1"></a>
# Pascal VOC Dataset is converted to our format (for classification) and then to train/val/test in the next two blocks.
# +
from os.path import join as join_path
import os
import glob
import re
import shutil

# Convert the Pascal VOC trainval class lists into a one-folder-per-class
# layout under $DATA_DOWNLOAD_DIR/formatted, copying every positive example
# of each class into that class's folder.
DATA_DIR=os.environ.get('DATA_DOWNLOAD_DIR')
source_dir = join_path(DATA_DIR, "VOCdevkit/VOC2012")
target_dir = join_path(DATA_DIR, "formatted")
suffix = '_trainval.txt'
classes_dir = join_path(source_dir, "ImageSets", "Main")
images_dir = join_path(source_dir, "JPEGImages")
classes_files = glob.glob(classes_dir+"/*"+suffix)
for file in classes_files:
    # get the class name from the filename and make the output class folder
    classname = os.path.basename(file)
    if classname.endswith(suffix):
        classname = classname[:-len(suffix)]
    target_dir_path = join_path(target_dir, classname)
    if not os.path.exists(target_dir_path):
        os.makedirs(target_dir_path)
    else:
        # class folder already populated by a previous run - skip it
        continue
    print(classname)
    with open(file) as f:
        content = f.readlines()
    for line in content:
        # raw string for the regex: '\s' in a plain literal is an invalid
        # escape sequence (DeprecationWarning on Python 3.6+)
        tokens = re.split(r'\s+', line)
        # '1' marks a positive example of this class in the VOC lists
        if tokens[1] == '1':
            # copy this image into the class folder
            target_file_path = join_path(target_dir_path, tokens[0] + '.jpg')
            src_file_path = join_path(images_dir, tokens[0] + '.jpg')
            shutil.copyfile(src_file_path, target_file_path)
# +
import os
import glob
import shutil
from random import shuffle

# Split the per-class "formatted" folders into train (70%) / val (10%) /
# test (20%) subsets under $DATA_DOWNLOAD_DIR/split.
DATA_DIR=os.environ.get('DATA_DOWNLOAD_DIR')
SOURCE_DIR=join_path(DATA_DIR, 'formatted')
TARGET_DIR=os.path.join(DATA_DIR,'split')
# list the class sub-directories; next(iterator) works on both Python 2 and
# Python 3, unlike the Python-2-only iterator.next() method used previously
dir_list = next(os.walk(SOURCE_DIR))[1]
# for each class dir, create matching train/val/test dirs and copy the images
for dir_i in dir_list:
    # print("Splitting {}".format(dir_i))
    newdir_train = os.path.join(TARGET_DIR, 'train', dir_i)
    newdir_val = os.path.join(TARGET_DIR, 'val', dir_i)
    newdir_test = os.path.join(TARGET_DIR, 'test', dir_i)
    if not os.path.exists(newdir_train):
        os.makedirs(newdir_train)
    if not os.path.exists(newdir_val):
        os.makedirs(newdir_val)
    if not os.path.exists(newdir_test):
        os.makedirs(newdir_test)
    img_list = glob.glob(os.path.join(SOURCE_DIR, dir_i, '*.jpg'))
    # shuffle so the split is random rather than alphabetical
    shuffle(img_list)
    # first 70% -> train, next 10% -> val, remaining 20% -> test
    for j in range(int(len(img_list)*0.7)):
        shutil.copy2(img_list[j], os.path.join(TARGET_DIR, 'train', dir_i))
    for j in range(int(len(img_list)*0.7), int(len(img_list)*0.8)):
        shutil.copy2(img_list[j], os.path.join(TARGET_DIR, 'val', dir_i))
    for j in range(int(len(img_list)*0.8), len(img_list)):
        shutil.copy2(img_list[j], os.path.join(TARGET_DIR, 'test', dir_i))
print('Done splitting dataset.')
# -
# !ls $DATA_DOWNLOAD_DIR/split/test/cat
# ### 1.2 Download pretrained models <a class="anchor" id="head-1-2"></a>
# Print the list of available models. Find your **ORG** and **TEAM** on ngc.nvidia.com and replace the **-o** and **-t** arguments.
# !tlt-pull -k $API_KEY -lm -o nvtltea -t iva
# Download the resnet18 classification model.
# !tlt-pull -d $USER_EXPERIMENT_DIR -k $API_KEY -m tlt_iva_classification_resnet18 -v 1 -o nvtltea -t iva
print("Check that model is downloaded into dir.")
# !ls -l $USER_EXPERIMENT_DIR
# ## 2. Provide training specfication <a class="anchor" id="head-2"></a>
# * Training dataset
# * Validation dataset
# * Pre-trained models
# * Other training (hyper-)parameters such as batch size, number of epochs, learning rate etc.
# !cat $SPECS_DIR/classification_spec.cfg
# ## 3. Run TLT training <a class="anchor" id="head-3"></a>
# * Provide the sample spec file and the output directory location for models
print('Create an output dir')
# !mkdir $USER_EXPERIMENT_DIR/output
print('Model checkpoints and logs:')
print('---------------------')
# !ls -l $USER_EXPERIMENT_DIR/output
# ### Please change the **train_dataset_path, val_dataset_path, pretrained_model_path** in the spec file below if these values are different.
# +
print("Check spec file")
# !cat $SPECS_DIR/classification_spec.cfg
# -
# !tlt-train classification -e $SPECS_DIR/classification_spec.cfg -r $USER_EXPERIMENT_DIR/output -k $API_KEY
# ## 4. Evaluate trained models <a class="anchor" id="head-4"></a>
# !tlt-evaluate classification -d $DATA_DOWNLOAD_DIR/split/test \
# -pm $USER_EXPERIMENT_DIR/output/weights/resnet_001.tlt \
# -b 32 -k $API_KEY
# ## 5. Prune trained models <a class="anchor" id="head-5"></a>
# * Specify pre-trained model
# * Equalization criterion
# * Threshold for pruning
# * Exclude prediction layer that you don't want pruned (e.g. predictions)
# !tlt-prune -pm $USER_EXPERIMENT_DIR/output/weights/resnet_001.tlt \
# -o $USER_EXPERIMENT_DIR/output/resnet_001_pruned \
# -eq union \
# -pth 0.7 -k $API_KEY
print('Pruned model:')
print('------------')
# !ls -1 $USER_EXPERIMENT_DIR/output/resnet_001_pruned
# ## 6. Retrain pruned models <a class="anchor" id="head-6"></a>
# * Model needs to be re-trained to bring back accuracy after pruning
# * Specify re-training specification
# ### Please change the **train_dataset_path, val_dataset_path, pretrained_model_path** in the spec file below if these values are different.
# !cat $SPECS_DIR/classification_retrain_spec.cfg
# !tlt-train classification -e $SPECS_DIR/classification_retrain_spec.cfg -r $USER_EXPERIMENT_DIR/output_retrain -k $API_KEY
# ## 7. Testing the model! <a class="anchor" id="head-7"></a>
# !tlt-evaluate classification -d $DATA_DOWNLOAD_DIR/split/test \
# -pm $USER_EXPERIMENT_DIR/output_retrain/weights/resnet_001.tlt \
# -b 32 -k $API_KEY
# ## 8. Visualize Inferences <a class="anchor" id="head-8"></a>
# To see the output results of our model on test images, we can use the tlt-infer tool. Note that using models trained for higher epochs will result in better results. We'll run inference on a directory of images.
# !tlt-infer classification -m $USER_EXPERIMENT_DIR/output_retrain/weights/resnet_001.tlt \
# -k $API_KEY -b 32 -d $DATA_DOWNLOAD_DIR/split/test/person \
# -cm $USER_EXPERIMENT_DIR/output_retrain/classmap.json
# Optionally, you can also run inference on a single image. Uncomment the code below for an example.
# +
# #!tlt-infer classification -m $USER_EXPERIMENT_DIR/output_retrain/weights/resnet_001.tlt \
# # -k $API_KEY -b 32 -i $DATA_DOWNLOAD_DIR/split/test/person/2008_000032.jpg \
# # -cm $USER_EXPERIMENT_DIR/output_retrain/classmap.json
# -
# As explained in Getting Started Guide, this outputs a results.csv file in the same directory. We can use a simple python program to see the visualize the output of csv file.
# +
import matplotlib.pyplot as plt
from PIL import Image
import os
import csv
from math import ceil
# Read the tlt-infer output CSV; columns 1 and 2 of each row are used below
# as the image path and the predicted label respectively.
DATA_DIR = os.environ.get('DATA_DOWNLOAD_DIR')
# NOTE(review): the narrative above says "results.csv" but the code reads
# "result.csv" - confirm which file name tlt-infer actually writes.
csv_path = os.path.join(DATA_DIR, 'split', 'test', 'person', 'result.csv')
results = []
with open(csv_path) as csv_file:
    csv_reader = csv.reader(csv_file, delimiter=',')
    for row in csv_reader:
        results.append((row[1], row[2]))
# Show 5 thumbnails with their predicted labels in one row.
w,h = 200,200
fig = plt.figure(figsize=(30,30))
columns = 5
rows = 1
for i in range(1, columns*rows + 1):
    ax = fig.add_subplot(rows, columns,i)
    # NOTE(review): indexing results[i] with i starting at 1 skips the first
    # CSV row and requires at least 6 rows - confirm whether results[i-1]
    # was intended.
    img = Image.open(results[i][0])
    # NOTE(review): Image.ANTIALIAS is deprecated/removed in recent Pillow
    # releases (Image.LANCZOS replaces it) - fine for this old environment.
    img = img.resize((w,h), Image.ANTIALIAS)
    plt.imshow(img)
    ax.set_title(results[i][1], fontsize=40)
# -
# ## 9. Export and Deploy! <a class="anchor" id="head-9"></a>
# !tlt-export $USER_EXPERIMENT_DIR/output_retrain/weights/resnet_001.tlt \
# --input_dim 3,224,224 \
# -o $USER_EXPERIMENT_DIR/export/final_model.uff \
# --enc_key $API_KEY \
# --outputs predictions/Softmax
print('Exported model:')
print('------------')
# !ls -lh $USER_EXPERIMENT_DIR/export/
| training/object_detection/examples/classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **TEXT MINING 4 - FILTERING AND STOPWORD REMOVAL**
# Build Date | April, 7th 2020
# --- | ---
# Male Tutor | <NAME> & <NAME>
# Female | <NAME> & <NAME>
# # **Pengertian**
#
# Filtering adalah proses menyaring kata atau memilah kata yang tidak berguna dalam suatu dokumen atau teks. Kata-kata itu dihapus atau dibuang karena dinilai tidak memiliki arti dalam proses `Text Mining`. Misalnya :
#
# contoh |
# --- |
# kuliah daring adalah bentuk dari usaha pemerintah untuk meredam COVID-19 |
#
# Dalam proses filtering kita memerlukan `stopword` dimana stopword ini akan digunakan sebagai patokan kata mana saja yang harus dihilangkan dalam suatu kalimat atau dokumen. Misal kata yang perlu dihilangkan dalam suatu teks adalah kata penghubung 'di', 'ke', 'daripada', 'adalah', 'kepada', dsb. Kumpulan stopword ini juga sering di sebut dengan `wordlist`. Lalu bagaimana jika kalimat diatas jika di filter ? Kita akan membuang kata **adalah, dari, untuk**.
#
# contoh |
# --- |
# kuliah daring bentuk usaha pemerintah meredam COVID-19 |
#
# Selanjutnya kita akan mengimplementasikannya kedalam kodingan.
# ## **A. Filtering dengan `NLTK`**
#
# Langkah awalnya kita import terlebih dahulu library `nltk`. Ketikkan kode berikut :
# > `import nltk`
# Disini kita tetap harus melakukan `tokenizing` dan `case folding` agar kalimat kita memberikan akurasi yang baik. Untuk itu kita import fungsi `sent_tokenize` dan `word_tokenize` dari library `nltk`. Ketikkan kode berikut :
# > `from nltk.tokenize import sent_tokenize, word_tokenize`
# selanjutnya, kita import fungsi `stopword`. Ketikkan kode berikut :
# > `from nltk.corpus import stopwords`
# Kita masih menggunakan kalimat yang sama seperti di modul pertemuan ke 2. Yaitu **"Fakultas yang ada di Universitas Pakuan ada 6 diantaranya adalah Fakultas Teknik, MIPA, FISIB, FKIP, FH, dan Pasca-Sarjana"**. Yang akan dideklarasikan dalam variable **teks**. Ketikkan kode berikut :
# > `teks = "Fakultas yang ada di Universitas Pakuan ada 6 diantaranya adalah Fakultas Teknik, MIPA, FISIB, FKIP, FH, dan Pasca-Sarjana"`
# Disini kita akan menggunakan library `string` nah jangan lupa import library nya terlebiih dahulu.
# > `import string`
# Kita lakukan proses case folding terlebih dahulu dengan kode berikut dan disimpan dalam variable **proses_cf**.
# > `proses_cf = teks.translate(str.maketrans('','', string.punctuation)).lower()`
# Lalu kita lakukan tokenizing dan disimpan dalam variable **proses_token**. Ketikkan kode berikut :
# > `proses_token = nltk.tokenize.word_tokenize (proses_cf)`
# proses melihat list stopword yang ada di nltk, lalu set bahasanya di bahasa Indonesia. Ketikkan kode berikut :
# > `listStopwords = set(stopwords.words('Indonesian'))`
# Proses menghapus kata yaitu menggunakan argumen yang disimpan dalam variable **removed**. Lalu kita menggunakan perulangan `for`. Cara membacanya adalah *untuk t di variable proses_token, jika tidak ada teks didalam list stopword maka hapus dan gabungkan kembali*. Ketikkan kode berikut : ( **perhatikan indentasi** sperti contoh dibawah ini. Ingat Python itu `case sensitive` )
#
# > `removed = []`
# >`for t in proses_token:`
# >>> `if t not in listStopwords:`
# >>>> `removed.append(t)`
# Jika sudah, mari kita cetak hasilnya dengan mengetikkan kode berikut :
#
# > `print(removed)`
# ## **B. Filtering dengan `Sastrawi`**
#
# Kita telah berhasil melakukan proses filtering dengan `nltk`. Kali ini kita akan menggunakan library `Sastrawi` khusus bahasa Indonesia. Tentunya `wordlist` nya akan lebih banyak dan telah disesuaikan.
#
# ### **1. Melihat Daftar Wordlist**
#
# Nah, disini kita juga bisa menggunakan beberapa fungsi , misalnya kali ini kita akan menggunakan fungsi `StopWordRemover`. Ketikkan kode berikut untuk mengimport Sastrawi dan fungsinya :
# > `from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory`
# Kita akan menampung fungsi tersebut dalam variable **factory**. Ketikkan kode berikut :
# > `factory = StopWordRemoverFactory()`
# Lalu kita deklarasikan variable **daftar_stopword** untuk mendapatkan daftar stopword dari Sastrawi. Ketikkan kode berikut :
#
# > `daftar_stopword = factory.get_stop_words()`
# Jika sudah kita tampilkan daftarnya. Ketikkan kode berikut ini :
#
# > `print(daftar_stopword)`
# ### **2. Filtering Dengan Sastrawi**
#
# Kita awali dengan import library dari Sastrawi. Ketikkan kode berikut :
# > `from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory`
# Lakukan tokenizing menggunakan library `nltk`. Ketikkan kode berikut : ( **kita tidak perlu import library lagi karena sudah dilakukan diatas**)
# >`from nltk.tokenize import word_tokenize`
# Kita akan menampung fungsi tersebut dalam variable **factory**. Ketikkan kode berikut :
# > `factory = StopWordRemoverFactory()`
# Mendeklarasikan variable **stopword** untuk menampung fungsi pembuatan `stopword_remover`. Ketikkan kode berikut :
# > `stopword = factory.create_stop_word_remover()`
# Deklarasikan variable **kalimat** untuk menampung teks baru. Ketikkan kode berikut :
#
# > `kalimat = "Andi kerap melakukan transaksi rutin secara daring atau online. Menurut Andi belanja online lebih praktis & murah."`
# Lakukan proses case folding. Ketikkan kode berikut :
# > `kalimat_cf =kalimat.translate(str.maketrans('','',string.punctuation)).lower()`
# Proses melakukan stopword dengan mendeklarasikan variable **kalimat_sw**
# > `kalimat_sw = stopword.remove(kalimat_cf)`
#
# Jangan lupa lakukan tokenisasi. Ketikkan kode berikut :
#
# > `kalimat_token = nltk.tokenize.word_tokenize(kalimat_sw)`
# Cetak hasilnya. Ketik kode berikut :
# > `print(kalimat_token)`
# ### 3.Menambah Stopword atau WordList sendiri
#
# Adalakanya dalam riset kita membutuhkan wordlist sendiri, karena didalam library yang bersangkutan tidak ada. Maka kita bisa mendeklarasikannya.
# Kodingannya masih sama dengan yang diatas namun ada beberapa penambahan yaitu variable **more_stopword** misalnya. Mari lihat pada kodingan dibawah ini.
# +
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory, StopWordRemover, ArrayDictionary
from nltk.tokenize import word_tokenize
stop_factory = StopWordRemoverFactory().get_stop_words()
# -
# deklarasikan variable **more_stopword** dan isilah wordlist apa yang mau ditambahkan. Disini saya mengisi dengan **daring dan online** Ketikkan kode berikut :
#
# > `more_stopword = ['daring', 'online']`
#
kalimat = "Andi kerap melakukan transaksi rutin secara daring atau online. Menurut Andi belanja online lebih praktis & murah."
# Gabungankan wordlist yang sudah dideklarasikan dengan variable **data**. Ketikkan kode berikut :
# > `data = stop_factory + more_stopword`
#
# follow-up code: remove stop words using a custom word list
# Custom stop words added on top of Sastrawi's built-in list, as described
# in the tutorial text above (the original cell referenced `data`,
# `stopword` and `nltk` without ever defining/importing them, raising
# NameError; it also bound the remover to the name `str`, shadowing the
# builtin).
more_stopword = ['daring', 'online']
# combine the built-in Sastrawi word list with the custom additions
data = stop_factory + more_stopword
dictionary = ArrayDictionary(data)
stopword_remover = StopWordRemover(dictionary)
# strip the combined stop words from the sample sentence, then tokenise it
# with the word_tokenize imported above
kalimat_sw = stopword_remover.remove(kalimat)
tokens = word_tokenize(kalimat_sw)
print(tokens)
# -- **Tetap Dirumah and Stay Safe** --
| TM_4_FilteringStopwords.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="00224e65"
# ### Install package
# + id="cdd72d61" outputId="a2c0e778-f6ab-4ab8-dfc2-dd36f1ff5845" colab={"base_uri": "https://localhost:8080/"}
# !pip install pytorch-adapt
# + [markdown] id="08cad7f4"
# ### Import packages
# + id="ae8bd4d9"
import torch
from tqdm import tqdm
from pytorch_adapt.containers import Models, Optimizers
from pytorch_adapt.datasets import DataloaderCreator, get_mnist_mnistm
from pytorch_adapt.hooks import DANNHook
from pytorch_adapt.models import Discriminator, mnistC, mnistG
from pytorch_adapt.utils.common_functions import batch_to_device
from pytorch_adapt.validators import IMValidator
# + [markdown] id="7a230884"
# ### Create datasets and dataloaders
# + id="9bca7bd9" outputId="87388305-cfa2-44b1-9ee2-7db2f0fd5b48" colab={"base_uri": "https://localhost:8080/", "height": 509, "referenced_widgets": ["951cca98651b420e835403b8d9abe1b8", "1a1a7321f66043a1af8f385a0a43b7d2", "a6d1236e6b064b059f24ae34e16a4607", "ea9c400a297244a9bbb91aed980dac24", "4af721e0d6844e53a142b4b7ca25fb5a", "<KEY>", "84a4452defa6491dba5965f2940980a5", "0ca21b198da248478b26df0077a8f701", "<KEY>", "<KEY>", "<KEY>", "301edfd5f21341fa9fec9e1a273e222d", "3c31d06ced494b46a21e2e97906f0f88", "<KEY>", "<KEY>", "aaab30a8eaba4219aba64810de7408ca", "<KEY>", "<KEY>", "1b762e1258fd440f80d214f0a27388b4", "<KEY>", "777a7a137ada47d8aefe6869e1e3780a", "<KEY>", "<KEY>", "7a23b485ac49429e97dd8f37ae977512", "<KEY>", "<KEY>", "abe241a8f14d43e2a9ae0de9acb201d1", "<KEY>", "16b3b629e01a40f39c97d40db93dee85", "47182276e2e94310b7200f1e7a889c31", "26566c656d4c40a1b66a867dbc0adfce", "<KEY>", "c4ddbcda4414453bb09af64f5ee4eedf", "33881ca02da64eb0a8885ad45958d82f", "<KEY>", "<KEY>", "<KEY>", "ba6458aa2b014c10a8f77e43eee8b635", "2cc3f37a4d3545879897d700663e4b78", "1a62de479b9e4a918b8be14b5e1b7465", "64fa5f3e2ecb40209f119364b7e953a8", "<KEY>", "<KEY>", "5f2bf8aca1264465be942a09a7ec0da9", "<KEY>", "<KEY>", "3a324e1b927146b4a32ff2a6ea0696ca", "<KEY>", "928d101dc3594ceb8376325b0ae41a3d", "<KEY>", "8dade271696b47d49350e23f47820782", "129ca01b108e4d6687efde07a0450edd", "668352ceee474849b57daadde78a6226", "<KEY>", "a401727a1945459c9fedca27d70144d8"]}
# Source domain = MNIST, target domain = MNIST-M; downloads into the current dir.
datasets = get_mnist_mnistm(["mnist"], ["mnistm"], folder=".", download=True)
dc = DataloaderCreator(batch_size=32, num_workers=2)
# Returns a dict of dataloaders keyed by split name (e.g. "train", "target_train").
dataloaders = dc(**datasets)
# + [markdown] id="1d9f26b9"
# ### Create models, optimizers, hook, and validator
# + id="b5313ca5" outputId="84ba0aa7-4bf1-40f3-a112-ad30efd3c24a" colab={"base_uri": "https://localhost:8080/", "height": 116, "referenced_widgets": ["c46acf65b13148409f103e8bb0faa8fe", "a81c92445d0a4edf8e8b9a725961145b", "36ed8a8d2d974c0e841d6085865b89bd", "3e3bd7611f144278a547824eec212919", "f7b660c2e6d6414d9b65faddc46b5251", "ed7160f4b0634febbf63a8f0a5a2c932", "999c4becf7eb425f8ca1297ea2e434ee", "bc161cf5a2894ad09b6671805818b813", "222a525d140a4014ac06c95ae0b0f39d", "309e3744f33045a0a28cd8d523a87029", "<KEY>", "<KEY>", "4352f7b7462c4f2f9ef068fdac2e486e", "<KEY>", "52b13c448ad348d29071b126da884d26", "20babcbebac84a4db11b2217490ecbf9", "<KEY>", "34932f4a9c534c47837abf66c3e6200c", "<KEY>", "92f18a03237e4e2e87091d0635b64115", "<KEY>", "9459ad139793466a8ba742d961bba4b0"]}
device = torch.device("cuda")  # requires a CUDA-capable GPU
# DANN roles: G extracts features, C classifies them (used as C(G(x)) below),
# D is the domain discriminator.
G = mnistG(pretrained=True).to(device)
C = mnistC(pretrained=True).to(device)
D = Discriminator(in_size=1200, h=256).to(device)
models = Models({"G": G, "C": C, "D": D})
# One Adam optimizer per model, all with the same learning rate.
optimizers = Optimizers((torch.optim.Adam, {"lr": 0.0001}))
optimizers.create_with(models)
optimizers = list(optimizers.values())  # plain list of optimizers for the hook
hook = DANNHook(optimizers)  # DANN update step; built with the optimizers
validator = IMValidator()    # scored from target logits only (see eval loop)
# + [markdown] id="7e785496"
# ### Train and evaluate
# + id="58b27d15" outputId="cf473028-484a-4a89-fb2f-25601dbe32ad" colab={"base_uri": "https://localhost:8080/"}
for epoch in range(2):
    # train loop: one hook call per batch; the hook was constructed with the
    # optimizers, so it presumably also performs the optimization step —
    # TODO confirm against the pytorch-adapt DANNHook docs
    models.train()
    for data in tqdm(dataloaders["train"]):
        data = batch_to_device(data, device)
        _, loss = hook({**models, **data})
    # eval loop: collect classifier logits for the unlabelled target split
    models.eval()
    logits = []
    with torch.no_grad():
        for data in tqdm(dataloaders["target_train"]):
            data = batch_to_device(data, device)
            logits.append(C(G(data["target_imgs"])))
    logits = torch.cat(logits, dim=0)
    # validation score computed from target logits only (no target labels used here)
    score = validator(target_train={"logits": logits})
    print(f"\nEpoch {epoch} score = {score}\n")
# + id="570a7436"
| examples/getting_started/DANNVanilla.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
# # State-Space Modeling
#
# ## Extended Mechanical Oscillator
#
# A mechanical oscillator with two spring-damping-mass series connected subsystems (see Figure below) are examined.
#
# At both subsystems $ i \in \left\{1,2\right\} $ operate the forces of the springs
#
# $$
# F_{c,i}(t) = c_{i} \left[ y_{i}(t) - y_{i-1}(t) \right] \text{,}
# $$
#
# of the attenuators (damping)
#
# $$
# F_{d,i}(t) = d_{i} \left[ \dot{y}_{i}(t) - \dot{y}_{i-1}(t) \right]
# $$
#
# and of the mass
#
# $$
# F_{m,i}(t) = m_{i} \ddot{y}_{i}(t) \text{.}
# $$
#
# Here, it is assumed that the left boundary is not movable and thus
#
# $$
# y_{0}(t) = \dot{y}_{0}(t) \equiv 0 \text{.}
# $$
#
# Two external forces $F_{ex,i}$ are applied at the masses and the resulting sum of forces are
#
# $$
# F_{ex,1}(t) ~=~ F_{m,1}(t) + F_{c,1}(t) + F_{d_1}(t) - F_{c,2}(t) - F_{d,2}(t) \\
# \qquad \qquad \qquad \qquad \qquad \quad ~=~ m_{1} \ddot{y}_{1}(t) + c_{1} y_{1}(t) + d_{1} \dot{y}_{1}(t) - c_{2} \left[ y_{2}(t) - y_{1}(t) \right] - d_{2} \left[ \dot{y}_{2}(t) - \dot{y}_{1}(t) \right] \tag{1}
# $$
#
# and
#
# $$
# F_{ex,2}(t) ~=~ F_{m,2}(t) + F_{c,2}(t) + F_{d_2}(t) \\
# \qquad \qquad \qquad \qquad \qquad \quad ~=~ m_{2} \ddot{y}_{2}(t) + c_{2} \left[ y_{2}(t) - y_{1}(t) \right] + d_{2} \left[ \dot{y}_{2}(t) - \dot{y}_{1}(t) \right] \text{.} \tag{2}
# $$
#
# The external forces impact a change of positions $y_{i}(t)$ which are measured by a sensor. That means, the positions $y_{i}(t)$ are the outputs of the system.
#
# Inline SVG source for the schematic figure: the two spring-damper-mass
# subsystems (top) and the free-body force diagrams of both masses (bottom).
# The string is rendered below via `display`.
foo = """
<svg height="500" width="700">
<defs>
<!-- arrowhead marker definition -->
<marker id="arrow" viewBox="0 0 10 10" refX="5" refY="5"
markerWidth="6" markerHeight="6"
orient="auto-start-reverse">
<path d="M 0 0 L 10 5 L 0 10 z" />
</marker>
</defs>
<line x1="0" y1="0" x2="0" y2="150" style="stroke:rgb(0,0,0);stroke-width:4" />
<!-- Damping 1-->
<line x1="0" y1="30" x2="40" y2="30" style="stroke:rgb(0,0,0);stroke-width:2" />
<line x1="40" y1="10" x2="40" y2="50" style="stroke:rgb(0,0,0);stroke-width:2" />
<line x1="40" y1="10" x2="160" y2="10" style="stroke:rgb(0,0,0);stroke-width:2" />
<line x1="40" y1="50" x2="160" y2="50" style="stroke:rgb(0,0,0);stroke-width:2" />
<line x1="100" y1="15" x2="100" y2="45" style="stroke:rgb(0,0,0);stroke-width:2" />
<line x1="100" y1="30" x2="200" y2="30" style="stroke:rgb(0,0,0);stroke-width:2" />
<text x="80" y="70" fill="black">Damping d_1</text>
<!-- Spring 1-->
<line x1="0" y1="120" x2="40" y2="120" style="stroke:rgb(0,0,0);stroke-width:2" />
<polyline points="40,120 50,100 60,140 70,100 80,140 90,100 100,140 110,100 120,140 130,100 140,140 150,100 160,140 170,100 180,120"
style="fill:none;stroke:black;stroke-width:2" />
<line x1="180" y1="120" x2="200" y2="120" style="stroke:rgb(0,0,0);stroke-width:2" />
<text x="80" y="160" fill="black">Spring c_1</text>
<!-- Mass 1-->
<rect x="200" y="10" width="50" height="130" style="fill:rgb(255,255,255);stroke-width:3;stroke:rgb(0,0,0)" />
<text x="200" y="160" fill="black">Mass m_1</text>
<!-- Damping 2-->
<line x1="250" y1="30" x2="290" y2="30" style="stroke:rgb(0,0,0);stroke-width:2" />
<line x1="290" y1="10" x2="290" y2="50" style="stroke:rgb(0,0,0);stroke-width:2" />
<line x1="290" y1="10" x2="410" y2="10" style="stroke:rgb(0,0,0);stroke-width:2" />
<line x1="290" y1="50" x2="410" y2="50" style="stroke:rgb(0,0,0);stroke-width:2" />
<line x1="350" y1="15" x2="350" y2="45" style="stroke:rgb(0,0,0);stroke-width:2" />
<line x1="350" y1="30" x2="450" y2="30" style="stroke:rgb(0,0,0);stroke-width:2" />
<text x="330" y="70" fill="black">Damping d_2</text>
<!-- Spring 2-->
<line x1="250" y1="120" x2="290" y2="120" style="stroke:rgb(0,0,0);stroke-width:2" />
<polyline points="290,120 300,100 310,140 320,100 330,140 340,100 350,140 360,100 370,140 380,100 390,140 400,100 410,140 420,100 430,120"
style="fill:none;stroke:black;stroke-width:2" />
<!-- Mass 2-->
<rect x="450" y="10" width="50" height="130" style="fill:rgb(255,255,255);stroke-width:3;stroke:rgb(0,0,0)" />
<text x="440" y="160" fill="black">Mass m_2</text>
<line x1="430" y1="120" x2="450" y2="120" style="stroke:rgb(0,0,0);stroke-width:2" />
<text x="330" y="160" fill="black">Spring c_2</text>
<!-- Positions -->
<line x1="0" y1="160" x2="0" y2="190" style="stroke:rgb(0,0,0);stroke-width:4" />
<line x1="0" y1="175" x2="30" y2="175" style="stroke:rgb(0,0,0);stroke-width:2" marker-end="url(#arrow)" />
<text x="45" y="180" fill="black" font-weight="bold" font-size="large">y</text>
<text x="2" y="220" fill="black" font-size="large">y_0</text>
<text x="215" y="220" fill="black" font-size="large">y_1</text>
<text x="465" y="220" fill="black" font-size="large">y_2</text>
<!-- Overview Forces 1 -->
<line x1="225" y1="260" x2="225" y2="410" style="stroke:rgb(0,0,0);stroke-width:3" />
<line x1="155" y1="275" x2="225" y2="275" style="stroke:rgb(0,0,0);stroke-width:2" marker-start="url(#arrow)" />
<line x1="155" y1="335" x2="225" y2="335" style="stroke:rgb(0,0,0);stroke-width:2" marker-start="url(#arrow)" />
<line x1="155" y1="395" x2="225" y2="395" style="stroke:rgb(0,0,0);stroke-width:2" marker-start="url(#arrow)" />
<line x1="225" y1="270" x2="295" y2="270" style="stroke:rgb(0,0,0);stroke-width:2" marker-end="url(#arrow)" />
<line x1="225" y1="325" x2="295" y2="325" style="stroke:rgb(0,0,0);stroke-width:2" marker-end="url(#arrow)" />
<line x1="225" y1="385" x2="295" y2="385" style="stroke:rgb(0,0,0);stroke-width:2" marker-end="url(#arrow)" />
<text x="175" y="300" fill="black">F_d,1</text>
<text x="175" y="360" fill="black">F_m,1</text>
<text x="175" y="420" fill="black">F_c,1</text>
<text x="245" y="295" fill="black">F_d,2</text>
<text x="245" y="350" fill="black">F_ex,1</text>
<text x="245" y="405" fill="black">F_c,2</text>
<!-- Overview Forces 2 -->
<line x1="475" y1="260" x2="475" y2="410" style="stroke:rgb(0,0,0);stroke-width:3" />
<line x1="405" y1="275" x2="475" y2="275" style="stroke:rgb(0,0,0);stroke-width:2" marker-start="url(#arrow)" />
<line x1="405" y1="335" x2="475" y2="335" style="stroke:rgb(0,0,0);stroke-width:2" marker-start="url(#arrow)" />
<line x1="405" y1="395" x2="475" y2="395" style="stroke:rgb(0,0,0);stroke-width:2" marker-start="url(#arrow)" />
<line x1="475" y1="325" x2="545" y2="325" style="stroke:rgb(0,0,0);stroke-width:2" marker-end="url(#arrow)" />
<text x="425" y="300" fill="black">F_d,2</text>
<text x="425" y="360" fill="black">F_m,2</text>
<text x="425" y="420" fill="black">F_c,2</text>
<text x="495" y="350" fill="black">F_ex,2</text>
</svg>
"""
# Render the SVG string as an inline image in the notebook.
display("image/svg+xml", foo)
# ## Differential Equation
#
# The differential equation is built as a state-space model. To do so, a n-th order differential equation is transfered to a system of first order differential equations with n states. Here, the two second-order differential equations are reshaped as one system of first order differential equations with four states.
#
# The external forces are renamed as inputs
# $$
# u_{1}(t) ~=~ F_{ex,1}(t) \quad \text{and} \quad u_{2}(t) ~=~ F_{ex,2}(t) \text{.}
# $$
#
# The outputs or measured positions $y_{i}(t)$ are renamed as
#
# $$
# x_{1}(t) := y_{1}(t) \qquad \text{and} \qquad x_{2}(t) := y_{2}(t)
# $$
#
# and new variables are introduced with
#
# $$
# x_{3}(t) ~=~ \dot{y}_{1}(t) ~=~ \dot{x}_{1}(t) \text{,} \\
# x_{4}(t) ~=~ \dot{y}_{2}(t) ~=~ \dot{x}_{2}(t) \text{.}
# $$
#
#
# The sum of forces $(1)$ and $(2)$ are rewritten with the new variables as
#
# $$
# \dot{x}_{3}(t) ~=~ -\frac{c_{1}}{m_{1}} x_{1}(t) - \frac{d_{1}}{m_{1}} x_{3}(t) + \frac{c_{2}}{m_{1}} \left[ x_{2}(t) - x_{1}(t) \right] + \frac{d_{2}}{m_{1}} \left[ x_{4}(t) - x_{3}(t) \right] + \frac{1}{m_{1}} u_{1}(t)
# $$
#
# and
#
# $$
# \dot{x}_{4}(t) ~=~ - \frac{c_{2}}{m_{2}} \left[ x_{2}(t) - x_{1}(t) \right] - \frac{d_{2}}{m_{2}} \left[ x_{4}(t) - x_{3}(t) \right] + \frac{1}{m_{2}} u_{2}(t) \text{.}
# $$
#
# Finally, the variables are reorganized in a matrix-vector notation to gain the state-space model
#
# $$
# \begin{pmatrix}
# \dot{x}_{1}(t) \\
# \dot{x}_{2}(t) \\
# \dot{x}_{3}(t) \\
# \dot{x}_{4}(t)
# \end{pmatrix}
# =
# \begin{pmatrix}
# 0 & 0 & 1 & 0 \\
# 0 & 0 & 0 & 1 \\
# \frac{-1}{m_{1}}\left[c_{1} + c_{2}\right] & \frac{c_{2}}{m_{1}} & \frac{-1}{m_{1}}\left[d_{1} + d_{2}\right] & \frac{d_{2}}{m_{1}} \\
# \frac{c_{2}}{m_{2}} & -\frac{c_{2}}{m_{2}} & \frac{d_{2}}{m_{2}} & -\frac{d_{2}}{m_{2}}
# \end{pmatrix}
# \begin{pmatrix}
# x_{1}(t) \\
# x_{2}(t) \\
# x_{3}(t) \\
# x_{4}(t)
# \end{pmatrix}
# +
# \begin{pmatrix}
# 0 & 0 \\
# 0 & 0 \\
# \frac{1}{m_{1}} & 0 \\
# 0 &\frac{1}{m_{2}}
# \end{pmatrix}
# \begin{pmatrix}
# u_{1}(t) \\
# u_{2}(t)
# \end{pmatrix}
# $$
#
# with states $x_{i}$ and output
#
# $$
# \begin{pmatrix}
# y_{1}(t) \\
# y_{2}(t)
# \end{pmatrix}
# =
# \begin{pmatrix}
# 1 & 0 & 0 & 0 \\
# 0 & 1 & 0 & 0
# \end{pmatrix}
# \begin{pmatrix}
# x_{1}(t) \\
# x_{2}(t) \\
# x_{3}(t) \\
# x_{4}(t)
# \end{pmatrix}
# \text{.}
# $$
#
# Linear time-invariant (LTI) systems are noted in the standard form as
#
# $$
# \dot{x}(t) = A ~ x(t) + B ~ u(t) \\
# ~y(t) = C ~ x(t) + D ~ u(t)
# $$
#
# in which the matrices correspond here to
#
# $$
# A =
# \begin{pmatrix}
# 0 & 0 & 1 & 0 \\
# 0 & 0 & 0 & 1 \\
# \frac{-1}{m_{1}}\left[c_{1} + c_{2}\right] & \frac{c_{2}}{m_{1}} & \frac{-1}{m_{1}}\left[d_{1} + d_{2}\right] & \frac{d_{2}}{m_{1}} \\
# \frac{c_{2}}{m_{2}} & -\frac{c_{2}}{m_{2}} & \frac{d_{2}}{m_{2}} & -\frac{d_{2}}{m_{2}}
# \end{pmatrix}
# \qquad \text{,} \qquad
# B =
# \begin{pmatrix}
# 0 & 0 \\
# 0 & 0 \\
# \frac{1}{m_{1}} & 0 \\
# 0 &\frac{1}{m_{2}}
# \end{pmatrix}
# $$
# and
# $$
# C =
# \begin{pmatrix}
# 1 & 0 & 0 & 0 \\
# 0 & 1 & 0 & 0
# \end{pmatrix}
# \qquad \text{,} \qquad
# D = 0_{2 \times 2}
# \text{.}
# $$
# ## Stability
#
# The system without control
#
# $$
# \dot{x}(t) = A x(t)
# $$
#
# is stable if **all** Eigenvalues $\lambda_{i}$ of A are smaller than zero. The Eigenvalues are found by calculating
#
# $$
# \lambda ~ x(t) = A ~ x(t) \quad \Rightarrow \quad (\lambda I - A) ~ x(t) = 0
# $$
#
# and solving the determinant
#
# $$
# \det(\lambda I - A) = 0 \text{.}
# $$
# ## Simulation
#
# The mechanical oscillator with two masses is simulated next. Firstly, the physical constants for mass, damping and spring, and the matrices of the dynamical system have to be specified.
# +
using LinearAlgebra, DifferentialEquations, Plots;
# Defining physical constants
const d₁ = 0.5; # Damping
const d₂ = 0.5;
const c₁ = 0.1; # Spring constant
const c₂ = 0.1;
const m₁ = 1.0; # Mass
const m₂ = 2.0;
# System matrices of the LTI model ẋ = A x + B u, y = C x (see derivation above)
A = [0 0 1 0; 0 0 0 1; (-1/m₁)*(c₁ + c₂) c₂/m₁ (-1/m₁)*(d₁ + d₂) d₂/m₁; c₂/m₂ -c₂/m₂ d₂/m₂ -d₂/m₂]
B = [0 0; 0 0; 1/m₁ 0; 0 1/m₂];
C = [1 0 0 0; 0 1 0 0];
println(" A = ", A, "\n B = ", B,"\n C = ", C)
# -
# ### Stability
#
# Next, the stability is proved to guarantee a suitable behaviour.
# Eigenvalues of A: the uncontrolled system is stable iff all real parts are < 0.
ev = eigvals(A)
println("Eigenvalues: ", ev)
# The real part of all Eigenvalues is smaller than zero, thus the uncontrolled system is stable. Furthermore, the imaginary part of some Eigenvalues is not zero and so the system dynamics tend to oscillating behaviour.
#
# ### Defining the Ordinary Differential Equation
#
# The ordinary differential equation is defined as a Julia function, the initial value $x_{0} = \left(1, 2, 0, 0\right)^{\top}$ and the simulation time range $t \in \left[ 0, 100 \right]$ are set, and the ODE problem is built. Here, the system has an input of two scaled step functions
#
# $$
# u_{1}(t) = 0.5 \quad \text{for} ~ t \geq 0
# $$
# and
# $$
# u_{2}(t) = 1.5 \quad \text{for} ~ t \geq 0 \text{.}
# $$
# +
# Definition of ODE: in-place right-hand side dx = A*x + B*u with constant
# step inputs u1, u2.
# NOTE(review): the parameter argument `p` is unused; the function closes over
# the globals A and B instead (ODEProblem below still passes A as p).
function mech_oscillator(dx,x,p,t)
    u1 = 0.5; # 1. Control input (constant step force F_ex,1)
    u2 = 1.5; # 2. Control input (constant step force F_ex,2)
    u = [u1; u2]
    dx .= A*x + B*u # Right-hand side of ODE
end
x₀ = [1.0; 2.0; 0.0; 0.0]; # Initial values: positions y1=1, y2=2; velocities 0
tspan = (0.0, 100.0); # Time span
#Build ODE Problem (A is passed as the parameter object but not used inside)
prob = ODEProblem(mech_oscillator,x₀,tspan, A);
# -
# ### Results
#
# Finally, the ODE problem is solved and the output is plotted.
# +
sol = solve(prob); # Solve ODE Problem (default solver and tolerances)
y = C * sol; # Calculate system output y = C*x at every stored time step
plot(sol.t, transpose(y), title="System response", xaxis="Time [s]", yaxis="Position [m]")
| 2020-1-Summer/text/jupyter/CE_2020_State_Space_Modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:PythonData]
# language: python
# name: conda-env-PythonData-py
# ---
from bs4 import BeautifulSoup as bs
import requests
import pymongo
import pandas as pd
import re
import os
import csv
import json
import pprint
from datetime import datetime, timedelta
# +
# corona_url = "https://github.com/globalcitizen/2019-wuhan-coronavirus-data/blob/master/data-sources/dxy/data/20200205-091401-dxy-2019ncov-data.json"
# Load a local snapshot of the DXY 2019-nCoV data: a JSON list of per-province
# record dicts (keys used below: provinceName, confirmedCount, cities, ...).
corona_file = 'data/20200205-091401.json'
with open(corona_file, encoding="utf8") as f:
    my_file = json.load(f)
# Pretty-print each province record for inspection.
for x in my_file:
    pprint.pprint(x)
# -
# Accumulator for one tuple per province; alias the parsed JSON list directly.
# (The original also created an empty pd.Series that was immediately
# overwritten by this assignment — dead code, removed.)
my_array = []
my_dict = my_file
print(my_dict)
# +
# new_date=datetime.now().strftime ('%Y-%m-%d')
# Flatten each province record into a fixed-order tuple and collect them
# for the DataFrame built below.
for record in my_dict:
    row = (
        record['provinceName'],
        record["provinceShortName"],
        record["confirmedCount"],
        record["suspectedCount"],
        record['curedCount'],
        record['deadCount'],
        record['locationId'],
        record['cities'],
    )
    print(row)
    my_array.append(row)
# -
# CAUTION: THIS WILL OVERWRITE ANY CSV FILE ALREADY THERE WITH THAT NAME
new_date = datetime.now().strftime('%Y-%m-%d')
output_path = os.path.join("output", "dataframe_" + new_date + ".csv")
# Build the DataFrame *before* writing it: the original called my_df.to_csv()
# two lines before my_df was first defined (NameError on a fresh run), and
# also created an empty DataFrame that was immediately overwritten.
my_df = pd.DataFrame(my_array)
my_df.columns = ['provinceName', "provinceShortName", "confirmedCount", "suspectedCount",
                 'curedCount', 'deadCount', 'locationId', 'cities']
my_df.to_csv(output_path)
my_df.head()
# +
# Or we can scrape:
# Retrieve page with the requests module
# REPLACE THIS FILE NAME:
corona_file = "data-sources/dxy/data/20200218-175113-dxy-2019ncov-data.json"
corona_url = "https://github.com/globalcitizen/2019-wuhan-coronavirus-data/blob/master/" + corona_file
print(corona_url)
# Slice the snapshot date (YYYY-MM-DD) out of the file name; characters 22:30
# of the path hold the "20200218" timestamp prefix.
new_date = corona_file[22:26]+"-"+corona_file[26:28]+"-"+corona_file[28:30]
print(new_date)
# new_date=datetime.now().strftime ('2020-02-18')
response = requests.get(corona_url)
print(response)
# Parse the GitHub HTML page (not the raw file) with the lxml parser.
soup = bs(response.text, 'lxml')
# print(soup)
# +
# GitHub renders each code line in a <td> with these classes; id "LC1" is
# line 1, which holds the entire one-line JSON payload.
my_text = soup.find_all('td', class_='blob-code blob-code-inner js-file-line', attrs={"id":"LC1"})
# print(my_text)
# Take the text of each match (normally just one), stopping after `limit`.
limit = 100
for index, d_fl in enumerate(my_text):
#     print(index, d_fl.text)
#     print(d_fl.text)
    my_scrape_file = d_fl.text
    if index==limit:
        break
# print(my_scrape_file)
# SECURITY: eval() executes arbitrary code taken from a scraped web page; if
# this content is not fully trusted, prefer json.loads() or ast.literal_eval().
my_series = eval(my_scrape_file)
# print(my_series)
# +
# for x in my_series:
#     print(x)
# Build a DataFrame from the parsed province records, keep the columns of
# interest (the commented variant also kept 'locationId'), and tag each row
# with the snapshot date parsed from the file name.
my_scrape_df = pd.DataFrame(my_series)
# my_scrape_df = my_scrape_df[['provinceName', "provinceShortName", "confirmedCount", "suspectedCount",'curedCount','deadCount','locationId','cities']]
my_scrape_df = my_scrape_df[['provinceName', "provinceShortName", "confirmedCount", "suspectedCount",'curedCount','deadCount', 'cities']]
my_scrape_df['date'] = new_date
my_scrape_df.head()
# +
# new_date=datetime.now().strftime ('%Y-%m-%d')
# Persist the scraped snapshot as data/df_<YYYY-MM-DD>.csv (overwrites silently).
output_path = os.path.join("data", "df_" + new_date + ".csv")
my_scrape_df.to_csv(output_path)
# -
# For later. Loop through all the files created
# Print the name of every CSV file in the data directory. The original's
# `continue`/`else:`/`continue` scaffolding was redundant — the loop advances
# on its own — so it is dropped.
for filename in os.listdir("data"):
    if filename.endswith(".csv"):
        print(filename)
# Print each line of the URL list. A `with` block guarantees the file handle
# is closed even if printing raises, replacing the manual try/finally.
with open('data/urls_to_data_files.txt') as input_file:
    for i, line in enumerate(input_file):
        print(line)
# print f"{0} line(s) printed".format(i+1)
| Old files/Corona_Virus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <figure>
# <IMG SRC="https://mamba-python.nl/images/logo_basis.png" WIDTH=125 ALIGN="right">
#
# </figure>
#
# # Voorbeeld Python toepassing
#
# <br>
#
#
#
#
# Dit notebook is gemaakt als voorbeeld van een Python toepassing voor de MAMBA Python cursus.
#
# <br>
# <br>
#
# <div style="text-align: right"> developed by MAMBA </div>
# ## Eigen toepassing
#
# Doel: Bepalen van de capaciteit van verschillende fietspaden in Montreal aan de hand van het gemiddelde en het maximaal aantal fietser op die fietspaden.
#
# input: .csv - bestand met aantal fietsers per fietspad per dag.
#
# gewenste output: .png - Grafieken en getallen waaruit de gemiddelde en maximaal benodigde capaciteit blijkt.
#
# ## Stappenplan<a class="anchor" id="0"></a>
# 1. [import packages](#1)
# 2. [lees data in](#2)
# 3. [bewerk data](#3)
# 4. [plot resultaten](#4)
# 5. [Analyse](#5)
# 6. [Bronnen](#6)
# ## 1. import packages<a class="anchor" id="1"></a>
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
import numpy as np
#settings: render plots inline and use the seaborn matplotlib style
# %matplotlib inline
plt.style.use('seaborn')
# [terug naar inhoudsopgave](#0)
#
# ## 2. lees data in <a class="anchor" id="2"></a>
# In dit geval een dataset met het aantal fietsers op 9 verschillende plekken in Montreal
# Daily bicycle counts for 9 locations in Montreal (2012), one row per day.
df = pd.read_csv('data/comptagevelo2012.csv')
# look at the first 3 rows
df[:3]
# [terug naar inhoudsopgave](#0)
#
# ## 3. bewerk data<a class="anchor" id="3"></a>
# rename the unnamed second column to 'Hour'
df.rename(columns={'Unnamed: 1': 'Hour'}, inplace=True)
df[:3]
# parse 'Date' + 'Hour' (day-first format) into a datetime and use it as index
df.index = pd.to_datetime(df['Date'] + ' ' + df['Hour'], dayfirst=True)
df[:3]
# [terug naar inhoudsopgave](#0)
#
# ## 4. Plot resultaten <a class="anchor" id="4"></a>
# Plot all counting stations over the full year in one figure.
df.plot(figsize=(16,6))
# [terug naar inhoudsopgave](#0)
#
# ## 5. Analyse <a class="anchor" id="5"></a>
# maximum daily rider count per path
print(df.max())
# mean daily rider count per path
print(df.mean())
# #### Piek bij <NAME>
#
# Meetpunt Pont_Jacques_Cartier laat een vreemde piek zien. Deze waarde wordt nader onderzocht door deze apart te plotten, in te zoomen en een beschrijving te maken.
# Plot the suspicious station on its own to inspect the spike.
df['Pont_Jacques_Cartier'].plot(figsize=(16,6))
# zoom in on the spike (March 2012)
df[dt.datetime(2012,3,1):dt.datetime(2012,4,1)].plot(figsize=(16,6))
# summary statistics for Pont Jacques Cartier
df['Pont_Jacques_Cartier'].describe()
# Conclusion: the extreme value must be an error.
# Remove the spike by masking 18-19 March with NaN, then re-check the stats.
df.loc[dt.datetime(2012,3,18):dt.datetime(2012,3,19),'Pont_Jacques_Cartier'] = np.nan
df['Pont_Jacques_Cartier'].describe()
# Try to explain the variation in rider counts. There are clearly more riders
# in summer — but what about the large week-to-week swings?
df['Maisonneuve_2'].plot(figsize=(16,6))
# plot the mean number of riders per weekday
df['week_dag'] = df.index.dayofweek
df['dag_naam'] = df.index.day_name()
# numeric_only=True averages only the numeric count columns; the string
# columns ('Date', 'Hour', 'dag_naam') cannot be averaged and make .mean()
# raise on modern pandas (older pandas silently dropped them).
gb = df.groupby('week_dag').mean(numeric_only=True)
ax = gb.plot(figsize=(16,6))
tick_labels = ax.set_xticklabels(['' , 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun', ''])
# plot the maximum number of riders per weekday
# NOTE(review): week_dag/dag_naam were already added in the cell above;
# recomputing them here is harmless but redundant.
df['week_dag'] = df.index.dayofweek
df['dag_naam'] = df.index.day_name()
gb = df.groupby('week_dag').max()
ax = gb.plot(figsize=(16,6))
tick_labels = ax.set_xticklabels(['' , 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun', ''])
# [terug naar inhoudsopgave](#0)
#
# ## 6. bronnen <a class="anchor" id="6"></a>
# This notebook was created using the following sources:
# - http://nbviewer.jupyter.org/github/jvns/pandas-cookbook/blob/v0.2/cookbook/Chapter%201%20-%20Reading%20from%20a%20CSV.ipynb
| Exercise_notebooks/Your_Application/voorbeeld_toepassing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reading a file with pandas
# ## Overview
# - **Teaching:** 10 min
# - **Exercises:** 5 min
#
# **Questions**
# * How can I read my data file into pandas?
#
# **Objectives**
# * Use pandas to read in a CSV file.
#
# One of the most common situations is that you have some data file containing the data you want to read. Perhaps this is data you've produced yourself or maybe it's from a colleague. In an ideal world the file will be perfectly formatted and will be trivial to import into pandas but since this is so often not the case, it provides a number of features to make your life easier.
#
# Full information on reading and writing is available in the pandas manual on [IO tools](http://pandas.pydata.org/pandas-docs/stable/io.html) but first it's worth noting the common formats that pandas can work with:
# - Comma separated tables (or tab-separated or space-separated etc.)
# - Excel spreadsheets
# - HDF5 files
# - SQL databases
#
# For this lesson we will focus on plain-text CSV files as they are perhaps the most common format. Imagine we have a CSV file like (you can download this file from [city_pop.csv](../data/city_pop.csv)):
# !cat ../data/city_pop.csv # Uses the IPython 'magic' !cat to print the file
# We can use the pandas function `read_csv()` to read the file and convert it to a `DataFrame`. Full documentation for this function can be found in [the manual](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) or, as with any Python object, directly in the notebook by typing `help(pd.read_csv)`.
# +
import pandas as pd
# First attempt: read the file with all defaults to show the naive parse.
csv_file = '../data/city_pop.csv'
pd.read_csv(csv_file)
# -
# We can see that by default it's done a fairly bad job of parsing the file (this is mostly because it has been constructed to be as obtuse as possible). It's making a lot of assumptions about the structure of the file but in general it's taking quite a naïve approach.
#
# The first thing we notice is that it's treating the text at the top of the file as though it's data. Checking [the documentation](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html) we see that the simplest way to solve this is to use the `skiprows` argument to the function to which we give an integer giving the number of rows to skip:
# skiprows=5 skips the descriptive text lines at the top of the file.
pd.read_csv(csv_file,
skiprows=5,
)
# ## Info: Editing cells
# If you are following along with this material in a notebook, don't forget you can edit a cell and execute it again.
# In this lesson, you can just keep modifying the input to the `read_csv()` function and re-execute the cell, rather than making a new cell for each modification.
# The next most obvious problem is that it is not separating the columns at all. This is controlled by the `sep` argument which is set to `','` by default (hence *comma* separated values). We can simply set it to the appropriate semi-colon:
# sep=';' tells pandas the columns are semicolon-separated.
pd.read_csv(csv_file,
skiprows=5,
sep=';'
)
# Reading the descriptive header of our data file we see that a value of `-1` signifies a missing reading so we should mark those too. This can be done after the fact but it is simplest to do it at import-time using the `na_values` argument:
# na_values='-1' marks the file's missing-reading sentinel as NaN at import time.
pd.read_csv(csv_file,
skiprows=5,
sep=';',
na_values='-1'
)
# The last thing we want to do is use the `year` column as the index for the `DataFrame`. This can be done by passing the name of the column to the `index_col` argument:
# index_col='year' makes the year column the DataFrame index.
df3 = pd.read_csv(csv_file,
skiprows=5,
sep=';',
na_values='-1',
index_col='year'
)
df3
# ## Exercise: Comma separated files
# - There is another file called `cetml1659on.dat` (available from [here](../data/cetml1659on.dat)). This contains some historical weather data for a location in the UK. Import that file as a Pandas `DataFrame` using `read_csv()`, making sure that you cover all the NaN values. Be sure to look at the [documentation](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.read_csv.html#pandas.read_csv) for `read_csv()`.
# - How many years had a negative average temperature in January?
# - What was the average temperature in June over the years in the data set? Tip: look in the [documentation](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html) for which method to call.
#
# We will come back to this data set at a later stage.
#
# Hints for the first part:
# * The syntax for whitespace delimited data is `sep='\s+'`, which is not immediately obvious from the documentation.
# * The data is almost complete (which is unusual for scientific data) and there are only two invalid entries. Look at the last row of the file and, given that the data is temperature data, deduce which values need to be `na_values`. (You can use a list to give multiple `na_values`)
# * If you can't work out how to do the first part of this exercise, take a look at the solutions.
# ## Solution: Comma separated files
# * Read in the CSV file, skipping the first 6 rows, using whitespace to separate data, invalid data -99.9 and -99.99:
#
# ```python
# import pandas as pd
#
# weather_csv = 'cetml1659on.dat'
# weather_df = pd.read_csv(weather_csv,
# skiprows=6,
# sep='\s+',
# na_values=['-99.9', '-99.99']
# )
# print(weather_df.head())
# ```
#
# Output:
# ```brainfuck
# JAN FEB MAR APR MAY JUN JUL AUG SEP OCT NOV DEC YEAR
# 1659 3.0 4.0 6.0 7.0 11.0 13.0 16.0 16.0 13.0 10.0 5.0 2.0 8.87
# 1660 0.0 4.0 6.0 9.0 11.0 14.0 15.0 16.0 13.0 10.0 6.0 5.0 9.10
# 1661 5.0 5.0 6.0 8.0 11.0 14.0 15.0 15.0 13.0 11.0 8.0 6.0 9.78
# 1662 5.0 6.0 6.0 8.0 11.0 15.0 15.0 15.0 13.0 11.0 6.0 3.0 9.52
# 1663 1.0 1.0 5.0 7.0 10.0 14.0 15.0 15.0 13.0 10.0 7.0 5.0 8.63
#
# ```
#
# * Select all data in the January column less that 0, use `len()` so we don't have to count the rows ourself.
#
# ```python
# weather_df[weather_df['JAN'] < 0] # Would output all the entries
# len(weather_df[weather_df['JAN'] < 0]) # Just counts the number of rows
# ```
#
# Output:
# ```brainfuck
# 20
# ```
#
# * The average of the data can be found using the `.mean()` method:
#
# ```python
# weather_df['JUN'].mean()
# ```
#
# Output:
# ```brainfuck
# 14.325977653631282
# ```
# .
# ## Key Points
# * Pandas provides the `read_csv()` function for reading in CSV files.
# * Although it saves us a lot of work the syntax can be quite tricky.
| notebooks_plain/05_pandas_pt2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Image segmentation
# Image segmentation is the process of separating an image into multiple regions.
#
# See also
# * [Image manipulation and processing using Numpy and Scipy by <NAME> and <NAME>](https://scipy-lectures.org/advanced/image_processing/index.html#basic-image)
# * [Tutorial on image segmentation with scikit-image](https://scikit-image.org/docs/dev/user_guide/tutorial_segmentation.html)
#
# Let's start again by defining an image as a two dimensional array and visualize it using pyclesperanto.
import numpy as np
from pyclesperanto_prototype import imshow
import matplotlib.pyplot as plt
# A tiny 7x7 toy image: a bright blob (values 5-6) on a low-intensity background.
image = np.asarray([
[1, 0, 2, 1, 0, 0, 0],
[0, 3, 1, 0, 1, 0, 1],
[0, 5, 5, 1, 0, 1, 0],
[0, 6, 6, 5, 1, 0, 2],
[0, 0, 5, 6, 3, 0, 1],
[0, 1, 2, 1, 0, 0, 1],
[1, 0, 1, 0, 0, 1, 0]
])
imshow(image, colorbar=True)
# + [markdown] tags=[]
# ## Binary images
# The most basic way of that is binarization, turning the image into a "positive" and a "negative" region. Typically, binary images are used for that, which could for example contain two different pixel values `True` and `False` representing "positive" and "negative", respectively. Technically, every image can be interpreted as a binary image using the rationale "Every pixel is considered positive that is neither `False` nor `0`."
#
# ## Image thresholding
# A very basic algorithm for separating low intensity regions from high intensity regions in the image is thresholding.
# We will now make a new image containing `True` and `False` as pixel values depending on if the original image had intensity lower or higher a given threshold. As this image has just two different pixel values, it is a binary image:
# +
# Binarize: every pixel strictly brighter than the threshold becomes True.
threshold = 4
binary_image = image > threshold
# -
binary_image
imshow(binary_image)
# [Matplotlib](https://matplotlib.org/) might be more flexible when visualizing images, e.g. for drawing outlines around regions of interest:
# +
# create a new plot
fig, axes = plt.subplots(1,1)
# overlay: grayscale image plus a red contour at the mask boundary (level 0.5)
axes.imshow(image, cmap=plt.cm.gray)
axes.contour(binary_image, [0.5], linewidths=1.2, colors='r')
| docs/20_image_segmentation/06_Introduction_to_image_segmentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import local_models.local_models
import local_models.algorithms
import local_models.utils
import local_models.linear_projections
import local_models.loggin
import local_models.TLS_models
import numpy as np
import matplotlib.pyplot as plt
import sklearn.linear_model
import sklearn.cluster
from importlib import reload
from ml_battery.utils import cmap
import matplotlib as mpl
import sklearn.datasets
import sklearn.decomposition
import logging
import ml_battery.log
import time
import os
import mayavi
import mayavi.mlab
import string
import subprocess
import functools
import cv2
#on headless systems, tmux: "Xvfb :1 -screen 0 1280x1024x24 -auth localhost", then "export DISPLAY=:1" in the jupyter tmux
mayavi.mlab.options.offscreen = True

logger = logging.getLogger(__name__)

#reload(local_models.local_models)
#reload(lm)
#reload(local_models.loggin)
#reload(local_models.TLS_models)

# BUG FIX: ``np.warnings`` was an undocumented alias for the stdlib ``warnings``
# module and was removed in NumPy 1.24; use the stdlib module directly.
import warnings
warnings.filterwarnings('ignore')
# +
def import_shit():
    """(Re)import the project's dependency stack and return a module logger.

    Intended for distributed workers: execution frameworks that ship a function
    to a fresh process do not ship this module's globals, so each worker has to
    perform its own imports before doing any work.

    Returns
    -------
    logging.Logger
        A logger named after this module.
    """
    import local_models.local_models
    import local_models.algorithms
    import local_models.utils
    import local_models.linear_projections
    import local_models.loggin
    import local_models.TLS_models
    import numpy as np
    import logging
    import string
    import ml_battery.log
    logger = logging.getLogger(__name__)
    #reload(local_models.local_models)
    #reload(lm)
    #reload(local_models.loggin)
    #reload(local_models.TLS_models)
    # BUG FIX: ``np.warnings`` was removed in NumPy 1.24; use the stdlib module.
    import warnings
    warnings.filterwarnings('ignore')
    return logger
def mean_center(data, weights=None):
    """Shift *data* so its (optionally weighted) column-wise average is zero.

    Parameters
    ----------
    data : (n, d) array-like of observations (rows are samples).
    weights : optional per-row weights forwarded to ``np.average``.

    Returns
    -------
    ndarray with the same shape as *data*, centred on the (weighted) mean.
    """
    centroid = np.average(data, axis=0, weights=weights)
    return data - centroid
def load_converged_data(pth):
    """Load and stack every whitespace-delimited data file found under *pth*.

    Files are read in sorted filename order and concatenated row-wise, so the
    result is one array containing all converged points.
    """
    arrays = [np.loadtxt(os.path.join(pth, name)) for name in sorted(os.listdir(pth))]
    return np.concatenate(arrays, axis=0)
def plt_grid(fig, grid, data_avg, data_std):
    """Render the (converged) grid points into mayavi figure *fig* as glyphs.

    Glyph size is tied to the overall data spread (``data_std``) and glyph
    colour encodes the y-coordinate normalised to a +/-3-sigma band around the
    data mean, using the 'gist_earth' colormap.

    Returns the mayavi points3d source so callers can tweak it further.
    """
    nodes = mayavi.mlab.points3d(grid[:,0], grid[:,1], grid[:,2],
                                 scale_mode='scalar', scale_factor=1,
                                 colormap='gist_earth', figure=fig)
    # Scale glyphs by a per-point vector rather than the scalar (the scalar is
    # reserved for colour below).
    nodes.glyph.scale_mode = 'scale_by_vector'
    # Uniform glyph size: 1/60th of the average per-axis standard deviation.
    nodes.mlab_source.dataset.point_data.vectors = np.ones((grid.shape[0],3))*(np.average(data_std)/60)
    # Colour ramp over y in [mean - 3*std, mean + 3*std].
    nodes.mlab_source.dataset.point_data.scalars = (grid[:,1] - (data_avg[1]-3*data_std[1]))/(6*data_std[1])
    return nodes
def plt_data(fig, data, data_std):
    """Render the raw data points into mayavi figure *fig* as green glyphs.

    Same sizing convention as ``plt_grid`` (1/60th of the mean std per axis),
    but with a constant colour scalar so all points look identical.

    Returns the mayavi points3d source.
    """
    nodes = mayavi.mlab.points3d(data[:,0], data[:,1], data[:,2],
                                 scale_mode='scalar', scale_factor=1,
                                 colormap='Greens', figure=fig)
    nodes.glyph.scale_mode = 'scale_by_vector'
    nodes.mlab_source.dataset.point_data.vectors = np.ones((data.shape[0],3))*(np.average(data_std)/60)
    # Constant scalar -> uniform colour from the 'Greens' map.
    nodes.mlab_source.dataset.point_data.scalars = np.ones((data.shape[0]))
    return nodes
def get_normals(kernel, linear_models, data):
    """Fit local linear models around each point and return their basis vectors.

    Dispatches on the kernel type: a callable ``kernel.bandwidth`` is taken to
    indicate a k-nearest-neighbour kernel (pass ``k``), otherwise a
    fixed-radius kernel (pass ``r`` from the kernel's support radius).
    The per-point mean returned by ``transformate_data`` is discarded.

    NOTE(review): semantics of ``transformate_data`` are defined in
    local_models.linear_projections — confirm the returned vectors are surface
    tangent vectors (from which normals are derived via cross product upstream).
    """
    if hasattr(kernel.bandwidth, "__call__"):
        linear_params_vecs, linear_params_mean = local_models.linear_projections.transformate_data(data, kernel, linear_models, k=kernel.k)
    else:
        linear_params_vecs, linear_params_mean = local_models.linear_projections.transformate_data(data, kernel, linear_models, r=kernel.support_radius())
    return linear_params_vecs
def align_normals(data, normals, k=10, iterations=100):
    """Flip normals so that neighbouring points agree in orientation.

    For each point, the dot products between its normal and the normals of its
    *k* nearest neighbours are averaged; a negative average means the normal
    disagrees with its neighbourhood consensus and is flipped.  Repeating for
    *iterations* rounds lets a consistent orientation propagate.

    Parameters
    ----------
    data : (n, d) array of point positions.
    normals : (n, d) array of per-point normals.
    k : neighbourhood size; index 0 of each query row is the point itself.
    iterations : number of propagation rounds.

    Returns
    -------
    (n, d) array of re-oriented normals.
    """
    # BUG FIX: the file's top-level imports only bring in sklearn.cluster and
    # sklearn.linear_model; ``sklearn.neighbors`` was reachable only as an
    # import side effect of those.  Import it explicitly.
    import sklearn.neighbors
    balltree = sklearn.neighbors.BallTree(data)
    pairwise_nearest_indices = balltree.query(data, k=k, sort_results=True, return_distance=False)
    for iteration in range(iterations):
        alignments = []
        # Skip column 0: each point's nearest neighbour is itself.
        for index in range(1, pairwise_nearest_indices.shape[1]):
            # Row-wise dot product between each normal and a neighbour's normal.
            alignment = np.einsum("ij,ij->i", normals, normals[pairwise_nearest_indices[:, index]])
            alignments.append(alignment)
        alignment = np.average(alignments, axis=0)
        wrong_alignment = np.sign(alignment)
        # BUG FIX: np.sign is 0 for an exactly-orthogonal consensus, which would
        # permanently zero the normal; treat 0 as "keep" (+1), mirroring the
        # convention used in align_edge_normals.
        wrong_alignment += np.logical_not(wrong_alignment)
        normals = normals * wrong_alignment.reshape(-1, 1)
    return normals
def align_edge_normals(data, normals, edge_range=0.1):
    """Orient normals near the bounding-box edges to point outward.

    Points within ``edge_range`` of the data's bounding box (as computed by
    local_models.linear_projections helpers) have their normals flipped so they
    point away from the interior: toward -axis at a minimum face, +axis at a
    maximum face.  Interior points are left untouched.
    """
    data_mins, data_maxes, data_ranges = local_models.linear_projections.min_max_range(data)
    # Negative margin shrinks the box inward, so the comparisons below flag
    # points lying in the outer shell of width ~edge_range.
    graph_bounds = local_models.linear_projections.sane_graph_bounds(data_mins, data_maxes, data_ranges, -edge_range)
    mins = data < graph_bounds[:1]
    maxes = data > graph_bounds[1:]
    # Boolean masks act as 0/1 weights in the row-wise dot products: sign of
    # the normal's component along the offending axes.
    mins_alignment = np.sign(np.einsum("ij,ij->i", mins, -1*normals))
    maxes_alignment = np.sign(np.einsum("ij,ij->i", maxes, normals))
    mins_alignment += np.logical_not(mins_alignment) # turn 0s into 1s (so they don't change)
    maxes_alignment += np.logical_not(maxes_alignment)
    return normals*mins_alignment.reshape(-1,1)*maxes_alignment.reshape(-1,1)
def plt_normals(fig, normals, data, data_std):
    """Draw *normals* as purple arrows anchored at *data* points in figure *fig*.

    Arrow length is scaled to 1/5th of the mean per-axis standard deviation so
    the quiver stays legible at any data scale.  Returns the quiver3d source.
    """
    nodes = mayavi.mlab.quiver3d(data[:,0], data[:,1], data[:,2],
                                 normals[:,0], normals[:,1], normals[:,2],
                                 scale_mode='scalar', scale_factor=np.average(data_std)/5,
                                 colormap='Purples', figure=fig, line_width=1.0)
    return nodes
def normalize_view(fig, data_avg, data_std, azimuth=0, elevation=0):
    """Point the mayavi camera at the data centroid from a standardized distance.

    Distance is 15x the mean per-axis standard deviation, so differently scaled
    datasets render with comparable framing.  Note: operates on the *current*
    mayavi scene; the ``fig`` argument is accepted for API symmetry but unused.
    """
    mayavi.mlab.view(azimuth=azimuth, elevation=elevation, distance=15*np.average(data_std), focalpoint=(data_avg[0], data_avg[1], data_avg[2]))
def plt_and_save(data, grid, normals, pth):
    """Render data points, grid points, and +/- normals to a PNG at *pth*.

    Builds an offscreen mayavi figure, composes the three layers (raw data in
    green, grid coloured by height, normals drawn in both directions so
    orientation ambiguity is visible), frames the camera, saves at 2x
    magnification, and closes the figure to release GL resources.
    """
    data_avg = np.average(data, axis=0)
    data_std = np.std(data, axis=0)
    figure = mayavi.mlab.figure(figure=None, bgcolor=(1,1,1), fgcolor=(0,0,0), engine=None, size=(1000, 500))
    data_nodes = plt_data(figure, data, data_std)
    converged_nodes = plt_grid(figure, grid, data_avg, data_std)
    # Draw normals in both directions: sign of a fitted normal is arbitrary.
    normal_vecs = plt_normals(figure, normals, grid, data_std)
    neg_normal_vecs = plt_normals(figure, -normals, grid, data_std)
    normalize_view(figure, data_avg, data_std)
    mayavi.mlab.savefig(pth, magnification=2)
    mayavi.mlab.close(figure)
def serialize_plt(pth):
    """Read the file at *pth* and return its contents as zlib-compressed bytes.

    Used to ship rendered plot images compactly between distributed workers.
    """
    import zlib
    with open(pth, 'rb') as handle:
        raw = handle.read()
    return zlib.compress(raw)
def deserialize_plt(dat, pth):
    """Decompress zlib-compressed bytes *dat* into a file at *pth*.

    Inverse of ``serialize_plt``; returns *pth* so the call can be chained
    into image-loading code.
    """
    import zlib
    decompressed = zlib.decompress(dat)
    with open(pth, 'wb') as handle:
        handle.write(decompressed)
    return pth
def distributed_plt_and_save(data, grid, bandwidth):
    """Worker-side render job: plot *data*/*grid*, return the PNG compressed.

    Re-imports dependencies and re-enables offscreen rendering so the function
    can run in a fresh worker process, renders to a uniquely named file on the
    ramfs scratch disk, and returns the zlib-compressed PNG bytes (or None if
    rendering failed).  The scratch file is always removed.
    """
    import numpy as np
    import mayavi
    import mayavi.mlab
    import string
    import os
    #on headless systems, tmux: "Xvfb :1 -screen 0 1280x1024x24 -auth localhost", then "export DISPLAY=:1" in the jupyter tmux
    mayavi.mlab.options.offscreen = True
    unique_id = "".join(np.random.choice(list(string.ascii_lowercase), replace=True, size=20))
    pth = "/ramfs/{}.png".format(unique_id)
    try:
        # NOTE(review): plt_and_save's third parameter is *normals*; here
        # ``bandwidth`` is forwarded in that slot — confirm the intent.
        plt_and_save(data, grid, bandwidth, pth)
        # BUG FIX: the compressed plot was previously computed but never
        # returned, so callers always got None.
        return serialize_plt(pth)
    except Exception:
        # Keep the original best-effort behaviour, but log instead of
        # swallowing silently.
        logger.exception("distributed plot failed for %s", pth)
        return None
    finally:
        # BUG FIX: the scratch PNG was only removed on failure (leaking ramfs
        # on success), and os.remove could itself raise if the file was never
        # written.  Always clean up, guarding against a missing file.
        if os.path.exists(pth):
            os.remove(pth)
# -
# Re-run everything from scratch when True (flag referenced by later cells).
FRESH=True
# Human-readable names used to build per-kernel output directories.
kernel_names = {
    local_models.local_models.GaussianKernel: 'gaussian',
    local_models.local_models.TriCubeKernel: 'tricube'
}
mpl.rcParams['figure.figsize'] = [8.0, 8.0]
# NOTE(review): absolute local path — the notebook is not portable as-is.
data_file = "/home/brown/Downloads/subject001/3d/andreadm2.stl"
# +
import stl

# Flatten the STL triangle mesh into a plain (n, 3) array of vertex positions.
tri_mesh = stl.mesh.Mesh.from_file(data_file).points.reshape(-1,3)
# -
tri_mesh.shape
# Deduplicate shared triangle vertices; ``cts`` counts triangle memberships.
data, cts = np.unique(tri_mesh, axis=0, return_counts=True)
data.shape
(data.shape[0]*100)**(1/3)
np.mean(data, axis=0)
# +
KERNEL=local_models.local_models.TriCubeKernel
RUN = 1
project_dir = "../data/faces_{}_{:03d}".format(kernel_names[KERNEL], RUN)
os.makedirs(project_dir, exist_ok=1)
# + active=""
# mayavi.mlab.figure(figure=None, bgcolor=(1,1,1), fgcolor=(0,0,0), engine=None, size=(800, 800))
# #mayavi.mlab.surf(grid[0], grid[1], kde_wireframe/z_scale, colormap='Greys', opacity=1)
# #nodes = mayavi.mlab.points3d(converged[:,0], converged[:,1], converged[:,2], scale_mode='scalar', color=(1,0,0))
# #nodes.mlab_source.dataset.point_data.scalars = np.ones(converged.shape[0])*0.1
# bunodes = mayavi.mlab.points3d(data[:,0], data[:,1], data[:,2], scale_mode='scalar', color=(0,1,0))
# bunodes.mlab_source.dataset.point_data.scalars = np.ones(data.shape[0])*0.05
#
#
# #mayavi.mlab.axes()
#
# #mayavi.mlab.view(views[0][1],views[0][0])
# data_avg = np.average(data, axis=0)
# for az in [0,90,180,270]:
# for el in [0,90,180,270]:
# mayavi.mlab.view(azimuth=az, elevation=el, distance=15*np.average(data_avg), focalpoint=(data_avg[0], data_avg[1], data_avg[2]))
# #mayavi.mlab.move(forward=None, right=0.1*data_avg[1], up=-0.02*data_avg[0])
# title = "data_{:03d}_{:03d}".format(az,el)
# mayavi.mlab.savefig(os.path.join(project_dir, "{}.png".format(title)))
# #mayavi.mlab.clf()
# -
# Fit local 2-D total-least-squares planes (LinearODR_mD(2)) around each point.
linear_models = local_models.local_models.LocalModels(local_models.TLS_models.LinearODR_mD(2))
linear_models.fit(data)
# Average nearest-neighbour spacing: k=2 because the closest hit is the point
# itself, so column 1 of the distances is the true nearest neighbour.
avg_pt_dist = np.average(linear_models.index.query(data, k=2)[0][:,1])
avg_pt_dist
# Spot-check neighbourhood sizes at a bandwidth of 10x the mean point spacing.
random_data_subset = data[np.random.randint(data.shape[0], size=50)]
queried = linear_models.index.query_radius(random_data_subset, r=avg_pt_dist*10)
list(map(lambda x: x.shape, queried))
# Compact-support kernel with the same 10x spacing bandwidth; fit local planes
# and keep their tangent vectors and local means for every point.
kernel = local_models.local_models.TriCubeKernel(bandwidth=avg_pt_dist*10)
linear_params_vecs, linear_params_mean = local_models.linear_projections.transformate_data(data, kernel, linear_models, r=kernel.support_radius())
linear_params_vecs.shape
def imshow(pth, cv2color=cv2.IMREAD_COLOR, **kwargs):
    """Load the image file at *pth* with OpenCV and display it via matplotlib.

    Parameters
    ----------
    pth : path of the image file to show.
    cv2color : OpenCV imread flag (default: 3-channel colour).
    **kwargs : forwarded to ``plt.imshow`` (e.g. cmap, interpolation).

    Raises
    ------
    FileNotFoundError if the file is missing or unreadable.
    """
    img = cv2.imread(pth, cv2color)
    if img is None:
        # BUG FIX: cv2.imread silently returns None on a missing/unreadable
        # file, which otherwise surfaces as a confusing matplotlib error.
        raise FileNotFoundError("could not read image: {}".format(pth))
    # NOTE(review): cv2 loads colour images as BGR while matplotlib expects
    # RGB, so channels display swapped — confirm whether that matters here.
    plt.imshow(img, **kwargs)
# +
# Render a 10% random sample of the locally-fitted plane centres, plus the
# plane normals (cross product of the two tangent vectors per point).
N = int(data.shape[0]/10)
sample_indices = np.random.choice(np.arange(data.shape[0]), size=N)
pth = os.path.join(project_dir, "single_convergence.png")
data_avg = np.average(data, axis=0)
data_std = np.std(data, axis=0)
figure = mayavi.mlab.figure(figure=None, bgcolor=(1,1,1), fgcolor=(0,0,0), engine=None, size=(1000, 500))
data_nodes = plt_data(figure, linear_params_mean[sample_indices], data_std)
# Each row of linear_params_vecs holds two tangent vectors; their cross
# product is the local surface normal.
normals = np.cross(*np.rollaxis(linear_params_vecs[sample_indices],1))
#normal_vecs = plt_normals(figure, normals, linear_params_mean[sample_indices], data_std)
#neg_normal_vecs = plt_normals(figure, -normals, linear_params_mean[sample_indices], data_std)
normalize_view(figure, data_avg, data_std, azimuth=40, elevation=80)
mayavi.mlab.savefig(pth, magnification=2)
mayavi.mlab.close(figure)
# -
imshow(pth)
plt.axis("off")
# Fit one global TLS plane to the whole cloud for a reference frame.
global_linear_model = local_models.TLS_models.LinearODR_mD(2)
global_linear_model.fit(data)
# +
global_params_mean = global_linear_model.intercept_
global_linear_vecs = global_linear_model.cov_eigenvectors[global_linear_model.cov_eigenvalues_sorter]
#basis_changer = np.linalg.inv(global_linear_vecs)
# Eigenvectors are orthonormal, so the transpose is the inverse rotation.
basis_changer = global_linear_vecs.T
# -
# 4x4 affine matrix combining the rotation and the translation to the mean.
E = np.block([[global_linear_vecs, global_params_mean.reshape(-1,1)],[np.zeros(3),1]])
np.round(E,4)[np.triu_indices(4)]
# Express the data in the global plane's coordinate frame and view it flat:
# colour encodes the out-of-plane coordinate z.
bases_changed = (data-global_params_mean)@basis_changer
x,y,z = bases_changed.T
plt.scatter(x,y,c=z)
plt.axis("off")
| examples/pointcloud_faces.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import psycopg2
import pandas as pd
# from sqlalchemy.types import Integer, Text, String, DateTime
import sqlalchemy as s
import matplotlib
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import json
# Database credentials are kept out of the notebook in a local config.json.
with open("config.json") as config_file:
    config = json.load(config_file)

# BUG FIX: the "postgres" dialect alias was deprecated and removed in
# SQLAlchemy 1.4; the supported name is "postgresql+psycopg2".
database_connection_string = 'postgresql+psycopg2://{}:{}@{}:{}/{}'.format(config['user'], config['password'], config['host'], config['port'], config['database'])

# Default all unqualified table names to the augur_data schema.
dbschema='augur_data'
engine = s.create_engine(
    database_connection_string,
    connect_args={'options': '-csearch_path={}'.format(dbschema)})
# -
# List every repository in repo group 25155 (sanity check of the connection).
repo_list = pd.DataFrame()
repo_list_query = f"""
SELECT repo_id, repo_name from repo
WHERE repo_group_id = 25155;
"""
repo_list = pd.read_sql_query(repo_list_query, con=engine)
print(repo_list)
# Look up the two repositories this report covers by name.
repo_list = pd.DataFrame()
repo_list_query = f"""
SELECT repo_id, repo_name from repo
WHERE repo_name = 'spring-framework' OR repo_name = 'spring-boot';
"""
repo_list = pd.read_sql_query(repo_list_query, con=engine)
print(repo_list)
## List of repository IDs for the report
# NOTE(review): despite the name, repo_dict is a *set* of repo IDs.
repo_dict = {25760, 25663}
# +
#Weekly Complete: Total Pull Requests Closed
# For each repository, build a weekly time series: a generated calendar of ISO
# weeks (Jul-2017..Jan-2020) LEFT JOINed against per-week aggregates of closed
# pull requests, so weeks with no activity still appear (NaNs filled below).
sql_DF = pd.DataFrame()
print(sql_DF)
for value in repo_dict:
    print(value)
    # sql_DFa = pd.read_sql
    myQuery = f"""
SELECT
*
FROM
(
SELECT
date_part( 'year', week :: DATE ) AS YEAR,
date_part( 'week', week :: DATE ) AS week
FROM
( SELECT * FROM ( SELECT week :: DATE FROM generate_series ( TIMESTAMP '2017-07-01', TIMESTAMP '2020-01-30', INTERVAL '1 week' ) week ) d ) x
) y
LEFT OUTER JOIN (
SELECT
repo_id,
repo_name,
repo_group,
date_part( 'year', pr_created_at :: DATE ) AS YEAR,
date_part( 'week', pr_created_at :: DATE ) AS week,
AVG ( hours_to_close ) AS wk_avg_hours_to_close,
AVG ( days_to_close ) AS wk_avg_days_to_close,
COUNT ( pr_src_id ) AS total_prs_open_closed
FROM
(
SELECT
repo.repo_id AS repo_id,
repo.repo_name AS repo_name,
repo_groups.rg_name AS repo_group,
pull_requests.pr_created_at AS pr_created_at,
pull_requests.pr_closed_at AS pr_closed_at,
pull_requests.pr_src_id AS pr_src_id,
( EXTRACT ( EPOCH FROM pull_requests.pr_closed_at ) - EXTRACT ( EPOCH FROM pull_requests.pr_created_at ) ) / 3600 AS hours_to_close,
( EXTRACT ( EPOCH FROM pull_requests.pr_closed_at ) - EXTRACT ( EPOCH FROM pull_requests.pr_created_at ) ) / 86400 AS days_to_close
FROM
repo,
repo_groups,
pull_requests
WHERE
repo.repo_group_id = repo_groups.repo_group_id
AND repo.repo_id = pull_requests.repo_id
AND repo.repo_id = {value}
AND pull_requests.pr_src_state = 'closed'
ORDER BY
hours_to_close
) L
GROUP BY
L.repo_id,
L.repo_name,
L.repo_group,
YEAR,
week
ORDER BY
repo_id,
YEAR,
week
) T USING ( week, YEAR )
ORDER BY
YEAR,
week;
"""
    sql_DFa = pd.read_sql_query(myQuery, con=engine)
    repo_id = value
    # Weeks that came only from the generated calendar have NULL metrics and
    # NULL repo_id; zero-fill the metrics and tag the rows with this repo.
    sql_DFa[['wk_avg_hours_to_close', 'wk_avg_days_to_close', 'total_prs_open_closed' ]] = sql_DFa[['wk_avg_hours_to_close', 'wk_avg_days_to_close', 'total_prs_open_closed' ]].fillna(value=0)
    sql_DFa[['repo_id']] = sql_DFa[['repo_id']].fillna(value=repo_id)
    # Accumulate one frame across all repos.
    if not sql_DF.empty:
        # print(sql_DFa)
        sql_DF = pd.concat([sql_DF, sql_DFa])
        # print(sql_DF)
    else:
        print('first time')
        sql_DF = sql_DFa
# print(sql_DF)
# BUG FIX: the original called ``sql_DF.set_index('repo_id', 'year', 'week')``
# (twice) and later ``sql_DF.set_index('repo_id', 'yearweek')``: the extra
# positional arguments bind to ``drop``/``append`` and the returned frame was
# never assigned, so the calls were no-ops — and on pandas >= 2.0 they raise
# TypeError because those arguments are keyword-only.  Downstream code relies
# on 'year'/'week' remaining ordinary columns, so the calls are simply removed.
sql_DF['year'] = sql_DF['year'].map(int)
sql_DF['week'] = sql_DF['week'].map(int)
sql_DF['repo_id'] = sql_DF['repo_id'].map(int)
# Zero-pad the week so '2017' + '5' sorts correctly as '201705'.
sql_DF['week'] = sql_DF['week'].map(lambda x: '{0:0>2}'.format(x))
sql_DF['yearweek'] = sql_DF['year'].map(str)+sql_DF['week'].map(str)
#sql_DF['yearweek'] = sql_DF['yearweek'].map(int)
sql_DF['week'] = sql_DF['week'].map(int)
#print(sql_DF)
#sql_DF.dtypes
sns.set_style('ticks')
#sns.palplot(sns.color_palette('husl', 8))
#sns.set_palette('husl')
sns.set(style="whitegrid")
# Total PRS Closed
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(24, 8)
# One line per repository (hue + style both keyed on repo_name); sort=False
# keeps the pre-sorted yearweek order from the SQL query.
plotter = sns.lineplot(x='yearweek', y='total_prs_open_closed', style='repo_name', data=sql_DF, sort=False, legend='full', linewidth=2.5, hue='repo_name').set_title("Total Pull Requests Closed by Week, July 2017-January 2020")
#ax.tick_params(axis='x', which='minor', labelsize='small', labelcolor='m', rotation=30)
plotterlabels = ax.set_xticklabels(sql_DF['yearweek'], rotation=90, fontsize=8)
# NOTE(review): assumes an images/ directory already exists next to the notebook.
fig.savefig('images/prs-total-closed-wk.png')
#Average Days Open
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(24, 24)
plotter = sns.lineplot(x='yearweek', y='wk_avg_days_to_close', style='repo_name', data=sql_DF, sort=False, legend='full', linewidth=2.5, hue='repo_name').set_title("Average Closed PR Time Open by Week, July 2017-January 2020")
#ax.tick_params(axis='x', which='minor', labelsize='small', labelcolor='m', rotation=30)
plotterlabels = ax.set_xticklabels(sql_DF['yearweek'], rotation=90, fontsize=8)
fig.savefig('images/prs-average-open-time-wk.png')
# +
# Monthly Complete
# Same shape as the weekly loop above, but aggregated by calendar month: a
# generated month calendar LEFT JOINed against per-month closed-PR aggregates.
pr_monthDF = pd.DataFrame()
print(pr_monthDF)
for value in repo_dict:
    print(value)
    # sql_DFa = pd.read_sql
    pr_monthquery = f"""
SELECT
*
FROM
(
SELECT
date_part( 'year', month :: DATE ) AS YEAR,
date_part( 'month', month :: DATE ) AS month
FROM
( SELECT * FROM ( SELECT month :: DATE FROM generate_series ( TIMESTAMP '2017-07-01', TIMESTAMP '2020-01-30', INTERVAL '1 month' ) month ) d ) x
) y
LEFT OUTER JOIN (
SELECT
repo_id,
repo_name,
repo_group,
date_part( 'year', pr_created_at :: DATE ) AS YEAR,
date_part( 'month', pr_created_at :: DATE ) AS month,
AVG ( hours_to_close ) AS wk_avg_hours_to_close,
AVG ( days_to_close ) AS wk_avg_days_to_close,
COUNT ( pr_src_id ) AS total_prs_open_closed
FROM
(
SELECT
repo.repo_id AS repo_id,
repo.repo_name AS repo_name,
repo_groups.rg_name AS repo_group,
pull_requests.pr_created_at AS pr_created_at,
pull_requests.pr_closed_at AS pr_closed_at,
pull_requests.pr_src_id AS pr_src_id,
( EXTRACT ( EPOCH FROM pull_requests.pr_closed_at ) - EXTRACT ( EPOCH FROM pull_requests.pr_created_at ) ) / 3600 AS hours_to_close,
( EXTRACT ( EPOCH FROM pull_requests.pr_closed_at ) - EXTRACT ( EPOCH FROM pull_requests.pr_created_at ) ) / 86400 AS days_to_close
FROM
repo,
repo_groups,
pull_requests
WHERE
repo.repo_group_id = repo_groups.repo_group_id
AND repo.repo_id = pull_requests.repo_id
AND repo.repo_id = {value}
AND pull_requests.pr_src_state = 'closed'
ORDER BY
hours_to_close
) L
GROUP BY
L.repo_id,
L.repo_name,
L.repo_group,
YEAR,
month
ORDER BY
repo_id,
YEAR,
month
) T USING ( month, YEAR )
ORDER BY
YEAR,
month;
"""
    pr_monthDFa = pd.read_sql_query(pr_monthquery, con=engine)
    repo_id = value
    # Calendar-only months carry NULL metrics/repo_id: zero-fill and tag.
    pr_monthDFa[['wk_avg_hours_to_close', 'wk_avg_days_to_close', 'total_prs_open_closed' ]] = pr_monthDFa[['wk_avg_hours_to_close', 'wk_avg_days_to_close', 'total_prs_open_closed' ]].fillna(value=0)
    pr_monthDFa[['repo_id']] = pr_monthDFa[['repo_id']].fillna(value=repo_id)
    if not pr_monthDF.empty:
        # print(sql_DFa)
        pr_monthDF = pd.concat([pr_monthDF, pr_monthDFa])
        # print(sql_DF)
    else:
        print('first time')
        pr_monthDF = pr_monthDFa
# print(sql_DF)
# BUG FIX: the original called ``pr_monthDF.set_index('repo_id', 'year',
# 'month')`` (twice) and ``pr_monthDF.set_index('repo_id', 'yearmonth')``:
# the extra positional arguments bind to ``drop``/``append`` and the result
# was never assigned, so the calls were no-ops — and they raise TypeError on
# pandas >= 2.0, where those arguments are keyword-only.  Downstream code
# needs 'year'/'month' to stay columns, so the calls are removed.
import matplotlib.pyplot as plt
pr_monthDF['year'] = pr_monthDF['year'].map(int)
pr_monthDF['month'] = pr_monthDF['month'].map(int)
pr_monthDF['repo_id'] = pr_monthDF['repo_id'].map(int)
# Zero-pad the month so '2017' + '5' sorts correctly as '201705'.
pr_monthDF['month'] = pr_monthDF['month'].map(lambda x: '{0:0>2}'.format(x))
pr_monthDF['yearmonth'] = pr_monthDF['year'].map(str)+pr_monthDF['month'].map(str)
#sql_DF['yearweek'] = sql_DF['yearweek'].map(int)
pr_monthDF['month'] = pr_monthDF['month'].map(int)
#print(sql_DF)
#sql_DF.dtypes
sns.set_style('ticks')
#sns.palplot(sns.color_palette('husl', 8))
#sns.set_palette('husl')
sns.set(style="whitegrid")
#Total PRS open and closed by month
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(24, 8)
# One line per repository; sort=False keeps the SQL's yearmonth ordering.
plottermonth = sns.lineplot(x='yearmonth', y='total_prs_open_closed', style='repo_name', data=pr_monthDF, sort=False, legend='full', linewidth=2.5, hue='repo_name').set_title("Total Pull Requests Closed by Month, July 2017-January 2020")
#ax.tick_params(axis='x', which='minor', labelsize='small', labelcolor='m', rotation=30)
plottermonthlabels = ax.set_xticklabels(pr_monthDF['yearmonth'], rotation=90, fontsize=13)
# NOTE(review): assumes an images/ directory already exists next to the notebook.
fig.savefig('images/prs-monthly-total-open-closed.png')
#Average Days Open by Month
fig, ax = plt.subplots()
# the size of A4 paper
fig.set_size_inches(24, 24)
plottermonth = sns.lineplot(x='yearmonth', y='wk_avg_days_to_close', style='repo_name', data=pr_monthDF, sort=False, legend='full', linewidth=2.5, hue='repo_name').set_title("Average Closed PR Time Open by Month, July 2017-January 2020")
#ax.tick_params(axis='x', which='minor', labelsize='small', labelcolor='m', rotation=30)
plotterlabels = ax.set_xticklabels(pr_monthDF['yearmonth'], rotation=90, fontsize=8)
fig.savefig('images/prs-average-open-time-month.png')
# -
| code/dawn_experimental.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 4_TensorRT_Inference_Test
#
# This notebook would run a test inference on the TensorRT engine that was created in notebook 3.
#
#Import Packages
import numpy as np
from onnx_helper import ONNXClassifierWrapper,convert_onnx_to_engine
import torch
import json
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt
# %matplotlib inline
#Set constants
# Batch size and class count must match the shape the TensorRT engine was
# built with in notebook 3; PRECISION is the dtype of the output buffer.
BATCH_SIZE=1
N_CLASSES=1000
PRECISION=np.float32
# Input resolution expected by the EfficientNet-B2 preprocessing below.
image_size=224
# Serialized engine produced by notebook 3.
TRT_PATH='models/efficientnetb2_batch1.trt'
# +
print("Loading TRT")
# Wrap the serialized engine; the wrapper allocates an output buffer of shape
# [BATCH_SIZE, N_CLASSES] with dtype PRECISION.
trt_model=ONNXClassifierWrapper(TRT_PATH,[BATCH_SIZE,N_CLASSES],target_dtype=PRECISION)
# -
# ### It is important to note that TensorRT engine expects the input to be [Batch, Height, Width, Channels]
# ##### Hence in the below step, it is transposed to be of such dimensions.
# +
# Load the test image; keep an untouched copy for display at the end.
img=Image.open('images/cat_1.jpg')
test_image=np.copy(img)
image_size=224
# Standard ImageNet preprocessing: resize, centre-crop, normalize.
tfms=transforms.Compose([transforms.Resize(image_size),
    transforms.CenterCrop(image_size),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
img=tfms(img)
img=img.unsqueeze(0)        # add the batch dimension: [1, C, H, W]
print("Img shape",img.shape)
BATCH_SIZE=1
dummy_batch=img.numpy()
#np.zeros((BATCH_SIZE,3,224,224))
#for idx in range(BATCH_SIZE):
#    dummy_batch[idx]=img.numpy
#print(dummy_batch.shape)
# BUG FIX: the engine expects NHWC ([Batch, Height, Width, Channels], per the
# note above).  NCHW -> NHWC is transpose (0, 2, 3, 1); the original
# (0, 3, 2, 1) produced [B, W, H, C], spatially transposing the image (only
# shape-compatible because the crop is square).
dummy_batch=dummy_batch.transpose((0, 2, 3, 1))
#print(dummy_batch.shape)
# -
#dummy_batch=np.zeros((BATCH_SIZE,224,224,3))
print("Predict")
# Run the TensorRT engine on the preprocessed batch.
predictions=trt_model.predict(dummy_batch)
#print("Prediction",predictions)
# Map the 1000 ImageNet class indices to human-readable labels.
labels_map=json.load(open('labels_map.txt'))
labels_map=[labels_map[str(i)] for i in range(1000)]
predt=torch.from_numpy(predictions)
#print(predt)
# Top-1 class index for the single image in the batch.
preds=torch.topk(predt,k=1).indices.squeeze(0).tolist()
#print(preds)
# +
for idx in preds:
    #idx = [int(idx) for idx in preds]
    label=labels_map[idx]
    # Softmax over the raw logits gives a printable confidence.
    prob=torch.softmax(predt,dim=1)[0,idx].item()
    print('{:<75} ({:.2f}%)'.format(label, prob*100))
plt.imshow(test_image)
plt.title('Test image')
plt.show()
# -
| 4_TensorRT_Inference_Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mesdaagency/LANDSAT8/blob/main/AGENCIA_ESPACIAL_MEXICANA_ODC_and_COLAB_LS8_QUERETARO.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="wUjM0D_z4h8X"
# <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/01.01.Getting_Started_ODC_and_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8leRDXsMoEU-"
# <a name="top"></a>
# # Getting Started: Open Data Cube on Google Colab
#
# 1. [Open Data Cube Framework](#ODC-Framework)
# 2. [Google Colab and Jupyter Notebooks](#what-is)
# 3. [Products and Measurements](#products-measurements)
# 4. [How to Load Data](#How-to-Load-Data)
# 5. [Explore More Applications](#explore-applications)
# 6. [Reference Links](#reference-links)
#
# *The article below provides an overview of ODC on Google Colab. If you wish to run the notebooks, you'll need to setup a Google Earth Engine account. This is free, and you can read more about how to do that [here](https://www.openearthalliance.org/sandbox).*
# + [markdown] id="7zfNDGZysxQH"
# <a name="ODC-Framework"></a>
# # 1. Open Data Cube Framework
#
# The Open Data Cube (ODC, [opendatacube.org](https://www.opendatacube.org/)) is a software framework with the objective of:
#
# > ... increasing the impact of satellite data by providing an open and freely accessible exploitation tool, and to foster a community to develop, sustain, and grow the breadth and depth of applications.
#
# An ODC instance is made up of data, a mechanism to index that data (e.g. database), and an open source Python code base making use of a wide variety of Python libraries. A more detailed introduction and some history of the ODC can be found [here](https://medium.com/opendatacube/what-is-open-data-cube-805af60820d7).
#
#
#
#
#
#
# + [markdown] id="nKGC41A00eEn"
# 
# + [markdown] id="HbliNERXMUJc"
# The ODC framework can run on a wide variety of infrastructure from a simple Docker instance running on a laptop computer scaling up to continential coverage (e.g. [Digital Earth Afica](https://www.digitalearthafrica.org/)). This notebook is intended to give you an introduction to the ODC running on the Google Colab plaform, and utalising data from the [Google Earth Engine Catalog](https://developers.google.com/earth-engine/datasets).
#
# The notebook provides:
#
# * A quick introduction to Jupyter notebooks and the Colab environment (with links to read more)
# * A worked example of how to configure an ODC connection from Google Colab
# * An example listing of the products and measurements available
# * How to load some sample data
# * Links to worked examples demonstrating several Earth observation applications
#
# This is intended as a familarisation and training resource, and we hope it will help start your journey with ODC!
#
# [Back to the top](#top)
# + [markdown] id="bu4ksWosrsbq"
# <a name="what-is"></a>
# # 2. Google Colab and Jupyter Notebooks
#
# Google Colaboratory (or Google Colab, [colab.research.google.com](https://colab.research.google.com)) is an environment that allows anybody to write and execute arbitrary Python code through the browser, and is especially well suited to machine learning, data analysis and education. Colab is a hosted Jupyter Notebook service, where 'Notebooks' (like this one) containing live code, equations, visualizations and narrative text can be shared. You can read more about these environments in the [reference links below](#reference-links).
#
# While Jupyter notebooks are relatively intuitive, if you don't understand what the code and cells below are, then a quick read of the [features overview](https://colab.research.google.com/notebooks/basic_features_overview.ipynb) should help get you strated.
#
# Loading the satellite datasets used by this notebook requires a [Google Earth Engine](https://earthengine.google.com/) account. These accounts are free to setup, and the steps required are described [here](https://www.openearthalliance.org/sandbox).
#
# ***Finally, one important note. This is a sandbox environment. Feel free to experiment! From time to time, you might see errors. That's OK - you can't break anything in here. And you can always start anew by [reloading the Notebook](https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/01.01.Getting_Started_ODC_and_Colab.ipynb). If you have questions, [try the Open Earth Alliance Forum](https://www.openearthalliance.org/forum).***
#
# Now, onto the code...! First, a quick test: try running this cell. (If you're not sure how, [try this](https://colab.research.google.com/notebooks/basic_features_overview.ipynb).)
# + colab={"base_uri": "https://localhost:8080/"} id="zOvygfe4t_pW" outputId="fe8f18e9-b326-4c11-9ede-e335a2dbe889"
print('Push [shift] + [enter] to run this cell - now you are ready to Go!')
# + [markdown] id="12SuxRjIuFX4"
# Having run the cell above, you will have initialised your Google Colab environment, and are now ready to setup your ODC instance.
#
# The next code block provides Colab with access to your Google Drive to store content and results. *It will ask you to click on an authentication link, and then paste an authentication code back into the notebook cell.* The steps for the process are shown below:
#
# <img src="https://ceos.org/document_management/SEO/ODC%20Colab%20Images/Google%20Drive%20Auth%20Info.png" width="100%">
#
# + id="MKHyVmWZcwzA" colab={"base_uri": "https://localhost:8080/"} outputId="c04295f2-385c-4cc8-eca4-1569d4a248c5"
from google.colab import drive
# BUG FIX: the first positional argument of drive.mount is the *mountpoint*
# path; the original passed the string 'force_remount=True' as that path.
# Mount at the standard Colab location and pass force_remount as a keyword.
drive.mount('/content/drive', force_remount=True)
# + [markdown] id="HX3_ZIT-uvNX"
# Now that you've setup the link to Google Drive, the next cell will grab the Python libraries necessary to run ODC on Google Colab. This is based on the [ODC-Colab](https://github.com/ceos-seo/odc-colab) repository build by the CEOS Systems Engineering Office.
# + id="Oon11EUY4h8e" colab={"base_uri": "https://localhost:8080/"} outputId="ae88605b-f083-4cc7-9015-277502560d0a"
# Fetch the ODC-Colab bootstrap script (skipped if already present, -nc) and
# install the Open Data Cube stack with Google Earth Engine support.
# !wget -nc https://raw.githubusercontent.com/ceos-seo/odc-colab/master/odc_colab.py
from odc_colab import odc_colab_init
odc_colab_init(install_odc_gee=True)
# + [markdown] id="Qkbspdn2vRXG"
# The next code block will populate the ODC data index allowing ODC to access data from the [Google Earth Engine Catalog](https://developers.google.com/earth-engine/datasets).
# + id="eSkt3mPC4h8f" colab={"base_uri": "https://localhost:8080/"} outputId="9128fbbb-f531-47c0-b050-78d992ca30c6"
from odc_colab import populate_db
# Load the pre-built index of GEE products into the local ODC database.
populate_db()
# + [markdown] id="mHjsVmU2wMvT"
# This next code block will establish the connection to the Google Earth Engine datasets. *As above, it will ask you to click on an authentication link, and then paste an authentication code back into the notebook cell.* The steps for the process are shown below:
#
# <img src="https://ceos.org/document_management/SEO/ODC%20Colab%20Images/GEE%20Auth%20Info.png" width="100%">
#
# Following that, the block loads a couple of key Python librarires used later in the Notebook.
# + id="8sRzE8L34h8g" colab={"base_uri": "https://localhost:8080/"} outputId="9e1e48ba-dea5-429d-92fe-812189f2d4c8"
# Suppress Warning Messages
import warnings
warnings.filterwarnings('ignore')

# Load Data Cube Configuration
# This triggers the GEE authentication flow described in the markdown above.
from odc_gee import earthengine
dc = earthengine.Datacube(app='Getting_Started_loading_data')

# Import Data Cube API
import utils.data_cube_utilities.data_access_api as dc_api
api = dc_api.DataAccessApi()

# Import Utilities
import xarray as xr
# + [markdown] id="wLWJYWdHyUdv"
# Now we have an ODC instance established with a connection to data!
# + [markdown] id="u7s_vPN5jiK-"
# [Back to the top](#top)
# + [markdown] id="tJe3cbUbs75t"
# <a name="products-measurements"></a>
# # 3. Products and Measurements
# + [markdown] id="2lwWuhJyxyfl"
# Within this ODC instance, 'products' refer to satellites (platform), and their instruments. To see what products are available, you can run the following command.
# + id="-QPZQYwnbi68" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="8459d29f-43ba-4ad5-d9a6-7f087732426c"
# List the indexed products (satellite collections) and show the columns most
# useful for choosing one; the expression result renders as the cell output.
products = dc.list_products()
display_columns = ["name",
                   "description",
                   "platform",
                   "instrument",
                   "crs",
                   "resolution"]
products[display_columns].sort_index()
# + [markdown] id="ZuvfujtDysGO"
# For each of the products available, there are a number of measuements available. These measurements correspond to the various 'bands' collected by the satellite instrument. In some cases, these are radar bands, or other derived layers such as a cloud mask.
#
# You can see a list of measurements by running the command below. Have a play to try different products (from the code block above), but note that the example below is set to run using the Landsat product `ls8_google`.
# + colab={"base_uri": "https://localhost:8080/", "height": 452} id="H-E7C-2E3RHY" outputId="53ec7c12-c301-4581-d405-2c5ba6f07d9a"
# Show the measurements (bands) available for one product; the Landsat-8
# GEE-indexed product is used here — swap in any name from the table above.
product = "ls8_google"
measurements = dc.list_measurements()
measurements.loc[product]
# + [markdown] id="tDS1Wxbljj8A"
# [Back to the top](#top)
# + [markdown] id="bjLNsPEJtMLa"
# <a name="How-to-Load-Data"></a>
# # 4. How to Load Data
#
# Now that you are familar with the basics of ODC and Colab, let's try loading some data and plotting an image.
#
# Loading data requires the use of the `dc.load()` function from the [datacube documentation](https://datacube-core.readthedocs.io/en/latest/dev/api/generate/datacube.Datacube.load.html). Below we give an example of how to use this function, using the Landsat-8 product `ls8_google`. You can also use other products from the table you generated above.
# + id="YXnIKhVQ4h8g"
# Define the Product and Platform
# This data is indexed from Google Earth Engine data sources
# product: ODC product name; platform: the satellite it comes from.
product = "ls8_google"
platform = "LANDSAT_8"
# + [markdown] id="Pk-khAmmhPPj"
# We now need to choose where on Earth we want to look and when. Be careful of the box size and time range you choose - the more data, the longer the code will take to run! We found that the following parameters give output within a couple of minutes.
#
# The analysis region should be given by a tuple of latitudes and a tuple of longnitudes that specify the sides of the region. Below we calculate the box sides by specifying a box centre and size (in degrees).
#
# Below we load data for the city of Mombasa, Kenya for all of 2020, by specifying a box centre and size.
# + [markdown] id="PdNbV8hVVINX"
# <a name="change_lat_lon"></a>
# + id="F2YFbpZ94h8g" colab={"base_uri": "https://localhost:8080/"} outputId="33097d0b-eaae-4f51-f5bf-42ce418c2787"
# MODIFY HERE
# Select an analysis region (Latitude-Longitude). Values should be defined from MIN to MAX (left to right)
# Specify box centre and box size in degrees.
# Example: Queretaro, Queretaro
lat_long = (20.588818, -100.389888)
box_size_deg = 0.125

# Derive the box corners from the centre and the half-width of the box.
half_box = box_size_deg / 2
center_lat, center_lon = lat_long
latitude = (center_lat - half_box, center_lat + half_box)
longitude = (center_lon - half_box, center_lon + half_box)

print('Latitude corners: ' + str(latitude))
print('Longitude corners: ' + str(longitude))

# Define Time Range - Select a time period within the extents of the dataset (Year-Month-Day)
# Landsat-8 time range: 07-Apr-2013 to current
time_extents = ('2020-01-01', '2020-12-31')
# + [markdown] id="R0eDMkZbhvKk"
# The code below renders a map that can be used to view the region selected above. To choose a new region, use the mouse to explore the map, and click on the map to view Lat-Lon coordinates of any location that could define the box center or edges.
# + id="PfbLB4Z94h8g" colab={"base_uri": "https://localhost:8080/", "height": 907} outputId="6f6e98b8-06c4-4c70-b943-522ade2ed33b"
# Render an interactive map of the selected bounding box so the user can
# visually confirm (or re-pick) the analysis region.
from utils.data_cube_utilities.dc_display_map import display_map
display_map(latitude,longitude)
# + [markdown] id="edLQET0IjtW-"
# Now to load the data - this block might take a few minutes to run.
# + id="hqT-xTXidv_s" colab={"base_uri": "https://localhost:8080/", "height": 232} outputId="60c0c76b-5ea3-4e35-d198-7fac44fe32d3"
# Load data
# FIX: in datacube's Datacube.load, `x` is the longitude (easting) extent and
# `y` is the latitude (northing) extent; the original call passed them swapped
# (x=latitude, y=longitude), which queries the wrong region.
# TODO(review): confirm the local `dc` wrapper follows the standard x/y convention.
ds = dc.load(product=product,
             x=longitude,
             y=latitude,
             time=time_extents,
             measurements=['red', 'green', 'blue', 'nir', 'swir1', 'swir2'])
print(ds)
# + [markdown] id="Moiqs6YqPfZ4"
# Finally, we plot a single timeslice. The range of possible time slices is given above by the `time` dimension. Note that the Python counts from zero, so the final time slice will be the above number `-1`.
#
# Below we plot both the 'true' colour image, as well as the 'false' colour image, which is commonly used for Landsat data viewing.
#
#
# + id="YZg2dl4x4h8k"
# Load the plotting utility
from utils.data_cube_utilities.dc_rgb import rgb
import matplotlib.pyplot as plt
# + [markdown] id="AaPH6csIUDQ3"
# <a name="modify_slice"></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 485} id="RGFU03tN4h8k" outputId="c186c225-03ac-4661-979a-1a29421cf48f"
# MODIFY HERE
# Select one of the time slices and create an output image.
# Clouds will be visible in WHITE for an output image
slice = 10 # select the time slice number here (valid range 0 .. len(ds.time)-1; NOTE: shadows the built-in `slice`)
# Select the output image bands
# Users can create other combinations of bands (loaded above), as desired
# True-Color = red, green, blue (this is the common true-color RGB image)
# False Color = swir2, nir, green (this is commonly used for Landsat data viewing)
true_rgb = ds.isel(time=slice)[['red', 'green', 'blue']].to_array()
false_rgb = ds.isel(time=slice)[['swir2', 'nir', 'green']].to_array()
# Define the plot settings and show the plots
# Users may want to alter the figure sizes or plot titles
# The "vmax" value controls the brightness of the images and can be adjusted
fig, ax = plt.subplots(1, 2, figsize=(16, 8))
true_rgb.plot.imshow(ax=ax[0], vmin=0, vmax=3000)
false_rgb.plot.imshow(ax=ax[1], vmin=0, vmax=5000)
# The comma-separated expressions below form throwaway tuples; they are
# evaluated only for their side effects (titles set, axis ticks hidden).
ax[0].set_title('True Color'), ax[0].xaxis.set_visible(False), ax[0].yaxis.set_visible(False)
ax[1].set_title('False Color'), ax[1].xaxis.set_visible(False), ax[1].yaxis.set_visible(False)
plt.show()
# + [markdown] id="nSy3aIWKgWv1"
# Congratulations - you have loaded and plotted your first dataset with ODC and Colab!
#
# Now it's your turn to try having a play by going back and modifying some of the code blocks and running them again. Here are a couple of ideas:
#
# * [Click here](#modify_slice) to update the `slice` variable in the cell above to view a different time slice (e.g. try `slice = 8`). Use [shift] + [enter] to re-run that cell, and notice the change in clouds!
# * [Click here](#change_lat_lon) to enter a new Lat/Lon location, enter that in the block above the map, and then run successive code blocks to see the data from the region. (Tip: you can find the Lat/Lon in [Google Maps](https://maps.google.com/) by right clicking the map where you're interested and click the lat-long listed to copy it to the clipboard.)
#
# [Back to the top](#top)
# + [markdown] id="bpVtejZBtPsK"
# <a name="explore-applications"></a>
# # 5. Publicly available ODC notebooks
#
# We have also developed a number of application-specific notebooks to help users understand the capabilities of the ODC. To continue learning about the ODC choose one of the following notebooks and get started! We are continuously updating these to make them better and always value user feedback - if you have any comments please don't hesitate to get in touch (where...)!
#
# You can find descriptions below, and the notebooks in this [GitHub repository folder](https://github.com/ceos-seo/odc-colab/tree/master/notebooks).
#
# - **Cloud statistics (Landsat 8)** *Calculate cloud statistics for specific regions Landsat-8 data.*
# - <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.01.Colab_Cloud_Statistics_L8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" height="16px" alt="Open In Colab"/></a>
# -[View on GitHub](https://github.com/ceos-seo/odc-colab/blob/master/notebooks/02.01.Colab_Cloud_Statistics_L8.ipynb)
# - **Median mosaic (Landsat 8)** *Create a custom Landsat cloud-filtered median mosaic for any time period and location.*
# - <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.03.Colab_Median_Mosaic_L8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" height="16px" alt="Open In Colab"/></a>
# -[View on GitHub](https://github.com/ceos-seo/odc-colab/blob/master/notebooks/02.03.Colab_Median_Mosaic_L8.ipynb)
# - **Vegetation change (Landsat 8)** *Use changes in the Normalized Difference Vegetation Index (NDVI) to identify vegetation change.*
# - <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.06.Colab_Vegetation_Change_L8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" height="16px" alt="Open In Colab"/></a>
# -[View on GitHub](https://github.com/ceos-seo/odc-colab/blob/master/notebooks/02.06.Colab_Vegetation_Change_L8.ipynb)
# - **Water WOFS (Landsat 8)** *Demonstration of the Australian Water Observations from Space (WOFS) algorithm.*
# - <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.04.Colab_Water_WOFS_L8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" height="16px" alt="Open In Colab"/></a>
# -[View on GitHub](https://github.com/ceos-seo/odc-colab/blob/master/notebooks/02.04.Colab_Water_WOFS_L8.ipynb)
# - **Spectral Products (Landsat-8)** *Compute different spectral products created using mathematical combinations of specific spectral bands.*
# - <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.05.Colab_Spectral_Products_L8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" height="16px" alt="Open In Colab"/></a>
# -[View on GitHub](https://github.com/ceos-seo/odc-colab/blob/master/notebooks/02.05.Colab_Spectral_Products_L8.ipynb)
# - **Cloud statistics (Sentinel 2)** *Calculate cloud statistics for specific regions Sentinel-2 data.*
# - <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.02.Colab_Cloud_Statistics_S2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" height="16px" alt="Open In Colab"/></a>
# -[View on GitHub](https://github.com/ceos-seo/odc-colab/blob/master/notebooks/02.02.Colab_Cloud_Statistics_S2.ipynb)
# - **Vegetation Phenology (Landsat 8)** *Calculate vegetation phenology changes using Landsat 8 and Normalized Difference Vegetation Index (NDVI).*
# - <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.07.Colab_Vegetation_Phenology_L8.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" height="16px" alt="Open In Colab"/></a>
# -[View on GitHub](https://github.com/ceos-seo/odc-colab/blob/master/notebooks/02.07.Colab_Vegetation_Phenology_L8.ipynb)
# - **Mission coincidences** *Find concident acquisition regions for three missions: Landsat-8, Sentinel-2 and Sentinel-1.*
# - <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.09.Colab_Mission_Coincidences.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" height="16px" alt="Open In Colab"/></a>
# -[View on GitHub](https://github.com/ceos-seo/odc-colab/blob/master/notebooks/02.09.Colab_Mission_Coincidences.ipynb)
# - **Sentinel 1 data viewer** *View Sentinel-1 data over a specified region, including several different data products for single and multi-data analyses.*
# - <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.08.Colab_S1_Data_Viewer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" height="16px" alt="Open In Colab"/></a>
# -[View on GitHub](https://github.com/ceos-seo/odc-colab/blob/master/notebooks/02.08.Colab_S1_Data_Viewer.ipynb)
# - **VIIRS night lights** *Use nightlight radiance measurements from VIIRS to study urban growth and loss of power from storms.*
# - <a href="https://colab.research.google.com/github/ceos-seo/odc-colab/blob/master/notebooks/02.10.Colab_VIIRS_Night_Lights.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" height="16px" alt="Open In Colab"/></a>
# -[View on GitHub](https://github.com/ceos-seo/odc-colab/blob/master/notebooks/02.10.Colab_VIIRS_Night_Lights.ipynb)
#
# [Back to the top](#top)
# + [markdown] id="X3FNgAQZoR2V"
# <a name="reference-links"></a>
# # 6. Reference Links
#
# *Google Colab and Jupyter Notebooks*
#
# * [What is Google Colab](https://colab.research.google.com/notebooks/intro.ipynb)
# * [Introduction to the Jupyter Notebook Environment on Google Colab](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)
# * [Description of Jupyter Notebooks from the Jupyter Project](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html)
# * [ODC-Colab Repository from the CEOS Systems Engineering Office](https://github.com/ceos-seo/odc-colab)
# * [How to index new products on the ODC-Colab](https://github.com/ceos-seo/odc-gee#local-index)
#
# *Open Data Cube*
#
# * [What is the Open Data Cube?](https://medium.com/opendatacube/what-is-open-data-cube-805af60820d7)
# * [opendatacube.org/](https://www.opendatacube.org)
# * [github.com/opendatacube](https://github.com/opendatacube)
# * [Open Earth Alliance](https://www.openearthalliance.org/)
# * [User Forum](https://www.openearthalliance.org/forum)
#
# *Open Data Cube Instances*
#
# * [Digital Earth Australia](https://www.ga.gov.au/dea)
# * [Digital Earth Africa](https://www.digitalearthafrica.org/)
# * [Swiss Data Cube](https://www.swissdatacube.org/)
#
# [Back to the top](#top)
| AGENCIA_ESPACIAL_MEXICANA_ODC_and_COLAB_LS8_QUERETARO.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # for authors
# Python 2 notebook: cPickle / Queue / unicode below are Python-2-only.
import cPickle as pickle
# Maps author key -> (first name, last name).
# NOTE(review): the file handle is never closed -- acceptable in a notebook,
# but use a with-block in library code.
author_full_name_dict = pickle.load(open("authors_full_name_dict.p", "rb"))
import sqlite3
# Co-authorship / citation graph database.
conng = sqlite3.connect('pmcv1-graph.db')
cg = conng.cursor()
import Queue
import networkx as nx
def buildfnauthortree(rootnode, mastergraphcursor, fndict, depth = 2):
    """Breadth-first build of a co-authorship tree rooted at ``rootnode``.

    rootnode -- author key used in the ``coauthors`` table
    mastergraphcursor -- sqlite3 cursor over the graph DB
    fndict -- maps author key -> (first name, last name)
    depth -- maximum BFS depth to expand

    Returns a networkx.DiGraph whose nodes are "First Last" labels.
    """
    _g = nx.DiGraph()
    q = Queue.Queue()
    q.put((rootnode, 0))
    while not q.empty():
        node = q.get()
        if node[1] < depth:
            mastergraphcursor.execute('''SELECT coauthors FROM coauthors WHERE author = ?''', [node[0]])
            # BUG FIX: fetch from the cursor the query ran on; the original
            # fetched from the global ``cg`` and only worked when the caller
            # happened to pass ``cg`` as mastergraphcursor.
            coauthors = mastergraphcursor.fetchone()[0].split(',')
            for author in coauthors:
                if unicode(fndict[author][0]+" "+fndict[author][1]) not in _g.nodes():
                    _g.add_edge(unicode(fndict[node[0]][0]+ " "+fndict[node[0]][1]),
                                unicode(fndict[author][0]+" "+fndict[author][1]))
                    q.put((author, node[1]+1))
    return _g
# Build the depth-2 co-authorship tree for one root author, draw it, and
# export it as JSON for a d3.js tree visualization.
rootauthor = u'padubidriv.shivaprasad'
g = buildfnauthortree(rootauthor, cg, author_full_name_dict, 2)
import matplotlib.pyplot as plt
# %matplotlib inline
nx.draw(g)
#dump graph in json format for d3 to plot
from networkx.readwrite import json_graph
import io, json
# NOTE(review): tree_data's root argument must equal the root node's label
# (the "First Last" form of rootauthor); "<NAME>" looks like a redacted
# placeholder -- confirm before running.
with io.open('testgraphdata.json', 'w', encoding='utf-8') as f:
    f.write(unicode(json.dumps(json_graph.tree_data(g, u"<NAME>",
                                      attrs={'children': 'children', 'id': 'name'}))))
f.close()  # no-op: the with-block already closed the file
# # for citations
# Re-open the graph database (fresh connection/cursor for this section).
import sqlite3
conng = sqlite3.connect('pmcv1-graph.db')
cg = conng.cursor()
import Queue
import networkx as nx
def buildcitenetwork(rootnode, mastergraphcursor, indepth = 0, outdepth = 2):
    """Build a citation neighbourhood graph around ``rootnode`` (a PMID).

    BFS expands up to ``outdepth`` levels along outgoing citations (papers
    this paper cites) and ``indepth`` levels along incoming citations
    (papers citing it).  Edges point citer -> cited.

    Returns a networkx.DiGraph of PMIDs.
    """
    _g = nx.DiGraph()
    q = Queue.Queue()
    # first go in the "out" direction
    q.put((rootnode, 0))
    while not q.empty():
        node = q.get()
        if node[1] < outdepth:
            mastergraphcursor.execute('''SELECT outcites FROM cites WHERE pmid = ?''', [node[0]])
            try:
                # BUG FIX: fetch from the cursor the query ran on; the original
                # fetched from the global ``cg``.
                cites = map(int, mastergraphcursor.fetchone()[0].split(','))
                for cite in cites:
                    if cite not in _g.nodes():
                        _g.add_edge(node[0], cite)
                        q.put((cite, node[1]+1))
            except ValueError:  # empty outcites string: int('') raises
                pass
    # now go in the "in" direction
    q.put((rootnode, 0))
    while not q.empty():
        node = q.get()
        if node[1] < indepth:
            mastergraphcursor.execute('''SELECT incites FROM cites WHERE pmid = ?''', [node[0]])
            try:
                cites = map(int, mastergraphcursor.fetchone()[0].split(','))
                for cite in cites:
                    if cite not in _g.nodes():
                        _g.add_edge(cite, node[0])
                        q.put((cite, node[1]+1))
            except ValueError:
                pass
    return _g
# Build the citation network around PMID 21437221 (2 levels out, 2 in),
# draw it, and export node-link JSON for d3.
g= buildcitenetwork(21437221, cg, 2, 2)
import matplotlib.pyplot as plt
# %matplotlib inline
nx.draw(g)
#dump graph in json format for d3 to plot
from networkx.readwrite import json_graph
import io, json
with io.open('citetestdata.json', 'w', encoding='utf-8') as f:
    f.write(unicode(json.dumps(json_graph.node_link_data(g, attrs={'source': 'source',
                                     'target': 'target',
                                     'key': 'key',
                                     'id': 'name'}))))
f.close()  # no-op: the with-block already closed the file
# # with citation levels colors as attribute
# 0 = root node;
# 1 = out cite 1;
# 2 = out cite 2;
# -1 = in cite 1, etc.
# Re-open the graph database (fresh connection/cursor for this section).
import sqlite3
conng = sqlite3.connect('pmcv1-graph.db')
cg = conng.cursor()
# +
import Queue
import networkx as nx
import colorbrewer
def buildcitenetwork(rootnode, mastergraphcursor, indepth = 0, outdepth = 2, colorscheme = colorbrewer.PuOr):
    """Citation neighbourhood of ``rootnode`` with depth-coded node colors.

    Out-citations are expanded ``outdepth`` levels, in-citations ``indepth``
    levels; each node carries a ``color`` attribute (hex string) taken from
    the diverging ``colorscheme`` palette, with the root at the middle color
    and out/in depths on either side.  Edges point citer -> cited.

    Returns a networkx.DiGraph.

    NOTE: ``(len(_colors)-1)/2`` relies on Python 2 integer division.
    """
    _g = nx.DiGraph()
    q = Queue.Queue()
    # set up colors: odd-length palette so the root sits exactly in the middle
    _colors = colorscheme[max(outdepth,indepth)*2+1]
    # first go in the "out" direction
    q.put((rootnode, 0))
    _g.add_node(rootnode, color = rgbtohex(_colors[(len(_colors)-1)/2]))
    while not q.empty():
        node = q.get()
        if node[1] < outdepth:
            mastergraphcursor.execute('''SELECT outcites FROM cites WHERE pmid = ?''', [node[0]])
            try:
                # BUG FIX: fetch from the cursor the query ran on; the original
                # fetched from the global ``cg``.
                cites = map(int, mastergraphcursor.fetchone()[0].split(','))
                for cite in cites:
                    if cite not in _g.nodes():
                        _g.add_node(cite, color = rgbtohex(_colors[(len(_colors)-1)/2+node[1]+1]))
                        _g.add_edge(node[0], cite)
                        q.put((cite, node[1]+1))
            except ValueError:  # empty outcites string: int('') raises
                pass
    # now go in the "in" direction
    q.put((rootnode, 0))
    while not q.empty():
        node = q.get()
        if node[1] < indepth:
            mastergraphcursor.execute('''SELECT incites FROM cites WHERE pmid = ?''', [node[0]])
            try:
                cites = map(int, mastergraphcursor.fetchone()[0].split(','))
                for cite in cites:
                    if cite not in _g.nodes():
                        _g.add_node(cite, color = rgbtohex(_colors[(len(_colors)-1)/2-node[1]-1]))
                        _g.add_edge(cite, node[0])
                        q.put((cite, node[1]+1))
            except ValueError:
                pass
    return _g
import struct
def rgbtohex(rgbtupleorlistoftuples):
    """Convert an (R, G, B) tuple -- or a list of such tuples -- to
    lowercase 6-digit hex string(s), e.g. (255, 0, 170) -> 'ff00aa'.

    FIX: the original used ``str.encode('hex')``, which exists only on
    Python 2; ``%02x`` formatting produces the same output and also works
    on Python 3.
    """
    if isinstance(rgbtupleorlistoftuples, list):
        return ['%02x%02x%02x' % tuple(tup) for tup in rgbtupleorlistoftuples]
    return '%02x%02x%02x' % tuple(rgbtupleorlistoftuples)
# -
# Rebuild the citation network with color attributes and export for d3.
g= buildcitenetwork(21437221, cg, 2, 2)
# +
#g.nodes(data=True)
# -
#dump graph in json format for d3 to plot
from networkx.readwrite import json_graph
import io, json
with io.open('citetestdata.json', 'w', encoding='utf-8') as f:
    f.write(unicode(json.dumps(json_graph.node_link_data(g, attrs={'source': 'source',
                                      'target': 'target',
                                      'key': 'key',
                                      'id': 'name',
                                      'color': 'color'
                                      }))))
f.close()  # no-op: the with-block already closed the file
# # with colored citation levels AND first auth ln et al. as label
#
# 1. some authors missing due to parser errors, others missing because papers not in PMC
# 2. would like to do author, date as format but didn't parse the dates. If I re-run I should do this
# 3. if I get TF-IDF keywords working well could use that
# Graph DB (citations) plus the full-text DB (author names per pmid).
import sqlite3
conng = sqlite3.connect('pmcv1-graph.db')
cg = conng.cursor()
connfull = sqlite3.connect('pmcv1-full.db')
cf = connfull.cursor()
# +
import Queue
import networkx as nx
import colorbrewer
def buildcitenetwork(rootnode, mastergraphcursor, authcursor, indepth = 0, outdepth = 2,
                     colorscheme = colorbrewer.PuOr):
    """Citation neighbourhood of ``rootnode`` with depth-coded colors and
    first-author last names as node labels.

    Each node carries ``color`` (hex string from the diverging palette,
    root in the middle) and ``ln`` (first author's last name from the
    ``authors`` table, falling back to the PMID when no author row exists).
    Edges point citer -> cited.  Returns a networkx.DiGraph.

    NOTE: ``(len(_colors)-1)/2`` relies on Python 2 integer division.
    """
    _g = nx.DiGraph()
    q = Queue.Queue()
    # set up colors: odd-length palette so the root sits exactly in the middle
    _colors = colorscheme[max(outdepth,indepth)*2+1]
    # first go in the "out" direction
    q.put((rootnode, 0))
    authcursor.execute('''SELECT ln FROM authors WHERE pmid = ? AND authnum = 0''', [rootnode])
    _g.add_node(rootnode, color = rgbtohex(_colors[(len(_colors)-1)/2]), ln = authcursor.fetchone()[0])
    while not q.empty():
        node = q.get()
        if node[1] < outdepth:
            mastergraphcursor.execute('''SELECT outcites FROM cites WHERE pmid = ?''', [node[0]])
            try:
                # BUG FIX: fetch from the cursor the query ran on; the original
                # fetched from the global ``cg``.
                cites = map(int, mastergraphcursor.fetchone()[0].split(','))
                for cite in cites:
                    if cite not in _g.nodes():
                        authcursor.execute('''SELECT ln FROM authors WHERE pmid = ? AND authnum = 0''', [cite])
                        try:
                            lastname = authcursor.fetchone()[0]
                        except TypeError:
                            # no author row for this pmid: label with the pmid itself
                            lastname = cite
                        _g.add_node(cite, color = rgbtohex(_colors[(len(_colors)-1)/2+node[1]+1]), ln = lastname)
                        _g.add_edge(node[0], cite)
                        q.put((cite, node[1]+1))
            except ValueError:  # empty outcites string: int('') raises
                pass
    # now go in the "in" direction
    q.put((rootnode, 0))
    while not q.empty():
        node = q.get()
        if node[1] < indepth:
            mastergraphcursor.execute('''SELECT incites FROM cites WHERE pmid = ?''', [node[0]])
            try:
                cites = map(int, mastergraphcursor.fetchone()[0].split(','))
                for cite in cites:
                    if cite not in _g.nodes():
                        authcursor.execute('''SELECT ln FROM authors WHERE pmid = ? AND authnum = 0''', [cite])
                        try:
                            lastname = authcursor.fetchone()[0]
                        except TypeError:
                            lastname = cite
                        _g.add_node(cite, color = rgbtohex(_colors[(len(_colors)-1)/2-node[1]-1]), ln = lastname)
                        _g.add_edge(cite, node[0])
                        q.put((cite, node[1]+1))
            except ValueError:
                pass
    return _g
import struct
def rgbtohex(rgbtupleorlistoftuples):
    """Convert an (R, G, B) tuple -- or a list of such tuples -- to
    lowercase 6-digit hex string(s), e.g. (255, 0, 170) -> 'ff00aa'.

    FIX: the original used ``str.encode('hex')``, which exists only on
    Python 2; ``%02x`` formatting produces the same output and also works
    on Python 3.
    """
    if isinstance(rgbtupleorlistoftuples, list):
        return ['%02x%02x%02x' % tuple(tup) for tup in rgbtupleorlistoftuples]
    return '%02x%02x%02x' % tuple(rgbtupleorlistoftuples)
# -
# Build the labeled, colored citation network around PMID 18593145 and
# export node-link JSON (with color and last-name attributes) for d3.
g = buildcitenetwork(18593145, cg, cf, 2, 2)
from networkx.readwrite import json_graph
import io, json
with io.open('citetestdata.json', 'w', encoding='utf-8') as f:
    f.write(unicode(json.dumps(json_graph.node_link_data(g, attrs={'source': 'source',
                                      'target': 'target',
                                      'key': 'key',
                                      'id': 'name',
                                      'color': 'color',
                                      'ln': 'ln'
                                      }))))
f.close()  # no-op: the with-block already closed the file
| D3-viz/generateGraphData.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## Gaussian Process Regression
# ## Part I - Multivariate Gaussian Distribution
# ## 2nd Machine Learning in Heliophysics
# ## Boulder, CO
# ### 21 - 25 March 2022
#
# ### <NAME> (University of Colorado, Boulder & NOAA Space Weather Prediction Center)
# #### <EMAIL>
# This work is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">Creative Commons Attribution 4.0 International License</a>.<img align="right" width="88" height="31" src=https://i.creativecommons.org/l/by/4.0/88x31.png> <a rel="license" href="http://creativecommons.org/licenses/by/4.0/">
# + [markdown] slideshow={"slide_type": "slide"}
# Gaussian Process is a powerful technique for regression and classification
# # + It is a <strong>non-parametric</strong> method
# # + It has a much simpler algorithm than parametric equivalents (neural networks, etc.)
# # + But it is harder to understand...
#
# + [markdown] slideshow={"slide_type": "fragment"}
# The output of GP is a fully probabilistic prediction in terms of Gaussian distributions (mean and variance)
# + [markdown] slideshow={"slide_type": "fragment"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # References
# ## The bible of GP
# 
#
# Available online (legally!)
# http://www.gaussianprocess.org/gpml/chapters/
#
# We will cover mostly Chapter 2 (Regression), Chapter 4 (Covariance Functions), Chapter 5 (Hyperparameters)
#
# + [markdown] cell_style="center" slideshow={"slide_type": "slide"}
# # Gaussian distribution
# <img align="right" width="200" height="100" src=https://upload.wikimedia.org/wikipedia/commons/e/ec/Carl_Friedrich_Gauss_1840_by_Jensen.jpg>
# <em>There are over 100 topics all named after Gauss</em>
# https://en.wikipedia.org/wiki/List_of_things_named_after_Carl_Friedrich_Gauss
#
# ## Starting with one variable
#
# The Gaussian distribution is arguably the most ubiquitous distribution in statistics, physics, social sciences, economy, etc.
# # + Central Limit Theorem
# # + Thermodynamical equilibrium (Maxwell–Boltzmann distribution)
# # + Brownian motion
# # + etc.
#
# Also called <strong> Normal distribution </strong>
#
# + [markdown] slideshow={"slide_type": "fragment"}
# $$p(x|\mu,\sigma) = \frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right)$$
#
# + cell_style="split" slideshow={"slide_type": "fragment"}
# %matplotlib inline
from ipywidgets import interactive
import matplotlib.pyplot as plt
import numpy as np
def f(sigma, mu):
    """Plot the 1-D Gaussian PDF with standard deviation ``sigma`` and mean ``mu``."""
    plt.figure(2)
    x = np.linspace(-10, 10, num=1000)
    # Normalized PDF: 1/(sqrt(2*pi)*sigma) * exp(-(x-mu)^2 / (2*sigma^2))
    plt.plot(x, 1/np.sqrt(2*np.pi)/sigma * np.exp(-0.5*(x-mu)**2/sigma**2))
    plt.ylim(-0.1, 1)
    plt.show()
# Interactive sliders; note the sigma slider's lower bound is 0, which would
# divide by zero if selected exactly.
interactive_plot = interactive(f, sigma=(0, 3.0), mu=(-3, 3, 0.5))
output = interactive_plot.children[-1]
output.layout.height = '350px'
interactive_plot
# + [markdown] cell_style="split" slideshow={"slide_type": "fragment"}
# Why does the peak of the distribution change?
#
#
# + [markdown] cell_style="split" slideshow={"slide_type": "fragment"}
# The distribution is normalized:
#
# $$\frac{1}{\sqrt{2\pi}\sigma}\int_{-\infty}^\infty \exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right)dx=1$$
# + [markdown] cell_style="center" slideshow={"slide_type": "slide"}
# $$p(x|\mu,\sigma) = \frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right)$$
#
# The mean (expectation) value of a random variable $x$ normally distributed is
#
# $\mathbb{E}(x) = \int_{-\infty}^\infty p(x) x dx = \frac{1}{\sqrt{2\pi}\sigma}\int_{-\infty}^\infty \exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right) x dx = \frac{1}{\sqrt{2\pi}\sigma}\int_{-\infty}^\infty \exp\left(-\frac{z^2}{2\sigma^2}\right) (z+\mu) dz$ =
#
# + [markdown] slideshow={"slide_type": "fragment"}
# $$\mu$$
# + [markdown] slideshow={"slide_type": "slide"}
# $$p(x|\mu,\sigma) = \frac{1}{\sqrt{2\pi}\sigma}\exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right)$$
#
# The variance of a random variable $x$ is defined as
#
# $var(x) = \mathbb{E}(x^2) - \mathbb{E}(x)^2$
# + [markdown] slideshow={"slide_type": "fragment"}
# When $x$ is normally distributed
#
# $\mathbb{E}(x^2) = \frac{1}{\sqrt{2\pi}\sigma}\int_{-\infty}^\infty \exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right) x^2 dx = \sigma^2 + \mu^2$
#
# $var(x) = \sigma^2$
# + [markdown] slideshow={"slide_type": "slide"}
# ## Gaussian distribution of 2 variables
#
# If two variables $x$ and $y$ are independent, their <strong> joint</strong> probability is
#
#
# $$p(x,y) = p(x)p(y)$$
#
# $$p(x,y) = \frac{1}{{2\pi}\sigma_x\sigma_y}\exp\left(-\frac{(x-\mu_x)^2}{2\sigma_x^2}-\frac{(y-\mu_y)^2}{2\sigma_y^2}\right)$$
#
# + slideshow={"slide_type": "slide"}
# %matplotlib inline
def f(sigma_x, sigma_y):
    """Surface plot of the joint PDF of two independent zero-mean Gaussians.

    FIX: the normalization of a product of two normalized 1-D Gaussians is
    1/(2*pi*sigma_x*sigma_y) -- each factor carries 1/sqrt(2*pi) -- so the
    original 1/(4*pi*...) did not integrate to 1.
    """
    fig = plt.figure(figsize=(10, 10))
    xx, yy = np.mgrid[-10:10:0.2, -10:10:0.2]
    f = 1/(2*np.pi)/sigma_x/sigma_y * np.exp(-0.5*(xx**2/sigma_x**2+yy**2/sigma_y**2))
    ax = plt.axes(projection='3d')
    surf = ax.plot_surface(xx, yy, f, rstride=1, cstride=1, cmap='coolwarm', edgecolor='none')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('PDF')
    fig.colorbar(surf, shrink=0.5, aspect=5)  # add color bar indicating the PDF
interactive_plot = interactive(f, sigma_x=(0, 3.0), sigma_y=(0,3.0))
output = interactive_plot.children[-1]
interactive_plot
# + [markdown] slideshow={"slide_type": "slide"}
# A better way of displaying 2D distributions is by using contour lines (isocontours).
#
# What family of curves are represented by this equation ?
#
# $\frac{(x-\mu_x)^2}{2\sigma_x^2}+\frac{(y-\mu_y)^2}{2\sigma_y^2}=const$
# + slideshow={"slide_type": "slide"}
def f(sigma_x, sigma_y):
    """Filled-contour plot of the PDF of two independent zero-mean Gaussians.

    FIX: normalization corrected from 1/(4*pi*...) to 1/(2*pi*...), the
    correct constant for a product of two normalized 1-D Gaussians.
    """
    fig = plt.figure(figsize=(7, 7))
    xx, yy = np.mgrid[-10:10:0.2, -10:10:0.2]
    f = 1/(2*np.pi)/sigma_x/sigma_y * np.exp(-0.5*(xx**2/sigma_x**2+yy**2/sigma_y**2))
    ax = fig.gca()
    ax.set_xlim(-10, 10)
    ax.set_ylim(-10, 10)
    cfset = ax.contourf(xx, yy, f, cmap='coolwarm')
    ax.imshow(np.rot90(f), cmap='coolwarm', extent=[-10, 10, -10, 10])  # stray trailing comma removed
    cset = ax.contour(xx, yy, f, colors='k')
    ax.clabel(cset, inline=1, fontsize=10)
    ax.set_xlabel('x')
    ax.set_ylabel('y')
interactive_plot = interactive(f, sigma_x=(0, 3.0), sigma_y=(0,3.0))
output = interactive_plot.children[-1]
#output.layout.height = '500px'
# + slideshow={"slide_type": "slide"}
# %matplotlib inline
# Display the interactive contour widget constructed in the previous cell.
interactive_plot
# + [markdown] slideshow={"slide_type": "slide"}
# # Matrix form
#
# $$p(x,y) = \frac{1}{{2\pi}\sigma_x\sigma_y}\exp\left(-\frac{(x-\mu_x)^2}{2\sigma_x^2}-\frac{(y-\mu_y)^2}{2\sigma_y^2}\right)$$
#
# The 2D normal distribution can be rewritten as
#
# $$p(x,y) = \frac{1}{2\pi\sigma_x\sigma_y}\exp\left(-\frac{1}{2}\left(\begin{bmatrix}x \\ y \end{bmatrix} - \begin{bmatrix}\mu_x \\ \mu_y \end{bmatrix}\right)^T \begin{bmatrix} \sigma_x^2 & 0 \\ 0 & \sigma_y^2 \end{bmatrix}^{-1} \left(\begin{bmatrix}x \\ y \end{bmatrix} - \begin{bmatrix}\mu_x \\ \mu_y \end{bmatrix}\right) \right)$$
#
#
# + [markdown] slideshow={"slide_type": "fragment"}
# that is
#
#
# $$p(x,y) = \frac{1}{2\pi|\boldsymbol{D}|^{1/2}}\exp\left(-\frac{1}{2}(\boldsymbol{x}-\boldsymbol{\mu})^T \boldsymbol{D}^{-1}(\boldsymbol{x}-\boldsymbol{\mu}) \right)$$
#
#
# where $\boldsymbol{x} = \begin{bmatrix} x \\ y \end{bmatrix}$ , $\boldsymbol{\mu} = \begin{bmatrix} \mu_x \\ \mu_y \end{bmatrix}$, $\boldsymbol{D}=\begin{bmatrix} \sigma_x^2 & 0 \\ 0 & \sigma_y^2 \end{bmatrix}$
# + [markdown] slideshow={"slide_type": "slide"}
# We can now introduce a rotation of the coordinates $(x,y)$ via a rotation matrix $\boldsymbol{R}$ such that
# $\boldsymbol{x}\rightarrow\boldsymbol{Rx}$
#
# $$p(x,y) = \frac{1}{2\pi|\boldsymbol{D}|^{1/2}}\exp\left(-\frac{1}{2}(\boldsymbol{Rx}-\boldsymbol{R\mu})^T \boldsymbol{D}^{-1}(\boldsymbol{Rx}-\boldsymbol{R\mu}) \right)$$
#
#
# + [markdown] slideshow={"slide_type": "fragment"}
# which finally reduces to
# $$p(x,y) = \frac{1}{2\pi|\boldsymbol{\Sigma}|^{1/2}}\exp\left(-\frac{1}{2}(\boldsymbol{x}-\boldsymbol{\mu})^T \boldsymbol{\Sigma}^{-1}(\boldsymbol{x}-\boldsymbol{\mu}) \right)$$
#
# with $\boldsymbol{\Sigma}^{-1} = \boldsymbol{R^T}\boldsymbol{D}^{-1}\boldsymbol{R}$
#
# + [markdown] slideshow={"slide_type": "fragment"}
# $\boldsymbol{R}$ is a rotation matrix, so it is unitary: $\boldsymbol{R}\boldsymbol{R}^T=\boldsymbol{I}$, hence:
#
# $$\boldsymbol{\Sigma} = \boldsymbol{R^T}\boldsymbol{D}\boldsymbol{R}$$
#
# (proof: $\boldsymbol{I}=\boldsymbol{\Sigma}\boldsymbol{\Sigma}^{-1} = \boldsymbol{R}^T\boldsymbol{D}\boldsymbol{R}\boldsymbol{R}^T\boldsymbol{D}^{-1}\boldsymbol{R}=\boldsymbol{I}$)
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# This can now be generalized to any number of variables $D$, and we have then derived the general formula for a multivariate Gaussian distribution
#
# $$p(\boldsymbol{x}) = \frac{1}{(2\pi)^{D/2}|\boldsymbol{\Sigma}|^{1/2}}\exp\left(-\frac{1}{2}(\boldsymbol{x}-\boldsymbol{\mu})^T \boldsymbol{\Sigma}^{-1}(\boldsymbol{x}-\boldsymbol{\mu}) \right)$$
#
# for which there is always an appropriate transformation of variables (rotation) that makes the variables independent.
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# The general rotation matrix for an angle $\theta$ in 2D is
#
# $R=\begin{bmatrix}\cos\theta & -\sin\theta \\ \sin\theta & \cos\theta\end{bmatrix}$
#
#
# + slideshow={"slide_type": "slide"}
def f(sigma_x, sigma_y, theta=0):
    """Contour plot of a 2-D Gaussian whose principal axes are rotated by
    ``theta`` (degrees), annotated with the covariance (Sigma) and
    precision (Sigma^-1) matrices.

    FIX: normalization corrected from 1/(4*pi*...) to 1/(2*pi*...);
    rotation is unitary and leaves the determinant |Sigma|^{1/2} =
    sigma_x*sigma_y unchanged, so the constant does not depend on theta.
    """
    fig = plt.figure(figsize=(7, 7))
    xx, yy = np.mgrid[-5:5:0.1, -5:5:0.1]
    theta = theta / 180 * np.pi  # degrees -> radians
    R = np.matrix([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
    D_inv = np.matrix([[1/sigma_x**2, 0], [0, 1/sigma_y**2]])
    D = np.matrix([[sigma_x**2, 0], [0, sigma_y**2]])
    Sigma = np.matmul(np.matmul(np.transpose(R), D), R)
    Sigma_inv = np.matmul(np.matmul(np.transpose(R), D_inv), R)
    # Quadratic form expanded element-wise; Sigma_inv is symmetric, hence
    # the factor 2 on the cross term.
    f = 1/(2*np.pi)/sigma_x/sigma_y * np.exp(-0.5*(xx**2*Sigma_inv[0,0]+ 2*xx*yy*Sigma_inv[0,1]+yy**2*Sigma_inv[1,1]))
    ax = fig.gca()
    ax.set_xlim(-5, 5)
    ax.set_ylim(-5, 5)
    cfset = ax.contourf(xx, yy, f, cmap='coolwarm')
    ax.imshow(np.rot90(f), cmap='coolwarm', extent=[-10, 10, -10, 10])  # stray trailing comma removed
    cset = ax.contour(xx, yy, f, colors='k')
    ax.clabel(cset, inline=1, fontsize=10)
    ax.set_xlabel('x', fontsize=16)
    ax.set_ylabel('y', fontsize=16)
    # Annotate with the numeric matrices (backslash escaped for the literal
    # LaTeX \Sigma; plain string '+' replaces np.core.defchararray.add).
    ax.text(-4, 3, '$\\Sigma^{-1}=$\n' + np.array_str(Sigma_inv), fontsize=16)
    ax.text(-4, -3.5, '$\\Sigma=$\n' + np.array_str(Sigma), fontsize=16)
interactive_plot = interactive(f, sigma_x=(0, 3.0), sigma_y=(0,3.0),theta=(0,180))
output = interactive_plot.children[-1]
# + cell_style="split" slideshow={"slide_type": "slide"}
# Display the rotated-Gaussian contour widget built in the previous cell.
interactive_plot
# + [markdown] cell_style="split" slideshow={"slide_type": "fragment"}
# Something peculiar about these matrices?
# + [markdown] cell_style="split" slideshow={"slide_type": "fragment"}
# They are symmetric!
# + [markdown] cell_style="split" slideshow={"slide_type": "fragment"}
# What if instead we choose to use a matrix $\Sigma^{-1}$ that is NOT symmetric?
#
# (Note: any matrix can be decomposed in the sum of a symmetric and an anti-symmetric matrix)
#
# Exercise: show that the anti-symmetric part disappears from the exponent in the Gaussian
#
# + [markdown] slideshow={"slide_type": "slide"}
# Hence: without loss of generality $\Sigma^{-1}$ can be taken as symmetric.
#
# The inverse of a symmetric matrix is symmetric: $\Sigma$ is also symmetric
#
# $\boldsymbol{\Sigma}$ is called the <strong> Covariance matrix</strong>
#
# $\boldsymbol{\Sigma}^{-1}$ is called the <strong> Precision matrix</strong>
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Covariance
#
# If we have a set of random variables $\boldsymbol{X}=\{X_1,X_2,\ldots,X_D\}$ the <strong>covariance</strong> between two variables is defined as:
#
# $$cov(X_i,X_j)=\mathbb{E}[(X_i-\mathbb{E}[X_i]) (X_j-\mathbb{E}[X_j])]$$
#
# and the covariance matrix is the corresponding matrix of elements $\boldsymbol{K}_{i,j}=cov(X_i,X_j)$. Hence the diagonal entries of the covariance matrix are the variances of each element of $\mathbf{X}$.
#
# $\mathbf{\Sigma}=\begin{bmatrix}cov(X_1,X_1) & cov(X_1,X_2) & \cdots & cov(X_1,X_D) \\ cov(X_2,X_1) & cov(X_2,X_2) & \cdots & cov(X_2,X_D)\\ \vdots & \vdots & \vdots & \vdots\\ cov(X_D,X_1) & cov(X_D,X_2) & \cdots & cov(X_D,X_D) \end{bmatrix}$
#
# + [markdown] slideshow={"slide_type": "slide"}
# Exercise: show that if two random variables $X$ and $Y$ are independent, their covariance is equal to zero.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Partitioned covariance and precision matrices
#
# Assume we split our $D-$ dimensional set of random variables $\boldsymbol{X}$ in two sets $\boldsymbol{x_a}$ and $\boldsymbol{x_b}$ (each can be multi-dimensional).
#
# Likewise, we can split the mean values in two corresponding sets $\boldsymbol{\mu_a}$ and $\boldsymbol{\mu_b}$.
# The vectors $\boldsymbol{X}$ and $\boldsymbol{\mu}$ can then be expressed as:
#
# $\boldsymbol{X}=\begin{bmatrix}\boldsymbol{x_a}\\ \boldsymbol{x_b}\end{bmatrix} $, $\boldsymbol{\mu}=\begin{bmatrix}\boldsymbol{\mu_a}\\ \boldsymbol{\mu_b}\end{bmatrix} $.
#
# The covariance matrix $\boldsymbol{\Sigma}$ can be partitioned as
#
# $\boldsymbol{\Sigma}=\begin{bmatrix} \boldsymbol{\Sigma}_{aa} & \boldsymbol{\Sigma}_{ab}\\ \boldsymbol{\Sigma}_{ba} & \boldsymbol{\Sigma}_{bb}\end{bmatrix}$ Notice that $\boldsymbol{\Sigma}_{aa}$ and $\boldsymbol{\Sigma}_{bb}$ are still symmetric, while $\boldsymbol{\Sigma}_{ab}=\boldsymbol{\Sigma}_{ba}^T$
#
# We can also introduce a similar partition for the precision matrix $\boldsymbol\Lambda=\boldsymbol\Sigma^{-1}$:
#
# $\boldsymbol{\Lambda}=\begin{bmatrix} \boldsymbol{\Lambda}_{aa} & \boldsymbol{\Lambda}_{ab}\\ \boldsymbol{\Lambda}_{ba} & \boldsymbol{\Lambda}_{bb}\end{bmatrix}$
# However, keep in mind that the partition of the inverse is not equal to the inverse of a partition!
# $\boldsymbol{\Lambda}_{aa}\ne\boldsymbol{\Sigma}_{aa}^{-1}$
# + [markdown] slideshow={"slide_type": "slide"}
# ## When Gaussian always Gaussian
#
# We can now reason in terms of the D-dimensional multivariate Gaussian distribution defined over the joint set $(\boldsymbol x_a,\boldsymbol x_b)$ as $p(\boldsymbol x_a,\boldsymbol x_b) = \mathcal{N}(\boldsymbol x|\boldsymbol\mu,\boldsymbol{\Sigma})$.
#
# The Gaussian distribution has unique properties!
#
# - The marginal distribution $p(\boldsymbol x_a) = \int p(\boldsymbol x_a,\boldsymbol x_b) d\boldsymbol x_b$ is Gaussian
# - The conditional distribution $p(\boldsymbol x_a|\boldsymbol x_b) = \frac{p(\boldsymbol x_a,\boldsymbol x_b)}{p(\boldsymbol x_b)} = \frac{p(\boldsymbol x_a,\boldsymbol x_b)}{\int p(\boldsymbol x_a,\boldsymbol x_b) d\boldsymbol x_a}$ is Gaussian
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Marginal distribution
#
# $p(\boldsymbol x_a, \boldsymbol x_b)=\mathcal{N}\left(\boldsymbol x|\begin{bmatrix}\boldsymbol{\mu_a}\\ \boldsymbol{\mu_b}\end{bmatrix} ,\begin{bmatrix} \boldsymbol{\Sigma}_{aa} & \boldsymbol{\Sigma}_{ab}\\ \boldsymbol{\Sigma}_{ba} & \boldsymbol{\Sigma}_{bb}\end{bmatrix}\right)$
#
# The marginal distribution is obtained when we 'marginalize' (i.e. integrate) the distribution over a set of random variables. In the 2D graphical representation this can be understood as collapsing the distribution over one axis.
#
# What are the mean and covariance matrix of the marginal distribution ?
#
#
# $p(\boldsymbol x_a) = \int p(\boldsymbol x_a,\boldsymbol x_b) d\boldsymbol x_b = \mathcal{N}(\boldsymbol x_a| ?, ?)$
# + [markdown] slideshow={"slide_type": "fragment"}
# $p(\boldsymbol x_a) = \int p(\boldsymbol x_a,\boldsymbol x_b) d\boldsymbol x_b = \mathcal{N}(\boldsymbol x_a| \boldsymbol \mu_a, \boldsymbol \Sigma_{aa})$
# + [markdown] slideshow={"slide_type": "slide"}
# ## Conditional distribution
# Whereas the result for the marginal distribution is somewhat intuitive, a less intuitive result holds for the conditional distribution, which we derive here.
#
# The conditional distribution $p(\boldsymbol x_a| \boldsymbol x_b)$ is simply evaluated by considering the joint distribution $p(\boldsymbol x_a,\boldsymbol x_b)$ and considering $\boldsymbol x_b$ as a constant.
#
# Using the partitioning introduced above for $\boldsymbol x$, $\boldsymbol \mu$ and the precision matrix $\boldsymbol\Lambda$, we have:
#
# $(\boldsymbol{x}-\boldsymbol{\mu})^T\boldsymbol{\Sigma}^{-1}(\boldsymbol{x}-\boldsymbol{\mu})=(\boldsymbol{x_a}-\boldsymbol{\mu_a})^T\boldsymbol{\Lambda}_{aa}(\boldsymbol{x_a}-\boldsymbol{\mu_a})+2(\boldsymbol{x_a}-\boldsymbol{\mu_a})^T\boldsymbol{\Lambda}_{ab}(\boldsymbol{x_b}-\boldsymbol{\mu_b})+(\boldsymbol{x_b}-\boldsymbol{\mu_b})^T\boldsymbol{\Lambda}_{bb}(\boldsymbol{x_b}-\boldsymbol{\mu_b})$
#
#
# + [markdown] slideshow={"slide_type": "fragment"}
# Now, we expect $p(\boldsymbol x_a| \boldsymbol x_b)\sim\mathcal N(\boldsymbol x_a|\boldsymbol\mu_{a|b},\boldsymbol\Sigma_{a|b})$.
# A general form for the argument of the exponent is
#
# $(\boldsymbol{x}_a-\boldsymbol{\mu_{a|b}})^T\boldsymbol{\Sigma}^{-1}_{a|b}(\boldsymbol{x}_a-\boldsymbol{\mu}_{a|b})=\boldsymbol x_a^T \boldsymbol{\Sigma}^{-1}_{a|b} \boldsymbol x_a -2 \boldsymbol x_a^T \boldsymbol{\Sigma}^{-1}_{a|b}\boldsymbol \mu_{a|b} + \boldsymbol \mu^T_{a|b} \boldsymbol{\Sigma}^{-1}_{a|b} \boldsymbol \mu_{a|b}$ (where we have used the symmetry of $\boldsymbol\Sigma_{a|b}$).
#
# + [markdown] slideshow={"slide_type": "slide"}
# $(\boldsymbol{x}-\boldsymbol{\mu})^T\boldsymbol{\Sigma}^{-1}(\boldsymbol{x}-\boldsymbol{\mu})=(\boldsymbol{x_a}-\boldsymbol{\mu_a})^T\boldsymbol{\Lambda}_{aa}(\boldsymbol{x_a}-\boldsymbol{\mu_a})+2(\boldsymbol{x_a}-\boldsymbol{\mu_a})^T\boldsymbol{\Lambda}_{ab}(\boldsymbol{x_b}-\boldsymbol{\mu_b})+(\boldsymbol{x_b}-\boldsymbol{\mu_b})^T\boldsymbol{\Lambda}_{bb}(\boldsymbol{x_b}-\boldsymbol{\mu_b})$
#
# $(\boldsymbol{x}_a-\boldsymbol{\mu_{a|b}})^T\boldsymbol{\Sigma}^{-1}_{a|b}(\boldsymbol{x}_a-\boldsymbol{\mu}_{a|b})=\boldsymbol x_a^T \boldsymbol{\Sigma}^{-1}_{a|b} \boldsymbol x_a -2 \boldsymbol x_a^T \boldsymbol{\Sigma}^{-1}_{a|b}\boldsymbol \mu_{a|b} + \boldsymbol \mu^T_{a|b} \boldsymbol{\Sigma}^{-1}_{a|b} \boldsymbol \mu_{a|b}$
#
# It is now sufficient to equate equal terms in $\boldsymbol x_a$ in the above two equations.
#
# Terms in $\boldsymbol x_a^2\longrightarrow$: $\boldsymbol x_a^T \boldsymbol{\Sigma}^{-1}_{a|b} \boldsymbol x_a = \boldsymbol x_a^T \boldsymbol{\Lambda}_{aa} \boldsymbol x_a$, from which $\boldsymbol{\Sigma}_{a|b} = \boldsymbol{\Lambda}_{aa}^{-1}$
#
# Terms in $\boldsymbol x_a\longrightarrow$: $2\boldsymbol x_a^T(-\boldsymbol\Lambda_{aa}\boldsymbol\mu_a+\boldsymbol\Lambda_{ab}(\boldsymbol x_b-\boldsymbol \mu_b))= -2\boldsymbol x_a^T\boldsymbol\Sigma^{-1}_{a|b}\boldsymbol\mu_{a|b}$ from which $\boldsymbol\mu_{a|b}=\boldsymbol\Sigma_{a|b}(\boldsymbol\Lambda_{aa}\boldsymbol\mu_a-\boldsymbol\Lambda_{ab}(\boldsymbol x_b-\boldsymbol \mu_b))=\boldsymbol\mu_a-\boldsymbol\Lambda_{aa}^{-1}\boldsymbol\Lambda_{ab}(\boldsymbol x_b-\boldsymbol \mu_b)$
#
# + [markdown] slideshow={"slide_type": "slide"}
# So far we have:
#
# $\boldsymbol{\Sigma}_{a|b} = \boldsymbol{\Lambda}_{aa}^{-1}$
#
# $\boldsymbol\mu_{a|b}=\boldsymbol\mu_a-\boldsymbol\Lambda_{aa}^{-1}\boldsymbol\Lambda_{ab}(\boldsymbol x_b-\boldsymbol \mu_b)$
#
# However, we would like to express the covariance matrix and the mean of the conditional distribution $p(\boldsymbol x_a| \boldsymbol x_b)$ in terms of the partitioned covariance matrix and mean of the full distribution. We need to use the following identity that relates the inverse of a partitioned matrix, with the partition of the inverse:
#
# $\begin{bmatrix}A & B \\ C & D\end{bmatrix}^{-1} = \begin{bmatrix}(A-BD^{-1}C)^{-1} & -(A-BD^{-1}C)^{-1}BD^{-1} \\-D^{-1}C(A-BD^{-1}C)^{-1} & D^{-1}+D^{-1}C(A-BD^{-1}C)^{-1}BD^{-1} \end{bmatrix}$
#
# In our case
#
# $\boldsymbol{\begin{bmatrix}\boldsymbol\Sigma_{aa} & \boldsymbol\Sigma_{ab} \\ \boldsymbol\Sigma_{ba} & \boldsymbol\Sigma_{bb}\end{bmatrix}^{-1} = \begin{bmatrix}\boldsymbol\Lambda_{aa} & \boldsymbol\Lambda_{ab} \\ \boldsymbol\Lambda_{ba} & \boldsymbol\Lambda_{bb}\end{bmatrix}}$
#
# Hence: $\boldsymbol\Lambda_{aa} = (\boldsymbol\Sigma_{aa}- \boldsymbol\Sigma_{ab}\boldsymbol\Sigma_{bb}^{-1}\boldsymbol\Sigma_{ba})^{-1}$ and $\boldsymbol\Lambda_{ab} = - (\boldsymbol\Sigma_{aa}- \boldsymbol\Sigma_{ab}\boldsymbol\Sigma_{bb}^{-1}\boldsymbol\Sigma_{ba})^{-1}\boldsymbol\Sigma_{ab}\boldsymbol\Sigma_{bb}^{-1}$
#
# and finally:
#
# \begin{equation}\boxed{\boldsymbol\mu_{a|b}=\boldsymbol\mu_a+\boldsymbol\Sigma_{ab}\boldsymbol\Sigma_{bb}^{-1}(\boldsymbol x_b - \boldsymbol\mu_b) \\
# \boldsymbol\Sigma_{a|b} = \boldsymbol\Sigma_{aa} - \boldsymbol\Sigma_{ab}\boldsymbol\Sigma_{bb}^{-1}\boldsymbol\Sigma_{ba}} \end{equation}
# + cell_style="center" slideshow={"slide_type": "slide"}
## Example with a 2D distribution
def f(mu_a, mu_b, x_b=0, sigma_aa=1, sigma_bb=1, sigma_ab=0):
    """Plot a 2D Gaussian density and overlay the conditional p(x_a | x_b).

    Parameters are the two means, the entries of the 2x2 covariance matrix
    (sigma_aa, sigma_bb, sigma_ab) and the conditioning value x_b.
    """
    fig = plt.figure(figsize=(7, 7))
    xx, yy = np.mgrid[-5:5:0.1, -5:5:0.1]
    y = np.linspace(-5, 5, 100)
    Sigma = np.matrix([[sigma_aa, sigma_ab], [sigma_ab, sigma_bb]])
    Sigma_inv = np.linalg.inv(Sigma)
    Sigma_det = np.linalg.det(Sigma)
    # Joint density. The 2D Gaussian normalization is 1/(2*pi*sqrt(det Sigma));
    # the previous code used 1/(4*pi), off by a factor of 2.  The local name
    # `density` also avoids shadowing this function's own name `f`.
    density = 1/(2*np.pi)/np.sqrt(Sigma_det) * np.exp(
        -0.5*((xx-mu_a)**2*Sigma_inv[0, 0]
              + 2*(xx-mu_a)*(yy-mu_b)*Sigma_inv[0, 1]
              + (yy-mu_b)**2*Sigma_inv[1, 1]))
    # Conditional mean and VARIANCE of p(x_a | x_b) (boxed formulas above).
    mu_ab = mu_a + sigma_ab/sigma_bb*(x_b-mu_b)
    Sigma_cond = sigma_aa - sigma_ab**2/sigma_bb
    ax = fig.gca()
    ax.set_xlim(-5, 5)
    ax.set_ylim(-5, 5)
    cfset = ax.contourf(xx, yy, density, cmap='coolwarm')
    cset = ax.contour(xx, yy, density, colors='k')
    ax.clabel(cset, inline=1, fontsize=10)
    # Horizontal line marking the conditioning value x_b.
    ax.plot([-5, 5], [x_b, x_b], color='black')
    # Sigma_cond is a variance, so the 1D Gaussian pdf is
    # exp(-0.5*(y-mu)^2/Sigma_cond)/sqrt(2*pi*Sigma_cond); the previous code
    # divided by Sigma_cond**2 as if Sigma_cond were a standard deviation.
    ax.plot(y,
            x_b + 1/np.sqrt(2*np.pi*Sigma_cond)
            * np.exp(-0.5*(y-mu_ab)**2/Sigma_cond),
            color='yellow', linewidth=2)
    ax.set_xlabel('x_a', fontsize=16)
    ax.set_ylabel('x_b', fontsize=16)
    # Plain string concatenation replaces the deprecated
    # np.core.defchararray.add for building the annotation labels.
    ax.text(-4, 3, '$\\Sigma^{-1}=$\n' + np.array_str(Sigma_inv), fontsize=16)
    ax.text(-4, -3.5, '$\\Sigma=$\n' + np.array_str(Sigma), fontsize=16)
interactive_plot = interactive(f, sigma_aa=(0, 3.0), sigma_bb=(0, 3.0), sigma_ab=(0, 3.0), mu_a=(-2.0, 2.0), mu_b=(-2.0, 2.0), x_b=(-2.0, 2.0))
output = interactive_plot.children[-1]
# + cell_style="split" slideshow={"slide_type": "slide"}
# Render the interactive widget built above (sliders for the means, the
# covariance entries and the conditioning value x_b).
interactive_plot
# + [markdown] cell_style="split" slideshow={"slide_type": "fragment"}
# What are the interesting properties of $\boldsymbol\mu_{a|b}$ and $\boldsymbol\Sigma_{a|b}$ ??
# + [markdown] cell_style="split" slideshow={"slide_type": "fragment"}
# $\boldsymbol\mu_{a|b}$ depends linearly on $\boldsymbol x_b$
#
# $\boldsymbol\Sigma_{a|b}$ depends on all partitions of $\boldsymbol\Sigma$ but it is INDEPENDENT of $\boldsymbol x_b$
# + [markdown] cell_style="split" slideshow={"slide_type": "fragment"}
# ## End of Part 1 !!
| Gaussian Process Regression Part 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Sin regularizar
# +
library('fastDummies')
set.seed(103783)
# Cargo el csv
# Load the student performance dataset (UCI "student-mat").
mat <- read.csv("student-mat.csv")
# Train y Test
# 75/25 train/test split on row indices (without replacement).
sample <- sample.int(n = nrow(mat), size = floor(.75*nrow(mat)), replace = F)
train <- mat[sample, ]
test <- mat[-sample, ]
# Target is G3 (final grade); all remaining columns are predictors.
y_train <- c(train$G3)
x_train <- subset(train, select = -c(G3))
y_test <- c(test$G3)
x_test <- subset(test, select = -c(G3))
# -
# Preproceso
# One-hot encode categorical columns (dropping the first dummy of each),
# drop the original categorical columns by position, then standardize.
# NOTE(review): the hard-coded column indices assume the exact student-mat
# column layout after dummy_cols -- confirm if the csv schema changes.
preprocesar <- function(mat) {
    mat_prepros <- dummy_cols(mat,remove_first_dummy = TRUE)
    mat_prepros <- mat_prepros[-c(1,2,4,5,6,9,10,11,12,16,17,18,19,20,21,22,23)]
    as.data.frame(scale(mat_prepros))
}
# Fit an unregularized linear model and evaluate it on the held-out test set.
x_train <- preprocesar(x_train)
model <- lm(formula=y_train~.,data=x_train)
summary(model)
x_test <- preprocesar(x_test)
pred <- predict(model, x_test)
modelEval <- cbind(y_test, pred)
colnames(modelEval) <- c('Actual', 'Predicted')
modelEval <- as.data.frame(modelEval)
mse <- mean((modelEval$Actual - modelEval$Predicted)**2)
rmse <- sqrt(mse)
# cat() already prints its arguments; wrapping it in print() additionally
# printed a spurious NULL (cat's return value).
cat("Mean Squared Error:", mse, "\n")
# BUG FIX: rmse is the ROOT mean squared error; the previous label wrongly
# called it the mean absolute error.
cat("Root Mean Squared Error:", rmse, "\n")
# # Lasso
library("glmnet")
# +
# Cargo el csv
# Reload and re-split the data for the Lasso experiment.
mat <- read.csv("student-mat.csv")
# Train y Test
sample <- sample.int(n = nrow(mat), size = floor(.75*nrow(mat)), replace = F)
train <- mat[sample, ]
test <- mat[-sample, ]
y_train <- c(train$G3)
x_train <- subset(train, select = -c(G3))
y_test <- c(test$G3)
x_test <- subset(test, select = -c(G3))
# +
# Same preprocessing as preprocesar(), duplicated here because glmnet needs
# matrices rather than data.frames.
# NOTE(review): the test set is scaled with its own mean/sd rather than the
# training statistics -- verify this is intended.
mat_prepros <- dummy_cols(x_train,remove_first_dummy = TRUE)
mat_prepros <- mat_prepros[-c(1,2,4,5,6,9,10,11,12,16,17,18,19,20,21,22,23)]
x_train <- scale(mat_prepros)
mat_prepros <- dummy_cols(x_test,remove_first_dummy = TRUE)
mat_prepros <- mat_prepros[-c(1,2,4,5,6,9,10,11,12,16,17,18,19,20,21,22,23)]
x_test <- scale(mat_prepros)
# -
# Grid of regularization strengths, from 100 down to 0.001.
lambdas <- 10^seq(2, -3, by = -.1)
# +
# Setting alpha = 1 implements lasso regression
lasso_reg <- cv.glmnet(x_train, y_train, alpha = 1, lambda = lambdas, standardize = TRUE, nfolds = 5)
# Best
# lambda minimizing the 5-fold cross-validation error.
lambda_best <- lasso_reg$lambda.min
lambda_best
# +
# Compute RMSE and R^2 of `predicted` against `true`; `df` is used only for
# its row count when normalizing the squared error.
eval_results <- function(true, predicted, df) {
    sum_sq_err <- sum((predicted - true)^2)
    total_sq <- sum((true - mean(true))^2)
    # Model performance metrics
    data.frame(
        RMSE = sqrt(sum_sq_err / nrow(df)),
        Rsquare = 1 - sum_sq_err / total_sq
    )
}
# Prediction and evaluation on train data
predictions_train <- predict(lasso_reg, s = lambda_best, newx = x_train)
eval_results(y_train, predictions_train, x_train)
# Prediction and evaluation on test data.
predictions_test <- predict(lasso_reg, s = lambda_best, newx = x_test)
eval_results(y_test, predictions_test, x_test)
# -
# Coefficients at the best lambda; Lasso zeroes out unimportant predictors.
coef(lasso_reg, s = "lambda.min")
# # Ridge
library("glmnet")
# +
# Cargo el csv
# Reload and re-split the data for the Ridge experiment.
mat <- read.csv("student-mat.csv")
# Train y Test
sample <- sample.int(n = nrow(mat), size = floor(.75*nrow(mat)), replace = F)
train <- mat[sample, ]
test <- mat[-sample, ]
y_train <- c(train$G3)
x_train <- subset(train, select = -c(G3))
y_test <- c(test$G3)
x_test <- subset(test, select = -c(G3))
# +
# Same preprocessing as in the Lasso section (matrices for glmnet).
mat_prepros <- dummy_cols(x_train,remove_first_dummy = TRUE)
mat_prepros <- mat_prepros[-c(1,2,4,5,6,9,10,11,12,16,17,18,19,20,21,22,23)]
x_train <- scale(mat_prepros)
mat_prepros <- dummy_cols(x_test,remove_first_dummy = TRUE)
mat_prepros <- mat_prepros[-c(1,2,4,5,6,9,10,11,12,16,17,18,19,20,21,22,23)]
x_test <- scale(mat_prepros)
# -
lambdas <- 10^seq(2, -3, by = -.1)
# alpha = 0 implements ridge regression.
ridge_reg <- cv.glmnet(x_train, y_train, alpha = 0, lambda = lambdas)
optimal_lambda <- ridge_reg$lambda.min
optimal_lambda
# +
# Prediction and evaluation on train data
predictions_train <- predict(ridge_reg, s = optimal_lambda, newx = x_train)
eval_results(y_train, predictions_train, x_train)
# Prediction and evaluation on test data
predictions_test <- predict(ridge_reg, s = optimal_lambda, newx = x_test)
eval_results(y_test, predictions_test, x_test)
# -
coef(ridge_reg, s = "lambda.min")
| M-regresion_lineal_r.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:PROJ_irox_oer] *
# language: python
# name: conda-env-PROJ_irox_oer-py
# ---
# ### Featurize IrOx slabs from Seoin
# ---
# +
# #########################################################
# This haven't been done
# #########################################################
# ('features', 'o', 'O_magmom'),
# ('features', 'o', 'Ir_magmom'),
# ('features', 'o', 'Ir*O_bader'),
# ('features', 'o', 'Ir_bader'),
# ('features', 'o', 'O_bader'),
# ('features', 'o', 'p_band_center'),
# #########################################################
# These are done
# #########################################################
# ('features', 'o', 'bulk_oxid_state'),
# ('features', 'o', 'angle_O_Ir_surf_norm'),
# ('features', 'o', 'active_o_metal_dist'),
# ('features', 'o', 'effective_ox_state'),
# ('features', 'o', 'ir_o_mean'),
# ('features', 'o', 'ir_o_std'),
# ('features', 'o', 'octa_vol'),
# ('features', 'o', 'dH_bulk'),
# ('features', 'o', 'volume_pa'),
# -
# ### Import Modules
# +
import os
print(os.getcwd())
import sys
import pickle
from pathlib import Path
import pandas as pd
import numpy as np
# #########################################################
from methods_features import get_octa_geom, get_octa_vol
from methods_features import get_angle_between_surf_normal_and_O_Ir
# #########################################################
from local_methods import get_df_coord_local
from local_methods import get_effective_ox_state
# -
# Show all dataframe columns when displaying; row/width limits stay default.
pd.set_option("display.max_columns", None)
# pd.set_option('display.max_rows', None)
# pd.options.display.max_colwidth = 100
# +
dir_i = os.path.join(
    os.environ["PROJ_irox_oer"],
    "workflow/seoin_irox_data")
# #########################################################
# Adsorption-energy dataframe (one row per system).
path_i = os.path.join(
    dir_i, "out_data/df_ads_e.pickle")
with open(path_i, "rb") as fle:
    df_ads_e = pickle.load(fle)
# #########################################################
# OER slab dataframe, indexed by the per-adsorbate indices in df_ads_e.
path_i = os.path.join(
    dir_i, "out_data/df_oer.pickle")
with open(path_i, "rb") as fle:
    df_oer = pickle.load(fle)
# #########################################################
# Bulk properties, re-indexed by crystal name for the lookups below.
path_i = os.path.join(
    dir_i, "process_bulk_data",
    "out_data/df_seoin_bulk.pickle")
with open(path_i, "rb") as fle:
    df_bulk = pickle.load(fle)
df_bulk = df_bulk.set_index("crystal")
# +
# # TEMP
# print(111 * "TEMP | ")
# df_ads_e = df_ads_e.dropna(axis=0, subset=["active_site__o", "active_site__oh", "active_site__ooh"])
# +
# Assemble one feature row per entry of df_ads_e by combining slab geometry
# descriptors (computed on the *O-covered slab) with bulk properties.
data_dict_list = []
for name_i, row_i in df_ads_e.iterrows():
    # #####################################################
    name_dict_i = dict(zip(
        df_ads_e.index.names,
        name_i))
    # #####################################################
    name_str_i = row_i["name"]
    index_o_i = row_i.index_o
    active_site_o_i = row_i.active_site__o
    # bulk_oxid_state_i = row_i.bulk_oxid_state
    # #####################################################
    crystal_i = name_dict_i["crystal"]
    # #####################################################
    # Final and initial atoms of the *O slab for this system.
    # #####################################################
    row_oer_o_i = df_oer.loc[index_o_i]
    # #####################################################
    atoms_o_i = row_oer_o_i.atoms
    atoms_o_init_i = row_oer_o_i.atoms_init
    # #####################################################
    # Bulk properties of the parent crystal.
    # #####################################################
    row_bulk_i = df_bulk.loc[crystal_i]
    # #####################################################
    volume_pa_i = row_bulk_i.volume_pa
    dH_i = row_bulk_i.dH
    # #####################################################
    # Coordination environments for the relaxed and unrelaxed geometries.
    df_coord_o_final_i = get_df_coord_local(
        name=name_str_i,
        ads="o",
        atoms=atoms_o_i,
        append_str="_final",
    )
    df_coord_o_init_i = get_df_coord_local(
        name=name_str_i,
        ads="o",
        atoms=atoms_o_init_i,
        append_str="_init",
    )
    eff_ox_out_i = get_effective_ox_state(
        active_site=active_site_o_i,
        df_coord_i=df_coord_o_final_i,
        df_coord_init_i=df_coord_o_init_i,
        metal_atom_symbol="Ir",
    )
    eff_ox_i = eff_ox_out_i["effective_ox_state"]
    # #####################################################
    # Octahedral geometry
    octa_geom_out = get_octa_geom(
        df_coord_i=df_coord_o_final_i,
        active_site_j=active_site_o_i,
        atoms=atoms_o_i,
        verbose=True,
    )
    # BUG FIX: suffix the keys with "__o". The previous code popped keys
    # while iterating the same dict (for k in d.keys(): d[k + "__o"] =
    # d.pop(k)), which mutates the dict during iteration and can skip keys,
    # re-suffix already-renamed keys, or raise RuntimeError. Rebuild instead.
    octa_geom_out = {
        key_i + "__o": val_i for key_i, val_i in octa_geom_out.items()}
    octa_vol_i = get_octa_vol(
        df_coord_i=df_coord_o_final_i,
        active_site_j=active_site_o_i,
        verbose=True,
    )
    # #####################################################
    # Ir-O Angle relative to surface normal
    angle_i = get_angle_between_surf_normal_and_O_Ir(
        atoms_o_i,
        df_coord=df_coord_o_final_i,
        active_site=active_site_o_i,
    )
    # #####################################################
    data_dict_i = dict()
    # #####################################################
    data_dict_i["effective_ox_state__o"] = eff_ox_i
    data_dict_i["octa_vol__o"] = octa_vol_i
    data_dict_i["angle_O_Ir_surf_norm__o"] = angle_i
    data_dict_i["dH_bulk"] = dH_i
    data_dict_i["volume_pa"] = volume_pa_i
    # data_dict_i["bulk_oxid_state"] = bulk_oxid_state_i
    # #####################################################
    data_dict_i.update(octa_geom_out)
    data_dict_i.update(name_dict_i)
    # #####################################################
    data_dict_list.append(data_dict_i)
    # #####################################################
# #########################################################
# Combine the computed features with the adsorption-energy targets.
df_feat = pd.DataFrame(data_dict_list)
df_feat = df_feat.set_index(df_ads_e.index.names)
df_features_targets = pd.concat([
    df_feat,
    df_ads_e.drop(columns=["O_Ir_frac_ave", ])
    ], axis=1)
# #########################################################
# -
# ### Processing columns
# +
# Leftover inspection statement; its value is only shown in the notebook.
df_features_targets.columns.tolist()
# Map each flat column name to its (level0, level1, level2) tuple in the
# target MultiIndex: "features", "targets" or "data" at the top level.
multicolumn_assignments = {
    # #######################
    # Features ##############
    "effective_ox_state__o": ("features", "effective_ox_state", "", ),
    # "effective_ox_state__o": ("features", "o", "effective_ox_state", ),
    "octa_vol__o": ("features", "o", "octa_vol", ),
    "active_o_metal_dist__o": ("features", "o", "active_o_metal_dist", ),
    "ir_o_mean__o": ("features", "o", "ir_o_mean", ),
    "ir_o_std__o": ("features", "o", "ir_o_std", ),
    "angle_O_Ir_surf_norm__o": ("features", "o", "angle_O_Ir_surf_norm", ),
    "bulk_oxid_state": ("features", "bulk_oxid_state", "", ),
    "dH_bulk": ("features", "dH_bulk", "", ),
    "volume_pa": ("features", "volume_pa", "", ),
    # #######################
    # Targets ###############
    "e_o": ("targets", "e_o", "", ),
    "e_oh": ("targets", "e_oh", "", ),
    "e_ooh": ("targets", "e_ooh", "", ),
    "g_o": ("targets", "g_o", "", ),
    "g_oh": ("targets", "g_oh", "", ),
    "g_ooh": ("targets", "g_ooh", "", ),
    # #######################
    # Data ##################
    "index_bare": ("data", "index_bare", "", ),
    "index_o": ("data", "index_o", "", ),
    "index_oh": ("data", "index_oh", "", ),
    "index_ooh": ("data", "index_ooh", "", ),
    "name": ("data", "name", "", ),
    "active_site__o": ("data", "active_site__o", "", ),
    "active_site__oh": ("data", "active_site__oh", "", ),
    "active_site__ooh": ("data", "active_site__ooh", "", ),
    "stoich": ("data", "stoich", "", ),
    }
# +
# Translate flat column names into MultiIndex tuples; columns without an
# explicit assignment keep their original name unchanged.
new_cols = [
    multicolumn_assignments.get(col_i, col_i)
    for col_i in df_features_targets.columns]
idx = pd.MultiIndex.from_tuples(new_cols)
df_features_targets.columns = idx
# -
# Fix the order of the top level of the column MultiIndex, then sort the
# remaining levels lexicographically.
df_features_targets = df_features_targets.reindex(
    columns=[
        "targets",
        "data",
        "format",
        "features",
        "features_pre_dft",
        "features_stan",
    ],
    level=0)
df_features_targets = df_features_targets.sort_index(axis=1)
# Partition columns: non-feature columns first, then system-level features,
# then per-adsorbate (o/oh/ooh) features last. Comprehensions preserve the
# sorted column order within each group.
all_cols = list(df_features_targets.columns)
other_cols = [c for c in all_cols if c[0] != "features"]
other_feature_cols = [
    c for c in all_cols
    if c[0] == "features" and c[1] not in ("o", "oh", "ooh")]
ads_feature_cols = [
    c for c in all_cols
    if c[0] == "features" and c[1] in ("o", "oh", "ooh")]
df_features_targets = df_features_targets[
    other_cols + other_feature_cols + ads_feature_cols]
# ### Write data to file
# +
# Pickling data ###########################################
# Persist the assembled features/targets dataframe for downstream notebooks.
directory = os.path.join(
    os.environ["PROJ_irox_oer"],
    "workflow/seoin_irox_data/featurize_data",
    "out_data")
# exist_ok avoids the check-then-create race of the previous
# `if not os.path.exists(...): os.makedirs(...)` guard.
os.makedirs(directory, exist_ok=True)
path_i = os.path.join(directory, "df_features_targets.pickle")
with open(path_i, "wb") as fle:
    pickle.dump(df_features_targets, fle)
# #########################################################
# -
df_features_targets.head()
# + active=""
#
#
#
# + jupyter={}
# df_features_targets
# + jupyter={}
# # df_features_targets.sort_values([("features", ) ])
# df_features_targets.columns = df_features_targets.columns.sortlevel()[0]
# + jupyter={}
# df_features_targets
# + jupyter={}
# # df_features_targets =
# df_features_targets.reindex(columns=[
# # "targets",
# # "data",
# # "format",
# "features",
# # "features_pre_dft",
# # "features_stan",
# ], level=0)
# + jupyter={}
# df_ads_e.index.to_frame().crystal.unique().tolist()
# + jupyter={}
# row_i
# -
# + jupyter={}
# assert False
# + jupyter={}
# df_features_targets.columns = df_features_targets.columns.sortlevel()[0]
# + jupyter={}
# df_features_targets
# + jupyter={}
# assert False
# + jupyter={}
# df_features_targets.columns
# + jupyter={}
# df_features_targets["features"]
# + jupyter={}
# assert False
# +
# df_ads_e
# +
# df_features_targets
# +
# assert False
# +
# df_features_targets["effective_ox_state__o"].tolist()
# +
# df_features_targets
| workflow/seoin_irox_data/featurize_data/featurize_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="j7flsxJvf59i" outputId="53ba5348-dc93-4cbd-e789-fc2b4e6eaa28"
from google.colab import drive
import os
import sys
from collections import OrderedDict
import pickle
# Mount Google Drive and switch into the project directory so the relative
# csv paths used below resolve.
drive.mount('/content/drive')
os.chdir("./drive/MyDrive/Project/")
# + id="AXPtIfNBgDzV"
# %%capture
# # !python -m pip install -U matplotlib
# !pip install tweet-preprocessor
# !pip install matplotlib==3.1.3
# !pip install transformers
# !apt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg
# !pip install librosa soundfile numpy sklearn pyaudio
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import tensorflow_datasets as tfds
from transformers import TFRobertaForSequenceClassification
from transformers import RobertaTokenizer
import os
import yaml
from datetime import datetime
from os import path
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
% matplotlib inline
import warnings
warnings.filterwarnings("ignore")
# + [markdown] id="SdBquL_JZgOZ"
# # Exploratory Data analysis for MELD Dataset
#
# ---
#
#
# + id="j5KQBptTMJ6O"
#load meld text data
# Load the MELD train/test/dev splits for the EDA below.
train = pd.read_csv("train_sent_emo.csv")
test = pd.read_csv("test_sent_emo.csv")
validation = pd.read_csv("dev_sent_emo.csv")
#adding all data together
# DataFrame.append is deprecated (removed in pandas 2.0); a single concat
# also avoids two intermediate copies.
train = pd.concat([train, test, validation], ignore_index=True)
# + id="XRv311UgYEpq"
# add time of clips to dataset
# Clip duration in seconds (EndTime - StartTime, both "H:M:S,ms" strings).
# Vectorized to_datetime replaces the previous per-row loop that used chained
# assignment (train['time'][i] = ...), a SettingWithCopy hazard and O(n)
# Python-level work.
train['time'] = (
    pd.to_datetime(train['EndTime'], format='%H:%M:%S,%f')
    - pd.to_datetime(train['StartTime'], format='%H:%M:%S,%f')
).dt.total_seconds()
# + colab={"base_uri": "https://localhost:8080/"} id="WPKp6tPeHeBC" outputId="3c4ff963-8db5-4221-f9d0-db37132f473b"
# Summary statistics of the clip durations (seconds).
print("MEAN of time of clips: ",np.asarray(train['time']).mean())
print("STD DEV of the time clips: ",np.asarray(train['time']).std())
# + [markdown] id="8zMIx9vIaxf1"
# GET BIN OF MELD EMOTIONS
# + id="0i1Bo8EddQRY" colab={"base_uri": "https://localhost:8080/"} outputId="75fa0c40-999b-4a74-9e9e-b8b671b012e2"
# Bucket utterances by emotion label.
# e[i] holds the utterances carrying emotion unique[i]; ecnt[i] their count.
unique = train.Emotion.unique()
# Group once instead of rescanning the whole frame per emotion (O(n) instead
# of O(n * n_emotions)); groupby preserves row order within each group.
grouped = train.groupby('Emotion')['Utterance']
e = [list(grouped.get_group(val)) for val in unique]
ecnt = [len(utterances) for utterances in e]
print("data bins:")
for i,val in enumerate(unique):
    print(f'{val}:{ecnt[i]}')
# + id="MwNkZ6NCedLv"
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="mgnd6qvNewfM" outputId="1e182ecc-f8ba-4fe8-eff0-05fa032522e1"
# One word cloud per emotion, built from all utterances with that label.
# NOTE(review): `clouds` is never appended to here -- possibly used later
# in the full notebook; left as-is.
clouds = []
for i in range(len(unique)):
    print(unique[i])
    # "\x92" is a stray Windows-1252 apostrophe byte in the transcripts.
    wordcloud = WordCloud(background_color='white').generate(' '.join(e[i]).replace("\x92",""))
    # Display the generated image:
    plt.title(f"Top Words with the Emotion {unique[i].capitalize() }")
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")
    plt.show()
# + id="tLcrVn78hrZq"
import itertools
# Flatten the per-emotion utterance lists into one list, strip the stray
# Windows-1252 apostrophe byte, and histogram utterance word counts.
merged = list(itertools.chain(*e))
bins = {}
for idx, utterance in enumerate(merged):
    cleaned = utterance.replace("\x92", "")
    merged[idx] = cleaned
    word_count = len(cleaned.split())
    bins[word_count] = bins.get(word_count, 0) + 1
# + colab={"base_uri": "https://localhost:8080/"} id="QADRbYqYdHcL" outputId="b466d412-3249-438c-a550-f0623696bf97"
# Per-emotion utterance counts (same information as ecnt above).
for i in range(len(e)):
    print(unique[i],len(e[i]))
# + id="EGP7hRkscotT"
# Per-speaker row subsets and line counts.
# vals: speaker -> dataframe of that speaker's rows; cont: speaker -> count.
un = train['Speaker'].unique()
# One groupby pass instead of a .loc scan per speaker; iterating `un` keeps
# first-appearance insertion order so tie-breaking in the stable sort below
# matches the original behavior.
groups = {speaker: rows for speaker, rows in train.groupby('Speaker')}
vals = {}
cont = {}
for speaker in un:
    data = groups[speaker]
    vals[speaker] = data
    cont[speaker] = len(data)
# Speakers ranked by number of lines, descending. (The previous lambda
# parameter was named `cont`, shadowing the dict.)
PersonList = sorted(cont.items(), key=lambda item: item[1], reverse=True)
# + colab={"base_uri": "https://localhost:8080/"} id="-6EYg8PEFpq7" outputId="13789259-029f-4d25-995b-683eeeaaf9e5"
# How many distinct speakers appear, and the six most prolific ones.
print("number of unique characthers: ",len(train['Speaker'].unique()))
# + id="WTf5IAUSZlIf" colab={"base_uri": "https://localhost:8080/"} outputId="4561a1fb-a561-49bb-b2cf-6e614d7b060a"
print("top lines by actors",PersonList[:6])
# + id="bPR5Y2uFRTMM" colab={"base_uri": "https://localhost:8080/"} outputId="f4567bec-3c59-4de4-efc3-5ea66a03db70"
# Total number of lines, and the share spoken by the six main characters.
# (The previous version computed `total` twice and accumulated cn80 in a
# manual index loop.)
total = len(train['Speaker'])
cn80 = sum(PersonList[i][1] for i in range(6))
ratio = cn80/total
left = total-cn80
print(total)
# + id="Sve5hTPQQbOE"
# Number of lines outside the first 95% of the data.
dataleft = total - int(0.95*total)
# + colab={"base_uri": "https://localhost:8080/"} id="ET2yodLubPxd" outputId="2da97a85-31ee-44c8-b049-0d7602fdde74"
# Bucket speakers into descending line-count bands (90+, 20-89, 10-19, 5-9,
# 1-4). PersonList is sorted descending, so each band occupies a contiguous
# slice starting where the previous band ended. One parametrized loop
# replaces four copy-pasted loops; prints and final variables are unchanged.
tot80=6
print("charachter bins")
print(tot80,cn80)
totall = tot80
band_tots = []
band_counts = []
for threshold in (20, 10, 5, 0):
    tot = 0
    count = 0
    for _, n_lines in PersonList[totall:]:
        if n_lines > threshold:
            tot += 1
            count += n_lines
    totall += tot
    print(tot, count)
    band_tots.append(tot)
    band_counts.append(count)
tot20, tot10, tot5, tot0 = band_tots
count20, count10, count5, count0 = band_counts
x = [cn80, count20, count10,count5,count0]
bins = ['90+','20-89','10-19','5-9','1-4']
number = [tot80,tot20,tot10,tot5,tot0]
# + [markdown] id="7YxSb8YNigUY"
# # number of lines spoken by charachters
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="nPJoUrOZcR0q" outputId="a6418f5b-8d2a-45f0-86ae-8e695ea7684c"
# Bar chart of lines-spoken bands (counts per band).
plt.bar(bins,x)
plt.xlabel('Bins')
plt.ylabel('Number of Samples')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 309} id="h_harRk3jIsD" outputId="d35b63e5-ac85-44e5-e635-90031ba80b7c"
# Same data with a title and axis labels for the report figure.
fig, ax = plt.subplots()
one = ax.bar(bins, x, label='Number of')
plt.xlabel('Lines Spoken')
plt.ylabel('Number of lines')
# ax.bar_label(one, labels=number,padding=3)
ax.set_title('Number of Lines by Speakers ')
plt.tight_layout(pad=0.5)
plt.margins(0.1)
plt.show()
# + [markdown] id="bIV4rNdKjMOs"
# # <NAME>
# + colab={"base_uri": "https://localhost:8080/", "height": 387} id="D1X22Bp-HVNk" outputId="6e97f82f-a6a0-4be7-e9e4-9f11f344e0d8"
import statistics
import seaborn as sns
import nltk
import preprocessor as p
# Clean each utterance (tweet-preprocessor strips URLs/mentions/etc.),
# collecting character lengths, individual words, word counts and bigrams.
allll_text = []
alll_len = []
all_seperate = []
bigrams = []
wods =[]
for i in range(len(train['Utterance'])):
    k = p.clean(train['Utterance'][i].lower())
    allll_text.append(k)
    alll_len.append(len(k))
    all_seperate+=k.split()
    wods.append(len(k.split()))
    # bigrams.append()
    # NOTE(review): the inner loop variable shadows the outer `i`; harmless
    # here because the outer loop iterates a range object, but fragile.
    gr = list(nltk.bigrams(k.split()))
    for i in gr:
        bigrams.append(f'{i[0]} {i[1]}')
# NOTE(review): these two results are discarded (only the last expression of
# a notebook cell is displayed); wrap in print() if the values are wanted.
statistics.mean(alll_len)
statistics.stdev(alll_len)
# sns.displot(, x="Word_distrbution", bins=20)
# Distribution of utterance lengths in characters.
sns.displot(alll_len)
# + colab={"base_uri": "https://localhost:8080/", "height": 309} id="u9c_jS-gIQUF" outputId="4333a3a6-13b0-4ed2-8259-c9b5fde8be42"
# NOTE(review): matplotlib.pyplot is already imported earlier in the notebook.
import matplotlib.pyplot as plt
# Bar chart of utterance counts per emotion label.
fig, ax = plt.subplots()
one = ax.bar(unique,ecnt , label='Number of')
plt.xlabel('Emotion')
plt.ylabel('Number of lines')
# ax.bar_label(one,padding=3)
ax.set_title('Number Lines Per Emotion')
plt.tight_layout(pad=0.5)
plt.margins(0.1)
plt.show()
| data preprocessing/Meld_Data_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Welcome to JupyterLab: Get to know the environment
# This section is not yet about Python, but the interface that allows us to work with Python in an interactive and fun way. What you see here is __JupyterLab__, a browser based developer environments to __interactively run code and write rich documentation__. The heart of JupyterLab are so called __notebooks__ (you are looking at one right now), which consist of __cells__ that can contain either executable code or markdown (formatted content like text, images, links, etc).
#
# __Jupyter notebooks are excellent for prototyping, data exploration and visualization__ due to their linear nature (cell-by-cell top down) and the possibility to combine code snippets with nicely formatted text cells (such as what you are looking at right here). When it comes to building automation scripts or more complex applications there are more suitable developer environments (VisualCode, PyCharm) that support the required code architecture patterns (splitting code in reusable functions and modules) better.
#
# The following paragraphs explain the most important features, but there is much more to check out in the [documentation](https://jupyterlab.readthedocs.io/en/stable/getting_started/overview.html) if you are interested.
# #### How to run code interactively
# 1. Click into the grey area with the code. Press __Ctrl + Enter__ to run the code in the cell or __Shift + Enter__ to run the code and subsequently select the next cell.
# 2. Note how the number in the brackets of __In[ ]__ changes in the top left of the cell. The number increases with each executed cell and allows you to easily track in which order the cells were executed.
# + tags=[]
# Demo cell: prints a message and the result of a trivial computation.
print("Click in this gray area and press CTRL + ENTER to run me interactively!")
print(2 + 2)
# -
# Jupyter notebooks also allow to display more complex results, such as plots or tables. __You do not need to understand what is going on in the next two code cells, we will come to that soon enough!__ Just run them and dream of the possibilities by displaying the output of your future computations in such a nice way!
# +
# Borrowed from https://matplotlib.org/stable/tutorials/introductory/pyplot.html
import matplotlib.pyplot as plt
from random import randint, gauss
# Build a small synthetic dataset: x is 0..49, y follows x plus Gaussian
# noise (a noisy linear trend), and each point gets a random color and size.
nr_points = 50
data = {'x': list(range(nr_points)),
        'color': [randint(0,50) for _ in range(nr_points)],
        's': [gauss(0,1) for _ in range(nr_points)]}
data['y'] = [(x + 10*gauss(0,1)) for x in data['x']]
data['size'] = [100 * abs(s) for s in data['s']]
# scatter() looks the string arguments up as keys in the `data` dict
plt.scatter('x', 'y', c='color', s='size', data=data)
plt.xlabel('x')
plt.ylabel('y')
plt.title('Random points with a trend - Similar to many real life phenomena')
plt.show()
# -
# Print the first 5 values of data used for the plot above in tabular form
import pandas as pd
pd.DataFrame(data, columns=['x','y','color','size']).head(5)
# + [markdown] jp-MarkdownHeadingCollapsed=true tags=[]
# #### Some useful commands
# - To reset all cell outputs, click "Edit" -> "Clear All Outputs"
# - To reset the kernel (the Python interpreter), click "Kernel" -> "Restart & Clear Output"
# - To rename your notebook, right-click the file in the browser pane to the left -> "Rename"
# - To export your notebook to different file formats, click "File" -> "Save and Export Notebook As"
# - You can change the order of cells by drag-n-drop.
# + [markdown] jp-MarkdownHeadingCollapsed=true tags=[]
# #### How to insert a new cell
# Maybe you want to experiment further or take some notes. In that case you can easily extend the notebook with new cells.
# 1. Click on an existing cell. If that cell contains a grey shaded code area, be sure to __not__ click into this area.
# 2. You can now press the __a or b__ keys to insert a new cell above or below respectively. If you want to remove the current cell, press the __d__ key twice.
# + [markdown] tags=[]
# #### How to edit cells
# -
# For code cells this is very easy, just click in the grey shaded area - voila.
#
# For markdown cells, double-click it to display the raw markdown text. Here you can write text and style it using markdown - I recommend having a look at this [cheatsheet](https://sqlbak.com/blog/jupyter-notebook-markdown-cheatsheet). When you are done, make sure to run the cell via Ctrl+Enter to render the markdown styling.
# + [markdown] tags=[]
# #### How to toggle between a markdown and a code cell
# A notebook can contain two types of cells: __Markdown cells__ hold text which can be formatted using the markdown language. __Code cells__ hold code that can be interactively executed. You recognize code cells easily by the grey shaded area and the __In[ ]:__ to their left.
# 1. Click on an existing cell. If that cell contains grey shaded code area, be sure to __not__ click into this area.
# 2. Press the __m or y__ key to change the cell to a markdown (m) or code cell (y) respectively.
# -
# # A final word of caution
# __When you define a variable in a cell it is available in all other cells as well__. In combination with the ability to run individual cells in arbitrary order, this might lead to strange behavior when a variable is being modified and used in multiple places - depending on the execution order of cells the same variable can have different values at different times.
#
# Try the following:
# 1. Run the following two cells in order, whereby the second cell prints "2".
# 2. Now run the third cell.
# 3. Run the second cell again. It now prints "4", because a was changed from 1 to 2 in the third cell.
# Cells share one namespace: `a` defined here is visible everywhere.
a = 1
print(a*2)
# Rebinding `a` here changes what the print cell above shows on its next run.
a = 2
# This might seem trivial for now, but if the cell content becomes more complex it can lead to nasty bugs.
# __Follow these best practices to avoid giving future-you a headache:__
# - Avoid using previously used variable names in cells that do not belong together thematically. OR explicitly define the variables in the beginning - thus explicitly assigning intended default values to the variables.
# - Given a blank slate, your notebook should be set up to run through from top to bottom in linear fashion without error and giving the intended results. To achieve this, regularly restart the kernel (the Python interpreter) by clicking "Kernel" -> "Restart & Clear Output". Then run through the notebook with Shift+Enter or "Run" -> "Run All Cells".
| content/copy_to_image/content_root/tutorials_jupyterlabgeoenv/001_welcome_jupyterlab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="GiCYf1Tm97ai"
# + [markdown] id="KFEQZoKIGZHa"
# ## Clone code from ggColab_DRL_single_stock_trading_OK.ipynb
# + id="cPZSlrCfGZvy"
# + colab={"base_uri": "https://localhost:8080/"} id="yM71W6T49_i5" outputId="1e9e3e11-ae98-4408-a748-e41e53081b72"
# !pip uninstall -y tensorflow==2.6.0
# + id="xIdpnjWI9_nK"
# + id="Wq86rPPE9_pq"
import pkg_resources
import pip
installedPackages = {pkg.key for pkg in pkg_resources.working_set}
required = {'yfinance', 'pandas', 'matplotlib', 'stockstats','stable-baselines','gym','tensorflow'}
missing = required - installedPackages
if missing:
# !pip install yfinance
# !pip install pandas
# !pip install matplotlib
# !pip install stockstats
# !pip install gym
# !pip install stable-baselines[mpi]
# !pip install tensorflow==1.15.4
# + id="G5tZhQCv0zx-"
# !git clone https://github.com/pqmsoft1/FinRL_single.git
# + colab={"base_uri": "https://localhost:8080/"} id="xM8GBGX-8m9M" outputId="d4658b12-1e8b-41a1-ef01-bc51edf0c742"
# cd FinRL_single/
# + id="2O1safkiA7MD"
# !ls
# + id="4AKa-X7vA9AZ"
# + id="FM4T6qfr-MpS"
# + colab={"base_uri": "https://localhost:8080/"} id="yw8rc7-d-MsC" outputId="f83c7f6e-f41d-4fa1-9f48-5b7b907ae1b6"
import yfinance as yf
from stockstats import StockDataFrame as Sdf
import pandas as pd
import matplotlib.pyplot as plt
import gym
from stable_baselines import PPO2, DDPG, A2C, ACKTR, TD3
from stable_baselines import DDPG
from stable_baselines import A2C
from stable_baselines import SAC
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.policies import MlpPolicy
#Diable the warnings
import warnings
warnings.filterwarnings('ignore')
#stockerID = "AMZN"
stockerID = "MSFT"
#stockerID = "TSLA"
stockerID ="FDX"
stockerID ="ABMD"
stockerID ="ABT"
stockerID = "AAP"
stockerID = "ABBV"
#stockerID = "AAPL"
'''
#AAPL
START_DATE_DATA = "2010-01-01"
END_DATE_DATA = "2021-08-20"
END_DATE_TRAIN_DATA = "2019-09-01"
'''
START_DATE_DATA = "2009-01-01"
#END_DATE_TRAIN_DATA = "2020-01-01"
END_DATE_TRAIN_DATA = "2020-01-01"
#END_DATE_DATA = "2021-01-20"
END_DATE_DATA = "2021-01-01"
PATH_CSV_STOCK = stockerID + ".csv"
data_df = yf.download(stockerID, start=START_DATE_DATA, end=END_DATE_DATA)
# reset the index, we want to use numbers instead of dates
data_df=data_df.reset_index()
# convert the column names to standardized names
data_df.columns = ['datadate','open','high','low','close','adjcp','volume']
# save the data to a csv file in your current folder
#data_df.to_csv('AAPL_2009_2020.csv')
#data_df.to_csv('FB_2009_2021.csv')
data_df.to_csv(PATH_CSV_STOCK)
# check missing data
data_df.isnull().values.any()
# calculate technical indicators like MACD
stock = Sdf.retype(data_df.copy())
# we need to use adjusted close price instead of close price
stock['close'] = stock['adjcp']
data_df['macd'] = stock['macd']
# check missing data again
data_df.isnull().values.any()
# Note that I always use a copy of the original data to try it track step by step.
data_clean = data_df.copy()
import numpy as np
import pandas as pd
from gym.utils import seeding
import gym
from gym import spaces
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# Global variables
HMAX_NORMALIZE = 200
INITIAL_ACCOUNT_BALANCE=100000
STOCK_DIM = 1
# transaction fee: 1/1000 reasonable percentage
TRANSACTION_FEE_PERCENT = 0.001
# REWARD_SCALING = 1e-3
class SingleStockEnv(gym.Env):
    """A stock trading environment for OpenAI gym.

    The agent trades a single stock. The state is the 4-element list
    [cash balance, adjusted close price, shares held, MACD]; the action is a
    scalar in [-1, 1] that is scaled by HMAX_NORMALIZE into a number of
    shares to buy (positive) or sell (negative). The per-step reward is the
    change in total portfolio value (cash + shares * price).
    """
    metadata = {'render.modes': ['human']}

    def __init__(self, df,day = 0):
        """Create the environment over price data `df`, starting at row `day`.

        `df` is expected to be indexed 0..N-1 and to provide `adjcp` and
        `macd` columns (see the data-preparation cell above).
        """
        #super(StockEnv, self).__init__()
        # date increment
        self.day = day
        self.df = df
        # action_space normalization and the shape is STOCK_DIM
        self.action_space = spaces.Box(low = -1, high = 1,shape = (STOCK_DIM,))
        # Shape = 4: [Current Balance]+[prices]+[owned shares] +[macd]
        self.observation_space = spaces.Box(low=0, high=np.inf, shape = (4,))
        # load data from a pandas dataframe
        self.data = self.df.loc[self.day,:]
        # termination
        self.terminal = False
        # save the total number of trades
        self.trades = 0
        # initialize state: [cash, price, shares held (0), macd]
        self.state = [INITIAL_ACCOUNT_BALANCE] + \
                     [self.data.adjcp] + \
                     [0]*STOCK_DIM + \
                     [self.data.macd]
        # initialize reward and cost
        self.reward = 0
        self.cost = 0
        # memorize the total value, total rewards
        self.asset_memory = [INITIAL_ACCOUNT_BALANCE]
        self.rewards_memory = []

    def _sell_stock(self, index, action):
        """Sell up to `abs(action)` shares of stock `index` (no short selling)."""
        # perform sell action based on the sign of the action
        if self.state[index+STOCK_DIM+1] > 0:
            # update balance: sale proceeds net of the transaction fee
            self.state[0] += \
                self.state[index+1]*min(abs(action),self.state[index+STOCK_DIM+1]) * \
                (1- TRANSACTION_FEE_PERCENT)
            # update held shares
            self.state[index+STOCK_DIM+1] -= min(abs(action), self.state[index+STOCK_DIM+1])
            # update transaction costs
            # NOTE(review): the holding was already decremented above, so this
            # min() sees the reduced share count and can understate the fee --
            # confirm against the upstream FinRL implementation.
            self.cost +=self.state[index+1]*min(abs(action),self.state[index+STOCK_DIM+1]) * \
                TRANSACTION_FEE_PERCENT
            self.trades+=1
        else:
            pass

    def _buy_stock(self, index, action):
        """Buy up to `action` shares of stock `index`, limited by available cash."""
        # perform buy action based on the sign of the action
        available_amount = self.state[0] // self.state[index+1]
        # update balance: purchase cost plus the transaction fee
        self.state[0] -= self.state[index+1]*min(available_amount, action)* \
            (1+ TRANSACTION_FEE_PERCENT)
        # update held shares
        self.state[index+STOCK_DIM+1] += min(available_amount, action)
        # update transaction costs
        self.cost+=self.state[index+1]*min(available_amount, action)* \
            TRANSACTION_FEE_PERCENT
        self.trades+=1

    def step(self, actions):
        """Advance one trading day.

        `actions` (length STOCK_DIM) is scaled by HMAX_NORMALIZE into share
        quantities; negative entries sell, positive entries buy. Returns the
        gym tuple (state, reward, done, info). On the terminal step it also
        writes account_value.png/.csv and account_rewards.csv and prints a
        summary of the episode.
        """
        self.terminal = self.day >= len(self.df.index.unique())-1

        if self.terminal:
            # episode finished: persist the equity curve and report statistics
            plt.plot(self.asset_memory,'r')
            plt.savefig('account_value.png')
            plt.close()
            end_total_asset = self.state[0]+ \
                sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):(STOCK_DIM*2+1)]))
            print("previous_total_asset:{}".format(self.asset_memory[0]))
            print("end_total_asset:{}".format(end_total_asset))
            df_total_value = pd.DataFrame(self.asset_memory)
            df_total_value.to_csv('account_value.csv')
            print("total_reward:{}".format(self.state[0]+sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):(STOCK_DIM*2+1)]))- INITIAL_ACCOUNT_BALANCE ))
            print("total_cost: ", self.cost)
            print("total trades: ", self.trades)
            df_total_value.columns = ['account_value']
            df_total_value['daily_return']=df_total_value.pct_change(1)
            # annualized Sharpe ratio (252 trading days); skipped for constant returns
            if df_total_value['daily_return'].std()!=0:
                sharpe = (252**0.5)*df_total_value['daily_return'].mean()/ \
                    df_total_value['daily_return'].std()
                print("Sharpe: ",sharpe)
            df_rewards = pd.DataFrame(self.rewards_memory)
            df_rewards.to_csv('account_rewards.csv')
            return self.state, self.reward, self.terminal,{}

        else:
            # actions are the shares we need to buy, hold, or sell
            actions = actions * HMAX_NORMALIZE
            # calculate begining total asset
            begin_total_asset = self.state[0]+ \
                sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):(STOCK_DIM*2+1)]))
            # perform buy or sell action: sells first (most negative actions
            # first), then buys (largest actions first)
            argsort_actions = np.argsort(actions)
            sell_index = argsort_actions[:np.where(actions < 0)[0].shape[0]]
            buy_index = argsort_actions[::-1][:np.where(actions > 0)[0].shape[0]]
            for index in sell_index:
                # print('take sell action'.format(actions[index]))
                self._sell_stock(index, actions[index])
            for index in buy_index:
                # print('take buy action: {}'.format(actions[index]))
                self._buy_stock(index, actions[index])
            # update data, walk a step s'
            self.day += 1
            self.data = self.df.loc[self.day,:]
            # load next state: keep cash and holdings, refresh price and macd
            self.state = [self.state[0]] + \
                [self.data.adjcp] + \
                list(self.state[(STOCK_DIM+1):(STOCK_DIM*2+1)]) +\
                [self.data.macd]
            # calculate the end total asset
            end_total_asset = self.state[0]+ \
                sum(np.array(self.state[1:(STOCK_DIM+1)])*np.array(self.state[(STOCK_DIM+1):(STOCK_DIM*2+1)]))
            # reward = change in portfolio value over the day (left unscaled)
            self.reward = end_total_asset - begin_total_asset
            self.rewards_memory.append(self.reward)
            #self.reward = self.reward * REWARD_SCALING
            self.asset_memory.append(end_total_asset)

        return self.state, self.reward, self.terminal, {}

    def reset(self):
        """Reset to the first day with the initial cash and no shares held."""
        self.asset_memory = [INITIAL_ACCOUNT_BALANCE]
        self.day = 0
        self.data = self.df.loc[self.day,:]
        self.cost = 0
        self.trades = 0
        self.terminal = False
        self.rewards_memory = []
        # initiate state: [cash, price, shares held (0), macd]
        self.state = [INITIAL_ACCOUNT_BALANCE] + \
                     [self.data.adjcp] + \
                     [0]*STOCK_DIM + \
                     [self.data.macd]
        return self.state

    def render(self, mode='human'):
        """Return the raw state list (no graphical rendering)."""
        return self.state

    def _seed(self, seed=None):
        """Seed the environment's random number generator."""
        self.np_random, seed = seeding.np_random(seed)
        return [seed]
train = data_clean[(data_clean.datadate>=START_DATE_DATA) & (data_clean.datadate < END_DATE_TRAIN_DATA)]
# the index needs to start from 0
train=train.reset_index(drop=True)
## Model Training: 4 models, PPO A2C, DDPG, TD3
'''
## Model 1: PPO
#tensorboard --logdir ./single_stock_tensorboard/
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_ppo = PPO2('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
model_ppo.learn(total_timesteps=100000,tb_log_name="run_aapl_ppo")
#model_ppo.save('AAPL_ppo_100k')
#model_ppo.save('AAPL_ppo_100k')
#model_sticker_save = 'FB_ppo_100k'
model_sticker_save = model_sticker_save = stockerID + '_PPO_100k'
model_ppo.save(model_sticker_save)
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_ppo = PPO2('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
#model_ppo.load('AAPL_ppo_100k.zip')
model_ppo.load(model_sticker_save)
'''
model_sticker_save = stockerID + '_TD3_100k'
#tensorboard --logdir ./single_stock_tensorboard/
#DQN<DDPG<TD3
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_td3 = TD3('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
#model_td3.learn(total_timesteps=100000,tb_log_name="run_aapl_td3")
#model_td3.save('AAPL_td3_50k')
model_td3.learn(total_timesteps=100000,tb_log_name="run_aapl_td3")
#model_td3.save('AAPL_td3_50k')
model_td3.save(model_sticker_save)
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_td3 = TD3('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
#model_td3.load('AAPL_td3_50k.zip')
model_td3.load(model_sticker_save)
# Trading
# Assume that we have $100,000 initial capital at 2019-01-01. We use the TD3 model to trade AAPL.
## after the run finishes: creates account_rewards.csv, account_value.csv, account_value.png
# + colab={"base_uri": "https://localhost:8080/", "height": 700} id="EV0lTOlsXcK9" outputId="de5b2386-3dd8-42e8-a271-29206e4ec659"
test = data_clean[(data_clean.datadate >= END_DATE_TRAIN_DATA) ]
# the index needs to start from 0
test=test.reset_index(drop=True)
#model = model_a2c
#model = model_ppo
model = model_td3
#model = model_ddpg
env_test = DummyVecEnv([lambda: SingleStockEnv(test)])
obs_test = env_test.reset()
print("==============Model Prediction===========")
for i in range(len(test.index.unique())):
action, _states = model.predict(obs_test)
obs_test, rewards, dones, info = env_test.step(action)
env_test.render()
#==> Create file account_rewards.csv, account_value.csv, account_value.png sau khi chay xong
'''
Part 5: Backtest Our Strategy
For simplicity purposes, in the article, we just calculate the Sharpe ratio and the annual return manually.
'''
def get_DRL_sharpe():
    """Load the DRL episode's account values and print performance metrics.

    Reads 'account_value.csv' (written by SingleStockEnv when an episode
    terminates), derives daily returns, and prints the annualized return
    (in percent) and the Sharpe ratio (annualized with 252 trading days).

    Returns the DataFrame with 'account_value' and 'daily_return' columns.
    """
    df_total_value=pd.read_csv('account_value.csv',index_col=0)
    df_total_value.columns = ['account_value']
    # NOTE(review): pct_change is applied to the whole frame and assigned to a
    # column; this only works while the frame has a single column at this point.
    df_total_value['daily_return']=df_total_value.pct_change(1)
    sharpe = (252**0.5)*df_total_value['daily_return'].mean()/ \
        df_total_value['daily_return'].std()
    annual_return = ((df_total_value['daily_return'].mean()+1)**252-1)*100
    print("annual return: ", annual_return)
    print("sharpe ratio: ", sharpe)
    return df_total_value
def get_buy_and_hold_sharpe(test):
    """Print annualized return (%) and Sharpe ratio of buy-and-hold.

    Derives daily returns from the adjusted close price. Note that this
    writes a 'daily_return' column into the caller's DataFrame.
    """
    test['daily_return'] = test['adjcp'].pct_change(1)
    daily = test['daily_return']
    # annualize assuming 252 trading days
    sharpe = (252 ** 0.5) * daily.mean() / daily.std()
    annual_return = ((daily.mean() + 1) ** 252 - 1) * 100
    print("annual return: ", annual_return)
    print("sharpe ratio: ", sharpe)
    # intentionally returns nothing (original kept `return sharpe` commented out)
df_total_value=get_DRL_sharpe()
get_buy_and_hold_sharpe(test)
DRL_cumulative_return = (df_total_value.account_value.pct_change(1)+1).cumprod()-1
buy_and_hold_cumulative_return = (test.adjcp.pct_change(1)+1).cumprod()-1
# %matplotlib inline
fig, ax = plt.subplots(figsize=(12, 8))
plt.plot(test.datadate, DRL_cumulative_return, color='red',label = "DRL")
plt.plot(test.datadate, buy_and_hold_cumulative_return, label = "Buy & Hold")
plt.title("Cumulative Return for AAPL with Transaction Cost",size= 20)
plt.legend()
plt.rc('legend',fontsize=15)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
# + colab={"base_uri": "https://localhost:8080/", "height": 309} id="tfLWQCbY_N-V" outputId="862f4d42-92f4-4f22-d5a6-0fce6210325d"
# Import yfinance package
import yfinance as yf
# Import matplotlib for plotting
import matplotlib.pyplot as plt
# %matplotlib inline
# Set the start and end date
#start_date = '1990-01-01'
#start_date = '2019-01-01'
#end_date = '2021-07-12'
#START_DATE_DATA = "2009-01-01"
#END_DATE_DATA = "2021-08-20"
#END_DATE_TRAIN_DATA = "2019-09-01"
#END_DATE_TRAIN_DATA = "2020-01-01"
#START_DATE_DATA = "2016-01-01"
#END_DATE_DATA = "2019-01-01"
#stockerID = "ABBV"
#START_DATE_DATA = "2009-01-01"
#END_DATE_TRAIN_DATA = "2020-01-01"
#END_DATE_DATA = "2021-01-20"
start_date = END_DATE_TRAIN_DATA
end_date = END_DATE_DATA
# Set the ticker
#ticker = 'AMZN'
ticker = stockerID
# Get the data
data = yf.download(ticker, start_date, end_date)
# Print 5 rows
#data.tail()
# Plot adjusted close price data
data['Adj Close'].plot()
plt.show()
# + [markdown] id="_cmg8TL0-rbT"
# ## Model Training: 4 models, PPO A2C, DDPG, TD3
# ## Model 1: PPO
# + id="L1Ak9hVU-MuM"
#tensorboard --logdir ./single_stock_tensorboard/
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_ppo = PPO2('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
model_ppo.learn(total_timesteps=100000,tb_log_name="run_aapl_ppo")
#model_ppo.save('AAPL_ppo_100k')
#model_ppo.save('AAPL_ppo_100k')
#model_sticker_save = 'FB_ppo_100k'
model_sticker_save = 'amzn_ppo_100k'
model_ppo.save(model_sticker_save)
# + [markdown] id="V-o9vOq_-3Wi"
# ## Model 2: DDPG
# + id="DR_WNon2-Mwb"
#tensorboard --logdir ./single_stock_tensorboard/
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_ddpg = DDPG('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
#model_ddpg.learn(total_timesteps=100000, tb_log_name="run_aapl_ddpg")
#model_ddpg.save('AAPL_ddpg_50k')
# + id="ERtTQEUS-Myr"
model_ddpg.learn(total_timesteps=100000, tb_log_name="run_aapl_ddpg")
model_ddpg.save('AAPL_ddpg_50k')
#model_ddpg.save(model_sticker_save)
# + [markdown] id="FbEwEVS5--xC"
# ## Model 3: A2C
# + id="CqwECOAB-M1C"
#tensorboard --logdir ./single_stock_tensorboard/
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_a2c = A2C('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
model_a2c.learn(total_timesteps=100000,tb_log_name="run_aapl_a2c")
#model_a2c.save('AAPL_a2c_50k')
# + id="JkXmJ0Bo9_r6"
model_a2c.save('AAPL_a2c_50k')
#model_a2c.save(model_sticker_save)
# + [markdown] id="k1T-oJfX_Ewq"
# ## Model 4: TD3
# + id="swsevaDl_Cfh"
#tensorboard --logdir ./single_stock_tensorboard/
#DQN<DDPG<TD3
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_td3 = TD3('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
#model_td3.learn(total_timesteps=100000,tb_log_name="run_aapl_td3")
#model_td3.save('AAPL_td3_50k')
model_td3.learn(total_timesteps=100000,tb_log_name="run_aapl_td3")
model_td3.save('AAPL_td3_50k')
#model_td3.save(model_sticker_save)
# + [markdown] id="Dk28SuRk_LuB"
# ## Testing data
# + id="V3897qQ8_Ch5"
# + [markdown] id="vvg39bOe_QLx"
# ## Load model from file save
# + [markdown] id="b5vgJKZq_Qs5"
# ## Load model PPO
# + id="vr377TvW_CkK"
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_ppo = PPO2('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
#model_ppo.load('AAPL_ppo_100k.zip')
model_ppo.load(model_sticker_save)
# + [markdown] id="4cPpuYQ9_YOS"
# ## Load models a2c
# + id="tJoBO65D_CmZ"
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_a2c = A2C('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
model_a2c.load(model_sticker_save)
# + [markdown] id="oiPeTYqe_dQZ"
# ## Load model ddpg
# + id="KepzXkwn_CoZ"
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_ddpg = DDPG('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
#model_ddpg.load('AAPL_ddpg_50k.zip')
model_ddpg.load(model_sticker_save)
# + [markdown] id="XPauM4FM_jfq"
# ## Load model td3
# + id="wH52_pwn_Cqa"
env_train = DummyVecEnv([lambda: SingleStockEnv(train)])
model_td3 = TD3('MlpPolicy', env_train, tensorboard_log="./single_stock_trading_2_tensorboard/")
model_td3.load('AAPL_td3_50k.zip')
# + [markdown] id="DuMp3IBr_nrq"
# # Trading
# Assume that we have $100,000 initial capital at 2019-01-01. We use the TD3 model to trade AAPL.
#
# ## after the run finishes: creates account_rewards.csv, account_value.csv, account_value.png
# + id="QuHJ2FAf_lUy"
test = data_clean[(data_clean.datadate >= END_DATE_TRAIN_DATA) ]
# the index needs to start from 0
test=test.reset_index(drop=True)
# + id="XSONrBklFLf5"
test
# + colab={"base_uri": "https://localhost:8080/"} id="C3cLrinGFJJC" outputId="17fb9013-9828-4947-9dd8-43c3d2d4480f"
#model = model_a2c
model = model_ppo
#model = model_td3
#model = model_ddpg
env_test = DummyVecEnv([lambda: SingleStockEnv(test)])
obs_test = env_test.reset()
print("==============Model Prediction===========")
for i in range(len(test.index.unique())):
action, _states = model.predict(obs_test)
obs_test, rewards, dones, info = env_test.step(action)
env_test.render()
#==> Create file account_rewards.csv, account_value.csv, account_value.png sau khi chay xong
# + [markdown] id="QRy8w11h_rmy"
# ## Part 5: Backtest Our Strategy
# For simplicity purposes, in the article, we just calculate the Sharpe ratio and the annual return manually.
# + colab={"base_uri": "https://localhost:8080/"} id="Xc2v1jpT_lXq" outputId="cafb6a75-311b-4066-8606-d63bb9ce40b3"
def get_DRL_sharpe():
    """Load the DRL episode's account values and print performance metrics.

    Reads 'account_value.csv' (written by SingleStockEnv when an episode
    terminates), derives daily returns, and prints the annualized return
    (in percent) and the Sharpe ratio (annualized with 252 trading days).

    Returns:
        The DataFrame with 'account_value' and 'daily_return' columns.
    """
    df_total_value=pd.read_csv('account_value.csv',index_col=0)
    df_total_value.columns = ['account_value']
    # Compute returns from the explicit column. The original applied
    # pct_change to the whole frame, which only happens to work while the
    # frame has exactly one column; this form stays correct regardless.
    df_total_value['daily_return']=df_total_value['account_value'].pct_change(1)
    sharpe = (252**0.5)*df_total_value['daily_return'].mean()/ \
        df_total_value['daily_return'].std()
    annual_return = ((df_total_value['daily_return'].mean()+1)**252-1)*100
    print("annual return: ", annual_return)
    print("sharpe ratio: ", sharpe)
    return df_total_value
def get_buy_and_hold_sharpe(test):
    """Report buy-and-hold performance: annualized return (%) and Sharpe ratio.

    NOTE: mutates the passed DataFrame by adding a 'daily_return' column
    computed from the adjusted close price.
    """
    returns = test['adjcp'].pct_change(1)
    test['daily_return'] = returns
    mu = returns.mean()
    sigma = returns.std()
    # annualize assuming 252 trading days
    sharpe = (252 ** 0.5) * mu / sigma
    annual_return = ((mu + 1) ** 252 - 1) * 100
    print("annual return: ", annual_return)
    print("sharpe ratio: ", sharpe)
    # no return value (original kept `return sharpe` commented out)
df_total_value=get_DRL_sharpe()
get_buy_and_hold_sharpe(test)
DRL_cumulative_return = (df_total_value.account_value.pct_change(1)+1).cumprod()-1
buy_and_hold_cumulative_return = (test.adjcp.pct_change(1)+1).cumprod()-1
# + id="-HmnT9KBJRpu"
#DRL_cumulative_return
# + id="dps-hbmJ_lZ7"
# %matplotlib inline
fig, ax = plt.subplots(figsize=(12, 8))
plt.plot(test.datadate, DRL_cumulative_return, color='red',label = "DRL")
plt.plot(test.datadate, buy_and_hold_cumulative_return, label = "Buy & Hold")
plt.title("Cumulative Return for AAPL with Transaction Cost",size= 20)
plt.legend()
plt.rc('legend',fontsize=15)
plt.rc('xtick', labelsize=15)
plt.rc('ytick', labelsize=15)
# + id="vuX1LVRn_3cL"
from google.colab import files
files.download('AAPL_a2c_50k.zip')
files.download('AAPL_td3_50k.zip')
files.download('AAPL_ppo_100k.zip')
files.download('AAPL_ddpg_50k.zip')
| ggColab_DRL_single_stock_trading_OK/ABBV_TD3_ggColab_DRL_single_stock_trading_OK.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.2
# language: julia
# name: julia-0.6
# ---
# +
# fashion mnist mlp
#
# reference: https://github.com/FluxML/model-zoo/blob/master/mnist/mlp.jl
# +
using Flux
using Flux: onehotbatch, argmax, crossentropy, throttle, @epochs
using BSON: @save, @load
using Base.Iterators: repeated
using MLDatasets # FashionMNIST
using ColorTypes: N0f8, Gray
const Img = Matrix{Gray{N0f8}}
"""
    prepare_train() -> (X, Y)

Load the FashionMNIST training set and return the feature matrix `X`
(one flattened 28x28 image per column) together with the one-hot label
matrix `Y` (10 rows, digits 0-9). Only the first 6_000 samples are used to
keep the demo fast. Both arrays are piped through `gpu` (a no-op without a
GPU backend).
"""
function prepare_train()
    # load full training set
    train_x, train_y = FashionMNIST.traindata() # 60_000
    trainrange = 1:6_000 # 1:60_000 -- subset for speed
    imgs = Img.([train_x[:,:,i] for i in trainrange])
    # Stack images into one large batch: each image flattened to a column
    X = hcat(float.(reshape.(imgs, :))...) |> gpu
    # One-hot-encode the labels
    Y = onehotbatch(train_y[trainrange], 0:9) |> gpu
    X, Y
end
"""
    prepare_test() -> (tX, tY)

Load the FashionMNIST test set and return it in the same layout as
`prepare_train`: flattened images as columns of `tX`, one-hot labels in
`tY`. Only the first 1_000 samples are used to keep the demo fast.
"""
function prepare_test()
    # load full test set
    test_x, test_y = FashionMNIST.testdata() # 10_000
    testrange = 1:1_000 # 1:10_000 -- subset for speed
    test_imgs = Img.([test_x[:,:,i] for i in testrange])
    tX = hcat(float.(reshape.(test_imgs, :))...) |> gpu
    tY = onehotbatch(test_y[testrange], 0:9) |> gpu
    tX, tY
end
# +
X, Y = prepare_train()
tX, tY = prepare_test()
# Two-layer MLP: 784 -> 32 (relu) -> 10, softmax output probabilities.
m = Chain(
    Dense(28^2, 32, relu),
    Dense(32, 10),
    softmax) |> gpu
# Cross-entropy between predicted class probabilities and one-hot labels.
loss(x, y) = crossentropy(m(x), y)
# Fraction of samples whose predicted class matches the label.
# NOTE(review): relies on the legacy Flux `argmax` exported at the top of the
# file (newer Flux uses `onecold`) -- confirm against the pinned Flux version.
accuracy(x, y) = mean(argmax(m(x)) .== argmax(y))
# Repeat the full batch 200 times so one train! pass performs 200 updates.
dataset = repeated((X, Y), 200)
evalcb = () -> @show(loss(X, Y))
# Legacy (pre-0.10) Flux optimizer API: ADAM bound to the model parameters.
opt = ADAM(params(m))
# -
# Train for 5 epochs, logging the loss at most every 2 seconds.
@epochs 5 Flux.train!(loss, dataset, opt, cb = throttle(evalcb, 2))
accuracy(X, Y)
accuracy(tX, tY)
| examples/mlp/mlp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib widget
import numpy as np
import matplotlib.pyplot as plt
import pydae.ssa as ssa
import scipy.signal as sctrl
from vsc_lcl import vsc_lcl_class
# ## Instantiate system
# Build the VSC-LCL model and configure the simulation:
# fixed 5 us integration step, every sample stored (no decimation),
# buffer sized for 100k stored points.
syst = vsc_lcl_class()
syst.Dt = 5e-6
syst.decimation = 1
syst.N_store = 100_000
syst.update()
# ## CTRL1 in state feedback
# +
syst = vsc_lcl_class()
syst.Dt = 5e-6
syst.decimation =1
syst.N_store =100_000
syst.update()
Δt = 50e-6
#x_d_ctrl_list = ['i'] # states to consider in the reduction
z_ctrl_list = [ 'i_sd_g01', 'i_sq_g01'] # outputs to consider in the controller
u_ctrl_list = ['eta_d_g01','eta_q_g01'] # intputs to consider in the controller
z_ctrl_idxs = [syst.outputs_list.index(item) for item in z_ctrl_list]
u_ctrl_idxs = [syst.inputs_run_list.index(item) for item in u_ctrl_list]
syst.Δt = Δt
## Calculate equilibirum point
syst.initialize([{'G_d_g01':0.0,'eta_d_g01':0.0,'eta_q_g01':-0.8693333,'v_1_Q':-326,'v_1_D':0.0, 'C_m_g01':4e-6}],xy0=1000)
ssa.eval_ss(syst)
# linear continous plant
A_p = syst.A
B_p = syst.B
C_p = syst.C
D_p = syst.D
# plant discretization
A_d,B_d,C_d,D_d,Dt = sctrl.cont2discrete((A_p,B_p,C_p,D_p),Δt,method='zoh')
N_z_d,N_x_d = C_d.shape # discreticed plant dimensions
N_x_d,N_u_d = B_d.shape
# convenient matrices
O_ux = np.zeros((N_u_d,N_x_d))
O_xu = np.zeros((N_x_d,N_u_d))
O_uu = np.zeros((N_u_d,N_u_d))
I_uu = np.eye(N_u_d)
syst.A_d = A_d
syst.B_d = B_d
# Controller ##################################################################################
B_c = B_d[:,u_ctrl_idxs]
C_c = C_d[z_ctrl_idxs,:]
D_c = D_d[z_ctrl_idxs,:][:,u_ctrl_idxs]
N_x_c,N_u_d = B_c.shape
N_z_c,N_x_c = C_c.shape
O_ux = np.zeros((N_u_d,N_x_d))
O_xu = np.zeros((N_x_d,N_u_d))
O_uu = np.zeros((N_u_d,N_u_d))
I_uu = np.eye(N_u_d)
# discretized plant:
# Δx_d = A_d*Δx_d + B_d*Δu_d
# Δz_c = C_c*Δx_d + D_c*Δu_d
# dinamic extension:
# Δx_d = A_d*Δx_d + B_d*Δu_d
# Δx_i = Δx_i + Δt*(Δz_c-Δz_c_ref) = Δx_i + Δt*C_c*Δx_d - Dt*Δz_c_ref
# Δz_c = z_c - z_c_0
# Δz_c_ref = z_c_ref - z_c_0
# (Δz_c-Δz_c_ref) = z_c - z_c_ref
omega_b = 2*np.pi*50
W = np.block([
[ np.cos(omega_b*Δt), -np.sin(omega_b*Δt)],
[ np.sin(omega_b*Δt), np.cos(omega_b*Δt)],
])
A_e = np.block([
[ A_d, B_c@W, O_xu], # Δx_d
[ O_ux, O_uu, O_uu], # Δx_r
[ Δt*C_c, Δt*D_c, I_uu], # Δx_i
])
B_e = np.block([
[ O_xu],
[ I_uu],
[ O_uu],
])
A_ctrl = A_e[N_x_d:,N_x_d:]
B_ctrl = B_e[N_x_d:]
# weighting matrices
Q_c = np.eye(A_e.shape[0])
Q_c[-1,-1] = 1e6
Q_c[-2,-2] = 1e6
R_c = np.eye(B_c.shape[1])*100000
K_c,S_c,E_c = ssa.dlqr(A_e,B_e,Q_c,R_c)
E_cont = np.log(E_c)/Δt
syst.A_ctrl = A_ctrl
syst.B_ctrl = B_ctrl
syst.K_c = K_c
syst.N_x_d = N_x_d # number of plant states
syst.N_u_d = N_u_d # number of plant inputs
syst.N_z_c = N_z_c # number of plant outputs considered for the controller
# +
syst = vsc_lcl_class()
syst.Dt = 5e-6
syst.decimation =1
syst.N_store =100_000
syst.update()
times = np.arange(0.0,0.1,Δt)
syst.initialize([{'G_d_g01':0.0,'eta_d_g01':0.0,'eta_q_g01':-0.8693333,'v_1_Q':-326,'v_1_D':0.0, 'C_m_g01':4e-6}],xy0=1000)
ssa.eval_A(syst)
i_sd = syst.get_value('i_sd_g01')
i_sq = syst.get_value('i_sq_g01')
v_sd = syst.get_value('v_sd_g01')
v_sq = syst.get_value('v_sq_g01')
i_td = syst.get_value('i_td_g01')
i_tq = syst.get_value('i_tq_g01')
v_md = syst.get_value('v_md_g01')
v_mq = syst.get_value('v_mq_g01')
v_dc = syst.get_value('v_dc_g01')
eta_d = syst.get_value('eta_d_g01')
eta_q = syst.get_value('eta_q_g01')
i_sd_ref_0 = i_sd
i_sq_ref_0 = i_sq
v_sq_0 = v_sq
v_sd_0 = v_sd
x_d_0 = np.array([i_td,i_tq,v_md,v_mq,i_sd,i_sq]).reshape(6,1)
u_d_0 = np.array([eta_d,eta_q]).reshape(2,1)
x_r_0 = u_d_0
syst.Δx_e = np.zeros((10,1))
it = 0
for t in times:
Δx_e = syst.Δx_e
# measurements
i_sd = syst.get_value('i_sd_g01')
i_sq = syst.get_value('i_sq_g01')
v_sd = syst.get_value('v_sd_g01')
v_sq = syst.get_value('v_sq_g01')
i_td = syst.get_value('i_td_g01')
i_tq = syst.get_value('i_tq_g01')
v_md = syst.get_value('v_md_g01')
v_mq = syst.get_value('v_mq_g01')
v_dc = syst.get_value('v_dc_g01')
x_d = np.array([i_td,i_tq,v_md,v_mq,i_sd,i_sq]).reshape(6,1)
Δx_d = x_d - x_d_0
Δx_r = syst.Δx_e[N_x_c:-N_u_d,:]
Δx_i = syst.Δx_e[(N_x_c+N_u_d):,:]
i_sd_ref = i_sd_ref_0
i_sq_ref = i_sq_ref_0
v_sq = v_sq_0
v_sd = v_sd_0
if t>20e-3: i_sd_ref = 20
if t>30e-3: i_sq_ref = 30
if t>45e-3: v_sd = 163
if t>45e-3: v_sq = -163
epsilon_d = i_sd - i_sd_ref
epsilon_q = i_sq - i_sq_ref
epsilon = np.block([[epsilon_d],[epsilon_q]])
Δu_r = -K_c @ Δx_e + np.block([[ (v_sd-v_sd_0)*2/v_dc],[(v_sq-v_sq_0)*2/v_dc]])
Δx_r = W@Δu_r
Δx_i += Δt*epsilon
Δx_e = np.block([[Δx_d],[Δx_r],[Δx_i]])
syst.Δx_e = Δx_e
x_r = Δx_r + x_r_0
eta_dq = x_r
eta_d = eta_dq[0,0]
eta_q = eta_dq[1,0]
events=[{'t_end':t,'eta_d_g01':eta_d,'eta_q_g01':eta_q,'v_1_Q':v_sq,'v_1_D':v_sd}]
syst.run(events)
# eta_d_prev = eta_d
# eta_q_prev = eta_q
it += 1
syst.post();
# +
plt.close('all')
fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(7, 7),sharex=True)
lines = axes[0].plot(syst.T,syst.get_values('i_sd_g01'),label='i_sd_g01')
lines = axes[0].plot(syst.T,syst.get_values('i_sq_g01'),label='i_sq_g01')
axes[1].plot(syst.T,syst.get_values('eta_D_g01'),label='eta_D_g01')
axes[1].plot(syst.T,syst.get_values('eta_Q_g01'),label='eta_Q_g01')
for ax in axes:
ax.grid()
ax.legend()
ax.set_xlabel('Time (s)')
datacursor(lines, display='multiple')
# +
import sympy as sym
x_d_1,x_d_2,x_d_3,x_d_4,x_d_5,x_d_6 = sym.symbols('Dx_d_1,Dx_d_2,Dx_d_3,Dx_d_4,Dx_d_5,Dx_d_6')
x_r_1,x_r_2 = sym.symbols('Dx_r_1,Dx_r_2')
x_i_1,x_i_2 = sym.symbols('Dx_i_1,Dx_i_2')
x_e = sym.Matrix([x_d_1,x_d_2,x_d_3,x_d_4,x_d_5,x_d_6,x_r_1,x_r_2,x_i_1,x_i_2])
u_r = -K_c * x_e
# +
u_r_d = str(sym.N(u_r[0],8))
u_r_q = str(sym.N(u_r[1],8))
print(f'Du_r_1 = {u_r_d};')
print(f'Du_r_2 = {u_r_q};')
# +
Du_r_1,Du_r_2 = sym.symbols('Du_r_1,Du_r_2')
Du_r = sym.Matrix([Du_r_1,Du_r_2 ])
Dx_r = W@Du_r
Dx_r_1 = str(sym.N(Dx_r[0],8))
Dx_r_1 = str(sym.N(Dx_r[1],8))
print(f'Dx_r_1 = {u_r_d};')
print(f'Dx_r_2 = {u_r_q};')
# -
# Echo the raw symbolic control-law expressions for inspection.
print(u_r[0])
print(u_r[1])
# Inspect the measurement (output) matrix of the converter model.
syst.get_value('C_m_g01')
# +
# Standalone mpldatacursor demo: click a line to pin a draggable annotation.
from mpldatacursor import datacursor
data = np.outer(range(10), range(1, 5))
fig, ax = plt.subplots()
lines = ax.plot(data)
ax.set_title('Click somewhere on a line')
#datacursor(lines)
datacursor(display='multiple', draggable=True)
plt.show()
# +
# Minimal mpldatacursor demo using the pyplot state machine
# (no explicit figure/axes objects).
import matplotlib.pyplot as plt
import numpy as np
from mpldatacursor import datacursor
data = np.outer(range(10), range(1, 5))
plt.plot(data)
plt.title('Click somewhere on a line')
datacursor()
plt.show()
# -
# +
# Anti-aliasing analog Butterworth filter design between the control and
# measurement sampling rates.
# Fix: the original cell was pasted MATLAB code (`pi`, `log10` and
# `buttord(..., 's')` are undefined in Python); translated to the SciPy
# equivalents, which this file already uses below.
import math
from scipy import signal

Ts_ctr = 1/200      # controller sampling period (s)
Ts_med = 1/20000    # measurement sampling period (s)
wN_ctr = 2*math.pi*1/Ts_ctr/2   # controller Nyquist frequency (rad/s)
wN_med = 2*math.pi*1/Ts_med/2   # measurement Nyquist frequency (rad/s)
# Passband edge at the controller Nyquist (gain >= 0.7, i.e. ~3.1 dB ripple),
# stopband edge at the measurement Nyquist (gain <= 0.1, i.e. >= 20 dB).
nA, Wn = signal.buttord(wN_ctr, wN_med,
                        -20*math.log10(0.7), -20*math.log10(0.1),
                        analog=True)
NUM_aaA, DEN_aaA = signal.butter(nA, Wn, 'low', analog=True)
# +
from scipy import signal
import matplotlib.pyplot as plt
# +
# Reference example (from the SciPy docs): analog Butterworth band-pass
# designed to spec with buttord, with the pass/stop-band masks shaded.
plt.close('all')
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(7, 7),sharex=True)
N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True)
b, a = signal.butter(N, Wn, 'band', True)
w, h = signal.freqs(b, a, np.logspace(1, 2, 500))
axes.plot(w/2/np.pi, 20 * np.log10(abs(h)))
plt.title('Butterworth bandpass filter fit to constraints')
axes.set_xlabel('Frequency [radians / second]')
axes.set_ylabel('Amplitude [dB]')
axes.grid(which='both', axis='both')
axes.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop
axes.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass
axes.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop
axes.set_xlim([0, 20e3])
# -
# Display the designed numerator and denominator coefficients.
b
a
| examples/machines/vsc/vsc_lcl_sf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import cv2
from imutils import paths
import numpy as np
# +
# Rotate by 90 degrees
# Augment the color schemes of the images
# Shear/ elongate the images
# +
# Rotating image for obtaining forward datasets.
# Every 'Right' arrow image is rotated 90 degrees counter-clockwise so it
# points straight ahead, and written to the 'Straight' folder as 1.jpg, 2.jpg...
inputPath = 'C:\\Users\\<NAME>\\Documents\\7th Sem\\IRC\\Datasets\\Creating Datasets\\Downloaded Datasets\\Right'
outputPath = 'C:\\Users\\<NAME>\\Documents\\7th Sem\\IRC\\Datasets\\Creating Datasets\\Downloaded Datasets\\Straight'
imagePaths = list(paths.list_images(inputPath))
# Write outputs into the destination folder via the working directory.
os.chdir(outputPath)
i = 0
for imagePath in imagePaths:
    i += 1  # output filenames start at 1.jpg
    image = cv2.imread(imagePath)
    image = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
    cv2.imwrite(str(i) + '.jpg', image)
# +
# Build the 'Right' dataset: every 'Left' arrow image is rotated 180 degrees
# and written to the 'Right' folder as 1.jpg, 2.jpg, ...
inputPath = 'C:\\Users\\<NAME>\\Documents\\7th Sem\\IRC\\Datasets\\Creating Datasets\\Downloaded Datasets\\Left'
outputPath = 'C:\\Users\\<NAME>\\Documents\\7th Sem\\IRC\\Datasets\\Creating Datasets\\Downloaded Datasets\\Right'
imagePaths = list(paths.list_images(inputPath))
# Write outputs into the destination folder via the working directory.
os.chdir(outputPath)
for i, imagePath in enumerate(imagePaths, start=1):
    rotated = cv2.rotate(cv2.imread(imagePath), cv2.ROTATE_180)
    cv2.imwrite(str(i) + '.jpg', rotated)
# +
# Augmentation
# Three independent Augmentor pipelines over the same input folder:
# horizontal skew (p1), vertical skew (p2) and black-and-white thresholding (p3).
# probability=1 means every image in the folder is transformed once.
import Augmentor
# Change inputPath as required
inputPath = 'C:\\Users\\<NAME>\\Documents\\7th Sem\\IRC\\Datasets\\Creating Datasets\\Downloaded Datasets\\Right'
outputPath = 'C:\\Users\\<NAME>\\Documents\\7th Sem\\IRC\\Datasets\\Creating Datasets\\Downloaded Datasets\\Right\\Output'
p1 = Augmentor.Pipeline(inputPath, outputPath)
p2 = Augmentor.Pipeline(inputPath, outputPath)
p3 = Augmentor.Pipeline(inputPath, outputPath)
p1.skew_left_right(probability = 1)
p2.skew_top_bottom(probability = 1)
p3.black_and_white(probability = 1, threshold = 64)
# process() applies each pipeline to every image under inputPath.
p1.set_save_format(save_format = "auto")
p1.process()
p2.set_save_format(save_format = "auto")
p2.process()
p3.set_save_format(save_format = "auto")
p3.process()
# +
# Load the training images, convert them to 64x64 RGB and collect the class
# label of each image (the name of its parent directory).
# Path to folder containing the datasets
inputPaths = "C://Users//<NAME>//Documents//7th Sem//IRC//IRC-Rover-Files//Datasets//Creating Datasets//Downloaded Datasets//Final Datasets for Training/Left"
outputPaths = "C://Users//<NAME>//Documents//7th Sem//IRC//IRC-Rover-Files//Datasets//Creating Datasets//Downloaded Datasets//Final Datasets for Training/Left/Resized"
# List to store the paths of all images in the dataset
imagePaths = list(paths.list_images(inputPaths))
# Bitmaps from OpenCV's imread(), plus their class labels.
images = []
labels = []  # fix: was never initialised, so labels.append() raised NameError
for imagePath in imagePaths:
    # The class label is the directory holding the image.
    label = imagePath.split(os.path.sep)[-2]
    labels.append(label)
    image = cv2.imread(imagePath)
    # fix: original read `cv2.cvtColor(image. cv2.COLOR_BGR2RGB)` -- the '.'
    # instead of ',' made it a single attribute access and failed at runtime.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image = cv2.resize(image, (64, 64))
    images.append(image)
| iPython Notebooks/Dataset Generator for Arrows.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import ast
from os import path
# Pandas display settings so wide frames render fully in the notebook.
pd.set_option('display.max_rows', 115)
pd.set_option('display.max_columns', 115)

path_csv = path.join(path.abspath('.'), 'data', 'cats.csv')
path_good_csv = path.join(path.abspath('.'), 'data', 'good_cats.csv')

df = pd.read_csv(path_csv)
df.age.value_counts()

# Keep adult cats only: drop kittens and young cats.
df.drop(index=df[df['age'].isin(['Baby', 'Young'])].index, inplace=True)

# Drop breeds that are too rare in the data to be useful.
rare_breeds = [
    'American Curl', 'American Wirehair', 'Burmilla', 'Canadian Hairless',
    'Chausie', 'Chinchilla', 'Cymric', 'Japanese Bobtail', 'Javanese',
    'LaPerm', 'Oriental Long Hair', 'Silver', 'Singapura', 'Somali',
    'York Chocolate',
]
df.drop(index=df[df['breed'].isin(rare_breeds)].index, inplace=True)

# Drop the stray saved-index column and the features we will not use.
df.drop(columns=['Unnamed: 0'], inplace=True)
df.drop(columns=['url', 'type', 'age', 'gender', 'coat', 'size', 'med_photos', 'id'], inplace=True)

# Keep only the full-size URL of every photo.
# Fix: `df.loc[i].photos = ...` is chained assignment -- it writes into a
# temporary row copy and leaves the frame unchanged. Assign the column instead.
df['photos'] = df['photos'].apply(
    lambda pht: str([x['full'] for x in ast.literal_eval(pht)]))

df.to_csv(path_good_csv, index=False)
| ml/Cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Motivation
#
# Until now, we've seen infectiousness on a WS graph representing a "small world", with some close contacts (circle lattice) and some distant relationships.
#
# What does contact tracing do on this topology?
#
# How do people's privacy preferences affect contact tracing?
# - the different kinds of edges in the topology represent different kinds of social contacts over which people have different preferences
# ## Technical framing of the problem
# ## Defining Terms
#
# ***Inflection Point***: The point in a function $f(x) = y$ where the function $f$ turns from concave to convex, or vice versa.
#
# ***Transition Region***: For a $f(x)$, a range of $x$ around the inflection point for which the slope is over some value. (lloyd et al.)
#
# ***Threshold/Phase Transition/Critical Point:*** "The point at which the order parameter goes from zero to non-zero in an infinite system." Where the order parameter is, e.g., the final density of infected nodes.
# - ???? "g(N) is the function giving the critical point as a function of N. Where it converges as N increases is the critical point."
# - Finite Sampling
#
# - **First order**:
# - This point is in a step function. It is well approximated by the inflection point.
# - This may _not_ be a step function, and rather have a coexistence region. In that case, it has two transition points.
# - **Second order**:
# - This is the point where the order parameter becomes non-zero, but not as a step function
# - This is the *critical point*.
# - It may be well approximated by the point at which the function passes a (low) slope threshold??
#
# ***Bimodal outcomes***: The final size will be a bimodal distribution because of (a) early extinctions (which are geometric on a 1D spread) and (b) the epidemic size.
# - find the peaks
# - find the midpoint between the peaks?
# - ratio of sizes of the mass on either side of the midpoint.
# ## Tasks
#
# [ ] Plot the infection ratio, instead of susceptible ratio.
#
#
# Given a set of model parameters $M : (N, K, [p], beta, gamma, alpha, zeta) \rightarrow S$ (where p is allowed to range, and S is the final susceptible ratio)...
#
# ... that will imply an inflection point $p^*(M) = \arg \min dM(p) / dp$.
#
# Suppose we fix $p = p^*(M)$.
#
# That will give us $NKp^*/2$ rewired edges.
#
# We next need to reintroduce contact tracing adoption.
# I suppose we have some "empirical" questions here.
# I think it would make sense to model these next situations (adapting slightly from the questions you posed originally):
#
# 1) Look at the curve of outcomes as A (the adoption rate) ranges from 0 to 1.
#
# Expected result: a sigmoid function with an inflection point.
# We don't know we'll get this for sure though, since Lloyd et al. didn't go here.
# We know that with A = 0, $M(p^*) = S^*$ as before.
# We expect that with $A = 1, M(p) = 0$.
#
# We can take the inflection point of this curve, $A^*$, as the basis for the next study.
# This will be the point where the marginal effectiveness of the contact tracing system is at its highest.
# Note the slope here, $dS/dA^*$
#
# 2) To start to look at the effect of the rewired/distant edges, try this to start:
# Set $A = A^*$, but knock out a percentage q of the rewired edges from the tracing system.
# We would also expect S to be monotonically decreasing in q.
# What does this curve look like? Is there an inflection point?
#
# If so, call it $q^*$
#
# Note the slope here, $dS/dq^*$.
#
# If $dS/dq^* \times NKp^*/2 > dS/dA^* \times A^*NK/2$, then that is circumstantial evidence that the "distant" edges are more impactful for contact tracing than the circle lattice edges, at the multidimensional inflection point along the p, A, and q axes.
#
# Once that is in place and we think about it, we can set up a more comprehensive grid search of the space to get the gradients at other levels of p, A, and q.
# ## Tasks (old draft)
#
#
# 1. Scan of $p$ v. avg final infection size for our model parameters on the Watts-Storgatz lattice without contact tracing.
# - Find the inflection point $\hat{p}$
# - This scan should help us find the minimum value of beta above which there is epidemic spread w/o contact tracing and will be used to choose a reasonable value of beta (somewhat above the threshold) for an initial comparison of the effects of i) failure to adopt and ii) selective failure to report more “distant” contacts.
# - Find
#
# 2. Setting A = 1 and fixing $p = \hat{p}$, varying $\chi$ to produce a plot of $\chi$ v. avg final infection size, which can be translated into a plot of avg untraced edges v. avg final infection.
# - The avg number of untraced edges can vary from 0 ($\chi = 0$) to $pKN/2$ (for $\chi = 1$). Presumably, at $\chi = 0$, there is full contact tracing and the epidemic is suppressed.
# - Assuming we have chosen a $p$ for which the epidemic spreads when $\chi = 1$, this plot will answer at least three questions: i) At (or around, if there’s no sharp transition) what number of untraced edges does contact tracing lose its effectiveness? ii) how does the final infection size vary with $\chi$? iii) what is the functional form of this variation?
#
# 3. Setting $\chi = 0$ and varying A to produce a plot of avg number of untraced edges v. avg final infection size, with the same beta. Here the avg number of untraced edges can vary from 0 (A=1) to $KN/2$ (A=0). This plot answers the same questions as above for this scenario. We might also want to plot untraced rewired edges v. avg final infection size from the same data.
#
# Now we compare these plots and see what we see.
# # What do we want to answer the question
#
# 1) Come up with a useful visualization of the tracing system itself
# - trace close edges vs. traced remote edges?
#
# 2) Plotting q* line graph with varied A* = { 0.2, 0.4, 0.6, 0.8 }
#
# 3) What is the cost of a untraced close edge vs. untraced remote edge -- For a Given A/q!
#
# 4) Heatmap of A vs. q infectiousness with p*
| contact-tracing/code/Python/Technical Framing of the Problem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ch `10`: Concept `02`
# ## Recurrent Neural Network
# Import the relevant libraries:
import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
# Define the RNN model:
# +
class SeriesPredictor:
    """TF1 sequence-to-sequence regressor: a single-layer LSTM followed by a
    dense output layer shared across timesteps, producing one scalar
    prediction per timestep.
    """

    def __init__(self, input_dim, seq_size, hidden_dim=10):
        """Build the graph.

        :param input_dim: number of input features per timestep
        :param seq_size: number of timesteps in each sequence
        :param hidden_dim: LSTM hidden-state size
        """
        # Hyperparameters
        self.input_dim = input_dim
        self.seq_size = seq_size
        self.hidden_dim = hidden_dim
        # Weight variables and input placeholders
        self.W_out = tf.Variable(tf.random_normal([hidden_dim, 1]), name='W_out')
        self.b_out = tf.Variable(tf.random_normal([1]), name='b_out')
        self.x = tf.placeholder(tf.float32, [None, seq_size, input_dim])
        self.y = tf.placeholder(tf.float32, [None, seq_size])
        # Cost optimizer: mean squared error over every timestep of the batch
        self.cost = tf.reduce_mean(tf.square(self.model() - self.y))
        self.train_op = tf.train.AdamOptimizer().minimize(self.cost)
        # Auxiliary ops
        self.saver = tf.train.Saver()

    def model(self):
        """Return the prediction op, shape [batch_size, seq_size].

        Runs the LSTM over self.x and applies the shared (W_out, b_out)
        output layer to the LSTM output at every timestep.
        """
        cell = rnn.BasicLSTMCell(self.hidden_dim)
        outputs, states = tf.nn.dynamic_rnn(cell, self.x, dtype=tf.float32)
        num_examples = tf.shape(self.x)[0]
        # Tile W_out across the batch so a single matmul serves all examples.
        W_repeated = tf.tile(tf.expand_dims(self.W_out, 0), [num_examples, 1, 1])
        out = tf.matmul(outputs, W_repeated) + self.b_out
        out = tf.squeeze(out)  # drop the trailing singleton output dimension
        return out

    def train(self, train_x, train_y):
        """Fit on (train_x, train_y) for 1000 Adam steps and checkpoint.

        Prints the MSE every 100 steps; the trained model is saved to
        'model.ckpt' in the working directory.
        """
        with tf.Session() as sess:
            tf.get_variable_scope().reuse_variables()
            sess.run(tf.global_variables_initializer())
            for i in range(1000):
                _, mse = sess.run([self.train_op, self.cost], feed_dict={self.x: train_x, self.y: train_y})
                if i % 100 == 0:
                    print(i, mse)
            save_path = self.saver.save(sess, 'model.ckpt')
            print('Model saved to {}'.format(save_path))

    def test(self, test_x):
        """Restore the checkpoint written by train() and predict for test_x."""
        with tf.Session() as sess:
            tf.get_variable_scope().reuse_variables()
            self.saver.restore(sess, './model.ckpt')
            output = sess.run(self.model(), feed_dict={self.x: test_x})
            return output
# -
# Now, we'll train a series predictor. Let's say we have a sequence of numbers `[a, b, c, d]` that we want to transform into `[a, a+b, b+c, c+d]`. We'll give the RNN a couple examples in the training data. Let's see how well it learns this intended transformation:
# Train on example sequences of the transform
# [a, b, c, d] -> [a, a+b, b+c, c+d], then predict on held-out inputs.
if __name__ == '__main__':
    predictor = SeriesPredictor(input_dim=1, seq_size=4, hidden_dim=10)
    # Inputs: shape [batch, seq_size, input_dim]
    train_x = [[[1], [2], [5], [6]],
               [[5], [7], [7], [8]],
               [[3], [4], [5], [7]]]
    # Targets: first element, then sums of consecutive input pairs
    train_y = [[1, 3, 7, 11],
               [5, 12, 14, 15],
               [3, 7, 9, 12]]
    predictor.train(train_x, train_y)
    test_x = [[[1], [2], [3], [4]],  # 1, 3, 5, 7
              [[4], [5], [6], [7]]]  # 4, 9, 11, 13
    actual_y = [[[1], [3], [5], [7]],
                [[4], [9], [11], [13]]]
    pred_y = predictor.test(test_x)
    print("\nLets run some tests!\n")
    for i, x in enumerate(test_x):
        print("When the input is {}".format(x))
        print("The ground truth output should be {}".format(actual_y[i]))
        print("And the model thinks it is {}\n".format(pred_y[i]))
| ch10_rnn/Concept02_rnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## <center> Computation Two Ways: Faraday's Law
# ---------
#
# In this worksheet, we will explore solving problems 22.20 and 22.23 in homework 9 using Excel and Python. These tools are highly useful in real-world problem solving, especially as the studied material increases in complexity.
#
# ### <center> Problem 22.23
#
# 
#
# #### <center> Solve on Paper
#
# Using computers to verify handwritten mathematics is a great way to help reduce human error and test understanding of the problem. Try this problem without the help of the computer initially. Then, continue to the following sections to verify your work using Excel and Python.
# <br><br><br><br><br><br><br><br><br>
#
# #### <center> Solve using Excel
#
# Click [here](hw9.xlsx) to access the spreadsheet used to solve this problem. Change the numbers and see how the output changes. Note that there are various styles and ways of using Excel to solve problems. Explore what works for you!
#
#
#
# #### <center> Solve using Python
#
# The following code is a sample way to solve problem 22.33. Run the cell using the **Run** button above to see the output. Does this match with your handwritten answer?
#
#
# +
# Problem 22.23: peak EMF induced in coil 2 by the oscillating current in
# coil 1, a distance L away along the axis.
import scipy.constants as sp
import math

# Known quantities; note that all cm have been converted to m.
n1_turns = 5300      # turns in coil 1
n2_turns = 3200      # turns in coil 2
radius_c1 = 0.04     # coil 1 radius (m)
radius_c2 = 0.02     # coil 2 radius (m)
distance_l = 0.4     # coil separation (m)
amps = 0.05          # peak current amplitude (A)
freq = 2100          # drive frequency (Hz)

# Intermediate quantities.
mu0_4pi = sp.mu_0 / (4 * sp.pi)        # mu_0 / (4*pi), ~1e-7 T*m/A
area_c1 = sp.pi * radius_c1 ** 2       # coil 1 cross-section (m^2)
area_c2 = sp.pi * radius_c2 ** 2       # coil 2 cross-section (m^2)
ang_freq = 2 * sp.pi * freq            # angular frequency (rad/s)
l_third = 1 / distance_l ** 3          # 1 / L^3 (m^-3)

# Peak EMF in coil 2 (Faraday's law with the on-axis dipole field of coil 1).
emf_c2 = (n1_turns * n2_turns * mu0_4pi * 2 * area_c1 * area_c2
          * amps * ang_freq * l_third)
print("The maximum voltage of the second coil voltage =", emf_c2, "V")
# -
# -
# ## <center> Problem 22.20
#
# 
# 
# 
#
# #### <center> Important terms and variables
#
# Use the space below to write down any important terms and variables pertinant to this problem.
# <br><br><br><br><br><br><br><br><br>
#
# #### <center> Equation(s)
#
# Based on the context of the question and the variables identified, write down any applicable equations that can be used to solve this problem.
# <br><br><br><br><br><br><br><br><br>
#
# #### <center> Solve on Paper
#
# Use the following space to attempt the problem by hand without the use of computers. Test your work in the sections below.
# <br><br><br><br><br><br><br><br><br>
#
# #### <center> Solve using Excel
#
# Click [here](hw9.xlsx) to open the Excel spreadsheet, or click the applicable tab in the spreadsheet linked previously. Refer to the layout of problem 22.23 for further help.
#
# <link>
#
# #### <center> Solve using Python
#
# In the code cell below, fill in the empty areas. Once completed, run the cell to see if you get the right answer!
#
# +
# Packages important for this problem. Feel free to add more, as needed.
import scipy.constants as sp
import math
# List the known variables here.
# Pre-Calculations
# Solve Part 1
dBdt = #insert formula here using variables above.
# Solve Part 2
vm_reading = #insert formula here using variables above.
print("The answer to Part 1 is ", dBdt, "T/s")
print("The answer to Part 2 is ", vm_reading, "volts")
# -
# ### <center> Further Resources
#
# For more information on solving problems computationally using Excel and Python, check out the resources below.
#
# #### <center> Excel
#
# [Excel Math Functions](https://www.excelfunctions.net/excel-math-functions.html)
#
# [Excel for Windows Training](https://support.office.com/en-us/article/excel-for-windows-training-9bc05390-e94c-46af-a5b3-d7c22f6990bb)
#
# [Google Sheets training and help](https://support.google.com/a/users/answer/9282959?hl=en)
#
# #### <center> Python
#
# [Python for Computational Science and Engineering](https://www.southampton.ac.uk/~fangohr/training/python/pdfs/Python-for-Computational-Science-and-Engineering.pdf)
#
# [scipy.constants](https://docs.scipy.org/doc/scipy/reference/constants.html)
#
# [Computational Physics with Python](http://www-personal.umich.edu/~mejn/computational-physics/)
| Chapter 22 in Excel and Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # find taxa
#
# 1. Enter the taxa that you want to search for.
# 2. Run the first three cells (Shift + Enter), until you see a map.
# 3. Zoom in and out of map using '+' or '-' to select the locations you would like to download.
# 4. Run the last two cells to create a csv of the selected taxa and locations.
# 5. Click the "Download eodp.csv" link to download the csv
#
search_terms = ['Globorotalia']
# +
from datetime import date
from pathlib import Path
import sys
sys.path.append(str(Path.cwd().parent))
from scripts.normalize_data import print_df
from scripts.search_files import (
get_matching_taxa, search_for_taxa_in_all_files, draw_map,
filter_samples_by_bounding_box, DownloadFileLink,
display_search_results
)
paths = list(Path('..', 'processed_data', 'clean_data').rglob('*.csv'))
hole_path = Path('..', 'processed_data', 'Hole Summary_23_2_2021.csv')
taxa_search_path = Path('..', 'processed_data', 'taxa_list_search.csv')
nontaxa_list_path = Path('..', 'processed_data', 'normalized_nontaxa_list.csv')
# +
# Find all taxa matching the search terms and every sample/file they occur in.
taxa_matches = get_matching_taxa(search_terms)
search_df = search_for_taxa_in_all_files(taxa_matches)
# One marker per hole for the map (Exp/Site/Hole identify a hole).
map_df = search_df.drop_duplicates(subset=['Exp', 'Site', 'Hole'])
print(f'{len(search_df)} samples, {len(map_df)} holes')
for taxon in taxa_matches:
    print(taxon)
my_map = draw_map(map_df)
my_map
# +
# Restrict the samples to the holes inside the map's current bounding box.
filter_df = filter_samples_by_bounding_box(my_map, search_df, map_df)
print(f'{len(filter_df)} samples, {len(filter_df["geometry"].unique())} holes')
filter_df.head()
# +
# Write the filtered samples to csv and render a download link.
file = 'eodp_data.csv'
filter_df.to_csv(file, index=False)
DownloadFileLink(file, f'Download {file}')
# -
| data_cleaning/notebooks/download_file.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dihedrals analysis on non-standard dihedrals
from peleffybenchmarktools.dihedrals import (DihedralBenchmark,
OpenMMEnergeticProfile,
OpenFFEnergeticProfile,
PELEEnergeticProfile)
from peleffy.topology import Molecule
from openforcefield.topology import Topology
from openforcefield.typing.engines.smirnoff import ForceField
from simtk import unit
# ## Test the single problematic dihedral (2,3,5,8)
# +
# Load molecule
mol = Molecule(smiles='[H]c1c(c(n(n1)S(=O)(=O)C([H])([H])C([H])([H])[H])[H])O[H]')
display(mol)
# Parameterize molecule
mol.parameterize('openff_unconstrained-1.2.1.offxml',
charges_method='gasteiger')
# Get parameters with the OpenFF Toolkit
topology = Topology.from_molecules([mol.off_molecule])
ff = ForceField('openff_unconstrained-1.2.1.offxml')
parameters = ff.label_molecules(topology)[0]
# +
# Filter out all non interesting dihedrals.
# Keep only proper torsions whose phase is neither 0 nor 180 degrees (the
# "non-standard" dihedrals this notebook analyses) and drop all impropers.
propers_to_keep = list()
for p in mol.propers:
    if p.phase not in (unit.Quantity(0, unit.degree),
                       unit.Quantity(180, unit.degree)):
        propers_to_keep.append(p)
# NOTE(review): writes to peleffy's private attributes; assumes no public
# setter exists for these lists.
mol._propers = propers_to_keep
mol._impropers = []
# -
# Define dihedral with atom indexes
i, j, k, l = (propers_to_keep[0].atom1_idx,
propers_to_keep[0].atom2_idx,
propers_to_keep[0].atom3_idx,
propers_to_keep[0].atom4_idx)
dihedral_benchmark = DihedralBenchmark((i, j, k, l), mol)
print(dihedral_benchmark.get_dihedral_parameters())
dihedral_benchmark.display_dihedral()
# +
# Plot its theoretical energetic profile
off_ep = OpenFFEnergeticProfile(dihedral_benchmark)
for idxs, p in off_ep._parameters.items():
for i, phase in enumerate(p.phase):
if phase not in (unit.Quantity(0, unit.degree),
unit.Quantity(180, unit.degree)):
pass
else:
p.k[i] = unit.Quantity(0, unit.kilocalorie / unit.mole)
off_ep.plot_energies(resolution=10)
# -
# Plot the energetic profile obtained with PELE
pele_ep = PELEEnergeticProfile(
dihedral_benchmark,
PELE_exec='/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6',
PELE_src='/home/municoy/repos/PELE-repo/')
pele_ep.plot_energies(resolution=5)
# +
from peleffybenchmarktools.utils.pele import PELESinglePoint
pele_sp = PELESinglePoint(
PELE_exec='/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6',
PELE_src='/home/municoy/repos/PELE-repo/')
mol.set_name('molecule1')
pele_sp.run(mol)
# -
# ## Test all dihedrals that affect bond (3,5)
# +
# Load molecule
mol = Molecule(smiles='[H]c1c(c(n(n1)S(=O)(=O)C([H])([H])C([H])([H])[H])[H])O[H]')
display(mol)
# Parameterize molecule
mol.parameterize('openff_unconstrained-1.2.1.offxml',
charges_method='gasteiger')
# Get parameters with the OpenFF Toolkit
topology = Topology.from_molecules([mol.off_molecule])
ff = ForceField('openff_unconstrained-1.2.1.offxml')
parameters = ff.label_molecules(topology)[0]
# -
# Filter out all non interesting dihedrals
problematic_propers = list()
for p in mol.propers:
if p.phase not in (unit.Quantity(0, unit.degree),
unit.Quantity(180, unit.degree)):
problematic_propers.append(p)
proper_to_analyze = problematic_propers[0]
# Define dihedral with atom indexes
i, j, k, l = (proper_to_analyze.atom1_idx,
proper_to_analyze.atom2_idx,
proper_to_analyze.atom3_idx,
proper_to_analyze.atom4_idx)
dihedral_benchmark = DihedralBenchmark((i, j, k, l), mol)
print(dihedral_benchmark.get_dihedral_parameters())
dihedral_benchmark.display_dihedral()
# Plot its theoretical energetic profile
off_ep = OpenFFEnergeticProfile(dihedral_benchmark)
off_ep.plot_energies(resolution=10)
# Plot the energetic profile obtained with PELE
pele_ep = PELEEnergeticProfile(
dihedral_benchmark,
PELE_exec='/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6',
PELE_src='/home/municoy/repos/PELE-repo/')
pele_ep.plot_energies(resolution=10)
# ## Test all theoretical dihedrals
# +
from peleffy.topology import Molecule, Proper
from peleffybenchmarktools.dihedrals import (DihedralBenchmark,
PELEEnergeticProfile,
OFFPELEEnergeticProfile)
from simtk import unit
# +
mol = Molecule(smiles='CCCC')
mol.parameterize('openff_unconstrained-1.2.1.offxml')
display(mol)
# -
# ### Periodicity = 1
# +
proper = Proper(atom1_idx=0, atom2_idx=1, atom3_idx=2, atom4_idx=3,
periodicity=1, prefactor=1,
constant=unit.Quantity(1, unit.kilocalorie / unit.mole),
phase=unit.Quantity(90, unit.degree))
mol._propers = [proper]
mol._impropers = list()
# -
dihedral_benchmark = DihedralBenchmark(
(proper.atom1_idx, proper.atom2_idx, proper.atom3_idx, proper.atom4_idx), mol)
print(dihedral_benchmark.get_dihedral_parameters())
dihedral_benchmark.display_dihedral()
# Plot its theoretical energetic profile
peleffy_ep = OFFPELEEnergeticProfile(dihedral_benchmark)
peleffy_ep.plot_energies(resolution=10)
# Plot the energetic profile obtained with PELE
pele_ep = PELEEnergeticProfile(
dihedral_benchmark,
PELE_exec='/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6',
PELE_src='/home/municoy/repos/PELE-repo/')
pele_ep.plot_energies(resolution=10)
# ### Periodicity = 2
# +
proper = Proper(atom1_idx=0, atom2_idx=1, atom3_idx=2, atom4_idx=3,
periodicity=2, prefactor=1,
constant=unit.Quantity(1, unit.kilocalorie / unit.mole),
phase=unit.Quantity(90, unit.degree))
mol._propers = [proper]
mol._impropers = list()
# -
dihedral_benchmark = DihedralBenchmark(
(proper.atom1_idx, proper.atom2_idx, proper.atom3_idx, proper.atom4_idx), mol)
print(dihedral_benchmark.get_dihedral_parameters())
dihedral_benchmark.display_dihedral()
# Plot its theoretical energetic profile
peleffy_ep = OFFPELEEnergeticProfile(dihedral_benchmark)
peleffy_ep.plot_energies(resolution=10)
# Plot the energetic profile obtained with PELE
pele_ep = PELEEnergeticProfile(
dihedral_benchmark,
PELE_exec='/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6',
PELE_src='/home/municoy/repos/PELE-repo/')
pele_ep.plot_energies(resolution=10)
# ### Periodicity = 3
# +
proper = Proper(atom1_idx=0, atom2_idx=1, atom3_idx=2, atom4_idx=3,
periodicity=3, prefactor=1,
constant=unit.Quantity(1, unit.kilocalorie / unit.mole),
phase=unit.Quantity(90, unit.degree))
mol._propers = [proper]
mol._impropers = list()
# -
dihedral_benchmark = DihedralBenchmark(
(proper.atom1_idx, proper.atom2_idx, proper.atom3_idx, proper.atom4_idx), mol)
print(dihedral_benchmark.get_dihedral_parameters())
dihedral_benchmark.display_dihedral()
# Plot its theoretical energetic profile
peleffy_ep = OFFPELEEnergeticProfile(dihedral_benchmark)
peleffy_ep.plot_energies(resolution=10)
# Plot the energetic profile obtained with PELE
pele_ep = PELEEnergeticProfile(
dihedral_benchmark,
PELE_exec='/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6',
PELE_src='/home/municoy/repos/PELE-repo/')
pele_ep.plot_energies(resolution=10)
# ### Periodicity = 4
# +
proper = Proper(atom1_idx=0, atom2_idx=1, atom3_idx=2, atom4_idx=3,
periodicity=4, prefactor=1,
constant=unit.Quantity(1, unit.kilocalorie / unit.mole),
phase=unit.Quantity(90, unit.degree))
mol._propers = [proper]
mol._impropers = list()
# -
dihedral_benchmark = DihedralBenchmark(
(proper.atom1_idx, proper.atom2_idx, proper.atom3_idx, proper.atom4_idx), mol)
print(dihedral_benchmark.get_dihedral_parameters())
dihedral_benchmark.display_dihedral()
# Plot its theoretical energetic profile
peleffy_ep = OFFPELEEnergeticProfile(dihedral_benchmark)
peleffy_ep.plot_energies(resolution=10)
# Plot the energetic profile obtained with PELE
pele_ep = PELEEnergeticProfile(
dihedral_benchmark,
PELE_exec='/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6',
PELE_src='/home/municoy/repos/PELE-repo/')
pele_ep.plot_energies(resolution=10)
# ### Periodicity = 5 (_TO DO_)
# +
proper = Proper(atom1_idx=0, atom2_idx=1, atom3_idx=2, atom4_idx=3,
periodicity=5, prefactor=1,
constant=unit.Quantity(1, unit.kilocalorie / unit.mole),
phase=unit.Quantity(90, unit.degree))
mol._propers = [proper]
mol._impropers = list()
# -
dihedral_benchmark = DihedralBenchmark(
(proper.atom1_idx, proper.atom2_idx, proper.atom3_idx, proper.atom4_idx), mol)
print(dihedral_benchmark.get_dihedral_parameters())
dihedral_benchmark.display_dihedral()
# Plot its theoretical energetic profile
peleffy_ep = OFFPELEEnergeticProfile(dihedral_benchmark)
peleffy_ep.plot_energies(resolution=10)
# Plot the energetic profile obtained with PELE
pele_ep = PELEEnergeticProfile(
dihedral_benchmark,
PELE_exec='/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6',
PELE_src='/home/municoy/repos/PELE-repo/')
pele_ep.plot_energies(resolution=10)
# ### Periodicity = 6
# +
proper = Proper(atom1_idx=0, atom2_idx=1, atom3_idx=2, atom4_idx=3,
periodicity=6, prefactor=1,
constant=unit.Quantity(1, unit.kilocalorie / unit.mole),
phase=unit.Quantity(90, unit.degree))
mol._propers = [proper]
mol._impropers = list()
# -
dihedral_benchmark = DihedralBenchmark(
(proper.atom1_idx, proper.atom2_idx, proper.atom3_idx, proper.atom4_idx), mol)
print(dihedral_benchmark.get_dihedral_parameters())
dihedral_benchmark.display_dihedral()
# Plot its theoretical energetic profile
peleffy_ep = OFFPELEEnergeticProfile(dihedral_benchmark)
peleffy_ep.plot_energies(resolution=10)
# Plot the energetic profile obtained with PELE
pele_ep = PELEEnergeticProfile(
dihedral_benchmark,
PELE_exec='/home/municoy/builds/PELE/PELE-repo_serial/PELE-1.6',
PELE_src='/home/municoy/repos/PELE-repo/')
pele_ep.plot_energies(resolution=10)
| benchmarks/geometry/NonStandardDihedralAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tesla stock price analyze
# In this project we are going to analyze and visualize closing prices of Tesla company by month,weeks,days and hours.
#Importing libraries
import yfinance as yf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
#Getting Tesla data from yahoo finance
df = yf.download("TSLA", start="2020-01-01", end="2021-01-01")
df.head(10)
df.shape
# Column information:
# - Open and Close represent - the starting and final price at which the stock is traded on a particular day
# - High and Low - represent the maximum and minimum price of the share for the day
# - Volume - is the number of shares bought or sold in the day
# ## Data visualization
#ploting closing price by month
# Line plot of the daily 2020 closing prices; the x axis is df's DatetimeIndex.
plt.figure(figsize=(10,6))
plt.title("Close price history")
plt.plot(df["Close"])
plt.xlabel("Date")
plt.ylabel("Close Price USD")
plt.show()
#Closing prices in december
december=df["12-01-2020":"12-31-2020"]
december.head()
#Average closing price in december
december.Close.mean()
#ploting closing price by day in december
plt.figure(figsize=(13,7))
plt.title("Close price in december 2020")
plt.plot(december.Close)
plt.xlabel("Date")
plt.ylabel("Close Price USD")
plt.show()
# We are missing closing prices in weekends and we can assume that at the weekend closing prices are the same as the last day before weekend (friday) so we will add closing prices at the weekends with function "asfreq".
# Adding closing prices by days (D) on weekends ("pad")
# "pad" forward-fills: each missing day gets the previous available close.
df2=df.asfreq("D","pad")
df2.head()
# We can now see that we have closing prices on the weekends also.
# Adding weekly closing prices
# Downsampling to week-end labels; "pad" keeps the last known close.
weekly=df2.asfreq("W","pad")
weekly.head(10)
#ploting closing price by weeks
plt.figure(figsize=(13,7))
plt.title(" Weekly closing prices in 2020")
plt.plot(weekly.Close)
plt.xlabel("Date")
plt.ylabel("Close Price USD")
plt.show()
#Closing prices from june to december 2020
june_dec=weekly["06-01-2020":"12-31-2020"]
june_dec.head(10)
#ploting closing price by weeks from june to december
plt.figure(figsize=(10,5))
plt.title(" Weekly close prices from june to december 2020")
plt.plot(june_dec.Close)
plt.xlabel("Date")
plt.ylabel("Close Price USD")
plt.show()
#Average weekly closing prices from june to december 2020
june_dec.Close.mean()
# Adding closing prices by hours
# Upsampling to hourly frequency; every hour of a day repeats that day's close.
hours=df2.asfreq("H","pad")
hours.head(10)
#Closing prices by hours on december 30th
# BUG FIX: the original slice was hours["12/30/2020":"30/12/2021"] — the end
# label used a day-first format and the wrong year (2021), so the slice ran
# past December 30th to the end of the data. With a DatetimeIndex, slicing a
# full-day label on both ends selects every hour of that single day.
dec_30=hours["12-30-2020":"12-30-2020"]
dec_30.head(10)
#ploting closing price by hours on december 30th
plt.figure(figsize=(10,4))
plt.title("Close prices by hours on december 30th 2020 ")
plt.plot(dec_30.Close)
plt.xlabel("Date")
plt.ylabel("Close Price USD")
plt.show()
# ### Resample
#Calculating average close price by day using resample function
# resample("D").mean() is a no-op on already-daily data but shows the API.
days=df2.Close.resample("D").mean()
days.head()
#ploting average closing price by days
plt.figure(figsize=(10,4))
plt.title("Average close prices by days")
plt.plot(days)
plt.xlabel("Date")
plt.ylabel("Close Price USD")
plt.show()
#Calculating average close price by weeks using resample function
# Unlike asfreq("W"), resample averages all days of the week into one value.
weeks=df2.Close.resample("W").mean()
weeks.head()
#ploting average closing price by weeks
plt.figure(figsize=(10,4))
plt.title("Average closing prices by weeks in 2020 ")
plt.plot(weeks)
plt.xlabel("Date")
plt.ylabel("Close Price USD")
plt.show()
# ### Adding date index into dataframe
# If we have data set that doesn't have date but we know start date and end date we can easily make a column with it.
# NOTE(review): assumes aapl_no_dates.csv has exactly one row per business day
# in the range below — verify before reusing.
apple=pd.read_csv("aapl_no_dates.csv")
apple.head()
# importing library
from pandas.tseries.holiday import USFederalHolidayCalendar
from pandas.tseries.offsets import CustomBusinessDay
#Defining date range
# Business days only, skipping US federal holidays.
us_cal = CustomBusinessDay(calendar=USFederalHolidayCalendar())
rng = pd.date_range(start="7/1/2017",end="7/23/2017", freq=us_cal)
rng
#Setting defined date index into dataframe
apple.set_index(rng,inplace=True)
apple.head()
| tesla_analyzing_price/stock_price_analyze.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
# NOTE(review): hard-coded absolute path — consider making this configurable.
os.chdir('/Users/sahatprasad/Documents/csv files')
import numpy as np
import pandas as pd
# Load the Big Mart sales training set.
train_data=pd.read_csv('Train_UWu5bXk.csv')
train_data.head()
train_data.shape
# BUG FIX: the module is imported as `np`, so `numpy.NaN` raised a NameError.
# A visibility of exactly 0 is treated as a missing value.
train_data['Item_Visibility']=train_data['Item_Visibility'].replace(0,np.nan)
print(train_data.isnull().sum())
# Number of distinct values per column.
train_data.apply(lambda x: len(x.unique()))
# +
#train_data['Item_Fat_Content'].apply(pd.value_counts)
# -
train_data["Item_Fat_Content"].value_counts()
train_data["Item_Type"].value_counts()
# +
#pd.pivot_table(train_data,values='Item_Weight', index=['Item_Identifier'])
# -
# Count missing Item_Weight values before imputing.
missing=train_data['Item_Weight'].isnull()
print(sum(missing))
# Mean-impute all numeric NaNs.
# NOTE(review): newer pandas requires numeric_only=True for DataFrame.mean()
# on mixed-type frames — confirm against the pandas version in use.
train_data.fillna(train_data.mean(), inplace=True)
print(train_data.isnull().sum())
train_data['Outlet_Size'].mode()
# +
#Import mode function:
from scipy.stats import mode
#Determing the mode for each
# Most frequent Outlet_Size per Outlet_Type, used to fill missing sizes.
outlet_size_mode = train_data.pivot_table(values='Outlet_Size', columns='Outlet_Type',aggfunc=(lambda x:x.mode().iat[0]))
print(outlet_size_mode)
miss_bool = train_data['Outlet_Size'].isnull()
# Fill each missing Outlet_Size with the modal size of that row's Outlet_Type.
train_data.loc[miss_bool,'Outlet_Size'] = train_data.loc[miss_bool,'Outlet_Type'].apply(lambda x: outlet_size_mode[x])
print(sum(train_data['Outlet_Size'].isnull()))
# -
print(train_data.isnull().sum())
# +
#train_data.describe()
# +
#train_data["Item_Fat_Content"].value_counts()
# -
pd.get_dummies(train_data["Item_Fat_Content"]).head(2)
pd.get_dummies(train_data["Outlet_Size"]).head(2)
train_data.head(2)
train_data.columns[4]
# Drop identifier/date-like columns by position.
x=train_data.drop(train_data.columns[[0, 4, 6,9,10]], axis=1)
# NOTE(review): assigning a get_dummies() frame into a single column keeps
# only the FIRST dummy column — this is a lossy binary encoding; verify that
# is intended rather than keeping all dummy columns.
x["Item_Fat_Content"]=pd.get_dummies(x["Item_Fat_Content"])
x["Outlet_Size"]=pd.get_dummies(x["Outlet_Size"])
x['Item_Fat_Content']=x['Item_Fat_Content'].astype(int)
x['Outlet_Size']=x['Outlet_Size'].astype(int)
x['Outlet_Size'].dtypes
x.head(2)
x.dtypes
# Target is the sales column; features are everything else.
y=x.Item_Outlet_Sales
X=x.drop('Item_Outlet_Sales', axis=1)
from sklearn.linear_model import LinearRegression
# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from matplotlib import pyplot as plt
from sklearn import metrics
# 80/20 train/test split of the prepared features and target.
X_train,X_test,y_train,y_test = train_test_split(X, y, test_size=0.2)
# # Linear Regression Model
#
lm = LinearRegression()
lm.fit(X_train,y_train)
y_predict=lm.predict(X_test)
# Report RMSE on the held-out set.
print(np.sqrt(metrics.mean_squared_error(y_test,y_predict)))
# # Ridge Regression Model
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
# NOTE(review): the `normalize=` parameter was removed from Ridge in
# scikit-learn 1.2 — on modern versions wrap a StandardScaler in a pipeline
# instead. Left unchanged here to preserve the notebook's behavior.
ridge2 = Ridge(alpha = 0.05, normalize = True)
ridge2.fit(X_train, y_train)             # Fit a ridge regression on the training data
pred2 = ridge2.predict(X_test)           # Use this model to predict the test data
print(pd.Series(ridge2.coef_, index = X.columns)) # Print coefficients
print(np.sqrt(mean_squared_error(y_test, pred2)))          # Calculate the test MSE
# # Random Forest Model
from sklearn.ensemble import RandomForestRegressor
# 1000 trees; heavier to fit but usually lower variance than a single model.
clf=RandomForestRegressor(n_estimators=1000)
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
print(np.sqrt(mean_squared_error(y_test,y_pred)))
| Big Market Sales.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ProfessorDong/Deep-Learning-Course-Examples/blob/master/CNN_Examples/fashion_mnist.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8TlEhsfJ9rti"
# # Image Classification using Fashion-MNIST image data
# + colab={"base_uri": "https://localhost:8080/"} id="usCuJHFQqqFz" outputId="38699c17-1955-418e-fdf9-9d42ddf7e45b"
import tensorflow as tf
from tensorflow import keras
print(tf.__version__)
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] id="dtQfJNudsBZI"
# ## Load image data
#
# Fashion-MNIST is a dataset of Zalando's article images consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes.
#
# https://www.tensorflow.org/datasets/catalog/fashion_mnist
#
# https://github.com/zalandoresearch/fashion-mnist
# + id="LHrt3jJRrbM5" colab={"base_uri": "https://localhost:8080/"} outputId="996e0460-e775-40b6-c118-f3dfa0b5ef67"
# 60k training / 10k test grayscale 28x28 images, labels 0-9.
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# + [markdown] id="Bhv14ObBswiA"
# Pixel values are between 0 and 255, 0 being black and 255 white (grey-scale image).
#
# Check the loaded data - Image
# + colab={"base_uri": "https://localhost:8080/"} id="HLX_RkQGsI6o" outputId="421b06ea-b1ef-4f4b-a08a-8326dd687cc4"
print(train_images.shape)
print(train_images[0,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 266} id="zekY0RBftpAA" outputId="ab724ca3-e9d9-4a1e-803e-0be69b941918"
plt.figure()
plt.imshow(train_images[36])
plt.colorbar()
plt.grid(False)
plt.show()
# + [markdown] id="kXHL34qutEww"
# Labels are integers from 0 to 9. Each represents a specific article of clothing.
#
# Check the loaded data - Label
# + colab={"base_uri": "https://localhost:8080/"} id="k1SaLSmPspYw" outputId="a13932d7-4869-4c54-9622-098bfab8f6aa"
train_labels[:12]  # First 12 training labels
# + [markdown] id="WuQc91CCl_Cw"
# Define class names
# + id="fwNkcOhPtpMX"
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# + [markdown] id="44S38onPrbDw"
# ## Data Preprocessing
# + id="0YEcXFcLuVx_"
# Scale pixels to [0, 1] and cast to float32 for the network.
train_images = train_images / 255.0
test_images = test_images / 255.0
train_images = train_images.astype('float32')
test_images = test_images.astype('float32')
# Reshape the array to 4-dims so that it can work with the Keras API
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1)
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1)
input_shape = (28, 28, 1)
# + [markdown] id="p7Q2UPVMufYW"
# ## Build the Model
# + id="kOlqAcnfuieP"
# Two conv+pool stages followed by dense layers with dropout; softmax over
# the 10 clothing classes.
model = keras.Sequential([
    keras.layers.Conv2D(6, kernel_size=(5,5), padding='same', input_shape=input_shape),
    keras.layers.MaxPooling2D(pool_size=(2,2)),
    keras.layers.Conv2D(16, kernel_size=(5,5)),
    keras.layers.MaxPooling2D(pool_size=(2,2)),
    keras.layers.Flatten(),
    keras.layers.Dense(128, activation='relu'),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dropout(0.2),
    keras.layers.Dense(10, activation='softmax')
])
# + colab={"base_uri": "https://localhost:8080/"} id="wC1SK5bZ7X7k" outputId="05b634d2-ac45-4ee5-b4b8-6c35fe25fde2"
model.summary()
# + [markdown] id="tdTQocvrvkKv"
# ## Compile the Model
# + id="e1KtOpDTvmSY"
# sparse_categorical_crossentropy: labels are integer class ids, not one-hot.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
# + [markdown] id="4Mf-hV_Kv9Fo"
# ## Train the Model
# + colab={"base_uri": "https://localhost:8080/"} id="PB82VLNRv_PX" outputId="a3ebb6d7-bacf-415f-fc6d-f2f83cc1ff74"
model.fit(train_images, train_labels, epochs=30)
# + colab={"base_uri": "https://localhost:8080/"} id="ob-06S51wkLu" outputId="98f7f14d-d324-4067-cf77-19b98481ae4f"
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=1)
print('Test Accuracy:', test_acc)
# + [markdown] id="RpArkIhtxR0P"
# ## Make a Prediction
# + colab={"base_uri": "https://localhost:8080/", "height": 525} id="4q8BDi5FxUNG" outputId="6249382c-1f90-44c3-e203-8c8ae1db23b5"
# predictions has one softmax vector per test image; argmax gives the class id.
predictions = model.predict(test_images)
print(predictions)
# predictions = model.predict([test_images[0]])
print(np.argmax(predictions[101]))
print(class_names[np.argmax(predictions[101])])
plt.figure()
plt.imshow(test_images[101].reshape(28,28))
plt.colorbar()
plt.grid(False)
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="A2X4IOx9yk--" outputId="d5ad5c97-ed09-4255-bf79-47748d5d38fc"
# Use white text on dark notebook themes for all following matplotlib figures.
COLOR = 'white'
plt.rcParams['text.color'] = COLOR
plt.rcParams['axes.labelcolor'] = COLOR
def show_image(img, label, guess):
    """Render one 28x28 test image and report the true vs. predicted labels."""
    plt.figure()
    plt.imshow(img.reshape(28, 28), cmap=plt.cm.binary)
    plt.colorbar()
    plt.grid(False)
    plt.show()
    # Print the ground truth first, then the model's guess.
    print("Correct label: " + label)
    print("Guessed label: " + guess)
def predict(model, image, correct_label):
    """Classify a single image and display the guess alongside the truth."""
    labels = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
    # The model expects a batch dimension, hence the wrapping array.
    scores = model.predict(np.array([image]))
    guess = labels[np.argmax(scores)]
    show_image(image, labels[correct_label], guess)
def get_number():
    """Prompt until the user enters an integer in [0, 1000] and return it.

    BUG FIX: the original printed "Try again..." only for non-numeric input,
    so a numeric but out-of-range entry (e.g. "5000") looped forever with no
    feedback. The retry message now covers both failure modes.
    """
    while True:
        num = input("Choose a number: ")
        if num.isdigit():
            num = int(num)
            if 0 <= num <= 1000:
                return num
        print("Try again...")
# Interactively pick a test index and visualize the model's prediction for it.
num = get_number()
image = test_images[num]
label = test_labels[num]
predict(model, image, label)
| CNN_Examples/fashion_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + pycharm={"name": "#%%\n"}
# Candidate factor levels for the two-level design.
levels = [-1.0,1.0]
# Random starting design: 14 runs x 8 factors, values in (-1, 1).
D = np.random.uniform(low=-1, high=1, size=(14,8))
n_row,n_col = D.shape
# Model matrix: intercept column prepended to the design.
X = np.hstack((np.ones((n_row,1)), D))
# + pycharm={"name": "#%%\n"}
def calc_det(D, i, j, level):
    """Set D[i, j] to the chosen level (0 -> -1.0, 1 -> +1.0), in place,
    and return det(X'X) for the model matrix X = [1 | D]."""
    rows, _ = D.shape
    D[i, j] = (-1.0, 1.0)[level]
    model_matrix = np.hstack((np.ones((rows, 1)), D))
    gram = model_matrix.T @ model_matrix
    return np.linalg.det(gram)
# + pycharm={"name": "#%%\n"}
# Theoretical upper bound used as a reference for det(X'X) — TODO confirm formula.
max_det = n_row**(n_col+1)
# Coordinate by coordinate change from 1 to -1 and check the determinant
for i in range(n_row):
    for j in range(n_col):
        # BUG FIX: rebuild the model matrix from the CURRENT design before
        # scoring; the original reused the X built once before the loop, so
        # cur_det was stale after calc_det mutated D in earlier iterations.
        X = np.hstack((np.ones((n_row, 1)), D))
        cur_det = np.linalg.det(X.T @ X)
        # Try level -1 first; keep it only if it improves the determinant,
        # otherwise revert the coordinate to +1.
        next_det = calc_det(D=D, i=i, j=j, level=0)
        if next_det <= cur_det:
            next_det = calc_det(D=D, i=i, j=j, level=1)
print(pd.DataFrame(D.T @ D))
# + pycharm={"name": "#%%\n"}
# + pycharm={"name": "#%%\n"}
| doe/legacy/coordinate_exchange.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.display import Image
from IPython.core.display import HTML
# Show the reference figure for the functions plotted below.
Image(url= "https://i.imgur.com/6TgxQrm.png")
# +
from sympy import *; x,h,t,y,z = symbols("x h t y z", real=True)
f, g, h = symbols('f g h', cls=Function)
# f is a downward parabola; g is a stepped approximation of 5*sqrt(f).
f = -x**2+100
g = 5*(floor(sqrt(f)))
# Plot f, g (and f's derivative) under three transformations: shift (+i),
# scale (*i) and i-th root. Only iterations 0, 2 and 20 produce output.
# FIX: removed the no-op statement `i = i` and merged the two duplicate
# `if i == 0:` branches from the original.
for i in range(200):
    if i == 0:
        print("""for f(x) = -x**2+100 red line and g(x) = 5*(floor(sqrt(f)) blue line dF = green line
        """)
        print(" f +",i,"and g +",i," Current Iteration:",i)
        p0 = plot((f+i),(g+i), diff(f+i),show = False,xlim = (1,10.5),size = (9,5),legend = True)
        p0[0].line_color = 'r'
        p0[2].line_color = 'g'
        p0.show()
    if i == 2:
        print("f *",i,"and g *",i," Current Iteration:",i)
        p1 = plot((f*i),(g*i),show = False,xlim = (1,10.5),size = (9,5),legend = "hello")
        p1[0].line_color = 'r'
        p1.show()
    if i == 20:
        print(" f root of",i,"and g root of",i," ex. f**(1/i) Current Iteration:",i)
        p1 = plot((f**(1/i)),(g**(1/i)),show = False,ylim = (0,1.6),xlim = (1,10.5),size = (9,5),legend = True)
        p1[0].line_color = 'r'
        p1.show()
# -
| Personal_Projects/Multi-Function_Plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
from sklearn.linear_model import LinearRegression, Lasso, Ridge
from sklearn.preprocessing import PolynomialFeatures, RobustScaler
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.ensemble import BaggingRegressor
from sklearn.model_selection import cross_validate, cross_val_predict
from pyearth import Earth
from pygam import LinearGAM, GAM, f, s, te
# +
#This code prevents outputs from having the scroll down
# %%javascript
IPython.OutputArea.prototype._should_scroll = function(lines) {
return false;
}
# +
#importing the data and putting it into a pandas dataframe
df = pd.read_csv('../data/kc_house_data_clean.csv', index_col=0)
df.head(2)
# -
# This shows that the dataset has 20 columns and 21597 rows.
# Shows the different data types and that
# There are some missing values
df.info()
# This function shows the first 3 rows and last 3 rows in the dataframe
def peek(df):
    """Return the first 3 and last 3 rows of *df* as one DataFrame."""
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0 — pd.concat produces the identical result.
    return pd.concat([df.head(3), df.tail(3)])
peek(df)
# statistics. A few things can be noted.
# The maximum number of bedrooms in the dataset is 33.
# Some houses have less than 1 bathroom
df.describe().T
# Checking for null values.
# Waterfront, view and yr_renovated have missing values
df.isnull().sum()
df_water = df[df['waterfront'].isna() == True]
df_water[df_water['yr_renovated'].isna() == True]
# 19075 of the houses are not in the waterfront. 146 are on the waterfront.
df['waterfront'].value_counts()
# ### Handling missing values for waterfront and yr_renovated
# Since the model can not handle missing values and waterfront seemed to be an important feature, we did the following steps to handle the missing values in the waterfront.
# - Houses not in the waterfront will have a value 0
# - Houses where waterfront has null values will will have a value of 1.
# - Houses in the waterfront will have a value of 2
#
# We then created 2 columns with binary indicators:
# - 'waterfront_null' : in which 0 are the ones we know that have/don't have waterfront, while 1 are the ones we do not know (NaN values)
# - 'waterfront_ind': in which 1 represents the houses that we know that have a waterfront.
#
#
# A column called 'yr_renovated_schema1' was created in which:
# - 0 represents houses that have never been renovated
# - 1 represents houses with missing values, so we don't know if they have been renovated
# - 2 represents the houses that the data shows that they have been renovated.
#
# The columns with the binary indicators show:
# - 'yr_renovated_null': 1 shows all the houses that we do not know if they have been renovated and 0 represents all the other houses.
# - 'yr_renovated_ind': 1 shows houses that we know that have been renovated, while 0 represents all the other houses.
# Recode waterfront: known waterfront (1) -> 2, then NaN -> 1, known non-waterfront stays 0.
# NOTE(review): `df['col'].loc[idx] = ...` is chained assignment — it works here
# but triggers SettingWithCopyWarning; prefer df.loc[idx, 'col'] = ...
idx = df['waterfront'] == 1
df['waterfront'].loc[idx] = 2
df['waterfront'].value_counts()
idx = df['waterfront'].isna() == True
df['waterfront'].loc[idx] = 1
df['waterfront'].value_counts()
# Binary indicator: 1 where the waterfront status is unknown.
df['waterfront_null'] = df['waterfront'].apply(lambda x: 1 if x == 1 else 0)
df['waterfront_null'].value_counts()
# Binary indicator: 1 where the house is known to be on the waterfront.
df['waterfront_ind'] = df['waterfront'].apply(lambda x: 1 if x == 2 else 0)
df['waterfront_ind'].value_counts()
df['yr_renovated'].value_counts()
# +
# Recode renovation status: 0 = never renovated, 1 = unknown (NaN), 2 = renovated.
df['yr_renovated_scheme1'] = 0
idx = df['yr_renovated'] > 0
df['yr_renovated_scheme1'].loc[idx] = 2
idx = df['yr_renovated'].isna() == True
df['yr_renovated_scheme1'].loc[idx] = 1
df['yr_renovated_scheme1'].value_counts()
# -
# Binary indicator: 1 where the renovation status is unknown.
df['yr_renovated_null'] = df['yr_renovated_scheme1'].apply(lambda x: 1 if x == 1 else 0)
df['yr_renovated_null'].value_counts()
# Binary indicator: 1 where the house is known to have been renovated.
df['yr_renovated_ind'] = df['yr_renovated_scheme1'].apply(lambda x: 1 if x == 2 else 0)
df['yr_renovated_ind'].value_counts()
# Shows the correlation of each feature with the target variable.
df.corr()['price'].abs().sort_values(ascending=False)
df.shape
sns.scatterplot(df['long'],df['lat'])
# Some of the datapoints are farther away from the main area of interest, so they are going to be removed.
df[df['long'] < -121.85]
df = df[df['long'] < -121.85]
df.shape
df.isnull().sum()
# dropping the yr_renovated column and checking for other missing values.
del df['yr_renovated']
df.isnull().sum()
# This shows that the view column is a categorical column
df['view'].value_counts()
df[df.view.isna() == True]
df[df.view.isna() == False]
#Since there were only 62 missing values for the view, we decided to drop them.
df = df[df.view.isna() == False]
# checking to see how many houses have 33 bedrooms. We decided to drop.
df[df.bedrooms == 33]
df = df[df.bedrooms != 33]
df.columns
# Inspecting how many datapoints have '?' on the sqft_basement
df[df.sqft_basement == '?']
# Changing '?' in the basement with the difference between sqft_living and sqft_above
idx = df['sqft_basement'] == '?'
df['sqft_basement'].loc[idx] = df['sqft_living'].loc[idx] - df['sqft_above'].loc[idx]
df[df['sqft_basement'] == '?']
# Sanity check: no imputed basement area should be negative.
np.sum(df['sqft_basement'].loc[idx] < 0)
# Preview of the conversion (notebook display only); the assignment follows.
df.sqft_basement.astype(float).astype(int)
df.sqft_basement = df.sqft_basement.astype(float).astype(int)
df.columns
# +
#This function helps check for outliers in the features of interest.
boxplot_cols = ['price', 'bedrooms', 'bathrooms', 'sqft_living', 'sqft_lot',
       'floors', 'waterfront', 'view', 'condition', 'grade', 'sqft_above',
       'sqft_basement', 'yr_built', 'zipcode', 'lat', 'long', 'sqft_living15',
       'sqft_lot15', 'waterfront_null', 'waterfront_ind',
       'yr_renovated_scheme1', 'yr_renovated_null', 'yr_renovated_ind']
# left out sqft_basement
def print_boxplot(df):
    # Draw one boxplot per column listed in the module-level boxplot_cols.
    for c in df[boxplot_cols]:
        sns.boxplot(df[c])
        plt.show()
print_boxplot(df)
# -
# ls
# Persist the cleaned dataset for the modeling notebooks.
df.to_csv('kc_house_data_clean.csv')
# ls
df.shape
| notebooks/clean_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Dependencies
from splinter import Browser
from bs4 import BeautifulSoup
import pandas as pd
import requests
from webdriver_manager.chrome import ChromeDriverManager
# Launch a visible Chrome session driven by splinter.
executable_path = {'executable_path': ChromeDriverManager().install()}
browser = Browser('chrome', **executable_path, headless=False)
# URL that we need to scrape
news_url = 'https://mars.nasa.gov/news/'
browser.visit(news_url)
# Create Beautiful Soup object
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
# +
# Get title and body paragraph
# NOTE(review): positional indexes ([1], [0]) depend on the page layout at
# scrape time — verify if the site markup changes.
mars_title = soup.find_all('div', class_='content_title')[1].text
body_par = soup.find_all('div', class_='article_teaser_body')[0].text
print(mars_title)
print(body_par)
# -
# image url
# NOTE(review): the jpl.nasa.gov/spaceimages URL may have been retired —
# confirm the endpoint still serves this page.
image_source = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(image_source)
# +
# Create Beautiful Soup object
html = browser.html
soup_image = BeautifulSoup(html, 'html.parser')
# Get image url
# NOTE(review): the third <img> on the page is assumed to be the featured image.
path = soup_image.find_all('img')[2]['src']
print(path)
# -
# table url
table_source = 'https://space-facts.com/mars/'
browser.visit(table_source)
# Create Beautiful Soup object
html = browser.html
table_soup = BeautifulSoup(html, 'html.parser')
# print all scraped tables
table = pd.read_html(table_source)
table
final_table = table[2]
final_table
#Convert table to html
table_to_html = final_table.to_html()
table_to_html
print(table_to_html)
#Scrape Hemispheres
hemisphere_url = 'https://marshemispheres.com/'
browser.visit(hemisphere_url)
# Create Beautiful Soup object
html = browser.html
hemisphere_soup = BeautifulSoup(html, 'html.parser')
# Collect hemisphere names from the <h3> headings, stripping the
# " Enhanced" suffix and the "Back" navigation link.
titles=hemisphere_soup.find_all('h3')
titles[:]=(title.text for title in titles)
titles[:]=(title.split(" Enhanced")[0] for title in titles)
titles.remove('Back')
print(titles)
# +
# Scrape Images
# Visit each hemisphere's detail page and grab the full-resolution image link.
hemisphere_image=[]
for title in titles:
    browser.visit(hemisphere_url)
    browser.links.find_by_partial_text(title).click()
    html = browser.html
    soup = BeautifulSoup(html, 'html.parser')
    img_url=soup.find('div',class_='downloads').ul.li.a['href']
    print(hemisphere_url+img_url)
    hemisphere_image.append({"title": title, "final_url": hemisphere_url + img_url})
print(hemisphere_image)
# -
# put everything into one dictionary
mars_dictionary = {'news_title' : mars_title, 'body_par' : body_par, 'path' : path, 'table_to_html': table_to_html,
                  'hemisphere_image': hemisphere_image}
mars_dictionary
#End session
browser.quit()
| .ipynb_checkpoints/mission_to_mars-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="l5jZ2M5eU4ie"
# # Prognostics and Health Management NASA Turbofan Engine
# + [markdown] id="uKr5XBon_pm2"
# ##### Testing in progress
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
from keras.layers import LSTM, Dropout
from keras.models import Sequential
from keras.layers import Dense
import keras.backend as K
from keras.callbacks import EarlyStopping
from keras.optimizer_v2 import adam
import os
from statsmodels.tsa import ar_model, arma_mle, arima_model
# -
# #### Plotting the sensor signal
# +
# NOTE(review): hard-coded Windows path to a per-engine sensor spreadsheet —
# make this configurable before reuse.
FD002_train_03_HPC_Outlet_Temp_en5 = 'C:/Users/jhj/Desktop/NASA_Turbofan_Engine_excel_data/센서별 & 엔진별 분할 데이터 모음/train/FD002/03. HPC Outlet Temp/5번 엔진.xlsx'
df = pd.read_excel(FD002_train_03_HPC_Outlet_Temp_en5)
# Plot the HPC outlet temperature against cycle index.
x = np.arange(len(df['Time']))
plt.plot(x,df['HPC Outlet Temp'])
# -
# ### Autoregression-based (AR model) prediction
#
# #### AR-model-based prediction model
# +
# BUG FIX: statsmodels.tsa.ar_model has never exported a lowercase `ar`;
# the autoregressive model class is `AutoReg` (the legacy `AR` class was
# removed in statsmodels 0.13).
from statsmodels.tsa.ar_model import AutoReg
from math import sqrt
from sklearn.metrics import mean_squared_error
def plotar(series):
    """Fit an AR model to `series` and plot its predictions (work in progress)."""
    # Split into training and test data
    train_size = 100
# -
| 02 .Predict Health Management/PHM Turbofan Engine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="npzJ574a6A94" colab_type="text"
# **Reinforcement Learning with TensorFlow & TRFL: Actor-Critic Networks**
#
# Outline:
# 1. Actor-Critic Networks
# * Discrete A2C TRFL Loss: trfl.sequence_advantage_actor_critic_loss()
# * Continuous A2C TRFL Loss: trfl.sequence_a2c_loss()
#
#
#
# + id="RyxlWytnVqJI" colab_type="code" outputId="42c5522d-2cbe-4596-f870-982ef524dc30" colab={"base_uri": "https://localhost:8080/", "height": 328}
#TRFL works with TensorFlow 1.12
#installs TensorFlow version 1.12 then restarts the runtime
# !pip install tensorflow==1.12
import os
os.kill(os.getpid(), 9)
# + id="XRS56AQDVybG" colab_type="code" outputId="540fc7c4-569a-48fc-f71a-083c00012f3a" executionInfo={"status": "ok", "timestamp": 1555642033701, "user_tz": 240, "elapsed": 37267, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} colab={"base_uri": "https://localhost:8080/", "height": 191}
#install tensorflow-probability 0.5.0 that works with TensorFlow 1.12
# !pip install tensorflow-probability==0.5.0
#install TRFL
# !pip install trfl==1.0
#install box2d for LunarLanding env
# !pip install box2d-py
# + id="SGop2a_BZCBl" colab_type="code" colab={}
import gym
import tensorflow as tf
import trfl
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_probability as tfp
# + [markdown] id="VkC2bii3Zi9_" colab_type="text"
# ** Actor-Critic Architecture **
#
# Actor-critic (AC) combines PG and state value methods. Actor-critic has an actor network for policies and a critic network for estimating state values. The environment produces an observation of the state. The state is inputted into the policy network to select an action. The agent receives a reward and next state. The agent uses the critic network, reward and next state to evaluate the action. The actor and critic networks are updated using that evaluation, typically with gradient descent. The process is similar to REINFORCE with baselines the key difference being the critic network bootstraps using the next state.
#
# ** Example 1: Discrete AC **
# + colab_type="code" id="O043zMg4F5QG" colab={}
# set up Actor and Critic networks
class ActorCriticNetwork:
def __init__(self, name, obs_size=2, action_size=2, actor_hidden_size=32, ac_learning_rate=0.001,
entropy_cost=0.01, normalise_entropy=True, lambda_=0., baseline_cost=1.):
with tf.variable_scope(name):
# hyperparameter bootstrap_n determines the batch size
self.name=name
self.input_ = tf.placeholder(tf.float32, [None, obs_size], name='inputs')
self.action_ = tf.placeholder(tf.int32, [None, 1], name='action')
self.reward_ = tf.placeholder(tf.float32, [None, 1], name='reward')
self.discount_ = tf.placeholder(tf.float32, [None, 1], name='discount')
self.bootstrap_ = tf.placeholder(tf.float32, [None], name='bootstrap')
# set up actor network
self.fc1_actor_ = tf.contrib.layers.fully_connected(self.input_, actor_hidden_size, activation_fn=tf.nn.elu)
self.fc2_actor_ = tf.contrib.layers.fully_connected(self.fc1_actor_, actor_hidden_size, activation_fn=tf.nn.elu)
self.fc3_actor_ = tf.contrib.layers.fully_connected(self.fc2_actor_, action_size, activation_fn=None)
# reshape the policy logits
self.policy_logits_ = tf.reshape(self.fc3_actor_, [-1, 1, action_size] )
# generate action probabilities for taking actions
self.action_prob_ = tf.nn.softmax(self.fc3_actor_)
# set up critic network
self.fc1_critic_ = tf.contrib.layers.fully_connected(self.input_, critic_hidden_size, activation_fn=tf.nn.elu)
self.fc2_critic_ = tf.contrib.layers.fully_connected(self.fc1_critic_, critic_hidden_size, activation_fn=tf.nn.elu)
self.baseline_ = tf.contrib.layers.fully_connected(self.fc2_critic_, 1, activation_fn=None)
# TRFL usage
self.seq_aac_return_ = trfl.sequence_advantage_actor_critic_loss(self.policy_logits_, self.baseline_, self.action_,
self.reward_, self.discount_, self.bootstrap_, lambda_=lambda_, entropy_cost=entropy_cost,
baseline_cost=baseline_cost, normalise_entropy=normalise_entropy)
# Optimize the loss
self.ac_loss_ = tf.reduce_mean(self.seq_aac_return_.loss)
self.ac_optim_ = tf.train.AdamOptimizer(learning_rate=ac_learning_rate).minimize(self.ac_loss_)
def get_network_variables(self):
return [t for t in tf.trainable_variables() if t.name.startswith(self.name)]
# + [markdown] colab_type="text" id="WQ7FL-QsF5QL"
# ** TRFL Usage: Discrete AC with trfl.sequence_advantage_actor_critic_loss **
#
# Sequence advantage actor critic loss is the discrete implementation of A2C loss. The policy logits and action arguments are the same as other PG usage we have seen this section. The arguments: baseline, reward, discount, and bootstrap, and lambda are used to calculate the advantage. Entropy cost and normalize entropy is the trfl.discrete_policy_entropy_loss function from last video. baseline_cost is an optional argument that lets you scale the derivative amount between the actor network and the critic network.
#
# Since trfl.sequence_advantage_actor_critic_loss() uses trfl.discrete_policy_gradient_loss() we need to reshape the policy_logits before inputting them into the tensor.
#
#
# + colab_type="code" id="zR9nUJhCF5QO" colab={}
# hyperparameters
train_episodes = 5000
discount = 0.99
actor_hidden_size = 32
critic_hidden_size = 32
ac_learning_rate = 0.0005
baseline_cost = 10. #scale derivatives between actor and critic networks
# entropy hyperparameters
entropy_cost = 0.005
normalise_entropy = True
# one step returns ie TD(0). Section 4 will cover multi-step returns
lambda_ = 0.
seed = 31
env = gym.make('LunarLander-v2')
env.seed(seed)
np.random.seed(seed)
action_size = env.action_space.n
obs_size = env.observation_space.shape[0]
tf.reset_default_graph()
tf.set_random_seed(seed)
ac_net = ActorCriticNetwork(name='ac_net', obs_size=obs_size, action_size=action_size, actor_hidden_size=actor_hidden_size,
ac_learning_rate=ac_learning_rate, entropy_cost=entropy_cost, normalise_entropy=normalise_entropy,
lambda_=lambda_, baseline_cost=baseline_cost)
ac_target_net = ActorCriticNetwork(name='ac_target_net', obs_size=obs_size, action_size=action_size, actor_hidden_size=actor_hidden_size,
ac_learning_rate=ac_learning_rate, entropy_cost=entropy_cost, normalise_entropy=normalise_entropy,
lambda_=lambda_, baseline_cost=baseline_cost)
target_network_update_op = trfl.update_target_variables(ac_target_net.get_network_variables(),
ac_net.get_network_variables(), tau=0.001)
# + [markdown] colab_type="text" id="f4rKbj9lF5QT"
# ** TRFL Usage **
#
# We create a target net and use trfl.update_target_variables as we saw in Section 2. We enter a lambda_ value for the trfl.td_lambda function which is called internally. Next section we'll go over multi-step bootstrapping with trfl.td_lambda().
# + colab_type="code" outputId="b16935ef-3e6a-466d-ff68-1902f9be85a4" executionInfo={"status": "ok", "timestamp": 1555645143207, "user_tz": 240, "elapsed": 3141651, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} id="0BvNxAPGF5QW" colab={"base_uri": "https://localhost:8080/", "height": 703}
stats_rewards_list = []
stats_every = 10
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    for ep in range(1, train_episodes):
        total_reward, ep_length, done = 0, 0, 0
        stats_actor_loss, stats_critic_loss = 0., 0.
        total_loss_list, action_list, action_prob_list, bootstrap_list = [], [], [], []
        # Observations are clipped to [-1, 1] before being fed to the nets.
        state = np.clip(env.reset(), -1., 1.)
        # Early stopping once the 10-episode average reward exceeds 200.
        if len(stats_rewards_list) > 10 and np.mean(stats_rewards_list[-10:],axis=0)[1] > 200:
            print("Stopping at episode {} with average rewards of {} in last 10 episodes".
                  format(ep,np.mean(stats_rewards_list[-10:],axis=0)[1]))
            break
        while not done:
            # generate action probabilities from policy net and sample from the action probs
            action_probs = sess.run(ac_net.action_prob_, feed_dict={ac_net.input_: np.expand_dims(state,axis=0)})
            action_probs = action_probs[0]
            action = np.random.choice(np.arange(len(action_probs)), p=action_probs)
            next_state, reward, done, info = env.step(action)
            next_state = np.clip(next_state,-1.,1.)
            total_reward += reward
            #reward *= .01
            ep_length += 1
            # Terminal states bootstrap with 0; otherwise use the TARGET
            # critic's value estimate for the next state.
            if done:
                bootstrap_value = np.zeros((1,),dtype=np.float32)
            else:
                #get bootstrap value
                bootstrap_value = sess.run(ac_target_net.baseline_, feed_dict={
                    ac_target_net.input_: np.expand_dims(next_state, axis=0)})
            #train network: one gradient step on a single-step A2C loss
            _, total_loss, seq_aac_return = sess.run([ac_net.ac_optim_, ac_net.ac_loss_, ac_net.seq_aac_return_], feed_dict={
                ac_net.input_: np.expand_dims(state, axis=0),
                ac_net.action_: np.reshape(action, (-1, 1)),
                ac_net.reward_: np.reshape(reward, (-1, 1)),
                ac_net.discount_: np.reshape(discount, (-1, 1)),
                ac_net.bootstrap_: np.reshape(bootstrap_value, (1,))})  #np.expand_dims(bootstrap_value, axis=0)
            total_loss_list.append(total_loss)
            #update target network (soft update toward the online net)
            sess.run(target_network_update_op)
            #some useful things for debuggin
            stats_actor_loss += np.mean(seq_aac_return.extra.policy_gradient_loss)
            stats_critic_loss += np.mean(seq_aac_return.extra.baseline_loss)
            action_list.append(action)
            bootstrap_list.append(bootstrap_value)
            action_prob_list.append(action_probs)
            # Abort hopeless episodes early.
            if total_reward < -250:
                done = 1
            if done:
                if ep % stats_every == 0:
                    print('Episode: {}'.format(ep),
                          'Total reward: {:.1f}'.format(np.mean(stats_rewards_list[-stats_every:],axis=0)[1]),
                          'Ep length: {:.1f}'.format(np.mean(stats_rewards_list[-stats_every:],axis=0)[2]),
                          'Loss: {:4f}'.format(np.mean(total_loss_list)))
                          #'Actor loss: {:.5f}'.format(stats_actor_loss),
                          #'Critic loss: {:.5f}'.format(stats_critic_loss))
                #print(np.mean(bootstrap_value), np.mean(action_list), np.mean(action_prob_list,axis=0))
                stats_actor_loss, stats_critic_loss = 0, 0
                total_loss_list = []
                stats_rewards_list.append((ep, total_reward, ep_length))
                break
            state = next_state
# + colab_type="code" outputId="6dfdc4b4-3ed0-4610-90a3-1079f6040c92" executionInfo={"status": "ok", "timestamp": 1555645144126, "user_tz": 240, "elapsed": 3141103, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} id="iXfSv2QOF5Qd" colab={"base_uri": "https://localhost:8080/", "height": 300}
# %matplotlib inline
def running_mean(x, N):
    """Return the length-N simple moving average of x (valid windows only)."""
    totals = np.cumsum(np.insert(x, 0, 0))
    window_sums = totals[N:] - totals[:-N]
    return window_sums / N
# Plot per-episode rewards (grey) with their 10-episode moving average overlaid.
eps, rews, lens = np.array(stats_rewards_list).T
smoothed_rews = running_mean(rews, 10)
plt.plot(eps[-len(smoothed_rews):], smoothed_rews)
plt.plot(eps, rews, color='grey', alpha=0.3)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
# + [markdown] id="c6q3srEifrr0" colab_type="text"
# ** Example 2: Continuous Actor-Critic **
# + colab_type="code" id="9bgMnSd8xWxP" colab={}
# set up Actor and Critic networks
class ActorCriticNetwork:
    """Continuous-action actor-critic network.

    The actor outputs the mean and scale of a diagonal Gaussian policy;
    sampled actions are clipped to [action_low, action_high]. The critic is a
    small MLP producing a state-value baseline. Both are trained jointly via
    trfl.sequence_a2c_loss over length-1 sequences.
    """
    def __init__(self, name, action_low, action_high, obs_size=2, action_size=2, actor_hidden_size=32, ac_learning_rate=0.001,
                 entropy_cost=0.01, entropy_scale_op=None, lambda_=0., baseline_cost=1., critic_hidden_size=32):
        # `critic_hidden_size` is now an explicit parameter (default 32, the
        # value the notebook used) instead of silently reading a module-level
        # global of the same name.
        with tf.variable_scope(name):
            # Placeholders for a batch of length-1 transition sequences.
            self.input_ = tf.placeholder(tf.float32, [None, obs_size], name='inputs')
            self.action_ = tf.placeholder(tf.float32, [None, 1, action_size], name='action')
            self.reward_ = tf.placeholder(tf.float32, [None, 1], name='reward')
            self.discount_ = tf.placeholder(tf.float32, [None, 1], name='discount')
            self.bootstrap_ = tf.placeholder(tf.float32, [None], name='bootstrap')
            self.name=name
            #set up actor network: one head for the Gaussian mean, one for the scale
            self.fc1_mu_ = tf.contrib.layers.fully_connected(self.input_, actor_hidden_size, activation_fn=tf.nn.leaky_relu)
            self.fc2_mu_ = tf.contrib.layers.fully_connected(self.fc1_mu_, actor_hidden_size, activation_fn=tf.nn.leaky_relu)
            self.fc3_mu_ = tf.contrib.layers.fully_connected(self.fc2_mu_, action_size, activation_fn=tf.nn.tanh)
            self.fc1_scale_ = tf.contrib.layers.fully_connected(self.input_, actor_hidden_size, activation_fn=tf.nn.leaky_relu)
            self.fc2_scale_ = tf.contrib.layers.fully_connected(self.fc1_scale_, actor_hidden_size, activation_fn=tf.nn.leaky_relu)
            self.fc3_scale_ = tf.contrib.layers.fully_connected(self.fc2_scale_, action_size, activation_fn=tf.nn.sigmoid)
            # keep the scale in (0, 0.5] plus an epsilon so it never collapses to 0
            self.scale_ = self.fc3_scale_*0.5 + 1e-5
            self.distribution_ = tfp.distributions.MultivariateNormalDiag(loc=self.fc3_mu_, scale_diag=self.scale_)
            # reshape so the distribution batch matches [batch, sequence=1]
            self.distribution_shaped_ = tfp.distributions.BatchReshape(distribution=self.distribution_, batch_shape=[-1,1])
            #generate actions by sampling from distributions and clipping them
            self.actions_scaled_ = tf.clip_by_value(self.distribution_shaped_.sample(), action_low, action_high)
            #set up critic network
            self.fc1_critic_ = tf.contrib.layers.fully_connected(self.input_, critic_hidden_size, activation_fn=tf.nn.leaky_relu)
            self.fc2_critic_ = tf.contrib.layers.fully_connected(self.fc1_critic_, critic_hidden_size, activation_fn=tf.nn.leaky_relu)
            self.baseline_ = tf.contrib.layers.fully_connected(self.fc2_critic_, 1, activation_fn=None)
            #TRFL usage: continuous A2C loss (policy gradient + baseline + entropy)
            self.seq_aac_return_ = trfl.sequence_a2c_loss(self.distribution_shaped_, self.baseline_, self.action_,
                                        self.reward_, self.discount_, self.bootstrap_, lambda_=lambda_, entropy_cost=entropy_cost,
                                        baseline_cost=baseline_cost, entropy_scale_op=entropy_scale_op)
            #Optimize the loss
            self.ac_loss_ = tf.reduce_mean(self.seq_aac_return_.loss)
            self.ac_optim_ = tf.train.AdamOptimizer(learning_rate=ac_learning_rate).minimize(self.ac_loss_)

    def get_network_variables(self):
        """Return this network's trainable fully-connected layer variables."""
        return [t for t in tf.trainable_variables() if t.name.startswith(self.name+"/fully_connected_")]
# + [markdown] colab_type="text" id="zQrKgeQBxWxW"
# ** TRFL Usage: Continuous AC with trfl.sequence_a2c_loss **
#
# Sequence A2C loss is the continuous A2C loss. The first argument is the policy distribution. The arguments: baseline, reward, discount, and bootstrap, and lambda are used to calculate the advantage. The entropy arguments are the same and used the same way as in trfl.policy_entropy_loss. baseline cost is an optional argument that lets you scale the derivative amount between the actor network and the critic network.
#
#
#
#
#
# + colab_type="code" id="7vva4Rr-xWxX" colab={"base_uri": "https://localhost:8080/", "height": 1271} outputId="9c79c54e-274a-4362-be53-e2457387a1df" executionInfo={"status": "ok", "timestamp": 1555645156424, "user_tz": 240, "elapsed": 3151509, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}}
# hyperparameters
train_episodes = 5000
discount = 0.99
actor_hidden_size = 32
critic_hidden_size = 32   # read by ActorCriticNetwork's critic layers
ac_learning_rate = 0.0005
baseline_cost = 3. #scale derivatives between actor and critic networks
# entropy hyperparameters
entropy_cost = 0.005
entropy_scale_op = None
# one step returns ie TD(0). Section 4 will cover multi-step returns
lambda_ = 0.
seed = 31
# Continuous-control environment: 2D actions, each component in [-1, 1].
env = gym.make('LunarLanderContinuous-v2')
env.seed(seed)
np.random.seed(seed)
action_size = env.action_space.shape[0]
obs_size = env.observation_space.shape[0]
tf.reset_default_graph()
tf.set_random_seed(seed)
# Online network (trained) and target network (soft-updated, for bootstraps).
ac_net = ActorCriticNetwork(name='ac_net', action_low=env.action_space.low[0], action_high=env.action_space.high[0],
                   obs_size=obs_size, action_size=action_size, actor_hidden_size=actor_hidden_size,
                   ac_learning_rate=ac_learning_rate, entropy_cost=entropy_cost, entropy_scale_op=entropy_scale_op,
                   lambda_=lambda_, baseline_cost=baseline_cost)
ac_target_net = ActorCriticNetwork(name='ac_target_net', action_low=env.action_space.low[0],
                          action_high=env.action_space.high[0],
                          obs_size=obs_size, action_size=action_size, actor_hidden_size=actor_hidden_size,
                          ac_learning_rate=ac_learning_rate, entropy_cost=entropy_cost, entropy_scale_op=entropy_scale_op,
                          lambda_=lambda_, baseline_cost=baseline_cost)
# Soft update op: target <- tau * online + (1 - tau) * target, tau = 0.001.
target_network_update_op = trfl.update_target_variables(ac_target_net.get_network_variables(),
                                                        ac_net.get_network_variables(), tau=0.001)
# + colab_type="code" outputId="b1336bfb-9d2a-4928-db6c-c0636f666a1f" executionInfo={"status": "ok", "timestamp": 1555646272242, "user_tz": 240, "elapsed": 4266241, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} id="PThymA_vxWxc" colab={"base_uri": "https://localhost:8080/", "height": 566}
# Training loop (continuous actions): identical structure to the discrete
# loop, but the action is sampled (and clipped) inside the TF graph.
stats_rewards_list = []  # (episode, total_reward, episode_length) per finished episode
stats_every = 10         # console-report interval in episodes
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    for ep in range(1, train_episodes):
        total_reward, ep_length, done = 0, 0, 0
        total_loss_list = []
        state = np.clip(env.reset(), -1., 1.)  # observations clipped to [-1, 1]
        # Early stopping: considered solved when the trailing 10-episode mean reward > 200.
        if len(stats_rewards_list) > 10 and np.mean(stats_rewards_list[-10:],axis=0)[1] > 200:
            print("Stopping at episode {} with average rewards of {} in last 10 episodes".
                  format(ep,np.mean(stats_rewards_list[-10:],axis=0)[1]))
            break
        while not done:
            # generate action probabilities from policy net and sample from the action probs
            action = sess.run(ac_net.actions_scaled_, feed_dict={ac_net.input_: np.expand_dims(state,axis=0)})
            next_state, reward, done, info = env.step(action[0][0])
            next_state = np.clip(next_state,-1.,1.)
            total_reward += reward
            #reward *= .01
            ep_length += 1
            if done:
                # terminal state: bootstrap with zero value
                bootstrap_value = np.zeros((1,),dtype=np.float32)
            else:
                #get bootstrap value from the (more stable) target network
                bootstrap_value = sess.run(ac_target_net.baseline_, feed_dict={
                  ac_target_net.input_: np.expand_dims(next_state, axis=0)
                })
            #train network on this single transition (sequence length 1)
            _, total_loss, = sess.run([ac_net.ac_optim_, ac_net.ac_loss_], feed_dict={
              ac_net.input_: np.expand_dims(state, axis=0),
              ac_net.action_: action,
              ac_net.reward_: np.reshape(reward, (-1, 1)),
              ac_net.discount_: np.reshape(discount, (-1, 1)),
              ac_net.bootstrap_: np.reshape(bootstrap_value, (1,))
            })
            total_loss_list.append(total_loss)
            #update target network (soft update towards the online net)
            sess.run(target_network_update_op)
            # abort clearly hopeless episodes early
            if total_reward < -250:
                done = 1
            if done:
                if ep % stats_every == 0:
                    print('Episode: {}'.format(ep),
                          'Total reward: {:.1f}'.format(np.mean(stats_rewards_list[-stats_every:],axis=0)[1]),
                          'Ep length: {:.1f}'.format(np.mean(stats_rewards_list[-stats_every:],axis=0)[2]),
                          'Total loss: {:.4f}'.format(np.mean(total_loss_list)))
                total_loss_list = []
                stats_rewards_list.append((ep, total_reward, ep_length))
                break
            state = next_state
# + colab_type="code" outputId="81f5b334-0ff1-4a0c-ee5c-b92bd6625678" executionInfo={"status": "ok", "timestamp": 1555646273128, "user_tz": 240, "elapsed": 4265697, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "09166577195279766198"}} id="MlOI025IxWxk" colab={"base_uri": "https://localhost:8080/", "height": 300}
# %matplotlib inline
def running_mean(x, N):
    """Smooth x with a window-N moving average; returns len(x)-N+1 values."""
    padded = np.insert(x, 0, 0)
    csum = np.cumsum(padded)
    return (csum[N:] - csum[:-N]) / N
# Plot per-episode rewards (grey) with their 10-episode moving average overlaid.
eps, rews, lens = np.array(stats_rewards_list).T
smoothed_rews = running_mean(rews, 10)
plt.plot(eps[-len(smoothed_rews):], smoothed_rews)
plt.plot(eps, rews, color='grey', alpha=0.3)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
# + id="XJ45B9Axt2Kh" colab_type="code" colab={}
| Section 3/Actor-Critic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# Multiclass classification with GPflow
# --
#
# *<NAME> and <NAME>, 2016*
from __future__ import print_function
import GPflow
import tensorflow as tf
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# %matplotlib inline
# +
#make a one dimensional classification problem
np.random.seed(1)
X = np.random.rand(100,1)
# Squared-exponential kernel matrix (lengthscale^2 = 0.01) plus jitter.
K = np.exp(-0.5*np.square(X - X.T)/0.01) + np.eye(100)*1e-6
# Draw three correlated latent functions from the GP prior.
f = np.dot(np.linalg.cholesky(K), np.random.randn(100,3))
plt.figure(figsize=(12,6))
plt.plot(X, f, '.')
# -
# Class label = index of the largest latent function at each input.
Y = np.argmax(f, 1).reshape(-1,1)
# ### Sparse Variational Gaussian approximation
# Sparse variational GP with 20 inducing points (every 5th input).
m = GPflow.svgp.SVGP(X, Y,
                     kern=GPflow.kernels.Matern32(1) + GPflow.kernels.White(1, variance=0.01),
                     likelihood=GPflow.likelihoods.MultiClass(3),
                     Z=X[::5].copy(), num_latent=3, whiten=True, q_diag=True)
# Fix the white-noise variance and inducing locations during optimisation.
m.kern.white.variance.fixed = True
m.Z.fixed = True
_ = m.optimize()
# +
def plot(m):
    """Visualise a fitted multiclass GP model.

    Three stacked panels: latent function means with +/-2 sd bands (bottom),
    predicted class probabilities (middle), and the training inputs coloured
    by class label (top strip).
    """
    f = plt.figure(figsize=(12,6))
    a1 = f.add_axes([0.05, 0.05, 0.9, 0.6])   # latent functions
    a2 = f.add_axes([0.05, 0.7, 0.9, 0.1])    # class probabilities
    a3 = f.add_axes([0.05, 0.85, 0.9, 0.1])   # data strip
    xx = np.linspace(m.X.value.min(), m.X.value.max(), 200).reshape(-1,1)
    mu, var = m.predict_f(xx)
    mu, var = mu.copy(), var.copy()
    p, _ = m.predict_y(xx)
    # hide ticks on the data strip (was duplicated twice in the original)
    a3.set_xticks([])
    a3.set_yticks([])
    for i in range(m.likelihood.num_classes):
        x = m.X.value[m.Y.value.flatten()==i]
        points, = a3.plot(x, x*0, '.')
        color=points.get_color()
        a1.plot(xx, mu[:,i], color=color, lw=2)
        a1.plot(xx, mu[:,i] + 2*np.sqrt(var[:,i]), '--', color=color)
        a1.plot(xx, mu[:,i] - 2*np.sqrt(var[:,i]), '--', color=color)
        a2.plot(xx, p[:,i], '-', color=color, lw=2)
    a2.set_ylim(-0.1, 1.1)
    a2.set_yticks([0, 1])
    a2.set_xticks([])
plot(m)
print(m.kern)
# ### Sparse MCMC
# Sparse GP with MCMC over both latent values and kernel hyperparameters.
m = GPflow.sgpmc.SGPMC(X, Y,
                       kern=GPflow.kernels.Matern32(1, lengthscales=0.1) + GPflow.kernels.White(1, variance=0.01),
                       likelihood=GPflow.likelihoods.MultiClass(3),
                       Z=X[::5].copy(), num_latent=3)
# Gamma priors on the Matern hyperparameters; white-noise variance held fixed.
m.kern.matern32.variance.prior = GPflow.priors.Gamma(1.,1.)
m.kern.matern32.lengthscales.prior = GPflow.priors.Gamma(2.,2.)
m.kern.white.variance.fixed = True
# Short deterministic optimisation to find a good starting point, then HMC.
_ = m.optimize(maxiter=10)
samples = m.sample(500, verbose=True, epsilon=0.04, Lmax=15)
# +
def plot_from_samples(m, samples):
    """Plot posterior draws from MCMC samples of the model state.

    Bottom panel: latent function draws; middle: class-probability draws;
    top strip: training inputs coloured by class. Samples are burned-in by
    100 and thinned by 10 before plotting.
    """
    f = plt.figure(figsize=(12,6))
    a1 = f.add_axes([0.05, 0.05, 0.9, 0.6])   # latent functions
    a2 = f.add_axes([0.05, 0.7, 0.9, 0.1])    # class probabilities
    a3 = f.add_axes([0.05, 0.85, 0.9, 0.1])   # data strip
    xx = np.linspace(m.X.value.min(), m.X.value.max(), 200).reshape(-1,1)
    Fpred, Ypred = [], []
    for s in samples[100::10]: # burn 100, thin 10
        # load this sample's parameter vector into the model before predicting
        m.set_state(s)
        Ypred.append(m.predict_y(xx)[0])
        Fpred.append(m.predict_f_samples(xx, 1).squeeze())
    for i in range(m.likelihood.num_classes):
        x = m.X.value[m.Y.value.flatten()==i]
        points, = a3.plot(x, x*0, '.')
        color = points.get_color()
        for F in Fpred:
            a1.plot(xx, F[:,i], color=color, lw=0.2, alpha=1.0)
        for Y in Ypred:
            a2.plot(xx, Y[:,i], color=color, lw=0.5, alpha=1.0)
    a2.set_ylim(-0.1, 1.1)
    a2.set_yticks([0, 1])
    a2.set_xticks([])
    a3.set_xticks([])
    a3.set_yticks([])
plot_from_samples(m, samples)
# -
# Posterior histogram of the Matern32 lengthscale across MCMC samples.
df = m.get_samples_df(samples)
df.head()
# NOTE(review): `normed=True` is deprecated in newer matplotlib — `density=True`
# is the modern equivalent; confirm the matplotlib version before changing.
_ = plt.hist(np.vstack(df['model.kern.matern32.lengthscales']).flatten(), 50, normed=True)
plt.xlabel('lengthscale')
| doc/source/notebooks/multiclass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# +
import pandas as pd
import geopandas as gpd
import rasterio
from rasterio.merge import merge
from rasterio.plot import show
from Download_functions import create_filenames, get_tiles, create_links
# +
# Exploratory mosaicking of GFC tiles clipped to a study-area boundary.
gridPath = "../inputs/gfc_tiles.shp"
boundaryPath = "../inputs/MadreDeDios_buffer0.05.shp"
downloadPath = "../downloads/"
boundary = gpd.read_file(boundaryPath)
tiles = get_tiles(boundary, gridPath)
# -
names = create_filenames(tiles, 2019, "last")
# Open every downloaded tile; datasets are kept open for merge() below.
files = []
for name in names:
    path = downloadPath + name
    src = rasterio.open(path)
    files.append(src)
files
# +
# Merge the tiles, restricted to the boundary's bounding box.
mosaic, out_trans = merge(files, bounds=boundary.geometry[0].bounds)
# -
mosaic
# +
from matplotlib import pyplot#pyplot.imshow(mosaic[0:3], cmap='terrain')))
# -
mosaic[0:3].shape
# +
from rasterio.mask import mask
# NOTE(review): this loop discards its results — `file, trans = mask(...)`
# rebinds the loop variable only, so `files` is unchanged afterwards; the
# masked arrays are never stored. Presumably the intent was to collect them.
for file in files:
    file, trans = mask(file, boundary.geometry, crop=True)
# %%
from rasterio.merge import merge
mosaic, out_trans = rasterio.merge.merge(files)
# %%
from rasterio.mask import mask
from rasterio.plot import show
import matplotlib
# NOTE(review): rasterio.mask.mask expects an open dataset, not a numpy
# array like `mosaic`, and it returns an (array, transform) tuple — this
# cell likely fails as written; verify before reuse.
mosaic = mask(mosaic, boundary.geometry, crop=True)
show(mosaic)
# %%
| notebooks/exploratory/2.0_jgcb_prepare_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fine Food Recommender System
# Dataset: Amazon Fine Foods Reviews.
#
# Source: https://www.kaggle.com/snap/amazon-fine-food-reviews
#
# Author: <NAME>.
#
# Description: "This dataset consists of reviews of fine foods from amazon. The data span a period of more than 10 years, including all ~500,000 reviews up to October 2012. Reviews include product and user information, ratings, and a plain text review. It also includes reviews from all other Amazon categories."
#
# Recommendation systems can be built in a variety of ways. If one knows nothing about the given user then one could simply recommend the most popular or hot items, this is a quite straightforward approach but will often fail to be accurate. A better approach -but requires some data about the user/audience- is to employ either collaborative filtering, which recommends content similar to the one the user has shown interest in, or content-based filtering, which shows content that some other users that seem to have alike preferences rated with high score.
#
# In this exercise, I'm implementing a mixture of those two methods by training a Random Forest Regressor to predict the score a user will give to a product s/he hasn't consumed yet. This method is chosen because it is simple enough to be implemented quickly, but complex enough to take advantage of most of the information in the dataset (including text) to produce accurate results.
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import random
import re
from collections import Counter
from itertools import product
from joblib import dump, load
from scipy.sparse import coo_matrix
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import confusion_matrix, f1_score, mean_squared_error
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
from nltk.stem.wordnet import WordNetLemmatizer
# -
def describe(df, var, name):
    """Print summary stats for column `var`: the number of unique values,
    the distribution of reviews per value, and the mean 'Score' per value."""
    unique_count = df[var].nunique()
    review_counts = df[var].value_counts()
    mean_scores = df.groupby(var)['Score'].mean()
    print(f'Number of {name}: {unique_count}')
    print('Reviews')
    print(f'Mean: {review_counts.mean():.2f}, std: {review_counts.std():.2f}, '
          f'max: {review_counts.max()}, median: {review_counts.median():.2f}, min: {review_counts.min()}')
    print('Score')
    print(f'Mean: {mean_scores.mean():.2f}, std: {mean_scores.std():.2f}, '
          f'max: {mean_scores.max()}, median: {mean_scores.median():.2f}, min: {mean_scores.min()}')
# Load the Amazon Fine Foods reviews and do basic cleaning.
df = pd.read_csv('Reviews.csv')
print(df.shape)
df.head()
# +
# Time is not in proper format
df.Time = pd.to_datetime(df.Time, unit='s')
df['Year'] = df.Time.dt.year
# Id is useless
df.drop('Id', axis=1, inplace=True)
# Factorize product and user ids to save memory
df.UserId = df.UserId.factorize()[0]
df.ProductId = df.ProductId.factorize()[0]
# -
# Missing data
df.isnull().sum()
# I'm dropping products and users with 10 reviews or less
# I want to avoid memory errors
# And their utility may be marginal
df = df[df.groupby('ProductId')['ProductId'].transform('count') > 10]
df = df[df.groupby('UserId')['UserId'].transform('count') > 10]
df.shape
# Have users rated the same product twice or more?
df[['ProductId', 'UserId']].duplicated().sum()
describe(df, 'UserId', 'users')
print('*--'*20)
describe(df, 'ProductId', 'products')
# I'm planning on getting features from both summary and text
df['Full_txt'] = df['Summary'].fillna('') + ' ' + df['Text']
# # Split data into train, test and validation
# The aim is to train the model into the train dataset, tune hyper parameter with the test dataset and then perform final validation with the validation dataset. This gives a more accurate perception of the real error because the model never gets to see the answers (scores) for the validation set.
# Split train and validation
# Stratified on Score so class balance is preserved; only the first split of
# the 2 generated by StratifiedShuffleSplit is used (hence the `break`).
sss = StratifiedShuffleSplit(n_splits=2, test_size=0.2, random_state = 412)
for train_idx, test_idx in sss.split(df, df.Score, df.ProductId):
    train = df.iloc[train_idx]
    validation = df.iloc[test_idx]
    break
print(train.shape, validation.shape)
# Now split train in train and test
sss = StratifiedShuffleSplit(n_splits=2, test_size=0.2, random_state = 412)
for train_idx, test_idx in sss.split(train, train.Score, train.ProductId):
    test = train.iloc[test_idx]
    train = train.iloc[train_idx]
    break
print(train.shape, test.shape)
# Sanity-check that the splits have similar user/product statistics.
describe(train, 'UserId', 'users')
print('*--'*20)
describe(train, 'ProductId', 'products')
print('*--'*20)
describe(validation, 'UserId', 'users')
print('*--'*20)
describe(validation, 'ProductId', 'products')
# # Text keywords extraction
# As data has been semi-anonymized, the best description of the product exists in the reviews. By extracting keywords from them, one could obtain useful groups of products. This assumes that, when reviewing, people tend to use certain words when talking about a specific type of product.
#
# A very raw version of keyword extraction is being performed here, with no special tuning being made. Also, no attempt to get a feeling of the whole text instead of just the keywords is being made.
# I noticed some words I'd like to remove
# Domain-specific noise words to exclude in addition to NLTK's English stopwords.
words = ['br', 'john', 'pb', 'pg', 'ck', 'amazon', 'wayyyy', 'come', 'bye']
stop_words = set(stopwords.words("english"))
stop_words = stop_words.union(words)
def regularize_text(x, stop_words=stop_words):
    """Normalise a review string for keyword extraction.

    Keeps letters only, lower-cases, strips markup-like fragments and
    digit/non-word runs, then lemmatizes each token and drops stopwords.
    Returns the remaining tokens joined by spaces.
    """
    # standardize text
    x = re.sub('[^a-zA-Z]', ' ', x)
    x = x.lower()
    x=re.sub("</?.*?>"," <> ",x)
    x=re.sub("(\\d|\\W)+"," ",x)
    x = x.split(' ')
    # Lemmatization only — the original also built a PorterStemmer here but
    # never used it, so that per-call allocation has been removed.
    lem = WordNetLemmatizer()
    x = [lem.lemmatize(word) for word in x if not word in stop_words]
    return ' '.join(x)
# I only use train dataset in this phase to avoid data leakage
train['Full_txt'] = train['Full_txt'].apply(regularize_text)
# +
# Vectorize words
# Keep at most 10k unigrams, dropping terms in >80% of documents.
countV=CountVectorizer(max_df=0.8,stop_words=stop_words, max_features=10000, ngram_range=(1,1))
X=countV.fit_transform(train['Full_txt'])
# Calculate TFIDF
tfidf = TfidfTransformer()
tfidf.fit(X)
# Column index -> vocabulary term, used later to map tf-idf columns to words.
feature_names=countV.get_feature_names()
# +
# Functions to extract most important keywords
def sort_matrix(m):
    """Return the (column_index, value) pairs of a COO sparse row, ordered by
    value (then column index) descending."""
    pairs = list(zip(m.col, m.data))
    pairs.sort(key=lambda pair: (pair[1], pair[0]), reverse=True)
    return pairs
def extract_n(names, items, n=10):
    """Map the top-n feature names to their scores (rounded to 3 decimals).

    `items` is a list of (feature_index, score) pairs already sorted in
    descending score order; `names` maps index -> feature name.
    """
    # BUG FIX: was items[:n+1], which returned n+1 entries instead of n.
    sorted_items = items[:n]
    scores = []
    features = []
    # word index and corresponding tf-idf score
    for idx, s in sorted_items:
        scores.append(round(s, 3))
        features.append(names[idx])
    return dict(zip(features, scores))
def keywords_from_doc(doc, tfidf, n=5):
    """Return the top-n tf-idf keywords (name -> score) for a document list,
    using the module-level `countV` vectorizer and `feature_names` vocabulary."""
    counts = countV.transform(doc)
    tfidf_vector = tfidf.transform(counts)
    ranked = sort_matrix(tfidf_vector.tocoo())
    return extract_n(feature_names, ranked, n)
# -
# Get dict with the keywords of each product
# ProductId -> list of its top-5 tf-idf keywords across all its reviews.
keywords_per_product = {}
ids = train['ProductId'].unique()
for i in ids:
    mask = train['ProductId'] == i
    doc = train[mask]['Full_txt'].values
    keywords = keywords_from_doc(doc, tfidf, 5)
    keywords_per_product[i] = list(keywords.keys())
# +
# get the frequency of keywords and only keep the most frequent 5%
count = Counter()
for v in keywords_per_product.values():
    count.update(v)
perc = np.percentile(list(count.values()), 95)
keywords = [k for k,v in count.items() if v>=perc]
# +
# OneHot encode keywords
# prod_vec: ProductId -> 0/1 vector over the retained keyword vocabulary.
prod_vec = {}
for product in train['ProductId'].unique():
    vec = []
    for keyword in keywords:
        if keyword in keywords_per_product[product]:
            vec.append(1)
        else:
            vec.append(0)
    prod_vec[product] = vec
# Rows indexed by ProductId, one column per keyword.
prod_features = pd.DataFrame(prod_vec).T
prod_features.columns = keywords
prod_features.head()
# -
# Keywords per product have been extracted and one-hot encoded. It looks good enough, so I'll just merge it into the train dataset.
train = train.merge(prod_features, left_on=['ProductId'], right_index=True, how='left')
# # Get aditional features from scores
# +
def standard_features(data, group, var, prefix):
    """Add per-group mean/std/count columns of `var` to `data` (in place) and
    return the same frame; new columns are named `prefix + var + <Stat>`."""
    grouped = data.groupby(group)[var]
    for stat, label in (('mean', 'Mean'), ('std', 'Std'), ('count', 'Count')):
        data[prefix + var + label] = grouped.transform(stat)
    return data
# Per-user, per-product, and per-(product, year) score statistics.
train = standard_features(train, 'UserId', 'Score', 'User')
train = standard_features(train, 'ProductId', 'Score', 'Product')
train = standard_features(train, ['ProductId', 'Year'], 'Score', 'ProductYear')
# -
# # Merge features to train and validation
# To avoid data leakage, features are only extracted from train dataset and then merged back into the test and validation set.
# Columns derived from products / users (selected by name prefix).
product_cols = train.filter(regex='(Product).*').columns
user_cols = train.filter(regex='(User).*').columns
# Attach train-derived aggregates to test by id, to avoid leakage.
test = test.merge(train[product_cols].groupby('ProductId').mean(), left_on='ProductId', right_index=True, how='left')
test = test.merge(train[user_cols].groupby('UserId').mean(), left_on='UserId', right_index=True, how='left')
test = test.merge(prod_features, left_on=['ProductId'], right_index=True, how='left')
test.fillna(0, inplace=True) # There is no information about NaNs
# Same treatment for the validation split.
validation = validation.merge(train[product_cols].groupby('ProductId').mean(),
                              left_on='ProductId', right_index=True, how='left')
validation = validation.merge(train[user_cols].groupby('UserId').mean(),
                              left_on='UserId', right_index=True, how='left')
validation = validation.merge(prod_features, left_on=['ProductId'],
                              right_index=True, how='left')
validation.fillna(0, inplace=True) # There is no information about NaNs
# # Train, tune and validate model
def scorer(model, X_train, X_test, y_train, y_test):
    """Fit `model` on the training split and return its test-set MSE."""
    model.fit(X_train, y_train)
    return mean_squared_error(y_test, model.predict(X_test))
def grid_search(model, X_train, X_test, y_train, y_test, param_grid, rs=542, verbose=False):
    """Exhaustively evaluate every combination in `param_grid`, refitting
    `model` each time, and return (best_params, best_score), where the score
    is the MSE from `scorer` (lower is better)."""
    if verbose:
        total = sum(1 for _ in product(*param_grid.values()))
    best_score = np.inf
    for i, values in enumerate(product(*param_grid.values())):
        candidate = dict(zip(param_grid.keys(), values))
        model.set_params(**candidate)
        score = scorer(model, X_train, X_test, y_train, y_test)
        if score < best_score:
            best_score = score
            best_params = candidate
        if verbose:
            print('Parameter combination: {}/{}. Score:{:.4f}, best:{:.4f}.'.format(i+1,total, score, best_score))
    return best_params, best_score
# Split X y
# Feature columns = everything except identifiers, raw text, and the target.
cols = train.drop(['ProfileName', 'HelpfulnessNumerator',
                   'HelpfulnessDenominator', 'Score', 'Time', 'Year',
                   'Summary', 'Text', 'Full_txt', 'UserId', 'ProductId'],
                  axis=1).columns
X_train = train[cols].fillna(0) #NaNs are in std
y_train = train['Score']
X_test = test[cols].fillna(0)
y_test = test['Score']
# Fit the base regressor
rf = RandomForestRegressor(n_estimators=200, n_jobs=-1, random_state=412)
rf.fit(X_train, y_train)
preds = rf.predict(X_test)
score = mean_squared_error(y_test, preds)
print(score)
# +
# Tune features
# Coarse-then-fine search over an importance threshold: keep only features
# whose importance is at or above the q-th quantile of log-importances.
best_score = score
features = X_train.columns
fi = rf.feature_importances_
lfi = np.log(fi)
# Coarse pass: q in {0.05, 0.10, ..., 0.95}.
for q in np.arange(0.05, 1, 0.05):
    v = np.exp(np.quantile(lfi, q))
    features = X_train.columns[fi>=v]
    score = scorer(rf, X_train[features], X_test[features], y_train, y_test)
    if score < best_score:
        best_score = score
        best_features = features
        best_q = q
    print('Tested q: {:.2f}, score: {:.4f}. Best score: {:.4f}'.format(q, score, best_score))
# Fine pass: step 0.01 around the best coarse q (skipping q itself).
for q in np.arange(best_q-0.04, best_q+0.04, 0.01):
    if np.isclose(best_q,q):
        continue
    v = np.exp(np.quantile(lfi, q))
    features = X_train.columns[fi>=v]
    score = scorer(rf, X_train[features], X_test[features], y_train, y_test)
    if score < best_score:
        best_score = score
        best_features = features
        best_q = q
    print('Tested q: {:.2f}, score: {:.4f}. Best score: {:.4f}'.format(q, score, best_score))
# -
# Tune hyperparameters
# Exhaustive grid over depth / split size / impurity decrease (45 combos).
param_grid = {'max_depth':[15, 30, 50, 100, None], 'min_samples_split':[2, 30, 60],
              'min_impurity_decrease':[0.0, 0.001, 0.0001]}
params, score = grid_search(rf, X_train[best_features], X_test[best_features],
                            y_train, y_test, param_grid, verbose=True)
# Re-add fixed settings that the grid did not search over.
params['n_jobs']=-1
params['random_state']=412
params['n_estimators']=200
print(params)
# ### Validate
# To validate the results, I'm joining train and test data and retraining the model with the given features and hyperparameters. For the sake of simplicity, I'm just joining the two datasets together. A better approach is to update product and user data by recalculating them in the new, bigger dataset.
# Retrain on train+test with the tuned settings, then score on validation.
X_pre = pd.concat([X_train[best_features], X_test[best_features]])
y_pre = pd.concat([y_train, y_test])
traintest = pd.concat([train, test], sort=False)
rf = RandomForestRegressor(**params)
rf.fit(X_pre, y_pre)
# Persist the fitted model for later reuse.
dump(rf, 'rfModel.joblib')
# Validate
X_val = validation[best_features]
y_val = validation['Score']
preds = rf.predict(X_val)
mse = mean_squared_error(y_val, preds)
print(mse)
# Transform into a classification problem by rounding predictions
print('Macro F1: {:.2f}'.format(f1_score(y_val, preds.round(), average='macro')))
print('Weighted F1: {:.2f}'.format(f1_score(y_val, preds.round(), average='weighted')))
sns.heatmap(confusion_matrix(y_val, preds.round()), cmap='bwr', center=0, annot=True, fmt='.0f',
            xticklabels=range(1,6), yticklabels=range(1,6))
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.show()
# 0.66 MSE in validation data is a surprising result. Given that the best MSE that could be achieved on the training phase was 0.99, the better performance on validation means that either the model needed more data to find better patterns, that there is still plenty of room to improve this model or even that validation data was too easy.
#
# When translated into a classification problem (by rounding predicted scores), a weighted F1 (accounting for label imbalance) of 68% shows that results may be improved way further. Nevertheless, when one looks at the confusion matrix, most of the mistakes come from neighbour classes (e.g. it predicted a 4 but it was a 5), which are not a terrible mistake. Actually, if one thinks about it, humans don't tend to be 100% consistent when rating things (<NAME> is famous for theorizing on this), so even for the rater it could be easy to change a 5 for a 4. Therefore, even this simple over-optimistic model could be used in production and it will obtain ok results.
# # Recommend to user
# Finally, the trained model needs to be used to recommend new products to a given user. To do so, it is necessary to compute the expected score and sort the results.
def recommend(user=None, n=10, data=traintest, user_cols=user_cols,
              product_cols=product_cols, prod_features=prod_features,
              features=best_features, model=rf):
    """Print and return the top-n products for a user, ranked by the model's
    predicted score over products the user has not yet reviewed.

    If `user` is None, a random user is drawn from the test split. The
    defaults bind the module-level data/model at definition time — NOTE
    (review): re-running earlier cells will not refresh them.
    """
    if user is None:
        user = random.choice(test.UserId.unique())
    # Assemble dataset for prediction
    mask = data.UserId == user
    # The user's average per-user feature values, broadcast to all candidates.
    user_features = data[mask][user_cols].mean()
    included_products = data[mask].ProductId.unique()
    # Candidate rows: every review row for products this user has not rated.
    mask = data.ProductId.apply(lambda x: x not in included_products)
    products = data[mask][product_cols]
    products = products.merge(prod_features, left_on='ProductId', right_index=True, how='left')
    # Skip the first entry (presumably 'UserId' itself); copy user features in.
    for i in user_features.iloc[1:].index:
        products[i] = user_features[i]
    # Predict and sort results
    preds = model.predict(products[features].fillna(0))
    recommended = data[mask][['ProductId']]
    recommended['PredScore'] = preds
    recommended.drop_duplicates(inplace=True)
    recommended = recommended.sort_values('PredScore', ascending=False).iloc[:n]
    print('{} recommended products for User {} with scores:'.format(n, user))
    print(recommended.to_string(index=False))
    return recommended
# Choose a random user and recommend him/her 10 products
# (the random user is drawn from the held-out test split; see `recommend`)
_ = recommend()
# Recommend 15 products to user 127
_ = recommend(127, 15)
| Recommendation Systems/Amazon Finefoods reviews.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sqlite3 as db
def readFromDB(conn, exectCmd, params):
    """Run a parameterised query on `conn` and return the first column of the
    first result row (e.g. a COUNT(*) value)."""
    cur = conn.cursor()
    cur.execute(exectCmd, params)
    return cur.fetchall()[0][0]
def statistic(db_path, dict):
    """Tally bug counts per JS-engine component from the SQLite DB.

    For each key (component name) in `dict`, appends two values to its list:
    the count of 'Fixed' bugs, then the count of 'Fixed' + 'Verified' bugs.
    NOTE: the parameter shadows the builtin `dict` and is mutated in place
    (`result` below is an alias, not a copy); both are kept for backward
    compatibility with existing callers.
    """
    conn = db.connect(db_path)
    # SQL query model
    sql = 'select count(*) from bug_info where "Affected JS Engine Components" = ? and State = ?; '
    result = dict
    for k in dict.keys():
        try:
            # Execute SQL statements with parameters k and "Fixed"
            params = [k, "Fixed"]
            FixedNum = readFromDB(conn, sql, params)
            result[k].append(FixedNum)
            # Execute SQL statements with parameters k and "Verified"
            params = [k,"Verified"]
            VerifiedNum = readFromDB(conn, sql, params)
            result[k].append(FixedNum+VerifiedNum)
        except db.Error:  # was a bare `except:`, which also hid programming errors
            print ("Error: unable to fetch data")
    # close the connection
    conn.close()
    return result
def statistic_from_bug_db(db_path, dict):
    """Thin wrapper around :func:`statistic`, kept for API compatibility."""
    return statistic(db_path, dict)
if __name__ == "__main__":
    # Path to the SQLite database produced by the COMFORT pipeline.
    db_path = r"/mnt/aliyun/COMFORT/data/Bug_info.db"
    # Component name -> list that statistic() fills with
    # [fixed_count, fixed_plus_verified_count].
    # NOTE(review): the name shadows the builtin ``dict``; kept as-is here.
    dict = {"CodeGen": [], "Implementation": [], "Parser": [], "RegExp Engine": [], "Strict Mode": [], "Optimizer": []}
    result = statistic_from_bug_db(db_path, dict)
    # Runtime message intentionally left in Chinese ("bug count statistics:").
    print("bug 数量统计结果如下:")
    print(result)
# -
| artifact_evaluation/Jupyter/src/data_statistics/statistic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example 3 - Langmuir Hinshelwood mechanism
#
# In this example, we will show how to build a Gaussian Process (GP) surrogate model for a Langmuir Hinshelwood (LH) mechanism and locate its optimum via Bayesian Optimization.
# In a typical LH mechanism, two molecules adsorb on neighboring sites and the adsorbed molecules undergo a bimolecular reaction:
# $$ A + * ⇌ A* $$
# $$ B + * ⇌ B* $$
# $$ A* + B* → Product $$
#
# The reation rate can be expressed as,
#
# $$ rate = \frac{k_{rds} K_1 K_2 P_A P_B}{(1 + K_1 P_A + K_2 P_B)^2}$$
#
# where $k_{rds}$, $K_1$, $K_2$, $P_A$, $P_B$ are the kinetic constants and partial pressure of two reacting species. $P_A$ and $P_B$ are the independent variables `X1` and `X2`. The rate is the dependent variable `Y`. The goal is to determine their value where the rate is maximized.
#
# The details of this example is summarized in the table below:
#
# | Key Item | Description |
# | :----------------- | :---------------------------- |
# | Goal | Maximization |
# | Objective function | LH mechanism |
# | Input (X) dimension | 2 |
# | Output (Y) dimension | 1 |
# | Analytical form available? | Yes |
# | Acqucision function | Expected improvement (EI) |
# | Initial Sampling | Full factorial or latin hypercube |
#
# Next, we will go through each step in Bayesian Optimization.
#
# ## 1. Import `nextorch` and other packages
import numpy as np
from nextorch import plotting, bo, doe
# ## 2. Define the objective function and the design space
# We use a Python function `rate` as the objective function `objective_func`.
#
# The range of the input $P_A$ and $P_B$ is between 1 and 10 bar.
# +
#%% Define the objective function
def rate(P):
    """Evaluate the Langmuir-Hinshelwood rate law.

    Parameters
    ----------
    P : numpy array
        Partial pressures of species A and B: a single point of shape (2,)
        or a batch of points of shape (n, 2).

    Returns
    -------
    numpy array
        Reaction rates as a column vector of shape (n, 1).
    """
    # Kinetic constants: adsorption equilibria and rate-determining step.
    K1 = 1
    K2 = 10
    krds = 100
    # Promote a single (2,) point to a (1, 2) batch.
    if P.ndim < 2:
        P = np.array([P])
    # r = krds*K1*K2*P_A*P_B / (1 + K1*P_A + K2*P_B)^2, evaluated per row.
    rates = [
        krds * K1 * K2 * row[0] * row[1] / ((1 + K1 * row[0] + K2 * row[1]) ** 2)
        for row in P
    ]
    # Return the responses as a single column (n, 1).
    return np.expand_dims(np.asarray(rates, dtype=float), axis=1)
# Objective function handed to the BO framework (called via eval_objective_func).
objective_func = rate
# Design space: both partial pressures P_A and P_B range over [1, 10] bar.
X_ranges = [[1, 10], [1, 10]]
# -
# ## 3. Define the initial sampling plan
# Here we compare two sampling plans with the same number of sampling points:
#
# 1. Full factorial (FF) design with levels of 5 and 25 points in total.
# 2. Latin hypercube (LHC) design with 10 initial sampling points, and 15 more Bayesian Optimization trials
#
# The initial reponse in a real scale `Y_init_real` is computed from the helper function `bo.eval_objective_func(X_init, X_ranges, objective_func)`, given `X_init` in unit scales.
# +
#%% Initial Sampling
# Plan 1: full factorial with 5 levels per dimension -> 25 points total.
n_ff_level = 5
n_ff = n_ff_level**2
# Full factorial design
X_init_ff = doe.full_factorial([n_ff_level, n_ff_level])
# Get the initial responses (X is in unit scale; helper maps to X_ranges).
Y_init_ff = bo.eval_objective_func(X_init_ff, X_ranges, objective_func)
# Plan 2: Latin hypercube with 10 initial points (seeded for reproducibility).
n_init_lhc = 10
# Latin hypercube design with 10 initial points
X_init_lhc = doe.latin_hypercube(n_dim = 2, n_points = n_init_lhc, seed= 1)
# Get the initial responses
Y_init_lhc = bo.eval_objective_func(X_init_lhc, X_ranges, objective_func)
# Compare the two sampling plans side by side in the 2D design space.
plotting.sampling_2d([X_init_ff, X_init_lhc],
                     X_ranges = X_ranges,
                     design_names = ['FF', 'LHC'])
# -
# ## 4. Initialize an `Experiment` object
#
# Next, we initialize two `Experiment` objects for FF and LHC, respectively. We also set the objective function and the goal as maximization.
#
# We will train two GP models. Some progress status will be printed out.
#
#
# +
#%% Initialize an Experiment object
# Full factorial design
# Set its name, the files will be saved under the folder with the same name.
# BUG FIX: the two names were swapped (Exp_ff was labelled 'LH_mechanism_LHC'
# and Exp_lhc 'LH_mechanism_FF'), so each design saved into the other's folder.
Exp_ff = bo.Experiment('LH_mechanism_FF')
# Import the initial data
Exp_ff.input_data(X_init_ff, Y_init_ff, X_ranges = X_ranges, unit_flag = True)
# Set the optimization specifications
# here we set the objective function, minimization by default
Exp_ff.set_optim_specs(objective_func = objective_func,
                       maximize = True)
# Latin hypercube design
# Set its name, the files will be saved under the folder with the same name.
Exp_lhc = bo.Experiment('LH_mechanism_LHC')
# Import the initial data
Exp_lhc.input_data(X_init_lhc, Y_init_lhc, X_ranges = X_ranges, unit_flag = True)
# Set the optimization specifications
# here we set the objective function, minimization by default
Exp_lhc.set_optim_specs(objective_func = objective_func,
                        maximize = True)
# -
# ## 5. Run trials
# We perform 15 more Bayesian Optimization trials for the LHC design using the default acquisition function (Expected Improvement (EI)).
#%% Optimization loop
# Set the number of iterations: give LHC the same total budget (25 points)
# as the full factorial design, i.e. 25 - 10 initial points = 15 trials.
n_trials_lhc = n_ff - n_init_lhc
for i in range(n_trials_lhc):
    # Generate the next experiment point from the acquisition function (EI).
    X_new, X_new_real, acq_func = Exp_lhc.generate_next_point()
    # Get the response at this point
    Y_new_real = objective_func(X_new_real)
    # or
    # Y_new_real = bo.eval_objective_func(X_new, X_ranges, objective_func)
    # Retrain the model by feeding the next point into the Exp object.
    Exp_lhc.run_trial(X_new, X_new_real, Y_new_real)
# ## 6. Visualize the final model reponses
# We would like to see how sampling points scattered in the 2D space.
#%% plots
# Check how the sampling points scattered in the 2D design space.
# Final LHC sampling (initial points + BO-selected points).
print('LHC sampling points')
plotting.sampling_2d_exp(Exp_lhc)
# Compare to the full factorial plan.
print('Comparing two plans:')
plotting.sampling_2d([Exp_ff.X, Exp_lhc.X],
                     X_ranges = X_ranges,
                     design_names = ['FF', 'LHC'])
# We can also visualize model predicted rates and error in heatmaps. The red colors indicates higher values and the blue colors indicates lower values.
# +
# Response heatmaps: red = higher rate, blue = lower rate.
# Objective function heatmap (ground truth).
print('Objective function heatmap: ')
plotting.objective_heatmap(objective_func, X_ranges, Y_real_range = [0, 25])
# Full factorial model heatmap.
print('Full factorial model heatmap: ')
plotting.response_heatmap_exp(Exp_ff, Y_real_range = [0, 25])
# LHC model heatmap.
print('LHC model heatmap: ')
plotting.response_heatmap_exp(Exp_lhc, Y_real_range = [0, 25])
# Full factorial error heatmap (model vs. objective).
print('Full factorial model error heatmap: ')
plotting.response_heatmap_err_exp(Exp_ff, Y_real_range = [0, 5])
# LHC error heatmap.
print('LHC model error heatmap: ')
plotting.response_heatmap_err_exp(Exp_lhc, Y_real_range = [0, 5])
# -
# The rates can also be plotted as response surfaces in 3D.
# Surface plots: same responses rendered as 3D surfaces.
# Objective function surface plot (ground truth).
print('Objective function surface: ')
plotting.objective_surface(objective_func, X_ranges, Y_real_range = [0, 25])
# Full factorial surface plot.
print('Full fatorial model surface: ')
plotting.response_surface_exp(Exp_ff, Y_real_range = [0, 25])
# LHC surface plot.
print('LHC model surface: ')
plotting.response_surface_exp(Exp_lhc, Y_real_range = [0, 25])
# ## 7. Export the optimum
#
# Compare two plans in terms optimum discovered in each trial.
# Best-so-far objective value after each trial, for both plans.
plotting.opt_per_trial([Exp_ff.Y_real, Exp_lhc.Y_real],
                       design_names = ['Full Fatorial', 'LHC'])
# Obtain the optimum from each method.
# +
# LHC optimum: best rate, its location, and trial index.
y_opt_lhc, X_opt_lhc, index_opt_lhc = Exp_lhc.get_optim()
print('From LHC + Bayesian Optimization, ')
print('The best reponse is rate = {} at P = {}'.format(y_opt_lhc, X_opt_lhc))
# Full factorial optimum.
y_opt_ff, X_opt_ff, index_opt_ff = Exp_ff.get_optim()
print('From full factorial design, ')
print('The best reponse is rate = {} at P = {}'.format(y_opt_ff, X_opt_ff))
# -
# From above plots, we see both LHC + Bayesian Optimization and full factorial design locate the same optimum point in this 2D example.
# [Thumbnail](_images/03.png) of this notebook
| docs/source/examples/03_LH_mechanism.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="pBqy1um2i0LI"
# **Importing dataset and observing overview**
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="GRpCYak5ixnx" outputId="db77ed99-ad07-4d03-a9d9-c43f94c7d439"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import datasets  # NOTE(review): unused here -- data comes from the CSV; confirm before removing
# Load the Iris CSV from the Colab runtime's filesystem.
data=pd.read_csv("/content/Iris.csv")
data.head()
# + [markdown] colab_type="text" id="eGgFW4-DjEGq"
# **Checking outliers for optimizing the output**
# + colab={"base_uri": "https://localhost:8080/", "height": 334} colab_type="code" id="jj594ciMjT2g" outputId="62b39e27-cad1-4d34-fde3-9c89bc2b8e4d"
import seaborn as sns
ax=sns.boxplot(x="Species", y="SepalLengthCm", data=data)
# + colab={"base_uri": "https://localhost:8080/", "height": 299} colab_type="code" id="1fHLgzZlkhTl" outputId="6659a723-2dd0-4d7f-d3a5-5de559e098ab"
sns.boxplot(x="Species", y="SepalWidthCm", data=data)
# + colab={"base_uri": "https://localhost:8080/", "height": 296} colab_type="code" id="yBcYcsjAkk_p" outputId="4361a643-5c56-400c-ecaf-2fad3d5997b7"
sns.boxplot(x="Species", y="PetalLengthCm", data=data)
# + colab={"base_uri": "https://localhost:8080/", "height": 296} colab_type="code" id="G-bivrhZkneD" outputId="c9a5a8a4-119e-4c65-c90e-f0f9bf8ce193"
sns.boxplot(x="Species", y="PetalWidthCm", data=data)
# + colab={"base_uri": "https://localhost:8080/", "height": 296} colab_type="code" id="s2lLYVaWkxAo" outputId="9e92d173-cf64-40d0-f623-6ebf166c2ad3"
sns.violinplot(x="Species", y="SepalLengthCm", data=data)
# + colab={"base_uri": "https://localhost:8080/", "height": 296} colab_type="code" id="MdEbzpV8k6tU" outputId="079a7808-5452-43d7-b8a1-5c456ab34963"
sns.violinplot(x="Species", y="SepalWidthCm", data=data)
# + colab={"base_uri": "https://localhost:8080/", "height": 296} colab_type="code" id="Nugvg2rJlBaE" outputId="dcf076a4-1f85-4e16-8e0e-bd2433c7bf55"
sns.violinplot(x="Species", y="PetalLengthCm", data=data)
# + colab={"base_uri": "https://localhost:8080/", "height": 296} colab_type="code" id="qjVIdRgPlEAF" outputId="921fd2f9-e730-4d9c-bc6f-5990e944288e"
sns.violinplot(x="Species", y="PetalWidthCm", data=data)
# + [markdown] colab_type="text" id="ajTLks84lR3h"
# **Label Encoding Species**
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="CNKGYIt1lTqx" outputId="bdf170cc-136d-468c-87cf-6e25ceb54378"
from sklearn.preprocessing import LabelEncoder
encoder=LabelEncoder()
# Adds a NEW lowercase 'species' column with integer codes; the original
# string column 'Species' is kept unchanged.
data['species']=encoder.fit_transform(data['Species'])
data.head()
# + [markdown] colab_type="text" id="BhPYjyGIlY3J"
# **Scaling The Features and Removing Outliers**
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="Mq5NSdeblaHl" outputId="10e7b136-9041-44bc-f7be-fd210aac544c"
data['Check_Outliers']=pd.cut(data['SepalLengthCm'],5)
data[['Check_Outliers','SepalLengthCm']].groupby('Check_Outliers',as_index=False).count()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="_fzz9Zq1llDw" outputId="da79082a-797e-480d-f3f1-0d36fa269399"
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# Fit on the single SepalLengthCm column, then rescale it to [0, 1]
# and store the result as a new column.
scaler = scaler.fit(data[['SepalLengthCm']])
scaled_SPL = scaler.transform(data[['SepalLengthCm']])
data['Scaled SP Length']=scaled_SPL
data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} colab_type="code" id="sICB0DIplohH" outputId="d5234cbf-1de2-4bff-b897-06a63be726d2"
data.drop(labels='Check_Outliers',axis=1,inplace=True)
data['Check_Outliers']=pd.cut(data['Scaled SP Length'],5)
data[['Check_Outliers','Scaled SP Length']].groupby('Check_Outliers',as_index=False).count()
# + colab={"base_uri": "https://localhost:8080/", "height": 69} colab_type="code" id="r9nojVqIltc1" outputId="be4fe6ac-716b-4314-8378-2650f7241066"
# Quantile cut-offs for outlier trimming.
# NOTE(review): lb (the 10th percentile) is computed but never applied in
# the filtering step -- only the upper bound is used; confirm whether a
# lower cut was also intended.
lb=0.1
ub=0.95
limit=data['SepalLengthCm'].quantile([lb,ub])
limit
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="wV0ozatnl6ic" outputId="91e95f9b-d24f-4d8e-93a1-e6a0fba93ec5"
# Keep rows whose sepal length is strictly below the 95th percentile.
sdd=(data['SepalLengthCm']<limit.loc[ub])
sdd.value_counts()
# Explicit copy so later column edits don't trigger SettingWithCopyWarning.
final_data=data[sdd].copy()
final_data.shape
# + [markdown] colab_type="text" id="YwdVSw1wmGHL"
# **Checking Counts of the present Species**
# + colab={"base_uri": "https://localhost:8080/", "height": 86} colab_type="code" id="VAb3y1LAmImE" outputId="90f441ef-00b6-4978-9b50-f156607f88cc"
final_data.drop(labels=['Check_Outliers','Scaled SP Length'],axis=1,inplace=True)
final_data['species'].value_counts()
# + [markdown] colab_type="text" id="PuG96bDjmiKK"
# **Data Distribution Species-Wise**
# + colab={"base_uri": "https://localhost:8080/", "height": 944} colab_type="code" id="QoygOrvPmauY" outputId="608f642e-e445-4465-c94b-2c775df5ef01"
sns.pairplot(final_data.iloc[:,[0,1,2,3,4]])
# + [markdown] colab_type="text" id="_xZTUjE4ojdW"
# **Finding value of optimal k using KElbowVisualizer**
# + colab={"base_uri": "https://localhost:8080/", "height": 471} colab_type="code" id="w5Y5OsOConVx" outputId="aedc26a1-8b05-4361-add9-d2ce0047cc71"
from yellowbrick.cluster import KElbowVisualizer
from sklearn.cluster import KMeans
model1 = KMeans()
visualizer = KElbowVisualizer(model1, k=(1,10))
visualizer.fit(final_data.iloc[:,[0,1,2,3]])
# + [markdown] colab_type="text" id="YvaZj0saosdB"
# **Visualizing Silhouette Co-efficient using k=3**
# + colab={"base_uri": "https://localhost:8080/", "height": 436} colab_type="code" id="JHEkxknkot9H" outputId="6a712a52-8b7d-490d-a1ad-8549d74b9dad"
from yellowbrick.cluster import SilhouetteVisualizer
model2 = KMeans(3)
visualizer = SilhouetteVisualizer(model2)
visualizer.fit(final_data.iloc[:,[0,1,2,3]])
# + [markdown] colab_type="text" id="-IHA-dOfo04S"
# **Fixing n_clusters=3 and fitting the data to KMeans Model**
# + colab={} colab_type="code" id="cAK45G5Mo3mR"
# Cluster on the four numeric feature columns with k=3, the value suggested
# by the elbow and silhouette analyses above.
x=final_data.iloc[:,[0,1,2,3]]
kmeans = KMeans(n_clusters = 3, init = 'k-means++',
                max_iter = 300, n_init = 10, random_state = 0)
y_kmeans = kmeans.fit_predict(x)
# + [markdown] colab_type="text" id="qnT-F5sGo_ef"
# **Visualizing The Clusters Based On First Two Columns**
# + colab={"base_uri": "https://localhost:8080/", "height": 364} colab_type="code" id="dJ6K4Auro_xB" outputId="d62f0cc8-e3b2-4784-eac3-83020f4b1188"
x = np.array(x)
# One scatter layer per cluster label, plotted on the first two columns.
# NOTE(review): KMeans labels are arbitrary; the cluster-id -> species-name
# mapping in the legend is assumed and should be verified.
plt.scatter(x[y_kmeans == 0, 0], x[y_kmeans == 0, 1],
            s = 100, c = 'red', label = 'Iris-setosa')
plt.scatter(x[y_kmeans == 1, 0], x[y_kmeans == 1, 1],
            s = 100, c = 'blue', label = 'Iris-versicolour')
plt.scatter(x[y_kmeans == 2, 0], x[y_kmeans == 2, 1],
            s = 100, c = 'green', label = 'Iris-virginica')
# Overlay the fitted cluster centroids.
plt.scatter(kmeans.cluster_centers_[:, 0], kmeans.cluster_centers_[:,1],
            s = 100, c = 'yellow', label = 'Centroids')
plt.legend()
# + colab={} colab_type="code" id="uKpkyADKorb7"
# + colab={} colab_type="code" id="dptLoESYixoV"
| basics/.ipynb_checkpoints/Unsupervised_Machine_Learning-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# %matplotlib inline
import i3assist as i3
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
# The GridSearch class holds a list of Euler objects
# describing the local orientations searched in eulerFG*
# scripts and by the MRASRCH parameter
# gs - grid search
# Build the orientation grid; do_180=False skips the 180-degree-flipped set.
gs = i3.GridSearch(theta_max=9., theta_step=3., psi_max=6., psi_step=3., do_180=False)
len(gs)  # Number of angles that will be searched with these parameters.
# The GridSearch can be iterated over to see the individual euler angles.
# Each of these can be manipulated; the individual components can be gotten;
# and the rotations can be inverted and converted to the rotation matrix
gs = i3.GridSearch(2., 1., 2., 1.)  # Shorthand version of above: tMax, tStep, pMax, pStep
for euler in gs:
    print("Phi: {:f}".format(euler.phi), end=' ')  # Get first Euler angle
    print("Theta: {:f}".format(euler.theta), end=' ')  # Get second Euler angle
    print("Psi: {:f}".format(euler.psi))  # Get third Euler angle
    print("Another way to print it: {:s}".format(str(euler)))  # Euler objects pretty print by default
    euler.normalize()  # Puts Phi and Psi in [-180, 180] and theta in [0, 180] for old I3
    # NOTE(review): the label says "inverse" but this prints the normalized
    # angle, not an inverse -- confirm the intended wording.
    print("Normalized inverse: ", euler)
    invEuler = euler.copy()  # All modifications happen in place, so copy first
    invEuler.invert()  # Inverts the rotations
    print("Inverse rotation: ", invEuler)
    rotMat = euler.to_matrix()  # Convert ZXZ Euler angles to ZXZ rotation matrix
    print("Rotation matrix: ", rotMat)  # prints in a line like new I3 trf format
    # NOTE(review): ""*80 is always the empty string, so this only prints a
    # blank line; a "-"*80 separator was probably intended -- confirm.
    print(""*80, end='\n\n')
# Collect the distinct (phi, theta) orientations of a finer grid search as
# lon/lat pairs so they can be drawn on polar map projections.
gs = i3.GridSearch(9., 3., 30., 1.0)
lons = []
lats = []
for i in gs:
    if len(lons) == 0:
        # First point: always keep.
        lons.append(i.phi)
        lats.append(90. - i.theta)
    elif lons[-1] != i.phi or lats[-1] != 90. - i.theta:
        # Keep only when (phi, theta) changed, i.e. skip psi-only steps.
        lons.append(i.phi)
        lats.append(90. - i.theta)
    else:
        pass
# North- and south-polar Lambert azimuthal equal-area views, side by side.
f, (sp1, sp2) = plt.subplots(1, 2, figsize=(10,10))
m1 = Basemap(projection='nplaea', boundinglat=80, lon_0=0.,resolution='l', ax=sp1)
m2 = Basemap(projection='splaea', boundinglat=-1, lon_0=270., resolution='l', ax=sp2)
m1.drawparallels(np.arange(80.,90.,0.5), labels=[1,0,0,0])
m1.drawmeridians(np.arange(-180.,181.,15.), labels=[0,1,1,0])
m2.drawparallels(np.arange(-60.,90.,15.), labels=[0,1,0,0])
m2.drawmeridians(np.arange(-180.,181.,15.))
# Project lon/lat to map coordinates and scatter the grid points.
x1, y1 = m1(lons, lats)
m1.scatter(x1,y1,marker='o',color='r')
x2, y2 = m2(lons, lats)
m2.scatter(x2, y2, marker='D', color='g')
plt.show()
# Load a transform list and plot each particle's orientation (phi, theta)
# on a sinusoidal world map, colored by psi.
tl = i3.TransformList()
tl.from_file('test.trf')
lons = []
lats = []
clrs = []
for trf in tl:
    rot = trf.rotation
    eul = rot.to_euler()
    lons.append(eul.phi)
    lats.append(90. - eul.theta)
    clrs.append(eul.psi)
m = Basemap(projection='sinu', lon_0=0., resolution='l')
m.drawparallels(np.arange(-90., 90., 30.))
m.drawmeridians(np.arange(-180., 181., 45.))
x, y = m(lons, lats)
# Color each point by its psi angle using the viridis colormap.
cax = m.scatter(x, y, s=36, alpha=1., marker='.', c=clrs, cmap=plt.get_cmap('viridis'))
plt.colorbar(cax, orientation='horizontal')
plt.title('T7 channel WT Orientations')
plt.show()
| i3assist_plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: "Python 3.7 (Intel\xAE oneAPI)"
# language: python
# name: c009-intel_distribution_of_python_3_oneapi-beta05-python
# ---
# # Intel® Advisor - Roofline Analysis
# This sections demonstrates how to collect and generate a roofline report using Intel Advisor.
#
# ##### Sections
# - [What is the Roofline Model?](#What-is-the-Roofline-Model?)
# - _Analysis:_ [Roofline Analysis Report](#Roofline-Analysis-Report)
# - [Finding Effective Optimization Strategies](#Finding-Effective-Optimization-Strategies)
# - [Command Line Options for GPU Roofline Analysis](#Command-Line-Options-for-GPU-Roofline-Analysis)
# - [Using Roofline Analysis on Intel GPU](#Using-Roofline-Analysis-on-Intel-GPU)
# ## Learning Objectives
# - Explain how Intel® Advisor performs GPU Roofline Analysis.
# - Run the GPU Roofline Analysis using command line syntax.
# - Use GPU Roofline Analysis to identify effective optimization strategies.
#
# ## What is the Roofline Model?
# A Roofline chart is a visual representation of application performance in relation to hardware limitations, including memory bandwidth and computational peaks. Intel Advisor includes an automated Roofline tool that measures and plots the chart on its own, so all you need to do is read it.
#
# The chart can be used to identify not only where bottlenecks exist, but what’s likely causing them, and which ones will provide the most speedup if optimized.
#
# ## Requirements for a Roofline Model on a GPU
#
# In order to generate a roofline analysis report ,application must be at least partially running on a GPU, Gen9 or Gen11 integrated graphics and the Offload must be implemented with OpenMP, SYCL, DPC++, or OpenCL and a recent version of Intel® Advisor
#
# Generating a Roofline Model on GPU generates a multi-level roofline where a single loop generates several dots and each dot can be compared to its own memory (GTI/L3/DRAM/SLM)
#
# ## Gen9 Memory Hierarchy
#
# 
# ## Roofline Analysis Report
# Let's run a roofline report -- this is another <b>live</b> report that is interactive.
# [Intel Advisor Roofline report](assets/roofline.html)
import os
# Log which user is running this notebook section (workshop telemetry).
os.system('/bin/echo $(whoami) is running DPCPP_Essentials Module5 -- Roofline_Analysis - 2 of 2 roofline.html')
from IPython.display import IFrame
# Embed the pre-generated interactive Advisor roofline report inline.
IFrame(src='assets/roofline.html', width=1024, height=769)
# # Finding Effective Optimization Strategies
# Here are the GPU Roofline Performance Insights, it highlights poor performing loops and shows performance ‘headroom’ for each loop which can be improved and which are worth improving. The report shows likely causes of bottlenecks where it can be Memory bound vs. compute bound. It also suggests next optimization steps
#
#
# <img src="assets/r1.png">
#
#
# ### Running the Survey
# The Survey is usually the first analysis you want to run with Intel® Advisor. The survey is mainly used to time your application as well as the different loops and functions.
# ### Running the trip count
# The second step is to run the trip count analysis. This step uses instrumentation to count how many iterations you are running in each loops. Adding the option -flop will also provide the precise number of operations executed in each of your code sections.
# ## Advisor Command-Line for generating "roofline" on the CLI
# * Clone official GitHubb samples repository
# git clone https://github.com/oneapi-src/oneAPI-samples.git
#
# * Go into Project directory to the matrix multiply advisor sample
#
# ``cd oneAPI-samples/Tools/Advisor/matrix_multiply_advisor/``
#
# * Build the application and generate the matrix multiplication binary
#
# ``cmake .``
# ``make``
#
# * To run the GPU Roofline analysis in the Intel® Advisor CLI:
# Run the Survey analysis with the --enable-gpu-profiling option
# ``advixe-cl --collect=survey --enable-gpu-profiling --project-dir=./adv -- ./matrix.dpcpp``
#
# * Run the Trip Counts and FLOP analysis with --enable-gpu-profiling option:
#
# ``advixe-cl --collect=tripcounts --stacks --flop --enable-gpu-profiling --project-dir=./adv -- ./matrix.dpcpp``
#
# * Generate a GPU Roofline report:
# ``advixe-cl --report=roofline --gpu --project-dir=./adv``
#
# * Open the generated roofline.html in a web browser to visualize GPU performance.
# +
# %%writefile advisor_roofline.sh
# #!/bin/bash
advixe-cl --collect=survey --enable-gpu-profiling --project-dir=./adv -- ./matrix.dpcpp
advixe-cl --collect=tripcounts --stacks --flop --enable-gpu-profiling --project-dir=./adv -- ./matrix.dpcpp
advixe-cl --report=roofline --gpu --project-dir=./adv
# -
# ## Using Roofline Analysis on Intel GPU
# You can see how close you are to the system maximums. The roofline indicates maximum room for improvement
#
# <img src="assets/r2.png">
#
# ## Showing Dots for all Memory Sub-systems
#
# 
# ## Add Labels
#
# 
# ## Clean the View
#
# 
# ## Show the Guidance
#
# 
# ## Summary
#
# * We ran a roofline report.
# * Explored the features of the roofline report and learned how to interpret the report.
# * Examined the information to determine where speedup opportunites exist.
# <html><body><span style="color:Red"><h1>Reset Notebook</h1></span></body></html>
#
# ##### Should you be experiencing any issues with your notebook or just want to start fresh run the below cell.
#
#
# + jupyter={"source_hidden": true}
from IPython.display import display, Markdown, clear_output
import ipywidgets as widgets
button = widgets.Button(
    description='Reset Notebook',
    disabled=False,
    button_style='', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='This will update this notebook, overwriting any changes.',
    icon='check' # (FontAwesome names without the `fa-` prefix)
)
out = widgets.Output()
def on_button_clicked(_):
    # Click handler: restores the pristine notebook files from the workshop
    # master copy, then tells the user to reload the browser tab.
    with out:
        # what happens when we press the button
        clear_output()
        # !rsync -a --size-only /data/oneapi_workshop/oneAPI_Essentials/05_Intel_Advisor/ ~/oneAPI_Essentials/05_Intel_Advisor
        print('Notebook reset -- now click reload on browser.')
# linking button and function together using a button's method
button.on_click(on_button_clicked)
# displaying button and its output together
widgets.VBox([button,out])
| DirectProgramming/DPC++/Jupyter/oneapi-essentials-training/05_Intel_Advisor/roofline_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] Collapsed="false"
# # Queries 15may20
# + [markdown] Collapsed="false"
# ## TrumpTweets
# + Collapsed="false"
# value_counts() returns a Series whose INDEX is the word and whose values
# are the frequencies -- which is why .shape below shows one dimension
# rather than a two-column (word, freq) DataFrame.
word_count = pd.Series(' '.join(df.content).split()).value_counts()
word_count.shape
# + Collapsed="false"
# When doing shape of this, i just have a series.
# I thought it would produce a 2 columns dataframe (words, freq)
# + Collapsed="false"
# + Collapsed="false"
# `word_count` is a pandas Series (index = word, values = counts), so it has
# no `columns` to rename: the old `word_count.columns = ...` silently created
# a useless attribute.  Convert the Series into the intended two-column
# DataFrame instead: index becomes the 'word' column, values become 'freq'.
def_col_name = ['word', 'freq']
word_count = word_count.rename_axis(def_col_name[0]).reset_index(name=def_col_name[1])
word_count
# + Collapsed="false"
# word_counts doe not have the attributed column names
# + Collapsed="false"
# + [markdown] Collapsed="false"
# ## Exercise 17
# + Collapsed="false"
from sklearn import preprocessing

# The SettingWithCopyWarning reported below this cell occurs because `df1`
# is a slice/view of another DataFrame.  Working on an explicit copy makes
# the assignment unambiguous and silences the warning (per the pandas
# "returning a view versus a copy" guidance).
df1 = df1.copy()
label_encoder = preprocessing.LabelEncoder()
# Replace the Quarter strings with integer codes.
df1['Quarter'] = label_encoder.fit_transform(df1['Quarter'])
df1['Quarter'].unique()
# + Collapsed="false"
# I have those warnings - how would you fix the formula? I tried, but it did not work:
# A value is trying to be set on a copy of a slice from a DataFrame.
# Try using .loc[row_indexer,col_indexer] = value instead
# See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy
# + Collapsed="false"
# + Collapsed="false"
# + Collapsed="false"
| Exercises - Qasim/Queries/Queries 15may20.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import pandas as pd
import time
from pandarallel import pandarallel
import numpy as np
# ⚠️ **WARNING** ⚠️
#
# On Windows, because of the multiprocessing system (spawn), the function you send to pandarallel must be **self contained**, and should not depend on external resources.
#
# Example:
#
# ❌ **Forbidden:**
#
# ```Python
# import math
#
# def func(x):
# # Here, `math` is defined outside `func`. `func` is not self contained.
# return math.sin(x.a**2) + math.sin(x.b**2)
# ```
#
# ✅ **Valid:**
#
# ```Python
# def func(x):
# # Here, `math` is defined inside `func`. `func` is self contained.
# import math
# return math.sin(x.a**2) + math.sin(x.b**2)
# ```
# # Initialize pandarallel
# Spawn the worker pool with default settings (all cores, no progress bars).
pandarallel.initialize()
# # DataFrame.apply
# Benchmark fixture: 5M rows with a small-int column `a` and a float column `b`.
df_size = int(5e6)
df = pd.DataFrame(dict(a=np.random.randint(1, 8, df_size),
                       b=np.random.rand(df_size)))
def func(x):
    """Per-row score: sin(a^2) + sin(b^2)."""
    # Self contained on purpose: pandarallel on Windows pickles the function
    # for spawned workers, so `math` must be imported inside the body.
    import math
    a_term = math.sin(x.a ** 2)
    b_term = math.sin(x.b ** 2)
    return a_term + b_term
# %%time
res = df.apply(func, axis=1)
# %%time
res_parallel = df.parallel_apply(func, axis=1)
res.equals(res_parallel)
# # DataFrame.applymap
df_size = int(1e7)
df = pd.DataFrame(dict(a=np.random.randint(1, 8, df_size),
b=np.random.rand(df_size)))
def func(x):
    """Element-wise score: sin(x^2) - cos(x^2)."""
    # `math` is imported inside so the function stays self contained, as
    # required by pandarallel's spawn-based multiprocessing on Windows.
    import math
    squared = x ** 2
    return math.sin(squared) - math.cos(squared)
# %%time
res = df.applymap(func)
# %%time
res_parallel = df.parallel_applymap(func)
res.equals(res_parallel)
# # DataFrame.groupby.apply
df_size = int(3e7)
df = pd.DataFrame(dict(a=np.random.randint(1, 1000, df_size),
b=np.random.rand(df_size)))
def func(df):
    """Mean of log10(sqrt(exp(b^2))) over the group's `b` column."""
    # Self contained for pandarallel on Windows: math imported in the body.
    import math
    total = sum(math.log10(math.sqrt(math.exp(value ** 2))) for value in df.b)
    return total / len(df.b)
# %%time
res = df.groupby("a").apply(func)
# %%time
res_parallel = df.groupby("a").parallel_apply(func)
res.equals(res_parallel)
# # DataFrame.groupby.rolling.apply
df_size = int(1e6)
df = pd.DataFrame(dict(a=np.random.randint(1, 300, df_size),
b=np.random.rand(df_size)))
def func(x):
    """Polynomial over a window of four values: v0 + v1^2 + v2^3 + v3^4."""
    exponents = (1, 2, 3, 4)
    return sum(x.iloc[position] ** power
               for position, power in enumerate(exponents))
# %%time
res = df.groupby('a').b.rolling(4).apply(func, raw=False)
# %%time
res_parallel = df.groupby('a').b.rolling(4).parallel_apply(func, raw=False)
res.equals(res_parallel)
# # DataFrame.groupby.expanding.apply
df_size = int(1e6)
df = pd.DataFrame(dict(a=np.random.randint(1, 300, df_size),
b=np.random.rand(df_size)))
def func(x):
    # Polynomial over an expanding window's first four values:
    # first + second^2 + third^3 + fourth^4.
    return x.iloc[0] + x.iloc[1] ** 2 + x.iloc[2] ** 3 + x.iloc[3] ** 4
# %%time
res = df.groupby('a').b.expanding(4).apply(func, raw=False)
# %%time
res_parallel = df.groupby('a').b.expanding(4).parallel_apply(func, raw=False)
res.equals(res_parallel)
# # Series.map
df_size = int(5e7)
df = pd.DataFrame(dict(a=np.random.rand(df_size) + 1))
def func(x):
    """Map a value to log10(sqrt(exp(x^2)))."""
    # pandarallel (Windows/spawn) requires the body to be self contained,
    # hence the local import.
    import math
    inner = math.exp(x ** 2)
    return math.log10(math.sqrt(inner))
# %%time
res = df.a.map(func)
# %%time
res_parallel = df.a.parallel_map(func)
res.equals(res_parallel)
# # Series.apply
df_size = int(3.5e7)
df = pd.DataFrame(dict(a=np.random.rand(df_size) + 1))
def func(x, power, bias=0):
    """Return log10(sqrt(exp(x^power))) + bias."""
    # Self contained (math imported inside) so pandarallel can pickle it
    # for spawned workers on Windows.
    import math
    core = math.log10(math.sqrt(math.exp(x ** power)))
    return core + bias
# %%time
res = df.a.apply(func, args=(2,), bias=3)
# %%time
res_parallel = df.a.parallel_apply(func, args=(2,), bias=3)
res.equals(res_parallel)
# # Series.rolling.apply
df_size = int(1e6)
df = pd.DataFrame(dict(a=np.random.randint(1, 8, df_size),
b=list(range(df_size))))
def func(x):
    # Polynomial over a rolling window of length 4:
    # first + second^2 + third^3 + fourth^4.
    return x.iloc[0] + x.iloc[1] ** 2 + x.iloc[2] ** 3 + x.iloc[3] ** 4
# %%time
res = df.b.rolling(4).apply(func, raw=False)
# %%time
res_parallel = df.b.rolling(4).parallel_apply(func, raw=False)
res.equals(res_parallel)
| docs/examples_windows.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yashvinj/MegamanCausalSceneGeneration/blob/main/Causal_Scene_Generation_updated.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="3OkTyTAox8_F" outputId="6de7c0e1-1021-472b-cd40-82437e0fa878"
# !pip3 install pyro-ppl
from google.colab import drive
# Mount the user's Google Drive so notebook assets are reachable at /content/drive.
drive.mount('/content/drive')
# + id="2QJSjiGwyZIa"
from graphviz import Digraph
import pyro
import torch
pyro.set_rng_seed(101)
import pyro.distributions as dist
from pyro.infer import Importance, EmpiricalMarginal
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
from googlesearch import search
from ipywidgets import interactive, Layout
import ipywidgets as widgets
from IPython.display import display
from PIL import Image, ImageDraw, ImageFont
import os
#os.pathsep + r'your_graphviz_bin_direction'
# + colab={"base_uri": "https://localhost:8080/"} id="LesSuSz8KpeB" outputId="87680abf-8b31-4dbe-c4aa-f2cc84242be8"
# !pip install Pillow
# + [markdown] id="3D_ZC2FmyJ0j"
# Here are the attributes of each class:
# Megaman: attack, taunt, stationary, block, dead
# Shademan: attack, taunt, stationary, block, dead
# Background: background
# Interaction: interaction
# + colab={"base_uri": "https://localhost:8080/", "height": 367} id="lyF8V9XeyNQa" outputId="7b4700f3-6fb1-46df-dbce-1a86da6bde34"
dag = Digraph(comment='DAG')
dag.node('M','Megaman')
dag.node('S','Shademan')
dag.node('I','Interaction')
dag.node('E','Environment')
dag.node('R','Scene')
dag.edges(['MI', 'SI', 'MS','IR', 'ER'])
dag
# + id="eMiKiG_R6MDq"
alias = {'position1':['left','right'],
'position2':['left','right'],
'action1':['gyrobeam', 'teleport', 'gearingup', 'fallen','jump', 'burned','won'],
'action2':['lavaattack', 'scare', 'intimidate', 'block', 'recoil','dead'],
'distance':['small','large'],
'size':['tiny','magnificent'],
'background':['highway','city', 'whitespace'],
'interaction':['fights','stationary']
}
prob = {'position1':torch.tensor([0.5,0.5]),
'position2':torch.tensor([0.5,0.5]),
'action1':torch.tensor([0.4, 0.1, 0.2, 0.1, 0.1, 0.05,0.05]),
#'action2':torch.tensor([0.3, 0.15, 0.15, 0.1, 0.25,0.05]),
'distance':torch.tensor([0.5,0.5]),
'size':torch.tensor([0.5,0.5]),
'background':torch.tensor([0.45,0.45,0.1]),
'action2':torch.tensor([[0.15,0.15,0.15,0.25,0.15,0.05], #a1=gyrobeam, [a2=lavaattack. a2=scare. a2=intimidate. a2=block. a2=recoil. a2=dead]
[0.65,0,0.2,0,0.1,0.05], #a1=teleport, [a2=lavaattack. a2=scare. a2=intimidate. a2=block. a2=recoil. a2=dead]
[0.55,0.2,0.2,0,0,0.05], #a1=gearingup, [a2=lavaattack. a2=scare. a2=intimidate. a2=block. a2=recoil. a2=dead]
[0.8,0.2,0,0,0,0], #a1=fallen, [a2=lavaattack. a2=scare. a2=intimidate. a2=block. a2=recoil. a2=dead]
[0.6,0.1,0,0.1,0.1,0], #a1=jump, [a2=lavaattack. a2=scare. a2=intimidate. a2=block. a2=recoil. a2=dead]
[0.85,0.1,0,0,0,0.05], #a1=burned, [a2=lavaattack. a2=scare. a2=intimidate. a2=block. a2=recoil. a2=dead]
[0,0,0,0,0,1]]), #a1=won, [a2=lavaattack. a2=scare. a2=intimidate. a2=block. a2=recoil. a2=dead]
'interaction':torch.tensor([[[[0.03,0.97],[0.3,0.7]], #highway: e1=left, [e2=left. e2=right]
[[0.2,0.8],[0.03,0.97]]], #highway:e1=right, [e2=left. e2=right]
[[[0.96,0.04],[0.98,0.02]], #city: e1=left, [e2=left. e2=right]
[[0.85,0.25],[0.9,0.1]]], #city: e1=right, [e2=left. e2=right]
[[[0.9,0.1],[0.95,0.05]], #whitespace: e1=left, [e2=left. e2=right]
[[0.85,0.25],[0.8,0.2]]]]) #whitespace: e1=right, [e2=left. e2=right]
}
causal_var = ['position1', 'position2', 'background', 'interaction', 'action1', 'action2']
other_var = ['distance','size', 'action1', 'action2']
infer_var = ['position1', 'position2', 'background', 'interaction', 'action1', 'action2']
# + id="ivKS2Nn_6S_v"
#define classes
class Background(object):
    """Scene background; samples the 'background' categorical site."""
    def __init__(self):
        # Prior over backgrounds (highway / city / whitespace) comes from the
        # module-level `prob` table.
        self.background = pyro.sample('background', dist.Categorical(prob['background']))
class Interaction(object):
    """Interaction between the two characters, conditioned on background and positions."""
    def __init__(self, entity1, entity2, background):
        # Conditional table indexed as prob['interaction'][background][pos1][pos2].
        self.interaction = pyro.sample('interaction', dist.Categorical(prob['interaction'][background.background][entity1.position1][entity2.position2]))
class Megaman(object):
    """Protagonist; position and action are sampled independently."""
    def __init__(self):
        self.position1 = pyro.sample("position1", dist.Categorical(probs=prob['position1']))
        self.action1 = pyro.sample("action1", dist.Categorical(probs=prob['action1']))
class Shademan(object):
    """Antagonist; its action depends on Megaman's sampled action."""
    def __init__(self,entity1):
        self.position2 = pyro.sample('position2', dist.Categorical(prob['position2']))
        #self.action2 = pyro.sample("action2", dist.Categorical(probs=prob['action2']))
        # Action is drawn from the row of the conditional table selected by
        # Megaman's action (prob['action2'] is indexed by action1).
        self.action2 = pyro.sample('action2', dist.Categorical(prob['action2'][entity1.action1]))
        self.distance = pyro.sample('distance', dist.Categorical(prob['distance']))
        self.size = pyro.sample('size', dist.Categorical(prob['size']))
# + id="QYiBoidSzC78"
#condition and intervention models
def model():
    '''
    The full generative model. All prior/conditional distributions are defined
    in the module-level `prob` table; the causal relationships (Shademan
    depends on Megaman, Interaction depends on all three) live in the class
    constructors, so instantiation order matters here.
    '''
    megaman = Megaman()
    shademan = Shademan(megaman)
    background = Background()
    interaction = Interaction(megaman, shademan, background)
def condition(model, evidence, num_samples = 1000):
    '''
    Condition `model` on observed `evidence` (dict of site name -> value)
    and return an importance-sampling posterior.
    '''
    condition_model = pyro.condition(model, data=evidence)
    posterior = pyro.infer.Importance(condition_model, num_samples=num_samples).run()
    return posterior
def intervention(model, evidence, num_samples = 1000):
    '''
    Apply the do-operator to `model` with the interventions in `evidence`
    (dict of site name -> value) and return an importance-sampling posterior.
    '''
    do_model = pyro.do(model, data=evidence)
    posterior = pyro.infer.Importance(do_model, num_samples=num_samples).run()
    return posterior
def pltDistribution(posterior, infer, num_samples = 1000):
    '''
    Draw `num_samples` traces from `posterior` and plot the joint empirical
    distribution of the variables named in `infer` as a bar chart.

    posterior : callable returning a pyro trace (e.g. Importance(...).run())
    infer     : list of site names to read from each trace
    '''
    samples = []
    # Note: the original used `i` for both this loop and the inner one,
    # shadowing the outer index; distinct names avoid that trap.
    for _ in range(num_samples):
        trace = posterior()
        # One sample = the tuple of values of every inferred site in the trace.
        samples.append(tuple(int(trace.nodes[name]['value']) for name in infer))
    data = Counter(samples).most_common()
    unique = [combo for combo, _ in data]
    counts = [count for _, count in data]
    # Convert each tuple of category indices into human-readable labels.
    x_label = [[alias[infer[j]][combo[j]] for j in range(len(infer))]
               for combo in unique]
    plt.bar(range(len(data)), counts)
    plt.xticks(range(len(data)), x_label)
    plt.xlabel(infer)
def mostOccurance(posterior, infer, num_samples = 1000):
    '''
    Draw `num_samples` traces from `posterior` and return, as a dict keyed by
    site name, the combination of inferred values that occurs most often.

    posterior : callable returning a pyro trace (e.g. Importance(...).run())
    infer     : list of site names to read from each trace
    '''
    samples = []
    # Fixed: original reused `i` for both the sample loop and the per-site
    # loop, shadowing the outer index.
    for _ in range(num_samples):
        trace = posterior()
        samples.append(tuple(int(trace.nodes[name]['value']) for name in infer))
    most_common, _count = Counter(samples).most_common(1)[0]
    return dict(zip(infer, most_common))
# + id="xfBfkrwY0VV4"
#assistant functions
def getVar(w):
    '''
    Split the widget's pre-set values into causal-model variables and other
    variables, converting each chosen label string into the torch tensor of
    its index in the `alias` table. Entries left at '-' are skipped.
    '''
    chosen = w.kwargs
    def _index_tensor(var):
        # Position of the chosen label within the variable's label list.
        return torch.tensor(alias[var].index(chosen[var]))
    do_causal_var = {var: _index_tensor(var)
                     for var in causal_var if chosen[var] != '-'}
    do_other_var = {var: _index_tensor(var)
                    for var in other_var if chosen[var] != '-'}
    return do_causal_var, do_other_var
def getInfer(do_causal_var):
    '''
    Return the potential inference variables that were NOT given a pre-set
    value, i.e. the sites the model still has to infer. Order follows the
    module-level `infer_var` list.
    '''
    preset = set(do_causal_var.keys())
    return [var for var in infer_var if var not in preset]
def getLabel(infer_res):
    '''
    Replace each index value in `infer_res` with its label string from the
    `alias` table, mutating and returning the same dictionary.
    '''
    for key, index in infer_res.items():
        infer_res[key] = alias[key][index]
    return infer_res
def getEntity(input_dic):
    '''
    Sample a fresh scene from the generative classes, convert every sampled
    index to its label string, then overwrite with the pre-set / inferred
    values in `input_dic`.
    Returns a dictionary mapping every variable name to a label string.
    '''
    dic = {}
    # Instantiation order matters: Shademan depends on Megaman, Interaction
    # on all three (same order as in model()).
    megaman = Megaman()
    shademan = Shademan(megaman)
    background = Background()
    interaction = Interaction(megaman,shademan,background)
    entity_list = [megaman,shademan,background,interaction]
    for k in entity_list:
        # Collect every sampled attribute from each entity.
        entity_dic = k.__dict__
        dic.update(entity_dic)
    for k in dic:
        # Convert sampled indices to label strings.
        dic[k] = alias[k][dic[k]]
    # Pre-set / inferred values take precedence over the fresh samples.
    dic.update(input_dic)
    return dic
def getScene(input_dict):
    '''
    Render the final variable dictionary as a one-sentence scene description,
    print it, and return it.
    '''
    scene = ("Megaman -> {action1} and shademan -> {action2} "
             "Action-> {interaction} distance -> {distance} "
             "size -> {size} location -> {background}.").format(**input_dict)
    print(scene)
    return scene
def userInterface():
    '''
    Build the ipywidgets user interface for conditioning / intervening on the
    model: one dropdown per variable ('-' meaning "not set") plus two buttons
    that render the conditional or interventional picture and save it.
    Returns (widget, condition_button, intervention_button); pass them to
    display() to show the interface.
    '''
    # Part 1: multiple choices of label values
    def f_interactive(position1,position2,action1,action2,distance,background,interaction,size):
        # No-op: `interactive` only needs the signature to build the dropdowns.
        pass
    w = interactive(f_interactive,
                    position1=sum([["-"],alias['position1']], []),
                    position2=sum([["-"],alias['position2']], []),
                    action1=sum([["-"],alias['action1']], []),
                    action2=sum([["-"],alias['action2']], []),
                    distance=sum([["-"],alias['distance']], []),
                    size=sum([["-"],alias['size']], []),
                    background=sum([["-"],alias['background']], []),
                    interaction=sum([["-"],alias['interaction']], []))
    # Part 2: button to show pictures
    btn_con = widgets.Button(description = "Show conditional picture", tooltip = 'condition button', layout=Layout(width='25%', height='40px'))
    btn_do = widgets.Button(description = "Show interventional picture", tooltip = 'intervention button', layout=Layout(width='25%', height='40px'))
    def btn_con_click(sender):
        # Run the conditioned model, render the picture, and save it to Drive.
        final_dict, scene = model_condition(w, False)
        pic = generatePic(final_dict, scene)
        display(pic.finalImage)
        pic.save()
    def btn_do_click(sender):
        # Same, but through the do-operator (intervention) model.
        final_dict, scene = model_intervention(w, False)
        pic = generatePic(final_dict, scene)
        display(pic.finalImage)
        pic.save()
    btn_con.on_click(btn_con_click)
    btn_do.on_click(btn_do_click)
    return w, btn_con, btn_do
# + id="3bTLNQOW8hx8"
#main functions
def model_condition(w, plot = False):
    '''
    End-to-end conditional pipeline: read the widget's pre-set values,
    condition the model on them, infer the remaining variables, and return
    (final_dict, scene) describing the generated scene.
    '''
    do_causal_var, do_other_var = getVar(w)
    infer = getInfer(do_causal_var)
    infer_model = condition(model, evidence = do_causal_var)
    # Merge non-causal presets (distance, size, ...) into the evidence dict.
    do_causal_var.update(do_other_var)
    infer_res = {}
    if infer:
        if plot:
            pltDistribution(infer_model, infer)
        # Use the most frequent sampled combination as the inferred values.
        infer_res = mostOccurance(infer_model, infer)
    infer_res.update(do_causal_var)
    infer_res = getLabel(infer_res)
    final_dict = getEntity(infer_res)
    scene = getScene(final_dict)
    return final_dict, scene
def model_intervention(w, plot = False):
    '''
    End-to-end interventional pipeline: identical to model_condition() except
    the widget values are applied with the do-operator instead of
    conditioning. Returns (final_dict, scene).
    '''
    do_causal_var, do_other_var = getVar(w)
    infer = getInfer(do_causal_var)
    infer_model = intervention(model, evidence = do_causal_var)
    # Merge non-causal presets (distance, size, ...) into the evidence dict.
    do_causal_var.update(do_other_var)
    infer_res = {}
    if infer:
        if plot:
            pltDistribution(infer_model, infer)
        # Use the most frequent sampled combination as the inferred values.
        infer_res = mostOccurance(infer_model, infer)
    infer_res.update(do_causal_var)
    infer_res = getLabel(infer_res)
    final_dict = getEntity(infer_res)
    scene = getScene(final_dict)
    return final_dict, scene
# + id="pXi-OKsy9NdX"
#image generator
class generatePic(object):
    '''
    Render the features produced by the causal model into a single picture:
    Megaman and Shademan sprites composited over a background image.
    All image assets are loaded from / saved to a hard-coded Google Drive
    directory (Colab-specific).
    '''
    def __init__(self, attr_dict, scene):
        self.position1 = attr_dict['position1']
        self.position2 = attr_dict['position2']
        self.action1 = attr_dict['action1']
        self.action2 = attr_dict['action2']
        self.distance = attr_dict['distance']
        self.size = attr_dict['size']
        self.background = attr_dict['background']
        # Scene description doubles as the save filename.
        self.scene = scene
        self.MegamanImage, self.ShademanImage, self.backgroundImage = self.getImage()
        self.finalImage = self.getFinalImage()
    def getImage(self):
        '''
        Load the raw element pictures (both action sprites and the background)
        from the Drive directory, trying .png first then falling back to .jpg.
        '''
        base = "/content/drive/My Drive/Causal modelling/Causal scene generation/"
        def load(name):
            # Asset extension is not known in advance; .png preferred.
            try:
                return Image.open(base + name + ".png")
            except:
                return Image.open(base + name + ".jpg")
        return load(self.action1), load(self.action2), load(self.background)
    def backgroundcolor(self):
        '''
        Recolor the non-white areas of the background image.
        NOTE(review): `self.color` is never assigned anywhere in this class,
        so calling this raises AttributeError; it appears to be dead code.
        '''
        color = self.color
        rgb_dict = {'black': (0, 0, 0),
                    'grey': (165, 169, 176),
                    'white': (179, 120, 61)}
        self.backgroundImage = self.backgroundImage.convert('RGBA')
        # Bug fix: Image.resize takes a (width, height) tuple, not two ints.
        self.backgroundImage = self.backgroundImage.resize((400, 400))
        data = np.array(self.backgroundImage)
        red, green, blue, alpha = data.T
        white_areas = (red == 255) & (blue == 255) & (green == 255)
        data[..., :-1][~white_areas.T] = rgb_dict[color]
        self.backgroundImage = Image.fromarray(data)
    def getmmsize(self):
        '''Return the [width, height] resize ratios based on character size.
        (The original class defined getmmsize twice; Python kept the later
        0.3/0.5 definition, so only that one is retained here.)'''
        if self.size == 'tiny':
            rt = [0.3, 0.3]
        else:
            rt = [0.5, 0.5]
        return rt
    def resize_pic(self, entityImage, rt=(0.3, 0.5)):
        '''
        Shrink an entity picture so it fits within rt[0]*width x rt[1]*height
        of the background, preserving its aspect ratio.
        (Default changed from a mutable list to a tuple; values unchanged.)
        '''
        maxwidth, maxheight = self.backgroundImage.width * rt[0], self.backgroundImage.height * rt[1]
        new_w = entityImage.width
        new_h = entityImage.height
        if entityImage.width >= maxwidth:
            new_w = int(maxwidth)
            ratio = entityImage.height / entityImage.width
            new_h = int(new_w * ratio)
        if entityImage.height >= maxheight:
            new_h = int(maxheight)
            ratio = entityImage.height / entityImage.width
            new_w = int(new_h / ratio)
        return entityImage.resize((new_w, new_h))
    def getdistance(self):
        '''Pixel gap between the two characters.'''
        if self.distance == 'small':
            dist = 20
        else:
            dist = 80
        return dist
    def get_concat(self, color=(255, 255, 255)):
        '''
        Combine Megaman, Shademan and the background into one picture.
        When Megaman is on the right, both sprites are mirrored so the
        characters still face each other.
        '''
        if self.position1 == self.position2:
            dis = 0
        else:
            dis = self.getdistance()
        image = Image.new('RGBA', self.backgroundImage.size, color)
        image.paste(self.backgroundImage, (0, 0))
        if self.position1 == 'right' and self.position2 == 'left':
            leftImage = self.ShademanImage.transpose(Image.FLIP_LEFT_RIGHT)
            rightImage = self.MegamanImage.transpose(Image.FLIP_LEFT_RIGHT)
        else:
            leftImage = self.MegamanImage
            rightImage = self.ShademanImage
        # Anchor both sprites 10px above the bottom edge, `dis` pixels apart.
        image.paste(leftImage,
                    (int((self.backgroundImage.width - dis) / 2 - leftImage.width),
                     self.backgroundImage.height - leftImage.height - 10),
                    leftImage)
        image.paste(rightImage,
                    (int((self.backgroundImage.width + dis) / 2),
                     self.backgroundImage.height - rightImage.height - 10),
                    rightImage)
        return image
    def addDescription(self, image):
        '''Placeholder for drawing the scene text onto the picture; the font
        code is commented out (arial.ttf is unavailable on Colab).'''
        l = ImageDraw.Draw(image)
        #font = ImageFont.truetype('arial.ttf', size=60);
        #l.text((50, 50), self.scene, font = font, align ="center", fill = 'black')
    def getFinalImage(self):
        '''Resize both characters, composite them over the background, and
        return the finished image.'''
        self.MegamanImage = self.resize_pic(self.MegamanImage, rt=self.getmmsize())
        self.ShademanImage = self.resize_pic(self.ShademanImage, rt=self.getmmsize())
        image = self.get_concat()
        self.addDescription(image)
        return image
    def draw(self):
        '''Show the final image with the default system viewer.'''
        self.finalImage.show()
    def save(self):
        '''Save the final image to Drive, named after the scene description.'''
        self.finalImage.save("/content/drive/My Drive/Causal modelling/Causal scene generation/generatedPic/" + self.scene + ".png")
# + colab={"base_uri": "https://localhost:8080/", "height": 361, "referenced_widgets": ["fa89fb29874a482084ca326111304ab1", "2365a8b6401642d3aa502ba55951f855", "acbf79e31fec4644b2eae191a8a0de8c", "8463ff3df8fb4b88ad2dcfcab0973c7b", "2ce5347defc84f7d8e0aee02f9a443a4", "7e0d253d2a5e45039e4f59b789b9aaec", "ddcf450b776546a3864995d9144c2294", "decc758e186c4713ae94d2ef92938633", "11430abce3cd4675a1be240d15bfce24", "c2e94fb15fca413d962e698ab1281f08", "ec084419e57240e8aa5a001073513466", "<KEY>", "692ced86bb6d40f2b41b394e86a481cf", "<KEY>", "2ff496ad483f44998aadf3525c4a9618", "ab933691e7c44392be4d89dae49a2e16", "c00952afdea644738a627e751a58981a", "4f3fac13e5164bfb8da70704d3c25733", "<KEY>", "4319d8f4f95e4255af98bda68d9428f1", "8ee98f69486a4b6f82ae9d01fbe1df91", "<KEY>", "0b2ff02f2a8f4fc2b57e7c74d2dca66c", "a1f05301a6c342d781f6d449de725e33", "<KEY>", "<KEY>", "<KEY>", "f1334d2d6668450084aaf6c0ceb90221", "<KEY>", "<KEY>", "f944545ef0264a4fa10d934dd1fc609a", "8f43333c047e46d1a80d86bbb1a4a8c6", "<KEY>", "<KEY>"]} id="4nVcCEx3FQKG" outputId="163d51f6-64f0-4f8a-fe87-c04698409d49"
variable_choices, btn_condition, btn_intervention = userInterface()
display(variable_choices, btn_condition, btn_intervention)
| projects/Megaman Causal Scene Generation/Causal_Scene_Generation_updated.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Step 9: Publish WaMDaM Sqlite files into HydroShare
#
# ### By <NAME>, Feb 2022
# ## Prerequisites
# * You should have an account with HydroShare
# * You should still be connected to the provided SQLite database you worked with above
# * First connect to the SQLite files here
# https://github.com/WamdamProject/WaMDaM_JupyterNotebooks/tree/master/2_ServeToModels/Files
#
# * Connect to the Bear_River_WEAP_WASH.sqlite and follow the instructions below. Then disconnect it or close the Wizard.
#
# * Connect to the Bear_Weber.sqlite and follow the instructions below
#
# * In the WaMDaM Wizard main window, click the **Visualize and Publish** tab, then click the HydroShare button.
# * Provide your username and password for HydroShare, then click Login.
# * Provide a title, abstract, and your name as an author for the resource you will create in HydroShare for the WEAP and WASH model SQLite file. For the sake of demonstration, adding a few words should work. Being descriptive is always a good practice.
# * Click Publish.
# * Go to your browser and hard-refresh the HydroShare page. Go to the **My Resources** tab in HydroShare. You will see the newly uploaded resource.
# * Click the newly added resource and make it public. Feel free to make any edits you see fit.
#
# <img src="https://github.com/WamdamProject/WaMDaM-software-ecosystem/blob/master/mkdocs/Edit_MD_Files/images/HydroShare.PNG?raw=true" style="float:center;width:600px;padding:20px">
# <h3><center>**Figure 1:** WaMDaM Wizard Window to Upload to HydroShare</center></h3>
#
#
# <img src="https://github.com/WamdamProject/WaMDaM-software-ecosystem/blob/master/mkdocs/Edit_MD_Files/images/HydroShare_upload.PNG?raw=true" style="float:center;width:600px;padding:20px">
# <h3><center>**Figure 2:** HydroShare "My Resources" page</center></h3>
#
#
# <img src="https://github.com/WamdamProject/WaMDaM-software-ecosystem/blob/master/mkdocs/Edit_MD_Files/images/HydroShareResources.PNG?raw=true" style="float:center;width:600px;padding:20px">
# <h3><center>**Figure 3:** HydroShare metadata </center></h3>
#
#
# <img src="https://github.com/WamdamProject/WaMDaM-software-ecosystem/blob/master/mkdocs/Edit_MD_Files/images/HydroShare_metadata.PNG?raw=true" style="float:center;width:600px;padding:20px">
# <h3><center>**Figure 4:** HydroShare metadata 2</center></h3>
#
# # Congratulations!
| 2_ServeToModels/09_Step9_Publish_HydroShare.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Socioeconomic data and TOC entitlements
# * Entitlements assigned to census tracts
# * Which census tracts (what income levels or median household income) have seen TOC entitlements?
# * See if tract is composed of mostly TOC-eligible parcels
# * Then look at Census characteristics of mostly TOC-eligible tracts vs not
# +
import boto3
import geopandas as gpd
import intake
import matplotlib.pyplot as plt
import matplotlib.colors
import numpy as np
import os
import pandas as pd
import laplan
# +
catalog = intake.open_catalog("../catalogs/*.yml")
s3 = boto3.client('s3')
bucket_name = 'city-planning-entitlements'
# -
# ## A. Merge in number of TOC entitlements that tract had with census stats
# Download parcels with TOC entitlement info, and only keep parcels with TOC ent
def tracts_with_TOC_ent():
    '''
    Build a tract-level GeoDataFrame combining census characteristics with
    counts of TOC / non-TOC entitlements aggregated from parcel data.
    Returns one row per census tract (GEOID) with geometry, census stats,
    toc_AIN flag, and num_TOC / num_nonTOC entitlement counts.
    '''
    parcels = catalog.toc_parcels_with_entitlements.read().to_crs('EPSG:4326')
    # Keep only parcels that actually received TOC entitlements.
    toc_parcels = parcels[parcels.num_TOC > 0][['AIN', 'num_TOC', 'num_nonTOC']]
    crosswalk_parcels_tracts = catalog.crosswalk_parcels_tracts.read()
    df = pd.merge(crosswalk_parcels_tracts[["AIN", "GEOID", "toc_AIN"]],
                  toc_parcels, on = "AIN", how = "left", validate = "1:1")
    # Fill in NaNs with zeroes (parcels without TOC entitlements)
    df = df.assign(
        num_TOC = df.num_TOC.fillna(0).astype(int),
        num_nonTOC = df.num_nonTOC.fillna(0).astype(int),
    )
    # Aggregate to tract-level
    df = (df.groupby(["GEOID", "toc_AIN"])
          .agg({"num_TOC": "sum",
                "num_nonTOC": "sum"})
          .reset_index()
         )
    # Merge in census tract geometry and other census characteristics
    tracts = catalog.census_tracts.read().to_crs("EPSG:4326")
    census_stats = catalog.census_analysis_table.read()
    # Merge in census tract geometry with census stats
    census = pd.merge(tracts[["GEOID", "geometry"]],
                      census_stats,
                      on = "GEOID", how = "left", validate = "1:1")
    # Merge in census tract geometry with TOC entitlements
    final = pd.merge(census, df, on = "GEOID", how = "left", validate = "1:1")
    return final
final = tracts_with_TOC_ent()
# ## B. Summary stats
# Instead of unweighted averages, we should definitely weight by population.
# Aggregate counts for # non car, # zero veh workers, etc into the group first.
# Then calculate % non car, % zero veh workers, medincome etc.
# Already have info whether tract is 50% or more (by area or # AIN) TOC-eligible
# Now add info about how many actual TOC entitlements occurred
def set_groups(df):
    """
    Add TOC-entitlement indicator columns to the tract-level dataframe.

    Adds:
      toc_ENT       : 1 if the tract has any TOC entitlements, else 0
      toc_ENT_group : 0 (none), 1 (1-5 entitlements), 2 (6-10), 3 (11+)
    and fills missing num_TOC / num_nonTOC counts with 0.
    Returns a new dataframe; the input is not mutated.
    """
    num = df.num_TOC
    # Vectorized replacement for the original row-wise apply (which also
    # shadowed its loop variable). NaN compares False in every condition, so
    # tracts with missing counts land in group 0 exactly as before.
    df = df.assign(
        toc_ENT = (num > 0).astype(int),
        toc_ENT_group = np.select(
            [num > 10, num > 5, num > 0], [3, 2, 1], default=0
        ),
        num_TOC = df.num_TOC.fillna(0).astype(int),
        num_nonTOC = df.num_nonTOC.fillna(0).astype(int),
    )
    return df
final = set_groups(final)
final.head(2)
# +
# Calculate IQR for income
def aggregate_by_toc(df, category_col, income_df):
    '''
    Aggregate household-income range counts by `category_col` (e.g. toc_AIN
    or toc_ENT) and compute the income interquartile range per group.
    Returns one row per category value with income_Q1/Q2/Q3 in dollars.
    '''
    df = df[["GEOID", category_col]]
    df2 = pd.merge(df, income_df, on = "GEOID", how = "left", validate = "1:1")
    # Aggregate by toc_area or toc_AIN (sum the income-range counts per group)
    df2 = df2.pivot_table(index = category_col, aggfunc = "sum").reset_index()
    # Calculate IQR: 25th / 50th / 75th income percentiles per group
    iqr = (df2.apply(
        lambda r: pd.Series(laplan.census.income_percentiles(r, [25,50,75]), dtype="float64"),
        axis=1,
    ).rename(columns={0: "Q1", 1: "Q2", 2: "Q3"})
    )
    # Change unit income IQR from thousands of dollars to dollars
    DOLLAR_UNIT = 1_000
    iqr = (iqr.assign(
        Q1 = iqr.Q1 * DOLLAR_UNIT,
        Q2 = iqr.Q2 * DOLLAR_UNIT,
        Q3 = iqr.Q3 * DOLLAR_UNIT,
    ).rename(columns = {"Q1": "income_Q1",
                        "Q2": "income_Q2",
                        "Q3": "income_Q3"})
    )
    # Merge IQR in (row order of df2 and iqr matches, so join on index)
    df3 = pd.merge(df2[[category_col]], iqr, left_index = True, right_index = True,
                   how = "left", validate = "1:1")
    return df3
def summary_stats(df, category_col, income_df):
    '''
    Population-weighted summary table by `category_col`: tract counts,
    percentages (zero-vehicle, non-car commute, renter, white non-Hispanic)
    computed from grouped totals, plus the income IQR from aggregate_by_toc.
    '''
    # Number of tracts by cut-offs
    num_tracts = (df.groupby(category_col).agg({
        "GEOID": "count"
    }).reset_index()
    .rename(columns = {"GEOID": "num_tracts"})
    )
    # Calculate totals first, so the percentages below are population-weighted
    # rather than unweighted tract averages.
    totals = df.groupby(category_col).agg({
        "zero_veh_workers": "sum",
        "non_car_workers": "sum",
        "workers_total": "sum",
        "pop_renter": "sum",
        "pop_whitenonhisp": "sum",
        "pop_total": "sum",
    }).reset_index()
    # Calculate percents
    percents = totals.assign(
        pct_zero_veh = totals.zero_veh_workers / totals.workers_total,
        pct_non_car = totals.non_car_workers / totals.workers_total,
        pct_renter = totals.pop_renter / totals.pop_total,
        pct_white = totals.pop_whitenonhisp / totals.pop_total,
    )
    # Calculate income IQR
    income_iqr = aggregate_by_toc(df, category_col, income_df)
    # Create final table
    summary = pd.merge(percents, num_tracts, on = category_col, validate = "1:1")
    summary = pd.merge(summary, income_iqr, on = category_col, validate = "1:1")
    return summary
# +
# Create a subset df that pulls out incomerange columns from census stats
income_ranges = laplan.census.CENSUS_INCOME_RANGES
# The new_var columns to keep all have prefix "total_".
# Can switch out if we're interested in other races' income ranges
keep = []
for x in income_ranges:
keep.append("total_" + x)
keep.append("GEOID")
census_stats = catalog.census_analysis_table.read()
income = census_stats[keep]
income.head(2)
# -
# TOC tracts: 50% of AIN in TOC Tier or not
by_AIN = summary_stats(final, "toc_AIN", income)
by_AIN
# TOC tracts: has TOC ENT or not
by_toc_ENT = summary_stats(final, "toc_ENT", income)
by_toc_ENT
# TOC tracts: by number of TOC ENT
by_num_TOC_ENT = summary_stats(final, "num_TOC", income)
by_num_TOC_ENT
# TOC tracts: by grouping the number of TOC ENT into 3 groups
# 0: num_TOC = 0
# 1: 1-5
# 2: 6-10
# 3: 11+
by_TOC_ENT_group = summary_stats(final, "toc_ENT_group", income)
by_TOC_ENT_group
# +
# Write each summary table to its own sheet of one Excel workbook.
if not os.path.exists("../outputs"):
    os.mkdir("../outputs")
writer = pd.ExcelWriter("../outputs/07-toc-census-stats.xlsx", engine="xlsxwriter")
by_AIN.to_excel(writer, sheet_name = "by_pct_AIN")
by_toc_ENT.to_excel(writer, sheet_name = "by_TOC_ENT")
by_num_TOC_ENT.to_excel(writer, sheet_name = "by_num_TOC_ENT")
# NOTE(review): ExcelWriter.save() is deprecated in recent pandas in favor of
# writer.close() — confirm the pinned pandas version before changing.
by_TOC_ENT_group.to_excel(writer, sheet_name = "by_TOC_ENT_group")
writer.save()
# -
# ## C. Make map of tracts
# +
# By AIN
final = final.to_crs("EPSG:4326")
fig, ax = plt.subplots(figsize=(8,8))
ax.set_title("TOC Tracts by % AIN")
blue = "#3182BD"
gray = "#E1E1E1"
final.plot(column="toc_AIN", ax=ax,
cmap = matplotlib.colors.ListedColormap([gray, blue]),
legend=False)
# -
# By TOC ENT
fig, ax = plt.subplots(figsize=(8,8))
ax.set_title("TOC Tracts by having TOC Entitlements")
final.plot(column="toc_ENT", ax=ax, cmap = "tab20c", legend=False)
# +
# By TOC ENT group
colors = {0: '#3182bd', # blue
1: '#50b76f', # green
2: '#fdae6b', # orange
3: '#eded0f'} # yellow
fig, ax = plt.subplots(figsize=(8,8))
ax.set_title("TOC Tracts by TOC Entitlements Group")
for ctype, data in final.groupby('toc_ENT_group'):
# Define the color for each group using the dictionary
color = colors[ctype]
# Plot each group using the color defined above
data.plot(color=color,
ax=ax,
label=ctype,
legend=True)
# -
| notebooks/C6-toc-census-stats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import straph as sg
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (12,9)
# # Connected Components
# Connected components are among the most basic, useful and important concepts of
# graph theory. It is common usage to decompose a graph into its connected components.
# If a graph is not connected, it can be divided into distinct connected components.
# Many properties, which involve computation of paths or communities, can
# be computed independently on each connected component, thus enabling parallel
# execution of numerous methods.
#
# Connected components were recently generalized to [stream graphs](https://arxiv.org/abs/1710.04073). These generalized
# connected components have a crucial feature: like graph connected components
# and unlike other generalizations available in the literature, they partition the set of
# temporal nodes. This means that each node at each time instant is in one and only one
# connected component. This makes these generalized connected components particularly
# appealing to capture important features of the vast variety of objects modeled
# by stream graphs.
path_directory = "examples/"
S = sg.read_stream_graph(path_nodes=path_directory + "example_nodes.sg",
path_links=path_directory + "example_links.sg")
# ## Weakly Connected Components
# In a Stream Graph, $S = (T,V,W,E)$, weakly connected components represent elements of $W$
# connected together without any constraint on time.
#
# Intuitively, the weakly connected components correspond to the disconnected parts
# of a drawing of a stream graph.
wcc = S.weakly_connected_components()
_ = S.plot(clusters=wcc,title="Weakly Connected Components")
# These elements can be analysed separately to
# observe and compute some properties, allowing a parallel implementation of several methods.
# ## Strongly Connected Components
# Inside a strongly connected component all nodes are reachable from any other at any
# time instant.
#
# This definition is consistent with the one used in graph theory: for any time instant, if
# we take the induced Graph $G_t$ the SCC at $t$ corresponds to the connected components
# of $G_t$.
scc = S.strongly_connected_components(format = "cluster")
_ = S.plot(clusters=scc,title="Strongly Connected Components")
# ## Stable Connected Components
# A stable connected component is a cluster $C = (I,X)$, $I = [b,e]$, $X \in V$ where interactions between the nodes have begun before $b$ or at the same time and have ended after $e$ or at the same time.
#
# The decomposition into stable connected components is a
# finer grain decomposition of the stream graph than the one into strongly connected
# components.
#
# A stable connected component, $C = (I,X)$, can be reduced to a static graph $G_C = (X,E_C)$ spanning $I$.
stcc = S.stable_connected_components(format = "cluster")
_ = S.plot(clusters=stcc,title="Stable Connected Components")
# For more details we refer to the paper [Connected Components in Stream Graphs](https://arxiv.org/abs/2011.08054).
| docs/notebooks/Connected Components.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp stockfish
# -
# %load_ext autoreload
# %autoreload 2
# # cheviz stockfish
#
# This module integrates a chess engine, in this case the popular stockfish engine. The actual engine is provided by Python chess as [described here](https://python-chess.readthedocs.io/en/latest/engine.html). However, in our goal to build a complete ETL pipeline, we still need to get the engine running on our box. In a similar way as we got our data (PGN-encoded chess games), I'd like to use Jupyter Notebooks to perform the engine integration.
# # 1. Motivation
#
# For automated insights, we need a way to let the computer learn about our metrics. We need something to compare against our metrics, so I figured I start with stockfish, a very popular chess engine. Python chess also interfaces with stockfish so I won't need another dependency, either.
#
# Stockfish is a small download and with the binaries having few dependencies they should be ready to go right after extracting the archive – no extra installation required. The engine should then help us by generating our training data.
# # 2. Getting the engine
# !mkdir -p "tools"
# !ls
# Assuming this notebook runs on a Linux box, we'll get the Linux binary [from the official website](https://stockfishchess.org/download/). We're not building high-end desktop software here (does such thing even exist?) so hard-coding the download URL for just the Linux version is OK. It might feel wrong, but the best advice to counter your (very valid) intuition is that we have to focus on our goal: automated insights. That is, don't spend time over-engineering the basic, non-data-sciency stuff in your pipeline *unless* your pipeline is ready to run in production and earn money for you. There is a reason why we use Python for all of this, so quick'n'dirty – to a certain degree – is the way to go.
#
# We need to set a fake browser user-agent for our download request, otherwise we get 403'd. Normally that's a sign you're doing something wrong at extra costs to someone else (in this case ISP hosting fees for the stockfish communiy). At 1.7M of download size, which is probably smaller than a typical project's landing page these days, I don't feel guilty at all.
# +
#export
from pathlib import Path
import urllib.request
import shutil
def download():
    """Download the stockfish 11 Linux archive into ./tools (if not cached).

    A Referer header and a browser-like User-Agent are required, otherwise
    the stockfish site answers 403.

    Returns
    -------
    Path
        Location of the downloaded zip archive (existing or freshly written).
    """
    src = 'https://stockfishchess.org/files/stockfish-11-linux.zip'
    dst = Path().absolute() / 'tools' / 'stockfish.zip'
    # robustness: don't rely on a prior `!mkdir -p tools` notebook cell
    dst.parent.mkdir(parents=True, exist_ok=True)
    if not dst.is_file():
        request = urllib.request.Request(src)
        request.add_header('Referer', 'https://stockfishchess.org/download/')
        request.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)')
        with urllib.request.urlopen(request) as response, open(dst, 'wb') as dst_file:
            shutil.copyfileobj(response, dst_file)
    return dst
# -
# We still need to extract the downloaded Zip archive. Also, we need to make the stockfish binary executable. There are 3 binaries available in this version, we use what I guess is the default one. More quick'n'dirty hardcoding that I expect to break sooner than later.
# +
#export
from pathlib import Path
import zipfile
import os
import stat
def extract(dst:Path):
    """Extract the stockfish zip archive into ./tools and make the Linux
    binary executable.

    Parameters
    ----------
    dst : Path
        Path to the downloaded zip archive; when it does not exist the
        function returns None.

    Returns
    -------
    Path or None
        Path to the executable stockfish binary, or None if `dst` is missing.
    """
    if not dst.is_file():
        return
    with zipfile.ZipFile(dst, 'r') as zip_file:
        zip_file.extractall(Path().absolute() / 'tools')
        # namelist()[0] is assumed to be the archive's top-level folder —
        # specific to the stockfish-11 archive layout; TODO confirm for
        # other versions
        dst_extracted = Path().absolute() / 'tools' / zip_file.namelist()[0]
    # make binary executable; the sub-path is hard-coded to the default
    # Linux build in this archive version and will break when it changes
    dst_binary = dst_extracted / 'Linux' / 'stockfish_20011801_x64'
    st = os.stat(dst_binary)
    os.chmod(dst_binary, st.st_mode | stat.S_IEXEC)
    return dst_binary
# -
stockfish = extract(download())
# Check if we can run stockfish now, but let's not get stuck in stockfish's command prompt. So we send 'quit' to it immediately, which will still return the version of the binary and its authors.
# !{stockfish} quit
# The next step is to hook up the engine with Python chess, as described in their documentation: https://python-chess.readthedocs.io/en/latest/engine.html
#
# PGN can be described as a hierarchical document model due to its ability to store variations next to the mainline. Python chess uses a recursive nodes structure to model PGN; `GameNode::add_main_variation(move: chess.Move)->GameNode` processes a move, adds it as a child to the current node and returns the child. If we only process the mainline moves, the structure effectively represents a linked list. We could also just use a chess.Board instance to replay the engine results back to Python chess. Since our data module handles PGN however, I thought I go the extra step here. The way engine, game and board interact feels fragile to me but that is perhaps because board isn't just a plain chess position viewer. Instead, it has game/engine logic of its own.
# +
#export
from pathlib import Path
import chess
import chess.engine
import chess.pgn
import cheviz.data
import datetime
def makeEngine(engine_path:Path=None):
    """Return a zero-argument factory that spawns a UCI engine process.

    Parameters
    ----------
    engine_path : Path, optional
        Path to the engine binary; when None, the stockfish archive is
        downloaded and extracted first.

    Returns
    -------
    callable
        Factory returning a fresh `chess.engine.SimpleEngine` per call.
    """
    # idiom fix: `is not None` instead of `not ... is None`
    p = engine_path if engine_path is not None else extract(download())
    def engine():
        # popen_uci starts the engine subprocess and performs the UCI handshake
        return chess.engine.SimpleEngine.popen_uci(p.as_posix())
    return engine
def playGame(engine_path:Path, time_limit:float=0.1):
    """Play a full engine-vs-engine game and return the PGN game tree.

    Parameters
    ----------
    engine_path : Path
        Path to the UCI engine binary (e.g. the stockfish executable).
    time_limit : float
        Thinking time per move in seconds; very short limits make the
        engine return empty info dicts more often.

    Returns
    -------
    chess.pgn.Game
        Root node of the finished game with headers and per-move score
        comments filled in.
    """
    me = makeEngine(engine_path)
    engine = me()
    game = chess.pgn.Game()
    game.headers['Event'] = 'cheviz'
    game.headers['Date'] = datetime.date.today().strftime("%Y-%m-%d")
    game.headers['White'] = engine_path.name
    game.headers['Black'] = engine_path.name
    while not game.board().is_game_over():
        result = engine.play(game.board(), chess.engine.Limit(time=time_limit), info=chess.engine.Info.SCORE)
        game = game.add_main_variation(result.move)
        # result.info is sometimes empty, happens more reliably with very short time limits
        game.comment = result.info['score'] if 'score' in result.info else None
    engine.quit()
    # set game back to root node
    game = game.game()
    # bug fix: `Board.result` is a method — the original stored the bound
    # method object (and queried the root node's starting-position board).
    # Query the final position and call it to get e.g. '1-0' or '1/2-1/2'.
    game.headers['Result'] = game.end().board().result()
    return game
# -
# Computer chess is extremely technical and moves will pile up. Even with short time limits a chess engine vs chess engine game can take a couple seconds or even minutes. Too short of a time limit and computer chess turns into garbage. Notice that it's not the engine that's slow, it's how we interact with it and process results.
# %time game = playGame(stockfish)
display(game.mainline_moves())
# Let's look at the engine's game through our UI.
import cheviz.ui
games_list = [cheviz.data.fromGame(game)]
cheviz.ui.showGameUi(games_list)
# We use the same display wrap trick as in the `02_ui.ipynb` notebook.
# +
from IPython.display import display, HTML
CSS = """
.output {
flex-direction: row;
flex-wrap: wrap;
}
"""
HTML('<style>{}</style>'.format(CSS))
| 03_stockfish.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/chavamoon/MachineLearningExamples/blob/main/Python/SimpleLinealRegression.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="PM9Ip-53Za9B"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pylab
import scipy.stats as stats
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error, mean_squared_error
# + [markdown] id="6vkzJjKWcsRG"
# **Objective**:
#
# Using linear regression to predict the variable 'Glucose' from Indian women Glucose analysis sample dataset.
# + [markdown] id="4Ie6O_v9aoq4"
# **1. DATA LOAD**
# + id="E2Afr7FYaxPw"
# Random seed for making the experiment reproducible
np.random.seed(200728)
# + id="C_0lJvZtZfKD"
diabetes_dataset = pd.read_csv("diabetes.csv")
# columns in lowercase for an easier data manipulation
diabetes_dataset.rename(columns={column: column.lower() for column in diabetes_dataset.columns}, inplace=True )
# + colab={"base_uri": "https://localhost:8080/"} id="USmv5XzheHIv" outputId="5e5c6687-506f-4b41-efae-a05134aff215"
{column: column.lower() for column in diabetes_dataset.columns}
# + [markdown] id="nfRjX1b6cM9W"
# **2. DATA ANALYSIS**
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="3EPlvyy5cVtl" outputId="e30c24ad-882b-4796-be64-b6883f9a39c5"
diabetes_dataset.head()
# + colab={"base_uri": "https://localhost:8080/"} id="O9HVokB2cf5Q" outputId="41c82a80-5fbc-45a9-9092-52e31f696685"
diabetes_dataset.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="FmvzhUyPdNDZ" outputId="9f933b50-ae0a-4c5e-fbc3-ece597dd2a77"
diabetes_dataset.describe()
# + [markdown] id="e5KOl8Yef-xd"
# checking correlation between variables
#
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="-QJ1P-yKfGWO" outputId="d2912a34-ebbb-4d10-ad0d-60b6525d5cfe"
#checking correlation between variables
sns.pairplot(diabetes_dataset)
# + colab={"base_uri": "https://localhost:8080/", "height": 351} id="kxREIuWcfgto" outputId="3f3b02d4-29c2-40a2-d835-33f4cc34999c"
#Correlation matrix
diabetes_dataset.corr()
# + [markdown] id="Ymdpi8RvgIZl"
# Insuline and bmi are the best candidates for predicting glucose, in this example we will use bmi to predict glucose
# + id="RY1FlN4bggwe"
X = diabetes_dataset[["bmi"]]
y = diabetes_dataset[["glucose"]]
# + id="noaQyrzIiv6V"
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# + colab={"base_uri": "https://localhost:8080/"} id="jW-Hfq6Tj2T1" outputId="33278359-5202-47bb-ef86-c3b0b4d4eb60"
print("Shape of X_train: " , X_train.shape, ". Shape of X_test" , X_test.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="NLBwqPrLka5s" outputId="b87164bb-4029-43f8-a4be-dd146555c4ca"
print("Shape of y_train: " , y_train.shape, ". Shape of y_test" , y_test.shape)
# + [markdown] id="R_4M08PPk035"
# **3. TRAINING**
# + id="9n6d6PiHlAjW"
lr = LinearRegression()
# + id="lyTwvgUqlEf8"
# Send training values to LinearRegression
m_lr = lr.fit(X_train, y_train)
# + [markdown] id="4NXFvqghl5DG"
# Getting betas and intercept
# + colab={"base_uri": "https://localhost:8080/"} id="3BISuEyjl9m1" outputId="2760087d-4600-4478-cf65-2fa682b445ed"
# Betas
m_lr.coef_
# + colab={"base_uri": "https://localhost:8080/"} id="3BLUWww1mO79" outputId="ee62262f-3582-450c-8156-26eb90eadddc"
#Intercept
m_lr.intercept_
# + [markdown] id="Kb2tNkZCmliU"
# Predictions
# + id="3ZG9Ho00mnyV"
predictions = m_lr.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="0kW8_sALm4Wu" outputId="68a246ee-47ee-4586-db99-13b3437ef538"
#first five predictions
predictions[:5]
# + [markdown] id="mBoXAtqunCK2"
# **4. PERFORMANCE METRICS**
# + colab={"base_uri": "https://localhost:8080/"} id="Zq7pc_cmnIpE" outputId="7af30616-a9fd-410e-f774-c52733972534"
#MAE
mean_absolute_error(y_test, predictions)
# + colab={"base_uri": "https://localhost:8080/"} id="xBZxNnJjntS_" outputId="21068d31-324d-4a1a-a49a-207f8c2be948"
#RMSE
mean_squared_error(y_test, predictions, squared=False)
# + [markdown] id="RJHI8IkToQEA"
# **5.RESIDUALS**
# + id="KX85_EcIoV0n"
residuals = y_test - predictions
# + id="vd91rc1suWxm"
#Converting predictions array from shape (231,1) to (231,)
predictions_array = predictions.reshape(predictions.shape[0],)
# + id="U3ycYy7XpZSl"
# assemble a tidy frame of true values, predictions and residuals for plotting
df_residuals = pd.DataFrame({
    'y_test': y_test['glucose'],   # bug fix: was residuals['glucose'], duplicating the residuals column
    'predictions': predictions_array,
    'residuals': residuals['glucose'] })
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="IGXT_B9Rq9B_" outputId="5476e629-b8bc-41db-fb86-a79ee7e4f8b3"
#Dots should be centered in zero and have constants variance (no pattern)
sns.scatterplot(x="predictions", y="residuals", data=df_residuals)
# + [markdown] id="U5GVS2s5vGkY"
# **6. QQPLOT**
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="zqGzQXJ0vokE" outputId="0248b2b3-cbd3-46f6-e1fd-08a434e1cf43"
# Must follow 45 degrees line
stats.probplot(residuals['glucose'], dist='norm', plot=pylab)
pylab.show()
# + id="8GZCBoOVvx39"
| Python/Regression/SimpleLinealRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import lxml.etree as ET
import requests
# read csv and remove not referenced places
df = pd.read_csv('data/thun_places_georeferenced_geobrowser.csv', encoding="utf-8")
cleaned_df = df.dropna(subset=['GettyID'])
# fetch listplace.xml and save it as lxml.etree element "tree"
url = 'http://localhost:8080/exist/rest/db/apps/thun/data/indices/listplace.xml'
response = requests.get(url)
tree = ET.fromstring(response.content)
# For every georeferenced place, attach a Getty idno and WGS coordinates to
# the matching tei:place element (matched by @xml:id == lower-cased address).
for index, row in cleaned_df.iterrows():
    key = (row.Address).lower()
    searchstring = "//tei:place[@xml:id='{}']".format(key)
    # run the XPath query once per row instead of twice
    hits = tree.xpath(searchstring, namespaces={"tei": "http://www.tei-c.org/ns/1.0"})
    if len(hits) > 0:
        hit = hits[0]
        idno = ET.Element("idno", type="getty")
        idno.text = str(row.GettyID)
        location = ET.Element("location")
        geo = ET.Element("geo", decls="#WGS")
        coordinates = "{} {}".format(row.Longitude, row.Latitude)
        geo.text = coordinates
        location.append(geo)
        hit.append(location)
        hit.append(idno)
# write updated listplace.xml to file
with open('data/enriched.xml', 'wb') as f:
f.write(ET.tostring(tree, pretty_print=True))
| georeference/enrich_listplace.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %pylab inline
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import sys
from tqdm import tqdm
# -
cuda = torch.cuda.is_available()
device = torch.device("cuda" if cuda else "cpu")
save_path = 'cache/models'
# +
train_dataset = dsets.MNIST(root='./data',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = dsets.MNIST(root='./data',
train=False,
transform=transforms.ToTensor())
print(train_dataset.data.size())
print(test_dataset.data.size())
digit_p, digit_q = 4, 7
# digit_p, digit_q = 4,9
data1 = train_dataset
# selecting number 0 zero only
tt = data1.targets[(data1.targets== digit_p) | (data1.targets== digit_q)]
tt[tt==digit_p] = 0
tt[tt==digit_q] = 1
dd = data1.data[(data1.targets== digit_p) | (data1.targets== digit_q)]
# tt = data.targets[(data.targets== 1)]
# dd = data.data[(data.targets== 1)]
data1.targets = tt
data1.data = dd
train_loader = torch.utils.data.DataLoader(data1, batch_size=100, shuffle=True, drop_last = True)
# Num batches
# num_batches = len(train_loader)
print((tt==0).sum(), (tt==1).sum())
data2 = test_dataset
# selecting number 0 zero only
tt = data2.targets[(data2.targets== digit_p) | (data2.targets== digit_q)]
tt[tt==digit_p] = 0
tt[tt==digit_q] = 1
dd = data2.data[(data2.targets== digit_p) | (data2.targets== digit_q)]
# tt = data.targets[(data.targets== 1)]
# dd = data.data[(data.targets== 1)]
data2.targets = tt
data2.data = dd
test_loader = torch.utils.data.DataLoader(data2, batch_size=100, shuffle=True, drop_last = True)
# Num batches
# num_batches = len(train_loader)
print((tt==0).sum(), (tt==1).sum())
# +
# train_dataset.data
# +
p = train_dataset.data.size()[1]
num_cls = len(set(train_dataset.targets.numpy()))
print(p, num_cls)
batch_size = 100
n_iters = 10000
num_epochs = int(n_iters / (len(train_dataset) / batch_size))+1
print('number of epochs: {}'.format(num_epochs))
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# +
class RNNModel(nn.Module):
    """Many-to-one vanilla RNN classifier.

    The input is consumed as a (batch, seq, feature) tensor; the hidden
    state of the last time step is passed through a linear readout layer
    to produce class logits.
    """

    def __init__(self, input_dim, hidden_dim, layer_dim, output_dim):
        super(RNNModel, self).__init__()
        # Hidden dimensions
        self.hidden_dim = hidden_dim
        # Number of stacked RNN layers
        self.layer_dim = layer_dim
        # batch_first=True causes input/output tensors to be of shape
        # (batch_dim, seq_dim, feature_dim)
        self.rnn = nn.RNN(input_dim, hidden_dim, layer_dim, batch_first=True, nonlinearity='tanh')
        # Readout layer mapping the last hidden state to output_dim logits
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        # Initialize the hidden state with zeros on the same device as the
        # input. (Generalization: the original read a module-level `device`
        # global, which tied the class to the notebook's setup cell; using
        # x.device is equivalent whenever model and input share a device,
        # which nn.RNN requires anyway.)
        h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim, device=x.device)
        # Detach h0 so gradients do not flow across calls — part of
        # truncated backpropagation through time (BPTT).
        out, hn = self.rnn(x, h0.detach())
        # out has shape (batch, seq, hidden); keep only the last time step.
        out = self.fc(out[:, -1, :])
        # resulting shape: (batch, output_dim)
        return out
input_dim = 4
hidden_dim = 2
layer_dim = 2 # ONLY CHANGE IS HERE FROM ONE LAYER TO TWO LAYER
output_dim = 10
model = RNNModel(input_dim, hidden_dim, layer_dim, output_dim)
model.to(device)
# JUST PRINTING MODEL & PARAMETERS
print(model)
print(len(list(model.parameters())))
for i in range(len(list(model.parameters()))):
print(list(model.parameters())[i].size())
# +
### Model Training
criterion = nn.CrossEntropyLoss()
learning_rate = 0.01
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# Number of steps to unroll
seq_dim = 28*28 // 4
iter = 0
best_acc = 0
num_epochs = 6
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
model.train()
# Load images as tensors with gradient accumulation abilities
images, labels = images.view(-1, seq_dim,
input_dim).requires_grad_().to(device), labels.to(device)
# Clear gradients w.r.t. parameters
optimizer.zero_grad()
# Forward pass to get output/logits
# outputs.size() --> 100, 10
outputs = model(images)
# Calculate Loss: softmax --> cross entropy loss
loss = criterion(outputs, labels)
# Getting gradients w.r.t. parameters
loss.backward()
# Updating parameters
optimizer.step()
iter += 1
if iter % 500 == 0:
model.eval()
# Calculate Accuracy
correct = 0
total = 0
with torch.no_grad():
# Iterate through test dataset
for images, labels in test_loader:
# Resize images
images = images.view(-1, seq_dim, input_dim).to(device)
# Forward pass only to get logits/output
# import pdb; pdb.set_trace()
outputs = model(images)
# Get predictions from the maximum value
_, predicted = torch.max(outputs.data, 1)
# Total number of labels
total += labels.size(0)
# Total correct predictions
if cuda:
correct += (predicted.cpu() == labels.cpu()).sum()
else:
correct += (predicted == labels).sum()
accuracy = 100 * correct.float() / total
# Print Loss
print('Iteration: {}. Loss: {}. Accuracy: {}'.format(iter, loss.item(), accuracy))
if accuracy > best_acc:
best_acc = accuracy
torch.save({'epoch': epoch,
'model': model.state_dict(),
'optimizer': optimizer.state_dict()
}, '{}/rnn_iter_{}.pth'.format(save_path, iter))
print('\r Best model saved.\r')
# -
### Load and use the best model
# NOTE(review): the training loop above saves checkpoints as
# 'rnn_iter_{iter}.pth', but this path expects 'rnn_best.pth' — confirm the
# best checkpoint is copied/renamed to this name before running this cell.
bst_mdl = save_path+'/rnn_best.pth'
model.load_state_dict(torch.load(bst_mdl)['model'])
images.shape
# +
# import torch
# import torch.nn as nn
# import torchvision
# import torchvision.transforms as transforms
# # Device configuration
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# # Hyper-parameters
# sequence_length = 28
# input_size = 28
# hidden_size = 128
# num_layers = 2
# num_classes = 10
# batch_size = 100
# num_epochs = 5
# learning_rate = 0.01
# # Recurrent neural network (many-to-one)
# class RNN(nn.Module):
# def __init__(self, input_size, hidden_size, num_layers, num_classes):
# super(RNN, self).__init__()
# self.hidden_size = hidden_size
# self.num_layers = num_layers
# self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
# self.fc = nn.Linear(hidden_size, num_classes)
# def forward(self, x):
# # Set initial hidden and cell states
# h0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
# c0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(device)
# # Forward propagate LSTM
# out, _ = self.lstm(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size)
# # Decode the hidden state of the last time step
# out = self.fc(out[:, -1, :])
# return out
# model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)
# # Loss and optimizer
# criterion = nn.CrossEntropyLoss()
# optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# # Train the model
# total_step = len(train_loader)
# for epoch in range(num_epochs):
# for i, (images, labels) in enumerate(train_loader):
# images = images.reshape(-1, sequence_length, input_size).to(device)
# labels = labels.to(device)
# # Forward pass
# outputs = model(images)
# loss = criterion(outputs, labels)
# # Backward and optimize
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
# if (i+1) % 100 == 0:
# print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
# .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# # Test the model
# with torch.no_grad():
# correct = 0
# total = 0
# for images, labels in test_loader:
# images = images.reshape(-1, sequence_length, input_size).to(device)
# labels = labels.to(device)
# outputs = model(images)
# _, predicted = torch.max(outputs.data, 1)
# total += labels.size(0)
# correct += (predicted == labels).sum().item()
# print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * correct / total))
# # Save the model checkpoint
# torch.save(model.state_dict(), 'model.ckpt')
# +
### Feeding white-noise
model.eval()
batch_size = 10000
all_size = 100000
iters = 100
stats = {} # recording the bias
noise = {}
for i in range(num_cls):
stats[i] = 0
noise[i] = []
with tqdm(total=iters, file=sys.stdout) as pbar:
for kk in range(iters):
z = torch.rand(all_size, p, p)
for k in range(0, all_size, batch_size):
with torch.no_grad():
cur_data = z[k:k+batch_size]
if cuda:
cur_data = cur_data.cuda()
pred = model(cur_data).max(1)[1]
for i in range(num_cls):
noise[i].append(cur_data[pred == i].cpu())
stats[i] += (pred == i).sum().cpu()
pbar.update(1)
# -
stats
pred
# visualize
for i in range(num_cls):
if stats[i] != 0:
print(i)
plt.imshow(torch.cat(noise[i]).mean(0))
plt.show()
| rnn_mnist-a.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # 1. Install the Kaggle API on your SageMaker notebook
# !pip install kaggle
# # 2. Create a Kaggle API Key
# 1. Create a Kaggle account here: https://www.kaggle.com/
# 2. Create a Kaggle API key
# 3. Upload the Kaggle API key onto your notebook instance.
# Make sure your kaggle.json file is located on the home directory of your notebook instance.
# !mv ../../kaggle.json /home/ec2-user/.kaggle/kaggle.json
# !chmod 600 /home/ec2-user/.kaggle/kaggle.json
# This next command downloads the data to your notebook instance. Make sure you're doing this in the Cloud, rather than from your laptop. That will save your local network bandwidth from having to download the file, and will free up space on your laptop!
#
# The last line in the API kaggle file is going to be specific to your Kaggle competition. Make sure to modify it based on the data you actually want to download.
# !kaggle competitions download -c ams-2014-solar-energy-prediction-contest
| Starter-Code/Downloading data from Kaggle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
data_path = '/home/michaelneuder/fc_recon_6400/'
orig = np.loadtxt(os.path.join(data_path, 'orig3.txt'))
recon = np.loadtxt(os.path.join(data_path, 'recon3.txt'))
# making sure data isn't corrupted
orig.shape, recon.shape
orig = orig.reshape((3,96,96))
recon = recon.reshape((3,96,96))
plt.imshow(orig[0], cmap='gray')
plt.show()
# trimming extra data on recon file
recon = np.loadtxt(os.path.join(data_path, 'recon3.txt'))
recon.shape
recon = np.around(recon, decimals=3)
recon
recon.shape
np.savetxt(os.path.join(data_path, 'recon3_shortened.txt'), recon, fmt='%.3f',)
| bin/data_processing/data_scratch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.4 64-bit
# name: python394jvsc74a57bd0ac59ebe37160ed0dfa835113d9b8498d9f09ceb179beaac4002f036b9467c963
# ---
import pandas as pd
import numpy as np
df = pd.read_csv("..\Dataset\heart.csv") #loading the dataset
df.head()
# Normalization
df["age"]=df["age"]/df["age"].max()
df["chol"]=df["chol"]/df["chol"].max()
df["thalach"]=df["thalach"]/df["thalach"].max()
df["trestbps"]=df["trestbps"]/df["trestbps"].max()
df["cp"]=df["cp"]/df["cp"].max()
df["fbs"]=df["fbs"]/df["fbs"].max()
df["oldpeak"]=df["oldpeak"]/df["oldpeak"].max()
df["slope"]=df["slope"]/df["slope"].max()
df["ca"]=df["ca"]/df["ca"].max()
df["thal"]=df["thal"]/df["thal"].max()
df["exang"]=df["exang"]/df["exang"].max()
df["restecg"]=df["restecg"]/df["restecg"].max()
df.head()
#after normalization the data looks like this
import seaborn as sns
import matplotlib.pyplot as plt
plt.figure(figsize=(16, 6))
heat_map = sns.heatmap(df.corr(method='pearson'), annot=True, fmt='.2f', linewidths=2, cmap='mako')
heat_map.set_xticklabels(heat_map.get_xticklabels(), rotation=45); # correlations between the features
# positive value means high correlation
x = df[['age', 'gender','cp','trestbps','fbs','chol','restecg','thalach','exang','oldpeak','slope','ca','thal']].values # selecting the features for x
y = df['target'].values
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split( x, y, test_size=0.2, random_state=5)
print ('Train set:', x_train.shape, y_train.shape)
print ('Test set:', x_test.shape, y_test.shape)
# + active=""
# from sklearn import svm
# clf = svm.SVC(kernel = 'linear')
# clf.fit(x_train, y_train)
# +
# Using Support vector machine
# -
from sklearn import svm
clf = svm.SVC(kernel = 'poly', degree = 3) #svm model with polynomial kernel of degree 3
#training over the dataset
clf.fit(x_train, y_train)
from sklearn.metrics import accuracy_score
print("Train set Accuracy: ", accuracy_score(y_train, clf.predict(x_train)))
print("Test set Accuracy: ", accuracy_score(y_test, clf.predict(x_test)))
# +
# Using K-nearest Neighbors
# -
from sklearn.neighbors import KNeighborsClassifier
k = 4
#training over the dataset
neigh = KNeighborsClassifier(n_neighbors = k).fit(x_train,y_train)
from sklearn import metrics
print("Train set Accuracy: ", metrics.accuracy_score(y_train, neigh.predict(x_train)))
print("Test set Accuracy: ", metrics.accuracy_score(y_test, neigh.predict(x_test)))
| DataScience/Your Machine Learning Projects/Heart Disease Prediction/Model/heart_disease_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A Building Controls OpenAI Gym Environment - 1st-Order Model Train Demo
# **(C) <NAME>**
#
# Supplementary work to accompany "A Simplied Building Controls Environment with a Reinforcement Learning Application" paper submitted to IBPSA Rome 2019 Conference. Paper authors: <NAME>, <NAME> and <NAME>.
#
# Please consult the following links to be more accustomed to how OpenAI Gym and Tensorforce work:
# + OpenAI Documentation: https://gym.openai.com/docs
# + Creating a new OpenAI Env: https://github.com/openai/gym/tree/master/gym/envs#how-to-create-new-environments-for-gym
# + OpenAI Env Wiki: https://github.com/openai/gym/wiki/Environments
# + Simple example: https://github.com/MartinThoma/banana-gym
# + OpenAI Baselines: https://github.com/openai/baselines
# + Tensorforce: https://github.com/tensorforce/tensorforce
#
# Other info:
# + MuJoCo: https://www.roboti.us/index.html
# + https://jeffknupp.com/blog/2014/06/18/improve-your-python-python-classes-and-object-oriented-programming/
# + https://python-packaging.readthedocs.io/en/latest/minimal.html
# + https://ray.readthedocs.io/en/latest/rllib.html
# + https://github.com/ray-project/ray/tree/master/python/ray/rllib
#
#
# To install, go to gym-BuildingControls folder and run the command:
#
# pip install -e .
# Make notebook full width
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# ## 2nd-Order Model: Changes to Make to BuildingControls_env.py file
# To run the 2nd-order model, comment this line:
#
# self.bldg = 0 # 1st-order model: direct conditionning of room air
#
# and uncomment the following line:
#
# # self.bldg = 1 # 2nd-order model: radiant slab system to condition room air node
#
# in the `BuildingControls_env.py` file.
# ## Import Dependencies
# +
# from /tensorforce/examples/quickstart.py
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm # progress bar
from tensorforce.core.preprocessors import Preprocessor
from tensorforce.execution import Runner
from tensorforce.contrib.openai_gym import OpenAIGym
# Create an OpenAIgym environment.
import gym
import gym_BuildingControls
environment = OpenAIGym('BuildingControls-v0', visualize=False) # tensorforce
env_gym = gym.make('BuildingControls-v0') # openai gym
cumsum_curriculum = np.cumsum(env_gym.curriculum)
import os
if os.name == 'nt': save_dir = 'C:/RLAgentSaved/'
elif os.name == 'posix': save_dir = '/home/vasken/RLAgentSaved/'
restore = False
save_interval = 1e10 #int(episodes/10.)
report_interval = 500
plot_interval = 1e10 # XXX: Don't use
plot_test_interval = 1e10 # XXX: Don't use
# -
import matplotlib
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams.update({'figure.subplot.left': 0.15, 'figure.subplot.bottom': 0.19})
# ## User-defined classes and functions
# +
# Following are controller classes
class agent_constant():
    """Controller that ignores the observation and always emits one action."""

    def __init__(self, constant_action):
        # The single action returned on every call.
        self.action = constant_action

    def act(self, s, deterministic=True):
        # Both `s` and `deterministic` are accepted for interface
        # compatibility only; the output never depends on them.
        return self.action
class agent_random():
    """Controller that picks a uniformly random action, ignoring the state."""

    def __init__(self, nA):
        # Size of the discrete action space; actions are 0..nA-1.
        self.nA = nA

    def act(self, s, deterministic=True):
        # `s` and `deterministic` are accepted for interface compatibility
        # only. np.random.choice is kept so the global RNG stream matches
        # the original implementation under seeding.
        return np.random.choice(self.nA)
class agent_bangbang():
    """One-stage bang-bang (hysteresis) controller for heating and cooling.

    Actions: 0 = step hvac setting down, 1 = hold, 2 = step hvac setting up.
    Heating switches on below `heat_on` and off above `heat_off`; cooling
    switches on above `cool_on` and off below `cool_off`.
    """
    def __init__(self, hvac_levels, heat_on, heat_off, cool_on, cool_off, hvac_off=None):
        self.hvac_levels = hvac_levels
        self.heat_on, self.heat_off = heat_on, heat_off
        self.cool_on, self.cool_off = cool_on, cool_off
        # default "off" setting is the middle of the hvac range
        # (idiom fix: compare to None with `is`, not `==`)
        self.hvac_off = (self.hvac_levels-1)/2. if hvac_off is None else hvac_off
    def act(self, s, deterministic=True):
        T = s[0]           # room sensor temperature
        hvac_state = s[1]  # current hvac setting
        self.action = 1    # default action: don't change hvac state/setting
        if (T < self.heat_on) and (hvac_state == self.hvac_off): self.action = 2  # turn on heating
        if (T > self.heat_off) and (hvac_state > self.hvac_off): self.action = 0  # turn off heating
        if (T > self.cool_on) and (hvac_state == self.hvac_off): self.action = 0  # turn on cooling
        if (T < self.cool_off) and (hvac_state < self.hvac_off): self.action = 2  # turn off cooling
        return self.action
class agent_bangbang_2stage():
    """Two-stage bang-bang (hysteresis) controller for heating only.

    Actions: 0 = step hvac setting down, 1 = hold, 2 = step hvac setting up.
    Stage 1 engages below `heat_on`; stage 2 engages below the colder
    `heat_on_stage2` threshold regardless of the current hvac setting.
    """
    def __init__(self, hvac_levels, heat_on, heat_off, heat_on_stage2, heat_off_stage2, hvac_off=None):
        self.hvac_levels = hvac_levels
        self.heat_on, self.heat_off = heat_on, heat_off
        self.heat_on_stage2, self.heat_off_stage2 = heat_on_stage2, heat_off_stage2
        # default "off" setting is the middle of the hvac range
        # (idiom fix: compare to None with `is`, not `==`)
        self.hvac_off = (self.hvac_levels-1)/2. if hvac_off is None else hvac_off
    def act(self, s, deterministic=True):
        T = s[0]           # room sensor temperature
        hvac_state = s[1]  # current hvac setting
        self.action = 1    # default action: don't change hvac state/setting
        if (T < self.heat_on) and (hvac_state == self.hvac_off): self.action = 2  # turn on heating
        if (T > self.heat_off) and (hvac_state > self.hvac_off): self.action = 0  # turn off heating
        if (T < self.heat_on_stage2): self.action = 2  # turn on heating more
        if (T > self.heat_off_stage2) and (hvac_state > self.hvac_off): self.action = 0  # turn off stage 2 heating
        return self.action
class agent_pi():
    """Proportional-integral controller with a discrete step-up/hold/step-down
    output.

    Actions: 0 = step hvac setting down, 1 = hold, 2 = step hvac setting up.
    The raw PI output is compared against hard-coded stage thresholds
    (2500/7500 for heating, -1500 for cooling) with a symmetric deadband
    `output_deadband` to avoid chattering around a threshold. The integral
    accumulator persists across calls to `act`.
    """
    def __init__(self, Kp, Ki, setpoint, hvac_off, mode='heating', output_deadband=500.):
        self.Kp, self.Ki, self.setpoint = Kp, Ki, setpoint
        self.hvac_off = hvac_off
        self.mode = mode
        self.out_db = output_deadband
        self.cum_error = 0  # integral-term accumulator
    def act(self, s, deterministic=True):
        T = s[0]           # room sensor temp (process variable)
        hvac_state = s[1]  # current hvac setting
        self.action = 1    # default action: don't change hvac state/setting
        error = self.setpoint - T  # setpoint error
        self.cum_error += error    # cumulative error
        output = self.Kp*error + self.Ki*self.cum_error  # PI raw output
        # idiom fix: boolean `and` instead of bitwise `&` between comparisons
        if self.mode == 'heating':
            # TODO make the stage thresholds below generic/general
            if (output <= 2500-self.out_db) and (hvac_state > self.hvac_off): self.action = 0  # turn off heating (hvac setting 1, off)
            if (output > 2500+self.out_db) and (hvac_state <= self.hvac_off): self.action = 2  # turn on heating (hvac setting 2)
            if (output <= 7500-self.out_db) and (hvac_state > 2): self.action = 0  # turn down heating (hvac setting 2)
            if (output > 7500+self.out_db) and (hvac_state <= 2): self.action = 2  # turn up heating (hvac setting 3)
        elif self.mode == 'cooling':
            # TODO make the stage thresholds below generic/general
            if (output > -1500+self.out_db) and (hvac_state < self.hvac_off): self.action = 2  # turn off cooling (hvac setting 1, off)
            if (output <= -1500-self.out_db) and (hvac_state >= self.hvac_off): self.action = 0  # turn on cooling (hvac setting 0)
        return self.action
# Function to plot results
def show_results(agent, perturbation_mode, plot_progression=True, reward_limit=None, plot_T=False, save_prefix=""):
    """Roll out one full episode of *agent* in the module-global env and plot it.

    Parameters
    ----------
    agent : object with an ``act(state, deterministic=True)`` method.
    perturbation_mode : int, forwarded to ``env_gym`` to select a disturbance scenario.
    plot_progression : bool, also plot the module-global ``runner``'s episodic rewards.
    reward_limit : float or None, lower y-limit for the reward-progression plot.
    plot_T : bool, overlay all node temperatures taken from the env's ``info['T']``.
    save_prefix : str; when non-empty, each figure is saved as ``<prefix>_<name>.png``.

    NOTE(review): relies on module-level globals ``env_gym`` and ``runner``
    being defined by earlier cells.  Prints the episode's cumulative reward.
    """
    figsize=(10,3)
    figext, figdpi = ".png", 300
    tickhrs = 12  # one x tick label every 12 h (4 env steps per hour, see axis labels below)
    # env_gym = gym.make('BuildingControls-v0') # openai gym
    env_gym.perturbation_mode = perturbation_mode
    env_gym.change_perturbations_mode()
    s = env_gym.reset()
    done = False
    obs_history = s[0] # only track index 0 state observation = room temperature
    hvac_history = s[1] # hvac state [0,1,2,3,4]
    act_history = np.array([]) # actions taken -/0/+
    Tamb_history = s[15] # ambient temp
    # NOTE(review): np.empty leaves the first row uninitialized; it is stacked with
    # the real data and plotted when plot_T=True -- confirm this is intended.
    T_history = np.empty(env_gym.nN) # all temperatures; agent does not see this, read from "info" variable passed @ env.step()
    reward_history = np.array([]) # step rewards
    # Roll out a single episode with the (deterministic) agent policy.
    while not done:
        # env.render()
        action = agent.act(s, deterministic=True)
        # s, done, r = environment.execute(action)
        s, r, done, info = env_gym.step(action)
        obs_history = np.append(obs_history, s[0])
        hvac_history = np.append(hvac_history, s[1])
        act_history = np.append(act_history, action-1)
        Tamb_history = np.append(Tamb_history, s[15])
        T_history = np.vstack((T_history, info['T']))
        reward_history = np.append(reward_history, r)
    # llm, lsp, hsp, hlm = env_gym.T_low_limit, env_gym.T_low_sp, env_gym.T_high_sp, env_gym.T_high_limit # setpoint limits
    lsp, hsp = env_gym.T_low_sp, env_gym.T_high_sp # setpoint limits
    # --- Figure 1: room temperature vs. time, with setpoint band ---
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(1,1,1)
    plt.plot(obs_history, 'k')
    if plot_T: plt.plot(T_history, '--')
    plt.xlim((0,env_gym.nT))
    ax.set_xticks(np.arange(0,env_gym.nT+1,4*tickhrs))
    ax.set_xticklabels(np.arange(0,int(env_gym.nT/4)+1,tickhrs))
    ax.set_yticks([lsp, hsp])
    plt.xlabel('Time, h')
    plt.ylabel('Temperature, degC')
    # plt.ylim()
    # plt.plot(np.full(env_gym.nT, hlm), 'b')
    plt.plot(np.full(env_gym.nT, hsp), 'b--')
    plt.plot(np.full(env_gym.nT, lsp), 'r--')
    # plt.plot(np.full(env_gym.nT, llm), 'r')
    if save_prefix: plt.savefig(save_prefix+'_Temp'+figext, dpi=figdpi)
    plt.show()
    # --- Figure 2: HVAC state trace ---
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(1,1,1)
    plt.plot(hvac_history)
    # plt.title("HVAC State")
    plt.xlim((0,env_gym.nT))
    plt.ylim((env_gym.minA,env_gym.maxA))
    ax.set_yticks(np.arange(env_gym.maxA+1))
    ax.set_yticklabels(np.arange(env_gym.maxA+1))
    ax.set_xticks(np.arange(0,env_gym.nT+1,4*tickhrs))
    ax.set_xticklabels(np.arange(0,int(env_gym.nT/4)+1,tickhrs))
    plt.xlabel('Time, h')
    plt.ylabel('HVAC State')
    if save_prefix: plt.savefig(save_prefix+'_HVAC'+figext, dpi=figdpi)
    plt.show()
    # --- Figure 3: actions taken (down / hold / up) ---
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(1,1,1)
    plt.plot(act_history)
    plt.xlim((0,env_gym.nT))
    # plt.title("Action")
    ax.set_yticks([-1,0,1])
    ax.set_yticklabels(["↓","","↑"])
    plt.yticks(fontsize=20)
    ax.set_xticks(np.arange(0,env_gym.nT+1,4*tickhrs))
    ax.set_xticklabels(np.arange(0,int(env_gym.nT/4)+1,tickhrs))
    plt.xlabel('Time, h')
    plt.ylabel('Action')
    if save_prefix: plt.savefig(save_prefix+'_Action'+figext, dpi=figdpi)
    plt.show()
    # --- Figure 4: ambient temperature ---
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(1,1,1)
    plt.plot(Tamb_history)
    plt.xlim((0,env_gym.nT))
    # plt.title("T_ambient")
    ax.set_xticks(np.arange(0,env_gym.nT+1,4*tickhrs))
    ax.set_xticklabels(np.arange(0,int(env_gym.nT/4)+1,tickhrs))
    plt.xlabel('Time, h')
    plt.ylabel('T_ambient, degC')
    if save_prefix: plt.savefig(save_prefix+'_TAmbient'+figext, dpi=figdpi)
    plt.show()
    # --- Figure 5: per-step reward ---
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot(1,1,1)
    plt.plot(reward_history)
    plt.xlim((0,env_gym.nT))
    # plt.title("Reward in episode")
    ax.set_xticks(np.arange(0,env_gym.nT+1,4*tickhrs))
    ax.set_xticklabels(np.arange(0,int(env_gym.nT/4)+1,tickhrs))
    plt.xlabel('Time, h')
    plt.ylabel('Step Reward')
    if save_prefix: plt.savefig(save_prefix+'_StepReward'+figext, dpi=figdpi)
    plt.show()
    reward_cumulative = np.cumsum(reward_history)
    print("End reward: %.1f" % reward_cumulative[-1])
    # plt.figure()
    # plt.plot(reward_cumulative)
    # plt.title("Cumulative reward in episode")
    if plot_progression:
        # Training progression
        # plt.figure(figsize=figsize)
        # plt.plot(runner.episode_timesteps, marker='.', linestyle='None', alpha=0.1)
        # plt.title("Timesteps")
        plt.figure(figsize=figsize)
        plt.plot(runner.episode_rewards, marker='.', linestyle='None', alpha=0.1)
        # BUG FIX: compare against None with `is not None`, not `!=`
        if reward_limit is not None:
            plt.ylim((reward_limit, 0))
        plt.xlabel('Episode')
        plt.title("Rewards")
        if save_prefix: plt.savefig(save_prefix+'_RLTrainRewardEpisodic'+figext, dpi=figdpi)
        plt.show()
# Callback function printing episode statistics
def episode_finished(r):
    """Tensorforce runner callback invoked after every training episode.

    Updates the tqdm progress bar, periodically checkpoints the agent, prints
    reward statistics, and renders intermediate/test rollouts.  Relies on
    module-level globals (pbar, save_interval, save_dir, report_interval,
    plot_interval, plot_test_interval, cumsum_curriculum, agent, show_results).
    Always returns True so that training continues.
    """
    ep = r.episode
    pbar.set_description(
        "Finished episode {ep} after {ts} timesteps (reward: {reward})".format(
            ep=ep, ts=r.episode_timestep, reward=int(r.episode_rewards[-1])))
    pbar.update(1)
    if ep != 0 and ep % save_interval == 0:
        # print("Model save checkpoint.")
        r.agent.save_model(directory=save_dir, append_timestep=True)
        # r.agent.save_model()
    if ep != 0 and ep % report_interval == 0:
        print("Average of last 500 rewards: {}".format(np.mean(r.episode_rewards[-500:])))
        print("Average of last 100 rewards: {}".format(np.mean(r.episode_rewards[-100:])))
        print("Best 5 of last 100 rewards: {}".format(np.sort(r.episode_rewards[-100:])[-5::]))
    if ep != 0 and ep % plot_interval == 0:
        # pick the curriculum stage that this episode count falls into
        mode = np.searchsorted(cumsum_curriculum, ep)
        print("Mode: %i" % mode)
        show_results(agent, perturbation_mode=mode)
    if ep != 0 and ep % plot_test_interval == 0:
        show_results(agent, perturbation_mode=8, plot_progression=False, save_prefix="RLHeat_%i" % ep)
        show_results(agent, perturbation_mode=9, plot_progression=False, save_prefix="RLCool_%i" % ep)
    return True
# Function to create RL agent given type and network architecture
def create_agent(network='dense_lstm', flavour='quickstart_custom_net'):
    """Build a Tensorforce RL agent for the (module-global) ``environment``.

    Parameters
    ----------
    network : 'dense_lstm' | 'dense_2' | 'dense_3' -- network architecture spec.
    flavour : 'quickstart_custom_net' (PPO) | 'vpg' -- agent type.

    Returns the constructed agent.

    Raises
    ------
    ValueError
        For an unknown *network* or *flavour*.  (Previously these cases only
        printed a message and then crashed later with a NameError.)
    """
    # neural network "brain"
    if (network=='dense_lstm'):
        network_spec = [
            dict(type='dense', size=64),
            dict(type='internal_lstm', size=64),
        ]
    elif (network=='dense_2'):
        network_spec = [
            dict(type='dense', size=64),
            dict(type='dense', size=64),
        ]
    elif (network=='dense_3'):
        network_spec = [
            dict(type='dense', size=64),
            dict(type='dense', size=64),
            dict(type='dense', size=64),
        ]
    else:
        raise ValueError("Unknown network input: {!r}".format(network))
    # create the agent
    if (flavour=='quickstart_custom_net'):
        from tensorforce.agents import PPOAgent
        """ https://github.com/reinforceio/tensorforce/blob/master/examples/quickstart.py """
        agent = PPOAgent(
            states=environment.states,
            actions=environment.actions,
            network=network_spec,
            # Agent
            states_preprocessing=None, #preprocessing_config,
            actions_exploration='epsilon_decay', #None,
            reward_preprocessing=None,
            # MemoryModel
            update_mode=dict(
                unit='episodes',
                batch_size=10, # 10 episodes per update
                frequency=10, # Every 10 episodes
            ),
            memory=dict(
                type='latest',
                include_next_states=False,
                capacity=1000
            ),
            # DistributionModel
            distributions=None,
            entropy_regularization=0.01,
            # PGModel
            baseline_mode='states',
            baseline=dict(
                type='mlp',
                sizes=[64, 64]
            ),
            baseline_optimizer=dict(
                type='multi_step',
                optimizer=dict(
                    type='adam',
                    learning_rate=1e-3
                ),
                num_steps=20
            ),
            gae_lambda=0.99,
            # PGLRModel
            likelihood_ratio_clipping=0.2,
            # PPOAgent
            step_optimizer=dict(
                type='adam',
                learning_rate=1e-3
            ),
            subsampling_fraction=0.2,
            optimization_steps=25,
            execution=dict(
                type='single',
                session_config=None,
                distributed_spec=None
            )
        )
    elif (flavour=='vpg'):
        from tensorforce.agents import VPGAgent
        agent = VPGAgent(
            states=environment.states,
            actions=environment.actions,
            network=network_spec,
            batched_observe=False,
            #batching_capacity=10,
            scope='vpg',
            execution=None,
            variable_noise=None,
            states_preprocessing=None,
            actions_exploration='epsilon_decay',
            reward_preprocessing=None,
            update_mode=None,
            memory=None,
            optimizer={'type':'adam', 'learning_rate':1e-3},
            discount=0.99,
            # distributions=None,
            # entropy_regularization=None,
            # baseline_mode='states',
            # baseline={'type':'mlp', 'sizes':[32, 32]},
            # baseline_optimizer={'type':'adam', 'learning_rate':1e-3},
            # gae_lambda=None,
        )
    else:
        raise ValueError("Unknown agent flavour input: {!r}".format(flavour))
    return agent
# -
# ## Create the RL PPO Agent
# agent = create_agent(flavour='vpg', network='dense_2')
agent = create_agent(flavour='quickstart_custom_net', network='dense_lstm')
# agent = create_agent(flavour='quickstart_custom_net', network='dense_2')
# agent = create_agent(flavour='quickstart_custom_net', network='dense_3')
# `restore` and `save_dir` are defined in earlier cells; reload a checkpoint if requested
if restore: agent.restore_model(directory=save_dir)
# ## Train
episodes = int(6000)
runner = Runner(agent=agent, environment=environment)
# episode_finished (defined above) drives this tqdm bar and periodic checkpoints/plots
with tqdm(total=episodes) as pbar: runner.run(episodes=episodes, max_episode_timesteps=None, episode_finished=episode_finished)
# ## Define Baseline Controls
# agent_cnst = agent_constant(constant_action=1)
# agent_rand = agent_random(env_gym.nA)
# Classical baseline controllers for comparison against the RL agent (temperatures in degC)
agent_bang = agent_bangbang(hvac_levels=len(env_gym.heat_cool_levels), heat_on=20.5, heat_off=21.5, cool_on=25, cool_off=23.5, hvac_off=env_gym.heat_cool_off)
agent_bang2 = agent_bangbang_2stage(hvac_levels=len(env_gym.heat_cool_levels), heat_on=20.5, heat_off=21.5, heat_on_stage2=19, heat_off_stage2=21, hvac_off=env_gym.heat_cool_off)
agent_piheat = agent_pi(Kp=5000., Ki=20., setpoint=21., hvac_off=env_gym.heat_cool_off, mode='heating', output_deadband=2000.)
agent_picool = agent_pi(Kp=3000., Ki=10., setpoint=24., hvac_off=env_gym.heat_cool_off, mode='cooling', output_deadband=1000.)
# ## Heating Mode Test
# perturbation mode used for the heating-mode test scenario (see cell heading)
perturbation_mode = 10
# +
# print('Constant Agent')
# show_results(agent_cnst, perturbation_mode, plot_progression=False,)# plot_T=True)
# +
# print('Random Agent')
# show_results(agent_rand, perturbation_mode, plot_progression=False)
# +
# print('BangBang Agent 2 stage')
# show_results(agent_bang2, perturbation_mode, plot_progression=False, save_prefix="BangBangHeat")
# +
# print('Proportional-Integral Agent: Heating Mode')
# agent_piheat.cum_error = 0 # reset error
# show_results(agent_piheat, perturbation_mode, plot_progression=False, save_prefix="PIHeat")
# -
print('RL Agent')
# show_results(agent, perturbation_mode, plot_progression=False, save_prefix="RLHeat") # plot_T=True)
show_results(agent, perturbation_mode, plot_progression=False, save_prefix="RLHeat", plot_T=True)
# ## Cooling Mode Test
# perturbation mode used for the cooling-mode test scenario (see cell heading)
perturbation_mode = 11
# +
# print('BangBang Agent')
# show_results(agent_bang, perturbation_mode, plot_progression=False, save_prefix="BangBangCool")
# +
# print('Proportional-Integral Agent: Cooling Mode')
# agent_picool.cum_error = 0 # reset error
# show_results(agent_picool, perturbation_mode, plot_progression=False, save_prefix="PICool")
# -
print('RL Agent')
# show_results(agent, perturbation_mode, reward_limit=-5000., save_prefix="RLCool") # plot_T=True)
show_results(agent, perturbation_mode, reward_limit=-5000., save_prefix="RLCool", plot_T=True)
| A Building Controls OpenAI Gym Environment - 2nd-Order Model Train Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Descobrindo doadores em potencial
#
# - A partir de um dataset iremos identificar possíveis doadores de fundos para eleições.
# - No dataset temos dados sobre educação, trabalho, renda, etnia.
# - Nós sabemos que indivíduos que tem alta renda são melhores alvos para arrecadação de doações políticas.
#
# ### Vamos construir um classificador que prevê os níveis de renda, baseado em atributos pessoais
#
# Os indivíduos com maior renda serão os primeiros a serem abordados em busca de doação política.
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
file_name = "https://raw.githubusercontent.com/rajeevratan84/datascienceforbusiness/master/adult.data"
# The Adult dataset ships WITHOUT a header row.  BUG FIX: reading with the
# default header=0 silently consumed the first data record as column names;
# read with header=None and assign the column names directly instead.
columns_ = ['age', 'workclass', 'fnlwgt','education','education-num','marital-status','occupation',
            'relationship','race','sex','capital-gain','capital-loss','hours-per-week','native-country', 'Income']
census = pd.read_csv(file_name, header=None, names=columns_)
census.head()
census.shape
census.head()
# Some basic information about the dataset
print("Linhas :",census.shape[0])
print("colunas :",census.shape[1])
print("\nParâmetros: \n", census.columns.tolist())
print("\nValores ausentes: \n", census.isnull().sum().values.sum())
print("\nValores únicos: \n", census.nunique())
census.info()
# In the info above, even the numeric columns are typed as objects, so we use a
# pandas function to infer and convert them to proper (integer) dtypes.
census = census.infer_objects()
census.head()
census.info()
# ### Exploratory Data Analysis
census.Income.unique()
# Strip surrounding ' ' spaces from the label values
census['Income'] = census['Income'].str.strip()
# Income information.
# +
# Number of records
n_records = census.shape[0]
# Number of records where the individual's income exceeds $50,000
n_greater_50k = census.loc[census['Income'] == '>50K'].shape[0]
# Number of records where the individual's income is at most $50,000
n_at_most_50k = census[census['Income'] == '<=50K'].shape[0]
# Percentage of individuals whose income exceeds $50,000
greater_percent = (n_greater_50k / n_records) * 100
# Results
print("Número Total de registros: {}".format(n_records))
print("Indivíduos de renda maior $50,000: {}".format(n_greater_50k))
print("Indivíduos de renda até no máximo $50,000: {}".format(n_at_most_50k))
print("Porcentagem dos indivíduos de renda maior que $50,000: {:.2f}%".format(greater_percent))
# -
# Visualizations
sns.set(style="whitegrid", color_codes=True)
# NOTE(review): seaborn >= 0.12 no longer accepts the variable as a bare
# positional argument (needs x="sex") -- confirm the pinned seaborn version.
sns.catplot("sex", col='education', data=census, hue='Income', kind="count", col_wrap=4);
# <i> The plots above show the relationship between education level, income and sex.
#
# ***
# +
fig, axes = plt.subplots(1, 2, figsize=(14,4))
census.hist('capital-gain', bins=20, ax=axes[0])
census.hist('capital-loss', bins=20, ax=axes[1])
# -
# In the "Capital Gain" and "Capital Loss" plots the x axis extends far beyond the bars
# because many very small values are spread across the range and get lost at this
# scale.  To improve the visualization we transform these columns to a log scale.
skewed = ['capital-gain', 'capital-loss']
census[skewed] = census[skewed].apply(lambda x: np.log(x + 1))
census.head()
# After applying the log transform
# +
fig, axes = plt.subplots(1, 2, figsize=(12,4))
census.hist('capital-gain', bins=20, ax=axes[0])
census.hist('capital-loss', bins=20, ax=axes[1])
# -
# ***
# Drop the fnlwgt column, as it is not useful for this analysis.
# fnlwgt: final weight. In other words, this is the number of people the census believes the entry represents
census.drop(['fnlwgt'], axis=1, inplace=True)
census.head()
# Next we create the "get_uniques" helper, which returns a dataframe composed of the unique values of each categorical column.
def get_uniques(df):
    """Collect the unique values of every low-cardinality column of *df*.

    A column counts as categorical when it has fewer than 20 distinct values.
    Returns a DataFrame with one column per categorical column; shorter
    unique-value lists are NaN-padded to a common length.
    """
    unique_lists, kept_cols = [], []
    for col in df.columns:
        if df[col].nunique() < 20:
            unique_lists.append(df[col].unique())
            kept_cols.append(col)
    padded = pd.DataFrame(unique_lists).T  # rows -> columns, NaN-padded
    return pd.DataFrame(padded.values, columns=kept_cols)
censu_uniq_vals = get_uniques(census)
censu_uniq_vals
census['native-country'].unique()
# <i> Some categorical cells are filled with the " ?" placeholder; drop those rows.
census[census['native-country'] == " ?"]
census = census[census['occupation'] != " ?"]
census = census[census['native-country'] != " ?"]
# Checking for null values.
census[census.isnull().any(axis=1)]
# ### Preparing the data for machine-learning models
# First, normalize the numerical features to the 0 - 1 range
# +
from sklearn.preprocessing import MinMaxScaler
# Initialize a scaler, then apply it to the features
scaler = MinMaxScaler() # default=(0, 1)
numerical = ['age', 'education-num', 'capital-gain', 'capital-loss', 'hours-per-week']
# Make a copy of the our original df
census_minmax_transform = pd.DataFrame(data = census)
# Scale our numerica data
census_minmax_transform[numerical] = scaler.fit_transform(census_minmax_transform[numerical])
census_minmax_transform.head()
# -
# Split the label column off the dataframe.
income_raw = census_minmax_transform['Income']
census_minmax_transform = census_minmax_transform.drop('Income', axis = 1)
# One-hot encode the categorical features.
# +
# One-hot encode the 'features_log_minmax_transform' data using pandas.get_dummies()
features_final = pd.get_dummies(census_minmax_transform)
# Encode the 'income_raw' data to numerical values ('<=50K' -> 0, '>50K' -> 1).
# BUG FIX: a redundant manual 0/1 mapping of income_raw that was immediately
# overwritten by the LabelEncoder result has been removed (dead code).
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
income = pd.Series(encoder.fit_transform(income_raw))
# Print the number of features after one-hot encoding
encoded = list(features_final.columns)
print("{} features depois do one-hot encoding.".format(len(encoded)))
print("\n",encoded)
# -
# Split into training and test sets
# +
from sklearn.model_selection import train_test_split
# Split the 'features' and 'income' data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(features_final, income, test_size = 0.2, random_state = 0)
# Show the results of the split
print("Set de treinamento tem {} amostras.".format(X_train.shape[0]))
print("Set de teste tem {} amostras.".format(X_test.shape[0]))
# -
# Baseline accuracy we should at least beat: a "naive" predictor that labels
# every individual as earning more than $50K.
# +
# Calculate accuracy
accuracy = n_greater_50k / n_records
# Calculating precision (all-positive predictor: TP = n_greater_50k, FP = n_at_most_50k)
precision = n_greater_50k / (n_greater_50k + n_at_most_50k)
#Calculating recall (the all-positive predictor never misses a positive, so recall = 1)
recall = n_greater_50k / (n_greater_50k + 0)
# Calculate F-score using the formula above for beta = 0.5
fscore = (1 + (0.5*0.5)) * ( precision * recall / (( 0.5*0.5 * (precision))+ recall))
# Print the results
print("Naive Predictor: [Accuracy score: {:.4f}, F-score: {:.4f}]".format(accuracy, fscore))
# -
# Na célula abaixo é criada uma função para fazer o processo de treinamento e teste, assim podemos testar vários modelos sem que seja preciso repetir vários blocos de código.
# +
from sklearn.metrics import fbeta_score, accuracy_score
from time import time
def train_predict(learner, sample_size, X_train, y_train, X_test, y_test):
    '''
    Train *learner* on the first *sample_size* rows and report timing/score metrics.

    inputs:
       - learner: the learning algorithm to be trained and predicted on
       - sample_size: the size of samples (number) to be drawn from training set
       - X_train: features training set
       - y_train: income training set
       - X_test: features testing set
       - y_test: income testing set

    Returns a dict with the training time, the (combined test+train) prediction
    time, and accuracy / F0.5 scores on the test set and on the first 300
    training samples.
    '''
    results = {}
    # Fit the learner to the training data using slicing with 'sample_size'
    start = time() # Get start time
    learner = learner.fit(X_train[:sample_size],y_train[:sample_size])
    end = time() # Get end time
    # Calculate the training time
    results['train_time'] = end - start
    # Get the predictions on the test set,
    # then get predictions on the first 300 training samples
    start = time() # Get start time
    predictions_test = learner.predict(X_test)
    predictions_train = learner.predict(X_train[:300])
    end = time() # Get end time
    # Total prediction time (covers BOTH the test-set and train-subset predictions)
    results['pred_time'] = end - start
    # Compute accuracy on the first 300 training samples
    results['acc_train'] = accuracy_score(y_train[:300],predictions_train)
    # Compute accuracy on test set
    results['acc_test'] = accuracy_score(y_test,predictions_test)
    # Compute F-score on the first 300 training samples.
    # BUG FIX: `beta` is keyword-only in modern scikit-learn; passing it
    # positionally (fbeta_score(y, pred, 0.5)) raises a TypeError.
    results['f_train'] = fbeta_score(y_train[:300],predictions_train,beta=0.5)
    # Compute F-score on the test set
    results['f_test'] = fbeta_score(y_test,predictions_test,beta=0.5)
    # Success
    print("{} trained on {} samples.".format(learner.__class__.__name__, sample_size))
    # Return the results
    return results
# -
# Run training/testing with varying sample counts to compare how long each
# model takes to train and how well it scores.
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import AdaBoostClassifier
# Initialize the three models, the random states are set to 101 so we know how to reproduce the model later
clf_A = DecisionTreeClassifier(random_state=101)
clf_B = SVC(random_state = 101)
clf_C = AdaBoostClassifier(random_state = 101)
# Calculate the number of samples for 1%, 10%, and 100% of the training data
samples_1 = int(round(len(X_train) / 100))
samples_10 = int(round(len(X_train) / 10))
samples_100 = len(X_train)
# Collect results on the learners
results = {}
for clf in [clf_A, clf_B, clf_C]:
    clf_name = clf.__class__.__name__
    results[clf_name] = {}
    for i, samples in enumerate([samples_1, samples_10, samples_100]):
        results[clf_name][i] = \
        train_predict(clf, samples, X_train, y_train, X_test, y_test)
# -
#Printing out the values
# NOTE(review): `display` is the IPython/Jupyter builtin -- this cell only runs in a notebook.
for i in results.items():
    print(i[0])
    display(pd.DataFrame(i[1]).rename(columns={0:'1%', 1:'10%', 2:'100%'}))
# Plot each model's confusion matrix.
# +
from sklearn.metrics import confusion_matrix
plt.figure(figsize=(30,12))
for i,model in enumerate([clf_A,clf_B,clf_C]):
    cm = confusion_matrix(y_test, model.predict(X_test))
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] # normalize the data
    # view with a heatmap
    plt.figure(i)
    sns.heatmap(cm, annot=True, annot_kws={"size":10},
            cmap='Blues', square=True, fmt='.3f')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.title('Confusion matrix for:\n{}'.format(model.__class__.__name__));
# -
# Below we use GridSearchCV to tune the best hyper-parameters for our model.
#
# +
# Import 'GridSearchCV', 'make_scorer', and any other necessary libraries
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
# Initialize the classifier
# NOTE(review): `base_estimator` was renamed to `estimator` in scikit-learn 1.2+ -- confirm pinned version.
clf = AdaBoostClassifier(base_estimator=DecisionTreeClassifier())
# Create the parameters list you wish to tune
parameters = {'n_estimators':[50, 120],
              'learning_rate':[0.1, 0.5, 1.],
              'base_estimator__min_samples_split' : np.arange(2, 8, 2),
              'base_estimator__max_depth' : np.arange(1, 4, 1)
             }
# Make an fbeta_score scoring object
scorer = make_scorer(fbeta_score,beta=0.5)
# Perform grid search on the classifier using 'scorer' as the scoring method
# NOTE(review): `scoring` is passed positionally here; recent scikit-learn requires scoring=scorer.
grid_obj = GridSearchCV(clf, parameters,scorer)
# Fit the grid search object to the training data and find the optimal parameters
grid_fit = grid_obj.fit(X_train,y_train)
# Get the estimator
best_clf = grid_fit.best_estimator_
# Make predictions using the unoptimized and model
predictions = (clf.fit(X_train, y_train)).predict(X_test)
best_predictions = best_clf.predict(X_test)
# Report the before-and-afterscores
print("Unoptimized model\n------")
print("Accuracy score on testing data: {:.4f}".format(accuracy_score(y_test, predictions)))
print("F-score on testing data: {:.4f}".format(fbeta_score(y_test, predictions, beta = 0.5)))
print("\nOptimized Model\n------")
print("Final accuracy score on the testing data: {:.4f}".format(accuracy_score(y_test, best_predictions)))
print("Final F-score on the testing data: {:.4f}".format(fbeta_score(y_test, best_predictions, beta = 0.5)))
print(best_clf)
| 3 - Descobrindo doadores em potencial .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# this code isolates PSF volumes out of z-stack and then averages them
# PSFs are detected using TrackPy, the resulting locations are used to crop out the PSF volumes
# the centers of the PSFs in XY are refined by choosing the slice with max intensity and performing a 2D gauss fit.
# the stack is then upsampled by a factor, and in XY the pixel closest to the gaussian fit is chosen. Next the
# intensity in Z along that pixel is plotted and fitted with a gauss to obtain the Z center of the PSF.
# lastly the upsampled PSFs are averaged resulting in a volume containing the average PSF.
# To add: option for overlay of gauss positions and trackpy in focus image
from pathlib import Path
import pandas as pd
import numpy as np
import trackpy as tp
import pylab
import matplotlib._pylab_helpers
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
from matplotlib import gridspec
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from matplotlib.backends.backend_pdf import PdfPages
import pims, PIL, tifffile, time, pathlib, os, json, math, glob
from pims import FramesSequence, Frame
import scipy
from scipy.ndimage import zoom
from scipy.ndimage import rotate
from scipy.optimize import curve_fit
from scipy.ndimage import gaussian_filter
from math import floor
from mpl_toolkits.mplot3d import Axes3D
import warnings
DEBUG = False  # set True for verbose diagnostics from the fitting helpers below
# +
from psf_extractor.util import get_Daans_special_cmap
# "fire"-style colormap used for all intensity images in this notebook
fire=get_Daans_special_cmap()
# +
class TiffFilePages(FramesSequence):
    """pims-compatible reader that loads multi-page TIFF pages lazily.

    Used as a fallback when ``pims.TiffStack`` fails to see all pages of a
    stack; each frame is read from disk on demand via tifffile.
    """
    def __init__(self, filename):
        self._filename = filename
        tiff = tifffile.TiffFile(filename)
        self._len = len(tiff.pages)
        first_page = tiff.pages[0]
        self._frame_shape = first_page.shape
        self._dtype = first_page.dtype

    def get_frame(self, i):
        # Read only page i instead of holding the whole stack in memory.
        data = tifffile.imread(self._filename, key=i)
        return Frame(data, frame_no=i)

    def __len__(self):
        return self._len

    @property
    def frame_shape(self):
        return self._frame_shape

    @property
    def pixel_type(self):
        return self._dtype
def super_gaussian(x, x0, sigma, amp, back, rank):
    """Squared super-Gaussian of order *rank*, centered at x0 with baseline *back*."""
    norm_sq = ((x - x0) ** 2) / (sigma ** 2)
    envelope = np.exp(-(2 ** (2 * rank - 1)) * np.log(2) * norm_sq ** rank)
    return amp * envelope ** 2 + back
# +
def gaussian_2D(x, y, x0, y0, xalpha, yalpha, theta, A, B): #define 2D gauss function
theta = np.deg2rad(theta)
a = np.cos(theta)**2/(2*xalpha**2) + np.sin(theta)**2/(2*yalpha**2)
b = -1*np.sin(2*theta)/(4*xalpha**2) + np.sin(2*theta)/(4*yalpha**2)
c = np.sin(theta)**2/(2*xalpha**2) + np.cos(theta)**2/(2*yalpha**2)
return A * np.exp( -(a*(x-x0)**2 + 2*b*(x-x0)*(y-y0) + c*(y-y0)**2)) + B
# This is the callable that is passed to curve_fit. M is a (2,N) array
# where N is the total number of data points in Z, which will be ravelled
# to one dimension.
def _gaussian_2D(M, *args):
x, y = M
arr = np.zeros(x.shape)
for i in range(len(args)//7):
arr += gaussian_2D(x, y, *args[i*7:i*7+7])
return arr
def do_2D_gauss_fit(arr, thetaest=45):
    """Fit a single rotated 2-D Gaussian to image *arr*.

    Initial guesses come from gauss2D_param; *thetaest* seeds the rotation
    angle (degrees).  Returns the optimized parameter vector
    [x0, y0, xalpha, yalpha, theta, A, B].
    """
    n_rows, n_cols = arr.shape
    cx, cy, sx, sy, amp, offset = gauss2D_param(arr)
    # NOTE(review): only the x width is divided by 4 in the initial guess --
    # looks asymmetric; confirm this is intentional.
    p0 = [cx, cy, sx/4, sy, thetaest, amp, offset]
    X, Y = np.meshgrid(np.arange(0, n_cols), np.arange(0, n_rows))
    coords = np.vstack((X.ravel(), Y.ravel()))
    popt, _pcov = curve_fit(_gaussian_2D, coords, arr.ravel(), p0, maxfev = 8000)
    return popt #give back all fit values
def gaussian_1D(x, x0, xalpha, A, B):
    """1-D Gaussian with center x0, width xalpha, amplitude A and baseline B."""
    return B + A * np.exp(-((x - x0) ** 2) / (2 * xalpha ** 2))
def gauss2D_param(im): #estimate first guesses for parameters
    """Estimate initial 2-D Gaussian fit parameters for image *im*.

    Tries progressively weaker thresholds (mean + fact*std) to isolate the
    bright peak; any failure moves on to the next threshold.  Returns
    (midx, midy, sigx, sigy, maxI, minI), falling back to coarse defaults
    (image center, sigma 5, max/min intensity) when every attempt fails.
    """
    imy, imx = im.shape
    for fact in [3, 2.5, 2, 1.5, 1, 0.5]:
        try:
            image = im.copy()
            # zero-out everything below the threshold (with a dilated margin)
            idxs = image < image.mean() + fact*image.std()
            idxs = scipy.ndimage.binary_dilation(idxs)
            image[idxs] = 0
            xy = np.argwhere(image > 0)
            ys, xs = xy[:,0], xy[:,1]
            if len(xs)==0 or len(ys)==0: continue
            midy, midx = ys.mean(), xs.mean()
            sigy, sigx = (ys.max() - ys.min())/2, (xs.max() - xs.min())/2
            yn, yp = intround(midy-sigy), intround(midy+sigy)
            xn, xp = intround(midx-sigx), intround(midx+sigx)
            maxI = image[yn:yp, xn:xp].mean()*2
            minI = im.mean()
            return midx, midy, sigx, sigy, maxI, minI
        # BUG FIX: was a bare `except:`, which would also have swallowed
        # KeyboardInterrupt / SystemExit.
        except Exception:
            if DEBUG:
                print(str(fact)+" failed:", im.mean(), fact*im.std())
    return imx//2, imy//2, 5, 5, im.max(), im.min()
def gauss1D_param(ydata):
    """Estimate initial 1-D Gaussian fit parameters for profile *ydata*.

    Tries progressively weaker thresholds (mean + fact*std) to isolate the
    peak; a failed attempt (empty selection, NaN amplitude, sub-pixel sigma)
    moves on to the next threshold.  Returns (midx, sigma, maxI, minI),
    falling back to coarse defaults when every attempt fails.
    """
    for fact in [2, 1.5, 1, 0.5,0.25]:
        try:
            yd = ydata.copy()
            # zero-out everything below the threshold (with a dilated margin)
            idxs = yd < yd.mean() + fact*yd.std()
            idxs = scipy.ndimage.binary_dilation(idxs)
            yd[idxs] = 0
            xs = np.argwhere(yd > 0)
            # BUG FIX: these guards used a bare `raise` (no active exception),
            # which relied on the resulting RuntimeError being swallowed by a
            # bare except; raise an explicit ValueError instead.
            if xs.size < 1: raise ValueError("empty peak selection") #check if list is empty
            midx = xs.mean()
            sigx = (xs.max() - xs.min())/2
            xn, xp = intround(midx-sigx), intround(midx+sigx)
            if yd[xn:xp].size < 1: raise ValueError("empty peak window") #check if list is empty
            maxI = yd[xn:xp].mean()*2
            minI = ydata.mean()
            if np.isnan(maxI) or sigx <0.5: raise ValueError("degenerate estimate")
            if DEBUG:
                print("zprof ", str(fact)+" success:", ydata.mean(), fact*ydata.std())
            return midx, 2*fact*sigx, maxI, minI
        # BUG FIX: was a bare `except:` (also caught KeyboardInterrupt/SystemExit)
        except Exception:
            if DEBUG:
                print("zprof ", str(fact)+" failed:", ydata.mean(), fact*ydata.std())
    return int(len(ydata)/2), 5, max(ydata), min(ydata)
def do_1D_gauss_fit(ydata, xdata=None):
    """Fit a 1-D Gaussian to *ydata*.

    When *xdata* is omitted, an index axis 0..len(ydata)-1 is used.  Returns
    (popt, xfine, xdata) where popt = [x0, sigma, A, B] and xfine is a 5x
    oversampled axis for plotting the fitted curve.
    """
    # BUG FIX: was `type(xdata) == type(None)`; the idiomatic identity check is `is None`.
    if xdata is None: xdata = np.arange(0, len(ydata))
    midx, sigx, maxI, minI = gauss1D_param(ydata)
    p0 = [xdata[intround(midx)], np.abs(xdata[1]-xdata[0])*sigx, maxI, minI]
    popt, pcov = curve_fit(gaussian_1D, xdata, ydata, p0, maxfev = 8000)
    xfine = np.linspace(xdata.min(), xdata.max(), len(xdata)*5)
    return popt, xfine, xdata
# +
class HaltException(Exception): pass
def check_blacklist(features, widths, dims):
    """Flag PSF candidates that cannot be cropped cleanly.

    For every particle in *features* (TrackPy output), mark it
      1 -- closer than the crop half-width to a stack edge,
      2 -- overlapping another particle (both partners get flagged),
      0 -- usable.
    *widths* maps axis name -> crop half-width, *dims* maps axis name ->
    stack size along that axis; their key sets must match.
    Raises HaltException when the key sets differ or when every particle
    ends up flagged.  NOTE(review): indexes the flags array with
    ``features.index`` -- assumes a default 0..n-1 index.
    """
    flags = np.zeros(len(features)) # per-particle status, see docstring
    if set(widths) != set(dims): raise HaltException("Keys not equal in passed widths and dims dictionaries")
    axes = [key for key in dims.keys() if key in features.columns.values]
    for i in features.index: # run over all particles in zstack
        if flags[i] != 0: # already flagged -> nothing to do
            continue
        # edge proximity check
        for key in axes:
            center_i = round(features[key][i], 0)
            if center_i < widths[key] or center_i > dims[key] - widths[key]:
                flags[i] = 1
                break
        if flags[i] == 1: continue
        # pairwise overlap check
        for j in features.index:
            if i != j: # omit comparing particle to itself
                near = []
                for key in axes:
                    center_i = round(features[key][i], 0)
                    center_j = round(features[key][j], 0)
                    near.append(bool(abs(center_j - center_i) < 2 * widths[key]))
                if np.all(near):
                    flags[i] = 2
                    flags[j] = 2
    if sum(flags) == len(features): raise HaltException("All PSFs overlap or are too close to box edge, choose smaller PSF volume...")
    return flags
# +
def get_stack(loc, fn, c_outdir=True):
    """Open an image stack and prepare an output directory next to it.

    *loc* is the data directory; *fn* is either a glob pattern ("*.png" /
    "*.tif") for an image sequence or the name of a single (multi-page)
    .tif file.  Returns ``(outdir, stack)`` where *outdir* is a
    "<stem>_output" directory (created when *c_outdir* is True) and *stack*
    is the opened pims sequence.  Raises HaltException for anything else.
    """
    outd = "_output"
    if not loc[-1] in ["/", "\\"]:
        loc += "/"
    if "*.png" in fn or "*.tif" in fn:
        stack = pims.open(loc + fn)
        # if this crashes on plugin keyword -> install pims from git master
        stack.__dict__['_filename'] = stack.pathname
    elif ".tif" in fn:
        stack = pims.TiffStack(loc + fn)
        outd = pathlib.Path(stack._filename).stem + outd
        if len(stack) < 2:
            # PIMS fails to get full stack, retry with tifffile
            stack = TiffFilePages(loc + fn)
    else: raise HaltException("Did not correctly specify files using .tif or *.png")
    outdir = os.path.join(os.path.dirname(stack._filename), outd)
    if not os.path.exists(outdir) and c_outdir: os.makedirs(outdir)
    return outdir, stack
def get_pb(flt):
    """Return the fractional (sub-pixel bias) part of *flt*."""
    whole = floor(flt)
    return flt - whole
# -
def plot_debug(max_proj, features, features_filt, locs, width_x, width_y, filt_psfs):
    """Produce diagnostic figures for a PSF-extraction run.

    Figure 1: raw vs. filtered detections on the maximum-intensity
    projection, mass histograms before/after filtering, and pixel-bias
    histograms (trackpy vs. Gaussian fit).  Figure 2: a gallery of
    maximum-intensity projections of the selected beads.

    Relies on the pyplot state machine: each plt.* call draws on the most
    recently created axes, so statement order matters.
    """
    fig = plt.figure(dpi=300)
    imy, imx = max_proj.shape
    spec = gridspec.GridSpec(ncols=2, nrows=4, figure=fig,
                             height_ratios=[1,1,1,1],
                             width_ratios=[1,1])
    spec2 = gridspec.GridSpec(ncols=3, nrows=3, figure=fig,
                              height_ratios=[1,1,1],
                              wspace=0.1, hspace=0.4,
                              width_ratios=[1,0.4, 0.4])
    # left column, top: all raw detections
    ax1 = fig.add_subplot(spec[0:2,0])
    plt.imshow(max_proj, cmap=fire)
    plt.plot(features.x.values,
             features.y.values,
             'o', markerfacecolor='None',
             markersize=10,
             markeredgecolor="red")
    ax1.tick_params(axis='x', # changes apply to the x-axis
                    which='both', # both major and minor ticks are affected
                    bottom=True, # ticks along the bottom edge are off
                    top=False, # ticks along the top edge are off
                    labelbottom=False # labels along the bottom edge are off)
                    )
    plt.ylabel("Y [px]", fontsize=7)
    # left column, bottom: detections that survived filtering
    ax2 = plt.subplot(spec[2:,0])
    plt.imshow(max_proj, cmap=fire)
    plt.plot(features_filt.x.values,
             features_filt.y.values,
             'o', markerfacecolor='None',
             markersize=10,
             markeredgecolor="red")
    plt.xlabel("X [px]", fontsize=7)
    plt.ylabel("Y [px]", fontsize=7)
    # mass histograms: roughly one bin per mass unit, fall back to 30 bins
    ax3 = fig.add_subplot(spec2[0,1:])
    nbins = intround((features['mass'].max() - features['mass'].min()))
    if nbins == 0: nbins = 30
    plt.hist(features['mass'], bins=nbins)
    nbins = intround((features_filt['mass'].max() - features_filt['mass'].min()))
    if nbins == 0: nbins = 30
    plt.hist(features_filt['mass'], bins=nbins)
    plt.grid(True)
    plt.xlabel("Mass [a.u.]", fontsize=6)
    # red lines mark the accepted mass range
    plt.axvline(features_filt.mass.min(), c='r')
    plt.axvline(features_filt.mass.max(), c='r')
    # pixel-bias histograms of the refined fits
    ax4 = fig.add_subplot(spec2[2,1])
    plt.hist(locs.pb_x_tp)
    plt.grid()
    plt.hist(locs.pb_x_g)
    plt.xlabel("X Pixel bias [px]", fontsize=6)
    ax5 = fig.add_subplot(spec2[1,1])
    plt.hist(locs.pb_y_tp)
    plt.hist(locs.pb_y_g)
    plt.grid()
    plt.xlabel("Y Pixel bias [px]", fontsize=6)
    # pixel bias of the raw trackpy positions, for comparison
    ax6 = fig.add_subplot(spec2[2,2], sharey=ax4)
    xtp = [get_pb(x) for x in features.x.values]
    plt.hist(xtp)
    plt.grid()
    plt.xlabel("X Pixel bias [px]", fontsize=6)
    ax6.tick_params(axis='y', # changes apply to the x-axis
                    which='both', # both major and minor ticks are affected
                    left=False, # ticks along the bottom edge are off
                    labelleft=False # labels along the bottom edge are off)
                    )
    ax7 = fig.add_subplot(spec2[1,2], sharey=ax5, sharex=ax5)
    ytp = [get_pb(x) for x in features.y.values]
    plt.hist(ytp)
    plt.grid()
    plt.xlabel("Y Pixel bias [px]", fontsize=6)
    set_ax_ticksize([ax1,ax2, ax3, ax4, ax5, ax6, ax7], fontsize=6)
    ax7.tick_params(axis='y', # changes apply to the x-axis
                    which='both', # both major and minor ticks are affected
                    left=False, # ticks along the bottom edge are off
                    labelleft=False # labels along the bottom edge are off)
                    )
    # plt.tight_layout()
    # second figure: square gallery of the selected PSF projections
    total = len(filt_psfs)
    xy = math.ceil(np.sqrt(total))
    fig, axes = plt.subplots(nrows=xy, ncols=xy, sharex=True, sharey=True, dpi=300, figsize=(10,10))
    for i, ax in enumerate(axes.flat):
        if i < total:
            im = ax.imshow(max_int_proj(filt_psfs[i]), cmap=fire)
            set_ax_ticksize(ax)
            ax.set_title(locs.PSF[i])
        else:
            # hide unused grid cells
            ax.set_axis_off()
            #im = ax.imshow(np.random.random((10,10)), vmin=0, vmax=1)
    plt.suptitle("Maximum Intensity Projection for selected beads")
    plt.tight_layout(rect=[0, 0.02, 1, 0.97])
    cax,kw = mpl.colorbar.make_axes([ax for ax in axes.flat])
    plt.colorbar(im, cax=cax, **kw)
def plot_PSF(psf_sum, pi_x, pi_y, pi_z):
    """Plot the averaged PSF: XY / XZ / YZ cross-sections through the fitted
    centre plus 1D Gaussian fits of the X, Y and Z line profiles (FWHM in
    the legends).

    pi_x / pi_y / pi_z are the pixel sizes in nm along each axis.  Relies
    on the pyplot state machine, so statement order matters.
    """
    sumz, sumy, sumx = psf_sum.shape
    # fitted centre of the averaged PSF
    focim, (zpos, ypos, xpos) = psf_gauss_fit(psf_sum)
    #plt.imsave("focim.tiff", focim)
    zpos, ypos, xpos = intround(zpos), intround(ypos), intround(xpos)
    fig = plt.figure(figsize=(8,8), dpi=300)
    # grid proportions follow the physical extent of each axis
    spec = gridspec.GridSpec(ncols=2, nrows=4, figure=fig,
                             width_ratios=[sumx * pi_x, sumz * pi_z],
                             height_ratios=[sumx * pi_x, *[sumz * pi_z/4]*3], )
    spec2 = gridspec.GridSpec(ncols=2, nrows=4, figure=fig,
                              width_ratios=[sumx * pi_x, sumz * pi_z],
                              height_ratios=[sumx * pi_x, *[sumz * pi_z/4]*3],
                              hspace=0.1)
    # XY slice through the fitted z position (extent converted nm -> um)
    ax1 = fig.add_subplot(spec[0])
    plt.imshow(psf_sum[zpos,:,:], interpolation=interp, cmap=fire,
               extent=[sumx//2 * pi_x / -1e3, sumx//2 * pi_x / 1e3,
                       sumx//2 * pi_x / -1e3, sumx//2 * pi_x / 1e3])
    ax1.annotate("XY", xy=(50/(sumx * pi_x), 50/(sumx * pi_x)),
                 xycoords="axes fraction", color='white', weight='semibold',
                 fontsize=11)
    plt.xlabel(r"X [$\mathrm{\mu m}$]")
    plt.ylabel("Y [$\mathrm{\mu m}$]")
    ax1.xaxis.tick_top()
    ax1.xaxis.set_label_position('top')
    # XZ slice through the fitted y position
    ax2 = plt.subplot(spec[1:,0], sharex = ax1)
    plt.imshow(psf_sum[:,ypos,:], interpolation=interp, cmap=fire,
               extent=[sumx//2 * pi_x / -1e3, sumx//2 * pi_x / 1e3,
                       sumz//2 * pi_z / 1e3, sumz//2 * pi_z / -1e3])
    plt.ylabel("Z [$\mathrm{\mu m}$]")
    ax2.annotate("XZ", xy=(50/(sumx * pi_x), 50/(sumz * pi_z)),
                 xycoords="axes fraction", color='white', weight='semibold',
                 fontsize=11)
    ax2.tick_params(axis='x', # changes apply to the x-axis
                    which='both', # both major and minor ticks are affected
                    bottom=False, # ticks along the bottom edge are off
                    top=False, # ticks along the top edge are off
                    labelbottom=False # labels along the bottom edge are off)
                    )
    # YZ slice through the fitted x position
    ax3 = plt.subplot(spec[0,1], sharey = ax1)
    plt.imshow(np.rot90(psf_sum[:,:,xpos]), interpolation=interp, cmap=fire,
               extent=[sumz//2 * pi_z / -1e3, sumz//2 * pi_z / 1e3,
                       sumx//2 * pi_x / -1e3, sumx//2 * pi_x / 1e3])
    ax3.tick_params(axis='y', # changes apply to the x-axis
                    which='both', # both major and minor ticks are affected
                    left=False, # ticks along the bottom edge are off
                    right=False, # ticks along the top edge are off
                    labelleft=False # labels along the bottom edge are off)
                    )
    plt.xlabel("Z [$\mathrm{\mu m}$]")
    ax3.annotate("YZ", xy=(50/(sumz * pi_z), 50/(sumx * pi_x)),
                 xycoords="axes fraction", color='white', weight='semibold',
                 fontsize=11)
    ax3.xaxis.tick_top()
    ax3.xaxis.set_label_position('top')
    # right column: 1D line profiles through the centre with Gaussian fits
    ax4 = fig.add_subplot(spec2[3,1])
    ax5 = fig.add_subplot(spec2[2,1], sharex=ax4)
    ax6 = fig.add_subplot(spec2[1,1], sharex=ax4)
    zprof = psf_sum[:, ypos, xpos]
    xprof = psf_sum[zpos, ypos, :]
    yprof = psf_sum[zpos, :, xpos]
    # physical coordinates (nm) of each profile, centred on the stack
    zprofx = (np.arange(0, sumz) - sumz/2) * pi_z
    yprofx = (np.arange(0, sumy) - sumy/2) * pi_y
    xprofx = (np.arange(0, sumx) - sumx/2) * pi_x
    xlim = []
    for a, prof, xprof, l, c in zip([ax4, ax5, ax6],
                                    [xprof, yprof, zprof],
                                    [xprofx, yprofx, zprofx],
                                    ["X", "Y", "Z"],
                                    ['lime', "deepskyblue", "tomato"]):
        popt, xfine, _ = do_1D_gauss_fit(prof, xprof)
        # shift the axis so the fitted centre sits at 0 um
        lineval = popt[0]/1e3
        a.plot(xprof/1e3 - lineval, prof, '.', c=c, label=l)
        # FWHM = 2*sqrt(2 ln 2)*sigma ~= 2.35*sigma
        a.plot(xfine/1e3 - lineval, gaussian_1D(xfine, *popt), 'k-', lw=0.75,
               label=r"$\mathrm{FWHM}$"+"={:.0f} nm".format(popt[1]*2.35))
        a.tick_params(axis='y', # changes apply to the x-axis
                      which='both', # both major and minor ticks are affected
                      left=False, # ticks along the bottom edge are off
                      right=True, # ticks along the top edge are off
                      labelright=False, # labels along the bottom edge are off)
                      labelleft=False # labels along the bottom edge are off)
                      )
        a.tick_params(axis='x', # changes apply to the x-axis
                      which='both', # both major and minor ticks are affected
                      bottom=True, # ticks along the bottom edge are off
                      top=False, # ticks along the top edge are off
                      labelbottom=False # labels along the bottom edge are off)
                      )
        a.yaxis.set_label_position('right')
        a.legend(fontsize='x-small', handlelength=0.8)
        a.grid(True)
        a.set_ylim(0, None)
        # NOTE(review): xlim starts as [] -- np.max([[], scalar]) relies on
        # legacy ragged-sequence handling and errors on modern numpy; verify.
        xlim = np.max([xlim, 2*popt[1]*2.35+popt[0]])
        lineval = 0
        # dashed guide lines marking the profile positions in the slice views
        if l == "X":
            ax1.axhline(lineval, c=c, ls='--', lw = 0.75)
            ax2.axhline(lineval, c=c, ls='--', lw = 0.75)
        elif l == "Y":
            ax1.axvline(lineval, c=c, ls='--', lw = 0.75)
            ax3.axvline(lineval, c=c, ls='--', lw = 0.75)
        elif l == "Z":
            ax2.axvline(lineval, c=c, ls='--', lw = 0.75)
            ax3.axhline(lineval, c=c, ls='--', lw = 0.75)
    ax4.set_xlabel("Distance [$\mathrm{\mu m}$]")
    ax4.tick_params(axis='x', # changes apply to the x-axis
                    which='both', # both major and minor ticks are affected
                    bottom=True, # ticks along the bottom edge are off
                    top=False, # ticks along the top edge are off
                    labelbottom=True # labels along the bottom edge are off)
                    )
    ax5.set_ylabel("\nSignal intensity [a.u.]")
    # symmetric x-limits wide enough for the broadest fitted profile
    xlim *= 1e-3
    ax4.set_xlim(-1 * xlim, xlim)
# +
def multipage(filename, figs=None, dpi=300):
    """Write every figure (all open ones by default) into one PDF and,
    alongside it, one numbered PNG per figure."""
    if figs is None:
        figs = [plt.figure(num) for num in plt.get_fignums()]
    pdf = PdfPages(filename)
    for idx, figure in enumerate(figs):
        figure.savefig(filename.replace(".pdf", "_" + str(idx) + ".png"))
        figure.savefig(pdf, format='pdf')
    pdf.close()
def round_up_to_odd(f):
    """Round *f* up to the nearest odd number (numpy float result)."""
    ceiling = np.ceil(f)
    return ceiling if ceiling % 2 == 1 else ceiling + 1
def rup2oddint(f):
    """Round *f* up to the nearest odd number and return it as an int."""
    return int(round_up_to_odd(f))
def iterable(obj):
    """Return True when *obj* can be iterated over, False otherwise."""
    try:
        iter(obj)
        return True
    except Exception:
        return False
def eight_bit_as(arr, dtype=np.float32):
    """Rescale *arr* to the 0..255 range and cast to *dtype*.

    Arrays that are already uint8 are only cast, not rescaled.  A constant
    (zero-range) image is returned as all zeros instead of dividing by
    zero (bugfix: the old code produced NaN/inf for flat images).
    """
    if arr.dtype != np.uint8:
        arr = arr.astype(np.float32)
        arr -= arr.min()
        peak = arr.max()
        if peak > 0:  # guard flat images: nothing to scale
            arr *= 255.0 / peak
    else:
        arr = arr.astype(np.float32)
    return arr.astype(dtype)
def max_int_proj(arr):
    """Maximum-intensity projection along the first (z) axis."""
    return arr.max(axis=0)
def cut_section_from_stack(arr3d, x, y, wx, wy, z=None, wz=None, upsampled=False):
    """Cut a window of half-widths (wz, wy, wx) centred on (z, y, x) out of
    a (z, y, x)-ordered stack.

    When *z* or *wz* is None the full depth is kept (bugfix: this used to
    be a bare ``except:`` that could also mask unrelated errors).  If the
    window sticks out of the stack, the stack is edge-padded by the worst
    overhang so the slice succeeds.  *upsampled* is accepted for interface
    compatibility but currently unused.
    """
    pw = 0
    lenz = arr3d.shape[0]
    minx, maxx = int(round(x - wx)), int(round(x + wx))
    miny, maxy = int(round(y - wy)), int(round(y + wy))
    if z is None or wz is None:
        # no z-window requested: keep the whole depth
        minz, maxz = 0, lenz
    else:
        minz, maxz = int(round(z - wz)), int(round(z + wz))
    mins = (minz, miny, minx)
    maxs, lims = (maxz, maxy, maxx), arr3d.shape
    minidxs = np.array(mins)
    maxidxs = np.array(lims) - np.array(maxs)
    if np.any(minidxs < 0) or np.any(maxidxs < 0):
        # window exceeds the stack: edge-pad all axes by the worst overhang
        a = np.concatenate((minidxs, maxidxs), axis=None)
        a[a > 0] = 0
        pw = np.max(np.abs(a))
        arr3d = np.pad(arr3d, pw, mode='edge')
        if DEBUG:
            bla = {0:"minz", 1:"miny", 2:"minx", 3:"maxz", 4:"maxy", 5:"maxx"}
            print(bla[np.argmin(a)], "PW:", pw)
    return arr3d[minz+pw:maxz+pw, miny+pw:maxy+pw, minx+pw:maxx+pw]
def psf_z_gauss_fit(arr, x=None, y=None):
    """Fit a 1D Gaussian to the axial (z) intensity profile at (x, y).

    Defaults to the lateral centre of the stack when x or y is not given.
    Returns the optimal parameter vector from do_1D_gauss_fit.
    """
    arrz, arry, arrx = arr.shape
    # bugfix: test explicitly for None -- the old truthiness test also
    # treated a legitimate coordinate of 0 as "not given"
    if x is None or y is None:
        x, y = arrx//2, arry//2
    else:
        x, y = intround(x), intround(y)
    # NOTE(review): with a (z, y, x) stack, arr[:, x, y] indexes the y axis
    # with x and vice versa -- correct only if the callers' (x, y) convention
    # matches do_2D_gauss_fit's output order; verify.
    z_profile = arr[:, x, y]
    xdata = np.arange(0, arrz)
    popt, xfine, _ = do_1D_gauss_fit(z_profile, xdata)
    return popt
def psf_gauss_fit(arr):
    """Locate the PSF centre with Gaussian fits.

    For a 3D stack: fit the lateral position on the maximum-intensity
    projection, fit z along the axial profile at that position, then
    refine the lateral fit on the in-focus slice and redo the axial fit.
    For a 2D image only the lateral fit is done.

    Returns (focim, centerpos): focim is the in-focus 2D image and
    centerpos is [z, y, x] for a stack or [y, x] for a single image.
    """
    centerpos = []
    if len(arr.shape) > 2: #3D stack
        arrz, arry, arrx = arr.shape
        mip = max_int_proj(arr)
        # first lateral guess from the projection
        xgp, ygp, _, _, _, _, _ = do_2D_gauss_fit(mip)
        # axial fit through that lateral position
        pz = psf_z_gauss_fit(arr, xgp, ygp)
        zgp = pz[0]
        centerpos.append(zgp)
        # slice closest to focus, used for the refined lateral fit
        focim = arr[intround(zgp), :, :]
    else: focim = arr
    x_gp, y_gp, x_si, y_si, rot, maI, miI = do_2D_gauss_fit(focim)
    centerpos.append(y_gp)
    centerpos.append(x_gp)
    if len(arr.shape) > 2:
        #if np.abs(x_gp - xgp) > 1 or np.abs(y_gp - ygp) > 1:
        # redo the axial fit at the refined lateral position
        pz = psf_z_gauss_fit(arr, x_gp, y_gp)
        centerpos[0] = pz[0]
    return focim, centerpos
def crop_and_fit(arr3d, x, y, wx, wy, z=None, wz=None):
    """Crop around (x, y[, z]), fit the PSF centre, then re-crop centred on
    the fitted position.

    Returns (cropstack, focim, x_gp, y_gp, z_gp_corrected, x, y, z) where
    x/y/z are the refined absolute coordinates in the full stack.
    """
    crop = cut_section_from_stack(arr3d, x, y, wx, wy, z, wz)
    focim, (z_gp, y_gp, x_gp) = psf_gauss_fit(crop)
    # convert fitted positions from crop coordinates back to stack coordinates
    x = x - wx + x_gp
    y = y - wy + y_gp
    # bugfix: test for None explicitly instead of a bare except that could
    # also swallow unrelated errors from the arithmetic
    if z is None or wz is None:
        z = z_gp
    else:
        z = z - wz + z_gp
    cropstack = cut_section_from_stack(arr3d, x, y, wx, wy, z, wz)
    # offset of the crop's z origin relative to the full stack
    # NOTE(review): assumes the crop is depth-centred in the stack -- confirm
    z_corr = (arr3d.shape[0] - cropstack.shape[0])//2
    return cropstack, focim, x_gp, y_gp, z_gp-z_corr, x, y, z
def rebin(arr, factor):
    """Downsample *arr* by an integer *factor* along every axis via
    block-averaging.

    Generalized from the original 2D/3D-only implementation to any number
    of dimensions; behavior for 2D and 3D input is unchanged.  Every axis
    length should be divisible by *factor* (trailing remainders are
    dropped by the integer division).
    """
    # split each axis of length d into (d // factor, factor) ...
    shape = []
    for dim in arr.shape:
        shape.extend([dim // factor, factor])
    # ... and average over every second (block-local) axis
    block_axes = tuple(range(1, 2 * arr.ndim, 2))
    return arr.reshape(shape).mean(axis=block_axes)
def intround(f):
    """Round to the nearest integer (banker's rounding) and return an int."""
    return int(round(f))
def set_ax_ticksize(ax, fontsize=8):
    """Set the tick-label font size on a single axes or a list of axes.

    Uses Axes.tick_params: the per-tick ``tick.label`` attribute that the
    old implementation relied on was deprecated and removed in
    Matplotlib 3.8, which made this function crash on current versions.
    """
    if not isinstance(ax, (list, tuple)):
        ax = [ax]
    for a in ax:
        a.tick_params(axis='both', which='major', labelsize=fontsize)
# +
############### SET PROPERTIES!!!! ######################
#set pixel size in nm:
pixel_z = 329.5
pixel_x = 195
pixel_y = pixel_x
#Trackpy diameter z, y, x in nm
tpyd = [3000, 1000, 1000]
#set psf cutout size in um (half-widths of the extracted volume)
xy_width = 4
z_width = 10
#upsampling factor
upsampling_factor = 6 #even if possible (1 is no upsampling)
# interpolation mode passed to plt.imshow in the PSF plots
interp = "none"
# extra lateral margin (px) for the first, coarse crop around each bead
extra_xy_width = 5
#plot overlay of trackpy coordinates and gauss fit on image (XY, XZ and YZ)??
plot = False
plot_tp = False
DEBUG = False
max_mass_frac = 1 # 1: all beads accepted, higher number, more filtering.
warnings.filterwarnings('ignore')
#load image stack
#location = '../../../Matlab_codes/Astigmatism_Extraction_Experiment/20210628_new_opt_module/2021-06-28-17-40-51zstack_-28.432deg_step50nm_4.76520994rad/8bit'
location = '../../../Matlab_codes/PSF_extraction_Laura/zstack20210730-161333/timelapse_20210730_161114/8bit'
# fns = ["2021-06-23-17-20-52zstack_-28.25deg_5.62rad/*.tif",
#        ]
fns = ["timelapse_20210730_161114.tif"]
# +
############### RUN THE SCRIPT!!!! ######################
# Main pipeline: locate beads, extract & filter single-bead PSF volumes,
# upsample and average them, and write results + diagnostic plots to disk.
from matplotlib.colors import LogNorm
for file_name in fns:
    print("Processing", file_name+"...")
    start = time.time()
    if "/*.png" not in file_name and ".tif" not in file_name: file_name += "/*.png"
    # optional sidecar metadata overrides the default pixel sizes
    try:
        with open(location+"/"+file_name[:-5]+"parameters.json", 'r') as fh:
            meta = json.load(fh)
        pixel_z = meta['focusstep'] * 1e3
        pixel_x = meta['pixelsize'] * 1e3
        pixel_y = pixel_x
    except (OSError, KeyError, ValueError):
        # no (valid) metadata file: keep the defaults configured above
        pass
    #cutout widths in pixels
    width_x = int(xy_width * 1e3 / pixel_x)
    width_y = width_x
    width_z = int(z_width * 1e3 / pixel_z)
    widths = {"x":width_x, "y":width_y, "z":width_z}
    #trackpy diameter (odd number of pixels per axis)
    tpy_diameter = [rup2oddint(dia/px) for dia, px in zip(tpyd, [pixel_z, pixel_y, pixel_x])]
    #load stack if new filename
    try:
        new_stack_bool = os.path.normpath(stack._filename) == os.path.normpath(location + file_name)
    except NameError: new_stack_bool = False
    if not new_stack_bool:
        print("Load new stack")
        outdir, stack = get_stack(location, file_name)
        stack_arr = np.array(stack)
        stack_arr = eight_bit_as(stack_arr)
        max_proj = max_int_proj(stack_arr)
        max_proj -= max_proj.min()
        max_proj *= 255.0/max_proj.max()
        # preview detections for several minmass values so the user can pick one
        print("Locating features with different min_mass")
        fig, axes = plt.subplots(nrows=3, ncols=2, sharex=True, sharey=True, dpi=300, figsize=(10,10))
        for (ax, mm) in zip(axes.flat, [5, 25, 75, 120, 220, 250]):
            features = tp.locate(max_proj, diameter=tpy_diameter[1:],
                                 minmass=mm)
            features.reset_index(drop=True, inplace=True)
            ax.imshow(max_proj, cmap=fire, norm=LogNorm(vmin=1, vmax=255))
            ax.plot(features.x.values, features.y.values,
                    'o', markerfacecolor='None',
                    markersize=10,
                    markeredgecolor="blue")
            ax.set_title("MM "+str(mm)+": "+str(len(features))+" features found")
        plt.show()
        max_proj_mm = int(input("Select minmass (integer number): "))
    else: print("Using preloaded stack")
    print("Locating features with minmass: {}".format(max_proj_mm))
    features = tp.locate(max_proj, diameter=tpy_diameter[1:],
                         minmass=max_proj_mm)
    features.reset_index(drop=True, inplace=True)
    #determine max dimensions z-stack
    stack_shape = np.shape(stack_arr)
    dims = {"x":stack_shape[2], "y":stack_shape[1], "z":stack_shape[0]}
    #Leave out particles too close to each other (set by width of cutout)
    blacklist = check_blacklist(features, widths, dims)
    #Filter feature list
    idxs = [i for i in range(len(features)) if blacklist[i] != 0]
    features_filt = features.drop(features.index[idxs])
    #Select on mass
    # NOTE(review): sort_values('mass') ascending keeps the lowest-mass beads;
    # confirm this is intended (brightest beads would need ascending=False).
    lenmass = len(features_filt)
    if lenmass > 12: lenmass /= max_mass_frac
    features_filt = features_filt.sort_values('mass').head(n=int(lenmass)).reset_index()
    if len(features_filt) == 0: raise HaltException("All {} features blacklisted and disqualified: change widths".format(len(features)))
    #make array to save locations of psf
    locations = [] # PSF#, x, y, z, x_gp, y_gp, z_gp, pb_x_tp, pb_y_tp, pb_x_g, pb_y_g, pb_z_g
    # placeholder frame; it is rebuilt with the full column set further down
    locs = pd.DataFrame(locations, columns = ["PSF", "x", "y", "z",
                                              "pb_x_tp", "pb_y_tp",
                                              "pb_x_g", "pb_y_g", "pb_z_g"])
    #loop over all filtered beads
    work_stack = stack_arr.copy()
    psf_sum, singlets = None, []
    print("Extracting PSFs from stack:")
    for i, (j, row) in enumerate(features_filt.iterrows()):
        y = round(row.y,0) #y
        x = round(row.x,0) #x
        try:
            cropped = crop_and_fit(work_stack,
                                   x,
                                   y,
                                   width_x+extra_xy_width,
                                   width_y+extra_xy_width)
        except Exception:
            # fit failed for this bead: skip it (best-effort extraction)
            #features_filt.drop(features_filt.index[i], inplace=True)
            continue
        else:
            psf, focim, x_gp, y_gp, z_gp, x_ori, y_ori, z_ori = cropped
            #save psf volume to file
            filepath = outdir + "/psf_{}.tif".format(j)
            singlets.append("/psf_{}.tif".format(j))
            tifffile.imwrite(filepath, psf, photometric='minisblack')
            # save location of psf to array
            loc = [i, x_ori, y_ori, z_ori, x_gp, y_gp, z_gp,
                   get_pb(row.x), get_pb(row.y),
                   get_pb(x_gp), get_pb(y_gp), get_pb(z_gp),
                   ]
            locations.append(loc)
        print("*"*(i+1) + "-"*(len(features_filt)-i-1), end='\r')
    print("\nFilter PSFs...")
    # correlate every PSF against a few randomly chosen reference PSFs
    refs = np.random.randint(0,len(singlets),5)
    pss = []
    for j, ref in enumerate(refs):
        _, psf0 = get_stack(outdir, singlets[ref], c_outdir=False)
        psf0 = np.array(psf0)
        ps = []
        for i in range(0,len(singlets)):
            # bugfix: load the i-th PSF into its own variable -- the old code
            # overwrote psf0 (the reference) and correlated it against a stale
            # `psf` left over from the extraction loop above
            _, psf = get_stack(outdir, singlets[i], c_outdir=False)
            psf = np.array(psf)
            p, _ = scipy.stats.pearsonr(max_int_proj(psf0).ravel(), max_int_proj(psf).ravel())
            ps.append(p)
        print("*"*(int((j+1)*len(singlets)/len(refs))) + "-"*(len(singlets)-int((j+1)*len(singlets)/len(refs))), end='\r')
        pss.append(ps)
    # keep PSFs whose mean correlation sits in a band around the average
    ps = np.mean(pss, axis=0)
    psmean = np.mean(ps)
    filt_singlets = np.argwhere((psmean - 0.04 < ps) & (ps < psmean + 0.06)).flatten()
    filt_psfs, filt_locs = [], []
    for i in filt_singlets:
        _, psf = get_stack(outdir, singlets[i], c_outdir=False)
        filt_psfs.append(np.array(psf))
        filt_locs.append(locations[i])
    print("\nUpsample and average PSFs:")
    for i, (psf, loc) in enumerate(zip(filt_psfs, filt_locs)):
        _, x_ori, y_ori, z_ori, x_gp, y_gp, z_gp, _, _, _, _, _ = loc
        #upscale psf image stack for better overlay
        psf_upsampled = psf.repeat(upsampling_factor, axis=0) \
                           .repeat(upsampling_factor, axis=1) \
                           .repeat(upsampling_factor, axis=2)
        # re-centre the upsampled volume on the fitted sub-pixel position
        psf_upsampled = cut_section_from_stack(psf_upsampled,
                                               x_gp*upsampling_factor + upsampling_factor/2,
                                               y_gp*upsampling_factor + upsampling_factor/2,
                                               width_x*upsampling_factor,
                                               width_y*upsampling_factor,
                                               z_gp*upsampling_factor + upsampling_factor/2,
                                               width_z*upsampling_factor,
                                               upsampled=True)
        if psf_sum is None:
            psf_sum = psf_upsampled
        else:
            psf_sum = np.add(psf_sum, psf_upsampled)
        print("*"*(i+1) + "-"*(len(filt_psfs)-i-1), end='\r')
    #save psf locations to file
    print("\nSaving PSF & metadata")
    filepath = outdir + "/locations.csv"
    locs = pd.DataFrame(filt_locs, columns = ["PSF", "x", "y", "z", "x_gp", "y_gp", "z_gp",
                                              "pb_x_tp", "pb_y_tp",
                                              "pb_x_g", "pb_y_g", "pb_z_g"])
    locs.to_csv(filepath, index=False)
    binned_psf_sum = rebin(psf_sum, upsampling_factor)
    uint8_binned_psf_sum = eight_bit_as(binned_psf_sum, np.uint8)
    #write averaged psf to file
    filepath = outdir + "/psf_av.tif"
    tifffile.imwrite(filepath, uint8_binned_psf_sum, photometric='minisblack')
    print("Generating debugging plot")
    plot_debug(max_proj, features, features_filt, locs, width_x, width_y, filt_psfs)
    print("Generating PSF")
    plot_PSF(psf_sum, pixel_x/upsampling_factor, pixel_y/upsampling_factor, pixel_z/upsampling_factor)
    plot_PSF(binned_psf_sum, pixel_x, pixel_y, pixel_z)
    print("Write plots to disk")
    # NOTE(review): file_name[:-6] chops 6 chars regardless of the extension
    # length -- verify the intended PDF name for .tif inputs.
    multipage(outdir + "/"+file_name[:-6]+".pdf")
    print("Done!")
    plt.close('all')
# -
| notebooks/PSF_extractor_BolWeeLane_for_Laura.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] raw_mimetype="text/latex"
# ## 1D Linear operator with one parameter
#
#
# This chapter introduces a basic example of the framework developed in Chapter 3. We take a one-dimensional system with a single parameter and extract an operator out of it.
#
#
# \begin{align*}
# \mathcal{L}_x^\phi u(x) &= f(x) \\
# \mathcal{L}_x^\phi &:= \phi \cdot + \frac{d}{dx}\cdot
# \end{align*}
#
# It is trivial to verify linearity of the operator:
#
# \begin{align*}
# u, f : [0, 1] &\rightarrow \mathbb{K}, \alpha, \beta \in \mathbb{R} \\
# \mathcal{L}_x^\phi (\alpha u + \beta f) &= \phi (\alpha u + \beta f) + \frac{d}{dx}(\alpha u + \beta f) \\
# &= \alpha \phi u + \beta \phi f + \alpha \frac{d}{dx}u + \beta \frac{d}{dx}f \\
# &= \alpha \mathcal{L}_x^\phi u + \beta \mathcal{L}_x^\phi f
# \end{align*}
#
# One of the solutions to this system might be:
#
# \begin{align*}
# u(x) &= x^3 \\
# f(x) &= \phi x^3 + 3x^2 \\
# x &\in [0, 1]
# \end{align*}
#
# We define Gaussian priors on the input and output:
#
# \begin{align*}
# u(x) &\sim \mathcal{GP}(0, k_{uu}(x,x',\theta)) \\
# f(x) &\sim \mathcal{GP}(0, k_{ff}(x,x',\theta,\phi))
# \end{align*}
#
# A noisy data model for the above system can be defined as:
#
# \begin{align*}
# y_u &= u(X_u) + \epsilon_u; \epsilon_u \sim \mathcal{N}(0, \sigma_u^2I)\\
# y_f &= f(X_f) + \epsilon_f; \epsilon_f \sim \mathcal{N}(0, \sigma_f^2I)
# \end{align*}
#
# For the sake of simplicity, we ignore the noise terms $\epsilon_u$ and $\epsilon_f$ while simulating the data. They are nevertheless beneficial when computing the negative log marginal likelihood (NLML), since they regularize the covariance matrix and keep it well-conditioned, for the reasons outlined after the preface.
#
#
# For the parameter estimation problem for the linear operator described above, we are given $\{X_u, y_u\}$, $\{X_f, y_f\}$ and we need to estimate $\phi$.
#
#
# #### Step 1: Simulate data
#
#
# We use $\phi = 2$.
#
# + nbsphinx="hidden"
import numpy as np
import sympy as sp
from scipy.optimize import minimize
import matplotlib.pyplot as plt
import time
# -
def get_simulated_data(n1, n2, phi):
    """Draw n1 samples of u(x) = x^3 and n2 samples of
    f(x) = phi*x^3 + 3*x^2 at uniformly random points in [0, 1)."""
    x_u = np.random.rand(n1)
    y_u = x_u ** 3
    x_f = np.random.rand(n2)
    y_f = phi * x_f ** 3 + 3 * x_f ** 2
    return (x_u, y_u, x_f, y_f)
# + nbsphinx="hidden"
(x_u, y_u, x_f, y_f) = get_simulated_data(10, 7, 2)
f, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, sharey=True, figsize=(10,3))
f.suptitle("Input and Output for the operator")
ax1.plot(x_u, y_u, 'o')
ax1.set(xlabel= "x", ylabel= "u(x)")
ax2.plot(x_f, y_f, 'ro')
ax2.set(xlabel= "x", ylabel= "f(x)")
# -
plt.show()
# #### Step 2: Evaluate kernels
#
#
# We use the RBF kernel defined as:
#
# \begin{align*}
# k_{uu}(x_i, x_j; \theta) = \theta exp(-\frac{1}{2l}(x_i-x_j)^2)
# \end{align*}
#
# throughout the report. It is worth noting that this step uses information about $\mathcal{L}_x^\phi$ but not about $u(x)$ or $f(x)$. The derivatives are computed using *sympy*.
x_i, x_j, theta, l, phi = sp.symbols('x_i x_j theta l phi')
kuu_sym = theta*sp.exp(-l*((x_i - x_j)**2))
kuu_fn = sp.lambdify((x_i, x_j, theta, l), kuu_sym, "numpy")
def kuu(x, theta, l):
    """Evaluate the k_uu covariance matrix over all pairs of points in x."""
    n = x.size
    k = np.zeros((n, n))
    for a, xa in enumerate(x):
        for b, xb in enumerate(x):
            k[a, b] = kuu_fn(xa, xb, theta, l)
    return k
# \begin{align*}
# k_{ff}(x_i,x_j;\theta,\phi) &= \mathcal{L}_{x_i}^\phi \mathcal{L}_{x_j}^\phi k_{uu}(x_i, x_j; \theta) \\
# &= \mathcal{L}_{x_i}^\phi \left( \phi k_{uu} + \frac{\partial}{\partial x_j}k_{uu} \right) \\
# &= \phi^2 k_{uu} + \phi \frac{\partial}{\partial x_j}k_{uu} + \phi \frac{\partial}{\partial x_i}k_{uu} + \frac{\partial}{\partial x_i}\frac{\partial}{\partial x_j}k_{uu} \\
# &= \theta exp(-\frac{1}{2l}(x_i-x_j)^2)\left[ \phi^2 + 2\phi |x_i-x_j| + (x_i-x_j)^2 + 1 \right]
# \end{align*}
kff_sym = phi**2*kuu_sym \
+ phi*sp.diff(kuu_sym, x_j) \
+ phi*sp.diff(kuu_sym, x_i) \
+ sp.diff(kuu_sym, x_j, x_i)
kff_fn = sp.lambdify((x_i, x_j, theta, l, phi), kff_sym, "numpy")
def kff(x, theta, l, phi):
    """Evaluate the k_ff covariance matrix over all pairs of points in x."""
    n = x.size
    k = np.zeros((n, n))
    for a, xa in enumerate(x):
        for b, xb in enumerate(x):
            k[a, b] = kff_fn(xa, xb, theta, l, phi)
    return k
# \begin{align*}
# k_{fu}(x_i,x_j;\theta,\phi) &= \mathcal{L}_{x_i}^\phi k_{uu}(x_i, x_j; \theta) \\
# &= \phi k_{uu} + \frac{\partial}{\partial x_i}k_{uu} \\
# &= \theta exp(-\frac{1}{2l}(x_i-x_j)^2) \left[ (\frac{1}{2})2|x_i-x_j| + \phi \right] \\
# &= \theta exp(-\frac{1}{2l}(x_i-x_j)^2)(\phi + |x_i-x_j|)
# \end{align*}
kfu_sym = phi*kuu_sym + sp.diff(kuu_sym, x_i)
kfu_fn = sp.lambdify((x_i, x_j, theta, l, phi), kfu_sym, "numpy")
def kfu(x1, x2, theta, l, phi):
    """Cross-covariance k_fu, evaluated at every pair (x2[row], x1[col])."""
    k = np.zeros((x2.size, x1.size))
    for row, xr in enumerate(x2):
        for col, xc in enumerate(x1):
            k[row, col] = kfu_fn(xr, xc, theta, l, phi)
    return k
# \begin{align*}
# k_{uf}(x_i,x_j;\theta,\phi) &= \mathcal{L}_{x_j}^\phi k_{uu}(x_i, x_j; \theta) \\
# &= \phi k_{uu} + \frac{\partial}{\partial x_j}k_{uu} \\
# &= \theta exp(-\frac{1}{2l}(x_i-x_j)^2) \left[ (\frac{1}{2})2|x_i-x_j| + \phi \right]\\
# &= \theta exp(-\frac{1}{2l}(x_i-x_j)^2)(\phi+|x_i-x_j|)
# \end{align*}
def kuf(x1, x2, theta, l, phi):
    """Cross-covariance k_uf; by symmetry of the kernel it is the
    transpose of k_fu."""
    return kfu(x1, x2, theta, l, phi).T
# #### Step 3: Compute the negative log marginal likelihood(NLML)
#
# The following covariance matrix is the result of our discussion at the end of Chapter 1.3.1, with an added noise parameter:
#
# \begin{align*}
# K = \begin{bmatrix}
# k_{uu}(X_u, X_u; \theta) + \sigma_u^2I & k_{uf}(X_u, X_f; \theta, \phi) \\
# k_{fu}(X_f, X_u; \theta, \phi) & k_{ff}(X_f, X_f; \theta, \phi) + \sigma_f^2I
# \end{bmatrix}
# \end{align*}
#
# For simplicity, assume $\sigma_u = \sigma_f$.
#
# \begin{align*}
# \mathcal{NLML} = \frac{1}{2} \left[ log|K| + y^TK^{-1}y + Nlog(2\pi) \right]
# \end{align*}
#
# where $y = \begin{bmatrix}
# y_u \\
# y_f
# \end{bmatrix}$.
def nlml(params, x1, x2, y1, y2, s):
    """Negative log marginal likelihood (up to the constant N*log(2*pi)/2).

    *params* holds log-hyperparameters [log theta, log l, log phi]; *s* is
    the noise variance added to the diagonal blocks of the joint
    covariance over (y1, y2).
    """
    params = np.exp(params)
    K = np.block([
        [
            kuu(x1, params[0], params[1]) + s*np.identity(x1.size),
            kuf(x1, x2, params[0], params[1], params[2])
        ],
        [
            kfu(x1, x2, params[0], params[1], params[2]),
            kff(x2, params[0], params[1], params[2]) + s*np.identity(x2.size)
        ]
    ])
    y = np.concatenate((y1, y2))
    # np.mat / np.matrix aliases were removed in NumPy 2.0; use plain
    # arrays, and solve() instead of forming an explicit inverse (same
    # value, better conditioned).
    val = 0.5*(np.log(abs(np.linalg.det(K))) + y @ np.linalg.solve(K, y))
    return float(val)
# #### Step 4: Optimize hyperparameters
#
nlml_wp = lambda params: nlml(params, x_u, x_f, y_u, y_f, 1e-6)
m = minimize(nlml_wp, np.random.rand(3), method="Nelder-Mead")
# + nbsphinx="hidden"
m
# -
np.exp(m.x)
# The estimated value comes very close to the actual value.
#
# For the current model, we get the following optimal values of the hyperparameters:
#
# | Parameter | Value |
# |-----------|-------|
# | $\theta$ |11.90390211 |
# | $l$ |0.47469623 |
# | $\phi$ |2.00120508 |
| parameter_estimation/par_est.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 协程
# **从句法上看,协程与生成器类似,都是定义体中包含yield关键字的函数。从根本上把yield是做流程控制的方式,可以更好地理解协程。**
#
# **协程是指一个过程,这个过程与调用方协作,产出由调用方提供的值。**
def simple_coroutine():
    """Minimal coroutine: prints, suspends at yield, prints what is sent."""
    print('-> coroutine started')
    x = yield
    print('-> coroutine received: ', x)
my_coro = simple_coroutine()
print(my_coro)
print(next(my_coro))
# 生成器调用方可以使用`.send(...)`方法发送数据,发送的数据会成为生成器函数中yield表达式左边的值。
my_coro.send(42)
# 协程可以身处四种状态中的一种,当前状态可以使用`inspect.getgeneratorstate(...)`函数确定,该函数返回下述字符串中的一个:
# 1. `GEN_CREATED`:等待开始执行
# 2. `GEN_RUNNING`:解释器正在执行
# 3. `GEN_SUSPENDED`:在`yield`表达式处暂停
# 4. `GEN_CLOSED`:执行结束
#
# 因为`send`方法的参数会成为暂停`yield`表达式的值,所以,仅当协程处于暂停状态时才能调用`send`方法。
from inspect import getgeneratorstate
# 创建协程对象后,立即把None之外的值传递给他,会出现错误
my_coro = simple_coroutine()
print(getgeneratorstate(my_coro))
my_coro.send(10)
# 最先调用`next(my_coro)`这一步通常称为“预激(prime)”协程(即,让协程向前执行到第一个yield表达式,准备好作为活跃的协程使用)
def simple_coro_2(a):
    """Coroutine with two yields: yields a, then a + b, printing every
    value received via send()."""
    print(f'-> started: a = {a}')
    b = yield a
    print(f'-> received: b = {b}')
    c = yield a + b
    print(f'-> received: c = {c}')
my_coro_2 = simple_coro_2(10)
print(getgeneratorstate(my_coro_2))
# 执行到第一个yield处:
# 1.执行第一个print语句;
# 2.执行yield a(即,返回a的值);
# 3.程序停在"b="处,等待用户输入
print(next(my_coro_2))
print(getgeneratorstate(my_coro_2))
# 1.参数b接受send发送过来的值,b = 20;
# 2.执行第二个print语句;
# 3.执行“yield a + b”(即,返回a+b的值);
# 4.程序停在"c="处,等待用户输入
print(my_coro_2.send(20))
print(getgeneratorstate(my_coro_2))
# 1. 参数c接收send发送过来的值, c = 30;
# 2. 执行第三个print语句;
# 3. 寻找下一个yield语句,没有找到,抛出"StopIteration"异常
print(my_coro_2.send(30))
def averager():
    """Running-average coroutine: yields the current mean after every value
    sent in; yields None before the first value arrives."""
    running_sum = 0.0
    n = 0
    mean = None
    while True:
        value = yield mean
        running_sum += value
        n += 1
        mean = running_sum / n
# 协程的好处是,total和count声明为局部变量即可,无需使用实例属性或闭包在多次调用之间保持上下文。
coro_avg = averager()
# 1. 预激协程'averager()'
# 2. 返回average的初始值None
print(next(coro_avg))
print(coro_avg.send(10))
print(coro_avg.send(20))
print(coro_avg.send(5))
# ## 预激协程的装饰器
# +
from functools import wraps
def coroutine(func):
    """Decorator: prime *func* (advance it to the first yield) so the
    returned generator is immediately ready for .send() calls."""
    # functools.wraps keeps the decorated function's original metadata intact
    @wraps(func)
    def primer(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)
        return gen
    return primer
# -
@coroutine
def averager():
    """Running-average coroutine, pre-primed by @coroutine: the first
    .send() works right after construction."""
    total = 0.0
    count = 0
    average = None
    while True:
        term = yield average
        total += term
        count += 1
        average = total / count
coro_avg = averager()
# 经过预激装饰器修饰后,初始化后的装饰就已经处于暂停状态了。
print(getgeneratorstate(coro_avg))
print(coro_avg.send(10))
print(coro_avg.send(20))
# ## 终止协程和异常处理
coro_avg = averager()
print(coro_avg.send(40))
print(coro_avg.send(50))
# 由于协程内没有进行异常处理,传入不符合要求的数据时,携程会终止
print(coro_avg.send('spam'))
# 如果试图重新激活已经出现异常的协程,会抛出`StopIteration`
print(coro_avg.send(60))
# +
class DemoException(Exception):
    """Exception type used to demonstrate coroutine error handling."""
    pass

# A coroutine that handles only DemoException
def demo_exc_handling():
    print('-> coroutine started')
    while True:
        try:
            x = yield
        # handle only this one exception type
        except DemoException:
            print('*** DemoException handled. Continuing ...')
        else:
            print('-> coroutine received: {!r}'.format(x))
    # This line should never execute, because:
    # 1. a DemoException is caught above and the coroutine keeps running
    # 2. any other exception terminates the coroutine immediately
    raise RuntimeError('This line should never run')
# -
exc_coro = demo_exc_handling()
# 预激协程
next(exc_coro)
print(exc_coro.send(10))
# 第一句输出是函数中`else`语句的`print`语句的输出,输出的`None`是`yield`的返回值
# ### 关闭协程
# 使用close方法关闭协程
print(exc_coro.close())
getgeneratorstate(exc_coro)
# ### 协程异常处理
# 协程调用`.throw(SomeException)`方法,可以将指定的异常抛给协程;协程调用`.close()`方法,可以将该协程关闭。
# #### 处理DemoException异常
exc_coro = demo_exc_handling()
# 预激协程
next(exc_coro)
print(exc_coro.send(10))
# 抛出DemoException异常给协程
print(exc_coro.throw(DemoException))
# 遇到异常并处理后,协程继续执行
getgeneratorstate(exc_coro)
# #### 处理非DemoException异常
exc_coro = demo_exc_handling()
# 预激协程
next(exc_coro)
# 抛出非DemoException异常给协程
print(exc_coro.throw(ZeroDivisionError))
getgeneratorstate(exc_coro)
# 由于`exc_coro`没有处理非`DemoException`异常的能力,协程会终止。
# 如果不管协程如何结束都想做一些清理工作,要把协程定义体中相关的代码放入`try/finally`块中。
def demo_finally():
    """Like demo_exc_handling, but guarantees a closing message via
    try/finally no matter how the coroutine ends."""
    print('-> coroutine started')
    try:
        while True:
            try:
                x = yield
            # handle only DemoException
            except DemoException:
                print('*** DemoException handled. Continuing ...')
            else:
                print('-> coroutine received: {!r}'.format(x))
    finally:
        # no need to call coroutine.close(): the generator closes itself
        # as soon as an exception it cannot handle reaches it
        print('-> coroutine ending.')
# ## 协程的返回值
#
# 如果协程中`yield`表达式的右边没有任何内容时,默认每次激活协程会返回一个`None`。
# +
from collections import namedtuple
Result = namedtuple('Result', ['count', 'average'])
def averager():
    """Accumulate values sent in; on receiving None, stop and return a
    Result(count, average) via StopIteration.value."""
    total = 0.0
    count = 0
    while True:
        term = yield
        # receiving None signals the end of the computation
        if term is None:
            break
        total += term
        count += 1
    return Result(count, total / count)
# -
coro_avg = averager()
next(coro_avg)
print(coro_avg.send(10))
print(coro_avg.send(30))
print(coro_avg.send(40))
print(coro_avg.send(None))
# 传给协程`None`时:
# 1. 协程结束
# 2. 返回结果
# 3. 生成器对象抛出`StopIteration`异常,异常对象的`value`属性保存着返回值
# ### 获取协程的返回值
coro_avg = averager()
next(coro_avg)
coro_avg.send(10)
coro_avg.send(20)
coro_avg.send(30)
# 获取协程的返回值要绕个圈子。
try:
coro_avg.send(None)
except StopIteration as exc:
result = exc.value
print(result)
# ## yield from
# 在生成器`gen`中使用`yield from subgen()`:`subgen()`会获得控制权,把产出的值传给`gen`的调用方,即调用方可以直接控制`subgen`。与此同时,`gen`会阻塞,等待`subgen`终止。
def gen():
for c in 'AB':
yield c
for i in range(1, 3):
yield i
list(gen())
# 使用`yield from`:
def gen():
yield from 'AB'
yield from range(1, 3)
list(gen())
# `yield from x`表达式对`x`做的第一件事就是调用`iter(x)`,从中获得迭代器,`x`可以是任意可迭代的对象。
#
# `yield from`的主要功能是打开双向通道,把最外层的调用方与最内层的子生成器连接起来,这样二者就可以直接发送和产出值,还可以直接传入异常,而不用在位于中间的协程中添加大量处理异常的样板代码。
#
# 引入`yield from`结构的目的是为了支持实现了`__next__`、`send`、 `close`和`throw`方法的生成器(也就是说为了更方便的实现协程)。
# +
from collections import namedtuple
Result = namedtuple('Result', ['count', 'average'])
def averager():
    """Accumulate sent values; None terminates and returns
    Result(count, average) via StopIteration.value."""
    total = 0.0
    count = 0
    while True:
        term = yield
        # receiving None signals the end of the computation
        if term is None:
            break
        total += term
        count += 1
    return Result(count, total / count)
def grouper(results, key):
    """Delegating generator: forwards sent values to a fresh averager and
    stores each finished averager's return value under *key*."""
    while True:
        results[key] = yield from averager()
def main(data):
    """Drive one grouper/averager pair per key in *data* and collect the
    Result for every group."""
    results = {}
    for key, values in data.items():
        group = grouper(results, key)
        next(group)
        for value in values:
            group.send(value)
        # None ends the current averager; its Result lands in results[key]
        group.send(None)
    return results
# -
# 1. 运行到`main()`函数的`group = grouper(results, key)`时,程序被协程`averager()`接管,此时`group`代表了协程`averager()`;
# 2. 当内层`for`循环(`'for value in values:'`)结束(即取完values中的所有值)后,`group`实例依旧在`yield from`表达式处暂停,因此,`grouper`函数定义体中的赋值语句`result[key]`还没有执行;
# 3. 紧接着输入`None`,终止`averager`实例,抛出`StopIteration`异常并向上冒泡,控制权回到函数`grouper()`。
# 4. 之后,`yield from`表达式的值是协程终止时传给`StopIteration`异常的第一个参数(`StopIteration.value`),并将该值绑定到`results[key]`。`grouper()`接收到`StopIteration`异常,而该函数内没有异常处理代码,所以继续向上冒泡异常;
# 5. `StopIteration`异常并冒泡到外层`for`循环(`'for key, values in data.items():'`),该层`for`循环接受到`StopIteration`异常后,认为迭代完成,压制异常,开始下一次循环
# 6. 外层`for`循环重新迭代时,会新建一个`grouper`实例,然后绑定到`group`变量上,前一个`grouper`实例被垃圾回收程序回收
# +
import random
# NOTE(review): `random` appears unused in this cell — presumably a leftover
# import; confirm before removing.
# Sample data: weights (kg) and heights (m) for two groups of students.
data = {'girls;kg':[40.9, 38.5, 44.3, 42.2, 45.2, 41.7, 44.5, 38.0, 40.6, 44.5],
        'girls;m':[1.6, 1.51, 1.4, 1.3, 1.41, 1.39, 1.33, 1.46, 1.45, 1.43],
        'boys;kg':[39.0, 40.8, 43.2, 40.8, 43.1, 38.6, 41.4, 40.6, 36.3],
        'boys;m':[1.38, 1.5, 1.32, 1.25, 1.37, 1.48, 1.25, 1.49, 1.46]}
# Average each list of measurements, keyed by group name.
results = main(data)
# -
results
#
# ## 使用协程做离散时间仿真
import collections

# A simulation event: when it happens, which taxi, and what the taxi does.
Event = collections.namedtuple('Event', ['time', 'proc', 'action'])

def taxi_process(ident, trips, start_time=0):
    """Coroutine modelling one taxi's day as a sequence of Events.

    Each `yield` hands an Event to the simulator; the simulator sends back
    the time at which the next action happens.
    """
    time = yield Event(start_time, ident, 'leave garage')
    completed = 0
    while completed < trips:
        time = yield Event(time, ident, 'pick up passenger')
        time = yield Event(time, ident, 'drop off passenger')
        completed += 1
    # After the last trip the taxi heads home; the coroutine then ends,
    # raising StopIteration in the simulator.
    yield Event(time, ident, 'going home')
# Simulation parameters (all times in simulated minutes).
DEPARTURE_INTERVAL = 5
SEARCH_DURATION = 5
TRIP_DURATION = 20
DEFAULT_END_TIME = 180
DEFAULT_NUMBER_OF_TAXIS = 3
# +
import random

def compute_duration(previous_action):
    """Return a random duration (>= 1 minute) for the phase that follows
    `previous_action`, drawn from an exponential distribution.

    Raises ValueError for an unrecognized action name.
    """
    mean_by_action = {
        'leave garage': SEARCH_DURATION,
        'drop off passenger': SEARCH_DURATION,
        'pick up passenger': TRIP_DURATION,
        'going home': 1,
    }
    if previous_action not in mean_by_action:
        raise ValueError(f'Unknown previous_action: {previous_action}')
    # expovariate(1/mean) has mean `mean`; truncate and add 1 so the
    # result is always a positive integer number of minutes.
    return int(random.expovariate(1 / mean_by_action[previous_action])) + 1
# +
import queue
class Simulator:
    """Discrete-event simulator that drives taxi coroutines."""
    def __init__(self, procs_map):
        # A priority queue is the core building block of discrete-event
        # simulation: events come out ordered by their scheduled time.
        self.events = queue.PriorityQueue()
        # Copy the dict so we never mutate the caller's mapping.
        self.procs = dict(procs_map)
    def run(self, end_time):
        """Schedule and display events until `end_time` (simulated minutes)."""
        # Prime each taxi coroutine and enqueue its first event.
        for _, proc in sorted(self.procs.items()):
            first_event = next(proc)
            self.events.put(first_event)
        # Initialize the main loop.
        sim_time = 0
        # The main loop ends when either:
        # 1. the simulated day is over, or
        # 2. every taxi has already finished its day's trips.
        while sim_time < end_time:
            if self.events.empty():
                print('*** end of events ***')
                break
            # Pop the next event to handle (the one with the smallest time).
            current_event = self.events.get()
            sim_time, proc_id, previous_action = current_event
            # Indent each taxi's output by its id for readability.
            print('taxi: ', proc_id, proc_id * ' ', current_event)
            # Look up the coroutine for this taxi.
            activate_proc = self.procs[proc_id]
            # Advance simulated time past this event.
            next_time = sim_time + compute_duration(previous_action)
            try:
                # Resume the coroutine, sending it the time of its next event.
                next_event = activate_proc.send(next_time)
            # StopIteration means this taxi finished its day; drop it.
            except StopIteration:
                del self.procs[proc_id]
            else:
                # Otherwise schedule the event the coroutine just yielded.
                self.events.put(next_event)
        # This `else` runs only when the day ended before the event queue
        # drained (some taxis may not have completed all their trips);
        # a `break` above skips it.
        else:
            msg = f'*** end of simulation time: {self.events.qsize()} events pending'
            print(msg)
# -
def main(end_time = DEFAULT_END_TIME,
         num_taxis = DEFAULT_NUMBER_OF_TAXIS,
         seed = 3):
    """Create the taxi coroutines and run the simulation to `end_time`."""
    # Seeding makes a run reproducible; pass seed=None for a random run.
    if seed is not None:
        random.seed(seed)
    # Taxi i makes (i + 1) * 2 trips and leaves the garage
    # DEPARTURE_INTERVAL minutes after the previous taxi.
    taxis = {i: taxi_process(i, (i + 1) * 2, i * DEPARTURE_INTERVAL)
             for i in range(num_taxis)}
    sim = Simulator(taxis)
    sim.run(end_time)
main()
# +
class A:
    """Base class taking two positional constructor arguments."""
    def __init__(self, a, b):
        print(f'a = {a}')
        print(f'b = {b}')
class B:
    """Mixin-style class forwarding all constructor args via super()."""
    def __init__(self, *args, **kwargs):
        # Cooperative multiple inheritance: in C's MRO (C -> B -> A),
        # this super() call invokes A.__init__ with the forwarded args.
        super(B, self).__init__(*args, **kwargs)
        print(f'c = 100')
class C(B, A):
    """Demonstrates cooperative __init__ calls along the MRO C -> B -> A."""
    def __init__(self, a, b):
        # B.__init__(self)
        # A.__init__(self, a, b)
        super(C, self).__init__(a, b)
# -
# Prints a = 4, b = 5 (from A) and then c = 100 (from B).
c = C(4, 5)
| Jupyter/16.*.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from gs_quant.common import PayReceive, Currency
from gs_quant.instrument import IRSwaption
from gs_quant.markets.portfolio import Portfolio
from gs_quant.session import Environment, GsSession
# external users should substitute their client id and secret; please skip this step if using internal jupyterhub
GsSession.use(Environment.PROD, client_id=None, client_secret=None, scopes=('run_analytics',))
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Two EUR payer swaptions: 3m expiry into a 5y swap and 6m expiry into a 7y swap.
swaption1 = IRSwaption(PayReceive.Pay, '5y', Currency.EUR, expiration_date='3m', name='EUR-3m5y')
swaption2 = IRSwaption(PayReceive.Pay, '7y', Currency.EUR, expiration_date='6m', name='EUR-6m7y')
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Group the two instruments into a single portfolio.
portfolio = Portfolio((swaption1, swaption2))
| gs_quant/documentation/03_portfolios/examples/030000_create_portfolio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tf20py36]
# language: python
# name: conda-env-tf20py36-py
# ---
# ## 人脸识别程序
# - 该项目用于验证人脸识别的可用性
# - 依赖环境: tf20py36
# - 验证流程:
# - 数据人脸库
# - 对的图片进行人脸识别
# - 给出识别结果
import tensorflow
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
from os import path
# +
def show_image(img_path, name=''):
    """Read the image at `img_path` and display it in a new figure titled `name`."""
    img = mpimg.imread(img_path)
    plt.figure()
    plt.imshow(img)
    plt.title(name)
def show_images(images):
    """Display each (name, img_path) pair in `images`, one figure per image."""
    for name, img_path in images:
        show_image(img_path, name)
# -
# cd '~/jianzhou/facenet'
# +
def load_db_images(DB_IMAGES_PATH):
    """Load the face database rooted at DB_IMAGES_PATH.

    Expected layout: one subdirectory per person, each containing a
    `name.txt` file (the display name) and the person's `*.jpg` images.

    Returns a dict mapping face_dir -> (name, [image_paths]).
    """
    faces = [x for x in os.listdir(DB_IMAGES_PATH) if path.isdir(path.join(DB_IMAGES_PATH, x))]
    print('load faces: %s' % faces)
    # {face_dir: (name, [img1, img2, ...])}
    db_images = {}
    for face in faces:
        face_dir = path.join(DB_IMAGES_PATH, face)
        # The person's display name is stored in name.txt inside the folder.
        with open(path.join(face_dir, 'name.txt')) as r:
            name = r.read().strip()
        face_images = [path.join(face_dir, x) for x in os.listdir(face_dir) if x.endswith('.jpg')]
        db_images[face_dir] = name, face_images
    print('load db_images:')
    for face_dir in db_images:
        print(face_dir)
        name, images = db_images[face_dir]
        # BUG FIX: the original expression ('name: %s' % name + '\t'.join(images))
        # formatted only the name and then appended the joined image list with
        # no separator; format both fields explicitly instead.
        print('name: %s\t%s' % (name, '\t'.join(images)))
    return db_images
# Root directory of the face database; each subdirectory is one person.
DB_IMAGES_PATH = './data/images'
db_images = load_db_images(DB_IMAGES_PATH)
# Display every enrolled image, titled with its own path.
for face_dir in db_images:
    name, images = db_images[face_dir]
    for img in images:
        show_image(img, img)
# -
# A probe image to recognize against the database.
test_img = './data/test_images/zy.jpg'
show_image(test_img)
#test_img = "./data/images/jz/jz2.jpg"
#show_image(test_img)
# Shell cell: run the recognizer script in the tf12py27 conda environment.
# !source activate tf12py27 \
# && python src/recognize.py --gpu_memory_fraction 0.8 model/20180402-114759 \
# {test_img}
# Show the matching database image for visual comparison.
show_image("./data/images/zy/zy1.jpg")
| facenet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="1Z6Wtb_jisbA"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="QUyRGn9riopB"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="H1yCdGFW4j_F"
# # 预创建的 Estimators
# + [markdown] colab_type="text" id="PS6_yKSoyLAl"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://tensorflow.google.cn/tutorials/estimator/premade"><img src="https://tensorflow.google.cn/images/tf_logo_32px.png" />在 tensorFlow.google.cn 上查看</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/estimator/premade.ipynb"><img src="https://tensorflow.google.cn/images/colab_logo_32px.png" />在 Google Colab 中运行</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/tutorials/estimator/premade.ipynb"><img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png" />在 GitHub 上查看源代码</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/tutorials/estimator/premade.ipynb"><img src="https://tensorflow.google.cn/images/download_logo_32px.png" />下载 notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="FgdA9XE5ZCS3"
# Note: 我们的 TensorFlow 社区翻译了这些文档。因为社区翻译是尽力而为, 所以无法保证它们是最准确的,并且反映了最新的
# [官方英文文档](https://tensorflow.google.cn/?hl=en)。如果您有改进此翻译的建议, 请提交 pull request 到
# [tensorflow/docs](https://github.com/tensorflow/docs) GitHub 仓库。要志愿地撰写或者审核译文,请加入
# [<EMAIL> Google Group](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-zh-cn)。
# + [markdown] colab_type="text" id="R4YZ_ievcY7p"
# 本教程将向您展示如何使用 Estimators 解决 Tensorflow 中的鸢尾花(Iris)分类问题。Estimator 是 Tensorflow 完整模型的高级表示,它被设计用于轻松扩展和异步训练。更多细节请参阅 [Estimators](https://tensorflow.google.cn/guide/estimator)。
#
# 请注意,在 Tensorflow 2.0 中,[Keras API](https://tensorflow.google.cn/guide/keras) 可以完成许多相同的任务,而且被认为是一个更易学习的API。如果您刚刚开始入门,我们建议您从 Keras 开始。有关 Tensorflow 2.0 中可用高级API的更多信息,请参阅 [Keras标准化](https://medium.com/tensorflow/standardizing-on-keras-guidance-on-high-level-apis-in-tensorflow-2-0-bad2b04c819a)。
#
# + [markdown] colab_type="text" id="8IFct0yedsTy"
# ## 首先要做的事
#
# 为了开始,您将首先导入 Tensorflow 和一系列您需要的库。
#
# + colab={} colab_type="code" id="jPo5bQwndr9P"
import numpy as np
import pandas as pd
import tensorflow as tf
# + [markdown] colab_type="text" id="c5w4m5gncnGh"
# ## 数据集
#
# 本文档中的示例程序构建并测试了一个模型,该模型根据[花萼](https://en.wikipedia.org/wiki/Sepal)和[花瓣](https://en.wikipedia.org/wiki/Petal)的大小将鸢尾花分成三种物种。
#
# 您将使用鸢尾花数据集训练模型。该数据集包括四个特征和一个[标签](https://developers.google.com/machine-learning/glossary/#label)。这四个特征确定了单个鸢尾花的以下植物学特征:
#
# * 花萼长度
# * 花萼宽度
# * 花瓣长度
# * 花瓣宽度
#
# 根据这些信息,您可以定义一些有用的常量来解析数据:
#
# + colab={} colab_type="code" id="lSyrXp_He_UE"
# Column names for the iris CSVs; SPECIES index == integer class id.
CSV_COLUMN_NAMES = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Species']
SPECIES = ['Setosa', 'Versicolor', 'Virginica']
# + [markdown] colab_type="text" id="j6mTfIQzfC9w"
# Next, download and parse the iris dataset with Keras and pandas. Note that
# separate datasets are kept for training and testing.
# + colab={} colab_type="code" id="PumyCN8VdGGc"
# Download (and cache) the train/test CSVs, then parse them with pandas.
train_path = tf.keras.utils.get_file(
    "iris_training.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_training.csv")
test_path = tf.keras.utils.get_file(
    "iris_test.csv", "https://storage.googleapis.com/download.tensorflow.org/data/iris_test.csv")
train = pd.read_csv(train_path, names=CSV_COLUMN_NAMES, header=0)
test = pd.read_csv(test_path, names=CSV_COLUMN_NAMES, header=0)
# + [markdown] colab_type="text" id="wHFxNLszhQjz"
# 通过检查数据您可以发现有四列浮点型特征和一列 int32 型标签。
# + colab={} colab_type="code" id="WOJt-ML4hAwI"
train.head()
# + [markdown] colab_type="text" id="jQJEYfVvfznP"
# 对于每个数据集都分割出标签,模型将被训练来预测这些标签。
# + colab={} colab_type="code" id="sSaJNGeaZCTG"
# Split the label column off each DataFrame; the model predicts these.
train_y = train.pop('Species')
test_y = test.pop('Species')
# The label column is now removed from the feature DataFrames.
train.head()
# + [markdown] colab_type="text" id="jZx1L_1Vcmxv"
# ## Estimator 编程概述
#
# 现在您已经设定好了数据,您可以使用 Tensorflow Estimator 定义模型。Estimator 是从 `tf.estimator.Estimator` 中派生的任何类。Tensorflow提供了一组`tf.estimator`(例如,`LinearRegressor`)来实现常见的机器学习算法。此外,您可以编写您自己的[自定义 Estimator](https://tensorflow.google.cn/guide/custom_estimators)。入门阶段我们建议使用预创建的 Estimator。
#
# 为了编写基于预创建的 Estimator 的 Tensorflow 项目,您必须完成以下工作:
#
# * 创建一个或多个输入函数
# * 定义模型的特征列
# * 实例化一个 Estimator,指定特征列和各种超参数。
# * 在 Estimator 对象上调用一个或多个方法,传递合适的输入函数以作为数据源。
#
# 我们来看看这些任务是如何在鸢尾花分类中实现的。
#
# + [markdown] colab_type="text" id="2OcguDfBcmmg"
# ## 创建输入函数
#
# 您必须创建输入函数来提供用于训练、评估和预测的数据。
#
# **输入函数**是一个返回 `tf.data.Dataset` 对象的函数,此对象会输出下列含两个元素的元组:
#
# * [`features`](https://developers.google.com/machine-learning/glossary/#feature)——Python字典,其中:
# * 每个键都是特征名称
# * 每个值都是包含此特征所有值的数组
# * `label` 包含每个样本的[标签](https://developers.google.com/machine-learning/glossary/#label)的值的数组。
#
# 为了向您展示输入函数的格式,请查看下面这个简单的实现:
#
# + colab={} colab_type="code" id="nzr5vRr5caGF"
def input_evaluation_set():
    """Return a minimal in-memory (features, labels) pair.

    `features` maps each column name to a numpy array of two measurements;
    `labels` holds the corresponding integer class ids.
    """
    raw_features = {
        'SepalLength': [6.4, 5.0],
        'SepalWidth': [2.8, 2.3],
        'PetalLength': [5.6, 3.3],
        'PetalWidth': [2.2, 1.0],
    }
    features = {name: np.array(values) for name, values in raw_features.items()}
    labels = np.array([2, 1])
    return features, labels
# + [markdown] colab_type="text" id="NpXvGjfnjHgY"
# 您的输入函数可以以您喜欢的方式生成 `features` 字典与 `label` 列表。但是,我们建议使用 Tensorflow 的 [Dataset API](https://tensorflow.google.cn/guide/datasets),该 API 可以用来解析各种类型的数据。
#
# Dataset API 可以为您处理很多常见情况。例如,使用 Dataset API,您可以轻松地从大量文件中并行读取记录,并将它们合并为单个数据流。
#
# 为了简化此示例,我们将使用 [pandas](https://pandas.pydata.org/) 加载数据,并利用此内存数据构建输入管道。
#
# + colab={} colab_type="code" id="T20u1anCi8NP"
def input_fn(features, labels, training=True, batch_size=256):
    """An input function for training or evaluating"""
    # Convert the inputs to a Dataset of (feature-dict, label) pairs.
    dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))
    # Shuffle and repeat only in training mode; evaluation gets one epoch.
    if training:
        dataset = dataset.shuffle(1000).repeat()
    return dataset.batch(batch_size)
# + [markdown] colab_type="text" id="xIwcFT4MlZEi"
# ## 定义特征列(feature columns)
#
# [**特征列(feature columns)**](https://developers.google.com/machine-learning/glossary/#feature_columns)是一个对象,用于描述模型应该如何使用特征字典中的原始输入数据。当您构建一个 Estimator 模型的时候,您会向其传递一个特征列的列表,其中包含您希望模型使用的每个特征。`tf.feature_column` 模块提供了许多为模型表示数据的选项。
#
# 对于鸢尾花问题,4 个原始特征是数值,因此我们将构建一个特征列的列表,以告知 Estimator 模型将 4 个特征都表示为 32 位浮点值。故创建特征列的代码如下所示:
#
# + colab={} colab_type="code" id="ZTTriO8FlSML"
# Feature columns describe how the model should use the raw inputs:
# every iris feature is a plain numeric column.
my_feature_columns = [
    tf.feature_column.numeric_column(key=key) for key in train.keys()
]
# + [markdown] colab_type="text" id="jpKkhMoZljco"
# 特征列可能比上述示例复杂得多。您可以从[指南](https://tensorflow.google.cn/guide/feature_columns)获取更多关于特征列的信息。
#
# 我们已经介绍了如何使模型表示原始特征,现在您可以构建 Estimator 了。
#
# + [markdown] colab_type="text" id="kuE59XHEl22K"
# ## 实例化 Estimator
#
# 鸢尾花为题是一个经典的分类问题。幸运的是,Tensorflow 提供了几个预创建的 Estimator 分类器,其中包括:
#
# * `tf.estimator.DNNClassifier` 用于多类别分类的深度模型
# * `tf.estimator.DNNLinearCombinedClassifier` 用于广度与深度模型
# * `tf.estimator.LinearClassifier` 用于基于线性模型的分类器
#
# 对于鸢尾花问题,`tf.estimator.DNNClassifier` 似乎是最好的选择。您可以这样实例化该 Estimator:
#
# + colab={} colab_type="code" id="qnf4o2V5lcPn"
# Build a deep neural network with two hidden layers of 30 and 10 nodes.
classifier = tf.estimator.DNNClassifier(
    feature_columns=my_feature_columns,
    # Hidden layer sizes: 30 then 10 nodes.
    hidden_units=[30, 10],
    # The model must choose between three species.
    n_classes=3)
# + [markdown] colab_type="text" id="tzzt5nUpmEe3"
# ## 训练、评估和预测
#
# 我们已经有一个 Estimator 对象,现在可以调用方法来执行下列操作:
#
# * 训练模型。
# * 评估经过训练的模型。
# * 使用经过训练的模型进行预测。
# + [markdown] colab_type="text" id="rnihuLdWmE75"
# ### 训练模型
#
# 通过调用 Estimator 的 `Train` 方法来训练模型,如下所示:
# + colab={} colab_type="code" id="4jW08YtPl1iS"
# Train the model. The lambda defers input_fn so the Estimator receives the
# zero-argument callable it expects; training stops after 5000 steps.
classifier.train(
    input_fn=lambda: input_fn(train, train_y, training=True),
    steps=5000)
# + [markdown] colab_type="text" id="ybiTFDmlmes8"
# 注意将 ` input_fn` 调用封装在 [`lambda`](https://docs.python.org/3/tutorial/controlflow.html) 中以获取参数,同时提供不带参数的输入函数,如 Estimator 所预期的那样。`step` 参数告知该方法在训练多少步后停止训练。
#
# + [markdown] colab_type="text" id="HNvJLH8hmsdf"
# ### 评估经过训练的模型
#
# 现在模型已经经过训练,您可以获取一些关于模型性能的统计信息。代码块将在测试数据上对经过训练的模型的准确率(accuracy)进行评估:
#
# + colab={} colab_type="code" id="A169XuO4mKxF"
# Evaluate on the held-out test set; no `steps` argument is needed because
# the evaluation input_fn yields exactly one epoch of data.
eval_result = classifier.evaluate(
    input_fn=lambda: input_fn(test, test_y, training=False))
print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))
# + [markdown] colab_type="text" id="VnPMP5EHph17"
# 与对 `train` 方法的调用不同,我们没有传递 `steps` 参数来进行评估。用于评估的 `input_fn` 只生成一个 [epoch](https://developers.google.com/machine-learning/glossary/#epoch) 的数据。
#
# `eval_result` 字典亦包含 `average_loss`(每个样本的平均误差),`loss`(每个 mini-batch 的平均误差)与 Estimator 的 `global_step`(经历的训练迭代次数)值。
# + [markdown] colab_type="text" id="ur624ibpp52X"
# ### 利用经过训练的模型进行预测(推理)
#
# 我们已经有一个经过训练的模型,可以生成准确的评估结果。我们现在可以使用经过训练的模型,根据一些无标签测量结果预测鸢尾花的品种。与训练和评估一样,我们使用单个函数调用进行预测:
# + colab={} colab_type="code" id="wltc0jpgng38"
# Generate predictions from the model for three unlabelled flowers;
# `expected` holds the species we anticipate for each one.
expected = ['Setosa', 'Versicolor', 'Virginica']
predict_x = {
    'SepalLength': [5.1, 5.9, 6.9],
    'SepalWidth': [3.3, 3.0, 3.1],
    'PetalLength': [1.7, 4.2, 5.4],
    'PetalWidth': [0.5, 1.5, 2.1],
}
def input_fn(features, batch_size=256):
    """An input function for prediction."""
    # No labels at prediction time: batch the bare feature dict.
    dataset = tf.data.Dataset.from_tensor_slices(dict(features))
    return dataset.batch(batch_size)
# `predict` returns a lazy iterable of per-sample prediction dicts.
predictions = classifier.predict(
    input_fn=lambda: input_fn(predict_x))
# + [markdown] colab_type="text" id="JsETKQo0rHvi"
# The `predict` method returns a Python iterable yielding one prediction
# dict per sample. The code below prints some predictions and their
# probabilities:
# + colab={} colab_type="code" id="Efm4mLzkrCxp"
for pred_dict, expec in zip(predictions, expected):
    class_id = pred_dict['class_ids'][0]
    probability = pred_dict['probabilities'][class_id]
    print('Prediction is "{}" ({:.1f}%), expected "{}"'.format(
        SPECIES[class_id], 100 * probability, expec))
| site/zh-cn/tutorials/estimator/premade.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: deepsvg
# language: python
# name: deepsvg
# ---
# %load_ext autoreload
# %autoreload 2
import os
# Run from the repository root so relative paths (pretrained/, configs/) resolve.
os.chdir("..")
# +
from deepsvg.svglib.svg import SVG
from deepsvg import utils
from deepsvg.difflib.tensor import SVGTensor
from deepsvg.svglib.utils import to_gif, make_grid, make_grid_lines, make_grid_grid
from deepsvg.svglib.geom import Bbox
from deepsvg.svg_dataset import SVGDataset, load_dataset
from deepsvg.utils.utils import batchify, linear
import os
import ntpath
import re
from tqdm import tqdm
import pickle
import random
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from cairosvg import svg2png
from PIL import Image
import cv2
import pandas as pd
import numpy as np
from sklearn.manifold import TSNE
import torch
# -
# # Font generation and interpolation
# Prefer the first GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0"if torch.cuda.is_available() else "cpu")
# Load the pretrained model
# NOTE(review): `cfg` is only defined in a later cell (the pretrained-model
# cell below) — this cell fails with NameError unless that cell ran first;
# confirm the intended execution order.
dataset = load_dataset(cfg)
# Label vocabulary: a glyph's class id is its index in this string.
glyph2label = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
def sample_class(label, z=None, temperature=.3, filename=None, do_display=True, return_svg=False, return_png=False, *args, **kwargs):
    """Decode one glyph conditioned on `label` (e.g. 'A') from latent `z`.

    Uses the module-level `model`, `cfg`, `device` and `glyph2label`.
    If `z` is None, a random latent is drawn, scaled by `temperature`.
    Returns the SVG object when `return_svg` is True; otherwise draws it
    (optionally to `filename`, returning PNG bytes when `return_png`).
    """
    label_id = glyph2label.index(label)
    if z is None:
        # Random latent draw; lower temperature => less glyph variation.
        z = torch.randn(1, 1, 1, cfg.model_cfg.dim_z).to(device) * temperature
    label, = batchify((torch.tensor(label_id),), device=device)
    commands_y, args_y = model.greedy_sample(None, None, None, None, label=label, z=z)
    tensor_pred = SVGTensor.from_cmd_args(commands_y[0].cpu(), args_y[0].cpu())
    # Convert the sampled command/argument tensors back into a normalized SVG.
    svg_path_sample = SVG.from_tensor(tensor_pred.data, viewbox=Bbox(256), allow_empty=True).normalize().split_paths()
    if return_svg:
        return svg_path_sample
    return svg_path_sample.draw(file_path=filename, do_display=do_display, return_png=return_png, *args, **kwargs)
# +
def easein_easeout(t):
    """Smooth ease-in/ease-out curve mapping t in [0, 1] to [0, 1].

    A rational sigmoid with f(0) = 0, f(0.5) = 0.5, f(1) = 1; works
    elementwise on torch tensors as well as on plain floats. (The stray
    C-style trailing semicolon of the original is removed; the formula
    is unchanged.)
    """
    return t * t / (2. * (t * t - t) + 1.)
def interpolate(z1, z2, label, n=25, filename=None, ease=True, do_display=True):
    """Render a looping GIF morphing glyph `label` from latent z1 to z2."""
    alphas = torch.linspace(0., 1., n)
    if ease:
        # Ease-in/ease-out pacing looks smoother than linear interpolation.
        alphas = easein_easeout(alphas)
    z_list = [(1-a) * z1 + a * z2 for a in alphas]
    img_list = [sample_class(label, z, do_display=False, return_png=True) for z in z_list]
    # Append the reversed frames so the GIF loops back and forth seamlessly.
    to_gif(img_list + img_list[::-1], file_path=filename, frame_duration=1/12)
# -
def encode_icon(idx):
    """Encode dataset entry `idx` into a latent vector z (no gradients)."""
    # random_aug=False gives the deterministic, unaugmented sample.
    data = dataset.get(id=idx, random_aug=False)
    model_args = batchify((data[key] for key in cfg.model_args), device)
    with torch.no_grad():
        z = model(*model_args, encode_mode=True)
    return z
def interpolate_icons(idx1, idx2, label, n=25, *args, **kwargs):
    """Morph between the latents of two dataset entries for glyph `label`."""
    z1, z2 = encode_icon(idx1), encode_icon(idx2)
    interpolate(z1, z2, label, n=n, *args, **kwargs)
def get_z(temperature=.3):
    """Sample a random latent vector, scaled by `temperature`."""
    z = torch.randn(1, 1, 1, cfg.model_cfg.dim_z).to(device) * temperature
    return z
def sample_all_glyphs(z, filename=None):
    """Render every glyph for latent `z` on a three-row grid.

    Rows are: digits, lowercase letters, uppercase letters.
    """
    rows = []
    for glyphs in ("0123456789",
                   "abcdefghijklmnopqrstuvwxyz",
                   "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
        rows.append([sample_class(glyph, z=z, return_svg=True) for glyph in glyphs])
    grid = make_grid_lines(rows)
    grid.draw(file_path=filename)
# ## Random font generation
# Sample a random latent vector and decode it conditionally on all glyph labels:
# +
# Latent dimensionality; must match the checkpoint file name and contents.
dim_z = 512
pretrained_folder = "pretrained/fake"
pretrained_file = "fake_labelled_{}.pth.tar".format(dim_z)
pretrained_path = os.path.join(pretrained_folder, pretrained_file)
from configs.deepsvg.hierarchical_ordered_gest_labelled import Config
cfg = Config()
# Keep the config's latent size in sync with the checkpoint.
cfg.model_cfg.dim_z = dim_z
model = cfg.make_model().to(device)
utils.load_model(pretrained_path, model)
model.eval();
# Encode a real glyph 'A' and resample it, showing control points/handles.
label = "A"
z = encode_icon('1365_BellCentennialStd-Address_fs_50_A_Layer 2')
sample_class(label, z=z, with_points=True, with_handles=True, with_moves=False)
# -
# Now let's make a convenient grid display of all glyphs!
# Grid display of all glyphs decoded from the same latent z.
sample_all_glyphs(z)
# # Interpolation of font glyphs
# Interpolations between randomly generated glyphs
z1, z2 = get_z(), get_z()
interpolate(z1, z2, "9")
# Interpolations between real fonts
label = "0"
uni = dataset._label_to_uni(glyph2label.index(label))
# Pick two random fonts containing this glyph and morph between them.
id1, id2 = dataset.random_id_by_uni(uni), dataset.random_id_by_uni(uni)
interpolate_icons(id1, id2, label)
| notebooks/fonts_new.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Load the iris dataset from the local CSV file.
filepath = 'Iris_Data.csv'
data = pd.read_csv(filepath)
# +
# create a new column
# -
data['sepal_area'] = data.sepal_length*data.sepal_width
# +
# print the first 5 rows and the last 4 columns
# -
print(data.iloc[:5,-4:])
# Strip the 'Iris-' prefix to get a short species abbreviation.
data['abbrev'] = (data
                  .species
                  .apply(lambda x:
                         x.replace('Iris-','')))
print(data.iloc[:5,-4:])
# Stack the first two and last two rows into a small sample frame.
small_data = pd.concat([data.iloc[:2], data.iloc[-2:]])
print(small_data.iloc[:,-4:])
# Number of samples per species.
qt_species = (data.groupby('species').size())
print(qt_species)
# Column means. numeric_only=True keeps the historical behaviour (string
# columns such as 'species'/'abbrev' are skipped) on pandas >= 2.0, where
# DataFrame.mean() over mixed dtypes raises TypeError instead of dropping them.
print(data.mean(numeric_only=True))
print(data.petal_length.median())
# +
# mode - the most frequent value in the sample
# -
print(data.petal_length.mode())
print(data)
# NOTE(review): on pandas >= 2.0, quantile() over mixed dtypes also needs
# numeric_only=True — confirm the target pandas version.
print(data.quantile(0))
print(data.describe())
# Reproducible 5-row sample without replacement.
sample = (data.sample(n=5,replace=False,random_state=41))
print(sample.iloc[:,-3:])
| week1/pandas_ch_1.ipynb |