code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# [View in Colaboratory](https://colab.research.google.com/github/abhiWriteCode/Small-Machine-Learning-Projects/blob/master/Classify_Iris_flowers.ipynb)
# + id="iPJwGSpOMcoT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="5476ae12-d215-4d85-fd10-17cc0003efbf"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import sklearn
from sklearn.datasets import load_iris
import tensorflow as tf
import keras
# Silence TensorFlow's info/warning log spam.
# NOTE(review): tf.logging was removed in TensorFlow 2.x, so this cell
# requires TF 1.x — confirm the pinned version before upgrading.
tf.logging.set_verbosity(tf.logging.ERROR)
# Compact pandas display for notebook output.
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
# + id="XIT7sEkNMiQd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="c88d23c8-2774-47ac-86ea-cf13bd7f4368"
dataset = load_iris()
# dir() shows the attributes of the sklearn Bunch (data, target, feature_names, ...).
print(dir(dataset))
# + id="t_zrtbTpNw5_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="f90546e7-33c8-4a0d-87e7-4cd52b8c864e"
# Feature matrix and integer class labels.
X = dataset.data
y = dataset.target
X.shape, y.shape
# + id="CW8Q0Rc8PVhY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 196} outputId="fa360c7d-8b9b-43c3-e7e7-a3d46e5a5d63"
# Shuffle the samples so later train/test splits are not biased by the
# class-sorted ordering of the raw iris data.
shuffled_index = np.random.permutation(len(y))  # clearer than argsort(randn(n))
X = X[shuffled_index]
y = y[shuffled_index]
# Wrap in DataFrames for labelled columns and nicer notebook display.
X = pd.DataFrame(X, columns=dataset.feature_names)
y = pd.DataFrame(y, columns=['target'])
y.head()
# + id="woUrDMQPRJkK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 196} outputId="effb3a4f-5b50-40c9-d715-12e768b64584"
X.head()
# + id="zvlrYCljRKty" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 286} outputId="17c8f3d2-72a5-4ad1-da03-71286e9a666f"
X.describe()
# + id="Q-ofBPOKS7f4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 166} outputId="d2e78b98-6aca-4996-9f48-0b5ab546a238"
X.corr()
# + [markdown] id="oDpSSsE5Ts0J" colab_type="text"
# `petal length` and `petal width` are highly correlated
#
# `sepal length` and `petal length` are also highly correlated
# + id="9Xm4VRxpS-ZJ" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
# Hold out 20% of the shuffled samples for testing.
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2)
# + [markdown] id="kc4x0eiCcf-b" colab_type="text"
# ## Model1
# + id="oc0rhPOsVPiU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 120} outputId="0e493974-c7be-46ea-dc0d-a7124c86fdb9"
from sklearn.neighbors import KNeighborsClassifier
# n_neighbors=3: each prediction is a majority vote over the 3 nearest
# training samples (the value is unrelated to the number of classes).
# p=2 selects the Euclidean distance metric.
classifier1 = KNeighborsClassifier(n_neighbors=3, p=2)
# ravel() flattens the (n, 1) target frame into the 1-d array sklearn
# expects, avoiding the DataConversionWarning for column-vector targets.
classifier1.fit(X_tr, y_tr.values.ravel())
# + id="aMHCyy7qXXmk" colab_type="code" colab={}
# Predicting the test set results
y_pred1 = classifier1.predict(X_te)
# + id="OHl_1cgQX4u3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="29006d9f-d9d6-4681-8caa-f2ad7e53e185"
output_table1 = pd.DataFrame({'actually': y_te.values.ravel(), 'predicted': y_pred1})
output_table1['is right'] = output_table1['actually'] == output_table1['predicted']
output_table1
# + id="Q9NXOpF7YTzZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="9a301fcd-12b1-4945-9a9e-5def676cf504"
# Number of correct predictions on the test set.
correct_predictions = int(output_table1['is right'].sum())
print(correct_predictions)
# + id="r0D-tK4PY2L3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="432e3e12-f3df-45ae-e229-356b6c71214c"
accuracy1 = correct_predictions / len(y_te)
accuracy1
# + [markdown] id="8OTFx6K2ckOS" colab_type="text"
# ## Model2
# + [markdown] id="6X2lhCKzc_Uc" colab_type="text"
# We concluded that,
#
# `petal length` and `petal width` are highly correlated
#
# `sepal length` and `petal length` are also highly correlated
# + id="E0K_JMmzdWUC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="489358bb-c685-480d-fcd4-87ea303525c8"
X
# + id="Vu6L1pEsd7Nd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 390} outputId="6e247d1d-ebc1-427b-cdf2-53393f99a9ac"
# Visualise the three mutually correlated features together.
X[['sepal length (cm)', 'petal length (cm)', 'petal width (cm)']].plot(figsize=(15,6))
# + id="mFOSs-tgebeF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="52796497-024c-4352-f6fc-1a4bbaa5e779"
# Collapse the three correlated features into their row-wise mean, keeping
# 'sepal width' as a second, independent feature.
reduced_X = pd.DataFrame(X[['sepal length (cm)', 'petal length (cm)', 'petal width (cm)']].mean(axis=1).values, columns=['reduced column'])
reduced_X['sepal width (cm)'] = X['sepal width (cm)'].values
reduced_X
# + id="elKaXl1QhTi5" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
X_tr, X_te, y_tr, y_te = train_test_split(reduced_X, y, test_size=0.2)
# + id="d8-3majPhVCg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 120} outputId="a5298c75-8437-490f-a79c-6be20e8ed6d4"
from sklearn.neighbors import KNeighborsClassifier
# n_neighbors=3: majority vote over the 3 nearest neighbours
# (unrelated to the number of classes); p=2 is Euclidean distance.
classifier2 = KNeighborsClassifier(n_neighbors=3, p=2)
classifier2.fit(X_tr, y_tr)
# + id="5KHZP5JSjsTx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="e10fc392-e903-409c-b06a-fe37c7c5198e"
# Predicting the test set results
y_pred2 = classifier2.predict(X_te)
ouput_table1 = pd.DataFrame({'acually':y_te.values.reshape(len(y_te)), 'predicted':y_pred2})
ouput_table1['is right'] = ouput_table1['acually'] == ouput_table1['predicted']
# count of correct predictions
correct_predictons = len(ouput_table1[ouput_table1['is right'] == True])
print(correct_predictons)
# + id="N-tynjUNkFks" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} outputId="bbd9dec7-265e-4c79-f554-3652bcf450ae"
accuracy2 = correct_predictons/len(y_te)
accuracy2
# + [markdown] id="QpuM-_BRk6UR" colab_type="text"
# ## Model3
# + id="3qgN-b99k8zd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="938bacfd-297b-441f-d7be-d42256a5a598"
# combining the two highly correlated petal columns into their mean
reduced_X = pd.DataFrame(X[['petal length (cm)', 'petal width (cm)']].mean(axis=1).values, columns=['reduced column'])
reduced_X['sepal width (cm)'] = X['sepal width (cm)'].values
reduced_X['sepal length (cm)'] = X['sepal length (cm)'].values
reduced_X
# + id="QMPD-C4Oyhvn" colab_type="code" colab={}
# # OneHotEncoding on y
# from sklearn.preprocessing import OneHotEncoder
# onehotencoder = OneHotEncoder(categorical_features = [0])
# y = onehotencoder.fit_transform(y).toarray()
# y = pd.DataFrame(y)
# One-hot encode the integer labels for the softmax /
# categorical-crossentropy network below.
from keras.utils import to_categorical
y_binary = to_categorical(y)
# + id="5bptqIjFv3vR" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
X_tr, X_te, y_tr, y_te = train_test_split(reduced_X, y_binary, test_size=0.2)
# + id="gbi2vZXIrvE8" colab_type="code" colab={}
# model1 = keras.models.Sequential([
# keras.layers.Dense(activation = 'selu', input_dim = 3, units = 6),
# keras.layers.Dense(activation = 'tanh', units = 6),
# keras.layers.Dense(activation = 'softmax', units = 3)
# ])
# train acc = 88-96%
# test acc = 85-95%
# Chosen architecture: linear -> tanh -> softmax over the 3 classes.
model1 = keras.models.Sequential([
    keras.layers.Dense(input_dim = 3, units = 10), # activation=None means linear activation ( a(x)=x )
    keras.layers.Dense(activation = 'tanh', units = 10),
    keras.layers.Dense(activation = 'softmax', units = 3)
])
# train acc = 95-100%
# test acc = 94-100%
# model1 = keras.models.Sequential([
# keras.layers.Dense(activation = 'relu', input_dim = 3, units = 6),
# keras.layers.Dense(activation = 'selu', units = 6),
# keras.layers.Dense(activation = 'softmax', units = 3)
# ])
# train acc = 70-76%
# test acc = 62-66%
# model1 = keras.models.Sequential([
# keras.layers.Dense(activation = 'selu', input_dim = 3, units = 6),
# keras.layers.Dense(activation = 'tanh', units = 6),
# keras.layers.Dense(activation = 'softmax', units = 3)
# ])
# # train acc = 88-96%
# # test acc = 80%
# + id="iGjp4z_ytB4l" colab_type="code" colab={}
model1.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# + id="dfclJubkwvtH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 234} outputId="2e5556da-3e46-4116-f17c-59333cf19c82"
model1.summary()
# + id="pYJUnmDnvi_d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 3379} outputId="de83ebdd-c9f4-4243-f00b-edab3affaa4e"
model1.fit(X_tr, y_tr, epochs=100)
# + id="ju5SFBp60jWM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="194f1961-4485-480b-8fc4-03386d12c8fb"
# Training-set performance.
loss, acc = model1.evaluate(X_tr, y_tr)
print(loss, acc)
# + id="qQIzbTiH4bt3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="f4626a44-844b-4864-d959-989c744bf26d"
# Held-out test-set performance.
loss, acc = model1.evaluate(X_te, y_te)
print(loss, acc)
# + id="imtgMcrk4lJJ" colab_type="code" colab={}
# + [markdown] id="yXqRHGcNIEyU" colab_type="text"
# ## Model4
# + id="Qbz06DhsIKXz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 404} outputId="befe07b5-8af4-464d-8134-17abcb59c076"
# Rebuild the reduced feature set (same construction as Model3).
reduced_X = pd.DataFrame(X[['petal length (cm)', 'petal width (cm)']].mean(axis=1).values, columns=['reduced column'])
reduced_X['sepal width (cm)'] = X['sepal width (cm)'].values
reduced_X['sepal length (cm)'] = X['sepal length (cm)'].values
reduced_X
# + id="PvY2KLoAIj4l" colab_type="code" colab={}
# One-hot encode the labels again for the categorical-crossentropy loss.
from keras.utils import to_categorical
y_binary = to_categorical(y)
# + id="D8LEHpcFId-4" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
X_tr, X_te, y_tr, y_te = train_test_split(reduced_X, y_binary, test_size=0.2)
# + id="na9TQBa-Ilx0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 201} outputId="eabc4c01-728c-4bb6-a8ce-eb6aeedbae09"
# Smaller network: one hidden linear layer, sigmoid output.
model2 = keras.models.Sequential([
    keras.layers.Dense(input_dim = 3, units = 5),
    keras.layers.Dense(units = 3, activation='sigmoid')
])
# NOTE(review): softmax is the conventional output pairing with
# categorical_crossentropy; sigmoid outputs are not normalised across the
# 3 classes — confirm this is intentional.
model2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model2.summary()
# + id="2bs96c8rJ4G8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33488} outputId="f448d04f-cfd3-4b57-a734-25180b3ec98d"
model2.fit(X_tr, y_tr, epochs=1000)
# + id="dsmIAl3OKAtO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="b7597a35-4c1c-4dc2-fc28-92aef43e89a3"
# Training-set performance.
loss, acc = model2.evaluate(X_tr, y_tr)
print(loss, acc)
# + id="TcQ29YASKUs5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="164f6321-0a0e-4b86-f17f-a11f20d385c9"
# Held-out test-set performance.
loss, acc = model2.evaluate(X_te, y_te)
print(loss, acc)
# + id="pGPOFUOUMTqb" colab_type="code" colab={}
| Classify_Iris_flowers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (holistic-iem)
# language: python
# name: holistic-iem
# ---
# %reload_ext autoreload
# %autoreload 2
# +
import wosis
import wosis.analysis.plotting as wos_plot
import metaknowledge as mk
import networkx as nx
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
# Make plots look better
sns.set_style('darkgrid')
sns.set_context('paper', font_scale=2.0)
# NOTE(review): tight_layout() here acts on an (empty) current figure at
# import time — it has no lasting effect on later plots; likely a leftover.
plt.tight_layout();
# -
# Some fields are proprietary; owned by Clarivate Analytics. These have to be hidden.
# Columns whose contents are proprietary (owned by Clarivate Analytics)
# and must be removed before output is shared.
hide_columns = ['DE', 'abstract', 'keywords', 'DOI', 'id', 'kws']

def make_safe_display(df):
    """Return a truncated view of *df* with the proprietary columns removed."""
    visible = [col for col in df.columns if col not in hide_columns]
    return df.loc[:, visible]
# ISO-formatted date string used for tagging outputs.
TODAY = datetime.today().strftime('%Y-%m-%d')
CONFIG_FILE = "../config/config.yml" # Point to configuration file
TMP_DIR = '../tmp' # temporary store - data that cannot be shared is stored here
DATA_DIR = '../data' # data that can be shared is stored here
FIG_DIR = '../figs' # directory to place generated figures
# Load the Web of Science access configuration.
WOS_CONFIG = wosis.load_config(CONFIG_FILE)
| notebooks/Common Setup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import aiohttp
import asyncio
import json
import logging
from IPython.html import widgets
from IPython.display import display as ipydisplay
from utils import colorify_log_handler
# +
# Colourise the root log handler that IPython installs by default.
colorify_log_handler(
    logging.getLogger().handlers[0], # IPython by default inject one
    log_lineno = False,
    time_fmt = '%H:%M:%S'
)
logger = logging.getLogger('bench_rest_api')
logger.setLevel(logging.DEBUG)
logging.getLogger('asyncio').setLevel(logging.DEBUG)
# -
# Exercise each log level once to check the colourised output.
logger.info('This is info')
logger.debug('我會說中文喔')
logger.error('……人家不是喜歡才跟你講話的喔')
logger.warning('笨蛋')
# !curl -s -XGET "http://localhost:5566/" | python -m json.tool
# !curl -s -XGET "http://localhost:5566/quote/uniform" | python -m json.tool
# + language="bash"
#
# ab -c 10 -n 10 "http://localhost:5566/quote?slow=true"
# -
# ## Basic
@asyncio.coroutine
def quote_simple(url='http://localhost:5566/quote/uniform', slow=False):
    """Fetch one quote from the benchmark server.

    Returns the quote string, or None (after logging the status) when the
    server replies with a non-200 response.
    """
    r = yield from aiohttp.request(
        'GET', url, params={'slow': True} if slow else {}
    )
    if r.status != 200:
        logger.error('Unsuccessful response [Status: %s (%d)]'
                     % (r.reason, r.status))
        r.close(force=True)
        return None
    quote_json = yield from r.json()
    return quote_json['quote']

loop = asyncio.get_event_loop()
# To run a simple asyncio coroutine, hand it to the event loop.
coro = quote_simple()
quote = loop.run_until_complete(coro)
quote
# Internally asyncio wraps it with [`asyncio.Task`].
# So the following works equivalently.
#
# [`asyncio.Task`]: https://docs.python.org/3.4/library/asyncio-task.html#asyncio.Task
task = asyncio.Task(quote_simple())
quote = loop.run_until_complete(task)
quote
# However, `coro` is `corountine`, and `task` is `Task` (subclass of [`Future`]).
#
# One can use `asyncio.ensure_future` to make sure having a Future obj returned.
#
# [`Future`]: https://docs.python.org/3.4/library/asyncio-task.html#asyncio.Future
type(coro), type(task)
# Passing wrong URL gives error
quote = loop.run_until_complete(
quote_simple(url='http://localhost:5566/quote/uniform?part=100')
)
# ## Multiple Concurrent Requests
@asyncio.coroutine
def quote_many_naive(num_quotes=1):
    """Fetch *num_quotes* quotes concurrently with no connection limit.

    Naive version: every request opens its own connection simultaneously.
    """
    pending = [quote_simple(slow=True) for _ in range(num_quotes)]
    results = yield from asyncio.gather(*pending)
    return results
# %%time
quotes = loop.run_until_complete(quote_many_naive(2000))
# This is not helping since we open 2000 connections at a time. It is slower than expected.
# ### Limiting connection pool size
#
# Ref on [official site](http://aiohttp.readthedocs.org/en/latest/client.html#limiting-connection-pool-size).
# +
@asyncio.coroutine
def quote(conn, url='http://localhost:5566/quote/uniform', slow=False):
    """Fetch one quote through a shared TCPConnector *conn*.

    Returns the quote string, or None (after logging) on a non-200 reply.
    """
    r = yield from aiohttp.request(
        'GET', url, params={'slow': True} if slow else {},
        connector=conn
    )
    if r.status != 200:
        logger.error('Unsuccessful response [Status: %s (%d)]'
                     % (r.reason, r.status))
        r.close(force=True)
        return None
    quote_json = yield from r.json()
    r.close(force=True)
    return quote_json['quote']

@asyncio.coroutine
def quote_many(num_quotes=1, conn_limit=20):
    """Fetch *num_quotes* quotes sharing one connector limited to *conn_limit*."""
    conn = aiohttp.TCPConnector(keepalive_timeout=1, force_close=True, limit=conn_limit)
    coroutines = [
        quote(conn) for i in range(num_quotes)
    ]
    quotes = yield from (asyncio.gather(*coroutines))
    return quotes
# -
# %%time
quotes = loop.run_until_complete(quote_many(2000, conn_limit=100))
# I don't know why, but using its internal connection limit is slow. But we can implement one ourselves.
# ### Custom connection limit using semaphore
#
# Use [`asyncio.Semaphore`] acting as a lock.
#
# [`asyncio.Semaphore`]: https://docs.python.org/3.4/library/asyncio-sync.html#asyncio.Semaphore
# +
def quote_with_lock(semaphore, url='http://localhost:5566/quote/uniform'):
    """Fetch one quote; *semaphore* caps the number of in-flight requests.

    NOTE(review): there is no @asyncio.coroutine decorator here, but the
    `yield from` makes this a generator-based coroutine, which gather()
    still accepts — presumably intentional; confirm.
    """
    with (yield from semaphore):
        r = yield from aiohttp.request('GET', url)
        if r.status != 200:
            logger.error('Unsuccessful response [Status: %s (%d)]'
                         % (r.reason, r.status))
            r.close(force=True)
            return None
        quote_json = yield from r.json()
        r.close(force=True)
        return quote_json['quote']

@asyncio.coroutine
def quote_many(num_quotes=1, conn_limit=20):
    """Fetch *num_quotes* quotes with concurrency capped by a semaphore."""
    semaphore = asyncio.Semaphore(conn_limit)
    coroutines = [
        quote_with_lock(semaphore) for i in range(num_quotes)
    ]
    quotes = yield from (asyncio.gather(*coroutines))
    return quotes
# -
# %%time
quotes = loop.run_until_complete(quote_many(2000, conn_limit=100))
# ## Add Progressbar
# If you don't care the original of coroutines
# Use this variant if you don't care about the original order of the results.
@asyncio.coroutine
def quote_many(num_quotes=1, conn_limit=20, progress=None, step=10):
    """Fetch quotes with a semaphore cap, updating an IntProgress bar.

    The bar advances once per *step* completed requests; results arrive
    in completion order (asyncio.as_completed), not submission order.
    """
    if progress is None:
        progress = widgets.IntProgress()
        progress.max = num_quotes // step
        ipydisplay(progress)
    semaphore = asyncio.Semaphore(conn_limit)
    coroutines = [
        quote_with_lock(semaphore) for i in range(num_quotes)
    ]
    # quotes = yield from (asyncio.gather(*coroutines))
    quotes = []
    for ith, coro in enumerate(asyncio.as_completed(coroutines), 1):
        if ith % step == 0:
            progress.value += 1
        q = yield from coro
        quotes.append(q)
    return quotes
# %%time
quotes = loop.run_until_complete(quote_many(2000, conn_limit=100, step=1))
# For fast response, progress bar introduces considerable latency. Try modify the step higher.
# %%time
quotes = loop.run_until_complete(quote_many(2000, conn_limit=100, step=20))
# ### Original order matters
#
# ... go eat yourself.
@asyncio.coroutine
def quote_many(num_quotes=1, conn_limit=20, progress=None, step=10):
    """Fetch quotes keeping submission order; progress via done-callbacks.

    gather() preserves the original order of *futures*, while each Task's
    done-callback advances the progress bar as requests complete.
    """
    if progress is None:
        progress = widgets.IntProgress()
        progress.max = num_quotes // step
        ipydisplay(progress)
    # create the lock
    semaphore = asyncio.Semaphore(conn_limit)
    finished_task_count = 0
    def progress_adder(fut):
        # Runs on each completed task; bumps the bar every *step* finishes.
        nonlocal finished_task_count
        finished_task_count += 1
        if finished_task_count % step == 0:
            progress.value += 1
    # wrap coroutines as Tasks
    futures = []
    for i in range(num_quotes):
        task = asyncio.Task(quote_with_lock(semaphore))
        task.add_done_callback(progress_adder)
        futures.append(task)
    quotes = yield from (asyncio.gather(*futures))
    return quotes
# %%time
quotes = loop.run_until_complete(quote_many(2000, conn_limit=100, step=1))
# %%time
quotes = loop.run_until_complete(quote_many(2000, conn_limit=100, step=20))
# ### Alternative way
@asyncio.coroutine
def quote_many(num_quotes=1, conn_limit=20, progress=None, step=10):
    """Fetch quotes in order: drive as_completed for progress, then read
    each Future's result so the returned list matches submission order.
    """
    if progress is None:
        progress = widgets.IntProgress()
        progress.max = num_quotes // step
        ipydisplay(progress)
    semaphore = asyncio.Semaphore(conn_limit)
    # wrap coroutines with future
    # For Python 3.4.4+, asyncio.ensure_future(...)
    # will wrap coro as Task and keep input the same
    # if it is already Future.
    futures = [
        asyncio.ensure_future(quote_with_lock(semaphore))
        for i in range(num_quotes)
    ]
    for ith, coro in enumerate(asyncio.as_completed(futures), 1):
        if ith % step == 0:
            progress.value += 1
        yield from coro
    quotes = [fut.result() for fut in futures]
    return quotes
# %%time
quotes = loop.run_until_complete(quote_many(2000, conn_limit=100, step=20))
| 1_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab: Iris Flower Classification using Decision Theory
#
# In this exercise, we will use decision theory on a simple multi-variable classification problem. In doing the exercise, you will learn to:
#
# * Load a pre-installed dataset in the `sklearn` package.
# * Estimate parameters of a multi-variable Gaussian from data
# * Make multi-class predictions using linear and quadratic discriminants derived from the Gaussian parameters
# * Evaluate the accuracy of the predictions on test data
#
# We load the following packages.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams.update({'font.size': 16})
# ## Loading and Visualizing the Data
#
# In this lab, we will use the classic dataset [Iris flower dataset](https://en.wikipedia.org/wiki/Iris_flower_data_set). The problem is to estimate the type of iris ('setosa' 'versicolor' 'virginica') from four features of the Iris flower. Since the data is widely-used in machine learning classes, it is included in the `sklearn` package in python. You can download the data with following command:
from sklearn.datasets import load_iris
data = load_iris()
# Set`X=data.data` and `y=data.target`. The matrix `X[i,j]` will then be the value of feature `j` in sample `i` and `y[i]` will be the index of the class for sample `i`. Also, print:
# * Number of samples,
# * Number of features per sample
# * Number of classes
# X[i, j] = value of feature j for sample i; y[i] = class index of sample i.
X = data.data
y = data.target
# Print the feature and target names in `data.feature_names` and `data.target_names`
print(data.feature_names,data.target_names)
# To visualize the data, create a *scatter* plot of two of the four features: `(sepal length, petal length)`. In a scatter plot, each point is plotted as some marker, say a small circle. Different colors are used for different class labels. You can create a scatter plot using the `plt.scatter()` command. Make sure you label your axes using the `plt.xlabel()` and `plt.ylabel()` axes.
#
# If you did the plot correctly, you should see that you can separate the points well even just using two features.
# Scatter of sepal length (col 0) vs petal length (col 2), coloured by class.
plt.scatter(x=X[:,0],y=X[:,2],c=y)
plt.xlabel('sepal length (cm)')
plt.ylabel('petal length (cm)')
# ## Classifier Based on a Linear Discriminator
#
# Before trying any classification methods, it is necessary to split the data into two components:
# * Training samples: Used for fitting the classifier models
# * Test samples: Used for testing the classifier
#
# We will discuss splitting the training and test data in detail later in the class. But, the reason for the splitting is that we want to test the classifier on samples not used in training. This ensures we see how well it works on *new* samples that have not been seen.
#
# For this purpose, divide the data into 75 samples `(Xtr,ytr)` for training and 75 samples `(Xts,yts)` for test. You must randomly shuffle the samples before splitting -- do not pick the first 75 for training. You can do the splitting manually or use the `sklearn.model_selection.train_test_split` function. If you use `train_test_split()`, set `shuffle=True`.
from sklearn.model_selection import train_test_split
# Split into training and test
# shuffle=True randomises sample order before the 75/75 split so both
# halves mix all three classes (the raw data is class-sorted).
Xtr, Xts, ytr, yts = train_test_split(X,y,test_size=75, shuffle=True)
# ##### We will first try using a linear discriminator. For linear discriminator we first need to estimate the sample mean of each class. Complete the following function which returns:
#
# mu[:,j] = sample mean of X[i,:] for samples with y[i]=j
# mu is a list of sample means for each class value
def fit_lin(X, y):
    """Estimate per-class sample means for the linear discriminant.

    Parameters
    ----------
    X : (num_samp, num_feat) feature matrix.
    y : (num_samp,) integer class labels in {0, ..., K-1}.

    Returns
    -------
    mu : (num_feat, K) array where mu[:, j] is the mean of the samples
        with y[i] == j.
    """
    # Size mu from the data itself rather than the global `data` bunch so
    # the function also works on feature subsets or other datasets.
    classes = np.unique(y)
    mu = np.zeros([X.shape[1], len(classes)])
    for j in classes:  # loop through all observed class values
        I = np.where(y == j)[0]          # indices of samples in class j
        mu[:, j] = np.mean(X[I, :], axis=0)  # column-wise mean of those rows
    return mu
# Run the function on the training data to get the sample means.
mu= fit_lin(Xtr,ytr) # means of 4 features by 3 classes
mu
# Assuming a Gaussian model with equal and i.i.d. covariance matrices, the optimal estimator is given by the linear discrimantor: Given a test sample `x`, we compute:
#
# g[j] = mu[:,j].dot(x) - 0.5*sum(mu[:,j]**2)
#
#
# for each class `j`. Then, we select `yhat = argmax_j g[j]`. Complete the following code which takes a matrix of data samples `X` to compute a vector of class predictions `yhat`.
# X is num_samp by features
def predict_lin(X, mu):
    """Predict class labels with the linear discriminant.

    For each sample x the discriminant is
        g[j] = mu[:, j].dot(x) - 0.5 * sum(mu[:, j] ** 2)
    and the prediction is argmax_j g[j].

    Parameters
    ----------
    X : (num_samp, num_feat) feature matrix.
    mu : (num_feat, K) per-class means from fit_lin.

    Returns
    -------
    yhat : (num_samp,) array of predicted class indices (as floats).
    """
    # Derive the class count from mu instead of the module-level `y`,
    # which the original read implicitly (a latent bug if y changes).
    num_class = mu.shape[1]
    num_samp = X.shape[0]
    yhat = np.zeros(num_samp)
    for i in range(num_samp):
        x = X[i]
        g = np.zeros(num_class)
        for j in range(num_class):
            g[j] = np.dot(mu[:, j], x) - 0.5 * np.sum(mu[:, j] ** 2)
        yhat[i] = np.argmax(g)
    return yhat
# Test the linear classifier on the test data. Specifically, estimate `yhat` and measure the accuracy, which is the number of samples on which the classifier was correct. If you did everything correctly, you should get an accuracy of around 90%. But, it may be a little higher or lower depending on the random training / test split.
yhat = predict_lin(Xts,mu)
# Accuracy = fraction of test samples classified correctly.
acc = 1 - np.mean((yhat != yts)*1)
print(acc)
# ## Quadratic Discriminator
#
# Now, we will try a more sophisticated classifier. In this case, we will estimate both the sample mean and covariance matrix. Complete the following code that computes:
#
# * `mu`: Array where `mu[:,j]` is the sample mean for the samples with `y[i]==j`. This is identical to the code you have above.
# * `S`: Array of covariance matrices where `S[:,:,j]` is the sample covariance matrix for the samples with `y[i]==j`. You can use the `np.cov()` method.
# S,
def fit_quad(X, y):
    """Per-class sample means and covariance matrices for the quadratic rule.

    Returns (mu, S) where mu[:, j] is the class-j mean (from fit_lin) and
    S[:, :, j] is the class-j sample covariance matrix.
    """
    mu = fit_lin(X, y)
    num_class = len(set(y))
    num_samp, num_feat = X.shape
    S = np.zeros([num_feat, num_feat, num_class])
    for cls in range(num_class):
        members = np.where(y == cls)[0]
        # rowvar=False: each row is an observation, each column a feature.
        S[:, :, cls] = np.cov(X[members], rowvar=False)
    return mu, S
# Fit the quadratic model on the training data.
mu, S = fit_quad(Xtr, ytr)
# Given Gaussian models in each class the optimal decision rule is the following *quadratic* decision rule: Given a sample `x`, we compute the discrimant,
#
#
# g[j] = 0.5*(x-mu[:,j).T.dot(Sinv[:,:,j]).dot(x-mu[:,j]) + 0.5*log(det(S[:,:,j]))
#
#
# where `Sinv[:,:,j]` is the matrix inverse of `S[:,:,j]`. Then, we take `yhat = argmin_j g[j]`. Complete the following code to compute the predictions based on the quadratic discriminats.
def predict_quad(X, mu, S):
    """Predict class labels with the quadratic (Gaussian) discriminant.

    For each sample x the discriminant is
        g[j] = 0.5 * (x - mu_j)^T Sinv_j (x - mu_j) + 0.5 * log(det(S_j))
    and the prediction is argmin_j g[j].

    Parameters
    ----------
    X : (num_samp, num_feat) feature matrix.
    mu : (num_feat, K) per-class means.
    S : (num_feat, num_feat, K) per-class covariance matrices.

    Returns
    -------
    yhat : (num_samp,) array of predicted class indices (as floats).
    """
    # Class count comes from S, not the module-level `y` the original read.
    num_class = S.shape[2]
    num_samp = X.shape[0]
    yhat = np.zeros(num_samp)
    for i in range(num_samp):
        x = X[i]
        g = np.zeros(num_class)
        for j in range(num_class):
            Sinv = np.linalg.inv(S[:, :, j])
            logdet = np.log(np.linalg.det(S[:, :, j]))
            # 1-d quadratic form; replaces the original's hard-coded
            # (1, 4) / (4, 1) reshapes, so any feature count works.
            diff = x - mu[:, j]
            g[j] = 0.5 * diff.dot(Sinv).dot(diff) + 0.5 * logdet
        yhat[i] = np.argmin(g)
    return yhat
# Test the quadratic discriminator on the test data. You should get around 98% accuracy depending on the train / test split.
yhat = predict_quad(Xts,mu,S)
# Accuracy = fraction of test samples classified correctly.
acc = 1 - np.mean((yhat != yts)*1)
print(acc)
| lab1_iris_partial-LDA_and_QDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Building second level models using _nipype_ and _SPM12_
#
# ## Base functionality for _megameta_ project
#
# -------
# #### History
# * 5/4/19 cscholz - add datasink, incorporate mreg design, incorporate sampling of first-level contrast based on percentage of available first-level models per project
# * 4/15/19 mbod - incorporate function to read the 2nd level JSON model config
# * 4/9/19 mbod - modify template to work with fmriprep processed data
# * 3/20/19 mbod - initial setup for testing some simple one sample t-test models
# -----
#
# ### Description
#
# * Set up a nipype workflow to use SPM12 to make second level models for _megameta_ task data (preprocessed using `batch8` SPM8 scripts) in BIDS derivative format
#
# ### Setup
# +
import os # system functions
# NIYPE FUNCTIONS
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.spm as spm # spm
import nipype.interfaces.matlab as mlab # how to run matlab
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.algorithms.modelgen as model # model specification
from nipype.interfaces.base import Bunch
from nipype.algorithms.misc import Gunzip
import scipy.io as sio
import numpy as np
import json
import pandas as pd
import random
from IPython.display import Image
from itertools import product
# +
#MODEL_SPEC_FILE = 'group_pmod_pop_rank.json'
#CONTRAST_NAME='puremessageXpmod_pop_rank'
#PATH_TO_SPM_FOLDER = '/data00/tools/spm12mega/'
#exclude_subjects=[]
# -
# #### Matlab path
#
# Set the way matlab should be called
# Run MATLAB headless for the SPM batch jobs.
mlab.MatlabCommand.set_default_matlab_cmd("matlab -nodesktop -nosplash")
# If SPM is not in your MATLAB path you should add it here
# NOTE(review): PATH_TO_SPM_FOLDER (and MODEL_SPEC_FILE / exclude_subjects
# below) are not defined in this notebook — the cell that would set them is
# commented out above, so they are presumably injected before execution
# (e.g. by papermill). Confirm before running stand-alone.
mlab.MatlabCommand.set_default_paths(PATH_TO_SPM_FOLDER)
GROUP_DIR = '/data00/projects/megameta/group_models/'
# #### Load JSON model config
JSON_MODEL_FILE = os.path.join('/data00/projects/megameta/scripts/jupyter_megameta/second_level_models',
                               'model_specifications',
                               MODEL_SPEC_FILE)
with open(JSON_MODEL_FILE) as fh:
    model_def = json.load(fh)
# +
MODEL_NAME = model_def['ModelName']
CONTRASTS = model_def['Contrasts']
ROOT_DIR = '/data00/projects/megameta'
# +
l2_contrast_list = CONTRASTS # list of specific contrast files to use in 2nd level model (include .nii?)
output_dir = os.path.join(GROUP_DIR,'derivatives', 'nipype','model_2nd-level_{}'.format(MODEL_NAME))
working_dir = os.path.join(GROUP_DIR, 'working',
                           'nipype', 'workingdir_model_2nd-level_{}'.format(MODEL_NAME))
# +
# Create output/working directories on first run.
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
if not os.path.exists(working_dir):
    os.makedirs(working_dir)
# -
# ## Get list of contrast files
def process_project(project_name, model_def=model_def, exclude_subjects=exclude_subjects ,scan_all_subjs=False, DEBUG=False):
    """Collect per-subject first-level contrast file paths for one project.

    Looks up *project_name* in model_def['Projects'], scans its first-level
    model directory for 'sub-*' folders (minus *exclude_subjects*), maps the
    project's contrast names to con_XXXX.nii files via the subject's
    SPM.mat, and returns {2nd-level contrast name: [paths]}.
    Returns None when the project spec or model directory is missing.

    NOTE(review): the defaults bind the module-level ``model_def`` and
    ``exclude_subjects`` at definition time; re-running the config cells
    does not update them unless this cell is re-executed too.
    """
    project_spec = [pspec for pspec in model_def['Projects'] if pspec['Name']==project_name]
    if not project_spec:
        print('Cannot find specification for project: ', project_name)
        return None
    model_name = project_spec[0]['Model']
    cmap = project_spec[0]['ContrastMap']
    model_dir = os.path.join(ROOT_DIR, project_name,
                             "derivatives", "nipype",
                             "model_{}".format(model_name)
                             )
    if not os.path.exists(model_dir):
        print('Cannot find first level model directory:', model_dir)
        return None
    subjs_with_models = [s for s in os.listdir(model_dir) if s.startswith('sub-')]
    #exclude_people
    subjs_with_models=[s for s in subjs_with_models if s not in exclude_subjects]
    if DEBUG:
        print("Found {} first level subject models\n".format(len(subjs_with_models)))
    contrast_lists = { cname: [] for cname in cmap}
    model_contrasts=None
    for sidx,subj in enumerate(subjs_with_models):
        if DEBUG:
            print('Processing',subj, '-',end='')
        first_level_dir = os.path.join(model_dir, subj, 'medium', 'fwhm_8')
        # Unless scan_all_subjs, contrast names are read from the first
        # subject's SPM.mat only and assumed identical across subjects.
        if scan_all_subjs or sidx==0:
            spm_mat_file = os.path.join(first_level_dir, 'SPM.mat')
            SPM = sio.loadmat(spm_mat_file, squeeze_me=True, struct_as_record=False)['SPM']
            model_contrasts = SPM.xCon
        if DEBUG:
            print(' found {} contrasts'.format(len(model_contrasts)))
        # SPM contrast files are 1-based: con_0001.nii, con_0002.nii, ...
        con_map = {con.name: 'con_{:0>4}.nii'.format(cidx) for cidx,con in enumerate(model_contrasts,1) }
        if DEBUG:
            print('\tContrasts are:', con_map)
        for model_con, proj_con in cmap.items():
            path_to_con = os.path.join(first_level_dir, con_map[proj_con])
            if os.path.exists(path_to_con):
                contrast_lists[model_con].append(path_to_con)
    return contrast_lists
# ## Define nodes
# +
# Infosource - a function free node to iterate over the list of subject names
l2_infosource = pe.Node(util.IdentityInterface(fields=['contrast_id']),
name="infosource")
smoothing_kernels = [ 8 ]
resolutions = ['medium']
resolution_and_kernel_list = product(resolutions, smoothing_kernels)
l2_infosource.iterables = [('contrast_id', l2_contrast_list),
('resolution_and_smoothing', resolution_and_kernel_list)
]
# +
# SelectFiles - to grab the data (alternativ to DataGrabber)
subject_pattern='*'
OUTPUT_DIR = output_dir
l2_output_dir = output_dir
l2_templates = {'cons': os.path.join(output_dir, MODEL_NAME, subject_pattern, '{smoothing_ksize}',
'{contrast_id}.nii')}
l2_selectfiles = pe.Node(nio.SelectFiles(l2_templates,
base_directory=OUTPUT_DIR,
sort_filelist=True),
name="selectfiles")
# -
def make_contrast_list(model_path, cname, exclude_subjects, sample_perc=80):
    """Collect first-level contrast image paths across all projects in a model spec.

    EDITED BY CHRISTIN to randomly sample a given percentage of subjects for
    the second-level model.

    Parameters
    ----------
    model_path : str
        Path to a JSON model-specification file with a top-level 'Projects'
        list; each entry has 'Name', 'Model', and 'ContrastMap' keys.
    cname : str
        Key of the contrast (in each project's ContrastMap) to collect.
    exclude_subjects : list of str
        Subject directory names (e.g. 'sub-XXX') to drop before sampling.
    sample_perc : int, optional
        Percentage of each project's subjects to randomly sample (default 80).

    Returns
    -------
    list of str
        Paths to the sampled subjects' first-level con images for `cname`.
    """
    import json
    import random
    import os
    import scipy.io as sio

    ROOT_DIR = '/data00/projects/megameta'

    def process_project(project_name, model_def, scan_all_subjs=False, DEBUG=False):
        # Returns (contrast_lists, subjs_with_models) or None when the project
        # spec or its first-level model directory cannot be found.
        project_spec = [pspec for pspec in model_def['Projects'] if pspec['Name'] == project_name]
        if not project_spec:
            print('Cannot find specification for project: ', project_name)
            return None
        model_name = project_spec[0]['Model']
        cmap = project_spec[0]['ContrastMap']
        model_dir = os.path.join(ROOT_DIR, project_name,
                                 "derivatives", "nipype",
                                 "model_{}".format(model_name)
                                 )
        if not os.path.exists(model_dir):
            print('Cannot find first level model directory:', model_dir)
            return None
        subjs_with_models = [s for s in os.listdir(model_dir) if s.startswith('sub-')]
        # Exclude people
        subjs_with_models = [s for s in subjs_with_models if s not in exclude_subjects]
        # Get a random sample of participants (based on a percentage)
        sample_size = (sample_perc / 100) * len(subjs_with_models)
        subj_list = random.sample(subjs_with_models, int(sample_size))
        print('Project: {}, Sampling {} of {} participants with a model'.format(project_name, int(sample_size), len(subjs_with_models)))
        if DEBUG:
            print("Found {} first level subject models\n".format(len(subjs_with_models)))
        # One list of con-image paths per contrast named in the ContrastMap.
        # (renamed the comprehension variable so it no longer shadows `cname`)
        contrast_lists = {con_name: [] for con_name in cmap}
        model_contrasts = None
        for sidx, subj in enumerate(subj_list):
            if DEBUG:
                print('Processing', subj, '-', end='')
            first_level_dir = os.path.join(model_dir, subj, 'medium', 'fwhm_8')
            # Read the SPM.mat contrast table once (first subject) unless
            # scan_all_subjs asks for a per-subject rescan.
            if scan_all_subjs or sidx == 0:
                spm_mat_file = os.path.join(first_level_dir, 'SPM.mat')
                SPM = sio.loadmat(spm_mat_file, squeeze_me=True, struct_as_record=False)['SPM']
                model_contrasts = SPM.xCon
                if DEBUG:
                    print(' found {} contrasts'.format(len(model_contrasts)))
                # SPM numbers con images from 1: con_0001.nii, con_0002.nii, ...
                con_map = {con.name: 'con_{:0>4}.nii'.format(cidx)
                           for cidx, con in enumerate(model_contrasts, 1)}
                if DEBUG:
                    print('\tContrasts are:', con_map)
            for model_con, proj_con in cmap.items():
                path_to_con = os.path.join(first_level_dir, con_map[proj_con])
                if os.path.exists(path_to_con):
                    contrast_lists[model_con].append(path_to_con)
        return contrast_lists, subjs_with_models

    with open(model_path) as fh:
        model_def = json.load(fh)

    conlist = []
    for p in model_def['Projects']:
        print(p)
        result = process_project(p['Name'], model_def)
        # BUG FIX: process_project returns a (contrast_lists, subj_list) tuple
        # (or None on failure); the original indexed the tuple with the string
        # key `cname`, which raises TypeError. Index the dict inside the tuple
        # and skip projects that could not be processed.
        if result is not None:
            conlist.extend(result[0][cname])
    return conlist
# Function node wrapping make_contrast_list.
# NOTE(review): output_names promises two outputs ('contrasts', 'covariates')
# but make_contrast_list returns a single list -- confirm which version of the
# function this node is meant to wrap.
l2_getcontrasts = pe.Node(util.Function(input_names=['model_path','cname','exclude_subjects'],
                                        output_names=['contrasts', 'covariates'],
                                        function=make_contrast_list),
                          name='makecontrasts')
MDIR = os.path.abspath('../model_specifications')
l2_getcontrasts.inputs.model_path=os.path.join(MDIR, MODEL_SPEC_FILE)
l2_getcontrasts.inputs.cname=CONTRAST_NAME
l2_getcontrasts.inputs.exclude_subjects=exclude_subjects
# +
#EDITED BY CHRISTIN (ADDING DATASINK)
# Datasink - creates output folder for important outputs
datasink = pe.Node(nio.DataSink(base_directory=OUTPUT_DIR,
                                container=l2_output_dir),
                   name="datasink")
# Use the following DataSink output substitutions (strip the iterable prefix
# from output folder names).
substitutions = [('_contrast_id_', '')]
datasink.inputs.substitutions = substitutions
# -
# ## Model nodes
# +
# One-sample t-test design (not wired into the workflow below, which uses
# the multiple-regression design instead).
osttdesign = pe.Node(spm.model.OneSampleTTestDesign(),
                     name="osttdesign")
osttdesign.inputs.explicit_mask_file='/data00/tools/spm8/apriori/brainmask_th25.nii'
osttdesign.inputs.threshold_mask_none=True
# +
#MODEL_SPEC_FILE = 'group_mreg_behav_nonavers.json'
#CONTRAST_NAME='puremessage'
#PATH_TO_SPM_FOLDER = '/data00/tools/spm12mega/'
#JSON_MODEL_FILE = os.path.join('/data00/projects/megameta/scripts/jupyter_megameta/second_level_models',
#                               'model_specifications',
#                               MODEL_SPEC_FILE)
#exclude_subjects=[]
# +
#EDITED BY CHRISTIN TO IMPLEMENT MREG
# Multiple Regression Design - creates mreg Design
mregdesign = pe.Node(spm.model.MultipleRegressionDesign(),
                     name="mregdesign")
# Add covariates
## Make a list of covariates based on the contrast list
covs=[]
# NOTE(review): make_contrast_list() returns a single flat list of contrast
# paths, so indexing with [0] yields one path string and the two-way unpack
# below will fail. This line appears to expect a version of the function that
# returned (contrast_list, subj_list) -- confirm and fix before running.
contrast_list, subj_list=make_contrast_list(JSON_MODEL_FILE,CONTRAST_NAME,exclude_subjects)[0]
pjs=[c.split('/')[4] for c in contrast_list]  # project name is the 5th path component
pjs=[s for s in set(pjs)]  # de-duplicate project names
print(pjs)
print(subj_list)
## Make dummy variables based on list of projects and add them to the covariate list of dictionaries
#for pj in set(pjs):
#    cur_cov_vector=[]
#    for idx, _ in enumerate(pjs):
##        if pjs[idx]==pj:
#            cur_cov_vector.append(1)
#        else:
#            cur_cov_vector.append(0)
#    #make dictionary for current covariate
#    cur_dict={'name': pj, 'vector': cur_cov_vector}
#    #add dictionary to list of covs
#    covs.append(cur_dict)
##NOTE: THE CODE ABOVE CREATES ONE DUMMY PER PROJECT. NEED TO TAKE ONE OUT AND DECIDE WHICH PROJECT TO USE AS COMPARISON/REFERENCE.
#BELOW ARE TWO VERSIONS OF DOING THAT. VERSION 1 RANDOMLY CHOOSES (# OF PROJECTS)-1 COVARIATES TO INCLUDE - BUT WE PROBABLY WANT TO BE MORE STRATEGIC
#VERSION 1
#covs=random.sample(covs,(len(pjs)-1))
# VERSION 2 REMOVES DARPA1 TO MAKE IT THE REFERENCE PROJECT -- BUT I DON'T HAVE A CLEAR RATIONALE FOR WHY THAT OVER OTHERS RIGHT NOW...
#covs=[i for i in covs if i['name']!='darpa1']
# Intended covs format:
# covs = [
#   {'name':'alcohol','vector': []},
#   {'name':'darpa1','vector': []},
#   {'name':'darpa2','vector': []},
#   {'name':'cityyear','vector': []},
#   {'name':'project1','vector': []}
#]
# Add covariate of behavior change and baseline
#subj_list=[]
#for pj in pjs:
#    project_spec = [pspec for pspec in model_def['Projects'] if pspec['Name']==pj]#
#    model_name = project_spec[0]['Model']
#    model_dir = os.path.join(ROOT_DIR, pj,
#                             "derivatives", "nipype",
#                             "model_{}".format(model_name)
#                             )
#    subjs_with_models = [s for s in os.listdir(model_dir) if s.startswith('sub-')]
#    #Exclude people
#    subjs_with_models=[s for s in subjs_with_models if s not in exclude_subjects]
#    subj_list=subj_list+subjs_with_models
#subj_list=[s.replace('sub-','') for s in subj_list]
##make a new behavior vector for the people who are in subj_list
#regressors=pd.read_csv('/data00/projects/megameta/scripts/jupyter_megameta/second_level_models/indbehav_data/behaviorchange_050919nc.csv')
#behav_mreg=[]
#for row_num, val in enumerate(regressors['change']):
#    if regressors['pID'][row_num] in subj_list:
#        behav_mreg.append(regressors['change'][row_num])
#behav_mreg_dict={'name': 'behav_mreg', 'vector':behav_mreg}
#behav_baseline=[]
#for row_num, val in enumerate(regressors['baseline']):
#    if regressors['pID'][row_num] in subj_list:
#        behav_baseline.append(regressors['baseline'][row_num])
#behav_baseline_dict={'name': 'behav_baseline', 'vector':behav_baseline}
#covs=[behav_mreg_dict,behav_baseline_dict]
#mregdesign.inputs.covariates=covs
#mregdesign.inputs.explicit_mask_file='/data00/tools/spm8/apriori/brainmask_th25.nii'
# +
# EstimateModel - estimate the parameters of the model
level2estimate = pe.Node(spm.model.EstimateModel(estimation_method={'Classical': 1}),
                         name="level2estimate")
# -
# EstimateContrast - estimates simple group contrast
level2conestimate = pe.Node(spm.model.EstimateContrast(group_contrast=True),
                            name="level2conestimate")
# +
'''
cont1 = ['QuitIntent', 'T', ['QuitIntent', 'FTND', 'mean_WC', 'mean'], [1, 0, 0, 0]]
cont2 = ['FTND', 'T', ['QuitIntent', 'FTND', 'mean_WC', 'mean'], [0, 1, 0, 0]]
cont3 = ['mean_WC', 'T', ['QuitIntent', 'FTND', 'mean_WC', 'mean'], [0, 0, 1, 0]]
cont4 = ['mean', 'T', ['QuitIntent', 'FTND', 'mean_WC', 'mean'], [0, 0, 0, 1]]
'''
# T-contrast on the behavior-change regressor, controlling for baseline.
# NOTE(review): assumes the design contains covariates named 'behav_mreg' and
# 'behav_baseline'; the code building them above is commented out -- confirm.
cont = ['behav_mreg', 'T', ['behav_mreg','behav_baseline'], [1,0]]
level2conestimate.inputs.contrasts = [cont]
# -
# ## Setup second level workflow
#l2_working_dir = os.path.join(PROJECT_DIR, 'nipype', 'workingdir_banner_2nd_level')
l2_working_dir = working_dir  # reuse the globally configured working directory
# +
# EDITED BY CHRISTIN (adding datasink to the workflow)
l2analysis = pe.Workflow(name='l2analysis')
l2analysis.base_dir = l2_working_dir
# Connect up the 2nd-level analysis components:
# contrast list -> mreg design -> model estimation -> contrast estimation -> datasink
l2analysis.connect(
    [
        #(l2_infosource, l2_getcontrasts, [('contrast_id', 'contrast_id'),
        #                                  ('model_path')]),
        (l2_getcontrasts, mregdesign, [('contrasts', 'in_files')]),
        (mregdesign, level2estimate, [('spm_mat_file',
                                       'spm_mat_file')] ),
        (level2estimate, level2conestimate, [('spm_mat_file',
                                              'spm_mat_file'),
                                             ('beta_images',
                                              'beta_images'),
                                             ('residual_image',
                                              'residual_image')]),
        (level2conestimate, datasink, [('spm_mat_file',
                                        'contrasts.@spm_mat'),
                                       ('spmT_images',
                                        'contrasts.@T'),
                                       ('con_images',
                                        'contrasts.@con')])
    ])
| archive/second_level_Christin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MODFLOW API Paper
#
# # Create Trimesh Grid for McDonald Valley Problem for the "Optimization of groundwater withdrawals" example
#
# This notebook can be used to create the MODFLOW 6 datasets used by the "Optimization of groundwater withdrawals" example, as reported in the MODFLOW 6 API paper (in progress). This notebook should be run prior to running the `optimize_mv_api.ipynb` notebook.
#
# ## Prerequisites
# To run the simulation and process the results, the following publicly available software and data are required:
#
# * __libmf6.dll__ (Windows) or __libmf6.so__ (linux or MacOS) pre-compiled dynamic-link library (dll)/shared object (so) and available from https://github.com/MODFLOW-USGS/executables. The operating specific pre-compiled dll/so should be installed in the `bin` subdirectory at the same level as the subdirectory containing this Jupyter Notebook (`../bin/`).
# * __mf6.exe__ (Windows) or __mf6__ (linux or MacOS) pre-compiled application and available from https://github.com/MODFLOW-USGS/executables. The operating specific pre-compiled application should be installed in the `bin` subdirectory at the same level as the subdirectory containing this Jupyter Notebook (`../bin/`).
# * __xmipy__ is a python package for interacting with the MODFLOW 6 extended model interface; the source is available at https://github.com/Deltares/xmipy.
# * __flopy__ is a python package that can be used to build, run, and post-process MODFLOW 6 models. The source is available at https://github.com/modflowpy/flopy and the package can be installed from PyPI using `pip install flopy` or conda using `conda install flopy`.
#
# ## Building the McDonald Valley MODFLOW 6 datasets
#
# We start by importing the necessary packages:
# +
import os
import sys
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import rasterio
import flopy
from flopy.utils.gridgen import Gridgen
import flopy.plot.styles as styles
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
# -
#
# _Define the name of the executable_
# MODFLOW 6 binary name; Windows builds carry the .exe suffix.
exe_name = "mf6"
if sys.platform == "win32":
    exe_name += ".exe"
# ## Load external data
# ASCII rasters with aquifer K, clay K, and the bottom elevation surface.
kaq = flopy.utils.Raster.load("./data/k_aq.asc")
kclay = flopy.utils.Raster.load("./data/k_clay.asc")
bot = flopy.utils.Raster.load("./data/bottom.asc")
# ## Create Structured Grid
# Dimensions of the reference structured (regular) grid used for comparison.
nlay = 5
nrow = 40
ncol = 25
shape2d = (nrow, ncol)
shape3d = (nlay, nrow, ncol)
delr, delc = 500., 500.  # uniform 500 m cell spacing
delrs = np.ones(ncol, dtype=np.float32) * delr
delcs = np.ones(nrow, dtype=np.float32) * delc
top = np.ones(shape2d, dtype=np.float32) * 100.
# Layer bottoms: fixed elevations for layers 1-4, raster surface for layer 5.
botm = np.zeros(shape3d, dtype=np.float32)
botm[0] = -5.0
botm[1] = -50.0
botm[2] = -51.0
botm[3] = -100.0
botm[4] = bot.get_array(1)
# Build structured model grid (single layer is enough for map plotting)
struct_grid = flopy.discretization.StructuredGrid(
    nlay=1,
    nrow=nrow,
    ncol=ncol,
    delr=delrs,
    delc=delcs,
)
# ## Read the Trimesh File into Python
# +
fname = './data/TriMesh_with_densified_contours.exp'
with open(fname, 'r') as f:
    lines = f.readlines()
# read vertex info into a structured array
# Lines starting with 'N' are vertices: index, x, y, label, ibound, elev.
vtmp = [line.strip().split()[1:] for line in lines if line.startswith('N')]
dtype = [('ivert', int), ('x', float), ('y', float), ('label', '|S10'), ('ibound', int), ('elev', float)]
vertices = np.empty((len(vtmp)), dtype=dtype)
for i, line in enumerate(vtmp):
    for j, val in enumerate(line):
        # string tokens are cast to each field's dtype on assignment
        vertices[i][j] = val
# read element info into a structured array
# Lines starting with 'E' are triangles: index, three 1-based vertex ids,
# label, ibound (feature code), elev.
etmp = [line.strip().split()[1:] for line in lines if line.startswith('E')]
dtype = [('icell', int), ('iv1', int), ('iv2', int), ('iv3', int), ('label', '|S10'), ('ibound', int), ('elev', float)]
cells = np.empty((len(etmp)), dtype=dtype)
for i, line in enumerate(etmp):
    for j, val in enumerate(line):
        cells[i][j] = val
# +
# Flatten the structured vertex records into an (nvert, 2) array of x/y
# coordinates. BUG FIX: np.float was deprecated in NumPy 1.20 and removed in
# 1.24; the builtin float gives the same 64-bit dtype.
verts = []
for ivert, x, y, label, ibound, elev in vertices:
    verts.append([x, y])
verts = np.array(verts, dtype=float)
# Convert 1-based vertex ids to 0-based cell-vertex lists. The order is
# reversed (iv3, iv2, iv1) -- presumably to flip the triangle winding for the
# DISV grid; TODO confirm against the mesh file convention.
iverts = []
for i, iv1, iv2, iv3, s, ibound, elev in list(cells):
    iverts.append([iv3 - 1, iv2 - 1, iv1 - 1])
ncpl = len(iverts)       # number of cells per layer
nvert = verts.shape[0]   # number of vertices
# -
# -
# ## Calculate Centroids
# centroid and area array for every triangular cell.
# BUG FIX: np.float was removed in NumPy 1.24 -> use the builtin float.
from flopy.utils.cvfdutil import centroid_of_polygon, area_of_polygon
xcyc = np.empty((ncpl, 2), dtype=float)
area = np.empty((ncpl), dtype=float)
for icell in range(ncpl):
    vlist = [(verts[ivert, 0], verts[ivert, 1]) for ivert in iverts[icell]]
    xcyc[icell, 0], xcyc[icell, 1] = centroid_of_polygon(vlist)
    # abs() because the signed polygon area depends on vertex winding
    area[icell] = abs(area_of_polygon(*zip(*vlist)))
# +
# make temporary vertex grid for plotting and intersecting
# NOTE: this rebinds `vertices`, shadowing the structured array read from the
# mesh file; the tuple form below is what ModflowGwfdisv expects later.
vertices = []
for i in range(nvert):
    vertices.append((i, verts[i, 0], verts[i, 1]))
cell2d = []
for i in range(ncpl):
    # DISV cell2d record: [icell, xc, yc, nvert_in_cell, iv0, iv1, iv2]
    cell2d.append([i, xcyc[i, 0], xcyc[i, 1], 3] + [iv for iv in iverts[i]])
vg_temp = flopy.discretization.VertexGrid(vertices=vertices, cell2d=cell2d,
                                          nlay=1, ncpl=len(cell2d))
# -
# Plot the DISV grid
grid_verts = vg_temp.verts
xcc, ycc = vg_temp.xyzcellcenters[:2]
with styles.USGSMap():
    fig = plt.figure(figsize=(8, 12))
    ax = fig.add_subplot(1, 1, 1, aspect='equal')
    mm = flopy.plot.PlotMapView(modelgrid=vg_temp)
    mm.plot_grid()
    plt.plot(xcc, ycc, 'ko', markersize=1.)
    plt.plot(grid_verts[:,0], grid_verts[:,1], 'rs', markersize=1.5)
# ## Intersect the Grid with the Property Arrays
# Sample the aquifer-K raster onto the triangular grid (nearest neighbor).
kaq_tm = kaq.resample_to_grid(vg_temp,
                              band=kaq.bands[0],
                              method="nearest")
# +
# Side-by-side check: raster on the structured grid vs. resampled trimesh.
fig = plt.figure(figsize=(15, 15))
ax = fig.add_subplot(1, 2, 1, aspect='equal')
mm = flopy.plot.PlotMapView(modelgrid=struct_grid, ax=ax)
mm.plot_array(kaq.get_array(1), cmap='jet')
ax = fig.add_subplot(1, 2, 2, aspect='equal')
mm = flopy.plot.PlotMapView(modelgrid=vg_temp, ax=ax)
mm.plot_array(kaq_tm, cmap='jet')
#mm.plot_cvfd(verts, iverts, a=kaq_tm, edgecolor='none', cmap='jet')
plt.xlim(verts[:, 0].min(), verts[:, 0].max())
plt.ylim(verts[:, 1].min(), verts[:, 1].max())
# -
# Same comparison for the clay-K raster.
kclay_tm = kclay.resample_to_grid(vg_temp,
                                  band=kclay.bands[0],
                                  method="nearest")
with styles.USGSMap():
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(1, 2, 1, aspect='equal')
    mm = flopy.plot.PlotMapView(modelgrid=struct_grid, ax=ax)
    mm.plot_array(kclay.get_array(1), cmap='jet')
    ax = fig.add_subplot(1, 2, 2, aspect='equal')
    mm = flopy.plot.PlotMapView(modelgrid=vg_temp, ax=ax)
    mm.plot_array(kclay_tm, cmap='jet')
    plt.xlim(verts[:, 0].min(), verts[:, 0].max())
    plt.ylim(verts[:, 1].min(), verts[:, 1].max())
# And for the bottom-elevation raster.
bot_tm = bot.resample_to_grid(vg_temp,
                              band=bot.bands[0],
                              method="nearest")
with styles.USGSMap():
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(1, 2, 1, aspect='equal')
    mm = flopy.plot.PlotMapView(modelgrid=struct_grid, ax=ax)
    mm.plot_array(bot.get_array(1), cmap='jet')
    ax = fig.add_subplot(1, 2, 2, aspect='equal')
    mm = flopy.plot.PlotMapView(modelgrid=vg_temp, ax=ax)
    mm.plot_array(bot_tm, cmap='jet')
# ## Intersect the Features with the Grid
# The mesh file's ibound field tags cells belonging to model features.
ibd = cells['ibound']
with styles.USGSMap():
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(1, 1, 1, aspect='equal')
    mm = flopy.plot.PlotMapView(modelgrid=vg_temp)
    mm.plot_array(ibd, cmap='jet')
# +
# Feature codes stored in the mesh file's ibound field.
iriv = 1
iwel = 2
ilak = 3
idx = cells['ibound'] == iriv
rbot = -2.          # river bottom elevation
ct = 0.             # running total conductance (diagnostic)
at = 0.             # running total river-cell area (diagnostic)
# cond_scale is needed to make sure the total conductance
# is similar to what is in the regular grid.
cond_scale = 10.28
rivspd = []
for ip1, iv1, iv2, iv3, txt, ibound, elev in cells[idx]:
    icell = ip1 - 1  # mesh-file cell ids are 1-based
    distance = xcyc[icell, 1]
    # stage increases linearly with northing along the river
    stage = 0. + distance / (18 * delc)
    cond = 1.e5
    cond = cond / (delc * delr) * area[icell]  # scale by cell area
    cond = cond * cond_scale
    ct += cond
    at += area[icell]
    rivspd.append([(0, icell), stage, cond, rbot])
# -
iriv = 1
iwel = 2
ilak = 3
idx = cells['ibound'] == ilak
# Lake cells as constant head (11 m) in layer 1.
chdspd = [[(0, ip1 - 1), 11.] for ip1, iv1, iv2, iv3, txt, ibound, elev in cells[idx]]
# first well is Virginia City well site 2
# second well is Reilly well
# third well is Virginia City well site 1
idx = cells['ibound'] == iwel
boundname = ['VC2', 'Reilly', 'VC1']
# Wells in the bottom layer (layer index 4) start with zero pumping rate;
# rates are set later through the MODFLOW API during optimization.
welspd = [[(4, ip1 - 1), 0.] for ip1, iv1, iv2, iv3, txt, ibound, elev in cells[idx]]
welspd = [[cellid, rate, boundname[i]] for i, (cellid, rate) in enumerate(welspd)]
welspd
# ## Build the Model
ws = './temp/mvmodel'
name = 'mv'
sim = flopy.mf6.MFSimulation(sim_ws=ws, sim_name=name, exe_name=exe_name)
tdis = flopy.mf6.ModflowTdis(sim)  # default: single steady-state stress period
ims = flopy.mf6.ModflowIms(sim, complexity='complex')
gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True)
# DISV discretization from the trimesh geometry built above.
dis = flopy.mf6.ModflowGwfdisv(gwf, nlay=5, ncpl=ncpl, nvert=nvert,
                               top=cells['elev'],
                               botm=[-5, -50, -51, -100., bot_tm],
                               vertices=vertices, cell2d=cell2d)
ic = flopy.mf6.ModflowGwfic(gwf, strt=11.)
# Layer 3 is the clay unit; only layer 1 is convertible (icelltype=1).
npf = flopy.mf6.ModflowGwfnpf(gwf,
                              xt3doptions=True, save_specific_discharge=True,
                              icelltype=[1, 0, 0, 0, 0],
                              k=[kaq_tm, kaq_tm, kclay_tm, kaq_tm, kaq_tm],
                              k33=[0.25 * kaq_tm, 0.25 * kaq_tm, kclay_tm, 0.25 * kaq_tm, 0.25 * kaq_tm])
chd = flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chdspd)
rch = flopy.mf6.ModflowGwfrcha(gwf, recharge=.003641)
riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd)
wel = flopy.mf6.ModflowGwfwel(gwf, stress_period_data=welspd, boundnames=True)
oc = flopy.mf6.ModflowGwfoc(gwf,
                            head_filerecord=name + '.hds',
                            budget_filerecord=name + '.bud',
                            saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')],
                            printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')])
sim.write_simulation()
sim.run_simulation()
# ## Post-Process the Results
# Head array collapsed to (nlay, ncpl); specific discharge for vectors.
head = gwf.output.head().get_data()[:, 0, :]
spdis = gwf.output.budget().get_data(text='DATA-SPDIS')[0]
# +
ilay = 0  # plot the top layer
with styles.USGSMap():
    fig = plt.figure(figsize=(15, 15))
    ax = fig.add_subplot(1, 2, 1, aspect='equal')
    pmv = flopy.plot.PlotMapView(gwf, ax=ax, layer=ilay)
    pmv.plot_array(head, cmap='jet', ec='gray')
    #pmv.plot_grid()
with styles.USGSMap():
    # second panel reuses `fig` created above
    ax = fig.add_subplot(1, 2, 2, aspect='equal')
    pmv = flopy.plot.PlotMapView(gwf, ax=ax, layer=ilay)
    pmv.contour_array(head, levels=np.arange(20))
    pmv.plot_vector(spdis["qx"], spdis["qy"])
# -
# # Figure for Paper
# +
# Figure dimensions for the journal layout.
figwidth = 85                       # mm
figwidth = figwidth / 10 / 2.54    # inches
figheight = 100                     # mm
figheight = figheight / 10 / 2.54  # inches
with styles.USGSMap():
    fig = plt.figure(
        figsize=(figwidth, figheight),
        constrained_layout=True,
    )
    ax = fig.add_subplot(1, 1, 1)
    # ax.set_aspect('equal')
    pmv = flopy.plot.PlotMapView(gwf, ax=ax, layer=ilay)
    pa = pmv.plot_array(head, cmap='viridis_r', ec='none', alpha=0.7)
    pmv.plot_grid(linewidths=0.2, colors='black')
    # BUG FIX: np.int was removed in NumPy 1.24; builtin int is equivalent.
    iplot = np.ones(cells['ibound'].shape, dtype=int)
    idx = cells['ibound'] == iriv
    iplot[idx] = -1
    pmv.plot_ibound(iplot, color_ch='cyan')
    wellnames = ['A', 'B', 'C']
    idx = cells['ibound'] == iwel
    # renamed the loop counter so it no longer clobbers the mask `idx`
    # NOTE(review): xcyc is indexed with the raw 1-based mesh cell id here
    # (elsewhere ip1 - 1 is used) -- confirm intended.
    for iwell, icell in enumerate(cells['icell'][idx]):
        x, y = xcyc[icell]
        ax.plot(x, y, 'ro', markersize=2)
        s = wellnames.pop(0)
        s = "Well {}".format(s)
        # Place the first label to the left of its marker, the rest right.
        if iwell == 0:
            xx = x - 250
            ha = "right"
        else:
            xx = x + 250
            ha = "left"
        styles.add_text(
            ax,
            text=s,
            x=xx,
            y=y+250,
            color='white',
            transform=False,
            bold=False,
            italic=False,
            va="center",
            ha=ha,
        )
    styles.add_text(
        ax,
        text="Lake",
        x=2750,
        y=15000,
        color='white',
        transform=False,
        bold=False,
        italic=False,
        va="center",
        ha="center",
    )
    styles.add_text(
        ax,
        text="River",
        x=5000,
        y=5000,
        color='white',
        transform=False,
        bold=False,
        italic=False,
        va="center",
        ha="center",
        rotation="vertical",
    )
    ax.set_xlabel('x position (m)')
    ax.set_ylabel('y position (m)')
    cbar = fig.colorbar(pa, shrink=0.68)
    cbar.ax.set_ylabel('head (m)')
    fname = os.path.join("..", 'doc', 'figures', 'mf6opt.png')
    plt.savefig(fname, dpi=600)
# -
| notebooks/make_mcdonald_valley.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="l5uXqP2G7ov1"
# # DeepWalk in python
# + [markdown] id="_Ue1g8397t0o"
# ## Imports
# + id="V_CAT4tG7om_"
import random
import networkx as nx
from gensim.models import Word2Vec
import numpy as np
from abc import ABC
import pandas as pd
# + [markdown] id="_GAkUKgmTkZR"
# ## Deepwalk
# + id="6DW32rssTing"
class DeepWalk:
    """
    Implement DeepWalk algorithm.
    reference paper : DeepWalk: Online Learning of Social Representations
    link : https://arxiv.org/abs/1403.6652
    Using the algorithm can get graph embedding model with your network data.
    """

    def __init__(self, G=None, adjlist_path=None, edgelist_path=None):
        """
        Parameters
        ----------
        G : networkx graph, used directly when given.
        adjlist_path : str : adjacency-list network file path.
        edgelist_path : str : edge-list network file path.

        Raises
        ------
        ValueError when no graph source is supplied.
        """
        if G is None and adjlist_path is None and edgelist_path is None:
            raise ValueError('all parameter is None, please check your input.')
        try:
            if G is not None:
                self.G = G
            elif adjlist_path is not None:
                self.G = nx.read_adjlist(adjlist_path)
            else:
                self.G = nx.read_edgelist(edgelist_path)
        except Exception as e:
            # best-effort load: report the failure (original behavior kept)
            print(e)

    def random_walk(self, iterations, start_node=None, random_walk_times=5):
        """
        : Implement of random walk algorithm :
        Parameters
        ----------------------------------------
        iterations : int : random walk number of iteration
        start_node : str : choose start node (random choose a node, if start_node is None)
        random_walk_times : int : random walk times.
        ----------------------------------------
        Returns
        walk_records : list of walks record
        """
        walk_records = []
        for _ in range(iterations):
            # BUG FIX: the original set current_node = s_node unconditionally,
            # but s_node was only bound in the start_node-is-None branch, so
            # passing an explicit start_node raised NameError.
            if start_node is None:
                current_node = random.choice(list(self.G.nodes()))
            else:
                current_node = start_node
            walk_path = [current_node]
            while len(walk_path) < random_walk_times:
                neighbors = list(self.G.neighbors(current_node))
                if not neighbors:
                    break  # dead-end node: stop this walk early
                current_node = random.choice(neighbors)
                walk_path.append(current_node)
            walk_records.append(walk_path)
        return walk_records

    def buildWord2Vec(self, **kwargs):
        """
        Using gensim to build word2vec model
        Parameters
        ----------------------------------------
        **kwargs
            walk_path : list : random walk results
            size : int : specific embedding dimension, default : 100 dim
            window : int : specific learn context window size, default : 5
            workers : int : specific workers. default : 2
        ----------------------------------------
        Returns
        embedding_model : trained gensim Word2Vec model (None if no walks given)
        """
        walk_path = kwargs.get('walk_path', None)
        if walk_path is None:
            return
        size = kwargs.get('size', 100)
        window = kwargs.get('window', 5)
        workers = kwargs.get('workers', 2)
        # NOTE(review): gensim >= 4.0 renamed `size` to `vector_size`; this
        # call assumes gensim 3.x -- confirm the installed version.
        embedding_model = Word2Vec(walk_path, size=size, window=window, min_count=0, workers=workers, sg=1, hs=1)
        return embedding_model
# + [markdown] id="B7Kv5bfhaIsh"
# ## Hierarchical Softmax
# First, we'll build the components required to use hierarchical softmax. From the paper:
#
# Computing the partition function (normalization factor) is expensive. If we assign the vertices to the leaves of a binary tree, the prediction problem turns into maximizing the probability of a specific path in the tree
#
# Thus, instead of having a classifier that predicts probabilities for each word from our vocabulary (besides the one we're currently iterating on), we can structure the loss function as a binary tree where every internal node contains its own binary classifier. Computing the loss (and gradient) can therefore be done in $O(logv)$ predictions rather than $O(v)$ (as is the case with $v$ labels), where $v$ is the number of vertices in our graph.
# + id="OPTTtTT9aIpg"
class Tree(ABC):
    """Abstract node of the hierarchical-softmax binary tree.

    Leaves hold vertices; internal nodes hold binary classifiers. Each node
    knows its parent and whether it is its parent's right child.
    """

    @staticmethod
    def merge(dims, lr, batch_size, left=None, right=None):
        """Join two subtrees (either may be None) under a new InternalNode."""
        if left is not None: left.set_left()
        if right is not None: right.set_right()
        return InternalNode(dims, lr, batch_size, left, right)

    @staticmethod
    def build_tree(nodes, dims, lr, batch_size):
        """Build the tree bottom-up by pairwise merging until one root remains.

        BUG FIX: the original padded `nodes` to an even length only once,
        before the loop; an intermediate level with an odd node count (e.g.
        6 leaves -> 3 internal nodes) silently dropped its last node. The
        padding check now runs at every level.
        """
        # The initial in-place append deliberately mutates the caller's list
        # (existing notebook code unpacks the appended None from its leaf list).
        if len(nodes) % 2 != 0:
            nodes.append(None)
        while len(nodes) > 1:
            if len(nodes) % 2 != 0:
                nodes.append(None)
            nodes = [Tree.merge(dims, lr, batch_size, nodes[i], nodes[i + 1])
                     for i in range(0, len(nodes) - 1, 2)]
        return nodes[0]

    def set_parent(self, t):
        self.parent = t

    def set_left(self): self.is_right = False

    def set_right(self): self.is_right = True
# + id="Ivhz8tvMaWZg"
class InternalNode(Tree):
    """Internal tree node: a logistic binary classifier over embeddings.

    Holds a weight vector `params`; `predict` gives the probability of
    taking the right (or left) branch for a given embedding. Gradients are
    accumulated and applied as an averaged SGD step once `batch_size`
    samples have been collected.
    """

    def __init__(self, dims, lr, batch_size, left=None, right=None, parent=None, is_right=None):
        self.dims = dims
        self.set_left_child(left)
        self.set_right_child(right)
        self.set_parent(parent)
        self.is_right = is_right
        # random initialization of the classifier weights
        self.params = np.random.uniform(size=self.dims)
        self.gradients = []
        self.lr = lr
        self.batch_size = batch_size

    def set_left_child(self, child: Tree):
        """Attach `child` as the left subtree and back-link it to self."""
        self.left = child
        if self.left is None:
            return
        self.left.set_parent(self)
        self.left.set_left()

    def set_right_child(self, child: Tree):
        """Attach `child` as the right subtree and back-link it to self."""
        self.right = child
        if self.right is None:
            return
        self.right.set_parent(self)
        self.right.set_right()

    def set_parent(self, parent: Tree):
        self.parent = parent

    def predict(self, embedding, right=True):
        """Sigmoid of the (signed) dot product; negated for the left branch."""
        score = self.params.dot(embedding)
        if not right:
            score = -score
        return 1 / (1 + np.exp(-score))

    def update_gradients(self, gradient: np.array):
        """Buffer a gradient; take an averaged SGD step when the batch fills."""
        self.gradients.append(gradient)
        if len(self.gradients) < self.batch_size:
            return
        step = np.stack(self.gradients, axis=0).mean(axis=0)
        self.params = self.params - self.lr * step
        self.gradients = []

    def __eq__(self, other):
        # structural equality: hyper-parameters plus recursive child equality
        if self.dims != other.dims:
            return False
        if self.left != other.left or self.right != other.right:
            return False
        return self.lr == other.lr and self.batch_size == other.batch_size
# + id="s4cUO_97aZgW"
class Leaf(Tree):
    """Leaf of the hierarchical-softmax tree; wraps a single graph vertex."""

    def __init__(self, vertex, parent: InternalNode = None, is_right = False):
        # parent/is_right are normally set later by InternalNode/Tree.merge
        self.parent = parent
        self.is_right = is_right
        self.vertex = vertex

    def update(self, anchor_vertex):
        """Walk from this leaf to the root, updating every internal node's
        classifier and the anchor vertex's embedding.

        Returns the accumulated negative log-likelihood of the root-to-leaf
        path for `anchor_vertex`'s embedding.
        """
        node = self
        gradients = []
        total_cost = 0.
        emb_grads = []
        while node.parent is not None:
            is_right = node.is_right
            node = node.parent
            # probability of taking the correct branch at this node
            prob = node.predict(anchor_vertex.embedding, is_right)
            log_prob = np.log(prob)
            total_cost -= log_prob
            u = 1 - prob
            # NOTE(review): update_gradients may step node.params when its
            # batch fills, and the next line then reads the *updated* params
            # for the embedding gradient -- confirm this ordering is intended.
            node.update_gradients(u*anchor_vertex.embedding)
            emb_grads.append(u*node.params)
        # apply the summed per-node gradients to the anchor embedding
        anchor_vertex.update_embedding(sum(emb_grads))
        return total_cost
# + id="mE_0ks6PabTD"
class Vertex(object):
    """Graph vertex with a learnable embedding updated by mini-batch SGD.

    Gradients are buffered until `batch_size` of them have accumulated, then
    their mean is applied as one descent step and the buffer is cleared.
    """

    def __init__(self, dim, lr, batch_size):
        self.dim = dim
        # random initial embedding in [0, 1)
        self.embedding = np.random.uniform(size=dim)
        self.lr = lr
        self.gradients = []
        self.batch_size = batch_size

    def update_embedding(self, gradient: np.array):
        """Buffer `gradient`; apply the averaged step once the batch is full."""
        self.gradients.append(gradient)
        if len(self.gradients) < self.batch_size:
            return
        mean_grad = np.stack(self.gradients, axis=0).mean(axis=0)
        self.embedding = self.embedding - self.lr * mean_grad
        self.gradients = []
# + id="Ux9slFZ-adiw"
# Two 8-dim vertices wrapped in leaves, joined under one internal node
# (batch size 1, so every update applies immediately).
v = Vertex(8, 1e-1, 1)
v2 = Vertex(8, 1e-1, 1)
leaf = Leaf(v)
leaf2 = Leaf(v2)
i = InternalNode(8, 1e-1, 1, leaf, leaf2)
# + colab={"base_uri": "https://localhost:8080/"} id="3dp2DCMgai2j" executionInfo={"status": "ok", "timestamp": 1633185052067, "user_tz": -330, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="41d60917-dfc5-4dc2-a955-d6a34fdadf7a"
# Capture pre-update references; the SGD step rebinds the attribute to a new
# array, so these keep the old values for comparison.
before = leaf2.vertex.embedding
before_parent = leaf.parent.params
print(before)
# + colab={"base_uri": "https://localhost:8080/"} id="6nTMuDVBajHJ" executionInfo={"status": "ok", "timestamp": 1633185058468, "user_tz": -330, "elapsed": 703, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="9b3e5a23-41f5-4d17-eb78-946212109211"
# One hierarchical-softmax update of leaf's path using v2 as the anchor.
leaf.update(leaf2.vertex)
after = leaf2.vertex.embedding
after_parent = leaf.parent.params
print(after)
# + [markdown] id="fSmBZypRakhm"
# Leaves 1 and 2 should share parent i. Also, each should have its own vertex (v and v2 respectively).
# + id="_pY0JOE_amtM"
# Each leaf keeps its own vertex; both share the same parent internal node.
assert leaf.vertex == v
assert leaf.vertex != v2
assert leaf2.vertex == v2
assert leaf2.vertex != v
assert leaf.parent == i
assert leaf2.parent == i
# + [markdown] id="NRvDC5rnanwm"
# As a convenience method, we have Tree.merge which should do the same thing as the manual passing to the InternalNode constructor above.
# + id="fb6NsUPlapka"
# Tree.merge should be structurally equal to the manual InternalNode above.
i2 = Tree.merge(8, 1e-1, 1, leaf, leaf2)
# + id="2w1yuw3Waqpr"
assert i2 == i
# + [markdown] id="dYGVHNvYar0D"
# We should be able to create an internal node with a single child.
# + id="yiveInG5atPm"
# An internal node may have a single (left) child.
i3 = InternalNode(8, 0.01, 1, leaf)
assert i3.left == leaf
assert i3.right is None
# + [markdown] id="6lz38Mizaumb"
# We should be able to combine two internal nodes under a third internal node.
# + id="tN4e7CGDawIn"
# Two internal nodes can be combined under a third; children are back-linked.
two_internal_nodes = Tree.merge(8, 0.01, 1, i, i2)
# + id="iJpxRbHlaxsA"
assert two_internal_nodes.left == i
assert two_internal_nodes.right == i2
assert i.parent == two_internal_nodes
assert i2.parent == two_internal_nodes
# + id="fxkGgObGazPL"
p = Tree.merge(8, 1e-1, 1, leaf, leaf2)
# + colab={"base_uri": "https://localhost:8080/"} id="C6A0p1Mra0jv" executionInfo={"status": "ok", "timestamp": 1633185128668, "user_tz": -330, "elapsed": 532, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="4d798539-0996-47cd-ee4a-5170709eb09b"
leaf.parent == leaf2.parent
# + colab={"base_uri": "https://localhost:8080/"} id="hlHh21ooa1u-" executionInfo={"status": "ok", "timestamp": 1633185133919, "user_tz": -330, "elapsed": 716, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="2a7ca9bc-41dd-4291-8ac0-6e8e7424c848"
leaf.vertex.embedding
# + colab={"base_uri": "https://localhost:8080/"} id="Y948sZOYa27u" executionInfo={"status": "ok", "timestamp": 1633185139193, "user_tz": -330, "elapsed": 424, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="9a5e0306-365f-43f2-b244-8636e2448fc8"
# This time snapshot with .copy() so the comparison is robust even if the
# arrays were mutated in place.
before = leaf2.vertex.embedding.copy()
before_parent = leaf.parent.params.copy()
leaf.update(leaf2.vertex)
after = leaf2.vertex.embedding
after_parent = leaf.parent.params
(before, after)
# + colab={"base_uri": "https://localhost:8080/"} id="V7o7Ugtwa4VA" executionInfo={"status": "ok", "timestamp": 1633185146746, "user_tz": -330, "elapsed": 515, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="48cc8c72-7984-431b-a5a1-b9b080f5c973"
(before_parent, after_parent)
# + id="kaZ9yTnba6I1"
assert leaf.parent.predict(leaf2.vertex.embedding, right=False) + leaf.parent.predict(leaf2.vertex.embedding)
# + colab={"base_uri": "https://localhost:8080/"} id="rZQVRYJNa7Qg" executionInfo={"status": "ok", "timestamp": 1633185158126, "user_tz": -330, "elapsed": 425, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="27781dd6-6951-4189-a1f2-e9acfcca76a5"
leaf.parent.predict(leaf2.vertex.embedding)
# + colab={"base_uri": "https://localhost:8080/"} id="zwNqR-Cpa88M" executionInfo={"status": "ok", "timestamp": 1633185166246, "user_tz": -330, "elapsed": 619, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="a4519140-c8c3-465a-c6c5-7ef3db21c18c"
# Fresh pair of leaves with a smaller learning rate; update in one direction.
new_leaf = Leaf(Vertex(8, 0.01, 1))
new_leaf2 = Leaf(Vertex(8, 0.01, 1))
merged = Tree.merge(8, 0.01, 1, new_leaf, new_leaf2)
before1 = new_leaf2.vertex.embedding.copy()
new_leaf.update(new_leaf2.vertex)
after1 = new_leaf2.vertex.embedding
(before1, after1)
# + colab={"base_uri": "https://localhost:8080/"} id="aQNA3TD1a-2D" executionInfo={"status": "ok", "timestamp": 1633185172323, "user_tz": -330, "elapsed": 645, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="9a767b11-b153-49ba-8ede-c2cba83097a5"
# Update in the opposite direction: anchor on new_leaf's vertex.
before2 = new_leaf.vertex.embedding.copy()
new_leaf2.update(new_leaf.vertex)
after2 = new_leaf.vertex.embedding
(before2, after2)
# + id="Jmyp5cHvbAYP"
# Build a full tree over a 3-vertex "walk".
emb_length = 10
lr = 1e-3
bs = 100
v1 = Vertex(emb_length, lr, bs)
v2 = Vertex(emb_length, lr, bs)
v3 = Vertex(emb_length, lr, bs)
random_walk = [v1, v2, v3]
leaves = list(map(lambda x: Leaf(x), random_walk))
# NOTE: build_tree pads the (odd-length) `leaves` list in place with a None
# sentinel; the 4-way unpack a few cells below relies on this side effect.
tree = Tree.build_tree(leaves, emb_length, lr, bs)
# + id="0fQmteX6bFb_" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1633185240163, "user_tz": -330, "elapsed": 662, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="e3d0d2a7-05e8-4ab8-8c76-705d4a89b127"
leaves
# + id="8ZjzHNozbFcA" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1633185241985, "user_tz": -330, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="15d2626b-42e2-4e7d-9c0c-2b197b5f5478"
tree.__class__
# + id="f36Fs_C1bFcC" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1633185242731, "user_tz": -330, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="fafb1c70-5d61-469d-f9bd-38d87564e6d7"
v1.embedding.shape, v2.embedding.shape, v3.embedding.shape
# + id="MRKD0Yf3bFcD"
leaf1, leaf2, leaf3, empty_leaf = leaves
# + id="jKiUuANrbFcD" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1633185244387, "user_tz": -330, "elapsed": 15, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="b44b881b-9cf7-4b8a-ff19-e633950f81b4"
leaf3.vertex.embedding
# + id="OHqy3C6TbFcD" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1633185244842, "user_tz": -330, "elapsed": 30, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="a531b6ba-2394-43f4-cb71-6be33db1b548"
leaf1.parent, leaf2.parent, leaf3.parent
# + [markdown] id="zXoHf1r47yvC"
# ## Plots
# + id="JpFNd3dSbFcE" colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"status": "ok", "timestamp": 1633185246043, "user_tz": -330, "elapsed": 1223, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="b39addd4-15e2-42ca-9aaf-99e1b4799ee2"
costs1 = []
costs3 = []
combined_cost = []
for i in range(10000):
cost1 = leaf1.update(leaf2.vertex)
cost3 = leaf3.update(leaf2.vertex)
if i % bs == 0:
costs1.append(cost1)
costs3.append(cost3)
combined_cost.append(cost1+cost3)
pd.Series(costs1).plot(kind='line')
# + id="Nyl5xXrybFcE" colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"status": "ok", "timestamp": 1633185246914, "user_tz": -330, "elapsed": 894, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="e58ab367-c65b-44d0-fb5f-b72d48c857c6"
pd.Series(costs3).plot(kind='line')
# + id="_j2khUZ1bFcE" colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"status": "ok", "timestamp": 1633185248847, "user_tz": -330, "elapsed": 11, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="3f3e35d4-c6fe-4762-f26a-39e318594dc2"
pd.Series(combined_cost).plot(kind='line')
# + id="EnNn5u0xbFcF"
emb_length, lr, bs = 10, 1e-4, 100
leaves = [Vertex(emb_length, lr, bs) for i in range(100)]
# + id="6EKibUn-bFcF"
leaves = [Leaf(v) for v in leaves]
# + id="flNh5_jRbFcF"
tree = Tree.build_tree(leaves, emb_length, lr, bs)
# + id="gma5jbSzbFcF"
chosen_leaf = leaves[20]
# + id="Pam4uPUBbFcG" colab={"base_uri": "https://localhost:8080/", "height": 72} executionInfo={"status": "ok", "timestamp": 1633185274998, "user_tz": -330, "elapsed": 23762, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="b97035cc-c4a5-4416-9915-911b8e5741ea"
#slow
costs = []
num_iter = 3000
epoch_costs = []
for it in range(num_iter):
for i in range(100):
if i == 20:
continue
costs.append(leaves[i].update(chosen_leaf.vertex))
epoch_costs.append(np.mean(costs))
costs = []
s = pd.Series(epoch_costs)
s.plot(kind='line')
# + [markdown] id="AdYUmWzbbFcG"
# This is an interesting result -- it seems a little unusual that we would see training loss going up, but some things to consider:
# * In the "real" version, the leaf embeddings are (hopefully) going to have some relationship with the internal node model parameters. In this toy version, we've uniformly initialized all parameters and then trained the model on every single leaf for many iterations. It's basically learning how to optimize random noise.
# * We're using plain vanilla batch GD here, with no learning rate annealing (or any of the wide number of GD enhancements that exist). It's very possible that we're getting gradient explosions / divergence towards the end here.
# + [markdown] id="4Zu0lhlHbFcG"
# The goal of hierarchical softmax is to make the scoring function run in $O(\log v)$ rather than $O(v)$ by organizing the nodes as a binary tree with a binary classifier at each internal node. At a high level, we follow these steps:
# 1. We identify a leaf that is contained within the window of our vertex within the current random walk
# 2. We take that leaf's parent and compute the probability of having followed the correct path (left or right) to the leaf we identified in step 1 by using the model parameters for this internal node combined with the features for the current vertex (which is a row in $\Phi$).
# 3. We repeat step 2 for all internal nodes until we get to the root
# 4. The product of all of the internal probabilities gives us the probability of seeing a co-occurrence of the neighbor node given what we know about the node we're exploring
# 5. $-\log \Pr(u_k|\Phi(v_j))$ is our loss function, where $\Pr(u_k|\Phi(v_j))$ is the probability we calculated in step 4
# 6. We use the loss in step 5 to perform a gradient descent step updating both the parameters of our model and $\Phi(v_j)$:
#
# $$\theta \leftarrow \theta - \alpha_\theta * \frac{\partial J}{\partial \theta}$$
# <br>
# $$\Phi \leftarrow \Phi - \alpha_\Phi * \frac{\partial J}{\partial \Phi}$$
#
# Where $\theta$ represents all of the parameters of all of the models in the internal nodes of the tree, and $\Phi$ represents the latent representation of the current vertex.
| docs/T384270_DeepWalk_in_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Setup CLTK and import TLG
#
# Follow the [setup instructions from the CLTK tutorial here](https://github.com/cltk/tutorials/blob/master/2%20Import%20corpora.ipynb).
from cltk.corpus.utils.importer import CorpusImporter
# Download the CLTK 'greek' corpus index and import the locally licensed
# TLG-E corpus from ~/corpora/TLG_E/ into CLTK's data directory.
my_greek_downloader = CorpusImporter('greek')
my_greek_downloader.import_corpus('tlg', '~/corpora/TLG_E/')
# # Pre-process TLG E corpus
#
# ## Covert Beta Code to Unicode
#
# http://docs.cltk.org/en/latest/greek.html#converting-tlg-texts-with-tlgu
# +
from cltk.corpus.greek.tlgu import TLGU
# Convert the TLG Beta Code files to Unicode plaintext.
tlgu = TLGU()
tlgu.convert_corpus(corpus='tlg')  # writes to: ~/cltk_data/greek/text/tlg/plaintext/
# -
# ## Cleanup texts
#
# Overwrite the plaintext files with more aggressive cleanup, but keep periods.
#
# http://docs.cltk.org/en/latest/greek.html#text-cleanup
# !head ~/cltk_data/greek/text/tlg/plaintext/TLG0437.TXT
from cltk.corpus.utils.formatter import tlg_plaintext_cleanup
import os
# +
# Normalize every TLG plaintext file in place: strip punctuation (keeping
# periods), lowercase the text, and overwrite the original file.
plaintext_dir = os.path.expanduser('~/cltk_data/greek/text/tlg/plaintext/')
files = os.listdir(plaintext_dir)
for filename in files:
    path = os.path.join(plaintext_dir, filename)
    with open(path) as reader:
        raw_text = reader.read()
    cleaned = tlg_plaintext_cleanup(raw_text, rm_punctuation=True, rm_periods=False).lower()
    with open(path, 'w') as writer:
        writer.write(cleaned)
# -
# !head ~/cltk_data/greek/text/tlg/plaintext/TLG0437.TXT
| lda/leipzig/1_lda_tests/1a Setup and preprocess docs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + nbsphinx="hidden"
import open3d as o3d
import numpy as np
import copy
import os
import sys
# monkey patches visualization and provides helpers to load geometries
# Make the shared tutorial helper importable from the parent directory; it
# monkey-patches visualization and provides geometry loaders.
sys.path.append('..')
import open3d_tutorial as o3dtut
# Only open interactive visualization windows when not running under CI.
# (Idiom fix: use `"CI" not in os.environ` rather than `not "CI" in ...`.)
o3dtut.interactive = "CI" not in os.environ
# -
# # Mesh
# Open3D has a data structure for 3D triangle meshes called `TriangleMesh`.
# The code below shows how to read a triangle mesh from a `ply` file and print its vertices and triangles.
# +
print("Testing mesh in Open3D...")
armadillo_mesh = o3d.data.ArmadilloMesh()
mesh = o3d.io.read_triangle_mesh(armadillo_mesh.path)
knot_mesh = o3d.data.KnotMesh()
mesh = o3d.io.read_triangle_mesh(knot_mesh.path)
print(mesh)
print('Vertices:')
print(np.asarray(mesh.vertices))
print('Triangles:')
print(np.asarray(mesh.triangles))
# -
# The `TriangleMesh` class has a few data fields such as `vertices` and `triangles`. Open3D provides direct memory access to these fields via numpy.
# ## Visualize a 3D mesh
print("Try to render a mesh with normals (exist: " +
str(mesh.has_vertex_normals()) + ") and colors (exist: " +
str(mesh.has_vertex_colors()) + ")")
o3d.visualization.draw_geometries([mesh])
print("A mesh with no normals and no colors does not look good.")
# You can rotate and move the mesh but it is painted with uniform gray color and does not look `3d`. The reason is that the current mesh does not have normals for vertices or faces. So uniform color shading is used instead of a more sophisticated Phong shading.
# ## Surface normal estimation
# Let’s draw the mesh with surface normals.
print("Computing normal and rendering it.")
mesh.compute_vertex_normals()
print(np.asarray(mesh.triangle_normals))
o3d.visualization.draw_geometries([mesh])
# It uses `compute_vertex_normals` and `paint_uniform_color` which are member functions of `mesh`.
# ## Crop mesh
# We remove half of the surface by directly operating on the `triangle` and `triangle_normals` data fields of the mesh. This is done via numpy.
print("We make a partial mesh of only the first half triangles.")
mesh1 = copy.deepcopy(mesh)
mesh1.triangles = o3d.utility.Vector3iVector(
np.asarray(mesh1.triangles)[:len(mesh1.triangles) // 2, :])
mesh1.triangle_normals = o3d.utility.Vector3dVector(
np.asarray(mesh1.triangle_normals)[:len(mesh1.triangle_normals) // 2, :])
print(mesh1.triangles)
o3d.visualization.draw_geometries([mesh1])
# ## Paint mesh
# `paint_uniform_color` paints the mesh with a uniform color. The color is in RGB space, [0, 1] range.
print("Painting the mesh")
mesh1.paint_uniform_color([1, 0.706, 0])
o3d.visualization.draw_geometries([mesh1])
# ## Mesh properties
# A triangle mesh has several properties that can be tested with Open3D. One important property is the manifold property, where we can test the triangle mesh if it is edge manifold `is_edge_manifold` and if it is `is_vertex_manifold`. A triangle mesh is edge manifold, if each edge is bounding either one or two triangles. The function `is_edge_manifold` has the `bool` parameter `allow_boundary_edges` that defines if boundary edges should be allowed. Further, a triangle mesh is vertex manifold if the star of the vertex is edge-manifold and edge-connected, e.g., two or more faces connected only by a vertex and not by an edge.
#
# Another property is the test of self-intersection. The function `is_self_intersecting` returns `True` if there exists a triangle in the mesh that is intersecting another mesh. A watertight mesh can be defined as a mesh that is edge manifold, vertex manifold and not self intersecting. The function `is_watertight` implements this check in Open3D.
#
# We also can test the triangle mesh, if it is orientable, i.e. the triangles can be oriented in such a way that all normals point towards the outside. The corresponding function in Open3D is called `is_orientable`.
#
# The code below tests a number of triangle meshes against those properties and visualizes the results. Non-manifold edges are shown in red, boundary edges in green, non-manifold vertices are visualized as blue points, and self-intersecting triangles are shown in pink.
def check_properties(name, mesh):
    """Print manifoldness/watertightness properties of *mesh* and visualize violations.

    Highlights: non-manifold edges in red, boundary edges in green,
    non-manifold vertices as blue points, self-intersecting triangles in pink.
    """
    mesh.compute_vertex_normals()
    edge_manifold = mesh.is_edge_manifold(allow_boundary_edges=True)
    edge_manifold_boundary = mesh.is_edge_manifold(allow_boundary_edges=False)
    vertex_manifold = mesh.is_vertex_manifold()
    self_intersecting = mesh.is_self_intersecting()
    watertight = mesh.is_watertight()
    orientable = mesh.is_orientable()
    print(name)
    print(f"  edge_manifold:          {edge_manifold}")
    print(f"  edge_manifold_boundary: {edge_manifold_boundary}")
    print(f"  vertex_manifold:        {vertex_manifold}")
    print(f"  self_intersecting:      {self_intersecting}")
    print(f"  watertight:             {watertight}")
    print(f"  orientable:             {orientable}")
    # Collect one extra geometry per violated property for visualization.
    geoms = [mesh]
    if not edge_manifold:
        edges = mesh.get_non_manifold_edges(allow_boundary_edges=True)
        geoms.append(o3dtut.edges_to_lineset(mesh, edges, (1, 0, 0)))
    if not edge_manifold_boundary:
        edges = mesh.get_non_manifold_edges(allow_boundary_edges=False)
        geoms.append(o3dtut.edges_to_lineset(mesh, edges, (0, 1, 0)))
    if not vertex_manifold:
        verts = np.asarray(mesh.get_non_manifold_vertices())
        pcl = o3d.geometry.PointCloud(
            points=o3d.utility.Vector3dVector(np.asarray(mesh.vertices)[verts]))
        pcl.paint_uniform_color((0, 0, 1))
        geoms.append(pcl)
    if self_intersecting:
        intersecting_triangles = np.asarray(
            mesh.get_self_intersecting_triangles())
        # NOTE(review): only the first intersecting pair is kept before the
        # unique() call, so at most one pair is visualized -- confirm intended.
        intersecting_triangles = intersecting_triangles[0:1]
        intersecting_triangles = np.unique(intersecting_triangles)
        print("  # visualize self-intersecting triangles")
        triangles = np.asarray(mesh.triangles)[intersecting_triangles]
        edges = [
            np.vstack((triangles[:, i], triangles[:, j]))
            for i, j in [(0, 1), (1, 2), (2, 0)]
        ]
        edges = np.hstack(edges).T
        edges = o3d.utility.Vector2iVector(edges)
        geoms.append(o3dtut.edges_to_lineset(mesh, edges, (1, 0, 1)))
    o3d.visualization.draw_geometries(geoms, mesh_show_back_face=True)
# Run the property checks on a variety of meshes, including deliberately
# broken ones supplied by the tutorial helpers.
knot_mesh_data = o3d.data.KnotMesh()
knot_mesh = o3d.io.read_triangle_mesh(knot_mesh_data.path)
check_properties('KnotMesh', knot_mesh)
check_properties('Mobius', o3d.geometry.TriangleMesh.create_mobius(twists=1))
check_properties("non-manifold edge", o3dtut.get_non_manifold_edge_mesh())
check_properties("non-manifold vertex", o3dtut.get_non_manifold_vertex_mesh())
check_properties("open box", o3dtut.get_open_box_mesh())
check_properties("intersecting_boxes", o3dtut.get_intersecting_boxes_mesh())
# ## Mesh filtering
# Open3D contains a number of methods to filter meshes. In the following we show the implemented filters to smooth noisy triangle meshes.
# ### Average filter
# The simplest filter is the average filter. A given vertex $v_i$ is given by the average of the adjacent vertices $\mathcal{N}$
#
# \begin{equation}
# v_i = \frac{v_i + \sum_{n \in \mathcal{N}} v_n}{|N| + 1} \,.
# \end{equation}
#
# This filter can be used to denoise meshes as demonstrated in the code below. The parameter `number_of_iterations` in the function `filter_smooth_simple` defines the how often the filter is applied to the mesh.
# +
# Build a noisy test mesh by jittering the knot's vertices with uniform noise,
# then denoise it with the simple average filter (1 vs 5 iterations).
# NOTE: mesh_in is reused by the Laplacian/Taubin filter cells that follow.
print('create noisy mesh')
knot_mesh = o3d.data.KnotMesh()
mesh_in = o3d.io.read_triangle_mesh(knot_mesh.path)
vertices = np.asarray(mesh_in.vertices)
noise = 5
vertices += np.random.uniform(0, noise, size=vertices.shape)
mesh_in.vertices = o3d.utility.Vector3dVector(vertices)
mesh_in.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_in])
print('filter with average with 1 iteration')
mesh_out = mesh_in.filter_smooth_simple(number_of_iterations=1)
mesh_out.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_out])
print('filter with average with 5 iterations')
mesh_out = mesh_in.filter_smooth_simple(number_of_iterations=5)
mesh_out.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_out])
# -
# ### Laplacian
# Another important mesh filter is the Laplacian defined as
#
# \begin{equation}
# v_i = v_i \cdot \lambda \sum_{n \in N} w_n v_n - v_i \,,
# \end{equation}
#
# where $\lambda$ is the strength of the filter and $w_n$ are normalized weights that relate to the distance of the neighboring vertices. The filter is implemented in `filter_smooth_laplacian` and has the parameters `number_of_iterations` and `lambda`.
# +
# Laplacian smoothing of the noisy mesh (10 vs 50 iterations).
print('filter with Laplacian with 10 iterations')
mesh_out = mesh_in.filter_smooth_laplacian(number_of_iterations=10)
mesh_out.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_out])
print('filter with Laplacian with 50 iterations')
mesh_out = mesh_in.filter_smooth_laplacian(number_of_iterations=50)
mesh_out.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_out])
# -
# ### Taubin filter
# The problem with the average and Laplacian filter is that they lead to a shrinkage of the triangle mesh. [\[Taubin1995\]](../reference.html#Taubin1995) showed that the application of two Laplacian filters with different $\lambda$ parameters can prevent the mesh shrinkage. The filter is implemented in `filter_smooth_taubin`.
# +
# Taubin smoothing (two Laplacian passes with opposing lambdas) avoids the
# shrinkage of the plain filters; 10 vs 100 iterations.
print('filter with Taubin with 10 iterations')
mesh_out = mesh_in.filter_smooth_taubin(number_of_iterations=10)
mesh_out.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_out])
print('filter with Taubin with 100 iterations')
mesh_out = mesh_in.filter_smooth_taubin(number_of_iterations=100)
mesh_out.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh_out])
# -
# ## Sampling
# Open3D includes functions to sample point clouds from a triangle mesh. The simplest method is `sample_points_uniformly` that uniformly samples points from the 3D surface based on the triangle area. The parameter `number_of_points` defines how many points are sampled from the triangle surface.
# Uniformly sample 500 points (weighted by triangle area) from a sphere...
mesh = o3d.geometry.TriangleMesh.create_sphere()
mesh.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh])
pcd = mesh.sample_points_uniformly(number_of_points=500)
o3d.visualization.draw_geometries([pcd])
# +
# ...and from the bunny mesh.
bunny = o3d.data.BunnyMesh()
mesh = o3d.io.read_triangle_mesh(bunny.path)
mesh.compute_vertex_normals()
o3d.visualization.draw_geometries([mesh])
pcd = mesh.sample_points_uniformly(number_of_points=500)
o3d.visualization.draw_geometries([pcd])
# -
# Uniform sampling can yield clusters of points on the surface, while a method called Poisson disk sampling can evenly distribute the points on the surface. The method `sample_points_poisson_disk` implements sample elimination. It starts with a sampled point cloud and removes points to satisfy the sampling criterion. The method supports two options to provide the initial point cloud:
#
# 1. Default via the parameter `init_factor`: The method first samples uniformly a point cloud from the mesh with `init_factor` x `number_of_points` and uses this for the elimination.
# 2. One can provide a point cloud and pass it to the `sample_points_poisson_disk` method. Then, this point cloud is used for elimination.
# +
# Poisson-disk sampling: default initialization via init_factor vs supplying a
# pre-sampled point cloud (pcl=) for the elimination step.
mesh = o3d.geometry.TriangleMesh.create_sphere()
pcd = mesh.sample_points_poisson_disk(number_of_points=500, init_factor=5)
o3d.visualization.draw_geometries([pcd])
pcd = mesh.sample_points_uniformly(number_of_points=2500)
pcd = mesh.sample_points_poisson_disk(number_of_points=500, pcl=pcd)
o3d.visualization.draw_geometries([pcd])
# +
# Same two variants on the bunny mesh.
bunny = o3d.data.BunnyMesh()
mesh = o3d.io.read_triangle_mesh(bunny.path)
mesh.compute_vertex_normals()
pcd = mesh.sample_points_poisson_disk(number_of_points=500, init_factor=5)
o3d.visualization.draw_geometries([pcd])
pcd = mesh.sample_points_uniformly(number_of_points=2500)
pcd = mesh.sample_points_poisson_disk(number_of_points=500, pcl=pcd)
o3d.visualization.draw_geometries([pcd])
# -
# ## Mesh subdivision
# In mesh subdivision we divide each triangle into a number of smaller triangles. In the simplest case, we compute the midpoint of each side per triangle and divide the triangle into four smaller triangles. This is implemented in the `subdivide_midpoint` function. The 3D surface and area stays the same, but the number of vertices and triangles increases. The parameter `number_of_iterations` defines how many times this process should be repeated.
# Midpoint subdivision of a box: each triangle is split into four at its
# edge midpoints; surface and area are unchanged.
mesh = o3d.geometry.TriangleMesh.create_box()
mesh.compute_vertex_normals()
print(
    f'The mesh has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)
mesh = mesh.subdivide_midpoint(number_of_iterations=1)
print(
    f'After subdivision it has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)
# Open3D implements an additional subdivision method based on [\[Loop1987\]](../reference.html#Loop1987). The method is based on a quartic box spline, which generates $C^2$ continuous limit surfaces everywhere except at extraordinary vertices where they are $C^1$ continuous. This leads to smoother corners.
# Loop subdivision of a sphere (2 iterations).
mesh = o3d.geometry.TriangleMesh.create_sphere()
mesh.compute_vertex_normals()
print(
    f'The mesh has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)
mesh = mesh.subdivide_loop(number_of_iterations=2)
print(
    f'After subdivision it has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)
# Loop subdivision of the knot mesh (1 iteration).
knot_mesh = o3d.data.KnotMesh()
mesh = o3d.io.read_triangle_mesh(knot_mesh.path)
mesh.compute_vertex_normals()
print(
    f'The mesh has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)
mesh = mesh.subdivide_loop(number_of_iterations=1)
print(
    f'After subdivision it has {len(mesh.vertices)} vertices and {len(mesh.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh], zoom=0.8, mesh_show_wireframe=True)
# ## Mesh simplification
# Sometimes we want to represent a high-resolution mesh with fewer triangles and vertices, but the low-resolution mesh should still be close to the high-resolution mesh. For this purpose Open3D implements a number of mesh simplification methods.
# ### Vertex clustering
# The vertex clustering method pools all vertices that fall into a voxel of a given size to a single vertex. The method is implemented in `simplify_vertex_clustering` and has as parameters `voxel_size` that defines the size of the voxel grid and `contraction` that defines how the vertices are pooled. `o3d.geometry.SimplificationContraction.Average` computes a simple average.
# +
bunny = o3d.data.BunnyMesh()
# BUG FIX: the freshly loaded bunny was previously bound to `mesh`, while every
# statement below operates on `mesh_in` -- which at this point still held the
# noisy knot mesh from the filtering section. Bind the bunny to `mesh_in` so
# the clustering (and the decimation cell that follows) simplify the bunny.
mesh_in = o3d.io.read_triangle_mesh(bunny.path)
mesh_in.compute_vertex_normals()
print(
    f'Input mesh has {len(mesh_in.vertices)} vertices and {len(mesh_in.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh_in])
# Vertex clustering: pool all vertices inside each voxel (1/32 of the
# bounding-box extent) into a single averaged vertex.
voxel_size = max(mesh_in.get_max_bound() - mesh_in.get_min_bound()) / 32
print(f'voxel_size = {voxel_size:e}')
mesh_smp = mesh_in.simplify_vertex_clustering(
    voxel_size=voxel_size,
    contraction=o3d.geometry.SimplificationContraction.Average)
print(
    f'Simplified mesh has {len(mesh_smp.vertices)} vertices and {len(mesh_smp.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh_smp])
# Repeat with a coarser grid (1/16 of the extent) for a stronger reduction.
voxel_size = max(mesh_in.get_max_bound() - mesh_in.get_min_bound()) / 16
print(f'voxel_size = {voxel_size:e}')
mesh_smp = mesh_in.simplify_vertex_clustering(
    voxel_size=voxel_size,
    contraction=o3d.geometry.SimplificationContraction.Average)
print(
    f'Simplified mesh has {len(mesh_smp.vertices)} vertices and {len(mesh_smp.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh_smp])
# -
# ### Mesh decimation
# Another category of mesh simplification methods is mesh decimation that operates in incremental steps. We select a single triangle that minimizes an error metric and remove it. This is repeated until a required number of triangles is achieved. Open3D implements `simplify_quadric_decimation` that minimizes error quadrics (distances to neighboring planes). The parameter `target_number_of_triangles` defines the stopping criteria of the decimation algorithm.
# +
# Quadric-error decimation of mesh_in down to a target triangle count;
# the second pass uses a coarser target for a stronger reduction.
mesh_smp = mesh_in.simplify_quadric_decimation(target_number_of_triangles=6500)
print(
    f'Simplified mesh has {len(mesh_smp.vertices)} vertices and {len(mesh_smp.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh_smp])
mesh_smp = mesh_in.simplify_quadric_decimation(target_number_of_triangles=1700)
print(
    f'Simplified mesh has {len(mesh_smp.vertices)} vertices and {len(mesh_smp.triangles)} triangles'
)
o3d.visualization.draw_geometries([mesh_smp])
# -
# ## Connected components
# The result of various reconstruction methods. Open3D implements a connected components algorithm `cluster_connected_triangles` that assigns each triangle to a cluster of connected triangles. It returns for each triangle the index of the cluster in `triangle_clusters`, and per cluster the number of triangles in `cluster_n_triangles` and the surface area of the cluster in `cluster_area`.
#
# This is useful in for instance [RGBD Integration](../pipelines/rgbd_integration.ipynb), which is not always a single triangle mesh, but a number of meshes. Some of the smaller parts are due to noise and we most likely want to remove them.
#
# The code below shows the application of `cluster_connected_triangles` and how it can be used to remove spurious triangles.
# +
print("Generate data")
bunny = o3d.data.BunnyMesh()
mesh = o3d.io.read_triangle_mesh(bunny.path)
mesh.compute_vertex_normals()
mesh = mesh.subdivide_midpoint(number_of_iterations=2)
vert = np.asarray(mesh.vertices)
min_vert, max_vert = vert.min(axis=0), vert.max(axis=0)
for _ in range(30):
cube = o3d.geometry.TriangleMesh.create_box()
cube.scale(0.005, center=cube.get_center())
cube.translate(
(
np.random.uniform(min_vert[0], max_vert[0]),
np.random.uniform(min_vert[1], max_vert[1]),
np.random.uniform(min_vert[2], max_vert[2]),
),
relative=False,
)
mesh += cube
mesh.compute_vertex_normals()
print("Show input mesh")
o3d.visualization.draw_geometries([mesh])
# -
print("Cluster connected triangles")
with o3d.utility.VerbosityContextManager(
o3d.utility.VerbosityLevel.Debug) as cm:
triangle_clusters, cluster_n_triangles, cluster_area = (
mesh.cluster_connected_triangles())
triangle_clusters = np.asarray(triangle_clusters)
cluster_n_triangles = np.asarray(cluster_n_triangles)
cluster_area = np.asarray(cluster_area)
print("Show mesh with small clusters removed")
mesh_0 = copy.deepcopy(mesh)
triangles_to_remove = cluster_n_triangles[triangle_clusters] < 100
mesh_0.remove_triangles_by_mask(triangles_to_remove)
o3d.visualization.draw_geometries([mesh_0])
print("Show largest cluster")
mesh_1 = copy.deepcopy(mesh)
largest_cluster_idx = cluster_n_triangles.argmax()
triangles_to_remove = triangle_clusters != largest_cluster_idx
mesh_1.remove_triangles_by_mask(triangles_to_remove)
o3d.visualization.draw_geometries([mesh_1])
| docs/jupyter/geometry/mesh.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../../../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
# # _*Quantum algorithm for linear system of equations*_
#
# The latest version of this notebook is available on https://github.com/qiskit/qiskit-tutorial.
#
# ***
# ### Contributors
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> and <NAME>$^{1}$
#
# 1. Institute of Fundamental and Frontier Sciences, University of Electronic Science and Technology of China,Chengdu, China,610051
# ***
# ## Introduction
# Solving linear equations is a very common problem in the fields of numerical optimization and machine learning. With the rapid expansion of data sets, solving linear equations becomes more and more difficult for classical computer. However, quantum computers can be used for solving linear systems of algebraic equations with exponential speed up compared with classical computers. Therefore, the use of quantum computers to solve the system of linear equations can greatly reduce the computational complexity and time complexity.
#
# #### Experiment design
# <p>We have a Hermitian $N \times N$ matrix $A$, and a unit vector $\vec b$, suppose we would like to find $x$ satisfying $A\vec x = \vec b$.[[1]](#cite_1) <br />(1)Represent $b$ as a quantum state $|b\rangle = \sum_{i=1}^{N} b_i |i\rangle$.<br />(2) Apply the conditional Hamiltonian evolution $e^{iAt}$ to $|b\rangle$ for a superposition of different times $t$. With the phase estimation algorithm, we can decompose $|b\rangle$ in the eigenbasis of $A$ and to find the corresponding eigenvalues $\lambda_j$. After this stage, the state of the system is close to $\sum_{j=1}^{N} \beta_j |u_j\rangle |\lambda_j\rangle$, where $u_j$ is the eigenvector basis of $A$, and $|b\rangle = \sum_{j=1}^{N}\beta_j |u_j\rangle$. <br />(3)Uncompute the $|\lambda_j\rangle$ register and we get a state which is propotional to $\sum_{j=1}^{N} \beta_j \lambda_{j}^{-1}|u_j\rangle = A^{-1}|b\rangle = |x\rangle$.</p>
# The schematic diagram of quantum K-Means is the following picture.[[2]](#cite_2) And to make our algorithm can be run using qiskit, we design a more detailed circuit to achieve our algorithm in the next section.
# <img src="../images/hhl_1.jpg" width="400 px">
# #### Quantum K-Means circuit
# <img src="../images/hhl_2.png">
# #### Parameter declaration
# Here we set the parameters as follows; the aim is to ensure the precision.[[3]](#cite_3) <br />
# $r = 4$, $t_0=2\pi$, $S = \begin{pmatrix}1 & 0 \\ 0 & i\end{pmatrix}$, $H = \frac{1}{\sqrt{2}}\begin{pmatrix}1 & 1 \\ 1 & -1\end{pmatrix}$,
# $R_y(\theta) = \begin{pmatrix}cos(\theta/2) & -sin(\theta/2) \\ sin(\theta/2) & cos(\theta/2)\end{pmatrix}$
#
# ## Data declaration
# The aim of this algorithm is to solve the linear system of equations. As a demo, we take a linear equation as $A \vec x = \vec b$. In this equation, for example, we define the matrix $A$ as $A = \begin{pmatrix}2 & -1 \\ -1 & 2\end{pmatrix}$ and $\vec b$ as $\vec b=\begin{pmatrix}0 \\ 1\end{pmatrix}$. After taking $A$ and $\vec b$ as input, we implement the algorithm to obtain the result vector $\vec x$.
# ## Quantum algorithm for linear system of equations program
# +
# import math lib
from math import pi
# import Qiskit
from qiskit import Aer, execute
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
# import basic plot tools
from qiskit.tools.visualization import plot_histogram
# -
# To use local qasm simulator
# Use the local Aer QASM simulator backend for all executions below.
backend = Aer.get_backend('qasm_simulator')
# In this section, we first check the version of Python and import the qiskit and math packages needed by the following code. We run our algorithm on the ibm_qasm_simulator; if you need to run it on a real quantum computer, please remove the "#" in front of "import Qconfig" and the set_api() function.
# +
# create Quantum Register called "qr" with 4 qubits
qr = QuantumRegister(4, name="qr")
# create Classical Register called "cr" with 4 bits
cr = ClassicalRegister(4, name="cr")
# Creating Quantum Circuit called "qc" involving your Quantum Register "qr"
# and your Classical Register "cr"
qc = QuantumCircuit(qr, cr, name="solve_linear_sys")
# Counters for the two outcomes of interest, accumulated over the 10 runs.
# NOTE(review): gates are appended to the *same* circuit on every iteration
# after qc.reset(qr), so the circuit grows each pass -- confirm intentional.
# u1/cu1/cu3 and result.get_data are legacy Qiskit Terra APIs.
n0 = 0
n1 = 0
for i in range(10):
    # Set the input |b> state
    qc.x(qr[2])
    # Set the phase estimation circuit
    qc.h(qr[0])
    qc.h(qr[1])
    qc.u1(pi, qr[0])
    qc.u1(pi/2, qr[1])
    qc.cx(qr[1], qr[2])
    # The quantum inverse Fourier transform
    qc.h(qr[0])
    qc.cu1(-pi/2, qr[0], qr[1])
    qc.h(qr[1])
    # R(lambda^-1) rotation onto the ancilla qr[3]
    qc.x(qr[1])
    qc.cu3(pi/16, 0, 0, qr[0], qr[3])
    qc.cu3(pi/8, 0, 0, qr[1], qr[3])
    # Uncomputation (reverse of the phase-estimation block)
    qc.x(qr[1])
    qc.h(qr[1])
    qc.cu1(pi/2, qr[0], qr[1])
    qc.h(qr[0])
    qc.cx(qr[1], qr[2])
    qc.u1(-pi/2, qr[1])
    qc.u1(-pi, qr[0])
    qc.h(qr[1])
    qc.h(qr[0])
    # To measure the whole quantum register
    qc.measure(qr[0], cr[0])
    qc.measure(qr[1], cr[1])
    qc.measure(qr[2], cr[2])
    qc.measure(qr[3], cr[3])
    job = execute(qc, backend=backend, shots=8192,)
    result = job.result()
    # Get the sum of all results (Qiskit orders the bitstring cr[3]..cr[0])
    n0 = n0 + result.get_data("solve_linear_sys")['counts']['1000']
    n1 = n1 + result.get_data("solve_linear_sys")['counts']['1100']
    # print the result
    print(result)
    # print(result.get_data(qc))
    plot_histogram(result.get_counts())
    # Reset the circuit
    qc.reset(qr)
# calculate the ratio of the elements in the result vector and print it
p = n0/n1
print(n0)
print(n1)
print(p)
# -
# ## Result analysis
# According to the result, we can obtain the scale of two elements in $\vec x$. Via features of linear system, we can get
# $A\vec x=A\begin{pmatrix}a_1 \\ ka_2\end{pmatrix}=\vec b=\begin{pmatrix}0 \\ 1\end{pmatrix}$. So we can get the answer, $\vec x=\begin{pmatrix}0.32665 \\0.67335\end{pmatrix} $.<br /> For more examples, we test different values of $\vec b$ and obtain the answers for $\vec x$ in the following table: <br />
# <img src="../images/hhl_3.png" width="500 px">
# And the statistical results are shown below:
# <img src="../images/hhl_4.png" width="500 px">
# ## Reference
# <cite>[1]Quantum Algorithm for Linear Systems of Equations(see [<NAME>, <NAME>, and <NAME>
# Phys. Rev. Lett. 103, 150502](https://journals.aps.org/prl/abstract/10.1103/PhysRevLett.103.150502))</cite><a id='cite_1'></a><br \>
# <cite>[2]Quantum circuit design for solving linear systems of equations(see [<NAME>,<NAME>,<NAME> & <NAME>](https://www.tandfonline.com/doi/abs/10.1080/00268976.2012.668289))</cite><a id='cite_2'></a><br \>
# <cite>[3]Experimental realization of quantum algorithm for solving linear systems of equations(see [<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and Jiangfeng Du Phys. Rev. A 89, 022313](https://journals.aps.org/pra/abstract/10.1103/PhysRevA.89.022313))</cite><a id='cite_3'></a>
| community/awards/teach_me_qiskit_2018/quantum_machine_learning/2_HHL/Quantum Algorithm for Linear System of Equations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Imports for the Bahasa news sentiment-classification experiment.
import pandas as pd
import tensorflow as tf
import re
import numpy as np
from sklearn.utils import shuffle
from utils import *
import time
from sklearn.preprocessing import LabelEncoder
# Fix: sklearn.cross_validation was deprecated in 0.18 and removed in
# 0.20; train_test_split lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from unidecode import unidecode
from tqdm import tqdm
import pickle
# Load the labelled sentiment dataset and integer-encode the labels.
df = pd.read_csv('sentiment-news-bahasa-v5.csv')
Y = LabelEncoder().fit_transform(df.label)
df.head()
def textcleaning(string):
    """Normalise a raw text string for sentiment modelling.

    Drops hashtag/mention tokens and URLs, transliterates unicode to
    ASCII, strips everything except letters/quotes/hyphens, removes
    single-character tokens and lower-cases the result.
    """
    # Drop hashtag and mention tokens first, then strip URLs.
    kept = [tok for tok in string.split() if tok.find('#') < 0 and tok.find('@') < 0]
    cleaned = re.sub('http\S+|www.\S+', '', ' '.join(kept))
    # Transliterate to ASCII and pad sentence punctuation with spaces.
    cleaned = unidecode(cleaned).replace('.', '. ').replace(',', ', ')
    # Keep only letters, quotes, hyphens and spaces.
    cleaned = re.sub('[^\'\"A-Za-z\- ]+', ' ', cleaned)
    # Re-tokenise, drop single-character tokens, and lower-case.
    tokens = re.findall("[\\w']+|[;:\-\(\)&.,!?\"]", cleaned)
    return ' '.join(tok for tok in tokens if len(tok) > 1).lower()
# Clean every row's text in place (column index 1 holds the raw text).
for i in range(df.shape[0]):
    df.iloc[i,1] = textcleaning(df.iloc[i,1])
# +
# Load the translated polarity corpora: negatives labelled 0,
# positives labelled 1, then append the news dataset and its labels.
with open('polarity-negative-translated.txt','r') as fopen:
    texts = fopen.read().split('\n')
labels = [0] * len(texts)
with open('polarity-positive-translated.txt','r') as fopen:
    positive_texts = fopen.read().split('\n')
labels += [1] * len(positive_texts)
texts += positive_texts
texts += df.iloc[:,1].tolist()
labels += Y.tolist()
# Sanity check: exactly one label per text.
assert len(labels) == len(texts)
# -
# Build the vocabulary over the whole corpus (one id per unique token).
concat = ' '.join(texts).split()
vocabulary_size = len(list(set(concat)))
data, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size)
print('vocab from size: %d'%(vocabulary_size))
print('Most common words', count[4:10])
print('Sample data', data[:10], [rev_dictionary[i] for i in data[:10]])
def str_idx(corpus, dic, maxlen, UNK=3):
    """Convert a list of sentences into a right-aligned id matrix.

    Each row holds the ids of the first ``maxlen`` whitespace tokens of
    one sentence, right-aligned in the row (left-padded with zeros).
    Out-of-vocabulary tokens map to ``UNK``.

    Parameters
    ----------
    corpus : list of str
        Whitespace-tokenised sentences.
    dic : dict
        Token -> integer id mapping.
    maxlen : int
        Fixed row width.
    UNK : int
        Id for unknown tokens (default 3).

    Returns
    -------
    np.ndarray of shape (len(corpus), maxlen)
    """
    X = np.zeros((len(corpus), maxlen))
    for i, sentence in enumerate(corpus):
        # Walk the truncated tokens from the end so ids end up
        # right-aligned in the row.
        for no, token in enumerate(sentence.split()[:maxlen][::-1]):
            # dict.get replaces the original broad try/except, which
            # silently swallowed unrelated errors.
            X[i, -1 - no] = dic.get(token, UNK)
    return X
class Model:
    """Bahdanau-attention LSTM sentiment classifier (TF1 static graph).

    Builds embedding -> multi-layer LSTM with additive attention ->
    linear softmax head, plus an Adam training op and accuracy metric.
    """
    def __init__(self, size_layer, num_layers, dimension_output, learning_rate, dropout,
                 dict_size):
        # Factory for dropout-wrapped LSTM cells.
        def cells(size, reuse=False):
            return tf.contrib.rnn.DropoutWrapper(
                tf.nn.rnn_cell.LSTMCell(
                    size,
                    initializer=tf.orthogonal_initializer(),
                    reuse=reuse),
                state_keep_prob=dropout,
                output_keep_prob=dropout)
        # X: [batch, time] token ids; Y: [batch] integer class labels.
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None])
        # Trainable word embeddings, uniformly initialised in [-1, 1].
        encoder_embeddings = tf.Variable(tf.random_uniform([dict_size, size_layer], -1, 1))
        encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
        # Bahdanau (additive) attention over the embedded inputs.
        attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(
            num_units = size_layer,
            memory = encoder_embedded)
        # Multi-layer LSTM wrapped with the attention mechanism;
        # alignment_history=True records attention weights per step.
        rnn_cells = tf.contrib.seq2seq.AttentionWrapper(
            cell = tf.nn.rnn_cell.MultiRNNCell([cells(size_layer) for _ in range(num_layers)]),
            attention_mechanism = attention_mechanism,
            attention_layer_size = size_layer,
            alignment_history=True)
        outputs, last_state = tf.nn.dynamic_rnn(rnn_cells, encoder_embedded, dtype = tf.float32)
        # Stacked attention alignments, transposed for later inspection.
        self.alignments = tf.transpose(last_state.alignment_history.stack(),[1,2,0])
        # Linear classification head on the final time-step output.
        W = tf.get_variable('w',shape=(size_layer, dimension_output),
                            initializer=tf.glorot_uniform_initializer())
        b = tf.get_variable('b',shape=(dimension_output),
                            initializer=tf.zeros_initializer())
        self.logits = tf.add(tf.matmul(outputs[:,-1], W),b,name='logits')
        # Cross-entropy loss, Adam optimiser and batch accuracy.
        self.cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits = self.logits, labels = self.Y))
        self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost)
        correct_pred = tf.equal(tf.argmax(self.logits, 1,output_type=tf.int32), self.Y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        # Softmax-normalised attention weights of the first example,
        # exported under the node name 'alphas' for the frozen graph.
        self.attention = tf.nn.softmax(tf.reduce_sum(self.alignments[0],1),name='alphas')
# +
# Hyperparameters.
size_layer = 256
num_layers = 2
dropout = 0.8
dimension_output = 2
learning_rate = 1e-4
batch_size = 32
maxlen = 100
# Build a fresh graph/session, initialise variables and write an
# initial checkpoint.
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(size_layer,num_layers,dimension_output,learning_rate,dropout,len(dictionary))
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver(tf.global_variables())
saver.save(sess, "bahdanau/model.ckpt")
# -
# Vectorise the corpus and hold out 20% for validation.
vectors = str_idx(texts, dictionary, maxlen)
train_X, test_X, train_Y, test_Y = train_test_split(vectors,
                                                    labels,
                                                    test_size = 0.2)
# Early stopping: give up after 5 epochs without validation improvement.
EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 5, 0, 0, 0
# Train until validation accuracy stops improving (early stopping).
while True:
    lasttime = time.time()
    if CURRENT_CHECKPOINT == EARLY_STOPPING:
        print('break epoch:%d\n'%(EPOCH))
        break
    train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
    # One pass over the training set in minibatches.
    pbar = tqdm(range(0, len(train_X), batch_size), desc='train minibatch loop')
    for i in pbar:
        batch_x = train_X[i:min(i+batch_size,train_X.shape[0])]
        batch_y = train_Y[i:min(i+batch_size,train_X.shape[0])]
        acc, loss, _ = sess.run([model.accuracy, model.cost, model.optimizer],
                                feed_dict = {model.X : batch_x, model.Y : batch_y})
        # Abort immediately on a diverged (NaN) loss.
        assert not np.isnan(loss)
        train_loss += loss
        train_acc += acc
        pbar.set_postfix(cost=loss, accuracy = acc)
    # Evaluation pass (no optimizer op, so weights stay fixed).
    pbar = tqdm(range(0, len(test_X), batch_size), desc='test minibatch loop')
    for i in pbar:
        batch_x = test_X[i:min(i+batch_size,test_X.shape[0])]
        batch_y = test_Y[i:min(i+batch_size,test_X.shape[0])]
        acc, loss = sess.run([model.accuracy, model.cost],
                             feed_dict = {model.X : batch_x, model.Y : batch_y})
        test_loss += loss
        test_acc += acc
        pbar.set_postfix(cost=loss, accuracy = acc)
    # Average the accumulated per-batch metrics.
    train_loss /= (len(train_X) / batch_size)
    train_acc /= (len(train_X) / batch_size)
    test_loss /= (len(test_X) / batch_size)
    test_acc /= (len(test_X) / batch_size)
    # Track the best validation accuracy for early stopping.
    if test_acc > CURRENT_ACC:
        print('epoch: %d, pass acc: %f, current acc: %f'%(EPOCH,CURRENT_ACC, test_acc))
        CURRENT_ACC = test_acc
        CURRENT_CHECKPOINT = 0
    else:
        CURRENT_CHECKPOINT += 1
    print('time taken:', time.time()-lasttime)
    print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'%(EPOCH,train_loss,
                                                                                             train_acc,test_loss,
                                                                                             test_acc))
    EPOCH += 1
    # Persist the latest weights at the end of each epoch.
    saver.save(sess, "bahdanau/model.ckpt")
# +
# Final validation pass: collect predictions for a classification report.
real_Y, predict_Y = [], []
pbar = tqdm(range(0, len(test_X), batch_size), desc='validation minibatch loop')
for i in pbar:
    batch_x = test_X[i:min(i+batch_size,test_X.shape[0])]
    batch_y = test_Y[i:min(i+batch_size,test_X.shape[0])]
    predict_Y += np.argmax(sess.run(model.logits, feed_dict = {model.X : batch_x, model.Y : batch_y}),1).tolist()
    real_Y += batch_y
# -
from sklearn import metrics
print(metrics.classification_report(real_Y, predict_Y, target_names = ['negative','positive']))
# Comma-separated names of the graph nodes to keep when freezing:
# all variables, placeholders and the exported 'logits'/'alphas' nodes.
strings=','.join([n.name for n in tf.get_default_graph().as_graph_def().node if "Variable" in n.op or n.name.find('Placeholder') >= 0 or n.name.find('logits') == 0 or n.name.find('alphas') == 0])
def freeze_graph(model_dir, output_node_names):
    """Freeze the latest checkpoint in *model_dir* into frozen_model.pb.

    Restores the newest checkpoint, folds all variables into constants,
    and serialises the resulting GraphDef next to the checkpoint files.
    """
    if not tf.gfile.Exists(model_dir):
        raise AssertionError(
            "Export directory doesn't exists. Please specify an export "
            "directory: %s" % model_dir)
    ckpt_state = tf.train.get_checkpoint_state(model_dir)
    ckpt_path = ckpt_state.model_checkpoint_path
    out_dir = "/".join(ckpt_path.split('/')[:-1])
    out_file = out_dir + "/frozen_model.pb"
    with tf.Session(graph=tf.Graph()) as sess:
        # clear_devices drops device placements so the graph loads anywhere.
        loader = tf.train.import_meta_graph(ckpt_path + '.meta', clear_devices=True)
        loader.restore(sess, ckpt_path)
        frozen = tf.graph_util.convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(),
            output_node_names.split(","))
        with tf.gfile.GFile(out_file, "wb") as f:
            f.write(frozen.SerializeToString())
        print("%d ops in the final graph." % len(frozen.node))
freeze_graph("bahdanau", strings)
def load_graph(frozen_graph_filename):
    """Read a frozen GraphDef from disk and import it into a new graph."""
    with tf.gfile.GFile(frozen_graph_filename, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    new_graph = tf.Graph()
    with new_graph.as_default():
        tf.import_graph_def(graph_def)
    return new_graph
# Load the frozen graph and grab the tensors needed for inference.
g=load_graph('bahdanau/frozen_model.pb')
x = g.get_tensor_by_name('import/Placeholder:0')
logits = g.get_tensor_by_name('import/logits:0')
alphas = g.get_tensor_by_name('import/alphas:0')
test_sess = tf.InteractiveSession(graph=g)
# Smoke test: run one training vector through the frozen graph.
test_sess.run([logits,alphas], feed_dict={x:vectors[:1]})[1].shape
# Qualitative checks on a few Bahasa sentences.
text = 'kerajaan sebenarnya sangat bencikan rakyatnya, minyak naik dan segalanya'
new_vector = str_idx([text],dictionary,len(text.split()))
test_sess.run([tf.nn.softmax(logits),alphas], feed_dict={x:new_vector})
text = 'saya sangat sayangkan kerajaan saya'
new_vector = str_idx([text],dictionary,len(text.split()))
test_sess.run([tf.nn.softmax(logits),alphas], feed_dict={x:new_vector})
text = 'bodoh lah awak ni'
new_vector = str_idx([text],dictionary,len(text.split()))
test_sess.run([tf.nn.softmax(logits),alphas], feed_dict={x:new_vector})
text = 'kerajaan sebenarnya sangat baik'
new_vector = str_idx([text],dictionary,len(text.split()))
test_sess.run([tf.nn.softmax(logits),alphas], feed_dict={x:new_vector})
# Persist the vocabulary for reuse at serving time.
import json
with open('bahdanau-sentiment.json','w') as fopen:
    fopen.write(json.dumps({'dictionary':dictionary,'reverse_dictionary':rev_dictionary}))
| session/deprecated/sentiment/bahdanau-split.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: causality
# language: python
# name: causality
# ---
# # Introduction
#
# Following up on $d$-separation, my colleagues and I chatted about how to find the confounding set of variables in a causal graph. This is another graph search problem. Let's see how this can be applied.
# +
from custom import rule1, rule2, rule3, path_nodes
import networkx as nx
import matplotlib.pyplot as plt
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# -
# From Judea Pearl's book, there is a diagram in chapter 4, `Figure 4.7`. Let's reproduce it here.
# Reproduce the causal DAG of Pearl's Figure 4.7.
G = nx.DiGraph()
edges = [
    ('D', 'A'), ('D', 'C'), ('F', 'C'),
    ('A', 'B'), ('C', 'B'), ('C', 'Y'),
    ('F', 'X'), ('F', 'Y'), ('C', 'E'),
    ('A', 'X'), ('E', 'X'), ('E', 'Y'),
    ('B', 'X'), ('X', 'Y'), ('G', 'X'),
    ('G', 'Y')
]
G.add_edges_from(edges)
# Fixed layout so the drawing matches the book's figure.
pos = {
    'D': (0, 0),
    'A': (1, 0.5),
    'C': (1, -1),
    'F': (1, -2),
    'B': (2, -0.3),
    'E': (2, 1),
    'X': (4, 0.5),
    'G': (4.5, -2),
    'Y': (5, 0.5)
}
nx.draw(G, pos=pos, with_labels=True)
# To reveal the answer, the minimum confounding set is $\{A, B, E, F, G\}$.
# What we would like to know is what is the set of confounders that we need to control for in order to correctly estimate the effect of $X$ on $Y$.
#
# To do this, we use the following logic:
#
# 1. Find all undirected paths between $X$ and $Y$.
# 1. Traverse each node in the undirected paths.
# 1. Check to see if, in the directed graph, the node blocks the path between $X$ and $Y$ if it were in the conditioning set.
# 1. If yes, then it should be included as a confounder. Break out and continue on to next path.
# 1. If no, it should not be included as a confounder.
# +
# Confounder search for the X -> Y effect: walk every undirected path
# between X and Y and keep the first node on each path that would block
# it under the d-separation rules (rule1/rule2/rule3 from custom).
Gpath = G.to_undirected()
confounders = set()
n1 = 'X'
n2 = 'Y'
for i, path in enumerate(nx.all_simple_paths(Gpath, n1, n2)):
    for n in path:
        # The endpoints themselves can never be confounders.
        if n is not n1 and n is not n2:
            pass1 = rule1(n, [n], G, path)
            pass2 = rule2(n, [n], G, path)
            pass3 = rule3(n, [], G, path)
            if pass1 or pass2 or pass3:
                confounders.add(n)
                # We break, because as soon as we find a good
                # blocking node, there is no need to continue
                # looking at other nodes.
                break
confounders
# -
# We did it!
| notebooks/04-finding-confounding-set.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="tTMGgF43-aXL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"status": "ok", "timestamp": 1592577148100, "user_tz": -420, "elapsed": 2732, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12268042113551763489"}} outputId="0c721422-6f62-4d5c-c206-49b5183d03aa"
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
# Number of bedrooms per house (feature).
bedrooms = np.array([1,1,2,2,3,4,4,5,5,5])
# House prices, assumed to be in dollars (target).
house_price = np.array([15000, 18000, 27000, 34000, 50000, 68000, 65000, 81000,85000, 90000])
# Scatter plot of the dataset together with the fitted regression line.
# %matplotlib inline
# scikit-learn expects a 2-D feature matrix, hence the reshape.
bedrooms = bedrooms.reshape(-1, 1)
linreg = LinearRegression()
linreg.fit(bedrooms, house_price)
plt.scatter(bedrooms, house_price)
plt.plot(bedrooms, linreg.predict(bedrooms))
| LinearRegression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dl
# language: python
# name: dl
# ---
# +
import numpy as np
import random
import cv2
import matplotlib.pyplot as plt
from sklearn.metrics import precision_score, recall_score
from sklearn.metrics import jaccard_score
import staple
# +
def show(src, titles=[], suptitle="",
         bwidth=4, bheight=4, save_file=False,
         show_axis=True, show_cbar=False):
    """Render the images in *src* side by side on a single row.

    titles/suptitle label the panels, bwidth/bheight size each panel,
    save_file optionally writes the figure to disk, and show_cbar may
    be a single bool (all panels) or a per-panel sequence of bools.
    """
    n_panels = len(src)
    plt.figure(figsize=(bwidth * n_panels, bheight))
    plt.suptitle(suptitle)
    for col in range(n_panels):
        plt.subplot(1, n_panels, col + 1)
        if not show_axis:
            plt.axis("off")
        if col < len(titles):
            plt.title(titles[col])
        plt.imshow(src[col]*1)
        # show_cbar: one flag for every panel, or one flag per panel.
        if type(show_cbar) is bool:
            if show_cbar:
                plt.colorbar()
        elif col < len(show_cbar) and show_cbar[col]:
            plt.colorbar()
    plt.tight_layout()
    if save_file:
        plt.savefig(save_file)
def show2(src, titles=[], suptitle="",
          bwidth=4, bheight=4, save_file=False,
          show_axis=True, show_cbar=False):
    """Render the images in *src* on a two-row grid (len(src)//2 per row).

    Same options as show(); assumes an even number of images.
    """
    n_panels = len(src)
    plt.figure(figsize=(bwidth * n_panels//2, bheight*2))
    plt.suptitle(suptitle)
    for col in range(n_panels):
        plt.subplot(2, n_panels//2, col + 1)
        if not show_axis:
            plt.axis("off")
        if col < len(titles):
            plt.title(titles[col])
        plt.imshow(src[col]*1)
        # show_cbar: one flag for every panel, or one flag per panel.
        if type(show_cbar) is bool:
            if show_cbar:
                plt.colorbar()
        elif col < len(show_cbar) and show_cbar[col]:
            plt.colorbar()
    plt.tight_layout()
    if save_file:
        plt.savefig(save_file)
# -
# # Data
# Problem setup: R noisy segmentations of an l x l ground-truth ring.
R = 20 #num of segs
l = 40
N = l*l #num of image pixels
# Ground truth: circle outline of radius 10, line thickness 3.
gt = np.zeros((l, l))
gt = cv2.circle(gt, (l//2, l//2), 10, (1), 3)
plt.imshow(gt)
plt.title("GT");
# Build R corrupted copies of the ground truth with increasing
# salt-and-pepper noise (uniform or gaussian mask, alternating) and,
# for some raters, a saturated quadrant to simulate systematic bias.
np.random.seed(2021)
p = 0.05
segs = []
metrics = []
for idx in range(R):
    p += (idx/500)
    noise_type = idx % 2
    seg = gt.copy()
    if noise_type == 0:
        mask = np.random.rand(l,l)
    if noise_type == 1:
        mask = np.random.normal(loc=127, scale=100, size=(l, l)).astype(np.uint8)/255
    if idx > 3:
        # Randomly saturate one quadrant for some of the raters.
        idr = np.random.randint(0, 10)
        if idr == 0:
            mask[:l//2+int(p*50), :l//2+int(p*50)] = 1
        if idr == 1:
            mask[l//2-int(p*50):, :l//2] = 1
        if idr == 2:
            mask[:l//2+int(p*50), l//2:] = 1
        if idr == 3:
            mask[l//2-int(p*50):, l//2:] = 1
    salt = mask < p
    peper = mask > 1-(2*p)+(p**2)
    seg[salt] = 1
    seg[peper] = 0
    segs.append(seg)
    # Per-rater quality metrics against the ground truth.
    recall = recall_score(gt.ravel(), seg.ravel())
    prec = precision_score(gt.ravel(), seg.ravel())
    jaccard = jaccard_score(gt.ravel(), seg.ravel())
    metrics.append([recall, prec, jaccard])
segs = np.array(segs)
metrics = np.array(metrics)
show2(segs, show_axis=False)
# Visualise a slice of the flattened rater decisions (the "D matrix").
plt.figure(figsize=(30, 20))
plt.imshow(segs.reshape(segs.shape[0], -1)[:, 200:600])#.shape
plt.xlabel("Pixel ID")
plt.ylabel("Segmentation ID")
plt.title("D Matrix")
# # STAPLE
## Note: metrics columns are [recall, prec, jaccard]
# Flattened ground truth.
T = gt.reshape(-1)
# Rater decision matrix, one flattened segmentation per row.
# Fix: the original read "D = D = segs..." - a duplicated assignment.
D = segs.reshape(segs.shape[0], -1).copy()
# Per-rater sensitivity (recall) and specificity proxy (precision).
p = metrics[:, 0]
q = metrics[:, 1]
# Majority-vote baseline across raters.
MV = (segs.sum(0)>= segs.shape[0]//2)
# +
# Run STAPLE and compare thresholded consensus maps with GT and MV.
staple_res = staple.STAPLE(list(segs)).run()
show([gt, MV, staple_res, staple_res>0.1, staple_res>0.3, staple_res>0.5, staple_res>0.8],
     titles=["GT", "MV", "STAPLE Raw", "STAMPLE 0.1", "STAMPLE 0.3", "STAMPLE 0.5", "STAMPLE 0.8"])
# -
# # Non Weight Simple
# +
# Unweighted iterative consensus: repeatedly majority-vote, score each
# rater by Dice against the vote, and drop raters more than alfa
# standard deviations below the mean Dice.
new_segs = segs.copy()
results, n_segs = [], []
alfa = 1
mdices = []
for epoch in range(6):
    MV = (new_segs.sum(0)>= new_segs.shape[0]//2)
    results.append(MV)
    n_segs.append(len(new_segs))
    seg_score = []
    for seg in new_segs:
        jac = jaccard_score(MV.ravel(), seg.ravel())
        # Dice from Jaccard: D = 2J / (1 + J).
        dice = (2*jac)/ (1+jac)
        seg_score.append([dice, seg])
    seg_score = np.array(seg_score, dtype=object)
    #sorted_seg = np.array(sorted(seg_score, key=lambda x:x[0], reverse=True))
    wdice = seg_score[:, 0].sum()
    mdice = seg_score[:, 0].mean()
    sdice = seg_score[:, 0].std()
    # Keep only raters scoring above mean - alfa*std.
    t = mdice - (alfa*sdice)
    new_segs = np.array((list(filter(lambda x: x[0]> t, seg_score))))[:, 1]
show([gt] + results, show_axis=False,titles=['GT'] + [f"Iter {idx}, N {n}" for idx, n in enumerate(n_segs)])
# -
# # SIMPLE
# +
# SIMPLE: like the loop above, but the vote is weighted by each rater's
# Dice score from the previous iteration.
new_segs = segs.copy()
results, n_segs = [], []
alfa = 0.2
MV = (new_segs.sum(0)>= new_segs.shape[0]//2)
new_weights = np.ones(len(new_segs))
mdices = []
for epoch in range(6):
    # Weighted majority vote across the surviving raters.
    MV = (new_segs.transpose(1,2,0) * new_weights).sum(2) >= (new_weights.sum()/2)
    results.append(MV)
    n_segs.append(len(new_segs))
    dices = []
    for seg in new_segs:
        jac = jaccard_score(MV.ravel(), seg.ravel())
        dice = (2*jac)/ (1+jac)
        dices.append(dice)
    dices = np.array(dices)
    #sorted_seg = np.array(sorted(seg_score, key=lambda x:x[0], reverse=True))
    wdice = dices.sum()
    mdice = dices.mean()
    sdice = dices.std()
    mdices.append(mdice)
    # Prune raters below mean - alfa*std; surviving Dice scores become
    # the new vote weights.
    t = mdice - (alfa * sdice)
    new_segs = new_segs[dices > t]
    new_weights = dices[dices > t]
    if len(new_segs)==1:
        break
show([gt] + results, show_axis=False,
     titles=['GT'] + [f"Iter {idx}, N {n}, Dice: {mdice:0.2f}" for idx, (n, mdice) in enumerate(zip(n_segs, mdices))])
| books/consensus.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
returned_date = list(map(int,input().split(' ')))
expected_date = list(map(int,input().split(' ')))
fine = 0
if returned_date[2] > expected_date[2]:
fine = 10000
elif returned_date[2] == expected_date[2]:
if returned_date[1] > expected_date[1]:
fine = (returned_date[1] - expected_date[1])*500
elif returned_date[1] == expected_date[1]:
if returned_date[0] > expected_date[0]:
fine = (returned_date[0] - expected_date[0])*15
print(fine)
# -
| hacker-rank/30 Days of Code/26. Nested Logic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from helper_news import *
import pandas as pd
# Query the 'all_news' index for the keyword and pull the top stories.
r = search("individu", index = "all_news")
ddf, n, msc = get_top_news(r, category = "News")
# ddf, n, msc = get_top_news(r, category = "FakeNewsAlert")
# +
import datetime
from elasticsearch import Elasticsearch
# Elasticsearch index holding the scraped news articles.
INDEX_NAME = 'all_news'
ec_conn= Elasticsearch('http://localhost:9200')
ec_conn
# Filter query: only articles from the 'sebenarnya' vendor.
query = {
    "bool":{
        "filter":[
            {
                "terms":{
                    "news_vendor":[
                        "sebenarnya",
                    ]
                }
            },
        ]
    }
}
# Fetch up to 3000 matching documents.
x = ec_conn.search(index='all_news', body = {"query": query, "size" : 3000})['hits']['hits'] #[0]['_source'].keys()
# -
# +
# x[0]
# -
# Extract publication timestamps, shift them by 8 hours (presumably
# UTC -> Malaysia time; confirm against the index mapping), and count
# articles per calendar day.
news_date = [j.get("_source").get("news_date") for j in x]
df = pd.DataFrame({"news_date": news_date}) #.groupby("news_date").count()
df["news_date"] = pd.to_datetime(df["news_date"])
df["news_date_2"] = df["news_date"] + pd.Timedelta('8 hours')
df["news_date"] = df["news_date_2"].dt.date
df = df.groupby(["news_date"]).count()
df.columns = ["news_count"]
df = df.reset_index()
import plotly.express as px
from plotly.offline import plot
# +
# # !conda install psutil -y
# -
# Plot the daily article counts over time.
fig = px.line(df, x="news_date", y="news_count", title='SEBENARNYA.MY news count over time')
# fig.write_image("fig1.png")
plot(fig)
| FFA_App/app/visualize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: stringbo
# language: python
# name: stringbo
# ---
# ### Script applying SSK BO over a space with context-free grammar constraints
# ### We demonstrate it on a symbolic regression task
import numpy as np
from numpy import *
import emukit
import re
import matplotlib.pyplot as plt
from emukit.core.initial_designs import RandomDesign
from emukit.core import ParameterSpace
from emukit.core.optimization import RandomSearchAcquisitionOptimizer
from emukit.bayesian_optimization.loops import BayesianOptimizationLoop
from emukit.bayesian_optimization.acquisitions import ExpectedImprovement
from emukit.core.loop import FixedIterationsStoppingCondition
import warnings
warnings.filterwarnings('ignore')
# %load_ext autoreload
# %autoreload 2
#import our code
from stringbo.code.CFG.CFG import Grammar
from stringbo.code.parameters.cfg_parameter import CFGParameter
from stringbo.code.parameters.cfg_parameter import unparse
from stringbo.code.optimizers.GrammarGeneticAlgorithmAcquisitionOptimizer import GrammarGeneticProgrammingOptimizer
from stringbo.code.emukit_models.emukit_ssk_model import SSK_model
# # Define problem (objective and space)
# +
# define grammar rules
# define our arithmetic grammar
# note that we have to map ( -> lb and ) -> rb
# as the tree kernel reads these sybmols to seperate branches
# also require 'dummy' productions for arithmetic expressions
# as our parser in the kernel requires all terminal nodes to be connected to a single node
# e.g.
# S
# / | \
# S ADD T
# | | |
# T "a" "1"
# |
# "x"
# is the string "x + 1"
# and is represented as '(S (S (T x)) (ADD a) (T 1))'
# Arithmetic CFG; '(' / ')' are encoded as the terminals "lb" / "rb",
# and sin/exp each open an implicit '(' that a following "rb" closes
# (the string rewriting happens inside objective()).
grammar = Grammar.fromstring("""
S -> S ADD T | S TIMES T | S DIVIDE T | T
T -> LB S RB | SIN S RB | EXP S RB
ADD -> "+"
TIMES -> "*"
DIVIDE -> "/"
LB -> "lb"
RB -> "rb"
SIN -> "sin"
EXP -> "exp"
T -> "x" | "1" | "2" | "3"
""")
# +
# True expression
# This is the target expression that we wish to learn
true = '1/3+x+sin(x*x)'
x = np.linspace(-10,10,1000)
y = np.array(eval(true))
# Objective function
# we wish to find symbolic expressions X that are close in MSE error between X(x) and true y
#
# Following Grammar VAE paper we put in a hard limit on worse MSE
# and optimize log(1+MSE), as exponential terms in expressions can give
# large MSE, which are hard to model with a GP
def objective(X):
    """Score candidate expressions: log(1 + MSE clipped at 1000) vs y.

    X is a 2-D array of raw grammar strings (lb/rb/sin/exp tokens);
    returns an (n, 1) array of scores (lower is better).
    """
    #X needs to come in as a 2d numpy array in raw form '3*x+2'
    X=np.atleast_2d(X)
    f_evals=np.zeros((X.shape[0],1))
    for i in range(X.shape[0]):
        # Rewrite the grammar tokens into valid numpy syntax; "rb" is
        # turned into ')' first, so the '(' appended after "sin"/"exp"
        # is correctly matched.
        string = X[i][0]
        string = string.replace(" ","")
        string = string.replace("lb","(")
        string = string.replace("rb",")")
        string = string.replace("exp","exp(")
        string = string.replace("sin","sin(")
        # hard limit of 1000 on the MSE before taking log(1 + .)
        result = np.log(1+np.minimum(np.mean((np.array(eval(string)) - y)**2), 1000))
        if np.isnan(result):
            result = np.log(1+1000)
        f_evals[i] = result
    # return log(1+MSE) of each input X
    return f_evals
# define search space (length refers to number of terminals in strings)
length=20
min_length=3
space = ParameterSpace([CFGParameter("grammar",grammar,max_length=length,min_length=min_length)])
# -
# # Collect initial points
# Initial design: 15 grammar strings sampled uniformly at random,
# shared by every method compared below.
np.random.seed(123)
random_design = RandomDesign(space)
initial_points_count = 15
X_init = random_design.get_samples(initial_points_count)
X_init_strings = unparse(X_init)
Y_init = objective(X_init_strings)
# # Explain Methods
# +
# we perform optimziation using our SSK-approach and random search
# VAE baselines are availible for Grammar VAEs and Character VAES at https://github.com/mkusner/grammarVAE
# -
# # 1) Perform BO with SSK
# Build the BO loop around an SSK (string subsequence kernel) GP model;
# only a few restarts when fitting kernel params, for demo speed.
model = SSK_model(space,X_init_strings,Y_init,max_subsequence_length=5,n_restarts=3)
# Core elements for Bayesian optimization.
expected_improvement = ExpectedImprovement(model)
# Either a genetic algorithm or random search can optimize the
# acquisition function.
optimizer = GrammarGeneticProgrammingOptimizer(space,dynamic=True,population_size=100,tournament_prob=0.5,p_crossover= 0.8, p_mutation=0.1)
# optimizer = RandomSearchAcquisitionOptimizer(space,10000)
bayesopt_loop_SSK= BayesianOptimizationLoop(model = model,
                                            space = space,
                                            acquisition = expected_improvement,
                                            acquisition_optimizer = optimizer)
# Progress callback: print the iteration number after every BO step.
def summary(loop, loop_state):
    print("Performing BO step {}".format(loop.loop_state.iteration))
bayesopt_loop_SSK.iteration_end_event.append(summary)
# Run the BO loop for 25 steps.
np.random.seed(123)
stopping_condition = FixedIterationsStoppingCondition(i_max = 25)
bayesopt_loop_SSK.run_loop(objective, stopping_condition)
# # Perform random search
# Random-search baseline, starting from the same 15-point
# initialization used by the BO run above.
np.random.seed(123)
Y_random=np.vstack([Y_init,objective(unparse(random_design.get_samples(25)))])
# # plot results
# Plot the running best score of each method; recall that the first 15
# points are a random sample shared by all the methods.
plt.plot(np.minimum.accumulate(bayesopt_loop_SSK.loop_state.Y),label="SSk")
plt.plot(np.minimum.accumulate(Y_random),label="Random Search")
plt.yscale("log")
plt.ylabel('Current best score')
plt.xlabel('Iteration')
plt.legend()
| example_notebooks/.ipynb_checkpoints/Symbolic_Regression_Example-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rrsguim/PhD_Economics/blob/master/OutputGap_FNN_BR.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="dUeKVCYTbcyT"
# ------------------------------------------------------------------------------------------------------
# Copyright (c) 2020 <NAME>
#
# This work was done when I was at the University of California, Riverside, USA.
#
# It is part of my doctoral thesis in Economics at the Federal University of
#
# Rio Grande do Sul, Porto Alegre, Brazil.
#
#
# See full material at https://github.com/rrsguim/PhD_Economics
#
# The code below, under the Apache License, was inspired by
#
# *Classification on imbalanced data*, and
#
# *Introduction to the Keras Tuner*
#
# Copyright 2020 The TensorFlow Authors
#
# https://www.tensorflow.org/tutorials/structured_data/imbalanced_data
#
# https://www.tensorflow.org/tutorials/keras/keras_tuner
#
# -------------------------------------------------------------------------
# + [markdown] id="gJT7cOb44eDi"
# # Transfer Learning for Output Gap Estimation
# + [markdown] id="CJW6mqC85CRP"
# ##Setup
# + id="yJHVo_K_v20i"
from __future__ import absolute_import, division, print_function, unicode_literals
# + id="fYBlUQ5FvzxP"
# Colab-only: select TF 2.x via the %tensorflow_version magic; outside
# Colab the magic does not exist and any error is swallowed.
try:
    # # %tensorflow_version only exists in Colab.
    # %tensorflow_version 2.x
except Exception:
    pass
# + id="43tKdRoNecNy" colab={"base_uri": "https://localhost:8080/"} outputId="db16b073-e50e-4f46-9852-3257c2e4a17d"
# !pip install -U keras-tuner
# + id="JM7hDSNClfoK"
import tensorflow as tf
from tensorflow import keras
import IPython
import kerastuner as kt
from kerastuner import RandomSearch
import os
import tempfile
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# + id="c8o1FHzD-_y_"
# Global plot defaults: larger figures; grab the default colour cycle.
mpl.rcParams['figure.figsize'] = (12, 10)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# + [markdown] id="7JfLUlawto_D"
# ## Deep Learning | Brazil
# + [markdown] id="oG4spZ-K5MkL"
# ###Data loading and pre-processing
# + [markdown] id="Rv6BaNhL5eLQ"
# #### Download the data set
#
# #### Data
#
# There are two data sets for Brazil because of methodology changes in unemployment rate. Set 1 from 2001-IV to 2015-IV and Set 2 from 2012-II to 2020-III.
#
# >Column| Source| Description| Feature Type | Data Type
# >------------|--------------------|----------------------|-----------------|----------------
# >Unemployment | FRED-MD | UNRATE - Unemployment rate, percent, s.a., quarterly average | Numerical | float
# >Capacity | FRED-MD | TCU - Capacity utilization index, percent, s.a., quarterly average | Numerical | float
# >TFP | FRED-MD | Total Factor Productivity Level at Current Purchasing Power Parities for Brazil, Index USA = 1 | Numerical | float
# >Business cycle | CODACE | CODACE based Recession Indicator (1 = true; 0 = false) | Classification | integer
# >GAP | | Output Gap, percent | Numerical | float
# + id="pR_SnbMArXr7"
file = tf.keras.utils
# Two vintages of the Brazilian data exist because the unemployment-rate
# methodology changed in 2012; keep exactly one of the two reads active.
#raw_data = pd.read_csv('https://raw.githubusercontent.com/rrsguim/PhD_Economics/master/TL4OG/TL4OG_BR_data_to2015.csv') #to 2015 (because of Unrate methodology)
raw_data = pd.read_csv('https://raw.githubusercontent.com/rrsguim/PhD_Economics/master/TL4OG/TL4OG_BR_data_since2012.csv') #since 2012 (because of Unrate methodology)
# + id="rGVtGyAas2Hz"
# Use the DATE column as a quarterly PeriodIndex.
raw_data.index = raw_data['DATE']
drop_DATE = raw_data.pop('DATE')
raw_data.index = pd.to_datetime(raw_data.index,infer_datetime_format=True)
raw_data.index = raw_data.index.to_period("Q")
# + [markdown] id="XD4o60p47wlD"
# Observe last lines of the adjusted dataset.
# + id="OqJOIP2AY5RJ" colab={"base_uri": "https://localhost:8080/", "height": 235} outputId="5e400523-bbc8-4238-e82d-c65cbf661715"
raw_data.tail()
# + [markdown] id="A9UejVk58KJ-"
# #### Inspect pre-processed data
# + colab={"base_uri": "https://localhost:8080/", "height": 563} id="FVmTqOWr-pwg" outputId="52451def-f60e-4001-a028-84601a65e42a"
# Quick visual inspection: each series of interest in its own stacked subplot.
plot_cols = ['unrate', 'tcu', 'CODACE', 'GAP']
plot_features = raw_data[plot_cols]
plot_features.index = raw_data.index
_ = plot_features.plot(subplots=True)
# + id="brunxYyx8Nje" colab={"base_uri": "https://localhost:8080/", "height": 592} outputId="b1ca5ff1-7b1f-47a4-a3b2-ad67b1a95f1d"
# Plot the output gap with CODACE recession quarters shaded above and below
# the zero line. Only the first bar series carries a label so 'CODACE' appears
# once in the legend (the original labeled both bars, duplicating the entry).
plt.plot(drop_DATE, raw_data['GAP'], label='GAP')
plt.bar(drop_DATE, raw_data['CODACE']*-10 , width=1, linewidth=1, align='center', color="lightgray", label='CODACE')
plt.bar(drop_DATE, raw_data['CODACE']*5 , width=1, linewidth=1, align='center', color="lightgray")
plt.plot(drop_DATE, np.zeros(drop_DATE.shape[0]), color='black')  # zero reference line
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="_1qDwvzK_u5R" outputId="717a2fa4-a649-45e6-8666-27ed05ff434f"
raw_data.describe().transpose()
# + [markdown] id="n9RDMZQq8sV9"
# #### Split and shuffle
#
# Split the dataset into train, validation, and test sets. The validation set is used during the model fitting to evaluate the loss and any metrics, however, the model is not fit with this data. The test set is completely unused during the training phase and is only used at the end to evaluate how well the model generalizes to new data.
# + id="xfxhKg7Yr1-b" colab={"base_uri": "https://localhost:8080/"} outputId="5799443f-5e36-4c09-f341-3d17b8bd0b31"
# Use a utility from sklearn to split our dataset.
# 64% train / 16% validation / 20% test.
# NOTE(review): train_test_split shuffles rows, so the quarterly time ordering
# is not preserved inside the splits — each quarter is treated as an
# independent observation here; confirm this is intended for the time series.
train_df, test_df = train_test_split(raw_data, test_size=0.2, random_state=0) #
train_df, val_df = train_test_split(train_df, test_size=0.2, random_state=0)
print(len(train_df), 'train examples')
print(len(val_df), 'validation examples')
print(len(test_df), 'test examples')
# + [markdown] id="a7rZsWJ89bdf"
# #### Normalize
#
# It is important to scale features before training a neural network. Normalization is a common way of doing this scaling. Subtract the mean and divide by the standard deviation of each feature.
#
# The mean and standard deviation should only be computed using the training data so that the models have no access to the values in the validation and test sets.
#
# + id="IO-qEUmJ5JQg"
#CODACE is a dummy, so we don't normalize it. We also don't normalize the target (GAP)
# Temporarily pop the dummy and the target out of each frame ...
CODACE_train = train_df.pop('CODACE')
GAP_train = train_df.pop('GAP')
CODACE_val = val_df.pop('CODACE')
GAP_val = val_df.pop('GAP')
CODACE_test = test_df.pop('CODACE')
GAP_test = test_df.pop('GAP')
# ... standardize the remaining features using TRAINING-set statistics only,
# so no information from the validation/test sets leaks into the scaling ...
train_mean = train_df.mean()
train_std = train_df.std()
train_df = (train_df - train_mean) / train_std
val_df = (val_df - train_mean) / train_std
test_df = (test_df - train_mean) / train_std
#put CODACE and GAP back
train_df['CODACE'] = CODACE_train
train_df['GAP'] = GAP_train
val_df['CODACE'] = CODACE_val
val_df['GAP'] = GAP_val
test_df['CODACE'] = CODACE_test
test_df['GAP'] = GAP_test
# + [markdown] id="I6nWpqPlBsVn"
# #### Split features from labels
# + id="BKYPvwrvBtVy"
# Form np arrays of labels and features.
# GAP is the regression target; everything else (incl. the CODACE dummy)
# remains as a feature column.
train_labels = np.array(train_df.pop('GAP'))
val_labels = np.array(val_df.pop('GAP'))
test_labels = np.array(test_df.pop('GAP'))
train_features = np.array(train_df)
val_features = np.array(val_df)
test_features = np.array(test_df)
# + [markdown] id="rNkFB0GjUrVo"
# ### DL model
# + [markdown] id="lRgMMwGp-6cP"
# #### Define the model and metrics
#
# Define a function that creates a deep neural network with densly connected hidden layers, regularizers to reduce overfitting, and an output layer that returns the output gap estimation. Also, pick the optimal set of hyperparameters with [Keras Tunner](https://www.tensorflow.org/tutorials/keras/keras_tuner).
# + id="3JQDzUqT3UYG"
def make_model(hp):
    """Build a four-hidden-layer dense regressor for the output gap, with the
    layer width, L1 penalty, activation and learning rate exposed to
    Keras Tuner via `hp`.

    Search space: width 16..256 (step 16); lambda and learning rate from
    {1e-2, 1e-3, 1e-4}; activation from {relu, tanh, sigmoid}.
    """
    model = keras.Sequential()
    units = hp.Int('dense_units', min_value=16, max_value=256, step=16)
    penalty = hp.Choice('reg_lambda', values=[1e-2, 1e-3, 1e-4])
    act_fn = hp.Choice('dense_activation', values=['relu', 'tanh', 'sigmoid'], default='relu')
    # Four identical hidden layers with LASSO (L1) weight regularization.
    # (Swap keras.regularizers.l1 for .l2 here to try Ridge instead.)
    for _ in range(4):
        model.add(keras.layers.Dense(
            units=units,
            kernel_regularizer=keras.regularizers.l1(penalty),
            activation=act_fn))
    model.add(keras.layers.Dense(1))  # linear output: the estimated GAP
    lr = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
    model.compile(
        optimizer=keras.optimizers.Adagrad(learning_rate=lr),
        loss=keras.losses.MeanSquaredError(),
        metrics=[keras.metrics.MeanAbsoluteError()])
    return model
#TL4OG_model = make_model()
# + id="Lu5g3-g6T7q0"
# Hyperband search over make_model's space, minimizing validation MAE.
# (Commented kwargs below record previously tried settings.)
tuner = kt.Hyperband(make_model,
                     kt.Objective('val_mean_absolute_error', direction='min'), #
                     max_epochs = 50,) #15
#factor = 3,)
#project_name = 'TL4OG')
# + [markdown] id="CrOWQtQuIoz9"
# #### Train the model
# + [markdown] id="4hKix_JPH5lq"
# Before running the hyperparameter search, define a callback to clear the training outputs at the end of every training step.
# + id="0vbL_9RaH6Xv"
class ClearTrainingOutput(tf.keras.callbacks.Callback):
    """Keras callback that clears the notebook cell output at the end of
    each training run, keeping the tuner's log readable."""

    def on_train_end(self, *args, **kwargs):
        IPython.display.clear_output(wait=True)
# + [markdown] id="LIzOvr5AIT8L"
# Run the hyperparameter search. The arguments for the search method are the same as those used for tf.keras.model.fit in addition to the callback above.
# + id="sA-2VTjfIUfL" colab={"base_uri": "https://localhost:8080/"} outputId="900b0927-ce87-48b7-8f62-e23c771374f7"
# Run the hyperparameter search (same signature as model.fit), clearing the
# cell output after every trial via the callback.
tuner.search(train_features, train_labels,
             epochs=50,
             validation_data=(val_features, val_labels), callbacks = [ClearTrainingOutput()])
# Get the optimal hyperparameters
best_hps = tuner.get_best_hyperparameters(num_trials = 1)[0]
# (Fixed the duplicated "in the in the" in the original summary message.)
print(f"""
The hyperparameter search is complete. The optimal number of units in the densely-connected
layers is {best_hps.get('dense_units')}, while the activation function is {best_hps.get('dense_activation')}, the optimal learning rate for the optimizer
is {best_hps.get('learning_rate')}, and the optimal lambda for the regularizer is {best_hps.get('reg_lambda')}.
""")
# + [markdown] id="cVPnf6mIJodT"
# Retrain the model with the optimal hyperparameters from the search.
# + id="LqLQQDGKJqkk" colab={"base_uri": "https://localhost:8080/"} outputId="3d73866d-2d24-4934-bae2-6e6db79cb0c0"
# Build the model with the optimal hyperparameters and train it on the data
# (fresh weights — the tuning-phase weights are not reused).
model = tuner.hypermodel.build(best_hps)
baseline_history = model.fit(train_features, train_labels,
                             epochs=200,
                             validation_data=(val_features, val_labels))
# + [markdown] id="15V1uaAsM5bR"
# #### Results
# + colab={"base_uri": "https://localhost:8080/", "height": 610} id="8AJzAl8Dqkbk" outputId="edc46241-960f-428d-a04a-dc3e51006705"
def plot_history(history):
    """Plot training vs. validation mean-absolute-error curves by epoch for a
    fitted Keras model's History object."""
    epochs = history.epoch
    metrics = history.history
    plt.figure()
    plt.xlabel('Epoch')
    plt.ylabel('Mean Abs Error [GAP]')
    plt.plot(epochs, metrics['mean_absolute_error'], label='Train Error')
    plt.plot(epochs, metrics['val_mean_absolute_error'], label='Val Error')
    plt.ylim([0, 1.4])
    plt.legend()
    plt.show()
plot_history(baseline_history)
# + [markdown] id="AuWlwTNyhonE"
# Training set
# + colab={"base_uri": "https://localhost:8080/", "height": 607} id="QmwcjsNprATT" outputId="7f91b147-9bd2-406e-8652-bcc991fd09f5"
# In-sample fit: model predictions vs the actual GAP over the (shuffled)
# training rows, with a zero reference line.
time_axis_train = range(0,train_labels.shape[0])
plt.title('Training set')
plt.plot(time_axis_train, model.predict(train_features).flatten(), label='FNN Model') ### Training set
plt.plot(time_axis_train, train_labels, label='GAP')
plt.plot(time_axis_train, np.zeros(train_labels.shape[0]), color='black')
plt.legend()
plt.show()
# + [markdown] id="SQez8dYQO4G2"
# Test set
# + id="VXgYxb3vKE9F" colab={"base_uri": "https://localhost:8080/", "height": 607} outputId="223dfe6f-572c-4bea-b86e-4b0f02f52105"
# Out-of-sample fit: predictions vs actual GAP over the test rows.
time_axis_test = range(0,test_labels.shape[0]) #Test set
FNN_model = model.predict(test_features)
plt.title('Test set')
#plt.bar(time_axis_test, test_labels.T, width=1, linewidth=1, align='center', color="lightgray", label='GAP')
plt.plot(time_axis_test, FNN_model, label='FNN_model')
plt.plot(time_axis_test, test_labels, label='GAP')
plt.plot(time_axis_test, np.zeros(test_labels.shape[0]), color='black')
plt.legend()
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="o58S_xRo-fas" outputId="d67d692b-c9af-45de-b1b6-b1efc78f6902"
# Mean absolute error on the test set. np.mean yields a scalar directly; the
# original divided the sum by BR_test_error.shape — a *tuple* — which only
# worked through NumPy broadcasting and returned a one-element array.
BR_test_error = FNN_model.flatten() - test_labels
MAE_test_set = np.mean(np.abs(BR_test_error))
MAE_test_set
# + [markdown] id="61d5UG1oKGuv"
# All data
# + id="YlH9oHQVL9KA"
# Copy so the pops below do not mutate raw_data itself (the original aliased
# raw_data and permanently removed its CODACE/GAP columns).
x = raw_data.copy()
#Normalize
#CODACE is a dummy, so we don't normalize it. The GAP target is not normalized either.
CODACE_x = x.pop('CODACE')
GAP_x = x.pop('GAP')
# Scale with the TRAINING-set statistics, matching how the model's inputs were
# scaled during fitting. (The original recomputed mean/std on the full sample,
# feeding the model differently-scaled inputs than it was trained on — the
# markdown above the normalization cell states train-only stats must be used.)
x = (x - train_mean) / train_std
#put CODACE and GAP back
x['CODACE'] = CODACE_x
x['GAP'] = GAP_x
#split labels
y = x.pop('GAP')
# + colab={"base_uri": "https://localhost:8080/", "height": 607} id="y8tmgHnPKHRg" outputId="338d1c22-71a7-4249-a05e-271cace28051"
FNN_model_all_data = model.predict(x) # All data
plt.title('All data')
plt.plot(drop_DATE, FNN_model_all_data, label='FNN_model all data')
plt.plot(drop_DATE, y, label='GAP')
plt.plot(drop_DATE, np.zeros(drop_DATE.shape[0]), color='black')
plt.legend()
plt.show()
| TL4OG/OutputGap_FNN_BR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Tensorflow implementation of googLeNet
#
# This specific model is outlined in "Going deeper with convolutions", authored by <NAME>, <NAME>, among others. I will be using this model to handle a simple image recognition task, with the intention of the notebook to create a low bias model with a smaller number of parameters than a traditional convnet.
# +
import numpy as np
import tensorflow as tf
import pandas as pd
from keras import backend as K
import matplotlib.pyplot as plt
import latex
import os
import cv2
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# -
# ### Preparing data
# Getting images and their labels
# Walk the dataset folders (one folder per digit class) and keep only .JPG
# images that decode to exactly 100x100x3.
path = "../../data/Sign-Language-Digits-Dataset/Dataset/"
imgs = []
encodings = []
for folder in os.listdir(path):
    if folder[0] != ".":  # skip hidden entries such as .DS_Store
        name = folder     # folder name doubles as the class label
        for image in os.listdir(path+folder):
            if ".JPG" in image:
                aimg = cv2.imread(path+folder+"/"+image,1)  # 1 = load as color (BGR)
                if aimg.shape == (100,100,3):
                    imgs.append(aimg)
                    encodings.append(name)
# Vectorizing numbered labels
# One-hot encode the string digit labels into length-10 vectors.
final_encodings = []
for encoding in encodings:
    enc_num = int(encoding)
    enc_swp = np.zeros((10,))
    enc_swp[enc_num] = 1
    final_encodings.append(enc_swp)
X = np.array(imgs)
y = np.array(final_encodings)
print(X.shape)
print(y.shape)
# Cache the arrays so later runs can restart from the checkpoint cell below
# without re-decoding every image.
np.save("../../data/signs_data/X.npy",X)
np.save("../../data/signs_data/y.npy",y)
# Checkpoint
# Reload the cached arrays (allows restarting the notebook from here).
X = np.load("../../data/signs_data/X.npy")
y = np.load("../../data/signs_data/y.npy")
print(X.shape)
print(y.shape)
# This particular model uses encodings of shape (224,224,3) so reshaping input
# (GoogLeNet expects 224x224 inputs; upscale every 100x100 image).
resized_imgs = []
for i in range(X.shape[0]):
    r_img = cv2.resize(X[i], (224, 224))
    resized_imgs.append(r_img)
print(len(resized_imgs))
images = np.array(resized_imgs)
encodings = y  # NOTE: rebinds `encodings` (was the raw label list) to the one-hot array
print(images.shape)
print(encodings.shape)
# Sanity-check the first example visually.
plt.imshow(images[0])
plt.show()
print("encoding:",encodings[0])
# ### Tensorflow placeholders
# Placeholder values for input X,y data
def get_placeholders(x_h,x_w,x_c,y_c):
    """Create the graph's float32 input placeholders.

    x_h, x_w, x_c: height, width and channel count of the image input.
    y_c: width of the one-hot label vector (number of classes).
    Returns (X, y), each with a free (None) batch dimension.
    """
    x_shape = (None, x_h, x_w, x_c)
    y_shape = (None, y_c)
    X = tf.placeholder(tf.float32, name="X", shape=x_shape)
    y = tf.placeholder(tf.float32, name="y", shape=y_shape)
    return X, y
# Testing placeholders
# Smoke test: build the placeholders in a fresh graph and check their shapes.
tf.reset_default_graph()
with tf.Session() as sess:
    X,y = get_placeholders(224,224,3,10)
    print("X shape:",X.shape)
    print("y shape:",y.shape)
# ### Tensorflow Forward Propagation
#
# Within the inception block, the filters input describes the number of filters used for both the bottleneck conv layers and the nxn conv layers and is as follows: [1x1 conv [f1], 1x1 conv(bottleneck) [f2], 3x3 conv [f3], 1x1 conv(bottleneck) [f4], 5x5 conv [f5], 1x1 conv(bottleneck for maxpool) [f6]]
# Auxiliary classifier head ("early softmax"): GoogLeNet attaches two of these
# to intermediate layers so gradient signal also enters mid-network.
# Input of shape 14x14xchannels
def early_softmax(the_input,stage,f):
    """Build one auxiliary classifier head and return its 10-way logits.

    the_input: 14x14 feature map the head branches off from.
    stage: "a" or "b" — suffix keeping the two heads' layer names unique.
    f: filter/unit count for the 1x1 conv and the second dense layer.
    """
    # 5x5/3 valid average pool: 14x14 input -> 4x4 spatial.
    Avg = tf.layers.average_pooling2d(the_input,pool_size=[5,5],strides=3,padding="valid",name="AP"+stage) # 4x4xC
    Conv = tf.layers.conv2d(Avg,filters=f,kernel_size=[1,1],strides=(1,1),padding="same",name="Conv"+stage,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
    Conv_A = tf.nn.relu(Conv,name="Conv_A"+stage)
    Flat_A = tf.layers.flatten(Conv_A,name="Flatten_"+stage)
    Dense_1 = tf.layers.dense(Flat_A,1024,activation=tf.nn.relu,name="Fc1_"+stage)
    # NOTE(review): tf.layers.dropout defaults to training=False, so these
    # dropout layers are no-ops unless a training flag is fed — confirm intended.
    Drop_1 = tf.layers.dropout(Dense_1,rate=0.4,name="Drop1_"+stage)
    Dense_2 = tf.layers.dense(Drop_1,f,activation=tf.nn.relu,name="Fc2_"+stage)
    Drop_2 = tf.layers.dropout(Dense_2,rate=0.4,name="Drop2_"+stage)
    # Unscaled 10-way logits; softmax is applied later inside the loss.
    linear = tf.layers.dense(Drop_2,10,activation=None,name="linear_"+stage)
    return linear
# Corresponds with an inception block, including depth concatenation
# s: number of the inception stage, blocks within a stage are labeled alphabetically
def inception_step(the_input,filters,s):
    """Build one Inception module and return the depth-concatenated output.

    filters: [f1, f2, f3, f4, f5, f6] — f1: 1x1 branch; f2: 1x1 bottleneck
    before the 3x3 conv (f3); f4: 1x1 bottleneck before the 5x5 conv (f5);
    f6: 1x1 conv after the 3x3 max-pool branch.
    s: stage number, used only to keep layer names unique.
    """
    f1,f2,f3,f4,f5,f6 = filters
    s = str(s)
    # Block 1: plain 1x1 convolution branch
    Z1 = tf.layers.conv2d(the_input,filters=f1,kernel_size=[1,1],strides=(1,1),padding="same",name="Z1_"+s,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
    A1 = tf.nn.relu(Z1,name="A1_"+s)
    # Block 2:
    # 3x3 bottleneck (1x1 conv reduces channels before the expensive 3x3)
    Z2a = tf.layers.conv2d(the_input,filters=f2,kernel_size=[1,1],strides=(1,1),padding="same",name="Z2a_"+s,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
    A2a = tf.nn.relu(Z2a,name="A2a_"+s)
    # 3x3 conv
    Z2b = tf.layers.conv2d(A2a,filters=f3,kernel_size=[3,3],strides=(1,1),padding="same",name="Z2b_"+s,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
    A2b = tf.nn.relu(Z2b,name="A2b_"+s)
    # Block 3:
    # 5x5 bottleneck
    Z3a = tf.layers.conv2d(the_input,filters=f4,kernel_size=[1,1],strides=(1,1),padding="same",name="Z3a_"+s,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
    A3a = tf.nn.relu(Z3a,name="A3a_"+s)
    # 5x5 conv
    Z3b = tf.layers.conv2d(A3a,filters=f5,kernel_size=[5,5],strides=(1,1),padding="same",name="Z3b_"+s,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
    A3b = tf.nn.relu(Z3b,name="A3b_"+s)
    # Block 4: 3x3 max-pool (stride 1 keeps spatial size) then 1x1 conv
    P4 = tf.layers.max_pooling2d(the_input,pool_size=[3,3],strides=1,padding="same",name="P4_"+s)
    Z4 = tf.layers.conv2d(P4,filters=f6,kernel_size=[1,1],strides=(1,1),padding="same",name="Z4_"+s,kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
    A4 = tf.nn.relu(Z4,name="A4_"+s)
    # Concat all 4 branches along the channel axis
    Dc = tf.concat([A1,A2b,A3b,A4],axis=-1,name="concat_"+s)
    return Dc
# There will be three softmax outputs returned
def forward_pass(X):
    """Build the full GoogLeNet graph on input X.

    Returns [ESM1, ESM2, ESM3]: 10-way logits from the two auxiliary
    classifier heads and from the final classifier, in network order
    (ESM3 is the deepest and most accurate head).
    """
    input_layer = tf.reshape(X,[-1,224,224,3]) # Input shape of images
    # Pre-inception "stem": conv / pool / local response normalization layers.
    Z1 = tf.layers.conv2d(input_layer,filters=64,kernel_size=[7,7],strides=(2,2),padding="same",name="Z1",kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
    A1 = tf.nn.relu(Z1,name="A1") # 112x112
    P1 = tf.layers.max_pooling2d(A1,pool_size=[3,3],strides=2,padding="same",name="P1") # 56x56
    LRN1 = tf.nn.local_response_normalization(P1,name="LRN1")
    Z2 = tf.layers.conv2d(LRN1,filters=64,kernel_size=[1,1],strides=(1,1),padding="valid",name="Z2",kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
    A2 = tf.nn.relu(Z2,name="A2")
    Z3 = tf.layers.conv2d(A2,filters=192,kernel_size=[3,3],strides=(1,1),padding="same",name="Z3",kernel_initializer=tf.contrib.layers.xavier_initializer(seed=0))
    A3 = tf.nn.relu(Z3,name="A3")
    LRN3 = tf.nn.local_response_normalization(A3,name="LRN3")
    P3 = tf.layers.max_pooling2d(LRN3,pool_size=[3,3],strides=2,padding="same",name="P3") # 28x28x192
    # Inception starts (trailing comments give the expected output shapes)
    Inc1 = inception_step(P3,[64,96,128,16,32,32],1) # 28x28x256
    Inc2 = inception_step(Inc1,[128,128,192,32,96,64],2) #28x28x480
    P4 = tf.layers.max_pooling2d(Inc2,pool_size=[3,3],strides=2,padding="same",name="P4") # 14x14x480
    Inc3 = inception_step(P4,[192,96,208,16,48,64],3) #14x14x512
    # First early softmax
    ESM1 = early_softmax(Inc3,"a",512)
    Inc4 = inception_step(Inc3,[160,112,224,24,64,64],4) # 14x14x512
    Inc5 = inception_step(Inc4,[128,128,256,24,64,64],5) # 14x14x512
    Inc6 = inception_step(Inc5,[112,144,288,32,64,64],6) # 14x14x528
    # Second early softmax
    ESM2 = early_softmax(Inc6,"b",528)
    Inc7 = inception_step(Inc6,[256,160,320,32,128,128],7) # 14x14x832
    P5 = tf.layers.max_pooling2d(Inc7,pool_size=[3,3],strides=2,padding="same",name="P5") # 7x7x832
    Inc8 = inception_step(P5,[256,160,320,32,128,128],8) # 7x7x832
    Inc9 = inception_step(Inc8,[384,192,384,48,128,128],9) # 7x7x1024
    # Last softmax output: global 7x7 average pool, then dense + dropout + logits
    AP = tf.layers.average_pooling2d(Inc9,pool_size=[7,7],strides=1,padding="valid",name="APc") # 1x1x1024
    Flat = tf.layers.flatten(AP,name="Flatten_c")
    Dense = tf.layers.dense(Flat,500,activation=tf.nn.relu,name="Fc1_c")
    Drop = tf.layers.dropout(Dense,rate=0.4,name="Drop1_c")
    ESM3 = tf.layers.dense(Drop,10,activation=None,name="linear_c")
    return [ESM1,ESM2,ESM3]
# Testing forward prop step
# Smoke test: run one image through the graph and check the final logits' shape.
test_img = images[0]
test_img.shape = (1,224,224,3)  # in-place reshape to a batch of one
test_enc = encodings[0]
test_enc.shape = (1,10)
tf.reset_default_graph()
with tf.Session() as sess:
    X,y = get_placeholders(224,224,3,10)
    Z = forward_pass(X)
    init = tf.global_variables_initializer()
    sess.run(init)
    aZ = sess.run(Z,feed_dict={X:test_img,y:test_enc})
    print(aZ[2].shape)  # logits from the final (deepest) head
# ### Tensorflow Cost Function
#
# The cost function for this model is simple in that it is a simple softmax cross entropy loss. The only complexity related to this is the fact that there are three softmax outputs from the original model corresponding to predictions at different depths within the model. I plan on putting more weight on the softmax outputs from the deeper portions of the model rather than having there be an equal weighting: 0.4 for the last softmax output and 0.3 for the first two softmax outputs.
# Combine the three softmax heads into a single scalar training loss.
# outputs[2] is the deepest head and carries the largest weight (0.4 vs 0.3/0.3).
def cost_function(outputs,y):
    """Return the weighted sum of the heads' softmax cross-entropy losses."""
    head_weights = (0.3, 0.3, 0.4)
    total = 0
    for weight, logits in zip(head_weights, outputs):
        ce = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,labels=y))
        total = total + weight * ce
    return total
# Testing cost function
# Smoke test: weighted loss on a single randomly-initialized example.
test_img = images[0]
test_img.shape = (1,224,224,3)
test_enc = encodings[0]
test_enc.shape = (1,10)
tf.reset_default_graph()
with tf.Session() as sess:
    X,y = get_placeholders(224,224,3,10)
    Z = forward_pass(X)
    cost = cost_function(Z,y)
    init = tf.global_variables_initializer()
    sess.run(init)
    aCost = sess.run(cost,feed_dict={X:test_img,y:test_enc})
    print("Cost:",aCost)
# ### Tensorflow Model
# Using stochastic gradient descent
def model(images,encodings,lr=0.0001,num_epochs=5,print_cost=True):
    """Train the GoogLeNet graph on (images, encodings) and return the list
    of per-epoch summed costs.

    images: array of 224x224x3 inputs; encodings: matching one-hot labels.
    lr: Adam learning rate. num_epochs: passes over the data.
    """
    tf.reset_default_graph() # resetting graph
    tf.set_random_seed(1)
    seed = 0
    costs=[]
    x_h = images[0].shape[0]
    x_w = images[0].shape[1]
    x_c = images[0].shape[2]
    X,y = get_placeholders(x_h,x_w,x_c,10) # 10 classes
    Z = forward_pass(X)
    cost = cost_function(Z,y)
    optimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost)
    init = tf.global_variables_initializer()
    saver = tf.train.Saver() # to save/load model
    with tf.Session() as sess:
        # Loading saved model
        # NOTE(review): this rebinds `saver` to the imported meta-graph's
        # saver, discarding the one created above; restore() requires that a
        # checkpoint already exists at this path — a first-ever run must use
        # sess.run(init) instead (see the commented line below).
        saver = tf.train.import_meta_graph("../../data/googlenet/inception_model.ckpt.meta")
        saver.restore(sess, "../../data/googlenet/inception_model.ckpt")
        # sess.run(init) # DON'T RUN INIT IF LOADING MODEL
        for epoch in range(num_epochs):
            running_cost = 0
            # shuffle data for each epoch
            # (same random_state for both keeps image i aligned with label i)
            seed += 1
            images = shuffle(images,random_state=seed)
            encodings = shuffle(encodings,random_state=seed)
            for i in range(images.shape[0]):
                # True SGD: one example per optimizer step (batch size 1).
                a_img = images[i]
                a_enc = encodings[i]
                a_img.shape = (1,224,224,3)
                a_enc.shape = (1,10)
                _,temp_cost = sess.run([optimizer,cost], feed_dict={X:a_img,y:a_enc})
                running_cost += temp_cost
                # print("running cost: "+str(running_cost) + " ,temp_cost: " + str(temp_cost)+".")
            costs.append(running_cost)
            if print_cost and epoch % 1 == 0:
                print("Cost at epoch {}: {}".format(epoch+1,running_cost))
        loc = saver.save(sess, "../../data/googlenet/inception_model.ckpt")
    return costs
# Two consecutive training runs (the second resumes from the saved checkpoint).
acosts1 = model(images,encodings,lr=0.00001,num_epochs=15,print_cost=True)
acosts2 = model(images,encodings,lr=0.00001,num_epochs=8,print_cost=True)
all_costs = acosts1 + acosts2
# Derive the x-axis from the actual number of recorded costs instead of the
# hard-coded 23 (= 15 + 8), so changing the epoch counts above cannot break
# the plot with a length mismatch.
x_iter = list(range(len(all_costs)))
plt.plot(x_iter,all_costs)
plt.title("Cost vs epoch")
plt.xlabel("epoch #")
plt.ylabel("cost")
plt.show()
| computer_vision/GoogLeNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <!--BOOK_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="fig/cover-small.jpg">
# *This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by <NAME>; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).*
#
# *The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).*
#
# <!--NAVIGATION-->
# < [Modules and Packages](13-Modules-and-Packages.ipynb) | [Contents](Index.ipynb) | [A Preview of Data Science Tools](15-Preview-of-Data-Science-Tools.ipynb) >
# # String Manipulation and Regular Expressions
# One place where the Python language really shines is in the manipulation of strings.
# This section will cover some of Python's built-in string methods and formatting operations, before moving on to a quick guide to the extremely useful subject of *regular expressions*.
# Such string manipulation patterns come up often in the context of data science work, and they are one big perk of Python in this area.
#
# Strings in Python can be defined using either single or double quotations (they are functionally equivalent):
x = 'a string'
y = "a string"
x == y
# In addition, it is possible to define multi-line strings using a triple-quote syntax:
multiline = """
one
two
three
"""
# With this, let's take a quick tour of some of Python's string manipulation tools.
# ## Simple String Manipulation in Python
#
# For basic manipulation of strings, Python's built-in string methods can be extremely convenient.
# If you have a background working in C or another low-level language, you will likely find the simplicity of Python's methods extremely refreshing.
# We introduced Python's string type and a few of these methods earlier; here we'll dive a bit deeper.
# ### Formatting strings: Adjusting case
#
# Python makes it quite easy to adjust the case of a string.
# Here we'll look at the ``upper()``, ``lower()``, ``capitalize()``, ``title()``, and ``swapcase()`` methods, using the following messy string as an example:
fox = "tHe qUICk bROWn fOx."
# To convert the entire string into upper-case or lower-case, you can use the ``upper()`` or ``lower()`` methods respectively:
fox.upper()
fox.lower()
# A common formatting need is to capitalize just the first letter of each word, or perhaps the first letter of each sentence.
# This can be done with the ``title()`` and ``capitalize()`` methods:
fox.title()
fox.capitalize()
# The cases can be swapped using the ``swapcase()`` method:
fox.swapcase()
# ### Formatting strings: Adding and removing spaces
#
# Another common need is to remove spaces (or other characters) from the beginning or end of the string.
# The basic method of removing characters is the ``strip()`` method, which strips whitespace from the beginning and end of the line:
line = ' this is the content '
line.strip()
# To remove just space to the right or left, use ``rstrip()`` or ``lstrip()`` respectively:
line.rstrip()
line.lstrip()
# To remove characters other than spaces, you can pass the desired character to the ``strip()`` method:
num = "000000000000435"
num.strip('0')
# The opposite of this operation, adding spaces or other characters, can be accomplished using the ``center()``, ``ljust()``, and ``rjust()`` methods.
#
# For example, we can use the ``center()`` method to center a given string within a given number of spaces:
line = "this is the content"
line.center(30)
# Similarly, ``ljust()`` and ``rjust()`` will left-justify or right-justify the string within spaces of a given length:
line.ljust(30)
line.rjust(30)
# All these methods additionally accept any character which will be used to fill the space.
# For example:
'435'.rjust(10, '0')
# Because zero-filling is such a common need, Python also provides ``zfill()``, which is a special method to right-pad a string with zeros:
'435'.zfill(10)
# ### Finding and replacing substrings
#
# If you want to find occurrences of a certain character in a string, the ``find()``/``rfind()``, ``index()``/``rindex()``, and ``replace()`` methods are the best built-in methods.
#
# ``find()`` and ``index()`` are very similar, in that they search for the first occurrence of a character or substring within a string, and return the index of the substring:
line = 'the quick brown fox jumped over a lazy dog'
line.find('fox')
line.index('fox')
# The only difference between ``find()`` and ``index()`` is their behavior when the search string is not found; ``find()`` returns ``-1``, while ``index()`` raises a ``ValueError``:
line.find('bear')
line.index('bear')
# The related ``rfind()`` and ``rindex()`` work similarly, except they search for the first occurrence from the end rather than the beginning of the string:
line.rfind('a')
# For the special case of checking for a substring at the beginning or end of a string, Python provides the ``startswith()`` and ``endswith()`` methods:
line.endswith('dog')
line.startswith('fox')
# To go one step further and replace a given substring with a new string, you can use the ``replace()`` method.
# Here, let's replace ``'brown'`` with ``'red'``:
line.replace('brown', 'red')
# The ``replace()`` function returns a new string, and will replace all occurrences of the input:
line.replace('o', '--')
# For a more flexible approach to this ``replace()`` functionality, see the discussion of regular expressions in [Flexible Pattern Matching with Regular Expressions](#Flexible-Pattern-Matching-with-Regular-Expressions).
# ### Splitting and partitioning strings
#
# If you would like to find a substring *and then* split the string based on its location, the ``partition()`` and/or ``split()`` methods are what you're looking for.
# Both will return a sequence of substrings.
#
# The ``partition()`` method returns a tuple with three elements: the substring before the first instance of the split-point, the split-point itself, and the substring after:
line.partition('fox')
# The ``rpartition()`` method is similar, but searches from the right of the string.
#
# The ``split()`` method is perhaps more useful; it finds *all* instances of the split-point and returns the substrings in between.
# The default is to split on any whitespace, returning a list of the individual words in a string:
line.split()
# A related method is ``splitlines()``, which splits on newline characters.
# Let's do this with a Haiku, popularly attributed to the 17th-century poet Matsuo Bashō:
# +
haiku = """matsushima-ya
aah matsushima-ya
matsushima-ya"""
haiku.splitlines()
# -
# Note that if you would like to undo a ``split()``, you can use the ``join()`` method, which returns a string built from a splitpoint and an iterable:
'--'.join(['1', '2', '3'])
# A common pattern is to use the special character ``"\n"`` (newline) to join together lines that have been previously split, and recover the input:
print("\n".join(['matsushima-ya', 'aah matsushima-ya', 'matsushima-ya']))
# ## Format Strings
#
# In the preceding methods, we have learned how to extract values from strings, and to manipulate strings themselves into desired formats.
# Another use of string methods is to manipulate string *representations* of values of other types.
# Of course, string representations can always be found using the ``str()`` function; for example:
pi = 3.14159
str(pi)
# For more complicated formats, you might be tempted to use string arithmetic as outlined in [Basic Python Semantics: Operators](04-Semantics-Operators.ipynb):
"The value of pi is " + str(pi)
# A more flexible way to do this is to use *format strings*, which are strings with special markers (noted by curly braces) into which string-formatted values will be inserted.
# Here is a basic example:
"The value of pi is {}".format(pi)
# Inside the ``{}`` marker you can also include information on exactly *what* you would like to appear there.
# If you include a number, it will refer to the index of the argument to insert:
"""First letter: {0}. Last letter: {1}.""".format('A', 'Z')
# If you include a string, it will refer to the key of any keyword argument:
"""First letter: {first}. Last letter: {last}.""".format(last='Z', first='A')
# Finally, for numerical inputs, you can include format codes which control how the value is converted to a string.
# For example, to print a number as a floating point with three digits after the decimal point, you can use the following:
"pi = {0:.3f}".format(pi)
# As before, here the "``0``" refers to the index of the value to be inserted.
# The "``:``" marks that format codes will follow.
# The "``.3f``" encodes the desired precision: three digits beyond the decimal point, floating-point format.
#
# This style of format specification is very flexible, and the examples here barely scratch the surface of the formatting options available.
# For more information on the syntax of these format strings, see the [Format Specification](https://docs.python.org/3/library/string.html#formatspec) section of Python's online documentation.
# ## Flexible Pattern Matching with Regular Expressions
#
# The methods of Python's ``str`` type give you a powerful set of tools for formatting, splitting, and manipulating string data.
# But even more powerful tools are available in Python's built-in *regular expression* module.
# Regular expressions are a huge topic; there are entire books written on the subject (including <NAME>’s [*Mastering Regular Expressions, 3rd Edition*](http://shop.oreilly.com/product/9780596528126.do)), so it will be hard to do them justice within just a single subsection.
#
# My goal here is to give you an idea of the types of problems that might be addressed using regular expressions, as well as a basic idea of how to use them in Python.
# I'll suggest some references for learning more in [Further Resources on Regular Expressions](#Further-Resources-on-Regular-Expressions).
#
# Fundamentally, regular expressions are a means of *flexible pattern matching* in strings.
# If you frequently use the command-line, you are probably familiar with this type of flexible matching with the "``*``" character, which acts as a wildcard.
# For example, we can list all the IPython notebooks (i.e., files with extension *.ipynb*) with "Python" in their filename by using the "``*``" wildcard to match any characters in between:
# !ls *Python*.ipynb
# Regular expressions generalize this "wildcard" idea to a wide range of flexible string-matching syntaxes.
# The Python interface to regular expressions is contained in the built-in ``re`` module; as a simple example, let's use it to duplicate the functionality of the string ``split()`` method:
import re
# Use a raw string for the pattern: '\s' is not a recognized escape in a
# normal string literal and raises SyntaxWarning on Python 3.12+.
regex = re.compile(r'\s+')
# NOTE(review): `line` is defined further down in this file; in the original
# notebook it was created in an earlier cell -- confirm execution order.
regex.split(line)
# Here we've first *compiled* a regular expression, then used it to *split* a string.
# Just as Python's ``split()`` method returns a list of all substrings between whitespace, the regular expression ``split()`` method returns a list of all substrings between matches to the input pattern.
#
# In this case, the input is ``"\s+"``: "``\s``" is a special character that matches any whitespace (space, tab, newline, etc.), and the "``+``" is a character that indicates *one or more* of the entity preceding it.
# Thus, the regular expression matches any substring consisting of one or more spaces.
#
# The ``split()`` method here is basically a convenience routine built upon this *pattern matching* behavior; more fundamental is the ``match()`` method, which will tell you whether the beginning of a string matches the pattern:
# Check which sample strings the pattern matches at their *start*
# (regex.match anchors at the beginning of the string).
for sample in [" ", "abc ", " abc"]:
    verdict = "matches" if regex.match(sample) else "does not match"
    print(repr(sample), verdict)
# Like ``split()``, there are similar convenience routines to find the first match (like ``str.index()`` or ``str.find()``) or to find and replace (like ``str.replace()``).
# We'll again use the line from before:
line = 'the quick brown fox jumped over a lazy dog'
# With this, we can see that the ``regex.search()`` method operates a lot like ``str.index()`` or ``str.find()``:
line.index('fox')
regex = re.compile('fox')
match = regex.search(line)
match.start()
# Similarly, the ``regex.sub()`` method operates much like ``str.replace()``:
line.replace('fox', 'BEAR')
regex.sub('BEAR', line)
# With a bit of thought, other native string operations can also be cast as regular expressions.
# ### A more sophisticated example
#
# But, you might ask, why would you want to use the more complicated and verbose syntax of regular expressions rather than the more intuitive and simple string methods?
# The advantage is that regular expressions offer *far* more flexibility.
#
# Here we'll consider a more complicated example: the common task of matching email addresses.
# I'll start by simply writing a (somewhat indecipherable) regular expression, and then walk through what is going on.
# Here it goes:
# Raw string avoids the invalid '\w' escape (SyntaxWarning on Python 3.12+).
email = re.compile(r'\w+@\w+\.[a-z]{3}')
# Using this, if we're given a line from a document, we can quickly extract things that look like email addresses
text = "To email Guido, try <EMAIL> or the older address <EMAIL>."
email.findall(text)
# (Note that these addresses are entirely made up; there are probably better ways to get in touch with Guido).
#
# We can do further operations, like replacing these email addresses with another string, perhaps to hide addresses in the output:
email.sub('--@--.--', text)
# Finally, note that if you really want to match *any* email address, the preceding regular expression is far too simple.
# For example, it only allows addresses made of alphanumeric characters that end in one of several common domain suffixes.
# So, for example, the period used here means that we only find part of the address:
email.findall('<EMAIL>')
# This goes to show how unforgiving regular expressions can be if you're not careful!
# If you search around online, you can find some suggestions for regular expressions that will match *all* valid emails, but beware: they are much more involved than the simple expression used here!
# ### Basics of regular expression syntax
#
# The syntax of regular expressions is much too large a topic for this short section.
# Still, a bit of familiarity can go a long way: I will walk through some of the basic constructs here, and then list some more complete resources from which you can learn more.
# My hope is that the following quick primer will enable you to use these resources effectively.
# #### Simple strings are matched directly
#
# If you build a regular expression on a simple string of characters or digits, it will match that exact string:
regex = re.compile('ion')
regex.findall('Great Expectations')
# #### Some characters have special meanings
#
# While simple letters or numbers are direct matches, there are a handful of characters that have special meanings within regular expressions. They are:
# ```
# . ^ $ * + ? { } [ ] \ | ( )
# ```
# We will discuss the meaning of some of these momentarily.
# In the meantime, you should know that if you'd like to match any of these characters directly, you can *escape* them with a back-slash:
regex = re.compile(r'\$')
regex.findall("the cost is $20")
# The ``r`` preface in ``r'\$'`` indicates a *raw string*; in standard Python strings, the backslash is used to indicate special characters.
# For example, a tab is indicated by ``"\t"``:
print('a\tb\tc')
# Such substitutions are not made in a raw string:
print(r'a\tb\tc')
# For this reason, whenever you use backslashes in a regular expression, it is good practice to use a raw string.
# #### Special characters can match character groups
#
# Just as the ``"\"`` character within regular expressions can escape special characters, turning them into normal characters, it can also be used to give normal characters special meaning.
# These special characters match specified groups of characters, and we've seen them before.
# In the email address regexp from before, we used the character ``"\w"``, which is a special marker matching *any alphanumeric character*. Similarly, in the simple ``split()`` example, we also saw ``"\s"``, a special marker indicating *any whitespace character*.
#
# Putting these together, we can create a regular expression that will match *any two letters/digits with whitespace between them*:
regex = re.compile(r'\w\s\w')
regex.findall('the fox is 9 years old')
# This example begins to hint at the power and flexibility of regular expressions.
# The following table lists a few of these characters that are commonly useful:
#
# | Character | Description || Character | Description |
# |-----------|-----------------------------||-----------|---------------------------------|
# | ``"\d"`` | Match any digit || ``"\D"`` | Match any non-digit |
# | ``"\s"`` | Match any whitespace || ``"\S"`` | Match any non-whitespace |
# | ``"\w"`` | Match any alphanumeric char || ``"\W"`` | Match any non-alphanumeric char |
#
# This is *not* a comprehensive list or description; for more details, see Python's [regular expression syntax documentation](https://docs.python.org/3/library/re.html#re-syntax).
# #### Square brackets match custom character groups
#
# If the built-in character groups aren't specific enough for you, you can use square brackets to specify any set of characters you're interested in.
# For example, the following will match any lower-case vowel:
regex = re.compile('[aeiou]')
regex.split('consequential')
# Similarly, you can use a dash to specify a range: for example, ``"[a-z]"`` will match any lower-case letter, and ``"[1-3]"`` will match any of ``"1"``, ``"2"``, or ``"3"``.
# For instance, you may need to extract from a document specific numerical codes that consist of a capital letter followed by a digit. You could do this as follows:
regex = re.compile('[A-Z][0-9]')
regex.findall('1043879, G2, H6')
# #### Wildcards match repeated characters
#
# If you would like to match a string with, say, three alphanumeric characters in a row, it is possible to write, for example, ``"\w\w\w"``.
# Because this is such a common need, there is a specific syntax to match repetitions – curly braces with a number:
regex = re.compile(r'\w{3}')
regex.findall('The quick brown fox')
# There are also markers available to match any number of repetitions – for example, the ``"+"`` character will match *one or more* repetitions of what precedes it:
regex = re.compile(r'\w+')
regex.findall('The quick brown fox')
# The following is a table of the repetition markers available for use in regular expressions:
#
# | Character | Description | Example |
# |-----------|-------------|---------|
# | ``?`` | Match zero or one repetitions of preceding | ``"ab?"`` matches ``"a"`` or ``"ab"`` |
# | ``*`` | Match zero or more repetitions of preceding | ``"ab*"`` matches ``"a"``, ``"ab"``, ``"abb"``, ``"abbb"``... |
# | ``+`` | Match one or more repetitions of preceding | ``"ab+"`` matches ``"ab"``, ``"abb"``, ``"abbb"``... but not ``"a"`` |
# | ``{n}``   | Match ``n`` repetitions of preceding | ``"ab{2}"`` matches ``"abb"`` |
# | ``{m,n}`` | Match between ``m`` and ``n`` repetitions of preceding | ``"ab{2,3}"`` matches ``"abb"`` or ``"abbb"`` |
# With these basics in mind, let's return to our email address matcher:
email = re.compile(r'\w+@\w+\.[a-z]{3}')
# We can now understand what this means: we want one or more alphanumeric character (``"\w+"``) followed by the *at sign* (``"@"``), followed by one or more alphanumeric character (``"\w+"``), followed by a period (``"\."`` – note the need for a backslash escape), followed by exactly three lower-case letters.
#
# If we want to now modify this so that the Obama email address matches, we can do so using the square-bracket notation:
email2 = re.compile(r'[\w.]+@\w+\.[a-z]{3}')
email2.findall('<EMAIL>')
# We have changed ``"\w+"`` to ``"[\w.]+"``, so we will match any alphanumeric character *or* a period.
# With this more flexible expression, we can match a wider range of email addresses (though still not all – can you identify other shortcomings of this expression?).
# #### Parentheses indicate *groups* to extract
#
# For compound regular expressions like our email matcher, we often want to extract their components rather than the full match. This can be done using parentheses to *group* the results:
email3 = re.compile(r'([\w.]+)@(\w+)\.([a-z]{3})')
text = "To email Guido, try <EMAIL> or the older address <EMAIL>."
email3.findall(text)
# As we see, this grouping actually extracts a list of the sub-components of the email address.
#
# We can go a bit further and *name* the extracted components using the ``"(?P<name> )"`` syntax, in which case the groups can be extracted as a Python dictionary:
email4 = re.compile(r'(?P<user>[\w.]+)@(?P<domain>\w+)\.(?P<suffix>[a-z]{3})')
match = email4.match('<EMAIL>')
match.groupdict()
# Combining these ideas (as well as some of the powerful regexp syntax that we have not covered here) allows you to flexibly and quickly extract information from strings in Python.
# ### Further Resources on Regular Expressions
#
# The above discussion is just a quick (and far from complete) treatment of this large topic.
# If you'd like to learn more, I recommend the following resources:
#
# - [Python's ``re`` package Documentation](https://docs.python.org/3/library/re.html): I find that I promptly forget how to use regular expressions just about every time I use them. Now that I have the basics down, I have found this page to be an incredibly valuable resource to recall what each specific character or sequence means within a regular expression.
# - [Python's official regular expression HOWTO](https://docs.python.org/3/howto/regex.html): a more narrative approach to regular expressions in Python.
# - [Mastering Regular Expressions (OReilly, 2006)](http://shop.oreilly.com/product/9780596528126.do) is a 500+ page book on the subject. If you want a really complete treatment of this topic, this is the resource for you.
#
# For some examples of string manipulation and regular expressions in action at a larger scale, see [Pandas: Labeled Column-oriented Data](15-Preview-of-Data-Science-Tools.ipynb#Pandas:-Labeled-Column-oriented-Data), where we look at applying these sorts of expressions across *tables* of string data within the Pandas package.
# <!--NAVIGATION-->
# < [Modules and Packages](13-Modules-and-Packages.ipynb) | [Contents](Index.ipynb) | [A Preview of Data Science Tools](15-Preview-of-Data-Science-Tools.ipynb) >
| 14-Strings-and-Regular-Expressions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import vaex
import pandas as pd
FILES = ['compbiology', 'biology', 'medicine', 'genetics', 'ntds', 'pathogenes', 'plosone', 'srep']
PATH_ROW_ACKNOW = '../../data/TreatedEdgeList'
PATH_RESULT = "../../resultMAG"
# -
# # Read Graph
# +
# %%time
# Read every per-journal edge list and stack them into one DataFrame.
# Concatenating inside the loop is O(n^2) -- each pd.concat copies all rows
# accumulated so far -- so collect the pieces and concatenate once instead.
dfs = {}
for file in FILES:
    print(f"read file: {file}")
    dfs[file] = pd.read_csv(f'{PATH_ROW_ACKNOW}/{file}.csv')[['paperId', 'author', 'acknow']]
dfs_all = pd.concat(dfs.values()) if dfs else pd.DataFrame()
dfs_all.head()
# -
# # Read author data
# + tags=[]
# %%time
# Open the MAG authors table (vaex memory-maps the HDF5 file, so this is
# cheap on every run after the initial CSV conversion).
vaex_mag_authors = vaex.open('../../data-MAG/Authors/Authors.csv.hdf5')
# Keep only the authors that appear as nodes in the graph.
# NOTE(review): `G` is not defined anywhere in this notebook as shown --
# presumably a networkx graph built in a removed/earlier cell; confirm.
vaex_mag_authors_avail = vaex_mag_authors[vaex_mag_authors['AuthorId'].isin(list(G.nodes))]
# Materialize the filtered selection as a pandas DataFrame.
df_mag_authors_avail = vaex_mag_authors_avail.to_pandas_df()
df_mag_authors_avail.head()
# -
a = df_mag_authors_avail.groupby('AuthorId')['DisplayName'].apply(list).reset_index()
a
# + tags=[]
a['l'] = a['DisplayName'].apply(len)
# -
a[a['l'] > 1]
vaex_mag_authors[vaex_mag_authors['DisplayName'] == '<NAME>']
vaex_mag_authors[vaex_mag_authors['DisplayName'] == '<NAME>']
# ### afil
# read hdf5 ? from 2nd time
vaex_mag_pauthorAffil = vaex.open('../../data-MAG/PaperAuthorAffiliations/PaperAuthorAffiliations.txt.hdf5')
vaex_mag_pauthorAffil.head()
# ### PaperAuthorAffiliations.csvのデータ
# + tags=[]
# '<NAME>'でヒットするauthorId
authorIds_Evan = vaex_mag_pauthorAffil[vaex_mag_pauthorAffil['OriginalAuthor'] == '<NAME>']['AuthorId'].tolist()
set(authorIds_Evan)
# -
# '<NAME>'でヒットするauthorId
authorIds_Evan_middle = vaex_mag_pauthorAffil[vaex_mag_pauthorAffil['OriginalAuthor'] == '<NAME>']['AuthorId'].tolist()
set(authorIds_Evan_middle)
print(f"二つの名前(ミドルネーム有無)で共通のauthorId) = {set(authorIds_Evan) & set(authorIds_Evan_middle)}")
# authorId == '2103588639' で検索してヒットした結果
vaex_mag_pauthorAffil[vaex_mag_pauthorAffil['AuthorId'] == 2103588639]
# ### Author.csvのデータ
# author data
vaex_mag_authors[vaex_mag_authors['AuthorId'] == 2103588639]
# NOTE(review): the lines below were free-form notes, not valid Python, so
# this cell raised a SyntaxError when run.  Observation (translated from
# Japanese): PaperAuthorAffiliations can hold several names (with/without a
# middle name) for one authorId, while the Authors table appears to keep
# exactly one name per authorId.
def determine_acknow(acknow_name):
    """Decide whether `acknow_name` is a genuine acknowledgement.

    Planned algorithm (translated from the author's notes):
      1. the name appears in the MAG authors table, and
      2. one of the following holds:
         2-1. the person has a co-authorship relation with this paper's authors
         2-2. the person is acknowledged

    Not implemented yet -- this was only a sketch in the original notebook.
    """
    raise NotImplementedError("determine_acknow is an unimplemented sketch")
vaex_mag_pauthorAffil[:100]
# + jupyter={"outputs_hidden": true} tags=[]
list(set(vaex_mag_pauthorAffil[:100]['AuthorId'].tolist()))
# -
vaex_mag_pauthorAffil[vaex_mag_pauthorAffil['AuthorId'] == 2103588639]
vaex_mag_pauthorAffil[vaex_mag_pauthorAffil['AuthorId'] == 2989460491]
| scripts/main/.ipynb_checkpoints/3-MAG_Stats-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Skin Cancer Recognizer
#
# This notebook contains a trained model that, given an image of a sunspot, can detect whether skin cancer is present.
# # Define Loaders
#
# Pull in the training/testing/validation images into a loader.
# +
import numpy as np
from torchvision import datasets
import torchvision.transforms as transforms
# Common parameters/attributes
# `torch` is otherwise only imported further down in the model cell, so this
# cell raised NameError on `torch.cuda.is_available()`; import it here.
import torch

n_epochs = 50        # number of training epochs
batch_size = 20      # images per mini-batch
data_dir = 'D:/image_datasets/skin_cancer_images/'  # dataset root folder
use_cuda = torch.cuda.is_available()  # train on GPU when one is available
# Resize the shorter side to 256, take a *random* 224x224 crop (the previous
# comment's 300x300 / 250x250 figures did not match the code), convert to a
# tensor, and normalize with the ImageNet statistics expected by the
# pretrained ResNet used below.
image_transform = transforms.Compose([transforms.Resize(256),
                                     transforms.RandomResizedCrop(224),
                                     transforms.ToTensor(),
                                     transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
# Load in data directories from each relevant folder and transform
sc_data = {
'train': datasets.ImageFolder(data_dir + 'train', transform=image_transform),
'valid': datasets.ImageFolder(data_dir + 'valid', transform=image_transform),
'test': datasets.ImageFolder(data_dir + 'test', transform=image_transform)
}
# Prepare train/valid/test data loaders.
# `num_workers` was never defined anywhere in this notebook, so building the
# loaders raised NameError; default to 0 (load in the main process -- also
# the safe choice on Windows, which the D:/ data path suggests).
num_workers = 0
sc_loaders = {
    'train': torch.utils.data.DataLoader(sc_data['train'], batch_size=batch_size, num_workers=num_workers, shuffle=True),
    'valid': torch.utils.data.DataLoader(sc_data['valid'], batch_size=batch_size, num_workers=num_workers, shuffle=True),
    'test': torch.utils.data.DataLoader(sc_data['test'], batch_size=batch_size, num_workers=num_workers, shuffle=True)
}
# Image counts
print('Training: ', len(sc_data['train']))
print('Validation: ', len(sc_data['valid']))
print('Testing: ', len(sc_data['test']))
# -
# # Define Resnet50 Model
#
# Load in pretrained Resnet50 model and update last FCL for skincare classes
# +
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
## check if CUDA is available
use_cuda = torch.cuda.is_available()
## Add final FCL at the end of the model
sc_model = nn.Sequential(
models.resnet50(pretrained=True),
nn.Linear(1000, 3)
)
## move model to GPU if CUDA is available
if use_cuda:
print('Using cuda')
sc_model = sc_model.cuda()
else:
print('Not using cuda')
## Show model structure
print(sc_model)
# -
# # Define Optimizer and Criterion
#
# Using SGD optimizer and CrossEntropyLoss criterion
# +
# specify loss function (categorical cross-entropy)
sc_criterion = nn.CrossEntropyLoss()
# specify optimizer (stochastic gradient descent) and learning rate = 0.0025
sc_optimizer = optim.SGD(sc_model[1].parameters(), lr=0.0025)
# -
# # Define Training Method
#
# A reusable function for training and saving the best model
# +
## When dealing with large images they may be truncated
## and need special permission to load
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
def train(n_epochs, loaders, model, optimizer, criterion, use_cuda, save_path):
    """Train `model`, checkpointing whenever the validation loss improves.

    Parameters
    ----------
    n_epochs : int          number of epochs to run
    loaders : dict          {'train': DataLoader, 'valid': DataLoader}
    model : nn.Module       network to optimize (modified in place)
    optimizer : Optimizer   updates the model parameters
    criterion : loss module e.g. nn.CrossEntropyLoss
    use_cuda : bool         move batches to the GPU when True
    save_path : str         file path for the best state_dict

    Returns
    -------
    The trained model (the same object that was passed in).  The weights on
    disk at `save_path` belong to the epoch with the lowest validation loss.
    """
    # np.Inf was removed in NumPy 2.0; float('inf') is equivalent & portable.
    valid_loss_min = float('inf')

    for epoch in range(1, n_epochs + 1):
        # running-average training and validation losses for this epoch
        train_loss = 0.0
        valid_loss = 0.0

        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(loaders['train']):
            if use_cuda:
                data, target = data.cuda(), target.cuda()
            # clear the gradients of all optimized variables
            optimizer.zero_grad()
            # call the model, not model.forward(), so forward hooks still run
            output = model(data)
            loss = criterion(output, target)
            # backward pass + single optimization step
            loss.backward()
            optimizer.step()
            # incremental running average of the batch losses
            train_loss = train_loss + ((1 / (batch_idx + 1)) * (loss.item() - train_loss))

        ######################
        # validate the model #
        ######################
        model.eval()
        with torch.no_grad():  # no gradients needed: saves memory and time
            for batch_idx, (data, target) in enumerate(loaders['valid']):
                if use_cuda:
                    data, target = data.cuda(), target.cuda()
                output = model(data)
                loss = criterion(output, target)
                valid_loss = valid_loss + ((1 / (batch_idx + 1)) * (loss.item() - valid_loss))

        # print training/validation statistics
        print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
            epoch,
            train_loss,
            valid_loss
            ))

        # Save model if validation loss has decreased since last minimum
        if valid_loss <= valid_loss_min:
            print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(
                valid_loss_min,
                valid_loss))
            torch.save(model.state_dict(), save_path)
            valid_loss_min = valid_loss

    # return trained model
    return model
# -
## Train the model
sc_model = train(n_epochs, sc_loaders, sc_model, sc_optimizer, sc_criterion, use_cuda, 'cancer_predictor_model.pt')
# # Define Testing Method
#
# Method for testing model performance
def test(loaders, model, criterion, use_cuda):
test_loss = 0.
correct = 0.
total = 0.
model.eval()
for batch_idx, (data, target) in enumerate(loaders['test']):
if use_cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
loss = criterion(output, target)
test_loss = test_loss + ((1 / (batch_idx + 1)) * (loss.data - test_loss))
pred = output.data.max(1, keepdim=True)[1]
correct += np.sum(np.squeeze(pred.eq(target.data.view_as(pred))).cpu().numpy())
total += data.size(0)
print('Test Loss: {:.6f}\n'.format(test_loss))
print('\nTest Accuracy: %2d%% (%2d/%2d)' % (
100. * correct / total, correct, total))
## Test the model
test(sc_loaders, sc_model, sc_criterion, use_cuda)
# # Predictor Function
#
# Given an image path, it makes a prediction on the type of skin cancer
# +
## Image utils and pt Variable
from PIL import Image
from torch.autograd import Variable
## Prediction method using Resnet50 model
def sc_predict(img_path):
    """Run the trained classifier on a single image and return its scores.

    Parameters
    ----------
    img_path : str   path to an image file readable by PIL

    Returns
    -------
    1-D numpy array with the three class scores (raw logits -- no softmax
    is applied, so these are not probabilities).
    """
    # Deterministic eval-time pipeline: the previous RandomResizedCrop takes
    # a *random* crop, so repeated predictions on the same image could differ;
    # Resize + CenterCrop is the standard inference transform.
    transform = transforms.Compose([transforms.Resize(256),
                                    transforms.CenterCrop(224),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                         std=[0.229, 0.224, 0.225])])
    image = transform(Image.open(img_path))
    image = image.unsqueeze(0)  # add the batch dimension: (1, C, H, W)

    # torch.autograd.Variable is deprecated -- tensors work directly.
    with torch.no_grad():  # inference only
        if use_cuda:
            prediction = sc_model(image.cuda()).cpu()
        else:
            prediction = sc_model(image)
    return prediction.data.numpy()[0]
print(sc_predict('D:/image_datasets/skin_cancer_images/train/melanoma/ISIC_0000002.jpg'))
# -
# # Output to CSV
#
#
# ### Columns:
# - Id - the file names of the test images (in the same order as the sample submission file)
# - task_1 - the model's predicted probability that the image (at the path in Id) depicts melanoma
# - task_2 - the model's predicted probability that the image (at the path in Id) depicts seborrheic keratosis
# +
import csv
from glob import glob
from tqdm import tqdm
# Write one CSV row per test image with the scores for the two tasks.
with open('predictions.csv', mode='w') as csv_file:
    ## Define columns
    writer = csv.DictWriter(csv_file, fieldnames=['Id', 'task_1', 'task_2'])
    writer.writeheader()
    ## Switch off dropout / batch-norm updates before predicting
    sc_model.eval()
    ## All images under the test split, one sub-folder per class
    image_paths = glob(data_dir + 'test/*/*')
    ## Process each file in the test directory
    for i in tqdm(range(len(image_paths))):
        ## Raw class scores for this image
        pred = sc_predict(image_paths[i])
        # NOTE(review): pred holds raw logits, not probabilities (no softmax
        # applied), and indices 0 and 2 are assumed to be the melanoma and
        # seborrheic-keratosis classes of the ImageFolder ordering -- confirm.
        writer.writerow({
            'Id': image_paths[i],
            'task_1': pred[0],
            'task_2': pred[2]
        })
| Skin Cancer Recognizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Use radvel to fit simulated RV data from June hack days
# +
# Preliminary imports
# #%pylab osx
# %pylab inline
import radvel
import radvel.likelihood
from radvel.plot import orbit_plots
import copy
import pandas as pd
import os
from scipy import optimize
import corner
from astropy.time import Time
import numpy as np
from numpy import *
import copy
rcParams['font.size'] = 18
# -
import emcee
emcee.__version__
# Convenient function for plotting fits
def plot_results(like):
    """Plot the RV data (model + residuals) and the best-fit model curve.

    `like` is a radvel RVLikelihood.  Relies on the bare pylab names
    (gcf, errorbar, plot, linspace, ...) pulled in by the %pylab magic above.
    """
    fig = gcf()
    # Data points: model prediction plus residuals reconstructs the observed
    # velocities; error bars come from the likelihood.
    errorbar(
        like.x, like.model(like.x)+like.residuals(),
        yerr=like.yerr, fmt='o'
    )
    t_start = min(like.x)
    t_stop = max(like.x)
    # Dense time grid for a smooth model curve.
    ti = linspace(t_start,t_stop,10000)
    plot(ti, like.model(ti))
    xlabel('Time')
    ylabel('RV')
    draw()
# Load simulated RV time series
# ========================
# Load the simulated RV time series (whitespace-separated columns:
# observation time, radial velocity, velocity uncertainty).
rv_data_path = os.path.relpath('../../Data/rv.dat')
# sep=r'\s+' replaces delim_whitespace=True, which is deprecated since
# pandas 2.2 and removed in pandas 3.0; the two are equivalent.
data = pd.read_csv(rv_data_path, usecols=(0, 1, 2), sep=r'\s+', header=None,
                   names=['date', 'vel', 'err'])
data.head()
data['vel'].values.shape, data['date'].values.shape
plt.figure(figsize=(12,8))
plt.errorbar(data['date'].values, data['vel'].values, data['err'].values)
# Initialize 1-planet model
# ===================
params = radvel.Parameters(1,basis='per tc secosw sesinw k')
params['k1'] = radvel.Parameter(value=20.0)
params['per1'] = radvel.Parameter(value=1500.0)
params['secosw1'] = radvel.Parameter(value=0.0)
params['sesinw1'] = radvel.Parameter(value=0.0)
params['tc1'] = radvel.Parameter(value=2455300)
rv_mod = radvel.RVModel(params)
# Generate a likelihood
# =====================
# +
like = radvel.likelihood.RVLikelihood(
rv_mod, data['date'].values,
data['vel'].values, data['err'].values)
like.params['gamma'] = radvel.Parameter(value=0)
like.params['jit'] = radvel.Parameter(value=data['err'].values[0])
truths = copy.deepcopy(like.params) # Store away model parameters for later reference
like.params['jit'].vary = False # Don't vary jitter
# -
# Perform a maximum likelihood fit
# ================================
plot_results(like) # Plot initial model
plot_results(like) # Plot initial model
res = optimize.minimize(like.neglogprob_array, like.get_vary_params(),
method='Nelder-Mead', options={'maxfev':1e4})
print(res)
print(like)
plot_results(like) # plot best fit model
plt.figure()
plt.plot(like.x, like.residuals())
# Initialize 2-planet model
# ===================
# +
params = radvel.Parameters(2,basis='per tc secosw sesinw k')
params['k1'] = radvel.Parameter(value=20.0)
params['per1'] = radvel.Parameter(value=1500.0)
params['secosw1'] = radvel.Parameter(value=0.0)
params['sesinw1'] = radvel.Parameter(value=0.0)
params['tc1'] = radvel.Parameter(value=2455300)
params['k2'] = radvel.Parameter(value=10.0)
params['per2'] = radvel.Parameter(value=7000.0)
params['secosw2'] = radvel.Parameter(value=0.0)
params['sesinw2'] = radvel.Parameter(value=0.0)
params['tc2'] = radvel.Parameter(value=2457000)
rv_mod = radvel.RVModel(params)
# -
# Generate a likelihood
# =====================
# +
like = radvel.likelihood.RVLikelihood(
rv_mod, data['date'].values,
data['vel'].values, data['err'].values)
like.params['gamma'] = radvel.Parameter(value=0)
like.params['jit'] = radvel.Parameter(value=data['err'].values[0])
truths = copy.deepcopy(like.params) # Store away model parameters for later reference
like.params['jit'].vary = False # Don't vary jitter
# -
plot_results(like) # Plot initial model
res = optimize.minimize(like.neglogprob_array, like.get_vary_params(),
method='Nelder-Mead', options={'maxfev':1e4})
print(res)
print(like)
plot_results(like) # plot best fit model
# Instantiate posterior
# ======================
# +
# Instantiate posterior
post = radvel.posterior.Posterior(like)
post0 = copy.deepcopy(post)
# Add in priors
post.priors += [radvel.prior.EccentricityPrior( 2 )] # Keeps eccentricity < 1
# Perform Max-likelihood fitting
res = optimize.minimize(
post.neglogprob_array,
post.get_vary_params(),
method='Powell',
options=dict(maxiter=100000,maxfev=100000,xtol=1e-8)
)
print("Initial loglikelihood = %f" % post0.logprob())
print("Final loglikelihood = %f" % post.logprob())
print(post)
# -
# Perform MCMC posterior exploration
# ==================================
df = radvel.mcmc(post, nrun=200)
df_synth = post.params.basis.to_synth(df)
labels = 'per1 tc1 e1 k1 per2 tc2 e2 k2 '.split()
df_synth[labels].quantile([0.14,0.5,0.84]).T
# +
labels = 'per1 tc1 e1 k1 per2 tc2 e2 k2'.split()
rc('font',size=8)
fig = corner.corner(
df_synth[labels],labels=labels,
levels=[0.68,0.95],plot_datapoints=False,smooth=True,bins=20
)
# -
RVPlot = orbit_plots.MultipanelPlot(post)
RVPlot.plot_multipanel()
| Notebooks-Code/astrometry_radial velocities_orbit fitting/radvel_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from algorithms import utils as ut
from algorithms.Gaussian import NormalNaiveBayes
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# +
# Load the toy 2-D classification set; the last column is the class label.
data = np.loadtxt("datasets/toy-classification.csv",delimiter=",")
# Scatter the two features, colored by class.
_ = plt.scatter(x = data[:,0], y = data[:,1],c = data[:,-1])
# -
# Hold out 30% of the rows for testing (70/30 split).
train_data,test_data = ut.train_test_split(data,0.7)
# +
# Fit the Gaussian naive Bayes model on the feature vs. label columns.
model = NormalNaiveBayes()
model.fit(train_data[:,:-1], train_data[:,-1])
# Accuracy on the held-out split, reported as a percentage.
s = model.score(test_data[:,:-1], test_data[:,-1])
print("Score: {}%".format(s*100))
| Naive Bayes Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dlnd
# language: python
# name: dlnd
# ---
# # 얕은 신경망을 이용한 다중 분류 문제
import numpy as np
import matplotlib.pyplot as plt
# ## 함수 구현
# ### Sigmoid 함수
# $sigmoid(x) = 1/(1+e^{-x})$
# ### Softmax 함수
# $softmax(x)_i = e^{x_i}/\sum{e^{x_j}}$
# ## 네트워크 구조 정의
# Define network architecture
# ## 데이터셋 가져오기, 정리하기
# Import and organize dataset
dataset = np.load('ch2_dataset.npz')
inputs = dataset['inputs']
labels = dataset['labels']
# ## 모델 만들기
# Create Model
# ## Load pretrained parameters
# Load weights/biases saved from an earlier training run.
weights = np.load('ch2_parameters.npz')
# NOTE(review): `model` is never created in this notebook skeleton (the
# "Create Model" cell above is empty), so these assignments raise NameError
# until the model class is implemented.
model.W_h = weights['W_h']
model.b_h = weights['b_h']
model.W_o = weights['W_o']
model.b_o = weights['b_o']
# ## 모델 구동 및 결과 프린트
# ## 정답 클래스 스캐터 플랏
# ## 모델 출력 클래스 스캐터 플랏
| RNN_TF20/02. Shallow Neural Network implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## NoteBook to Train and Visualize the U-Net Network
# +
# Importing bunch of libraries
import os
import sys
import time
import random
import warnings
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook as tqdm
from skimage.io import imread, imshow
from skimage.transform import resize
import tensorflow as tf
from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Dropout, Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from keras import backend as K
from keras.utils import multi_gpu_model
# Specify image dimensions
# Please note that the code may not function as expected for different image size
# (It will definitely not run for smaller images)
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
# Seed the RNG for reproducibility.  The original `random.seed = seed`
# *replaced* the seed function with the int 42 instead of calling it,
# leaving the RNG unseeded and breaking any later random.seed() call.
seed = 42
random.seed(seed)
# -
# ### Function Definitions
# +
## Function for computing the masks' pixel-to-pixel accuracy, takes as input 2D masks and 2D predictions
# (Label 1 corresponds to skin, and 0 to non-skin)
# TP - true positive: mask and prediction pixels refer to skin
# TN - true negative: mask and prediction pixels refer to non-skin
# FP - false positive: mask pixels refer to non-skin, prediction pixels refer to skin
# FN - false negative: mask pixels refer to skin, prediction pixels refer to non-skin
def acc_comp(msk, preds_test_t):
    """Compare binary skin masks with predictions, pixel by pixel.

    Label 1 corresponds to skin, 0 to non-skin.

    Parameters
    ----------
    msk : array (N, H, W) of {0,1}   ground-truth masks
    preds_test_t : array-like        N thresholded predictions, each
                                     reshapeable to (IMG_WIDTH, IMG_HEIGHT)

    Returns
    -------
    (mean_acc, std, mean_TP, mean_TN, mean_FP, mean_FN): per-image accuracy
    and TP/TN/FP/FN pixel counts as numpy arrays, plus the standard
    deviation of the per-image accuracies.  All values are rounded to six
    decimals, matching the original implementation.
    """
    # Accumulate per-image statistics in plain lists: the previous version
    # grew five numpy arrays with np.append, which copies the whole array on
    # every call (O(n^2) overall) and needed dummy leading zeros.  The unused
    # (and mislabeled) `neg`/`pos` locals are gone as well.
    accs, tps, tns, fps, fns = [], [], [], [], []
    n_pixels = IMG_WIDTH * IMG_HEIGHT
    for j in range(msk.shape[0]):
        act = msk[j]
        pr = preds_test_t[j].reshape(IMG_WIDTH, IMG_HEIGHT)
        agree = act == pr          # pixels where mask and prediction match
        both = act & pr            # skin in both -> true positives
        either = act | pr          # skin in at least one
        tps.append(round(float(both.sum()), 6))
        fps.append(round(float(pr.sum() - both.sum()), 6))
        tns.append(round(float(n_pixels - either.sum()), 6))
        fns.append(round(float(either.sum() - pr.sum()), 6))
        accs.append(round(float(agree.sum()) / n_pixels, 6))
    mean_acc = np.array(accs)
    std = round(np.std(mean_acc), 6)
    return (mean_acc, std, np.array(tps), np.array(tns), np.array(fps), np.array(fns))
## Class for extracting time elapsed per training epoch
## Keras callback that records the wall-clock duration (seconds) of every epoch.
class TimingCallback(Callback):
    def on_train_begin(self, logs={}):
        # Fresh list of per-epoch durations at the start of each training run.
        self.times = []
    def on_epoch_begin(self, batch, logs={}):
        # Remember when this epoch started.
        self.epoch_time_start = time.time()
    def on_epoch_end(self, batch, logs={}):
        # Store how long the epoch that just ended took.
        elapsed = time.time() - self.epoch_time_start
        self.times.append(elapsed)
cb = TimingCallback()
## Intersection-over-Union (IoU) metric, can be tracked instead of the accuracy during training
def mean_iou(y_true, y_pred):
    """Mean IoU averaged over prediction thresholds 0.5, 0.55, ..., 0.95.

    Built on the TF1 streaming metric `tf.metrics.mean_iou`: for each
    threshold the sigmoid output is binarized, the metric's internal
    counters are reset, and the update op is forced to run before the
    score tensor is read.
    """
    prec = []
    for t in np.arange(0.5, 1.0, 0.05):
        # Binarize the prediction at threshold t.
        y_pred_ = tf.to_int32(y_pred > t)
        # Streaming mean IoU over 2 classes (skin / non-skin).
        score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
        # Reset the metric's accumulator (local) variables for this threshold.
        K.get_session().run(tf.local_variables_initializer())
        # Ensure the update op executes before `score` is evaluated.
        with tf.control_dependencies([up_opt]):
            score = tf.identity(score)
        prec.append(score)
    return K.mean(K.stack(prec), axis=0)
# -
# ### This is where the actual implementation of the algorithm starts. You should run everything in order
# #### A) Get the training data (original images + masks). It is better that the images and masks have the same names. The only thing you need to be concerned with is the sorting of the images. They will be sorted by their names, so you want your original images and corresponding masks to have matching names. This section adds the original images' path to TRAIN_PATH, and the masks' path to MASK_PATH. You don't have to worry about the naming if you are using our datasets from Google Drive.
# #### You should replace the paths with the ones corresponding to your machine. Open a terminal, go to the All_Skin_Datasets directory, and type pwd. That would be the path to the datasets folder.
# +
# All skin datasets used for training. Each entry is (subdirectory, number of
# leading entries to drop after sorting -- Dataset5_VDM begins with an extra
# entry that the original code skipped with [1:]).
# The order matches the original TRAIN_PATH1..TRAIN_PATH8 concatenation order,
# which did NOT follow the dataset numbering (e.g. TDSD was stored in the
# *5 variables); the explicit table removes that confusion.
DATA_ROOT = '/Users/lydiazoghbi/Desktop/All_Skin_Datasets/'
DATASETS = [
    ('Dataset1_HGR', 0),
    ('Dataset4_Pratheepan', 0),
    ('Dataset5_VDM', 1),
    ('Dataset6_SFA', 0),
    ('Dataset2_TDSD', 0),
    ('Dataset3_Schmugge', 0),
    ('Dataset7_FSD', 0),
    ('Dataset8_Abdomen/train', 0),
]

def _collect_dataset(images_dir, masks_dir, drop_first=0):
    """Return sorted (image_names, mask_names) for one dataset directory."""
    image_names = sorted(next(os.walk(images_dir))[2])[drop_first:]
    mask_names = sorted(next(os.walk(masks_dir))[2])[drop_first:]
    return image_names, mask_names

TRAIN_PATH = []   # directory of the original image, one entry per image
MASK_PATH = []    # directory of the matching mask, one entry per image
train_ids = []    # image file names (sorted so they align with mask_ids)
mask_ids = []     # mask file names
for subdir, drop_first in DATASETS:
    images_dir = DATA_ROOT + subdir + '/original_images/'
    masks_dir = DATA_ROOT + subdir + '/skin_masks/'
    image_names, mask_names = _collect_dataset(images_dir, masks_dir, drop_first)
    train_ids.extend(image_names)
    mask_ids.extend(mask_names)
    # As in the original code, both path lists are sized by the image count.
    TRAIN_PATH.extend([images_dir] * len(image_names))
    MASK_PATH.extend([masks_dir] * len(image_names))
# Keep the original numpy-array type of the combined variables.
TRAIN_PATH = np.asarray(TRAIN_PATH)
MASK_PATH = np.asarray(MASK_PATH)
train_ids = np.asarray(train_ids)
mask_ids = np.asarray(mask_ids)
# -
# #### B) Shuffle the data and resize to the dimensions specified in the first block
# +
# Allocate the image and mask arrays that the training data is loaded into.
X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1))
print('Getting and resizing train images and masks ... ')
sys.stdout.flush()
# Random permutation of the sample indices so the concatenated datasets are
# shuffled before training.
g = list(range(0, len(train_ids)))
np.random.shuffle(g)
# Full path of every training image / mask, index-aligned with train_ids.
# (Replaces the original append loop; also drops the unused `pathmsk` local.)
strs_original = [TRAIN_PATH[n] + train_ids[n] for n in range(len(train_ids))]
strs_mask = [MASK_PATH[n] + mask_ids[n] for n in range(len(train_ids))]
# Read images from their paths (in shuffled order) and store them in the arrays
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
    path = strs_original[g[n]]
    img = imread(path)[:, :, :IMG_CHANNELS]
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    X_train[n] = img
    path = strs_mask[g[n]]
    img = imread(path)
    if img.ndim == 3:
        # Some masks are stored as RGB; keep a single channel.
        img = img[:, :, 1]
    img = np.expand_dims(resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                                preserve_range=True), axis=-1)
    if (np.unique(img).size) > 2:
        # Important: converts masks into binary labels -- resizing leaves
        # interpolated pixel values between 0 and 255.
        img = img > 30
    else:
        img = img > 0
    Y_train[n] = img
# -
# #### Save the data to load easily next time. The saving and loading might actually take more time than just running parts A and B. Your call!
# +
## Saving
# np.save("X_data",X_train)
# np.save("Y_data",Y_train)
## Loading
# X_train = np.load('./X_data.npy')
# Y_train = np.load('./Y_data.npy')
# -
# #### C) Double check your work! This will output the images and the corresponding masks. Very useful to ensure that the data has been correctly matched. If the images don't match chances are you've messed up the naming.
# Show one random image and its mask to verify image/mask alignment.
# BUG FIX: random.randint is inclusive on BOTH ends, so len(train_ids) itself
# could be drawn and index one past the end of X_train; use len - 1.
ix = random.randint(0, len(train_ids) - 1)
imshow(X_train[ix])
plt.show()
imshow(np.squeeze(Y_train[ix]))
plt.show()
# #### D) Construct the U-Net model, based on the "U-net:Convolutional networks for biomedical image segmentation" paper by Ronneberger et al.
# +
# U-Net (Ronneberger et al.): a 4-level contracting path, a bottleneck, and a
# 4-level expanding path with skip connections, ending in a 1-channel sigmoid
# skin-probability map.
inputs = Input((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS))
# Normalize uint8 pixels to [0, 1] inside the graph.
s = Lambda(lambda x: x/255) (inputs)
# Convolution layer
c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (s)
# Dropout layer
c1 = Dropout(0.1) (c1)
# Another convolution layer
c1 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c1)
# Maxpooling layer
p1 = MaxPooling2D((2, 2)) (c1)
# Each encoder level below repeats conv-dropout-conv-pool and doubles the
# filter count (16 -> 32 -> 64 -> 128 -> 256).
c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (p1)
c2 = Dropout(0.1) (c2)
c2 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c2)
p2 = MaxPooling2D((2, 2)) (c2)
c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (p2)
c3 = Dropout(0.2) (c3)
c3 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c3)
p3 = MaxPooling2D((2, 2)) (c3)
c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (p3)
c4 = Dropout(0.2) (c4)
c4 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c4)
p4 = MaxPooling2D(pool_size=(2, 2)) (c4)
# Bottleneck (lowest resolution, most filters).
c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (p4)
c5 = Dropout(0.3) (c5)
c5 = Conv2D(256, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c5)
# Deconvolution layer; each decoder level upsamples, halves the filters and
# concatenates the matching encoder output (skip connection).
u6 = Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same') (c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (u6)
c6 = Dropout(0.2) (c6)
c6 = Conv2D(128, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c6)
u7 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (u7)
c7 = Dropout(0.2) (c7)
c7 = Conv2D(64, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c7)
u8 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (u8)
c8 = Dropout(0.1) (c8)
c8 = Conv2D(32, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c8)
u9 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = concatenate([u9, c1], axis=3)
c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (u9)
c9 = Dropout(0.1) (c9)
c9 = Conv2D(16, (3, 3), activation='relu', kernel_initializer='he_normal', padding='same') (c9)
# Per-pixel skin probability in [0, 1].
outputs = Conv2D(1, (1, 1), activation='sigmoid') (c9)
model = Model(inputs=[inputs], outputs=[outputs])
# NOTE(review): hard-codes 4 GPUs -- this line fails on a machine without 4
# visible GPUs; adjust or remove when running locally.
model = multi_gpu_model(model, gpus=4)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['acc'])
# -
# #### E) Fit the model to the training data
# Stop training if the monitored metric has not improved for 30 epochs.
earlystopper = EarlyStopping(patience=30, verbose=1)
# Keep only the best epoch's weights on disk.
checkpointer = ModelCheckpoint('your_model_name.h5', verbose=1, save_best_only=True)
# Train on 80% of the data, validate on the last 20%; `cb` records epoch times.
results = model.fit(X_train, Y_train, validation_split=0.20, batch_size=64, epochs=50, shuffle=True,
                    callbacks=[earlystopper, checkpointer, cb])
# #### F) Output training results and plots
# +
# Summarize history for loss
plt.plot(results.history['loss'])
plt.plot(results.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Summarize history for accuracy (or mean iou, depending on what you use)
plt.plot(results.history['acc'])
plt.plot(results.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# Results and Plots
# model.summary()
print("UNET ARCHITECTURE")
print("-------------------------------------------------------------")
print("Total num of training images: %d" % len(train_ids))
print("Max num of epochs: %d" % 50)
print("Optimizer: %s" % 'ADAM')
print("Batch size: %d" % 64)
print("Loss function: %s" % 'Binary Cross-Entropy')
# BUG FIX: the fit above used validation_split=0.20, not 10%.
print("Validation data percentage: %d" % 20)
print("Early stoppping: %s" % 'Yes')
a = results.history["acc"]       # training accuracy per epoch
b = results.history["loss"]      # training loss per epoch
c = results.history["val_acc"]   # validation accuracy per epoch
d = results.history["val_loss"]  # validation loss per epoch
e = cb.times                     # wall-clock seconds per epoch
print("-------------------------------------------------------------")
header = "#"+" "+"Time sec"+" "+"Tr_acc"+" "+"Tr_loss"+" "+"Vl_acc"+" "+"Vl_loss"
print(header)
print("-------------------------------------------------------------")
# BUG FIX: `ep` was undefined (NameError). The number of completed epochs is
# the length of the recorded history (early stopping may end before 50).
for l in range(len(b)):
    # BUG FIX: renamed `str` -> `row` so the builtin str() is not shadowed.
    row = "%d\t\t%f\t\t%f\t\t%f\t\t%f\t\t%f" % (l, round(e[l],4),round(a[l],4),round(b[l],4),round(c[l],4),d[l])
    print(row.expandtabs(2))
print("-------------------------------------------------------------")
# -
# #### G) Test against training and validation samples
# +
# Load your trained model (mean_iou must be registered as a custom object
# in case the model was compiled with it)
model = load_model('your_model_name.h5', custom_objects={'mean_iou': mean_iou})
# Predict masks for the training data (first 80%, matching validation_split=0.20)
preds_train = model.predict(X_train[:int(X_train.shape[0]*0.8)], verbose=1)
# Predict masks for the validation data (the last 20%)
preds_val = model.predict(X_train[int(X_train.shape[0]*0.8):], verbose=1)
# Threshold the sigmoid outputs at 0.5 into uint8 binary masks that can be
# shown as images
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
# -
# #### H) See predicted masks for training samples
# Sanity check on random training samples
# BUG FIX: random.randint is inclusive on both ends, so len(preds_train_t)
# itself would be an out-of-range index; use len - 1.
ix = random.randint(0, len(preds_train_t) - 1)
imshow(X_train[ix])
plt.show()
imshow(np.squeeze(Y_train[ix]))
plt.show()
imshow(np.squeeze(preds_train_t[ix]))
plt.show()
# #### I) See predicted masks for validation data
# +
# Sanity check on random validation samples
# BUG FIX: randint's upper bound is inclusive -> use len - 1.
ix = random.randint(0, len(preds_val_t) - 1)
# BUG FIX: preds_val covers the LAST 20% of the data (split at 0.8), but
# these plots indexed from the last 30% (0.7), so image, mask and prediction
# did not correspond to each other. Use the same 0.8 split.
imshow(X_train[int(X_train.shape[0]*0.8):][ix])
plt.show()
# Plot the actual mask
imshow(np.squeeze(Y_train[int(Y_train.shape[0]*0.8):][ix]))
plt.show()
# Plot the predicted mask
imshow(np.squeeze(preds_val_t[ix]))
plt.show()
# -
# #### J) Load model, testing data and check against trained network (if masks exist)
# +
# Load the model for testing, same logic follows for extracting the testing data
model = load_model('your_model_name.h5', custom_objects={'mean_iou': mean_iou})
ABD_PATH = 'path_to_img_data'
MSK_PATH = 'path_to_mask_data'
# File names, sorted so images and masks stay index-aligned.
abd_ids = next(os.walk(ABD_PATH))[2]
msk_ids = next(os.walk(MSK_PATH))[2]
abd_ids.sort()
msk_ids.sort()
abd = np.zeros((len(abd_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
msk = np.zeros((len(msk_ids), IMG_HEIGHT, IMG_WIDTH), dtype=np.uint8)
sys.stdout.flush()
# Read and resize the test images.
for n, id_ in tqdm(enumerate(abd_ids), total=len(abd_ids)):
    path = ABD_PATH + id_
    img = imread(path)[:, :, :IMG_CHANNELS]
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    abd[n] = img
# Read, resize and binarize the ground-truth masks.
for n, id_ in tqdm(enumerate(msk_ids), total=len(msk_ids)):
    path = MSK_PATH + id_
    img = imread(path)
    if img.ndim == 3:
        # RGB-encoded mask: keep a single channel.
        img = img[:, :, 1]
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                 preserve_range=True)
    if (np.unique(img).size) > 2:
        img = img > 30  # Important, needed to make labels 0's and 1's only
    else:
        img = img > 0
    img = img.astype(np.uint8)
    msk[n] = img
# Actual predictions (the `[:int(abd.shape[0])]` slice was a no-op; removed).
preds_test = model.predict(abd, verbose=1)
# Threshold predictions
preds_test_t = (preds_test > 0.5).astype(np.uint8)
# Overall accuracy on abdomen pictures (stray trailing semicolon removed)
answer = acc_comp(msk, preds_test_t)
## Save TP, TN, FP and FN results in a .npy file if you want to analyze further the results
# a = np.reshape(answer[2],(100,1))
# b = np.reshape(answer[3],(100,1))
# c = np.reshape(answer[4],(100,1))
# d = np.reshape(answer[5],(100,1))
# g = np.concatenate([a,b,c,d],axis = 1)
# np.save('your_file_name.npy',g)
# -
# #### K) Visualize results
# This will output ALL the test results, so be careful
for j in range(len(abd_ids)):
    print(j)
    # BUG FIX: each imshow is now followed by plt.show() so the image, the
    # prediction AND the ground-truth mask all render every iteration (the
    # original opened with a useless plt.show() and never displayed the
    # final mask).
    imshow(abd[j])
    plt.show()
    imshow(np.squeeze(preds_test_t[j]*255))
    plt.show()
    imshow(np.squeeze(msk[j]))
    plt.show()
# #### J) Load model, testing data and check against trained network (if masks do NOT exist)
# +
# Load the trained model; same extraction logic as above, but for test data
# that has no ground-truth masks.
model = load_model('model_name.h5', custom_objects={'mean_iou': mean_iou})
ABD_PATH = 'path_to_img_data'
abd_ids = next(os.walk(ABD_PATH))[2]
abd_ids.sort()
abd = np.zeros((len(abd_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
sys.stdout.flush()
for n, id_ in tqdm(enumerate(abd_ids), total=len(abd_ids)):
    path = ABD_PATH + id_
    img = imread(path)[:, :, :IMG_CHANNELS]
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    abd[n] = img
# Actual predictions (the `[:int(abd.shape[0])]` slice was a no-op; removed).
preds_test = model.predict(abd, verbose=1)
# Threshold predictions
preds_test_t = (preds_test > 0.5).astype(np.uint8)
# -
# #### K) Visualize results
for j in range(len(abd_ids)):
    print(j)
    # Show the test image and its predicted mask; each imshow is followed by
    # plt.show() so both actually render every iteration (the original's
    # leading plt.show() displayed nothing).
    imshow(abd[j])
    plt.show()
    imshow(np.squeeze(preds_test_t[j]*255))
    plt.show()
# #### L) Calculate Metrics for Abdomen Dataset
# +
## Load Data
# BUG FIX: these paths were one-element lists, so os.walk(...) and the
# `path + id_` concatenations below raised TypeError; plain strings are correct.
ABD_PATH = '/Users/lydiazoghbi/Desktop/All_Skin_Datasets/Dataset8_Abdomen/test/original_images/'
MSK_PATH = '/Users/lydiazoghbi/Desktop/All_Skin_Datasets/Dataset8_Abdomen/test/skin_masks/'
abd_ids = next(os.walk(ABD_PATH))[2]
msk_ids = next(os.walk(MSK_PATH))[2]
abd_ids.sort()
msk_ids.sort()
## Calculating Predictions
abd = np.zeros((len(abd_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
msk = np.zeros((len(msk_ids), IMG_HEIGHT, IMG_WIDTH), dtype=np.uint8)
sys.stdout.flush()
for n, id_ in tqdm(enumerate(abd_ids), total=len(abd_ids)):
    path = ABD_PATH + id_
    img = imread(path)[:, :, :IMG_CHANNELS]
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    abd[n] = img
for n, id_ in tqdm(enumerate(msk_ids), total=len(msk_ids)):
    path = MSK_PATH + id_
    img = imread(path)
    if img.ndim == 3:
        # CONSISTENCY FIX: like every other mask loader in this file, keep a
        # single channel of RGB-encoded masks before resizing.
        img = img[:, :, 1]
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant',
                 preserve_range=True)
    if (np.unique(img).size) >= 2:
        img = img > 30  # Important, needed to make labels 0's and 1's only
    else:
        img = img > 0
    img = img.astype(np.uint8)
    msk[n] = img
# Actual Predictions
preds_test = model.predict(abd, verbose=1)
# Threshold predictions
preds_test_t = (preds_test > 0.5).astype(np.uint8)
# Calculating Metrics
# BUG FIX: two statements were fused on one line (a SyntaxError), and the
# whole tuple returned by acc_comp was averaged instead of the per-image
# accuracies, which are its first element.
answer = acc_comp(msk, preds_test_t)
mean_acc = answer[0]
avg = np.mean(mean_acc)
std = np.std(mean_acc)
print("average " + str(avg) )
print("STD " + str(std) )
| UNET and Features/U-Net.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Load the Social Network Ads dataset.
dataset = pd.read_csv('/home/gray/Desktop/dataset/Social_Network_Ads.csv')
dataset.head()
# Features are CSV columns 2-3, target is column 4 -- confirm against the
# file's header (schema not visible here).
X = dataset.iloc[:, [2, 3]]
Y = dataset.iloc[:, [4]]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.25, random_state = 0)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
# BUG FIX: the test set must be scaled with the statistics fitted on the
# training set; calling fit_transform here re-fit the scaler on the test
# data (inconsistent scaling / information leakage).
X_test = sc.transform(X_test)
# +
from sklearn.tree import DecisionTreeClassifier
# Fit an entropy-criterion decision tree (fixed seed for reproducibility)
# and predict labels for the held-out test set. fit() returns the estimator,
# so construction and fitting chain into one expression.
classifier = DecisionTreeClassifier(criterion='entropy', random_state=0).fit(X_train, y_train)
y_pred = classifier.predict(X_test)
# -
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
# GENERALIZED: the original hard-coded the diagonal counts (61+29); the trace
# of the confusion matrix is the number of correct predictions for ANY split,
# so this no longer silently breaks when the data or seed changes.
accuracy = np.trace(cm) / np.sum(cm)
accuracy
# Train vs test accuracy; a large gap indicates overfitting (unpruned trees
# typically score 1.0 on training data).
train_score = classifier.score(X_train, y_train)
test_score = classifier.score(X_test, y_test)
print(train_score)
print(test_score)
| Edureka_MicroCourse/DecissionTree.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
import Pkg
Pkg.activate(".")
using InterpolatedRejectionSampling
using PyPlot
# Unnormalized 2-D demo density: sin(x) weighted by y squared.
somefunc(x, y) = sin(x) * y^2
# Discretize somefunc on an n1 x n2 grid over [0, π/2] x [0, 2];
# returns the two axis ranges and the matrix of density values.
function somedist(n1, n2)
    xs = range(0, stop=π/2, length=n1)
    ys = range(0, stop=2, length=n2)
    zs = [somefunc(x, y) for x = xs, y = ys]
    return xs, ys, zs
end
# High-resolution discretization used only for visualization.
X_highres, Y_highres, Z_highres = somedist(100,100);
# View the 2d distribution
# Build full coordinate grids (meshgrid-style) matching Z_highres, since
# PyPlot's surface/pcolormesh calls want per-cell x and y coordinates.
Xgrid = Matrix{Float64}(undef,size(Z_highres))
Ygrid = Matrix{Float64}(undef,size(Z_highres))
for (i,x) in enumerate(X_highres)
    for (j,y) in enumerate(Y_highres)
        Xgrid[i,j] = x
        Ygrid[i,j] = y
    end
end
;
plot_surface(Xgrid, Ygrid, Z_highres)
pcolormesh(Xgrid,Ygrid,Z_highres)
ax = gca()
ax.set_title("discretized density")
# Coarse knot grid and the density values evaluated on it; these drive the
# interpolated rejection sampler.
knots = (range(0, stop=π/2, length=10),
         range(0, stop=2, length=9));
vals = [somefunc(x,y) for x=knots[1],y=knots[2]]
n = 100000
# BUG FIX: `irsample(X, Y, n)` referenced undefined names (only X_highres /
# Y_highres exist at this scope); the sampler takes the knot grid and the
# density values built directly above.
xy = irsample(knots, vals, n)
hist2D(xy[1,:], xy[2,:],bins=100)
| README.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7 (tensorflow)
# language: python
# name: tensorflow
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pylab as pl
import scipy.optimize as opt
from sklearn import preprocessing
# %matplotlib inline
# ## About the dataset
# We will use a telecommunications dataset for predicting customer churn. This is a historical customer dataset where each row represents one customer. The data is relatively easy to understand, and you may uncover insights you can use immediately. Typically it is less expensive to keep customers than acquire new ones, so the focus of this analysis is to predict the customers who will stay with the company.
#
# This data set provides information to help you predict what behavior will help you to retain customers. You can analyze all relevant customer data and develop focused customer retention programs.
#
# The dataset includes information about:
#
# <li> Customers who left within the last month – the column is called Churn
# <li> Services that each customer has signed up for – phone, multiple lines, internet, online security, online backup, device protection, tech support, and streaming TV and movies
# <li> Customer account information – how long they had been a customer, contract, payment method, paperless billing, monthly charges, and total charges
# <li> Demographic info about customers – gender, age range, and if they have partners and dependents.
#
#
# Download the churn dataset into the working directory (re-running will
# download it again; skip these two lines if ChurnData.csv already exists).
import wget
wget.download('https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/ChurnData.csv')
df = pd.read_csv('ChurnData.csv')
df.head()
# Keep only the columns used for modeling; the label must be an int for sklearn.
df=df[['tenure', 'age', 'address', 'income', 'ed', 'employ', 'equip', 'callcard', 'wireless','churn']]
df['churn']=df['churn'].astype('int')
df.head()
df.shape
# Feature matrix (seven numeric predictors) and label vector as numpy arrays.
feature_columns = ["tenure", "age", "address", "income", "ed", "employ", "equip"]
X = np.asarray(df[feature_columns])
X[0:5]
y = np.asarray(df["churn"])
y[0:5]
from sklearn.preprocessing import StandardScaler
# Standardize each feature to zero mean / unit variance.
X = preprocessing.StandardScaler().fit_transform(X)
X[:5]
# ## Train - Test Data Split
# Hold out 20% of the samples for testing; fixed random_state for reproducibility.
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test=train_test_split(X,y,test_size=0.2,random_state=4)
print ('Train set:', X_train.shape, Y_train.shape)
print ('Test set:', X_test.shape, Y_test.shape)
# ## Modeling (Logistic Regression with Scikit-learn)
#
# Lets build our model using LogisticRegression from Scikit-learn package. This function implements logistic regression and can use different numerical optimizers to find parameters, including ‘newton-cg’, ‘lbfgs’, ‘liblinear’, ‘sag’, ‘saga’ solvers. You can find extensive information about the pros and cons of these optimizers if you search it in internet.
#
# The version of Logistic Regression in Scikit-learn, support regularization. Regularization is a technique used to solve the overfitting problem in machine learning models. C parameter indicates inverse of regularization strength which must be a positive float. Smaller values specify stronger regularization. Now lets fit our model with train set
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
# C=0.1 -> fairly strong regularization; liblinear suits small datasets.
LR=LogisticRegression(C=0.1,solver='liblinear')
LR.fit(X_train,Y_train)
# Hard class predictions (0/1) for the test set.
yhat=LR.predict(X_test)
yhat
# Class-membership probabilities, one column per class.
yhat_prob=LR.predict_proba(X_test)
yhat_prob
# +
import seaborn as sns
# Overlay the distribution of the actual churn labels and the predicted ones.
plt.figure(figsize=(8,6))
ax1 = sns.distplot(df['churn'], hist=False, color="r", label="Actual Value")
sns.distplot(yhat, hist=False, color="b", label="Fitted Values", ax=ax1)
# BUG FIX: the title and axis labels were copied from an unrelated car-price
# notebook ("Price (in dollars)", "Proportion of Cars"); label the plot for
# what it actually shows.
plt.title('Actual vs Fitted Values for Churn')
plt.xlabel('Churn')
plt.ylabel('Density')
plt.show()
plt.close()
# -
| Data_Analysis/Logistic_Regression/Logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="N6ZDpd9XzFeN"
# ##### Copyright 2018 The TensorFlow Hub Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + cellView="form" colab={} colab_type="code" id="KUu4vOt5zI9d"
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# + [markdown] colab_type="text" id="ok9PfyoQ2rH_"
# # How to solve a problem on Kaggle with TF-Hub
#
# + [markdown] colab_type="text" id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/hub/tutorials/text_classification_with_tf_hub_on_kaggle"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/text_classification_with_tf_hub_on_kaggle.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/text_classification_with_tf_hub_on_kaggle.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/text_classification_with_tf_hub_on_kaggle.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="556YQZLUO4Ih"
# TF-Hub is a platform to share machine learning expertise packaged in reusable resources, notably pre-trained **modules**. In this tutorial, we will use a TF-Hub text embedding module to train a simple sentiment classifier with a reasonable baseline accuracy. We will then submit the predictions to Kaggle.
#
# For more detailed tutorial on text classification with TF-Hub and further steps for improving the accuracy, take a look at [Text classification with TF-Hub](https://colab.research.google.com/github/tensorflow/hub/blob/master/docs/tutorials/text_classification_with_tf_hub.ipynb).
# + [markdown] colab_type="text" id="Q4DN769E2O_R"
# ## Setup
# + colab={} colab_type="code" id="9KyLct9rq0lo"
# !pip install -q kaggle
# This notebook uses features from tf 2.2
# !pip install tf-nightly
# + colab={} colab_type="code" id="v7hy0bhngTUp"
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import zipfile
from sklearn import model_selection
# + [markdown] colab_type="text" id="JvgBdeMsuu_3"
# Since this tutorial will be using a dataset from Kaggle, it requires [creating an API Token](https://github.com/Kaggle/kaggle-api) for your Kaggle account, and uploading it to the Colab environment.
# + colab={} colab_type="code" id="nI7C-Zc4urOH"
import os
import pathlib
# Upload the API token.
def get_kaggle():
    """Return the `kaggle` module, installing an API token first if needed.

    Importing `kaggle` raises OSError when no token is configured; in that
    case the Colab user is asked to upload kaggle.json, which is installed
    under ~/.kaggle with owner-only permissions before importing again.
    """
    try:
        import kaggle
        return kaggle
    except OSError:
        # No token configured yet -- fall through to the upload flow.
        pass

    token_file = pathlib.Path("~/.kaggle/kaggle.json").expanduser()
    token_file.parent.mkdir(exist_ok=True, parents=True)

    try:
        from google.colab import files
    except ImportError:
        raise ValueError("Could not find kaggle token.")

    uploaded = files.upload()
    token_content = uploaded.get('kaggle.json', None)
    if not token_content:
        raise ValueError('Need a file named "kaggle.json"')
    # Install the token with the permissions the kaggle client requires.
    token_file.write_bytes(token_content)
    token_file.chmod(0o600)

    import kaggle
    return kaggle

kaggle = get_kaggle()
# + [markdown] colab_type="text" id="6OPyVxHuiTEE"
# # Getting started
#
# ## Data
# We will try to solve the [Sentiment Analysis on Movie Reviews](https://www.kaggle.com/c/sentiment-analysis-on-movie-reviews/data) task from Kaggle. The dataset consists of syntactic subphrases of the Rotten Tomatoes movie reviews. The task is to label the phrases as **negative** or **positive** on the scale from 1 to 5.
#
# You must [accept the competition rules](https://www.kaggle.com/c/sentiment-analysis-on-movie-reviews/data) before you can use the API to download the data.
#
# + cellView="both" colab={} colab_type="code" id="rKzc-fOGV72G"
# Human-readable names for the five sentiment classes; index == class id.
SENTIMENT_LABELS = [
    "negative", "somewhat negative", "neutral", "somewhat positive", "positive"
]

# Add a column with readable values representing the sentiment.
def add_readable_labels_column(df, sentiment_value_column):
    """Attach a SentimentLabel column mapping class ids 0-4 to label text."""
    id_to_label = dict(enumerate(SENTIMENT_LABELS))
    df["SentimentLabel"] = df[sentiment_value_column].replace(id_to_label)
# Download data from Kaggle and create a DataFrame.
def load_data_from_zip(path):
    """Read the first member of the zip archive at `path` as a tab-separated
    DataFrame, using its first column as the index."""
    with zipfile.ZipFile(path, "r") as archive:
        first_member = archive.namelist()[0]
        with archive.open(first_member) as handle:
            return pd.read_csv(handle, sep="\t", index_col=0)
# The data does not come with a validation set so we'll create one from the
# training set.
def get_data(competition, train_file, test_file, validation_set_ratio=0.1):
    """Download a Kaggle competition's data and return (train, validation, test).

    Parameters
    ----------
    competition : Kaggle competition slug to download.
    train_file, test_file : names of the zipped TSV members in the download.
    validation_set_ratio : fraction of *sentences* held out for validation.
    """
    data_path = pathlib.Path("data")
    # Downloads <competition>.zip into data_path.
    kaggle.api.competition_download_files(competition, data_path)
    competition_path = (data_path/competition)
    competition_path.mkdir(exist_ok=True, parents=True)
    competition_zip_path = competition_path.with_suffix(".zip")
    with zipfile.ZipFile(competition_zip_path, "r") as zip_ref:
        zip_ref.extractall(competition_path)
    train_df = load_data_from_zip(competition_path/train_file)
    test_df = load_data_from_zip(competition_path/test_file)
    # Add a human readable label.
    add_readable_labels_column(train_df, "Sentiment")
    # We split by sentence ids, because we don't want to have phrases belonging
    # to the same sentence in both training and validation set.
    train_indices, validation_indices = model_selection.train_test_split(
        np.unique(train_df["SentenceId"]),
        test_size=validation_set_ratio,
        random_state=0)
    validation_df = train_df[train_df["SentenceId"].isin(validation_indices)]
    train_df = train_df[train_df["SentenceId"].isin(train_indices)]
    print("Split the training data into %d training and %d validation examples." %
          (len(train_df), len(validation_df)))
    return train_df, validation_df, test_df

train_df, validation_df, test_df = get_data(
    "sentiment-analysis-on-movie-reviews",
    "train.tsv.zip", "test.tsv.zip")
# + [markdown] colab_type="text" id="DFq_EyS1BEyK"
# Note: In this competition the task is not to rate entire reviews, but individual phrases from withion the reviews. This is a much harder task.
# + colab={} colab_type="code" id="42hgsiWNq5y9"
train_df.head(20)
# + [markdown] colab_type="text" id="YPuHgx3BWBOg"
# ## Training an Model
#
# *Note: We could model this task also as a regression, see [Text classification with TF-Hub](https://colab.research.google.com/github/tensorflow/hub/blob/master/docs/tutorials/text_classification_with_tf_hub.ipynb).*
# + colab={} colab_type="code" id="23U30yEkVq4w"
class MyModel(tf.keras.Model):
    """Sentiment classifier: a TF-Hub text embedding followed by a small MLP.

    Outputs 5 unnormalized logits, one per sentiment class (the training
    loss is configured with from_logits=True).
    """

    def __init__(self, hub_url):
        super().__init__()
        # URL kept on the instance so get_config() can serialize the model.
        self.hub_url = hub_url
        self.embed = hub.load(self.hub_url).signatures['default']
        self.sequential = tf.keras.Sequential([
            tf.keras.layers.Dense(500),
            tf.keras.layers.Dense(100),
            tf.keras.layers.Dense(5),
        ])

    def call(self, inputs):
        # The 'Phrase' feature is indexed as 2-D here; take column 0 to get a
        # 1-D batch of phrases for the embedding signature.
        phrases = inputs['Phrase'][:,0]
        # NOTE(review): the 5x factor rescales the embedding before the MLP;
        # presumably a hand-tuned gain -- confirm, it is not explained here.
        embedding = 5*self.embed(phrases)['default']
        return self.sequential(embedding)

    def get_config(self):
        return {"hub_url":self.hub_url}
# + colab={} colab_type="code" id="JE--GDMM2tSp"
# Build the classifier on top of the 128-dim NNLM English embedding module.
model = MyModel("https://tfhub.dev/google/nnlm-en-dim128/1")
# from_logits=True because the final Dense layer has no softmax.
model.compile(
    loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer=tf.optimizers.Adam(),
    metrics = [tf.keras.metrics.SparseCategoricalAccuracy(name="accuracy")])
# + colab={} colab_type="code" id="SRr-lvhstiNw"
# dict(df) feeds each dataframe column as a named model input.
history = model.fit(x=dict(train_df), y=train_df['Sentiment'],
                    validation_data=(dict(validation_df), validation_df['Sentiment']),
                    epochs = 25)
# + [markdown] colab_type="text" id="s8j7YTRSe7Pj"
# # Prediction
#
# Run predictions for the validation set and training set.
# + colab={} colab_type="code" id="iGqVNSl87bgN"
# Plot the learning curves. The original drew two unlabeled lines, which are
# indistinguishable; add labels, axes and a legend so the chart is readable.
plt.plot(history.history['accuracy'], label='training accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.legend()
# + colab={} colab_type="code" id="zbLg5LzGwAfC"
# Evaluate on both splits; evaluate() returns [loss, accuracy] per compile().
train_eval_result = model.evaluate(dict(train_df), train_df['Sentiment'])
validation_eval_result = model.evaluate(dict(validation_df), validation_df['Sentiment'])
print(f"Training set accuracy: {train_eval_result[1]}")
print(f"Validation set accuracy: {validation_eval_result[1]}")
# + [markdown] colab_type="text" id="DR2IsTF5vuAX"
# ## Confusion matrix
#
# Another very interesting statistic, especially for multiclass problems, is the [confusion matrix](https://en.wikipedia.org/wiki/Confusion_matrix). The confusion matrix allows visualization of the proportion of correctly and incorrectly labelled examples. We can easily see how much our classifier is biased and whether the distribution of labels makes sense. Ideally the largest fraction of predictions should be distributed along the diagonal.
# + colab={} colab_type="code" id="yKUnJFYY8bO_"
# Predict class logits for the validation set and take the arg-max class.
predictions = model.predict(dict(validation_df))
predictions = tf.argmax(predictions, axis=-1)
predictions
# + colab={} colab_type="code" id="fjAs8W_Z9BvP"
# Confusion matrix, normalised so that each true-label row sums to 1.
cm = tf.math.confusion_matrix(validation_df['Sentiment'], predictions)
cm = cm/cm.numpy().sum(axis=1)[:, tf.newaxis]
# + colab={} colab_type="code" id="nT71CtArpsKz"
# Heatmap of the row-normalised confusion matrix with readable class labels.
sns.heatmap(
    cm, annot=True,
    xticklabels=SENTIMENT_LABELS,
    yticklabels=SENTIMENT_LABELS)
plt.xlabel("Predicted")
plt.ylabel("True")
# + [markdown] colab_type="text" id="Pic7o2m04weY"
# We can easily submit the predictions back to Kaggle by pasting the following code to a code cell and executing it:
#
# ``` python
# test_predictions = model.predict(dict(test_df))
# test_predictions = np.argmax(test_predictions, axis=-1)
#
# result_df = test_df.copy()
#
# result_df["Predictions"] = test_predictions
#
# result_df.to_csv(
# "predictions.csv",
# columns=["Predictions"],
# header=["Sentiment"])
# kaggle.api.competition_submit("predictions.csv", "Submitted from Colab",
# "sentiment-analysis-on-movie-reviews")
# ```
#
# + [markdown] colab_type="text" id="50BLu-JX_dlm"
# After submitting, [check the leaderboard](https://www.kaggle.com/c/sentiment-analysis-on-movie-reviews/leaderboard) to see how you did.
| site/en-snapshot/hub/tutorials/text_classification_with_tf_hub_on_kaggle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Poisson Distribution - Errors in Text
#
# > This document is written in *R*.
# >
# > ***GitHub***: https://github.com/czs108
# ## Background
#
# > In a certain long document there is an *average* of **0.5** typographical errors per **100** words of text.
# ## Question A
#
# > What is the *mean* number of words between errors?
# \begin{equation}
# Mean = \frac{1}{0.5} \times 100 = 200
# \end{equation}
# ## Question B
#
# > What is the probability of finding **4** errors in a text of length **500** words?
# \begin{equation}
# \lambda = 0.5 \times \frac{500}{100} = 2.5
# \end{equation}
#
# \begin{equation}
# \begin{split}
# P(X = 4) &= \frac{e^{-\lambda} \cdot {\lambda}^{4}}{4!} \\
# &= \frac{e^{-2.5} \cdot {2.5}^{4}}{4!}
# \end{split}
# \end{equation}
# Use the `dpois` function.
# P(X = 4) for a Poisson variable with lambda = 2.5.
dpois(x=4, lambda=2.5)
# ## Question C
#
# > What is the probability of there being *at least* **300** words before the *1st* error?
# \begin{equation}
# \lambda = 0.5 \times \frac{300}{100} = 1.5
# \end{equation}
#
# \begin{equation}
# \begin{split}
# P(X = 0) &= \frac{e^{-\lambda} \cdot {\lambda}^{0}}{0!} \\
# &= e^{-1.5}
# \end{split}
# \end{equation}
# P(X = 0) with lambda = 1.5: no error in the first 300 words.
dpois(x=0, lambda=1.5)
# Use the `exp` function.
# Same value computed directly as e^(-1.5).
exp(-1.5)
# ## Question D
#
# > What is the *minimum* number of words in which the probability of finding an error is *at least* **90%**?
# We know that
#
# \begin{equation}
# P(X \geq 1) \geq 0.9
# \end{equation}
#
# So
#
# \begin{equation}
# P(X = 0) < 0.1
# \end{equation}
#
# Assume $n$ is the number of words.
#
# \begin{equation}
# \lambda = 0.5 \times \frac{n}{100}
# \end{equation}
#
# \begin{equation}
# \begin{split}
# P(X = 0) &= \frac{e^{-\lambda} \cdot {\lambda}^{0}}{0!} \\
# &= e^{-\lambda} \\
# &< 0.1
# \end{split}
# \end{equation}
#
# Then we get
#
# \begin{equation}
# \ln 0.1 = -2.3
# \end{equation}
# Natural log of 0.1 (about -2.3): the threshold for -lambda.
log(0.1)
# When $-\lambda < -2.3$, $P(X = 0) < 0.1$.
#
# So $\lambda > 2.3$
#
# \begin{equation}
# n = 200 \times \lambda > 460
# \end{equation}
# ## Question E
#
# > How many words would there be on a page, if the probability of **0** errors on a page was **20%**?
# Assume $n$ is the number of words on a page.
# \begin{equation}
# \lambda = 0.5 \times \frac{n}{100}
# \end{equation}
#
# \begin{equation}
# \begin{split}
# P(X = 0) &= \frac{e^{-\lambda} \cdot {\lambda}^{0}}{0!} \\
# &= e^{-\lambda} \\
# &= 0.2
# \end{split}
# \end{equation}
#
# Then we get
#
# \begin{align}
# \ln 0.2 = -\lambda \\
# \lambda = 1.609
# \end{align}
# lambda = -ln(0.2), about 1.609.
-log(0.2)
# \begin{equation}
# n = 200 \times \lambda = 322
# \end{equation}
# ## Question F
#
# > What is the probability of there being *at least* **2000** words before there are **10** errors?
# \begin{equation}
# \lambda = 0.5 \times \frac{2000}{100} = 10
# \end{equation}
#
# \begin{equation}
# \begin{split}
# P(X \leq 9) &= \sum_{i=0}^{9} P(X = i) \\
# &= \sum_{i=0}^{9} \frac{e^{-10} \cdot {10}^{i}}{i!}
# \end{split}
# \end{equation}
# P(X <= 9) summed term by term for lambda = 10.
sum(dpois(x=c(0:9), lambda=10))
# Or use the `ppois` function.
# Cumulative distribution directly: P(X <= 9).
ppois(q=9, lambda=10)
| exercises/Poisson Distribution - Errors in Text.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''.venv'': venv)'
# language: python
# name: python3
# ---
import numpy as np
import cv2
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the test image (BGR channel order, as OpenCV reads it) and convert to
# grayscale for the Haar-cascade detector. NOTE(review): cv2.imread returns
# None on a bad path -- confirm the relative path before running.
image = cv2.imread('Computer Vision/people.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.imshow(gray, cmap='gray')
# Pre-trained frontal-face Haar cascade shipped with OpenCV.
faceCascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
# Detect faces; each hit is an (x, y, w, h) rectangle in pixel coordinates.
faces = faceCascade.detectMultiScale(
    gray,
    scaleFactor=1.1,  # image pyramid scale step between detection passes
    minNeighbors=5,  # neighbour rectangles required to accept a detection
    minSize=(30, 30),  # ignore candidate faces smaller than 30x30 px
    flags=cv2.CASCADE_SCALE_IMAGE
)
len(faces)
# Draw a green bounding box around every detected face. (The original kept an
# `i` counter that was incremented but never used; it has been removed.)
for (x, y, w, h) in faces:
    cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
plt.axis("off")
# Convert BGR -> RGB so matplotlib shows the colours correctly.
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.show()
# cv2.imwrite returns True on success, False otherwise.
status = cv2.imwrite('faces_detected.jpg', image)
print("[INFO] Image faces_detected.jpg written to filesystem: ", status)
| ML/Week8-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Small overview of HoloViz capability of data exploration
# <style>div.container { width: 100% }</style>
# <img style="float:left; vertical-align:text-bottom;" width="172" src="https://holoviz.org/assets/holoviz-logo-stacked.svg" />
# <div style="float:right; vertical-align:text-bottom;"></div>
# This notebook is intended to present a small overview of HoloViz and the capability for data exploration, with interactive plots (show difference between matplotlib and bokeh). Many parts are based on or copied from the official [HoloViz Tutorial](https://holoviz.org/tutorial/index.html) (highly recommended for a more extensive overview of the possibilities of HoloViz).
#
# Note: In June 2019 the project name changed from [PyViz](https://pyviz.org/) to [HoloViz](https://holoviz.org/). The reason for this is explained in this [blog post](http://blog.pyviz.org/pyviz-holoviz.html).
# ## HoloViz Packages used for this notebook
# <br>
# <table>
# <tr>
# <td><img src="https://raw.githubusercontent.com/pyviz/holoviz/master/examples/assets/hvplot.png" width=90px style="margin: 0px 25%"></td>
# <td><img src="https://raw.githubusercontent.com/pyviz/holoviz/master/examples/assets/bokeh.png" width=100px style="margin: 0px 25%"></td>
# <td><img src="https://raw.githubusercontent.com/pyviz/holoviz/master/examples/assets/holoviews.png" width=130px style="margin: 0px 25%"></td>
# <td><img src="https://raw.githubusercontent.com/pyviz/holoviz/master/examples/assets/geoviews.png" width=130px style="margin: 0px 25%"></td>
# </tr>
# </table>
# ## Exploring Pandas Dataframes
#
# If your data is in a Pandas dataframe, it's natural to explore it using the ``.plot()`` method (based on Matplotlib). Let's have a look at some automatic weather station data from Langenferner:
import pandas as pd
# Automatic weather station data from Langenferner; column 0 is the
# timestamp and becomes the (parsed) datetime index.
url = 'https://cluster.klima.uni-bremen.de/~oggm/tutorials/aws_data_Langenferner_UTC+2.csv'
df = pd.read_csv(url, index_col=0, parse_dates=True)
df.head()
# Just calling ``.plot()`` won't give anything meaningful, because of the different magnitudes of the parameters:
df.plot();
# Of course we can have a look at one variable only:
# Static matplotlib line plot of the temperature series.
df.TEMP.plot();
# This creates a static plot using matplotlib. With this approach we also can make some further explorations, like calculating the monthly mean temperature:
# Monthly mean of every column. Use the documented upper-case 'M'
# (month-end) offset alias; the original lower-case 'm' relied on
# case-insensitive alias handling that is deprecated in recent pandas.
dfm = df.resample('M').mean()
dfm.TEMP.plot();
# We can see the course of the parameter, but we cannot tell the exact temperature in January, and we also cannot zoom in.
# ## Exploring Data with hvPlot and Bokeh
# If we are using ```hvplot``` instead we can create interactive plots with the same plotting API:
# you might need to install first [hvplot](https://hvplot.holoviz.org/getting_started/index.html) via e.g. conda install -c pyviz hvplot
# +
# Importing hvplot.pandas registers the .hvplot accessor on pandas objects.
import hvplot.pandas
# Interactive Bokeh line plot of the temperature series.
df.TEMP.hvplot()
# -
# Now you have an interactive plot using bokeh with zooming option and hover with additional information (get the exact values and timestamps), also possible for all variables but again not very meaningful:
# Interactive plot of all columns at once.
plot = df.hvplot()
plot
# But at least you can use your mouse to hover over each variable and explore their values. Furthermore, by clicking on the legend the colors can be switched on/off. Still, different magnitudes make it hard to see all parameters at once.
#
# Here the interactive features are provided by the [Bokeh](http://bokeh.pydata.org) JavaScript-based plotting library. But what's actually returned by this call is an overlay of something called a [HoloViews](http://holoviews.org) object, here specifically a HoloViews [Curve](http://holoviews.org/reference/elements/bokeh/Curve.html). HoloViews objects *display* as a Bokeh plot, but they are actually much richer objects that make it easy to capture your understanding as you explore the data.
# The repr shows the HoloViews object structure rather than the rendered plot.
print(plot)
# This object can be converted to a `HoloMap` object (using the HoloViews Package and declare bokeh to use for plotting) to create a widget that can be used to select the variables from.
import holoviews as hv
# Select Bokeh as the rendering backend.
hv.extension('bokeh')
holo_plot = hv.HoloMap(plot)
print(holo_plot)
holo_plot.opts(width=700, height=500)
# But first have a look at the HoloViews Objects.
# ## HoloViews Objects
# Creating a simple HoloViews Object:
import numpy as np
# +
# Sample the inverted parabola y = 100 - x^2 on [-10, 10].
xs = np.arange(-10, 10.5, 0.5)
ys = 100 - xs**2
df_xy = pd.DataFrame(dict(x=xs, y=ys))
# Declare x as the key (independent) and y as the value (dependent) dimension.
simple_curve = hv.Curve(df_xy, 'x', 'y')
print(simple_curve)
# -
# ``:Curve [x] (y)`` is HoloViews's shorthand for saying that the data in ``df_xy`` is a set of samples from a continuous function ``y`` of one independent variable ``x``, and ``simple_curve`` simply pairs your dataframe ``df_xy`` with this semantic declaration.
#
# Once we've captured this crucial bit of metadata, HoloViews now knows enough about this object to represent it graphically, as it will do by default in a Jupyter notebook:
# Displaying the element renders it as a Bokeh plot.
simple_curve
# This Bokeh plot is much more convenient to examine than a column of numbers, because it conveys the entire set of data in a compact, easily appreciated, interactively explorable format. HoloViews knew that a continuous curve like this is the right representation for what would otherwise be just a table of numbers, because we explicitly declared the element type as ``hv.Curve``. Crucially, ``simple_curve`` itself is not a plot, it's just a simple wrapper around your data that happens to have a convenient graphical representation. The full dataframe will always be available as ``simple_curve.data``, for any numerical computations you would like to do:
# The wrapped dataframe remains available for numerical work.
simple_curve.data.tail()
# As you can see, with HoloViews you don't have to select between plotting your data and working with it numerically. Any HoloViews object will let you do *both* conveniently; you can simply choose whatever representation is the most appropriate way to approach the task you are doing. This approach is very different from a traditional plotting program, where the objects you create (e.g. a Matplotlib figure or a native Bokeh plot) are a dead end from an analysis perspective, useful only for plotting.
# ### HoloViews Elements
#
# Holoview objects merge the visualization with the data. For an Holoview object you have to classify what the data is showing. A Holoview object could be initialised in several ways:
#
# ```
# hv.Element(data, kdims=None, vdims=None, **kwargs)
# ```
#
# This standard signature consists of the same five types of information:
#
# - **``Element``**: any of the dozens of element types shown in the [reference gallery](http://holoviews.org/reference/index.html).
# - **``data``**: your data in one of a number of formats described below, such as tabular dataframes or multidimensional gridded Xarray or Numpy arrays.
# - **``kdims``**: "key dimension(s)", also called independent variables or index dimensions in other contexts---the values for which your data was measured.
# - **``vdims``**: "value dimension(s)", also called dependent variables or measurements---what was measured or recorded for each value of the key dimensions.
# - **``kwargs``**: optional keyword arguments specific to that ``Element`` type (rarely needed).
#
# Elements could be for example ```Curve```, ``` Scatter```, ```Area``` and also different ways of declaring the key dimension(s) and value dimension(s) are shown below:
# Three element types over the same data, laid out side by side with `+`.
# Labels are set at construction time (tuples) or via .redim.label().
(hv.Curve(df_xy, kdims=('x','x_label'), vdims=('y','y_label')) +
 hv.Scatter((xs,ys)).redim.label(x='x_label', y='ylabel') +
 hv.Area({'x':xs,'y':ys}))
# The example also shows two ways of labeling the variables, one is directly by the initialisation with tuples ```('x','x_label')``` and ```('y','y_label')``` and a other option is to use ```.redim.label()```.
#
# The example above also shows the simple syntax to create a layout of different Holoview Objects by using `+`. With `*` you can simply overlay the objects in one plot:
# +
from holoviews import opts
# Overlay (`*`) a curve with vertical and horizontal reference lines.
(hv.Curve(df_xy, 'x', 'y') *
 hv.VLine(5).opts(color='black') *
 hv.HLine(75).opts(color='red'))
# -
# With ```.opts()``` you can change some characteristics of the Holoview Objects and you can use the `[tab]` key completion to see, what options are available or you can use the ```hv.help()``` function to get more information about some `Elements`.
# +
# hv.help(hv.Curve)
# -
# So now we can use some Holoview object for the data exploration for the glacier data. We create a `Layout` with some subplots for the different parameters. With ```opts.defaults()``` we can change some default properties of the different HoloView Elements, here we activate the ```hover``` tool for all ```Curve``` elements. Try to zoom into one plot!
# +
# Enable the hover tool by default for every Curve element.
opts.defaults(opts.Curve(tools=['hover']))
# Six linked subplots (3 columns); the radiation in/out pairs are overlaid.
(hv.Curve(df, 'index', 'TEMP') +
 hv.Curve(df,'index','RH') +
 hv.Curve(df,'index','SWIN').opts(color='darkorange') * hv.Curve(df,'index','SWOUT').opts(color='red') +
 hv.Curve(df,'index','LWIN').opts(color='darkorange') * hv.Curve(df,'index','LWOUT').opts(color='red') +
 hv.Curve(df,'index','WINDSPEED') +
 hv.Curve(df,'index','WINDDIR')).cols(3).opts(opts.Curve(width=300, height=200))
# -
# So here we created a ```Curve``` Element for some parameters and put them together in subplots by using `+` and overlay some in one subplot with `*`. With ```.opts()``` I define the color of some parameters and set the ```width``` and ```height``` properties for the used ```Curve``` Elements and with ```.cols()``` I define the number of columns.
#
# Now we can zoom in and use a hover for data exploration and because all Holoview Objects using the same dataframe and the same key variable the x-axes of all plots are linked. So when you zoom in in one plot all the other plots are zoomed in as well.
#
# ### HoloView Dataset and HoloMap Objects
#
# A HoloViews `Dataset` is similar to a HoloViews Element, without having any specific metadata that lets it visualize itself. A Dataset is useful for specifying a set of `Dimension`s that apply to the data, which will later be inherited by any visualizable elements that you create from the Dataset.
#
# A HoloViews Dimension is the same concept as a **dependent** or **independent** variable in mathematics. In HoloViews such variables are called value dimensions and key dimensions (respectively). So lets take again our glacier pandas DataFrame and create a HoloView `Dataset`. Beforehand we define some new columns for the date. Then we create our HoloView DataFrame with the key variables (independent) ``month``, ``year`` and ``day_hour``. The remaining columns will automatically be inferred to be value (dependent) dimensions:
# +
# Derive calendar columns used as HoloViews key dimensions below.
df['month'] = df.index.month
df['year'] = df.index.year
# Fractional day-of-month (day + hour/24) gives a continuous x-axis.
df['day_hour'] = df.index.day + df.index.hour/24
df['timestamp'] = df.index.strftime('%d.%m.%Y %H:%M')
# Dataset with month/year/day_hour as key dims; the rest become value dims.
df_month = hv.Dataset(df, ['month', 'year', 'day_hour'])
df_month = df_month.redim.label(day_hour='day of month')
df_month
# -
# Out of this Dataset we now can create a `Holomap` with ``.to``. The ``.to`` method of a Dataset takes up to four main arguments:
#
# 1. The element you want to convert to
# 2. The key dimensions (i.e., independent variables) to display
# 3. The dependent variables to display, if any
# 4. The dimensions to group by, if nothing given the remaining key variables are used
# Convert to Curves keyed by day_hour; the remaining key dims (month, year)
# become HoloMap widgets. Extra value dims appear in the hover tooltip.
slider = df_month.to(hv.Curve, ['day_hour'], ['TEMP', 'RH', 'SWIN', 'timestamp'])
slider = slider.opts(width=600, height=400, tools=['hover'])
print(slider)
# We now created a ``HoloMap`` with two grouped variables ``[month, year]``, one key variable ``[day_hour]`` and four dependent variables ``(TEMP, RH, SWIN, timestamp)``. Now look at the visualisation (some months/year pairs are missing and cannot be visualized):
# Render the HoloMap: widgets select month and year.
slider
# We see that a widget was created where we can choose the ``'month'`` and the ``'year'`` (the two grouped variables). The plot is showing the `'day_hour'` (key) variable against the first dependent variable ``'TEMP'``. The other dependent variables are not shown but their values are displayed in the hover.
#
# For a better comparison we also can look at grouped variables at once when we use ``.overlay()``:
# .overlay() folds the grouped dims into one NdOverlay with a legend.
overlay = df_month.to(hv.Curve, ['day_hour'], ['TEMP', 'RH', 'SWIN','WINDSPEED','timestamp']).overlay()
overlay = overlay.opts(width=800, height=500, tools=['hover'])
# Start with all curves muted; clicking a legend entry highlights a curve.
overlay.opts(opts.NdOverlay(legend_muted=True, legend_position='left'))
print(overlay)
# Here we are creating an ``NdOverlay`` Object which is similar to a ``HoloMap``, but has a different visualisation:
overlay
# Here now no widget is created, instead there is a interactive legend where we can turn the color *on* by clicking in the legend on it. So we can compare the months with each other (for example the same month in different years).
#
# It is also easy to look at some mean values, for example looking at mean diurnal values for each month and year you can use ```.aggregate```, which combine the values after the given function:
df['hour'] = df.index.hour
# Mean diurnal cycle per (month, year): aggregate over the key dimensions.
df_mean = hv.Dataset(df, ['month', 'year', 'hour']).aggregate(function=np.mean)
df_mean = df_mean.redim.label(hour='hour of the day')
print(df_mean)
# ```.aggregate()``` uses the key variables and looks where all of them are the same. It uses the provided function (in the case above ```np.mean```) to calculate new values. So in the above case we calculate mean daily cycles for each month and year. The calculated ``Dataset`` then can be displayed as we have seen it above.
# Same two views (widget HoloMap and NdOverlay) for the aggregated data.
slider = df_mean.to(hv.Curve, ['hour'], ['TEMP', 'RH', 'SWIN']).opts(width=600, height=400, tools=['hover'])
print(slider)
slider
overlay = df_mean.to(hv.Curve, ['hour'], ['TEMP', 'RH', 'SWIN']).opts(width=600, height=400, tools=['hover']).overlay()
overlay.opts( opts.NdOverlay(legend_muted=True, legend_position='left'))
print(overlay)
overlay
# ## Using GeoView for displaying geographical data
# As a small example for using geoview I want to show how to display a shapefiles of glaciers in an interactive plot.
# GeoViews extends HoloViews with geographic projections and elements.
import geoviews as gv
import geopandas as gpd
# you might have to install [geoviews](https://geoviews.org/index.html#installation) for that!
# ### Tile sources
#
# Tile sources are very convenient ways to provide geographic context for a plot and they will be familiar from the popular mapping services like Google Maps and Openstreetmap. The ``WMTS`` element provides an easy way to include such a tile source in your visualization simply by passing it a valid URL template. GeoViews provides a number of useful tile sources in the ``gv.tile_sources`` module:
# +
import geoviews.tile_sources as gts
# Gallery of all bundled web tile sources, each labelled by its name.
layout = gv.Layout([ts.relabel(name) for name, ts in gts.tile_sources.items()])
layout.opts('WMTS', xaxis=None, yaxis=None, width=225, height=225).cols(4)
# -
# To read the shape file geopandas could be used:
from oggm import utils
# RGI region '11' is Central Europe; '61' is version 6.1 of the inventory.
europe_glacier = gpd.read_file(utils.get_rgi_region_file('11', version='61'))
# Shapely geometry of the single glacier named Hintereisferner.
hintereisferner = europe_glacier[europe_glacier.Name == 'Hintereisferner'].geometry.iloc[0]
# Then create a GeoViews Object with a GeoViews Element Shape, display it and put a ``gv.tile_sources`` in the background.
# +
# hv.help(gv.Shape)
# -
# Unfilled outline of Hintereisferner over Esri satellite imagery tiles.
(gv.Shape(hintereisferner).opts(fill_color=None) *
 gts.tile_sources['EsriImagery']).opts(width=800, height=500)
# The GeoViews Object and Element is similar to HoloViews Objects and Elements for geographical data.
# A GeoViews Shape prints its structure like any HoloViews element.
print(gv.Shape(hintereisferner))
# And so similar a visualisation is stored for each GeoView Element, which can be used like an HoloView Object. So as a last example you also can plot all European glaciers in one interactive plot by using an Polygons Element of GeoViews:
# All Central European glacier outlines over a terrain tile source.
(gv.Polygons(europe_glacier.geometry) *
 gts.tile_sources['StamenTerrain']).opts(width=800, height=500)
# So this only was a very small look at the capability of HoloViz for data exploration and visualisation. There are much more you can do with HoloViz, but I think it is a package you should have a look at, because with only a few lines of code you can create an interactive plot which allow you to have an quick but also deep look at your data. I really recommend to visit the official [HoloViz Tutorial](https://holoviz.org/tutorial/index.html) and start using HoloViz :)
# ## What's next?
#
# - return to the [OGGM documentation](https://docs.oggm.org)
# - back to the [table of contents](welcome.ipynb)
| notebooks/holoviz_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
# <!--docstring-->
# Here is my docstring.
# +
def hello():
    """Print a fixed greeting to stdout."""
    print('Hello, world.')
class TestClass:
    """Callable fixture that prints a fixed greeting when invoked."""
    def __call__(self):
        # The original used an f-string with no placeholders (lint F541);
        # a plain string literal produces identical output.
        print('Hello from TestClass.')
# -
# Exercise both fixtures: the function and the callable instance.
hello()
test_instance = TestClass()
test_instance()
| tests/nb1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Type of examples for which ner blinding predict the wrong relation vs the baseline which predicts the correct relations
# %load_ext autoreload
# %autoreload
import os
from sys import path
import re
import pandas as pd
# Make the repository root importable (relation_extraction package lives there).
path.append('../../..')
import numpy as np
from relation_extraction.data.converters.converter_i2b2 import relation_dict
# Directory holding the i2b2 analysis result files.
output_path = '/scratch/geeticka/relation-extraction-result/i2b2-analyze/'
def res(path):
    """Resolve *path* relative to the analysis output directory."""
    return os.path.join(output_path, path)
# Pre-processed test sentences: original text and the NER-blinded variant.
original_sentences_path = os.path.join('/crimea/geeticka/data/relation_extraction/i2b2/pre-processed/original/test_original.txt')
ner_blinding_sentences_path = os.path.join('/crimea/geeticka/data/relation_extraction/i2b2/pre-processed/ner_blinding/test_ner_blinding.txt')
# Mapping from integer relation id to i2b2 relation label (from the converter).
relation_dict
def read_answers_line(line):
    """Split one answers-file line into (line_number, relation_label) strings."""
    number, label = line.split()
    return number, label
def asstring(list_of_strings):
    """Join a sequence of tokens back into one space-separated string."""
    separator = " "
    return separator.join(list_of_strings)
def read_sentence_and_entities(line):
    """Parse one preprocessed example line.

    Layout: relation-id, four entity token indices (inclusive), then the
    sentence tokens. Returns (relation_label, entity1, entity2, sentence)
    with the entities and sentence rendered as space-joined strings.
    """
    tokens = line.strip().split()
    words = tokens[5:]
    relation = relation_dict[int(tokens[0])]
    e1_start, e1_end, e2_start, e2_end = (int(t) for t in tokens[1:5])
    entity1 = words[e1_start:e1_end + 1]
    entity2 = words[e2_start:e2_end + 1]
    return relation, asstring(entity1), asstring(entity2), asstring(words)
# Map from 0-based test-line index to (baseline prediction, NER-blinded
# prediction) for examples the baseline got right but NER blinding got wrong.
needed_linenum_and_relation = {}
with open(res('answers_for_dev-ner-blinding.txt')) as textfile1, open(res("answers_for_dev-baseline.txt")) as textfile2, \
    open(res('answers_for_dev_gold-baseline.txt')) as textfile3:
    for x, y, z in zip(textfile1, textfile2, textfile3):
        linenum, ner_blinding_relation = read_answers_line(x)
        _, baseline_relation = read_answers_line(y)
        _, gold_relation = read_answers_line(z)
        if baseline_relation == gold_relation and ner_blinding_relation != gold_relation:
            # -1 converts the file's line numbers to 0-based indices.
            needed_linenum_and_relation[int(linenum) - 1] = (baseline_relation, ner_blinding_relation)
# Number of such error cases, and their share of the whole test set (%).
len(list(needed_linenum_and_relation.keys()))
len(list(needed_linenum_and_relation.keys()))/len(open(res('answers_for_dev_gold-baseline.txt')).readlines()) * 100
# Note that we list baseline first and then the ner blinded version but the prediction of the baseline version is correct (gold) whereas the ner blinded is incorrect.
entities_blinded_per_sentence = []
# Per-relation counters keyed by the baseline (gold) relation of each error case.
trip = 0; trwp = 0; trcp = 0; trap = 0; trnap = 0; terp = 0; tecp = 0; pip = 0; none = 0
print('We print the baseline first and then the ner blinded version. Gold relation corresponds to baseline\n\n')
curr_linenum = 0
# Hoisted out of the loop: the original rebuilt list(dict.keys()) on every
# sentence and did an O(n) membership test; a set built once is O(1) per line.
needed_linenums = set(needed_linenum_and_relation)
with open(original_sentences_path) as original_sentences, open(ner_blinding_sentences_path) as ner_blinding_sentences:
    for x, y in zip(original_sentences, ner_blinding_sentences):
        if curr_linenum in needed_linenums:
            _, e1_b, e2_b, s_b = read_sentence_and_entities(x.strip())
            _, e1_c, e2_c, s_c = read_sentence_and_entities(y.strip())
            r_b, r_c = needed_linenum_and_relation[curr_linenum]
            # Number of 'ENTITY' placeholder tokens in the blinded sentence.
            entities_blinded_per_sentence.append(s_c.split().count('ENTITY'))
            if r_b == 'TrIP': trip += 1
            elif r_b == 'TrWP': trwp += 1
            elif r_b == 'TrCP': trcp += 1
            elif r_b == 'TrAP': trap += 1
            elif r_b == 'TrNAP': trnap += 1
            elif r_b == 'TeRP': terp += 1
            elif r_b == 'TeCP': tecp += 1
            elif r_b == 'PIP': pip += 1
            elif r_b == 'None': none += 1
            print('Predicted Relation: \t {0}, {1} \nEntities: \t {2}, {3} \t {4}, {5} \nSentences: \n\t{6} \n\t {7}'.format(
                r_b, r_c, e1_b, e1_c, e2_b, e2_c, s_b, s_c))
            print('\n')
        curr_linenum += 1
print(trip, trwp, trcp, trap, trnap, terp, tecp, pip, none)
# Sanity check: per-relation counts should sum to the number of error cases.
trip + trwp + trcp + trap + trnap + terp + tecp + pip + none
# Summary statistics of blinded-entity counts per sentence.
(np.mean(entities_blinded_per_sentence), np.max(entities_blinded_per_sentence), np.min(entities_blinded_per_sentence),
np.std(entities_blinded_per_sentence), np.median(entities_blinded_per_sentence))
num_sentences_where_entities_blinded = 0 # count the number of sentences where the numbers were blinded
for numbers in entities_blinded_per_sentence:
    if numbers > 0:
        num_sentences_where_entities_blinded += 1
num_sentences_where_entities_blinded
| notebooks/error-analysis/Preprocessing-related/i2b2-Test-Error-Analysis-NER-blinding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''base'': conda)'
# language: python
# name: python385jvsc74a57bd07a63a94b8309dd2cb3940066a55196dec144c901a91c6bbe711da2630594ed82
# ---
# ### *NOTE - Please refer to project report pdf & queries sql file for more details*
# # LIBRARIES
import json
import csv
import pandas as pd
import datetime
import sqlite3
# ### Parameters:
# Load timestamp stamped onto every row written to the database in this run.
dateTimeObj = datetime.datetime.now()
timestampStr = dateTimeObj.strftime("%Y-%m-%d %H:%M:%S")
# Target SQLite database file (created in the working directory if absent).
db = "bungalow.db"
print('Current Timestamp: ', timestampStr)
print('Database: ',db)
#
#---------------------------------#
# Absolute source-file paths — adjust for your machine before running.
rent_data_path = '/Users/naveenm/Downloads/takehome-data-eng-main/sample-data/zillow-rental-data.json'
#---------------------------------#
sale_data_path = '/Users/naveenm/Downloads/takehome-data-eng-main/sample-data/mls-sale-data.csv'
#
# # Stage 1 - Extract, Transform & Load
# ## Load from source Zillow File (Rent data):
rent_df = pd.read_json(rent_data_path)
rent_df.shape
# ### Expand data column in JSON source file to read useful info:
# The 'data' column holds nested JSON per row; flatten it and keep only
# the fields we need, then join them back by row index.
df2 = pd.json_normalize(rent_df['data'])
fields = ["home_status","zillow_id","home_type","year_built"]
df2 = df2[fields]
rent_df = rent_df.merge(df2,left_index=True,right_index=True)
rent_df.shape
# # Load from source MLS File (Sale data):
sale_df = pd.read_csv(sale_data_path)
sale_df.shape
# +
### Rename column names for convenience and replace special characters
# -
# Normalise column names: spaces -> underscores, all lowercase.
rent_df.columns = rent_df.columns.str.replace(' ','_').str.lower()
#---------------------------------#
# Shorten the unwieldy Redfin URL column header and the 'MLS#' column.
sale_df = sale_df.rename(columns={'URL (SEE http://www.redfin.com/buy-a-home/comparative-market-analysis FOR INFO ON PRICING)': 'URL','MLS#':'MLS_ID'})
sale_df.columns = sale_df.columns.str.replace(' ','_').str.lower()
# ### Remove duplicate records, retain the last occurrence
sale_df.info()
# Duplicates are identified by the listing's natural keys in each source.
rent_df = rent_df.drop_duplicates(subset = ['id','url'], keep='last')
#---------------------------------#
sale_df = sale_df.drop_duplicates(subset = ['url','mls_id'], keep='last')
# ### Modify a few columns on both rent & sale datasets for consistency across the Database
# Shared mapping of full US state names to two-letter abbreviations.
# Previously this 50-entry dict was duplicated inline for each dataset;
# defining it once removes the duplication and keeps both in sync.
STATE_ABBREV = {
    'Alabama': 'AL', 'Alaska': 'AK', 'Arizona': 'AZ', 'Arkansas': 'AR',
    'California': 'CA', 'Colorado': 'CO', 'Connecticut': 'CT', 'Delaware': 'DE',
    'District of Columbia': 'DC', 'Florida': 'FL', 'Georgia': 'GA', 'Hawaii': 'HI',
    'Idaho': 'ID', 'Illinois': 'IL', 'Indiana': 'IN', 'Iowa': 'IA',
    'Kansas': 'KS', 'Kentucky': 'KY', 'Louisiana': 'LA', 'Maine': 'ME',
    'Maryland': 'MD', 'Massachusetts': 'MA', 'Michigan': 'MI', 'Minnesota': 'MN',
    'Mississippi': 'MS', 'Missouri': 'MO', 'Montana': 'MT', 'Nebraska': 'NE',
    'Nevada': 'NV', 'New Hampshire': 'NH', 'New Jersey': 'NJ', 'New Mexico': 'NM',
    'New York': 'NY', 'North Carolina': 'NC', 'North Dakota': 'ND', 'Ohio': 'OH',
    'Oklahoma': 'OK', 'Oregon': 'OR', 'Pennsylvania': 'PA', 'Rhode Island': 'RI',
    'South Carolina': 'SC', 'South Dakota': 'SD', 'Tennessee': 'TN', 'Texas': 'TX',
    'Utah': 'UT', 'Vermont': 'VT', 'Virginia': 'VA', 'Washington': 'WA',
    'West Virginia': 'WV', 'Wisconsin': 'WI', 'Wyoming': 'WY',
}
# Explicit reassignment instead of chained .replace(..., inplace=True),
# which pandas warns about on column views; the result is identical.
rent_df['region'] = rent_df['region'].replace(STATE_ABBREV)
rent_df['market'] = rent_df.market.str.replace('-', '')
rent_df['home_status'] = rent_df.home_status.str.replace('FOR_RENT','ForRent')
rent_df['city'] = rent_df['city'].str.lower()
# Stringify the nested JSON column so SQLite can store it.
rent_df['data'] = rent_df['data'].astype('str')
#---------------------------------#
sale_df['state_or_province'] = sale_df['state_or_province'].replace(STATE_ABBREV)
sale_df['market'] = sale_df.market.str.replace('-', '')
sale_df['city'] = sale_df['city'].str.lower()
# ### Adding Load timestamp before writing to DB:
rent_df["load_ts"] = timestampStr
#---------------------------------#
sale_df["load_ts"] = timestampStr
# ### Create DB Connection:
conn = sqlite3.connect(db)
print(conn)
# # Load to DB:
# #### Note - if_exists to be changed to 'append' for incremental load.
# #### Master Tables have source data with Minor transformations
# 'replace' drops and recreates the table on every run (full reload).
sale_df.to_sql('sale_df_master', conn, if_exists='replace', index=False)
#---------------------------------#
rent_df.to_sql('rent_df_master', conn, if_exists='replace', index=False)
# ### validation: read counts from DB & check a few rows
pd.read_sql('select count(*) from sale_df_master', conn)
pd.read_sql('select count(*) from rent_df_master', conn)
pd.read_sql('select * from sale_df_master limit 5', conn)
pd.read_sql('select * from rent_df_master limit 5', conn)
# # Stage 2 - Aggregate Source data from Database for insights enablement
# ## Create new Table with Aggregated data for data science/Data analysis teams:
#
# Merge Data from Rent & Sales datasets:
#
# UNION ALL of both master tables into one unified listings schema.
# NOTE(review): in the rent branch, listing_type is taken from the
# home_status column while home_status is hard-coded 'Active' — looks
# intentional (mirrors the sale branch) but worth confirming.
merge_query = """SELECT * FROM (
SELECT mls_id as id,
'ForSale' as listing_type,
source,
status as home_status,
market as market,
property_type as home_type,
address,
city,
zip_or_postal_code as postal,
state_or_province as region,
price,
square_feet as sqft,
beds,
baths,
year_built
FROM sale_df_master
UNION ALL
SELECT id,
home_status as listing_type,
source,
'Active' as home_status,
market,
home_type,
address,
city,
postal,
region,
price,
sqft,
beds,
baths,
year_built
FROM rent_df_master
)"""
real_estate_df = pd.read_sql_query(merge_query, conn)
# ### Missing Data in the aggregated Data:
# +
def missing_cols(df):
    '''Print each column that has missing values with its count,
    followed by the total number of missing values (or a message
    if the frame is complete). Returns None.'''
    total = 0
    for col in df.columns:
        missing_vals = df[col].isnull().sum()
        total += missing_vals
        if missing_vals != 0:
            # Reuse the count computed above instead of recomputing
            # df[col].isnull().sum() a second time.
            print(f"{col} => {missing_vals}")
    if total == 0:
        print("no missing values left")
    else:
        # f-string for consistency with the per-column message.
        print(f"\nTotal Missing values:{total}")
# -
# Report missing-value counts on the merged dataset.
missing_cols(real_estate_df)
# ### Duplicate Check in agg data before loading to the final DB Table:
x = real_estate_df.duplicated().any()
print(x)
# Rows flagged as duplicates (displayed for inspection, not dropped here).
real_estate_df_dup = real_estate_df[real_estate_df.duplicated()]
real_estate_df_dup
# ### Adding timestamp before loading to DB:
# Refresh the timestamp so the gold table records its own load time.
dateTimeObj = datetime.datetime.now()
timestampStr = dateTimeObj.strftime("%Y-%m-%d %H:%M:%S")
real_estate_df["load_ts"] = timestampStr
# ### Load Final Agg data to DB:
real_estate_df.to_sql('real_estate_gold', conn, if_exists='replace', index=False)
pd.read_sql('select count(*) from real_estate_gold', conn)
# ### Commit DB & close Connection:
conn.commit()
conn.close()
# ### **NOTE - Please refer to project report pdf & queries sql file for more details **
| preprocess_and_load.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Title
# Description text here...
# %gui qt # we need to start the Qt event loop before importing napari
# + tags=["remove-cell"]
# This cell is required for the automated continuous integration
# It allows the async Qt magic in the previous cell time to load
import time
time.sleep(1)
# +
import napari
# Open an empty napari viewer window (requires the Qt event loop above).
viewer = napari.Viewer()
# + tags=["hide-input"]
from napari.utils import nbscreenshot
# Embed a screenshot of the viewer in the rendered documentation page.
nbscreenshot(viewer)
# + tags=["remove-cell"]
# Close the viewer so CI does not leave a window open.
viewer.close()
| template-page.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="https://speakingofresearch.files.wordpress.com/2018/04/mouse-cv.jpg" width="300">
#
#
# # Use case: Find a drug candidate for further testing.
#
# ## Goal: Find a drug cost effectively.
#
# ## Imperative: We want to find a drug that works but we don't want to try too many -- it's expensive!
#
# ## Problem: We can't tell what is a drug candidate by just looking at it.
#
# ### So we don't use images, we use 0 for miss and 1 for hit.
#
# # Also: if something is "not known to be a drug candidate" it could be because it hasn't been tested yet.
#
# ### So we can't really trust the "misses" -- 0.
#
import styling as sty  # local helper that renders a hit/miss string as a styled figure

# Two example orderings: 1 = hit (known drug candidate), 0 = miss/untested.
sty.cls("01010100000010010001001")
sty.cls("10101000000100100010001")
# ## Those classifications are probably about the same -- the leading 0 might even be an "unknown" drug candidate not in the gold standard.
#
# ## But this one is definitely bad: You have to do too many experiments to cover many of the hits:
sty.cls("00000111111100000000000")
# # *To recap: You have to know what the goal is before you can say whether an ordering is "good" or "bad"*
| notebooks/presentation/9_antiviral.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# Creation of an empty list (declaration) using square-bracket syntax
fruits = []
# Creation of a list (initialisation) with literal items
fruits = ['apple', 'mango', 'pinnapple']
fruits
# +
# Mutable
# add, delete, modify items are allowed
# -
# 
# ## Add
# [//]: # ([add-items-in-list [append(item) adds 1 item in last] [extend(list) adds 1+ items in last] [insert(item) adds 1 item at given index] [+operator adds 2+ lists]])
#
# 
# add items
fruits = ['apple', 'mango', 'pinnapple']
# Single item is added (use of append)
fruits.append("banana");
fruits
# add items
fruits = ['apple', 'mango', 'pinnapple']
# Multiple items are added (use of extend)
fruits.extend(["fruit1", "fruit2"])
fruits
# add item
fruits = ['apple', 'mango', 'pinnapple']
# Add element at given "index" (use of insert)
fruits.insert(1, "fruit")
fruits
# Given :
# ``` python
# fruits = ['apple', 'mango', 'pinnapple']
# ```
# * Q1. add element first
# * Q2. add element last
# add element at index 0, at first
fruits = ['apple', 'mango', 'pinnapple']
fruits.insert(0, "fruit1")
fruits
# add element at last
fruits = ['apple', 'mango', 'pinnapple']
fruits.append("fruit1")
fruits
# Concatenation of two or more lists
l1 = [1, 3, 5, 7, 9, 11]
l2 = [2, 4, 6, 8, 10, 12]
l1+l2
# ## Delete
# 
# * [delete [by-index[del-keyword[1-item indexing][2-items slicing]] ][by-value [remove-function [1-item]]]]
# delete items (by index) (use del keyword)
fruits = ['apple', 'mango', 'pinnapple']
# delete mango
del fruits[1]
fruits
# delete items (by name) (use remove keyword)
fruits = ['apple', 'mango', 'pinnapple']
# delete mango
fruits.remove('mango')
fruits
# you can delete multiple items (by index) at continuous indexes (use of slicing)
f = ['apple', 'mango', 'pinnapple', 'banana', 'orange']
del f[0:2]
# you can delete multiple items (by index) at non-continuous indexes
f = ['apple', 'mango', 'pinnapple', 'banana', 'orange']
# delete apple, pinnapple and orange (note: indexes shift after each del)
del f[0]
del f[1]
del f[2]
f
# delete last element (pop removes and returns the final item)
fruits = ['apple', 'mango', 'pinnapple', 'banana', 'orange']
fruits.pop()
fruits
# delete first element
f = ['apple', 'mango', 'pinnapple', 'banana', 'orange']
del f[0]
# +
# modify
# -
# Print every public (non-dunder) list method name
f = []
for i in dir(f):
    if(not(i.startswith("__"))):
        print(i, end = ", ")
| 08. Python Lists/10. Summary Lesson(Exercise).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="05f23de0-001" colab_type="text"
# #1. Install Dependencies
# First install the libraries needed to execute recipes, this only needs to be done once, then click play.
#
# + id="05f23de0-002" colab_type="code"
# !pip install git+https://github.com/google/starthinker
# + [markdown] id="05f23de0-003" colab_type="text"
# #2. Get Cloud Project ID
# To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
#
# + id="05f23de0-004" colab_type="code"
# Google Cloud project under which the recipe will run and bill.
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
# + [markdown] id="05f23de0-005" colab_type="text"
# #3. Get Client Credentials
# To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
#
# + id="05f23de0-006" colab_type="code"
# OAuth client credentials JSON used to authorize API access.
CLIENT_CREDENTIALS = 'PASTE CLIENT CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
# + [markdown] id="05f23de0-007" colab_type="text"
# #4. Enter CM360 Bulkdozer Editor Parameters
# Bulkdozer is a tool that can reduce trafficking time in Campaign Manager by up to 80% by providing automated bulk editing capabilities.
# 1. Open the <a href='https://docs.google.com/spreadsheets/d/1EjprWTDLWOvkV7znA0P4uciz0_E5_TNn3N3f8J4jTwA/edit?usp=sharing&resourcekey=<KEY>' target='_blank'>Bulkdozer 0.28</a> feed.
# 1. Make your own copy of the feed by clicking the File -> Make a copy... menu in the feed.
# 1. Give it a meaningful name including the version, your name, and team to help you identify it and ensure you are using the correct version.
# 1. Under the Account ID field below, enter the your Campaign Manager Network ID.
# 1. Under Sheet URL, enter the URL of your copy of the feed that you just created in the steps above.
# 1. Go to the Store tab of your new feed, and enter your profile ID in the profileId field (cell B2). Your profile ID is visible in Campaign Manager by clicking your avatar on the top right corner.
# 1. Click the 'Save' button below.
# 1. After clicking 'Save', copy this page's URL from your browser address bar, and paste it in the Store tab for the recipe_url field (cell B5) your sheet.
# 1. Bulkdozer is ready for use
# 1. Stay up to date on new releases and other general announcements by joining <a href='https://groups.google.com/forum/#!forum/bulkdozer-announcements' target='_blank'>Bulkdozer announcements</a>.
# 1. Review the <a href='https://github.com/google/starthinker/blob/master/tutorials/Bulkdozer/Installation_and_User_guides.md' target='_blank'>Bulkdozer documentation</a>.
# Modify the values below for your use case, can be done multiple times, then click play.
#
# + id="05f23de0-008" colab_type="code"
# Recipe parameters; edit the values, then re-run this cell before executing.
FIELDS = {
    'recipe_timezone': 'America/Chicago', # Timezone for report dates.
    'account_id': None, # Campaign Manager Network ID (optional if profile id provided)
    'dcm_profile_id': None, # Campaign Manager Profile ID (optional if account id provided)
    'sheet_url': '', # Feed Sheet URL
}
print("Parameters Set To: %s" % FIELDS)
# + [markdown] id="05f23de0-009" colab_type="text"
# #5. Execute CM360 Bulkdozer Editor
# This does NOT need to be modified unless you are changing the recipe, click play.
#
# + id="05f23de0-010" colab_type="code"
from starthinker.util.configuration import Configuration
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
# Path where the user's OAuth token will be stored inside the Colab VM.
USER_CREDENTIALS = '/content/user.json'
# Single 'traffic' task; the {'field': ...} placeholders below are filled
# in from FIELDS by json_set_fields before execution.
TASKS = [
    {
        'traffic': {
            'hour': [
            ],
            'account_id': {'field': {'name': 'account_id','kind': 'string','order': 1,'description': 'Campaign Manager Network ID (optional if profile id provided)','default': None}},
            'dcm_profile_id': {'field': {'name': 'dcm_profile_id','kind': 'string','order': 1,'description': 'Campaign Manager Profile ID (optional if account id provided)','default': None}},
            'auth': 'user',
            'sheet_url': {'field': {'name': 'sheet_url','kind': 'string','order': 2,'description': 'Feed Sheet URL','default': ''}},
            'timezone': {'field': {'name': 'recipe_timezone','kind': 'timezone','description': 'Timezone for report dates.','default': 'America/Chicago'}}
        }
    }
]
# Substitute the user-supplied FIELDS values into the recipe placeholders.
json_set_fields(TASKS, FIELDS)
# Run the recipe immediately (force=True) with user credentials.
execute(Configuration(project=CLOUD_PROJECT, client=CLIENT_CREDENTIALS, user=USER_CREDENTIALS, verbose=True), TASKS, force=True)
| colabs/bulkdozer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Plotting polygons with Folium
# This example demonstrates how to plot polygons on a Folium map.
import geopandas as gpd
import folium
import matplotlib.pyplot as plt
# ## Load geometries
# This example uses the nybb dataset, which contains polygons of New York boroughs.
# Path to the bundled 'nybb' dataset (New York borough polygons).
path = gpd.datasets.get_path('nybb')
df = gpd.read_file(path)
df.head()
# Plot from the original dataset
# + tags=["nbsphinx-thumbnail"]
df.plot(figsize=(6, 6))
plt.show()
# -
# Notice that the values of the polygon geometries do not directly represent the values of latitude or longitude in a geographic coordinate system.
# To view the coordinate reference system of the geometry column, access the `crs` attribute:
# Inspect the current coordinate reference system of the geometry column.
df.crs
# The [epsg:2263](https://epsg.io/2263) crs is a projected coordinate reference system with linear units (ft in this case).
# As folium (i.e. leaflet.js) by default accepts values of latitude and longitude (angular units) as input, we need to project the geometry to a geographic coordinate system first.
# Use WGS 84 (epsg:4326) as the geographic coordinate system
df = df.to_crs(epsg=4326)
print(df.crs)
df.head()
df.plot(figsize=(6, 6))
plt.show()
# ## Create Folium map
# Center on New York City with a light CartoDB basemap.
m = folium.Map(location=[40.70, -73.94], zoom_start=10, tiles='CartoDB positron')
m
# ### Add polygons to map
# Overlay the boundaries of boroughs on map with borough name as popup:
# Add each borough polygon as a GeoJson layer with its name as a popup.
for _, r in df.iterrows():
    # Without simplifying the representation of each borough,
    # the map might not be displayed
    sim_geo = gpd.GeoSeries(r['geometry']).simplify(tolerance=0.001)
    geo_j = sim_geo.to_json()
    geo_j = folium.GeoJson(data=geo_j,
                           style_function=lambda x: {'fillColor': 'orange'})
    folium.Popup(r['BoroName']).add_to(geo_j)
    geo_j.add_to(m)
m
# ### Add centroid markers
# In order to properly compute geometric properties, in this case centroids, of the geometries, we need to project the data to a projected coordinate system.
# +
# Project to NAD83 projected crs
df = df.to_crs(epsg=2263)
# Access the centroid attribute of each polygon
df['centroid'] = df.centroid
# -
# Since we're again adding a new geometry to the Folium map, we need to project the geometry back to a geographic coordinate system with latitude and longitude values.
# +
# Project to WGS84 geographic crs
# geometry (active) column
df = df.to_crs(epsg=4326)
# Centroid column
df['centroid'] = df['centroid'].to_crs(epsg=4326)
df.head()
# +
# Place one marker at each borough centroid with its length/area in the popup.
for _, r in df.iterrows():
    lat = r['centroid'].y
    lon = r['centroid'].x
    folium.Marker(location=[lat, lon],
                  popup='length: {} <br> area: {}'.format(r['Shape_Leng'], r['Shape_Area'])).add_to(m)
m
| doc/source/gallery/polygon_plotting_with_folium.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Vml7Jh2LEOAU"
# !pip install --upgrade tables
# + id="MR4sJEeYEkUM" executionInfo={"status": "ok", "timestamp": 1605547296861, "user_tz": -60, "elapsed": 1430, "user": {"displayName": "<NAME>\u0144ska", "photoUrl": "", "userId": "14555346676748516021"}}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# + id="1me9tKwuEzvi"
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car"
# + id="X66TacdeFZgB" executionInfo={"status": "ok", "timestamp": 1605547490249, "user_tz": -60, "elapsed": 16654, "user": {"displayName": "<NAME>\u0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="3b9d8018-d89a-4468-9e3c-54520b2133a4" colab={"base_uri": "https://localhost:8080/"}
# Load the pre-scraped car offers dataset from HDF5.
df = pd.read_hdf('data/car.h5')
df.shape
# + id="laXQ7_cIFjqZ" executionInfo={"status": "ok", "timestamp": 1605547499175, "user_tz": -60, "elapsed": 529, "user": {"displayName": "<NAME>\u0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="98c36841-73bd-477f-9c3a-d4d2281e877f" colab={"base_uri": "https://localhost:8080/"}
df.columns.values
# + id="akiyDSswFprJ" executionInfo={"status": "ok", "timestamp": 1605547577376, "user_tz": -60, "elapsed": 857, "user": {"displayName": "<NAME>0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="dc2d545c-7f5d-41fb-a7c4-eda418eeb2f1" colab={"base_uri": "https://localhost:8080/", "height": 265}
df['price_value'].hist(bins=100);
# + id="h1Usn4ZQGA0w" executionInfo={"status": "ok", "timestamp": 1605547617110, "user_tz": -60, "elapsed": 552, "user": {"displayName": "<NAME>0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="5f9c29b3-a9bd-480d-d6af-d10d10362fd0" colab={"base_uri": "https://localhost:8080/"}
df['price_value'].describe()
# + id="RfAm-5mQGGiw" executionInfo={"status": "ok", "timestamp": 1605548762265, "user_tz": -60, "elapsed": 571, "user": {"displayName": "<NAME>0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="e4b5b61a-a39b-4e40-a20f-bed53a2f153a" colab={"base_uri": "https://localhost:8080/"}
df['param_marka-pojazdu'].unique()
# + id="ii2ZrxK4KeIW" executionInfo={"status": "ok", "timestamp": 1605548852546, "user_tz": -60, "elapsed": 576, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14555346676748516021"}} outputId="8b332329-02e9-4784-ef76-004501f66772" colab={"base_uri": "https://localhost:8080/"}
df.groupby('param_marka-pojazdu')['price_value'].mean()
# + id="fpzLK7dcKu7c" executionInfo={"status": "ok", "timestamp": 1605548919130, "user_tz": -60, "elapsed": 1921, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "14555346676748516021"}} outputId="586c2553-dd03-49b1-eb28-4d4dfaf762e8" colab={"base_uri": "https://localhost:8080/", "height": 346}
df.groupby('param_marka-pojazdu')['price_value'].agg(np.mean).plot(kind='bar');
# + id="Y10wxZG3K9H8" executionInfo={"status": "ok", "timestamp": 1605549114110, "user_tz": -60, "elapsed": 1189, "user": {"displayName": "<NAME>\u0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="c48939a2-54dc-4120-af15-f3fc4d0ba42e" colab={"base_uri": "https://localhost:8080/", "height": 392}
# Top 50 brands by mean price.
(
    df
    .groupby('param_marka-pojazdu')['price_value']
    .agg(np.mean)
    .sort_values(ascending=False)
    .head(50)
).plot(kind='bar', figsize=(15,5))
# + id="Jc3ip1hKLXky" executionInfo={"status": "ok", "timestamp": 1605549179631, "user_tz": -60, "elapsed": 943, "user": {"displayName": "<NAME>0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="c883e846-bd66-45af-c89f-82dba6bed21c" colab={"base_uri": "https://localhost:8080/", "height": 396}
# Top 50 brands by median price (less sensitive to outliers than the mean).
(
    df
    .groupby('param_marka-pojazdu')['price_value']
    .agg(np.median)
    .sort_values(ascending=False)
    .head(50)
).plot(kind='bar', figsize=(15,5))
# + id="xs9mc4HbMD8D" executionInfo={"status": "ok", "timestamp": 1605549254599, "user_tz": -60, "elapsed": 1447, "user": {"displayName": "<NAME>\u0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="69a0b37f-39fb-460d-accc-f133af85be88" colab={"base_uri": "https://localhost:8080/", "height": 396}
# Mean, median and count per brand, sorted by mean, on one shared axis.
(
    df
    .groupby('param_marka-pojazdu')['price_value']
    .agg([np.mean, np.median, np.size])
    .sort_values(by='mean', ascending=False)
    .head(50)
).plot(kind='bar', figsize=(15,5))
# + id="2UQeIvWEMQ75" executionInfo={"status": "ok", "timestamp": 1605549343568, "user_tz": -60, "elapsed": 2304, "user": {"displayName": "<NAME>\u0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="eb30856c-758e-43ab-cd11-2ee0d83a492a" colab={"base_uri": "https://localhost:8080/", "height": 458}
# Same aggregation, but each statistic drawn in its own subplot.
(
    df
    .groupby('param_marka-pojazdu')['price_value']
    .agg([np.mean, np.median, np.size])
    .sort_values(by='mean', ascending=False)
    .head(50)
).plot(kind='bar', figsize=(15,5), subplots=True)
# + id="64bBVhmsMnzh" executionInfo={"status": "ok", "timestamp": 1605549385024, "user_tz": -60, "elapsed": 2083, "user": {"displayName": "<NAME>\u0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="55f31415-1020-435d-d29f-b4754d8efa61" colab={"base_uri": "https://localhost:8080/", "height": 458}
# Same aggregation, sorted by the number of offers per brand instead of mean price.
(
    df
    .groupby('param_marka-pojazdu')['price_value']
    .agg([np.mean, np.median, np.size])
    .sort_values(by='size', ascending=False)
    .head(50)
).plot(kind='bar', figsize=(15,5), subplots=True)
# + id="Ur1895oEM1z5" executionInfo={"status": "ok", "timestamp": 1605549837911, "user_tz": -60, "elapsed": 518, "user": {"displayName": "Ela Roma\u0144ska", "photoUrl": "", "userId": "14555346676748516021"}}
def group_and_barplot(feat_groupby, feat_agg='price_value', agg_funcs=(np.mean, np.median, np.size), feat_sort='mean', top=50, subplots=True):
    """Group the module-level `df` by `feat_groupby`, aggregate `feat_agg`
    with `agg_funcs`, sort by `feat_sort` descending, keep the `top` rows
    and draw a bar plot.

    Note: reads the module-level DataFrame `df`.
    """
    # Default is a tuple rather than a mutable list (avoids the shared
    # mutable-default-argument pitfall); converted to a list for .agg().
    summary = (
        df
        .groupby(feat_groupby)[feat_agg]
        .agg(list(agg_funcs))
        .sort_values(by=feat_sort, ascending=False)
        .head(top)
    )
    return summary.plot(kind='bar', figsize=(15, 5), subplots=subplots)
# + id="EDpHK0b_OKEq" executionInfo={"status": "ok", "timestamp": 1605549853870, "user_tz": -60, "elapsed": 2166, "user": {"displayName": "Ela Roma\u0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="22907e95-2b06-483b-f73a-66b4e35002ee" colab={"base_uri": "https://localhost:8080/", "height": 390}
group_and_barplot('param_marka-pojazdu');
# + id="nuLXfO0KOi04" executionInfo={"status": "ok", "timestamp": 1605549995299, "user_tz": -60, "elapsed": 1619, "user": {"displayName": "<NAME>\u0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="e7601345-db5e-4db3-98b5-b8210221c2fe" colab={"base_uri": "https://localhost:8080/", "height": 412}
group_and_barplot('param_kraj-pochodzenia', feat_sort='size');
# + id="Oy6JHySlO3C_" executionInfo={"status": "ok", "timestamp": 1605550077396, "user_tz": -60, "elapsed": 1339, "user": {"displayName": "<NAME>0144ska", "photoUrl": "", "userId": "14555346676748516021"}} outputId="2167d37f-b451-41ad-ccde-cdd061aabe02" colab={"base_uri": "https://localhost:8080/", "height": 369}
group_and_barplot('param_kolor', feat_sort='mean');
# + id="jsVjM0KUPZH3"
| day2_visualisation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mella30/Deep-Learning-with-Tensorflow-2/blob/main/Course3-Probabilistic_Deep_Learning_with_Tensorflow2/week3_Scale_bijectors_and_LinearOperator.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="yNFwEtoT-NgR"
# # Scale bijectors and LinearOperator
# + [markdown] id="VQ3ETa_9-NgS"
# This reading is an introduction to scale bijectors, as well as the `LinearOperator` class, which can be used with them.
# + id="_pzCmsNA-NgT" colab={"base_uri": "https://localhost:8080/"} outputId="5c130d8a-c782-4a17-95fb-1cf8299dbfec"
import tensorflow as tf
import tensorflow_probability as tfp
# Short aliases used throughout the notebook.
tfd = tfp.distributions
tfb = tfp.bijectors
print("TF version:", tf.__version__)
print("TFP version:", tfp.__version__)
from IPython.display import Image
# + [markdown] id="IfCihlmc-NgW"
# ## Introduction
#
# You have now seen how bijectors can be used to transform tensors and tensor spaces. Until now, you've only seen this in the scalar case, where the bijector acts on a single value. When the tensors you fed into the bijectors had multiple components, the bijector acted on each component individually by applying batch operations to scalar values. For probability distributions, this corresponds to a scalar event space.
#
# However, bijectors can also act on higher-dimensional space. You've seen, for example, the multivariate normal distribution, for which samples are tensors with more than one component. You'll need higher-dimensional bijectors to work with such distributions. In this reading, you'll see how bijectors can be used to generalise scale transformations to higher dimensions. You'll also see the `LinearOperator` class, which you can use to construct highly general scale bijectors. In this reading, you'll walk through the code, and we'll use figure examples to demonstrate these transformations.
#
# This reading contains many images, as this allows you to visualise how a space is transformed. For this reason, the examples are limited to two dimensions, since these allow easy plots. However, these ideas generalise naturally to higher dimensions. Let's start by creating a point that is randomly distributed across the unit square $[0, 1] \times [0, 1]$:
# + id="1z3TufXn-NgX" colab={"base_uri": "https://localhost:8080/"} outputId="a08fbd1d-a978-402f-f40d-4856defb5441"
# Create the base distribution and a single sample
# low/high are 2-vectors, giving two independent Uniform(0, 1) components
# (one per coordinate), so x is a single point in the unit square.
uniform = tfd.Uniform(low=[0.0, 0.0], high=[1.0, 1.0], name='uniform2d')
x = uniform.sample()
x
# + [markdown] id="ApOUy1uq-Wzi"
# We will be applying linear transformations to this data. To get a feel for how these transformations work, we show ten example sample points, and plot them, as well as the domain of the underlying distribution:
# + id="M5KYJke2-W9b" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="295174d1-f5e4-48f8-92ef-c15156313b22"
# Run this cell to download and view a figure to show example data points
# !wget -q -O x.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1DLqzh7xcjM7BS3C_QmgeF1xET2sXgMG0"
Image("x.png", width=500)
# + [markdown] id="UILbUvhp-NgZ"
#
# 
#
# Each of the ten points is hence represented by a two-dimensional vector. Let $\mathbf{x} = [x_1, x_2]^T$ be one of these points. Then scale bijectors are linear transformations of $\mathbf{x}$, which can be represented by a $2 \times 2$ matrix $B$. The forward bijection to $\mathbf{y} = [y_1, y_2]^T$ is
#
# $$
# \mathbf{y}
# =
# \begin{bmatrix}
# y_1 \\ y_2
# \end{bmatrix}
# = B \mathbf{x}
# = \begin{bmatrix}
# b_{11} & b_{12} \\
# b_{21} & b_{22} \\
# \end{bmatrix}
# \begin{bmatrix}
# x_1 \\ x_2
# \end{bmatrix}
# $$
#
# This is important to remember: any two-dimensional scale bijector can be represented by a $2 \times 2$ matrix. For this reason, we'll sometimes use the term "matrix" to refer to the bijector itself. You'll be seeing how these points and domain are transformed under different bijectors in two dimensions.
# + [markdown] id="WjidnTpq-NgZ"
# ## The `ScaleMatvec` bijectors
# + [markdown] id="oErkIexK-Nga"
# ### The `ScaleMatvecDiag` bijector
#
# We'll start with a simple scale bijector created using the `ScaleMatvecDiag` class:
# + id="E2xh3xOz-Nga"
# Create the ScaleMatvecDiag bijector
# scale_diag gives the diagonal of the 2x2 matrix B = diag(1.5, -0.5);
# the negative entry flips the second coordinate through the horizontal axis.
bijector = tfb.ScaleMatvecDiag(scale_diag=[1.5, -0.5])
# + [markdown] id="HNXwnshy-Ngd"
# which creates a bijector represented by the diagonal matrix
# $$ B =
# \begin{bmatrix}
# 1.5 & 0 \\
# 0 & -0.5 \\
# \end{bmatrix}.
# $$
#
# We can apply this to the data using `y = bijector(x)` for each of the ten points. This transforms the data as follows:
# + id="z9DO4iI2_Uof" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="547f8db3-cba1-4043-8129-4618c8b2dd13"
# Run this cell to download and view a figure to illustrate the transformation
# !wget -q -O diag.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1sgfZ_Qzdd2v7CErP2zIk04p6R6hUW7RR"
Image("diag.png", width=500)
# + [markdown] id="RYPUE9ce_U4I"
# You can see what happened here: the first coordinate is multiplied by 1.5 while the second is multiplied by -0.5, flipping it through the horizontal axis.
# + id="QmQnRIvy-Ngd" colab={"base_uri": "https://localhost:8080/"} outputId="8fe5ce7d-d36a-40d6-cbb3-dc3076d482af"
# Apply the bijector to the sample point
# Calling the bijector is the forward transformation, i.e. y = B x.
y = bijector(x)
y
# + [markdown] id="wIqdagRl-Ngf"
# ### The `ScaleMatvecTriL` bijector
#
# In the previous example, the bijector matrix was diagonal, which essentially performs an independent scale operation on each of the two dimensions. The domain under the bijection remains rectangular. However, not all scale transformations have to be like this. With a non-diagonal matrix, the domain will transform to a quadrilateral. One way to do this is by using the `tfb.ScaleMatvecTriL` class, which implements a bijection based on a lower-triangular matrix. For example, to implement the lower-triangular matrix
# $$ B =
# \begin{bmatrix}
# -1 & 0 \\
# -1 & -1 \\
# \end{bmatrix}
# $$
# you can use the `tfb.ScaleMatvecTriL` bijector as follows:
# + id="tnP1qEgI-Ngg"
# Create the ScaleMatvecTriL bijector
# scale_tril is the lower-triangular matrix B = [[-1, 0], [-1, -1]]; the
# off-diagonal entry mixes the coordinates, so the square maps to a quadrilateral.
bijector = tfb.ScaleMatvecTriL(scale_tril=[[-1., 0.],
                                           [-1., -1.]])
# + id="N4MRKTWt-Ngi" colab={"base_uri": "https://localhost:8080/"} outputId="bfb1fc94-29a6-4da6-aca6-40a455c65d91"
# Apply the bijector to the sample x
y = bijector(x)
y
# + [markdown] id="uOKr_c_F-Ngl"
# A graphical overview of this change is:
# + id="mMPt4BPG_q8R" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="afa770be-3380-4eb9-c3a1-6619ea6865f2"
# Run this cell to download and view a figure to illustrate the transformation
# !wget -q -O lower_triangular.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1eMYwPzMVpmt1FYscplu7RRn1S4gmFo5B"
Image("lower_triangular.png", width=500)
# + [markdown] id="YQ-RcKWv-Ngm"
# ## Inverse and composition
#
# Scale transformations always map the point $[0, 0]$ to itself and are only one particular class of bijectors. As you saw before, you can create more complicated bijections by composing one with another. This works just like you would expect. For example, you can compose a scale transformation with a shift to the left (by one unit) as follows:
# + id="ljYA14K--Ngm"
# Create a scale and shift bijector
scale_bijector = tfb.ScaleMatvecTriL(scale_tril=[[-1., 0.],
                                                 [-1., -1.]])
shift_bijector = tfb.Shift([-1., 0.])
# Calling one bijector on another composes them:
# bijector(x) == shift_bijector(scale_bijector(x)), i.e. scale first, then shift.
bijector = shift_bijector(scale_bijector)
# + id="EM_Tai07-Ngo" colab={"base_uri": "https://localhost:8080/"} outputId="6af6de8f-6699-4cc8-bdfb-295d0d08566c"
# Apply the bijector to the sample x
y = bijector(x)
y
# + [markdown] id="fy2akFfk-Ngq"
# which has the expected result:
#
# + id="PQOBCsmJALAs" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="4bf17aa3-700d-4a0b-99c8-09ebebaad224"
# Run this cell to download and view a figure to illustrate the transformation
# !wget -q -O scale_and_shift.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1iucwJlG2ropvJOkRfBMgEpuFNpYa_JH6"
Image("scale_and_shift.png", width=500)
# + [markdown] id="o2FsyqHsAKP0"
# Furthermore, bijectors are always invertible (with just a few special cases, see e.g. [`Absolute Value`](https://www.tensorflow.org/probability/api_docs/python/tfp/bijectors/AbsoluteValue)), and these scale transformations are no exception. For example, running
# + id="jwHodULr-Ngq"
# Apply the inverse transformation to the image of x
bijector = tfb.ScaleMatvecTriL(scale_tril=[[-1., 0.],
                                           [-1., -1.]])
# inverse() undoes forward(), so y should equal x (round trip through B and B^-1).
y = bijector.inverse(bijector(x))
# + [markdown] id="erOyIGtXAiuV"
# recovers `x`:
# + id="2Vj4URlLAi2F" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="5a74d4bb-9a99-4b8e-f9c0-728752a3b093"
# Run this cell to download and view a figure to illustrate the transformation
# !wget -q -O inverse.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1CHCkSfz6EnOYiZaw6vGZ_s6BzyP1NK1X"
Image("inverse.png", width=500)
# + [markdown] id="QJUEcls4-Ngt"
# so that the original and transformed data is the same.
# + id="zL6lnzOG-Ngt" colab={"base_uri": "https://localhost:8080/"} outputId="f405fb2a-97fb-4ad0-d3bc-8b0a51b3675a"
# Check that all y and x values are the same
# reduce_all over the elementwise comparison: True only if every entry matches exactly.
tf.reduce_all(y == x)
# + [markdown] id="-yhVG-g3-Ngv"
# ## The `LinearOperator` class and `ScaleMatvecLinearOperator` bijector
#
# The examples you just saw used the `ScaleMatvecDiag` and `ScaleMatvecTriL` bijectors, whose transformations can be represented by diagonal and lower-triangular matrices respectively. These are convenient since it's easy to check whether such matrices are invertible (a requirement for a bijector). However, this comes at a cost of generality: there are acceptable bijectors whose matrices are not diagonal or lower-triangular. To construct these more general bijectors, you can use the `ScaleMatvecLinearOperator` class, which operates on instances of `tf.linalg.LinearOperator`.
#
# The `LinearOperator` is a class that allows the creation and manipulation of linear operators in TensorFlow. It's rare to call the class directly, but its subclasses represent many of the common linear operators. It's programmed in a way to have computational advantages when working with big linear operators, although we won't discuss these here. What matters now is that we can use these linear operators to define bijectors using the `ScaleMatvecLinearOperator` class. Let's see how this works.
# + [markdown] id="8DfSe2kv-Ngw"
# ### The `LinearOperatorDiag` class
#
# First, let's use this framework to recreate our first bijector, represented by the diagonal matrix
#
# $$ B =
# \begin{bmatrix}
# 1.5 & 0 \\
# 0 & -0.5 \\
# \end{bmatrix}.
# $$
#
# You can do this using the `ScaleMatvecLinearOperator` as follows. First, we'll create the linear operator that represents the scale transformation using
# + id="MiFMxpsb-Ngw"
# Build a diagonal linear operator representing B = diag(1.5, -0.5).
scale = tf.linalg.LinearOperatorDiag(diag=[1.5, -0.5])
# + [markdown] id="-oKw73Ku-Ngy"
# where `LinearOperatorDiag` is one of the subclasses of `LinearOperator`. As the name suggests, it implements a diagonal matrix. We then use this to create the bijector using the `tfb.ScaleMatvecLinearOperator`:
# + id="a0mfpIUk-Ngy"
# Create the ScaleMatvecLinearOperator bijector
# It wraps the LinearOperator, so this is the same map as ScaleMatvecDiag above.
bijector = tfb.ScaleMatvecLinearOperator(scale)
# + [markdown] id="SUjBbtKp-Ng1"
# This bijector is the same as the first one above:
# + id="1hMYRcDuA5_-" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="3ecb467f-82f0-4254-e798-5b8d70c71649"
# Run this cell to download and view a figure to illustrate the transformation
# !wget -q -O linear_operator_diag.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1KaCJl28Thp6NjxspG3pq251vDJrmDd97"
Image("linear_operator_diag.png", width=500)
# + id="m_8qIb-z-Ng1" colab={"base_uri": "https://localhost:8080/"} outputId="613dbb22-ebb0-4feb-a576-1a042e0d818c"
# Apply the bijector to the sample x
y = bijector(x)
y
# + [markdown] id="uQOIGUl5-Ng3"
# ### The `LinearOperatorFullMatrix` class
#
# We can also use this framework to create a bijector represented by a custom matrix. Suppose we have the matrix
#
# $$ B =
# \begin{bmatrix}
# 0.5 & 1.5 \\
# 1.5 & 0.5 \\
# \end{bmatrix}
# $$
#
# which is neither diagonal nor lower-triangular. We can implement a bijector for it using the `ScaleMatvecLinearOperator` class by using another subclass of `LinearOperator`, namely the `LinearOperatorFullMatrix`, as follows:
# + id="ySLPNIVx-Ng3"
# Create a ScaleMatvecLinearOperator bijector
# B is symmetric but neither diagonal nor lower-triangular, so it needs
# the general LinearOperatorFullMatrix wrapper.
B = [[0.5, 1.5],
     [1.5, 0.5]]
scale = tf.linalg.LinearOperatorFullMatrix(matrix=B)
bijector = tfb.ScaleMatvecLinearOperator(scale)
# + [markdown] id="v4D8NFxb-Ng5"
# which leads to the following transformation:
# + id="gofqMvQpBRlA" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="f7906462-4255-4d02-a802-6c6d272c7abe"
# Run this cell to download and view a figure to illustrate the transformation
# !wget -q -O linear_operator_full.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1Zk5lp7-VTwmX5r0yPAqVGGzWIgYTjJIJ"
Image("linear_operator_full.png", width=500)
# + id="d9kDhz0C-Ng6" colab={"base_uri": "https://localhost:8080/"} outputId="99d10829-43c0-49f4-bbe5-dfa52f7f0b78"
# Apply the bijector to the sample x
y = bijector(x)
y
# + [markdown] id="_HyVQWuy-Ng8"
# ### Batch operations and broadcasting
#
# As you've seen before, it's important to be very careful with shapes in TensorFlow Probability. That's because there are three possible components to a shape: the event shape (dimensionality of the random variable), sample shape (dimensionality of the samples drawn) and batch shape (multiple distributions can be considered in one object). This subtlety is especially important for bijectors, but can be harnessed to make powerful, and very computationally efficient, transformations of spaces. Let's examine this a little bit in this section.
#
# In the previous examples, we applied a bijector to a two-dimensional data point $\mathbf{x}$ to create a two-dimensional data point $\mathbf{y}$. This was done using $\mathbf{y} = B \mathbf{x}$ where $B$ is the $2 \times 2$ matrix that represents the scale bijector. This is simply matrix multiplication. To implement this, we created a tensor `x` with `x.shape == [2]` and a bijector using a matrix of shape `B.shape == [2, 2]`. This generalises straightforwardly to higher dimensions: if $\mathbf{x}$ is $n$-dimensional, the bijection matrix must be of shape $n \times n$ for some $n>0$. In this case, $\mathbf{y}$ is $n$-dimensional.
#
# But what if you wanted to apply the same bijection to ten $\mathbf{x}$ values at once? You can then arrange all these samples into a single tensor `x` with `x.shape == [10, 2]` and create a bijector as usual, with a matrix of shape `B.shape == [2, 2]`.
# + id="-eLiZNZi-Ng8" colab={"base_uri": "https://localhost:8080/"} outputId="87037765-ea8e-4bf7-9269-53a46d03a4b2"
# Create 10 samples from the uniform distribution
# The sample shape (10) is prepended, giving x.shape == (10, 2).
x = uniform.sample(10)
x
# + id="3ACfBTuZ-Ng_" colab={"base_uri": "https://localhost:8080/"} outputId="7493d76d-fdce-4a82-b800-bace8c561786"
# Recreate the diagonal matrix transformation with LinearOperatorDiag
scale = tf.linalg.LinearOperatorDiag(diag=[1.5, -0.5])
scale.to_dense()  # materialise the operator as a dense 2x2 matrix for inspection
# + id="mplwIBb0-NhA"
# Create the ScaleMatvecLinearOperator bijector
bijector = tfb.ScaleMatvecLinearOperator(scale)
# + id="ak74fvbe-NhC" colab={"base_uri": "https://localhost:8080/"} outputId="0ce59497-7731-4f8a-9296-e8fd1cb4347f"
# Apply the bijector to the 10 samples
# The single (2, 2) matrix broadcasts over the sample dimension: y.shape == (10, 2).
y = bijector(x)
y
# + [markdown] id="vl6rCedgBqud"
# This gives us the same plot we had before:
# + id="mKT0LCFWBq2a" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="1d4bf5ad-0792-4d95-8748-59509166e286"
# Run this cell to download and view a figure to illustrate the transformation
# !wget -q -O diag.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1sgfZ_Qzdd2v7CErP2zIk04p6R6hUW7RR"
Image("diag.png", width=500)
# + [markdown] id="Q33XJsx8-NhE"
# For matrix multiplication to work, we need `B.shape[-1] == x.shape[-1]`, and the output tensor has last dimension `y.shape[-1] == B.shape[-2]`. For invertibility, we also need the matrix `B` to be square. Any dimensions except for the last one on `x` become sample/batch dimensions: the operation is broadcast across these dimensions as we are used to. It's probably easiest to understand through a table of values, where `s`, `b`, `m`, and `n` are positive integers and `m != n`:
#
# | `B.shape` | `x.shape` | `y.shape` |
# | ----- | ----- | ----- |
# | `(2, 2)` | `(2)` | `(2)` |
# | `(n, n)` | `(m)` | `ERROR` |
# | `(n, n)` | `(n)` | `(n)` |
# | `(n, n)` | `(s, n)` | `(s, n)` |
# | `(b, n, n)` | `(n)` | `(b, n)` |
# | `(b, n, n)` | `(b, n)` | `(b, n)` |
# | `(b, n, n)` | `(s, 1, n)` | `(s, b, n)` |
#
# These rules and the ability to broadcast make batch operations easy.
# + [markdown] id="gGSUs-0B-NhE"
# We can also easily apply multiple bijectors. Suppose we want to apply both these bijectors:
#
# $$
# \begin{align}
# B_1 =
# \begin{bmatrix}
# 1 & 0 \\
# 0 & -1 \\
# \end{bmatrix}
# & \qquad
# B_2 =
# \begin{bmatrix}
# -1 & 0 \\
# 0 & 1 \\
# \end{bmatrix}.
# \end{align}
# $$
#
# We can do this using the batched bijector
# + id="FBnTnUEy-NhF"
# Create a batched ScaleMatvecLinearOperator bijector.
# Row b of `diag` is the diagonal of bijector matrix B_{b+1}:
# B_1 = diag(1, -1) and B_2 = diag(-1, 1).
diag = tf.constant([[1., -1.],
                    [-1., 1.]])  # shape (2, 2): a batch of two diagonals
scale = tf.linalg.LinearOperatorDiag(diag=diag)  # batch of two 2x2 operators
bijector = tfb.ScaleMatvecLinearOperator(scale=scale)
# + [markdown] id="-444xO4x-NhG"
# and we can broadcast the samples across both bijectors in the batch, as well as broadcasting the bijectors across all samples. For this, we need to include a batch dimension in the samples Tensor.
# + id="DAS06SEJ-NhH" colab={"base_uri": "https://localhost:8080/"} outputId="19a1006e-d86f-48b3-b728-2edf43fd9712"
# Add a singleton batch dimension to x
# (10, 2) -> (10, 1, 2) so the samples broadcast against the batch of two bijectors.
x = tf.expand_dims(x, axis=1)
x.shape
# + id="G0rMQi5B-NhJ" colab={"base_uri": "https://localhost:8080/"} outputId="95ff2896-108d-48c7-ca74-899e6166530f"
# Apply the batched bijector to x
# Result shape (10, 2, 2): 10 samples x 2 bijectors x 2 event dimensions.
y = bijector(x)
y.shape # (S, B, E) shape semantics
# + [markdown] id="kXA95fDP-NhL"
# which gives two batches of forward values for each sample:
# + id="saqG9CHcB9Iv" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="39dbf44b-46f9-4d79-ab6a-db6d435c9eca"
# Run this cell to download and view a figure to illustrate the transformation
# !wget -q -O linear_operator_batch.png --no-check-certificate "https://docs.google.com/uc?export=download&id=1obgl3sOIYsH_ijxxkhgBu4miBxq23fny"
Image("linear_operator_batch.png", width=500)
# + [markdown] id="9VS3QAuI-NhL"
# ## Conclusion
#
# In this reading, you saw how to construct scale bijectors in two dimensions using the various `ScaleMatvec` classes. You also had a quick introduction to the general `LinearOperators` class and some of its subclasses. Finally, you saw how batching makes large computations clean and efficient. Be careful to keep track of the tensor shapes, as broadcasting and the difference between batch shapes and event shapes makes errors easy. Finally, note that these bijectors are still amenable to composition (via `Chain` or simply feeding one into another) and inversion, which retains the same syntax you're used to. Enjoy using this powerful tool!
# + [markdown] id="wACUDIBT-NhM"
# ### Further reading and resources
#
# * `ScaleMatvec` bijectors:
# * https://www.tensorflow.org/probability/api_docs/python/tfp/bijectors/ScaleMatvecDiag
# * https://www.tensorflow.org/probability/api_docs/python/tfp/bijectors/ScaleMatvecLinearOperator
# * https://www.tensorflow.org/probability/api_docs/python/tfp/bijectors/ScaleMatvecLU
# * https://www.tensorflow.org/probability/api_docs/python/tfp/bijectors/ScaleMatvecTriL
# * `LinearOperator` class (see also subclasses)
# * https://www.tensorflow.org/api_docs/python/tf/linalg/LinearOperator
| Course3-Probabilistic_Deep_Learning_with_Tensorflow2/week3_Scale_bijectors_and_LinearOperator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python Intel Pytorch
# language: python
# name: pytorch
# ---
# ### Module import
# !pip install pytorch-gradcam
# +
# Dependencies for loading the trained model and computing Grad-CAM visualisations.
import os
import PIL
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.models as models
from torchvision import transforms
from torchvision.utils import make_grid, save_image
# pytorch-gradcam package (installed by the pip cell above)
from gradcam.utils import visualize_cam
from gradcam import GradCAM, GradCAMpp
# Run everything on CPU (checkpoint is loaded with map_location='cpu' below).
device = 'cpu'
# -
# ### Load image
# +
# Directory with held-out test images (benign class) for the thyroid model.
img_dir = '/Volumes/hack014/A-DEEP-CONVOLUTIONAL-NEURAL-NETWORK-FOR-IDENTIFYING-THYROID-CANCER/model/thyroidemb/mixed/utsw_test/benign'
# Alternative images kept for quick switching; uncomment one to use it instead.
# img_name = '4736_17_trv_0008_T4.tif'
# img_name = '4785_25_0010_T4.tif'
# img_name = '4852_33_trv_11_T5.tif'
# img_name = '4942_18_yrv_0016_T4.tif'
# BENIGN:
# img_name = '4881_30_trv_0014_T3.tif'
img_name = '4221_29_trv_trv_0042_T3.tif'
img_path = os.path.join(img_dir, img_name)
# convert('RGB') forces 3 channels — presumably the TIFFs may be greyscale
# and the ResNet input expects RGB; confirm against the training pipeline.
pil_img = PIL.Image.open(img_path).convert('RGB')
pil_img
# -
# ### preprocess image
# Resize the PIL image to 288x288 and convert it to a CHW float tensor on `device`.
resize_to_tensor = transforms.Compose([transforms.Resize((288, 288)),
                                       transforms.ToTensor()])
torch_img = resize_to_tensor(pil_img).to(device)
# Normalise with the standard ImageNet channel means/stds and add a leading
# batch axis ([None]) so the tensor is (1, 3, 288, 288) for the network.
imagenet_norm = transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
normed_torch_img = imagenet_norm(torch_img)[None]
# ### Load torchvision models and make configs
# +
import torch.nn as nn
# ResNet-50 backbone with randomly initialised weights (our own weights are loaded below).
resnet = models.resnet50(pretrained=False)
# Replace the final fully-connected layer with a single-logit head
# (one output unit, consistent with a binary benign/malignant classifier).
num_ftrs = resnet.fc.in_features
resnet.fc = nn.Linear(num_ftrs, 1)
model_file = os.path.abspath('/Volumes/hack014/A-DEEP-CONVOLUTIONAL-NEURAL-NETWORK-FOR-IDENTIFYING-THYROID-CANCER/model/thyroidemb/resnet-mixed2/model.pt')
# map_location='cpu' lets a GPU-trained checkpoint load on a CPU-only machine.
resnet.load_state_dict(torch.load(model_file,map_location='cpu'))
# -
# Grad-CAM is computed against 'layer4', the last convolutional stage of ResNet-50.
configs = [
    dict(model_type='resnet', arch=resnet, layer_name='layer4'),
]
# +
# Move every configured model to the target device and switch it to inference mode.
for cfg in configs:
    cfg['arch'].to(device).eval()

# For each config, build a [GradCAM, GradCAMpp] pair of CAM extractors.
cams = []
for cfg in configs:
    cams.append([cam_cls.from_config(**cfg) for cam_cls in (GradCAM, GradCAMpp)])
# -
# ### Feedforward image, calculate GradCAM/GradCAM++, and gather results
# +
# For each (GradCAM, GradCAM++) pair, compute the class-activation mask on the
# normalised input, overlay it on the un-normalised image, and collect five panels:
# [original, CAM heatmap, CAM++ heatmap, CAM overlay, CAM++ overlay].
images = []
for gradcam, gradcam_pp in cams:
    mask, _ = gradcam(normed_torch_img)
    heatmap, result = visualize_cam(mask, torch_img)
    mask_pp, _ = gradcam_pp(normed_torch_img)
    heatmap_pp, result_pp = visualize_cam(mask_pp, torch_img)
    images.extend([torch_img.cpu(), heatmap, heatmap_pp, result, result_pp])
# nrow=5 puts one model's five panels on a single row of the grid.
grid_image = make_grid(images, nrow=5)
# -
# ### Show results
# Convert the tensor grid back to a PIL image so the notebook displays it inline.
transforms.ToPILImage()(grid_image)
| model/thyroidemb/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ARCTraining/python-2021-04/blob/gh-pages/notebooks/020_data_pandas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="KgG9J1Lfk5_Z"
# # Starting with Data
# + id="a9s-hezjkb5W"
# Author: <NAME>
# Date: 2021-04-26
# Lesson link: https://arctraining.github.io/python-2021-04/02-starting-with-data/index.html
# + colab={"base_uri": "https://localhost:8080/"} id="eTx3Ol5Wbufe" outputId="8391f9cb-48d5-4238-c0dc-adedbcc48be1"
# Connect my Google Drive to Google Colab
# (prompts for authorisation; the Drive contents then appear under /content/gdrive)
from google.colab import drive
drive.mount ('/content/gdrive')
# + id="YoHKafr1lOhx"
# import pandas library
import pandas as pd
# + id="sK6fQUSplpt-"
# We use the Portal Project teaching dataset
# https://figshare.com/articles/dataset/Portal_Project_Teaching_Database/1314459
# + colab={"base_uri": "https://localhost:8080/"} id="JcalYB5xmwwQ" outputId="895619eb-fee1-4f71-fafc-7180d2c48608"
# But first we need to download it
# !wget https://arctraining.github.io/python-2021-04/data/portal-teachingdb-master.zip
# + id="5LF8hea0rt_7"
# after downloading rename
# portal-teachingdb-master.zip to data.zip
# <<< do that in the file explorer window to the left of the notebook <<<
# + colab={"base_uri": "https://localhost:8080/"} id="_z82woXzoekT" outputId="9a647392-a2cd-445e-f34d-e19bf954d07e"
# now unzip the file we've just downloaded
# !unzip data.zip
# + id="gsK3kDbgr7fk"
# rename the folder portal-teachingdb-master to data
# <<< do that in the file explorer window to the left of the notebook <<<
# + colab={"base_uri": "https://localhost:8080/"} id="RScX5BtHovh-" outputId="e7e9b285-1d74-4c5b-8d0e-01f8ce479d8b"
# see what we have done
# !ls -l
# + colab={"base_uri": "https://localhost:8080/"} id="TaTqKsLkirHB" outputId="da9b75e7-35bd-4708-bf2c-fc3e4dfde6c2"
# The full filepath has a space in it (between Colab and Notebooks) which we need to accommodate
# Either by including the path in quotes - like this:
# !ls "/content/gdrive/MyDrive/Colab Notebooks/intro-python-2021-04/data"
# + colab={"base_uri": "https://localhost:8080/"} id="FI4f-44xjQ0u" outputId="6889a8dc-c181-44a8-f8c9-26eb29542cc9"
# Or by 'escaping out' the space by putting a backslash \ in front of it
# Like this:
# !ls /content/gdrive/MyDrive/Colab\ Notebooks/intro-python-2021-04/data
# + id="xsDxUu30fPNZ"
# Move the data folder into your 'gdrive'
# Move it into gdrive/MyDrive/Colab Notebooks/intro-python-2021-04
# + colab={"base_uri": "https://localhost:8080/", "height": 444} id="6jMUnG1Uo4Iq" outputId="951b06f8-1d01-4db7-88b3-d68dba82f810"
# First look at the data
# Copy the 'path' to your Google Drive data folder and replace it with the 'data' folder we had last week
# Without an assignment the returned DataFrame is just displayed and then discarded.
pd.read_csv ('/content/gdrive/MyDrive/Colab Notebooks/intro-python-2021-04/data/surveys.csv')
# + id="FiJYnoZ9qMVf"
# Read in the df and assign to a variable
surveys_df = pd.read_csv ('/content/gdrive/MyDrive/Colab Notebooks/intro-python-2021-04/data/surveys.csv')
# + id="LkfJq07-kl_a"
# But having to include this long path every time is a pain so
# keep the folder in a variable and join the filename onto it
filepath = "/content/gdrive/MyDrive/Colab Notebooks/intro-python-2021-04/data/"
surveys_df = pd.read_csv (filepath + 'surveys.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="Q9hQJab2qeOp" outputId="ff0760ba-a5c6-4697-85c4-736c9d7f5552"
# Quickly view the contents
surveys_df
# + colab={"base_uri": "https://localhost:8080/", "height": 143} id="TKFdrmnIqnGZ" outputId="f00ad66e-112b-4ac4-99ef-6ebd59ebf6ba"
# look at the first few rows
surveys_df.head(3)
# + [markdown] id="jLPurv1NssXA"
# ## Exploring the dataset
# + colab={"base_uri": "https://localhost:8080/"} id="6i7T_PYusvbK" outputId="2b1a30e5-2904-45b0-8299-ed553a7284b6"
# What is surveys_df
type(surveys_df)
# + colab={"base_uri": "https://localhost:8080/"} id="ctSZL-6Vs4D7" outputId="2778d2a1-3222-4476-e47c-dfd376916880"
# What data types are in the dataframe
# (pandas infers one dtype per column when reading the CSV)
surveys_df.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="10ojDdQGtAbx" outputId="0534dd3d-d906-49d0-99fa-66fdeec39a21"
# Challenge 1
# the column labels of the dataframe
surveys_df.columns
# + colab={"base_uri": "https://localhost:8080/"} id="UQsHndMxu3c3" outputId="e835804a-b684-482a-ccdf-5ace276d2829"
# (number of rows, number of columns)
surveys_df.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="xCR4Kv7PvCib" outputId="3d8e4b0f-1ce3-40fd-8b11-43111e85afc8"
# the last five rows
surveys_df.tail()
# + [markdown] id="t_L2LpHkvc4n"
# ## Basic statistics on the dataframe
# + colab={"base_uri": "https://localhost:8080/"} id="8XgkXQtlvPQm" outputId="6c6b8187-07b4-4c3a-8a1a-41f09697e841"
# Look at the columns
surveys_df.columns
# + colab={"base_uri": "https://localhost:8080/"} id="feyNX1W7vomO" outputId="042f37d0-9a9b-463c-e008-4dda3f8f4208"
# What are the unique values of species
pd.unique(surveys_df['species_id'])
# + id="WBQ92Z7Vw_Gb"
# Challenge 2: Statistics
# + id="f5RaWtZFxmvJ"
# Create a list of unique plot_id
# (pd.unique returns an array of distinct values in order of appearance)
site_names = pd.unique(surveys_df['plot_id'])
# + colab={"base_uri": "https://localhost:8080/"} id="E0Dr80oPyLx-" outputId="d0603e52-ac82-4423-ad49-d9240a324967"
site_names
# + colab={"base_uri": "https://localhost:8080/"} id="XMPHWw0XyNJn" outputId="da6de1af-0fc2-47d6-a97e-0c0f4ab2d78b"
# How many unique site names are there?
len (site_names)
# + colab={"base_uri": "https://localhost:8080/"} id="3B61bdPJyZAP" outputId="57a17311-ea21-47a4-a55e-c95eceb6a9aa"
# Number of species
len (pd.unique(surveys_df['species_id']))
# + colab={"base_uri": "https://localhost:8080/"} id="bVobh6NSyuVv" outputId="0c1ae590-9810-4ea5-e99f-2d3e050c9c4f"
# What is the difference between len(site_names) and surveys_df['plot_id'].nunique()?
# nunique() counts distinct values directly on the Series (excluding NaN by default).
surveys_df['plot_id'].nunique()
# + [markdown] id="OF-TVjoLzPMx"
# ## Grouping in Pandas
# + id="yeIMVPV3y2-r"
# Homework:
# Try the 'grouping in pandas' section *and* the Challenge - Summary Data
# + id="XQ3P4fXq00jE" colab={"base_uri": "https://localhost:8080/"} outputId="85001e14-a635-4b3b-a179-489bcb019876"
surveys_df['weight'].describe()
# + colab={"base_uri": "https://localhost:8080/"} id="Vg8NP22OocuW" outputId="d066b87b-4e09-4f98-bdcc-022d79b773f2"
# If I want just one statistical metric
surveys_df['weight'].max()
# + colab={"base_uri": "https://localhost:8080/"} id="msBkMWKvoxr6" outputId="ecd7b804-93d8-400a-bf38-210fba2dcf7c"
surveys_df['weight'].mean()
# + id="2_XxtEkopDuM"
# To summarise by one or more variables:
# groupby returns a lazy GroupBy object; nothing is computed until we aggregate.
grouped_data_sex = surveys_df.groupby('sex')
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="WjeFLgAepQtP" outputId="b4721e14-daa5-4cf3-bc8c-b32d0da5a99b"
# Summary statistics
grouped_data_sex.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 143} id="ZZCHZwStpapj" outputId="6a9f0feb-ca89-481b-e261-0e72896aabde"
# Get just the mean by sex
grouped_data_sex.mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="mGvuLxbrppl0" outputId="6deb6fa4-cd2d-4a72-f981-95904a87b2dd"
# Challenge
# Q1: How many recorded individuals are female F and how many male M
# (read the 'count' column of the describe() output)
grouped_data_sex.describe()
# + id="X0LXJvuxql1q"
# Q2: What happens when you group by two columns using the following syntax and then calculate mean values?
grouped_data2 = surveys_df.groupby(['plot_id', 'sex'])
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="uxZY9YEBrTYF" outputId="b5a54c43-cbac-4480-ee71-87a6f27aa062"
# the result is indexed by the (plot_id, sex) pairs
grouped_data2.mean()
# + colab={"base_uri": "https://localhost:8080/", "height": 833} id="j3wLBtQlrnpD" outputId="79d95710-1805-42bc-dc0d-c4f577c03dba"
# Q3: Summarize weight values for each site in your data. HINT: you can use the following syntax to only
# create summary statistics for one column in your data.
# by_site['weight'].describe()
by_site = surveys_df.groupby(['plot_id'])
by_site["weight"].describe()
# + [markdown] id="HglujkDbtCrg"
# ## Summary counts and basic maths
# + colab={"base_uri": "https://localhost:8080/"} id="DyEbxuocss_9" outputId="cb9d2dde-8af3-4bd8-ad5c-c489be90ad36"
# Count the number of samples per species
species_count = surveys_df.groupby('species_id')['record_id'].count()
print (species_count)
# + colab={"base_uri": "https://localhost:8080/"} id="OY8GKzvMvFZ1" outputId="4530e346-2673-4bc0-b162-26e6b8a3a920"
# Counting two columns at once returns a DataFrame
# (note: species_count is re-assigned here, shadowing the Series above)
species_count = surveys_df.groupby('species_id')[['record_id', 'month']].count()
print (species_count)
# + colab={"base_uri": "https://localhost:8080/"} id="nwJ_9htzt_n_" outputId="851fac5f-aefc-419f-cd14-c55f0cebe185"
# Just for one species , eg. RM
surveys_df.groupby('species_id')['record_id'].count()[['RM', 'UR']]
# + colab={"base_uri": "https://localhost:8080/"} id="xftJdAFzudir" outputId="5f920519-cce9-4708-b584-1ef0cf292cff"
# Basic Maths
# arithmetic on a Series is elementwise
surveys_df['weight'] * 2
# + [markdown] id="Lv4ZrhA-vxwC"
# ## Simple plotting in Python
# + id="Js0TZK23uzUj"
# If you're using Notebooks then we need to make sure the plots appear in the browser
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/", "height": 287} id="4oUEYOuJwuYS" outputId="1cb0f507-f0f3-4708-e9ad-4dc6b1e6e373"
# Create a quick bar chart
# NOTE(review): species_count was re-assigned above to a two-column DataFrame,
# so this plots record_id and month counts side by side — confirm that is intended.
species_count.plot (kind = 'bar');
# + id="rrG6426Nw_5B"
# How many animals were captured in each site
total_count = surveys_df.groupby('plot_id')['record_id'].count()
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="4Zz4_jovyVwg" outputId="76556e9c-61bf-4822-a72c-0bb837ad3f68"
total_count.plot(kind = 'bar');
# + [markdown] id="UA8rvkO_u3S5"
# ## Indexing and slicing with pandas
# + id="kov2rSILyb67"
# Python list indexing is zero-based
a = [1, 2, 3, 4, 5]
# + id="k6SZ1pZ0u5qj" colab={"base_uri": "https://localhost:8080/"} outputId="9981fc2f-d1ff-45d3-b613-78adbd356db0"
a[2]
# + id="_5NclVX0u7FY" colab={"base_uri": "https://localhost:8080/"} outputId="24056f80-f750-48b2-9902-6b997ed6babe"
# view the available columns in the dataframe
surveys_df.columns
# + id="Rq8I1L_Fu8Y6" colab={"base_uri": "https://localhost:8080/", "height": 339} outputId="72774b41-172f-4419-8ee7-cf6742bf92f3"
# indexing out a whole column
# here we get an error because we're trying to index into the special .columns attribute
# (deliberate error, kept for teaching purposes)
surveys_df.columns['hindfoot_length']
# + id="WIfhG4Dsu9uY" colab={"base_uri": "https://localhost:8080/"} outputId="b6815cb6-6388-403f-c061-7a0564d6b5cb"
# we can slice out a column using square brackets and the string of the column name
surveys_df['hindfoot_length']
# + id="YbtuL-7Xu-1s" colab={"base_uri": "https://localhost:8080/", "height": 36} outputId="aeda4319-7f17-487a-8ffb-ba68b29d0ffc"
# here we sliced out the column name from the .columns attribute
# but not the values of the column itself
surveys_df.columns[-2]
# + id="Oo8jGYpfu_4p" colab={"base_uri": "https://localhost:8080/"} outputId="46e0b79d-0357-4d94-cca3-eb00d85c8cc8"
# we can also use dot notation to index out a single column
surveys_df.hindfoot_length
# + id="429gOHKLvA1L" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="02037d74-da11-4148-fc22-9730e3bff814"
# indexing out multiple columns means passing a list
# to the index brackets
surveys_df[['hindfoot_length','month']]
# + id="aocHHAoBvCe4" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="592e6736-f131-4d15-e9d8-501c41b62e03"
# we can also do this by assigning a list as a variable
# and passing the variable to the index brackets
desired_cols = ['hindfoot_length','month']
surveys_df[desired_cols]
# + id="yO2RzK6uvD0y" colab={"base_uri": "https://localhost:8080/", "height": 603} outputId="7f0bfe2c-d5e5-47ec-98b8-bfa7c6313aba"
# indexing a column name that does not exist gives an error
surveys_df['name']
# + id="kaMVAOlovE4g" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="284bbd13-ee51-4cc3-8250-bdf6d9127e1f"
surveys_df.head()
# + id="A16FpWvGvFz5" colab={"base_uri": "https://localhost:8080/"} outputId="f34a87cc-6ce3-4621-c1f0-c1f001153317"
# we can also use .iloc and .loc to index rows, columns
# .iloc index by integer location
# index out the row at the 0th index
surveys_df.iloc[0]
# + id="u2aUNOwOvG0h" colab={"base_uri": "https://localhost:8080/"} outputId="aef9c0c6-2caf-43cc-dee6-af17c19936f9"
# here we've indexed the first 3 rows of the column at integer position 7
# (note: position 7 is the 8th column, counting from zero)
surveys_df.iloc[0:3,7]
# + id="3mDfLGoZvH5y" colab={"base_uri": "https://localhost:8080/"} outputId="b616d750-4824-438b-8cb2-8c6e7059b213"
# .loc indexes by labels
# here we've indexed from rows 0 to 3 (by label)
# at the 'hindfoot_length' column
# (unlike .iloc, .loc slicing is INCLUSIVE of the end label, so 4 rows)
surveys_df.loc[0:3,'hindfoot_length']
# + id="Y0mvw0vBvIv2" colab={"base_uri": "https://localhost:8080/", "height": 143} outputId="c844d22d-de49-46a8-854e-7e7daa91d0ee"
# using iloc to index the first three rows and columns
surveys_df.iloc[0:3, 0:3]
# + id="pcWS_zB5vJus"
# we can make a copy of a subsetted data frame by using .copy()
# this creates a separate object from our original dataframe surveys_df
copy_of_first3 = surveys_df.iloc[0:3, 0:3].copy()
# + id="GQzLLPvdvKmi"
# this notation just creates a reference to the original dataframe
# so any changes we make to copy_of_first3 are actually made to the original
# surveys_df dataframe
copy_of_first3 = surveys_df.iloc[0:3, 0:3]
# + id="cDd2NR8EvLkY" colab={"base_uri": "https://localhost:8080/"} outputId="63289aa1-0a79-4fcb-ea1c-a80b34464162"
# we can also perform a vectorised boolean comparison and
# return a boolean array
surveys_df['species_id'] == 'NL'
# + id="6f-DtWMzvMd4"
# we can use this to subset the dataframe for a specific species
subset_df = surveys_df[surveys_df['species_id'] == 'NL'].copy()
# + id="pS44WxvsvNV0" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="720d4b4c-f017-4338-9b19-3216c5581ebd"
# we can also do this in two lines by assign a bool_mask variable
bool_mask = surveys_df['species_id'] == 'NL'
surveys_df[bool_mask]
# + id="DA3hHnO6vOW1" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="3d657bf0-63e2-4519-a1cf-7a7aa42851ed"
subset_df.head()
# + id="bOfv9WNmvPUC"
# if you want to update your indexes on a subset
# subset_df.reset_index(inplace=True)
# + id="CqdrZpkhvQHQ"
# we can set values of rows when indexing
# but this can be quite dangerous so be careful
subset_df.iloc[0:3, 0] = 0
# + id="G5dZfqoevQ6i" colab={"base_uri": "https://localhost:8080/", "height": 143} outputId="8c80641f-f1c1-436e-a927-9da51b328790"
# checking the changes
subset_df.iloc[0:3]
# + id="bEPXnK4gvRyi" colab={"base_uri": "https://localhost:8080/"} outputId="852c1cda-5a1a-4f31-f68e-dbdc019dca9b"
# we can use subsetting to generate quick summary data
surveys_df[surveys_df['species_id'] == 'ZL'].count()
# + id="cqtbNRILvS0Z" colab={"base_uri": "https://localhost:8080/", "height": 112} outputId="bcae061d-37ed-4df4-adc1-694d3f33d259"
surveys_df[surveys_df['species_id'] == 'ZL']
# + id="3Zah2TDBvTsB" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="eda8eede-ddcd-478b-e1a5-7157c20e75ab"
# we can also subset by multiple boolean values
# using the `&` AND operator here which checks whether both conditions are True
# we can use `|` OR operator to check if one of the two is True
surveys_df[(surveys_df.year >= 1980) & (surveys_df.year <= 1985)]
# + id="Jw2g6JshRWYg"
# + id="TOYiR62HObV-" colab={"base_uri": "https://localhost:8080/"} outputId="1ee43ef6-9bd0-46c4-d3a3-6524303ca34e"
## Homework
# Count the 1999 records whose weight is at most 8 grams.
num_rows = surveys_df[(surveys_df.year == 1999) & (surveys_df.weight <= 8)].shape[0]
print(f"Number of rows from the year 1999 with weight less than or equal to 8 are: {num_rows}")
# + id="eJR4zXuZQMFt" colab={"base_uri": "https://localhost:8080/"} outputId="e7d8916c-7617-4e2e-eefe-139391ab63c3"
# using .isin
# .isin builds a boolean mask: True where species_id is one of the listed codes
species_list = ['NL','PF','PE','AS','ST']
surveys_df['species_id'].isin(species_list)
# + id="N4zPpVdQQesg" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="34a2afb9-59e4-4b9f-b8ee-a4cb87f68aee"
surveys_df[surveys_df['species_id'].isin(species_list)]
# + id="9e5_dYLdQwUQ" colab={"base_uri": "https://localhost:8080/"} outputId="a8982a9e-0242-412c-ea54-1269768fd819"
# using masks to subset data
# using .isin
species_list = ['NL','PF','PE','AS','ST']
surveys_df['species_id'].isin(species_list)
# + id="om7dPiSnXbY-" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="a19e7f7c-8f4c-4a78-9415-274a5101d138"
# Element-wise null check: True wherever a cell is missing.
pd.isnull(surveys_df)
# + id="T4jMAtVhX4L3" colab={"base_uri": "https://localhost:8080/"} outputId="5e8ee7e9-f3c7-4ff2-c1ab-2a599ebe159f"
# ~ negates the mask: True for rows that have NO missing values at all.
~pd.isnull(surveys_df).any(axis=1)
# + id="lxUGQNNZXjWr" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="93bba5fe-ae7e-4a30-e37f-164793d141c3"
surveys_df[~pd.isnull(surveys_df).any(axis=1)]
# + id="IBo_jragXxwb" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="8694d9d3-8836-4827-cc4c-b894f0a61a37"
# .dropna achieves the same result as the manual mask above.
surveys_df.dropna(axis=0, how='any')
# + [markdown] id="uUg65pG7aBVW"
# ## Data Types and formats
# + id="_FLEngYeZgl9" colab={"base_uri": "https://localhost:8080/"} outputId="0a44a6f2-1f11-4cdb-c291-5bbb559552df"
# we can check the type of python objects with type
type(surveys_df)
# + id="d2lGbNcCaKfa" colab={"base_uri": "https://localhost:8080/"} outputId="fc2e6b3e-dba4-47ce-d8e3-ad1b1910d8e2"
# with pandas we can check the specific datatypes of columns
# using the .dtypes attribute
# pandas has specific dtypes related to native python types
# int64, 64 bit integer
# float64, 64-bit float
# object, a general data type based on string types
surveys_df.dtypes
# + id="CrrFzFsaaRgK" colab={"base_uri": "https://localhost:8080/"} outputId="2b0d5fa2-f452-4b22-cf3f-833508810ca1"
# a reminder of basic maths with native python types
print(5 + 5)
# + id="UMZHHxm4bNhM" colab={"base_uri": "https://localhost:8080/"} outputId="3a4d032b-1358-4643-c415-869c5e680184"
print( 24 - 4)
# + id="gXEsRCc2bPYr" colab={"base_uri": "https://localhost:8080/"} outputId="38f1444d-e81b-401f-9079-bdef80ad39e4"
# true division always yields a float in Python 3
print( 5 / 9 )
# + id="P9KESOV4bSQq" colab={"base_uri": "https://localhost:8080/"} outputId="ca5bc80f-5759-4c5f-b093-0a2b830e99af"
# floor division to yield an int
print( 1 // 5)
# + id="1E0G-8u6bYzs" colab={"base_uri": "https://localhost:8080/"} outputId="04ba39ae-c2fc-4fce-b487-2ade1f39f0a7"
# mixing int and float promotes the result to float
print( 2.5 + 1)
# + id="5-H-EC5MbheN" colab={"base_uri": "https://localhost:8080/"} outputId="cf32ce1f-ba20-45bf-8026-abafc86c4a01"
# changing data types of a pandas column
surveys_df['record_id']
# + id="Jm29PSQhbye7" colab={"base_uri": "https://localhost:8080/"} outputId="db438a01-e3f8-46f2-9f6e-098cd279a38e"
# .astype function allows us to cast a pandas row as a specific pandas dtype
surveys_df['record_id'].astype('float64')
# + id="TpvT2Y_ecG8K" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="966fdd52-0b88-4141-d84b-8d422647d3f7"
# this doesn't update the original dataframe
surveys_df.head()
# + id="8IfryXdMcnWK"
# we can update the values on a specific column
surveys_df['record_id'] = surveys_df['record_id'].astype('float64')
# + id="1HWdiQZvc6mK" colab={"base_uri": "https://localhost:8080/", "height": 326} outputId="4e2ca6fa-8f82-4fef-983e-eb06b6c54aa7"
# casting types can be difficult as highlighted here
# the 'weight' column contains NaN values which are a float64 type
# but when trying to cast them as int64s you'll get an error
surveys_df['weight'].astype('int64')
# + id="RLBN5Guwc71a" colab={"base_uri": "https://localhost:8080/"} outputId="ff36f522-d7b0-47e2-a102-42fed5ce93b7"
# you can handle NaNs in a number of ways
# handling NaN with subsetting here with the .dropna() function
surveys_df['weight'].dropna().astype('int64')
# + id="bG8maMdcd_7p" colab={"base_uri": "https://localhost:8080/"} outputId="91dcedbc-db1c-46e8-ad07-324209c31933"
# or you can replace all NaN with a value, in this case zeros
surveys_df['weight'].fillna(0)
# + id="XTAvJk94efiK" colab={"base_uri": "https://localhost:8080/", "height": 456} outputId="0076d43e-6f19-4199-bcfe-26b14593d98a"
# you can't cast the type of a column by trying to set the .dtype attribute
# the only way to do this is with the .astype function
# (this line intentionally raises an error)
surveys_df['weight'].dtype = 'int64'
# + id="iAU-CnI9evjp" colab={"base_uri": "https://localhost:8080/", "height": 206} outputId="7b6be474-f0ea-488f-b9d0-57f955d7d77c"
# handling datetime types in pandas
# in our dataframe we already have dates separated over 3 cols: month, day, year
# we can combine these into a single datetime column with a special datetime type
surveys_df.head()
# + id="TxLnnxvkhuzc"
# first we need to create a string column that combines year-month-day with the format
# yyyy-mm-dd
# we can do this using vectorised string concatenation with type casting shown below
date_col = surveys_df['year'].astype(str) + '-' + surveys_df['month'].astype(str) + '-' + surveys_df['day'].astype(str)
# + id="_wmphfOAh-ns"
# we can then use the super useful pd.to_datetime function to convert this date column
# into the datetime type
date_col_dt = pd.to_datetime(date_col)
# + id="gPw59C6ii5C1" colab={"base_uri": "https://localhost:8080/"} outputId="dcd0419d-4473-47d9-e708-4e232f965952"
# with the datetime Timestamp object we can do clever things like
# extract attributes such as .month, .year, .day
date_col_dt[0].month
# + id="WxPaUSpFiO9-" colab={"base_uri": "https://localhost:8080/"} outputId="660a302d-791e-4728-8f89-76ace53d60b8"
# we can use the apply method to apply a function across an entire column
# in this example we extract the specific day for every Timestamp object
date_col_dt.apply(lambda x: x.day)
# + id="Fi_3EGX0jW7a" colab={"base_uri": "https://localhost:8080/", "height": 424} outputId="d5a70f1b-9ac1-4462-b460-a776cc611afc"
surveys_df
# + id="zBstirb0kuZJ"
# we can save our pandas dataframes to disk as a csv format
# using the .to_csv method
# we use the argument index=False here to not write the index numbers to disc
surveys_df[(surveys_df.year ==1997)]\
.to_csv('/content/gdrive/MyDrive/Colab Notebooks/intro-python-2021-04/data/1997-data.csv', index=False)
# + id="UmUc45tFk_mN"
| notebooks/020_data_pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="deb612cf-001" colab_type="text"
# #1. Install Dependencies
# First install the libraries needed to execute recipes, this only needs to be done once, then click play.
#
# + id="deb612cf-002" colab_type="code"
# !pip install git+https://github.com/google/starthinker
# + [markdown] id="deb612cf-003" colab_type="text"
# #2. Get Cloud Project ID
# To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
#
# + id="deb612cf-004" colab_type="code"
# Paste the Google Cloud project ID the recipe should run against.
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print(f"Cloud Project Set To: {CLOUD_PROJECT}")
# + [markdown] id="deb612cf-005" colab_type="text"
# #3. Get Client Credentials
# To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
#
# + id="deb612cf-006" colab_type="code"
# Paste the downloaded OAuth client credentials JSON (as a string).
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print(f"Client Credentials Set To: {CLIENT_CREDENTIALS}")
# + [markdown] id="deb612cf-007" colab_type="text"
# #4. Enter CM360 Conversion Upload From BigQuery Parameters
# Move from BigQuery to CM.
# 1. Specify a CM Account ID, Floodlight Activity ID and Conversion Type.
# 1. Include BigQuery dataset and table.
# 1. Columns: Ordinal, timestampMicros, encryptedUserId | encryptedUserIdCandidates | gclid | mobileDeviceId
# 1. Include encryption information if using encryptedUserId or encryptedUserIdCandidates.
# Modify the values below for your use case, can be done multiple times, then click play.
#
# + id="deb612cf-008" colab_type="code"
# Recipe parameters: edit the values for your use case before running.
# Keys must match the 'name' entries of the field placeholders in TASKS.
FIELDS = {
    'account': '',                                        # CM account ID
    'auth_read': 'user',                                  # Credentials used for reading data.
    'floodlight_activity_id': '',
    'floodlight_conversion_type': 'encryptedUserId',
    'encryption_entity_id': '',
    'encryption_entity_type': 'DCM_ACCOUNT',
    'encryption_entity_source': 'DATA_TRANSFER',
    'bigquery_dataset': '',
    'bigquery_table': '',
    'bigquery_legacy': True,
}
print(f"Parameters Set To: {FIELDS}")
# + [markdown] id="deb612cf-009" colab_type="text"
# #5. Execute CM360 Conversion Upload From BigQuery
# This does NOT need to be modified unless you are changing the recipe, click play.
#
# + id="deb612cf-010" colab_type="code"
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
# Local path where StarThinker caches the user's OAuth token.
USER_CREDENTIALS = '/content/user.json'
# Recipe definition: every {'field': ...} placeholder below is replaced with
# the matching value from FIELDS by json_set_fields() before execution.
TASKS = [
    {
        'conversion_upload': {
            'auth': 'user',
            'account_id': {'field': {'name': 'account','kind': 'string','order': 0,'default': ''}},
            'activity_id': {'field': {'name': 'floodlight_activity_id','kind': 'integer','order': 1,'default': ''}},
            'conversion_type': {'field': {'name': 'floodlight_conversion_type','kind': 'choice','order': 2,'choices': ['encryptedUserId','encryptedUserIdCandidates','gclid','mobileDeviceId'],'default': 'encryptedUserId'}},
            # Encryption details, required only for the encryptedUserId types.
            'encryptionInfo': {
                'encryptionEntityId': {'field': {'name': 'encryption_entity_id','kind': 'integer','order': 3,'default': ''}},
                'encryptionEntityType': {'field': {'name': 'encryption_entity_type','kind': 'choice','order': 4,'choices': ['ADWORDS_CUSTOMER','DBM_ADVERTISER','DBM_PARTNER','DCM_ACCOUNT','DCM_ADVERTISER','ENCRYPTION_ENTITY_TYPE_UNKNOWN'],'default': 'DCM_ACCOUNT'}},
                'encryptionSource': {'field': {'name': 'encryption_entity_source','kind': 'choice','order': 5,'choices': ['AD_SERVING','DATA_TRANSFER','ENCRYPTION_SCOPE_UNKNOWN'],'default': 'DATA_TRANSFER'}}
            },
            # Source table holding the conversions to upload.
            'bigquery': {
                'dataset': {'field': {'name': 'bigquery_dataset','kind': 'string','order': 6,'default': ''}},
                'table': {'field': {'name': 'bigquery_table','kind': 'string','order': 7,'default': ''}},
                'legacy': {'field': {'name': 'bigquery_legacy','kind': 'boolean','order': 8,'default': True}}
            }
        }
    }
]
# Substitute the user-supplied FIELDS into the recipe, then run it.
json_set_fields(TASKS, FIELDS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True)
project.execute(_force=True)
| colabs/conversion_upload_from_biguery.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Note: This section is still in a draft form.**
# ## Section 5: A Combined Approach
# * We take the tartans from Section 3.
from tartans import tartans
# * And the quantum island from Section 4 (which itself used the island from Section 1).
# +
from quantum_island import quantum_island
from tools import plot_height
# Visualise the island's 2D height map before building it in 3D.
plot_height(quantum_island)
# -
# * As well as a drop of randomness.
import random
# * We will now combine these to bring the island into 3D.
# * This is done by the following function, which builds terrain that can be used in the [Minetest](https://www.minetest.net/) game engine.
# * Specifically, it can be used in conjunction with the [csv2terrain](https://github.com/quantumjim/csv2terrain/blob/master/README.md) mod for Minetest.
# * The function turns the terrain into a csv file, which specifies what type of block needs to go at what coordinate in order to create the island.
# * A wall is placed around the island, decorated with the rune-based tartans.
#
def make_blocks(height,tartans,depth,terrain = {'sea':2/16,'beach':3/16,'grass':5/16,'forest':12/16,'rock':14/16},origin=(0,0,0),filename='blocks.csv'):
    """Turn a 2D height map into Minetest blocks and write them to a csv file.

    Parameters
    ----------
    height : dict mapping (x, y) coordinates to a height value in [0, 1].
    tartans : list of dicts mapping (x, y) to a value in [0, 1]; used as
        decorative patterns on the walls built around the island.
    depth : int, vertical scale (number of blocks corresponding to height 1.0).
    terrain : dict of upper height thresholds for each terrain band.
        NOTE(review): mutable default argument — harmless here since it is only
        read, but callers should not mutate it.
    origin : (x, h, y) offset added to every coordinate written to the file.
    filename : path of the csv written; one "x,h,y,blocktype," row per block,
        preceded by min/max bounding-box rows and a player spawn row.
    """
    def in_band(h,this_band):
        # True when h lies in [lower, upper) of the given band.
        return (h>=this_band[0]) and (h<this_band[1])
    def add_grass(blocks,x,h,y,r,p=1):
        # Place one of four grass/fern variants, chosen from the fraction r;
        # p < 1 leaves some positions empty (when r > p).
        # NOTE(review): the body uses the enclosing loop variable H rather than
        # the parameter h; every call site passes H so behaviour is unchanged,
        # but the parameter is effectively unused — confirm intent.
        if r<0.25*p:
            blocks[x,H,y] = 'fern_1'
        elif r<0.5*p:
            blocks[x,H,y] = 'marram_grass_1'
        elif r<0.75*p:
            blocks[x,H,y] = 'marram_grass_2'
        elif r<=p:
            blocks[x,H,y] = 'marram_grass_3'
    def add_tree(blocks,x,h,y,r):
        # Trunk: six blocks straight up from ground height h.
        for j in range(0,6):
            blocks[x,h+j,y] = 'tree'
        # Canopy: a rough sphere (squared radius < 8) centred above the trunk.
        for xx in range(x-3,x+4):
            for yy in range(y-3,y+4):
                for hh in range(h+5,h+11):
                    d = (xx-x)**2+(yy-y)**2+(hh-h-6)**2 + 0.1
                    if d<8:
                        blocks[xx,hh,yy] = 'leaves'
        # A short branch carrying a torch, on a side chosen from r.
        xx = x+2*(r<0.5)-1
        yy = y+2*(r<0.5)-1
        blocks[xx,h+5,yy] = 'tree'
        blocks[xx,h+4,yy] = 'torch'
    # Lower/upper height bounds for each terrain band, from the thresholds.
    band = {'sea':[0,terrain['sea']],'beach':[terrain['sea'],terrain['beach']],'grass':[terrain['beach'],terrain['grass']],'forest':[terrain['grass'],terrain['forest']],'rock':[terrain['forest'],terrain['rock']],'peak':[terrain['rock'],1]}
    # Keys of `blocks` are (x, h, y) coordinates; values are block type names.
    blocks = {}
    for (x,y) in height:
        blocks[x,0,y] = 'sand'
        H = int(height[x,y]*depth)   # integer column height in blocks
        r = height[x,y]*depth - H    # fractional part, reused as pseudo-randomness
        if in_band(height[x,y],band['sea']):
            # Sea floor in sand, then water up to sea level.
            for h in range(0,max(H,1)):
                blocks[x,h,y] = 'sand'
            for h in range(max(H,1),int(terrain['sea']*depth)):
                blocks[x,h,y] = 'water_source'
        elif in_band(height[x,y],band['beach']):
            # Stone core capped with two layers of sand.
            for h in range(1,H-2):
                blocks[x,h,y] = 'stone'
            if H>2:
                for h in [H-2,H-1]:
                    blocks[x,h,y] = 'sand'
        elif in_band(height[x,y],band['grass']) or in_band(height[x,y],band['forest']):
            # Stone core, dirt and grass cap, then grass or (in forest) trees.
            for h in range(1,H-2):
                blocks[x,h,y] = 'stone'
            blocks[x,H-2,y] = 'dirt'
            blocks[x,H-1,y] = 'dirt_with_grass'
            if in_band(height[x,y],band['grass']) or r>0.025:
                add_grass(blocks,x,H,y,r)
            else:
                add_tree(blocks,x,H,y,r)
        elif in_band(height[x,y],band['rock']):
            # Stone with ore veins in the top two layers, plus sparse grass.
            for h in range(1,H-2):
                blocks[x,h,y] = 'stone'
            for h in [H-2,H-1]:
                if r<1/6:
                    blocks[x,h,y] = 'stone_with_gold'
                elif r<2/6:
                    blocks[x,h,y] = 'stone_with_iron'
                elif r<3/6:
                    blocks[x,h,y] = 'stone_with_copper'
                else:
                    blocks[x,h,y] = 'stone'
            add_grass(blocks,x,H,y,r,p=0.5)
        elif in_band(height[x,y],band['peak']):
            # Bare stone all the way up.
            for h in range(1,H):
                blocks[x,h,y] = 'stone'
    size = max(max(height.keys()))+1   # side length of the (square) map
    L = max(max(tartans[0]))+1         # side length of one tartan pattern
    # Fill any coordinate not covered by the height map with sea.
    for x in range(size):
        for y in range(size):
            if (x,0,y) not in blocks:
                blocks[x,0,y] = 'sand'
            if (x,1,y) not in blocks:
                blocks[x,1,y] = 'water_source'
    # Perimeter walls, tiled with randomly chosen tartan patterns rendered in
    # diamond (value < 2/3) and gold (value >= 2/3) blocks.
    for j in range(0,size,L):
        tartan = random.choice(tartans)
        for dj in range(L):
            for h in range(L):
                x,y = dj,L-1-h
                blocks[j+dj,h+1,0] = (tartan[x,y]<2/3)*'diamondblock' + (tartan[x,y]>=2/3)*'goldblock'
                blocks[j+dj,h+1,size-1] = (tartan[x,y]<2/3)*'diamondblock' + (tartan[x,y]>=2/3)*'goldblock'
                blocks[0,h+1,j+dj] = (tartan[x,y]<2/3)*'diamondblock' + (tartan[x,y]>=2/3)*'goldblock'
                blocks[size-1,h+1,j+dj] = (tartan[x,y]<2/3)*'diamondblock' + (tartan[x,y]>=2/3)*'goldblock'
    # A staircase of tartan platforms descending from one corner of the wall.
    for h in range(L):
        tartan = random.choice(tartans)
        for x in range(L):
            for y in range(L):
                blocks[x-4*(h-(L-1)),h+1,y+size-L+4*(h-(L-1))] = (tartan[x,y]<2/3)*'diamondblock' + (tartan[x,y]>=2/3)*'goldblock'
    # A trail of tartan tiles at water level, continuing along the diagonal.
    for j in range(4*(L-1),int(size/2),4):
        for x in range(L):
            for y in range(L):
                blocks[x+j,1,y+size-L-j] = (tartan[x,y]<2/3)*'diamondblock' + (tartan[x,y]>=2/3)*'goldblock'
    with open(filename, 'w') as file:
        # Header rows: bounding-box min/max and the player spawn position.
        file.write( str(origin[0])+','+str(origin[1])+','+str(origin[2])+',min,\n' )
        file.write( str(origin[0]+size)+','+str(origin[1]+depth+1)+','+str(origin[2]+size-1)+',max,\n' )
        # NOTE(review): `h` here is the value leaked from the last wall loop
        # (L-1), so the player spawns just above the wall — confirm intent.
        file.write( str(origin[0]+1)+','+str(origin[1]+h+2)+','+str(origin[2]+size-2)+',player,\n' )
        for (x,h,y) in blocks:
            if h>=0 and x>=0 and y>=0 and x<=size and y<=size:
                file.write( str(x+origin[0])+','+str(h+origin[1])+','+str(y+origin[2])+','+blocks[x,h,y]+',\n' )
# * We will implement this with a height of 20 blocks for the island.
# * The coordinates of the origin are chosen so that the island can also be created in [QiskitBlocks](https://github.com/JavaFXpert/QiskitBlocks/blob/master/README.md).
# NOTE(review): unlike inside make_blocks, `size` here omits the +1 — confirm
# whether the resulting one-block shift of the origin is intentional.
size = max(max(quantum_island.keys()))
height = 20
make_blocks(quantum_island,tartans,height,origin=(600-size,0,370-size))
# That's all for now!
| Quantum_Procedural_Generation/5_A_Combined_Approach.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### The Lorenz system
#
#
# $$
# \begin{aligned}
# \dot{x} & = \sigma(y-x) \\
# \dot{y} & = \rho x - y - xz \\
# \dot{z} & = -\beta z + xy
# \end{aligned}
# $$
#
#
# The Lorenz system of coupled, ordinary, first-order differential equations have chaotic solutions for certain parameter values σ, ρ and β and initial conditions, u(0), v(0) and w(0):
# $$
# \begin{align*}
# \frac{\mathrm{d}u}{\mathrm{d}t} &= \sigma (v - u)\\
# \frac{\mathrm{d}v}{\mathrm{d}t} &= \rho u - v - uw\\
# \frac{\mathrm{d}w}{\mathrm{d}t} &= uv - \beta w
# \end{align*}
# $$
#
# The following program plots the Lorenz attractor (the values of x, y and z as a parametric function of time) on a Matplotlib 3D projection.
import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# %matplotlib inline
# +
# Create an image of the Lorenz attractor.
# The maths behind this code is described in the scipython blog article
# at https://scipython.com/blog/the-lorenz-attractor/
# <NAME>, January 2016.
# Lorenz parameters and initial conditions
# (classic chaotic regime: sigma=10, beta=8/3, rho=28)
sigma, beta, rho = 10, 2.667, 28
u0, v0, w0 = 0, 1, 1.05
# Maximum time point and total number of time points
tmax, n = 100, 10000
def lorenz(X, t, sigma, beta, rho):
    """Right-hand side of the Lorenz equations.

    X is the current state (u, v, w); t is the time (unused, but required
    by odeint's callback signature); sigma, beta, rho are the Lorenz
    parameters.  Returns the tuple of derivatives (du/dt, dv/dt, dw/dt).
    """
    u, v, w = X
    du = sigma*(v - u)
    dv = rho*u - v - u*w
    dw = u*v - beta*w
    return du, dv, dw
# Integrate the Lorenz equations on the time grid t
t = np.linspace(0, tmax, n)
f = odeint(lorenz, (u0, v0, w0), t, args=(sigma, beta, rho))
x, y, z = f.T
# Plot the Lorenz attractor using a Matplotlib 3D projection.
# Fix: fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and removed
# in 3.6; add_subplot(projection='3d') is the supported equivalent.
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
# Make the line multi-coloured by plotting it in segments of length s which
# change in colour across the whole time series.
s = 10
c = np.linspace(0,1,n)
for i in range(0,n-s,s):
    # One extra point per segment so consecutive segments join seamlessly.
    ax.plot(x[i:i+s+1], y[i:i+s+1], z[i:i+s+1], color=(1,c[i],0), alpha=0.4)
# Remove all the axis clutter, leaving just the curve.
ax.set_axis_off()
#plt.savefig('lorenz.png')
# -
# Re-assign and display the time-grid settings (cell output shows the tuple).
tmax, n = 100, 10000
tmax, n
# <div id="lorenz-graph" style="text-align:center">
# (loading Plotly graphing lib)
# </br></br>
# <img src="https://cdnjs.cloudflare.com/ajax/libs/galleriffic/2.0.1/css/loader.gif"></img>
# </div>
#
# <div id="lorenz-controls"> </div>
| Lorenz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" id="Voc9E9Qz1BZf" colab_type="code" colab={}
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
# Import libraries
from __future__ import division
from scipy import stats
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Seaborn styling for every plot in this notebook.
sns.set_style("white")
sns.set(style="ticks", color_codes=True)
# %matplotlib inline
from sklearn.model_selection import learning_curve, validation_curve, cross_val_score
import warnings
# Silence all warnings (the FutureWarning-only filter was disabled on purpose).
warnings.simplefilter(action='ignore')#, category=FutureWarning)
# Create table for missing data analysis
def draw_missing_data_table(df):
    """Summarise missing values per column of *df*.

    Returns a DataFrame indexed by column name with a 'Total' column
    (count of NaNs) and a 'Percent' column (fraction of NaNs), ordered
    with the most incomplete columns first.
    """
    null_mask = df.isnull()
    total = null_mask.sum().sort_values(ascending=False)
    percent = (null_mask.sum() / null_mask.count()).sort_values(ascending=False)
    return pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
# missing data
def find_missing_data(df):
    """Print a missing-data report for *df*.

    When any value is missing, prints the 20 most incomplete columns with
    their NaN count ('Total') and fraction ('Percent'); otherwise prints
    "No missing data.".  Returns None.
    """
    null_totals = df.isnull().sum().sort_values(ascending=False)
    null_fracs = (df.isnull().sum() / df.isnull().count()).sort_values(ascending=False)
    summary = pd.concat([null_totals, null_fracs], axis=1, keys=['Total', 'Percent'])
    if summary['Total'].max() > 0:
        print (summary.head(20))
    else:
        print ("No missing data.")
# + [markdown] id="dAggxc1EOsNZ" colab_type="text"
# # Load Data and Explore
# + id="d5jztCrv1Nd9" colab_type="code" outputId="761f1422-cbd4-4ee7-f650-dde41356ff25" colab={"base_uri": "https://localhost:8080/", "height": 153}
# !pip install -q xlrd
# !git clone https://github.com/juggernautTress/EWeiss-Data.git
# + _uuid="f4e47f83916b73107cc496a277a20a0b8e380a0c" id="XL5slyDK1BaJ" colab_type="code" outputId="b250734e-6310-4d3c-ee76-5319b3347f1d" colab={"base_uri": "https://localhost:8080/", "height": 324}
# Input data files are available in the "Eweiss-Data/" directory.
# Load the two worksheets: 'Background' (metadata) and 'Data' (purchases).
bckgrnd = pd.read_excel('EWeiss-Data/UC1.xlsx', 'Background', index_col=None)
flwseeds = pd.read_excel('EWeiss-Data/UC1.xlsx', 'Data', index_col=None)
# bckgrnd.dropna(axis=0, how='all') # remove rows where every value is NaN (disabled)
# Any results written to the current directory are saved as output.
flwseeds.head() ## Nominal / Categorical Data
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" id="rfab04gx1Bag" colab_type="code" outputId="0090725b-4189-4d6f-9434-81f401dc6e66" colab={"base_uri": "https://localhost:8080/", "height": 1133}
# Drop the spurious index column written by Excel export.
flwseeds = flwseeds.drop(["Unnamed: 0"], axis=1)
flwseeds.info()
# + [markdown] _uuid="15867c66a7543d23574482eccb28712edddfe6af" id="yvt0iekw1Bam" colab_type="text"
# *Check for missing data*
# + _uuid="f206ff114ea868edef42484168e941040f56f31a" id="A8SWfvz91Ban" colab_type="code" outputId="8109b20b-fc6d-4215-f59d-d6e1e97905f6" colab={"base_uri": "https://localhost:8080/", "height": 1816}
draw_missing_data_table(flwseeds) ## None
# + _uuid="10c34f5f161b6d431cb399651d89f8c28a30fd6c" id="UaOIHsOs1Bap" colab_type="code" outputId="eb610bc6-5241-459b-a65a-794099245462" colab={"base_uri": "https://localhost:8080/", "height": 35}
flwseeds["STORE CODE"].unique().size ## all unique store codes ?
# + [markdown] _uuid="5c01cf80ca7d7f6d3d67f44477bf289541d58eb3" id="aB8ZBQxf1Bar" colab_type="text"
# *Distribution Plots*
# + _uuid="5da94ea8481644a2f421f165cca38d02045d7a6b" id="Aqk0Lwqe1Bat" colab_type="code" outputId="61e02b17-9b7e-4f1c-cb8f-86c76e5bad5a" colab={"base_uri": "https://localhost:8080/", "height": 397}
# Summary statistics of the seed columns (store code excluded).
initStats = flwseeds.describe()
initStats = initStats.drop(columns =["STORE CODE"])
initStats
# + _uuid="fd91dab23ca319b48ede5b8620f8940460afa979" id="dl25oWjy1Bay" colab_type="code" outputId="94be0c81-85fd-4117-cf7f-3d526555746a" colab={"base_uri": "https://localhost:8080/", "height": 1704}
# Rebuild initStats as a per-seed popularity table: mean occurrence, std,
# total count and the seed name (used later by Recommendation()).
flw_names = list(flwseeds.columns.values)[1:]
initStats = pd.DataFrame(columns=["mean","std"])
initStats["mean"]=flwseeds.drop(columns =["STORE CODE"]).mean();
initStats["std"]=flwseeds.drop(columns =["STORE CODE"]).std();
initStats["count"]=flwseeds.drop(columns =["STORE CODE"]).sum();
initStats["Name"]=flw_names
# sort df by Count column
initStats = initStats.sort_values(['mean'], ascending=False).reset_index(drop=True)
f, ax = plt.subplots(figsize=(8, 18))
sns.barplot(initStats["mean"],initStats.Name,color="c")
plt.xlabel ("Mean Occurance")
# print ("Most uncommon seeds: \n",initStats.tail(20))
initStats.tail(20)
# + [markdown] _uuid="8ef9532ab8da91b2655950d7e040b2b61fc791dd" id="3c3FBpgl1Ba1" colab_type="text"
# Everybody orders Alyssum Y. and more than 95% order Calendula Gold, Marigold Jafri Black, Anthurium Mix, Cleome Rose, Linum Blue, Holly Hock, Sweet Pea Mix, Cereopsis, Sweet Pea Pink, Sweet Pea White and Delphinium. The rest of the flowers on the list would benefit most from sale recommendations.
# + id="WeaUB7-tVeXs" colab_type="code" outputId="90d2bcfe-c7eb-42a2-c034-038a2bbc7fba" colab={"base_uri": "https://localhost:8080/", "height": 545}
## Description of seed sales / store
# Total number of seed varieties purchased by each store.
flwseeds['Total'] = flwseeds.drop(["STORE CODE"], axis=1).sum(axis=1)
ax = flwseeds.hist(column='Total',bins=25, grid=False, figsize=(12,8), \
                   color='#86bf91', rwidth=0.9)
ax = ax[0]
for x in ax:
    # Despine
    x.spines['right'].set_visible(False)
    x.spines['top'].set_visible(False)
    x.spines['left'].set_visible(False)
    # Switch off ticks
    # NOTE(review): modern matplotlib expects booleans here, not "off"/"on"
    # strings — confirm against the pinned matplotlib version.
    x.tick_params(axis="both", which="both", bottom="off", \
                  top="off", labelbottom="on", left="off", right="off", labelleft="on")
    # Draw horizontal axis lines
    vals = x.get_yticks()
    for tick in vals:
        x.axhline(y=tick, linestyle='dashed', alpha=0.4, \
                  color='#eeeeee', zorder=1)
    # Remove title
    x.set_title("")
    # Set x-axis label
    x.set_xlabel("Seed Purchase (Count)", labelpad=20,\
                 weight='bold', size=12)
    # Set y-axis label
    x.set_ylabel("Number of Stores", labelpad=20, weight='bold', size=12)
print ("min purchase:", flwseeds.Total.min(),\
       "max purchase:", flwseeds.Total.max())
# + [markdown] id="Rp4TfwjWZF4q" colab_type="text"
# Most of the stores buy anywhere between 20 and 27 seeds.
# + [markdown] _uuid="695ce3ab9cb9a247bd877bff6e7ace09d9bb946b" id="LY64eHN21BbA" colab_type="text"
# # 1. Nearest Neighbours based clustering of stores
# + id="9M20LUD_auv8" colab_type="code" colab={}
from sklearn.neighbors import NearestNeighbors
from sklearn.model_selection import ShuffleSplit
# Hold out 10% of the stores as a fixed test split (random_state=0).
rs = ShuffleSplit(n_splits=1, test_size=.1, random_state=0)
X= flwseeds.drop(columns=['Total','STORE CODE'])
split_index = rs.split(X)
train_index, test_index = list(split_index)[0]
X_train0 = X.iloc[train_index]
X_test0 = X.iloc[test_index]
# Keep the store codes of the test rows for later identification.
X_test_STORECODE0 = flwseeds.iloc[test_index]["STORE CODE"]
# Neighbourhood model over the training stores (25 most similar stores).
neigh0 = NearestNeighbors(n_neighbors=25).fit(X_train0)
# + id="75RgkEP_q56M" colab_type="code" colab={}
from numpy.random import choice as sample
def hide_seeds(ori_vector=None,fraction_to_hide=None, k = None):
    """Randomly zero out ("hide") some purchases of a store, in place.

    Parameters
    ----------
    ori_vector : single-row DataFrame of 0/1 purchase indicators; modified
        in place (hidden seed columns are set to 0).
    fraction_to_hide : float, fraction of the purchased seeds to hide.
        Used only when *k* is None; always hides at least one seed and
        always leaves at least one purchase visible.
    k : int, number of purchases to keep visible; the rest are hidden.

    Returns
    -------
    (ori_vector, seeds_hidden) where seeds_hidden is the array of hidden
    column names, or (None, None) when *k* is not smaller than the number
    of purchases (so nothing could be hidden).
    """
    seeds_bought = list(ori_vector.columns[(ori_vector >0 ).iloc[0]])
    num_of_purchases = len(seeds_bought)
    if k is None:  # fixed: identity comparison with None (was `k == None`)
        num_2hide = int(num_of_purchases * fraction_to_hide)
        if num_2hide < 1: num_2hide =1 #at least one hidden
        if num_2hide == num_of_purchases: num_2hide -= 1 #at least one purchase
    else:
        num_2hide = num_of_purchases - k
        if num_2hide < 1:
            # query has too few purchases for the requested k
            #print ("choose another query",num_of_purchases,k)
            return None, None
    # numpy.random.choice (imported as `sample`) without replacement
    seeds_hidden = sample(seeds_bought,num_2hide,replace=False)
    ori_vector[seeds_hidden] = 0
    return ori_vector, seeds_hidden
''' User-based Collaborative Filtering '''
def kernel(x,y,sigma):
    """Gaussian (RBF) similarity between x and y with bandwidth sigma."""
    diff = x - y
    return np.exp(-(diff * diff) / (2 * sigma ** 2))
def Recommendation(nnModel,query,stores,threshold=0.1,popularityBias_On=True,scoreType='pearson'):
    """Score candidate seeds for the store described by *query*.

    Parameters
    ----------
    nnModel : fitted sklearn NearestNeighbors model over store purchase rows.
    query : single-row DataFrame of 0/1 purchase indicators.
    stores : DataFrame of purchase rows the model was fitted on.
    threshold : score cut-off; its meaning differs per branch (see notes).
    popularityBias_On : when falsy, dampen globally popular seeds with a
        Gaussian smoothing kernel before scoring.
    scoreType : 'simple' (neighbourhood frequency) or 'pearson'
        (correlation based); used only when popularityBias_On is truthy.

    Returns a pandas Series of seed scores.
    """
    #1. find stores with similar purchases as the query
    neighbours = nnModel.kneighbors(query,return_distance=False)[0] #like array index
    #2. get purchases by these users and sort by item affinity
    #print (neighbours, stores.shape[0])
    store_set = stores.iloc[neighbours]
    store_set = store_set.loc[:,store_set.any()] ## drop items with zero affinity
    score = store_set.sum(axis=0).astype(float) ## sum seed occurances
    if not popularityBias_On:
        '''
        If a seed is popular among many stores, it is bound to occur with a higher
        frequency in local neighbourhoods of stores. This might bias the scale of
        other "unpopular" seed scores comparitively:: introducing a simple smoothing
        function to tackle this problem
        '''
        # NOTE(review): reads the module-level `initStats` table and mutates a
        # slice of it (SettingWithCopy risk in pandas) — confirm intended.
        popularity = initStats[initStats["Name"].isin(store_set.columns.values)]
        popularity["mean"] *= 1000.#/= initStats["mean"].max()
        pop_score = [kernel(score[name].astype(float),neighbours.size,\
                            popularity[popularity["Name"]==name]["mean"].values[0]) \
                     for name in store_set.columns.values]
        pop_score /= max(pop_score)
        score = pd.Series(pop_score,popularity["Name"])
        # NOTE(review): this branch keeps scores BELOW the threshold, while the
        # 'simple' branch keeps scores above it — confirm the inversion.
        score = score[score < threshold]
        #print (popularity)
    else:
        if scoreType == 'simple':
            # Fraction of neighbours that bought each seed.
            score /= neighbours.size
            score = score[score>threshold].sort_values(ascending=False)
        elif scoreType== 'pearson':
            ### Using Pearson Correlations (within neighbours)
            # Rank seeds by the absolute sum of their correlations with all
            # other seeds, keeping the top (1 - threshold) fraction.
            score = store_set.corr(method='pearson')
            score.loc["Total"] = np.absolute(score[score.columns].sum(axis=1))
            keep = (1- threshold) * score.index.size
            score = score.loc["Total"].sort_values(ascending=False)
            score = score.iloc[0:int(keep)]
    return score
# + [markdown] id="3R_xEzZPFFEl" colab_type="text"
# Test Example
# + id="Gdfg4klzeh2P" colab_type="code" outputId="6d728bd6-f0e0-4083-ac5f-eaada118e953" colab={"base_uri": "https://localhost:8080/", "height": 436}
#example query
np.random.seed(22)
# Keep drawing random held-out stores until hide_seeds yields a usable query
# (it returns (None, None) when the store bought fewer than k seeds).
while 1:
    store_query = X_test0.sample(1)
    # hide a few purchases for testing purposes
    store_query, test_recommn = hide_seeds(store_query,k=15) # k--[6,29]
    if isinstance(store_query,pd.DataFrame):
        break
## stop cell
## test the query:
score = Recommendation(neigh0,store_query,X_train0,popularityBias_On=1)
print (" For a store that bought -- ",\
list(store_query.columns[(store_query == 1).iloc[0]]))#,\
#"\n\n Recommendation \t\t Score\n\n",\
#score)
plt.figure(figsize=(8,6))
# Seeds that were deliberately hidden (i.e. expected recommendations) in red.
clrs = ['red' if (x in test_recommn) else 'c' for x in list(score.index)]
sns.barplot( score.values, score.index , palette=clrs)
plt.xlabel("Recommendation Score")
print ("Did the recommender system recommend all seeds hidden in the test set? --",\
set(test_recommn).issubset(score.index),\
"[expected recommended seeds marked in red]")
# + id="NccQrfl1545P" colab_type="code" colab={}
from __future__ import division
from sklearn.metrics import accuracy_score, classification_report,\
zero_one_loss,jaccard_similarity_score
#3. compare to test:
'''
threshold_recmnd = 0.8 # strong recommedations only
k = 6 # number of items to use for nearest neighbour search
'''
def CrossValidate(threshold_recmnd=0.8, k=6, data_fraction=0.8, inputs=()):
    """Evaluate the recommender on a held-out store set.

    For each test store, hide all but `k` purchases, recommend, and record the
    fraction of hidden purchases that were NOT recommended (per-store error).

    inputs : (fitted NearestNeighbors, X_train, X_test, test store codes).
             Default changed from a mutable [] to () -- callers always pass it.
    Returns the per-store CV DataFrame, plus [classification report,
    1 - jaccard score] when data_fraction < 1.

    NOTE(review): when k is None, `test_recommn` is never set, so the
    data_fraction < 0.999 branch would raise NameError -- confirm callers
    always pass a non-None k in that mode.
    """
    neigh, X_train, X_test, X_test_STORECODE = inputs
    CV = pd.DataFrame(columns=["train_fraction", "k", "threshold", "STOREindex", "error"])
    i = 0
    preds = pd.DataFrame(columns=X_train.columns.values)
    target = pd.DataFrame(columns=X_train.columns.values)
    Xcol_size = X_train.shape[1]
    for index in X_test.index:
        store_query = X_test.loc[index].to_frame().T
        if k is not None:  # fix: was `k != None`
            store_query, test_recommn = hide_seeds(store_query, k=k)
            # hide_seeds returns (None, None) for stores with too few purchases.
            if not isinstance(store_query, pd.DataFrame):
                continue
        score = Recommendation(neigh, store_query, X_train,
                               threshold=threshold_recmnd, popularityBias_On=1)
        # popularityBias_On=1 (faster) and keep all preds
        if data_fraction < 0.999:
            # Build one-hot target/prediction rows for sklearn metrics.
            target.loc[i, :] = np.zeros(Xcol_size)
            for col in test_recommn:
                target.loc[i, col] = 1  # fix: .ix was removed in pandas 1.0
            preds.loc[i, :] = np.zeros(Xcol_size)
            if not score.empty:
                incorrect_recommendations = set(test_recommn) - set(score.index)
                error = len(incorrect_recommendations) / len(test_recommn)
                for col in list(score.index):
                    preds.loc[i, col] = 1  # fix: .ix -> .loc
            else:
                error = 1.
        else:
            error = np.nan  # fix: np.NaN alias removed in NumPy 2.0
        CV.loc[i] = [1. - data_fraction, k, threshold_recmnd, X_test_STORECODE.iloc[i], error]
        i += 1
    if data_fraction < 1.:
        preds = preds.reindex(X_test.columns, axis=1).astype(int)
        target = target.reindex(X_test.columns, axis=1).astype(int)
        # NOTE(review): jaccard_similarity_score was removed from
        # scikit-learn (>=0.23); modern equivalent is jaccard_score /
        # accuracy_score depending on intent -- confirm before upgrading.
        ac = 1 - jaccard_similarity_score(target.values, preds.values, normalize=True)
        cr = classification_report(target, preds, target_names=X_train.columns.values)
        return CV, [cr, ac]
    return CV
# + id="9VcINE811XIK" colab_type="code" outputId="bbcb6a9b-082c-461b-ff74-1da7185d3b75" colab={"base_uri": "https://localhost:8080/", "height": 251}
###### grid search to find optimal 'k' and 'threshold' values
print ("Grid Search")
k_values = [5,10,20] ## items for NN search
threshold_values = [0.5]# keep moderate and strong predictions (top 50%)
grid = pd.DataFrame()
for k in k_values:
    for threshold in threshold_values:
        for n_neighbors in [2,10,20,50]: ## number of neighbours to eval
            # NOTE(review): the index is refitted on X_test0 here, not
            # X_train0 as in the earlier cell -- looks unintentional; confirm.
            neigh0 = NearestNeighbors(n_neighbors=n_neighbors).fit(X_test0)
            grid_, _= CrossValidate(threshold,k,0.8,\
                [neigh0,X_train0,X_test0,X_test_STORECODE0])
            grid_["n_neighbors"] = [n_neighbors]*grid_.shape[0]
            # NOTE(review): DataFrame.append was removed in pandas 2.0
            # (use pd.concat) -- pinned to the older pandas this ran on.
            grid = grid.append(grid_)
            print ("\t k:",k,"\t threshold:",threshold,"\t n_neighbors:",n_neighbors)
# + id="Z8f3_lHoBs6b" colab_type="code" outputId="f6ad7183-c1c8-4e31-9c23-389192ac5385" colab={"base_uri": "https://localhost:8080/", "height": 309}
#data = CV[CV["k"] == 15]
#plt.scatter(data.threshold, data.error)
# Per-store error grouped by neighbourhood size, split by k (seeds kept).
ax = sns.boxplot(x="n_neighbors", y="error", hue="k",\
data=grid, palette="Set3")
plt.xlabel("n_neighbors")
plt.ylabel("error")
# + [markdown] id="qNxqpOmXBtq6" colab_type="text"
# When you train about 80% of the data:
# * using >10 n_neighbors give better results.
# * 'k' however seems to affect the prediction. Larger 'k' values (more number of input items) should find better neighbourhood search results and thus result in better recommendations. << Test this in the Cross-Validation.
#
#
#
# + [markdown] id="KAaumac_QoAC" colab_type="text"
# ### Is the recommendation system "learning"?
# + id="eImObrUc8VkG" colab_type="code" colab={}
def CV_parm(threshold=0.5,n_neighbors=50):
    """Cross-validate the recommender over several train/test proportions.

    For each test fraction in {.8,.6,.4,.2} and each k in {5,10,15}, resplit
    the data, refit the NN index and run CrossValidate.
    Returns (CV, res): per-store error records and a per-setting accuracy
    summary.

    NOTE(review): depends on the module-level DataFrame `flwseeds` and on
    CrossValidate/ShuffleSplit/NearestNeighbors defined in earlier cells;
    DataFrame.append was removed in pandas 2.0.
    """
    print ("Cross Validate: threshold=",threshold," n_neighbors=",n_neighbors)
    CV = pd.DataFrame()
    proportions = [.8,.6,.4,.2]
    res = pd.DataFrame(columns=["train_fraction","k","accuracy score"])
    for test_fraction in proportions:
        print (" \t testing ",test_fraction,"of data...")
        for k in [5,10,15]:
            print (" \t\t k=",k)
            rs = ShuffleSplit(n_splits=1,test_size=test_fraction,random_state=44)
            X = flwseeds.drop(columns=["Total","STORE CODE"])
            ## resetting X; not required but helps if running this cell independently
            split_index = rs.split(X)
            train_index,test_index = list(split_index)[0]
            X_train1 = X.iloc[train_index]
            X_test1 = X.iloc[test_index]
            X_test_STORECODE1 = flwseeds.iloc[test_index]["STORE CODE"]
            neigh1 = NearestNeighbors(n_neighbors=n_neighbors).fit(X_train1)
            CV_, report= CrossValidate(threshold,k,data_fraction=test_fraction,\
                inputs=[neigh1,X_train1,X_test1,X_test_STORECODE1])
            CV_["k"] = [k]*CV_.shape[0]
            CV = CV.append(CV_)
            res.loc[len(res),:] = [1-test_fraction,k,report[1]]
    return CV, res
# + id="EPFnmbNneBn2" colab_type="code" outputId="ace96c4b-5fac-4502-8011-78be31dee0fe" colab={"base_uri": "https://localhost:8080/", "height": 833}
# Run the default cross-validation sweep and plot accuracy by train fraction.
CV,res = CV_parm()
plt.figure(figsize=(8,8))
## Accuracy Score
sns.barplot(x="train_fraction",y="accuracy score", hue="k",\
data=res, palette="GnBu_d")
# + [markdown] id="6pD4O-0pIdey" colab_type="text"
# ## Strong Recommedation (threshold >0.8)
# + id="HnUa-FHDIgAF" colab_type="code" outputId="717b2c32-650c-4353-fecb-060ce6062261" colab={"base_uri": "https://localhost:8080/", "height": 724}
# Repeat with a stricter recommendation threshold (strong recommendations only).
CV, res = CV_parm(threshold=0.8,n_neighbors=50)
plt.figure(figsize=(8,6))
sns.barplot(x="train_fraction", y="accuracy score", hue="k",\
data=res, palette="RdPu_d")
# + [markdown] _uuid="64a9a4f192138bdc6a94c5210efd147d82668a32" id="8FO6ujpG1BbB" colab_type="text"
# ## To do: Implement SVD
#
# ```
# `# scikit-surprise
# ```
#
# . SVD
# * SVD will handle the problem of scalability and sparsity by a larger dataset.
# * However, SVD provides little explanation to the reason that an item is recommended to an user.
# + [markdown] id="YTg9xGo_8OvJ" colab_type="text"
# # Edelweiss Recommendations
# + [markdown] _uuid="8bb6bfad2d7f77526b37f9056c857f18fa2c30d0" id="cfhk66w81BbD" colab_type="text"
# **Edelweiss wants this recommendation to be statistically correct. They also want to see if this data is meaningful enough to generate some decent recommendations or not. If Kayla says that this data is insufficient, then she needs to validate that with some statistical results. They also would want to see some stores and some seeds where they should focus primarily. Other than this analysis, if the Edelweiss CEO likes the approach, then he would want to do a pilot of a few stores. Kayla should help the CEO to select the store list along with the recommendations.**
# + id="BfQCbuSKISRj" colab_type="code" colab={}
#### Seed stores to focus on:
## train all of the data
X = flwseeds.drop(columns=['STORE CODE','Total'])
X_STORECODE = flwseeds["STORE CODE"]
neighF = NearestNeighbors(n_neighbors=15).fit(X)
results = pd.DataFrame(columns=["STORE CODE", "Recommnd Strength"])
scoreList = pd.DataFrame(columns=X.columns.values)
# Score every store against the full index; the summed seed score acts as a
# per-store "recommendation strength".
for i,index in enumerate(X.index):
    store_query = X.loc[index].to_frame().T
    score = Recommendation(neighF,store_query,X,threshold=0.8,popularityBias_On=1)
    results.loc[i] = [X_STORECODE.loc[index],score.sum()]
    scoreList.loc[i] = score.sort_values(ascending=False)
# + id="PrKsC5F3ZwEL" colab_type="code" outputId="b70f29df-7239-441d-f2cc-bc4a5e843a04" colab={"base_uri": "https://localhost:8080/", "height": 1169}
## select store: recommendation strength by store
print ("Pilot Stores:")
# Top five stores by aggregate recommendation strength -> pilot candidates.
results = results.sort_values(["Recommnd Strength"],ascending=False).head(5)
for index in results.index:
    print ("\n\nSTORE CODE:",int(results.loc[index]["STORE CODE"]), \
    "Recommendation Strength:",results.loc[index]["Recommnd Strength"])
    print ("\nRecommendations ---\n")
    print ("\t Seed \t\t Score")
    print (scoreList.loc[index].dropna())
# + id="vd4EXebZIVMY" colab_type="code" colab={}
### Pilot Stores and seed recommendations
# ==== end of notebook: Analysis.ipynb ====
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# -
import os
import cv2
import imageio
import scipy.ndimage as ndi
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import ntpath
import re
import math
import dicom2nifti
# ## DICOM to NIFTI pre processing
# +
os.chdir(r'\\nsq024vs\u8\aczd087\MyDocs\Data science masters\Thesis\Dataset\CHAOS_Train_Sets\MR')
#Iterating through the directory and finding only the MRI images for analysis
input_dir_list = list()
output_dir_list=list()
output_dir=r'\\nsq024vs\u8\aczd087\MyDocs\Data science masters\Thesis\Dataset\CHAOS_Train_Sets\NIFTI MR'
name_list=['MR','DICOM_anon']
# Leaf directories (no subdirectories) whose path mentions both 'MR' and
# 'DICOM_anon' are the anonymised DICOM series to convert.
for root,dirs,files in os.walk(os.getcwd()):
    if not dirs and all(x in root for x in name_list):
        input_dir_list.append(root)
# Mirror the structure on the NIFTI output tree.
for root,dirs,files in os.walk(output_dir):
    if not dirs and all(x in root for x in name_list):
        output_dir_list.append(root)
# Pair each input DICOM dir with its output dir (assumes identical walk order).
final_output=list(zip(input_dir_list, output_dir_list))
final_output
# -
# NOTE(review): this cell was syntactically incomplete in the original
# ("with open('input_output_dir')" with no mode/body, plus a truncated
# "final_oupu"); completed so the directory pairing is persisted and the
# cell is valid Python.
with open('input_output_dir', 'w') as pairing_file:
    pairing_file.write(repr(final_output))
# +
#Iterating through list of directorys above and writing to
# Convert each paired DICOM directory to compressed NIfTI in its output dir.
for folders in final_output:
    dicom2nifti.convert_directory(folders[0], folders[1], compression=True,reorient=False)
# -
# Return the final component (file or directory name) of a Windows-style path.
def path_leaf(path):
    """Return the last path component, handling trailing separators."""
    parent, leaf = ntpath.split(path)
    if leaf:
        return leaf
    # Trailing separator: the leaf is the last component of the parent.
    return ntpath.basename(parent)
class data_generator():
    """Container for the image/mask directory pair (and optional organ list)
    that the analysis classes below operate on."""

    def __init__(self, image_dir, mask_dir, list_organs=None):
        # Directory holding the MRI image slices.
        self.image_dir = image_dir
        # Directory holding the corresponding ground-truth masks.
        self.mask_dir = mask_dir
        # Fix: the original assigned the undefined name `list_organs`
        # (NameError at construction). Accept it as an optional parameter.
        self.list_organs = list_organs
# +
class image_organ_intensity_analysis():
    """The purpose of this analysis is to inspect the intensity variation of each organ classified by the mask"""
    def __init__(self,ground_image,MRI_images,MRI_type):
        """Note image pics fed in as numpy arrays"""
        self.ground=ground_image
        self.MRI_images=MRI_images
        self.MRI_type=MRI_type
    def resize_image():
        # NOTE(review): unfinished stub -- missing `self`, and `stat_summary`
        # is never defined, so calling this raises NameError.
        pass
        return stat_summary
    def det_n_unique_val(self):
        """The purpose of this function is to return iterable df for number of unique slices per image category"""
        # NOTE(review): `self.image_database` is never assigned in __init__ --
        # presumably injected externally; confirm before use.
        return pd.DataFrame(self.image_database.groupby('Person_id')['Slice_id'].nunique(),
            columns=['Slice_id'])
    def gen_stat_df(self):
        No_slices_per_person=self.det_n_unique_val()
        #Iterating through each
        # NOTE(review): iterates the notebook-global `trl2` instead of the
        # local No_slices_per_person -- looks like leftover scratch code.
        for vals in trl2.index:
            print(vals)
            No_slices=int(trl2.loc[vals])
            for j in range(1,No_slices+1):
                self.image_database.loc[self.image_database['Person_id']==vals,
                    self.image_database['Slice_id']==j]
            # NOTE(review): det_num_organs is not defined on this class, and
            # passing `self` explicitly double-binds it -- unfinished.
            self.det_num_organs(self)
# +
from pathlib import Path
directory=r'\\nsq024vs\u8\aczd087\MyDocs\Data science masters\Thesis\Dataset\CHAOS_Train_Sets\Train_Sets\MR'
#
#r'C:\Users\niall\OneDrive\Documents\Data science masters\Thesis\Biomedical images\CHAOS_Train_Sets\Train_Sets\MR'
rootdir = Path(directory)
# Return a list of regular files only, not directories
file_list = [f for f in rootdir.glob('**/*') if f.is_file()]
# +
# Category vocabulary used to tag each file path, and the path marker that
# precedes the patient id in the directory layout ('...\MR\<patient>\...').
Img_categories={'MR_modes':['T1DUAL','T2SPIR'],
'Image_type':['Ground','InPhase','OutPhase']}
start_id='\\MR\\'
class return_gnrl_info():
"""The purpose of this class is to take in file directory list and
iterate through and create a parsable dataframe for image analysis"""
def __init__(self,Img_categories,start_id,file_list):
self.start_id=start_id
self.Img_categories=Img_categories
self.file_list=file_list
def Create_file_dictionary(self,tmp):
#iterating through file system for different image file types and importing them for meta analysis
tmp_image_file=imageio.imread(tmp)
tmp_dict=tmp_image_file.meta
tmp_dict['Image_type']=None
for key_items,values in self.Img_categories.items():
#Iterating through values
for val in values:
#Iterating through string file name and assigning name categories to it.
if val in tmp:
tmp_dict[key_items]=val
#Endid of DICOM or jpeg files based on review of file strings.
end_id="\\"+tmp_dict['MR_modes']
#Acquiring person id from file string based on end time points.
person_id=tmp[tmp.find(start_id)+len(start_id):tmp.rfind(end_id)]
tmp_dict['Person_id']=person_id
tmp_dict['File_location']=tmp
tmp_dict['File_name']=path_leaf(tmp)
tmp_dict['Slice_id']=self.__get_slice_no(tmp_dict['MR_modes'],
tmp_dict['Image_type'],
tmp_dict['File_name'])
return tmp_dict
def __get_slice_no(self,image_mode,image_type,file_name):
"""Purpose of this method to return slice number based on file outlay"""
Slice_No=self.__reg_ex_filename(file_name)
#T2 spiral no registration
if image_mode=='T2SPIR':
Slice_No=Slice_No
#T1Dual slice no registration
elif image_mode=='T1DUAL':
if image_type=='InPhase' or image_type=='Ground':
Slice_No=Slice_No/2
#Roundup function used to get odd number frequency count rather than using a counter approach.
elif image_type=='OutPhase':
Slice_No=int(math.ceil((Slice_No/2)))
else:
print('T1Dual missing input')
else:
print('T2Spir missing input')
return Slice_No
def __reg_ex_filename(self,filename):
"""Purpose of this file is to remove file name from filename"""
#Regex splits file into separate number strings based on - and . delimiters
temp_file_name=re.split(r'-|\.',filename)
#Return slice number as a float for further processing if required.
return float(temp_file_name[2].lstrip("0"))
def Create_data_frame(self):
"""Purpose of this method is a wrapper function which generated dataframe from dictionary attributes of each file"""
file_info=[]
for files in self.file_list:
tmp=str(files)
tmp_dictionary=self.Create_file_dictionary(tmp)
file_info.append(tmp_dictionary)
unedited_df=pd.DataFrame(file_info)
return self.Edit_data_frame(unedited_df)
def Edit_data_frame(self,unedited_df):
"""The purpose of this method is to relabel MR modes to 5 different categories to ease analysis in image class"""
unedited_df.replace({'Image_type': None}, 'T2SPIR_data',inplace=True)
Image_type=['InPhase','OutPhase','Ground','Ground','T2SPIR_data']
MR_Mode=['T1DUAL','T1DUAL','T1DUAL','T2SPIR','T2SPIR']
Image_renames=['T1Dual_InPhase','T1Dual_OutPhase','T1Dual_Ground','T2SPIR_Ground','T2SPIR_data']
mask_T1Dual_Inphase=(unedited_df['Image_type']=='InPhase')&(unedited_df['MR_modes']=='T1DUAL')
unedited_df['Image_type'] = unedited_df['Image_type'].mask(mask_T1Dual_Inphase,'T1Dual_InPhase')
mask_T1Dual_Outphase=(unedited_df['Image_type']=='OutPhase')&(unedited_df['MR_modes']=='T1DUAL')
unedited_df['Image_type'] = unedited_df['Image_type'].mask(mask_T1Dual_Outphase,'T1Dual_OutPhase')
#mask_T1Dual_Ground=(unedited_df['Image_type']=='Ground')&(unedited_df['MR_modes']=='T1DUAL')
#unedited_df['Image_type'] = unedited_df['Image_type'].mask(mask_T1Dual_Ground,'T1Dual_Ground')
#mask_T2SPIR_Ground=(unedited_df['Image_type']=='Ground')&(unedited_df['MR_modes']=='T2SPIR')
#unedited_df['Image_type'] = unedited_df['Image_type'].mask(mask_T2SPIR_Ground,'T2SPIR_Ground')
mask_T2SPIR_data=(unedited_df['Image_type']=='T2SPIR_data')&(unedited_df['MR_modes']=='T2SPIR')
unedited_df['Image_type'] = unedited_df['Image_type'].mask(mask_T2SPIR_data,'T2SPIR_data')
return unedited_df
# Build the metadata table for the whole file list and sanity-check labels.
gnrl_info_method=return_gnrl_info(Img_categories,start_id,file_list)
gnrl_info_df=gnrl_info_method.Create_data_frame()
gnrl_info_df['MR_modes'].unique()
# -
gnrl_info_df.loc[:,'Image_type'].unique()
# +
class gen_file_output():
    """Given the metadata DataFrame, fetch the image files (MRI phases plus
    ground-truth mask) for one patient/slice/modality and summarise which
    organs are present in the mask."""
    def __init__(self,gnrl_df,MR_MOD,slice_no,patient_no):
        self.gnrl_df=gnrl_df
        self.MR_MOD=MR_MOD
        self.slice_no=slice_no
        self.patient_no=patient_no
    def return_file_loc(self):
        """Map each Image_type of the chosen modality to the file location of
        this patient/slice. Raises if the selection is not exactly one row
        (pandas .item())."""
        #Return unique values
        Unique_vals=list(self.gnrl_df.loc[self.gnrl_df['MR_modes']==self.MR_MOD,'Image_type'].unique())
        #Dictionary to store file locations for each image
        file_loc={}
        #Iterating through each image set for analysis
        for vals in Unique_vals:
            Row_Int=self.gnrl_df[(self.gnrl_df['Slice_id'] == self.slice_no) &
                (self.gnrl_df['Person_id'] ==self.patient_no) &
                (self.gnrl_df['Image_type']==vals)&
                (self.gnrl_df['MR_modes']==self.MR_MOD)]
            file_loc[vals]=Row_Int['File_location'].item()
        return file_loc
    def Determine_no_organs(self):
        """Check the ground-truth mask for each organ's label value and
        return {'organs': {name: label-or-None}}.

        The label values (63/126/189/252) are the CHAOS intensity coding --
        NOTE(review): confirm against the dataset documentation.
        """
        #Finding ground file
        file_loc=self.files_loc_image()
        image=file_loc['Ground']
        organs_chk={'Liver':63,
            'Right Kidney':126,
            'Left Kidney':189,
            'Spleen':252,
            'Background':0}
        No_organs=np.unique(image)
        #Iterating through organ dictionary to find matches
        for keys, vals in organs_chk.items():
            if organs_chk[keys] not in No_organs:
                organs_chk[keys]=None
        No_organs={'organs':organs_chk}
        return No_organs
    def files_loc_image(self):
        """Load every file location as a numpy array (caches a single read of
        each image rather than re-importing per use)."""
        file_loc=self.return_file_loc()
        for keys,vals in file_loc.items():
            file_loc[keys]=imageio.imread(vals)
        return file_loc
    def gen_image_organ_dictionary(self):
        """Wrapper: merged dict of loaded images plus the organ summary."""
        images_dictionary=self.files_loc_image()
        organs_dictionary=self.Determine_no_organs()
        images_dictionary.update(organs_dictionary)
        return images_dictionary
# Smoke test: fetch T1DUAL slice 15 of patient '2' and merge images + organs.
trl=gen_file_output(gnrl_info_df,'T1DUAL',15,'2')
trl2=trl.files_loc_image()
trl3=trl.return_file_loc()
trl4=trl.Determine_no_organs()
trl2.update(trl4)
trl2
# +
# NOTE(review): Per_slice_analysis is defined in a later cell -- this cell
# only works once that one has been executed (notebook out-of-order run).
slice_chk=Per_slice_analysis(**trl2)
slice_chk.gen_org_spec_mask_image()
slice_chk.__dict__.keys()
# +
# Show the background-masked in-phase image.
trl_background=slice_chk.T1Dual_InPhaseBackgroundmask
import matplotlib.pyplot as plt
# Fix: the original plotted `trl` (a gen_file_output instance from an earlier
# cell, which imshow cannot render) instead of the extracted mask array.
plt.imshow(trl_background, cmap='gray')
plt.axis('off')
plt.show()
# +
# Liver mask of the in-phase image.
trl_liver=slice_chk.T1Dual_InPhaseLivermask
import matplotlib.pyplot as plt
plt.imshow(trl_liver, cmap='gray')
plt.axis('off')
plt.show()
# +
# Spleen mask of the in-phase image.
trl_spleen=slice_chk.T1Dual_InPhaseSpleenmask
import matplotlib.pyplot as plt
plt.imshow(trl_spleen, cmap='gray')
plt.axis('off')
plt.show()
# +
# Full (unmasked) in-phase slice for comparison.
trl_all=slice_chk.T1Dual_InPhase
import matplotlib.pyplot as plt
plt.imshow(trl_all, cmap='gray')
plt.axis('off')
plt.show()
# -
# NOTE(review): cv2.destroyAllWindows takes no arguments --
# `cv2.destroyAllWindows(0)` raises TypeError; confirm and drop the 0.
cv2.imshow("test", slice_chk.T1Dual_InPhaseBackgroundmask)
cv2.waitKey(0)
cv2.destroyAllWindows(0)
# +
class Per_slice_analysis():
    """Per-slice organ intensity analysis: given the MRI phase images, the
    ground-truth mask and the organ->intensity mapping, build per-organ
    masked images and summary statistics."""

    def __init__(self, **Test_images_organs):
        # Only these keys from the incoming dictionary become attributes.
        self.allowed_keys = {'T1Dual_InPhase', 'T1Dual_OutPhase', 'Ground', 'organs', 'T2SPIR_data'}
        self.__dict__.update((k, v) for k, v in Test_images_organs.items() if k in self.allowed_keys)

    def gen_org_spec_mask_image(self):
        """For every MRI image attribute and every organ present in the mask,
        store `<image><organ>mask`: the MRI intensities where the ground-truth
        mask equals that organ's label, zero elsewhere."""
        temp_dict = {}
        for MRI_type, MRI_data in self.__dict__.items():
            # Fix: the original tested `'T2Spir' in MRI_type` (wrong case), so
            # the 'T2SPIR_data' image was silently never masked.
            if 'T1Dual' in MRI_type or 'T2SPIR' in MRI_type:
                for organ_name, intensities in self.organs.items():
                    if intensities is not None:  # organ absent from this slice -> skip
                        new_key = MRI_type + organ_name + 'mask'
                        mask_image = np.where(self.Ground == intensities,
                                              MRI_data, 0)
                        temp_dict[new_key] = mask_image
        self.__dict__.update(temp_dict)

    def gen_image_mask_histogram(self, mask_image):
        """Return a 256-bin intensity histogram of `mask_image` over [0, 256)."""
        # Fix: scipy removed the `ndimage.measurements` namespace; the
        # function lives at scipy.ndimage.histogram.
        return ndi.histogram(mask_image, min=0, max=256, bins=256)

    def basic_int_stats(self, mask_image):
        """Basic intensity-distribution statistics for one masked image."""
        return {'image_intensity_mean': ndi.mean(mask_image),
                'image_intensity_median': ndi.median(mask_image),
                'image_intensity_variance': ndi.variance(mask_image)}

    def signal_to_noise_ROI_ratio(self):
        """Placeholder: signal-to-noise ratio for masked images (unimplemented)."""

    def organ_slice_wrapper_analysis(self):
        """Collect histogram + statistics per organ present in the mask.

        NOTE(review): the original referenced several undefined names
        (self.No_organs, slice_info, self.gen_mask_image, temp_mask); the
        obvious intents are repaired below, but verify against the (unseen)
        callers before relying on the output.
        """
        organ_dict = self.organs
        slice_info = {}
        for keys, values in organ_dict.items():
            if values is None:
                slice_info[keys] = None
            else:
                # Label-valued mask of this organ from the ground truth.
                temp_mask = np.where(self.Ground == values, self.Ground, 0)
                temp_histogram = self.gen_image_mask_histogram(temp_mask)
                temp_intensity_stat = self.basic_int_stats(temp_mask)
                organ_dict[keys] = {'Intensity_histogram': temp_histogram,
                                    'Intensity_statistics': temp_intensity_stat}
# -
import inspect
# NOTE(review): scratch cell -- `ndi.f` does not exist, and
# inspect.getargspec was removed in Python 3.11 (use inspect.signature).
inspect.getargspec(ndi.f)
# +
class image_preprocessing():
    """Intensity pre-processing for one image file: N4 bias-field correction
    and simple linear (min-max) normalisation."""

    def __init__(self, file_location):
        # Path of the image file to pre-process.
        self.file_location = file_location

    def image_normalisation(self):
        """Placeholder kept for interface compatibility.

        NOTE(review): in the original this def had no body (the next def
        followed immediately, a SyntaxError); kept as an explicit stub.
        """
        pass

    def N3_image_preprocessing(self):
        """Configure N4 bias-field correction as per Tustison et al.,
        "N4ITK: Improved N3 Bias Correction", IEEE Transactions on Medical
        Imaging 29(6):1310-1320, June 2010, to improve the organ/background
        signal-to-noise ratio in MRI slices.

        Returns the configured (not yet run) nipype interface.
        NOTE(review): the original ended with `n4.Outputs`, which is not a
        valid attribute; `n4.run()` is presumably what was intended.
        """
        from nipype.interfaces.ants import N4BiasFieldCorrection
        n4 = N4BiasFieldCorrection()
        n4.inputs.input_image = self.file_location
        n4.inputs.save_bias = True
        n4.inputs.dimension = 2
        return n4

    @staticmethod
    def normalize(arr):
        """Linear (min-max) normalisation of the first three channels to
        [0, 255]; the alpha channel is left untouched.

        http://en.wikipedia.org/wiki/Normalization_%28image_processing%29

        Fix: originally declared without `self`, so it could not be called on
        an instance; made a staticmethod.
        """
        arr = arr.astype('float')
        # Do not touch the alpha channel
        for i in range(3):
            minval = arr[..., i].min()
            maxval = arr[..., i].max()
            if minval != maxval:
                arr[..., i] -= minval
                arr[..., i] *= (255.0 / (maxval - minval))
        return arr

    def demo_normalize(self):
        """Normalise this file's image and save it as /tmp/normalized.png.

        NOTE(review): `Image` (PIL) is not imported anywhere in this file --
        this method raises NameError until `from PIL import Image` is added.
        """
        img = imageio.imread(self.file_location)
        arr = np.array(img)
        new_img = Image.fromarray(self.normalize(arr).astype('uint8'), 'RGBA')
        return new_img.save('/tmp/normalized.png')
# +
#from nipype.interfaces.ants import N4BiasFieldCorrection
import SimpleITK as sitk
# Scratch cell experimenting with SimpleITK's N4 bias-field correction on the
# first catalogued file.
file=gnrl_info_df.loc[1,['File_location']].values.tolist()
#n4 = N4BiasFieldCorrection()
#n4.inputs.input_image=str(file[0])
#n4.inputs.save_bias=True
#n4.inputs.dimension=2
#n4.cmdline
inputImage2 = imageio.imread(file[0])
#inputImage = sitk.ReadImage(file[0],sitk.sitkFloat32)
# NOTE(review): scipy removed ndimage.imread long ago, and
# N4BiasFieldCorrectionImageFilter expects a SimpleITK image (see the
# commented ReadImage line above), not a numpy array -- this cell fails
# as written.
inputImage=ndi.imread(file[0])
corrector = sitk.N4BiasFieldCorrectionImageFilter();
#inputImage = sitk.Cast(inputImage,sitk.sitkFloat32)
maskImage = sitk.OtsuThreshold(inputImage,0,1,200)
sitk.WriteImage(maskImage, "trial.dcm")
output = corrector.Execute(inputImage)
# +
from nipype.interfaces.ants import N4BiasFieldCorrection
n4 = N4BiasFieldCorrection()
n4.inputs.input_image=file[0]#inputImage2
#n4._cmd=r'C:\Users\niall\Anaconda3\Lib\site-packages\nipype'
# NOTE(review): `cmd` is not a defined input trait on this interface --
# probably meant `n4.cmdline`.
n4.inputs.cmd
# -
# Scratch/debug cell. NOTE(review): cleaned up -- the original contained a
# bare `corrector.` (SyntaxError); the dangling introspection lines are kept
# as comments for reference.
n4._cmd
import inspect
#corrector.
#inspect.getargvalues(corrector)
inputImage
inputImage.GetDepth()
inputImage
# NOTE(review): `pwd` is an IPython magic, not plain Python -- NameError
# outside Jupyter.
pwd
# Example query parameters (T1DUAL variant of the cell below).
MR_MOD='T1DUAL'
slice_no=2
patient_no='2'
vals='T2SPIR_data'
# +
# Look up the file location for one (slice, patient, image-type) query.
MR_MOD='T2SPIR'
slice_no=2
patient_no='2'
vals='T2SPIR_data'
trl1=gnrl_info_df[(gnrl_info_df['Slice_id'] == slice_no) &
    (gnrl_info_df['Person_id'] ==patient_no) &
    (gnrl_info_df['Image_type']==vals) ]
#& (gnrl_info_df['Image_type']==vals)
trl1['File_location'].item()
# +
# Count distinct slices per patient, then iterate patient/slice pairs.
# NOTE(review): the inner body is an unfinished stub -- it only selects the
# patient's rows and discards `j`.
No_slices_per_person=pd.DataFrame(gnrl_info_df.groupby('Person_id')['Slice_id'].nunique(),
    columns=['Slice_id'])
MR_type={'T1DUAL_InPhase':['InPhase','Ground'],'T1DUAL_OutPhase':['OutPhase','Ground'],'T2SPIR':[None,'Ground']}
for vals in No_slices_per_person.index:
    print(vals)
    No_slices=int(No_slices_per_person.loc[vals])
    for j in range(1,No_slices+1):
        ROI=gnrl_info_df.loc[gnrl_info_df['Person_id']==vals]
# ==== end of notebook: jupyter notebook/EDA_Biomedical_image.ipynb ====
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
# Import Required Libraries
try:
    import tensorflow as tf
    import os
    import random
    import numpy as np
    from tqdm import tqdm
    from skimage.io import imread, imshow
    from skimage.transform import resize
    import matplotlib.pyplot as plt
    from tensorflow.keras.models import load_model
    from keras.models import model_from_json
    print("----Libraries Imported----")
except ImportError as err:
    # Fix: the original bare `except` swallowed every error (including
    # typos) without saying which import failed.
    print("----Libraries Not Imported----", err)
# -
# checking the content of the current directory
os.listdir()
# +
# Setting up path
seed = 42
# Fix: the original `np.random.seed = seed` OVERWROTE NumPy's seed function
# with the integer 42 instead of seeding the RNG.
np.random.seed(seed)
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3
TRAIN_PATH = 'E:/Projects 6th SEM/Orange-Fruit-Recognition-Using-Image-Segmentation/Image Segmentaion/train_data/'
TEST_PATH = 'E:/Projects 6th SEM/Orange-Fruit-Recognition-Using-Image-Segmentation/Image Segmentaion/test_data/'
# First-level subdirectory names are the per-sample ids.
train_ids = next(os.walk(TRAIN_PATH))[1]
test_ids = next(os.walk(TEST_PATH))[1]
print(train_ids)
print(test_ids)
# +
# Loading data
# independent variable
X_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
# dependent variable (what we are trying to predict)
# Fix: np.bool was deprecated and removed in NumPy >= 1.24; use the builtin.
Y_train = np.zeros((len(train_ids), IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)
print('Resizing training images and masks')
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
    path = TRAIN_PATH + id_
    img = imread(path + '/images/' + id_ + '.jpg')[:,:,:IMG_CHANNELS]
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    X_train[n] = img #Fill empty X_train with values from img
    mask = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=bool)  # fix: np.bool
    # Union of all per-object mask files gives the full segmentation mask.
    for mask_file in next(os.walk(path + '/masks/'))[2]:
        mask_ = imread(path + '/masks/' + mask_file)
        mask_ = np.expand_dims(resize(mask_, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True), axis=-1)
        mask = np.maximum(mask, mask_)
    Y_train[n] = mask
# test images
X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.uint8)
sizes_test = []
print('Resizing test images')
for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
    path = TEST_PATH + id_
    img = imread(path + '/images/' + id_ + '.jpg')[:,:,:IMG_CHANNELS]
    # Remember original sizes so predictions can be upsampled back later.
    sizes_test.append([img.shape[0], img.shape[1]])
    img = resize(img, (IMG_HEIGHT, IMG_WIDTH), mode='constant', preserve_range=True)
    X_test[n] = img
print('Done!')
# +
# Showing Random images from the dataset
# Fix: random.randint is inclusive on both ends, so len(train_ids) could be
# drawn and index one past the end of X_train.
image_x = random.randint(0, len(train_ids) - 1)
imshow(X_train[image_x])
plt.show()
imshow(np.squeeze(Y_train[image_x]))
plt.show()
# +
# Build the U-Net segmentation model from the project module and inspect it.
from UNet_Model import Segmentation_model
model = Segmentation_model()
model.summary()
# +
################################
#Modelcheckpoint
# Train on GPU 0; 10% of the training set is held out for validation.
with tf.device('/GPU:0'):
    results = model.fit(X_train, Y_train, validation_split=0.1, batch_size=4, epochs=100)
print('Training DONE')
# +
# Plotting Training Results
# Training vs. validation accuracy per epoch.
plt.plot(results.history['accuracy'][0:150])
plt.plot(results.history['val_accuracy'][0:150])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['training_accuracy', 'validation_accuracy'])
plt.show()
# +
# Training vs. validation loss per epoch.
plt.plot(results.history['loss'][0:150])
plt.plot(results.history['val_loss'][0:150])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['training_loss', 'validation_loss'])
plt.show()
# +
# Saving model
# Architecture as JSON + weights as HDF5 (reloaded in the next cell).
orange_model_json = model.to_json()
with open("Segmentation_model.json", "w") as json_file:
    json_file.write(orange_model_json)
model.save_weights("Orange_Fruit_Weights_segmentation.h5")
# -
# Loading Unet
# Fix: the original opened the JSON file inline without closing it (leaked
# file handle); use a context manager so the handle is released.
with open("Segmentation_model.json", "r") as json_file:
    segmentation_model = model_from_json(json_file.read())
segmentation_model.load_weights('Orange_Fruit_Weights_segmentation.h5')
# +
####################################
# Predict on the train / held-out / test splits, then binarise at 0.5.
# NOTE(review): random.randint's upper bound is inclusive, and `idx` is only
# printed here -- harmless but misleading.
idx = random.randint(0, len(X_train))
print(idx)
preds_train = segmentation_model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1)
preds_val = segmentation_model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1)
preds_test = segmentation_model.predict(X_test, verbose=1)
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0.5).astype(np.uint8)
# +
# Perform a sanity check on some random training samples
# Fix: random.randint's upper bound is inclusive, so len(...) would raise
# IndexError roughly 1/(len+1) of the time.
ix = random.randint(0, len(preds_train_t) - 1)
imshow(X_train[ix])
plt.show()
imshow(np.squeeze(Y_train[ix]))
plt.show()
imshow(np.squeeze(preds_train_t[ix]))
plt.show()
# Perform a sanity check on some random validation samples
ix = random.randint(0, len(preds_val_t) - 1)  # same inclusive-bound fix
imshow(X_train[int(X_train.shape[0]*0.9):][ix])
plt.show()
imshow(np.squeeze(Y_train[int(Y_train.shape[0]*0.9):][ix]))
plt.show()
imshow(np.squeeze(preds_val_t[ix]))
plt.show()
# +
# Loading Classification Model
import Prediction_file as pf
classification_model = pf.Loading_Model()
# +
# Prediction
# Classify each image (orange vs. not) before deciding whether to segment.
path1 = 'Images/kiwi.jpg'
path2 = 'Images/Orange.jpg'
pred1 = pf.predicting(path1,classification_model)
pred2 = pf.predicting(path2,classification_model)
# +
from tensorflow.keras.preprocessing.image import load_img, img_to_array
def process_image(path):
    """Load an image, resize to the model's input size, scale to [0, 1],
    and add a leading batch axis."""
    image = load_img(path, target_size=(IMG_WIDTH, IMG_HEIGHT))
    batch = np.expand_dims(img_to_array(image), axis=0)
    batch /= 255.0
    return batch
# Only segment when the classifier says the image is an orange (score > 0.5).
if pred2 > 0.5:
    p = segmentation_model.predict(process_image(path2), verbose=1)
    p_t = (p > 0.5).astype(np.uint8)
    imshow(np.squeeze(p_t))
    plt.show()
# -
# Segment the non-orange image (kiwi) unconditionally, for comparison.
p = segmentation_model.predict(process_image(path1), verbose=1)
p_t = (p > 0.5).astype(np.uint8)
imshow(np.squeeze(p_t))
plt.show()
# ==== end of notebook: Image Segmentaion/main_new.ipynb ====
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # AutoKeras
#
# **This code worked with the AutoKeras version in Nov 2019 and has since been deprecated. Please refer to the June 2020 version [code/chapter-5/5-autokeras.ipynb](https://github.com/PracticalDL/Practical-Deep-Learning-Book/blob/master/code/chapter-5/5-autokeras.ipynb)**
#
# As AI is automating more and more jobs, it can finally automate designing AI architectures too. Neural Architecture Search (NAS) approaches utilize reinforcement learning to join together mini architectural blocks, till they are able to maximize the objective function - i.e. our validation accuracy. The current state of the art networks are all based on NAS, leaving human-designed architectures in the dust. Research in this area started showing promising results in 2017, with a bigger focus on making train faster in 2018.
#
# AutoKeras (Haifeng Jin et al), also apply this state of the art technique on our particular datasets in a relatively accessible manner. Generating new model architectures with AutoKeras is a matter of supplying our images and associated labels, and a time limit to finish running the jobs by. Internally, it implements several optimization algorithms including a Bayesian optimization approach to search for an optimal architecture.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="ICAIOyO4IuFi" outputId="a4449b50-7c8e-4246-c07a-7580506d6728"
# Make sure you have the necessary packages installed
# !pip3 install autokeras
# !pip3 install graphviz
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" id="nyKhle0zI16A" outputId="dfa92b1d-8f0e-4a4d-f10f-f0ee9e279e02"
from keras.datasets import mnist
from autokeras.image.image_supervised import ImageClassifier
# + colab={"base_uri": "https://localhost:8080/", "height": 503} colab_type="code" id="IjaF0gJuKBsu" outputId="edba3fba-96a6-4375-cdbb-88ac1d4e820d"
# Load the MNIST data and append an explicit single-channel axis,
# giving image tensors of shape (N, 28, 28, 1).
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train[..., None]
x_test = x_test[..., None]
# -
# Define an image classifier that performs augmentations
# (deprecated pre-1.0 AutoKeras API). max_iter_num caps the training
# iterations per candidate architecture during the search.
clf = ImageClassifier(path=".",verbose=True, augment=True, \
    searcher_args={'trainer_args':{'max_iter_num':7}}
    )
# Fit the classifier within a time limit of 60 seconds * 60 minutes * 1 hour
clf.fit(x_train, y_train, time_limit=60 * 60 * 1)
# Retrain the best architecture found on the full training data.
clf.final_fit(x_train, y_train, x_test, y_test, retrain=True)
# evaluate() returns the metric on the test set.
y = clf.evaluate(x_test, y_test)
print(y)
# This is what the complete output looks like:
#
# ```
# Saving Directory: .
# Preprocessing the images.
# Preprocessing finished.
#
# Initializing search.
# Initialization finished.
#
#
# +----------------------------------------------+
# | Training model 0 |
# +----------------------------------------------+
#
# Saving model.
# +--------------------------------------------------------------------------+
# | Model ID | Loss | Metric Value |
# +--------------------------------------------------------------------------+
# | 0 | 1.6381531059741974 | 0.8960000000000001 |
# +--------------------------------------------------------------------------+
#
#
# +----------------------------------------------+
# | Training model 1 |
# +----------------------------------------------+
# Epoch-1, Current Metric - 0: 43%|████████████ | 200/465 [33:54<46:26, 10.52s/ batch]Time is out.
#
# No loss decrease after 30 epochs.
#
# 0.9852
# ```
#
# Note that early stopping is present as well.
# + colab={} colab_type="code" id="5Mc0gYppKY78"
# Persist the best model found by the search (pickle format).
clf.export_autokeras_model("model.pkl")
# -
# Let's visualize the trained model.
# + colab={"base_uri": "https://localhost:8080/", "height": 167} colab_type="code" id="TzsHJ4VnKfmm" outputId="663ba600-daaf-450e-f223-4b5b11349b73"
# NOTE(review): `visualize` is never imported in this notebook — it came from
# an AutoKeras visualization helper script; confirm the import before running.
visualize('.')
# -
# One can also use `scikit-learn` to verify accuracy as follows:
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="OLTBfH0jKhSp" outputId="556943da-0b46-4038-afb4-28dd923cdeb2"
# Cross-check AutoKeras's reported metric with scikit-learn's accuracy_score.
from sklearn.metrics import accuracy_score

predictions = clf.predict(x_test)
accuracy_score(y_true=y_test, y_pred=predictions)
| code/chapter-5/5-autokeras-deprecated.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to XGBoost Spark3.0 with GPU
#
# Taxi is an example of an XGBoost regressor. This notebook will show you how to load data, train the XGBoost model and use this model to predict the "fare_amount" of your taxi trip.
#
# A few libraries required for this notebook:
# 1. NumPy
# 2. cudf jar
# 3. xgboost4j jar
# 4. xgboost4j-spark jar
# 5. rapids-4-spark.jar
#
# This notebook also illustrates the ease of porting a sample CPU based Spark xgboost4j code into GPU. There is only one change required for running Spark XGBoost on GPU. That is replacing the API `setFeaturesCol(feature)` on CPU with the new API `setFeaturesCols(features)`. This also eliminates the need for vectorization (assembling multiple feature columns in to one column) since we can read multiple columns.
# #### Import Required Libraries
from ml.dmlc.xgboost4j.scala.spark import XGBoostRegressionModel, XGBoostRegressor
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.sql import SparkSession
from pyspark.sql.types import FloatType, IntegerType, StructField, StructType
from time import time
# Besides, the CPU version requires two extra libraries.
# ```Python
# from pyspark.ml.feature import VectorAssembler
# from pyspark.sql.functions import col
# ```
# #### Create Spark Session and Data Reader
# Reuse an existing Spark session if one is active, otherwise start one.
spark = SparkSession.builder.getOrCreate()
reader = spark.read
# #### Specify the Data Schema and Load the Data
# +
label = 'fare_amount'

# Single source of truth for column names and types; the schema and the
# feature-name list are both derived from it.
_columns = [
    ('vendor_id', FloatType()),
    ('passenger_count', FloatType()),
    ('trip_distance', FloatType()),
    ('pickup_longitude', FloatType()),
    ('pickup_latitude', FloatType()),
    ('rate_code', FloatType()),
    ('store_and_fwd', FloatType()),
    ('dropoff_longitude', FloatType()),
    ('dropoff_latitude', FloatType()),
    (label, FloatType()),
    ('hour', FloatType()),
    ('year', IntegerType()),
    ('month', IntegerType()),
    ('day', FloatType()),
    ('day_of_week', FloatType()),
    ('is_weekend', FloatType()),
]
schema = StructType([StructField(name, dtype) for name, dtype in _columns])
# Every column except the label is a feature.
features = [name for name, _ in _columns if name != label]

train_data = reader.schema(schema).option('header', True).csv('/data/taxi/csv/train')
trans_data = reader.schema(schema).option('header', True).csv('/data/taxi/csv/test')
# -
# Note on CPU version, vectorization is required before fitting data to regressor, which means you need to assemble all feature columns into one column.
#
# ```Python
# def vectorize(data_frame):
# to_floats = [ col(x.name).cast(FloatType()) for x in data_frame.schema ]
# return (VectorAssembler()
# .setInputCols(features)
# .setOutputCol('features')
# .transform(data_frame.select(to_floats))
# .select(col('features'), col(label)))
#
# train_data = vectorize(train_data)
# trans_data = vectorize(trans_data)
# ```
# #### Create a XGBoostRegressor
# XGBoost hyper-parameters (camelCase keys are the xgboost4j-spark names).
params = {
    'eta': 0.05,               # learning rate
    'treeMethod': 'gpu_hist',  # GPU histogram tree construction
    'maxDepth': 8,
    'subsample': 0.8,
    'gamma': 1.0,
    'numRound': 100,           # boosting rounds
    'numWorkers': 1,           # set to the number of GPUs in the cluster
}
# GPU version: setFeaturesCols accepts the raw feature column names directly,
# so no VectorAssembler step is needed.
regressor = XGBoostRegressor(**params).setLabelCol(label).setFeaturesCols(features)
# The CPU version regressor provides the API `setFeaturesCol` which only accepts a single column name, so vectorization for multiple feature columns is required.
# ```Python
# regressor = XGBoostRegressor(**params).setLabelCol(label).setFeaturesCol('features')
# ```
#
# The parameter `num_workers` should be set to the number of GPUs in Spark cluster for GPU version, while for CPU version it is usually equal to the number of the CPU cores.
#
# Concerning the tree method, GPU version only supports `gpu_hist` currently, while `hist` is designed and used here for CPU training.
#
# #### Train the Data with Benchmark
def with_benchmark(phrase, action):
    """Execute *action*, print its wall-clock duration, and return its result."""
    started = time()
    outcome = action()
    elapsed = round(time() - started, 2)
    print('{} takes {} seconds'.format(phrase, elapsed))
    return outcome
# Fit the regressor, timing the distributed training run.
model = with_benchmark('Training', lambda: regressor.fit(train_data))
# #### Save and Reload the Model
model.write().overwrite().save('/data/new-model-path')
loaded_model = XGBoostRegressionModel().load('/data/new-model-path')
# #### Transformation and Show Result Sample
def transform():
    # Cache, then touch every partition so the benchmark below measures the
    # actual transformation work rather than lazy plan construction.
    result = loaded_model.transform(trans_data).cache()
    result.foreachPartition(lambda _: None)
    return result

result = with_benchmark('Transformation', transform)
result.select('vendor_id', 'passenger_count', 'trip_distance', label, 'prediction').show(5)
# Note on CPU version: You cannot `select` the feature columns after vectorization. So please use `result.show(5)` instead.
# #### Evaluation
# NOTE(review): the value computed here is the RMSE (RegressionEvaluator's
# default metric for regression), so `accuracy` is a misleading name.
accuracy = with_benchmark(
    'Evaluation',
    lambda: RegressionEvaluator().setLabelCol(label).evaluate(result))
print('RMSE is ' + str(accuracy))
# #### Stop
spark.stop()
| examples/notebooks/python/taxi-gpu.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convolutional NN
# +
import numpy as np
import gzip
import os
import pickle
from matplotlib import pyplot
from si.data import Dataset
from si.util.util import to_categorical
# -
# Load the MNIST dataset
def load_mnist(sample_size=None):
    """Load the gzipped, pickled MNIST data as a (train, test) Dataset pair.

    Args:
        sample_size: if given, keep only the first `sample_size` training
            examples; the test set is always returned in full.

    Returns:
        A tuple (train, test) of `si.data.Dataset` objects.
    """
    DIR = os.path.dirname(os.path.realpath('.'))
    filename = os.path.join(DIR, 'datasets/mnist.pkl.gz')
    # Context manager guarantees the file handle is closed even if
    # unpickling raises (the original left it open).
    with gzip.open(filename, 'rb') as f:
        data = pickle.load(f, encoding='bytes')
    (x_train, y_train), (x_test, y_test) = data
    if sample_size:
        return (Dataset(x_train[:sample_size], y_train[:sample_size]),
                Dataset(x_test, y_test))
    return Dataset(x_train, y_train), Dataset(x_test, y_test)
# A 500-image training subset keeps this demo fast; the test set is full-size.
train,test = load_mnist(500)
def preprocess(train):
    """In place: reshape images to (N, 28, 28, 1), scale pixels to [0, 1],
    and one-hot encode the labels."""
    train.X = train.X.reshape(-1, 28, 28, 1).astype('float32') / 255
    train.y = to_categorical(train.y)
# Normalize both splits in place.
preprocess(train)
preprocess(test)
# +
def plot_img(img, shape=(28, 28)):
    """Display a flat [0, 1] image vector as a grayscale picture."""
    pixels = (img * 255).reshape(shape).astype('int')
    pyplot.imshow(pixels, cmap=pyplot.get_cmap('gray'))
    pyplot.show()

plot_img(test.X[0])
# -
from si.supervised.NeuralNetworks import NN, Dense, Activation, Conv2D, Flatten, MaxPooling2D
from si.util.Activation import Tanh, Sigmoid
from si.util.Metrics import cross_entropy, cross_entropy_prime
# ### Check Conv2D
# 3x3 kernel, 1 output channel; a valid convolution of a 28x28 input
# yields a 26x26 feature map (matches the plot shape below).
conv = Conv2D((28, 28,1), (3, 3), 1)
out = conv.forward(test.X[:1])
plot_img(out, shape=(26, 26))
# ### Check MaxPooling
# 2x2 pooling with stride 1 reduces a 28x28 input to 27x27.
pool = MaxPooling2D(size=2,stride=1)
out = pool.forward(test.X[:1])
plot_img(out, shape=(27, 27))
# Build the model
# +
net = NN(epochs=500,lr=0.1,verbose=False)
net.addLayer(Conv2D((28, 28,1), (3, 3), 1))   # 28x28 -> 26x26, 1 channel
net.addLayer(Activation(Tanh()))
# Default MaxPooling2D presumably shrinks 26x26 to 25x25, given the
# Dense input size below — TODO confirm the pooling defaults.
net.addLayer(MaxPooling2D())
net.addLayer(Flatten())
net.addLayer(Dense(25*25*1, 100))             # flattened map -> 100 hidden units
net.addLayer(Activation(Tanh()))
net.addLayer(Dense(100, 10))                  # 10 digit classes
net.addLayer(Activation(Sigmoid()))
net.useLoss(cross_entropy, cross_entropy_prime)
# -
# Train the model
net.fit(train)
# Predict the first three test images and compare with the one-hot labels.
out = net.predict(test.X[0:3])
print("\n")
print("predicted values : ")
print(np.round(out), end="\n")
print("true values : ")
print(test.y[0:3])
# Inspect the intermediate outputs of the first three layers.
conv1 = net.layers[0]
act1 = net.layers[1]
pool1 = net.layers[2]
img1 = conv1.forward(test.X[:1])
plot_img(img1,shape=(26,26))
# + pycharm={"name": "#%%\n"}
img2= pool1.forward(act1.forward(img1))
plot_img(img2,shape=(25,25))
| scripts/eval5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### The Off-Switch Game
# ##### <NAME>, <NAME>, <NAME>, <NAME>
# ##### https://arxiv.org/pdf/1611.08219.pdf
# [ Reproduction in progress ]
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
class Robot:
    """Robot that samples beliefs about an action's utility and estimates
    its incentive to defer to the human (the off-switch game's delta)."""

    def belief(self, mean, sd):  # B^R: one Gaussian sample of the utility
        return np.random.normal(mean, sd, 1)

    def incentive(self, policy, mean, sd):  # delta
        """Monte-Carlo estimate of the incentive to wait for the human."""
        n_samples = 10000
        draws = [self.belief(mean, sd) for _ in range(n_samples)]
        # Value of deferring: the human's policy gates each sampled utility.
        wait_value = sum(policy(u) * u for u in draws)
        # Value of acting without asking, floored at zero.
        act_value = max(sum(draws), 0)
        delta = (wait_value - act_value) / n_samples
        return delta[0]
class Human:
    """Human supervisor whose policy decides whether the robot may act."""

    def policy(self, utility, rational=True):  # pi
        if not rational:
            # Noisy, irrational response proportional to the utility.
            return utility * np.random.normal(0.5, 0.25, 1)
        # Rational: allow the action exactly when its utility is non-negative.
        return 1 if utility >= 0 else 0
# Sweep the standard deviation of the robot's belief from 0.0 to 0.9 and
# record the incentive delta for three expected utilities E[Ua].
r = Robot()
h = Human()
mean_0 = []
mean_75 = []
mean_n25 = []
iterations = 10
sds = []
for sd in range(iterations):
    # Explicit float() division matters on the Python 2 kernel this
    # notebook declares.
    adjusted_sd = sd/float(iterations)
    sds.append(adjusted_sd)
    mean_0.append(r.incentive(h.policy, 0, adjusted_sd))
    mean_75.append(r.incentive(h.policy, 0.75, adjusted_sd))
    mean_n25.append(r.incentive(h.policy, -0.25, adjusted_sd))
print(sds)
# +
# plt.plot(ten, mean_0, "g^", ten,mean_34,"b^", ten, mean_n14, "r^")
# Reproduce Figure 2 (left): delta as a function of sigma for each E[Ua].
fig,ax = plt.subplots(1)
ax.plot(mean_75,"bo--", mean_n25, "ro--", mean_0, "go--")
ax.text(4.9,0.25, 'E[Ua]=0', color='green', rotation=35)
ax.text(7.2,0.25, 'E[Ua]=-1/4', color='red', rotation=35)
ax.text(8,0.13, 'E[Ua]=3/4', color='blue', rotation=35)
# Tick labels are hidden to match the look of the paper's figure.
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.xlabel(r'$\sigma$')
plt.ylabel(r'$\Delta$')
plt.show()
# -
# Figure 2 (Left). Paper & Reproduction, respectively.
# 
# +
# Figure 2, Right
xlist = np.linspace(0, 1, 10)
ylist = np.linspace(-1, 1, 3)
X, Y = np.meshgrid(xlist, ylist)
# NOTE(review): Z rows are [E=0, E=-0.25, E=0.75] while ylist is [-1, 0, 1];
# the row-to-y mapping looks inconsistent — verify before trusting this plot.
Z = [mean_0, mean_n25, mean_75]
print(Z)
plt.figure()
mean_0_plt = np.full(2, 0)
mean_75_plt = np.full(2, 0.75)
mean_n25_plt = np.full(2, -0.25)
cp = plt.contourf(X, Y, Z, cmap='Greys')
bar = plt.colorbar(cp)
plt.xlabel(r'$\sigma$')
plt.ylabel('E[Ua]')
plt.xlim([0,1])
plt.plot(mean_75_plt,"bo--", mean_n25_plt, "ro--", mean_0_plt, "go--")
bar.set_label(r'$\Delta$')
plt.show()
# TODO: Tweak Matplotlib to create Right plot
| Off-switch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # YOLO: Detection Using A Pre-Trained Model
# 1. Installing The Base System
# ```sh
# git clone https://github.com/pjreddie/darknet
# # cd darknet
# make
# ```
# 2. You already have the config file for YOLO in the cfg/ subdirectory. You will have to download the pre-trained weight file (258 MB):
# ```sh
# wget https://pjreddie.com/media/files/yolo.weights
# ```
# 3. Then run the detector!:
# ```sh
# ./darknet detect cfg/yolo.cfg yolo.weights data/dog.jpg
# ```
# in the terminal you will get output like this:
# 
# You will also get an image that illustrates the predictions:
# 
#
# # Tiny YOLO:
# Tiny YOLO is based off of the Darknet reference network and is much faster but less accurate than the normal YOLO model. To use the version trained on VOC:
# ```sh
# wget https://pjreddie.com/media/files/tiny-yolo-voc.weights
# ./darknet detector test cfg/voc.data cfg/tiny-yolo-voc.cfg tiny-yolo-voc.weights data/dog.jpg
# ```
# Which, ok, it's not perfect, but On GPU it runs at >200 FPS:
# 
#
# # Real-Time Detection on a Webcam
#
# Running YOLO on test data isn't very interesting if you can't see the result. Instead of running it on a bunch of images let's run it on the input from a webcam!
# You will need a webcam connected to the computer that OpenCV can connect to or it won't work.
#
# 1. Install OpenCV in Mac:
# ```sh
# brew tap homebrew/science
# brew install opencv3
# ```
# 2. change the 2nd line of the Makefile to read:
# ```sh
# nano Makefile
# ```
# 
# 3. re-make the project. Then use the imtest routine to test image loading and displaying:
# ```sh
# make
# ./darknet imtest data/eagle.jpg
# ```
# If you get a bunch of windows with eagles in them you've succeeded! (now we are using opencv with darknet)
# 4. To run this demo you will need to compile Darknet with CUDA and OpenCV (steps 1,2,3 above). Then run the command:
# ```sh
# ./darknet detector demo cfg/coco.data cfg/yolo.cfg yolo.weights
# ```
# the result in my case were like this :
# 
#
# You can also run it on a video file if OpenCV can read the video:
# ```sh
# ./darknet detector demo cfg/coco.data cfg/yolo.cfg yolo.weights ../videos/video_640_480_15.h264
# ```
# to get better performance you can run this command:
#
# ```sh
# ./darknet detector demo cfg/coco.data cfg/tiny-yolo-voc.cfg tiny-yolo-voc.weights ../videos/video_640_480_15.h264
# ```
#
# # TODO: add Python example code for retrieving the (x1, y1), (x2, y2) bounding-box coordinates from object detection
#
| yolo_pretrained_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# name: python2
# ---
# + [markdown] id="copyright-notice" colab_type="text"
# #### Copyright 2017 Google LLC.
# + colab={"autoexec": {"wait_interval": 0, "startup": false}} id="copyright-notice2" colab_type="code" cellView="both"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="mPa95uXvcpcn" colab_type="text"
# # Classer des chiffres écrits à la main avec les réseaux de neurones
# + [markdown] id="Fdpn8b90u8Tp" colab_type="text"
# 
# + [markdown] id="c7HLCm66Cs2p" colab_type="text"
# **Objectifs d'apprentissage :**
# * Entraîner un modèle linéaire et un réseau de neurones à classer des chiffres écrits à la main à partir de l'ensemble de données [MNIST](http://yann.lecun.com/exdb/mnist/) classique
# * Comparer les performances des modèles de classification linéaire et avec réseau de neurones
# * Visualiser les pondérations d'une couche cachée de réseau de neurones
# + [markdown] id="HSEh-gNdu8T0" colab_type="text"
# L'objectif est d'associer à chaque image d'entrée le chiffre numérique correct. Vous allez créer un réseau de neurones avec quelques couches cachées et, au sommet, une couche Softmax afin de sélectionner la classe qui l'emporte.
# + [markdown] id="2NMdE1b-7UIH" colab_type="text"
# ## Configuration
#
# Commencez par télécharger l'ensemble de données, importez TensorFlow et d'autres utilitaires, puis chargez les données dans un `DataFrame` *Pandas*. Notez que ces données constituent un échantillon des données d'apprentissage MNIST d'origine ; 20 000 lignes ont été prises au hasard.
# + id="4LJ4SD8BWHeh" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "test": {"output": "ignore", "timeout": 600}} cellView="both"
from __future__ import print_function
import glob
import math
import os
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
# Silence TF's INFO/WARN logs and keep pandas output compact.
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format

# CSV layout: label in column 0, then 784 monochrome pixel values per row.
mnist_dataframe = pd.read_csv(
    "https://download.mlcc.google.com/mledu-datasets/mnist_train_small.csv",
    sep=",",
    header=None)

# Use just the first 10,000 records for training/validation.
mnist_dataframe = mnist_dataframe.head(10000)

# Shuffle the rows so the positional train/validation split below is random.
mnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))
mnist_dataframe.head()
# + [markdown] id="kg0-25p2mOi0" colab_type="text"
# La première colonne contient l'étiquette de classe. Les autres contiennent les valeurs de caractéristique ; une par pixel pour les `28×28=784` valeurs de pixel. La plupart de ces 784 valeurs de pixel sont égales à zéro ; vous pouvez prendre une minute pour vérifier qu'elles ne sont pas *toutes* nulles.
# + [markdown] id="PQ7vuOwRCsZ1" colab_type="text"
# 
# + [markdown] id="dghlqJPIu8UM" colab_type="text"
# Ces exemples sont, en fait, des images à fort contraste et à relativement faible résolution de nombres écrits à la main. Chacun des dix chiffres `0-9` est représenté avec une étiquette de classe unique. Il s'agit donc d'un problème de classification à classes multiples avec 10 classes.
#
# Nous allons, à présent, analyser les étiquettes et les caractéristiques, et observer quelques exemples. Notez l'utilisation de `loc` qui permet d'extraire des colonnes en fonction de l'emplacement d'origine, étant donné que cet ensemble de données est dépourvu de ligne d'en-tête.
# + id="JfFWWvMWDFrR" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "test": {"output": "ignore", "timeout": 600}}
def parse_labels_and_features(dataset):
    """Extracts labels and features.

    This is a good place to scale or transform the features if needed.

    Args:
      dataset: A Pandas `Dataframe`, containing the label on the first column
        and monochrome pixel values on the remaining columns, in row major
        order.

    Returns:
      A `tuple` `(labels, features)`:
        labels: A Pandas `Series`.
        features: A Pandas `DataFrame`.
    """
    labels = dataset[0]
    # DataFrame.loc index ranges are inclusive at both ends (columns 1..784);
    # dividing by the max pixel value, 255, scales the data to [0, 1].
    features = dataset.loc[:, 1:784] / 255
    return labels, features
# + id="mFY_-7vZu8UU" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# First 7,500 shuffled rows form the training set.
training_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])
training_examples.describe()
# + id="4-Vgg-1zu8Ud" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Remaining 2,500 rows form the validation set.
validation_targets, validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])
validation_examples.describe()
# + [markdown] id="wrnAI1v6u8Uh" colab_type="text"
# Affichez un exemple aléatoire et l'étiquette correspondante.
# + id="s-euVJVtu8Ui" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Display one random training example as a 28x28 image with its label.
rand_example = np.random.choice(training_examples.index)
_, ax = plt.subplots()
ax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))
ax.set_title("Label: %i" % training_targets.loc[rand_example])
ax.grid(False)
# + [markdown] id="ScmYX7xdZMXE" colab_type="text"
# ## Tâche 1 : Construire un modèle linéaire pour MNIST
#
# Commencez par créer un modèle de référence qui servira de base de comparaison. Le modèle `LinearClassifier` fournit un ensemble de *k* classificateurs un contre tous ; un pour chacune des *k* classes.
#
# Outre l'indication de la justesse et la représentation graphique de la perte logistique au fil du temps, vous remarquerez qu'une [**matrice de confusion**](https://fr.wikipedia.org/wiki/Matrice_de_confusion) est également affichée. Cette matrice identifie les classes qui ont été classées de manière erronée. Quels chiffres ont été confondus avec d'autres ?
#
# Notez également que la fonction `log_loss` est utilisée pour le suivi de l'erreur du modèle. Veillez à ne pas la confondre avec la fonction de perte interne au modèle `LinearClassifier`, laquelle est utilisée à des fins d'apprentissage.
# + id="cpoVC4TSdw5Z" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def construct_feature_columns():
    """Construct the TensorFlow Feature Columns.

    Returns:
      A set of feature columns
    """
    # A single numeric column holds all 784 (28x28) pixel values per image.
    return {tf.feature_column.numeric_column('pixels', shape=784)}
# + [markdown] id="kMmL89yGeTfz" colab_type="text"
# Vous allez maintenant créer des fonctions d'entrée distinctes pour l'apprentissage et la prédiction. Vous allez les imbriquer dans `create_training_input_fn()` et `create_predict_input_fn()`, respectivement, afin de pouvoir les invoquer pour renvoyer les fonctions `_input_fn` correspondantes qui doivent être transmises aux appels `.train()` et `.predict()`.
# + id="OeS47Bmn5Ms2" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def create_training_input_fn(features, labels, batch_size, num_epochs=None, shuffle=True):
    """A custom input_fn for sending MNIST data to the estimator for training.

    Args:
      features: The training features.
      labels: The training labels.
      batch_size: Batch size to use during training.
      num_epochs: Number of passes over the data (None repeats indefinitely).
      shuffle: Whether to shuffle batches while training.

    Returns:
      A function that returns batches of training features and labels during
      training.
    """
    # Bind the outer arguments as the inner defaults. The original code
    # re-declared `num_epochs`/`shuffle` as fresh parameters of _input_fn,
    # silently ignoring whatever was passed to create_training_input_fn;
    # existing callers used the defaults, so behavior is unchanged for them.
    def _input_fn(num_epochs=num_epochs, shuffle=shuffle):
        # Input pipelines are reset with each call to .train(). To ensure model
        # gets a good sampling of data, even when number of steps is small, we
        # shuffle all the data before creating the Dataset object
        idx = np.random.permutation(features.index)
        raw_features = {"pixels": features.reindex(idx)}
        raw_targets = np.array(labels[idx])

        ds = Dataset.from_tensor_slices((raw_features, raw_targets))  # warning: 2GB limit
        ds = ds.batch(batch_size).repeat(num_epochs)

        if shuffle:
            ds = ds.shuffle(10000)

        # Return the next batch of data.
        feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
        return feature_batch, label_batch

    return _input_fn
# + id="8zoGWAoohrwS" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def create_predict_input_fn(features, labels, batch_size):
    """A custom input_fn for sending mnist data to the estimator for predictions.

    Args:
      features: The features to base predictions on.
      labels: The labels of the prediction examples.
      batch_size: Batch size for the prediction batches.

    Returns:
      A function that returns features and labels for predictions.
    """
    def _input_fn():
        # No shuffle/repeat here: predictions must visit each example exactly
        # once, in order, so metrics line up with the targets.
        raw_features = {"pixels": features.values}
        raw_targets = np.array(labels)

        ds = Dataset.from_tensor_slices((raw_features, raw_targets)) # warning: 2GB limit
        ds = ds.batch(batch_size)

        # Return the next batch of data.
        feature_batch, label_batch = ds.make_one_shot_iterator().get_next()
        return feature_batch, label_batch

    return _input_fn
# + id="G6DjSLZMu8Um" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def train_linear_classification_model(
        learning_rate,
        steps,
        batch_size,
        training_examples,
        training_targets,
        validation_examples,
        validation_targets):
    """Trains a linear classification model for the MNIST digits dataset.

    In addition to training, this function also prints training progress
    information, a plot of the training and validation loss over time, and a
    confusion matrix.

    Args:
      learning_rate: A `float`, the learning rate to use.
      steps: A non-zero `int`, the total number of training steps. A training
        step consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      training_examples: A `DataFrame` containing the training features.
      training_targets: A `DataFrame` containing the training labels.
      validation_examples: A `DataFrame` containing the validation features.
      validation_targets: A `DataFrame` containing the validation labels.

    Returns:
      The trained `LinearClassifier` object.
    """

    periods = 10
    steps_per_period = steps / periods

    # Create the input functions.
    predict_training_input_fn = create_predict_input_fn(
        training_examples, training_targets, batch_size)
    predict_validation_input_fn = create_predict_input_fn(
        validation_examples, validation_targets, batch_size)
    training_input_fn = create_training_input_fn(
        training_examples, training_targets, batch_size)

    # Create a LinearClassifier object; gradient clipping keeps updates stable.
    my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    classifier = tf.estimator.LinearClassifier(
        feature_columns=construct_feature_columns(),
        n_classes=10,
        optimizer=my_optimizer,
        config=tf.estimator.RunConfig(keep_checkpoint_max=1)
    )

    # Train the model, but do so inside a loop so that we can periodically
    # assess loss metrics.
    print("Training model...")
    print("LogLoss error (on validation data):")
    training_errors = []
    validation_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        classifier.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )

        # Take a break and compute probabilities.
        training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
        training_probabilities = np.array([item['probabilities'] for item in training_predictions])
        training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
        training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id, 10)

        validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
        validation_probabilities = np.array([item['probabilities'] for item in validation_predictions])
        validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
        validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id, 10)

        # Compute training and validation errors.
        training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
        validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
        # Occasionally print the current loss.
        print(" period %02d : %0.2f" % (period, validation_log_loss))
        # Add the loss metrics from this period to our list.
        training_errors.append(training_log_loss)
        validation_errors.append(validation_log_loss)
    print("Model training finished.")

    # Remove event files to save disk space. list() forces the removal to run
    # on Python 3 too, where a bare map() is lazy and would silently do nothing.
    _ = list(map(os.remove, glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*'))))

    # Calculate final predictions (not probabilities, as above).
    final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
    final_predictions = np.array([item['class_ids'][0] for item in final_predictions])

    accuracy = metrics.accuracy_score(validation_targets, final_predictions)
    print("Final accuracy (on validation data): %0.2f" % accuracy)

    # Output a graph of loss metrics over periods.
    plt.ylabel("LogLoss")
    plt.xlabel("Periods")
    plt.title("LogLoss vs. Periods")
    plt.plot(training_errors, label="training")
    plt.plot(validation_errors, label="validation")
    plt.legend()
    plt.show()

    # Output a plot of the confusion matrix.
    cm = metrics.confusion_matrix(validation_targets, final_predictions)
    # Normalize the confusion matrix by row (i.e by the number of samples
    # in each class).
    cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    ax = sns.heatmap(cm_normalized, cmap="bone_r")
    ax.set_aspect(1)
    plt.title("Confusion matrix")
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    plt.show()

    return classifier
# + [markdown] id="ItHIUyv2u8Ur" colab_type="text"
# **Exercez-vous, pendant cinq minutes, à obtenir une valeur de justesse élevée avec un modèle linéaire de ce type. Pour cet exercice, limitez-vous à expérimenter avec les hyperparamètres relatifs à la taille du lot, au taux d'apprentissage et aux nombres d'étapes.**
#
# Arrêtez-vous dès que vous obtenez une valeur de justesse supérieure à environ 0,9.
# + id="yaiIhIQqu8Uv" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Task 1 baseline: a deliberately small/short run; tune these hyperparameters.
classifier = train_linear_classification_model(
    learning_rate=0.02,
    steps=100,
    batch_size=10,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] id="266KQvZoMxMv" colab_type="text"
# ### Solution
#
# Cliquez ci-dessous pour afficher une solution.
# + [markdown] id="lRWcn24DM3qa" colab_type="text"
# Voici un ensemble de paramètres avec lequel la justesse devrait avoisiner 0,9.
# + id="TGlBMrUoM1K_" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Solution run: these settings reach roughly 0.9 validation accuracy.
_ = train_linear_classification_model(
    learning_rate=0.03,
    steps=1000,
    batch_size=30,
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] id="mk095OfpPdOx" colab_type="text"
# ## Tâche 2 Remplacer le classificateur linéaire par un réseau de neurones
#
# **Remplacez le modèle LinearClassifier ci-dessus par un modèle [`DNNClassifier`](https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier) et identifiez une combinaison de paramètres offrant une justesse de 0,95 ou plus.**
#
# Vous pouvez essayer d'autres méthodes de régularisation, telles qu'une régularisation par abandon. Ces méthodes sont documentées dans les commentaires de la classe `DNNClassifier`.
# + id="rm8P_Ttwu8U4" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
#
# YOUR CODE HERE: Replace the linear classifier with a neural network.
#
# + [markdown] id="TOfmiSvqu8U9" colab_type="text"
# Dès que vous disposez d'un modèle satisfaisant, vérifiez que l'ensemble de validation n'a pas été surappris en évaluant les données de test qui seront chargées ci-après.
#
# + id="evlB5ubzu8VJ" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Load the held-out MNIST test set (same CSV layout as the training file).
mnist_test_dataframe = pd.read_csv(
    "https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
    sep=",",
    header=None)

test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
# + id="PDuLd2Hcu8VL" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
#
# YOUR CODE HERE: Calculate accuracy on the test set.
#
# + [markdown] id="6sfw3LH0Oycm" colab_type="text"
# ### Solution
#
# Cliquez ci-dessous pour afficher une solution.
# + [markdown] id="XatDGFKEO374" colab_type="text"
# Le code ci-dessous est pratiquement identique au code d'apprentissage `LinearClassifer` d'origine, à l'exception de la configuration spécifique au réseau de neurones, comme l'hyperparamètre des unités cachées.
# + id="kdNTx8jkPQUx" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
def train_nn_classification_model(
        learning_rate,
        steps,
        batch_size,
        hidden_units,
        training_examples,
        training_targets,
        validation_examples,
        validation_targets):
    """Trains a neural network classification model for the MNIST digits dataset.

    In addition to training, this function also prints training progress
    information, a plot of the training and validation loss over time, as well
    as a confusion matrix.

    Args:
      learning_rate: A `float`, the learning rate to use.
      steps: A non-zero `int`, the total number of training steps. A training
        step consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      hidden_units: A `list` of int values, specifying the number of neurons
        in each layer.
      training_examples: A `DataFrame` containing the training features.
      training_targets: A `DataFrame` containing the training labels.
      validation_examples: A `DataFrame` containing the validation features.
      validation_targets: A `DataFrame` containing the validation labels.

    Returns:
      The trained `DNNClassifier` object.
    """
    periods = 10
    # Caution: input pipelines are reset with each call to train.
    # If the number of steps is small, your model may never see most of the data.
    # So with multiple `.train` calls like this you may want to control the length
    # of training with num_epochs passed to the input_fn. Or, you can do a
    # really-big shuffle, or since it's in-memory data, shuffle all the data in
    # the `input_fn`.
    steps_per_period = steps / periods

    # Create the input functions.  (A verbatim duplicate of this block was
    # removed: the input functions only need to be created once.)
    predict_training_input_fn = create_predict_input_fn(
        training_examples, training_targets, batch_size)
    predict_validation_input_fn = create_predict_input_fn(
        validation_examples, validation_targets, batch_size)
    training_input_fn = create_training_input_fn(
        training_examples, training_targets, batch_size)

    # Create feature columns: each image is a flat vector of 784 pixel values.
    feature_columns = [tf.feature_column.numeric_column('pixels', shape=784)]

    # Create a DNNClassifier object, clipping gradients for stability.
    my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    classifier = tf.estimator.DNNClassifier(
        feature_columns=feature_columns,
        n_classes=10,
        hidden_units=hidden_units,
        optimizer=my_optimizer,
        config=tf.contrib.learn.RunConfig(keep_checkpoint_max=1)
    )

    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("LogLoss error (on validation data):")
    training_errors = []
    validation_errors = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        classifier.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )
        # Take a break and compute predictions.  Class ids are one-hot encoded
        # so that metrics.log_loss can compare them against the integer targets.
        training_predictions = list(classifier.predict(input_fn=predict_training_input_fn))
        training_pred_class_id = np.array([item['class_ids'][0] for item in training_predictions])
        training_pred_one_hot = tf.keras.utils.to_categorical(training_pred_class_id, 10)

        validation_predictions = list(classifier.predict(input_fn=predict_validation_input_fn))
        validation_pred_class_id = np.array([item['class_ids'][0] for item in validation_predictions])
        validation_pred_one_hot = tf.keras.utils.to_categorical(validation_pred_class_id, 10)

        # Compute training and validation errors.
        training_log_loss = metrics.log_loss(training_targets, training_pred_one_hot)
        validation_log_loss = metrics.log_loss(validation_targets, validation_pred_one_hot)
        # Occasionally print the current loss.
        print(" period %02d : %0.2f" % (period, validation_log_loss))
        # Add the loss metrics from this period to our list.
        training_errors.append(training_log_loss)
        validation_errors.append(validation_log_loss)
    print("Model training finished.")

    # Remove event files to save disk space.  FIX: `map` is lazy in Python 3,
    # so the original `_ = map(os.remove, ...)` never deleted anything;
    # iterate explicitly instead.
    for event_file in glob.glob(os.path.join(classifier.model_dir, 'events.out.tfevents*')):
        os.remove(event_file)

    # Calculate final predictions (not probabilities, as above).
    final_predictions = classifier.predict(input_fn=predict_validation_input_fn)
    final_predictions = np.array([item['class_ids'][0] for item in final_predictions])

    accuracy = metrics.accuracy_score(validation_targets, final_predictions)
    print("Final accuracy (on validation data): %0.2f" % accuracy)

    # Output a graph of loss metrics over periods.
    plt.ylabel("LogLoss")
    plt.xlabel("Periods")
    plt.title("LogLoss vs. Periods")
    plt.plot(training_errors, label="training")
    plt.plot(validation_errors, label="validation")
    plt.legend()
    plt.show()

    # Output a plot of the confusion matrix, normalized by row (i.e. by the
    # number of samples in each class).
    cm = metrics.confusion_matrix(validation_targets, final_predictions)
    cm_normalized = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    ax = sns.heatmap(cm_normalized, cmap="bone_r")
    ax.set_aspect(1)
    plt.title("Confusion matrix")
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
    plt.show()
    return classifier
# + id="ZfzsTYGPPU8I" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Train the DNN classifier.  These hyperparameters (two hidden layers of 100
# units, Adagrad at lr=0.05) are intended to reach the ~0.95 validation
# accuracy asked for in Task 2.
classifier = train_nn_classification_model(
    learning_rate=0.05,
    steps=1000,
    batch_size=30,
    hidden_units=[100, 100],
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] id="qXvrOgtUR-zD" colab_type="text"
# Vous allez ensuite vérifier la justesse de l'ensemble d'évaluation.
# + id="scQNpDePSFjt" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Re-load the MNIST test set (CSV with no header row).
mnist_test_dataframe = pd.read_csv(
    "https://download.mlcc.google.com/mledu-datasets/mnist_test.csv",
    sep=",",
    header=None)
test_targets, test_examples = parse_labels_and_features(mnist_test_dataframe)
test_examples.describe()
# + id="EVaWpWKvSHmu" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}}
# Evaluate the trained classifier on the held-out test set.
predict_test_input_fn = create_predict_input_fn(
    test_examples, test_targets, batch_size=100)
test_predictions = classifier.predict(input_fn=predict_test_input_fn)
test_predictions = np.array([item['class_ids'][0] for item in test_predictions])

accuracy = metrics.accuracy_score(test_targets, test_predictions)
print("Accuracy on test data: %0.2f" % accuracy)
# + [markdown] id="WX2mQBAEcisO" colab_type="text"
# ## Tâche 3 : Visualiser les pondérations de la première couche cachée
#
# Passez quelques minutes à explorer le réseau de neurones pour savoir ce qu'il a appris. Pour cela, vous allez accéder à l'attribut `weights_` du modèle.
#
# La couche d'entrée du modèle compte `784` pondérations correspondant aux images d'entrée `28×28`. La première couche cachée aura `784×N` pondérations, où `N` représente le nombre de nœuds de cette couche. Vous pouvez reconvertir ces pondérations en images `28×28` en *remodelant* chacun des `N` tableaux de pondérations `1×784` en `N` tableaux d'une taille de `28×28`.
#
# Exécutez la cellule suivante pour représenter graphiquement les pondérations. Pour cette cellule, notez qu'un modèle `DNNClassifier` appelé "classifier" doit déjà avoir été entraîné.
# + id="eUC0Z8nbafgG" colab_type="code" colab={"autoexec": {"startup": false, "wait_interval": 0}, "test": {"output": "ignore", "timeout": 600}} cellView="both"
# Visualize what the first hidden layer learned: each hidden unit owns one
# 784-element weight vector, which reshapes back into a 28x28 image.
print(classifier.get_variable_names())

weights0 = classifier.get_variable_value("dnn/hiddenlayer_0/kernel")

print("weights0 shape:", weights0.shape)

# One row of 10 thumbnails per 10 hidden units.
num_nodes = weights0.shape[1]
num_rows = int(math.ceil(num_nodes / 10.0))
fig, axes = plt.subplots(num_rows, 10, figsize=(20, 2 * num_rows))
for coef, ax in zip(weights0.T, axes.ravel()):
    # Weights in coef is reshaped from 1x784 to 28x28.
    ax.matshow(coef.reshape(28, 28), cmap=plt.cm.pink)
    ax.set_xticks(())
    ax.set_yticks(())

plt.show()
# + [markdown] id="kL8MEhNgrx9N" colab_type="text"
# La première couche cachée du réseau de neurones doit normalement modéliser des caractéristiques de bas niveau. Lorsque vous visualiserez les pondérations, vous ne verrez donc probablement que quelques blobs flous ou peut-être quelques parties de chiffres. Vous constaterez également que certains neurones sont essentiellement des données bruitées ; soit ils n'ont pas convergé, soit ils sont ignorés par les couches supérieures.
#
# Il peut être intéressant d'interrompre l'apprentissage après un certain nombre d'itérations afin d'examiner le résultat.
#
# **Effectuez l'apprentissage du classificateur pour 10, 100 et 1 000 pas, puis exécutez à nouveau cette visualisation.**
#
# Quelles différences remarquez-vous pour les différents niveaux de convergence ?
| ml/cc/exercises/fr/multi-class_classification_of_handwritten_digits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import scipy
from scipy.sparse.csgraph import connected_components
from scipy.sparse import csr_matrix
import time
import matplotlib.pyplot as plt
# %matplotlib inline
from numba import jit
# -
# a), b)
def init_system(Lx, Ly):
    """Set up an Lx x Ly square lattice with periodic boundary conditions.

    Returns:
        spins: array of N = Lx*Ly spins, each randomly +1 or -1.
        bonds: (2*N, 2) integer array; row [n, m] couples site n to its
            +x or +y neighbor, so every bond of the periodic lattice
            appears exactly once.
        N: total number of lattice sites.
    """
    N = Lx * Ly

    def xy_to_n(x, y):
        # Map 2D coordinates to a flat site index (row-major in x).
        return x * Ly + y

    # NOTE: an unused inverse helper (n_to_xy) was removed.
    bonds = []
    for x in range(Lx):
        for y in range(Ly):
            n = xy_to_n(x, y)
            # Couple each site only to its +x and +y neighbors so each bond
            # is listed once (periodic wrap-around via the modulo).
            bonds.append([n, xy_to_n((x + 1) % Lx, y)])
            bonds.append([n, xy_to_n(x, (y + 1) % Ly)])
    bonds = np.array(bonds)
    spins = np.random.randint(0, 2, size=(N,)) * 2 - 1
    return spins, bonds, N
# part c)
@jit(nopython=True)
def get_weights(spins, bonds, T):
    """Randomly activate bonds for the Swendsen-Wang cluster decomposition.

    A bond may only become active between aligned spins, and is then kept
    with probability 1 - exp(-2J/T) (J = 1).  Returns a float array holding
    1. for active bonds and 0. otherwise.
    """
    weights = np.zeros(len(bonds))
    p = np.exp(-2./T)  # probability of dropping a satisfied bond; set J = 1
    for b in range(len(bonds)):
        n = bonds[b, 0]
        m = bonds[b, 1]
        # (A commented-out equivalent branch-by-branch implementation was
        # removed; this condition is its compact form.)
        if spins[n] == spins[m] and np.random.rand() > p:
            weights[b] = 1.
    return weights
# part d)
@jit(nopython=True)
def flip_spins(spins, N_components, labels):
    """Flip every Swendsen-Wang cluster as a whole with probability 1/2.

    Mutates ``spins`` in place; ``labels[n]`` is the cluster id of site n.
    """
    # Draw one coin flip per cluster, then apply it to each member site.
    do_flip = np.random.random(N_components) < 0.5
    for site in range(len(spins)):
        if do_flip[labels[site]]:
            spins[site] = -spins[site]
# done
def swendsen_wang_update(spins, bonds, T):
    """Perform one update of the Swendsen-Wang algorithm (in place)."""
    N = len(spins)
    # Activate bonds stochastically, then find the clusters of the
    # resulting (symmetrized) bond graph.
    active = get_weights(spins, bonds, T)
    rows, cols = bonds[:, 0], bonds[:, 1]
    graph = (csr_matrix((active, (rows, cols)), shape=(N, N)) +
             csr_matrix((active, (cols, rows)), shape=(N, N)))
    n_clusters, labels = connected_components(graph, directed=False)
    flip_spins(spins, n_clusters, labels)
# +
@jit(nopython=True)
def energy(spins, bonds):
    """Total Ising energy E = -sum over bonds of s_n * s_m (J = 1)."""
    total = 0.
    for b in range(len(bonds)):
        total -= spins[bonds[b, 0]] * spins[bonds[b, 1]]
    return total
def energy2(spins, bonds):
    """Vectorized total Ising energy; matches ``energy`` without needing jit."""
    left = spins[bonds[:, 0]]
    right = spins[bonds[:, 1]]
    return -1. * np.sum(left * right)
def magnetization(spins):
    """Total magnetization M = sum of all spins."""
    return spins.sum()
# +
def simulation(spins, bonds, T, N_measure=100):
    """Perform a Monte-Carlo simulation at given temperature.

    Runs N_measure // 10 thermalization sweeps (discarded), then N_measure
    measured sweeps.  Returns arrays of the measured energies and
    magnetizations; ``spins`` is updated in place.
    """
    # Thermalization: updates without measurement.
    for _ in range(N_measure // 10):
        swendsen_wang_update(spins, bonds, T)
    energies = []
    mags = []
    for _ in range(N_measure):
        swendsen_wang_update(spins, bonds, T)
        energies.append(energy(spins, bonds))
        mags.append(magnetization(spins))
    return np.array(energies), np.array(mags)
# -
# The full simulation at different temperatures
def run(Ts, L, N_measure=100):
    """Sweep over the temperatures Ts on an L x L lattice.

    The spin configuration is carried over from one temperature to the next,
    serving as the starting point of the following simulation.

    Returns (per site):
        Es: mean energies, Cs: specific heats Var(E)/(T^2 N),
        Ms: mean magnetizations, absMs: mean absolute magnetizations.
    """
    spins, bonds, N = init_system(L, L)
    # NOTE: init_system already returns randomized spins; a redundant second
    # random initialization that used to follow here was removed.
    Ms = []
    absMs = []
    Es = []
    Cs = []
    for T in Ts:
        E, M = simulation(spins, bonds, T, N_measure)
        Es.append(np.mean(E) / N)
        Cs.append(np.var(E) / (T**2 * N))
        Ms.append(np.mean(M) / N)
        absMs.append(np.mean(np.abs(M)) / N)
    return Es, Cs, Ms, absMs
# +
# Run the full simulation for several system sizes and plot the observables.
# Tc is the exact critical temperature of the 2D Ising model (Onsager).
fig, axes = plt.subplots(4, 1, figsize=(10, 14))
ax1, ax2, ax3, ax4 = axes
Tc = 2. / np.log(1. + np.sqrt(2))
Ts = np.linspace(2.7, 1.7, 40)
print("warning: this takes some time")
for L in [4, 8, 16, 32]:
    print("calculate L =", L, flush=True)
    t0 = time.time()
    Es, Cs, Ms, absMs = run(Ts, L, N_measure=1000)
    # Plot the results
    ax1.plot(Ts, Cs, label='L={L:d}'.format(L=L))
    ax2.plot(Ts, Es, label='L={L:d}'.format(L=L))
    ax3.plot(Ts, Ms, label='L={L:d}'.format(L=L))
    ax4.plot(Ts, absMs, label='L={L:d}'.format(L=L))
    print(" took {t:.1f} seconds".format(t=time.time()-t0), flush=True)
# Mark the exact critical temperature on every panel.
for ax in axes:
    ax.axvline(Tc, color='r', linestyle='--')
    ax.set_xlabel('$T$')
    ax.legend(loc='best')
ax1.set_ylabel('specific heat $c_V$')
ax2.set_ylabel('energy density $E/L^2$')
ax3.set_ylabel('magnetization $<M>$')
ax4.set_ylabel('abs of magnetization $<|M|>$')
print("done")
# -
# ## Auto-correlation time
def auto_correlation(E, delta):
    """Normalized autocorrelation of the series E at lag ``delta``.

    Returns 1. at lag 0 by definition.
    """
    if delta == 0:
        return 1.
    fluct = E - np.mean(E)
    # Correlation of the fluctuations at the given lag, normalized by the
    # variance so that the result is 1 at zero lag.
    return np.mean(fluct[delta:] * fluct[:-delta]) / np.mean(fluct**2)
# +
# Measure the autocorrelation of E and M for the Swendsen-Wang algorithm at
# temperatures below, at, and above Tc.
L = 64
spins, bonds, N = init_system(L, L)
deltas = np.arange(0, 50)
plt.figure(figsize=(10, 7))
for T in [2., Tc, 3.]:
    print("T=", T, flush=True)
    E, M = simulation(spins, bonds, T, 1000)
    cor_E = [auto_correlation(E, d) for d in deltas]
    cor_M = [auto_correlation(M, d) for d in deltas]
    plt.plot(deltas, cor_E, label="E, T={T:.3f}".format(T=T))
    plt.plot(deltas, cor_M, linestyle='--', label="M, T={T:.3f}".format(T=T))
plt.legend()
plt.show()
# -
# ## compare with auto correlation of Metropolis
# +
def bonds_alternative(Lx, Ly):
    """Return, for each site, the flat indices of its four nearest neighbors.

    Row n lists the +x, +y, -x and -y neighbors of site n on the periodic
    Lx x Ly lattice (layout used by the Metropolis update).
    """
    # NOTE: unused locals (N and the site index n) were removed.
    def xy_to_n(x, y):
        return x * Ly + y

    bonds_alt = []
    for x in range(Lx):
        for y in range(Ly):
            bonds_alt.append([xy_to_n((x + 1) % Lx, y),
                              xy_to_n(x, (y + 1) % Ly),
                              xy_to_n((x - 1) % Lx, y),
                              xy_to_n(x, (y - 1) % Ly)])
    return np.array(bonds_alt)
@jit(nopython=True)
def delta_E(spins, bonds_alt, n):
    """Local bond energy of site n: -s_n times the sum of its neighbors."""
    neighbor_sum = 0.
    for i in range(bonds_alt.shape[1]):
        neighbor_sum += spins[bonds_alt[n, i]]
    return -spins[n] * neighbor_sum
@jit(nopython=True)
def metropolis_update(spins, bonds, bonds_alt, T):
    """One Metropolis sweep: N single-spin-flip attempts at temperature T.

    Mutates ``spins`` in place.  ``bonds`` is kept in the signature for
    call-site compatibility; the update itself only needs ``bonds_alt``.
    NOTE: a dead running-energy accumulator (computed via energy() but never
    read or returned) was removed.
    """
    N = len(spins)
    for _ in range(N):
        n = np.random.randint(0, N)
        # Energy cost of flipping spin n (J = 1): dE = 2 * s_n * sum(neighbors).
        dE = -2. * delta_E(spins, bonds_alt, n)
        # Metropolis acceptance: always if dE <= 0 (exp > 1), else exp(-dE/T).
        if np.random.rand() < np.exp(-dE/T):
            spins[n] *= -1
# done
def simulation_metropolis(spins, bonds, bonds_alt, T, N_measure=100):
    """Monte-Carlo simulation at temperature T using Metropolis sweeps.

    Same protocol as ``simulation``: N_measure // 10 thermalization sweeps,
    then N_measure measured sweeps.  Returns arrays of energies and
    magnetizations.
    """
    # Thermalization: sweeps without measurement.
    for _ in range(N_measure // 10):
        metropolis_update(spins, bonds, bonds_alt, T)
    energies = []
    mags = []
    for _ in range(N_measure):
        metropolis_update(spins, bonds, bonds_alt, T)
        energies.append(energy(spins, bonds))
        mags.append(magnetization(spins))
    return np.array(energies), np.array(mags)
# +
# Same autocorrelation measurement as above, but for Metropolis updates
# (reuses L, Tc and deltas from the earlier cells).
spins, bonds, N = init_system(L, L)
bonds_alt = bonds_alternative(L, L)
plt.figure(figsize=(10, 7))
for T in [2., Tc, 3.]:
    print("T=", T, flush=True)
    E, M = simulation_metropolis(spins, bonds, bonds_alt, T, 1000)
    cor_E = [auto_correlation(E, d) for d in deltas]
    cor_M = [auto_correlation(M, d) for d in deltas]
    plt.plot(deltas, cor_E, label="E, T={T:.3f}".format(T=T))
    plt.plot(deltas, cor_M, linestyle='--', label="M, T={T:.3f}".format(T=T))
plt.legend()
plt.show()
# -
# ## checks for debugging
# Sanity checks: run a short simulation, quench at low temperature, and
# verify the two energy implementations agree.
L = 64
spins, bonds, N = init_system(L, L)
E, M = simulation(spins, bonds, 2., N_measure=10)
print(M)
print(E)
T = 0.1
print(spins)
for i in range(100):
    swendsen_wang_update(spins, bonds, T)
print(spins)
print(energy(spins, bonds))
assert(energy(spins, bonds) == energy2(spins, bonds))
print(magnetization(spins))
# %%timeit
swendsen_wang_update(spins, bonds, 1.)
| 3_monte_carlo/sol2_swendsen_wang.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# Text provided under a Creative Commons Attribution license, CC-BY. All code is made available under the FSF-approved BSD-3 license. (c) <NAME>, <NAME> 2017. Thanks to NSF for support via CAREER award #1149784.
# -
# [@LorenaABarba](https://twitter.com/LorenaABarba)
# 12 steps to Navier–Stokes
# =====
# ***
# Did you make it this far? This is the last step! How long did it take you to write your own Navier–Stokes solver in Python following this interactive module? Let us know!
# Step 12: Channel Flow with Navier–Stokes
# ----
# ***
# The only difference between this final step and Step 11 is that we are going to add a source term to the $u$-momentum equation, to mimic the effect of a pressure-driven channel flow. Here are our modified Navier–Stokes equations:
# $$\frac{\partial u}{\partial t}+u\frac{\partial u}{\partial x}+v\frac{\partial u}{\partial y}=-\frac{1}{\rho}\frac{\partial p}{\partial x}+\nu\left(\frac{\partial^2 u}{\partial x^2}+\frac{\partial^2 u}{\partial y^2}\right)+F$$
#
# $$\frac{\partial v}{\partial t}+u\frac{\partial v}{\partial x}+v\frac{\partial v}{\partial y}=-\frac{1}{\rho}\frac{\partial p}{\partial y}+\nu\left(\frac{\partial^2 v}{\partial x^2}+\frac{\partial^2 v}{\partial y^2}\right)$$
#
# $$\frac{\partial^2 p}{\partial x^2}+\frac{\partial^2 p}{\partial y^2}=-\rho\left(\frac{\partial u}{\partial x}\frac{\partial u}{\partial x}+2\frac{\partial u}{\partial y}\frac{\partial v}{\partial x}+\frac{\partial v}{\partial y}\frac{\partial v}{\partial y}\right)
# $$
# ### Discretized equations
# With patience and care, we write the discretized form of the equations. It is highly recommended that you write these in your own hand, mentally following each term as you write it.
#
# The $u$-momentum equation:
#
# $$
# \begin{split}
# & \frac{u_{i,j}^{n+1}-u_{i,j}^{n}}{\Delta t}+u_{i,j}^{n}\frac{u_{i,j}^{n}-u_{i-1,j}^{n}}{\Delta x}+v_{i,j}^{n}\frac{u_{i,j}^{n}-u_{i,j-1}^{n}}{\Delta y} = \\
# & \qquad -\frac{1}{\rho}\frac{p_{i+1,j}^{n}-p_{i-1,j}^{n}}{2\Delta x} \\
# & \qquad +\nu\left(\frac{u_{i+1,j}^{n}-2u_{i,j}^{n}+u_{i-1,j}^{n}}{\Delta x^2}+\frac{u_{i,j+1}^{n}-2u_{i,j}^{n}+u_{i,j-1}^{n}}{\Delta y^2}\right)+F_{i,j}
# \end{split}
# $$
#
# The $v$-momentum equation:
#
# $$
# \begin{split}
# & \frac{v_{i,j}^{n+1}-v_{i,j}^{n}}{\Delta t}+u_{i,j}^{n}\frac{v_{i,j}^{n}-v_{i-1,j}^{n}}{\Delta x}+v_{i,j}^{n}\frac{v_{i,j}^{n}-v_{i,j-1}^{n}}{\Delta y} = \\
# & \qquad -\frac{1}{\rho}\frac{p_{i,j+1}^{n}-p_{i,j-1}^{n}}{2\Delta y} \\
# & \qquad +\nu\left(\frac{v_{i+1,j}^{n}-2v_{i,j}^{n}+v_{i-1,j}^{n}}{\Delta x^2}+\frac{v_{i,j+1}^{n}-2v_{i,j}^{n}+v_{i,j-1}^{n}}{\Delta y^2}\right)
# \end{split}
# $$
#
# And the pressure equation:
#
# $$
# \begin{split}
# & \frac{p_{i+1,j}^{n}-2p_{i,j}^{n}+p_{i-1,j}^{n}}{\Delta x^2} + \frac{p_{i,j+1}^{n}-2p_{i,j}^{n}+p_{i,j-1}^{n}}{\Delta y^2} = \\
# & \qquad \rho\left[\frac{1}{\Delta t}\left(\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x}+\frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\right) - \frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x}\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x} - 2\frac{u_{i,j+1}-u_{i,j-1}}{2\Delta y}\frac{v_{i+1,j}-v_{i-1,j}}{2\Delta x} - \frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\right]
# \end{split}
# $$
# As always, we need to re-arrange these equations to the form we need in the code to make the iterations proceed.
#
# For the $u$- and $v$ momentum equations, we isolate the velocity at time step `n+1`:
#
# $$
# \begin{split}
# u_{i,j}^{n+1} = u_{i,j}^{n} & - u_{i,j}^{n} \frac{\Delta t}{\Delta x} \left(u_{i,j}^{n}-u_{i-1,j}^{n}\right) - v_{i,j}^{n} \frac{\Delta t}{\Delta y} \left(u_{i,j}^{n}-u_{i,j-1}^{n}\right) \\
# & - \frac{\Delta t}{\rho 2\Delta x} \left(p_{i+1,j}^{n}-p_{i-1,j}^{n}\right) \\
# & + \nu\left[\frac{\Delta t}{\Delta x^2} \left(u_{i+1,j}^{n}-2u_{i,j}^{n}+u_{i-1,j}^{n}\right) + \frac{\Delta t}{\Delta y^2} \left(u_{i,j+1}^{n}-2u_{i,j}^{n}+u_{i,j-1}^{n}\right)\right] \\
# & + \Delta t F
# \end{split}
# $$
#
# $$
# \begin{split}
# v_{i,j}^{n+1} = v_{i,j}^{n} & - u_{i,j}^{n} \frac{\Delta t}{\Delta x} \left(v_{i,j}^{n}-v_{i-1,j}^{n}\right) - v_{i,j}^{n} \frac{\Delta t}{\Delta y} \left(v_{i,j}^{n}-v_{i,j-1}^{n}\right) \\
# & - \frac{\Delta t}{\rho 2\Delta y} \left(p_{i,j+1}^{n}-p_{i,j-1}^{n}\right) \\
# & + \nu\left[\frac{\Delta t}{\Delta x^2} \left(v_{i+1,j}^{n}-2v_{i,j}^{n}+v_{i-1,j}^{n}\right) + \frac{\Delta t}{\Delta y^2} \left(v_{i,j+1}^{n}-2v_{i,j}^{n}+v_{i,j-1}^{n}\right)\right]
# \end{split}
# $$
#
# And for the pressure equation, we isolate the term $p_{i,j}^n$ to iterate in pseudo-time:
#
# $$
# \begin{split}
# p_{i,j}^{n} = & \frac{\left(p_{i+1,j}^{n}+p_{i-1,j}^{n}\right) \Delta y^2 + \left(p_{i,j+1}^{n}+p_{i,j-1}^{n}\right) \Delta x^2}{2(\Delta x^2+\Delta y^2)} \\
# & -\frac{\rho\Delta x^2\Delta y^2}{2\left(\Delta x^2+\Delta y^2\right)} \\
# & \times \left[\frac{1}{\Delta t} \left(\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x} + \frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\right) - \frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x}\frac{u_{i+1,j}-u_{i-1,j}}{2\Delta x} - 2\frac{u_{i,j+1}-u_{i,j-1}}{2\Delta y}\frac{v_{i+1,j}-v_{i-1,j}}{2\Delta x} - \frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\frac{v_{i,j+1}-v_{i,j-1}}{2\Delta y}\right]
# \end{split}
# $$
# The initial condition is $u, v, p=0$ everywhere, and at the boundary conditions are:
#
# $u, v, p$ are periodic on $x=0,2$
#
# $u, v =0$ at $y =0,2$
#
# $\frac{\partial p}{\partial y}=0$ at $y =0,2$
#
# $F=1$ everywhere.
#
# Let's begin by importing our usual run of libraries:
#
import numpy
from matplotlib import pyplot, cm
from mpl_toolkits.mplot3d import Axes3D
# %matplotlib inline
# In step 11, we isolated a portion of our transposed equation to make it easier to parse and we're going to do the same thing here. One thing to note is that we have periodic boundary conditions throughout this grid, so we need to explicitly calculate the values at the leading and trailing edge of our `u` vector.
def build_up_b(rho, dt, dx, dy, u, v):
    """Build the source term ``b`` of the pressure-Poisson equation.

    Uses central differences in the interior, plus explicit wrap-around
    columns at x = 0 and x = 2 for the periodic boundary.  Returns a new
    array shaped like ``u``.
    """
    b = numpy.zeros_like(u)

    def source(u_e, u_w, u_n, u_s, v_e, v_w, v_n, v_s):
        # rho * [ (1/dt) div(u) - (du/dx)^2 - 2 (du/dy)(dv/dx) - (dv/dy)^2 ]
        du_dx = (u_e - u_w) / (2 * dx)
        dv_dy = (v_n - v_s) / (2 * dy)
        du_dy = (u_n - u_s) / (2 * dy)
        return rho * (1 / dt * (du_dx + dv_dy) -
                      du_dx**2 -
                      2 * (du_dy * (v_e - v_w) / (2 * dx)) -
                      dv_dy**2)

    # Interior points.
    b[1:-1, 1:-1] = source(u[1:-1, 2:], u[1:-1, 0:-2], u[2:, 1:-1], u[0:-2, 1:-1],
                           v[1:-1, 2:], v[1:-1, 0:-2], v[2:, 1:-1], v[0:-2, 1:-1])
    # Periodic BC Pressure @ x = 2 (east neighbor wraps to column 0).
    b[1:-1, -1] = source(u[1:-1, 0], u[1:-1, -2], u[2:, -1], u[0:-2, -1],
                         v[1:-1, 0], v[1:-1, -2], v[2:, -1], v[0:-2, -1])
    # Periodic BC Pressure @ x = 0 (west neighbor wraps to column -1).
    b[1:-1, 0] = source(u[1:-1, 1], u[1:-1, -1], u[2:, 0], u[0:-2, 0],
                        v[1:-1, 1], v[1:-1, -1], v[2:, 0], v[0:-2, 0])
    return b
# We'll also define a Pressure Poisson iterative function, again like we did in Step 11. Once more, note that we have to include the periodic boundary conditions at the leading and trailing edge. We also have to specify the boundary conditions at the top and bottom of our grid.
def pressure_poisson_periodic(p, dx, dy):
    """Relax the pressure-Poisson equation with periodic BCs in x.

    Performs ``nit`` Jacobi-style pseudo-time iterations.  NOTE(review):
    relies on the module-level globals ``nit`` (iteration count) and ``b``
    (source term from build_up_b).  ``p`` is updated in place and returned.
    NOTE: a dead pre-allocation (pn = numpy.empty_like(p), immediately
    overwritten inside the loop) was removed.
    """
    denom = 2 * (dx**2 + dy**2)
    for _ in range(nit):
        pn = p.copy()
        # Interior points.
        p[1:-1, 1:-1] = (((pn[1:-1, 2:] + pn[1:-1, 0:-2]) * dy**2 +
                          (pn[2:, 1:-1] + pn[0:-2, 1:-1]) * dx**2) /
                         denom -
                         dx**2 * dy**2 / denom * b[1:-1, 1:-1])

        # Periodic BC Pressure @ x = 2 (wrap to column 0).
        p[1:-1, -1] = (((pn[1:-1, 0] + pn[1:-1, -2]) * dy**2 +
                        (pn[2:, -1] + pn[0:-2, -1]) * dx**2) /
                       denom -
                       dx**2 * dy**2 / denom * b[1:-1, -1])

        # Periodic BC Pressure @ x = 0 (wrap to column -1).
        p[1:-1, 0] = (((pn[1:-1, 1] + pn[1:-1, -1]) * dy**2 +
                       (pn[2:, 0] + pn[0:-2, 0]) * dx**2) /
                      denom -
                      dx**2 * dy**2 / denom * b[1:-1, 0])

        # Wall boundary conditions, pressure: dp/dy = 0 at y = 0 and y = 2.
        p[-1, :] = p[-2, :]
        p[0, :] = p[1, :]

    return p
# Now we have our familiar list of variables and initial conditions to declare before we start.
# +
##variable declarations
nx = 41   # grid points in x
ny = 41   # grid points in y
nt = 10   # declared but not used by the convergence-driven loop below — TODO confirm
nit = 50  # pseudo-time iterations of the pressure Poisson solver
c = 1
dx = 2 / (nx - 1)
dy = 2 / (ny - 1)
x = numpy.linspace(0, 2, nx)
y = numpy.linspace(0, 2, ny)
X, Y = numpy.meshgrid(x, y)

##physical variables
rho = 1    # density
nu = .1    # kinematic viscosity
F = 1      # constant source term in the u-momentum equation (pressure gradient)
dt = .01   # time-step size

#initial conditions
u = numpy.zeros((ny, nx))
un = numpy.zeros((ny, nx))

v = numpy.zeros((ny, nx))
vn = numpy.zeros((ny, nx))

p = numpy.ones((ny, nx))
pn = numpy.ones((ny, nx))

b = numpy.zeros((ny, nx))
# -
# For the meat of our computation, we're going to reach back to a trick we used in Step 9 for Laplace's Equation. We're interested in what our grid will look like once we've reached a near-steady state. We can either specify a number of timesteps `nt` and increment it until we're satisfied with the results, or we can tell our code to run until the difference between two consecutive iterations is very small.
#
# We also have to manage **8** separate boundary conditions for each iteration. The code below writes each of them out explicitly. If you're interested in a challenge, you can try to write a function which can handle some or all of these boundary conditions. If you're interested in tackling that, you should probably read up on Python [dictionaries](http://docs.python.org/2/tutorial/datastructures.html#dictionaries).
# +
# Outer loop: iterate until the relative change of u between consecutive
# steps falls below 0.1% (near-steady state).  Each pass applies the 8
# boundary conditions explicitly.
udiff = 1
stepcount = 0

while udiff > .001:
    un = u.copy()
    vn = v.copy()

    b = build_up_b(rho, dt, dx, dy, u, v)
    p = pressure_poisson_periodic(p, dx, dy)

    # u-momentum, interior points (note the F * dt source term).
    u[1:-1, 1:-1] = (un[1:-1, 1:-1] -
                     un[1:-1, 1:-1] * dt / dx *
                     (un[1:-1, 1:-1] - un[1:-1, 0:-2]) -
                     vn[1:-1, 1:-1] * dt / dy *
                     (un[1:-1, 1:-1] - un[0:-2, 1:-1]) -
                     dt / (2 * rho * dx) *
                     (p[1:-1, 2:] - p[1:-1, 0:-2]) +
                     nu * (dt / dx**2 *
                     (un[1:-1, 2:] - 2 * un[1:-1, 1:-1] + un[1:-1, 0:-2]) +
                     dt / dy**2 *
                     (un[2:, 1:-1] - 2 * un[1:-1, 1:-1] + un[0:-2, 1:-1])) +
                     F * dt)

    # v-momentum, interior points (no source term).
    v[1:-1, 1:-1] = (vn[1:-1, 1:-1] -
                     un[1:-1, 1:-1] * dt / dx *
                     (vn[1:-1, 1:-1] - vn[1:-1, 0:-2]) -
                     vn[1:-1, 1:-1] * dt / dy *
                     (vn[1:-1, 1:-1] - vn[0:-2, 1:-1]) -
                     dt / (2 * rho * dy) *
                     (p[2:, 1:-1] - p[0:-2, 1:-1]) +
                     nu * (dt / dx**2 *
                     (vn[1:-1, 2:] - 2 * vn[1:-1, 1:-1] + vn[1:-1, 0:-2]) +
                     dt / dy**2 *
                     (vn[2:, 1:-1] - 2 * vn[1:-1, 1:-1] + vn[0:-2, 1:-1])))

    # Periodic BC u @ x = 2 (east neighbor wraps to column 0).
    u[1:-1, -1] = (un[1:-1, -1] - un[1:-1, -1] * dt / dx *
                   (un[1:-1, -1] - un[1:-1, -2]) -
                   vn[1:-1, -1] * dt / dy *
                   (un[1:-1, -1] - un[0:-2, -1]) -
                   dt / (2 * rho * dx) *
                   (p[1:-1, 0] - p[1:-1, -2]) +
                   nu * (dt / dx**2 *
                   (un[1:-1, 0] - 2 * un[1:-1, -1] + un[1:-1, -2]) +
                   dt / dy**2 *
                   (un[2:, -1] - 2 * un[1:-1, -1] + un[0:-2, -1])) + F * dt)

    # Periodic BC u @ x = 0 (west neighbor wraps to column -1).
    u[1:-1, 0] = (un[1:-1, 0] - un[1:-1, 0] * dt / dx *
                  (un[1:-1, 0] - un[1:-1, -1]) -
                  vn[1:-1, 0] * dt / dy *
                  (un[1:-1, 0] - un[0:-2, 0]) -
                  dt / (2 * rho * dx) *
                  (p[1:-1, 1] - p[1:-1, -1]) +
                  nu * (dt / dx**2 *
                  (un[1:-1, 1] - 2 * un[1:-1, 0] + un[1:-1, -1]) +
                  dt / dy**2 *
                  (un[2:, 0] - 2 * un[1:-1, 0] + un[0:-2, 0])) + F * dt)

    # Periodic BC v @ x = 2.
    v[1:-1, -1] = (vn[1:-1, -1] - un[1:-1, -1] * dt / dx *
                   (vn[1:-1, -1] - vn[1:-1, -2]) -
                   vn[1:-1, -1] * dt / dy *
                   (vn[1:-1, -1] - vn[0:-2, -1]) -
                   dt / (2 * rho * dy) *
                   (p[2:, -1] - p[0:-2, -1]) +
                   nu * (dt / dx**2 *
                   (vn[1:-1, 0] - 2 * vn[1:-1, -1] + vn[1:-1, -2]) +
                   dt / dy**2 *
                   (vn[2:, -1] - 2 * vn[1:-1, -1] + vn[0:-2, -1])))

    # Periodic BC v @ x = 0.
    v[1:-1, 0] = (vn[1:-1, 0] - un[1:-1, 0] * dt / dx *
                  (vn[1:-1, 0] - vn[1:-1, -1]) -
                  vn[1:-1, 0] * dt / dy *
                  (vn[1:-1, 0] - vn[0:-2, 0]) -
                  dt / (2 * rho * dy) *
                  (p[2:, 0] - p[0:-2, 0]) +
                  nu * (dt / dx**2 *
                  (vn[1:-1, 1] - 2 * vn[1:-1, 0] + vn[1:-1, -1]) +
                  dt / dy**2 *
                  (vn[2:, 0] - 2 * vn[1:-1, 0] + vn[0:-2, 0])))

    # Wall BC: no-slip, u = v = 0 @ y = 0, 2.
    u[0, :] = 0
    u[-1, :] = 0
    v[0, :] = 0
    v[-1, :] = 0

    # Relative change of the total u between iterations (convergence metric).
    udiff = (numpy.sum(u) - numpy.sum(un)) / numpy.sum(u)
    stepcount += 1
# -
# You can see that we've also included a variable `stepcount` to see how many iterations our loop went through before our stop condition was met.
# Number of outer iterations needed to reach the steady-state tolerance.
print(stepcount)
# If you want to see how the number of iterations increases as our `udiff` condition gets smaller and smaller, try defining a function to perform the `while` loop written above that takes an input `udiff` and outputs the number of iterations that the function runs.
#
# For now, let's look at our results. We've used the quiver function to look at the cavity flow results and it works well for channel flow, too.
fig = pyplot.figure(figsize = (11,7), dpi=100)
# FIX: the slice indices on this line had been corrupted into a garbled
# literal; per the explanation below, every 3rd data point is plotted.
pyplot.quiver(X[::3, ::3], Y[::3, ::3], u[::3, ::3], v[::3, ::3]);
# The structures in the `quiver` command that look like `[::3, ::3]` are useful when dealing with large amounts of data that you want to visualize. The one used above tells `matplotlib` to only plot every 3rd data point. If we leave it out, you can see that the results can appear a little crowded.
fig = pyplot.figure(figsize = (11,7), dpi=100)
pyplot.quiver(X, Y, u, v);
# ## Learn more
# ***
# ##### What is the meaning of the $F$ term?
#
# Step 12 is an exercise demonstrating the problem of flow in a channel or pipe. If you recall from your fluid mechanics class, a specified pressure gradient is what drives Poiseuille flow.
#
# Recall the $x$-momentum equation:
#
# $$\frac{\partial u}{\partial t}+u \cdot \nabla u = -\frac{\partial p}{\partial x}+\nu \nabla^2 u$$
#
# What we actually do in Step 12 is split the pressure into steady and unsteady components $p=P+p'$. The applied steady pressure gradient is the constant $-\frac{\partial P}{\partial x}=F$ (interpreted as a source term), and the unsteady component is $\frac{\partial p'}{\partial x}$. So the pressure that we solve for in Step 12 is actually $p'$, which for a steady flow is in fact equal to zero everywhere.
#
# <b>Why did we do this?</b>
#
# Note that we use periodic boundary conditions for this flow. For a flow with a constant pressure gradient, the value of pressure on the left edge of the domain must be different from the pressure at the right edge. So we cannot apply periodic boundary conditions on the pressure directly. It is easier to fix the gradient and then solve for the perturbations in pressure.
#
# <b>Shouldn't we always expect a uniform/constant $p'$ then?</b>
#
# That's true only in the case of steady laminar flows. At high Reynolds numbers, flows in channels can become turbulent, and we will see unsteady fluctuations in the pressure, which will result in non-zero values for $p'$.
#
# In step 12, note that the pressure field itself is not constant, but it's the pressure perturbation field that is. The pressure field varies linearly along the channel with slope equal to the pressure gradient. Also, for incompressible flows, the absolute value of the pressure is inconsequential.
#
# ##### And explore more CFD materials online
# The interactive module **12 steps to Navier–Stokes** is one of several components of the Computational Fluid Dynamics class taught by Prof. <NAME> in Boston University between 2009 and 2013.
#
# For a sample of what the other components of this class are, you can explore the **Resources** section of the Spring 2013 version of [the course's Piazza site](https://piazza.com/bu/spring2013/me702/resources).
#
# ***
from IPython.core.display import HTML
def css_styling():
    """Read the notebook's custom CSS file and wrap it for display."""
    # FIX: use a context manager so the file handle is closed promptly
    # (the original opened the file without ever closing it).
    with open("../styles/custom.css", "r") as f:
        styles = f.read()
    return HTML(styles)
css_styling()
# (The cell above executes the style for this notebook.)
| modified_lessons/15_Step_12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/python
import os, sys
import json
import numpy as np
import re
import random
### YOUR CODE HERE: write at least three functions which solve
### specific tasks by transforming the input x and returning the
### result. Name them according to the task ID as in the three
### examples below. Delete the three examples. The tasks you choose
### must be in the data/training directory, not data/evaluation.
"""def solve_d4f3cd78(x):
# Iterate through the grid to find the top and bottom lines (have value of 5) and the box they enclose
for i in range(x.shape[0]):
top_line = np.where(x[i]==5)
# Find the top line
if len(top_line[0])>0:
#print(i, top_line[0])
break
# to find the bottom line start at the bottom grid and work upwards
for j in range(x.shape[0]-1,0,-1):
bottom_line = np.where(x[j]==5)
# Find the bottom line
if len(bottom_line[0])>0:
#print(j, bottom_line[0])
break
# Fill the contents of this grid with the 8 value
for k in range(i+1, j):
line = x[k]
line[top_line[0][1]: top_line[0][-1]]=8
# Create a grid in local space and identify the opening
grid = x[i:j+1, top_line[0][0]: top_line[0][-1]+1]
opening_local = np.argwhere(grid == 0)
# Create an entry for the opening we can use to locate it in the global grid
grid[opening_local[0][0],opening_local[0][1]]=10
opening_global = [np.where(x==10)[0][0], np.where(x==10)[1][0]]
x[opening_global[0] ,opening_global[1]] =8
for i in range(4):
# North facing opening
if x[opening_global[0] -1,opening_global[1]] == 0:
x[0:opening_global[0] ,opening_global[1]] = 8
#print("North facing")
# South facing opening
elif x[opening_global[0] +1,opening_global[1]] == 0:
x[opening_global[0]: ,opening_global[1]] =8
#print("South facing")
# East facing opening
elif x[opening_global[0] ,opening_global[1]+1] == 0:
x[opening_global[0] ,opening_global[1]:] = 8
#print("East facing")
# West facing opening
elif x[opening_global[0] ,opening_global[1]-1] == 0:
x[opening_global[0] ,:opening_global[1]] =8
#print("West facing")
return x"""
"""def solve_2dd70a9a(x):
x_ = x.copy()
# State action pairs - this modifies the current position depending on the current direction of travel
state_action = {'north':(1, 0), 'south':(-1, 0), 'east': (0, 1), 'west':(0, -1)}
# Locate the start and end points
start_point = np.argwhere(x ==3)
end_point = np.argwhere(x ==2)
# Define the
y_range, x_range = x.shape
def orientation(start_point, end_point):
# Get general orientation of the start points - the end points also have the same orientation
if start_point[0][0] != start_point[1][0]:
orientation='NS'
# Now find out if the starting position is north or south facing
if start_point[0][0] > end_point[0][0]:
direction = 'north'
initial_position = [start_point[0][0], start_point[0][1]]
elif start_point[0][0] < end_point[0][0]:
direction = 'south'
initial_position = [start_point[1][0], start_point[1][1]]
# If not North/South facing then it must be East/West
elif start_point[0][1] != start_point[1][1]:
orientation='EW'
# If the start point is further right than the finish then we must go towards the west
if start_point[0][1] > end_point[0][1]:
direction = 'west'
initial_position = [start_point[0][0], start_point[0][1]]
elif start_point[0][1] < end_point[0][1]:
direction = 'east'
initial_position = [start_point[1][0], start_point[1][1]]
# If the start and end points are aligned, go in the direction of more space
elif start_point[0][1] == end_point[0][1]:
if start_point[0][1]>x_range/2:
direction = 'west'
initial_position = [start_point[0][0], start_point[0][1]]
else:
direction = 'east'
initial_position = [start_point[1][0], start_point[1][1]]
return direction, initial_position
direction, initial_position = orientation(start_point, end_point)
# Define the actions for each state
state_action = {'north':(1, 0), 'south':(-1, 0), 'east': (0, -1), 'west':(0, 1)}
current_position = initial_position.copy()
for i in range(100):
pos1 = current_position
# If the current position is on the boundary, reset to the initial conditions to avoid falling off the edge of the world
if current_position[0] == 0 or current_position[0] == y_range-1 or current_position[1] == 0 or current_position[1] == x_range-1:
direction, initial_position = orientation(start_point, end_point)
current_position = initial_position
x_ = x.copy()
else:
pass
# If the next step has the value 2 to show an end point, print the grid and stop the program
if x_[np.subtract(current_position, state_action[direction])[0],np.subtract(current_position, state_action[direction])[1]] == 2:
print(x)
success='yes'
break
else:
pass
# If the next gridspace is a 0, take the appropriate move according to the state-action dictionary and update cell value and the current position
if x_[np.subtract(current_position, state_action[direction])[0],np.subtract(current_position, state_action[direction])[1]] ==0:
current_position = np.subtract(current_position, state_action[direction])[0],np.subtract(current_position, state_action[direction])[1]
x_[current_position] = 3
pos2 = current_position
#print(f'Initial point {initial_position}. Direction: {direction}. Current position: {current_position}')
# If the next grid space is an 8, take a random direction depending on the current state of the system.
elif x_[np.subtract(current_position, state_action[direction])[0],np.subtract(current_position, state_action[direction])[1]] == 8:
if (direction == 'east') or (direction == 'west'):
direction = random.choice(["north", "south"])
elif (direction == 'north') or (direction == 'south'):
direction = random.choice(["east", "west"])
# If there is any situation that is not caught in the above, reset to the initial conditions
else:
direction, initial_position = orientation(start_point, end_point)
current_position = initial_position
x_ = x.copy()
return x_
"""
def solve_83302e8f(x):
    """Solve ARC task 83302e8f.

    Marks every cell reachable through a gap in the walls with 4 (yellow)
    and every remaining enclosed 0-cell with 3 (green).

    x: 2-D numpy grid for the task. Returns a new grid; the input is not
    modified in place.
    """
    # create a copy to avoid overwriting the input
    x_ = x.copy()
    import itertools
    # Create an array of gaps in walls - these will be used to 'seed' the
    # yellows to make sure they propagate through the walls.
    gaps = np.empty((0,2), int)
    # find out how regular the walls are, the test space is square and walls
    # are regularly spaced so only need to check one direction
    spacing = np.argwhere(x_[0]!=0)
    # Find the holes in the walls, first on the N/S running walls, then on
    # the E/W running walls
    for row, i in itertools.product(range(len(x_)), spacing):
        if x_[row][i][0] ==0:
            #print(row, i[0], x[row][i][0], "vertical")
            gap = np.array([row, i[0]])
            gaps = np.concatenate((gaps,[gap]))
        # NOTE(review): this branch reads `x` (the untouched input) rather
        # than `x_` — presumably equivalent because only 4s have been written
        # to x_ at this point, but worth confirming.
        elif x[i][0][row] ==0:
            #print(i[0], row, x[i][0][row], "horizontal")
            gap = np.array([i[0], row])
            gaps = np.concatenate((gaps,[gap]))
    # Where there are gaps, make it yellow/4
    for gap in gaps:
        x_[gap[0], gap[1]] = 4
    # Create the directions of travel for our rabbit - N/S/E/W
    directions =[[1, 0],
                [-1, 0],
                [0, 1],
                [0, -1]]
    # It takes a couple of runs to allow the 4s to propagate through the
    # available space
    for run in ['run_1', 'run_2', 'run_3']:
        # Now iterate for every grid space, gap and direction
        for i, gap, direction in itertools.product(range(len(x)**2), gaps, directions):
            # Seed the current position at a gap in the wall
            current_pos = gap
            # Some movements are illegal; out-of-range indexing raises and is
            # treated as "cannot move" — move on to the next gap.
            try:
                # While the next step in this direction lands on a blank (0) cell...
                while x_[np.subtract(current_pos[0], direction[0]), np.subtract(current_pos[1], direction[1])] == 0 :
                    # Stop before stepping off the edge of the grid.
                    # NOTE(review): this bound check runs *after* the while
                    # condition has already indexed with the candidate
                    # position, so a negative index wraps around (numpy
                    # semantics) before the break fires — TODO confirm this
                    # cannot mis-paint the opposite edge.
                    if (np.subtract(current_pos[0], direction[0]) < 0 ) or (np.subtract(current_pos[1], direction[1]) < 0) :
                        break
                    elif (np.subtract(current_pos[0], direction[0]) >= len(x)) | (np.subtract(current_pos[1], direction[1]) >= len(x)) :
                        break
                    # Safe to move: paint the cell 4 and advance the current
                    # position for the next iteration.
                    else:
                        x_[np.subtract(current_pos[0], direction[0]), np.subtract(current_pos[1], direction[1])] = 4
                        current_pos = np.subtract(current_pos[0], direction[0]), np.subtract(current_pos[1], direction[1])
                        #print(current_pos, direction,np.subtract(current_pos[0], direction[0]), np.subtract(current_pos[1], direction[1]) )
            except:
                pass
        # Update our gaps array with all the cells marked 4 and loop through
        # again - this will catch any cells we haven't reached yet in case
        # the fill has painted itself into a corner
        gaps = np.argwhere(x_==4)
    # Finally, any remaining 0s need to be assigned to 3s.
    x_[x_==0] = 3
    return x_
def main():
    """Discover every solve_<taskID> function defined at module level and
    run it against the corresponding ARC training task.

    Returns the (train/test) data tuple of the last task processed, which
    is handy for interactive inspection in the notebook.
    """
    # Task IDs are exactly 8 lowercase hex characters, e.g. solve_83302e8f.
    pattern = r"solve_([a-f0-9]{8})"
    solvers = []
    # globals() maps every module-level name to its value; pick out the
    # functions whose names match the solver pattern.
    for name in globals():
        match = re.match(pattern, name)
        if match:
            solvers.append((match.group(1), globals()[name]))
    for task_id, solver in solvers:
        # Read this task's JSON data and run the solver over every example.
        json_filename = os.path.join("..", "data", "training", task_id + ".json")
        data = read_ARC_JSON(json_filename)
        test(task_id, solver, data)
    return data
def read_ARC_JSON(filepath):
    """Given a filepath, read in the ARC task data which is in JSON
    format. Extract the train/test input/output pairs of
    grids. Convert each grid to np.array and return train_input,
    train_output, test_input, test_output.
    """
    # Use a context manager so the file handle is closed promptly —
    # the original json.load(open(filepath)) leaked the handle.
    with open(filepath) as f:
        data = json.load(f)
    # Each grid arrives as a list of lists of ints; convert to numpy arrays.
    train_input = [np.array(pair['input']) for pair in data['train']]
    train_output = [np.array(pair['output']) for pair in data['train']]
    test_input = [np.array(pair['input']) for pair in data['test']]
    test_output = [np.array(pair['output']) for pair in data['test']]
    return (train_input, train_output, test_input, test_output)
def test(taskID, solve, data):
    """Given a task ID, call the given solve() function on every
    example in the task data."""
    print(taskID)
    train_in, train_out, test_in, test_out = data
    # Run the training examples first, then the held-out test examples,
    # printing a section header before each group.
    for header, inputs, expected in (("Training grids", train_in, train_out),
                                     ("Test grids", test_in, test_out)):
        print(header)
        for grid, target in zip(inputs, expected):
            show_result(grid, target, solve(grid))
def show_result(x, y, yhat):
    """Print one example's input grid, the expected output, our computed
    output, and a correctness verdict."""
    for label, grid in (("Input", x),
                        ("Correct output", y),
                        ("Our output", yhat)):
        print(label)
        print(grid)
    print("Correct?")
    # A shape mismatch can never be correct, and element-wise comparison
    # would fail, so report it explicitly.
    if y.shape != yhat.shape:
        print(f"False. Incorrect shape: {y.shape} v {yhat.shape}")
    else:
        print(np.all(y == yhat))
if __name__ == "__main__":data = main()
# +
x = data[0][2].copy()
import itertools
# Create an array of gaps in walls - these will be used to 'seed' the yellows to make sure they propogate through the walls.
gaps = np.empty((0,2), int)
spacing = np.argwhere(x[0]!=0)
for row, i in itertools.product(range(len(x)), spacing):
if x[row][i][0] ==0:
#print(row, i[0], x[row][i][0], "vertical")
gap = np.array([row, i[0]])
gaps = np.concatenate((gaps,[gap]))
elif x[i][0][row] ==0:
#print(i[0], row, x[i][0][row], "horizontal")
gap = np.array([i[0], row])
gaps = np.concatenate((gaps,[gap]))
for gap in gaps:
x[gap[0], gap[1]] = 4
directions =[[1, 0],
[-1, 0],
[0, 1],
[0, -1]]
for run in ['run_1', 'run_2']:
for i, gap, direction in itertools.product(range(len(x)**2), gaps, directions):
current_pos = gap
try:
while x[np.subtract(current_pos[0], direction[0]), np.subtract(current_pos[1], direction[1])] == 0 :
if (np.subtract(current_pos[0], direction[0]) < 0 ) or (np.subtract(current_pos[1], direction[1]) < 0) :
break
elif (np.subtract(current_pos[0], direction[0]) >= len(x)) | (np.subtract(current_pos[1], direction[1]) >= len(x)) :
break
else:
x[np.subtract(current_pos[0], direction[0]), np.subtract(current_pos[1], direction[1])] = 4
current_pos = np.subtract(current_pos[0], direction[0]), np.subtract(current_pos[1], direction[1])
#print(current_pos, direction,np.subtract(current_pos[0], direction[0]), np.subtract(current_pos[1], direction[1]) )
except:
pass
gaps = np.argwhere(x==4)
return x
# -
print(x)
np.subtract(0, 1)
| src/interactive_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <span style="display:block;text-align:center;margin-right:105px"><img src="../../media/logos/logo-vertical.png" width="200"/></span>
#
# # Section 5: Modelling & Simulation
#
# ---
# # Table of Contents
#
# * [System Requirements (Part 1)](#System-Requirements)
# * [Model Introduction](#Model-Introduction)
# * [Requirements Analysis](#Requirements-Analysis) <!-- [Toy Model Introduction](#Toy-Model-Introduction:-An-Ecosystem-Model) [Some Context: Lotka & Volterra](#Some-Context:-Lotka-&-Volterra)-->
# * [Visual System Mapping: Entity Relationship Diagram](#Visual-System-Mapping:-Entity-Relationship-Diagram)
# * [Visual System Mapping: Stock & Flow Diagram](#Visual-System-Mapping:-Stock-&-Flow-Diagram)
# * [Mathematical Specification](#Mathematical-Specification)
# * [System Design (Part 2)](#System-Design)
# * [Differential Specification](#Differential-Specification)
# * [cadCAD Standard Notebook Layout](#cadCAD-Standard-Notebook-Layout)
# 0. [Dependencies](#0.-Dependencies)
# 1. [State Variables](#1.-State-Variables)
# 2. [System Parameters](#2.-System-Parameters)
# 3. [Policy Functions](#3.-Policy-Functions)
# 4. [State Update Functions](#4.-State-Update-Functions)
# 5. [Partial State Update Blocks](#5.-Partial-State-Update-Blocks)
# 6. [Configuration](#6.-Configuration)
# 7. [Execution](#7.-Execution)
# 8. [Simulation Output Preparation](#8.-Simulation-Output-Preparation)
# 9. [Simulation Analysis](#9.-Simulation-Analysis)
# * [System Validation (Part 3)](#System-Validation)
# * [Policy Functions](#Policy-Functions)
# * [Model Improvements](#Model-Improvements)
# * [Differential Specification Updates](#Differential-Specification-Updates)
# * [Mathematical Specification Updates](#Mathematical-Specification-Updates)
# * [Model Limitations](#Model-Limitations)
#
# <!--
# * [Toy Model Introduction](#Toy-Model-Introduction:-An-Ecosystem-Model)
# * [Some Context: Lotka & Volterra](#Some-Context:-Lotka-&-Volterra)
# * [Visual System Mapping: Entity Relationship Diagram](#Visual-System-Mapping:-Entity-Relationship-Diagram)
# * [Requirements Analysis](#Requirements-Analysis)
# * [Mathematical Specification](#Mathematical-Specification)
# * [Visual System Mapping: Stock & Flow Diagram](#Visual-System-Mapping:-Stock-&-Flow-Diagram)
# * [Differential Specification](#Differential-Specification)
# * [cadCAD Standard Notebook Layout](#cadCAD-Standard-Notebook-Layout)
# 0. [Dependencies](#0.-Dependencies)
# 1. [State Variables](#1.-State-Variables)
# 2. [System Parameters](#2.-System-Parameters)
# 3. [Policy Functions](#3.-Policy-Functions)
# 4. [State Update Functions](#4.-State-Update-Functions)
# 5. [Partial State Update Blocks](#5.-Partial-State-Update-Blocks)
# 6. [Configuration](#6.-Configuration)
# 7. [Execution](#7.-Execution)
# 8. [Simulation Output Preparation](#8.-Simulation-Output-Preparation)
# 9. [Simulation Analysis](#9.-Simulation-Analysis)
# -->
# ---
# # System Requirements
# <center><img src="images/edp-phase-1.png" alt="Engineering Design Process, phase 1 - System requirements" width="60%"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Model Introduction
#
# > Ecosystem: a biological community of interacting organisms and their physical environment.
#
# <center>
# <img src="./images/ecosystem.png"
# alt="Ecosystem"
# style="width: 200px;" />
# </center>
# -
# <center>
# <img src="./images/lotka-volterra.png"
# alt="Lotka-Volterra"
# style="width: 60%;" />
# </center>
# ## Requirements Analysis
# [Link to Simulation Analysis](#9.-Simulation-Analysis)
# Illustrative real-world model applications:
# * Forecast animal food consumption, to determine the sustainability of a farming operation, and to plan for worst-case scenarios.
# * Given a food supply of a number of standard crops, of varying cost, how do we optimize economic performance of the farming operation?
# * Given the ecological impact of a certain crop on the fertility of the soil, how do we balance the economic performance and ecological sustainability of the farming operation?
# ### Questions
#
# 1. How long will our model ecosystem be able to sustain itself for?
# 2. What population size can our model ecosystem support?
#
# ### Assumptions
#
# 1. The population will increase over time.
# 2. The food supply will decrease over time.
# 3. There is some relationship between the population and food supply.
#
# ### Constraints / Scope
#
# * The intention of this toy model is to allow us to learn about cadCAD, the modelling process, simulation configuration, and the engineering design process!
# ## Visual System Mapping: Entity Relationship Diagram
# <!-- https://en.wikipedia.org/wiki/Entity%E2%80%93relationship_model -->
# <center>
# <img src="./images/s5-entity-relationship-diagram.png"
# alt="Entity relationship diagram"
# style="width: 60%;" />
# </center>
# ## Visual System Mapping: Stock & Flow Diagram
# <center>
# <img src="./images/s5-stock-and-flow-ecosystem.png"
# alt="Stock and flow"
# style="width: 60%;" />
# </center>
# ## Mathematical Specification
#
# > ...differential equations play a prominent role in many disciplines including engineering, physics, economics, and biology.
# ### Differential Equations
# * A population consumes a food source, and reproduces at a rate proportional to the food source.
# * The food source is consumed at a rate proportional to the population.
#
# \begin{align}
# \large population_t &\large= population_{t-1} + {\Delta population} \quad \textrm{(sheep)} \tag{1} \\
# \large food_t &\large= food_{t-1} + {\Delta food} \quad \textrm{(tons of grass)} \tag{2} \\
# \end{align}
#
# where the rate of change ($\Delta$) is:
# \begin{align}
# \large {\Delta population} &\large= \alpha * food_{t-1} \quad \textrm{(sheep/month)} \\
# \large {\Delta food} &\large= -\beta * population_{t-1} \quad \textrm{(tons of grass/month)}
# \end{align}
# # System Design
# <center><img src="images/edp-phase-2.png" alt="Engineering Design Process, phase 1 - System design" width="60%"/>
# ## Differential Specification
# + [markdown] slideshow={"slide_type": "slide"}
# <center>
# <img src="./images/s5-differential-spec-ecosystem.png"
# alt="Differential spec"
# style="width: 60%;" />
# </center>
# -
# ## cadCAD Standard Notebook Layout
# <center>
# <img src="./images/cadcad-flow.png"
# alt="cadCAD flow"
# style="width: 25%;" />
# </center>
# # 0. Dependencies
# +
# Standard libraries: https://docs.python.org/3/library/
import math
# Analysis and plotting modules
import pandas as pd
# import plotly
# +
# cadCAD configuration modules
from cadCAD.configuration.utils import config_sim
from cadCAD.configuration import Experiment
# cadCAD simulation engine modules
from cadCAD.engine import ExecutionMode, ExecutionContext
from cadCAD.engine import Executor
# -
# # 1. State Variables
# > A state variable is one of the set of variables that are used to describe the mathematical "state" of a dynamical system. ([Wikipedia](https://en.wikipedia.org/wiki/State_variable))
# Initial values of the two state variables at timestep 0.
initial_state = {
    'population': 50, # number of sheep
    'food': 1000 # tons of grass
}
initial_state  # notebook echo of the dict
# ## **Time** as a system state
# <center>
# <img src="./images/discrete-time.svg"
# alt="Discrete time"
# style="width: 200px;" />
# </center>
#
# * 1 **timestep** == 1 month
# # 2. System Parameters
#
# > System parameterization is the process of choosing variables that impact the behaviour of the model. These parameters allow us to perform simulation techniques like parameter sweeps, Monte Carlo simulations, A/B tests, and see how the system behaves under a different model parameter set.
# [Link to Simulation Analysis](#9.-Simulation-Analysis)
# System parameters; each value is wrapped in a list, the shape cadCAD's
# config_sim expects (it enables parameter sweeps over multiple values).
system_params = {
    'reproduction_rate': [0.01], # sheep per month
    'consumption_rate': [0.1], # tons of grass per month
}
system_params  # notebook echo of the dict
# # 3. Policy Functions
#
# > A Policy Function computes one or more signals to be passed to State Update Functions. They describe the logic and behaviour of a system component or mechanism.
#
# We'll cover this in the next section!
# # 4. State Update Functions
# > We create State Update Functions to design the way our model state changes over time. These will usually represent the system differential specification.
# ```python
# def state_update_function(params, substep, state_history, previous_state, policy_input):
# variable_value = 0
# return 'variable_name', variable_value
# ```
#
# * `params` is a Python dictionary containing the **system parameters** <!-- for consistency with the previous definition -->
# * `substep` is an integer value representing a step within a single `timestep`
# * `state_history` is a Python list of all previous states
# * `previous_state` is a Python dictionary that defines what the state of the system was at the **previous timestep** or **substep**
# * `policy_input` is a Python dictionary of signals or actions from **policy functions**
def new_population(current_population, alpha, food_supply):
    """
    The population state after one timestep, according to the differential equation (1):
    current_population + alpha * food_supply
    """
    grown = current_population + alpha * food_supply
    # Round up so the population is always a whole number of sheep.
    return math.ceil(grown)
math.ceil(5.5)  # scratch: demonstrate that ceil rounds up
# +
# Relevant state variables
current_population = initial_state['population']
food_supply = initial_state['food']
# Relevant parameters
reproduction_rate = system_params['reproduction_rate'][0] # "alpha" in our differential equation
new_population(current_population, reproduction_rate, food_supply)
# -
def s_population(params, substep, state_history, previous_state, policy_input):
    """
    State update function: apply differential equation (1) to the
    'population' state variable, clamping the result at zero sheep.
    """
    next_pop = new_population(
        previous_state['population'],
        params['reproduction_rate'],
        previous_state['food'],
    )
    return 'population', max(next_pop, 0)
# Scratch cell: illustrate that a state update function returns a
# (variable_name, value) tuple, and what the next state would look like.
population = 60
print("A tuple!")
'population', max(math.ceil(population), 0)
next_state = {
    # current_population + alpha * food_supply
    'population': math.ceil(50 + 0.01 * 1000),
    'food': 1000
}
next_state
def s_food(params, substep, state_history, previous_state, policy_input):
    """
    State update function: apply differential equation (2) to the 'food'
    state variable, clamping at zero so the supply never goes negative.
    """
    consumed = params['consumption_rate'] * previous_state['population']
    remaining = previous_state['food'] - consumed
    return 'food', max(remaining, 0)
max(-10, 0)  # scratch: demonstrate clamping negative values to zero
# # 5. Partial State Update Blocks
# ## Tying it all together
#
# > A series of Partial State Update Blocks is a structure for composing State Update Functions and Policy Functions in series or parallel, as a representation of the system model.
# <center>
# <img src="./images/basic-psub.png"
# alt="Policy functions"
# style="width: 60%;" />
# </center>
# **Updates run in series**
partial_state_update_blocks = [
    # Run first
    {
        'policies': {}, # Ignore for now
        # State variables
        'variables': {
            'population': s_population
        }
    },
    # Run second
    {
        'policies': {}, # Ignore for now
        # State variables
        'variables': {
            'food': s_food
        }
    }
]
# **Updates run in parallel**
# NOTE: this second assignment overwrites the "series" version above; the
# parallel form (both variables in one block) is what the simulation uses.
partial_state_update_blocks = [
    {
        'policies': {}, # Ignore for now
        # State variables
        'variables': {
            # Updated in parallel
            'population': s_population,
            'food': s_food
        }
    }
]
# # 6. Configuration
# > The configuration stage is about tying all the previous model components together and choosing how the simulation should run.
# <center>
# <img src="./images/cadcad-flow.png"
# alt="cadCAD flow"
# style="width: 25%;" />
# </center>
# Configuration parameters:
# * `'N': 1` - the number of times we'll run the simulation (you'll see them called "Monte Carlo runs" later in the course, when we look at tools to analyze system models)
# * `'T': range(400)` - the number of timesteps the simulation will run for
# * `'M': system_params` - the parameters of the system
# Simulation configuration: 1 run, 400 monthly timesteps, and the system
# parameters defined above.
sim_config = config_sim({
    "N": 1,
    "T": range(400),
    "M": system_params
})
range(400)  # scratch: a range object is lazy...
list(range(400))[0:10]  # ...materialize to see the first few timesteps
from cadCAD import configs
del configs[:] # Clear any prior configs
experiment = Experiment()
experiment.append_configs(
    initial_state = initial_state,
    partial_state_update_blocks = partial_state_update_blocks,
    sim_configs = sim_config
)
configs[-1].__dict__  # inspect the config object just appended
# # 7. Execution
#
# > The Execution Engine takes a model and configuration, and computes the simulation output.
# ## Configuring the cadCAD simulation execution
# Build the execution engine from the accumulated configs and run it.
exec_context = ExecutionContext()
simulation = Executor(exec_context=exec_context, configs=configs)
# ## Time to simulate our ecosystem model!
raw_result, tensor_field, sessions = simulation.execute()
# # 8. Simulation Output Preparation
# > The simulation results are returned as a list of Python dictionaries, which we then convert to a Pandas dataframe. At this stage of the process you'll manipulate and analyze your results to answer questions about your model.
simulation_result = pd.DataFrame(raw_result)
raw_result[:5]  # peek at the raw per-timestep dictionaries
simulation_result.head()
# # 9. Simulation Analysis
# [Link to System Requirements](#Requirements-Analysis)
pd.options.plotting.backend = "plotly"
# After plotting the results, let's go and update the parameters, and then select `Cell` and `Run All Above`:
#
# [Link to System Parameters](#2.-System-Parameters)
simulation_result.plot(
kind='line',
x='timestep',
y=['population','food']
)
pd.set_option('display.max_rows', len(simulation_result))
display(simulation_result)
pd.reset_option('display.max_rows')
# https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html
simulation_result.query('food == 0').head()
# # System Validation
# <center><img src="images/edp-phase-3.png" alt="Engineering Design Process, phase 1 - validation" width="60%"/>
# ## Policy Functions
# An illustrative example:
#
# ```python
# condition = True
#
# def policy_function(params, substep, state_history, previous_state):
# '''
# This logic belongs in the policy function,
# but could also have been placed directly in the state update function.
# '''
# signal_value = 1 if condition else 0
# return {'signal_name': signal_value}
# ```
#
# ```python
# def state_update_function(params, substep, state_history, previous_state, policy_input):
# state_value = policy_input['signal_name']
# return 'state_name', state_value
# ```
# <center>
# <img src="./images/basic-psub.png"
# alt="Policy functions"
# style="width: 60%;" />
# </center>
# <center>
# <img src="./images/policy-explainer.png"
# alt="Policy functions"
# style="width: 60%;" />
# </center>
# ### Policy Aggregation
# <center>
# <img src="./images/policy-aggregation-explainer.png"
# alt="Policy functions"
# style="width: 60%;" />
# </center>
# ## Model Improvements
# ### Differential Specification Updates
# + [markdown] slideshow={"slide_type": "slide"}
# <center>
# <img src="./images/s5-differential-spec-ecosystem-policy-refactor.png"
# alt="Differential spec"
# style="width: 60%;" />
# </center>
# -
# State update functions `s_population()` and `s_food()` from the last part:
# +
def s_population(params, substep, state_history, previous_state, policy_input):
    """Differential equation (1) folded directly into the state update;
    population is rounded up to whole sheep and clamped at zero."""
    grown = previous_state['population'] + params['reproduction_rate'] * previous_state['food']
    return 'population', max(math.ceil(grown), 0)

def s_food(params, substep, state_history, previous_state, policy_input):
    """Differential equation (2) folded directly into the state update;
    food supply is clamped at zero."""
    remaining = previous_state['food'] - params['consumption_rate'] * previous_state['population']
    return 'food', max(remaining, 0)
# -
# Adapting to use **policy functions** to drive the process, and **state update functions** to update the state according to the **differential specification**:
# +
def p_reproduction(params, substep, state_history, previous_state):
    """Policy: births this timestep are proportional to the food supply."""
    born = params['reproduction_rate'] * previous_state['food']
    return {'delta_population': born}

def p_consumption(params, substep, state_history, previous_state):
    """Policy: grass eaten this timestep is proportional to the population;
    emitted as a negative delta_food signal."""
    eaten = params['consumption_rate'] * previous_state['population']
    return {'delta_food': -eaten}
# +
def s_population(params, substep, state_history, previous_state, policy_input):
    """Apply the aggregated population signal from the policies, rounding
    up to whole sheep and clamping at zero."""
    updated = previous_state['population'] + policy_input['delta_population']
    return 'population', max(math.ceil(updated), 0)

def s_food(params, substep, state_history, previous_state, policy_input):
    """Apply the aggregated food signal from the policies, clamping at
    zero tons of grass."""
    updated = previous_state['food'] + policy_input['delta_food']
    return 'food', max(updated, 0)
# -
# ### Mathematical Specification Updates
# \begin{align}
# \large population_t &\large= population_{t-1} + {\Delta population} \quad \textrm{(sheep)} \tag{1} \\
# \large food_t &\large= food_{t-1} + {\Delta food} \quad \textrm{(tons of grass)} \tag{2}
# \end{align}
#
# where the rate of change ($\Delta$) is:
# \begin{align}
# \large {\Delta population} &\large= \alpha * food_{t-1} \quad \textrm{(sheep/month)} \\
# \large {\Delta food} &\large= -\beta * population_{t-1} + \gamma \quad \textrm{(tons of grass/month)}
# \end{align}
#
# where:
#
# $
# \begin{align}
# \alpha: \quad &\textrm{'reproduction_rate'}\\
# \beta: \quad &\textrm{'consumption_rate'}\\
# \gamma: \quad &\textrm{'growth_rate'}
# \end{align}
# $
#
# * A population consumes a food source, and reproduces at a rate proportional to the food source $\alpha$ (alpha).
# * The food source is consumed at a rate proportional to the population $\beta$ (beta), and grows at a constant rate $\gamma$ (gamma).
# <center>
# <img src="./images/s6-differential-spec-ecosystem-with-gamma.png"
# alt="Diff spec"
# style="width: 60%" />
# </center>
# +
initial_state = {
    'population': 50, # number of sheep
    'food': 1000 # tons of grass
}
system_params = {
    'reproduction_rate': [0.01], # number of sheep / month
    'consumption_rate': [0.01], # tons of grass / month
    'growth_rate': [10.0], # tons of grass / month
}
# -
# Scratch demo of policy aggregation: cadCAD-style signals from multiple
# policies are summed per key, like Counter addition.
from collections import Counter
A = Counter({'delta_food': 5, 'delta_population': 10})
B = Counter({'delta_food': 5})
A + B
A = Counter({'delta_food': 5, 'delta_population': 10})
B = Counter({'delta_food': -2})
A + B
def p_growth(params, substep, state_history, previous_state):
    """Policy: the grass regrows at a constant rate, independent of state."""
    return {'delta_food': params['growth_rate']}
# One combined block: all policies emit signals, both state variables
# consume the aggregated signals.
partial_state_update_blocks = [
    {
        'policies': {
            'reproduction': p_reproduction,
            'consumption': p_consumption, # Signal: `delta_food`
            'growth': p_growth # Signal: `delta_food`
        },
        'variables': {
            'population': s_population,
            'food': s_food # Receives policy_input of (consumption + growth) as `delta_food`
        }
    }
]
# +
# Reset cadCAD's global config list, then configure and run the
# policy-driven version of the model.
del configs[:]
sim_config = config_sim({
    'N': 1,
    'T': range(400),
    'M': system_params
})
experiment.append_configs(
    initial_state = initial_state,
    partial_state_update_blocks = partial_state_update_blocks,
    sim_configs = sim_config
)
# +
exec_context = ExecutionContext()
simulation = Executor(exec_context=exec_context, configs=configs)
raw_result, tensor_field, sessions = simulation.execute()
# -
simulation_result = pd.DataFrame(raw_result)
simulation_result
df = simulation_result.copy()
# First (and only) config appended after the reset -> simulation index 0.
df = df[df.simulation == 0]
df
df.plot(kind='line', x='timestep', y=['population','food'])
df = df[['population', 'food']]
df.head()
df.pct_change()
# Per-timestep differences; find where the food supply stops growing.
diff = df.diff()
diff
diff = diff.query('food <= 0')
diff
df.iloc[75]
# ## Model Limitations
# 1. The population never dies.
# 2. The system reaches a steady state of no population or food supply change.
# #### Addition of a population death rate, "epsilon" / $\epsilon$, that's dependent on the population size:
# <br>
#
# \begin{align}
# \large population_t &\large= population_{t-1} + {\Delta population} \quad \textrm{(sheep)} \tag{1} \\
# \large food_t &\large= food_{t-1} + {\Delta food} \quad \textrm{(tons of grass)} \tag{2}
# \end{align}
#
# where the rate of change ($\Delta$) is:
# \begin{align}
# \large {\Delta population} &\large= \alpha * food_{t-1} - \epsilon * population_{t-1} \quad \textrm{(sheep/month)} \\
# \large {\Delta food} &\large= -\beta * population_{t-1} + \gamma \quad \textrm{(tons of grass/month)}
# \end{align}
#
# where:
#
# $
# \begin{align}
# \alpha: \quad &\textrm{'reproduction_rate'}\\
# \epsilon: \quad &\textrm{'death_rate'}\\
# \beta: \quad &\textrm{'consumption_rate'}\\
# \gamma: \quad &\textrm{'growth_rate'}\\
# \end{align}
# $
#
# * A population consumes a food source, and reproduces at a rate proportional to the food source $\alpha$ (alpha), and dies at a rate proportional to the population size $\epsilon$ (epsilon).
# * The food source is consumed at a rate proportional to the population $\beta$ (beta), and grows at a constant rate $\gamma$ (gamma).
# <center>
# <img src="./images/s6-differential-spec-ecosystem-final.png"
# alt="Diff spec"
# style="width: 60%" />
# </center>
def p_death(params, substep, state_history, previous_state):
    """Policy: deaths proportional to the current population size.

    Implements the epsilon term of the updated spec: contributes
    -death_rate * population to the population delta signal.
    """
    deaths = params['death_rate'] * previous_state['population']
    return {'delta_population': -deaths}
# +
# Starting state for the extended (death-rate) model.
initial_state = {
    'population': 50, # number of sheep
    'food': 1000 # tons of grass
}
# Parameter sweep values (single-element lists -> one parameter set).
# 'death_rate' is the new epsilon parameter from the updated spec above.
system_params = {
    'reproduction_rate': [0.01],  # alpha: sheep per ton of grass per month
    'death_rate': [0.01],         # epsilon: deaths per sheep per month
    'consumption_rate': [0.01],   # beta: tons of grass per sheep per month
    'growth_rate': [10.0],        # gamma: tons of grass per month
}
# -
# One partial state update block: all four policies run each timestep and
# their signals are applied by the two state update functions.
partial_state_update_blocks = [
    {
        'policies': {
            'reproduction': p_reproduction,
            'death': p_death,          # new policy for the epsilon term
            'consumption': p_consumption,
            'growth': p_growth
        },
        'variables': {
            'population': s_population,
            'food': s_food
        }
    }
]
# +
# Simulation settings — N (Monte Carlo runs), T (timesteps), M (parameter
# sweep), following config_sim's convention.
sim_config = config_sim({
    'N': 1,
    'T': range(1000),
    'M': system_params
})
# Register this model as an additional configuration on the experiment.
experiment.append_configs(
    initial_state = initial_state,
    partial_state_update_blocks = partial_state_update_blocks,
    sim_configs = sim_config
)
# +
# Re-run the executor; `configs` now also contains the newly appended
# configuration.
exec_context = ExecutionContext()
simulation = Executor(exec_context=exec_context, configs=configs)
raw_result, tensor_field, sessions = simulation.execute()
# -
simulation_result = pd.DataFrame(raw_result)
df = simulation_result.copy()
# simulation == 1 selects the appended (death-rate) run — presumably the
# earlier model kept index 0; confirm against simulation_result.
df = df[df.simulation == 1]
df
df.plot(kind='line', x='timestep', y=['population','food'])
# <br/><br/><br/>
# # Well done!
# <br/><br/><br/><br/>
| complete-foundations-bootcamp-output-main/content/section-5-modelling-and-simulation/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + slideshow={"slide_type": "skip"}
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib notebook
# + [markdown] slideshow={"slide_type": "slide"}
# # Clustering
#
# ##### Version 0.1
#
# ***
# By <NAME> (Northwestern/CIERA)
#
# 03 Mar 2022
# + [markdown] slideshow={"slide_type": "slide"}
# In this notebook, we will explore the use of clustering methods in low dimensional data spaces.
# -
# ## Problem 1) Load and plot Iris data set
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 1a**
#
# Import the iris data set from `scikit-learn`.
# + slideshow={"slide_type": "slide"}
from sklearn import datasets
# Load the iris dataset bunch (feature matrix in .data, labels in .target).
iris = datasets.load_iris()
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 1b**
#
# As a baseline for reference, make a scatter plot of the iris data in the sepal length-sepal width plane.
# + slideshow={"slide_type": "slide"}
fig, ax = plt.subplots()
# Sepal length vs sepal width, colored by the true class labels.
ax.scatter(iris.data[:,0], iris.data[:,1], c=iris.target)
# complete
# complete
# complete
# + [markdown] slideshow={"slide_type": "slide"}
# ## Problem 2) $k$-means clustering
# + [markdown] slideshow={"slide_type": "slide"}
# As a subfield of unsupervised learning, clustering aims to group/separate sources in the multidimensional feature space. The "unsupervised" comes from the fact that there are no target labels provided to the algorithm, so the machine is asked to cluster the data "on its own." The lack of labels means there is no (simple) method for validating the accuracy of the solution provided by the machine (though sometimes simple examination can show the results are **terrible**).
# + [markdown] slideshow={"slide_type": "slide"}
#
# For this reason, "classic" unsupervised methods are not particularly useful for astronomy.$^\dagger$ Supposing one did find some useful clustering structure, an adversarial researcher could always claim that the current feature space does not accurately capture the physics of the system and as such the clustering result is not interesting or, worse, erroneous.
# + [markdown] slideshow={"slide_type": "subslide"}
# $^\dagger$This is my (AAM) opinion and there are many others who disagree.
# + [markdown] slideshow={"slide_type": "slide"}
# We start today with the most famous, and simple, clustering algorithm: [$k$-means](https://en.wikipedia.org/wiki/K-means_clustering). $k$-means clustering looks to identify $k$ convex clusters, where $k$ is a user defined number. And here-in lies the rub: if we truly knew the number of clusters in advance, we likely wouldn't need to perform any clustering in the first place. This is the major downside to $k$-means.
# + [markdown] slideshow={"slide_type": "slide"}
# As a reminder from lecture, the pseudocode for $k$-means:
#
# initiate search by identifying k points (i.e. the cluster centers)
# loop
# assign each point in the data set to the closest cluster center
# calculate new cluster centers based on mean position of all points within each cluster
# if diff(new center - old center) < threshold:
# stop (i.e. clusters are defined)
#
# + [markdown] slideshow={"slide_type": "slide"}
# The threshold is defined by the user, though in some cases the total number of iterations can also be used as a stopping criteria. An advantage of $k$-means is that the solution will always converge, though the solution may only be a local minimum. Disadvantages include the assumption of convexity, i.e. difficult to capture complex geometry, and the curse of dimensionality (though as discussed in lecture it is possible to apply dimensionality reduction techniques prior to applying clustering).
# + [markdown] slideshow={"slide_type": "slide"}
# In `scikit-learn` the [`KMeans`](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html#sklearn.cluster.KMeans) algorithm is implemented as part of the [`sklearn.cluster`](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.cluster) module.
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 2a**
#
# Import `KMeans`
# + slideshow={"slide_type": "slide"}
from sklearn.cluster import KMeans
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 2b**
#
# Fit a $k = 2$, $k$-means model to the iris data. Plot the resulting clusters in the sepal length-sepal width plane.
# + slideshow={"slide_type": "slide"}
# Fit k-means with k=2 on all four iris features.
Kcluster = KMeans(2)
Kcluster.fit(iris.data)
fig, ax = plt.subplots()
# Color points by the learned cluster assignments.
ax.scatter(iris.data[:,0], iris.data[:,1], c=Kcluster.labels_)
#ax.scatter(Kcluster.cluster_centers_[0], Kcluster.cluster_centers_[1], '*')
# complete
# complete
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 2c**
#
# Fit a $k = 3$, $k$-means model to the iris data. Plot the resulting clusters in the sepal length-sepal width plane.
# + slideshow={"slide_type": "slide"}
# Same as above with k=3 (matches the true number of iris classes).
Kcluster = KMeans(3)
Kcluster.fit(iris.data)
fig, ax = plt.subplots()
ax.scatter(iris.data[:,0], iris.data[:,1], c=Kcluster.labels_)
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 2d**
#
# Pretend that you do not know which iris sources belong to which class. Given this, which of the two clustering solutions ($k=2$ or $k=3$) would you identify as superior?
#
# Knowing that there are in fact 3 different clusters, which of the two clustering solutions would you identify as superior?
# + [markdown] slideshow={"slide_type": "slide"}
# *write your answer here*
#
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 2e**
#
# How do the results change if the 3 cluster model is called with `n_init = 1` and `init = 'random'` options? Use `rs` for the random state [this allows me to cheat in service of making a point].
#
# *Note - the respective defaults for these two parameters are 10 and `k-means++`, respectively. Read the docs to see why these choices are, likely, better than those in 2b.
# + slideshow={"slide_type": "slide"}
# Seed chosen to demonstrate a poor local optimum when k-means uses a
# single purely random initialisation (n_init=1, init='random') instead of
# the defaults (10 restarts, 'k-means++').
rs = 14
Kcluster = KMeans(3, n_init=1, init='random', random_state=rs)
Kcluster.fit(iris.data)
fig, ax = plt.subplots()
ax.scatter(iris.data[:,0], iris.data[:,1], c=Kcluster.labels_)
# + [markdown] slideshow={"slide_type": "slide"}
# That doesn't look right at all!
#
# So in addition to not knowing the correct number of clusters in the data, we see that the results are also sensitive to how the cluster positions are initiated.
# + [markdown] slideshow={"slide_type": "slide"}
# $k$-means evaluates the Euclidean distance between individual sources and cluster centers, thus, the magnitude of the individual features has a strong effect on the final clustering outcome.
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 2f**
#
# Calculate the mean, standard deviation, min, and max of each feature in the iris data set. Based on these summaries, which feature is most important for clustering?
# + slideshow={"slide_type": "slide"}
# Per-feature (column-wise) summaries: mean, std, max, min.
print(np.mean(iris.data, axis=0))
print(np.std(iris.data, axis=0))
print(np.max(iris.data, axis=0))
print(np.min(iris.data, axis=0))
# + [markdown] slideshow={"slide_type": "slide"}
# one with most spread - because k-means is circular, if one feature has much larger spread than others, only clustering in that feature will be detected
#
# + [markdown] slideshow={"slide_type": "slide"}
# Since $k$-means is built on Euclidean distance measures in the feature space, it can be really useful to re-scale all the features prior to applying the clustering algorithm.
# + [markdown] slideshow={"slide_type": "subslide"}
# (Two notes – (a) some algorithms are extremely sensitive to feature scaling so this is a always a good thing to keep in mind, and (b) the iris data set is small and of relatively similar scale so the effects will not be that dramatic)
# + [markdown] slideshow={"slide_type": "slide"}
# Imagine you are classifying stellar light curves: the data set will include binary white dwarf stars with periods of $\sim 0.01 \; \mathrm{d}$ and Mira variables with periods of $\sim 1000 \; \mathrm{d}$. Without re-scaling, this single feature covers 5 orders of magnitude! All the other features would then add little weight to any final clustering solution.
# + [markdown] slideshow={"slide_type": "slide"}
# The two most common forms of re-scaling are to rescale to a Gaussian with mean $= 0$ and variance $= 1$, or to rescale the min and max of the feature to $[0, 1]$. The best normalization is problem dependent. The [`sklearn.preprocessing`](http://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing) module makes it easy to re-scale the feature set.$^\dagger$
# + [markdown] slideshow={"slide_type": "slide"}
# $\dagger$ For supervised methods, **it is essential that the same scaling used for the training set be used for all other data run through the model.** The testing, validation, and field observations cannot be re-scaled independently. This would result in meaningless final classifications/predictions.
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 2g**
#
# Re-scale the features to normal distributions, and perform $k$-means clustering on the iris data. How do the results compare to those obtained earlier?
#
# *Hint - you may find [`'StandardScaler()'`](http://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler) within the `sklearn.preprocessing` module useful.*
# + slideshow={"slide_type": "slide"}
from sklearn.preprocessing import StandardScaler

# Standardize each feature to mean 0 / variance 1 before clustering.
scaler = StandardScaler().fit(iris.data)

Kcluster = KMeans(3)
# Bug fix: fit must be called on the KMeans *instance* — the original
# called the unbound class method KMeans.fit(...), so Kcluster was never
# fitted and .labels_ below would not exist.
Kcluster.fit(scaler.transform(iris.data))

fig, ax = plt.subplots()
# Plot in the original (unscaled) units, colored by cluster assignment.
ax.scatter(iris.data[:,0], iris.data[:,1], c=Kcluster.labels_)
# + [markdown] slideshow={"slide_type": "slide"}
# *write your answer here*
# + [markdown] slideshow={"slide_type": "slide"}
# **How do I test the accuracy of my clusters?**
#
# Essentially - you don't. There are some methods that are available, but they essentially compare clusters to labeled samples, and if the samples are labeled it is likely that supervised learning is more useful anyway. If you are curious, `scikit-learn` does provide some [built-in functions for analyzing clustering](http://scikit-learn.org/stable/modules/clustering.html#clustering-performance-evaluation), but again, it is difficult to evaluate the validity of any newly discovered clusters.
# + [markdown] slideshow={"slide_type": "slide"}
# **What if I don't know how many clusters are present in the data?**
#
# An excellent question, as you will almost never know this a priori. Many algorithms, like $k$-means, do require the number of clusters to be specified, but some other methods do not.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Problem 3) DBSCAN
# + [markdown] slideshow={"slide_type": "slide"}
# During the lecture we saw that [`DBSCAN`](https://en.wikipedia.org/wiki/DBSCAN) can be used to identify clusters without the pre-specification of the number of clusters to search for.
#
# In brief, `DBSCAN` requires two parameters: `minPts`, the minimum number of points necessary for a cluster, and $\epsilon$, a distance measure (see the lecture for the full pseudocode).
# + [markdown] slideshow={"slide_type": "slide"}
# The general downsides for DBSCAN are that the results are highly dependent on the two tuning parameters, and that clusters of highly different densities can be difficult to recover (because a single $\epsilon$ and `minPts` are specified for all clusters).
# + [markdown] slideshow={"slide_type": "slide"}
# In `scikit-learn` the
# [`DBSCAN`](http://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html#sklearn.cluster.DBSCAN) algorithm is part of the `sklearn.cluster` module. $\epsilon$ and `minPts` are set by `eps` and `min_samples`, respectively.
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 3a**
#
# Cluster the iris data using `DBSCAN`. Use the `scikit-learn` defaults. Plot the results in the sepal width-sepal length plane.
#
# *Note - DBSCAN labels outliers as $-1$, and thus, `plt.scatter()`, will plot all these points as the same color.*
#
# + slideshow={"slide_type": "slide"}
from sklearn.cluster import DBSCAN

# Cluster with the scikit-learn defaults, per the problem statement.
# (Original cell had `# complete` placeholders that were syntax errors.)
dbs = DBSCAN()
dbs.fit(iris.data)

fig, ax = plt.subplots()
# Outliers are labelled -1, so they all share one color in this plot.
ax.scatter(iris.data[:,0], iris.data[:,1], c=dbs.labels_)
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 3b**
#
# Adjust the tuning parameters to see how they affect the final clustering results. How does the use of `DBSCAN` compare to $k$-means? Can you obtain 3 clusters with `DBSCAN`? If not, given the knowledge that the iris dataset has 3 classes - does this invalidate `DBSCAN` as a viable algorithm?
# + slideshow={"slide_type": "slide"}
# Adjust the tuning parameters: larger eps merges clusters, larger
# min_samples marks more points as outliers. Values below are one example
# setting to experiment with. (Original cell had syntax-error placeholders.)
dbs = DBSCAN(eps=0.8, min_samples=10)
dbs.fit(iris.data)

fig, ax = plt.subplots()
ax.scatter(iris.data[:,0], iris.data[:,1], c=dbs.labels_)
# + [markdown] slideshow={"slide_type": "slide"}
# *write your answer here*
# + [markdown] slideshow={"slide_type": "slide"}
# ## Problem 4) Cluster SDSS Galaxy Data
#
# The following query will select 10k likely galaxies from the SDSS database and return the results of that query into an [`astropy.Table`](http://docs.astropy.org/en/stable/table/) object. (For now, if you are not familiar with the SDSS DB schema, don't worry about this query, just know that it returns a bunch of photometric features.)
# + [markdown] slideshow={"slide_type": "slide"}
# from astroquery.sdss import SDSS # enables direct queries to the SDSS database
#
# GALquery = """SELECT TOP 5000
# p.dered_u - p.dered_g as ug, p.dered_g - p.dered_r as gr,
# p.dered_g - p.dered_i as gi, p.dered_g - p.dered_z as gz,
# p.petroRad_i, p.petroR50_i, p.deVAB_i, p.fracDev_i
# FROM PhotoObjAll AS p JOIN specObjAll s ON s.bestobjid = p.objid
# WHERE p.mode = 1 AND s.sciencePrimary = 1 AND p.clean = 1 AND p.type = 3
# AND p.deVAB_i > -999 AND p.petroRad_i > -999 AND p.petroR50_i > -999 AND p.dered_r < 20
# """
# SDSSgals = SDSS.query_sql(GALquery)
# SDSSgals
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 4a**
#
# Download the [SDSS galaxy data](https://arch.library.northwestern.edu/downloads/7w62f868g?locale=en)
#
# Read in the file `galaxy_clustering.csv`, and convert the data into a feature array `X`.
# + slideshow={"slide_type": "slide"}
# complete
# complete
# complete
# + [markdown] slideshow={"slide_type": "slide"}
# **Problem 4b**
#
# Using the SDSS galaxy data, identify interesting clusters within the data. This question is intentionally very open ended. If you uncover anything especially exciting you'll have a chance to share it with the group. Feel free to use the algorithms discussed above, or any other packages available via `sklearn`. Can you make sense of the clusters in the context of galaxy evolution?
#
# *Hint - don't fret if you know nothing about galaxy evolution (neither do I!). Just take a critical look at the clusters that are identified*
# + slideshow={"slide_type": "slide"}
# complete
# complete
# complete
# complete
# complete
# + slideshow={"slide_type": "slide"}
# complete
# complete
# complete
# complete
# complete
# + [markdown] slideshow={"slide_type": "slide"}
# There are two solutions shown above, one using DBSCAN and one using $k$-means (with $k$ = 3).
#
# Over the years I have learned A LOT about the SDSS targeting algorithm. For these data I think 3 clusters is a fairly reasonable solution. The bluest galaxies are star-forming galaxies at low redshift $z$. The "tight" cluster of red galaxies are passive galaxies at low-$z$. Finally the diffuse cloud at $g - i \approx 3$ are high-$z$ luminous red galaxies (LRGs) that were targeted for BAO measurements. These sources have a wide range of $u-g$ colors because they essentially are not detected in the $u$ band as this is blueward of the Lyman break.
#
# If you did not take the time to plot and examine each of the features in this data set, I suggest you look closely at the `fracDev_i` feature, which provides the most discriminating power for this clustering example.
| Sessions/Session14/Day1/Clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [Dequeue](https://www.tutorialspoint.com/python/python_deque.htm)
#
# A double-ended queue, or deque, supports adding and removing elements from either end. The deque type is part of the **collections** library. It has methods for adding and removing elements which can be invoked directly with arguments. In the program below we import the collections module and declare a deque. Without needing to define any class, we use the **built-in implementation** of these methods directly.
# +
# Demonstrate collections.deque: O(1) append/pop at both ends.
# (Renamed the PascalCase variable `Dequeue` -> `days` per PEP 8, and
# normalized the `print (...)` spacing.)
import collections

# Create a deque
days = collections.deque(["Mon", "Tue", "Wed"])
print(days)

# Append to the right
print("Adding to the right: ")
days.append("Thu")
print(days)

# Append to the left
print("Adding to the left: ")
days.appendleft("Sun")
print(days)

# Remove from the right
print("Removing from the right: ")
days.pop()
print(days)

# Remove from the left
print("Removing from the left: ")
days.popleft()
print(days)

# Reverse the deque in place
print("Reversing the deque: ")
days.reverse()
print(days)
| fundamentals/Dequeue.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
import bs4
import csv
# NOTE(review): `list` shadows the built-in list type for the rest of the
# notebook; a name like `rows` would be safer. Left unchanged because later
# cells reference this name.
list=[]
# CSV header row ('Cource_Description' typo is preserved as-is since it is
# the literal header written to the output file).
lis_h=['Course_Name','Course_Level','Course_Type','Cource_Description','University_Name','Link']
list.append(lis_h)
# Fetch the undergraduate course listing and print each course name + link.
res1=requests.get('https://www.westernsydney.edu.au/future/study/courses/undergraduate.html')
soup1=bs4.BeautifulSoup(res1.text,'lxml')
# Each '.list--linked' block holds anchor tags for individual courses.
s1=soup1.select('.list--linked')
for i in range(len(s1)):
    for j in s1[i].select('a'):
        print(j.text)
        print(j.get('href'))
# Fetch the postgraduate listing; courses are inside <article> elements.
res2=requests.get('https://www.westernsydney.edu.au/future/study/courses/postgraduate.html')
soup2=bs4.BeautifulSoup(res2.text,'lxml')
s2=soup2.select('article')
# len(s2)-3 skips the last three articles — presumably non-course page
# content; confirm against the live page.
for i in range(len(s2)-3):
    print(s2[i].h3.text)
    print(s2[i].a.get('href'))
# +
# Combined scrape: rebuild the row list, collect undergraduate and
# postgraduate courses, then display the accumulated rows.
import requests
import bs4
import csv
list=[]
lis_h=['Course_Name','Course_Level','Course_Type','Cource_Description','University_Name','Link']
list.append(lis_h)
res1=requests.get('https://www.westernsydney.edu.au/future/study/courses/undergraduate.html')
soup1=bs4.BeautifulSoup(res1.text,'lxml')
s1=soup1.select('.list--linked')
for i in range(len(s1)):
    for j in s1[i].select('a'):
        # Type/description columns are left blank (not on the listing page).
        list.append([j.text,'Undergraduate','','','Western Sydney University',j.get('href')])
res2=requests.get('https://www.westernsydney.edu.au/future/study/courses/postgraduate.html')
soup2=bs4.BeautifulSoup(res2.text,'lxml')
s2=soup2.select('article')
for i in range(len(s2)-3):
    list.append([s2[i].h3.text,'Postgraduate','','','Western Sydney University',s2[i].a.get('href')])
list
# -
# Number of rows collected (header + undergrad + postgrad courses).
len(list)
# Write all rows to CSV; newline="" avoids blank lines between rows on Windows.
with open('Western_Sydney_University.csv','w',newline="") as file:
    write=csv.writer(file)
    for row in list:
        write.writerow(row)
| Australian_Universities/Western_Sydney_University/Western_Sydney_University.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Section 1 Business Understanding
# The purpose of this project is to use Stack Overflow data from 2020 to better understand:
#
# #### Question 1: Which programming languages are the most desired next year (Which language to learn in 2021)?
# #### Question 2: Which programming languages are the most popular among developers in 2020?
# #### Question 3: How much more do developers earn for mastering certain programming languages?
# importing the relevant libararies
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
# # Secion 2 Data Understanding
# The main data source is the survey results from Stack Overflow which is an open source and can be access through the link in the Readme file.
# ### Gather
# Import the Stack Overflow 2020 survey results (CSV must be alongside the
# notebook; see the Readme for the download link).
df = pd.read_csv("survey_results_public.csv")
df.head()
# ### Assess
# Overview of the numerical columns in the dataset.
df.describe()
# # Section 3 Data Preparation
# As there are columns which are mixed of string and float datatypes, this first needs to be imputed using the imputation function defined above. This enables us to have more quantitative data from the dataset. Furthermore, there are missing values in some of the relevant variables which also need to be preprocessed before we can continue with the analysis.
def impute_func(col, frame=None):
    '''
    INPUT:
    col - name of the column to impute.
    frame - optional dataframe to operate on; defaults to the module-level
            `df` (backward compatible with the original one-argument calls).
    OUTPUT:
    None. The column is modified in place: the open-ended string answers at
    the edges of each range are replaced with their numeric edge values
    (e.g. 'Less than 1 year' -> 1, 'Older than 85' -> 85), so the column
    can later be cast to float. As these edge cases are small relative to
    the sample size, using the edge numbers is a reasonable imputation.
    '''
    # Generalization: operate on an explicit dataframe when given instead
    # of being hard-wired to the global `df`.
    target = df if frame is None else frame
    if col == 'YearsCode' or col == 'YearsCodePro':
        target[col] = np.where((target[col] == 'Less than 1 year'), 1, target[col])
        target[col] = np.where((target[col] == 'More than 50 years'), 50, target[col])
    else:
        target[col] = np.where((target[col] == 'Younger than 5 years'), 5, target[col])
        target[col] = np.where((target[col] == 'Older than 85'), 85, target[col])
# ### Clean
# Impute the YearsCode, Age1stCode and YearsCodePro columns: their values
# mix floats and edge-case strings, and impute_func replaces the strings
# with numbers so the columns can be used in the analysis below.
impute_func('YearsCode')
impute_func('Age1stCode')
impute_func('YearsCodePro')
# ### Analyze
##dealing with null values and removing rows where there are no values for the compensation columns
##removing any rows without any compensation value because we do not want to assume the mean for the column as that
##people do not have compensation such as students should not have compensation at all
df.dropna(axis=0,subset=['ConvertedComp'],how='any',inplace = True)
# Cast the imputed columns to float; CompTotal is dropped in favor of the
# normalized ConvertedComp column.
df = df.astype({"YearsCode": float, "Age1stCode": float, "YearsCodePro": float}).drop('CompTotal',axis=1)
# Keep the float columns for the numeric null analysis below.
df_numerical = df.select_dtypes(include=['float64']).copy()
df_numerical.isnull().sum()
## Missing values in the numeric columns are rare relative to the overall
## sample size, and after dropping rows without compensation only working
## professionals remain, so each column's mean is a reasonable fill value.
# Fix: the original per-column `df_numerical[col].fillna(..., inplace=True)`
# loop is chained assignment, which is unreliable (a silent no-op under
# pandas copy-on-write). A single DataFrame-level fillna with the column
# means is equivalent and safe.
df_numerical = df_numerical.fillna(df_numerical.mean())
# ## Section 4 Modelling
# The second part of this project focuses on using a simple linear regression model to evaluate the third question that needs to be addressed which is that how much more can developers earn for mastering certain programming languages. The data will be fitted on the model and used to derive the coefficients to draw the conclusions.
def get_languages(df):
    '''
    INPUT:
    df - dataframe containing 'LanguageWorkedWith' (semicolon-separated
         language answers) and 'ConvertedComp' columns.
    OUTPUT:
    A dataframe with one 0/1 indicator column per language
    ('Knows_<language>'), the number of languages each respondent listed
    ('NumberofLanguages'), and the compensation column ('ConvertedComp').
    Rows without a language answer are dropped.
    '''
    df = df.copy()
    df.dropna(axis=0,subset=['LanguageWorkedWith'],how='any',inplace = True)
    language_ranks = df['LanguageWorkedWith'].str.split(';', expand=True).stack().value_counts()
    languages = list(language_ranks.index)
    df['NumberofLanguages'] = df['LanguageWorkedWith'].str.split(';').apply(len)
    # Exact language tokens per respondent.
    known_sets = df['LanguageWorkedWith'].str.split(';').apply(set)
    for language in languages:
        # Bug fix: test exact token membership rather than substring search.
        # The original `str.find(language) != -1` marked 'JavaScript' users
        # as knowing 'Java', and 'C++'/'C#' users as knowing 'C'.
        df[f'Knows_{language}'] = known_sets.apply(lambda s: int(language in s))
    cols = [col for col in df.columns if 'Knows' in col]
    cols.extend(['NumberofLanguages','ConvertedComp'])
    return df[cols]
def coef_weights(coefficients, X_train):
    '''
    INPUT:
    coefficients - the coefficients of the linear model
    X_train - the training data, so the column names can be used
    OUTPUT:
    coefs_df - a dataframe holding the coefficient, estimate, and abs(estimate)
    Provides a dataframe that can be used to understand the most influential coefficients
    in a linear model by providing the coefficient estimates along with the name of the
    variable attached to the coefficient, sorted by absolute magnitude (descending).
    '''
    coefs_df = pd.DataFrame()
    coefs_df['est_int'] = X_train.columns
    # Bug fix: use the `coefficients` argument instead of reaching for the
    # global `lm_model.coef_`, so the helper works with any fitted model.
    coefs_df['coefs'] = coefficients
    coefs_df['abs_coefs'] = np.abs(coefficients)
    coefs_df = coefs_df.sort_values('abs_coefs', ascending=False)
    return coefs_df
# ### Model
#Dividing the dataset into test and training sets, fitting the model and finally predict the results
df_final = get_languages(df)
X = df_final.drop('ConvertedComp',axis=1)
y = df_final['ConvertedComp']
# NOTE(review): `normalize=True` was deprecated in scikit-learn 1.0 and
# removed in 1.2; on newer versions use a StandardScaler pipeline instead.
lm_model = LinearRegression(normalize=True)
# 70/30 split; fixed random_state for reproducibility.
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3, random_state=42)
lm_model.fit(X_train,y_train)
y_preds = lm_model.predict(X_test)
# # Section 5 Evaluation
# The graphs for the most populuar languages are plotted using the function defined previously and lastly the cofficients of the variables are ranked based on the size of their absolute values to give an interpretation of their impact on the salary level of the developers.
def make_graphs(col_name, data=None):
    '''
    INPUT:
    col_name - column of semicolon-separated language answers to plot.
    data - optional dataframe to plot from; defaults to the module-level
           `df` (backward compatible with the original one-argument calls).
    OUTPUT:
    Produces a horizontal bar chart of the top programming languages ranked
    by popularity for the given column, and saves it as '<col_name>.png'.
    '''
    # Generalization: allow an explicit dataframe instead of the global df.
    source = df if data is None else data
    # Count each language across all semicolon-separated answers.
    language_ranks = source[col_name].str.split(';', expand=True).stack().value_counts()
    df_language_ranks = pd.DataFrame({'Language':language_ranks.index, 'Frequency':language_ranks.values}).sort_values('Frequency')
    ax = df_language_ranks.plot(x='Language',y='Frequency',kind='barh',figsize=(8, 10), color='#86bf91', zorder=2, width=0.85)
    ax.set_xlabel("Popularity", labelpad=20, weight='bold', size=12)
    ax.set_ylabel("Languages", labelpad=20, weight='bold', size=12)
    # Title depends on which survey question is being plotted.
    if col_name == 'LanguageDesireNextYear':
        ax.set_title("Most Desired Programming Languages in 2021", weight='bold', size=14)
    else:
        ax.set_title("Most Popular Programming Languages in 2020", weight='bold', size=14)
    ax.get_legend().remove()
    fig = ax.get_figure()
    fig.savefig(f'{col_name}.png')
# ### Visualize
# #### Question 1: Which programming languages are the most desired next year (Which language to learn in 2021)?
#plotting the graph for the most desired programming languages in 2021
make_graphs('LanguageDesireNextYear')
# #### Question 2: Which programming languages are the most popular among developers in 2020?
#plotting the graph for the most popular programming languages used in 2020
make_graphs('LanguageWorkedWith')
# #### Question 3: How much more do developers earn for mastering certain programming languages?
#Use the function to rank variables by coefficient magnitude
coef_df = coef_weights(lm_model.coef_, X_train)
#A quick look at the top results
coef_df.head(5).reset_index().drop('index',axis=1)
# # Section 6 Deployment
# This notebook serves as the technical foundation for the Medium post that gives a high-level explanation of the findings. The Medium post can be accessed via the link in the Readme section.
| Data Science Nanodegree Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import matplotlib.pyplot as plt
from pymedphys.mudensity import single_mlc_pair
# from pymedphys.mudensity import (
# _determine_reference_grid_position, _determine_leaf_centres
# )
# +
# def single_mlc_pair(left_mlc, right_mlc, grid_resolution, time_steps=50):
# leaf_pair_widths = [grid_resolution]
# jaw = np.array([
# [grid_resolution/2, grid_resolution/2],
# [grid_resolution/2, grid_resolution/2]
# ])
# mlc = np.array([
# [
# [-left_mlc[0], right_mlc[0]],
# ],
# [
# [-left_mlc[1], right_mlc[1]],
# ]
# ])
# grid, mu_density = calc_single_control_point(
# mlc, jaw, leaf_pair_widths=leaf_pair_widths,
# grid_resolution=grid_resolution, time_steps=time_steps
# )
# return grid['mlc'], mu_density[0, :]
# -
# Plot the MU density profile of a single MLC leaf pair with leaf
# positions (-1, 2.7) at both control points, on a grid resolution of 1
# (units presumably mm — confirm against the pymedphys docs).
plt.plot(*single_mlc_pair((-1, -1), (2.7, 2.7), 1), '-o')
# Display the raw (grid, profile) tuple as well.
single_mlc_pair((-1, -1), (2.7, 2.7), 1)
# +
leaf_pair_widths = [2, 2]
mlc = np.array([
[
[1, 1],
[2, 2],
],
[
[2, 2],
[3, 3],
]
])
jaw = np.array([
[1.5, 1.2],
[1.5, 1.2]
])
_, mu_density = calc_single_control_point(
mlc, jaw, leaf_pair_widths=leaf_pair_widths
)
# -
np.round(mu_density, 2)
| examples/archive/mudensity/07_unit_testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Final Lab
#
# *<NAME>*
#
# Esta nb corresponde al Experimento 1 sin la ejecucion, solo para valuacion del accuracy en TEST
#
# ## Main task
#
# In this notebook, we will apply transfer learning techniques to finetune the [MobileNet](https://arxiv.org/pdf/1704.04861.pdf) CNN on [Cifar-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset.
#
# ## Procedures
#
# In general, the main steps that we will follow are:
#
# 1. Load data, analyze and split in *training*/*validation*/*testing* sets.
# 2. Load CNN and analyze architecture.
# 3. Adapt this CNN to our problem.
# 4. Setup data augmentation techniques.
# 5. Add some keras callbacks.
# 6. Setup optimization algorithm with their hyperparameters.
# 7. Train model!
# 8. Choose best model/snapshot.
# 9. Evaluate final model on the *testing* set.
#
# +
# load libs
import os
import matplotlib.pyplot as plt
from IPython.display import SVG
# https://keras.io/applications/#documentation-for-individual-models
from keras.applications.mobilenet import MobileNet
from keras.datasets import cifar10
from keras.models import Model
from keras.utils.vis_utils import model_to_dot
from keras.layers import Dense, GlobalAveragePooling2D,Dropout
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import plot_model, to_categorical
from sklearn.model_selection import train_test_split
import cv2
import numpy as np
#
from genlib import pd,plot_confusion_matrix
from sklearn.metrics import confusion_matrix,classification_report
#
import tensorflow as tf
# -
# #### cuda
# +
cuda_flag=False  # set True to pin a single GPU and cap its memory use
if cuda_flag:
    # Setup one GPU for tensorflow (don't be greedy).
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    # The GPU id to use, "0", "1", etc.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    # Limit tensorflow gpu usage.
    # Maybe you should comment these lines if you run tensorflow on CPU.
    # NOTE(review): tf.ConfigProto / tf.Session are TensorFlow 1.x APIs;
    # under TF2 these would need tf.compat.v1 — confirm the pinned TF version.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.3
    sess = tf.Session(config=config)
# -
# ## 1. Load data, analyze and split in *training*/*validation*/*testing* sets
# +
# Cifar-10 class names
# We will create a dictionary for each type of label
# This is a mapping from the int class name to
# their corresponding string class name
LABELS = {
    0: "airplane",
    1: "automobile",
    2: "bird",
    3: "cat",
    4: "deer",
    5: "dog",
    6: "frog",
    7: "horse",
    8: "ship",
    9: "truck"
}
# Load dataset from keras (downloads the archive on first use).
(x_train_data, y_train_data), (x_test_data, y_test_data) = cifar10.load_data()
############
# [COMPLETE]
# Add some prints here to see the loaded data dimensions
############
print("Cifar-10 x_train shape: {}".format(x_train_data.shape))
print("Cifar-10 y_train shape: {}".format(y_train_data.shape))
print("Cifar-10 x_test shape: {}".format(x_test_data.shape))
print("Cifar-10 y_test shape: {}".format(y_test_data.shape))
# -
# from https://www.cs.toronto.edu/~kriz/cifar.html
# The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images.
# The classes are completely mutually exclusive. There is no overlap between automobiles and trucks. "Automobile" includes sedans, SUVs, things of that sort. "Truck" includes only big trucks. Neither includes pickup trucks.
# Some constants
IMG_ROWS = 32
IMG_COLS = 32
NUM_CLASSES = 10
RANDOM_STATE = 2018
############
# [COMPLETE]
# Analyze the amount of images for each class
# Plot some images to explore how they look
############
from genlib import get_classes_distribution,plot_label_per_class
# Class-balance check: print and plot the label distribution of each split.
for y,yt in zip([y_train_data.flatten(),y_test_data.flatten()],['Train','Test']):
    print('{:>15s}'.format(yt))
    get_classes_distribution(y,LABELS)
    plot_label_per_class(y,LABELS)
# Everything matches the documentation. Let's look at the images.
from genlib import sample_images_data,plot_sample_images
for xy,yt in zip([(x_train_data,y_train_data.flatten()),(x_test_data,y_test_data.flatten())],['Train','Test']):
    print('{:>15s}'.format(yt))
    train_sample_images, train_sample_labels = sample_images_data(*xy,LABELS)
    plot_sample_images(train_sample_images, train_sample_labels,LABELS)
# +
############
# [COMPLETE]
# Split training set in train/val sets
# Use the sampling method that you want
############
#init seed (makes the split reproducible)
np.random.seed(seed=RANDOM_STATE)
full_set_flag=True # True: uses all images / False only a subset specified by TRAIN Samples and Val Frac
VAL_FRAC=0.2
TRAIN_SIZE_BFV=x_train_data.shape[0]
TRAIN_FRAC=(1-VAL_FRAC)
# calc
TRAIN_SAMPLES_FULL=int(TRAIN_FRAC*TRAIN_SIZE_BFV) # if full_set_flag==True
TRAIN_SAMPLES_RED=20000 # if full_set_flag==False
VAL_SAMPLES_RED=int(VAL_FRAC*TRAIN_SAMPLES_RED) # if full_set_flag==False
if full_set_flag:
    # Sample training indices without replacement; everything not chosen
    # becomes the validation set.
    train_idxs = np.random.choice(np.arange(TRAIN_SIZE_BFV), size=TRAIN_SAMPLES_FULL, replace=False)
    # np.setdiff1d is O(n log n); the previous list comprehension with
    # `x not in train_idxs` was O(n^2) over 50k samples.  Result is the
    # same ascending array of left-over indices.
    val_idx = np.setdiff1d(np.arange(TRAIN_SIZE_BFV), train_idxs)
else:
    # Reduced experiment: use only a subset of the data.
    train_idxs = np.random.choice(np.arange(TRAIN_SIZE_BFV), size=TRAIN_SAMPLES_RED, replace=False)
    # BUGFIX: validation indices were previously drawn FROM train_idxs,
    # leaking training images into the validation set.  Draw them from the
    # complement instead.
    remaining = np.setdiff1d(np.arange(TRAIN_SIZE_BFV), train_idxs)
    val_idx = np.random.choice(remaining, size=VAL_SAMPLES_RED, replace=False)
# Split (index validation first: the arrays are overwritten right after)
x_val_data = x_train_data[val_idx, :, :, :]
y_val_data = y_train_data[val_idx]
x_train_data = x_train_data[train_idxs, :, :, :]
y_train_data = y_train_data[train_idxs]
####
# -
####
# Report the resulting split sizes.
print("Cifar-10 x_train shape: {}".format(x_train_data.shape))
print("Cifar-10 y_train shape: {}".format(y_train_data.shape))
print("Cifar-10 x_val shape: {}".format(x_val_data.shape))
print("Cifar-10 y_val shape: {}".format(y_val_data.shape))
print("Cifar-10 x_test shape: {}".format(x_test_data.shape))
print("Cifar-10 y_test shape: {}".format(y_test_data.shape))
# Check whether Train and Validation remained class-balanced after the split.
for y,yt in zip([y_train_data.flatten(),y_val_data.flatten()],['Train','Validation']):
    print('{:>15s}'.format(yt))
    get_classes_distribution(y,LABELS)
    plot_label_per_class(y,LABELS)
# +
# In order to use the MobileNet CNN pre-trained on imagenet, we have
# to resize our images to have one of the following static square shape: [(128, 128),
# (160, 160), (192, 192), or (224, 224)].
# If we try to resize all the dataset this will not fit on memory, so we have to save all
# the images to disk, and then when loading those images, our datagenerator will resize them
# to the desired shape on-the-fly.
############
# [COMPLETE]
# Use the above function to save all your data, e.g.:
#   save_to_disk(x_train, y_train, 'train', 'cifar10_images')
#   save_to_disk(x_val, y_val, 'val', 'cifar10_images')
#   save_to_disk(x_test, y_test, 'test', 'cifar10_images')
############
save_image_flag=False # To avoid saving images every time!!!
if save_image_flag:
    # Write each split under cifar10_images/<split>/<class>/ so that
    # ImageDataGenerator.flow_from_directory can infer labels from folders.
    from genlib import save_to_disk
    save_to_disk(x_train_data, y_train_data, 'train', output_dir='cifar10_images')
    save_to_disk(x_val_data, y_val_data, 'val', output_dir='cifar10_images')
    save_to_disk(x_test_data, y_test_data, 'test', output_dir='cifar10_images')
# -
# ## 2. Load CNN and analyze architecture
# Model / training constants
NO_EPOCHS = 25
BATCH_SIZE = 32
NET_IMG_ROWS = 128   # network input size (one of MobileNet's supported sizes)
NET_IMG_COLS = 128
# +
############
# [COMPLETE]
# Use the MobileNet class from Keras to load your base model, pre-trained on imagenet.
# We wan't to load the pre-trained weights, but without the classification layer.
# Check the notebook '3_transfer-learning' or https://keras.io/applications/#mobilenet to get more
# info about how to load this network properly.
############
#Note that this model only supports the data format 'channels_last' (height, width, channels).
#The default input size for this model is 224x224.
base_model = MobileNet(input_shape=(NET_IMG_ROWS, NET_IMG_COLS, 3), # Input image size
                       weights='imagenet', # Use imagenet pre-trained weights
                       include_top=False, # Drop classification layer
                       pooling='avg') # Global AVG pooling for the
                                      # output feature vector
# -
# ## 3. Adapt this CNN to our problem
# +
############
# [COMPLETE]
# Having the CNN loaded, now we have to add some layers to adapt this network to our
# classification problem.
# We can choose to finetune just the new added layers, some particular layers or all the layer of the
# model. Play with different settings and compare the results.
############
# get the output feature vector from the base model
x = base_model.output
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# Add Drop Out Layer (regularization; rate 0.5)
x=Dropout(0.5)(x)
# and a logistic layer: one softmax unit per CIFAR-10 class
predictions = Dense(NUM_CLASSES, activation='softmax')(x)
# this is the model we will train (base + new classification head)
model = Model(inputs=base_model.input, outputs=predictions)
# -
# ## 4. Setup data augmentation techniques
# +
############
# [COMPLETE]
# Use data augmentation to train your model.
# Use the Keras ImageDataGenerator class for this purpose.
# Note: Given that we want to load our images from disk, instead of using
# ImageDataGenerator.flow method, we have to use ImageDataGenerator.flow_from_directory
# method in the following way:
# generator_train = dataget_train.flow_from_directory('resized_images/train',
# target_size=(128, 128), batch_size=32)
# generator_val = dataget_train.flow_from_directory('resized_images/val',
# target_size=(128, 128), batch_size=32)
# Note that we have to resize our images to finetune the MobileNet CNN, this is done using
# the target_size argument in flow_from_directory. Remember to set the target_size to one of
# the valid listed here: [(128, 128), (160, 160), (192, 192), or (224, 224)].
############
# NOTE(review): no augmentation transforms are configured here — this plain
# generator only streams and resizes the test images for prediction.
data_get=ImageDataGenerator()
test_generator = data_get.flow_from_directory(
    directory='cifar10_images/test/',
    target_size=(128, 128),  # resize on the fly to MobileNet's input size
    batch_size=1,            # one image per batch: predictions map 1:1 to filenames
    class_mode=None,         # yield no labels — prediction only
    shuffle=False,           # keep filename order aligned with predictions
    seed=42
)
# -
# ## 5 to 7 NOT USED
# ## Comentarios
# Se corrieron los siguientes casos (la descripcion no es extensiva sino orientativa):
#
# - Experiment001 - Full Set (Train_frac 0.8 - val_frac 0.2) - categorical_cross_entropy - dropout 0.5 - fit on layer >87
# - Experiment002 - Full Set (Train_frac 0.8 - val_frac 0.2) - SGD - dropout 0.25 - fit on layer >87
# - Experiment003 - Reduced Set (Train_samp 20000 - val_frac 0.2) - categorical_cross_entropy - dropout 0.5 - fit on layer >87
# - Experiment004 - Reduced Set (Train_samp 20000 - val_frac 0.2) - categorical_cross_entropy - dropout 0.1 fit on layer >85
# - Experiment005 - Reduced Set (Train_samp 20000 - val_frac 0.2) - categorical_cross_entropy - dropout 0.75 fit on layer >85
# - Experiment006 - Full Set (Train_frac 0.6 - val_frac 0.4) - categorical_cross_entropy - dropout 0.75 - fit on layer >87
# - Experiment007 - Full Set (Train_frac 0.6 - val_frac 0.4) - SGD - dropout 0.25 - fit on layer >85
#
# Cada caso tiene su ipynb asociada, junto a lo registrado por los callbacks TensorBoard y ModelCheckpoint.
#
# La eleccion se baso en los logs de Tensorboard. En funcion de estos se opto por el Experiment001.
# -Valor de accuracy en (train,val)
# -Valor de loss en (train,val) (hasta la etapa 6 - si se observa los logs)
#
# Algunos comentarios en orden, si bien no se sumaron mas capas que 3 (tres), se probaron
# diferentes valores de configuracion para DropOut y Balance de Train y Validation. Asimismo como capas entrenables.
# Da la impresion que algo estoy realizando en forma no correcta puesto que los valores de accuracy en validacion se mantienen estables.
# ## 8. Choose best model/snapshot
############
# [COMPLETE]
# Analyze and compare your results. Choose the best model and snapshot,
# justify your election.
############
#https://machinelearningmastery.com/check-point-deep-learning-models-keras/
# Restore the chosen snapshot (epoch 6, val loss 2.85) from Experiment 001.
model.load_weights('experiment_001/weights.06-2.85.hdf5')
############
# [COMPLETE]
# Compile your model.
############
from keras.losses import categorical_crossentropy
# Compile is required before evaluate/predict; the optimizer choice does not
# matter for inference-only use.
model.compile(loss=categorical_crossentropy,
              optimizer='adam',
              metrics=['accuracy'])
# ## 9. Evaluate final model on the *testing* set
# +
############
# [COMPLETE]
# Evaluate your model on the testing set.
############
#https://medium.com/@vijayabhaskar96/tutorial-image-classification-with-keras-flow-from-directory-and-generators-95f75ebe5720
# Reset the generator so predictions start from the first file.
test_generator.reset()
pred=model.predict_generator(test_generator,verbose=1)
# -
# One softmax row per image; take the arg-max class index.
predicted_class_indices=np.argmax(pred,axis=1)
labels = (test_generator.class_indices)
labels = dict((v,k) for k,v in labels.items())  # invert: class index -> class name
predictions = [labels[k] for k in predicted_class_indices]
filenames=test_generator.filenames
results=pd.DataFrame({"Filename":filenames,
                      "predictions":predictions})
# True class = the sub-directory component of the generator filename.
# NOTE(review): assumes POSIX '/' separators — flow_from_directory may use
# os.sep on Windows; confirm before reusing.
results['true_val']=results.Filename.apply(lambda x: x.split('/')[0])
results.to_csv("results_MobileNet.csv",index=False)
results.head(5)
# #### Results
# Confusion matrix across the ten classes.
plt.figure(figsize=(15,10))
plot_confusion_matrix(confusion_matrix(results.true_val,results.predictions),classes=LABELS.values())
# And the classification report.
print(classification_report(results.true_val,results.predictions,target_names=LABELS.values()),end='\n\n')
# As observed on validation, results are not good overall, with many cross-class errors.
| vpc_2018/lab/Lab_VpC_FelixRojoLapalma_TEST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Maximin LinUCB
from lrcb.bandits.multirep_finite import FiniteMultiBandit, hls_rank_combined
from lrcb.representations.finite_representations import make_random_rep, hls_rank, rank, derank_hls, make_hls_rank
from lrcb.representations.finite_representations import is_cmb, is_hls, spans, hls_lambda, derank_cmb, LinearRepresentation
from lrcb.representations.finite_representations import reduce_dim, fuse_columns
from lrcb.algos.oful import oful
from lrcb.algos.lr_oful import select_oful
from lrcb.algos.maximin_oful import maximin_oful
import math
import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import lrcb.visualization.notebook_utils as nu
from jupyterthemes import jtplot
#jtplot.style()
plt.rcParams['figure.figsize'] = [12, 9]
# ## Common settings
# Problem size — presumably nc contexts, na actions, feature dimension `dim`
# (names follow the lrcb package conventions; confirm there).
nc = 20
na = 4
dim = 5
noise = 0.1
seeds = range(5)
# Generate original HLS representation
#r0 = make_random_rep(nc, na, dim, normalize=True)
# Load a pre-generated representation (features + parameter) from disk so
# runs are comparable across notebook sessions.
r0 = LinearRepresentation(np.load('../logs/basic_features.npy'), np.load('../logs/basic_param.npy'))
assert is_hls(r0)
# OFUL settings
iterations = 10000
reg = 1
delta = 0.01
param_bound = 1. #Guaranteed by make_random_rep(..., normalize=True)
# ## Basic Problem
# 5 equivalent representations of dimension 5, of increasing hls rank, only one is HLS (the last)
# +
reps = [derank_hls(r0, i, transform=True, normalize=True) for i in range(1, dim)] #Random transformation preserving equivalence
reps.append(r0)
# Sanity checks on every deranked representation.
for i, r in enumerate(reps[:-1]):
    assert r == reps[-1] #All representations are equivalent
    assert not np.allclose(r._param, reps[-1]._param) #But with different parameters!
    assert not is_hls(r) #Only the original one is HLS
    assert spans(r) #All representations still span R^d
#Build multi-representation problem
problem = FiniteMultiBandit(nc, na, reps)
# Print hls_rank/rank for each representation.
for r in reps:
    print('%d/%d' % (hls_rank(r), rank(r)))
# -
# OFUL with different representations: run each fixed representation over
# all seeds and log regret under 'basic_oful(<hls rank>)'.
for i in range(len(reps)):
    problem.select_rep(i)
    name = 'basic_oful(%d)' % hls_rank(reps[i])
    for seed in seeds:
        oful(problem, iterations, reg, noise, delta, param_bound, seed=seed, verbose=False, logname=name)
ax = nu.compare('../logs', ['basic_oful(%d)' % hls_rank(r) for r in reps], 'cumregret', seeds)
ax.set_ylabel('Regret')
# Named baselines used in later comparisons.
oracle = 'basic_oful(5)'
dummy = 'basic_oful(4)'
# ## Comparison of different strategies
# For model-selection algorithm, start from random active representation and divide confidence by n. reps.
problem.reset()
delta2 = delta / len(reps)  # union bound over representations
# **lambdamin(u)**: select representation with largest minimum eigenvalue of optimal features according to uniform distribution and estimated optimal actions
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='maxlambdamin', uniform=True,
                seed=seed, verbose=False, logname='basic_lambdamin(u)')
# **lambdamin**: same with empirical context distribution
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='maxlambdamin', uniform=False,
                seed=seed, verbose=False, logname='basic_lambdamin')
# **design**: select representation with largest minimum eigenvalue of design matrix
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='design', uniform=False,
                seed=seed, verbose=False, logname='basic_design')
# **minbonus**: select representation with minimum optimistic bonus
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='minbonus', uniform=False,
                seed=seed, verbose=False, logname='basic_minbonus')
# **maximin**: use the tightest UCB for each arm
for seed in seeds:
    maximin_oful(problem, iterations, reg, noise, delta2, param_bound,
                 seed=seed, verbose=False, logname='basic_maximin')
nu.compare('../logs', [oracle, dummy, 'basic_maximin'],
           'cumregret', seeds)
# ## Effect of the Number of Representations
values = [2, 4, 8, 16, 32]
for n_reps in values:
reps = [r0]
for _ in range(n_reps - 1):
reps.append(derank_hls(r0, 1, transform=True, normalize=True))
problem = FiniteMultiBandit(nc, na, reps)
problem.reset()
delta2 = delta / n_reps
for seed in seeds:
maximin_oful(problem, iterations, reg, noise, delta2, param_bound,
seed=seed, verbose=False, logname='nreps(%d)_maximin' % n_reps)
nu.compare('../logs', ['nreps(%d)_maximin' % i for i in values], 'cumregret', seeds)
# ## Varying Dimension
# +
# Build non-HLS representations of every dimension from 2 to dim-1 by
# reducing dimension and deranking; then add one deranked full-dim and
# the original HLS representation.
values = range(2, dim)
reps = []
for d in values:
    r = reduce_dim(r0, d, transform=True, normalize=True)
    r = derank_hls(r, 1)
    assert r==r0          # still equivalent to the original problem
    assert r.dim == d
    reps.append(r)
r = derank_hls(r0, 1, transform=True, normalize=True)
reps.append(r)
reps.append(r0)
for r in reps:
    print('%d: %d' % (r.dim, hls_rank(r)))
problem = FiniteMultiBandit(nc, na, reps)
# -
# OFUL on every fixed representation except the last (the HLS one).
for i in range(len(reps)-1):
    problem.select_rep(i)
    for seed in seeds:
        oful(problem, iterations, reg, noise, delta, param_bound, seed=seed, verbose=False,
             logname='dims_oful(%d)' % reps[i].dim)
nu.compare('../logs', ['dims_oful(%d)' % d for d in range(2, dim+1)] + ['basic_oful(5)'], 'cumregret', seeds)
# Rank the fixed-representation runs by cumulative regret; keep the winner.
ranking = nu.tournament('../logs', ['dims_oful(%d)' % d for d in range(2, dim+1)], 'cumregret', seeds)
best = ranking[0][0]
ranking
# +
# Model-selection strategies on the varying-dimension problem.
problem.reset()
delta2 = delta / len(reps)  # union bound over representations
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='maxlambdamin', uniform=True,
                seed=seed, verbose=False, logname='dims_lambdamin(u)')
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='maxlambdamin', uniform=False,
                seed=seed, verbose=False, logname='dims_lambdamin')
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='design', uniform=False,
                seed=seed, verbose=False, logname='dims_design')
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='minbonus', uniform=False,
                seed=seed, verbose=False, logname='dims_minbonus')
for seed in seeds:
    maximin_oful(problem, iterations, reg, noise, delta2, param_bound,
                 seed=seed, verbose=False, logname='dims_maximin')
# -
nu.compare('../logs', [best, 'dims_oful(5)', 'dims_lambdamin', 'dims_design', 'dims_maximin'], 'cumregret', seeds)
nu.compare('../logs', [best, 'dims_design', 'dims_maximin'], 'cumregret', seeds)
# ## Non-Uniform Contexts
# Basic problem, but with non-uniform multinomial context distribution
# +
reps = [derank_hls(r0, i, transform=True, normalize=True) for i in range(1, dim)] #Random transformation preserving equivalence
reps.append(r0)
for i, r in enumerate(reps[:-1]):
    assert r == reps[-1] #All representations are equivalent
    assert not np.allclose(r._param, reps[-1]._param) #But with different parameters!
    assert not is_hls(r) #Only the original one is HLS
    assert spans(r) #All representations still span R^d
#Build multi-representation problem
# Quadratically increasing (then normalized) context probabilities.
cprobs = np.arange(1, nc+1)**2#np.random.uniform(size=nc)
cprobs = cprobs / np.sum(cprobs)
problem = FiniteMultiBandit(nc, na, reps, context_probs=cprobs)
# -
plt.bar(range(nc), cprobs)
# HLS eigenvalue under the uniform vs. the skewed context distribution.
hls_lambda(r0)
hls_lambda(r0, cprobs)
# +
# Fixed-representation baselines: last rep is the HLS oracle, second-to-last
# the non-HLS dummy.
problem.select_rep(-1)
for seed in seeds:
    oful(problem, iterations, reg, noise, delta, param_bound, seed=seed, verbose=False, logname='context_oracle')
problem.select_rep(-2)
for seed in seeds:
    oful(problem, iterations, reg, noise, delta, param_bound, seed=seed, verbose=False, logname='context_dummy')
problem.reset()
delta2 = delta / len(reps)  # union bound over representations
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='maxlambdamin', uniform=True,
                seed=seed, verbose=False, logname='context_lambdamin(u)')
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='maxlambdamin', uniform=False,
                seed=seed, verbose=False, logname='context_lambdamin')
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='design', uniform=False,
                seed=seed, verbose=False, logname='context_design')
for seed in seeds:
    select_oful(problem, iterations, reg, noise, delta2, param_bound,
                rule='minbonus', uniform=False,
                seed=seed, verbose=False, logname='context_minbonus')
for seed in seeds:
    maximin_oful(problem, iterations, reg, noise, delta2, param_bound,
                 seed=seed, verbose=False, logname='context_maximin')
# -
nu.compare('../logs', ['context_oracle', 'context_dummy', 'context_lambdamin(u)', 'context_lambdamin',
                       'context_design', 'context_maximin'], 'cumregret', seeds)
# ## Composing Representations
# Each representation fuses a different pair of columns of r0, so none is
# HLS on its own but they may be combined.
reps = [fuse_columns(r0, [0,1]),
        fuse_columns(r0, [2,4]),
        fuse_columns(r0, [4,0]),
        fuse_columns(r0, [4,1]),
        fuse_columns(r0, [0,2])]
for r in reps:
    print(hls_rank(r))
problem = FiniteMultiBandit(nc, na, reps)
hls_rank_combined(problem)
# +
# Fixed-representation OFUL baselines.
for i in range(len(reps)):
    problem.select_rep(i)
    for seed in seeds:
        oful(problem, iterations, reg, noise, delta, param_bound, seed=seed, verbose=False,
             logname='compose_oful(%i)' % i)
problem.reset()
delta2 = delta / len(reps)  # union bound over representations
# The selection-rule runs below are disabled (kept for reference).
"""
for seed in seeds:
select_oful(problem, iterations, reg, noise, delta2, param_bound,
rule='maxlambdamin', uniform=True,
seed=seed, verbose=False, logname='compose_lambdamin(u)')
for seed in seeds:
select_oful(problem, iterations, reg, noise, delta2, param_bound,
rule='maxlambdamin', uniform=False,
seed=seed, verbose=False, logname='compose_lambdamin')
for seed in seeds:
select_oful(problem, iterations, reg, noise, delta2, param_bound,
rule='design', uniform=False,
seed=seed, verbose=False, logname='compose_design')
for seed in seeds:
select_oful(problem, iterations, reg, noise, delta2, param_bound,
rule='minbonus', uniform=False,
seed=seed, verbose=False, logname='compose_minbonus')
"""
for seed in seeds:
    maximin_oful(problem, iterations, reg, noise, delta2, param_bound,
                 seed=seed, verbose=False, logname='compose_maximin')
# -
nu.compare('../logs', ['compose_oful(%i)' % i for i in range(len(reps))] + ['compose_maximin'], 'cumregret', seeds)
| notebooks/Maximin LinUCB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # K Nearest Neighbors Project
#
# Welcome to the KNN Project! This will be a simple project very similar to the lecture, except you'll be given another data set. Go ahead and just follow the directions below.
# ## Import Libraries
# **Import pandas,seaborn, and the usual libraries.**
# ## Get the Data
# **Read the 'KNN_Project_Data' csv file into a dataframe.**
# **Check the head of the dataframe.**
# # EDA
#
# Since this data is artificial, we'll just do a large pairplot with seaborn.
#
# **Use seaborn on the dataframe to create a pairplot with the hue indicated by the TARGET CLASS column.**
# # Standardize the Variables
#
# Time to standardize the variables.
#
# ** Import StandardScaler from Scikit learn.**
# ** Create a StandardScaler() object called scaler.**
# ** Fit scaler to the features.**
# **Use the .transform() method to transform the features to a scaled version.**
# **Convert the scaled features to a dataframe and check the head of this dataframe to make sure the scaling worked.**
# # Train Test Split
#
# **Use train_test_split to split your data into a training set and a testing set.**
# # Using KNN
#
# **Import KNeighborsClassifier from scikit learn.**
# **Create a KNN model instance with n_neighbors=1**
# **Fit this KNN model to the training data.**
# # Predictions and Evaluations
# Let's evaluate our KNN model!
# **Use the predict method to predict values using your KNN model and X_test.**
# ** Create a confusion matrix and classification report.**
# # Choosing a K Value
# Let's go ahead and use the elbow method to pick a good K Value!
#
# ** Create a for loop that trains various KNN models with different k values, then keep track of the error_rate for each of these models with a list. Refer to the lecture if you are confused on this step.**
# **Now create the following plot using the information from your for loop.**
# ## Retrain with new K Value
#
# **Retrain your model with the best K value (up to you to decide what you want) and re-do the classification report and the confusion matrix.**
# # Great Job!
#
#
#
| Scikit/03-K-Nearest-Neighbors/02-K Nearest Neighbors Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.5 64-bit (''pytorchgpu'': conda)'
# language: python
# name: python37564bitpytorchgpucondac067e2ac54c5476d98dad29e1b14e4ac
# ---
# # WeightViz Examples
# %matplotlib inline
from Visualizer.Brain import Brain
from Libraries.Enums import NNLibs as Libs
# 1. Create Brain object (Specify which library you want to use as a parameter)
# 2. Use brain's visualize() function
#
# Parameters for visualize():
# * weights: String, path for the model
# * load_from_path: Boolean
# * loss_: Loss
# * n_iter_: Number of iteration(epoch)
# * interval: Interval to pause plotting for live plotting
# ## PyTorch
# + tags=[]
# Visualize the weights of a PyTorch model saved on disk.
brain = Brain(nn_lib=Libs.Torch)
brain.visualize("Models/sample_5", load_from_path=True)
# -
# ## PyTorch - Live Visualisation
# ### To visualize while training, we use visualise function in the training loop
# + tags=[]
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# Device configuration: prefer GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters for the toy regression network below.
input_size = 32
hidden_size = 32
num_classes = 1
num_epochs = 500
learning_rate = 0.001
# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    """Three-layer MLP: Linear -> ReLU -> Linear -> Sigmoid -> Linear."""

    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        # Attribute names are kept as-is so saved state_dicts stay compatible.
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.sigmoid = nn.Sigmoid()
        self.fc3 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Run the input through the two hidden layers and the output layer."""
        hidden = self.relu(self.fc1(x))
        hidden = self.sigmoid(self.fc2(hidden))
        return self.fc3(hidden)
model = NeuralNet(input_size, hidden_size, num_classes).to(device)
# Loss and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Random regression data: 100 samples, 32 features -> 1 target.
X_train = torch.randn(100,32).to(device)
Y_train = torch.randn(100,1).to(device)
# Initiate visualizer
brain = Brain(nn_lib=Libs.Torch)
# Train the model, refreshing the weight plot every 10 epochs.
for epoch in range(num_epochs):
    #print("epoch : ",epoch)
    # Forward pass
    outputs = model(X_train)
    loss = criterion(outputs, Y_train)
    # Backward and optimize
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % 10 == 0:
        # Plot Brain (live visualization of current weights)
        brain.visualize(model.state_dict(),loss_=loss,n_iter_=epoch,interval=1)
# -
# ## SimplyNet
# +
# Disabled example kept for reference (SimplyNet backend).
'''
brain = Brain(nn_lib=Libs.SimplyNet)
brain.visualize("Models/random_weight_2_4_4_1")
'''
# -
# ## SKLearn
# + tags=[]
import numpy as np
from sklearn.neural_network import MLPRegressor as MLP
# Tiny random regression problem (2 samples, 2 features) — just enough to
# produce a fitted network whose weights can be visualized.
X_train = np.random.rand(2,2)
y_train = np.random.rand(2,)
my_hidden_layer_sizes = (4,4)
XOR_MLP = MLP(
    activation='tanh',
    alpha=0.99,
    batch_size='auto',
    beta_1=0.9,
    beta_2=0.999,
    early_stopping=False,
    epsilon=1e-08,
    hidden_layer_sizes= my_hidden_layer_sizes,
    learning_rate='constant',
    learning_rate_init = 0.1,
    max_iter=5000,
    momentum=0.5,
    nesterovs_momentum=True,
    power_t=0.5,
    random_state=0,
    shuffle=True,
    solver='sgd',
    tol=0.0001,
    validation_fraction=0.1,
    verbose=False,
    warm_start=False)
XOR_MLP.fit(X_train,y_train)
# Read layer weights and bias weights together
weights = XOR_MLP.coefs_
biases_weights = XOR_MLP.intercepts_
brain_MLP = Brain(Libs.Sklearn)
brain_MLP.visualize([weights,biases_weights],load_from_path=True)
# -
| Examples/example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
def time_converter(year,idx,total_timestep=35040):
    """Convert a timestep index within a year to a pandas Timestamp.

    Parameters
    ----------
    year : int
        Year to be converted.
    idx : int
        Index of the timestep in that year, starting from zero.
    total_timestep : int, optional
        Total number of timesteps in the year (default 35040, i.e.
        15-minute resolution over a 365-day year).

    Returns
    -------
    pandas.Timestamp
        The time corresponding to ``idx``.
    """
    # total_timestep intervals need total_timestep + 1 grid points, the
    # last one falling exactly on Jan 1 of the following year.
    year_grid = pd.date_range(
        start='1/1/{0}'.format(year),
        end='1/1/{0}'.format(year + 1),
        periods=total_timestep + 1,
    )
    return year_grid[idx]
# +
def price_generator(year,total_timestep=35040):
    """Build a time-of-use electricity price series for one year.

    Parameters
    ----------
    year : int
        Year for which to generate prices.
    total_timestep : int, optional
        Number of timesteps in the year (default 35040, i.e. 15-minute
        resolution over a 365-day year).

    Returns
    -------
    pandas.DataFrame
        Columns ``date_time`` and ``e_price``.  The frame has
        ``total_timestep + 1`` rows; the final row keeps the initial price
        of 0 (preserved for backward compatibility with the original output).
    """
    def _tou_price(hour):
        # Time-of-use tariff: peak 16-21h, shoulder 14-16h and 21-23h,
        # off-peak otherwise (0-14h and 23-24h both fall through to 8).
        if 16 <= hour < 21:
            return 25
        if 14 <= hour < 16 or 21 <= hour < 23:
            return 16
        return 8

    index = pd.date_range(start='1/1/{0}'.format(year), end='1/1/{0}'.format(year+1), periods=total_timestep+1)
    e_price_df = pd.DataFrame(index, columns=['date_time'])
    e_price_df['e_price'] = 0
    # Single vectorised assignment replaces the original per-row .loc loop
    # (which issued total_timestep individual frame writes).  Note that
    # .loc label slicing is inclusive on both ends, so this fills rows
    # 0 .. total_timestep-1 and leaves the final row at 0, as before.
    e_price_df.loc[:total_timestep - 1, 'e_price'] = [
        _tou_price(h) for h in index.hour[:total_timestep]
    ]
    return e_price_df
# -
# Generate and persist one tariff CSV per year of interest.
e_price_df_2015 = price_generator(2015,total_timestep=35040)
e_price_df_2015.to_csv('e_price_2015.csv', index=False)
e_price_df_2013 = price_generator(2013,total_timestep=35040)
e_price_df_2013.to_csv('e_price_2013.csv', index=False)
e_price_df_2014 = price_generator(2014,total_timestep=35040)
e_price_df_2014.to_csv('e_price_2014.csv', index=False)
e_price_df_2017 = price_generator(2017,total_timestep=35040)
e_price_df_2017.to_csv('e_price_2017.csv', index=False)
| e_tariffs/.ipynb_checkpoints/generate_e_tarrifs-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Cobaya-env
# language: python
# name: cobaya
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import sys, os
os.environ['COBAYA_NOMPI'] = 'True'
sys.path.append('/global/cscratch1/sd/sfschen/boss_analysis_joint/lss_likelihood/')
# %matplotlib inline
# +
from cobaya.yaml import yaml_load_file
from cobaya.samplers.mcmc import plot_progress
#
from getdist.mcsamples import MCSamplesFromCobaya
from getdist.mcsamples import loadMCSamples
import getdist.plots as gdplt
from cobaya.model import get_model
#
import os
# -
# Configuration Space vs. Fourier
# +
# Power-spectrum chains for the two redshift bins (first half discarded
# as burn-in).
pkz1_info = yaml_load_file("yamls/pk_z1_lnA.yaml")
pkz1_samples = loadMCSamples(os.path.abspath(pkz1_info["output"]),\
                             settings={'ignore_rows':0.5, 'contours': [0.68, 0.95]})
# +
pkz3_info = yaml_load_file("yamls/pk_z3_lnA.yaml")
pkz3_samples = loadMCSamples(os.path.abspath(pkz3_info["output"]),\
                             settings={'ignore_rows':0.5, 'contours': [0.68, 0.95]})
# -
# Corresponding correlation-function chains, loaded directly by path.
xi1path = '/global/cscratch1/sd/mwhite/Fitting/CobayaLSS/chains/boss_s01_z038_lcdm_xi'
xiz1_samples = loadMCSamples(xi1path,\
                             settings={'ignore_rows':0.5, 'contours': [0.68, 0.95]})
xi3path = '/global/cscratch1/sd/mwhite/Fitting/CobayaLSS/chains/boss_s01_z061_lcdm_xi'
xiz3_samples = loadMCSamples(xi3path,\
                             settings={'ignore_rows':0.5, 'contours': [0.68, 0.95]})
# +
# Plot Together!
# NOTE(review): the first get_subplot_plotter() result is immediately
# overwritten by the width_inch=4 call — the first line looks redundant.
gdplot = gdplt.get_subplot_plotter()
gdplot = gdplt.get_subplot_plotter(width_inch=4)
gdplot.settings.axes_fontsize=12
gdplot.settings.legend_fontsize=12
# Triangle plot comparing P_ell (filled) vs xi_ell (dashed) per redshift bin.
gdplot.triangle_plot([pkz1_samples, xiz1_samples, pkz3_samples, xiz3_samples],\
                     ["omegam","H0","sigma8"],\
                     filled=[True,False,True,False],\
                     contour_lws=1.5,\
                     contour_args=[{'color':'C0','ls':'-'},{'color':'C0','ls':'--'},\
                                   {'color':'C1','ls':'-'},{'color':'C1','ls':'--'}],\
                     line_args=[{'color':'C0','ls':'-'},{'color':'C0','ls':'--'},
                                {'color':'C1','ls':'-'},{'color':'C1','ls':'--'}],
                     linewidth_contour=3,\
                     legend_labels = [r"$\bf{z1}: P_\ell$",r"$\bf{z1}: \xi_\ell$",r"$\bf{z3}: P_\ell$",r"$\bf{z3}: \xi_\ell$"])
plt.savefig('Figures/pkxi_consistency.pdf')
# -
| boss_analysis/boss_analysis_joint/PaperPlots_Consistency_xiP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + hide_input=false
import requests
import json
import numpy as np
import pandas as pd
# -
##payload = {'q': "select * from water_risk_indicators where indicator = 'water_stress' and model in ('bau', 'historic') and period='year' and type='absolute' and basinid = 7664 order by year asc"}
# Fetch up to 1000 vocabulary records from the Resource Watch API.
r = requests.get('http://api.resourcewatch.org/vocabulary?page[size]=1000')
#print(json.dumps(r.json()['data'],sort_keys=True, indent=1))
datasetCollection = np.array(r.json()['data'])
# NOTE(review): r.json() is parsed twice here (above and in the loop) —
# the datasetCollection array could be reused instead.
print('current list of vocabularies that exists on the API: ')
for value in r.json()['data']:
    print(value['id'])
# ## Vocabularies that belong to RW
# ```json
# {
# "forest": {
# "tags": [
# "forest_change",
# "forest_loss",
# "forest_gain",
# "forest_cover",
# "fire",
# "burn",
# "deforestation",
# "degradation",
# "restoration",
# "land_cover",
# "intact_forest",
# "logging",
# "mangrove"
# ]
# },
# "biodiversity": {
# "tags": [
# "marine_life",
# "coral_reef",
# "bleaching",
# "ocean",
# "hotspot",
# "risk",
# "habitat",
# "species",
# "endangered",
# "conservation",
# "human_impact",
# "protected_area",
# "reserve",
# "park",
# "fish",
# "fishing",
# "illegal",
# "poaching",
# "bleaching",
# "hunting",
# "fisheries",
# "ecosystem",
# "amphibian",
# "commercial",
# "biome",
# "mammal",
# "mangrove",
# "plant",
# "bird",
# "intact",
# "habitat_loss",
# "extinction",
# "range"
# ]
# },
# "reference_map": {
# "tags": [
# "political",
# "boundaries",
# "satellite",
# "imagery",
# "elevation",
# "land_cover",
# "land_use",
# "land_classification",
# "land_type",
# "land_units",
# "biome",
# "terrain",
# "slope",
# "road"
# ]
# },
# "disasters": {
# "tags": [
# "natural_disaster",
# "earthquake",
# "seismic",
# "flood",
# "volcano",
# "volcanic",
# "eruption",
# "drought",
# "fire",
# "outbreak",
# "cold_wave",
# "heat_wave",
# "storm",
# "landslide",
# "hazard",
# "risk",
# "stress",
# "vulnerability",
# "hurricane",
# "typhoon",
# "extreme_event",
# "explosion"
# ]
# },
# "commerce": {
# "tags": [
# "commodities",
# "supply_chain",
# "supply",
# "demand",
# "trade",
# "investment",
# "regulation",
# "concessions",
# "agreement",
# "transparency",
# "coal",
# "uranium",
# "bauxite",
# "sulfur",
# "lithium",
# "copper",
# "iron_ore",
# "silicon",
# "mineral",
# "network",
# "shipping",
# "port",
# "gdp",
# "gross_domestic_product",
# "gni",
# "gross_national_income",
# "limit",
# "infrastructure",
# "route",
# "market",
# "production",
# "resource",
# "economy",
# "recycle",
# "waste",
# "material_flows",
# "price"
# ]
# },
# "food": {
# "tags": [
#
# "nutrition",
# "malnutrition",
# "fertilizer",
# "hunger",
# "famine",
# "food_security",
# "food_supply",
# "production",
# "resource",
# "crop_health",
# "crop_yield",
# "livestock",
# "food_waste",
# "diets",
# "malnutrition",
# "food_production",
# "food_consumption",
# "soil",
# "cropland",
# "farmland",
# "agriculture",
# "irrigation",
# "irrigated",
# "rain_fed",
# "food_price",
# "price",
# "crop",
# "vegetation",
# "vegetation_health",
# "crop_health",
# "organic",
# "meat",
# "beef",
# "lamb",
# "crop_calendar",
# "harvest",
# "ndvi",
# "anomaly",
# "aquaculture",
# "fish",
# "corn",
# "maize",
# "wheat",
# "rice",
# "soy",
# "palm_oil",
# "shrimp",
# "pesticide",
# "yield_gap"
# ]
# },
# "dataset_type": {
# "tags": [
# "raster",
# "vector",
# "table",
# "geospatial",
# "non_geospatial"
# ]
# },
# "energy": {
# "tags": [
# "energy_production",
# "production",
# "electricity",
# "energy_efficiency",
# "renewable",
# "nonrenewable",
# "power_line",
# "emissions",
# "power_plant",
# "solar",
# "wind",
# "geothermal",
# "hydroelectric",
# "hydropower",
# "nuclear",
# "biofuel",
# "coal",
# "oil",
# "petroleum",
# "energy_hazard",
# "spill",
# "chemical",
# "pipeline",
# "nighttime_lights",
# "uranium",
# "thorium",
# "access",
# "energy_access",
# "oil_refineries",
# "oil_reserves",
# "reserve",
# "flare",
# "flaring",
# "natural_gas",
# "shale",
# "power",
# "potential",
# "radioactive",
# "resource"
# ]
# },
# "function": {
# "tags": [
# "planet_pulse",
# "explore",
# "alert",
# "insight",
# "dashboard",
# "timeseries"
# ]
# },
# "water": {
# "tags": [
# "water_avaliablility",
# "water_extent",
# "water_scarcity",
# "water_demand",
# "water_pollution",
# "flood",
# "drought",
# "precipitation",
# "rainfall",
# "rain",
# "water_quality",
# "nitrogen",
# "phosphorus",
# "dissolved_oxygen",
# "temperature",
# "total_suspended_solids",
# "water_scarcity",
# "water_risk",
# "water_stress",
# "drinking_water",
# "sewage",
# "wastewater",
# "treatment",
# "nutrient",
# "water_access",
# "access",
# "water_consumption",
# "infrastructure",
# "change",
# "seasonality",
# "greywater",
# "fresh_water",
# "dam",
# "reservoir",
# "water_source",
# "sedimentation",
# "sediment",
# "erosion",
# "pollution",
# "surface_water",
# "lake",
# "river",
# "sea",
# "soil_moisture",
# "ocean",
# "watershed",
# "drainage_basin",
# "catchment",
# "carbon",
# "organic",
# "hydrologic",
# "resource",
# "clean_water",
# "silica",
# "hab",
# "harmful_algal_blooms",
# "hydropower",
# "anomaly",
# "water_conflict"
# ]
# },
# "frequency": {
# "tags": [
# "weekly",
# "near_real-time",
# "daily",
# "weekly",
# "monthly",
# "annual",
# "projection"
# ]
# },
# "society": {
# "tags": [
# "people",
# "population",
# "demographic",
# "education",
# "educate",
# "literacy",
# "illiterate",
# "school",
# "primary",
# "secondary",
# "youth",
# "children",
# "adolescents",
# "health",
# "governance",
# "government",
# "development",
# "political_stability",
# "human_rights",
# "land_rights",
# "refugees",
# "conflict",
# "protest",
# "migrant",
# "justice",
# "information",
# "participation",
# "security",
# "accountability",
# "stability",
# "corruption",
# "rule_of_law",
# "child_mortality",
# "infant_mortality",
# "poverty",
# "cooking_fuel",
# "sanitation",
# "water",
# "electricity",
# "floor_materials",
# "assets",
# "acquisition",
# "adaptation",
# "capacity",
# "aid",
# "foreign_aid",
# "tone",
# "tenure",
# "sustainable",
# "asylum",
# "displaced",
# "land_grab",
# "land_ownership",
# "migration",
# "mortality",
# "sdgs",
# "sustainable_development_goals",
# "resilience",
# "fragility",
# "freedom",
# "gdp",
# "gross_domestic_product",
# "gini",
# "inequality",
# "life_expectancy",
# "sensitivity",
# "response",
# "crisis",
# "economic",
# "income",
# "fatalities",
# "death",
# "asylum",
# "violence",
# "peace",
# "gender",
# "women",
# "girls",
# "discrimination"
# ]
# },
# "location": {
# "tags": [
# "global",
# "quasi-global",
# "tropic",
# "temperate",
# "boreal",
# "arctic",
# "antarctic",
# "afghanistan",
# "aland",
# "albania",
# "algeria",
# "american_samoa",
# "andorra",
# "angola",
# "anguilla",
# "antarctica",
# "antigua_and_barb.",
# "argentina",
# "armenia",
# "aruba",
# "australia",
# "austria",
# "azerbaijan",
# "bahamas",
# "bahrain",
# "bangladesh",
# "barbados",
# "belarus",
# "belgium",
# "belize",
# "benin",
# "bermuda",
# "bhutan",
# "bolivia",
# "bosnia_and_herz.",
# "botswana",
# "brazil",
# "br._indian_ocean_ter.",
# "brunei",
# "bulgaria",
# "burkina_faso",
# "burundi",
# "cambodia",
# "cameroon",
# "canada",
# "cape_verde",
# "cayman_is.",
# "central_african_rep.",
# "chad",
# "chile",
# "china",
# "colombia",
# "comoros",
# "congo",
# "dem._rep._congo",
# "cook_is.",
# "costa_rica",
# "cte_d'ivoire",
# "croatia",
# "cuba",
# "curaao",
# "cyprus",
# "czech_rep.",
# "denmark",
# "djibouti",
# "dominica",
# "dominican_rep.",
# "ecuador",
# "egypt",
# "el_salvador",
# "eq._guinea",
# "eritrea",
# "estonia",
# "ethiopia",
# "falkland_is.",
# "faeroe_is.",
# "fiji",
# "finland",
# "france",
# "fr._polynesia",
# "fr._s._antarctic_lands",
# "gabon",
# "gambia",
# "georgia",
# "germany",
# "ghana",
# "gibraltar",
# "greece",
# "greenland",
# "grenada",
# "guam",
# "guatemala",
# "guernsey",
# "guinea",
# "guinea-bissau",
# "guyana",
# "haiti",
# "heard_i._and_mcdonald_is.",
# "vatican",
# "honduras",
# "hong_kong",
# "hungary",
# "iceland",
# "india",
# "indonesia",
# "iran",
# "iraq",
# "ireland",
# "isle_of_man",
# "israel",
# "italy",
# "jamaica",
# "japan",
# "jersey",
# "jordan",
# "kazakhstan",
# "kenya",
# "kiribati",
# "dem._rep._korea",
# "korea",
# "kuwait",
# "kyrgyzstan",
# "lao_pdr",
# "latvia",
# "lebanon",
# "lesotho",
# "liberia",
# "libya",
# "liechtenstein",
# "lithuania",
# "luxembourg",
# "macao",
# "macedonia",
# "madagascar",
# "malawi",
# "malaysia",
# "maldives",
# "mali",
# "malta",
# "marshall_is.",
# "mauritania",
# "mauritius",
# "mexico",
# "micronesia",
# "moldova",
# "monaco",
# "mongolia",
# "montenegro",
# "montserrat",
# "morocco",
# "mozambique",
# "myanmar",
# "namibia",
# "nauru",
# "nepal",
# "netherlands",
# "new_caledonia",
# "new_zealand",
# "nicaragua",
# "niger",
# "nigeria",
# "niue",
# "norfolk_island",
# "n._mariana_is.",
# "norway",
# "oman",
# "pakistan",
# "palau",
# "palestine",
# "panama",
# "papua_new_guinea",
# "paraguay",
# "peru",
# "philippines",
# "pitcairn_is.",
# "poland",
# "portugal",
# "puerto_rico",
# "qatar",
# "romania",
# "russia",
# "rwanda",
# "st-barthlemy",
# "saint_helena",
# "st._kitts_and_nevis",
# "saint_lucia",
# "st-martin",
# "st._pierre_and_miquelon",
# "st._vin._and_gren.",
# "samoa",
# "san_marino",
# "so_tom_and_principe",
# "saudi_arabia",
# "senegal",
# "serbia",
# "seychelles",
# "sierra_leone",
# "slovakia",
# "singapore",
# "sint_maarten",
# "slovenia",
# "solomon_is.",
# "somalia",
# "south_africa",
# "s._geo._and_s._sandw._is.",
# "s._sudan",
# "spain",
# "sri_lanka",
# "sudan",
# "suriname",
# "swaziland",
# "sweden",
# "switzerland",
# "syria",
# "taiwan",
# "tajikistan",
# "tanzania",
# "thailand",
# "timor-leste",
# "togo",
# "tonga",
# "trinidad_and_tobago",
# "tunisia",
# "turkey",
# "turkmenistan",
# "turks_and_caicos_is.",
# "tuvalu",
# "uganda",
# "ukraine",
# "united_arab_emirates",
# "united_kingdom",
# "united_states",
# "u.s._minor_outlying_is.",
# "uruguay",
# "uzbekistan",
# "vanuatu",
# "venezuela",
# "vietnam",
# "british_virgin_is.",
# "u.s._virgin_is.",
# "wallis_and_futuna_is.",
# "w._sahara",
# "yemen",
# "zambia",
# "zimbabwe",
# "coral_sea_islands_territory",
# "republic_of_kosovo"
# ]
# },
# "resolution": {
# "tags": [
# "national",
# "subnational"
# ]
# },
# "cities": {
# "tags": [
# "traffic",
# "congestion",
# "fatalities",
# "death",
# "road_safety",
# "infrastructure",
# "road",
# "commute",
# "traffic_feed",
# "transport",
# "transportation",
# "rapid_transport",
# "bus",
# "train",
# "rail",
# "public_transport",
# "cycle",
# "cycling",
# "bike",
# "accessibility",
# "urban",
# "urban_extent",
# "settlement",
# "informal_settlement",
# "city_extent",
# "urban_expansion",
# "park",
# "greenspace",
# "urban_planning",
# "nightlights",
# "air_quality",
# "no2",
# "nitrogen_dioxide",
# "emissions",
# "air_pollution",
# "pollution",
# "co",
# "carbon_monoxide",
# "bc",
# "black_carbon",
# "o3",
# "ozone",
# "so2",
# "sulfur_dioxide",
# "wind",
# "wind_speed",
# "wind_direction",
# "wastewater",
# "treatment",
# "sewage",
# "plastic",
# "public_service",
# "affiliation",
# "membership",
# "zone",
# "zoning",
# "waste",
# "commercial",
# "building",
# "particulate_matter",
# "pm2.5",
# "heat_island",
# "height",
# "slum",
# "resilience",
# "environmental_health",
# "environmental_quality",
# "employment"
# ]
# },
# "climate": {
# "tags": [
# "ghg",
# "greenhouse_gas",
# "carbon_dioxide",
# "temperature",
# "sea_ice",
# "ice_shelf",
# "sea_level",
# "snow",
# "snow_cover",
# "ice",
# "glacier",
# "climate_change",
# "emissions",
# "ph",
# "sea_surface_temperature",
# "carbon",
# "biomass",
# "carbon_storage",
# "ocean",
# "vulnerability",
# "coastal",
# "acidification",
# "weather",
# "oceans",
# "carbon_sinks",
# "extreme_event",
# "sea_level",
# "sea_level_rise",
# "slr",
# "adaptation",
# "hurricane",
# "storms",
# "precipitation",
# "methane",
# "storm_intensity",
# "bioclimate",
# "ch4",
# "biome",
# "nitrous_oxide",
# "n2o",
# "indcs",
# "intended_nationally_determined_contributions",
# "peatland",
# "ndcs",
# "nationally_determined_contributions",
# "greywater",
# "groundwater",
# "resource",
# "anomaly"
# ]
# }
# }
# ```
text = '''{
"forest": {
"tags": [
"forest_change",
"forest_loss",
"forest_gain",
"forest_cover",
"fire",
"burn",
"deforestation",
"degradation",
"restoration",
"land_cover",
"intact_forest",
"logging",
"mangrove"
]
},
"biodiversity": {
"tags": [
"marine_life",
"coral_reef",
"bleaching",
"ocean",
"hotspot",
"risk",
"habitat",
"species",
"endangered",
"conservation",
"human_impact",
"protected_area",
"reserve",
"park",
"fish",
"fishing",
"illegal",
"poaching",
"bleaching",
"hunting",
"fisheries",
"ecosystem",
"amphibian",
"commercial",
"biome",
"mammal",
"mangrove",
"plant",
"bird",
"intact",
"habitat_loss",
"extinction",
"range"
]
},
"reference_map": {
"tags": [
"political",
"boundaries",
"satellite",
"imagery",
"elevation",
"land_cover",
"land_use",
"land_classification",
"land_type",
"land_units",
"biome",
"terrain",
"slope",
"road"
]
},
"disasters": {
"tags": [
"natural_disaster",
"earthquake",
"seismic",
"flood",
"volcano",
"volcanic",
"eruption",
"drought",
"fire",
"outbreak",
"cold_wave",
"heat_wave",
"storm",
"landslide",
"hazard",
"risk",
"stress",
"vulnerability",
"hurricane",
"typhoon",
"extreme_event",
"explosion"
]
},
"commerce": {
"tags": [
"commodities",
"supply_chain",
"supply",
"demand",
"trade",
"investment",
"regulation",
"concessions",
"agreement",
"transparency",
"coal",
"uranium",
"bauxite",
"sulfur",
"lithium",
"copper",
"iron_ore",
"silicon",
"mineral",
"network",
"shipping",
"port",
"gdp",
"gross_domestic_product",
"gni",
"gross_national_income",
"limit",
"infrastructure",
"route",
"market",
"production",
"resource",
"economy",
"recycle",
"waste",
"material_flows",
"price"
]
},
"food": {
"tags": [
"nutrition",
"malnutrition",
"fertilizer",
"hunger",
"famine",
"food_security",
"food_supply",
"production",
"resource",
"crop_health",
"crop_yield",
"livestock",
"food_waste",
"diets",
"malnutrition",
"food_production",
"food_consumption",
"soil",
"cropland",
"farmland",
"agriculture",
"irrigation",
"irrigated",
"rain_fed",
"food_price",
"price",
"crop",
"vegetation",
"vegetation_health",
"crop_health",
"organic",
"meat",
"beef",
"lamb",
"crop_calendar",
"harvest",
"ndvi",
"anomaly",
"aquaculture",
"fish",
"corn",
"maize",
"wheat",
"rice",
"soy",
"palm_oil",
"shrimp",
"pesticide",
"yield_gap"
]
},
"dataset_type": {
"tags": [
"raster",
"vector",
"table",
"geospatial",
"non_geospatial"
]
},
"energy": {
"tags": [
"energy_production",
"production",
"electricity",
"energy_efficiency",
"renewable",
"nonrenewable",
"power_line",
"emissions",
"power_plant",
"solar",
"wind",
"geothermal",
"hydroelectric",
"hydropower",
"nuclear",
"biofuel",
"coal",
"oil",
"petroleum",
"energy_hazard",
"spill",
"chemical",
"pipeline",
"nighttime_lights",
"uranium",
"thorium",
"access",
"energy_access",
"oil_refineries",
"oil_reserves",
"reserve",
"flare",
"flaring",
"natural_gas",
"shale",
"power",
"potential",
"radioactive",
"resource"
]
},
"function": {
"tags": [
"planet_pulse",
"explore",
"alert",
"insight",
"dashboard",
"timeseries"
]
},
"water": {
"tags": [
"water_avaliablility",
"water_extent",
"water_scarcity",
"water_demand",
"water_pollution",
"flood",
"drought",
"precipitation",
"rainfall",
"rain",
"water_quality",
"nitrogen",
"phosphorus",
"dissolved_oxygen",
"temperature",
"total_suspended_solids",
"water_scarcity",
"water_risk",
"water_stress",
"drinking_water",
"sewage",
"wastewater",
"treatment",
"nutrient",
"water_access",
"access",
"water_consumption",
"infrastructure",
"change",
"seasonality",
"greywater",
"fresh_water",
"dam",
"reservoir",
"water_source",
"sedimentation",
"sediment",
"erosion",
"pollution",
"surface_water",
"lake",
"river",
"sea",
"soil_moisture",
"ocean",
"watershed",
"drainage_basin",
"catchment",
"carbon",
"organic",
"hydrologic",
"resource",
"clean_water",
"silica",
"hab",
"harmful_algal_blooms",
"hydropower",
"anomaly",
"water_conflict"
]
},
"frequency": {
"tags": [
"weekly",
"near_real-time",
"daily",
"weekly",
"monthly",
"annual",
"projection"
]
},
"society": {
"tags": [
"people",
"population",
"demographic",
"education",
"educate",
"literacy",
"illiterate",
"school",
"primary",
"secondary",
"youth",
"children",
"adolescents",
"health",
"governance",
"government",
"development",
"political_stability",
"human_rights",
"land_rights",
"refugees",
"conflict",
"protest",
"migrant",
"justice",
"information",
"participation",
"security",
"accountability",
"stability",
"corruption",
"rule_of_law",
"child_mortality",
"infant_mortality",
"poverty",
"cooking_fuel",
"sanitation",
"water",
"electricity",
"floor_materials",
"assets",
"acquisition",
"adaptation",
"capacity",
"aid",
"foreign_aid",
"tone",
"tenure",
"sustainable",
"asylum",
"displaced",
"land_grab",
"land_ownership",
"migration",
"mortality",
"sdgs",
"sustainable_development_goals",
"resilience",
"fragility",
"freedom",
"gdp",
"gross_domestic_product",
"gini",
"inequality",
"life_expectancy",
"sensitivity",
"response",
"crisis",
"economic",
"income",
"fatalities",
"death",
"asylum",
"violence",
"peace",
"gender",
"women",
"girls",
"discrimination"
]
},
"location": {
"tags": [
"global",
"quasi-global",
"tropic",
"temperate",
"boreal",
"arctic",
"antarctic",
"afghanistan",
"aland",
"albania",
"algeria",
"american_samoa",
"andorra",
"angola",
"anguilla",
"antarctica",
"antigua_and_barb.",
"argentina",
"armenia",
"aruba",
"australia",
"austria",
"azerbaijan",
"bahamas",
"bahrain",
"bangladesh",
"barbados",
"belarus",
"belgium",
"belize",
"benin",
"bermuda",
"bhutan",
"bolivia",
"bosnia_and_herz.",
"botswana",
"brazil",
"br._indian_ocean_ter.",
"brunei",
"bulgaria",
"burkina_faso",
"burundi",
"cambodia",
"cameroon",
"canada",
"cape_verde",
"cayman_is.",
"central_african_rep.",
"chad",
"chile",
"china",
"colombia",
"comoros",
"congo",
"dem._rep._congo",
"cook_is.",
"costa_rica",
"cte_d'ivoire",
"croatia",
"cuba",
"curaao",
"cyprus",
"czech_rep.",
"denmark",
"djibouti",
"dominica",
"dominican_rep.",
"ecuador",
"egypt",
"el_salvador",
"eq._guinea",
"eritrea",
"estonia",
"ethiopia",
"falkland_is.",
"faeroe_is.",
"fiji",
"finland",
"france",
"fr._polynesia",
"fr._s._antarctic_lands",
"gabon",
"gambia",
"georgia",
"germany",
"ghana",
"gibraltar",
"greece",
"greenland",
"grenada",
"guam",
"guatemala",
"guernsey",
"guinea",
"guinea-bissau",
"guyana",
"haiti",
"heard_i._and_mcdonald_is.",
"vatican",
"honduras",
"hong_kong",
"hungary",
"iceland",
"india",
"indonesia",
"iran",
"iraq",
"ireland",
"isle_of_man",
"israel",
"italy",
"jamaica",
"japan",
"jersey",
"jordan",
"kazakhstan",
"kenya",
"kiribati",
"dem._rep._korea",
"korea",
"kuwait",
"kyrgyzstan",
"lao_pdr",
"latvia",
"lebanon",
"lesotho",
"liberia",
"libya",
"liechtenstein",
"lithuania",
"luxembourg",
"macao",
"macedonia",
"madagascar",
"malawi",
"malaysia",
"maldives",
"mali",
"malta",
"marshall_is.",
"mauritania",
"mauritius",
"mexico",
"micronesia",
"moldova",
"monaco",
"mongolia",
"montenegro",
"montserrat",
"morocco",
"mozambique",
"myanmar",
"namibia",
"nauru",
"nepal",
"netherlands",
"new_caledonia",
"new_zealand",
"nicaragua",
"niger",
"nigeria",
"niue",
"norfolk_island",
"n._mariana_is.",
"norway",
"oman",
"pakistan",
"palau",
"palestine",
"panama",
"papua_new_guinea",
"paraguay",
"peru",
"philippines",
"pitcairn_is.",
"poland",
"portugal",
"puerto_rico",
"qatar",
"romania",
"russia",
"rwanda",
"st-barthlemy",
"saint_helena",
"st._kitts_and_nevis",
"saint_lucia",
"st-martin",
"st._pierre_and_miquelon",
"st._vin._and_gren.",
"samoa",
"san_marino",
"so_tom_and_principe",
"saudi_arabia",
"senegal",
"serbia",
"seychelles",
"sierra_leone",
"slovakia",
"singapore",
"sint_maarten",
"slovenia",
"solomon_is.",
"somalia",
"south_africa",
"s._geo._and_s._sandw._is.",
"s._sudan",
"spain",
"sri_lanka",
"sudan",
"suriname",
"swaziland",
"sweden",
"switzerland",
"syria",
"taiwan",
"tajikistan",
"tanzania",
"thailand",
"timor-leste",
"togo",
"tonga",
"trinidad_and_tobago",
"tunisia",
"turkey",
"turkmenistan",
"turks_and_caicos_is.",
"tuvalu",
"uganda",
"ukraine",
"united_arab_emirates",
"united_kingdom",
"united_states",
"u.s._minor_outlying_is.",
"uruguay",
"uzbekistan",
"vanuatu",
"venezuela",
"vietnam",
"british_virgin_is.",
"u.s._virgin_is.",
"wallis_and_futuna_is.",
"w._sahara",
"yemen",
"zambia",
"zimbabwe",
"coral_sea_islands_territory",
"republic_of_kosovo"
]
},
"resolution": {
"tags": [
"national",
"subnational"
]
},
"cities": {
"tags": [
"traffic",
"congestion",
"fatalities",
"death",
"road_safety",
"infrastructure",
"road",
"commute",
"traffic_feed",
"transport",
"transportation",
"rapid_transport",
"bus",
"train",
"rail",
"public_transport",
"cycle",
"cycling",
"bike",
"accessibility",
"urban",
"urban_extent",
"settlement",
"informal_settlement",
"city_extent",
"urban_expansion",
"park",
"greenspace",
"urban_planning",
"nightlights",
"air_quality",
"no2",
"nitrogen_dioxide",
"emissions",
"air_pollution",
"pollution",
"co",
"carbon_monoxide",
"bc",
"black_carbon",
"o3",
"ozone",
"so2",
"sulfur_dioxide",
"wind",
"wind_speed",
"wind_direction",
"wastewater",
"treatment",
"sewage",
"plastic",
"public_service",
"affiliation",
"membership",
"zone",
"zoning",
"waste",
"commercial",
"building",
"particulate_matter",
"pm2.5",
"heat_island",
"height",
"slum",
"resilience",
"environmental_health",
"environmental_quality",
"employment"
]
},
"climate": {
"tags": [
"ghg",
"greenhouse_gas",
"carbon_dioxide",
"temperature",
"sea_ice",
"ice_shelf",
"sea_level",
"snow",
"snow_cover",
"ice",
"glacier",
"climate_change",
"emissions",
"ph",
"sea_surface_temperature",
"carbon",
"biomass",
"carbon_storage",
"ocean",
"vulnerability",
"coastal",
"acidification",
"weather",
"oceans",
"carbon_sinks",
"extreme_event",
"sea_level",
"sea_level_rise",
"slr",
"adaptation",
"hurricane",
"storms",
"precipitation",
"methane",
"storm_intensity",
"bioclimate",
"ch4",
"biome",
"nitrous_oxide",
"n2o",
"indcs",
"intended_nationally_determined_contributions",
"peatland",
"ndcs",
"nationally_determined_contributions",
"greywater",
"groundwater",
"resource",
"anomaly"
]
}
}'''
# Parse the vocabulary definition string above into a Python dict
# (vocabulary name -> {"tags": [...]}).
vocabularies=json.loads(text)
| ResourceWatch/Api_definition/vocabulary_definition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
# # !{sys.executable} -m pip install matplotlib
# # !{sys.executable} -m pip install powerlaw
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (8,8) #Set figure size here
import pickle
import powerlaw
# # Occurrence Graph - Final
#
# Graphs the final occurrence data for SureChemBL patent data
# Load dictionary of SureChemBL cpd IDs : no. of occurrences.
# Fixes vs original:
#  - "Data\occurrence..." contained the invalid escape sequence '\o'
#    (SyntaxWarning on Python 3.12+, and backslash is Windows-only);
#    a forward slash works on all platforms including Windows.
#  - the file handle was never closed; `with` closes it deterministically.
with open("Data/occurrence_dict_FINAL.p", "rb") as f:
    d = pickle.load(f)
# Turn values (occurrence counts) into a list sorted largest-first.
occurrences = list(d.values())
occurrences.sort(reverse=True)
print(occurrences[0:10])  # sanity check: the ten most frequent counts
# Log-log view of the sorted occurrence counts: an approximately straight
# line here motivates the power-law hypothesis tested below.
plt.figure(figsize=(8,8))
plt.loglog(occurrences, color="#aa0f35", linewidth=4)
plt.xlabel("Patents (sorted by occurrence)")
plt.ylabel("Occurrences")
# Same data on linear axes for comparison.
plt.figure(figsize=(8,8))
plt.plot(occurrences, color="#abdbfc", linewidth=4)
plt.xlabel("Patents (sorted by occurrence)")
plt.ylabel("Occurrences")
# # Powerlaw tests & graphs
#
# Test hypothesis (based on loglog plot) that occurrence data follows a powerlaw distribution (rich getting richer model)
#Following powerlaw code
# Fit candidate heavy-tailed distributions to the occurrence counts
# (the powerlaw package picks xmin automatically when not supplied).
fit = powerlaw.Fit(occurrences)
#Powerlaw stats: alpha is the fitted exponent, sigma its standard error.
print("PowerLaw Stats")
print("Alpha:", fit.power_law.alpha)
print("Sigma:", fit.power_law.sigma)
print()
print("Lognormal Stats")
print("Mu:", fit.lognormal.mu)
print("Sigma:", fit.lognormal.sigma)
print()
# Log-likelihood ratio test between the two fits: (R, p). R > 0 favors
# power-law, R < 0 favors lognormal; p is the significance of the sign.
print("Comparison to lognormal:", fit.distribution_compare("power_law", "lognormal", normalized_ratio=True))
# _**Note**_: lognormal distribution is slightly better at representing these data than powerlaw
# +
# Overlay empirical PDF/CCDF with the fitted power-law model.
# (Label text "Emperical" is a typo kept as-is: it is a runtime string.)
fig1 = fit.plot_pdf(color="#603734", linewidth=3, label="Emperical PDF") #Actual Data pdf (brown)
fit.power_law.plot_pdf(color="#a0a75c", linewidth=3, linestyle="--", ax=fig1, label="Modeled PDF") #Modeled pdf (green)
fit.plot_ccdf(color="#aa0f35", linewidth=3, ax=fig1, label="Emperical CCDF") #Actual data ccdf (red)
fit.power_law.plot_ccdf(color="#fdc799", linewidth=3, linestyle="--", ax=fig1, label="Modeled CCDF") #Modeled ccdf (tan)
plt.legend()
plt.xlabel("Compound Frequency")
plt.ylabel("PDF & CCDF")
plt.title("PowerLaw Comparisons")
# +
#Comparison to lognormal fit: plot both model CCDFs against the data on one
#axes to judge which tail matches better.
fig2 = fit.power_law.plot_ccdf(color="#fdc799", linewidth=3, linestyle="--", label="Modeled powerlaw") #Modeled ccdf (tan)
fit.lognormal.plot_ccdf(color="#abdbfc", linewidth=3, linestyle="--", ax=fig2, label="Modeled lognormal")
fit.plot_ccdf(color="#aa0f35", linewidth=3,label="Emperical Data", ax=fig2)
#fit.exponential.plot_ccdf(color="#313a61", linewidth=3, linestyle="--", ax=fig2, label="Modeled lognormal")
plt.legend(loc="upper right")
plt.xlabel("Compound Frequency")
plt.ylabel("CCDF")
plt.title("PowerLaw vs Lognormal")
| occurance_viz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exercise 4.01: Data Staging and visualization
import numpy as np, pandas as pd
import matplotlib.pyplot as plt, seaborn as sns
# Load the mall customers dataset (expects Mall_Customers.csv in the CWD).
mall0 = pd.read_csv("Mall_Customers.csv")
mall0.head()
# #### Renaming columns
# Shorten the two awkward column names used throughout the exercises.
mall0.rename({'Annual Income (k$)':'Income',
              'Spending Score (1-100)':'Spend_score'},
             axis=1, inplace=True)
mall0.head()
# #### Visualize the data using a scatterplot
mall0.plot.scatter(x='Income', y='Spend_score')
plt.show()
# ## Exercise 4.02: Choosing the Number of Clusters Based on Visual Inspection
# #### Standardizing the data
# +
# Work on a copy so the raw mall0 frame stays untouched for later exercises.
mall_scaled = mall0.copy()
cols_to_scale = ['Age', 'Income', 'Spend_score']
from sklearn.preprocessing import StandardScaler
# Z-score the numeric columns so k-means distances aren't dominated by scale.
scaler = StandardScaler()
mall_scaled[cols_to_scale] = scaler.fit_transform(mall_scaled[cols_to_scale])
# -
# #### K means clustering - varying number of clusters
from sklearn.cluster import KMeans
cluster_cols = ['Income', 'Spend_score']
# One distinct marker per cluster index (supports up to 7 clusters).
markers = ['x', '*', '.', '|', '_', '1', '2']
# +
# Fit k-means for n = 2..7 clusters and draw each solution in its own
# subplot for visual comparison.
plt.figure(figsize=[15,10])
for n in range(2,8):
    model = KMeans(n_clusters=n, random_state=42)
    # Cluster column is overwritten on every iteration; only used for plotting.
    mall_scaled['Cluster']= model.fit_predict(mall_scaled[cluster_cols])
    plt.subplot(2,3, n-1)
    for clust in range(n):
        temp = mall_scaled[mall_scaled.Cluster == clust]
        plt.scatter(temp.Income, temp.Spend_score, marker=markers[clust], label="Cluster "+str(clust))
    plt.title("N clusters: "+str(n))
    plt.xlabel('Income')
    plt.ylabel('Spend_score')
    plt.legend()
plt.show()
# -
# ## Exercise 4.03: Determining the Number of Clusters Using the Elbow Method
# Single fit at K=3 to show what inertia (within-cluster SSE) looks like.
K = 3
model = KMeans(n_clusters=K, random_state=42)
model.fit(mall_scaled[cluster_cols])
print(model.inertia_)
X = mall_scaled[cluster_cols]
# Elbow method: record inertia for K = 2..10 and look for the bend.
inertia_scores = []
for K in range(2,11):
    inertia = KMeans(n_clusters=K, random_state=42).fit(X).inertia_
    inertia_scores.append(inertia)
plt.figure(figsize=[7,5])
plt.plot(range(2,11), inertia_scores)
plt.title("SSE/Inertia vs. number of clusters")
plt.xlabel("Number of clusters: K")
plt.ylabel('SSE/Inertia')
plt.show()
# ## Exercise 4.04: Mean-Shift Clustering on mall customers
from sklearn.cluster import MeanShift, estimate_bandwidth
# Manually chosen kernel bandwidth for the first mean-shift run.
bandwidth = 0.9
# +
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(mall_scaled[cluster_cols])
# Fix: predict on the same columns the model was fitted on instead of the
# stale global `X` left over from Exercise 4.03 — the original only worked
# because X happened to equal mall_scaled[cluster_cols] at that point, and
# this cell could not run in isolation. Also matches the later cells that
# call ms.predict(mall_scaled[cluster_cols]).
mall_scaled['Cluster'] = ms.predict(mall_scaled[cluster_cols])
# -
# #### Visualize the clusters obtained.
# One distinct marker per cluster index (redefined here so the cell is
# self-contained; supports up to 7 clusters).
markers = ['x', '*', '.', '|', '_', '1', '2']
# +
plt.figure(figsize=[8,6])
for clust in range(mall_scaled.Cluster.nunique()):
    temp = mall_scaled[mall_scaled.Cluster == clust]
    plt.scatter(temp.Income, temp.Spend_score, marker=markers[clust], label="Cluster"+str(clust))
plt.xlabel("Income")
plt.ylabel("Spend_score")
plt.legend()
plt.show()
# -
# -
# #### Estimating bandwidth using quantiling
# Data-driven bandwidth: the quantile controls how local the kernel is
# (smaller quantile -> smaller bandwidth -> more clusters).
bandwidth = estimate_bandwidth(mall_scaled[cluster_cols], quantile=0.1)
print(bandwidth)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(mall_scaled[cluster_cols])
mall_scaled['Cluster']= ms.predict(mall_scaled[cluster_cols])
mall_scaled.Cluster.nunique()
plt.figure(figsize=[8,6])
for clust in range(mall_scaled.Cluster.nunique()):
    temp = mall_scaled[mall_scaled.Cluster == clust]
    plt.scatter(temp.Income, temp.Spend_score, marker=markers[clust], label="Cluster"+str(clust))
plt.xlabel("Income")
plt.ylabel("Spend_score")
plt.legend()
plt.show()
# #### Using a different value of quantile
# Repeat with a larger quantile to show the effect on the cluster count.
bandwidth = estimate_bandwidth(mall_scaled[cluster_cols], quantile=0.15)
print(bandwidth)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(mall_scaled[cluster_cols])
mall_scaled['Cluster']= ms.predict(mall_scaled[cluster_cols])
mall_scaled.Cluster.nunique()
plt.figure(figsize=[8,6])
for clust in range(mall_scaled.Cluster.nunique()):
    temp = mall_scaled[mall_scaled.Cluster == clust]
    plt.scatter(temp.Income, temp.Spend_score, marker=markers[clust], label="Cluster"+str(clust))
plt.xlabel("Income")
plt.ylabel("Spend_score")
plt.legend()
plt.show()
# ## Exercise 4.05: Clustering Data Using the k-prototypes Method
import pandas as pd
# Load the bank dataset for the k-prototypes exercise.
bank0 = pd.read_csv("Bank_Personal_Loan_Modelling-2.csv")
bank0.head()
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
bank_scaled = bank0.copy()
# Standardize only the numeric column; Education stays categorical.
bank_scaled['Income'] = scaler.fit_transform(bank0[['Income']])
# Applying k-prototypes
from kmodes.kprototypes import KPrototypes
cluster_cols = ['Income', 'Education']
# Fix: removed the dead `X = bank_scaled[cluster_cols].values` — it was
# never used (fit_predict is called on the DataFrame directly, and X is
# reassigned in Exercise 4.06 before its next use).
kp = KPrototypes(n_clusters=3, random_state=42)
# categorical=[1] marks the Education column (index 1) as categorical.
bank_scaled['Cluster'] = kp.fit_predict(bank_scaled[cluster_cols], categorical=[1])
# Within-cluster distribution of education levels, shown as a bar chart.
res = bank_scaled.groupby('Cluster')['Education'].value_counts(normalize=True)
res.unstack().plot.barh(figsize=[9,6])
plt.show()
# ## Exercise 4.06: Using Silhouette Score to Pick Optimal Number of Clusters
# Back to the mall data: score a 3-cluster solution with the silhouette
# coefficient (in [-1, 1]; higher means better-separated clusters).
cluster_cols = ['Income', 'Spend_score']
X = mall_scaled[cluster_cols]
model = KMeans(n_clusters=3, random_state=42)
cluster_assignments = model.fit_predict(X)
from sklearn.metrics import silhouette_score
silhouette_avg = silhouette_score(X, cluster_assignments)
print(silhouette_avg)
# +
# Sweep K = 2..10 and keep the average silhouette score for each K; the
# best K is the one with the highest score.
silhouette_scores = []
for K in range(2, 11):
    model = KMeans(n_clusters=K, random_state=42)
    cluster_assignments = model.fit_predict(X)
    silhouette_avg = silhouette_score(X, cluster_assignments)
    silhouette_scores.append(silhouette_avg)
# -
plt.figure(figsize=[7,5])
plt.plot(range(2,11), silhouette_scores)
plt.xlabel("Number of clusters: K")
plt.ylabel('Avg. Silhouette Score')
plt.show()
# ## Exercise 4.07: Using a Train-Test Split to Evaluate Clustering Performance
from sklearn.model_selection import train_test_split
# 75/25 split of the raw (unscaled) data; scaling is fit on train only
# below, so no information leaks from the test set.
df_train, df_test = train_test_split(mall0, train_size=0.75, random_state=42)
print(df_train.shape)
print(df_test.shape)
# Fit the scaler on train, apply the same transform to test.
df_train[cluster_cols] = scaler.fit_transform(df_train[cluster_cols])
df_test[cluster_cols] = scaler.transform(df_test[cluster_cols])
# #### K means on the train data with 6 clusters
model = KMeans(n_clusters=6, random_state=42)
df_train['Cluster'] = model.fit_predict(df_train[cluster_cols])
silhouette_avg = silhouette_score(df_train[cluster_cols], df_train['Cluster'])
print(silhouette_avg)
# Assign test points to the trained centroids and compare silhouettes:
# a similar score on test suggests the clustering generalizes.
df_test['Cluster'] = model.predict(df_test[cluster_cols])
silhouette_avg = silhouette_score(df_test[cluster_cols],df_test['Cluster'])
print(silhouette_avg)
for clust in range(df_test.Cluster.nunique()):
    temp = df_test[df_test.Cluster == clust]
    plt.scatter(temp.Income, temp.Spend_score, marker=markers[clust])
plt.xlabel("Income")
plt.ylabel("Spend_score")
plt.show()
| Chapter04/Exercise4.01-4.07.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
assert sys.version_info >= (3, 5)
import numpy as np
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
import tensorflow as tf
from tensorflow import keras
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
from scipy import special
from Clustering_Equalgrps.equal_groups import EqualGroupsKMeans
np.random.seed(42)      # reproducible NumPy sampling
tf.random.set_seed(42)  # reproducible TF initialisation and channel noise
# -
# System parameters of the wiretap autoencoder.
M = 16        # message alphabet size (one symbol = log2(M) bits)
M_sec = 4     # number of secure symbols / clusters ("satellites")
k = int(np.log2(M))  # bits per message
n = 16        # channel uses per message
TRAINING_SNR = 10 # snr = ebno * k/n
SAMPLE_SIZE = 50000
messages = np.random.randint(M, size=SAMPLE_SIZE)
# +
# One-hot encode all training messages (shape SAMPLE_SIZE x M).
# NOTE(review): sklearn >= 1.2 renames `sparse` to `sparse_output` — confirm
# the pinned sklearn version before upgrading.
one_hot_encoder = OneHotEncoder(sparse=False, categories=[range(M)])
data_oneH = one_hot_encoder.fit_transform(messages.reshape(-1, 1))
# Generate Training Data
#x = tf.random.uniform(shape=[SAMPLE_SIZE], minval=0, maxval=M, dtype=tf.int64)
#x_1h = tf.one_hot(x, M)
#dataset = tf.data.Dataset.from_tensor_slices(x_1h)
# -
def snr_to_noise(snrdb):
    """Convert an SNR given in dB to the matching noise standard deviation."""
    linear_snr = 10**(snrdb/10)
    # Per-component noise std for a unit-power signal: sigma = 1/sqrt(2*SNR).
    # (Use 1/np.sqrt(2*(k/n)*ebno) if converting from Eb/N0 instead.)
    sigma = 1/np.sqrt(2*linear_snr)
    return sigma
# +
noise_std = snr_to_noise(TRAINING_SNR)  # Bob's channel noise (10 dB)
noise_std_eve = snr_to_noise(7)         # Eve's extra noise stage (fixed 7 dB)
# custom functions / layers without weights
# Normalise codewords to unit average power.
norm_layer = keras.layers.Lambda(lambda x: tf.divide(x, tf.sqrt(2*tf.reduce_mean(tf.square(x)))))
# Reshape between flat (2n) vectors and (2, n) I/Q-style channel symbols.
shape_layer = keras.layers.Lambda(lambda x: tf.reshape(x, shape=[-1,2,n]))
shape_layer2 = keras.layers.Lambda(lambda x: tf.reshape(x, shape=[-1,2*n]))
# AWGN stages: Bob sees one, Eve sees Bob's noise plus an extra stage
# (degraded wiretap channel).
channel_layer = keras.layers.Lambda(lambda x:
    tf.add(x, tf.random.normal(tf.shape(x), mean=0.0, stddev=noise_std)))
channel_layer_eve = keras.layers.Lambda(lambda x:
    tf.add(x, tf.random.normal(tf.shape(x), mean=0.0, stddev=noise_std_eve)))
# Encoder: one-hot message -> 2n-dim codeword, reshaped and power-normalised.
encoder = keras.models.Sequential([
    keras.layers.InputLayer(input_shape=[M]),
    keras.layers.Dense(M, activation="elu"),
    keras.layers.Dense(2*n, activation=None),
    shape_layer,
    norm_layer])
channel = keras.models.Sequential([channel_layer])
channel_eve = keras.models.Sequential([channel_layer, channel_layer_eve])
# Bob's and Eve's decoders share the same architecture: softmax over M messages.
decoder_bob = keras.models.Sequential([
    keras.layers.InputLayer(input_shape=[2,n]),
    shape_layer2,
    keras.layers.Dense(M, activation="elu"),
    keras.layers.Dense(M, activation="softmax")
])
decoder_eve = keras.models.Sequential([
    keras.layers.InputLayer(input_shape=[2,n]),
    shape_layer2,
    keras.layers.Dense(M, activation="elu"),
    keras.layers.Dense(M, activation="softmax")
])
# End-to-end models: both share the same encoder instance (and its weights).
autoencoder_bob = keras.models.Sequential([encoder, channel, decoder_bob])
autoencoder_eve = keras.models.Sequential([encoder, channel_eve, decoder_eve])
# -
def B_Ber(input_msg, msg):
    '''Calculate the Batch Bit Error Rate'''
    # Compare argmax decisions of prediction vs. one-hot ground truth and
    # average the mismatches over the batch.
    mismatches = tf.not_equal(tf.argmax(msg, 1), tf.argmax(input_msg, 1))
    return tf.reduce_mean(tf.cast(mismatches, tf.float32))
def random_batch(X, batch_size=32):
    """Return `batch_size` rows of `X` sampled uniformly with replacement."""
    sampled_rows = np.random.randint(len(X), size=batch_size)
    return X[sampled_rows]
def test_encoding(M=16, n=1):
    """Scatter-plot the first two codeword components for all M messages.

    Feeds the identity matrix (i.e. every one-hot message) through the
    global `encoder` and plots component 0 against component 1 of each
    codeword.  The `n` parameter is unused in this function.
    """
    inp = np.eye(M, dtype=int)
    coding = encoder.predict(inp)
    fig = plt.figure(figsize=(4,4))
    plt.plot(coding[:,0], coding[:, 1], "b.")
    plt.xlabel("$x_1$", fontsize=18)
    plt.ylabel("$x_2$", fontsize=18, rotation=0)
    plt.grid(True)
    plt.gca().set_ylim(-2, 2)
    plt.gca().set_xlim(-2, 2)
    plt.show()
def test_noisy_codeword(data):
    """Scatter-plot the first two components of received (noisy) symbols.

    NOTE(review): the slice `data[1:2000]` starts at index 1 and therefore
    skips the first sample — possibly intended as `data[0:2000]`; confirm
    before changing.
    """
    rcvd_word = data[1:2000]
    fig = plt.figure(figsize=(4,4))
    plt.plot(rcvd_word[:,0], rcvd_word[:, 1], "b.")
    #plt.plot(rcvd_word_eve[:,0], rcvd_word_eve[:, 1], 'or')
    plt.xlabel("$x_1$", fontsize=18)
    plt.ylabel("$x_2$", fontsize=18, rotation=0)
    plt.grid(True)
    plt.gca().set_ylim(-2, 2)
    plt.gca().set_xlim(-2, 2)
    plt.show()
# Training hyper-parameters shared by all training procedures below.
n_epochs = 5
batch_size = 200
n_steps = len(data_oneH) // batch_size  # gradient steps per epoch
# NOTE(review): `lr` is deprecated in favour of `learning_rate` in newer Keras.
optimizer = keras.optimizers.Nadam(lr=0.005)
loss_fn = keras.losses.categorical_crossentropy
mean_loss = keras.metrics.Mean()  # running average of the training loss
def plot_loss(step, epoch, mean_loss, X_batch, y_pred, plot_encoding):
    """Every 10th step, print the running loss and batch BER; optionally
    draw the current encoder constellation."""
    if step % 10 == 0:
        template = 'Iteration: {}, Epoch: {}, Loss: {:.5f}, Batch_BER: {:.5f}'
        print(template.format(step, epoch, mean_loss.result(), B_Ber(X_batch, y_pred)))
        if plot_encoding:
            test_encoding()
def plot_batch_loss(epoch, mean_loss, X_batch, y_pred):
    """Print the end-of-epoch running loss and batch BER."""
    summary = 'Interim result for Epoch: {}, Loss: {:.5f}, Batch_BER: {:.5f}'
    print(summary.format(epoch, mean_loss.result(), B_Ber(X_batch, y_pred)))
def train_Bob(n_epochs=5, n_steps=20, plot_encoding=True, only_decoder=False):
    """Train Bob's end-to-end autoencoder (encoder + channel + decoder).

    Args:
        n_epochs: number of epochs.
        n_steps: gradient steps per epoch.
        plot_encoding: if True, periodically plot the constellation.
        only_decoder: if True, update only decoder_bob's weights — used after
            the secure-encoder training so the shared encoder stays fixed.
    """
    for epoch in range(1, n_epochs + 1):
        print("Training Bob in Epoch {}/{}".format(epoch, n_epochs))
        for step in range(1, n_steps + 1):
            X_batch = random_batch(data_oneH, batch_size)
            #X_batch = dataset.batch(batch_size)
            with tf.GradientTape() as tape:
                y_pred = autoencoder_bob(X_batch, training=True)
                # Autoencoder target is the input itself (reconstruction).
                main_loss = tf.reduce_mean(loss_fn(X_batch, y_pred))
                loss = main_loss
            if only_decoder:
                gradients = tape.gradient(loss, decoder_bob.trainable_variables)
                optimizer.apply_gradients(zip(gradients, decoder_bob.trainable_variables))
            else:
                gradients = tape.gradient(loss, autoencoder_bob.trainable_variables)
                optimizer.apply_gradients(zip(gradients, autoencoder_bob.trainable_variables))
            mean_loss(loss)  # update the running-mean metric
            plot_loss(step, epoch, mean_loss, X_batch, y_pred, plot_encoding)
        plot_batch_loss(epoch, mean_loss, X_batch, y_pred)
def train_Eve(n_epochs=5, iterations=20, plot_encoding=True):
    """Train Eve's decoder against the (frozen) shared encoder.

    Only ``decoder_eve``'s weights are updated; the encoder is shared with
    Bob and is deliberately not touched here.

    Args:
        n_epochs: number of epochs.
        iterations: gradient steps (batches) per epoch.
        plot_encoding: forwarded to ``plot_loss`` for optional plotting.
    """
    for epoch in range(1, n_epochs + 1):
        print("Training Eve in Epoch {}/{}".format(epoch, n_epochs))
        # FIX: the original looped over the global `n_steps` and silently
        # ignored the `iterations` parameter; every existing call site passes
        # `n_steps` for `iterations`, so their behaviour is unchanged.
        for step in range(1, iterations + 1):
            X_batch = random_batch(data_oneH, batch_size)
            with tf.GradientTape() as tape:
                y_pred = autoencoder_eve(X_batch, training=True)
                main_loss = tf.reduce_mean(loss_fn(X_batch, y_pred))
                loss = main_loss
            # Eve may only adapt her decoder, not the shared encoder.
            gradients = tape.gradient(loss, decoder_eve.trainable_variables)
            optimizer.apply_gradients(zip(gradients, decoder_eve.trainable_variables))
            mean_loss(loss)
            plot_loss(step, epoch, mean_loss, X_batch, y_pred, plot_encoding)
        plot_batch_loss(epoch, mean_loss, X_batch, y_pred)
def init_kmeans(symM=16, satellites=4, n=100):
    '''Initializes equal sized clusters with the whole message set'''
    # Encode every possible message once: the identity matrix enumerates all
    # one-hot vectors.
    inp = np.eye(symM, dtype=int)
    unit_codewords = encoder.predict(inp)
    # Equal-sized k-means (third-party helper) groups the symM codewords,
    # flattened to dimension 2*n, into `satellites` clusters of equal size.
    kmeans = EqualGroupsKMeans(n_clusters=satellites)
    kmeans.fit(unit_codewords.reshape(symM,2*n))
    return kmeans
def generate_mat(kmeans_labels, satellites=4, symM=16):
    """Build the cluster-averaging matrix used to equalise Eve's input
    distribution.

    Entry (i, k) is 1/satellites when symbols i and k share a k-means
    cluster and 0 otherwise, so multiplying a one-hot batch by this matrix
    spreads each message uniformly over its own cluster.
    """
    gen_matrix = np.zeros((symM, symM))
    for cluster in range(satellites):
        members = [i for i in range(symM) if kmeans_labels[i] == cluster]
        for row in members:
            for col in members:
                gen_matrix[row, col] = 1/satellites
    return tf.cast(gen_matrix, tf.float64)
def train_Secure(kmeans_labels, n_epochs=5, iterations=20, alpha=0.7, plot_encoding=True):
    '''This procedure trains the encoder to cluster the codewords,
    based on the kmeans labels.
    Inputs: kmeans.labels_, epochs, iterations, alpha, plotting decision variable

    alpha weights the secrecy objective: the total loss is
    (1-alpha)*loss_bob + alpha*loss_eve, where Eve's target distribution is
    flattened over each cluster via the generator matrix.
    '''
    generator_matrix = generate_mat(kmeans_labels, M_sec, M)
    for epoch in range(1, n_epochs + 1):
        print("Training for Security in Epoch {}/{}".format(epoch, n_epochs))
        # FIX: the original iterated over the global `n_steps` and silently
        # ignored the `iterations` parameter; all existing call sites pass
        # `n_steps` here, so their behaviour is unchanged.
        for step in range(1, iterations + 1):
            X_batch = random_batch(data_oneH, batch_size)
            # Eve's "equalised" targets: each message smeared over its cluster.
            x_batch_s = tf.matmul(X_batch, generator_matrix)
            with tf.GradientTape() as tape:
                y_pred_bob = autoencoder_bob(X_batch, training=True)
                y_pred_eve = autoencoder_eve(X_batch, training=False)
                loss_bob = tf.reduce_mean(loss_fn(X_batch, y_pred_bob))
                loss_eve = tf.reduce_mean(loss_fn(x_batch_s, y_pred_eve))
                loss_sec = (1-alpha)*loss_bob + alpha*loss_eve
            # Only Bob's chain (shared encoder + his decoder) is updated.
            gradients = tape.gradient(loss_sec, autoencoder_bob.trainable_variables)
            optimizer.apply_gradients(zip(gradients, autoencoder_bob.trainable_variables))
            mean_loss(loss_sec)
            plot_loss(step, epoch, mean_loss, X_batch, y_pred_bob, plot_encoding)
        plot_batch_loss(epoch, mean_loss, X_batch, y_pred_bob)
# test msg sequence for normal encoding
N_test = 150000
test_msg = np.random.randint(M, size=N_test)
# Fresh one-hot encoder with the same fixed category set as training.
one_hot_encoder = OneHotEncoder(sparse=False, categories=[range(M)])
data_oh_normal = one_hot_encoder.fit_transform(test_msg.reshape(-1,1))
def Test_AE(data):
    '''Calculate Bit Error for varying SNRs'''
    # Sweep Bob's channel from 0 to 15 dB in 30 points; Eve always receives
    # Bob's word with an extra fixed 7 dB noise stage (degraded channel).
    snr_range = np.linspace(0, 15, 30)
    bber_vec_bob = [None] * len(snr_range)
    bber_vec_eve = [None] * len(snr_range)
    for db in range(len(snr_range)):
        noise_std = snr_to_noise(snr_range[db])
        noise_std_eve = snr_to_noise(7)
        # Encode once per SNR point, then add fresh Gaussian noise.
        code_word = encoder.predict(data)
        rcvd_word = code_word + tf.random.normal(tf.shape(code_word), mean=0.0, stddev=noise_std)
        rcvd_word_eve = rcvd_word + \
            tf.random.normal(tf.shape(rcvd_word), mean=0.0, stddev=noise_std_eve)
        dcoded_msg_bob = decoder_bob.predict(rcvd_word)
        dcoded_msg_eve = decoder_eve.predict(rcvd_word_eve)
        bber_vec_bob[db] = B_Ber(data, dcoded_msg_bob)
        bber_vec_eve[db] = B_Ber(data, dcoded_msg_eve)
        print(f'Progress: {db+1} of {30} parts')
        #test_noisy_codeword(rcvd_word)
        #test_noisy_codeword(rcvd_word_eve)
    return (snr_range, bber_vec_bob), (snr_range, bber_vec_eve)
def satellite_labels(kmeans_labels, data_label, sats=8, data_size=150000):
    '''Generate cloud/satelite codewords which utilizes the previously trained encoder.
    It therefore takes a message vector of lower dimensionality and maps it to the higher
    dimensional secure coding. The satelite codewords, i.e. co-sets are chosen randomly
    according to the clusters.

    Args:
        kmeans_labels: cluster id per codeword (length = alphabet size).
        data_label: secure message ids to map, values in range(sats).
        sats: number of clusters / satellites.
        data_size: number of entries of data_label to map.

    Returns:
        (coded_label, code_mat) where code_mat[c, j] is the j-th member of
        cluster c and coded_label[i] is the codeword index transmitted for
        message data_label[i].
    '''
    # FIX: derive the alphabet size from kmeans_labels instead of the global
    # M, and use the `sats` parameter instead of the global M_sec in the
    # random draw; identical at the existing call site (len==M, sats==M_sec)
    # but self-contained and correct for other sizes.
    num_symbols = len(kmeans_labels)
    # code_mat row c lists the codeword indices belonging to cluster c.
    code_mat = np.zeros((sats, sats))
    for sat in range(sats):
        n = 0
        for index in range(num_symbols):
            if kmeans_labels[index] == sat:
                code_mat[sat, n] = index
                n = n + 1
    coded_label = np.zeros(data_size)
    for i in range(data_size):
        aux_var = data_label[i]
        # pick a random row of column aux_var, i.e random symbol in the cluster
        coded_label[i] = code_mat[np.random.randint(sats), aux_var]
    return coded_label, code_mat
def sec_decoding(code_mat, pred_output, satellites, clusters):
    """Map received satellite symbols back to their secure message ids.

    ``code_mat[c, j]`` holds the codeword index of message ``j`` inside
    cluster ``c``; decoding looks each prediction up in the matrix and keeps
    the column (message) index.  ``satellites`` and ``clusters`` are accepted
    for interface compatibility but are not needed for the lookup.
    """
    predictions = np.array(pred_output)
    decoded = np.zeros(len(predictions))
    for idx in range(len(predictions)):
        _cloud, message = np.where(code_mat == predictions[idx])
        decoded[idx] = message
    return decoded
def Test_secure_AE(coded_data, code_mat, real_data):
    '''Calculate symbol error for varying SNRs'''
    # Same sweep as Test_AE, but predictions are first collapsed from
    # satellite codewords back to the M_sec secure messages via sec_decoding
    # before computing the error rates against `real_data`.
    snr_range = np.linspace(0, 15, 30)
    bber_vec_bob = [None] * len(snr_range)
    bber_vec_eve = [None] * len(snr_range)
    for db in range(len(snr_range)):
        noise_std = snr_to_noise(snr_range[db])
        noise_std_eve = snr_to_noise(7)
        code_word = encoder.predict(coded_data)
        rcvd_word = code_word + tf.random.normal(tf.shape(code_word), mean=0.0, stddev=noise_std)
        rcvd_word_eve = rcvd_word + \
            tf.random.normal(tf.shape(code_word), mean=0.0, stddev=noise_std_eve)
        pred_msg_bob = decoder_bob.predict(rcvd_word)
        pred_msg_eve = decoder_eve.predict(rcvd_word_eve)
        # argmax gives the predicted satellite symbol; sec_decoding maps it
        # down to the underlying secure message id.
        decoded_msg_bob = sec_decoding(code_mat, np.array(tf.argmax(pred_msg_bob,1)),
                                       M_sec, M_sec)
        decoded_msg_eve = sec_decoding(code_mat, np.array(tf.argmax(pred_msg_eve,1)),
                                       M_sec, M_sec)
        bber_vec_bob[db] = np.mean(np.not_equal(decoded_msg_bob, real_data))
        bber_vec_eve[db] = np.mean(np.not_equal(decoded_msg_eve, real_data))
        print(f'Progress: {db+1} of {30} parts')
        #test_noisy_codeword(rcvd_word)
        #test_noisy_codeword(rcvd_word_eve)
    return (snr_range, bber_vec_bob), (snr_range, bber_vec_eve)
# +
# Stage 1: standard training of Bob's autoencoder and Eve's decoder,
# then baseline error rates on fresh test data.
train_Bob(n_epochs, n_steps, False, False)
train_Eve(n_epochs-1, n_steps, False) #reduced epochs to match accuracy of both
bber_data_bob, bber_data_eve = Test_AE(data_oh_normal) # Taking test data for comparison
# Stage 2: cluster codewords, retrain the encoder for secrecy, then
# fine-tune both decoders against the new constellation.
kmeans = init_kmeans(M,M_sec,n) # Initializing kmeans for the security procedure
train_Secure(kmeans.labels_, n_epochs-3, n_steps, 0.3, False)
train_Bob(n_epochs-2, n_steps, False, True)
train_Eve(n_epochs-3, n_steps, False)
# test msg sequence for secure encoding
N_test_sec = 150000
test_msg_sec = np.random.randint(M_sec, size=N_test_sec)
print('Mapping real symbols onto secure symbols')
coded_msg, code_matrix = satellite_labels(kmeans.labels_, test_msg_sec,
                                          M_sec, N_test_sec)
one_hot_encoder_sec = OneHotEncoder(sparse=False, categories=[range(M)])
data_oh_sec = one_hot_encoder_sec.fit_transform(coded_msg.reshape(-1,1))
print("Testing the secure symbols")
bber_sec_bob, bber_sec_eve = Test_secure_AE(data_oh_sec, code_matrix, test_msg_sec)
# +
# Compare the four error-rate curves: Bob should stay low while Eve's
# secure-AE curve saturates near chance level.
fig = plt.figure(figsize=(8, 5))
plt.semilogy(bber_data_bob[0], bber_data_bob[1], 'o-')
plt.semilogy(bber_data_eve[0], bber_data_eve[1], 's-')
plt.semilogy(bber_sec_bob[0], bber_sec_bob[1], '^-');
plt.semilogy(bber_sec_eve[0], bber_sec_eve[1], '^-');
plt.gca().set_ylim(1e-5, 1)
plt.gca().set_xlim(0, 15)
plt.tick_params(axis='x', colors='white')
plt.tick_params(axis='y', colors='white')
plt.ylabel("Batch Symbol Error Rate", fontsize=14, rotation=90, color='white')
plt.xlabel("SNR [dB]", fontsize=18, color='white')
plt.legend(['AE Bob', 'AE Eve', 'Secure AE Bob', 'Secure AE Eve'],
           prop={'size': 14}, loc='upper right');
plt.grid(True, which="both")
# -
# ===== Source notebook: Wiretap_ICC2019_TF2_(1h).ipynb =====
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Peak hell
#
# [](https://github.com/Dragon1573/PyChallenge-Tips/blob/master/LICENSE)
# [](http://www.pythonchallenge.com/pc/def/peak.html)
#
# <img src="../../resources/imgs/Quiz5-1.png" />
#
#   `Peak hell`音似`pickle`,为Python中重要的二进制数据(反)序列化工具。查看源代码,发现数据包文件`banner.p`。
#
# ```html
# <html>
# <head>
# <title>peak hell</title>
# <link rel="stylesheet" type="text/css" href="../style.css" />
# </head>
# <body>
# <center>
# <img src="peakhell.jpg" />
# <br />
# <font color="#c0c0ff">pronounce it</font>
# <br />
# <peakhell src="banner.p" />
# </center>
# </body>
# </html>
# <!-- peak hell sounds familiar? -->
# ```
#
#   爬取数据包并使用`pickle`反序列化。
from requests import get
from pickle import loads
# Fetch the pickled banner payload from the puzzle site.
response = get("http://www.pythonchallenge.com/pc/def/banner.p")
# Deserialize it.
# SECURITY NOTE: pickle.loads executes arbitrary code embedded in the
# payload; only acceptable here because the source is the trusted puzzle
# site — never do this with untrusted data.
data = loads(response.content)
# Render the ASCII art: each line is a list of (char, run_length) pairs.
for line in data:
    for item in line:
        print(item[0] * item[1], sep='', end='')
    print()
#   观察字符画,可以看出单词`channel`,即下一关的链接为:<http://www.pythonchallenge.com/pc/def/channel.html>。
# ===== Source notebook: src/Part1/Quiz5.ipynb =====
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="vjUkRN_bafUt"
# # Задание 3.2 - сверточные нейронные сети (CNNs) в PyTorch
#
# Это упражнение мы буде выполнять в Google Colab - https://colab.research.google.com/
# Google Colab позволяет запускать код в notebook в облаке Google, где можно воспользоваться бесплатным GPU!
#
# Авторы курса благодарят компанию Google и надеятся, что праздник не закончится.
#
# Туториал по настройке Google Colab:
# https://medium.com/deep-learning-turkey/google-colab-free-gpu-tutorial-e113627b9f5d
# (Keras инсталлировать не нужно, наш notebook сам установит PyTorch)
#
# + colab={"base_uri": "https://localhost:8080/"} id="FcXBeP1O7cnY" executionInfo={"status": "ok", "timestamp": 1617254824831, "user_tz": -180, "elapsed": 3132, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="83462a39-ae1e-498c-94c5-50bd6dd0be09"
# Intstall PyTorch and download data
# !pip3 install torch torchvision
# !wget -c http://ufldl.stanford.edu/housenumbers/train_32x32.mat http://ufldl.stanford.edu/housenumbers/test_32x32.mat
# + id="-afwWw-Q85vD"
from collections import namedtuple
import matplotlib.pyplot as plt
import numpy as np
import PIL
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as dset
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import transforms
# + id="NNU-OD9O9ltP"
device = torch.device("cuda:0") # Let's make sure GPU is available!
# + [markdown] id="1AGDqVA8afVA"
# # Загружаем данные
# + id="YAvkoRx-9FsP"
# First, lets load the dataset
# SVHN train split; normalised with approximate per-channel mean/std.
data_train = dset.SVHN('./',
                       transform=transforms.Compose(
                           [
                               transforms.ToTensor(),
                               transforms.Normalize(mean=[0.43,0.44,0.47],
                                                    std=[0.20,0.20,0.20])
                           ]
                       )
                      )
# Test split gets the identical deterministic preprocessing.
data_test = dset.SVHN('./',
                      split='test',
                      transform=transforms.Compose(
                          [
                              transforms.ToTensor(),
                              transforms.Normalize(mean=[0.43,0.44,0.47],
                                                   std=[0.20,0.20,0.20])
                          ]
                      )
                     )
# + [markdown] id="uI7z9wJAafVC"
# Разделяем данные на training и validation.
#
# На всякий случай для подробностей - https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
# + id="YRnr8CPg7Hli"
batch_size = 64
data_size = data_train.data.shape[0]
validation_split = .2  # hold out 20% of the training data for validation
split = int(np.floor(validation_split * data_size))
indices = list(range(data_size))
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Samplers draw disjoint index subsets from the same underlying dataset.
train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(data_train,
                                           batch_size=batch_size,
                                           sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(data_train,
                                         batch_size=batch_size,
                                         sampler=val_sampler)
# + id="LyYvt-T67PBG"
# We'll use a special helper module to shape it into a flat tensor
class Flattener(nn.Module):
    """Collapse (N, *dims) activations into a flat (N, prod(dims)) tensor."""

    def forward(self, x):
        # Keep the batch dimension; flatten everything else.
        leading = x.shape[0]
        return x.view(leading, -1)
# + [markdown] id="V8Ot7cK3afVE"
# Создадим простейшую сеть с новыми слоями:
# Convolutional - `nn.Conv2d`
# MaxPool - `nn.MaxPool2d`
# + id="w9SFVGZP7SQd"
# Simple 2-conv baseline: 32x32x3 -> pool(4) -> pool(4) -> 2x2x64 -> 10 logits.
nn_model = nn.Sequential(
    nn.Conv2d(3, 64, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.MaxPool2d(4),
    nn.Conv2d(64, 64, 3, padding=1),
    nn.ReLU(inplace=True),
    nn.MaxPool2d(4),
    Flattener(),
    nn.Linear(64*2*2, 10),
)
nn_model.type(torch.cuda.FloatTensor)  # legacy API for moving params to GPU
nn_model.to(device)
loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)
optimizer = optim.SGD(nn_model.parameters(), lr=1e-1, weight_decay=1e-4)
# + [markdown] id="sYmsDB3bafVH"
# Восстановите функцию `compute_accuracy` из прошлого задания.
# Единственное отличие в новом - она должна передать данные на GPU прежде чем прогонять через модель. Сделайте это так же, как это делает функция `train_model`
# + colab={"base_uri": "https://localhost:8080/"} id="2ek3KVQK7hJ6" executionInfo={"status": "ok", "timestamp": 1617256636512, "user_tz": -180, "elapsed": 68044, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="b36d30a8-312d-40fb-d7de-92cfc36d26b3"
def train_model(model, train_loader, val_loader, loss, optimizer, num_epochs, scheduler=None):
    """Run the standard training loop and return per-epoch histories.

    Args:
        model: network to optimise (already on the global ``device``).
        train_loader / val_loader: batch iterators for the two splits.
        loss: criterion mapping (logits, targets) to a scalar.
        optimizer: optimiser updating ``model``'s parameters.
        num_epochs: number of full passes over ``train_loader``.
        scheduler: optional LR scheduler, stepped once per epoch.

    Returns:
        (loss_history, train_history, val_history) lists, one entry per epoch.
    """
    loss_history = []
    train_history = []
    val_history = []
    for epoch in range(num_epochs):
        model.train()  # enable dropout/batchnorm training behaviour
        loss_accum = 0.0
        correct_samples = 0
        total_samples = 0
        num_batches = 0
        for i_step, (x, y) in enumerate(train_loader):
            x_gpu = x.to(device)
            y_gpu = y.to(device)
            prediction = model(x_gpu)
            loss_value = loss(prediction, y_gpu)
            optimizer.zero_grad()
            loss_value.backward()
            optimizer.step()
            _, indices = torch.max(prediction, 1)
            correct_samples += torch.sum(indices == y_gpu)
            total_samples += y.shape[0]
            # .item() detaches the scalar so old autograd graphs aren't kept alive
            loss_accum += loss_value.item()
            num_batches += 1
        if scheduler is not None:
            scheduler.step()
        # FIX: the original divided by the last enumerate index (i_step),
        # i.e. batches-1, overstating the average loss; divide by the count.
        ave_loss = loss_accum / num_batches
        train_accuracy = float(correct_samples) / total_samples
        val_accuracy = compute_accuracy(model, val_loader)
        loss_history.append(float(ave_loss))
        train_history.append(train_accuracy)
        val_history.append(val_accuracy)
        print("Average loss: %f, Train accuracy: %f, Val accuracy: %f" % (ave_loss, train_accuracy, val_accuracy))
    return loss_history, train_history, val_history
def compute_accuracy(model, loader):
    """
    Computes accuracy on the dataset wrapped in a loader
    Returns: accuracy as a float value between 0 and 1
    """
    model.eval()  # disable dropout / use running batchnorm statistics
    correct_samples = 0
    total_samples = 0
    # FIX: evaluate under no_grad — no autograd graph is needed for
    # inference, saving memory and time; the returned value is unchanged.
    with torch.no_grad():
        for x, y in loader:
            x_gpu = x.to(device)
            y_gpu = y.to(device)
            pred = model(x_gpu)
            _, indices = torch.max(pred, 1)
            correct_samples += torch.sum(indices == y_gpu)
            total_samples += y_gpu.shape[0]
    val_accuracy = float(correct_samples) / total_samples
    return val_accuracy
loss_history, train_history, val_history = train_model(nn_model, train_loader, val_loader, loss, optimizer, 5)
# + [markdown] id="6a-3a1ZFGEw_"
# # Аугментация данных (Data augmentation)
#
# В работе с изображениями одним из особенно важных методов является аугментация данных - то есть, генерация дополнительных данных для тренировки на основе изначальных.
# Таким образом, мы получаем возможность "увеличить" набор данных для тренировки, что ведет к лучшей работе сети.
# Важно, чтобы аугментированные данные были похожи на те, которые могут встретиться в реальной жизни, иначе польза от аугментаций уменьшается и может ухудшить работу сети.
#
# С PyTorch идут несколько таких алгоритмов, называемых `transforms`. Более подробно про них можно прочитать тут -
# https://pytorch.org/tutorials/beginner/data_loading_tutorial.html#transforms
#
# Ниже мы используем следующие алгоритмы генерации:
# - ColorJitter - случайное изменение цвета
# - RandomHorizontalFlip - горизонтальное отражение с вероятностью 50%
# - RandomVerticalFlip - вертикальное отражение с вероятностью 50%
# - RandomRotation - случайный поворот
# + colab={"base_uri": "https://localhost:8080/"} id="jCWMUWmr7t5g" executionInfo={"status": "ok", "timestamp": 1617256674171, "user_tz": -180, "elapsed": 2781, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="b8ab96a7-85e5-4e5b-d671-05a2845d1580"
# Aggressive augmentation set used for the first experiment.
tfs = transforms.Compose([
    transforms.ColorJitter(hue=.50, saturation=.50),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    # NOTE(review): `resample` was renamed `interpolation` in torchvision >= 0.9.
    transforms.RandomRotation(50, resample=PIL.Image.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.43,0.44,0.47],
                         std=[0.20,0.20,0.20])
])
# Create augmented train dataset
data_aug_train = dset.SVHN('./', transform=tfs)
train_aug_loader = torch.utils.data.DataLoader(data_aug_train,
                                               batch_size=batch_size,
                                               sampler=train_sampler)
# + [markdown] id="CIDFTzHoafVM"
# Визуализируем результаты агментации (вообще, смотреть на сгенерированные данные всегда очень полезно).
# + colab={"base_uri": "https://localhost:8080/", "height": 207} id="YlJJEro1KZ45" executionInfo={"status": "ok", "timestamp": 1617256682495, "user_tz": -180, "elapsed": 3544, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="871de9e5-ee91-4f2c-a1c6-c8c47671e6c5"
# TODO: Visualize some augmented images!
# hint: you can create new datasets and loaders to accomplish this
# Based on the visualizations, should we keep all the augmentations?
# Milder augmentations, without ToTensor, so PIL images can be shown directly.
tfs = transforms.Compose([
    transforms.ColorJitter(hue=.20, saturation=.20),
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.RandomRotation(10, resample=PIL.Image.BILINEAR),
])
data_aug_vis = dset.SVHN('./', transform=tfs)
# Show the first 10 augmented samples in one row.
plt.figure(figsize=(30, 3))
for i, (x, y) in enumerate(data_aug_vis):
    if i == 10:
        break
    plt.subplot(1, 10, i+1)
    plt.grid(False)
    plt.imshow(x)
    plt.axis('off')
# + [markdown] id="o2LrmsYHoguB"
# Все ли агментации одинаково полезны на этом наборе данных? Могут ли быть среди них те, которые собьют модель с толку?
#
# Выберите из них только корректные
# + colab={"base_uri": "https://localhost:8080/"} id="evro9ksXGs9u" executionInfo={"status": "ok", "timestamp": 1617256728623, "user_tz": -180, "elapsed": 2666, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="c93d8d43-96bc-4ce6-fe3e-2d54e7553420"
# TODO:
# House-number digits are orientation-sensitive, so flips are dropped; only
# mild colour jitter and a small rotation are kept.
tfs = transforms.Compose([
    # TODO: Add good augmentations
    transforms.ColorJitter(hue=.20, saturation=.20),
    transforms.RandomRotation(5, resample=PIL.Image.BILINEAR),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.43,0.44,0.47],
                         std=[0.20,0.20,0.20])
])
data_aug_train = dset.SVHN('./', transform=tfs)
# TODO create new instances of loaders with the augmentations you chose
train_aug_loader = torch.utils.data.DataLoader(data_aug_train,
                                               batch_size=batch_size,
                                               sampler=train_sampler)
# + colab={"base_uri": "https://localhost:8080/"} id="PeO6Zw0DHqPR" executionInfo={"status": "ok", "timestamp": 1617256952371, "user_tz": -180, "elapsed": 216713, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="78659c90-3e56-4a39-c25b-7fe9951b51a9"
# Finally, let's train with augmentations!
# Note we shouldn't use augmentations on validation
loss_history, train_history, val_history = train_model(nn_model, train_aug_loader, val_loader, loss, optimizer, 5)
# + [markdown] id="r0bcioK6JBDK"
# # LeNet
# Попробуем имплементировать классическую архитектуру сверточной нейронной сети, предложенную Яном ЛеКуном в 1998 году. В свое время она достигла впечатляющих результатов на MNIST, посмотрим как она справится с SVHN?
# Она описана в статье ["Gradient Based Learning Applied to Document Recognition"](http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf), попробуйте прочитать ключевые части и имплементировать предложенную архитетуру на PyTorch.
#
# Реализовывать слои и функцию ошибки LeNet, которых нет в PyTorch, **не нужно** - просто возьмите их размеры и переведите в уже известные нам Convolutional, Pooling и Fully Connected layers.
#
# Если в статье не очень понятно, можно просто погуглить LeNet и разобраться в деталях :)
# + id="ieEzZUglJAUB"
# TODO: Implement LeNet-like architecture for SVHN task
# LeNet-5 adapted to SVHN's 32x32x3 input: two conv/pool/tanh stages
# followed by the classic 120-84-10 fully connected head.
lenet_model = nn.Sequential(
    nn.Conv2d(in_channels=3, out_channels=6, kernel_size=(5, 5)),
    nn.MaxPool2d(kernel_size=(2, 2), stride=2),
    nn.Tanh(),
    nn.Conv2d(in_channels=6, out_channels=16, kernel_size=(5, 5)),
    nn.MaxPool2d(kernel_size=(2, 2), stride=2),
    nn.Tanh(),
    Flattener(),
    nn.Linear(in_features=16 * 5 * 5, out_features=120),
    nn.Tanh(),
    nn.Linear(in_features=120, out_features=84),
    nn.Tanh(),
    nn.Linear(in_features=84, out_features=10)
)
lenet_model.type(torch.cuda.FloatTensor)  # legacy API for moving params to GPU
lenet_model.to(device)
loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)
optimizer = optim.SGD(lenet_model.parameters(), lr=1e-1, weight_decay=1e-4)
# + colab={"base_uri": "https://localhost:8080/"} id="WMmaPfdeKk9H" executionInfo={"status": "ok", "timestamp": 1617259027003, "user_tz": -180, "elapsed": 434085, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="86dc6600-3d31-473d-aadf-b83172f893a0"
# Let's train it!
loss_history, train_history, val_history = train_model(lenet_model, train_aug_loader, val_loader, loss, optimizer, 10)
# + [markdown] id="u_O9qiYySvuj"
# # Подбор гиперпараметров
# + colab={"base_uri": "https://localhost:8080/"} id="i6mhfdQ9K-N3" executionInfo={"status": "ok", "timestamp": 1617265598378, "user_tz": -180, "elapsed": 3059411, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="4810f502-8354-4d94-dd7c-fc95adc05e75"
# The key hyperparameters we're going to tune are learning speed, annealing rate and regularization
# We also encourage you to try different optimizers as well
Hyperparams = namedtuple("Hyperparams", ['learning_rate', 'reg', 'optimizer'])
RunResult = namedtuple("RunResult", ['model', 'train_history', 'val_history', 'final_val_accuracy'])
np.random.seed(42)
# Search space: log-spaced learning rates, one weight decay, two optimizers.
learning_rates = [1e-1, 1e-2, 10 ** -2.5]
anneal_coeff = 0.2  # LR multiplier applied every `anneal_epoch` epochs
anneal_epoch = 2
regs = [1e-4]
optimizers = [optim.SGD, optim.Adam]
epoch_num = 10
train_aug_loader = torch.utils.data.DataLoader(data_aug_train,
                                               batch_size=16,
                                               sampler=train_sampler)
# Record all the runs here
# Key should be Hyperparams and values should be RunResult
run_record = {}
# Use grid search or random search and record all runs in run_record dictionnary
# Important: perform search in logarithmic space!
# TODO: Your code here!
from itertools import product
best_hyperparams = Hyperparams(None, None, None)
best_result = RunResult(None, None, None, None)
for lr, reg, optimizer in product(learning_rates, regs, optimizers):
    # A fresh LeNet is built per configuration so runs don't share weights.
    lenet_model = nn.Sequential(
        nn.Conv2d(in_channels=3, out_channels=6, kernel_size=(5, 5)),
        nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        nn.Tanh(),
        nn.Conv2d(in_channels=6, out_channels=16, kernel_size=(5, 5)),
        nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        nn.Tanh(),
        Flattener(),
        nn.Linear(in_features=16 * 5 * 5, out_features=120),
        nn.Tanh(),
        nn.Linear(in_features=120, out_features=84),
        nn.Tanh(),
        nn.Linear(in_features=84, out_features=10)
    )
    lenet_model.type(torch.cuda.FloatTensor)
    lenet_model.to(device)
    loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)
    # NOTE: rebinding `optimizer` shadows the class drawn from the product
    # tuple; harmless because the product iterator was built before the loop.
    optimizer = optimizer(lenet_model.parameters(), lr=lr, weight_decay=reg)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=anneal_epoch, gamma=anneal_coeff)
    # NOTE(review): the optimizer *instance* (not the class) ends up in the
    # Hyperparams key — valid as a dict key but prints verbosely.
    params = Hyperparams(lr, reg, optimizer)
    print(f"\nCurrent hyperparams: {params}")
    loss_history, train_history, val_history = train_model(lenet_model, train_aug_loader, val_loader, loss, optimizer, epoch_num, scheduler)
    result = RunResult(lenet_model, train_history, val_history, val_history[-1])
    run_record[params] = result
    # Track the best run by final validation accuracy.
    if best_result.final_val_accuracy is None or best_result.final_val_accuracy < result.final_val_accuracy:
        best_result = result
        best_hyperparams = params
print("\nCurrent best validation accuracy: %4.2f, best hyperparams: %s" % (best_result.final_val_accuracy, best_hyperparams))
# + colab={"base_uri": "https://localhost:8080/"} id="Y6xExdw8JB1l" executionInfo={"status": "ok", "timestamp": 1617266128714, "user_tz": -180, "elapsed": 667, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="56b9794f-a347-4450-a002-3d588704a99d"
# Re-scan all recorded runs to report the best configuration.
best_val_accuracy = None
best_hyperparams = None
best_run = None
for params, run in run_record.items():
    candidate = run.final_val_accuracy
    if best_val_accuracy is None or best_val_accuracy < candidate:
        best_val_accuracy = candidate
        best_hyperparams = params
        best_run = run
print("Best validation accuracy: %4.2f, best hyperparams: %s" % (best_val_accuracy, best_hyperparams))
# + [markdown] id="LOmsR0uVgtgf"
# # Свободное упражнение - догоним и перегоним LeNet!
#
# Попробуйте найти архитектуру и настройки тренировки, чтобы выступить лучше наших бейзлайнов.
#
# Что можно и нужно попробовать:
# - BatchNormalization (для convolution layers он в PyTorch называется [batchnorm2d](https://pytorch.org/docs/stable/nn.html#batchnorm2d))
# - Изменить количество слоев и их толщину
# - Изменять количество эпох тренировки
# - Попробовать и другие агментации
# + colab={"base_uri": "https://localhost:8080/"} id="tSVhD747icoc" executionInfo={"status": "ok", "timestamp": 1617272581387, "user_tz": -180, "elapsed": 861723, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="a5414186-4890-495d-de99-f99809eacce4"
Hyperparams = namedtuple("Hyperparams", ['learning_rate', 'reg'])
RunResult = namedtuple("RunResult", ['model', 'train_history', 'val_history', 'final_val_accuracy'])
learning_rates = [1e-1, 1e-3]
regs = [1e-4, 1e-5]
epoch_num = 10
train_aug_loader = torch.utils.data.DataLoader(data_aug_train,
                                               batch_size=16,
                                               sampler=train_sampler)
# Record all the runs here
# Key should be Hyperparams and values should be RunResult
run_record = {}
# Use grid search or random search and record all runs in run_record dictionnary
# Important: perform search in logarithmic space!
# TODO: Your code here!
from itertools import product
best_hyperparams = Hyperparams(None, None)
best_result = RunResult(None, None, None, None)
for lr, reg in product(learning_rates, regs):
    # Wider conv net with BatchNorm; rebuilt fresh for every configuration.
    lenet_model = nn.Sequential(
        nn.Conv2d(3, 256, kernel_size=(3, 3)),
        nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        nn.BatchNorm2d(256),
        nn.ReLU(inplace=True),
        nn.Conv2d(256, 1024, kernel_size=(3, 3)),
        nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        nn.BatchNorm2d(1024),
        nn.ReLU(inplace=True),
        nn.Conv2d(1024, 1024, kernel_size=(3, 3)),
        nn.MaxPool2d(kernel_size=(2, 2), stride=2),
        nn.BatchNorm2d(1024),
        nn.ReLU(inplace=True),
        Flattener(),
        nn.Linear(1024 * 2 * 2, 512),
        nn.BatchNorm1d(512),
        nn.ReLU(inplace=True),
        nn.Linear(512, 64),
        nn.BatchNorm1d(64),
        nn.ReLU(inplace=True),
        nn.Linear(64, 10)
    )
    lenet_model.type(torch.cuda.FloatTensor)
    lenet_model.to(device)
    loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)
    optimizer = optim.Adam(lenet_model.parameters(), lr=lr, weight_decay=reg)
    # NOTE(review): CyclicLR drives the LR within [1e-4, 1e-3] regardless of
    # the searched `lr` — confirm this interplay is intended.
    scheduler = optim.lr_scheduler.CyclicLR(optimizer, base_lr=1e-4, max_lr=1e-3, cycle_momentum=False)
    params = Hyperparams(lr, reg)
    print(f"\nCurrent hyperparams: {params}")
    loss_history, train_history, val_history = train_model(lenet_model, train_aug_loader, val_loader, loss, optimizer, epoch_num, scheduler)
    result = RunResult(lenet_model, train_history, val_history, val_history[-1])
    run_record[params] = result
    if best_result.final_val_accuracy is None or best_result.final_val_accuracy < result.final_val_accuracy:
        best_result = result
        best_hyperparams = params
print("\nCurrent best validation accuracy: %4.2f, best hyperparams: %s" % (best_result.final_val_accuracy, best_hyperparams))
# + [markdown] id="ubeKgBcnhx7N"
# # Финальный аккорд - проверим лучшую модель на test set
#
# В качестве разнообразия - напишите код для прогона модели на test set вы.
#
# В результате вы должны натренировать модель, которая покажет более **90%** точности на test set.
# Как водится, лучший результат в группе получит дополнительные баллы!
# + id="EIqM1kdeh-hd" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1617272958021, "user_tz": -180, "elapsed": 6606, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjXg5AsWqqMzk55ocOzS8i9dXkhfvkdS__BBrgB=s64", "userId": "06299317524033231726"}} outputId="f5722f54-c95b-484b-8b18-162c73241bb5"
# Final check: run the best-by-validation model on the untouched test split.
best_model = best_result.model
# Plain loader, no sampler: iterate over the full test set once, in order.
test_loader = torch.utils.data.DataLoader(data_test, batch_size=batch_size)
final_test_accuracy = compute_accuracy(best_model, test_loader)
print("Final test accuracy - ", final_test_accuracy)
# + id="BfH6qip6kVX_"
| assignments/assignment3/PyTorch_CNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import skimage
from skimage import color, data, io
from skimage.filters import roberts, sobel
# -
# Original image:
#
# https://www.pexels.com/photo/architecture-buildings-business-city-281502/
# +
# Read the skyline photograph and collapse it to a single gray channel,
# since the edge filters below operate on 2-D intensity images.
skyline_color = io.imread('./images/pexels-skyline.jpg')
skyline = color.rgb2gray(skyline_color)
plt.figure(figsize=(6, 6))
plt.imshow(skyline, cmap='gray')
# -
# ## Roberts Cross Edge Detector
# Calculates Edge magnitude using Roberts' cross operator
# * The Roberts Cross operator performs a simple, quick to compute, 2-D spatial gradient measurement on an image.
# * It thus highlights regions of high spatial frequency which often correspond to edges.
# * Pixel values at each point in the output represent the estimated absolute magnitude of the spatial gradient of the input image at that point.
# * <b>The main reason for using the Roberts Cross operator</b> is that it is <b>very quick to compute</b>.
# * <b>Only four input pixels</b> need to be examined to determine the value of each output pixel.
# * <b>The main disadvantage of Robert cross operator is small kernel which is very sensitive to noise.</b>
#
# +
# Edge magnitude via the 2x2 Roberts cross operator (fast, noise-sensitive).
skyline_edge_roberts = roberts(skyline)
plt.figure(figsize=(8, 8))
plt.imshow(skyline_edge_roberts, cmap='gray')
# -
# ## Sobel Edge Detector
# Calculates edge magnitude using the Sobel operator
# * <b>The Sobel operator</b> is very similar to the Roberts Cross operator, but the difference is it uses a pair of <b>3x3 convolution kernels</b> whereas the <b>Roberts operator uses a 2x2 convolution kernel.</b>
# * <b>It is slower than the Roberts Cross operator in computation, but its larger convolution kernel smooths the input image</b>
# * Thus, It is less sensitive to noise
# * The Sobel operator also generally produces considerably <b>higher output values for similar edges</b>, compared with the Roberts Cross.
# +
# Edge magnitude via the 3x3 Sobel operator (smoother, less noise-sensitive).
skyline_edge_sobel = sobel(skyline)
plt.figure(figsize=(8, 8))
plt.imshow(skyline_edge_sobel, cmap='gray')
# -
| backup/11.sklearn5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # What is this about?
#
# Hi! Within this notebook I started to teach myself the theory and practice of Bayesian inference and MCMC sampling by means of a toy problem. In case you stumble across an error or other ways to improve this notebook (not unlikely!), please let know. Thanks! :-)
#
# A big thanks goes out to <NAME> for excellent discussions and offering expertise on this topic!
# #### Before we dive in...
# ... please execute the two cells below. It will install some packages from conda and load modules and classes relevant to this notebook. For implementation details, have a look at fourier_series and MH_sampler in mcmc_sampler.py
# !conda install --yes -c conda-forge matplotlib numpy scipy dask tqdm
# +
import matplotlib.pyplot as plt
import numpy as np
import dask
from dask.distributed import Client
# Project-local module: the cosine-series model and the Metropolis-Hastings sampler.
from mcmc_sampler import fourier_series, MH_sampler
### You may change n_workers to whatever number of
### processors you'd like to use here
# One thread per worker: each MH chain is CPU-bound, so processes parallelize best.
client = Client(threads_per_worker=1, n_workers=5)
# -
# # Sampling parameters using Bayesian inference
#
# ## 1.) A toy problem: A Fourier series of unknown length
# Fourier series (a cosine series) are used by most molecular mechanics force fields in order to model torsion potentials. The general form of a torsion potential includes force constants $V_n$ (i.e. an amplitude in this context) and the phase angles $\delta_n$ as parameters:
#
# \begin{equation}\label{eq:TorsionSeries}
# V_{tors}(\phi) = \sum_{n} {1 \over 2 } V_n (1 + cos(n\phi + \delta_n))
# \end{equation}
#
# For a given torsion around a bond B-C, the number of terms in this series depends on the symmetry of the molecular fragment A-B-C-D. Also, it is possible that some terms have a $V_n$ of zero. In the context of this notebook, we will use a simplified version of the equation above and just assume a single set of parameters $ \theta_N: \{a_1, a_2, ..., a_N\}$:
#
# \begin{equation}\label{eq:CosineSeries}
# f(x)_{N} = \sum_{n}^{N} a_n cos(nx)
# \end{equation}
#
# We will assume that there is some actual (true) process that generates data, which we can observe, following eq. \ref{eq:CosineSeries}. However, we do not know how exactly this process looks like (i.e. we do not know the precise parameters $a_n$ in eq. \ref{eq:CosineSeries}), we just know that it must be a process following eq. \ref{eq:CosineSeries}. The objective of this notebook is the exploration of ways to use Bayesian statistics to find out about the nature of the true process. That means, we want to know the parameters $\theta_N$ and $N$ in eq. \ref{eq:CosineSeries} of the true process. However, we will only be able to obtain a limited number of samples (observations) from that true process. To make things worse, we cannot observe these samples without noise and we do not know how large this noise is. Since the data is sparse and also noisy, it is much more useful to know the distribution of parameters $\theta_N$ and $N$ given the data generated from the true process, rather than estimating the precise parameter values. This setting is ideal for Bayesian statistics.
# ## 2.) Using Bayesian Statistics for Model Inference
# In Bayesian inference, we are searching for the posterior distribution of the parameters, $p(\theta_N|Y)$, i.e. we are interested in the parameter distribution that generated the observed data $Y$. It is important to realize that we are not interested in the true parameter values $\hat{\theta}_N$, but really the distribution of these parameter values under the observed data. Since we are also interested in quantifying the noise contained in the true process, we want to include an unknown noise parameter into our considerations. We will assume that the true process contains some noise following a Gaussian distribution, so that $y_i = f(x_i) + \epsilon$, where $\epsilon \sim \mathcal{N}(0,\sigma^2)$, thus we will include $\sigma^2$ as an unknown parameter.
#
# The posterior distribution for a given model $\mathcal{M}_k: f(x)_k$, with $k>0$ is defined as follows:
#
# \begin{equation}\label{eq:BayesEquation}
# p(\theta_k, \sigma^2|Y) = {{p(Y|\theta_k,\sigma^2) p(\theta_k) p(\sigma^2)} \over p(Y)} \propto p(Y|\theta_k,\sigma^2) p(\theta_k) p(\sigma^2)
# \end{equation}
#
# - $p(Y)$: Evidence
# - $p(Y|\theta_k)$: Likelihood
# - $p(\theta_k)$: Prior on $\theta_k$
# - $p(\sigma^2)$: Prior on $\sigma^2$
# - $p(\theta_k, \sigma^2|Y)$: Posterior
#
# ### Evidence
# The evidence $p(Y)$ can be treated as a normalizing constant as long as one is only interested in the posterior distribution of a particular model $\theta_k$ (i.e. with fixed $N$). In that case, the posterior distribution is only known up to this normalizing constant, which is fine. However, if one wants to compare different models ${\mathcal{M}_k}$ (and we want to do that at a later point!), it will become important to not ignore that normalizing constant.
#
# ### Likelihood
# The likelihood is a measure for the probability that the given data can be observed under the model parameters $\theta_k$ (note that it is not a probability distribution over the parameters, though!). We will assume that each observed realization $Y_i$ is drawn from a Gaussian distribution (see above). However, we do not know the exact parameters of that distribution. Our task is to find these parameters, $(\mu_i, \sigma^2)$ during the Bayesian inference. The parameters $(\mu_i, \sigma^2)$ will be informed by our choice of $\theta_k$, i.e.
#
# \begin{equation}\label{eq:LikelihoodMu}
# \mu_i(\theta_k) = f(x_i)_{\theta_k}
# \end{equation}
#
# \begin{equation}\label{eq:LikelihoodSigma}
# Y_i \sim \mathcal{N}(\mu_i,\sigma^2)
# \end{equation}
#
# \begin{equation}\label{eq:Likelihood}
# p(Y|\theta,\sigma) = \prod_i {1 \over \sqrt{2 \pi \sigma^2}} exp\big( { -(Y_i-\mu_i)^2 \over 2 \sigma^2}\big)
# \end{equation}
#
# ### The prior
#
# The prior probabilities $p(\theta_k)$ and $p(\sigma^2)$ reflect our prior knowledge of the parameter vector $\theta_k$ and the noise parameter $\sigma^2$. In case we already know that the parameter is likely to be in a specific range or if certain parameter values are not reasonable (e.g. in a physics-based model) then the prior probability can reflect that knowledge. Other than that, one can also be rather objective and assign equal prior probability to all realizations of $\theta_k$. The latter is exactly what we will do in the following. We assign non-zero probability to all parameters on the interval $[b,c]$ and zero probability otherwise:
#
# \[
# p(a_i) =
# \begin{cases}
# (c-b)^{-1},& \text{if } b < a_i < c \\
# 0, & \text{otherwise}
# \end{cases}
# \]
#
# \begin{equation}\label{eq:PriorCombined}
# p(\theta_k) = \prod_i^k p(a_i)
# \end{equation}
#
#
# The prior over the noise parameter will be an inverse gamma distribution, i.e. $\sigma \sim \mathcal{IG}(\alpha,\beta)$, where $\beta=1$ and $\alpha$ is a hyperparameter that we have to specify later:
#
# \begin{equation}\label{eq:InverseGamma}
# p_\alpha(\sigma) = {1 \over \Gamma(\alpha)} \sigma ^{-\alpha-1} exp\big(-{1 \over \sigma}\big)
# \end{equation}
#
# ### The posterior
#
# The posterior distribution, see eq. \ref{eq:BayesEquation}, in most cases cannot be solved analytically and must be sampled using special algorithms. That means, instead of computing the posterior directly, we will draw samples from the posterior distribution, which should converge to the true posterior distribution if we use an infinite amount of sampling. The sampling algorithm that we are going to use is called Metropolis-Hastings and is probably one of the most common algorithms from the Markov Chain Monte Carlo family. It will be explained in the following section.
# ## 3.) Metropolis-Hastings Algorithm
#
# The original aim of this algorithm is to construct a Markov Chain that has the same distribution as the desired target distribution (in our case the posterior probability distribution). A Markov Chain describes a "chain" (i.e. a sequence) of states (i.e. realizations of the random variable, here $\theta$), in which the probability to realize the current state only depends on the previous state. This is called the Markov property. Assuming that the states are living in a finite state space, the Markov property is (we will stick to $\theta$ being our random variable):
#
# \begin{equation}\label{eq:MarkovProperty}
# p(\theta^{(t+1)} = \theta' | \theta^{(t)} = \theta, ... , \theta^{(0)} = z ) =
# p(\theta^{(t+1)} = \theta' | \theta^{(t)} = \theta )
# \end{equation}
#
# Here, $\theta^{(t+1)}$ is the current state of the Markov Chain and $\theta^{(0)}, \theta^{(1)}, ... , \theta^{(t)}$ are all previous states. Another way of looking at the Markov property is to realize that these are transition functions, that is
#
# \begin{equation}\label{eq:TransitionFunction}
# A(\theta, \theta') = p(\theta^{(t+1)} = \theta' | \theta^{(t)} = \theta )
# \end{equation}
#
# If we now assume that the Markov Chain is reversible (i.e. if we get the same result irrespective of running it backwards or forwards), we can apply the detailed balance condition:
#
# \begin{equation}\label{eq:DetailedBalance}
# p(\theta|Y) A(\theta, \theta') = p(\theta'|Y) A(\theta', \theta)
# \end{equation}
#
# Integrating out $\theta$ results in
#
# \begin{equation}\label{eq:DetailedBalanceIntegrated}
# \int d\theta p(\theta|Y) A(\theta, \theta') = p(\theta'|Y)
# \end{equation}
#
# Eq. \ref{eq:DetailedBalanceIntegrated} is an important result, since it shows that sampling from the target distribution $p(\theta'|Y)$ is invariant with respect to the transition function $A(\cdot ,y)$ as long as we ensure that we are markovian and reversible! This is where the Metropolis-Hasting Algorithm comes into the game. Metropolis suggested to use the following algorithm (again we use $\theta$ as our random variable):
#
# The Markov Chain is initialized at state $\theta^{(t)} = \theta^{(0)}$
# \begin{enumerate}
# \item Propose a new state $\theta'$ from a proposal distribution $q(\theta^{(t)}, \theta')$
# \item Evaluate the posterior distribution (the target distribution), $p(\theta',Y)$ at the proposed state.
# Then, calculate the acceptance probability $\alpha(\theta^{(t)}, \theta')$ for this transition:
#
# \begin{equation}\label{eq:AcceptanceProbability}
# \alpha(\theta^{(t)}, \theta') = min\Big(1, {p(\theta'|Y) q(\theta',\theta^{(t)})
# \over p(\theta^{(t)}|Y) q(\theta^{(t)},\theta')} \Big)
# \end{equation}
#
# Draw a random number from a uniform distribution $u \propto unif(0,1)$. Update the state $t+1$ of the chain according to
#
# \[
# \theta^{(t+1)} =
# \begin{cases}
# \theta',& \text{if } u \leq \alpha(\theta^{(t)}, \theta')\\
# \theta^{(t)}, & \text{otherwise}
# \end{cases}
# \]
#
# \end{enumerate}
#
# The acceptance probability $\alpha$ (see eq. \ref{eq:AcceptanceProbability}) in the MH algorithm is related to the Markov property (see eq. \ref{eq:DetailedBalance}) through the relation $\alpha(\theta^{(t)}, \theta') = { A(\theta, \theta') \over q(\theta, \theta')}$. Thus, one can rewrite the acceptance probability as
#
# \begin{equation}\label{eq:AcceptanceProbability_Markov}
# p(\theta|Y) A(\theta, \theta') = min \Big( p(\theta|Y) q(\theta, \theta'), p(\theta'|Y) q(\theta', \theta) \Big)
# \end{equation}
#
# This algorithm allows Markovian sampling using a Markov Chain Monte Carlo strategy (MCMC). This particular algorithm is maybe not the most efficient one, but it is (A) easy to implement and (B) sufficiently efficient for low dimensional problems like the ones we are dealing with in this example.
#
# ### Reading
#
# * "Monte Carlo Strategies in Scientific Computing", <NAME>
# ## 4.) Let's Generate Some Data!
#
# Next, we will generate data $Y$ from a process that follows eq. \ref{eq:CosineSeries} and has three non-zero terms, $a_1, a_2, a_3$. Also this process contains noise from a zero-mean Gaussian distribution. The parameters are
#
# $a_1 = 5.10, a_2 = 0.13, a_3 = 3.24, \sigma^2 = 0.3$
# +
### Parameters
p = np.array([5.10, 0.13, 3.24])
### You may either reuse these x,y values, which should get you close
### to the outcome of my calculations or just generate your own data.
x = np.array([-3.14159265, -2.74159265, -2.34159265, -1.94159265, -1.54159265, -1.14159265,
-0.74159265, -0.34159265, 0.05840735, 0.45840735, 0.85840735, 1.25840735,
1.65840735, 2.05840735, 2.45840735, 2.85840735])
y = np.array([-8.92247276, -6.19605699, -0.7293233, 1.37097261, -0.65165646, -0.68584825,
1.95369845, 6.46307948, 8.36168418, 5.47697352, -0.07383572, -1.2556803,
0.19878288, 1.5263483, -2.68581297, -6.69607447])
### If you want to generate your own data,
### uncomment the following lines. The generated
### data will contain a zero-mean noise term.
#x = np.arange(-np.pi,np.pi,0.4)
#y = fourier_series(x,p) + np.random.normal(0,np.sqrt(0.3),size=x.size)
plt.plot(x,y)
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# -
# ## 5.) Model Inference Using within-model MH Sampling
#
# In the first step of this notebook tutorial, we want to sample the parameter posterior distribution from each model $\mathcal{M}_k$ individually using the MH algorithm. Since we will sample $\theta_k$ for each model separately and the individual models do not know about each other, we call this approach within-model sampling, i.e. we sample each set of parameters from its own distribution $\theta_k \sim p(\theta_k | k, Y)$.
#
# ### MC Propagation and Hyperparameters
#
# Some parameters must be set by us in advance. These hyperparameters are the step width $w$ and the shape parameter $\alpha$ of the inverse gamma distribution (see eq. \ref{eq:InverseGamma}). A new state $(\theta_k, \sigma^2)'$ is proposed by adding a random perturbation to the current state of the Markov Chain. This perturbation follows a zero-mean normal distribution with standard deviation $s$ (this is our "step width"). By using a normal distribution we also guarantee symmetric moves. Specifically for the vector components $a_i$
#
# \begin{equation}\label{eq:Propagation}
# a_i' = a_i^{(t)} + w_i
# \end{equation}
#
# \begin{equation*}
# w_i \sim \mathcal{N}(0, s)
# \end{equation*}
#
# And we will do the same thing for the proposal of the variance ${\sigma^2}'$, however we will propose on ${\sigma}'$ in order to always have ${\sigma^2}' > 0$. It is generally encouraged to play around with these hyperparameters. A nice lesson is to vary the step width $s$ and see how the number of accepted moves is going to vary!
#
# ### Start the Sampling!
#
# In order to start the sampling we will need to guess some initial parameters for each model. Of course we don't set it to the correct parameters, instead we use the correct parameters and add considerable amount of noise. Ideally, one would repeat these runs with different initial parameter settings in order to confirm that they actually all converge to the same stationary distribution (remember the Markov Property eq. \ref{eq:MarkovProperty} ?). However, in our case we only carry out a single (but long) run for each model.
#
# Now execute the below code block in order to start the sampling. This should take about 15 minutes on a desktop computer, if you initialized at least 5 dask workers (see code block in the very beginning of this notebook!). We need several sampling steps in order to get converged results for the calculation of the evidence at a later stage. A total of 100000 steps per run seems to be reliable here.
# +
### This is our step width 's'
# Standard deviation of the Gaussian proposal perturbation.
step_width = 0.05
### This is our prior width for the inverse gamma distribution
sigma_width = 2.0
### This is our reference that defines the boundaries on
### parameter prior.
# Uniform prior support [0, 6] for every coefficient a_i.
p_ref = np.array([0., 6])
### This is the initial value of the standard deviation.
### We will use the same value for each MH run
sigma0 = 0.1
### This is a list of initial parameter vectors. They are of
### varying length, going from 2 up to 6 parameters.
# First 2-3 components: true values plus uniform noise in [0, 1);
# extra components (models with k > 3) all start at exactly 1.0.
p0_list = [p[:2] + 1.0 * np.random.random(2),
           p + 1.0 * np.random.random(p.shape),
           np.concatenate((p + 1.0 * np.random.random(p.shape),[1.0])),
           np.concatenate((p + 1.0 * np.random.random(p.shape),[1.0, 1.0])),
           np.concatenate((p + 1.0 * np.random.random(p.shape),[1.0, 1.0, 1.0]))
          ]
### Let's loop over some models and see how that
### changes the Bayesian quantities and the parameters
lazy_results = list()
def worker(sampler, *args):
    """Run ``sampler.run(*args)`` and hand the sampler back.

    Returning the sampler object (instead of None) lets dask ship the
    fully populated chain back from the worker process to the client.
    """
    sampler.run(*args)
    return sampler
# Build one within-model MH sampler per candidate model (k = 2..6 parameters)
# and launch all chains in parallel on the dask cluster.
for p0 in p0_list:
    sampler = MH_sampler(step_width,
                         sigma_width,
                         p_ref,
                         x,
                         y,
                         fourier_series)
    # Defer execution; 100000 steps per chain for converged evidence estimates.
    lazy_result = dask.delayed(worker)(sampler, p0, sigma0, 100000)
    lazy_results.append(lazy_result)
# Trigger the actual computation; returns the samplers with filled-in chains.
sampler_list = list(dask.compute(*lazy_results))
# -
# ### Understanding the Results of the MH Sampling
#
# We started with parameters that were somewhat different from the ones of the true model (c.f. compare variables p0_list and p). Therefore we get low posterior probabilities in the beginning of the sampling. It is customary to discard these samples ('burn in') and only keep those that are drawn from the stationary posterior. In our case, we don't really have to do it, since we have lots of sampling, however we do it anyway since it is common practice. We will identify the first 2000 steps as the 'burn in' period, after this period the $-ln(posterior)$ over all MCMC steps (see below) seems to be stable (note the different scale of the individual y-axis).
# One panel per model: negative log posterior along the chain
# (the first 10 accepted samples are dropped for readability).
fig_traj, ax_traj = plt.subplots(1, 5, figsize=(20, 5))
for i, chain in enumerate(sampler_list[:5]):
    trace = np.array(chain.accept_posterior)[10:]
    panel = ax_traj[i]
    panel.plot(-np.log(trace))
    panel.set_title(f"{i+2} parameters")
    panel.set_ylabel("-ln(posterior)")
    panel.set_xlabel("step")
fig_traj.tight_layout()
fig_traj.show()
# ### Parameter distribution
#
# Next, let's look at the distribution of the parameters (see below, the dashed lines indicate the true values). We find that the MH sampler recovers the true parameters. For $\mathcal{M}_2$ we find a broad distribution of both parameters $a_1$ and $a_2$; this is not surprising since it is too simple a model to be able to capture the true data. The somewhat broadened parameter distribution indicates that a large number of parameter realizations is possible given the data, since all of these realizations result in similarly poor posterior probability. The correct model, $\mathcal{M}_3$, on the other hand shows nicely distributed parameters, matching the true ones quite nicely. The other 3 models, $\mathcal{M}_4$, $\mathcal{M}_5$ and $\mathcal{M}_6$, show similar distributions but parameters $a_4$, $a_5$ and $a_6$ are sampled close to zero in order to cope with the fact that the true model does not need those parameters.
#
# Also see the overview of all the models in the graphs of the second row below (the dashed line indicates the true model). One sees that no solution of $\mathcal{M}_2$ is actually coming close to the true model.
#
# The below codeblock should take about 10 minute to finish.
# +
# Top row: histograms of the sampled coefficients per model, with dashed
# markers at the true values. Bottom row: curves for every kept sample.
burn_in = 2000
fig_parms, ax_parms = plt.subplots(2,5, figsize=(20,10))
for i in range(5):
    sampler = sampler_list[i]
    # Discard burn-in, then thin by 10 to reduce autocorrelation/plot cost.
    accept_theta = sampler.accept_theta[burn_in::10]
    maxbins = 0.
    for d in range(accept_theta.shape[1]):
        n, bins, patches = ax_parms[0][i].hist(accept_theta[:,d], label=f"a{d+1}")
        # Track the tallest histogram bar so the vlines span the full plot.
        if np.max(n)>maxbins:
            maxbins = np.max(n)
    # Dashed lines mark the three true parameter values a_1..a_3.
    ax_parms[0][i].vlines(x=p[0], ymin=0, ymax=maxbins, linestyles="--")
    ax_parms[0][i].vlines(x=p[1], ymin=0, ymax=maxbins, linestyles="--")
    ax_parms[0][i].vlines(x=p[2], ymin=0, ymax=maxbins, linestyles="--")
    ax_parms[0][i].set_title(f"{i+2} parameters")
    ax_parms[0][i].set_ylabel("Counts")
    ax_parms[0][i].set_xlabel("Parameter value")
    ax_parms[0][i].legend()
    # Overlay the cosine series implied by each kept sample.
    for theta in accept_theta:
        ax_parms[1][i].plot(x, fourier_series(x, theta), linewidth=0.5)
    ax_parms[1][i].set_ylabel("y")
    ax_parms[1][i].set_xlabel("x")
# Thick dashed black curve: the true generating model, for reference.
for i in range(5):
    ax_parms[1][i].plot(x, fourier_series(x, p),
                        color="black", linestyle="--",
                        linewidth=4)
fig_parms.tight_layout()
fig_parms.show()
# -
# ### Model evidence
# The logical conclusion up until now would be (assuming we do not know the true model), that all but $\mathcal{M}_2$ are possible. If we apply the rule of Occam's razor, we would just assume that $\mathcal{M}_3$ is correct, since it is the simplest one among the remaining 4 models. However, the definition of simplicity is not quite objective and generally is not merely based on the number of parameters in a given model. In the next steps, we want to find out the true model, hopefully fulfilling Occam's razor at the same time, using a less subjective approach. In order to do so, we want to recall the normalizing constant $p(Y)$ introduced in the beginning. This normalizing constant, also called evidence, is important in the context of model selection for a given model $\mathcal{M}_k$. It can actually be computed from quantities we already know, see
#
# \begin{equation}\label{eq:Evidence}
# p(Y) = p(Y|\mathcal{M}_k) = \int p(\theta_k|\mathcal{M}_k) p(Y|\theta_k, \mathcal{M}_k) d\theta_k
# \end{equation}
#
# We can use this quantity for estimating the model posterior, i.e. a quantification of the probability that a given model $\mathcal{M}_k$ is true given the data $Y$.
#
# \begin{equation}\label{eq:ModelPosterior}
# p(\mathcal{M}_k|Y) \propto p(Y|\mathcal{M}_k) p(\mathcal{M}_k)
# \end{equation}
#
# This is exactly what we need to make an argument about which model is more likely than another. However, in practice the above integral is hard to compute. One way to approximate it, is using a harmonic mean estimator over the $N$ likelihood samples from the within-model parameter posterior (which comes with problems of its own, but it is sufficient for us) and assuming a uniform model prior $p(\mathcal{M}_k)$. Then,
#
# \begin{equation}\label{eq:ModelPosteriorEstimate}
# \widetilde{p}(\mathcal{M}_k|Y) = \Bigg[ {1 \over N} \sum_i^N {1 \over P(Y|\theta_i,\mathcal{M}_k)} \Bigg]_{p(\theta | Y,\mathcal{M}_k)}^{-1}
# \end{equation}
#
# By setting the individual model evidence estimates $\widetilde{p}(\mathcal{M}_k|Y)$ for all $\mathcal{M}_k$ into relation with each other, one can quantify the support for a given model over another one given the data $Y$. These relations are called Bayes factors. The higher the ratio $\widetilde{p}(\mathcal{M}_i|Y) / \widetilde{p}(\mathcal{M}_j|Y)$, the more likely model $\mathcal{M}_i$ is over $\mathcal{M}_j$. These Bayes factors are computed below as well as the individual model evidence estimates for each model (left graph). It is apparent that model $\mathcal{M}_3$ is the most likely one given the data. However, we would not get the same result if we tried to pick the model with the highest likelihood (see right graph). The maximum likelihood for each model would indicate that $\mathcal{M}_6$ is actually the best model!
#
# ### Reading
# * Weinberg, <NAME>. (2012). Computing the bayes factor from a markov chain monte carlo simulation of the posterior distribution. Bayesian Analysis, 7(3), 737–770.
# * http://alumni.media.mit.edu/~tpminka/statlearn/demo/
# * https://radfordneal.wordpress.com/2008/08/17/the-harmonic-mean-of-the-likelihood-worst-monte-carlo-method-ever/
# +
# Harmonic-mean estimate of each model's evidence p(Y|M_k) from the within-model
# likelihood samples (burn-in discarded), plus the per-model maximum likelihood
# for comparison. Improvements over the original cell: the five hand-written
# list entries are comprehensions, and the dead `sampler = sampler_list[i]`
# assignment (never used in the loop body) is removed. Plotted/printed values
# are unchanged.
burn_in = 2000
# Post-burn-in likelihood trace for each of the five models (k = 2..6).
likelihood_list = [np.array(s.accept_likelihood)[burn_in:] for s in sampler_list[:5]]
# Harmonic mean estimator: evidence ~ [ (1/N) * sum_i 1/L_i ]^-1.
evidence_list = [1. / np.mean(1. / likelihoods) for likelihoods in likelihood_list]
fig_li_ev, ax_li_ev = plt.subplots(1,2)
for i, (evidence, likelihoods) in enumerate(zip(evidence_list, likelihood_list)):
    # Bar position i+2 equals the parameter count of model M_{i+2}.
    ax_li_ev[0].bar(i+2, evidence, color="black")
    ax_li_ev[1].bar(i+2, np.max(likelihoods), color="black")
ax_li_ev[0].set_xlabel("k")
ax_li_ev[0].set_ylabel("Evidence")
ax_li_ev[1].set_xlabel("k")
ax_li_ev[1].set_ylabel("max(Likelihood)")
fig_li_ev.tight_layout()
fig_li_ev.show()
# Bayes factors of the 3-parameter model (index 1) against the other models;
# values >> 1 support M3 over the alternative.
print("Bayes Factor M3/M2:", evidence_list[1]/evidence_list[0])
print("Bayes Factor M3/M4:", evidence_list[1]/evidence_list[2])
print("Bayes Factor M3/M5:", evidence_list[1]/evidence_list[3])
print("Bayes Factor M3/M6:", evidence_list[1]/evidence_list[4])
# -
# ## 6.) Model Inference Using Across-model sampling with RJ-MCMC
#
# Ok, so we have seen that we can recover the correct model by computing the evidence for each single model. But we need lots of sampling (100000 steps) in order to get converged results. Also, we need to carry out sampling for each model separately. To be fair, there are probably more efficient sampling algorithms than MH for this kind of inference problem. Also, we did not optimize the hyperparameters. Nonetheless, there is an elegant way to get the correct answer with much less sampling effort, called Reversible Jump MCMC (RJ-MCMC). Instead of explicitly sampling the posterior of each individual model, one can also sample over a posterior that lives on the parameter space of different models simultaneously. This approach is called across-model sampling and effectively samples from the distribution $(k,\theta_k) \sim p(k, \theta_k | Y)$. The Markov Chain will then have spent time in the different models in proportion to their model evidence. This is especially efficient for models that are nested, i.e. when parameters from one model can be carried over to another model.
#
# ### Just another way of Metropolis-Hasting'ing
#
# How does RJ-MCMC work? It is essentially a variation of standard MH sampling. Because all we will do in RJ-MCMC is just normal MCMC but with proposal and target distributions (i.e. the posterior distribution) living on different (sub)spaces with different dimensionality (this point will become clear later)! The general acceptance probability for RJ-MCMC is (note that we omit $\sigma$ here for the sake of clarity):
#
# \begin{equation}\label{eq:RjmcmcAcceptance}
# \alpha(k, \theta_{k}, k', \theta'_{k'}) = min\Bigg(1, { p(k', \theta'_{k'}|Y) q(k, \theta_{k}, k', \theta'_{k'})
# \over p(k, \theta_{k}|Y) q(k', \theta'_{k'},k, \theta_{k}) } \Bigg)
# \end{equation}
#
# This is really not so much different than the acceptance probability for regular MH (see eq. \ref{eq:AcceptanceProbability})! The only difference is that now we sample from the joint posterior distribution $p(k, \theta_k | Y)$ and propose from the joint distribution $q(k, \theta_k, \cdot)$. The posterior distribution $p(k, \theta|Y)$ is computed similarly as the one for within-model sampling (see eq. \ref{eq:BayesEquation}). However, the proposal distribution will be computed in a different manner in order to reflect the difference in dimensionalty (if any) between proposal and target distribution. Furthermore, the general MH algorithm will be slightly adapted as follows:
#
# \begin{enumerate}
# \item Generate a new state *within* $\theta'_k$ from a proposal distribution $q_2(\theta^{(t)_k}, \theta'_k)$ (yet to be defined).
# \item Propose an across model move according to $q_1(k^{(t)}, k')$ (yet to be defined).
# \item Generate an updated parameter $\theta'_{k'}$ according to the within and across model move.
# \item Evaluate the posterior distribution according to eq. \ref{eq:RjmcmcAcceptance} and standard MH rejection/acceptance scheme.
# \end{enumerate}
#
# The following two paragraphs will outline two general ways to implement this algorithm.
#
# #### The general across-model move
#
# For a proposal to jump from model $k$ to $k+1$ (let's call this an expansion move, since we "expand" our model), the proposal probability can be stated as
#
# \begin{equation*}
# q(k+1, \theta_{k+1},k, \theta_{k}) = q_1(k+1, k) q_2(\theta_{k+1}, \theta_{k})
# \end{equation*}
#
# The parameter proposal can be further marginalized as
#
# \begin{equation*}
# q_2(\theta_{k+1}, \theta_{k}) = q^{(k+1)}_2(\theta_{k+1}| \theta_{k}) q^{(1:k)}_2(\theta_{k})
# \end{equation*}
#
# Here, $q^{(k+1)}_2$ is the proposal conditional probability for the $k+1$ element in the parameter vector. The $q^{(1:k)}_2$ probability accounts for the proposal of the first $k$ elements in the parameter vector, this is the regular MH proposal probability.
#
# Conversely, the marginalized parameter proposal probability for an across model move from $k+1$ to $k$ (i.e. a contraction move) is (note that we just keep the $(1:k)$ parameters):
#
# \begin{equation*}
# q_2(\theta_{k}, \theta_{k+1}) = q^{(1:k)}_2(\theta_{k+1})
# \end{equation*}
#
# If we combine the above considerations with eq. \ref{eq:RjmcmcAcceptance}, we get an acceptance probability for an expansion move (assuming symmetric proposal $q^{(1:k)}_2(\cdot)$ ):
#
# \begin{equation}\label{eq:RjmcmcAcceptance_GeneralCaseExpansion}
# \alpha(k, \theta_{k}, k+1, \theta_{k+1}) = min\Bigg(1, { p(k+1, \theta_{k+1}|Y) q_1(k, k+1)
# \over p(k, \theta_{k}|Y) q_1(k+1, k) q^{(k+1)}_2(\theta_{k+1}| \theta_{k})
# }
# \Bigg)
# \end{equation}
#
# Note that the acceptance probability for the contraction move is just the inverse of eq. \ref{eq:RjmcmcAcceptance_GeneralCaseExpansion}.
#
# The across model moves introduced so far sample the additional element in the parameter vector during an expansion move $u = \theta^{(k+1)}_{k+1}$ from $u \sim q^{(k+1)}_2(\theta_{k+1}| \cdot)$. While doing this, we ensured to have no overall change in dimensionality for an expansion move and its corresponding contraction move. That is, we imposed the following important boundary condition to any across model proposal ($dim(u)$ is the number of elements of vector $u$, including zero):
#
# \begin{equation}\label{eq:BoundaryConditionMove}
# k + dim(u) = k' + dim(u')
# \end{equation}
#
# #### Using deterministic functions to inform across model moves
#
# In many cases, it is beneficial to use an informative deterministic function after the parameter proposal step in order to transform the proposed parameter to something reasonable. This concept can be incorporated into RJ-MCMC, as long as this function has a proper inverse function, so that we can propose the reverse move. Then, an expansion move (after drawing $u$) is set to
#
# \begin{equation*}
# \theta_{k+1} = h(\theta_k, u) = (\theta_k, g(u))
# \end{equation*}
#
# whereas the corresponding contraction move is attempted according to ($h'$ and $g'$ are the reversed functions)
#
# \begin{equation*}
# (\theta_{k}, u) = h'(\theta_{k+1}) = (\theta^{(1:k)}_{k+1}, g'(\theta^{(k)}_{k+1}))
# \end{equation*}
#
# If we use a deterministic function to manipulate the integration variable (remember that we still try to solve eq. \ref{eq:DetailedBalanceIntegrated}), we need a Jacobian factor $|J|$ to account for the change of variables in the integration variable. The expansion move then is
#
# \begin{equation}\label{eq:RjmcmcAcceptance_DeterministicCaseExpansion}
# \alpha(k, \theta_{k}, k+1, \theta_{k+1}) = min\Bigg(1, { p(k+1, \theta_{k+1}|Y) q_1(k, k+1) q_2(u_k)
# \over p(k, \theta_{k}|Y) q_1(k+1, k) q_2(u_{k+1})}
# \Big| {\partial(h'(\theta_{k+1})) \over \partial(\theta_k, u_k)} \Big|
# \Bigg)
# \end{equation}
#
# Similarly to the general case, the acceptance probability for the contraction move is obtained from the reverse of eq. \ref{eq:RjmcmcAcceptance_DeterministicCaseExpansion}.
#
# Note how the Jacobian will be unity after setting $h(\theta_k, u) = (\theta_k, u)$ and $h'(\theta_{k+1}) = (\theta^{(1:k)}_{k+1})$ and we obtain the result of the general case (eq. \ref{eq:RjmcmcAcceptance_GeneralCaseExpansion}).
#
# ### Jump into the Sampling
#
# The code block in the next cell will execute RJ-MCMC sampling with the "general" case (see eq. \ref{eq:RjmcmcAcceptance_GeneralCaseExpansion}). Note that parameters for an expansion move are proposed according to
#
# \begin{equation}\label{eq:PropagationExpansion}
# a_{k+1}' = a_k^{(t)} + w_k
# \end{equation}
#
# \begin{equation*}
# w_k \sim \mathcal{N}(0, s_e)
# \end{equation*}
#
# The parameter $s_e$ controls the width of the proposal distribution $q_2(\theta^{(t)}_k, \theta'_{k+1})$. The implementation below can also use deterministic type moves (set the variable 'dete' to 'True') by using $g(u) = s_e \exp(u)$ and $g'(a_{k+1}) = \log(a_{k+1})-\log(s_e)$.
#
# ## Reading
#
# * <NAME>., & <NAME>. (2001). A tutorial on reversible jump MCMC with a view toward applications in QTL-mapping. International Statistical Review, 69(1), 49–61.
# * <NAME>. (1995). Reversible jump Markov chain monte carlo computation and Bayesian model determination. Biometrika, 82(4), 711–732.
# * <NAME>., & <NAME>. (2012). Model choice using reversible jump Markov chain Monte Carlo. Statistica Neerlandica, 66(3), 309–338.
# * <NAME>. (2001). On the relationship between markov chain monte carlo methods for model uncertainty. Journal of Computational and Graphical Statistics, 10(2), 230–248.
# +
### Metropolis-Hastings proposal step width 's'
step_width = 0.05
### Width of the inverse-gamma prior on the noise standard deviation
sigma_width = 2.0
### Reference that defines the boundaries on the
### parameter prior.
p_ref = np.array([0., 6])
### Initial value of the standard deviation.
### We will use the same value for each MH run.
sigma0 = 0.1
### Scale parameter 's_e' that controls the parameter mapping for the
### dimension-jumping (expansion/contraction) moves.
se = 1.0
### Use deterministic (Jacobian-based) across-model moves instead of
### the general RJ-MCMC proposal.
dete = False
### List of initial parameter vectors of varying length, going from 2 up
### to 6 parameters, built from the vector 'p' defined in an earlier cell
### (presumably the true parameters -- confirm) plus uniform noise.
p0_list = [p[:2] + 1.0 * np.random.random(2),
           p + 1.0 * np.random.random(p.shape),
           np.concatenate((p + 1.0 * np.random.random(p.shape),[1.0])),
           np.concatenate((p + 1.0 * np.random.random(p.shape),[1.0, 1.0])),
           np.concatenate((p + 1.0 * np.random.random(p.shape),[1.0, 1.0, 1.0]))
          ]
### Run one independent RJ-MCMC chain per initial condition; the chains
### are dispatched in parallel via dask.
lazy_results = list()
def worker(sampler, *args):
    # Helper executed by dask: run the sampler, then return the (mutated)
    # sampler object so its trajectories can be inspected afterwards.
    sampler.run(*args)
    return sampler
for p0 in p0_list:
    # MH_sampler, x, y and fourier_series come from earlier notebook cells.
    sampler = MH_sampler(step_width,
                         sigma_width,
                         p_ref,
                         x,
                         y,
                         fourier_series)
    ### This is sampling by using the general RJ-MCMC (10000 steps).
    lazy_result = dask.delayed(worker)(sampler, p0, sigma0, 10000, se, True, dete)
    lazy_results.append(lazy_result)
sampler_list_rj = list(dask.compute(*lazy_results))
# -
# ### Analysis of the RJ-MCMC Sampling
#
# Note how we only need 1/10 of the sampling from the previous example where we had to sample each model individually. If we closely look at the trajectories below, we see that even a much smaller number of sampling steps would have been sufficient to arrive at the correct model. However, we also see that although the correct model is found early for most initial conditions, the $-ln(posterior)$ still decreases and in real life we likely want to have more sampling in order to ensure we have enough within-model samples. Also, it is important to realize that the RJ-MCMC sampler arrives at the correct model independent of the starting conditions. This is great!
# +
fig_traj, ax_traj = plt.subplots(3, 5, figsize=(15,10))
# One column per RJ-MCMC chain (i.e. per initial condition):
#   row 0: negative log posterior of the accepted samples over the steps,
#   row 1: model dimension (number of parameters) over the steps,
#   row 2: histogram of the visited model dimensions.
for i in range(5):
    sampler = sampler_list_rj[i]
    accept_posterior = sampler.accept_posterior
    accept_dim = sampler.accept_dim
    ax_traj[0][i].plot(-np.log(accept_posterior))
    ax_traj[0][i].set_title(f"N={i+2} initial \n parameters")
    ax_traj[0][i].set_ylabel("-ln(posterior)")
    ax_traj[0][i].set_xlabel("step")
    ax_traj[1][i].plot(accept_dim)
    ax_traj[1][i].set_ylabel("N parameters")
    ax_traj[1][i].set_xlabel("step")
    ax_traj[1][i].set_yticks(list(range(1,11)))
    ax_traj[1][i].set_ylim(1,11)
    ax_traj[2][i].hist(accept_dim)
    ax_traj[2][i].set_ylabel("Counts(N Parameters)")
    ax_traj[2][i].set_xlabel("N parameters")
    ax_traj[2][i].set_xticks(list(range(1,11)))
    ax_traj[2][i].set_xlim(1,11)
fig_traj.tight_layout()
fig_traj.show()
| MCMC-Bayes/Bayesian-Inference-toyexample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''base'': conda)'
# name: python3
# ---
# # Lost in spaCy
# ## Dependencies
import pandas as pd # pandas manages the data frames
import spacy # spaCy is the NLP (text analysis) toolkit used below
nlp = spacy.load("en_core_web_lg") # load the large English pipeline used for POS tagging and NER
# ## Load Data
# > 📌I am using a sample data set to do this that is a CSV with two columns called id and text, the file is very simple by design so that the only thing going in is what is needed to come out i.e. the id of the text value and the text value - you can then join this up any way you like in Python or Power BI but the idea is to keep things as simple as possible. The text I am using is a dataset of tweets for demonstration purposes.
df = pd.read_csv('./data/text_small_example.csv', nrows=2) # loading just the first 2 rows to demonstrate code
pd.set_option('display.max_colwidth', None) # show the full text instead of truncating wide columns
df
# ## Part of Speech
# Emit one line per token across all rows of the dataframe:
# the row id, the token text and its part-of-speech tag.
for _, tweet in df.iterrows():
    text = tweet['text']
    # Rows whose text cell is not a string (e.g. NaN) carry no data: skip them.
    if not isinstance(text, str):
        continue
    # Run the spaCy pipeline on the tweet text.
    parsed = nlp(text)
    for token in parsed:
        print(tweet["id"], token.text, token.pos_)
# ## Named Entity Recognition
# > 📌 This is almost identical to the POS code and works exactly the same way except instead of using the POS outputs of nlp it uses the entity outputs
# Same traversal as the POS cell, but reporting named entities instead:
# prints the row id, the entity text and the entity label per entity.
for _, tweet in df.iterrows():
    text = tweet['text']
    # Skip rows without string data in the text column.
    if not isinstance(text, str):
        continue
    parsed = nlp(text)
    for ent in parsed.ents:
        print(tweet["id"], ent.text, ent.label_)
| lost_in_spacy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### Create Test Data
# After the model is trained, this code is used to get predictions from the model. These predictions can then be used in the COLAB notebook "mag_concept_tagger_v1_model_testing.ipynb" to calculate the metrics for model performance.
# + colab={"base_uri": "https://localhost:8080/"} id="ZJD011yhyWjD" outputId="35199005-d2bf-4120-bc5e-1e6fb8797d82"
# # %pip install tensorflow==2.4.1
# # %pip install transformers
# # %pip install pyarrow
# # %pip install tensorflow-addons
# + id="oV5qIlEokph9"
import tensorflow as tf
import pandas as pd
import pickle
import os
import tensorflow_addons as tfa
from math import ceil
AUTO = tf.data.experimental.AUTOTUNE
# -
model_iteration = 'iteration_final'
# + id="9I2c4qh5FZa-"
def _load_vocab(stem):
    """Unpickle ./{model_iteration}/vocab/{stem}.pkl and return the vocab
    dict together with its inverse (index -> token) mapping."""
    with open(f"./{model_iteration}/vocab/{stem}.pkl", "rb") as fh:
        vocab = pickle.load(fh)
    return vocab, {idx: tok for tok, idx in vocab.items()}

# Load the four vocabularies (targets, document types, journal names and
# paper-title tokens) plus their inverse mappings.
target_vocab, target_vocab_inv = _load_vocab("topics_vocab")
doc_vocab, doc_vocab_inv = _load_vocab("doc_type_vocab")
journal_vocab, journal_vocab_inv = _load_vocab("journal_name_vocab")
title_vocab, title_vocab_inv = _load_vocab("paper_title_vocab")
# -
len(target_vocab)
# ##### Short code to create ID mapping
# Map each target-vocab index to its field-of-study id by going through
# the normalized name: vocab index -> name -> field_of_study_id.
tag_ids = pd.read_parquet("fields_of_study_ids.parquet")
names = tag_ids['normalized_name'].to_list()
ids = tag_ids['field_of_study_id'].to_list()
name_to_id = {name:i for name, i in zip(names, ids)}
id_dict = {i:name_to_id[j] for i,j in target_vocab_inv.items()}
# +
# with open(f"./{model_iteration}/vocab/tag_id_vocab.pkl", "wb") as f:
#     pickle.dump(id_dict, f)
# -
# Reload the previously pickled mapping to sanity-check it.
with open(f"./{model_iteration}/vocab/tag_id_vocab.pkl", "rb") as f:
    test_dict = pickle.load(f)
# #### Getting the model
# + id="8dvSQiNsFHr4"
# Multi-hot ("binary") encoder sized to the target vocabulary.
# NOTE(review): encoding_layer is not referenced anywhere below in this
# notebook -- presumably a leftover from training; confirm before removing.
encoding_layer = tf.keras.layers.experimental.preprocessing.CategoryEncoding(
    max_tokens=len(target_vocab)+1, output_mode="binary", sparse=False)
# -
len(title_vocab)
len(target_vocab)
# Load the trained tagger and wrap it so it emits the top-30
# (score, index) pairs per sample instead of the raw output vector.
mag_model = tf.keras.models.load_model(f'./V1/model/')
mag_model.inputs
final_model = tf.keras.Model(inputs=mag_model.inputs,
                             outputs=tf.math.top_k(mag_model.outputs, k=30))
final_model.summary()
def get_all_model_predictions(data_path):
    """Load every tokenized test partition under *data_path*, score it in
    batches and return the dataframe with two added columns,
    'predictions' and 'scores'.

    Bug fix: the original ignored the *data_path* argument and rebuilt the
    path from the module-level *model_iteration*; the parameter is now
    actually used (the existing caller passes that same path, so behavior
    is unchanged).

    Parameters
    ----------
    data_path : str
        directory (with trailing slash) containing 'part*' parquet files.

    Returns
    -------
    pd.DataFrame with 'predictions' and 'scores' columns appended.
    """
    # Gather the parquet partitions in a stable, sorted order.
    file_names = sorted(x for x in os.listdir(data_path) if x.startswith('part'))
    # Read each partition once and concatenate a single time -- repeatedly
    # concatenating inside the loop is quadratic in the number of rows.
    frames = [pd.read_parquet(f"{data_path}{file_name}") for file_name in file_names]
    full_df = pd.concat(frames, axis=0) if frames else pd.DataFrame()
    # Score in fixed-size batches to bound memory use.
    num_samples = 1000
    preds_final = []
    scores_final = []
    for i in range(ceil(full_df.shape[0]/num_samples)):
        print(i)  # crude progress indicator
        small_df = full_df.iloc[i*num_samples:(i+1)*num_samples, :].copy()
        preds, scores = get_model_predictions(small_df)
        preds_final += preds
        scores_final += scores
    full_df['predictions'] = preds_final
    full_df['scores'] = scores_final
    return full_df
def get_model_predictions(input_data):
    """Run the wrapped top-k model on one batch of tokenized rows.

    Parameters
    ----------
    input_data : pd.DataFrame
        must contain 'paper_title_tok', 'doc_type_tok' and 'journal_tok'
        columns holding lists of token ids.

    Returns
    -------
    (preds, scores) : two lists with one entry per row -- the top-20
        predicted tag indices and their corresponding scores.
    """
    # Pad/truncate every title to exactly 64 token ids (post-padding with 0).
    paper_titles = tf.keras.preprocessing.sequence.pad_sequences(input_data['paper_title_tok'].to_list(), maxlen=64,
                                                                 dtype='int64', padding='post',
                                                                 truncating='post', value=0)
    doc_types = tf.convert_to_tensor(input_data['doc_type_tok'].to_list())
    journal = tf.convert_to_tensor(input_data['journal_tok'].to_list())
    model_output = final_model([paper_titles, doc_types, journal])
    # final_model applies top_k over the model's output list, so [0] selects
    # the (presumably single) output head -- confirm if the model ever grows
    # more heads. Only the first 20 of the k=30 entries are kept.
    scores = model_output.values.numpy()[0][:,:20].tolist()
    preds = model_output.indices.numpy()[0][:,:20].tolist()
    return preds, scores
# Score the whole test split and persist it with the predictions attached.
test_data = get_all_model_predictions(f"./{model_iteration}/tokenized_data/test/")
test_data.to_parquet(f"./{model_iteration}/test_data/data_with_predictions.parquet")
# test_data = pd.read_parquet(f"./{model_iteration}/test_data/data_with_predictions.parquet")
# Keep only rows that have at least one real target tag; -1 entries are
# filtered out (presumably a padding value -- confirm with the tokenizer).
test_data['target_test'] = test_data['target_tok'].apply(lambda x: [i for i in x if i!=-1])
test_data['target_test'] = test_data['target_test'].apply(len)
test_data = test_data[test_data['target_test'] > 0].copy()
test_data.shape
test_data.sample(5)
# ### Code to get all raw test data into one file
import os
import pandas as pd
pd.set_option('display.max_colwidth', None)
# Load every raw test-data partition ('part*' parquet files) into a single
# dataframe. Collecting the parts in a list and concatenating once avoids
# the quadratic cost of calling pd.concat inside the loop.
_raw_parts = [pd.read_parquet(f"./iteration_final/test_data_raw/{i}")
              for i in os.listdir("./iteration_final/test_data_raw/")
              if i.startswith('part')]
# Fall back to an empty frame when no partitions exist (matches the
# original behavior of starting from pd.DataFrame()).
test_data_raw = pd.concat(_raw_parts) if _raw_parts else pd.DataFrame()
test_data_raw.sample(20)
# +
# test_data_raw.to_parquet("test_raw.parquet")
# -
| V1/002_Model/004_predictions_for_model_testing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# # Imports for the notebook
# +
import pandas as pd
import numpy as np
import git
import os
import plotly.express as px
from datetime import datetime
# %matplotlib inline
pd.set_option('display.max_rows', 500)
# -
# # Logic to load the data from github
# +
# Clone the Johns-Hopkins COVID-19 repository on first run; on subsequent
# runs a plain 'git pull' refreshes the data in place.
git_dir='../data/raw/COVID-19'
if not os.path.isdir(git_dir):  # idiomatic truth test instead of '== False'
    git.Git("../data/raw/").clone("https://github.com/CSSEGISandData/COVID-19.git")
else:
    print("Folder already exists no need to clone. Just a git pull should do the job")
    print('Pulling the data now.....')
    g = git.cmd.Git(git_dir)
    g.pull()
# -
#we concentrate on time_series_covid19_confirmed_global.csv file for this project.
#Let's have a look at the file.
data_path='../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv'
pd_raw=pd.read_csv(data_path)
pd_raw.head()
# The Lat and Long columns are not needed, so drop them, and rename the
# country/state columns to simpler names.
pd_raw=pd_raw.rename(columns={'Country/Region':'country','Province/State':'state'})
pd_raw=pd_raw.drop(['Lat', 'Long'], axis=1)
pd_raw.head()
# ## Number of countries data available:
print("Number of Countries available: ",len(pd_raw['country'].unique()))
# Total confirmed cases on 8/13/20 summed over all provinces of one country.
# NOTE(review): the variable is named germany_df but the filter selects
# 'Canada' -- confirm which country was intended.
germany_df=pd_raw[pd_raw['country']=='Canada']['8/13/20'].sum(axis=0)
germany_df
# Countries shown in the quick-look plot below.
country_list=['Germany',
              'US',
              'Russia',
              'India',
              'Brazil',
              ]
# Build a new dataframe with a 'date' column: after the rename/drop above,
# every column of pd_raw from index 2 onwards is a date string ('%m/%d/%y').
time_idx=pd_raw.columns[2:]
time_idx=[datetime.strptime( each,"%m/%d/%y") for each in time_idx]
#create df
df_plot = pd.DataFrame({'date':time_idx})
df_plot.head()
# Add one column per country; summing over axis 0 merges the per-state rows
# of countries that report by province.
for each in country_list:
    df_plot[each]=np.array(pd_raw[pd_raw['country']==each].iloc[:,2::].sum(axis=0))
df_plot.head()
# %matplotlib inline
df_plot.set_index('date').plot()
# +
# The data looks good, so compose the same table for all countries.
#first get the time index
df_plot_full = pd.DataFrame({'date':time_idx})
df_plot_full.head()
# Add a column per country; for countries with states just sum on axis 0.
for each in pd_raw['country'].unique():
    df_plot_full[each]=np.array(pd_raw[pd_raw['country']==each].iloc[:,2::].sum(axis=0))
# Reshape from wide (one column per country) into long format with the
# columns date / country / confirmed.
df_plot_full=df_plot_full.set_index(['date']).stack(level=[0]).reset_index().rename(columns={'level_1':'country',
                                                                                             0:'confirmed'})
#write the data to a file for further processing.
df_plot_full.to_csv('../data/processed/COVID_relational_confirmed.csv',sep=';',index=False)
df_plot_full.head()
# -
# # Now filter the data and calculate the doubling rate.
# +
import numpy as np
from sklearn import linear_model
# Shared regressor instance reused by get_doubling_time_via_regression below.
reg = linear_model.LinearRegression(fit_intercept=True)
import pandas as pd
from scipy import signal
# +
def savgol_filter(df_input, column='confirmed', window=5, degree=1):
    ''' Smooth one column with a Savitzky-Golay filter; usable inside a
        groupby-apply (index and data structure are kept).

        Note: the filtered column is written into *df_input* itself (no
        copy is made); the same object is returned for apply() chaining.

        Parameters:
        ----------
        df_input : pd.DataFrame
            frame containing *column*; its index is preserved in the result
        column : str
            name of the column to smooth
        window : int
            number of data points used to calculate each filtered value
        degree : int
            polynomial degree of the fit. Previously this was a dead local
            variable while the literal 1 was passed to the filter; it is
            now a real parameter with the same default, so behavior is
            unchanged for existing callers.

        Returns:
        ----------
        df_result : pd.DataFrame
            *df_input* with an added '<column>_filtered' column
    '''
    df_result = df_input
    # NaNs would propagate through the filter, so fill them first
    # (attention with the neutral element here: 0 suits cumulative counts).
    filter_in = df_input[column].fillna(0)
    result = signal.savgol_filter(np.array(filter_in),
                                  window,  # window size used for filtering
                                  degree)  # polynomial degree of the fit
    df_result[column + '_filtered'] = result
    return df_result
def get_doubling_time_via_regression(in_array):
    ''' Use a linear regression to approximate the doubling rate.

    Fits y = intercept + slope * t over the three time points t = -1, 0, 1
    (using the module-level sklearn regressor 'reg') and returns
    intercept/slope, i.e. current level divided by daily increase -- an
    approximation of the doubling time in days.

    Parameters:
    ----------
    in_array : pandas.Series
        exactly 3 consecutive values (enforced by the assert below)

    Returns:
    ----------
    Doubling rate: double
    '''
    y = np.array(in_array)
    X = np.arange(-1,2).reshape(-1, 1)  # the three time points [-1, 0, 1]
    assert len(in_array)==3
    reg.fit(X,y)
    intercept=reg.intercept_
    slope=reg.coef_
    return intercept/slope
def rolling_reg(df_input, col='confirmed'):
    ''' Rolling regression to approximate the doubling time.

    Applies get_doubling_time_via_regression over a 3-day rolling window
    of the chosen column.

    Parameters:
    ----------
    df_input: pd.DataFrame
    col: str
        defines the used column

    Returns:
    ----------
    result: pd.DataFrame
    '''
    window_size = 3
    rolling_window = df_input[col].rolling(window=window_size,
                                           min_periods=window_size)
    return rolling_window.apply(get_doubling_time_via_regression, raw=False)
def calc_filtered_data(df_input, filter_on='confirmed'):
    ''' Calculate savgol filter and return merged data frame.

    Parameters:
    ----------
    df_input: pd.DataFrame
        must contain a 'country' column and the *filter_on* column
    filter_on: str
        defines the used column

    Returns:
    ----------
    df_output: pd.DataFrame
        copy of the input with a new '<filter_on>_filtered' column joined on
    '''
    must_contain = set(['country', filter_on])
    assert must_contain.issubset(set(df_input.columns)), 'Error in calc_filtered_data: not all columns in data frame'
    df_output = df_input.copy()  # we need a copy here otherwise the filter_on column will be overwritten
    # Bug fix: forward *filter_on* to savgol_filter. Previously the filter
    # always ran on its hard-coded default column 'confirmed' and raised a
    # KeyError for any other filter_on value.
    pd_filtered_result = df_output[['country', filter_on]].groupby(['country']).apply(savgol_filter, column=filter_on)
    # Join the filtered column back on the original row index.
    df_output = pd.merge(df_output,
                         pd_filtered_result[[filter_on + '_filtered']],
                         left_index=True, right_index=True, how='left')
    return df_output.copy()
def calc_doubling_rate(df_input, filter_on='confirmed'):
    ''' Calculate approximated doubling rate and return merged data frame.

    Parameters:
    ----------
    df_input: pd.DataFrame
        must contain a 'country' column and the *filter_on* column
    filter_on: str
        defines the used column

    Returns:
    ----------
    df_output: pd.DataFrame
        the input frame with a new '<filter_on>_DR' column joined on
    '''
    must_contain = set(['country', filter_on])
    # Bug fix: the assertion message used to name calc_filtered_data
    # (copy-paste error); it now names this function.
    assert must_contain.issubset(set(df_input.columns)), 'Error in calc_doubling_rate: not all columns in data frame'
    # Per-country rolling regression; reset_index keeps the original row
    # position in the 'level_1' column so we can merge back below.
    pd_DR_result = df_input.groupby(['country']).apply(rolling_reg, filter_on).reset_index()
    pd_DR_result = pd_DR_result.rename(columns={filter_on: filter_on + '_DR',
                                                'level_1': 'index'})
    # Merge on the index of the big table and on the 'index' column of the
    # groupby result, then drop the helper column again.
    df_output = pd.merge(df_input,
                         pd_DR_result[['index', filter_on + '_DR']],
                         left_index=True, right_on=['index'], how='left')
    df_output = df_output.drop(columns=['index'])
    return df_output
# -
# Reload the relational data produced above and sort chronologically so the
# rolling windows run forward in time.
pd_JH_data=pd.read_csv('../data/processed/COVID_relational_confirmed.csv',sep=';',parse_dates=[0])
pd_JH_data=pd_JH_data.sort_values('date',ascending=True).copy()
pd_JH_data.head()
# Filter the confirmed numbers, then compute the doubling rate on both the
# raw and the filtered column.
pd_result_larg=calc_filtered_data(pd_JH_data)
pd_result_larg=calc_doubling_rate(pd_result_larg)
pd_result_larg=calc_doubling_rate(pd_result_larg,'confirmed_filtered')
pd_result_larg.head()
# Doubling rates are noisy for tiny case counts: blank them out below 100
# confirmed cases.
mask=pd_result_larg['confirmed']>100
pd_result_larg['confirmed_filtered_DR']=pd_result_larg['confirmed_filtered_DR'].where(mask, other=np.NaN)
pd_result_larg.to_csv('../data/processed/COVID_final_set.csv',sep=';',index=False)
print(pd_result_larg[pd_result_larg['country']=='Germany'].tail())
# +
# Quick check of the world-map visualization used by the dashboard below:
# take one day's data, join it onto the gapminder table (which supplies the
# ISO alpha-3 codes plotly needs) and draw a scatter_geo.
df_test=pd_result_larg.loc[pd_result_larg['date'] == '2020-01-22']
df = px.data.gapminder().query("year == 2007")
df = pd.merge(df, df_test, on='country')
fig2 = px.scatter_geo(df, locations="iso_alpha",
                     size="confirmed", # size of markers, "pop" is one of the columns of gapminder
                     )
fig2.show()
# -
df.head()
# +
# # # %load src/visualization/visualize.py
# import pandas as pd
# import numpy as np
# import dash
# dash.__version__
# import dash_core_components as dcc
# import dash_html_components as html
# from dash.dependencies import Input, Output,State
# import plotly.graph_objects as go
# import os
# print(os.getcwd())
# df_input_large=pd.read_csv('../data/processed/COVID_final_set.csv',sep=';')
# fig = go.Figure()
# app = dash.Dash()
# app.layout = html.Div([
# dcc.Markdown('''
# # Applied Data Science on COVID-19 data
# Goal of the project is to teach data science by applying a cross industry standard process,
# it covers the full walkthrough of: automated data gathering, data transformations,
# filtering and machine learning to approximating the doubling time, and
# (static) deployment of responsive dashboard.
# '''),
# dcc.Markdown('''
# ## Multi-Select Country for visualization
# '''),
# dcc.Dropdown(
# id='country_drop_down',
# options=[ {'label': each,'value':each} for each in df_input_large['country'].unique()],
# value=['US', 'Germany','Italy'], # which are pre-selected
# multi=True
# ),
# dcc.Markdown('''
# ## Select Timeline of confirmed COVID-19 cases or the approximated doubling time
# '''),
# dcc.Dropdown(
# id='doubling_time',
# options=[
# {'label': 'Timeline Confirmed ', 'value': 'confirmed'},
# {'label': 'Timeline Confirmed Filtered', 'value': 'confirmed_filtered'},
# {'label': 'Timeline Doubling Rate', 'value': 'confirmed_DR'},
# {'label': 'Timeline Doubling Rate Filtered', 'value': 'confirmed_filtered_DR'},
# ],
# value='confirmed',
# multi=False
# ),
# dcc.Graph(id='main_window_slope')
# ])
# @app.callback(
# Output('main_window_slope', 'figure'),
# [Input('country_drop_down', 'value'),
# Input('doubling_time', 'value')])
# def update_figure(country_list,show_doubling):
# if 'doubling_rate' in show_doubling:
# my_yaxis={'type':"log",
# 'title':'Approximated doubling rate over 3 days (larger numbers are better #stayathome)'
# }
# else:
# my_yaxis={'type':"log",
# 'title':'Confirmed infected people (source johns hopkins csse, log-scale)'
# }
# traces = []
# for each in country_list:
# df_plot=df_input_large[df_input_large['country']==each]
# if show_doubling=='doubling_rate_filtered':
# df_plot=df_plot[['country','confirmed','confirmed_filtered','confirmed_DR','confirmed_filtered_DR','date']].groupby(['country','date']).agg(np.mean).reset_index()
# else:
# df_plot=df_plot[['country','confirmed','confirmed_filtered','confirmed_DR','confirmed_filtered_DR','date']].groupby(['country','date']).agg(np.sum).reset_index()
# #print(show_doubling)
# traces.append(dict(x=df_plot.date,
# y=df_plot[show_doubling],
# mode='markers+lines',
# opacity=0.9,
# name=each
# )
# )
# return go.Figure(
# data= traces,
# layout= dict (
# width=1280,
# height=720,
# xaxis={'title':'Timeline',
# 'tickangle':-45,
# 'nticks':20,
# 'tickfont':dict(size=14,color="#7f7f7f"),
# },
# yaxis=my_yaxis
# )
# )
# if __name__ == '__main__':
# app.run_server(debug=True, use_reloader=False)
# +
# # # %load src/visualization/visualize.py
# import pandas as pd
# import numpy as np
# import dash
# dash.__version__
# import dash_core_components as dcc
# import dash_html_components as html
# from dash.dependencies import Input, Output,State
# import plotly.graph_objects as go
# import os
# print(os.getcwd())
# df_input_large=pd.read_csv('../data/processed/COVID_final_set.csv',sep=';')
# fig = go.Figure()
# app = dash.Dash()
# app.layout = html.Div(children=[
# html.Div([
# dcc.Markdown('''
# # Applied Data Science on COVID-19 data
# Goal of the project is to teach data science by applying a cross industry standard process,
# it covers the full walkthrough of: automated data gathering, data transformations,
# filtering and machine learning to approximating the doubling time, and
# (static) deployment of responsive dashboard.
# '''),
# dcc.Markdown('''
# ## Multi-Select Country for visualization
# '''),
# dcc.Dropdown(
# id='country_drop_down',
# options=[ {'label': each,'value':each} for each in df_input_large['country'].unique()],
# value=['US', 'Germany','Italy'], # which are pre-selected
# multi=True
# ),
# dcc.Markdown('''
# ## Select Timeline of confirmed COVID-19 cases or the approximated doubling time
# '''),
# dcc.Dropdown(
# id='doubling_time',
# options=[
# {'label': 'Timeline Confirmed ', 'value': 'confirmed'},
# {'label': 'Timeline Confirmed Filtered', 'value': 'confirmed_filtered'},
# {'label': 'Timeline Doubling Rate', 'value': 'confirmed_DR'},
# {'label': 'Timeline Doubling Rate Filtered', 'value': 'confirmed_filtered_DR'},
# ],
# value='confirmed',
# multi=False
# ),
# dcc.Graph(id='main_window_slope')
# ], style={'display': 'inline-block','width': '49%'}),
# html.Div( dcc.Graph(id="graph2"), style={'display': 'inline-block','width': '49%'})
# ],style={'width': '100%', 'display': 'inline-block'})
# @app.callback(
# Output('main_window_slope', 'figure'),
# [Input('country_drop_down', 'value'),
# Input('doubling_time', 'value')])
# def update_figure(country_list,show_doubling):
# if 'doubling_rate' in show_doubling:
# my_yaxis={'type':"log",
# 'title':'Approximated doubling rate over 3 days (larger numbers are better #stayathome)'
# }
# else:
# my_yaxis={'type':"log",
# 'title':'Confirmed infected people (source johns hopkins csse, log-scale)'
# }
# traces = []
# for each in country_list:
# df_plot=df_input_large[df_input_large['country']==each]
# if show_doubling=='doubling_rate_filtered':
# df_plot=df_plot[['country','confirmed','confirmed_filtered','confirmed_DR','confirmed_filtered_DR','date']].groupby(['country','date']).agg(np.mean).reset_index()
# else:
# df_plot=df_plot[['country','confirmed','confirmed_filtered','confirmed_DR','confirmed_filtered_DR','date']].groupby(['country','date']).agg(np.sum).reset_index()
# #print(show_doubling)
# traces.append(dict(x=df_plot.date,
# y=df_plot[show_doubling],
# mode='markers+lines',
# opacity=0.9,
# name=each
# )
# )
# return go.Figure(
# data= traces,
# layout= dict (
# width=1280,
# height=720,
# xaxis={'title':'Timeline',
# 'tickangle':-45,
# 'nticks':20,
# 'tickfont':dict(size=14,color="#7f7f7f"),
# },
# yaxis=my_yaxis
# )
# )
# if __name__ == '__main__':
# app.run_server(debug=True, use_reloader=False)
# +
# # %load src/visualization/visualize.py
import pandas as pd
import numpy as np
import dash
dash.__version__
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output,State
import plotly.graph_objects as go
import os
print(os.getcwd())
df_input_large=pd.read_csv('../data/processed/COVID_final_set.csv',sep=';')
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
fig = go.Figure()
app = dash.Dash()
app.layout = html.Div(children=[
html.Div([html.H1('''
Applied Data Science on COVID-19 data
'''),
dcc.Markdown('''
## Multi-Select Country for visualization
'''),
dcc.Dropdown(
id='country_drop_down',
options=[ {'label': each,'value':each} for each in df_input_large['country'].unique()],
value=['US', 'Germany','Italy'], # which are pre-selected
multi=True
),
dcc.Markdown('''
## Select Timeline of confirmed COVID-19 cases or the approximated doubling time
'''),
dcc.Dropdown(
id='doubling_time',
options=[
{'label': 'Timeline Confirmed ', 'value': 'confirmed'},
{'label': 'Timeline Confirmed Filtered', 'value': 'confirmed_filtered'},
{'label': 'Timeline Doubling Rate', 'value': 'confirmed_DR'},
{'label': 'Timeline Doubling Rate Filtered', 'value': 'confirmed_filtered_DR'},
],
value='confirmed',
multi=False
),
dcc.Graph(id='main_window_slope')
], style={'display': 'inline-block','width': '55%'}),
html.Div(children=[html.H2('World mAp of Infected/Doubling'),
dcc.Graph(id="graph2")], style={'display': 'inline-block','width': '40%'})
])
@app.callback(
    Output('main_window_slope', 'figure'),
    [Input('country_drop_down', 'value'),
     Input('doubling_time', 'value')])
def update_figure(country_list,show_doubling):
    """Redraw the timeline plot for the selected countries and quantity."""
    # Pick an axis title matching the selected quantity; both use log scale.
    # NOTE(review): the dropdown values are 'confirmed*', so the substring
    # test for 'doubling_rate' never matches -- confirm whether the first
    # branch (and its #stayathome title) is ever reachable.
    if 'doubling_rate' in show_doubling:
        my_yaxis={'type':"log",
                  'title':'Approximated doubling rate over 3 days (larger numbers are better #stayathome)'
                  }
    else:
        my_yaxis={'type':"log",
                  'title':'Confirmed infected people (source johns hopkins csse, log-scale)'
                  }
    traces = []
    for each in country_list:
        df_plot=df_input_large[df_input_large['country']==each]
        # NOTE(review): same mismatch here -- show_doubling is never equal to
        # 'doubling_rate_filtered', so the np.sum branch is always taken.
        if show_doubling=='doubling_rate_filtered':
            df_plot=df_plot[['country','confirmed','confirmed_filtered','confirmed_DR','confirmed_filtered_DR','date']].groupby(['country','date']).agg(np.mean).reset_index()
        else:
            df_plot=df_plot[['country','confirmed','confirmed_filtered','confirmed_DR','confirmed_filtered_DR','date']].groupby(['country','date']).agg(np.sum).reset_index()
        #print(show_doubling)
        traces.append(dict(x=df_plot.date,
                           y=df_plot[show_doubling],
                           mode='markers+lines',
                           opacity=0.9,
                           name=each
                           )
                      )
    return go.Figure(
        data= traces,
        layout= dict (
            width=1280,
            height=720,
            xaxis={'title':'Timeline',
                   'tickangle':-45,
                   'nticks':20,
                   'tickfont':dict(size=14,color="#7f7f7f"),
                   },
            yaxis=my_yaxis
            )
        )
@app.callback(Output('graph2', 'figure'), [Input('main_window_slope', 'hoverData'),Input('doubling_time', 'value')])
def disp_hover_data(hover_data,doubling_time):
    """Update the world map when a point in the timeline plot is hovered.

    Parameters
    ----------
    hover_data : dict or None
        Dash hover payload from 'main_window_slope'; None before any hover.
    doubling_time : str
        Currently selected dropdown value (e.g. 'confirmed'); used as the
        column that sizes the map markers.

    Returns
    -------
    plotly.graph_objects.Figure
        A scatter-geo map, or an empty figure while nothing is hovered yet.
    """
    fig2=go.Figure()
    # Dash passes None until the user hovers a point; isinstance() is the
    # idiomatic guard (the original used `type(hover_data) is dict`).
    if isinstance(hover_data, dict):
        date=hover_data['points'][0]['x']
        df_test=pd_result_larg.loc[pd_result_larg['date'] == date]
        # gapminder supplies the ISO country codes scatter_geo needs.
        df = px.data.gapminder().query("year == 2007")
        df = pd.merge(df, df_test, on='country')
        fig2 = px.scatter_geo(df, locations="iso_alpha",
                              size=doubling_time,  # marker size = selected column
                              )
    return fig2
# Start the Dash development server; the auto-reloader is disabled because it
# does not interact well with notebook execution.
if __name__ == '__main__':
    app.run_server(debug=True, use_reloader=False)
# -
| notebooks/Back_up/Data_preparation_small_file.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="cFL9Z7CkrlAg" colab={"base_uri": "https://localhost:8080/"} outputId="66f614ff-1f5d-47ba-f67a-9331e725f876"
# Transformers installation
# !pip install transformers==2.8.0
# !pip install torch==1.4.0
# !pip install newspaper3k
# !pip install sentencepiece
# To install from source instead of the last release, comment the command above and uncomment the following one.
# # ! pip install git+https://github.com/huggingface/transformers.git
# + [markdown] id="7GGmWTmKrlAu"
# # Summary of the tasks
#
# 1. Article Summarization using Method 1
# 2. Article Summarization using Method 2
# + [markdown] id="XBof8XL9rlB-"
# ## Article Summarization
# + [markdown] id="MNEiYU3CrlB_"
# Summarization is the task of summarizing a document or an article into a shorter text. If you would like to fine-tune a
# model on a summarization task, you may leverage the [run_summarization.py](https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization/run_summarization.py)
# script.
#
# An example of a summarization dataset is the CNN / Daily Mail dataset, which consists of long news articles and was
# created for the task of summarization. If you would like to fine-tune a model on a summarization task, various
# approaches are described in this :prefix_link:*document <examples/pytorch/summarization/README.md>*.
#
# Here is an example of using the pipelines to do summarization. It leverages a Bart model that was fine-tuned on the CNN
# / Daily Mail data set.
#
#
# The Input to model can be either "TEXT FILE" or " Direct String"
# + [markdown] id="TWPYRV5_KZV_"
#
#
# This summarizing pipeline can currently be loaded from pipeline() using the following task identifier: "summarization".
#
# The models that this pipeline can use are models that have been fine-tuned on a summarization task, which is currently, **‘bart-large-cnn’, ‘t5-small’, ‘t5-base’, ‘t5-large’, ‘t5-3b’, ‘t5-11b’.**
# + [markdown] id="TxSeWl0ZK17q"
# An up-to-date list of all available summarization models: [https://huggingface.co/models?filter=summarization](https://huggingface.co/models?filter=summarization)
# + [markdown] id="KbsUuiZX4Zzw"
# **Article 1**
# + id="R_kZcUAu4T7N"
# Article_1="""In a historic move, the US government has announced that it supports waiving patent protections for COVID-19 vaccines, a measure aimed at boosting supplies so that people around the world can get the shots. “The extraordinary circumstances of the COVID-19 pandemic call for extraordinary measures,” said US trade representative <NAME> in a statement. The move came on 5 May, the first of a two-day meeting of the general council of the World Trade Organization, based in Geneva, Switzerland. Until now, the United States, the European Union, the United Kingdom and Japan have blocked efforts brought by India and South Africa to make it legal to manufacture generic versions of COVID-19 vaccines. Former US presidents from both the Republican and the Democratic parties have staunchly defended intellectual-property rights, so the move by the administration of President <NAME> has shocked people on both sides of the debate. "This marks a major shift in US policy in a pro-public-health way," says <NAME>, a global-health researcher at Georgetown University in Washington DC. Kavanagh is part of the growing chorus of health-policy and global-health researchers advocating patent waivers, as the gap between vaccination rates in rich and poor nations grows larger every day.
# Fewer than 1% of people in low-income countries have received COVID-19 vaccines. The researchers are quick to note, however, that a waiver on patents covering all aspects of COVID-19 vaccines would be just the first step in ramping up vaccine supply. “It’s a one-two-three,” explains <NAME>, US director for the non-profit Drugs and Neglected Diseases initiative in New York City. “First we need to remove patent obstacles, second we need to transfer the knowledge on how to make them, and step three is a massive investment in manufacturing capacity.” And at the moment, step one is far from complete. The World Trade Organization will not negotiate the details of which patents to adjust until all its member countries agree on some sort of waiver. Health-policy analysts speculate that other countries will follow in the footsteps of the United States, although the European Union might hold out beyond the end of the meeting. South Africa and India have proposed waivers on patents, not only on vaccines, but also on COVID-19-related medical devices, drugs and diagnostic technologies; so far, Tai’s statement mentions only vaccines. Drugmakers and others who oppose the measure say that waivers sabotage companies’ enormous investments in drug and vaccine development, which are compensated by their ability to set the price on products that they exclusively own.
# Normally, patents reward pharmaceutical companies by protecting their inventions from competition by generics for a limited time—US patents on drugs typically last for 20 years. Drug companies aren’t the only opponents of the measure. In a 25 April interview with Sky News, global health philanthropist and Microsoft co-founder <NAME> argued against intellectual-property waivers, saying that manufacturers of generics couldn’t ramp up production quickly, and that vaccine quality could be compromised. After the US government's waiver announcement, the industry group Pharmaceutical Research and Manufacturers of America released a statement echoing these points, saying: “The Biden Administration has taken an unprecedented step that will undermine our global response to the pandemic and compromise safety.” Proponents of the waiver disagree, pointing out that generics manufacturers have been supplying the world with high-quality vaccines and medicines for years. They point out that taxpayers helped to foot the bill for the development of several COVID-19 vaccines, and say that the claim that pharmaceutical companies must recoup all the costs is therefore unfair—especially during a crisis. Several other obstacles must be addressed, however, such as making sure distribution is equitable.
# Cohen says: These vaccines are an unparalleled triumph for science, but if only 20% or 30% of the world winds up benefiting, what is the point of the innovation?
# """
# + [markdown] id="8As6dC4o4fkc"
# **Article 2**
# + id="slDLGGzB4jUl"
# Article_2="""
# I have never had as much suicidal ideation as I’ve had over the last year and a half, during this pandemic.
# I have not been in any particular physical danger. Thinking about ending one’s life can be an understandable coping mechanism to survive adverse conditions, such as living alone through a pandemic and going without touch or indoor companionship for months on end. I have a good therapist, and my ideas about suicide never progressed beyond thoughts towards making any plans to actually go through with it.
# The “logic” to these thoughts happened in a cycle like this.
# After eating 21 meals alone week, week after week, I craved being close to other people.
# But if I gave in to such urges, I feared, based on a no-risk mentality, that I might unwittingly set off a COVID infection chain that would kill people.
# Feeling like I couldn’t take another 21, 42, 63 or 84 meals alone, I’d think to myself at times, “Well, if you’ve got to kill someone, it might as well be you, Steven.”
# “Better just to erase myself from the equation,” I’d think, lest my desires inadvertently kill someone else.
# While not lethal, these thoughts caused me great emotional pain and mental anguish. And when I felt my first vaccine shot punctuate my left arm recently, I felt a palpable sense of relief as those excruciating thoughts drifted out of mind and body—hopefully for good.
# When COVID first began to shut down the United States, I feared how single people who live alone like me would be expected to just suck it up and deal with a purely solitary life. (I also feared how domestic violence would affect families.) Yet while I often tell other people that risk lies on a spectrum, that it’s not absolute, I had a hard time granting myself any leeway with thinking I could put anyone else at any risk. And so, I have been painfully alone for almost all of the time I’ve spent indoors over the past year.
# Now, as increasing numbers of Americans are getting vaccinated, the risk of many activities is mercifully plummeting. If it ever was, I believe it is no longer excusable to attempt an impossible no-risk approach to life in an ongoing pandemic that may well become endemic. As we fight for a more just world, we must also learn to live with a spectrum of risk.
# A key to doing this is for those of us who are researchers, journalists or both to better narrate the concept of risk. Let us consider three examples of how news media could do a better job.
# On April 25, a New York Times headline irresponsibly screamed that “Millions Are Skipping Their Second Doses of Covid Vaccines.” While technically true, the headline’s framing obscured something more exciting and newsworthy: out of more than 60 million people, it was only a scant “8 percent of those who got initial Pfizer or Moderna shots” who had “missed their second doses.” The real news was that 92 percent of people who’d gotten their first shot had shown up for their second.
# The U.S. COVID vaccination follow-through has been an unmitigated, historic success. In previous multidose vaccine campaigns, it has been far lower. In 2018, for example, a study of 350,240 Medicare and 12,599 Medicaid enrollees receiving multidose vaccines for hepatitis A and hepatitis B found completion rates could be as low as 19 percent, and only went as high as 48.9 percent. And a campaign completed between 2017 and 2019 that sought to vaccinate approximately seven million people against shingles—previously cited as one of the most successful multidose campaigns ever undertaken in the U.S.—found that 70 percent and 80 percent “completed the two-dose series within six and 12 months post initial dose, respectively.” That over 92 percent showed up for their second COVID shot, and the rest still got the 80 percent protection of a single jab, is a wildly successful campaign. And yet, the Times narrated it as cause for concern—because if it had to be all or nothing, it fell short.
# Journalists and social media users similarly struggled to properly frame the potential risks of the Johnson & Johnson vaccine when the Food and Drug Administration put it on pause last month. The FDA reported in early April that there had been six cases of blood clots possibly linked to some 6.8 million administered doses of Johnson & Johnson’s one-shot vaccine. That’s less than one per million, and only one was fatal. At the same time, about 562,000 of approximately 331 million Americans had died of COVID—about one out of every 588 people living in the United States. This meant that even if one in a million people were getting blood clots from the Johnson & Johnson vaccine, the risk of not taking it would be much higher than the risk of taking it.
# A reader wrote to me on the night of the pause, despondent and asking for advice, having just secured funding and created outreach materials that included J&J information to get vaccines to hard-to-reach populations. If you do some math, if another 6.8 million people who might have gotten J&J went unvaccinated altogether, some 11,564 of them might die of COVID— to stop one one possible death. Yet because narratives often focus on single stories of individuals and the holy grail of zero risk, the idea of six people getting blood clots can register more in our collective consciousness than the broadly mitigating effects of a single-dose vaccine with minimal risk.
# Finally, the CDC recently recommended that people who are vaccinated do not need to wear masks outside unless in crowds. This was prudent and sensible. SARS-CoV-2 has long been known to transmit overwhelmingly more easily indoors; a study of 381 outbreaks in China could only trace one to outdoor transmission. For that reason, outdoor mask mandates (in the hopes people will then also use them inside) have always been dubious. As Harvard epidemiologist <NAME> once put it, requiring masks outside is “a bit like saying we’re going to ask people to wear condoms when they’re masturbating, because we think it’s going to get them to wear a condom when they’re with another person.”
# The risk is even more dubious as the U.S. population becomes vaccinated. Yet the idea that perhaps, maybe, there could be a single transmission outside is driving many to scold the idea of doffing masks outside. (Curiously, the CDC’s guidance advising people who are vaccinated that it was okay to gather indoors with others who are also vaccinated caused much less conflict, even though indoor transmission is far more risky.)
# There’s a lot of activism still to be done to get vaccines to the most affected in the U.S. and abroad. But we can’t do that work well from a place of panic or needless pain or suffering. For instance, the high COVID mortality of line cooks has nothing to do with wearing masks outside, and everything to do with getting line cooks vaccines, stronger ventilation and better working conditions overall. Yet unnecessary panic about the former can cloud our judgment about acting on the latter.
# As my body builds up COVID protection in the coming weeks, I am looking forward to how a far lower risk of infection or transmission will improve my mental health and make life more pleasurable. I am happy to let go of fears of harming others or myself by engaging in normative life activities. I look forward to hugging, kissing and eating with other people again, and to sharing more unmasked smiles in various settings. And I welcome this boost of happiness as fuel to keep fighting against viral stigma and for vaccine equity.
# The trauma of the past 15 months or so will mean that many of us will have to be socialized into unmasking and being intimate with one another again. That socialization can be better aided if news media organizations reassess risk, narrate risk with more nuance and do not frame this next stage of the pandemic in all-or-nothing terms.
"""
# + [markdown] id="lzxl2pjZ43bZ"
# **Article 3**
# + id="i3uNNIif48Xi"
# Article_3="""
# The COVID pandemic devastated nursing homes. People living in long-term care facilities represent less than 1 percent of the U.S. population but account for a third of its COVID deaths: more than 174,000 people as of early March. And it wasn't just residents—nursing home workers had one of the deadliest jobs last year.
# Problems with long-term care precede COVID. Most Americans say they want to remain at home as long as possible as they age, yet many cannot afford such care and wind up in a nursing facility. Such facilities can cost hundreds of dollars a day. Medicaid covers most charges, yet people must be nearly bankrupt to qualify. The program reimburses nursing homes only for 70 to 80 percent of those costs, so it is harder for them to provide quality care.
# Most nursing homes are for-profit, and private equity firms are increasingly gobbling them up to make a buck at the expense of residents. Certified nursing assistants (CNAs), who furnish the bulk of care in nursing homes, earn only about $14 an hour; recruiting and retaining them is a huge challenge. And the current U.S. government system for evaluating nursing facilities—the so-called five-star rating system—is largely based on self-reported data that are easy to manipulate, and independent inspections often fail to flag serious violations in the quality of care, according to a recent New York Times investigation.
# “This isn't just a bad-actor problem,” says <NAME>, a professor of health-care policy at Harvard Medical School. “It's the system that's broken.”
# How to fix it? President <NAME>'s proposed $2-trillion infrastructure bill offers a promising start toward helping people age at home. The bill includes $400 billion over eight years for home- and community-based care. It expands Medicaid coverage for such services, which states are not currently required to provide (and those that do often have long waiting lists). The bill, which faces steep opposition from Republicans, also aims to establish more and better-paying jobs for home health workers and to give them the ability to join unions and collectively bargain.
# These steps are a good beginning, but they don't do anything to help nursing homes. “Nursing homes must be prioritized at the level of other medical facilities,” says <NAME>, CEO of the National Association of Health Care Assistants, which represents CNAs. “We're taking care of the sickest people in America.”
# The American Health Care Association (AHCA), a nonprofit that represents nursing homes and other assisted living facilities, and LeadingAge, an association of nonprofit aging service providers, recently released a proposal dubbed the Care for Our Seniors Act. The plan would require at least one registered nurse on duty 24 hours a day at every facility (in addition to CNAs and other staff) and a 30-day supply of personal protective equipment. The act includes provisions to attract and retain employees, such as providing loan forgiveness for new graduates working in long-term care, tax credits for employees, and support for child care and affordable housing. And it aims to create better oversight of facilities by focusing more on improving them than punishing them and by closing chronically poor performers. Most nursing homes are badly outdated; the new proposal calls for renovating them and ensuring all residents have private rooms. AHCA says its plan will cost $15 billion a year. To pay for it, the proposal calls for several strategies, including increasing the federal government match for Medicaid, which states have underfunded, and mandating that states pay facilities at a rate sufficient for them to break even.
# The AHCA-LeadingAge proposal is on the right track, but one thing it's missing is increased accountability, according to Grabowski. “There's a lot of skepticism that all those dollars are going to find their way to their intended purposes,” he says. Beyond top-level reforms, Porter wants to empower nursing home residents and their families to fight for the care they or their loved ones deserve. Taking a tour of a nursing home doesn't tell you anything about the quality of care, she says. Instead you should request a meeting with the president of the resident council, an advocacy group consisting of residents and their families—and if one doesn't exist, you can form one. They can tell you whether a facility is really as good as it claims to be.
# These changes will show that we, as a society, value elderly lives—including our own.
# """
# + [markdown] id="ptVDdfvS5DvL"
# **Article 4**
# + id="wPsHIV4F5GdU"
# Article_4="""
# As Black Emergency Room physicians, we've had a front row view as COVID-19 has ravaged our community, disproportionately killing African-Americans, Latinos, and other minority groups . We cheered the arrival of a vaccine, and, along with many other front-line providers, lined up early for our appointment filled with hope that an end to this pandemic could be near.
# Unfortunately, this elation has been short lived. Early signs from the current vaccine rollout show Black Americans nationwide are receiving COVID vaccinations at half the rates of White Americans . Even amongst those fighting the virus every day, few of our Black colleagues want to get vaccinated. As Black healthcare providers, we recognize our unique role in providing not only for the health of our patients but for the health of our communities. We provide a brief historical primer and urge specific action to prevent worsening of health disparities.
# There is a historical legacy of exploitation and persecution at the hands of the US healthcare system which has affected generations of Black communities. During slavery, physicians used slaves for involuntary medical experimentation for both developing cures and profit. Medical exploitation was further perpetuated in the modern era as evident by the Tuskegee Syphilis Project, the biomedical capitalization of Henrietta Lacks, and forced sterilization initiatives.
# Decades of torment have led to generalized mistrust of the healthcare system among many in the Black community. Discrimination, experiences around racism, and fear of experimentation are but a few of the many elements that contribute to this mistrust, and ultimately negatively impacts the acceptance of and willingness to seek healthcare. This notion becomes more apparent when it comes to the topic of vaccinations. A December 2020 survey conducted by the Kaiser Family Foundation showed that about a quarter of the public remains vaccine-hesitant, saying they probably or definitely would not get a COVID-19 vaccine even if it were available for free and deemed safe by scientists. More importantly, 35% of Black adults say they definitely or probably would not get vaccinated citing major reasons as fear of contracting COVID-19 from the vaccine itself or having a mistrust of vaccines in general. Among all Black Adults, about half (48%) say they are not confident that the development of a COVID-19 vaccine is taking the needs of Black people into account.
# COVID vaccination presents an opportunity for a healthcare system historically plagued by injustice and discrimination to begin to make amends. We offer concrete strategies that healthcare and governmental institutions can employ now to address COVID-19 vaccine hesitancy in the African-American community.
# First, we must acknowledge past and present injustices rooted in structurally racist policies and care delivery systems. Institutions should mandate iterative cultural competency training for all clinicians and trainees that emphasize how social determinants contribute to health inequity across communities of color. For communities historically subjected to experimentation and exclusion, transparency in describing vaccination risks and benefits and accountability in vaccine delivery are critical when engaging on the topic of COVID-19 vaccination. Second, we must develop messaging that acknowledges concerns while providing pertinent information and education. Our communities have heightened apprehension about side effects and safety rooted in historical abuses. Messaging must explicitly address these aspects in a culturally sensitive way to allay fears. Third, we must partner with trusted sources such as faith-based organizations, political advocacy groups, and grass-roots organizations to engage Black communities about the risks and benefits of the COVID19 vaccine in a personalized, culturally sensitive way. These are community leaders who acknowledge people's genuine fears and the Black experience of healthcare in America. Recognizing the potential sway that trusted Black voices have in the community, we can leverage community anchors to foster dialogue and build trust. Finally, we must redouble our efforts to overcome barriers to access particularly for those most disenfranchised. If primary care, health education, and preventive health are already less accessible for Black patients, we cannot expect that, without significant action, the COVID vaccine will magically become easy to get. Efforts need to be targeted to reach communities when and where they prefer to be vaccinated. Vaccines should be distributed in churches, barbershops, and community centers.
# The trauma from centuries of experimentation, neglect, and disenfranchisement cannot be overcome overnight. Yet, action must be taken now to alleviate the pandemic's devastating toll on communities of color given that it is far from over. With genuine communication, relevant messaging, thoughtful partnership, and a relentless focus on removing barriers, we can build transparent and equitable mechanisms to address vaccine hesitancy in the communities most at risk.
# """
# + [markdown] id="sVw73Hnn5MBT"
# **Method 1**
# + id="-6b00cx98bkg"
"""
Here, we have 5 different variants and all imports should go here
"""
# bart-large-cnn might be an overkill since it requires pytorch so it is in another notebook
from transformers import pipeline
from newspaper import Article
import torch
import json
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
#List_of_available_models=[ 'bart-large-cnn', 't5-small', 't5-base', 't5-large', 't5-3b', 't5-11b'] # to give developers an idea of what all was tested.
# for i,j in zip(List_of_available_models):
#summarizer = pipeline("summarization", model='t5-small', tokenizer="t5-small", framework="tf")
#x=summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
# + id="vWim2RlnZtct"
# Fetch and parse a news article with newspaper3k; `article_text` below is the
# raw body text consumed by the summarization cells that follow.
url1 = 'https://www.scientificamerican.com/article/in-shocking-move-u-s-backs-waiving-patents-on-covid-vaccines/' # provide the url you want summarised
# NOTE(review): `url1` is never used -- only `url` below is fetched; confirm
# which article was actually intended.
url = 'https://www.nejm.org/doi/10.1056/NEJMc2104974'
article = Article(url)
article.download()  # performs the HTTP request
article.parse() # do not skip this -- parse() populates article.text
article_text = article.text
#cleaned_text = clean_text(article_text)
# + colab={"base_uri": "https://localhost:8080/", "height": 115, "referenced_widgets": ["a505ebcb04ac4a7cbcd75f71be8820a9", "<KEY>", "<KEY>", "cc0299edce684d0483f3a53f3c9712f2", "f80881feae114e318f76a51a3ffda227", "b6720ad69e904f52b0fe3195f18b6acc", "<KEY>", "c7f5df8ebe08452789ff87753a19706a", "405c375c09c143af8f872247644df0b6", "ddc281de99d645d3ac8b77a464f25089", "<KEY>", "46fed5859df74416a5d76e82d964efdc", "<KEY>", "7403c53d248d4f609c669eab7e3fef7e", "<KEY>", "495a0d3e70de4c7f838d717e7b5d2fc4"]} id="iBMUQWgeZswi" outputId="0664e95c-99e0-46b5-fb89-5dd7cfccd278"
# Load the T5-large checkpoint and its tokenizer (downloads on first run).
model = T5ForConditionalGeneration.from_pretrained('t5-large')
tokenizer = T5Tokenizer.from_pretrained('t5-large')
# + colab={"base_uri": "https://localhost:8080/"} id="XgpAlNewSbod" outputId="ce1de575-425d-4b4f-a171-087c84533eb9"
#device = torch.device('cpu')
# Collapse newlines and prepend the task prefix T5 expects for summarization.
preprocess_text = article_text.strip().replace("\n","")
t5_prepared_Text = "summarize: "+preprocess_text
print ("original text preprocessed: \n", preprocess_text)
# NOTE(review): encode() without an explicit max_length truncates long inputs
# at the model maximum (512 tokens for T5) -- confirm long articles are
# acceptable to summarize from their beginning only.
tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors="pt")#.to(device)
# Beam search (4 beams); no_repeat_ngram_size=2 suppresses repeated bigrams.
summary_ids = model.generate(tokenized_text,
                                    num_beams=4,
                                    no_repeat_ngram_size=2,
                                    min_length=30,
                                    max_length=300,
                                    early_stopping=True)
output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
# print ("\n\nSummarized text: \n",output)
# + colab={"base_uri": "https://localhost:8080/"} id="vNnNqFI1XpXq" outputId="ecb10c61-8188-4527-f9f5-50f40f0c9dcc"
print ("\n\nSummarized text: \n",output)
# + colab={"base_uri": "https://localhost:8080/", "height": 115, "referenced_widgets": ["2b1c1d7e34b14686812cdccd78f9e6c6", "bd825a193c794c86a4e04bcb76734037", "45a37976689043a69549d7dff68c68b0", "<KEY>", "<KEY>", "<KEY>", "f108bd4e51f2493f8162318f99874503", "<KEY>", "ae71e47be2194dbe86b8fbcee2bd0693", "<KEY>", "5e7e9f2f43fc4e358ebc8d81cd0e5477", "77b1eb8adc82455c8fa6312440b92eb8", "<KEY>", "3a67e93df8c64e8ab2c6c8e4ba7773bb", "e3dd3cde98f346eeb2312c4831d19cac", "be0dc5ef2bb0464aa9c4a62d613664d0"]} id="yz_2ktEkbzAM" outputId="85ed4dbf-e74a-4c66-be59-906f4e14fe23"
#another summary model
# Pipeline wrapper around t5-large (TensorFlow backend).  Loading the model is
# expensive, so reuse this `summarizer` across urls instead of re-running.
summarizer = pipeline("summarization", model='t5-large', tokenizer="t5-large", framework="tf") # do not re-run for every url, evaluates the summaries
# + id="Mpg-2CMd9Mju" colab={"base_uri": "https://localhost:8080/"} outputId="1055affd-9f09-413f-9968-efdaf51ea4f7"
#### Variant 1 #####
# Variant 1: t5-large pipeline summary of the fetched article.
# length_penalty > 1.0 nudges beam search toward longer summaries.
length_penalty = 2.0
# Pass the named constant instead of repeating the literal 2.0 (the variable
# was previously assigned but never used).
print(summarizer(article_text, min_length=30, max_length=250, length_penalty=length_penalty))
##########
# + colab={"base_uri": "https://localhost:8080/", "height": 350, "referenced_widgets": ["e00e7eb8bfba4699a87a41bfaaef833d", "7c77c41d910e4b6e81b335df496b7e3b", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "08af9093f52749c0a1b45e643672e560", "50175c905a474d56bbd35ec7086b8783", "54dd0895368c49eb827201668aaa29c5", "076685c6f3e7403888965bf53ae99eaa", "<KEY>", "80af40b0134a428b8dae29764570ed4f", "5a06d09feb524384ad32dcb501217abb", "7cca43f43549469b851eedc323f3afff", "<KEY>", "a512aeeaf60444eb917212b8ed5453ae", "<KEY>", "5010d5a18a47423aab74ad9fce3f915e", "4149ec659ba4431095201b82b2cad9da", "<KEY>", "<KEY>", "f3edf806bd3a45868abad1bc787de257", "<KEY>", "41da4d2a886a4a2eafec8689c5ae0976", "72dc1ef2326a485597d95411c3e9c156", "f1ac5cd244cf407ea40f36628d6f2f66", "a3944816c3d741e6bc5ab0e91265c566", "<KEY>", "<KEY>", "c8f78cd183554e9faea51d2d17250e31", "<KEY>", "57717892d9da431dbb703107e4668a51", "<KEY>", "7e78d423be8e46de8954cc0d2699686e", "79f96073e4864e8e93d94e85e77bcab8", "<KEY>", "<KEY>", "0dee705117b841fa9c6e24ef21520b61", "<KEY>", "<KEY>"]} id="sCA8LeMafotQ" outputId="1e58cf36-d9e8-4d8d-b4a9-af0c89705159"
from transformers import BartTokenizer, BartForConditionalGeneration
import torch

# Summarize an arbitrarily long article with BART by splitting the token ids
# into model_max_length-sized windows, summarizing each window independently,
# and joining the partial summaries (one paragraph per window).
long_text = article_text

model = BartForConditionalGeneration.from_pretrained('facebook/bart-large-cnn')
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn')

# Tokenize without truncation so every token of the article is kept.
# (Bug fix: the original passed `cleaned_text`, which is only defined in a
# commented-out line above and therefore raised NameError.)
inputs_no_trunc = tokenizer(long_text, max_length=None, return_tensors='pt', truncation=False)

# Slice the ids into consecutive windows of exactly model_max_length tokens
# (== 1024 for Bart); the final window may be shorter.
chunk_start = 0
chunk_end = tokenizer.model_max_length
inputs_batch_lst = []
# `<` instead of `<=`: the original produced an empty trailing chunk whenever
# the article length was an exact multiple of the window size.
while chunk_start < len(inputs_no_trunc['input_ids'][0]):
    inputs_batch = inputs_no_trunc['input_ids'][0][chunk_start:chunk_end]  # window of up to 1024 tokens
    inputs_batch = torch.unsqueeze(inputs_batch, 0)  # add the batch dimension
    inputs_batch_lst.append(inputs_batch)
    chunk_start += tokenizer.model_max_length
    chunk_end += tokenizer.model_max_length

# Generate a summary for each window with beam search.
summary_ids_lst = [model.generate(inputs, num_beams=4, max_length=250, early_stopping=True) for inputs in inputs_batch_lst]

# Decode each window's summary and join them, one paragraph per window.
summary_batch_lst = []
for summary_id in summary_ids_lst:
    summary_batch = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_id]
    summary_batch_lst.append(summary_batch[0])
summary_all = '\n'.join(summary_batch_lst)
print(summary_all)
# + id="F2ewyypY-qhq"
# print(summarizer(Article_1, min_length=30, max_length=250))
# + id="xxh712ei-4SP"
# print(summarizer(Article_2, min_length=30, max_length=250))
# + id="vuUR0y8u_PmE"
# print(summarizer(Article_3, min_length=30, max_length=250))
# + id="HhQjqyX-_az6"
# print(summarizer(Article_4, min_length=30, max_length=250))
# + id="lru9Z7Ax_rxS"
#### Variant 2#####
# Variant 2: the smaller t5-base checkpoint (TensorFlow backend).
summarizer = pipeline("summarization", model='t5-base', tokenizer="t5-base", framework="tf")
# + id="AB_aL3qbACNw"
# NOTE(review): Article_1..Article_4 are only defined in commented-out cells
# above, so these calls raise NameError unless those cells are uncommented.
print(summarizer(Article_1, min_length=30, max_length=250))
# + id="BzuzgkAzAHWd"
print(summarizer(Article_2, min_length=30, max_length=250))
# + id="tUT8TLRzAIyS"
print(summarizer(Article_3, min_length=30, max_length=250))
# + id="FigHF9HAAKjm"
print(summarizer(Article_4, min_length=30, max_length=250))
# + id="XF5xkqi4CG9l"
#### Variant 3 ######
# Variant 3: t5-large via the pipeline API (same checkpoint as Variant 1).
summarizer = pipeline("summarization", model='t5-large', tokenizer="t5-large", framework="tf")
# + id="mYuBdMtKDVo7"
# NOTE(review): Article_1..Article_4 are commented out above; these calls
# raise NameError as the notebook stands.
print(summarizer(Article_1, min_length=30, max_length=250))
# + id="o2yIiWHADa4f"
print(summarizer(Article_2, min_length=30, max_length=250))
# + id="JiZtCD6UDcNS"
print(summarizer(Article_3, min_length=30, max_length=250))
# + id="cx7axhyrDdcD"
print(summarizer(Article_4, min_length=30, max_length=250))
# + id="Q7ixoGHyHfVL"
#### Variant 3 ###### errors due to memory issues #####
# (This is really the fourth variant: the 3-billion-parameter t5-3b model.
# It frequently exhausts Colab memory if earlier models are still loaded.)
summarizer = pipeline("summarization", model='t5-3b', tokenizer="t5-3b", framework="tf")
# + id="VwNMhMnLHvTr"
# NOTE(review): Article_1..Article_4 are commented out above; these calls
# raise NameError as the notebook stands.
print(summarizer(Article_1, min_length=30, max_length=250)) # might not work due to memory issues if the previous models have been loaded ###
# + id="qcZO8oYoI7pP"
print(summarizer(Article_2, min_length=30, max_length=250)) # might not work due to memory issues if the previous models have been loaded ###
# + id="sA12lfhxI-BZ"
print(summarizer(Article_3, min_length=30, max_length=250)) # might not work due to memory issues if the previous models have been loaded ###
# + id="68YWJl5DI-vI"
print(summarizer(Article_4, min_length=30, max_length=250)) # might not work due to memory issues if the previous models have been loaded ###
# + id="XwPF0ghhrlB_"
#default version t5-base, incase model not specified
summarizer = pipeline("summarization")
# + [markdown] id="4XJaNNdmrlCA"
# Because the summarization pipeline depends on the `PreTrainedModel.generate()` method, we can override the default
# arguments of `PreTrainedModel.generate()` directly in the pipeline for `max_length` and `min_length` as shown
# below. This outputs the following summary:
# + id="vaNO7OiZrlCB"
# Override PreTrainedModel.generate() defaults directly through the pipeline call.
print(summarizer(Article_1, max_length=250, min_length=30, do_sample=False))
# current token limit = 512; length penalty = 2.0
# NOTE(review): the original comment called this "an extractive summary", but
# these seq2seq models generate abstractive summaries -- the wording likely
# refers to how literal the output looks; confirm intent
# + id="CaNe-fWfJclo"
print(summarizer(Article_3, max_length=250, min_length=30, do_sample=False))
# + id="LfeJXWPbJ73C"
print(summarizer(Article_4, max_length=250, min_length=30, do_sample=False))
# + id="7AP3CUqDKEDL"
# print(summarizer(Article_2, max_length=50, min_length=20, do_sample=False))
# + [markdown] id="GrplSeZz55US"
# **Method 2**
# + [markdown] id="y7rdlBOKrlCC"
# Here is an example of doing summarization using a model and a tokenizer. The process is the following:
#
# 1. Instantiate a tokenizer and a model from the checkpoint name. Summarization is usually done using an encoder-decoder
# model, such as `Bart` or `T5`.
# 2. Define the article that should be summarized.
# 3. Add the T5 specific prefix "summarize: ".
# 4. Use the `PreTrainedModel.generate()` method to generate the summary.
#
# In this example we use Google's T5 model. Even though it was pre-trained only on a multi-task mixed dataset (including
# CNN / Daily Mail), it yields very good results.
#
# Note: Although the Hugging Face community reviews this tokenizer (a fine-tuned version) positively, it is not working very well for us.
#
# + id="jiXlJfQXrlCD"
# NOTE(review): AutoModelWithLMHead is deprecated in recent transformers
# releases; AutoModelForSeq2SeqLM is the current class for T5 -- confirm
# against the installed version before changing.
from transformers import AutoModelWithLMHead, AutoTokenizer
model = AutoModelWithLMHead.from_pretrained("t5-base")
tokenizer = AutoTokenizer.from_pretrained("t5-base")
# + id="nEKitFG_KmVX"
# T5 uses a max_length of 512 so we cut the article to 512 tokens.
# NOTE(review): newer tokenizers also require truncation=True for encode() to
# actually cut the input -- verify no overflow warning is emitted here.
inputs_1 = tokenizer.encode("summarize: " + Article_1, return_tensors="pt", max_length=512)
outputs_1 = model.generate(inputs_1, max_length=512, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
inputs_2 = tokenizer.encode("summarize: " + Article_2, return_tensors="pt", max_length=512)
outputs_2 = model.generate(inputs_2, max_length=512, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
inputs_3 = tokenizer.encode("summarize: " + Article_3, return_tensors="pt", max_length=512)
outputs_3 = model.generate(inputs_3, max_length=512, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
inputs_4 = tokenizer.encode("summarize: " + Article_4, return_tensors="pt", max_length=512)
outputs_4 = model.generate(inputs_4, max_length=512, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
# + id="xJDmdbEErlCE"
print(tokenizer.decode(outputs_1[0]))
# + id="dwG-iAgmLrdW"
print(tokenizer.decode(outputs_2[0]))
# + id="rb3wDK5ULr1J"
print(tokenizer.decode(outputs_3[0]))
# + id="RxHhSw8mLsFx"
print(tokenizer.decode(outputs_4[0]))
# + id="ekTALDujXifR"
article_text
| Updated_Misinformation_Analysis_PathCheck_Foundation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas
import json
from tqdm import tqdm
import re
import os
import requests
import datetime
from bs4 import BeautifulSoup as bs
from collections import defaultdict
# +
def printer(data):
    """Print the 'title' and 'href' of every post dict in ``data``."""
    for post in data:
        print(post['title'])
        print(post['href'])
def finder(data, select):
    """Split a scraped post list by a title selector.

    get: list whose first element is the run date, followed by post dicts
    return: (rest, matched) -- two lists, each starting with the run date;
            ``matched`` holds posts whose title looks like a dated digest
            heading AND contains ``select``; ``rest`` holds all the others
    """
    digest_title = re.compile(r'[ДПА][а-яВ\s]+\d{,2}[\.\s][\dа-я]+[\.\s]\d{,4}')
    run_date, posts = data[0], data[1:]
    matched = [run_date]
    rest = [run_date]
    for post in posts:
        title = post['title']
        bucket = matched if (digest_title.match(title) and select in title) else rest
        bucket.append(post)
    return rest, matched
def xa0(string):
    """Strip non-breaking spaces (U+00A0) from ``string`` and return it."""
    return string.replace('\xa0', '')
def check_keys(dictofdicts):
    """Print (and return) the distinct inner keys of a dict of dicts.

    get: dict mapping vacancy titles to per-vacancy aspect dicts
    print: the set of keys found across all inner dicts
    return: that set of keys (also returned so the result is usable/testable)

    Bug fix: the original iterated over an undefined global ``w`` instead of
    the ``dictofdicts`` argument, raising NameError on every call.
    """
    inner_keys = set()
    for inner in dictofdicts.values():
        inner_keys.update(inner.keys())
    print(inner_keys)
    return inner_keys
def get_date(yyyy, mm, dd):
    """Build a ``datetime.date`` from scraped date fragments.

    get: year, Russian three-letter month abbreviation, and day -- all strings
    return: the corresponding ``datetime.date``
    """
    month_map = {'Янв': 'Jan', 'Фев': 'Feb', 'Мар': 'Mar', 'Апр': 'Apr',
                 'Мая': 'May', 'Июн': 'Jun', 'Июл': 'Jul', 'Авг': 'Aug',
                 'Сен': 'Sep', 'Окт': 'Oct', 'Ноя': 'Nov', 'Дек': 'Dec'}
    stamp = '/'.join([dd, month_map[mm.capitalize()], yyyy])
    return datetime.datetime.strptime(stamp, '%d/%b/%Y').date()
def DieWalkuere():
    """
    Scrape every news post from https://career.hse.ru/news/.

    return: list whose first element is the run date ("YYYY-MM-DD"), followed
            by one dict per post with keys 'title', 'href', 'text_content',
            'date' (ISO string) and 'href_content_html' (raw HTML of the post
            page; '' when the download failed)
    """
    run_datetime = datetime.datetime.now().strftime("%Y-%m-%d")
    m_r = requests.get('https://career.hse.ru/news/')  # request the news front page
    soup = bs(m_r.text, 'lxml')
    num_of_cycles = int(soup.find_all('a', {'class' : 'pages__page'})[-1].text)  # number of pages in the news feed
    hse_career_posts = [run_datetime]  # results accumulator; run date goes first
    with tqdm(total=num_of_cycles) as pbar:
        for i in range(num_of_cycles):  # one iteration per page of the news feed
            r = requests.get(f'https://career.hse.ru/news/page{i}.html')  # fetch page i (NOTE(review): starts at page0 -- confirm the site serves it)
            soup = bs(r.text, 'lxml')  # parse the page HTML with BeautifulSoup/lxml
            posts_content = soup.find_all("div", {"class": "post__content"})  # divs holding the news posts
            posts_meta = soup.find_all("div", {"class": "post-meta__date"})  # divs holding post date metadata
            qq = list(zip(posts_content, posts_meta))  # pair each post div with its metadata div
            for e in qq:
                title = e[0].find('a').text  # post title
                href = e[0].find('a')['href']  # link to the full post page
                text_content = e[0].find('div', {'class' : 'post__text'}).text  # post lead/teaser text
                # year, month and day come in separate elements (month is a Russian abbreviation)
                dd = e[1].find('div', {'class' : 'post-meta__day'}).text
                mm = e[1].find('div', {'class' : 'post-meta__month'}).text
                yyyy = e[1].find('div', {'class' : 'post-meta__year'}).text
                d = str(get_date(yyyy, mm, dd))  # convert to an ISO date string
                e_dict = {'title':title, 'href':href,
                          'text_content':text_content, 'date':d}  # collect the scraped fields
                hse_career_posts.append(e_dict)
            pbar.update()
    with tqdm(total=len(hse_career_posts[1:])) as pbar:
        for e in hse_career_posts[1:]:
            try:
                url = e['href']
                if 'http:' in url:
                    url = url.replace('http:', 'https:')  # force https
                if not 'https:' in url:
                    url = 'https:'+url  # scheme-relative links get a scheme prepended
                r = requests.get(url).text
                e['href_content_html'] = r
            except:  # NOTE(review): bare except -- any failure (network, bad URL) silently stores ''
                e['href_content_html'] = ''
                pass
            pbar.update()
    return hse_career_posts
def Fricka(listofobj):
    """
    Extract vacancy text lines from scraped digest post pages.

    get: list whose first element is the run date, followed by post dicts
         (each carrying 'href_content_html' and 'date')
    return: flat list of strings -- run date first, then for every vacancy a
            title line prefixed with the marker 'o', the post's date, and the
            vacancy's body lines
    """
    title_p = "margin-bottom: .0001pt; text-align: center; text-indent: 1.0cm; line-height: 150%;"
    Liste = [listofobj[0]]
    with tqdm(total=len(listofobj[1:])) as pbar:
        for obj in listofobj[1:]:
            href = obj.get('href_content_html')  # raw HTML of the digest page
            o = bs(href, 'lxml')  # parse with BeautifulSoup/lxml
            core_div = o.find('div', {'class': 'post__text'})
            list_of_p = core_div.find_all('p')  # paragraphs carrying the vacancy texts
            for p in list_of_p:
                if p.get('style') == title_p:  # vacancy headings use this exact inline style
                    tex = p.text
                    tex = xa0(tex)  # drop non-breaking spaces
                    if tex != '':
                        Liste.append('o'+tex)  # prepend the 'o' marker identifying a title line
                        Liste.append(obj['date'])  # follow the title with the post date
                else:
                    spans = p.find_all('span')  # ordinary paragraphs: harvest the span texts
                    for s in spans:
                        tex = s.text  # text of each span
                        tex = xa0(tex)  # drop non-breaking spaces
                        if tex != '':
                            Liste.append(tex)  # keep non-empty body lines
            pbar.update()
    return Liste
def Wotan(listofstr):
    """
    Group the flat vacancy lines (as produced by Fricka) into a dict of dicts.

    get: list of strings, run date first
    return: [run_date, mapping] where mapping is
            {title_1 : {aspect_1 : text_1, aspect_2 : text_2},
             title_2 : {aspect_1 : text_1, aspect_2 : text_2},
             ...
            }
    """
    keys = ({'Контакты:':'contacts', 'О компании:':'about_company',
             'Обязанности:':'responsibilities', 'Условия:':'conditions',
             'Окомпании:':'about_company', 'Требования:':'demands'})  # section headings -> aspect field names
    WW = [listofstr[0]]
    Woerterbuch = defaultdict(dict)  # vacancy title -> {aspect: text}
    current_date = ''
    current_title = ''
    curretn_aspect = ''  # NOTE(review): typo'd name (should be 'current_aspect'); kept unchanged here
    with tqdm(total=len(listofstr[1:])) as pbar:
        # a vacancy title is a key; its value is a dict keyed by the vacancy's aspects
        for i, e in enumerate(listofstr[1:]):
            t = re.compile(r'[o]\d+[\.][\s]\b')  # pattern for the 'oNN. ' title marker added upstream
            if t.match(e):  # this line is a vacancy title
                nomore = t.match(e).group()
                current_title = e.replace(nomore, '')  # strip the marker from the title
                Woerterbuch[current_title]  # touch the key so empty vacancies still appear
            try:
                if re.match(r'\d{4}[-]\d{2}[-]\d{2}\b', e):  # ISO date line
                    current_date = e
                    Woerterbuch[current_title]['date'] = current_date
                elif e.strip() in keys:  # aspect heading line
                    curretn_aspect = keys[e.strip()]
                    Woerterbuch[current_title][curretn_aspect] = ''
                else:  # everything else is content appended to the current aspect
                    Woerterbuch[current_title][curretn_aspect]+=e
            except:  # NOTE(review): bare except -- silently drops text seen before the first heading
                pass
            pbar.update()
    WW.append(Woerterbuch)
    return WW
def Bruennhilde(dictofdicts):
    """
    Flatten the vacancy mapping into row dicts, mining a few extra fields.

    get: dictionary of dictionaries
         example: {title_1 : {aspect_1 : text_1, aspect_2 : text_2},
                   title_2 : {aspect_1 : text_1, aspect_2 : text_2},
                   ...
                  }
    return: list of dictionaries
            example: [{title : title_1, aspect_1 : text_1, aspect_2 : text_2},
                      {title : title_2, aspect_1 : text_1, aspect_2 : text_2},
                      ...
                     ]
            Rows with fewer than three fields are dropped.

    Reads 'metro.txt' (one metro-station name per line) from the current
    working directory.
    """
    with open('metro.txt', 'r', encoding='utf-8') as f:
        ms = f.read().split('\n')
    Siegfried = []
    # reshape for tabular output and extract additional fields
    for k, v in dictofdicts.items():
        Fafnir = {}
        c = k.split(' в ')  # titles typically look like '<role> в <company>'
        n = re.compile(r'[А-ЯЁA-Z][\w\s]+')
        # pull the company name out of the vacancy title, when present
        if len(c) > 1 and n.search(c[-1]):
            Fafnir['title'] = k
            Fafnir['company'] = n.search(c[-1]).group()
        else:
            Fafnir['title'] = k
        money = re.compile(r'[\d][\d\s]+[руб]{1,3}[.]{,1}\s')  # salary mentions, e.g. '50 000 руб.'
        metrore = re.compile(r'\s[м][.][\s][А-ЯЁ][а-я]+\s')  # metro mentions, e.g. ' м. Станция '
        # look for salary and metro-station information in the 'conditions' aspect
        if 'conditions' in v:
            cond = v['conditions']
            if money.search(cond):
                Fafnir['money'] = ', '.join(money.findall(cond))
            metro = []
            if metrore.search(cond):
                for e in ms:
                    if e in cond:
                        metro.append('м. ' + e)
            if len(metro) > 0:
                Fafnir['metro'] = ', '.join(metro)
        Fafnir.update(v)
        if len(Fafnir) > 2:  # keep only rows carrying more than just title (+company)
            Siegfried.append(Fafnir)
    return Siegfried
# +
# This cell downloads the whole website -- slow and network-heavy, be careful!
Data = DieWalkuere()
with open('hse_career_department_website_posts.json', 'w') as f:
    json.dump(Data, f)
# -
with open('hse_career_department_website_posts.json', 'r') as f:
    Data = json.load(f)  # reload from the cached file so the scrape can be skipped on reruns
# +
# vacancies aimed at current students
data2, stud = finder(Data, 'студент')
step1_stud = Fricka(stud)
step2_stud = Wotan(step1_stud)
step3_stud = Bruennhilde(step2_stud[1])
Table_stud = pandas.DataFrame(step3_stud)
# +
# vacancies aimed at graduates (taken from what was left after the student pass)
data3, grad = finder(data2, 'выпускник')
step1_grad = Fricka(grad)
step2_grad = Wotan(step1_grad)
step3_grad = Bruennhilde(step2_grad[1])
Table_grad = pandas.DataFrame(step3_grad)
# +
#Table_stud
# +
#Table_grad
# +
# Save results: per-run output directory with JSON + CSV for each table.
# Fixes: os.mkdir crashed when re-running on the same day (directory already
# exists) -> os.makedirs(exist_ok=True); literal backslashes in the paths
# only worked on Windows ('\T' and '\o' happen not to be escape sequences,
# but e.g. '\t' would be) -> os.path.join for portable paths.
rdd = 'results' + step2_stud[0]
os.makedirs(rdd, exist_ok=True)
with open(os.path.join(rdd, 'Table_stud' + step2_stud[0] + '.json'), 'w') as f:
    json.dump(step3_stud, f)
Table_stud.to_csv(os.path.join(rdd, 'Table_stud' + step2_stud[0] + '.csv'), encoding='utf-8')
with open(os.path.join(rdd, 'Table_grad' + step2_grad[0] + '.json'), 'w') as f:
    json.dump(step3_grad, f)
Table_grad.to_csv(os.path.join(rdd, 'Table_grad' + step2_grad[0] + '.csv'), encoding='utf-8')
with open(os.path.join(rdd, 'ostatki' + step2_grad[0] + '.json'), 'w') as f:
    json.dump(data3, f)
| HSE_Career_website_scraper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # SAS Job Execution - job execution functions
# Job definitions are jobs that you can execute. You first create a job definition before executing it.
# The functions in this use case execute jobs based on existing job definitions.
# ## Authentication
# +
#############################################
########## authentication ###################
#############################################
### with source("./R/authentication/auth_package.R") you will be able
### to load all functions defined there to facilitate other projects
### (usage example at the end of this file)
## Refer to the authentication project
# Global variables to assign before running the rest of the notebook:
source("../authentication/get-access-token-r.r")
sasserver <- "http://your-server"  # SAS Viya server base URL
client_name <- "r_client"  ## your registered OAuth client id
client_secret <- "r_secret"  ## the client's secret
username <- "viya_user"  # Viya user account
password <- "<PASSWORD>"  # placeholder -- never commit a real password
tokenDetailed <- authenticate(host = sasserver,
                              username = username,
                              password = password,
                              client_name = client_name,
                              client_secret = client_secret)
token <- tokenDetailed$access_token  # bearer token used by every request below
token
# -
# ## Import modules, variable assignment
# +
library("httr")
library("jsonlite")
updated_def1 <- readRDS("updated.Rda")$id # reading the file from job-definition-crud-r to get job id
# Variables to assign (uncomment and assign if you did not do so in the authentication step above)
# sasserver <- "http://your-server"
# client_name <- "r_client" ## create your client
# client_secret <- "r_secret" ## create your password
# username <- "viya_user"
# password <- "<PASSWORD>"
# -
# ## Create *get job list* function
# +
#############################################
######### get job request list ##############
#############################################
### To execute a job you will need the
### `updated_def1` variable created on `job_definition-crud-r.r`
library("httr")
library("jsonlite")
# List job executions via GET /jobExecution/jobs.
#
# host         - SAS Viya server URL
# access_token - OAuth bearer token
# start, limit - paging window (defaults: first 10 items)
# filter       - optional server-side filter expression,
#                e.g. "eq(createdBy, 'sasdemo')"
# verbose      - print httr request/response details
#
# Returns the parsed JSON job list.
get_jobRequest_list <- function(host,
                                access_token,
                                start = 0,
                                limit = 10,
                                filter = NULL,
                                verbose = FALSE) {
    url <- parse_url(host)
    url$path <- "/jobExecution/jobs"
    url$query <- list(
        start = start,
        limit = limit,
        filter = filter  # NULL entries are dropped from the query string by httr
    )
    response <- GET(
        url = build_url(url),
        add_headers(
            "accept" = "application/vnd.sas.api+json",
            "authorization" = paste("Bearer", access_token)
        ),
        if (verbose) verbose()  # emit httr verbose output only when requested
    )
    stop_for_status(response)  # raise an R error on HTTP failure
    execList <- fromJSON(content(response, as = "text"))
    return(execList)
}
# -
# ## Run the *get job definitions* function
# +
### pro tip: use a filter whenever possible --
### otherwise the endpoint returns the full collection
### and the request can take a while
joblist <- get_jobRequest_list(sasserver,
                               token,
                               start = 1,
                               limit = 20,
                               filter = "eq(createdBy, 'sasdemo')"
                               )
joblist
# -
# ## Create a *run job* function
# +
##################################################
### Submit a Job Definition for Execution ########
##################################################
# Submit an existing job definition for (asynchronous) execution via
# POST /jobExecution/jobs.
#
# host            - SAS Viya server URL
# access_token    - OAuth bearer token
# jobDefinitionId - id of an existing job definition
# arguments       - optional named list overriding the definition's defaults,
#                   e.g. list(AGE = 14). BUG FIX: the original accepted this
#                   parameter but never put it in the request body, so the
#                   override at the call site silently did nothing.
# verbose         - print httr request/response details
#
# Returns the parsed job object with the response ETag attached as $etag
# (needed for later If-Match updates).
execute_job_definition <- function(host,
                                   access_token,
                                   jobDefinitionId,
                                   ### the following parameter overrides the definition's defaults
                                   arguments = NULL,
                                   ## a persistent job request needs createdBy, name, desc
                                   verbose = FALSE) {
    url <- parse_url(host)
    url$path <- "/jobExecution/jobs"
    payload <- list(
        jobDefinitionUri = paste0("/jobDefinitions/definitions/", jobDefinitionId)
    )
    if (!is.null(arguments)) {
        payload$arguments <- arguments  # include overrides only when supplied
    }
    body <- toJSON(payload, auto_unbox = TRUE)
    response <- POST(
        url = build_url(url),
        add_headers(
            "accept" = "application/vnd.sas.api+json",
            "Content-Type" = "application/vnd.sas.job.execution.job.request+json",
            "authorization" = paste("Bearer", access_token)
        ),
        body = body,
        if (verbose) verbose()
    )
    stop_for_status(response)
    exec <- fromJSON(content(response, as = "text"))
    exec$etag <- cache_info(response)$etag  # keep the ETag for optimistic concurrency
    return(exec)
}
# -
# ## Run the *run job* function
# +
### execution is asynchronous -- poll the job state endpoint for completion
exec <- execute_job_definition(sasserver,
                               token,
                               updated_def1  ## job definition created in job_definition.R
                               )
exec
## same definition, submitted with a parameter override
exec_param <- execute_job_definition(sasserver,
                                     token,
                                     updated_def1,  ## job definition created in job_definition.R (comment fixed: previously said job_execution.R)
                                     list(AGE = 14)
                                     )
exec_param$jobRequest$arguments  # inspect the overrides echoed back by the server
# -
# ## Create a *get job execution state* function
# Poll the state of a job execution via GET /jobExecution/jobs/<id>/state.
# The state comes back as plain text (e.g. "running", "completed").
check_job_state <- function(host,
                            access_token,
                            executionId,
                            verbose = FALSE) {
    url <- parse_url(host)
    url$path <- paste0("/jobExecution/jobs/", executionId, "/state")
    response <- GET(
        url = build_url(url),
        add_headers(
            "accept" = "text/plain",
            "authorization" = paste("Bearer", access_token)
        ),
        if (verbose) verbose()
    )
    stop_for_status(response)
    execState <- content(response, as = "text")
    return(execState)
}
# ## Run the *get job execution state* function
# poll the current state of the execution submitted above
execState <- check_job_state(sasserver,
                             token,
                             exec$id)
execState
# ## Create a *get job execution details* function
# Fetch the full details of a job execution via GET /jobExecution/jobs/<id>:
# state, timestamps, result links, elapsed time, etc. (Returns the parsed
# JSON object; despite the name, this is more than just the state.)
get_job_state <- function(host,
                          access_token,
                          executionId,
                          verbose = FALSE) {
    url <- parse_url(host)
    url$path <- paste0("/jobExecution/jobs/", executionId)
    response <- GET(
        url = build_url(url),
        add_headers(
            "accept" = "application/vnd.sas.job.execution.job+json",
            "authorization" = paste("Bearer", access_token)
        ),
        if (verbose) verbose()
    )
    stop_for_status(response)
    execState <- fromJSON(content(response, as = "text"))
    return(execState)
}
# ## Run the *get job execution details* function
# +
# full execution details: results, timestamps, links
execStateFull <- get_job_state(sasserver,
                               token,
                               exec$id)
execStateFull$results  ## output files produced by the job
execStateFull$endTimeStamp
execStateFull$links
execStateFull$elapsedTime  ## NOTE(review): units look like seconds -- confirm in the API docs
execStateFull$id
# -
# ## Create an *update job execution request* function
# Update an existing job request in place via PUT /jobExecution/jobRequests/<id>.
#
# jobReqId        - id of the job request to update (from a prior execution)
# jobDefinitionId - id of the underlying job definition
# etag            - ETag captured when the request was created; sent as
#                   If-Match for optimistic concurrency control
# name, description, arguments - fields to overwrite on the request
#
# Returns the updated request parsed from the response JSON.
update_execute_job_definition <- function(host,
                                          access_token,
                                          jobReqId,  ## request id (from exec)
                                          jobDefinitionId,  ## definition id
                                          etag,
                                          name = NULL,
                                          description = NULL,
                                          arguments = NULL,
                                          verbose = FALSE) {
    url <- parse_url(host)
    url$path <- paste0("/jobExecution/jobRequests/", jobReqId)
    body <- toJSON(list(
        id = jobReqId,
        name = name,
        description = description,
        jobDefinitionUri = paste0("/jobDefinitions/definitions/", jobDefinitionId),
        arguments = arguments
    ),
    auto_unbox = TRUE
    )
    response <- PUT(  ### updating: PUT instead of the POST used on create
        url = build_url(url),
        add_headers(
            "accept" = "application/vnd.sas.api+json",
            "Content-Type" = "application/vnd.sas.job.execution.job.request+json",
            "authorization" = paste("Bearer", access_token),
            "If-Match" = etag  # reject the update if someone else changed the request
        ),
        body = body,
        if (verbose) verbose()
    )
    stop_for_status(response)
    exec <- fromJSON(content(response, as = "text"))
    return(exec)
}
# ## Run the *update job execution request* function
# +
# update the stored job request (If-Match uses the ETag captured at creation)
updated_jobExec <- update_execute_job_definition(sasserver,
                                                 token,
                                                 jobReqId = exec$id,
                                                 jobDefinitionId = exec$jobRequest$jobDefinition$id,
                                                 etag = exec$etag,
                                                 name = "sashelp.class distribution",
                                                 description = "ods output with ager 14 cutoff",  # NOTE(review): 'ager' looks like a typo for 'age' in this stored description
                                                 arguments = list(AGE = "14"))
updated_jobExec
# -
# ## Create a *delete job execution* function
#### Delete a job execution
# Issue DELETE /jobExecution/jobs/<executionId> and report the outcome.
# HTTP 204 means the execution was removed; the raw httr response is returned.
delete_job_execution <- function(host,
                                 access_token,
                                 executionId,
                                 verbose = FALSE) {
    url <- parse_url(host)
    url$path <- paste0("/jobExecution/jobs/", executionId)
    response <- DELETE(
        url = build_url(url),
        add_headers(
            "accept" = "application/json",
            "authorization" = paste("Bearer", access_token)
        ),
        if (verbose) verbose()
    )
    stop_for_status(response)
    outcome <- if (response$status_code == 204) "successfully deleted" else "not deleted"
    print(paste0("The job execution ", executionId, " was ", outcome, "."))
    return(response)
}
# ## Run the *delete job execution* function
# delete the execution created above (expects HTTP 204)
response <- delete_job_execution(sasserver,
                                 token,
                                 execStateFull$id)
| r/job-execution/job-execution-r.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using R Functions in Python
#
# This notebook provides all of the code present in the Medium article **Guide to R and Python in a Single Jupyter Notebook**. The focus on the notebook is on the use of R functions, libraries, vectors, dataframes, and formula within a Python notebook and how to interface between the Python and R environment. Following this, examples of using these features are given with the examples of smoothing splines, natural and basis splines, as well as generalized additive models (GAMs).
#
# **Author:** <a href="https://github.com/Srtgn"><NAME></a> based on <a href="https://github.com/mrdragonbear"><NAME></a>'s notebook<br>
#
# <span style="color:red">
# If you have configured your python environment based on the instructions in <a href="https://github.com/bmcs-group/bmcs_utils/blob/master/docs/README.md">BMCS development process</a>, please add the following packages to use the "R" parts of this notebook:</span>
#
# conda install -c r rpy2
#
# conda install -c conda-forge tzlocal
#
# conda install -c conda-forge r-gam
# minpack.lm also sould be installed with"
#
# conda install -c conda-forge r-minpack.lm
#
# "minpack.lm" includes nls.lm which stands for
# "nonlinear least squares levenberg-marquardt". It
# is being used to evaluate the desired equation based the provided data.
#
# The purpose of nls.lm is to minimize the sum square of the vector returned by the function fn, by
# a modification of the Levenberg-Marquardt algorithm.[Package Link](https://cran.r-project.org/web/packages/minpack.lm/minpack.lm.pdf)
## RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES
import requests
from IPython.core.display import HTML
# fetch the course stylesheet and render it so exercise boxes are styled
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css").text
HTML(styles)
# ## Learning Goals
#
# The main goal of this notebook is to get familiar with calling R functions within Python. Along the way, we'll learn about the "formula" interface to statsmodels, which gives an intuitive way of specifying regression models, and we'll review the different approaches to fitting curves.
#
# Key Skills:
# - Importing (base) R functions
# - Importing R library functions
# - Populating vectors R understands
# - Populating dataframes R understands
# - Populating formulas R understands
# - Running models in R
# - Getting results back to Python
# - Getting model predictions in R
# - Plotting in R
# - Reading R's documentation
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# ## Linear/Polynomial Regression (Python, Review)
# Hopefully, you are at least somewhat familiar with using the statsmodels package in Python.
# Reading data and (some) exploring in Pandas:
# +
diab = pd.read_csv("data/diabetes.csv")
print("""
# Variables are:
# subject: subject ID number
# age: age diagnosed with diabetes
# acidity: a measure of acidity called base deficit
# y: natural log of serum C-peptide concentration
#
# Original source is Sockett et al. (1987)
# mentioned in Hastie and Tibshirani's book
# "Generalized Additive Models".
""")
display(diab.head())
display(diab.dtypes)
display(diab.describe())
# -
ax0 = diab.plot.scatter(x='age',y='y',c='Red',title="Diabetes data") # plotting directly from pandas!
ax0.set_xlabel("Age at Diagnosis")
ax0.set_ylabel("Log C-Peptide Concentration");
# Linear regression with statsmodels.
#
# - Previously, we worked from a vector of target values and a design matrix we built ourself (e.g. from PolynomialFeatures).
# - Now, Statsmodels' *formula interface* can help build the target value and design matrix for you.
# +
#Using statsmodels
import statsmodels.formula.api as sm
model1 = sm.ols('y ~ age',data=diab)
fit1_lm = model1.fit()
# -
# Build a data frame to predict values on (sometimes this is just the test or validation set)
# - Very useful for making pretty plots of the model predictions -- predict for TONS of values, not just whatever's in the training set
# +
x_pred = np.linspace(0,16,100)
predict_df = pd.DataFrame(data={"age":x_pred})
predict_df.head()
# -
# Use `get_prediction(<data>).summary_frame()` to get the model's prediction (and error bars!)
prediction_output = fit1_lm.get_prediction(predict_df).summary_frame()
prediction_output.head()
# Plot the model and error bars
# +
ax1 = diab.plot.scatter(x='age',y='y',c='Red',title="Diabetes data with least-squares linear fit")
ax1.set_xlabel("Age at Diagnosis")
ax1.set_ylabel("Log C-Peptide Concentration")
ax1.plot(predict_df.age, prediction_output['mean'],color="green")
ax1.plot(predict_df.age, prediction_output['mean_ci_lower'], color="blue",linestyle="dashed")
ax1.plot(predict_df.age, prediction_output['mean_ci_upper'], color="blue",linestyle="dashed");
ax1.plot(predict_df.age, prediction_output['obs_ci_lower'], color="skyblue",linestyle="dashed")
ax1.plot(predict_df.age, prediction_output['obs_ci_upper'], color="skyblue",linestyle="dashed");
# -
# <div class="exercise"><b>Exercise 1</b></div>
#
# 1. Fit a 3rd degree polynomial model and plot the model+error bars
# - Route1: Build a design df with a column for each of `age`, `age**2`, `age**3`
# - Route2: Just edit the formula
# +
fit2_lm = sm.ols(formula="y ~ age + np.power(age, 2) + np.power(age, 3)",data=diab).fit()
poly_predictions = fit2_lm.get_prediction(predict_df).summary_frame()
poly_predictions.head()
# +
ax2 = diab.plot.scatter(x='age',y='y',c='Red',title="Diabetes data with least-squares cubic fit")
ax2.set_xlabel("Age at Diagnosis")
ax2.set_ylabel("Log C-Peptide Concentration")
ax2.plot(predict_df.age, poly_predictions['mean'],color="green")
ax2.plot(predict_df.age, poly_predictions['mean_ci_lower'], color="blue",linestyle="dashed")
ax2.plot(predict_df.age, poly_predictions['mean_ci_upper'], color="blue",linestyle="dashed");
ax2.plot(predict_df.age, poly_predictions['obs_ci_lower'], color="skyblue",linestyle="dashed")
ax2.plot(predict_df.age, poly_predictions['obs_ci_upper'], color="skyblue",linestyle="dashed");
# -
# ## Linear/Polynomial Regression, but make it R
# This is the meat of the lab. After this section we'll know everything we need to in order to work with R models. The rest of the lab is just applying these concepts to run particular models. This section therefore is your 'cheat sheet' for working in R.
#
# What we need to know:
# - Importing (base) R functions
# - Importing R Library functions
# - Populating vectors R understands
# - Populating DataFrames R understands
# - Populating Formulas R understands
# - Running models in R
# - Getting results back to Python
# - Getting model predictions in R
# - Plotting in R
# - Reading R's documentation
# **Importing R functions**
# +
# if you're on JupyterHub you may need to point rpy2 at your R installation:
# import os
# os.environ['R_HOME'] = "/home/matt.stewart/anaconda3/bin/R"
import rpy2.robjects as robjects
# +
r_lm = robjects.r["lm"]  # handle to R's lm()
r_predict = robjects.r["predict"]  # handle to R's predict()
#r_plot = robjects.r["plot"] # more on plotting later
#lm() and predict() are two of the most common functions we'll use
# -
# **Importing R libraries**
from rpy2.robjects.packages import importr
#r_cluster = importr('cluster')
#r_cluster.pam;
# **Populating vectors R understands**
r_y = robjects.FloatVector(diab['y'])
r_age = robjects.FloatVector(diab['age'])
# What happens if we pass the wrong type?
# How does r_age display?
# How does r_age print?
# **Populating Data Frames R understands**
diab_r = robjects.DataFrame({"y":r_y, "age":r_age})
# How does diab_r display?
# How does diab_r print?
# **Populating formulas R understands**
simple_formula = robjects.Formula("y~age")
simple_formula.environment["y"] = r_y #populate the formula's .environment, so it knows what 'y' and 'age' refer to
simple_formula.environment["age"] = r_age
# **Running Models in R**
diab_lm = r_lm(formula=simple_formula) # the formula object is storing all the needed variables
simple_formula = robjects.Formula("y~age") # reset the formula
diab_lm = r_lm(formula=simple_formula, data=diab_r) #can also use a 'dumb' formula and pass a dataframe
# **Getting results back to Python**
diab_lm #the result is already 'in' python, but it's a special object
print(diab_lm.names) # view all names
diab_lm[0] #grab the first element
diab_lm.rx2("coefficients") #use rx2 to get elements by name!
np.array(diab_lm.rx2("coefficients")) #r vectors can be converted to numpy (but rarely needed)
# **Getting Predictions**
# +
# make a df to predict on (might just be the validation or test dataframe)
predict_df = robjects.DataFrame({"age": robjects.FloatVector(np.linspace(0,16,100))})
# call R's predict() function, passing the model and the data
predictions = r_predict(diab_lm, predict_df)
# -
x_vals = predict_df.rx2("age")
# +
ax = diab.plot.scatter(x='age',y='y',c='Red',title="Diabetes data")
ax.set_xlabel("Age at Diagnosis")
ax.set_ylabel("Log C-Peptide Concentration");
ax.plot(x_vals,predictions); #plt still works with r vectors as input!
# -
# **Plotting in R**
# %load_ext rpy2.ipython
# - The above turns on the %R "magic"
# - R's plot() command responds differently based on what you hand to it; Different models get different plots!
# - For any specific model search for plot.modelname. E.g. for a GAM model, search plot.gam for any details of plotting a GAM model
# - The %R "magic" runs R code in 'notebook' mode, so figures display nicely
# - Ahead of the `plot(<model>)` code we pass in the variables R needs to know about (`-i` is for "input")
# %R -i diab_lm plot(diab_lm);
# The documentation for the `lm()` function is [here](https://stat.ethz.ch/R-manual/R-devel/library/stats/html/lm.html), and a prettier version (same content) is [here](https://www.rdocumentation.org/packages/stats/versions/3.5.2/topics/lm). When googling, prefer rdocumentation.org when possible.
# Sections:
# - **Usage**: gives the function signature, including all optional arguments
# - **Arguments**: What each function input controls
# - **Details**: additional info on what the function *does* and how arguments interact. **Often the right place to start reading**
# - **Value**: the structure of the object returned by the function
# - **References**: The relevant academic papers
# - **See Also**: other functions of interest
# <div class="exercise"><b>Exercise 2</b></div>
#
# 1. Add confidence intervals calculated in R to the linear regression plot above. Use the `interval=` argument to `r_predict()` (documentation [here](https://stat.ethz.ch/R-manual/R-devel/library/stats/html/predict.lm.html)). You will have to work with a matrix returned by R.
# 2. Fit a 5th degree polynomial to the diabetes data in R. Search the web for an easier method than writing out a formula with all 5 polynomial terms.
# +
CI_matrix = np.array(r_predict(diab_lm, predict_df, interval="confidence"))
ax = diab.plot.scatter(x='age',y='y',c='Red',title="Diabetes data")
ax.set_xlabel("Age at Diagnosis")
ax.set_ylabel("Log C-Peptide Concentration");
ax.plot(x_vals,CI_matrix[:,0], label="prediction")
ax.plot(x_vals,CI_matrix[:,1], label="95% CI", c='g')
ax.plot(x_vals,CI_matrix[:,2], label="95% CI", c='g')
plt.legend();
# +
ploy5_formula = robjects.Formula("y~poly(age,5)") # R's poly() builds all 5 polynomial terms; NOTE(review): variable name is a typo for 'poly5_formula', kept in case later cells reference it
diab5_lm = r_lm(formula=ploy5_formula, data=diab_r) # fit the degree-5 model against the R data frame
predictions = r_predict(diab5_lm, predict_df, interval="confidence") # fit + confidence bounds (fit/lwr/upr columns)
ax = diab.plot.scatter(x='age',y='y',c='Red',title="Diabetes data")
ax.set_xlabel("Age at Diagnosis")
ax.set_ylabel("Log C-Peptide Concentration");
ax.plot(x_vals,predictions);
# -
# ## Lowess Smoothing
# Lowess Smoothing is implemented in both Python and R. We'll use it as another example as we transition languages.
# **In Python**
# +
from statsmodels.nonparametric.smoothers_lowess import lowess as lowess
# Fit lowess smoothers with increasing smoothing fractions. `frac` is the
# share of the data used around each point; larger frac -> smoother curve.
ss1 = lowess(diab['y'],diab['age'],frac=0.15)
ss2 = lowess(diab['y'],diab['age'],frac=0.25)
ss3 = lowess(diab['y'],diab['age'],frac=0.7)
ss4 = lowess(diab['y'],diab['age'],frac=1)
# -
ss1[:10,:] # we get back simply a smoothed y value for each x value in the data
# Notice the clean code to plot different models. We'll see even cleaner code in a minute
for cur_model, cur_frac in zip([ss1,ss2,ss3,ss4],[0.15,0.25,0.7,1]):
    # Each cur_model is an (n, 2) array of (x, smoothed y) pairs.
    ax = diab.plot.scatter(x='age',y='y',c='Red',title="Lowess Fit, Fraction = {}".format(cur_frac))
    ax.set_xlabel("Age at Diagnosis")
    ax.set_ylabel("Log C-Peptide Concentration")
    ax.plot(cur_model[:,0],cur_model[:,1],color="blue")
    plt.show()
# **In R**
#
# We need to:
# - Import the loess function
# - Send data over to R
# - Call the function and get results
# +
r_loess = robjects.r['loess.smooth'] #extract R function
# Convert the pandas columns into R float vectors so they can be passed to R.
r_y = robjects.FloatVector(diab['y'])
r_age = robjects.FloatVector(diab['age'])
# span plays the same role as lowess's frac; degree=1 fits local lines.
ss1_r = r_loess(r_age,r_y, span=0.15, degree=1)
# -
ss1_r #again, a smoothed y value for each x value in the data
# **Varying span**
# Next, some extremely clean code to fit and plot models with various parameter settings. (Though the `zip()` method seen earlier is great when e.g. the label and the parameter differ)
# Fit and plot an R loess smooth for several span settings; the smoothed
# object is indexable, with element 0 the x grid and element 1 the fitted y.
for span_val in [0.15, 0.25, 0.7, 1]:
    smoothed = r_loess(r_age, r_y, span=span_val)
    axis = diab.plot.scatter(
        x='age', y='y', c='Red',
        title="Lowess Fit, Fraction = {}".format(span_val))
    axis.set_xlabel("Age at Diagnosis")
    axis.set_ylabel("Log C-Peptide Concentration")
    axis.plot(smoothed[0], smoothed[1], color="blue")
    plt.show()
# ## Smoothing Splines
# From this point forward, we're working with R functions; these models aren't (well) supported in Python.
#
# For clarity: this is the fancy spline model that minimizes $MSE - \lambda\cdot\text{wiggle penalty}$ $=$ $\sum_{i=1}^N \left(y_i - f(x_i)\right)^2 - \lambda \int \left(f''(x)\right)^2$, across all possible functions $f$. The winner will always be a continuous, cubic polynomial with a knot at each data point
# +
r_smooth_spline = robjects.r['smooth.spline'] #extract R function
# run smoothing function
# spar is R's scale-free smoothing parameter; it maps internally to a lambda
# roughness penalty (spar=0 gives a very wiggly fit).
spline1 = r_smooth_spline(r_age, r_y, spar=0)
# -
# <div class="exercise"><b>Exercise 3</b></div>
#
# 1. We actually set the spar parameter, a scale-free value that translates to a $\lambda$ through a complex expression. Inspect the 'spline1' result and extract the implied value of $\lambda$
# 2. Working from the fitting/plotting loop examples above, produce a plot like the one below for spar = [0,.5,.9,2], including axes labels and title.
# Exercise 3.1 solution: the implied lambda lives inside the fitted object.
lambda1 = spline1.rx2("lambda")

# Exercise 3.2 solution: refit and plot for several spar settings.
for cur_spar in [0, 0.5, 0.9, 2]:
    cur_model = r_smooth_spline(r_age, r_y, spar=cur_spar)
    cur_lambda = cur_model.rx2("lambda")[0]
    # FIX: the TeX label is now a raw string -- "\l" in a plain string literal
    # is an invalid escape sequence (a SyntaxWarning on modern Python). The
    # resulting string value is unchanged.
    ax = diab.plot.scatter(x='age', y='y', c='Red',
                           title=r"$\lambda=$" + str(cur_lambda))  # can use TeX style in labels
    ax.set_xlabel("Age at Diagnosis")
    ax.set_ylabel("Log C-Peptide Concentration")
    ax.plot(cur_model.rx2("x"), cur_model.rx2("y"), color="darkgreen")
    plt.show()
# **CV**
# R's `smooth.spline` function has built-in CV to find a good lambda. See package [docs](https://www.rdocumentation.org/packages/stats/versions/3.5.2/topics/smooth.spline).
# +
# cv=True asks R to choose lambda by cross-validation.
spline_cv = r_smooth_spline(r_age, r_y, cv=True)
lambda_cv = spline_cv.rx2("lambda")[0]
# FIX: raw string for the TeX label -- "\l" in a plain string literal is an
# invalid escape sequence (SyntaxWarning on modern Python); value unchanged.
ax19 = diab.plot.scatter(
    x='age', y='y', c='Red',
    title=r"smoothing spline with $\lambda=$" + str(np.round(lambda_cv, 4)) + ", chosen by cross-validation")
ax19.set_xlabel("Age at Diagnosis")
ax19.set_ylabel("Log C-Peptide Concentration")
ax19.plot(spline_cv.rx2("x"), spline_cv.rx2("y"), color="darkgreen");
# -
# ## Natural & Basis Splines
# Here, we take a step backward on model complexity, but a step forward in coding complexity. We'll be working with R's formula interface again, so we will need to populate Formulas and DataFrames.
# +
#We will now work with a new dataset, called GAGurine.
#The dataset description (from the R package MASS) is below:
#Data were collected on the concentration of a chemical GAG
# in the urine of 314 children aged from zero to seventeen years.
# The aim of the study was to produce a chart to help a paediatrician
# to assess if a child's GAG concentration is ‘normal’.
#The variables are:
# Age: age of child in years.
# GAG: concentration of GAG (the units have been lost).
# +
# Load the GAGurine data (Age in years, GAG concentration) and scatter-plot it.
GAGurine = pd.read_csv("data/GAGurine.csv")
display(GAGurine.head())
ax31 = GAGurine.plot.scatter(x='Age',y='GAG',c='black',title="GAG in urine of children")
ax31.set_xlabel("Age");
ax31.set_ylabel("GAG");
# -
# Standard stuff: import function, convert variables to R format, call function
# +
from rpy2.robjects.packages import importr
r_splines = importr('splines')
# populate R variables
# NOTE: this rebinds r_age -- it previously held the diabetes ages and from
# here on holds the GAGurine ages.
r_gag = robjects.FloatVector(GAGurine['GAG'].values)
r_age = robjects.FloatVector(GAGurine['Age'].values)
r_quarts = robjects.FloatVector(np.quantile(r_age,[.25,.5,.75])) #woah, numpy functions run on R objects!
# -
# What happens when we call the ns or bs functions from r_splines?
# ns/bs return basis (design) matrices, not fitted models.
ns_design = r_splines.ns(r_age, knots=r_quarts)
bs_design = r_splines.bs(r_age, knots=r_quarts)
print(ns_design)
# `ns` and `bs` return design matrices, not model objects! That's because they're meant to work with `lm`'s formula interface. To get a model object we populate a formula including `ns(<var>,<knots>)` and fit to data
# +
r_lm = robjects.r['lm']
r_predict = robjects.r['predict']
# populate the formula
# The names used in the formula string must be bound in its environment below.
ns_formula = robjects.Formula("Gag ~ ns(Age, knots=r_quarts)")
ns_formula.environment['Gag'] = r_gag
ns_formula.environment['Age'] = r_age
ns_formula.environment['r_quarts'] = r_quarts
# fit the model
ns_model = r_lm(ns_formula)
# -
# Predict like usual: build a dataframe to predict on and call `predict()`
# +
# predict
# 100 evenly spaced ages from 0 to 20 for a smooth prediction curve.
predict_frame = robjects.DataFrame({"Age": robjects.FloatVector(np.linspace(0,20,100))})
ns_out = r_predict(ns_model, predict_frame)
# -
ax32 = GAGurine.plot.scatter(x='Age',y='GAG',c='grey',title="GAG in urine of children")
ax32.set_xlabel("Age")
ax32.set_ylabel("GAG")
ax32.plot(predict_frame.rx2("Age"),ns_out, color='red')
ax32.legend(["Natural spline, knots at quartiles"]);
# <div class="exercise"><b>Exercise 4</b></div>
#
# 1. Fit a basis spline model with the same knots, and add it to the plot above
# 2. Fit a basis spline with 8 knots placed at [2,4,6...14,16] and add it to the plot above
# +
# Exercise 4.1 solution: basis (B-) spline with knots at the quartiles.
bs_formula = robjects.Formula("Gag ~ bs(Age, knots=r_quarts)")
bs_formula.environment['Gag'] = r_gag
bs_formula.environment['Age'] = r_age
bs_formula.environment['r_quarts'] = r_quarts
bs_model = r_lm(bs_formula)
bs_out = r_predict(bs_model, predict_frame)
# -
ax32 = GAGurine.plot.scatter(x='Age',y='GAG',c='grey',title="GAG in urine of children")
ax32.set_xlabel("Age")
ax32.set_ylabel("GAG")
ax32.plot(predict_frame.rx2("Age"),ns_out, color='red')
ax32.plot(predict_frame.rx2("Age"),bs_out, color='blue')
ax32.legend(["Natural spline, knots at quartiles","B-spline, knots at quartiles"]);
# +
# Exercise 4.2 solution: B-spline with 8 evenly spaced knots.
# NOTE(review): the environment name 'r_quarts' is misleading here -- it is
# bound to knots [2,4,...,16], not quartiles. The name only needs to match the
# formula string, but renaming both would be clearer.
overfit_formula = robjects.Formula("Gag ~ bs(Age, knots=r_quarts)")
overfit_formula.environment['Gag'] = r_gag
overfit_formula.environment['Age'] = r_age
overfit_formula.environment['r_quarts'] = robjects.FloatVector(np.array([2,4,6,8,10,12,14,16]))
overfit_model = r_lm(overfit_formula)
overfit_out = r_predict(overfit_model, predict_frame)
# -
ax32 = GAGurine.plot.scatter(x='Age',y='GAG',c='grey',title="GAG in urine of children")
ax32.set_xlabel("Age")
ax32.set_ylabel("GAG")
ax32.plot(predict_frame.rx2("Age"),ns_out, color='red')
ax32.plot(predict_frame.rx2("Age"),bs_out, color='blue')
ax32.plot(predict_frame.rx2("Age"),overfit_out, color='green')
ax32.legend(["Natural spline, knots at quartiles", "B-spline, knots at quartiles", "B-spline, lots of knots"]);
# ## GAMs
# We come, at last, to our most advanced model. The coding here isn't any more complex than we've done before, though the behind-the-scenes is awesome.
#
# First, let's get our (multivariate!) data
# +
# Load the kyphosis dataset and print a short data dictionary.
kyphosis = pd.read_csv("data/kyphosis.csv")
# FIX: corrected the typo "wherther" -> "whether" in the printed description.
print("""
# kyphosis - whether a particular deformation was present post-operation
# age - patient's age in months
# number - the number of vertebrae involved in the operation
# start - the number of the topmost vertebrae operated on
""")
display(kyphosis.head())
display(kyphosis.describe(include='all'))
display(kyphosis.dtypes)
# +
#If there are errors about missing R packages, run the code below:
#r_utils = importr('utils')
#r_utils.install_packages('codetools')
#r_utils.install_packages('gam')
# -
# To fit a GAM, we
# - Import the `gam` library
# - Populate a formula including `s(<var>)` on variables we want to fit smooths for
# - Call `gam(formula, family=<string>)` where `family` is a string naming a probability distribution, chosen based on how the response variable is thought to occur.
# - Rough `family` guidelines:
# - Response is binary or "N occurrences out of M tries", e.g. number of lab rats (out of 10) developing disease: choose `"binomial"`
# - Response is a count with no logical upper bound, e.g. number of ice creams sold: choose `"poisson"`
# - Response is real, with normally-distributed noise, e.g. person's height: choose `"gaussian"` (the default)
# +
#There is a Python library in development for using GAMs (https://github.com/dswah/pyGAM)
# but it is not yet as comprehensive as the R GAM library, which we will use here instead.
# R also has the mgcv library, which implements some more advanced/flexible fitting methods
r_gam_lib = importr('gam')
r_gam = r_gam_lib.gam
# Convert the response to an R factor and the predictors to R float vectors.
# NOTE(review): kyphosis[["Kyphosis"]].values is a 2-D (n, 1) array; rpy2
# accepts it here, but kyphosis["Kyphosis"].values (1-D) would be cleaner --
# confirm before changing.
r_kyph = robjects.FactorVector(kyphosis[["Kyphosis"]].values)
r_Age = robjects.FloatVector(kyphosis[["Age"]].values)
r_Number = robjects.FloatVector(kyphosis[["Number"]].values)
r_Start = robjects.FloatVector(kyphosis[["Start"]].values)
# s(<var>) in the formula requests a smooth term for that variable.
kyph1_fmla = robjects.Formula("Kyphosis ~ s(Age) + s(Number) + s(Start)")
kyph1_fmla.environment['Kyphosis']=r_kyph
kyph1_fmla.environment['Age']=r_Age
kyph1_fmla.environment['Number']=r_Number
kyph1_fmla.environment['Start']=r_Start
# family="binomial" because the response is binary (deformation present/absent).
kyph1_gam = r_gam(kyph1_fmla, family="binomial")
# -
# The fitted gam model has a lot of interesting data within it
print(kyph1_gam.names)
# Remember plotting? Calling R's `plot()` on a gam model is the easiest way to view the fitted splines
# %R -i kyph1_gam plot(kyph1_gam, residuals=TRUE,se=TRUE, scale=20);
# Prediction works like normal (build a data frame to predict on, if you don't already have one, and call `predict()`). However, predict always reports the sum of the individual variable effects. If `family` is non-default this can be different from the actual prediction for that point.
#
# For instance, we're doing a 'logistic regression' so the raw prediction is log odds, but we can get probability by using in `predict(..., type="response")`
# +
# Build a small frame of new patients and predict their kyphosis outcomes.
kyph_new = robjects.DataFrame({'Age': robjects.IntVector((84,85,86)),
                               'Start': robjects.IntVector((5,3,1)),
                               'Number': robjects.IntVector((1,6,10))})
print("Raw response (so, Log odds):")
display(r_predict(kyph1_gam, kyph_new))
# FIX: corrected the typo "probabilty" -> "probability" in the printed label.
# type="response" applies the inverse link, giving probabilities.
print("Scaled response (so, probability of kyphosis):")
display(r_predict(kyph1_gam, kyph_new, type="response"))
| notebooks/drafts/R_and_Python/R_and_Python_Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import matplotlib.pyplot as plt
# NOTE(review): the bare 'seaborn' style name was removed in matplotlib 3.6;
# newer versions require 'seaborn-v0_8' (or seaborn.set_theme()). Confirm the
# installed matplotlib version before running.
plt.style.use('seaborn')
# +
import os
from urllib.request import urlretrieve
import pandas as pd
URL = 'https://data.seattle.gov/api/views/65db-xm6k/rows.csv?accessType=DOWNLOAD'


def get_fremont_data(filename='Fremont.csv', url=URL, force_download=False):
    """Download (if needed) and load the Fremont bridge bicycle-count data.

    Parameters
    ----------
    filename : str
        Local cache path for the CSV file.
    url : str
        Source URL to download the data from.
    force_download : bool
        If True, re-download even when the cache file already exists.

    Returns
    -------
    pandas.DataFrame
        Indexed by the parsed 'Date' column, with the three data columns
        renamed to ('Total', 'East', 'West').
    """
    if force_download or not os.path.exists(filename):
        # BUG FIX: was `urlretrieve(url, fliename)` -- a misspelled name that
        # raised NameError whenever a download was actually triggered.
        urlretrieve(url, filename)
    # BUG FIX: read the requested file rather than a hard-coded 'Fremont.csv',
    # so a non-default `filename` argument is honored.
    data = pd.read_csv(filename, index_col='Date', parse_dates=True)
    data.columns = ('Total', 'East', 'West')
    return data
# -
# Load (and cache) the data, then explore it at several time resolutions.
data = get_fremont_data()
# Weekly totals show the seasonal cycle.
data.resample('W').sum().plot();
# 365-day rolling sum of daily totals gives an annualized trend line.
ax = data.resample('D').sum().rolling(365).sum().plot();
ax.set_ylim(0,None);
# Average ridership profile by time of day.
data.groupby(data.index.time).mean().plot();
# One column per calendar day, one row per time of day.
pivoted = data.pivot_table('Total',index = data.index.time, columns = data.index.date)
pivoted.iloc[:5, :5]
# Each day becomes one faint line; alpha=0.01 reveals the density of patterns.
pivoted.plot(legend = False, alpha = 0.01);
| JupyterWorkflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys
# Make the package in the parent directory importable when this notebook is
# run from the examples/ folder (development-checkout convenience hack).
sys.path.append("../")
# + pycharm={"name": "#%%\n"}
from py3js.tree import Tree, Node, TreeKind
# + pycharm={"name": "#%%\n"}
# Build a small demo tree; Node takes a label and an optional list of children.
root = Node("christmas", [
    Node("tree"),
    Node("presents"),
    Node("holiday", [
        Node("Covid", [
            Node("sit at home"),
            # Multi-line label to exercise label wrapping in the renderer.
            Node("a very long label \na very long \n label a very long label ")
        ]),
        Node("Not Covid"),
    ])
])
# + pycharm={"name": "#%%\n"}
# normal view
Tree(root, TreeKind.TIDY, width=800, font_size=15)
# + pycharm={"name": "#%%\n"}
# radial view
Tree(root, TreeKind.RADIAL_TIDY)
| examples/tree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Regression with Amazon SageMaker XGBoost algorithm
# _**Distributed training for regression with Amazon SageMaker XGBoost script mode**_
#
# ---
#
# ## Contents
# 1. [Introduction](#Introduction)
# 2. [Setup](#Setup)
# 1. [Fetching the dataset](#Fetching-the-dataset)
# 2. [Data Ingestion](#Data-ingestion)
# 3. [Training the XGBoost model](#Training-the-XGBoost-model)
# 3. [Deploying the XGBoost model](#Deploying-the-XGBoost-model)
#
# ---
# ## Introduction
#
# This notebook demonstrates the use of Amazon SageMaker XGBoost to train and host a regression model. [XGBoost (eXtreme Gradient Boosting)](https://xgboost.readthedocs.io) is a popular and efficient machine learning algorithm used for regression and classification tasks on tabular datasets. It implements a technique known as gradient boosting on trees, performs remarkably well in machine learning competitions, and gets a lot of attention from customers.
#
# We use the [Abalone data](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression.html), originally from the [UCI data repository](https://archive.ics.uci.edu/ml/datasets/abalone). More details about the original dataset can be found [here](https://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.names). In this libsvm converted version, the nominal feature (Male/Female/Infant) has been converted into a real valued feature as required by XGBoost. Age of abalone is to be predicted from eight physical measurements.
#
# ---
# ## Setup
#
#
# This notebook was created and tested on an ml.m5.2xlarge notebook instance.
#
# Let's start by specifying:
# 1. The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.
# 1. The IAM role arn used to give training and hosting access to your data. See the documentation for how to create these. Note, if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp with a the appropriate full IAM role arn string(s).
# ensure sagemaker version >= 1.35.0
# !pip show sagemaker
# + isConfigCell=true
# %%time
import os
import boto3
import re
import sagemaker
# Get a SageMaker-compatible role used by this Notebook Instance.
role = sagemaker.get_execution_role()
region = boto3.Session().region_name
### update below values appropriately ###
# default_bucket() creates the bucket in this region if it does not exist.
bucket = sagemaker.Session().default_bucket()
prefix = 'sagemaker/DEMO-xgboost-dist-script'
####
print(region)
# -
# ### Fetching the dataset
#
# Following methods split the data into train/test/validation datasets and upload files to S3.
# +
# %%time
import io
import boto3
import random
def data_split(FILE_DATA, DATA_DIR, FILE_TRAIN_0, FILE_TRAIN_1, FILE_VALIDATION, FILE_TEST,
               PERCENT_TRAIN_0, PERCENT_TRAIN_1, PERCENT_VALIDATION, PERCENT_TEST):
    """Randomly partition the lines of FILE_DATA into four files under DATA_DIR.

    BUG FIX: the third parameter was previously named FILE_TRAIN_BASE while
    the body referenced FILE_TRAIN_0 -- that only worked by accidentally
    resolving to the module-level global of the same name. The parameter is
    now named (and used as) FILE_TRAIN_0; call sites are positional, so this
    is backward-compatible.

    Each output gets int(PERCENT/100 * total) lines; leftover lines from
    rounding are simply dropped. Sampling is without replacement and is not
    seeded, so the split differs from run to run.
    """
    # Use context managers so file handles are closed even on error
    # (the original opened five files and never closed the input).
    with open(FILE_DATA, 'r') as f:
        data = f.readlines()

    num_of_data = len(data)
    num_train_0 = int((PERCENT_TRAIN_0 / 100.0) * num_of_data)
    num_train_1 = int((PERCENT_TRAIN_1 / 100.0) * num_of_data)
    num_valid = int((PERCENT_VALIDATION / 100.0) * num_of_data)
    num_tests = int((PERCENT_TEST / 100.0) * num_of_data)

    data_fractions = [num_train_0, num_train_1, num_valid, num_tests]
    split_data = [[], [], [], []]

    # Draw lines at random (without replacement) for each split in turn.
    for split_ind, fraction in enumerate(data_fractions):
        for _ in range(fraction):
            rand_data_ind = random.randint(0, len(data) - 1)
            split_data[split_ind].append(data.pop(rand_data_ind))

    out_names = [FILE_TRAIN_0, FILE_TRAIN_1, FILE_VALIDATION, FILE_TEST]
    for name, lines in zip(out_names, split_data):
        with open(DATA_DIR + "/" + name, 'w') as out:
            out.writelines(lines)
def write_to_s3(fobj, bucket, key):
    # Stream an open file object to s3://bucket/key using the notebook's region.
    # Relies on the module-level `region` set in the setup cell above.
    return boto3.Session(region_name=region).resource('s3').Bucket(bucket).Object(key).upload_fileobj(fobj)
def upload_to_s3(bucket, channel, filename):
    """Upload a local file to s3://bucket/<prefix>/<channel>.

    Callers pass the full object-name suffix (e.g. 'train/train_0.libsvm') as
    `channel`; the module-level `prefix` is prepended to form the object key.
    """
    key = prefix + '/' + channel
    # BUG FIX: the log line previously appended the local filename to the key,
    # printing a URL that did not match the object actually written.
    url = 's3://{}/{}'.format(bucket, key)
    print('Writing to {}'.format(url))
    # BUG FIX: use a context manager so the file handle is always closed
    # (previously the file object was opened and never closed).
    with open(filename, 'rb') as fobj:
        write_to_s3(fobj, bucket, key)
# -
# ### Data ingestion
#
# Next, we read the dataset from the existing repository into memory, for preprocessing prior to training. This processing could be done *in situ* by Amazon Athena, Apache Spark in Amazon EMR, Amazon Redshift, etc., assuming the dataset is present in the appropriate location. Then, the next step would be to transfer the data to S3 for use in training. For small datasets, such as this one, reading into memory isn't onerous, though it would be for larger datasets.
# +
# %%time
import urllib.request
# Load the dataset
# Download the libsvm-format abalone data to the local working directory.
FILE_DATA = 'abalone'
urllib.request.urlretrieve("https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/regression/abalone", FILE_DATA)
#split the downloaded data into train/test/validation files
# Two train shards (one per training instance) plus validation and test:
# 35% / 35% / 15% / 15%.
FILE_TRAIN_0 = 'abalone.train_0'
FILE_TRAIN_1 = 'abalone.train_1'
FILE_VALIDATION = 'abalone.validation'
FILE_TEST = 'abalone.test'
PERCENT_TRAIN_0 = 35
PERCENT_TRAIN_1 = 35
PERCENT_VALIDATION = 15
PERCENT_TEST = 15
DATA_DIR = 'data'
if not os.path.exists(DATA_DIR):
    os.mkdir(DATA_DIR)
data_split(FILE_DATA, DATA_DIR, FILE_TRAIN_0, FILE_TRAIN_1, FILE_VALIDATION, FILE_TEST,
           PERCENT_TRAIN_0, PERCENT_TRAIN_1, PERCENT_VALIDATION, PERCENT_TEST)
# -
#upload the files to the S3 bucket
# The second argument is the full object-name suffix under the S3 prefix.
upload_to_s3(bucket, 'train/train_0.libsvm', DATA_DIR + "/" + FILE_TRAIN_0)
upload_to_s3(bucket, 'train/train_1.libsvm', DATA_DIR + "/" + FILE_TRAIN_1)
upload_to_s3(bucket, 'validation/validation.libsvm', DATA_DIR + "/" + FILE_VALIDATION)
upload_to_s3(bucket, 'test/test.libsvm', DATA_DIR + "/" + FILE_TEST)
# ## Create a XGBoost script to train with
#
# SageMaker can now run an XGboost script using the XGBoost estimator. When executed on SageMaker a number of helpful environment variables are available to access properties of the training environment, such as:
#
# - `SM_MODEL_DIR`: A string representing the path to the directory to write model artifacts to. Any artifacts saved in this folder are uploaded to S3 for model hosting after the training job completes.
# - `SM_OUTPUT_DIR`: A string representing the filesystem path to write output artifacts to. Output artifacts may include checkpoints, graphs, and other files to save, not including model artifacts. These artifacts are compressed and uploaded to S3 to the same S3 prefix as the model artifacts.
#
# Supposing two input channels, 'train' and 'validation', were used in the call to the XGBoost estimator's fit() method, the following environment variables will be set, following the format `SM_CHANNEL_[channel_name]`:
#
# `SM_CHANNEL_TRAIN`: A string representing the path to the directory containing data in the 'train' channel
# `SM_CHANNEL_VALIDATION`: Same as above, but for the 'validation' channel.
#
# A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to model_dir so that it can be hosted later. Hyperparameters are passed to your script as arguments and can be retrieved with an argparse.ArgumentParser instance. For example, the script that we will run in this notebook is provided as the accompanying file (`abalone.py`) and also shown below:
#
# ```python
#
# import argparse
# import json
# import logging
# import os
# import pandas as pd
# import pickle as pkl
#
# from sagemaker_containers import entry_point
# from sagemaker_xgboost_container.data_utils import get_dmatrix
# from sagemaker_xgboost_container import distributed
#
# import xgboost as xgb
#
#
# def _xgb_train(params, dtrain, evals, num_boost_round, model_dir, is_master):
# """Run xgb train on arguments given with rabit initialized.
#
# This is our rabit execution function.
#
# :param args_dict: Argument dictionary used to run xgb.train().
# :param is_master: True if current node is master host in distributed training, or is running single node training job. Note that rabit_run will include this argument.
# """
# booster = xgb.train(params=params, dtrain=dtrain, evals=evals, num_boost_round=num_boost_round)
#
# if is_master:
# model_location = model_dir + '/xgboost-model'
# pkl.dump(booster, open(model_location, 'wb'))
# logging.info("Stored trained model at {}".format(model_location))
#
#
# if __name__ == '__main__':
# parser = argparse.ArgumentParser()
#
# # Hyperparameters are described here. In this simple example we are just including one hyperparameter.
# parser.add_argument('--max_depth', type=int,)
# parser.add_argument('--eta', type=float)
# parser.add_argument('--gamma', type=int)
# parser.add_argument('--min_child_weight', type=int)
# parser.add_argument('--subsample', type=float)
# parser.add_argument('--verbose', type=int)
# parser.add_argument('--objective', type=str)
# parser.add_argument('--num_round', type=int)
#
# # Sagemaker specific arguments. Defaults are set in the environment variables.
# parser.add_argument('--output_data_dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])
# parser.add_argument('--model_dir', type=str, default=os.environ['SM_MODEL_DIR'])
# parser.add_argument('--train', type=str, default=os.environ['SM_CHANNEL_TRAIN'])
# parser.add_argument('--validation', type=str, default=os.environ['SM_CHANNEL_VALIDATION'])
# parser.add_argument('--sm_hosts', type=str, default=os.environ['SM_HOSTS'])
# parser.add_argument('--sm_current_host', type=str, default=os.environ['SM_CURRENT_HOST'])
#
# args, _ = parser.parse_known_args()
#
# # Get SageMaker host information from runtime environment variables
# sm_hosts = json.loads(os.environ['SM_HOSTS'])
# sm_current_host = args.sm_current_host
#
# dtrain = get_dmatrix(args.train, 'libsvm')
# dval = get_dmatrix(args.validation, 'libsvm')
# watchlist = [(dtrain, 'train'), (dval, 'validation')] if dval is not None else [(dtrain, 'train')]
#
# train_hp = {
# 'max_depth': args.max_depth,
# 'eta': args.eta,
# 'gamma': args.gamma,
# 'min_child_weight': args.min_child_weight,
# 'subsample': args.subsample,
# 'verbose': args.verbose,
# 'objective': args.objective}
#
# xgb_train_args = dict(
# params=train_hp,
# dtrain=dtrain,
# evals=watchlist,
# num_boost_round=args.num_round,
# model_dir=args.model_dir)
#
# if len(sm_hosts) > 1:
# # Wait until all hosts are able to find each other
# entry_point._wait_hostname_resolution()
#
# # Execute training function after initializing rabit.
# distributed.rabit_run(
# exec_fun=_xgb_train,
# args=xgb_train_args,
# include_in_training=(dtrain is not None),
# hosts=sm_hosts,
# current_host=sm_current_host,
# update_rabit_args=True
# )
# else:
# # If single node training, call training method directly.
# if dtrain:
# xgb_train_args['is_master'] = True
# _xgb_train(**xgb_train_args)
# else:
# raise ValueError("Training channel must have data to train model.")
#
#
# def model_fn(model_dir):
# """Deserialized and return fitted model.
#
# Note that this should have the same name as the serialized model in the _xgb_train method
# """
# model_file = 'xgboost-model'
# booster = pkl.load(open(os.path.join(model_dir, model_file), 'rb'))
# return booster
# ```
#
#
# Because the container imports your training script, always put your training code in a main guard `(if __name__=='__main__':)` so that the container does not inadvertently run your training code at the wrong point in execution.
#
# For more information about training environment variables, please visit https://github.com/aws/sagemaker-containers.
# ## Training the XGBoost model
#
# After setting training parameters, we kick off training, and poll for status until training is completed, which in this example, takes between few minutes.
#
# To run our training script on SageMaker, we construct a sagemaker.xgboost.estimator.XGBoost estimator, which accepts several constructor arguments:
#
# * __entry_point__: The path to the Python script SageMaker runs for training and prediction.
# * __role__: Role ARN
# * __train_instance_type__ *(optional)*: The type of SageMaker instances for training. __Note__: Because Scikit-learn does not natively support GPU training, Sagemaker Scikit-learn does not currently support training on GPU instance types.
# * __sagemaker_session__ *(optional)*: The session used to train on Sagemaker.
# * __hyperparameters__ *(optional)*: A dictionary passed to the train function as hyperparameters.
# +
# Hyperparameters are passed to the script as strings; the names must match
# the argparse arguments in abalone.py (shown in the markdown cell above).
# NOTE(review): "reg:linear" is the correct objective name for XGBoost 0.90
# used here, but it was renamed to "reg:squarederror" in later versions --
# update if you bump framework_version.
hyperparams = {
    "max_depth":"5",
    "eta":"0.2",
    "gamma":"4",
    "min_child_weight":"6",
    "subsample":"0.7",
    "verbose":"1",
    "objective":"reg:linear",
    "num_round":"50"}
instance_type = "ml.m5.2xlarge"
output_path = 's3://{}/{}/{}/output'.format(bucket, prefix, 'abalone-dist-xgb')
content_type = "libsvm"
# +
# Open Source distributed script mode
from sagemaker.session import s3_input, Session
from sagemaker.xgboost.estimator import XGBoost
boto_session = boto3.Session(region_name=region)
session = Session(boto_session=boto_session)
script_path = 'abalone.py'
# train_instance_count=2 makes this a distributed (2-node) training job.
xgb_script_mode_estimator = XGBoost(
    entry_point=script_path,
    framework_version='0.90-1', # Note: framework_version is mandatory
    hyperparameters=hyperparams,
    role=role,
    train_instance_count=2,
    train_instance_type=instance_type,
    output_path=output_path)
# Input channels become SM_CHANNEL_TRAIN / SM_CHANNEL_VALIDATION in the script.
train_input = s3_input("s3://{}/{}/{}/".format(bucket, prefix, 'train'), content_type=content_type)
validation_input = s3_input("s3://{}/{}/{}/".format(bucket, prefix, 'validation'), content_type=content_type)
# -
# ### Train XGBoost Estimator on abalone data
#
#
# Training is as simple as calling `fit` on the Estimator. This will start a SageMaker Training job that will download the data, invoke the entry point code (in the provided script file), and save any model artifacts that the script creates.
# Launch the SageMaker training job; this call blocks until training finishes.
xgb_script_mode_estimator.fit({'train': train_input, 'validation': validation_input})
# ## Deploying the XGBoost model
#
# After training, we can use the estimator to create an Amazon SageMaker endpoint – a hosted and managed prediction service that we can use to perform inference.
#
# You can also optionally specify other functions to customize the behavior of deserialization of the input request (`input_fn()`), serialization of the predictions (`output_fn()`), and how predictions are made (`predict_fn()`). The defaults work for our current use-case so we don’t need to define them.
# Deploy the trained model to a real-time endpoint (this provisions hardware).
predictor = xgb_script_mode_estimator.deploy(initial_instance_count=1,
                                             instance_type="ml.m5.2xlarge")
# The payload is raw libsvm text, so a plain str pass-through serializer works.
predictor.serializer = str
test_file = DATA_DIR + "/" + FILE_TEST
with open(test_file, 'r') as f:
    payload = f.read()
# Invoke the endpoint via the low-level runtime client instead of predictor.predict().
runtime_client = boto3.client('runtime.sagemaker', region_name=region)
response = runtime_client.invoke_endpoint(EndpointName=predictor.endpoint,
                                          ContentType='text/x-libsvm',
                                          Body=payload)
result = response['Body'].read().decode('ascii')
print('Predicted values are {}.'.format(result))
# ### (Optional) Delete the Endpoint
#
# If you're done with this exercise, please run the delete_endpoint line in the cell below. This will remove the hosted endpoint and avoid any charges from a stray instance being left on.
# Tear down the hosted endpoint to stop incurring instance charges.
xgb_script_mode_estimator.delete_endpoint()
| introduction_to_amazon_algorithms/xgboost_abalone/xgboost_abalone_dist_script_mode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pyjanitor-dev
# language: python
# name: pyjanitor-dev
# ---
# + [markdown] nbsphinx={"execute": "never"}
# # Inflating and Converting Currency
# -
# ## Notice
#
# This notebook's section on `convert_currency` has been disabled, as `exchangeratesapi.io` has disabled pinging of its API without an API key.
# + [markdown] nbsphinx={"execute": "never"}
# ## Background
# + [markdown] nbsphinx={"execute": "never"}
# This notebook serves to show a brief and simple example of how to use the `convert_currency()` and `inflate_currency()` methods from pyjanitor's finance submodule.
#
# The data for this example notebook come from the [United States Department of Agriculture Economic Research Service](https://www.ers.usda.gov/data-products/food-expenditure-series/), and we are specifically going to download the data of nominal food and alcohol expenditures, with taxes and tips, for all purchasers. The data set includes nominal expenditures for 1997-2018, and the expenditures are provided in **millions** of U.S. dollars for the year in the which the expenditures were made. For example, the expenditure values for 1997 are in units of 1997 U.S. dollars, whereas expenditures for 2018 are in 2018 U.S. dollars.
# -
# ## Getting and Cleaning the Data
# + nbsphinx={"execute": "never"} tags=[]
import pandas as pd
import janitor
import os
# + nbsphinx={"execute": "never"} tags=[]
url = (
    "https://www.ers.usda.gov/webdocs/DataFiles/50606/nominal_expenditures.csv?v=9289.4"
)
# 1) Read in the data from .csv file
# 2) Clean up the column names
# 3) Remove any empty rows or columns
# 4) Melt the dataframe (from wide to long) to obtain "tidy" format
data = (
    pd.read_csv(
        url,
        usecols=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],  # first ten columns only
        nrows=22,  # one row per year, 1997-2018
        thousands=','  # strip thousands separators while parsing numbers
    )
    .clean_names()  # pyjanitor: normalize column names
    .remove_empty()  # pyjanitor: drop all-empty rows/columns
    .melt(id_vars=['year'], var_name='store_type', value_name='expenditure')
)
data.head()
# -
# ## Use inflate_currency() to Inflate All Values to 2018$
# + nbsphinx={"execute": "never"} tags=[]
from janitor.finance import inflate_currency, convert_currency
# Use split-apply-combine strategy to obtain 2018$ values
# Group the data frame by year
grouped = data.groupby(['year'])
# Apply the inflate_currency() method to each group
# (Note that each group comes with a name; in this case,
# the name corresponds to the year)
data_constant_dollar = grouped.apply(
    lambda x: x.inflate_currency(
        column_name='expenditure',
        country='USA',
        currency_year=int(x.name),  # x.name is the group key, i.e. the year
        to_year=2018,
        make_new_column=True  # keep nominal values; add an inflated column
    )
)
data_constant_dollar.head()
# -
# ## Plot Time Series to Observe Currency Inflation
# + nbsphinx={"execute": "never"} tags=[]
# Plot time series of nominal and real (2018$) expenditures for grocery stores
# Note that the 2018 values for both series should be equal
(
data_constant_dollar
.loc[data_constant_dollar['store_type'].str.contains('grocery_stores'), :]
.set_index('year')
.drop(columns='store_type')
.plot()
)
# -
# ## Use convert_currency() to Convert USD to British Pounds
#
# _Note: Disabled and commented out due to `exchangeratesapi.io` policies.
# We are working through the deprecation of the API._
# + nbsphinx={"execute": "never"} tags=[]
from datetime import date
# # Apply the convert_currency() method to the 'expenditure_2018' column
# # Use the exchange rate from Dec. 31, 2018, since our data are in 2018$
# data_constant_pounds = (
# data_constant_dollar
# .convert_currency(
# api_key="a8ef744de81e4bd3908e2acf5a137c3a",
# column_name='expenditure_2018',
# from_currency='USD',
# to_currency='GBP',
# historical_date=date(2018, 12, 31),
# make_new_column=True
# )
# )
# data_constant_pounds.head()
# -
# ## Plot Time Series to Observe Currency Conversion
# + nbsphinx={"execute": "never"}
# # You can see from this plot that GBP were worth more than USD on Dec. 31, 2018
# # (by about 1.3x, it appears)
# (
# data_constant_pounds
# .loc[data_constant_pounds['store_type'].str.contains('grocery_stores'), :]
# .set_index('year')
# .drop(columns='store_type')
# .plot()
# )
# + nbsphinx={"execute": "never"}
| examples/notebooks/inflating_converting_currency.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Detectando faces
# +
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# Load a pre-trained Haar cascade for frontal faces from the local XML file.
detectorObjeto = cv.CascadeClassifier('detector_face.xml')
img = cv.imread('xuxa.jpg')
# Cascade detection runs on grayscale input.
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# detectMultiScale(image, scaleFactor=1.1, minNeighbors=5) -> list of (x, y, w, h) boxes.
objetos_detectados = detectorObjeto.detectMultiScale(gray, 1.1, 5)
for (x,y,w,h) in objetos_detectados:
    # Draw a red rectangle (BGR (0,0,255)) with 3 px thickness around each face.
    cv.rectangle(img,(x,y),(x+w,y+h),(0,0,255),3)
# Reverse the channel axis (BGR -> RGB) so matplotlib shows correct colors.
plt.imshow(img[:,:,::-1])
# -
# ## Detectando olhos
# +
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
# Load a pre-trained Haar cascade for eyes from the local XML file.
detectorObjeto = cv.CascadeClassifier('detector_olhos.xml')
img = cv.imread('pele2.jpg')
# Cascade detection runs on grayscale input.
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
# Finer scale step (1.01) and higher minNeighbors (20) than the face cell:
# slower scan but fewer false positives on the small eye regions.
objetos_detectados = detectorObjeto.detectMultiScale(gray, 1.01, 20)
for (x,y,w,h) in objetos_detectados:
    # Draw a red rectangle (BGR (0,0,255)) with 3 px thickness around each eye.
    cv.rectangle(img,(x,y),(x+w,y+h),(0,0,255),3)
# Reverse the channel axis (BGR -> RGB) so matplotlib shows correct colors.
plt.imshow(img[:,:,::-1])
# -
| 1 - Aulas/aula13/DetectorFace.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 画像データの寄与が大きいのかを確認する.
# - TargetEncodingした特徴量のみで線型回帰,lgbmで予測を行う.
# - その結果と画像のみの結果,標準偏差と比較を行う.
# +
import os
import gc
import warnings
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
import lightgbm as lgb
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
# +
DATAPATH = "../data/taskA/table"
df = pd.read_csv(os.path.join(DATAPATH, "asset_data.csv"))
# Treat the last sale price as the regression target.
df = df.rename(columns={"last_sale.total_price": "target"})
# assumes prices are wei-scale integers; * 1e-18 converts to whole tokens (ETH) — TODO confirm units
df['target'] = df['target'].astype(float) * 1e-18
# Keep only assets with a positive sale price.
df = df.query('target > 0').reset_index(drop=True)
# Log-transform to tame the heavy-tailed price distribution.
df['target'] = df['target'].apply(lambda x: np.log1p(x))
display(df.head())
print(f"data shape: {df.shape}")
# -
# ## 比較用の標準偏差を算出
# Per-collection standard deviation of the log target; used later as a
# naive baseline to compare model scores against.
std = df.groupby(['collection.name'])['target'].std()
df['target_std'] = df['collection.name'].map(std)
df
std
# ## CVを用いてスコアを算出する.
def train_model(df, model='linear', n_splits=4):
    """Evaluate a target-encoding-only predictor with K-fold cross-validation.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain 'collection.name' and 'target' columns.
    model : str, default 'linear'
        'lgb' selects lightgbm's LGBMRegressor; anything else (the default
        'linear') selects sklearn's LinearRegression.
    n_splits : int, default 4
        Number of CV folds.

    Prints the mean RMSE and MAE over the folds.
    """
    kf = KFold(n_splits=n_splits, random_state=6174, shuffle=True)
    rmse_scores = np.array([])
    mae_scores = np.array([])
    for train_idx, val_idx in kf.split(df):
        # KFold yields POSITIONAL indices, so slice with iloc (the original
        # used label-based .loc for the targets, which only works when the
        # index happens to be a RangeIndex).
        train_X, val_X = df.iloc[train_idx].copy(), df.iloc[val_idx].copy()
        train_y, val_y = train_X['target'].values, val_X['target'].values
        # Target-encode using the training folds only (avoids leakage).
        enc = train_X.groupby(["collection.name"])['target'].mean()
        global_mean = train_X['target'].mean()
        train_X['target_encoding'] = train_X['collection.name'].map(enc)
        # Collections unseen in the training folds would map to NaN and crash
        # fit/metrics; fall back to the global training mean instead.
        val_X['target_encoding'] = val_X['collection.name'].map(enc).fillna(global_mean)
        train_X = train_X['target_encoding'].values.reshape(-1, 1)
        val_X = val_X['target_encoding'].values.reshape(-1, 1)
        # BUG FIX: the original rebound the `model` parameter to the fitted
        # estimator, so the string comparison silently failed after fold 1.
        if model == 'lgb':
            estimator = lgb.LGBMRegressor()
        else:
            estimator = LinearRegression()
        estimator.fit(train_X, train_y)
        pred = estimator.predict(val_X)
        rmse = np.sqrt(mean_squared_error(val_y, pred))
        mae = mean_absolute_error(val_y, pred)
        rmse_scores = np.append(rmse_scores, rmse)
        mae_scores = np.append(mae_scores, mae)
    print(f"RMSE score: {rmse_scores.mean()}")
    print(f"MAE score: {mae_scores.mean()}")
train_model(df)
train_model(df, model='lgb')
print(f"Price std: {std.mean()}")
| notebooks/target_encoding_pred.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.3
# language: julia
# name: julia-1.6
# ---
# # Game Log Sanitization
# Strip out any personally-identifiable information from game log dump so data can be shared with the community.
# +
import Pkg;
Pkg.add("CSV");
Pkg.add("DataFrames");
Pkg.add("DataFramesMeta");
using CSV;
using DataFrames;
using DataFramesMeta;
import Dates;
# -
# ## Load the data
df = DataFrame(CSV.File("data.csv"));
#df = dropmissing(df, :"runner-username")
#df = dropmissing(df, :"corp-username")
size(df)
# ## Compute game length
# Parse the compact ISO-8601-style timestamps (e.g. "20210101T120000Z")
# recorded for game start and end, then derive each game's length in minutes.
fmt = Dates.DateFormat("yyyymmddTHHMMSSZ")
start_dt = Dates.DateTime.(df[:, :start], fmt)
end_dt = Dates.DateTime.(df[:, :end], fmt)
# Broadcast subtraction yields time periods; round each to whole minutes.
game_time = round.(end_dt - start_dt, Dates.Minute)
# .value unwraps each Minute period to a plain integer.
mins = map(x -> x.value, game_time)
# Insert the game length as a new :time column at position 3.
insertcols!(df, 3, :time => mins)
size(df)
# ## Remove game time of day
transform!(df, :start => ByRow(x -> split(x, "T")[1]) => :date)
size(df)
# ## Hash Usernames
# +
#using SHA
#using Random
#salt = randstring(5)
#runners = df[:, :"runner-username"] .* salt
#runners = bytes2hex.(sha256.(runners[:]))
#corps = df[:, :"corp-username"] .* salt
#corps = bytes2hex.(sha256.(corps[:]))
#insertcols!(df, 4, :corpusernamehash => corps)
#insertcols!(df, 5, :runnerusernamehash => runners)
#size(df)
# -
# ## Write new CSV file
for_output = df[:, [:date, :time, :turn, :room, :format, :winner, :reason, :corp, :runner]]
fmt = Dates.DateFormat("yyyymmdd")
filename = string(Dates.format(Dates.now(), fmt), "_games.csv")
CSV.write(filename, for_output)
| clean_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys, os, math, string, datetime as dt, time, re
import pandas as pd, numpy as np, matplotlib.pyplot as plt, sqlite3 as sql
import emosent, emoji, string
import yfinance as yf
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk import pos_tag, sentiment, RegexpParser, bigrams
from nltk.help import upenn_tagset
import csv, sys, re
from api_key import cryptor
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import bs4, pyautogui, requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.keys import Keys
import random
# +
# nltk.download("tagsets")
# -
# ##### Parse Data From Discord Channel Scraper
tree = os.walk("../data/raw")
for folders, subfolders, files in tree:
print(folders)
print('--')
print(subfolders)
print('----')
print(files)
print("\n")
def getalldata(folder):
    """Walk `folder` recursively and concatenate every table of every SQLite
    .db file found into one DataFrame.

    Each row is tagged with 'channel' (db file name without extension) and
    'server' (name of the directory the db file sits in).

    Fixes over the original: the bare `next` no-op is now an explicit
    `continue`; the root-folder check uses the `folder` argument instead of a
    hard-coded "../data/raw"; path handling uses os.path (the original's
    "\\\\" string surgery only worked on Windows); connections are closed even
    on error; frames are collected and concatenated once instead of
    re-concatenating inside the loop (quadratic).

    Parameters
    ----------
    folder : str
        Root directory of the scraped data, e.g. "../data/raw".

    Returns
    -------
    pd.DataFrame with the scraped columns plus 'channel' and 'server'.
    """
    frames = []
    for dirpath, _subdirs, files in os.walk(folder):
        # Skip the root itself; only the per-server subfolders hold .db files.
        if os.path.normpath(dirpath) == os.path.normpath(folder):
            continue
        server_name = os.path.basename(os.path.normpath(dirpath))  # server name to append to df
        for file in files:  # get database file
            db_path = os.path.join(dirpath, file)
            connect = sql.connect(db_path)
            try:
                mycur = connect.cursor()
                # Enumerate all user tables in this database.
                mycur.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name")
                available_tables = mycur.fetchall()
                channel_name = os.path.splitext(file)[0]  # channel name to append to df
                for tbl in available_tables:
                    df = pd.read_sql(f"SELECT * FROM {tbl[0]}", con=connect)
                    df["channel"] = channel_name
                    df["server"] = server_name
                    frames.append(df)
            finally:
                connect.close()
            print(f"processed {channel_name}. Beep Bop Boop.....")
    master_df = pd.DataFrame(columns=["id", "name", "content", "timestamp"])
    if frames:
        master_df = pd.concat([master_df] + frames, axis=0, ignore_index=True)
    return master_df
# Decompose Files to Singular DB w/ Columns to Represent the File Structure
total_df = getalldata("../data/raw")
# Comments
# Servers Scraped
total_df.server.unique()
# Comments
# Channels Scraped
total_df.channel.unique()
# Comments
total_df.server = total_df.server.apply(lambda x: x if x != "Misc" else "misc")
# Comments
total_df = total_df.sort_values(["server", "channel"])
# Comments
total_df.drop_duplicates(subset=["id", "content"])[lambda x: (x.server == "misc")].channel.value_counts()
# ##### Dealing with Duplicate Dbs in /Misc
# Comments
# stock-chat.db = Legacy Trading/profits.db = text3.db
total_df[lambda x: x.channel == "stock-chat"].drop_duplicates(subset=["id", "content"]).sort_values("timestamp")
# Comments
(total_df[lambda x: (x.channel == "profits")&(x.server == "Legacy Trading")]).drop_duplicates(subset=["id", "content"]).sort_values("timestamp")
# Comments
# verdict: text3.db = stock-chat = Legacy Trading/profits
total_df[lambda x: x.channel == "text3"].drop_duplicates(subset=["id", "content"]).sort_values("timestamp")
# <h3 style="color:red">verdict: delete text3.db and stock-chat.db from /Misc/</h3>
# Comments
# OmegaTrades/potentialdupe = misc/trade-ideas.db = misc/text5.db
total_df[lambda x: x.channel == "potentialdupe"].drop_duplicates(subset=["id", "content"]).sort_values("timestamp")
# Comments
# the trade ideas databate contains more unique comments
total_df[lambda x: x.channel == "trade-ideas"].drop_duplicates(subset=["id", "content"]).sort_values("timestamp")
# Comments
# trade-ideas = text5.db
total_df[lambda x: (x.channel == "text5") & (x.server == "misc")].drop_duplicates(subset=["id", "content"]).sort_values("timestamp")
# Comments
# OmegaTrades/potentialdupe = partial duplicate of misc/trade-ideas.db = misc/text5.db that are partial duplicates of text.db
total_df[lambda x: (x.channel == "text") & (x.server == "misc")].drop_duplicates(subset=["id", "content"]).sort_values("timestamp")
# <h3 style="color:red">verdict: delete OmegaTrades/potentialdupe.db, Misc/text5.db and ./trade-ideas.db + rename: text.db to trade-ideas and mv to /OmegaTrades</h3>
# Comments
# Omega Trades/winning = misc/text2.db
total_df[lambda x: (x.channel == "text2") & (x.server == "misc")].drop_duplicates(subset=["id", "content"])
# Comments
# Winning has a few more comments
(total_df[lambda x: (x.channel == "winning")&(x.server == "OmegaTrades")]).drop_duplicates(subset=["id", "content"])
# <h3 style="color:red">verdict: delete Misc/text2.db</h3>
# Comments
# text.db = text4.db
total_df[lambda x: (x.channel == "text") & (x.server == "misc")].drop_duplicates(subset=["id", "content"])
# Comments
total_df[lambda x: (x.channel == "text4") & (x.server == "misc")].drop_duplicates(subset=["id", "content"])
# <h3 style="color:red">remove Misc/text4.db</h3>
# Comments
# wetlqd-ideas = text6.db
total_df[lambda x: (x.channel == "text6") & (x.server == "misc")].drop_duplicates(subset=["id", "content"])
# Comments
total_df[lambda x: (x.channel == "wetlqd-ideas") & (x.server == "misc")].drop_duplicates(subset=["id", "content"])
# <h3 style="color:red">remove Misc/text6.db</h3>
# <h3>/raw/data Cleanup</h3>
# <ul>
# <li>rm: text6, text5, text4, text3, text2, stock-chat, trade-ideas, OmegaTrades/potentialDup</li>
# <li>rename: misc/text.db -> Omega Trades/trade-ideas.db</li>
# </ul>
# Comments
# Retrieve All the Data
total_df_no_misc = getalldata("../data/raw")
# Comments
# Remove Duplicates
total_df_no_misc = total_df_no_misc.drop_duplicates(["id", "content"])
# Comments
# Unique Channels / Topics
total_df_no_misc.channel.unique()
# Comments
total_df_no_misc = total_df_no_misc.sort_values("timestamp").reset_index(drop=True)
# Comments
total_df_no_misc.shape
# Comments
# Define Regular Expressions to find specialized grams / tokens.
# Urls
url_regex = r"https*\:.+|www\..+"
# Discord Emotes Token
discord_emote_regex = r"<:.+:\d+>"
# Names of Chat Specific Emotes
emote_name = r":\w+:"
# Comments
# Function to Decompose Comments into New Columns
# Used to Prep Comments Best for Sentiment Analysis
def add_specialized_token_column(regex, comment_column, new_column_name, df=None):
    """Extract every regex match from a text column into a new
    comma-separated string column.

    Parameters
    ----------
    regex : str or compiled pattern
        Pattern to search for. Must not contain capturing groups —
        re.findall would then return tuples and break the join.
    comment_column : str
        Name of the column holding the raw text.
    new_column_name : str
        Name of the column to create or overwrite.
    df : pd.DataFrame, optional
        Frame to operate on (modified in place and returned). Defaults to
        the module-level `total_df` for backward compatibility with the
        original, which always mutated that global.

    Returns
    -------
    The modified DataFrame.
    """
    if df is None:
        df = total_df  # legacy behavior: mutate the notebook-global frame
    df.loc[:, new_column_name] = df.loc[:, comment_column].apply(
        lambda text: ",".join(re.findall(regex, text))
    )
    return df
# Comments
total_df_w_url = add_specialized_token_column(url_regex, "content", "links")
total_df_w_url.links.unique()
# Comments
total_df_w_new_cols = add_specialized_token_column(discord_emote_regex, "content", "chat_emotes")
total_df_w_new_cols.chat_emotes.unique()
# Comments
total_df_w_new_cols[lambda x: (x.chat_emotes!="")]
# Comments
total_df_w_new_cols.loc[:, "chat_emotes"] = total_df_w_new_cols.chat_emotes.apply(lambda x: ",".join([e.replace(":", "") for e in re.findall(emote_name, x)]))
total_df_w_new_cols[lambda x: x.chat_emotes != ""]
# <h3>Comment Cleaning</h3>
# <ul>
# <li>Starting Point: 31,934 Unique Comments</li>
# <li>In order to get these comments ready for NLP analysis, I must:</li>
# <ul>
# <li>Remove scripted comments (those automatically generated by bots)</li>
# <li>Identify Single Words Making Up Comments That Are Key to This Analysis:</li>
# <ul>
# <li>Emojis and Emotes</li>
# <li>Specific Companies and Their Tickers</li>
# <li>User Mentions</li>
# <li>Links</li>
# </ul>
# </ul>
# </ul>
# #### Find Mentions
# Comments
# A Reference to Specific Chat Users displayed in the chatroom as @user and in the scraped data as <@id>
# Catch All Mentions (User and Generic)
def catchMentions(comment):
    """Return a comma-separated list of Discord mentions found in `comment`.

    Discord encodes user mentions as <@id> or <@!id>, and '@everyone' is the
    broadcast mention. The marker characters (<, >, !, @) are stripped so
    only the numeric id (or the word 'everyone') remains.

    BUG FIX: the original iterated `enumerate([mention])` — a one-element
    list — so it only ever returned the FIRST mention and silently dropped
    the rest. All mentions are now returned.
    """
    userIdCatcherT = r"(<@(!)?\d+>|@everyone)"
    # findall returns (full_match, optional_bang) tuples; keep group 0.
    mentions = [m[0] for m in re.findall(userIdCatcherT, comment)]
    if not mentions:
        return ""
    return ",".join(re.sub(r"[<>!@]", "", m) for m in mentions)
# Comments
total_df = total_df_no_misc.assign(mentions = lambda x: x.content.apply(catchMentions))
# #### Find Bots
# Comments
# Find Automated Response from Non-human Chatters
# # !abbv represent commands said in the channel in order to get a response from specific bot
# total_df[lambda x: x.content.str.contains(r"!ta")].iloc[0, 2] # user commands to call bots ~ to a user mention
def catchBot(c):
    """Flag automated bot chatter.

    Returns 1 when `c` matches either known level-up announcement template
    posted by the chat bots, 0 otherwise.
    """
    # Template 1: "... has reached level **X12**"
    level_msg = re.search("\shas reached level \*\*.\d+\*\*", c)
    # Template 2: "<@id>, you just advanced to ..."
    advance_msg = re.search("\<.*\>, you just advanced to", c.strip())
    return 1 if level_msg is not None or advance_msg is not None else 0
# Comments
# First Run to create record of users
total_df_no_misc = total_df_no_misc.assign(isBot = lambda x: x.content.apply(catchBot))
# Comments
# Get Bot by Discord Id
bots = total_df_no_misc[lambda x: x.isBot == 1].id.unique()
# Comments
# Combine IDs w/ Self Proclaimed Bots
bots = np.concatenate([bots, total_df_no_misc[lambda x: x.name.str.contains("Bot#")].id.unique()], axis=0)
# Comments
# Identify the Robots !
total_df_no_misc.loc[(total_df_no_misc.id.isin(bots)), "isBot"] = 1
# #### Find Emojis
# Comments
# Catch the Emotes
# Good proxy for mood and feeling packaged in a single token / gram
def catchEmojis(comment):
    """Return every unicode emoji in `comment` as a comma-separated string.

    Uses emoji.emoji_lis to locate emojis (NOTE(review): emoji_lis is
    deprecated in newer `emoji` releases in favor of emoji_list — confirm
    the installed version).
    """
    found = emoji.emoji_lis(comment)
    return ",".join(entry["emoji"] for entry in found)
# Comments
total_df = total_df.assign(emojis = lambda x: x.content.apply(catchEmojis))
# Comments
total_df[lambda x: x.emojis != ""].head()
# #### Find Companies
# * Find companies using name, ticker or associated words
# * This project goal is to match and process comment sentiment and match it with the particular companies mentioned.
# * Companies are signified by a variety of words including: their traded ticker abbreviation, their full company name and by the names of members of their C-Suite.
# * In order to accurately and systematically obtain these mentioned companies, I will have to create a simple list of companies based on single words (ticker).
# * Once I have an understanding of the most popular companies being talked about in the groups, I can create a more refined parsing function to identify words that most likely trace back to the company.
# * I want to maintain the structure of the comments throughout the analysis (maintain initial structure), so that my NLP analysis can grade sentiment based on the original comment's grammar and word usage
# Comments
# Discords link to stock starts with a $, but can only pull out 338 comments out of 30,000+. There should be more.
total_df[lambda x: x.content.str.contains(r"\$[A-Za-z]")]
# Comments
# The starting Regex for tickers
company_regex = re.compile(r"[A-Z]{2,4}")
# Comments
# identifying companies based on tickers. Better representative. 338 -> 6,438
total_df[lambda x:(x.content.str.contains(company_regex))]
# next to do:
# gather list of companies on the main American exchanges: NYSE and NASDAQ and their tickers (available via NASDAQ's website)
# have dictionary parser that translates company name mentions to its associated ticker.
# with the name and tickers, a more thorough pulling of the appropriate companies can take place. The pulling should take place on the str.lower value of the content column.
# company mentions will be stored just as emojis and mentions are stored for now. (as comma-separated values column)
# Comments
# Use the previously defined regular expressions to find the non-word tokens in a sentence
userIdCatcher = r"(<@(!)?\d+>|@everyone)"
discord_emote_regex = r"<:.+:\d+>"
url_regex = r"https*\:.+|www\..+"
punc_regex = r"[\’\'\'\"\"\“\”!\?@#$%&\(\)\*,-.\\\{\}+~\/:;<>\[\]^`|=_’]"
contract_regex = r"[\'\’\’]"
emoji_regex = emoji.get_emoji_regexp()
stop_words = set(stopwords.words("english"))
# Comments
# standardize the tokens
def clean_comment(comment):
    """Normalize a raw chat message for tokenization.

    Strips user mentions, Discord custom emotes, URLs, apostrophes and
    unicode emojis, turns punctuation into spaces, flattens newlines, and
    lower-cases the result. Relies on the module-level regex definitions.
    """
    # Mentions, custom emotes and URLs carry no lexical content: drop them.
    structural = "|".join((userIdCatcher, discord_emote_regex, url_regex))
    cleaned = re.sub(structural, "", comment)
    cleaned = re.sub(contract_regex, "", cleaned)  # e.g. don't -> dont
    cleaned = re.sub(emoji_regex, "", cleaned)
    # Punctuation becomes a space so neighboring words stay separate tokens.
    cleaned = re.sub(punc_regex, " ", cleaned)
    return cleaned.replace("\n", " ").lower()
# Comments
# understand basic linguistic structure of comments. Words used. To see how I should approach picking out companies and analyzing sentiment
def wordCounts(comment_array):
    """Count word frequencies across an iterable of raw comments.

    Each comment is lower-cased, stripped of punctuation and digits, and
    tokenized with nltk; English stopwords and emoji tokens are excluded
    from the counts.

    Improvements over the original: the regexes are compiled once per call
    instead of once per comment; counting uses dict.get instead of a
    two-branch membership test glued together with bitwise `&`.

    Returns
    -------
    pd.DataFrame with columns ['word', 'count'], sorted by count descending.
    """
    stop_words = set(stopwords.words("english"))
    # Hoisted out of the loop: compile once per call, not per comment.
    punc_regex = re.compile(r"[\’\'\'\"\"\“\”!\?@#$%&\(\)\*,-.\\\{\}+~\/:;<>\[\]^`|=_]")
    digit_regex = re.compile(r"[0-9]")
    comment_dictionary = {}
    for comment in comment_array:
        # lower here to standardize all words
        comment = comment.lower()
        comment = punc_regex.sub("", comment)
        comment = digit_regex.sub("", comment)
        # Emojis are collected (not removed) so any token equal to an emoji
        # can be excluded below — mirrors the original behavior.
        emoji_tokens = {e["emoji"] for e in emoji.emoji_lis(comment)}
        for w in word_tokenize(comment):
            if w in stop_words or w in emoji_tokens:
                continue
            comment_dictionary[w] = comment_dictionary.get(w, 0) + 1
    comments_words = dict(sorted(comment_dictionary.items(), key=lambda kv: kv[1], reverse=True))
    # Returns DF with token and count.
    return (
        pd.DataFrame.from_dict(comments_words, orient="index", columns=["count"])
        .reset_index()
        .rename(columns={"index": "word"})
    )
# word counts sans stopwords
wordy_df = wordCounts(total_df.content.values)
# possible check for ticker mentions
wordy_df = wordy_df.assign(no_vow = lambda x: x.word.apply(lambda x: 1 if re.findall(r"[aeiou]+", x) != [] else 0))
# Comments
# Example Rough Cut
wordy_df[lambda x: x.no_vow <1].head(30)
total_df[lambda x: x.content.str.contains(r"\s*jks\s*")]
# Comments
# Show Tree Breakdown via part of speech tagging
sample_tokens = word_tokenize(total_df[lambda x: x.content.str.contains("amzn")].sample(1, random_state=6).content.values[0])
sample_pos_tags = pos_tag(sample_tokens)
pattern = 'NP: {<DT>?<JJ>*<NN>}'
comment_parse = RegexpParser(pattern)
tree = comment_parse.parse(sample_pos_tags)
# Comments Draw Tree
print(tree)
tree.draw()
# Comments
# Create a rough Lexicon specific to our chatters
token_df = total_df_w_new_cols.copy(deep=True)
token_df = token_df[lambda x: x.isBot == 0]
token_df = token_df.assign(for_tokenizer = lambda x: x.content.apply(clean_comment)).reset_index(drop=True)
token_df.head()
# Comments
token_df.timestamp = pd.to_datetime(token_df.timestamp)
# Comments
token_df.groupby([pd.Grouper(key="timestamp", freq="M"), "server"]).count()
# Comments
token_df.groupby([pd.Grouper(key="timestamp", freq="M"), "server"]).count()
# Comments
# The people love elon
token_df[lambda x: x.for_tokenizer.str.contains("elon")].loc[3651, "content"]
# Comments
token_df.loc[7733, "content"]
# Comments
token_df.loc[7790, "content"]
sample_tokens = word_tokenize(token_df.loc[7790, "content"])
sample_pos_tags = pos_tag(sample_tokens)
pattern = 'NP: {<DT>?<JJ>*<NN>}'
comment_parse = RegexpParser(pattern)
tree = comment_parse.parse(sample_pos_tags)
print(tree)
tree.draw()
# Comments
lex_df = wordCounts(token_df.for_tokenizer.values)
# Comments
lex_df.shape
# Comments
# sample of key company tokens
lex_df[lambda x: x.word == "amzn"] # stock ticker
# Comments
# sample of key company tokens
lex_df[lambda x: x.word == "tsla"] # stock ticker
# +
# The most important tokens left to be selected are urls, discord specific emotes (<:[A-Za-z]+:\d+>) and finally companies mentioned
# Being able to best ascertain the companies mentioned by specific comments (company name and company ticker) will allow me to begin to collect the associated industry sentiment,
# which will be the daily financial data of the most popular companies, the Financial News during this period and its sentiment and Institutional Analyst Ratings from Investment Banks and Research Facilities
# These three separate data sources will provide sentiment scores representing retail investor sentiment (from the discord private chats), financial news sector sentiment (from the scraping of the Financial News)
# and the institutional players Sentiment for the analyzed Companies.
# The three sentiments of the separate sectors will give us the general mood of these sectors in time to match with the financial data for a company during this same time.
# Having these separate data sources broken down by type will allow us to tell the story of a company by the moods of the separate cohorts.
# +
# Finding Companies
# We want to get a list of all tokens and their frequencies as the starting point to company mentions.
# to do so, we need to copy the content column, lower its words, strip out the specialized tokens in a row's columns, remove all puncutations and remove stop words.
# From here, we should get a good starting lexicon for the chatrooms and analyze it to see which tokens refer back to a specific company
# -
# Comments Create Bigrams
def pairCounts(comment_array):
    """Count bigram frequencies across an iterable of cleaned comments.

    A bigram is kept only when neither of its two words is an English
    stopword (uses the module-level `stop_words` set).

    Improvements over the original: the vacuous `pair not in stop_words`
    test is gone (a two-word joined string is never a stopword, so it was
    always true); the bigram tuple is used directly instead of being joined
    and immediately re-split; counting uses dict.get instead of a
    two-branch membership test glued together with bitwise `&`.

    Returns
    -------
    pd.DataFrame with columns ['word', 'count'], sorted by count descending.
    """
    comment_dictionary = {}
    for comment in comment_array:
        words = word_tokenize(comment)
        for first, second in bigrams(words):
            if first in stop_words or second in stop_words:
                continue
            pair = f"{first} {second}"
            comment_dictionary[pair] = comment_dictionary.get(pair, 0) + 1
    comments_words = dict(sorted(comment_dictionary.items(), key=lambda kv: kv[1], reverse=True))
    return (
        pd.DataFrame.from_dict(comments_words, orient="index", columns=["count"])
        .reset_index()
        .rename(columns={"index": "word"})
    )
# Comments
word_pairs = pairCounts(token_df.for_tokenizer.values)
# Comments
word_pairs.head(25)
# Comments
ticker_array = tickers_and_names.ticker.values
# Comments
tickers_and_names[lambda x: x.ticker == "nio"]
# Comments
######## del
company_regex = re.compile(r"[A-Z]{2,5}")
# Comments
# with Ticker mentions
token_df[lambda x: x.content.str.contains(company_regex)]
# Comments
# look through comments example
token_df[lambda x: x.for_tokenizer.str.contains(r" h ")]
# Comments
# look through comments
token_df[lambda x: x.for_tokenizer.str.contains(r"disney")].iloc[-3, 2]
# Comments
# Token DF
token_df = token_df.assign(named_companies = lambda x: x.content.apply(lambda x: ",".join([x for x in re.findall(company_regex, x)])))
# Comments
# Find the companies
# Tally how often each extracted ticker-like token appears.
# NOTE(review): the comma branch is redundant — val.split(",") on a
# comma-free string yields [val], so one loop would cover both cases.
# NOTE(review): rows where no ticker was found contribute val == "" here,
# so c_dict accumulates an empty-string key — filter before reporting.
c_dict = {}
for val in token_df.named_companies.values:
    if "," in val:
        for v in val.split(","):
            if v in c_dict.keys():
                c_dict[v] += 1
            else:
                c_dict[v] = 1
    else:
        if val in c_dict.keys():
            c_dict[val] += 1
        else:
            c_dict[val] = 1
# Comments
# Find the Companies
company_starter_df = pd.DataFrame.from_dict(c_dict, orient="index", columns=["counts"]).reset_index().rename(columns={"index": "company"}).sort_values("counts", ascending=False).reset_index(drop=True)
# Comments
# Top 25 Companies
company_starter_df.head(25)
# Comments
# Regex Identifies ~2330 ticker-like tokens
# needs to be cleaned
company_starter_df.tail(40)
# Comments
# Examples of abbreviations that do not point to an actual company (even if the ticker may be listed)
# rows content can see the comments are talking about earnings reports
token_df[lambda x: x.named_companies.str.contains("ER")]
# Comments
# find the uppercase freakouts
token_df.assign(isUpper = lambda x:x.content.apply(str.isupper))
# Comments
# Trade Abbreviation terms
non_ticker_expressions = {
"ath ": "all time High",
"pdt": "pattern day trader",
"rh": "RobinHood",
"td": "TD Ameritrade",
"tos": "Think or Swim",
"leg": "a trade",
"pump": "overvalue",
"exp": "expiration",
"dd": "due diligence",
"er": "earnings report"
}
# Comments
# Read SQL table
with sql.connect("../data/interim/discord/discord.db") as con:
total_df_w_new_cols = pd.read_sql("SELECT * FROM comments", con=con)
# Comments
total_df_w_new_cols = total_df_w_new_cols.drop("pk", axis=1)
# Comments
total_df_w_new_cols.timestamp = pd.to_datetime(total_df_w_new_cols.timestamp)
# Comments
total_df_w_new_cols.info()
# Comments
total_df_w_new_cols
# Comments
total_df_w_new_cols[lambda x: x.content.str.contains(company_regex)]
# Comments
company_regex = re.compile(r"[A-Z]{2,4}")
# Comments
total_df_w_new_cols[lambda x: x.content.str.contains(company_regex)]
# Comments
# Key Abbreviations that point to a trade, result or trading platform
trade_abbvs = {"ath ": "all time High",
"pdt": "pattern day trader",
"rh": "RobinHood",
"td": "TD Ameritrade",
"tos": "Think or Swim",
"leg": "a trade",
"pump": "overvalue",
"exp": "expiration",
"dd": "due diligence",
"er": "earnings report",
"pm": "pre-market", # opening
"ah": "after-hours", # trading hours,
"am": "after-market", # analogous to ah
"gg": "good game", # bot comment and normal internet positive abbv
"ta": "technical analysis",
"est": "eastern standard time", # stock engages timezones
"ipo": "intial public offering", # new stocks
"ev": "electric vehicles", # or expected value
"us": "United States",
"itm": "in the money",
"otm": "out of the money",
}
# Comments
total_df = total_df_w_new_cols.copy(deep=True)
# Comments
companies_starter = add_specialized_token_column(company_regex, "content", "companies")
# Comments
companies_starter = companies_starter[lambda x: x.companies != ""]
# Comments
ticker_dict= {}
# Comments
# recount tickers
def getCompany(company_str):
    """Tally each comma-separated ticker in `company_str` into the
    module-level `ticker_dict` counter.

    Always returns None; the accumulation happens as a side effect, since
    this is used with Series.apply purely for its mutation of ticker_dict.
    """
    for ticker in company_str.split(","):
        ticker_dict[ticker] = ticker_dict.get(ticker, 0) + 1
    return None
# Comments
companies_starter.companies.apply(getCompany)
# Comments
company_counts = pd.DataFrame.from_dict(ticker_dict, "index", columns=["counts"]).reset_index().rename(columns={"index": "company"})
# Comments
company_counts = company_counts.sort_values("counts", ascending=False, ignore_index=True)
# Comments
company_counts.head(65)
# Comments
companies_starter[lambda x: x.companies.str.contains("ES")]
# Comments
trade_terms = [k.upper().strip() for k in list(trade_abbvs.keys())]
# Comments
# Approx tickers w/o trade terms
company_counts[lambda x: ~(x.company.isin(trade_terms))].shape
# Comments
company_counts_real = company_counts[lambda x: ~(x.company.isin(trade_terms))]
# Comments
company_counts_real.to_csv("../data/interim/trading_counts.csv", index=False)
# Comments
# Initial Counts
company_counts_real
# Comments
# A few generic words are still here but looks like a good start list
company_counts_real.head(50)
# Comments
companies_lowered = company_counts_real.company.apply(str.lower).values
# Comments
top100 = company_counts_real.head(100).company.values
# Comments
# Parser
hundred_dict = {k: "" for k in top100}
# Comments
# Initial c.c.
classifiable_companies[lambda x: x.Symbol == "WKHS"]
# Comments
# See which ones I can Get
classed_dict = {k: v for k, v in zip(classifiable_companies.Symbol.values, classifiable_companies.Name.values)}
# Comments
for k in hundred_dict.keys():
try:
hundred_dict[k] = classed_dict[k]
except KeyError:
hundred_dict[k] = ""
# Comments
empty_keys = []
for k, v in hundred_dict.items():
if v == "":
empty_keys.append(k)
# Comments
# Caught Tickers that are not listed (includes equity adjancent terms)
len(empty_keys)
# Comments
# Define ETFs, Equities that have been acquired and trade terms
hundred_dict['SPY'] = "S&P 500"
hundred_dict['ETF'] = "Exchange Traded Fund"
hundred_dict['BTC'] = "Bitcoin"
hundred_dict['VIX'] = "Volatility Index"
hundred_dict['SHLL'] = "Tortoise Acquistion"
hundred_dict['NQ'] = "Non-Qualified Options"
hundred_dict['MACD'] = "Moving Average Convergence Divergence"
hundred_dict['SQQQ'] = "Short QQQ ETF"
hundred_dict['WSB'] = "Wallstreet Bets"
hundred_dict['VWAP'] = "Volume-Weighted Average Trading"
hundred_dict['VXX'] = "Vix Short"
hundred_dict['APPL'] = "Apple Inc."
hundred_dict['QQQ'] = "Invesco QQQ Trust Shares"
hundred_dict['USD'] = "United States Dollar"
hundred_dict['IV'] = "Implied Volatility"
hundred_dict['GRAF'] = "VLDR"
# Comments
empty_keys = []
for k, v in hundred_dict.items():
if v == "":
empty_keys.append(k)
# Comments
len(empty_keys)
# Comments
# Can be purged, trading lingo such as SPAC = Special Purpose Acquisition Company, SL = Stop Loss
empty_keys
# +
# Write to find the 100 companies mentions and their lowercase counterparts.
# -
# Comments
# The 100 Trading Terms
hundred_dict
# Comments
#
list(hundred_dict.keys())
# Comments
# Financial Terms to Keep
terms_list = ['SPY', 'ETF' , 'BTC', 'VIX',
'NQ','MACD', 'SQQQ', 'WSB', 'VWAP','VXX',
'APPL','QQQ','USD','IV']
#hundred_dict['SHLL'] = "Tortoise Acquistion"
#hundred_dict['GRAF'] = "VLDR"
# Comments
# See which to skip when calling to API
skip_keys = empty_keys + terms_list
# Comments
# get time bounds for calls
start_date = total_df_w_new_cols.timestamp[0].strftime("%Y-%m-%d")
end_date = (total_df_w_new_cols.timestamp[31933]+ dt.timedelta(1)).strftime('%Y-%m-%d')
# Comments
end_date
# Comments
y_tickers = [f for f in list(hundred_dict.keys()) if f not in skip_keys + empty_keys]
# Comments
# ETF test
spy_ticker = yf.Ticker('SPY')
# Comments
sample_df = yf.download('SPY', start=start_date, end=end_date)
# Comments
# Download daily OHLCV for every ticker and build one long frame.
# Frames are collected in a list and concatenated once — pd.concat inside
# the loop (the original pattern) is quadratic in total rows.
_frames = []
for c in y_tickers:
    _temp_df = yf.download(c, start=start_date, end=end_date)
    _temp_df = _temp_df.assign(ticker=c).reset_index().rename(columns={"index": "date"})
    _frames.append(_temp_df)
stock_movements_df = pd.concat(_frames, axis=0, ignore_index=True) if _frames else pd.DataFrame()
# Comments
# Column names with spaces are awkward in pandas/SQL; normalize
stock_movements_df = stock_movements_df.rename(columns={"Adj Close": "Adj_Close"})
# Comments
# Recompute the call window (duplicate of the earlier cell)
start_date = total_df_w_new_cols.timestamp[0].strftime("%Y-%m-%d")
# Comments
end_date = (total_df_w_new_cols.timestamp[31933]+ dt.timedelta(1)).strftime('%Y-%m-%d')
# Comments
# Amount of Days between first and last comment collected
total_df_w_new_cols.timestamp[0] - total_df_w_new_cols.timestamp[31933]
# Comments
# Timedelta of trading window (9:30 to 4:00)
dt.datetime(2019, 8, 2, 16, 0, 0) - dt.datetime(2019, 8, 2, 9, 30, 0)
# Comments
# NOTE(review): `my_list` is not defined in any live cell visible here —
# presumably produced by the commented-out AlphaVantage pull below; confirm.
len(my_list)
# Comments
my_list[1]
# Comments
my_list[-1]
# +
# Comments
# A minute to minute pull for equities
# CSV_URL = f'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol=IBM&interval=1min&slice=year2month12&apikey={ALPHA_VANTAGE_API_KEY}'
# with requests.Session() as s:
#     download = s.get(CSV_URL)
#     decoded_content = download.content.decode('utf-8')
#     cr = csv.reader(decoded_content.splitlines(), delimiter=',')
#     new_list = list(cr)
# -
# Comments
# Last OHLC row from the AlphaVantage pull (defined by the commented cell above)
new_list[-1]
# Comments
# Locate GRAF in the ticker list (renamed to VLDR after its SPAC merger)
[i for i, x in enumerate(y_tickers) if x == "GRAF"]
# Comments
# Remove GRAF by value rather than the original hard-coded `pop(74)`,
# which silently removes the wrong element if the list order ever changes.
y_tickers.remove("GRAF")
# Comments
# Add the post-merger ticker
y_tickers.append("VLDR")
# Comments
# NOTE(review): `my_list` originates from a commented-out AlphaVantage cell
len(my_list)
# +
# Financial Data
# CSV_URL = f'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol=IBM&interval=1min&slice=year2month6&apikey={ALPHA_VANTAGE_API_KEY}'
# with requests.Session() as s:
#     download = s.get(CSV_URL)
#     decoded_content = download.content.decode('utf-8')
#     cr = csv.reader(decoded_content.splitlines(), delimiter=',')
#     new_list = list(cr)
# -
# Financial Data
new_list[-1]
# Financial Data
new_list[1]
# Financial Data
# Month slices (year2month6 .. year2month1) for the AlphaVantage loop below
numbs = [6, 5, 4, 3, 2, 1]
# Financial Data
# Position of TSLA in the ticker list
[i for i, x in enumerate(y_tickers) if x == "TSLA"]
# +
# Financial Data
# Rate-limited AlphaVantage minute-bar pull into SQLite (ran once, then
# commented out; 68s sleep every 5 calls respects the free-tier limit).
# f = 0
# for tick in ticker_set:
#     print(f"Starting {tick}")
#     for n in numbs:
#         CSV_URL = f'https://www.alphavantage.co/query?function=TIME_SERIES_INTRADAY_EXTENDED&symbol={tick}&interval=1min&slice=year2month{n}&apikey={ALPHA_VANTAGE_API_KEY}'
#         download = pd.read_csv(CSV_URL, sep=",", header=0, encoding="utf-8").assign(ticker = tick)
#         with sql.connect("../data/interim/stocks.db") as con:
#             download.to_sql("minute", con=con, index=False, if_exists="append")
#     f += 1
#     if f % 5 == 0:
#         print(f"Added {f} Sleeping...")
#         time.sleep(68)
#     print(f"Added {tick}")
# +
# Financial Data
# with sql.connect("../data/interim/stocks.db") as con:
#     download.to_sql("minute", con=con, index=False, if_exists="append")
# +
# Financial Data
# with sql.connect("../data/interim/stocks.db") as con:
#     t = pd.read_sql("SELECT * FROM minute WHERE ticker = 'TSLA'", con=con)
# -
# Financial Data
# Tickers not yet pulled. NOTE(review): `t` is defined only in the
# commented-out SQL cell above — this cell depends on it having run.
ticker_set = (set(y_tickers)).difference(set(t.ticker.unique()))
# Financial Data
# Exclude symbols already pulled in earlier sessions
ticker_set = ticker_set.difference({"ING", "TLRY", "MARA", "YOU", "UONE", "IZEA","AREC","BLNK","HD","SPCE","SHLL","GILD","MO","HEAR","CRM","XERS", 'TSLA', "ON"})
# Financial Data
t.time = pd.to_datetime(t.time)
# Financial Data
# First minute bar collected
t.time.min()
# Financial Data
# Last minute bar collected
t.time.max()
# Financial Data
# Company names for every ticker, mapping renamed VLDR back to its GRAF
# entry. BUG FIX: the original put the `if/else` in the comprehension's
# filter clause (`for y in y_tickers if y != ... else ...`), which is a
# SyntaxError — a conditional expression belongs before the `for`.
[hundred_dict[y] if y != "VLDR" else hundred_dict["GRAF"] for y in y_tickers]
# Financial Data
# Hand-built ticker -> human-readable name map used as news search terms.
# NOTE(review): 'APPL' is a common chat misspelling of AAPL kept on purpose;
# the '<NAME>' value for ACB looks like a scrubbed placeholder — confirm.
news_dict = {'SPY': 'S&P 500',
 'TSLA': 'Tesla',
 'NIO': 'NIO',
 'AMD': 'Advanced Micro Devices',
 'AAPL': 'Apple',
 'DKNG': 'DraftKings',
 'PLTR': 'Palantir Technologies',
 'PT': 'Pintec Technology',
 'ACB': '<NAME>',
 'ZM': 'Zoom',
 'BABA': 'Alibaba',
 'FB': 'Facebook',
 'BTC': 'Bitcoin',
 'BA': 'Boeing',
 'WKHS': 'Workhorse Group',
 'SQ': 'Square',
 'BYND': 'Beyond Meat',
 'NVDA': 'NVIDIA Corporation',
 'IDEX': 'Ideanomics',
 'AMZN': 'Amazon',
 'PFE': 'Pfizer',
 'MSFT': 'Microsoft',
 'NKLA': 'Nikola',
 'GNUS': 'Genius Brands',
 'FSLY': 'Fastly',
 'SHLL': 'Tortoise Acquistion',
 'PTON': 'Peloton',
 'RSI': 'Rush Street Interactive',
 'ROKU': 'Roku',
 'PINS': 'Pinterest',
 'CRSR': 'Corsair Gaming',
 'PLUG': 'Plug Power Inc.',
 'WWR': 'Westwater Resources',
 'SPAQ': 'Spartan Acquisition',
 'HYLN': 'Hyliion',
 'SNDL': 'Sundial Growers',
 'JD': 'JD.com',
 'SNAP': 'Snap Inc.',
 'RKT': 'Rocket Companies Inc.',
 'ES': 'Eversource Energy',
 'JKS': 'JinkoSolar',
 'NNDM': 'Nano Dimension',
 'GME': 'GameStop',
 'SE': 'Sea Limited',
 'IT': 'Gartner',
 'MA': 'Mastercard',
 'SQQQ': 'Short QQQ ETF',
 'KO': 'Coca-Cola Company',
 'MARA': 'Marathon Digital',
 'HEAR': 'Turtle Beach',
 'CRM': 'Salesforce',
 'CGC': 'Canopy Growth',
 'IZEA': 'IZEA Worldwide',
 'CBAT': 'CBAK Energy Technology Inc. Common Stock',
 'VXX': 'Vix Short',
 'APPL': 'Apple',
 'INTC': 'Intel',
 'SPCE': 'Virgin Galactic',
 'AAL': 'American Airlines',
 'SPI': 'SPI Energy',
 'ON': 'ON Semiconductor',
 'AREC': 'American Resources',
 'NFLX': 'Netflix',
 'QQQ': 'Invesco QQQ Trust Shares',
 'UONE': 'Urban One',
 'BLNK': 'Blink Charging',
 'HD': 'Home Depot',
 'ING': 'ING Group',
 'DIS': 'Disney',
 'MO': 'Altria',
 'TLRY': 'Tilray',
 'ADTX': 'Aditxt',
 'YOU': 'Clear Secure',
 'LMNL': 'Liminal BioSciences',
 'JMIA': 'Jumia Technologies',
 'EXAS': 'Exact Sciences',
 'SOLO': 'Electrameccanica Vehicles',
 'XERS': 'Xeris Biopharma',
 'DPW': 'Ault Global',
 'GILD': 'Gilead Sciences',
 'GRAF': 'Velodyne Lidar'}
# Financial Data
# Symbols in the name map with no pulled daily data.
# NOTE(review): `daily_data` is loaded in the next cell — this one only
# works after it has run (out-of-order notebook execution).
set(news_dict.keys()).difference(set(daily_data.ticker.unique()))
# Financial Data
# Load previously pulled daily bars and TSLA minute bars from SQLite
with sql.connect("../data/interim/stocks.db") as con:
    daily_data = pd.read_sql("SELECT * FROM daily", con=con, index_col="pk", parse_dates={"Date": "%Y-%m-%d"})
    minute = pd.read_sql("SELECT * FROM minute WHERE ticker = 'TSLA'", con=con, parse_dates={"time": "%Y-%m-%d %H:%M:%S"})
# Financial Data
# Number of companies with daily bars
len(daily_data.ticker.unique())
# Financial Data
# Spot-check TSLA daily rows
daily_data[lambda x: (x.ticker == "TSLA")]
# Financial Data
# Yahoo API. NOTE(review): this loop duplicates the earlier download cell
# and appends the same rows into `stock_movements_df` a second time —
# running both cells double-counts every ticker; confirm intent.
for c in y_tickers:
    _temp_df = yf.download(c, start=start_date, end=end_date)
    _temp_df = _temp_df.assign(ticker=c).reset_index().rename(columns={"index": "date"})
    stock_movements_df = pd.concat([stock_movements_df, _temp_df], axis=0, ignore_index=True)
# Financial Data
# Yahoo API sanity check on the VXX ETN
vxx = yf.ticker.Ticker("VXX")
dl = vxx.history("5y", start="2019-01-01", end="2021-12-31")
vxx_info = vxx.info
# Financial Data
dl
# Financial Data
vxx_info
# Financial Data
# Symbols still missing from the pulled daily data
set(news_dict.keys()).difference(set(daily_data.ticker.unique()))
# Financial Data
# ETFs and Other Unique Securities I have interest in and can query the API with
non_companies = ["BTC-USD", "QQQ","SPY", "VXX", "SQQQ"]
# Grab Data and Calculate Volatility and Turnover via OHLC Data
# Turnover will be approx. b/c of buybacks and secondary offerings
def create_daily_data(ticker):
tick = yf.ticker.Ticker(ticker)
historical_data = tick.history("5y")
outstanding = tick.info.get("sharesOutstanding")
if outstanding == None:
outstanding = 1
daily_close = historical_data["Close"]
pct_change = daily_close.pct_change().fillna(0)
periods = 2
# calc volatility
vola = (pct_change.rolling(periods).std() * np.sqrt(periods)).fillna(0)
historical_data = historical_data.assign(Volatility = vola)
historical_data = historical_data.assign(Turnover = lambda x: x.Volume / outstanding)
historical_data = historical_data.assign(company = ticker)
return historical_data.reset_index().round({"Volatility": 6, "Turnover": 6})
# Financial Data
# list of companies with pulled daily data
companies = list(daily_data.ticker.unique())
# Financial Data
# Add the renamed (ex-GRAF) ticker
companies.append("VLDR")
# Financial Data
# Full list of Securites to Analyze
companies = companies + non_companies
# Financial Data
# NOTE(review): `full_daily_data` must already exist here — it is only
# initialized to an empty DataFrame in a later cell; this notebook was run
# out of order. pd.concat inside the loop is also quadratic.
for i, l in enumerate(companies):
    new_df = create_daily_data(l)
    full_daily_data = pd.concat([full_daily_data, new_df], axis=0, ignore_index=True)
    if i % 5 == 0:
        print(f"Finished with {i}")
# Financial Data
# SQL-friendly column name (no spaces)
full_daily_data = full_daily_data.rename(columns={"Stock Splits": "Stock_Splits"})
# Analyst Recommendations
# Probe the Yahoo recommendations feed with Disney
dis = yf.ticker.Ticker('DIS')
recommend_df = dis.recommendations
# Analyst Recommendations
recommend_df
# Analyst Recommendations
# Firms Ratings Since 2020
recommend_df[lambda x: x.index < dt.datetime(2020, 12, 31)].value_counts('Firm')
# Analyst Recommendations
dis_info = dis.info
# Analyst Recommendations
dis_info
# Analyst Recommendations
# Largest institutional holders
dis.get_institutional_holders()
# Analyst Recommendations
# Ratings issued during Dec 2020 onward
dis.recommendations[lambda x: x.index >= dt.datetime(2020, 12, 1)]
# Selenium Setup
# "eager" returns control at DOMContentLoaded instead of full page load
caps = DesiredCapabilities().CHROME
caps["pageLoadStrategy"] = "eager"
dates = []
# Matches investing.com "?cid=12345" URL suffixes. FIX: raw string so \? and
# \d are regex escapes, not (deprecation-warned) string escapes.
pat = re.compile(r"\?cid=\d+")
# ### Wanted Columns for Investing Sites
# * ticker of company
# * title of article
# * category
# * content (article content)
# * release_date
# * provider / publisher
# * url
# * article_id
# News Articles
# Investing.com Windows
# ~May 2020 - May 2021 to match the busiest time periods of chat data
co_array= [{"company": "TSLA", "start": 330, "stop": 185, "url": "https://www.investing.com/equities/tesla-motors-news"},
{"company": "NIO", "start": 23, "stop":13, "url": "https://www.investing.com/equities/nio-inc-news"},
{"company":"AMD", "start":26, "stop":14, "url": "https://www.investing.com/equities/adv-micro-device-news"},
{"company":"AAPL","start": 375, "stop":350, "url": "https://www.investing.com/equities/apple-computer-inc-news"},
{"company":"DKNG", "start":22, "stop":8, "url": "https://www.investing.com/equities/diamond-eagle-acquisition-corp-news"},
{"company":"PLTR", "start":9, "stop":1, "url": "https://www.investing.com/equities/palantir-technologies-inc-news"},
{"company":"ZM", "start":29, "stop":12, "url": "https://www.investing.com/equities/zoom-video-communications-news"},
{"company":"BABA", "start":100, "stop":45, "url": "https://www.investing.com/equities/alibaba-news"},
{"company": "HYLN","start":1 , "end": 1, "url":"https://www.investing.com/equities/tortoise-acquisition-corp-opinion"},
{"company": "LMNL","start": 1, "end": 1, "url":"https://www.investing.com/equities/prometic-life-sciences-inc.-opinion"},
{"company": "BTC-USD","start": 1000, "end": 450, "url":"https://www.investing.com/crypto/bitcoin/news/"},
{"company": "IDEX","start": 1, "end": 1, "url":"https://www.investing.com/equities/you-on-demand-holdings-inc-news"},
{"company": "ACB","start": 10, "stop": 1, "url":"https://www.investing.com/equities/aurora-cannabis-news?cid=1055316"},
{"company": "CGC","start": 6, "stop": 1, "url":"https://www.investing.com/equities/tweed-marijuana-inc-news?cid=1057241"},
{"company": "IZEA","start": 1, "stop": 1, "url":"https://www.investing.com/equities/izea-inc-news"},
{"company": "CBAT","start": 1, "stop": 1, "url":"https://www.investing.com/equities/china-bak-battery-news"},
{"company":"BA", "start":160, "stop":50, "url": "https://www.investing.com/equities/boeing-co-news"},
{"company":"WKHS", "start":3, "stop":1, "url": "https://www.investing.com/equities/amp-holding-inc-news"},
{"company":"PFE", "start":250, "stop":238, "url": "https://www.investing.com/equities/pfizer-news"},
{"company":"FB", "start":404, "stop":130, "url": "https://www.investing.com/equities/facebook-inc-news"},
{"company":"SQ", "start":20, "stop":7, "url": "https://www.investing.com/equities/square-inc-news"},
{"company":"BYND", "start":21, "stop":8, "url": "https://www.investing.com/equities/beyond-meat-inc-news"} ,
{"company":"NVDA", "start":52, "stop":23, "url": "https://www.investing.com/equities/nvidia-corp-news"},
{"company":"AMZN", "start":335, "stop":155, "url": "https://www.investing.com/equities/amazon-com-inc-news"},
{"company": "MSFT","start": 220, "stop": 70, "url":"https://www.investing.com/equities/microsoft-corp-news"},
{"company":"NKLA", "start":13, "stop":3, "url": "https://www.investing.com/equities/nikola-corp-news/"},
{"company":"PTON", "start":30, "stop":12, "url":"https://www.investing.com/equities/peloton-interactive-inc-news"},
{"company":"ROKU", "start":16, "stop":7, "url": "https://www.investing.com/equities/roku-news"},
{"company":"PINS", "start":15, "stop":7, "url": "https://www.investing.com/equities/pinterest-inc-news"},
{"company": "DIS", "start": 57, "stop": 25, "url": "https://www.investing.com/equities/disney-news"},
{"company": "GNUS","start": 2, "stop": 1, "url":"https://www.investing.com/equities/genius-brands-intl.-news"},
{"company": "FSLY","start": 5, "stop": 1, "url":"https://www.investing.com/equities/fastly-inc-news"},
{"company": "RSI","start": 1, "stop": 1, "url":"https://www.investing.com/equities/dmy-technology-group-news"},
{"company": "CRSR","start": 1, "stop": 1, "url":"https://www.investing.com/equities/corsair-gaming-inc-news"},
{"company": "PLUG","start": 10, "stop": 1, "url":"https://www.investing.com/equities/plug-power-news"},
{"company": "WWR","start": 1, "stop": 1, "url":"https://www.investing.com/equities/uranium-resources-news"},
{"company": "SNDL","start": 5, "stop": 1, "url":"https://www.investing.com/equities/sundial-growers-inc-news"},
{"company": "JD","start": 35, "stop": 15, "url":"https://www.investing.com/equities/jd.com-inc-adr-news"},
{"company": "SNAP","start": 28, "stop": 10, "url":"https://www.investing.com/equities/snap-inc-news"},
{"company": "RKT","start": 2, "stop": 1, "url":"https://www.investing.com/equities/rocket-companies-inc-news"},
{"company": "ES","start": 3, "stop": 1, "url":"https://www.investing.com/equities/northeast-utilities-news"},
{"company": "JKS","start": 3, "stop": 1, "url":"https://www.investing.com/equities/jinkosolar-holding-comp-ltd-news"},
{"company": "NNDM","start": 2, "stop": 1, "url":"https://www.investing.com/equities/nano-dimension-ltd-news"},
{"company": "GME","start": 99, "stop": 20, "url":"https://www.investing.com/equities/gamestop-corp-news"},
{"company": "SE","start": 7, "stop": 1, "url":"https://www.investing.com/equities/sea-limited-news"},
{"company": "IT","start": 10, "stop": 1, "url":"https://www.investing.com/equities/gartner-news"},
{"company": "MA","start": 40, "stop": 10, "url":"https://www.investing.com/equities/mastercard-cl-a-news"},
{"company": "KO","start": 38, "stop": 8, "url":"https://www.investing.com/equities/coca-cola-co-news"},
{"company": "MARA","start": 4, "stop": 2, "url":"https://www.investing.com/equities/marathon-pa-news"},
{"company": "HEAR","start": 1, "stop": 1, "url":"https://www.investing.com/equities/parametric-sound-corp-news"},
{"company": "CRM","start": 38, "stop": 12, "url":"https://www.investing.com/equities/salesforce-com-news"},
{"company": "IZEA","start": 1, "stop": 1, "url":"https://www.investing.com/equities/izea-inc-news"},
{"company": "CBAT","start": 1, "stop": 1, "url":"https://www.investing.com/equities/china-bak-battery-news"},
{"company": "INTC","start": 58, "stop": 20, "url":"https://www.investing.com/equities/intel-corp-news"},
{"company": "SPCE","start": 16, "stop": 1, "url":"https://www.investing.com/equities/social-capital-hedosophia-news"},
{"company": "AAL","start": 75, "stop": 23, "url":"https://www.investing.com/equities/american-airlines-group-news"},
{"company": "SPI","start": 1, "stop": 1, "url":"https://www.investing.com/equities/spi-energy-co-ltd-news"},
{"company": "ON","start": 2, "stop": 1, "url":"https://www.investing.com/equities/on-semiconductor-news"},
{"company": "AREC","start": 2, "stop": 1, "url":"https://www.investing.com/equities/american-resources-news"},
{"company": "NFLX","start": 92, "stop": 31, "url":"https://www.investing.com/equities/netflix,-inc.-news"},
{"company": "UONE","start": 1, "stop": 1, "url":"https://www.investing.com/equities/radio-one-(a)-news"},
{"company": "BLNK","start": 4, "stop": 3, "url":"https://www.investing.com/equities/car-charging-group-news"},
{"company": "HD","start": 28, "stop": 11, "url":"https://www.investing.com/equities/home-depot-news"},
{"company": "ING","start": 7, "stop": 2, "url":"https://www.investing.com/equities/ing-group-nv-news"},
{"company": "MO","start": 10, "stop": 1, "url":"https://www.investing.com/equities/altria-group-news"},
{"company": "TLRY","start": 11, "stop": 4, "url":"https://www.investing.com/equities/tilray-inc-news"},
{"company": "ADTX","start": 1, "stop": 1, "url":"https://www.investing.com/equities/aditx-therapeutics-inc-news"},
{"company": "JMIA","start": 1, "stop": 1, "url":"https://www.investing.com/equities/jumia-technologies-ag-news"},
{"company": "EXAS","start": 3, "stop": 1, "url":"https://www.investing.com/equities/exact-sciences-co-news"},
{"company": "SOLO","start": 1, "stop": 1, "url":"https://www.investing.com/equities/electrameccanica-vehicles-news"},
{"company": "XERS","start": 1, "stop": 1, "url":"https://www.investing.com/equities/xeris-pharmaceuticals-news"},
{"company": "DPW","start": 13, "stop": 4, "url":"https://www.investing.com/equities/deutsche-post-news"},
{"company": "GILD","start": 30, "stop": 6, "url":"https://www.investing.com/equities/gilead-sciences-inc-news"},
{"company": "VLDR","start": 1, "stop": 1, "url":"https://www.investing.com/equities/graf-industrial-corp-news"},
{"company": "QQQ","start": 9, "stop": 1, "url":"https://www.investing.com/etfs/powershares-qqqq-news"},
{"company": "SPY","start": 190, "stop": 5, "url":"https://www.investing.com/etfs/spdr-s-p-500-news"},
{"company": "VXX","start": 1, "stop": 1, "url":"https://www.investing.com/etfs/ipath-series-b-sp500-vix-st-futures-news"},
{"company": "SQQQ","start": 1, "stop": 1, "url":"https://www.investing.com/etfs/ultrapro-short-qqq-news"}]
# News Articles
# Scraper: walk each company's investing.com news pages, open every on-site
# article in a background tab, and collect id/title/link/date/publisher/body
# rows into test_df.
driver = webdriver.Chrome("../../../Python/scraping/chromedriver2.exe", desired_capabilities=caps)
i = 0
test_df = pd.DataFrame()
for c in co_array:
    base_url = c["url"]
    ending_url = ""
    # Some URLs carry a "?cid=NNN" suffix; detach it so page numbers can be
    # inserted between the base URL and the suffix.
    if re.findall(pat, c["url"]) != []:
        ending_url = re.findall(pat, base_url)[0]
        base_url = re.sub(pat, "", base_url)
    starting_url = base_url + "/" + str(c["stop"]) + ending_url
    ticker = c["company"]
    shuffle = False
    # Very deep archives: sample 3 random articles per page instead of all
    if (c["start"] - c["stop"]) >= 100:
        shuffle = True
    elif c["start"] == c["stop"]:
        c["start"] = 2
    for k in range(c["stop"], c["start"], 1):
        driver.get(starting_url)
        main_window = driver.window_handles[0]
        dater = driver.find_elements(By.CSS_SELECTOR, "section#leftColumn > div.mediumTitle1")
        for d in dater:
            articles = d.find_elements(By.TAG_NAME, "article")
            if shuffle:
                random.shuffle(articles)
                articles = articles[:3]
            for art in articles:
                art_id = art.get_attribute("data-id")  # renamed: was `id`, shadowing the builtin
                link = art.find_element(By.TAG_NAME, "a").get_attribute("href")
                details = art.find_element(By.TAG_NAME, "div")
                title = art.find_element(By.CLASS_NAME, "title").text
                if title == "":
                    # BUG FIX: the original used the bare expression `next`,
                    # a no-op that skipped nothing; `continue` actually skips
                    # title-less (ad/placeholder) entries.
                    continue
                for j, s in enumerate(details.find_elements(By.TAG_NAME, "span")):
                    if j == 1:
                        publisher = s.text.replace("By", "").strip()
                    elif j == 2:
                        date = s.text.replace("-", "").strip()
                link = art.find_element(By.TAG_NAME, "a").get_attribute("href")
                # Only open same-site links; off-site articles keep the
                # previously fetched `artsy` body (pre-existing quirk).
                if link.startswith("https://www.investing.com"):
                    driver.execute_script(f"window.open(\"{link}\",\"_blank\");")
                    driver.switch_to.window(window_name=driver.window_handles[1])
                    artsy = driver.find_element(By.CSS_SELECTOR, "div.articlePage").text
                    driver.close()
                    driver.switch_to.window(window_name=driver.window_handles[0])
                test_df.loc[i, ["id", "title", "link", "date", "publisher", "article", "ticker"]] = [art_id, title, link, date, publisher, artsy, ticker]
                i += 1
        starting_url = base_url + "/" + str(k) + ending_url
        time.sleep(4)
    print(f"finished with {ticker}")
driver.quit()
# News Articles
# Safety net in case the loop above aborted with the browser open
driver.quit()
# News Articles
# Scraped article rows
test_df
# News Articles
# Checkpoint the raw scrape before cleaning
test_df.to_csv("../data/interim/articlesCK.csv", index_label="pk")
# News Articles
# Strip the "... (Reuters) - " byline prefix from one sample article
re.sub(re.compile(".+\(Reuters\) - "),"", test_df.loc[11202, "article"].replace("\n", ""))
# News Articles
# Article counts per publisher
test_df.groupby("publisher").count().sort_values("id", ascending=False)#.id.describe()
# News Articles
# sample article
sample_text_art = test_df.loc[4152, "article"].replace("\n", " ")#.replace("© Reuters. FILE PHOTO: American Airlines passenger planes crowd a runway where they are parked due to flight reductions to slow the spread of coronavirus disease (COVID-19), at Tulsa International Airport in Tulsa, Oklahoma, U.S. March 23, 2020. REUTERS/<NAME>/ By <NAME> (Reuters) -", "")
# News Articles
sample_text_art
# News Articles
# Rows where the scraped publisher cell absorbed the date text
test_df.loc[test_df.publisher == "Reuters - Sep 08, 2020 5", "date"]
# News Articles
test_df.loc[1879, :]
# News Articles
test_df.publisher.unique()
# News Articles
# Some scraped "publisher" cells absorbed the date text; split each bad
# value back into a clean date plus the publisher name "Reuters".
# (Data-driven loop replaces four copy-pasted pairs of assignments.)
_publisher_fixes = {
    "Reuters - Sep 08, 2020 5": "Sep 08, 2020",
    "Reuters - Jan 28, 2021": "Jan 28, 2021",
    "Reuters - Jul 29, 2020": "Jul 29, 2020",
    "Reuters - Oct 22, 2020": "Oct 22, 2020",
}
for _bad_pub, _fixed_date in _publisher_fixes.items():
    _mask = test_df.publisher == _bad_pub
    test_df.loc[_mask, "date"] = _fixed_date
    test_df.loc[_mask, "publisher"] = "Reuters"
# News Articles
# Verify the publisher values are clean now
test_df.publisher.unique()
# News Articles
test_df[lambda x: x.publisher == 'Reuters']
# News Articles
# Articles still retain some nonstory metadata
test_df[lambda x: x.publisher == 'Reuters'].loc[0, "article"]
# News Articles
# One sample article per publisher, to survey each site's boilerplate
for j in test_df.publisher.unique():
    print("="*30)
    print(j)
    print("="*30)
    print(test_df[lambda x: x.publisher == j].iloc[0, -2])
# News Articles
# Articles can have article, publisher and author at the start of the headlines since investing.com is pulling data from a few different sites.
# So they need to be cleaned with regular expressions:
# .+ (Reuters) - x
# Investing.com - | -- x
# (Bloomberg) -- x
# StockNews => .+\n | copyright Reuters + Title\n
# DailyCoin => Title\n
# BTC Peers => Title\n
# CoinQuora => Title\n
# Seeking Alpha => Title\n
# copyright Reuters + Title\n
# News Articles
# Wire-service boilerplate patterns to strip from article bodies
b = -1  # global cursor pairing each apply() call with its positional index
r_regex = re.compile(r".*\(Reuters\) -")
iv_regex = re.compile(r".*Investing.com (–|--|-)")
bloom_regex = re.compile(r".*\(Bloomberg\) --")
# News Articles
def cleanArticle(article, title, index):
    """Return `article` flattened to one line with its headline and any
    wire-service prefix (Reuters / Investing.com / Bloomberg) removed.

    `title` is the title Series and `index` the article Series' index; the
    global counter `b` walks them positionally so this works as the callable
    in Series.apply. BUG FIX: the title is now re.escape()d before being
    interpolated into a pattern — titles containing regex metacharacters
    ("(", "+", "?", ...) previously altered or broke the substitution.
    """
    global b
    b += 1
    i = index[b]
    article = article.replace("\n", " ").strip()
    title = title[i]
    article = re.sub(re.compile(r".*" + re.escape(title)), "", article)
    article = re.sub(r_regex, "", article)
    article = re.sub(iv_regex, "", article)
    article = re.sub(bloom_regex, "", article)
    return article.strip()
# News Articles
# Apply the cleaner positionally over every article body
test_df = test_df.assign(articles2 = lambda x: x.article.apply(cleanArticle, title=x.title, index=x.article.index))
# News Articles
# Persist deduplicated rows with the cleaned body replacing the raw one
test_df.drop_duplicates().reset_index(drop=True).drop("article", axis=1).rename(columns={"articles2": "article"}).to_csv("../data/interim/articlesCK.csv", index_label="pk")
# News Articles
# Parse the scraped date strings into datetimes. FIX: the original parsed
# `cleaned_articles.date` — a frame only loaded much later in the notebook
# (and deduplicated, so index-misaligned). This frame's own column is the
# intended, order-independent input.
test_df.date = pd.to_datetime(test_df.date)
# News Articles
# Per-ticker article date coverage: first/last article date and total count
test_df.sort_values(["ticker", "date"]).groupby("ticker").agg(["first", "last", "count"]).sort_values(["count", 'first'], ascending=[False, True])
# Analyst Recommendations
# Rating counts by firm and grade
recommend_df.groupby(["Firm", "To Grade"]).count().sort_index(level=0).tail(20)
# Financial Data
# Vanguard GICS Sectors will provide baselines to compare portfolio to
gics_sectors = {"Communication Services ETF":"VOX",
                "Consumer Discretionary ETF":"VCR",
                "Consumer Staples": "VDC",
                "Energy ETF": "VDE",
                "Financials ETF": "VFH",
                "Health Care ETF": "VHT",
                "Industrials ETF": "VIS",
                "Information Technology ETF": "VGT",
                "Materials ETF" : "VAW",
                "Real Estate ETF": "VNQ",
                "Utilities ETF": "VPU"}
# Financial Data
# Accumulator for the sector-ETF pulls below
full_daily_data = pd.DataFrame()
# Financial Data
# Pull OHLCV + derived columns for every sector ETF
for i, l in enumerate(gics_sectors.values()):
    new_df = create_daily_data(l)
    full_daily_data = pd.concat([full_daily_data, new_df], axis=0, ignore_index=True)
    print(f"Finished with {l}")
# Financial Data
# SQL-friendly column name (no spaces)
full_daily_data = full_daily_data.rename(columns={"Stock Splits": "Stock_Splits"})
# Financial Data
# Reload the company daily bars to merge with the sector ETFs
with sql.connect("../data/interim/stocks.db") as con:
    company_dailys = pd.read_sql("SELECT * FROM daily", con=con)
# Financial Data
full_daily_data.index.name = "pk"
# Financial Data
daily_with_gics = pd.concat([company_dailys, full_daily_data], axis=0, ignore_index=True)
# Financial Data
daily_with_gics["Date"] = pd.to_datetime(daily_with_gics["Date"])
# Financial Data
company_dailys[lambda x: x.company == "AMZN"].drop("pk", axis=1)
# Financial Data
# Wanted stocks contain some IPOs (fewer rows of history)
company_dailys.groupby("company").count().sort_values("Open", ascending=False)
# Comments
# Pull all chat comments that contain at least one emoji
with sql.connect("../data/interim/discord/discord.db") as con:
    emote_df = pd.read_sql("SELECT * FROM comments WHERE emojis != ''", con=con)
# Comments
# Running tally of emoji frequencies across all comments
emote_agg = {}
def agg_emojis(comma_sep_emotes):
    """Accumulate counts for each comma-separated emoji into `emote_agg`.

    Intended for Series.apply; mutates the module-level dict in place and
    returns None. (dict.get replaces the original if/else membership check.)
    """
    for e in comma_sep_emotes.split(","):
        emote_agg[e] = emote_agg.get(e, 0) + 1
# Comments
# Tally every emoji across all comments (fills emote_agg as a side effect)
emote_df.emojis.apply(agg_emojis)
# Comments
emote_agg_df = pd.Series(emote_agg).reset_index(name="count").rename(columns={"index": "emote"})
# Comments
# NOTE(review): emoji.UNICODE_EMOJI was removed in emoji>=2.0 — this cell
# requires an older emoji package version; confirm pinned version.
emoji_parse = emoji.UNICODE_EMOJI
# Comments
emote_agg_df = emote_agg_df.assign(unicode_name = lambda e: e.emote.apply(lambda x: emoji_parse[x])).sort_values("count", ascending=False)
# +
# with sql.connect("../data/interim/discord/discord.db") as con:
#     emote_agg_df.to_sql("chatEmotes", con=con, index_label='pk', if_exists="replace")
# -
# Comments
# NOTE(review): other cells access this frame via `.company` — confirm the
# daily table actually has a `symbol` column here.
tickers_array = company_dailys.symbol.unique()
# Financial Data
def stringNone(na):
    """Return `na` unchanged, or "" when it is None (keeps DataFrame cells
    string-typed). Uses identity `is not None` instead of `!= None`."""
    return na if na is not None else ""
# Fiancial Data
# Yahoo API
# Yahoo API: pull selected profile fields for each ticker, resuming at
# position 77 (earlier rows were filled in a previous run of this cell).
# On failure, back off 90s once and retry — Yahoo rate-limits bursts.
keys = ["quoteType", "longName", "shortName", "logo_url", "sector", "industry", "holdings", "fundFamily", "category"]
k = 77
# company_info_df = pd.DataFrame()
for t in tickers_array[77:]:
    try:
        tick_info = yf.ticker.Ticker(t).get_info()
    except Exception:  # narrowed from bare `except:` (which also caught KeyboardInterrupt)
        print("Sleeping...")
        time.sleep(90)
        tick_info = yf.ticker.Ticker(t).get_info()
    for key in keys:
        company_info_df.loc[k, key] = stringNone(tick_info.get(key))
    k += 1
    print(f"{t} Done.")
# Financial Data
# Funds have no "sector"; fall back to their "category" field
company_info_df.loc[:, "sector"] = company_info_df.sector.apply(lambda v: v if v != "" else np.nan).fillna(company_info_df.category)
# Financial Data
# Attach the ticker symbols (positional alignment with tickers_array)
company_info_df.loc[:, "symbol"] = pd.Series(tickers_array)
# Financial Data
# Distinct sector names for equities (input to the mapping below)
company_info_df[lambda x: (x.quoteType == "EQUITY")].sector.unique()
# Financial Data
# Inspect the last rows pulled
company_info_df.iloc[-15:, :]
# Financial Data
# Map Yahoo sector/category labels onto GICS-like sector names
sectors = {
    "Large Growth": "Technology",
    "Large Blend": "Mutual Fund",
    "Trading--Miscellaneous": "Inverse Equity",
    "Trading--Inverse Equity": "Inverse Equity",
    "Communications": "Communications Services",
    "Equity Energy": "Energy",
    "Financial": "Financials",
    "Financial Services": "Financials",
    "Health": "Healthcare",
    'Basic Materials': 'Materials',
    "Natural Resources": "Materials",
    "Consumer Defensive": "Consumer Staples",
    "Consumer Cyclical": "Consumer Discretionary",
}
# Financial Data
# Normalize Yahoo sector names to the GICS-like labels defined above;
# dict.get(x, x) replaces the original `x if x not in sectors.keys() else
# sectors[x]` (same behavior: unmapped values pass through unchanged).
company_info_df_w_sectors = company_info_df.copy(deep=True)
company_info_df_w_sectors.loc[:, "sector"] = company_info_df_w_sectors.loc[:, "sector"].apply(lambda x: sectors.get(x, x))
# Financial Data
# Rows with no sector at all — unique asset, possibly a currency
company_info_df_w_sectors[lambda f: f.sector == ""]
# Financial Data
# Hand-set the Bitcoin logo (no Yahoo logo_url for crypto)
company_info_df_w_sectors.loc[72, "logo_url"] = "https://bitcoin.org/img/icons/opengraph.png?1641218872"
# News Articles
# Reload the cleaned article checkpoint written earlier
cleaned_articles = pd.read_csv("../data/interim/articlesCK.csv")
# Financial Data
# Equity symbols only (funds/ETFs have no analyst recommendations)
equitys = company_info_df_w_sectors[lambda x: x.quoteType == "EQUITY"].symbol.values
# Analyst Recommendations
# Pull each equity's analyst recommendation history, resuming at index 29;
# back off 60s once and retry when Yahoo rate-limits.
recommendation_history = pd.DataFrame()
for sym in equitys[29:]:
    try:
        recom = yf.ticker.Ticker(sym).get_recommendations()
    except Exception:  # narrowed from bare `except:`
        time.sleep(60)
        recom = yf.ticker.Ticker(sym).get_recommendations()
    # BUG FIX: the original wrote the bare expression `next` here — a no-op
    # that skipped nothing (and then printed "Done with" for skipped
    # symbols). `continue` actually skips tickers with no recommendations.
    if not isinstance(recom, pd.DataFrame):
        continue
    recom = recom.reset_index().assign(symbol=sym)
    recommendation_history = pd.concat([recommendation_history, recom], axis=0, ignore_index=True)
    print("Done with {}".format(sym))
# Analyst Recommendations
# Every distinct grade label used by any firm (to or from)
pd.concat([recommendation_history["To Grade"], recommendation_history["From Grade"]], axis=0).unique()
# Analyst Recommendations
# Translate differing analyst terms onto a common 5-level ordinal scale
# (Very Bearish .. Very Bullish); "" maps to "" for missing grades.
analyst_dict = {
    'Overweight': 'Bullish',
    'Hold': 'Neutral',
    'Buy': 'Very Bullish',
    'Neutral': 'Neutral',
    'Underperform': 'Bearish',
    'Outperform': 'Bullish',
    'Equal-Weight': 'Neutral',
    'Sell': 'Very Bearish',
    '': '',
    'Underweight': 'Bearish',
    'Sector Perform': 'Neutral',
    'Market Perform': 'Neutral',
    'Perform': 'Neutral', 'Sector Weight': 'Neutral',
    'In-Line': 'Neutral', 'Underperformer': 'Bearish',
    'Market Outperform': 'Bullish',
    'Peer Perform': 'Neutral',
    'Negative': 'Bearish',
    'Positive': 'Bullish',
    'Sector Outperform': 'Bullish',
    'Strong Buy': 'Very Bullish',
    'Long-term Buy': 'Very Bullish',
    'Long-Term Buy': 'Very Bullish',
    'Equal-weight': 'Neutral',
    'Reduce': 'Bearish',
    'Accumulate': 'Bullish',
    'Mixed': 'Neutral',
    'Fair Value': 'Neutral',
    'Trim': 'Bearish',
    'Outperformer': 'Bullish',
    'Top Pick': 'Very Bullish',
    'Speculative Buy': "Bullish",
    'Average': 'Neutral',
    'Market Underperform': 'Bearish',
    'Market Weight': 'Neutral',
    'Below Average': 'Bearish',
    'Hold Neutral': 'Neutral',
    'Conviction Buy': 'Very Bullish',
    'Sector Underperform': 'Bearish',
    'Sector Performer': 'Neutral',
    'Above Average': 'Bullish'
    }
# Analyst Recommendations
recommendation_history.shape
# Analyst Recommendations
# Drop rows with no target grade
recommendation_history = recommendation_history[lambda x: x["To Grade"] != ""]
# Analyst Recommendations
# Normalized grade columns on the common scale
recommendation_history = recommendation_history.assign(new_grade = lambda x: x["To Grade"].apply(lambda c: analyst_dict[c])).assign(prev_grade = lambda x: x["From Grade"].apply(lambda c: analyst_dict[c]))
# Analyst Recommendations
# Action types = upgrade, downgrade, or no change
recommendation_history.Action.unique()
# Analyst Recommendations
# Working copy before the in-place backfill below
yt = recommendation_history.copy(deep=True)
# Analyst Recommendations
# Inverse moves: given the NEW grade and the action direction, infer what
# the PREVIOUS grade must have been (one step worse for 'up', better for 'down')
up_and_down = {'up': {'Very Bullish': 'Bullish', 'Bullish': 'Neutral', 'Neutral': 'Bearish', 'Bearish': 'Very Bearish'},
               'down': {'Bullish': 'Very Bullish', 'Neutral': 'Bullish', 'Bearish': 'Neutral', 'Very Bearish': 'Bearish'}}
# Analyst Recommendations
# Backfill missing prev_grade: maintain/reiterate/initiate imply no change;
# for up/down actions invert the move (via up_and_down) to recover the grade
# before it. Mutates recommendation_history in place row by row.
for i, r in recommendation_history.iterrows():
    grade = r["prev_grade"]
    act = r["Action"]
    if grade == '':
        if act in ['main', 'reit', 'init']:
            recommendation_history.loc[i, 'prev_grade'] = r['new_grade']
        elif act == 'down':
            recommendation_history.loc[i, 'prev_grade'] = up_and_down['down'][r['new_grade']]
        elif act == 'up':
            recommendation_history.loc[i, 'prev_grade'] = up_and_down['up'][r['new_grade']]
# +
# Final artifacts: recommendation_history, company_info_df_w_sectors,
# the daily table from stocks.db, and articlesCK
# -
with sql.connect('../data/interim/stocks.db') as con:
    dailys_df = pd.read_sql("SELECT * FROM daily", con=con)
# Financial Data
# Drop the stored primary key; to_sql below writes a fresh one
dailys_df = dailys_df.drop('pk', axis=1)
# News Articles
cleaned_articles = cleaned_articles.drop('pk', axis=1)
# Comments
# Chat mention counts, keyed by symbol to match the other frames
mentions = pd.read_csv('../data/interim/trading_counts.csv').rename(columns={'company': 'symbol'})
# News Articles
cleaned_articles = cleaned_articles.rename(columns={'ticker': 'symbol'})
# Financial Data
dailys_df = dailys_df.rename(columns={'company': 'symbol'})
# Financial Data
dailys_df.Date = pd.to_datetime(dailys_df.Date)
# News Articles
cleaned_articles.date = pd.to_datetime(cleaned_articles.date)
# Save to SQL
# Persist all final notebook artifacts into the raw companies database.
with sql.connect('../data/raw/companies.db') as con:
    dailys_df.to_sql("daily", con=con, index=True, index_label='pk', if_exists='replace')
    recommendation_history.to_sql('recommendations', con=con, index=True, index_label='pk', if_exists='replace')
    company_info_df_w_sectors.drop('holdings', axis=1).to_sql('info', con=con, index=True, index_label='pk', if_exists='replace')
    # BUG FIX: the articles frame was previously also written to 'info',
    # clobbering (if_exists='replace') the company-info table written on the
    # line above; it gets its own table.
    cleaned_articles.to_sql('articles', con=con, index=True, index_label='pk', if_exists='replace')
    mentions.to_sql('mentions', con=con, index=True, index_label='pk', if_exists='replace')
# News Articles scrape for Seeking Alpha
# Drives a Chrome session through the Seeking Alpha sign-in flow; the long
# sleeps leave time for page loads and the manual Google-auth steps.
driver = webdriver.Chrome("../../../Python/scraping/chromedriver2.exe", desired_capabilities=caps)
driver.get("https://seekingalpha.com/symbol/HEAR/news")
time.sleep(4.2)
button = driver.find_element(By.CSS_SELECTOR, 'button[data-test-id="header-button-sign-in"]')
button.click()
button_g = driver.find_element(By.CSS_SELECTOR, 'button[data-test-id="sign-in-with-google"]')
time.sleep(4.2)
button_g.click()
email = driver.find_element(By.CSS_SELECTOR, '[type="email"]')
time.sleep(22.3)
# Email address is redacted; the remaining auth steps are completed by hand
# during the 10-second pause before the driver shuts down.
email.send_keys("<EMAIL>")
time.sleep(10)
driver.quit()
# News Articles
# Quick sanity check of a saved page: list all <h3> headings.
with open('sqqq.htm', encoding='utf-8') as fp:
    soup = bs4.BeautifulSoup(fp.read(), 'html5lib')
for city in soup.find_all('h3'):
    print(city.text)
# News Articles
# Scraping attempt
# Uses GUI automation to type each ticker into a (manually positioned)
# browser, scroll the news list to force lazy loading, then Save-As to
# "<ticker>.mhtml". Screen coordinates are specific to the author's setup.
equities = company_info_df_w_sectors.symbol.apply(str.lower).values
copied = "?from=2020-05-01T16%3A00%3A00.000Z&to=2021-09-01T16%3A00%3A00.000Z"
for e in equities:
    # BUG FIX: str.split('') raises ValueError (empty separator);
    # list(e) produces the per-character key list typewrite expects.
    letters = list(e)
    pyautogui.moveTo(2499, 135, duration=2)
    pyautogui.click()
    pyautogui.typewrite(letters + ['enter'], .5)
    pyautogui.moveTo(2406, 433, duration=2)
    pyautogui.click()
    pyautogui.moveTo(2424, 47, duration=1.2)
    pyautogui.click(clicks=3, interval=1)
    pyautogui.hotkey('ctrl', 'v')
    pyautogui.hotkey('enter')
    time.sleep(5)
    # Page-down many times so the infinite-scroll list loads more articles.
    for l in range(1000):
        pyautogui.keyDown('pagedown')
        if l % 200 == 0:
            print(f'{l} at {dt.datetime.fromtimestamp(time.time()).strftime("%I:%M:%S")}')
    pyautogui.moveTo(2079, 590, duration=1.2)
    pyautogui.hotkey('ctrl', 's')
    time.sleep(2)
    # BUG FIX: [letters] + [...] nested the whole letter list as one element;
    # typewrite needs a flat list of individual keys.
    pyautogui.typewrite(letters + ['.', 'm', 'h', 't', 'm', 'l', 'enter'], .15)
    time.sleep(2)
    pyautogui.hotkey('enter')
    time.sleep(3)
    pyautogui.hotkey('home')
    time.sleep(2)
# News Articles
# Saved Text Files
# Preview: parse the first few saved pages and print their article bodies.
for folder, sf, files in os.walk(r"H:\Big Data\articles"):
    for f in files[:5]:
        if f.endswith(".htm"):
            f_name = "H:\\Big Data\\articles\\" + f
            with open(f_name, encoding='latin-1') as file:
                soup = bs4.BeautifulSoup(file.read(), 'html5lib')
            print("="*18)
            print(f.replace(".htm", "").upper())
            print("="*18)
            # BUG FIX: find_all('div', {'div': '[data-test-id=...]'}) looked
            # for a literal div="..." HTML attribute and never matched; use
            # the CSS attribute selector, as the working scrape cell below does.
            for headline in soup.select('div[data-test-id="content-container"]'):
                print(headline.text.replace("\n", " "))
# News Articles
len(articles)
# News Articles Setup
# soup.select('div[data-test-id="content-container"]') # article content
# soup.select('h3[data-test-id="post-list-item-title"]') # title
# soup.select('span[data-test-id="post-list-comments"]') # engagement
# soup.select('span[data-test-id="post-list-date"]') # date
# soup.select('span[data-test-id="post-list-author"]') # author
# +
# News Articles
# Scrape Files
# Parse every saved .htm page and accumulate one row per article into
# crypt_articles.
crypt_articles = pd.DataFrame()
# NOTE(review): `columns` is never used — the actual labels assigned below
# use 'article' rather than 'content'; confirm which naming is intended.
columns = ['content', 'headline', 'link', 'comments', 'date', 'symbol', 'publisher']
art_links = []
o = 0
for folder, sf, files in os.walk(r"H:\Big Data\articles"):
    for file in files:
        if file.endswith(".htm"):
            art_links.append(file)
for sym in art_links:
    # Newlines are stripped before parsing so extracted .text is single-line.
    soup = bs4.BeautifulSoup(open(f"H:\\Big Data\\articles\\{sym}", encoding='latin-1').read().replace("\n", ""), 'html5lib')
    sym = sym.replace(".htm", "")
    articles = soup.select('article[data-test-id="post-list-item"]')
    for a in articles:
        cont = a.select('div[data-test-id="content-container"]') # article content
        headline = a.select('h3[data-test-id="post-list-item-title"]') # title
        comms = a.select('a[data-test-id="post-list-comments"]') # engagement
        date_art = a.select('span[data-test-id="post-list-date"]') # date
        link = a.select('h3[data-test-id="post-list-item-title"]')[0].select('a')[0].attrs['href'] # article link
        # NOTE(review): `symbol` and `publisher` locals are unused; the row
        # below uses sym.upper() and 'Seeking Alpha' directly.
        symbol = sym
        publisher = 'SA'
        scrapes = [cont, headline, comms, date_art]
        scraped = []
        # Missing elements become '' so every row has the full column set.
        for s in scrapes:
            if len(s) != 0:
                scraped.append(s[0].text)
            else:
                scraped.append('')
        crypt_articles.loc[o, ['article', 'headline', 'comments', 'date', 'link', 'symbol', 'publisher']] = scraped + [link, sym.upper(), 'Seeking Alpha']
        o+=1
# -
# News Articles
with sql.connect('../data/interim/companies.db') as con:
    arts = pd.read_sql("select * from articles", index_col='pk', con=con)
# News Articles
# Strip the "Comment"/"Comments" suffix so the column holds bare counts.
crypt_articles = crypt_articles.assign(comments = lambda x: x.comments.apply(str)).assign(comments = lambda x: x.comments.apply(lambda c: re.sub(r"Comment(s)?", "", c)))
# News Articles
crypt_articles = crypt_articles.assign(headline = lambda x: x.headline.apply(str.strip)).assign(article = lambda x: x.article.apply(str.strip))
# News Articles
crypt_articles.loc[:, "date"] = pd.to_datetime(crypt_articles.date)
# News Articles
# Encrypt to Conform to Subscription TOS
# Every text column is encoded to bytes and encrypted with the module-level
# `cryptor` before being written to disk.
crypt_articles = crypt_articles.assign(article = lambda x: x.article.apply(bytes, encoding='utf-8')).assign(article = lambda x: x.article.apply(cryptor.encrypt))
crypt_articles = crypt_articles.assign(headline = lambda x: x.headline.apply(bytes, encoding='utf-8')).assign(headline = lambda x: x.headline.apply(cryptor.encrypt))
crypt_articles = crypt_articles.assign(link = lambda x: x.link.apply(bytes, encoding='utf-8')).assign(link = lambda x: x.link.apply(cryptor.encrypt))
crypt_articles = crypt_articles.assign(comments = lambda x: x.comments.apply(bytes, encoding='utf-8')).assign(comments = lambda x: x.comments.apply(cryptor.encrypt))
crypt_articles = crypt_articles.assign(date = lambda x: x.date.apply(str).apply(bytes, encoding='utf-8')).assign(date = lambda x: x.date.apply(cryptor.encrypt))
crypt_articles = crypt_articles.assign(publisher = lambda x: x.publisher.apply(str).apply(bytes, encoding='utf-8')).assign(publisher = lambda x: x.publisher.apply(cryptor.encrypt))
crypt_articles = crypt_articles.assign(symbol = lambda x: x.symbol.apply(str).apply(bytes, encoding='utf-8')).assign(symbol = lambda x: x.symbol.apply(cryptor.encrypt))
# News Articles
# Encrypted copy kept in a separate raw database.
with sql.connect('../data/raw/crypt.db') as con:
    crypt_articles.to_sql("crypt_articles", index=True,index_label='pk', con=con, if_exists='replace')
# News Articles
with sql.connect('../data/interim/companies.db') as con:
    articles = pd.read_sql("select * from articles", index_col="pk", con=con)
# News Articles
articles
# Sentiment Analysis
sia = SentimentIntensityAnalyzer()
# Sentiment Analysis
def sentiment_art(art):
    # VADER polarity scores for one article. The replace() calls strip
    # mojibake first: 'Â<nbsp>' residue and \x92 (Windows-1252 right quote) —
    # presumably from the latin-1 decoding above; confirm if source encoding changes.
    return sia.polarity_scores(art.replace("Â ", '').replace('\x92', "'"))
# News Articles
# Score every article, then expand the VADER dict into four numeric columns.
articles = articles.assign(compound_sent = lambda x: x.article.apply(sentiment_art))
# News Articles
# BUG FIX: the chain previously ended with .drop(..., inplace=True), which
# returns None and therefore rebound `articles` to None; drop without
# inplace returns the new frame.
articles= articles.assign(pos_sent = lambda x: x.compound_sent.apply(lambda y: y['pos']))\
    .assign(neg_sent = lambda x: x.compound_sent.apply(lambda y: y['neg']))\
    .assign(neu_sent = lambda x: x.compound_sent.apply(lambda y: y['neu']))\
    .assign(comp_sent = lambda x: x.compound_sent.apply(lambda y: y['compound'])).drop("compound_sent", axis=1)
# News Articles
# Seeking Alpha
# Decrypt each article only transiently to score it; ciphertext stays in the frame.
crypt_articles = crypt_articles.assign(sentiment = lambda x: x.article.apply(cryptor.decrypt).apply(str, encoding='utf-8').apply(sentiment_art))
# News Articles
crypt_articles = crypt_articles.assign(pos_sent = lambda x: x.sentiment.apply(lambda y: y['pos']))\
    .assign(neg_sent = lambda x: x.sentiment.apply(lambda y: y['neg']))\
    .assign(neu_sent = lambda x: x.sentiment.apply(lambda y: y['neu']))\
    .assign(comp_sent = lambda x: x.sentiment.apply(lambda y: y['compound']))
# News Articles
crypt_articles.drop("sentiment", axis=1, inplace=True)
# News Articles
crypt_articles
# News Articles
# Re-clean the encrypted article text in place: decrypt, strip mojibake,
# re-encrypt. Round-trips through plaintext only inside the pipeline.
crypt_articles.loc[:, 'article'] = crypt_articles.article.apply(cryptor.decrypt).apply(str, encoding='utf-8').apply(lambda x: x.replace('Â ', '').replace('\x92', "'")).apply(bytes, encoding='utf-8').apply(cryptor.encrypt)
# News Articles
crypt_articles
# +
# News Articles
# Combine Article Sources into One
with sql.connect("../data/interim/companies.db") as con:
    arts = pd.read_sql(f"SELECT * from articles", con=con, parse_dates={'date': '%Y-%m-%d %H:%M:%S'}).drop('id', axis=1)
with sql.connect("../data/raw/crypt.db") as con:
    second_arts = pd.read_sql('SELECT pk, headline as title, link, date, publisher, symbol, article, pos_sent, neg_sent, neu_sent, comp_sent FROM crypt_articles;', con=con)#.applymap(cryptor.decrypt)
# Sentiment columns are already numeric; everything else needs decrypting.
decrypt_cols = [x for x in second_arts.columns if x not in ['pk', 'pos_sent', "neu_sent", "neg_sent", "comp_sent"]]
for col in decrypt_cols:
    second_arts.loc[:, col] = second_arts.loc[:, col].apply(bytes).apply(cryptor.decrypt).apply(str, encoding='utf-8')
    # Dates were stored as full "YYYY-MM-DD HH:MM:SS" strings; keep the date part.
    if col == 'date':
        second_arts.loc[:, col] = second_arts.loc[:, col].apply(str.split, sep=" ").apply(lambda x: x[0]).apply(pd.to_datetime)
arts = pd.concat([arts, second_arts], axis=0, ignore_index=True).sort_values('date')
# -
# News Articles
# Per-symbol coverage summary: first/last article date and article count.
arts.groupby('symbol').agg(['first', 'last', 'count']).date.sort_values('first', ascending=False)
# +
# Comments
with sql.connect("../data/interim/companies.db") as con:
    mentions = pd.read_sql(f"SELECT * from mentions", con=con, index_col='pk')
with sql.connect("../data/interim/discord/discord.db") as con:
    comments = pd.read_sql(f"SELECT * from comments", con=con, index_col='pk')
# -
# Comments
# Restrict to the 100 most-mentioned symbols — assumes `mentions` is already
# sorted by mention count; TODO confirm upstream ordering.
mentioned_companies = mentions.symbol.values[:100]
# Comments
mentions.iloc[:100, :]
# Comments
# Alternation pattern matching any of the top symbols anywhere in a comment.
# NOTE(review): no word boundaries, so short tickers can match inside words.
company_regex = re.compile(r"|".join([x for x in mentioned_companies]))
# Comments
# Top 100 Companies
value_comments = comments[lambda x: x.content.str.contains(company_regex)]
# Comments
value_comments.loc[:, 'timestamp'] = pd.to_datetime(comments[lambda x: x.content.str.contains(company_regex)].timestamp)
# Comments
# Drop bot comments and record every symbol each comment mentions.
value_comments = value_comments[lambda x: (x.isBot==0)].reset_index().assign(symbols=lambda x: x.content.apply(lambda s: re.findall(company_regex, s)))
# Comments
# Stringify the symbol list so it round-trips through SQLite.
value_comments=value_comments.assign(symbols=lambda x: x.symbols.apply(str))
# Comments
# Remove Discord markup (<mentions/emotes>) and @everyone before scoring.
value_comments = value_comments.assign(content_cleaned = lambda x: x.content.apply(lambda s: re.sub(re.compile(r"<.+>|@everyone"), "", s)))
# Comments
value_comments=value_comments.assign(compound_sent = lambda x: x.content_cleaned.apply(sentiment_art))
# Comments
value_comments=value_comments.assign(pos_sent = lambda x: x.compound_sent.apply(lambda y: y['pos']))\
    .assign(neg_sent = lambda x: x.compound_sent.apply(lambda y: y['neg']))\
    .assign(neu_sent = lambda x: x.compound_sent.apply(lambda y: y['neu']))\
    .assign(comp_sent = lambda x: x.compound_sent.apply(lambda y: y['compound']))
# Comments
value_comments=value_comments.drop(["compound_sent", "content_cleaned"], axis=1)
# Comments
with sql.connect("../data/interim/companies.db") as con:
    value_comments.to_sql('symbol_comments', con=con,index=False, if_exists='replace')
# Financial Data
with sql.connect("../data/interim/companies.db") as con:
    info = pd.read_sql(f"SELECT * from info", con=con, index_col='pk')
# Financial Data
# Normalise the one inconsistent sector label so joins on 'sector' line up.
info.loc[info['sector']=='Communication Services', 'sector'] = "Communications Services"
# Financial Data
# Comparison Baselines
# Sector ETFs (Vanguard / SPDR) to benchmark each equity against.
comps = info[lambda v: ((v.quoteType=='ETF') & (v.shortName.str.contains('Vanguard')) | (v.quoteType=='ETF') & (v.shortName.str.contains('SPDR')))].loc[:, ['shortName', 'quoteType', 'sector', 'symbol']]
# Financial Data
# Pair every equity with its sector's baseline ETF(s).
info[lambda v: (v.quoteType=='EQUITY')].loc[:, ['shortName', 'quoteType', 'sector', 'symbol']].merge(comps, on='sector', how='left')
# Financial Data
comps
# Financial Data
with sql.connect("../data/interim/companies.db") as con:
    info.reset_index(drop=True).to_sql("info", con=con, index_label='pk', index=True, if_exists='replace')
# Financial Data
# pull equity from info table with associated GICS Sector VIA JOIN:
# BUG FIX: "OR '%SPDR%'" evaluated the bare string as a boolean operand
# instead of a second LIKE test, so SPDR funds were never matched by the
# subquery; repeat the column in the second LIKE.
with sql.connect("../data/interim/companies.db") as con:
    reader = pd.read_sql("SELECT * FROM info JOIN (SELECT shortName compName, sector, symbol compSymbol FROM info WHERE compName LIKE '%Vanguard%' OR compName LIKE '%SPDR%') USING (sector) WHERE symbol='TSLA'", con=con, index_col='pk')
# Financial Data
reader
# Analyst Recommendations
# NOTE(review): the cells below appear out of execution order — recs,
# articles, crypt_arts and symbol_comms are used here but only loaded by
# the read_sql cells further down. Run the loading cells first.
recs
articles
# News Articles
# Decrypt every non-sentiment column back to plain strings for analysis.
for c in crypt_arts.columns:
    if not (c.endswith('_sent')):
        crypt_arts.loc[:, c] = crypt_arts.loc[:, c].apply(cryptor.decrypt).apply(str, encoding='utf-8')
symbol_comms.shape
# recommendations, symbol comments, and articles
with sql.connect("../data/interim/companies.db") as con:
    recs = pd.read_sql("SELECT Date, symbol, Firm, new_grade, prev_grade, Action FROM recommendations", con=con, )
    articles = pd.read_sql("SELECT date, symbol, publisher,pos_sent, neu_sent, neg_sent, comp_sent FROM articles", con=con)
    symbol_comms = pd.read_sql("SELECT timestamp, symbols, channel, comp_sent FROM symbol_comments", con=con, parse_dates="timestamp")
# save seekingalpha raw articles
with sql.connect("../data/raw/crypt.db") as con:
    crypt_arts = pd.read_sql("SELECT date, symbol, publisher, pos_sent, neu_sent, neg_sent, comp_sent FROM crypt_articles", con=con)
# Pad the stored date strings — presumably restoring truncated seconds;
# TODO confirm against the stored format before relying on these dates.
crypt_arts.loc[:, "date"] = crypt_arts.date.apply(lambda x: x + "00")
| notebooks/Data_Cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from keras.utils import np_utils
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D
from keras.optimizers import Adam
import glob
from PIL import Image
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras.layers.core import Flatten, Dense, Dropout, Lambda
# -
def plots(ims, figsize=(12,6), rows=1, interp=False, titles=None):
    """Show a batch of images in a grid of `rows` rows, with optional titles."""
    if type(ims[0]) is np.ndarray:
        ims = np.array(ims).astype(np.uint8)
        # Channels-first batches (N, C, H, W) are rearranged to channels-last
        # (N, H, W, C) so imshow can render them.
        if (ims.shape[-1] != 3):
            ims = ims.transpose((0,2,3,1))
    fig = plt.figure(figsize=figsize)
    cols = len(ims)//rows
    for idx, im in enumerate(ims):
        axis = fig.add_subplot(rows, cols, idx+1)
        axis.axis('Off')
        if titles is not None:
            axis.set_title(titles[idx], fontsize=16)
        plt.imshow(im, interpolation=None if interp else 'none')
# +
from keras.preprocessing import image
BATCH_SIZE = 64
PATH="data/"
def get_fit_sample():
    # Pull one 300-image batch from the validation set; used only to fit
    # the generator's feature-wise normalisation statistics.
    gen = image.ImageDataGenerator()
    sample_batches = gen.flow_from_directory(PATH+'valid', target_size=(224,224),
                    class_mode='categorical', shuffle=False, batch_size=300)
    imgs, labels = next(sample_batches)
    return imgs
# Separate (identically fitted) generators for validation and training.
gen = image.ImageDataGenerator(featurewise_std_normalization=True)
gen.fit(get_fit_sample())
val_batches = gen.flow_from_directory(PATH+'valid', target_size=(224,224),
                class_mode='categorical', shuffle=True, batch_size=BATCH_SIZE)
gen = image.ImageDataGenerator(featurewise_std_normalization=True)
gen.fit(get_fit_sample())
batches = gen.flow_from_directory(PATH+'train', target_size=(224,224),
                class_mode='categorical', shuffle=True, batch_size=BATCH_SIZE)
#imgs,labels = next(batches)
#plots(imgs[:2])
# -
# +
# VGG16 configuration D built layer-by-layer: five conv blocks with
# 2x2 max-pooling, then two 4096-unit FC layers and a softmax head.
CLASSES = 2
INPUT_SHAPE = (224,224,3)
model = Sequential()
# Block 1
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1', input_shape=INPUT_SHAPE))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool'))
# Block 2
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1'))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool'))
# Block 3
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2'))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool'))
# Block 4
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool'))
# Block 5
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2'))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool'))
# Classification block
model.add(Flatten(name='flatten'))
model.add(Dense(4096, activation='relu', name='fc1'))
model.add(Dropout(0.5))
model.add(Dense(4096, activation='relu', name='fc2'))
model.add(Dropout(0.5))
model.add(Dense(CLASSES, activation='softmax', name='predictions'))
from keras.optimizers import SGD
sgd = SGD(lr=0.01, decay=0.0005, momentum=0.9, nesterov=False)
# NOTE(review): MSE loss with a softmax output is unusual for
# classification — categorical_crossentropy is the standard choice; confirm
# whether MSE was a deliberate experiment.
model.compile(optimizer=sgd, loss='mean_squared_error', metrics=['accuracy'])
# +
# %%time
# Train for 10 epochs of 100 batches each, then persist the weights and
# plot train/validation loss per epoch.
hist = model.fit_generator(batches, steps_per_epoch=100, epochs=10, validation_data=val_batches, validation_steps=10)
model.save('ConvNet-D-vgg16.h5')
# http://qiita.com/TypeNULL/items/4e4d7de11ab4361d6085
loss = hist.history['loss']
val_loss = hist.history['val_loss']
nb_epoch = len(loss)
plt.plot(range(nb_epoch), loss, marker='.', label='loss')
plt.plot(range(nb_epoch), val_loss, marker='.', label='val_loss')
plt.legend(loc='best', fontsize=10)
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
# -
# https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3
| cnn_dogs_cats/ConvNet-D-vgg16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.2 64-bit
# name: python392jvsc74a57bd0aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49
# ---
# # TensorFlow and MNIST
#
# - [Tutorial link](https://www.tensorflow.org/tutorials/quickstart/beginner)
import tensorflow as tf
# +
# Load MNIST and scale pixel values from [0, 255] into [0, 1].
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# -
# Peek at the first (now-normalised) training image.
x_train[0]
| notebooks/tensorflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + slideshow={"slide_type": "subslide"}
from __future__ import division
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all" # Allow printing multiple outputs from each cell
# + slideshow={"slide_type": "subslide"}
# Dependency for the computations
import numpy as np
# Dependency only for the charts
import matplotlib.pyplot as plt
import seaborn as sns # Make matplotlib charts look better
sns.set(style='ticks')
# Dependency only for adding methods to classes dynamically in jupyter (Jupyter Dynamic Classes)
# See https://github.com/jupyter/notebook/issues/1243 and https://alexhagen.github.io/jdc/
import jdc
# -
# %matplotlib inline
# A literate programming notebook written as I follow http://neuralnetworksanddeeplearning.com/chap1.html (as recommended in fast.ai course lesson 2 notes)
#
# This is basically a 'My First Neural Network' notebook with (hopefully) copious documentation, built to learn NumPy and solidify the concepts for me before going back to using the higher-level libraries such as Keras/TensorFlow. It won't use GPU, and it isn't optimised for performance. It's just about checking I understand the concepts fully, and to explore the NumPy APIs.
#
# The end goal (based on the chapter 1 content) is to build an entry for the Kaggle MNIST competition, using a neural network, written from scratch - and for that neural network to be general enough for other problems.
# # Perceptrons
#
# A perceptron takes any number of binary inputs $x_0, x_1,..., x_n$ and outputs a single binary value. First we compute $\sum_j x_j w_j$, where each $w_j \in \mathbb{R}$ is a weight (and there are equal numbers of inputs $x_i$ and weights $w_i$). This is the dot product of the two vectors, $\mathbf{x} \cdot \mathbf{w}$, which we can compute using the `np.dot` method.
# +
x = [0, 0, 1, 1, 0] # 5 binary inputs
w = [1, 2, 3, 4, 1] # 5 real-valued weights
dot = np.dot(x, w)
print(dot)
# -
# So for a perceptron we see that because of the binary-valued inputs, the output of $\mathbf{x} \cdot \mathbf{w}$ is simply the sum of weights $w_j$ where $x_j = 1$.
#
# As mentioned, the output of a perceptron is in fact binary-valued, and this is done using a threshold: If $\mathbf{x} \cdot \mathbf{w} \le threshold$ then the perceptron outputs 0, otherwise it outputs 1.
#
# We replace the threshold with bias $-b = threshold$, and move it to the other side of the inequality, such that we have: If $\mathbf{x} \cdot \mathbf{w} + b \le 0$ then the perceptron outputs 0, otherwise it outputs 1.
#
# So we could fully capture a perceptron's behaviour with a simple method:
# +
def perceptron(x, w, bias):
    """Binary threshold unit: 1 when w . x + bias is strictly positive, else 0."""
    activation = np.dot(x, w) + bias
    return 1 if activation > 0 else 0
# For example, using our x and w from before
x = [0, 0, 1, 1, 0]
w = [1, 2, 3, 4, 1]
perceptron(x, w, bias=2)
perceptron(x, w, bias=-10)
# +
# From the book, using the perceptron to implement NAND
w = [-2, -2]
bias = 3
for x1, x2 in [(0,0), (0,1), (1,0), (1,1)]:
perceptron([x1,x2], w, bias)
# -
# We can in fact use a perceptron to learn any linearly separable function, including NAND. To show this, let's consider how a perceptron learns.
#
# We start with randomly initialised weights and bias terms. We also pass in training data, in tuples of (input, output) ie (x, y). For a binary function this is ((x1, x2), y).
#
# Then for each **epoch** (ie the number of times we want to learn), we take each training example, and using its input and our current weights we compute the current value of the perceptron on that input. We find the difference between the expected output `y` and the current output, the error term. Note that this must be computed `y - output` - the order matters. The error term will either be 0, 1 or -1.
#
# We adjust the weights in each position $w_i$ by $w_i += \eta * error * x_i$ ($\eta$, eta is just a learning rate that we can tweak - it changes how quickly the weights change for wrong examples.) We adjust the bias by $\eta * error$, since it's not linked to any input directly.
#
# Assuming the input training data is linearly separable this algorithm will converge to 100% accuracy in a finite number of epochs.
#
#
# To demonstrate this, we'll define a function to train a perceptron - and demonstrate it on the NAND case.
# +
def train_perceptron(training_data, epochs=30, eta=1):
    """Train a perceptron on (inputs, target) pairs with the classic update rule.

    training_data: list of ((x1, ..., xn), y) tuples.
    eta: learning rate scaling each weight/bias correction.
    Returns (weights, bias, errors) where errors maps epoch -> error count.
    """
    x_len = len(training_data[0][0]) # Tuples of ((xs...), y) - want len xs
    w = np.random.rand(x_len) # Random starting weights equal length to x_len, 0-1
    bias = np.random.rand() # Random starting bias, 0-1
    errors = {} # Count errors at each epoch
    for i in range(epochs):
        errors[i] = 0
        for x, y in training_data:
            output = perceptron(x, w, bias)
            error = y - output  # 0 when correct, otherwise +1 / -1
            if error != 0:
                errors[i]+=1
            # BUG FIX: previously only w[0] and w[1] were updated, silently
            # ignoring any inputs beyond the first two even though x_len is
            # computed generally; update every weight.
            for j in range(x_len):
                w[j] += eta * error * x[j]
            bias += eta * error
    return w, bias, errors
training_data = [((0,0), 1), ((0, 1), 1), ((1,0), 1), ((1,1), 0)] # Tuples of ((x1,x2), x1 NAND x2)
w, b, errors = train_perceptron(training_data)
# -
# Plot how many of the four NAND examples were misclassified at each epoch;
# this should drop to zero since NAND is linearly separable.
plt.figure()
xs = errors.keys()
ys = errors.values()
plt.xlabel('epoch')
plt.ylabel('errors')
plt.title('Errors at each epoch')
plt.plot(xs, ys)
# +
# Plot the learned separating line w[0]*x + w[1]*y + b = 0 via its axis
# intercepts.
# BUG FIX: this previously used `bias`, a stale global left over from the
# NAND demo above (bias = 3); the trained bias from train_perceptron is
# bound to `b`.
x_intersect = [0,-b/w[1]]
y_intersect = [-b/w[0],0]
plt.xlim([-1,1])
plt.ylim([-1,1])
plt.title('Separation boundary found')
plt.plot(x_intersect, y_intersect)
# -
# We can see the effect of using a smaller eta variable on the same training data:
# +
w, b, errors = train_perceptron(training_data, eta=0.05)
plt.figure()
xs = errors.keys()
ys = errors.values()
plt.xlabel('epoch')
plt.ylabel('errors')
plt.title('Errors at each epoch')
plt.plot(xs, ys)
# -
# Perceptrons have some limitations:
#
# - They only converge for linearly separable problems, which most interesting problems are not
# - Because they use a step function, we can't make small tweaks to their output - it's either 0 or 1
#
# The way we deal with the first issue will be by building networks of neurons, where the output of one layer can be inputs to the next layer. This allows us to solve complex problems (or, mathematically - can approximate to an arbitrary accuracy any function). However this exacerbates our second problem - without being able to tweak our perceptrons in a predictable way we're not going to be able to create a trainable network of them.
#
# For this reason, we change the non-linearity, that is the **activation function**. Our computed $\mathbf{x} \cdot \mathbf{w} + b$ doesn't need to change, but instead of using the inequality with 0, we pass this value to a different function.
# # Sigmoid neurons
#
# The sigmoid neuron uses the activation function $\sigma(z) = \frac{1}{1+e^-z}$, where z is our computed $\mathbf{x} \cdot \mathbf{w} + b$. Before we code this, we can examine its shape:
def sigmoid_fn(x):
    """Logistic sigmoid 1 / (1 + e^-x): squashes any real input into (0, 1)."""
    denominator = 1 + np.exp(-x)
    return 1 / denominator
# +
xs = np.linspace(-5,5,100) # 100 evenly spaced values between -5 and 5
ys = sigmoid_fn(xs)
plt.title('Sigmoid')
plt.plot(xs, ys)
# +
# Compare this to the step function:
def step(x):
    """Heaviside-style step: 1 for strictly positive input, otherwise 0."""
    return int(x > 0)
xs = np.linspace(-5,5,100) # 100 evenly spaced values between -5 and 5
ys = [step(x) for x in xs]
plt.title('Step function')
plt.plot(xs, ys)
# -
# We can see that the sigmoid is a smoothed step function, maintaining the bounds of 0 and 1, but changing by only a small amount each time its input changes. This is the only difference between the sigmoid and perceptron, but it turns out to be significant.
# At this point it makes sense to generalise our code to define a neuron, with a view to eventually building up a neural network - that is, a network of layers of neurons. So let's define a neuron:
class Neuron:
    """A single artificial neuron: weighted sum of its inputs plus a bias,
    passed through a pluggable activation function."""
    def __init__(self, num_inputs, activation_function):
        # Weights and bias always start random (uniform in [0, 1)); the
        # activation differs per neuron type, so it is injected here.
        self.activation_function = activation_function
        self.num_inputs = num_inputs  # Stored so updates can generalise
        self.weights = np.random.rand(num_inputs)
        self.bias = np.random.rand()
    def _compute_value(self, xs):
        # Pre-activation value z = w . x + b
        return np.dot(xs, self.weights) + self.bias
    def get_value(self, xs):
        # Neuron output: activation applied to the pre-activation value.
        z = self._compute_value(xs)
        return self.activation_function(z)
# Essentially, a neuron knows how to compute its value from its weights and bias term, and it has an activation function, which takes such a value and computes the desired output for the neuron.
# +
# So a perceptron is simply:
perceptron = Neuron(2, activation_function = step)
perceptron.get_value([0,0])
# And a sigmoid is simply:
sigmoid = Neuron(2, activation_function = sigmoid_fn)
sigmoid.get_value([0,0])
# -
# Now we can re-implement our learning method using the generic neuron (and thus supporting any given activation function)
# %%add_to Neuron
def train(self, training_data, epochs=30, eta=0.05):
    """Perceptron-style training rule attached to Neuron (via jdc's %%add_to).

    training_data: iterable of (inputs, target) pairs.
    Returns a dict mapping epoch index -> number of 'wrong' outputs
    (|target - output| > 0.5) seen during that epoch.
    """
    errors = {}
    for i in range(epochs):
        errors[i] = 0
        for X, y in training_data:
            output = self.get_value(X)
            error = y - output
            if np.absolute(error) > 0.5: # Not a binary output, so allow some difference here - as long as we're close
                errors[i]+=1
            # Update each weight in turn, and the bias
            for j in range(self.num_inputs):
                self.weights[j] += eta * error * X[j]
            self.bias += eta * error
    return errors
sigmoid = Neuron(2, activation_function = step) # Make sure we re-create the neuron to get the new method included
errors = sigmoid.train(training_data)
x_intersect = [0,-sigmoid.bias/sigmoid.weights[1]]
y_intersect = [-sigmoid.bias/sigmoid.weights[0],0]
plt.xlim([-1,1])
plt.ylim([-1,1])
plt.title('Separation boundary found')
plt.plot(x_intersect, y_intersect)
plt.figure()
xs = errors.keys()
ys = errors.values()
plt.xlabel('epoch')
plt.ylabel('errors')
plt.title('Errors at each epoch')
plt.plot(xs, ys)
# # A layer of neurons: Naive
#
# Like a perceptron, a sigmoid can only classify linearly separable problems. To solve general problems we need to have multiple layers of neurons - and because each neuron has a non-linear activation function this will provide us a non-linear system and allow us to approximate non-linearly separable (actually, all) functions.
#
# Let's first consider what a layer of neurons actually looks like, in the simplest way:
class Layer:
    """Naive fully-connected layer: a collection of neurons that all
    receive the same input vector."""
    def __init__(self):
        self.neurons = []
    def add_neuron(self, neuron):
        # This first version grows the layer one neuron at a time.
        self.neurons.append(neuron)
    def evaluate(self, xs):
        # The layer's output is one value per neuron, in insertion order.
        return [neuron.get_value(xs) for neuron in self.neurons]
# A layer of neurons just contains the neurons that have been added to it. It receives some input - just like a single neuron does - but its output contains the computed value of each neuron in the layer.
#
# The way to think of this is as the layer receiving the output of the previous layer, or the inputs to the network as a whole. Then the outputs of this layer could be the outputs of the network as a whole, or they could be fed forward into the next layer.
#
# In each layer, every neuron receives as input the complete output of the last layer. We call these layers 'fully connected' or 'dense', and they're the only sort of layer we'll consider here.
#
# Consider a layer of 3 neurons:
# +
layer = Layer()
for i in range(3): # Add 3 neurons
sig = Neuron(4, activation_function = sigmoid_fn) # Each sigmoid takes 4 inputs
layer.add_neuron(sig)
layer.evaluate([1,-4,3,-6]) # We can evaluate any 4 real numbers as input
for i in range(3): # Add 3 more neurons
sig = Neuron(4, activation_function = sigmoid_fn) # Each sigmoid takes 4 inputs
layer.add_neuron(sig)
layer.evaluate([1, 0, 1, -1])
# -
# As we can see, our layer takes as input 4 values, and each of its neurons can take as input such a list. It evaluates to 3 outputs, because we added 3 neurons. When we add another 3 neurons for a total of 6, it evaluates to 6 outputs.
#
# Clearly all of our neurons need to share the same number of inputs - the same input it passed to all of them when we evaluate them. A better API would enforce this constraint. I'm not sure if there's ever a reason to mix-and-match activation functions within a layer, but we're not going to do that here - so let's also make the activation function a parameter of the `Layer` and enforce that it's the same for all neurons. Here's a better API for a `Layer`:
# +
class Layer:
    """Fully-connected layer with a fixed neuron count and one activation
    function shared by every neuron; all neurons see the same input size."""
    def __init__(self, num_inputs, num_neurons, activation_function):
        # Creating all neurons up front enforces a uniform input width and
        # activation across the layer.
        self.neurons = [Neuron(num_inputs, activation_function)
                        for _ in range(num_neurons)]
    def evaluate(self, xs):
        # One output per neuron, each computed from the same input vector.
        return [neuron.get_value(xs) for neuron in self.neurons]
# A 4-input layer of 3 sigmoid neurons: one input list in, 3 outputs back.
layer = Layer(4, 3, sigmoid_fn)
layer.evaluate([1,2,3,4])
# Same input width but 6 neurons, so the same kind of input yields 6 outputs.
layer = Layer(4, 6, sigmoid_fn)
layer.evaluate([1,-1,1,-1])
# -
# Notice that we've lost some power here - we can no longer add to a layer after it's been created. It'd be easy enough to add that back, but we won't need to do that here so we'll keep the simpler class going forward.
# # Representing a layer as a matrix
#
# Recall what's happening when we compute the value of each neuron - we compute $\mathbf{w} \cdot \mathbf{x} + b$, and then we pass that to the activation function (eg. step or sigmoid).
#
# If we say that $\mathbf{x}$, our input vector, is a column vector (which makes sense graphically if we vertically align neurons within a layer) of length $n$, then we know that $\mathbf{w}$ is also a vector of length $n$. If we say that $\mathbf{w}$ is a row vector (or we transpose it from a column vector so that it is), then we can compute the dot product by doing a matrix multiply of the two vectors.
#
# For example, let's say that $\mathbf{w} = \begin{pmatrix}1 & 2 & 3\end{pmatrix}$ and $\mathbf{x} = \begin{pmatrix}5 \\ 10 \\ 2 \end{pmatrix}$, then we can compute their dot product $\mathbf{w} \cdot \mathbf{x} = \begin{pmatrix}1 & 2 & 3 \end{pmatrix} \cdot \begin{pmatrix}5 \\ 10 \\ 2\end{pmatrix} = (1x5) + (2x10) + (3x2) = 31$
#
# If we have a second neuron in the same layer (so receiving the same input $\mathbf{x}$), with weights $\mathbf{w'} = \begin{pmatrix}2 & -2 & 0\end{pmatrix}$, then we can say that $\mathbf{w'} \cdot \mathbf{x} = \begin{pmatrix}2 & -2 & 0\end{pmatrix} \cdot \begin{pmatrix}5 \\ 10 \\ 2 \end{pmatrix} = (2x5) + (-2x10) + (0x2) = -10$
#
# Then we can say that the dot products for our layer are $\begin{pmatrix}31 \\ -10\end{pmatrix}$. Recall that the value (before the activation function) is $\mathbf{w} \cdot \mathbf{x} + b$ - so we need to deal with our bias terms. Suppose the first neuron had bias $b = 3$ and the second neuron $b' = 5$, then we can add the bias terms using vector addition: $\begin{pmatrix}31 \\ -10\end{pmatrix} + \begin{pmatrix}3 \\ 5\end{pmatrix} = \begin{pmatrix}34 \\ -5\end{pmatrix}$
#
# And finally we apply the activation function, say for simplicity that these are perceptrons: $\begin{pmatrix} 1 \\
# 0\end{pmatrix}$
#
# This works, but we haven't actually gained anything - we've just split adding our bias and passing through the activation function to the end, doing it after we've done each dot product - but we're still doing individual dot products, and then combining them into a vector we can add the bias terms to and then element-wise applying the activation function. It turns out that we can use matrix multiplication to compute all the neurons in the layer at the same time. We say that $\mathbf{W}$ is a matrix, where each row is the weights of a single neuron. So for the above 2-neuron layer:
# $\mathbf{W} = \begin{pmatrix} 1 & 2 & 3 \\ 2 & -2 & 0 \end{pmatrix}$. Now, we use matrix multiplication:
# $\begin{pmatrix} 1 & 2 & 3 \\ 2 & -2 & 0 \end{pmatrix} \cdot \begin{pmatrix}5 \\ 10 \\ 2 \end{pmatrix} = \begin{pmatrix}31 \\ -10 \end{pmatrix}$
#
# We then add our bias terms, and apply the activation function to each element as before.
#
# This gives us a different way to view a layer of our network, it's simply a matrix $\mathbf{W}$ and a vector $\mathbf{b}$ - we don't need to loop over all our neurons or even define them as their own objects. The matrix has one row for each neuron (representing its weights). The length of each neuron's weights (ie the number of columns) is equal to the number of inputs. If we say we have m inputs to our layer, and n neurons in the layer, then our matrix multiply $\mathbf{W} \cdot \mathbf{x}$ looks like $(nxm) \cdot (mx1) = (nx1)$. This is what we'd expect, and explains why our weight matrix must be that specific shape. Each layer takes as input a column vector, and outputs a column vector of a length determined by its number of neurons.
#
# Of course, our last layer needs to have the expected output shape for the problem - and therefore has a known number of rows. We can control the number of neurons, and thus rows, in all other layers though.
# +
# Rewriting our Layer using the matrix multiply form
# Rewriting our Layer using the matrix multiply form
class Layer:
    """A dense layer stored as a weight matrix plus a bias column vector."""
    def __init__(self, num_inputs, num_neurons, activation_function):
        # Weights: one row per neuron, one column per input, so the shape is
        # (num_neurons, num_inputs). Drawn from the standard normal
        # distribution (mean 0, variance 1).
        self.weights = np.random.randn(num_neurons, num_inputs)
        # Bias: one entry per neuron, kept as a COLUMN vector of shape
        # (num_neurons, 1) so it adds elementwise onto W.x.
        self.bias = np.random.randn(num_neurons, 1)
        self.activation_function = activation_function
    def evaluate(self, x):
        # Weighted input z = Wx + b, then the activation applied elementwise.
        z = np.matmul(self.weights, x) + self.bias
        return self.activation_function(z)
# A 3-input, 2-neuron layer; the input must be a (3,1) column vector.
layer = Layer(3, 2, sigmoid_fn)
x = np.array([1,2,3]).reshape(3,1)
layer.evaluate(x)  # returns a (2,1) column, one value per neuron
# -
# # From a layer to a network
#
# Now we have everything that we need to define a simple feed-forward neural network consisting of dense layers of neurons: we'll include our input and output layers as a layer of neurons, and we can have any layers between them - we just need to make sure our dimensions all match up.
#
# We won't consider our MNIST dataset in any detail yet, other than to say that it contains images of 28x28 greyscale pixels. One obvious way to represent this as an input would be as 28x28=784 values, with each representing the intensity of one pixel. Our job is to classify which number the image is a drawing of, a single digit 0-9. So our final layer will have 10 outputs representing the digits 0-9, each representing the probability that the image is that digit. We would predict that the drawing is the digit with the highest probability. We call all other layers (those that aren't the input or output layer) hidden layers, and we can choose how many of these we want, and how complex they should be.
#
# For example, if we wanted a single hidden layer with 40 neurons, then we'd need two layers of neurons:
# - A layer that takes as input 784 values, and has 40 neurons, and thus outputs 40 values - our hidden layer
# - A layer that takes as input 40 values, and has 10 neurons, and thus outputs 10 values - our output layer
#
# We'd generate predictions by evaluating the hidden layer with the input of 784 values, and then evaluating the output layer with the input of 40 values resulting from that.
#
# If we wanted to have two hidden layers with 50 and 110 neurons respectively, then we'd need three layers of neurons:
# - A layer that takes as input 784 values, and has 50 neurons, and thus outputs 50 values - our first hidden layer
# - A layer that takes as input 50 values, and has 110 neurons, and thus outputs 110 values - our second hidden layer
# - A layer that takes as input 110 values, and has 10 neurons, and thus outputs 10 values - our output layer
# Let's see how a network looks in practice
# Let's see how a network looks in practice
class Network:
    """A feed-forward network: an ordered sequence of layers applied in turn."""
    def __init__(self):
        self.layers = []
    def add_layer(self, layer):
        # Layers are applied in the order they are added.
        self.layers.append(layer)
    def evaluate(self, x):
        # Thread the input through every layer; with no layers this is the identity.
        result = x
        for layer in self.layers:
            result = layer.evaluate(result)
        return result
# This looks very similar to our first attempt at a `Layer`, but this time we won't simplify any further: we want to permit any layer to be used in our network (so a network can contain layers with different activation functions), and additionally as each layer has a non-linearity (the activation function) we can't simplify the computation any further.
# +
# Our examples of networks from above, using sigmoids:
net1 = Network()
net1.add_layer(Layer(784, 40, sigmoid_fn)) # Hidden layer
net1.add_layer(Layer(40, 10, sigmoid_fn)) # Output layer
net2 = Network()
net2.add_layer(Layer(784, 50, sigmoid_fn)) # First hidden layer
net2.add_layer(Layer(50, 110, sigmoid_fn)) # Second hidden layer
net2.add_layer(Layer(110, 10, sigmoid_fn)) # Output layer
# +
# A random stand-in for an MNIST image: 784 values as a column vector.
x = np.random.random_sample(size=(784,1))
net1.evaluate(x)  # (10,1) column of outputs
net2.evaluate(x)  # (10,1) column of outputs
# -
# # Training the network
#
# At this point we've got everything that we need to build a network, made of layers of neurons - to compute any classification problem. We do this by feeding the input through the layers, so we call this a feed-forward network. The problem we have now is training it: we can be given inputs and known outputs, we can pass it the input and observe the output, but we don't yet have a way to adjust the weights. We can't update them like we did for a single neuron - that doesn't capture our more subtle relationship between neurons within our network.
#
# This is where backpropagation comes in.
# Our activation function needs to be differentiable
# Our activation function needs to be differentiable
class Sigmoid_cls:
    """Sigmoid activation together with its derivative, as backprop requires."""
    def __init__(self):
        pass
    def compute(self, z):
        # Clip to avoid overflow in exp(); the sigmoid is already flat
        # (indistinguishable from 0 or 1) beyond +/-500.
        safe_z = np.clip(z, -500, 500)
        return 1/(1+np.exp(-safe_z))
    def derivative(self, z):
        # sigma'(z) = sigma(z) * (1 - sigma(z))
        s = self.compute(z)
        return s*(1-s)
# We introduce a cost function, also differentiable (quadratic cost function)
# We introduce a cost function, also differentiable (quadratic cost function)
class QuadraticCost_cls:
    """Mean squared error cost and its gradient wrt the network output."""
    def __init__(self):
        pass
    def compute(self, computed, desired):
        # Mean of the squared elementwise differences for one training example.
        diff = computed - desired
        return (diff ** 2).mean(axis=None)
    def derivative(self, computed, desired):
        # Elementwise gradient of the squared-error term (constants folded away).
        return computed - desired
# +
# %%add_to Layer
# Backprop is simpler if we calculate both the weighted input z(x) ie wx + b, as well as f(z(x)) ie sigmoid
# So modify layer:
def weighted_input(self, x):
    """Return z = Wx + b for this layer, WITHOUT the activation function.

    Backprop needs the pre-activation value z separately from f(z), which is
    why the forward pass is split into this method plus evaluate().
    """
    return np.matmul(self.weights, x) + self.bias
def evaluate(self, xs):
    """Full forward step for this layer: the activation applied to Wx + b."""
    z = self.weighted_input(xs)
    return self.activation_function.compute(z)
# -
# %%add_to Network
# Add the cost function as a value of the network
def __init__(self, cost_function):
    """Create an empty network that scores outputs with `cost_function`."""
    self.layers = []
    self.cost_function = cost_function  # must expose compute() and derivative()
# # Notes
#
# - We split the forward pass to return z(x), and then pass on f(z(x))
#
# %%add_to Network
def backprop(self, network_input, desired_output):
    """Forward-propagate one example, then backpropagate its error.

    Returns a pair (gradient_b, gradient_w): per-layer gradients of the cost
    with respect to each layer's biases and weights, in forward layer order,
    ready to be scaled by a learning rate and subtracted from the parameters.
    """
    gradient_b = [np.zeros(layer.bias.shape) for layer in self.layers] # layer-by-layer gradients wrt biases
    gradient_w = [np.zeros(layer.weights.shape) for layer in self.layers] # layer-by-layer gradients wrt weights
    # Feedforward - pass the input through the current network, same effect as evaluate
    activation = network_input # Current activation passing through
    activations = [network_input] # Activations, per layer, starting from input
    weighted_inputs = [] # Weighted inputs, per layer, starting from first non-input layer. Stored for backward pass.
    for layer in self.layers:
        z = layer.weighted_input(activation)
        weighted_inputs.append(z)
        activation = layer.activation_function.compute(z)
        activations.append(activation)
    # Backward pass - propagate errors backward
    # First we deal with the last layer
    # Last layer is special because:
    # 1) It can compare its actual output to the desired output (the others all feed through more layers first)
    # 2) It doesn't have any layers after it to feed into
    last_layer = self.layers[-1]
    last_weighted_input = weighted_inputs[-1]
    network_output = activations[-1]
    previous_activations = activations[-2]
    activation_derivative = last_layer.activation_function.derivative(last_weighted_input)
    cost_derivative = self.cost_function.derivative(network_output, desired_output)
    # delta is the output layer's error signal: an elementwise product of the
    # cost gradient and the activation's sensitivity at its weighted input.
    delta = activation_derivative * cost_derivative
    # Delta is the shape of our last layer (since both its components are), which perfectly matches its biases
    gradient_b[-1] = delta
    # The weights in the last layer are matrix multiplied with the activations in the previous layer
    # as part of computing the weighted input.
    # We do the same here to find the gradient, but we must transpose it for the matrix multiply to
    # have the right shape (that of the last layer weights)
    # eg. If the last layer is 2 neurons, and the second to last is 5, then our weights are shape (2,5)
    # delta is (2,1) and activations before are (5,1), transposed to (1,5)
    gradient_w[-1] = np.matmul(delta, previous_activations.transpose())
    # Then we deal with each other layer, moving backward
    # Using negative indices to count backward, so this is from 2nd-to-last layer to first layer
    # When we index backward we count from 1 (ie last layer is -1), so first layer is at [-len(layers)]
    # and range excludes the end point so we add 1 to include that first layer
    for this_l in range(2, len(self.layers)+1):
        this_layer = self.layers[-this_l]
        this_weighted_input = weighted_inputs[-this_l]
        layer_after_weights = self.layers[-this_l+1].weights # The weights of the layer after this one when going forward
        previous_activations = activations[-this_l-1] # We included input layer in activations
        previous_delta = delta # Same shape as the layer after this one
        activation_derivative = this_layer.activation_function.derivative(this_weighted_input) # Same shape as this layer
        # We can't use the network cost function here, because we don't know exactly our target output,
        # only the gradient we've computed for the layer after this one. We can find that cost though:
        # If we say that this layer has m neurons and the layer after this has m', then we know that
        # the weights of the layer after this are shape (m', m)
        # We know that the previous delta was shape (m', 1) - the shape of the layer after this
        # Our cost is a function of these, and needs to be of the same shape as our activation function,
        # which is (m, 1)
        # So we matrix multiply the transpose of the weights (m, m') by the previous delta (m', 1), which gives
        # us the correct shape.
        cost_derivative = np.matmul(layer_after_weights.transpose(), previous_delta)
        delta = activation_derivative * cost_derivative
        # Again, delta is the shape of this layer since both its components are
        gradient_b[-this_l] = delta
        # And again we matrix multiply it with the activations in the layer before
        # to match the weights at this layer
        gradient_w[-this_l] = np.dot(delta, previous_activations.transpose())
    # print activations[-1] # We'll comment this out if it gets annoying, but it's a useful illustration
    # print self.cost_function.compute(activations[-1], desired_output) # ^ Same with this
    # print
    return (gradient_b, gradient_w)
# +
# A very simple example, building up a network and training it to map a column [1,2,3,4] to a column [1,0]
# This is an example of classification, in this case [1,2,3,4] is in the first class
# Build a small network and fit it to a single example with plain gradient descent.
net1 = Network(QuadraticCost_cls())
net1.add_layer(Layer(4, 5, Sigmoid_cls()))   # Hidden layer
net1.add_layer(Layer(5, 10, Sigmoid_cls()))  # Hidden layer
net1.add_layer(Layer(10, 30, Sigmoid_cls())) # Hidden layer
net1.add_layer(Layer(30, 2, Sigmoid_cls()))  # Output layer
x = np.array([1,2,3,4]).reshape(4,1)
y = np.array([1,0]).reshape(2,1)
# Gradient descent: backprop yields the gradients and we step down them
# (implicit learning rate of 1).
for step in range(200):
    g_b, g_w = net1.backprop(x,y)
    for layer, layer_g_w, layer_g_b in zip(net1.layers, g_w, g_b):
        layer.weights -= layer_g_w
        layer.bias -= layer_g_b
# -
# # Stochastic gradient descent
#
# If we have more than one training example, then we'll need a way to update the weights such that our cost function decreases by as much as possible over all of them - like with the perceptron.
#
# The batch gradient descent algorithm would involve computing the gradient (using backpropagation) for all training examples, and then updating it once - and then repeating that process for however many epochs we want to teach the network for. This uses a lot of memory and means we only descend the cost function after computing for all the training examples, and we can be liable to get stuck at a local minimum.
#
# In stochastic gradient descent we instead descend the cost function after each individual training example. This is noisy but it still converges to the minimum, and sometimes more noise is beneficial for escaping local minima.
# We actually used basically this for training our perceptron:
# ```
# for x, y in training_data:
# # Find error
# w[0] += eta * error * x[0]
# w[1] += eta * error * x[1]
# bias += eta * error
# ```
#
# Notice we update the weights and bias terms inside the loop over each training example. This is stochastic gradient descent.
#
# A compromise solution is the minibatch one, where we group our training data into batches on which we perform batch gradient descent sequentially. For example we might update the weights after every 64 training examples. This is much more efficient than stochastic gradient descent, but still has some noise and more regular descents than the pure batch method.
# +
net1 = Network(QuadraticCost_cls())
net1.add_layer(Layer(4, 5, Sigmoid_cls()))   # Hidden layer
net1.add_layer(Layer(5, 10, Sigmoid_cls()))  # Hidden layer
net1.add_layer(Layer(10, 30, Sigmoid_cls())) # Hidden layer
net1.add_layer(Layer(30, 2, Sigmoid_cls()))  # Output layer
x1 = np.array([1,2,3,4]).reshape(4,1)
y1 = np.array([1,0]).reshape(2,1)
x2 = np.array([4,3,2,1]).reshape(4,1)
y2 = np.array([0,1]).reshape(2,1)
# Batch gradient descent over both examples: average the two gradients, then descend.
# NOTE: this previously wrapped backprop's (list-of-arrays, list-of-arrays) result
# in np.array(...), relying on ragged object-array creation, which newer NumPy
# rejects (NEP 34). Averaging layer-by-layer with zip avoids that entirely.
for i in range(200):
    g_b1, g_w1 = net1.backprop(x1,y1)
    g_b2, g_w2 = net1.backprop(x2,y2)
    diff_b = [(b1 + b2) / 2 for b1, b2 in zip(g_b1, g_b2)]
    diff_w = [(w1 + w2) / 2 for w1, w2 in zip(g_w1, g_w2)]
    for j in range(len(net1.layers)):
        layer = net1.layers[j]
        layer.weights -= diff_w[j]
        layer.bias -= diff_b[j]
net1.evaluate(x1)
net1.evaluate(x2)
# -
# We'll create a simple toy dataset of 500 examples, where each example has an input of shape (2,1) and each output has a shape (2,1)
#
# Our input values will all be between 0-1, and our target outputs will all be (1,0) or (0,1), ie another simple classification.
#
# We'll use completely random input values, and evenly split the target output between (1,0) and (0,1). This might not work very well because we're not giving the network any inherent structure to learn, but it lets us work with a slightly larger dataset.
#
# Note: Might want to change this to have a rule, eg x[0]<=0.5 => y[0]=1 (maybe only do that afterward to introduce a test set)
# +
def generate_toy_dataset(num_examples = 500):
    """Build a toy binary-classification dataset of (x, y) column-vector pairs.

    The first half of the examples have x[0] in [0, 0.5) and target (1, 0);
    the second half have x[0] in [0.5, 1) and target (0, 1). x[1] is uniform
    noise in both classes. Each x and y has shape (2, 1).
    """
    examples_per_class = int(num_examples/2)
    dataset = []
    # First class: x[0] drawn from [0, 0.5), target column (1, 0).
    for _ in range(examples_per_class):
        features = np.array([np.random.rand()/2, np.random.rand()]).reshape(2,1)
        target = np.array([1,0]).reshape(2,1)
        dataset.append((features, target))
    # Second class: x[0] drawn from [0.5, 1), target column (0, 1).
    for _ in range(examples_per_class):
        features = np.array([(np.random.rand()/2)+0.5, np.random.rand()]).reshape(2,1)
        target = np.array([0,1]).reshape(2,1)
        dataset.append((features, target))
    return dataset
dataset = generate_toy_dataset()
# -
def create_mini_batches(dataset, batch_size=64):
    """Split `dataset` into consecutive mini-batches of at most `batch_size` items.

    The final batch is smaller when len(dataset) is not a multiple of batch_size.
    Fixes two defects in the original: the slicing hard-coded 64 instead of
    honouring `batch_size`, and an exactly-divisible dataset produced a trailing
    EMPTY batch (which crashes update_mini_batch with a division by zero).
    """
    mini_batches = []
    for start in range(0, len(dataset), batch_size):
        mini_batches.append(dataset[start:start+batch_size])
    return mini_batches
net1 = Network(QuadraticCost_cls())
mini_batches = create_mini_batches(dataset)
# Inspect the batching: 500 examples at 64 per batch -> 8 batches, the last partial.
# Parenthesised print calls are valid on both Python 2 and 3, matching the
# print() style used elsewhere in this file.
print(len(mini_batches))
print(len(mini_batches[0]))
print(len(mini_batches[7]))
print(mini_batches[0][0])
def update_mini_batch(network, mini_batch, eta=0.1, p=False):
    """Apply one gradient-descent step to `network`, averaged over `mini_batch`.

    eta is the learning rate; `p` is accepted for interface compatibility but
    is not used by this function.
    """
    num_layers = len(network.layers)
    # Accumulate the summed gradients over every example in the batch.
    gradient_b = [np.zeros(layer.bias.shape) for layer in network.layers]
    gradient_w = [np.zeros(layer.weights.shape) for layer in network.layers]
    for x, y in mini_batch:
        delta_gradient_b, delta_gradient_w = network.backprop(x, y)
        for idx in range(num_layers):
            gradient_b[idx] += delta_gradient_b[idx]
            gradient_w[idx] += delta_gradient_w[idx]
    # After all the training examples, step each layer down the averaged gradient.
    batch_size = len(mini_batch)
    for idx in range(num_layers):
        layer = network.layers[idx]
        layer.weights -= (gradient_w[idx] * eta/batch_size)
        layer.bias -= (gradient_b[idx] * eta/batch_size)
# The final piece to put all this together is some kind of evaluation function. We could use the cost function, but it's simpler to treat this as classification - we'll choose the larger output as our class, and see how many we get right.
def score(network, test_data):
    """Count how many (x, y) pairs in `test_data` the network classifies correctly.

    The predicted class is the index of the largest network output; the expected
    class is the index of the largest entry in the one-hot target y.
    """
    num_correct = 0
    for x, y in test_data:
        predicted = np.argmax(network.evaluate(x))
        expected = np.argmax(y)
        if predicted == expected:
            num_correct += 1
    return num_correct
def SGD(network, training_data, num_epochs=20, eta=1):
    """Train `network` by mini-batch stochastic gradient descent.

    Shuffles the training data once, then for each epoch applies one update
    per mini-batch and prints the training-set score.

    Fix: the epoch loop previously updated the global `net1` rather than the
    `network` argument, so training any other network silently did nothing.
    """
    np.random.shuffle(training_data)
    mini_batches = create_mini_batches(training_data, batch_size=64)
    for i in range(num_epochs):
        for mini_batch in mini_batches:
            update_mini_batch(network, mini_batch, eta, i==1)
        # Evaluate the network on the training data
        num_correct = score(network, training_data)
        print(num_correct)
# +
# Add some layers to the network
net1 = Network(QuadraticCost_cls())
net1.add_layer(Layer(2, 10, Sigmoid_cls())) # Hidden layer
net1.add_layer(Layer(10, 2, Sigmoid_cls())) # Output layer
# Train on the toy data and print the training-set score after each epoch.
dataset = generate_toy_dataset(num_examples=500)
SGD(net1, dataset, eta=1)
# -
# Evaluating on our training set after each epoch lets us see how it's performing, but what we're really interested in is the performance on data that the network hasn't been trained on. We'll generate a smaller test data set and score the network on those after each epoch.
# +
def to_percent(correct, total):
    """Return `correct` as a percentage of `total`.

    Uses a float literal so the result is exact under Python 2's truncating
    integer division as well as Python 3 (50/200 would otherwise be 0 on Py2).
    """
    return 100.0 * correct / total
def SGD(network, training_data, validation_data=None, num_epochs=20, eta=1):
    """Mini-batch stochastic gradient descent with per-epoch scoring.

    Scores against `validation_data` when provided, otherwise falls back to
    the (shuffled) training data, and prints progress after each epoch.
    Parenthesised print calls keep this cell valid on both Python 2 and 3,
    consistent with the print() style used elsewhere in this file.
    """
    np.random.shuffle(training_data)
    if not validation_data:
        print("WARN: test data not given, so scoring on training data")
        validation_data = training_data # Score on the training data if we don't have test data
    validation_count = len(validation_data)
    mini_batches = create_mini_batches(training_data, batch_size=64)
    for i in range(num_epochs):
        for mini_batch in mini_batches:
            update_mini_batch(network, mini_batch, eta, i==1)
        # Evaluate the network on the held-out data after this epoch
        num_correct = score(network, validation_data)
        print("Epoch %d complete: %d/%d (%d%%)" %
              (i+1, num_correct, validation_count, to_percent(num_correct, validation_count)))
# +
# Add some layers to the network
net1 = Network(QuadraticCost_cls())
net1.add_layer(Layer(2, 10, Sigmoid_cls())) # Hidden layer
net1.add_layer(Layer(10, 2, Sigmoid_cls())) # Output layer
# Train on 500 examples; score each epoch against 100 freshly generated,
# unseen examples from the same distribution.
train_data = generate_toy_dataset(num_examples=500)
test_data = generate_toy_dataset(num_examples=100)
SGD(net1, train_data, test_data, eta=1)
# -
# # MNIST
# ## Preparing the data
import csv
import numpy as np
# Load the raw MNIST training CSV: the first row is a header, then one row per
# image (the label followed by 784 pixel intensities).
with open('mnist-data/train.csv','r') as train_csv_file:
    train_data_iter = csv.reader(train_csv_file)
    train_data_list = [data for data in train_data_iter]
# Parenthesised print calls are valid on both Python 2 and 3, matching the
# print() style used elsewhere in this file.
print(train_data_list[0])
print("")
print(train_data_list[1])
# Exclude the header row
train_data = np.asarray(train_data_list[1:], dtype=int)
train_data.shape
train_data[0]
train_data[:5]
# ## Train/validation split
#
# We have 42,000 pairs of image (represented as 784 values) and labels. We want to test our network on unseen values, but we don't want to use the actual test data for that because we don't have their real labels to evaluate them. So we split our data 90%/10%, training with the 90% and validating the network's progress using the other 10%.
# +
# Index at which 90% of the data ends (a float; truncated to int below).
split = len(train_data) * 0.9
split
splits = np.split(train_data, [int(split), len(train_data)+1])
# Returns one with len 90%, one with len 10%, one with len 0
train_data, valid_data = splits[0], splits[1]
# +
# Features: every column except the first (the first column holds the label).
train_xs = np.delete(train_data, 0, axis=1) # Delete first column
train_xs.shape
train_xs[:5]
valid_xs = np.delete(valid_data, 0, axis=1)
valid_xs.shape
valid_xs[:5]
# +
# Labels: just the first column, as a flat array of digits 0-9.
train_ys = train_data[:,0] # Get first column
train_ys.shape
train_ys[:5]
valid_ys = valid_data[:,0]
valid_ys.shape
valid_ys[:5]
# -
# ## Create a network
# 784 pixel inputs -> one 30-neuron hidden layer -> 10 outputs (one per digit).
mnist_net = Network(QuadraticCost_cls())
mnist_net.add_layer(Layer(784, 30, Sigmoid_cls()))
mnist_net.add_layer(Layer(30, 10, Sigmoid_cls())) # Output layer
# +
# We already almost have our input in a good state:
x = train_xs[0]
x.shape
# So we'll just reshape it to (784,1) to match our expectation:
train_xs = [x.reshape(784,1) for x in train_xs]
valid_xs = [x.reshape(784,1) for x in valid_xs]
# +
# We need to convert our outputs to a (10,1) array, this is called one-hot encoding
def one_hot(x):
    """One-hot encode digit `x` (0-9) as a (10, 1) integer column vector."""
    encoding = np.zeros((10, 1), dtype=int)
    encoding[x, 0] = 1
    return encoding
# A few examples of the encoding, then convert every label column to one-hot.
one_hot(0)
one_hot(2)
one_hot(9)
train_ys = [one_hot(y) for y in train_ys]
valid_ys = [one_hot(y) for y in valid_ys]
# +
# Now just zip them back into (x,y) pairs.
# list() makes the pairing concrete: on Python 3 zip() returns a lazy iterator,
# which could not be indexed below and would be exhausted by a single pass
# through SGD. (list(zip(...)) is also valid, if redundant, on Python 2.)
train_data = list(zip(train_xs, train_ys))
valid_data = list(zip(valid_xs, valid_ys))
train_data[0][1].shape
# -
SGD(mnist_net, train_data, validation_data=valid_data, eta=0.5, num_epochs=50)
# +
# TODO: Some charts of correct % per epoch for different etas
# -
| literate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
### Total baseline Training ###
import csv
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import os
def normalization(x):
    """Min-max scale `x` onto [0, 1]."""
    span = np.max(x) - np.min(x)
    return (x - np.min(x)) / span
# Plot per-trial cumulative-reward curves (raw and "original" rewards) for every
# baseline-RL run directory, and save both figures.
path_pool = os.listdir('data/BaseRL/')
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
label_pool = range(len(path_pool))
set_number = 0
for path in path_pool:
    file_list = os.listdir('data/BaseRL/{:s}'.format(path))
    n_files = len(file_list)
    # Checkpoint CSVs exist every 500 iterations; presumably 3 files per
    # directory are not checkpoints and there are 2 files per checkpoint --
    # TODO confirm against the data layout.
    iteration = np.arange(0, int((n_files - 3)/2*500), 500)
    t = np.linspace(0,60,len(iteration))  # map checkpoints onto a 60-minute axis
    totals = []
    original_totals = []
    for it in iteration:
        data = pd.read_csv('data/BaseRL/{:s}/run_{:d}_0.csv'.format(path,it))
        original_rewards = data['original_rewards']
        indiv_original_rewards = np.zeros(len(original_rewards))
        rewards = data['rewards']
        indiv_rewards = np.zeros(len(rewards))
        # Running cumulative sum; only the final total is plotted (the
        # per-step indiv_* arrays are filled but not used afterwards).
        total_rewards = 0
        for i in range(len(rewards)):
            total_rewards += rewards[i]
            indiv_rewards[i] = total_rewards
        totals.append(total_rewards)
        total_original_rewards = 0
        for i in range(len(original_rewards)):
            total_original_rewards += original_rewards[i]
            indiv_original_rewards[i] = total_original_rewards
        original_totals.append(total_original_rewards)
    #totals = (totals - np.min(totals))/(np.max(totals) - np.min(totals))
    #original_totals = (original_totals - np.min(original_totals))/(np.max(original_totals) - np.min(original_totals))
    ax1.plot(t,totals,label='{:d} Trial'.format(label_pool[set_number]+1))
    ax2.plot(t,original_totals,label='{:d} Trial'.format(label_pool[set_number]+1))
    set_number += 1
ax1.set_title('Baseline RL Training Results')
ax1.set_xlabel('Time(m)')
ax1.set_ylabel('Rewards')
ax1.legend()
ax1.set_xlim([-1, 61])
ax1.grid()
fig1.savefig('data/baseline_total.png')
ax2.set_title('Baseline RL Training Results')
ax2.set_xlabel('Time(m)')
ax2.set_ylabel('Rewards')
ax2.legend()
ax2.set_xlim([-1, 61])
ax2.grid()
fig2.savefig('data/baseline_original.png')
print('Done')
# +
### Total baseline Training Errorbar ###
import csv
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
import os
import operator
def normalization(x):
    """Rescale `x` linearly so its minimum maps to 0 and its maximum to 1."""
    low = np.min(x)
    high = np.max(x)
    return (x - low) / (high - low)
# Aggregate the baseline-RL trials into mean curves with error bars spanning the
# per-checkpoint min/max across trials, and save both figures.
path_pool = os.listdir('data/BaseRL/')
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
label_pool = range(len(path_pool))
# Fixed checkpoint grid (every 500 iterations up to 61000) mapped onto 60 minutes.
iteration = np.arange(0, 61000, 500)
t = np.linspace(0,60,len(iteration))
# Per-checkpoint extremes across trials; the sentinels assume every total
# reward lies within (-99999, 99999) -- TODO confirm.
max_totals = [-99999]*len(iteration)
max_origis = [-99999]*len(iteration)
min_totals = [99999]*len(iteration)
min_origis = [99999]*len(iteration)
avg_totals = np.zeros((len(path_pool),len(iteration)))
avg_origis = np.zeros((len(path_pool),len(iteration)))
set_number = 0
for path in range(len(path_pool)):
    file_list = os.listdir('data/BaseRL/{:s}'.format(path_pool[path]))
    totals = []
    original_totals = []
    for it in range(len(iteration)):
        data = pd.read_csv('data/BaseRL/{:s}/run_{:d}_0.csv'.format(path_pool[path],iteration[it]))
        original_rewards = data['original_rewards']
        indiv_original_rewards = np.zeros(len(original_rewards))
        rewards = data['rewards']
        indiv_rewards = np.zeros(len(rewards))
        # Cumulative sums; only the final totals feed the statistics below.
        total_rewards = 0
        for i in range(len(rewards)):
            total_rewards += rewards[i]
            indiv_rewards[i] = total_rewards
        totals.append(total_rewards)
        total_original_rewards = 0
        for i in range(len(original_rewards)):
            total_original_rewards += original_rewards[i]
            indiv_original_rewards[i] = total_original_rewards
        original_totals.append(total_original_rewards)
        avg_totals[path][it] = total_rewards
        avg_origis[path][it] = total_original_rewards
        # Track the running min/max of each checkpoint across trials.
        if total_rewards < min_totals[it]:
            min_totals[it] = total_rewards
        if total_original_rewards < min_origis[it]:
            min_origis[it] = total_original_rewards
        if total_rewards > max_totals[it]:
            max_totals[it] = total_rewards
        if total_original_rewards > max_origis[it]:
            max_origis[it] = total_original_rewards
    #totals = (totals - np.min(totals))/(np.max(totals) - np.min(totals))
    #original_totals = (original_totals - np.min(original_totals))/(np.max(original_totals) - np.min(original_totals))
    #ax1.plot(t,totals,label='{:d} Trial'.format(label_pool[set_number]))
    #ax2.plot(t,original_totals,label='{:d} Trial'.format(label_pool[set_number]))
    #set_number += 1
# Overwrite the last trial's totals with the across-trial averages for plotting.
for tt in range(len(totals)):
    totals[tt] = np.average(avg_totals[:,tt])
for tt in range(len(original_totals)):
    original_totals[tt] = np.average(avg_origis[:,tt])
# NOTE(review): both yerr components come out non-positive here (mean - max and
# min - mean), while matplotlib's errorbar expects non-negative magnitudes
# ordered [lower, upper]; this looks like it should be [mean - min, max - mean]
# -- confirm against the saved figures / matplotlib version in use.
ax1.errorbar(t, totals, yerr=[list(map(operator.sub, totals, max_totals)),
                              list(map(operator.sub, min_totals, totals))], fmt='o', label='Rewards')
ax2.errorbar(t, original_totals, yerr=[list(map(operator.sub, original_totals, max_origis)),
                                       list(map(operator.sub, min_origis, original_totals))], fmt='o', label='Original_rewards')
ax1.set_title('Baseline RL Training Results')
ax1.set_xlabel('Time(m)')
ax1.set_ylabel('Rewards')
ax1.legend()
ax1.set_xlim([-1, 61])
ax1.grid()
fig1.savefig('data/baseline_total_error.png')
ax2.set_title('Baseline RL Training Results')
ax2.set_xlabel('Time(m)')
ax2.set_ylabel('Rewards')
ax2.legend()
ax2.set_xlim([-1, 61])
ax2.grid()
fig2.savefig('data/baseline_original_error.png')
print('Done')
# -
| Report/Data/Plots/Codes/baseline_total.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp helper_functions
# -
# # helper_functions
#
# > read and preprocess netcdf data
from nbdev import *
from drcstools.helper_functions import *
#export
import xarray as xr
import numpy as np
from glob import glob
#export
def rotated_grid_transform(grid_in, option, SP_coor):
    """Convert (lon, lat) between a regular and a rotated-pole grid.

    grid_in: (lon, lat) pair in degrees.
    option: 1 transforms regular -> rotated; any other value rotated -> regular.
    SP_coor: (lon, lat) of the rotated grid's south pole, in degrees.
    Returns the transformed (lon, lat) in degrees.
    """
    pi = np.pi
    # Unpack and convert the input coordinates to radians.
    lon = (grid_in[0]*pi)/180
    lat = (grid_in[1]*pi)/180
    # Rotation angles derived from the south-pole position.
    SP_lon = SP_coor[0] - 180
    SP_lat = -SP_coor[1]
    theta = 90+SP_lat  # Rotation around y-axis
    phi = SP_lon       # Rotation around z-axis
    theta = (theta*pi)/180
    phi = (phi*pi)/180
    # Spherical -> Cartesian on the unit sphere.
    x = np.cos(lon)*np.cos(lat)
    y = np.sin(lon)*np.cos(lat)
    z = np.sin(lat)
    if option == 1:  # Regular -> Rotated
        x_new = np.cos(theta)*np.cos(phi)*x + np.cos(theta)*np.sin(phi)*y + np.sin(theta)*z
        y_new = -np.sin(phi)*x + np.cos(phi)*y
        z_new = -np.sin(theta)*np.cos(phi)*x - np.sin(theta)*np.sin(phi)*y + np.cos(theta)*z
    else:  # Rotated -> Regular: the inverse rotation, via negated angles
        phi = -phi
        theta = -theta
        x_new = np.cos(theta)*np.cos(phi)*x + np.sin(phi)*y + np.sin(theta)*np.cos(phi)*z
        y_new = -np.cos(theta)*np.sin(phi)*x + np.cos(phi)*y - np.sin(theta)*np.sin(phi)*z
        z_new = -np.sin(theta)*x + np.cos(theta)*z
    # Cartesian -> spherical, then back to degrees.
    lon_new = np.arctan2(y_new,x_new)
    lat_new = np.arcsin(z_new)
    lon_new = (lon_new*180)/pi
    lat_new = (lat_new*180)/pi
    return lon_new , lat_new
#export
def adjust_lon_lat(ds, lon_name, lat_name, reverse = False):
    """Convert longitudes from [0, 360) to [-180, 180) and optionally flip latitude.

    Args:
        ds (xarray.Dataset): dataset with 1-D longitude/latitude coordinates.
        lon_name (str): name of the longitude coordinate in ``ds``.
        lat_name (str): name of the latitude coordinate in ``ds``.
        reverse (bool): if True, reverse the latitude axis first.

    Returns:
        xarray.Dataset: dataset re-indexed on the adjusted, ascending longitudes.
    """
    if reverse:  # idiomatic truth test instead of `== True`
        ds = ds.reindex({lat_name: ds[lat_name][::-1]})
    # Wrap longitudes above 180 into the negative half, e.g. 350 -> -10.
    ds['_longitude_adjusted'] = xr.where(
        ds[lon_name] > 180,
        ds[lon_name] - 360,
        ds[lon_name])
    # Re-dimension on the adjusted values, sort ascending, drop the old coord.
    ds = (ds
          .swap_dims({lon_name: '_longitude_adjusted'})
          .sel(**{'_longitude_adjusted': sorted(ds._longitude_adjusted)})
          .drop(lon_name))
    ds = ds.rename({'_longitude_adjusted': lon_name})
    return ds
show_doc(adjust_lon_lat)  # nbdev: render this function's documentation in the notebook
# Args:
# - ds (xarray): xarray Dataset
# - lon_name (str): name of longitude in ds
# - lat_name (str): name of latitude in ds
# - reverse (bool): if True latitude is reversed
#
# Returns:
# - ds (xarray)
#export
def guess_bounds(points, bound_position=0.5):
    """
    Guess bounds of grid cells.

    Simplified function from iris.coord.Coord.

    Parameters
    ----------
    points: numpy.array
        Array of grid points of shape (N,).
    bound_position: float, optional
        Bounds offset relative to the grid cell centre.

    Returns
    -------
    Array of shape (N, 2).
    """
    steps = np.diff(points)
    # Repeat the first/last spacing so every point has a step on both sides.
    padded = np.concatenate(([steps[0]], steps, [steps[-1]]))
    lower = points - padded[:-1] * bound_position
    upper = points + padded[1:] * (1.0 - bound_position)
    # Pair each point's lower/upper bound along the last axis -> (N, 2).
    return np.stack((lower, upper), axis=-1)
show_doc(guess_bounds)  # nbdev: render this function's documentation in the notebook
#export
def quadrant_area(radian_lat_bounds, radian_lon_bounds, radius_of_earth):
    """
    Calculate spherical segment areas.

    Taken from SciTools iris library.

    Area weights are calculated for each lat/lon cell as:

    .. math::

        r^2 (lon_1 - lon_0) ( sin(lat_1) - sin(lat_0))

    The resulting array will have a shape of
    *(radian_lat_bounds.shape[0], radian_lon_bounds.shape[0])*

    The calculations are done at 64 bit precision and the returned array
    will be of type numpy.float64.

    Parameters
    ----------
    radian_lat_bounds: numpy.array
        Array of latitude bounds (radians) of shape (M, 2)
    radian_lon_bounds: numpy.array
        Array of longitude bounds (radians) of shape (N, 2)
    radius_of_earth: float
        Radius of the Earth (currently assumed spherical)

    Returns
    -------
    Array of grid cell areas of shape (M, N).
    """
    def _is_bound_pairs(bounds):
        # A valid bounds array is 2-D with exactly two values per row.
        return bounds.ndim == 2 and bounds.shape[-1] == 2

    if not (_is_bound_pairs(radian_lat_bounds) and _is_bound_pairs(radian_lon_bounds)):
        raise ValueError("Bounds must be [n,2] array")

    # Force 64-bit so the trig and the outer product are done in double precision.
    lat64 = radian_lat_bounds.astype(np.float64)
    lon64 = radian_lon_bounds.astype(np.float64)
    # Meridional extent enters via sin(lat); zonal extent is linear in longitude.
    sin_span = np.sin(lat64[:, 1]) - np.sin(lat64[:, 0])
    lon_span = lon64[:, 1] - lon64[:, 0]
    # abs() because backwards bounds (min > max) would yield negative areas.
    return np.abs(radius_of_earth ** 2 * np.outer(sin_span, lon_span))
show_doc(quadrant_area)  # nbdev: render this function's documentation in the notebook
#export
def grid_cell_areas(lon1d, lat1d, radius=6371000.0):
    """
    Calculate grid cell areas given 1D arrays of longitudes and latitudes
    for a planet with the given radius.

    Parameters
    ----------
    lon1d: numpy.array
        Array of longitude points [degrees] of shape (M,)
    lat1d: numpy.array
        Array of latitude points [degrees] of shape (M,)
    radius: float, optional
        Radius of the planet [metres] (currently assumed spherical)

    Returns
    -------
    Array of grid cell areas [metres**2] of shape (M, N).
    """
    # Estimate cell edges from the point centres, then convert to radians.
    lon_bounds = np.deg2rad(guess_bounds(lon1d))
    lat_bounds = np.deg2rad(guess_bounds(lat1d))
    return quadrant_area(lat_bounds, lon_bounds, radius)
show_doc(grid_cell_areas)  # nbdev: render this function's documentation in the notebook
# NOTE(review): xarray is already imported as `xr` at the top of this notebook;
# this duplicate import is redundant but harmless.
import xarray
# Load two time steps of the ERA5 sample; requires data/era5.nc on disk.
ds = xr.open_dataset("data/era5.nc", use_cftime = True).isel(time = slice(0,2))
lons = ds.longitude.values
lats = ds.latitude.values
# Compute per-cell areas [m**2] and attach them as a 2-D (lat, lon) variable.
area_t = grid_cell_areas(lon1d = lons, lat1d = lats)
ds['area_t'] = (("latitude", "longitude"), area_t)
ds.area_t.plot()
| 03_helper_functions.ipynb |