code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# Definitions:
# \begin{eqnarray}
# y & = & e^{a} k^{\alpha} {n}^{1-\alpha} \\
# c & = & y - i \\
# r & = & \alpha (y/k) = \alpha (k/{n})^{\alpha-1}\\
# w & = & (1-\alpha) y/{n}
# \end{eqnarray}
#
# Arbitrage:
# \begin{eqnarray}
# 0 & = & \chi {n}^{\eta}/c^{-\rho} - w \\
# 0 & = & 1 - (1-\delta+r_{t+1})\beta(c_{t+1}/c_{t})^{-\rho}
# \end{eqnarray}
#
# Transition:
# \begin{eqnarray}
# a_{t+1} & = & \omega a_{t} + \epsilon_{a} \\
# k_{t+1} & = & (1-\delta) k_{t} + i_{t}
# \end{eqnarray}
#
# Expectation:
# \begin{eqnarray}
# m & = & \beta (1-\delta+r_{t+1})c_{t+1}^{-\rho}
# \end{eqnarray}
#
# Value:
# \begin{eqnarray}
# v_{t} & = & u(c_{t},{n}_{t}) + \beta v_{t+1}
# \end{eqnarray}
#
# Felicity:
# \begin{eqnarray}
# u(c,{n}) & = & \left(\frac{c^{1-\rho}}{1-\rho}\right)-\chi \left(\frac{{n}^{1+\eta}}{1+\eta}\right)
# \end{eqnarray}
# \begin{eqnarray}
# u^{c} & = & c^{-\rho} \\
# u^{n} & = & - \chi {n}^{\eta}
# \end{eqnarray}
#
# so
# \begin{eqnarray}
# \left(\frac{W}{1}\right) & = & \left(\frac{\chi {n}^{\eta}}{c^{-\rho}}\right)
# \end{eqnarray}
# Modifications:
#
# 1. Do not optimize labor
# 1. Depreciation happens after production
# 1. Labor shocks
#
# Definitions:
# \begin{eqnarray}
# y & = & k^{\alpha} {n P \Theta}^{1-\alpha} \\
# c & = & y - i \\
# rd & = & \alpha (y/k) = \alpha \left(\frac{k}{n P \Theta}\right)^{\alpha-1}-\delta\\
# w & = & (1-\alpha) y/{(n P \Theta)}
# \end{eqnarray}
#
# Arbitrage:
# \begin{eqnarray}
# % 0 & = & \chi {n}^{\eta}/c^{-\rho} - w \\
# 0 & = & 1 - (1+rd_{t+1})\beta(c_{t+1}/c_{t})^{-\rho}
# \end{eqnarray}
#
# Transition:
# \begin{eqnarray}
# p_{t+1} & = & \omega p_{t} + \epsilon_{p} \\
# k_{t+1} & = & (1-\delta) k_{t} + i_{t}
# \end{eqnarray}
#
# Expectation:
# \begin{eqnarray}
# m & = & \beta (1+rd_{t+1})c_{t+1}^{-\rho}
# \end{eqnarray}
#
# Value:
# \begin{eqnarray}
# v_{t} & = & u(c_{t},{n}_{t}) + \beta v_{t+1}
# \end{eqnarray}
#
# Felicity:
# \begin{eqnarray}
# u(c,{n}) & = & \left(\frac{c^{1-\rho}}{1-\rho}\right)-\chi \left(\frac{{n}^{1+\eta}}{1+\eta}\right)
# \end{eqnarray}
import numpy as np
from matplotlib import pyplot as plt
# # Solving the rbc model
#
# This worksheet demonstrates how to solve the RBC model with the [dolo](http://econforge.github.io/dolo/) library
# and how to generate impulse responses and stochastic simulations from the solution.
#
# - This notebook is distributed with dolo in : ``examples\notebooks\``. The notebook was opened and run from that directory.
# - The model file is in : ``examples\global_models\``
#
# First we import the dolo library.
from dolo import *
# # The RBC model
# + [markdown] run_control={"breakpoint": false}
# The RBC model is defined in a [YAML](http://www.yaml.org/spec/1.2/spec.html#Introduction) file which we can read locally or pull off the web.
# + run_control={"breakpoint": false}
# filename = ('https://raw.githubusercontent.com/EconForge/dolo'
# '/master/examples/models/compat/rbc.yaml')
filename='../models/rbc_cdc-to.yaml'
# %cat $filename
# + [markdown] run_control={"breakpoint": false}
# `yaml_import(filename)` reads the YAML file and generates a model object.
# -
filename='/Volumes/Data/GitHub/llorracc/dolo/examples/models/rbc_cdc-to_030-Add-Transitory-Shocks.yaml'
#rbc_cdc-to_040-Change-timing-of-Depreciation.yaml
model = yaml_import(filename)
# + [markdown] run_control={"breakpoint": false}
# The model file already has values for steady-state variables stated in the calibration section so we can go ahead and check that they are correct by computing the model equations at the steady state.
# -
model.residuals()
# + [markdown] run_control={"breakpoint": false}
# Printing the model also lets us have a look at all the model equations and check that all residual errors are 0 at the steady-state, but with less display prescision.
# -
print( model )
# + [markdown] run_control={"breakpoint": false}
# Next we compute a solution to the model using a first order perturbation method (see the source for the [approximate_controls](https://github.com/EconForge/dolo/blob/master/dolo/algos/perturbation.py) function). The result is a decision rule object. By decision rule we refer to any object that is callable and maps states to decisions. This particular decision rule object is a TaylorExpansion (see the source for the [TaylorExpansion](https://github.com/EconForge/dolo/blob/master/dolo/numeric/taylor_expansion.py) class).
# + run_control={"breakpoint": false}
filename='/Volumes/Data/GitHub/llorracc/dolo/examples/models/rbc_cdc-to_030-Add-Transitory-Shocks.yaml'
filename='/Volumes/Data/GitHub/llorracc/dolo/examples/models/rbc_cdc-to_021-Expectation-to-mu.yaml'
filename='/Volumes/Data/GitHub/llorracc/dolo/examples/models/rbc_cdc-to.yaml'
#filename='/Volumes/Data/GitHub/llorracc/dolo/examples/models/rbc_cdc-to_005_PShockToLabor.yaml'
#filename='/Volumes/Data/GitHub/llorracc/dolo/examples/models/rbc_cdc-to_008_PShockToLabor-TShk.yaml'
filename='/Volumes/Data/GitHub/llorracc/dolo/examples/models/rbc_cdc-to_015_Do-Not-Optimize-On-Labor.yaml'
#filename='/Volumes/Data/GitHub/llorracc/dolo/examples/models/rbc_cdc-to_021-Expectation-to-mu_2.yaml'
#filename='/Volumes/Data/GitHub/llorracc/dolo/examples/models/rbc_cdc-to_041-Change-timing-of-Depreciation.yaml'
model = yaml_import(filename)
dr_pert = perturbate(model)
dr_global = time_iteration(model)
tab_global = tabulate(model, dr_global, 'k')
tab_pert = tabulate(model, dr_pert, 'k')
from matplotlib import pyplot as plt
plt.figure(figsize=(8,3.5))
plt.subplot(121)
plt.plot(tab_global['k'], tab_global['i'], label='Global')
plt.plot(tab_pert['k'], tab_pert['i'], label='Perturbation')
plt.ylabel('i')
plt.title('Investment')
plt.legend()
# plt.subplot(122)
# plt.plot(tab_global['k'], tab_global['n'], label='Global')
# plt.plot(tab_pert['k'], tab_pert['n'], label='Perturbation')
# plt.ylabel('n')
# plt.title('Labour')
# plt.legend()
plt.tight_layout()
original_delta = model.calibration['δ']
drs = []
delta_values = np.linspace(0.01, 0.04,5)
for val in delta_values:
model.set_calibration(δ=val)
drs.append(time_iteration(model))
plt.figure(figsize=(5,3))
for i,dr in enumerate(drs):
sim = tabulate(model, dr,'k')
plt.plot(sim['k'],sim['i'], label='$\delta={}$'.format(delta_values[i]))
plt.ylabel('i')
plt.title('Investment')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
model.set_calibration(δ=original_delta)
# -
# # Decision rule
#
# Here we plot optimal investment and labour for different levels of capital (see the source for the [plot_decision_rule](https://github.com/EconForge/dolo/blob/master/dolo/algos/simulations.py) function).
# It would seem, according to this, that second order perturbation does very well for the RBC model. We will revisit this issue more rigorously when we explore the deviations from the model's arbitrage section equations.
#
# Let us repeat the calculation of investment decisions for various values of the depreciation rate, $\delta$. Note that this is a comparative statics exercise, even though the models compared are dynamic.
# + [markdown] run_control={"breakpoint": false}
# We find that more durable capital leads to higher steady state investment and slows the rate of convergence for capital (the slopes are roughly the same, which implies that relative to steady state capital investment responds stronger at higher $\delta$; this is in addition to the direct effect of depreciation).
# -
# # Use the model to simulate
# + [markdown] run_control={"breakpoint": false}
# We will use the deterministic steady-state as a starting point.
# + run_control={"breakpoint": false}
s0 = model.calibration['states']
print(str(model.symbols['states'])+'='+str(s0))
# + [markdown] run_control={"breakpoint": false}
# We also get the covariance matrix just in case. This is a one shock model so all we have is the variance of $e_z$.
# -
sigma2_ez = model.exogenous.Sigma
sigma2_ez
# ## Impulse response functions
#
# Consider a 10% shock to productivity.
s1 = s0.copy()
s1[0] *= 1.0
print(str(model.symbols['states'])+'='+str(s1))
# + [markdown] run_control={"breakpoint": false}
# The `simulate` function is used both to trace impulse response functions and to compute stochastic simulations. Choosing `n_exp>=1`, will result in that many "stochastic" simulations. With `n_exp = 0`, we get one single simulation without any stochastic shock (see the source for the [simulate](https://github.com/EconForge/dolo/blob/master/dolo/algos/simulations.py) function).
# The output is a panda table of size $H \times n_v$ where $n_v$ is the number of variables in the model and $H$ the number of dates.
# -
simulate(model, dr, N=50, T=150)
from dolo.algos.simulations import response
m0 = model.calibration["exogenous"]
s0 = model.calibration["states"]
dr_global.eval_ms(m0, s0)
irf = response(model,dr_global, 'e_lP')
# + [markdown] run_control={"breakpoint": false}
# Let us plot the response of consumption and investment.
# -
plt.figure(figsize=(8,4))
plt.subplot(221)
plt.plot(irf.sel(V='lP'))
plt.title('Productivity')
plt.grid()
plt.subplot(222)
plt.plot(irf.sel(V='i'))
plt.title('Investment')
plt.grid()
#plt.subplot(223)
#plt.plot(irf.sel(V='n'))
#plt.grid()
#plt.title('Labour')
plt.subplot(224)
plt.plot(irf.sel(V='c'))
plt.title('Consumption')
plt.grid()
plt.tight_layout()
# Note that the plotting is made using the wonderful [matplotlib](http://matplotlib.org/users/pyplot_tutorial.html) library. Read the online [tutorials](http://matplotlib.org/users/beginner.html) to learn how to customize the plots to your needs (e.g., using [latex](http://matplotlib.org/users/usetex.html) in annotations). If instead you would like to produce charts in Matlab, you can easily export the impulse response functions, or any other matrix, to a `.mat` file.
# it is also possible (and fun) to use the graph visualization altair lib instead:
# it is not part of dolo dependencies. To install `conda install -c conda-forge altair`
import altair as alt
df = irf.drop('N').to_pandas().reset_index() # convert to flat database
base = alt.Chart(df).mark_line()
ch1 = base.encode(x='T', y='lP')
ch2 = base.encode(x='T', y='i')
ch3 = base.encode(x='T', y='n')
ch4 = base.encode(x='T', y='c')
(ch1|ch2)& \
(ch2|ch4)
irf_array = np.array( irf )
import scipy.io
scipy.io.savemat("export.mat", {'table': irf_array} )
# ## Stochastic simulations
#
# Now we run 1000 random simulations. The result is an array of size $T\times N \times n_v$ where
# - $T$ the number of dates
# - $N$ the number of simulations
# - $n_v$ is the number of variables
#
sim = simulate(model, dr_global, N=1000, T=40 )
print(sim.shape)
# + [markdown] run_control={"breakpoint": false}
# We plot the responses of consumption, investment and labour to the stochastic path of productivity.
# +
plt.figure(figsize=(8,4))
for i in range(1000):
plt.subplot(221)
plt.plot(sim.sel(N=i,V='z'), color='red', alpha=0.1)
plt.subplot(222)
plt.plot(sim.sel(N=i,V='i'), color='red', alpha=0.1)
plt.subplot(223)
plt.plot(sim.sel(N=i,V='n'), color='red', alpha=0.1)
plt.subplot(224)
plt.plot(sim.sel(N=i,V='c'), color='red', alpha=0.1)
plt.subplot(221)
plt.title('Productivity')
plt.subplot(222)
plt.title('Investment')
plt.subplot(223)
plt.title('Labour')
plt.subplot(224)
plt.title('Consumption')
plt.tight_layout()
# + [markdown] run_control={"breakpoint": false}
# We find that while the distribution of investment and labour converges quickly to the ergodic distribution, that of consumption takes noticeably longer. This is indicative of higher persistence in consumption, which in turn could be explained by permanent income considerations.
# + [markdown] run_control={"breakpoint": false}
# # Descriptive statistics
# A common way to evaluate the success of the RBC model is in its ability to mimic patterns in the descriptive statistics of the real economy. Let us compute some of these descriptive statistics from our sample of stochastic simulations. First we compute growth rates:
# + run_control={"breakpoint": false}
dsim = sim / sim.shift(T=1)
# + [markdown] run_control={"breakpoint": false}
# Then we compute the volatility of growth rates for each simulation:
# + run_control={"breakpoint": false}
volat = dsim.std(axis=1)
print(volat.shape)
# -
volat
# + [markdown] run_control={"breakpoint": false}
# Then we compute the mean and a confidence interval for each variable. In the generated table the first column contains the standard deviations of growth rates. The second and third columns contain the lower and upper bounds of the 95% confidence intervals, respectively.
# -
table = np.column_stack([
volat.mean(axis=0),
volat.mean(axis=0)-1.96*volat.std(axis=0),
volat.mean(axis=0)+1.96*volat.std(axis=0) ])
table
# We can use the [pandas](http://pandas.pydata.org/pandas-docs/stable/10min.html) library to present the results in a nice table.
import pandas
df = pandas.DataFrame(table, index=sim.V,
columns=['Growth rate std.',
'Lower 95% bound',
'Upper 95% bound' ])
pandas.set_option('precision', 4)
df
# # Error measures
# <mark>Marked text</mark>
#
#
# It is always important to get a handle on the accuracy of the solution. The `omega` function computes and aggregates the errors for the model's arbitrage section equations. For the RBC model these are the investment demand and labor supply equations. For each equation it reports the maximum error over the domain and the mean error using ergodic distribution weights (see the source for the [omega](https://github.com/EconForge/dolo/blob/master/dolo/algos/fg/accuracy.py) function).
# +
from dolo.algos.accuracy import omega
print("Perturbation solution")
err_pert = omega(model, dr_pert)
err_pert
# -
print("Global solution")
err_global=omega(model, dr_global)
err_global
# + [markdown] run_control={"breakpoint": false}
# The result of `omega` is a subclass of `dict`. `omega` fills that dict with some useful information that the default print does not reveal:
# -
err_pert.keys()
# + [markdown] run_control={"breakpoint": false}
# In particular the domain field contains information, like bounds and shape, that we can use to plot the spatial pattern of errors.
# + run_control={"breakpoint": false}
a = err_pert['domain'].a
b = err_pert['domain'].b
orders = err_pert['domain'].orders
errors = concatenate((err_pert['errors'].reshape( orders.tolist()+[-1] ),
err_global['errors'].reshape( orders.tolist()+[-1] )),
2)
figure(figsize=(8,6))
titles=["Investment demand pertubation errors",
"Labor supply pertubation errors",
"Investment demand global errors",
"Labor supply global errors"]
for i in range(4):
subplot(2,2,i+1)
imgplot = imshow(errors[:,:,i], origin='lower',
extent=( a[0], b[0], a[1], b[1]), aspect='auto')
imgplot.set_clim(0,3e-4)
colorbar()
xlabel('z')
ylabel('k')
title(titles[i])
tight_layout()
| examples/notebooks/rbc_model-variants-discard/rbc_model_Do-Not-Optimize-On-Labor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D1_ModelTypes/student/W1D1_Intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] pycharm={"name": "#%% md\n"}
# # Intro
# + [markdown] colab_type="text"
# **Our 2021 Sponsors, including Presenting Sponsor Facebook Reality Labs**
#
# <p align='center'><img src='https://github.com/NeuromatchAcademy/widgets/blob/master/sponsors.png?raw=True'/></p>
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Overview
# The first two days of NMA are all about the process of modeling and what models are. It’s important to learn about these meta-modeling aspects before diving into different kinds of modeling tools during the remainder of NMA. So today will be about the diversity of models and what different models can do for us. Typically everyone has an intuition about how different analysis tools can extract different kinds of information from experimental data; you choose a different analysis depending on your question. However, this is less appreciated for models. But it’s the same for them; you want to build a different kind of model to answer different kinds of questions. It all depends on your goals. So today, we will examine three kinds of models that we can classify as (according to Dayan and Abbott, 2001): what, how, and why models.
#
# Each tutorial will guide you through one of those models to describe the exact same data: the time interval between neuronal action potentials, aka inter-spike interval (ISI). In tutorial 1, we will ask what function best describes the shape of the ISI distribution (it’s an exponential distribution). Such a “what” model can compactly describe the ISI distribution and allows, for example, to quantify ISI properties across datasets, task conditions, brain areas etc. In tutorial 2, we ask which mechanism could generate the observed ISI distribution. Such a “how” model proposes a specific way that a system produces the observed behavior. Here, you will see that it’s a balance between excitation and inhibition that generates exponentially distributed ISIs. Finally we will ask “why” the exponential distribution is the most optimal way to code information in neurons. “Why” models thus ask about the underlying principles of a phenomenon.
#
# In any research, we typically start with descriptive (“what”) models; you will see examples of those during the model fitting, GLM, dimensionality reduction, and deep learning days. Next, we often ask about the mechanisms and build “how” models to generate or test hypotheses of underlying mechanisms; examples of those will be in linear systems, real neurons, dynamic networks, and decision making days. Ultimately, we are usually interested in the underlying reason of why the phenomenon exists in the first place; examples of those are in Bayes, optimal Control, and reinforcement learning days. “Why” models are often the hardest to achieve; “what” models are usually the easiest. But more importantly, they allow answering different questions, provide different insights and have different utilities. Thinking about the question I want to answer, why I want to answer this question (i.e. my goal) and the hypotheses I want to evaluate determines my own modeling choices every day. The resulting diversity in models is great because all models address different facets of a problem (like in today's 3 tutorials) and are thus complementary in our quest for knowledge. Today’s materials will hopefully allow you to better appreciate the opportunities and limitations offered by all the modeling tools you will learn during NMA.
#
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Video
# + cellView="form" pycharm={"name": "#%%\n"}
# @markdown
from ipywidgets import widgets
out2 = widgets.Output()
with out2:
from IPython.display import IFrame
class BiliVideo(IFrame):
def __init__(self, id, page=1, width=400, height=300, **kwargs):
self.id=id
src = "https://player.bilibili.com/player.html?bvid={0}&page={1}".format(id, page)
super(BiliVideo, self).__init__(src, width, height, **kwargs)
video = BiliVideo(id=f"BV1HT4y1E7U4", width=854, height=480, fs=1)
print("Video available at https://www.bilibili.com/video/{0}".format(video.id))
display(video)
out1 = widgets.Output()
with out1:
from IPython.display import YouTubeVideo
video = YouTubeVideo(id=f"KxldhMR5PxA", width=854, height=480, fs=1, rel=0)
print("Video available at https://youtube.com/watch?v=" + video.id)
display(video)
out = widgets.Tab([out1, out2])
out.set_title(0, 'Youtube')
out.set_title(1, 'Bilibili')
display(out)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Slides
# + cellView="form" pycharm={"name": "#%%\n"}
# @markdown
from IPython.display import IFrame
IFrame(src=f"https://mfr.ca-1.osf.io/render?url=https://osf.io/rbx2a/?direct%26mode=render%26action=download%26mode=render", width=854, height=480)
| tutorials/W1D1_ModelTypes/student/W1D1_Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # **Textile Defect Classification Using CNN**
# ### **Importing Libraries**
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
#importing relevant libraries
import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
import h5py
import cv2
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import matplotlib.image as mpimg
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import tensorflow as tf
import keras
from keras import layers
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers.core import Flatten, Dense, Dropout
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
# +
#loading fabric images
filename = "../input/textiledefectdetection/train64.h5"
with h5py.File(filename, "r") as f:
print("Keys: %s" % f.keys())
a_group_key = list(f.keys())[0]
X_train = np.array(f[a_group_key])
filename = "../input/textiledefectdetection/test64.h5"
with h5py.File(filename, "r") as f:
print("Keys: %s" % f.keys())
a_group_key = list(f.keys())[0]
X_test = np.array(f[a_group_key])
# -
X = np.concatenate((X_train, X_test))
X.shape
# +
#creating dataframe
df_train = pd.read_csv("../input/textiledefectdetection/train64.csv")
df_test = pd.read_csv("../input/textiledefectdetection/test64.csv")
df = pd.concat([df_train,df_test])
df.shape
# -
# ### **Exploratory Data Analysis**
plt.imshow(X[40000])
# +
#plotting the type of defect or no defect categorically
plt.figure(figsize=(10, 6), dpi=80)
sns.countplot(x='indication_type', data=df);
# -
# ### **Data Transformation & train-test splitting**
# +
#Extracting labels(type of defects) from the whole dataframe
y = df[["indication_type"]]
y = pd.get_dummies(y)
y.columns = ['Color','Cut','No Defect','Hole','Metal_Contamination','Thread']
y[:5]
# -
X.shape, y.shape
# +
#splitting randomly to remove order
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
X_train.shape,X_test.shape,y_train.shape,y_test.shape
# +
# X_train = X_train[:int(len(X_train)/3)][:][:][:]
# X_test = X_test[:int(len(X_test)/5)][:][:][:]
# y_train = y_train[:int(len(y_train)/3)][:][:][:]
# y_test = y_test[:int(len(y_test)/5)][:][:][:]
# X_train.shape,X_test.shape,y_train.shape,y_test.shape
# -
# ### **Image Data Generator**
# +
#transforming and creating batches to feed in our model
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
datagen.fit(X_train)
bs=32
train_batches = datagen.flow(X_train, y_train, batch_size=bs)
test_batches = datagen.flow(X_test, y_test, batch_size=bs)
type(train_batches)
# -
# ### **CNN MODEL**
# +
#model building
model = Sequential()
model.add(layers.Conv2D(100, (3, 3), activation='relu', input_shape=(64, 64, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(6,activation='softmax'))
model.compile(keras.optimizers.Adam(lr=.001), loss='categorical_crossentropy', metrics=['accuracy'])
# -
model.fit(train_batches, steps_per_epoch=len(X_train) //bs, validation_data=test_batches,
validation_steps=len(X_test)//bs, epochs=50, verbose=1)
# ### **Predictions**
# +
#predicting outputs for test dataset
datagen.fit(X_test)
predictions= model.predict(X_test)
predictions[:5]
# +
y_pred=[]
for i in range(len(predictions)):
y_pred.append(np.argmax(predictions[i]))
y_test2=[]
for i in range(len(y_test)):
y_test2.append(np.argmax(y_test.iloc[i,:]))
# +
labels=np.array(['Color','Cut','No Defect','Hole','Metal_Contamination','Thread'])
# # 0='Color'
# # 1='Cut'
# # 2='No Defect'
# # 3='Hole'
# # 4='Metal_Contamination'
# # 5='Thread'
# #rows=>true
# #column=>predicted
confusion_matrix(y_test2, y_pred, labels=[0,1,2,3,4,5])
#diagonal values representing the correct predicitions
# -
# ### **Sample Testing**
# +
i=1000
fig = plt.figure()
ax = fig.add_subplot(111)
ax.set_title("y_pred = "+str(labels[y_pred[i]])+"\n"+"y_true = "+str(labels[y_test2[i]]))
plt.imshow(X_test[i])
if(y_pred[i]!=y_test2[i]):
print("WRONG PREDICTION")
else:
print("PREDICTED ACCURATELY")
# -
| CNN_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Two Dimensional Histogram Filter - Your First Feature (and your first bug).
# Writing code is important. But a big part of being on a self driving car team is working with a **large** existing codebase. On high stakes engineering projects like a self driving car, you will probably have to earn the trust of your managers and coworkers before they'll let you make substantial changes to the code base.
#
# A typical assignment for someone new to a team is to make progress on a backlog of bugs. So with that in mind, that's what you will be doing for your first project in the Nanodegree.
#
# You'll go through this project in a few parts:
#
# 1. **Explore the Code** - don't worry about bugs at this point. The goal is to get a feel for how this code base is organized and what everything does.
# 2. **Implement a Feature** - write code that gets the robot moving correctly.
# 3. **Fix a Bug** - Implementing motion will reveal a bug which hadn't shown up before. Here you'll identify what the bug is and take steps to reproduce it. Then you'll identify the cause and fix it.
#
# ## Part 1: Exploring the code
# In this section you will just run some existing code to get a feel for what this localizer does.
#
# You can navigate through this notebook using the arrow keys on your keyboard. You can run the code in a cell by pressing **`Ctrl + Enter`**
#
# Navigate through the cells below. In each cell you should
#
# 1. Read through the code. It's okay to not understand everything at this point.
# 2. Make a guess about what will happen when you run the code.
# 3. Run the code and compare what you see with what you expected.
# 4. When you get to a **TODO** read the instructions carefully and complete the activity.
#
#
# +
# This code "imports" code from some of the other files we've written
# in this directory. Specifically simulate.py and helpers.py
import simulate as sim
import helpers
import localizer
# Don't worry too much about this code for now...
from __future__ import division, print_function
# %load_ext autoreload
# %autoreload 2
# -
# This code defines a 5x5 robot world as well as some other parameters
# which we will discuss later. It then creates a simulation and shows
# the initial beliefs.
R = 'r'
G = 'g'
grid = [
[R,G,G,G,R],
[G,G,R,G,R],
[G,R,G,G,G],
[R,R,G,R,G],
[R,G,R,G,R],
]
blur = 0.05
p_hit = 200.0
simulation = sim.Simulation(grid, blur, p_hit)
simulation.show_beliefs()
# Run the code below multiple times by repeatedly pressing Ctrl + Enter.
#
# After each run observe how the state has changed.
simulation.run(1)
simulation.show_beliefs()
# What do you think this call to `run` is doing? Look at the code in **`simulate.py`** to find out (remember - you can see other files in the current directory by clicking on the `jupyter` logo in the top left of this notebook).
#
# Spend a few minutes looking at the `run` method and the methods it calls to get a sense for what's going on.
# #### What am I looking at?
#
# The red star shows the robot's true position. The blue circles indicate the strength of the robot's belief that it is at any particular location.
#
# Ideally we want the biggest blue circle to be at the same position as the red star.
# +
# We will provide you with the function below to help you look
# at the raw numbers.
def show_rounded_beliefs(beliefs):
for row in beliefs:
for belief in row:
print("{:0.3f}".format(belief), end=" ")
print()
# The {:0.3f} notation is an example of "string
# formatting" in Python. You can learn more about string
# formatting at https://pyformat.info/
# -
show_rounded_beliefs(simulation.beliefs)
# _____
# ## Part 2: Implement a 2D sense function.
# As you can see, the robot's beliefs aren't changing. No matter how many times we call the simulation's sense method, nothing happens. The beliefs remain uniform.
# ### Instructions
# 1. Open `localizer.py` and complete the `sense` function.
# 3. Run the code in the cell below to import the localizer module (or reload it) and then test your sense function.
# 4. If the test passes, you've successfully implemented your first feature! Keep going with the project. If your tests don't pass (they likely won't the first few times you test), keep making modifications to the `sense` function until they do!
# +
reload(localizer)
def test_sense():
R = 'r'
_ = 'g'
simple_grid = [
[_,_,_],
[_,R,_],
[_,_,_]
]
p = 1.0 / 9
initial_beliefs = [
[p,p,p],
[p,p,p],
[p,p,p]
]
observation = R
expected_beliefs_after = [
[1/11, 1/11, 1/11],
[1/11, 3/11, 1/11],
[1/11, 1/11, 1/11]
]
p_hit = 3.0
p_miss = 1.0
beliefs_after_sensing = localizer.sense(
observation, simple_grid, initial_beliefs, p_hit, p_miss)
if helpers.close_enough(beliefs_after_sensing, expected_beliefs_after):
print("Tests pass! Your sense function is working as expected")
return
elif not isinstance(beliefs_after_sensing, list):
print("Your sense function doesn't return a list!")
return
elif len(beliefs_after_sensing) != len(expected_beliefs_after):
print("Dimensionality error! Incorrect height")
return
elif len(beliefs_after_sensing[0] ) != len(expected_beliefs_after[0]):
print("Dimensionality Error! Incorrect width")
return
elif beliefs_after_sensing == initial_beliefs:
print("Your code returns the initial beliefs.")
return
total_probability = 0.0
for row in beliefs_after_sensing:
for p in row:
total_probability += p
if abs(total_probability-1.0) > 0.001:
print("Your beliefs appear to not be normalized")
return
print("Something isn't quite right with your sense function")
test_sense()
# -
# ## Integration Testing
# Before we call this "complete" we should perform an **integration test**. We've verified that the sense function works on its own, but does the localizer work overall?
#
# Let's perform an integration test. First you should execute the code in the cell below to prepare the simulation environment.
# +
from simulate import Simulation
import simulate as sim
import helpers
# Reload the project modules so edits made since the last import
# (e.g. to localizer.sense) are picked up by this cell.
reload(localizer)
reload(sim)
reload(helpers)
R = 'r'
G = 'g'
# 7x7 (square) world of red/green cells for the integration test.
grid = [
    [R,G,G,G,R,R,R],
    [G,G,R,G,R,G,R],
    [G,R,G,G,G,G,R],
    [R,R,G,R,G,G,G],
    [R,G,R,G,R,R,R],
    [G,R,R,R,G,R,G],
    [R,R,R,G,R,G,G],
]
# Use small value for blur. This parameter is used to represent
# the uncertainty in MOTION, not in sensing. We want this test
# to focus on sensing functionality
blur = 0.1
p_hit = 100.0
simulation = sim.Simulation(grid, blur, p_hit)
# +
# Use control+Enter to run this cell many times and observe how
# the robot's belief that it is in each cell (represented by the
# size of the corresponding circle) changes as the robot moves.
# The true position of the robot is given by the red star.
# Run this cell about 15-25 times and observe the results
# Advance the simulated robot one step, then redraw the belief grid.
simulation.run(1)
simulation.show_beliefs()
# If everything is working correctly you should see the beliefs
# converge to a single large circle at the same position as the
# red star. Though, if your sense function is implemented correctly
# and this output is not converging as expected.. it may have to do
# with the `move` function bug; your next task!
#
# When you are satisfied that everything is working, continue
# to the next section
# -
# ## Part 3: Identify and Reproduce a Bug
# Software has bugs. That's okay.
#
# A user of your robot called tech support with a complaint
#
# > "So I was using your robot in a square room and everything was fine. Then I tried loading in a map for a rectangular room and it drove around for a couple seconds and then suddenly stopped working. Fix it!"
#
# Now we have to debug. We are going to use a systematic approach.
#
# 1. Reproduce the bug
# 2. Read (and understand) the error message (when one exists)
# 3. Write a test that triggers the bug.
# 4. Generate a hypothesis for the cause of the bug.
# 5. Try a solution. If it fixes the bug, great! If not, go back to step 4.
# ### Step 1: Reproduce the bug
# The user said that **rectangular environments** seem to be causing the bug.
#
# The code below is the same as the code you were working with when you were doing integration testing of your new feature. See if you can modify it to reproduce the bug.
# +
from simulate import Simulation
import simulate as sim
import helpers
reload(localizer)
reload(sim)
reload(helpers)
R = 'r'
G = 'g'
# NOTE: this grid is rectangular (4 rows x 7 columns) on purpose --
# the user reported the failure only in non-square rooms.
grid = [
    [R,G,G,G,R,R,R],
    [G,G,R,G,R,G,R],
    [G,R,G,G,G,G,R],
    [R,R,G,R,G,G,G],
]
blur = 0.001
p_hit = 100.0
simulation = sim.Simulation(grid, blur, p_hit)
# remember, the user said that the robot would sometimes drive around for a bit...
# It may take several calls to "simulation.run" to actually trigger the bug.
simulation.run(1)
simulation.show_beliefs()
# -
# Run again; each extra step gives the randomized motion another
# chance to trigger the IndexError.
simulation.run(1)
# ### Step 2: Read and Understand the error message
# If you triggered the bug, you should see an error message directly above this cell. The end of that message should say:
#
# ```
# IndexError: list index out of range
# ```
#
# And just above that you should see something like
#
# ```
# path/to/your/directory/localizer.pyc in move(dy, dx, beliefs, blurring)
# 38 new_i = (i + dy ) % width
# 39 new_j = (j + dx ) % height
# ---> 40 new_G[int(new_i)][int(new_j)] = cell
# 41 return blur(new_G, blurring)
# ```
#
# This tells us that line 40 (in the move function) is causing an `IndexError` because "list index out of range".
#
# If you aren't sure what this means, use Google!
#
# Copy and paste `IndexError: list index out of range` into Google! When I do that, I see something like this:
#
# 
#
# Browse through the top links (often these will come from stack overflow) and read what people have said about this error until you are satisfied you understand how it's caused.
# ### Step 3: Write a test that reproduces the bug
# This will help you know when you've fixed it and help you make sure you never reintroduce it in the future. You might have to try many potential solutions, so it will be nice to have a single function to call to confirm whether or not the bug is fixed
# +
# According to the user, sometimes the robot actually does run "for a while"
# - How can you change the code so the robot runs "for a while"?
# - How many times do you need to call simulation.run() to consistently
# reproduce the bug?
# Modify the code below so that when the function is called
# it consistently reproduces the bug.
def test_robot_works_in_rectangle_world():
    """Regression test: run many fresh single-step simulations in a
    4x7 (rectangular) world. If the move-function indexing bug is
    present, an IndexError is raised before the loop completes."""
    from simulate import Simulation
    import simulate as sim
    import helpers
    reload(localizer)
    reload(sim)
    reload(helpers)
    R = 'r'
    G = 'g'
    grid = [
        [R,G,G,G,R,R,R],
        [G,G,R,G,R,G,R],
        [G,R,G,G,G,G,R],
        [R,R,G,R,G,G,G],
    ]
    blur = 0.001
    p_hit = 100.0
    # 1000 fresh simulations make the failure (which depends on the
    # robot's random moves) essentially certain to occur.
    for i in range(1000):
        simulation = sim.Simulation(grid, blur, p_hit)
        simulation.run(1)
test_robot_works_in_rectangle_world()
# -
# ### Step 4: Generate a Hypothesis
# In order to have a guess about what's causing the problem, it will be helpful to use some Python debugging tools
#
# The `pdb` module (`p`ython `d`e`b`ugger) will be helpful here!
#
# #### Setting up the debugger
# 1. Open `localizer.py` and uncomment the line to the top that says `import pdb`
# 2. Just before the line of code that is causing the bug `new_G[int(new_i)][int(new_j)] = cell`, add a new line of code that says `pdb.set_trace()`
# 3. Run your test by calling your test function (run the cell below this one)
# 4. You should see a text entry box pop up! For now, type `c` into the box and hit enter to **c**ontinue program execution. Keep typing `c` and enter until the bug is triggered again
# Re-run the rectangular-world test; with pdb.set_trace() active in
# localizer.py this drops into the debugger before the failing line.
test_robot_works_in_rectangle_world()
# #### Using the debugger
# The debugger works by pausing program execution wherever you write `pdb.set_trace()` in your code. You also have access to any variables which are accessible from that point in your code.
#
# Try running your test again. This time, when the text entry box shows up, type `new_i` and hit enter. You will see the value of the `new_i` variable show up in the debugger window. Play around with the debugger: find the values of `new_j`, `height`, and `width`. Do they seem reasonable / correct?
#
# When you are done playing around, type `c` to continue program execution. Was the bug triggered? Keep playing until you have a guess about what is causing the bug.
# ### Step 5: Write a Fix
# You have a hypothesis about what's wrong. Now try to fix it. When you're done you should call your test function again. You may want to remove (or comment out) the line you added to `localizer.py` that says `pdb.set_trace()` so your test can run without you having to type `c` into the debugger box.
# Final check: should complete without raising once the fix is in place.
test_robot_works_in_rectangle_world()
# ## Congratulations!
# You've implemented your first feature and successfully debugged a problem the robot was having with rectangular environments. Well done.
| 3_2_ Robot_localisation/writeup_two dimentional histogram filter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# paramBokeh is a small library that represents Parameters graphically in a notebook and on bokeh server. Parameters are Python attributes extended using the [Param library](https://github.com/ioam/param) to support types, ranges, and documentation, which turns out to be just the information you need to automatically create widgets for each parameter. paramBokeh currently uses bokeh to display the widgets, but the design of Param and paramBokeh allows your code to be completely independent of the underlying widgets library, and can also be used with other widget frameworks such as paramNB, which uses ipywidgets.
#
# # Parameters and widgets
#
# To use paramBokeh, first declare some Parameterized classes with various Parameters:
# +
import param
import datetime as dt
class BaseClass(param.Parameterized):
    """Base class demonstrating the core Parameter types; everything
    declared here is inherited by Example below."""
    x = param.Parameter(default=3.14,doc="X position")
    y = param.Parameter(default="Not editable",constant=True)
    string_value = param.String(default="str",doc="A string")
    num_int = param.Integer(50000,bounds=(-200,100000))
    unbounded_int = param.Integer(23)
    float_with_hard_bounds = param.Number(8.2,bounds=(7.5,10))
    float_with_soft_bounds = param.Number(0.5,bounds=(0,None),softbounds=(0,2))
    unbounded_float = param.Number(30.01,precedence=0)
    # precedence < 0 hides the parameter from the generated widgets
    hidden_parameter = param.Number(2.718,precedence=-1)
    integer_range = param.Range(default=(3,7),bounds=(0, 10))
    float_range = param.Range(default=(0,1.57),bounds=(0, 3.145))
    dictionary = param.Dict(default={"a":2, "b":9})
class Example(BaseClass):
    """An example Parameterized class"""
    # Class-level list shared by all instances: records button-press times
    timestamps = []
    boolean = param.Boolean(True, doc="A sample Boolean parameter")
    color = param.Color(default='#FFFFFF')
    date = param.Date(dt.datetime(2017, 1, 1),
                      bounds=(dt.datetime(2017, 1, 1), dt.datetime(2017, 2, 1)))
    select_string = param.ObjectSelector(default="yellow",objects=["red","yellow","green"])
    select_fn = param.ObjectSelector(default=list,objects=[list,set,dict])
    int_list = param.ListSelector(default=[3,5], objects=[1,3,5,7,9],precedence=0.5)
    single_file = param.FileSelector(path='../../*/*.py*',precedence=0.5)
    multiple_files = param.MultiFileSelector(path='../../*/*.py?',precedence=0.5)
    record_timestamp = param.Action(lambda x: x.timestamps.append(dt.datetime.now()),
                                    doc="""Record timestamp.""",precedence=0.7)
# Display one of the declared parameter objects
Example.num_int
# -
# As you can see, declaring Parameters depends only on the separate Param library. Parameters are a simple idea with some properties that are crucial for helping you create clean, usable code:
#
# - The Param library is pure Python with no dependencies, which makes it easy to include in any code without tying it to a particular GUI or widgets library, or even to the Jupyter notebook.
# - Parameter declarations focus on semantic information relevant to your domain, allowing you to avoid polluting your domain-specific code with anything that ties it to a particular way of displaying or interacting with it.
# - Parameters can be defined wherever they make sense in your inheritance hierarchy, allowing you to document, type, and range-limit them once, with all of those properties inherited by any subclass. E.g. parameters work the same here whether they were declared in `BaseClass` or `Example`, which makes it easy to provide this metadata once, and avoid duplicating it throughout the code wherever ranges or types need checking or documentation needs to be stored.
#
# If you then decide to use these Parameterized classes in a notebook environment, you can import paramBokeh and easily display and edit the parameter values as an optional additional step:
import parambokeh
from bokeh.io import output_notebook
# Load BokehJS so the widgets can render inline in the notebook
output_notebook()
# Widgets for the base class only...
widgets = parambokeh.Widgets(BaseClass)
# ...and for the full Example class (inherited + new parameters)
parambokeh.Widgets(Example)
# As you can see, `parambokeh.Widgets()` does not need to be provided with any knowledge of your domain-specific application, not even the names of your parameters; it simply displays widgets for whatever Parameters may have been defined on that object. Using Param with paramBokeh thus achieves a nearly complete separation between your domain-specific code and your display code, making it vastly easier to maintain both of them over time. Here even the `msg` button behavior was specified declaratively, as an action that can be invoked (printing "Hello") independently of whether it is used in a GUI or other context.
#
# Interacting with the widgets above is only supported on a live Python-backed server, but you can also export static renderings of the widgets to a file or web page.
#
# By default, editing values in this way requires you to run the notebook cell by cell -- when you get to the above cell, edit the values as you like, and then move on to execute subsequent cells, where any reference to those parameter values will use your interactively selected setting:
# Values reflect any edits made interactively in the widgets above
Example.unbounded_int
Example.num_int
# Example.timestamps records the times you pressed the "record timestamp" button.
Example.timestamps
# +
#Example.print_param_defaults() # see all parameter values
# -
# As you can see, you can access the parameter values at the class level from within the notebook to control behavior explicitly, e.g. to select what to show in subsequent cells. Moreover, any instances of the Parameterized classes in your own code will now use the new parameter values unless specifically overridden in that instance, so you can now import and use your domain-specific library however you like, knowing that it will use your interactive selections wherever those classes appear. None of the domain-specific code needs to know or care that you used ParamNB; it will simply see new values for whatever attributes were changed interactively. ParamNB thus allows you to provide notebook-specific, domain-specific interactive functionality without ever tying your domain-specific code to the notebook environment.
# You can install ParamBokeh as described at [github.com/ioam/parambokeh](https://github.com/ioam/parambokeh). Have fun widgeting!
| examples/user_guide/Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mie Basics
#
# **<NAME>**
#
# **April 2021**
#
# *If miepython is not installed, uncomment the following cell (i.e., delete the #) and run (shift-enter)*
# +
# #!pip install --user miepython
# +
import numpy as np
import matplotlib.pyplot as plt
# Import miepython if available; give installation guidance otherwise
try:
    import miepython
except ModuleNotFoundError:
    print('miepython not installed. To install, uncomment and run the cell above.')
    print('Once installation is successful, rerun this cell again.')
# -
# ## Index of Refraction and Size Parameter
#
# When a monochromatic plane wave is incident on a sphere, it scatters and absorbs light depending on the properties of the light and sphere. If the sphere is in a vacuum, then the complex index of refraction of the sphere is
#
# $$
# m_\mathrm{vac}= m_\mathrm{re}- j\,m_\mathrm{im}
# $$
#
# The factor $m_\mathrm{im}=\kappa$ is the *index of absorption* or the *index of attenuation*.
#
# The non-dimensional sphere size parameter for a sphere in a vacuum is
#
# $$
# x_\mathrm{vac} = \frac{2\pi r }{\lambda_\mathrm{vac}}
# $$
#
# where $r$ is the radius of the sphere and $\lambda_\mathrm{vac}$ is the wavelength of the light in a vacuum.
#
# If the sphere is in a non-absorbing environment with real index $n_\mathrm{env}$ then the Mie scattering formulas can still be used, but the index of refraction of the sphere is now
#
# $$
# m= \frac{m_\mathrm{re}- j\,m_\mathrm{im}}{ n_\mathrm{env}}
# $$
#
# The wavelength in the sphere size parameter should be the wavelength of the plane wave in the environment, thus
#
# $$
# x = \frac{2\pi r } {\lambda_\mathrm{vac}/n_\mathrm{env}}
# $$
# ### Sign Convention
# The sign of the imaginary part of the index of refraction in `miepython` is assumed negative (as shown above). This convention is standard for atmospheric science and follows that of van de Hulst.
# + [markdown] heading_collapsed=true
# ### Absorption Coefficient
#
# The imaginary part of the refractive index is a non-dimensional representation of light absorption. This can be seen by writing out the equation for a monochromatic, planar electric field
#
# $$
# \mathcal{E}(z,t) = \mathcal{E}_0 e^{j (k z - \omega t)}
# $$
#
# where $k$ is the complex wavenumber
#
# $$
# k=k_\mathrm{re}-k_\mathrm{im}=2\pi {{m_\mathrm{re}}\over{\lambda_\mathrm{vac}}}-2\pi j \frac{m_\mathrm{im}}{\lambda_\mathrm{vac}}
# $$
#
# Thus
#
# $$
# \mathcal{E}(z,t) = \mathcal{E}_0 e^{-k_\mathrm{im}z}e^{j (k_\mathrm{re} z - \omega t)}
# $$
#
# and the corresponding time-averaged irradiance $E(z)$
#
# $$
# E(z) = {1\over2} c\epsilon |\mathcal{E}|^2 = E_0 \exp(-2k_\mathrm{im}z) = E_0 \exp(-\mu_a z)
# $$
#
# and therefore
#
# $$
# \mu_a = 2k_\mathrm{im} = 4\pi\cdot \frac{ m_\mathrm{im}}{ \lambda_\mathrm{vac}}
# $$
#
# Thus the imaginary part of the index of refraction is basically just the absorption coefficient measured in wavelengths.
# -
# S1/S2 scattering amplitudes in the exact backscatter direction (mu = -1);
# mie_S1_S2 accepts the angle argument either as an array or as a scalar.
miepython.mie_S1_S2(1.507-0.002j , 0.7086 , np.array([-1.0],dtype=float))
miepython.mie_S1_S2(1.507-0.002j , 0.7086 , -1)
# ### Complex Refractive Index of Water
# Let's import and plot some data from the M.S. Thesis of D. Segelstein, "The Complex Refractive Index of Water",
# University of Missouri--Kansas City, (1981) to get some sense of the complex index of refraction. The imaginary part shows absorption peaks at 3 and 6 microns, as well as the broad peak starting at 10 microns.
# +
#import the Segelstein data: columns are wavelength, real index, imaginary index
h2o = np.genfromtxt('http://omlc.org/spectra/water/data/segelstein81_index.txt', delimiter='\t', skip_header=4)
h2o_lam = h2o[:,0]
h2o_mre = h2o[:,1]
h2o_mim = h2o[:,2]
#plot it (imaginary part scaled x3 so it is visible on the same axes)
plt.plot(h2o_lam,h2o_mre)
plt.plot(h2o_lam,h2o_mim*3)
plt.plot((1,15),(1.333,1.333))
plt.xlim((1,15))
plt.ylim((0,1.8))
plt.xlabel('Wavelength (microns)')
plt.ylabel('Refractive Index')
plt.annotate(r'$m_\mathrm{re}$', xy=(3.4,1.5))
plt.annotate(r'$m_\mathrm{im}\,\,(3\times)$', xy=(3.4,0.5))
plt.annotate(r'$m_\mathrm{re}=1.333$', xy=(10,1.36))
plt.title('Infrared Complex Refractive Index of Water')
plt.show()
# +
# import the Johnson and Christy data for gold
try:
    au = np.genfromtxt('https://refractiveindex.info/tmp/data/main/Au/Johnson.txt', delimiter='\t')
except OSError:
    # try again (the download occasionally fails on the first request)
    au = np.genfromtxt('https://refractiveindex.info/tmp/data/main/Au/Johnson.txt', delimiter='\t')
# data is stacked so need to rearrange: first half of the rows holds the
# real part, second half the imaginary part (rows 0 and N are presumably
# text headers that genfromtxt turned into NaNs -- TODO confirm)
N = len(au)//2
au_lam = au[1:N,0]
au_mre = au[1:N,1]
au_mim = au[N+1:,1]
plt.scatter(au_lam,au_mre,s=1,color='blue')
plt.scatter(au_lam,au_mim,s=1,color='red')
plt.xlim((0.2,2))
plt.xlabel('Wavelength (microns)')
plt.ylabel('Refractive Index')
plt.annotate(r'$m_\mathrm{re}$', xy=(1.0,0.5),color='blue')
plt.annotate(r'$m_\mathrm{im}$', xy=(1.0,8),color='red')
plt.title('Complex Refractive Index of Gold')
plt.show()
# -
# ### The Absorption Coefficient of Water
#
# +
# absorption coefficient mu_a = 4*pi*m_im/lambda (units: 1/micron)
mua = 4*np.pi* h2o_mim/h2o_lam
plt.plot(h2o_lam,mua)
plt.xlim((0.1,20))
plt.ylim((0,1.5))
plt.xlabel('Wavelength (microns)')
plt.ylabel('Absorption Coefficient (1/micron)')
plt.title('Water')
plt.show()
# -
# ## Size Parameters
#
# ### Size Parameter $x$
# The sphere size relative to the wavelength is called the size
# parameter $x$
# $$
# x = 2\pi {r/\lambda}
# $$
# where $r$ is the radius of the sphere.
# +
N=500
m=1.5
# size parameters from 0.1 to 20 (dimensionless -- the earlier
# "also in microns" comment was wrong)
x = np.linspace(0.1,20,N)
qext, qsca, qback, g = miepython.mie(m,x)
plt.plot(x,qsca)
plt.xlabel("Sphere Size Parameter x")
# bug fix: label previously read "bScattering Efficiency" (typo)
plt.ylabel("Scattering Efficiency")
plt.title("index of refraction m=1.5")
plt.show()
# -
# ### Size Parameter $\rho$
#
# The value $\rho$ is also sometimes used to facilitate comparisons for spheres with different indices of refraction
# $$
# \rho = 2x(m-1)
# $$
# Note that when $m=1.5$, $\rho=x$.
#
# As can be seen in the graph below, the scattering for spheres with different indices of refraction is pretty similar when plotted against $\rho$, but not so obvious when plotted against $x$
# +
N=500
# values of rho from 0.1 to 20 (dimensionless).  The duplicate
# "m=1.5" assignment that used to precede this line was removed:
# m is set per-curve below.
rho = np.linspace(0.1,20,N)

# scattering efficiency for m = 1.5 as a function of rho
m = 1.5
x15 = rho/2/(m-1)
qext, sca15, qback, g = miepython.mie(m,x15)

# scattering efficiency for m = 1.1 as a function of rho
m = 1.1
x11 = rho/2/(m-1)
qext, sca11, qback, g = miepython.mie(m,x11)

# left panel: both curves vs rho; right panel: same curves vs x
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
ax1.plot(rho,sca11,color='blue')
ax1.plot(rho,sca15,color='red')
ax1.set_xlabel(r"Size parameter $\rho$")
ax1.set_ylabel("Scattering Efficiency")
ax1.annotate('m=1.5', xy=(10,3.3), color='red')
ax1.annotate('m=1.1', xy=(8,1.5), color='blue')
ax2.plot(x11,sca11,color='blue')
ax2.plot(x15,sca15,color='red')
ax2.set_xlabel(r"Size parameter $x$")
ax2.annotate('m=1.5', xy=(5,4), color='red')
ax2.annotate('m=1.1', xy=(40,1.5), color='blue')
plt.show()
# -
# ## Embedded spheres
#
# The short answer is that everything just scales.
#
# Specifically, divide the index of the sphere $m$ by the index of the surrounding material to get a relative index $m'$
#
# $$
# m' =\frac{m}{n_\mathrm{surroundings}}
# $$
#
# The wavelength in the surrounding medium $\lambda'$ is also altered
#
# $$
# \lambda' = \frac{\lambda_\mathrm{vacuum}}{n_\mathrm{surroundings}}
# $$
#
# Thus, the relative size parameter $x'$ becomes
#
# $$
# x' = \frac{2 \pi r} {\lambda'}= \frac{2 \pi r n_\mathrm{surroundings}}{ \lambda_\mathrm{vacuum}}
# $$
#
# Scattering calculations for an embedded sphere uses $m'$ and $x'$ instead of $m$ and $x$.
#
# If the spheres are air ($m=1$) bubbles in water ($m=4/3$), then the relative index of refraction will be about
#
# $$
# m' = m/n_\mathrm{water} \approx 1.0/(4/3) = 3/4 = 0.75
# $$
# +
N=500
m=1.0  # refractive index of the sphere (air bubble)
r=500 # nm
lambdaa = np.linspace(300,800,N) # also in nm
mwater = 4/3 # rough approximation
# relative index and size parameter for a sphere embedded in water
mm = m/mwater
xx = 2*np.pi*r*mwater/lambdaa
qext, qsca, qback, g = miepython.mie(mm,xx)
plt.plot(lambdaa,qsca)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Scattering Efficiency")
plt.title("One micron diameter air bubbles in water")
plt.show()
# -
# or just use `ez_mie(m, d, lambda0, n_env)`
# +
# same calculation via the convenience wrapper: sphere index, diameter,
# vacuum wavelength, and the index of the surrounding medium
m_sphere = 1.0
n_water = 4/3
d = 1000 # nm
lambda0 = np.linspace(300,800) # nm
qext, qsca, qback, g = miepython.ez_mie(m_sphere, d, lambda0, n_water)
plt.plot(lambda0,qsca)
plt.xlabel("Wavelength (nm)")
plt.ylabel("Scattering Efficiency")
plt.title("One micron diameter air bubbles in water")
plt.show()
# -
# ## Multiple scatterers
#
# This will eventually turn into a description of the scattering coefficient.
# +
m = 1.5
x = np.pi/3
theta = np.linspace(-180,180,1800)
mu = np.cos(theta/180*np.pi)
s1,s2 = miepython.mie_S1_S2(m,x,mu)
# scaled x5 so the polar pattern is visible at sketch scale
scat = 5*(abs(s1)**2+abs(s2)**2)/2 #unpolarized scattered light
N=13
# random scatterer positions inside the sketch area
xx = 3.5 * np.random.rand(N, 1) - 1.5
yy = 5 * np.random.rand(N, 1) - 2.5
plt.scatter(xx,yy,s=40,color='red')
# draw the (identical) polar scattering pattern around every scatterer
for i in range(N):
    plt.plot(scat*np.cos(theta/180*np.pi)+xx[i],scat*np.sin(theta/180*np.pi)+yy[i],color='red')
plt.plot([-5,7],[0,0],':k')
plt.annotate('incoming\nirradiance', xy=(-4.5,-2.3),ha='left',color='blue',fontsize=14)
for i in range(6):
    y0 = i -2.5
    plt.annotate('',xy=(-1.5,y0),xytext=(-5,y0),arrowprops=dict(arrowstyle="->",color='blue'))
plt.annotate('unscattered\nirradiance', xy=(3,-2.3),ha='left',color='blue',fontsize=14)
for i in range(6):
    y0 = i -2.5
    plt.annotate('',xy=(7,y0),xytext=(3,y0),arrowprops=dict(arrowstyle="->",color='blue',ls=':'))
#plt.annotate('scattered\nspherical\nwave', xy=(0,1.5),ha='left',color='red',fontsize=16)
#plt.annotate('',xy=(2.5,2.5),xytext=(0,0),arrowprops=dict(arrowstyle="->",color='red'))
#plt.annotate(r'$\theta$',xy=(2,0.7),color='red',fontsize=14)
#plt.annotate('',xy=(2,2),xytext=(2.7,0),arrowprops=dict(connectionstyle="arc3,rad=0.2", arrowstyle="<->",color='red'))
plt.xlim(-5,7)
plt.ylim(-3,3)
plt.axis('off')
plt.show()
| docs/01_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## <NAME> Research Questions
# 1. BMI of Gender in relation to Tobacco Users
# 2. Medical charges of Tobacco Users in relation to Age
# ##### Import data in from clean spreadsheet
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from Jscripts.project_functions.function import load_and_process
# NOTE(review): hard-coded absolute Windows path -- consider a path
# relative to the repository so the notebook runs on other machines.
df = load_and_process(r"C:\Users\fares\Documents\GitHub\course-project-group_1015\analysis\Johnny\Medical_Cost.csv")
df.head(10)
# #### Strip the $ sign from Medical Costs per region, since the $ character causes an error when analyzing
df1 = df.copy()
# Strip the literal '$' so the column can be parsed as a number.
# regex=False is required: with the regex default of older pandas,
# '$' is the end-of-string anchor, nothing is removed, and the
# astype(float) below fails on strings like '$123.45'.
df1['Medical Costs per region'] = df1['Medical Costs per region'].str.replace('$', '', regex=False)
df1['Medical Costs per region'] = df1['Medical Costs per region'].astype(float)
# #### return the DataFrame info (column dtypes and non-null counts)
df.info()
# #### Describe the data
df.describe() #
# #### check for missing values "NA/0" in Data
df.isnull().sum()
# #### show columns within Data
df.columns
# #### calculate correlation between variables
df.corr()
# #### heatmap to visualize the correlation between functions within the raw data. Numbers close to 1.0 will have a brighter colour.
f,ax = plt.subplots(figsize = (15,15))
sns.heatmap(df.corr(), annot=True, linewidths=.5, fmt='.1f', ax=ax)
plt.show()
# #### Heatmap but in winter format that looks cool
plt.figure(figsize=(8,6))
sns.heatmap(df.corr(),annot=True,cmap="winter_r")
plt.show()
# #### Displot to show the Age
sns.displot(df['Age'])
# #### Displot to show the Children
sns.displot(df['Children'])
# #### Displot to show the BMI
sns.displot(df['BMI'])
# #### Barplot to show the Age in comparison to the BMI
sns.barplot(x = 'Age', y = 'BMI', data = df)
# #### Pairplot to show various outcomes
sns.pairplot(data=df.iloc[:,:],corner=True)
# #### Lineplot to show the Age and BMI comparison
df.BMI.plot(kind='line', color='g', label='BMI', linewidth=1, alpha=0.5, grid=True, linestyle='-')
df.Age.plot(kind='line', color='r', label='Age', linewidth=1, alpha=0.5, grid=True, linestyle=':')
# Bug fix: plt.legend('upper left') treats the string as a sequence of
# labels (one legend entry per character); the location must be passed
# via the loc keyword.
plt.legend(loc='upper left')
# NOTE(review): the x axis here is the row index, not Age, and the y
# axis shows BMI/Age values, not children -- labels kept as written
# pending author intent.
plt.xlabel('Age')
plt.ylabel('children')
plt.title('Line Plot')
plt.show()
# #### Violinplot to show the Age and Gender comparison in regards to Tobacco Users
sns.violinplot(data=df,
               y='Age',
               x='Gender',
               hue='Tobacco User')
# #### Boxplot to show the Age and Medical Costs per region comparison in regards to Tobacco Users
sns.boxplot(data=df,
            y="Age",
            x="Medical Costs per region",
            hue="Tobacco User")
# #### As shown in the above graph, the increase of age leads to an increase in medical costs
# #### Boxplot to show the BMI and Medical Costs per region comparison in regards to Tobacco Users
sns.boxplot(data=df,
            y="BMI",
            x="Medical Costs per region",
            hue="Tobacco User")
# #### Boxplot to show the BMI and Age
df.boxplot(column='Age',by = 'BMI')
# #### Boxplot to show the BMI and Age comparison in regards to Tobacco Users
sns.boxplot(data=df,
            y="Age",
            x="BMI",
            hue="Tobacco User")
# #### Boxplot to show the Medical Costs per region and Age comparison in regards to Tobacco Users
sns.boxplot(data=df,
            y="Age",
            x="Medical Costs per region",
            hue="Tobacco User")
# Boolean mask: female tobacco users with at least one child
df4=(df['Gender']=='female') & (df['Tobacco User']=='yes') & (df['Children']>0)
df[df4]
df.boxplot(column='Age',by = 'Children')
# #### Histogram to analyze the Age
df.Age.plot(kind='hist', bins=50, figsize=(10,10))
plt.show()
# #### Filtering data for female smokers with children
df3=(df['Gender']=='female') & (df['Tobacco User']=='yes') & (df['Children']>0)
df[df3]
# #### Filtering data for male smokers with children
df3=(df['Gender']=='male') & (df['Tobacco User']=='yes') & (df['Children']>0)
df[df3]
# #### The data above shows that there are more male smokers than female smokers
# #### Splitting the data by Gender to analyze further
df1=df['Gender']=='female'
df_female=df[df1]
df2=df['Gender']=='male'
df_male=df[df2]
#df3=df['Medical Costs per region']=='charges'
#df_charges=df[df3]
# #### Histogram to show male BMIs
df_male.BMI.plot(kind='hist', bins=50, figsize=(10,10))
plt.show()
# +
#### Histogram to show female BMIs
# -
df_female.BMI.plot(kind='hist', bins=50, figsize=(10,10))
plt.show()
# #### Females tend to have lower BMIs than males; there is also a greater number of male smokers, which leads to a health deficit
| analysis/Johnny/JMilestone2/.ipynb_checkpoints/EDA-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf2
# language: python
# name: tf2
# ---
# # Implementing Gates
#
# This function shows how to implement various gates in TensorFlow.
#
# One gate will be one operation with a variable and the input tensor of our model. We will ask TensorFlow
# to change the variable based on our loss function!
import tensorflow as tf
# ### Gate 1
#
# Create a multiplication gate: $f(x) = a * x$
# ```
# a --
# |
# |---- (multiply) --> output
# |
# x --
# ```
# +
# Initialize variables and input data
a = tf.Variable(4.)
x_data = tf.keras.Input(shape=(1,))
x_val = 5.
# Add a layer which computes f(x) = a * x
multiply_layer = tf.keras.layers.Lambda(lambda x:tf.multiply(a, x))
outputs = multiply_layer(x_data)
# Build the model
model = tf.keras.Model(inputs=x_data, outputs=outputs, name="gate_1")
print(model.summary())
# Optimizer (plain gradient descent, learning rate 0.01)
optimizer=tf.keras.optimizers.SGD(0.01)
# Run loop across gate: adjust "a" so that a * 5 approaches 50
print('Optimizing a Multiplication Gate Output to 50.')
for i in range(10):
    # Open a GradientTape.
    with tf.GradientTape() as tape:
        # Forward pass.
        mult_output = model(x_val)
        # Loss value as the squared difference between
        # the output and a target value, 50.
        loss_value = tf.square(tf.subtract(mult_output, 50.))
    # Get gradients of loss with reference to the variable "a" to adjust.
    gradients = tape.gradient(loss_value, a)
    # Update the variable "a" of the model.
    optimizer.apply_gradients(zip([gradients], [a]))
    print("Step: {} ==> {} * {} = {}".format(i, a.numpy(), x_val, a.numpy() * x_val))
# +
# Instead of using a lambda layer, we can also use a subclassed layer
class MyCustomMultiplyLayer(tf.keras.layers.Layer):
    def __init__(self, units):
        super(MyCustomMultiplyLayer, self).__init__()
        # NOTE(review): `units` is stored but never used by this layer
        self.units = units
        # trainable scalar weight owned by the layer itself
        self.a = tf.Variable(4.)
    def call(self, inputs):
        return inputs * self.a
# Initialize variables
x_data = tf.keras.Input(dtype=tf.float32, shape=(1,))
# NOTE(review): this module-level `a` is not used by the custom layer
# (the layer owns its own self.a); it appears to be left over from the
# lambda-layer version above.
a = tf.Variable(4, dtype=tf.float32)
# Add a layer which computes f(x) = a * x
multiply_layer = MyCustomMultiplyLayer(units=1)
outputs = multiply_layer(x_data)
# Build the model
model = tf.keras.Model(inputs=x_data, outputs=outputs, name="gate_1")
#print(model.summary())
# Optimizer
optimizer=tf.keras.optimizers.SGD(0.01)
# Run loop across gate
print('Optimizing a Multiplication Gate Output to 50.')
for i in range(10):
    # Open a GradientTape.
    with tf.GradientTape() as tape:
        # Forward pass.
        mult_output = model(5.)
        # Loss value as the difference between
        # the output and a target value, 50.
        loss_value = tf.square(tf.subtract(mult_output, 50.))
    # Get gradients of loss with reference to the variable "a" to adjust.
    gradients = tape.gradient(loss_value, multiply_layer.a)
    # Update the weights of the model.
    optimizer.apply_gradients(zip([gradients], [multiply_layer.a]))
    print("Step: {} ==> {} * {} = {}".format(i, multiply_layer.a.numpy(), x_val, multiply_layer.a.numpy() * x_val))
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Gate 2
#
# Create a nested gate: $f(x) = a * x + b$
#
# ```
# a --
# |
# |-- (multiply)--
# | |
# x -- |-- (add) --> output
# |
# b --
# ```
# + pycharm={"name": "#%%\n"}
# Initialize variables and input data
x_data = tf.keras.Input(dtype=tf.float32, shape=(1,))
x_val = 5.
a = tf.Variable(1., dtype=tf.float32)
b = tf.Variable(1., dtype=tf.float32)
# Add a layer which computes f(x) = a * x
multiply_layer = tf.keras.layers.Lambda(lambda x:tf.multiply(a, x))
# Add a layer which computes f(x) = b + x
add_layer = tf.keras.layers.Lambda(lambda x:tf.add(b, x))
res = multiply_layer(x_data)
outputs = add_layer(res)
# Build the model
model = tf.keras.Model(inputs=x_data, outputs=outputs, name="gate_2")
print(model.summary())
# Optimizer
optimizer=tf.keras.optimizers.SGD(0.01)
# Run loop across gate: adjust "a" and "b" so that a * 5 + b approaches 50
print('Optimizing two Gate Output to 50.')
for i in range(10):
    # Open a GradientTape. A single gradient() call with a list of
    # sources replaces the previous persistent=True tape, which was
    # never released and kept its resources alive each iteration.
    with tf.GradientTape() as tape:
        # Forward pass.
        two_gate_output = model(x_val)
        # Loss value as the squared difference between
        # the output and a target value, 50.
        loss_value = tf.square(tf.subtract(two_gate_output, 50.))
    # Get gradients of loss with respect to "a" and "b" in one call.
    gradients_a, gradients_b = tape.gradient(loss_value, [a, b])
    # Update the variables "a" and "b" of the model.
    optimizer.apply_gradients(zip([gradients_a, gradients_b], [a, b]))
    print("Step: {} ==> {} * {} + {}= {}".format(i, a.numpy(),
                                                 x_val, b.numpy(),
                                                 a.numpy() * x_val + b.numpy()))
| ch6/02_Implementing_an_Operational_Gate/02_gates.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calculating simple
# - Pct change
# - Log returns
# - Standard deviation (Volatility)
# - Rolling
# - Simple Moving Avarage
# - Exponential Moving Average
# ### Standard deviation
#
# - $\sigma_{p} = \sigma_{daily}\times \sqrt{p}$
# - $\sigma_{annually} = \sigma_{daily}\times \sqrt{252}$
#
# *(252 trading days per year)*
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib notebook
# Daily AAPL OHLCV data hosted with the course materials (network fetch).
remote_file = "https://raw.githubusercontent.com/LearnPythonWithRune/FinancialDataAnalysisWithPython/main/files/AAPL.csv"
# First column (Date) becomes a DatetimeIndex for time-series operations.
data = pd.read_csv(remote_file, index_col=0, parse_dates=True)
| Financial Data Analysis With Python/jupyter/starter/06 - Simple Calculations - Volatility, SMA, and EMA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="1meIzCd9in5s"
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
# + id="z0la95GPXn_t"
# dummy, to be updated
class AD(nn.Module):
    """Small all-convolutional network: three Conv-BN-ReLU blocks followed
    by a 1x1 conv producing 2 output channels, with two independently
    optional 2x2 max-pool stages.

    Args:
        in_channels: number of channels in the input tensor.
        pooling1: if True, apply the first 2x2 max-pool after block 1.
        pooling2: if True, apply the second 2x2 max-pool after block 2.
    """

    def __init__(self, in_channels, pooling1=False, pooling2=False):
        super(AD, self).__init__()
        self.pooling1 = pooling1
        self.pooling2 = pooling2
        self.layer1 = nn.Conv2d(in_channels=in_channels, out_channels=96, kernel_size=3, stride=1)
        self.layer2 = nn.BatchNorm2d(96)
        self.layer3 = nn.ReLU()
        self.layer4 = nn.MaxPool2d(2, 2)  # 1st pooling layer (optional)
        self.layer5 = nn.Conv2d(in_channels=96, out_channels=192, kernel_size=3, stride=1)
        self.layer6 = nn.BatchNorm2d(192)
        self.layer7 = nn.ReLU()
        self.layer8 = nn.MaxPool2d(2, 2)  # 2nd pooling layer (optional)
        self.layer9 = nn.Conv2d(in_channels=192, out_channels=192, kernel_size=3, stride=1)
        self.layer10 = nn.BatchNorm2d(192)
        self.layer11 = nn.ReLU()
        self.layer12 = nn.Conv2d(in_channels=192, out_channels=2, kernel_size=1, stride=1)
        # NOTE: removed the stray debug print(self.pooling1) from __init__.

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        if self.pooling1:
            x = self.layer4(x)
        x = self.layer5(x)
        x = self.layer6(x)
        x = self.layer7(x)
        # BUG FIX: the 2nd pool was gated on self.pooling1, so the
        # pooling2 flag was silently ignored.
        if self.pooling2:
            x = self.layer8(x)
        x = self.layer9(x)
        x = self.layer10(x)
        x = self.layer11(x)
        x = self.layer12(x)
        return x
# Build the detector for 62-channel input with the first pooling stage
# enabled and the second disabled, then dump the module structure.
net = AD(62,True,False)
print(net)
# -
| detector/other_notebooks/pytorch_updated.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="https://s3.amazonaws.com/edu-static.mongodb.com/lessons/M220/notebook_assets/screen_align.png" style="margin: 0 auto;">
#
# + [markdown] slideshow={"slide_type": "slide"}
# <h1 style="text-align: center; font-size=58px;">Cursor Methods and Aggregation Equivalents</h1>
# + [markdown] slideshow={"slide_type": "notes"}
# In this lesson we're going to discuss methods we can call against Pymongo cursors, and the aggregation stages that would perform the same tasks in a pipeline.
# + [markdown] slideshow={"slide_type": "slide"}
# <h2 style="text-align: center; font-size=58px;">Limiting</h2>
# + slideshow={"slide_type": "subslide"}
import pymongo
from bson.json_util import dumps
# Atlas connection-string placeholder — replace before running.
uri = "<your_atlas_uri>"
# MongoClient is lazy: no connection happens until the first operation.
client = pymongo.MongoClient(uri)
mflix = client.sample_mflix   # sample_mflix database handle
movies = mflix.movies         # movies collection handle
# + [markdown] slideshow={"slide_type": "notes"}
# Here's (point) a collection object for the `movies` collection.
# + slideshow={"slide_type": "subslide"}
# find() returns a cursor; .limit(2) caps it at two documents before
# any are fetched. Second argument is the projection (drop _id).
limited_cursor = movies.find(
    { "directors": "<NAME>" },
    { "_id": 0, "title": 1, "cast": 1 }
).limit(2)
print(dumps(limited_cursor, indent=2))
# + [markdown] slideshow={"slide_type": "notes"}
# So this is a find query with a predicate (point) and a projection (point). And the find() method is always gonna return a cursor to us. But before assigning that cursor to a variable, we've transformed it with the limit() method, to make sure no more than 2 documents are returned by this cursor.
#
# (run command)
#
# And we can see we only got two (point) documents back.
# + slideshow={"slide_type": "subslide"}
# Aggregation equivalent of the cursor version: $match ~ query predicate,
# $project ~ projection, $limit ~ .limit().
pipeline = [
    { "$match": { "directors": "<NAME>" } },
    { "$project": { "_id": 0, "title": 1, "cast": 1 } },
    { "$limit": 2 }
]
limited_aggregation = movies.aggregate( pipeline )
print(dumps(limited_aggregation, indent=2))
# + [markdown] slideshow={"slide_type": "notes"}
# Now this is the equivalent operation with the aggregation framework. Instead of tacking a .limit() to the end of the cursor, we add $limit as a stage in our pipeline.
#
# (enter command)
#
# And it's the same output. And these (point to `$match` and `$project`) aggregation stages represent the query predicate and the projection from when we were using the query language.
# + [markdown] slideshow={"slide_type": "slide"}
# <h2 style="text-align: center; font-size=58px;">Sorting</h2>
# + slideshow={"slide_type": "subslide"}
from pymongo import DESCENDING, ASCENDING
# Single-key sort: .sort(key, direction). ASCENDING/DESCENDING are just 1/-1.
sorted_cursor = movies.find(
    { "directors": "<NAME>" },
    { "_id": 0, "year": 1, "title": 1, "cast": 1 }
).sort("year", ASCENDING)
print(dumps(sorted_cursor, indent=2))
# + [markdown] slideshow={"slide_type": "notes"}
# This is an example of the `sort()` (point) cursor method. `sort()` takes two parameters, the key we're sorting on and the sorting order. In this example we're sorting on year (point), in increasing (point) order.
#
# ASCENDING and DESCENDING are values from the pymongo library to specify sort direction, but they're really just the integers 1 and -1.
#
# (enter command)
#
# And we can see that the movies were returned to us in order of the year they were made.
# + slideshow={"slide_type": "subslide"}
# Aggregation equivalent: $sort stage maps field name -> direction.
pipeline = [
    { "$match": { "directors": "<NAME>" } },
    { "$project": { "_id": 0, "year": 1, "title": 1, "cast": 1 } },
    { "$sort": { "year": ASCENDING } }
]
sorted_aggregation = movies.aggregate( pipeline )
print(dumps(sorted_aggregation, indent=2))
# + [markdown] slideshow={"slide_type": "notes"}
# And this is the equivalent pipeline, with a sort (point) stage that corresponds to a dictionary, giving the sort (point) field, and the direction (point) of the sort.
#
# (enter command)
#
# And the agg framework was able to sort by year here.
# + slideshow={"slide_type": "subslide"}
# Multi-key sort takes ONE argument: a list of (key, direction) tuples,
# applied left to right (year first, then title to break ties).
sorted_cursor = movies.find(
    { "cast": "<NAME>" },
    { "_id": 0, "year": 1, "title": 1, "cast": 1 }
).sort([("year", ASCENDING), ("title", ASCENDING)])
print(dumps(sorted_cursor, indent=2))
# + [markdown] slideshow={"slide_type": "notes"}
# So just a special case to note here, sorting on multiple keys in the cursor method is gonna look a little different.
#
# When sorting on one key, the `sort()` method takes two arguments, the key and the sort order.
#
# When sorting on two or more keys, the `sort()` method takes a single argument, an array of tuples. And each tuple has a key and a sort order.
#
# (enter command)
#
# And we can see that after sorting on year, the cursor sorted the movie titles alphabetically.
# + slideshow={"slide_type": "subslide"}
# Multi-key $sort: dict field order defines sort precedence.
pipeline = [
    { "$match": { "cast": "<NAME>" } },
    { "$project": { "_id": 0, "year": 1, "title": 1, "cast": 1 } },
    { "$sort": { "year": ASCENDING, "title": ASCENDING } }
]
sorted_aggregation = movies.aggregate( pipeline )
print(dumps(sorted_aggregation, indent=2))
# + [markdown] slideshow={"slide_type": "slide"}
# <h2 style="text-align: center; font-size=58px;">Skipping</h2>
# + slideshow={"slide_type": "subslide"}
# $count replaces the deprecated cursor .count(); emits one document
# whose "num_movies" field is the number of matching documents.
pipeline = [
    { "$match": { "directors": "<NAME>" } },
    { "$project": { "_id": 0, "title": 1, "cast": 1 } },
    { "$count": "num_movies" }
]
sorted_aggregation = movies.aggregate( pipeline )
print(dumps(sorted_aggregation, indent=2))
# + [markdown] slideshow={"slide_type": "notes"}
# (enter command)
#
# So we know from counting the documents in this aggregation, that if we don't specify anything else, we're getting 15 (point) documents returned to us.
#
# Note that the cursor method `count()` that counts documents in a cursor has been deprecated. So if you want to know how many documents are returned by a query, you should use the `$count` aggregation stage.
# + slideshow={"slide_type": "subslide"}
# .skip(14) drops the first 14 of the 15 matches — but without a sort
# key, WHICH 14 are skipped is unspecified.
skipped_cursor = movies.find(
    { "directors": "<NAME>" },
    { "_id": 0, "title": 1, "cast": 1 }
).skip(14)
print(dumps(skipped_cursor, indent=2))
# + [markdown] slideshow={"slide_type": "notes"}
# The `skip()` method allows us to skip documents in a collection, so only documents we did not skip appear in the cursor. Because we only have 15 documents, skipping 14 of them should only leave us with 1.
#
# (enter command)
#
# And look at that, we've only got 1 document in our cursor. The issue is, we don't really know which documents we skipped over, because we haven't specified a sort key and really, we have no idea the order in which documents are stored in the cursor.
# + slideshow={"slide_type": "subslide"}
# Sorting before skipping makes the skip deterministic: drop the 10
# oldest matches, keep the remaining 5.
skipped_sorted_cursor = movies.find(
    { "directors": "<NAME>" },
    { "_id": 0, "title": 1, "year": 1, "cast": 1 }
).sort("year", ASCENDING).skip(10)
print(dumps(skipped_sorted_cursor, indent=2))
# + [markdown] slideshow={"slide_type": "notes"}
# So here we've sorted on year (point) and then skipped the first 10. Now we know that when we're skipping 10 documents, we're skipping the 10 oldest Sam Raimi movies in this collection.
#
# (enter command)
#
# And we only got 5 of those 15 documents back, because we skipped 10 of them.
#
# These cursor methods are nice because we can tack them on a cursor in the order we want them applied. It even kinda makes our Python look like Javascript, with this `.sort()` and `.skip()`.
# + slideshow={"slide_type": "subslide"}
# Aggregation equivalent: $sort then $skip, mirroring .sort().skip(10).
# Stage order matters — skipping before sorting would drop arbitrary docs.
pipeline = [
    { "$match": { "directors": "<NAME>" } },
    { "$project": { "_id": 0, "year": 1, "title": 1, "cast": 1 } },
    { "$sort": { "year": ASCENDING } },
    { "$skip": 10 }
]
sorted_skipped_aggregation = movies.aggregate( pipeline )
print(dumps(sorted_skipped_aggregation, indent=2))
# + [markdown] slideshow={"slide_type": "notes"}
# So here's an example of the same query in the aggregation framework. As you can see the `$skip` stage represents the `.skip()` from before.
#
# (run command)
#
# And it gives us the same output.
#
# The `skip()` method is useful for paging results on a website, because we can sort the results chronologically, and then if we have 10 movies displayed on each page, the first page would have a skip value of 0, but then the second page would skip the first 10 movies, the third page would skip the first 20 movies, etc.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Summary
#
# * `.limit()` == `$limit`
# * `.sort()` == `$sort`
# * `.skip()` == `$skip`
# + [markdown] slideshow={"slide_type": "notes"}
# So just to recap, in this lesson we covered some cursor methods and their aggregation equivalents. Remember that there won't always be a 1 to 1 mapping, because the aggregation framework can do a lot more than cursors can.
#
# But these three methods exist as both aggregation stages and cursor methods.
| mflix-python/notebooks/.ipynb_checkpoints/cursor_methods_agg_equivalents-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: vae-venv-cpu1
# language: python
# name: vae-venv-cpu1
# ---
# +
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_probability as tfp
tfk = tf.keras            # short aliases used throughout this notebook
tfl = tfk.layers
tfd = tfp.distributions
tfs = tf.summary
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook as tqdm
# +
batch_size = 64
buffer_size = 500
def normalise_image(img):
    """Convert an integer image tensor to float32 scaled into [0, 1]."""
    as_float = tf.cast(img, tf.float32)
    return as_float / 255.
# CIFAR-10 input pipeline: shuffle+repeat first, then strip labels
# (keep only 'image'), normalise to [0, 1], and batch.
train_dataset = tfds.load(name="cifar10", split=tfds.Split.ALL)
train_dataset = train_dataset.shuffle(buffer_size=buffer_size).repeat()
train_dataset = train_dataset.map(lambda x: x['image'])
train_dataset = train_dataset.map(normalise_image)
train_dataset = train_dataset.batch(batch_size)
# +
class Encoder(tfl.Layer):
    """Convolutional encoder producing a diagonal-Gaussian posterior over
    `latent_size` latent variables; call() returns one posterior sample.

    NOTE(review): assumes 32x32x3 inputs (the Reshape expects a 4x4x512
    feature map after three stride-2 convs) — confirm against the dataset.
    """
    def __init__(self, latent_size, name='encoder', **kwargs):
        # Number of latent dimensions of the variational posterior.
        self.latent_size = latent_size
        super(Encoder, self).__init__(name=name, **kwargs)
    def build(self, input_shape):
        # Conv stack: 64 -> 128 -> 256 -> 512 channels, downsampling by 2
        # at each strided conv, then flatten and project to 512 features.
        self.layers = [
            tfl.Conv2D(filters=64,
                       kernel_size=(5, 5),
                       padding='same'),
            tf.nn.relu,
            tfl.Conv2D(filters=128,
                       kernel_size=(5, 5),
                       strides=(2, 2),
                       padding='same'),
            tf.nn.relu,
            tfl.Conv2D(filters=256,
                       kernel_size=(5, 5),
                       strides=(2, 2),
                       padding='same'),
            tf.nn.relu,
            tfl.Conv2D(filters=512,
                       kernel_size=(5, 5),
                       strides=(2, 2),
                       padding='same'),
            tf.nn.relu,
            tfl.Reshape((4 * 4 * 512,)),
            tfl.Dense(512),
            tf.nn.relu
        ]
        # Separate heads for the posterior mean and (pre-softplus) scale.
        self.loc_head = tfl.Dense(self.latent_size)
        self.log_scale_head = tfl.Dense(self.latent_size)
    def call(self, tensor):
        for layer in self.layers:
            tensor = layer(tensor)
        loc = self.loc_head(tensor)
        # softplus keeps the scale strictly positive.
        scale = tf.nn.softplus(self.log_scale_head(tensor))
        # Stored so the enclosing model can read it for the KL term.
        self.posterior = tfd.Normal(loc=loc, scale=scale)
        return self.posterior.sample()
class Decoder(tfl.Layer):
    """Transposed-conv decoder: maps a latent vector back to a 32x32x3
    image with values in [0, 1] (final sigmoid)."""
    def __init__(self, name='decoder', **kwargs):
        super(Decoder, self).__init__(name=name, **kwargs)
    def build(self, input_shape):
        # Mirror of the encoder: dense up to 4x4x512, then three stride-2
        # transposed convs (512 -> 256 -> 128 -> 64) and a 3-channel head.
        self.layers = [
            tfl.Dense(512),
            tf.nn.relu,
            tfl.Dense(4 * 4 * 512),
            tf.nn.relu,
            tfl.Reshape((4, 4, 512)),
            tfl.Conv2DTranspose(filters=256,
                                kernel_size=(5, 5),
                                strides=(2, 2),
                                padding='same'),
            tf.nn.relu,
            tfl.Conv2DTranspose(filters=128,
                                kernel_size=(5, 5),
                                strides=(2, 2),
                                padding='same'),
            tf.nn.relu,
            tfl.Conv2DTranspose(filters=64,
                                kernel_size=(5, 5),
                                strides=(2, 2),
                                padding='same'),
            tf.nn.relu,
            tfl.Conv2DTranspose(filters=3,
                                kernel_size=(5, 5),
                                padding='same'),
            tf.nn.sigmoid
        ]
    def call(self, tensor):
        for layer in self.layers:
            tensor = layer(tensor)
        return tensor
class VAE(tfk.Model):
    """Convolutional VAE with a diagonal-Gaussian posterior and a Gaussian
    likelihood whose log-noise scale is a single learned scalar."""
    def __init__(self, latent_size, name='vae', **kwargs):
        # BUG FIX: super().__init__() must run before assigning tracked
        # attributes such as tf.Variable — Keras raises otherwise.
        super(VAE, self).__init__(name=name, **kwargs)
        self.latent_size = latent_size
        self.log_noise = tf.Variable(0.0)  # learned log-stddev of the likelihood
    @property
    def log_prob(self):
        # Batch mean of the per-example reconstruction log-likelihood
        # (populated by the most recent call()).
        return tf.reduce_mean(self._log_prob)
    @property
    def kl_divergence(self):
        # KL(q(z|x) || p(z)): sum over latent dims, mean over the batch.
        kl_each_latent = tfd.kl_divergence(self.posterior, self.prior)
        kl_each_example = tf.reduce_sum(kl_each_latent, axis=-1)
        return tf.reduce_mean(kl_each_example)
    @property
    def posterior(self):
        return self.encoder.posterior
    def build(self, input_shape):
        self.encoder = Encoder(self.latent_size)
        self.decoder = Decoder()
        # Standard-normal prior over the latent vector.
        self.prior = tfd.Normal(loc=tf.zeros(self.latent_size),
                                scale=tf.ones(self.latent_size))
    def call(self, tensor):
        latents = self.encoder(tensor)
        loc = self.decoder(latents)
        scale = tf.exp(self.log_noise)
        self.likelihood = tfd.Normal(loc=loc, scale=scale)
        # Per-example log-likelihood: sum over height, width, channels.
        self._log_prob = tf.reduce_sum(self.likelihood.log_prob(tensor), axis=(1, 2, 3))
        return loc
# +
# Training hyper-parameters.
train_steps = int(1e6)
beta = 1e0        # KL weight (1.0 == plain ELBO)
learn_rate = 1e-3
log_freq = 10     # emit summaries every `log_freq` optimizer steps
vae = VAE(64)
optimizer = tfk.optimizers.Adam(learn_rate)
train_summary_writer = tfs.create_file_writer('summaries/train/cifar10')
with train_summary_writer.as_default():
    for batch in tqdm(train_dataset.take(train_steps), total=train_steps):
        with tf.GradientTape() as tape:
            reconstructions = vae(batch)
            likelihood = vae.log_prob
            # FIX: evaluate the kl_divergence property once and reuse it.
            # The original accessed it twice (recomputing the KL ops) and
            # wrapped the already-scalar value in a redundant reduce_sum.
            kl_divergence = vae.kl_divergence
            neg_elbo = - likelihood + beta * kl_divergence
        gradients = tape.gradient(neg_elbo, vae.trainable_variables)
        optimizer.apply_gradients(zip(gradients, vae.trainable_variables))
        if tf.equal(optimizer.iterations % log_freq, 0):
            tfs.scalar('ELBO', - neg_elbo, step=optimizer.iterations)
            tfs.scalar('Likelihood', likelihood, step=optimizer.iterations)
            tfs.scalar('KL-divergence', kl_divergence, step=optimizer.iterations)
            tfs.scalar('log-noise', vae.log_noise, step=optimizer.iterations)
            tfs.image('Original', batch, step=optimizer.iterations)
            tfs.image('Reconstruction', reconstructions, step=optimizer.iterations)
# -
| vae-cifar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
# Pin this process to GPU index 1 (must run before TF initializes CUDA).
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# +
# # !wget https://f000.backblazeb2.com/file/malay-dataset/emotion/emotion-twitter-lexicon.json
# -
import tensorflow as tf
import numpy as np
from rotary_embedding_tensorflow import apply_rotary_emb, RotaryEmbedding
from fast_transformer import FastTransformer
from malaya.text.bpe import WordPieceTokenizer
# Case-sensitive WordPiece tokenizer built from the local BERT vocab file.
tokenizer = WordPieceTokenizer('BERT.wordpiece', do_lower_case = False)
# tokenizer.tokenize('halo nama sayacomel')
# +
import pickle
# Pre-tokenized dataset: parallel lists of token-id sequences and labels.
with open('subjectivity-fastformer.pkl', 'rb') as fopen:
    input_ids, Y = pickle.load(fopen)
len(input_ids), len(Y)
# -
# Fine-tuning schedule: 10% of total steps are linear LR warmup.
epoch = 10
batch_size = 32
warmup_proportion = 0.1
num_train_steps = int(len(input_ids) / batch_size * epoch)
num_warmup_steps = int(num_train_steps * warmup_proportion)
import optimization
# +
def create_initializer(initializer_range=0.02):
    """Truncated-normal weight initializer with stddev `initializer_range`."""
    stddev = initializer_range
    return tf.truncated_normal_initializer(stddev=stddev)
class Model:
    """TF1 graph for sequence classification on top of a FastTransformer
    encoder: the [CLS]-position (index 0) hidden state is projected to
    `dimension_output` logits and trained with softmax cross-entropy.

    Reads module-level `num_train_steps` / `num_warmup_steps` for the
    optimizer schedule.
    """
    def __init__(
        self,
        dimension_output,
        learning_rate = 2e-5,
        training = True,
    ):
        # Token ids, shape [batch, seq_len]; 0 is the padding id.
        self.X = tf.placeholder(tf.int32, [None, None])
        # Attention mask: True where a real (non-padding) token sits.
        mask = tf.math.not_equal(self.X, 0)
        mask = tf.cast(mask, tf.bool)
        self.Y = tf.placeholder(tf.int32, [None])
        self.maxlen = tf.shape(self.X)[1]
        self.lengths = tf.count_nonzero(self.X, 1)
        self.model = FastTransformer(
            num_tokens = 32000,
            dim = 336,
            depth = 4,
            heads = 12,
            max_seq_len = 2048,
            absolute_pos_emb = True,
            mask = mask
        )
        self.logits = self.model(self.X)[0]
        # Per-token logits, exported under the name 'logits_seq'.
        self.logits_seq = tf.layers.dense(self.logits, dimension_output,
                                          kernel_initializer=create_initializer())
        self.logits_seq = tf.identity(self.logits_seq, name = 'logits_seq')
        # Sentence-level logits: first-token position only.
        self.logits = self.logits_seq[:, 0]
        self.logits = tf.identity(self.logits, name = 'logits')
        self.cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits = self.logits, labels = self.Y
            )
        )
        # AdamW-style optimizer with warmup from the BERT optimization module.
        self.optimizer = optimization.create_optimizer(self.cost, learning_rate,
                                                       num_train_steps, num_warmup_steps, False)
        correct_pred = tf.equal(
            tf.argmax(self.logits, 1, output_type = tf.int32), self.Y
        )
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# +
# Binary classification head (subjective vs. objective).
dimension_output = 2
learning_rate = 2e-5
tf.reset_default_graph()
sess = tf.InteractiveSession()
model = Model(
    dimension_output,
    learning_rate
)
sess.run(tf.global_variables_initializer())
var_lists = tf.trainable_variables()
# +
import collections
import re
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
    """Compute the union of the current variables and checkpoint variables.

    Args:
        tvars: list of tf.Variable objects in the current graph.
        init_checkpoint: path/prefix of the checkpoint to restore from.

    Returns:
        (assignment_map, initialized_variable_names): mapping from
        checkpoint variable name to graph variable, and the set of graph
        variable names (with and without the ':0' suffix) that will be
        initialized from the checkpoint.
    """
    # FIX: dropped the dead `assignment_map = {}` that was immediately
    # shadowed by the OrderedDict below.
    initialized_variable_names = {}
    name_to_variable = collections.OrderedDict()
    for var in tvars:
        name = var.name
        # Strip the trailing ':<output_index>' suffix from variable names.
        m = re.match('^(.*):\\d+$', name)
        if m is not None:
            name = m.group(1)
        name_to_variable[name] = var
    init_vars = tf.train.list_variables(init_checkpoint)
    assignment_map = collections.OrderedDict()
    for x in init_vars:
        (name, var) = (x[0], x[1])
        # Only restore checkpoint entries that exist in the current graph.
        if name not in name_to_variable:
            continue
        assignment_map[name] = name_to_variable[name]
        initialized_variable_names[name] = 1
        initialized_variable_names[name + ':0'] = 1
    return (assignment_map, initialized_variable_names)
# -
# Warm-start the encoder from the pretrained social-media checkpoint;
# only variables present in both graph and checkpoint are restored.
tvars = tf.trainable_variables()
checkpoint = 'fastformer-tiny-social-media/model.ckpt-1000000'
assignment_map, initialized_variable_names = get_assignment_map_from_checkpoint(tvars,
                                                                                checkpoint)
saver = tf.train.Saver(var_list = assignment_map)
saver.restore(sess, checkpoint)
# Convenience alias for post-padding minibatches.
pad_sequences = tf.keras.preprocessing.sequence.pad_sequences
# +
from sklearn.model_selection import train_test_split
# 80/20 random split into train and held-out evaluation sets.
train_input_ids, test_input_ids, train_Y, test_Y = train_test_split(
    input_ids, Y, test_size = 0.2
)
# +
from tqdm import tqdm
import time
# Fine-tune for `epoch` epochs: each epoch does one training pass and one
# evaluation pass, then prints mean loss/accuracy for both.
for EPOCH in range(epoch):
    train_acc, train_loss, test_acc, test_loss = [], [], [], []
    pbar = tqdm(
        range(0, len(train_input_ids), batch_size), desc = 'train minibatch loop'
    )
    for i in pbar:
        index = min(i + batch_size, len(train_input_ids))
        batch_x = train_input_ids[i: index]
        # Pad each minibatch to its own maximum sequence length.
        batch_x = pad_sequences(batch_x, padding='post')
        batch_y = train_Y[i: index]
        # Running model.optimizer applies one gradient update.
        acc, cost, _ = sess.run(
            [model.accuracy, model.cost, model.optimizer],
            feed_dict = {
                model.Y: batch_y,
                model.X: batch_x,
            },
        )
        train_loss.append(cost)
        train_acc.append(acc)
        pbar.set_postfix(cost = cost, accuracy = acc)
    pbar = tqdm(range(0, len(test_input_ids), batch_size), desc = 'test minibatch loop')
    for i in pbar:
        index = min(i + batch_size, len(test_input_ids))
        batch_x = test_input_ids[i: index]
        batch_x = pad_sequences(batch_x, padding='post')
        batch_y = test_Y[i: index]
        # Evaluation only: no optimizer op, so weights stay fixed.
        acc, cost = sess.run(
            [model.accuracy, model.cost],
            feed_dict = {
                model.Y: batch_y,
                model.X: batch_x,
            },
        )
        test_loss.append(cost)
        test_acc.append(acc)
        pbar.set_postfix(cost = cost, accuracy = acc)
    train_loss = np.mean(train_loss)
    train_acc = np.mean(train_acc)
    test_loss = np.mean(test_loss)
    test_acc = np.mean(test_acc)
    print(
        'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
        % (EPOCH, train_loss, train_acc, test_loss, test_acc)
    )
# -
# Save only trainable variables of the fine-tuned model.
saver = tf.train.Saver(tf.trainable_variables())
saver.save(sess, 'fastformer-tiny-subjectivity/model.ckpt')
# +
# Collect predicted vs. true labels over the held-out set for the
# classification report below.
real_Y, predict_Y = [], []
pbar = tqdm(
    range(0, len(test_input_ids), batch_size), desc = 'validation minibatch loop'
)
for i in pbar:
    index = min(i + batch_size, len(test_input_ids))
    batch_x = test_input_ids[i: index]
    batch_x = pad_sequences(batch_x, padding='post')
    batch_y = test_Y[i: index]
    # argmax over the 2 logits gives the predicted class id.
    predict_Y += np.argmax(sess.run(model.logits,
                                    feed_dict = {
                                        model.X: batch_x,
                                    },
                                    ), 1, ).tolist()
    real_Y += batch_y
# +
from sklearn import metrics
# Per-class precision/recall/F1 on the held-out set.
print(
    metrics.classification_report(
        real_Y, predict_Y, target_names = ['negative', 'positive'],
        digits = 5
    )
)
# -
# Build the comma-separated list of graph nodes to keep when freezing:
# variables, placeholders and the named logits outputs, minus optimizer
# state (adam/beta), bookkeeping, and assign/read ops.
strings = ','.join(
    [
        n.name
        for n in tf.get_default_graph().as_graph_def().node
        if ('Variable' in n.op
            or 'Placeholder' in n.name
            or 'logits' in n.name
            or 'alphas' in n.name
            or 'self/Softmax' in n.name)
        and 'adam' not in n.name
        and 'beta' not in n.name
        and 'global_step' not in n.name
        and 'ReadVariableOp' not in n.name
        and 'AssignVariableOp' not in n.name
        and '/Assign' not in n.name
        and '/Adam' not in n.name
    ]
)
strings.split(',')
def freeze_graph(model_dir, output_node_names):
    """Freeze the latest checkpoint in `model_dir` into frozen_model.pb,
    folding all variables into constants.

    Args:
        model_dir: directory containing TF checkpoint files.
        output_node_names: comma-separated node names to keep as outputs.
    """
    if not tf.gfile.Exists(model_dir):
        # FIX: corrected the grammar of the error message
        # ("doesn't exists" -> "doesn't exist").
        raise AssertionError(
            "Export directory doesn't exist. Please specify an export "
            'directory: %s' % model_dir
        )
    checkpoint = tf.train.get_checkpoint_state(model_dir)
    input_checkpoint = checkpoint.model_checkpoint_path
    absolute_model_dir = '/'.join(input_checkpoint.split('/')[:-1])
    output_graph = absolute_model_dir + '/frozen_model.pb'
    # Strip device placements so the frozen graph is portable.
    clear_devices = True
    with tf.Session(graph = tf.Graph()) as sess:
        saver = tf.train.import_meta_graph(
            input_checkpoint + '.meta', clear_devices = clear_devices
        )
        saver.restore(sess, input_checkpoint)
        # Replace every variable with a constant holding its current value.
        output_graph_def = tf.graph_util.convert_variables_to_constants(
            sess,
            tf.get_default_graph().as_graph_def(),
            output_node_names.split(','),
        )
        with tf.gfile.GFile(output_graph, 'wb') as f:
            f.write(output_graph_def.SerializeToString())
        print('%d ops in the final graph.' % len(output_graph_def.node))
freeze_graph('fastformer-tiny-subjectivity', strings)
def load_graph(frozen_graph_filename):
    """Read a frozen GraphDef from disk and import it into a fresh Graph."""
    with tf.gfile.GFile(frozen_graph_filename, 'rb') as handle:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(handle.read())
    imported = tf.Graph()
    with imported.as_default():
        tf.import_graph_def(graph_def)
    return imported
# +
# g = load_graph('fastformer-tiny-entities/frozen_model.pb')
# x = g.get_tensor_by_name('import/Placeholder:0')
# logits = g.get_tensor_by_name('import/logits:0')
# test_sess = tf.InteractiveSession(graph = g)
# +
# # %%time
# predicted = test_sess.run(logits,
# feed_dict = {
# x: [parsed_sequence],
# },
# )[0]
# merged = merge_wordpiece_tokens_tagging(bert_sequence, [idx2tag[d] for d in predicted])
# print(list(zip(merged[0], merged[1])))
# -
from tensorflow.tools.graph_transforms import TransformGraph
# +
# Post-process the frozen graph: fold batch norms, 8-bit-quantize weights
# (with clamped fallback range), strip unused nodes, and write the result
# next to the original as frozen_model.pb.quantized.
transforms = ['add_default_attributes',
              'remove_nodes(op=Identity, op=CheckNumerics, op=Dropout)',
              'fold_batch_norms',
              'fold_old_batch_norms',
              'quantize_weights(fallback_min=-10, fallback_max=10)',
              'strip_unused_nodes',
              'sort_by_execution_order']
input_nodes = [
    'Placeholder',
]
output_nodes = [
    'logits',
    'logits_seq'
]
pb = 'fastformer-tiny-subjectivity/frozen_model.pb'
input_graph_def = tf.GraphDef()
with tf.gfile.FastGFile(pb, 'rb') as f:
    input_graph_def.ParseFromString(f.read())
transformed_graph_def = TransformGraph(input_graph_def,
                                       input_nodes,
                                       output_nodes, transforms)
with tf.gfile.GFile(f'{pb}.quantized', 'wb') as f:
    f.write(transformed_graph_def.SerializeToString())
# +
# g = load_graph('fastformer-tiny-entities/frozen_model.pb.quantized')
# x = g.get_tensor_by_name('import/Placeholder:0')
# logits = g.get_tensor_by_name('import/logits:0')
# test_sess = tf.InteractiveSession(graph = g)
# +
# # %%time
# predicted = test_sess.run(logits,
# feed_dict = {
# x: [parsed_sequence],
# },
# )[0]
# merged = merge_wordpiece_tokens_tagging(bert_sequence, [idx2tag[d] for d in predicted])
# print(list(zip(merged[0], merged[1])))
# -
# Upload both frozen graphs to Backblaze B2.
# NOTE(review): `b2_bucket` and `file_info` are not defined anywhere in
# this notebook chunk — presumably created in an earlier cell; verify
# before running.
file = 'fastformer-tiny-subjectivity/frozen_model.pb'
outPutname = 'subjectivity/tiny-fastformer/model.pb'
b2_bucket.upload_local_file(
    local_file=file,
    file_name=outPutname,
    file_infos=file_info,
)
file = 'fastformer-tiny-subjectivity/frozen_model.pb.quantized'
outPutname = 'subjectivity/tiny-fastformer-quantized/model.pb'
b2_bucket.upload_local_file(
    local_file=file,
    file_name=outPutname,
    file_infos=file_info,
)
| session/subjectivity/fastformer-tiny.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.10 64-bit (''crypto'': conda)'
# name: python3
# ---
# # Indicators
import pandas as pd
import talib as ta
# ## Sources
# Daily BTC-USD candles exported from Yahoo Finance; the first column
# (the date) becomes a parsed DatetimeIndex.
btc_yahoo = pd.read_csv(
    '/home/giujorge/datalake/lab/Crypto/crypto/data/external/yahoo/daily/usd/BTC-USD.csv',
    parse_dates=True,
    index_col=0,
)
btc_yahoo.head()
# ## Pattern Recognition
# Compute every TA-Lib candlestick pattern indicator from the same four
# OHLC series and store each result under the indicator's own name.
# Patterns are listed in the original cell order so the DataFrame column
# order is unchanged; entries whose kwargs include `penetration=0` match
# the original explicit calls.
#
# Fix: the original CDLTHRUSTING cell was a copy/paste of the CDLTASUKIGAP
# call, so the CDLTHRUSTING column was never created — it is now.
_CANDLE_PATTERNS = [
    ('CDL2CROWS', {}),              # Two Crows
    ('CDL3BLACKCROWS', {}),         # Three Black Crows
    ('CDL3INSIDE', {}),             # Three Inside Up/Down
    ('CDL3LINESTRIKE', {}),         # Three-Line Strike
    ('CDL3OUTSIDE', {}),            # Three Outside Up/Down
    ('CDL3STARSINSOUTH', {}),       # Three Stars In The South
    ('CDL3WHITESOLDIERS', {}),      # Three Advancing White Soldiers
    ('CDLABANDONEDBABY', {}),       # Abandoned Baby
    ('CDLADVANCEBLOCK', {}),        # Advance Block
    ('CDLBELTHOLD', {}),            # Belt-hold
    ('CDLBREAKAWAY', {}),           # Breakaway
    ('CDLCLOSINGMARUBOZU', {}),     # Closing Marubozu
    ('CDLCONCEALBABYSWALL', {}),    # Concealing Baby Swallow
    ('CDLCOUNTERATTACK', {}),       # Counterattack
    ('CDLDARKCLOUDCOVER', {'penetration': 0}),   # Dark Cloud Cover
    ('CDLDOJI', {}),                # Doji
    ('CDLDOJISTAR', {}),            # Doji Star
    ('CDLDRAGONFLYDOJI', {}),       # Dragonfly Doji
    ('CDLENGULFING', {}),           # Engulfing Pattern
    ('CDLEVENINGDOJISTAR', {'penetration': 0}),  # Evening Doji Star
    ('CDLEVENINGSTAR', {'penetration': 0}),      # Evening Star
    ('CDLGAPSIDESIDEWHITE', {}),    # Up/Down-gap side-by-side white lines
    ('CDLGRAVESTONEDOJI', {}),      # Gravestone Doji
    ('CDLHAMMER', {}),              # Hammer
    ('CDLHANGINGMAN', {}),          # Hanging Man
    ('CDLHARAMI', {}),              # Harami Pattern
    ('CDLHARAMICROSS', {}),         # Harami Cross Pattern
    ('CDLHIGHWAVE', {}),            # High-Wave Candle
    ('CDLHIKKAKE', {}),             # Hikkake Pattern
    ('CDLHIKKAKEMOD', {}),          # Modified Hikkake Pattern
    ('CDLHOMINGPIGEON', {}),        # Homing Pigeon
    ('CDLIDENTICAL3CROWS', {}),     # Identical Three Crows
    ('CDLINNECK', {}),              # In-Neck Pattern
    ('CDLINVERTEDHAMMER', {}),      # Inverted Hammer
    ('CDLKICKING', {}),             # Kicking
    ('CDLKICKINGBYLENGTH', {}),     # Kicking - bull/bear by longer marubozu
    ('CDLLADDERBOTTOM', {}),        # Ladder Bottom
    ('CDLLONGLEGGEDDOJI', {}),      # Long Legged Doji
    ('CDLLONGLINE', {}),            # Long Line Candle
    ('CDLMARUBOZU', {}),            # Marubozu
    ('CDLMATCHINGLOW', {}),         # Matching Low
    ('CDLMATHOLD', {'penetration': 0}),          # Mat Hold
    ('CDLMORNINGDOJISTAR', {'penetration': 0}),  # Morning Doji Star
    ('CDLMORNINGSTAR', {'penetration': 0}),      # Morning Star
    ('CDLONNECK', {}),              # On-Neck Pattern
    ('CDLPIERCING', {}),            # Piercing Pattern
    ('CDLRICKSHAWMAN', {}),         # Rickshaw Man
    ('CDLRISEFALL3METHODS', {}),    # Rising/Falling Three Methods
    ('CDLSEPARATINGLINES', {}),     # Separating Lines
    ('CDLSHOOTINGSTAR', {}),        # Shooting Star
    ('CDLSHORTLINE', {}),           # Short Line Candle
    ('CDLSPINNINGTOP', {}),         # Spinning Top
    ('CDLSTALLEDPATTERN', {}),      # Stalled Pattern
    ('CDLSTICKSANDWICH', {}),       # Stick Sandwich
    ('CDLTAKURI', {}),              # Takuri (Dragonfly Doji, long lower shadow)
    ('CDLTASUKIGAP', {}),           # Tasuki Gap
    ('CDLTHRUSTING', {}),           # Thrusting Pattern (fixed: was CDLTASUKIGAP)
    ('CDLTRISTAR', {}),             # Tristar Pattern
    ('CDLUNIQUE3RIVER', {}),        # Unique 3 River
    ('CDLUPSIDEGAP2CROWS', {}),     # Upside Gap Two Crows
    ('CDLXSIDEGAP3METHODS', {}),    # Upside/Downside Gap Three Methods
]

# All indicators read the same OHLC inputs ("Adj Close" as the close).
_ohlc = (btc_yahoo["Open"], btc_yahoo['High'], btc_yahoo['Low'], btc_yahoo["Adj Close"])
for _name, _kwargs in _CANDLE_PATTERNS:
    btc_yahoo[_name] = getattr(ta, _name)(*_ohlc, **_kwargs)
# ## Exploration
# Column-name groups for downstream exploration: indicators called with no
# extra arguments vs. indicators called with `penetration=0`.
# Fix: removed the misspelled 'CDLORNINGDOJISTAR' entry — no such column is
# ever created (CDLMORNINGDOJISTAR is a penetration pattern, listed below).
pattern_rec = ['CDL2CROWS','CDL3BLACKCROWS','CDL3INSIDE','CDL3LINESTRIKE','CDL3OUTSIDE','CDL3STARSINSOUTH','CDL3WHITESOLDIERS','CDLABANDONEDBABY','CDLADVANCEBLOCK','CDLBELTHOLD','CDLBREAKAWAY','CDLCLOSINGMARUBOZU','CDLCONCEALBABYSWALL','CDLCOUNTERATTACK','CDLDOJI','CDLDOJISTAR','CDLDRAGONFLYDOJI','CDLENGULFING','CDLGAPSIDESIDEWHITE','CDLGRAVESTONEDOJI','CDLHAMMER','CDLHANGINGMAN','CDLHARAMI','CDLHARAMICROSS','CDLHIGHWAVE','CDLHIKKAKE','CDLHIKKAKEMOD','CDLHOMINGPIGEON','CDLIDENTICAL3CROWS','CDLINNECK','CDLINVERTEDHAMMER','CDLKICKING','CDLKICKINGBYLENGTH','CDLLADDERBOTTOM','CDLLONGLEGGEDDOJI','CDLLONGLINE','CDLMARUBOZU','CDLMATCHINGLOW','CDLONNECK','CDLPIERCING','CDLRICKSHAWMAN','CDLRISEFALL3METHODS','CDLSEPARATINGLINES','CDLSHOOTINGSTAR','CDLSHORTLINE','CDLSPINNINGTOP','CDLSTALLEDPATTERN','CDLSTICKSANDWICH','CDLTAKURI','CDLTASUKIGAP','CDLTHRUSTING','CDLTRISTAR','CDLUNIQUE3RIVER','CDLUPSIDEGAP2CROWS','CDLXSIDEGAP3METHODS']
pattern_rec_pen = ['CDLDARKCLOUDCOVER', 'CDLEVENINGDOJISTAR', 'CDLEVENINGSTAR', 'CDLMATHOLD', 'CDLMORNINGDOJISTAR', 'CDLMORNINGSTAR']
# Quick sanity checks on the augmented frame.
btc_yahoo.shape
btc_yahoo.isnull().sum()
btc_yahoo.describe()
# Pattern columns that never fired over the whole history.
btc_yahoo.columns[(btc_yahoo == 0).all()]
| notebooks/eda/102_ta_pattern_recognition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/leoninekev/training-frcnn-google-ml-engine/blob/master/ml_engine_training_walkthrough.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="KH6-APjfYZ61" colab_type="text"
# ## Model Training and Serving Online predictions on Google AI-platform: Overview
#
# The following is a brief walkthrough of the files and structure of this repository, aimed at clarifying the process of writing Keras code, then packaging and staging it as a training job on GCP's ML Engine.
#
# What follows are the steps documented to:
# * Setup a GCP project.
# * Authenticate GCP service account, creating gcloud bucket with cloud credentials.
# * Package and Submit a training job to google's ai-platform.
# Each of these steps is documented in turn below.
#
# * Towards the end of it, the notebook also highlights method to **Serve prediction from the model**
#
# _
#
# Keras is a high-level API for building and training deep learning models.
# Explore more about it at [tf.keras](https://www.tensorflow.org/guide/keras)
#
# **Note:** While it assumes you are running this notebook on **google Colab**, just proceed with listed/prompted instructions; Although the same notebook can also be run on local jupyter with minimal changes.
#
# + [markdown] id="aGCL9ejmayR3" colab_type="text"
# ## 1. Set up your GCP project
#
# 1. [Select or create a GCP project.](https://console.cloud.google.com/cloud-resource-manager)
#
# 2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)
#
# 3. [Enable the AI Platform ("Cloud Machine Learning Engine") and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)
#
# 4. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
# + [markdown] id="oaqruLj3A4q_" colab_type="text"
# * In case you already have a GCP project configured, just enter suitable details.
# + id="-Ieeq-5lYpbt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="a1dbc03e-801d-424e-dbaf-5ce26cbd36a5"
# GCP project that all subsequent gcloud/gsutil commands operate on.
PROJECT_ID = "nifty-episode-231612" #@param {type:"string"}
# ! gcloud config set project $PROJECT_ID
# + [markdown] id="86ZNT2sJYohG" colab_type="text"
# ## 2.Authenticate your GCP account
#
# * **Run the following cell and do as prompted, when asked to authenticate your account via oAuth.**
#
# + id="T02KpSxYY1_9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="c34de5f5-a5ee-4432-b101-c1e473c64041"
import sys

# Authenticate to GCP: on Colab this triggers the interactive OAuth flow;
# anywhere else you are expected to point GOOGLE_APPLICATION_CREDENTIALS at
# a service-account key file.
if 'google.colab' in sys.modules:
    from google.colab import auth as google_auth
    google_auth.authenticate_user()
else:
    # If you are running this notebook locally, replace the string below with
    # the path to your service account key and run this cell to authenticate
    # your GCP account.
    # %env GOOGLE_APPLICATION_CREDENTIALS ''
    # Fix: the `%env` magic above is a comment in the jupytext script form,
    # which left `else:` with an empty suite (SyntaxError when run as .py).
    pass
# + id="z3rmHKEhjbkr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="199ee792-58bf-4fd7-b8bc-41299374a3d5"
# !gcloud config list
# + [markdown] id="xc0QJmDGY6_g" colab_type="text"
# ### 2.1 Set Up Cloud Storage bucket
#
# * To submit a training job through Cloud SDK, it requires uploading a Python package
# containing your training code files to a Cloud Storage bucket.
# * Thus ai-platform runs the code from this package.
# * Also it saves the resulting trained model & configs from your job in the same bucket, which can then
# be versioned to serve online predictions.
#
# In case you didn't create a bucket already set the name of your Cloud Storage bucket below (Ensure that its unique). If you've a Bucket already, enter the name of your existing bucket anyway.
#
# Enter `REGION` variable, which is needed throughout the process be it training or online prediction.
# Explore more on availablity region [here](https://cloud.google.com/ml-engine/docs/tensorflow/regions).
# + id="zPo3wYUTY-th" colab_type="code" colab={}
# Cloud Storage staging bucket for the training package and job outputs,
# and the compute region used for training and online prediction.
BUCKET_NAME="nifty-episode-231612-mlengine" #@param {type:"string"}
REGION="asia-east1" #@param {type:"string"}
# + [markdown] id="SDit2ZSwEHZ6" colab_type="text"
# **Note**: Run following cell **ONLY** if you haven't created a bucket already, Or want to create a New one.
# + id="4ZFmjiwcEFPR" colab_type="code" colab={}
# ! gsutil mb -l $REGION gs://$BUCKET_NAME
# + [markdown] id="rgdZi_1lbBVZ" colab_type="text"
# * **Finally, validate access to your Cloud Storage bucket by examining its contents:**
#
# + id="-0HC0cSpbKAf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 374} outputId="8afd23e7-3e4e-4a90-845b-294a2daa6592"
# ! gsutil ls -al gs://$BUCKET_NAME
# + [markdown] id="KdFhraVLbU2j" colab_type="text"
# ## 3. Submit a training job on AI Platform
#
# Following code is a keras implementation of **FRCNN (an object detection model)** used for detecting food objects in image.
# google ai-platform is used to package the code and submit as training job on google cloud platform.
# * It outputs a model.hdf5 file (weights of trained FRCNN model) and a config file holding key model archetecture parameters, and saves them at a specified path in cloud Storage bucket.
#
# **Run the following cell to:**
# * First, download the training code(Clone the repo to get code and dependencies).
# * Although required dependencies need to be installed for local model training, this code is trained on AI Platform, so dependencies come preinstalled based on the [runtime version](https://cloud.google.com/ml-engine/docs/tensorflow/runtime-version-list) one chooses during training.
# * change the notebook's working directory to core directory containing **setup.py** and **trainer/** directory.
# + id="fjPizllyRgYE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="c9d48ced-059b-4626-8649-0ecb2777086e"
# !git clone https://github.com/leoninekev/training-frcnn-google-ml-engine.git
# #! pip install -r requirements.txt
# Set the working directory to the sample code directory
# %cd training-frcnn-google-ml-engine/move_to_cloudshell/
# + id="57YPYIDaWafj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="f6a03888-7ffd-4c3f-fdac-90b8e47311d9"
# ! ls -pR
# + [markdown] id="UF6vZPflLjgZ" colab_type="text"
# **Now prior to submiting a training job few key variables need to be configured as follows:**
#
# + [markdown] id="k4TZHpJR63CC" colab_type="text"
# * Navigate to **trainer/** directory to modify- bucket_path, output model_name, config_name in **task.py** in accordance with your gcp service account.
# + id="ZXWqHbCftckM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="735c9be9-1d72-4ac4-fe5a-793fab209cb5"
# %cd trainer
# !ls
# + [markdown] id="hxnUVVzX6wRB" colab_type="text"
# * Run **%pycat task.py** (this opens a pop-up displaying the content of task.py)
# * Copy all code to local Python IDE, or a cell below and edit the default arguments values in **parsers** for
# * **--path**
# * **--config_filename**
# * **--output_weight_path**
# * **--bucket_path**
#
# and name for **model_weights** before saving.
#
# + id="3NdsK5Kit87F" colab_type="code" colab={}
# %pycat task.py
# #copy the code from popup, paste it to a python IDLE locally, edit it and again copy the whole post edit
# + [markdown] id="aU7jVz5Z7BMI" colab_type="text"
# * Copy the edited code from local IDE in following colab cell beneath the command: **%%writefile task.py** (as shown below)
# and run the cell - The new edits will be overwritten to a new task.py file(you may also save a new task_file.py, and later delete the older task.py)
# + id="MFd1avzJv1zn" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e28fef84-b5d3-4082-f455-be8b674e3d29"
# %%writefile task.py
from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
from tensorflow.python.lib.io import file_io
from keras import backend as K
from keras.optimizers import Adam, SGD, RMSprop
from keras.layers import Input
from keras.models import Model
import config, data_generators
import losses as losses
import roi_helpers
from keras.utils import generic_utils
# Deep model/config structures can exceed the default recursion limit when
# pickled, so raise it up front.
sys.setrecursionlimit(40000)

# Command-line options for the training job; defaults point at the GCS
# bucket used throughout this walkthrough.
parser = OptionParser()
parser.add_option("-p", "--path", dest="train_path", help="Path to training data(annotation.txt file).",default="gs://nifty-episode-231612-mlengine/training_data_and_annotations_for_cloud_060519/annotations.txt")# /data.pickle -- for pickled annotations
parser.add_option("-o", "--parser", dest="parser", help="Parser to use. One of simple_text or simple_pickle",
                  default="simple")# simple_pick --for simple_parser_pkl
parser.add_option("-n", "--num_rois", type="int", dest="num_rois", help="Number of RoIs to process at once.", default=32)
parser.add_option("--network", dest="network", help="Base network to use. Supports vgg or resnet50.", default='resnet50')
# Data-augmentation switches (all disabled by default).
parser.add_option("--hf", dest="horizontal_flips", help="Augment with horizontal flips in training. (Default=false).", action="store_true", default=False)
parser.add_option("--vf", dest="vertical_flips", help="Augment with vertical flips in training. (Default=false).", action="store_true", default=False)
parser.add_option("--rot", "--rot_90", dest="rot_90", help="Augment with 90 degree rotations in training. (Default=false).",
                  action="store_true", default=False)
parser.add_option("--num_epochs", type="int", dest="num_epochs", help="Number of epochs.", default=1)# deafult=1 --for test
parser.add_option("--config_filename", dest="config_filename", help=
                  "Location to store all the metadata related to the training (to be used when testing).",
                  default="config_new.pickle")
parser.add_option("--output_weight_path", dest="output_weight_path", help="Output path for weights.",default='gs://nifty-episode-231612-mlengine/my_job_files/')
parser.add_option("--input_weight_path", dest="input_weight_path", help="Input path for weights. If not specified, will try to load default weights provided by keras.",
                  default='https://github.com/fchollet/deep-learning-models/releases/download/v0.2/resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5')
parser.add_option("--bucket_path", dest="bucket_path", help="bucket path for stroing weights & configs", default='gs://nifty-episode-231612-mlengine/my_job_files/')
(options, args) = parser.parse_args()
if not options.train_path:# if filename is not given
    parser.error('Error: path to training data must be specified. Pass --path to command line')
# Select the annotation-parser implementation named on the command line.
if options.parser == 'simple':
    from simple_parser_text import get_data
elif options.parser == 'simple_pick':
    from simple_parser_pkl import get_data
else:
    raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")
# pass the settings from the command line, and persist them in the config object
C = config.Config()
C.use_horizontal_flips = bool(options.horizontal_flips)
C.use_vertical_flips = bool(options.vertical_flips)
C.rot_90 = bool(options.rot_90)
C.model_path = options.output_weight_path  # GCS prefix where weights are written
C.num_rois = int(options.num_rois)
# Select the backbone implementation module.
if options.network == 'vgg':
    C.network = 'vgg'
    import vgg as nn
elif options.network == 'resnet50':
    import resnet as nn
    C.network = 'resnet50'
else:
    print('Not a valid model')
    raise ValueError
# check if weight path was passed via command line
if options.input_weight_path:
    C.base_net_weights = options.input_weight_path
else:
    # set the path to weights based on backend and model
    C.base_net_weights = nn.get_weight_path()# 'resnet50_weights_th_dim_ordering_th_kernels_notop.h5'
# Parse annotations into image records, per-class counts and a class->index map.
all_imgs, classes_count, class_mapping = get_data(options.train_path)
# Ensure an explicit background class exists.
if 'bg' not in classes_count:
    classes_count['bg'] = 0
    class_mapping['bg'] = len(class_mapping)
C.class_mapping = class_mapping
inv_map = {v: k for k, v in class_mapping.items()}
print('Training images per class:')
pprint.pprint(classes_count)
print('Num classes (including bg) = {}'.format(len(classes_count)))
config_output_filename = options.bucket_path + options.config_filename# gs://input-your-bucket-name/train_on_gcloud/my_job_files/config.pickle
def new_open(name, mode, buffering=-1):# to open & load files from gcloud storage
    # Opens local or gs:// paths through TF's file_io.
    # NOTE(review): the `buffering` parameter is accepted but ignored.
    return file_io.FileIO(name, mode)
with new_open(config_output_filename, 'wb') as config_f:
    pickle.dump(C,config_f, protocol=2)# dumps config.pickle(compatible for python 2) in gcloud bucket
print('Config has been written to {}, and can be loaded when testing to ensure correct results'.format(config_output_filename))
# Shuffle the annotated images and split train/val by their imageset tag.
random.shuffle(all_imgs)
num_imgs = len(all_imgs)
train_imgs = [s for s in all_imgs if s['imageset'] == 'trainval']
val_imgs = [s for s in all_imgs if s['imageset'] == 'test']
print('Num train samples {}'.format(len(train_imgs)))
print('Num val samples {}'.format(len(val_imgs)))
# Generators yielding (image, RPN targets, metadata) batches.
data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C, nn.get_img_output_length, K.image_dim_ordering(), mode='train')
data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length,K.image_dim_ordering(), mode='val')
# Input shape depends on the backend's channel ordering ('th' = channels-first).
if K.image_dim_ordering() == 'th':
    input_shape_img = (3, None, None)
else:
    input_shape_img = (None, None, 3)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(None, 4))
# define the base network (resnet here, can be VGG, Inception, etc)
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(shared_layers, roi_input, C.num_rois, nb_classes=len(classes_count), trainable=True)
model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)
# this is a model that holds both the RPN and the classifier, used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)
# Load ImageNet-pretrained backbone weights into both sub-models.
try:
    print('loading weights from {}'.format(C.base_net_weights))
    # Fix: `get_file` was used without ever being imported, so a NameError was
    # raised here on every run and silently swallowed by the bare `except:` —
    # pretrained weights were never actually loaded.
    from keras.utils import get_file
    weights_path = get_file('base_weights.h5', C.base_net_weights)# downloading and adding weight paths
    model_rpn.load_weights(weights_path)
    model_classifier.load_weights(weights_path)
    print('weights loaded.')
except Exception:
    # Best-effort: training proceeds from random initialization on failure.
    print('Could not load pretrained model weights. Weights can be found in the keras application folder \
https://github.com/fchollet/keras/tree/master/keras/applications')
# Separate optimizers for the RPN and the detector head.
optimizer = Adam(lr=1e-5)
optimizer_classifier = Adam(lr=1e-5)
model_rpn.compile(optimizer=optimizer, loss=[losses.rpn_loss_cls(num_anchors), losses.rpn_loss_regr(num_anchors)])
model_classifier.compile(optimizer=optimizer_classifier, loss=[losses.class_loss_cls, losses.class_loss_regr(len(classes_count)-1)], metrics={'dense_class_{}'.format(len(classes_count)): 'accuracy'})
# model_all is only used to load/save combined weights; its loss is a placeholder.
model_all.compile(optimizer='sgd', loss='mae')
epoch_length = 1000  # iterations per "epoch" for logging/checkpointing
num_epochs = int(options.num_epochs)
iter_num = 0
# NOTE(review): this rebinds the name `losses` (previously the losses module
# imported above) to a per-iteration loss buffer.  The module is no longer
# needed after the compile calls, but the shadowing is easy to trip over.
losses = np.zeros((epoch_length, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
start_time = time.time()
best_loss = np.Inf
class_mapping_inv = {v: k for k, v in class_mapping.items()}
print('Starting training')
vis = True
for epoch_num in range(num_epochs):
    progbar = generic_utils.Progbar(epoch_length)
    print('Epoch {}/{}'.format(epoch_num + 1, num_epochs))
    # Run exactly `epoch_length` successful iterations, then break to the next epoch.
    while True:
        try:
            # Periodically report how well RPN proposals overlap ground truth.
            if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose:
                mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor))/len(rpn_accuracy_rpn_monitor)
                rpn_accuracy_rpn_monitor = []
                print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length))
                if mean_overlapping_bboxes == 0:
                    print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')
            # One batch: image X, RPN targets Y, annotation metadata img_data.
            X, Y, img_data = next(data_gen_train)
            loss_rpn = model_rpn.train_on_batch(X, Y)
            P_rpn = model_rpn.predict_on_batch(X)
            # Turn raw RPN output into candidate RoIs.
            R = roi_helpers.rpn_to_roi(P_rpn[0], P_rpn[1], C, K.image_dim_ordering(), use_regr=True, overlap_thresh=0.7, max_boxes=300)
            # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
            X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
            if X2 is None:
                # No RoI matched ground truth; record a miss and fetch a new batch.
                rpn_accuracy_rpn_monitor.append(0)
                rpn_accuracy_for_epoch.append(0)
                continue
            # Last class channel == 1 marks background (negative) samples.
            neg_samples = np.where(Y1[0, :, -1] == 1)
            pos_samples = np.where(Y1[0, :, -1] == 0)
            if len(neg_samples) > 0:
                neg_samples = neg_samples[0]
            else:
                neg_samples = []
            if len(pos_samples) > 0:
                pos_samples = pos_samples[0]
            else:
                pos_samples = []
            rpn_accuracy_rpn_monitor.append(len(pos_samples))
            rpn_accuracy_for_epoch.append((len(pos_samples)))
            # Balance the detector minibatch: up to num_rois//2 positives,
            # padded with negatives (sampled with replacement if too few).
            if C.num_rois > 1:
                if len(pos_samples) < C.num_rois//2:
                    selected_pos_samples = pos_samples.tolist()
                else:
                    selected_pos_samples = np.random.choice(pos_samples, C.num_rois//2, replace=False).tolist()
                try:
                    selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist()
                except:
                    selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist()
                sel_samples = selected_pos_samples + selected_neg_samples
            else:
                # in the extreme case where num_rois = 1, we pick a random pos or neg sample
                selected_pos_samples = pos_samples.tolist()
                selected_neg_samples = neg_samples.tolist()
                if np.random.randint(0, 2):
                    sel_samples = random.choice(neg_samples)
                else:
                    sel_samples = random.choice(pos_samples)
            loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])
            # Per-iteration losses: RPN cls/regr, detector cls/regr, detector accuracy.
            losses[iter_num, 0] = loss_rpn[1]
            losses[iter_num, 1] = loss_rpn[2]
            losses[iter_num, 2] = loss_class[1]
            losses[iter_num, 3] = loss_class[2]
            losses[iter_num, 4] = loss_class[3]
            progbar.update(iter_num+1, [('rpn_cls', losses[iter_num, 0]), ('rpn_regr', losses[iter_num, 1]),
                                        ('detector_cls', losses[iter_num, 2]), ('detector_regr', losses[iter_num, 3])])
            iter_num += 1
            # End of the "epoch": log means, checkpoint on improvement, break.
            if iter_num == epoch_length:
                loss_rpn_cls = np.mean(losses[:, 0])
                loss_rpn_regr = np.mean(losses[:, 1])
                loss_class_cls = np.mean(losses[:, 2])
                loss_class_regr = np.mean(losses[:, 3])
                class_acc = np.mean(losses[:, 4])
                mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(rpn_accuracy_for_epoch)
                rpn_accuracy_for_epoch = []
                if C.verbose:
                    print('Mean number of bounding boxes from RPN overlapping ground truth boxes: {}'.format(mean_overlapping_bboxes))
                    print('Classifier accuracy for bounding boxes from RPN: {}'.format(class_acc))
                    print('Loss RPN classifier: {}'.format(loss_rpn_cls))
                    print('Loss RPN regression: {}'.format(loss_rpn_regr))
                    print('Loss Detector classifier: {}'.format(loss_class_cls))
                    print('Loss Detector regression: {}'.format(loss_class_regr))
                    print('Elapsed time: {}'.format(time.time() - start_time))
                curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
                iter_num = 0
                start_time = time.time()
                if curr_loss < best_loss:
                    if C.verbose:
                        print('Total loss decreased from {} to {}, saving weights'.format(best_loss,curr_loss))
                    best_loss = curr_loss
                    model_weights= 'model_frcnn_new.hdf5'
                    # Save locally, then copy the file into the GCS bucket.
                    model_all.save_weights(model_weights)
                    # NOTE(review): text-mode 'r'/'w+' on a binary HDF5 file may
                    # corrupt the copy — 'rb'/'wb+' looks safer; confirm.
                    with new_open(model_weights, mode='r') as infile:# to write hdf5 file to gs://input-your-bucket-name/train_on_gcloud/my_job_files/
                        with new_open(C.model_path + model_weights, mode='w+') as outfile:
                            outfile.write(infile.read())
                break
        except Exception as e:
            # Keep training through sporadic data/conversion errors.
            print('Exception: {}'.format(e))
            continue
print('Training complete, exiting.')
# + [markdown] id="q0tuMyJ_wf9U" colab_type="text"
# * Exit out of **trainer/**, to the directory containing **setup.py** for initiating dependency packaging before training on gcloud.
# * Verify the present working directory
# + id="ajSWRrHh0gea" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="a9b92627-3931-48cc-8d28-403499bb393d"
# %cd ..
# !pwd
# !ls
# + [markdown] id="JqY_No38-Mu9" colab_type="text"
# * Define a **JOB_NAME** for training job
# + id="S74X6xVV_E_D" colab_type="code" colab={}
JOB_NAME='test_job_GcloudColab_3'
# + [markdown] id="dPOQm0LEcXy4" colab_type="text"
# Run the following cell to package the **`trainer/`** directory:
# * It uploads the package to specified **gs://$BUCKET_NAME/JOB_NAME/**, and instruct AI Platform to run the **`trainer.task`** module from that package.
#
# * The **`--stream-logs`** flag lets you view training logs in the cell below (One can
# also view logs and other job details in the GCP Console, if you've enabled the **Stackdriver logging service**.)
#
# For staging your code to package and further training, ensure that following crucial parameters are defined priorly (given below are dummy bucket, job, region names, you may input as you will):
# * **BUCKET_NAME** = 'nifty-episode-231612-mlengine'
# * **JOB_NAME** = 'test_job_GcloudColab_3'
# * **REGION**= 'asia-east1'
# * **package-path**= trainer/
# * **model-name** = trainer.task
# * **runtime-version**=1.13
# + [markdown] id="_F18qQezO81x" colab_type="text"
# Now submit a training job to AI Platform.
# * Following runs the training module in gcloud and exports the training package and trained model to Cloud Storage.
# + id="dX1DXZZwW606" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="9814bfd6-ccb3-485e-8a51-07b5481298af"
# ! gcloud ai-platform jobs submit training $JOB_NAME --package-path trainer/ --module-name trainer.task --region $REGION --runtime-version=1.13 --scale-tier=CUSTOM --master-machine-type=standard_gpu --staging-bucket gs://$BUCKET_NAME --stream-logs
# + [markdown] id="USJ6J6XJS5AV" colab_type="text"
#
# + [markdown] id="KM3Q71nHCi5W" colab_type="text"
# ## Online predictions in AI Platform
#
# ### Create model and version resources in AI Platform
#
# To serve online predictions using the model you trained and exported previously,
# create a **Model Resource** in AI Platform and a **Version Resource**
# within it.
#
# The version resource is what actually uses your trained model to
# serve predictions. Multiple Model and Versions could be created together in AI Platform to test and experiment with results.
#
# Explore more about [models
# and
# versions](https://cloud.google.com/ml-engine/docs/tensorflow/projects-models-versions-jobs).
# + [markdown] id="j7gwxsnJTd6w" colab_type="text"
# * First, Define a name and create the model resource;
# Also Enable Online prediction logging, to stream logs that contain the **stderr and stdout streams** from your prediction nodes, and can be useful for debugging during version creation and inferencing.
# + id="13j3uYJFDYoj" colab_type="code" colab={}
MODEL_NAME = "food_predictor"
# ! gcloud beta ai-platform models create $MODEL_NAME \
# --regions $REGION --enable-console-logging
# + [markdown] id="iuv_4mMZDZIU" colab_type="text"
# Now, create the model version. The previous training job exported its output to a timestamped directory in your Cloud Storage bucket; AI Platform uses this directory to create a model version.
#
# * The code packaged during training is stored at **gs://$BUCKET_NAME/JOB_NAME/** from previous steps.
#
# Since the model saved as an output of training is in Keras' .hdf5 format (i.e., not TensorFlow's recommended SavedModel format), the versioning of this model is done using a [Custom Prediction routine](https://cloud.google.com/ml-engine/docs/tensorflow/custom-prediction-routines), explained in the GCP documentation for version creation.
#
#
# + [markdown] id="ngRR0XhPSwAF" colab_type="text"
# * **First, Clone the custom prediction implementation**
# + id="Q68VBc59RwRJ" colab_type="code" colab={}
# !git clone https://github.com/leoninekev/ml-engine-custom-prediction-routine.git
# + [markdown] id="ACeLKRzERzdx" colab_type="text"
# Now proceed as follows:
# * Navigate to directory containing **setup.py**
# * package the code by running following cell.
# * Copy the packaged .tar.gz file to specific folder in cloud storage bucket
# + id="R_28dC8MF3NU" colab_type="code" colab={}
# Package the prediction-routine code and copy the archive to Cloud Storage.
# These are shell commands, so they need the notebook "!" escape (as used for
# the other gcloud/gsutil cells in this file) — bare, they are a SyntaxError.
# The destination folder is cloud_test_package_v5 (the original line was
# truncated to "..._v"; the version-creation cell below expects "_v5").
# ! python setup.py sdist --formats=gztar
# ! gsutil cp dist/test_code_new_model_beta5-0.1.tar.gz gs://nifty-episode-231612-mlengine/cloud_test_package_2/cloud_test_package_v5
# + [markdown] id="nVeecosOSZgP" colab_type="text"
# * Define following Model Versioning parameters
# + id="Ph0_7_C9SWc8" colab_type="code" colab={}
# Model-versioning parameters for `gcloud ai-platform versions create`.
# REGION must be a quoted string — a bare asia-east1 is invalid Python
# (it would parse as `asia` minus `east1`, two undefined names).
MODEL_NAME = "FoodPredictor_060619"
VERSION_NAME = 'v5_a'
REGION = 'asia-east1'
# + [markdown] id="gkNUfIesSZDQ" colab_type="text"
# * Now submit a Version job, running following cell
# + id="geoGl3RCSYac" colab_type="code" colab={}
# Submit the version-creation job. This is a shell command, so it needs the
# notebook "!" escape and backslash continuations (same style as the
# `gcloud ... models create` cell above); bare, these lines are a SyntaxError.
# ! gcloud beta ai-platform versions create $VERSION_NAME --model $MODEL_NAME \
#   --python-version 3.5 --runtime-version 1.5 --machine-type mls1-c4-m2 \
#   --origin gs://nifty-episode-231612-mlengine/cloud_test_package_2/cloud_test_package_v5 \
#   --package-uris gs://nifty-episode-231612-mlengine/cloud_test_package_2/cloud_test_package_v5/test_code_new_model_beta5-0.1.tar.gz \
#   --prediction-class predictor.MyPredictor
# + id="Mv_a__YlWef5" colab_type="code" colab={}
| ml_engine_training_walkthrough.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Postprocessing parameter in compile method
#
# Compile method is a method that creates the explainer you need for your model.<br /> This compile method has many parameters, and among those is `postprocessing` parameter, that will be explained in this tutorial. <br />
# This parameter allows to **modify** the dataset with several techniques, for a better visualization.
# <b>This tutorial </b>presents the different way you can modify data, and the right syntax to do it.
#
# Contents:
# - Loading dataset and fitting a model.
#
# - Creating our SmartExplainer and compiling it without postprocessing.
#
# - New SmartExplainer with postprocessing parameter.
#
#
# Data from Kaggle: [Titanic](https://www.kaggle.com/c/titanic/data)
import pandas as pd
from sklearn.model_selection import train_test_split
# sklearn.ensemble.forest was a private module, deprecated in 0.22 and removed
# in scikit-learn 0.24; import the classifier from the public package instead.
from sklearn.ensemble import RandomForestClassifier
# ## Building a Supervised Model
# #### First step : Importing our dataset
from shapash.data.data_loader import data_loading
titanic_df, titanic_dict = data_loading('titanic')
y_df=titanic_df['Survived']
X_df=titanic_df[titanic_df.columns.difference(['Survived'])]
titanic_df.head()
# #### Second step : Encode our categorical variables
# +
from category_encoders import OrdinalEncoder
categorical_features = [col for col in X_df.columns if X_df[col].dtype == 'object']
encoder = OrdinalEncoder(
cols=categorical_features,
handle_unknown='ignore',
return_df=True).fit(X_df)
X_df = encoder.transform(X_df)
# -
# #### Third step : Train/test split and fitting our model
Xtrain, Xtest, ytrain, ytest = train_test_split(X_df, y_df, train_size=0.75, random_state=1)
classifier = RandomForestClassifier(n_estimators=200).fit(Xtrain, ytrain)
y_pred = pd.DataFrame(classifier.predict(Xtest), columns=['pred'], index=Xtest.index) # Predictions
# #### Fourth step : Declaring our Explainer
from shapash.explainer.smart_explainer import SmartExplainer
xpl = SmartExplainer(features_dict=titanic_dict) # Optional parameter, dict specifies label for features name
# # Compiling without postprocessing parameter
# After declaring our explainer, we need to compile it on our model and data in order to have information.
xpl.compile(
x=Xtest,
model=classifier,
preprocessing=encoder, # Optional: compile step can use inverse_transform method
y_pred=y_pred # Optional
)
# We can now use our explainer to understand model predictions, through plots or data. We also can find our original dataset, before preprocessing.
xpl.x_pred
# All the analysis you can do is in this tutorial : **[Tutorial](https://github.com/MAIF/shapash/blob/master/tutorial/tutorial02-Shapash-overview-in-Jupyter.ipynb)**
# # Compiling with postprocessing parameter
# Nevertheless, here we want to add postprocessing to our data to understand them better, and to have a better **explicability**.
# The syntax for the **postprocessing parameter** is as follow :
# ```python
# postprocess = {
# 'name_of_the_feature': {'type': 'type_of_modification', 'rule': 'rule_to_apply'},
# 'second_name_of_features': {'type': 'type_of_modification', 'rule': 'rule_to_apply'},
# ...
# }
# ```
# You have five different types of modifications :
#
# - 1) **prefix** : <br />
# If you want to modify the beginning of the data. The syntax is
# ```python
# {'features_name': {'type': 'prefix',
# 'rule': 'Example : '}
# }
# ```
#
# - 2) **suffix** : <br />
# If you want to add something at the end of some features, the syntax is similar :
# ```python
# {'features_name': {'type': 'suffix',
# 'rule': ' is an example'}
# }
# ```
#
# - 3) **transcoding** : <br />
# This is a mapping function which modifies categorical variables. The syntax is :
# ```python
# {'features_name': {'type': 'transcoding',
# 'rule': {'old_name1': 'new_name1',
# 'old_name2': 'new_name2',
# ...
# }
# }
# }
# ```
# If you don't map all possible values, those values won't be modified.
#
# - 4) **regex** : <br />
# If you want to modify strings, you can do it by regular expressions like this:
# ```python
# {'features_name': {'type': 'regex',
# 'rule': {'in': '^M',
# 'out': 'm'
# }
# }
# }
# ```
#
# - 5) **case** : <br />
# If you want to change the case of certain features, you can either change everything to lowercase with `'rule': 'lower'`, or to uppercase with `'rule': 'upper'`. The syntax is :
# ```python
# {'features_name': {'type': 'case',
# 'rule': 'upper'}
# ```
# Of course, you don't have to modify all features. Let's give an example.
postprocess = {
'Age': {'type': 'suffix',
'rule': ' years old' # Adding 'years old' at the end
},
'Sex': {'type': 'transcoding',
'rule': {'male': 'Man',
'female': 'Woman'}
},
'Pclass': {'type': 'regex',
'rule': {'in': ' class$',
'out': ''} # Deleting 'class' word at the end
},
'Fare': {'type': 'prefix',
'rule': '$' # Adding $ at the beginning
},
'Embarked': {'type': 'case',
'rule': 'upper'
}
}
# You can now add this postprocess dict in parameter :
xpl_postprocess = SmartExplainer(features_dict=titanic_dict) # New explainer
xpl_postprocess.compile(
x=Xtest,
model=classifier,
preprocessing=encoder, # Optional: compile step can use inverse_transform method
y_pred=y_pred, # Optional
postprocessing=postprocess
)
# You can now visualize your dataset, which is modified.
xpl_postprocess.x_pred
# All the plots are also modified with the postprocessing modifications.
# #### Application with to_pandas method
# The main purpose of postprocessing modifications is a better understanding of the data, especially when the features names are not specified, such as in to_pandas() method, which orders the features depending on their importance.
xpl_postprocess.to_pandas()
| tutorial/postprocess/tuto-postprocess01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # Distributions in Pandas
import pandas as pd
import numpy as np
np.random.binomial(2, 0.6)
s = np.random.binomial(15, 0.5, 20)
s
# +
# np.random.binomial?
# -
chance_of_tornado = 0.01/100
np.random.binomial(100000, chance_of_tornado)
# +
chance_of_tornado = 0.01
# One Bernoulli draw per simulated day: 1 = a tornado occurred that day.
tornado_events = np.random.binomial(1, chance_of_tornado, 1000)
two_days_in_a_row = 0
# Check every consecutive-day pair (j-1, j). The loop must run up to and
# including the last index: the original `range(1, len(...) - 1)` stopped
# one short and never examined the final pair.
for j in range(1, len(tornado_events)):
    if tornado_events[j] == 1 and tornado_events[j - 1] == 1:
        two_days_in_a_row += 1
print('{} tornadoes back to back in {} years'.format(two_days_in_a_row, 1000/365))
# -
np.random.uniform(0, 1)
np.random.normal(0.75)
# Formula for standard deviation
# $$\sqrt{\frac{1}{N} \sum_{i=1}^N (x_i - \overline{x})^2}$$
# +
distribution = np.random.normal(0.75,size=1000)
np.sqrt(np.sum((np.mean(distribution)-distribution)**2)/len(distribution))
# -
np.std(distribution)
import scipy.stats as stats
stats.kurtosis(distribution)
stats.skew(distribution)
chi_squared_df2 = np.random.chisquare(2, size=10000)
stats.skew(chi_squared_df2)
chi_squared_df5 = np.random.chisquare(5, size=10000)
stats.skew(chi_squared_df5)
# +
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
output = plt.hist([chi_squared_df2,chi_squared_df5], bins=50, histtype='step',
label=['2 degrees of freedom','5 degrees of freedom'])
plt.legend(loc='upper right')
# -
# # Hypothesis Testing
df = pd.read_csv('grades.csv')
df.head()
len(df)
early = df[df['assignment1_submission'] <= '2015-12-31']
late = df[df['assignment1_submission'] > '2015-12-31']
len(late)
early.mean()
late.mean()
# +
from scipy import stats
# stats.ttest_ind?
# -
stats.ttest_ind(early['assignment1_grade'], late['assignment1_grade'])
stats.ttest_ind(early['assignment2_grade'], late['assignment2_grade'])
stats.ttest_ind(early['assignment3_grade'], late['assignment3_grade'])
| 01 - Introduction to Data Science in Python/data and backup files/Week 4.ipynb |
# # Sharing pretrained models (PyTorch)
# Install the Transformers and Datasets libraries to run this notebook.
# !pip install datasets transformers[sentencepiece]
# !apt install git-lfs
# You will need to setup git, adapt your email and name in the following cell.
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "<NAME>"
# You will also need to be logged in to the Hugging Face Hub. Execute the following and enter your credentials.
# +
from huggingface_hub import notebook_login
notebook_login()
# +
from huggingface_hub import notebook_login
notebook_login()
# +
from transformers import TrainingArguments
training_args = TrainingArguments(
"bert-finetuned-mrpc", save_strategy="epoch", push_to_hub=True
)
# +
from transformers import AutoModelForMaskedLM, AutoTokenizer
checkpoint = "camembert-base"
model = AutoModelForMaskedLM.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# -
model.push_to_hub("dummy-model")
tokenizer.push_to_hub("dummy-model")
tokenizer.push_to_hub("dummy-model", organization="huggingface")
tokenizer.push_to_hub("dummy-model", organization="huggingface", use_auth_token="<TOKEN>")
from huggingface_hub import (
# User management
login,
logout,
whoami,
# Repository creation and management
create_repo,
delete_repo,
update_repo_visibility,
# And some methods to retrieve/change information about the content
list_models,
list_datasets,
list_metrics,
list_repo_files,
upload_file,
delete_file,
)
# +
from huggingface_hub import create_repo
create_repo("dummy-model")
# +
from huggingface_hub import create_repo

# The keyword argument is "organization" (US spelling) in the huggingface_hub
# API; the original "organisation" would raise a TypeError.
create_repo("dummy-model", organization="huggingface")
# +
from huggingface_hub import upload_file
upload_file(
"<path_to_file>/config.json",
path_in_repo="config.json",
repo_id="<namespace>/dummy-model",
)
# +
from huggingface_hub import Repository
repo = Repository("<path_to_dummy_folder>", clone_from="<namespace>/dummy-model")
# -
repo.git_pull()
repo.git_add()
repo.git_commit()
repo.git_push()
repo.git_tag()
repo.git_pull()
model.save_pretrained("<path_to_dummy_folder>")
tokenizer.save_pretrained("<path_to_dummy_folder>")
repo.git_add()
repo.git_commit("Add model and tokenizer files")
repo.git_push()
# +
from transformers import AutoModelForMaskedLM, AutoTokenizer
checkpoint = "camembert-base"
model = AutoModelForMaskedLM.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# Do whatever with the model, train it, fine-tune it...
model.save_pretrained("<path_to_dummy_folder>")
tokenizer.save_pretrained("<path_to_dummy_folder>")
| notebooks/course/chapter4/section3_pt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Change Directory to Main Katib folder
# cd /root/katib/sdk/python/
# ## Install Katib SDK
# ! python setup.py install
# ## Import Required Libraries
from kubernetes.client import V1PodTemplateSpec
from kubernetes.client import V1ObjectMeta
from kubernetes.client import V1PodSpec
from kubernetes.client import V1Container
import kubeflow.katib as kc
from kubeflow.katib import constants
from kubeflow.katib import utils
from kubeflow.katib import V1alpha3AlgorithmSetting
from kubeflow.katib import V1alpha3AlgorithmSetting
from kubeflow.katib import V1alpha3AlgorithmSpec
from kubeflow.katib import V1alpha3CollectorSpec
from kubeflow.katib import V1alpha3EarlyStoppingSetting
from kubeflow.katib import V1alpha3EarlyStoppingSpec
from kubeflow.katib import V1alpha3Experiment
from kubeflow.katib import V1alpha3ExperimentCondition
from kubeflow.katib import V1alpha3ExperimentList
from kubeflow.katib import V1alpha3ExperimentSpec
from kubeflow.katib import V1alpha3ExperimentStatus
from kubeflow.katib import V1alpha3FeasibleSpace
from kubeflow.katib import V1alpha3FileSystemPath
from kubeflow.katib import V1alpha3FilterSpec
from kubeflow.katib import V1alpha3GoTemplate
from kubeflow.katib import V1alpha3GraphConfig
from kubeflow.katib import V1alpha3Metric
from kubeflow.katib import V1alpha3MetricsCollectorSpec
from kubeflow.katib import V1alpha3NasConfig
from kubeflow.katib import V1alpha3ObjectiveSpec
from kubeflow.katib import V1alpha3Observation
from kubeflow.katib import V1alpha3Operation
from kubeflow.katib import V1alpha3OptimalTrial
from kubeflow.katib import V1alpha3ParameterAssignment
from kubeflow.katib import V1alpha3ParameterSpec
from kubeflow.katib import V1alpha3SourceSpec
from kubeflow.katib import V1alpha3Suggestion
from kubeflow.katib import V1alpha3SuggestionCondition
from kubeflow.katib import V1alpha3SuggestionList
from kubeflow.katib import V1alpha3SuggestionSpec
from kubeflow.katib import V1alpha3SuggestionStatus
from kubeflow.katib import V1alpha3TemplateSpec
from kubeflow.katib import V1alpha3Trial
from kubeflow.katib import V1alpha3TrialAssignment
from kubeflow.katib import V1alpha3TrialCondition
from kubeflow.katib import V1alpha3TrialList
from kubeflow.katib import V1alpha3TrialSpec
from kubeflow.katib import V1alpha3TrialStatus
from kubeflow.katib import V1alpha3TrialTemplate
from kubeflow.tfjob import constants
from kubeflow.tfjob import utils
from kubeflow.tfjob import V1ReplicaSpec
from kubeflow.tfjob import V1TFJob
from kubeflow.tfjob import V1TFJobSpec
from kubeflow.tfjob import TFJobClient
# ## Define Experiment related Specs
# +
algorithmsettings = V1alpha3AlgorithmSetting(
name= None,
value = None
)
algorithm = V1alpha3AlgorithmSpec(
algorithm_name = "random",
algorithm_settings = [algorithmsettings]
)
# Metric Collector: watch TensorFlow event files written under /train.
collector = V1alpha3CollectorSpec(kind = "TensorFlowEvent")
# `kind` is the path type ("Directory") and `path` is the location to watch;
# the two arguments were swapped in the original.
FileSystemPath = V1alpha3FileSystemPath(kind = "Directory", path = "/train")
metrics_collector_spec = V1alpha3MetricsCollectorSpec(
    collector = collector,
    source = FileSystemPath)
# Objective
objective = V1alpha3ObjectiveSpec(
goal = 0.9999,
objective_metric_name = "accuracy_1",
type = "maximize")
# Parameters
feasible_space = V1alpha3FeasibleSpace(min = "100", max = "200")
parameters = [V1alpha3ParameterSpec(
feasible_space = feasible_space,
name = "--batch-size",
parameter_type = "int"
)]
#Defining the namespace where tfjob needs to be created
namespace = utils.get_default_target_namespace()
#Defining a Container
container = V1Container(
name="tensorflow",
image="gcr.io/kubeflow-ci/tf-mnist-with-summaries:1.0",
image_pull_policy="Always",
command=["python", "/var/tf_mnist/mnist_with_summaries.py", "--log_dir=/train/metrics","{{- with .HyperParameters}}","{{- range .}}","{{.Name}}={{.Value}}", "{{- end}}","{{- end}}" ]
)
#Defining Worker Spec
worker = V1ReplicaSpec(
replicas=1,
restart_policy="OnFailure",
template=V1PodTemplateSpec(
spec=V1PodSpec(
containers=[container]
)
)
)
#Defining TFJob
tfjob = V1TFJob(
api_version="kubeflow.org/v1",
kind="TFJob",
metadata=V1ObjectMeta(name="trialsample",namespace=namespace),
spec=V1TFJobSpec(
clean_pod_policy="None",
tf_replica_specs={"Worker": worker}
)
)
#Creating TFJob
tfjob_client = TFJobClient()
tfjob_client.create(tfjob, namespace=namespace)
# Experiment
experiment = V1alpha3Experiment(
api_version="kubeflow.org/v1alpha3",
kind="Experiment",
metadata=V1ObjectMeta(name="tfjob-experiment",namespace="anonymous"),
spec=V1alpha3ExperimentSpec(
algorithm = algorithm,
max_failed_trial_count=3,
max_trial_count=12,
metrics_collector_spec= metrics_collector_spec ,
objective = objective,
parallel_trial_count=4,
parameters = parameters
)
)
# -
namespace = kc.utils.get_default_target_namespace()
kclient = kc.KatibClient()
# ## Create Experiment
kclient.create_experiment(experiment,namespace=namespace)
# ## Get Single Experiment
kclient.get_experiment(name="tfjob-experiment", namespace=namespace)
# ## Get all Experiments
kclient.get_experiment(namespace=namespace)
# ## Get experiment status
kclient.get_experiment_status(name="tfjob-experiment", namespace=namespace)
# ## Check whether experiment has succeeded
kclient.is_experiment_succeeded(name="tfjob-experiment", namespace=namespace)
# ## Delete experiment
# +
# kclient.delete_experiment(name="tfjob-example", namespace=namespace)
# -
# ## List Trials of an experiment
kclient.list_trials(name="tfjob-experiment", namespace=namespace)
# ## List all Experiments
kclient.list_experiments(namespace=namespace)
| sdk/python/examples/tfjob-katib-sdk-v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
data = pd.read_csv('Scores1.csv')
data
plt.figure(figsize=(19, 8))
sns.barplot(x = data.Category, y = data.Score, hue = data.Model, data = data)
plt.xticks(rotation=30)
plt.ylabel("AUC Scores")
plt.show()
| models/Data visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
# +
my_dictionary = { 'apple': 2, 4: 3, 3:1, 'grapes': 4 }
def values_that_are_keys(my_dictionary):
    """Return the values of *my_dictionary* that also appear among its keys.

    The result preserves value-iteration (insertion) order and keeps
    duplicates, matching the original behavior.
    """
    # A set gives O(1) membership tests; the original scanned a key list
    # for every value (O(n^2)).
    keys = set(my_dictionary)
    return [value for value in my_dictionary.values() if value in keys]
values_that_are_keys(my_dictionary)
# +
wordlist = ['apple', 'potato', 'pepper', 'orangey']
def word_length_dictionary(words):
    """Return a dict mapping each word in *words* to its length."""
    # A single dict comprehension replaces the original parallel-lists
    # build-then-index pattern; the result is identical.
    return {word: len(word) for word in words}
print(word_length_dictionary(wordlist))
# +
def double_index(lst, index):
    """Return a copy of *lst* with the element at *index* doubled.

    The caller's list is left unmodified (the original aliased the input,
    so it mutated it). If *index* is out of range, an unchanged copy is
    returned instead of raising.
    """
    doubled = lst[:]  # real copy; `newlst = lst` only aliased the argument
    try:
        doubled[index] *= 2
    except IndexError:
        pass  # invalid index: return the list unchanged
    return doubled
double_index([1, 2, 3, 4], 2)
# +
def delete_starting_evens(input):
    """Strip leading even numbers from *input* in place and return it.

    Note: the parameter name shadows the ``input`` builtin; it is kept
    unchanged for backward compatibility with existing callers.
    """
    # `input and ...` guards the empty case: the original indexed input[0]
    # unconditionally and raised IndexError when every element was even.
    while input and input[0] % 2 == 0:
        del input[0]
    return input
delete_starting_evens([4, 8, 10, 11, 12, 15])
# +
def divisible_by_ten(num):
    """Return True when *num* is an exact multiple of ten, else False."""
    remainder = num % 10
    return remainder == 0
divisible_by_ten(10)
# -
| notebooks/Bretts Test Scratchwork.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tuning a Pipeline
#
# This short guide shows how tune a Pipeline using a [BTB](https://github.com/HDI-Project/BTB) Tuner.
#
# Note that some steps are not explained for simplicity. Full details
# about them can be found in the previous parts of the tutorial.
#
# Here we will:
# 1. Load a dataset and a pipeline
# 2. Explore the pipeline tunable hyperparameters
# 3. Write a scoring function
# 4. Build a BTB Tunable and BTB Tuner.
# 5. Write a tuning loop
# ## Load dataset and the pipeline
#
# The first step will be to load the dataset that we were using in previous tutorials.
# +
from mlprimitives.datasets import load_dataset
dataset = load_dataset('census')
# -
# And load a suitable pipeline.
#
# Note how in this case we are using the variable name `template` instead of `pipeline`,
# because this will only be used as a template for the pipelines that we will create
# and evaluate during the later tuning loop.
# +
from mlblocks import MLPipeline
template = MLPipeline('single_table.classification.categorical_encoder.xgboost')
# -
# ## Explore the pipeline tunable hyperparameters
# Once we have loaded the pipeline, we can now extract the hyperparameters that we will tune
# by calling the `get_tunable_hyperparameters` method.
#
# In this case we will call it using `flat=True` to obtain the hyperparameters in a format
# that is compatible with BTB.
tunable_hyperparameters = template.get_tunable_hyperparameters(flat=True)
tunable_hyperparameters
# ## Write a scoring function
#
# To tune the pipeline we will need to evaluate its performance multiple times with different hyperparameters.
#
# For this reason, we will start by writing a scoring function that will expect only one
# input, the hyperparameters dictionary, and evaluate the performance of the pipeline using them.
#
# In this case, the evaluation will be done using 5-fold cross validation based on the `get_splits`
# method from the dataset.
# +
import numpy as np
def cross_validate(hyperparameters=None):
    """Score the template pipeline with 5-fold cross validation.

    A fresh pipeline is built from the module-level ``template`` for each
    fold; when *hyperparameters* is given, it is applied before fitting.
    Returns the mean of the per-fold dataset scores.
    """
    fold_scores = []
    for X_train, X_test, y_train, y_test in dataset.get_splits(5):
        # Instantiate a copy so the shared template is never mutated.
        candidate = MLPipeline(template.to_dict())
        if hyperparameters:
            candidate.set_hyperparameters(hyperparameters)
        candidate.fit(X_train, y_train)
        predictions = candidate.predict(X_test)
        fold_scores.append(dataset.score(y_test, predictions))
    return np.mean(fold_scores)
# -
# By calling this function without any arguments we will obtain the score obtained
# with the default hyperparameters.
default_score = cross_validate()
default_score
# Optionally, we can certify that by passing a hyperparameters dictionary the new hyperparameters
# will be used, resulting on a different score.
hyperparameters = {
('xgboost.XGBClassifier#1', 'max_depth'): 4
}
cross_validate(hyperparameters)
# ## Create a BTB Tunable
#
# The next step is to create the BTB Tunable instance that will be tuned by the BTB Tuner.
#
# For this we will use its `from_dict` method, passing our hyperparameters dict.
# +
from btb.tuning import Tunable
tunable = Tunable.from_dict(tunable_hyperparameters)
# -
# ## Create the BTB Tuner
#
# After creating the Tunable, we need to create a Tuner to tune it.
#
# In this case we will use the GPTuner, a Meta-model based tuner that uses a Gaussian Process Regressor
# for the optimization.
# +
from btb.tuning import GPTuner
tuner = GPTuner(tunable)
# -
# Optionally, since we already know the score obtained by the default arguments and
# these have a high probability of being already decent, we will inform the tuner
# about their performance.
#
# In order to obtain the default hyperparameters used before we can either call
# the template `get_hyperparameters(flat=True)` method, the `tunable.get_defaults()`.
defaults = tunable.get_defaults()
defaults
tuner.record(defaults, default_score)
# ## Start the Tuning loop
#
# Once we have the tuner ready we can the tuning loop.
#
# During this loop we will:
#
# 1. Ask the tuner for a new hyperparameter proposal
# 2. Run the `cross_validate` function to evaluate these hyperparameters
# 3. Record the obtained score back to the tuner.
# 4. If the obtained score is better than the previous one, store the proposal.
# +
best_score = default_score
best_proposal = defaults
for iteration in range(10):
print("scoring pipeline {}".format(iteration + 1))
proposal = tuner.propose()
score = cross_validate(proposal)
tuner.record(proposal, score)
if score > best_score:
print("New best found: {}".format(score))
best_score = score
best_proposal = proposal
# -
# After the loop has finished, the best proposal will be stored in the `best_proposal` variable,
# which can be used to generate a new pipeline instance.
best_proposal
best_pipeline = MLPipeline(template.to_dict())
best_pipeline.set_hyperparameters(best_proposal)
best_pipeline.fit(dataset.data, dataset.target)
| examples/tutorials/7. Tuning a Pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
#import matplotlib
Data = pd.read_csv("MalwareData.csv",sep="|")
# +
# legitimate files
legit = Data[0:41323].drop(["legitimate"],axis=1)
#choosing subset of data from malware files
mal = Data[41323::].drop(["legitimate"],axis=1)
# -
print("shape of legit dataset :",legit.shape[0],"samples",legit.shape[1],"features")
print("shape of malware dataset :",mal.shape[0],"samples",mal.shape[1],"features")
# 56 features to define the to define whether the sample is legit or a malware (excluding "legitimate")
Data.columns
Data.head(10)
legit.head(10)
# Tree Classifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# Removing non essential columns for training data
data_train = Data.drop(['Name','md5','legitimate'], axis=1).values
labels = Data['legitimate'].values
extratrees = ExtraTreesClassifier().fit(data_train,labels)
select = SelectFromModel(extratrees, prefit=True)
data_train_new = select.transform(data_train)
print(data_train.shape, data_train_new.shape)
# +
# number of selected features for training
features = data_train_new.shape[1]
importances = extratrees.feature_importances_
indices = np.argsort(importances)[::-1]
# sorting the features according to its importance (influence on final result)
for i in range(features):
print("%d"%(i+1), Data.columns[2+indices[i]],importances[indices[i]])
# +
from sklearn.ensemble import RandomForestClassifier
legit_train, legit_test, mal_train, mal_test = train_test_split(data_train_new, labels, test_size = 0.25)
# initialising a RandomForestClassifier model with 50 trees in the forest
randomf =RandomForestClassifier(n_estimators=50)
# training the model
randomf.fit(legit_train, mal_train)
# +
# checking performance of the model
print("Score of algo :", randomf.score(legit_test, mal_test)*100)
# +
from sklearn.metrics import confusion_matrix
result = randomf.predict(legit_test)
''''The first number of the first matrix gives the number of correct predictions of that
particular result which should be obtained and the second number gives the number of incorrect predictions made.
Similarly vice versa for the second matrix present'''
conf_mat = confusion_matrix(mal_test,result)
print(conf_mat)
# -
# **Logistic Regression**
import pickle
import warnings
# Silence sklearn convergence/deprecation warnings for cleaner notebook output.
warnings.filterwarnings('ignore')
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
# +
# Scaling the data to pass into the logistic regressor
sc = StandardScaler()
legit_train_scale = sc.fit_transform(legit_train)
# transform (not fit) the test split using the training statistics
legit_test_scale = sc.transform(legit_test)
# -
# Training the Model
logclf = LogisticRegression(random_state = 0)
logclf.fit(legit_train_scale, mal_train)
# TRAIN MODEL MULTIPLE TIMES FOR BEST SCORE
# Re-split, re-scale and re-fit 20 times; pickle whichever model has the best
# held-out accuracy seen so far.
best = 0
for _ in range(20):
    legit_train, legit_test, mal_train, mal_test = train_test_split(data_train_new, labels, test_size = 0.25)
    legit_train_scale = sc.fit_transform(legit_train)
    legit_test_scale = sc.transform(legit_test)
    logclf = LogisticRegression(random_state = 0)
    logclf.fit(legit_train_scale, mal_train)
    acc = logclf.score(legit_test_scale, mal_test)
    #print("Accuracy: " + str(acc))
    if acc > best:
        best = acc
        with open("malware_log_clf.pickle", "wb") as f:
            pickle.dump(logclf, f)
# +
# Checking final accuracy: reload the best pickled model and score it on the
# current held-out split. A context manager guarantees the file handle is
# closed (the original left `pickle_in` open).
with open("malware_log_clf.pickle", "rb") as pickle_in:
    logclf = pickle.load(pickle_in)
logclf.score(legit_test_scale, mal_test)
# -
| Malware Detection/Malware_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# Make the parent directory importable so the local Optimus package is found.
import sys
sys.path.append("..")
from optimus import Optimus
# Start an Optimus session; it wraps a SparkSession, exposed as op.spark.
op = Optimus()
op.spark
# +
from pyspark.sql.types import StructType, StructField, StringType, BooleanType, IntegerType, ArrayType
# Build a small demo DataFrame: schema triples (name, type, nullable)
# followed by the rows. The names deliberately contain accents and special
# characters to demonstrate the cleaning operations below.
df = op.create.df(
    [
        ("names", "str", True),
        ("height", "float", True),
        ("function", "str", True),
        ("rank", "int", True),
    ],
    [
        ("bumbl#ebéé ", 17.5, "Espionage", 7),
        ("Optim'us", 28.0, "Leader", 10),
        ("ironhide&", 26.0, "Security", 7),
    ])
#.repartition(1)
df.table()
# -
df.table()
# Chained operations: append a row, filter rows, project columns, transform a
# column, drop a column, then show.
df.rows.append(["Grimlock",80.0,"Commander",9])\
    .rows.select(df["rank"]>7)\
    .cols.select(["names","height","function"])\
    .cols.apply_expr("height", df["height"]-1)\
    .cols.drop("function")\
    .show()
# Clean up: sort rows, lowercase, strip accents and special characters, trim.
df = df\
    .rows.sort(["rank","height"])\
    .cols.lower(["names","function"])\
    .cols.remove_accents("names")\
    .cols.remove_special_chars("names")\
    .cols.trim("names")
df.show()
# Basic column statistics.
print(df.cols.min("height"))
print(df.cols.percentile(['height', 'rank'], [0.05, 0.25, 0.5, 0.75, 0.95]))
print(df.cols.max("height"))
print(df.cols.median(["height","rank"]))
print(df.cols.range(["height","rank"]))
print(df.cols.std(["height","rank"]))
df.show()
# Nest two columns into a single string column, then split it back and drop it.
nest = df.cols.nest(["names", "function"], output_col = "new_col", shape ="string")
nest.show()
unnest = nest.cols.unnest("new_col", " ").cols.drop("new_col")
unnest.show()
# Register a temp view and query it with SQL.
# NOTE(review): the view is registered on `df` but queried via `nest` —
# presumably both share the same underlying SparkSession; confirm.
df.registerTempTable("autobots")
nest.sql("SELECT * FROM autobots").show()
# +
from pyspark.sql import functions as F
# Plain Python UDF applied per value, plus a column-expression transform.
def func(value, args):
    return value + args
df\
    .cols.apply("height", func, "int", 1)\
    .cols.apply_expr("rank", F.col("rank")+1)\
    .table()
# +
from optimus.functions import abstract_udf as audf
# Use an abstract UDF as a row-filter predicate.
def func(val, args):
    return val>8
df.rows.select(audf("rank", func, "bool")).show()
# +
# Abstract UDF receiving a list of extra arguments.
def func(val, args):
    return val+args[0]+args[1]
df.withColumn("height", audf ("height", func, "int", [1,2])).show()
# +
# "column_exp" mode: the function builds a Spark column expression rather than
# operating on Python values.
def func(col_name, args):
    return F.col(col_name) + args
df.withColumn("height", audf ("height", func, "int", 2, "column_exp")).show()
# -
# Reshape: wide -> long with melt, then back with pivot.
df = df.melt(id_vars="names", value_vars=["height", "function","rank"])
df.table()
df = df.pivot("names","variable","value")
df.table()
df.rows.append(["Grimlock",80.0,"Commander",9]).table()
df.rows.append(["Grimlock", "Commander","80", "9"]).table()
df.rows.sort("names").table()
df.rows.sort("names","asc").table()
df.cols.rename("names","name").table()
df.cols.rename([("names","name"),("function","task")])
# NOTE(review): df["rank"]+"1" looks like string concatenation (cf. the
# type() demo on the next line) — confirm that "rank" holds strings after
# the pivot.
df.cols.append("new_rank", df["rank"]+"1").show()
print(type("12"+"123"))
df.cols.qcut("height","bins", 2).show()
| examples/new-api-cheat-sheet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import *
from collections import Counter
import seaborn as sns
import tqdm
import pandas as pd
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
IMAGE_DIR = 'image_contest_level_2'
# -
# # Read the dataset
# Labels file: records appear to be ";"-separated (see the split below).
df = pd.read_csv('image_contest_level_2/labels.txt', sep=' ', header=None)
df.head()
# # Length of the label strings
# Python 2: map() returns a list here, so np.array() works directly.
lens = np.array(map(lambda x:len(x.split(';')[1]), df[0]))
lens.min(), lens.max()
# # Count character occurrences
# Decode byte strings to unicode (Python 2) before counting characters.
c = Counter(''.join([x.decode('utf-8') for x in df[0]]))
d = pd.DataFrame(c.most_common(), columns=['word', 'count'])
d.head()
# # Bar chart of character counts
plt.figure(figsize=(16, 9))
sns.barplot(d['word'], d['count'], palette="Greens_d")
print ' '.join(d['word'])
# # Frequency of each character
n = len(df)
for i in d.index:
    print d['word'][i], d['count'][i] / float(n)
# # Guess how the parentheses were generated
# Enumerate all parenthesisations of 1+1+1+1 (only the last expression is
# displayed by the notebook).
# +
1+1+1+1
(1+1)+1+1
1+(1+1)+1
1+1+(1+1)
(1+1+1)+1
1+(1+1+1)
((1+1)+1)+1
(1+(1+1))+1
1+((1+1)+1)
1+(1+(1+1))
(1+1)+(1+1)
# -
2*5/11.0+5/11.0
# # Correlation between operator counts
# +
data = {}
# NOTE: this loop variable shadows the Counter `c` defined above.
for c in '=+-*(':
    data[c] = [x.count(c) for x in df[0]]
df2 = pd.DataFrame(data)
df2.corr()
# -
sns.heatmap(df2.corr(), annot=True, linewidths=.5)
# # Guess the generation scheme via random simulation
test = np.random.randint(3, size=(10000, 3))
test
# +
data = {}
for i in range(3):
    data[i] = np.count_nonzero(test == i, axis=-1)
df2 = pd.DataFrame(data)
df2.corr()
# -
sns.heatmap(df2.corr(), annot=True, linewidths=.5)
| 决赛代码/.ipynb_checkpoints/数据集探索-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pexo
# language: python
# name: pexo
# ---
# [< list of demos](Introduction.ipynb#demos)
#
# <a id='top'></a>
# # Barycentric Julian dates in TDB or TCB _(single star)_
#
# This demo shows how to calculate barycentric times ([TCB](https://en.wikipedia.org/wiki/Barycentric_Coordinate_Time) and [TDB](https://en.wikipedia.org/wiki/Barycentric_Dynamical_Time)) for a list of UTC Julian dates, using τ Ceti as an example.
# ## Arguments:
#
# `-p HD10700` - primary star: τ Ceti. PEXO retrieves target information from Simbad and astrometry from Gaia/Hipparcos
#
# `-i HARPS` - instrument: HARPS
#
# `-t '2450000 2453000 10'` - epochs, JD from 2450000 to 2453000, every 10 days
#
# `-m emulate` - PEXO mode
#
# `-v 'JDutc BJDtdb BJDtcb'` - output variables
#
# `-o ../results/tdb-out.txt` - output file
# ## Run the code
#
# Execute PEXO with the command below. This may take a few minutes to compute.
# +
# # cd into the code directory
# %cd ../code
# run PEXO via the R script
# ! Rscript pexo.R -p HD10700 -i HARPS -t '2450000 2453000 10' -m emulate -v 'JDutc BJDtdb BJDtcb' -o ../results/tdb-out.txt
# -
# ## Output
#
# The output file contains a table with 6 columns. Julian dates are divided into integer and decimal parts to preserve precision.
# Display the PEXO output table.
from utilities import Table
Table("../results/tdb-out.txt")
# The plot below shows the BJD\[TDB\] as a function of UTC Julian date in units of days.
from utilities import Plot
# factors=(1, 24*60): x stays in days; y presumably converted days -> minutes
# (matches the y-axis label below) — TODO confirm against Plot's docs.
p = Plot("../results/tdb-out.txt", factors=(1, 24*60)).add_plot(xcol=["JDutc1", "JDutc2"], ycol=["BJDtdb1", "BJDtdb2"], yxdiff=True, legend="TDB").add_plot(xcol=["JDutc1", "JDutc2"], ycol=["BJDtcb1", "BJDtcb2"], yxdiff=True, legend="TCB")
# Assign to a throwaway name to suppress the notebook's repr output.
o=p.axis.set_xlabel("JD UTC, days")
o=p.axis.set_ylabel("BJD correction, minutes")
| demos/demo-tdb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Stochastic Variational GP Regression
#
# ## Overview
#
# In this notebook, we'll give an overview of how to use SVGP stochastic variational regression ([https://arxiv.org/pdf/1411.2005.pdf](https://arxiv.org/pdf/1411.2005.pdf)) to rapidly train using minibatches on the `elevators` UCI dataset with tens of thousands of training examples. This is one of the more common use-cases of variational inference for GPs.
#
# If you are unfamiliar with variational inference, we recommend the following resources:
# - [Variational Inference: A Review for Statisticians](https://arxiv.org/abs/1601.00670) by <NAME>, <NAME>, <NAME>.
# - [Scalable Variational Gaussian Process Classification](https://arxiv.org/abs/1411.2005) by <NAME>, <NAME>, <NAME>.
# +
import tqdm
import math
import torch
import gpytorch
from matplotlib import pyplot as plt
# Make plots inline
# %matplotlib inline
# -
# For this example notebook, we'll be using the `elevators` UCI dataset. Running the next cell downloads a copy of the dataset that has already been scaled and normalized appropriately. For this notebook, we'll simply be splitting the data using the first 80% of the data as training and the last 20% as testing.
#
# **Note**: Running the next cell will attempt to download a **~136 MB** file to the current directory.
# +
import urllib.request
import os
from scipy.io import loadmat
from math import floor
# this is for running the notebook in our testing framework
smoke_test = ('CI' in os.environ)
# Download the preprocessed 'elevators' dataset once, unless running in CI.
if not smoke_test and not os.path.isfile('../elevators.mat'):
    print('Downloading \'elevators\' UCI dataset...')
    urllib.request.urlretrieve('https://drive.google.com/uc?export=download&id=1jhWL3YUHvXIaftia4qeAyDwVxo6j1alk', '../elevators.mat')
if smoke_test: # this is for running the notebook in our testing framework
    X, y = torch.randn(1000, 3), torch.randn(1000)
else:
    data = torch.Tensor(loadmat('../elevators.mat')['data'])
    X = data[:, :-1]
    # Scale each feature column to the range [-1, 1].
    X = X - X.min(0)[0]
    X = 2 * (X / X.max(0)[0]) - 1
    y = data[:, -1]
# First 80% of rows for training, last 20% for testing.
train_n = int(floor(0.8 * len(X)))
train_x = X[:train_n, :].contiguous()
train_y = y[:train_n].contiguous()
test_x = X[train_n:, :].contiguous()
test_y = y[train_n:].contiguous()
# Move everything to GPU when available.
if torch.cuda.is_available():
    train_x, train_y, test_x, test_y = train_x.cuda(), train_y.cuda(), test_x.cuda(), test_y.cuda()
# ## Creating a DataLoader
#
# The next step is to create a torch `DataLoader` that will handle getting us random minibatches of data. This involves using the standard `TensorDataset` and `DataLoader` modules provided by PyTorch.
#
# In this notebook we'll be using a fairly large batch size of 1024 just to make optimization run faster, but you could of course change this as you so choose.
# +
from torch.utils.data import TensorDataset, DataLoader
# Shuffle training minibatches each epoch; keep test batches in fixed order.
train_dataset = TensorDataset(train_x, train_y)
train_loader = DataLoader(train_dataset, batch_size=1024, shuffle=True)
test_dataset = TensorDataset(test_x, test_y)
test_loader = DataLoader(test_dataset, batch_size=1024, shuffle=False)
# -
# ## Creating a SVGP Model
#
#
# For most variational/approximate GP models, you will need to construct the following GPyTorch objects:
#
# 1. A **GP Model** (`gpytorch.models.ApproximateGP`) - This handles basic variational inference.
# 1. A **Variational distribution** (`gpytorch.variational._VariationalDistribution`) - This tells us what form the variational distribution q(u) should take.
# 1. A **Variational strategy** (`gpytorch.variational._VariationalStrategy`) - This tells us how to transform a distribution q(u) over the inducing point values to a distribution q(f) over the latent function values for some input x.
#
# Here, we use a `VariationalStrategy` with `learn_inducing_points=True`, and a `CholeskyVariationalDistribution`. These are the most straightforward and common options.
#
#
# #### The GP Model
#
# The `ApproximateGP` model is GPyTorch's simplest approximate inference model. It approximates the true posterior with a distribution specified by a `VariationalDistribution`, which is most commonly some form of MultivariateNormal distribution. The model defines all the variational parameters that are needed, and keeps all of this information under the hood.
#
# The components of a user built `ApproximateGP` model in GPyTorch are:
#
# 1. An `__init__` method that constructs a mean module, a kernel module, a variational distribution object and a variational strategy object. This method should also be responsible for constructing whatever other modules might be necessary.
#
# 2. A `forward` method that takes in some $n \times d$ data `x` and returns a MultivariateNormal with the *prior* mean and covariance evaluated at `x`. In other words, we return the vector $\mu(x)$ and the $n \times n$ matrix $K_{xx}$ representing the prior mean and covariance matrix of the GP.
# +
from gpytorch.models import ApproximateGP
from gpytorch.variational import CholeskyVariationalDistribution
from gpytorch.variational import VariationalStrategy
class GPModel(ApproximateGP):
    """SVGP regression model.

    Uses a Cholesky variational distribution over the inducing values and a
    VariationalStrategy with learnable inducing locations; the prior is a
    constant mean with a scaled RBF kernel.
    """

    def __init__(self, inducing_points):
        # One inducing value per row of the inducing-point tensor.
        variational_distribution = CholeskyVariationalDistribution(inducing_points.size(0))
        variational_strategy = VariationalStrategy(
            self, inducing_points, variational_distribution, learn_inducing_locations=True
        )
        super(GPModel, self).__init__(variational_strategy)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        # Return the GP prior at x: mean vector mu(x) and covariance K_xx.
        return gpytorch.distributions.MultivariateNormal(
            self.mean_module(x), self.covar_module(x)
        )
# Use the first 500 training inputs as the initial inducing-point locations.
inducing_points = train_x[:500, :]
model = GPModel(inducing_points=inducing_points)
likelihood = gpytorch.likelihoods.GaussianLikelihood()
if torch.cuda.is_available():
    model = model.cuda()
    likelihood = likelihood.cuda()
# -
# ### Training the Model
#
# The cell below trains the model above, learning both the hyperparameters of the Gaussian process **and** the parameters of the neural network in an end-to-end fashion using Type-II MLE.
#
# Unlike when using the exact GP marginal log likelihood, performing variational inference allows us to make use of stochastic optimization techniques. For this example, we'll do one epoch of training. Given the small size of the neural network relative to the size of the dataset, this should be sufficient to achieve comparable accuracy to what was observed in the DKL paper.
#
# The optimization loop differs from the one seen in our more simple tutorials in that it involves looping over both a number of training iterations (epochs) *and* minibatches of the data. However, the basic process is the same: for each minibatch, we forward through the model, compute the loss (the `VariationalELBO` or ELBO), call backwards, and do a step of optimization.
# +
num_epochs = 1 if smoke_test else 4
model.train()
likelihood.train()
# Adam over both the model parameters (variational + kernel/mean hypers) and
# the likelihood's noise parameter. (An earlier comment here claimed SGD was
# used; the code below uses Adam.)
optimizer = torch.optim.Adam([
    {'params': model.parameters()},
    {'params': likelihood.parameters()},
], lr=0.01)
# Our loss object. We're using the VariationalELBO
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=train_y.size(0))
epochs_iter = tqdm.tqdm_notebook(range(num_epochs), desc="Epoch")
for i in epochs_iter:
    # Within each iteration, we will go over each minibatch of data
    minibatch_iter = tqdm.tqdm_notebook(train_loader, desc="Minibatch", leave=False)
    for x_batch, y_batch in minibatch_iter:
        optimizer.zero_grad()
        output = model(x_batch)
        # Minimize the negative ELBO (i.e. maximize the ELBO).
        loss = -mll(output, y_batch)
        minibatch_iter.set_postfix(loss=loss.item())
        loss.backward()
        optimizer.step()
# -
# ### Making Predictions
#
# The next cell gets the predictive covariance for the test set (and also technically gets the predictive mean, stored in `preds.mean()`). Because the test set is substantially smaller than the training set, we don't need to make predictions in mini batches here, although this can be done by passing in minibatches of `test_x` rather than the full tensor.
model.eval()
likelihood.eval()
# Accumulate predictive means batch by batch; the tensor is seeded with a
# dummy 0 that is stripped off afterwards.
means = torch.tensor([0.])
with torch.no_grad():
    for x_batch, y_batch in test_loader:
        preds = model(x_batch)
        means = torch.cat([means, preds.mean.cpu()])
means = means[1:]
# Mean absolute error on the held-out split.
print('Test MAE: {}'.format(torch.mean(torch.abs(means - test_y.cpu()))))
| examples/04_Variational_and_Approximate_GPs/SVGP_Regression_CUDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import tarfile


def recursive_files(dir_name='.', ignore=None):
    """Yield the path of every file under ``dir_name``, walking recursively.

    Files whose name is in ``ignore`` are skipped, as are the files directly
    inside a directory whose basename is in ``ignore``.
    (Fix: ``os`` was used here without being imported, which raised a
    NameError at runtime.)
    """
    for dir_name, subdirs, files in os.walk(dir_name):
        # NOTE(review): this skips only the ignored directory's own files;
        # its subdirectories are still walked. Prune `subdirs[:]` here if the
        # whole subtree should be excluded — confirm intended behavior.
        if ignore and os.path.basename(dir_name) in ignore:
            continue
        for file_name in files:
            if ignore and file_name in ignore:
                continue
            yield os.path.join(dir_name, file_name)


def make_tar_file(dir_name='.', target_file_name='workspace_archive.tar', ignore=None):
    """Archive every non-ignored file under ``dir_name`` into a tar file.

    The context manager guarantees the archive is flushed and closed even if
    adding a file raises (the original leaked the handle on error).
    """
    with tarfile.open(target_file_name, 'w') as tar:
        for file_name in recursive_files(dir_name, ignore):
            tar.add(file_name)
# Archive the current working directory, skipping notebook checkpoints,
# bytecode caches, and the archive itself (so it is not added to its own
# contents).
dir_name = '.'
target_file_name = 'workspace_archive.tar'
# List of files/directories to ignore
ignore = {'.ipynb_checkpoints', '__pycache__', target_file_name}
make_tar_file(dir_name, target_file_name, ignore)
# -
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/perfectpanda-works/machine-learning/blob/master/LEARNING_PYTORCH_WITH_EXAMPLES6.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="pkGgtjU8WefQ" colab_type="text"
# #nnモジュールのカスタマイズ
# + [markdown] id="wJG_50jHw-QF" colab_type="text"
# 既存のモジュールのシーケンスよりも複雑なモデルを指定したい場合があります。
#
# これらの場合、nn.Moduleをサブクラス化し、入力テンソルを受け取り、他のモジュールまたはTensorの他のautograd操作を使用して出力テンソルを生成する転送を定義することにより、独自のモジュールを定義できます。
#
# これで、チュートリアルなどでよく見るクラスでのネットワークの定義の形になります。
# + id="uRXIorMPxWx6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8aeea944-b37a-4362-9623-ce5d12dd3c16"
# -*- coding: utf-8 -*-
import torch
# + id="bb7YcmRmxeFz" colab_type="code" colab={}
class TwoLayerNet(torch.nn.Module):
    """Fully connected network: Linear(D_in -> H), ReLU, Linear(H -> D_out)."""

    def __init__(self, D_in, H, D_out):
        """Build the two linear layers (D_in: input dim, H: hidden dim,
        D_out: output dim)."""
        super(TwoLayerNet, self).__init__()
        self.linear1 = torch.nn.Linear(D_in, H)
        self.linear2 = torch.nn.Linear(H, D_out)

    def forward(self, x):
        """Forward pass: map the input tensor ``x`` to predictions."""
        hidden = self.linear1(x).clamp(min=0)
        return self.linear2(hidden)
# + id="GrXVwgWywtHM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="f849a2ad-c913-4e1f-b2c0-b8d7b65b3929"
# N     : batch size
# D_in  : input dimension
# H     : hidden-layer dimension
# D_out : output dimension
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random input and target data.
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
# Instantiate the network defined as a class above.
model = TwoLayerNet(D_in, H, D_out)
# Loss function: summed mean squared error.
criterion = torch.nn.MSELoss(reduction='sum')
# Optimizer: stochastic gradient descent.
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4)
for t in range(500):
    # (1) forward pass
    y_pred = model(x)
    # (2) compute the loss
    loss = criterion(y_pred, y)
    if t % 100 == 99:
        print(t, loss.item())
    # (3) backpropagate and update the weights
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# + [markdown] id="9ZgUp4sxh4l8" colab_type="text"
# #制御と重みの共有について
# + id="xgfWetcZVs-M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="c8ec88fa-0509-47f0-b0d5-65bed456aee2"
# -*- coding: utf-8 -*-
import random
import torch
class DynamicNet(torch.nn.Module):
    """Network whose depth varies randomly per forward pass, reusing a single
    hidden layer ("middle_linear") to demonstrate weight sharing."""

    def __init__(self, D_in, H, D_out):
        """
        Constructor: adds the shared "middle_linear" hidden layer.
        """
        super(DynamicNet, self).__init__()
        self.input_linear = torch.nn.Linear(D_in, H)
        self.middle_linear = torch.nn.Linear(H, H)
        self.output_linear = torch.nn.Linear(H, D_out)

    def forward(self, x):
        """
        The forward pass applies "middle_linear" 0-3 times in a loop,
        reusing the same module (and therefore the same weights) on every
        iteration. Python loops and control flow may be used freely when
        defining the forward computation, and the same module can safely
        appear many times in the computation graph. This is an improvement
        over LuaTorch (PyTorch's predecessor), where each module could only
        be used once.
        """
        h_relu = self.input_linear(x).clamp(min=0)
        for _ in range(random.randint(0, 3)):
            h_relu = self.middle_linear(h_relu).clamp(min=0)
        y_pred = self.output_linear(h_relu)
        return y_pred
N, D_in, H, D_out = 64, 1000, 100, 10
# Random inputs and targets.
x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
model = DynamicNet(D_in, H, D_out)
# Summed MSE loss; plain SGD with momentum.
criterion = torch.nn.MSELoss(reduction='sum')
optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
for t in range(500):
    # Forward pass (network depth is random on each call).
    y_pred = model(x)
    # Compute the loss.
    loss = criterion(y_pred, y)
    if t % 100 == 99:
        print(t, loss.item())
    # Backpropagate and update the weights.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# + id="_zDpvWw4h6gV" colab_type="code" colab={}
| LEARNING_PYTORCH_WITH_EXAMPLES6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + deletable=true editable=true
import sys, os
import h2o
from h2o.estimators.deepwater import H2ODeepWaterEstimator
from h2o.estimators import H2OGeneralizedLinearEstimator
from requests import get
import numpy as np

# Start (or connect to) a local H2O cluster.
h2o.init()

# Stop early if the Deep Water backend is unavailable.
# Fix: the original read `...: exit`, which only references the builtin
# without calling it and therefore never exits; it must be called.
if not H2ODeepWaterEstimator.available():
    sys.exit("H2ODeepWaterEstimator is not available on this cluster")
# + deletable=true editable=true
print("Downloading the model")
## http://data.dmlc.ml/mxnet/models/imagenet/inception-bn_old.tar.gz
# ## !gunzip ...
print("Importing the model architecture for scoring in H2O")
# Load a pretrained Inception-BN MXNet model into H2O Deep Water purely for
# scoring / feature extraction; epochs=0 means no training occurs.
model = H2ODeepWaterEstimator(epochs=0, ## no training - just load the state - NOTE: training for this 3-class problem wouldn't work since the model has 1k classes
                              mini_batch_size=32, ## mini-batch size is used for scoring
                              ## all parameters below are needed
                              network='user',
                              network_definition_file=os.getcwd() + "/Inception_BN-symbol.json",
                              network_parameters_file=os.getcwd() + "/Inception_BN-0039.params",
                              mean_image_file= os.getcwd() + "/mean_224.nd",
                              image_shape=[224,224],
                              channels=3
)
frame = h2o.import_file("bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv")
print(frame.head(5))
nclasses = frame[1].nlevels()[0]
model.train(x=[0],y=1, training_frame=frame) ## must call train() to initialize the model, but it isn't training
# + deletable=true editable=true
## Extract deep features from final layer before going into Softmax.
extracted_features = model.deepfeatures(frame, "global_pool_output")
#extracted_features = model.deepfeatures(frame, "conv_5b_double_3x3_1_output")
print(extracted_features.shape)
print(extracted_features)
#assert extracted_features.ncol == 1024
# -
# Transfer learning: fit a multinomial GLM on the extracted deep features.
extracted_features["target"] = frame["C2"]
train,valid, test = extracted_features.split_frame(ratios=[.7, .15])
model = H2OGeneralizedLinearEstimator(model_id = "deep_features_glm", family = "multinomial")
model.train(y = "target", x = [x for x in extracted_features.columns if x != "target"],
            training_frame = train,
            validation_frame = valid)
# Evaluate on the remaining ~15% test split.
model_perf = model.model_performance(test)
model_perf.confusion_matrix()
model_perf.logloss()
| examples/deeplearning/notebooks/inception_deep_features_classifier.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.2
# language: julia
# name: julia-1.0
# ---
# # Astro 528: Lab 2, Exercise 3
#
# ## Model for Radial Velocity of a Star with no Planet
#
# In this exercise, we will consider a radial velocity planet search that measures the velocity of a target star with independent, uncorrelated, Gaussian measurement uncertainties. First, I'll provide some code for the simplest possible model, one where the star has no planets and its true velocity is a constant. I'll do it in a way that lends itself to generalizing. Then you'll write code for a model with one planet on a circular orbit. Then, you'll compute the likelihood of the observations a few different ways.
# +
"Parameters for a radial velocity model consisting of a constant radial velocity"
struct rv_model_const
    mean_rv::Float64
end

"Compute the radial velocity at specified time using rv_model_const model."
function (model::rv_model_const)(time)
    return model.mean_rv
end

"""
    generate_simualted_data(model, times, sigmas)

Generate simulated observations assuming Gaussian measurement noise.
Inputs:
- model: User-provided model. Must provide a function that takes a scalar time
  as input and return predicted velocity.
- times: Array of observation times
- sigmas: Array specifying standard deviation of observations
Output: Array of simulated observations
"""
function generate_simualted_data(model, times::Array, sigmas::Array)
    @assert length(times) == length(sigmas)
    @assert length(times) >= 1
    @assert all(isfinite.(times))
    @assert all(isfinite.(sigmas))
    # Evaluate the model at each time and add independent Gaussian noise.
    model.(times) .+ sigmas .* randn(length(sigmas))
end
# -
# Note that `generate_simulated_data()` uses [`randn()`](https://docs.julialang.org/en/v1/stdlib/Random/#Base.randn) to draw random variables from the standard normal distribution. Next, I'll demonstrate calling the functions above.
rv_model = rv_model_const(3.0) # Create a model to return a constant RV of 3m/s.
time_jan1_2019 = 2458484.5 # Julian date for 0UT on Jan 1, 2019
rv_model(time_jan1_2019) # Evaluate model at specified time
# Let's assume a measurement precision of $\sigma_i = 2$ m/s at each of $N_{obs}$ well-separated observation times.
n_obs = 5
# NOTE(review): the times passed here are `ones(n_obs)`, i.e. all identical,
# not the well-separated epochs described above — confirm intent.
obs_data = generate_simualted_data(rv_model, ones(n_obs), 2.0*ones(n_obs) )
# Now, we'll import the test module (that is part of the standard Julia library) and preform a few simple tests.
# Import the Test module from the standard Julia library and run basic checks.
using Test
@test rv_model_const(42)(1.0) == 42.0
@test rv_model_const(3.14159)(17.0) == 3.14159
# Testing equality or inequality is straightforward for integers, but dangerous for floating point numbers. If a floating point number is equal to an integer, then it's generally ok, but I think it's better to always be cautious about testing floating point numbers for equality. Instead, you can test that two numbers are approximately equal using $\simeq$. In Jupyter or Atom (and probably many other IDEs), you can get unicode characters like $\simeq$ by typing `\approx<tab>`. When testing that two numbers are approximately equal you need to specify a tolerance. `atol` refers to an absolute tolerance, while `rtol` refers to a relative or fractional tolerance.
# For further information on using these, see the [Julia manual](https://docs.julialang.org/en/v1/stdlib/Test/index.html#Basic-Unit-Tests-1).
#
#
# Explore how Julia's Test.@test macro behaves by running and tinkering with the following code.
@test 10 == 10
@test 10 != 11
@test 10.0 ≈ 10.001 atol=0.01
@test 10.0 ≈ 10.001 rtol=0.0001
# NOTE(review): the line below duplicates the one above verbatim — possibly
# one of them was meant to show a different tolerance; confirm.
@test 10.0 ≈ 10.001 rtol=0.0001
#@test 10.0 ≈ 10.001 atol=0.0001 # This test would fail
#@test 10.0 ≈ 10.001 # This test would fail due to default tolerance being smaller
# ## Model for Radial Velocity of a Star with a Planet on a Circular Orbit
#
# ### Create a type to contain model parameters
# A model to describe the radial velocity of a star with planet will require more model parameters. We could simply use an array of floating point values. But that can be risky, since it would be easy to confuse the order of parameters. One way to help reduce the chance of such a bug is to define your own custom type. In addition to storing data, making it a type will make it possible for the compiler to recognize errors (if you try to pass a variable of the wrong type to a function) and to optimize functions.
#
# (Another advantage of a custom type is that it can aid reusability of code, even if low-level details are refactored long after the original code was developed, if the custom type is accessed via a set of documented functions, rather than by accessing its contents directly. We'll come back to this later.)
#
# a. Write a [composite type](https://docs.julialang.org/en/v1/manual/types/index.html#Composite-Types-1) named `rv_model_circ` that consists of the orbital period, radial velocity amplitude of the star, the time of the star's maximum radial velocity and the star's time-averaged radial velocity. (FYI, for historical reasons, observational astronomers define positive velocity as moving away from the observer.)
# +
# INSERT CODE
# -
# When a user creates a compound type, Julia automatically creates a function that can be used to intialize a variable of that type. In this case it would be `rv_model_circ(Float,Float,Float,Float)`. The order of the function arguments matches the order of the types when the user defined the type. Often, most of the variables have different types, and this can work well. However, when there are several variables with the same type, this can be a little dangerous. It would be easy for someone to confuse the order of the parameters (or even the choice of parameterization). One strategy for reducing this risk is to create a constructor function that takes named parameters. For example, for our `rv_model_const` type, we could make the following constructor function.
# Keyword-argument constructor: reduces the risk of positional-argument mixups.
function rv_model_const(;mean_rv=NaN,unused_parameter=1.0)
    # The NaN default plus this assertion makes mean_rv effectively required.
    @assert !isnan(mean_rv)
    rv_model_const(mean_rv)
end
rv_model_const(mean_rv=3)
# Notice that a named parameter needs a default value. In some cases (for parameters that truely are optional), it makes sense to pick a good default value. In this case, we don't really want the mean_rv to be optional. So I've set the default value of `mean_rv` to NaN and include an assertion to make sure that `mean_rv` has some other value.
#
# b. Write a constructor function for your `rv_model_circ` type using named parameters to reduce risk of someone accidentally initializing it incorrectly. For names, use `P` for period, `K` for the amplitude, `t_rv_max` for the time of maximum radial velocity, and `mean_rv` for the time_averaged velocity.
# +
# INSERT CODE
# -
# As we've just seen, Julia allows there to be multiple functions with the same name. The choice of which function to call is determined by the type of the variables passed to the function. Julia features "multiple dispatch", meaning that the choice of what function to call depends on all the function arguments, rather than just the first one. Multiple dispatch can be powerful for allowing programmers to write generic and highly optimized code.
#
# To harness that full power it is useful to use a functional programming paradigm. However, Julia can also be used to express object-oriented programming patterns, that may be more familiar/comfortable to programmers used to traditional object-oriented languages (e.g., C++, Java, Python). As one example, if you like the pattern of using an "object", then you can "overload" the parenthesis operator for a user-defined composite type. The syntax for "overloading" the () operator isn't obvious, so I've provided the shell below.
#
# c. Write a function that allows a variable of type `rv_model_circ` to be called as if it were a function taking a single input, the time at which to evaluate the model, and returning a single value, the radial velocity predicted given the model parameters contained in the variable of type `rv_model_circ`.
# +
# INSERT CODE
# -
# ### Add Assertions
#
# Sometimes a programmer calls a function with arguments that either don't make sense or represent a case that the function was not originally designed to handle properly. The worst possible function behavior in such a case is returning an incorrect result without any warning that something bad has happened. Returning an error at the end is better, but can make it difficult to figure out the problem. Generally, the earlier the problem is spotted, the easier it will be to fix the problem. Therefore, good developers often include assertions to verify that the function arguments are acceptable.
#
# For example, in `generate_simualted_data` above, we included assertions that the size of the arrays for observations times measurement uncertainties matched. We also checked that there was at least one observation.
#
# d. What are the preconditions for your function `rv_model_circ(time)`?
# Use these to motivate assertions for your function overloading the `()` operator for `rv_model_circ`. Update your code to include at least one assertion for each input parameter.
#
# ### Write unit tests for your function
# e. Use the preconditions above to write at least three unit tests for `rv_model_circ(time)`.
# +
# INSERT CODE
# -
# Run your tests. Do your functions pass all of them? If not, correct the function (and tests if necessary) and rerun the tests.
#
#
# ### Automate the application of your tests.
# Often, a well-intentioned programmer introduces a bug, but doesn't notice until long after the bug was written. One way to reduce the risk of such bugs is to have an comprehensive set of unit tests that are applied _automatically_ each time a developer commits a change. If some new codes causes a test to fail, we want to know that promptly, so it can be fixed and before it causes scientists to lose time running the buggy code or trying to interpret results of a buggy code.
#
# The '.travis.yml' file provided in this repository already provides instructions for [Travis-CI.com](https://travis-ci.com/) to automatically run tests each time you commit changes and push them to GitHub.com. (You may need to log into [Travis-CI.com](https://travis-ci.com/) and give it permission to access your repository first.) The tests for this notebook are in `tests/test3.jl`.
#
# f. Add your tests above to `tests/test3.jl`, so that they become part of your repository's _continuous integration_ testing.
#
# ### Testing the assertions!
# In this case, the assertions are probably pretty simple. But sometimes, the assertions can be complicated enough that you'll need to test that they're working as intended! When an assert statement is followed by an expression that evaluates to false, then it ["throws an exception"](https://docs.julialang.org/en/v1.0/manual/control-flow/#Exception-Handling-1). We'll want to make sure that our code is throwing an exception when we pass our function invalid arguments. For example, let's test that `generate_simulated_data` throws an exception in the following cases.
# Each call below passes deliberately invalid arguments (mismatched array
# lengths, non-finite times, NaN observations), so the function's input
# assertions should fire and raise an AssertionError.
# NOTE(review): the name is spelled `generate_simualted_data` here and in the
# later cell that uses it -- confirm this matches the actual definition.
@test_throws AssertionError generate_simualted_data(rv_model, ones(10), ones(11))
@test_throws AssertionError generate_simualted_data(rv_model, fill(Inf,10), ones(10))
@test_throws AssertionError generate_simualted_data(rv_model, ones(10), fill(NaN,10))
# g. Write a test that makes sure your assert statement for `rv_model_circ` is indeed throwing an assertion when the inputs are not valid.
#
# # Validating a model
#
# While unit tests are great for identifying issues with small pieces of code, sometimes bugs arise because of how units are combined. Or maybe scientifically interesting issue only becomes apparent once you couple different parts of your code.
# Another important strategy is to validate that your code performs as expected on simulated data. Of course, actual data is probably more complicated than your simulated data. But if your code doesn't work on simulated data, then it's very unlikely to work well when applied to realistic data.
#
# ## Generate simulated dataset
# h. Generate an ordered list of 100 simulated observation times drawn uniformly during calendar year 2019 (expressed as Julian date) and store in a variable named `obs_times`. You'll likely want to use the functions [`rand(min:max,number)`](https://docs.julialang.org/en/v1/stdlib/Random/#Base.rand) and [`sort`](https://docs.julialang.org/en/v1/base/sort/#Base.sort) or [`sort!`](https://docs.julialang.org/en/v1/base/sort/#Base.sort!).
# +
# INSERT CODE
# -
# i. Now, combine the `generate_simualted_data` function above with your `rv_model_circ` type and the function above to generate simulated data corresponding to an orbital period of 4 days, amplitude of 10 m/s, the maximum velocity (away from the observer) occurring on Jan 1, 2019, and a time-averaged velocity of 3 m/s. Include uncorrelated Gaussian measurement noise with standard deviation 2 m/s.
# the circular model at 100 times with Julian dates during the 2019 calendar year. Store the result in `obs_rvs`.
# +
# INSERT CODE
# -
# At this point, it is often useful to visually inspect your simulated data to make sure that your functions are doing what you expect.
using Plots # In separate cell, since only need to run once per notebook
# Raw RV time series: 2 m/s error bars match the simulated measurement noise.
plt_raw = scatter(obs_times,obs_rvs,yerror=2*ones(length(obs_times)),
    xlabel ="Time (JD)", ylabel="RV (m/s)", legend=:no)
# Phase-folded view: times are wrapped modulo the 4-day orbital period,
# measured from the Jan 1, 2019 reference epoch.
plt_phased = scatter(mod.(obs_times.-time_jan1_2019,4.0),obs_rvs,yerror=2*ones(length(obs_times)),
    xlabel ="Time (d)", ylabel="RV (m/s)", legend=:no)
# Stack the two panels vertically.
plot(plt_raw,plt_phased,layout=(2,1))
# j. Are you happy with the behavior of your functions?
# If you found a bug, then create a new unit test that would have identified that bug and add it to the 'tests/test2.jl' file.
#
# # Computing Likelihood of Data
#
# A common task in astronomy is to compute the probability of obtaining a set of observations given an assumed model. For example, consider a set of observations ($y_i$'s), each of which can be assumed to follow a normal distribution centered on the true value ($z_i$) with a standard deviation of $\sigma_i$, so
#
# $$p(y_i | z_i) = \frac{\exp \left[-(y_i-z_i)^2/(2\sigma_i^2)\right]}{\sqrt{2\pi \sigma_i^2}}$$
#
# When the measurement error for each observation is independent and uncorrelated with the other observations, the probability of a combination of measurements is simply the product of the individual probabilities.
#
# ### Write a function for likelihood of one observation
# k. Write a function `gauss_std_normal` to calculate the probability density of a random variable drawn from a standard normal distribution (i.e., Gaussian with mean zero and standard deviation of unity).
# (FYI: You can use the built-in functions `exp(x)` and `sqrt(x)`. Julia makes it easy to define small functions using the syntax: `add3(a,b,c) = a+b+c` . )
# +
# INSERT CODE
# -
# ### Write a function for likelihood of a set of observations
# l. Write a function `likelihood` that takes inputs of an `rv_model_circ`, followed by arrays of observation times, observed RVs, measurement uncertainties. It should return the likelihood of the observations, assuming that the measurements are independent and uncorrelated. Therefore, the likelihood of the observed data is simply the product of the probability of each observation.
# +
# INSERT CODE
likelihood(model,obs_times,obs_rvs,2*ones(length(obs_times)))
# -
# ### Combine the above into a function
# m. Write a function `simulate_rvs_and_calc_likelihood` that takes a single integer parameter ($N_{obs}$) and returns the likelihood of a set of simulated observations using the model parameters specified above. To facilitate testing, seed the random number generator at the beginning of each function call using [`Random.seed!()`](https://docs.julialang.org/en/v1/stdlib/Random/index.html#Generators-(creation-and-seeding)-1).
using Random
# Simulate `num_obs` RV observations using the model parameters defined
# above and return the likelihood of the simulated data set.
# The RNG is re-seeded on every call so results are reproducible run to run.
function simulate_rvs_and_calc_likelihood(num_obs::Integer)
    Random.seed!(42)
    # INSERT CODE
end
# ### Test your function
# n. While the specific results depend on the values of the pseudo-random numbers generated, we should still think about what results we expect. What is the expected value of the log likelihood as a function of $N_{obs}$? What is a reasonable expectation for how much the results would vary depending on the specific pseudo-random numbers used?
# (Tip: It may help to consider the properties of the [$\chi^2$ distribution](https://en.wikipedia.org/wiki/Chi-squared_distribution).)
#
# o. Test your function `simulate_rvs_and_calc_likelihood` for $N_{obs}$ = 100 and $N_{obs}$ = 600. Are you happy with the results? If not, what is going wrong?
# Compare likelihoods at two sample sizes; as discussed in part n, the raw
# likelihood shrinks multiplicatively with each observation, so expect
# numerical underflow toward zero for large N_obs.
println("N_obs = 100: L = ",simulate_rvs_and_calc_likelihood(100))
println("N_obs = 600: L = ",simulate_rvs_and_calc_likelihood(600))
# INSERT RESPONSE
#
# ### Write functions for log likelihood
# p. Write functions `log_gauss_std_normal`, `log_likelihood` and `simulate_rvs_and_calc_log_likelihood` analogous to those above, except now calculating the log likelihood (base e).
# +
# INSERT CODE
# -
# ### Test your functions
# q. Test your function `simulate_rvs_and_calc_log_likelihood` for $N_{obs}$ = 100 and $N_{obs}$ = 600. Compare the results of this function with the results of your `simulate_rvs_and_calc_likelihood` function and your theoretical expectations for the log likelihood of each dataset.
#INSERT CODE
# Working in log space avoids the numerical underflow that the raw
# likelihood suffers from at large N_obs.
println("N_obs = 100: log L = ",simulate_rvs_and_calc_log_likelihood(100))
println("N_obs = 600: log L = ",simulate_rvs_and_calc_log_likelihood(600))
# INSERT RESPONSE
#
# ### Conclusion
# r. What lessons does this exercise illustrate that could be important when writing code for your research?
#
# INSERT RESPONSE
| ex3.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.0
# language: julia
# name: julia-1.7
# ---
# # "Golden Ratio"
# > "Golden Ratio - Euclid's Elements and the Pentagon"
# - toc: true
# - badges: true
# - comments: true
# - categories: [jupyter, math]
# # Introduction
#
# The golden ratio, also known as the divine proportion or phi, is a mathematical constant found in nature that has been used by artists and architects for centuries. It is said that the proportions of the golden ratio are aesthetically pleasing to the human eye, and can be found in everything from flowers to galaxies. In this blog post, we will explore Euclid's pentagon and how it is related to the golden ratio.
#
# In order to construct the geometry of the pentagon, we will use the excellent [luxor.jl](https://github.com/JuliaGraphics/Luxor.jl) package.
#
#
# # Golden Ratio - Euclid's Elements and the Pentagon
#
# The discovery of the golden ratio is one that dates back to ancient Greece. It was first mentioned in works by Euclid and Ptolemy who, during their respective times, held differing views on what it actually meant, but were still able to recognize its significance for mathematics.
#
# The importance of the golden ratio is evident in many ancient cultures. In Greece, Euclid and his students were interested in figuring out how best to construct solids like pentagons or icosahedrons, since these shapes provide us with some insight into architectural designs such as temples, where fivefold symmetry around circular bases led them down a path towards discovering this constant that still impacts mathematics today.
#
# So we are going to show why this constant is important when constructing the pentagon.
#
# The following code constructs the pentagon using the luxor.jl package:
#
# +
#collapse-show
#load dependancies
using Luxor;
using Base.MathConstants;
using SymPy;
using Images;
# +
#collapse-show
# Fetch the reference image (Raphael's "The School of Athens", Wikimedia
# Commons) into a local file for display below.
url = "https://upload.wikimedia.org/wikipedia/commons/c/ce/Scuola_di_atene_23.jpg";
download(url, "scuola.jpg");#download to local file
# -
# Load the downloaded image (variable named for the figure of Euclid).
euclid = load("scuola.jpg")
# +
#collapse-show
"""
    guidelines()

Draw the orientation guides for the figures: a dotted pair of axis arrows,
a circle of radius 200 and a 400x400 box, all centred on the origin, on an
antique-white background with dark-blue strokes.  Restores a solid dash
pattern before returning.
"""
function guidelines()
    center = Point(0, 0)
    setdash("dot")
    background("antiquewhite")
    setcolor("darkblue")
    # axis arrows through the origin
    arrow(Point(-250, 0), Point(250, 0))
    arrow(Point(0, -250), Point(0, 250))
    # reference circle, radius 200
    circle(center, 200, :stroke)
    # reference box, 400 x 400
    box(center, 400, 400, :stroke)
    setdash("solid")
end
# +
@draw begin
    # Draw the guides, then overlay a regular pentagon inscribed in the
    # radius-200 circle, rotated so one vertex points straight up (-pi/2).
    guidelines()
    setline(0.3);
    ngon(Point(0,0), 200, 5, -pi/2, action = :stroke)
    setdash("dot")
end
# +
#get polygon vertices coordinates
# +
@draw begin
    fontsize(14)
    guidelines()
    setline(0.3);
    setdash("solid")
    # Pentagon inscribed in the radius-200 circle, one vertex at the top.
    ngon(Point(0,0), 200, 5, -pi/2, action = :stroke)
    # Same call with vertices=true so we get the five corner points back.
    # NOTE: `a` and `ip1` are reused by later cells -- do not rename.
    a = ngon(Point(0,0), 200, 5, -pi/2, action = :stroke, vertices=true)
    setdash("solid")
    setline(0.5);
    # Two diagonals of the pentagon.
    l1 = line(a[3], a[5], :stroke)
    l2 = line(a[2], a[4], :stroke)
    # Their intersection C divides diagonal AB in the golden ratio
    # (shown numerically in the cells below).
    f1,ip1 = intersectionlines(a[3], a[5], a[2], a[4])
    circle(ip1, 3, :fill)
    label("C", :W, ip1)
    label("A", :W, a[3])
    label("B", :W, a[5])
    label("D", :W, a[2])
end
# -
# When examining the figure above, if we draw two diagonals of the pentagon, we can see that the two diagonals are equal in length, forming 3 isosceles triangles. These diagonals are $AB$ and $BD$.
# Using elementary geometry, we can show that according to Euclid's definition, point $C$ divides the line $AB$ precisely in a __Golden Ratio__ $\phi$.
# Furthermore, the ratio AB to AD is also equal to $\phi$.
#
# This fact illustrates that the ability to construct a line divided in a Golden Ratio provides a simple means to construct a pentagon.
# For this reason, the ancient Greeks were interested in $\phi$.
# Through simple measurements of the line segments in the figure, we can derive the ratios in question and show unequivocally the presence of the golden ratio in the pentagon.
# +
# get point coordinates (`a` and `ip1` come from the @draw block above)
C = ip1;
A = a[3];
B = a[5];
# +
# Measure the three segment lengths with Luxor's `dimension`.
# NOTE(review): assumes `dimension(p, q)[1]` is the measured length --
# confirm against the Luxor documentation.
AB = dimension(A,B)[1];
AC = dimension(A,C)[1];
BC = dimension(B,C)[1];
# -
# Ratio of _Line_ $BC / AC$:
# +
BC_AC = round(BC/AC, digits = 5)
# -
# Ratio of Line $AB / BC$
AB_BC = round(AB / BC; digits=5)
# Both ratios agree to five digits -- the hallmark of the golden section.
BC_AC == AB_BC
# +
# Phi Constant from the Math Constants Library
φ
# -
# # Deriving the Value of $\phi$
# Lets look again at the line $AB$ and $AC$ from the figure above.
#
# Let Line Segment $AC$ (shorter segment) be 1 unit long and the length of Line Segment $AB$ be $x$.
#
# If the ratio of $x$ to 1 is the same as that of $x+1$ to $x$, then the line has been cut in a Golden Ratio $\phi$:
# Declare a symbolic variable for the unknown segment length.
@syms x
# We can set up an equation to solve for the length x. The equation leads to a quadratic:
# # $\frac{x}{1} = \frac{x+1}{x} \implies x^2 - x -1 =0$
eq = ((x / 1) ~ (x+1) / x); #setting up the equation in SymPy
sols = solve(eq, x)
# NOTE(review): assumes `solve` returns the negative root first, so the
# second entry is the positive root (1 + sqrt(5))/2 -- confirm ordering.
sols[2] # value of Phi
sols[2] |> float
| _notebooks/2022-01-16-GoldenRatio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="5EI9kVD6HJCo" colab_type="text"
# # Tutorial for building own layer in the TensorFlow2
# # Made by <NAME>
# # Cotact: <EMAIL>
# + id="SAA7W6edefUy" colab_type="code" outputId="9f5e01a1-e113-4f4e-b20d-478e3578c029" colab={"base_uri": "https://localhost:8080/", "height": 960}
# Please download TensorFlow 2.0 or higher to work with the tutorial
# !pip uninstall --yes tensorflow
# !pip install tensorflow
# + id="AxKCXMaoekS4" colab_type="code" colab={}
# These are the libraries needed for the tutorial
import tensorflow as tf
from matplotlib.pyplot import imshow
import numpy as np
import pandas as pd
# + id="3_IBoy6lTqqz" colab_type="code" outputId="3df07223-9455-4452-db56-f9aac60f3225" colab={"base_uri": "https://localhost:8080/", "height": 702}
# Make sure that the GPU is available and the version is 2.0 or higher
print(tf.__version__)
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# tf.test.is_gpu_available() is deprecated since TF 2.1; the supported
# replacement is tf.config.list_physical_devices('GPU'), which returns the
# list of visible GPU devices (non-empty means a GPU is available).
len(tf.config.list_physical_devices('GPU')) > 0  #>>> True
# + id="CEJUnrM6nXaL" colab_type="code" colab={}
# Download and load CIFAR10 dataset to the memory.
# If you don't have enough memory (which is not the case on the google colab), then you have to download it manually
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
# + id="sA1mxQuan4Dz" colab_type="code" colab={}
# Rescale pixel intensities from [0, 255] into [0, 1].
rescale = 1./255.
x_train = x_train * rescale
x_test = x_test * rescale
# Standardize: subtract the training-set mean and divide by its standard
# deviation (statistics taken over all axes, so one scalar each); the small
# epsilon guards against division by zero.
pixel_mean = np.mean(x_train, axis=(0, 1, 2, 3))
pixel_std = np.std(x_train, axis=(0, 1, 2, 3))
x_train = (x_train - pixel_mean) / (pixel_std + 1e-7)
x_test = (x_test - pixel_mean) / (pixel_std + 1e-7)
# One-hot encode the integer class labels; labels run 0..max, so the class
# count is max + 1.
OUTPUT_CLASSES = y_train.max() + 1
y_train = tf.keras.utils.to_categorical(y_train, OUTPUT_CLASSES)
y_test = tf.keras.utils.to_categorical(y_test, OUTPUT_CLASSES)
# + id="C23HNqT-pbCU" colab_type="code" outputId="b80b5566-67e4-4a15-e741-38fbb6ea31c9" colab={"base_uri": "https://localhost:8080/", "height": 303}
# Example image
imshow(x_test[0])
# + id="mYOgecuApwCt" colab_type="code" colab={}
# Basic constants
# 300 should be enough to train the model
EPOCHS = 300
# Number of images per gradient update.
BATCH_SIZE = 256
# Shape of a single CIFAR-10 image, (32, 32, 3); the sample index 255 is
# arbitrary -- any sample would give the same shape.
INPUT_SHAPE = x_train[255].shape
# + id="r-QU0WMZxuYE" colab_type="code" colab={}
# Data augmentation
# You can play with the parameters to get better accuracy
imagegen = tf.keras.preprocessing.image.ImageDataGenerator(rotation_range=45,
                                                           width_shift_range=0.25,
                                                           height_shift_range=0.25,
                                                           horizontal_flip=True,
                                                           zoom_range=0.25,
                                                           shear_range=0.15)
# NOTE(review): fit() is only required for featurewise statistics, which
# none of the options above use -- harmless but likely unnecessary; confirm.
imagegen.fit(x_train)
# Infinite generator yielding augmented (images, labels) batches.
dataflow = imagegen.flow(x_train, y_train, batch_size=BATCH_SIZE)
# + id="RiupdUjbfNzf" colab_type="code" colab={}
# Here is the first resnet layer, that does preserve input shape
# ResNET Layer
class ResidualLayer(tf.keras.layers.Layer):
    """Shape-preserving residual layer with two parallel bottleneck paths.

    Each path runs a 1x1 -> (f x f) -> 1x1 conv stack (every conv followed
    by batch norm, with ReLU after the first two); the two path outputs are
    summed with each other and with the identity shortcut, then passed
    through a final ReLU.  All strides are (1, 1), so the spatial size is
    unchanged, and the bottom convs emit `fillter_size_bot` channels --
    this must match the input channel count for the shortcut addition.

    Note: parameter names keep the original spelling ("fillter") because
    callers pass them as keyword arguments.
    """

    def __init__(self, f=None, fillter_size_top=None,
                 fillter_size_mid=None, fillter_size_bot=None):
        super(ResidualLayer, self).__init__()
        self.conv_top_1 = tf.keras.layers.Conv2D(fillter_size_top, (1, 1),
                                                 strides=(1, 1), padding='valid')
        # Path 2 uses a quarter of the filters so the two paths hold
        # different parameters.  (The original comment said "divided by 2",
        # but the code divides by 4.)
        self.conv_top_2 = tf.keras.layers.Conv2D(fillter_size_top//4, (1, 1),
                                                 strides=(1, 1), padding='valid')
        self.conv_mid_1 = tf.keras.layers.Conv2D(fillter_size_mid, (f, f),
                                                 strides=(1, 1), padding='same')
        # Quarter-width middle conv for path 2 (same note as above).
        self.conv_mid_2 = tf.keras.layers.Conv2D(fillter_size_mid//4, (f, f),
                                                 strides=(1, 1), padding='same')
        self.conv_bot_1 = tf.keras.layers.Conv2D(fillter_size_bot, (1, 1),
                                                 strides=(1, 1), padding='valid')
        # The outputs have to be the same shape to add up, so the bottom
        # conv of path 2 uses the full fillter_size_bot.
        self.conv_bot_2 = tf.keras.layers.Conv2D(fillter_size_bot, (1, 1),
                                                 strides=(1, 1), padding='valid')
        # One BatchNormalization per conv; axis=3 is channels-last.
        self.batch_norm_top_1 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_top_2 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_mid_1 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_mid_2 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_bot_1 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_bot_2 = tf.keras.layers.BatchNormalization(axis=3)
        # ReLU and Add are stateless, so single shared instances are fine.
        self.activation_relu = tf.keras.layers.Activation('relu')
        self.add_op = tf.keras.layers.Add()

    def call(self, input_x, training=False):
        # Keep the identity shortcut for the residual addition at the end.
        x_shortcut = input_x
        ##PATH 1
        x_path_1 = input_x
        # First CONV block of path 1
        x_path_1 = self.conv_top_1(x_path_1)
        x_path_1 = self.batch_norm_top_1(x_path_1, training=training)
        x_path_1 = self.activation_relu(x_path_1)
        # Second CONV block of path 1
        x_path_1 = self.conv_mid_1(x_path_1)
        x_path_1 = self.batch_norm_mid_1(x_path_1, training=training)
        x_path_1 = self.activation_relu(x_path_1)
        # Third CONV block of path 1 (no ReLU before the addition)
        x_path_1 = self.conv_bot_1(x_path_1)
        x_path_1 = self.batch_norm_bot_1(x_path_1, training=training)
        ##PATH 2
        x_path_2 = input_x
        # First CONV block of path 2
        x_path_2 = self.conv_top_2(x_path_2)
        x_path_2 = self.batch_norm_top_2(x_path_2, training=training)
        x_path_2 = self.activation_relu(x_path_2)
        # Second CONV block of path 2
        x_path_2 = self.conv_mid_2(x_path_2)
        x_path_2 = self.batch_norm_mid_2(x_path_2, training=training)
        x_path_2 = self.activation_relu(x_path_2)
        # Third CONV block of path 2 (no ReLU before the addition)
        x_path_2 = self.conv_bot_2(x_path_2)
        x_path_2 = self.batch_norm_bot_2(x_path_2, training=training)
        # Addition of PATH 1 and PATH 2
        x = self.add_op([x_path_1, x_path_2])
        # Addition to the shortcut path
        x = self.add_op([x, x_shortcut])
        x_output = self.activation_relu(x)
        return x_output
# + id="xcextqIwL5i_" colab_type="code" colab={}
# Here is the second resnet layer, that does not preserve input shape
# ResNET layer
class ResidualLayerScal(tf.keras.layers.Layer):
    """Downsampling residual layer with two parallel bottleneck paths.

    Same two-path structure as ResidualLayer, but the bottom 1x1 convs use
    stride `s`, so the spatial size shrinks and the channel count changes.
    The shortcut therefore cannot be an identity: it is projected by a
    strided 1x1 conv (`conv_scal`) + batch norm before the final addition.

    Note: parameter names keep the original spelling ("fillter") because
    callers pass them as keyword arguments.
    """

    def __init__(self, f=None, s=None, fillter_size_top=None,
                 fillter_size_mid=None, fillter_size_bot=None):
        super(ResidualLayerScal, self).__init__()
        self.conv_top_1 = tf.keras.layers.Conv2D(fillter_size_top, (1, 1),
                                                 strides=(1, 1), padding='valid')
        # Path 2 uses a quarter of the filters so the two paths hold
        # different parameters (code divides by 4).
        self.conv_top_2 = tf.keras.layers.Conv2D(fillter_size_top//4, (1, 1),
                                                 strides=(1, 1), padding='valid')
        self.conv_mid_1 = tf.keras.layers.Conv2D(fillter_size_mid, (f, f),
                                                 strides=(1, 1), padding='same')
        # Quarter-width middle conv for path 2 (same note as above).
        self.conv_mid_2 = tf.keras.layers.Conv2D(fillter_size_mid//4, (f, f),
                                                 strides=(1, 1), padding='same')
        # Strided bottom convs perform the spatial downsampling.
        self.conv_bot_1 = tf.keras.layers.Conv2D(fillter_size_bot, (1, 1),
                                                 strides=(s, s), padding='valid')
        # You can't make the filter count different here: both path outputs
        # have to match to be added together.
        self.conv_bot_2 = tf.keras.layers.Conv2D(fillter_size_bot, (1, 1),
                                                 strides=(s, s), padding='valid')
        # Projection for the shortcut: matches both the stride and the
        # output channel count of the two paths.
        self.conv_scal = tf.keras.layers.Conv2D(fillter_size_bot, (1, 1),
                                                strides=(s, s), padding='valid')
        # One BatchNormalization per conv; axis=3 is channels-last.
        self.batch_norm_top_1 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_top_2 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_mid_1 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_mid_2 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_bot_1 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_bot_2 = tf.keras.layers.BatchNormalization(axis=3)
        self.batch_norm_scal = tf.keras.layers.BatchNormalization(axis=3)
        # ReLU and Add are stateless, so single shared instances are fine.
        self.activation_relu = tf.keras.layers.Activation('relu')
        self.add_op = tf.keras.layers.Add()

    def call(self, input_x, training=False):
        # Keep the input for the projected shortcut below.
        x_shortcut = input_x
        ##PATH 1
        x_path_1 = input_x
        # First CONV block of path 1
        x_path_1 = self.conv_top_1(x_path_1)
        x_path_1 = self.batch_norm_top_1(x_path_1, training=training)
        x_path_1 = self.activation_relu(x_path_1)
        # Second CONV block of path 1
        x_path_1 = self.conv_mid_1(x_path_1)
        x_path_1 = self.batch_norm_mid_1(x_path_1, training=training)
        x_path_1 = self.activation_relu(x_path_1)
        # Third CONV block of path 1 (strided; no ReLU before the addition)
        x_path_1 = self.conv_bot_1(x_path_1)
        x_path_1 = self.batch_norm_bot_1(x_path_1, training=training)
        ##PATH 2
        x_path_2 = input_x
        # First CONV block of path 2
        x_path_2 = self.conv_top_2(x_path_2)
        x_path_2 = self.batch_norm_top_2(x_path_2, training=training)
        x_path_2 = self.activation_relu(x_path_2)
        # Second CONV block of path 2
        x_path_2 = self.conv_mid_2(x_path_2)
        x_path_2 = self.batch_norm_mid_2(x_path_2, training=training)
        x_path_2 = self.activation_relu(x_path_2)
        # Third CONV block of path 2 (strided; no ReLU before the addition)
        x_path_2 = self.conv_bot_2(x_path_2)
        x_path_2 = self.batch_norm_bot_2(x_path_2, training=training)
        # Addition of PATH 1 and PATH 2
        x = self.add_op([x_path_1, x_path_2])
        # Scaling block: project the shortcut to the downsampled shape.
        x_shortcut = self.conv_scal(x_shortcut)
        x_shortcut = self.batch_norm_scal(x_shortcut, training=training)
        # Addition to the shortcut path
        x = self.add_op([x, x_shortcut])
        x_output = self.activation_relu(x)
        return x_output
# + id="0Dx48qELQGGH" colab_type="code" colab={}
# Then the layers are combined into a model
class FunnyResNet(tf.keras.Model):
    """ResNet-style CIFAR-10 classifier built from the custom layers above.

    Stem conv -> three residual stages (each a downsampling
    ResidualLayerScal followed by four shape-preserving ResidualLayers,
    with filter counts 256/512/1024 and kernel sizes 3/5/7) -> flatten ->
    softmax over OUTPUT_CLASSES.
    """

    def __init__(self):
        super(FunnyResNet, self).__init__()
        self.batchnorm_lay1 = tf.keras.layers.BatchNormalization(axis=3)
        # NOTE(review): this conv already applies ReLU, and call() applies
        # batch norm + ReLU again right after it -- the double activation
        # looks unintentional but is preserved here; confirm before changing.
        self.conv_lay1 = tf.keras.layers.Conv2D(64, strides=(2, 2), kernel_size=(3, 3), activation='relu')
        self.act_lay1 = tf.keras.layers.Activation('relu')
        # Our ResNet block 1
        self.resnet_b1_lay1 = ResidualLayerScal(f=3, s=2, fillter_size_top=128,
                                                fillter_size_mid=128, fillter_size_bot=256)
        self.resnet_b1_lay2 = ResidualLayer(f=3, fillter_size_top=128,
                                            fillter_size_mid=128, fillter_size_bot=256)
        self.resnet_b1_lay3 = ResidualLayer(f=3, fillter_size_top=128,
                                            fillter_size_mid=128, fillter_size_bot=256)
        self.resnet_b1_lay4 = ResidualLayer(f=3, fillter_size_top=128,
                                            fillter_size_mid=128, fillter_size_bot=256)
        self.resnet_b1_lay5 = ResidualLayer(f=3, fillter_size_top=128,
                                            fillter_size_mid=128, fillter_size_bot=256)
        # Our ResNet block 2
        self.resnet_b2_lay1 = ResidualLayerScal(f=5, s=2, fillter_size_top=256,
                                                fillter_size_mid=256, fillter_size_bot=512)
        self.resnet_b2_lay2 = ResidualLayer(f=5, fillter_size_top=256,
                                            fillter_size_mid=256, fillter_size_bot=512)
        self.resnet_b2_lay3 = ResidualLayer(f=5, fillter_size_top=256,
                                            fillter_size_mid=256, fillter_size_bot=512)
        self.resnet_b2_lay4 = ResidualLayer(f=5, fillter_size_top=256,
                                            fillter_size_mid=256, fillter_size_bot=512)
        self.resnet_b2_lay5 = ResidualLayer(f=5, fillter_size_top=256,
                                            fillter_size_mid=256, fillter_size_bot=512)
        # Our ResNet block 3
        self.resnet_b3_lay1 = ResidualLayerScal(f=7, s=2, fillter_size_top=512,
                                                fillter_size_mid=512, fillter_size_bot=1024)
        self.resnet_b3_lay2 = ResidualLayer(f=7, fillter_size_top=512,
                                            fillter_size_mid=512, fillter_size_bot=1024)
        self.resnet_b3_lay3 = ResidualLayer(f=7, fillter_size_top=512,
                                            fillter_size_mid=512, fillter_size_bot=1024)
        self.resnet_b3_lay4 = ResidualLayer(f=7, fillter_size_top=512,
                                            fillter_size_mid=512, fillter_size_bot=1024)
        self.resnet_b3_lay5 = ResidualLayer(f=7, fillter_size_top=512,
                                            fillter_size_mid=512, fillter_size_bot=1024)
        self.flat_lay = tf.keras.layers.Flatten()
        self.dense_lay2 = tf.keras.layers.Dense(OUTPUT_CLASSES, activation='softmax')

    def call(self, x_input, training=False):
        # Stem: conv (with built-in ReLU), batch norm, then ReLU again.
        x = self.conv_lay1(x_input)
        x = self.batchnorm_lay1(x, training=training)
        x = self.act_lay1(x)
        # Residual block 1
        x = self.resnet_b1_lay1(x, training=training)
        x = self.resnet_b1_lay2(x, training=training)
        x = self.resnet_b1_lay3(x, training=training)
        x = self.resnet_b1_lay4(x, training=training)
        x = self.resnet_b1_lay5(x, training=training)
        # Residual block 2
        x = self.resnet_b2_lay1(x, training=training)
        x = self.resnet_b2_lay2(x, training=training)
        x = self.resnet_b2_lay3(x, training=training)
        x = self.resnet_b2_lay4(x, training=training)
        x = self.resnet_b2_lay5(x, training=training)
        # Residual block 3
        x = self.resnet_b3_lay1(x, training=training)
        x = self.resnet_b3_lay2(x, training=training)
        x = self.resnet_b3_lay3(x, training=training)
        x = self.resnet_b3_lay4(x, training=training)
        x = self.resnet_b3_lay5(x, training=training)
        # Classification head.
        x = self.flat_lay(x)
        x = self.dense_lay2(x)
        return x
# + id="X10SmzJ0p9ZM" colab_type="code" outputId="a868e36f-e801-4c51-beaf-f394c6f2d607" colab={"base_uri": "https://localhost:8080/", "height": 148}
model = FunnyResNet()
# Set initial weight shapes based on the initial input
# (a forward pass on a tiny batch builds all the layer variables).
model(x_train[0:2])
# NOTE(review): learning_rate=0.02 is high for Adam (default is 1e-3) --
# confirm this is intentional.
optimizer = tf.keras.optimizers.Adam(learning_rate=0.02)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# + id="Ddyb4lJzvg3v" colab_type="code" outputId="78289253-b7a0-4203-dd86-f96912459e1b" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Train on the augmented-data generator.  Keras documents validation_data
# as a *tuple* (x_val, y_val); pass a tuple rather than a list so it can't
# be misread as multi-input data.
history = model.fit(dataflow, epochs=EPOCHS, shuffle=True,
                    validation_data=(x_test, y_test))
| SimpleResNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Get information about the available genomes given a list of taxons
#
# ----------
# - Author: <NAME>, Ph.D.,
# - Post-Doctoral Fellow at Harvard University
# - Date: March 2021
# -----------
#
# Given a list of taxons (i.e. orders, superorder, species, ...) retrieve information about the available genome assemblies in each taxon from NCBI (GenBank and RefSeq), such as the number of available genome assemblies per taxon and the list of species with an available genome per taxon.
import sys
import zipfile
import pandas as pd
from pprint import pprint
from datetime import datetime
from collections import defaultdict, Counter
import ncbi.datasets
# - This script uses the ncbi.datasets python library. (More info https://www.ncbi.nlm.nih.gov/datasets/)
## Set up api
# GenomeApi wraps the NCBI Datasets REST endpoints for genome assemblies.
api_instance = ncbi.datasets.GenomeApi(ncbi.datasets.ApiClient())
# **Indicate file to read: here**
# - The file must include a header "Taxons" (the column name used by the
#   loops below) and 1 taxon per row
#Indicate file to read here (replace taxons_to_check.txt for your file)
filetoread="Taxons_to_check.txt"
# Check first 5 lines of the file:
taxonlist=pd.read_csv(filetoread)
taxonlist[0:5]
# ## Number of genome assemblies available per taxon
# - Warning: species might have more than 1 assembly,
# - If you want the number of unique species with genome assembly per taxon see below
#
# For every taxon in the input table, ask NCBI how many genome assemblies
# it lists and print one summary line per taxon.
for tax_name in taxonlist['Taxons']:
    # query NCBI
    genome_summary = api_instance.assembly_descriptors_by_taxon(taxon=tax_name, limit='all')
    print(f"- {tax_name}: assemblies; {genome_summary.total_count}")
# ## List the species of the available genomes
# For every taxon, list the individual assemblies in a table.
for lindex, taxon in taxonlist.iterrows():
    tax_name =taxon['Taxons']
    # Output schema for the per-taxon assembly table.
    columns = ['spp', 'acc', 'level',"numChrScaff","sub.date","org.rank","taxID","assembly.length"]
    lst = []
    # query NCBI
    genome_summary = api_instance.assembly_descriptors_by_taxon(taxon=tax_name, limit='all')
    print(f"{tax_name}; {genome_summary.total_count} assemblies")
    # NOTE(review): this assumes the API reports "no hits" as
    # total_count == None rather than 0 -- confirm; a literal zero would
    # fall through to the else branch with an empty table.
    if genome_summary.total_count is None:
        print("No genomes")
    else:
        # One row per assembly: species, accession, assembly level, number
        # of chromosomes/scaffolds, submission date, organism rank,
        # taxonomy id, and total sequence length.
        for assembly in map(lambda d: d.assembly, genome_summary.assemblies):
            lst.append([
                assembly.org.sci_name,
                assembly.assembly_accession,
                assembly.assembly_level,
                len(assembly.chromosomes),
                assembly.submission_date,
                assembly.org.rank,
                assembly.org.tax_id,
                assembly.seq_length])
        genomesdf = pd.DataFrame(lst, columns=columns)
        display(genomesdf)  # display() is provided by IPython in notebooks
# ## Number of genome assemblies available per Species
# Count the unique species with at least one genome assembly per taxon.
for lindex, taxon in taxonlist.iterrows():
    tax_name = taxon['Taxons']
    # Output schema for the per-taxon assembly table.
    columns = ['spp', 'acc', 'level',"numChrScaff","sub.date","org.rank","taxID","assembly.length"]
    lst = []
    # query NCBI
    genome_summary = api_instance.assembly_descriptors_by_taxon(taxon=tax_name, limit='all')
    if genome_summary.total_count is None:
        # Typo fixed ("sppeecies" -> "species") so the message matches the
        # wording printed by the non-empty branch below.
        print(f"{tax_name}; species with genome: 0")
    else:
        # One row per assembly, same schema as the listing loop above.
        for assembly in map(lambda d: d.assembly, genome_summary.assemblies):
            lst.append([
                assembly.org.sci_name,
                assembly.assembly_accession,
                assembly.assembly_level,
                len(assembly.chromosomes),
                assembly.submission_date,
                assembly.org.rank,
                assembly.org.tax_id,
                assembly.seq_length])
        genomesdf = pd.DataFrame(lst, columns=columns)
        # Several assemblies can belong to one species, so count uniques.
        print(f"{tax_name}; species with genome: ", len(genomesdf.spp.unique()))
        # print(" -Spp list: ",",".join(genomesdf.spp.unique()))
| Get_Available_Genomes_by_Taxon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this lecture we're going to review some of the basics of statistical testing in python. We're going to
# talk about hypothesis testing, statistical significance, and using scipy to run student's t-tests.
# +
# We use statistics in a lot of different ways in data science, and on this lecture, I want to refresh your
# knowledge of hypothesis testing, which is a core data analysis activity behind experimentation. The goal of
# hypothesis testing is to determine if, for instance, the two different conditions we have in an experiment
# have resulted in different impacts
# Let's import our usual numpy and pandas libraries
import numpy as np
import pandas as pd
# Now let's bring in some new libraries from scipy
from scipy import stats
# +
# Now, scipy is an interesting collection of libraries for data science and you'll use most or perhaps all of
# these libraries. It builds on numpy and works alongside pandas and plotting libraries such as matplotlib,
# and it provides a number of scientific library functions as well
# +
# When we do hypothesis testing, we actually have two statements of interest: the first is our actual
# explanation, which we call the alternative hypothesis, and the second is that the explanation we have is not
# sufficient, and we call this the null hypothesis. Our actual testing method is to determine whether the null
# hypothesis is true or not. If we find that there is a difference between groups, then we can reject the null
# hypothesis and we accept our alternative.
# Let's see an example of this; we're going to use some grade data
# (one row per student, with submission dates and grades for six
# assignments, as used by the cells below).
df=pd.read_csv ('datasets/grades.csv')
df.head()
# -
# If we take a look at the data frame inside, we see we have six different assignments. Lets look at some
# summary statistics for this DataFrame
print("There are {} rows and {} columns".format(df.shape[0], df.shape[1]))
# +
# For the purpose of this lecture, let's segment this population into two pieces. Let's say those who finish
# the first assignment by the end of December 2015, we'll call them early finishers, and those who finish it
# sometime after that, we'll call them late finishers.
# Boolean-mask selection: submission timestamp strictly before 2016-01-01.
early_finishers=df[pd.to_datetime(df['assignment1_submission']) < '2016']
early_finishers.head()
# +
# So, you have lots of skills now with pandas, how would you go about getting the late_finishers dataframe?
# Why don't you pause the video and give it a try.
# -
# Here's my solution. First, the dataframe df and the early_finishers share index values, so I really just
# want everything in the df which is not in early_finishers
# `~` inverts the membership test: keep rows whose index is NOT early.
late_finishers=df[~df.index.isin(early_finishers.index)]
late_finishers.head()
# +
# There are lots of other ways to do this. For instance, you could just copy and paste the first projection
# and change the sign from less than to greater than or equal to. This is ok, but if you decide you want to
# change the date down the road you have to remember to change it in two places. You could also do a join of
# the dataframe df with early_finishers - if you do a left join you only keep the items in the left dataframe,
# so this would have been a good answer. You also could have written a function that determines if someone is
# early or late, and then called .apply() on the dataframe and added a new column to the dataframe. This is a
# pretty reasonable answer as well.
# +
# As you've seen, the pandas data frame object has a variety of statistical functions associated with it. If
# we call the mean function directly on the data frame, we see that each of the means for the assignments are
# calculated. Let's compare the means for our two populations
# Sample means of the first-assignment grade for each group.
print(early_finishers['assignment1_grade'].mean())
print(late_finishers['assignment1_grade'].mean())
# +
# Ok, these look pretty similar. But, are they the same? What do we mean by similar? This is where the
# students' t-test comes in. It allows us to form the alternative hypothesis ("These are different") as well
# as the null hypothesis ("These are the same") and then test that null hypothesis.
# When doing hypothesis testing, we have to choose a significance level as a threshold for how much of a
# chance we're willing to accept. This significance level is typically called alpha. #For this example, let's
# use a threshold of 0.05 for our alpha or 5%. Now this is a commonly used number but it's really quite
# arbitrary.
# The SciPy library contains a number of different statistical tests and forms a basis for hypothesis testing
# in Python and we're going to use the ttest_ind() function which does an independent t-test (meaning the
# populations are not related to one another). The result of ttest_ind() is the t-statistic and a p-value.
# It's this latter value, the probability, which is most important to us, as it indicates the chance (between
# 0 and 1) of our null hypothesis being True.
# Let's bring in our ttest_ind function
from scipy.stats import ttest_ind
# Let's run this function with our two populations, looking at the assignment 1 grades
# Returns (t-statistic, p-value); the p-value is the chance of seeing a
# difference at least this large if the null hypothesis (equal means) held.
ttest_ind(early_finishers['assignment1_grade'], late_finishers['assignment1_grade'])
# +
# So here we see that the probability is 0.18, and this is above our alpha value of 0.05. This means that we
# cannot reject the null hypothesis. The null hypothesis was that the two populations are the same, and we
# don't have enough certainty in our evidence (because it is greater than alpha) to come to a conclusion to
# the contrary. This doesn't mean that we have proven the populations are the same.
# -
# Why don't we check the other assignment grades?
print(ttest_ind(early_finishers['assignment2_grade'], late_finishers['assignment2_grade']))
print(ttest_ind(early_finishers['assignment3_grade'], late_finishers['assignment3_grade']))
print(ttest_ind(early_finishers['assignment4_grade'], late_finishers['assignment4_grade']))
print(ttest_ind(early_finishers['assignment5_grade'], late_finishers['assignment5_grade']))
print(ttest_ind(early_finishers['assignment6_grade'], late_finishers['assignment6_grade']))
# +
# Ok, so it looks like in this data we do not have enough evidence to suggest the populations differ with
# respect to grade. Let's take a look at those p-values for a moment though, because they are saying things
# that can inform experimental design down the road. For instance, one of the assignments, assignment 3, has a
# p-value around 0.11. This means that if we accepted a level of chance similarity of 11% this would have been
# considered statistically significant. As a researcher, this would suggest to me that there is something here
# worth considering following up on. For instance, if we had a small number of participants (we don't) or if
# there was something unique about this assignment as it relates to our experiment (whatever it was) then
# there may be followup experiments we could run.
# +
# P-values have come under fire recently for being insufficient for telling us enough about the interactions
# which are happening, and two other techniques, confidence intervals and bayesian analyses, are being used
# more regularly. One issue with p-values is that as you run more tests you are likely to get a value which
# is statistically significant just by chance.
# Let's see a simulation of this. First, let's create a data frame of 100 columns, each with 100 numbers
# (each entry of the constructor list becomes one row of 100 uniform [0, 1) draws, giving a 100x100 frame).
df1=pd.DataFrame([np.random.random(100) for x in range(100)])
df1.head()
# +
# Pause this and reflect -- do you understand the list comprehension and how I created this DataFrame? You
# don't have to use a list comprehension to do this, but you should be able to read this and figure out how it
# works as this is a commonly used approach on web forums.
# -
# Ok, let's create a second dataframe
# (independent of df1 but drawn from the same uniform distribution -- under the null they match only by chance)
df2=pd.DataFrame([np.random.random(100) for x in range(100)])
# +
# Are these two DataFrames the same? Maybe a better question is, for a given row inside of df1, is it the same
# as the row inside df2?
# Let's take a look. Let's say our critical value is 0.1, or an alpha of 10%. And we're going to compare each
# column in df1 to the same numbered column in df2. And we'll report when the p-value is less than 10%,
# which means that we have sufficient evidence to say that the columns are different.
# Let's write this in a function called test_columns
def test_columns(alpha=0.1):
# I want to keep track of how many differ
num_diff=0
# And now we can just iterate over the columns
for col in df1.columns:
# we can run out ttest_ind between the two dataframes
teststat,pval=ttest_ind(df1[col],df2[col])
# and we check the pvalue versus the alpha
if pval<=alpha:
# And now we'll just print out if they are different and increment the num_diff
print("Col {} is statistically significantly different at alpha={}, pval={}".format(col,alpha,pval))
num_diff=num_diff+1
# and let's print out some summary stats
print("Total number different was {}, which is {}%".format(num_diff,float(num_diff)/len(df1.columns)*100))
# And now let's actually run this
test_columns()
# +
# Interesting, so we see that there are a bunch of columns that are different! In fact, that number looks a
# lot like the alpha value we chose. So what's going on - shouldn't all of the columns be the same? Remember
# that all the ttest does is check if two sets are similar given some level of confidence, in our case, 10%.
# The more random comparisons you do, the more will just happen to be the same by chance. In this example, we
# checked 100 columns, so we would expect there to be roughly 10 of them if our alpha was 0.1.
# We can test some other alpha values as well
test_columns(0.05)
# +
# So, keep this in mind when you are doing statistical tests like the t-test which has a p-value. Understand
# that this p-value isn't magic, that it's a threshold for you when reporting results and trying to answer
# your hypothesis. What's a reasonable threshold? Depends on your question, and you need to engage domain
# experts to better understand what they would consider significant.
# Just for fun, let's recreate that second dataframe using a non-normal distribution, I'll arbitrarily choose
# chi squared
df2=pd.DataFrame([np.random.chisquare(df=1,size=100) for x in range(100)])
test_columns()
# +
# Now we see that all or most columns test to be statistically significant at the 10% level.
# -
# In this lecture, we've discussed just some of the basics of hypothesis testing in Python. I introduced you
# to the SciPy library, which you can use for the students t test. We've discussed some of the practical
# issues which arise from looking for statistical significance. There's much more to learn about hypothesis
# testing, for instance, there are different tests used, depending on the shape of your data and different
# ways to report results instead of just p-values such as confidence intervals or bayesian analyses. But this
# should give you a basic idea of where to start when comparing two populations for differences, which is a
# common task for data scientists.
| Course - 1: Introduction to Data Science in Python/resources/week-4/BasicStatisticalTesting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Carga Data INGRESO X MATERIA
#
# Creación del DataFrame asociado a los Ingresos por materia
#
# Rev: 29-10-2020
# +
import os
import pandas as pd
import numpy as np
from pyarrow import feather
from tqdm import tqdm
from src.data import cleandata
# -
# Raw PJUD CSV files live in this directory; list them once, and register
# tqdm's progress_apply hooks with pandas for the cleaning steps below.
path_raw = "../data/raw/pjud"
archivos = os.listdir(path_raw)
tqdm.pandas()
# +
# Build the INGRESOS x MATERIA frame: read every "Ingresos por Materia Penal"
# CSV (years 2015-2019) found in the raw directory and stack them vertically.
dataframes = [
    pd.read_csv(f"{path_raw}/{nombre}", sep=";", encoding='cp850', dtype='unicode', low_memory=True)
    for nombre in archivos
    if "Ingresos por Materia Penal" in nombre
]
df_ingresos_materia = pd.concat(dataframes, axis=0)
# -
df_ingresos_materia
df_ingresos_materia.columns
# Some columns are duplicated across yearly files: TOTAL INGRESOS POR MATERIAS and
# TOTAL INGRESOS POR MATERIAS(*) hold the same figure, each NaN where the other has data,
# so coalesce them into the unstarred column.
df_ingresos_materia['TOTAL INGRESOS POR MATERIAS'] = df_ingresos_materia['TOTAL INGRESOS POR MATERIAS'].fillna(df_ingresos_materia['TOTAL INGRESOS POR MATERIAS(*)'])
# +
# Drop the row-number column, the now-redundant starred total, and a footnote
# column whose header is an explanatory sentence rather than real data.
df_ingresos_materia.drop(['N°','TOTAL INGRESOS POR MATERIAS(*)'], axis = 'columns', inplace = True)
df_ingresos_materia.drop(['(*)Se agregó columna total de ingresos, dado que en algunas causas, la materia se repite (error de tramitación)'],
axis='columns',inplace=True)
# +
# CAST CODE/COUNT COLUMNS FROM 'unicode' STRINGS TO INTEGERS
# (missing values are mapped to 0 before casting).
df_ingresos_materia['COD. CORTE'] = df_ingresos_materia['COD. CORTE'].fillna(0).astype(np.int16)
df_ingresos_materia['COD. TRIBUNAL'] = df_ingresos_materia['COD. TRIBUNAL'].fillna(0).astype(np.int16)
df_ingresos_materia['COD. MATERIA'] = df_ingresos_materia['COD. MATERIA'].fillna(0).astype(np.int16)
df_ingresos_materia['AÑO INGRESO'] = df_ingresos_materia['AÑO INGRESO'].fillna(0).astype(np.int16)
# BUGFIX: this column previously used np.int8, whose maximum is 127; numpy's
# astype wraps silently on overflow, so any total above 127 would be corrupted.
# int16 matches the other integer columns and is still compact.
df_ingresos_materia['TOTAL INGRESOS POR MATERIAS'] = df_ingresos_materia['TOTAL INGRESOS POR MATERIAS'].fillna(0).astype(np.int16)
# +
# Parse dates: convert the textual FECHA INGRESO column via the project's
# cleandata.convierte_fecha helper (presumably returns datetime values --
# confirm in src.data.cleandata).
df_ingresos_materia['FECHA INGRESO'] = df_ingresos_materia['FECHA INGRESO'].progress_apply(cleandata.convierte_fecha)
# +
# Strip stray whitespace column-by-column in the object-typed columns.
df_ingresos_materia = df_ingresos_materia.progress_apply(cleandata.elimina_espacios, axis=0)
# +
# Remove accents/tildes from every remaining string (object) column.
cols = df_ingresos_materia.select_dtypes(include = ["object"]).columns
df_ingresos_materia[cols] = df_ingresos_materia[cols].progress_apply(cleandata.elimina_tilde)
# +
# CORTE takes a small fixed set of values, so store it as a pandas category.
df_ingresos_materia['CORTE'] = df_ingresos_materia['CORTE'].astype('category')
# -
df_ingresos_materia['TIPO CAUSA'].unique()
# Keep only 'Ordinaria' cases: select the rows of every other type, drop them,
# then reset the index so it is contiguous again.
tipo_causa = df_ingresos_materia[df_ingresos_materia['TIPO CAUSA']!='Ordinaria']
df_ingresos_materia.drop(tipo_causa.index, axis=0, inplace=True)
df_ingresos_materia.reset_index(inplace = True)
df_ingresos_materia
# +
# Persist the cleaned dataset in Feather format for the next pipeline stage.
path_interim = "../data/interim/pjud"
os.makedirs(path_interim, exist_ok = True)
df_ingresos_materia.to_feather(f'{path_interim}/IngresosMateria_feather')
# -
| notebooks/.ipynb_checkpoints/3.1-jalvaradoruiz-carga-limpieza-data-ingresos-materia-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import datetime as dt
import requests as rq
from scipy import integrate
from scipy import optimize
# -
# ### Define SEIR
# +
def diff_eqns(y, t, beta, sigma, gamma, N):
    """SEIR right-hand side: return [dS/dt, dE/dt, dI/dt, dR/dt]."""
    St, Et, It, Rt = y
    # Force of infection: beta * S * I / N individuals move S -> E per unit time.
    infections = beta * St * It / N
    # E -> I at rate sigma, I -> R at rate gamma.
    incubations = sigma * Et
    removals = gamma * It
    return [-infections, infections - incubations, incubations - removals, removals]

def seir_model(t, beta, sigma, gamma, E0, N):
    """Integrate the SEIR system over times ``t``; return (S, E, I, R) arrays."""
    # Everyone starts susceptible except the E0 initially exposed.
    y0 = (N - E0, E0, 0, 0)
    trajectory = integrate.odeint(diff_eqns, y0, t, args=(beta, sigma, gamma, N))
    return trajectory.T  # rows: S, E, I, R
# -
# ### Sample SEIR plot
# +
# Sample run: one initially exposed individual in a closed population of 1000,
# tracked over 100 days.
N = 1000
days = np.arange(100)
beta = 1.5
sigma = 1 / 5.2 # 5.2 days incubation, i.e. from exposed to infectious
gamma = 1 / 2.9 # 2.9 days from infectious to removal
S, E, I, R = seir_model(days, beta, sigma, gamma, 1, N)
# Collect the four compartments into one frame for plotting.
df = pd.DataFrame({
    "Days": days,
    "S": S,
    "E": E,
    "I": I,
    "R": R
})
df.plot(
    x='Days',
    y=['S', 'E', 'I', 'R'],
    grid=True,
    title="SEIR sample"
)
# -
# ### Fit Italy's data
# Load Italy's daily case series; 'date' is parsed to datetime.
it_df = pd.read_csv("csv/italy.csv", parse_dates=['date'])
# Show one random row as a sanity check of the columns.
it_df.sample()
# +
def seir_model_italy_i(t, beta):
    """Infectious compartment I(t) for Italy, with only beta left free.

    Incubation/removal rates are pinned to the values used in the sample run
    above, E0 to 19 and N to Italy's population, so curve_fit only tunes beta.
    """
    fixed_sigma = 1 / 5.2
    fixed_gamma = 1 / 2.9
    initial_exposed = 19
    population = 60461826
    # seir_model returns (S, E, I, R); index 2 selects I.
    return seir_model(t, beta, fixed_sigma, fixed_gamma, initial_exposed, population)[2]
# Least-squares fit: curve_fit adjusts beta so the model's I(t) tracks the
# observed active-case series (the frame's row index serves as the day number).
params, covar = optimize.curve_fit(seir_model_italy_i, it_df.index, it_df.active)
it_df['fitted'] = seir_model_italy_i(it_df.index, *params)
print("Params:", params)
ax = it_df.plot(
    x='date',
    y=['active', 'fitted'],
    grid=True,
    title="Fitting active cases to I"
)
| 08_Learning_SEIR.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: bonn
# language: python
# name: bonn
# ---
# # Subject Related Data
# `CB DEV NOTES`
# 1. Some table structures and data types changed for pyrat ingestion
# 2. The GUI didn't work for me. Maybe an error on my end.
# ## Login
#
# Either log in via a local config file (see [01_pipeline](./01_pipeline.ipynb)), or enter login information manually. If you are don't have your login information, contact the administrator.
#
# Local Config
# Ensure the working directory is the repository root ('adamacs') so the local
# DataJoint config file is found, then open the database connection.
import os
if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')
assert os.path.basename(os.getcwd())=='adamacs', ("Please move to the main directory")
import datajoint as dj; dj.conn()
# Manual Entry
# Prompt for credentials interactively instead of storing them in the notebook.
import datajoint as dj; import getpass
dj.config['database.host'] = '172.26.128.53' # Put the server name between these apostrophe
dj.config['database.user'] = 'danielmk' # Put your user name between these apostrophe
# BUGFIX: this line was corrupted ('get<PASSWORD>.get<PASSWORD>()', a syntax
# error); getpass.getpass() prompts for the password without echoing it.
dj.config['database.password'] = getpass.getpass() # Put your password in the prompt
dj.conn()
# ## Activation
# Next, import from `adamacs.pipeline` to activate the relevant schema.
from adamacs.utility import *
from adamacs.nbgui import *
from adamacs.pipeline import subject
# Assign easy names for relevant tables
# Instantiate each schema table once so the entry cells below can use short
# handles instead of the full subject.<Table>() spelling.
sub, lab, protocol, line, mutation, user, project, subject_genotype, subject_death = (
    subject.Subject(), subject.Lab(), subject.Protocol(), subject.Line(),
    subject.Mutation(), subject.User(), subject.Project(), subject.SubjectGenotype(),
    subject.SubjectDeath()
)
# ## Entry via GUI
"""Run this cell to load the subject entry sheet."""
# Instantiate the sheet and display its app widget (last expression renders in Jupyter).
mouse_gui = MouseEntrySheet()
mouse_gui.app
# ## Entry via `insert()`
#
# ### **WARNING**
# Only run these manual entry cells if you know what you are doing!
#
# Strings for these values may be edited to insert single rows into corresponding tables.
#
# ### Lab
# +
# Insert one Lab row; tuple order follows the variable definitions below.
lab_key = 'Beck' # Short, unique identifier for the lab. Maximum 8 characters. Example: 'Rose'.
lab_name = 'Neuronal input-output computation during cognition' # A longer, more descriptive name for the laboratory.
institution = 'Institute for Experimental Epileptology and Cognition Research' # The institution the laboratory belongs to.
address = 'Venusberg-Campus 1, 53127 Bonn' # The postal address of the laboratory.
lab.insert1((lab_key, lab_name, institution, address))
# -
# ### Protocol
# +
# Insert one Protocol row (key + free-text description).
protocol_key = 'LANUF3' # Short, unique identifier for the protocol. Maximum 16 characters.
protocol_description = 'Another dummy protocol ID for testing purposes' # Description of the protocol.
protocol.insert1((protocol_key, protocol_description))
# -
# ### User Entry
# insert multiple entries
# user.insert() accepts a list of dicts, one per row; both users reference the
# lab inserted above through lab_key.
data = [{'user_id': 1, 'name': 'natashak', 'lab': lab_key},
{'user_id': 2, 'name': 'georgejk', 'lab': lab_key}]
user.insert(data)
# ### Line/Mutation
# +
line_id = 1 # Unique identifier for the line.
line_name = 'Gcamp6-ThyC57BL/6J-Tg(Thy1-GCaMP6s)GP4.12Dkim/J' # Description of the line.
is_active = 1 # Whether or not the line is actively breeding.
mutation_id = 2 # Unique identifier for the mutation.
# NOTE(review): '<PASSWORD>' below is an anonymization artifact, presumably
# 'Tg(Thy1-GCaMP6s)GP4.12Dkim' originally (cf. line_name) -- restore before running.
mutation_description = 'Tg(Thy1-GCaMP6s)GP4.12D<PASSWORD>' # A description of the mutation.
# Mutation appears to be keyed by (line_id, mutation_id); the line is inserted first.
line.insert1((line_id, line_name, is_active))
mutation.insert1((line_id, mutation_id, mutation_description))
# -
# ### Subject
# +
# Insert one subject. Tuple order must match the Subject table definition
# (field meanings inferred from the variable names -- confirm against adamacs.pipeline.subject).
subject_id = 'WEZ-8701'
earmark = 'G155'
sex = 'M'
birth_date = '2010-08-20'
subject_description = 'Basic Mouse'
generation = 'F2'
# Parents keyed by 'm'/'f' -- presumably mother/father subject ids; verify.
parents = {'m': 'Rose_ROS-0001', 'f' : 'Rose_ROS-0002'}
owner_id = 1
responsible_id = 2
line_id = 1
protocol_key = 'LANUF3'
sub.insert1((subject_id, earmark, sex, birth_date, subject_description, generation,
             parents, owner_id, responsible_id, line_id, protocol_key))
# -
# ### Subject Genotype
# +
# BUGFIX: these cells previously bound the name `subject`, shadowing the
# `subject` schema module imported above; renamed to `subject_key`.
subject_key = 'WEZ-8701'
line_id = 1
mutation_id = 2  # Unique identifier for the mutation.
genotype = 'wt/tg'  # The target phenotype of the line.
subject_genotype.insert1((subject_key, line_id, mutation_id, genotype))
# -
# ### Project Entry
# +
project_key = 'TEC'  # Short, unique project identifier.
project_description = 'Trace Eyeblink Conditioning'
project.insert1((project_key, project_description))
# -
# ### Subject Death
# +
subject_key = 'WEZ-8701'
death_date = '2010-12-01'  # ISO date of death.
case = 'natural'  # Cause/category of death.
subject_death.insert1((subject_key, death_date, case))
# -
# ## Fetch
# ### As table
# `*` is DataJoint's join operator; a bare table expression displays its contents.
user * lab
sub
line * subject_genotype
subject_genotype
# ### As dictionary
# One item:
# fetch1() requires the restriction (`&`) to match exactly one row.
(sub & 'subject="Rose_ROS-0019"').fetch1()
# List of dictionaries:
(subject_genotype & 'subject="Rose_ROS-0019"').fetch(as_dict=True)
| notebooks/02_manual_insert.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Numerical instabilities of the Nonlinear Schrödinger Equation
#
# This example demonstrates numerical instabilities that build up during
# simulations of the standard nonlinear Schrödinger equation when the
# $z$-increment used for propagtion is too small.
#
# Here, the simple split-step Fourier method is used and the instabilities
# develop from round-off errors during the propagation of a fundamental soliton.
#
# The numerical instabilities of the nonlinear Schrödinger equation developing on
# top of plain wave solutions where first studied in Ref. [WH1986]_.
# An in-depth study of instabilities of the split-step Fourier method for the
# simulation of the nonlinear Schrödinger equation, developing on a background
# given by a soliton, is provided by Ref. [L2012]_.
#
# .. codeauthor:: <NAME> <<EMAIL>>
#
# We first import the functionality needed to perform the sequence of numerical
# experiments:
#
#
import sys
import numpy as np
import numpy.fft as nfft
from fmas.models import ModelBaseClass
from fmas.config import FTFREQ, FT, IFT, C0
from fmas.solver import SiSSM, SySSM, IFM_RK4IP, LEM_SySSM, CQE
from fmas.grid import Grid
# Next, we implement a model for the nonlinear Schrödinger equation. In
# particular, we here consider the standard nonlinear Schrödinger equation,
# given by
#
# \begin{align}\partial_z u = -i \frac{\beta_2}{2}\partial_t^2 u + i\gamma |u|^2 u,\end{align}
#
# wherein $u = u(z, t)$ represents the slowly varying pulse envelope,
# $\beta_2=-1$ is the second order dispersion parameter, and
# $\gamma=1$ is the nonlinear parameter:
#
#
class NSE(ModelBaseClass):
    """Standard nonlinear Schrödinger equation model.

    Provides the linear (dispersion) and nonlinear (Kerr) operators of
    d/dz u = -i (b2/2) d^2/dt^2 u + i gamma |u|^2 u in the frequency domain.
    """
    def __init__(self, w, b2 = -1.0, gamma = 1.):
        # Pass beta(w) = 0.5*b2*w^2 to the base class; it is exposed as
        # self.beta_w (assumed from usage in Lw -- confirm in fmas.models).
        super().__init__(w, 0.5*b2*w*w)
        self.gamma = gamma
    @property
    def Lw(self):
        # Linear operator in the frequency domain: i*beta(w).
        return 1j*self.beta_w
    def Nw(self, uw):
        # Nonlinear operator: to time domain, apply i*gamma*|u|^2*u, back to frequency domain.
        ut = IFT(uw)
        return 1j*self.gamma*FT(np.abs(ut)**2*ut)
# Next, we initialize the computational domain and use a simple split-step
# Fourier method to propagate a single fundamental soliton for ten soliton
# periods.
#
#
# -- INITIALIZATION STAGE
# ... COMPUTATIONAL DOMAIN: 1024 points on t in [-30, 30]
grid = Grid( t_max = 30., t_num = 2**10)
t, w = grid.t, grid.w
# ... NSE MODEL
model = NSE(w, b2=-1., gamma=1.)
# ... INITIAL CONDITION: sech(t), the fundamental soliton for b2=-1, gamma=1
u_0t = 1./np.cosh(t)
# In a first numerical experiment, the stepsize is intentionally kept very
# large in order to allow the numerical istabilities to build up.
#
#
solver = SySSM(model.Lw, model.Nw)
solver.set_initial_condition(w, FT(u_0t))
# Deliberately coarse stepping: only 511 steps over z in [0, 10*pi].
solver.propagate(z_range = 10*np.pi, n_steps = 511, n_skip = 1)
z, utz = solver.z_, solver.utz
# In this case, instabilities are expected to build up since the
# $z$-increment $\Delta z$, used by the propagation algorithm,
# exceeds the threshold increment $\Delta
# z_{\mathrm{T}}=2\pi/\mathrm{max}(\omega)^2$ (both increments are displayed
# below; the formula matches the computation of dz_T in the next cell).
#
#
# +
# -- MAXIMUM FREQUENCY SUPPORTED ON COMPUTATIONAL GRID
w_max = np.pi/(t[1]-t[0])
# -- THRESHOLD INCREMENT
dz_T = np.pi*2/w_max**2
print("Increment dz =", z[1]-z[0])
print("Threshold increment dz_T =", dz_T)
# -
# In a second numerical experiment, the stepsize is set small enough to shift
# the resonance outside the computational domain.
#
#
# +
solver = SySSM(model.Lw, model.Nw)
solver.set_initial_condition(w, FT(u_0t))
# Fine stepping: dz now lies below dz_T, so no instability is seeded.
solver.propagate(z_range = 10*np.pi, n_steps = 15000, n_skip = 1)
z2, utz2 = solver.z_, solver.utz
print("Increment dz =", z2[1]-z2[0])
print("Threshold increment dz_T =", dz_T)
# -
# Next, we prepare a figure that shows the results of the above to experiments.
# The left subfigure shows the results of the first simulation run in which the
# numerical instabilities and their predicted locations are shown.
# The right subfigure shows the results of second simulation run in which the
# $z$-increment was small enough to shift the instabilities outside the
# computational domain.
#
#
# +
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as col

f, (ax,ax2) = plt.subplots(1, 2, figsize=(8,4))

# -- EXAMPLE WITH INSTABILITIES
dz = z[1]-z[0]
# Predicted angular frequency of the n-th instability resonance for step dz.
w_I = lambda n: np.sqrt(n*2*np.pi*2./dz)
shift=nfft.fftshift
Iw_ini = np.abs(FT(utz[0]))**2
Iw_fin = np.abs(FT(utz[-1]))**2
# Raw strings so '\pi' is not treated as an escape sequence.
ax.plot(shift(w), shift(Iw_ini)/np.max(Iw_ini), color='gray', dashes=[2,2], lw=1., label=r'$z=0$')
ax.plot(shift(w), shift(Iw_fin)/np.max(Iw_ini), color='k', lw=1., label=r'$z=10\pi$')
# Mark the predicted resonance positions (symmetric about w = 0).
for n in range(1,20,1):
    ax.axvline(w_I(n), lw=0.75)
    ax.axvline(-w_I(n), lw=0.75)
dw_lim = (-35,35)
dw_ticks = (-30, -15, 0, 15, 30)
ax.tick_params(axis='x', length=2., pad=2, top=False)
ax.set_xlim(dw_lim)
ax.set_xticks(dw_ticks)
ax.set_xlabel(r"Detuning $\omega$")
y_lim = (1e-35,10)
y_ticks = (1e-30,1e-20,1e-10,1)
ax.tick_params(axis='y', length=2., pad=2, top=False)
ax.set_yscale('log')
ax.set_ylim(y_lim)
ax.set_yticks(y_ticks)
# BUGFIX: the mathtext label previously ended '...(z=0)}$' with an unbalanced
# brace, which raises a parse error when the figure is rendered.
ax.set_ylabel(r"Spectral intensity $I_\omega(z)/\mathrm{max}(I_\omega(z=0))$")
ax.set_title(r"Numerical instabilities build up")
ax.legend()

# -- EXAMPLE WITHOUT INSTABILITIES
Iw_ini = np.abs(FT(utz2[0]))**2
Iw_fin = np.abs(FT(utz2[-1]))**2
ax2.plot(shift(w), shift(Iw_ini)/np.max(Iw_ini), color='gray', dashes=[2,2], lw=1., label=r'$z=0$')
ax2.plot(shift(w), shift(Iw_fin)/np.max(Iw_ini), color='k', lw=1., label=r'$z=10\pi$')
ax2.tick_params(axis='x', length=2., pad=2, top=False)
ax2.set_xlim(dw_lim)
ax2.set_xticks(dw_ticks)
ax2.set_xlabel(r"Detuning $\omega$")
ax2.tick_params(axis='y', length=2., pad=2, top=False)
ax2.set_yscale('log')
ax2.set_ylim(y_lim)
ax2.set_yticks(y_ticks)
ax2.set_title(r"No numerical instabilities")
ax2.legend()
plt.show()
# -
# **References:**
#
# .. [WH1986] <NAME>, <NAME>, Split-step methods for the solution
# of the nonlinear Schrödinger equation, SIAM J. Numer. Anal., 23
# (1986) 485, http://www.jstor.org/stable/2157521.
#
# .. [L2012] <NAME>, Instability Analysis of the Split-Step Fourier Method
# on the Background of a Soliton of the Nonlinear Schrödinger
# Equation, Numerical Methods for Partial Differential Equations 28
# (2012) 641, https://doi.org/10.1002/num.20649
#
#
#
| docs/_downloads/ee011c6a01f7095b00e749525322f76b/g_instabilities_dz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Лекция 5: Квантили, доверительные интервалы и распределения, производные от нормального.
# ### Пример для узнаваемости продукта:
# Представим, что вы сделали некий новый продукт, например специальный вид матраца для качественного сна, и хотите выяснить, насколько хорошо людям ваш продукт известен. Можно определить бинарную случайную величину, которая эту ситуацию будет описывать: пусть эта величина - $X = 1$, если член целевой аудитории знает о вашем продукте, и $X = 0$, если член целевой аудитории не знает о вашем продукте. Такие исследования часто называют "измерениями узнаваемости бренда". Это величина с распределением Бернулли и параметром $p$ (вероятность узнавания), который вы хотите измерить.
#
# Соберем выборку респондентов (Она должна быть репрезентативной, то есть отражать параметры генеральной совокупности) и зададим им вопрос : "Знаете ли вы о наших чудо-матрацах?". Ответы респондентов составят выборку из нашего распределения Бернулли - список из нулей и единиц. Тогда наша оценка узнаваемости – доля единиц в выборке. Возникает вопрос: а сколько респондентов нам нужно опросить?
#
# Давайте попробуем несколько вариантов. Мы опросили сначала 20 человек, 10 из них о наших матрацах знают, тогда оценка нашего $p$ по выборке есть $\frac{1}{2}$. Затем опросим 200 человек. И из них уже 90 знает о наших матрацах. Наша оценка $p$ по другой выборке есть $0.45$. Какой из вариантов нам больше подойдет? Какой из них точнее? Здравый смысл подсказывает, что второй – там же больше наблюдений. Для оценки этой точности нам поможет понятие доверительного интервала:
#
# **Определение** Доверительным интервалом для параметра $\theta$ называют такую пару статистик $l$ и $r$, что $P(l\le\theta\le r)\ge 1-\alpha$.
#
# Здесь $\theta$ - наш оцениваемый параметр, а $1-\alpha$ называют уровнем доверия.
#
# Смысл здесь такой: Если повторять наш эксперимент бесконечно (чтобы частотные оценки превратились в вероятности), то наш доверительный интервал будет включать в себя истинное значение $\theta$ в $100(1-\alpha)$ процентах случаев.
# Наша оценка – выборочное среднее, а если респондентов много, можно применить центральную предельную теорему:
# $$p\approx N(E(X),\frac{\sigma^2(X)}{n})$$
# Для распределения Бернулли $E(X) = p$ , а $\sigma^2(X)=p(1-p)$
# **ВНИМАНИЕ!** $p$ нам неизвестно, самое лучшее, что мы о нем можем сказать – дать оценку p по выборке, то есть заменить в формуле нормального распределения $p$ на $\overline{p}$. И тогда получаем:
# $$p\approx N(\bar{p},\frac{\bar{p}(1-\bar{p})}{n})$$
#
# А теперь "правило двух сигм" (там было как раз 95% вероятностной массы):
#
# $$P(\overline{p}-2\sqrt{\frac{\bar{p}(1-\bar{p})}{n}} \le p \le \overline{p}+2\sqrt{\frac{\bar{p}(1-\bar{p})}{n}})\approx 0.95$$
# А значит теперь мы получаем оценки доверительных интервалов для наших случаев:
# * Для выборки из 20 наблюдений интервал имеет вид:
# +
from math import sqrt

# 10 of 20 respondents knew the product: estimated p = 0.5.
p = 0.5
n = 20
# Half-width of the ~95% CI: two standard errors of a Bernoulli sample mean.
interval = 2 * sqrt(p * (1 - p) / n)
print(f'[{p - interval}, {p + interval}]')
# -
# * Для выборки из 200 наблюдений интервал имеет вид:
# +
# 90 of 200 respondents knew the product: estimated p = 0.45.
p = 0.45
n = 200
# Larger sample -> smaller standard error -> narrower ~95% interval.
variance_of_mean = p * (1 - p) / n
interval = 2 * sqrt(variance_of_mean)
print(f'[{p - interval}, {p + interval}]')
# -
# Доверительный интервал указывает на неопределенность в нашей оценке параметра. Заметьте, что с увеличением количества наблюдений интервал становится уже, а это и означает, что чем больше наблюдений, тем выше точность. Вообще говоря, не обязательно использовать ЦПТ для построения интервалов - часто, когда распределение известно, можно найти метод получше, но метод оценки через ЦПТ довольно универсален и часто будет вам пригождаться.
# # Интервальные оценки
# Здесь мы с вами использовали правило "двух сигм" для построения доверительного интервала для Бернуллиевской случайной величины с помощью нормального приближения (Центральной Предельной Теоремы).
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/37/Standard_deviation_diagram_%28decimal_comma%29.svg/325px-Standard_deviation_diagram_%28decimal_comma%29.svg.png" width="500">
#
# На самом деле "два сигма" это приближенное значение. Попробуем его уточнить. Для этого надо дать вероятностное определение квантиля (Напоминаю, что для выборок мы договорились, что n-процентный квантиль - такое число, что ниже него лежит n процентов наблюдений в выборке)
# Квантилем порядка $\alpha$ называют такую величину $X_{\alpha}$, что $$P(X\le X_{\alpha})\ge \alpha$$ и $$P(X\ge X_{\alpha})\ge 1-\alpha$$
# Тогда для правила "двух сигм" наши границы интервала $$P(l\le X \le r) = 1-\alpha$$
# Будут в точности равны $X_{0.025}$ и $X_{0.975}$, то есть:
# $$P(X_{0.025}\le X \le X_{0.975}) = 0.95$$
# Тогда для произвольного квантиля ситуация будет такая:
#
# $$P(X_{\frac{\alpha}{2}} \le X \le X_{1-\frac{\alpha}{2}}) = 1-\alpha$$
# Полученный нами интервал называют предсказательным интервалом $[X_{\frac{\alpha}{2}},X_{1-\frac{\alpha}{2}}]$
# Заметьте, что здесь случайной является величина в центре, а границы задаются параметрами распределения генеральной совокупности.
#
# В случае нормального распределения $X \sim N(\mu,\sigma^2)$ этот интервал можно выразить через соответствующие квантили стандартного нормального распределения, то есть $N(0,1)$ в таком виде:
#
#
# $$P(\mu - z_{1-\frac{\alpha}{2}}\sigma \le X \le \mu + z_{1-\frac{\alpha}{2}}\sigma) = 1-\alpha$$
#
# где $z_{1-\frac{\alpha}{2}}=-z_{\frac{\alpha}{2}}$ - квантиль стандартного нормального распределения.
#
# А равенство выше выполняется из-за того, что нормальное распределение симметрично.
# Как раз $z_{0.025}\approx 1.96 \approx 2$
#
# Вот оно - правило двух сигм :)
# В свою очередь доверительный интервал для мат.ожидания (интервальная оценка характеристики генеральной совокупности, сделанная по выборке), примет такой вид:
# $$P(\overline{X_n} - z_{1-\frac{\alpha}{2}}\frac{\sigma}{\sqrt{n}} \le \mu \le \overline{X_n} + z_{1-\frac{\alpha}{2}}\frac{\sigma}{\sqrt{n}}) = 1-\alpha$$
#
# Еще раз, предсказательный интервал говорит, где будет находиться наша случайная величина при условии знания распределения генеральной совокупности(ее дисперсии и математического ожидания), а доверительный – где будет находиться значение статистики в генеральной совокупности(ее мат.ожидание) при условии знания выборки :)
#
# Здесь мы взяли какую-то оценку $\mu$, а именно - выборочное среднее, а $\sqrt{n}$ появился как следствие центральной предельной теоремы для среднего (по выборке длины n стандартное отклонение уменьшается в $\sqrt{n}$ раз)
# # Распределения, производные от нормального
# ## Хи-квадрат
#
# Пусть у нас есть k независимых случайных величин, распределенных стандартно нормально:
# $$X_1...X_n \sim N(0,1)$$
#
# Тогда говорят, что величина $$X = \sum_{i=1}^k{X_i^2}$$
# имеет распределение Хи-квадрат с k степенями свободы.
#
# плотность распределения имеет жутковатый вид, поэтому мы выписывать такое не будем. Но пользоваться им это нам не помешает. При разных k графики функции распределения имеют различный вид. Ниже приведены графики для k = 1,2,3,4
# +
from scipy.stats import chi2
import scipy
import numpy as np
import matplotlib.pyplot as plt

# Chi-square pdf for several degrees of freedom, one subplot each. The
# (row, col) -> (df, colour) mapping reproduces the original six panels;
# the previous cell repeated the same three lines six times and carried
# dead placeholder lists and a commented-out loop.
fig, ax = plt.subplots(2, 3, figsize=(12,7))
panels = [
    (0, 0, 1, 'red'),
    (0, 1, 3, 'red'),
    (0, 2, 4, 'violet'),
    (1, 0, 2, 'red'),
    (1, 1, 5, 'blue'),
    (1, 2, 6, 'green'),
]
for row, column, k, colour in panels:
    # Restrict the x-range to the central 98% of the distribution.
    x = np.linspace(chi2.ppf(0.01, k), chi2.ppf(0.99, k), 100)
    ax[row, column].plot(x, chi2.pdf(x, k), c=colour, lw=2, alpha=0.6, label='chi2 pdf')
# -
# ## Распределение Стьюдента
#
# Пусть у нас есть стандартная нормальная случайная величина и величина, распределенная по хи-квадрат с k степенями свободы:
# $$X \sim N(0,1)$$
# $$Y \sim \chi_k^2$$
#
# Тогда говорят, что величина $$\phi = \frac{X}{\sqrt{Y/k}}$$
# имеет распределение Стьюдента с k степенями свободы.
#
# Ниже приведены графики для k = 1,2,3,4
# +
from scipy.stats import t
import numpy as np
import matplotlib.pyplot as plt

# Student's t pdf for k = 1..4 degrees of freedom on one axis: heavier tails
# for small k, approaching the normal pdf as k grows. Replaces four
# copy-pasted plot calls and dead placeholder lists with a single loop.
fig, ax = plt.subplots(1, 1, figsize=(10,5))
for k, colour in zip([1, 2, 3, 4], ['red', 'violet', 'blue', 'green']):
    # Restrict the x-range to the central 98% of the distribution.
    x = np.linspace(t.ppf(0.01, k), t.ppf(0.99, k), 100)
    ax.plot(x, t.pdf(x, k), c=colour, lw=2, alpha=0.6, label='student pdf')
# -
# Такое распределение всегда имеет центр в нуле и , хотя и похоже на нормальное, имеет "более тяжелые хвосты". Так говорят, когда вероятность значений сильно отклоняющихся от среднего, выше, чем у нормального. При больших k распределение Стьюдента слабо отличается от нормального распределения (при k > 30-40 на глаз не отличишь)
# ## Распределение Фишера
#
# Пусть у нас есть две независимые случайные величины, распределенные по хи-квадрат с k и m степенями свободы соответственно:
# $$X \sim \chi_k^2$$
# $$Y \sim \chi_m^2$$
#
# Тогда говорят, что величина $$\psi = \frac{X/k}{Y/m}$$
# имеет распределение Фишера с параметрами k и m.
#
# плотность распределения имеет жутковатый вид, поэтому мы выписывать такое не будем. Но пользоваться им это нам не помешает. При разных k графики функции распределения имеют различный вид. Ниже приведены графики для (1,1) (2,2) (3,3) и (4,4)
# +
from scipy.stats import f
import numpy as np
import matplotlib.pyplot as plt

# F-distribution pdf for (df1, df2) = (1,1), (2,2), (3,3), (4,4),
# one subplot per parameter pair.
df = [1,2,3,4]
colors = ['red', 'violet', 'blue', 'green']
fig, ax = plt.subplots(2, 2, figsize=(10,5))
for axis, k, color in zip(ax.flat, df, colors):
    x = np.linspace(f.ppf(0.01, k, k), f.ppf(0.99, k, k), 100)
    # the originals were labelled 'student pdf' by copy-paste; these are F pdfs
    axis.plot(x, f.pdf(x, k, k), c=color, lw=2, alpha=0.6, label='fisher pdf')
# -
# # Зачем мы городили огород?
#
# Взглянем еще раз на нормальное распределение.
# Если $$X \sim N(\mu,\sigma^2)$$
#
# То выборочное среднее $$\overline{X_n} \sim N(\mu,\frac{\sigma^2}{n})$$
#
# А выборочная дисперсия имеет вид:$$S_n^2 = \frac{1}{n-1}\sum(X_i-\overline{X_n})^2$$
#
# В скобках стоит сумма квадратов нормально распределенных случайных величин! Следовательно выборочная дисперсия имеет распределение хи-квадрат с точностью до нормировки, а именно:
# $$(n-1)\frac{S_n^2}{\sigma^2} \sim \chi^2_{n-1}$$
#
# Еще одна крайне полезная величина, получаемая из нормального распределения, имеет распределение Стьюдента. Это так называемая T-статистика:
# $$\frac{\bar{X_n} - \mu}{S_n/\sqrt{n}} \sim St(n-1)$$
#
# Ну а если у нас будет две нормальные выборки вида:
# $$X_1 \sim N(\mu_1,\sigma_1^2)$$
# $$X_2 \sim N(\mu_2,\sigma_2^2)$$
#
# Тогда величина $$\frac{S_1^2/\sigma_1^2}{S_2^2/\sigma_2^2} \sim Fisher(n_1-1,n_2-1)$$
# Ну а теперь, когда мы знаем многие необходимые распределения, займемся построением доверительных интервалов.
# ## Доверительный интервал для среднего:
from statsmodels.stats.weightstats import _zconfint_generic, _tconfint_generic
# Представим, что у нас есть выборка из большой группы людей, у которых мы измеряем вес. Вес в этой группе распределен нормально со средним 70 кг и стандартным отклонением 5 кг.
import numpy as np
from numpy.random import normal
from scipy.stats import norm
# +
# Draw a random sample of 1000 weights ~ N(70, 5) and overlay the true pdf
sample = np.array([normal(70,5) for _ in range(1000)])
fig, ax = plt.subplots(1, 1, figsize=(10,5))
x = np.linspace(norm.ppf(0.01, 70,5),norm.ppf(0.99, 70,5), 100)
ax.plot(x, norm.pdf(x, 70,5),c='red', lw=2, alpha=0.6, label='Norm pdf')
ax.set_title('Normal distribution pdf')
# -
# ### Point estimate
print(f"Среднее {sample.mean()}, Стандартное отклонение {sample.std(ddof=1)}")
# ### Интервальная оценка
#
# #### z-интервал
# Если нам вдруг нам стало известно значение дисперсии в генеральной совокупности, то интервал для среднего в выборке можно получить как:
#
# $$\bar{X}_n \pm z_{1-\frac{\alpha}{2}} \frac{\sigma}{\sqrt{n}}$$
# Known population standard deviation (the weights were generated with sigma = 5)
sigma = 5
# The standard error of the mean is sigma / sqrt(n); the original passed
# np.sqrt(5/len(sample)), i.e. sqrt(sigma/n), which understates the interval.
print("95% Доверительный интервал для среднего", _zconfint_generic(sample.mean(),
                                                                  sigma/np.sqrt(len(sample)),
                                                                  0.05, 'two-sided'))
# #### t-интервал
# В действительности дисперсия нам неизвестна. И поэтому вместо дисперсии генеральной совокупности нам надо подставить ее выборочную оценку, а вместо квантиля нормального распределения - квантиль распределения Стьюдента.
# $$\bar{X}_n \pm t_{1-\frac{\alpha}{2}} \frac{S}{\sqrt{n}}$$
#
# Еще раз отметим, что при количестве наблюдений > 30-40 распределение Стьюдента почти не отличается от нормального, так что можно вместо $t_{1-\frac{\alpha}{2}}$ брать $z_{1-\frac{\alpha}{2}}$
# Standard error estimated from the sample (population sigma unknown).
# `sqrt` was a bare name only defined after the later %pylab cell; use np.sqrt.
sample_std = sample.std(ddof=1)/np.sqrt(len(sample))
# The t interval has n - 1 degrees of freedom, matching S_n with ddof=1.
print("95% Доверительный интервал для среднего", _tconfint_generic(sample.mean(), sample_std,
                                                                  len(sample) - 1,
                                                                  0.05, 'two-sided'))
# Manual check of the z-interval lower bound: mean - z_{0.975} * sigma / sqrt(n).
# `scipy` itself is only imported further below; `norm` is already in scope.
# The standard error is sigma / sqrt(n), not sqrt(sigma / n).
np.mean(sample) - norm.ppf(1-0.05/2)*5/np.sqrt(len(sample))
norm.ppf(0.05/2)
# ## Доверительный интервал для доли
#
# Некоторые часто встречающиеся случайные величины имеют бинарные значения (клики в рекламные баннеры, отток пользователей, возвращаемость на сайт в течение недели и многое другое). Для них тоже нужно уметь строить доверительные интервалы.
# +
general = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1]
sample = np.random.choice(general, size = 5047, replace = True)
# -
len(general)
len(sample)
# Истинное значение доли в генеральной совокупности есть
np.mean(general)
# Давайте посмотрим на доверительный интервал для этой бернуллиевской случайной величины **на основе нормального распределения (ЦПТ)**:
#
# $$p \pm z_{1-\frac{\alpha}{2}} \sqrt{\frac{p\left(1-p\right)}{n}}$$
#
# Здесь *p* - наша оценка доли по выборке.
from statsmodels.stats.proportion import proportion_confint
# Normal-approximation (Wald) 95% confidence interval for the sample proportion
normal_interval = proportion_confint(sum(sample), len(sample), method = 'normal')
normal_interval
# width of the interval
normal_interval[-1]-normal_interval[0]
# Существует множество методов уточнения этой оценки, один из которых обязательно стоит упомянуть - это **метод Уилсона**. Интервал в этом случае примет вид:
#
# $$\frac1{ 1 + \frac{z^2}{n} } \left(p + \frac{z^2}{2n} \pm z \sqrt{ \frac{p\left(1-p\right)}{n} + \frac{
# z^2}{4n^2} } \right)$$
#
# Здесь $z == z_{1-\frac{\alpha}{2}}$ , а *p* - наша оценка доли по выборке.
# Wilson score interval; the original reused the name `normal_interval`
# even though this is the Wilson method.
wilson_interval = proportion_confint(sum(sample), len(sample), method = 'wilson')
wilson_interval
# Еще одно полезное знание, которое мы можем отсюда извлечь - способ расчета размера выборки, необходимого для получения интервала заданной ширины. Сделать это можно с помощью соответствующей функции из statsmodels
from statsmodels.stats.proportion import samplesize_confint_proportion
# samplesize_confint_proportion takes the interval HALF-width: 0.005 here,
# i.e. a total interval width of 0.01 (the original comment had them swapped)
sufficient_size = int(np.ceil(samplesize_confint_proportion(sample.mean(), 0.005)))
sufficient_size
# redraw a sample of the computed size and check the resulting interval width
sample2 = np.random.choice(general, size = sufficient_size)
normal_interval2 = proportion_confint(sum(sample2), len(sample2), method = 'normal')
normal_interval2
normal_interval2[-1] - normal_interval2[0]
# ## Доверительный интервал для разности долей в случае двух выборок
# Пусть проводится тест новой промомеханики: часть пользователей интернет-магазина получает скидку на определенную категорию товаров, а потом измеряется, сделали они заказ в этой категории или нет. Так мы сможем посмотреть, увеличивается ли количество заказов в нашей категории в случае использования промомеханики
import scipy
np.random.seed(45)
group_a = np.random.binomial(n = 1, p = 0.08, size = 1000)
group_b = np.random.binomial(n = 1, p = 0.06, size = 1000)
# Можно построить оценку в виде доверительного интервала Уилсона для каждой из выборок:
interval_group_a = proportion_confint(group_a.sum(), len(group_a), method = 'wilson')
interval_group_b = proportion_confint(group_b.sum(), len(group_b), method = 'wilson')
print('Интервальная оценка для А',interval_group_a)
print('Интервальная оценка для В',interval_group_b)
# Доверительные интервалы пересекаются, значит ли это , что доли в выборках неотличимы?
#
# Нет, мы должны построить доверительный интервал для разности:
#
# $$p_1 - p_2 \pm z_{1-\frac{\alpha}{2}}\sqrt{\frac{p_1(1 - p_1)}{n_1} + \frac{p_2(1 - p_2)}{n_2}}$$
def difference_in_proportions(sample1, sample2, alpha = 0.05):
    """Normal-approximation CI for p1 - p2 over two independent binary samples.

    Returns a (lower, upper) tuple at confidence level 1 - alpha.
    """
    z = scipy.stats.norm.ppf(1 - alpha / 2.)
    n1, n2 = len(sample1), len(sample2)
    p1 = sum(sample1) / float(n1)
    p2 = sum(sample2) / float(n2)
    center = p1 - p2
    # standard error of the difference of two independent proportions
    margin = z * np.sqrt(p1 * (1 - p1) / n1 + p2 * (1 - p2) / n2)
    return (center - margin, center + margin)
print("Интервал для разности долей:", difference_in_proportions(group_a, group_b))
# Интервал пересекает ноль, значит разность статистически не значима :)
# До сих пор мы говорили о независимых выборках (пользователи из одной группы получили скидку, а из другой – нет), но так бывает не всегда. Например мы могли показать два разных рекламных баннера одной и той же выборке людей (тогда при оценке интервала надо учесть , что выборки связанные). Делается это так:
#
#
# $X_1$\ $X_2$ | 1 | 0 | $\sum$
# ------------ | ---- | ----- | ---
# 1 | a | b | a + b
# 0 | c | d | c + d
# $\sum$ | a + c| b + d | n
#
# $$ p_1 = \frac{a + b}{n}$$
#
# $$ p_2 = \frac{a + c}{n}$$
#
# $$ p_1 - p_2 = \frac{b - c}{n}$$
#
#
# $$ \frac{b - c}{n} \pm z_{1-\frac{\alpha}{2}}\sqrt{\frac{b + c}{n^2} - \frac{(b - c)^2}{n^3}}$$
def difference_related_samples(sample1, sample2, alpha = 0.05):
    """CI for the difference of proportions of two PAIRED binary samples.

    Based on the discordant-pair counts b = #(1,0) and c = #(0,1);
    returns a (lower, upper) tuple at confidence level 1 - alpha.
    """
    z = scipy.stats.norm.ppf(1 - alpha / 2.)
    pairs = list(zip(sample1, sample2))
    n = len(pairs)
    # count discordant pairs
    b = 0
    c = 0
    for first, second in pairs:
        if first == 1 and second == 0:
            b += 1
        elif first == 0 and second == 1:
            c += 1
    center = float(b - c) / n
    margin = z * np.sqrt(float(b + c) / n**2 - float((b - c)**2) / n**3)
    return (center - margin, center + margin)
# +
np.random.seed(45)
group_a = np.random.binomial(n = 1, p = 0.07, size = 1000) # Клики группы в первый баннер
group_b = np.random.binomial(n = 1, p = 0.04, size = 1000) # Клики группы во второй баннер
difference_related_samples(group_a, group_b)
# -
# Интервал снова не пересекает ноль. Стало быть , тест говорит, что баннер B лучше, чем баннер A.
# ## Доверительный интервал c помощью бутстрэпа
# Представим, что нам нужно построить интервальную оценку для какого-то показателя, о распределении которого мы не знаем ничего. В качестве примера можно взять квантиль 40%.
# Чтоб такой доверительный интервал построить, нам нужно получить выборочное распределение этой статистики. А что если выборка одна и распределение получить нельзя?
# Как бы мы получали такое распределение, будь у нас доступ к генеральной совокупности? Понятное дело - мы бы набрали из нее выборок, на каждой из которых посчитали бы искомую статистику, а потом построили соответствующее распределение. Такое нельзя сделать почти никогда.
#
# Есть второй вариант – сделать предположение о характере распределения нашей статистики и оценить параметры этого распределения. Это звучит отлично, если у нас есть какой-то способ понять, как распределена наша статистика теоретически (а по условию мы про это распределение ничего не знаем)
#
# Вариант номер три – бутстрэп. Поскольку генеральная совокупность нам недоступна, давайте в качестве приближения воспользуемся нашей выборкой и начнем генерировать из нее псевдовыборки размера n с возвращением. Получив некоторое количество таких псевдовыборок, оценим по ним распределение нашей статистики.
# Возьмем датасет об извержениях гейзера Old Faithful:
import pandas as pd
df = pd.read_csv('https://gist.githubusercontent.com/curran/4b59d1046d9e66f2787780ad51a1cd87/raw/9ec906b78a98cf300947a37b56cfe70d01183200/data.tsv', sep='\t')
df
# переведем длительность извержения в минуты:
df['time']=df['eruptions']*60
# %pylab inline
# Красным цветом на рисунке указана выборочная медиана
pylab.figure(figsize(12, 5))
pylab.subplot(1,2,1)
pylab.hist(df.time, bins = 20, color = 'green', range = (50, 400), label = 'eruptions')
pylab.vlines(np.median(df['time']),0,50, color = 'red')
pylab.legend()
np.median(df['time'])
# Дадим интервальную оценку медианного значения длительности извержения:
def bootstrap_samples(data, n_samples):
    """Draw `n_samples` bootstrap pseudo-samples (with replacement) from `data`.

    Returns an array of shape (n_samples, len(data)).
    """
    size = len(data)
    # each row of indices selects one full-size resample of the original data
    resample_indices = np.random.randint(0, size, (n_samples, size))
    return data[resample_indices]
def boundaries_with_quantiles(stat, alpha):
    """Return the empirical [alpha/2, 1 - alpha/2] percentile bounds of `stat`."""
    lower = 100 * alpha / 2.
    upper = 100 * (1 - alpha / 2.)
    return np.percentile(stat, [lower, upper])
# +
np.random.seed(42)
eruptions = df['time'].values
print(eruptions)
# The original drew only 3 bootstrap resamples -- far too few for a 95% CI
# (the Beehive cell below uses 1000); use 1000 here as well so the later
# zip() of the two median lists does not silently truncate to 3 pairs.
bootstrapped_medians = list(map(np.median, bootstrap_samples(eruptions, 1000)))
print("95% доверительный интервал для времени извержения Old Faithful:",
      boundaries_with_quantiles(bootstrapped_medians, 0.05))
# -
# Если немного потрудиться, можно [отсюда](https://geysertimes.org/geyser.php?id=Beehive) достать времена извержений другого гейзера. Давайте попробуем дать интервальную оценку разности медианных времен извержений этих двух гейзеров.
data_raw =[
'4m 35s',
'5m 13s',
'4m 55s',
'4m 46s',
'4m 53s',
'4m 58s ',
'4m 52s ',
'4m 47s ',
'4m 47s ',
'4m 42s ',
'4m 49s ',
'4m 45s ',
'4m 56s ',
'4m 35s ',
'4m 51s',
'4m 42s',
'4m 50',
'4m 39s',
'4m 52s ',
'4m 42s',
'5m 18s',
'4m 53s',
'5m 2s']
# Распарсим это дело в секунды, а дальше сделаем так же, как в прошлый раз.
import re
def process(x):
    """Parse a duration string such as '4m 35s' into total seconds.

    Tolerates entries with a missing seconds field (e.g. '5m'): the seconds
    default to 0 instead of raising IndexError as the original did.
    """
    numbers = list(map(int, re.findall(r'\d+', x)))
    minutes = numbers[0]
    seconds = numbers[1] if len(numbers) > 1 else 0
    return minutes * 60 + seconds
data_processed = np.array(list(map(process,data_raw)))
np.median(data_processed)
# +
bootstrapped_medians_2 = list(map(np.median, bootstrap_samples(data_processed, 1000)))
print("95% доверительный интервал для времени извержения Beehive:",
boundaries_with_quantiles(bootstrapped_medians_2, 0.05))
# -
# Тогда точечная оценка разности медиан:
print("Разность:", np.median(data_processed) - np.median(eruptions))
# А доверительный интервал:
# Each pair is (Old Faithful median, Beehive median), so x[1] - x[0] matches
# the sign of the point estimate data_processed - eruptions printed above.
# NOTE(review): zip truncates to the shorter of the two median lists.
median_difference = list(map(lambda x: x[1] - x[0], zip(bootstrapped_medians,
                                                        bootstrapped_medians_2)))
print("95% доверительный интервал для разности медианных времен извержений",
      boundaries_with_quantiles(median_difference, 0.05))
# Бутстрэп выглядит как совсем магическое ухищрение, но в действительности он очень хорошо работает, а при некоторых ограничениях вид и качество интервала, полученного с помощью бутстрэпа, могут быть оценены в явном виде (но тут уже придется покопаться в математике посерьезнее)
#
| lect5/lect5. Confidence intervals.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# Code for cleaning and preparing the daily surveys data
# +
from __future__ import absolute_import, division, print_function
import datetime
import time
import os
import pandas as pd
# +
import sys
sys.path.insert(0, '../../src/data/')
from config import *
# -
# # Config
# # read
daily_survey_data_anon = pd.read_hdf(surveys_anon_store_path, 'daily/daily_survey_data_anon')
daily_survey_data_anon.head()
# # Translate values
# +
def Q1_Translation(answer):
    """
    Map a daily-survey Q1 string answer onto a 1.0-7.0 numeric scale.
    Unrecognised answers map to None.
    """
    scale = {
        'Extremely good': 7.0,
        'Moderately good': 6.0,
        'Slightly good': 5.0,
        'Neither good nor bad': 4.0,
        'Slightly bad': 3.0,
        'Moderately bad': 2.0,
        'Extremely bad': 1.0,
    }
    return scale.get(answer)
def Q2_Translation(answer):
    """
    Map a daily-survey Q2 string answer onto a 1.0-7.0 numeric scale.
    Unrecognised answers map to None.
    """
    scale = {
        'Strongly agree': 7.0,
        'Agree': 6.0,
        'Somewhat agree': 5.0,
        'Neither agree nor disagree': 4.0,
        'Somewhat disagree': 3.0,
        'Disagree': 2.0,
        'Strongly disagree': 1.0,
    }
    return scale.get(answer)
# -
# Apply translation functions
print(len(daily_survey_data_anon))
# keep only completed surveys; Progress is stored as a string, hence '100'
daily_survey_data_clean = daily_survey_data_anon[daily_survey_data_anon['Progress'] == '100'].copy()
print(len(daily_survey_data_clean))
# convert the Likert-scale answer strings to floats (unmatched answers -> NaN)
daily_survey_data_clean['Q1'] = daily_survey_data_clean['Q1'].apply(Q1_Translation)
daily_survey_data_clean['Q2'] = daily_survey_data_clean['Q2'].apply(Q2_Translation)
daily_survey_data_clean
daily_survey_data_clean.dtypes
# # Translate Dates
daily_survey_data_clean['RecordedDate_ts'] = pd.to_datetime(daily_survey_data_clean['RecordedDate']).dt.tz_localize(time_zone)
daily_survey_data_clean
# # Assign "effective date" to each record
# This is done based on the time of day it was answered. Keep in mind that in some cases, it might be that two answers are relevant to the same day. In these cases, take an average (and keep count)
effective_date_breakpoint_hour = 12 # anything before 12:00 will belong to the previous date
test_ts1 = pd.Timestamp("2018-06-18 15:22:40-04:00", tz=time_zone)
test_ts2 = pd.Timestamp("2018-08-03 09:24:19-04:00", tz=time_zone)
test_ts2
# +
def reset_time(ts):
    """Return `ts` truncated to midnight (microseconds cleared as well)."""
    return ts.replace(hour=0, minute=0, second=0, microsecond=0)

def effective_date(ts, breakpoint_hour=12):
    """Map a response timestamp to the date the answer refers to.

    Anything recorded before `breakpoint_hour` (default 12, matching
    `effective_date_breakpoint_hour` declared above, which the original
    hardcoded) is attributed to the previous day.
    """
    new_ts = ts
    if ts.hour < breakpoint_hour:
        new_ts = ts - pd.Timedelta(days=1)
    return reset_time(new_ts)
# -
daily_survey_data_clean['effective_ts'] = daily_survey_data_clean['RecordedDate_ts'].apply(effective_date)
daily_survey_data_clean
# # Remove columns we don't need
daily_survey_data_clean = daily_survey_data_clean[['effective_ts','member','Q1','Q2']]
daily_survey_data_clean.head()
# # Handle multiple answers per day
# average multiple answers per (day, member); also keep how many were averaged
f = {'Q1':['mean'], 'Q2':['mean','count']}
daily_survey_data_clean = daily_survey_data_clean.groupby(['effective_ts','member']).agg(f)
# flatten the aggregated MultiIndex columns; 'c' is the per-day answer count
daily_survey_data_clean.columns = ['Q1','Q2','c']
daily_survey_data_clean.head()
# +
#daily_survey_data_clean = daily_survey_data_clean.reset_index()
# -
daily_survey_data_clean[daily_survey_data_clean.isnull().any(axis=1)]
# Note - I can't explain these NaNs. This is just funny Qualtrics stuff. The surveys show as completed.
# # Remove weekend data
# Weekend data shouldn't occur, but it does because of our heuristic
print(len(daily_survey_data_clean))
df = daily_survey_data_clean.reset_index()
# dayofweek: Monday=0 ... Sunday=6, so < 5 keeps weekdays only
daily_survey_data_clean = df[df.effective_ts.dt.dayofweek < 5]
print(len(daily_survey_data_clean))
daily_survey_data_clean.head()
# # Remove researchers
print(len(daily_survey_data_clean))
daily_survey_data_clean = daily_survey_data_clean.query('member not in ("7EYKW64FHG","O3PUFCVB5K")').copy()
print(len(daily_survey_data_clean))
# # Remove data from before member was active
members = pd.read_hdf(analysis_store_path, 'metadata/members')
print(len(daily_survey_data_clean))
daily_survey_data_clean = daily_survey_data_clean.join(members['start_date_ts'], on='member').query('effective_ts >= start_date_ts').copy()
print(len(daily_survey_data_clean))
del daily_survey_data_clean['start_date_ts']
daily_survey_data_clean.head()
# # Remove data before/after the experiment took place
period1_start_ts = pd.Timestamp(period1_start, tz=time_zone)
period1_end_ts = pd.Timestamp(period1_end, tz=time_zone)
period2_start_ts = pd.Timestamp(period2_start, tz=time_zone)
period2_end_ts = pd.Timestamp(period2_end, tz=time_zone)
# +
# keep rows whose effective date falls in either experimental period;
# intervals are half-open: [start, end)
cond = \
((daily_survey_data_clean.effective_ts >= period1_start_ts) & (daily_survey_data_clean.effective_ts < period1_end_ts)) | \
((daily_survey_data_clean.effective_ts >= period2_start_ts) & (daily_survey_data_clean.effective_ts < period2_end_ts))
print(len(daily_survey_data_clean))
daily_survey_data_clean = daily_survey_data_clean.loc[cond,:].copy()
print(len(daily_survey_data_clean))
# -
# # Store
with pd.HDFStore(surveys_clean_store_path) as store:
store.put('daily/daily_survey_data_clean', daily_survey_data_clean, format='table')
| notebooks/10-data-prep/01.15-ol-daily.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:env]
# language: python
# name: conda-env-env-py
# ---
# **conditioning** how rapidly function outputs change with respect to small changes in function inputs.
| notebooks/Conditioning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <img src='images/cover.png' width='linewidth' align="center">
# + [markdown] slideshow={"slide_type": "slide"}
# #### September, 2018, <NAME>, Math Professor at Hebrew University of Jerusalem
# ### -- "What About The Efforts of <font color="blue">Google, IBM, Microsoft, Intel, Alibaba, Rigetti, D-Wave, QuantumCircuits, IonQ, NIST, Atos...</font> to Reach Very Stable Qubits And Demonstrate Quantum Supremacy?"
# ### <font color="red">--"They Will All Fail".</font>
# + [markdown] slideshow={"slide_type": "subslide"}
# #### 1977, <NAME>, Founder of digital equipment corporation
# ### <font color="red">"There Is No Reason Anyone Would Want A Computer In Their Home."</font>
# #### 1995, <NAME>, Founder of 3Com
# ### <font color="red">"I Predict The Internet Will Soon Go Spectacularly Supernova And In 1996 Catastrophically Collapse.</font>"
# + [markdown] slideshow={"slide_type": "slide"}
# # <center> What Are Qubits and How to Manipulate Them? </center>
# + [markdown] slideshow={"slide_type": "subslide"}
# | | Bits |Probabilistic Bits |Qubits |
# |--- |--- |--- |--- |
# |State (Single Unit)|Bit $\in \{0,1\}$ |Real Vector <br> $\vec{s}=a\vec{0}+b\vec{1}$ <br> $a,b\in R_+$ <br> $a+b=1$ |Complex Vector <br> $\vert\psi\rangle=\alpha \vert 0\rangle +\beta \vert 1\rangle$ <br> $ \alpha,\beta\in \mathcal{C}$ <br> $\vert \alpha\vert^2+\vert\beta\vert^2=1$ |
# |State (Multi Unit) |Bitstring $\in \{0,1\}^n$ |Prob. Distribution <br>(Stochastic Vector) <br>$\vec{s}=\{p_x\}_{x\in \{0,1\}^n}$|Wavefunction <br>(Complex Vector)<br> $\vert\psi\rangle=\{\alpha_x\}_{x\in \{0,1\}^n}$|
# |Operations |Boolean Logic | Stochastic Matrices <br>$\sum_{j=1}^S P_{i,j}=1$ |Unitary Matrices <br> $U^{\dagger}U=1$|
# |Component Ops |Boolean Gates |Tensor Product of Matrices |Tensor Product of Matrices|
#
# $$|\psi\rangle = \alpha_0|00\cdots 0\rangle + \alpha_1|00\cdots 01\rangle + \cdots \alpha_{2^{n-1}}|11\cdots 1\rangle$$
# $$|\phi\rangle = a|00\rangle - b|11\rangle \hspace{0.5cm}\longleftarrow entangled \hspace{0.2cm} state$$
#
# + slideshow={"slide_type": "subslide"}
from qutip import Bloch
import numpy as np
# Bloch-sphere visualisation of the equal-superposition state (|0> + |1>)/sqrt(2)
sphere = Bloch()
sphere.add_vectors([1,0,1]/np.sqrt(2))
sphere.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# <img src='images/nand_toffoli.jpg' width=200 align='left'>
# <img src='images/Q_universal_set.png' width=400 align='right'>
# <img src='images/toffoli.png' width=400 align='bottom'>
# + [markdown] slideshow={"slide_type": "subslide"}
# <table><tr>
# <td><img src='images/quantum_dots.png' width=250 height=400 align="left"> </td>
# <td><img src='images/qdot_circuit_diagram.png' width=250 height=400 align="left"> </td>
# <td><img src='images/qdot_state_prep.png' width=250 height=400 align="left"> </td>
# </tr></table>
# <table><tr>
# <td><img src='images/qdot_qbit_control.png' width=250 height=400 align="left"> </td>
# <td><img src='images/qdot_2_qbit_gate.png' width=250 height=400 align="left"> </td>
# <td><img src='images/qdot_readout.png' width=250 height=400 align="left"> </td>
# </tr></table>
# ##### Nature Nanotechnology 8, 432-437 (2013)
# + [markdown] slideshow={"slide_type": "slide"}
# # <center> Quantum Computer Architecture </center>
# + [markdown] slideshow={"slide_type": "subslide"}
# <img src='images/Q-accelerator.png' width=600 align="center">
# + [markdown] slideshow={"slide_type": "subslide"}
# <img src='images/QC_stack_rigetti.png' width=450 align="center">
# #### Quil is considered a low-level quantum Intermediate Representation with classical control
# #### arXiv: 1608.03355 (2017)
# + [markdown] slideshow={"slide_type": "subslide"}
# <table><tr>
# <td><img src='images/Q_comp.jpg' width=250 height=400 align="left"> </td>
# <td><img src='images/refrigerator.png' width=250 height=200 align="right"> </td>
# </tr></table>
# + [markdown] slideshow={"slide_type": "subslide"}
# <table><tr>
# <td><img src="images/agave.png" style="width:300px;height:150px;" align="center"></td>
# <td><img src="images/gate_performance.png" style="width:320px;height:200px;" align="left"></td>
# <td><img src="images/2qbit_performance.png" style="width:200px;height:200px;" align="right"></td>
# </tr></table>
# + [markdown] slideshow={"slide_type": "subslide"}
# <table><tr>
# <td><img src='images/pyquil_stack.jpg' width=350 align='left'> </td>
# <td><img src='images/cloud_services.jpeg' width=450 align='right'> </td>
# </tr></table>
# ### Rigetti Computing is offering a <font color='red'>1 million dollar</font> prize for the first conclusive demonstration of quantum advantage on QCS.
# ### More details of the prize will be announced on October 30th, 2018
# + [markdown] slideshow={"slide_type": "slide"}
# # <center>What Problems Can Quantum Computers Solve?</center>
# + [markdown] slideshow={"slide_type": "subslide"}
# <img src='images/BQP.jpg' width=600 align="center">
# #### Slide from one of Scott Aaronson's lectures
# + [markdown] slideshow={"slide_type": "subslide"}
# ## <center>Some experimental implementations of quantum algorithms </center>
# |Algorithm | Technology |Solved Problem |
# |--- |--- |--- |
# |Shor's |Optics |Factorisation of 21 |
# |Grover's |NMR |Unstructured search <br>N = 8 |
# |Quantum Annealing |D-Wave 2X |Ising model on a graph <br>with 1097 vertices|
# |<NAME> |Optics<br> NMR |2 × 2 system of <br>linear equations |
# + [markdown] slideshow={"slide_type": "slide"}
# ## <center> DEMO </center>
# # <center>Quantum Chemistry</center>
# ## <center>with OpenFermion and Pyquil</center>
# + [markdown] slideshow={"slide_type": "subslide"}
# #### $$H(\vec{r};\vec{R}) = - \frac{1}{2}\sum_i\nabla_i^2 - \sum_{i,j}\frac{Z_j}{|R_j-r_i|}+\sum_{i<j}\frac{1}{|r_i-r_j|} + \sum_{i<j}\frac{Z_iZ_j}{|R_i-R_j|}$$
# #### $$H(R) = h_0 + \sum_{i,j}h_{i,j}a_i^{\dagger}a_j + \sum_{i,j,k,l}h_{i,j,k,l}a_i^{\dagger}a_j^{\dagger}a_ka_l \equiv \sum_i h_i$$
# ##### $$a^{\dagger}_i = \sigma_{i(-)}\otimes \sigma_{z}^{\otimes i}, \hspace{0.5 cm} a_i = \sigma_{i(+)}\otimes \sigma_{z}^{\otimes i} \hspace{0.5cm}\leftarrow Jordan-Wigner$$
# #### $$U = \exp\left[-iH\delta t \right] = \exp\left[-i\sum_j h_j\delta t \right] \approx \prod_j e^{ih_j\delta t} + O(\delta t^2)$$
# + [markdown] slideshow={"slide_type": "subslide"}
# ### <center>Digital quantum evolution of quantum state</center>
# <img src="images/circuit.png" style="width:600px;height:200px;" align="center">
# #### One can use, e.g. Phase Estimation Algorithm Diagram to measure Ground State Energy if proper Slater determinant constructed
# + [markdown] slideshow={"slide_type": "subslide"}
# <img src="images/qbit_vs_basis_size.png" style="width:600px;height:500px;" align="center">
# #### Science 309, 1705 (2005)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## <center>Coding with OpenFermion and Rigetti's Pyquil</center>
# + slideshow={"slide_type": "subslide"}
from openfermion.hamiltonians import MolecularData
# H2 in a minimal STO-3G basis, neutral singlet
geometry = [['H', [0, 0, 0]],
            ['H', [0, 0, 0.74]]] # H--H distance = 0.74 angstrom (74 pm)
basis = 'sto-3g'
multiplicity = 1 #(2S+1): singlet ground state
charge = 0
h2_molecule = MolecularData(geometry, basis, multiplicity, charge)
from openfermionpyscf import run_pyscf
# run a PySCF SCF calculation to populate the molecular integrals
h2_molecule = run_pyscf(h2_molecule)
one_electron_integrals = h2_molecule.one_body_integrals
two_electron_integrals = h2_molecule.two_body_integrals
h2_filename = h2_molecule.filename
h2_molecule.save()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Mapping to qubits and Compiling the circuits on Rigetti QVM or QPU
# + slideshow={"slide_type": "subslide"}
from openfermion.transforms import get_fermion_operator, jordan_wigner
# map the fermionic molecular Hamiltonian onto qubits via Jordan-Wigner
h2_qubit_hamiltonian = jordan_wigner(get_fermion_operator(h2_molecule.get_molecular_hamiltonian()))
from openfermion.ops import QubitOperator
from forestopenfermion import pyquilpauli_to_qubitop, qubitop_to_pyquilpauli
# convert the OpenFermion QubitOperator into pyquil Pauli terms
pyquil_h2_qubit_hamiltonian_generator = qubitop_to_pyquilpauli(h2_qubit_hamiltonian)
from pyquil.quil import Program
from pyquil.gates import *
from pyquil.paulis import exponentiate
import pyquil.api as api
# connection to the Rigetti quantum virtual machine
qvm = api.QVMConnection()
localized_electrons_program = Program()
# put two-localized electrons on the first spatial site
localized_electrons_program.inst([X(0), X(1)])
pyquil_program = Program()
#First-order Trotter evolution for t=0.1
for term in pyquil_h2_qubit_hamiltonian_generator.terms:
    pyquil_program += exponentiate(0.1 * term)
# sample the evolved state 10 times, then inspect the exact wavefunction
results = qvm.run(localized_electrons_program+pyquil_program, [0,1,2,3], trials=10)
wvf = qvm.wavefunction(localized_electrons_program+pyquil_program, [0,1,2,3])
print(wvf)
# + slideshow={"slide_type": "subslide"}
print("number of gates is:", len(pyquil_program))
print()
print(pyquil_program)
# This Quil program can be sent to the Forest cloud API
# for simulation or for execution on hardware.
# + [markdown] slideshow={"slide_type": "slide"}
# <table><tr>
# <td><img src="images/Shor_lecture.jpg" style="width:250px;height:180px;" ></td>
# <td><img src="images/gate42.jpg" style="width:250px;height:180px;" ></td>
# </tr></table>
# <table><tr>
# <td><img src="images/sevan_summit.jpg" style="width:250px;height:180px;" ></td>
# <td><img src="images/team.jpg" style="width:250px;height:180px;" ></td>
# </tr></table>
#
# + [markdown] slideshow={"slide_type": "subslide"}
# <td><img src="images/end.png" style="linewidth" ></td>
| QC_presentations/STC/QC_presentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/env python3
"""torch_logistic_regression.ipynb

<NAME> 2019, with help from <NAME> wrt pytorch.

Performs logistic regression on feature vectors against positional
matching labels using pytorch.

Requires patch_catalogue.csv (as output by feature_vectors.ipynb) to be
present in cwd, as well as manual_labels.csv.

Will save the following:
weights.csv, predictions.csv, objects.csv, multi_objects.csv,
torch_lr_losses.pdf, torch_lr_weights.pdf, torch_lr_predictions.pdf, torch_lr_partition.pdf
"""
import csv
import pandas as pd
import numpy as np
from tqdm import tqdm_notebook as tqdm
import matplotlib.pyplot as plt
import torch
from torch.autograd import Variable
import torch.nn.functional as F
# +
# load the patch catalogue (load the dataset and make it iterable)
catalogue = pd.read_csv('patch_catalogue.csv')
catalogue.set_index(['name_TGSS','name_NVSS'],inplace=True)
scores = catalogue['score']
# remove positions, could test to see if it recovers separation?
del (catalogue['ra_TGSS'],catalogue['dec_TGSS'],
catalogue['ra_NVSS'],catalogue['dec_NVSS'],
catalogue['score'])
# these derived log features prove more useful than the regular values
catalogue['log_flux_TGSS'] = np.log10(catalogue['peak_TGSS'])
catalogue['log_integrated_TGSS'] = np.log10(catalogue['integrated_TGSS'])
catalogue['log_ratio_flux_TGSS'] = np.log10(catalogue['peak_TGSS']/
catalogue['integrated_TGSS'])
catalogue['log_flux_NVSS'] = np.log10(catalogue['peak_NVSS'])
# +
# create features and labels within pytorch
# scores are out of separation scorer, so 0.1 should likely be 0
labels = (scores.values > 0.1)
features = catalogue.values
# train on half the catalogue (A), predict against the whole thing (A+B)
labels_A = labels[::2]
# labels_B = labels[1::2]
features_A = features[::2]
# features_B = features[1::2]
# NOTE(review): torch.autograd.Variable is a deprecated no-op wrapper in
# modern torch; plain tensors behave identically here
labels_A = Variable(torch.from_numpy(labels_A).float())
# labels_B = Variable(torch.from_numpy(labels_B).float())
features_A = Variable(torch.Tensor(features_A))
# features_B = Variable(torch.Tensor(features_B))
# create the model class
class LogisticRegression(torch.nn.Module):
    """Single-layer logistic regression: a linear map followed by a sigmoid."""

    def __init__(self, input_dim):
        super().__init__()
        # for our uses, the output layer is binary classification (one unit)
        self.linear = torch.nn.Linear(input_dim, 1)

    def forward(self, x):
        # torch.sigmoid replaces the deprecated torch.nn.functional.sigmoid
        return torch.sigmoid(self.linear(x))
# set some of the training hyper-parameters
input_dim = features_A.shape[1]
# learning rate cf. time-step in physical simulations
learning_rate = 0.001
# an epoch is a total cycle through all the training data
# increase this value if the losses plot doesn't appear to stabilise
num_epochs = int(1e4)
# instantiate the model, criterion (i.e. loss), and optimizer classes
model = LogisticRegression(input_dim)
# binary cross entropy, standard use; reduction='mean' is the modern
# equivalent of the deprecated size_average=True (removed in recent PyTorch)
criterion = torch.nn.BCELoss(reduction='mean')
# stochastic gradient descent cf. unbiased estimate of a noisy observation
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# train the model, this can take some time depending on num_epochs
# full-batch gradient descent: every epoch uses all of features_A at once
losses = []
for epoch in tqdm(range(num_epochs)):
    # reset gradient accumulation
    optimizer.zero_grad()
    # forward step: predict and find loss
    predictions = model(features_A)
    loss = criterion(predictions, labels_A)
    # use .item() to stop memory leak to GPU, advice from M.Alger
    # (.item() detaches the scalar so the whole graph can be freed)
    losses.append(loss.item())
    # backwards step: use loss to optimize a little bit
    loss.backward()
    optimizer.step()
# create a line plot of the loss trend, hopefully shows some stabilisation
# if it doesn't, try increasing num_epochs
plt.figure(figsize=(14,7))
plt.rcParams.update({'font.size': 18})
plt.plot(losses)
# log-log axes make both the early rapid descent and late flattening visible
plt.xscale('log')
plt.yscale('log')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('torch_lr - losses showing stabilisation')
plt.savefig('torch_lr_losses.pdf',bbox_inches='tight')
# +
# save weights and bias to reconstruct model if needs be
# parameters[0] is the Linear layer's weight matrix, parameters[1] its bias
parameters = list(model.parameters())
weights = parameters[0].detach().numpy().ravel()
bias = parameters[1].detach().numpy()
# one flat vector: all weights followed by the bias
np.savetxt('weights.csv', np.concatenate((weights,bias)), delimiter=',')
# -
# create plot of classifier weights
# of particular interest: separation, alpha, log_flux_NVSS
plt.figure(figsize=(14,7))
plt.rcParams.update({'font.size': 18})
plt.bar(range(len(weights)),weights)
plt.xlabel('weights')
# weights line up one-to-one with the catalogue's feature columns
plt.xticks(range(len(weights)),catalogue.columns,rotation='vertical')
plt.ylabel('co-eff')
plt.title('torch_lr - weights')
plt.savefig('torch_lr_weights.pdf',bbox_inches='tight')
# +
# classify the entire catalogue and compare to labels
features_cat = Variable(torch.Tensor(features))
predictions_cat = model(features_cat).detach().numpy()
# where the two populations cross is where we say the classifier decides the split
# 99 bins of width 0.01 over [0, 1) for each class's prediction density
nc_100 = np.histogram(predictions_cat[labels == 0],bins=np.arange(0,1,0.01),density=True)[0]
pc_100 = np.histogram(predictions_cat[labels == 1],bins=np.arange(0,1,0.01),density=True)[0]
# fall back to 0.5 if the densities never cross
midpoint = 0.5
for i in range(len(nc_100)):
    if nc_100[i] < pc_100[i]:
        midpoint = 0.01*i
        break
pred_labels_cat = (predictions_cat > midpoint).astype(float)
# flatten the (n, 1) model output to a 1-d array of 0.0/1.0 labels
pred_labels_cat = np.array([x[0] for x in pred_labels_cat])
# unconditional, note the native 63% negative class bias
accuracy = (pred_labels_cat == labels).mean()
# precision (true if said so)
precision = (labels[pred_labels_cat == True] == True).mean()
# recall (said so if true)
recall = (pred_labels_cat[labels == True] == True).mean()
print(('over whole catalogue:\n accuracy = {0:.3f}, precision = {1:.3f}, recall = {2:.3f}')
      .format(accuracy,precision,recall))
# saves names of match and predicted label
catalogue['pred_labels'] = pred_labels_cat
catalogue['pred_labels'].to_csv('predictions.csv')
# -
def inv_sigmoid(y):
    """Invert the logistic function.

    Given y = 1 / (1 + e^-x), return the log-odds x = log(y / (1 - y)).
    Accepts scalars or numpy arrays with values strictly inside (0, 1).
    """
    odds = y / (1.0 - y)
    return np.log(odds)
# +
# create histogram of predictions with populations separated off of label
# three stacked panels: raw linear scores h(x), sigmoid probabilities g(x),
# and the thresholded class prediction f(x)
plt.rcParams.update({'font.size': 18})
fig, (ax1, ax2, ax3) = plt.subplots(3,figsize=(14,16))
ax1.set_title('logistic regression predictions \n \n score, h(x)')
ax2.set_title('class probability, g(x)')
ax3.set_title('class prediction, f(x)')
ax1.set_ylabel('pdf')
ax2.set_ylabel('pdf')
# h(x): recover the pre-sigmoid linear score by inverting the sigmoid
# ('positive class' fixes the 'postive' typo in the legend labels)
ax1.hist((inv_sigmoid(predictions_cat[labels == 0]),
          inv_sigmoid(predictions_cat[labels == 1])), bins=100,
         histtype='step', label=('negative class','positive class'), color = ('red','blue'), density = True)
ax1.legend()
ax2.hist((predictions_cat[labels == 0],predictions_cat[labels == 1]), bins=100,
         histtype='step', label=('negative class','positive class'), color = ('red','blue'), density = True)
ax2.legend()
# two bins split at the decision midpoint found earlier
negative_class = np.histogram(predictions_cat[labels == 0],bins=[0,midpoint,1],density=True)[0]
positive_class = np.histogram(predictions_cat[labels == 1],bins=[0,midpoint,1],density=True)[0]
ax3.bar(np.array((0,0.75)),negative_class,0.2, label='negative class', edgecolor='red', color='None')
ax3.bar(np.array((0.25,1)),positive_class,0.2, label='positive class', edgecolor='blue', color='None')
ax3.set_xticks((0,0.25,0.75,1))
ax3.set_xticklabels(('0','0','1','1'))
ax3.set_ylabel('pdf, binned as [0,{},1]'.format(midpoint))
ax3.legend()
plt.savefig('torch_lr_predictions.pdf',bbox_inches='tight')
# +
# compute accuracy against manual labels
# manual_labels.csv must share the (name_TGSS, name_NVSS) index scheme
manual_labels = pd.read_csv('manual_labels.csv')
manual_labels.set_index(['name_TGSS','name_NVSS'],inplace=True)
# restrict the catalogue to the manually-inspected pairs only
man_cat = catalogue.loc[manual_labels.index.values]
label_man = manual_labels['manual_label'].values
pred_man = man_cat['pred_labels']
accuracy = (pred_man == label_man).mean()
# precision: of the pairs we predicted positive, how many are truly positive
precision = (label_man[pred_man == True] == True).mean()
# recall: of the truly positive pairs, how many we predicted positive
recall = (pred_man[label_man == True] == True).mean()
print(('on manual labels:\n accuracy = {0:.3f}, precision = {1:.3f}, recall = {2:.3f}')
      .format(accuracy,precision,recall))
# +
# partition the sky into physical objects using classifier
# we do this naively, by transitively linking together matches
# critical is that this naive partitioning can be bad given a good classifier
# keep only the (TGSS, NVSS) pairs the classifier labelled as matches
cat_pairs = set(catalogue.index.values)
obj_pairs = []
for pair in tqdm(cat_pairs):
    if catalogue.loc[pair]['pred_labels'] == 1:
        obj_pairs.append(pair)
# union-find-style grouping: `objects` maps a running id to its member
# source names; `tnames`/`nnames` map each TGSS/NVSS name to its object id
objects = {}
tnames = {}
nnames = {}
index = 0
for pair in tqdm(obj_pairs):
    tname, nname = pair[0], pair[1]
    if not tname in tnames and not nname in nnames:
        # neither source seen before: start a brand new object
        i = index
        objects[i] = [tname,nname]
        tnames[tname] = i
        nnames[nname] = i
    elif tname in tnames and not nname in nnames:
        # TGSS source already grouped: attach the NVSS source to its object
        i = tnames[tname]
        objects[i].append(nname)
        nnames[nname] = i
    elif not tname in tnames and nname in nnames:
        # NVSS source already grouped: attach the TGSS source to its object
        i = nnames[nname]
        objects[i].append(tname)
        tnames[tname] = i
    elif tname in tnames and nname in nnames:
        # must merge objects, zig-zag problem
        i = tnames[tname]
        j = nnames[nname]
        if i == j:
            # already the same object: nothing to do
            continue
        else:
            # fuse both groups under a fresh id and repoint every member;
            # names starting 'T' are TGSS sources, 'N' are NVSS sources
            obj_i = objects[i]
            obj_j = objects[j]
            merged_obj = list(set(obj_i+obj_j))
            objects[index] = merged_obj
            del objects[i], objects[j]
            for name in merged_obj:
                if name[0] == 'T':
                    tnames[name] = index
                elif name[0] == 'N':
                    nnames[name] = index
    # advance the id counter every pair so fresh and merged ids never collide
    index += 1
# -
# find the most interesting objects, those with many components
# (i.e. objects linking more than one source from either survey)
multi_objects = {}
most_components, most_components_i = 0, 0
for key, members in objects.items():
    size = len(members)
    if size <= 2:
        continue
    multi_objects[key] = members
    if size > most_components:
        most_components, most_components_i = size, key
# the extreme amount of components here is a sign that the naive partitioning is indeed naive
print(most_components, multi_objects[most_components_i])
# +
# save object partition
def dict_to_csv(dict_to_convert, filename):
    """Write each value of *dict_to_convert* as one CSV row of *filename*.

    Keys are discarded; values are expected to be iterables of fields
    (here, lists of source names). newline='' is required by the csv
    module so it controls line endings itself.
    """
    # writerows consumes the dict's values view directly — no need to
    # copy them into an intermediate list first
    with open(filename, 'w', newline = '') as csvfile:
        csv.writer(csvfile).writerows(dict_to_convert.values())
# persist the full partition and the many-component subset
dict_to_csv(objects,'objects.csv')
dict_to_csv(multi_objects,'multi_objects.csv')
# -
def connect_the_dots(centre,field_of_view):
    """creates a picture of the object partition in the square
    about the centre (tuple of ra, dec in degrees) of fov (degrees)
    only shows the links between sources across surveys, i.e. 'matches'

    relies on the module-level pred_labels_cat computed above and on
    patch_catalogue.csv being present in the cwd; saves the figure to
    torch_lr_partition.pdf
    """
    c_ra, c_dec = centre
    w_fov = field_of_view
    # re-read only name/position columns; positions were deleted from the
    # in-memory catalogue before training
    lookup_cat = pd.read_csv('patch_catalogue.csv', usecols=['name_TGSS','name_NVSS',
                             'ra_TGSS','dec_TGSS','ra_NVSS','dec_NVSS'])
    lookup_cat.set_index(['name_TGSS','name_NVSS'],inplace=True)
    # assumes row order matches the training catalogue exactly — TODO confirm
    lookup_cat['pred_labels'] = pred_labels_cat
    # find all objects/links within a 3 degree window of centre
    # NOTE(review): the window is actually +/- field_of_view degrees, and
    # both ends of a link must fall inside it
    window = lookup_cat[(lookup_cat['pred_labels']==1) &
                        (lookup_cat['ra_TGSS']>c_ra-w_fov) &
                        (lookup_cat['ra_TGSS']<c_ra+w_fov) &
                        (lookup_cat['dec_TGSS']>c_dec-w_fov) &
                        (lookup_cat['dec_TGSS']<c_dec+w_fov) &
                        (lookup_cat['ra_NVSS']>c_ra-w_fov) &
                        (lookup_cat['ra_NVSS']<c_ra+w_fov) &
                        (lookup_cat['dec_NVSS']>c_dec-w_fov) &
                        (lookup_cat['dec_NVSS']<c_dec+w_fov)]
    # free the full table as soon as possible
    del window['pred_labels'], lookup_cat
    walues = window.values
    del window
    # presumably columns come out as ra/dec TGSS then ra/dec NVSS once the
    # names are indexed — verify against the CSV header
    tgss_x = np.reshape(walues[:,[0]],len(walues))
    tgss_y = np.reshape(walues[:,[1]],len(walues))
    nvss_x = np.reshape(walues[:,[2]],len(walues))
    nvss_y = np.reshape(walues[:,[3]],len(walues))
    plt.figure(figsize=(14,14))
    plt.rcParams.update({'font.size': 18})
    # red pixel markers: TGSS sources; blue: NVSS sources
    plt.plot(tgss_x,tgss_y,'r,')
    plt.plot(nvss_x,nvss_y,'b,')
    # draw one thin black line per predicted cross-survey match
    for i in tqdm(range(len(walues))):
        plt.plot([tgss_x[i],nvss_x[i]],[tgss_y[i],nvss_y[i]],'k-',linewidth=0.5)
    # invert x-axis to read as RA from right to left
    ax = plt.gca()
    xlim = ax.get_xlim()
    ax.set_xlim(xlim[::-1])
    plt.title('Naive partition of TGSS to NVSS in sky around {0:.2f},{1:.2f}'.format(c_ra,c_dec))
    plt.ylabel('DEC / °')
    plt.xlabel('RA / °')
    plt.savefig('torch_lr_partition.pdf',bbox_inches='tight')
# interesting centre candidates:
# 153.65,-27.09 | J101436.8-270532, for the many-component object about the centre
# 166.10,-27.16, 158.60 -15.58, 152.64,-18.01
# render a 5-degree half-width window around the chosen centre
centre = 153.65,-27.09
field_of_view = 5
connect_the_dots(centre,field_of_view)
| source/torch_logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %run LifeGoals_mesa.ipynb
# step the simulation until the first agent reports completion, recording
# every agent's wealth after each step
model = LifeModel()
# one series per agent — sized from the model instead of hard-coding 6
# lists, so the cell keeps working if the number of agents changes
wealth = [[] for _ in model.schedule.agents]
while not model.schedule.agents[0].completed:
    model.step()
    for i, agent in enumerate(model.schedule.agents):
        wealth[i].append(agent.wealth)
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# one wealth trajectory per agent, in the model's scheduler order
# (labels assume exactly six agents in this order — TODO confirm)
plt.plot(wealth[0],label='Nuwan')
plt.plot(wealth[1],label='Land Owner')
plt.plot(wealth[2],label='Builder')
plt.plot(wealth[3],label='Carpenter')
plt.plot(wealth[4],label='Painter')
plt.plot(wealth[5],label='Car Sale')
# loc=7 places the legend at matplotlib's 'center right'
plt.legend(loc = 7)
| MESA/evaluate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Logistic Regression Project
#
# In this project, we will be working with a fake 'data of ad' set that shows whether a particular internet user clicks on an Ad on a company's website. We will try to create a model that will predict whether or not to click on an ad based on the features of that user.
#
# This data set includes the following features
#
# * 'daily_time_spent_on_site': consumer time on site in minutes
# * 'age': cutomer age in years
# * 'area_income': Average Income of geographical area of consumer
# * 'daily_internet_usage': Average minutes a day consumer is on the internet
# * 'ad_topic_line': Headline of the advertisement
# * 'city': City of consumer
# * 'male': Whether or not consumer was male
# * 'country': Country of consumer
# * 'timestamp': Time at which consumer clicked on Ad or closed window
# * 'clicked_on_ad': 0 or 1 indicated clicking on Ad
#
# ## Import Libraries
#
# **Import a few libraries that you think you will need (Or just import them as you go along!)**
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
# ## Get the Data
# **Read in the advertisement.csv file and set it to a data frame called data_of_ad.**
# load the advertising dataset; path is relative to the notebook's cwd
data_of_ad = pd.read_csv("data/advertisement.csv")
# **Check the head of data_of_ad**
data_of_ad.head()
# **Use info method and describe method on data_of_ad**
data_of_ad.info()
data_of_ad.describe()
# ## Exploratory Data Analysis
#
# Let's use seaborn to explore the data!
#
# Try recreating the plots shown below!
#
# **Create a histogram(bins=30) of the Age**
# exploratory plots of the ad-click dataset (pandas + seaborn)
data_of_ad["age"].plot.hist(bins = 30)
# **Create a jointplot showing area_income versus Age.**
sns.jointplot(data = data_of_ad, x = "age", y = "area_income")
# **Create a jointplot showing the kde distributions of daily_time_spent_on_site vs. Age.**
sns.jointplot(data = data_of_ad, x = "age", y = "daily_time_spent_on_site", kind = "kde", color = "red" )
# **Create a jointplot of 'daily_time_spent_on_site' vs. 'daily_internet_usage'**
sns.jointplot(data = data_of_ad, x = "daily_time_spent_on_site", y = "daily_internet_usage", color = "green" )
# **Finally, create a pairplot with the hue defined by the 'clicked_on_ad' column feature.**
sns.pairplot(data = data_of_ad, hue = "clicked_on_ad")
# # Logistic Regression
#
# Now it's time to do a train test split, and train our model!
#
# You'll have the freedom here to choose columns that you want to train on!
# **Split the data into training set and testing set using train_test_split**
data_of_ad.head(1)
from sklearn.model_selection import train_test_split
# +
# numeric features only; text/timestamp columns are excluded from the model
X = data_of_ad[['daily_time_spent_on_site', 'age', 'area_income','daily_internet_usage', 'male']]
y = data_of_ad["clicked_on_ad"]
# fixed random_state keeps the 70/30 split reproducible
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 101)
# -
# **Train and fit a logistic regression model on the training set.**
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X_train, y_train)
# ## Predictions and Evaluations
# **Now predict values for the testing data.**
# predict on the held-out 30% and summarise classification quality
predictions = logmodel.predict(X_test)
# **Create a classification report for the model.**
from sklearn.metrics import classification_report,confusion_matrix
print(classification_report(y_test, predictions))
print(confusion_matrix(y_test, predictions))
# ## Great Job!
| LOGISTIC_REGRESSION/logistic_regression_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <h1 style="font-size:2.5em;text-align:center;">Sistemas de Recomendación - Clase II</h1>
# + [markdown] slideshow={"slide_type": "slide"}
# # Contenido
#
# 1. [Ratings](#Ratings)
# 1. [Similitud entre usuarios y contenido](#Similitud-entre-usuarios-y-contenido)
# 1. [Algoritmos de recomendación](#Algoritmos-de-recomendación)
# 1. [Problemas con sistemas de recomendación](#Problemas-con-sistemas-de-recomendación)
# + [markdown] slideshow={"slide_type": "slide"}
# # Ratings
# + [markdown] slideshow={"slide_type": "subslide"}
# ## ¿Qué es un rating?
#
# - Es una herramienta que establece el sentimiento de un **usuario** sobre el **contenido** que está consumiendo.
# - Se representa internamente como **un número en una escala** (e.g. de 0 a 5).
# - Se busca que ese número pueda ser traducido a una representación gráfica para el usuario.
# - Pueden ser **explícitos** (dados directamente por el usuario) o **implícitos** (dados por las interacciones que realiza el usuario en una página).
# - Ejemplos: estrellas de Amazon, reputación de un vendedor en Mercado Libre, la cantidad de veces que uno reproduce una película en Netflix o una canción en Spotify.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Matriz de Usuario-Contenido
#
# - Es una matriz (tabla) con **una fila por cada usuario** y **una columna por cada item** de contenido (o viceversa).
# - El **contenido** de la matriz es el **rating** (explícito o implícito) que le asigna un usuario a cierto item de contenido.
# - Desafío en el diseño de un sistema de recomendación: ¿Qué tipo de información utilizar para rellenar el contenido de la matriz?.
# - Es el punto de entrada para entrenar la mayoría de los algoritmos de recomendación.
# - El objetivo de los algoritmos de recomendación es **llenar huecos** en la matriz de usuario-contenido.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Matriz de Usuario-Contenido: Ejemplo
#
# <img alt="User Item Matrix" src="./img/user-item-matrix.png" style="width:80%;margin:auto;"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Matriz de Usuario-Contenido: Problemas de Dispersión
#
# - Las matrices de usuario-contenido tienden a ser **altamente dispersas**.
# - Las plataformas suelen tener **muchos usuarios e items**.
# - Es raro encontrar a usuarios que interactúen con todos los items.
# - Deben poblarse con ratings (implícitos o explícitos), en caso contrario no brindan suficiente información a los algoritmos de recomendación.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Ratings explícitos vs. ratings implícitos.
#
# - Los datos para generar una matriz de usuario-contenido se obtienen de las **interacciones que el usuario hace con el sitio**.
# - Los **ratings explícitos** se consiguen a través de un **sistema específicamente diseñado**.
# - El usuario nos provee su apreciación de un item.
# - Los **ratings implícitos** se consiguen a través de datos recolectados a partir del **comportamiento del usuario**.
# - Se tiene que registrar lo que hace el usuario dentro del sitio. Desde compras hasta historial de revisión.
# - Sitios distintos tienen **distintas formas de capturar la información**.
# - No todas las aplicaciones se benefician de sistemas explícitos.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Ratings explícitos
#
# - Son la **forma más directa** de llenar la matriz de usuario-contenido.
# - Dependen de la **voluntad del usuario**.
# - ¿Qué tan **seguro está el usuario** de lo que le gusta?
# - ¿Qué cosa en particular no le gusta? E.g. canción vs. género musical, artículo vs. tema de una noticia.
# - ¿Son confiables los sistemas de reputación?
#
# <img alt="User Item Matrix" src="./img/yelp-hotel-review.jpg" style="margin:auto;"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Ratings implícitos
#
# - Se basan en el **monitoreo** de lo que hace el usuario.
# - Dar un bajo rating a una película, pero reveerla una y otra vez.
# - Se busca encontrar un **número que defina la conformidad** de un usuario con un item.
# - Cantidad de reproducciones de una canción, historial de navegación, compras, etc.
# - **No todo** lo que hace un usuario **es sinónimo de conformidad**.
# - Traducir comportamiento en ratings es una tarea subjetiva, depende mucho del contexto.
# - El **tiempo es relevante**.
# - No es lo mismo recomendar un bar de cerveza a las 9 a.m. que a las 9 p.m.
# - Comprar algo que recién sale al mercado es más valioso que comprar algo más viejo.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Cálculo de ratings implícitos
#
# - Registramos las **acciones del usuario**.
# - Distintos **eventos** dan distintos **valores**.
# - Buscar item.
# - Entrar en los detalles de un item (o items de categoría similar).
# - Averiguar medios de pago/envío.
# - Preguntar.
# - Comprar.
# - Buscamos llegar al rating máximo (e.g. comprar).
# - Puede ser un valor continuo (e.g. porcentaje de una canción/video que el usuario reproduce)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Cálculo de ratings implícitos: fórmula
#
# $$
# IR_{i,u} = w_1 \times \#event_1 + w_2 \times \#event_2 + \dots + w_n \times \#event_n
# $$
#
# - $IR_{i,u}$: Rating implícito entre usuario *u* e item *i*.
# - $\#event_i$: Número de veces que un evento ocurre.
# - Pueden estar **acotados por un valor máximo**.
# - Se pueden **pesar por el tiempo** desde que ocurrió el evento.
# - $w_1, \dots, w_n$: Pesos que se les da a los distintos eventos (basados en análisis).
# - Estos pueden **calcularse mediante análisis o como parámetros de un clasificador** binario que busca encontrar la probabilidad de comprar un item.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Items poco frecuentes
#
# - Muchas veces los **eventos raros** nos dicen más sobre el usuario que cualquier otro evento.
# - No es lo mismo comprar un DVD del "Señor de los Anillos" que comprar una edición de coleccionista con un poster.
# - Los eventos raros hacen **subgrupos de usuarios** más específicos.
# - Podemos **reponderar el rating** de un item (implícito o explícito) mediante esta información.
#
# $$
# R^{(final)}_{i,u} = R_{i,u} \times \log\Big(\frac{N}{1+n}\Big)
# $$
#
# - $R_{i,u}$: Rating calculado originalmente (explícito o implícito)
# - $N$: Número total de usuarios.
# - $n$: Número de usuarios que compraron el item *i*.
# - $R^{(final)}_{i,u}$: Rating reponderado.
# + [markdown] slideshow={"slide_type": "slide"}
# # Similitud entre usuarios y contenido
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Métricas de similitud
#
# - Buscamos ver que tan **cercanos están dos elementos**.
# - Se busca **recomendar contenido entre usuarios similares**.
# - Una **función (o métrica) de similitud** busca comparar dos elementos.
# - $\text{sim}(i_1, i_2) = 1$ implica igualdad.
# - $\text{sim}(i_1, i_2) = 0$ implica dos elementos completamente opuestos.
# - Las métricas de similitud **dependen del conjunto de datos**. Algunas funcionan mejor que otras.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Similitud de Jaccard
#
# - Se define **entre dos conjuntos**.
# - Suelen ser mejor aplicadas sobre datos binarios (e.g. un usuario compró o no un item).
# - Evalúa **cantidad de interacciones** en común vs. independientes.
# - La distancia entre dos elementos $i$ y $j$, se define formalmente como:
#
# $$
# \text{Jaccard}(i, j) = \frac{|\{u:~u~\text{compró}~i \land j\}|}{|\{u:~u~\text{compró}~i \lor j\}|}
# $$
#
# - El término "compró" puede cambiarse por cualquier interacción que se busque modelar.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Distancia Manhattan
#
# - Se define entre **valores cuantitativos**.
# - Conocida también como norma $L_1$.
# - La distancia Manhattan se piensa como la **cantidad de cuadras que hay que hacer para llegar de un punto al otro** en Manhattan.
# - Se define formalmente entre dos elementos $i$ y $j$ como:
#
# $$
# \text{Manhattan}(i, j) = \sum_{u=1}^{n}|r_{i,u} - r_{j,u}|
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Distancia Euclídea
#
# - Se define entre **valores cuantitativos**.
# - Conocida también como norma $L_2$.
# - Es la **distancia recta más corta entre dos puntos**. Proviene del teorema de Pitágoras.
# - Se define formalmente entre dos elementos $i$ y $j$ como:
#
# $$
# \text{Euclidean}(i, j) = \sqrt{\sum_{u=1}^{n}(r_{i,u} - r_{j,u})^2}
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Similitud Coseno
#
# - Se define entre **valores cuantitativos**.
# - Mide el **ángulo entre vectores** que representen nuestros datos.
# - Se define formalmente entre dos elementos $i$ y $j$ como:
#
# $$
# \text{Cosine}(i, j) = \frac{\sum_{u=1}^{n}r_{i,u}r_{j,u}}{\sqrt{\sum_{u=1}^{n}r_{i,u}^2}\sqrt{\sum_{u=1}^{n}r_{j,u}^2}}
# $$
#
# - En general **se normaliza utilizando promedios** (parecido a lo que se hace con el coeficiente de Pearson).
# - La idea es eliminar el sesgo de los usuarios.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Coeficiente de correlación de Pearson
#
# - Mide que tan **correlacionadas están las puntuaciones** entre dos elementos (pueden ser usuarios o items).
# - Compara los **elementos contra el promedio** y evalúa que tan diferentes son.
# - Se define formalmente entre dos elementos $i$ y $j$ como:
#
# $$
# \text{Pearson}(i, j) = \frac{\sum_{u=1}^{n}(r_{i,u}-\bar{r_i})(r_{j,u}-\bar{r_j})}{\sqrt{\sum_{u=1}^{n}(r_{i,u}-\bar{r_{i}})^2}\sqrt{\sum_{u=1}^{n}(r_{j,u}-\bar{r_{j}})^2}}
# $$
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Clustering
#
# - Definidas las métricas de similitud (o distancia) podemos hacer uso de **algoritmos de clustering**.
# - La idea es **buscar usuarios o items cercanos y agruparlos**.
# - Técnicas clásicas de clustering sirven: K-Means, Jerárquico, Expectation-Maximization.
# + [markdown] slideshow={"slide_type": "slide"}
# # Algoritmos de recomendación
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Filtrado colaborativo
#
# - Es uno de los modelos de recomendación más básicos pero **muy robusto**.
# - Se basa en la idea **asociar usuarios similares de acuerdo a sus gustos**, expresados en los ratings.
# - Puede aplicarse sobre **usuarios** (user-based collaborative filtering).
# - Dados dos usuarios similares $u$ y $v$, recomendamos el item $i$ al usuario $u$ que adquirió el usuario $v$ y todavía no fue consumido por $u$.
# - Puede aplicarse sobre **items** (item-based collaborative filtering).
# - Dados dos items similares $i$ y $j$ y un usuario $u$ que adquirió $i$, le recomendamos $j$.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Filtrado basado en usuarios vs items
#
# - Los items se consideran **más estables**.
# - Un sistema puede tener **pocas interacciones para cada usuario**.
# - **Depende de la cantidad** de usuarios o items en el sistema y **su variabilidad**.
# - Los items proveen menos posibilidad de variación. Se **estancan rápido**.
# - Los **usuarios evolucionan**, las **recomendaciones pueden ser más dinámicas**.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Pipeline de filtrado colaborativo
#
# - Partimos de la **matriz de usuario-contenido**.
# - Calculamos la **similitud entre los elementos**.
# - Ordenamos los **elementos que más se parecen** al elemento actual.
# - Elegimos un **vecindario sobre el cuál calcular** la predicción.
# - Utilizamos los **ratings del usuario y su vecindario** para calcular el rating actual.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Vecindario para calcular predicción
#
# - A la hora de calcular un rating es necesario elegir aquellos **elementos que sean los más similares**.
# - La opción de **clustering** nos da un vecindario en el cluster de un elemento.
# - Los **K** vecinos más cercanos da un número fijo de vecinos.
# - Siempre nos brinda valores con los que calcular.
# - Un **umbral** en los ratings más cercanos da un número variable de vecinos.
# - Puede dejar el algoritmo sin datos para calcular la predicción.
# - El **análisis de datos y la tolerancia** sobre las predicciones nos dan idea de qué método utilizar.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Cálculo de la predicción
#
# - A partir de los datos de ratings de un usuarios, las similitudes, y el vecindario, **completamos las matriz de usuario-contenido** con el cálculo de nuevos ratings:
#
# $$
# r^{(p)}_{i,u} = \bar{r_u} + \frac{\sum_{j \in N}\text{sim}(i,j)(r_{u,j}-\bar{r_j})}{\sum_{j \in N}\text{sim}(i,j)}
# $$
#
# - $r^{(p)}_{i,u}$: Es el valor predicho para el rating del item $i$ por el usuario $u$.
# - $\bar{r_u}$: Es la media de ratings dada por el usuario $u$ a todos los items que puntuó.
# - $j \in N$: Son todos los items $j$ del vecindario $N$.
# - $\bar{r_j}$: Es el rating medio del item $j$ (es opcional).
# - $\text{sim}(i,j)$: Es la métrica de similitud entre los items $i$ y $j$.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Surpr!se
# + slideshow={"slide_type": "-"}
import pandas as pd
from surprise import Dataset, Reader, KNNWithMeans
from surprise.accuracy import rmse
from surprise.model_selection import cross_validate, train_test_split
# + slideshow={"slide_type": "-"}
# load MovieLens ratings into surprise's Dataset format
ratings = pd.read_csv("./data/ml-latest-small/ratings.csv")
# Reader needs the rating scale; infer it from the observed min/max
reader = Reader(rating_scale=(ratings.rating.min(), ratings.rating.max()))
ratings = Dataset.load_from_df(ratings[["userId", "movieId", "rating"]], reader)
# + slideshow={"slide_type": "-"}
# hold out 20% of the ratings for evaluation
ratings_train, ratings_test = train_test_split(ratings, test_size=0.2)
# KNN with mean-centred ratings, 5 neighbours
# NOTE(review): surprise's KNNWithMeans defaults to user-based similarity
model = KNNWithMeans(k=5).fit(ratings_train)
predictions = model.test(ratings_test)
print("RMSE on test: {:.4f}".format(rmse(predictions, verbose=False)))
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Surpr!se: Cross Validation
# + slideshow={"slide_type": "-"}
model = KNNWithMeans(k=5, verbose=False)
# 5-fold cross-validation over the full ratings set, reporting RMSE and MAE
cross_validated_metrics = cross_validate(model, ratings, measures=['RMSE', 'MAE'], cv=5, verbose=True)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Filtrado basado en contenido
#
# - En lugar de utilizar los ratings, utilizamos la **información del contenido**.
# - Metadatos como tema, descripción, género, etc.
# - Dependen mucho del **tipo de los datos**.
# - Las películas tienen directores y actores. Podemos recomendar películas similares en base a actores que tengan en común.
# - Requiere de tres aspectos:
# 1. Analizador de contenido: Crea un modelo basado en la información de los items.
# 2. Perfil del usuario: Genera un perfil del usuario en base al contenido consumido. E.g. una lista de películas vistas.
# 3. Recuperación de contenido: Recomienda contenido basado en la similitud del perfil del usuario con el perfil de los items. E.g. iterando y comparando uno a uno los elementos de la lista.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Analizador de contenido
#
# - Se busca **describir los datos mediante sus metadatos**.
# - Puede dividirse entre **hechos** y **etiquetas** (tags).
# - Los **hechos** son indiscutibles, tienen que ver con los datos en sí. E.g. director o actor de una película.
# - Las **etiquetas** generalmente se piensan en cómo los usuarios **categorizan los items**. E.g. una lista de Spotify para **levantar el ánimo**.
# - **No hay división clara**. Pero los hechos tienden a ser menos subjetivos. Las etiquetas dependen más del usuario.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Vector del contenido
#
# - El analizador de contenido busca **representar items mediante vectores**.
# - En filtrado colaborativo el **vector está dado por ratings**.
# - Acá el vector es **producido mediante características**.
# - Se lo puede pensar como una **ingeniería de atributos** de aprendizaje automático.
# - Los atributos pueden ser binarios, categóricos, cuantitativos, etc.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Vector del contenido: uso del texto
#
# - Muchas veces los **metadatos están en el texto** (e.g. título o descripción).
# - Las **técnicas clásicas de NLP** sirven para lidiar con esto.
# - Buscamos representar una descripción mediante una **bolsa de palabras**.
# - Requerimos de un paso previo de **tokenización**.
# - Generamos un **vector donde cada dimensión representa una palabra y el valor representa el conteo**.
# - Es útil eliminar las **palabras vacías**.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Vector del contenido: TF-IDF
#
# - Podemos utilizar **TF-IDF** para reponderar de acuerdo a la **importancia de los términos**.
# - La idea es **pesar una palabra en un metadato** de acuerdo a que tanto ocurre en todos los metadatos.
#
# $$
# \text{tf-idf}(w, d) = f_{w,d} \times \text{idf}(w)
# $$
#
# $$
# \text{idf}(w) = \log\left(\frac{|D|}{|\{d \in D : w \in d\}|}\right)
# $$
#
# - $w$: Es una palabra.
# - $d$: Es un documento.
# - $f_{w,d}$: Es la cantidad de veces que $w$ ocurre en $d$.
# - $D$: Es el conjunto de todos los documentos.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Otras técnicas: LDA, LSA, word2vec
#
# - Otras formas de representar datos es mediante otras técnicas de NLP.
# - LDA permite **representar los documentos mediante temas**. Los temas son representados mediante **palabras**.
# - LSA (o LSI) obtiene **variables latentes entre documentos**. Busca encontrar relaciones que no están a simple vista.
# - word2vec es un **algoritmo para representar palabras** basado en información de co-ocurrencia.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Matriz de contenido: Ejemplo
#
# <img alt="Content Matrix" src="./img/movies-matrix.png" style="width:80%;margin:auto;"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Crear el perfil de usuario
#
# - Ya habiendo creado el vector de contenido necesitamos crear **el perfil de usuario**.
# - Se puede hacer **sumando los vectores de contenido con los que el usuario interactuó**.
# - Es sensato **normalizar este vector**.
# - El vector final **sirve para encontrar contenido similar para el usuario**.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Perfil de usuario: Ejemplo
#
# - Suponiendo que el usuario haya puesto 5 estrellas para "Raiders of the Lost Ark" y 3 para "La La Land".
#
# <img alt="User Profile" src="./img/user-content.png" style="width:80%;margin:auto;"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Ejemplo: Géneros de películas
# + slideshow={"slide_type": "-"}
movies = pd.read_csv("./data/ml-latest-small/movies.csv")
# MovieLens stores genres as a single 'A|B|C' string; split into a list
movies['genres'] = movies['genres'].apply(lambda x: x.split("|"))
movies.head()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Ejemplo: Géneros de películas - Vector de contenido
# + slideshow={"slide_type": "-"}
# collect the genre vocabulary; sorted() makes the one-hot column order
# deterministic (iterating a raw set varies between runs because of hash
# randomisation, which would shuffle the feature columns)
genres = sorted(set(g for G in movies['genres'] for g in G))
for g in genres:
    # one 0/1 indicator column per genre
    movies[g] = movies.genres.transform(lambda x: int(g in x))
# keep only the indicator columns as the content matrix
movie_genres = movies.drop(columns=['movieId', 'title','genres'])
movie_genres.head()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Ejemplo: Géneros de película - Similitud entre géneros
# + slideshow={"slide_type": "-"}
from sklearn.metrics.pairwise import cosine_similarity
# Pairwise cosine similarity between every pair of movie genre vectors.
# (Passing the same matrix twice is equivalent to cosine_similarity(movie_genres).)
cosine_sim = cosine_similarity(movie_genres, movie_genres)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Ejemplo: Géneros de película - Sistema de recomendación
# + slideshow={"slide_type": "-"}
def movie_finder(title):
    """Return every title in the module-level ``movies`` frame containing *title*."""
    matches = movies['title'].str.contains(title)
    return movies.loc[matches, 'title'].tolist()
# Map each title to its row index for direct lookup into ``cosine_sim``.
movie_idx = dict(zip(movies['title'], list(movies.index)))
# Use the first match for the query string as the reference movie.
title = movie_finder('Toy Story')[0]
n_recommendations = 5
idx = movie_idx[title]
# Rank every movie by genre similarity to the reference movie (descending).
sim_scores = list(enumerate(cosine_sim[idx]))
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
# Skip position 0: the most similar movie is the reference movie itself.
sim_scores = sim_scores[1:(n_recommendations+1)]
similar_movies = [i[0] for i in sim_scores]
print("Recomendaciones para {}:".format(title))
for movie in movies['title'].iloc[similar_movies]:
    print("\t{}".format(movie))
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Factorización de matrices
#
# - Se busca **reducir dimensiones** de manera que se elimine el ruido.
# - Las dimensiones más importantes, que quedan cuando se reducen las demás, se llaman **factores latentes**.
# - La idea es representar usuario y contenido a través de factores latentes.
# - Nuevos usuarios (o items), con cierto contenido inicial, son fácilmente agregados.
# - El **algoritmo de SVD** del álgebra lineal nos brinda esa oportunidad.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Algoritmo de SVD
#
# - **M** es la matriz de ratings.
# - **U** es la matriz de características de usuarios.
# - **$\sigma$** es la matriz de valores singulares.
# - **V$^T$** es la matriz de características de items.
#
# <img alt="SVD" src="./img/svd.png" style="width:80%;margin:auto;"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Algoritmo de SVD
#
# - Elegimos **r** valores singulares.
#
# <img alt="Truncated SVD" src="./img/truncated-svd.png" style="width:80%;margin:auto;"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Algoritmo de SVD
#
# - Podemos calcular valores faltantes en base a los vectores de representación.
#
# <img alt="SVD Calculation" src="./img/svd-calculation.png" style="width:80%;margin:auto;"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Ejemplo: Películas
#
# <img alt="Movie Ratings" src="./img/movie-matrix-ratings.png" style="width:75%;margin:auto;"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Ejemplo: Películas
#
# <img alt="Movie SVD" src="./img/movie-matrix-svd.png" style="width:80%;margin:auto;"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Ejemplo: Películas
#
# <img alt="Movie SVD" src="./img/movie-matrix-factorization.png" style="width:40%;margin:auto;"/>
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Algoritmo de <NAME>
#
# - El algoritmo de SVD **tiene algunos problemas**:
# - Calcular y actualizarlo es lento.
# - Necesariamente requiere valores para reemplazar los lugares vacíos.
# - <NAME> desarrolló [un algoritmo](https://sifter.org/~simon/journal/20061211.html) que a grandes rasgos **calcula SVD iterativamente**.
# - La idea es definir una **función hipótesis y una función de coste**.
# - Una vez definidas, **se minimiza** la función de coste respecto de los parámetros de la función hipótesis.
# - Se puede **entrenar** como un algoritmo supervisado mediante **descenso por la gradiente**.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Algoritmo de <NAME>: Hipótesis
#
# - Se define la función de hipótesis para un rating de un usuario a un item como:
#
# $$
# \hat{r}_{i,u} = \mu + b_u + b_i + q_i^Tp_u
# $$
#
# - $\hat{r}_{i,u}$: Es el rating a calcular.
# - $\mu$: Es el promedio de todos los ratings.
# - $b_u$: Es el sesgo (*bias*) del usuario.
# - $b_i$: Es el sesgo (*bias*) del item.
# - $p_u$: Es el vector de factores latentes que representa el usuario $u$.
# - $q_i$: Es el vector de factores latentes que representa el item $i$.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Algoritmo de <NAME>: Función de costo
#
# - Se define la función de costo como:
#
# $$
# \sum_{r_{i,u} \in R_{train}} (r_{i,u} - \hat{r}_{i,u})^2 + \lambda\big(b_i^2 + b_u^2 + ||q_i||^2 + ||p_u||^2\big)
# $$
#
# - $r_{i,u}$: Son los ratings del conjunto de entrenamiento.
# - $\hat{r}_{i,u}$: Es el rating predicho.
# - $\mu$: Es el promedio de todos los ratings.
# - $b_u$: Es el sesgo (*bias*) del usuario.
# - $b_i$: Es el sesgo (*bias*) del item.
# - $p_u$: Es el vector de factores latentes que representa el usuario $u$.
# - $q_i$: Es el vector de factores latentes que representa el item $i$.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Surpr!se
# + slideshow={"slide_type": "-"}
from surprise import SVD
# Funk-style SVD: 100 latent factors, 20 epochs, fixed seed for reproducibility.
# NOTE(review): ``ratings_train``/``ratings_test`` and ``rmse`` come from an
# earlier cell (Surprise train/test split and accuracy helper) — not visible here.
model = SVD(n_factors=100, n_epochs=20, random_state=42).fit(ratings_train)
predictions = model.test(ratings_test)
print("RMSE on test: {:.4f}".format(rmse(predictions, verbose=False)))
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Otros algoritmos de factorización
#
# - SVD++ que es una extensión de SVD, que tiene en cuenta ratings implícitos.
# - Non negative matrix factorization (NMF), es muy similar a SVD. Los **factores latentes no son negativos**.
# - Ambos algoritmos están implementados en Scikit Surprise.
# + [markdown] slideshow={"slide_type": "slide"}
# # Problemas con sistemas de recomendación
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Problema de "Cold Start"
#
# - Es el **principal problema** con el que lidiar en sistemas de recomendación.
# - Pasa cuando hay un **nuevo usuario** (o contenido), del que no conocemos nada.
# - También aplica a **usuarios únicos** (i.e. no parecidos a ningún otro usuario).
# - En este caso se lo llama **oveja gris**.
# - Hay varias maneras de trabajarlo.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Problema de "Cold Start": Algunas soluciones
#
# - Muchas veces la solución más sencilla es **preguntarle al usuario**.
# - Algunos sitios piden algún tipo de rating al subscribirse (e.g. los géneros preferidos de película).
# - Depende de la **voluntad del usuario**.
# - Opción es **conectar el usuario mediante alguna red social**.
# - Una opción sencilla es **recomendar lo más popular** y ver el comportamiento.
# - E.g. Amazon recomienda "items que se están buscando ahora". YouTube muestra "trendings".
# - Para **contenido** se puede agrupar y mostrar con cierta relevancia.
# - E.g. la categoría de Netflix para "New Arrivals".
# - Los items suelen ser representables mediante **vectores de contenido** y recomendar mediante **filtrado basado en contenido**.
# - Una solución es aplicar **reglas de asociación** sobre las interacciones del usuario (e.g. ver los items que fue revisando)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Problema de "Cold Start": Sesionización
#
# - La idea es llevar **registro de los usuarios**.
# - E.g. usando cookies o sesiones anónimas.
# - Se busca **hacer seguimiento de todo lo que hace el usuario**.
# - Cada sesión se puede **agrupar en un vector**.
# - Se lleva **registro de los vectores y se pueden comparar**.
# - En general, es mejor **convencer al usuario de registrarse**.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Problema de Sesgo
#
# - Muchas veces los algoritmos crean **sesgo**.
# - Aquellas cosas más recomendadas son las **únicas que se recomiendan**.
# - Una opción es **pesar por tiempo**.
# - No siempre es útil, depende del contenido.
# - Agregar **aleatoriedad**.
# - E.g. recomendar algo más que aquel contenido con mayor rating.
# - Utilizar **recomendaciones no personalizadas**.
# - Reglas de asociación. E.g. ¿Qué otros productos compraron con este?
# - Ordenar por lo más reciente.
# - Es un trabajo que tiene que ver más con el **diseño general del sistema** que con el algoritmo.
| clase_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# The objective function (Rosenbrock-style): 100*(x0^2 - x1)^2 + (x0 - 1)^2
def func(x):
    """Evaluate the objective at the 2-vector ``x``."""
    curvature_term = np.square(np.square(x[0]) - x[1])
    shift_term = np.square(x[0] - 1)
    return 100 * curvature_term + shift_term
# first order derivatives of the function (the Jacobian)
def dfunc(x):
    """Return the gradient of ``func`` at ``x`` as a numpy array."""
    residual = np.square(x[0]) - x[1]
    grad_x0 = 400 * x[0] * residual + 2 * (x[0] - 1)
    grad_x1 = -200 * residual
    return np.array([grad_x0, grad_x1])
# The Gradient descent algorithm
def grad(x, max_int):
    """Run fixed-step gradient descent on ``func``.

    Stops after ``max_int`` iterations or as soon as the objective changes by
    at most 0.01 between steps. Returns the visited points, their objective
    values, and the iteration counter.
    """
    n_iter = 1
    step = .0001 / n_iter
    trajectory = []
    objective_values = []
    # you can customize your own condition of convergence, here we limit the number of iterations
    while n_iter <= max_int:
        trajectory.append(x)
        objective_values.append(func(x))
        candidate = x - step * dfunc(x)
        if np.abs(func(candidate) - func(x)) > 0.01:
            x = candidate
        else:
            break
        print(x, func(x), n_iter)
        n_iter += 1
    return trajectory, objective_values, n_iter
# Initialization: start at (5, 5) and allow at most 50 descent iterations.
start = [5, 5]
val, objectf, iters = grad(start, 50)
| Line Search/First-order optimization algorithms/Gradient Descent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Over deze opdrachten
#
# * dit is Jupyter Notebook `sqlite-0.ipynb` - voor het aanmaken van de database.
# * voor een inleiding over het gebruik van Jupyter Notebooks: [Inleiding Jupyter Notebook](Inleiding-Jupyter.ipynb)
# * de hele reeks SQLite opdrachten:
#     * [SQLite 0 - init database](sqlite-0.ipynb) (om met een schone lei te beginnen)
# * [SQLite 1 - selectie en projectie](sqlite-1.ipynb)
# * [SQLite 2 - joins](sqlite-2.ipynb)
# * [SQLite 3 - CRUD](sqlite-3.ipynb)
# * [SQLite 4 - Schema](sqlite-4.ipynb)
#
# ### Voorbeeld
#
# Bij deze opdrachten gebruiken we een voorbeeld-database met drie tabellen: `leden`, `inschrijvingen`, en `events`.
# Deze database komt uit een webtoepassing; deze vind je op glitch.com.
# Daar kun je de toepassing bekijken, uitproberen, en er een eigen versie ("remix") van maken.
# ## Aanmaken van de database
# In de volgende opdrachten voer je allerlei queries uit op een database.
# Je moet dan eerst wel een database met inhoud hebben.
# Met de onderstaande opdrachten maak je deze database.
# Deze opdrachten hoef je maar één keer uit te voeren: de database blijft bestaan, met je veranderingen.
# Je kunt deze opdrachten later ook uitvoeren om opnieuw te beginnen in een goed-gedefinieerde toestand.
# We maken de tabel(len) aan.
# We verwijderen eerst een eventueel bestaande versie van de tabel(len):
# we hebben dan een goed gedefinieerde toestand.
#
# > Opmerking: er zijn kleine verschillen in de notatie van de constraints bij het aanmaken van een tabel; MySQL gebruikt bijvoorbeeld een andere notatie dan Oracle.
# ### Eerste tabel: leden
#
# De opdracht bestaat uit de volgende onderdelen:
#
# 1. het opstarten van `sqlite` (de eerste twee regels). Hierna kunnen we SQL opdrachten geven;
# 2. het verwijderen van de `leden`-tabel als deze bestaat (`DROP TABLE`);
# 3. het aanmaken van de `leden`-tabel (`CREATE TABLE`);
# 4. het vullen van de tabel uit een csv-bestand (dit zijn geen SQL-opdrachten);
# 5. een SQL-`SELECT`-opdracht om te controleren of de tabel inderdaad ingelezen is.
# + language="bash"
# sqlite3 example.db
#
# DROP TABLE IF EXISTS leden;
#
# CREATE TABLE leden(
# lidnr INTEGER PRIMARY KEY,
# voornaam VARCHAR(255) NOT NULL,
# achternaam VARCHAR(255) NOT NULL,
# email VARCHAR(255) NOT NULL UNIQUE
# );
#
# .mode csv
# .import leden.csv leden
#
# SELECT lidnr, voornaam, achternaam, email
# FROM leden;
# -
# We hebben een voorbeeld-inhoud van de tabel(len) in csv-bestanden.
# Zo'n csv-bestand kun je gemakkelijk aanpassen in een teksteditor.
# Voor het importeren van een csv-bestand gebruiken we een speciale SQLite-opdracht.
# ### Tweede tabel: events
#
# De tabel `events` bevat de events waarvoor de leden kunnen inschrijven.
# Elk event heeft een datum en een beschrijving.
#
# Hiervoor volgen we hetzelfde patroon:
# + language="bash"
# sqlite3 example.db
#
# DROP TABLE IF EXISTS events;
#
# CREATE TABLE events(
# eventnr INTEGER,
# datum VARCHAR(10) NOT NULL,
# beschrijving VARCHAR(255),
# PRIMARY KEY (eventnr),
# CONSTRAINT name UNIQUE (datum, beschrijving)
# );
#
# .mode csv
# .import events.csv events
#
# SELECT eventnr, datum, beschrijving
# FROM events;
# -
# ### Derde tabel: inschrijvingen
#
# Deze tabel beschrijft een N-M relatie tussen leden en events.
# Naast de verwijzingen (via *foreign keys*) naar de andere tabellen vind je hier de gegevens over de inschrijving (maaltijd-keuze).
# + language="bash"
# sqlite3 example.db
#
# DROP TABLE IF EXISTS inschrijvingen;
#
# CREATE TABLE inschrijvingen(
# eventnr INTEGER,
# lidnr INTEGER,
# maaltijd VARCHAR(255),
# PRIMARY KEY (lidnr, eventnr),
# FOREIGN KEY (lidnr) REFERENCES leden (lidnr),
# FOREIGN KEY (eventnr) REFERENCES events (eventnr)
# );
#
# .mode csv
# .import inschrijvingen.csv inschrijvingen
#
# SELECT eventnr, lidnr, maaltijd
# FROM inschrijvingen;
# -
# ### Demonstratie: alle inschrijvingen
#
# Voor een overzicht van alle inschrijvingen met de gegevens van de leden en van de events gebruiken we een join.
# Dit is een voorproefje - in een volgend notebook werken we dit verder uit.
# + language="bash"
# sqlite3 example.db
#
# SELECT evt.datum
# , evt.beschrijving
# , lid.voornaam
# , lid.achternaam
# , lid.email
# , ins.maaltijd
# FROM leden lid, events evt, inschrijvingen ins
# WHERE lid.lidnr=ins.lidnr AND evt.eventnr=ins.eventnr;
| sqlite-0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from genesis import parsers
import matplotlib.pyplot as plt
# %matplotlib inline
# Default figure size for all plots in this notebook.
plt.rcParams['figure.figsize'] = [8,8]
# %config InlineBackend.figure_format = 'retina'
# +
fname = 'data/mod_3.out'

# This parses the entire output file
gout = parsers.parse_genesis_out(fname)

# This is a dict of:
gout.keys()
# -
# param is the readback of the basic parameters:
list(gout['param'])[0:10]
# Data contains the lattice readback arrays, slice data, and possibly field and particle data
data = gout['data']
for k in data:
    print(k, data[k].shape)
# +
z = data['z']
# One power-vs-z curve per slice index.
for i in range(len(data['index'])):
    power = data['power'][i, :]
    plt.plot(z, power)
# -
# # Plotting
# +
x1 = gout['data']['z']
x2 = gout['data']['z']
y1 = gout['data']['aw']
y2 = gout['data']['qfld']

# Top panel: undulator parameter ``aw`` along the lattice.
plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'o-')
plt.title('Lattice')
# BUG FIX: this panel plots ``aw`` (y1) but was labeled 'qw'.
plt.ylabel('aw')

# Bottom panel: quadrupole field ``qfld`` along the lattice.
plt.subplot(2, 1, 2)
plt.plot(x2, y2, '.-')
plt.xlabel('z (m)')
plt.ylabel('qfld')

plt.show()
# -
| examples/example_parsing_genesis_out.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Metrics analysis - Driver Failure
# This notebook generates the chart for driver failures.
# +
# selectivity per run. This is required to compute the input throughput
NOTEBOOK_PHASE = 3

# Map each benchmark phase to its selectivity-derived input factor.
# BUG FIX: the previous if/elif chain silently left ``inputfactor`` undefined
# for any phase outside 1-3; an unknown phase now fails fast with a KeyError.
_INPUT_FACTOR_BY_PHASE = {1: 1, 2: 2, 3: 3.166667}
inputfactor = _INPUT_FACTOR_BY_PHASE[NOTEBOOK_PHASE]

print("Will use input factor: " + str(inputfactor))
# -
# settings for saving plots
saveplots = True  # when False, save_img_colored_and_grayscale becomes a no-op
dpiResolution = 200  # DPI used for the colored PNG export
import pyspark.sql.functions as F
import numpy as np
# Import to indent the plots in the notebook
# %matplotlib notebook
# %matplotlib inline
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.collections as collections
import seaborn as sns
from IPython.core.display import display, HTML
from PIL import Image
from matplotlib.lines import Line2D
from matplotlib.ticker import AutoMinorLocator
# +
# Python imports
import pandas as pd
from collections import defaultdict
from datetime import timedelta
from datetime import datetime
import numpy as np
import pytz
import math
# SQL imports
from pyspark.sql.functions import *
from pyspark.sql import Window
from pyspark.sql import functions
from pyspark.sql.types import IntegerType, LongType, DoubleType, TimestampType, StringType
# -
# settings to get plots in the right style
plt.style.use('ggplot')
plt.style.use('seaborn-deep')
# Fonts and label sizes (large, for readability in the paper figures).
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 20
plt.rcParams['axes.labelsize'] = 20
plt.rcParams['axes.labelweight'] = 'normal'
plt.rcParams['axes.labelcolor'] = 'black'
plt.rcParams['axes.facecolor'] = 'white'
plt.rcParams['axes.edgecolor'] = 'lightgrey'
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['axes.titleweight'] = 'normal'
plt.rcParams['figure.edgecolor'] = 'lightgrey'
plt.rcParams['xtick.labelsize'] = 20
plt.rcParams['ytick.labelsize'] = 20
plt.rcParams['xtick.color'] = 'black'
plt.rcParams['ytick.color'] = 'black'
plt.rcParams['legend.fontsize'] = 20
plt.rcParams['legend.edgecolor'] = 'lightgrey'
plt.rcParams['figure.titlesize'] = 20
plt.rcParams['figure.titleweight'] ='bold'
# Grid and spines: dotted grey grid, only the bottom spine visible.
plt.rcParams['grid.color'] = 'grey'
plt.rcParams['grid.linestyle'] = ':'
plt.rcParams['figure.facecolor'] = 'white'
plt.rcParams['axes.spines.top'] = False
plt.rcParams['axes.spines.right'] = False
plt.rcParams['axes.spines.left'] = False
plt.rcParams['axes.spines.bottom'] = True
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] ='out'
plt.rcParams["date.autoformatter.minute"] = "%H:%M"
# NOTE(review): 'font.family' is assigned twice; this second assignment wins,
# so the figures actually render in Times New Roman — confirm that is intended.
plt.rcParams["font.family"] = "Times New Roman"
# check if Spark is running
spark
# List of which frameworks should be included in this comparison
# Only Spark frameworks because they only have a driver.
frameworks_that_could_be_in_the_data = ["SPARK", "STRUCTUREDSTREAMING"]
# AMO = at-most-once, ALO = at-least-once processing guarantee.
frameworks = ["SPARK_AMO", "SPARK_ALO", "STRUCTUREDSTREAMING_AMO", "STRUCTUREDSTREAMING_ALO"]
frameworksPrintable = {
    "SPARK": "Spark Streaming",
    "STRUCTUREDSTREAMING":"Structured Streaming"
}
stages=["ingest", "parse", "join", "aggregate", "window"]
# Build the glob path of each framework/guarantee combination's result files.
dataPath = dict()
for i in frameworks_that_could_be_in_the_data:
    dataPath[i + "_AMO"] = "./scalability-data/driver-failure/" + i + "/stage" + str(NOTEBOOK_PHASE) + "/5x-4cpu-20gb/AMO/*"
    dataPath[i + "_ALO"] = "./scalability-data/driver-failure/" + i + "/stage" + str(NOTEBOOK_PHASE) + "/5x-4cpu-20gb/ALO/*"
print("The paths that will be read: ")
dataPath
# ## General Methods
# +
def datetimeFromEpoch(epoch):
    """Convert an epoch timestamp in milliseconds to a naive UTC datetime."""
    seconds, millis = divmod(epoch, 1000)
    return datetime.utcfromtimestamp(seconds).replace(microsecond=millis * 1000)
datetimeFromEpochUDF = functions.udf(datetimeFromEpoch, TimestampType())
# -
# method to save the image
def save_img_colored_and_grayscale(path_colored_img):
    """Save the current matplotlib figure as ``<path>.png`` plus a grayscale copy.

    No-op when the module-level ``saveplots`` flag is False.
    """
    if saveplots:
        plt.savefig(path_colored_img + '.png', dpi=dpiResolution, bbox_inches="tight", pad_inches = 0)
        # Re-open the colored PNG and convert it to 8-bit grayscale ('L' mode).
        im = Image.open(path_colored_img + '.png').convert('L')
        im.save(path_colored_img + '_grayscale.png', dpi=(300, 300))
# # Latency
# Read in latency data and transform in the right format for plotting.
# Check if each framework had all containers running during the benchmark. To avoid including runs which had issues with some components.
# Sanity check: each framework run must have had all of its containers up for
# the whole benchmark, otherwise the run is flagged so it can be excluded.
containerCheck = defaultdict(dict)
for framework in frameworks:
    try:
        # Expected container count differs per framework topology.
        if framework == "KAFKASTREAMS":
            requiredAmtContainers = 5
        elif framework == "FLINK":
            requiredAmtContainers = 6
        else:
            requiredAmtContainers = 7
        containerCheckPhase = spark.read.option("header", "true").option("inferSchema", "true") \
            .csv(dataPath[framework] + "/resources-per-container-timeseries.csv/*")
        amtOfContainers = containerCheckPhase.select("containerName").distinct().count()
        if amtOfContainers != requiredAmtContainers:
            containerCheckPhase.select("containerName").distinct().show()
            print("WARNING FOR " + framework + " volume: " + str(NOTEBOOK_PHASE) + " amount of containers: " + str(amtOfContainers))
        else:
            print("all checks passed for " + framework)
    # BUG FIX: was a bare ``except:`` which also swallows KeyboardInterrupt and
    # SystemExit; narrow it to Exception (missing data raises AnalysisException).
    except Exception:
        print('framework ' + framework + " not in data")
# Phases that are present in the data
# Load per-framework latency timeseries (startup excluded) and add a
# ``timeSec`` column: minutes elapsed since the run's start time.
latencyTimeseriesDataWithoutStartup = dict()
for framework in frameworks:
    latencyTimeseriesDataPhase = spark.read.option("header", "true").option("inferSchema", "true") \
        .csv(dataPath[framework] + "/latency-timeseries-data-without-startup.csv/*") \
        .withColumn("time", datetimeFromEpochUDF(col("outputBucketTime")))
    # startTime is in epoch ms; dividing by 60000 converts to minutes.
    minTimeSec = int(latencyTimeseriesDataPhase.select("startTime").collect()[0][0]) / 60000
    latencyTimeseriesDataWithoutStartup[framework] = latencyTimeseriesDataPhase.withColumn("timeSec", (col("outputBucketTime")/60000.0)-minTimeSec)
# # Throughput
# ## Stage 0 throughput
# +
# Output throughput per framework, bucketed per second, with ``timeSec``
# expressing minutes elapsed since the run's start time.
throughputTimeseriesDataWithStartup = dict()
for framework in frameworks:
    throughputTimeseriesDataWithStartupPhase = spark.read.option("header", "true").option("inferSchema", "true") \
        .csv(dataPath[framework] + "/output-throughput-timeseries-second-buckets.csv/*") \
        .withColumn("time", datetimeFromEpochUDF(col("outputBucketTime")))
    minTimeSec = int(throughputTimeseriesDataWithStartupPhase.select("startTime").collect()[0][0]) / 60000
    throughputTimeseriesDataWithStartup[framework] = throughputTimeseriesDataWithStartupPhase.withColumn("timeSec", (col("outputBucketTime")/60000.0)-minTimeSec)

# Same for the input throughput (note: keyed on ``inputBucketTime``).
inputThroughputTimeseriesDataWithStartup = dict()
for framework in frameworks:
    inputThroughputTimeseriesDataWithStartupPhase = spark.read.option("header", "true").option("inferSchema", "true") \
        .csv(dataPath[framework] + "/input-throughput-timeseries-second-buckets.csv/*") \
        .withColumn("time", datetimeFromEpochUDF(col("inputBucketTime")))
    minTimeSec = int(inputThroughputTimeseriesDataWithStartupPhase.select("startTime").collect()[0][0]) / 60000
    inputThroughputTimeseriesDataWithStartup[framework] = inputThroughputTimeseriesDataWithStartupPhase.withColumn("timeSec", (col("inputBucketTime")/60000.0)-minTimeSec)
# -
# # CPU
# Per-container CPU usage timeseries for each framework.
cpuTimeseries = dict()
for framework in frameworks:
    try:
        cpuTimeseries[framework] = spark.read.option("header", "true").option("inferSchema", "true") \
            .csv(dataPath[framework] + "/cpu-per-container-timeseries.csv/*") \
            .withColumn("timeParsed", datetimeFromEpochUDF(col("time")))
    # BUG FIX: was a bare ``except:`` which also swallows KeyboardInterrupt and
    # SystemExit; narrow it to Exception (missing data raises AnalysisException).
    except Exception:
        print("didnt work for " + framework)
# Distinct container names per framework, as small pandas frames.
containersPandas = dict()
for framework in frameworks:
    containersPandas[framework] = cpuTimeseries[framework].select("containerName").distinct().toPandas()
# You can use this to assign different colors to the different workers in the plot. We didn't use this here.
# Map label to RGB
color_map = dict()
for framework in frameworks:
    #Assign different color to each container
    rgb_values = sns.diverging_palette(255, 133, l=60, n=len(containersPandas[framework]), center="dark")
    color_map[framework] = dict(zip(containersPandas[framework]['containerName'], rgb_values))
sns.palplot(sns.diverging_palette(255, 133, l=60, n=len(containersPandas), center="dark"))
# Map label to RGB
# NOTE(review): this second loop overwrites the diverging-palette color_map
# built above with an HUSL palette — presumably intentional, confirm.
color_map = defaultdict(dict)
for framework in frameworks:
    #Assign different color to each container:
    rgb_values = sns.husl_palette(len(containersPandas[framework]), h=0.4, l=0.65, s=1)
    color_map[framework] = dict(zip(containersPandas[framework]['containerName'], rgb_values))
# CPU timeseries including the startup period, with ``timeSec`` expressing
# minutes elapsed since the run's start time.
cpuTimeseriesDataWithStartup = dict()
for framework in frameworks:
    cpuOfPhasePerContainer = spark.read.option("header", "true").option("inferSchema", "true") \
        .csv(dataPath[framework] + "/cpu-per-container-timeseries.csv/*") \
        .withColumn("timeParsed", datetimeFromEpochUDF(col("time")))
    minTimeSec = int(cpuOfPhasePerContainer.select("startTime").collect()[0][0]) / 60000
    cpuTimeseriesDataWithStartup[framework] = cpuOfPhasePerContainer \
        .withColumn("timeSec", (col("time")/60000.0)-minTimeSec)
# Drop the first two minutes (startup/warm-up) from the CPU timeseries so the
# steady-state charts are not skewed by initialization overhead.
cpuTimeseriesDataWithoutStartup = dict()
for framework in frameworks:
    cpuTimeseriesDataPhase = cpuTimeseriesDataWithStartup[framework]
    if len(cpuTimeseriesDataPhase.head(1)) > 0:
        startTime = cpuTimeseriesDataPhase.agg(F.min("time")).collect()[0][0]
        # Keep only samples more than 120 s (120000 ms) after the first one.
        cpuWithoutStartup = cpuTimeseriesDataPhase \
            .filter(col("time")>startTime + 120000)
        minTimeSec = int(cpuTimeseriesDataPhase.select("startTime").collect()[0][0]) / 60000
        cpuTimeseriesDataWithoutStartup[framework] = cpuWithoutStartup \
            .withColumn("timeSec", (col("time")/60000.0)-minTimeSec)
    else:
        # BUG FIX: the message previously printed ``str(i)`` — a stale loop
        # variable from an earlier cell — instead of the framework at hand.
        print("No data for framework " + framework)
# Sorted distinct container names per framework (used to label CPU curves).
containersPandasPerPhase = dict()
for framework in frameworks:
    containersPandasPerPhase[framework] = cpuTimeseriesDataWithStartup[framework] \
        .select("containerName").distinct().orderBy("containerName").toPandas()
# # Metric correlations
# Plotting different metrics for a certain stage together.
# For generating a chart of the four metrics
# For generating a chart of the four metrics
def generateDriverFailureChart(colNum, containersPandas, latencyPandas, throughputPandas, inputThroughputPandas, cpuPandas, start, end):
    """Draw column ``colNum`` (one framework) of the shared 4-row figure.

    Rows: latency percentiles, input throughput, output throughput, CPU usage.
    Relies on module-level state: the ``ax`` grid from ``plt.subplots`` and the
    loop variable ``framework`` set by the caller's enclosing loop.

    NOTE(review): the ``containersPandas`` and ``cpuPandas`` parameters are
    never used — CPU data is re-read from the global
    ``cpuTimeseriesDataWithStartup``/``containersPandasPerPhase`` instead;
    confirm whether the parameters can be dropped.
    """
    minor_x_locator = AutoMinorLocator(3) # how many minor grid lines in between two major grid lines for x axis
    # Row 0: latency percentiles (1st, 50th, 99th) over time.
    pct01_line, = ax[0, colNum].plot(latencyPandas["timeSec"], latencyPandas["percentile_01_second"], color="#a8a8a8", linestyle="--", label = "1p")
    pct50_line, = ax[0, colNum].plot(latencyPandas["timeSec"], latencyPandas["percentile_50_second"], color="#7e7e7e", linestyle="solid", label = "50p")
    pct99_line, = ax[0, colNum].plot(latencyPandas["timeSec"], latencyPandas["percentile_99_second"], color="#151515", linestyle="solid", label = "99p")
    ax[0, colNum].xaxis.set_minor_locator(minor_x_locator)
    minor_y_locator_1 = AutoMinorLocator(2) # how many minor grid lines in between two major grid lines for y axis
    ax[0, colNum].yaxis.set_minor_locator(minor_y_locator_1)
    ax[0, colNum].grid(which='minor', color='black')
    # Latency values are in ms; render tick labels in whole seconds.
    ax[0, colNum].get_yaxis().set_major_formatter(
        matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000.0), ',').replace(',', ' ') + " s"))
    # Row 1: input throughput, scaled by the phase's selectivity factor.
    tp_line2 = ax[1, colNum].scatter(inputThroughputPandas["timeSec"], inputThroughputPandas["inputMsgCount"].multiply(inputfactor), s=5, label = "input", color="#7e7e7e")
    ax[1, colNum].get_yaxis().set_major_formatter(
        matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000.0), ',').replace(',', ' ') + "K"))
    ax[1, colNum].xaxis.set_minor_locator(minor_x_locator)
    ax[1, colNum].grid(which='minor', color='black')
    # Row 2: output throughput.
    tp_line1 = ax[2, colNum].scatter(throughputPandas["timeSec"], throughputPandas["outputMsgCount"], s=5, label = "output", color="#151515")
    ax[2, colNum].get_yaxis().set_major_formatter(
        matplotlib.ticker.FuncFormatter(lambda x, p: format(int(x/1000.0), ',').replace(',', ' ') + "K"))
    ax[2, colNum].xaxis.set_minor_locator(minor_x_locator)
    ax[2, colNum].grid(which='minor', color='black')
    # Row 3: per-container CPU usage within the [start, end] minute window.
    cpuTimeseriesDataWithStartupPhase = cpuTimeseriesDataWithStartup[framework]\
        .filter((col("timeSec") >start) & (col("timeSec")<end)).toPandas()
    for contNum, containerId in enumerate(containersPandasPerPhase[framework]['containerName']):
        if "FLINK" in framework: label = "taskmanager-" + str(contNum)
        else: label = containerId
        data = cpuTimeseriesDataWithStartupPhase.loc[cpuTimeseriesDataWithStartupPhase['containerName'] == containerId]
        cpu_worker_line, = ax[3, colNum].plot(data['timeSec'], data['cpuUsagePct'],
                                              c="black", linestyle=":", label="cpu usage worker")
    ax[3, colNum].set_ylim(ymin=0, ymax=110)
    ax[3, colNum].xaxis.set_minor_locator(minor_x_locator)
    minor_y_locator_3 = AutoMinorLocator(2) # how many minor grid lines in between two major grid lines for y axis
    ax[3, colNum].yaxis.set_minor_locator(minor_y_locator_3)
    ax[3, colNum].grid(which='minor', color='black')
    ax[3, colNum].get_yaxis().set_major_formatter(
        matplotlib.ticker.FuncFormatter(lambda x, p: str(int(x)) + " %"))
    # Axis labels only on the leftmost column; legends only on the rightmost.
    if (colNum == 0):
        ax[0, colNum].set_ylabel("latency")
        ax[1, colNum].set_ylabel("input\nthroughput\nmsg/s")
        ax[2, colNum].set_ylabel("output\nthroughput\nmsg/s")
        ax[3, colNum].set_ylabel("CPU")
    if (colNum == len(frameworks)-1):
        ax[0, colNum].legend(loc = "upper right", ncol=4, bbox_to_anchor=(1, 1.45), framealpha=1.0, frameon=False)
        ax[2, colNum].legend(loc = "upper right", ncol=3, bbox_to_anchor=(1, 1.45), framealpha=0.5, frameon=False)
        ax[1, colNum].legend(loc = "upper right", ncol=3, bbox_to_anchor=(1, 1.45), framealpha=0.5, frameon=False)
        ax[3, colNum].legend([cpu_worker_line], ["per worker"], loc = "upper right", bbox_to_anchor=(1, 1.45), frameon=False)
# Extract a short sample window (minutes 10-15 of each run, covering the
# driver failure) from every metric, converted to pandas for plotting.
stageLatencyPandasShortSample = dict()
stageThroughputPandasShortSample = dict()
stageInputThroughputPandasShortSample = dict()
stageCpuPandasShortSample = dict()
start = 10
end = 15
for j, framework in enumerate(frameworks):
    print(framework)
    stageLatencyPandasShortSample[framework] = latencyTimeseriesDataWithoutStartup[framework].orderBy("timeSec") \
        .filter((col("timeSec") >start) & (col("timeSec")<end)).toPandas()
    stageThroughputPandasShortSample[framework] = throughputTimeseriesDataWithStartup[framework].orderBy("timeSec") \
        .filter((col("timeSec") >start) & (col("timeSec")<end)).toPandas()
    stageInputThroughputPandasShortSample[framework] = inputThroughputTimeseriesDataWithStartup[framework].orderBy("timeSec") \
        .filter((col("timeSec") >start) & (col("timeSec")<end)).toPandas()
    stageCpuPandasShortSample[framework] = cpuTimeseriesDataWithoutStartup[framework].orderBy("timeSec") \
        .filter((col("timeSec") >start) & (col("timeSec")<end)).toPandas()
# +
# frameworks=["FLINK", "KAFKASTREAMS", "SPARK", "STRUCTUREDSTREAMING"]
# Human-readable column headers for the figure.
frameworksPrintable2 = {
    "SPARK_AMO": "Spark Str. \n at-most-once",
    "SPARK_ALO": "Spark Str. \n at-least-once",
    "STRUCTUREDSTREAMING_AMO": "Structured Str. \n at-most-once",
    "STRUCTUREDSTREAMING_ALO": "Structured Str. \n at-least-once"
}
# 4 metric rows x 4 framework columns, sharing axes per row/column.
f, ax = plt.subplots(4, 4,figsize=(9, 8), sharey='row', sharex='col')
pad = 5
for j, framework in enumerate(frameworks):
    # BUG FIX: ``start`` and ``end`` are required parameters of
    # generateDriverFailureChart but were not passed before, which raised a
    # TypeError; forward the same window used to slice the samples above.
    generateDriverFailureChart(
        j,
        containersPandas=containersPandas,
        latencyPandas=stageLatencyPandasShortSample[framework],
        throughputPandas=stageThroughputPandasShortSample[framework],
        inputThroughputPandas=stageInputThroughputPandasShortSample[framework],
        cpuPandas=stageCpuPandasShortSample[framework],
        start=start,
        end=end)
    # Column title above each framework's column.
    ax[0, j].annotate(frameworksPrintable2[framework], xy=(0.5, 1.25), xytext=(0, pad),
                      xycoords='axes fraction', textcoords='offset points',
                      size='medium', ha='center', va='baseline')
plt.subplots_adjust(wspace=0.1, hspace=0.35)
save_img_colored_and_grayscale("./figures/driver-failure/overall/phase" + str(NOTEBOOK_PHASE) + "/driver_failure")
plt.show()
# -
| result-analysis/Driver Failure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Transformación de datos
# ===
# #### Contenido
# > * [Indexación y selección](#Indexación-y-selección)
# * [Indexación por nombres de filas y columnas](#Indexación-por-nombres-de-filas-y-columnas)
# * [Indexación numérica](#Indexación-numérica)
# * [Indexación booleana](#Indexación-booleana)
# * [Agrupamiento, agregación y pivote](#Agrupamiento,-agregación-y-pivote)
# * [Agrupamiento](#Agrupamiento)
# * [Agregación](#Agregación)
# * [Pivotes](#Pivotes)
# * [Transformaciones](#Transformaciones)
# * [Stack & unstack](#Stack-&-unstack)
# * [Tablas dinámicas](#Tablas-dinámicas)
# * [Unión](#Unión)
# * [Variables dummy](#Variables-dummy)
# * [Inserción, borrado, casos duplicados y datos faltantes](#Inserción,-borrado,-casos-duplicados-y-datos-faltantes)
# * [Inserción de Columnas](#Inserción-de-columnas)
# * [Inserción de filas](#Inserción-de-filas)
# * [Casos duplicados y datos faltantes](#Casos-duplicados-y-datos-faltantes)
# * [Estadísticos descriptivos](#Estadísticos-descriptivos)
# * [Paneles de DataFrames](#Paneles-de-DataFrames)
# **Preparación de datos**
# Muchos de los ejemplos anteriores pueden ser aplicados directamente a las columnas de un dataframe.
# +
## import the libraries
import pandas
import numpy as np
#pandas.set_option('display.notebook_repr_html', False)
# -
## lee el archivo del disco.
x = pandas.read_csv('files/iris.csv',
sep = ',',
thousands = None,
decimal = '.')
x.describe()
# # Indexación y selección
# [Contenido](#Contenido)
## número de filas del dataframe
len(x)
## número de columnas del dataframe
len(list(x.axes[1]))
## total de datos
x.size
## transpuesta
print(x.T.head())
# ## Indexación por nombres de filas y columnas
# [Contenido](#Contenido)
## first rows of the dataframe (5 by default)
print(x.head())
## first rows of the dataframe
print(x.head(3))
## last rows of the dataframe
print(x.tail())
## last rows of the dataframe
print(x.tail(3))
## names of the columns
list(x.axes[1])
## names of the columns
x.columns
## names of the rows (index)
x.index
## names of the rows (index)
print(list(x.index))
## all rows of the Species column
#-R-# print(head(x[,'Species'], 10))
x['Species'].head()
## several columns
x[['Species', 'Sepal_Length']].head()
## another way to obtain a column
x.get('Species').head()
## access with the '.' operator
x.Species.head()
x.loc[:,['Species']].head()
## alternative way to access an element
x.loc[2]
x.loc[2].Sepal_Length
x.loc[2]['Sepal_Length']
## value for a particular row and column
x.loc[2,['Species', 'Sepal_Length']]
## names for the rows. the names must be unique
key = ['case_{}'.format(n) for n in range(150)]
key[0:5]
## change the names of the rows
x.index = key
x.head()
## selection by row name
x.loc['case_2']
## selection by row name
x.loc[['case_2', 'case_4']]
## selection by row and column names
x.loc[['case_2'], ['Sepal_Length']]
x.loc['case_2', 'Sepal_Length']
## selection by row and column names
x.loc[['case_2', 'case_4'], ['Sepal_Length', 'Sepal_Width']]
## return to the default index
## converts the index into a column
x = x.reset_index()
x.head()
del x['index']
x.head()
# ## Indexación numérica
# [Contenido](#Contenido)
## selection of the first 3 rows
x[:3]
## from the third row up to the last one
x[3:].head()
## selection by intervals (every 15th row)
print( x[::15] )
## reverse order
print( x[::-1].head() )
## all rows except the last one
x[:-1].tail()
## values for a particular row
x.loc[1]
x.iloc[2]
x.iloc[2, 3]
## values for a particular row (as a DataFrame)
x.loc[[2]]
## indexes as a vector
x.iloc[[1, 3, 5]]
## indexes as a vector (rows and columns)
x.iloc[[1, 3, 5],[0, 4]]
## fast access to a single element
x.at[0, 'Sepal_Length']
# ## Indexación booleana
# [Contenido](#Contenido)
## conditional selection (boolean mask)
(x['Species'] == 'virginica').head()
## conditional selection
x.loc[x['Species'] == 'virginica'].head()
## `isin()` method
w = x['Species'].isin(['virginica'])
w.head()
x[w].head()
## conditional selection
x.loc[x['Petal_Length'] > 3.2].head()
## case counts
x['Species'].value_counts()
# # Agrupamiento, agregación y pivote
# [Contenido](#Contenido)
# **Preparación de datos.**
# +
## import the libraries
import pandas
import numpy as np
#pandas.set_option('display.notebook_repr_html', False)
# -
## read the file from disk.
x = pandas.read_csv('files/iris.csv',
                    sep = ',',
                    thousands = None,
                    decimal = '.')
# ## Agrupamiento
# [Contenido](#Contenido)
## sort along an axis (axis=1 orders the columns by name).
## NOTE: `axis` is passed by keyword because positional arguments to
## sort_index were deprecated and subsequently removed in pandas 2.0.
## head(200) simply returns all 150 rows since the frame is smaller.
x.sort_index(axis = 1, ascending = True).head(200)
## sort by several columns
## returns the rows reordered
## sorts first by Sepal_Width and then by Sepal_Length
x.sort_values(by = ['Sepal_Width', 'Sepal_Length']).head(20)
## select a subset of the data.
x[x['Species'] == 'virginica'].head()
## partition by the values of the `Species` column
y = x.groupby('Species')
y.groups.keys()
## number of groups
len(y.groups)
## size of each group
y.size()
## elements of one subgroup
x.loc[y.groups['setosa']].head()
## elements of another subgroup
x.loc[y.groups['virginica']].head()
# ## Agregación
# [Contenido](#Contenido)
## functions that can be applied to a DataFrame:
##
##   abs        all       any       clip     clip_lower   clip_upper
##   corr       corrwith  count     cov      cummax       cummin
##   cumprod    cumsum    describe  diff     eval         kurt
##   mad        max       mean      median   min          mode
##   ct_change  prod      quantile  rank     round        sem
##   skew       sum       std       var
##
## a new DataFrame is generated
x.groupby('Species').sum()
(x.groupby('Species').sum())["Sepal_Length"]
# +
## concatenation of the groups
z = pandas.concat( [x.iloc[y.groups['setosa']],
                    x.iloc[y.groups['virginica']],
                    x.iloc[y.groups['versicolor']]])
z['Species'].value_counts()
# -
## case counting.
## number of cases per `Species`
x['Species'].value_counts()
## case count per `Sepal_Length`
x['Sepal_Length'].value_counts()
## application of a function to specific columns
## of a data.frame
import numpy as np
x[['Sepal_Length', 'Sepal_Width']].apply(np.mean)
## apply the function to the specified column, by groups
(x.groupby('Species').mean())["Sepal_Length"]
## apply the function to the specified columns, by groups
(x.groupby('Species').mean())[["Sepal_Length",
                               "Sepal_Width",
                               "Petal_Length",
                               "Petal_Width"]]
# ## Pivotes
# [Contenido](#Contenido)
## add a key that identifies each case
x['key'] = list(range(150))
x.head()
# +
z = pandas.melt(x,                            # DataFrame
                id_vars = ['key', 'Species'], # columns that are not stacked
                var_name = 'Variables',       # name of the column holding the stacked column names
                value_name = 'Valores')       # name of the column holding the values
# -
z.head()
del x['key']
# # Transformaciones
# [Contenido](#Contenido)
# **Preparación de datos.**
# +
## import the libraries
import pandas
import numpy as np
#pandas.set_option('display.notebook_repr_html', False)
# -
## read the file from disk.
x = pandas.read_csv('files/iris.csv',
                    sep = ',',
                    thousands = None,
                    decimal = '.')
# ## Stack & unstack
# [Contenido](#Contenido)
## stack pivots the columns into rows (long format)
x.stack().head(20)
## unstack is the inverse operation
(x.stack()).unstack().head(4)
# ## Tablas dinámicas
# [Contenido](#Contenido)
m = pandas.DataFrame( {'key1' : ['a', 'a', 'b', 'b', 'c', 'c'],
                       'key2' : ['A', 'B', 'A', 'B', 'A', 'B'],
                       'values1' : [ 1, 2, 3, 4, 5, 6 ],
                       'values2' : [ 7, 8, 9, 10, 11, 12]})
print(m)
# +
## pivot table averaging the values per (key1, key2) pair
z = pandas.pivot_table(m,
                       index = ['key1', 'key2'],
                       values = ['values1', 'values2'])
print(z)
# +
## same pivot with the index levels swapped
z = pandas.pivot_table(m,
                       index = ['key2', 'key1'],
                       values = ['values1', 'values2'])
print(z)
# -
# ## Unión
# [Contenido](#Contenido)
# +
## not strictly a join.
## it only works when one dataframe has a number of
## rows that is a multiple of the other one.
d1 = pandas.DataFrame({ 'x' : list(range(1,5)),
                        'y' : list(range(6,10))})
d2 = pandas.DataFrame({ 'x' : [ 1, 2, 3, 4, 5, 1, 2, 3, 4, 5],
                        'w' : [10, 20, 30, 40, 50, 60, 70, 80, 90, 100]})
pandas.merge(d1, d2)
# -
# ## Variables dummy
# [Contenido](#Contenido)
## generate dummy variables for categorical variables.
pandas.get_dummies(x.Species)
# # Inserción, borrado, casos duplicados y datos faltantes
# [Contenido](#Contenido)
# **Preparación de datos**
# +
## import the libraries
import pandas
import numpy as np
#pandas.set_option('display.notebook_repr_html', False)
# -
## read the file from disk.
x = pandas.read_csv('files/iris.csv',
                    sep = ',',
                    thousands = None,
                    decimal = '.')
# ## Inserción de columnas
# [Contenido](#Contenido)
## creation of a new column (the scalar is broadcast)
x['n'] = 1
x.head()
## modification of a particular value
x.iat[0,5] = 10
x.head()
## modification of a whole column
x['n'] = list(range(150))
x.head()
## deletion of the column
del x['n']
x.head()
## DataFrames are dictionaries and can be manipulated as such.
## the position where the column of logical
## values is inserted is given explicitly
x.insert(2, 'logical_value', x['Sepal_Length'] > 4)
x.head()
del x['logical_value']
x.head()
# +
## alignment
## adds the colB columns; unmatched columns produce NaN
a = pandas.DataFrame({'colA': [1, 2],
                      'colB': [3, 4]})
b = pandas.DataFrame({'colB': [5, 6],
                      'colC': [7, 8]})
print(a)
print('')
print(b)
print('')
print(a + b)
# -
## every mathematical operation available
## in numpy can be applied
np.sqrt(a)
# ## Inserción de filas
# [Contenido](#Contenido)
# +
## addition of a row (at the end)
u = pandas.DataFrame({'Sepal_Length' : [1],
                      'Sepal_Width' : [1],
                      'Petal_Length' : [1],
                      'Petal_Width' : [1],
                      'Species' : ['setosa']})
## DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
## pandas.concat is the supported (and backward-compatible) replacement.
pandas.concat([x, u], ignore_index = True).tail()
# -
# ## Casos duplicados y datos faltantes
# [Contenido](#Contenido)
## create a random vector of indexes
u = np.random.choice(range(150),
                     size=20,
                     replace=False)
u
## subsample of the original data.frame `x`
y = x.loc[u].copy()
y.head()
## change the names of the rows
y.index = list(range(20))
y
## several records of the subsample `y` are made incomplete
## by changing several values of the `Sepal_Length` column to NA
u = np.random.choice(range(20), size=10, replace=False)
u
y.iloc[u]
## cases with missing data
y.loc[u, 'Sepal_Length'] = np.nan
y
## stacking of dataframes
## cases 151 to 170 contain duplicated cases or
## cases with missing data
w = pandas.concat([x, y])
w.index = list(range(170))
w.tail(25)
## duplicated cases
## note that case 142 appears duplicated
w[w.duplicated()]
## unique (non-duplicated) cases
## cases with NA values are treated as unique
## note that several cases between 151 and 170 were removed
w.drop_duplicates().tail(20)
## null cases
w['Sepal_Length'].isnull().tail(10)
## logical operators must be used to consider more columns
w[w['Sepal_Length'].isnull()]
## complete cases (without missing values)
w.dropna().tail(20)
# ## Estadísticos descriptivos
# [Contenido](#Contenido)
## summary of descriptive statistics
print(x.describe())
## column means
x.mean()
## row means (axis=1)
x.mean(1).head()
# ## Paneles de DataFrames
# [Contenido](#Contenido)
# +
## create the DataFrames
df1 = pandas.DataFrame({'colA': [1, 2],
                        'colB': [3, 4]})
df2 = pandas.DataFrame({'colB': [5, 6],
                        ' colC': [7, 8]})
df3 = pandas.DataFrame({'colC': [9, 0],
                        ' colD': [1, 2]})
# -
## creation of the panel as a dictionary
pdPanel = { 'df1': df1,
            'df2': df2,
            'df3': df3}
print(pdPanel)
| P04-transformacion.ipynb |
# + [markdown] ein.tags=["worksheet-0"]
# # Linear Classification (Irises)
# + ein.tags=["worksheet-0"]
# third party
import matplotlib.pyplot as pyplot
import numpy
import pandas
import seaborn
from sklearn import datasets
# FIX: sklearn.cross_validation was removed in scikit-learn 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
# + ein.tags=["worksheet-0"]
# %matplotlib inline
# + ein.tags=["worksheet-0"]
# Load the classic iris data set (150 samples, 4 features, 3 species).
iris = datasets.load_iris()
x_iris, y_iris = iris.data, iris.target
# + ein.tags=["worksheet-0"]
print(x_iris.shape)
print(y_iris.shape)
# + ein.tags=["worksheet-0"]
# First sample (sepal/petal measurements) and its class label.
print(x_iris[0], y_iris[0])
# + ein.tags=["worksheet-0"]
print(iris.target_names)
# + [markdown] ein.tags=["worksheet-0"]
# ## Model with First Two Attributes
# + [markdown] ein.tags=["worksheet-0"]
# The first model will be a linear model with two input attributes.
# + [markdown] ein.tags=["worksheet-0"]
# First, get all the rows and the first two-columns for the `x` data.
# + ein.tags=["worksheet-0"]
x, y = x_iris[:, :2], y_iris
# + ein.tags=["worksheet-0"]
# Hold out 25% of the data for testing; fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=33)
# + ein.tags=["worksheet-0"]
print(x_train.shape, y_train.shape)
# + [markdown] ein.tags=["worksheet-0"]
# ## Standardize the Data
# + ein.tags=["worksheet-0"]
frame = pandas.DataFrame(x_train)
frame.head()
# + ein.tags=["worksheet-0"]
# Kernel-density plots of the two raw features to compare their ranges.
seaborn.set_style('whitegrid')
axes = seaborn.distplot(frame[0], hist=False, label=iris.feature_names[0])
axes = seaborn.distplot(frame[1], ax=axes, hist=False, label=iris.feature_names[1])
title = axes.set_title("First Two Attributes")
# + [markdown] ein.tags=["worksheet-0"]
# Looking at the plot, you can see that they have different ranges, so the data will be standardized. By subtracting the mean from each data point and dividing by the standard deviation for that column, each column is changed to have a mean of 0 and a standard deviation of 1.
# + ein.tags=["worksheet-0"]
# Fit the scaler on the training split only, then apply to both splits,
# so no test-set statistics leak into training.
scalar = preprocessing.StandardScaler().fit(x_train)
x_train = scalar.transform(x_train)
x_test = scalar.transform(x_test)
# + ein.tags=["worksheet-0"]
numpy.unique(y_train)
# + ein.tags=["worksheet-0"]
iris.target_names
# + ein.tags=["worksheet-0"]
iris.feature_names[:2]
# + ein.tags=["worksheet-0"]
def print_irises():
    """Scatter-plot the (unscaled) training samples in the first two
    feature dimensions, one color per iris species, and return the axes."""
    figure = pyplot.figure()
    plot_axes = figure.gca()
    plot_axes.set_xlabel(iris.feature_names[0])
    plot_axes.set_ylabel(iris.feature_names[1])
    species_colors = ('r', 'b', 'g')
    for label_index, species in enumerate(iris.target_names):
        mask = y_train == label_index
        plot_axes.plot(frame[0][mask], frame[1][mask],
                       '{0}o'.format(species_colors[label_index]),
                       label=str(species))
    plot_axes.set_title("{0} vs {1}".format(iris.feature_names[0],
                                            iris.feature_names[1]))
    plot_axes.legend()
    return plot_axes
axe = print_irises()
# + [markdown] ein.tags=["worksheet-0"]
# # Stochastic Gradient Descent
# + [markdown] ein.tags=["worksheet-0"]
# First we need to re-think the problem as a binary classification problem - *Can we predict whether a flower is a setosa or not using our two features?* Given only two features, it doesn't appear that we can separate the versicolor from the virginica using lines, but we might be able to separate setosa from the other two species.
# + ein.tags=["worksheet-0"]
## imports
from sklearn.linear_model import SGDClassifier
# + ein.tags=["worksheet-0"]
numpy.unique(y_train)
# + [markdown] ein.tags=["worksheet-0"]
# Since we don't care about separating *versicolor* from *virginica* and *setosa* is 0, we can re-do the y-training set to only have 0's (setosa) and 1's (not setosa).
# + ein.tags=["worksheet-0"]
y_train = pandas.Series(y_train)
# + ein.tags=["worksheet-0"]
# Collapse to a binary target: 0 = setosa, 1 = everything else.
y_binary = y_train.apply(lambda x: 0 if x == 0 else 1)
# + ein.tags=["worksheet-0"]
# Sanity checks: class-0 count and total length are preserved.
len(y_binary[y_binary == 0]) == len(y_train[y_train==0])
len(y_binary) == len(y_train)
y_binary.unique()
# + ein.tags=["worksheet-0"]
classifier = SGDClassifier()
classifier = classifier.fit(x_train, y_binary)
# + ein.tags=["worksheet-0"]
# The learned decision boundary: intercept + w_1*x_1 + w_2*x_2 = 0.
intercept = classifier.intercept_[0]
w_1, w_2 = classifier.coef_[0]
print("{0} + {1} x_1 + {2} x_2 = 0".format(intercept, w_1, w_2))
# + ein.tags=["worksheet-0"]
print("{0} + {1} x_1 = {2} x_2".format(intercept, w_1, -w_2))
# + ein.tags=["worksheet-0"]
print("({0} + {1} x_1)/{2} = x_2".format(intercept, w_1, -w_2))
# + ein.tags=["worksheet-0"]
# Overlay the separating line on the species scatter plot.
axe = print_irises()
x_plot = numpy.linspace(4, 8, 100)
y_plot = (intercept + x_plot * w_1)/-w_2
line = axe.plot(x_plot, y_plot)
| machine_learning/learning_scikit_learn/irises_linear_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preprocessing
# import relevant statistical packages
import numpy as np
import pandas as pd
# import relevant data visualisation packages
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# import custom packages
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score as r2, mean_squared_error
from mlxtend.feature_selection import ExhaustiveFeatureSelector as EFS
from mlxtend.plotting import plot_linear_regression as PLS
from sklearn.preprocessing import scale
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.tree import DecisionTreeRegressor, export_graphviz
# FIX: sklearn.externals.six was removed in scikit-learn 0.23;
# on Python 3, six.StringIO is io.StringIO, a drop-in replacement.
from io import StringIO
import pydotplus
from IPython.display import Image
from sklearn.ensemble import RandomForestRegressor
# import data
# NOTE(review): absolute local path — assumes the ISLR Boston csv exists here.
url = "/Users/arpanganguli/Documents/Professional/Finance/ISLR/Boston.csv"
Boston = pd.read_csv(url, index_col='SlNo')
# one-hot encode the categorical 'chas' column, dropping the first level
Boston = pd.get_dummies(Boston, columns=['chas'], drop_first=True)
Boston.head()
# +
# Predict median home value (medv) from all remaining columns; 50/50 split.
X = Boston.drop('medv', axis = 1)
y = Boston.medv
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size = 0.5, test_size = 0.5, random_state = 0)
# +
# Compare test-set MSE as a function of the number of trees for three
# settings of max_features (all 13 predictors, 6, and 4).
p = [13, 6, 4]
plt.xkcd()
plt.figure(figsize=(25, 10))
plt.title('MSE for different number of trees', fontsize=30, color='m')
plt.xlabel('number of trees', fontsize=20, color='c')
plt.ylabel('MSE', fontsize=20, color='c')
for j in p:
    mse_values = []
    for k in range(1,500):
        # BUG FIX: max_features was hard-coded to 6, so all three curves
        # were identical; the loop variable j must be used here.
        rf_Boston = RandomForestRegressor(max_features=j, n_estimators=k).fit(X_train, y_train)
        rf_pred = rf_Boston.predict(X_test)
        mse_values.append(mean_squared_error(y_test, rf_pred))
    # DataFrame.append was removed in pandas 2.0; build the frame in one go.
    MSE = pd.DataFrame(mse_values)
    plt.plot(MSE, label='max_features = {}'.format(j))
# Label the three curves so they can be told apart.
plt.legend()
| Chapter 8/Applied Exercises/7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # How to use Python from R
# It is possible to leverage the package **reticulate** [1] for interoperability between Python and R.
# Reticulate embeds a Python session within your R session, enabling seamless, high-performance interoperability.
# It provides a comprehensive set of tools, including facilities for:
#
#
# * Calling Python from R in a variety of ways including R Markdown, sourcing Python scripts, importing Python modules, and using Python interactively within an R session.
#
# * Translation between R and Python objects (for example, between R and Pandas data frames, or between R matrices and NumPy arrays).
#
# * Flexible binding to different versions of Python including virtual environments and Conda environments.
# ## Installation and configuration
# Install reticulate from CRAN and load it.
install.packages("reticulate", repos='http://cran.rstudio.com')
library("reticulate")
# Bind the embedded Python session to a specific interpreter.
use_python("/opt/anaconda/envs/python2/bin/python")
# ## Simple example using the Python module 'os'
# Imported Python modules behave like R objects; `$` accesses attributes.
os <- import("os")
os$name
# ## A practical example using the Python module 'ro4eo'
ro4eo <- import("ro4eo")
# Access token for the EVER-EST platform (redacted placeholder).
token = '<PASSWORD>'
ro_title = 'SatCen Change Detection over Madrid'
ro_description = 'This is the result of the SatCen Change Detection service of the EVER-EST VRC for Land Monitoring'
ro_vrc = 'SatCen'
# Master and slave Sentinel-1 acquisitions forming the change-detection pair.
master = 'https://catalog.terradue.com/sentinel1/search?format=atom&uid=S1A_IW_GRDH_1SDV_20180312T061753_20180312T061818_020978_024034_457D'
slave = 'https://catalog.terradue.com/sentinel1/search?format=atom&uid=S1A_IW_GRDH_1SDV_20180216T061753_20180216T061818_020628_02351B_9F18'
# Area of interest over Madrid as a WKT polygon (lon/lat).
aoi_wkt = 'POLYGON((-3.611068725585937 40.57602136929037,-3.611068725585937 40.42669550575275,-3.4047317504882812 40.42669550575275,-3.4047317504882812 40.57602136929037,-3.611068725585937 40.57602136929037))'
# Create the research object; note master/slave are defined above but
# not passed here — presumably attached in a later step (not shown).
roeo = ro4eo$EOResearchObject(access_token=token,
                              title=ro_title,
                              description=ro_description,
                              vrc=ro_vrc,
                              aoi=aoi_wkt)
# Sanity check that the supplied token is still valid.
roeo$isTokenValid()
# ## References
# [1] https://rstudio.github.io/reticulate/index.html
# [2] https://rstudio.github.io/reticulate/articles/calling_python.html
| How to use Python from R.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# +
# TensorFlow 1.x graph-mode imports (adapted from the official
# tensorflow label_image example).
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import tensorflow as tf
# +
def load_graph(model_file):
    """Deserialize a frozen GraphDef from *model_file* and import it
    into a fresh tf.Graph, which is returned."""
    graph_def = tf.GraphDef()
    with open(model_file, "rb") as model_bytes:
        graph_def.ParseFromString(model_bytes.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def)
    return graph
def read_tensor_from_image_file(file_name,
                                input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
    """Decode an image file into a normalized float tensor of shape
    (1, input_height, input_width, 3) and return it as a numpy array.

    The decoder is chosen from the file extension (png/gif/bmp, else jpeg).
    NOTE: each call adds new ops to the default graph; avoid calling it
    many times in a long-running process.
    """
    input_name = "file_reader"
    file_reader = tf.read_file(file_name, input_name)
    if file_name.endswith(".png"):
        image_reader = tf.image.decode_png(
            file_reader, channels=3, name="png_reader")
    elif file_name.endswith(".gif"):
        # decode_gif returns (frames, h, w, 3); squeeze drops the frame axis.
        image_reader = tf.squeeze(
            tf.image.decode_gif(file_reader, name="gif_reader"))
    elif file_name.endswith(".bmp"):
        image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
    else:
        image_reader = tf.image.decode_jpeg(
            file_reader, channels=3, name="jpeg_reader")
    float_caster = tf.cast(image_reader, tf.float32)
    dims_expander = tf.expand_dims(float_caster, 0)
    resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])
    normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])
    # BUG FIX: the session was created but never closed, leaking resources
    # on every call; the context manager guarantees cleanup.
    with tf.Session() as sess:
        result = sess.run(normalized)
    return result
def load_labels(label_file):
    """Return the labels in *label_file*, one per line, newline-stripped."""
    with tf.gfile.GFile(label_file) as handle:
        return [line.rstrip() for line in handle.readlines()]
# +
# Paths and graph parameters for the retrained classification model.
input_folder="D:/Projects/SDC/Term3/Capstone-Project-SDC-Term3-P3-Udacity/sim-recs/src/tl_detector/"
model_file = "/tmp/optimized_graph.pb"
input_height = 224
input_width = 224
input_mean = 0
input_std = 255
input_layer = "Placeholder"
output_layer = "final_result"
label_file = "/tmp/output_labels.txt"
# tf.import_graph_def prefixes every node name with "import/".
input_name = "import/" + input_layer
output_name = "import/" + output_layer
graph = load_graph(model_file)
input_operation = graph.get_operation_by_name(input_name)
output_operation = graph.get_operation_by_name(output_name)
labels = load_labels(label_file)
# -
def inference(sess, input_tensor, input_operation, output_operation, labels):
    """Run the graph on *input_tensor* and return the squeezed score
    vector together with the indices of the five highest scores
    (descending)."""
    raw_scores = sess.run(output_operation.outputs[0],
                          {input_operation.outputs[0]: input_tensor})
    results = np.squeeze(raw_scores)
    top_indices = results.argsort()[-5:][::-1]
    return results, top_indices
# +
# %matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from random import shuffle
import datetime
import glob
import os
# Collect candidate images; shuffle so each run samples a different subset.
image_paths = glob.glob(os.path.join(input_folder, "*.jpg"))
shuffle(image_paths)
start_time = datetime.datetime.now()
nr_images = 20
fig, axes = plt.subplots(nrows=nr_images, ncols=1, figsize=(28, 8 * nr_images))
# Pre-decode all images into normalized tensors before opening the session.
image_tensors = []
for i, image_name in enumerate(image_paths[:nr_images]):
    image_tensors.append(read_tensor_from_image_file(
        image_name,
        input_height=input_height,
        input_width=input_width,
        input_mean=input_mean,
        input_std=input_std))
sorted_labels = []
sorted_results = []
print ("[INFO] program started on - " + str(start_time))
# One session is reused for all inferences.
with tf.Session(graph=graph) as sess:
    for i, image_name in enumerate(image_paths[:nr_images]):
        results, top_indices = inference(
            sess=sess,
            input_tensor=image_tensors[i],
            input_operation=input_operation,
            output_operation=output_operation,
            labels=labels)
        sorted_labels.append([ labels[j] for j in top_indices])
        sorted_results.append([ results[j] for j in top_indices])
end_time = datetime.datetime.now()
time_diff = end_time - start_time
print (f"Time to run: {time_diff}")
# Show each image with its best guess and confidence.
for i, image_name in enumerate(image_paths[:nr_images]):
    img=mpimg.imread(image_name)
    axes[i].imshow(img)
    axes[i].set_title(f'Best guess: {sorted_labels[i][0]} with certainty {sorted_results[i][0]}')
# -
| train/infer_retrained_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercise
#
# Let's plot the molecule first.
import visualisation
import numpy as np
# Each row of water.txt is one atom; columns 0 and 1 are x/y coordinates.
atoms = np.loadtxt('water.txt')
x = atoms[:, 0]
y = atoms[:, 1]
visualisation.show(x, y)
# Now we can create the function for the rotation matrix.
def rotation_matrix(angle):
    """Return the 2x2 matrix rotating the plane by *angle* degrees."""
    theta = np.deg2rad(angle)
    cos_t, sin_t = np.cos(theta), np.sin(theta)
    return np.array([[cos_t, -sin_t],
                     [sin_t, cos_t]])
# We can then perform the matrix multiplication.
# Note that to achieve this on the correct set of dimensions we use array slicing and transposition.
# This is discussed in detail in LOIL 10.1.
# Rotate the x/y columns by 90 degrees: transpose so coordinates are column
# vectors, multiply, then transpose back.
atoms[:, :2] = np.matmul(rotation_matrix(90), atoms[:, :2].T).T
x = atoms[:, 0]
y = atoms[:, 1]
visualisation.show(x, y)
# A second 90-degree rotation (180 degrees total from the original pose).
atoms[:, :2] = np.matmul(rotation_matrix(90), atoms[:, :2].T).T
x = atoms[:, 0]
y = atoms[:, 1]
visualisation.show(x, y)
| CH40208/comp_chem_methods/matrices_exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''base'': conda)'
# name: python3
# ---
# $\renewcommand{\ket}[1]{|#1\rangle}$
#
# # Pure/Mixed States and Density Matrix
#
# ## StateVector
# A state of a system gives a complete description of the system. State vectors are a really convenient way to express pure states of a system. They are written inside a bra ( $\langle \; |$ ) or a ket ( $\ket{\;}$ ).
#
# Let us see an example, for a one-qubit system in an equal superposition state is given by the state vector
# $$\ket{+} = \frac{1}{\sqrt{2}} (\ket{0}+\ket{1})$$
#
# Information from state vector
# - Possible states: $\ket{0}$ and $\ket{1}$
# - Probability amplitude associated with each state: 50% each
#
# This state vector gives the complete information of the system.
#
# However, there is a limitation in statevector notation. System are represented as linear superposition of normalized statevector or basis states in statevector notation. There are many other systems that cannot be expressed conveniently by using state vector notations. Only those states called pure states can be conveniently written using a State vector notation.
#
# ## Pure State
# “Pure states are those for which we can precisely define their quantum state at every point in time.” Pure states can be written in a form of linear superposition of normalized state vectors.
#
# Consider the previous example: $$\ket{+} = \frac{1}{\sqrt{2}} (\ket{0}+\ket{1}) = \frac{1}{\sqrt{2}}\left( \begin{bmatrix} 1 \\ 0 \end{bmatrix}+ \begin{bmatrix} 0 \\ 1 \end{bmatrix}\right)
# =\begin{bmatrix} \frac{1}{\sqrt{2}} \\ \frac{1}{\sqrt{2}} \end{bmatrix} $$
#
# If we consider initialization and Hadamard gate is ideal, it is absolutely certain that system is in |+> state. Also, we know that we will get each state with a 50% probability upon measurement in ideal condition. So there is no uncertainty about what the system will evolve and hence is a pure state.
#
#
#
# %matplotlib inline
from qiskit.quantum_info import Statevector
from qiskit_textbook.tools import array_to_latex
from qiskit import QuantumCircuit, execute
from qiskit import Aer
from qiskit.visualization import plot_state_qsphere, plot_histogram, plot_bloch_multivector, plot_state_city
# Two ideal simulators: one returning the full statevector, one shot-based.
backend = Aer.get_backend('statevector_simulator')
simulator= Aer.get_backend('aer_simulator')
# Prepare |+> with a single Hadamard and display its statevector.
qc_h = QuantumCircuit(1)
qc_h.h(0)
state_h = Statevector.from_instruction(qc_h)
array_to_latex(state_h)
# Measure and plot the (ideally 50/50) outcome histogram.
qc_h.measure_all()
counts = execute(qc_h,simulator).result().get_counts()
plot_histogram(counts)
# Smilarly a n-qubit pure state will be expressed in state vector notation as: $$ | \psi \rangle = \begin{bmatrix} \alpha_0 \\ \alpha_1 \\ \vdots \\ \alpha_{N-1} \end{bmatrix}, $$
#
# ## Mixed state
# There are some other systems called mixed states that are different from pure states. They consist of statistical ensembles of different quantum states
#
# Let's go through an example to understand it:
#
# Let us take Bell state as an example:
# $$ \ket{\psi_{AB}} = \frac{1}{\sqrt{2}} ( \ket{0 0} + \ket{1 1} ) = \frac{1}{\sqrt{2}} \begin{bmatrix} 1 \\ 0 \\ 0 \\ 1 \end{bmatrix} $$
# In state vector, we can see this is a pure state as the total information of the system can be known.
# Bell State Preparation
# H then CNOT yields (|00> + |11>)/sqrt(2).
qc_et = QuantumCircuit(2)
qc_et.h(0)
qc_et.cx(0,1)
state_et = Statevector.from_instruction(qc_et)
array_to_latex(state_et)
# Measuring both qubits should give only '00' and '11', 50/50.
qc_et.measure_all()
display(qc_et.draw('mpl'))
counts = execute(qc_et,simulator).result().get_counts()
plot_histogram(counts)
# Now let's tweak the experiment by measuring the qubit $q_1$ but not $q_0$.
#
# <img src=https://qiskit.org/textbook/ch-quantum-hardware/images/bell_and_measure.png width="500">
#
# So we get:
# $$\ket{\Psi_{AB}} \xrightarrow[]{\text{Measure } q_1} \ket{\Psi_B}$$
#
# If we measure $q_1$ to be in state $\ket{0}$, $q_0$ would also be in the same state $\ket{0}$. And if measured $\ket{1}$, $q_0$ would be in state $\ket{1}$. And the probability of such occurance is $\frac{1}{2}$.
#
# Say the measurement outcome in $q_1$ is not specific, lets try to represent the state $q_0$ in a general way.
# Here, the state $ \Psi_B$ needs to be represented as an ensemble of multiple quantum states.
# The ensemble of states $ \Psi_B$ is:
# $$
# \psi_B \equiv \left \{| \psi_{B_0} \rangle , | \psi_{B_1} \rangle \right \} = \left \{ | 0_B \rangle , | 1_B \rangle \right \},
# $$
# with their classical probability:
# $$ \left \{ p_0, p_1 \right \} = \left \{ 1/2, 1/2 \right \} $$
# Hence $ \Psi_B$ is a <b>mixed state</b>.
#
# This representation of state $\psi_B$ seems valid. But when we look into system with ensemble of n different states,it starts to seems really inconvenient:
# $$
# \left \{ |\psi_j \rangle \right \}_{j = 1}^n = \left \{ | \psi_1 \rangle, | \psi_2 \rangle, \dots, | \psi_n \rangle \right \},
# $$
# with their classical probabilities:
# $$
# \left \{ p_j \right \}_{j = 1}^n = \left \{ p_1, p_2, \dots, p_n \right \}
# $$
#
# This when we start to see the limitation of statevector representation. It starts to get difficult to track the whole ensemble of states. It gets much more difficult when we apply any gate to the ensemble.
#
# # Density Matrix
# To overcome this limitation, we use a different kind of notation called density matrix notation.
#
# A mixed state, consisting of several possible outcome pure normalized states $|\psi_j \rangle$ , each with probability of occurrence $p_j$, is defined by a density matrix of the form:
#
# $$ \rho \equiv \sum_{j} p_j |\psi_j \rangle \langle \psi_j | $$
#
# For pure states, there is only one $\psi_j$ and $p_j =1$. Density matrix can be considered as a general matrix to describe a quantum state of a system.
#
# For mixed states like $\Psi_B$, we use the states and their probabilities of occurrence to construct density matrix as follows:
# $$
# \begin{aligned}
# \rho_B & = \frac{1}{2} | 0_B \rangle \langle 0_B | + \frac{1}{2} | 1_B \rangle \langle 1_B |
# \\
# \\
# & = \frac{1}{2} \begin{bmatrix} 1 & 0 \end{bmatrix} \begin{bmatrix} 1 \\ 0 \end{bmatrix} + \frac{1}{2} \begin{bmatrix} 0 & 1 \end{bmatrix} \begin{bmatrix} 0 \\ 1 \end{bmatrix}
# \\
# \\
# & = \frac{1}{2} \begin{bmatrix} 1 & 0 \\ 0 & 0 \end{bmatrix} + \frac{1}{2} \begin{bmatrix} 0 & 0 \\ 0 & 1 \end{bmatrix}
# \\
# \\
# & = \frac{1}{2} \begin{bmatrix} 1 & 0 \\ 0 & 1 \end{bmatrix}
# \end{aligned}
# $$
#
#
# ### Calculate mixed state of $\ket{\phi_{AB}}(Bell State)$
#
# The density matrix representation for this state is then given by:
#
# $$
# \begin{aligned}
# & \rho_{AB} = | \psi_{AB} \rangle \langle \psi_{AB} |
# \\
# \\
# & \rho_{AB} = \left ( \frac{1}{\sqrt{2}} \begin{bmatrix} 1 \\ 0 \\ 0 \\ 1 \end{bmatrix} \right ) \left ( \frac{1}{\sqrt{2}} \begin{bmatrix} 1 & 0 & 0 & 1 \end{bmatrix} \right )
# \\
# \\
# & \rho_{AB} = \frac{1}{2} \begin{bmatrix} 1 & 0 & 0 & 1 \\ 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 \\ 1 & 0 & 0 & 1 \\ \end{bmatrix}
# \end{aligned}
# $$
# +
from qiskit.quantum_info import DensityMatrix
# Density matrix of Bell State Preparation
qc_et = QuantumCircuit(2)
qc_et.h(0)
qc_et.cx(0,1)
state_et = DensityMatrix.from_instruction(qc_et)
array_to_latex(state_et, pretext ='Density Matrix')
# 3-D bar ("city") plot of the real/imaginary parts of rho.
plot_state_city(state_et.data, title='Density Matrix')
# -
# ## Example of a mixed State
# Real quantum devices are currently far from ideal: they are prone to errors and many have short decoherence times. Let us consider a system with a faulty Hadamard gate such that 20% of the time it rotates the qubit either too little or too much by $30^{\circ}$ ($\pi/6$) about the x axis. The ensemble of the mixed state contains the following states:
#
# $$ | \psi_1 \rangle = \frac{1}{\sqrt{2}} \left( | 0 \rangle + | 1 \rangle \right) $$
# $$ | \psi_2 \rangle = \frac{\sqrt{3}}{2}| 0 \rangle + \frac{1}{2} | 1 \rangle $$
# $$ | \psi_3 \rangle = \frac{1}{2} | 0 \rangle + \frac{\sqrt{3}}{2} | 1 \rangle $$
#
# with probabilities of occurrence:
# $\left \{ p_1, p_2, p_3 \right \} = \left \{ 80\%, 10\%, 10\% \right \}$
#
# +
from math import pi

# psi_1: the ideal Hadamard output (|0> + |1>)/sqrt(2).
qc1 = QuantumCircuit(1)
qc1.h(0)

# psi_2: rotation fell short by pi/6 about the x axis.
qc2 = QuantumCircuit(1)
qc2.rx(pi - pi / 6, 0)
qc2.ry(-pi / 2, 0)

# psi_3: rotation overshot by pi/6 about the x axis.
qc3 = QuantumCircuit(1)
qc3.rx(pi + pi / 6, 0)
qc3.ry(-pi / 2, 0)

# Convert each circuit to a statevector and show it on the Bloch sphere.
psi1 = Statevector.from_instruction(qc1)
display(plot_bloch_multivector(psi1))
psi2 = Statevector.from_instruction(qc2)
display(plot_bloch_multivector(psi2))
psi3 = Statevector.from_instruction(qc3)
display(plot_bloch_multivector(psi3))
# -
# We construct the density matrix of the state as:
# $$ \rho_H = \frac{4}{5} | \psi_1 \rangle \langle \psi_1 | + \frac{1}{10} | \psi_2 \rangle \langle \psi_2 | + \frac{1}{10} | \psi_3 \rangle \langle \psi_3 | $$
# $$
# \begin{aligned}
# & \rho_H = \frac{4}{5} \begin{bmatrix} \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} \end{bmatrix} + \frac{1}{10} \begin{bmatrix} \frac{3}{4} & \frac{\sqrt{3}}{4} \\ \frac{\sqrt{3}}{4} & \frac{1}{4} \end{bmatrix} + \frac{1}{10} \begin{bmatrix} \frac{1}{4} & \frac{\sqrt{3}}{4} \\ \frac{\sqrt{3}}{4} & \frac{3}{4} \end{bmatrix}
# \\
# \\
# & \rho_H = \begin{bmatrix} \frac{1}{2} & \frac{\sqrt{3}}{20} + \frac{2}{5} \\ \frac{\sqrt{3}}{20} + \frac{2}{5} & \frac{1}{2} \end{bmatrix}
# \end{aligned}
# $$
#
import numpy as np

# rho_H from the weighted sum worked out above; the off-diagonal coherence
# term is sqrt(3)/20 + 2/5.
coherence = np.sqrt(3) / 20 + 2 / 5
rho = np.array([[0.5, coherence],
                [coherence, 0.5]])
psi = DensityMatrix(rho)
array_to_latex(psi)
# Some properties of Density matrix
# - A pure state has $tr(\rho^2) = 1$
# - A mixed state has $tr(\rho^2) < 1$
#
# - Evolution of the mixed state/system with an unitary matrix $\hat U$ is given by
# $$
# \rho = \sum_{j} p_j |\psi_j \rangle \langle \psi_j | \enspace \xrightarrow[]{\enspace \hat U \enspace} \enspace \rho' = \sum_{j} p_j \hat U |\psi_j \rangle \langle \psi_j | \hat U^{\dagger} = \hat U \rho \hat U^{\dagger}
# $$
# Compute tr(rho^2) for the Bell-state density matrix; a result of 1 marks a
# pure state, while a mixed state gives a value strictly below 1.
rho_sq = state_et.data @ state_et.data
np.trace(rho_sq)
# Report the installed Qiskit component versions (for reproducibility).
import qiskit.tools.jupyter
# %qiskit_version_table
| day2/Lecture 2.3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Creating a list or other data structure is necessary for experimenting with "for" loops / iterations. Building larger ones can be easy.
# Build a small list, then tile it four times to get a longer iterable for
# experimenting with "for" loops.
list1 = [1, 2, 3, 4, 5, 6, 7, 8, 9]
list2 = list1 * 4
for num in list2:
    print(num)
# The same manipulations can be done with tuples (which are in parentheses)
# Tuples iterate the same way; build one holding the numbers 1 through 10.
tup = tuple(range(1, 11))
tup
# I've learned that in order to concatenate numbers and strings, I must first cast the number as a string. My professor's examples do not require this for some reason. He taught me to use f-string literals, which are much easier than anything else, but he doesn't seem to be using them in the examples in which I must cast strings.
# Concatenating a number with a string requires an explicit str() cast.
for number in tup:
    print(str(number) + ' is a number')
# I think this would be the equivalent of the above in f-string style.
# It is significantly easier to remember and does not require a type cast for the number. I fell in love with it immediately, but it is only available on newer Python versions (3.6+).
# Same output as above, but with an f-string (no manual cast needed).
for value in tup:
    print(f'{value} is a number')
# The method for the standard .format is shown below. It is also simple, but requires that I remember to create a placeholder name to take the place of the iterable variable I used in the for loop. I'm not sure why it errors without the second variable, but it does. That instantly makes f-string literals my favorite.
# Print only the even numbers, using str.format with a named placeholder.
for n in list1:
    if n % 2 == 0:
        print('{r} is even'.format(r=n))
# Another common for loop addition is another variable that represents a running count. While I recall seeing this before, I am unsure how it was used. My instructor used it to show another way to do Gaussian addition of consecutive numbers. As an example he created a list of 1 through 10 and iterated through them while adding the iterable variable to the tallying variable. This worked just like the Gaussian method of splitting the numbers over each other and multiplying the sum of any column by the number of columns (example:
# 1 2 3 4 5
# 10 9 8 7 6 = 11 X 5columns = 55
#
# +
# Running total over the tuple: classic accumulator pattern.
isum = 0
for value in tup:
    isum += value
print(isum)
# -
# The program above does it more like this:
# t sum num total = sum on next round
# 1 0 0 0
# 2 0 1 1
# 3 1 2 3
# 4 3 3 6
# 5 6 4 10
# 6 10 5 15
# 7 15 6 21
# 8 21 7 28
# 9 28 8 36
# 10 36 9 45
# 11 45 10 55
#
# It is a cool trick.
# Tuple unpacking was the next lesson by my professor. It required a list of tuple pairs that we could unpack using a for loop with multiple variables in order to pick what we wanted via printing one or both of the variables.
# Tuple unpacking: bind each pair to two loop variables at once.
tuplst = [(2, 3), (34, 98), (23, 25), (98, 8)]
for first, second in tuplst:
    print(second)
# The same kind of thing worked for dictionaries except we used .items. There were other choices if you hit tab after the dot. .Keys gave the keys, .values gave the value assigned to the key, and .item gave both.
# Dictionary iteration: keys by default, (key, value) pairs via .items(),
# and values alone via .values().
dic = {'key1': 'mango', 'key2': 'orange', 'key3': 'banana'}
for key in dic:
    print(key)
for pair in dic.items():
    print(pair)
for fruit in dic.values():
    print(fruit)
# The main problem I have with using these dot-methods is that one must remember to include the empty parentheses () or it doesn't work. The error messages are still a little cryptic to me and I must get better at understanding them.
| For Loops in python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="ZwZNOAMZcxl3"
# ##### Copyright 2019 The TensorFlow Neural Structured Learning Authors
# + cellView="form" id="nxbcnXODdE06"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="-BszoQj0dSZO"
# # Adversarial regularization for image classification
# + [markdown] id="wfqlePz0g6o5"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/neural_structured_learning/tutorials/adversarial_keras_cnn_mnist"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/neural-structured-learning/blob/master/g3doc/tutorials/adversarial_keras_cnn_mnist.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/neural-structured-learning/blob/master/g3doc/tutorials/adversarial_keras_cnn_mnist.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/neural-structured-learning/g3doc/tutorials/adversarial_keras_cnn_mnist.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="oHEGl8h_m6tS"
# ## Overview
#
# In this tutorial, we will explore the use of adversarial learning
# ([Goodfellow et al., 2014](https://arxiv.org/abs/1412.6572)) for image
# classification using the Neural Structured Learning (NSL) framework.
#
# The core idea of adversarial learning is to train a model with adversarially-perturbed data (called adversarial examples) in addition to the organic training data. To the human eye, these adversarial examples look the same as the original but the perturbation will cause the model to be confused and make incorrect predictions or classifications. The adversarial examples are constructed to intentionally mislead the model into making wrong predictions or classifications. By training with such examples, the model learns to be robust against adversarial perturbation when making predictions.
#
# In this tutorial, we illustrate the following procedure of applying adversarial
# learning to obtain robust models using the Neural Structured Learning framework:
#
# 1. Create a neural network as a base model. In this tutorial, the base model is
# created with the `tf.keras` functional API; this procedure is compatible
# with models created by `tf.keras` sequential and subclassing APIs as well.
# For more information on Keras models in TensorFlow, see this [documentation](https://www.tensorflow.org/api_docs/python/tf/keras/Model).
# 2. Wrap the base model with the **`AdversarialRegularization`** wrapper class,
# which is provided by the NSL framework, to create a new `tf.keras.Model`
# instance. This new model will include the adversarial loss as a
# regularization term in its training objective.
# 3. Convert examples in the training data to feature dictionaries.
# 4. Train and evaluate the new model.
# + [markdown] id="dZEDFUhqn42r"
# ## Recap for Beginners
#
#
#
# There is a corresponding [video explanation](https://youtu.be/Js2WJkhdU7k) on adversarial learning for image classification part of the TensorFlow Neural Structured Learning Youtube series. Below, we have summarized the key concepts explained in this video, expanding on the explanation provided in the Overview section above.
#
# The NSL framework jointly optimizes both image features and structured signals to help neural networks better learn. However, what if there is no explicit structure available to train the neural network? This tutorial explains one approach involving the creation of adversarial neighbors (modified from the original sample) to dynamically construct a structure.
#
# Firstly, adversarial neighbors are defined as modified versions of the sample image applied with small perturbations that mislead a neural net into outputting inaccurate classifications. These carefully designed perturbations are typically based on the reverse gradient direction and are meant to confuse the neural net during training. Humans may not be able to tell the difference between a sample image and its generated adversarial neighbor. However, to the neural net, the applied perturbations are effective at leading to an inaccurate conclusion.
#
# Generated adversarial neighbors are then connected to the sample, therefore dynamically constructing a structure edge by edge. Using this connection, neural nets learn to maintain the similarities between the sample and the adversarial neighbors while avoiding confusion resulting from misclassifications, thus improving the overall neural network's quality and accuracy.
#
# The code segment below is a high-level explanation of the steps involved while the rest of this tutorial goes into further depth and technicality.
#
# 1. Read and prepare the data. Load the MNIST dataset and normalize the feature values to stay in the range [0,1]
# ```
# import neural_structured_learning as nsl
#
# (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# x_train, x_test = x_train / 255.0, x_test / 255.0
# ```
# + [markdown] id="WSlSGafKn42s"
# 2. Build the neural network. A Sequential Keras base model is used for this example.
# ```
# model = tf.keras.Sequential(...)
# ```
#
# + [markdown] id="wFJ6cixdn42s"
# 3. Configure the adversarial model. Including the hyperparameters: multiplier applied on the adversarial regularization, empirically chosen differ values for step size/learning rate. Invoke adversarial regularization with a wrapper class around the constructed neural network.
# ```
# adv_config = nsl.configs.make_adv_reg_config(multiplier=0.2, adv_step_size=0.05)
# adv_model = nsl.keras.AdversarialRegularization(model, adv_config)
# ```
# + [markdown] id="6ohmfLgLn42s"
# 4. Conclude with the standard Keras workflow: compile, fit, evaluate.
# ```
# adv_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# adv_model.fit({'feature': x_train, 'label': y_train}, epochs=5)
# adv_model.evaluate({'feature': x_test, 'label': y_test})
# ```
# + [markdown] id="VgSOF-49Q7kS"
# What you see here is adversarial learning enabled in 2 steps and 3 simple lines of code. This is the simplicity of the neural structured learning framework. In the following sections, we expand upon this procedure.
# + [markdown] id="qODwGDl-n42t"
# ## Setup
# + [markdown] id="4RhmgQ7-mlrl"
# Install the Neural Structured Learning package.
# + id="ByJ7133BQULR"
# !pip install --quiet neural-structured-learning
# + [markdown] id="PZvsEQrhSqKx"
# Import libraries. We abbreviate `neural_structured_learning` to `nsl`.
# + id="EuqEuAYzTMo0"
import matplotlib.pyplot as plt
import neural_structured_learning as nsl
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
# + [markdown] id="3LwBtQGaTvbe"
# ## Hyperparameters
#
# We collect and explain the hyperparameters (in an `HParams` object) for model
# training and evaluation.
#
# Input/Output:
#
# * **`input_shape`**: The shape of the input tensor. Each image is 28-by-28
# pixels with 1 channel.
# * **`num_classes`**: There are a total of 10 classes, corresponding to 10
# digits [0-9].
#
# Model architecture:
#
# * **`conv_filters`**: A list of numbers, each specifying the number of
# filters in a convolutional layer.
# * **`kernel_size`**: The size of 2D convolution window, shared by all
# convolutional layers.
# * **`pool_size`**: Factors to downscale the image in each max-pooling layer.
# * **`num_fc_units`**: The number of units (i.e., width) of each
# fully-connected layer.
#
# Training and evaluation:
#
# * **`batch_size`**: Batch size used for training and evaluation.
# * **`epochs`**: The number of training epochs.
#
# Adversarial learning:
#
# * **`adv_multiplier`**: The weight of adversarial loss in the training
# objective, relative to the labeled loss.
# * **`adv_step_size`**: The magnitude of adversarial perturbation.
# * **`adv_grad_norm`**: The norm to measure the magnitude of adversarial
# perturbation.
#
# + id="iOc8YdmIRSHo"
class HParams(object):
  """Container for all hyperparameters used in this tutorial.

  Covers the input/output spec, the CNN architecture, the training loop,
  and the adversarial-regularization settings documented above.
  """

  def __init__(self):
    # Input/output.
    self.input_shape = [28, 28, 1]  # 28x28-pixel images with 1 channel.
    self.num_classes = 10           # Ten digit classes, 0-9.
    # Model architecture.
    self.conv_filters = [32, 64, 64]
    self.kernel_size = (3, 3)
    self.pool_size = (2, 2)
    self.num_fc_units = [64]
    # Training and evaluation.
    self.batch_size = 32
    self.epochs = 5
    # Adversarial learning.
    self.adv_multiplier = 0.2
    self.adv_step_size = 0.2
    self.adv_grad_norm = 'infinity'


HPARAMS = HParams()
# + [markdown] id="72zL1AMcYYGG"
# ## MNIST dataset
#
# The [MNIST dataset](http://yann.lecun.com/exdb/mnist/) contains grayscale
# images of handwritten digits (from '0' to '9'). Each image shows one digit at
# low resolution (28-by-28 pixels). The task involved is to classify images into
# 10 categories, one per digit.
#
# Here we load the MNIST dataset from
# [TensorFlow Datasets](https://www.tensorflow.org/datasets). It handles
# downloading the data and constructing a `tf.data.Dataset`. The loaded dataset
# has two subsets:
#
# * `train` with 60,000 examples, and
# * `test` with 10,000 examples.
#
# Examples in both subsets are stored in feature dictionaries with the following
# two keys:
#
# * `image`: Array of pixel values, ranging from 0 to 255.
# * `label`: Groundtruth label, ranging from 0 to 9.
# + id="R1dK6E4axNHB"
# Download MNIST via TensorFlow Datasets; `datasets` is a dict keyed by split.
datasets = tfds.load('mnist')
train_dataset = datasets['train']
test_dataset = datasets['test']
# Keys of the per-example feature dictionaries produced by TFDS.
IMAGE_INPUT_NAME = 'image'
LABEL_INPUT_NAME = 'label'
# + [markdown] id="IBkh4mbsxLR_"
# To make the model numerically stable, we normalize the pixel values to [0, 1]
# by mapping the dataset over the `normalize` function. After shuffling training
# set and batching, we convert the examples to feature tuples `(image, label)`
# for training the base model. We also provide a function to convert from tuples
# to dictionaries for later use.
# + id="VhMEJqKs0_7z"
def normalize(features):
  """Rescales the image pixels from [0, 255] to [0, 1] and returns `features`."""
  image = tf.cast(features[IMAGE_INPUT_NAME], dtype=tf.float32)
  features[IMAGE_INPUT_NAME] = image / 255.0
  return features
def convert_to_tuples(features):
  """Turns a feature dict into an `(image, label)` tuple for Keras training."""
  image = features[IMAGE_INPUT_NAME]
  label = features[LABEL_INPUT_NAME]
  return image, label
def convert_to_dictionaries(image, label):
  """Inverse of `convert_to_tuples`: rebuilds the feature dict from a pair."""
  return {IMAGE_INPUT_NAME: image, LABEL_INPUT_NAME: label}
# Training set: normalize, shuffle, batch, then convert to (image, label) tuples.
train_dataset = train_dataset.map(normalize).shuffle(10000).batch(HPARAMS.batch_size).map(convert_to_tuples)
# Test set: same pipeline minus the shuffle.
test_dataset = test_dataset.map(normalize).batch(HPARAMS.batch_size).map(convert_to_tuples)
# + [markdown] id="JrrMpPNmpCKK"
# ## Base model
#
# Our base model will be a neural network consisting of 3 convolutional layers
# followed by 2 fully-connected layers (as defined in `HPARAMS`). Here we define
# it using the Keras functional API. Feel free to try other APIs or model
# architectures (e.g. subclassing). Note that the NSL framework does support all three types of Keras APIs.
# + id="4UjrtuIsYWo3"
def build_base_model(hparams):
  """Builds the CNN classifier described by `hparams` with the functional API.

  Architecture: Conv2D blocks (with max pooling between, but not after, the
  convolutional layers) -> Flatten -> Dense hidden layers -> Dense logits
  (no softmax; the loss is applied with from_logits=True).
  """
  inputs = tf.keras.Input(
      shape=hparams.input_shape, dtype=tf.float32, name=IMAGE_INPUT_NAME)
  x = inputs
  last_conv = len(hparams.conv_filters) - 1
  for idx, filters in enumerate(hparams.conv_filters):
    x = tf.keras.layers.Conv2D(filters, hparams.kernel_size, activation='relu')(x)
    if idx < last_conv:
      # Max pooling between convolutional layers.
      x = tf.keras.layers.MaxPooling2D(hparams.pool_size)(x)
  x = tf.keras.layers.Flatten()(x)
  for units in hparams.num_fc_units:
    x = tf.keras.layers.Dense(units, activation='relu')(x)
  logits = tf.keras.layers.Dense(hparams.num_classes)(x)
  return tf.keras.Model(inputs=inputs, outputs=logits)
# + id="288nsmN5pLoo"
# Instantiate the base CNN and print its layer-by-layer summary.
base_model = build_base_model(HPARAMS)
base_model.summary()
# + [markdown] id="mlTUGn1t_HAr"
# Next we train and evaluate the base model.
# + id="K2cFDbmRpRMp"
# Compile with Adam and sparse categorical cross-entropy on raw logits
# (from_logits=True because the model's last layer has no softmax),
# then train on the tuple-formatted dataset.
base_model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['acc'])
base_model.fit(train_dataset, epochs=HPARAMS.epochs)
# + id="J94Y_WTaqAsi"
# Evaluate on the test set; zip metric names with values for readable access.
results = base_model.evaluate(test_dataset)
named_results = dict(zip(base_model.metrics_names, results))
print('\naccuracy:', named_results['acc'])
# + [markdown] id="c8OClWqGALIm"
# We can see that the base model achieves 99% accuracy on the test set. We will
# see how robust it is in
# [Robustness Under Adversarial Perturbations](#scrollTo=HXK9MGG8lBX3) below.
# + [markdown] id="CemXA8N9q336"
# ## Adversarial-regularized model
#
# Here we show how to incorporate adversarial training into a Keras model with a
# few lines of code, using the NSL framework. The base model is wrapped to create
# a new `tf.Keras.Model`, whose training objective includes adversarial
# regularization.
# + [markdown] id="YUOpl-rkzRrY"
# First, we create a config object with all relevant hyperparameters using the
# helper function `nsl.configs.make_adv_reg_config`.
# + id="-WWVwJB2qstE"
# Bundle the adversarial-regularization hyperparameters into a config object.
adv_config = nsl.configs.make_adv_reg_config(
    multiplier=HPARAMS.adv_multiplier,
    adv_step_size=HPARAMS.adv_step_size,
    adv_grad_norm=HPARAMS.adv_grad_norm
)
# + [markdown] id="OmeIUyxE4s68"
# Now we can wrap a base model with `AdversarialRegularization`. Here we create a
# new base model (`base_adv_model`), so that the existing one (`base_model`) can
# be used in later comparison.
#
# The returned `adv_model` is a `tf.keras.Model` object, whose training objective
# includes a regularization term for the adversarial loss. To compute that loss,
# the model has to have access to the label information (feature `label`), in
# addition to regular input (feature `image`). For this reason, we convert the
# examples in the datasets from tuples back to dictionaries. And we tell the
# model which feature contains the label information via the `label_keys`
# parameter.
# + id="TObqJLEX4sQq"
# Wrap a *fresh* base model so `base_model` itself stays untouched for the
# later comparison. `label_keys` tells the wrapper which feature holds the
# ground-truth labels needed to compute the adversarial loss.
base_adv_model = build_base_model(HPARAMS)
adv_model = nsl.keras.AdversarialRegularization(
    base_adv_model,
    label_keys=[LABEL_INPUT_NAME],
    adv_config=adv_config
)
# The wrapped model consumes feature dictionaries, not (image, label) tuples.
train_set_for_adv_model = train_dataset.map(convert_to_dictionaries)
test_set_for_adv_model = test_dataset.map(convert_to_dictionaries)
# + [markdown] id="aKTQWzfj7JvL"
# Next we compile, train, and evaluate the
# adversarial-regularized model. There might be warnings like
# "Output missing from loss dictionary," which is fine because
# the `adv_model` doesn't rely on the base implementation to
# calculate the total loss.
# + id="aTSK-cHbuWDw"
# Compile and train the adversarial-regularized model. Warnings about an
# output missing from the loss dictionary are expected: `adv_model` computes
# the total loss itself rather than relying on the base implementation.
adv_model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['acc'])
adv_model.fit(train_set_for_adv_model, epochs=HPARAMS.epochs)
# + id="3v_Jn7wuviZx"
results = adv_model.evaluate(test_set_for_adv_model)
named_results = dict(zip(adv_model.metrics_names, results))
# NOTE: the wrapper reports the metric under its full name rather than the
# 'acc' alias used for the base model above.
print('\naccuracy:', named_results['sparse_categorical_accuracy'])
# + [markdown] id="LgnslZYk9Acg"
# We can see that the adversarial-regularized model also performs very well (99%
# accuracy) on the test set.
# + [markdown] id="HXK9MGG8lBX3"
# ## Robustness under Adversarial perturbations
#
# Now we compare the base model and the adversarial-regularized model for
# robustness under adversarial perturbation.
#
# We will use the `AdversarialRegularization.perturb_on_batch` function for
# generating adversarially perturbed examples. And we would like the generation
# based on the base model. To do so, we wrap the base model with
# `AdversarialRegularization`. Note that as long as we don't invoke training (`Model.fit`), the learned variables in the model won't change and the model is
# still the same one as in section [Base Model](#scrollTo=JrrMpPNmpCKK).
# + id="FLkYw54pvxJO"
# Wrap the *trained* base model so we can call `perturb_on_batch` on it.
# No training is invoked on this wrapper, so the underlying weights stay
# exactly those learned in the Base Model section.
reference_model = nsl.keras.AdversarialRegularization(
    base_model, label_keys=[LABEL_INPUT_NAME], adv_config=adv_config)
reference_model.compile(
    optimizer='adam',
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=['acc'])
# + [markdown] id="DR0Rn5rxBeDh"
# We collect in a dictionary the models to be evaluated, and also create a metric
# object for each of the models.
#
# Note that we take `adv_model.base_model` in order to have the same input format
# (not requiring label information) as the base model. The learned variables in
# `adv_model.base_model` are the same as those in `adv_model`.
# + id="igRBxPlPm_JE"
# Models to compare under adversarial perturbation. `adv_model.base_model`
# shares its learned variables with `adv_model` but takes plain image input.
models_to_eval = {
    'base': base_model,
    'adv-regularized': adv_model.base_model,
}

# One accuracy metric per model so results accumulate independently.
metrics = {}
for model_name in models_to_eval:
  metrics[model_name] = tf.keras.metrics.SparseCategoricalAccuracy()
# + [markdown] id="BAPYegAbC8mZ"
# Here is the loop to generate perturbed examples and to evaluate models with
# them. We save the perturbed images, labels, and predictions for visualization
# in the next section.
# + id="IGnLXhswmUN8"
# For each test batch: generate adversarial examples against the *base*
# model, clip them back to the valid pixel range, then score every model on
# the same perturbed batch. Images, labels, and predictions are saved for
# the visualization in the next section.
perturbed_images, labels, predictions = [], [], []

for batch in test_set_for_adv_model:
  perturbed_batch = reference_model.perturb_on_batch(batch)
  # Clipping makes perturbed examples have the same range as regular ones.
  perturbed_batch[IMAGE_INPUT_NAME] = tf.clip_by_value(
      perturbed_batch[IMAGE_INPUT_NAME], 0.0, 1.0)
  # Pop the label so the remaining dict matches the base models' input format.
  y_true = perturbed_batch.pop(LABEL_INPUT_NAME)
  perturbed_images.append(perturbed_batch[IMAGE_INPUT_NAME].numpy())
  labels.append(y_true.numpy())
  predictions.append({})
  for name, model in models_to_eval.items():
    y_pred = model(perturbed_batch)
    metrics[name](y_true, y_pred)  # accumulate this model's accuracy
    predictions[-1][name] = tf.argmax(y_pred, axis=-1).numpy()

for name, metric in metrics.items():
  print('%s model accuracy: %f' % (name, metric.result().numpy()))
# + [markdown] id="S5cC3XbRGFJQ"
# We can see that the accuracy of the base model drops dramatically (from 99% to
# about 50%) when the input is perturbed adversarially. On the other hand, the
# accuracy of the adversarial-regularized model only degrades a little (from 99%
# to 95%). This demonstrates the effectiveness of adversarial learning on
# improving model's robustness.
# + [markdown] id="YfB5oBBfWLRK"
# ## Examples of adversarially-perturbed images
#
# Here we take a look at the adversarially-perturbed images. We can see that the
# perturbed images still show digits recognizable by human, but can successfully
# fool the base model.
# + id="3iK9vO_xKJfg"
# Visualize the first batch of perturbed images together with each model's
# per-batch accuracy and per-image predictions.
batch_index = 0

batch_image = perturbed_images[batch_index]
batch_label = labels[batch_index]
batch_pred = predictions[batch_index]

batch_size = HPARAMS.batch_size
n_col = 4
n_row = (batch_size + n_col - 1) // n_col  # ceil(batch_size / n_col) rows

print('accuracy in batch %d:' % batch_index)
for name, pred in batch_pred.items():
  print('%s model: %d / %d' % (name, np.sum(batch_label == pred), batch_size))

plt.figure(figsize=(15, 15))
for i, (image, y) in enumerate(zip(batch_image, batch_label)):
  y_base = batch_pred['base'][i]
  y_adv = batch_pred['adv-regularized'][i]
  plt.subplot(n_row, n_col, i+1)
  plt.title('true: %d, base: %d, adv: %d' % (y, y_base, y_adv))
  plt.imshow(tf.keras.utils.array_to_img(image), cmap='gray')
  plt.axis('off')

plt.show()
# + [markdown] id="g_vo1pWYJlHP"
# ## Conclusion
#
# We have demonstrated the use of adversarial learning for image classification
# using the Neural Structured Learning (NSL) framework. We encourage users to
# experiment with different adversarial settings (in hyper-parameters) and to see
# how they affect model robustness.
| g3doc/tutorials/adversarial_keras_cnn_mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/fhswf/ki-wir/blob/main/VAEM.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="CMeykhqAHLsA"
# # Variational Autoencoder
#
# + colab={"base_uri": "https://localhost:8080/"} id="X1NznsKoMDBK" outputId="26c0b38b-d415-4594-cb8e-de9d0fa7a4d4"
# Setup on Colab
# !pip install gradio &> /dev/null
# !pip install pytorch_lightning &> /dev/null
# !curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | sudo bash
# !sudo apt-get install git-lfs
# !git lfs install
# !if [ ! -e ki_wir ]; then git clone https://github.com/fhswf/ki-wir.git ki_wir; else cd ki_wir; git pull; fi
# !cd ki_wir; git lfs fetch
# !cd ki_wir; git lfs checkout
# + id="_TMCwyyRLDCG"
import gradio as gr
import torch
import numpy as np
import ki_wir.models.vanilla_vae as vanilla_vae
import ki_wir.models.logcosh_vae as logcosh_vae
import ki_wir.models.dfcvae as dfc_vae
import ki_wir.models.experiment as experiment
from PIL import Image
from torchvision import transforms
import torchvision.utils as vutils
# -
# Restrict PyTorch to the second physical GPU; inside this process it then
# appears as cuda:0.
# %env CUDA_VISIBLE_DEVICES=1
device = torch.device("cuda:0")
# + id="hTwjYZ7iM4cA"
# Shared constructor arguments for every VAE variant.
params = {"in_channels": 3, "latent_dim": 128, "img_size": 64}

# Map a display name to (model class, pretrained checkpoint path).
config = {
    "DFC": [dfc_vae.DFCVAE, "ki_wir/pretrained/dfc.ckpt"],
    "LogCosh": [logcosh_vae.LogCoshVAE, "ki_wir/pretrained/logcosh.ckpt"],
    "Vanilla": [vanilla_vae.VanillaVAE, "ki_wir/pretrained/vanilla.ckpt"],
}
# + id="hTwjYZ7iM4cA"
# Instantiate each VAE variant and load its pretrained checkpoint.
models = {}
for m, c in config.items():
    model = c[0](**params)
    exp = experiment.VAEXperiment(model, params)
    # NOTE(review): `load_from_checkpoint` typically returns a *new* object;
    # its return value is moved to `device` and then discarded, while the
    # locally built `exp` is what gets stored. Verify that the stored
    # experiment actually carries the checkpoint weights.
    exp.load_from_checkpoint(c[1], vae_model=model, params=params).to(device)
    models[m] = exp
# -
models
# + id="hTwjYZ7iM4cA"
def reconstruct(name, image1, image2, alpha):
    """Blend two input images and reconstruct the blend with the chosen VAE.

    Args:
        name: key into the global `models` dict selecting the VAE variant.
        image1, image2: input images as numpy arrays (from the Gradio widgets).
        alpha: blend weight; 0 -> image1 only, 1 -> image2 only.

    Returns:
        The reconstructed blend as an RGB PIL image.
    """
    # Map pixel values from [0, 1] to [-1, 1] — presumably the range the
    # models were trained with (see the inverse mapping below); confirm.
    SetRange = transforms.Lambda(lambda X: 2 * X - 1.)
    img1 = Image.fromarray(image1)
    img2 = Image.fromarray(image2)
    img1 = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
    ])(img1)
    img2 = transforms.Compose([
        transforms.Resize((64, 64)),
        transforms.ToTensor(),
    ])(img2)
    # Linear interpolation between the two resized images.
    img = alpha*img2 + (1-alpha)*img1
    orig = transforms.ToPILImage(mode='RGB')(img)  # NOTE(review): unused — the original blend is never returned.
    img = SetRange(img)
    #img = torch.moveaxis(img, 0, -1)
    # Add a batch dimension. NOTE(review): `.cuda()` hard-codes the default
    # GPU; the global `device` would be the more flexible choice — confirm.
    img = torch.unsqueeze(img.cuda(), 0)
    dec = models[name].model.generate(img, latent_dim=128)
    dec = torch.squeeze(dec[0], 0)
    # Map the reconstruction back from [-1, 1] to [0, 1] before display.
    dec = transforms.Lambda(lambda X: 0.5 * (X + 1.))(dec)
    return transforms.ToPILImage(mode='RGB')(dec)
# + id="99KmCnp_lq9Z"
# Draw 144 unconditional samples from each loaded VAE and save them as a
# 12x12 image grid per model. (Removed the unused `test_label` variable that
# previously preceded this loop.)
for name in models.keys():
    exp = models[name]
    exp.curr_device = device
    samples = exp.model.sample(144, device)
    vutils.save_image(samples.cpu().data,
                      f"sample_{name}.png",
                      normalize=True,
                      nrow=12)
# + colab={"base_uri": "https://localhost:8080/", "height": 924} id="2Tb5jUK-9Aer" outputId="0405d55b-9b03-40ca-ae30-be320201b60b"
# Build the Gradio UI: model dropdown, two image inputs, and a blend slider.
model = gr.inputs.Dropdown(list(models.keys()), type="value", default=None, label="Model")
alpha = gr.inputs.Slider(minimum=0, maximum=1.0, step=0.1, default=0, label=None)
out1 = gr.outputs.Image(type="auto", label="original")
# NOTE(review): `out2` is defined but never passed to the Interface, and
# `reconstruct` returns a single image — only `out1` (labeled "original") is
# shown. Verify whether `outputs=[out1, out2]` was intended.
out2 = gr.outputs.Image(type="auto", label="reconstructed")
iface = gr.Interface(fn=reconstruct, layout="vertical", inputs=[model, "image", "image", alpha], outputs=out1).launch(debug=True, share=True)
# -
| VAE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (coursera)
# language: python
# name: coursera
# ---
# # Basic scoring pipeline #
# This notebook is just an addition to `create_basic_training_pipeline.ipynb`; it demonstrates how to use the trained model for scoring.
from azureml.core import Workspace, Dataset
from azureml.core.datastore import Datastore
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from azureml.pipeline.steps import PythonScriptStep
from azureml.pipeline.core import PipelineData
from azureml.pipeline.core import Pipeline
from azureml.core import Experiment
from msrest.exceptions import HttpOperationError
# Because we are going to list all the files and send them to the Form Recognizer one by one, we will need a couple more parameters compared to the training pipeline:
#
# - storage_name: a storage name that contains input data
# - storage_key: a storage key to get access to the storage with input data
# - container_name: the name of the container that contains folder with input data
# --- Azure subscription / workspace coordinates (fill in before running) ---
subscription_id = "<provide it here>"
wrksp_name = "<provide it here>"
resource_group = "<provide it here>"
# --- Compute cluster settings ---
compute_name = "mycluster"
min_nodes = 0  # scale down to zero nodes when idle
max_nodes = 4
vm_priority = "lowpriority"
vm_size = "Standard_F2s_v2"
project_folder = "basic_scoring_steps"  # local folder containing score.py
# --- Form Recognizer credentials ---
fr_endpoint = "<provide it here>"
fr_key = "<provide it here>"
# --- Input-data storage (see the parameter notes above) ---
storage_name = "<provide it here>"
storage_key = "<provide it here>"
container_name = "data"
datastore_name = "data_ds"
scoring_ds_name = "basic_scoring"
# Getting a reference to the workspace. If it doesn't exist there is no sense to create new one because we don't have any models anyway
# Get a reference to the workspace. If it doesn't exist there is no sense in
# creating a new one, because it would not contain any trained models anyway.
try:
    aml_workspace = Workspace.get(
        name=wrksp_name,
        subscription_id=subscription_id,
        resource_group=resource_group)
    print("Found the existing Workspace")
except Exception as e:
    # Surface the underlying error (the original printed an f-string with no
    # placeholder and discarded `e`). Note the rest of the notebook cannot
    # proceed without a workspace.
    print(f"Workspace doesn't exist: {e}")
# Create a compute cluster for scoring
# Create (or reuse) the compute cluster used for scoring.
if compute_name in aml_workspace.compute_targets:
    compute_target = aml_workspace.compute_targets[compute_name]
    # isinstance is preferred over a `type(...) is ...` comparison (PEP 8);
    # it also accepts AmlCompute subclasses.
    if compute_target and isinstance(compute_target, AmlCompute):
        print(f"Found existing compute target {compute_name} so using it")
else:
    # Provision a new cluster and block until it is ready.
    compute_config = AmlCompute.provisioning_configuration(
        vm_size=vm_size,
        vm_priority=vm_priority,
        min_nodes=min_nodes,
        max_nodes=max_nodes,
    )
    compute_target = ComputeTarget.create(aml_workspace, compute_name,
                                          compute_config)
    compute_target.wait_for_completion(show_output=True)
# We need to mount the blob storage with the input data to the scoring compute cluster. To do that we need to register the blob container as a datastore in AML.
# Register the blob container as an AML datastore so the scoring cluster can
# mount the input data; reuse the registration if it already exists.
try:
    blob_datastore = Datastore.get(aml_workspace, datastore_name)
    print(f"Found Blob Datastore with name: {datastore_name}")
except HttpOperationError:
    blob_datastore = Datastore.register_azure_blob_container(
        workspace=aml_workspace,
        datastore_name=datastore_name,
        account_name=storage_name,
        container_name=container_name,
        account_key=storage_key)
    print(f"Registered blob datastore with name: {datastore_name}")
# Now, we can create a dataset with all input files there (it doesn't make much sense for this example, but it's very useful for parallel step)
# Create and register a file dataset over the "Test" folder of the datastore.
scoring_file_path = blob_datastore.path("Test")
scoring_file_dataset = Dataset.File.from_files(path=scoring_file_path, validate=True)
scoring_file_dataset = scoring_file_dataset.register(
    aml_workspace, scoring_ds_name, create_new_version=True)
print("Dataset has been registered")
# We need pipeline data object to store all outputs from scoring
# Pipeline-managed output location for the scoring results.
scoring_output = PipelineData(
    "scoring_output",
    datastore=blob_datastore)
# Just one step here: scoring. We will list all files and make scoring one by one
# Single pipeline step: run score.py from `project_folder` against the
# registered file dataset, passing the output location and the Form
# Recognizer endpoint/key as command-line arguments.
scoring_step = PythonScriptStep(
    name = "scoring",
    script_name="score.py",
    inputs=[scoring_file_dataset.as_named_input("scoring_files")],
    outputs=[scoring_output],
    arguments=[
        "--output", scoring_output,
        "--fr_endpoint", fr_endpoint,
        "--fr_key", fr_key],
    compute_target=compute_target,
    source_directory=project_folder
)
steps = [scoring_step]
# Create a pipeline object with one step only
# Assemble the one-step pipeline.
pipeline = Pipeline(workspace=aml_workspace, steps=steps)
# Execute the pipeline
pipeline_run = Experiment(aml_workspace, 'scoring_basic_exp').submit(pipeline)
pipeline_run.wait_for_completion()  # block until the run finishes
# Register the pipeline as a reusable entity in AML
pipeline.publish(
    name="basic_scoring",
    description="Scoring data using form recognizer single model")
| Pipelines/Azure_Machine_Learning/create_basic_scoring_pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
import json
import pandas as pd
import numpy as np
from pandas import json_normalize
# Fetch the full ETF listing from the (undocumented) Naver Finance API.
url = 'https://finance.naver.com/api/sise/etfItemList.nhn'
json_data = json.loads(requests.get(url).text)
df = json_normalize(json_data['result']['etfItemList'])
# ## 1. Preprocessing
# #### Prefix ticker codes with 'A' so they are handled as strings downstream
df['itemcode'] = 'A' + df['itemcode']
df = df.sort_values(by='etfTabCode', axis=0)
df['exchange_nm'] = 'KRX'
# NOTE(review): 'amonut' is the (misspelled) key the API actually returns — confirm.
df = df.rename(columns = {'itemcode':'code', 'itemname':'name', 'nowVal':'price', 'quant':'volumne_sh', 'amonut':'trading_amt_mln', 'marketSum':'market_cap_100m'})
df = df.drop(['etfTabCode', 'risefall', 'changeVal', 'changeRate', 'threeMonthEarnRate'], axis=1)
df
# # 2. Universe filtering
# ## 1) Suitability for the investment objective
# ### (1) Exclude leveraged and inverse ETFs
# - Remove any fund whose name contains a restricted token (`restricted_tokens`)
# - Leveraged/inverse products are only suitable for short-term trading
restricted_tokens = ['레버리지', '인버스', '2X']
# BUG FIX: the column was renamed 'itemname' -> 'name' above, so the original
# `df.itemname` raised AttributeError; filter on the renamed column instead.
df = df[~df['name'].str.contains('|'.join(restricted_tokens))]
# ## 2) Liquidity
# ### (1) Market cap / trading amount thresholds
# - Market cap (`limit_market_cap`) >= 30bn KRW
# - Trading amount (`limit_trading_amt`) >= 100m KRW
limit_market_cap = 300  # unit: 100m KRW (i.e. 30bn KRW)
limit_trading_amt = 100  # unit: 1m KRW (i.e. 100m KRW)
df = df[np.logical_and(df.market_cap_100m >= limit_market_cap, df.trading_amt_mln >= limit_trading_amt)]
# # 3. Save to file
df.reset_index(inplace=True, drop=True)
df.to_csv('universe.csv', encoding='utf-8-sig', index=False)
| src/data/make_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Widen the notebook container for easier side-by-side inspection.
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:90% !important; }</style>"))
# !pip install pandas
# !pip install gensim
# !pip install pyspellchecker
# !pip install tqdm
import numpy as np
import pandas as pd
import gensim
import pickle
from scipy.stats import pearsonr, spearmanr
import tensorflow as tf
import matplotlib.pyplot as plt
# from spellchecker import SpellChecker
# from tqdm import tqdm
import re
import itertools
# Sanity-check GPU availability.
# NOTE(review): tf.test.is_gpu_available() is deprecated in favour of
# tf.config.list_physical_devices('GPU') — confirm the installed TF version.
tf.test.is_gpu_available()
# ### Data prep
# ##### SemEval-2014
# +
"""LOADING DATA"""
# SemEval/SICK-style TSV: the first line is the header, the remaining lines
# are tab-separated records. Context managers close the handles (the original
# code opened each file twice and never closed either handle).
with open('./data/semeval.txt', 'r') as file:
    headers = file.readline().split('\t')
    # Iteration continues after the header line, so no [1:] slice is needed.
    data = [line.split('\t') for line in file]
data = pd.DataFrame(data, columns=headers)
data['relatedness_score'] = pd.to_numeric(data['relatedness_score'])
with open('./data/semeval_train.txt', 'r') as file:
    headers = file.readline().split('\t')
    data_train = [line.split('\t') for line in file]
data_train = pd.DataFrame(data_train, columns=headers)
data_train['relatedness_score'] = pd.to_numeric(data_train['relatedness_score'])
data_train.tail()
# -
# Peek at one training pair to sanity-check the parse.
print(data_train.iloc[10]['sentence_A'])
print(data_train.iloc[10]['sentence_B'])
# +
def norm(x):
    """Min-max scale *x* to the [0, 1] range."""
    lo = np.min(x)
    span = np.max(x) - lo
    return (x - lo) / span
# Scale relatedness scores into [0, 1] so they match the network's output range.
data['normed_score'] = norm(data['relatedness_score'])
data_train['normed_score'] = norm(data_train['relatedness_score'])
# -
# Lower-case all sentences before embedding lookup.
data['sentence_A'] = [x.lower() for x in data['sentence_A']]
data['sentence_B'] = [x.lower() for x in data['sentence_B']]
data_train['sentence_A'] = [x.lower() for x in data_train['sentence_A']]
data_train['sentence_B'] = [x.lower() for x in data_train['sentence_B']]
# word2vec model, pretrained (Wikipedia2Vec text format, 300-dim)
word2vec = gensim.models.KeyedVectors.load_word2vec_format('./models/enwiki_20180420_300d.txt')
# +
"""TRAINING DATA"""
# 32 is the longest tokenised sentence in this corpus, so every sentence is
# zero-padded with 300-dim vectors up to length 32.
all_vec_a = list()
all_vec_b = list()
for i in range(len(data_train)):
    full_vec_a = list()
    full_vec_b = list()
    for token in data_train['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:
            # Out-of-vocabulary token: skip it. (The original bare `except`
            # also hid unrelated errors.)
            continue
    for token in data_train['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:
            continue
    # Zero-pad both sentences to the fixed 32-token length.
    while len(full_vec_a) < 32:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 32:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
# Cache the embedded/padded tensors so the expensive lookup runs only once.
with open('./data/train_a_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/train_b_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
"""TESTING DATA"""
# Same embedding + zero-padding recipe as the training cell, applied to `data`.
all_vec_a = list()
all_vec_b = list()
for i in range(len(data)):
    full_vec_a = list()
    full_vec_b = list()
    for token in data['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:
            # Skip out-of-vocabulary tokens only (was a bare `except`).
            continue
    for token in data['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:
            continue
    # Zero-pad both sentences to the fixed 32-token length.
    while len(full_vec_a) < 32:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 32:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/test_a_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# -
# Reload the cached tensors. Context managers close the handles that the
# original `pickle.load(open(...))` pattern leaked.
with open('./data/train_a_w2v300.data', 'rb') as f:
    train_a = pickle.load(f)
with open('./data/train_b_w2v300.data', 'rb') as f:
    train_b = pickle.load(f)
with open('./data/test_a_w2v300.data', 'rb') as f:
    test_a = pickle.load(f)
with open('./data/test_b_w2v300.data', 'rb') as f:
    test_b = pickle.load(f)
# +
"""NETWORK DEFINITION"""
# Variable-length sequences of 300-dim word vectors.
input_shape = (None, 300,)
left_input = tf.keras.layers.Input(input_shape)
right_input = tf.keras.layers.Input(input_shape)
# Shared encoder: a single GRU; both branches reuse the same weights (siamese).
siam = tf.keras.Sequential([
    # tf.keras.layers.LSTM(50, kernel_initializer='glorot_normal',
    #                      recurrent_initializer='glorot_normal',
    #                      #bias_initializer=tf.keras.initializers.Constant(2.5),
    #                      dropout=0.1)
    tf.keras.layers.GRU(50, kernel_initializer='glorot_uniform',
                        bias_initializer=tf.keras.initializers.Constant(2.5),
                        dropout=0.35)
])
encoded_l = siam(left_input)
encoded_r = siam(right_input)
# Element-wise |l - r|; a Dense layer below maps it to a scalar score.
manhattan = lambda x: tf.keras.backend.abs(x[0] - x[1])
# manhattan = lambda x: tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(x[0] - x[1])))
merged_mangattan = tf.keras.layers.Lambda(function=manhattan, output_shape=lambda x: x[0])([encoded_l, encoded_r])
prediction = tf.keras.layers.Dense(1, activation='linear')(merged_mangattan)
siamese_net = tf.keras.Model([left_input, right_input], prediction)
"""OPTIMIZER AND LOSS DEFINITION"""
siamese_net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=1,
                                                           rho=0.9,
                                                           clipvalue=2.5),
                    loss='mse')
# -
siamese_net.fit([np.array(train_a), np.array(train_b)],
                np.array(data_train['normed_score']),
                epochs=300,
                batch_size=64)
# Correlation of predictions with human scores, on train then test.
preds = siamese_net.predict([np.array(train_a), np.array(train_b)])
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], data_train['normed_score'])[0]}")
print(f"Spearmans: {spearmanr([x[0] for x in preds.tolist()], data_train['normed_score'])[0]}")
preds = siamese_net.predict([np.array(test_a), np.array(test_b)])
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], data['normed_score'])[0]}")
print(f"Spearmans: {spearmanr([x[0] for x in preds.tolist()], data['normed_score'])[0]}")
# #### glove
# +
def loadGloveModel(File):
    """Load a GloVe text file into a dict mapping word -> np.ndarray vector.

    Each line has the form "<word> <v1> <v2> ...". The entire vocabulary is
    held in memory, so this suits moderately sized models only.
    """
    print("Loading Glove Model")
    gloveModel = {}
    # Context manager ensures the handle is closed (the original never
    # closed the file).
    with open(File, 'r') as f:
        for line in f:
            splitLines = line.split()
            word = splitLines[0]
            wordEmbedding = np.array([float(value) for value in splitLines[1:]])
            gloveModel[word] = wordEmbedding
    print(len(gloveModel), " words loaded!")
    return gloveModel
gm = loadGloveModel('./models/glove.6B/glove.6B.300d.txt')  # word -> 300-dim vector
# +
"""TRAINING DATA"""
# NOTE(review): the original comment claimed 57 is the longest sentence, but
# the padding below stops at 32 tokens — confirm which length the model expects.
all_vec_a = list()
all_vec_b = list()
for i in range(len(data_train)):
    full_vec_a = list()
    full_vec_b = list()
    for token in data_train['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(gm[token].tolist())
        except KeyError:
            # gm is a plain dict: KeyError means out-of-vocabulary — skip.
            # (The original bare `except` also hid unrelated errors.)
            continue
    for token in data_train['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(gm[token].tolist())
        except KeyError:
            continue
    # Zero-pad both sentences to 32 tokens of 300-dim zero vectors.
    while len(full_vec_a) < 32:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 32:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
# Cache the embedded/padded tensors.
with open('./data/train_a_semeval_glove300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/train_b_semeval_glove300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
"""TESTING DATA"""
# This cell embeds the TEST split (the original banner wrongly said TRAINING).
all_vec_a = list()
all_vec_b = list()
for i in range(len(data)):
    full_vec_a = list()
    full_vec_b = list()
    for token in data['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(gm[token].tolist())
        except KeyError:
            # Out-of-vocabulary token in the GloVe dict — skip.
            continue
    for token in data['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(gm[token].tolist())
        except KeyError:
            continue
    # Zero-pad both sentences to 32 tokens.
    while len(full_vec_a) < 32:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 32:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/test_a_semeval_glove300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
# NOTE(review): filename says 'images' but holds semeval test data; kept as-is
# because the loading cell below reads this exact path.
with open('./data/test_b_images_glove300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# -
# Reload the cached GloVe tensors, closing each handle (the original
# `pickle.load(open(...))` pattern leaked them).
with open('./data/train_a_semeval_glove300.data', 'rb') as f:
    train_a = pickle.load(f)
with open('./data/train_b_semeval_glove300.data', 'rb') as f:
    train_b = pickle.load(f)
with open('./data/test_a_semeval_glove300.data', 'rb') as f:
    test_a = pickle.load(f)
# 'images' in the name is a historical misnomer; the file holds semeval test data.
with open('./data/test_b_images_glove300.data', 'rb') as f:
    test_b = pickle.load(f)
# +
"""NETWORK DEFINITION"""
input_shape = (None, 300,)
left_input = tf.keras.layers.Input(input_shape)
right_input = tf.keras.layers.Input(input_shape)
siam = tf.keras.Sequential([
# tf.keras.layers.LSTM(50, kernel_initializer='glorot_normal',
# recurrent_initializer='glorot_normal',
# #bias_initializer=tf.keras.initializers.Constant(2.5),
# dropout=0.1)
tf.keras.layers.GRU(256, kernel_initializer='glorot_uniform',
bias_initializer=tf.keras.initializers.Constant(2.5),
dropout=0.35)
])
encoded_l = siam(left_input)
encoded_r = siam(right_input)
manhattan = lambda x: tf.keras.backend.abs(x[0] - x[1])
# manhattan = lambda x: tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(x[0] - x[1])))
merged_mangattan = tf.keras.layers.Lambda(function=manhattan, output_shape=lambda x: x[0])([encoded_l, encoded_r])
prediction = tf.keras.layers.Dense(1, activation='linear')(merged_mangattan)
siamese_net = tf.keras.Model([left_input, right_input], prediction)
"""OPTIMIZER AND LOSS DEFINITION"""
siamese_net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=1,
rho=0.9,
clipvalue=1.5),
loss='mse')
# -
siamese_net.fit([np.array(train_a), np.array(train_b)],
np.array(data_train['normed_score']),
epochs=700,
batch_size=64)
preds = siamese_net.predict([np.array(train_a), np.array(train_b)])
print(f"Train: {pearsonr([x[0] for x in preds.tolist()], data_train['normed_score'])[0]}")
preds = siamese_net.predict([np.array(test_a), np.array(test_b)])
print(f"Test: {pearsonr([x[0] for x in preds.tolist()], data['normed_score'])[0]}")
preds = siamese_net.predict([np.array(test_a), np.array(test_b)])
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], data['normed_score'])[0]}")
preds = siamese_net.predict([np.array(test_a), np.array(test_b)])
print(f"Spearmans: {spearmanr([x[0] for x in preds.tolist()], data['normed_score'])[0]}")
"""128, 1.5, 0.35, Adadelta(1, 0.9, 1.5), 700"""
# ##### STS
# Load the STS-benchmark train split: tab-separated, first line is the header.
# Quotes and trailing newlines are stripped from every field. The context
# manager closes the handle (the original never closed it); the dead
# commented-out loader was removed.
with open('./data/Stsbenchmark/train.txt', 'r') as file:
    headers = file.readline().split('\t')
    headers = [x.replace('"', '').replace('\n', '') for x in headers]
    data = list()
    for line in file:
        a = line.split('\t')
        a = [x.replace('"', '').replace('\n', '') for x in a]
        data.append(a)
# Some rows carry one extra tab-separated field, hence the 'extra' column.
sts_train = pd.DataFrame(data, columns=headers + ['extra'])
# NOTE(review): drops the last row — presumably malformed/empty; confirm.
sts_train = sts_train.iloc[:len(sts_train)-1,:]
sts_train['sim'] = [float(x) for x in sts_train['sim']]
sts_train['normed_score'] = norm(sts_train['sim'])
# Load the STS-benchmark test split (same format as the train split), with the
# file handle properly closed and the dead commented-out loader removed.
with open('./data/Stsbenchmark/test.txt', 'r') as file:
    headers = file.readline().split('\t')
    headers = [x.replace('"', '').replace('\n', '') for x in headers]
    data = list()
    for line in file:
        a = line.split('\t')
        a = [x.replace('"', '').replace('\n', '') for x in a]
        data.append(a)
headers
# Test rows can carry up to two extra tab-separated fields.
sts_test = pd.DataFrame(data, columns=headers + ['extra', 'exta2'])
# NOTE(review): drops the last two rows — presumably malformed; confirm.
sts_test = sts_test.iloc[:len(sts_test)-2,:]
sts_test['sim'] = [float(x) for x in sts_test['sim']]
sts_test['normed_score'] = norm(sts_test['sim'])
# +
# Probe the longest (space-tokenised) sentence in each train column.
m = 0
for i in range(len(sts_train)):
    if len(sts_train['sent_1'][i].split(' ')) > m:
        m = len(sts_train['sent_1'][i].split(' '))
print(m)
m = 0
for i in range(len(sts_train)):
    if len(sts_train['sent_2'][i].split(' ')) > m:
        m = len(sts_train['sent_2'][i].split(' '))
print(m)
# +
# Same probe for the test split.
m = 0
for i in range(len(sts_test)):
    if len(sts_test['sent_1'][i].split(' ')) > m:
        m = len(sts_test['sent_1'][i].split(' '))
print(m)
m = 0
for i in range(len(sts_test)):
    if len(sts_test['sent_2'][i].split(' ')) > m:
        m = len(sts_test['sent_2'][i].split(' '))
print(m)
# +
"""TRAINING DATA"""
# 57 is the longest sentence; this cell pads to 56 tokens (see NOTE below).
all_vec_a = list()
all_vec_b = list()
for i in range(len(sts_train)):
    full_vec_a = list()
    full_vec_b = list()
    for token in sts_train['sent_1'][i].split(' '):
        try:
            full_vec_a.append(gm[token].tolist())
        except KeyError:
            # gm is a plain dict: KeyError means out-of-vocabulary — skip.
            continue
    for token in sts_train['sent_2'][i].split(' '):
        try:
            full_vec_b.append(gm[token].tolist())
        except KeyError:
            continue
    # NOTE(review): training pads to 56 while the test cell pads to 32 —
    # confirm this asymmetry is intentional (kept as-is to preserve behavior).
    while len(full_vec_a) < 56:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 56:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
# Cache the embedded/padded tensors.
with open('./data/train_a_sts_glove300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/train_b_sts_glove300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
"""TESTING DATA"""
# This cell embeds the TEST split (the original banner wrongly said TRAINING).
all_vec_a = list()
all_vec_b = list()
for i in range(len(sts_test)):
    full_vec_a = list()
    full_vec_b = list()
    for token in sts_test['sent_1'][i].split(' '):
        try:
            full_vec_a.append(gm[token].tolist())
        except KeyError:
            # Out-of-vocabulary token in the GloVe dict — skip.
            continue
    for token in sts_test['sent_2'][i].split(' '):
        try:
            full_vec_b.append(gm[token].tolist())
        except KeyError:
            continue
    # NOTE(review): pads to 32 while the train cell pads to 56 — confirm this
    # asymmetry is intentional (kept as-is to preserve behavior).
    while len(full_vec_a) < 32:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 32:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/test_a_sts_glove300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_sts_glove300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# -
# Reload the cached STS tensors, closing each handle (the original
# `pickle.load(open(...))` pattern leaked them).
with open('./data/train_a_sts_glove300.data', 'rb') as f:
    train_a = pickle.load(f)
with open('./data/train_b_sts_glove300.data', 'rb') as f:
    train_b = pickle.load(f)
with open('./data/test_a_sts_glove300.data', 'rb') as f:
    test_a = pickle.load(f)
with open('./data/test_b_sts_glove300.data', 'rb') as f:
    test_b = pickle.load(f)
# ## STS
import os
# +
def load_sts_dataset(filename):
    """Read one STS-benchmark split into a DataFrame.

    Keeps only the two sentences (fields 5 and 6 of the raw TSV) and their
    human-rated similarity score (field 4).

    :param filename: path to the raw tab-separated STS file
    :return: DataFrame with columns sent_1, sent_2, sim
    """
    with tf.io.gfile.GFile(filename, "r") as fh:
        rows = [
            (fields[5], fields[6], float(fields[4]))
            for fields in (line.strip().split("\t") for line in fh)
        ]
    return pd.DataFrame(rows, columns=["sent_1", "sent_2", "sim"])
def download_and_load_sts_data():
    """Download the STS-benchmark archive (cached by Keras) and load all splits.

    Returns a (sts_dev, sts_test, sts_train) tuple of DataFrames.
    """
    # get_file caches the archive under ~/.keras and extracts it next to it.
    sts_dataset = tf.keras.utils.get_file(
        fname="Stsbenchmark.tar.gz",
        origin="http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz",
        extract=True)
    sts_dev = load_sts_dataset(os.path.join(os.path.dirname(sts_dataset), "stsbenchmark", "sts-dev.csv"))
    sts_test = load_sts_dataset(os.path.join(os.path.dirname(sts_dataset), "stsbenchmark", "sts-test.csv"))
    sts_train = load_sts_dataset(os.path.join(os.path.dirname(sts_dataset), 'stsbenchmark', 'sts-train.csv'))
    return sts_dev, sts_test, sts_train
# Fetch all three STS splits (network access required on first run).
sts_dev, sts_test, sts_train = download_and_load_sts_data()
# -
sts_train.head()
def norm(x):
    """Min-max scale *x* to [0, 1] (peak-to-peak normalisation)."""
    return (x - np.min(x)) / np.ptp(x)
# +
# Load the STS-benchmark dev split from the local copy (handle closed via
# context manager; the original never closed it).
with open('./data/Stsbenchmark/dev.txt') as file:
    header = file.readline().split('\t')
    headers = [x.replace('"', '').replace('\n', '') for x in header]
    data = list()
    for line in file:
        a = line.split('\t')
        a = [x.replace('"', '').replace('\n', '') for x in a]
        data.append(a)
sts_dev = pd.DataFrame(data, columns=headers)
# NOTE(review): drops the last row — presumably malformed; confirm.
sts_dev = sts_dev.iloc[:len(sts_dev)-1,:]
sts_dev = sts_dev[['sent_1', 'sent_2', 'sim']]
sts_dev['sim'] = [float(x) for x in sts_dev['sim']]
sts_dev['sim'] = norm(sts_dev['sim'])
# +
# Back-translation-augmented variants of the STS splits.
df_aug = pd.read_csv('./data/sts_train_taug.csv')
df_aug.tail()
df_test_aug = pd.read_csv('./data/sts_test_taug.csv')
df_test_aug.tail()
# -
# Min-max normalise every similarity column to [0, 1].
sts_test['sim'] = norm(sts_test['sim'])
sts_train['sim'] = norm(sts_train['sim'])
df_aug['sim'] = norm(df_aug['sim'])
df_test_aug['sim'] = norm(df_test_aug['sim'])
# DataFrame.append was deprecated and removed in pandas 2.0; pd.concat is the
# drop-in replacement with identical row-stacking semantics here.
sts_train = pd.concat([sts_train, df_aug])
sts_train = sts_train.reset_index()
# +
def clean_text(text):
    """Lower-case *text* and normalise punctuation/contractions.

    Applies an ordered sequence of regex substitutions (identical to the
    original recipe) and returns the cleaned string, not tokenised.
    """
    # (pattern, replacement) pairs — order matters, do not reorder.
    rules = [
        (r"[^A-Za-z0-9^,!.\/'+-=]", " "),
        (r"what's", "what is "),
        (r"\'s", " "),
        (r"\'ve", " have "),
        (r"can't", "cannot "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r",", " "),
        (r"\.", " "),
        (r"!", " ! "),
        (r"\/", " "),
        (r"\^", " ^ "),
        (r"\+", " + "),
        (r"\-", " - "),
        (r"\=", " = "),
        (r"'", " "),
        (r"(\d+)(k)", r"\g<1>000"),
        (r":", " : "),
        (r" e g ", " eg "),
        (r" b g ", " bg "),
        (r" u s ", " american "),
        (r"\0s", "0"),
        (r" 9 11 ", "911"),
        (r"e - mail", "email"),
        (r"j k", "jk"),
        (r"\s{2,}", " "),
    ]
    cleaned = str(text).lower()
    for pattern, repl in rules:
        cleaned = re.sub(pattern, repl, cleaned)
    return cleaned
def text_to_word_list(text):
    """Clean *text* (same regex recipe as clean_text) and split into tokens."""
    # (pattern, replacement) pairs — order matters, do not reorder.
    rules = [
        (r"[^A-Za-z0-9^,!.\/'+-=]", " "),
        (r"what's", "what is "),
        (r"\'s", " "),
        (r"\'ve", " have "),
        (r"can't", "cannot "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r",", " "),
        (r"\.", " "),
        (r"!", " ! "),
        (r"\/", " "),
        (r"\^", " ^ "),
        (r"\+", " + "),
        (r"\-", " - "),
        (r"\=", " = "),
        (r"'", " "),
        (r"(\d+)(k)", r"\g<1>000"),
        (r":", " : "),
        (r" e g ", " eg "),
        (r" b g ", " bg "),
        (r" u s ", " american "),
        (r"\0s", "0"),
        (r" 9 11 ", "911"),
        (r"e - mail", "email"),
        (r"j k", "jk"),
        (r"\s{2,}", " "),
    ]
    cleaned = str(text).lower()
    for pattern, repl in rules:
        cleaned = re.sub(pattern, repl, cleaned)
    return cleaned.split()
# -
def prepare_embeddings(model, datasets, question_cols):
    """Convert sentence columns to index lists and build an embedding matrix.

    Mutates every DataFrame in *datasets* in place: each cell of the given
    *question_cols* is replaced by a list of vocabulary indices (index 0 is
    reserved for padding/'<unk>'). Words absent from *model*'s vocabulary
    are silently dropped.

    Returns (embeddings, embedding_dim): embeddings[i] is the vector for
    vocabulary index i; row 0 is all zeros so padding is ignored.
    """
    vocabulary = {}
    inverse_vocabulary = ['<unk>']  # index 0 is the padding/unknown slot
    for dataset in datasets:
        for index, row in dataset.iterrows():
            for question in question_cols:
                q2n = []  # question as a list of vocabulary indices
                for word in text_to_word_list(row[question]):
                    if word not in model.vocab:
                        continue  # skip out-of-vocabulary words entirely
                    if word in vocabulary:
                        q2n.append(vocabulary[word])
                    else:
                        # First sighting: assign the next free index.
                        new_index = len(inverse_vocabulary)
                        vocabulary[word] = new_index
                        inverse_vocabulary.append(word)
                        q2n.append(new_index)
                # Replace the raw sentence with its index representation.
                dataset.at[index, question] = q2n
    embedding_dim = model.vector_size
    # Random init for every row; known words are overwritten below.
    embeddings = 1 * np.random.randn(len(vocabulary) + 1, embedding_dim)
    embeddings[0] = 0  # padding row must stay all zeros
    for word, index in vocabulary.items():
        if word in model.vocab:
            embeddings[index] = model.word_vec(word)
    return embeddings, embedding_dim
# +
# word2vec = gensim.models.KeyedVectors.load_word2vec_format('./models/enwiki_20180420_300d.txt')
# word2vec = gensim.models.KeyedVectors.load_word2vec_format('./models/GoogleNews-vectors-negative300.bin', binary=True)
# -
# Index-encode the (augmented) train and test splits and build the matrix.
embeddings, embedding_dim = prepare_embeddings(model=word2vec, datasets=[sts_train, df_test_aug], question_cols=['sent_1', 'sent_2'])
# Longest index sequence across both splits decides the padded length.
max_seq_length = max(sts_train.sent_1.map(lambda x: len(x)).max(),
                     sts_train.sent_2.map(lambda x: len(x)).max(),
                     df_test_aug.sent_1.map(lambda x: len(x)).max(),
                     df_test_aug.sent_2.map(lambda x: len(x)).max())
X_train = {'left': sts_train.sent_1, 'right': sts_train.sent_2}
X_test = {'left': df_test_aug.sent_1, 'right': df_test_aug.sent_2}
# Zero-pad every sequence to max_seq_length (index 0 is the padding row).
for dataset, side in itertools.product([X_train, X_test], ['left', 'right']):
    dataset[side] = tf.keras.preprocessing.sequence.pad_sequences(dataset[side], maxlen=max_seq_length)
# +
def exponent_neg_manhattan_distance(left, right):
    """Similarity of two encoder outputs: exp(-L1 distance), in (0, 1]."""
    l1 = tf.keras.backend.sum(tf.keras.backend.abs(left - right), axis=1, keepdims=True)
    return tf.keras.backend.exp(-l1)
# The visible layer: integer word-index sequences of fixed padded length.
left_input = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32')
right_input = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32')
# Frozen embedding lookup initialised from the precomputed matrix.
embedding_layer = tf.keras.layers.Embedding(len(embeddings), embedding_dim, weights=[embeddings], input_length=max_seq_length,
                                            trainable=False)
encoded_left = embedding_layer(left_input)
encoded_right = embedding_layer(right_input)
# Since this is a siamese network, both sides share the same LSTM
shared_gru = tf.keras.layers.GRU(50, name='gru', recurrent_activation='sigmoid', reset_after=True,
                                 bias_initializer=tf.keras.initializers.Constant(4.5), dropout=0.0,
                                 kernel_regularizer=None, recurrent_dropout=0.0)
left_output = shared_gru(encoded_left)
right_output = shared_gru(encoded_right)
# Calculates the distance as defined by the MaLSTM model
magru_distance = tf.keras.layers.Lambda(function=lambda x: exponent_neg_manhattan_distance(x[0], x[1]),
                                        output_shape=lambda x: (x[0][0], 1))([left_output, right_output])
magru = tf.keras.Model([left_input, right_input], [magru_distance])
optimizer=tf.keras.optimizers.Adadelta(learning_rate=1, rho=0.985, clipnorm=2.5)
# optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, clipvalue=1.5)
import tensorflow.keras.backend as K
def pear(y_true, y_pred):
    """Squared Pearson correlation between targets and predictions (Keras ops)."""
    centered_true = y_true - K.mean(y_true)
    centered_pred = y_pred - K.mean(y_pred)
    covariance = K.sum(tf.multiply(centered_true, centered_pred))
    denominator = K.sqrt(tf.multiply(K.sum(K.square(centered_true)),
                                     K.sum(K.square(centered_pred))))
    correlation = covariance / denominator
    # Clamp numerical drift outside [-1, 1] before squaring.
    correlation = K.maximum(K.minimum(correlation, 1.0), -1.0)
    return K.square(correlation)
magru.compile(loss='mean_squared_error', optimizer=optimizer)
# -
hist = magru.fit([X_train['left'], X_train['right']],
                 np.array(sts_train['sim']),
                 epochs=50,
                 batch_size=64,
                 validation_data=([X_test['left'], X_test['right']], df_test_aug['sim'])
                 )
# Train/validation loss curves.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
preds = magru.predict([X_train['left'], X_train['right']])
print(f"Train: {pearsonr([x[0] for x in preds.tolist()], sts_train['sim'])[0]}")
preds = magru.predict([X_test['left'], X_test['right']])
print(f"Test: {pearsonr([x[0] for x in preds.tolist()], df_test_aug['sim'])[0]}")
# mse = np.mean([(preds[x] - sts_test['sim'][x])**2 for x in range(len(sts_test))])
# print(f'MSE: {mse}')
preds = magru.predict([X_test['left'], X_test['right']])
# +
# Average predictions in consecutive chunks of 5.
# NOTE(review): presumably 5 augmented variants per original test pair — confirm.
b_idx = 0
e_idx = b_idx + 5
p_avg = list()
while e_idx <= len(preds):
    p_avg.append(np.mean(preds[b_idx:e_idx]))
    b_idx += 5
    e_idx += 5
# -
# Correlate the chunk-averaged predictions with the original test scores.
pearsonr(p_avg, sts_test['sim'])
# # SemEval-2014
# +
# Korean-translated SemEval splits plus the original English ones.
df_train = pd.read_csv('./data/semeval_train_ko.csv')
df_train_norm = pd.read_csv('./data/semeval_train.csv')
df_test = pd.read_csv('./data/semeval_test_ko.csv')
# df_test = df_test.iloc[:len(df_test)-1, :]
df_test.tail()
# -
df_train.tail()
# Align the English train split to the sent_1/sent_2/sim schema.
# NOTE(review): 'sim' here is the raw relatedness_score; it is min-max
# normalised further below — confirm the double handling is intended.
df_train_norm['sent_1'] = df_train_norm['sentence_A']
df_train_norm['sent_2'] = df_train_norm['sentence_B']
df_train_norm['sim'] = df_train_norm['relatedness_score']
df_train_norm = df_train_norm[['sent_1', 'sent_2', 'sim']]
df_test_norm = pd.read_csv('./data/semeval_test.csv')
df_test_norm['sim'] = norm(df_test_norm['relatedness_score'])
df_test_norm['sent_1'] =df_test_norm['sentence_A']
df_test_norm['sent_2'] = df_test_norm['sentence_B']
df_test_norm = df_test_norm[['sent_1', 'sent_2', 'sim']]
df_test_norm.tail()
def norm(x):
    """Min-max scale *x* into the [0, 1] interval."""
    minimum, maximum = np.min(x), np.max(x)
    return (x - minimum) / (maximum - minimum)
# Min-max normalise all four splits' similarity scores to [0, 1].
df_train['sim'] = norm(df_train['sim'])
df_test['sim'] = norm(df_test['sim'])
df_train_norm['sim'] = norm(df_train_norm['sim'])
df_test_norm['sim'] = norm(df_test_norm['sim'])
df_test.tail()
# +
# Mirror the English column names onto the Korean test split.
df_test['sent_1'] = df_test['sentence_A']
df_test['sent_2'] = df_test['sentence_B']
# -
# Stack the Korean and English variants of each split.
# DataFrame.append was deprecated and removed in pandas 2.0; pd.concat is the
# drop-in replacement with identical row-stacking semantics.
df_train = pd.concat([df_train, df_train_norm])
df_test = pd.concat([df_test, df_test_norm])
df_train = df_train.reset_index()
df_test = df_test.reset_index()
# +
def clean_text(text):
    """Lower-case *text* and normalise punctuation/contractions.

    Applies an ordered sequence of regex substitutions (identical to the
    original recipe) and returns the cleaned string, not tokenised.
    """
    # (pattern, replacement) pairs — order matters, do not reorder.
    rules = [
        (r"[^A-Za-z0-9^,!.\/'+-=]", " "),
        (r"what's", "what is "),
        (r"\'s", " "),
        (r"\'ve", " have "),
        (r"can't", "cannot "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r",", " "),
        (r"\.", " "),
        (r"!", " ! "),
        (r"\/", " "),
        (r"\^", " ^ "),
        (r"\+", " + "),
        (r"\-", " - "),
        (r"\=", " = "),
        (r"'", " "),
        (r"(\d+)(k)", r"\g<1>000"),
        (r":", " : "),
        (r" e g ", " eg "),
        (r" b g ", " bg "),
        (r" u s ", " american "),
        (r"\0s", "0"),
        (r" 9 11 ", "911"),
        (r"e - mail", "email"),
        (r"j k", "jk"),
        (r"\s{2,}", " "),
    ]
    cleaned = str(text).lower()
    for pattern, repl in rules:
        cleaned = re.sub(pattern, repl, cleaned)
    return cleaned
def text_to_word_list(text):
    """Clean *text* (same regex recipe as clean_text) and split into tokens."""
    # (pattern, replacement) pairs — order matters, do not reorder.
    rules = [
        (r"[^A-Za-z0-9^,!.\/'+-=]", " "),
        (r"what's", "what is "),
        (r"\'s", " "),
        (r"\'ve", " have "),
        (r"can't", "cannot "),
        (r"n't", " not "),
        (r"i'm", "i am "),
        (r"\'re", " are "),
        (r"\'d", " would "),
        (r"\'ll", " will "),
        (r",", " "),
        (r"\.", " "),
        (r"!", " ! "),
        (r"\/", " "),
        (r"\^", " ^ "),
        (r"\+", " + "),
        (r"\-", " - "),
        (r"\=", " = "),
        (r"'", " "),
        (r"(\d+)(k)", r"\g<1>000"),
        (r":", " : "),
        (r" e g ", " eg "),
        (r" b g ", " bg "),
        (r" u s ", " american "),
        (r"\0s", "0"),
        (r" 9 11 ", "911"),
        (r"e - mail", "email"),
        (r"j k", "jk"),
        (r"\s{2,}", " "),
    ]
    cleaned = str(text).lower()
    for pattern, repl in rules:
        cleaned = re.sub(pattern, repl, cleaned)
    return cleaned.split()
# -
def prepare_embeddings(model, datasets, question_cols):
    """Convert sentence columns to index lists and build an embedding matrix.

    Mutates every DataFrame in *datasets* in place: each cell of the given
    *question_cols* is replaced by a list of vocabulary indices (index 0 is
    reserved for padding/'<unk>'). Words absent from *model*'s vocabulary
    are silently dropped.

    Returns (embeddings, embedding_dim): embeddings[i] is the vector for
    vocabulary index i; row 0 is all zeros so padding is ignored.
    """
    vocabulary = {}
    inverse_vocabulary = ['<unk>']  # index 0 is the padding/unknown slot
    for dataset in datasets:
        for index, row in dataset.iterrows():
            for question in question_cols:
                q2n = []  # question as a list of vocabulary indices
                for word in text_to_word_list(row[question]):
                    if word not in model.vocab:
                        continue  # skip out-of-vocabulary words entirely
                    if word in vocabulary:
                        q2n.append(vocabulary[word])
                    else:
                        # First sighting: assign the next free index.
                        new_index = len(inverse_vocabulary)
                        vocabulary[word] = new_index
                        inverse_vocabulary.append(word)
                        q2n.append(new_index)
                # Replace the raw sentence with its index representation.
                dataset.at[index, question] = q2n
    embedding_dim = model.vector_size
    # Random init for every row; known words are overwritten below.
    embeddings = 1 * np.random.randn(len(vocabulary) + 1, embedding_dim)
    embeddings[0] = 0  # padding row must stay all zeros
    for word, index in vocabulary.items():
        if word in model.vocab:
            embeddings[index] = model.word_vec(word)
    return embeddings, embedding_dim
# word2vec = gensim.models.KeyedVectors.load_word2vec_format('./models/enwiki_20180420_300d.txt')
# Google News vectors (binary word2vec format, 300-dim).
word2vec = gensim.models.KeyedVectors.load_word2vec_format('./models/GoogleNews-vectors-negative300.bin', binary=True)
# Index-encode both merged splits and build the embedding matrix.
embeddings, embedding_dim = prepare_embeddings(model=word2vec, datasets=[df_train, df_test], question_cols=['sent_1', 'sent_2'])
# +
# Longest index sequence across both splits decides the padded length.
max_seq_length = max(df_train.sent_1.map(lambda x: len(x)).max(),
                     df_train.sent_2.map(lambda x: len(x)).max(),
                     df_test.sent_1.map(lambda x: len(x)).max(),
                     df_test.sent_2.map(lambda x: len(x)).max())
X_train = {'left': df_train.sent_1, 'right': df_train.sent_2}
X_test = {'left': df_test.sent_1, 'right': df_test.sent_2}
# Zero-pad every sequence (index 0 maps to the all-zero embedding row).
for dataset, side in itertools.product([X_train, X_test], ['left', 'right']):
    dataset[side] = tf.keras.preprocessing.sequence.pad_sequences(dataset[side], maxlen=max_seq_length)
# +
def exponent_neg_manhattan_distance(left, right):
    """Similarity of two encoder outputs: exp(-L1 distance), in (0, 1]."""
    l1 = tf.keras.backend.sum(tf.keras.backend.abs(left - right), axis=1, keepdims=True)
    return tf.keras.backend.exp(-l1)
# The visible layer: integer word-index sequences of fixed padded length.
left_input = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32')
right_input = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32')
# Frozen embedding lookup initialised from the precomputed matrix.
embedding_layer = tf.keras.layers.Embedding(len(embeddings), embedding_dim, weights=[embeddings], input_length=max_seq_length,
                                            trainable=False)
encoded_left = embedding_layer(left_input)
encoded_right = embedding_layer(right_input)
# Since this is a siamese network, both sides share the same LSTM
# shared_gru = tf.keras.layers.GRU(100, name='gru', recurrent_activation='sigmoid', reset_after=True,
#                                  bias_initializer=tf.keras.initializers.Constant(2.5), dropout=0.0,
#                                  kernel_regularizer=None, recurrent_dropout=0.0)
# Since this is a siamese network, both sides share the same LSTM
shared_gru = tf.keras.layers.Bidirectional(tf.keras.layers.GRU(100, name='gru', recurrent_activation='sigmoid', reset_after=True,
                                           bias_initializer=tf.keras.initializers.Constant(2.5), dropout=0.0,
                                           kernel_regularizer=None, recurrent_dropout=0.0))
# Defined but currently unused (see the commented-out dense/dropout wiring below).
shared_dense = tf.keras.layers.Dense(50, activation='relu')
dp = tf.keras.layers.Dropout(0.25)
left_output = shared_gru(encoded_left)
right_output = shared_gru(encoded_right)
# left_output_den = shared_dense(left_output)
# right_output_den = shared_dense(right_output)
# left_output_dp = dp(left_output_den)
# right_output_dp = dp(right_output_den)
# Calculates the distance as defined by the MaLSTM model
magru_distance = tf.keras.layers.Lambda(function=lambda x: exponent_neg_manhattan_distance(x[0], x[1]),
                                        output_shape=lambda x: (x[0][0], 1))([left_output, right_output])
magru = tf.keras.Model([left_input, right_input], [magru_distance])
optimizer=tf.keras.optimizers.Adadelta(learning_rate=1, rho=0.985, clipvalue=2.0)
# optimizer = tf.keras.optimizers.Adam(learning_rate=0.01, clipvalue=1.5)
import tensorflow.keras.backend as K
def pear(y_true, y_pred):
    """Squared Pearson correlation between targets and predictions (Keras tensors)."""
    dx = y_true - K.mean(y_true)
    dy = y_pred - K.mean(y_pred)
    cov = K.sum(tf.multiply(dx, dy))
    denom = K.sqrt(tf.multiply(K.sum(K.square(dx)), K.sum(K.square(dy))))
    # Clamp to [-1, 1] to guard against floating-point drift.
    corr = K.maximum(K.minimum(cov / denom, 1.0), -1.0)
    return K.square(corr)
# MSE against the normalised similarity labels; Pearson is computed post hoc.
magru.compile(loss='mean_squared_error', optimizer=optimizer)
# -
hist = magru.fit([X_train['left'], X_train['right']],
                 np.array(df_train['sim']),
                 epochs=500,
                 batch_size=64,
                 validation_data=([X_test['left'], X_test['right']], df_test['sim'])
                 )
# Training curves.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
# Report Pearson correlation on both splits.
preds = magru.predict([X_train['left'], X_train['right']])
print(f"Train: {pearsonr([x[0] for x in preds.tolist()], df_train['sim'])[0]}")
preds = magru.predict([X_test['left'], X_test['right']])
print(f"Test: {pearsonr([x[0] for x in preds.tolist()], df_test['sim'])[0]}")
# 0.8131902365183467  (stale notebook output pasted into the source; kept as a comment)
preds = magru.predict([X_test['left'], X_test['right']])
# +
# Average each prediction with its counterpart 4927 rows later (presumably
# paired augmented examples — TODO confirm against the augmentation step).
b_idx = 0
e_idx = 4927
p_avg = list()
while e_idx < len(preds):
    first = preds[b_idx]
    second = preds[e_idx]
    p_avg.append(np.mean([first, second]))
    b_idx += 1
    e_idx += 1
# -
pearsonr(p_avg, df_test_norm['sim'])
df_test_norm
df_test
# [1, 2, 3, 4, 5][[1, 5]]  -- removed: indexing a list with a list raises
# TypeError unconditionally, so this line crashed the script when run top-to-bottom.
len(preds)
len(p_avg)
df_test_norm.shape
b_idx
e_idx
len(sts_test) * 7
len(df_test_aug)
b_idx
df_test_aug.iloc[b_idx:e_idx, :]
preds
sts_test['sim']
# Loss curves for the STSBenchmark run.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss for SiameseNet on STSBenchmark')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend(['Train', 'Test'])
plt.savefig('./results/sts_loss.png', dpi=400)
# Scatter of predicted vs. actual similarity, sorted by the actual score.
preds = magru.predict([X_test['left'], X_test['right']])
preds
sts_plot = pd.DataFrame({
    'actual': sts_test['sim'].tolist(),
    'preds': [x[0] for x in preds]
})
sts_plot.sort_values('actual')
plt.scatter(x=[x for x in range(len(sts_plot))],
            y=sts_plot.sort_values('actual')['actual'])
plt.scatter(x=[x for x in range(len(sts_plot))],
            y=sts_plot.sort_values('actual')['preds'])
plt.legend(['Actual', 'Predicted'])
plt.title('Predicted vs. Actual Similarity Scores for STSBenchmark')
plt.xlabel('Index')
plt.ylabel('Similarity')
plt.text(x=1100, y=0.1, s='p = 0.86', size=16)  # hard-coded correlation annotation
plt.savefig('./results/stsbenchmark.png', dpi=400)
# Persist the trained STS weights.
magru.save_weights('./models/siam/sts')
# +
# Load the SemEval/SICK test split (tab-separated with a header row).
# The original opened the file twice and never closed either handle; a single
# context-managed pass reads the header and then the data rows.
with open('./data/semeval.txt', 'r') as file:
    headers = file.readline().split('\t')
    data = [line.split('\t') for line in file]
data = pd.DataFrame(data, columns=headers)
data['relatedness_score'] = pd.to_numeric(data['relatedness_score'])
data = data.iloc[:len(data)-1, :]  # drop the trailing blank/partial row
data.head()
# +
# Load the SemEval/SICK training split, mirroring the test-split loader.
# Single context-managed open replaces the original's leaky double open().
with open('./data/semeval_train.txt', 'r') as file:
    headers = file.readline().split('\t')
    data_train = [line.split('\t') for line in file]
data_train = pd.DataFrame(data_train, columns=headers)
data_train['relatedness_score'] = pd.to_numeric(data_train['relatedness_score'])
data_train.tail()
# -
# Min-max normalise the relatedness labels to [0, 1].
data['normed_score'] = norm(data['relatedness_score'])
data_train['normed_score'] = norm(data_train['relatedness_score'])
# Build the shared embedding matrix and replace sentences with index sequences.
embeddings, embedding_dim = prepare_embeddings(model=word2vec, datasets=[data_train, data], question_cols=['sentence_A', 'sentence_B'])
# NOTE(review): len(x) measures the length of whatever prepare_embeddings left in
# the sentence columns — confirm these are token-index lists, not raw strings.
max_seq_length = max(data_train.sentence_A.map(lambda x: len(x)).max(),
                     data_train.sentence_B.map(lambda x: len(x)).max(),
                     data.sentence_A.map(lambda x: len(x)).max(),
                     data.sentence_B.map(lambda x: len(x)).max())
X_train = {'left': data_train.sentence_A, 'right': data_train.sentence_B}
X_test = {'left': data.sentence_A, 'right': data.sentence_B}
# Pad every sequence to the common maximum length.
for dataset, side in itertools.product([X_train, X_test], ['left', 'right']):
    dataset[side] = tf.keras.preprocessing.sequence.pad_sequences(dataset[side], maxlen=max_seq_length)
# +
# Rebuild the MaGRU siamese model for SICK (unidirectional GRU this time).
def exponent_neg_manhattan_distance(left, right):
    """ Helper function for the similarity estimate of the LSTMs outputs"""
    return tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(left - right), axis=1, keepdims=True))
# The visible layer
left_input = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32')
right_input = tf.keras.layers.Input(shape=(max_seq_length,), dtype='int32')
# Frozen pretrained embeddings shared by both branches.
embedding_layer = tf.keras.layers.Embedding(len(embeddings), embedding_dim, weights=[embeddings], input_length=max_seq_length,
                                            trainable=False)
encoded_left = embedding_layer(left_input)
encoded_right = embedding_layer(right_input)
# Since this is a siamese network, both sides share the same LSTM
shared_gru = tf.keras.layers.GRU(100, name='gru', recurrent_activation='sigmoid', reset_after=True,
                                 bias_initializer=tf.keras.initializers.Constant(2.5), dropout=0.0)
left_output = shared_gru(encoded_left)
right_output = shared_gru(encoded_right)
# Calculates the distance as defined by the MaLSTM model
magru_distance = tf.keras.layers.Lambda(function=lambda x: exponent_neg_manhattan_distance(x[0], x[1]),
                                        output_shape=lambda x: (x[0][0], 1))([left_output, right_output])
magru = tf.keras.Model([left_input, right_input], [magru_distance])
optimizer=tf.keras.optimizers.Adadelta(learning_rate=1, rho=0.985, clipvalue=2.0)
import tensorflow.keras.backend as K
def pear(y_true, y_pred):
    # Squared Pearson correlation metric, computed on Keras tensors.
    x = y_true
    y = y_pred
    mx = K.mean(x)
    my = K.mean(y)
    xm, ym = x-mx, y-my
    r_num = K.sum(tf.multiply(xm,ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    r = r_num / r_den
    # Clamp to [-1, 1] against floating-point drift.
    r = K.maximum(K.minimum(r, 1.0), -1.0)
    return K.square(r)
magru.compile(loss='mean_squared_error', optimizer=optimizer)
# -
# Train on SICK's normalised relatedness scores and report Pearson on both splits.
hist = magru.fit([X_train['left'], X_train['right']],
                 np.array(data_train['normed_score']),
                 epochs=500,
                 batch_size=64,
                 validation_data=([X_test['left'], X_test['right']], data['normed_score']))
preds = magru.predict([X_train['left'], X_train['right']])
print(f"Train: {pearsonr([x[0] for x in preds.tolist()], data_train['normed_score'])[0]}")
preds = magru.predict([X_test['left'], X_test['right']])
print(f"Test: {pearsonr([x[0] for x in preds.tolist()], data['normed_score'])[0]}")
# Loss curves.
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss for SiameseNet on SICK')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend(['Train', 'Test'])
# plt.savefig('./results/sick_loss.png', dpi=400)
# Predicted vs. actual scatter, sorted by the actual score.
preds = magru.predict([X_test['left'], X_test['right']])
sts_plot = pd.DataFrame({
    'actual': data['normed_score'].tolist(),
    'preds': [x[0] for x in preds]
})
plt.scatter(x=[x for x in range(len(sts_plot))],
            y=sts_plot.sort_values('actual')['preds'])
plt.scatter(x=[x for x in range(len(sts_plot))],
            y=sts_plot.sort_values('actual')['actual'])
plt.legend(['Predicted', 'Actual'])
plt.title('Predicted vs. Actual Similarity Scores (SiameseNet)')
plt.xlabel('Index')
plt.ylabel('Similarity')
plt.text(x=2000, y=0.1, s='p = 0.834', size=16)  # hard-coded correlation annotation
plt.savefig('./results/sick.png', dpi=400)
magru.save_weights('./models/siam/sick')
# +
# Quick cosine-similarity sanity check on two hand-picked vectors.
a = [2.7654870801855078, 0.35995355443076027, 0.016221679989074141, -0.012664358453398751, 0.0036888812311235068]
b = [-6.2588482809118942, -0.88952297609194686, 0.017336984676103874, -0.0054928004763216964, 0.011122959185936367]
print(-(cosine(a,b) - 1))
# -
# #### Failed attempts
# +
"""NETWORK DEFINITION"""
# Siamese GRU over raw 300-d word vectors (no embedding layer), with a learned
# Dense head on top of the element-wise distance.
input_shape = (None, 300,)
left_input = tf.keras.layers.Input(input_shape)
right_input = tf.keras.layers.Input(input_shape)
siam = tf.keras.Sequential([
    # tf.keras.layers.LSTM(50, kernel_initializer='glorot_normal',
    #                      recurrent_initializer='glorot_normal',
    #                      #bias_initializer=tf.keras.initializers.Constant(2.5),
    #                      dropout=0.1)
    tf.keras.layers.GRU(256, kernel_initializer='glorot_uniform',
                        bias_initializer=tf.keras.initializers.Constant(2.5),
                        dropout=0.35)
])
encoded_l = siam(left_input)
encoded_r = siam(right_input)
# NOTE(review): despite the name, this is exp(|diff|) element-wise — there is no
# sum over dimensions, so it is not a Manhattan distance; confirm intent.
manhattan = lambda x: tf.keras.backend.exp(tf.keras.backend.abs(x[0] - x[1]))
# manhattan = lambda x: tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(x[0] - x[1])))
merged_mangattan = tf.keras.layers.Lambda(function=manhattan, output_shape=lambda x: x[0])([encoded_l, encoded_r])
prediction = tf.keras.layers.Dense(1, activation='linear')(merged_mangattan)
siamese_net = tf.keras.Model([left_input, right_input], prediction)
"""OPTIMIZER AND LOSS DEFINITION"""
siamese_net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=1,
                                                           rho=0.9,
                                                           clipvalue=1.5),
                    loss='mse')
# +
# Second attempt: MaLSTM-style scalar similarity head instead of a Dense layer.
def exponent_neg_manhattan_distance(left, right):
    """ Helper function for the similarity estimate of the LSTMs outputs"""
    return tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(left - right), axis=1, keepdims=True))
input_shape = (None, 300,)
left_input = tf.keras.layers.Input(input_shape)
right_input = tf.keras.layers.Input(input_shape)
siam = tf.keras.Sequential([
    # tf.keras.layers.LSTM(50, kernel_initializer='glorot_normal',
    #                      recurrent_initializer='glorot_normal',
    #                      #bias_initializer=tf.keras.initializers.Constant(2.5),
    #                      dropout=0.1)
    tf.keras.layers.GRU(100, kernel_initializer='glorot_uniform',
                        bias_initializer=tf.keras.initializers.Constant(2.5),
                        dropout=0.35)
])
encoded_l = siam(left_input)
encoded_r = siam(right_input)
manhat_dist = tf.keras.layers.Lambda(
    function=lambda x: exponent_neg_manhattan_distance(x[0], x[1]),
    output_shape=lambda x: (x[0][0], 1))([encoded_l, encoded_r]
)
siamese_net = tf.keras.Model([left_input, right_input], manhat_dist)
siamese_net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=1,
                                                           rho=0.9,
                                                           clipnorm=1.25),
                    loss='mse')
# -
# Train the failed-attempt network on pre-vectorised STS sentences.
hist = siamese_net.fit([np.array(train_a), np.array(train_b)],
                       np.array(sts_train['normed_score']),
                       epochs=500,
                       batch_size=64,
                       validation_data=([np.array(test_a), np.array(test_b)], sts_test['normed_score']))
preds = siamese_net.predict([np.array(train_a), np.array(train_b)])
print(f"Train: {pearsonr([x[0] for x in preds.tolist()], sts_train['normed_score'])[0]}")
preds = siamese_net.predict([np.array(test_a), np.array(test_b)])
print(f"Test: {pearsonr([x[0] for x in preds.tolist()], sts_test['normed_score'])[0]}")
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
# #### Testing USE
# !pip install tensorflow-hub
import tensorflow_hub as hub
from scipy.spatial.distance import cosine
# Loading up the embedding layer...
use_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
use_model = hub.KerasLayer(use_url, trainable=False)
# Embed all training sentences at once (sanity check of the layer).
use_model(data_train['sentence_A']).numpy()
# 0 = perfect similarityt
cosine(use_model([data_train['sentence_A'][0]]).numpy(),
       use_model([data_train['sentence_B'][0]]).numpy())
data_train['normed_score']
def cossim(x, y):
    """Cosine similarity of two vectors (scipy's cosine() returns a distance)."""
    return 1 - cosine(x, y)
# Pairwise USE cosine similarity over the test set.
use_simil = [cossim(use_model([data['sentence_A'][x]]), use_model([data['sentence_B'][x]])) for x in range(len(data))]
# NOTE(review): use_simil is computed over `data` (test) but compared against
# data_train here — the lengths presumably differ; verify which split was meant.
pearsonr(norm(use_simil), data_train['normed_score'])
pearsonr([1 - use_simil[x] for x in range(len(use_simil))], data['normed_score'])
# +
sts_plot = pd.DataFrame({
    'actual': data['normed_score'].tolist(),
    'preds': [1 - use_simil[x] for x in range(len(use_simil))]
})
plt.scatter(x=[x for x in range(len(sts_plot))],
            y=sts_plot.sort_values('actual')['preds'])
plt.scatter(x=[x for x in range(len(sts_plot))],
            y=sts_plot.sort_values('actual')['actual'])
plt.legend(['Predicted', 'Actual'])
plt.title('Predicted vs. Actual Similarity Scores (USE)')
plt.xlabel('Index')
plt.ylabel('Similarity')
plt.text(x=2000, y=0.1, s='p = 0.776', size=16)
# plt.savefig('./results/sick_use.png', dpi=400)
# -
[1 - use_simil[x] for x in range(len(use_simil))]
len(data['normed_score'].tolist())
len(use_simil)
use_simil = [cossim(use_model([data['sentence_A'][x]]), use_model([data['sentence_B'][x]])) for x in range(len(data))]
pearsonr(use_simil, data['normed_score'])
# Cosine self-similarity sanity checks (should all give distance 0).
a = np.array([np.random.randint(10) for _ in range(20)])
np.linalg.norm(a, 2)
np.dot(a,a) / ((np.dot(a, a)) ** (1/2) * (np.dot(a, a)) ** (1/2))
cosine(a,a)
1 - (np.dot(a, a) / (np.linalg.norm(a) * np.linalg.norm(a)))
cosine(a,a)
def norm(x):
    """Min-max scale x onto the [0, 1] interval."""
    lo, hi = np.min(x), np.max(x)
    return (x - lo) / (hi - lo)
# +
# Load the STSBenchmark test CSV and min-max normalise its scores.
df_test = pd.read_csv('./data/Stsbenchmark/test.csv')
def norm(x):
    # Min-max scale x onto [0, 1].
    return (x - np.min(x)) / (np.max(x) - np.min(x))
df_test['normed_score'] = norm(df_test['score'])
df_test.head()
# -
# Inspect the genre/subset labels of each split.
np.unique(df_train['desc'])
np.unique(df_test['test'])
# Lowercase all sentences before word2vec lookup.
df_train['sentence_A'] = [x.lower() for x in df_train['sentence_A']]
df_train['sentence_B'] = [x.lower() for x in df_train['sentence_B']]
df_test['sentence_A'] = [x.lower() for x in df_test['sentence_A']]
df_test['sentence_B'] = [x.lower() for x in df_test['sentence_B']]
# 300-d Wikipedia word2vec vectors (large file; slow to load).
word2vec = gensim.models.KeyedVectors.load_word2vec_format('./models/enwiki_20180420_300d.txt')
# Keep only the 'images' genre from each split.
df_train[df_train['desc'] == 'images'].reset_index()
df_train_images = df_train[df_train['desc'] == 'images'].reset_index()
df_test_images = df_test[df_test['test'] == 'images'].reset_index()
# Longest sentence A (token count) — used to pick the padding length below.
m = 0
for i in range(len(df_test_images)):
    if len(df_test_images['sentence_A'][i].split(' ')) > m:
        m = len(df_test_images['sentence_A'][i].split(' '))
m
# Longest sentence B.
m = 0
for i in range(len(df_test_images)):
    if len(df_test_images['sentence_B'][i].split(' ')) > m:
        m = len(df_test_images['sentence_B'][i].split(' '))
m
# +
"""TRAINING DATA"""
# 57 is the longest sentence. so let's pad all with [0,..., 0] until len()==57
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_train_images)):
full_vec_a = list()
full_vec_b = list()
for token in df_train_images['sentence_A'][i].split(' '):
try:
full_vec_a.append(word2vec[token].tolist())
except:
continue
for token in df_train_images['sentence_B'][i].split(' '):
try:
full_vec_b.append(word2vec[token].tolist())
except:
continue
# Padding... we're using 100
while len(full_vec_a) < 27:
full_vec_a.append(np.zeros(300))
while len(full_vec_b) < 27:
full_vec_b.append(np.zeros(300))
all_vec_a.append(np.array(full_vec_a))
all_vec_b.append(np.array(full_vec_b))
# Now we need to ensure that each
with open('./data/train_a_images_w2v300.data', 'wb') as f:
pickle.dump(all_vec_a, f)
with open('./data/train_b_images_w2v300.data', 'wb') as f:
pickle.dump(all_vec_b, f)
# +
"""TESTING DATA"""
# Same vectorisation/padding as the training cell, over the test split.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_test_images)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_test_images['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip (bare except also swallowed KeyboardInterrupt)
            continue
    for token in df_test_images['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip
            continue
    # Padding with 300-d zero vectors up to the fixed length.
    while len(full_vec_a) < 27:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 27:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/test_a_images_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_images_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# -
# ### Network
"""DATA DEFINITION"""
# Reload the cached, vectorised sentence pairs.
train_a = pickle.load(open('./data/train_a_images_w2v300.data', 'rb'))
train_b = pickle.load(open('./data/train_b_images_w2v300.data', 'rb'))
test_a = pickle.load(open('./data/test_a_images_w2v300.data', 'rb'))
test_b = pickle.load(open('./data/test_b_images_w2v300.data', 'rb'))
# +
"""NETWORK DEFINITION"""
# Siamese GRU over raw 300-d word vectors with |diff| merge + linear Dense head.
input_shape = (None, 300,)
left_input = tf.keras.layers.Input(input_shape)
right_input = tf.keras.layers.Input(input_shape)
siam = tf.keras.Sequential([
    # tf.keras.layers.LSTM(50, kernel_initializer='glorot_normal',
    #                      recurrent_initializer='glorot_normal',
    #                      #bias_initializer=tf.keras.initializers.Constant(2.5),
    #                      dropout=0.1)
    tf.keras.layers.GRU(50, kernel_initializer='glorot_uniform',
                        bias_initializer=tf.keras.initializers.Constant(0.5),
                        dropout=0.35)
])
encoded_l = siam(left_input)
encoded_r = siam(right_input)
# Element-wise absolute difference of the two encodings (no sum over dims).
manhattan = lambda x: tf.keras.backend.abs(x[0] - x[1])
# manhattan = lambda x: tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(x[0] - x[1])))
merged_mangattan = tf.keras.layers.Lambda(function=manhattan, output_shape=lambda x: x[0])([encoded_l, encoded_r])
prediction = tf.keras.layers.Dense(1, activation='linear')(merged_mangattan)
siamese_net = tf.keras.Model([left_input, right_input], prediction)
"""OPTIMIZER AND LOSS DEFINITION"""
siamese_net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=0.5,
                                                           rho=0.9,
                                                           clipvalue=1.5),
                    loss='mse')
# -
# Train on the images subset and report Pearson/Spearman on both splits.
siamese_net.fit([np.array(train_a), np.array(train_b)],
                np.array(df_train_images['normed_score']),
                epochs=300,
                batch_size=32)
preds = siamese_net.predict([np.array(train_a), np.array(train_b)])
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_train_images['normed_score'])[0]}")
print(f"Spearmans: {spearmanr([x[0] for x in preds.tolist()], df_train_images['normed_score'])[0]}")
preds = siamese_net.predict([np.array(test_a), np.array(test_b)]) #53.7
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_test_images['normed_score'])[0]}")
print(f"Spearmans: {spearmanr([x[0] for x in preds.tolist()], df_test_images['normed_score'])[0]}")
"""
50, glorot_uniform, 2.5, 0.35, Adadelta(0.5, 0.9, 1.5)
"""
siamese_net.save_weights('./models/siam/images/') # 0.7010318394360036
# ## Headlines (2015)
# #### preparing training data
# Read tab-separated sentence pairs; context managers close the handles the
# original left open.
with open('./data/sts2015_headlines/train_input.txt', 'r') as file:
    data = [line.split('\t') for line in file]
len(data)
with open('./data/sts2015_headlines/train_scores.txt', 'r') as file:
    labels = [line for line in file]
# Blank lines mean "no score"; everything else parses as float.
for i in range(len(labels)):
    if labels[i] == '\n':
        labels[i] = None
    if type(labels[i]) == str:
        labels[i] = float(labels[i].replace('\n', ''))
df_train_headlines = pd.DataFrame(data, columns=['sentence_A', 'sentence_B'])
df_train_headlines['score'] = labels
# NOTE: 'normed_scre' (sic) is the column name used consistently downstream.
df_train_headlines['normed_scre'] = norm(df_train_headlines['score'])
# #### preparing test data
# Context-managed read replaces the original's leaked file handle.
with open('./data/sts2015_headlines/test_all.txt') as file:
    test = list()
    for line in file:
        a = line.split('\t')
        a = [x.replace('\n', '') for x in a]
        test.append(a)
df_test_headlines = pd.DataFrame(test, columns=['scores', 'sentence_A', 'sentence_B'])
df_test_headlines = df_test_headlines.iloc[:750,:]  # keep only the headlines portion
df_test_headlines['scores'] = [float(x) for x in df_test_headlines['scores']]
df_test_headlines['normed_scre'] = norm(df_test_headlines['scores'])
# Longest sentence (token count) on each side, to choose padding lengths.
m = 0
for i in range(len(df_train_headlines)):
    if len(df_train_headlines['sentence_A'][i].split(' ')) > m:
        m = len(df_train_headlines['sentence_A'][i].split(' '))
m
m = 0
for i in range(len(df_test_headlines)):
    if len(df_test_headlines['sentence_B'][i].split(' ')) > m:
        m = len(df_test_headlines['sentence_B'][i].split(' '))
m
# +
"""TRAINING DATA"""
# Vectorise headline pairs with word2vec, padding to 35 tokens.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_train_headlines)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_train_headlines['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip (bare except also swallowed KeyboardInterrupt)
            continue
    for token in df_train_headlines['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip
            continue
    # Padding with 300-d zero vectors.
    while len(full_vec_a) < 35:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 35:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
# Cache the vectorised sentences.
with open('./data/train_a_headlines_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/train_b_headlines_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
"""TESTING DATA"""
# NOTE(review): test sentences are padded to 27 tokens while training used 35;
# the GRU accepts variable lengths, but confirm this asymmetry is intended.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_test_headlines)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_test_headlines['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip (bare except also swallowed KeyboardInterrupt)
            continue
    for token in df_test_headlines['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip
            continue
    # Padding with 300-d zero vectors.
    while len(full_vec_a) < 27:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 27:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/test_a_headlines_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_headlines_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# -
# #### Modeling
"""DATA DEFINITION"""
# Reload the cached headline vectors.
train_a = pickle.load(open('./data/train_a_headlines_w2v300.data', 'rb'))
train_b = pickle.load(open('./data/train_b_headlines_w2v300.data', 'rb'))
test_a = pickle.load(open('./data/test_a_headlines_w2v300.data', 'rb'))
test_b = pickle.load(open('./data/test_b_headlines_w2v300.data', 'rb'))
type(train_b[0])
# +
"""NETWORK DEFINITION"""
# Same siamese |diff| + Dense architecture as the images experiment.
input_shape = (None, 300,)
left_input = tf.keras.layers.Input(input_shape)
right_input = tf.keras.layers.Input(input_shape)
siam = tf.keras.Sequential([
    # tf.keras.layers.LSTM(50, kernel_initializer='glorot_normal',
    #                      recurrent_initializer='glorot_normal',
    #                      #bias_initializer=tf.keras.initializers.Constant(2.5),
    #                      dropout=0.1)
    tf.keras.layers.GRU(50, kernel_initializer='glorot_uniform',
                        bias_initializer=tf.keras.initializers.Constant(0.5),
                        dropout=0.35)
])
encoded_l = siam(left_input)
encoded_r = siam(right_input)
# Element-wise absolute difference of the two encodings.
manhattan = lambda x: tf.keras.backend.abs(x[0] - x[1])
# manhattan = lambda x: tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(x[0] - x[1])))
merged_mangattan = tf.keras.layers.Lambda(function=manhattan, output_shape=lambda x: x[0])([encoded_l, encoded_r])
prediction = tf.keras.layers.Dense(1, activation='linear')(merged_mangattan)
siamese_net = tf.keras.Model([left_input, right_input], prediction)
"""OPTIMIZER AND LOSS DEFINITION"""
siamese_net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=0.5,
                                                           rho=0.9,
                                                           clipvalue=1.5),
                    loss='mse')
# -
# Train on the headlines subset and report both rank correlations.
siamese_net.fit([np.array(train_a), np.array(train_b)],
                np.array(df_train_headlines['normed_scre']),
                epochs=50,
                batch_size=32)
preds = siamese_net.predict([np.array(train_a), np.array(train_b)])
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_train_headlines['normed_scre'])[0]}")
print(f"Spearmans: {spearmanr([x[0] for x in preds.tolist()], df_train_headlines['normed_scre'])[0]}")
preds = siamese_net.predict([np.array(test_a), np.array(test_b)]) #53.7
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_test_headlines['normed_scre'])[0]}")
print(f"Spearmans: {spearmanr([x[0] for x in preds.tolist()], df_test_headlines['normed_scre'])[0]}")
"""
50, glorot_uniform, 0.5, 0.35, Adadelta(0.5, 0.9, 1.5), 600
"""
siamese_net.save_weights('./models/siam/headlines/') # 0.8746802156351821
# #### All data
# Context-managed read replaces the original's leaked file handle.
with open('./data/semeval_2015_all_train.txt') as file:
    test = list()
    for line in file:
        a = line.split('\t')
        a = [x.replace('\n', '') for x in a]
        test.append(a)
df_train_all = pd.DataFrame(test, columns=['scores', 'sentence_A', 'sentence_B'])
df_train_all = df_train_all.iloc[:3000,:]  # cap the combined training set at 3000 pairs
df_train_all['scores'] = [float(x) for x in df_train_all['scores']]
df_train_all['normed_scre'] = norm(df_train_all['scores'])
# +
# OnWN test set: context-managed read replaces the leaked file handle.
with open('./data/onwn_test.txt') as file:
    test = list()
    for line in file:
        a = line.split('\t')
        a = [x.replace('\n', '') for x in a]
        test.append(a)
df_test_own = pd.DataFrame(test, columns=['scores', 'sentence_A', 'sentence_B'])
df_test_own = df_test_own.iloc[:len(df_test_own)-1,:]  # drop trailing blank row
df_test_own['scores'] = [float(x) for x in df_test_own['scores']]
df_test_own['normed_scre'] = norm(df_test_own['scores'])
df_test_own.head()
# -
# Longest sentence (token count) on each side, to choose the padding length.
m = 0
for i in range(len(df_test_own)):
    if len(df_test_own['sentence_A'][i].split(' ')) > m:
        m = len(df_test_own['sentence_A'][i].split(' '))
m
m = 0
for i in range(len(df_test_own)):
    if len(df_test_own['sentence_B'][i].split(' ')) > m:
        m = len(df_test_own['sentence_B'][i].split(' '))
m
# +
"""TESTING DATA"""
# (Docstring said TRAINING, but this vectorises the OnWN *test* split; the
# output files are test_*_own.)  Pad to 21 tokens.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_test_own)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_test_own['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip (bare except also swallowed KeyboardInterrupt)
            continue
    for token in df_test_own['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip
            continue
    # Padding with 300-d zero vectors.
    while len(full_vec_a) < 21:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 21:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
# Cache the vectorised sentences.
with open('./data/test_a_own_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_own_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# -
# Spell-checker for the noisy forum text; presumably pyspellchecker's
# SpellChecker (imported elsewhere in the file) — confirm.
sc = SpellChecker()
sc.correction('453')
# +
# Forum test set: context-managed read replaces the leaked file handle;
# also strips quote characters from the raw lines.
with open('./data/forum_test.txt') as file:
    test = list()
    for line in file:
        a = line.split('\t')
        a = [x.replace('\n', '').replace('"', '') for x in a]
        test.append(a)
df_test_forum = pd.DataFrame(test, columns=['scores', 'sentence_A', 'sentence_B'])
df_test_forum = df_test_forum.iloc[:len(df_test_forum)-1,:]  # drop trailing blank row
df_test_forum['scores'] = [float(x) for x in df_test_forum['scores']]
df_test_forum['normed_scre'] = norm(df_test_forum['scores'])
print(df_test_forum.head())
# Longest sentence (token count) on each side.
m = 0
for i in range(len(df_test_forum)):
    if len(df_test_forum['sentence_A'][i].split(' ')) > m:
        m = len(df_test_forum['sentence_A'][i].split(' '))
print(m)
m = 0
for i in range(len(df_test_forum)):
    if len(df_test_forum['sentence_B'][i].split(' ')) > m:
        m = len(df_test_forum['sentence_B'][i].split(' '))
print(m)
# -
# -
correctedA = list()
correctedB = list()
for i in tqdm(range(len(df_test_forum))):
corA = ' '.join([sc.correction(x) for x in df_test_forum['sentence_A'][i].split(' ')])
corB = ' '.join([sc.correction(x) for x in df_test_forum['sentence_B'][i].split(' ')])
correctedA.append(corA)
correctedB.append(corB)
df_test_forum['sentence_A'][0]
df_test_forum['sentence_A'] = correctedA
df_test_forum['sentence_B'] = correctedB
df_test_forum['sentence_B']
# +
"""TESTING DATA"""
# (Docstring said TRAINING, but this vectorises the forum *test* split.)
# Pad to 18 tokens, the forum maximum found above.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_test_forum)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_test_forum['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip (bare except also swallowed KeyboardInterrupt)
            continue
    for token in df_test_forum['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip
            continue
    # Padding with 300-d zero vectors.
    while len(full_vec_a) < 18:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 18:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
# Cache the vectorised sentences.
with open('./data/test_a_forum_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_forum_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
# News test set: context-managed read replaces the leaked file handle.
with open('./data/news_test.txt') as file:
    test = list()
    for line in file:
        a = line.split('\t')
        a = [x.replace('\n', '') for x in a]
        test.append(a)
df_test_news = pd.DataFrame(test, columns=['scores', 'sentence_A', 'sentence_B'])
df_test_news = df_test_news.iloc[:len(df_test_news)-1,:]  # drop trailing blank row
df_test_news['scores'] = [float(x) for x in df_test_news['scores']]
df_test_news['normed_scre'] = norm(df_test_news['scores'])
print(df_test_news.head())
# Longest sentence (token count) on each side.
m = 0
for i in range(len(df_test_news)):
    if len(df_test_news['sentence_A'][i].split(' ')) > m:
        m = len(df_test_news['sentence_A'][i].split(' '))
print(m)
m = 0
for i in range(len(df_test_news)):
    if len(df_test_news['sentence_B'][i].split(' ')) > m:
        m = len(df_test_news['sentence_B'][i].split(' '))
print(m)
# +
"""TESTING DATA"""
# (Docstring said TRAINING, but this vectorises the news *test* split.)
# Pad to 57 tokens, the longest news sentence.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_test_news)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_test_news['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip (bare except also swallowed KeyboardInterrupt)
            continue
    for token in df_test_news['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip
            continue
    # Padding with 300-d zero vectors.
    while len(full_vec_a) < 57:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 57:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
# Cache the vectorised sentences.
with open('./data/test_a_news_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_news_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
# Images (2015) test set: context-managed read replaces the leaked file handle.
with open('./data/images_test.txt') as file:
    test = list()
    for line in file:
        a = line.split('\t')
        a = [x.replace('\n', '') for x in a]
        test.append(a)
df_test_images = pd.DataFrame(test, columns=['scores', 'sentence_A', 'sentence_B'])
df_test_images = df_test_images.iloc[:len(df_test_images)-1,:]  # drop trailing blank row
df_test_images['scores'] = [float(x) for x in df_test_images['scores']]
df_test_images['normed_scre'] = norm(df_test_images['scores'])
print(df_test_images.head())
# Longest sentence (token count) on each side.
m = 0
for i in range(len(df_test_images)):
    if len(df_test_images['sentence_A'][i].split(' ')) > m:
        m = len(df_test_images['sentence_A'][i].split(' '))
print(m)
m = 0
for i in range(len(df_test_images)):
    if len(df_test_images['sentence_B'][i].split(' ')) > m:
        m = len(df_test_images['sentence_B'][i].split(' '))
print(m)
# +
"""TESTING DATA"""
# (Docstring said TRAINING, but this vectorises the images *test* split.)
# Pad to 27 tokens; overwrites the earlier images cache files.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_test_images)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_test_images['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip (bare except also swallowed KeyboardInterrupt)
            continue
    for token in df_test_images['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip
            continue
    # Padding with 300-d zero vectors.
    while len(full_vec_a) < 27:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 27:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
# Cache the vectorised sentences.
with open('./data/test_a_images_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_images_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
# Headlines (2013) test set: context-managed read replaces the leaked file
# handle; '#' characters are stripped from the raw lines.
with open('./data/headlines_2013_test.txt') as file:
    test = list()
    for line in file:
        a = line.split('\t')
        a = [x.replace('\n', '').replace('#', '') for x in a]
        test.append(a)
df_test_headlines = pd.DataFrame(test, columns=['scores', 'sentence_A', 'sentence_B'])
df_test_headlines = df_test_headlines.iloc[:len(df_test_headlines)-1,:]  # drop trailing blank row
df_test_headlines['scores'] = [float(x) for x in df_test_headlines['scores']]
df_test_headlines['normed_scre'] = norm(df_test_headlines['scores'])
# BUG FIX: the original printed df_test_fnwn.head(), which is undefined here
# (NameError) — the frame just built is df_test_headlines.
print(df_test_headlines.head())
# Longest sentence (token count) on each side.
m = 0
for i in range(len(df_test_headlines)):
    if len(df_test_headlines['sentence_A'][i].split(' ')) > m:
        m = len(df_test_headlines['sentence_A'][i].split(' '))
print(m)
m = 0
for i in range(len(df_test_headlines)):
    if len(df_test_headlines['sentence_B'][i].split(' ')) > m:
        m = len(df_test_headlines['sentence_B'][i].split(' '))
print(m)
# +
"""TESTING DATA"""
# (Docstring said TRAINING, but this vectorises the 2013-headlines *test*
# split.)  Pad to 22 tokens; overwrites the earlier headlines cache files.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_test_headlines)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_test_headlines['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip (bare except also swallowed KeyboardInterrupt)
            continue
    for token in df_test_headlines['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:  # OOV token: skip
            continue
    # Padding with 300-d zero vectors.
    while len(full_vec_a) < 22:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 22:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
# Cache the vectorised sentences.
with open('./data/test_a_headlines_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_headlines_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# -
# ##### Modeling
# +
"""DATA DEFINITION"""
# Reload every cached split. NOTE: the own/head pairs are loaded twice
# (redundant but harmless).
train_a = pickle.load(open('./data/train_a_all_w2v300.data', 'rb'))
train_b = pickle.load(open('./data/train_b_all_w2v300.data', 'rb'))
test_a_head = pickle.load(open('./data/test_a_headlines_w2v300.data', 'rb'))
test_b_head = pickle.load(open('./data/test_b_headlines_w2v300.data', 'rb'))
test_a_own = pickle.load(open('./data/test_a_own_w2v300.data', 'rb'))
test_b_own = pickle.load(open('./data/test_b_own_w2v300.data', 'rb'))
test_a_forum = pickle.load(open('./data/test_a_forum_w2v300.data', 'rb'))
test_b_forum = pickle.load(open('./data/test_b_forum_w2v300.data', 'rb'))
test_a_news = pickle.load(open('./data/test_a_news_w2v300.data', 'rb'))
test_b_news = pickle.load(open('./data/test_b_news_w2v300.data', 'rb'))
test_a_images = pickle.load(open('./data/test_a_images_w2v300.data', 'rb'))
test_b_images = pickle.load(open('./data/test_b_images_w2v300.data', 'rb'))
test_a_tweets = pickle.load(open('./data/test_a_tweets_w2v300.data', 'rb'))
test_b_tweets = pickle.load(open('./data/test_b_tweets_w2v300.data', 'rb'))
test_a_fnwn = pickle.load(open('./data/test_a_fnwn_w2v300.data', 'rb'))
test_b_fnwn = pickle.load(open('./data/test_b_fnwn_w2v300.data', 'rb'))
test_a_own = pickle.load(open('./data/test_a_own_w2v300.data', 'rb'))
test_b_own = pickle.load(open('./data/test_b_own_w2v300.data', 'rb'))
test_a_head = pickle.load(open('./data/test_a_headlines_w2v300.data', 'rb'))
test_b_head = pickle.load(open('./data/test_b_headlines_w2v300.data', 'rb'))
# +
"""DATA DEFINITION"""
# Free the large vectorised arrays to reclaim memory between experiments.
del train_a
del train_b
del test_a_head
del test_b_head
del test_a_own
del test_b_own
del test_a_forum
del test_b_forum
del test_a_news
del test_b_news
del test_a_images
del test_b_images
del test_a_tweets
del test_b_tweets
del test_a_fnwn
del test_b_fnwn
# +
"""NETWORK DEFINITION"""
# Final combined-data model: same siamese |diff| + Dense head, higher dropout.
input_shape = (None, 300,)
left_input = tf.keras.layers.Input(input_shape)
right_input = tf.keras.layers.Input(input_shape)
siam = tf.keras.Sequential([
    # tf.keras.layers.LSTM(50, kernel_initializer='glorot_normal',
    #                      recurrent_initializer='glorot_normal',
    #                      #bias_initializer=tf.keras.initializers.Constant(2.5),
    #                      dropout=0.1)
    tf.keras.layers.GRU(50, kernel_initializer='glorot_uniform',
                        bias_initializer=tf.keras.initializers.Constant(0.5),
                        dropout=0.4)
])
encoded_l = siam(left_input)
encoded_r = siam(right_input)
# Element-wise absolute difference of the two encodings.
manhattan = lambda x: tf.keras.backend.abs(x[0] - x[1])
# manhattan = lambda x: tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(x[0] - x[1])))
merged_mangattan = tf.keras.layers.Lambda(function=manhattan, output_shape=lambda x: x[0])([encoded_l, encoded_r])
prediction = tf.keras.layers.Dense(1, activation='linear')(merged_mangattan)
siamese_net = tf.keras.Model([left_input, right_input], prediction)
"""OPTIMIZER AND LOSS DEFINITION"""
siamese_net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=1,
                                                           rho=0.95,
                                                           clipvalue=3),
                    loss='mse')
# -
# Train the combined-data model on all 2015 training pairs.
siamese_net.fit([np.array(train_a), np.array(train_b)],
                np.array(df_train_all['normed_scre']),
                epochs=100,
                batch_size=32)
def evaluate():
    """Score the current `siamese_net` on the train set and each 2014 test set.

    Relies on module-level state: the trained model (`siamese_net`), the
    vectorized sentence pairs (`train_a`/`train_b`, `test_*`), and the gold
    dataframes with a 'normed_scre' column.

    Returns:
        dict mapping each dataset name to its Pearson r, plus
        'mean_all' (average over the six test sets) and
        'mean_notweet' (same average with the tweet set excluded).
    """
    # (result key, left vectors, right vectors, gold-score dataframe)
    datasets = [
        ('train', train_a, train_b, df_train_all),
        ('headlines', test_a_head, test_b_head, df_test_headlines),
        ('own', test_a_own, test_b_own, df_test_own),
        ('forum', test_a_forum, test_b_forum, df_test_forum),
        ('news', test_a_news, test_b_news, df_test_news),
        ('images', test_a_images, test_b_images, df_test_images),
        ('tweets', test_a_tweets, test_b_tweets, df_test_tweets),
    ]
    results = dict()
    for name, left, right, df in datasets:
        preds = siamese_net.predict([np.array(left), np.array(right)])
        # predict() returns shape (n, 1); flatten to a plain list of floats.
        results[name] = pearsonr([x[0] for x in preds.tolist()],
                                 df['normed_scre'])[0]
    no_tweet = ['headlines', 'own', 'forum', 'news', 'images']
    results['mean_all'] = np.mean([results[k] for k in no_tweet + ['tweets']])
    results['mean_notweet'] = np.mean([results[k] for k in no_tweet])
    return results
# ### 2014 CLASS RESULTS
r = evaluate()
r
# Presumably the hyper-parameters of the saved run below (units, init,
# bias const, dropout, optimizer, epochs) — TODO confirm against the
# network-definition cell above.
"""
50, glorot_uniform, 0.5, 0.35, Adadelta(1, 0.9, 2), 800
"""
siamese_net.save_weights('./models/siam/all/') # 0.753/0.882
siamese_net.load_weights('./models/siam/all/')
# Sanity check of the (re)loaded weights: Pearson/Spearman on train and
# headlines sets.
preds = siamese_net.predict([np.array(train_a), np.array(train_b)])
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_train_all['normed_scre'])[0]}")
print(f"Spearmans: {spearmanr([x[0] for x in preds.tolist()], df_train_all['normed_scre'])[0]}")
preds = siamese_net.predict([np.array(test_a_head), np.array(test_b_head)]) #53.7
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_test_headlines['normed_scre'])[0]}")
print(f"Spearmans: {spearmanr([x[0] for x in preds.tolist()], df_test_headlines['normed_scre'])[0]}")
# preds = siamese_net.predict([np.array(test_a_own), np.array(test_b_own)]) #53.7
# print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_test_own['normed_scre'])[0]}")
# print(f"Spearmans: {spearmanr([x[0] for x in preds.tolist()], df_test_own['normed_scre'])[0]}")
# ### MRPC
# +
# --- Load the MRPC training set (tab-separated, header on line 1) ---
# Read the header once for the column names, then re-read the whole file so
# the header row also lands in `data`; it is dropped again via iloc below.
# (`with` closes the handles — the original leaked two open files.)
with open('./data/mrpc/train.txt') as file:
    header = file.readline().split('\t')
header[3] = 'sentence_A'
header[4] = 'sentence_B'
data = list()
with open('./data/mrpc/train.txt') as file:
    for line in file:
        data.append([x.replace('\n', '') for x in line.split('\t')])
df_mrpc_train = pd.DataFrame(data, columns=header)
# Drop the duplicated header row (index 0) and the trailing last row.
df_mrpc_train = df_mrpc_train.iloc[1:len(df_mrpc_train)-1,:]
df_mrpc_train = df_mrpc_train.reset_index()
df_mrpc_train['Quality'] = [int(x) for x in df_mrpc_train['Quality']]
# Longest sentence (in tokens) on each side — used to choose the pad length.
print(max((len(s.split(' ')) for s in df_mrpc_train['sentence_A']), default=0))
print(max((len(s.split(' ')) for s in df_mrpc_train['sentence_B']), default=0))
# -
df_mrpc_train.tail()
df_mrpc_train['sentence_A'] = [x.lower() for x in df_mrpc_train['sentence_A']]
df_mrpc_train['sentence_B'] = [x.lower() for x in df_mrpc_train['sentence_B']]
# +
"""TRAINING DATA"""
# Map every token of each MRPC training sentence pair to its 300-d word2vec
# vector, skipping out-of-vocabulary tokens, then zero-pad each sequence to
# at least 31 tokens (the dataset maximum after OOV removal) so the pairs
# stack into arrays. Results are pickled for reuse.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_mrpc_train)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_mrpc_train['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # token not in the embedding vocabulary — skip it
            continue
    for token in df_mrpc_train['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:
            continue
    # Zero-pad both sides up to 31 tokens; longer sequences are left as-is.
    while len(full_vec_a) < 31:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 31:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/train_a_mrpc_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/train_b_mrpc_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
# --- Load the MRPC test set (same format as the training file) ---
# Header is read once for the column names; the full re-read puts the
# header row into `data` too, and iloc drops it again below.
with open('./data/mrpc/test.txt') as file:
    header = file.readline().split('\t')
header[3] = 'sentence_A'
header[4] = 'sentence_B'
data = list()
with open('./data/mrpc/test.txt') as file:
    for line in file:
        data.append([x.replace('\n', '') for x in line.split('\t')])
df_mrpc_test= pd.DataFrame(data, columns=header)
# Drop the duplicated header row and the trailing last row.
df_mrpc_test = df_mrpc_test.iloc[1:len(df_mrpc_test)-1,:]
df_mrpc_test = df_mrpc_test.reset_index()
df_mrpc_test['Quality'] = [int(x) for x in df_mrpc_test['Quality']]
# Longest sentence (in tokens) on each side.
print(max((len(s.split(' ')) for s in df_mrpc_test['sentence_A']), default=0))
print(max((len(s.split(' ')) for s in df_mrpc_test['sentence_B']), default=0))
# -
df_mrpc_test['sentence_A'] = [x.lower() for x in df_mrpc_test['sentence_A']]
df_mrpc_test['sentence_B'] = [x.lower() for x in df_mrpc_test['sentence_B']]
# +
"""TRAINING DATA"""
# (Actually the MRPC *test* split — the banner string above is a leftover
# from copy-pasting the training cell.)
# Same vectorization as for training: word2vec lookup with OOV tokens
# skipped, then zero-padding to at least 30 tokens.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_mrpc_test)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_mrpc_test['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # token not in the embedding vocabulary — skip it
            continue
    for token in df_mrpc_test['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:
            continue
    while len(full_vec_a) < 30:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 30:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/test_a_mrpc_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_mrpc_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# -
# Free the temporary lists; the pickled copies are reloaded below.
del all_vec_a
del all_vec_b
# +
# Reload the pickled MRPC vectors produced by the two cells above.
train_mrpc_a = pickle.load(open('./data/train_a_mrpc_w2v300.data', 'rb'))
train_mrpc_b = pickle.load(open('./data/train_b_mrpc_w2v300.data', 'rb'))
test_mrpc_a = pickle.load(open('./data/test_a_mrpc_w2v300.data', 'rb'))
test_mrpc_b = pickle.load(open('./data/test_b_mrpc_w2v300.data', 'rb'))
# +
"""NETWORK DEFINITION"""
# Same siamese GRU architecture as above, but configured for binary
# paraphrase classification: sigmoid output + binary cross-entropy on the
# 0/1 'Quality' label. This rebinds the module-level layer/model names.
input_shape = (None, 300,)  # variable-length sequence of 300-d word vectors
left_input = tf.keras.layers.Input(input_shape)
right_input = tf.keras.layers.Input(input_shape)
siam = tf.keras.Sequential([
    # tf.keras.layers.LSTM(50, kernel_initializer='glorot_normal',
    #                     recurrent_initializer='glorot_normal',
    #                     #bias_initializer=tf.keras.initializers.Constant(2.5),
    #                     dropout=0.1)
    tf.keras.layers.GRU(50, kernel_initializer='glorot_uniform',
                        bias_initializer=tf.keras.initializers.Constant(2.5),
                        dropout=0.3)
])
# Shared encoder applied to both inputs.
encoded_l = siam(left_input)
encoded_r = siam(right_input)
# Element-wise |difference| merge of the two encodings.
manhattan = lambda x: tf.keras.backend.abs(x[0] - x[1])
# manhattan = lambda x: tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(x[0] - x[1])))
merged_mangattan = tf.keras.layers.Lambda(function=manhattan, output_shape=lambda x: x[0])([encoded_l, encoded_r])
prediction = tf.keras.layers.Dense(1, activation='sigmoid')(merged_mangattan)
siamese_net = tf.keras.Model([left_input, right_input], prediction)
"""OPTIMIZER AND LOSS DEFINITION"""
siamese_net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=1,
                                                           rho=0.9,
                                                           clipvalue=1.5),
                    loss='binary_crossentropy')
# -
# Train on the MRPC paraphrase labels.
siamese_net.fit([np.array(train_mrpc_a), np.array(train_mrpc_b)],
                np.array(df_mrpc_train['Quality']),
                epochs=200,
                batch_size=32)
# --- MRPC accuracy of the trained network ---
# Threshold the sigmoid outputs at 0.5 via round() and compare to gold labels.
preds = siamese_net.predict([np.array(train_mrpc_a), np.array(train_mrpc_b)])
preds = [round(x[0]) for x in preds]
labels = df_mrpc_train["Quality"].tolist()  # hoisted: was rebuilt per element (O(n^2))
print(f'Train acc: {np.sum([p == t for p, t in zip(preds, labels)]) / len(preds)}')
preds = siamese_net.predict([np.array(test_mrpc_a), np.array(test_mrpc_b)])
preds = [abs(round(x[0])) for x in preds]  # abs() is a no-op on sigmoid outputs; kept for parity
labels = df_mrpc_test["Quality"].tolist()
print(f'Test acc: {np.sum([p == t for p, t in zip(preds, labels)]) / len(preds)}')
# ### SemEval 2012
# +
# --- Load the MSRvid training pairs and gold scores ---
# Input file: one tab-separated sentence pair per line. The first line is
# skipped, as in the original loader. `with` closes the handles (the
# original left both files open).
with open('./data/2013/train/STS.input.MSRvid.txt') as file:
    file.readline()
    data = list()
    for line in file:
        data.append([x.replace('\n', '') for x in line.split('\t')])
df_msrvid_train = pd.DataFrame(data, columns=['sentence_A', 'sentence_B'])
# Gold scores: one float per line, first line skipped to stay aligned.
with open('./data/2013/train/STS.gs.MSRvid.txt') as file:
    file.readline()
    data = [line.replace('\n', '') for line in file]
df_msrvid_train['score'] = [float(x) for x in data]
df_msrvid_train['normed_score'] = norm(df_msrvid_train['score'])
print(df_msrvid_train.head())
print(df_msrvid_train.tail())
# Longest sentence (in tokens) on each side — used to choose the pad length.
print(max((len(s.split(' ')) for s in df_msrvid_train['sentence_A']), default=0))
print(max((len(s.split(' ')) for s in df_msrvid_train['sentence_B']), default=0))
df_msrvid_train['sentence_A'] = [x.lower() for x in df_msrvid_train['sentence_A']]
df_msrvid_train['sentence_B'] = [x.lower() for x in df_msrvid_train['sentence_B']]
# +
"""TRAINING DATA"""
# Vectorize the MSRvid training pairs: word2vec lookup with OOV tokens
# skipped, then zero-padding to at least 24 tokens. Results are pickled.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_msrvid_train)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_msrvid_train['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # token not in the embedding vocabulary — skip it
            continue
    for token in df_msrvid_train['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:
            continue
    while len(full_vec_a) < 24:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 24:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/train_a_msrvid_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/train_b_msrvid_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
# --- Load the MSRvid test-gold pairs and scores (same layout as train) ---
with open('./data/2013/test-gold/STS.input.MSRvid.txt') as file:
    file.readline()  # first line skipped, as in the original loader
    data = list()
    for line in file:
        data.append([x.replace('\n', '') for x in line.split('\t')])
df_msrvid_test = pd.DataFrame(data, columns=['sentence_A', 'sentence_B'])
with open('./data/2013/test-gold/STS.gs.MSRvid.txt') as file:
    file.readline()
    data = [line.replace('\n', '') for line in file]
df_msrvid_test['score'] = [float(x) for x in data]
df_msrvid_test['normed_score'] = norm(df_msrvid_test['score'])
print(df_msrvid_test.head())
print(df_msrvid_test.tail())
# Longest sentence (in tokens) on each side.
print(max((len(s.split(' ')) for s in df_msrvid_test['sentence_A']), default=0))
print(max((len(s.split(' ')) for s in df_msrvid_test['sentence_B']), default=0))
df_msrvid_test['sentence_A'] = [x.lower() for x in df_msrvid_test['sentence_A']]
df_msrvid_test['sentence_B'] = [x.lower() for x in df_msrvid_test['sentence_B']]
# +
"""TRAINING DATA"""
# (Actually the MSRvid *test* split — the banner is a copy-paste leftover.)
# Vectorize with OOV tokens skipped, zero-padding to at least 18 tokens.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_msrvid_test)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_msrvid_test['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # token not in the embedding vocabulary — skip it
            continue
    for token in df_msrvid_test['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:
            continue
    while len(full_vec_a) < 18:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 18:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/test_a_msrvid_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_msrvid_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
# Reload the pickled MSRvid vectors.
# NOTE(review): the `*_mrpc_*` names are reused here for MSRvid data — the
# naming is misleading, but the fit/predict cells below rely on these names.
train_mrpc_a = pickle.load(open('./data/train_a_msrvid_w2v300.data', 'rb'))
train_mrpc_b = pickle.load(open('./data/train_b_msrvid_w2v300.data', 'rb'))
test_mrpc_a = pickle.load(open('./data/test_a_msrvid_w2v300.data', 'rb'))
test_mrpc_b = pickle.load(open('./data/test_b_msrvid_w2v300.data', 'rb'))
# +
"""NETWORK DEFINITION"""
# Same siamese GRU architecture, back to regression (linear output + MSE)
# for the MSRvid similarity scores. Rebinds the module-level model names.
input_shape = (None, 300,)  # variable-length sequence of 300-d word vectors
left_input = tf.keras.layers.Input(input_shape)
right_input = tf.keras.layers.Input(input_shape)
siam = tf.keras.Sequential([
    # tf.keras.layers.LSTM(50, kernel_initializer='glorot_normal',
    #                     recurrent_initializer='glorot_normal',
    #                     #bias_initializer=tf.keras.initializers.Constant(2.5),
    #                     dropout=0.1)
    tf.keras.layers.GRU(50, kernel_initializer='glorot_uniform',
                        bias_initializer=tf.keras.initializers.Constant(2.5),
                        dropout=0.4)
])
# Shared encoder applied to both inputs.
encoded_l = siam(left_input)
encoded_r = siam(right_input)
# Element-wise |difference| merge of the two encodings.
manhattan = lambda x: tf.keras.backend.abs(x[0] - x[1])
# manhattan = lambda x: tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(x[0] - x[1])))
merged_mangattan = tf.keras.layers.Lambda(function=manhattan, output_shape=lambda x: x[0])([encoded_l, encoded_r])
prediction = tf.keras.layers.Dense(1, activation='linear')(merged_mangattan)
siamese_net = tf.keras.Model([left_input, right_input], prediction)
"""OPTIMIZER AND LOSS DEFINITION"""
siamese_net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=1,
                                                           rho=0.9,
                                                           clipvalue=2.5),
                    loss='mse')
# -
# Train on MSRvid (NOTE: the `*_mrpc_*` variables hold MSRvid vectors here).
siamese_net.fit([np.array(train_mrpc_a), np.array(train_mrpc_b)],
                np.array(df_msrvid_train['normed_score']),
                epochs=300,
                batch_size=32)
# Pearson correlation on the train and test splits.
preds = siamese_net.predict([np.array(train_mrpc_a), np.array(train_mrpc_b)])
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_msrvid_train['normed_score'])[0]}")
preds = siamese_net.predict([np.array(test_mrpc_a), np.array(test_mrpc_b)])
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_msrvid_test['normed_score'])[0]}")
# ### SemEval 2013
# +
# --- Load the OnWN 2013 test pairs and gold scores ---
# NOTE(review): the result is stored in `df_msrvid_test`, overwriting the
# MSRvid test frame above — the name is misleading, but the final evaluation
# cell below reads this variable, so it is kept.
with open('./data/2013/onwn_test.txt') as file:
    data = list()
    for line in file:
        data.append([x.replace('\n', '') for x in line.split('\t')])
df_msrvid_test = pd.DataFrame(data, columns=['sentence_A', 'sentence_B'])
with open('./data/2013/onwn_gs.txt') as file:
    data = [line.replace('\n', '') for line in file]
df_msrvid_test['score'] = [float(x) for x in data]
df_msrvid_test['normed_score'] = norm(df_msrvid_test['score'])
print(df_msrvid_test.head())
print(df_msrvid_test.tail())
# Longest sentence (in tokens) on each side.
print(max((len(s.split(' ')) for s in df_msrvid_test['sentence_A']), default=0))
print(max((len(s.split(' ')) for s in df_msrvid_test['sentence_B']), default=0))
df_msrvid_test['sentence_A'] = [x.lower() for x in df_msrvid_test['sentence_A']]
df_msrvid_test['sentence_B'] = [x.lower() for x in df_msrvid_test['sentence_B']]
# +
"""TRAINING DATA"""
# (Actually the OnWN *test* split — the banner is a copy-paste leftover.)
# Vectorize with OOV tokens skipped, zero-padding to at least 22 tokens.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_msrvid_test)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_msrvid_test['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # token not in the embedding vocabulary — skip it
            continue
    for token in df_msrvid_test['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:
            continue
    while len(full_vec_a) < 22:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 22:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/test_a_on_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/test_b_on_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# +
# --- Build the 2013 training set from several SemEval source files ---
def _append_tsv(path, data):
    """Append tab-split, newline-stripped rows of `path` to `data`, then
    drop the trailing (empty) last row, as the original per-file loops did."""
    with open(path) as f:
        for line in f:
            data.append([x.replace('\n', '') for x in line.split('\t')])
    data.pop(len(data) - 1)

data = list()
for _path in ('./data/2013/onwn_train.txt',
              './data/2013/msrpar_train.txt',
              './data/2013/msrpar_train2.txt',
              './data/2013/europarl_train.txt',
              './data/2013/europarl_train2.txt',
              './data/2013/news_train.txt'):
    _append_tsv(_path, data)
df_onwn_train = pd.DataFrame(data, columns=['score', 'sentence_A', 'sentence_B'])
# Drop the trailing row, as the original did.
df_onwn_train = df_onwn_train.iloc[:len(df_onwn_train)-1, :]
df_onwn_train['score'] = [float(x) for x in df_onwn_train['score']]
df_onwn_train['normed_score'] = norm(df_onwn_train['score'])
print(df_onwn_train.head())
print(df_onwn_train.tail())
# Longest sentence (in tokens) on each side — used to choose the pad length.
print(max((len(s.split(' ')) for s in df_onwn_train['sentence_A']), default=0))
print(max((len(s.split(' ')) for s in df_onwn_train['sentence_B']), default=0))
# +
"""TRAINING DATA"""
# Vectorize the combined 2013 training pairs: word2vec lookup with OOV
# tokens skipped, then zero-padding to at least 72 tokens. Pickled for reuse.
all_vec_a = list()
all_vec_b = list()
for i in range(len(df_onwn_train)):
    full_vec_a = list()
    full_vec_b = list()
    for token in df_onwn_train['sentence_A'][i].split(' '):
        try:
            full_vec_a.append(word2vec[token].tolist())
        except KeyError:  # token not in the embedding vocabulary — skip it
            continue
    for token in df_onwn_train['sentence_B'][i].split(' '):
        try:
            full_vec_b.append(word2vec[token].tolist())
        except KeyError:
            continue
    while len(full_vec_a) < 72:
        full_vec_a.append(np.zeros(300))
    while len(full_vec_b) < 72:
        full_vec_b.append(np.zeros(300))
    all_vec_a.append(np.array(full_vec_a))
    all_vec_b.append(np.array(full_vec_b))
with open('./data/train_a_on_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_a, f)
with open('./data/train_b_on_w2v300.data', 'wb') as f:
    pickle.dump(all_vec_b, f)
# -
# Free the temporary lists; the pickled copies are reloaded below.
del all_vec_a
del all_vec_b
# Reload the pickled 2013 train/test vectors.
train_a_on = pickle.load(open('./data/train_a_on_w2v300.data', 'rb'))
train_b_on = pickle.load(open('./data/train_b_on_w2v300.data', 'rb'))
test_a_on = pickle.load(open('./data/test_a_on_w2v300.data', 'rb'))
test_b_on = pickle.load(open('./data/test_b_on_w2v300.data', 'rb'))
# +
"""NETWORK DEFINITION"""
# Final variant of the siamese GRU regression network, trained on the
# combined 2013 data and evaluated on the OnWN test set.
input_shape = (None, 300,)  # variable-length sequence of 300-d word vectors
left_input = tf.keras.layers.Input(input_shape)
right_input = tf.keras.layers.Input(input_shape)
siam = tf.keras.Sequential([
    # tf.keras.layers.LSTM(50, kernel_initializer='glorot_normal',
    #                     recurrent_initializer='glorot_normal',
    #                     #bias_initializer=tf.keras.initializers.Constant(2.5),
    #                     dropout=0.1)
    tf.keras.layers.GRU(50, kernel_initializer='glorot_uniform',
                        bias_initializer=tf.keras.initializers.Constant(2.5),
                        dropout=0.35)
])
# Shared encoder applied to both inputs.
encoded_l = siam(left_input)
encoded_r = siam(right_input)
# Element-wise |difference| merge of the two encodings.
manhattan = lambda x: tf.keras.backend.abs(x[0] - x[1])
# manhattan = lambda x: tf.keras.backend.exp(-tf.keras.backend.sum(tf.keras.backend.abs(x[0] - x[1])))
merged_mangattan = tf.keras.layers.Lambda(function=manhattan, output_shape=lambda x: x[0])([encoded_l, encoded_r])
prediction = tf.keras.layers.Dense(1, activation='linear')(merged_mangattan)
siamese_net = tf.keras.Model([left_input, right_input], prediction)
"""OPTIMIZER AND LOSS DEFINITION"""
siamese_net.compile(optimizer=tf.keras.optimizers.Adadelta(learning_rate=1,
                                                           rho=0.9,
                                                           clipvalue=2.5),
                    loss='mse')
# -
siamese_net.fit([np.array(train_a_on), np.array(train_b_on)],
                np.array(df_onwn_train['normed_score']),
                epochs=1000,
                batch_size=64)
# Pearson correlation on train, and on the OnWN test set (which was stored
# in `df_msrvid_test` by the loading cell above).
preds = siamese_net.predict([np.array(train_a_on), np.array(train_b_on)])
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_onwn_train['normed_score'])[0]}")
preds = siamese_net.predict([np.array(test_a_on), np.array(test_b_on)])
print(f"Pearsons: {pearsonr([x[0] for x in preds.tolist()], df_msrvid_test['normed_score'])[0]}")
# Bare expressions below are notebook cell echoes (presumably a previous
# run's Pearson r, and an inspection of the gold scores).
0.3305256639303634
df_msrvid_test['normed_score']
# --- appended notebook: week6/siamese_networds.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Mathematics/FractionMultiplication/fraction-multiplication.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
import uiButtons
# %uiButtons
# # Visualizing Fraction Multiplication
# ### Introduction
# An important skill to have when it comes to fractions is knowing how to multiply them together.<br>
#
# As we know, fractions are of the form $\frac{a}{b}$ with $a$ and $b$ integers and $b\neq 0$. <br>
#
# You can think of $\frac{a}{b}$ as the number you get when you do $a\div b$. <br>
# If we think of a fraction as a division problem then it makes sense that it works well with multiplication.<br>
# Unlike addition, multiplying fractions is easy and straightforward. <br>
#
# In this notebook we will look into two forms of fraction multiplication:
# - multiplying two fractions together $\bigg($e.g. $\dfrac{4}{7} \times \dfrac{2}{3}\bigg)$
# - multiplying a fraction by an integer $\bigg($e.g. $\dfrac{4}{7} \times 3\bigg)$
# ### Procedure
# As mentioned earlier, multiplying two fractions together is simple.<br>
# Let's say we want to multiply the fractions $\dfrac{4}{7}$ and $\dfrac{2}{3}$.<br>
# All we have to do is multiply the numerators (top numbers) together, then multiply the denominators (bottom numbers) together. Let's take a look:
#
# $$\frac{4}{7} \times \frac{2}{3}=\frac{4\times 2}{7\times 3}=\frac{8}{21}$$ <br>
#
# Let's try another example. Take the fractions $\dfrac{3}{5}$ and $\dfrac{2}{3}$. To multiply them we multiply the numerators together and the denominators together:
#
# $$\frac{3\times 2}{5\times 3}=\frac{6}{15}$$
#
# In this example, you might notice that the result is not in lowest terms: both 6 and 15 are divisible by 3, so we get $\dfrac{6}{15} = \dfrac25$. In a later notebook, we'll focus on mechanics like this. For now, we want to focus on a visual understanding of the problem.
#
# Now that we know how to multiply two fractions, let's think about what it actually means.<br>
# Recall that a fraction simply represents a part of something. We can think of multiplying fractions together as taking a part of another part. In other words $\dfrac{1}{2}\times\dfrac{1}{2}$ is like saying $\dfrac{1}{2}$ of $\dfrac{1}{2}$ (one half **of** one half). If we have $\dfrac{1}{2}$ of a pizza and we want $\dfrac{1}{2}$ of that half what do we end up with?<br>
#
# <img src="./images/pizza.png" width="400px">
#
# We get $\dfrac{1}{4}$ because $\dfrac{1}{2}\times\dfrac{1}{2}=\dfrac{1}{4}$.<br>
#
# Watch the video below to help us further visualize this concept.
# + language="html"
# <div align="middle">
# <iframe id="vid1" width="640" height="360" src="https://www.youtube.com/embed/hr_mTd-oJ-M" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
# <p><a href="https://www.youtube.com/channel/UC4a-Gbdw7vOaccHmFo40b9g" target="_blank">Click here</a> for more videos by Khan Academy</p>
# </div>
# <script>
# $(function() {
# var reachable = false;
# var myFrame = $('#vid1');
# var videoSrc = myFrame.attr("src");
# myFrame.attr("src", videoSrc)
# .on('load', function(){reachable = true;});
# setTimeout(function() {
# if(!reachable) {
# var ifrm = myFrame[0];
# ifrm = (ifrm.contentWindow) ? ifrm.contentWindow : (ifrm.contentDocument.document) ? ifrm.contentDocument.document : ifrm.contentDocument;
# ifrm.document.open();
# ifrm.document.write('If the video does not start click <a href="' + videoSrc + '" target="_blank">here</a>');
# ifrm.document.close();
# }
# }, 2000)
# });
# </script>
# -
# ### Interactive visualization
#
# The widget below allows you to visualize fraction multiplication as shown in the video. To begin, enter a fraction in the boxes below.
# + language="html"
# <script src="./d3/d3.min.js"></script>
# <!-- <script src="https://d3js.org/d3.v3.min.js"></script> -->
# <script type="text/x-mathjax-config">
# MathJax.Hub.Config({
# tex2jax: {inlineMath: [['$','$'], ['\\(','\\)']]}
# });
# </script>
# <script src="https://code.jquery.com/jquery-1.10.2.js"></script>
# <style>
# .fractionInput {
# max-width: 40px;
# }
#
# .fractionBar {
# width: 40px;
# height: 3px;
# background-color: #000000;
# }
#
# .ingredientsInput {
# margin-left: 10px;
# margin-right: 10px;
# max-width: 40px;
# /* float: right; */
# }
#
# #speech {
# margin: 50px;
# font-size: 150%;
# }
#
# li {
# margin-bottom: 15px;
# }
# </style>
# + language="html"
# <div class="fractionInputs" style="margin:20px">
# <h1 id="leftInputFractionText" style="float: left; display: none"></h1>
# <div id="opperandInput" style="float: left; display: block">
# <input type="text" class="fractionInput form-control form-control-sm" id="oppNumerator" placeholder="0" style="margin-bottom: -10px;">
# <hr align="left" class="fractionBar">
# <input type="text" class="fractionInput form-control form-control-sm" id="oppDenominator" placeholder="1" style="margin-top: -10px;">
# </div>
# <button type="button" id="continueBtn" class="btn btn-primary buttons" style="margin: 30px">Continue</button>
#
# </div>
#
# <div class="canvasDiv" style="clear: left">
#     <svg height="500" width="500" viewBox="0 0 500 500" xmlns="http://www.w3.org/2000/svg" id="mainCanvas" style="float: left">
# <rect id="mainBox" height="480" width="480" x="10" y="10" style="outline: solid #000000 3px; fill:#ffffff"></rect>
# <rect id="leftOpperand" height="480" width="0" x="10" y="10"></rect>
# <rect id="rightOpperand" height="0" width="480" x="10" y="10"></rect>
# </svg>
# </div>
# <div>
# <p id="speech">Enter a fraction inside the boxes provided then click continue.</p>
# </div>
#
# <div style="clear: left; margin-left: 10px">
# <button type="button" id="resetFractionBoxBtn" class="btn btn-primary buttons">Reset</button>
# </div>
#
# -
# ### Multiplying a fraction by an integer
# In this section we will talk about multiplying a fraction like $\dfrac{4}{7}$, with an integer such as $3$. A good example of when this could be useful is when you need to double a recipe. <br>
#
# Doing multiplication of this form is simply a special case of multiplying two fractions together since any integer, such as $3$ in this case, can be rewritten as $\dfrac{3}{1}$. On a calculator, try inputting any number divided by $1$, and you will always get back the original number. <br>
#
# Let's demonstrate this with an example. To multiply the fraction $\dfrac{4}{7}$ and the integer $3$, remember that we can write $3$ as $\dfrac31$. We get
#
# $$\frac{4}{7}\times\frac{3}{1} = \frac{4\times 3}{7\times 1}= \frac{12}{7} $$
#
# *Note that $\dfrac{3}{1}$ is an improper fraction. Improper fractions follow all the same rules for multiplication as proper fractions.*
#
# The big take away from this is that the denominator does not change as it is simply multiplied by $1$. This means we did not change the _"whole"_, we only changed how many parts of the _"whole"_ we have (the numerator). In effect all we did was triple our fraction, since our constant was 3. <br>
#
# Let's practice what we just learned with a recipe example. Below you will see the ingredient list for the famous **Fresh Tomato and Basil Pasta Salad** recipe. This recipe makes enough for 4 servings, but we would like to double the recipe in order to serve 8 people. Apply what we have learned so far to double the ingredients list for the **tomato and basil pasta salad** in order to make 8 servings.
#
# (Enter your answer in the provided boxes. Fractions should be written using the _forward slash_ key "/" e.g. 5/8. When you're done, click _check answer_ to see if you are correct!)
# + language="html"
# <div class="ingredientsList">
# <h1>Fresh Tomato and Basil Pasta Salad</h1>
# <img src="./images/pastaSalad.jpg" width=250 style="float: left; margin-right: 50px; box-shadow: 5px 6px 25px 3px grey">
#
# <ul style="max-width: 700px; margin-bottom">
# <li><label>3 medium ripe tomatoes, chopped --></label><input id="tomatoes" class="ingredientsInput"></input><label>tomatoes</label></li>
# <li><label>1/3 cup thinly sliced fresh basil --></label><input id="basil" class="ingredientsInput"></input><label>cup</label></li>
# <li><label>2 Tbsp. olive oil --></label><input id="olivOil" class="ingredientsInput"></input><label>Tbsp.</label></li>
# <li><label>1 clove garlic, minced --></label><input id="garlic" class="ingredientsInput"></input><label>clove</label></li>
# <li><label>1/2 tsp. salt --></label><input id="salt" class="ingredientsInput"></input><label>tsp.</label></li>
# <li><label>1/4 tsp. pepper --></label><input id="pepper" class="ingredientsInput"></input><label>tsp.</label></li>
# <li><label>8 oz. rotini pasta pasta, uncooked --></label><input id="pasta" class="ingredientsInput"></input><label>oz.</label></li>
# <li><label>3/4 cup Parmesan Style Grated Topping --></label><input id="parmesan" class="ingredientsInput"></input><label>cup</label></li>
# </ul>
# <button type="button" id="checkAnswerBtn">Check Answers</button>
# <button type="button" id="resetBtn">Reset</button>
# </div>
# <div>
# <h2 id="answerStatus"></h2>
# </div>
# -
# ### Conclusion
# Throughout this notebook we looked at how easy multiplying fractions together really is. We also looked at how to work with a fraction multiplied by a constant. Let's recap what we have learned:
#
# - When multiplying two fractions together we multiply the numerators together and the denominators together: $\dfrac{a}{b}\times\dfrac{c}{d}=\dfrac{a \times c}{b \times d} = \dfrac{ac}{bd}$
#
# - A constant can always be rewritten as the constant over 1: $c = \dfrac{c}{1}$
#
# - To multiply a fraction by a constant, multiply the numerator by the constant and keep the denominator the same: $\dfrac{a}{b}\times c=\dfrac{a\times c}{b}=\dfrac{ac}{b}$
#
# - Multiplying two fractions together is the same as saying _a part of a part_: $\dfrac{a}{b}\times\dfrac{c}{d}$ is like saying $\dfrac{a}{b}$ **of** $\dfrac{c}{d}$ (The equation $\dfrac{3}{5}\times\dfrac{1}{4}$ is the same as _three fifths **of** one quarter_)
# + language="html"
# <script>
# var leftOpperand = {
# id: 'leftOpperand',
# numerator: Number(0),
# denominator: Number(0),
# colour: '#ff0066'
# };
#
# var rightOpperand = {
# id: 'rightOpperand',
# numerator: Number(0),
# denominator: Number(0),
# colour: '#0000ff'
# };
#
# var currentState = 0;
#
# var getOpperandInput = function(numeratorInput, denominatorInput, opperand) {
# opperand.numerator = document.getElementById(numeratorInput).value;
# opperand.denominator = document.getElementById(denominatorInput).value;
#
# }
#
# var verticalDivide = function(xVal, lineNum) {
# var i = xVal;
# while(lineNum > 0){
# addLine(Number(i + 10), Number(i + 10), 10, Number(d3.select('#mainBox').attr('height')) + 10);
# i += xVal;
# lineNum --;
# }
# };
#
# var horizontalDivide = function(xVal, lineNum) {
# var i = Number(xVal);
# while(lineNum > 0){
# addLine(10, Number(d3.select('#mainBox').attr('width')) + 10, Number(i + 10), Number(i +10));
# i += xVal;
# lineNum --;
# }
# };
#
# var addLine = function (x1, x2, y1, y2,) {
# var dashed = '0,0';
# var stroke = 2;
#
# d3.select('#mainCanvas').append('line')
# .attr('class', 'divLine ')
# .attr('x1', x1)
# .attr('x2', x2)
# .attr('y1', y1)
# .attr('y2', y2)
# .style('stroke', 'black')
# .style('stroke-width', stroke);
# };
#
# var fillBox = function(box, width, height, colour, opacity) {
# d3.select('#' + box.id)
# .style('fill', colour)
# .style('opacity', opacity)
# .transition().delay(function (d, i) {
# return i * 300;
# }).duration(500)
# .attr('width', width)
# .attr('height', height);
# };
#
# var changeOpacity = function(box, opacity) {
# d3.select('#' + box.id).transition().delay(function (d, i) {
# return i * 300;
# }).duration(500)
# .style('opacity', opacity);
#
# d3.selectAll('.divLine').transition().delay(function (d, i) {
# return i * 100;
# }).duration(200)
# .style('opacity', opacity);
# };
#
# var resetInputs = function() {
# d3.select('#continueBtn').attr('disabled', null);
# d3.selectAll('.divLine').remove();
# d3.select('#leftOpperand').attr('width', 0);
# d3.select('#rightOpperand').attr('height', 0);
# d3.select('#leftInputFractionText').text('').style('display', 'none');
# clearInput('oppNumerator');
# clearInput('oppDenominator');
# leftOpperand.numerator = Number(0);
# leftOpperand.denominator = Number(0);
# rightOpperand.numerator = Number(0);
# rightOpperand.denominator = Number(0);
#
# };
#
# var isValid = function(numerator, denominator) {
# if (numerator < 0 || numerator > 12) {
# return false;
# }
# if (denominator <= 0 || denominator > 12) {
# return false;
# }
# return (numerator < denominator);
# };
#
# var updateMathJax = function() {
# MathJax.Hub.Queue(["Typeset",MathJax.Hub]);
# };
#
# var showInputBox = function(inputId) {
# d3.select('#' + inputId).style('display', 'block');
# };
#
# var hideInputBox = function(inputId) {
# d3.select('#' + inputId).style('display', 'none');
# };
#
# var clearInput = function(inputId) {
# document.getElementById(inputId).value = '';
# }
#
# var stateControler = function(state) {
# currentState = state;
# setSpeech(state);
#
# switch(state) {
# case 0 :
# resetInputs();
# showInputBox('opperandInput');
# break;
# case 1 :
# getOpperandInput('oppNumerator', 'oppDenominator', leftOpperand);
# d3.select('#leftInputFractionText')
# .text('$\\frac{'+leftOpperand.numerator+'}{'+leftOpperand.denominator+'} \\times$')
# .style('display', 'block');
# updateMathJax();
# verticalDivide(Number(d3.select('#mainBox').attr('width')/leftOpperand.denominator), Number(leftOpperand.denominator - 1));
# hideInputBox('opperandInput');
# break;
# case 2 :
# fillBox(leftOpperand, Number(d3.select('#mainBox').attr('width')/leftOpperand.denominator) * leftOpperand.numerator, Number(d3.select('#mainBox').attr('height')), leftOpperand.colour, 1);
# clearInput('oppNumerator');
# clearInput('oppDenominator');
# showInputBox('opperandInput');
# break;
# case 3 :
# getOpperandInput('oppNumerator', 'oppDenominator', rightOpperand);
# d3.select('#leftInputFractionText')
# .text('$\\frac{'+leftOpperand.numerator+'}{'+leftOpperand.denominator+'} \\times$' + '$\\frac{'+rightOpperand.numerator+'}{'+rightOpperand.denominator+'}$');
# updateMathJax();
# changeOpacity(leftOpperand, 0);
# horizontalDivide(Number(d3.select('#mainBox').attr('height')/rightOpperand.denominator), Number(rightOpperand.denominator - 1));
# hideInputBox('opperandInput');
# break;
# case 4 :
# fillBox(rightOpperand, Number(d3.select('#mainBox').attr('width')), Number(d3.select('#mainBox').attr('height')/rightOpperand.denominator) * rightOpperand.numerator, rightOpperand.colour, 0.5);
# break;
# case 5 :
# changeOpacity(leftOpperand, 1);
# d3.select('#continueBtn').attr('disabled', true);
# break;
# default:
# console.log('not a valid of state, returning to state 0');
# stateControler(0);
# }
# };
#
# var speech = [
# "Enter a fraction in the boxes provided, then click continue.",
# "Great! Now we see that the square has been divided into rectangles of equal size. The number of rectangles is given by the denominator. Click continue when ready.",
# "Some of the equal parts have been filled in with pink. The numerator equals the number of pink rectangles. The ratio of the area in pink to the total area is our fraction. Enter another fraction to multiply then click continue.",
# "Let’s focus on the second fraction. The first one is temporarily hidden for clarity. As before, the number of rectangles we see equals the denominator. Click continue when ready.",
# "Now we have a blue section representing the numerator of the second fraction. Click continue to multiply these two fractions.",
# "Awesome! The first fraction is back and overlaid with the second fraction. The number of rectangles in the purple section is the numerator of our answer. Notice that this is the product of the numerators. The total number of rectangles is the denominator of the product, and this is just the product of the two denominators!"
# ];
#
# function setSpeech(state) {
# d3.select('#speech').text(speech[state]);
# };
#
# document.getElementById('continueBtn').onclick = function() {
# if(!isValid(Number(document.getElementById('oppNumerator').value), Number(document.getElementById('oppDenominator').value))){
# alert('Make sure your factions are proper and the denominators less than or equal to 12');
# }
# else {
# stateControler(currentState + 1);
# }
# };
#
# document.getElementById('resetFractionBoxBtn').onclick = function() {
# console.log("hello");
# resetInputs();
# stateControler(0);
# };
# </script>
# + language="html"
# <script type="text/javascript">
# var x = 2; //Recipe multiplier
#
# getInput('checkAnswerBtn').onclick = function() {
# if(checkAnswers()) {
# d3.select('#answerStatus').text('Correct!! Good job.');
# } else {
# d3.select('#answerStatus').text('Not quite, keep trying!');
# }
# };
#
# getInput('resetBtn').onclick = function() {
# var inputs = document.getElementsByClassName('ingredientsInput');
# for(var i = 0; i < inputs.length; i++) {
# inputs[i].value = '';
# }
# d3.selectAll('.ingredientsInput').style('background-color', '#ffffff');
# d3.select('#answerStatus').text('');
# };
#
# function checkAnswers() {
# var isCorrect = true;
# if(!checkAnswer('tomatoes', x*3))
# isCorrect = false;
# if(!checkAnswer('basil', x*(1/3)))
# isCorrect = false;
# if(!checkAnswer('olivOil', x*2))
# isCorrect = false;
# if(!checkAnswer('garlic', x*1))
# isCorrect = false;
# if(!checkAnswer('salt', x*(1/2)))
# isCorrect = false;
# if(!checkAnswer('pepper', x*(1/4)))
# isCorrect = false;
# if(!checkAnswer('pasta', x*8))
# isCorrect = false;
# if(!checkAnswer('parmesan', x*(3/4)))
# isCorrect = false;
#
# return isCorrect;
# };
#
# function checkAnswer(id, ans) {
# if(eval(getInput(id).value) === ans) {
# return answerCorrect(id);
# }
# return answerIncorrect(id);
# };
#
# function answerCorrect(id) {
# d3.select('#' + id).style('background-color', '#76D177');
# return true;
# }
#
# function answerIncorrect(id) {
# d3.select('#' + id).style('background-color', '#BB4646');
# return false;
# }
#
# function getInput(id) {
# return document.getElementById(id);
# };
# </script>
# -
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| Mathematics/FractionMultiplication/fraction-multiplication.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <center>
# <hr>
# <h1>Complessità nei sistemi sociali</h1>
# <h2>Laurea Magistrale in Fisica Dei Sistemi Complessi</h2>
# <h2>A.A. 2016/17</h2>
# <h2>Dr. <NAME>, Dr. <NAME></h2>
# <h3>Community detection in networks</h3>
# <hr>
# </center>
# <center>
# <hr>
# <h1>Final Exercise</h1>
# <hr>
# </center>
# <ol>
# <li><h4>Write a Python function implementing the Girvan-Newman algorithm to find the community structure of a graph (see: https://arxiv.org/abs/cond-mat/0112110)</h4>
#
# <li><h4>Test your function on the famous Zachary Karate Club network.</h4>
#
# <li><h4>Display the network with color-coded nodes according to the communities found by the algorithm</h4>
# </ol>
# %pylab inline
import networkx as nx
import networkx.algorithms.centrality as nc
from operator import itemgetter
# ## The Karate Club is so important that NetworkX has a function to import it
# Load Zachary's Karate Club graph, bundled with NetworkX.
G=nx.karate_club_graph()
eNum = G.number_of_edges()  # edge count
nnodes=len(G)  # node count
eNum, nnodes
G.nodes()
# Note that the nodes are labelled from 0 to 33 while the most common labeling (as shown in Girvan 2002) starts from 1.
# Compute one fixed layout so later drawings position the nodes identically.
pos = nx.spring_layout(G)
nx.draw(G, pos)
# Sanity check: the club network starts as a single connected component.
nx.number_connected_components(G)
# ## First clustering function
def updateGraph1(G):
    """Remove from G (in place) the edge with the highest edge betweenness.

    One step of the Girvan-Newman algorithm: the edge carrying the most
    shortest paths is the best candidate for an inter-community link.
    """
    ebw = nc.edge_betweenness(G)
    # max() returns the first maximal key, matching the original loop's
    # strict '<' comparison; iterating the dict directly works on both
    # Python 2 and 3, unlike the original Python-2-only .iteritems().
    medge = max(ebw, key=ebw.get)
    G.remove_edge(medge[0], medge[1])
# ## Second clustering function
def updateGraph2(G):
    """Remove the highest-betweenness edge from G (sort-based variant)."""
    ebw = nc.edge_betweenness(G)
    # .items() replaces the Python-2-only .iteritems(); sorted() is stable,
    # so ties still resolve to the edge seen last, exactly as before.
    edge_list = sorted(ebw.items(), key=itemgetter(1))
    medge = edge_list[-1][0]
    G.remove_edge(medge[0], medge[1])
# ## Find the partition into two communities
while nx.is_connected(G):
    # Keep removing the highest-betweenness edge until the graph splits in
    # two. (The original comment said "until the graph is connected", which
    # was backwards: the loop stops once G becomes disconnected.)
    updateGraph1(G)
communities = list(nx.connected_components(G))
communities
# Colour index per node: 0 for members of the first community, 1 otherwise.
color_community = []
for i in range(len(G)):  # range() instead of Python-2-only xrange()
    if i in communities[0]:
        color_community.append(0)
    else:
        color_community.append(1)
# To compare the results to Figure 4 of the paper by Girvan and Newman nodes' labels must be increased by 1.
# Rebuild the intact graph for display (edges were removed from G above).
H = nx.karate_club_graph()
labels_GN={}
# Shift labels by one so they match Figure 4 of Girvan & Newman (2002).
for i in H:
    labels_GN[i]=i+1
plt.figure(figsize=(10,8))
nx.draw(H, pos, labels=labels_GN, node_color=color_community, cmap=plt.cm.ocean, with_labels=True)
| 4-community-detection/.ipynb_checkpoints/nb08_community_detection-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="GrFpZXNQB9FW"
# #### Copyright 2018 Google LLC.
# + id="43_9Kh8LCDPK"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="YHK6DyunSbs4"
# # Cat vs. Dog Image Classification
# ## Exercise 2: Reducing Overfitting
# **_Estimated completion time: 30 minutes_**
#
# In this notebook we will build on the model we created in Exercise 1 to classify cats vs. dogs, and improve accuracy by employing a couple strategies to reduce overfitting: **data augmentation** and **dropout**.
#
# We will follow these steps:
#
# 1. Explore how data augmentation works by making random transformations to training images.
# 2. Add data augmentation to our data preprocessing.
# 3. Add dropout to the convnet.
# 4. Retrain the model and evaluate loss and accuracy.
#
# Let's get started!
# + [markdown] id="E3sSwzshfSpE"
# ## Exploring Data Augmentation
#
# Let's get familiar with the concept of **data augmentation**, an essential way to fight overfitting for computer vision models.
#
# In order to make the most of our few training examples, we will "augment" them via a number of random transformations, so that at training time, **our model will never see the exact same picture twice**. This helps prevent overfitting and helps the model generalize better.
#
# This can be done by configuring a number of random transformations to be performed on the images read by our `ImageDataGenerator` instance. Let's get started with an example:
# + id="XK-IN_zNgLlT"
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Augmentation pipeline: each transformation below is sampled at random every
# time an image is drawn, so the model never sees the exact same picture twice.
datagen = ImageDataGenerator(
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
# + [markdown] id="ijUDyVZtSgz3"
# These are just a few of the options available (for more, see the [Keras documentation](https://keras.io/preprocessing/image/)). Let's quickly go over what we just wrote:
#
# - `rotation_range` is a value in degrees (0–180), a range within which to randomly rotate pictures.
# - `width_shift` and `height_shift` are ranges (as a fraction of total width or height) within which to randomly translate pictures vertically or horizontally.
# - `shear_range` is for randomly applying shearing transformations.
# - `zoom_range` is for randomly zooming inside pictures.
# - `horizontal_flip` is for randomly flipping half of the images horizontally. This is relevant when there are no assumptions of horizontal asymmetry (e.g. real-world pictures).
# - `fill_mode` is the strategy used for filling in newly created pixels, which can appear after a rotation or a width/height shift.
#
# Let's take a look at our augmented images. First let's set up our example files, as in Exercise 1.
#
# + [markdown] id="grzOIOhoY366"
# **NOTE:** The 2,000 images used in this exercise are excerpted from the ["Dogs vs. Cats" dataset](https://www.kaggle.com/c/dogs-vs-cats/data) available on Kaggle, which contains 25,000 images. Here, we use a subset of the full dataset to decrease training time for educational purposes.
# + id="dhztKtUSFMXp"
# !wget --no-check-certificate \
# https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip -O \
# /tmp/cats_and_dogs_filtered.zip
# + id="LWkSRoJRfvGL"
import os
import zipfile
# Unpack the 2,000-image subset into /tmp; the context manager guarantees the
# archive handle is closed even if extraction raises.
local_zip = '/tmp/cats_and_dogs_filtered.zip'
with zipfile.ZipFile(local_zip, 'r') as zip_ref:
    zip_ref.extractall('/tmp')
base_dir = '/tmp/cats_and_dogs_filtered'
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
# Directory with our training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
# Directory with our training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
# Directory with our validation cat pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')
# Directory with our validation dog pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
# File-name lists used later to pick sample images to display
train_cat_fnames = os.listdir(train_cats_dir)
train_dog_fnames = os.listdir(train_dogs_dir)
# + [markdown] id="02r1oXaegECk"
# Next, let's apply the `datagen` transformations to a cat image from the training set to produce five random variants. Rerun the cell a few times to see fresh batches of random variants.
# + id="ap-nt8Byfaov"
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from tensorflow.keras.preprocessing.image import array_to_img, img_to_array, load_img
# Pick one training cat image and show five random augmented variants of it.
img_path = os.path.join(train_cats_dir, train_cat_fnames[2])
img = load_img(img_path, target_size=(150, 150))  # this is a PIL image
x = img_to_array(img)  # Numpy array with shape (150, 150, 3)
x = x.reshape((1,) + x.shape)  # Numpy array with shape (1, 150, 150, 3)
# The .flow() command below generates batches of randomly transformed images
# It will loop indefinitely, so we need to `break` the loop at some point!
i = 0
for batch in datagen.flow(x, batch_size=1):
    plt.figure(i)
    imgplot = plt.imshow(array_to_img(batch[0]))
    i += 1
    if i % 5 == 0:  # stop after exactly five variants
        break
# + [markdown] id="eywLKduLmYPY"
# ## Add Data Augmentation to the Preprocessing Step
#
# Now let's add our data-augmentation transformations from [**Exploring Data Augmentation**](#scrollTo=E3sSwzshfSpE) to our data preprocessing configuration:
# + id="e8HgwcAbmdcu"
# Adding rescale, rotation_range, width_shift_range, height_shift_range,
# shear_range, zoom_range, and horizontal flip to our ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,)
# Note that the validation data should not be augmented!
val_datagen = ImageDataGenerator(rescale=1./255)
# Flow training images in batches of 32 using train_datagen generator
train_generator = train_datagen.flow_from_directory(
train_dir, # This is the source directory for training images
target_size=(150, 150), # All images will be resized to 150x150
batch_size=20,
# Since we use binary_crossentropy loss, we need binary labels
class_mode='binary')
# Flow validation images in batches of 32 using val_datagen generator
validation_generator = val_datagen.flow_from_directory(
validation_dir,
target_size=(150, 150),
batch_size=20,
class_mode='binary')
# + [markdown] id="K-3PrfDwDJjB"
# If we train a new network using this data augmentation configuration, our network will never see the same input twice. However the inputs that it sees are still heavily intercorrelated, so this might not be quite enough to completely get rid of overfitting.
# + [markdown] id="lYguAfH3gyv6"
# ## Adding Dropout
#
# Another popular strategy for fighting overfitting is to use **dropout**.
# + [markdown] id="VtDl3oEZo_7Z"
# **TIP:** To learn more about dropout, see [Training Neural Networks](https://developers.google.com/machine-learning/crash-course/training-neural-networks/video-lecture) in [Machine Learning Crash Course](https://developers.google.com/machine-learning/crash-course/).
# + [markdown] id="bi3c0YtwpRUr"
# Let's reconfigure our convnet architecture from Exercise 1 to add some dropout, right before the final classification layer:
# + id="SVC4FgxiDje6"
from tensorflow.keras import layers
from tensorflow.keras import Model
from tensorflow.keras.optimizers import RMSprop
# Our input feature map is 150x150x3: 150x150 for the image pixels, and 3 for
# the three color channels: R, G, and B
img_input = layers.Input(shape=(150, 150, 3))
# First convolution extracts 16 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(16, 3, activation='relu')(img_input)
x = layers.MaxPooling2D(2)(x)
# Second convolution extracts 32 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
x = layers.Conv2D(32, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)
# Third convolution extracts 64 filters that are 3x3
# Convolution is followed by max-pooling layer with a 2x2 window
# (Conv2D rather than the legacy Convolution2D alias, for consistency
# with the first two convolutional blocks)
x = layers.Conv2D(64, 3, activation='relu')(x)
x = layers.MaxPooling2D(2)(x)
# Flatten feature map to a 1-dim tensor
x = layers.Flatten()(x)
# Create a fully connected layer with ReLU activation and 512 hidden units
x = layers.Dense(512, activation='relu')(x)
# Add a dropout rate of 0.5
x = layers.Dropout(0.5)(x)
# Create output layer with a single node and sigmoid activation
output = layers.Dense(1, activation='sigmoid')(x)
# Configure and compile the model
model = Model(img_input, output)
model.compile(loss='binary_crossentropy',
              optimizer=RMSprop(learning_rate=0.001),  # 'lr' alias is deprecated
              metrics=['acc'])
# + [markdown] id="jKSgmOt5itEF"
# ## Retrain the Model
#
# With data augmentation and dropout in place, let's retrain our convnet model. This time, let's train on all 2,000 images available, for 30 epochs, and validate on all 1,000 validation images. (This may take a few minutes to run.) See if you can write the code yourself:
#
# + cellView="code" id="VWr-MDk4ksJr"
# WRITE CODE TO TRAIN THE MODEL ON ALL 2000 IMAGES FOR 30 EPOCHS, AND VALIDATE
# ON ALL 1,000 VALIDATION IMAGES
# + [markdown] id="OpFqg-R1g9n6"
# ### Solution
#
# Click below for the solution.
# + cellView="code" id="SdW6geEVi2S8"
# Model.fit accepts generators directly; fit_generator is deprecated since
# TF 2.1 and removed in later releases.
history = model.fit(
    train_generator,
    steps_per_epoch=100,  # 2,000 training images / batch size 20
    epochs=30,
    validation_data=validation_generator,
    validation_steps=50,  # 1,000 validation images / batch size 20
    verbose=2)
# + [markdown] id="1LTWMLV6SUvP"
# Note that with data augmentation in place, the 2,000 training images are randomly transformed each time a new training epoch runs, which means that the model will never see the same image twice during training.
# + [markdown] id="IZqvC9UJlWc2"
# ## Evaluate the Results
#
# Let's evaluate the results of model training with data augmentation and dropout:
# + id="NKCjHegASXaA"
# Retrieve a list of accuracy results on training and validation data
# sets for each training epoch
acc = history.history['acc']
val_acc = history.history['val_acc']
# Retrieve a list of loss results on training and validation data
# sets for each training epoch
loss = history.history['loss']
val_loss = history.history['val_loss']
# Get number of epochs
epochs = range(len(acc))
# Plot training and validation accuracy per epoch
plt.plot(epochs, acc)
plt.plot(epochs, val_acc)
plt.title('Training and validation accuracy')
plt.figure()
# Plot training and validation loss per epoch
plt.plot(epochs, loss)
plt.plot(epochs, val_loss)
plt.title('Training and validation loss')
# + [markdown] id="Ej-s7-_eShij"
# Much better! We are no longer overfitting, and we have gained ~3 validation accuracy percentage points (see the green line in the top chart). In fact, judging by our training profile, we could keep fitting our model for 30+ more epochs and we could probably get to ~80%!
# + [markdown] id="Q4mticgLs5Yf"
# ## Clean Up
#
# Before running the next exercise, run the following cell to terminate the kernel and free memory resources:
# + id="Pjaok2GqtBtI"
import os, signal
# Terminate this notebook's kernel immediately (SIGKILL cannot be trapped),
# freeing its memory before the next exercise is run.
os.kill(os.getpid(), signal.SIGKILL)
| ml/pc/exercises/image_classification_part2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine Learning Exercises Solution
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# ## Exercise 1
#
# You've just been hired at a real estate investment firm and they would like you to build a model for pricing houses. You are given a dataset that contains data for house prices and a few features like number of bedrooms, size in square feet and age of the house. Let's see if you can build a model that is able to predict the price. In this exercise we extend what we have learned about linear regression to a dataset with more than one feature. Here are the steps to complete it:
#
# 1. Load the dataset ../data/housing-data.csv
# - plot the histograms for each feature
# - create 2 variables called X and y: X shall be a matrix with 3 columns (sqft,bdrms,age) and y shall be a vector with 1 column (price)
# - create a linear regression model in Keras with the appropriate number of inputs and output
# - split the data into train and test with a 20% test size
# - train the model on the training set and check its accuracy on training and test set
# - how's your model doing? Is the loss growing smaller?
# - try to improve your model with these experiments:
# - normalize the input features with one of the rescaling techniques mentioned above
# - use a different value for the learning rate of your model
# - use a different optimizer
# - once you're satisfied with training, check the R2score on the test set
# Load the dataset ../data/housing-data.csv
df = pd.read_csv('../data/housing-data.csv')
df.head()
df.columns
# plot the histograms for each feature
plt.figure(figsize=(15, 5))
for i, feature in enumerate(df.columns):
    # one subplot per column (the dataset has 4: sqft, bdrms, age, price)
    plt.subplot(1, 4, i+1)
    df[feature].plot(kind='hist', title=feature)
    plt.xlabel(feature)
# create 2 variables called X and y:
# X shall be a matrix with 3 columns (sqft,bdrms,age)
# and y shall be a vector with 1 column (price)
X = df[['sqft', 'bdrms', 'age']].values
y = df['price'].values
X
y
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
# create a linear regression model in Keras
# with the appropriate number of inputs and output
model = Sequential()
# A single Dense(1) layer on 3 inputs with no activation == linear regression.
model.add(Dense(1, input_shape=(3,)))
# NOTE(review): lr=0.8 is very aggressive for unscaled features; the rescaling
# experiment below addresses this — confirm this is intentional for the demo.
model.compile(Adam(lr=0.8), 'mean_squared_error')
from sklearn.model_selection import train_test_split
# split the data into train and test with a 20% test size
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
len(X_train)
len(X)
# train the model on the training set and check its accuracy on training and test set
# how's your model doing? Is the loss growing smaller?
model.fit(X_train, y_train)
df.describe()
# try to improve your model with these experiments:
# - normalize the input features with one of the rescaling techniques mentioned above
# - use a different value for the learning rate of your model
# - use a different optimizer
# Rescale features to comparable magnitudes so one learning rate suits all.
df['sqft1000'] = df['sqft']/1000.0
df['age10'] = df['age']/10.0
df['price100k'] = df['price']/1e5
X = df[['sqft1000', 'bdrms', 'age10']].values
y = df['price100k'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
model = Sequential()
model.add(Dense(1, input_dim=3))
# lower learning rate now that the features are normalized
model.compile(Adam(lr=0.1), 'mean_squared_error')
model.fit(X_train, y_train, epochs=20)
from sklearn.metrics import r2_score
# +
# once you're satisfied with training, check the R2score on the test set
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_test_pred)))
# -
# Train for 20 more epochs (verbose=0 silences the per-epoch log).
model.fit(X_train, y_train, epochs=20, verbose=0)
# +
# once you're satisfied with training, check the R2score on the test set
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
print("The R2 score on the Train set is:\t{:0.3f}".format(r2_score(y_train, y_train_pred)))
print("The R2 score on the Test set is:\t{:0.3f}".format(r2_score(y_test, y_test_pred)))
# -
# ## Exercise 2
#
# Your boss was extremely happy with your work on the housing price prediction model and decided to entrust you with a more challenging task. They've seen a lot of people leave the company recently and they would like to understand why that's happening. They have collected historical data on employees and they would like you to build a model that is able to predict which employee will leave next. They would like a model that is better than random guessing. They also prefer false negatives to false positives, in this first phase. Fields in the dataset include:
#
# - Employee satisfaction level
# - Last evaluation
# - Number of projects
# - Average monthly hours
# - Time spent at the company
# - Whether they have had a work accident
# - Whether they have had a promotion in the last 5 years
# - Department
# - Salary
# - Whether the employee has left
#
# Your goal is to predict the binary outcome variable `left` using the rest of the data. Since the outcome is binary, this is a classification problem. Here are some things you may want to try out:
#
# 1. load the dataset at ../data/HR_comma_sep.csv, inspect it with `.head()`, `.info()` and `.describe()`.
# - Establish a benchmark: what would be your accuracy score if you predicted everyone stay?
# - Check if any feature needs rescaling. You may plot a histogram of the feature to decide which rescaling method is more appropriate.
# - convert the categorical features into binary dummy columns. You will then have to combine them with the numerical features using `pd.concat`.
# - do the usual train/test split with a 20% test size
# - play around with learning rate and optimizer
# - check the confusion matrix, precision and recall
# - check if you still get the same results if you use a 5-Fold cross validation on all the data
# - Is the model good enough for your boss?
#
# As you will see in this exercise, a logistic regression model is not good enough to help your boss. In the next chapter we will learn how to go beyond linear models.
#
# This dataset comes from https://www.kaggle.com/ludobenistant/hr-analytics/ and is released under [CC BY-SA 4.0 License](https://creativecommons.org/licenses/by-sa/4.0/).
# +
# load the dataset at ../data/HR_comma_sep.csv, inspect it with `.head()`, `.info()` and `.describe()`.
df = pd.read_csv('../data/HR_comma_sep.csv')
# -
df.head()
df.info()
df.describe()
# +
# Establish a benchmark: what would be your accuracy score if you predicted everyone stay?
# Class counts divided by the total row count = class proportions.
df.left.value_counts() / len(df)
# -
# Predicting 0 all the time would yield an accuracy of 76%
# Check if any feature needs rescaling.
# You may plot a histogram of the feature to decide which rescaling method is more appropriate.
df['average_montly_hours'].plot(kind='hist')
# Rescale hours into the same order of magnitude as the other features.
df['average_montly_hours_100'] = df['average_montly_hours']/100.0
df['average_montly_hours_100'].plot(kind='hist')
df['time_spend_company'].plot(kind='hist')
# convert the categorical features into binary dummy columns.
# You will then have to combine them with
# the numerical features using `pd.concat`.
df_dummies = pd.get_dummies(df[['sales', 'salary']])
df_dummies.head()
df.columns
# 7 numeric features + the dummy columns -> 20 input features in total.
X = pd.concat([df[['satisfaction_level', 'last_evaluation', 'number_project',
                   'time_spend_company', 'Work_accident',
                   'promotion_last_5years', 'average_montly_hours_100']],
               df_dummies], axis=1).values
y = df['left'].values
X.shape
# +
# do the usual train/test split with a 20% test size
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# +
# play around with learning rate and optimizer
model = Sequential()
# A single sigmoid unit on 20 inputs == logistic regression.
model.add(Dense(1, input_dim=20, activation='sigmoid'))
model.compile(Adam(lr=0.5), 'binary_crossentropy', metrics=['accuracy'])
# -
model.summary()
model.fit(X_train, y_train)
# NOTE(review): predict_classes was removed in newer Keras/TF; if upgrading,
# use (model.predict(X_test) > 0.5).astype('int32') instead.
y_test_pred = model.predict_classes(X_test)
from sklearn.metrics import confusion_matrix, classification_report
def pretty_confusion_matrix(y_true, y_pred, labels=["False", "True"]):
    """Wrap sklearn's confusion matrix in a DataFrame with readable
    row labels (actual class) and 'Predicted ...' column labels."""
    matrix = confusion_matrix(y_true, y_pred)
    column_names = []
    for name in labels:
        column_names.append('Predicted ' + name)
    return pd.DataFrame(matrix, index=labels, columns=column_names)
# +
# check the confusion matrix, precision and recall
pretty_confusion_matrix(y_test, y_test_pred, labels=['Stay', 'Leave'])
# -
# Per-class precision/recall/F1 for the same predictions.
print(classification_report(y_test, y_test_pred))
from keras.wrappers.scikit_learn import KerasClassifier
# +
# check if you still get the same results if you use a 5-Fold cross validation on all the data
def build_logistic_regression_model():
    """Factory for a fresh one-neuron sigmoid model (logistic regression);
    KerasClassifier calls it once per cross-validation fold."""
    model = Sequential()
    model.add(Dense(1, input_dim=20, activation='sigmoid'))
    model.compile(Adam(lr=0.5), 'binary_crossentropy', metrics=['accuracy'])
    return model

model = KerasClassifier(build_fn=build_logistic_regression_model,
                        epochs=10, verbose=0)
# -
from sklearn.model_selection import KFold, cross_val_score
# +
# 5-fold CV with shuffling; each fold trains a fresh model via the factory above.
cv = KFold(5, shuffle=True)
scores = cross_val_score(model, X, y, cv=cv)
print("The cross validation accuracy is {:0.4f} ± {:0.4f}".format(scores.mean(), scores.std()))
# -
scores
# +
# Is the model good enough for your boss?
# -
# No, the model is not good enough for my boss, since it performs no better than the benchmark.
| solutions/3 Machine Learning Exercises Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Q1. 引入 train.csv 資料集
import pandas as pd
# Read the Titanic training set from the working directory.
train = pd.read_csv('train.csv')
train.head()
# ## Q2.將traing資料的資料欄位名稱存成 list
# Column names as a plain Python list.
train.columns.tolist()
# ## Q3.traing資料集年齡最大跟最小的是?
# NOTE(review): the built-in max()/min() do not skip missing values; with NaN
# ages, train.Age.max()/train.Age.min() would be the robust form — confirm.
max(train.Age), min(train.Age)
train.Age.describe()
# ## Q4.這個traing資料集,各欄位遺漏值狀況為何?
# +
def count_col_null(df):
    '''
    Count the missing values in each column of a dataframe.
    Args:
        df: input dataframe
    Return:
        null_data_df: dataframe with one row per column of `df`, listing
            the column name and its number of null values
    '''
    # df.isnull().sum() counts NaN/None per column in one vectorized pass,
    # replacing the original per-column Python loop.
    null_counts = df.isnull().sum()
    null_data_df = pd.DataFrame({
        "columns": list(df.columns),
        "null_counts": null_counts.tolist()
    })
    return null_data_df
# Report the null counts for every column of the training data.
count_col_null(train)
# -
# ## Q5.計算traing資料集男女的存活率
# [hint](https://pandas.pydata.org/docs/reference/api/pandas.Series.value_counts.html)
# Survival rate by sex: normalize=True turns class counts into proportions.
train.Survived[train.Sex == 'male'].value_counts(normalize=True)
train.Survived[train.Sex == 'female'].value_counts(normalize=True)
# ## Q6.計算traing資料集名稱有 Mr. 是幾人?
# Count passengers whose name contains 'Mr.' (str.find returns -1 when absent;
# the trailing dot keeps 'Mrs.' from matching).
# NOTE(review): assumes Name has no NaN values — str.find would yield NaN there.
len(train[train.Name.str.find('Mr.')!= -1])
# ## Q7.traing資料集總共有幾種票價?
# Number of distinct ticket fares in the training data.
len(train.Fare.value_counts())
# ## Q8.依據是否生存為群組,traing資料集分別計算票價和年齡的總和、平均數、中位數
import numpy as np
# Select multiple columns with a list of labels ([['Fare', 'Age']]);
# the tuple form groupby(...)['Fare', 'Age'] is deprecated and removed
# in modern pandas.
train.groupby('Survived')[['Fare', 'Age']].agg([len, np.sum, np.mean, np.median])
# ## Q9. train 資料集中,依據Embarked分組計算每個欄位的平均數、最小值、最大值、中位數
pd.set_option('display.max_columns', None)
train.groupby('Embarked').agg(['mean', 'min', 'max', 'median'])
# ## Q10. train 資料集中,回答超過60歲的男生且登船是C或Q的人數
train[(train.Age > 60)&(train.Sex == 'male')&(train.Embarked.isin(['C', 'Q']))]
len(train[(train.Age > 60)&(train.Sex == 'male')&(train.Embarked.isin(['C', 'Q']))])
# ## Q11. train 資料集中,找到index3到7的所有資料
train.iloc[3:8, :]
# ## Q12. train 資料集中,依照 Age、Fare 遞增排序資料
train.sort_values(['Age', 'Fare'])
# ## Q13. train 資料集中,把 male 換成 男生、female換成女生
# ### 創一個欄位之後再刪除
train['new_SEX'] = train.Sex.map({'male': '男生', 'female': '女生'})
train.head()
train.drop(['new_SEX'], axis=1, inplace=True)
train.head()
# ## Q14.將 train 和 test 合併起來為 data 資料集
test = pd.read_csv('test.csv')
data = pd.concat([train, test]).reset_index(drop=True)
data
# ## Q15.在 data 資料集,新增一個欄位年齡分組,分為以下群組(20歲以下、21歲-40歲、41歲-60歲、61歲-80歲、81歲-100歲、101歲以上)
# ### 若都不屬於以上群組,回覆 error
def age_group(age):
    """Map an age to a labelled age bracket.

    Brackets: 20歲以下, 21歲-40歲, 41歲-60歲, 61歲-80歲, 81歲-100歲,
    101歲以上.  Anything matching no bracket (including NaN, whose
    comparisons are all False) returns 'error'.

    NOTE: the original `age >= 21 and age <= 40` style left gaps for
    fractional ages (e.g. 20.5 fell through to 'error'); chained upper
    bounds close those gaps while keeping every integer age in the
    same bracket as before.
    """
    if age <= 20:
        return '20歲以下'
    elif age <= 40:
        return '21歲-40歲'
    elif age <= 60:
        return '41歲-60歲'
    elif age <= 80:
        return '61歲-80歲'
    elif age <= 100:
        return '81歲-100歲'
    elif age >= 101:
        return '101歲以上'
    else:
        return 'error'
data['Age_Group'] = data.apply(lambda x: age_group(x['Age']), axis=1)
data[['Age', 'Age_Group']]
| 02_Data_Preprocessing/02_S1_Pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [ml4ir](https://github.com/salesforce/ml4ir)
# #### open source, modular, python3, tensorflow2.0 library for IR based ML applications
# --------------------
# 
# ### First, let's load the data and take a look at it
# +
from ml4ir.base.io import file_io
import glob
import logging
import pandas as pd
import os
# Pandas options
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
# Setup logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logging.debug("Logger is initialized...")
# Load data
CSV_DATA_DIR = '../ml4ir/applications/ranking/tests/data/csv'
df = file_io.read_df_list(glob.glob(os.path.join(CSV_DATA_DIR, "train", "*.csv")), log=logger)
logger.info(df.shape)
df.head().T
# -
# ### Let's define the feature configuration for our data
#
# ### ... brace yourselves!
# +
# Set up the feature configurations
from ml4ir.base.features.feature_config import parse_config
from ml4ir.base.features.feature_config import ExampleFeatureConfig
from ml4ir.base.config.keys import TFRecordTypeKey
import json
feature_config_yaml = '''
query_key:
name: query_key
node_name: query_key
trainable: false
dtype: int64
log_at_inference: true
feature_layer_info:
type: numeric
shape: null
serving_info:
required: false
default_value: 0
tfrecord_type: context
label:
name: label
node_name: label
trainable: false
dtype: int64
log_at_inference: true
feature_layer_info:
type: numeric
shape: null
serving_info:
required: false
default_value: 0
tfrecord_type: sequence
features:
- name: feat_0
node_name: feat_0
trainable: true
dtype: float
log_at_inference: false
feature_layer_info:
type: numeric
shape: null
serving_info:
required: true
default_value: 0.0
tfrecord_type: sequence
- name: feat_1
node_name: feat_1
trainable: true
dtype: float
log_at_inference: false
feature_layer_info:
type: numeric
shape: null
serving_info:
required: true
default_value: 0.0
tfrecord_type: sequence
- name: feat_2
node_name: feat_2
trainable: true
dtype: float
log_at_inference: false
feature_layer_info:
type: numeric
shape: null
serving_info:
required: true
default_value: 0.0
tfrecord_type: sequence
- name: query_str
node_name: query_str
trainable: true
dtype: string
log_at_inference: true
feature_layer_info:
type: numeric
shape: null
fn: get_sequence_encoding
args:
encoding_type: bilstm
encoding_size: 128
embedding_size: 128
max_length: 20
preprocessing_info:
- fn: preprocess_text
args:
remove_punctuation: true
to_lower: true
- fn: strip_numbers
serving_info:
required: true
default_value: ""
tfrecord_type: context
- name: group
node_name: group
trainable: true
dtype: int64
log_at_inference: false
is_group_metric_key: true
feature_layer_info:
type: numeric
shape: null
fn: custom_categorical_embedding
args:
vocabulary_size: 16
embedding_size: 128
serving_info:
required: false
default_value: 0
tfrecord_type: context
- name: pos
node_name: pos
trainable: false
dtype: int64
log_at_inference: true
feature_layer_info:
type: numeric
shape: null
serving_info:
required: true
default_value: 0
tfrecord_type: sequence
'''
feature_config: ExampleFeatureConfig = parse_config(TFRecordTypeKey.EXAMPLE, feature_config_yaml, logger=logger)
logging.info(json.dumps(feature_config.get_all_features(), indent=4))
# -
# ## TFRecords - Examples vs SequenceExamples
# 
# ### Time to load the data and save awesome TFRecords
# +
from ml4ir.base.io import file_io
from ml4ir.base.data import tfrecord_writer
import glob
import os
# Load data
df = file_io.read_df_list(glob.glob(os.path.join(CSV_DATA_DIR, "train", "*.csv")))
# Save as TFRecord SequenceExample/Example
TFRECORD_DIR = '../data/pointwise_ranking_demo/'
if not os.path.exists(TFRECORD_DIR):
os.makedirs('../data/pointwise_ranking_demo/')
tfrecord_writer.write_from_df(df,
tfrecord_file=os.path.join(TFRECORD_DIR, 'file_0.tfrecord'),
feature_config=feature_config,
tfrecord_type=TFRecordTypeKey.EXAMPLE)
# Let's see what it looks like
df.head()
# -
# ### Load TFRecords and add custom preprocessing functions
# +
from ml4ir.base.data import tfrecord_reader
from tensorflow import print as tfprint
import tensorflow as tf
@tf.function
def strip_numbers(feature_tensor):
    """Remove all ASCII digits from a string tensor.

    Registered below as the per-instance preprocessing fn named
    "strip_numbers" (referenced by the query_str feature config).
    """
    return tf.strings.regex_replace(feature_tensor, "[0-9]", "")
# Define per instance preprocessing functions
preprocessing_fns = {
"strip_numbers": strip_numbers
}
# Create a TFRecord dataset
dataset = tfrecord_reader.read(data_dir=TFRECORD_DIR,
feature_config=feature_config,
tfrecord_type=TFRecordTypeKey.EXAMPLE,
preprocessing_keys_to_fns=preprocessing_fns)
tfprint(next(iter(dataset.batch(5))))
# -
# ### Map, Filter, Filter, Batch the Dataset
# +
# Variety of map, reduce, filter, shuffle operations can be used here
# dataset = dataset.<map, filter, reduce>(tf_preprocess_fn)
# NOTE: This is lazy batching
dataset = dataset.batch(batch_size=128, drop_remainder=True)
# -
# ### Or... you can do all of that for train, val and test in _one_ step!
# +
from ml4ir.base.data.relevance_dataset import RelevanceDataset
from ml4ir.base.config.keys import DataFormatKey
relevance_dataset = RelevanceDataset(
data_dir=CSV_DATA_DIR,
data_format=DataFormatKey.CSV,
feature_config=feature_config,
tfrecord_type=TFRecordTypeKey.EXAMPLE,
batch_size=128,
preprocessing_keys_to_fns=preprocessing_fns,
logger=logger
)
tfprint(relevance_dataset.train)
tfprint(relevance_dataset.validation)
tfprint(relevance_dataset.test)
# -
# ## Let's define a model, already!
# ### Model Framework
# 
# ### Step 0: Define the Interaction Model
# +
from ml4ir.base.model.scoring.interaction_model import InteractionModel, UnivariateInteractionModel
from ml4ir.base.config.keys import TFRecordTypeKey
# Define custom feature layer ops
def get_categorical_embedding(input_feature, feature_info):
    """Embedding lookup for categorical features.

    Reads vocabulary_size/embedding_size from the feature's
    feature_layer_info["args"] and applies a Keras Embedding layer
    named after the feature.

    NOTE(review): relies on `layers` (tensorflow.keras.layers), which is
    imported in a later cell — confirm cell execution order reaches that
    import before this fn is invoked.
    """
    feature_layer_info = feature_info.get("feature_layer_info")
    return layers.Embedding(input_dim=feature_layer_info["args"]["vocabulary_size"],
                            output_dim=feature_layer_info["args"]["embedding_size"],
                            name="categorical_embedding_{}".format(feature_info.get("name")),
                            )(input_feature)
def signed_log(input_tensor, feature_info):
    """Log-transform a numeric feature after shifting it.

    Computes log(input + shift), with `shift` read from the feature's
    feature_layer_info["args"], then expands the last dimension so the
    output can be concatenated with other per-feature layers.

    NOTE(review): despite the name, no sign handling is visible here —
    a non-positive (input + shift) would yield -inf/NaN; confirm
    intended behavior for negative inputs.
    """
    feature_layer_info = feature_info.get("feature_layer_info")
    return tf.expand_dims(
        tf.math.log(
            tf.add(input_tensor,
                   tf.cast(tf.constant(feature_layer_info["args"]["shift"]),
                           tf.float32)
                   )
        ),
        axis=-1)
feature_layer_fns = {
"custom_categorical_embedding": get_categorical_embedding,
"signed_log": signed_log
}
interaction_model: InteractionModel = UnivariateInteractionModel(
feature_config=feature_config,
feature_layer_keys_to_fns=feature_layer_fns,
tfrecord_type=TFRecordTypeKey.EXAMPLE)
# -
# ### Step 1: Define the Scorer
# +
from ml4ir.base.model.scoring.scoring_model import ScorerBase, RelevanceScorer
from ml4ir.base.model.losses.loss_base import RelevanceLossBase
from tensorflow.keras import layers
from tensorflow.keras import losses
class MyCustomLoss(RelevanceLossBase):
    """Pointwise sigmoid cross-entropy loss for the relevance scorer."""

    def get_loss_fn(self, **kwargs):
        """
        Define a sigmoid cross entropy loss

        Additionally can pass in record positions to handle positional bias
        """
        bce = losses.BinaryCrossentropy(reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)
        # `mask` is captured for use inside the closure; unused in this demo.
        mask = kwargs.get("mask")

        def _loss_fn(y_true, y_pred):
            # NOTE: Can use any of the metadata features to qualify your loss here
            return bce(y_true, y_pred)

        return _loss_fn

    def get_final_activation_op(self, output_name):
        # Sigmoid squashes logits into [0, 1] relevance scores; the mask
        # argument is accepted but not applied here.
        return lambda logits, mask: layers.Activation("sigmoid", name=output_name)(logits)
scorer: ScorerBase = RelevanceScorer.from_model_config_file(
model_config_file='../ml4ir/base/config/default_model_config.yaml',
interaction_model=interaction_model,
loss=MyCustomLoss(),
output_name="relevance_score")
logger.info(json.dumps(scorer.model_config, indent=4))
# -
# ### Step 2: Define Metrics
# +
from tensorflow.keras import metrics as kmetrics
# metrics = ['binary_accuracy', kmetrics.Precision(name='precision')]
metrics = ['binary_accuracy', kmetrics.Precision]
# -
# ### Step 3: Define Optimizer
# +
from tensorflow.keras.optimizers import Optimizer
from ml4ir.base.model.optimizer import get_optimizer
from ml4ir.base.config.keys import OptimizerKey
optimizer: Optimizer = get_optimizer(
optimizer_key=OptimizerKey.ADAM,
learning_rate=0.01,
learning_rate_decay=0.94,
learning_rate_decay_steps=1000,
gradient_clip_value=50,
)
# -
# ### Now... let's put it all together
# +
from ml4ir.base.model.relevance_model import RelevanceModel
from ml4ir.base.config.keys import OptimizerKey
relevance_model = RelevanceModel(
feature_config=feature_config,
scorer=scorer,
metrics=metrics,
optimizer=optimizer,
tfrecord_type=TFRecordTypeKey.EXAMPLE,
output_name="relevance_score",
logger=logger
)
# +
if not os.path.exists('../models'):
os.makedirs('../models')
if not os.path.exists('../logs'):
os.makedirs('../logs')
relevance_model.fit(relevance_dataset,
num_epochs=5,
models_dir='../models',
logs_dir='../logs',
monitor_metric='val_binary_accuracy',
monitor_mode='max')
# -
# ### Let's save the model(...and don't forget about serving signatures)
# 
# +
MODEL_DIR = '../models/pointwise_ranking_demo'
if not os.path.exists(MODEL_DIR):
os.makedirs(MODEL_DIR)
relevance_model.save(
models_dir=MODEL_DIR,
preprocessing_keys_to_fns=preprocessing_fns,
required_fields_only=True)
# -
# ### Reload the model for some predictions
# +
from ml4ir.base.config.keys import TFRecordTypeKey
relevance_model = RelevanceModel(
feature_config=feature_config,
tfrecord_type=TFRecordTypeKey.EXAMPLE,
model_file=os.path.join(MODEL_DIR, 'final/default/'),
logger=logger,
output_name="relevance_score"
)
logger.info("Is Keras model? {}".format(isinstance(relevance_model.model, tf.keras.Model)))
logger.info("Is compiled? {}".format(relevance_model.is_compiled))
relevance_model.predict(test_dataset=relevance_dataset.test).sample(25)
# -
# ### Let's see how the TFRecord serving signature works
# +
from tensorflow.keras import models as kmodels
from tensorflow import data
model = kmodels.load_model(
os.path.join(MODEL_DIR, 'final/tfrecord/'),
compile=False)
infer_fn = model.signatures["serving_tfrecord"]
dataset = data.TFRecordDataset(
glob.glob(os.path.join(CSV_DATA_DIR, "tfrecord", "test", "*.tfrecord")))
protos = next(iter(dataset.batch(10)))
logger.info("Example proto: \n{}".format(protos[0]))
logger.info("\n\n\nPredictions:")
logger.info(infer_fn(protos=protos))
# -
# ### Why you should onboard your ML application to ml4ir today!
#
# * Consistent code structure and modularization across projects
# * Scalable TFRecord data pipeline
# * Every ML application shouldn’t have to reinvent the wheel especially when there is barely any documentation on this.
# * Consistent file I/O overall
# * Consistent library versions across projects
# * Easily update versions and validate inference time impact, etc
# * Common Flowsnake enablement
# * We can define _git.soma/MLConfigs_ to track and automatically build docker images through strata from ml4ir.
# * Unified python ↔ JVM interoperability
# * Define integration tests
# * Allows us to build generic protobuf creation at runtime
# * Common training abstraction
# * Callbacks : checkpointing, early stopping, tensorboard, etc
# * Consistent way to save models
# * allows us to have generic deployment code
# * Shared metrics, losses, layers, etc.
# * Shared feature processing and feature layers across ML models
# * long term: shared NLP toolkit, probability toolkit
# * short term: categorical, text embeddings
# * Build models that can be trained with tight coupling:
# * transfer learning
# * shared embedding layers
# * multi task models
#
#
# > ### This is just the `end of the beginning` and we would love to take new passengers on this journey!
#
# 
# .
#
#
# .
#
#
# .
#
#
# .
#
#
# .
#
#
# .
#
#
# .
#
#
# .
#
# <center>Psst... You can file github issues -> <a href="https://github.com/salesforce/ml4ir/issues">HERE!</a></center>
# 
| python/notebooks/PointwiseRankingDemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp files
# -
# # files
# > Easily upload, download, and manage files on Server.
# +
# export
import requests
from yx_motor.api import API
class Files:
    """Class for handling AAH files endpoints.

    Thin wrapper over an authenticated API object exposing the Alteryx
    Analytics Hub (AAH) Virtual File System (VFS) "files/" endpoints.
    """

    def __init__(self, api: API):
        self.api = api
        self.base_endpoint = "files/"
        # Conflict actions accepted by the upload endpoint.
        self.valid_conflict_actions = ["MERGE", "CREATE_COPY"]

    def download_file(self, file_uuid: str, download_path: str, version: int = None):
        """Download a VFS file and write it to *download_path*.

        Args:
            file_uuid: unique identifier of the VFS asset.
            download_path: local path the file bytes are written to.
            version: optional asset version; server default when None.
        Returns:
            The raw API response (file bytes in ``response.content``).
        """
        response = self.api.get(
            url=f"{self.base_endpoint}content",
            params={"id": file_uuid, "version": version},
        )
        with open(download_path, "wb") as f:
            f.write(response.content)
        return response

    def upload_file(
        self,
        filename: str,
        upload_path: str,
        description: str = None,
        conflict_action: str = "CREATE_COPY",
    ):
        """Upload a local file to the VFS.

        Args:
            filename: path to the file on the local file system.
            upload_path: destination path in the VFS.
            description: optional description stored with the asset.
            conflict_action: "CREATE_COPY" (default) or "MERGE".
        Returns:
            The raw API response.
        Raises:
            ValueError: if *conflict_action* is not a supported action.
        """
        if conflict_action:
            self.validate_conflict_action(conflict_action)
        # Upload metadata travels in headers; the body is the raw bytes.
        upload_headers = {
            "Content-Type": "application/json",
            "Accept": "*/*",
            "Accept-Language": "en-US,en;q=0.5",
            "Accept-Encoding": "gzip,deflate",
            "path": upload_path,
            "description": description,
            "conflict_action": conflict_action,
        }
        with open(filename, "rb") as f:
            blob = f.read()
        response = self.api.post(
            url=f"{self.base_endpoint}", data=blob, non_default_headers=upload_headers
        )
        return response

    def update_file(self):
        """Update file metainfo (not implemented)."""
        # This seems to update metainfo, may not be needed for MVP
        # TODO: MVP
        pass

    def get_file_versions(self, file_uuid: str):
        """Return the version list for the VFS asset *file_uuid*."""
        response = self.api.get(
            url=f"{self.base_endpoint}versions/", params={"fileUuid": file_uuid}
        )
        return response

    def delete_file(self, asset_path: str, hard=False):
        """Delete the asset at *asset_path*.

        Args:
            asset_path: VFS path of the asset to delete.
            hard: if True, permanently remove; otherwise soft-delete
                (restorable via restore_deleted_file).
        """
        payload = {"assetPaths": [f"{asset_path}"]}
        # "remove" is permanent; "softDelete" is reversible.
        targeturl = "remove" if hard else "softDelete"
        response = self.api.post(url=f"{self.base_endpoint}{targeturl}", json=payload)
        return response

    def move_file(
        self,
        source_path: str,
        target_path: str,
        move_type="moveFiles",
        versions_action="ALL_VERSIONS",
        conflicts_action="SKIP",
    ):
        """Move (or copy) a VFS asset from *source_path* to *target_path*.

        Args:
            source_path: VFS path of the asset to move.
            target_path: destination VFS path.
            move_type: "moveFiles" to move, "copy" to duplicate.
            versions_action: whether version history travels with the file.
            conflicts_action: behavior when the target already exists
                (e.g. SKIP, FAIL_OPERATION, MERGE).
        """
        # TODO discuss move_type, move or copy
        payload = {
            "assets": [
                {"sourcePath": f"{source_path}", "targetPath": f"{target_path}"}
            ],
            "versionsAction": f"{versions_action}",
            "conflictsAction": f"{conflicts_action}",
        }
        response = self.api.post(url=f"{self.base_endpoint}{move_type}", json=payload)
        return response

    def restore_deleted_file(self, asset_path: str = None, asset_id: str = None):
        """Restore a soft-deleted asset by trash path and/or UUID.

        Args:
            asset_path: optional 'trashPath' to the deleted asset.
            asset_id: optional UUID of the deleted asset.
        """
        asset_paths = {}
        asset_ids = {}
        if asset_path:
            asset_paths = {"assetPaths": [f"{asset_path}"]}
        if asset_id:
            asset_ids = {"assetIds": [f"{asset_id}"]}
        payload = {**asset_paths, **asset_ids}
        # Does onlyDescendants have other values besides True?
        response = self.api.post(
            url=f"{self.base_endpoint}restoreDeleted", json=payload
        )
        return response

    def validate_conflict_action(self, conflict_action: str):
        """Raise ValueError if *conflict_action* is not supported."""
        if conflict_action not in self.valid_conflict_actions:
            raise ValueError(
                f"Specified conflict action must be one of {self.valid_conflict_actions}"
            )
# -
#hide
# just removing the insecure warning for now
# TODO: Secure requests and remove this code
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# ## Examples
#
# **Note**:
# The files commands are intended to be called by users through the Client class. Below, you will see that to accommodate this, we are creating the files object the same way the Client class does, by passing in an authenticated API object.
#
# In normal practice, a user will simply create a client object, and be able to call `client.files.download_file('xxxxxx-xxxxx')`.
#
# However, for this documentation, we are calling the files object directly to better demonstrate the Files class in an isolated fashion.
# ### Download Files Example
from nbdev.showdoc import *
show_doc(Files.download_file)
# The download_file method allows a user to pass in the unique identifier for the file, along with the path they wish to download the file to on the client machine. The file will be downloaded and saved to the path specified.
#
# **Arguments**:
# - file_uuid: The VFS unique identifier for the file the user wishes to download.
# - download_path: The path on the user's local file system which they wish the file to be persisted to once it is downloaded.
# - version: Optional. The version of the VFS asset the user wishes to download. Defaults to latest version if not specified. TODO: Need to double check this statement.
# +
#hide
# Unit test code for download files
from unittest.mock import Mock
from yx_motor.tests.utils.unit_test_helpers import (
files_test_pickles,
unpickle_object
)
api = Mock()
api.get = Mock()
api.get.return_value = unpickle_object(files_test_pickles.download_file)
# -
# Initialize the files object the same way the Client object does.
files = Files(api)
# Below is the unique identifier for the file we wish to download from the Alteryx Analytics Hub's Virtual File System (VFS).
asset_id = "9b75b3d8-56c1-4ce6-bfe1-9aabaea1e65f"
# Next, we will trigger the method. To better illustrate what the download_file method does, we are going to capture the response. It is important to note that this is not necessary. Simply calling the `files.download_file` command is all that is necessary to successfully download a file to the specified path.
# Next, let's trigger the download_file command.
response = files.download_file(file_uuid=asset_id,
download_path=f"example_downloads/some_name.yxmd")
# As you can see, the workflow has been downloaded to the specified location.
#
# Now, let's inspect the response object.
response.content # The requested file is attached to the response as a binary object.
# Here, we see that the binary object is indeed an Alteryx workflow.
# ### Upload Files Example
from nbdev.showdoc import *
show_doc(Files.upload_file)
# The upload_file method allows a user to upload a file from their local filesystem to the Alteryx Analytics Hub's Virtual File System (VFS).
#
# TODO: Need to rename the filename argument to file_path to avoid confusion.
#
# **Arguments**:
# - filename: Path to the file on the user's local file system
# - upload_path: The path on the AAH VFS the user wishes the file to be uploaded to.
# - description: Optional. Description the user wants to be attached to the file in the VFS.
# - conflict_action: Defaults to 'CREATE_COPY'. One of 'CREATE_COPY'| 'MERGE'
# - CREATE_COPY: If file already exists at specified path, a copy is created.
# - MERGE: If file already exists, new version is created at same location.
#
# +
#hide
# Unit test code for download files
from unittest.mock import Mock
from yx_motor.tests.utils.unit_test_helpers import (
files_test_pickles,
unpickle_object
)
api = Mock()
api.post = Mock()
api.post.return_value = unpickle_object(files_test_pickles.upload_file)
# -
# Initialize the files object the same way the Client object does.
files = Files(api)
# #### Simple Upload Example
#
# Let's upload a yxzp file in our local filesystem to a location on Alteryx Analytics Hub's VFS, specified via the upload_path parameter.
response = files.upload_file(filename=r"example_downloads/some_name.yxmd",
upload_path="/Workspaces/Public/example_1.yxzp")
response.json()
# Above, we can see that the response shows the file was successfully uploaded to the upload path we specified.
# #### Upload with duplicate file example
#
# What happens if we upload the same file twice? Let's run the same line of code a second time to see.
# +
#hide
# Unit test code for download files
from unittest.mock import Mock
from yx_motor.tests.utils.unit_test_helpers import (
files_test_pickles,
unpickle_object
)
api = Mock()
api.post = Mock()
api.post.return_value = unpickle_object(files_test_pickles.upload_duplicate_file)
# -
response = files.upload_file(filename=r"example_downloads/some_name.yxmd",
upload_path="/Workspaces/Public/example_1.yxzp")
response.json()
# If you look above, you see that a copy of the file was created in the same location, but now the file is named example_1(1).yxzp.
#
# NOTE: It WAS named that in the payload. Update at noon on 4/27/20 seems to have broken this, so that the UI shows the example_1(1) name, but the vfs path appears to now have a uid appended to it.
#
# This is because the default conflict action is 'CREATE_COPY'.
# #### Upload a file with MERGE command
#
# Let's now repeat the call, but this time let's specify 'MERGE' as the conflict action. Also, we're changing the source of the file to a larger one, to ensure that the contentSize property in the JSON response proves that the file has been updated.
# +
#hide
# Unit test code for download files
from unittest.mock import Mock
from yx_motor.tests.utils.unit_test_helpers import (
files_test_pickles,
unpickle_object
)
api = Mock()
api.post = Mock()
api.post.return_value = unpickle_object(files_test_pickles.upload_merge_file)
files = Files(api)
# -
response = files.upload_file(filename=r"example_downloads/some_name.yxmd",
upload_path="/Workspaces/Public/example_1.yxzp",
conflict_action='MERGE')
response.json()
# In the payload above, you can see that the version of the file is now 2, and that it's contentSize property is now 4055, bigger than the original 2176 value.
#
# This demonstrates that the file has indeed been updated in place, with a versioned copy.
#
# This MERGE command has a very powerful property:
#
# It uses a "content hash" on the file being uploaded to detect if the file has actually changed. If we run the above command again, with the same file, we will see that the file's version remains the same.
# Extract the file uuid from the response to demonstrate the get versions module later.
uploaded_file_uuid = response.json()['uuid']
from nbdev.showdoc import *
show_doc(Files.get_file_versions)
# The get_file_versions method returns a list of all of the versions for a given VFS asset.
#
# **Arguments**:
# - file_uuid: VFS Asset unique identifier for the VFS file you want to get versions for.
# +
#hide
# Unit test code for download files
from unittest.mock import Mock
from yx_motor.tests.utils.unit_test_helpers import (
files_test_pickles,
unpickle_object
)
api = Mock()
api.get = Mock()
api.get.return_value = unpickle_object(files_test_pickles.get_file_versions)
files = Files(api)
# -
response = files.get_file_versions(file_uuid=uploaded_file_uuid)
response.json()
from nbdev.showdoc import *
show_doc(Files.delete_file)
# The delete_file method deletes a given asset located at the specified asset path in the VFS. Optionally, the user can specify whether the delete is permanent (hard=True) or reversable (hard=False).
#
# **Arguments**:
# - asset_path: The VFS path of the asset the user wants to delete.
# - hard: (Optional, defaults to False) If true, specifies a hard (permanent) delete. If false, the deleted asset can be restored.
# +
#hide
# Unit test code for download files
from unittest.mock import Mock
from yx_motor.tests.utils.unit_test_helpers import (
files_test_pickles,
unpickle_object
)
api = Mock()
api.post = Mock()
api.post.return_value = unpickle_object(files_test_pickles.delete_file)
files = Files(api)
# -
response = files.delete_file(asset_path=r'/Workspaces/Public/example_1.yxzp',
hard=True)
response.json()
from nbdev.showdoc import *
show_doc(Files.move_file)
# The move_file method moves a given asset from the source_path in the VFS to the target_path. Optionally, the user can specify the move_type, versions_action, or conflicts_action.
#
# **Arguments**:
# - source_path: The VFS path of the asset the user wants to move.
# - target_path: The destination path in the VFS where the user wants the file moved to.
# - move_type: (optional, defaults to moveFiles) If 'moveFiles', moves the file from source_path to target_path. If 'copy', creates a copy of the file in the target_path
# - versions_action: (optional, defaults to ALL_VERSIONS) specifies if version history should be moved along with the file
# - conflicts_action: (optional, defaults to SKIP) specifies what should happen if a file with the same name already exists in the target_path. options are SKIP (no action taken), FAIL_OPERATION (return an error), MERGE (update the file in target_path with the new file from source_path).
# +
#hide
# Unit test code for download files
from unittest.mock import Mock
from yx_motor.tests.utils.unit_test_helpers import (
files_test_pickles,
unpickle_object
)
api = Mock()
api.post = Mock()
api.post.return_value = unpickle_object(files_test_pickles.move_file)
files = Files(api)
# -
# ### Simple move example
#
# Lets try using a move command to rename a file
response = files.move_file(
r"/Workspaces/Public/move_file_example.yxmd",
r"/Workspaces/Public/move_file_destination.yxmd"
)
response.json()
# ### Simple copy example
#
# Now, lets try copying a file.
# +
#hide
# Unit test code for download files
from unittest.mock import Mock
from yx_motor.tests.utils.unit_test_helpers import (
files_test_pickles,
unpickle_object
)
api = Mock()
api.post = Mock()
api.post.return_value = unpickle_object(files_test_pickles.copy_file)
files = Files(api)
# -
response = files.move_file(
r"/Workspaces/Public/move_file_example.yxmd",
r"/Workspaces/Public/copied_file.yxmd",
"copy"
)
response.json()
# as you can see, in the response, there is a successCopied json object which contains information on the new file, including the targetId (the uuid of the new file).
from nbdev.showdoc import *
show_doc(Files.restore_deleted_file)
# The restore_deleted_file method restores a specified asset. It requires either an asset_path or an asset_id. If both are specified, the asset_id is used. This can only be used to restore assets which were not hard deleted.
#
# **Arguments**:
# - asset_path: (Optional) the 'trashPath' to the deleted asset including filename
# - asset_id: (Optional) the UUID of the deleted asset
# +
#hide
# Unit test code for download files
from unittest.mock import Mock
from yx_motor.tests.utils.unit_test_helpers import (
files_test_pickles,
unpickle_object
)
api = Mock()
api.post = Mock()
api.post.return_value = unpickle_object(files_test_pickles.restore_deleted)
files = Files(api)
# -
# ### Restore deleted file by uuid
#
# here we will restore a deleted file by pointing to a uuid
response = files.restore_deleted_file(asset_id='9b75b3d8-56c1-4ce6-bfe1-9aabaea1e65f')
response.json()
# ### Restore deleted file by asset_path
#
# here we will restore a deleted file using the 'trashPath' provided when an asset is deleted.
# +
#hide
# Unit test code for download files
from unittest.mock import Mock
from yx_motor.tests.utils.unit_test_helpers import (
files_test_pickles,
unpickle_object
)
api = Mock()
api.post = Mock()
api.post.return_value = unpickle_object(files_test_pickles.restore_deleted_by_path)
files = Files(api)
# -
response = files.restore_deleted_file(asset_path=r'/Trash/delete_file_example.yxmd')
response.json()
| 05_files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Análise de redes
#
# _Rede_ (_network_) é uma forma de organizar e representar dados discretos. Elas diferem da forma tabular, em que linhas e colunas são as estruturas fundamentais, e funcionam com base em dois conceitos:
#
# 1. _entidades_, ou _atores_, ou ainda _nós_, e
# 2. _relacionamentos_, ou _links_, ou _arcos_, ou ainda, _conexões_.
#
# Casualmente, o conceito de _rede_ se confunde com o conceito matemático de _grafo_, para o qual as entidades são chamadas _vértices_ e os relacionamentos _arestas_. Usa-se a notação $G(V,E)$ para designar um grafo genérico $G$ com um conjunto $V$ de vértices e um conjunto $E$ de arestas. A Fig. {numref}`random-graph` esboça um grafo genérico.
# ```{figure} ../figs/17/random-graph.png
# ---
# width: 300px
# name: random-graph
# ---
# Grafo genérico contendo 6 vértices e 13 arestas.
# ```
# ## Redes complexas
#
# Com o barateamento dos recursos de computação no final do século XX, a _análise de redes complexas_ (do inglês _complex network analysis_, ou CNA) evoluiu como uma área de pesquisa independente. Desde então, tornou-se possível mapear bancos de dados enormes e extrair conhecimento a partir de um emaranhado complexo de interligações.
#
# No século XXI, percebemos um interesse explosivo em CNA. Algumas aplicações modernas incluem, mas não se limitam a:
#
# - transporte, para planejamento de malhas ferroviárias, rodovias e conexões entre cidades;
# - sociologia, para entender pessoas, seu comportamento, interação em redes sociais, orientações de pensamento e preferências;
# - energia, para sistematizar linhas de transmissão de energia elétrica;
# - biologia, para modelar redes de transmissão de doenças infecciosas;
# - ciência, para encontrar os núcleos de pesquisa mais influentes do mundo em um determinado campo do conhecimento.
#
#
# ## O módulo `networkx`
#
# Neste capítulo, introduziremos alguns conceitos relacionados à CNA, tais como componentes conexas, medidades de centralidade e visualização de grafos usando o módulo Python `networkx`. Este módulo tornou-se popular pela sua versatilidade. Alguns de seus pontos positivos são:
#
# - facilidade de instalação;
# - ampla documentação no [site oficial](https://networkx.org);
# - extenso conjunto de funções e algoritmos;
# - versatilidade para lidar com redes de até 100.000 nós.
#
# ```{note}
# Algumas ferramentas com potencial similar ao `networkx` são [`igraph`](https://igraph.org) e [`graph-tool`](https://graph-tool.skewed.de). Especificamente para visualização, você poderá se interessar pelo [`Graphviz`](https://www.graphviz.org) ou pelo [`Gephi`](https://gephi.org).
# ```
#
# Vamos praticar um pouco com este módulo para entender conceitos fundamentais. Em seguida, faremos uma aplicação. Supondo que você já tenha instalado o `networkx`, importe-o:
import networkx as nx
# ### Criação de grafos não dirigidos
# Em seguida vamos criar um grafo $G$ _não dirigido_. Isso significa que o sentido da aresta é irrelevante. Contudo, vale comentar que há situações em que o sentido da aresta importa. Neste caso, diz-se que o grafo é _dirigido_.
# +
# cria grafo não dirigido com 4 vértices
# inicializa
# initialise an empty undirected graph
G = nx.Graph()
# register the four edges in a single call; endpoint nodes are created implicitly
G.add_edges_from([(1, 2), (1, 3), (2, 3), (3, 4)])
# -
# Em seguida, visualizamos o grafo com `draw_networkx`.
nx.draw_networkx(G)
# ### Adição e deleção de nós e arestas
# Podemos adicionar nós individualmente ou por meio de uma lista, bem como usar _strings_ como nome.
G.add_node('A')
G.add_nodes_from(['p',99,'Qq'])
G.add_node('Mn') # node added by mistake (removed further below)
nx.draw_networkx(G)
# Podemos fazer o mesmo com arestas sobre nós existentes ou não existentes.
G.add_edge('A','p') # single edge
G.add_edges_from([(1,99),(4,'A')]) # edges from a list of (source, destination) tuples
G.add_edge('Mn','no') # 'no' does not exist yet and is created implicitly
nx.draw_networkx(G)
# Nós e arestas podem ser removidos de maneira similar.
# removal mirrors insertion: a single node or a list of nodes
G.remove_node('no')
G.remove_nodes_from(['Qq',99,'p'])
nx.draw_networkx(G)
# edges are removed the same way; their incident nodes are kept
G.remove_edge(1,2)
G.remove_edges_from([('A',4),(1,3)])
nx.draw_networkx(G)
# Para remover todos os nós e arestas do grafo, mas mantê-lo criado, usamos `clear`.
G.clear()  # drop every node and edge but keep the (now empty) graph object
# Verificamos que não há nós nem arestas:
len(G.nodes()), len(G.edges)
# Para deletá-lo completamente, podemos fazer:
del G
# ### Criação de grafos aleatórios
# Podemos criar um grafo aleatório de diversas formas. Com `random_geometric_graph`, o grafo de _n_ nós uniformemente aleatórios fica restrito ao "cubo" unitário de dimensão `dim` e conecta quaisquer dois nós _u_ e _v_ cuja distância entre eles é no máximo `raio`.
# 30 nodes with connection radius 0.2
n = 30
raio = 0.2
G = nx.random_geometric_graph(n,raio,dim=2)
nx.draw_networkx(G)
# 30 nodes with connection radius 5 (larger than the unit-square diagonal, so every pair is connected)
n = 30
raio = 5
G = nx.random_geometric_graph(n,raio,dim=2)
nx.draw_networkx(G)
# 12 nodes with connection radius 1.15
n = 12
raio = 1.15
G = nx.random_geometric_graph(n,raio,dim=2)
nx.draw_networkx(G)
# 12 nodes with connection radius 0.4
n = 12
raio = 0.4
G = nx.random_geometric_graph(n,raio,dim=2)
nx.draw_networkx(G)
# ### Impressão de listas de nós e de arestas
#
# Podemos acessar a lista de nós ou de arestas com:
G.nodes()
G.edges()
# Notemos que as arestas são descritas por meio de tuplas (_origem_,_destino_).
# Se especificarmos `data=True`, atributos adicionais são impressos. Para os nós, vemos `pos` como a posição espacial.
print(G.nodes(data=True))
# edge attribute dict is empty for this graph (no capacities/weights set)
print(G.edges(data=True))
# ### Criação de redes a partir de arquivos
#
# Um modo conveniente de criar redes é ler diretamente um arquivo contendo informações sobre a conectividade. O _dataset_ que usaremos a partir deste ponto em diante corresponde a uma rede representando a amizade entre usuários reais do Facebook. Cada usuário é representado por um vértice e um vínculo de amizade por uma aresta. Os dados são anônimos.
#
# Carregamos o arquivo _.txt_ com `networkx.read_edgelist`.
# each line of the text file is one "user user" friendship pair
fb = nx.read_edgelist('../database/fb_data.txt')
len(fb.nodes), len(fb.edges)
# Vemos que esta rede possui 4039 usuários e 88234 vínculos de amizade. Você pode plotar o grafo para visualizá-lo, porém pode demorar um pouco...
# ## Propriedades relevantes
#
# Vejamos algumas propriedades de interesse de redes e grafos.
# ### Grau
#
# O _grau_ de um nó é o número de arestas conectadas a ele. Assim, o grau médio da rede do Facebook acima pode ser calculado por:
fb.number_of_edges()/fb.number_of_nodes()
# ou
fb.size()/fb.order()
# Ambos os resultados mostram que a razão entre arestas e nós é de aproximadamente 21,8. Como cada aresta liga dois usuários, o grau médio da rede — o número médio de amizades por usuário — é o dobro desse valor, cerca de 43,7.
# ### Caminho
#
# _Caminho_ é uma sequência de nós conectados por arestas contiguamente. O _caminho mais curto_ em uma rede é o menor número de arestas a serem visitadas partindo de um nó de origem _u_ até um nó de destino _v_.
#
# A seguir, plotamos um caminho formado por 20 nós.
# linear chain: node i is connected only to nodes i-1 and i+1
Gpath = nx.path_graph(20)
nx.draw_networkx(Gpath)
# ### Componente
#
# Um grafo é _conexo_ se para todo par de nós, existe um caminho entre eles. Uma _componente conexa_, ou simplesmente _componente_ de um grafo é um subconjunto de seus nós tal que cada nó no subconjunto tem um caminho para todos os outros.
#
# Podemos encontrar todas as componentes da rede do Facebook usando `connected_components`. Entretanto, o resultado final é um objeto _generator_. Para acessarmos as componentes, devemos usar um iterador.
# +
cc = nx.connected_components(fb)
# iterate over the components, printing the first 5 nodes of each
for c in cc:
    print(list(c)[0:5])
# -
# Uma vez que há apenas uma lista impressa, temos que a rede do Facebook, na verdade, é uma componente única. De outra forma,
# there is only 1 connected component: the whole network
nx.number_connected_components(fb)
# ### Subgrafo
#
# _Subgrafo_ é um subconjunto dos nós de um grafo e todas as arestas que os conectam. Para selecionarmos um _subgrafo_ da rede Facebook, usamos `subgraph`. Os argumentos necessários são: o grafo original e uma lista dos nós de interesse. Abaixo, geramos uma lista aleatória de `ng` nós.
# +
from numpy.random import randint
# number of nodes in the subgraph
ng = 40
# pick node ids at random (node names are strings)
# NOTE(review): randint(1, N) never draws 0 or N and can repeat ids, so the
# subgraph may contain fewer than ng distinct nodes — confirm this is intended.
nodes_to_get = randint(1,fb.number_of_nodes(),ng).astype(str)
# extract the subgraph
fb_sub = nx.subgraph(fb,nodes_to_get)
# plot
nx.draw_networkx(fb_sub)
# -
# Se fizermos alguma alteração no grafo original, pode ser que o número de componentes se altere. Vejamos:
# +
# copy the graph so the original network stays intact
fb_less = fb.copy()
# remove node '0'
fb_less.remove_node('0')
# number of components after the removal
nx.number_connected_components(fb_less)
# -
# Neste exemplo, a retirada de apenas um nó do grafo original resultou em 19 componentes, com número variável de elementos.
ncs = []
for c in nx.connected_components(fb_less):
    ncs.append(len(c))
# component sizes in decreasing order
sorted(ncs,reverse=True)
# ## Métricas de centralidade
#
# A _centralidade_ de um nó mede a sua importância relativa no grafo. Em outras palavras, nós mais "centrais" tendem a ser considerados os mais influentes, privilegiados ou comunicativos.
#
# Em uma rede social, por exemplo, um usuário com alta centralidade pode ser um _influencer_, um político, uma celebridade, ou até mesmo um malfeitor. Há diversas _métricas de centralidade_ disponíveis. Aqui veremos as 4 mais corriqueiras:
#
# - _centralidade de grau_ (_degree centrality_): definida pelo número de arestas de um nó;
# - _centralidade de intermediação_ (_betweenness centrality_): definida pelo número de vezes em que o nó é visitado ao tomarmos o caminho mais curto entre um par de nós distintos deste. Esta centralidade pode ser imaginada como uma "ponte" ou "pedágio".
# - _centralidade de proximidade_ (_closeness centrality_): definida pelo inverso da soma das distâncias do nó de interesse a todos os outros do grafo. Ela mede quão "próximo" o nó é de todos os demais. Um nó com alta centralidade é aquele que, grosso modo, "dista por igual" dos demais.
# - _centralidade de autovetor_ (_eigenvector centrality_): definida pelo escore relativo para um nó tomando por base suas conexões. Conexões com nós de alta centralidade aumentam seu escore, ao passo que conexões com nós de baixa centralidade reduzem seu escore. De certa forma, ela mede como um nó está conectado a nós influentes.
#
# Em particular, um nó com alta centralidade de proximidade e alta centralidade de intermediação é chamado de _hub_.
#
# Vamos calcular as centralidades de um subgrafo da rede do Facebook. Primeiro, extraímos um subgrafo menor.
# +
# number of nodes in the subgraph
ng = 400
# pick node ids at random (node names are strings)
nodes_to_get = randint(1,fb.number_of_nodes(),ng).astype(str)
# extract the subgraph used for the centrality computations below
fb_sub_c = nx.subgraph(fb,nodes_to_get)
# +
import matplotlib.pyplot as plt
# degree centrality — node colour encodes the centrality value
deg = nx.degree_centrality(fb_sub_c)
nx.draw_networkx(fb_sub_c,
                 with_labels=False,
                 node_color=list(deg.values()),
                 alpha=0.6,
                 cmap=plt.cm.afmhot)
# -
# betweenness centrality
bet = nx.betweenness_centrality(fb_sub_c)
nx.draw_networkx(fb_sub_c,
                 with_labels=False,
                 node_color=list(bet.values()),
                 alpha=0.6,
                 cmap=plt.cm.afmhot)
# closeness centrality
cln = nx.closeness_centrality(fb_sub_c)
nx.draw_networkx(fb_sub_c,
                 with_labels=False,
                 node_color=list(cln.values()),
                 alpha=0.6,
                 cmap=plt.cm.afmhot)
# eigenvector centrality
eig = nx.eigenvector_centrality(fb_sub_c)
nx.draw_networkx(fb_sub_c,
                 with_labels=False,
                 node_color=list(eig.values()),
                 alpha=0.6,
                 cmap=plt.cm.afmhot)
# ## Layouts de visualização
#
# Podemos melhorar a visualização das redes alterando os layouts. O exemplo a seguir dispõe o grafo em um layout melhor, chamado de `spring`. Este layout acomoda a posição dos nós iterativamente por meio de um algoritmo especial. Além disso, a centralidade de grau está normalizada no intervalo [0,1] e escalonada.
#
# Com o novo plot, é possível distinguir "comunidades", sendo os maiores nós os mais centrais.
# +
from numpy import array
pos_fb = nx.spring_layout(fb_sub_c,iterations = 50)
# node sizes: degree centrality rescaled to the range [0, 500]
nsize = array([v for v in deg.values()])
# NOTE(review): divides by zero if every node has the same degree — confirm acceptable
nsize = 500*(nsize - min(nsize))/(max(nsize) - min(nsize))
nodes = nx.draw_networkx_nodes(fb_sub_c, pos = pos_fb, node_size = nsize)
edges = nx.draw_networkx_edges(fb_sub_c, pos = pos_fb, alpha = .1)
# -
# Um layout aleatório pode ser plotado da seguinte forma:
pos_fb = nx.random_layout(fb_sub_c)
nx.draw_networkx(fb_sub_c,pos_fb,with_labels=False,alpha=0.5)
| _build/html/_sources/todo/18-analise-redes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os.path as osp
from PIL import Image
import torch.utils.data as data
# -
def make_datapath_list(rootpath):
    """Build lists of image and annotation file paths for VOC2012.

    Parameters
    ----------
    rootpath : str
        Root directory of the VOC dataset
        (e.g. "./data/VOCdevkit/VOC2012/").

    Returns
    -------
    tuple of list of str
        (train_img_list, train_anno_list, val_img_list, val_anno_list);
        image paths point at JPEGImages/<id>.jpg and annotation paths at
        SegmentationClass/<id>.png, in the order the ids appear in the
        split files.
    """
    imgpath_template = osp.join(rootpath, 'JPEGImages', '%s.jpg')
    annopath_template = osp.join(rootpath, 'SegmentationClass', '%s.png')

    def _collect(id_file):
        # Read one split file and expand each id into (image, annotation) paths.
        img_list, anno_list = [], []
        with open(id_file) as f:  # 'with' guarantees the handle is closed
            for line in f:
                file_id = line.strip()
                if not file_id:
                    continue  # tolerate trailing blank lines
                img_list.append(imgpath_template % file_id)
                anno_list.append(annopath_template % file_id)
        return img_list, anno_list

    # join path components instead of concatenating strings, so the result
    # is correct regardless of a trailing separator in rootpath
    train_img_list, train_anno_list = _collect(
        osp.join(rootpath, 'ImageSets', 'Segmentation', 'train.txt'))
    val_img_list, val_anno_list = _collect(
        osp.join(rootpath, 'ImageSets', 'Segmentation', 'val.txt'))

    return train_img_list, train_anno_list, val_img_list, val_anno_list
# +
rootpath = "./data/VOCdevkit/VOC2012/"
train_img_list, train_anno_list, val_img_list, val_anno_list = make_datapath_list(
    rootpath=rootpath)
# sanity check: show the first training image / annotation paths
print(train_img_list[0])
print(train_anno_list[0])
# +
from utils.data_augumentation import Compose, Scale, RandomRotation, RandomMirror, Resize, Normalize_Tensor
class DataTransform():
    """Dispatch image/annotation preprocessing by phase.

    'train' applies random scaling, rotation and mirroring before the
    resize + tensor-normalisation steps that 'val' uses on their own.
    """

    def __init__(self, input_size, color_mean, color_std):
        def _tail():
            # steps shared by both phases (fresh instances per pipeline)
            return [Resize(input_size), Normalize_Tensor(color_mean, color_std)]

        self.data_transform = {
            'train': Compose([
                Scale(scale=[0.5, 1.5]),
                RandomRotation(angle=[-10, 10]),
                RandomMirror(),
            ] + _tail()),
            'val': Compose(_tail()),
        }

    def __call__(self, phase, img, anno_class_img):
        # Look up the pipeline for this phase and run it on the pair.
        return self.data_transform[phase](img, anno_class_img)
# -
class VOCDataset(data.Dataset):
    """PyTorch dataset pairing VOC images with their class annotations.

    Attributes:
        img_list: paths to the input images.
        anno_list: paths to the annotation images (same order as img_list).
        phase: 'train' or 'val', forwarded to the transform.
        transform: callable(phase, img, anno) -> preprocessed pair.
    """

    def __init__(self, img_list, anno_list, phase, transform):
        self.img_list = img_list
        self.anno_list = anno_list
        self.phase = phase
        self.transform = transform

    def __len__(self):
        # Dataset length equals the number of image paths.
        return len(self.img_list)

    def __getitem__(self, index):
        # Delegate to pull_item; kept as a separate public method.
        return self.pull_item(index)

    def pull_item(self, index):
        """Load and preprocess the (image, annotation) pair at *index*."""
        img = Image.open(self.img_list[index])
        anno_class_img = Image.open(self.anno_list[index])
        return self.transform(self.phase, img, anno_class_img)
# +
# per-channel normalisation statistics (presumably the usual ImageNet values — verify)
color_mean = (0.485, 0.456, 0.406)
color_std = (0.229, 0.224, 0.225)
train_dataset = VOCDataset(train_img_list, train_anno_list, phase="train", transform=DataTransform(
    input_size=475, color_mean=color_mean, color_std=color_std))
val_dataset = VOCDataset(val_img_list, val_anno_list, phase="val", transform=DataTransform(
    input_size=475, color_mean=color_mean, color_std=color_std))
# inspect one preprocessed sample (image tensor, annotation tensor)
print(val_dataset.__getitem__(0)[0].shape)
print(val_dataset.__getitem__(0)[1].shape)
print(val_dataset.__getitem__(0))  # NOTE: each call reloads and re-transforms the sample
# +
batch_size = 8
train_dataloader = data.DataLoader(
    train_dataset, batch_size=batch_size, shuffle=True)
val_dataloader = data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=False)
# bundle the loaders so training code can switch phase by key
dataloaders_dict = {"train": train_dataloader, "val": val_dataloader}
# pull one validation batch to check tensor shapes
batch_iterator = iter(dataloaders_dict["val"])
imges, anno_class_imges = next(batch_iterator)
print(imges.size())  # torch.Size([8, 3, 475, 475])
print(anno_class_imges.size())  # presumably torch.Size([8, 475, 475]) — annotations carry no channel axis; verify
# -
# 以上
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# +
index = 0
imges, anno_class_imges = train_dataset.__getitem__(index)
# convert the (C, H, W) tensor layout to matplotlib's (H, W, C)
img_val = imges
img_val = img_val.numpy().transpose((1, 2, 0))
plt.imshow(img_val)
plt.show()
# reuse the colour palette of the original indexed-colour annotation PNG
anno_file_path = train_anno_list[0]
anno_class_img = Image.open(anno_file_path)
p_palette = anno_class_img.getpalette()
anno_class_img_val = anno_class_imges.numpy()
anno_class_img_val = Image.fromarray(np.uint8(anno_class_img_val), mode="P")
anno_class_img_val.putpalette(p_palette)
plt.imshow(anno_class_img_val)
plt.show()
# +
index = 0
imges, anno_class_imges = val_dataset.__getitem__(index)
img_val = imges
img_val = img_val.numpy().transpose((1, 2, 0))
plt.imshow(img_val)
plt.show()
# NOTE(review): the palette is taken from train_anno_list[0] although this cell
# shows a validation sample — harmless if all VOC annotations share one palette,
# but val_anno_list[index] looks intended; confirm.
anno_file_path = train_anno_list[0]
anno_class_img = Image.open(anno_file_path)
p_palette = anno_class_img.getpalette()
anno_class_img_val = anno_class_imges.numpy()
anno_class_img_val = Image.fromarray(np.uint8(anno_class_img_val), mode="P")
anno_class_img_val.putpalette(p_palette)
plt.imshow(anno_class_img_val)
plt.show()
# -
# 以上
| 3_semantic_segmentation/3-2_DataLoader.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pickle, os, torch
import pandas as pd
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
pointer=osp.expanduser(f'../tboard_results/')
runs=['tboard_redshift_130122.csv','tboard_redshift_180122.csv']
result_list=[]
for run in runs:
    results=pd.read_csv(osp.join(pointer, run))
    # map each case name onto the percentage of the merger tree trimmed away
    pct = []
    for case in results['case']:
        # NOTE(review): these are independent `if`s, not elif — a case name
        # containing two of the substrings (e.g. both '50' and '95') would
        # append twice and break the column assignment below; confirm the
        # case names are mutually exclusive.
        if '50' in case:
            pct.append(50)
        if 'z0.0' in case:
            pct.append(0)
        if '75' in case:
            pct.append(75)
        if 'final' in case:
            pct.append(100)
        if '85' in case:
            pct.append(85)
        if '95' in case:
            pct.append(95)
        if '99' in case:
            pct.append(99)
    results['frac']=np.array(pct)
    result_list.append(results)
# rename the first run's columns to match the second run's schema
result_list[0].columns=['case', 'scatter_m_star','lowest_m_star','epoch_exit',
                        'last20_m_star', 'last10_m_star', 'frac']
results = pd.concat(result_list)
# NOTE(review): this mask is never used here; it is recomputed per-frac below
mask=np.logical_and(results['lowest_m_star']<0.2, results['last10_m_star']<0.2)
results
# +
# frac=0
# results['lowest_m_star'][results['frac']==frac]*=0.98
# -
sigs, mus = [], []
sigs10, mus10 = [], []
# per-percentile spread and central value of the two error summaries,
# keeping only runs whose errors stayed below 0.2 dex
for frac in np.unique(results['frac']):
    z=results[results['frac']==frac]
    mask=np.logical_and(z['lowest_m_star']<0.2, z['last10_m_star']<0.2)
    z=z[mask]
    sigs.append(np.std(z['lowest_m_star']))
    mus.append(np.median(z['lowest_m_star']))
    sigs10.append(np.std(z['last10_m_star']))
    mus10.append(np.median(z['last10_m_star']))
percentiles = np.unique(results['frac'])
mus=np.array(mus)
fig, ax =plt.subplots(figsize=(10,7))
ax.errorbar(percentiles,mus, yerr=sigs, color='b', fmt='.',markersize=8,
            capsize=4, ecolor='g', alpha=0.9, label='Best during training')
# ax.errorbar(percentiles+0.5, mus10, yerr=sigs10, color='k', fmt='.', markersize=8,
#             capsize=4, ecolor='r', alpha=0.9, label='Median of last 20 epochs')
ax.plot(percentiles+0.5, mus, 'k--', alpha=0.4)
ax.set(xlabel='Percentile of merger tree left out', ylabel=r'Scatter ($\sigma (M_{*,true}-M_{*,pred})$) [dex]')
ax.set(title='Prediction error for 3xSAGEConv architecture for increasing cuts')
ax.legend()
mins, maxs = [], []
per = []
# sigs10, mus10 = [], []
# 16th/84th-percentile band of the best-epoch error, per trim fraction
for frac in np.unique(results['frac']):
    z=results[results['frac']==frac]
    mask=np.logical_and(z['lowest_m_star']<0.2, z['last10_m_star']<0.2)
    z=z[mask]
    mins.append(np.min(z['lowest_m_star']))
    maxs.append(np.max(z['lowest_m_star']))
    per.append(np.percentile(z['lowest_m_star'], [50-34,50+34]))
per = np.vstack(per)
# convert the absolute percentiles into asymmetric offsets around the median
per[:,0]=mus-per[:,0]
per[:,1]=per[:,1]-mus
per[0]
# x tick positions; NOTE(review): last tick sits at 102 while the data point is
# at 100 — presumably to keep the 99 and 100 labels apart; confirm.
pos=np.array([ 0, 50, 75, 85, 95, 99, 102])
# +
import matplotlib
# global serif font for the paper figure
font = {'family' : 'Serif',
        'weight' : 'normal',
        'size'   : 16}
matplotlib.rc('font', **font)
fig, ax = plt.subplots(figsize=(8,5))
ax.errorbar(percentiles, mus, yerr=per.T, color='k', fmt='.', markersize=8,
            capsize=4, ecolor='k', alpha=0.9, label='Median (16th, 84th percentile)')
# ax.hlines(0.079,0,100,color='k', linestyle='dashed', label='Lowest achieved error')
ax.plot(percentiles, mus, 'k--', alpha=0.9)
# ax.plot(percentiles, mins, 'b--', alpha=0.9, label='Lowest achieved error')
# y-label fixed: removed the unbalanced extra parenthesis ('\sigma ((M_*)')
ax.set(xlabel='Percentage of merger history left out before final halo', ylabel=r'$\sigma (M_{*})$ [dex]')
ax.set(title=r'$\sigma(M_{*})$ for trimmed merger trees',
       xticks=pos)
ax.legend(fontsize=12)
# -
# relabel the shifted tick positions with the true percentile values
ax.set_xticklabels(percentiles, rotation = 90)
fig
fig.savefig('../paper_figures/trim_tree_smass.png', bbox_inches='tight')
# human-friendly labels (appear unused below — kept for interactive work; verify)
pct_labels=[p for p in percentiles[:-1]]
pct_labels.append('Final halo')
per[0]
mus
percentiles
| analysis/trim_sweep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Loops
# [Datastics Lab](https://github.com/datasticslab/ISCB) | Created by [<NAME>](https://github.com/jubayer-hossain)
# ## Previous
# - What is Python?
# - Why Python Rocks in Research?
# - Little bit history of Python
# - Variables, Expressions, Comments
# - Data Types
# - Printing Messages
# - Operators in Python
# - Python Data Type Conversion
# - Python User Inputs
# - Algorithms and Flowcharts
# - Conditional Execution Patterns
#
# ## Today
# - Conditional Execution Patterns
# - `if` statement
# - `else` statement
# - `elif` statement
# ## `if` Statement
# ### Syntax
# ```python
# if condition:
# Statement...1
# Statement...2
# Statement...n
#
# ```
# - The `if` keyword
# - A Condition(that is an expression that evaluates True or False)
# - A colon
# - Starting on the next line, an **indented** block of code(called if clause)
#
# ### Flowchart
# 
# +
# Example-1
x = 5
if x > 3:
    # message fixed: x > 3 means x is larger, consistent with Example-2
    print("Larger")
    print("Inside if")
print("Outside if")
# -
# Example-2: two independent ifs — with x == 3 neither branch would fire
if x < 3:
    print("Smaller")
if x > 3:
    print("Larger")
print("End")
# ## `else` Statement
# ### Syntax
# ```python
# if condition:
# Body of if block
# else:
# Body of else block
#
# ```
# - The `else` keyword
# - A colon
# - Starting on the next line, an **indented** block of code(called else clause)
#
# ### Flowchart
# 
a = -10
if a > 0:          # strict comparison: zero would fall through to "Negative"
    print("Positive")
else:
    print("Negative")
a = 10
if a > 0:
    print("Positive")
else:
    print("Negative")
a = -3
if a >= 0:         # inclusive comparison: zero counts as "Positive" here
    print("Positive")
else:
    print("Negative")
# ## `elif` Statement
# ### Syntax
# ```python
# if test expression:
# Body of if
# elif test expression:
# Body of elif
# else:
# Body of else
# ```
# - The `elif` keyword
# - A Condition(that is an expression that evaluates True or False)
# - A colon
# - Starting on the next line, an **indented** block of code(called elif clause)
#
# ### Flowchart
# 
# BMI classification. An elif chain is evaluated top-down, so each branch's
# lower bound is already guaranteed by the branches above it.
bmi = 20
if bmi <= 18.5:
    print("Unhealthy")
elif bmi < 24.5:
    print("Normal")
elif bmi < 30:
    print("Healthy")
else:
    print("Obese")
# +
# Even or Odd
A = int(input("Enter a number: "))  # raises ValueError on non-numeric input
if A % 2 == 0:
    print("Even")
else:
    print("Odd")
# -
# quick interactive checks of the modulo operator
20 % 2
11 % 2
25 % 2
# ## Resources
# - https://www.python.org/doc/essays/blurb/
# - https://dev.to/duomly/10-reasons-why-learning-python-is-still-a-great-idea-5abh
# - https://www.stat.washington.edu/~hoytak/blog/whypython.html
# - https://www.programiz.com/python-programming
| notebooks/(F) Python Control Flow-I.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from __future__ import absolute_import, division, print_function
# +
# License: MIT
# -
# # GPNH-regularized convex coding of HadISST SST anomalies
#
# This notebook contains routines for performing a convex coding
# of SST anomalies.
# ## Packages
# +
# %matplotlib inline
import itertools
from math import pi
import os
import time
import cartopy.crs as ccrs
import cmocean
import matplotlib.dates as mdates
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import xarray as xr
from cartopy.util import add_cyclic_point
from mpl_toolkits.mplot3d import Axes3D
from sklearn.manifold import MDS, TSNE
# -
# ## Analysis parameters
# +
# Names of the dataset dimensions/variables used throughout this notebook
TIME_NAME = 'time'
LAT_NAME = 'latitude'
LON_NAME = 'longitude'
ANOMALY_NAME = 'sst_anom'
STANDARDIZED_ANOMALY_NAME = 'sst_std_anom'
# First and last years to retain for analysis
START_YEAR = 1870
END_YEAR = 2018
# First and last years of climatology base period
BASE_PERIOD_START_YEAR = 1981
BASE_PERIOD_END_YEAR = 2010
# Order of trend removed from anomalies
ANOMALY_TREND_ORDER = 1
# Zonal extents of analysis region
MIN_LATITUDE = -45.5
MAX_LATITUDE = 45.5
# Weighting used for EOFs ('scos' = sqrt(cos(latitude)))
LAT_WEIGHTS = 'scos'
RESTRICT_TO_CLIMATOLOGY_BASE_PERIOD = False
# Number of random restarts to use
N_INIT = 100
# If cross-validation is used, number of cross-validation folds
N_FOLDS = 10
# -
# ## File paths
def get_gpnh_output_filename(input_file, lat_weights, n_components, lambda_W, n_init, cross_validate=False, n_folds=N_FOLDS):
    """Return the GPNH results filename derived from *input_file*.

    A descriptor encoding the latitude weighting, number of components,
    regularisation strength, number of restarts and (when cross-validation
    is used) the number of folds is inserted between the input file's
    base name and its extension.
    """
    basename, ext = os.path.splitext(input_file)
    parts = ['gpnh', str(lat_weights),
             'k{:d}'.format(n_components),
             'lambda_W{:5.3e}'.format(lambda_W),
             'n_init{:d}'.format(n_init)]
    if cross_validate:
        parts.append('n_folds{:d}'.format(n_folds))
    return '.'.join([basename] + parts) + ext
# +
PROJECT_DIR = os.path.join(os.getenv('HOME'), 'projects', 'convex-dim-red-expts')
BIN_DIR = os.path.join(PROJECT_DIR, 'bin')
BASE_RESULTS_DIR = os.path.join(PROJECT_DIR, 'results')
RESULTS_DIR = os.path.join(BASE_RESULTS_DIR, 'hadisst', 'nc')
CSV_DIR = os.path.join(BASE_RESULTS_DIR, 'hadisst', 'csv')
PLOTS_DIR = os.path.join(BASE_RESULTS_DIR, 'hadisst', 'plt')
# create the output directories on first run
if not os.path.exists(RESULTS_DIR):
    os.makedirs(RESULTS_DIR)
if not os.path.exists(CSV_DIR):
    os.makedirs(CSV_DIR)
if not os.path.exists(PLOTS_DIR):
    os.makedirs(PLOTS_DIR)
SST_ANOM_INPUT_FILE = os.path.join(RESULTS_DIR, 'HadISST_sst.anom.{:d}_{:d}.trend_order{:d}.nc'.format(
    BASE_PERIOD_START_YEAR, BASE_PERIOD_END_YEAR, ANOMALY_TREND_ORDER))
SST_STD_ANOM_INPUT_FILE = os.path.join(RESULTS_DIR, 'HadISST_sst.std_anom.{:d}_{:d}.trend_order{:d}.nc'.format(
    BASE_PERIOD_START_YEAR, BASE_PERIOD_END_YEAR, ANOMALY_TREND_ORDER))
# fail fast if the preprocessed anomaly files are missing
if not os.path.exists(SST_ANOM_INPUT_FILE):
    raise RuntimeError("Input data file '%s' does not exist" % SST_ANOM_INPUT_FILE)
if not os.path.exists(SST_STD_ANOM_INPUT_FILE):
    raise RuntimeError("Input data file '%s' does not exist" % SST_STD_ANOM_INPUT_FILE)
# -
# ## Convex coding analysis of SST anomalies
#
# As for $k$-means and AA, the dictionary vectors are fitted using the first 90% of the
# unstandardized SST anomalies, and the remaining 10% of the data is used to
# get a rough estimate of the out-of-sample RMSE.
#
# The optimization if performed for k = 2, ..., 20 components, with random
# initial guesses. The fits are performed
# using the following script (see bin/run_hadisst_gpnh.py):
# display the fitting script for reference
with open(os.path.join(BIN_DIR, 'run_hadisst_gpnh.py')) as ifs:
    for line in ifs:
        print(line.strip('\n'))
# +
lambda_W = 1000
max_n_components = 20
sst_k = []
sst_train_cost = []
sst_train_rmse = []
sst_test_cost = []
sst_test_rmse = []
# collect the fit diagnostics stored as attributes of each results file
for i in range(1, max_n_components + 1):
    output_file = get_gpnh_output_filename(SST_ANOM_INPUT_FILE, LAT_WEIGHTS, i, lambda_W, N_INIT)
    with xr.open_dataset(output_file) as ds:
        sst_k.append(ds.sizes['component'])
        sst_train_cost.append(float(ds.attrs['training_set_cost']))
        sst_train_rmse.append(float(ds.attrs['training_set_rmse']))
        sst_test_cost.append(float(ds.attrs['test_set_cost']))
        sst_test_rmse.append(float(ds.attrs['test_set_rmse']))
sst_k = np.array(sst_k)
sst_train_cost = np.array(sst_train_cost)
sst_train_rmse = np.array(sst_train_rmse)
sst_test_cost = np.array(sst_test_cost)
sst_test_rmse = np.array(sst_test_rmse)
# write one CSV row per number of components
cost_output_file = 'HadISST_sst.anom.{:d}_{:d}.trend_order{:d}.gpnh.{}.lambda_W{:5.3e}.n_init{:d}.cost.csv'.format(
    BASE_PERIOD_START_YEAR, BASE_PERIOD_END_YEAR, ANOMALY_TREND_ORDER, LAT_WEIGHTS, lambda_W, N_INIT)
cost_output_file = os.path.join(CSV_DIR, cost_output_file)
cost_data = np.zeros((sst_k.shape[0], 5))
cost_data[:, 0] = sst_k
cost_data[:, 1] = sst_train_cost
cost_data[:, 2] = sst_train_rmse
cost_data[:, 3] = sst_test_cost
cost_data[:, 4] = sst_test_rmse
header = 'n_components,training_set_cost,training_set_rmse,test_set_cost,test_set_rmse'
fmt = '%d,%16.8e,%16.8e,%16.8e,%16.8e'
np.savetxt(cost_output_file, cost_data, header=header, fmt=fmt)
# +
# training vs test RMSE as a function of the number of components
fig = plt.figure(figsize=(7, 5))
ax = plt.gca()
ax.plot(sst_k, sst_train_rmse, 'b-', label='Training set RMSE')
ax.plot(sst_k, sst_test_rmse, 'b:', label='Test set RMSE')
ax.grid(ls='--', color='gray', alpha=0.5)
ax.legend(fontsize=14)
ax.set_xlabel('Number of clusters', fontsize=14)
ax.set_ylabel('RMSE', fontsize=14)
ax.xaxis.set_major_locator(ticker.MultipleLocator(2))
ax.xaxis.set_minor_locator(ticker.MultipleLocator(1))
ax.xaxis.set_major_formatter(ticker.FormatStrFormatter('%d'))
ax.tick_params(labelsize=14)
plt.show()
plt.close()
# +
n_components = 3
lambda_W = 0
output_file = get_gpnh_output_filename(SST_ANOM_INPUT_FILE, LAT_WEIGHTS, n_components, lambda_W, N_INIT)
gpnh_ds = xr.open_dataset(output_file)
components = gpnh_ds['component'].values
n_samples = gpnh_ds.sizes[TIME_NAME]
projection = ccrs.PlateCarree(central_longitude=180)
wrap_lon = True
cmap = cmocean.cm.thermal
# give every panel the same colour floor: the global minimum over components
component_vmins = np.empty(n_components)
vmin = None
for i, component in enumerate(components):
    component_vmin = gpnh_ds['dictionary'].sel(component=component).min().item()
    if vmin is None or component_vmin < vmin:
        vmin = component_vmin
component_vmins[:] = vmin
# ... and the same colour ceiling: the global maximum over components
component_vmaxs = np.empty(n_components)
vmax = None
for i, component in enumerate(components):
    component_vmax = gpnh_ds['dictionary'].sel(component=component).max().item()
    if vmax is None or component_vmax > vmax:
        vmax = component_vmax
component_vmaxs[:] = vmax
# grid of panels plus a short final row reserved for colorbars
ncols = 2 if n_components % 2 == 0 else 3
nrows = int(np.ceil(n_components / ncols))
height_ratios = np.ones((nrows + 1))
height_ratios[-1] = 0.1
fig = plt.figure(constrained_layout=False, figsize=(6 * ncols, 3 * nrows))
gs = gridspec.GridSpec(ncols=ncols, nrows=nrows + 1, figure=fig,
                       wspace=0.09, hspace=0.12,
                       height_ratios=height_ratios)
lat = gpnh_ds[LAT_NAME]
lon = gpnh_ds[LON_NAME]
row_index = 0
col_index = 0
for i, component in enumerate(components):
    dictionary_data = gpnh_ds['dictionary'].sel(component=component).values
    if wrap_lon:
        # add a duplicate longitude column so contours close at the dateline
        dictionary_data, dictionary_lon = add_cyclic_point(dictionary_data, coord=lon)
    else:
        dictionary_lon = lon
    lon_grid, lat_grid = np.meshgrid(dictionary_lon, lat)
    ax = fig.add_subplot(gs[row_index, col_index], projection=projection)
    ax.coastlines()
    ax.set_global()
    # NOTE: vmin/vmax are computed above but commented out of contourf below,
    # so each panel currently autoscales its own colour range.
    ax_vmin = component_vmins[i]
    ax_vmax = component_vmaxs[i]
    cs = ax.contourf(lon_grid, lat_grid, dictionary_data, # vmin=ax_vmin, vmax=ax_vmax,
                     cmap=cmap, transform=ccrs.PlateCarree())
    cb = fig.colorbar(cs, pad=0.03, orientation='horizontal')
    cb.set_label(r'Weighted SSTA/${}^\circ$C', fontsize=13)
    ax.set_ylim([MIN_LATITUDE, MAX_LATITUDE])
    ax.set_title('State {}'.format(component + 1), fontsize=14)
    ax.set_aspect('equal')
    fig.canvas.draw()
    col_index += 1
    if col_index == ncols:
        col_index = 0
        row_index += 1
output_file = os.path.join(PLOTS_DIR, 'HadISST_sst.anom.{:d}_{:d}.trend_order{:d}.gpnh.{}.k{:d}.lambda_W{:5.3e}.n_init{:d}.dictionary.unsorted.pdf'.format(
    BASE_PERIOD_START_YEAR, BASE_PERIOD_END_YEAR, ANOMALY_TREND_ORDER, LAT_WEIGHTS,
    n_components, lambda_W, N_INIT))
plt.savefig(output_file, bbox_inches='tight')
plt.show()
plt.close()
gpnh_ds.close()
# +
def to_1d_array(da):
    """Flatten a DataArray and drop its NaN (missing) entries."""
    values = np.ravel(da.values)
    # boolean mask keeps only the finite (non-missing) features
    return values[~np.isnan(values)]
def pattern_correlation(state, eof):
    """Pearson pattern correlation between a state map and an EOF map."""
    stacked = np.vstack([to_1d_array(state), to_1d_array(eof)])
    # corrcoef yields the 2x2 correlation matrix; the off-diagonal entry
    # is the correlation between the two flattened maps
    return np.corrcoef(stacked, rowvar=True)[0, 1]
def sort_states(ds, eofs_reference_file):
    """Sort states according to pattern correlation with EOFs.

    For each EOF in order, the as-yet-unassigned state with the highest
    absolute pattern correlation is appended to the ordering; any states
    never matched are appended at the end in their original order.
    """
    n_components = ds.sizes['component']
    sort_order = []
    with xr.open_dataset(eofs_reference_file) as eofs_ds:
        n_eofs = eofs_ds.sizes['component']
        for i in range(n_eofs):
            # correlation of every state with EOF i
            correlations = np.empty((n_components,))
            for k in range(n_components):
                correlations[k] = pattern_correlation(
                    ds['dictionary'].sel(component=k),
                    eofs_ds['EOFs'].sel(component=i))
            # best |correlation| first; take the first not already assigned
            ordering = np.argsort(-np.abs(correlations))
            for k in range(n_components):
                if ordering[k] not in sort_order:
                    sort_order.append(ordering[k])
                    break
            if np.size(sort_order) == n_components:
                break
    assert len(sort_order) <= n_components
    assert np.size(np.unique(sort_order)) == np.size(sort_order)
    if len(sort_order) < n_components:
        # fewer EOFs than states: append the leftovers in original order
        unassigned = [i for i in range(n_components) if i not in sort_order]
        sort_order += unassigned
    assert len(sort_order) == n_components
    assert np.size(np.unique(sort_order)) == np.size(sort_order)
    # rebuild the dataset with component i taken from sort_order[i]
    sorted_ds = xr.zeros_like(ds)
    for i in range(n_components):
        sorted_ds = xr.where(sorted_ds['component'] == i,
                             ds.sel(component=sort_order[i]), sorted_ds)
    # xr.where drops attributes; copy them back explicitly
    for a in ds.attrs:
        sorted_ds.attrs[a] = ds.attrs[a]
    return sorted_ds
# +
n_components = 3
lambda_W = 0
output_file = get_gpnh_output_filename(SST_ANOM_INPUT_FILE, LAT_WEIGHTS, n_components, lambda_W, N_INIT)
gpnh_ds = xr.open_dataset(output_file)
# reorder the states to match the corresponding PCA/EOF solution
eofs_reference_file = os.path.join(RESULTS_DIR, 'HadISST_sst.anom.{:d}_{:d}.trend_order{:d}.pca.{}.k{:d}.nc'.format(
    BASE_PERIOD_START_YEAR, BASE_PERIOD_END_YEAR, ANOMALY_TREND_ORDER, LAT_WEIGHTS, n_components))
gpnh_ds = sort_states(gpnh_ds, eofs_reference_file)
# Calculate angle between leading principal axis and vector between first and second dictionary vectors
if n_components > 1:
    with xr.open_dataset(eofs_reference_file) as eofs_ds:
        first_eof = eofs_ds['EOFs'].sel(component=0).squeeze().fillna(0)
        dictionary_difference = (gpnh_ds['dictionary'].sel(component=0) - gpnh_ds['dictionary'].sel(component=1)).squeeze().fillna(0)
        # cosine similarity between the EOF and the dictionary-difference vector
        overlap = first_eof.dot(dictionary_difference) / np.sqrt(first_eof.dot(first_eof) * dictionary_difference.dot(dictionary_difference))
        print('cos(theta) = ', overlap)
components = gpnh_ds['component'].values
n_samples = gpnh_ds.sizes[TIME_NAME]
projection = ccrs.PlateCarree(central_longitude=180)
wrap_lon = True
cmap = cmocean.cm.thermal
# common colour floor/ceiling across components (mirrors the unsorted plot cell)
component_vmins = np.empty(n_components)
vmin = None
for i, component in enumerate(components):
    component_vmin = gpnh_ds['dictionary'].sel(component=component).min().item()
    if vmin is None or component_vmin < vmin:
        vmin = component_vmin
component_vmins[:] = vmin
component_vmaxs = np.empty(n_components)
vmax = None
for i, component in enumerate(components):
    component_vmax = gpnh_ds['dictionary'].sel(component=component).max().item()
    if vmax is None or component_vmax > vmax:
        vmax = component_vmax
component_vmaxs[:] = vmax
ncols = 2 if n_components % 2 == 0 else 3
nrows = int(np.ceil(n_components / ncols))
height_ratios = np.ones((nrows + 1))
height_ratios[-1] = 0.1
fig = plt.figure(constrained_layout=False, figsize=(6 * ncols, 3 * nrows))
gs = gridspec.GridSpec(ncols=ncols, nrows=nrows + 1, figure=fig,
                       wspace=0.09, hspace=0.12,
                       height_ratios=height_ratios)
lat = gpnh_ds[LAT_NAME]
lon = gpnh_ds[LON_NAME]
row_index = 0
col_index = 0
for i, component in enumerate(components):
    dictionary_data = gpnh_ds['dictionary'].sel(component=component).values
    if wrap_lon:
        dictionary_data, dictionary_lon = add_cyclic_point(dictionary_data, coord=lon)
    else:
        dictionary_lon = lon
    lon_grid, lat_grid = np.meshgrid(dictionary_lon, lat)
    ax = fig.add_subplot(gs[row_index, col_index], projection=projection)
    ax.coastlines()
    ax.set_global()
    ax_vmin = component_vmins[i]
    ax_vmax = component_vmaxs[i]
    cs = ax.contourf(lon_grid, lat_grid, dictionary_data, # vmin=ax_vmin, vmax=ax_vmax,
                     cmap=cmap, transform=ccrs.PlateCarree())
    cb = fig.colorbar(cs, pad=0.03, orientation='horizontal')
    cb.set_label(r'Weighted SSTA/${}^\circ$C', fontsize=13)
    ax.set_ylim([MIN_LATITUDE, MAX_LATITUDE])
    # NOTE(review): non-raw string relies on '\l' not being a valid escape
    # (DeprecationWarning on newer Pythons) — consider a raw string.
    ax.set_title('State {}, $\lambda_W = {:.2f}$'.format(component + 1, lambda_W), fontsize=14)
    ax.set_aspect('equal')
    fig.canvas.draw()
    col_index += 1
    if col_index == ncols:
        col_index = 0
        row_index += 1
output_file = os.path.join(PLOTS_DIR, 'HadISST_sst.anom.{:d}_{:d}.trend_order{:d}.gpnh.{}.k{:d}.lambda_W{:5.3e}.n_init{:d}.dictionary.sorted.pdf'.format(
    BASE_PERIOD_START_YEAR, BASE_PERIOD_END_YEAR, ANOMALY_TREND_ORDER, LAT_WEIGHTS,
    n_components, lambda_W, N_INIT))
plt.savefig(output_file, bbox_inches='tight')
plt.show()
plt.close()
gpnh_ds.close()
# +
def get_latitude_weights(da, lat_weights='scos', lat_name=LAT_NAME):
    """Return latitude weights for `da`.

    Parameters
    ----------
    da : xarray.DataArray
        Data array with a latitude coordinate named `lat_name`.
    lat_weights : str
        One of 'cos' (area weighting), 'scos' (square-root of cos, used
        when weights are applied before covariance-type calculations), or
        'none' (unit weights).

    Raises
    ------
    ValueError
        If `lat_weights` is not one of the recognized descriptors.
    """
    if lat_weights == 'cos':
        # Clip guards against tiny negative values from floating-point
        # round-off at the poles.
        return np.cos(np.deg2rad(da[lat_name])).clip(0., 1.)
    if lat_weights == 'scos':
        return np.cos(np.deg2rad(da[lat_name])).clip(0., 1.) ** 0.5
    if lat_weights == 'none':
        return xr.ones_like(da[lat_name])
    # %r already adds quotes; the previous "'%r'" format produced doubled
    # quotes in the error message.
    raise ValueError('Invalid weights descriptor %r' % lat_weights)
def weight_and_flatten_data(da, weights=None, sample_dim=TIME_NAME):
    """Apply optional weighting to `da` and reshape it to a 2D array.

    The result has shape (n_samples, n_features), with `sample_dim` along
    the first axis and all remaining dimensions flattened into the second.
    """
    feature_dims = [d for d in da.dims if d != sample_dim]
    original_shape = [da.sizes[d] for d in da.dims if d != sample_dim]
    if weights is not None:
        # Broadcast-multiply, then restore the original dimension order.
        weighted_da = (weights * da).transpose(*da.dims)
    else:
        weighted_da = da
    # Ensure the sample dimension comes first before flattening.
    if weighted_da.get_axis_num(sample_dim) != 0:
        weighted_da = weighted_da.transpose(*([sample_dim] + feature_dims))
    n_samples = weighted_da.sizes[sample_dim]
    # np.prod: np.product was deprecated and then removed in NumPy 2.0.
    n_features = np.prod(original_shape)
    flat_data = weighted_da.data.reshape(n_samples, n_features)
    return flat_data
def run_mds(da, dictionary_da, n_components=2, lat_weights=LAT_WEIGHTS, metric=True,
            n_init=4, max_iter=300, verbose=0, eps=0.001, n_jobs=None,
            random_state=None, lat_name=LAT_NAME, sample_dim=TIME_NAME):
    """Run MDS on given data.

    Embeds the (latitude-weighted) samples of `da`, the climatological
    origin (a zero vector), and the dictionary vectors of `dictionary_da`
    into a common `n_components`-dimensional space.

    Returns an xarray Dataset with variables 'embedding' (one point per
    sample), 'origin' (a single point), and 'dictionary' (one point per
    state), plus a 'stress' attribute holding the final MDS stress.
    """
    # NOTE(review): feature_dims and original_shape are computed but unused
    # in this function.
    feature_dims = [d for d in da.dims if d != sample_dim]
    original_shape = [da.sizes[d] for d in da.dims if d != sample_dim]
    # Get requested latitude weights
    weights = get_latitude_weights(da, lat_weights=lat_weights,
                                   lat_name=lat_name)
    # Convert input data array to plain 2D array
    flat_data = weight_and_flatten_data(da, weights=weights, sample_dim=sample_dim)
    n_samples, n_features = flat_data.shape
    # Remove any features/columns with missing data
    missing_features = np.any(np.isnan(flat_data), axis=0)
    valid_data = flat_data[:, np.logical_not(missing_features)]
    # Add the climatological point for reference
    valid_data = np.vstack([valid_data, np.zeros(valid_data.shape[1])])
    # Append dictionary vectors to data to be projected; the same
    # missing-feature mask is applied so columns line up with the samples.
    n_states = dictionary_da.sizes['component']
    flat_dictionary = np.reshape(dictionary_da.values, (n_states, n_features))
    valid_dictionary = flat_dictionary[:, np.logical_not(missing_features)]
    valid_data = np.vstack([valid_data, valid_dictionary])
    mds = MDS(n_components=n_components, metric=metric, n_init=n_init,
              max_iter=max_iter, verbose=verbose, eps=eps, n_jobs=n_jobs,
              random_state=random_state, dissimilarity='euclidean').fit(valid_data)
    # Rows of mds.embedding_ are ordered: samples first, then the single
    # origin row, then the dictionary states -- unpack accordingly.
    embedding_da = xr.DataArray(
        mds.embedding_[:n_samples],
        coords={sample_dim: da[sample_dim], 'component': np.arange(n_components)},
        dims=[sample_dim, 'component'])
    origin_da = xr.DataArray(
        mds.embedding_[n_samples],
        coords={'component': np.arange(n_components)},
        dims=['component'])
    dictionary_embed_da = xr.DataArray(
        mds.embedding_[n_samples + 1:],
        coords={'state': np.arange(dictionary_da.sizes['component']), 'component': np.arange(n_components)},
        dims=['state', 'component'])
    mds_ds = xr.Dataset(data_vars={'embedding': embedding_da, 'origin': origin_da, 'dictionary': dictionary_embed_da})
    mds_ds.attrs['stress'] = '{:16.8e}'.format(mds.stress_)
    return mds_ds
# +
# Load the SST anomalies and restrict them to the analysis period and
# latitude band before computing the MDS embedding.
sst_anom_ds = xr.open_dataset(SST_ANOM_INPUT_FILE)
sst_anom_ds = sst_anom_ds.where(
    (sst_anom_ds[TIME_NAME].dt.year >= START_YEAR) &
    (sst_anom_ds[TIME_NAME].dt.year <= END_YEAR), drop=True)
sst_anom_ds = sst_anom_ds.where(
    (sst_anom_ds[LAT_NAME] >= MIN_LATITUDE) &
    (sst_anom_ds[LAT_NAME] <= MAX_LATITUDE), drop=True)
sst_anom_da = sst_anom_ds[ANOMALY_NAME]
if RESTRICT_TO_CLIMATOLOGY_BASE_PERIOD:
    # Optionally keep only the climatology base period recorded in the
    # dataset's attributes.
    clim_base_period = [int(sst_anom_ds.attrs['base_period_start_year']),
                        int(sst_anom_ds.attrs['base_period_end_year'])]
    sst_anom_da = sst_anom_da.where(
        (sst_anom_da[TIME_NAME].dt.year >= clim_base_period[0]) &
        (sst_anom_da[TIME_NAME].dt.year <= clim_base_period[1]), drop=True)
# +
n_components = 3
lambda_W = 1000
output_file = get_gpnh_output_filename(SST_ANOM_INPUT_FILE, LAT_WEIGHTS, n_components, lambda_W, N_INIT)
gpnh_ds = xr.open_dataset(output_file)
# Sort the states to match the ordering of the reference EOFs so labels
# are comparable across methods.
eofs_reference_file = os.path.join(RESULTS_DIR, 'HadISST_sst.anom.{:d}_{:d}.trend_order{:d}.pca.{}.k{:d}.nc'.format(
    BASE_PERIOD_START_YEAR, BASE_PERIOD_END_YEAR, ANOMALY_TREND_ORDER, LAT_WEIGHTS, n_components))
gpnh_ds = sort_states(gpnh_ds, eofs_reference_file)
# 2-D MDS embedding of the scos-weighted anomalies, with the dictionary
# states and the climatological origin projected into the same plane.
mds_2d_scos = run_mds(sst_anom_da, gpnh_ds['dictionary'], n_components=2, lat_weights='scos', random_state=0)
n_samples = gpnh_ds.sizes[TIME_NAME]
fig = plt.figure(figsize=(7, 5))
ax = plt.gca()
ax.plot(mds_2d_scos['embedding'].sel(component=0), mds_2d_scos['embedding'].sel(component=1), '.')
markers = itertools.cycle(('.', 's', 'x', 'o', '+'))
# NOTE(review): this assumes the number of dictionary states equals
# n_components; if they ever differ, states would be dropped or missing.
for i in range(n_components):
    ax.plot(mds_2d_scos['dictionary'].sel(state=i, component=0),
            mds_2d_scos['dictionary'].sel(state=i, component=1),
            marker=next(markers), ls='none',
            label='State {:d}'.format(i + 1))
ax.plot(mds_2d_scos['origin'].sel(component=0), mds_2d_scos['origin'].sel(component=1), 'ko', markersize=8,
        label='Mean state')
ax.grid(ls='--', color='gray', alpha=0.5)
ax.legend(fontsize=13)
ax.set_xlabel('Principal coordinate 1', fontsize=14)
ax.set_ylabel('Principal coordinate 2', fontsize=14)
ax.axes.tick_params(labelsize=13)
output_file = os.path.join(PLOTS_DIR, 'HadISST_sst.anom.{:d}_{:d}.trend_order{:d}.gpnh.{}.k{:d}.lambda_W{:5.3e}.n_init{:d}.mds.pdf'.format(
    BASE_PERIOD_START_YEAR, BASE_PERIOD_END_YEAR, ANOMALY_TREND_ORDER, LAT_WEIGHTS,
    n_components, lambda_W, N_INIT))
plt.savefig(output_file, bbox_inches='tight')
plt.show()
plt.close()
gpnh_ds.close()
# -
| notebooks/hadisst_gpnh_convex_coding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:lincs-complimentarity] *
# language: python
# name: conda-env-lincs-complimentarity-py
# ---
# ## Calculate median replicate reproducibility in Cell Painting with same sample size as L1000
#
# Code modified from @adeboyeML
# +
import os
import pathlib
import pandas as pd
import numpy as np
from collections import defaultdict
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
from pycytominer import feature_select
from statistics import median
import random
sns.set_style("darkgrid")
from scipy import stats
import pickle
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
np.warnings.filterwarnings('ignore', category=np.VisibleDeprecationWarning)
# -
np.random.seed(42)
# +
# Mapping from L1000 dose keys to real concentrations, plus its inverse
# (keeping only the numeric code, as a string).
dose_recode_info = {
    'dose_1': '0.04 uM', 'dose_2':'0.12 uM', 'dose_3':'0.37 uM',
    'dose_4': '1.11 uM', 'dose_5':'3.33 uM', 'dose_6':'10 uM'
}
inv_dose_recode_info = {v: k.replace("dose_", "") for k, v in dose_recode_info.items()}
# +
# Load L1000 data to identify how many DMSOs
l1000_data_path = pathlib.Path("../L1000/L1000_lvl4_cpd_replicate_datasets/L1000_level4_cpd_replicates.csv.gz")
l1000_profile_df = pd.read_csv(l1000_data_path)
# Count how many DMSO samples
# (used later to subsample the Cell Painting DMSO wells to the same size)
n_dmso_l1000 = l1000_profile_df.query("pert_iname == 'DMSO'").shape[0]
print(l1000_profile_df.shape)
l1000_profile_df.head()
# +
# Get treatment replicate counts per well
cardinality_df = (
    l1000_profile_df
    .groupby(["pert_iname", "det_well", "dose"])
    ["Metadata_broad_sample"]
    .count()
    .reset_index()
    .rename(columns={"Metadata_broad_sample": "no_of_replicates"})
)
# Map the integer dose code (last character of the 'dose_N' key) to the
# human-readable concentration string.
cardinality_df = cardinality_df.assign(dose_real = cardinality_df.dose.replace({int(x[-1]): dose_recode_info[x] for x in dose_recode_info}))
print(cardinality_df.shape)
cardinality_df.head()
# -
# NOTE(review): "<PASSWORD>" looks like a redaction artifact (e.g. from an
# automated secrets scrubber); it must be replaced with the real git commit
# SHA or the download URL below will 404.
commit = "<PASSWORD>"
spherized_profile_link = f"https://github.com/broadinstitute/lincs-cell-painting/blob/{commit}\
/spherized_profiles/profiles/2016_04_01_a549_48hr_batch1_dmso_spherized_profiles_with_input_normalized_by_whole_plate.csv.gz?raw=true"
pertinfo_file = '../aligned_moa_CP_L1000.csv'
# Download the Level-4 spherized profiles directly from GitHub.
df_level4 = pd.read_csv(spherized_profile_link, compression='gzip',low_memory = False)
def recode_dose(dose_value):
    """Recode a continuous dose (mmoles per liter) into one of 8 discrete classes.

    Doses are binned to the lower edge of
    [0.04, 0.12, 0.37, 1.11, 3.33, 10.0, 20.0, 25.0); anything in
    (0, 0.04] maps to 0.04, and values outside every bin (0, negatives,
    or >= 25) are returned unchanged.
    """
    doses = [0.04, 0.12, 0.37, 1.11, 3.33, 10.0, 20.0, 25.0]
    # Guard clause replaces the old `&` test (bitwise op on booleans) that
    # was re-evaluated on every loop iteration.
    if 0.0 < dose_value <= 0.04:
        return 0.04
    rounded = round(dose_value, 2)
    for lower, upper in zip(doses, doses[1:]):
        if lower <= rounded < upper:
            return lower
    return dose_value
df_level4['Metadata_dose_recode'] = df_level4['Metadata_mmoles_per_liter'].apply(recode_dose)
def feature_selection(df_lvl4):
    """
    Perform feature selection by dropping columns with null values
    (greater than 384 i.e. equivalent to one plate worth of cell profiles)
    and highly correlated values from the data.

    Remaining NaNs in kept feature columns are imputed with the column
    mean. Returns a new dataframe of the key metadata columns followed by
    the selected feature columns.
    """
    metadata_columns = [x for x in df_lvl4.columns if (x.startswith("Metadata_"))]
    df_lvl4_metadata = df_lvl4[metadata_columns].copy()
    df_lvl4_features = df_lvl4.drop(metadata_columns, axis = 1)
    # Drop feature columns missing more than one plate's worth (384) of values.
    null_cols = [col for col in df_lvl4_features.columns if df_lvl4_features[col].isnull().sum() > 384]
    df_lvl4_features.drop(null_cols, axis = 1, inplace=True)
    ##feature selection was done already..prior to getting the spherized data!!
    ###df_lvl4_features = feature_select(df_lvl4_features, operation=["correlation_threshold", "variance_threshold"])
    # Mean-impute remaining NaNs. Assign back to the frame rather than
    # calling fillna(inplace=True) on the extracted Series: that
    # chained-assignment pattern is deprecated and silently does nothing
    # under pandas copy-on-write.
    for col in df_lvl4_features.columns:
        if df_lvl4_features[col].isnull().any():
            df_lvl4_features[col] = df_lvl4_features[col].fillna(df_lvl4_features[col].mean())
    df_meta_info = df_lvl4_metadata[['Metadata_broad_sample', 'Metadata_pert_id', 'Metadata_Plate', 'Metadata_Well',
                                     'Metadata_broad_id', 'Metadata_moa', 'Metadata_dose_recode']].copy()
    df_lvl4_new = pd.concat([df_meta_info, df_lvl4_features], axis=1)
    return df_lvl4_new
df_level4_new = feature_selection(df_level4)
def merge_dataframe(df, pertinfo_file):
    """
    Merge the aligned L1000 / Cell Painting metadata with the Level-4 data,
    recode the Metadata_dose_recode column to integer dose classes, and add
    a 'replicate_name' column identifying each replicate.

    Returns (merged dataframe, dataframe of rows without a known compound).
    """
    df_pertinfo = pd.read_csv(pertinfo_file)
    merged = df.merge(df_pertinfo, on='Metadata_broad_sample', how='outer')
    # Rows with no pert_iname are broad samples without an aligned compound.
    missing_mask = merged['pert_iname'].isnull()
    no_cpds_df = merged[missing_mask].copy().reset_index(drop=True)
    df_lvl4_new = merged[~missing_mask].reset_index(drop=True)
    dose_map = {0.0: 0, 0.04: 1, 0.12: 2, 0.37: 3, 1.11: 4, 3.33: 5, 10.0: 6, 20.0: 7}
    df_lvl4_new['Metadata_dose_recode'] = df_lvl4_new['Metadata_dose_recode'].map(dose_map)
    df_lvl4_new['replicate_name'] = ['replicate_' + str(idx) for idx in range(df_lvl4_new.shape[0])]
    return df_lvl4_new, no_cpds_df
df_level4_new, df_level4_no_cpds = merge_dataframe(df_level4_new, pertinfo_file)
##list of "Broad samples" WITHOUT Compounds after aligning L1000 and Cell painting MOAs
df_level4_no_cpds['Metadata_broad_sample'].unique().tolist()
def get_median_score(cpds_list, df):
    """
    Compute, for each compound, the median Pearson correlation between its
    replicate profiles. A compound with a single replicate gets the
    sentinel value 1 (such compounds are dropped downstream).
    """
    meta_cols = ['Metadata_broad_sample', 'Metadata_pert_id', 'Metadata_dose_recode', 'Metadata_Plate',
                 'Metadata_Well', 'Metadata_broad_id', 'Metadata_moa', 'broad_id',
                 'pert_iname', 'moa', 'replicate_name']
    cpds_median_score = {}
    for cpd in cpds_list:
        # Feature-only profiles of this compound's replicates.
        profiles = df.loc[df['pert_iname'] == cpd].drop(meta_cols, axis=1)
        # Correlation between replicates (columns of the transpose).
        corr_matrix = profiles.astype('float64').T.corr(method='pearson').values
        n_reps = len(corr_matrix)
        if n_reps == 1:
            cpds_median_score[cpd] = 1
        else:
            # Median over the strict upper triangle (each pair once).
            upper_vals = corr_matrix[np.triu_indices(n_reps, k=1)]
            cpds_median_score[cpd] = median(list(upper_vals))
    return cpds_median_score
def check_compounds(cpd_med_score, df):
    """
    Guarantee that every distinct compound in the Level-4 dataframe has an
    entry in *cpd_med_score*; compounds without a computed score get NaN.
    Mutates the dictionary in place and returns it.
    """
    for cpd in df['pert_iname'].unique():
        # setdefault only inserts when the key is absent.
        cpd_med_score.setdefault(cpd, np.nan)
    return cpd_med_score
def get_cpd_medianscores(df):
    """Compute median replicate-correlation scores for all compounds in the
    Level-4 dataframe, one column per dose point (1-6).

    Fix: the dose list is obtained with sorted() -- the previous
    list(set(...))[1:7] relied on the incidental iteration order of a set
    of ints, and the first output column was only created when the first
    dose happened to equal 1.
    """
    # Doses 1-6: skip the first (dose 0 / DMSO) after sorting.
    dose_list = sorted(df['Metadata_dose_recode'].unique())[1:7]
    df_cpd_med_score = None
    for dose in dose_list:
        df_dose = df[df['Metadata_dose_recode'] == dose].copy()
        cpds_list = df_dose['pert_iname'].unique().tolist()
        cpds_median_score = get_median_score(cpds_list, df_dose)
        # Compounds absent at this dose get NaN entries.
        cpds_median_score = check_compounds(cpds_median_score, df)
        # Sort by compound name so rows align across dose columns.
        sorted_med_score = dict(sorted(cpds_median_score.items(), key=lambda item: item[0]))
        column = 'dose_' + str(dose)
        if df_cpd_med_score is None:
            df_cpd_med_score = pd.DataFrame.from_dict(sorted_med_score, orient='index', columns=[column])
        else:
            df_cpd_med_score[column] = sorted_med_score.values()
    return df_cpd_med_score
# +
# Randomly subset Cell Painting profiles to match replicate sample size of L1000
build_subsample_df = []
for idx, l1000_pert in cardinality_df.iterrows():
    compound = l1000_pert.pert_iname
    dose = int(l1000_pert.dose)
    n_replicates = l1000_pert.no_of_replicates
    # All Cell Painting replicates of this compound at this dose.
    random_sample_df = (
        df_level4_new
        .query("pert_iname == @compound")
        .query("Metadata_dose_recode == @dose")
    )
    # Downsample only when Cell Painting has at least as many replicates;
    # otherwise keep the full (smaller) set.
    if n_replicates <= random_sample_df.shape[0]:
        random_sample_df = random_sample_df.sample(n=n_replicates, replace=False)
    build_subsample_df.append(random_sample_df)
# Combine results
build_subsample_df = pd.concat(build_subsample_df).reset_index(drop=True)
print(build_subsample_df.shape)
build_subsample_df.head()
# -
# Randomly sample DMSO
# Match the DMSO count observed in L1000, then pool with the treatments.
random_dmso_df = df_level4_new.query("Metadata_broad_sample == 'DMSO'").sample(n=n_dmso_l1000, replace=False)
df_level4_new_subsample = pd.concat([random_dmso_df, build_subsample_df]).reset_index(drop=True)
# Median replicate correlations on the subsampled data, per dose.
df_cpd_med_score = get_cpd_medianscores(df_level4_new_subsample)
df_cpd_med_score.head(10)
def drop_cpds_with_null(df):
    """
    Remove compounds whose median scores contain a 1 (the single-replicate
    sentinel) or a NaN at any of the dose points (1-6).
    Mutates *df* in place and returns it.
    """
    flagged = [
        cpd for cpd in df.index
        if (df.loc[cpd] == 1).any() or df.loc[cpd].isnull().any()
    ]
    df.drop(flagged, axis=0, inplace=True)
    return df
df_cpd_med_score = drop_cpds_with_null(df_cpd_med_score)
df_cpd_med_score.head(10)
def no_of_replicates_per_cpd(df, df_lvl4):
    """Add a 'no_of_replicates' column to *df*: the replicate count of each
    compound averaged (integer division) over dose points 1-6.

    Fix: the dose list uses sorted() -- the previous list(set(...))[1:7]
    depended on undefined set iteration order to exclude dose 0.
    """
    dose_list = sorted(df_lvl4['Metadata_dose_recode'].unique())[1:7]
    cpds_no_of_reps = {}
    for cpd in df.index:
        df_cpd = df_lvl4[df_lvl4['pert_iname'] == cpd]
        # Total replicates across the selected doses, then average per dose.
        num_of_reps = sum(int((df_cpd['Metadata_dose_recode'] == dose).sum()) for dose in dose_list)
        cpds_no_of_reps[cpd] = num_of_reps // len(dose_list)
    df['no_of_replicates'] = list(cpds_no_of_reps.values())
    return df
df_cpd_med_score = no_of_replicates_per_cpd(df_cpd_med_score, df_level4_new_subsample)
df_cpd_med_score["no_of_replicates"].unique()
df_cpd_med_score.shape
def save_to_csv(df, path, file_name, compress=None):
    """Save *df* as <path>/<file_name> (CSV, without the index).

    `compress` is forwarded to pandas' `compression` argument (e.g. "gzip").
    Fix: os.makedirs(..., exist_ok=True) avoids the exists()/mkdir() race
    and also creates intermediate directories, unlike the old os.mkdir.
    """
    os.makedirs(path, exist_ok=True)
    df.to_csv(os.path.join(path, file_name), index=False, compression=compress)
# Persist the per-compound scores and the subsampled Level-4 profiles.
save_to_csv(df_cpd_med_score.reset_index().rename({'index':'cpd'}, axis = 1),
            'cellpainting_lvl4_cpd_replicate_datasets', 'cpd_replicate_median_scores_subsample.csv')
save_to_csv(df_level4_new_subsample, 'cellpainting_lvl4_cpd_replicate_datasets',
            'cp_level4_cpd_replicates_subsample.csv.gz', compress="gzip")
# Output files for visualization
results_dir = pathlib.Path("../results")
cpd_summary_file = pathlib.Path(f"{results_dir}/median_score_per_compound_CellPainting_subsample.tsv.gz")
# +
# Reshape wide (one column per dose) to long (one row per compound/dose)
# for plotting, and recode dose labels to concentration strings.
cpd_score_summary_df = (
    df_cpd_med_score
    .reset_index()
    .rename(columns={"index": "compound"})
    .melt(
        id_vars=["compound", "no_of_replicates"],
        value_vars=["dose_1", "dose_2", "dose_3", "dose_4", "dose_5", "dose_6"],
        var_name="dose",
        value_name="median_replicate_score"
    )
)
cpd_score_summary_df.dose = cpd_score_summary_df.dose.replace(dose_recode_info)
cpd_score_summary_df.to_csv(cpd_summary_file, sep="\t", index=False)
cpd_score_summary_df.head()
| 1.Data-exploration/Profiles_level4/cell_painting/6.cellpainting_calculate_subsampled_replicatescores.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## Learning at DSSG
#
# The primary goal of the Data Science for Social Good program is fellow training. We take learning seriously.
#
# DSSG Data Bootcamp is meant to kick off the three months of learning that is DSSG by making sure everyone can perform essential data science tasks using a common set of tools. Through project-based learning, DSSG Bootcamp will:
#
# 1. Increase capacity among fellows for data-intensive work using modern open source tools, infrastructure, workflows, resources, and practices.
#
# 2. Develop a strong sense of collaboration, cooperation, and peer learning among fellows, willingness to ask for help, willingness to share knowledge, and forums for sharing resources.
#
# 3. Help fellows learn a bit more about the City of Chicago in the process.
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Here are some basic principles for training week
#
# - Effortful learning is the most effective form of learning
# - Try, Ask, Try Again, Teach
# - Individual and team learning matters more than output
# - Be open with your team members when you don’t know how to do something.
# - If your team member doesn’t know how to do something and you do, help them.
# - If you are doing something you already know how to do by yourself, you’re doing this wrong.
# - Google. Use it. It's great.
# - StackOverflow. Use it. It's greater.
# - DSSGOverflow. Ask a mentor or nearby team. They're the greatest.
# - Focus on doing tasks you don’t know how to do. Don’t take the easy route.
# - Be patient with yourself and with each other, this is not a race.
# - To improve team learning, try Mob Programming.
# -
| notebooks/01-Learning_at_DSSG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/toandaominh1997/Kaggle/blob/master/Data%20Science%20London%20Scikit/Data_Science_London_%2B_Scikit.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="4l28XE3xXR0j" colab_type="code" colab={}
import numpy as np
import pandas as pd
import os
import sys
# + id="LQxTpmbdXWQl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="1fd9cc23-1780-4fc5-930f-23573fb5bb76"
# Toggle between Kaggle and Colab execution environments.
KAGGLE = False
path_train = ""
path_label = ""
path_test = ""
if(KAGGLE==True):
    path_train = "../input/train.csv"
    path_label = "../input/label.csv"
    path_test = "../input/test.csv"
else:
    # Running on Colab: mount Google Drive to reach the dataset.
    from google.colab import drive
    drive.mount("/content/drive/")
    path_train = "/content/drive/My Drive/Datasets/ Data Science London + Scikit-learn/train.csv"
    path_label = "/content/drive/My Drive/Datasets/ Data Science London + Scikit-learn/trainLabels.csv"
    path_test = "/content/drive/My Drive/Datasets/ Data Science London + Scikit-learn/test.csv"
def read_data(filename):
    """Load a headerless CSV file into a pandas DataFrame."""
    return pd.read_csv(filename, header=None)
# The competition files ship without header rows.
train = read_data(path_train)
label = read_data(path_label)
test = read_data(path_test)
# + id="pxtk-YEJX7cG" colab_type="code" colab={}
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# NOTE(review): `label` is a single-column DataFrame; sklearn estimators
# expect a 1-D y (e.g. label.values.ravel()) and will warn otherwise.
X_train, X_val, y_train, y_val = train_test_split(train, label, test_size=0.2, random_state=42)
# + [markdown] id="qrpLOAHSbNVt" colab_type="text"
# # Logistic Regression
# + id="mhRWsqptYfun" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="0ff2b520-ebea-46b8-91c9-139daa19238b"
from sklearn.linear_model import LogisticRegression
# Baseline linear classifier with default regularization strength.
lr = LogisticRegression(C=1.0)
lr.fit(X_train, y_train)
print("Accuracy score: ", accuracy_score(y_val, lr.predict(X_val)))
# + [markdown] id="p22ZyE5vbR02" colab_type="text"
# # Support Vector Machine
# + id="-8AmEfP2aaGk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="41992021-bee0-455c-d595-53f66765185d"
from sklearn.svm import SVC
# SVM with slightly relaxed regularization (larger C).
svc = SVC(C=2.0)
svc.fit(X_train, y_train)
print("Accuracy score: ", accuracy_score(y_val, svc.predict(X_val)))
# + [markdown] id="MrgiUInBbtt9" colab_type="text"
# # Random Forest
# + id="pVaa9Pbnbkqz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="a1b15f68-a81d-49a4-c111-05e9242db7e4"
from sklearn.ensemble import RandomForestClassifier
# Random forest with all hyper-parameters left at their defaults.
rf = RandomForestClassifier()
rf.fit(X_train, y_train)
print("Accuracy score: ", accuracy_score(y_val, rf.predict(X_val)))
# + [markdown] id="MiMfbKl9cbtv" colab_type="text"
# # Cross Validation
# + id="D-VOx06IcB8i" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 734} outputId="eb29eac9-5657-4617-c76a-9d433d09306a"
from sklearn.model_selection import KFold
train = np.asarray(train)
test = np.asarray(test)
label = np.asarray(label).ravel()  # flatten (n, 1) labels to the 1-D shape sklearn expects
best = []
score = 0
kf = KFold(n_splits=10)
# Fix: split the arrays that are actually indexed below. The original code
# called kf.split(X_train) (the 80% subset), so the generated fold indices
# did not correspond to `train`/`label`.
for train_index, val_index in kf.split(train):
    X_tr, X_v = train[train_index], train[val_index]
    y_tr, y_v = label[train_index], label[val_index]
    svc = SVC()
    svc.fit(X_tr, y_tr)
    ascore = accuracy_score(y_v, svc.predict(X_v))
    # Keep the classifier from the best-scoring fold.
    if ascore > score:
        score = ascore
        best = svc
print("Accuracy score: ", accuracy_score(y_v, best.predict(X_v)))
# + id="jt-YNwnlevKs" colab_type="code" colab={}
# Build the submission file using the best cross-validated SVC.
submit = pd.DataFrame()
submit['Id'] = range(1, test.shape[0]+1)
submit['Solution'] = best.predict(test)
submit.to_csv('submit.csv', index=False)
# + id="iHmHuWLDgpRP" colab_type="code" colab={}
| Data Science London Scikit/Data_Science_London_+_Scikit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convert CSV to a graph
import networkx as nx
import pandas as pd
def import_csv(filename):
    """Read *filename* (a CSV file) into a pandas DataFrame."""
    return pd.read_csv(filename)
def preprocessing(filename):
    """Tidy the raw survey dataframe: drop the timestamp column and index
    rows by respondent name (removing the index title)."""
    data = import_csv(filename)
    data = data.drop(columns=['Timestamp']).set_index('Name')
    data.index.names = [None]
    return data
# Load and tidy the survey responses, then peek at the first rows.
data = preprocessing("Survey.csv")
data.head(3)
def initialize_graph(data):
    """Build an undirected graph whose nodes are the dataframe's row labels.

    Simplified: the previous version collected an unused `row_names` list
    and an unused `num_rows`, adding nodes one at a time while iterating
    full rows; add_nodes_from over the index is equivalent.
    """
    G = nx.Graph()
    G.add_nodes_from(data.index)
    return G
def build_graph(data):
    """Connect every pair of distinct respondents who gave the same answer
    to at least one question.

    Fixes: the previous version referenced `row_names` and `G`, which were
    locals of initialize_graph and therefore undefined here (NameError);
    used DataFrame.iteritems(), removed in pandas 2.0; and left a debug
    print of every answer column in place.
    """
    G = initialize_graph(data)
    row_names = list(data.index)
    for question, answers in data.items():
        for curr_name in row_names:
            for compare_name in row_names:
                if answers[curr_name] == answers[compare_name] and curr_name != compare_name:
                    G.add_edge(curr_name, compare_name)
    return G
print(G.edges)
| Old/graph_builder.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this notebook, we generate data in a two step process:
#
# 1) We generate cluster centers from a Gaussian distribution with a fixed mean and standard deviation
#
# 2) For each cluster, we generate observed values
# %load_ext autoreload
# %autoreload 2
import numpy as np
import torch
# ## Parameters go here
# +
# Hyper-parameters of the generative model: cluster centres are drawn from
# N(prior_mn, prior_std^2); observations from N(centre, cluster_std^2).
prior_mn = 20.0
prior_std = 5
cluster_stds = [1.0]*500 # Number of entries here determine number of clusters
n_obs_per_cluster = 10
# -
# ## Generate the data
n_clusters = len(cluster_stds)
# Draw one centre per cluster from the prior.
ctrs = np.random.randn(n_clusters)*prior_std + prior_mn
# For each cluster, draw n_obs_per_cluster observations around its centre.
data = [torch.tensor(np.random.randn(n_obs_per_cluster)*std_c + mn_c) for std_c, mn_c in zip(cluster_stds, ctrs)]
# ## Fit prior describing the data
# Learnable prior parameters, deliberately initialised away from the truth.
fit_ctr = torch.tensor(30.0, requires_grad=True)
fit_std = torch.tensor(1.0, requires_grad=True)
optimizer = torch.optim.Adam(params=[fit_ctr, fit_std], lr=.1)
# +
n_smps = 1000
for i in range(1000):
    ll = 0
    optimizer.zero_grad()
    for c_i in range(n_clusters):
        # Monte-Carlo estimate of the marginal likelihood of cluster c_i:
        # sample candidate centres from the current prior and average the
        # (unnormalised) Gaussian likelihood of the cluster's data.
        c_l = 0
        data_i = data[c_i]
        for s_i in range(n_smps):
            ctr = torch.randn(1)*torch.abs(fit_std) + fit_ctr
            var_c = cluster_stds[c_i]**2
            diffs = torch.exp((-.5*(data_i - ctr)**2)/var_c)
            c_l+= torch.prod(diffs) # Ignoring the normalisation, which doesn't depend on the centre
        # NOTE(review): ll is overwritten on each cluster iteration, so
        # only the LAST cluster contributes to the gradient below;
        # accumulating with `ll += ...` was presumably intended -- confirm.
        ll = -1*torch.log(c_l/n_smps)
    if i % 1 == 0:
        print(str(i) + ': ' + str(ll.detach().numpy()))
        print('fit_ctr: ' + str(fit_ctr.detach().numpy()))
        print('fit_std: ' + str(abs(fit_std.detach().numpy())))
    ll.backward()
    optimizer.step()
# -
# Bare expressions: notebook cells displaying the fitted parameters, a
# torch.prod sanity check, and the empirical mean of the true centres.
fit_ctr
fit_std
torch.prod(torch.tensor([1, 2, 4]))
np.mean(ctrs)
| development/Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # KNN
#
# KNN (K-Nearest Neighbors) is a non-parametric method used for classification and regression.
#
# The principle behind nearest neighbor methods is to find a predefined number of training samples closest in distance to the new point, and predict the label from these.
#
# * In KNN classification, the output is a class membership. An object is classified by a vote of its neighbors.
#
# * In KNN regression, the output is the average of the values of $k$ nearest neighbors.
#
# The optimal choice of the value $k$ is highly data-dependent: in general, a larger $k$ suppresses the effects of noise, but makes the classification boundaries less distinct.
# ## Examples
# +
"""KNN classification"""
from sklearn.neighbors import KNeighborsClassifier
X = [[0], [1], [2], [3]]
y = [0, 0, 1, 1]
clf = KNeighborsClassifier(n_neighbors=3,
algorithm="kd_tree")
clf.fit(X, y)
clf.predict([[1.1]]), clf.predict_proba([[0.9]])
# +
"""KNN regression"""
from sklearn.neighbors import KNeighborsRegressor
reg = KNeighborsRegressor(n_neighbors=2,
weights="uniform")
reg.fit(X, y)
reg.predict([[1.5]])
# -
# ## KDTree
#
# A k-d tree (short for k-dimensional tree) is a space-partitioning data structure for organizing points in a k-dimensional space.
#
# It's useful for searching nearest neighbors in KNN.
#
# ```{image} images/kdtree.png
# :alt: kdtree
# :width: 500px
# :align: center
# ```
#
# As shown in the figure above, the k-d tree first splits the dataset along the y-axis into two equal-sized subsets, then splits each subset along the x-axis, and so on.
#
# When searching for nearest neighbors, the k-d tree first locates the leaf containing the new instance, then searches only in and near that leaf.
| machine-learning-book/c1.knn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mask Detection Demo - Automatic Pipeline (3 / 3)
#
# The following example demonstrates how to package a project and how to run an automatic pipeline to train, evaluate, optimize and serve the mask detection model using our saved MLRun functions from the previous notebooks.
#
# 1. [Set up the project](#section_1)
# 2. [Write and save the workflow](#section_2)
# 3. [Run the pipeline](#section_3)
# 4. [Test the pipeline](#section_4)
# Our **pipeline topology** will be:
#
# 
#
# <a id="section_1"></a>
# ## 1. Set up the project
#
# MLRun Project is a container for all the associated code, functions, jobs/workflows and artifacts. Projects can be mapped to git repositories to enable versioning, collaboration, and CI/CD. Read more about [MLRun projects](https://docs.mlrun.org/en/latest/projects/overview.html).
#
# We will register the functions (code packages) and the workflow in the project and execute, projects can be saved/packaged into a git repository and loaded on another cluster or be executed as part of an automated CI/CD workflow.
#
# Create a project using `mlrun.get_or_create_project` (Make sure to load it in case it already exists):
# +
import mlrun
import os
# Set our project's name:
project_name = "mask-detection"
# Create the project:
# (user_project=True appends the current username so project names do not
# collide on shared clusters; context="./" roots the project here)
project = mlrun.get_or_create_project(name=project_name, context="./", user_project=True)
# -
# Before we continue, **please select the desired framework** (comment and uncomment the below lines as needed):
framework = "tf-keras"
# framework = "pytorch"
# ### 1.1. Register the training, evaluation and serving functions
# We'll use our training, evaluation and serving functions from the previous notebooks. To get them, we set them into the project using `project.set_function` and specify additional parameters such as the container image:
# +
# Get our train/evaluation function like in the first notebook:
# (kind="job" runs it as a batch job in the given container image)
project.set_function(
    os.path.join(framework, "training-and-evaluation.py"),
    name="training-and-evaluation",
    kind="job",
    image="mlrun/ml-models",
)
# Get our serving function like in the second notebook:
# (kind="serving" deploys it as a real-time nuclio serving graph)
project.set_function(
    os.path.join(framework, "serving.py"),
    name="serving",
    kind="serving",
    image="mlrun/ml-models",
)
# -
# ### 1.2. Import the open archive and ONNX functions
#
# We will import the following [MLRun Marketplace](https://www.mlrun.org/marketplace/) functions to our project:
# * `open_archive` - Download the images dataset as seen in the first notebook.
# * `onnx_utils` - ONNX integration in MLRun, some utils functions for the ONNX framework, optimizing and converting models from different framework to ONNX using MLRun.
#
# > **Notice** we are using the same `project.set_function` with the `hub://` prefix.
# +
# Import the `open_archive` function to download the images:
project.set_function("hub://open_archive", name="open_archive")
# Import the `onnx_utils` function to optimize the model:
# (the ':development' suffix pins the marketplace function's tag)
project.set_function("hub://onnx_utils:development", name="onnx_utils")
# -
# <a id="section_2"></a>
# ## 2. Write and save the workflow
#
# We will use 3 functions:
# * `mlrun.build_function` - Build an image to use in a MLRun function. ONNX requires extra packages so we will prepare its image in advance.
# * `mlrun.run_function` - Run a local or remote task as part of a local/kubeflow pipeline. Using this function to run our training and the rest of our functions will ensure that if the function is using output from another function, it will run only if the relying function is done.
# * `mlrun.deploy_function` - Deploy a real-time (nuclio-based) function. Our serving function will be deployed with this.
#
# All of the functions above can receive a function object or just the function name (if it is set using `project.set_function` as we did above). We will access our project's functions as needed using the `project.get_function` method, which returns a function object by its name.
#
# > **Note** The serving graph will now serve an ONNX model (not a tf.keras model like before), so we will use `mlrun.frameworks.onnx.ONNXModelServer`.
# +
# %%writefile workflow.py
import mlrun
from kfp import dsl
@dsl.pipeline(name="Mask Detection Pipeline")
def kfpipeline(
    archive_url: str,
    dataset_path: str,
    epochs: int,
    batch_size: int,
    lr: float,
    build_flag: int = 1,
    download_data_flag: int = 1
):
    """End-to-end mask-detection workflow.

    Steps: (conditionally) build the ONNX image, (conditionally) download
    the dataset, train, convert/optimize the model to ONNX, evaluate, and
    deploy the serving graph.

    :param archive_url:        URL of the zipped images dataset.
    :param dataset_path:       Path the dataset is extracted to.
    :param epochs:             Number of training epochs.
    :param batch_size:         Batch size for training and evaluation.
    :param lr:                 Learning rate for training.
    :param build_flag:         Build the ONNX-utils image only when equal to 1.
    :param download_data_flag: Download the dataset only when equal to 1.
    """
    # Get our project object:
    project = mlrun.get_current_project()
    # Write down the ONNX requirements:
    onnx_requirements = [
        "onnx~=1.10.1",
        "onnxruntime~=1.8.1",
        "onnxoptimizer~=0.2.0",
        "onnxmltools~=1.9.0",
        "tf2onnx~=1.9.0"
    ]
    ###########################################################
    ###############   Build the ONNX image:     ###############
    ###########################################################
    # Build only if needed (meaning if 'build_flag' = 1):
    with dsl.Condition(build_flag == 1) as build_condition:
        mlrun.build_function(
            function="onnx_utils",
            base_image="mlrun/ml-models",
            requirements=onnx_requirements
        )
    ###########################################################
    ###############   Download the dataset:     ###############
    ###########################################################
    # Download only if needed (meaning if 'download_data_flag' = 1):
    with dsl.Condition(download_data_flag == 1) as download_data_condition:
        # Mount it:
        project.get_function("open_archive").apply(mlrun.mount_v3io())
        # Run it using the 'open_archive' handler:
        open_archive_run = mlrun.run_function(
            function="open_archive",
            handler="open_archive",
            name="download_data",
            inputs={"archive_url": archive_url},
            params={"target_path": dataset_path}
        )
    ####################################################
    ###############   Train a model:     ###############
    ####################################################
    # Mount it:
    project.get_function("training-and-evaluation").apply(mlrun.mount_v3io())
    # Run it using our 'train' handler. `.after(download_data_condition)`
    # makes training wait for the (possibly skipped) download step:
    training_run = mlrun.run_function(
        function="training-and-evaluation",
        handler="train",
        name="training",
        params={
            "dataset_path": dataset_path,
            "batch_size": batch_size,
            "lr": lr,
            "epochs": epochs
        },
        outputs=["mask_detector"]
    ).after(download_data_condition)
    ###################################################################
    ###############   Convert to ONNX and optimize:     ###############
    ###################################################################
    # Mount it:
    project.get_function("onnx_utils").apply(mlrun.mount_v3io())
    # Run it using the 'to_onnx' handler; depends on the image build step:
    to_onnx_run = mlrun.run_function(
        function="onnx_utils",
        handler="to_onnx",
        name="optimizing",
        params={
            "model_path": training_run.outputs['mask_detector'],
            "onnx_model_name": 'onnx_mask_detector'
        },
        outputs=["onnx_mask_detector"],
    ).after(build_condition)
    #########################################################
    ###############   Evaluate the model:     ###############
    #########################################################
    # Run it using our 'evaluate' handler (implicitly ordered after training
    # because it consumes `training_run.outputs`):
    evaluation_run = mlrun.run_function(
        function="training-and-evaluation",
        handler="evaluate",
        name="evaluating",
        params={
            "model_path": training_run.outputs['mask_detector'],
            "dataset_path": dataset_path,
            "batch_size": batch_size
        }
    )
    ################################################################################
    ###############   Deploy the model as a serverless function:     ###############
    ################################################################################
    # Get the function:
    serving_function = project.get_function("serving")
    # Mount it:
    serving_function.apply(mlrun.mount_v3io())
    # Set the topology and get the graph object:
    graph = serving_function.set_topology("flow", engine="async")
    # Build the serving graph: resize -> preprocess -> ONNX model -> postprocess.
    graph.to(handler="resize", name="resize")\
        .to(handler="preprocess", name="preprocess")\
        .to(class_name="mlrun.frameworks.onnx.ONNXModelServer", name="onnx_mask_detector", model_path=project.get_artifact_uri("onnx_mask_detector"))\
        .to(handler="postprocess", name="postprocess").respond()
    # Set the desired requirements:
    serving_function.with_requirements(requirements=onnx_requirements)
    # Deploy the serving function once the ONNX model is ready:
    mlrun.deploy_function("serving").after(to_onnx_run)
# -
# Note that after running the cell above, the `workflow.py` file is created. Saving your workflow to file allows you to run the project from a different environment.
#
# In order to take this project with the functions we set and the workflow we saved over to a different environemnt, first set the workflow to the project. The workflow can be set using `project.set_workflow`. After setting it, we will save the project by calling `project.save`. When loaded, it can be run from another environment from both code and from cli. For more information regarding saving and loading a MLRun project, see the [documentation](https://docs.mlrun.org/en/latest/projects/overview.html).
# +
# Register the workflow file under a name we can run it by:
workflow_name = "mask_detection_workflow"
project.set_workflow(workflow_name, "workflow.py")
# Save the project so the workflow registration persists and the project
# can be loaded from another environment (code or CLI):
project.save()
# -
# <a id="section_3"></a>
# ## 3. Run the pipeline
#
# We can immediately run the project, or save it to a git repository and load/run it on another cluster or CI/CD workflow. In order to load the project from a git you should run the following command (read more about [projects and CI/CD](https://docs.mlrun.org/en/latest/projects/overview.html)):
#
# ```python
# project = mlrun.load_project(context="./", url="git://github.com/<org>/<project>.git")
# ```
# or use the CLI command:
#
# ```bash
# mlrun project -u "git://github.com/mlrun/project-demo.git" ./
# ```
#
# We will run the pipeline using `project.run` with the workflow name we used:
# Run the registered workflow end-to-end; watch=True blocks until completion.
# NOTE(review): assumes `os` was imported earlier in the notebook — confirm.
project.run(
    name=workflow_name,
    arguments={
        "archive_url": mlrun.get_sample_path("data/prajnasb-generated-mask-detection/prajnasb_generated_mask_detection.zip"),
        "dataset_path": os.path.abspath("./Dataset"),
        "epochs": 3,
        "batch_size": 32,
        "lr": 1e-3,
        "build_flag": 1,
        "download_data_flag": 1
    },
    watch=True
)
# <a id="section_4"></a>
# ## 4. Test the pipeline
#
# We will test it as seen in the previous notebook (with the `image_urls` list and `print_image_classification` function):
# + jupyter={"source_hidden": true}
import requests
from PIL import Image
from io import BytesIO
import matplotlib.pyplot as plt
# The local mock server is searching to import the functions, so here we simply import them from the file:
import sys
sys.path.insert(0, os.path.abspath(f"./{framework}"))
from serving import resize, preprocess, postprocess
# Our predictions headers:
classes = ["with mask", "without mask"]
# The image urls to use in the tests, feel free to add your own images and test them out:
image_urls = [
'https://s3.envato.com/files/321053037/323%202020_June_PORTO_446-Edit.jpg', # With mask
'https://media.glamour.com/photos/5a425fd3b6bcee68da9f86f8/master/pass/best-face-oil.png' # Without mask
]
def print_image_classification(image_url: str, prediction: dict):
    """Display the image at *image_url* and print its mask classification.

    Expects *prediction* to carry 'class', 'with_mask' and 'without_mask'
    keys, as returned by the serving graph's postprocess step.
    """
    # Fetch the image bytes and render them:
    raw = requests.get(image_url).content
    plt.imshow(Image.open(BytesIO(raw)))
    # Report the predicted class and the per-class probabilities:
    print(f"Classified: {classes[prediction['class']]}\n")
    print(f"With mask probability: {prediction['with_mask']}")
    print(f"Without mask probability: {prediction['without_mask']}")
# -
print_image_classification(
image_url=image_urls[0],
prediction=project.get_function("serving").invoke(path='/predict', body={"data_url": [image_urls[0]]})
)
print_image_classification(
image_url=image_urls[1],
prediction=project.get_function("serving").invoke(path='/predict', body={"data_url": [image_urls[1]]})
)
| mask-detection/3-automatic-pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rheopy/rheofit/blob/master/notebooks/example%20appmode/yield%20stress%20model%20selection%20file%20upload_new.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="-ez3csx9sJcj"
# # Fit Flow curve
# Notebook to create interactive app with function:
# * Upload rheology xls file exported from trios with multitab option selected
# * Alternatively
# * Execute Fit to flow curve data with model or models selected from a list
# + id="i4mUuSrcsRET" cellView="form"
#@title
# !pip install ipympl
# !pip install git+https://github.com/rheopy/rheofit.git
from IPython.display import clear_output
clear_output()
# + id="pzeVoxFouZ8e" cellView="form"
#@title #Import cell
#@markdown * Run this cell to import the required libraies
# %matplotlib inline
import ipympl
import sys
sys.path.append("./../../")
import os
import ipywidgets as widgets
import pandas as pd
import matplotlib.pyplot as plt
import rheofit
import lmfit
# + id="mPndzUYasJcq" cellView="form" outputId="79978fbf-2115-4298-ddc6-75c237128951" colab={"base_uri": "https://localhost:8080/", "height": 49, "referenced_widgets": ["e72b2e7096a24ed5bdd43d8c112fa89d", "20246c3000bb4abea5fa4a85949184a4", "14f793e7d89249cbbfdf28b005bbd48f"]}
#@title # Select and upload data file (currently 1 file only)
#@markdown The assumed file format is xls file exported from trios with multitab option selected
import io
w = widgets.FileUpload(multiple=False)
w
# + id="iWSlJtfIsJcw" cellView="form" outputId="323c5fea-f659-45d9-8ad8-fc99899c7b9e" colab={"base_uri": "https://localhost:8080/", "height": 62, "referenced_widgets": ["a78eebf691f84510802aea190a0a0c66", "8ec5e0e08b4642e5afaf4076a4b746a3", "87b079d49ad146a9a0607ddf52327f5d", "6b6dc9a4dea6404ca61f8fa58dc12466", "0eb4a22212c14e62963cb56344d12a19", "5573a73878a54c8497573b899e9f8726", "c15ebef7d18d4ae385a09a0e529c9662", "4051e01c6c3b423887ffba45805dae16"]}
#@title
Shear_rate_col=widgets.Textarea(description='Shear Rate Column [1/s]')
Stress_col=widgets.Textarea(description='Shear Stress Column [Pa]')
display(widgets.HBox([Shear_rate_col,Stress_col]))
# + code_folding=[] id="qZKxtfkWsJc1" cellView="form" outputId="16e2b205-b926-44eb-f3e3-5881d31cd2a9" colab={"base_uri": "https://localhost:8080/", "height": 333, "referenced_widgets": ["f7064bb24eb243ccb89a81dfbe02923c", "e2be4b5c95c844be8efb31afd1d6f598", "26f7f15bc7c6448c890c21856b08aadd"]}
#@title # Select Rheological model from the list of available models
model_list=[rheofit.models.Bingham_model,
rheofit.models.casson_model,
rheofit.models.HB_model,
rheofit.models.Powerlaw_model,
rheofit.models.TC_model,
rheofit.models.carreau_model,
rheofit.models.carreau_model+rheofit.models.TC_model,
rheofit.models.TCn_model,
]
for model in model_list:
try:
print(model.name)
display(model.model_expression)
except:
pass
select_model_widget=widgets.Dropdown(options={item.name:item for item in model_list})
select_model_widget
# + id="JJ7S7K8_sJc8" cellView="form" outputId="9bf5f255-7037-4070-f149-21e920b73373" colab={"base_uri": "https://localhost:8080/", "height": 79, "referenced_widgets": ["17de98e5f136463681d31894a5f0cf3a", "57f8c5c2b524430cac2405fd4bf1a067", "5b3a0003e3854557a80f1848d34643f1", "ec0afd882c3a4cc28c934c95fbb05248", "ec090aaafcd84828bc9339e8b8e3d228", "f1f74f5df13743028029823c05aed47b"]}
#@title Select data range and iterative mode
data_limit_widget_min=widgets.Text(description='data_min',value='0.0001')
data_limit_widget_max=widgets.Text(description='data_max',value='1000')
display(data_limit_widget_min)
display(data_limit_widget_max)
# + id="Rwp52tq2sJdE" cellView="form"
#@title
def make_par_widget(fit_function):
    """Build and display the parameter-editor widget for the selected model.

    Reads data either from the pasted text columns (when non-empty) or from
    the uploaded Excel file, runs an initial fit with the model chosen in
    `select_model_widget`, and creates one editable row (name / min / value /
    max / fix) per parameter. Also creates the global `iterate_wid` and
    `weight_widget` controls consumed later by `execute_analysis`.

    NOTE(review): `fit_function` is unused — the model comes from the global
    widget; the parameter is kept for interface compatibility.
    """
    # BUG FIX: the original compared with `is not ''` — an identity check
    # that only worked via CPython string interning and raises a
    # SyntaxWarning on Python 3.8+. Use a value comparison.
    if Shear_rate_col.value != '':
        # Parse the pasted newline-separated numbers into two float columns:
        shear_rate = pd.Series([y for y in (x.strip() for x in Shear_rate_col.value.splitlines()) if y], name='Shear rate').astype(float)
        shear_stress = pd.Series([y for y in (x.strip() for x in Stress_col.value.splitlines()) if y], name='Stress').astype(float)
        rheodata = pd.concat([shear_rate, shear_stress], axis=1)
    else:
        # Fall back to the uploaded Excel file; the data is on the last sheet.
        datafile_object = pd.ExcelFile(io.BytesIO(w.value[list(w.value.keys())[0]]['content']))
        table_name_list = datafile_object.sheet_names
        rheodata = datafile_object.parse(table_name_list[-1], skiprows=1).drop(0).reset_index().astype('float')
    # Initial fit, only to obtain the model's parameter set and defaults:
    res_fit = select_model_widget.value.fit(rheodata['Stress'], x=rheodata['Shear rate'])
    model_name_widget = widgets.Text(description='Model name', value=res_fit.model.name,
                                     style={'description_width': 'initial'},
                                     layout=widgets.Layout(width='80%'))
    global iterate_wid
    iterate_wid = widgets.Checkbox(description='Iterative data range selection',
                                   value=False,
                                   style={'description_width': 'initial'},
                                   layout=widgets.Layout(width='80%'))
    global weight_widget
    # 'relative' maps to None (weights = 1/stress); 'absolute' maps to 1:
    weight_widget = widgets.Dropdown(description='Weight', options={'relative': None, 'absolute': 1}, value=None, label='relative')
    # One HBox row per fit parameter: [name, min, value, max, fix]:
    par_list_wid = []
    for param in res_fit.params:
        par_list_wid.append(widgets.HBox([widgets.Text(value=param),
                                          widgets.Text(description='min/val/max', value=str(res_fit.params[param].min)),
                                          widgets.Text(value=str(res_fit.params[param].value)),
                                          widgets.Text(value=str(res_fit.params[param].max)),
                                          widgets.Checkbox(description='fix', value=not (res_fit.params[param].vary))
                                          ]))
    box_layout = widgets.Layout(display='flex',
                                flex_flow='column',
                                align_items='stretch',
                                border='solid')
    par_widgets = widgets.VBox([model_name_widget, iterate_wid, weight_widget] + par_list_wid, layout=box_layout)
    display(par_widgets)
    return par_widgets
def make_par_from_widget(par_widget):
    """Rebuild an lmfit.Parameters object from the parameter-editor widget.

    Children 0-2 of *par_widget* are the model-name / iterate / weight
    widgets; parameter rows start at index 3. Each row holds the widgets
    [name, min, value, max, fix-checkbox].
    """
    params = lmfit.Parameters()
    for row in par_widget.children[3:]:
        name_w, min_w, value_w, max_w, fix_w = row.children
        params.add(name_w.value,
                   min=float(min_w.value),
                   value=float(value_w.value),
                   max=float(max_w.value),
                   vary=not fix_w.value)
    return params
# + id="wj0M5EaHsJdK" cellView="form" outputId="569ea1c8-c633-452a-ee21-c0bfd92bf737" colab={"base_uri": "https://localhost:8080/", "height": 247, "referenced_widgets": ["e9da8debf4f54b4698875346cada3527", "784729925f4d4c649a59088147dd4d9c", "b92de1018b404d6fac91253be0c8ca37", "6066bd0d84db45ea86c2e8b60004d280", "<KEY>", "452bbefba5b64b8d9002338ab6e3eb3f", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "e58f46410c98436182658e912adfa392", "e2bb3805157e46a596b7fb0087b0897a", "4127a0018fa34a6e93960d4900e9a609", "433d32eefe8e404e8730973a25121ab1", "1d752082a3544ca7aa07a95ad03524ac", "<KEY>", "<KEY>", "<KEY>", "76b1f0c66a454d31b346eee85124bc04", "<KEY>", "<KEY>", "dabb180e2c094e1d97479f3683b00607", "2cb21d3955394ed9894c0ef51066d229", "d686639ede6f42ed8ef30fd9aa4cb8e9", "<KEY>", "<KEY>", "6eff5721662f469796ed1b5e92e16424", "<KEY>", "6634f629f3d848c1b1e3898a81e3a786", "63f43df7a4104428a46ea61d39809e86", "<KEY>", "052e10e7b2304d9fa4f9af80af831359", "8fca03ddff08455490b0da4b4fac8eb0", "<KEY>", "04cf34ed49cd48da9b0a8126896d6189", "0ac3f87cda3f47feb03c10b42a9dbe0e", "<KEY>", "<KEY>", "e70955564c6e4c039d7167518ce54775", "6597c75eb555475db8c5af5550ff464d", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "1cc7bc1e05454d7d9e67896388176154", "88dca1800b1e4d3ebfa16dd4192c5d2c", "<KEY>", "b368a4d085f14b6f9131506833dce0a6", "dc9bcce44f624510a3866b56594e07a6", "<KEY>", "15faedd7cc4e4eacac7dbcaf1644a8d4", "<KEY>", "<KEY>", "6809a8f33ede4a2891ac8f52a5c8f709", "<KEY>", "<KEY>", "a7e63c9c588a4cf4914b104ecc337e2a", "f45fe6c3b9ef4389b482d77c09ddea9c", "<KEY>", "6ec454e106b5469c8119a5dfdbf17e14", "4d3df33b645b4cc48a0d1d511be1dfdd", "<KEY>", "6f2405cc52a54fe8a20b6937469407b7", "<KEY>", "<KEY>"]}
#@title
# Output area that captures everything the callback prints/displays:
parameter_display_widget=widgets.Output()

@parameter_display_widget.capture()
def prepare_analysis(sender):
    """Button callback: rebuild the parameter editor for the selected model."""
    # Wipe any previously displayed parameter widgets first:
    clear_output()
    model=select_model_widget.value
    # `par_wid` is read later by execute_analysis via make_par_from_widget:
    global par_wid
    par_wid=make_par_widget(model)

prepare_analysis_button=widgets.Button(description='Prepare Analysis')
prepare_analysis_button.on_click(prepare_analysis)
display(prepare_analysis_button)
display(parameter_display_widget)
# + id="CnhHHZnHsJdZ" cellView="form" outputId="7f88767c-c8af-42d3-e645-56abfd690c86" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["0a794cfda6d042ada79ff8f99db335b1", "328742d3817b4b728fdc3f113582dfd6", "794adb9ebd3c421e8a3fb981bdd1f721", "4fd19d7f10c94402911f6c195bccc202", "bd2ad54fb8d3414bae482ac551eb690b", "d2fa094bc283496b956cea512eef08f7"]}
#@title
def execute_analysis(sender):
    """Button callback: fit the selected model to the selected data range.

    Reads the data from the pasted columns or the uploaded file, restricts
    it to [data_min, data_max], applies relative (1/stress) or absolute
    (uniform) weighting, and fits the model. When the iterative checkbox is
    set, the lower shear-rate cut is repeatedly raised to the fitted
    TC_gammadot_crit until the fit window is self-consistent. The result is
    stored in the global `fit` and displayed.
    """
    # Close figures left over from a previous run:
    figlist = plt.get_fignums()
    for i in figlist:
        plt.close(i)
    # BUG FIX: the original compared with `is not ''` — an identity check
    # that only worked via CPython string interning and raises a
    # SyntaxWarning on Python 3.8+. Use a value comparison.
    if Shear_rate_col.value != '':
        shear_rate = pd.Series([y for y in (x.strip() for x in Shear_rate_col.value.splitlines()) if y], name='Shear rate').astype(float)
        shear_stress = pd.Series([y for y in (x.strip() for x in Stress_col.value.splitlines()) if y], name='Stress').astype(float)
        rheodata = pd.concat([shear_rate, shear_stress], axis=1)
    else:
        datafile_object = pd.ExcelFile(io.BytesIO(w.value[list(w.value.keys())[0]]['content']))
        table_name_list = datafile_object.sheet_names
        rheodata = datafile_object.parse(table_name_list[-1], skiprows=1).drop(0).reset_index().astype('float')
    # Restrict to the user-selected shear-rate window:
    condition = (rheodata['Shear rate'] > float(data_limit_widget_min.value)) & (rheodata['Shear rate'] < float(data_limit_widget_max.value))
    data = rheodata[condition]
    # Relative weighting (1/stress) by default; absolute => uniform weights:
    weights = 1 / data['Stress']
    if weight_widget.value == 1:
        weights = data['Stress'] ** 0
    model = select_model_widget.value
    params = make_par_from_widget(par_wid)
    global fit
    if iterate_wid.value is True:
        iterate = True
        while iterate:
            data = rheodata[condition]
            weights = 1 / data['Stress']
            if weight_widget.value == 1:
                weights = data['Stress'] ** 0
            fit = model.fit(data['Stress'], x=data['Shear rate'], weights=weights, params=params)
            # If the fitted critical shear rate lies above the window's
            # lower edge, tighten the window and refit; otherwise done.
            if min(data['Shear rate']) < fit.params['TC_gammadot_crit'].value:
                condition = (rheodata['Shear rate'] > float(data_limit_widget_min.value)) & (rheodata['Shear rate'] < float(data_limit_widget_max.value)) & (rheodata['Shear rate'] > fit.params['TC_gammadot_crit'].value)
            else:
                iterate = False
    else:
        fit = model.fit(data['Stress'], x=data['Shear rate'], weights=weights, params=params)
    res_table = rheofit.models.show_parameter_table(fit)
    display(res_table)
    display(fit)
    plt.figure(figsize=(10, 10))
    display(rheofit.visualization.plot_fit_res(fit))
    # fig=plt.figure()
    # ax=fig.add_subplot(1,1,1)
    # ax.plot(data['Shear rate'],data['Stress'],'o',label='Analized data')
    # ax.plot(rheodata['Shear rate'],rheodata['Stress'],'o',label='Available data',mfc='none')
    # ax.set_xscale('log')
    # ax.set_yscale('log')
    # ax.plot(rheodata['Shear rate'],fit.eval(x=rheodata['Shear rate']), label='Best fit')
    # ax.set_xlabel('Shear rate [1/s]')
    # ax.set_ylabel('Stress [Pa]')
    # fig.show();
exec_analysis_button = widgets.Button(description='Execute Analysis')
exec_analysis_button.on_click(execute_analysis)


def clear_analysis(_event):
    """Close every open figure, wipe the cell output, and re-show the buttons."""
    for fignum in plt.get_fignums():
        plt.close(fignum)
    clear_output()
    display(exec_analysis_button)
    display(clear_analysis_button)


clear_analysis_button = widgets.Button(description='Clear Analysis')
clear_analysis_button.on_click(clear_analysis)
display(exec_analysis_button)
display(clear_analysis_button)
| notebooks/example appmode/yield stress model selection file upload_new.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# PyTorch: Tensors and autograd
# -------------------------------
#
# A third order polynomial, trained to predict $y=\sin(x)$ from $-\pi$
# to $\pi$ by minimizing squared Euclidean distance.
#
# This implementation computes the forward pass using operations on PyTorch
# Tensors, and uses PyTorch autograd to compute gradients.
#
#
# A PyTorch Tensor represents a node in a computational graph. If ``x`` is a
# Tensor that has ``x.requires_grad=True`` then ``x.grad`` is another Tensor
# holding the gradient of ``x`` with respect to some scalar value.
#
#
# +
import torch
import math

dtype = torch.float
device = torch.device("cpu")
# device = torch.device("cuda:0")  # Uncomment this to run on GPU

# Training data: 2000 samples of y = sin(x) on [-pi, pi].
# requires_grad defaults to False — we never need gradients w.r.t. the data.
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)

# Model parameters: the four coefficients of y = a + b x + c x^2 + d x^3.
# requires_grad=True asks autograd to track them in the backward pass.
a = torch.randn((), device=device, dtype=dtype, requires_grad=True)
b = torch.randn((), device=device, dtype=dtype, requires_grad=True)
c = torch.randn((), device=device, dtype=dtype, requires_grad=True)
d = torch.randn((), device=device, dtype=dtype, requires_grad=True)

learning_rate = 1e-6
for step in range(2000):
    # Forward pass: evaluate the polynomial on the whole sample at once.
    y_pred = a + b * x + c * x ** 2 + d * x ** 3

    # Sum-of-squares loss; .item() extracts the Python scalar for printing.
    loss = (y_pred - y).pow(2).sum()
    if step % 100 == 99:
        print(step, loss.item())

    # Backward pass: populates .grad on every requires_grad=True Tensor.
    loss.backward()

    # Gradient-descent update. torch.no_grad() stops autograd from
    # recording the in-place updates on the tracked parameters.
    with torch.no_grad():
        for coef in (a, b, c, d):
            coef -= learning_rate * coef.grad
            # Reset the accumulated gradient before the next iteration.
            coef.grad = None

print(f'Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3')
| source/pytorch/pytorch_with_examples/polynomial_autograd.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
# read video file
cap = cv2.VideoCapture(r'/home/divyanshu/Desktop/hollywood/videoclips/Butterfly Effect, The - 00696.avi')
# count frame
count = 0
# increment frame counter
def getFrameNumber(count):
    """Return the next frame number (the current count plus one).

    The original reassigned a local copy of *count* before returning it,
    which obscured that the caller's counter is NOT mutated — the caller
    must still increment its own ``count`` separately.
    """
    return count + 1
# Dump every frame of the opened video to numbered JPEG files.
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    # if frame exists
    if ret == True:
        # Display results
        # cv2.imshow("Results", frame)
        # save current frame. Files are numbered starting at count+1:
        # getFrameNumber returns count+1 without mutating count, and the
        # separate increment below keeps the numbering consistent.
        cv2.imwrite(r"/home/divyanshu/Desktop/picture-output/%d.jpg"%getFrameNumber(count), frame)
        # increment counter
        count = count + 1
    else:
        # cap.read() failed: end of video (or a read error) — stop.
        break
# ESC to stop algorithm
# key = cv2.waitKey(7) % 0x100
# if key == 27:
#     break
# Clear all windows
#cv2.destroyAllWindows()
# +
import numpy as np
import cv2
from matplotlib import pyplot as plt

# Rectangle-initialized GrabCut foreground extraction on a single image.
img = cv2.imread('17.jpg')
# Label mask, initialized to 0; grabCut fills it with GC_* labels.
mask = np.zeros(img.shape[:2],np.uint8)
# Internal model buffers required by grabCut (the (1, 65) shape is
# mandated by the OpenCV API).
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
# Initial rectangle (x, y, w, h) assumed to contain the foreground.
rect = (50,50,450,290)
# Run 5 iterations, initialized from the rectangle.
cv2.grabCut(img,mask,rect,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)
# Keep foreground / probable-foreground pixels (labels 1 and 3); zero the
# background / probable-background ones (labels 0 and 2).
mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')
img = img*mask2[:,:,np.newaxis]
plt.imshow(img),plt.colorbar(),plt.show()
| grabcut.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Laboratorio 10
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import plot_confusion_matrix, classification_report
# %matplotlib inline
# -
breast_cancer = load_breast_cancer()
X, y = breast_cancer.data, breast_cancer.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
target_names = breast_cancer.target_names
# ## Ejercicio 1
#
# (1 pto.)
#
# Ajusta una regresión logística a los datos de entrenamiento y obtén el _accuracy_ con los datos de test. Utiliza el argumento `n_jobs` igual a $-1$, si aún así no converge aumenta el valor de `max_iter`.
#
# Hint: Recuerda que el _accuracy_ es el _score_ por defecto en los modelos de clasificación de scikit-learn.
# n_jobs=-1 uses all cores; max_iter is raised so the solver converges on
# the unscaled breast-cancer features.
lr = LogisticRegression(n_jobs=-1, max_iter=3000)
lr.fit(X_train, y_train)
print(f"Logistic Regression accuracy: {lr.score(X_test, y_test):0.2f}")
# ## Ejercicio 2
#
# (1 pto.)
#
# Utiliza `GridSearchCV` con 5 _folds_ para encontrar el mejor valor de `n_neighbors` de un modelo KNN.
# +
knn = KNeighborsClassifier()
# Search neighborhood sizes 2..30 inclusive:
knn_grid = {"n_neighbors": np.arange(2, 31)}
knn_cv = GridSearchCV(
    estimator=knn,
    param_grid=knn_grid,
    # The exercise asks explicitly for 5 folds — state it rather than
    # relying on GridSearchCV's default:
    cv=5
)
knn_cv.fit(X_train, y_train)
knn_cv.best_params_
# -
# (typo fix: "accuray" -> "accuracy")
print(f"KNN accuracy: {knn_cv.score(X_test, y_test):0.2f}")
# ## Ejercicio 3
#
# (1 pto.)
#
# ¿Cuál modelo escogerías basándote en los resultados anteriores? Justifica
# __Respuesta:__ Escogeria la regresion logistica porque obtuvimos un resultado mayor al knn, y eso implica que el modelo dara una mejor prediccion.
# ## Ejercicio 4
#
# (1 pto.)
#
# Para el modelo seleccionado en el ejercicio anterior.
#
# * Grafica la matriz de confusión (no olvides colocar los nombres originales en los _labels_).
# * Imprime el reporte de clasificación.
# `plot_confusion_matrix` was deprecated in scikit-learn 1.0 and removed in
# 1.2; ConfusionMatrixDisplay.from_estimator is the supported replacement
# producing the same plot.
from sklearn.metrics import ConfusionMatrixDisplay

ConfusionMatrixDisplay.from_estimator(lr, X_test, y_test, display_labels=target_names)
# Use the `target_names` alias defined above for consistency:
print(classification_report(y_test, lr.predict(X_test), target_names=target_names))
| labs/lab10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sqlalchemy import create_engine, inspect, MetaData, Table
engine = create_engine('mysql://shukur:*******@localhost/datacamp')
# `Engine.table_names()` was deprecated in SQLAlchemy 1.4 and removed in
# 2.0; the inspector API (already imported above) is the replacement.
inspect(engine).get_table_names()
# ### Loading a csv into database
import pandas as pd

# pandas opens the path itself — no explicit file handle needed.
data_df = pd.read_csv('international_debt.csv')
# Load the frame into MySQL, using the frame's index as an `id` column:
data_df.to_sql('international_debt', con=engine, index=True, index_label='id', if_exists='replace')
# %load_ext sql
# %sql mysql://shukur:*******@localhost/datacamp
# ### 1. The World Bank's international debt data
# + language="sql"
#
# SELECT *
# FROM international_debt
# LIMIT 10;
# -
# ### 2. Finding the number of distinct countries
# + language="sql"
# SELECT COUNT(DISTINCT country_name) AS total_distinct_countries
# FROM international_debt;
# -
# ### 3. Finding out the distinct debt indicators
# + language="sql"
# SELECT DISTINCT(indicator_code) AS distinct_debt_indicators
# FROM international_debt
# ORDER BY distinct_debt_indicators;
# -
# ### 4. Totaling the amount of debt owed by the countries
# + language="sql"
# SELECT
# ROUND(SUM(debt)/1000000, 2) AS total_debt
# FROM international_debt;
# -
# ### 5. Country with the highest debt
# + language="sql"
# SELECT
# country_name,
# SUM(debt) AS total_debt
# FROM international_debt
# GROUP BY country_name
# ORDER BY total_debt DESC
# LIMIT 1;
# -
# ### 6. Average amount of debt across indicators¶
# + language="sql"
# SELECT
# indicator_code AS debt_indicator,
# indicator_name,
# AVG(debt) AS average_debt
# FROM international_debt
# GROUP BY debt_indicator, indicator_name
# ORDER BY average_debt
# LIMIT 10;
# -
# ### 7. The highest amount of principal repayments¶
# + language="sql"
# SELECT
# country_name,
# indicator_name
# FROM international_debt
# WHERE debt = (SELECT
# MAX(debt)
# FROM international_debt
# WHERE indicator_code ='DT.AMT.DLXF.CD');
# -
# ### 8. The most common debt indicator
# + language="sql"
# SELECT indicator_code, COUNT(*) AS indicator_count
# FROM international_debt
# GROUP BY indicator_code
# ORDER BY indicator_count DESC
# LIMIT 20;
# -
# ### 9. Other viable debt issues and conclusion
# + language="sql"
# SELECT country_name, MAX(debt) AS maximum_debt
# FROM international_debt
# GROUP BY country_name
# ORDER BY maximum_debt DESC
# LIMIT 10;
# -
| notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] tags=["worksheet-0"]
# # Create a month/day by Year view of the daily sea ice index data.
# + tags=["worksheet-0"]
tmp_dir = "../data"
# + tags=["worksheet-0"]
# !mkdir -p ../data
# !wget -P ../data -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/north/daily/data/NH_seaice_extent_final.csv
# !wget -P ../data -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/north/daily/data/NH_seaice_extent_nrt.csv
# !wget -P ../data -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/south/daily/data/SH_seaice_extent_final.csv
# !wget -P ../data -qN ftp://sidads.colorado.edu/pub/DATASETS/NOAA/G02135/south/daily/data/SH_seaice_extent_nrt.csv
# + [markdown] tags=["worksheet-0"]
# Variables to set before running:
#
# + tags=["worksheet-0"]
climatology_years = (1981, 2010)
# + tags=["worksheet-0"]
import datetime as dt
import numpy as np
import os
import pandas as pd
from pandas import ExcelWriter
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
pd.options.display.mpl_style = 'default'
# + tags=["worksheet-0"]
def parse_the_date(year, mm, dd):
    """Build a datetime.date from string or numeric year/month/day fields."""
    return dt.date(*(int(part) for part in (year, mm, dd)))
def slurp_csv(filename):
    """Read an NSIDC daily sea-ice-extent CSV into a date-indexed DataFrame.

    Skips the two header lines, keeps only the 'extent' column, and indexes
    the frame by a datetime.date built from the year/month/day columns.

    The original used read_csv's `date_parser` keyword, which was
    deprecated in pandas 1.x and removed in 2.0; the date index is now
    built explicitly, with identical results.
    """
    data = pd.read_csv(filename, header=None, skiprows=2,
                       names=["year", "mm", "dd", "extent", "missing", "source"])
    data.index = pd.Index(
        [dt.date(int(y), int(m), int(d))
         for y, m, d in zip(data["year"], data["mm"], data["dd"])],
        name="date")
    data = data.drop(["year", "mm", "dd", "missing", "source"], axis=1)
    return data
def read_a_hemisphere(hemisphere):
    """Load and merge the 'final' and near-real-time extent CSVs for one hemisphere.

    Returns a DataFrame on a gap-free daily DatetimeIndex from 1978-10-25
    through today (days absent from the files become NaN rows), with the
    'extent' column plus a constant 'hemi' label column.
    """
    the_dir = '../data'
    # File names follow {N|S}H_seaice_extent_{final|nrt}.csv, keyed by the
    # hemisphere's first letter:
    final_prod_filename = os.path.join(the_dir, '{hemi}H_seaice_extent_final.csv'.format(hemi=hemisphere[0:1].upper()))
    nrt_prod_filename = os.path.join(the_dir, '{hemi}H_seaice_extent_nrt.csv'.format(hemi=hemisphere[0:1].upper()))
    final = slurp_csv(final_prod_filename)
    nrt = slurp_csv(nrt_prod_filename)
    # Concatenate the archival and near-real-time records:
    all_data = pd.concat([final, nrt])
    all_data.index = pd.to_datetime(all_data.index)
    # Reindex onto a continuous daily range so missing days appear as NaN:
    all_data = all_data.reindex(index=pd.date_range('1978-10-25', dt.date.today().strftime('%Y-%m-%d')))
    all_data['hemi'] = hemisphere
    return all_data
# + tags=["worksheet-0"]
def interpolate_column_excluding_extended_missing_periods(df_in, column, interpolated_column):
    """Linearly interpolate *column*, but keep multi-day gaps as NaN.

    A missing value is interpolated only when it sits directly next to a
    real observation on both sides (detected via limit-1 back/forward
    fills); longer missing runs are left NaN in *interpolated_column*.
    """
    out = df_in.copy()
    near_next = out[column].fillna(method='bfill', limit=1)
    near_prev = out[column].fillna(method='ffill', limit=1)
    # Inside an extended gap at least one of the limit-1 fills stays NaN:
    deep_gap = pd.isnull(near_next) | pd.isnull(near_prev)
    out[interpolated_column] = out[column].interpolate()
    out.loc[deep_gap, interpolated_column] = np.nan
    return out
# + tags=["worksheet-0"]
def clim_string(climatology_years):
    """Format a (start, end) climatology period as 'start-end'."""
    start, end = climatology_years
    return '{0}-{1}'.format(start, end)


def get_climatological_means(df, column, climatology_years):
    """Mean of *column* per (month, day) over the climatology period.

    Returns a one-column DataFrame indexed by (month, day), with the
    column named after the period (e.g. '1981-2010').
    """
    start, end = climatology_years
    in_period = (df.index.year >= start) & (df.index.year <= end)
    subset = df[in_period].copy()
    means = subset.groupby([subset.index.month, subset.index.day]).mean()[[column]]
    return means.rename(columns={column: clim_string(climatology_years)})
# + tags=["worksheet-0"]
import calendar

# January..December, used to label the month level of the output index.
month_names = [calendar.month_name[x] for x in range(1,13)]

def prepare_daily_dataframe(column, means, df_in, title):
    """Pivot daily data into a (month, day) x year table plus a climatology column.

    Output columns carry a two-level index: level 0 is *title*, level 1 the
    years, followed by a blank spacer column and the climatological means.
    """
    df = df_in.copy()
    # Rows become (month, day); years are unstacked into the columns:
    df = df[[column]].set_index([df.index.year, df.index.month, df.index.day]).unstack(0)
    df.columns = df.columns.droplevel(0)
    # Blank spacer column between the year columns and the climatology.
    # NOTE(review): the '1981-2010' label is hard-coded here — this assumes
    # climatology_years == (1981, 2010); confirm if the period changes.
    space = means.copy()
    space['1981-2010'] = " "
    space.rename(columns={'1981-2010': ' '}, inplace=True)
    df = pd.concat([df, space, means.copy()], axis=1)
    # Promote *title* to the top level of the column index:
    df[column] = title
    df.set_index(column, append=True, inplace=True)
    df = df.unstack(column)
    df.columns = df.columns.reorder_levels([column, None])
    # Replace the numeric month level with month names:
    df.index = df.index.set_levels(month_names, level=0)
    return df
# + tags=["worksheet-0"]
def to_mon_day_rows_year_cols(column, df, title):
    """Pivot *column* of a date-indexed frame into (month, day) rows x year columns.

    The top column level is relabeled to *title*.
    """
    a = df.copy()
    a = a[[column]].set_index([a.index.year, a.index.month, a.index.day]).unstack(0)
    # BUG FIX: MultiIndex.set_levels returns a NEW index — the original
    # discarded the result, so the top column level silently kept the old
    # column name instead of *title*. Assign it back.
    a.columns = a.columns.set_levels([title], level=0)
    return a
# + tags=["worksheet-0"]
def compute_anomaly_from_extent_df(df, title):
    """Subtract the climatology (last column) from every per-year column.

    Expects the layout produced by prepare_daily_dataframe: per-year
    columns, then a spacer column, then the climatological-mean column.
    The spacer and climatology columns are dropped from the result, and the
    outer column level is relabelled with *title*.
    """
    frame = df.copy()
    yearly = frame.iloc[:, 0:-2].to_numpy()
    climatology = frame.iloc[:, -1].to_numpy()
    # Broadcast the per-row climatology across every year column.
    anomalies = yearly - climatology[:, np.newaxis]
    result = pd.DataFrame(data=anomalies, index=frame.index, columns=frame.columns[0:-2])
    result.columns = result.columns.set_levels([title], level=0)
    return result
# + tags=["worksheet-0"]
def compute_extent_and_5day_extent_for_hemisphere(hemisphere):
    """Build the full set of extent tables for one hemisphere.

    Reads the raw daily extent series, interpolates short gaps, derives the
    5-day rolling mean and its daily change, and attaches climatological
    means and anomalies.  Relies on the module-level `climatology_years`.

    Returns a dict of DataFrames keyed by sheet-friendly names.
    """
    df = read_a_hemisphere(hemisphere)
    df = interpolate_column_excluding_extended_missing_periods(df, 'extent', 'interpolated')
    # BUG FIX: pd.rolling_mean() was removed from pandas (deprecated in 0.18,
    # later deleted); the equivalent modern call is Series.rolling(...).mean().
    df['5 Day'] = df['interpolated'].rolling(window=5, min_periods=2).mean()
    df['Daily Change'] = df['5 Day'].diff(periods=1)
    daily_means = get_climatological_means(df, 'interpolated', climatology_years)
    five_day_means = get_climatological_means(df, '5 Day', climatology_years)
    extent = prepare_daily_dataframe('extent', daily_means , df, 'Daily Extents : with climatological means based on interpolated data')
    daily_change = to_mon_day_rows_year_cols('Daily Change', df, 'Daily Extent Change for 5 Day Average Extent')
    average_extent = prepare_daily_dataframe('5 Day', five_day_means, df, 'Daily 5 Day Extents : with climatological means based on 5 day data')
    extent_anomaly = compute_anomaly_from_extent_df(extent, 'Extent Anomaly')
    avg_extent_anomaly = compute_anomaly_from_extent_df(average_extent, '5 Day Avg Ext Anomaly')
    return {'Ext': extent, '5 Day Avg Ext': average_extent,
            'Ext Anomaly': extent_anomaly, '5 Day Avg Ext Anomaly': avg_extent_anomaly,
            'Daily Change': daily_change}
# + tags=["worksheet-0"]
def write_hemisphere(writer, df, abbv):
    """Write one hemisphere's tables as formatted sheets of the workbook.

    writer : pandas ExcelWriter created with engine='xlsxwriter' (the
        conditional_format calls below are xlsxwriter-specific).
    df : dict of DataFrames as returned by
        compute_extent_and_5day_extent_for_hemisphere.
    abbv : sheet-name prefix such as 'NH' or 'SH'.
    """
    df['Ext'].to_excel(writer,"{0} Ext".format(abbv),float_format = "%.3f", index=True)
    df['5 Day Avg Ext'].to_excel(writer,"{0} 5 Day Avg Ext".format(abbv),float_format = "%.3f")
    # don't do daily anomaly.
    # df['Ext Anomaly'].to_excel(writer,"{} Ext Anomaly".format(abbv),float_format = "%.3f")
    df['5 Day Avg Ext Anomaly'].to_excel(writer,"{0} 5 Day Avg Ext Anomaly".format(abbv),float_format = "%.3f")
    df['Daily Change'].to_excel(writer, "{0} Daily Change".format(abbv), float_format = "%.3f")
    workbook = writer.book
    # add colors blue with blue (used for positive cells)
    format1 = workbook.add_format({'bg_color': '#CEC7FF',
                                   'font_color': '#06009C'})
    # add colors red with red (used for negative cells)
    format2 = workbook.add_format({'bg_color': '#FFC7CE',
                                   'font_color': '#9C0006'})
    # Highlight the change/anomaly sheets: blue for values > 0, red for < 0.
    sheets = ["{} Daily Change".format(abbv), "{} 5 Day Avg Ext Anomaly".format(abbv)]
    for sheet in sheets:
        worksheet = writer.sheets[sheet]
        worksheet.conditional_format('C3:ZZ369', {'type': 'cell',
                                                  'criteria': '>',
                                                  'value': 0,
                                                  'format': format1})
        worksheet.conditional_format('C3:ZZ369', {'type': 'cell',
                                                  'criteria': '<',
                                                  'value': 0,
                                                  'format': format2})
# + tags=["worksheet-0"]
# Build all tables for both hemispheres (reads and processes the full series).
north = compute_extent_and_5day_extent_for_hemisphere('north')
south = compute_extent_and_5day_extent_for_hemisphere('south')
# + tags=["worksheet-0"]
# NOTE(review): the xlsxwriter engine always writes xlsx-format data, so the
# '.xls' extension here is misleading -- confirm what downstream consumers
# expect of this file.
# NOTE(review): newer pandas deprecates ExcelWriter.save() in favor of
# close() -- verify against the pinned pandas version.
writer = ExcelWriter('../output/Sea_Ice_Extent_Daily.xls', engine='xlsxwriter')
write_hemisphere(writer, north, 'NH')
write_hemisphere(writer, south, 'SH')
writer.save()
# + tags=["worksheet-0"]
# cleanup: delete the raw CSV files downloaded earlier in the notebook
# !cd ../data; rm -f NH_seaice_extent_final.csv NH_seaice_extent_nrt.csv SH_seaice_extent_final.csv SH_seaice_extent_nrt.csv
# + tags=["worksheet-0"]
| notebooks/Daily and 5-day Sea Ice Information.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
#将前面几章用while循环的习题,用for循环实现,并尽量写成函数。
def sum_(numbers):
    """Return the sum of all values in *numbers* (0 for an empty iterable)."""
    return sum(numbers)
numbers = list(range(1, 11))
print(sum_(numbers))
# + deletable=true editable=true
# Exercise 1: read a string and print it reversed.
# NOTE(review): the global is named `list`, shadowing the builtin; kept as-is
# to preserve the worksheet's interface.
list = input('请输入想要倒序的字符:')
def reverse():
    # BUG FIX: the original sliced with `len(s)` where `s` was never defined
    # (NameError), and `list[0]` raised IndexError on empty input.  A single
    # reversing slice handles every case, including the empty string.
    print(list[::-1])
reverse()
# +
# Exercise 2: print a right triangle of n rows using the chosen symbol.
def triangle():
    n = int(input('请输入行数:'))
    a = str(input('请输入指定符号:'))
    # BUG FIX: range(n+1) started at 0, which printed a spurious blank first
    # line (n+1 lines in total); start at 1 so exactly n rows are printed.
    for i in range(1, n + 1):
        print(a * i)
triangle()
# +
# Exercise 3: print the plural form of an English noun.
def pl():
    word = input('请输入单词:')
    # BUG FIX: words ending in 'x' (box -> boxes) also take 'es'; endswith
    # accepts a tuple, so all sibilant endings are checked in one call.
    if word.endswith(('s', 'x', 'ch', 'sh')):
        print(word, 'es', sep='')
    elif word.endswith('f'):
        print(word[0:len(word)-1] + 'ves', sep='')
    # BUG FIX: '-y' becomes '-ies' only after a consonant (city -> cities);
    # vowel + y just takes 's' (boy -> boys), handled by the final branch.
    elif word.endswith('y') and len(word) > 1 and word[-2] not in 'aeiou':
        print(word[0:len(word)-1] + 'ies', sep='')
    else:
        print(word, 's', sep='')
pl()
# +
# Exercise 4: print a trapezoid whose rows grow from the top width to the
# bottom width, one extra symbol per row.
def tixing():
    top_width = int(input('请输入上底长:'))
    bottom_width = int(input('请输入下底长:'))
    symbol = str(input('请输入指定符号:'))
    for width in range(top_width, bottom_width + 1):
        print(symbol * width)
tixing()
# -
| chapter2/homework/localization/4-19/201611680949.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .cpp
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: C++14
// language: C++14
// name: xeus-cling-cpp14
// ---
// 
// <center> <h1>C++ backend for the jupyter-leaflet map visualization library</h1> </center>
// <center> <h1>Layers control</h1> </center>
#include "xleaflet/xmap.hpp"
#include "xleaflet/xbasemaps.hpp"
#include "xleaflet/xtile_layer.hpp"
#include "xleaflet/xwms_layer.hpp"
#include "xleaflet/xlayers_control.hpp"
// +
// Build the map widget; the trailing bare `map` expression is how xeus-cling
// displays a widget in the notebook output area.
// NOTE(review): a longitude of 354 lies outside [-180, 180] and presumably
// wraps to -6 degrees -- confirm the intended center.
auto map = xlf::map_generator()
    .center({50, 354})
    .zoom(4)
    .finalize();
map
// +
// Add a NASA GIBS basemap for a fixed date on top of the default base layer.
auto nasa_layer = xlf::basemap({"NASAGIBS", "ModisTerraTrueColorCR"}, "2018-03-30");
map.add_layer(nasa_layer);
// A WMS layer served from an external GeoServer instance.
// NOTE(review): demo.boundlessgeo.com has been retired; this URL likely no
// longer resolves -- verify against a live WMS endpoint.
auto wms = xlf::wms_layer_generator()
    .url("https://demo.boundlessgeo.com/geoserver/ows?")
    .layers("nasa:bluemarble")
    .name("nasa:bluemarble")
    .finalize();
map.add_layer(wms);
// -
// ## Add a ``layers_control`` to the map
// The control lets the user toggle each named layer from the map UI.
map.add_control(xlf::layers_control());
// ## And remove it
map.clear_controls();
| 2019-07-10-CICM/src/notebooks/xleaflet-layers-control.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bivariate Data
# Welcome to the Bivariate Data section. Bivariate Data is simply a dataset that has two values instead of one. In this section, we'll go over ways we can visualize and describe relationships between random variables. You'll see the familiar scatter plot, the details of correlation and covariance, $r^2$ measures, some practical tips, and much more!
#
# It's a short but an important one. Let's go.
#
# ## Relationships in data
# The general goal of the following techniques are to describe the relationships between two variables. For example, we want to know how house price is related to house size, or salary to experience, or year to GDP. This is such a crucial and fundamental goal of statistics because by finding a good relationship, we can make good predictions. In fact, one of the simplest methods of Machine Learning, linear regression, will model a linear relationship between an explanatory (i.e. input) variable and result (i.e. output) variable. That way, all the machine has to do is plug in the value of the explanatory variable to get the result variable. We'll look at linear regression in more detail in another notebook.
#
# ## Our data:
# We're going to use one of Seaborn's built-in data sets called 'tips' that has a list of restaurant tips. This makes it easy for anyone to grab the data. But again, don't focus on the details of using Seaborn or Pandas, as here we're just focused on examining our data.
#
# What we want to know is if there is a relationship between the total bill and the size of the tip. You've eaten out before, what is your guess?
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Load seaborn's bundled 'tips' example dataset.
# NOTE(review): load_dataset fetches from the seaborn-data GitHub repository
# on first use -- requires network access.
tips = sns.load_dataset('tips')
type(tips)
# The `type` of our tips dataset is a pandas `DataFrame`. Think of a `DataFrame` as Python's version of an excel table. Again, don't worry about it too much. There are plenty of resources out there on Pandas if you want to learn more.
#
# There are 7 variables in this dataset, but I'm going to scrub the table so we have the two pieces we want to examine: `total_bill` and `tip`.
# Keep only the two columns under study; drop the rest in place.
tips.drop(labels=["sex", "smoker", "day", "time", "size"], axis=1, inplace=True)
# ### Basic statistics
# Now we want to examine our dataset. In particular, we want to see the mean, standard deviation, range, and the distribution. Refer to the Descriptive Statistics notebook for more details on how we do this manually. For now, we'll use the `Series` class from Pandas to do all of it for us:
# Summary statistics (count, mean, std, quartiles, min/max) per column.
tips["total_bill"].describe()
tips["tip"].describe()
# Take a couple minutes to examine those statistics. What do they tell you?
#
# ### Which mean?
# Think back to the previous lesson on Descriptive Statistics. We went over 3 different types of means. If we wanted to get the mean for the tip percent, which should we use?
#
# You might see the percent sign and jump straight for the geometric mean. Afterall, that's how we averaged stock returns. But remember that these are not returns but proportions -- in other words, a tip percent has no dependence on any previous value.
#
# You may also want to jump straight for the arithmetic mean by creating all of tip percent and averaging the value (in other words, $\frac{0.15 + 0.18 + 0.21}{3}=0.18$. This is a reasonable assumption, but is it the best mean?
#
# Well the answer depends on what data you are trying to describe! The arithmetic mean will give you the average tip percent, which answers the question "what tip percent can I expect any given customer to give?"
#
# But the harmonic mean answers a slightly different question: for every dollar I sell, how much tip can I expect to receive. (Remember, the harmonic mean is all about proportions). The difference is subtle, but important. Imagine if everyone that has small bills tips 20%, but everyone with large bills tips 10%. The mean of my tip percents will be around 15%. But because those with large bills contribute so much more to my total bill amount, their tips will drag down the proportion and make it closer to 10%.
#
# So the harmonic mean may be better for predicting how many tips you expect given a total revenue, whereas the arithmetic mean will be better at predicting what tip percent my next customer will make.
#
# For our example, let's take a look at these two values:
# +
def arithmetic_mean(tip, bills):
    """Average the per-bill tip proportions (equal weight per customer)."""
    ratios = tip / bills
    return sum(ratios) / tip.count()
arithmetic_mean(tips["tip"], tips["total_bill"])
# +
def harmonic_mean(tip, bills):
    """Tip received per dollar billed: total tips over total bills."""
    total_tips = sum(tip)
    total_billed = sum(bills)
    return total_tips / total_billed
harmonic_mean(tips["tip"], tips["total_bill"])
# -
# Judging by these two numbers, we can expect any given person to tip about 16.08%. But at the end of the night, we can expect 15 cents for every dollar we sold. How is this so? This is an indication that the larger bills tend to tip less.
#
# We won't do any more analysis on tip percentage, but this was a valuable aside on knowing what you can glean by expressing your data in different ways.
#
# ## Histograms
# Back to our regularly scheduled program, let's take a look at our two distributions: `tip` and `total_bill`
# Side-by-side histograms of the two raw distributions.
fig, axs = plt.subplots(1,2, figsize=(10,5))
hist_kws={"edgecolor":"k", "lw":1}
# NOTE(review): sns.distplot is deprecated in modern seaborn (replaced by
# histplot/displot) -- still works here but worth migrating.
sns.distplot(tips["total_bill"], kde=False, ax=axs[0], axlabel="Total Bill (dollars)", hist_kws=hist_kws)
sns.distplot(tips["tip"], kde=False, ax=axs[1], color='r', axlabel="Total Tip (dollars)", hist_kws=hist_kws)
# From the above, we can see that the distributions are roughly the same. It's not quite normal because of the right skew (that is, the tail is long and to the right). But even though these look similar, it does not say anything definitive about the relationship. For example, each tip could be completely independently matched up with the total bill, like in the extreme case of the \$10.00 max tip getting matched to the \$3.07 minimum bill (hey, it could happen!). For a better sense of this relationship, let's introduce the
#
# ## Scatter Plot
# Another familiar graph, it will plot the `total_bill` against the `tip`. In this case, `total_bill` is the explanatory variable and the `tip` is the result variable. This means we are looking at the `total_bill` value to explain the `tip` value.
#
# Let's take a look at the scatter plot (or `lmplot` as seaborn calls it) and all will become clear:
# Scatter plot of tip vs. total bill; fit_reg=False suppresses the
# regression line that lmplot would otherwise draw.
sns.lmplot(x="total_bill", y="tip", data=tips, fit_reg=False)
# Look at that! So much more useful than the histogram as we can now see the actual relationships. Each datapoint represents one total bill paired up with it's related tip. Obviously there's a strong relationship here. Since we know the average tip to be 16.1%, and a bill of 0 should give us a tip of 0, we can quickly guess that a linear relationship should be described by the following equation:
#
# $$ y = 0 + 0.161x $$
#
# Let's plot this line on the graph and see how accurate it might be.
# Overlay the guessed y = 0.161 * x line on a scatter of the raw data.
yvals = 0.161 * tips["total_bill"]
plt.plot(tips["total_bill"], yvals)
sns.regplot(tips["total_bill"], tips["tip"], fit_reg=False)
# That looks pretty good, but of course mathematicians aren't excited by "looks pretty good." They need numbers to back that claim up. We'll get into finding the best line in the next notebook on linear regression, but for now rest assured that there's a clear positive relationship (meaning $y$ goes up as $x$ goes up) between the variables.
#
# _Refer back to the section on "Which mean?" See how the higher bills tend to be below the blue line? This again is more confirmation that the larger bills tend to tip less. Of course in a small dataset, you can't call this a significant trend, but it does corroborate the discrepancy in our earlier analysis._
#
# ## Covariance and Correlation
# ### Covariance
# Let's now talk about how strong this positive relationship is. In other words, how sure can we be that if $x$ goes up $y$ will follow? Statisticians look at the way these values vary together with a term called _covariance_. When two variables tend to move together, their covariance is positive. If they move opposite, it's negative. If they are completely independent, it's 0.
#
# The covariance for $N$ samples with random variables $X$ and $Y$ is given by the formula
#
# $$ Q = \frac{1}{N-1} \sum_{i=1}^{N} (x_{i}-\bar{x})(y_i-\bar{y}) $$
#
# In Python:
# +
# A useful helper function will be to define the dot product between two vectors
# dot product is defined as <v1, v2> * <u1, u2> = v1*u1 + v2*u2
from operator import mul
def dot(v, u):
    """Dot product of two equal-length vectors: sum of pairwise products."""
    return sum(a * b for a, b in zip(v, u))
# Another useful helper function will be to take a list and output another list
# that contains the differences of the means
def diff_mean(values):
    """Return each element's deviation from the mean, as a plain list."""
    center = values.mean()  # pandas' arithmetic mean of the series
    return [v - center for v in values]
def covariance(x, y):
    """Sample covariance of two equal-length series (n-1 denominator)."""
    sample_size = len(x)
    return dot(diff_mean(x), diff_mean(y)) / (sample_size - 1)
covariance(tips["total_bill"], tips["tip"])
# -
# 8.3235 is a value without a lot of context. Think about what the units of that are: it's essentially 2022.61 dollars squared ($\$^2$). Do you ever pay for things in dollars squared? Or if we use two variables with totally different units, like salary (dollars) and years, we get units of dollar-years. This makes it really difficult to get a sense of scale or closeness of relationship.
#
# ### Correlation
#
# Instead, statisticians will often use the correlation (or Pearson Correlation Coefficient to be formal) to report a number that everyone can understand. Correlation is a number between -1 and +1. If it's 1, there's a perfect positive linear relationship and vice versa. A correlation of 0 means there's no _linear_ relationship. But keep in mind, a correlation of 0 does not rule out a relationship of some other, non-linear, form.
#
# Take this helpful image from Wikipedia:
# 
#
# The correlation coefficient is almost always denoted by the variable $r$. The formula for a sample is:
#
# $$ r = \frac{\sum\nolimits_{i=1}^{n}(x_i-\bar{x})(y_i-\bar{y}))}{\sqrt{\sum\nolimits_{i=1}^{n}(x_i-\bar{x})^2\sum\nolimits_{i=1}^{n}(y_i-\bar{y})^2}} $$
#
# Before you freak out, we've seen all of these pieces before. This is just the covariance divided by the product of the standard deviations. Perhaps a simpler equation?
#
# $$ \rho_{X,Y} = \frac{cov(X,Y)}{\sigma_X\sigma_Y} $$
#
# Now in code:
# +
def correlation(x, y):
    """Pearson correlation: covariance scaled by both standard deviations.

    Returns 0 when either series lacks positive spread (the original's
    `> 0 and > 0` guard is kept so NaN spreads also fall through to 0).
    """
    spread_x = x.std()  # pandas' sample standard deviation
    spread_y = y.std()
    if not (spread_x > 0 and spread_y > 0):
        return 0
    return covariance(x, y) / (spread_x * spread_y)
correlation(tips["total_bill"], tips["tip"])
# -
# ### Interpretation
#
# How are we to interpret that number? A lot of scientists will try to set guidelines, but it really depends on context and case. In instances where data is measured with precision and follows fundamental laws of nature, 0.675 is pretty low. Even a correlation of 0.8 may be low. But in social sciences, where people certainly do not follow any fundamental and rational laws of nature, that might be pretty strong. As you explore your dataset more and other datasets in similar fields, you'll start to develop more context around this number.
#
# ### Correlation $\ne$ Causation
#
# You hear this a lot, but it's worth repeating. Just because `total_bill` and `tip` are correlated, does not mean that the increase in `total_bill` _causes_ an increase in `tip`. (Although that's pretty clear in this case as `tip` is directly derived from `total_bill`, but regardless watch out). The aim of many people wielding statistics is to prove one particular point: _their point_. Those who want there to be causation will probably drop a headline like "Study finds new link between coffee and cancer!", whereas those who don't want to give up their morning joe (including yours truly), will point out that people who smoke probably also drink coffee. So how are you to know it's the coffee's fault?
#
# Welcome to the wild world of statistics. Keep your wits and your logic about you and you'll do just fine here, kid.
| Statistics/Bivariate Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: reserver
# language: python
# name: reserver
# ---
import pandas as pd
# OPSD hourly time-series bundle (fixed 2020-10-06 release).
url = 'https://data.open-power-system-data.org/time_series/2020-10-06/time_series_60min_singleindex.csv'
# NOTE(review): this downloads a large CSV over the network on every run --
# consider caching the file locally.
hourly_time_series_df = pd.read_csv(url,
                                    sep=',',
                                    index_col=0,    # use the first column (the timestamp) as the index
                                    parse_dates=[0])  # parse that timestamp column to datetimes
hourly_time_series_df
| notebooks/Example DataSet for Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The groupby operation
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# Default figure size for every plot in this notebook.
plt.rc('figure', figsize=(12, 6))
# Truncate DataFrame display output to 10 rows.
pd.options.display.max_rows = 10
# I've provided the reviews by the top 100 reviewers.
# We'll use it for talking about groupby.
# Beer reviews from the top 100 reviewers; 'time' is parsed to datetime up front.
df = pd.read_csv("data/subset.csv.gz", parse_dates=['time'])
df.head()
df.info()
# ## Aside: Namespaces
#
# Pandas has been expanding its use of namespaces (or accessors) on `DataFrame` to group together related methods. This also limits the number of methods directly attached to `DataFrame` itself, which can be overwhelming.
#
# Currently, we have these namespaces:
#
# - `.str`: defined on `Series` and `Index`es containing strings (object dtype)
# - `.dt`: defined on `Series` with `datetime` or `timedelta` dtype
# - `.cat`: defined on `Series` and `Indexes` with `category` dtype
# - `.plot`: defined on `Series` and `DataFrames`
# # Groupby
# Groupby operations come up in a lot of contexts.
# At its root, groupby is about doing an operation on many subsets of the data, each of which shares something in common.
# The components of a groupby operation are:
# ## Components of a groupby
#
# 1. **split** a table into groups
# 2. **apply** a function to each group
# 3. **combine** the results into a single DataFrame or Series
# In pandas the `split` step looks like
#
# ```python
# df.groupby( grouper )
# ```
#
# `grouper` can be many things
#
# - Series (or string indicating a column in `df`)
# - function (to be applied on the index)
# - dict : groups by *values*
# - `levels=[ names of levels in a MultiIndex ]`
# ## Split
#
# Break a table into smaller logical tables according to some rule
# Split step only: builds a lazy GroupBy object; no computation happens yet.
gr = df.groupby('beer_name')
gr
# We haven't really done any actual work yet, but pandas knows what it needs to know to break the larger `df` into many smaller pieces, one for each distinct `beer_name`.
# ## Apply & Combine
#
# To finish the groupby, we apply a method to the groupby object.
# +
# The five numeric review dimensions aggregated throughout this notebook.
review_cols = ['review_appearance', 'review_aroma', 'review_overall',
               'review_palate', 'review_taste']
# Apply + combine: one mean per review column per beer.
df.groupby('beer_name')[review_cols].agg('mean')
# -
# In this case, the function we applied was `'mean'`.
# Pandas has implemented cythonized versions of certain common methods like mean, sum, etc.
# You can also pass in regular functions like `np.mean`.
#
# In terms of split, apply, combine, split was `df.groupby('beer_name')`.
# We apply the `mean` function by passing in `'mean'`.
# Finally, by using the `.agg` method (for aggregate) we tell pandas to combine the results with one output row per group.
#
# You can also pass in regular functions like `np.mean`.
# Same aggregation, passing the NumPy function object instead of the string.
df.groupby('beer_name')[review_cols].agg(np.mean).head()
# Finally, [certain methods](http://pandas.pydata.org/pandas-docs/stable/api.html#id35) have been attached to `Groupby` objects.
# Shortcut: call .mean() directly on the GroupBy object.
df.groupby('beer_name')[review_cols].mean()
# <div class="alert alert-success" data-title="Highest Variance">
# <h1><i class="fa fa-tasks" aria-hidden="true"></i> Exercise: Highest Variance</h1>
# </div>
#
# <p>Find the `beer_style`s with the greatest variance in `abv`.</p>
#
# - hint: `.std` calculates the standard deviation (`.var` for variance), and is available on `GroupBy` objects like `gr.abv`.
# - hint: use `.sort_values` to sort a Series by the values (it took us a while to come up with that name)
# %load solutions/groupby_abv.py
# ## `.agg` output shape
#
# The output shape is determined by the grouper, data, and aggregation
#
# - Grouper: Controls the output index
# * single grouper -> Index
# * array-like grouper -> MultiIndex
# - Subject (Groupee): Controls the output data values
# * single column -> Series (or DataFrame if multiple aggregations)
# * multiple columns -> DataFrame
# - Aggregation: Controls the output columns
# * single aggfunc -> Index in the colums
# * multiple aggfuncs -> MultiIndex in the columns (Or 1-D Index if groupee is 1-D)
#
# We'll go into MultiIndexes in a bit, but for know, think of them as regular Indexes with multiple levels (columns).
# single grouper, single groupee, single aggregation -> a Series
df.groupby('beer_style').review_overall.agg('mean')
# multiple groupers, multiple groupees, multiple aggregations ->
# MultiIndex on both the rows (brewer_id, beer_name) and the columns
df.groupby(['brewer_id', 'beer_name'])[review_cols].agg(['mean', 'min', 'max', 'std', 'count'])
# <div class="alert alert-success" data-title="Rating by length">
# <h1><i class="fa fa-tasks" aria-hidden="true"></i> Exercise: Rating by length</h1>
# </div>
#
# <p>Plot the relationship between review length (number of characters) and average `review_overall`.</p>
#
# - Hint: use `.plot(style='k.')`
# - We've grouped by columns so far, you can also group by any series with the same length
# %load solutions/groupby_00.py
# <div class="alert alert-success" data-title="Reviews by Length">
# <h1><i class="fa fa-tasks" aria-hidden="true"></i> Exercise: Reviews by Length</h1>
# </div>
#
# <p>Find the relationship between review length (number of **words** and average `review_overall`.)</p>
#
# - Hint: You can pass a [regular expression](https://docs.python.org/3/howto/regex.html#matching-characters) to any of the `.str` methods.
# %load solutions/groupby_00b.py
# <div class="alert alert-success" data-title="Rating by number of Reviews">
# <h1><i class="fa fa-tasks" aria-hidden="true"></i> Exercise: Rating by number of Reviews</h1>
# </div>
#
# <p>Find the relationship between the number of reviews for a beer and the average `review_overall`.</p>
#
# %load solutions/groupby_01.py
| pandas-tutorial-scipyconf-2018/03-groupby.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div class ="alert alert-warning">
#
#
# notebook consultable, exécutable, modifiable et téléchargeable en ligne :
#
#
# - se rendre à : https://github.com/nsi-acot/continuite_pedagogique_premiere
# - cliquer sur l'icone "launch binder" en bas de page
# - patienter quelques secondes que le serveur Jupyter démarre
# - naviguer dans le dossier `"./types_construits/tuples/"`
# - cliquer sur le nom de ce notebook
# </div>
# # <center> p-uplets(ou tuples)</center>
# Python propose un type de données appelé tuple, qui est assez semblable à une liste mais qui, comme les chaînes, n’est pas modifiable. Du point de vue de la syntaxe, un tuple est une collection d’éléments séparés par des virgules :
# Run the code below: tuples can be created with or without parentheses.
tup1 = 1,2,3
tup2 = (6,7)
tup3 = 'abc','def'
tup4 = ('a',1,'b',2)
print(tup1,type(tup1))
print(tup2,type(tup2))
print(tup3,type(tup3))
print(tup4,type(tup4))
# A retenir :
# * De simples virgules suffisent à définir un tuple mais pour la lisibilité du code, il est préférable de l'enfermer dans des parenthèses.
# ### Opérations sur les tuples
# Assignments: tuple unpacking binds each element to its own name.
t= 7,8,9
a,b,c = t
print(a)
# Operators + and *: concatenation and repetition.
t1= (3,2,1)
t2 = (6,5,4)
t3 = 2*t1+t2
print(t3)
# Accessing elements by index.
t4 = (2,4)
print(t4[0])
# Length of a tuple.
print(len(t3))
# Iterating over a tuple.
t5 = (0,1,2,3,4,5,6,7,8,9)
for e in t5 :
    print(e+1, end=' ')
# Membership test with `in`.
b = 3 in (2,4,6)
print(b)
# Tuples are not lists: the next line deliberately raises AttributeError,
# because tuples are immutable and have no append() method.
t6 = (2,4,6,8)
t6.append(10)
# "Adding" an element to a tuple really builds a new tuple.
t7 = 1,2,3,4,5
t7 = t7 + (6,) # do not forget the comma and the parentheses
print(t7)
# A retenir :
# * Les opérateurs de concaténation `+` et de multiplication `*` donnent les mêmes résultats que pour les chaînes et les listes.
# * On accède aux éléments d'un tuple comme avec les chaînes et les listes.
# * On peut déterminer la taille d’un tuple à l’aide de la fonction `len()`.
# * On peut le parcourir à l’aide d’une boucle `for`, utiliser l’instruction `in` pour savoir si un élément donné en fait partie, exactement comme pour une liste ou pour une chaîne.
# * Les tuples ne sont pas modifiables et on ne peut pas utiliser avec eux ni la méthode `.append()`ni l'instruction `del()`.
# * Les tuples sont moins souples que les listes mais c'est aussi leur force. On est sûr que leur contenu ne sera pas modifié par erreur. De plus ils sont beaucoup moins gourmands en ressources et sont éxécutés plus rapidement.
# ### Exercice 1 :
# Ecrire la fonction `reverse(t)` qui prend en paramètre un tuple de trois valeurs et qui renvoie un tuple contenant les 3 valeurs dans l'ordre inverse.
#
# Ainsi `reverse(1,2,3)` renvoie `(3,2,1)`
# +
# Answer (student stub -- intentionally left for the learner to complete)
def reverse(t):
    """Return a 3-tuple with the values of *t* in reverse order."""
    # TODO(student): e.g. build and return (t[2], t[1], t[0]).
    return
reverse((1,2,3))
# -
# ### Exercice 2 :
# Ecrire la fonction `initiales` qui prend en paramètre une chaîne de caractères de type `'NOM Prénom'` et qui renvoie un tuple contenant les initiales des noms et prénoms passés en argument.
#
# Ainsi `initiales('<NAME>')` doit renvoyer `('J','D')`.
# +
# Answer (student stub)
def initiales(identite):
    """Return a tuple with the initials of a 'SURNAME Firstname' string."""
    # TODO(student): split the string and take the first letter of each part.
    return
initiales('<NAME>')
# -
# ### Exercice 3 :
# En utilisant le résultat précédent, compléter la fonction `initiale_nom(noms,lettre)` qui prend en paramètres un tuple de noms et prénoms formatés comme précédemment ainsi qu'un caractère et qui renvoie un tuple contenant les chaînes avec la même initiale de nom.
#
# Ainsi avec le tuple ci-dessous, `initiale_nom(stars,'S')` doit renvoyer `('<NAME>','<NAME>', '<NAME>')`
# +
#Réponse
stars = ('<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>',
'<NAME>','<NAME>','<NAME>','<NAME>','<NAME>','<NAME>')
def initiale_nom(noms, lettre):
return
#Appels
initiale_nom(stars,'S')
# -
# ### Exercice 4 :
# <img style='float:right;' src='fusions.png' width=200>
# On se place dans le contexte du codage des couleurs selon le système RGB, déjà vu dans une feuille précédente. Dans une image dite bitmap, chaque pixel contient une couleur composée de ses couches Rouge, Verte et Bleue, chacune de ces couches étant représentée par un entier compris entre $0$ et $255$
#
# Dans les logiciels de traitement d'images(Photoshop, Gimp,...), on travaille avec des calques d'images que l'on superpose et que l'on fusionne. En fonction des opérations mathématiques utilisées pour fusionner les calques, on obtient des rendus esthétiques différents:
# <img style='float:right;' src='http://www.info-3000.com/photoshop/modefusion02.jpg' width=400>
#
# Chacune des fonctions demandées correspondent à un mode de fusion de Photoshop , prennent en paramètres deux tuples de trois valeurs`pix1`et `pix2` correspondants aux deux pixels que l'on souhaite fusionner et renvoient un tuple contenant la couleur finale obtenue.
#
# Pour s'aider, voici une description des formules de certains modes de fusion( en milieu de page) :
# https://helpx.adobe.com/fr/after-effects/using/blending-modes-layer-styles.html
#
# On pourra tester avec les deux pixels `p1=(200,128,63)` et `p2=(125,205,50)`. Fonctions `min()` et `max()` autorisées !
# The two sample RGB pixels used to test every blend mode below.
p1 = (200,128,63)
p2 = (125,205,50)
# 1. `eclaircir(p1,p2)` renvoie `(200,205,63)`
# +
#1 : "lighten" blend mode -- keep the brighter of each channel.
def eclaircir(pix1, pix2):
    """Worksheet expects eclaircir(p1,p2) == (200,205,63)."""
    # TODO(student): replace the placeholders, e.g. r = max(pix1[0], pix2[0]).
    r=0
    g=0
    b=0
    return (r,g,b)
eclaircir(p1,p2)
# -
# 2. `obscurcir(p1,p2)` renvoie `(125,128,50)`
# +
#2 : "darken" blend mode -- keep the darker of each channel.
def obscurcir(pix1, pix2):
    """Worksheet expects obscurcir(p1,p2) == (125,128,50)."""
    # TODO(student): use min() per channel.
    r= 0
    g= 0
    b= 0
    return (r,g,b)
obscurcir(p1,p2)
# -
# 3. `difference(p1,p2)` renvoie `(75,155,13)`
# +
#3 : "difference" blend mode.
def difference(pix1,pix2):
    """Worksheet expects difference(p1,p2) == (75,155,13).

    NOTE(review): confirm the exact per-channel formula against the Adobe
    blend-mode reference linked above.
    """
    # TODO(student)
    r= 0
    g= 0
    b= 0
    return (r,g,b)
difference(p1,p2)
# -
# 4. `addition(p1,p2)` renvoie `(255,255,113)`
# +
#4 : "addition" blend mode -- channel sums clamped at 255.
def addition(pix1,pix2):
    """Worksheet expects addition(p1,p2) == (255,255,113)."""
    # TODO(student): e.g. r = min(pix1[0] + pix2[0], 255).
    r= 0
    g= 0
    b= 0
    return (r,g,b)
addition(p1,p2)
# -
# 5. `soustraction(p1,p2)` renvoie `(0,77,0)`
# +
#5 : "subtraction" blend mode -- channel difference clamped at 0.
def soustraction(pix1,pix2):
    """Worksheet expects soustraction(p1,p2) == (0,77,0).

    NOTE(review): per the expected output, the subtraction order appears to
    be pix2 - pix1 -- confirm against the Adobe reference.
    """
    # TODO(student)
    r= 0
    g= 0
    b= 0
    return (r,g,b)
soustraction(p1,p2)
# -
# 6. `produit(p1,p2)` renvoie `(98,103,12)`
# +
#6 : "multiply" blend mode -- product of channels rescaled by 255.
def produit(pix1,pix2):
    """Worksheet expects produit(p1,p2) == (98,103,12)."""
    # TODO(student)
    r= 0
    g= 0
    b= 0
    return (r,g,b)
produit(p1,p2)
# -
# 7. `division(p1,p2)` renvoie `(159,255,202)`
# +
#7 : "divide" blend mode.
def division(pix1,pix2):
    """Worksheet expects division(p1,p2) == (159,255,202).

    NOTE(review): confirm the exact per-channel formula against the Adobe
    blend-mode reference linked above.
    """
    # TODO(student)
    r= 0
    g= 0
    b= 0
    return (r,g,b)
division(p1,p2)
# -
# 8. `superposition(p1,p2)` renvoie `(226,230,100)`
# +
#8 : "screen" blend mode -- inverted multiply of the inverted channels.
def superposition(pix1,pix2):
    """Worksheet expects superposition(p1,p2) == (226,230,100)."""
    # TODO(student)
    r= 0
    g= 0
    b= 0
    return (r,g,b)
superposition(p1,p2)
# -
| types_construits/tuples/tuples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ml
# language: python
# name: ml
# ---
# Last week I scrapped a bunch of data from the Steam API using my [Steam Graph Project](https://github.com/jrtechs/SteamFriendsGraph).
# This project captures steam users, their friends, and the games that they own.
# Using the Janus-Graph traversal object, we are able to use the Gremlin graph query language to pull this data.
# Since I am storing the hours played in a game as a property on the relationship between a player and a game node, I had to do a join statement to get the hours property with the game information in a single query.
# ```java
# Object o = graph.con.getTraversal()
# .V()
# .hasLabel(Game.KEY_DB)
# .match(
# __.as("c").values(Game.KEY_STEAM_GAME_ID).as("gameID"),
# __.as("c").values(Game.KEY_GAME_NAME).as("gameName"),
# __.as("c").inE(Game.KEY_RELATIONSHIP).values(Game.KEY_PLAY_TIME).as("time")
# ).select("gameID", "time", "gameName").toList();
# WrappedFileWriter.writeToFile(new Gson().toJson(o).toLowerCase(), "games.json");
# ```
# Using the game indexing property on the players, I noted that I only ended up fully indexing the games of 481 games.
# ```java
# graph.con.getTraversal()
# .V()
# .hasLabel(SteamGraph.KEY_PLAYER)
# .has(SteamGraph.KEY_CRAWLED_GAME_STATUS, 1)
# .count().next()
# ```
# We now transition to python and Matlptlib to visualize the data exported from our JanusGraph Query as a json object.
# The dependencies for this [notebook](https://github.com/jrtechs/RandomScripts/tree/master/notebooks) can be installed using pip.
# !pip install pandas
# !pip install matplotlib
# The first step we are doing is importing our JSON data as a pandas dataframe.
# Pandas is a popular open-source data analysis and manipulation tool.
# I enjoy pandas because it has native integration with matplotlib and supports operations like aggregations and groupings.
# +
# Load the exported JanusGraph query results into a pandas DataFrame.
import matplotlib.pyplot as plt
import pandas as pd
games_df = pd.read_json('games.json')
# Bare expression: echoes the frame in the notebook output.
games_df
# -
# Using the built-in matplotlib wrapper function, we can graph a histogram of the number of hours played in a game.
# Histogram of minutes played per owned game, clipped to [0, 4000] minutes.
ax = games_df.hist(column='time', bins=20, range=(0, 4000))
# DataFrame.hist returns a 2-D array of Axes; grab the single subplot.
ax=ax[0][0]
ax.set_title("Game Play Distribution")
ax.set_xlabel("Minutes Played")
ax.set_ylabel("Frequency")
# Notice that the vast majority of the games are rarely ever played; the distribution is skewed to the right with a lot of outliers.
# We can change the scale to make it easier to view using the range parameter.
# Same histogram zoomed to the first 100 minutes of play.
ax = games_df.hist(column='time', bins=20, range=(0, 100))
ax=ax[0][0]
ax.set_title("Game Play Distribution")
ax.set_xlabel("Minutes Played")
ax.set_ylabel("Frequency")
# If we remove games that have never been played, the distribution looks more normal.
# Exclude never-played games (< 2 minutes) to see the shape of actual play.
ax = games_df.hist(column='time', bins=20, range=(2, 100))
ax=ax[0][0]
ax.set_title("Game Play Distribution")
ax.set_xlabel("Minutes Played")
ax.set_ylabel("Frequency")
# Although histograms are useful, viewing the CDF is often more helpful since it is easier to extract numerical information from.
# Cumulative density histogram (step style) — effectively an empirical CDF.
ax = games_df.hist(column='time',density=True, range=(0, 2000), histtype='step',cumulative=True)
ax=ax[0][0]
ax.set_title("Game Play Distribution")
ax.set_xlabel("Minutes Played")
# NOTE(review): the y-axis is a cumulative fraction here, so a label such as
# "Cumulative fraction" would be more accurate than "Frequency".
ax.set_ylabel("Frequency")
# 80% of people who own a game play it for under 4 hours. In fact, nearly half of all downloaded or purchased Steam games go un-played. This is a cool example of the famous 80/20 principle -- aka the Pareto principle. The Pareto principle states that for most events, roughly 80% of the effects come from 20% of the causes.
#
# As mentioned earlier, the time in owned game distribution is heavily skewed to the right.
# Horizontal box plot of raw play time (minutes) across all player/game pairs.
ax = plt.gca()
ax.set_title('Game Play Distribution')
ax.boxplot(games_df['time'], vert=False,manage_ticks=False, notch=True)
plt.xlabel("Game Play in Minutes")
ax.set_yticks([])
plt.show()
# Zooming in on the distribution we see that nearly half of all the purchased games go un-opened.
# Same box plot rescaled to hours and zoomed to the first 10 hours.
ax = plt.gca()
ax.set_title('Game Play Distribution')
ax.boxplot(games_df['time']/60, vert=False,manage_ticks=False, notch=True)
plt.xlabel("Game Play in Hours")
ax.set_yticks([])
ax.set_xlim([0, 10])
plt.show()
# Viewing the aggregate pool of hours in particular game data is insightful, however, comparing different games against each other is more interesting.
# In pandas, after we create a grouping on a column, we can aggregate it into metrics such as max, min, mean, etc.
# I am also sorting the data I get by count since we are more interested in "popular" games.
# Per-game play-time aggregates, sorted (ascending) by owner count in the crawl.
stats_df = (games_df.groupby("gamename")
            .agg({'time': ['count', "min", 'max', 'mean']})
            .sort_values(by=('time', 'count')))
stats_df
# To prevent one-off esoteric games that I don't have a lot of data for from throwing off my metrics, I am disregarding any games that I have 10 or fewer values for.
# Keep only games with more than 10 samples so small-n games don't skew stats.
stats_df = stats_df[stats_df[('time', 'count')] > 10]
stats_df
# We see that games on average have a play time of 5-hours in a box-plot.
# Box plot of the per-game MEAN play time (hours), clipped to 40 hours.
ax = plt.gca()
ax.set_title('Game Play Distribution')
ax.boxplot(stats_df[('time', 'mean')]/60, vert=False,manage_ticks=False, notch=True)
plt.xlabel("Mean Game Play in Hours")
ax.set_xlim([0, 40])
ax.set_yticks([])
plt.show()
# I had a hunch that more popular games got played more; however, I feel like this data-set is still too small to verify this hunch.
# Scatter of popularity (owner count) vs. mean play time per game.
stats_df.plot.scatter(x=('time', 'count'), y=('time', 'mean'))
# We can create a new filtered dataframe that only contains the results for a single game.
# Rows for a single title: Counter-Strike: Global Offensive.
cc_df = games_df[games_df['gamename'] == "counter-strike: global offensive"]
cc_df
# It is surprising how many hours certain people play Counter-Strike. The highest number in the dataset was 8,444 hours or 352 days!
# Box plot of Counter-Strike play time in hours.
ax = plt.gca()
ax.set_title('Game Play Distribution for Counter-Strike')
ax.boxplot(cc_df['time']/60, vert=False,manage_ticks=False, notch=True)
plt.xlabel("Game Play in Hours")
ax.set_yticks([])
plt.show()
# Viewing the distribution for a different game like Unturned yields a vastly different distribution than Counter-Strike. I believe the key difference is that Counter-Strike is played competitively whereas Unturned is a more leisurely game. It is likely that competitive gamers skew the distribution of Counter-Strike to be very high.
# Same single-game filter and box plot, for Unturned.
u_df = games_df[games_df['gamename'] == "unturned"]
u_df
ax = plt.gca()
ax.set_title('Game Play Distribution for Unturned')
ax.boxplot(u_df['time']/60, vert=False,manage_ticks=False, notch=True)
plt.xlabel("Game Play in Hours")
ax.set_yticks([])
plt.show()
# Next, I made a dataframe just containing the raw data points of games that had an aggregate count of over 80. For the sample size of the crawl that I did, having a count of over 80 would make the game "popular". Since we only have 485 players indexed, having over 80 entries means that over 17% of the players scraped had the game. Glancing at the results, it is easy to verify that the games returned were very popular.
# Rows for "popular" games: titles owned by more than 80 indexed players.
# .copy() makes df1 an independent frame so the in-place rescale below does
# not trigger pandas' SettingWithCopyWarning / chained-assignment ambiguity.
df1 = games_df[games_df['gamename'].map(games_df['gamename'].value_counts()) > 80].copy()
df1['time'] = df1['time']/60  # minutes -> hours
df1
# Grouped box plot: play time per popular game.
ax = plt.gca()
# Fixed title: the original 'Steps Distribution for July' was copy-pasted
# from an unrelated step-counting notebook; this figure shows game play time.
ax.set_title('Game Play Distribution by Game\n')
df1.boxplot(column=["time"], by='gamename',ax=ax, notch=True, vert=False)
plt.xlabel("Hours Played")
plt.ylabel("Games")
plt.show()
# Re-draw the grouped box plot with a clipped x-axis and save it to disk.
ax = df1.boxplot(column=["time"], by='gamename', notch=True, vert=False)
fig = ax.get_figure()
fig.suptitle('')  # pandas adds an automatic super-title; blank it out
ax.set_title('Play-time Distribution')
plt.xlabel("Hours Played")
ax.set_xlim([0, 2000])
plt.ylabel("Game")
plt.savefig("playTimes.png", dpi=300, bbox_inches = "tight")
# Overall this is really interesting to see how the distributions for different games vary. In the future I am going to re-run some of these analytics with even more data and possibly put them on my website as a interactive graph.
| notebooks/steam/steam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import os.path
# Pin all CUDA work to the first GPU before any framework initializes.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# ## Checkpoints
# Checkpoint names per experiment family. Each list holds several training
# snapshots (the suffix is the step count); the driver loop at the bottom of
# the file builds one submission per checkpoint.
model_B = [
    '9ZB-000-link-edges.660000',
    '9ZB-000-link-edges.560000',
    '9ZB-000-link-edges.460000',
    '9ZB2-000-link-edges-4xlowerlr.660000',
    '9ZB2-000-link-edges-4xlowerlr.560000',
    '9ZB2-000-link-edges-4xlowerlr.460000',
]
# +
model_F = [
    '9ZF-000-ablation-study-high-batch-size-2.660000',
    '9ZF-000-ablation-study-high-batch-size-2.560000',
    '9ZF-000-ablation-study-high-batch-size-2.460000',
]
model_F2 = [
    '9ZF2-000-ablation-study-high-batch-size-augment-really-this-time.206250',
    '9ZF2-000-ablation-study-high-batch-size-augment-really-this-time.176250',
    '9ZF2-000-ablation-study-high-batch-size-augment-really-this-time.146250',
]
model_F3 = [
    '9ZF3-000-ablation-study-remove-global-state.206250',
    '9ZF3-000-ablation-study-remove-global-state.176250',
    '9ZF3-000-ablation-study-remove-global-state.146250',
]
model_F5 = [
    '9ZF5-000-ablation-study-no-pairs-embeddings-and-one-preprocessing-edge-pairs.206250',
    '9ZF5-000-ablation-study-no-pairs-embeddings-and-one-preprocessing-edge-pairs.176250',
    '9ZF5-000-ablation-study-no-pairs-embeddings-and-one-preprocessing-edge-pairs.146250',
]
# -
model_G = [
    '9ZG-000-vanilla-deep-radam.3300000',
    '9ZG-000-vanilla-deep-radam.3000000',
    '9ZG-000-vanilla-deep-radam.2700000'
]
model_G2 = [
    '9ZG2-000-vanilla-deep-radam-300-10.1006500',
    '9ZG2-000-vanilla-deep-radam-300-10.856500',
    '9ZG2-000-vanilla-deep-radam-300-10.706500',
]
model_G4 = [
    '9ZG4-000-vanilla-deep-radam.660000',
    '9ZG4-000-vanilla-deep-radam.560000',
    '9ZG4-000-vanilla-deep-radam.460000'
]
model_G5 = [
    '9ZG5-000-vanilla-deep-radam-mse-clip.660000',
    '9ZG5-000-vanilla-deep-radam-mse-clip.560000',
    '9ZG5-000-vanilla-deep-radam-mse-clip.460000',
]
model_I = [
    '9ZI-007-distributionnal-loss.660000',
    '9ZI-005-distributionnal-loss.560000',
    '9ZI-005-distributionnal-loss.460000',
]
# # Submit / valid
# ## Utils
from common import *
from dataset.dataset_9ZB_117_edge_link import EdgeBasedDataset, DataLoader
from sklearn.model_selection import train_test_split
from torch_geometric.data import Batch
from tensorboardX import SummaryWriter
from scheduler_superconvergence_09J import *
from torch_geometric.data import DataListLoader
from torch_scatter import scatter_add
from importancer import get_tags, select_tags
# +
def init_dataset():
    """Build train/valid DataLoaders (action == 'train') or a full-dataset submit loader.

    Relies on notebook-level globals: action, to_load, dataset, batch_size,
    valid_batch_size_factor, parallel_gpu, num_workers.
    """
    global train_loader
    global train_small_loader
    global valid_loader
    global train_indices
    global valid_indices
    global submit_loader
    if action == 'train':
        # Reuse the split stored in the checkpoint when available, so the
        # validation molecules match the ones held out during training.
        if to_load:
            train_indices = to_load['train_indices']
            valid_indices = to_load['valid_indices']
        else:
            indices = list(range(len(dataset)))
            train_indices, valid_indices = train_test_split(indices, test_size = 5000, random_state = 1234)
        # Carve out a small training subset for cheap on-train evaluation.
        train_big_indices, train_small_indices = train_test_split(list(range(len(train_indices))), test_size = 5000, random_state = 1234)
        train = torch.utils.data.Subset(dataset, train_indices)
        train_small = torch.utils.data.Subset(train, train_small_indices)
        valid = torch.utils.data.Subset(dataset, valid_indices)
        if not parallel_gpu:
            train_loader = DataLoader(train, batch_size = batch_size, drop_last = True, shuffle = True, follow_batch=['edge_attr_numeric'], num_workers=num_workers)
            train_small_loader = DataLoader(train_small, batch_size = batch_size * valid_batch_size_factor, drop_last = True, shuffle = True, follow_batch=['edge_attr_numeric'], num_workers=num_workers)
            valid_loader = DataLoader(valid, batch_size = batch_size * valid_batch_size_factor, drop_last = True, shuffle = True, follow_batch=['edge_attr_numeric'], num_workers=num_workers)
        else:
            train_loader = DataListLoader(train, batch_size = batch_size, shuffle = True, num_workers=num_workers)
            valid_loader = DataListLoader(valid, batch_size = batch_size * valid_batch_size_factor, shuffle = True, num_workers=num_workers)
        if False and "benchmark":  # flip to True to time one pass of loading
            for batch in tqdm.tqdm_notebook(train_loader):
                pass
    else:
        # Submission: iterate the whole dataset in order, keeping every row.
        if not parallel_gpu:
            submit_loader = DataLoader(dataset, batch_size = batch_size * valid_batch_size_factor, drop_last = False, shuffle = False, follow_batch=['edge_attr_numeric'], num_workers=num_workers)
        else:
            raise ValueError
        if False and "benchmark":
            for batch in tqdm.tqdm_notebook(submit_loader):
                pass
def init_model():
    """Instantiate MEGNetList from notebook globals and load checkpoint weights.

    Uses globals: MEGNetList (set per experiment by the driver loop), layer
    sizes/embedding counts set by load_*_dataset(), to_load, parallel_gpu, device.
    """
    global model
    global MEGNetList
    model = MEGNetList(
        layer_count,
        atom_embedding_count, bond_ebedding_count, global_embedding_count,
        atom_input_size, bond_input_size, global_input_size,
        hidden,
        target_means, target_stds)
    if to_load:
        model.load_state_dict(to_load['model'])
    if not parallel_gpu:
        model = model.to(device)
    else:
        # Multi-GPU path: keep the master copy on the first GPU.
        model = model.to('cuda:0')
def batch_submit():
    """Run the model on the global `batch`; returns a 1-D numpy array of predictions.

    Gradient tracking is disabled (inference only). Mutates the global `batch`
    by moving it to `device`.
    """
    global batch
    with torch.no_grad():
        # BATCH
        batch = batch.to(device)
        # PREDICT
        out = model.forward(
            [batch.x_numeric],
            batch.x_embeddings,
            [batch.edge_attr_numeric],
            batch.edge_attr_embeddings,
            [batch.u_numeric],
            batch.u_embeddings,
            batch.edge_index,
            batch.batch,
            batch.edge_attr_numeric_batch,
            batch.y_types,
            batch.cycles_edge_index,
            batch.cycles_id,
            batch.edges_connectivity_ids,
            batch.edges_connectivity_features,
        )
        return out.squeeze(1).cpu().numpy()
def submit(loader):
    """Predict every batch in `loader` and return a per-edge predictions DataFrame.

    Returns the sentinel string "escape" if interrupted with Ctrl-C; callers
    must check for it before treating the result as a DataFrame.
    """
    global batch
    model.eval()
    predictions = dataset.bond_descriptors.reset_index()[['type', 'edge_index', 'atom_index_0', 'atom_index_1', 'molecule_id']]
    predictions = predictions.loc[predictions['molecule_id'].isin(dataset.molecules_ids)]
    predictions['prediction'] = np.nan
    molecule_map = {k : v for k, v in zip(dataset.molecules_ids, dataset.molecules)}
    predictions['molecule_name'] = predictions['molecule_id'].map(molecule_map)
    predictions = predictions.set_index('molecule_id')
    current_index = 0  # NOTE(review): unused leftover variable
    for batch in tqdm.tqdm_notebook(loader):
        try:
            molecule_ids = batch.molecule_ids.numpy()
            prediction = batch_submit()
            # Writes all edges of the batch's molecules at once; presumably the
            # frame's row order matches batch_submit's output order -- TODO confirm.
            predictions.loc[molecule_ids, 'prediction'] = prediction
        except KeyboardInterrupt:
            print("Escaping")
            return "escape"
    return predictions
# +
def load_submit_dataset():
    """Load the *test* EdgeBasedDataset and publish its dimensions and
    per-type target statistics as notebook globals.

    NOTE(review): near-duplicate of load_train_dataset below -- only
    submit_dataset_name differs; the two could be merged with a parameter.
    """
    global dataset
    global global_embedding_count
    global atom_embedding_count
    global bond_ebedding_count
    global global_numeric_count
    global bond_numeric_count
    global atom_numeric_count
    global target_means
    global target_stds
    global atom_input_size
    global bond_input_size
    global global_input_size
    global action
    action = 'submit'
    submit_dataset_name = 'test'
    if action == 'train':
        dataset = EdgeBasedDataset(name = 'train')
    else:
        dataset = EdgeBasedDataset(name = submit_dataset_name)
    # Per-coupling-type robust location (median) and scale (std), computed on
    # training molecules only; used to normalize the model's targets.
    target_stats = dataset.bond_descriptors.loc[(dataset.bond_descriptors['type'] != 'VOID') & dataset.bond_descriptors.index.isin(dataset.dataset.loc[dataset.dataset['dataset'] == 'train', 'molecule_id'])].groupby('type_id')['scalar_coupling_constant'].agg(['std', 'median'])
    target_means = target_stats['median'].values
    target_stds = target_stats['std'].values
    target_stats  # no-op inside a function (leftover notebook cell echo)
    # Inputs
    sample = dataset[0]
    print(sample)
    global_embedding_count = dataset.global_embedding_count
    atom_embedding_count = dataset.atom_embedding_count
    bond_ebedding_count = dataset.bond_ebedding_count
    global_numeric_count = sample.u_numeric.size(1)
    bond_numeric_count = sample.edge_attr_numeric.size(1)
    atom_numeric_count = sample.x_numeric.size(1)
    atom_input_size = [(atom_numeric_count, hidden)]
    bond_input_size = [(bond_numeric_count, hidden)]
    global_input_size = [(global_numeric_count, hidden)]
def load_train_dataset():
    """Load the *train* EdgeBasedDataset and publish its dimensions and
    per-type target statistics as notebook globals.

    NOTE(review): near-duplicate of load_submit_dataset above -- only
    submit_dataset_name differs.
    """
    global dataset
    global global_embedding_count
    global atom_embedding_count
    global bond_ebedding_count
    global global_numeric_count
    global bond_numeric_count
    global atom_numeric_count
    global target_means
    global target_stds
    global atom_input_size
    global bond_input_size
    global global_input_size
    global action
    # action stays 'submit' on purpose: the train set is loaded in inference
    # mode (no training), to score the held-out validation molecules.
    action = 'submit'
    submit_dataset_name = 'train'
    if action == 'train':
        dataset = EdgeBasedDataset(name = 'train')
    else:
        dataset = EdgeBasedDataset(name = submit_dataset_name)
    # Per-coupling-type robust location/scale computed on training molecules.
    target_stats = dataset.bond_descriptors.loc[(dataset.bond_descriptors['type'] != 'VOID') & dataset.bond_descriptors.index.isin(dataset.dataset.loc[dataset.dataset['dataset'] == 'train', 'molecule_id'])].groupby('type_id')['scalar_coupling_constant'].agg(['std', 'median'])
    target_means = target_stats['median'].values
    target_stds = target_stats['std'].values
    target_stats  # no-op inside a function (leftover notebook cell echo)
    # Inputs
    sample = dataset[0]
    print(sample)
    global_embedding_count = dataset.global_embedding_count
    atom_embedding_count = dataset.atom_embedding_count
    bond_ebedding_count = dataset.bond_ebedding_count
    global_numeric_count = sample.u_numeric.size(1)
    bond_numeric_count = sample.edge_attr_numeric.size(1)
    atom_numeric_count = sample.x_numeric.size(1)
    atom_input_size = [(atom_numeric_count, hidden)]
    bond_input_size = [(bond_numeric_count, hidden)]
    global_input_size = [(global_numeric_count, hidden)]
def package_submit_prediction(submit_predictions):
    """Merge per-edge predictions onto test.csv and return the submission frame.

    The atom pair may be stored in either orientation in `submit_predictions`,
    so the merge is attempted in both directions. The hard-coded counts are
    dataset-specific constants for the CHAMPS competition files.

    NOTE(review): __file__ is undefined in a live notebook kernel; this only
    works when the converted .py script is executed directly.
    """
    assert submit_predictions.loc[submit_predictions['prediction'].notnull()].shape[0] == 7223027
    script_dir = os.path.abspath(os.path.dirname(__file__))
    test = pd.read_csv(script_dir + '/../../data/test.csv')
    test['prediction'] = np.nan
    # Direct orientation first, then the swapped (atom_index_1, atom_index_0) one.
    pred_1 = pd.merge(test[['molecule_name', 'atom_index_0', 'atom_index_1']], submit_predictions[['molecule_name', 'atom_index_0', 'atom_index_1', 'prediction']], left_on = ['molecule_name', 'atom_index_0', 'atom_index_1'], right_on = ['molecule_name', 'atom_index_0', 'atom_index_1'], how = 'left')['prediction']
    pred_2 = pd.merge(test[['molecule_name', 'atom_index_0', 'atom_index_1']], submit_predictions[['molecule_name', 'atom_index_0', 'atom_index_1', 'prediction']], left_on = ['molecule_name', 'atom_index_0', 'atom_index_1'], right_on = ['molecule_name', 'atom_index_1', 'atom_index_0'], how = 'left')['prediction']
    test['prediction'] = pred_1
    test.loc[test['prediction'].isnull(), 'prediction'] = pred_2
    test = test[['id', 'prediction']].rename(columns = {'prediction' : 'scalar_coupling_constant'})
    # (Removed a second rename of 'prediction' that was a no-op: the column had
    # already been renamed on the line above.)
    assert test['scalar_coupling_constant'].isnull().sum() == 0
    assert test['scalar_coupling_constant'].notnull().sum() == 2505542
    return test
def package_train_prediction(train_predictions):
    """Merge per-edge predictions back onto train.csv and tag rows train/valid.

    Hard-coded counts are dataset-specific constants (4,658,147 is the row
    count of train.csv). Uses the global `dataset` to rebuild the split.

    NOTE(review): __file__ is undefined in a live notebook kernel; this only
    works when the converted .py script is executed directly.
    """
    assert train_predictions.loc[train_predictions['prediction'].notnull()].shape[0] == 13432555
    script_dir = os.path.abspath(os.path.dirname(__file__))
    train = pd.read_csv(script_dir + '/../../data/train.csv')
    train['prediction'] = np.nan
    # The atom pair may be stored in either orientation; try direct first,
    # then the swapped (atom_index_1, atom_index_0) orientation.
    pred_1 = pd.merge(train[['molecule_name', 'atom_index_0', 'atom_index_1']], train_predictions[['molecule_name', 'atom_index_0', 'atom_index_1', 'prediction']], left_on = ['molecule_name', 'atom_index_0', 'atom_index_1'], right_on = ['molecule_name', 'atom_index_0', 'atom_index_1'], how = 'left')['prediction']
    pred_2 = pd.merge(train[['molecule_name', 'atom_index_0', 'atom_index_1']], train_predictions[['molecule_name', 'atom_index_0', 'atom_index_1', 'prediction']], left_on = ['molecule_name', 'atom_index_0', 'atom_index_1'], right_on = ['molecule_name', 'atom_index_1', 'atom_index_0'], how = 'left')['prediction']
    train['prediction'] = pred_1
    train.loc[train['prediction'].isnull(), 'prediction'] = pred_2
    assert train['prediction'].isnull().sum() == 0
    assert train['prediction'].notnull().sum() == 4658147
    train['dataset'] = 'train'
    # Recreate the deterministic split used at training time (same seed).
    indices = list(range(len(dataset)))
    train_indices, valid_indices = train_test_split(indices, test_size = 5000, random_state = 1234)
    valid_molecules = [dataset.molecules[i] for i in valid_indices]
    train.loc[train['molecule_name'].isin(valid_molecules), 'dataset'] = 'valid'
    assert train.loc[train['dataset'] == 'valid', 'molecule_name'].nunique() == 5000
    return train
def analyze(train):
train = train.loc[train['dataset'] == 'valid']
train['mae'] = (train['prediction'] - train['scalar_coupling_constant']).abs()
print(np.log(train.groupby('type')['mae'].mean()).mean())
#print(np.log(train.groupby('type')['mae'].mean()))
# -
def init_valid_dataset():
    """Like init_dataset(), but in submit mode the loader iterates only the
    5000 held-out validation molecules (recreated with the training split).

    Relies on notebook-level globals: action, to_load, dataset, batch_size,
    valid_batch_size_factor, parallel_gpu, num_workers.
    """
    global train_loader
    global train_small_loader
    global valid_loader
    global train_indices
    global valid_indices
    global submit_loader
    if action == 'train':
        if to_load:
            train_indices = to_load['train_indices']
            valid_indices = to_load['valid_indices']
        else:
            indices = list(range(len(dataset)))
            train_indices, valid_indices = train_test_split(indices, test_size = 5000, random_state = 1234)
        train_big_indices, train_small_indices = train_test_split(list(range(len(train_indices))), test_size = 5000, random_state = 1234)
        train = torch.utils.data.Subset(dataset, train_indices)
        train_small = torch.utils.data.Subset(train, train_small_indices)
        valid = torch.utils.data.Subset(dataset, valid_indices)
        if not parallel_gpu:
            train_loader = DataLoader(train, batch_size = batch_size, drop_last = True, shuffle = True, follow_batch=['edge_attr_numeric'], num_workers=num_workers)
            train_small_loader = DataLoader(train_small, batch_size = batch_size * valid_batch_size_factor, drop_last = True, shuffle = True, follow_batch=['edge_attr_numeric'], num_workers=num_workers)
            valid_loader = DataLoader(valid, batch_size = batch_size * valid_batch_size_factor, drop_last = True, shuffle = True, follow_batch=['edge_attr_numeric'], num_workers=num_workers)
        else:
            train_loader = DataListLoader(train, batch_size = batch_size, shuffle = True, num_workers=num_workers)
            valid_loader = DataListLoader(valid, batch_size = batch_size * valid_batch_size_factor, shuffle = True, num_workers=num_workers)
        if False and "benchmark":  # flip to True to time one pass of loading
            for batch in tqdm.tqdm_notebook(train_loader):
                pass
    else:
        # Submit mode: rebuild the deterministic split, then serve only the
        # validation subset in order (no shuffle, keep every row).
        if to_load:
            train_indices = to_load['train_indices']
            valid_indices = to_load['valid_indices']
        else:
            indices = list(range(len(dataset)))
            train_indices, valid_indices = train_test_split(indices, test_size = 5000, random_state = 1234)
        train_big_indices, train_small_indices = train_test_split(list(range(len(train_indices))), test_size = 5000, random_state = 1234)
        train = torch.utils.data.Subset(dataset, train_indices)
        train_small = torch.utils.data.Subset(train, train_small_indices)
        valid = torch.utils.data.Subset(dataset, valid_indices)
        if not parallel_gpu:
            submit_loader = DataLoader(valid, batch_size = batch_size * valid_batch_size_factor, drop_last = False, shuffle = False, follow_batch=['edge_attr_numeric'], num_workers=num_workers)
        else:
            raise ValueError
        if False and "benchmark":
            for batch in tqdm.tqdm_notebook(submit_loader):
                pass
def package_valid_prediction(train_predictions):
    """Merge validation-only predictions onto train.csv; returns only the
    rows that received a prediction, tagged with dataset == 'valid'.

    Hard-coded counts are dataset-specific (275,505 predicted rows out of
    4,658,147 total rows in train.csv).

    NOTE(review): __file__ is undefined in a live notebook kernel; this only
    works when the converted .py script is executed directly.
    """
    assert train_predictions.loc[train_predictions['prediction'].notnull()].shape[0] == 792626
    script_dir = os.path.abspath(os.path.dirname(__file__))
    train = pd.read_csv(script_dir + '/../../data/train.csv')
    train['prediction'] = np.nan
    # The atom pair may be stored in either orientation; try both merges.
    pred_1 = pd.merge(train[['molecule_name', 'atom_index_0', 'atom_index_1']], train_predictions[['molecule_name', 'atom_index_0', 'atom_index_1', 'prediction']], left_on = ['molecule_name', 'atom_index_0', 'atom_index_1'], right_on = ['molecule_name', 'atom_index_0', 'atom_index_1'], how = 'left')['prediction']
    pred_2 = pd.merge(train[['molecule_name', 'atom_index_0', 'atom_index_1']], train_predictions[['molecule_name', 'atom_index_0', 'atom_index_1', 'prediction']], left_on = ['molecule_name', 'atom_index_0', 'atom_index_1'], right_on = ['molecule_name', 'atom_index_1', 'atom_index_0'], how = 'left')['prediction']
    train['prediction'] = pred_1
    train.loc[train['prediction'].isnull(), 'prediction'] = pred_2
    assert train['prediction'].isnull().sum() == 4382642
    assert train['prediction'].notnull().sum() == 275505
    train['dataset'] = 'train'
    # Recreate the deterministic split used at training time (same seed).
    indices = list(range(len(dataset)))
    train_indices, valid_indices = train_test_split(indices, test_size = 5000, random_state = 1234)
    valid_molecules = [dataset.molecules[i] for i in valid_indices]
    train.loc[train['molecule_name'].isin(valid_molecules), 'dataset'] = 'valid'
    assert train.loc[train['dataset'] == 'valid', 'molecule_name'].nunique() == 5000
    train = train.loc[train['prediction'].notnull()]
    return train
# ## classical
import layers.layers_09ZB_link_edge
import layers.layers_09ZF_ablation_study
import layers.layers_09ZF3_ablation_study_remove_global_state
import layers.layers_09ZF5_ablation_study_no_edge_pairs_embeddings_and_one_preprocessing
import layers.layers_09ZI_distributionnal_loss
# One config per experiment family: the checkpoint name list plus the
# hyper-parameters (hidden width, MEGNet layer count, batch size) and the
# layers module needed to rebuild that model for inference.
models_data = [
    {
        'names' : model_B,
        'hidden' : 300,
        'layer_count' : 6,
        'batch_size' : 20,
        'module' : layers.layers_09ZB_link_edge
    },
    {
        'names' : model_F,
        'hidden' : 300,
        'layer_count' : 6,
        'batch_size' : 20,
        'module' : layers.layers_09ZF_ablation_study
    },
    {
        'names' : model_F2,
        'hidden' : 200,
        'layer_count' : 6,
        'batch_size' : 64,
        'module' : layers.layers_09ZF_ablation_study
    },
    {
        'names' : model_F3,
        'hidden' : 200,
        'layer_count' : 6,
        'batch_size' : 64,
        'module' : layers.layers_09ZF3_ablation_study_remove_global_state
    },
    {
        'names' : model_F5,
        'hidden' : 200,
        'layer_count' : 6,
        'batch_size' : 64,
        'module' : layers.layers_09ZF5_ablation_study_no_edge_pairs_embeddings_and_one_preprocessing
    },
    {
        'names' : model_G,
        'hidden' : 500,
        'layer_count' : 10,
        'batch_size' : 4,
        'module' : layers.layers_09ZB_link_edge
    },
    {
        'names' : model_G2,
        'hidden' : 300,
        'layer_count' : 10,
        'batch_size' : 20,
        'module' : layers.layers_09ZB_link_edge
    },
    {
        'names' : model_G4,
        'hidden' : 300,
        'layer_count' : 6,
        'batch_size' : 20,
        'module' : layers.layers_09ZB_link_edge
    },
    {
        'names' : model_G5,
        'hidden' : 300,
        'layer_count' : 6,
        'batch_size' : 20,
        'module' : layers.layers_09ZB_link_edge
    },
    {
        'names' : model_I,
        'hidden' : 300,
        'layer_count' : 6,
        'batch_size' : 20,
        'module' : layers.layers_09ZI_distributionnal_loss
    },
]
device = 'cuda'
# Discretization used by the distributional head: 1040 bins spanning roughly
# [-40, 220) in target units (~0.25 per bin); `centers` is shifted by half a
# bin width so it holds bin midpoints, shaped (1, bin_count) for broadcasting.
bin_count = 260 * 4
centers = np.linspace(-40, 220 - 1 / 4, bin_count)
delta = (centers[1] - centers[0]) / 2
centers += delta
delta  # notebook echo; no effect
centers = torch.tensor(centers.reshape(1, -1), dtype = torch.float32).to(device)
def batch_submit():
    """Distributional variant -- overrides the earlier batch_submit definition.

    The model output is weighted by the bin `centers` and summed per row,
    i.e. the scalar prediction is an expectation over the bins (presumably
    the output is a probability mass over bins -- confirm against the
    09ZI distributional-loss layers module).
    """
    global batch
    with torch.no_grad():
        # BATCH
        batch = batch.to(device)
        # PREDICT
        out = model.forward(
            [batch.x_numeric],
            batch.x_embeddings,
            [batch.edge_attr_numeric],
            batch.edge_attr_embeddings,
            [batch.u_numeric],
            batch.u_embeddings,
            batch.edge_index,
            batch.batch,
            batch.edge_attr_numeric_batch,
            batch.y_types,
            batch.cycles_edge_index,
            batch.cycles_id,
            batch.edges_connectivity_ids,
            batch.edges_connectivity_features,
        )
        out = out * centers
        return out.sum(dim = 1).cpu().numpy()
# +
num_workers = 7
device = 'cuda'
parallel_gpu = False
# Config
# Driver loop: for each experiment family and each checkpoint, download the
# weights from S3 if needed, score the validation molecules, write the Kaggle
# submission, then delete the local weights file.
for model_data in models_data:
    for model_name in model_data['names']:
        sub_path = f'submissions/submission_{model_name}.csv'
        valid_path = f'submissions/train_{model_name}.csv'
        model_file = f'model.{model_name}.bin'
        if not os.path.isfile(sub_path):  # skip checkpoints already submitted
            print("predict", model_name)
            hidden = model_data['hidden']
            layer_count = model_data['layer_count']
            batch_size = model_data['batch_size']
            MEGNetList = model_data['module'].MEGNetList
            valid_batch_size_factor = 5
            if not os.path.isfile(f'model_data/{model_file}'):
                os.system(f'aws s3 cp s3://grjhuard-eu-west-1/model_data/{model_file} model_data/{model_file}')
            to_load = torch.load(f'model_data/{model_file}', map_location = 'cpu')
            # Predict
            # NOTE(review): submit() returns the string "escape" on Ctrl-C;
            # the package_* calls below would fail on that sentinel.
            load_train_dataset()
            init_model()
            init_valid_dataset()
            train_predictions = submit(submit_loader)
            train = package_valid_prediction(train_predictions)
            train.to_csv(valid_path, index = False)
            analyze(train)
            load_submit_dataset()
            init_model()
            init_dataset()
            submit_predictions = submit(submit_loader)
            test = package_submit_prediction(submit_predictions)
            test.to_csv(sub_path, index = False)
            os.system(f'rm model_data/{model_file}')
        else:
            print("already done", model_name)
| solutions/5/guillaume/401. Make subs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import csv
import pandas as pd
import scipy as sp
import matplotlib.mlab as mlab
import matplotlib.artist as artist
from pathlib import Path
import tkinter as tk
from tkinter import filedialog
import itertools
# Select the folder containing the Master Sheet I shared with you.
# + jupyter={"outputs_hidden": true}
# Prompt for the data folder and load the Master Sheet CSV into a DataFrame.
askdirectory = filedialog.askdirectory() # show an "Open" dialog box and select the folder
path = Path(askdirectory)
data = pd.read_csv(path/('MasterSheet.csv'), encoding='utf-8') #read the Master Sheet
data
# -
# These are the names in my data frame; since I'll use them over and over again I'd rather just declare them.
# +
# Column-name constants for the Master Sheet DataFrame. The names double as
# LaTeX-formatted plot labels, so they are declared once and reused below.
tubulin = '[Tubulin] ' r'$(\mu M)$'
tub = 'tub'
DCXconc = '[DCX] ' r'$(n M)$'
DCX = 'DCX'
Type = 'DCX Type'
Concentration = 'Concentration ' r'$(\mu M)$'
Length = 'Length ' r'$(\mu m)$'
Lifetime = 'Lifetime ' r'$(min)$'
GrowthRate = 'Growth Rate ' r'$(\mu m / min)$'
TimeToNucleate = 'Time to Nucleate ' r'$(min)$'
ShrinkageLength = 'Shrink Length ' r'$(\mu m)$'
ShrinkageLifetime = 'Shrink Lifetime ' r'$(min)$'
ShrinkageRate = 'Shrink Rate ' r'$(\mu m / min)$'
# The four measured parameters that get fitted and plotted.
parameters = [GrowthRate,TimeToNucleate,Lifetime,ShrinkageRate]
# -
# # Fitting Data #
# First declare the functions you are going to fit to. Here x is the variable and the other inputs are the distribution's parameters.
# +
def gaussian(x, mu, sig):
    """Normal probability density with mean `mu` and standard deviation `sig`."""
    z = (x - mu) / sig
    return np.exp(-0.5 * z * z) / (sig * np.sqrt(2 * np.pi))
def exponential(x, scale):
    """Exponential probability density with mean `scale`."""
    return np.exp(-x / scale) / scale
def gamma(x, shape, scale):
    """Gamma probability density with the given shape and scale parameters."""
    normalizer = sp.special.gamma(shape) * np.power(scale, shape)
    return np.power(x, shape - 1) * np.exp(-x / scale) / normalizer
# -
# Then I make a function to extract a particular set of data and make a histogram. When matplotlib.pyplot makes a histogram, it saves the info on the bins used and the value of each bin.
def make_hist(data, parameter, tubconc, dcxtype, dcxconc):
    """Return (bin values, bin edges) of a density histogram for one condition.

    Filters `data` to a single (tubulin conc, DCX mutant, DCX conc) condition
    and histograms the requested parameter column without displaying a figure.
    """
    condition = (data[tubulin] == tubconc) & (data[Type] == dcxtype) & (data[DCXconc] == dcxconc)
    subset = data[condition]
    # Growth-rate histograms span [0, 1.5]; every other parameter spans [0, 30].
    maxbin, binsize = (1.5, 0.05) if parameter == GrowthRate else (30, 1)
    edges = np.arange(0, maxbin + binsize, binsize)
    n, bins, patches = plt.hist(subset[parameter], bins=edges, density=True)
    plt.clf()  # discard the figure; only the numeric histogram is needed
    return n, bins
# Next is the 'Master' fitting function where I only have to give it a dataframe with all my data (the Master Sheet) and the parameter I want to plot (e.g GrowthRate). Inside the function I loop for every tubulin concentration, dcx mutant and DCX concentration. Then it uses the previous function to get the histogram info. With this info it fits a curve with optimize. The optimize function outputs the fitting coefficients and the variance matrix. From the matrix you can get the error after doing some simple math. Finally I make a dataframe that contains the coefficients and error for each condition.
def equation_fit(data, parameter):
    """Fit the appropriate PDF to the histogram of `parameter` for every
    (tubulin, DCX type, DCX concentration) condition.

    Returns a DataFrame with one pair of rows ('Value', 'SE') per condition,
    holding each fitted coefficient and its standard error.
    """
    if (parameter == GrowthRate) | (parameter == ShrinkageRate) : #Choose an equation given a parameter to fit
        equation = gaussian
    elif parameter == TimeToNucleate :
        equation = exponential
    elif parameter == Lifetime :
        equation = gamma
    results = pd.DataFrame(columns=[] , index=[]) #Declare an empty dataframe where we'll later put the results in.
    for tubconc in data[tubulin].unique(): #Looping over all of my conditions
        for dcxtype in data[Type].unique():
            for dcxconc in data[DCXconc].unique():
                n, bins = make_hist(data, parameter, tubconc, dcxtype, dcxconc) #Make one histogram per condition
                if np.isnan(np.sum(n)) == True: #If the condition doesn't exist, skip the loop (e.g. DCX Type = None, [DCX] = 50nM)
                    continue
                if equation == gamma : #The optimize function starts with a set of parameters and iterates to minimize the error.
                    #The default starting parameter is 1, but the gamma function needs something other than the default to work.
                    coeff, var_matrix = sp.optimize.curve_fit(equation,bins[:-1],n,[2,1])
                else :
                    coeff, var_matrix = sp.optimize.curve_fit(equation,bins[:-1],n) #Give optimize the function of interest, and the info we got from the histogram
                variance = np.diagonal(var_matrix) #This is the math you have to do to extract the error from the output matrix
                SE = np.sqrt(variance) #SE for Standard Error
                #======Making a data frame========
                results0 = pd.DataFrame(columns=[] , index=[]) #Declare a dataframe to put this loop's coefficients
                for k in np.arange(0,len(coeff)):
                    # Two-level column header: (parameter, 'Coefficient k').
                    header = [np.array([parameter]),np.array(['Coefficient '+ str(k)])]
                    r0 = pd.DataFrame([coeff[k],SE[k]], index=(['Value','SE']),columns= header)
                    results0 = pd.concat([results0, r0], axis=1, sort=False)
                results0[tubulin] = tubconc #Adding the concentration info to the coefficients we just saved
                results0[Type] = dcxtype
                results0[DCXconc] = dcxconc
                results = pd.concat([results, results0], sort=False) #Concatenate to the big result dataframe
    return results
# Then just run the function and voilà -- done. This is all you have to do to get the coefficients, which are also the means for the exponential and gaussian distributions; but for Lifetime there are a couple more steps you have to do on the gamma coefficients to get the mean and mean error. I haven't included them right now to keep things simple, but if you're interested just ask :)
# Fit each of the four measured parameters to its distribution.
GrowthRateFit = equation_fit(data, GrowthRate);
TimeToNucleateFit = equation_fit(data, TimeToNucleate);
LifetimeFit = equation_fit(data, Lifetime);
ShrinkageRateFit = equation_fit(data, ShrinkageRate);
# Concatenate the results from above
ResultFit = pd.concat([GrowthRateFit, TimeToNucleateFit,LifetimeFit,ShrinkageRateFit], axis=1, sort=False)
# The condition columns repeat across the four frames; keep the first copy.
ResultFit = ResultFit.loc[:,~ResultFit.columns.duplicated()]
# To plot the histogram with the fitted fuctions I use the following:
def plot_hist(data, tubconc, dcxtype, dcxconc) :
    """Plot 2x2 histograms for one experimental condition and overlay the fitted curves.

    Panels: growth rate (gaussian), time to nucleate (exponential),
    lifetime (gamma), shrinkage rate (gaussian). Fit coefficients are read
    from the module-level ``ResultFit`` dataframe.

    Parameters
    ----------
    data : pd.DataFrame
        Master sheet with one column per measured parameter.
    tubconc, dcxtype, dcxconc :
        Tubulin concentration, DCX type and DCX concentration that select
        the rows of one experimental condition.

    Returns
    -------
    int
        Number of fully-measured microtubules (rows without NaN) in this condition.
    """
    # Select the rows of the master sheet belonging to this condition.
    selectdata = data[(data[tubulin]==tubconc)&(data[Type]==dcxtype)&(data[DCXconc]==dcxconc)]
    fig, ax = plt.subplots(2,2,figsize=(15,15))  # one panel per parameter
    n = len(selectdata.dropna().index)  # how many microtubules were analyzed per histogram
    c = 0
    for i in np.arange(len(ax)):
        for j in np.arange(len(ax)):
            parameter = parameters[c]
            # Growth rate lives on a much smaller scale than the other parameters.
            if parameter == GrowthRate :
                maxbin = 1.5
                binsize = 0.025
            else:
                maxbin = 30
                binsize = 0.5
            ax[i][j].hist(selectdata[parameter], bins=np.arange(0, maxbin + binsize, binsize), density=True);
            ax[i][j].set_title(parameter)
            # BUGFIX: set the x-limit of the CURRENT panel. The original code
            # called ax[1][1].set_xlim(0, maxbin) inside the loop, which only
            # ever (re)set the last panel's limits.
            ax[i][j].set_xlim(0, maxbin)
            c += 1
    # Filter the fit-result dataframe down to this exact condition once,
    # instead of repeating the same boolean mask for every coefficient lookup.
    selectcoeff = ResultFit[ResultFit[tubulin]==tubconc]
    selectcoeff = selectcoeff[(selectcoeff[Type] == dcxtype)&(selectcoeff[DCXconc] == dcxconc)]
    # Growth rate: gaussian fit
    x = np.arange(0, 1.5 + 0.025, 0.025)
    mu = selectcoeff[parameters[0]]['Coefficient 0'].loc['Value']
    sig = selectcoeff[parameters[0]]['Coefficient 1'].loc['Value']
    ax[0][0].plot(x, gaussian(x, mu, sig));
    # Time to nucleate: exponential fit (x range shared by the remaining panels)
    x = np.arange(0, 30 + 0.5, 0.5)
    scale = selectcoeff[parameters[1]]['Coefficient 0'].loc['Value']
    ax[0][1].plot(x, exponential(x, scale));
    # Lifetime: gamma fit
    shape = selectcoeff[parameters[2]]['Coefficient 0'].loc['Value']
    scale = selectcoeff[parameters[2]]['Coefficient 1'].loc['Value']
    ax[1][0].plot(x, gamma(x, shape, scale));
    # Shrinkage rate: gaussian fit
    mu = selectcoeff[parameters[3]]['Coefficient 0'].loc['Value']
    sig = selectcoeff[parameters[3]]['Coefficient 1'].loc['Value']
    ax[1][1].plot(x, gaussian(x, mu, sig));
    return n
# Example: plot one condition (tubulin 6, type 'P191R', DCX 35 -- units not shown here, TODO confirm)
plot_hist(data, 6, 'P191R', 35)
# I haven't bothered to make these pretty or to show the coefficients and SE's, but it's all in the Results dataframe :)
# |
# | notebooks/Sofia/onlyFitandPlotfit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Geodatenhandling 2
#
# **Inhalt:** Geopandas für Fortgeschrittene
#
# **Nötige Skills**
# - Basic pandas skills
# - Funktionen und pandas
# - Erste Schritte mit Geopandas
# - Geodatenhandling 1
#
# **Lernziele**
# - Punkte, Linien, Polygone revisited
# - Eigenschaften von geometrischen Shapes
# - Shapes modifizieren und kombinieren
# - Geodaten modifizieren und selektieren
# ## Das Beispiel
#
# Geschäfte in Chicago.
#
# Wir checken: In welchen Stadtteilen gibt es keine Lebensmittelläden, wo sind die "Food deserts"
#
# - `Boundaries - Census Tracts - 2010.zip`, census tracts in Chicago from [here](https://data.cityofchicago.org/Facilities-Geographic-Boundaries/Boundaries-Census-Tracts-2010/5jrd-6zik)
# - `Grocery_Stores_-_2013.csv`, grocery stores in Chicago from [here](https://data.cityofchicago.org/Community-Economic-Development/Grocery-Stores-2013/53t8-wyrc)
#
# **Credits to:**
# - http://www.jonathansoma.com/lede/foundations-2017/
# ## Setup
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point, LineString, Polygon
# %matplotlib inline
# ## Geometries
#
# Zum Aufwärmen, nochmals ein paar Shapes from scratch
# ### Point (again)
# A single point at (5, 5).
punkt1 = Point(5, 5)
punkt1
# ### Line (again)
# Diagonal lines of different lengths, each built from two end points.
linie1 = LineString([Point(20, 0), Point(0, 20)])
linie1
linie2 = LineString([Point(15, 0), Point(0, 15)])
linie3 = LineString([Point(25, 0), Point(0, 25)])
# ### Polygon (again)
# A 10x10 square with its lower-left corner at the origin.
polygon1 = Polygon([[0, 0], [10, 0], [10, 10], [0, 10]])
polygon1
# **Let's plot it together!**
# Collect all shapes in one GeoDataFrame so they can be drawn in a single plot.
df = pd.DataFrame({'geometry': [punkt1, linie1, linie2, linie3, polygon1]})
gdf = gpd.GeoDataFrame(df, geometry='geometry')
gdf
gdf.plot(alpha=0.5, linewidth=2, edgecolor='black', markersize=5)
# ## Shapes vergleichen
#
# Wir können geometrische Shapes auf verschiedene Weise miteinander "vergleichen".
# * **contains:** has the other object TOTALLY INSIDE (boundaries can't touch!!!) "a neighborhood CONTAINS restaurants"
# * **intersects:** is OVERLAPPING at ALL, unless it's just boundaries touching
# * **touches:** only the boundaries touch, like a tangent
# * **within:** is TOTALLY INSIDE of the other object "a restaurant is WITHIN a neighborhood"
# * **disjoint:** no touching!!! no intersecting!!!!
# * **crosses:** goes through but isn't inside - "a river crossing through a city"
#
# Referenz und weitere Vergleiche: http://geopandas.org/reference.html
# Das funktioniert ganz einfach:
# Does polygon 1 contain point 1? (Point(5, 5) lies inside the 10x10 square.)
polygon1.contains(punkt1)
# The reverse can never hold: a point cannot contain a polygon.
punkt1.contains(polygon1)
# **Quizfragen:**
# Is point 1 within polygon 1?
punkt1.within(polygon1)
# Does line 1 touch polygon 1?
linie1.touches(polygon1)
# Does line 3 intersect polygon 1?
linie3.intersects(polygon1)
# Does line 2 intersect polygon 1?
linie2.intersects(polygon1)
# Is polygon 1 completely disjoint from line 3?
polygon1.disjoint(linie3)
# ## Import
#
# Und nun zu unserem Beispiel:
# **Ein Stadtplan von Chicago mit den Quartieren (census tracts)**
# Load the census-tract polygons (shapefile) into a GeoDataFrame.
tracts = gpd.read_file("dataprojects/Food Deserts/Boundaries - Census Tracts - 2010/geo_export_085dcd7b-113c-4a6d-8d43-5926de1dcc5b.shp")
tracts.head(2)
tracts.plot()
# **Eine Liste aller Lebensmittelläden**
# The grocery stores come as a plain CSV -- no geometry attached yet.
df = pd.read_csv("dataprojects/Food Deserts/Grocery_Stores_-_2013.csv")
df.head(2)
# Um von Pandas zu Geopandas zu gelangen:
# - Geometrie erstellen
# - Geodataframe erstellen
# - Koordinatensystem initialisieren
# Build shapely Points from the lon/lat columns (x = longitude, y = latitude).
points = df.apply(lambda row: Point(row['LONGITUDE'], row['LATITUDE']), axis=1)
# Attach the WGS84 coordinate reference system directly in the constructor.
# The old `grocery_stores.crs = {'init': 'epsg:4326'}` dict form is deprecated
# in pyproj/geopandas in favor of the "EPSG:4326" string.
grocery_stores = gpd.GeoDataFrame(df, geometry=points, crs="EPSG:4326")
grocery_stores.plot()
# **Wir plotten mal alles zusammen**
# Grey base map of the tracts with the grocery stores overlaid as red points.
ax = tracts.plot(figsize=(15,15), color='lightgrey', linewidth=0.25, edgecolor='white')
grocery_stores.plot(ax=ax, color='red', markersize=8, alpha = 0.8)
# ## Analyse
#
# Uns interessiert: Wo sind die Gebiete, in denen es in einem bestimmten Umkreis von Metern keine Lebensmittelläden gibt?
#
# Um das zu beantworten, müssen wir zuerst in ein brauchbares Koordinatensystem wechseln, das auf Metern basiert.
# ### Projektion ändern
#
# Wir entscheiden uns für eine Variante der Mercator-Projektion.
# Das ist praktisch, weil:
# - "Die wichtigste Eigenschaft der Mercator-Projektion ist ihre Winkeltreue. Diese bedeutet auch, dass in kleinen Bereichen der Längenmaßstab in allen Richtungen gleich ist." https://de.wikipedia.org/wiki/Mercator-Projektion
# - Die Koordinaten sind nicht in Längen-/Breitengrad, sondern in Metern angegeben (die CH-Koordinaten sind auch eine Variante der Mercator-Projektion)
# Reproject both layers to a metre-based Mercator projection so distances
# (e.g. the 500 m buffers below) can be expressed in metres.
grocery_stores = grocery_stores.to_crs({'proj': 'merc'})
tracts = tracts.to_crs({'proj': 'merc'})
# Andere Projektionen wären:
# - 'tmerc': transverse mercator
# - 'aea': albers equal area
# **Wir haben nun ein neues Koordinatensystem**
# Same map as before, now in the new (metre-based) coordinate system.
ax = tracts.plot(figsize=(15,15), color='lightgrey', linewidth=0.25, edgecolor='white')
grocery_stores.plot(ax=ax, color='red', markersize=8, alpha = 0.8)
# ### Buffer erstellen
#
# Wie sieht die Karte aus, wenn wir um jedes Lebensmittelgeschäft einen Kreis von 500 Metern ziehen?
# Same base map, but draw a 500 m buffer circle around every store.
ax = tracts.plot(figsize=(15,15), color='lightgrey', linewidth=0.25, edgecolor='white')
grocery_stores.buffer(500).plot(ax=ax, color='red', markersize=8, alpha=0.4)
# ### Union
#
# Nächster Schritt: Wir fügen alle Punkte zu einer Fläche zusammen
# Merge all 500 m buffer circles into one (multi)polygon.
# NOTE(review): GeoSeries.unary_union is deprecated in newer geopandas
# releases in favor of union_all() -- confirm the installed version before changing.
near_area = grocery_stores.buffer(500).unary_union
# Now we can test whether the individual tracts touch this area:
# a tract disjoint from the buffer area has no store within 500 m.
tracts.disjoint(near_area)
tracts[tracts.disjoint(near_area)].plot()
# ### Plot
#
# Wir plotten dieselbe Karte wie vorher - und zusätzlich noch jene Tracts, welche die Punktefläche nicht berühren
# +
# The map from before ...
ax = tracts.plot(figsize=(15,15), color='lightgrey', linewidth=0.25, edgecolor='white')
grocery_stores.buffer(500).plot(ax=ax, color='red', markersize=8, alpha=0.4)
# New: highlight the "food desert" tracts that touch no buffer circle
tracts[tracts.disjoint(near_area)].plot(ax=ax, color='darkblue', alpha=0.4)
ax.set_title('City tracts that have no grocery store within 500m distance')
# -
# | 19 Geopandas/Geodatenhandling 2 L.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quickstart: Use Cases and Examples with *Interpretable OPE Evaluator*
# This notebook demonstrates an example of conducting Interpretable Evaluation for Off-Policy Evaluation (IEOE). We use logged bandit feedback data generated by modifying multiclass classification data using [`obp`](https://github.com/st-tech/zr-obp) and evaluate the performance of Direct Method (DM), Doubly Robust (DR), Doubly Robust with Shrinkage (DRos), and Inverse Probability Weighting (IPW).
#
# Our example contains the following three major steps:
#
# 1. Data Preparation
# 2. Setting Hyperparameter Spaces for Off-Policy Evaluation
# 3. Interpretable Evaluation for Off-Policy Evaluation
# +
import numpy as np
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier as RandomForest
from sklearn.model_selection import RandomizedSearchCV
# import necessary functions from obp
import obp
from obp.dataset import MultiClassToBanditReduction
from obp.policy import IPWLearner
from obp.ope import (
DirectMethod,
DoublyRobust,
DoublyRobustWithShrinkage,
InverseProbabilityWeighting,
)
# import interpretable ope evaluator from pyieoe
from pyieoe.evaluator import InterpretableOPEEvaluator
# -
import warnings
from sklearn.exceptions import ConvergenceWarning
# Silence convergence warnings emitted by scikit-learn during the many repeated fits.
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
# Show the installed obp version for reproducibility.
obp.__version__
# ## 1. Data Preparation
#
# In order to conduct IEOE using `pyieoe`, we need to prepare logged bandit feedback data, action distributions of evaluation policies, and ground truth policy values of evaluation policies. Because `pyieoe` is built with the intention of being used with `obp`, these inputs must follow the conventions in `obp`. Specifically, logged bandit feedback data must be of type `BanditFeedback`, action distributions must be of type `np.ndarray`, and ground truth policy values must be of type `float` (or `int`).
#
# In this example, we generate logged bandit feedback data by modifying multiclass classification data and obtain two sets of evaluation policies along with their action distributions and ground truth policy values using `obp`. For a detailed explanation of this process, please refer to the [official docs](https://zr-obp.readthedocs.io/en/latest/_autosummary/obp.dataset.multiclass.html#module-obp.dataset.multiclass).
# +
# load raw digits data
X, y = load_digits(return_X_y=True)
# convert the raw classification data into the logged bandit dataset
dataset = MultiClassToBanditReduction(
    X=X,
    y=y,
    base_classifier_b=LogisticRegression(random_state=12345),
    alpha_b=0.8,  # behavior-policy mixing weight -- see obp MultiClassToBanditReduction docs
    dataset_name="digits"
)
# split the original data into the training and evaluation sets
dataset.split_train_eval(eval_size=0.7, random_state=12345)
# obtain logged bandit feedback generated by the behavior policy
bandit_feedback = dataset.obtain_batch_bandit_feedback(random_state=12345)
# obtain action choice probabilities by an evaluation policy and its ground-truth policy value
# evaluation policy (a): logistic regression
action_dist_a = dataset.obtain_action_dist_by_eval_policy(
    base_classifier_e=LogisticRegression(C=100, random_state=12345, max_iter=10000),
    alpha_e=0.9
)
ground_truth_a = dataset.calc_ground_truth_policy_value(action_dist=action_dist_a)
# evaluation policy (b): random forest
action_dist_b = dataset.obtain_action_dist_by_eval_policy(
    base_classifier_e=RandomForest(n_estimators=100, min_samples_split=5, random_state=12345),
    alpha_e=0.9
)
ground_truth_b = dataset.calc_ground_truth_policy_value(action_dist=action_dist_b)
# -
# ## 2. Setting Hyperparameter Spaces for Off-Policy Evaluation
#
# An integral aspect of IEOE is the different sources of variance. The main sources of variance are evaluation policies, random states, hyperparameters of OPE estimators, and hyperparameters of regression models.
#
# In this step, we define the spaces from which the hyperparameters of OPE estimators / regression models are chosen. (The evaluation policy space is defined in the previous step, and the random state space will be defined in the next step.)
# +
# set hyperparameter space for ope estimators
# set hyperparameter space for the doubly robust with shrinkage estimator
# with the following code, lambda_ will be chosen from a logarithm uniform distribution over the interval [0.001, 1000]
# Hyperparameter search space for the DRos estimator: lambda_ is drawn
# log-uniformly from [0.001, 1000] and cast to float.
lambda_ = dict(lower=1e-3, upper=1e3, log=True, type=float)
dros_param = {"lambda_": lambda_}
# +
# Set the hyperparameter space for logistic regression using RandomizedSearchCV.
# FIX: `sklearn.utils.fixes.loguniform` was deprecated and removed in
# scikit-learn 1.1; the canonical source is scipy.stats.loguniform.
from scipy.stats import loguniform
logistic = LogisticRegression()
distributions = {
    # C is sampled log-uniformly from [0.01, 100]
    "C": loguniform(1e-2, 1e2)
}
clf_logistic = RandomizedSearchCV(logistic, distributions, random_state=0, n_iter=5)
# +
# set hyperparameter space for random forest classifier using RandomizedSearchCV
from scipy.stats import randint
randforest = RandomForest()
distributions = {
    # n_estimators will be chosen from a uniform distribution over the interval [50, 100)
    # NOTE(review): 5e1/1e2 are floats; scipy coerces them, but plain ints (50, 100) would be clearer
    "n_estimators": randint(5e1, 1e2),
    # max_depth will be chosen from a uniform distribution over the interval [2, 10)
    "max_depth": randint(2, 10),
    # min_samples_split will be chosen from a uniform distribution over the interval [2, 10)
    "min_samples_split": randint(2, 10)
}
clf_randforest = RandomizedSearchCV(randforest, distributions, random_state=0, n_iter=5)
# -
# ## 3. Interpretable Evaluation for Off-Policy Evaluation
#
# With the above steps completed, we can finally conduct IEOE by utilizing the `InterpretableOPEEvaluator` class.
#
# Here is a brief description for each parameter that can be passed into `InterpretableOPEEvaluator`:
#
# - `random_states`: a list of integers representing the random_state used when performing OPE; corresponds to the number of iterations
# - `bandit_feedback`: a list of logged bandit feedback data
# - `evaluation_policies`: a list of tuples representing (ground truth policy value, action distribution)
# - `ope_estimators`: a list of OPE ope_estimators
# - `ope_estimator_hyperparams`: a dictionary mapping OPE estimator names to OPE estimator hyperparameter spaces defined in step 2
# - `regression_models`: a list of regression regression_models
# - `regression_model_hyperparams`: a dictionary mapping regression models to regression model hyperparameter spaces defined in step 2
# initializing class
evaluator = InterpretableOPEEvaluator(
    # 100 iterations: one OPE run per random state
    random_states=np.arange(100),
    bandit_feedbacks=[bandit_feedback],
    # one (ground-truth policy value, action distribution) pair per evaluation policy
    evaluation_policies=[
        (ground_truth_a, action_dist_a),
        (ground_truth_b, action_dist_b)
    ],
    ope_estimators=[
        DirectMethod(),
        DoublyRobust(),
        DoublyRobustWithShrinkage(),
        InverseProbabilityWeighting(),
    ],
    # only DRos gets a tunable hyperparameter space here (dros_param, defined above)
    ope_estimator_hyperparams={
        DoublyRobustWithShrinkage.estimator_name: dros_param,
    },
    regression_models=[
        clf_logistic,
        clf_randforest
    ],
    # models used to estimate propensity scores -- not listed in the parameter
    # description above; presumably sampled like regression_models (TODO confirm in pyieoe docs)
    pscore_estimators=[
        clf_logistic,
        clf_randforest
    ]
)
# Once we have initialized `InterpretableOPEEvaluator`, we can call implemented methods to perform IEOE.
# estimate policy values
# we obtain a dictionary mapping ope estimator names to np.ndarray storing the estimated policy value for each iteration
policy_value = evaluator.estimate_policy_value()
# show the first three iterations for each estimator
print("dm:", policy_value["dm"][:3])
print("dr:", policy_value["dr"][:3])
print("dr-os:", policy_value["dr-os"][:3])
print("ipw:", policy_value["ipw"][:3])
# compute squared errors
# we obtain a dictionary mapping ope estimator names to np.ndarray storing the calculated squared error for each iteration
squared_error = evaluator.calculate_squared_error()
# show the first three iterations for each estimator
print("dm:", squared_error["dm"][:3])
print("dr:", squared_error["dr"][:3])
print("dr-os:", squared_error["dr-os"][:3])
print("ipw:", squared_error["ipw"][:3])
# visualize cdf of squared errors for all ope estimators
evaluator.visualize_cdf_aggregate(xmax=0.04)
# compute the au-cdf score (area under cdf of squared error over interval [0, threshold]), higher score is better
# we obtain a dictionary mapping ope estimator names to au-cdf scores
au_cdf = evaluator.calculate_au_cdf_score(threshold=0.004)
au_cdf
# by activating the `scale` option,
# we obtain au_cdf scores where the highest score is scaled to 1
au_cdf_scaled = evaluator.calculate_au_cdf_score(threshold=0.004, scale=True)
au_cdf_scaled
# compute the cvar score (expected value of squared error above probability alpha), lower score is better
# we obtain a dictionary mapping ope estimator names to cvar scores
cvar = evaluator.calculate_cvar_score(alpha=90)
cvar
# by activating the `scale` option,
# we obtain cvar scores where the lowest score is scaled to 1
cvar_scaled = evaluator.calculate_cvar_score(alpha=90, scale=True)
cvar_scaled
# | examples/multiclass-rscv.ipynb |