code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.10 64-bit (''bert4rec'': conda)'
# name: python3
# ---
# +
import torch
import sys
sys.path.append('../')
from src.models import model_factory
from src.dataloaders import dataloader_factory
from src.datasets import dataset_factory
from src.trainers import trainer_factory
from src.utils.utils import *
from src.utils.options import parser
# +
# Build the experiment configuration: parse defaults (empty argv) and override a few fields.
args = parser.parse_args([])
args.data_path = '../data/ml-1m' # "../data/Beauty"
args.num_epochs = 100
# presumably the max transformer input sequence length — TODO confirm against src.utils.options
args.trm_max_len = 50
ckpt_root = setup_train(args)  # root directory returned by setup_train; passed to the trainer below
# -
# ## Build Dataset
dataset = dataset_factory(args)
train_loader, val_loader, test_loader, dataset = dataloader_factory(args, dataset)
# +
# Grab one batch from the test loader; its tensors are reused below as fixed
# sample inputs for the ONNX sanity runs.
for batch in test_loader:
    break
users, seqs, candidates, labels, length = batch
def to_numpy(tensor):
    """Convert a torch tensor to a numpy array on the CPU.

    ``detach()`` is a no-op for tensors that do not require grad, so the
    original ``requires_grad`` branch was redundant — one unconditional
    chain handles both cases.
    """
    return tensor.detach().cpu().numpy()
# -
# ## Evaluation Prep
# +
# Build the model and trainer; the trainer supplies the `ranker` used for
# metric computation in evaluate() below.
model = model_factory(args)
trainer = trainer_factory(args, model, train_loader, val_loader, test_loader, ckpt_root, dataset.data)
from tqdm import tqdm
from src.utils.utils import AverageMeterSet
def evaluate(session, test_loader, metric_ks, ranker):
    """Run an ONNX Runtime session over the test set and return averaged metrics.

    session: an onnxruntime.InferenceSession exposing run(None, feed_dict).
    test_loader: yields (users, seqs, candidates, labels, length) batches.
    metric_ks: cutoffs for NDCG@k / Recall@k.
    ranker: maps a score tensor to a sequence laid out as interleaved
        [NDCG@k, Recall@k] pairs followed by ..., MRR, AUC at res[-3]/res[-2]
        — assumed from the indexing below; TODO confirm against the ranker impl.
    Returns the dict produced by AverageMeterSet.averages().
    Note: reads the module-level `args` for test_batch_size.
    """
    average_meter_set = AverageMeterSet()
    with torch.no_grad():
        tqdm_dataloader = tqdm(test_loader)
        for batch_idx, batch in enumerate(tqdm_dataloader):
            users, seqs, candidates, labels, length = batch
            # Skip the final partial batch so the ONNX input shapes stay fixed.
            if users.size(0) != args.test_batch_size:
                continue
            ort_inputs = {'seqs': to_numpy(seqs), 'candidates': to_numpy(candidates), 'length': to_numpy(length)}
            scores = torch.Tensor(session.run(None, ort_inputs)[0])
            res = ranker(scores)
            metrics = {}
            for i, k in enumerate(metric_ks):
                metrics["NDCG@%d" % k] = res[2*i]
                metrics["Recall@%d" % k] = res[2*i+1]
            metrics["MRR"] = res[-3]
            metrics["AUC"] = res[-2]
            for k, v in metrics.items():
                average_meter_set.update(k, v)
            # Progress-bar description with abbreviated metric names.
            # NOTE(review): 'loss' is never update()d above, so this relies on
            # AverageMeterSet returning a default meter for unseen keys — verify.
            description_metrics = ['NDCG@%d' % k for k in metric_ks[:3]] +\
                ['Recall@%d' % k for k in metric_ks[:3]] + ['MRR'] + ['AUC'] + ['loss']
            description = 'FINAL TEST: ' + ', '.join(s + ' {:.5f}' for s in description_metrics)
            description = description.replace('NDCG', 'N').replace('Recall', 'R').replace('MRR', 'M').replace('Jaccard', 'J')
            description = description.format(*(average_meter_set[k].avg for k in description_metrics))
            tqdm_dataloader.set_description(description)
        average_metrics = average_meter_set.averages()
    return average_metrics
# -
# ## Convert To FP16 Model
# +
def convert_fp16():
    """Convert model.onnx to half precision and save it as model_fp16.onnx."""
    import onnx
    import onnxruntime
    import onnxmltools
    from onnxmltools.utils.float16_converter import convert_float_to_float16
    onnx_model = onnx.load("model.onnx")
    # Validate the graph before converting.
    onnx.checker.check_model(onnx_model)
    ort_session = onnxruntime.InferenceSession("model.onnx")
    # Compute an ONNX Runtime output prediction as a sanity run; uses the
    # module-level seqs/candidates/length tensors from the sample batch.
    ort_inputs = {'seqs': to_numpy(seqs), 'candidates': to_numpy(candidates), 'length': to_numpy(length)}
    ort_outs = ort_session.run(None, ort_inputs)
    # Cast all float32 tensors in the graph to float16 and persist.
    onnx_model = convert_float_to_float16(onnx_model)
    onnxmltools.utils.save_model(onnx_model, "model_fp16.onnx")
convert_fp16()
# +
import onnxruntime
# Load both the fp16 and the original fp32 sessions for comparison.
ort_session_fp16 = onnxruntime.InferenceSession("model_fp16.onnx")
ort_session = onnxruntime.InferenceSession("model.onnx")
# +
# Run the sample batch through both models.
ort_inputs = {'seqs': to_numpy(seqs), 'candidates': to_numpy(candidates), 'length': to_numpy(length)}
ort_outs_fp16 = ort_session_fp16.run(None, ort_inputs)
ort_outs = ort_session.run(None, ort_inputs)
# -
# Evaluate both sessions on the full test set and report the AUC ratio.
res_fp16 = evaluate(ort_session_fp16, test_loader, args.metric_ks, trainer.ranker)
res = evaluate(ort_session, test_loader, args.metric_ks, trainer.ranker)
print(f"AUC Change: \t {res_fp16['AUC'] / res['AUC']}")
# ## Convert To Mixed-Precision Model
# +
def convert_mixed():
    """Dynamically quantize model_fp16.onnx weights to int8, writing model_mixed.onnx.

    Dropped the original function-local ``import onnx`` — nothing from that
    module was referenced here.
    """
    from onnxruntime.quantization import quantize_dynamic, QuantType
    quantize_dynamic("model_fp16.onnx", "model_mixed.onnx", weight_type=QuantType.QInt8)
convert_mixed()
# +
import onnxruntime
ort_session_mixed = onnxruntime.InferenceSession("model_mixed.onnx")
ort_session = onnxruntime.InferenceSession("model.onnx")
# Sanity run of the quantized model on the sample batch.
ort_inputs = {'seqs': to_numpy(seqs), 'candidates': to_numpy(candidates), 'length': to_numpy(length)}
ort_outs_mixed = ort_session_mixed.run(None, ort_inputs)
print(ort_outs_mixed)
# +
# Evaluate quantized vs. original on the full test set and report the AUC ratio.
res_mixed = evaluate(ort_session_mixed, test_loader, args.metric_ks, trainer.ranker)
res = evaluate(ort_session, test_loader, args.metric_ks, trainer.ranker)
print(f"AUC Change: \t {res_mixed['AUC'] / res['AUC']}")
# -
| demo/precision.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Tesseract and Photographs
# Lets try a new example and bring together some of the things we have learned.
# Here's an image of a storefront, lets load it and try and get the name of the
# store out of the image
from PIL import Image
import pytesseract
# Let's read in the storefront image I've loaded into the course and display it
image=Image.open('readonly/storefront.jpg')
display(image)
# Finally, let's try and run tesseract on the whole image and see what the results are
pytesseract.image_to_string(image)
# +
# We see at the very bottom there is just an empty string. Tesseract is unable to take
# this image and pull out the name. But we learned how to crop the images in the
# last set of lectures, so let's try and help Tesseract by cropping out certain pieces.
#
# First, let's set the bounding box. In this image the store name is in a box
# bounded by (left, upper, right, lower) = (315, 170, 700, 270)
bounding_box=(315, 170, 700, 270)
# Now let's crop the image
title_image=image.crop(bounding_box)
# Now let's display it and pull out the text
display(title_image)
pytesseract.image_to_string(title_image)
# +
# Great, we see how with a bit of a problem reduction we can make that work. So now we have
# been able to take an image, preprocess it where we expect to see text, and turn that text
# into a string that python can understand.
#
# If you look back up at the image though, you'll see there is a small sign inside of the
# shop that also has the shop name on it. I wonder if we're able to recognize the text on
# that sign? Let's give it a try.
#
# First, we need to determine a bounding box for that sign. I'm going to show you a short-cut
# to make this easier in an optional video in this module, but for now let's just use the bounding
# box I decided on
bounding_box=(900, 420, 940, 445)
# Now, let's crop the image
# (note: repeats the tuple literal instead of reusing bounding_box above)
little_sign=image.crop((900, 420, 940, 445))
display(little_sign)
# +
# All right, that is a little sign! OCR works better with higher resolution images, so
# let's increase the size of this image by using the pillow resize() function.
# Set the width and height equal to ten times the size it is now in a (w,h) tuple.
new_size=(little_sign.width*10,little_sign.height*10)
# Now let's check the docs for resize()
help(little_sign.resize)
# -
# We can see that there are a number of different filters for resizing the image. The
# default is Image.NEAREST. Let's see what that looks like
display(little_sign.resize( new_size, Image.NEAREST))
# I think we should be able to find something better. I can read it, but it looks
# really pixelated. Let's see what all the different resize options look like
options=[Image.NEAREST, Image.BOX, Image.BILINEAR, Image.HAMMING, Image.BICUBIC, Image.LANCZOS]
for option in options:
    # print the option name (the resampling constants are plain ints)
    print(option)
    # display what this option looks like on our little sign
    display(little_sign.resize( new_size, option))
# +
# From this we can notice two things. First, when we print out one of the resampling
# values it actually just prints an integer! This is really common: that the
# API developer writes a property, such as Image.BICUBIC, and then assigns it to an
# integer value to pass it around. Some languages use enumerations of values, which is
# common in say, Java, but in python this is a pretty normal way of doing things.
# The second thing we learned is that there are a number of different algorithms for
# image resampling. In this case, the Image.LANCZOS and Image.BICUBIC filters do a good
# job. Let's see if we are able to recognize the text off of this resized image.
# First resize to the larger size
bigger_sign=little_sign.resize(new_size, Image.BICUBIC)
# Let's print out the text
pytesseract.image_to_string(bigger_sign)
# +
# Well, no text there. Lets try and binarize this. First, let me just bring in the
# binarization code we did earlier
def binarize(image_to_transform, threshold):
    """Return a black-and-white copy of *image_to_transform*.

    The image is converted to 8-bit grayscale ("L"); pixels with value
    below *threshold* become 0 (black), all others become 255 (white).
    """
    # Image.point applies the per-pixel mapping in one C-level pass instead
    # of a Python getpixel/putpixel double loop — same output, much faster.
    return image_to_transform.convert("L").point(
        lambda value: 0 if value < threshold else 255
    )
# Now, let's apply binarization with a threshold of 190, display the result,
# and run the OCR on it
binarized_bigger_sign=binarize(bigger_sign, 190)
display(binarized_bigger_sign)
pytesseract.image_to_string(binarized_bigger_sign)
# +
# Ok, that text is pretty useless. How should we pick the best binarization
# to use? Well, there are some methods, but let's just try something very simple to
# show how well this can work. We have an english word we are trying to detect, "FOSSIL".
# If we tried all binarizations, from 0 through 255, and looked to see if there were
# any english words in that list, this might be one way. So let's see if we can
# write a routine to do this.
#
# First, let's load a list of english words into a list. I put a copy in the readonly
# directory for you to work with
eng_dict=[]
with open ("readonly/words_alpha.txt", "r") as f:
    data=f.read()
    # now we want to split this into a list based on the new line characters
    eng_dict=data.split("\n")
# Now let's iterate through the candidate thresholds and look for an english word,
# printing it out if it exists (150..169 only, not the full 0..255 range)
for i in range(150,170):
    # binarize at this threshold and convert the result to string values
    strng=pytesseract.image_to_string(binarize(bigger_sign,i))
    # We want to remove non alphabetical characters, like ([%$]) from the text, here's
    # a short method to do that
    # first, convert our string to lower case only
    strng=strng.lower()
    # then import the string package - it has a nice list of lower case letters
    import string
    # now iterate over our string looking at it character by character, putting it in
    # the comparison text
    comparison=''
    for character in strng:
        if character in string.ascii_lowercase:
            comparison=comparison+character
    # finally, search for comparison in the dictionary file
    if comparison in eng_dict:
        # and print it if we find it
        print(comparison)
# +
# Well, not perfect, but we see fossil there among other values which are in the dictionary.
# This is not a bad way to clean up OCR data. It can be useful to use a language or domain specific
# dictionary in practice, especially if you are generating a search engine for specialized language
# such as a medical knowledge base or locations. And if you scroll up and look at the data
# we were working with - this small little wall hanging on the inside of the store - it's not
# so bad.
#
# At this point you've now learned how to manipulate images and convert them into text. In the
# next module in this course we're going to dig deeper further into a computer vision library
# which allows us to detect faces among other things. Then, on to the culminating project!
| course_5/module_2-lec5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.13 64-bit (''venv'': conda)'
# name: python3
# ---
import sys
import config
sys.path.append(config.root)
from utils.geotif_io import readTiff
from utils.imgShow import imgShow, imsShow
import matplotlib.pyplot as plt
# Load a Landsat-8 scene (GeoTIFF) together with its metadata.
path_rsimg = config.root+'/data/l8_scene_05.tif'
l8_img,l8_img_info = readTiff(path_rsimg)
l8_img.shape
# Display a single image with bands (3, 2, 1) mapped to RGB.
fig = plt.figure(figsize=(4,4))
imgShow(l8_img, color_bands=(3, 2, 1))
# Display several images side by side with different band combinations.
figure = plt.figure(figsize=(9,9))
img_list= [l8_img,l8_img]
img_name_list = ['111','222']
# presumably a per-image contrast clip value — TODO confirm against imsShow
clip_list = [2,2]
col_bands_list = [(3,2,1),(5,4,3)]
imsShow(img_list, img_name_list, clip_list, col_bands_list)
| notebooks/img_show.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.1
# language: julia
# name: julia-1.4
# ---
# # ベイズ勉強会 Part 3 1次元ガウス分布のベイズ推論
# > 1次元ガウス分布のベイズ推論を実践する
#
# - toc: true
# - badges: true
# - comments: true
# - categories: [bayes]
# - image: images/gauss1_ml.gif
# ベイズ勉強会資料は『ベイズ推論による機械学習入門』{% fn 1 %}を元に、途中式計算をできるだけ省略せずに行ったものです。
# # 1次元ガウス分布
#
# 1次元のガウス分布(以下本ページでは単に「ガウス分布」と呼ぶ)は、次の確率密度関数で表される$x \in \mathbb{R}$を生成する確率分布である。
# > Important: 1次元ガウス分布の確率密度関数$$\mathcal{N}(x|\mu,\sigma^2) = \frac{1}{\sqrt{2 \pi \sigma^2}} exp\{-\frac{(x-\mu)^2}{2\sigma^2}\}$$
# パラメータは$\mu \in \mathbb{R}, \sigma^2 \in \mathbb{R^+}$の2つで、それぞれ平均と分散である。精度パラメータ$\lambda = \sigma^{-2}$を用いて書くこともある。精度で書くと以下のようになる。
# > Important: 精度で表した1次元ガウス分布の確率密度関数$$\mathcal{N}(x|\mu,\lambda^{-1}) = \frac{1}{\sqrt{2 \pi}} \lambda^{\frac{1}{2}}exp\{-\frac{1}{2}(x-\mu)^2 \lambda\}$$
# 以下、観測されたデータ$\mathcal{D}=\{x_1,\dots,x_N\}$の各点$x_n$と未知の観測$x_*$は同じ1次元ガウス分布から独立に生成されたと仮定してベイズ推論を行う。
#
# # 平均が未知、精度が既知の場合
#
# ガウス分布の2つのパラメータのうち、精度パラメータ$\lambda$が既知である状況でベイズ推論を行う。
#
# ## モデルの構築
#
# 平均$\mu$だけが未知という条件で、尤度関数をガウス分布にした場合、そのパラメータ$\mu$の事前分布はどうすればよいだろうか。$\mu$の条件は$\mu \in \mathbb{R}$であることのみであり、実数を1つ出力する分布を事前分布とすればベイズ推論ができそうだ。このような分布は様々だが、1次元ガウス分布を用いることで事後分布も1次元ガウス分布となることが知られている。つまり尤度関数が1次元ガウス分布の場合の共役事前分布は1次元ガウス分布である。これを用いて同時分布を構築すると以下のようになる。
#
# $$
# \begin{eqnarray}
# p(\mathcal{D},x_*,\mu) &=& p(\mathcal{D}|\mu)p(x_*|\mu)p(\mu) \\
# p(\mathcal{D}|\mu) &=& \Pi_{n=1}^{N} \mathcal{N}(x_n|\mu, \lambda^{-1}) \\
# p(x_*|\mu) &=& \mathcal{N}(x_*|\mu, \lambda^{-1}) \\
# p(\mu) &=& \mathcal{N}(\mu|m, \lambda_{\mu}^{-1})
# \end{eqnarray}
# $$
#
# $p(\mu)$の$m \in \mathbb{R}, \lambda_{\mu} \in \mathbb{R^+}$は固定されたハイパーパラメータである。
#
# ## 事後分布の推論
#
# 事後分布$p(\mu|\mathcal{D})$を求める。ベイズの定理から次のように書ける。分母は形状には関わらないので省く。
#
# $$
# \begin{eqnarray}
# p(\mu|\mathcal{D}) &\propto& p(\mathcal{D}|\mu) p(\mu) \\
# &=& \{\Pi_{n=1}^{N} p(x_n|\mu)\}p(\mu) \\
# &=& \{\Pi_{n=1}^{N} \mathcal{N}(x_n|\mu, \lambda^{-1})\} \mathcal{N}(\mu|m,\lambda_{\mu}^{-1})
# \end{eqnarray}
# $$
#
# 対数をとると、
#
# $$
# \begin{eqnarray}
# \ln p(\mu|\mathcal{D}) &=& \Sigma_{n=1}^{N} \ln \mathcal{N}(x_n|\mu,\lambda^{-1}) + \ln \mathcal{N}(\mu|m,\lambda_{\mu}^{-1}) + const. \\
# &=& \Sigma_{n=1}^{N} \{-\frac{1}{2}(x_n-\mu)^2 \lambda\} + \{-\frac{1}{2}(\mu-m)^2 \lambda_{\mu}\} + const. \\
# &=& -\frac{1}{2}\left[\Sigma_{n=1}^{N}\{(x_n^2 -2x_n\mu + \mu^2)\lambda\} + (\mu^2 - 2\mu m + m^2)\lambda_{\mu}\right] + const. \\
# &=& -\frac{1}{2}\{(N\lambda + \lambda_{\mu})\mu^2 - 2(\Sigma_{n=1}^{N} x_n \lambda + m \lambda_{\mu})\mu\} + const.
# \end{eqnarray}
# $$
#
# 最後の変形では$\mu$に関わらない値は$const.$に吸収させている。これを平方完成するとガウス分布の形になっていることがわかるがここでは結果から逆算的に求める。事後分布が次のようなガウス分布で書けるとする。
#
# $$
# p(\mu|\mathcal{D}) = \mathcal{N}(\mu|\hat{m},\hat{\lambda_{\mu}}^{-1})
# $$
#
# 対数をとると
#
# $$
# \begin{eqnarray}
# \ln p(\mu|\mathcal{D}) &=& -\frac{1}{2}(\mu-\hat{m})^2 \hat{\lambda_{\mu}} \\
# &=& -\frac{1}{2}\{\hat{\lambda_{\mu}} \mu^2 - 2\hat{m} \hat{\lambda_{\mu}} \mu\} + const.
# \end{eqnarray}
# $$
#
# となるので、事後分布のパラメータ$\hat{m},\hat{\lambda_{\mu}}$は次のように求まる。
#
# $$
# \begin{eqnarray}
# \hat{\lambda_{\mu}} &=& N \lambda + \lambda_{\mu} \\
# \hat{m} &=& \frac{\lambda \Sigma_{n=1}^{N} x_n + \lambda_{\mu}m}{\hat{\lambda_{\mu}}}
# \end{eqnarray}
# $$
#
# 以上で事後分布の推論が完了した。
# > Note: 更新された精度パラメータは、ハイパーパラメータに$N\lambda$だけ加えたものでありデータ数$N$が大きくなる程精度が上がる、すなわち事後分布のばらつきが小さくなりかつハイパーパラメータの影響が小さくなることを示している。平均パラメータはハイパーパラメータ$m$と$\Sigma_{n=1}^{N}x_n$の重み付き和の形になっておりこれもデータ数が増えるほどハイパーパラメータの影響を受けにくくなることがわかる。
# ## 予測分布の算出
#
# モデル全体の事後分布をパラメータ$\mu$で周辺化することで未知の観測$x_*$に対する予測分布が得られる。ハット記号を付けるのが面倒なので、学習していない事前分布から予測分布を算出し、後で更新されたパラメータを代入してみることにする。
#
# $$
# \begin{eqnarray}
# p(x_*) &=& \int p(x_*|\mu) p(\mu) d\mu \\
# &=& \int \mathcal{N}(x_*|\mu,\lambda^{-1})\mathcal{N}(\mu|m,\lambda_{\mu}^{-1}) d\mu
# \end{eqnarray}
# $$
#
# これを直接計算するのは大変なので、ベイズの定理と対数をうまく使ってみる。ベイズの定理から、
#
# $$
# p(\mu|x_*) = \frac{p(x_*|\mu)p(\mu)}{p(x_*)}
# $$
#
# $p(x_*)$を左辺に置いて対数をとると、
#
# $$
# \ln p(x_*) = \ln p(x_*|\mu) - \ln p(\mu|x_*) + const.
# $$
#
# $\ln p(\mu)$は$x_*$には関係ないので$const.$とした。$p(\mu|x_*)$は$p(\mu|\mathcal{D})$に$\mathcal{D}=x_*$とすれば求まる。
#
# $$
# \begin{eqnarray}
# p(\mu|x_*) &=& \mathcal{N}(\mu|m(x_*), (\lambda + \lambda_{\mu})^{-1}) \\
# ただし m(x_*) &=& \frac{\lambda x_* + \lambda_{\mu}m}{\lambda + \lambda_{\mu}}
# \end{eqnarray}
# $$
#
# これと$p(x_*|\mu) = \mathcal{N}(\mu, \lambda^{-1})$を代入すると
#
#
# $$
# \begin{eqnarray}
# \ln p(x_*) &=& \ln p(x_*|\mu) - \ln p(\mu|x_*) + const. \\
# &=& -\frac{1}{2} \{ \lambda (x_* - \mu)^2 - (\lambda + \lambda_{\mu})(\mu - m(x_*))^2 \} + const. \\
# &=& -\frac{1}{2} \{ \lambda x_*^2 - 2 \lambda \mu x_* - \frac{\lambda^2 x_*^2 + 2 \lambda \lambda_{\mu} m x_*}{\lambda + \lambda_{\mu}} + 2 \lambda \mu x_* \} + const. \\
# &=& -\frac{1}{2} \{ \frac{\lambda^2 x_*^2 + \lambda \lambda_{\mu} x_*^2 - \lambda^2 x_*^2 - 2 \lambda \lambda_{\mu} m x_*}{\lambda + \lambda_{\mu}}\} + const. \\
# &=& -\frac{1}{2} \{ \frac{\lambda \lambda_{\mu}}{\lambda + \lambda_{\mu}} x_*^2 - \frac{2m\lambda\lambda_{\mu}}{\lambda + \lambda_{\mu}} x_* \} + const.
# \end{eqnarray}
# $$
#
# $x_*$の2次関数の形にできたので事後分布と同じく逆算的に計算すると、予測分布$p(x_*)$は
#
# $$
# \begin{eqnarray}
# p(x_*) &=& \mathcal{N} (x_* | \mu_*, \lambda_{*}^{-1}) \\
# ただし \lambda_* &=& \frac{\lambda \lambda_{\mu}}{\lambda + \lambda_{\mu}} \\
# \mu_* &=& m
# \end{eqnarray}
# $$
#
# となる。これに$\hat{m}, \hat{\lambda_{\mu}}$を代入することで学習した後の予測分布$p(x_*|\mathcal{D})$は
#
# $$
# \begin{eqnarray}
# p(x_*) &=& \mathcal{N} (x_* | \mu_*, \lambda_{*}^{-1}) \\
# ただし \lambda_* &=& \frac{\lambda (N\lambda + \lambda_{\mu})}{\lambda + (N\lambda + \lambda_{\mu})} \\
# \mu_* &=& \frac{\lambda \Sigma_{n=1}^{N} x_n + \lambda_{\mu}m}{N\lambda + \lambda_{\mu}}
# \end{eqnarray}
# $$
#
# と求まる。
# > Note: 精度から見た結果の意味
# >> 精度について逆数をとると意味がわかりやすい。
# >>
# >> $$\lambda_*^{-1} = \lambda^{-1} + \hat{\lambda_{\mu}}^{-1}$$
# >>
# >> 精度の逆数は分散なので、この式は「予測分布の分散は観測分布の分散と事後分布の分散の和である」という意味になる。
# >>
# >> 今回は観測分布の分散が実際の分散に等しいことを仮定しているので、データ数が増え事後分布の分散が小さくなれば予測分布は実際の分布の分散とほぼ一致する。
# # 平均が既知、精度が未知の場合
#
# 今度は、平均パラメータ$\mu$が既知で、精度パラメータ$\lambda$が未知の場合でベイズ推論を行う。
#
# ## モデルの構築
#
# 精度パラメータ$\lambda$は正の実数である必要がある。正の実数を出力する確率分布にはガンマ分布があり、平均既知精度未知の場合の1次元ガウス分布の共役事前分布として知られている。事前分布にガンマ分布を採用すると次のようにモデル構築することになる。
#
# $$
# \begin{eqnarray}
# p(\mathcal{D},x_*,\lambda) &=& p(\mathcal{D}|\lambda)p(x_*|\lambda)p(\lambda) \\
# p(\mathcal{D}|\lambda) &=& \Pi_{n=1}^{N} \mathcal{N}(x_n|\mu, \lambda^{-1}) \\
# p(x_*|\lambda) &=& \mathcal{N}(x_*|\mu, \lambda^{-1}) \\
# p(\lambda) &=& Gam(\lambda|a, b)
# \end{eqnarray}
# $$
#
# このモデルのハイパーパラメータは$a,b$である。$\mu$は既知の値という設定だが、$\mu$をハイパーパラメータとして推論していると考えることもできる。
# > Important: ガンマ分布
# >> ガンマ分布は$a, b \in \mathbb{R^+}$をパラメータに持ち、正の実数を生成する確率分布である。ガンマ分布の確率密度関数は次の通り。
# >>
# >> $$Gam(\lambda|a,b) = C_G(a,b)\lambda^{a-1}e^{-b\lambda}$$
# >>
# >> $C_G(a,b)$は正規化係数であり、
# >>
# >> $$C_G(a,b) = \frac{b^a}{\Gamma(a)}$$
# ## 事後分布の推論
#
# 事後分布$p(\lambda|\mathcal{D})$を推論する。ベイズの定理から
#
# $$
# \begin{eqnarray}
# p(\lambda|\mathcal{D}) &\propto& p(\mathcal{D}|\lambda)p(\lambda) \\
# &=& \{ \Pi_{n=1}^{N} p(x_n|\lambda) \} p(\lambda) \\
# &=& \{ \Pi_{n=1}^{N} \mathcal{N}(x_n|\mu, \lambda^{-1}) \} Gam(\lambda|a,b)
# \end{eqnarray}
# $$
#
# 対数をとる。
#
# $$
# \begin{eqnarray}
# \ln p(\lambda|\mathcal{D}) &=& \Sigma_{n=1}^{N} \ln \mathcal{N}(x_n|\mu,\lambda^{-1}) + \ln Gam(\lambda|a,b) + const. \\
# &=& \Sigma_{n=1}^{N}\{\frac{1}{2} \ln \lambda - \frac{(x_n-\mu)^2 \lambda}{2}\} + (a-1)\ln \lambda -b\lambda + const. \\
# &=& (\frac{N}{2}+a-1)\ln \lambda - \{\frac{1}{2}\Sigma_{n=1}^{N}(x_n-\mu)^2 + b\}\lambda + const.
# \end{eqnarray}
# $$
#
# 対数を戻せばこれはガンマ分布となることがわかる。
#
# $$
# \begin{eqnarray}
# p(\lambda|\mathcal{D}) &=& Gam(\lambda|\hat{a},\hat{b}) \\
# ただし \hat{a} &=& \frac{N}{2} + a \\
# \hat{b} &=& \frac{1}{2} \Sigma_{n=1}^{N} (x_n-\mu)^2 + b
# \end{eqnarray}
# $$
#
# ## 予測分布の算出
#
# 事後分布の形状が事前分布と同じなので、学習前の予測分布$p(x_*)$を計算すれば、学習後の$\hat{a},\hat{b}$を代入するだけで$p(x_*|\mathcal{D})$がわかる。
#
# $$
# p(x_*) = \int p(x_*|\lambda)p(\lambda) d\lambda
# $$
#
# を直接計算せずにベイズの定理と対数計算で簡単に計算してみる。
#
# $$
# p(\lambda|x_*) = \frac{p(x_*|\lambda)p(\lambda)}{p(x_*)}
# $$
#
# 対数をとり、$p(\lambda)$を定数にまとめれば
#
# $$
# \ln p(x_*) = \ln p(x_*|\lambda) - \ln p(\lambda|x_*) + const.
# $$
#
# $\ln p(\lambda|x_*)$は事後分布の形に合わせれば
#
# $$
# \begin{eqnarray}
# p(\lambda|x_*) &=& Gam(\lambda|\frac{1}{2}+a,b(x_*)) \\
# ただし b(x_*) &=& \frac{1}{2}(x_*-\mu)^2 + b
# \end{eqnarray}
# $$
#
# と書ける。これを代入して
#
# $$
# \begin{eqnarray}
# \ln p(x_*) &=& \ln \mathcal{N}(x_*|\mu,\lambda^{-1}) - \ln Gam(\lambda|\frac{1}{2}+a,b(x_*)) + const. \\
# &=& \frac{1}{2} \ln \lambda - \frac{(x_*-\mu)^2 \lambda}{2} - (a-\frac{1}{2})\ln \lambda + \{\frac{1}{2}(x_*-\mu)^2 + b \} \lambda - \ln C_G(a+\frac{1}{2}, \frac{1}{2}(x_*-\mu)^2 + b) + const. \\
# &=& - (a+\frac{1}{2})\ln \{\frac{1}{2}(x_*-\mu)^2 + b\} + \ln \Gamma(a+\frac{1}{2}) + const. \\
# &=& - \frac{2a+1}{2} \ln \{ 1 + \frac{1}{2b}(x_*-\mu)^2 \} + const.
# \end{eqnarray}
# $$
#
# となる。途中、$x_*$を含まない項は$const.$に吸収させている。また、$\lambda$に関わる項は消えている。この結果は1次元のStudentのt分布に対数をとったものと同じ形になっている。
# > Important: 1次元のStudentのt分布
# >> 1次元のStudentのt分布は次の確率密度関数で表される。
# >>
# >> $$St(x|\mu_s,\lambda_s,\nu_s) = \frac{\Gamma(\frac{\nu_s + 1}{2})}{\Gamma(\frac{\nu_s}{2})}(\frac{\lambda_s}{\pi \nu_s})^{\frac{1}{2}}\{ 1 + \frac{\lambda_s}{\nu_s} (x-\mu_s)^2 \}^{-\frac{\nu_s+1}{2}}$$
#
# >> 対数をとり$x$に関わらない部分をconst.にまとめると
# >>
# >> $$\ln St(x|\mu_s,\lambda_s,\nu_s) = -\frac{\nu_s+1}{2} \ln \{ 1 + \frac{\lambda_s}{\nu_s} (x-\mu_s)^2 \} + const.$$
# 対数t分布との対応を見れば、予測分布は次のように書けることがわかる。
#
# $$
# \begin{eqnarray}
# p(x_*) &=& St(x_*|\mu_s,\lambda_s,\nu_s) \\
# ただし \mu_s &=& \mu \\
# \lambda_s &=& \frac{a}{b} \\
# \nu_s &=& 2a
# \end{eqnarray}
# $$
#
# 学習により更新された$\hat{a},\hat{b}$を代入すると次のようになる。
#
# $$
# \begin{eqnarray}
# p(x_*|\mathcal{D}) &=& St(x_*|\mu_s,\lambda_s,\nu_s) \\
# ただし \mu_s &=& \mu \\
# \lambda_s &=& \frac{N+2a}{\Sigma_{n=1}^{N} (x_n-\mu)^2 + 2b} \\
# \nu_s &=& N + 2a
# \end{eqnarray}
# $$
# # 平均と精度がともに未知の場合
#
# 次に、平均と精度がともに未知の場合のベイズ推論を実践してみる。モデルのパラメータが2個になっても、やることは変わらない。
#
# ## モデルの構築
#
# 平均についてガウス事前分布を、精度についてガンマ事前分布をそれぞれ設定し次のような同時分布を作ることも可能である(尤度関数は前と同じなので省略)。
#
# $$
# \begin{eqnarray}
# p(\mathcal{D},x_*,\mu,\lambda) &=& p(\mathcal{D}|\mu,\lambda^{-1})p(x_*|\mu,\lambda^{-1})p(\mu)p(\lambda) \\
# p(\mu) &=& \mathcal{N}(\mu|m,\lambda_{\mu}^{-1}) \\
# p(\lambda) &=& Gam(\lambda|a,b)
# \end{eqnarray}
# $$
#
# が、実はガウス・ガンマ分布という事前分布を用いると事後分布もガウス・ガンマ分布となることが知られている。ここではガウス・ガンマ分布を用いる。
# > Important: ガウス・ガンマ分布
# >> ガウス・ガンマ分布は$m, \beta, a, b$をパラメータに持ち、$\mu, \lambda$という2つの確率変数を生成する確率分布である。確率密度関数は次のように書ける。
# >>
# >> $$
# \begin{eqnarray}
# p(\mu,\lambda) &=& NG(\mu,\lambda|m,\beta,a,b) \\
# &=& \mathcal{N}(\mu|m,(\beta \lambda)^{-1})Gam(\lambda|a,b)
# \end{eqnarray}
# $$
# ガウス事前分布・ガンマ事前分布を別々に設定する場合との違いは$\mu$が$\lambda$に条件づけられていることである。グラフィカルモデルで示すと次のようになる。
#
# 
#
# 
#
# ガウス・ガンマ分布を使った場合モデルは次のようになる(尤度関数は省略)。
#
# $$
# \begin{eqnarray}
# p(\mathcal{D},x_*,\mu,\lambda) &=& p(\mathcal{D}|\mu,\lambda^{-1})p(x_*|\mu,\lambda^{-1})p(\mu,\lambda) \\
# p(\mu,\lambda) &=& NG(\mu,\lambda|m,\beta,a,b)
# \end{eqnarray}
# $$
#
# ## 事後分布の推論
#
# $\mu$が$\lambda$に条件づけられているので、同時分布$p(\mathcal{D},\mu,\lambda)$は次のように変形できる。
#
# $$
# p(\mathcal{D},\mu,\lambda) = p(\mu|\lambda,\mathcal{D})p(\lambda|\mathcal{D})p(\mathcal{D})
# $$
#
# 未観測の変数の事後分布は同時分布を観測された変数の確率で割ることで求まるのでこの場合の事後分布は、
#
# $$
# \frac{p(\mathcal{D},\mu,\lambda)}{p(\mathcal{D})} = p(\mu|\lambda,\mathcal{D})p(\lambda|\mathcal{D})
# $$
#
# より、$p(\mu|\lambda,\mathcal{D})p(\lambda|\mathcal{D})$のことを指す。
#
# ### 平均に注目
#
# まず平均にのみ注目し$p(\mu|\lambda,\mathcal{D})$について考える。平均未知精度既知の場合の事後分布の結果に対し$\lambda$を$\beta \lambda$に置き換えれば、
#
# $$
# \begin{eqnarray}
# p(\mu|\lambda,\mathcal{D}) &=& \mathcal{N}(\mu|\hat{m},(\hat{\beta}\lambda)^{-1}) \\
# ただし \hat{\beta} &=& N + \beta \\
# \hat{m} &=& \frac{1}{\hat{\beta}}(\Sigma_{n=1}^{N} x_n + \beta m)
# \end{eqnarray}
# $$
#
# > Note: 置き換えによって$\lambda_{\mu}=\beta \lambda$となっていることを利用した。
#
# ### 精度に注目
#
# 次に精度に関わる部分$p(\lambda|\mathcal{D})$を求める。同時分布から、
#
# $$
# \begin{eqnarray}
# p(\lambda|\mathcal{D}) &=& \frac{p(\mathcal{D},\mu,\lambda)}{p(\mu|\lambda,\mathcal{D})p(\mathcal{D})} \\
# &\propto& \frac{p(\mathcal{D},\mu,\lambda)}{p(\mu|\lambda,\mathcal{D})} \\
# &=& \frac{p(\mathcal{D}|\mu,\lambda)p(\mu,\lambda)}{p(\mu|\lambda,\mathcal{D})} \\
# &=& \frac{\{\Pi_{n=1}^{N} \mathcal{N}(x_n|\mu,\lambda^{-1})\} \mathcal{N}(\mu|m,(\beta \lambda)^{-1})Gam(\lambda|a,b)}{\mathcal{N}(\mu|\hat{m},(\hat{\beta}\lambda)^{-1})}
# \end{eqnarray}
# $$
#
# 対数をとって整理していく。
#
# $$
# \begin{eqnarray}
# \ln p(\lambda|\mathcal{D}) &=& \Sigma_{n=1}^{N} \{\ln \mathcal{N}(x_n|\mu,\lambda^{-1}) \} + \ln \mathcal{N}(\mu|m,(\beta \lambda)^{-1}) + \ln Gam(\lambda|a,b) - \ln \mathcal{N}(\mu|\hat{m},(\hat{\beta}\lambda)^{-1}) + const. \\
# &=& \Sigma_{n=1}^{N} \{\frac{1}{2}\ln \lambda - \frac{(x_n-\mu)^2 \lambda}{2}\} + \frac{\ln \beta + \ln \lambda}{2} - \frac{(\mu-m)^2 \beta\lambda}{2} + (a-1)\ln\lambda -b\lambda - \frac{\ln \hat{\beta} + \ln \lambda}{2} + \frac{(\mu-\hat{m})^2 \hat{\beta}\lambda}{2} + const.\\
# &=& (\frac{N}{2} + a - 1)\ln \lambda - \frac{1}{2}\{ \Sigma_{n=1}^{N} x_n^2 - 2\mu \Sigma_{n=1}^{N} x_n + N\mu^2 + \beta \mu^2 - 2\mu m \beta + m^2\beta + 2 b - \mu^2 \hat{\beta} + 2\mu \hat{m} \hat{\beta} - \hat{m}^2 \hat{\beta} \}\lambda + const. \\
# &=& (\frac{N}{2} + a - 1)\ln \lambda - \frac{1}{2}\{ \Sigma_{n=1}^{N} x_n^2 + 2\mu(\hat{m}\hat{\beta}- \Sigma_{n=1}^{N} x_n - m \beta) + (N + \beta - \hat{\beta}) \mu^2 + m^2 \beta - \hat{m}^2 \hat{\beta} + 2b \} \lambda + const. \\
# &=& (\frac{N}{2} + a - 1)\ln \lambda - \{\frac{1}{2}(\Sigma_{n=1}^{N} x_n^2 + \beta m^2 - \hat{\beta} \hat{m}^2) + b\} \lambda + const.
# \end{eqnarray}
# $$
#
# ガンマ分布の定義式と照らし合わせて
#
# $$
# \begin{eqnarray}
# p(\lambda|\mathcal{D}) &=& Gam(\lambda|\hat{a},\hat{b}) \\
# ただし \hat{a} &=& \frac{N}{2} + a \\
# \hat{b} &=& \frac{1}{2}(\Sigma_{n=1}^{N} x_n^2 + \beta m^2 - \hat{\beta} \hat{m}^2) + b
# \end{eqnarray}
# $$
#
# ### まとめ
#
# 結局求めたい事後分布$p(\mu|\lambda,\mathcal{D})p(\lambda|\mathcal{D})$は更新されたハイパーパラメータ$\hat{m},\hat{\beta},\hat{a},\hat{b}$を持つガウス・ガンマ分布の形になる。
#
# $$
# \begin{eqnarray}
# p(\mu|\lambda,\mathcal{D})p(\lambda|\mathcal{D}) &=& \mathcal{N}(\mu|\hat{m},(\hat{\beta}\lambda)^{-1})Gam(\lambda|\hat{a},\hat{b}) \\
# &=& NG(\mu, \lambda|\hat{m},\hat{\beta},\hat{a},\hat{b})
# \end{eqnarray}
# $$
#
# ## 予測分布の導出
#
# 事前分布と事後分布が同じ形状であるから、学習前の予測分布を導出し、更新されたハイパーパラメータを代入することで学習後の予測分布を求めることができる。
#
# 学習前の予測分布は以下のように2つの変数を積分除去することで求められる。
#
# $$p(x_*) = \int \int p(x_*|\mu,\lambda)p(\mu,\lambda)d\mu d\lambda$$
#
# でもできれば積分はしたくないのでベイズの定理から求めてみる。
# > Note: 積分は式変形も面倒だしコンピュータにとっても計算コストが高いのでできるだけしたくない。いかに積分を回避するかが肝。
# ベイズの定理から
#
# $$
# p(\mu,\lambda|x_*) = \frac{p(x_*|\mu,\lambda)p(\mu,\lambda)}{p(x_*)}
# $$
#
# より
#
# $$
# \ln p(x_*) = \ln p(x_*|\mu,\lambda) - \ln p(\mu,\lambda|x_*) + const.
# $$
#
# 事後分布の結果を流用して、
#
# $$
# \begin{eqnarray}
# p(\mu, \lambda|x_*) &=& \mathcal{N}(\mu|m(x_*), \{(1+\beta)\lambda \}^{-1})Gam(\lambda|\frac{1}{2}+a,b(x_*)) \\
# ただし m(x_*) &=& \frac{x_*+\beta m}{1+\beta} \\
# b(x_*) &=& \frac{\beta}{2(1+\beta)}(x_*-m)^2 + b
# \end{eqnarray}
# $$
# > Note: $m(x_*),b(x_*)$の導出
# >> $\hat{m},\hat{b}$を$\mathcal{D}=x_*$として計算すると
# >>
# >>$$
# \begin{eqnarray}
# \hat{m} &=& \frac{1}{\hat{\beta}}(\Sigma_{n=1}^{N} x_n + \beta m) \\
# &=& \frac{1}{1+\beta}(x_*+\beta m)
# \end{eqnarray}
# $$
# >>
# >>$$
# \begin{eqnarray}
# \hat{b} &=& \frac{1}{2}(\Sigma_{n=1}^{N} x_n^2 + \beta m^2 - \hat{\beta} \hat{m}^2) + b \\
# &=& \frac{1}{2}\{x_*^2 + \beta m^2 - (1+\beta)(\frac{x_*+\beta m}{1+\beta})^2\} + b \\
# &=& \frac{1}{2(1+\beta)}\{(1+\beta)(x_*^2+\beta m^2) - x_*^2 - 2x_* \beta m - \beta^2 m^2\} + b \\
# &=& \frac{1}{2(1+\beta)}\{\beta x_*^2 - 2\beta x_* m + \beta m^2\} +b \\
# &=& \frac{\beta}{2(1+\beta)}(x_*-m)^2 + b
# \end{eqnarray}
# $$
# よって予測分布$p(x_*)$は
#
# $$
# \begin{eqnarray}
# \ln p(x_*) &=& \ln \mathcal{N}(x_*|\mu,\lambda) - \ln \mathcal{N}(\mu|m(x_*),\{(1+\beta)\lambda\}^{-1}) - \ln Gam(\lambda|\frac{1}{2}+a,b(x_*)) + const. \\
# &=& \frac{1}{2}\ln \lambda - \frac{(x_*-\mu)^2 \lambda}{2} - \frac{\ln(1+\beta) + \ln \lambda}{2} + \frac{(\mu-m(x_*))^2(1+\beta)\lambda}{2} - (a-\frac{1}{2})\ln \lambda + b(x_*)\lambda - \ln C_G(a+\frac{1}{2}, b(x_*)) + const. \\
# &=& - \frac{(x_*-\mu)^2 \lambda}{2}+ \frac{(\mu-m(x_*))^2(1+\beta)\lambda}{2}+ b(x_*)\lambda - (a+\frac{1}{2})\ln b(x_*) + const.
# \end{eqnarray}
# $$
#
#
# > Note: $x_*$について整理する。気合で計算する。
# $$
# \begin{eqnarray}
# \ln p(x_*) &=& -\frac{(1+\beta)\lambda}{2(1+\beta)}x_*^2 + \frac{2(1+\beta)\mu\lambda}{2(1+\beta)}x_* + \frac{(1+\beta)^2 \lambda}{2(1+\beta)}m(x_*)^2 - \frac{2(1+\beta)^2 \mu \lambda}{2(1+\beta)} m(x_*) + \frac{\beta \lambda}{2(1+\beta)} x_*^2 - \frac{2\beta m \lambda}{2(1+\beta)}x_* - (a+\frac{1}{2})\ln b(x_*) + const. \\
# &=& \frac{\beta \lambda - (1+\beta)\lambda}{2(1+\beta)} x_*^2 + \frac{2(1+\beta)\mu\lambda - 2\beta m \lambda}{2(1+\beta)} + \frac{\lambda}{2(1+\beta)} x_*^2 + \frac{2\beta m \lambda}{2(1+\beta)} x_* - \frac{2(1+\beta)\mu\lambda}{2(1+\beta)}x_* - (a+\frac{1}{2})\ln b(x_*) + const. \\
# &=& -(a+\frac{1}{2})\ln b(x_*) + const. \\
# &=& -\frac{1+2a}{2} \ln \{\frac{\beta}{2(1+\beta)}(x_*-m)^2 + b\} + const. \\
# &=& -\frac{1+2a}{2} \ln \{1 + \frac{\beta}{2(1+\beta)b}(x_*-m)^2\} + const.
# \end{eqnarray}
# $$
#
# これはStudentのt分布になる。
#
# $$
# \begin{eqnarray}
# p(x_*) &=& St(x_*|\mu_s,\lambda_s,\nu_s) \\
# ただし \mu_s &=& m \\
# \lambda_s &=& \frac{\beta a}{(1+\beta)b} \\
# \nu_s &=& 2a
# \end{eqnarray}
# $$
# したがって学習後の予測分布$p(x_*|\mathcal{D})$は更新されたハイパーパラメータを代入して
#
# $$
# \begin{eqnarray}
# p(x_*|\mathcal{D}) &=& St(x_*|\mu_s,\lambda_s,\nu_s) \\
# ただし \mu_s &=& \frac{\Sigma_{n=1}^{N} x_n + \beta m}{N+\beta} \\
# \lambda_s &=& \frac{(N+\beta)(\frac{N}{2}+a)}{(1+N+\beta)(\frac{\Sigma_{n=1}^{N} x_n^2 + \beta m^2 - (N+\beta)(\frac{\Sigma_{n=1}^{N} x_n + \beta m}{N+\beta})^2}{2} + b)} \\
# \nu_s &=& N + 2a
# \end{eqnarray}
# $$
#
# 疲れましたね。
# # Juliaで実装
#
# ## 解析解
# +
# Package imports
# random number generation
using Random
# plotting
using Plots
# probability-distribution utilities
using Distributions
# +
# Parameters of the true (data-generating) Gaussian
mu_true = 5
sigma_true = 5
# Sequence 0..100: the number of observations conditioned on per animation frame
Ns = 0:100;
# +
# Draw 100 data points from the true distribution (seeded for reproducibility)
Random.seed!(12)
data = rand(Normal(mu_true, sigma_true), last(Ns))
# peek at the first five
data[1:5]
# -
# ### 平均未知
# +
#hide_output
# Precision is known
lambda = 1/sigma_true^2
# Hyperparameters of the prior over mu
m = 0
lambda_mu = 1
# StatsPlots provides the animation support
using StatsPlots
# Animate the Bayesian update as observations accumulate.
# enumerate(Ns) yields (i, N) with N = i - 1 observed points in frame i.
animation = @gif for (i, N) in enumerate(Ns)
    # Sum of the N observed data points (data[1:N] == data[1:i-1])
    sum_x = sum(data[1:N])
    # Updated hyperparameters: lambda_mu_hat = N*lambda + lambda_mu.
    # Fix: the original used the 1-based frame index i (= N + 1) here,
    # overcounting the number of observations by one.
    lambda_mu_hat = N * lambda + lambda_mu
    m_hat = (lambda * sum_x + lambda_mu * m)/lambda_mu_hat
    # Posterior distribution over mu
    updated_belief = Normal(m_hat, sqrt(1/lambda_mu_hat))
    # Predictive-distribution parameters
    m_pred = m_hat
    lambda_pred = lambda * lambda_mu_hat / (lambda + lambda_mu_hat)
    # Predictive distribution
    predict = Normal(m_pred, sqrt(1/lambda_pred))
    # Plotting
    p1 = plot(updated_belief,
        size = (500, 250),
        xlabel = "mu",
        ylabel = "",
        legend = nothing,
        xlim = (mu_true-3*sigma_true,mu_true+3*sigma_true),
        fill=0, α=0.3, w=3)
    vline!(p1, [mu_true])
    p2 = plot(predict,
        size = (500, 250),
        xlabel = "x",
        ylabel = "",
        legend = nothing,
        xlim = (mu_true-3*sigma_true,mu_true+3*sigma_true),
        fill=0, α=0.3, w=3)
    plot!(p2, Normal(mu_true, sigma_true),fill=0, α=0.2)
    plot(p1, p2, layout=(1,2), title = "$N observations")
end;
# -
# 
#
# ### 精度未知
# +
#hide_output
# The mean is treated as known
mu = mu_true
# Hyperparameters of the Gamma prior over the precision
a = 1
b = 1
# Animate how the Bayesian update progresses as data accumulate
animation = @gif for (i, N) in enumerate(Ns)
    # Update the hyperparameters with the first N observations.
    # BUGFIX: the original used `a_hat = i/2 + a` (i = N + 1) while b_hat
    # summed only N points — an off-by-one fixed by using N consistently.
    a_hat = N/2 + a
    b_hat = sum((data[1:N] .- mu).^2)/2 + b
    # Posterior distribution of the precision
    updated_belief = Gamma(a_hat, 1/b_hat)
    # Parameters of the predictive (Student-t) distribution
    mu_s = mu
    lambda_s = a_hat/b_hat
    nu_s = 2*a_hat
    # Predictive distribution
    predict = LocationScale(mu_s, sqrt(1/lambda_s), TDist(nu_s))
    # Left panel: posterior of lambda; right panel: predictive vs. truth
    p1 = plot(updated_belief,
            size = (500, 250),
            xlabel = "lambda",
            ylabel = "",
            legend = nothing,
            xlim = (0, 2/sigma_true^2),
            fill=0, α=0.3, w=3)
    vline!(p1, [1/sigma_true^2])
    p2 = plot(predict,
            size = (500, 250),
            xlabel = "x",
            ylabel = "",
            legend = nothing,
            xlim = (mu_true-3*sigma_true,mu_true+3*sigma_true),
            fill=0, α=0.3, w=3)
    # Overlay the true data-generating distribution
    plot!(p2, Normal(mu_true, sigma_true),fill=0, α=0.2)
    plot(p1, p2, layout=(1,2), title = "$N observations")
end;
# -
# 
#
# ### 平均・精度未知
# +
#hide_output
# Hyperparameters of the Normal-Gamma prior
m = 0
beta = 1
a = 1
b = 1
# Animate how the Bayesian update progresses as data accumulate
animation = @gif for (i, N) in enumerate(Ns)
    # Update the hyperparameters with the first N observations.
    # BUGFIX: the original used i (= N + 1) in beta_hat and a_hat while the
    # sums covered only N points — an off-by-one fixed by using N.
    beta_hat = N + beta
    m_hat = (sum(data[1:N])+beta*m)/beta_hat
    a_hat = N/2 + a
    b_hat = (sum(data[1:N].^2) + beta*m^2 - beta_hat*m_hat^2)/2 + b
    # Posterior distributions; the Normal's variance uses one Gamma draw
    updated_gamma = Gamma(a_hat, 1/b_hat)
    updated_normal = Normal(m_hat, sqrt(1/(beta_hat*rand(updated_gamma))))
    # Parameters of the predictive (Student-t) distribution
    mu_s = m_hat
    lambda_s = beta_hat*a_hat/((1+beta_hat)*b_hat)
    nu_s = 2*a_hat
    # Predictive distribution
    predict = LocationScale(mu_s, sqrt(1/lambda_s), TDist(nu_s))
    # Panels: posterior of mu, posterior of lambda, predictive vs. truth
    p1 = plot(updated_normal,
            size = (500, 250),
            xlabel = "mu",
            ylabel = "",
            legend = nothing,
            xlim = (mu_true-3*sigma_true,mu_true+3*sigma_true),
            fill=0, α=0.3, w=3)
    vline!(p1, [mu_true])
    p2 = plot(updated_gamma,
            size = (500, 250),
            xlabel = "lambda",
            ylabel = "",
            legend = nothing,
            xlim = (0, 2/sigma_true^2),
            fill=0, α=0.3, w=3)
    vline!(p2, [1/sigma_true^2])
    p3 = plot(predict,
            size = (500, 250),
            xlabel = "x",
            ylabel = "",
            legend = nothing,
            xlim = (mu_true-3*sigma_true,mu_true+3*sigma_true),
            fill=0, α=0.3, w=3)
    # Overlay the true data-generating distribution
    plot!(p3, Normal(mu_true, sigma_true),fill=0, α=0.2)
    plot(p1, p2, p3,layout=(1,3), title = "$N observations")
end;
# -
# 
# ## Turing.jlによるハミルトニアンモンテカルロ
# +
# Load Turing and MCMCChains.
using Turing, MCMCChains
# Load the distributions library.
using Distributions
# Load StatsPlots for density plots.
using StatsPlots
# +
# @model gauss_model(y) = begin
# # パラメータの分布
# lambda ~ Gamma(1 ,1)
# mu ~ Normal(0, sqrt(1/(1*lambda)))
# # Nは観測点の数
# N = length(y)
# for n in 1:N
# # 尤度関数
# y[n] ~ Normal(mu, sqrt(1/lambda))
# end
# end;
# Turing model: 1-D Gaussian with unknown mean and precision
@model gauss_model(y) = begin
    # Prior hyperparameters (same values as the closed-form analysis above)
    a = 1
    b = 1
    beta = 1
    m = 0
    # Priors: Gamma on the precision, Normal (precision beta*lambda) on the mean
    lambda ~ Gamma(a ,b)
    mu ~ Normal(m, sqrt(1/(beta*lambda)))
    # N is the number of observations
    N = length(y)
    for n in 1:N
        # Likelihood of each observation
        y[n] ~ Normal(mu, sqrt(1/lambda))
    end
end;
# +
# Settings of the Hamiltonian Monte Carlo (HMC) sampler.
iterations = 4000
ϵ = 0.01   # HMC step size (presumably the leapfrog step — confirm Turing version)
τ = 10     # HMC steps per iteration
# Draw 4 chains to assess convergence
# NOTE(review): `psample` comes from older Turing releases; newer versions
# use `sample(model, sampler, MCMCThreads(), iterations, 4)` — confirm.
chains = psample(gauss_model(data), HMC(ϵ, τ), iterations, progress=false, 4)
# -
plot(chains)
# 得られたMCMCサンプルからランダムにパラメータを選びガウス分布から乱数生成することを繰り返すと、予測分布からのMCMCサンプルが得られる{% fn 2 %}。
# +
# Discard warm-up and keep draws 1001..4000 from chain 1
chain1 = chains[1001:4000,:,1]
mu_mcmc = chain1[:mu].value
lambda_mcmc = chain1[:lambda].value
# Posterior predictive sampling: repeatedly pick a random (mu, lambda)
# pair from the MCMC draws and generate one Gaussian sample from it
predict_mcmc = zeros(3000)
for i in 1:3000
    mu_tmp = mu_mcmc[rand(1:3000)]
    lambda_tmp = lambda_mcmc[rand(1:3000)]
    x_tmp = rand(Normal(mu_tmp, sqrt(1/lambda_tmp)))
    predict_mcmc[i] = x_tmp
end
# +
# Compute the exact (closed-form) posterior and predictive distributions
N = length(data)
# Hyperparameters of the prior distribution
m = 0
beta = 1
a = 1
b = 1
# Update the hyperparameters with all N observations.
# BUGFIX: the original summed only data[1:N-1] (99 of the 100 points)
# while beta_hat and a_hat were updated with all N — use the full data.
beta_hat = N + beta
m_hat = (sum(data)+beta*m)/beta_hat
a_hat = N/2 + a
b_hat = (sum(data.^2) + beta*m^2 - beta_hat*m_hat^2)/2 + b
# Posterior distributions (the Normal's variance uses one Gamma draw)
updated_gamma = Gamma(a_hat, 1/b_hat)
updated_normal = Normal(m_hat, sqrt(1/(beta_hat*rand(updated_gamma))))
# Parameters of the predictive (Student-t) distribution
mu_s = m_hat
lambda_s = beta_hat*a_hat/((1+beta_hat)*b_hat)
nu_s = 2*a_hat
# Predictive distribution
predict_dist = LocationScale(mu_s, sqrt(1/lambda_s), TDist(nu_s))
# Posterior of the mean: MCMC density (blue) vs closed form (green)
p1 = plot(chains[:,:,1][:mu], seriestype = :density, xlim = (mu_true-3*sigma_true,mu_true+3*sigma_true),
    w = 2, c = :blue)
plot!(p1, updated_normal,
    xlabel = "mu", ylabel = "", title = "", xlim = (mu_true-3*sigma_true,mu_true+3*sigma_true), label = "Closed-form",
    fill=0, α=0.3, w=3, c = :lightgreen)
# Mark the true mean in red
vline!(p1, [mu_true], label = "True mu", c = :red)
# Posterior of the precision: MCMC density (blue) vs closed form (green)
p2 = plot(chains[:,:,1][:lambda], seriestype = :density, xlim = (0, 2/sigma_true^2),
    w = 2, c = :blue)
plot!(p2, updated_gamma,
    xlabel = "lambda", ylabel = "", title = "", xlim = (0, 2/sigma_true^2), label = "Closed-form",
    fill=0, α=0.3, w=3, c = :lightgreen)
# Mark the true precision in red
vline!(p2, [(1/sigma_true)^2], label = "True lambda", c = :red)
plot(p1, p2, layout=(1,2))
# -
# Plot the posterior predictive: density of the MCMC samples (blue)
p3 = plot(predict_mcmc, seriestype=:density, xlim = (mu_true-3*sigma_true,mu_true+3*sigma_true),
    w = 2, c = :blue)
# Overlay the closed-form Student-t predictive distribution (green)
plot!(p3, predict_dist,
    size = (500, 250),
    xlabel = "x",
    ylabel = "",
    legend = nothing,
    xlim = (mu_true-3*sigma_true,mu_true+3*sigma_true),
    fill=0, α=0.3, w=3, c = :lightgreen)
# {{ '[須山敦志. 杉山将. ベイズ推論による機械学習入門. 講談社, 2017.](https://www.kspub.co.jp/book/detail/1538320.html)' | fndetail: 1 }}
# {{ '[松浦健太郎. StanとRでベイズ統計モデリング. 共立出版, 2016.](https://www.kyoritsu-pub.co.jp/bookdetail/9784320112421)のChapter 2' | fndetail: 2 }}
| _notebooks/2020-04-03-bayes_part3.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .js
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Javascript (Node.js)
// language: javascript
// name: javascript
// ---
// + [markdown] deletable=false
// # ibm_db.fetchSync()
// + [markdown] deletable=false
// ## Purpose:
// + [markdown] deletable=false
// Fetch a row of data from ODBCResult object synchronously.
// + [markdown] deletable=false
// ## Syntax:
// + [markdown] deletable=false
// result.fetchSync(option)
// + [markdown] deletable=false
// ## Parameters:
// + [markdown] deletable=false
// * __*option :*__ OPTIONAL - Object type.
//
// * `fetchMode` - Format of returned row data. By default row data get returned in object form. option = {fetchMode:3} will return row in array form.
//
// + [markdown] deletable=false
// ## Return values:
// + [markdown] deletable=false
// * If __successful__, fetch a `row` of data.
// * If __unsuccessful__, returns `Error`
// + [markdown] deletable=false
// ## Description:
// + [markdown] deletable=false
// The __ibm_db.fetchSync()__ API is used to synchronously fetch a row of data from an IBM Db2 server or database.
// + [markdown] deletable=false
// ## Example:
//
// + deletable=false
/*
#----------------------------------------------------------------------------------------------#
# NAME: ibm_db-fetchSync.js #
# #
# PURPOSE: This program is designed to illustrate how to use the ibm_db.fetchSync() API to #
# synchronously fetch a row of data from an remote Db2 server. #
# #
# Additional APIs used: #
# ibm_db.open() #
# ibm_db.executeSync() #
# ibm_db.querySync() #
# ibm_db.fetchSync() #
# ibm_db.prepareSync() #
# ibm_db.closeSync() #
# #
# #
#----------------------------------------------------------------------------------------------#
# DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY #
# #
# (C) COPYRIGHT International Business Machines Corp. 2018 All Rights Reserved #
# Licensed Materials - Property of IBM #
# #
# US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA #
# ADP Schedule Contract with IBM Corp. #
# #
# The following source code ("Sample") is owned by International Business Machines #
# Corporation ("IBM") or one of its subsidiaries and is copyrighted and licensed, not sold. #
# You may use, copy, modify, and distribute the Sample in any form without payment to IBM, #
# for the purpose of assisting you in the creation of Python applications using the ibm_db #
# library. #
# #
# The Sample code is provided to you on an "AS IS" basis, without warranty of any kind. IBM #
# HEREBY EXPRESSLY DISCLAIMS ALL WARRANTIES, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. #
# Some jurisdictions do not allow for the exclusion or limitation of implied warranties, so #
# the above limitations or exclusions may not apply to you. IBM shall not be liable for any #
# damages you suffer as a result of using, copying, modifying or distributing the Sample, #
# even if IBM has been advised of the possibility of such damages. #
#----------------------------------------------------------------------------------------------#
*/
var ibmdb = require("ibm_db")
  ,cn = "DATABASE=dbName;HOSTNAME=myhost;PORT=dbport;PROTOCOL=TCPIP;UID=username;PWD=password";

ibmdb.open(cn, function (err, conn) {
    // BUGFIX: report a failed open() before touching `conn`. The original
    // only examined `err` inside `if (conn) { ... }`, so when open() failed
    // with no connection object the error was never logged and the later
    // conn.querySync() call threw on an undefined `conn`.
    if (err) {
        console.log(JSON.stringify(err));
        return;
    }
    if (conn && conn.connected) {
        console.log("\n A database connection has been created successfully.\n");
    }

    // Create a scratch table and insert two rows to read back.
    conn.querySync("create table hits (col1 varchar(40), col2 int)");
    conn.querySync("insert into hits values ('something', 42)");
    conn.querySync("insert into hits values ('fur', 43)");

    // prepare + execute returns an ODBCResult object.
    var stmt = conn.prepareSync("select * from hits");
    var result = stmt.executeSync();

    // fetchSync() returns one row per call (array form because of
    // fetchMode: 3) and a falsy value once the result set is exhausted.
    var data = 0;
    while (data = result.fetchSync({ fetchMode: 3 })) {
        console.log(data);
    }

    // Clean up: close the result set, drop the scratch table, disconnect.
    result.closeSync();
    conn.querySync("drop table hits");
    conn.closeSync();
});
// -
| Jupyter_Notebooks/ibm_db-fetchSync.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 对象和类
# - 一个学生,一张桌子,一个圆都是对象
# - 对象是类的一个实例,你可以创建多个对象,创建类的一个实例过程被称为实例化,
# - 在Python中对象就是实例,而实例就是对象
# ## 定义类
# class ClassName:
#
# do something
#
# - class 类的表示与def 一样
# - 类名最好使用驼峰式
# - 在Python2中类是需要继承基类object的,在Python中默认继承,可写可不写
# - 可以将普通代码理解为皮肤,而函数可以理解为内衣,那么类可以理解为外套
# A class initializes itself through __init__; `self` refers to the instance.
# The first variable of every method is not an ordinary argument but the
# instance marker (self).
class mxt:
    def __init__(self):
        # runs automatically on instantiation ("I was initialized")
        print("我初始化了")
    def Print_(self,name,name2 = "lll"):
        # name2 has a default value, so the caller may omit it
        print("mxt lalal",name,name2)
maxueting = mxt()# the () goes straight to the initializer
maxueting.Print_(name = "aa")
# Demo: parameters that several methods need can be shared as instance
# attributes (set once in __init__, read everywhere else).
class mxt:
    def __init__(self,num1,num2):
        print("我初始化了")
        # stash both numbers on the instance so every method can reach them
        self.num1 = num1
        self.num2 = num2
        print(self.num1,self.num2)
    def cheng(self):
        # product of the two shared numbers
        product = self.num1 * self.num2
        return product
    def sum(self,name):
        # print the caller-supplied name, then return the sum
        print(name)
        total = self.num1 + self.num2
        return total
demo = mxt(num1 = 1,num2 = 2)
demo.sum(name = "lll")
# Exercise: judge odd/even for a number, and leap/common for a year
class panduan:
    def __init__(self):
        print("我初始化了")
    def jiou(self,shu):
        # even iff divisible by 2
        if shu % 2 == 0:
            print("偶")
        else:
            print("奇")
    def year(self,nianfen):
        # leap year: (divisible by 4 and not by 100) or divisible by 400;
        # `and` binds tighter than `or`, so no parentheses are needed
        if nianfen % 4 == 0 and nianfen % 100 != 0 or nianfen % 400 == 0:
            print("闰年")
        else:
            print("平年")
# step 1: create an instance of the class
a = panduan()
a.jiou(shu = 2)
# ## 定义一个不含初始化__init__的简单类
# class ClassName:
#
# joker = “Home”
#
# def func():
# print('Worker')
#
# - 尽量少使用
#
#
# ## 定义一个标准类
# - __init__ 代表初始化,可以初始化任何动作
# - 此时类调用要使用(),其中()可以理解为开始初始化
# - 初始化内的元素,类中其他的函数可以共享
# 
# - Circle 和 className_ 的第一个区别有 __init__ 这个函数
# - 。。。。 第二个区别,类中的每一个函数都有self的这个“参数”
# ## 何为self?
# - self 是指向对象本身的参数
# - self 只是一个命名规则,其实可以改变的,但是我们约定俗成的是self,也便于理解
# - 使用了self就可以访问类中定义的成员
# <img src="../Photo/86.png"></img>
# ## 使用类 Cirlcle
# ## 类的传参
# - class ClassName:
#
# def __init__(self, para1,para2...):
#
# self.para1 = para1
#
# self.para2 = para2
# ## EP:
# - A:定义一个类,类中含有两个功能:
# - 1、产生3个随机数,获取最大值
# - 2、产生3个随机数,获取最小值
# - B:定义一个类,(类中函数的嵌套使用)
# - 1、第一个函数的功能为:输入一个数字
# - 2、第二个函数的功能为:使用第一个函数中得到的数字进行平方处理
# - 3、第三个函数的功能为:得到平方处理后的数字 - 原来输入的数字,并打印结果
# Shared attributes let one method use another method's result
import random
class mxt2:
    def __init__(self):
        pass
    def func1(self):
        # SECURITY NOTE: eval(input(...)) executes arbitrary user input;
        # acceptable for a classroom demo, never for untrusted input.
        self.num = eval(input(">>"))
        print("输入的数字是:",self.num)
    def func2(self):
        # square of the number entered in func1 (func1 must run first)
        self.square = self.num ** 2
        print("平方数是:",self.square)
    def func3(self):
        # difference between the square and the original number
        self.cha = self.square - self.num
        print("差是",self.cha)
a = mxt2()
a.func1()
a.func2()
a.func3()
class mxt2:
    def __init__(self):
        # hard-coded credentials for the exercise
        self.account = "123"
        self.password = "<PASSWORD>"
    def Account(self):
        # prompt for and remember the account value
        self.acc = input("请输入账号>>")
    def Password(self):
        # prompt for and remember the password value
        self.passwor = input("请输入密码:>>")
    def Check(self):
        # both the account and the password must match
        if self.acc == self.account and self.passwor == self.password:
            print("Succes")  # NOTE(review): typo for "Success" in the output
        else:
            print("Failed")
    def Start(self):
        # convenience wrapper running the whole login flow
        self.Account()
        self.Password()
        self.Check()
a = mxt2()
a.Account()
a.Password()
a.Check()
class mxt2:
    def __init__(self):
        # hard-coded credentials for the exercise
        self.account = "123"
        self.password = "<PASSWORD>"
    def Account(self):
        self.acc = input("请输入账号>>")
    def Password(self):
        self.passwor = input("请输入密码:>>")
    def Check(self):
        # on a mismatch, fall back to the captcha verification step
        if self.acc == self.account and self.passwor == self.password:
            print("Succes")  # NOTE(review): typo for "Success" in the output
        else:
            self.Verify()
    def Verify(self):
        # fixed captcha value for the demo; loops until it is entered
        Verify_Var = 123
        print("验证码是",Verify_Var)
        while 1:
            User_Verify = eval(input("请输入验证码:>>"))
            # NOTE(review): prints "Failed" when the captcha *matches* —
            # the message looks inverted; confirm the intended behaviour.
            if User_Verify == Verify_Var:
                print("Failed")
                break
    def Start(self):
        # run the whole login workflow
        self.Account()
        self.Password()
        self.Check()
a = mxt2()
a.Start()
class mxt:
    """
    Implement Login Class.
    """
    def __init__(self):
        """
        Initialize the hard-coded demo credentials.
        Arguments:
        ---------
        None.
        Returns:
        --------
        None.
        """
        self.account = '123'
        self.password = '<PASSWORD>'
    def Account(self):
        """
        Prompt for and store the account value.
        Arguments:
        ---------
        None.
        Returns:
        --------
        None.
        """
        self.acc = input('请输入账号:>>')
    def Password(self):
        """
        Prompt for and store the password value.
        Arguments:
        ---------
        None.
        Returns:
        --------
        None.
        """
        self.passwor = input('请输入密码:>>')
    def Check(self):
        """
        Check account and password.
        Note:
        ----
        Both comparisons are connected with "and".
        If account and password are right, the login succeeds;
        otherwise the Verify step runs.
        """
        if self.acc == self.account and self.passwor == self.password:
            print('Success')
        else:
            # fall back to captcha verification
            self.Verify()
    def Verify(self):
        """
        Captcha verification: loop until the fixed code is entered.
        NOTE(review): prints 'Failed' when the code matches — looks inverted.
        """
        Verify_Var = 123
        print('验证码是:',Verify_Var)
        while 1:
            User_Verify = eval(input('请输入验证码:>>'))
            if User_Verify == Verify_Var:
                print('Failed')
                break
    def Start(self):
        """
        Run the whole login workflow: Account -> Password -> Check.
        """
        self.Account()
        self.Password()
        self.Check()
# +
# Triple-quoted strings directly inside a definition are docstrings.
# BUGFIX: the original cell contained
#     """
#     """是注释
# which closes the string immediately and leaves the bare name 是注释
# adjacent to a string literal — a SyntaxError when the cell runs.
# The note is kept here as comments instead.
# -
import random
class mxt:
    """Pick three random ints in [0, 10] at construction time and expose
    their maximum and minimum."""
    def __init__(self):
        print("初始化")
        self.a=random.randint(0,10)
        self.b=random.randint(0,10)
        self.c=random.randint(0,10)
    def max_(self):
        # print the three values, return the largest
        print(self.a,self.b,self.c)
        return max(self.a,self.b,self.c)
    def min_(self):
        # print the three values, return the smallest.
        # BUGFIX: `sef.a` was a typo for `self.a` and raised NameError
        # whenever min_() was called.
        print(self.a,self.b,self.c)
        return min(self.a,self.b,self.c)
a = mxt()
a.max_()
# ## 类的继承
# - 类的单继承
# - 类的多继承
# - 继承标识
# > class SonClass(FatherClass):
#
# def __init__(self):
#
# FatherClass.__init__(self)
# +
# Inheritance always uses parentheses after the class name
class A:
    """Base class used to demonstrate single inheritance."""
    def __init__(self):
        self.a = "a"
    def a_(self):
        print("class A")
class C:
    # Multiple inheritance resolves left-to-right; earlier bases win.
    # Double-underscore names are private and are not inherited.
    # BUGFIX: a class body consisting only of comments is an
    # IndentationError, so `pass` is required here.
    pass
class B(A):
    """Subclass of A that calls A's initializer explicitly."""
    def __init__(self):
        # tell A that B is reusing its initialisation
        A.__init__(self)
    def b_(self):
        # attributes and methods inherited from A are available here
        print(self.a)
        self.a_()
# -
bb = B()
bb.b_()
bb.__dir__()
# ## 私有数据域(私有变量,或者私有函数)
# - 在Python中 变量名或者函数名使用双下划线代表私有 \__Joker, def \__Joker():
# - 私有数据域不可继承
# - 私有数据域强制继承 \__dir__()
# 
# ## EP:
# 
# 
# 
#
# 私有变量不可继承,不可调用,但是可以在内部使用
# +
# @staticmethod 静态调用(使用很少)
# -
# ## 类的其他
# - 类的封装
# - 实际上就是将一类功能放在一起,方便未来进行管理
# - 类的继承(上面已经讲过)
# - 类的多态
# - 包括装饰器:将放在以后处理高级类中教
# - 装饰器的好处:当许多类中的函数需要使用同一个功能的时候,那么使用装饰器就会方便许多
# - 装饰器是有固定的写法
# - 其包括普通装饰器与带参装饰器
# # Homework
# ## UML类图可以不用画
# ## UML 实际上就是一个思维图
# - 1
# 
class A:
    """Rectangle exercise: width/height live on the instance; Area() and
    getPrermeter() print and store the area and the perimeter."""
    def __init__(self):
        print("我初始化了")
        # BUGFIX: the attributes were created only by juxing(), so calling
        # Area()/getPrermeter() right after A() raised AttributeError.
        # Initialise them here with the same values juxing() assigns.
        self.width = 1
        self.heightd = 2
    def juxing(self):
        # (re)set the rectangle's dimensions
        self.width = 1
        self.heightd = 2
    def Area(self):
        # area = width * height
        self.area = self.width * self.heightd
        print("面积是:>>",self.area)
    def getPrermeter(self):
        # perimeter = 2 * (width + height)
        self.getprermeter = (self.width + self.heightd) * 2
        print("周长是:>>",self.getprermeter)
a = A()
a.getPrermeter()
a.Area()
# - 2
# 
class Account:
    """Bank-account exercise: account id 0, opening balance 20000, annual
    interest rate 4.5%, one withdrawal of 2500 and one deposit of 3000."""
    def __init__(self):
        print("我初始化了")
    def ID(self):
        # attributes for the exercise
        self.id = 0              # account id
        self.zhanghue = 20000    # opening balance
        self.qu = 2500           # amount withdrawn
        self.cun = 3000          # amount deposited
        self.lilv = 4.5 / 100    # annual interest rate
        print("")
    def report(self):
        """Apply the withdrawal and deposit, print the resulting balance
        and the monthly interest, and return the balance.

        (The original cell ended in a dangling `def`, a SyntaxError;
        this method completes the exercise.)
        """
        balance = self.zhanghue - self.qu + self.cun
        monthly_interest = balance * self.lilv / 12
        print("余额是:>>", balance, "月利息是:>>", round(monthly_interest, 2))
        return balance
# - 3
# 
class Fan:
    def __init__(self):
        """Initialize the fan demo object."""
        print("我初始化了")
    def SUDU(self):
        """Define the speed constants: SLOW = 1, MEDIUM = 2, FAST = 3."""
        self.SLOW = 1
        self.MEDIUM = 2
        self.FAST = 3
        print("最慢的速度是:>>",self.SLOW,"中速度:>>",self.MEDIUM,"最快速度:>>",self.FAST)
    def duixiang1(self):
        """First fan: speed fast (3), switched on, radius 10, yellow."""
        self.speed1 = 3
        self.on1 = "True"
        self.radius1 = 10
        self.color1 = "yellow"
        print("第一个对象的速度是:>>",self.speed1,"状态:>>",self.on1,"半径是:>>",self.radius1,"颜色是:>>",self.color1)
    def duixiang2(self):
        """Second fan: speed medium (2), switched off by default, radius 5, blue."""
        self.speed2 = 2
        self.on2 = "False"
        self.radius2 = 5
        self.color2 = "blue"
        print("第二个对象的速度是:>>",self.speed2,"状态:>>",self.on2,"半径是:>>",self.radius2,"颜色是:>>",self.color2)
a = Fan()
a.SUDU()
a.duixiang1()
a.duixiang2()
# - 4
# 
# 
import math
class RegularPolygon:
    def __init__(self):
        """Initialize with defaults: 3 sides, side length 1, centered at (0, 0)."""
        self.bianshu = 3     # number of sides
        self.bianchang = 1   # side length
        self.x = 0
        self.y = 0
        print("我初始化了")
    def RegularPolygon(self):
        """Compute and print the perimeter (sides * length) and the area
        via  n * s^2 / (4 * tan(pi / n)).

        NOTE(review): this method shares the class's name — legal, but
        easily confused with the constructor.
        """
        self.zhouchang = self.bianshu * self.bianchang
        self.mianji = (self.bianshu * self.bianchang * self.bianchang) / (4 * math.tan(math.pi / self.bianshu))
        print("周长是:>>",self.zhouchang,"面积是:>>",self.mianji)
a = RegularPolygon()
a.RegularPolygon()
# - 5
# 
class LinearEquation:
    """Solve the 2x2 linear system  a*x + b*y = e,  c*x + d*y = f
    by Cramer's rule."""
    def __init__(self):
        """Read the six coefficients from the user.

        SECURITY NOTE: eval(input(...)) executes arbitrary input; fine for
        a classroom demo, never for untrusted input.
        """
        self.a = eval(input('输入a值'))
        self.b = eval(input('输入b值'))
        self.c = eval(input('输入c值'))
        self.d = eval(input('输入d值'))
        self.e = eval(input('输入e值'))
        self.f = eval(input('输入f值'))
        print("我初始化了")
    def isSolvable(self):
        """Print "true" when the determinant a*d - b*c is non-zero,
        otherwise report that there is no unique solution."""
        if self.a * self.d - self.b * self.c != 0:
            print("true")
        else:
            print("无解")
    def jie(self):
        """Compute and print x and y by Cramer's rule (rounded to 2 dp).

        NOTE(review): called unconditionally below, so a zero determinant
        raises ZeroDivisionError — confirm that is acceptable here.
        """
        self.getX = (self.e * self.d - self.b * self.f) / (self.a * self.d - self.b * self.c)
        self.getY = (self.a * self.f - self.e * self.c) / (self.a * self.d - self.b * self.c)
        print("X的值是:>>",round(self.getX,2),"Y的值是:>>",round(self.getY,2))
a = LinearEquation()
a.isSolvable()
a.jie()
# - 6
# 
class LinearEquation:
    """Find the intersection of two lines, each given by two endpoints."""
    def __init__(self):
        """Interactive demo; the endpoint values are read in later steps."""
        print("我初始化了")
    def duandian1(self):
        """Read the two endpoints of the first line.

        SECURITY NOTE: eval(input(...)) executes arbitrary input; demo only.
        """
        self.x1 = eval(input("请输入第一个端点的x值"))
        self.y1 = eval(input("请输入第一个端点的y值"))
        self.x2 = eval(input("请输入第二个端点的x值"))
        self.y2 = eval(input("请输入第二个端点的y值"))
    def duandian2(self):
        """Read the two endpoints of the second line."""
        self.x3 = eval(input("请输入第三个端点的x值"))
        self.y3 = eval(input("请输入第三个端点的y值"))
        self.x4 = eval(input("请输入第四个端点的x值"))
        self.y4 = eval(input("请输入第四个端点的y值"))
    def zhixian1(self):
        """Slope k1 and intercept b1 of the first line.

        NOTE(review): a vertical line (x1 == x2) raises ZeroDivisionError.
        """
        self.k1 = (self.y1 - self.y2) / (self.x1 - self.x2)
        self.b1 = self.y1 - self.k1 * self.x1
        print("第一条直线的斜率和截距是:>>",self.k1,self.b1)
    def zhixian2(self):
        """Slope k2 and intercept b2 of the second line."""
        self.k2 = (self.y3 - self.y4) / (self.x3 - self.x4)
        self.b2 = self.y3 - self.k2 * self.x3
        print("第二条直线的斜率和截距是:>>",self.k2,self.b2)
    def jiaodian(self):
        """Intersection point from the two slope/intercept pairs.

        NOTE(review): parallel lines (k1 == k2) raise ZeroDivisionError.
        """
        self.x = (self.b2 - self.b1) / (self.k1 - self.k2)
        self.y = self.k1 * self.x + self.b1
        print("交点是:>>",self.x,self.y)
a = LinearEquation()
a.duandian1()
a.duandian2()
a.zhixian1()
a.zhixian2()
a.jiaodian()
# - 7
# 
| 7.23.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Gimarose/OOP-1-2/blob/main/Activity_2_Temperatureipynb.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="9RgwjDsVTJne" outputId="cf9b6f2b-1186-4038-db8a-316ccc692fcd"
#Write a python program that converts the temperature Celsius to Fahrenheit. Create a class name: Temperature
#Create Celsius as attribute name, Temp() as method, and temp1 as object name. F= 1.8xC + 32
class Temperature:
    """Convert a Celsius temperature to Fahrenheit: F = 1.8*C + 32."""
    def __init__(self, Celsius):
        # store the Celsius reading on the instance
        self.Celsius = Celsius
    def Temp(self):
        """Return the temperature in degrees Fahrenheit."""
        celsius_reading = self.Celsius
        return 1.8 * celsius_reading + 32
# Read a Celsius value, convert it, and print the result to 2 decimals
Input_temp= float(input("Input temperature in Celsius:"))
temp1 = Temperature(Input_temp)
print(round(temp1.Temp(),2))
| Activity_2_Temperatureipynb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# load the raw dataset
import numpy as np
import pandas as pd
from pandas import DataFrame
# Gene-expression matrix: rows are genes, columns are patient-sample barcodes
rawdf = pd.read_csv("unc.edu_PANCAN_IlluminaHiSeq_RNASeqV2.geneExp.tsv", sep="\t", index_col=0)
rawdf.head()
# -
# transpose the raw data so each row is a patient sample, each column a gene
processeddf = rawdf.transpose()
# check whether the data set has any missing values
processeddf.isnull().any().any()
# check whether any row (i.e. sample) is all zeros so we can delete it
removed_all_zero_rows_df = processeddf.loc[(processeddf!=0).any(axis=1),:]
removed_all_zero_rows_df.shape
# answer is no; all 10471 samples have values; proceed to next
# check for duplicate rows/samples.
# BUGFIX: Index.get_duplicates() was removed from pandas (deprecated in
# 0.23, gone in 0.25+); use the duplicated() boolean mask instead.
processeddf.index[processeddf.index.duplicated()]
# answer is no; move to next step
# check for duplicate columns/features
processeddf.columns[processeddf.columns.duplicated()]
# answer is no; move to next step
# answer is no; move to next step
# +
# create the data label file (original data)
# step 1: load tissue source site file
dfSampleSource = pd.read_csv("tissueSourceSite.tsv", sep="\t", keep_default_na=False, na_values=[])
# step 2: load disease study information file
dfSampleAbb = pd.read_csv("diseaseStudy.tsv", sep="\t",keep_default_na=False, na_values=[])
dfSampleAbb = dfSampleAbb.reset_index()
# step 3: add "study abbreviation" from the disease study file to the tissue source site file
dfSampleSourceAddOn = dfSampleSource.merge(dfSampleAbb[['Study Name', 'Study Abbreviation']], on=['Study Name'])
# step 4: create a new frame (dflabels) for the class labels
dflabels = DataFrame(columns = ['labels'], index = processeddf.index)
# step 5: map each sample's TSS code (barcode characters 5:7) to its study
# abbreviation. A dict lookup replaces the original O(samples x codes)
# nested iterrows loop; keep='first' on duplicates mirrors the original
# `.iloc[0]` tie-breaking, and unmatched samples stay null as before.
code_to_study = (dfSampleSourceAddOn.drop_duplicates('TSS Code')
                 .set_index('TSS Code')['Study Abbreviation'].to_dict())
for i in dflabels.index:
    if i[5:7] in code_to_study:
        dflabels.loc[i] = code_to_study[i[5:7]]
# step 6: check that every sample received a tumor-type label
dflabels.isnull().any().any()
# -
# count # of samples per class
from collections import Counter
print (Counter(dflabels['labels']))
# least class CHOL has 45 samples
# count total 3 of classes
len(Counter(dflabels['labels']))
# +
# randomly sample 45 distinct samples per tumor type (undersampling)
alltumortype = Counter(dflabels['labels']).keys()
sampled_parts = []
for i in alltumortype:
    OnetypeAll = dflabels.loc[dflabels['labels'] == i].index
    # choose 45 samples of this type without replacement
    OnetypeAll45 = np.random.choice(OnetypeAll, 45, replace=False)
    sampled_parts.append(processeddf.loc[OnetypeAll45])
# BUGFIX: DataFrame.append() was removed in pandas 2.0; concatenate instead.
undersample45df = pd.concat(sampled_parts) if sampled_parts else DataFrame(columns = processeddf.columns)
# -
# confirm total has 1458 samples selected
undersample45df.shape
# +
# create the data label file for the undersampled data
# step 1: load tissue source site file
dfSampleSource = pd.read_csv("tissueSourceSite.tsv", sep="\t", keep_default_na=False, na_values=[])
# step 2: load disease study information file
dfSampleAbb = pd.read_csv("diseaseStudy.tsv", sep="\t",keep_default_na=False, na_values=[])
dfSampleAbb = dfSampleAbb.reset_index()
# step 3: add "study abbreviation" from the disease study file to the tissue source site file
dfSampleSourceAddOn = dfSampleSource.merge(dfSampleAbb[['Study Name', 'Study Abbreviation']], on=['Study Name'])
# step 4: create a new frame (dflabels) for the class labels
dflabels = DataFrame(columns = ['labels'], index = undersample45df.index)
# step 5: map each sample's TSS code (barcode characters 5:7) to its study
# abbreviation via a dict — replaces the original nested iterrows loop;
# keep='first' mirrors the original `.iloc[0]` tie-breaking.
code_to_study = (dfSampleSourceAddOn.drop_duplicates('TSS Code')
                 .set_index('TSS Code')['Study Abbreviation'].to_dict())
for i in dflabels.index:
    if i[5:7] in code_to_study:
        dflabels.loc[i] = code_to_study[i[5:7]]
# step 6: check that every sample received a tumor-type label
dflabels.isnull().any().any()
# -
# export dflabels so it can be reused later as the class-label file
dflabels.to_csv('project_class_labels_45.csv')
# export the undersampled expression matrix for the downstream analysis
undersample45df.to_csv('project_data_down_45.csv')
| project_class_labels_mapping_45_per_class.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cooler
# language: python
# name: cooler
# ---
# +
# %matplotlib inline
import matplotlib.pyplot as plt
# poster-sized default styling for figures
plt.style.use('seaborn-poster')
from matplotlib import colors
import numpy as np
import pandas as pd
import cooler
import cooltools
import cooltools.expected
import bioframe
import os
import pickle
import sys
# Original computation of the per-diagonal expected contacts (kept for reference):
#clr = cooler.Cooler('/groups/tanaka/Projects/axolotl-genome/AmexG_v6.0/AmexG_v6.0_DD/work/TADs/HiCUP/cooler/zoom/AL1_DpnII.cooler.zoom::/resolutions/10000')
#regions = pd.read_csv('/groups/tanaka/Projects/axolotl-genome/AmexG_v6.0/AmexG_v6.0_DD/work/manuscript/rebuttal/contact_probabilities/tads.bed', sep='\t', header=None, names=['chrom', 'start', 'end'])
#cvd = cooltools.expected.diagsum(clr=clr, regions=regions, transforms={'balanced': lambda p: p['count']*p['weight1']*p['weight2']})
# -
# Load the precomputed table instead of recomputing it
cvd = pd.read_pickle('/groups/tanaka/Projects/axolotl-genome/AmexG_v6.0/AmexG_v6.0_DD/work/manuscript/rebuttal/contact_probabilities/merged.dat')
# number of elements in the loaded table
cvd.size
| jupyter/_intra_TAD_probs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Figure examples
# ## figure.bbands
#
# plots Bollinger Bands
# +
from yahoofinancials import YahooFinancials as YF
import numpy as np
import pandas as pd
import bokelper as bkh
# prepare a dataframe for plotting: daily USD/JPY prices for 2018
symbol = 'JPY=X'
rawdata = YF(symbol).get_historical_price_data(
    '2018-01-01','2018-12-31', 'daily')[symbol]['prices']
df = pd.DataFrame(rawdata)
# index by date so bokeh can use a datetime x-axis
df.index = pd.to_datetime(df.formatted_date)
# plot: candle chart with 20-day Bollinger Bands overlaid
bkh.output_notebook()
fig = bkh.figure(plot_width = 800, plot_height = 400,x_axis_type='datetime',
                 y_range=(100, 120))
fig.bbands(df.close, window = 20)
fig.candle(df)
# clicking a legend entry mutes the corresponding glyphs
fig.legend.click_policy = 'mute'
bkh.show(fig)
# -
# ## figure.candle
#
# plots a candle chart
# +
from yahoofinancials import YahooFinancials as YF
import numpy as np
import pandas as pd
import bokelper as bkh
# prepare a dataframe for plotting: daily USD/JPY prices for 2018
symbol = 'JPY=X'
rawdata = YF(symbol).get_historical_price_data(
    '2018-01-01','2018-12-31', 'daily')[symbol]['prices']
df = pd.DataFrame(rawdata)
df.index = pd.to_datetime(df.formatted_date)
# plot: zoomed candle chart (fig1) plus an overview strip (fig2) linked via RangeTool
bkh.output_notebook()
fig1 = bkh.figure(plot_width=800, plot_height=500, x_axis_type='datetime',
                  x_range=(df.index[-75], df.index[-1]),
                  # NOTE(review): the floor uses the last 75 lows but the ceiling
                  # only the last 25 highs — confirm -25 is intended, not -75.
                  y_range=(df.low[-75:].min() * 0.98, df.high[-25:].max() * 1.02),
                  toolbar_location='above',
                  )
fig1.candle(df)
fig2 = bkh.figure(plot_width=800, plot_height=100, x_axis_type='datetime',
                  toolbar_location=None,
                  )
# overview line: midpoint of the daily high and low
fig2.line(df.index, (df.high + df.low)/2)
# dragging the range tool in fig2 pans/zooms fig1
fig2.add_tools(bkh.RangeTool(x_range=fig1.x_range, y_range=fig1.y_range))
bkh.show(bkh.Column(fig1, fig2))
# -
# ## figure.hist
# +
from yahoofinancials import YahooFinancials as YF
import numpy as np
import pandas as pd
import bokelper as bkh
# prepare a dataframe for plotting: daily USD/JPY prices for 2018
symbol = 'JPY=X'
rawdata = YF(symbol).get_historical_price_data(
    '2018-01-01','2018-12-31', 'daily')[symbol]['prices']
df = pd.DataFrame(rawdata)
df.index = pd.to_datetime(df.formatted_date)
bkh.output_notebook()
fig = bkh.figure(plot_width=800, plot_height=500)
# daily trading range as a percentage of the open price
price_moving_band = (df.high - df.low) / df.open * 100
# histogram of those daily range percentages
fig.hist(price_moving_band)
bkh.show(fig)
# -
# ## figure.grayscale
# +
import bokelper as bkh
from sklearn import datasets
bkh.output_notebook()
# 8x8 grayscale images of handwritten digits
digits = datasets.load_digits()
fig = bkh.figure(
    plot_width=100, plot_height=150,
    toolbar_location=None,
    tooltips=[("x", "$x"), ("y", "$y"), ("value", "@image")])
# render the first digit as an 8x8 image, upscaled by a factor of 4
fig.grayscale(digits['data'][0].reshape(8, 8), 4)
bkh.show(fig)
# -
| notebooks/figure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # statsmodels Principal Component Analysis
# *Key ideas:* Principal component analysis, world bank data, fertility
#
# In this notebook, we use principal components analysis (PCA) to analyze the time series of fertility rates in 192 countries, using data obtained from the World Bank. The main goal is to understand how the trends in fertility over time differ from country to country. This is a slightly atypical illustration of PCA because the data are time series. Methods such as functional PCA have been developed for this setting, but since the fertility data are very smooth, there is no real disadvantage to using standard PCA in this case.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import statsmodels.api as sm
from statsmodels.multivariate.pca import PCA
# default figure size and font size for every plot in this notebook
plt.rc("figure", figsize=(16,8))
plt.rc("font", size=14)
# -
# The data can be obtained from the [World Bank web site](http://data.worldbank.org/indicator/SP.DYN.TFRT.IN), but here we work with a slightly cleaned-up version of the data:
# Load the cleaned World Bank fertility data bundled with statsmodels
data = sm.datasets.fertility.load_pandas().data
data.head()
# Here we construct a DataFrame that contains only the numerical fertility rate data and set the index to the country names. We also drop all the countries with any missing data.
# year columns are labeled "1960" .. "2011"
columns = list(map(str, range(1960, 2012)))
data.set_index('Country Name', inplace=True)
dta = data[columns]
# keep only countries with a complete time series
dta = dta.dropna()
dta.head()
# There are two ways to use PCA to analyze a rectangular matrix: we can treat the rows as the "objects" and the columns as the "variables", or vice-versa. Here we will treat the fertility measures as "variables" used to measure the countries as "objects". Thus the goal will be to reduce the yearly fertility rate values to a small number of fertility rate "profiles" or "basis functions" that capture most of the variation over time in the different countries.
# The mean trend is removed in PCA, but its worthwhile taking a look at it. It shows that fertility has dropped steadily over the time period covered in this dataset. Note that the mean is calculated using a country as the unit of analysis, ignoring population size. This is also true for the PC analysis conducted below. A more sophisticated analysis might weight the countries, say by population in 1980.
# Plot the cross-country mean fertility trajectory (this mean is what PCA removes)
ax = dta.mean().plot(grid=False)
ax.set_xlabel("Year", size=17)
ax.set_ylabel("Fertility rate", size=17);
ax.set_xlim(0, 51)
# Next we perform the PCA:
# Fit PCA on the transposed data (columns = countries), keeping the raw
# scale (standardize=False) and removing the mean (demean=True)
pca_model = PCA(dta.T, standardize=False, demean=True)
# Based on the eigenvalues, we see that the first PC dominates, with perhaps a small amount of meaningful variation captured in the second and third PC's.
fig = pca_model.plot_scree(log_scale=False)
# Next we will plot the PC factors. The dominant factor is monotonically increasing. Countries with a positive score on the first factor will increase faster (or decrease slower) compared to the mean shown above. Countries with a negative score on the first factor will decrease faster than the mean. The second factor is U-shaped with a positive peak at around 1985. Countries with a large positive score on the second factor will have lower than average fertilities at the beginning and end of the data range, but higher than average fertility in the middle of the range.
fig, ax = plt.subplots(figsize=(8, 4))
# Plot the first three principal-component factors over time
lines = ax.plot(pca_model.factors.iloc[:,:3], lw=4, alpha=.6)
# NOTE(review): set_xticklabels without a matching set_xticks relies on the
# default tick positions lining up with every 10th column — confirm on the
# current matplotlib version.
ax.set_xticklabels(dta.columns.values[::10])
ax.set_xlim(0, 51)
ax.set_xlabel("Year", size=17)
fig.subplots_adjust(.1, .1, .85, .9)
legend = fig.legend(lines, ['PC 1', 'PC 2', 'PC 3'], loc='center right')
legend.draw_frame(False)
# To better understand what is going on, we will plot the fertility trajectories for sets of countries with similar PC scores. The following convenience function produces such a plot.
# Sort countries by their loading on the first principal component
idx = pca_model.loadings.iloc[:,0].argsort()
# First we plot the five countries with the greatest scores on PC 1. These countries have a higher rate of fertility increase than the global mean (which is decreasing).
def make_plot(labels):
    """Plot the fertility trajectories for `labels` next to the global mean."""
    figure, axes = plt.subplots(figsize=(9,5))
    # One line per selected country; suppress the per-line auto legend.
    axes = dta.loc[labels].T.plot(legend=False, grid=False, ax=axes)
    # Overlay the cross-country mean trajectory for reference.
    dta.mean().plot(ax=axes, grid=False, label='Mean')
    axes.set_xlabel("Year", size=17)
    axes.set_ylabel("Fertility", size=17)
    axes.set_xlim(0, 51)
    figure.subplots_adjust(.1, .1, .75, .9)
    # Put the legend outside the axes, vertically centered on the right.
    handles_labels = axes.get_legend_handles_labels()
    legend = axes.legend(*handles_labels, loc='center left', bbox_to_anchor=(1, .5))
    legend.draw_frame(False)
# Five countries with the largest (most positive) PC-1 loadings.
labels = dta.index[idx[-5:]]
make_plot(labels)
# Here are the five countries with the greatest scores on factor 2. These are countries that reached peak fertility around 1980, later than much of the rest of the world, followed by a rapid decrease in fertility.
idx = pca_model.loadings.iloc[:,1].argsort()
make_plot(dta.index[idx[-5:]])
# Finally we have the countries with the most negative scores on PC 2. These are the countries where the fertility rate declined much faster than the global mean during the 1960's and 1970's, then flattened out.
make_plot(dta.index[idx[:5]])
# We can also look at a scatterplot of the first two principal component scores. We see that the variation among countries is fairly continuous, except perhaps that the two countries with highest scores for PC 2 are somewhat separated from the other points. These countries, Oman and Yemen, are unique in having a sharp spike in fertility around 1980. No other country has such a spike. In contrast, the countries with high scores on PC 1 (that have continuously increasing fertility), are part of a continuum of variation.
fig, ax = plt.subplots()
pca_model.loadings.plot.scatter(x='comp_00',y='comp_01', ax=ax)
ax.set_xlabel("PC 1", size=17)
ax.set_ylabel("PC 2", size=17)
# List the countries whose PC-2 loading exceeds 0.2.
dta.index[pca_model.loadings.iloc[:, 1] > .2].values
| examples/notebooks/pca_fertility_factors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Random Effects Neural Networks with Edward and Keras
# Bayesian probabilistic models provide a nimble and expressive framework for modeling "small-world" data. In contrast, deep learning offers a more rigid yet much more powerful framework for modeling data of massive size. [Edward](http://edwardlib.org/) is a probabilistic programming library that bridges this gap: "black-box" variational inference enables us to fit extremely flexible Bayesian models to large-scale data. Furthermore, these models themselves may take advantage of classic deep-learning architectures of arbitrary complexity.
#
# Edward uses [TensorFlow](https://www.tensorflow.org/) for symbolic gradients and data flow graphs. As such, it interfaces cleanly with other libraries that do the same, namely [TF-Slim](https://research.googleblog.com/2016/08/tf-slim-high-level-library-to-define.html), [PrettyTensor](https://github.com/google/prettytensor) and [Keras](https://keras.io/). Personally, I've been working often with the latter, and am consistently delighted by the ease with which it allows me to specify complex neural architectures.
#
# The aim of this work is to lay a practical foundation for Bayesian modeling in Edward, then explore how, and how easily, we can extend these models in the direction of classical deep learning via Keras. It will give both a conceptual overview of the models below, as well as notes on the practical considerations of their implementation — what worked and what didn't. Finally, this work will conclude with concrete ways in which to extend these models further, of which there are many.
#
# If you're just getting started with Edward or Keras, I recommend first perusing the [Edward tutorials](http://edwardlib.org/tutorials) and [Keras documentation](https://keras.io/) respectively.
#
# To "pull us down the path," we build three models in additive fashion: a Bayesian linear regression model, a Bayesian linear regression model with random effects, and a neural network with random effects. We fit them on the [Zillow Prize](https://www.kaggle.com/c/zillow-prize-1) dataset, which asks us to predict `logerror` (in house-price estimate, i.e. the "Zestimate") given metadata for a list of homes. These models are intended to be demonstrative, not performant: they will not win you the prize in their current form.
# +
import edward as ed
from edward.models import Normal
from keras.layers import Input, Dense
from keras.regularizers import l2
from keras import backend as K
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import scale
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# %matplotlib inline
plt.style.use('seaborn-whitegrid')
# +
# ensure you are using TensorFlow as your Keras backend
# Share one TF session between Edward and Keras so both build on the same graph.
sess = ed.get_session()
K.set_session(sess)
# NOTE(review): tf.global_variables_initializer() captures the variable
# collection at call time — at this point no variables exist yet, so this op
# may initialize nothing when re-run later; Edward's inference.run performs
# its own initialization, which is presumably why the notebook still works.
# Confirm before relying on INIT_OP.
INIT_OP = tf.global_variables_initializer()
# -
# ## Data preparation
# After importing the data we rename its columns as per Philipp Spachtholz's [Exploratory Analysis - Zillow](https://www.kaggle.com/philippsp/exploratory-analysis-zillow) kernel on [kaggle.com](https://www.kaggle.com).
# Load the raw Zillow CSVs; low_memory=False parses each file in one pass,
# avoiding mixed-dtype chunk warnings on the wide properties table.
properties_df = pd.read_csv('data/properties.csv', low_memory=False)
transactions_df = pd.read_csv('data/transactions.csv')
# +
# Rename Zillow's cryptic column names into readable, prefix-grouped names
# (area_*, num_*, region_*, tax_*, zoning_*, flag_*).
properties_df = properties_df.rename(columns={
    'parcelid': 'id_parcel',
    'yearbuilt': 'build_year',
    'basementsqft': 'area_basement',
    'yardbuildingsqft17': 'area_patio',
    'yardbuildingsqft26': 'area_shed',
    'poolsizesum': 'area_pool',
    'lotsizesquarefeet': 'area_lot',
    'garagetotalsqft': 'area_garage',
    'finishedfloor1squarefeet': 'area_firstfloor_finished',
    'calculatedfinishedsquarefeet': 'area_total_calc',
    'finishedsquarefeet6': 'area_base',
    'finishedsquarefeet12': 'area_live_finished',
    'finishedsquarefeet13': 'area_liveperi_finished',
    'finishedsquarefeet15': 'area_total_finished',
    'finishedsquarefeet50': 'area_unknown',
    'unitcnt': 'num_unit',
    'numberofstories': 'num_story',
    'roomcnt': 'num_room',
    'bathroomcnt': 'num_bathroom',
    'bedroomcnt': 'num_bedroom',
    'calculatedbathnbr': 'num_bathroom_calc',
    'fullbathcnt': 'num_bath',
    'threequarterbathnbr': 'num_75_bath',
    'fireplacecnt': 'num_fireplace',
    'poolcnt': 'num_pool',
    'garagecarcnt': 'num_garage',
    'regionidcounty': 'region_county',
    'regionidcity': 'region_city',
    'regionidzip': 'region_zip',
    'regionidneighborhood': 'region_neighbor',
    'taxvaluedollarcnt': 'tax_total',
    'structuretaxvaluedollarcnt': 'tax_building',
    'landtaxvaluedollarcnt': 'tax_land',
    'taxamount': 'tax_property',
    'assessmentyear': 'tax_year',
    'taxdelinquencyflag': 'tax_delinquency',
    'taxdelinquencyyear': 'tax_delinquency_year',
    'propertyzoningdesc': 'zoning_property',
    'propertylandusetypeid': 'zoning_landuse',
    'propertycountylandusecode': 'zoning_landuse_county',
    'fireplaceflag': 'flag_fireplace',
    'hashottuborspa': 'flag_tub',
    'buildingqualitytypeid': 'quality',
    'buildingclasstypeid': 'framing',
    'typeconstructiontypeid': 'material',
    'decktypeid': 'deck',
    'storytypeid': 'story',
    'heatingorsystemtypeid': 'heating',
    'airconditioningtypeid': 'aircon',
    'architecturalstyletypeid': 'architectural_style'
})
transactions_df = transactions_df.rename(columns={
    'parcelid': 'id_parcel',
    'transactiondate': 'date'
})
# -
# ### Build training DataFrame
# One row per transaction, left-joined to the parcel's property metadata.
data = transactions_df.merge(properties_df, how='left', left_on='id_parcel', right_on='id_parcel')
# ### Drop columns containing too many nulls
# Bayesian probabilistic models allow us to flexibly model *missing* data itself. To this end, we conceive of a given predictor as a vector of both:
# 1. Observed values.
# 2. Parameters in place of missing values, which will form a posterior distribution for what this value might have been.
#
# In a (partially-specified, for brevity) linear model, this might look as follows:
#
# $$
# y_i \sim \mathcal{N}(\mu_i, \sigma)\\
# \mu_i = \alpha + \beta_N N_i\\
# N_i \sim \mathcal{N}(\nu, \sigma_N)\\
# $$
#
# where $N_i$ is our sometimes-missing predictor. When $N_i$ is observed, $\mathcal{N}(\nu, \sigma_N)$ serves as a likelihood: given this data-point, we tweak retrodictive distributions on the parameters $(\nu, \sigma_N)$ by which it was produced. Conversely, when $N_i$ is missing it serves as a prior: after learning distributions of $(\nu, \sigma_N)$ we can generate a likely value of $N_i$ itself. Finally, inference will give us (presumably-wide) distributions on the model's belief in what was the true value of each missing $N_i$ conditional on the data observed.
#
# I tried this in Edward, albeit briefly, to no avail. A post on the Edward forum gives an [example](https://discourse.edwardlib.org/t/how-to-handle-missing-values-in-gaussian-matrix-factorization/95/2) of how one might accomplish this task in the case of Gaussian Matrix Factorization. In my case, I wasn't able to apply a 2-D missing-data-mask placeholder to a 2-D data placeholder via [`tf.gather`](https://www.tensorflow.org/api_docs/python/tf/gather) nor [`tf.gather_nd`](https://www.tensorflow.org/api_docs/python/tf/gather_nd). With more effort, I'm sure I could make this work. Help appreciated.
#
# For now, we'll first drop columns containing too many null values, then, after choosing a few of the predictors most correlated with the target, drop the remaining rows containing nulls.
# Keep only columns with fewer than 25% missing values.
keep_cols = data.columns[ data.isnull().mean() < .25 ]
data = data[keep_cols]
# ### Which columns are most correlated with the target?
# +
float_cols = [col for col in data.columns if data[col].dtype == np.float64]
# Rank float columns by absolute correlation with the response.
data[float_cols]\
    .corr()['logerror']\
    .abs()\
    .sort_values(ascending=False)\
    .head(10)
# -
# ### Drop rows with null values
data.dropna(inplace=True)
data.reset_index(inplace=True)
# ### Select three fixed-effect predictors
fixed_effect_predictors = [
    'area_live_finished',
    'num_bathroom',
    'build_year'
]
# ### Select one random-effect predictor
# Integer-encode zip codes as 0..n-1; used later as tf.gather indices.
zip_codes = data['region_zip'].astype('category').cat.codes
# ### Split data into train, validation sets
# +
# NOTE(review): no random_state, so the 50/50 split differs on every run.
train_index = data.sample(frac=0.5).index
val_index = data.drop(train_index).index
X = data.drop('logerror', axis=1)[fixed_effect_predictors]
# Standardize predictors to zero mean / unit variance (returns a numpy array).
X = scale(X)
y = data['logerror'].values
# Index labels double as positions here: reset_index above gave a fresh RangeIndex.
X_train = X[train_index]
y_train = y[train_index]
X_val = X[val_index]
y_val = y[val_index]
print('Dataset sizes:')
print(f' X_train: {X_train.shape}')
print(f' X_val: {X_val.shape}')
print(f' y_train: {y_train.shape}')
print(f' y_val: {y_val.shape}')
# -
# -
# # Bayesian linear regression
# Using three fixed-effect predictors we'll fit a model of the following form:
#
# $$
# y_i \sim \mathcal{N}(\mu_i, 1)\\
# \mu_i = \alpha + \beta x_i\\
# \alpha \sim \mathcal{N}(0, 1)\\
# \beta \sim \mathcal{N}(0, 1)\\
# $$
#
# Having normalized our data to have mean 0 and unit-variance, we place our priors on a similar scale.
#
# To infer posterior distributions of the model's parameters conditional on the data observed we employ variational inference — one of three inference classes supported in Edward. This approach posits posterior inference as posterior *approximation* via *optimization*, where optimization is done via stochastic, gradient-based methods. This is what enables us to scale complex probabilistic functional forms to large-scale data.
#
# For an introduction to variational inference and Edward's API thereof, please reference:
# - [Edward: Inference of Probabilistic Models](http://edwardlib.org/tutorials/inference)
# - [Edward: Variational Inference](http://edwardlib.org/tutorials/variational-inference)
# - [Edward: KL(q||p) Minimization](http://edwardlib.org/tutorials/klqp)
# - [Edward: API and Documentation - Inference](http://edwardlib.org/api/inference)
#
# Additionally, I provide an introduction to the basic math behind variational inference and the [ELBO](https://www.cs.princeton.edu/courses/archive/fall11/cos597C/lectures/variational-inference-i.pdf) in a blog post of mine: [Further Exploring Common Probabilistic Models](http://willwolf.io/2017/07/06/further-exploring-common-probabilistic-models/).
# ### Fit model
# For the approximate q-distributions, we apply the [softplus function](https://www.tensorflow.org/api_docs/python/tf/nn/softplus) — `log(exp(z) + 1)` — to the scale parameter values at the suggestion of the Edward docs.
# +
N, D = X_train.shape
# fixed-effects placeholders
fixed_effects = tf.placeholder(tf.float32, [N, D])
# fixed-effects parameters
# Standard-normal priors — sensible since predictors were scaled to unit variance.
β_fixed_effects = Normal(loc=tf.zeros(D), scale=tf.ones(D))
α = Normal(loc=tf.zeros(1), scale=tf.ones(1))
# model
# Linear predictor; the response is modeled with fixed unit noise scale.
μ_y = α + ed.dot(fixed_effects, β_fixed_effects)
y = Normal(loc=μ_y, scale=tf.ones(N))
# approximate fixed-effects distributions
# softplus keeps the variational scale parameters strictly positive.
qβ_fixed_effects = Normal(
    loc=tf.Variable(tf.random_normal([D])),
    scale=tf.nn.softplus(tf.Variable(tf.random_normal([D])))
)
qα = Normal(
    loc=tf.Variable(tf.random_normal([1])),
    scale=tf.nn.softplus(tf.Variable(tf.random_normal([1])))
)
# -
# ### Infer parameters
# +
# Map each prior random variable to its variational approximation.
latent_vars = {
    β_fixed_effects: qβ_fixed_effects,
    α: qα
}
sess.run(INIT_OP)
inference = ed.KLqp(latent_vars, data={fixed_effects: X_train, y: y_train})
inference.run(n_samples=5, n_iter=250)
# -
# -
# ### Criticize model
def visualize_data_fit(X, y, β, α, title_prefix, n_samples=10):
    """Visualize sampled regression lines against the observed data.

    Draws `n_samples` samples from the distributions of the first two
    fixed-effect coefficients (plus the intercept) and plots the lines they
    imply over a 3-D scatter of the first two columns of `X` vs. `y`.

    Args:
        X (np.array) : A design matrix of observed fixed effects.
        y (np.array) : A vector of observed responses.
        β (ed.RandomVariable) : A multivariate distribution of fixed-effect parameters.
        α (ed.RandomVariable) : A univariate distribution of the model's intercept term.
        title_prefix (str) : A string prepended to the plot title.
        n_samples (int) : The number of sampled lines to draw.
    """
    beta_draws = β.sample(n_samples).eval()
    alpha_draws = α.sample(n_samples).eval()
    # 3-D scatter of the first two predictors against the response.
    figure = plt.figure(figsize=(12, 9))
    axes = plt.axes(projection='3d')
    axes.scatter(X[:, 0], X[:, 1], y)
    plt.title(f'{title_prefix} Parameter Samples vs. Observed Data')
    # One line per parameter draw, traced along the x == y diagonal.
    grid = np.linspace(-10, 10, num=500)
    for draw in range(n_samples):
        response = grid * beta_draws[draw][0] + grid * beta_draws[draw][1] + alpha_draws[draw][0]
        axes.plot(grid, grid, response)
# #### Visualize data fit given parameter priors
visualize_data_fit(X_train, y_train, β_fixed_effects, α, 'Prior', n_samples=10)
# #### Visualize data fit given parameter posteriors
visualize_data_fit(X_train, y_train, qβ_fixed_effects, qα, 'Posterior', n_samples=10)
# It appears as if our model fits the data along the first two dimensions. This said, we could improve this fit considerably. This will become apparent when we compute the MAE on our validation set.
# #### Inspect residuals
# +
def compute_mean_absolute_error(y_posterior, X_val_feed_dict, y_val=y_val):
    """Print the mean absolute error of `y_posterior` on the validation data."""
    # Feed the held-out responses alongside the validation predictors.
    feed = dict(X_val_feed_dict)
    feed[y_posterior] = y_val
    mae = ed.evaluate('mean_absolute_error', data=feed)
    print(f'Mean absolute error on validation data: {mae:1.5}')
def plot_residuals(y_posterior, X_val_feed_dict, title, y_val=y_val):
    """Histogram the validation residuals (predictions minus observed)."""
    predictions = y_posterior.eval(feed_dict=X_val_feed_dict)
    residuals = predictions - y_val
    plt.figure(figsize=(9, 6))
    plt.hist(residuals, edgecolor='white', linewidth=1, bins=30, alpha=.7)
    # Dashed reference line at zero residual.
    plt.axvline(0, color='#A60628', linestyle='--')
    plt.xlabel('`y_posterior_preds - y_val`', fontsize=14)
    plt.ylabel('Count', fontsize=14)
    plt.title(title, fontsize=16)
# -
# Rebuild the graph for `y` with each prior swapped for its posterior mean.
param_posteriors = {
    β_fixed_effects: qβ_fixed_effects.mean(),
    α: qα.mean()
}
X_val_feed_dict = {
    fixed_effects: X_val
}
y_posterior = ed.copy(y, param_posteriors)
print(f'Mean validation `logerror`: {y_val.mean()}')
compute_mean_absolute_error(y_posterior, X_val_feed_dict)
plot_residuals(y_posterior, X_val_feed_dict, title='Linear Regression Residuals')
# "The residuals appear normally distributed with mean 0: this is a good sanity check for the model."<sup>1</sup> However, with respect to the magnitude of the mean of the validation `logerror`, our validation score is terrible. This is likely due to the fact that three predictors are not nearly sufficient for capturing the variation in the response. (Additionally, because the response itself is an *error*, it should be fundamentally harder to capture than the thing actually being predicted — the house price. This is because Zillow's team has already built models to capture this signal, then effectively threw the remaining "uncaptured" signal into this competition, i.e. "figure out how to get right the little that we got wrong.")
# #### Inspect parameter posteriors
# +
# draw samples from posteriors
qβ_fixed_effects_samples = qβ_fixed_effects.sample(1000).eval()
qα_samples = qα.sample(1000).eval()
# plot samples
plt.figure(figsize=(16, 10))
# One 2x2-grid panel per fixed effect (D == 3 fills panels 221..223).
for dimension in range(D):
    subplot = plt.subplot(221 + dimension)
    plt.hist(qβ_fixed_effects_samples[:, dimension], edgecolor='white', linewidth=1, bins=30, alpha=.7)
    plt.axvline(0, color='#A60628', linestyle='--')
    title = f'Posterior Distribution of `{fixed_effect_predictors[dimension]}` Effect'
    plt.ylabel('Count', fontsize=14)
    plt.title(title, fontsize=16)
# Intercept panel — relies on `dimension` leaking from the loop (224 when D == 3).
subplot = plt.subplot(221 + dimension + 1)
plt.hist(qα_samples, edgecolor='white', linewidth=1, bins=30, alpha=.7)
plt.axvline(0, color='#A60628', linestyle='--')
title = f'Posterior Distribution of Fixed Intercept α'
plt.ylabel('Count', fontsize=14)
plt.title(title, fontsize=15)
# -
# In keeping with the definition of multivariate linear regression itself, the above parameter posteriors tell us: "conditional on the assumption that the log-error and fixed effects can be related by a straight line, what is the predictive value of one variable once I already know the values of all other variables?"<sup>2</sup>
# ## Bayesian linear regression with random effects
# Random effects models — also known as hierarchical models — allow us to ascribe distinct behaviors to different "clusters" of observations, i.e. groups that may each act in a materially unique way. Furthermore, these models allow us to infer these tendencies in a *collaborative* fashion: while each cluster is assumed to behave differently, it can learn its parameters by heeding to the behavior of the population at large. In this example, we assume that houses in different zipcodes — holding all other predictors constant — should be priced in different ways.
#
# For clarity, let's consider the two surrounding extremes:
# 1. Estimate a single set of parameters for the population, i.e. the vanilla, [scikit-learn linear regression](http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LinearRegression.html), Bayesian or not. This confers no distinct behaviors to houses in different zipcodes.
# 2. Estimate a set of parameters for each individual zipcode, i.e. split the data into its cluster groups and estimate a single model for each. This confers maximally distinct behaviors to houses in different zip codes: the behavior of one cluster knows nothing about that of the others.
#
# Random-effects models "walk the line" between these two approaches — between maximally *underfitting* and maximally *overfitting* the behavior of each cluster. To this effect, its parameter estimates exhibit the canonical "shrinkage" phenomenon: the estimate for a given parameter is balanced between the within-cluster expectation and the global expectation. Smaller clusters exhibit larger shrinkage; larger clusters, i.e. those for which we've observed more data, are more bullheaded (in typical Bayesian fashion). A later plot illustrates this point.
#
# We specify our random-effects functional form as follows:
#
# ``` python
# μ_y = α + α_random_effects + ed.dot(fixed_effects, β_fixed_effects)
# y = Normal(loc=μ_y, scale=tf.ones(N))
# ```
#
# With respect to the previous model, we've simply added `α_random_effects` to the mean of our response. As such, this is a *varying-intercepts* model: the intercept term will be different for each cluster. To this end, we learn the *global* intercept `α` as well as the *offsets* from this intercept `α_random_effects` — a random variable with as many dimensions as there are zipcodes. In keeping with the notion of "offset," we ascribe it a prior of `(0, σ_zc)`. This approach allows us to flexibly extend the model to include more random effects, e.g. city, architecture style, etc. With only one, however, we could have equivalently included the global intercept *inside* of our prior, i.e. `α_random_effects ~ Normal(α, σ_zc)`, with priors on both `α` and `σ_zc` as per usual. This way, our random effect would no longer be a zip-code-specific *offset* from the global intercept, but a vector of zip-code-specific intercepts outright.
#
# Finally, as Richard McElreath notes, "we can think of the `σ_zc` parameter for each cluster as a crude measure of that cluster's "relevance" in explaining variation in the response variable."<sup>3</sup>
# ### Fit model
# +
n_zip_codes = len(set(zip_codes))
# random-effect placeholder
zip_codes_ph = tf.placeholder(tf.int32, [N])
# random-effect parameter
# sqrt(exp(.)) keeps this point-estimated offset scale strictly positive.
σ_zip_code = tf.sqrt(tf.exp(tf.Variable(tf.random_normal([]))))
# Zip-code offsets share a single learned scale; prior mean 0 makes them
# deviations from the global intercept α.
α_zip_code = Normal(loc=tf.zeros(n_zip_codes), scale=σ_zip_code * tf.ones(n_zip_codes))
# model
# Look up each observation's offset by its integer zip-code code.
α_random_effects = tf.gather(α_zip_code, zip_codes_ph)
μ_y = α + α_random_effects + ed.dot(fixed_effects, β_fixed_effects)
y = Normal(loc=μ_y, scale=tf.ones(N))
# approximate random-effect distribution
qα_zip_code = Normal(
    loc=tf.Variable(tf.random_normal([n_zip_codes])),
    scale=tf.nn.softplus(tf.Variable(tf.random_normal([n_zip_codes])))
)
# -
# ### Infer parameters
# +
latent_vars = {
    β_fixed_effects: qβ_fixed_effects,
    α: qα,
    α_zip_code: qα_zip_code
}
sess.run(INIT_OP)
inference = ed.KLqp(latent_vars, data={fixed_effects: X_train, zip_codes_ph: zip_codes[train_index], y: y_train})
inference.run(n_samples=5, n_iter=250)
# -
# -
# ### Criticize model
# Posterior-mean evaluation, now including the zip-code offsets.
param_posteriors = {
    β_fixed_effects: qβ_fixed_effects.mean(),
    α: qα.mean(),
    α_zip_code: qα_zip_code.mean()
}
X_val_feed_dict = {
    fixed_effects: X_val,
    zip_codes_ph: zip_codes[val_index]
}
y_posterior = ed.copy(y, param_posteriors)
compute_mean_absolute_error(y_posterior, X_val_feed_dict)
# #### Inspect residuals
plot_residuals(y_posterior, X_val_feed_dict, title='Linear Regression with Random Effects Residuals')
# ### Plot shrinkage
# To illustrate shrinkage we'll pare our model down to intercepts only (removing the fixed effects entirely). We'll first fit a random-effects model on the full dataset then compute the cluster-specific-intercept posterior means. Next, we'll fit a separate model to each individual cluster and compute the intercept posterior mean of each. The plot below shows how estimates from the former can be viewed as "estimates from the latter — shrunk towards the global-intercept posterior mean."
#
# Finally, <span style="color: #377eb8">blue</span>, <span style="color: #4daf4a">green</span> and <span style="color: #ff7f00">orange</span> points represent small, medium and large clusters respectively. As mentioned before, the larger the cluster size, i.e. the more data points we've observed belonging to a given cluster, the *less* prone it is to shrinkage towards the mean.
# #### Estimate random-effects intercepts
# +
# model
# Intercepts-only model: global α plus zip-code offsets, no fixed effects.
μ_y = α + α_random_effects
y = Normal(loc=μ_y, scale=tf.ones(N))
latent_vars = {
    α: qα,
    α_zip_code: qα_zip_code
}
# infer parameters
sess.run(INIT_OP)
inference = ed.KLqp(latent_vars, data={zip_codes_ph: zip_codes[train_index], y: y_train})
inference.run(n_samples=5, n_iter=250)
# compute global intercept posterior mean
global_intercept_posterior_mean = qα.mean().eval()
# compute random-effects posterior means
# Offsets are relative to the global intercept, so add it back for absolute levels.
random_effects_posterior_means = global_intercept_posterior_mean + qα_zip_code.mean().eval()
random_effects_posterior_means = pd.Series(random_effects_posterior_means, index=range(0, n_zip_codes),
                                           name='random_effects_posterior_means')
# -
# #### Estimate within-cluster intercepts
# +
# select a few clusters of varying size
# Bin clusters into small/medium/large by size and keep 20 from each bin.
zip_codes_df = pd.DataFrame({'cluster_size': zip_codes[train_index].value_counts()})
zip_codes_df['cluster_size_group'] = pd.cut(zip_codes_df['cluster_size'], 3, labels=['small', 'medium', 'large'])
zip_codes_df = zip_codes_df.groupby('cluster_size_group').head(20)
# build individual models for each cluster
within_cluster_posterior_means = {}
for zip_code in zip_codes_df.index.unique():
    # compute mask, number of observations
    mask = zip_codes[train_index] == zip_code
    N_ = mask.sum()
    # instantiate model for current cluster
    # NOTE(review): this placeholder is created but never consumed — the
    # intercept-only model below does not use the fixed effects. Confirm it
    # can be removed.
    fixed_effects = tf.placeholder(tf.float32, [N_, D])
    μ_y = α
    y = Normal(loc=μ_y, scale=tf.ones(N_))
    # fit model
    # NOTE(review): `latent_vars` still contains α_zip_code from the cell
    # above even though this per-cluster graph does not reference it —
    # verify this is intended.
    sess.run(INIT_OP)
    inference = ed.KLqp(latent_vars, data={y: y_train[mask]})
    inference.run(n_samples=5, n_iter=250)
    # compute mean of qα for this zip code
    within_cluster_posterior_means[zip_code] = qα.mean().eval()[0]
within_cluster_posterior_means = pd.Series(within_cluster_posterior_means, name='within_cluster_posterior_means')
# -
# prepare for plotting: join, give colors, sort, reset index
zip_codes_df = zip_codes_df\
    .join(within_cluster_posterior_means)\
    .join(random_effects_posterior_means)
zip_codes_df['cluster_size_color'] = zip_codes_df['cluster_size_group'].map({'small': '#377eb8', 'medium': '#4daf4a', 'large': '#ff7f00'})
zip_codes_df = zip_codes_df\
    .sort_values(by='cluster_size')\
    .reset_index(drop=True)
# +
# plot shrinkage
# Open circles: random-effects estimates; filled dots: per-cluster estimates.
plt.figure(figsize=(14, 10))
plt.xlim(-1, len(zip_codes_df) + 1)
plt.scatter(zip_codes_df.index, zip_codes_df['random_effects_posterior_means'], facecolors='none',
            edgecolors=zip_codes_df['cluster_size_color'], s=50, linewidth=1,
            alpha=1, label='Random-Effects-α Posterior Mean')
plt.scatter(zip_codes_df.index, zip_codes_df['within_cluster_posterior_means'], c=zip_codes_df['cluster_size_color'],
            s=100, alpha=.7, label='Within-Cluster-α Posterior Mean')
plt.axhline(global_intercept_posterior_mean, color='#A60628', linestyle='--', label='Global-α Mean')
plt.xticks([])
plt.xlabel('Cluster \n(In Order of Increasing Cluster Size)', fontsize=14)
plt.ylabel('α', fontsize=14)
plt.title('Shrinkage in Mean-α Estimates from Within-Cluster vs. Random Effects Linear Regression', fontsize=16)
plt.legend()
# -
# ## Neural network with random effects
# Neural networks are powerful function approximators. Keras is a library that lets us flexibly define complex neural architectures. Thus far, we've been approximating the relationship between our fixed effects and response variable with a simple dot product; can we leverage Keras to make this relationship more expressive? Is it painless? Finally, how does it integrate with Edward's existing APIs and constructs? Can we couple nimble generative models with deep neural networks?
#
# While my experimentation was brief, all answers point delightfully towards "yes" for two simple reasons:
# 1. Edward and Keras both run on TensorFlow.
# 2. "Black-box" variational inference makes everything scale.
#
# This said, we must be nonetheless explicit about what's "Bayesian" and what's not, i.e. for which parameters do we infer full (approximate) posterior distributions, and for which do we infer point estimates of the posterior distribution.
#
# Below, we drop a `neural_network` in place of our dot product. Our latent variables remain `β_fixed_effects`, `α` and `α_zip_code`: while we will infer their full (approximate) posterior distributions as before, we'll only compute *point estimates* for the parameters of the neural network as in the typical case. Conversely, to the best of my knowledge, to infer full distributions for the latter, we'll need to specify our network manually in raw TensorFlow, i.e. ditch Keras entirely. We then treat our weights and biases as standard latent variables and infer their approximate posteriors via variational inference. Edward's documentation contains a straightforward [tutorial](http://edwardlib.org/tutorials/bayesian-neural-network) to this end.
# ### Fit model
# +
def neural_network(fixed_effects, λ=.001, input_dim=D):
    """Map the fixed effects to a scalar through a small L2-regularized MLP.

    Note: `input_dim` is accepted for interface compatibility but is not
    used in the body.
    """
    hidden = Dense(5, activation='tanh', kernel_regularizer=l2(λ))(fixed_effects)
    prediction = Dense(1, activation='linear', name='output', kernel_regularizer=l2(λ))(hidden)
    # Collapse the trailing unit dimension: (N, 1) -> (N,).
    return K.squeeze(prediction, axis=1)
# model
# Re-create the fixed-effects placeholder and swap the linear dot product
# for the Keras network; the random-effects intercept structure is unchanged.
fixed_effects = tf.placeholder(tf.float32, [N, D])
μ_y = α + α_random_effects + neural_network(fixed_effects)
y = Normal(loc=μ_y, scale=tf.ones(N))
# +
latent_vars = {
    β_fixed_effects: qβ_fixed_effects,
    α: qα,
    α_zip_code: qα_zip_code
}
sess.run(INIT_OP)
inference = ed.KLqp(latent_vars, data={fixed_effects: X_train, zip_codes_ph: zip_codes[train_index], y: y_train})
# Use RMSProp rather than Edward's default optimizer for this harder objective.
optimizer = tf.train.RMSPropOptimizer(0.01, epsilon=1.0)
# BUG FIX: `inference.run()` internally calls `initialize()` again with only
# the kwargs passed to `run`, so a preceding standalone
# `inference.initialize(optimizer=optimizer)` call was discarded and the
# custom optimizer never used. Pass the optimizer through `run` so it
# reaches the single `initialize` call.
inference.run(n_samples=5, n_iter=1000, optimizer=optimizer)
# -
# -
# ### Criticize model
# Posterior-mean evaluation for the latent variables; the Keras network's
# weights are point estimates and need no substitution.
param_posteriors = {
    β_fixed_effects: qβ_fixed_effects.mean(),
    α: qα.mean(),
    α_zip_code: qα_zip_code.mean()
}
X_val_feed_dict = {
    fixed_effects: X_val,
    zip_codes_ph: zip_codes[val_index]
}
y_posterior = ed.copy(y, param_posteriors)
compute_mean_absolute_error(y_posterior, X_val_feed_dict)
# #### Inspect residuals
plot_residuals(y_posterior, X_val_feed_dict, title='Neural Network with Random Effects Residuals')
# # Future work
# We've now laid a stable, if trivially simple foundation for building models with Edward and Keras. From here, I see two distinct paths to building more expressive probabilistic models using these tools:
# 1. Build probabilistic models in Edward, and abstract deep-network-like subgraphs into Keras layers. This allows us to flexibly define complex neural architectures, e.g. a [video question answering model](https://keras.io/getting-started/functional-api-guide/#video-question-answering-model), with a nominal amount of code, yet restricts us from, or at least makes it awkward to, infer full posterior distributions for the subgraph parameters.
# 2. Build probabilistic models in Edward, and specify deep-network-like subgraphs with raw TensorFlow — ditching Keras entirely. Defining deep-network-like subgraphs becomes more cumbersome, while inferring full posterior distributions for the subgraph parameters becomes more natural and consistent with the flow of Edward code.
#
# This work has shown a few basic variants of (generalized) Bayesian linear regression models. From here, there's tons more to explore — varying-slopes models, Gaussian process regression, mixture models and probabilistic matrix factorizations to name a random few.
#
# Edward and Keras have proven a flexible, expressive and powerful duo for performing inference in deep probabilistic models. The models we built were simple; the only direction to go, and to go rather painlessly, is more.
#
# Many thanks for reading.
# # References
# 1. [Edward - Linear Mixed Effects Models](http://edwardlib.org/tutorials/linear-mixed-effects-models)
# 2. McElreath, Richard. "Chapter 5." Statistical Rethinking: A Bayesian Course with Examples in R and Stan. Boca Raton, FL: CRC, Taylor & Francis Group, 2016. N. pag. Print.
# 3. McElreath, Richard. "Chapter 12." Statistical Rethinking: A Bayesian Course with Examples in R and Stan. Boca Raton, FL: CRC, Taylor & Francis Group, 2016. N. pag. Print.
# 4. [The Best Of Both Worlds: Hierarchical Linear Regression in PyMC3](http://twiecki.github.io/blog/2014/03/17/bayesian-glms-3/)
# 5. [Keras as a simplified interface to TensorFlow](https://blog.keras.io/keras-as-a-simplified-interface-to-tensorflow-tutorial.html)
# 6. [Mixture Density Networks with Edward, Keras and TensorFlow](http://cbonnett.github.io/MDN_EDWARD_KERAS_TF.html)
# 7. [Use a Hierarchical Model](http://sl8r000.github.io/ab_testing_statistics/use_a_hierarchical_model/)
# 8. McElreath, Richard. "Chapter 14." Statistical Rethinking: A Bayesian Course with Examples in R and Stan. Boca Raton, FL: CRC, Taylor & Francis Group, 2016. N. pag. Print.
| Notebooks/Bayesian_Deep_Learning/zillow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="OObB3XxZxY5q"
# #%tensorflow_version 2.x
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
print(tf.__version__)
import math
# Toy regression task: sample x uniformly from [0, 10) and learn y = sin(x).
# get some random numbers
X = 10.0 * np.random.rand(20000)
X = pd.DataFrame(data=X)
# save some for testing
x_train = X.sample(frac=0.8)
x_test = X.drop(x_train.index)
# generate the ground truth
# NOTE(review): apply(math.sin, axis=1) passes each one-element row Series to
# math.sin and relies on implicit Series->float conversion; np.sin(X[0]) would
# be the more robust form -- confirm before changing.
y_train = x_train.apply(math.sin, axis=1)
y_test = x_test.apply(math.sin, axis=1)
print(x_train, y_train)
# + colab={} colab_type="code" id="UMjZrTC_E_2D"
# Scatter the raw training pairs as a sanity check.
fig = plt.figure(figsize=(9,9))
plt.ylim([-2, 2])
plt.scatter(x_train, y_train)
# + colab={} colab_type="code" id="oGLNw9naGBSU"
#--------MODEL BUILDING
# Input dimensionality is the number of DataFrame columns (here 1).
num_params = len(x_train.keys())
print(num_params)
# Plain MLP: three 64-unit ReLU hidden layers, one linear output node.
model = tf.keras.Sequential([
    tf.keras.layers.InputLayer([num_params], name="Input_Layer"),
    tf.keras.layers.Dense(64, activation='relu', name="dense_01"),
    tf.keras.layers.Dense(64, activation='relu', name="dense_02"),
    tf.keras.layers.Dense(64, activation='relu', name="dense_03"),
    # 1 node in the output for the sin(x)
    tf.keras.layers.Dense(1, name="Output_Layer")
])
learning_rate = 0.001
model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate),
              # loss function to minimize
              loss='mae',
              # list of metrics to monitor
              metrics=['mae',])
model.summary()
# + colab={} colab_type="code" id="WzgjOeF1eLos"
# Export an architecture diagram (left-to-right) to model.png.
tf.keras.utils.plot_model(
    model,
    to_file="model.png",
    show_shapes=True,
    show_layer_names=True,
    rankdir="LR",
    expand_nested=True,
    dpi=96,
)
# + colab={} colab_type="code" id="q9QFRFpdGFV4"
# Fit/Train model on training data
history = model.fit(x_train, y_train,
                    batch_size=20,
                    epochs=10,
                    validation_split=0.2,
                    verbose=1)
# + colab={} colab_type="code" id="Es_jAzhSGNIh"
#--------MONITOR
# Plot training & validation loss values
fig = plt.figure(figsize=(12,9))
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validate'], loc='upper left')
plt.show()
# + colab={} colab_type="code" id="29Gwuyq8GSVI"
#--------EVALUATE
loss, mae = model.evaluate(x_test, y_test, verbose=2)
print('Loss:', loss, 'MAE:', mae)
# + colab={} colab_type="code" id="Zm-8h-oMGZ-X"
#--------PREDICT
p_test = model.predict(x_test)
fig = plt.figure(figsize=(9,9))
#plt.xlim([-3, 3])
plt.ylim([-2, 2])
#a = plt.axes(aspect='equal')
# Overlay predictions and ground truth against x.
plt.scatter(x_test, p_test)
plt.scatter(x_test, y_test)
# + colab={} colab_type="code" id="MZ7zZV7OGe8G"
#----------PLOT True Values vs. Predictions
fig = plt.figure(figsize=(9,9))
a = plt.axes(aspect='equal')
plt.scatter(y_test, p_test)
plt.xlabel('True Values')
plt.ylabel('Predictions')
# NOTE(review): sin(x) ranges over [-1, 1]; limits of [0, 1] hide the
# negative half of the data -- confirm [-1, 1] was not intended.
lims = [0, 1]
plt.xlim(lims)
plt.ylim(lims)
# draw a diagonal line for comparison
plt.plot(lims, lims)
plt.show()
# + colab={} colab_type="code" id="JYu5wSkXFxeT"
#---------PLOT the distribution of errors
fig = plt.figure(figsize=(9,9))
error = p_test.flatten() - y_test
plt.hist(error, bins = 5)
plt.xlabel("Prediction Error")
plt.ylabel("Count")
plt.show()
# -
| notebooks/siggraph2020/Class 3 - Sinx (homework).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/jhmartel/fp/blob/master/_notebooks/2022-04-26-Einstein_Maxwell.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="rc2iYgrs_FXH"
# # Einstein and Maxwell
# > "Einstein SR, Homogeneous Wave Equation, Lorentz Group, Maxwell. This is update of a previous post. The Critical Foundations of SR and Light Propagation in Vacuum is basically complete. We have a critical analysis of Einstein's alleged proof of Lorentz invariance of spherical lightwaves, and other things. What the article really needs is a stronger conclusion with a study of a modified Fizeau sawtooth experiment."
#
# - toc: false
# - branch: master
# - badges: false
# - comments: true
# - author: JHM
# - categories: [einstein, maxwell, wave equation, lorentz, SR]
#
# + [markdown] id="Zhjr9zExlRq2"
# We've been working on an [essay](https://github.com/jhmartel/SR/) on the foundations of special relativity (SR). Why do we invest so much time and effort into SR foundations ? Well honestly because i think the foundations are never taught, so there's alot to say. Moreover the SR converts tend to ignore foundations, evading them and jumping ahead to their conclusions. So the task of foundations is typically neglected by adherents, and it's left to the skeptics to develop the foundational issues. And the student of the history of SR will know that there has _always_ been a strong skeptic school in SR (and GR) and this school frequently included Einstein himself at various times in his life, and for good reason. There is _much_ to critically examine in the SR theory, and this the purpose of our essay.
#
# + [markdown] id="6DeaSfLzdWjl"
# # Einstein SR and Maxwell
#
# Einstein's theory of special relativity (SR) arose from Einstein's study of Maxwell's equations (ME) circa 1870 AD. But what is the logical and mathematical relation between SR and Maxwell? This is interesting question because they are essentially antagonistic. Although Einstein was much influenced by the Maxwellian field theory viewpoint, his own early work (1905) was based on the antithetical _photon_ model of light.
#
# Briefly, by Maxwell equations we understand that in a given reference frame $K$ we have the existence of electric and magnetic fields $E,B$ satisfying the four equations on $div(E), div(B)$ and $curl(E)$ and $curl(B)$ relative to a charge volume density $\rho$ and electric current density $J$. In vacuum where $\rho=0$ and $J=0$, composing the first-order Maxwell equations together yields the second-order fact that the coordinate components of $E,B$ satisfy wave equations with speed of propagation $c=1/\sqrt{\epsilon_0 \mu_0}$. This usually leads to the idea that electromagnetic field disturbances travel at the speed of $c$ in aether. And indeed Maxwell's equations expressly assume an aether as the medium by which the electromagnetic radiation travels.
#
# Now this author does not really accept Maxwell's field equations as being satisfactory. For example, the magnetic field $B$ is not a rectifiable or reifiable field, meaning it has only _potential_ and not any material substance. The same could be said for Maxwell's electric field, which again is a potential field describing the force experienced by a charged test particle.
# This is the so-called _continental field-theoretic viewpoint_ after Maxwell, etc.
#
# However Maxwell's equations were not satisfactory in their predictions on the photoelectric effect. For example, is light a disturbance in the electric _or_ the magnetic field? If light is such a disturbance, then Maxwell equations predict the interaction of the $E$-wave (or is it $B$-wave) with charged test particles.
#
# Here it's interesting to compare Einstein's 1905 explanation of the photoelectric effect using the photon particle theory of light. Thus we tend to interpret Einstein's developments of SR from a photon or corpuscular point of view.
#
# _Problem_: The classical homogeneous wave equation has the property of the velocity being _dependent_ on the receiver velocity relative to the medium. But SR argues that the assumption on the "rectilinear uniform propagation of light" somehow yields a wave equation where velocity is receiver _independent_. But how? [We do not address this important issue here].
#
#
#
# + [markdown] id="ZUn8k825hsXw"
#
# # SR and Lorentz Groups
#
# The null result of the Michelson-Morley experiments led to Einstein's postulating the Lorentz transformations relating space and time variables $x,t$. Undoubtedly the theory of SR is summarized in the representations of the Lorentz group of linear transformations, namely the isometry group designated $O(ds^2)=O(3,1)$ and its standard linear action on ${\bf{R}}^{3,1}$.
#
# For the mathematician, once a single linear representation is given, there are many algebraic constructions possible to obtain further representations, for example the symmetric and alternating representations. We develop this idea further to try and bridge the assumptions of SR to Maxwell's equations, and especially the wave equation.
#
# Now we discuss several group representations (i.e. linear group actions).
#
# First we begin with the standard linear representation $$\rho_0:{\bf{R}}^{1,3} \times L \to {\bf{R}}^{1,3}$$ which is the linear representation $\rho_0$ represented by left matrix multiplication $(v, \lambda) \mapsto \lambda.v$.
#
# Next we *dualize*.
#
# Let $C({\bf{R}}^{3,1})$ be the space of polynomial functions on the space. Abbreviate $C:=C({\bf{R}}^{3,1})$. Naturally we assemble $C$ from the dual functionals $\lambda\in {({\bf{R}}^{3,1})}^*$. Taking products and polynomials in the dual functions $\lambda$ we obtain the contragradient represention $$\rho_0^*:C( {\bf{R}}^{3,1}) \times L \to C({\bf{R}}^{3,1}). $$
#
# The idea is that the vector spaces $V$ and $V^*$ are isomorphic (non canonically) in finite dimensions. Moreover the algebra generated by $V^*$ yields an (infinite-dimensional) space of polynomial functions on $V$.
#
# Now what are vector fields?
#
# In differential topology, the vector fields $\frac{\partial }{\partial x}$ *act* on functions as derivations, i.e. as linear maps $$\frac{\partial }{\partial x}: C \to C $$ satisfying Liebniz product formula. Iterating these linear maps generates an algebra of operators on $C$, namely the operators polynomial in $\partial / \partial x$. On the other hand, the differential $dx$ itself as contained in the cotangent space is _not_ an algebra.
#
# Iterating the derivations $\frac{\partial }{\partial x}\circ \frac{\partial }{\partial y}=\frac{\partial^2 }{\partial x \partial y}$ leads to the usual linear differential operators on $C$. We are specially interested in d'Alembert's operator $$\square:= \frac{-1}{c^2} \frac{\partial^2}{\partial t^2} +\frac{\partial^2}{\partial x^2}+\frac{\partial^2}{\partial y^2}+\frac{\partial^2}{\partial z^2} .$$
#
# Our main proposal, and this is not yet altogether rigorous, is to identify the Minkowski squared line element $ds^2$ as *dual* in a certain yet-to-be-defined algebraic sense to the d'Alembert operator $\square$. The difficulty is that the symmetric product of the differential operators $\partial/ \partial x$ and $\partial / \partial y$ is distinct from the _composition_ of the differential operators $\partial^2 / \partial x \partial y$.
#
# The term $dx^2$ in Minkowski's line element is formally a section of the $(T^*)^{\otimes 2}$ bundle over the manifold space, here ${\bf{R}}^{4}$. So here is the informal computation. Let us formally relabel the variables $$x_0, x_1, x_2, x_3 = t,x,y,z, $$ respectively. Now the choice of Lorentz metric $h$ can be represented as a square symmetric matrix $$[h]=
# \begin{pmatrix} -c^2 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{pmatrix}.$$
#
# The choice of $h$ allows us to define an isometry between the differential forms and vector fields, i.e. secord order linear operators. Specifically, $h$ allows us to define explicit isometry between symmetric $(2,0)$ tensors and symmetric $(0,2)$ tensors.
#
# __Lemma__: The metric $h$ identifies the dual of $ds^2$ with d'Alembert's wave operator $\square$. That is $(ds^2)^* = \square.$
#
# Proof. The proof is basic linear algebra. First one needs prove that the $h$-dual of $dx_0$ is $dx_0^* =-c^{-2} \frac{\partial}{\partial x_0}$. Likewise we find $dx_i^*=\frac{\partial}{\partial x_i}$ for $i=1,2,3$. This is all in the tangent space, i.e. between $(1,0)$ and $(0,1)$ tensors. Now we consider the squares, i.e. the symmetric $(2,0)$ tensors. We find that $$(ds^2)^*=-c^2 (dx_0^*)^2 + (dx_1^*)^2 + (dx_2^*)^2 + (dx_3^*)^2, $$ and which is equal to $$-c^{-2} (\frac{\partial}{\partial x_0})^2 + (\frac{\partial}{\partial x_1})^2 +(\frac{\partial}{\partial x_2})^2+(\frac{\partial}{\partial x_3})^2, $$ which is equal to d'Alembert's square operator $\square$ as desired. [See our remarks above on the nonrigorous nature of this argument, and is the subject of investigation.]
#
# The Lorentz invariance of $\square$ shows the solutions to the homogeneous wave equation (HWE) are Lorentz covariant and $\square \phi =0$ if and only if $\square \lambda \cdot \phi =0$ for every Lorentz transformation $\lambda \in L$. This is the wave equation version of the fact that the null cone $ds^2=0$ is Lorentz covariant.
#
#
#
#
#
#
# + [markdown] id="1sfrivMFQJWB"
# Now Einstein's (A12) postulates the *uniform rectilinear* propagation of light in vacuum. This would suggest a corpuscular model of light, being represented as affine parameterized lines $$s\mapsto (s, x(s), y(s), z(s))=(s, \gamma(s)) $$ in ${\bf{R}}^{3,1}$ satisfying $D^2_{ss} \gamma =0$.
#
# Is the equation $D^2_{ss} \gamma=0$ Lorentz covariant? (Yes?)
#
# But what are the corresponding "*uniform rectilinear*" solutions $\phi$ for the dual HWE: $~~\square \phi=0$ ? Compare [this](https://ccrma.stanford.edu/~jos/pasp/Spherical_Waves_Point_Source.html).
# + [markdown] id="gIfECdy-Yarz"
# An idea: there has always been a correspondence between lines in $V$ (one-dimensional linear subspaces) and quadratic functionals via the Segre embedding, or $\lambda \mapsto \lambda^2$ where $\lambda\in V^*$ is a linear functional.
#
# The following questions will be answered below:
#
# * Are the quadratic functions $q(x)=h(v,x)^2/2$ solutions to $\square =0$ for null vector $v\in N$? (Yes, we prove below).
#
# * Can we find quadratic functions $q$ whose level sets are everywhere orthogonal to the null cone $N$ ?
#
# The idea would be to derive some canonical solutions $\square q=0$ from quadratics arising from vectors on the null cone.
#
# If $v$ belongs to the null cone, then $q(x):=h(v,x)^2/2$ for $x\in V$ defines a quadratic function on $V$ with $q(v)=h(v,v)^2/2=0$.
#
# It's clear that $q$ is minimized along $v^\perp$, i.e. $q(x)=0$ for all $x\in v^\perp$ and $v\in v^\perp$. Here $v^\perp$ consists of all $u$ such that $h(u,v)=0$.
#
# __Lemma__. _For every vector $v\in {\bf{R}}^{3,1}$, let $q(x):=h(v,x)^2/2$ be the quadratic form defined by $v$. Then $\square q=0$ if and only if $v \in N$ and $h(v,v)=0$._
#
# _Proof._ We claim that $\square q=h(v,v)$ when $q(u)=h(v,u)^2/2$. If the vector $v$ has coordinates $v=\langle v_t, v_x, v_y, v_z \rangle$, then $q(x)=h(v,x)^2/2$ is equal to $$(-c^2 v_t t + v_xx+ v_yy+ v_zz)^2/2,$$ which is equal to $$\tfrac{1}{2}\bigl(c^4 v_t^2 t^2 +v_x^2 x^2 + v_y^2 y^2 + v_z^2 z^2\bigr) + (mixed~ terms).$$
# Applying d'Alembert's operator we find $$\square q = -c^2 v_t^2+v_x^2 + v_y^2 + v_z^2 = h(v,v),$$ since $\square(mixed~~terms)=0$ and the claim follows.
#
# Thus we find that null vectors $v\in N$ yield solutions $q_v$ to HWE.
#
# A superposition principle also applies, where any signed measure $\mu \in \mathscr{M}(N)$ yields a $\mu$-averaged solution $q(x):=\int_N q_v (x) d\mu(v)$ to the HWE. Here it would be useful to have a representation theorem, something like, if $\phi$ is any solution of HWE, then $\phi$ can be represented as a $\mu$-average of the $h_v$ as described above.
#
# [To do: establish the conservation of energy for the HWE from the same principles.]
#
| _notebooks/2022-04-26-Einstein_Maxwell.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
def logistic(r, x):
    """Logistic map f(x) = r * x * (1 - x); vectorises over numpy arrays."""
    return r * x * (1 - x)
# Bifurcation (Feigenbaum) diagram of the logistic map over r in [2.5, 4].
n = 10000
r = np.linspace(2.5, 4.0, n)
iterations = 1000
last = 100
x = 1e-5*np.ones(n)
# BUG FIX: the Lyapunov accumulator was never initialised, so the
# `lyapunov +=` line below raised NameError on the first iteration.
# One running sum of log|f'(x)| = log|r - 2*r*x| per value of r.
lyapunov = np.zeros(n)
# +
plt.subplot(211)
for i in range(iterations):
    x = logistic(r, x)
    lyapunov += np.log(abs(r - 2*r*x))
    # Only plot the final `last` iterates so transients are discarded.
    if i >= (iterations - last):
        plt.plot(r, x, ",k", alpha = .02)
plt.xlim(2.5, 4)
plt.title("Feigenbaum-Diagramm")
# -
| notebooks/.ipynb_checkpoints/Feigenbaum Diagramm-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1. Similaridade por cosseno
#
# Neste exemplo iremos entender como funciona um sistema de recomendação que calcula a similaridade entre objetos utilizando produto escalar.
# +
# Carregando tabelas users, reviews, places
# users: Alan, Barbara, Carlos, Denis, Edgar
# reviews: uid, pid, value
# places: 3 sushis, 3 pizzarias, 3 cantinas, 2 fast-food
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
# -
# Loading the tables: users, their reviews, and the reviewed places.
users = pd.read_csv("../data/users.csv")
reviews = pd.read_csv("../data/reviews.csv")
places = pd.read_csv("../data/places.csv")
users.head()
places.head(n=10)
# # Calculando similaridade entre lojas
#
# Considere o um vetor `v` onde cada usuário `uid` representa um eixo e a nota atribuída pelo usuário representa a intensidade da direção deste vetor. Para calcular a similaridade entre duas lojas, vamos calcular o cosseno entre estas duas listas de vetores por meio da seguinte fórmula:
#
# > SUM(a*b)/(SQRT(SUM(aˆ2)) * SQRT(SUM(bˆ2)))
#
# Esta fórmula nos permite calcular a diferença pelo valor do cosseno para o ângulo retornado. Quando os vetores são idênticos o ângulo entre eles é igual 0. Ao calcularmos o cosseno deste ângulo temos que `cos 0 = 1` portanto a similaridade é **máxima**. De forma análoga, quando os vetores forem diferentes o valor será igual a 0.
#
# Como estamos trabalhando com notas positivas (notas 0 a 5) é esperado que o resultado esteja sempre entre 0~1.
def cosine(r1=None, r2=None):
    """
    Cosine similarity between two places based on ratings from common users.

    Each place's ratings form a vector indexed by user id; only users that
    evaluated both places contribute.  The similarity is

        SUM(a*b) / (SQRT(SUM(a^2)) * SQRT(SUM(b^2)))

    which is 1.0 for identical rating vectors and 0.0 for orthogonal ones.
    Ratings are non-negative (0-5), so the result lies in [0, 1].

    :param r1: DataFrame of reviews for the first place (columns uid, review).
    :param r2: DataFrame of reviews for the second place.
    :return: similarity in [0, 1]; 0.0 when the places share no raters or an
             overlapping vector is all zeros (the original code raised
             ZeroDivisionError in those cases).
    """
    v1 = {r["uid"]: r["review"] for i, r in r1.iterrows()}
    v2 = {r["uid"]: r["review"] for i, r in r2.iterrows()}
    # Restrict both vectors to the users that rated both places.
    uids = set(v1.keys()) & set(v2.keys())
    dot = sum(v1[uid] * v2[uid] for uid in uids)
    len1 = sum(v1[uid] * v1[uid] for uid in uids)
    len2 = sum(v2[uid] * v2[uid] for uid in uids)
    denom = math.sqrt(len1) * math.sqrt(len2)
    # Guard against empty intersections / zero-length vectors.
    return dot / denom if denom else 0.0
# +
# Pairwise similarity matrix indexed directly by place id; sized len+1
# presumably because ids are 1-based -- verify against places.csv.
sims = np.empty((len(places) + 1, len(places) + 1))
sims.fill(0)
# Select all ratings from users that evaluated both places.
# NOTE(review): the inner loop reuses the name `index`, shadowing the outer
# loop variable; harmless here since `index` is never read.
for index, place1 in places.iterrows():
    for index, place2 in places.iterrows():
        sim = cosine(
            reviews[reviews["pid"] == place1["id"]],
            reviews[reviews["pid"] == place2["id"]]
        )
        sims[place1["id"]][place2["id"]] = sim
        #print("sim('{0}', '{1}') = {2}".format(place1["name"], place2["name"], sim))
# Total number of computed relationships.
print("Total: {0} relacionamentos calculados.".format((sims.shape[0] - 1) * (sims.shape[1] - 1)))
# +
def predict(pid, reviews):
    """
    Predicts user rating based on previous ratings and similarity between places.
    """
    # Similarity-weighted average of the user's existing ratings; the +1 in
    # the denominator damps predictions backed by little evidence.
    weighted_sum = 0.0
    norm = 1.0
    for _, review_row in reviews.iterrows():
        similarity = sims[review_row["pid"]][pid]
        weighted_sum += similarity * review_row["review"]
        norm += similarity
    return weighted_sum / norm
# Predicting a place rating for each user; '*' marks (user, place) pairs
# without an existing review, i.e. genuine recommendations.
for i, user in users.iterrows():
    for i, place in places.iterrows():
        if not len(reviews[(reviews.uid == user["id"]) & (reviews.pid == place["id"])]):
            print("* ", end="")
        print("Pred('{0}', '{1}') = ".format(place["name"], user["name"]), end="")
        print(predict(place["id"], reviews[reviews["uid"] == user["id"]]))
        #print("Pred('{0}', '{1}') = ".format(place["name"], user["name"]), end="")
        #print(predict(place["id"], reviews[reviews["uid"] == user["id"]]))
| Notebooks/1. Similaridade por cosseno.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Table of Contents
# * Setup
# * Libraries
# * Constants
#
# * Functions
# * Configurations
# * Data Exploration
# * Model 1
# * Evaluation Model 1
# * Re-train Model 1
# * Evaluation Model 1.1
# * Optimizer Experiment
# * Kernel-size & Epoch Experiment
# * Model Comparison
# * Evaluation of best Model
# ## Libraries
# +
"""
<
!pip install -r requirements.txt
!pip list
>
""";
import functools
import os
import time
import warnings
from datetime import datetime
from typing import Dict, List, Tuple, Union
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
import numpy as np
import torch
import torch.optim as optim
import torchvision
from sklearn.metrics import (
classification_report,
confusion_matrix,
accuracy_score,
f1_score, )
from torch import nn, optim
from torch.autograd import Variable
# -
# ## Constants
# +
# Data locations; the train and test sets live in separate subdirectories.
DATA_DIR: str = './data'
TRAIN_DATA_DIR: str = './data/train_data'
TEST_DATA_DIR: str = './data/test_data'
# Default learning rate and batch size
LR: float = 1e-4
MINI_BATCH_SIZE: int = 128
# FashionMNIST label index -> human-readable class name.
CLASSES: Dict[int, str] = {
    0: 'T-shirt/top',
    1: 'Trouser',
    2: 'Pullover',
    3: 'Dress',
    4: 'Coat',
    5: 'Sandal',
    6: 'Shirt',
    7: 'Sneaker',
    8: 'Bag',
    9: 'Ankle boot',
}
# Search space is e.g., (4) kernel size: (8, 16, 32, 64) epochs
SEARCH_SPACE: Dict[int, List[int]] = {
    4: [8, 16, 32, 64],
    6: [8, 16, 32, 64],
    8: [8, 16, 32, 64],
}
# -
# ## Functions
def check_dir_and_check_data(_data_dir=DATA_DIR):
    """
    Ensure the data directory exists and return the FashionMNIST datasets.

    The data is downloaded only when the directory is missing or empty;
    otherwise the already-downloaded copies are loaded from disk.

    :param _data_dir: directory checked for previously downloaded data.
    :return: tuple (train_dataset, test_dataset) of
             torchvision.datasets.FashionMNIST instances.
    """
    def _load(download):
        # The three original branches differed only in the `download` flag;
        # this helper removes that triplicated construction code.
        to_tensor = torchvision.transforms.Compose(
            [torchvision.transforms.ToTensor()])
        train_data = torchvision.datasets.FashionMNIST(
            root=TRAIN_DATA_DIR,
            train=True,
            transform=to_tensor,
            download=download,
        )
        test_data = torchvision.datasets.FashionMNIST(
            root=TEST_DATA_DIR,
            train=False,
            transform=to_tensor,
            download=download,
        )
        return train_data, test_data

    if not os.path.exists(_data_dir):
        print(f"Create '{_data_dir}' directory.", end='\n\n')
        os.makedirs(_data_dir)
        print("Data directory exists.", end='\n\n')
        return _load(download=True)
    if len(os.listdir(_data_dir)) == 0:
        print("Data directory exists.", end='\n\n')
        return _load(download=True)
    print("Data directory exists and is already populated.",)
    return _load(download=False)
def visualize_one_image(
    data, image_id: int,
    size: Tuple[int, int] = (500, 500)
):
    """ Render one dataset example as a Plotly figure.

    :param data: dataset indexable as (image_tensor, label) pairs.
    :param image_id: index of the example to display.
    :param size: (height, width) of the figure in pixels.
    :return: plotly Figure titled with the example id and its class name.
    """
    image, label = data[image_id]
    pil_image = torchvision.transforms.ToPILImage()(image)
    return px.imshow(
        pil_image,
        title=f'Example: {image_id} <br>Label : { CLASSES[label]}',
        color_continuous_scale='RdBu_r',
        origin='upper',
        height=size[0],
        width=size[1],
    )
def timer(func):
    """ Decorator reporting how long *func* took to run.

    The wrapped callable returns ``(result, elapsed_seconds)`` instead of
    just the result, so experiment durations can be collected by callers.
    """
    @functools.wraps(func)
    def _timer(*args, **kwargs):
        started = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed = time.perf_counter() - started
        print(f"Elapsed time: {elapsed:.4f} seconds.")
        return result, elapsed
    return _timer
def weight_reset(m) -> None:
    """ Re-initialise conv/linear layer parameters in-place.

    Intended for ``model.apply(weight_reset)`` so each experiment starts
    from fresh weights; modules of other types are left untouched.
    """
    if isinstance(m, (nn.Conv2d, nn.Linear)):
        m.reset_parameters()
def evaluate_model(eval_model):
    """ Routine to evaluate the FashionMnist models.

    Runs the model over the full 10000-sample test set in one batch and
    computes the NLL loss plus sklearn classification metrics.

    :param eval_model: trained model mapping images to log-probabilities.
    :return: tuple (mean_loss, accuracy, classification_report,
             confusion_matrix, weighted_f1).
    """
    fashion_mnist_eval_dataloader = torch.utils.data.DataLoader(
        TEST_DATA, batch_size=10000, shuffle=False, )
    eval_mini_batch_losses = []
    predictions = None
    with torch.no_grad():  # inference only - no autograd bookkeeping
        for _, (images, labels) in enumerate(fashion_mnist_eval_dataloader):
            output = eval_model(images)
            loss = nn.NLLLoss()(output, labels)
            eval_mini_batch_losses.append(loss.data.item())
            # batch_size covers the whole (unshuffled) test set, so these
            # are the predictions for every test sample.
            predictions = torch.argmax(output, dim=1)
    y_true = TEST_DATA.targets
    # BUG FIX: the original re-ran the model via `iter(loader).next()`,
    # which fails on modern PyTorch (iterators only implement __next__)
    # and duplicated a full forward pass over the test set.
    y_pred = predictions
    return (
        np.mean(eval_mini_batch_losses),
        accuracy_score(
            y_true,
            y_pred,
        ),
        classification_report(
            y_true,
            y_pred,
        ),
        confusion_matrix(
            y_true,
            y_pred,
        ),
        f1_score(
            y_true,
            y_pred,
            average='weighted',
        ),
    )
def model_experiments_matrix(
    metric_matrix,
    title: str = 'Model Experiment',
    xlabel: str = "Class Accuracies",
    ylabel: str = "Model",
    colorlabel: str = "Hits",
):
    """ Plots the confusion matrix of an experiment.

    :param metric_matrix: 10x10 matrix of per-class-pair counts.
    :param title: figure title.
    :param xlabel: x-axis label.
    :param ylabel: y-axis label.
    :param colorlabel: colour-bar label.
    :return: plotly heatmap Figure with a fixed 0-1000 colour range.
    """
    class_names = [*CLASSES.values()]
    axis_labels = dict(
        x=xlabel,
        y=ylabel,
        color=colorlabel,
    )
    fig = px.imshow(
        metric_matrix,
        title=title,
        template='none',
        labels=axis_labels,
        x=class_names,
        y=class_names,
        aspect='equal',
        color_continuous_scale='RdBu',
        zmin=0,
        zmax=1000,
    )
    return fig
# ## Configurations
# +
print(torch.__version__)
print(np.__version__)
# Library settings
os.environ['TZ'] = 'Europe/London'
warnings.filterwarnings('ignore')
# Fixed seeds for reproducibility across numpy and torch.
seed = 7
np.random.seed(seed)
torch.manual_seed(seed)
# .type turns the torch.device into the plain string 'cuda' or 'cpu'.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu').type
torch.cuda.manual_seed(seed)
print(f'Notebook with {str(device)} computation enabled.', end='\n\n')
# Download data
TRAIN_DATA, TEST_DATA = check_dir_and_check_data(DATA_DIR)
# Sanity Check
assert len(TRAIN_DATA) == 60000
assert len(TEST_DATA) == 10000
# Experiments
print(f"Number of experiments: {len(SEARCH_SPACE.keys()) * len(SEARCH_SPACE[4])}")
# Plotly
import plotly.io as pio
pio.renderers.default = 'iframe_connected'
# -
# ## Data Exploration
# Show one randomly chosen training example with its label.
image_id = np.random.randint(len(TRAIN_DATA), size=1)[0]
visualize_one_image(TRAIN_DATA, image_id, size=(400, 500)).show()
print(type(TRAIN_DATA))
print(type(TEST_DATA))
print(TRAIN_DATA)
print(TEST_DATA)
# ## Model 1
# Model 1 is the first implementation. Its aim is to be slightly better than the baseline. That is an accuracy greater than 1/10 = 0.1. The class FashionMnist is set up to be used throughout the notebook. The experiments later will focus on the kernel size and the epochs. Hence the kernel size and padding are parmeters.
class FashionMnist(nn.Module):
    """ LeNet-style CNN for FashionMNIST used throughout this notebook.

    Two conv + max-pool stages followed by three fully connected layers
    and a log-softmax output over the 10 clothing classes.
    """
    def __init__(
        self,
        kernel_size: int,
        padding: int,
    ):
        """
        :param kernel_size: Variable kernel size for different CNNs.
        :param padding: Padding respective to the kernel size.
        """
        super().__init__()
        # Feature extractor: 1 -> 6 -> 16 channels, spatial size halved
        # after each conv stage by 2x2 max pooling.
        self.conv1 = nn.Conv2d(
            in_channels=1, out_channels=6, kernel_size=kernel_size,
            stride=1, padding=padding, )
        self.pool1 = nn.MaxPool2d(
            kernel_size=2, stride=2, padding=0, )
        self.conv2 = nn.Conv2d(
            in_channels=6, out_channels=16, kernel_size=kernel_size,
            stride=1, padding=padding, )
        self.pool2 = nn.MaxPool2d(
            kernel_size=2, stride=2, padding=0, )
        # Classifier head: flattened 16*4*4 feature map -> 10 log-probs.
        self.linear1 = nn.Linear(16 * 4 * 4, 120, bias=True, )
        self.relu1 = nn.ReLU(inplace=True, )
        self.linear2 = nn.Linear(120, 84, bias=True, )
        self.relu2 = nn.ReLU(inplace=True, )
        self.linear3 = nn.Linear(84, 10, )
        self.logsoftmax = nn.LogSoftmax(dim=1, )

    def forward(self, images):
        """Map a batch of 1x28x28 images to per-class log-probabilities."""
        features = self.pool1(self.conv1(images))
        features = self.pool2(self.conv2(features))
        flat = features.view(-1, 16 * 4 * 4)
        hidden = self.relu1(self.linear1(flat))
        hidden = self.relu2(self.linear2(hidden))
        return self.logsoftmax(self.linear3(hidden))
@timer
def train_model_1(
    num_epochs: int,
    kernel_size: int = 5,
    padding: int = 0,
    mini_batch_size = MINI_BATCH_SIZE,
    lr=LR,
    criterion=nn.NLLLoss(),
):
    """
    Function to execute the training of model_1.
    All parameters are fixed, except for the epochs.

    :param num_epochs: number of passes over the training data.
    :param kernel_size: conv kernel size forwarded to FashionMnist.
    :param padding: conv padding forwarded to FashionMnist.
    :param mini_batch_size: batch size for the training DataLoader.
    :param lr: SGD learning rate.
    :param criterion: loss function (NLL matches the LogSoftmax output).
    :return: tuple (trained model, list of mean per-epoch losses); the
             @timer wrapper additionally appends the elapsed seconds.
    """
    # Inputs fixed for first model
    _model_1 = FashionMnist(kernel_size, padding)
    _model_1.apply(weight_reset)
    _model_1 = _model_1.to(device)
    _model_1.train()
    # BUG FIX: the original ignored the `lr` and `mini_batch_size`
    # parameters and always used the module-level LR / MINI_BATCH_SIZE,
    # so callers could never actually change them.
    optimizer = optim.SGD(params=_model_1.parameters(), lr=lr, )
    criterion.to(device)
    fashion_mnist_train_dataloader = torch.utils.data.DataLoader(
        TRAIN_DATA, batch_size=mini_batch_size, shuffle=True, )
    train_epoch_losses_model_1 = []
    for epoch in range(num_epochs):
        train_mini_batch_losses = []
        for _, (images, labels) in enumerate(fashion_mnist_train_dataloader):
            images = images.to(device)
            labels = labels.to(device)
            output = _model_1(images)
            _model_1.zero_grad()  # clear gradients before each backward pass
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()
            train_mini_batch_losses.append(loss.data.item())
        # Mean mini-batch loss is the per-epoch summary statistic.
        train_epoch_loss = np.mean(train_mini_batch_losses)
        train_epoch_losses_model_1.append(train_epoch_loss)
        print(f'Epoch {epoch}: {train_epoch_loss:.5}')
    return _model_1, train_epoch_losses_model_1
# Train model 1 and catch the model aswell as the losses
# NOTE(review): @timer makes train_model_1 return ((model, losses), seconds),
# so model_1 is the (model, losses) tuple and model_1_losses is actually the
# elapsed time -- hence the model_1[0] / model_1[1] indexing below.
model_1, model_1_losses = train_model_1(num_epochs=10)
# ## Evaluation Model 1
# +
# model_1[1] holds the per-epoch mean training losses.
fig = px.line(
    model_1[1],
    title='Model_1 Loss',
    labels={
        'index': 'Training Epoch',
        'value': 'Loss',
        'variable': 'Model',
    },
    template='none',
    log_y=False,
)
fig.show()
"""
Interestingly the loss has a linear scope. I guess the plot is so much
zoomed in (only 10 epochs) that the slope appears linear. The conclusion
is that more epochs are needed and this model is a true 'vanilla'.
""";
# -
model_1_loss, model_1_acc, model_1_report, model_1_cm, model_1_f1 = evaluate_model(model_1[0])
# +
print(f"Loss: {model_1_loss}")
print(f"Accuracy: {model_1_acc}")
print(f"F1-score: {model_1_f1}")
print("Classiification report:")
print(model_1_report)
"""
The accuracy confirms the previous conclusion the model barely started
to learn. The accuracy is just above 0.1. The classification report
confirms that aswell. Interestingly the model started to learn one class
first.
""";
# +
model_experiments_matrix(
    model_1_cm, title='Model_1 Experiment',
    xlabel='True Label',
    ylabel='Predicted Label',
    colorlabel='Hits', )
"""
The confusion matrix mirrors the classification report. One class is learned
first.
""";
# -
# ## Re-Train Model 1
# Model 1 showed that one class appears to be learned first. To test this hypothesis I retrain the model with more epochs and expect more classes to be learned.
model_1_1, model_1_1_losses = train_model_1(num_epochs=50)
model_1_1_loss, model_1_1_acc, model_1_1_report, model_1_1_cm, model_1_1_f1 = evaluate_model(model_1_1[0])
# ## Evaluation Model 1.1
print(f"Loss: {model_1_1_loss}")
print(f"Accuracy: {model_1_1_acc}")
print(f"F1-score: {model_1_1_f1}")
print("Classiification report:")
print(model_1_1_report)
# +
model_experiments_matrix(
    model_1_1_cm,
    title='Model_1 Experiment [2]',
    xlabel='True Label',
    ylabel='Predicted Label',
    colorlabel='Hits', )
"""
The classification report and the confusion matrix provide evidence
to confirm the hypothesis. The classes seem to be learned 'one-by-one'
""";
# -
# -
# ## Optimizer Experiment
# +
# Find best optimizer
model = FashionMnist(5, 0)
model = model.to(device)
# Data
fashion_mnist_train_dataloader = torch.utils.data.DataLoader(
TRAIN_DATA, batch_size=MINI_BATCH_SIZE, shuffle=True, )
optimizers = [
torch.optim.Adadelta(model.parameters(), lr=LR,),
torch.optim.Adagrad(model.parameters(), lr=LR,),
torch.optim.Adam(model.parameters(), lr=LR,),
torch.optim.RMSprop(model.parameters(), lr=LR,),
torch.optim.SGD(model.parameters(), lr=LR,),
]
# Hint from lecture, that nll is better?
criteria = [
torch.nn.NLLLoss(),
]
# +
"""
Train 5 models with constant inputs except for the optimizer.
The losses are saved after each epoch and model.
""";
num_epochs = 20
mini_batch_size = MINI_BATCH_SIZE
overall_losses = []
train_epoch_losses = []
model.train()
for optimizer in optimizers:
for criterion in criteria:
# reset model
model.apply(weight_reset)
train_epoch_losses = []
print(f"Optimizer: {optimizer}\nCriterion: {criterion}\n")
criterion.to(device)
#optimizer.to(device)
for epoch in range(num_epochs):
train_mini_batch_losses = []
for _, (images, labels) in enumerate(fashion_mnist_train_dataloader):
images = images.to(device)
labels = labels.to(device)
output = model(images)
model.zero_grad()
loss = criterion(output, labels)
loss.backward()
optimizer.step()
train_mini_batch_losses.append(loss.data.item())
train_epoch_loss = np.mean(train_mini_batch_losses)
print('Epoch: {} train-loss: {}'.format(str(epoch), str(train_epoch_loss)))
train_epoch_losses.append(train_epoch_loss)
overall_losses.append(train_epoch_losses)
print(overall_losses)
print("-" * 90)
# -
# Collect the per-optimizer loss curves into a frame: after the transpose,
# one column per optimizer/criterion combination and one row per epoch.
optimizer_labels = [
    'Adadelta-NLL',
    'Adagrad-NLL',
    'Adam-NLL',
    'RMSprop-NLL',
    'SGD-NLL',
]
epoch_labels = ['Epoch_' + str(epoch) for epoch in range(1, 21)]
df_optimizers = pd.DataFrame(
    overall_losses, index=optimizer_labels, columns=epoch_labels,
).T
# +
"""
The experiment results in a pd.df with 5 columns and 20 epochs each.
""";
df_optimizers
# +
# Line plot of the loss curves, one trace per optimizer, on a log y-axis
# so the late-training differences stay visible.
fig = px.line(
    df_optimizers,
    title='Optimizer Experiment',
    labels={
        'index': 'Training Epoch',
        'value': '<b>Log</b> Loss',
        'variable': 'Combination',
    },
    template='none',
    log_y=True,
)
fig.show()
"""
The experiment shows, that SGD unfortunately is the worst optimizer
for this data. That explains the linear slope in Task 1.
Adam and RMSprop do a much better job with RMSprop doing a slightly
better job (this changes after each iteration!).
""";
# -
# ## Kernel-Size & Epochs Experiment
@timer
def train_model_2(
    num_epochs: int,
    kernel_size: int,
    padding: int,
    mini_batch_size = MINI_BATCH_SIZE,
    lr=LR,
    criterion=nn.NLLLoss(),
):
    """Train a FashionMnist model with the given kernel size / padding.

    Uses RMSprop plus a ReduceLROnPlateau scheduler driven by the epoch loss.
    NOTE: the `criterion` default is a shared module instance across calls;
    acceptable here because NLLLoss is stateless.

    Returns:
        (model, overall_losses): the trained model and a list containing the
        per-epoch loss curve (same structure as the original notebook cell).
    """
    overall_losses = []
    train_epoch_losses = []
    fashion_mnist_train_dataloader = torch.utils.data.DataLoader(TRAIN_DATA, batch_size=mini_batch_size, shuffle=True)
    model = FashionMnist(kernel_size, padding)
    # BUG FIX: the model was never moved to `device`, while the inputs below
    # are — on a GPU runtime this raised a device-mismatch error.
    model = model.to(device)
    model.train()
    optimizer = optim.RMSprop(params=model.parameters(), lr=lr)
    criterion.to(device)
    # Adjust LR when the epoch loss plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.1, patience=3, threshold=0.0001, threshold_mode='abs',)
    for epoch in range(num_epochs):
        train_mini_batch_losses = []
        for images, labels in fashion_mnist_train_dataloader:
            images = images.to(device)
            labels = labels.to(device)
            output = model(images)
            model.zero_grad()
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()
            train_mini_batch_losses.append(loss.data.item())
        train_epoch_loss = np.mean(train_mini_batch_losses)
        # BUG FIX: scheduler.step() was called per mini-batch and *before*
        # optimizer.step(). ReduceLROnPlateau expects one metric per epoch,
        # and PyTorch (>= 1.1) requires optimizer.step() to run first.
        scheduler.step(train_epoch_loss)
        print(f'Epoch: {epoch} train-loss: {train_epoch_loss}')
        train_epoch_losses.append(train_epoch_loss)
    overall_losses.append(train_epoch_losses)
    print(overall_losses)
    print("-" * 90)
    return model, overall_losses
# +
"""
Iterate over the search space (kernel sizes and epochs) and record
key evaluation metrics after each model. The experiment will result
in 12 models with their metrics.
""";
experiment_losses = []
experiment_model_accs = []
experiment_model_f1s = []
for kernel_size in SEARCH_SPACE.keys():
for epoch in SEARCH_SPACE[kernel_size]:
print(kernel_size)
print(epoch)
# Decide which padding to use
padding = 0
if kernel_size == 6:
padding = 1
if kernel_size == 8:
padding = 2
current_model, current_model_losses = train_model_2(epoch, kernel_size, padding)
_, current_model_acc, _, _, current_model_f1 = evaluate_model(current_model[0])
print(f"Kernel: {kernel_size}x{kernel_size}\n"\
f"Epoch: {epoch}\n"
f"Accuracy: {current_model_acc}\n"
f"F1-score: {current_model_f1}\n\n")
experiment_model_accs.append(round(current_model_acc, 4))
experiment_model_f1s.append(round(current_model_f1, 4))
experiment_losses.append(current_model_losses)
print(experiment_model_accs)
print(experiment_model_f1s)
# +
# 3D surface plot: accuracy (upper surface) and f1-score (lower surface)
# over the kernel-size (x) / epoch-count (y) grid. The 12 recorded values
# are reshaped to (4 epochs, 3 kernel sizes) to match plotly's (rows=y,
# cols=x) convention for Surface z data.
fig = go.Figure(
    data=[
        go.Surface(
            z=np.array(experiment_model_accs).reshape(4,3),
            x=[4, 6, 8, ],
            y=[8, 16, 32, 64,],
        ),
        go.Surface(
            z=np.array(experiment_model_f1s).reshape(4,3),
            x=[4, 6, 8, ],
            y=[8, 16, 32, 64,],
        ),
    ],
)
# Project z-contours onto the surfaces for readability.
fig.update_traces(contours_z=dict(
    show=True,
    usecolormap=True,
    highlightcolor="limegreen",
    project_z=True,
))
fig.update_layout(
    title='Accuracy & F1-Score over Kernel Size and no. of Epochs',
    autosize=True,
    width=800,
    height=800,
    margin=dict(
        l=65,
        r=50,
        b=65,
        t=90, ),
)
# Camera placed above-diagonal so both surfaces are visible.
camera = dict(
    up=dict(x=0, y=0, z=1), # z-axis up
    center=dict(x=0, y=0, z=0), # default
    eye=dict(x=1.5, y=1.5, z=0.8)
)
fig.update_layout(
    scene=dict(
        xaxis_title='Kernel size',
        yaxis_title='# Epochs',
        zaxis_title='Test Accuracy',
        xaxis=dict(nticks=3, range=[4, 8],),
        yaxis=dict(nticks=4, range=[8, 64],),
        # Note: z-axis deliberately clipped to [0.1, 0.7].
        zaxis=dict(nticks=10, range=[0.1, 0.7],),
    ),
    scene_camera=camera,
)
fig.write_html('./plots/surface_plot.html')
#fig.show() # comment out if problems with WebGL
# < WebGl causes problems with plotting a 3D plot in google colab. >
# The surface plot displays the accuracy and the average f1-scores for each model. We can observe a slight trend upwards with increasing epochs and a dip at kernel size 6x6 across all epochs. I did not know that accuracy and f1-score are that closely related, but it makes sense because they assess the same metric (value counts per class) and scale it down between 0 and 1. The upper surface shows the accuracy and the lower surface shows the f1-scores.
# Note that the z-axis is scaled between 0.1 and 0.7.
#
# <img align="center" style="max-width: 600px" src="./plots/surface_plot.png">
# +
fig = px.imshow(
np.array(experiment_model_accs).reshape(4,3).T,
title='Model Comparison',
template='none',
labels=dict(
x='# Epochs',
y='Kernel Size',
color='Accuracy',
),
x=['*8', '*16', '*32', '*64',],
y=['*4', '*6', '*8', ],
aspect='equal',
color_continuous_scale='RdBu',
zmin=0,
zmax=1,
)
fig.show()
"""
The surface plot can be sclaed down to a simple heatmap aswell.
The color is scaled between 0 and 1. This illustrates that the
models are each closely related. None shows a much much better
performance.
""";
# -
# ## Evaluation Model 2
# Evaluate the last model from the kernel/epoch experiment on the test set.
model_2_loss, model_2_acc, model_2_report, model_2_cm, model_2_f1 = evaluate_model(current_model[0])
print(f"Loss: {model_2_loss}")
print(f"Accuracy: {model_2_acc}")
print(f"F1-score: {model_2_f1}")
print("Classification report:")  # BUG FIX: was misspelled "Classiification"
print(model_2_report)
# +
# Confusion matrix with the human-readable class names on both axes.
fig = px.imshow(
    model_2_cm,
    title='Confusion Matrix',
    labels=dict(
        x="True Label",
        y="Predicted Label",
        color="Hits",
    ),
    x=[*CLASSES.values()],
    y=[*CLASSES.values()],
    template='none',
    aspect='equal',
    color_continuous_scale='RdBu',
    zmin=0,
    zmax=1000,
)
fig.show()
"""
The confusion matrix shows an improved 'learning' across classes.
When compared to the vanilla model more classes are predicted correctly.
""";
| src/deep_learning/pytorch_example.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.0
# language: julia
# name: julia-1.0
# ---
# # Tutorial
#
# ## Installing everything
#
# To install Interact, simply type
# ```julia
# Pkg.add("Interact")
# ```
# in the REPL.
#
# The basic behavior is as follows: Interact provides a series of widgets. Each widget has an output that can be directly inspected or used to trigger some callbacks (i.e. run some code as soon as the widget changes value): the abstract supertype that gives this behavior is called `AbstractObservable`. Let's see this in practice.
#
# ## Displaying a widget
using Interact
# A button widget: its observable value counts how often it has been clicked.
ui = button()
display(ui)
# Note that `display` works in a [Jupyter notebook](https://github.com/JuliaLang/IJulia.jl) or in [Atom/Juno IDE](https://github.com/JunoLab/Juno.jl).
# Interact can also be deployed in Jupyter Lab, but that requires installing an extension first:
# BUG FIX: `Pkg.dir` was removed in Julia 1.0 (this notebook's kernel), so
# `cd(Pkg.dir("WebIO", "assets"))` errors. Locate the installed package's
# assets directory via `pathof` instead.
import WebIO
cd(joinpath(dirname(pathof(WebIO)), "..", "assets"))
;jupyter labextension install webio
;jupyter labextension enable webio/jupyterlab_entry
# To deploy the app as a standalone Electron window, one would use [Blink.jl](https://github.com/JunoLab/Blink.jl):
# Render the UI in a standalone Electron window via Blink.
using Blink
w = Window()
body!(w, ui);
# The app can also be served in a webpage via [Mux.jl](https://github.com/JuliaWeb/Mux.jl):
using Mux
WebIO.webio_serve(page("/", req -> ui), rand(8000:9000)) # serve on a random port
# ## Adding behavior
# The value of our button can be inspected using `getindex`:
# Read the widget's current value (click count for a button).
ui[]
# In the case of a button, the observable represents the number of times it has been clicked: click on it and check the value again.
# For now however this button doesn't do anything. This can be changed by adding callbacks to it.
#
# To add some behavior to the widget we can use the `on` construct. `on` takes two arguments, a function and an `AbstractObservable`. As soon as the observable is changed, the function is called with the latest value.
on(println, ui)
# If you click again on the button you will see it printing the number of times it has been clicked so far.
#
# *Tip*: anonymous functions are very useful in this programming paradigm. For example, if you want the button to say "Hello!" when pressed, you should use:
on(n -> println("Hello!"), ui)
# *Tip n. 2*: using the `[]` syntax you can also set the value of the widget:
ui[] = 33;
# ### Observables: the implementation of a widget's output
# The updatable container that only has the output of the widget but not the widget itself is an `Observable` and can be accessed using `observe(ui)`, though it should normally not be necessary to do so.
# To learn more about `Observables` and `AbstractObservable`, check out their documentation [here](https://juliagizmos.github.io/Observables.jl/latest/).
# ## What widgets are there?
#
# Once you have grasped this paradigm, you can play with any of the many widgets available:
# A quick tour of the available input widgets and what their values hold.
filepicker() |> display # value is the path of selected file
textbox("Write here") |> display # value is the text typed in by the user
autocomplete(["Mary", "Jane", "Jack"]) |> display # as above, but you can autocomplete words
checkbox(label = "Check me!") |> display # value is a boolean describing whether it's ticked
toggle(label = "I have read and agreed") |> display # same as a checkbox but styled differently
slider(1:100, label = "To what extent?", value = 33) |> display # value is the number selected
# As well as the option widgets, that allow to choose among options:
dropdown(["a", "b", "c"]) |> display # value is option selected
togglebuttons(["a", "b", "c"]) |> display # value is option selected
radiobuttons(["a", "b", "c"]) |> display # value is option selected
# The option widgets can also take as input a dictionary (ordered dictionary is preferable, to avoid items getting scrambled), in which case the label displays the key while the output stores the value:
s = dropdown(OrderedDict("a" => "Value 1", "b" => "Value 2"))
display(s)
s[]
# ## Creating custom widgets
#
# Interact allows the creation of custom composite widgets starting from simpler ones.
# Let's say for example that we want to create a widget that has three sliders and a color
# that is updated to match the RGB value we gave with the sliders.
# +
import Colors
using Plots
# Composite widget: three RGB sliders driving a plot whose line color is the
# selected color. `Interact.@map` re-evaluates its expression whenever any
# `&`-referenced observable changes.
function mycolorpicker()
    r = slider(0:255, label = "red")
    g = slider(0:255, label = "green")
    b = slider(0:255, label = "blue")
    output = Interact.@map Colors.RGB(&r/255, &g/255, &b/255)
    plt = Interact.@map plot(sin, color = &output)
    wdg = Widget(["r" => r, "g" => g, "b" => b], output = output)
    @layout! wdg hbox(plt, vbox(:r, :g, :b)) ## custom layout: by default things are stacked vertically
end
# -
# And now you can simply instantiate the widget with
mycolorpicker()
# Note the `&r` syntax: it means automatically update the widget as soon as the
# slider changes value. See `Interact.@map` for more details.
# If instead we wanted to only update the plot when a button is pressed we would do:
# Variant: the plot only refreshes when the "Update plot" button is pressed.
# `&update` makes the button the trigger; the sliders are read with `[]`
# (plain value access) so they do NOT trigger re-evaluation on their own.
function mycolorpicker()
    r = slider(0:255, label = "red")
    g = slider(0:255, label = "green")
    b = slider(0:255, label = "blue")
    update = button("Update plot")
    output = Interact.@map (&update; Colors.RGB(r[]/255, g[]/255, b[]/255))
    plt = Interact.@map plot(sin, color = &output)
    wdg = Widget(["r" => r, "g" => g, "b" => b, "update" => update], output = output)
    @layout! wdg hbox(plt, vbox(:r, :g, :b, :update)) ## custom layout: by default things are stacked vertically
end
# ## A simpler approach for simpler cases
#
# While the approach sketched above works for all sorts of situations, there is a specific macro to simplify it in some specific case. If you just want to update some result (maybe a plot) as a function of some parameters (discrete or continuous) simply write `@manipulate` before the `for` loop. Discrete parameters will be replaced by `togglebuttons` and continuous parameters by `slider`: the result will be updated as soon as you click on a button or move the slider:
# SVG canvas size and a cyclic color palette for the dots.
width, height = 700, 300
colors = ["black", "gray", "silver", "maroon", "red", "olive", "yellow", "green", "lime", "teal", "aqua", "navy", "blue", "purple", "fuchsia"]
# 1-based cyclic lookup into the palette.
color(i) = colors[i%length(colors)+1]
# @manipulate turns each loop variable into a widget (ranges become sliders,
# explicit sliders are used as given) and re-renders the SVG on change.
ui = @manipulate for nsamples in 1:200,
        sample_step in slider(0.01:0.01:1.0, value=0.1, label="sample step"),
        phase in slider(0:0.1:2pi, value=0.0, label="phase"),
        radii in 0.1:0.1:60
    cxs_unscaled = [i*sample_step + phase for i in 1:nsamples]
    cys = sin.(cxs_unscaled) .* height/3 .+ height/2
    cxs = cxs_unscaled .* width/4pi
    dom"svg:svg[width=$width, height=$height]"(
        (dom"svg:circle[cx=$(cxs[i]), cy=$(cys[i]), r=$radii, fill=$(color(i))]"()
            for i in 1:nsamples)...
    )
end
# or, if you want a plot with some variables taking discrete values:
# +
using Plots
x = y = 0:0.1:30
# Dict input: labels shown on the togglebuttons, numeric values used in the plot.
freqs = OrderedDict(zip(["pi/4", "π/2", "3π/4", "π"], [π/4, π/2, 3π/4, π]))
# freq1 becomes togglebuttons (discrete), freq2 is an explicit slider.
mp = @manipulate for freq1 in freqs, freq2 in slider(0.01:0.1:4π; label="freq2")
    y = @. sin(freq1*x) * sin(freq2*x)
    plot(x, y)
end
# -
# ## Widget layout
#
# To create a full blown web-app, you should learn the layout tools that the CSS framework you are using provides. See for example the [columns](https://bulma.io/documentation/columns/) and [layout](https://bulma.io/documentation/layout/) section of the Bulma docs. You can use [WebIO](https://github.com/JuliaGizmos/WebIO.jl) to create from Julia the HTML required to create these layouts.
#
# However, this can be overwhelming at first (especially for users with no prior experience in web design). A simpler solution is [CSSUtil](https://github.com/JuliaGizmos/CSSUtil.jl), a package that provides some tools to create simple layouts.
# Simple CSSUtil layout demo: vertical stack with a horizontal row inside.
loadbutton = filepicker()
hellobutton = button("Hello!")
goodbyebutton = button("Good bye!")
ui = vbox( # put things one on top of the other
    loadbutton,
    hbox( # put things one next to the other
        pad(1em, hellobutton), # to allow some white space around the widget
        pad(1em, goodbyebutton),
    )
)
display(ui)
# ## Update widgets as function of other widgets
#
# Sometimes the full structure of the GUI is not known in advance. For example, let's imagine we want to load a DataFrame and create a button per column. Not to make it completely trivial, as soon as a button is pressed, we want to plot a histogram of the corresponding column.
#
# *Important note*: this app needs to run in Blink, as the browser doesn't allow us to get access to the local path of a file.
#
# We start by adding a `filepicker` to choose the file, and only once we have a file we want to update the GUI. this can be done as follows:
loadbutton = filepicker()
# Container div for the per-column buttons; an Observable so it can be
# replaced whenever a new file is loaded.
columnbuttons = Observable{Any}(dom"div"())
# `columnbuttons` is the `div` object that will contain all the relevant buttons. it is an `Observable` as we want its value to change over time.
# To add behavior, we can use `map!`:
using CSV, DataFrames
# NOTE(review): the initial value is the DataFrame *type*, used as a
# placeholder until the first file is read — confirm this is intentional.
data = Observable{Any}(DataFrame)
# Re-read the CSV into `data` whenever a file is picked.
# NOTE(review): `CSV.read` requires a sink argument (e.g. `DataFrame`) in
# newer CSV.jl versions; this form matches the CSV.jl of the Julia 1.0 era.
map!(CSV.read, data, loadbutton)
# Now as soon as a file is uploaded, the `Observable` `data` gets updated with the correct value. Now, as soon as `data` is updated, we want to update our buttons.
# +
# Build a horizontal row of buttons, one per DataFrame column, wrapped in a div.
function makebuttons(df)
    colbuttons = [button(colname) for colname in names(df)]
    dom"div"(hbox(colbuttons))
end
# Rebuild the button row whenever `data` (the loaded DataFrame) changes.
map!(makebuttons, columnbuttons, data)
# -
# We are almost done, we only need to add a callback to the buttons. The cleanest way is to do it during button initialization, meaning during our `makebuttons` step:
using Plots
plt = Observable{Any}(plot()) # the container for our plot
# Same as before, but each button now gets a callback that replaces `plt`
# with a histogram of its column.
function makebuttons(df)
    buttons = button.(string.(names(df)))
    for (btn, name) in zip(buttons, names(df))
        # NOTE(review): `df[name]` column indexing is deprecated in newer
        # DataFrames.jl (use `df[!, name]`); fine for the pinned Julia 1.0 setup.
        map!(t -> histogram(df[name]), plt, btn)
    end
    dom"div"(hbox(buttons))
end
# To put it all together:
# +
using CSV, DataFrames, Interact, Plots
# Complete app: pick a CSV, get one button per column, click to histogram it.
loadbutton = filepicker()
columnbuttons = Observable{Any}(dom"div"())
data = Observable{Any}(DataFrame)  # placeholder until a file is read
plt = Observable{Any}(plot())
map!(CSV.read, data, loadbutton)
function makebuttons(df)
    buttons = button.(string.(names(df)))
    for (btn, name) in zip(buttons, names(df))
        map!(t -> histogram(df[name]), plt, btn)
    end
    dom"div"(hbox(buttons))
end
map!(makebuttons, columnbuttons, data)
# Stack the file picker, the button row, and the plot vertically.
ui = dom"div"(loadbutton, columnbuttons, plt)
# And now to serve it in Blink:
# Serve the app in an Electron window (needed: the browser cannot expose
# local file paths to the filepicker).
using Blink
w = Window()
body!(w, ui)
# *This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*
| doc/notebooks/tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import pandas as pd
import yfinance as yf
from datetime import datetime
import pytz
from numba import njit
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt import base_optimizer
import vectorbt as vbt
from vectorbt.generic.nb import nanmean_nb
from vectorbt.portfolio.nb import order_nb, sort_call_seq_nb
from vectorbt.portfolio.enums import SizeType, Direction
# +
# Define params
symbols = ['FB', 'AMZN', 'NFLX', 'GOOG', 'AAPL']
start_date = datetime(2017, 1, 1, tzinfo=pytz.utc)
end_date = datetime(2020, 1, 1, tzinfo=pytz.utc)
num_tests = 2000  # number of random weight vectors to try
# Global vectorbt settings: daily frequency, 252 trading days/year,
# fixed seed for reproducibility, include open trades in stats.
vbt.settings.array_wrapper['freq'] = 'days'
vbt.settings.returns['year_freq'] = '252 days'
vbt.settings.portfolio['seed'] = 42
vbt.settings.portfolio.stats['incl_unrealized'] = True
# +
# Download OHLCV data for all symbols from Yahoo Finance.
yfdata = vbt.YFData.download(symbols, start=start_date, end=end_date)
print(yfdata.symbols)
# +
# Concatenate per-symbol data into one frame per OHLCV field.
ohlcv = yfdata.concat()
print(ohlcv.keys())
# -
price = ohlcv['Close']
# Plot normalized price series
(price / price.iloc[0]).vbt.plot().show_svg()
# Daily simple returns and their basic statistics.
returns = price.pct_change()
print(returns.mean())
print(returns.std())
print(returns.corr())
# ## vectorbt: Random search
# ### One-time allocation
# +
np.random.seed(42)
# Draw `num_tests` random long-only allocations; each weight vector is
# normalized so its entries sum to 1.
weights = []
for _ in range(num_tests):
    raw = np.random.random_sample(len(symbols))
    weights.append(raw / np.sum(raw))
print(len(weights))
# +
# Build column hierarchy such that one weight corresponds to one price series
# Tile the price frame `num_tests` times and stack the flattened weights
# as an extra column level, so each weight vector forms one column group.
_price = price.vbt.tile(num_tests, keys=pd.Index(np.arange(num_tests), name='symbol_group'))
_price = _price.vbt.stack_index(pd.Index(np.concatenate(weights), name='weights'))
print(_price.columns)
# +
# Define order size
size = np.full_like(_price, np.nan)  # NaN = no order on that bar
size[0, :] = np.concatenate(weights) # allocate at first timestamp, do nothing afterwards
print(size.shape)
# +
# Run simulation
# One portfolio per weight vector (column group), sharing cash within a group.
pf = vbt.Portfolio.from_orders(
    close=_price,
    size=size,
    size_type='targetpercent',
    group_by='symbol_group',
    cash_sharing=True
) # all weights sum to 1, no shorting, and 100% investment in risky assets
print(len(pf.orders))
# -
# Plot annualized return against volatility, color by sharpe ratio
# (the series' index is repurposed as the x-axis for the scatterplot).
annualized_return = pf.annualized_return()
annualized_return.index = pf.annualized_volatility()
annualized_return.vbt.scatterplot(
    trace_kwargs=dict(
        mode='markers',
        marker=dict(
            color=pf.sharpe_ratio(),
            colorbar=dict(
                title='sharpe_ratio'
            ),
            size=5,
            opacity=0.7
        )
    ),
    xaxis_title='annualized_volatility',
    yaxis_title='annualized_return'
).show_svg()
# +
# Get index of the best group according to the target metric
best_symbol_group = pf.sharpe_ratio().idxmax()
print(best_symbol_group)
# -
# Print best weights
print(weights[best_symbol_group])
# Compute default stats
print(pf.iloc[best_symbol_group].stats())
# ### Rebalance monthly
# +
# Select the first index of each month
# (first trading day per month: the first row whose year-month is not a
# duplicate of the previous row's).
rb_mask = ~_price.index.to_period('m').duplicated()
print(rb_mask.sum())
# +
# Re-issue the same target weights on every masked (month-start) bar.
rb_size = np.full_like(_price, np.nan)
rb_size[rb_mask, :] = np.concatenate(weights) # allocate at mask
print(rb_size.shape)
# +
# Run simulation, with rebalancing monthly
rb_pf = vbt.Portfolio.from_orders(
    close=_price,
    size=rb_size,
    size_type='targetpercent',
    group_by='symbol_group',
    cash_sharing=True,
    call_seq='auto' # important: sell before buy
)
print(len(rb_pf.orders))
# +
# BUG FIX: the best group of the *rebalanced* portfolio was looked up on the
# one-time-allocation portfolio `pf`; the following lines index `rb_pf`, so
# the selection must come from `rb_pf` as well.
rb_best_symbol_group = rb_pf.sharpe_ratio().idxmax()
print(rb_best_symbol_group)
# -
print(weights[rb_best_symbol_group])
print(rb_pf.iloc[rb_best_symbol_group].stats())
def plot_allocation(rb_pf):
    # Plot weights development of the portfolio
    # Stacked-area chart of each asset's share of total portfolio value,
    # with a vertical line on every bar where at least one order executed.
    rb_asset_value = rb_pf.asset_value(group_by=False)
    rb_value = rb_pf.value()
    rb_idxs = np.flatnonzero((rb_pf.asset_flow() != 0).any(axis=1))
    rb_dates = rb_pf.wrapper.index[rb_idxs]
    fig = (rb_asset_value.vbt / rb_value).vbt.plot(
        trace_names=symbols,
        trace_kwargs=dict(
            stackgroup='one'
        )
    )
    for rb_date in rb_dates:
        # Marker line styled with the background color so it reads as a gap.
        fig.add_shape(
            dict(
                xref='x',
                yref='paper',
                x0=rb_date,
                x1=rb_date,
                y0=0,
                y1=1,
                line_color=fig.layout.template.layout.plot_bgcolor
            )
        )
    fig.show_svg()
plot_allocation(rb_pf.iloc[rb_best_symbol_group]) # best group
# ### Search and rebalance every 30 days
# Utilize the low-level API to dynamically search for the best Sharpe ratio and rebalance accordingly. Compared to the previous method, we won't utilize stacking, but do the search in a loop instead. We also will use days instead of months, as the latter may contain a varying number of trading days.
# +
# Per-bar record of the best Sharpe ratio found at each rebalancing step.
srb_sharpe = np.full(price.shape[0], np.nan)
@njit
def pre_sim_func_nb(c, every_nth):
    # Define rebalancing days
    # Only rows every_nth, 2*every_nth, ... are active segments; all other
    # bars are skipped entirely by the simulator.
    c.segment_mask[:, :] = False
    c.segment_mask[every_nth::every_nth, :] = True
    return ()
@njit
def find_weights_nb(c, price, num_tests):
    # Find optimal weights based on best Sharpe ratio
    # Random search: draw `num_tests` normalized weight vectors and keep the
    # one with the highest annualized Sharpe ratio over the given price window.
    returns = (price[1:] - price[:-1]) / price[:-1]
    returns = returns[1:, :] # cannot compute np.cov with NaN
    mean = nanmean_nb(returns)
    cov = np.cov(returns, rowvar=False) # masked arrays not supported by Numba (yet)
    best_sharpe_ratio = -np.inf
    # Stays all-NaN if no candidate produces a comparable Sharpe ratio
    # (e.g. all NaN) — callers should be aware of that edge case.
    weights = np.full(c.group_len, np.nan, dtype=np.float_)
    for i in range(num_tests):
        # Generate weights
        w = np.random.random_sample(c.group_len)
        w = w / np.sum(w)
        # Compute annualized mean, covariance, and Sharpe ratio
        # NOTE(review): `ann_factor` is a module-level global frozen into the
        # numba-compiled function at first call — it must be defined before
        # this function first runs; confirm this coupling is intended.
        p_return = np.sum(mean * w) * ann_factor
        p_std = np.sqrt(np.dot(w.T, np.dot(cov, w))) * np.sqrt(ann_factor)
        sharpe_ratio = p_return / p_std
        if sharpe_ratio > best_sharpe_ratio:
            best_sharpe_ratio = sharpe_ratio
            weights = w
    return best_sharpe_ratio, weights
@njit
def pre_segment_func_nb(c, find_weights_nb, history_len, ann_factor, num_tests, srb_sharpe):
    # Runs once per active (rebalancing) segment: picks new target weights
    # from the lookback window and prepares the order call sequence.
    # NOTE(review): `ann_factor` is accepted but not forwarded —
    # `find_weights_nb` reads the module-level global instead; confirm.
    if history_len == -1:
        # Look back at the entire time period
        close = c.close[:c.i, c.from_col:c.to_col]
    else:
        # Look back at a fixed time period
        if c.i - history_len <= 0:
            return (np.full(c.group_len, np.nan),) # insufficient data
        close = c.close[c.i - history_len:c.i, c.from_col:c.to_col]
    # Find optimal weights
    best_sharpe_ratio, weights = find_weights_nb(c, close, num_tests)
    srb_sharpe[c.i] = best_sharpe_ratio
    # Update valuation price and reorder orders
    # Value each column at the current close, then sort the call sequence so
    # sells execute before buys (frees cash for target-percent rebalancing).
    size_type = SizeType.TargetPercent
    direction = Direction.LongOnly
    order_value_out = np.empty(c.group_len, dtype=np.float_)
    for k in range(c.group_len):
        col = c.from_col + k
        c.last_val_price[col] = c.close[c.i, col]
    sort_call_seq_nb(c, weights, size_type, direction, order_value_out)
    return (weights,)
@njit
def order_func_nb(c, weights):
    # Emit one target-percent order per column, using the weight that matches
    # the current position in the (sorted) call sequence.
    col_i = c.call_seq_now[c.call_idx]
    return order_nb(
        weights[col_i],
        c.close[c.i, c.col],
        size_type=SizeType.TargetPercent
    )
# -
# Annualization factor (defined before the numba functions first compile,
# since find_weights_nb reads it as a global).
ann_factor = returns.vbt.returns.ann_factor
# Run simulation using a custom order function
# Rebalance every 30 bars, searching over the entire history each time.
srb_pf = vbt.Portfolio.from_order_func(
    price,
    order_func_nb,
    pre_sim_func_nb=pre_sim_func_nb,
    pre_sim_args=(30,),
    pre_segment_func_nb=pre_segment_func_nb,
    pre_segment_args=(find_weights_nb, -1, ann_factor, num_tests, srb_sharpe),
    cash_sharing=True,
    group_by=True
)
# Plot best Sharpe ratio at each rebalancing day
pd.Series(srb_sharpe, index=price.index).vbt.scatterplot(trace_kwargs=dict(mode='markers')).show_svg()
print(srb_pf.stats())
plot_allocation(srb_pf)
# You can see how weights stabilize themselves with growing data.
# +
# Run simulation, but now consider only the last 252 days of data
# (rolling one-trading-year lookback instead of the full history).
srb252_sharpe = np.full(price.shape[0], np.nan)
srb252_pf = vbt.Portfolio.from_order_func(
    price,
    order_func_nb,
    pre_sim_func_nb=pre_sim_func_nb,
    pre_sim_args=(30,),
    pre_segment_func_nb=pre_segment_func_nb,
    pre_segment_args=(find_weights_nb, 252, ann_factor, num_tests, srb252_sharpe),
    cash_sharing=True,
    group_by=True
)
# -
pd.Series(srb252_sharpe, index=price.index).vbt.scatterplot(trace_kwargs=dict(mode='markers')).show_svg()
print(srb252_pf.stats())
plot_allocation(srb252_pf)
# A much more volatile weight distribution.
# ## PyPortfolioOpt + vectorbt
# ### One-time allocation
# +
# Calculate expected returns and sample covariance matrix
avg_returns = expected_returns.mean_historical_return(price)
cov_mat = risk_models.sample_cov(price)
# Get weights maximizing the Sharpe ratio
ef = EfficientFrontier(avg_returns, cov_mat)
weights = ef.max_sharpe()
clean_weights = ef.clean_weights()
# Reorder into a plain array aligned with `symbols`.
pyopt_weights = np.array([clean_weights[symbol] for symbol in symbols])
print(pyopt_weights)
# +
# One-time allocation with the PyPortfolioOpt weights.
pyopt_size = np.full_like(price, np.nan)
pyopt_size[0, :] = pyopt_weights # allocate at first timestamp, do nothing afterwards
print(pyopt_size.shape)
# +
# Run simulation with weights from PyPortfolioOpt
pyopt_pf = vbt.Portfolio.from_orders(
    close=price,
    size=pyopt_size,
    size_type='targetpercent',
    group_by=True,
    cash_sharing=True
)
print(len(pyopt_pf.orders))
# -
# Faster than stacking solution, but doesn't let you compare weights.
print(pyopt_pf.stats())
# ### Search and rebalance monthly
# You can't use third-party optimization packages within Numba (yet).
#
# Here you have two choices:
# 1) Use `os.environ['NUMBA_DISABLE_JIT'] = '1'` before all imports to disable Numba completely
# 2) Disable Numba for the function, but also for every other function in the stack that calls it
#
# We will demonstrate the second option.
def pyopt_find_weights(sc, price, num_tests):  # no @njit decorator = it's a pure Python function
    """Mean-variance optimize the window with PyPortfolioOpt.

    Drop-in replacement for find_weights_nb: returns the max-Sharpe weights
    (aligned with `symbols`) and the corresponding Sharpe ratio. `sc` and
    `num_tests` are accepted only for signature compatibility.
    """
    frame = pd.DataFrame(price, columns=symbols)
    mu = expected_returns.mean_historical_return(frame)
    sigma = risk_models.sample_cov(frame)
    # Solve for the maximum-Sharpe portfolio on the efficient frontier.
    ef = EfficientFrontier(mu, sigma)
    ef.max_sharpe()
    cleaned = ef.clean_weights()
    best_weights = np.array([cleaned[symbol] for symbol in symbols])
    # portfolio_performance returns (return, volatility, sharpe).
    best_sharpe_ratio = base_optimizer.portfolio_performance(best_weights, mu, sigma)[2]
    return best_sharpe_ratio, best_weights
# +
pyopt_srb_sharpe = np.full(price.shape[0], np.nan)
# Run simulation with a custom order function
# Numba is bypassed for the whole call stack so the pure-Python optimizer
# can run inside the simulation loop.
pyopt_srb_pf = vbt.Portfolio.from_order_func(
    price,
    order_func_nb,
    pre_sim_func_nb=pre_sim_func_nb,
    pre_sim_args=(30,),
    pre_segment_func_nb=pre_segment_func_nb.py_func, # run pre_segment_func_nb as pure Python function
    pre_segment_args=(pyopt_find_weights, -1, ann_factor, num_tests, pyopt_srb_sharpe),
    cash_sharing=True,
    group_by=True,
    use_numba=False # run simulate_nb as pure Python function
)
# -
# Best Sharpe per rebalancing day, summary stats, and allocation over time.
pd.Series(pyopt_srb_sharpe, index=price.index).vbt.scatterplot(trace_kwargs=dict(mode='markers')).show_svg()
print(pyopt_srb_pf.stats())
plot_allocation(pyopt_srb_pf)
| examples/PortfolioOptimization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="FbSC8fnUdRPE"
# # Demonstrating PERCIVAL
# + [markdown] colab_type="text" id="x6smWODqLlw5"
# See "Learning Bayes' theorem with a neural network for gravitational-wave inference" by <NAME> and <NAME> ([arXiv:1904.05355](http://www.arxiv.org/abs/1904.05355)).
# + [markdown] colab_type="text" id="HY8JGQundYdj"
# *<NAME>, 9/23/2019*
# + [markdown] colab_type="text" id="BZfUerCfLgba"
# ## Install
# + [markdown] colab_type="text" id="tjaNMYo9LwmN"
# Install the `TrueBayes` Python package from [source on GitHub](https://github.com/vallis/truebayes).
# + colab={"base_uri": "https://localhost:8080/", "height": 203} colab_type="code" id="OkptpO2kLlw8" outputId="cf0dfa08-859c-40dc-b57a-775f7d95ee41"
# !pip install --upgrade git+https://github.com/vallis/TrueBayes.git
# + [markdown] colab_type="text" id="4GDEWFJXLite"
# ## Imports
# + [markdown] colab_type="text" id="C23xM7gRL7U-"
# Just standard packages in the Python data-science stack, plus [PyTorch](https://pytorch.org) and our own `TrueBayes`. BTW, if you are on Google Colaboratory, be sure to run this notebook with a GPU-enabled instance.
# + colab={} colab_type="code" id="d-N1CC4XNw_Q"
import os
import math
# + colab={} colab_type="code" id="2SHx8o9uNpfY"
import numpy as np
import matplotlib.pyplot as pp
# %matplotlib inline
# + colab={} colab_type="code" id="aWzBmV_gTKFP"
import torch
# + colab={} colab_type="code" id="ePrSDdRWT2eT"
import truebayes.network
import truebayes.geometry
import truebayes.roman
import truebayes.loss
import truebayes.like
import truebayes.plot
# + [markdown] colab_type="text" id="o0lDYgLTNzjy"
# ## Source-parameter posterior inference for a 3D template family (chirp mass, mass ratio, SNR)
# + [markdown] colab_type="text" id="gzeaJ5_RSqHv"
# ### Set up networks
# + [markdown] colab_type="text" id="2sG5-whRM_H4"
# The PERCIVAL network consists of the input layer for the 482 ROMAN weights (241 complex numbers); of 8 1024-wide hidden layers; and of an output layer describing the six parameters of a two-dimensional joint normal distribution. By default, we work in single precision; `softmax=True` is required for Gaussian-mixture posteriors.
# + colab={} colab_type="code" id="sdVaa05_TGbf"
# Build the posterior network: input layer for 482 ROMAN weights (241 complex
# numbers as re/im pairs), 8 hidden layers of width 1024, and 6 outputs
# parameterizing a 2-D joint normal.
# NOTE(review): the markdown above says softmax=True is required for
# Gaussian-mixture posteriors; softmax=False here implies a single component.
Net_roman_G2 = truebayes.network.makenet([241*2] + [1024] * 8 + [1*6], softmax=False)
nrg2 = Net_roman_G2()
# + [markdown] colab_type="text" id="iCoG3n6tP9Xl"
# Here is how we train this network. We set up a training-set *factory* over a rectangular Mc-nu region, uniformly distributed in SNR. This factory will produce batches of 100000 signal + noise ROMAN coefficients, with the corresponding "true" parameters. Noise is normalized (set to 1), as appropriate for training with noisy signals.
#
# Specifying `varx` as `['Mc','nu']` returns a training set appropriate for 1D training.
# + colab={} colab_type="code" id="OkNl-3-qOKlr"
trainingset = lambda: truebayes.roman.syntrain(snr=[8,16], size=100000, varx=['Mc','nu'], region=[[0.26,0.47], [0.2,0.25]])
# + [markdown] colab_type="text" id="mmLMQV9uPXAx"
# As an example, we train over 100 iterations (1e6 total signals), which takes a few minutes on Google Colab's K80 Tesla GPU. You'd need a factor of 10 or 100 more to achieve a sufficiently trained network.
# + colab={"base_uri": "https://localhost:8080/", "height": 137} colab_type="code" id="i2o_uiztODA5" outputId="c1287aa6-e78d-4075-9347-3f69fcba9950"
# %%time
# Train for 100 iterations (1e6 signals total) with the single-Gaussian KL
# loss; the step size anneals from initstep toward finalv.
truebayes.roman.syntrainer(nrg2, trainingset, lossfunction=truebayes.loss.kllossGn2,
                           iterations=100, initstep=1e-4, finalv=1e-8)
# + [markdown] colab_type="text" id="P6HsUolUQAZC"
# To demonstrate PERCIVAL in inference mode without you having to train it, we're including a trained network in the `TrueBayes` package. After loading the network we set it into inference mode to skip all the PyTorch back-propagation accounting.
#
# + colab={} colab_type="code" id="uthPs0ccNUJd"
# Load the pre-trained weights shipped with the package, then switch to
# evaluation mode (skips back-propagation bookkeeping during inference).
nrg2.load_state_dict(torch.load(truebayes.roman.datadir + 'percival/Mc-nu_l1024x8_g1_SNR8-16_2d.pt'))
nrg2.eval();
# + [markdown] colab_type="text" id="t3ycE_Q2TbSL"
# ### Setup test examples and template factories
# + [markdown] colab_type="text" id="Q_-hZcPDTnG5"
# To test our network, we need more examples (let's say 5000), drawn from the same parameter region where we have trained it. We get them with another factory. (Note that we set `varall=True`, to save all source parameters. Below we grab those into arrays.)
# + colab={} colab_type="code" id="e1TTIUkYUI0V"
mutest = truebayes.roman.syntrain(snr=[8,16], size=5000, varx='Mc', region=[[0.26,0.47], [0.2,0.25]], noise=1, varall=True, seed=2)
# + colab={} colab_type="code" id="0Nh_F4i6XjLn"
Mc = mutest[0][:,0]   # chirp mass of each test signal
nu = mutest[0][:,1]   # symmetric mass ratio
snr = mutest[0][:,4]  # signal-to-noise ratio (column meaning per varall layout)
# we will be sorting our plots by chirp mass
idx = np.argsort(Mc)
# + [markdown] colab_type="text" id="8UUlvVB2TaI2"
# We need more factories to compare the output of our network with actual posteriors. These draw parameters and make signals in the same parameter region, but without noise.
# + colab={} colab_type="code" id="9we6HUsgWPJa"
likeset_Mc = lambda: truebayes.roman.syntrain(snr=[8,16], size=100000, varx='Mc', region=[[0.26,0.47], [0.2,0.25]], noise=0)
# + colab={} colab_type="code" id="m_cRjMJvF8VJ"
likeset_nu = lambda: truebayes.roman.syntrain(snr=[8,16], size=100000, varx='nu', region=[[0.26,0.47], [0.2,0.25]], noise=0)
# + colab={} colab_type="code" id="Flo5pdBFNnmf"
likeset_2 = lambda: truebayes.roman.syntrain(snr=[8,16], size=100000, varx=['Mc','nu'], region=[[0.26,0.47], [0.2,0.25]], noise=0)
# + [markdown] colab_type="text" id="0CBGp7FUKcu4"
# ### Plot 1-D histograms from 1-D and 2-D networks
# + [markdown] colab_type="text" id="EB11T96uXrWc"
# Evaluate 1D posterior histograms (using the actual likelihood, not the network) for the first 24 signals in the test set.
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="ejxRUjsXU5Be" outputId="7ea7e9f5-f36d-4c5e-d46e-18b08150723d"
# %%time
sl_Mc = truebayes.like.synlike(mutest[2][:24,:], likeset_Mc, iterations=10000000)
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="cj8SFtzfbFbm" outputId="eaf1b2db-afb4-4a89-c186-6c5fa35a65b8"
# %%time
sl_nu = truebayes.like.synlike(mutest[2][:24,:], likeset_nu, iterations=10000000)
# + [markdown] colab_type="text" id="jNZbPLa5VJuZ"
# Compare likelihood- and network-based histograms for chirp mass and mass ratio.
# + colab={"base_uri": "https://localhost:8080/", "height": 506} colab_type="code" id="LlfuvkRmdHYW" outputId="913cf051-a7e6-42a8-9bed-7f2a0ec44925"
truebayes.plot.plotgauss(*mutest, net=nrg2, varx='Mc', like=sl_Mc, twodim=True, istart=6)
# + colab={"base_uri": "https://localhost:8080/", "height": 506} colab_type="code" id="hIPe7YB9baqk" outputId="2f2ddb5a-ec3c-4e99-9bee-7ce50c5d6be1"
truebayes.plot.plotgauss(*mutest, net=nrg2, varx='nu', like=sl_nu, twodim=True, istart=6)
# + [markdown] colab_type="text" id="G1J21C9Aoux2"
# ### Plot 2-D histogram from 2-D network
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="T-hoysPRozhy" outputId="8839a3c9-63b5-4c34-ac92-f183ad9f0d98"
# %%time
sl2 = truebayes.like.synlike(mutest[2][:24,:], likeset_2, iterations=10000000)
# + colab={"base_uri": "https://localhost:8080/", "height": 506} colab_type="code" id="mH62Ng_iZt2V" outputId="94f49db6-1a44-4ccf-e971-699a624239b6"
truebayes.plot.makecontour(*mutest, net=nrg2, like=sl2, istart=6)
# + [markdown] colab_type="text" id="yz6q7djCU7LS"
# ### Compare 1D and 2D means and variances.
# + [markdown] colab_type="text" id="J19FXL0LXFTq"
# We use the training-set factories defined above to compute likelihood-based conditional means and std deviations.
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="FKjsRxUhfS_q" outputId="9827beff-6dc0-4e1d-8fc6-d73feab48a02"
# %%time
lm, le = truebayes.like.synmean(mutest[2], likeset_Mc, iterations=10000000)
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="vEZX_GIHWAQr" outputId="603b0bb3-c4a0-490e-a771-0303e6a7dd09"
# %%time
lm_nu, le_nu = truebayes.like.synmean(mutest[2], likeset_nu, iterations=10000000)
# + colab={"base_uri": "https://localhost:8080/", "height": 71} colab_type="code" id="s012pgi0XDXO" outputId="34018f35-1ca8-4e6c-981c-79730426fcbb"
# %%time
lm2, le2, lc2 = truebayes.like.synmean(mutest[2], likeset_2, iterations=10000000)
# + [markdown] colab_type="text" id="mjYaLBTiXL0z"
# By contrast, these are the predictions of the PERCIVAL network.
# + colab={} colab_type="code" id="_-aieaOfXPf_"
nm2, ne2, nc2 = truebayes.loss.netmeanGn2(mutest[2], net=nrg2)
# + [markdown] colab_type="text" id="ryI2dBPSXVt9"
# Let's plot! We start with the differences between the predicted and actual conditional means.
# + colab={"base_uri": "https://localhost:8080/", "height": 389} colab_type="code" id="6lE9MH7A03ha" outputId="bcc01069-10b9-4d0f-9862-4e9917609523"
# Scatter of (network - likelihood) conditional-mean differences vs chirp mass
pp.figure(figsize=(12,5))
idx = np.argsort(Mc)
pp.subplot(1,2,1)
pp.plot(Mc[idx], nm2[idx,0] - lm2[idx,0], 'x', label='net2D - like', alpha=0.2)
pp.xlabel('Mc'); pp.ylabel('delta Mc'); pp.legend()
pp.axis(ymin=-0.1,ymax=0.1)
pp.subplot(1,2,2)
pp.plot(Mc[idx], nm2[idx,1] - lm2[idx,1], 'x', label='net2D - like', alpha=0.2)
pp.xlabel('Mc'); pp.ylabel('delta nu'); pp.legend()
pp.axis(ymin=-0.4,ymax=0.4)
pp.tight_layout()
# + [markdown] colab_type="text" id="1hfr1F4rWVV6"
# We continue with the fractional error in the predicted standard deviations vs. the actual values.
# + colab={"base_uri": "https://localhost:8080/", "height": 389} colab_type="code" id="vhAMDHv51MRt" outputId="007ddc42-86a4-4adf-f840-9ebf42bc2bf7"
# Fractional error of network-predicted std deviations vs likelihood values
pp.figure(figsize=(12,5))
idx = np.argsort(Mc)
pp.subplot(1,2,1)
pp.plot(Mc[idx], (ne2[idx,0] - le2[idx,0])/le2[idx,0], 'x', label='net2D - like', alpha=0.2)
pp.xlabel('Mc'); pp.ylabel('frac delta stderr Mc'); pp.legend(); pp.axis(ymax=4)
pp.subplot(1,2,2)
pp.plot(Mc[idx], (ne2[idx,1] - le2[idx,1])/le2[idx,1], 'x', label='net2D - like', alpha=0.2)
pp.xlabel('Mc'); pp.ylabel('frac delta stderr nu'); pp.legend(); pp.axis(ymax=4)
pp.tight_layout()
# + [markdown] colab_type="text" id="JXCquLwyW2rt"
# Last, we compare correlation values.
# + colab={"base_uri": "https://localhost:8080/", "height": 320} colab_type="code" id="91ZhUOzmZ5i0" outputId="2ba362ac-a877-4954-8740-92495d78c8e5"
# Normalize covariances to correlation coefficients before comparing
ncov = nc2 / ne2[:,0] / ne2[:,1]
lcov = lc2 / le2[:,0] / le2[:,1]
pp.plot(Mc[idx], ncov[idx] - lcov[idx], '.', label='net2D - like', alpha=0.2)
pp.xlabel('Mc'); pp.ylabel('frac delta cov Mc eta'); pp.legend(); pp.axis(ymin=-0.5, ymax=0.5)
| notebooks/Percival.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# <div class="contentcontainer med left" style="margin-left: -50px;">
# <dl class="dl-horizontal">
# <dt>Title</dt> <dd> HeatMap Element (radial) </dd>
# <dt>Dependencies</dt> <dd>Matplotlib</dd>
# <dt>Backends</dt> <dd><a href='./RadialHeatMap.ipynb'>Matplotlib</a></dd> <dd><a href='../bokeh/RadialHeatMap.ipynb'>Bokeh</a></dd>
# </dl>
# </div>
# +
import numpy as np
import pandas as pd
import holoviews as hv
from holoviews import opts
# Use the matplotlib backend, render as SVG, and enlarge HeatMap figures
hv.extension('matplotlib')
hv.output(fig='svg')
opts.defaults(opts.HeatMap(fig_size=250))
# -
# A radial ``HeatMap`` is well suited to discover **periodic patterns** and **trends** in **time series** data and other cyclic variables. A radial HeatMap can be plotted simply by activating the ``radial`` plot option on the ``HeatMap`` element.
#
# Here we will create a synthetic dataset of a value varying by the hour of the day and day of the week:
# +
# One sample per hour over 31 days
days = 31
hours = 24
size = days*hours
def generate_hourly_periodic_data(x):
    """Synthesize a signal with weekly and daily sinusoidal components
    plus uniform random noise, evaluated at the hour indices in *x*."""
    weekly_component = np.sin(x * 2 * np.pi / (24 * 7))
    daily_component = np.sin(x * 2 * np.pi / 24)
    jitter = np.random.random(size=x.size)
    return weekly_component + daily_component + jitter
x = np.linspace(0, size, size)
y = generate_hourly_periodic_data(x)
# Hourly timestamps; derive hour-of-day (radial segments) and day (annulars)
date_index = pd.date_range(start="2017-10-01", freq="h", periods=size)
kdim_segment = date_index.strftime("%H:%M")
kdim_annular = date_index.strftime("%A %d")
df = pd.DataFrame({"values": y, "hour": kdim_segment, "day": kdim_annular}, index=date_index)
# -
# As with a regular ``HeatMap`` the data should consist of two index variables or key dimensions and one or more value dimensions. Here we declare the 'hour' and 'day' as the key dimensions. For a radial HeatMap to make sense the first key dimension, which will correspond to the radial axis, should be periodic. Here the variable is 'hour', starting at midnight at the top:
hv.HeatMap(df, ["hour", "day"]).opts(radial=True)
# The resulting plot is quite bare so we may want to customize it, there are a number of things we can do to make the plot clearer:
#
# 1. Increase the inner padding with the ``radius_inner`` option.
# 2. Increase the number of ticks along the radial axis using ``xticks``
# 3. Add radial separator marks with the ``xmarks`` option.
# 4. Change the colormap using the ``cmap`` style option.
# Customized version: viridis colormap, larger inner radius, more ticks/marks
hv.HeatMap(df, ["hour", "day"]).opts(
    opts.HeatMap(cmap='viridis', radial=True, radius_inner=0.2, xmarks=8, xticks=8, ymarks=4))
| examples/reference/elements/matplotlib/RadialHeatMap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# The turtle Module
import turtle
# +
bob = turtle.Turtle()
# For loop: trace a 100-unit square with four 90-degree left turns
for i in range(4):
    bob.fd(100) # move forward
    bob.lt(90) # turn left
turtle.home()
# +
# Encapsulation of the for loop
def square(t,sz):
    """Draw a square with side length sz using turtle t."""
    sides_remaining = 4
    while sides_remaining > 0:
        t.fd(sz)
        t.lt(90)
        sides_remaining -= 1
square(bob,100)
alice = turtle.Turtle()  # a second turtle, drawing a larger square
square(alice,200)
# +
# Generalization
def polygon(t, n, length):
    """Draw a regular n-sided polygon with the given side length using turtle t."""
    exterior_angle = 360 / n
    side = 0
    while side < n:
        t.fd(length)
        t.lt(exterior_angle)
        side += 1
# polygon(bob, 7, 70) # this would work but it's better to do ...
polygon(bob, n=7, length=70) # i.e. use 'keyword arguments' for readability
# +
# Interface Design
import math
def circle_unclean(t, r):
    """Approximate a circle of radius r with a fixed 50-sided polygon.

    'Unclean' because the caller may need to know about the hard-coded
    segment count to get good results at other scales.
    """
    perimeter = 2 * math.pi * r
    segments = 50
    polygon(t, segments, perimeter / segments)
def circle_clean(t, r):
    """Approximate a circle of radius r, scaling the segment count with size.

    Cleaner interface: the number of segments is derived from the
    circumference (~3 units per segment), so callers need not tune it.
    """
    perimeter = 2 * math.pi * r
    segments = int(perimeter / 3) + 1
    polygon(t, segments, perimeter / segments)
# circle_clean has a cleaner interface because there
# the user doesn't need to understand the implementation details, so
# they could modify n (reduce for small circles and increase for large)
circle_clean(alice,200)
# +
# Refactoring (READ THE TEXT, THIS IS A GOOD CONCEPT)
# We can't use polygon or circle to draw an arc.
# So, we step back on our definition of polygon
# and instead define polyline as the actual starting point
# to derive polygon from
def polyline(t, n, length, angle):
    """Draw n segments of the given length, turning angle degrees left after each."""
    drawn = 0
    while drawn < n:
        t.fd(length)
        t.lt(angle)
        drawn += 1
# Rewrite polygon
def polygon(t, n, length):
    """Draw a regular n-gon by delegating to polyline with the exterior angle."""
    polyline(t, n, length, 360.0 / n)
# Write arc
def arc(t, r, angle):
    """Draw an arc of radius r spanning angle degrees, approximated by
    short straight segments (~3 units each) via polyline."""
    arc_len = 2 * math.pi * r * angle / 360
    segments = int(arc_len / 3) + 1
    polyline(t, segments, arc_len / segments, float(angle) / segments)
# Rewrite circle
def circle(t, r):
    """Draw a full circle of radius r as a 360-degree arc."""
    arc(t, r, 360)
# This process—rearranging a program to improve interfaces
# and facilitate code reuse—is called refactoring.
# In this case, we noticed that there was similar code in arc
# and polygon, so we “factored it out” into polyline.
# -
turtle.done() # lets us close the Python Turtle Graphics window
# +
# A Development Plan
# 1. Write simple code: each functionality in baby steps
# 2. Encapsulate coherent pieces into a function with suitable name
# 3. Generalize the function by adding parameters
# 4. Repeat 1-3 to obtain segmented code
# 5. Look for opportunities to refactor similar parts of the code
# +
# docstring: An important part of interface design
def polyline_with_documentation(t, n, length, angle):
    """Draw n line segments of the given length, turning left by *angle*
    degrees between them. t is a turtle.
    """
    segment = 0
    while segment < n:
        t.fd(length)
        t.lt(angle)
        segment += 1
# Always check the doc strings of functions.
import numpy
# numpy.median?
# Or just visit: https://numpy.org/doc/stable/reference/generated/numpy.median.html
# +
# Debugging
# the contract: the caller agrees to provide certain parameters
# and the function agrees to do certain work
# preconditions: polyline requires that t has to be a Turtle etc.
# postconditions: the function's responsibilities e.g. draw lines
# +
# #%run -i 'mypolygon.py'
#turtle.done()
| ThinkPython2e/Chapter4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="L3KIZndDdgt4" colab_type="code" outputId="4a7c5d11-0d13-4523-ca40-b28330450912" colab={"base_uri": "https://localhost:8080/", "height": 151}
# !pip install mxnet-cu100
# + [markdown] id="1YN4PT_Idstj" colab_type="text"
# #MXNet (Using Gluon)
# + id="CLGHa8i6WEaA" colab_type="code" colab={}
import mxnet as mx
from mxnet import nd, autograd, gluon
from mxnet.gluon import nn
import matplotlib.pyplot as plt
# + [markdown] id="YT12DVVY--hJ" colab_type="text"
# ##First we need to understand how to do convolution in gluon
# + [markdown] id="XSDMJUkd_UsJ" colab_type="text"
# ####Getting a sample image from web
# + id="82UQ60sO_Fbb" colab_type="code" outputId="1170c6c5-80fc-4595-f1a5-dc1fbf0cff36" colab={"base_uri": "https://localhost:8080/", "height": 34}
import urllib.request
urllib.request.urlretrieve('https://i.stack.imgur.com/B2DBy.jpg', 'B2DBy.jpg')
# + id="z_t0Blm529jz" colab_type="code" outputId="782d1a1c-25ca-4f7b-c0c5-8d4802603b6d" colab={"base_uri": "https://localhost:8080/", "height": 319}
image = mx.image.imread('B2DBy.jpg', flag = 0).astype('float32').as_in_context(mx.gpu())
# flag = 0 to get a grayscale image with channel 1, flag = 1 to get a coloured image with channel 3
# this time we are not using .flatten() to load the image,
# because we need the number of channels (1 in this case) in order to feed the image to a gluon cnn layer.
plt.imshow(image.flatten().asnumpy(), cmap = 'gray')
plt.show()
print(image.shape)
image = nd.moveaxis(image, 2, 0)
# converting image shape to channel-first format,
# because gluon cnn works much faster with this layout,
# so by default mxnet gluon uses layout = "NCHW" (No of images, No of channels, Height, Width)
# Although you can set layout to "NHWC" if you want to feed images in channel-last layout
print(image.shape)
image = image.expand_dims(0)
# expand_dims adds 1 more dimension at the provided axis.
# SO WHY ARE WE DOING THAT??
# Because gluon layers take input with shape (batch_size, *shape of image (in either NCHW (default) or NHWC (which you need to set))).
# and in our case batch_size is 1 (as we have only one image for this example)
print(image.shape)
# + id="aZjgJoGI_w2H" colab_type="code" colab={}
conv = nn.Conv2D(channels = 1, kernel_size = (3, 3))
conv.initialize(mx.init.Xavier(), ctx = mx.gpu())
# channels = 1 means that we would like to get 1 channel of output,
# so the output shape would be like (batch_size, channels, height, width).
# think of channels as the number of kernels we want to have (yes, the number
# of kernels determines how many channels the next layer will have):
# for n kernels we get n channels in the next layer
# + [markdown] id="_i7JNHbWCT1b" colab_type="text"
# ###Now lets run this conv layer on our image
# + id="-yiStPjACSVg" colab_type="code" outputId="0af24fed-8ae6-4428-87a5-75c1fd520150" colab={"base_uri": "https://localhost:8080/", "height": 286}
# Apply the 1-channel convolution and display the filtered image
output = conv(image)
plt.imshow(output[0][0].asnumpy(), cmap = 'gray')
output.shape
# as we passed channels = 1, we get shape (==, 1, ==, ==)
# + colab_type="code" id="sA_op-XINXiB" colab={}
# now let's try channels = 3
conv = nn.Conv2D(channels = 3, kernel_size = (3, 3))
conv.initialize(mx.init.Xavier(), ctx = mx.gpu())
# + id="7leKzKKUNgqX" colab_type="code" outputId="12050cc9-d311-4b00-8957-8ef648e09b34" colab={"base_uri": "https://localhost:8080/", "height": 790}
output = conv(image)
plt.imshow(output[0][0].asnumpy(), cmap = 'gray')
plt.show()
plt.imshow(output[0][1].asnumpy(), cmap = 'gray')
plt.show()
plt.imshow(output[0][2].asnumpy(), cmap = 'gray')
plt.show()
# since we passed 3 channels it will generate 3 randomly-initialized filters
# instead of 1, and return 3 "convoluted" images, so the output has 3 channels.
output.shape
# as we passed channels = 3 we get 3 channels in the output layer.
# + [markdown] id="lwY7lgW9EVqp" colab_type="text"
# ###So now we understand how to create single layer of cnn in gluon, lets create a model filled with stack of cnn layers.
# + [markdown] id="Ul-UtBKWEkug" colab_type="text"
# ###But first lets get our data
# + id="huuK9vLaWEcO" colab_type="code" colab={}
def transform(data, label):
    """Convert one CIFAR sample to float32: swap axes 0 and 2 of the image
    (HWC -> CHW-style layout) and scale pixel values into [0, 1]."""
    pixels = data.swapaxes(0, 2).astype('float32') / 255.0
    return pixels, label.astype('float32')
# CIFAR-10 train/test datasets with the transform applied per sample
train_cifar = gluon.data.vision.CIFAR10(train = True, transform = transform)
test_cifar = gluon.data.vision.CIFAR10(train = False, transform = transform)
# + id="qfy3gUtQH0zU" colab_type="code" colab={}
classes = {0: 'airplane', 1: 'automobile', 2: 'bird', 3: 'cat', 4: 'deer', 5: 'dog', 6: 'frog', 7: 'horse', 8: 'ship', 9: 'truck'}
# + id="sAXcT8nSWEYo" colab_type="code" outputId="c8942fa2-a7eb-4e96-e04f-9a1bbf664846" colab={"base_uri": "https://localhost:8080/", "height": 34}
image, label = train_cifar[456]  # a single transformed sample
print(image.shape, label.shape)
# + id="SIQSLxiFWEXg" colab_type="code" outputId="fdbff571-345a-401b-b9cf-f597766b694f" colab={"base_uri": "https://localhost:8080/", "height": 608}
# we need to swap axes because matplotlib accepts images with layout NHWC, not NCHW
fig = plt.figure(figsize=(10, 10))
for i in range(16):
    fig.add_subplot(4, 4, i + 1)
    plt.imshow(train_cifar[i][0].swapaxes(0, 2).asnumpy())
    plt.title(classes[train_cifar[i][1]])
    plt.axis('off')
plt.show()
# the images might look bad and blurry, but that's fine for a deep learning model.
# + [markdown] id="oF-WKS-xX73T" colab_type="text"
# ##Creating data loader for training and testing
# + id="IVmMwE94WEWY" colab_type="code" colab={}
# Mini-batch loaders; shuffle only the training split
batch_size = 64
training_data = gluon.data.DataLoader(train_cifar, batch_size = batch_size, shuffle = True)
testing_data = gluon.data.DataLoader(test_cifar, batch_size = batch_size, shuffle = False)
# + [markdown] id="5HUXvMyHYKoM" colab_type="text"
# ##Building the model
# + id="u7Vdh54QWESq" colab_type="code" colab={}
# Three conv + average-pool stages, then a dropout-regularized MLP head;
# the final Dense(10) outputs one logit per CIFAR-10 class (the softmax is
# applied inside the loss function below).
model = nn.HybridSequential()
model.add(nn.Conv2D(channels = 32, kernel_size = 3, activation = 'relu'))
model.add(nn.AvgPool2D())
model.add(nn.Conv2D(channels = 64, kernel_size = 3, activation = 'relu'))
model.add(nn.AvgPool2D())
model.add(nn.Conv2D(channels = 128, kernel_size = 3, activation = 'relu'))
model.add(nn.AvgPool2D())
model.add(nn.Flatten())
model.add(nn.Dense(256, 'relu'))
model.add(nn.Dropout(0.5))
model.add(nn.Dense(128, 'relu'))
model.add(nn.Dropout(0.5))
model.add(nn.Dense(10))
model.initialize(mx.init.Xavier(), mx.gpu())
model.hybridize()
# + id="isGaPKDajr_s" colab_type="code" outputId="995cbfd9-aa6d-4a27-e2b6-bded4759821b" colab={"base_uri": "https://localhost:8080/", "height": 252}
model
# + id="a909gMC5jXqe" colab_type="code" colab={}
model(image.expand_dims(0).as_in_context(mx.gpu()))
# + [markdown] id="HW4Jzb46EyyD" colab_type="text"
# ##Defining our objective function
# + id="avO1uf5fItMV" colab_type="code" colab={}
objective = gluon.loss.SoftmaxCrossEntropyLoss()
# + [markdown] id="3qr1NTDkE7Kh" colab_type="text"
# ##Defining our parameters optimizer
# + id="hvFKa4WuI9c-" colab_type="code" colab={}
optimizer = gluon.Trainer(model.collect_params(), 'adam', {'learning_rate': 0.001})
# + [markdown] id="lbHoiGM3ZrQY" colab_type="text"
# ##Defining our accuracy function
# + id="-3osTW97ZuHR" colab_type="code" colab={}
metric = mx.metric.Accuracy()
# + [markdown] id="UlkEn5XxFPkd" colab_type="text"
# ##Training the model
# + id="kVao6lbWCizF" colab_type="code" outputId="451605b6-5600-4768-d1ca-b372ff55930b" colab={"base_uri": "https://localhost:8080/", "height": 370}
epochs = 10
losses = []
accs = []
import time
start = time.time()
for epoch in range(epochs):
    batch_begin = time.time()
    cumulative_loss = 0
    metric.reset()
    for batches, (features, labels) in enumerate(training_data, 1):
        features = features.as_in_context(mx.gpu())
        labels = labels.as_in_context(mx.gpu())
        with autograd.record():  # record ops so backward() can compute grads
            output = model(features)
            loss = objective(output, labels)
        loss.backward()
        optimizer.step(batch_size)
        cumulative_loss += loss.mean()
        metric.update(labels, output)
    acc = metric.get()[1]
    # NOTE(review): the appended value is the epoch *total* loss, while the
    # printed value is divided by the batch count — confirm which one the
    # plot() helper should show.
    losses.append(cumulative_loss.asscalar())
    accs.append(acc)
    print(f'Epoch: {epoch} | Loss: {cumulative_loss.asscalar()/(batches):.5f} | Accuracy: {acc:.5f}')
    print(f'Took: {time.time() - batch_begin}')
print(f'Total time taken: {time.time() - start}')
# + [markdown] id="JJOc7DOepwEE" colab_type="text"
# ##Lets see test accuracy
# + id="dvvQC3oQYu5A" colab_type="code" colab={}
# Evaluate accuracy over the held-out test split
metric.reset()
for features, labels in testing_data:
    features = features.as_in_context(mx.gpu())
    labels = labels.as_in_context(mx.gpu())
    predictions = model(features)
    metric.update(labels, predictions)
print(f'Test Accuracy: {metric.get()[1]:.5f}')
# + id="hIBgjWVBYu3t" colab_type="code" colab={}
def plot(losses = losses, accs = accs, model = model):
    """Plot the training loss and accuracy curves, then show a 4x4 grid of
    test images annotated with target vs predicted class.

    NOTE(review): the default arguments are bound to the module-level
    objects at definition time; rebinding those globals later will not be
    reflected here.
    """
    plt.plot(losses, 'r')
    plt.title('Loss during Training')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.show()
    plt.plot(accs, 'g')
    plt.title('Accuracy during Training')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.show()
    fig = plt.figure(figsize = (15, 15))
    for i in range(16):
        features, labels = test_cifar[i]
        features = features.as_in_context(mx.gpu()).expand_dims(0)
        pred = model(features).argmax(1).asscalar()
        fig.add_subplot(4, 4, i + 1)
        plt.imshow(features[0].swapaxes(0, 2).asnumpy())
        plt.title(f'Target: {classes[labels]}, Predicted: {classes[pred]}')
        plt.axis('off')
    plt.show()
# + id="mkorWu1EYu2b" colab_type="code" colab={}
plot()
| MXNet Gluon Implementations/09. Convolutional_Neural_Networks_MXNet_(Using_Gluon).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/deepakims/fastai/blob/master/AI6_Python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="3wM-evXV-n_R" colab_type="text"
# # StackOverflow :- So you want to write *_Pythonic code_* in *_Python_*
# ## Learn These
# 1. Lambda and Comphresions
# 2. Decorators
# 3. Context Managers
# 4. Iterators and Generators
# 6. Functools and Itertools
# 5. Data Classes
# 7. Some Sweet Python tips and tricks
# + [markdown] id="SFNSd1dc-n_c" colab_type="text"
#
# + [markdown] id="fF8FKHcy-n_g" colab_type="text"
# # _Lambdas and Comprehensions_
# + [markdown] id="oAHs9rP9-n_k" colab_type="text"
# #### Functions in python are first class Objects that means you can assign them to variable, store them in data structure, pass them as a parameter to other functions and even return them from other function
# + id="NaYY3Hdj-n_m" colab_type="code" outputId="68658a99-bb37-4ef8-9356-d7c5ee9a17fe" colab={}
# addition function
def add(x, y):
    return x+y
print (f" function is add(5,6) = {add(5,6)}")
# you can assign functions to other variables
myAdd = add
# wait — you can even delete the add name and myAdd still points to the underlying function object
del add
print (f" function is myAdd(5,6) = {myAdd(5,6)}")
# functions have their own set of attributes
print(f"{myAdd.__name__}")
# to see a complete list of attributes of a function, type dir(myAdd) in the console
# + id="4Urg-3hX-n_1" colab_type="code" outputId="125885c7-8c69-4c11-cb1c-87547d3783d1" colab={}
# functions as data structures: store bound string methods in a list
List_Funcs = [str.upper , str.lower , str.title]
for f in List_Funcs:
    print (f , f("aI6-saturdays"))
# + [markdown] id="N86aCwnE-n_8" colab_type="text"
# ### So lambdas are a sweet Little anonymous Single-Expression functions
# + id="N6Gwkv0o-n__" colab_type="code" outputId="b873ed6f-f151-48eb-ce6a-72175984d8d9" colab={}
add_lambda = lambda x , y : x+y # a lambda automatically returns the value of the expression after the colon
print(f"lambda value add_lambda(2,3)= {add_lambda(2,3)}") # you call a lambda like a normal function
# + [markdown] id="VwjnJw1g-oAG" colab_type="text"
# ## You :- But Wait it's an anonymous function and how can you give it a name
# ## StackOverflow :- Relax, Searching for another example
# + id="bteU2cL0-oAI" colab_type="code" outputId="967335bc-ef73-463b-a4bb-f58652f6461c" colab={}
def someFunc(func):
    """Apply *func* to a fixed slogan string and return the result."""
    return func("We will democratize AI ")
# here the lambda function is passed to a normal function;
# the lambda is anonymous and its parameter my_sentence = "We will democratize AI ",
# so we are appending some text of ours and returning the string
someFunc(lambda my_sentence: my_sentence+"by teaching everyone AI")
# + id="vvjOqLk4-oAU" colab_type="code" colab={}
# here is one more example: sort tuples by their second element
tuples = [(1, 'd'), (2, 'b'), (4, 'a'), (3, 'c')]
sorted(tuples, key=lambda x: x[1])
# + id="bJsO9ehE-oAa" colab_type="code" outputId="0e9e542d-50aa-45e7-cc91-7ef3748e7519" colab={}
# list comprehensions: filter keeps matching items; the if/else form maps all items
l = [ x for x in range(20)]
even_list = [x for x in l if x%2==0]
even_list_with_Zero = [x if x%2==0 else 0 for x in l ]
l , even_list ,even_list_with_Zero
# + id="_g1c3N1l-oAg" colab_type="code" outputId="716f8319-8208-43b6-d94b-271dbba74313" colab={}
# dictionary comprehension; flipping assumes the values are unique and hashable
d = {x: x**2 for x in range(2,6)}
flip_key_value = {value:key for key,value in d.items()}
d , flip_key_value
# + [markdown] id="kLfBxjMg-oAl" colab_type="text"
# # Decorators
# Python’s decorators allow you to extend and modify the
# behavior of a callable (functions, methods, and classes) without permanently modifying the callable itself.
#
# Any sufficiently generic functionality you can tack on to an existing
# class or function’s behavior makes a great use case for decoration.
# This includes the following:
#
# 1. logging
# 2. enforcing access control and authentication
# 3. instrumentation and timing functions
# 4. rate-limiting
# 5. caching, and more
#
# Imagine that you some 50 functions in your code. Now that all functions are working you being a great programmer thought of optimizing each function by checking the amount of time it takes and also you need to log the input/output of few functions. what are you gonna do ?
#
# Without decorators you might be spending the next three days modifying each of those 50 functions and clutter them up with your manual logging calls. Fun times, right?
#
# + id="76s-SiLw-oAm" colab_type="code" outputId="c0e3e099-7e40-4f97-d49e-afe3876fea5d" colab={}
def my_decorator(func):
    return func # it's simple: it takes a function as its parameter and returns it unchanged
def someFunc():
    return "Deep learning is fun"
someFunc = my_decorator(someFunc) # it is similar to i = i + 1 (rebinds the name)
print(f" someFunc value = {someFunc()}")
# + id="cfBvsw3V-oAr" colab_type="code" outputId="1aaa6ff4-9424-4f12-8089-86b476cfa803" colab={}
# now just to add syntactic sugar to the code so that we can brag how easy and terse python code is,
# we write it this way
def my_decorator(func):
    return func
@my_decorator # the awesomeness lies here: the decorator line can be used as a toggle switch
def someFunc():
    return "Deep learning is fun"
print(f" someFunc value = {someFunc()}")
# + [markdown] id="3A_eLkId-oAx" colab_type="text"
# ## Stackoverflow :- Now that you got a little taste of Decorators let's write another decorator that actually does something and modifies the behavior of the decorated function.
# + id="bsE_eQh7-oAy" colab_type="code" outputId="edf3b48a-ab0f-4607-8860-566de7e3dd25" colab={}
# This blocks contains and actual implementation of decorator
import time
#import functools
def myTimeItDeco(func):
    # NOTE(review): without @functools.wraps(func) the decorated function's
    # __name__/__doc__ are the wrapper's — left commented out deliberately,
    # since the markdown below uses that as a teaching point.
    #@functools.wraps(func)
    def wrapper(*args,**kwargs):
        starttime = time.time()
        call_of_func = func(*args,**kwargs) # works because nested functions remember their enclosing state (closure)
        function_modification = call_of_func.upper()
        endtime = time.time()
        return f" Executed output is {function_modification} and time is {endtime-starttime} "
    return wrapper
@myTimeItDeco
def myFunc(arg1,arg2,arg3): # some arguments of no use, just to show how to pass them through the wrapper
    """Documentation of a obfuscate function"""
    time.sleep(2) # just to simulate some complex calculation
    return "You had me at Hello world"
myFunc(1,2,3) , myFunc.__doc__ , myFunc.__name__
# + [markdown] id="XpTKYoF_-oA4" colab_type="text"
# ## You :- Why didn't I got the doc and the name of my function. Hmmm....
# ## StackOverflow :- Great Programmers use me as there debugging tool, so use It.
# Hints functools.wrap
# + [markdown] id="r5p-3VY4-oA5" colab_type="text"
# ### StackOverflow : - Applying Multiple Decorators to a Function (This is really fascinating as it's gonna confuse you)
# + id="0vKAryKF-oA7" colab_type="code" outputId="0d6654c4-3688-4e44-eb41-953b9a7a9722" colab={}
def strong(func):
    """Decorator: wrap the decorated function's output in <strong> tags."""
    def wrapper():
        pieces = ['<strong>', func(), '</strong>']
        return ''.join(pieces)
    return wrapper
def emphasis(func):
    """Decorator: wrap the decorated function's output in <em> tags."""
    def wrapper():
        inner = func()
        return '<em>' + inner + '</em>'
    return wrapper
@strong
@emphasis
def greet():
    return 'Hello!'
greet()
# this is your assignment to understand; hint: decorators apply bottom-up, i.e. strong(emphasis(greet))()
# + id="UZoXE-fp-oBE" colab_type="code" colab={}
#Disclaimer: execute this at your own risk
#Only 80's kids will remember this
import dis
dis.dis(greet)  # disassemble the doubly-decorated function's bytecode
# + [markdown] id="cs6RzfZG-oBM" colab_type="text"
# # Context Managers
# + id="_LOsIZVM-oBN" colab_type="code" colab={}
# let's open a file and write some thing into it
# This is the manual pattern: try/finally guarantees the file is closed
# even if write() raises.  The `with` statement in the next cells replaces it.
file = open('hello.txt', 'w')
try:
    file.write('Some thing')
finally:
    file.close()
# + [markdown] id="01pf0d8w-oBU" colab_type="text"
# ## You :- Ok, now that I have written something into the file I want to read it, but try and finally again? That sucks. There should be some other way around.
#
# ## StackOverflow :- Context managers to the rescue
# + id="gm0mKzJr-oBX" colab_type="code" outputId="6762ff43-448d-41f3-ed3a-263fc64e1edc" colab={}
# `with` opens the file and closes it automatically when the block exits.
with open("hello.txt") as file:
    print(file.read())
# + [markdown] id="DpaaZoDP-oBd" colab_type="text"
# ## You :- That's pretty easy but what is *_with_*
# ## Stackoverflow :-
# It helps python programmers like you to simplify some common resource management patterns by abstracting their functionality
# and allowing them to be factored out and reused.
#
# So in this case you don't have to open and close file all done for you automatically.
# + id="Re35stdH-oBe" colab_type="code" colab={}
# A good pattern for the `with` use case is lock management.
import threading  # BUG FIX: threading was never imported in this cell

some_lock = threading.Lock()
# Harmful: manual acquire/release pairs are easy to get wrong.
# BUG FIX: the try/with bodies below contained only comments, which is a
# SyntaxError in Python -- `pass` placeholders make the cell executable.
some_lock.acquire()
try:
    pass  # Do something complicated because you are coder and you love to do so ...
finally:
    some_lock.release()
# Better: `with` acquires and releases automatically, even on exceptions.
with some_lock:
    pass  # Do something awesome Because you are a Data Scientist...
# + [markdown] id="W5XZgbc0-oBh" colab_type="text"
# ## You :- But I want to use *_with_* for my own use case how do I do it?
#
# ## StackOverflow :- Use Data Models and relax
# + id="BeixqJiM-oBi" colab_type="code" outputId="72b87981-e75d-457b-d83f-7a41c9d6912e" colab={}
# Python is a language full of hooks and protocols.
# MyContextManger follows the context-manager protocol: `with` calls
# __enter__ on entry and __exit__ on exit (even when the body raises).
class MyContextManger:
    """Context manager that opens a file for reading and closes it on exit."""

    def __init__(self, name):
        self.name = name

    def __enter__(self):
        # Acquire the resource: open the file and hand it to the `as` target.
        self.f = open(self.name, "r")
        return self.f

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release the resource regardless of whether the body raised.
        if self.f:
            self.f.close()
# Exercise the class: the file is opened by __enter__ and closed by __exit__.
with MyContextManger("hello.txt") as f:
    print(f.read())
# + [markdown] id="l76IHYZ0-oBn" colab_type="text"
# ## You :- It works but what are those parameters in __exit__ method
# ## Stackoverflow :- Google It !
# ## You :- But writing a class in python is hectic, I want to do functional Programming
# ## StackOverflow :- Use Decorators
# + id="te16IbXM-oBp" colab_type="code" outputId="84e8c26d-5ee2-4189-de41-d5cb33b1b8cf" colab={}
from contextlib import contextmanager
@contextmanager
def mySimpleContextManager(name):
    """Generator-based context manager: open *name* for reading, close on exit."""
    # BUG FIX: open() used to be inside the try block; if it failed, the
    # finally clause raised NameError on the unbound `f`, masking the real
    # error.  Opening before the try means there is nothing to close on failure.
    f = open(name, 'r')
    try:
        yield f
    finally:
        f.close()
# Same behaviour as the class version, but written as a generator function.
with mySimpleContextManager("hello.txt") as f:
    print(f.read())
# + [markdown] id="WK_zD0Ru-oBt" colab_type="text"
# ## You :- Ok, that's what I call pythonic code, but what is yield?
# ## Stackoverflow :- Hang On!
# + [markdown] id="xETmxZh6-oBv" colab_type="text"
# # Iterators and Generators
#
# An iterator is an object representing a stream of data; this object returns the data one element at a time. A Python iterator must support a method called __next__() that takes no arguments and always returns the next element of the stream. If there are no more elements in the stream, __next__() must raise the StopIteration exception. Iterators don’t have to be finite, though; it’s perfectly reasonable to write an iterator that produces an infinite stream of data.
#
# The built-in iter() function takes an arbitrary object and tries to return an iterator that will return the object’s contents or elements, raising TypeError if the object doesn’t support iteration. Several of Python’s built-in data types support iteration, the most common being lists and dictionaries. An object is called iterable if you can get an iterator for it.
# + id="YaGmTwXl-oBw" colab_type="code" outputId="8271b224-f7d5-4a35-bfc9-181e4484bcb6" colab={}
# Drive the iterator protocol by hand; the final next() raising
# StopIteration is the intended demonstration, not a mistake.
l = [1,2,3]
it = l.__iter__() ## same as iter(l)
it.__next__() ## gives 1
next(it) ## gives 2
next(it) ## gives 3
next(it) ## gives error StopIteration
# + id="PxPxG02z-oB1" colab_type="code" outputId="562369b2-fc08-4264-e821-d97e5a86faae" colab={}
# Re-implement a minimal range() to show __iter__/__next__.
class MyRange:
    """Iterator yielding start, start+1, ..., stop-1 (like range(start, stop))."""

    def __init__(self, start, stop):
        # Start one below, so the pre-increment in __next__ lands on `start`.
        self.start = start - 1
        self.stop = stop

    def __iter__(self):
        # The object is its own (single-pass) iterator.
        return self

    def __next__(self):
        self.start += 1
        if self.start >= self.stop:
            raise StopIteration()
        return self.start

for i in MyRange(2,10):
    print(i)
# + [markdown] id="rQ54E8qJ-oB4" colab_type="text"
# ## You :- Again a class
# ## StackOverflow :- OK, here's an easy way: use Generators
# They Simplify writing Iterators, kind of iterable you can only iterate over once. Generators do not store the values in memory, they generate the values on the fly so no storage is required.
# So you ask one value it will generate and spit it out
# + id="EHA78gz4-oB5" colab_type="code" outputId="e63e4ab2-011e-489a-bd51-c4c0155a4254" colab={}
def myRange(start, stop):
    """Generator clone of range(start, stop): yields start ... stop-1."""
    # The original's while-True/if/else collapses into the loop condition;
    # falling off the end of a generator is equivalent to `return`.
    while start < stop:
        yield start
        start += 1
for i in myRange(2,10):
    print(i)
# + [markdown] id="8dY6Eky_-oB8" colab_type="text"
# You’re doubtless familiar with how regular function calls work in Python or C. When you call a function, it gets a private namespace where its local variables are created. When the function reaches a return statement, the local variables are destroyed and the value is returned to the caller. A later call to the same function creates a new private namespace and a fresh set of local variables. But, what if the local variables weren’t thrown away on exiting a function? What if you could later resume the function where it left off? This is what generators provide; they can be thought of as resumable functions.
#
# Any function containing a yield keyword is a generator function; this is detected by Python’s bytecode compiler which compiles the function specially as a result.
#
# When you call a generator function, it doesn’t return a single value; instead it returns a generator object that supports the iterator protocol. On executing the yield expression, the generator outputs the value start , similar to a return statement. The big difference between yield and a return statement is that on reaching a yield the generator’s state of execution is suspended and local variables are preserved. On the next call to the generator’s __next__() method, the function will resume executing.
# + id="8bxDmoTc-oB-" colab_type="code" outputId="6ffedd39-bd42-4e5f-c77d-3de3fc3178a1" colab={}
# generator comprehension (note the parentheses, not brackets)
l = ( x for x in range(20))
l
# + id="IhZly899-oCB" colab_type="code" outputId="81e4e00c-20be-44b5-aa75-98e364a12f3f" colab={}
# Unpacking the generator materialises (and exhausts) it.
[*l]
# + [markdown] id="W8OlUd2p-oCG" colab_type="text"
# Let’s look in more detail at built-in functions often used with iterators.
#
# Two of Python’s built-in functions, map() and filter() duplicate the features of generator expressions:
#
# map(f, iterA, iterB, ...) returns an iterator over the sequence
#
# f(iterA[0], iterB[0]), f(iterA[1], iterB[1]), f(iterA[2], iterB[2]), ....
#
# filter(predicate, iter)
# returns an iterator over all the sequence elements that meet a certain condition, and is similarly duplicated by list comprehensions. A predicate is a function that returns the truth value of some condition; for use with filter(), the predicate must take a single value.
# + id="_WA65CNd-oCH" colab_type="code" outputId="7349aac4-1b96-4c12-a9b8-28992896a954" colab={}
# why did it return an empty list? think!
# (the generator `l` was already exhausted by the previous cell)
[*map(lambda x :x **2 , l)]
# + id="DXaURGRI-oCM" colab_type="code" outputId="70df2a21-19ff-4726-c40e-0e544d26e83f" colab={}
[*filter(lambda x :x%2!=0,l)]
# + [markdown] id="YzbLjJYR-oCQ" colab_type="text"
# ### zip(iterA, iterB, ...) takes one element from each iterable and returns them in a tuple:
# + id="LhPvvS04-oCS" colab_type="code" outputId="4f17e174-ad4f-48bf-dbac-dcbdba1535c5" colab={}
# zip pairs one element from each iterable: ('a', 1), ('b', 2), ('c', 3)
z = zip(['a', 'b', 'c'], (1, 2, 3))
for x , y in z:
    print (x,y)
# + [markdown] id="O9efL6lc-oCV" colab_type="text"
# # Functools and Itertools (This is going to blow your mind)
# + [markdown] id="ghvUljMG-oCX" colab_type="text"
# These two python modules are super helpful in writing Efficient Functional Code
# + id="rk5PgVVi-oCZ" colab_type="code" outputId="aade05f1-cdb9-470d-8f64-a609a298e57e" colab={}
# reduce folds the iterable with the binary function: here it sums 1..9 (45)
from functools import reduce
l = (x for x in range(1,10))
reduce(lambda x,y : x+y , l)
# + [markdown] id="A04HOkpV-oCb" colab_type="text"
# For programs written in a functional style, you’ll sometimes want to construct variants of existing functions that have some of the parameters filled in. Consider a Python function f(a, b, c); you may wish to create a new function g(b, c) that’s equivalent to f(1, b, c); you’re filling in a value for one of f()’s parameters. This is called “partial function application”.
#
# The constructor for partial() takes the arguments (function, arg1, arg2, ..., kwarg1=value1, kwarg2=value2). The resulting object is callable, so you can just call it to invoke function with the filled-in arguments.
# + id="wpta440N-oCc" colab_type="code" outputId="4fc89de8-cd06-46c0-b006-e776835c35af" colab={}
from functools import partial
def log(message, subsystem):
    """Write the contents of 'message' to the specified subsystem."""
    print('%s: %s' % (subsystem, message))
    ...
# partial pre-fills the `subsystem` keyword, giving a one-argument logger.
server_log = partial(log, subsystem='server')
server_log('Unable to open socket')
# + id="aeAa3aXg-oCe" colab_type="code" colab={}
from itertools import islice, takewhile, dropwhile
# An endless Fibonacci generator -- consumers slice off what they need.
def fib(x=0 , y=1):
    """Yield the Fibonacci sequence starting from x, y (never terminates)."""
    while True:
        yield x
        x, y = y, x + y
# + id="VOd3mWVQ-oCm" colab_type="code" outputId="46110a7b-b01e-45b7-c75c-81208be15cc8" colab={}
# First ten Fibonacci numbers via islice.
list(islice(fib(),10))
# + id="03-pjr1M-oCp" colab_type="code" outputId="145a2a9c-5fb7-4825-8bcc-80ef80c66110" colab={}
# takewhile stops at the first element failing the predicate.
list(takewhile(lambda x : x < 5 , islice(fib(),10)))
# + id="P0hJVZHX-oCt" colab_type="code" outputId="1bfbcbc1-73cf-4736-ca1d-2a1589a863d2" colab={}
# dropwhile skips elements until the predicate first fails, then yields the rest.
list(dropwhile(lambda x : x < 5 , islice(fib(),10)))
# + id="X9J3SfR2-oCv" colab_type="code" outputId="9f540607-962f-4b5c-d1a9-9f5444b04574" colab={}
# Combined: Fibonacci numbers in the half-open band [11, 211).
list(dropwhile(lambda x :x<11 , takewhile(lambda x : x < 211 , islice(fib(),15))))
# + [markdown] id="U5oCmp3B-oCz" colab_type="text"
# ### To read more about itertools https://docs.python.org/3.6/howto/functional.html#creating-new-iterators
# + [markdown] id="NpTW4w6--oC0" colab_type="text"
# # Some Python Tricks
# + id="3s9akuKw-oC1" colab_type="code" outputId="4442a189-3de0-489e-c145-ed9124310424" colab={}
# normal calculator program written as an if/elif chain
def calculator(operator , x , y):
    """Apply the named operator ("add"/"sub"/"div"/"mul") to x and y.

    Returns the string "unknown" for an unrecognised operator name.
    """
    if operator=="add":
        return x+y
    elif operator=="sub":
        return x-y
    elif operator == "div":
        return x/y
    elif operator=="mul":
        return x*y
    else :
        # BUG FIX: the fallback string was misspelled "unknow"
        return "unknown"
calculator("add",2,3)
# + id="ApyReI6U-oC3" colab_type="code" outputId="dcda17e2-2701-4e4e-a5ee-1a2e11a66e0e" colab={}
# Pythonic way: a dict maps each operator name straight to a callable.
calculatorDict = {
    "add": lambda a, b: a + b,
    "sub": lambda a, b: a - b,
    "mul": lambda a, b: a * b,
    "div": lambda a, b: a / b,
}
# .get() supplies a do-nothing fallback for unknown operator names.
calculatorDict.get("add", lambda a, b: None)(2, 3)
# + id="ZCmP903M-oC7" colab_type="code" outputId="e5fbf574-309d-40bd-cb7d-0e078fd0ae0d" colab={}
# Because x and y repeated in every lambda above, close over them once so
# the lambdas need no parameters of their own.
def calculatorCorrected(operator,x,y):
    """Dispatch `operator` over x and y; returns the string "None" if unknown."""
    dispatch = {
        "add": lambda: x + y,
        "sub": lambda: x - y,
        "mul": lambda: x * y,
        "div": lambda: x / y,
    }
    return dispatch.get(operator, lambda: "None")()
calculatorCorrected("add",2,3)
# + id="w2yGvUOH-oDC" colab_type="code" outputId="1ae36be6-9f4a-49b7-872f-4ad97e769552" colab={}
# How to merge multiple dictionaries
# On duplicate keys the right-most dict wins: key 3 takes y's value 5.
x = {1:2,3:4}
y = {3:5,6:7}
{**x,**y}
# + id="Nc6GjQSM-oDF" colab_type="code" outputId="ea434315-4fff-42f7-e359-be321ab0b7dc" colab={}
# how to merge multiple lists -- you guessed it correctly, unpacking again
a = [1,2,3]
b=[2,3,4,5]
[*a,*b]
# + id="lF6whETW-oDI" colab_type="code" outputId="5055e2ec-843c-4d7b-d000-fdf2206e68c2" colab={}
# Named tuple
from collections import namedtuple
from sys import getsizeof  # NOTE(review): imported but unused in this cell
# Define the Vector type and instantiate it in one expression.
vector = namedtuple("Vector" , ["x","y","z","k"])(11,12,212,343)
vector,vector[0], vector.y # fields are accessible by index and by name
# + id="YdD3x9Ks-oDL" colab_type="code" outputId="bf217749-e2ae-41d4-ee0e-225a9c11b2e2" colab={}
# how to manage a dictionary with counts
from collections import Counter
pubg_level3_bag = Counter()
kill = {"kar98":1 , "7.76mm":60}
pubg_level3_bag.update(kill)
print(pubg_level3_bag)
# update() adds to existing counts: "7.76mm" becomes 60 + 30 = 90.
more_kill = {"7.76mm":30 , "scarl":1 , "5.56mm":30}
pubg_level3_bag.update(more_kill)
print(pubg_level3_bag)
# + id="dIISZ6Pj-oDO" colab_type="code" colab={}
# don't remove element from the front of a list in python use instead deque
from collections import deque
# + id="eJpOF_vl-oDR" colab_type="code" colab={}
# for Datastructure with locking functionality use queue module in python
# + id="sWgtaqi2-oDU" colab_type="code" outputId="f726d2dc-8cbd-471a-a6cc-80f406123ce6" colab={}
# how to check if the data structure is iterable
# BUG FIX: `from collections import Iterable` has been deprecated since
# Python 3.3 and was removed in 3.10 -- import from collections.abc instead.
from collections.abc import Iterable
isinstance([1,2,3] , Iterable)
# + [markdown] id="w0DxokDW-oDX" colab_type="text"
# # Refer these To be *GREAT IN PYTHON*
# + [markdown] id="7s0NbCZp-oDX" colab_type="text"
# ### [Python Doc](https://docs.python.org/3/)
#
# ### [Real Python](https://realpython.com/)
#
# ### [Youtube PyData](https://www.youtube.com/user/PyDataTV)
#
# + [markdown] id="PA6OHcU_-oDZ" colab_type="text"
# # THE END
# + id="pHGBokQH-oDZ" colab_type="code" colab={}
| AI6_Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Grid as a PNG
# First you have to import the meteomatics module and the datetime module
# +
# BUG FIX: `from __future__` imports must be the first statement in the
# compiled unit; placing it after the other imports raises a SyntaxError
# when this cell (or the exported script) is executed.
from __future__ import print_function

import datetime as dt

import meteomatics.api as api
# -
# Input here your username and password from your meteomatics profile
###Credentials:
username = 'python-community'
password = '<PASSWORD>'  # placeholder -- replace with your real API password
# Input here the name that your png should get.
filename_png = 'grid_temperature_switzerland.png'
# Input here the date and time you want to get as a datetime object.
# NOTE(review): datetime.utcnow() is deprecated in newer Pythons in favour
# of timezone-aware datetime.now(timezone.utc) -- confirm library expectations.
startdate_png = dt.datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
# Choose the parameter you want to get. You can only choose one parameter at a time. Check here which parameters are available: https://www.meteomatics.com/en/api/available-parameters/
parameter_png = 't_2m:C'
# Input here the limiting coordinates (N/W/S/E bounding box) of the extract
# you want to look at. You can also change the per-degree resolution.
lat_N = 49
lon_W = 5.5
lat_S = 44
lon_E = 11
res_lat = 0.01
res_lon = 0.01
# In the following, the request will start. If there is an error in the request as for example a wrong parameter or a date that doesn't exist, you get a message.
# Issue the grid-PNG request; a bad parameter/date surfaces as the printed
# exception rather than crashing the notebook.
print("grid as a png:")
try:
    api.query_grid_png(filename_png, startdate_png, parameter_png, lat_N, lon_W, lat_S, lon_E, res_lat, res_lon,
                       username, password)
    print("filename = {}".format(filename_png))
except Exception as e:
    print("Failed, the exception is {}".format(e))
# Your png file is now saved with the assigned name.
# If the picture is too small, make the resolution smaller. Hence, there are more points on the same distance and the picture gets bigger, but it also takes more time to generate the picture.
#
# 
| examples/notebooks/05_Grid_as_a_png.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2 with Spark 1.6
# language: python
# name: python2
# ---
# ## Overview
#
# This notebook loads the movie rating data from DSX's local storage then it trains an *alternating least square* (ALS) model using Spark's Machine Learning library (MLlib).<br>
# For more information on Spark ALS, see here:
# - http://spark.apache.org/docs/latest/mllib-collaborative-filtering.html#collaborative-filtering
# - https://github.com/jadianes/spark-movie-lens
# ## Load the data
# +
from pyspark.mllib.recommendation import Rating
# Parse the "::"-delimited ratings file into an RDD of Rating records.
# `sc` is the SparkContext provided by the notebook kernel.  Fields beyond
# the third (e.g. a timestamp) are ignored; cache() keeps the parsed RDD
# in memory for the repeated counts/training below.
ratingsRDD = sc.textFile('ratings.dat') \
    .map(lambda l: l.split("::")) \
    .map(lambda p: Rating(
        user = int(p[0]),
        product = int(p[1]),
        rating = float(p[2]),
    )).cache()
# -
# ## Split into training and testing
# Next we split the data into training and testing data sets
# +
# Random 80/20 split of the ratings into training and test RDDs.
(training, test) = ratingsRDD.randomSplit([0.8, 0.2])
numTraining = training.count()
numTest = test.count()
# verify row counts for each dataset
print("Total: {0}, Training: {1}, test: {2}".format(ratingsRDD.count(), numTraining, numTest))
# -
# ## Build the recommendation model using ALS on the training data
# I've chosen some values for the ALS parameters. You should probably experiment with different values.
# +
from pyspark.mllib.recommendation import ALS
rank = 50            # number of latent factors per user/product
numIterations = 20   # ALS alternating-minimisation sweeps
lambdaParam = 0.1    # regularisation strength
model = ALS.train(training, rank, numIterations, lambdaParam)
# -
# Extract the product (movie) features
# +
import numpy as np
# Collect the learned product-factor vectors to the driver.
pf = model.productFeatures().cache()
pf_keys = pf.sortByKey().keys().collect()
pf_vals = pf.sortByKey().map(lambda x: list(x[1])).collect()
# NOTE(review): Vt is built from the *unsorted* pf.values(), while pf_keys
# comes from sortByKey() -- the row order of Vt may not line up with
# pf_keys; confirm before mapping row indices back to product ids.
Vt = np.matrix(np.asarray(pf.values().collect()))
# -
# Simulate a new user rating a movie
# +
# Simulate a new user as a ratings vector over all products.
full_u = np.zeros(len(pf_keys))
# NOTE(review): itemset(1, 5) sets array *index* 1; this equals product_id 1
# only if product ids are dense and 0-based -- confirm against the data.
full_u.itemset(1, 5) # user has rated product_id:1 = 5
# Project the user vector into latent space and back to per-product scores.
recommendations = full_u*Vt*Vt.T
print("predicted rating value", np.sort(recommendations)[:,-10:])
# Indices of the ten highest-scoring products.
top_ten_recommended_product_ids = np.where(recommendations >= np.sort(recommendations)[:,-10:].min())[1]
print("predict rating prod_id", np.array_repr(top_ten_recommended_product_ids))
# -
| notebooks/Step 04 - Realtime Recommendations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
from sklearn.datasets import load_digits  # NOTE(review): appears unused below
from sklearn.model_selection import train_test_split
# BUG FIX: the private modules sklearn.metrics.classification and
# sklearn.metrics.regression were removed in scikit-learn 0.24 -- import
# the metrics from the public sklearn.metrics package instead.  The
# duplicate MinMaxScaler import is also collapsed into one line.
from sklearn.metrics import accuracy_score, mean_squared_error, r2_score
from sklearn.preprocessing import MinMaxScaler
from dbn.tensorflow import SupervisedDBNRegression
# -
# Read the dataset
# Dataset selection: MMDA traffic data for one road/year, with engineered
# features over a sliding window of WINDOWSIZE.
ROAD = "Taft Ave."
YEAR = "2015"
EXT = ".csv"
WINDOWSIZE = 1
FILENAME = "eng_win" + str(WINDOWSIZE) + "_mmda_" + ROAD + "_" + YEAR
#FILENAME = "noeng" + "_mmda_" + ROAD + "_" + YEAR
original_dataset = pd.read_csv("data/mmda/" + FILENAME + EXT, skipinitialspace=True)
# Missing readings are treated as zero traffic/weather.
original_dataset = original_dataset.fillna(0)
# ##### Preparing Traffic Dataset
# +
# Re-read the dataset so this cell is self-contained (the cell above loads
# the same file into `original_dataset`).
original_dataset = pd.read_csv("data/mmda/" + FILENAME + EXT, skipinitialspace=True)
original_dataset = original_dataset.fillna(0)
traffic_dataset = original_dataset
#print("Start : " + str(original_dataset.columns[0:original_dataset.shape[1]][5]))
#print("End : " + str(original_dataset.columns[0:original_dataset.shape[1]][traffic_dataset.shape[1]-1]))
# Remove date time. Remove unused columns
#0-2 = dt + lineName + stationName || 3-4 - statusN - statusS || 5-14 - original weather variables
#15-46 - engineered traffic
cols_to_remove = [0, 1, 2]
# BUG FIX: `columns[[cols_to_remove]]` indexed the Index with a *nested*
# list, which is deprecated multi-dimensional indexing and fails on newer
# pandas; pass the flat list of positions instead.
traffic_dataset.drop(traffic_dataset.columns[cols_to_remove], axis=1, inplace=True)
traffic_dataset.head()
# +
# To-be Predicted variable: statusS shifted 3 steps ahead (forecast target).
# NOTE(review): statusS itself remains a column of X below -- confirm this
# is intended and not target leakage.
Y = traffic_dataset.statusS
Y = Y.shift(-3)
Y = Y.fillna(0)
Y = Y.round(5)
# Other data
X = traffic_dataset
#X = dataset
#X.statusS = X.statusS.round(5)
# Splitting data: shuffle=False keeps chronological order, so the first 33%
# of the series is the training set and the remaining 67% the test set.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.67, shuffle=False)
X_train = np.array(X_train)
X_test = np.array(X_test)
Y_train = np.array(Y_train)
Y_test = np.array(Y_test)
# Data scaling: fit the scaler on training data only (test is transformed later).
min_max_scaler = MinMaxScaler()
X_train = min_max_scaler.fit_transform(X_train)
# -
# Training
# Deep-belief-network regressor: three RBM layers (5/10/15 units), short
# pre-training (3 RBM epochs) and fine-tuning (5 backprop iterations).
regressor = SupervisedDBNRegression(hidden_layers_structure=[5, 10, 15],
                                    learning_rate_rbm=0.01,
                                    learning_rate=0.01,
                                    n_epochs_rbm=3,
                                    n_iter_backprop=5,
                                    batch_size=16,
                                    activation_function='relu')
regressor.fit(X_train, Y_train)
# Test
# Scale the test set with the scaler fitted on training data, then score.
X_test = min_max_scaler.transform(X_test)
Y_pred = regressor.predict(X_test)
print('Done.\nR-squared: %f\nMSE: %f' % (r2_score(Y_test, Y_pred), mean_squared_error(Y_test, Y_pred)))
# +
print(len(Y_pred))
# Each regressor prediction is a length-1 array; unwrap to plain scalars.
# (Replaces the manual index-and-append loop with a comprehension.)
temp = [row[0] for row in Y_pred]
d = {'Predicted': temp, 'Actual': Y_test}
df = pd.DataFrame(data=d)
df.head()
# -
df
# +
# Save the trained model for later reuse.
regressor.save('models/pm1-witheng.pkl')
# # Restore
# classifier = SupervisedDBNClassification.load('model.pkl')
# # Test
# Y_pred = classifier.predict(X_test)
# print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
# -
# Persist the predicted-vs-actual table for offline analysis.
df.to_csv("output/pm1_eng_output_" + FILENAME + EXT, encoding='utf-8')
| PREDICTION-MODEL-1 (with New Features).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={"duration": 0.017496, "end_time": "2020-12-03T19:38:42.866407", "exception": false, "start_time": "2020-12-03T19:38:42.848911", "status": "completed"} tags=[]
# # Import Libraries
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" papermill={"duration": 7.26544, "end_time": "2020-12-03T19:38:50.148236", "exception": false, "start_time": "2020-12-03T19:38:42.882796", "status": "completed"} tags=[]
from PIL import Image
import numpy as np
import os
import json
import imagesize
import pandas as pd
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
import traceback
import sys
# %matplotlib inline
sns.set_style()
# to divide our data into train and validation set
from sklearn.model_selection import train_test_split
#to encode our labels
from tensorflow.keras.utils import to_categorical
#to build our model
from tensorflow.keras.models import Sequential, save_model, load_model
from tensorflow.keras.layers import Dense,Conv2D,MaxPool2D,Flatten,Dropout
# Our optimizer options
# BUG FIX: import everything from tensorflow.keras rather than the
# standalone `keras` package -- mixing objects from the two packages in one
# model is a known source of incompatibilities.
from tensorflow.keras.optimizers import RMSprop, Adam
#Callback options
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
#importing image data generator for data augmentation
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#for the final prediction report
from sklearn.metrics import classification_report ,confusion_matrix
# pretrained backbones
from tensorflow.keras.applications.resnet50 import ResNet50
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.applications.vgg16 import VGG16
# + [markdown] papermill={"duration": 0.015399, "end_time": "2020-12-03T19:38:50.179908", "exception": false, "start_time": "2020-12-03T19:38:50.164509", "status": "completed"} tags=[]
# # Load Data
# + papermill={"duration": 0.164618, "end_time": "2020-12-03T19:38:50.360155", "exception": false, "start_time": "2020-12-03T19:38:50.195537", "status": "completed"} tags=[]
# Load the COCO-style train/val annotation files.
base_dir = '../input/til2020/'
with open(base_dir + r"train.json","r") as file:
    train_data = json.load(file)
with open(base_dir + r"val.json","r") as file:
    val_data = json.load(file)
train_annotations = train_data['annotations']
train_images = train_data['images']
categories = train_data['categories']
val_annotations = val_data['annotations']
val_images = val_data['images']
# category id -> category name
category_mapping = {}
for category_item in categories:
    category_mapping[category_item['id']] = category_item['name']
# image id -> relative file path, per split
train_id_to_path_mapping = {}
for image_item in train_images:
    train_id_to_path_mapping[image_item['id']] = image_item['file_name']
val_id_to_path_mapping = {}
for image_item in val_images:
    val_id_to_path_mapping[image_item['id']] = image_item['file_name']
# Denormalise each annotation with its absolute path, category name and
# integer bbox so downstream cells need no further lookups.
for annotation in train_annotations:
    annotation['image_path'] = '../input/til2020/train/train/'+train_id_to_path_mapping[annotation['image_id']]
    annotation['cat'] = category_mapping[annotation['category_id']]
    annotation['bbox'] = list(map(int,annotation['bbox']))
for annotation in val_annotations:
    annotation['image_path'] = '../input/til2020/val/val/'+val_id_to_path_mapping[annotation['image_id']]
    annotation['cat'] = category_mapping[annotation['category_id']]
    annotation['bbox'] = list(map(int,annotation['bbox']))
annotations = train_annotations + val_annotations
# + [markdown] papermill={"duration": 0.015417, "end_time": "2020-12-03T19:38:50.391533", "exception": false, "start_time": "2020-12-03T19:38:50.376116", "status": "completed"} tags=[]
# # Downsample Dresses Data
# + papermill={"duration": 0.028029, "end_time": "2020-12-03T19:38:50.435252", "exception": false, "start_time": "2020-12-03T19:38:50.407223", "status": "completed"} tags=[]
categories
# + papermill={"duration": 0.025988, "end_time": "2020-12-03T19:38:50.477812", "exception": false, "start_time": "2020-12-03T19:38:50.451824", "status": "completed"} tags=[]
train_annotations[1]
# + papermill={"duration": 0.047636, "end_time": "2020-12-03T19:38:50.542916", "exception": false, "start_time": "2020-12-03T19:38:50.495280", "status": "completed"} tags=[]
# Downsample the over-represented 'dresses' class: keep all other
# annotations and only the first `max_samples` dress annotations.
indexes = []
train_annotations_2 = []
counts = {'tops':0,'trousers':0,'outerwear':0,'dresses':0,'skirts':0,}
for idx, annot in enumerate(train_annotations):
    if annot['cat'] == 'dresses':
        indexes.append(idx)
    else:
        train_annotations_2.append(annot)
        counts[annot['cat']]+=1
print("Total Train Annotations:", len(train_annotations))
print("Dresses Count:", len(indexes))
print("Category Wise Count:")
print(counts)
max_samples = 8500
for i in indexes[:max_samples]:
    train_annotations_2.append(train_annotations[i])
print("New Train Annotations:", len(train_annotations_2))
# + [markdown] papermill={"duration": 0.017735, "end_time": "2020-12-03T19:38:50.578278", "exception": false, "start_time": "2020-12-03T19:38:50.560543", "status": "completed"} tags=[]
# # Confirm Number of Noise Images
# + papermill={"duration": 0.086897, "end_time": "2020-12-03T19:38:50.682908", "exception": false, "start_time": "2020-12-03T19:38:50.596011", "status": "completed"} tags=[]
import os
print(len(os.listdir('../input/noise-image-generation')))
# + [markdown] papermill={"duration": 0.017868, "end_time": "2020-12-03T19:38:50.719246", "exception": false, "start_time": "2020-12-03T19:38:50.701378", "status": "completed"} tags=[]
# # Prepare Images To Train a Model
# + papermill={"duration": 0.050518, "end_time": "2020-12-03T19:38:50.787770", "exception": false, "start_time": "2020-12-03T19:38:50.737252", "status": "completed"} tags=[]
# Adding Noise Category Id and Name
categories.append({'id':6, 'name':'noise'})
print(categories)
# Converting Categories to One-Hot Encoded Vectors
new_categories = [x['name'] for x in categories]
print(new_categories)
encoded_categories = to_categorical(list(range(len(new_categories))), num_classes=len(new_categories))
print(encoded_categories)
# category name -> one-hot label vector
category_mapping = {x:encoded_categories[i] for i,x in enumerate(new_categories)}
print(category_mapping)
# Add Noise Images to Annotations (no bbox -- the whole image is noise).
# First 3200 go to training, the rest to validation.
# NOTE(review): file names are listed from '../input/noise-image-generation'
# but the stored paths point at '../input/image-classification/' -- confirm
# the two directories hold the same files.
for i, path in enumerate(os.listdir('../input/noise-image-generation')):
    record = {'area': None,
              'iscrowd': 0,
              'id': -1,
              'image_id': -1,
              'category_id': 6,
              'bbox': None,
              'image_path': '../input/image-classification/'+path,
              'cat': 'noise'}
    if i < 3200:
        train_annotations_2.append(record)
    else:
        val_annotations.append(record)
print(len(train_annotations_2))
# + papermill={"duration": 0.033171, "end_time": "2020-12-03T19:38:50.839944", "exception": false, "start_time": "2020-12-03T19:38:50.806773", "status": "completed"} tags=[]
def get_cropped_image(img, bbox):
    """Return the bbox region of *img*; bbox is (x, y, width, height)."""
    x, y, w, h = bbox
    return img[y:y + h, x:x + w]
def get_reshaped_image(img, new_shape=(224, 224)):
    """Resize *img* to *new_shape* using nearest-neighbour sampling."""
    # INTER_NEAREST is cheap and introduces no new pixel values.
    return cv2.resize(img, new_shape, interpolation=cv2.INTER_NEAREST)
def rescale_bbox(bbox, current_img_shape, new_img_shape=(224, 224)):
    """Scale bbox (x, y, w, h) from current_img_shape to new_img_shape.

    NOTE(review): x/width scale by the ratio of shape[0] and y/height by
    shape[1]; the (commented-out) caller passes (img.shape[0], img.shape[1])
    = (rows, cols), so for non-square images the axes look swapped --
    confirm the intended convention before enabling this path.
    """
    sx = new_img_shape[0] / current_img_shape[0]
    sy = new_img_shape[1] / current_img_shape[1]
    x, y, w, h = bbox
    return x * sx, y * sy, w * sx, h * sy
# + papermill={"duration": 1119.03925, "end_time": "2020-12-03T19:57:29.898307", "exception": false, "start_time": "2020-12-03T19:38:50.859057", "status": "completed"} tags=[]
# Categories whose crops should not be augmented with a horizontal flip.
ignore_flip = ('dresses', 'noise')
def transform_data(annotations, samples_per_cat=None, cats=None):
    """Load, crop and resize annotated images into (features, labels) lists.

    Noise annotations use the whole image; other categories are cropped to
    their bbox.  Non-ignore_flip categories are augmented with a mirrored
    copy.  When samples_per_cat/cats are given, only those categories are
    kept, capped at samples_per_cat each.
    """
    features = []
    labels = []
    max_check = False
    cat_count = {}
    if samples_per_cat is not None:
        max_check = True
        cat_count = {x: 0 for x in cats}
    else:
        samples_per_cat = sys.maxsize
    for i, annotation in enumerate(annotations):
        img_path = annotation['image_path']
        cat = annotation['cat']
        bbox = annotation['bbox']
        try:
            if max_check:
                if cat in cats:
                    if cat_count[cat] >= samples_per_cat:
                        continue
                else:
                    continue
            img = cv2.imread(img_path)
            if img is None:  # unreadable/missing file -- skip silently
                continue
            if cat == 'noise':
                resized_image = get_reshaped_image(img, new_shape=(128, 128))
            else:
                cropped_image = get_cropped_image(img, bbox)
                resized_image = get_reshaped_image(cropped_image, new_shape=(128, 128))
            features.append(resized_image)
            labels.append(category_mapping[cat])
            # BUG FIX: cat_count is only pre-populated when samples_per_cat
            # is given; the default path used to raise KeyError here.
            cat_count[cat] = cat_count.get(cat, 0) + 1
            if cat not in ignore_flip:
                features.append(cv2.flip(resized_image, 1))
                labels.append(category_mapping[cat])
                cat_count[cat] = cat_count.get(cat, 0) + 1
            if i != 0 and i % 1000 == 0:
                print("Processed Images: ", i)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            print(f"Error in image: bbox={bbox}, img_path={img_path}, cat={cat}")
            traceback.print_exc()
    return features, labels
max_samples = 10000
# cats = {'tops','trousers'}
cats = set(new_categories)
train_features, train_labels = transform_data(train_annotations_2, samples_per_cat=max_samples, cats=cats)
print(len(train_features))
print(len(train_labels))
#print(train_data[0])
#print(labels[0])
test_features, test_labels = transform_data(val_annotations, samples_per_cat=max_samples, cats=cats)
# NOTE(review): these two prints repeat the *train* lengths; they were
# probably meant to print len(test_features)/len(test_labels).
print(len(train_features))
print(len(train_labels))
# Convert to numpy arrays for Keras.
train_features_2 = np.asarray(train_features)
print(train_features_2.shape)
train_labels_2 = np.asarray(train_labels)
print(train_labels_2.shape)
print(len(test_features))
print(len(test_labels))
test_features_2 = np.asarray(test_features)
print(test_features_2.shape)
test_labels_2 = np.asarray(test_labels)
print(test_labels_2.shape)
# + [markdown] papermill={"duration": 0.025811, "end_time": "2020-12-03T19:57:29.950452", "exception": false, "start_time": "2020-12-03T19:57:29.924641", "status": "completed"} tags=[]
# # Train a Model
# + papermill={"duration": 0.036976, "end_time": "2020-12-03T19:57:30.012997", "exception": false, "start_time": "2020-12-03T19:57:29.976021", "status": "completed"} tags=[]
batch_size = 64  # samples per gradient update
input_shape = (128, 128, 3)  # matches the 128x128 RGB crops built above
epoch = 100  # maximum number of training epochs (early stopping may end sooner)
# + papermill={"duration": 0.035891, "end_time": "2020-12-03T19:57:30.074859", "exception": false, "start_time": "2020-12-03T19:57:30.038968", "status": "completed"} tags=[]
# batch_size = 512
# input_shape = (128, 128, 3)
# epoch = 100
# resnet_50 = ResNet50(weights=None, input_shape=input_shape, classes=len(categories))
# summarize the model
# resnet_50.summary()
# resnet_50.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
# early_stop= EarlyStopping(monitor='val_loss',patience=10)
# learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
# patience=10,
# verbose=1,
# factor=0.5,
# min_lr=0.00001)
# mcp_save = ModelCheckpoint('.mdl_resnet50_wts.hdf5', save_best_only=True, monitor='val_loss', mode='min')
# resnet_50.fit(train_features_2, train_labels_2,
# epochs=epoch,
# batch_size=batch_size,
# validation_data=(test_features_2,test_labels_2),
# callbacks=[early_stop, mcp_save, learning_rate_reduction])
# metrics=pd.DataFrame(resnet_50.history.history)
# metrics
# + [markdown] papermill={"duration": 0.026251, "end_time": "2020-12-03T19:57:30.127439", "exception": false, "start_time": "2020-12-03T19:57:30.101188", "status": "completed"} tags=[]
# ## Inception V3
# + papermill={"duration": 0.035802, "end_time": "2020-12-03T19:57:30.189328", "exception": false, "start_time": "2020-12-03T19:57:30.153526", "status": "completed"} tags=[]
# inception_v3 = InceptionV3(weights=None, input_shape=input_shape, classes=len(categories))
# summarize the model
# inception_v3.summary()
# inception_v3.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
# early_stop= EarlyStopping(monitor='val_loss',patience=10)
# learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
# patience=10,
# verbose=1,
# factor=0.5,
# min_lr=0.00001)
# mcp_save = ModelCheckpoint('.mdl_inceptionv3_wts.hdf5', save_best_only=True, monitor='val_loss', mode='min')
# inception_v3.fit(train_features_2, train_labels_2,
# epochs=epoch,
# batch_size=batch_size,
# validation_data=(test_features_2,test_labels_2),
# callbacks=[early_stop, mcp_save, learning_rate_reduction])
# metrics=pd.DataFrame(inception_v3.history.history)
# metrics
# + [markdown] papermill={"duration": 0.026117, "end_time": "2020-12-03T19:57:30.242508", "exception": false, "start_time": "2020-12-03T19:57:30.216391", "status": "completed"} tags=[]
# ## Train VGG16
# + papermill={"duration": 666.80427, "end_time": "2020-12-03T20:08:37.073490", "exception": false, "start_time": "2020-12-03T19:57:30.269220", "status": "completed"} tags=[]
# Train a VGG16 from scratch (no pre-trained weights) on our category set.
vgg16 = VGG16(weights=None, input_shape=input_shape, classes=len(categories))
# summarize the model
# vgg16.summary()
vgg16.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
early_stop= EarlyStopping(monitor='val_loss',patience=10)
mcp_save = ModelCheckpoint('mdl_vgg16_wts_2.hdf5', save_best_only=True, monitor='val_loss', mode='min')
learning_rate_reduction = ReduceLROnPlateau(monitor='val_accuracy',
                                            patience=10,
                                            verbose=1,
                                            factor=0.5,
                                            min_lr=0.00001)
# BUG FIX: mcp_save was constructed but never passed to fit(), so the best
# weights were never checkpointed (the ResNet/Inception templates above do
# include it). Add it to the callbacks list.
vgg16.fit(train_features_2, train_labels_2,
          epochs=epoch,
          batch_size=batch_size,
          validation_data=(test_features_2,test_labels_2),
          callbacks=[early_stop, mcp_save, learning_rate_reduction])
# Save the model
filepath = './vgg16_2'
save_model(vgg16, filepath)
metrics=pd.DataFrame(vgg16.history.history)
metrics
| Object Detection/code/image-classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Comparing surrogate models
#
# <NAME>, July 2016.
# Reformatted by <NAME> 2020
#
# .. currentmodule:: skopt
#
# Bayesian optimization or sequential model-based optimization uses a surrogate
# model to model the expensive to evaluate function `func`. There are several
# choices for what kind of surrogate model to use. This notebook compares the
# performance of:
#
# * gaussian processes,
# * extra trees, and
# * random forests
#
# as surrogate models. A purely random optimization strategy is also used as
# a baseline.
#
print(__doc__)
import numpy as np
np.random.seed(123)
import matplotlib.pyplot as plt
# ## Toy model
#
# We will use the :class:`benchmarks.branin` function as toy model for the expensive function.
# In a real world application this function would be unknown and expensive
# to evaluate.
#
#
# +
from skopt.benchmarks import branin as _branin
def branin(x, noise_level=0.):
    """Evaluate the Branin function at *x*, plus zero-mean Gaussian noise
    of standard deviation *noise_level* (0 by default, i.e. noiseless)."""
    noise = noise_level * np.random.randn()
    return _branin(x) + noise
# +
from matplotlib.colors import LogNorm
def plot_branin():
    """Render a log-scaled heatmap of the Branin function over its usual
    domain (x1 in [-5, 10], x2 in [0, 15]) and mark its three global minima."""
    fig, ax = plt.subplots()

    grid_x1 = np.linspace(-5, 10, 100)
    grid_x2 = np.linspace(0, 15, 100)
    mesh_x, mesh_y = np.meshgrid(grid_x1, grid_x2)
    points = np.c_[mesh_x.ravel(), mesh_y.ravel()]
    # evaluate the (noiseless) objective on the 100x100 grid
    values = np.reshape([branin(p) for p in points], (100, 100))

    # log colour scale makes the shallow basins visible
    heatmap = ax.pcolormesh(mesh_x, mesh_y, values,
                            norm=LogNorm(vmin=values.min(),
                                         vmax=values.max()),
                            cmap='viridis_r')

    # the three known global minima of Branin
    known_minima = np.array([[-np.pi, 12.275], [+np.pi, 2.275], [9.42478, 2.475]])
    ax.plot(known_minima[:, 0], known_minima[:, 1], "r.", markersize=14,
            lw=0, label="Minima")

    colorbar = fig.colorbar(heatmap)
    colorbar.set_label("f(x)")

    ax.legend(loc="best", numpoints=1)
    ax.set_xlabel("X1")
    ax.set_xlim([-5, 10])
    ax.set_ylabel("X2")
    ax.set_ylim([0, 15])
plot_branin()
# -
# This shows the value of the two-dimensional branin function and
# the three minima.
#
#
# ## Objective
#
# The objective of this example is to find one of these minima in as
# few iterations as possible. One iteration is defined as one call
# to the :class:`benchmarks.branin` function.
#
# We will evaluate each model several times using a different seed for the
# random number generator. Then compare the average performance of these
# models. This makes the comparison more robust against models that get
# "lucky".
#
#
# +
from functools import partial
from skopt import gp_minimize, forest_minimize, dummy_minimize
func = partial(branin, noise_level=2.0)  # noisy objective shared by every minimizer
bounds = [(-5.0, 10.0), (0.0, 15.0)]  # search box: x1 in [-5, 10], x2 in [0, 15]
n_calls = 60  # budget of objective evaluations per optimization run
# +
def run(minimizer, n_iter=5):
    """Run *minimizer* on the shared objective n_iter times, seeding the RNG
    with 0..n_iter-1, and return the list of optimization results."""
    results = []
    for seed in range(n_iter):
        results.append(minimizer(func, bounds, n_calls=n_calls, random_state=seed))
    return results
# Evaluate each surrogate strategy over 5 seeded repetitions (see run()).
# Random search
dummy_res = run(dummy_minimize)
# Gaussian processes
gp_res = run(gp_minimize)
# Random forest
rf_res = run(partial(forest_minimize, base_estimator="RF"))
# Extra trees
et_res = run(partial(forest_minimize, base_estimator="ET"))
# -
# Note that this can take a few minutes.
#
#
# +
from skopt.plots import plot_convergence

# Overlay the convergence traces of all four strategies on one log-scale plot.
plot = plot_convergence(("dummy_minimize", dummy_res),
                        ("gp_minimize", gp_res),
                        ("forest_minimize('rf')", rf_res),
                        # BUG FIX: label string had an unbalanced quote: 'et)
                        ("forest_minimize('et')", et_res),
                        true_minimum=0.397887, yscale="log")
plot.legend(loc="best", prop={'size': 6}, numpoints=1)
# -
# This plot shows the value of the minimum found (y axis) as a function
# of the number of iterations performed so far (x axis). The dashed red line
# indicates the true value of the minimum of the :class:`benchmarks.branin` function.
#
# For the first ten iterations all methods perform equally well as they all
# start by creating ten random samples before fitting their respective model
# for the first time. After iteration ten the next point at which
# to evaluate :class:`benchmarks.branin` is guided by the model, which is where differences
# start to appear.
#
# Each minimizer only has access to noisy observations of the objective
# function, so as time passes (more iterations) it will start observing
# values that are below the true value simply because they are fluctuations.
#
#
| 0.8/_downloads/b4f910a92676697b8c1c26c50df6d7af/strategy-comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# 
#
#
# # Goals of this Lesson
# - Understand the Bias-Variance Tradeoff
#
# - Ensembling Methods
# - Bagging
# - Voting
# - Stacking
#
# ### References
# - Chapter 8 of [*Elements of Statistical Learning* by <NAME>](http://statweb.stanford.edu/~tibs/ElemStatLearn/printings/ESLII_print10.pdf)
# - [A Few Useful Things to Know about Machine Learning](https://homes.cs.washington.edu/~pedrod/papers/cacm12.pdf)
# - [SciKit-Learn's documentation on ensemble methods](http://scikit-learn.org/stable/modules/ensemble.html)
#
# ## 0. Preliminaries
# First we need to import Numpy, Pandas, MatPlotLib...
from IPython.display import Image
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import time
# %matplotlib inline
# Again we need functions for shuffling the data and calculating classification errors.
# +
### function for shuffling the data and labels
def shuffle_in_unison(features, labels):
    """Shuffle two arrays in place with the *same* random permutation.

    The RNG state is captured before shuffling `features` and restored
    before shuffling `labels`, so both arrays are permuted identically
    and feature/label pairs stay aligned.
    """
    saved_state = np.random.get_state()
    np.random.shuffle(features)
    # rewind the RNG so labels receive the identical permutation
    np.random.set_state(saved_state)
    np.random.shuffle(labels)
### calculate classification errors
# return a percentage: (number misclassified)/(total number of datapoints)
def calc_classification_error(predictions, class_labels):
    """Fraction of misclassifications, thresholding predictions at 0.5.

    A prediction >= 0.5 counts as class 1, < 0.5 as class 0; returns
    (number misclassified) / (total number of datapoints), or 0.0 for
    empty input (previously a ZeroDivisionError).
    """
    n = predictions.size
    if n == 0:
        return 0.0
    num_of_errors = 0.
    # range() instead of Python-2-only xrange(); identical behaviour on py2/py3
    for idx in range(n):
        if (predictions[idx] >= 0.5 and class_labels[idx]==0) or (predictions[idx] < 0.5 and class_labels[idx]==1):
            num_of_errors += 1
    return num_of_errors/n
# -
# ### 0.1 Load the dataset of paintings
# We are going to use the Bob Ross paintings dataset throughout this session. Let's again load the data and run PCA...
# +
from sklearn.decomposition import PCA
# load the 403 x 360,000 matrix
br_paintings = np.load(open('../data/bob_ross/bob_ross_paintings.npy','rb'))
# perform PCA again
# keep 400 components; fit_transform is timed below for reporting
pca = PCA(n_components=400)
start_time = time.time()
pca_paintings = pca.fit_transform(br_paintings)
end_time = time.time()
# remove the br_paintings from memory
# (the raw matrix is large; dropping the reference lets it be garbage-collected)
br_paintings = None
print "Training took a total of %.2f seconds." %(end_time-start_time)
print "Preserved percentage of original variance: %.2f%%" %(pca.explained_variance_ratio_.sum() * 100)
print "Dataset is now of size: %d x %d"%(pca_paintings.shape)
# -
# We want to formulate a binary classification problem. In the data folder there's a file that has labels denoting what is in each painting (tree, mountain, etc.). Let's load it...
# one row per episode/painting, with binary columns for painted elements
br_labels_data = pd.read_csv('../data/bob_ross/elements-by-episode.csv')
br_labels_data.head()
# Let's make two classes: 1 = 'painting contains hill or mountain', 0 = 'doesn't contain hill/mountain':
# positive class if any of the four hill/mountain columns is set
labels = (br_labels_data['HILLS'] + br_labels_data['MOUNTAIN'] + br_labels_data['MOUNTAINS'] \
                  + br_labels_data['SNOWY_MOUNTAIN'] > 0).astype('int8').as_matrix()
print "Contains mountain?: "+str(bool(labels[5]))
# sanity-check: reconstruct painting #5 from its PCA coefficients and display it
recon_img = pca.inverse_transform(pca_paintings[5,:])
plt.imshow(np.reshape(recon_img, (300, 400, 3)))
plt.show()
# Make training and test split...
# +
# set the random number generator for reproducability
np.random.seed(182)
# shuffle data
N = pca_paintings.shape[0]
shuffle_in_unison(pca_paintings, labels)
# split into train and test sets
# 80/20 split; the shuffle above makes it random but reproducible
train_features = pca_paintings[:int(.8*N), :]
test_features = pca_paintings[int(.8*N):, :]
train_labels = labels[:int(.8*N)]
test_labels = labels[int(.8*N):]
# -
# ### 0.2 Run a baseline classifier
# In order to see the improvements that ensembling provides, let's train a baseline logistic regression classifier for later comparison.
# +
np.random.seed(182)
from sklearn.linear_model import LogisticRegression
# initialize and train a logistic regression model
# this single model's test error is the yardstick for all the ensembles below
lr_model = LogisticRegression()
lr_model.fit(train_features, train_labels)
# compute error on test data
lr_predictions = lr_model.predict(test_features)
one_model_test_error_rate = calc_classification_error(lr_predictions, test_labels)
print "Classification error on test set: %.2f%%" %(one_model_test_error_rate*100)
# compute the baseline error since the classes are imbalanced
print "Baseline Error: %.2f%%" %((sum(test_labels)*100.)/len(test_labels))
# -
# ## 1. The Bias-Variance Tradeoff
#
# When faced with an important decision, its common to ask multiple people for their advice. Why should a classification decision be any different? If computer power is not a limiting factor--which is usually the case--why not train multiple classifiers and combine their predictions? This is exactly what *ensembling* classifiers does. In this section we'll cover three methods for combining classifiers: bagging, averaging, and stacking. But first, let's examine why one classifier is usually not enough. It can be formalized as a tradeoff between *bias* and *variance*.
#
# Recall the squared loss function: $$\mathcal{L} = \sum_{i}^{N} (y_{i} - f(\mathbf{x}_{i}))^{2}. $$ This loss is over a particular training set {$\mathbf{X}, \mathbf{y}$} but we are really interested in the loss over all possible datasets we could have observed, $\{\mathbf{X}, \mathbf{y}\} \sim p(\mathcal{D})$: $$\mathbb{E}_{p(\mathcal{D})}[\mathcal{L}] = \mathbb{E}_{p(\mathcal{D})}[(y_{i} - f(\mathbf{x}_{i}))^{2}]. $$ After some [algebraic manipulations](https://en.wikipedia.org/wiki/Bias%E2%80%93variance_tradeoff#Derivation), we can re-write the expected loss as $$\mathbb{E}_{p(\mathcal{D})}[\mathcal{L}] = (f^{*}(\mathbf{x}_{i}) - \mathbb{E}[f(\mathbf{x}_{i})])^{2} + \text{Var}[f(\mathbf{x}_{i})] + \text{constant (error)}. $$ The first term, $(f^{*}(\mathbf{x}_{i}) - \mathbb{E}[f(\mathbf{x}_{i})])^{2}$, is the squared difference between the expected value of the classifier $f$ and the **perfect, true** classifier $f^{*}$. This difference is known as the *bias* of a classifier. For instance, a linear model has a strong bias since its functional form is rather simple (unless the optimial classifier is also a linear function). The second term, $\text{Var}[f(\mathbf{x}_{i})]$, is the variance of our classifier. Basically, this term captures the variability in outputs. The main point is that if a classifier has *low* bias, meaning it is a very powerful function, then it will usually have high *variance* since this power allows it to generate a wide range of outputs. And vice versa. What I just said can be represented graphically as
# 
# Ensembling classifiers all but always produces better performance because it **reduces variance without incurring bias**.
# ## 2. Bootstrap Aggregating ('Bagging')
# In section 1, when I mentioned using multiple classifiers, you probably thought I was talking about training and combining several different kinds of classifiers. We will do that. But first we'll discuss something simpler: training the same classifier on multiple datasets.
#
# ### 2.1 Bootstrap Resampling
# We (always) want more training data, but unfortunately, it is not available. We can use the training data we do have and resample it *with replacement* to generate additional 'fake' datasets. Formally, our original dataset is $$\{\mathbf{y}, \mathbf{X}\} \sim p(\mathcal{D}),$$ where $p(\mathcal{D})$ is the unknown population distribution. We then treat the original data as a substitute for the population, writing $$\{\mathbf{\tilde y}, \mathbf{ \tilde X}\}_{1} , \{\mathbf{\tilde y}, \mathbf{ \tilde X}\}_{2}, ... \sim \{\mathbf{y}, \mathbf{X}\}.$$ $\{\mathbf{\tilde y}, \mathbf{ \tilde X}\}$ are called bootstrap (re)samples. Usually, they contain the same number of instances as the original training set. A diagram showing sampling with replacement is below
# 
# Now let's write some Python code to generate Bootstrap samples from a given dataset...
### function for bootstrap resampling
def bootstrap_resample(features, labels, n_resamples):
    """Draw n_resamples bootstrap datasets (sampling rows with replacement).

    Each resample has the same number of rows N as the original data, and
    features/labels are indexed with the same row draws so pairs stay aligned.

    Returns:
        (boot_samps_x, boot_samps_y): two parallel lists of length n_resamples.
    """
    N = features.shape[0]
    idxs = np.arange(N)
    # numpy's choice() handles the sampling w/ replacement
    resampled_idxs = np.random.choice(idxs, size=(N, n_resamples), replace=True)
    boot_samps_x = []
    boot_samps_y = []
    # range() instead of Python-2-only xrange()
    for i in range(n_resamples):
        boot_samps_x.append(features[resampled_idxs[:, i], :])
        boot_samps_y.append(labels[resampled_idxs[:, i]])
    return boot_samps_x, boot_samps_y
# ### 2.2 Training on Bootstrap Samples
# Given the Bootstrap datasets, we next train a classifier on each dataset and then combine their predictions. These classifiers can all be instances of the model, as is the case in the code below, or different ones. Once the models are trained, we can combine them in two way: by averaging the probabilities or the predictions. Both methods are shown below.
def fit_and_predict_on_bootstrap_samples(model, bootstrapped_features, bootstrapped_labels, \
                                         test_features, n_bootstrap_samples):
    """Fit *model* on each bootstrap sample and ensemble its test outputs.

    Two ensembling schemes are computed side by side: averaging the predicted
    positive-class probabilities and averaging the hard predictions; both
    averages are then rounded to hard 0/1 labels.

    Note: *model* is refit in place on each sample, so after this call it
    holds the fit from the last bootstrap sample.

    Returns:
        (ensemb_probs, ensemb_preds): rounded 0/1 arrays of length n_test.
    """
    n_test = test_features.shape[0]
    ensemb_probs = np.zeros((n_test,))
    ensemb_preds = np.zeros((n_test,))
    # range() instead of Python-2-only xrange(); print() works on py2 and py3
    for idx in range(n_bootstrap_samples):
        print("training model #%d" % (idx + 1))
        model.fit(bootstrapped_features[idx], bootstrapped_labels[idx])
        ensemb_probs += model.predict_proba(test_features)[:, 1]
        ensemb_preds += model.predict(test_features)
    # average over the ensemble, then round to hard 0/1 predictions
    ensemb_probs /= n_bootstrap_samples
    ensemb_preds /= n_bootstrap_samples
    ensemb_probs = np.around(ensemb_probs)
    ensemb_preds = np.around(ensemb_preds)
    return ensemb_probs, ensemb_preds
# Now run the function...
# +
np.random.seed(182)
# bag 7 logistic-regression models, one per bootstrap resample of the training set
n_bootstrap_samples = 7
bootstrapped_features, bootstrapped_labels = bootstrap_resample(train_features, train_labels, n_bootstrap_samples)
ensembled_probs, ensembled_preds = \
    fit_and_predict_on_bootstrap_samples(lr_model, bootstrapped_features, bootstrapped_labels, \
                                         test_features, n_bootstrap_samples)
print
print "Averaging probabilities: classification error on test set is %.2f%%" \
    %(calc_classification_error(ensembled_probs, test_labels)*100)
print "Averaging predictions: classification error on test set is %.2f%%" \
    %(calc_classification_error(ensembled_preds, test_labels)*100)
print
print "One logistic regression model error: %.2f%%"%(one_model_test_error_rate*100)
# compute the baseline error since the classes are imbalanced
print "Baseline error: %.2f%%" %((sum(test_labels)*100.)/len(test_labels))
# -
# ## 2. Voting
# Now we'll consider combining a diverse set of classifiers, each trained on an identical copy of the data. But first, we need to quickly introduce three new types of classifiers. Unfortunately, we don't have enough time to cover each in detail and will have to use them as somewhat of a black-box.
#
# ### 2.1 Overview of Three Classifiers: Decision Tree, k-Nearest Neighbors, and Naive Bayes
#
# 
#
# SciKit-Learn Documentation:
# - [Decision Tree](http://scikit-learn.org/stable/modules/tree.html#classification)
# - [k-Nearest Neighbors](http://scikit-learn.org/stable/modules/neighbors.html#nearest-neighbors-classification)
# - [Naive Bayes](http://scikit-learn.org/stable/modules/naive_bayes.html#gaussian-naive-bayes, https://www.analyticsvidhya.com/blog/2015/09/naive-bayes-explained/)
#
# ### 2.2 Voting via Averaging Predictions
#
# $$ \hat f(\mathbf{x}_{i}) = \frac{1}{4} \hat y_{\text{DT}} + \frac{1}{4} \hat y_{\text{kNN}} + \frac{1}{4} \hat y_{\text{NB}} + \frac{1}{4} \hat y_{\text{LogReg}}.$$
# +
np.random.seed(182)
# import the three new classifiers
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
# initialize models
d_tree_model = DecisionTreeClassifier()
knn_model = KNeighborsClassifier(n_neighbors=5)
nb_model = GaussianNB()
# fit models
d_tree_model.fit(train_features, train_labels)
knn_model.fit(train_features, train_labels)
nb_model.fit(train_features, train_labels)
# predict on test data
tree_predictions = d_tree_model.predict(test_features)
knn_predictions = knn_model.predict(test_features)
nb_predictions = nb_model.predict(test_features)
# average predictions
# add in the logistic regression predictions calcuated previously
# unweighted vote: average the four hard predictions and round to 0/1
avg_predictions = np.around((tree_predictions + knn_predictions + nb_predictions + lr_predictions)/4.)
print "Averaging predictions: classification error on test set is %.2f%%" \
    %(calc_classification_error(avg_predictions, test_labels)*100)
print
print "One logistic regression model error: %.2f%%"%(one_model_test_error_rate*100)
# compute the baseline error since the classes are imbalanced
print "Baseline error: %.2f%%" %((sum(test_labels)*100.)/len(test_labels))
# -
# ## 3. Stacking Models
# When we performed the averaging above, we used this formula: $$ \hat f(\mathbf{x}_{i}) = \frac{1}{4} \hat y_{\text{DT}} + \frac{1}{4} \hat y_{\text{kNN}} + \frac{1}{4} \hat y_{\text{NB}} + \frac{1}{4} \hat y_{\text{LogReg}}.$$ That is, we gave each classifier equal weighting. While this approach is reasonable, probably it would be better to give an unequal weighting to each classifer, allowing the 'smartest' model to contribute most to the decision. We can accomplish this by training a second-level logistic regression classifier on the predicted probabilities: $$ \hat f(\mathbf{x}_{i}) = \sigma ( \alpha_{1} f_{\text{DT}}(\mathbf{x}_{i}) + \alpha_{2}f_{\text{kNN}}(\mathbf{x}_{i}) + \alpha_{3}f_{\text{NB}}(\mathbf{x}_{i}) + \alpha_{4}f_{\text{LogReg}}(\mathbf{x}_{i})) \text{ where } \sigma (\cdot) \text{ is the logistic function}.$$ A depiction of the pipeline is below:
# 
#
# +
np.random.seed(182)
### TRAINING
# calculate probabilities on the training data
# [:,1] keeps the positive-class probability; [np.newaxis].T makes each a column vector
tree_probs = d_tree_model.predict_proba(train_features)[:,1][np.newaxis].T
knn_probs = knn_model.predict_proba(train_features)[:,1][np.newaxis].T
nb_probs = nb_model.predict_proba(train_features)[:,1][np.newaxis].T
logReg_probs = lr_model.predict_proba(train_features)[:,1][np.newaxis].T
# combine into a new 'feature' matrix
# the four base-model probabilities become the meta-classifier's features
train_probs_matrix = np.hstack([tree_probs, knn_probs, nb_probs, logReg_probs])
# train logistic regression
meta_classifier = LogisticRegression()
meta_classifier.fit(train_probs_matrix, train_labels)
# plot the weights learned for each classifier
f,ax = plt.subplots()
ticks = np.arange(4)
ax.bar(ticks, meta_classifier.coef_[0])
ax.set_xticks(ticks+.4)
ax.set_xticklabels(['Decision Tree', 'kNN', 'Naive Bayes', 'Log. Regression'])
ax.set_title('Weights Learned for Each Classifier')
plt.show()
# +
### TESTING
# calculate probabilities on the test data
# same column-vector construction as in training: positive-class probabilities
tree_probs = d_tree_model.predict_proba(test_features)[:,1][np.newaxis].T
knn_probs = knn_model.predict_proba(test_features)[:,1][np.newaxis].T
nb_probs = nb_model.predict_proba(test_features)[:,1][np.newaxis].T
logReg_probs = lr_model.predict_proba(test_features)[:,1][np.newaxis].T
# combine into a new 'feature' matrix
test_probs_matrix = np.hstack([tree_probs, knn_probs, nb_probs, logReg_probs])
# the meta-classifier turns the four base probabilities into a final prediction
stacked_predictions = meta_classifier.predict(test_probs_matrix)
print "Averaging predictions: classification error on test set is %.2f%%" %(calc_classification_error(stacked_predictions, test_labels)*100)
print
print "One logistic regression model error: %.2f%%"%(one_model_test_error_rate*100)
# compute the baseline error since the classes are imbalanced
print "Baseline error: %.2f%%" %((sum(test_labels)*100.)/len(test_labels))
# +
train_stacked_predictions = meta_classifier.predict(train_probs_matrix)
# BUG FIX: the error fraction must be scaled by 100 to match the "%%" label
# (every other error report in this notebook multiplies by 100).
# print(...) with a single %-formatted string behaves identically on py2/py3.
print("Stacking train error: %.2f%%" % (calc_classification_error(train_stacked_predictions, train_labels) * 100))
# -
# Ah! The training error is zero percent--a dead giveaway that the added power of stacking caused us to overfit. One way to prevent this overfitting is to split the training data and fit the base learners and meta-classifier on different subsets.
# ## <span style="color:red">STUDENT ACTIVITY (until end of session)</span>
#
#
# ## 4. Mini Competition
#
# For the remainder of the session, we'll have a mini predictive modeling competition on the [Labeled Faces in the Wild](http://vis-www.cs.umass.edu/lfw/) dataset. Your task is to get as low an error on the test set as possible by employing all the techniques we covered today: dimensionality reduction (PCA), Bagging, stacking, etc. Be mindful to save some data as a validation set.
#
# The code below will load the data, display an image, convert it to feature vectors, and train a logistic regression classifier.
# +
from sklearn.datasets import fetch_lfw_pairs
# each instance is a *pair* of face images; the target says whether both show the same person
lfw_train = fetch_lfw_pairs(subset='train')
lfw_test = fetch_lfw_pairs(subset='test')
lfw_train_pairs = lfw_train['pairs']
lfw_train_targets = lfw_train['target']
lfw_test_pairs = lfw_test['pairs']
lfw_test_targets = lfw_test['target']
print "The training data is of size: %d instances x %d faces x %d pixels x %d pixels" %(lfw_train_pairs.shape)
print "The test data is of size: %d instances x %d faces x %d pixels x %d pixels" %(lfw_test_pairs.shape)
# -
# Let's visualize the images...
# +
# show the two faces of pair 0 side by side
face_idx=0
# subplot containing first image
ax1 = plt.subplot(1,2,1)
ax1.imshow(lfw_train_pairs[face_idx,0,:,:],cmap='Greys_r')
# subplot containing second image
ax2 = plt.subplot(1,2,2)
ax2.imshow(lfw_train_pairs[face_idx,1,:,:],cmap='Greys_r')
plt.show()
# -
# As a last step of pre-processing, let's flatten the data tensor...
# +
# flatten each image pair into one 5828-dim feature vector
# (presumably 2 faces x 62 x 47 pixels = 5828 -- TODO confirm against the printed shapes above)
train_x = np.reshape(lfw_train_pairs, (2200, 5828))
train_y = lfw_train_targets
test_x = np.reshape(lfw_test_pairs, (1000, 5828))
test_y = lfw_test_targets
# print the shapes just to check its what we expect
print train_x.shape
print train_y.shape
print test_x.shape
print test_y.shape
# -
# And just to get you started, here's code to train a logistic regression classifier...
# +
np.random.seed(182)
# initialize and train a logistic regression model
# starter baseline for the mini-competition; rebinds lr_model from earlier cells
lr_model = LogisticRegression()
lr_model.fit(train_x, train_y)
# compute error on test data
lr_predictions = lr_model.predict(test_x)
one_model_test_error_rate = calc_classification_error(lr_predictions, test_y)
print "Classification error on test set: %.2f%%" %(one_model_test_error_rate*100)
# compute the baseline error since the classes are imbalanced
print "Baseline Error: %.2f%%" %((sum(test_y)*100.)/len(test_y))
# -
# For some perspective, state of the art performance on this dataset is [around 5%](http://udrc.eng.ed.ac.uk/sites/udrc.eng.ed.ac.uk/files/publications/class_specific2014.pdf). [This paper from 1991](https://www.cs.ucsb.edu/~mturk/Papers/mturk-CVPR91.pdf) used PCA and a distance metric to get around 40% error.
| Session 7 - Ensembles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="n4JYrwFD-70-" colab_type="code" colab={}
# # %tensorflow_version 1.x
# + id="h2oB35p1rz9B" colab_type="code" colab={}
# -*- coding: utf-8 -*-
'''DenseNet and DenseNet-FCN models for Keras.
DenseNet is a network architecture where each layer is directly connected
to every other layer in a feed-forward fashion (within each dense block).
For each layer, the feature maps of all preceding layers are treated as
separate inputs whereas its own feature maps are passed on as inputs to
all subsequent layers. This connectivity pattern yields state-of-the-art
accuracies on CIFAR10/100 (with or without data augmentation) and SVHN.
On the large scale ILSVRC 2012 (ImageNet) dataset, DenseNet achieves a
similar accuracy as ResNet, but using less than half the amount of
parameters and roughly half the number of FLOPs.
DenseNets can be extended to image segmentation tasks as described in the
paper "The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for
Semantic Segmentation". Here, the dense blocks are arranged and concatenated
with long skip connections for state of the art performance on the CamVid dataset.
# Reference
- [Densely Connected Convolutional Networks](https://arxiv.org/pdf/1608.06993.pdf)
- [The One Hundred Layers Tiramisu: Fully Convolutional DenseNets for Semantic
Segmentation](https://arxiv.org/pdf/1611.09326.pdf)
This implementation is based on the following reference code:
- https://github.com/gpleiss/efficient_densenet_pytorch
- https://github.com/liuzhuang13/DenseNet
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import keras.backend as K
from keras.engine.topology import get_source_inputs
from keras.layers import Activation
from keras.layers import AveragePooling3D
from keras.layers import BatchNormalization
from keras.layers import Conv3D
from keras.layers import Conv3DTranspose
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import GlobalAveragePooling3D
from keras.layers import GlobalMaxPooling3D
from keras.layers import Input
from keras.layers import MaxPooling3D
from keras.layers import Reshape
from keras.layers import UpSampling3D
from keras.layers import concatenate
from keras.models import Model
from keras.regularizers import l2
# from keras_contrib.layers import SubPixelUpscaling
def DenseNet3D(input_shape=None,
               depth=40,
               nb_dense_block=3,
               growth_rate=12,
               nb_filter=-1,
               nb_layers_per_block=-1,
               bottleneck=False,
               reduction=0.0,
               dropout_rate=0.0,
               weight_decay=1e-4,
               subsample_initial_block=False,
               include_top=True,
               input_tensor=None,
               pooling=None,
               classes=10,
               activation='softmax',
               transition_pooling='avg'):
    '''Instantiate the DenseNet architecture.
    The model and the weights are compatible with both
    TensorFlow and Theano. The dimension ordering
    convention used by the model is the one
    specified in your Keras config file.
    # Arguments
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 224, 3)` (with `channels_last` dim ordering)
            or `(3, 224, 224, 224)` (with `channels_first` dim ordering).
            It should have exactly 4 inputs channels,
            and width and height should be no smaller than 8.
            E.g. `(224, 224, 224, 3)` would be one valid value.
        depth: number of layers in the DenseNet
        nb_dense_block: number of dense blocks to add to end
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. -1 indicates initial
            number of filters will default to 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
            Can be a -1, positive integer or a list.
            If -1, calculates nb_layer_per_block from the network depth.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be nb_dense_block
        bottleneck: flag to add bottleneck blocks in between dense blocks
        reduction: reduction factor of transition blocks.
            Note : reduction value is inverted to compute compression.
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Changes model type to suit different datasets.
            Should be set to True for ImageNet, and False for CIFAR datasets.
            When set to True, the initial convolution will be strided and
            adds a MaxPooling3D before the initial dense block.
        include_top: whether to include the fully-connected
            layer at the top of the network.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 4D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True.
        activation: Type of activation at the top layer. Can be one of
            'softmax' or 'sigmoid'. Note that if sigmoid is used,
            classes must be 1.
        transition_pooling: `avg` for avg pooling (default), `max` for max pooling,
            None for no pooling during scale transition blocks. Please note that this
            default differs from the DenseNetFCN paper in accordance with the DenseNet
            paper.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid input shape.
    '''
    # Validate the classifier head configuration up front.
    if activation not in ['softmax', 'sigmoid']:
        raise ValueError('activation must be one of "softmax" or "sigmoid"')
    if activation == 'sigmoid' and classes != 1:
        raise ValueError('sigmoid activation can only be used when classes = 1')
    # Resolve the model input: build a fresh Input, wrap a plain tensor in an
    # Input, or reuse an existing Keras tensor as-is.
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Delegate the actual layer graph construction to the module-private builder.
    x = __create_dense_net(classes, img_input, include_top, depth, nb_dense_block,
                           growth_rate, nb_filter, nb_layers_per_block, bottleneck,
                           reduction, dropout_rate, weight_decay,
                           subsample_initial_block, pooling, activation,
                           transition_pooling)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='densenet')
    return model
def DenseNet3D_FCN(input_shape, nb_dense_block=5, growth_rate=16, nb_layers_per_block=4,
                   reduction=0.0, dropout_rate=0.0, weight_decay=1E-4,
                   init_conv_filters=48, include_top=True, input_tensor=None,
                   classes=1, activation='softmax', upsampling_conv=128,
                   upsampling_type='deconv', early_transition=False,
                   transition_pooling='max', initial_kernel_size=(3, 3, 3)):
    '''Instantiate the 3D DenseNet-FCN architecture.
    Note that when using TensorFlow,
    for best performance you should set
    `image_data_format='channels_last'` in your Keras config
    at ~/.keras/keras.json.
    # Arguments
        input_shape: shape tuple, e.g. `(32, 32, 32, 3)` (with `channels_last`
            dim ordering) or `(3, 32, 32, 32)` (with `channels_first` dim
            ordering). Required: the fully convolutional decoder needs a
            concrete shape.
        nb_dense_block: number of dense blocks on the downsampling path
        growth_rate: number of filters to add per dense block
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        reduction: reduction factor of transition blocks.
            Note : reduction value is inverted to compute compression.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        init_conv_filters: number of filters in the initial convolution layer
        include_top: whether to include the fully-connected
            layer at the top of the network.
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True.
        activation: Type of activation at the top layer. Can be one of 'softmax'
            or 'sigmoid'. Note that if sigmoid is used, classes must be 1.
        upsampling_conv: number of convolutional filters in upsampling via
            subpixel convolution
        upsampling_type: Can be one of 'deconv', 'upsampling' and
            'subpixel'. Defines type of upsampling algorithm used.
        early_transition: Start with an extra initial transition down and end with
            an extra transition up to reduce the network size.
        transition_pooling: 'max' for max pooling (default), 'avg' for average
            pooling, None for no pooling during scale transition blocks.
        initial_kernel_size: The first Conv3D kernel might vary in size based on
            the application, this parameter makes it configurable.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of an invalid argument.
    '''
    upsampling_type = upsampling_type.lower()
    if upsampling_type not in ['upsampling', 'deconv', 'subpixel']:
        raise ValueError('Parameter "upsampling_type" must be one of "upsampling", '
                         '"deconv" or "subpixel".')
    if input_shape is None:
        raise ValueError('For fully convolutional models, '
                         'input shape must be supplied.')
    # Bug fix: this check validates `nb_dense_block`, but it previously
    # reported `nb_layers_per_block` in the message and was skipped entirely
    # whenever a list was supplied.
    if nb_dense_block < 1:
        raise ValueError('`nb_dense_block` must be at least 1. '
                         'Argument value was %d.' % nb_dense_block)
    if activation not in ['softmax', 'sigmoid']:
        raise ValueError('activation must be one of "softmax" or "sigmoid"')
    if activation == 'sigmoid' and classes != 1:
        raise ValueError('sigmoid activation can only be used when classes = 1')
    # Determine proper input shape. Each transition halves every spatial
    # dimension, so all three spatial dims must be at least 2**nb_dense_block
    # for the decoder to mirror the encoder.
    min_size = 2 ** nb_dense_block
    if K.image_data_format() == 'channels_first':
        if input_shape is not None:
            if ((input_shape[1] is not None and input_shape[1] < min_size) or
                    (input_shape[2] is not None and input_shape[2] < min_size) or
                    (input_shape[3] is not None and input_shape[3] < min_size)):
                raise ValueError('Input size must be at least ' +
                                 str(min_size) + 'x' + str(min_size) +
                                 'x' + str(min_size) +
                                 ', got `input_shape=' + str(input_shape) + '`')
        else:
            input_shape = (classes, None, None, None)
    else:
        if input_shape is not None:
            if ((input_shape[0] is not None and input_shape[0] < min_size) or
                    (input_shape[1] is not None and input_shape[1] < min_size) or
                    (input_shape[2] is not None and input_shape[2] < min_size)):
                raise ValueError('Input size must be at least ' +
                                 str(min_size) + 'x' + str(min_size) +
                                 'x' + str(min_size) +
                                 ', got `input_shape=' + str(input_shape) + '`')
        else:
            input_shape = (None, None, None, classes)
    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    x = __create_fcn_dense_net(classes, img_input, include_top, nb_dense_block,
                               growth_rate, reduction, dropout_rate, weight_decay,
                               nb_layers_per_block, upsampling_conv, upsampling_type,
                               init_conv_filters, input_shape, activation,
                               early_transition, transition_pooling,
                               initial_kernel_size)
    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='fcn-densenet')
    return model
def DenseNet3DImageNet121(input_shape=None,
                          bottleneck=True,
                          reduction=0.5,
                          dropout_rate=0.0,
                          weight_decay=1e-4,
                          include_top=True,
                          input_tensor=None,
                          pooling=None,
                          classes=1000,
                          activation='softmax'):
    '''3D DenseNet-121: four dense blocks of 6/12/24/16 layers with an
    ImageNet-style strided stem. All other options are forwarded to
    `DenseNet3D` unchanged.'''
    # Architecture constants that define the 121-layer variant.
    arch = dict(depth=121, nb_dense_block=4, growth_rate=32, nb_filter=64,
                nb_layers_per_block=[6, 12, 24, 16], subsample_initial_block=True)
    return DenseNet3D(input_shape,
                      bottleneck=bottleneck, reduction=reduction,
                      dropout_rate=dropout_rate, weight_decay=weight_decay,
                      include_top=include_top, input_tensor=input_tensor,
                      pooling=pooling, classes=classes, activation=activation,
                      **arch)
def DenseNet3DImageNet169(input_shape=None,
                          bottleneck=True,
                          reduction=0.5,
                          dropout_rate=0.0,
                          weight_decay=1e-4,
                          include_top=True,
                          input_tensor=None,
                          pooling=None,
                          classes=1000,
                          activation='softmax'):
    '''3D DenseNet-169: four dense blocks of 6/12/32/32 layers with an
    ImageNet-style strided stem. All other options are forwarded to
    `DenseNet3D` unchanged.'''
    # Architecture constants that define the 169-layer variant.
    arch = dict(depth=169, nb_dense_block=4, growth_rate=32, nb_filter=64,
                nb_layers_per_block=[6, 12, 32, 32], subsample_initial_block=True)
    return DenseNet3D(input_shape,
                      bottleneck=bottleneck, reduction=reduction,
                      dropout_rate=dropout_rate, weight_decay=weight_decay,
                      include_top=include_top, input_tensor=input_tensor,
                      pooling=pooling, classes=classes, activation=activation,
                      **arch)
def DenseNet3DImageNet201(input_shape=None,
                          bottleneck=True,
                          reduction=0.5,
                          dropout_rate=0.0,
                          weight_decay=1e-4,
                          include_top=True,
                          input_tensor=None,
                          pooling=None,
                          classes=1000,
                          activation='softmax'):
    '''3D DenseNet-201: four dense blocks of 6/12/48/32 layers with an
    ImageNet-style strided stem. All other options are forwarded to
    `DenseNet3D` unchanged.'''
    # Architecture constants that define the 201-layer variant.
    arch = dict(depth=201, nb_dense_block=4, growth_rate=32, nb_filter=64,
                nb_layers_per_block=[6, 12, 48, 32], subsample_initial_block=True)
    return DenseNet3D(input_shape,
                      bottleneck=bottleneck, reduction=reduction,
                      dropout_rate=dropout_rate, weight_decay=weight_decay,
                      include_top=include_top, input_tensor=input_tensor,
                      pooling=pooling, classes=classes, activation=activation,
                      **arch)
def DenseNet3DImageNet264(input_shape=None,
                          bottleneck=True,
                          reduction=0.5,
                          dropout_rate=0.0,
                          weight_decay=1e-4,
                          include_top=True,
                          input_tensor=None,
                          pooling=None,
                          classes=1000,
                          activation='softmax'):
    '''3D DenseNet-264: four dense blocks of 6/12/64/48 layers with an
    ImageNet-style strided stem. All other options are forwarded to
    `DenseNet3D` unchanged.'''
    # Architecture constants that define the 264-layer variant.
    arch = dict(depth=264, nb_dense_block=4, growth_rate=32, nb_filter=64,
                nb_layers_per_block=[6, 12, 64, 48], subsample_initial_block=True)
    return DenseNet3D(input_shape,
                      bottleneck=bottleneck, reduction=reduction,
                      dropout_rate=dropout_rate, weight_decay=weight_decay,
                      include_top=include_top, input_tensor=input_tensor,
                      pooling=pooling, classes=classes, activation=activation,
                      **arch)
def DenseNetImageNet161(input_shape=None,
                        bottleneck=True,
                        reduction=0.5,
                        dropout_rate=0.0,
                        weight_decay=1e-4,
                        include_top=True,
                        input_tensor=None,
                        pooling=None,
                        classes=1000,
                        activation='softmax'):
    '''3D DenseNet-161: four dense blocks of 6/12/36/24 layers with growth
    rate 48 and a 96-filter ImageNet-style stem.

    NOTE(review): unlike its siblings this name lacks the "3D" prefix
    (DenseNet3DImageNetXXX); kept as-is for backward compatibility.'''
    # Architecture constants that define the 161-layer variant.
    arch = dict(depth=161, nb_dense_block=4, growth_rate=48, nb_filter=96,
                nb_layers_per_block=[6, 12, 36, 24], subsample_initial_block=True)
    return DenseNet3D(input_shape,
                      bottleneck=bottleneck, reduction=reduction,
                      dropout_rate=dropout_rate, weight_decay=weight_decay,
                      include_top=include_top, input_tensor=input_tensor,
                      pooling=pooling, classes=classes, activation=activation,
                      **arch)
def name_or_none(prefix, name):
    '''Join `prefix` and `name` into a layer name, or return None when
    either piece is missing (Keras then auto-generates a layer name).'''
    if prefix is None or name is None:
        return None
    return prefix + name
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None,
                 weight_decay=1e-4, block_prefix=None):
    '''
    Adds a convolution layer (with batch normalization and relu),
    and optionally a bottleneck layer.
    # Arguments
        ip: Input tensor
        nb_filter: integer, the dimensionality of the output space
            (i.e. the number output of filters in the convolution)
        bottleneck: if True, adds a bottleneck convolution block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        block_prefix: str, for unique layer naming
    # Input shape
        5D tensor with shape:
        `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
        or 5D tensor with shape:
        `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
    # Output shape
        5D tensor with `nb_filter` channels; spatial dimensions are
        preserved (`padding='same'`, stride 1).
    # Returns
        output tensor of block
    '''
    with K.name_scope('ConvBlock'):
        concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
        # Pre-activation ordering (BN -> ReLU -> Conv), as used by DenseNet.
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
                               name=name_or_none(block_prefix, '_bn'))(ip)
        x = Activation('relu')(x)
        if bottleneck:
            # 1x1x1 bottleneck reduces to 4*nb_filter channels before the
            # more expensive 3x3x3 convolution.
            inter_channel = nb_filter * 4
            x = Conv3D(inter_channel, (1, 1, 1), kernel_initializer='he_normal',
                       padding='same', use_bias=False,
                       kernel_regularizer=l2(weight_decay),
                       name=name_or_none(block_prefix, '_bottleneck_Conv3D'))(x)
            x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
                                   name=name_or_none(block_prefix, '_bottleneck_bn'))(x)
            x = Activation('relu')(x)
        # NOTE(review): unlike the bottleneck conv above, this Conv3D carries
        # no kernel_regularizer — confirm whether that is intentional.
        x = Conv3D(nb_filter, (3, 3, 3), kernel_initializer='he_normal', padding='same',
                   use_bias=False, name=name_or_none(block_prefix, '_Conv3D'))(x)
        if dropout_rate:
            x = Dropout(dropout_rate)(x)
        return x
def __dense_block(x, nb_layers, nb_filter, growth_rate, bottleneck=False,
                  dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True,
                  return_concat_list=False, block_prefix=None):
    '''
    Build a dense_block where the output of each conv_block is fed
    to subsequent ones
    # Arguments
        x: input keras tensor
        nb_layers: the number of conv_blocks to append to the model
        nb_filter: integer, the dimensionality of the output space
            (i.e. the number output of filters in the convolution)
        growth_rate: growth rate of the dense block
        bottleneck: if True, adds a bottleneck convolution block to
            each conv_block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        grow_nb_filters: if True, allows number of filters to grow
        return_concat_list: set to True to return the list of
            feature maps along with the actual output
        block_prefix: str, for block unique naming
    # Return
        If return_concat_list is True, returns a list of the output
        keras tensor, the number of filters and a list of all the
        dense blocks added to the keras tensor
        If return_concat_list is False, returns a list of the output
        keras tensor and the number of filters
    '''
    with K.name_scope('DenseBlock'):
        concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
        # DenseNet connectivity: every conv block sees the concatenation of
        # the block input and all previous conv-block outputs.
        x_list = [x]
        for i in range(nb_layers):
            cb = __conv_block(x, growth_rate, bottleneck, dropout_rate, weight_decay,
                              block_prefix=name_or_none(block_prefix, '_%i' % i))
            x_list.append(cb)
            x = concatenate([x, cb], axis=concat_axis)
            # Each layer contributes `growth_rate` new feature maps.
            if grow_nb_filters:
                nb_filter += growth_rate
        if return_concat_list:
            return x, nb_filter, x_list
        else:
            return x, nb_filter
def __transition_block(ip, nb_filter, compression=1.0, weight_decay=1e-4,
                       block_prefix=None, transition_pooling='max'):
    '''
    Adds a pointwise convolution layer (with batch normalization and relu),
    and a pooling layer. The number of output convolution filters
    can be reduced by appropriately reducing the compression parameter.
    # Arguments
        ip: input keras tensor
        nb_filter: integer, the dimensionality of the output space
            (i.e. the number output of filters in the convolution)
        compression: calculated as 1 - reduction. Reduces the number
            of feature maps in the transition block.
        weight_decay: weight decay factor
        block_prefix: str, for block unique naming
        transition_pooling: 'avg' for average pooling, 'max' for max
            pooling; any other value (e.g. None) skips pooling entirely
    # Input shape
        5D tensor with shape:
        `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
        or 5D tensor with shape:
        `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
    # Output shape
        5D tensor with `nb_filter * compression` channels; each spatial
        dimension is halved when pooling is applied.
    # Returns
        a keras tensor
    '''
    with K.name_scope('Transition'):
        concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
                               name=name_or_none(block_prefix, '_bn'))(ip)
        x = Activation('relu')(x)
        # 1x1x1 convolution compresses the channel count by `compression`.
        x = Conv3D(int(nb_filter * compression), (1, 1, 1), kernel_initializer='he_normal',
                   padding='same', use_bias=False, kernel_regularizer=l2(weight_decay),
                   name=name_or_none(block_prefix, '_Conv3D'))(x)
        if transition_pooling == 'avg':
            x = AveragePooling3D((2, 2, 2), strides=(2, 2, 2))(x)
        elif transition_pooling == 'max':
            x = MaxPooling3D((2, 2, 2), strides=(2, 2, 2))(x)
        return x
def __transition_up_block(ip, nb_filters, type='deconv', weight_decay=1E-4,
                          block_prefix=None):
    '''Adds an upsampling block. Upsampling operation relies on the the type parameter.
    # Arguments
        ip: input keras tensor
        nb_filters: integer, the dimensionality of the output space
            (i.e. the number output of filters in the convolution)
        type: can be 'upsampling', 'subpixel', 'deconv'. Determines
            type of upsampling performed
        weight_decay: weight decay factor
        block_prefix: str, for block unique naming
    # Input shape
        5D tensor with shape:
        `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'
        or 5D tensor with shape:
        `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.
    # Output shape
        5D tensor with each spatial dimension doubled for 'upsampling' and
        'deconv' (see the NOTE below regarding 'subpixel').
    # Returns
        a keras tensor
    '''
    with K.name_scope('TransitionUp'):
        if type == 'upsampling':
            x = UpSampling3D(name=name_or_none(block_prefix, '_upsampling'))(ip)
        elif type == 'subpixel':
            # NOTE(review): SubPixelUpscaling is commented out below, so this
            # branch currently applies two Conv3D layers and performs NO
            # spatial upsampling; both convs also share the same layer name,
            # which Keras rejects when block_prefix is set — confirm before
            # relying on 'subpixel'.
            x = Conv3D(nb_filters, (3, 3, 3), activation='relu', padding='same',
                       kernel_regularizer=l2(weight_decay), use_bias=False,
                       kernel_initializer='he_normal',
                       name=name_or_none(block_prefix, '_Conv3D'))(ip)
            # x = SubPixelUpscaling(scale_factor=2,
            #                       name=name_or_none(block_prefix, '_subpixel'))(x)
            x = Conv3D(nb_filters, (3, 3, 3), activation='relu', padding='same',
                       kernel_regularizer=l2(weight_decay), use_bias=False,
                       kernel_initializer='he_normal',
                       name=name_or_none(block_prefix, '_Conv3D'))(x)
        else:
            x = Conv3DTranspose(nb_filters, (3, 3, 3), activation='relu', padding='same',
                                strides=(2, 2, 2), kernel_initializer='he_normal',
                                kernel_regularizer=l2(weight_decay),
                                name=name_or_none(block_prefix, '_Conv3DT'))(ip)
        return x
def __create_dense_net(nb_classes, img_input, include_top, depth=40, nb_dense_block=3,
                       growth_rate=12, nb_filter=-1, nb_layers_per_block=-1,
                       bottleneck=False, reduction=0.0, dropout_rate=None,
                       weight_decay=1e-4, subsample_initial_block=False, pooling=None,
                       activation='softmax', transition_pooling='avg'):
    ''' Build the DenseNet model
    # Arguments
        nb_classes: number of classes
        img_input: input Keras tensor (i.e. output of `layers.Input()`),
            not a shape tuple
        include_top: flag to include the final Dense layer
        depth: number or layers
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters. Default -1 indicates initial number
            of filters is 2 * growth_rate
        nb_layers_per_block: number of layers in each dense block.
            Can be a -1, positive integer or a list.
            If -1, calculates nb_layer_per_block from the depth of the network.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        bottleneck: add bottleneck blocks
        reduction: reduction factor of transition blocks. Note : reduction value is
            inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay rate
        subsample_initial_block: Changes model type to suit different datasets.
            Should be set to True for ImageNet, and False for CIFAR datasets.
            When set to True, the initial convolution will be strided and
            adds a MaxPooling3D before the initial dense block.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model
                will be the 5D tensor output of the
                last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a
                2D tensor.
            - `max` means that global max pooling will
                be applied.
        activation: Type of activation at the top layer. Can be one of 'softmax' or
            'sigmoid'. Note that if sigmoid is used, classes must be 1.
        transition_pooling: `avg` for avg pooling (default), `max` for max pooling,
            None for no pooling during scale transition blocks. Please note that this
            default differs from the DenseNetFCN paper in accordance with the DenseNet
            paper.
    # Returns
        a keras tensor
    # Raises
        ValueError: in case of invalid argument for `reduction`
            or `nb_dense_block`
    '''
    with K.name_scope('DenseNet'):
        concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
        if reduction != 0.0:
            if not (reduction <= 1.0 and reduction > 0.0):
                raise ValueError('`reduction` value must lie between 0.0 and 1.0')
        # layers in each dense block
        if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
            nb_layers = list(nb_layers_per_block) # Convert tuple to list
            # NOTE(review): the message below names `nb_dense_block` but the
            # list actually validated here is `nb_layers_per_block`.
            if len(nb_layers) != nb_dense_block:
                raise ValueError('If `nb_dense_block` is a list, its length must match '
                                 'the number of layers provided by `nb_layers`.')
            final_nb_layer = nb_layers[-1]
            nb_layers = nb_layers[:-1]
        else:
            if nb_layers_per_block == -1:
                # Derive a uniform per-block layer count from the total depth.
                assert (depth - 4) % 3 == 0, ('Depth must be 3 N + 4 '
                                              'if nb_layers_per_block == -1')
                count = int((depth - 4) / 3)
                # A bottleneck layer counts as two convolutions.
                if bottleneck:
                    count = count // 2
                nb_layers = [count for _ in range(nb_dense_block)]
                final_nb_layer = count
            else:
                final_nb_layer = nb_layers_per_block
                nb_layers = [nb_layers_per_block] * nb_dense_block
        # compute initial nb_filter if -1, else accept users initial nb_filter
        if nb_filter <= 0:
            nb_filter = 2 * growth_rate
        # compute compression factor
        compression = 1.0 - reduction
        # Initial convolution: ImageNet-style strided 7x7x7 stem plus max
        # pool, or a plain 3x3x3 CIFAR-style stem.
        if subsample_initial_block:
            initial_kernel = (7, 7, 7)
            initial_strides = (2, 2, 2)
        else:
            initial_kernel = (3, 3, 3)
            initial_strides = (1, 1, 1)
        x = Conv3D(nb_filter, initial_kernel, kernel_initializer='he_normal',
                   padding='same', name='initial_Conv3D', strides=initial_strides,
                   use_bias=False, kernel_regularizer=l2(weight_decay))(img_input)
        if subsample_initial_block:
            x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5,
                                   name='initial_bn')(x)
            x = Activation('relu')(x)
            x = MaxPooling3D((3, 3, 3), strides=(2, 2, 2), padding='same')(x)
        # Add dense blocks
        for block_idx in range(nb_dense_block - 1):
            x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter,
                                         growth_rate, bottleneck=bottleneck,
                                         dropout_rate=dropout_rate,
                                         weight_decay=weight_decay,
                                         block_prefix='dense_%i' % block_idx)
            # add transition_block
            x = __transition_block(x, nb_filter, compression=compression,
                                   weight_decay=weight_decay,
                                   block_prefix='tr_%i' % block_idx,
                                   transition_pooling=transition_pooling)
            nb_filter = int(nb_filter * compression)
        # The last dense_block does not have a transition_block
        x, nb_filter = __dense_block(x, final_nb_layer, nb_filter, growth_rate,
                                     bottleneck=bottleneck, dropout_rate=dropout_rate,
                                     weight_decay=weight_decay,
                                     block_prefix='dense_%i' % (nb_dense_block - 1))
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5, name='final_bn')(x)
        x = Activation('relu')(x)
        if include_top:
            if pooling == 'avg':
                x = GlobalAveragePooling3D()(x)
            elif pooling == 'max':
                x = GlobalMaxPooling3D()(x)
            x = Dense(nb_classes, activation=activation)(x)
        else:
            if pooling == 'avg':
                x = GlobalAveragePooling3D()(x)
            elif pooling == 'max':
                x = GlobalMaxPooling3D()(x)
        return x
def __create_fcn_dense_net(nb_classes, img_input, include_top, nb_dense_block=5,
                           growth_rate=12, reduction=0.0, dropout_rate=None,
                           weight_decay=1e-4, nb_layers_per_block=4,
                           nb_upsampling_conv=128, upsampling_type='deconv',
                           init_conv_filters=48, input_shape=None, activation='softmax',
                           early_transition=False, transition_pooling='max',
                           initial_kernel_size=(3, 3, 3)):
    ''' Build the DenseNet-FCN model
    # Arguments
        nb_classes: number of classes
        img_input: input Keras tensor (i.e. output of `layers.Input()`),
            not a shape tuple
        include_top: flag to include the final Dense layer
        nb_dense_block: number of dense blocks to add to end (generally = 3)
        growth_rate: number of filters to add per dense block
        reduction: reduction factor of transition blocks. Note : reduction value
            is inverted to compute compression
        dropout_rate: dropout rate
        weight_decay: weight decay
        nb_layers_per_block: number of layers in each dense block.
            Can be a positive integer or a list.
            If positive integer, a set number of layers per dense block.
            If list, nb_layer is used as provided. Note that list size must
            be (nb_dense_block + 1)
        nb_upsampling_conv: number of convolutional layers in upsampling via subpixel
            convolution
        upsampling_type: Can be one of 'upsampling', 'deconv' and 'subpixel'. Defines
            type of upsampling algorithm used.
        input_shape: Only used for shape inference in fully convolutional networks.
        activation: Type of activation at the top layer. Can be one of 'softmax' or
            'sigmoid'. Note that if sigmoid is used, classes must be 1.
        early_transition: Start with an extra initial transition down and end with an
            extra transition up to reduce the network size.
        transition_pooling: 'max' for max pooling (default), 'avg' for average pooling,
            None for no pooling. Please note that this default differs from the DenseNet
            paper in accordance with the DenseNetFCN paper.
        initial_kernel_size: The first Conv3D kernel might vary in size based on the
            application, this parameter makes it configurable.
    # Returns
        a keras tensor
    # Raises
        ValueError: in case of invalid argument for `reduction`,
            `nb_dense_block` or `nb_upsampling_conv`.
    '''
    with K.name_scope('DenseNetFCN'):
        concat_axis = 1 if K.image_data_format() == 'channels_first' else -1
        if concat_axis == 1: # channels_first dim ordering
            _, rows, cols, height = input_shape
        else:
            rows, cols, height, _ = input_shape
        if reduction != 0.0:
            if not (reduction <= 1.0 and reduction > 0.0):
                raise ValueError('`reduction` value must lie between 0.0 and 1.0')
        # check if upsampling_conv has minimum number of filters minimum
        # is set to 12, as at least 3 color channels are needed for correct upsampling
        if not (nb_upsampling_conv > 12 and nb_upsampling_conv % 4 == 0):
            raise ValueError('Parameter `nb_upsampling_conv` number of channels must '
                             'be a positive number divisible by 4 and greater than 12')
        # layers in each dense block
        if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:
            nb_layers = list(nb_layers_per_block) # Convert tuple to list
            # NOTE(review): the message below names `nb_dense_block` but the
            # list actually validated here is `nb_layers_per_block`.
            if len(nb_layers) != (nb_dense_block + 1):
                raise ValueError('If `nb_dense_block` is a list, its length must be '
                                 '(`nb_dense_block` + 1)')
            bottleneck_nb_layers = nb_layers[-1]
            rev_layers = nb_layers[::-1]
            # Mirror the encoder layer counts onto the decoder path.
            nb_layers.extend(rev_layers[1:])
        else:
            bottleneck_nb_layers = nb_layers_per_block
            nb_layers = [nb_layers_per_block] * (2 * nb_dense_block + 1)
        # compute compression factor
        compression = 1.0 - reduction
        # Initial convolution
        x = Conv3D(init_conv_filters, initial_kernel_size,
                   kernel_initializer='he_normal', padding='same',
                   name='initial_Conv3D', use_bias=False,
                   kernel_regularizer=l2(weight_decay))(img_input)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5, name='initial_bn')(x)
        x = Activation('relu')(x)
        nb_filter = init_conv_filters
        skip_list = []
        if early_transition:
            x = __transition_block(x, nb_filter, compression=compression,
                                   weight_decay=weight_decay, block_prefix='tr_early',
                                   transition_pooling=transition_pooling)
        # Add dense blocks and transition down block
        for block_idx in range(nb_dense_block):
            x, nb_filter = __dense_block(x, nb_layers[block_idx], nb_filter,
                                         growth_rate, dropout_rate=dropout_rate,
                                         weight_decay=weight_decay,
                                         block_prefix='dense_%i' % block_idx)
            # Skip connection
            skip_list.append(x)
            # add transition_block
            x = __transition_block(x, nb_filter, compression=compression,
                                   weight_decay=weight_decay,
                                   block_prefix='tr_%i' % block_idx,
                                   transition_pooling=transition_pooling)
            # this is calculated inside transition_down_block
            nb_filter = int(nb_filter * compression)
        # The last dense_block does not have a transition_down_block
        # return the concatenated feature maps without the concatenation of the input
        block_prefix = 'dense_%i' % nb_dense_block
        _, nb_filter, concat_list = __dense_block(x, bottleneck_nb_layers, nb_filter,
                                                  growth_rate,
                                                  dropout_rate=dropout_rate,
                                                  weight_decay=weight_decay,
                                                  return_concat_list=True,
                                                  block_prefix=block_prefix)
        skip_list = skip_list[::-1] # reverse the skip list
        # Add dense blocks and transition up block
        for block_idx in range(nb_dense_block):
            n_filters_keep = growth_rate * nb_layers[nb_dense_block + block_idx]
            # upsampling block must upsample only the feature maps (concat_list[1:]),
            # not the concatenation of the input with the feature maps (concat_list[0])
            l = concatenate(concat_list[1:], axis=concat_axis)
            t = __transition_up_block(l, nb_filters=n_filters_keep,
                                      type=upsampling_type, weight_decay=weight_decay,
                                      block_prefix='tr_up_%i' % block_idx)
            # concatenate the skip connection with the transition block
            x = concatenate([t, skip_list[block_idx]], axis=concat_axis)
            # Dont allow the feature map size to grow in upsampling dense blocks
            block_layer_index = nb_dense_block + 1 + block_idx
            block_prefix = 'dense_%i' % (block_layer_index)
            x_up, nb_filter, concat_list = __dense_block(x,
                                                         nb_layers[block_layer_index],
                                                         nb_filter=growth_rate,
                                                         growth_rate=growth_rate,
                                                         dropout_rate=dropout_rate,
                                                         weight_decay=weight_decay,
                                                         return_concat_list=True,
                                                         grow_nb_filters=False,
                                                         block_prefix=block_prefix)
        if early_transition:
            x_up = __transition_up_block(x_up, nb_filters=nb_filter,
                                         type=upsampling_type,
                                         weight_decay=weight_decay,
                                         block_prefix='tr_up_early')
        if include_top:
            # 1x1x1 conv to per-voxel class scores; the Reshape/Activation/
            # Reshape sandwich applies the activation per voxel before
            # restoring the original volume layout.
            x = Conv3D(nb_classes, (1, 1, 1), activation='linear', padding='same',
                       use_bias=False)(x_up)
            if K.image_data_format() == 'channels_first':
                channel, row, col, height = input_shape
            else:
                row, col, height, channel = input_shape
            x = Reshape((row * col * height, nb_classes))(x)
            x = Activation(activation)(x)
            x = Reshape((row, col, height, nb_classes))(x)
        else:
            x = x_up
        return x
# + [markdown] id="DcnH_jSYj1XK" colab_type="text"
# Example
#
#
#
# ```
# # model = DenseNet3DImageNet121((224,224,224,1), classes=10, pooling='avg')
# ```
#
#
# + id="jVDghrCQ53Fu" colab_type="code" colab={}
# | CNNs 3D/densenet.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Create a local Spark session for the image-classification workflow.
spark = SparkSession.builder \
.master("local") \
.appName("ImageClassification") \
.config("spark.executor.memory", "6gb") \
.getOrCreate()
import pyspark.sql.functions as f
import sparkdl as dl
# Load each player's image folder and attach an integer label:
# 0 = Messi, 1 = Ronaldo.
dfMessi = dl.readImages('football/messi/').withColumn('label', f.lit(0))
dfRonaldo = dl.readImages('football/ronaldo/').withColumn('label', f.lit(1))
dfMessi.show(n=10,truncate=False)
dfRonaldo.show(n=10,truncate=False)
# Split each player's images roughly 2:1 into train/test. Spark normalizes
# the weights, so [66.7, 33.3] behaves like [2/3, 1/3]; the seed makes the
# split reproducible.
trainDFmessi, testDFmessi = dfMessi.randomSplit([66.7, 33.3], seed =12)
trainDFronaldo, testDFronaldo = dfRonaldo.randomSplit([66.7, 33.3], seed=12)
# Use DataFrame.count() for row counts instead of collecting every image
# into a pandas DataFrame (toPandas().shape[0]) just to measure its length.
print('The number of images in trainDFmessi is {}'.format(trainDFmessi.count()))
print('The number of images in testDFmessi is {}'.format(testDFmessi.count()))
print('The number of images in trainDFronaldo is {}'.format(trainDFronaldo.count()))
print('The number of images in testDFronaldo is {}'.format(testDFronaldo.count()))
# Combine both players into single training and testing sets.
trainDF = trainDFmessi.unionAll(trainDFronaldo)
testDF = testDFmessi.unionAll(testDFronaldo)
print('The number of images in the training data is {}' .format(trainDF.count()))
print('The number of images in the testing data is {}' .format(testDF.count()))
# +
from pyspark.ml.classification import LogisticRegression
from pyspark.ml import Pipeline
# Transfer learning: featurize images with a pre-trained InceptionV3, then
# train a logistic-regression classifier on top of the extracted features.
vectorizer = dl.DeepImageFeaturizer(inputCol="image", outputCol="features", modelName='InceptionV3')
logreg = LogisticRegression(maxIter=30,labelCol = "label", featuresCol="features")
pipeline = Pipeline(stages=[vectorizer, logreg])
pipeline_model = pipeline.fit(trainDF)
# -
predictDF = pipeline_model.transform(testDF)
# NOTE(review): toPandas().shape[0] collects the whole test set just to size
# the show(); testDF.count() would avoid that.
predictDF.select('label', 'prediction').show(n = testDF.toPandas().shape[0], truncate=False)
predictDF.crosstab('prediction', 'label').show()
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# Overall accuracy on the held-out test set (metricName="accuracy").
scoring = predictDF.select("prediction", "label")
accuracy_score = MulticlassClassificationEvaluator(metricName="accuracy")
rate = accuracy_score.evaluate(scoring)*100
print("accuracy: {}%" .format(round(rate,2)))
# +
from pyspark.ml.evaluation import BinaryClassificationEvaluator
# BinaryClassificationEvaluator's default metric is areaUnderROC, not
# accuracy — label the printed number accordingly.
binaryevaluator = BinaryClassificationEvaluator(rawPredictionCol="prediction")
binary_rate = binaryevaluator.evaluate(predictDF)*100
print("area under ROC: {}%" .format(round(binary_rate,2)))
# +
# Re-train with elastic-net regularization and fewer iterations to reduce
# overfitting relative to the first model.
logregFT = LogisticRegression(
regParam=0.05,
elasticNetParam=0.3,
maxIter=15,labelCol = "label", featuresCol="features")
pipelineFT = Pipeline(stages=[vectorizer, logregFT])
pipeline_model_FT = pipelineFT.fit(trainDF)
# -
# Score the regularized model on the same test set.
predictDF_FT = pipeline_model_FT.transform(testDF)
predictDF_FT.crosstab('prediction', 'label').show()
# The evaluator computes areaUnderROC (its default metric), so report it as
# such rather than as "accuracy".
binary_rate_FT = binaryevaluator.evaluate(predictDF_FT)*100
print("area under ROC: {}%" .format(round(binary_rate_FT,2)))
# | Chapter13/code/Image+Classification+with+TensorFlow+on+Spark.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: allen
# language: python
# name: allen
# ---
# ## DiMSUM Confidence Scores
#
# - Modified dimsum_to_jsonl.py to include label field by composing subtag fields into one string
# - Read dimsum using dataset_reader
# - Convert all.csv to all_newlabels.csv
# Notebook starts in notebooks folder. Change working directory back to streusle-tagger
# %cd ../../../
# +
# System imports
import json
import math
import os
import pickle
import sys
from copy import deepcopy
# Add parent of streusle-tagger to path (streusle should be in this folder)
sys.path.append("../streusle")
# External imports
import allennlp.nn.util as util
import numpy as np
import pandas as pd
from allennlp.common import Params
from allennlp.common.util import import_submodules
from allennlp.data.dataset_readers import DatasetReader
from allennlp.training.util import datasets_from_params
# Register the project's custom AllenNLP components so `from_params`
# can resolve them by name.
import_submodules("streusle_tagger")
params = Params.from_file("training_config/streusle_bert_large_cased/streusle_bert_large_cased_no_constraints.jsonnet")
# `datasets_from_params` and `Params.pop` mutate the params object, hence
# each consumer gets its own deepcopy.
datasets = datasets_from_params(deepcopy(params))
dataset_reader_params = deepcopy(params).pop("dataset_reader")
dataset_reader = DatasetReader.from_params(dataset_reader_params)
# +
# Mapping from original label strings to consolidated label groups.
with open("calibration/consolidated_labels.pickle", "rb") as f:
    new_labels = pickle.load(f)
dimsum_consolidated_path = "calibration/confidence_scores/dimsum_test/all_consolidated.csv"
# +
labels_df = pd.read_csv("calibration/labels_dict.csv")
dimsum_test_path = "data/dimsum16/dimsum16_test_updated_labeled_reformatted.json"
def read(file_path):
    """Yield an AllenNLP Instance per sentence in a reformatted DiMSUM
    JSON file, logging progress every 200 sentences."""
    with open(file_path, 'r') as tagging_file:
        sentences = json.load(tagging_file)
    for sent_idx, sentence in enumerate(sentences):
        if sent_idx % 200 == 0:
            print(sent_idx)
        yield dataset_reader.text_to_instance(
            list(sentence["tokens"]),
            list(sentence["upos_tags"]),
            list(sentence["lemmas"]),
            list(sentence["label"]),
        )
dimsum_test = list(read(dimsum_test_path))
# -
# Collect the gold ("ground") tag for every token in the test set.
ground = []
for i in dimsum_test:
    ground.extend(i.get("tags").labels)
# +
corrected_ground = []
# If there's a noun or verb supersense label, the lexcat has to be NOUN or VERB (exceptions are usually due to MWEs)
for g in ground:
    # Splice the corrected lexcat between the first hyphen (search from
    # index 1) and the following hyphen of the composite tag string.
    if "-n." in g and "NOUN" not in g:
        x = g[:g.index("-", 1) + 1] + "NOUN" + g[g.index("-", 2):]
    elif "-v." in g and "VERB" not in g:
        x = g[:g.index("-", 1) + 1] + "VERB" + g[g.index("-", 2):]
    else:
        x = g
    corrected_ground.append(x)
# +
# Merge the per-sentence confidence-score CSVs into a single file.
confidence_scores_path = "calibration/confidence_scores/dimsum_test"
dfs = []
for filename in os.listdir(confidence_scores_path):
    # Per-sentence files are named like "0000.csv", "0001.csv", ...
    if filename.startswith("0") and filename.endswith(".csv"):
        # FIX: the path must interpolate the actual filename; the original
        # contained a leaked placeholder ("/(unknown)") which would raise
        # FileNotFoundError on the first iteration.
        df = pd.read_csv(f"{confidence_scores_path}/{filename}")
        dfs.append(df)
df = pd.concat(dfs, ignore_index=True)
df.to_csv(f"{confidence_scores_path}/all.csv", index=False)
# -
dimsum_df = pd.read_csv(f"{confidence_scores_path}/all.csv")
dimsum_df.head()
# Currently running confidence scores, so only use corrected ground up to what has been calculated so far.
dimsum_df["Ground"] = corrected_ground[0:len(dimsum_df)]
# +
# Build the consolidated-score frame: bookkeeping columns followed by one
# score column per consolidated label.
new_df_columns = ["Token Index", "Tokens", "Predicted Tag", "Predicted Index", "Ground", "Ground Index"] + list(new_labels.keys())
new_df_columns
new_df = pd.DataFrame(columns=new_df_columns)
new_df["Token Index"] = dimsum_df["Unnamed: 0"]
new_df["Tokens"] = dimsum_df["Tokens"]
# Invert new_labels: every original label index -> its consolidated label.
old_index_to_new_label = {}
for k, v in new_labels.items():
    for num in v:
        old_index_to_new_label[num] = k
# And give each consolidated label a dense integer index.
new_labels_list = list(new_labels)
new_label_to_new_index = dict(zip(new_labels_list, list(range(len(new_labels_list)))))
# -
# Map every old predicted-tag index onto the consolidated label set,
# recording both the label string and its new dense index.
predicted_labels = [old_index_to_new_label[old_idx]
                    for old_idx in dimsum_df["Predicted Tag Indexes"]]
predicted_label_indexes = [new_label_to_new_index[lab] for lab in predicted_labels]
# +
# Map STREUSLE-style lexcats onto the tag inventory used by the
# consolidated label set.
labels_map = {"CCONJ": "CONJ",
              "DISC": "X",
              "INF": "PART",
              "INF.P": "PART",
              "N": "NOUN",
              "P": "ADP",
              "POSS": "PART",
              "PP": "ADP",
              "PRON.POSS": "PRON",
              "V": "VERB",
              "V.IAV": "VERB",
              "V.LVC.cause": "VERB",
              "V.LVC.full": "VERB",
              "V.VID": "VERB",
              "V.VPC.full": "VERB",
              "V.VPC.semi": "VERB",
              "_": "X"}
ground_indexes = []
for i, label in enumerate(dimsum_df["Ground"]):
    # Normalize spelling differences between the two label inventories.
    label = label.replace("natural_object", "naturalobject")
    label = label.replace("PROPN", "NOUN")
    # For dealing with manually annotated cases where a "lexcat=" note is provided.
    if "lexcat=" in label:
        new_lexcat = label[label.index("=") + 1:]
        mapped_new_lexcat = labels_map.get(new_lexcat, new_lexcat)
        new_label = label[:label.index("-") + 1] + mapped_new_lexcat
        ground_indexes.append(new_label_to_new_index[new_label])
        continue
    # MWE-continuation tags collapse to I-X / i-X in the consolidated set.
    try:
        if label.startswith("I"):
            ground_indexes.append(new_label_to_new_index["I-X"])
        elif label.startswith("i"):
            ground_indexes.append(new_label_to_new_index["i-X"])
        else:
            ground_indexes.append(new_label_to_new_index[label])
    except KeyError:
        # FIX: was a bare `except:`, which also hid unrelated errors; the
        # only expected failure here is a label missing from the
        # consolidated set. Log it and record a sentinel.
        # NOTE(review): "-1" is a string while other entries are ints --
        # kept as-is to preserve downstream behavior.
        print(i, "\t", label)
        ground_indexes.append("-1")
# -
# Copy the remapped predictions and ground truth into the new frame.
new_df["Predicted Tag"] = predicted_labels
new_df["Predicted Index"] = predicted_label_indexes
new_df["Ground"] = dimsum_df["Ground"]
new_df["Ground Index"] = ground_indexes
new_df.head()
def sum_scores(new_label, row_index):
    """Total confidence mass assigned to *new_label* at *row_index*.

    Sums the scores of every original-label column that was merged into
    the consolidated label (column names in dimsum_df are stringified
    original indexes).
    """
    return sum(dimsum_df[str(old_idx)][row_index] for old_idx in new_labels[new_label])
# +
# Fill the consolidated score columns of new_df, row by row.
score_column_names = list(new_df.columns)[6:]
score_columns = {name: [] for name in score_column_names}
for row_index in new_df.index:
    for name in score_column_names:
        score_columns[name].append(sum_scores(name, row_index))
for name in score_column_names:
    new_df[name] = score_columns[name]
# -
new_df.head()
new_df.to_csv(dimsum_consolidated_path, index=False)
| calibration/notebooks/dimsum/003_converting_dimsum_confidence_scores.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Diva-python3.6
# language: python
# name: diva-python3.6
# ---
# Plot the glider tracks on a map with the SST as overlay.
import alborexdata
import os
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
import alborexpaths
from alborexpaths import coordinates1, coordinates2
from importlib import reload
import warnings
import matplotlib.cbook
# Basemap triggers matplotlib deprecation warnings; silence them.
warnings.filterwarnings("ignore",category=matplotlib.cbook.mplDeprecation)
logger = alborexdata.configure_logging("./logs/alborexFigure4.log")
# ## Load data
# ### Coastline
loncoast, latcoast = alborexdata.read_lonlat_coast(alborexpaths.coastfile)
# ### Gliders
# Two platforms: index 0 is the coastal glider, index 1 the deep glider
# (cf. the legend labels in the plotting cell below).
gliders = [alborexdata.Glider(), alborexdata.Glider()]
gliders[0].get_from_netcdf(alborexpaths.gliderfiles[0])
gliders[1].get_from_netcdf(alborexpaths.gliderfiles[1])
# ### SST
# +
# FIX: urllib.request was used below without being imported anywhere.
import urllib.request

# Download each SST file only if it is not already on disk.
for sstfile, sstremote in zip(alborexpaths.sstfiles, alborexpaths.sstremotefiles):
    if not(os.path.exists(sstfile)):
        logger.info("Downloading data file {}".format(os.path.basename(sstfile)))
        urllib.request.urlretrieve(sstremote, sstfile)
    else:
        logger.info("SST file {} already downloaded".format(os.path.basename(sstfile)))
# Read the level-2 ocean-color SST field and apply its quality control.
sst = alborexdata.SST()
sst.read_from_oceancolorL2(alborexpaths.sstfiles[1])
sst.apply_qc()
# -
# ### Front position
f = alborexdata.Front()
f.get_from_file(alborexpaths.frontfile)
f.smooth()
# # Plot
# Create the projection
# Mercator map clipped to the region of interest, with the true-scale
# latitude at the middle of the latitude range.
m = Basemap(projection='merc', llcrnrlon=coordinates2[0], llcrnrlat=coordinates2[2],
            urcrnrlon=coordinates2[1], urcrnrlat=coordinates2[3],
            lat_ts=0.5 * (coordinates2[2] + coordinates2[3]), resolution='h')
# ## Make the plot
# +
figname = "fig06"
fignamesst = "fig06_sst"
figtitle = "Glider tracks"
logger.info("Making figure {0}".format(figname))
fig = plt.figure(figsize=(10, 10))
ax = plt.subplot(111)
m.ax = ax
# Add the coastline
for i in range(0, len(loncoast)):
    m.plot(np.array(loncoast[i]), np.array(latcoast[i]),
           color='k', linewidth=.5, latlon=True)
# Grid
alborexdata.add_map_grid(m, coordinates1, dlon=.25, dlat=.25,
                         fontname='Times New Roman', fontsize=14,
                         linewidth=0.2, zorder=1, color=".6")
# Front position (wide translucent dashed line behind the tracks)
m.plot(f.lon, f.lat, "--", color=".25", linewidth=15, latlon=True, alpha=.5)
xf, yf = m(-0.95, 37.05)
plt.text(xf, yf, "Front", fontsize=18, ha='left', va="center")
# Track colors: black for the coastal glider, light gray for the deep one.
colorlist = ["k", ".75"]
for ii, glider in enumerate(gliders):
    # Remove masked values
    glider.remove_masked_coords()
    # Get indices for the different days
    day_indices, date_list = glider.get_day_indices(ndays=1)
    # Add first point
    if ii==0:
        deploymenttext = "Coastal glider deployment"
        m.plot(glider.lon, glider.lat, marker='o', color=colorlist[ii], latlon=True,
               linestyle='-', ms=.1, zorder=3)
    else:
        deploymenttext = "Deep glider deployment"
        m.plot(glider.lon, glider.lat, color=colorlist[ii], linewidth=2, latlon=True,
               linestyle='--', ms=.1, zorder=3)
        m.plot(glider.lon, glider.lat, color="k", latlon=True,
               linestyle=':', ms=.1, zorder=3)
    # Deployment point, marked with an arrow-like triangle.
    m.plot(glider.lon[0], glider.lat[0], latlon=True, linewidth=0,
           marker='>', markersize=10, color=colorlist[ii], label=deploymenttext)
    # Daily waypoints, labeled with the date; labels alternate sides of
    # the track (offset sign flips with cc) to avoid overlap.
    xp, yp = m(glider.lon[day_indices[:-1]], glider.lat[day_indices[:-1]])
    cc = 0
    for xx, yy, tt in zip(xp, yp, date_list[:-1]):
        if np.mod(cc, 2) == 0:
            haparam = "left"
            vaparam = "bottom"
        else:
            haparam = "right"
            vaparam = "top"
        plt.text(xx + 1000 * (-1)** cc, yy + 1000 * (-1)** cc, tt.strftime("%Y-%m-%d"),
                 bbox=dict(facecolor='.95', alpha=0.85),
                 va=vaparam, ha=haparam, fontsize=16)
        cc += 1
    m.plot(glider.lon[day_indices[:-1]], glider.lat[day_indices[:-1]], marker='*',
           linewidth=0, color=colorlist[ii], ms=10, latlon=True)
plt.legend(loc=3, fontsize=18, framealpha=1)
plt.title(figtitle, fontsize=20)
# Save the tracks-only figure first, then add the SST background and
# save a second version.
plt.savefig(os.path.join(alborexpaths.figdir, figname), dpi=300, bbox_inches='tight')
# Add SST as background
pcm = m.pcolormesh(sst.lon, sst.lat, sst.field, latlon=True,
                   vmin=18.5, vmax=20.5, cmap=plt.cm.gray_r)
cb = plt.colorbar(pcm, shrink=.8, extend="both", pad=.1)
cb.set_label("($^{\circ}$C)", rotation=0, ha="left")
plt.savefig(os.path.join(alborexpaths.figdir, fignamesst), dpi=300, bbox_inches='tight')
plt.show()
plt.close()
# -
| python/figure4_GliderTracks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:mesa]
# language: python
# name: conda-env-mesa-py
# ---
# # Demographic Prisoner's Dilemma
#
# The Demographic Prisoner's Dilemma is a family of variants on the classic two-player [Prisoner's Dilemma](https://en.wikipedia.org/wiki/Prisoner's_dilemma), first developed by [<NAME>](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.8.8629&rep=rep1&type=pdf). The model consists of agents, each with a strategy of either Cooperate or Defect. Each agent's payoff is based on its strategy and the strategies of its spatial neighbors. After each step of the model, the agents adopt the strategy of their neighbor with the highest total score.
#
# The specific variant presented here is adapted from the [Evolutionary Prisoner's Dilemma](http://ccl.northwestern.edu/netlogo/models/PDBasicEvolutionary) model included with NetLogo. Its payoff table is a slight variant of the traditional PD payoff table:
#
# <table>
# <tr><td></td><td>**Cooperate**</td><td>**Defect**</td></tr>
# <tr><td>**Cooperate**</td><td>1, 1</td><td>0, *D*</td></tr>
# <tr><td>**Defect**</td><td>*D*, 0</td><td>0, 0</td></tr>
# </table>
#
# Where *D* is the defection bonus, generally set higher than 1. In these runs, the defection bonus is set to $D=1.6$.
#
# The Demographic Prisoner's Dilemma demonstrates how simple rules can lead to the emergence of widespread cooperation, despite the Defection strategy dominiating each individual interaction game. However, it is also interesting for another reason: it is known to be sensitive to the activation regime employed in it.
#
# Below, we demonstrate this by instantiating the same model (with the same random seed) three times, with three different activation regimes:
#
# * Sequential activation, where agents are activated in the order they were added to the model;
# * Random activation, where they are activated in random order every step;
# * Simultaneous activation, simulating them all being activated simultaneously.
#
#
# +
from pd_grid.model import PdGrid
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec
# %matplotlib inline
# -
# ## Helper functions
# +
bwr = plt.get_cmap("bwr")
def draw_grid(model, ax=None):
    """
    Draw the current state of the grid: Defecting agents in red,
    Cooperating agents in blue. Creates a new figure when no axes given.
    """
    if not ax:
        fig, ax = plt.subplots(figsize=(6, 6))
    shading = np.zeros((model.grid.width, model.grid.height))
    for agent, x, y in model.grid.coord_iter():
        shading[y][x] = 1 if agent.move == "D" else 0
    ax.pcolormesh(shading, cmap=bwr, vmin=0, vmax=1)
    ax.axis("off")
    ax.set_title("Steps: {}".format(model.schedule.steps))
# -
def run_model(model):
    """
    Run an experiment with a given model and plot the results: grid
    snapshots at steps 0, 10 and 20 across the top, and the collected
    model variables over time underneath.
    """
    fig = plt.figure(figsize=(12, 8))
    snapshot_axes = [fig.add_subplot(pos) for pos in (231, 232, 233)]
    series_ax = fig.add_subplot(212)
    draw_grid(model, snapshot_axes[0])
    for snap_ax in snapshot_axes[1:]:
        model.run(10)
        draw_grid(model, snap_ax)
    model.datacollector.get_model_vars_dataframe().plot(ax=series_ax)
# Set the random seed (shared so all three runs start from identical conditions)
seed = 21
# ## Sequential Activation
m = PdGrid(50, 50, "Sequential", seed=seed)
run_model(m)
# ## Random Activation
m = PdGrid(50, 50, "Random", seed=seed)
run_model(m)
# ## Simultaneous Activation
m = PdGrid(50, 50, "Simultaneous", seed=seed)
run_model(m)
| examples/pd_grid/analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy
import matplotlib.pyplot
import scipy.special
# %matplotlib inline
# # 뉴럴네트워크
class NeuralNetwork:
    """A minimal three-layer (input/hidden/output) feed-forward network
    trained with single-sample stochastic gradient descent."""

    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        self.lr = learningrate
        # Weight matrices sampled from N(0, layer_size**-0.5):
        # wih links input -> hidden, who links hidden -> output.
        self.wih = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.onodes, -0.5), (self.onodes, self.hnodes))
        # Sigmoid activation.
        self.activation_function = lambda x: scipy.special.expit(x)

    def train(self, inputs_list, targets_list):
        """One backpropagation step on a single (input, target) pair."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # Forward pass.
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        final_outputs = self.activation_function(numpy.dot(self.who, hidden_outputs))
        # Backward pass: output error, then error apportioned to the
        # hidden layer by the output weights.
        output_errors = targets - final_outputs
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # Gradient-descent updates; sigmoid derivative is y * (1 - y).
        self.who += self.lr * numpy.dot((output_errors * final_outputs *
                                         (1.0 - final_outputs)), numpy.transpose(hidden_outputs))
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs *
                                         (1.0 - hidden_outputs)), numpy.transpose(inputs))

    def query(self, inputs_list):
        """Forward-propagate *inputs_list*; return the (onodes, 1) output
        activations."""
        inputs = numpy.array(inputs_list, ndmin=2).T
        hidden_outputs = self.activation_function(numpy.dot(self.wih, inputs))
        return self.activation_function(numpy.dot(self.who, hidden_outputs))
# # Initial settings, object creation and data loading
# +
input_nodes = 784
hidden_nodes = 500
output_nodes = 10
learning_rate = 0.1
n = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)
# MNIST CSVs: one image per line, "label,p0,p1,...,p783".
data_file = open("mnist_dataset/mnist_train.csv", 'r')
#data_file = open("mnist_dataset/mnist_train_100.csv", 'r')
data_list = data_file.readlines()
data_file.close()
test_data_file = open("mnist_dataset/mnist_test.csv", 'r')
#test_data_file = open("mnist_dataset/mnist_test_10.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# -
# # Sanity check that training runs
all_values = data_list[0].split(',')
# Rescale pixels from [0, 255] into [0.01, 1.0].
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
print(inputs.shape)
# One-hot-ish target: 0.01 everywhere, 0.99 at the true digit.
targets = numpy.zeros(output_nodes) + 0.01
print(targets.shape)
targets[int(all_values[0])] = 0.99
n.train(inputs, targets)
print(all_values[0])
matplotlib.pyplot.imshow(inputs.reshape(28,28), cmap='Greys', interpolation='None')
matplotlib.pyplot.show()
# # Train and test for 10 epochs
for idx in range(10):
    # One full pass over the training set.
    for record in data_list:
        all_values = record.split(',')
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
    # Evaluate accuracy on the test set after each epoch.
    score_card = []
    for record in test_data_list:
        all_values =record.split(',')
        result = n.query((numpy.asfarray(all_values[1:])/255.0 * 0.99)+0.01)
        if int(all_values[0]) == numpy.argmax(result):
            score_card.append(1)
        else:
            score_card.append(0)
    score_card_array = numpy.asarray(score_card)
    print( "performance[", idx, "]=", score_card_array.sum() / score_card_array.size)
# # Train and test for 10 epochs (duplicate cell: continues training the same network)
for idx in range(10):
    for record in data_list:
        all_values = record.split(',')
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
    score_card = []
    for record in test_data_list:
        all_values =record.split(',')
        result = n.query((numpy.asfarray(all_values[1:])/255.0 * 0.99)+0.01)
        if int(all_values[0]) == numpy.argmax(result):
            score_card.append(1)
        else:
            score_card.append(0)
    score_card_array = numpy.asarray(score_card)
    print( "performance[", idx, "]=", score_card_array.sum() / score_card_array.size)
# # Train and test for 10 epochs (duplicate cell: continues training the same network)
for idx in range(10):
    for record in data_list:
        all_values = record.split(',')
        inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        targets = numpy.zeros(output_nodes) + 0.01
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
    score_card = []
    for record in test_data_list:
        all_values =record.split(',')
        result = n.query((numpy.asfarray(all_values[1:])/255.0 * 0.99)+0.01)
        if int(all_values[0]) == numpy.argmax(result):
            score_card.append(1)
        else:
            score_card.append(0)
    score_card_array = numpy.asarray(score_card)
    print( "performance[", idx, "]=", score_card_array.sum() / score_card_array.size)
# # Load 10 records and test
test_data_file = open("mnist_dataset/mnist_test_10.csv", 'r')
test_data_list10 = test_data_file.readlines()
test_data_file.close()
score_card = []
for idx in range(10):
    # NOTE(review): iterates test_data_list, not the just-loaded
    # test_data_list10 -- confirm which set was intended.
    all_values = test_data_list[idx].split(',')
    image_array = numpy.asfarray(all_values[1:]).reshape((28,28))
    matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    result = n.query((numpy.asfarray(all_values[1:])/255.0 * 0.99)+0.01)
    print(all_values[0], ' ', numpy.argmax(result))
    if int(all_values[0]) == numpy.argmax(result):
        score_card.append(1)
    else:
        score_card.append(0)
    #print(result)
print(score_card)
score_card_array = numpy.asarray(score_card)
print( "performance=", score_card_array.sum() / score_card_array.size)
# Duplicate of the cell above, without the performance summary.
score_card = []
for idx in range(10):
    all_values = test_data_list[idx].split(',')
    image_array = numpy.asfarray(all_values[1:]).reshape((28,28))
    matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    result = n.query((numpy.asfarray(all_values[1:])/255.0 * 0.99)+0.01)
    print(all_values[0], ' ', numpy.argmax(result))
    if int(all_values[0]) == numpy.argmax(result):
        score_card.append(1)
    else:
        score_card.append(0)
    #print(result)
print(score_card)
# Second duplicate.
score_card = []
for idx in range(10):
    all_values = test_data_list[idx].split(',')
    image_array = numpy.asfarray(all_values[1:]).reshape((28,28))
    matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    result = n.query((numpy.asfarray(all_values[1:])/255.0 * 0.99)+0.01)
    print(all_values[0], ' ', numpy.argmax(result))
    if int(all_values[0]) == numpy.argmax(result):
        score_card.append(1)
    else:
        score_card.append(0)
    #print(result)
print(score_card)
# # Test with hand-drawn images
import imageio
score_card = []
for idx in range(10):
    img_array = imageio.imread('mnist_dataset/' + str(idx) + '.png') # read a standard image
    print(img_array.shape) # im is a numpy array
    matplotlib.pyplot.imshow(img_array, cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    # RGB -> luminance using ITU-R 601 weights; drop any alpha channel.
    gray = lambda rgb : numpy.dot(rgb[... , :3] , [0.299 , 0.587, 0.114])
    img_array = gray(img_array)
    #print(img_array)
    #matplotlib.pyplot.imshow(img_array, cmap='Greys', interpolation='None')
    #matplotlib.pyplot.show()
    #print(img_array.shape)
    # Invert: MNIST digits are white-on-black, the PNGs black-on-white.
    img_data = 255.0 - img_array.reshape(784)
    #print(img_data)
    matplotlib.pyplot.imshow(img_data.reshape(28,28), cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    # Rescaling experiment 1: divide by 250, scale by 0.9.
    img_data = img_data/250*0.9 + 0.01
    #img_data = numpy.minimum(img_data, numpy.full(784, 0.99))
    #img_data = img_data - 25
    #img_data = numpy.minimum(img_data, numpy.full(784, 0.99))
    #img_data = numpy.maximum(img_data, numpy.full(784, 0.01))
    matplotlib.pyplot.imshow((img_data*255.0).reshape(28,28), cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    #print(img_data.shape)
    #print(img_data)
    result = n.query(img_data)
    print(result)
    print(numpy.argmax(result))
    if idx == numpy.argmax(result):
        score_card.append(1)
    else:
        score_card.append(0)
print(score_card)
print( "performance=", numpy.array(score_card).sum() / len(score_card))
# Same test, rescaling experiment 2: subtract 5, then clamp to [0.01, 0.99].
score_card = []
for idx in range(10):
    img_array = imageio.imread('mnist_dataset/' + str(idx) + '.png') # read a standard image
    print(img_array.shape) # im is a numpy array
    matplotlib.pyplot.imshow(img_array, cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    gray = lambda rgb : numpy.dot(rgb[... , :3] , [0.299 , 0.587, 0.114])
    img_array = gray(img_array)
    #print(img_array)
    #matplotlib.pyplot.imshow(img_array, cmap='Greys', interpolation='None')
    #matplotlib.pyplot.show()
    #print(img_array.shape)
    img_data = 255.0 - img_array.reshape(784)
    #print(img_data)
    matplotlib.pyplot.imshow(img_data.reshape(28,28), cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    #img_data = img_data/25*0.9 + 0.01
    #img_data = numpy.minimum(img_data, numpy.full(784, 0.99))
    img_data = img_data - 5
    img_data = numpy.minimum(img_data, numpy.full(784, 0.99))
    img_data = numpy.maximum(img_data, numpy.full(784, 0.01))
    matplotlib.pyplot.imshow((img_data*255.0).reshape(28,28), cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    #print(img_data.shape)
    #print(img_data)
    result = n.query(img_data)
    print(result)
    print(numpy.argmax(result))
    if idx == numpy.argmax(result):
        score_card.append(1)
    else:
        score_card.append(0)
print(score_card)
print( "performance=", numpy.array(score_card).sum() / len(score_card))
# Same test, rescaling experiment 3: divide by 30, clamp the top at 0.99.
score_card = []
for idx in range(10):
    img_array = imageio.imread('mnist_dataset/' + str(idx) + '.png') # read a standard image
    print(img_array.shape) # im is a numpy array
    matplotlib.pyplot.imshow(img_array, cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    gray = lambda rgb : numpy.dot(rgb[... , :3] , [0.299 , 0.587, 0.114])
    img_array = gray(img_array)
    #print(img_array)
    #matplotlib.pyplot.imshow(img_array, cmap='Greys', interpolation='None')
    #matplotlib.pyplot.show()
    #print(img_array.shape)
    img_data = 255.0 - img_array.reshape(784)
    #print(img_data)
    matplotlib.pyplot.imshow(img_data.reshape(28,28), cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    img_data = img_data/30 + 0.01
    img_data = numpy.minimum(img_data, numpy.full(784, 0.99))
    #img_data = img_data - 5
    #img_data = numpy.minimum(img_data, numpy.full(784, 0.99))
    #img_data = numpy.maximum(img_data, numpy.full(784, 0.01))
    matplotlib.pyplot.imshow((img_data*255.0).reshape(28,28), cmap='Greys', interpolation='None')
    matplotlib.pyplot.show()
    #print(img_data.shape)
    #print(img_data)
    result = n.query(img_data)
    print(result)
    print(numpy.argmax(result))
    if idx == numpy.argmax(result):
        score_card.append(1)
    else:
        score_card.append(0)
print(score_card)
print( "performance=", numpy.array(score_card).sum() / len(score_card))
# # Miscellaneous test code
all_values = data_list[0].split(',')
# NOTE(review): "+ 1" pushes inputs outside the usual [0.01, 1.0] range
# used elsewhere in this notebook (cf. the 0.01 offset above) --
# presumably a deliberate experiment; confirm.
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 1
print(inputs.shape)
targets = numpy.zeros(output_nodes) + 0.01
print(targets.shape)
targets[int(all_values[0])] = 0.99
n.train(inputs, targets)
print(all_values[0])
matplotlib.pyplot.imshow(inputs.reshape(28,28), cmap='Greys', interpolation='None')
matplotlib.pyplot.show()
len(data_list)
data_list[0]
all_values = data_list[1].split(',')
image_array = numpy.asfarray(all_values[1:]).reshape((28,28))
print(all_values)
print(image_array)
matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation='None')
matplotlib.pyplot.imshow(image_array, cmap='Greys')
scaled_input = (numpy.asfarray(all_values[1:])/ 255.0 * 0.99) + 0.01
print(scaled_input)
onodes = 10
targets = numpy.zeros(onodes)+0.01
targets[int(all_values[0])] = 0.99
targets
n.train(scaled_input, targets)
# numpy array basics, scratch work:
a = numpy.asarray([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
print(a)
print(type(a))
print(a.shape)
print(a.ndim)
print(len(a))
print(a.size)
a = [1]
a
# (1) is just the int 1; a one-element tuple needs the trailing comma.
b = (1)
b
type(a)
c = (1,)
print(c)
print(len(c))
print(type(c))
| 2020-03-NeuralNet/2020-03-24-2-first-nerual-network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Physics 256
# ## Lecture 11 - Plot Types
# <img src="http://matplotlib.org/_static/logo2.png" width=400px>
import style
style._set_css_style('../include/bootstrap.css')
# ## Last Time
#
# ### [Notebook Link: 10_PlotCustomization.ipynb](./10_PlotCustomization.ipynb)
#
# - using LaTeX in plot labels
# - meshgrid for multi-dimensional data sets
# - plot customization
#
# ## Today
#
# - plot types
#
# ## Plotting with Matplotlib
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib notebook
plt.style.use('../include/notebook.mplstyle');
# %config InlineBackend.figure_format = 'svg'
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
# ## Plot Types
#
# Matplotlib includes almost any imaginable type of chart or graph. A good starting point to determine what is the best type of graph for your particular data is:
from IPython.display import Image
Image(filename='../data/choosing_a_good_chart.png')
# ### Histograms
# +
# get 10000 gaussian distributed random numbers
h = np.random.randn(10000)
# the number of bins for our histogram
numBins = 100
plt.hist(h,numBins,edgecolor='w', density=True, alpha=0.5)
# let's add the theoretical value
x = np.arange(-5,5,0.01)
y = np.exp(-x**2/2)/np.sqrt(2.0*np.pi)
plt.plot(x,y,'-', color=colors[0])
# we can save our figure to disk
#plt.savefig('output/histogram.pdf')
# -
# ### Errorbars
# errorbars
x = np.arange(0, 4, 0.2)
y = np.exp(-x)
dy = 0.1 * np.abs(np.random.randn(len(y)))
dx = 0.1 * np.abs(np.random.randn(len(y)))
plt.errorbar(x,y,xerr=dx,yerr=dy,marker='o',markersize=8.0,capsize=0.0,elinewidth=1.0)
# ### Bar charts
# +
bar = {'A': 40, 'B': 70, 'C': 30, 'D': 85}
bar = sorted(bar.items())
keys = []
vals = []
for i,cbar in enumerate(bar):
plt.bar(i, cbar[1], alpha=0.5,color=colors[i])
keys.append(cbar[0])
vals.append(cbar[1])
plt.xticks(np.arange(len(bar))+0.4,keys)
plt.yticks(vals)
# -
# ### Pie charts
# pie chart
plt.figure(4,figsize=(6,6));
aleast = {'Boston':84, 'New York':103, 'Tampa Bay':96, 'Toronto':67, 'Baltimore':54}
explode = [0.0, 0.0, 0.0, 0.0, 0.1]
plt.pie(list(aleast.values()), labels=list(aleast.keys()), explode=explode, autopct='%3.1f%%', colors=colors[:]);
# ### Scatter plots
#
# Useful and effecient for large amounts of data points
# scatter graph
x = np.random.randn(1000)
y = np.random.randn(1000)
size = 50*np.random.randn(1000)
col = np.random.rand(1000)
plt.scatter(x, y, s=size, c=col)
# ### Polar plots
# polar plot
theta = np.linspace(0, 2.0*np.pi, 100)
plt.polar(3*theta, theta/5);
plt.polar(theta, np.cos(4*theta));
# ### Annotating plots
# +
# Produce a linear plot with an intercept
iL = np.linspace(0, 0.1, 100)
yint = 0.15
y = yint + 2.0*iL
plt.plot(iL,y,marker='None', linestyle='-')
plt.axis([0,0.09,0,0.4])
plt.xlabel(r'$1/L$')
plt.ylabel(r'$f(L)$')
# Our first label in data coordinates
plt.text(0.002,0.25,r'$f(0) = 0.15$', fontsize=18)
# The label in figure coordinates
plt.figtext(0.2,0.2,'intercept = 0.15', fontsize=18, color=colors[1])
# Add a label and an arrow
plt.annotate(r'$L \to \infty$',xy=(0, 0.15), xytext=(0.02, 0.08),\
fontsize=30, color=colors[2],\
arrowprops=dict(color=colors[2],shrink=0.1))
# -
# <div class="span alert alert-success">
# <h2> Team Programming challenge </h2>
# <h3> Determine a graphical solution for $x(a)$ (with at least 6 points) to the transcendental equation: </h3>
# </div>
# \begin{equation}
# x = \tanh\left(\frac{x}{a} \right)
# \end{equation}
# +
def trans(x, a):
    '''Right-hand side of the transcendental equation x = tanh(x/a).

    x : scalar or array of x values.
    a : scalar parameter (a > 0).
    Returns tanh(x / a), same shape as x.
    '''
    # FIX: the stub returned 0; fill in the stated equation.
    return np.tanh(x / a)
# choose a range of a values and x values
# you might need to explore a bit with these
amin,amax = 0,1
xmin,xmax,Δx = 0,1,0.01
a = np.linspace(amin,amax,6)
x = np.arange(xmin,xmax,Δx)
# plot the LHS
plt.plot('-')
# Now plot the RHS for different values of a
for i,ca in enumerate(a):
plt.plot('-', label=f'{ca:5.3f}')
plt.legend()
# -
# Visually identiry solutions using the notebook feature and add the values of x here
sol = []
# Plot the solution
plt.plot()
plt.xlabel(r'$a$')
plt.ylabel(r'$x$')
# %load solutions/transcendental.py
| 4-assets/BOOKS/Jupyter-Notebooks/Overflow/11_PlotTypes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Define a function that takes an image, number of x and y points,
# camera matrix and distortion coefficients
def corners_unwarp(img, nx, ny, mtx, dist):
    """Undistort an image, find an nx-by-ny chessboard in it, and warp the
    board to a top-down view.

    Returns (warped, M) when the chessboard corners are found; falls
    through (implicitly returning None) otherwise, as before.
    """
    # Use the OpenCV undistort() function to remove distortion.
    # FIX: undistortion must happen *before* grayscaling -- the original
    # referenced `undist` one line before it was defined (NameError).
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    # Convert undistorted image to grayscale
    gray = cv2.cvtColor(undist, cv2.COLOR_BGR2GRAY)
    # Search for corners in the grayscaled image
    ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
    if ret == True:
        # If we found corners, draw them! (just for fun)
        cv2.drawChessboardCorners(undist, (nx, ny), corners, ret)
        # Choose offset from image corners to plot detected corners
        # This should be chosen to present the result at the proper aspect ratio
        # My choice of 100 pixels is not exact, but close enough for our purpose here
        offset = 100 # offset for dst points
        # Grab the image shape
        img_size = (gray.shape[1], gray.shape[0])
        # For source points I'm grabbing the outer four detected corners
        src = np.float32([corners[0], corners[nx-1], corners[-1], corners[-nx]])
        # For destination points, I'm arbitrarily choosing some points to be
        # a nice fit for displaying our warped result
        # again, not exact, but close enough for our purposes
        dst = np.float32([[offset, offset], [img_size[0]-offset, offset],
                          [img_size[0]-offset, img_size[1]-offset],
                          [offset, img_size[1]-offset]])
        # Given src and dst points, calculate the perspective transform matrix
        M = cv2.getPerspectiveTransform(src, dst)
        # Warp the image using OpenCV warpPerspective()
        warped = cv2.warpPerspective(undist, M, img_size)
        # Return the resulting image and matrix
        return warped, M
# +
# Examples of Useful Code
#
# You need to pass a single color channel to the cv2.Sobel() function, so first convert it to grayscale:
gray = cv2.cvtColor(im, cv2.COLOR_RGB2GRAY)
# Note: Make sure you use the correct grayscale conversion depending on how you've read in your images. Use cv2.COLOR_RGB2GRAY if you've read in an image using mpimg.imread(). Use cv2.COLOR_BGR2GRAY if you've read in an image using cv2.imread().
# Calculate the derivative in the x direction (the 1, 0 at the end denotes the x direction):
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0)
# Calculate the derivative in the y direction (the 0, 1 at the end denotes the y direction):
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1)
# Calculate the absolute value of the x derivative:
abs_sobelx = np.absolute(sobelx)
# Convert the absolute value image to 8-bit:
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Note: It's not entirely necessary to convert to 8-bit (range from 0 to 255) but in practice, it can be useful in the event that you've written a function to apply a particular threshold, and you want it to work the same on input images of different scales, like jpg vs. png. You could just as well choose a different standard range of values, like 0 to 1 etc.
# Create a binary threshold to select pixels based on gradient strength:
thresh_min = 20
thresh_max = 100
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh_min) & (scaled_sobel <= thresh_max)] = 1
plt.imshow(sxbinary, cmap='gray')
# Result
# -
# # Magnitude of the gradient
# +
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
# Read in an image
image = mpimg.imread('signs_vehicles_xygrad.png')
# Define a function that applies Sobel x and y,
# then computes the magnitude of the gradient
# and applies a threshold
def mag_thresh(img, sobel_kernel=3, mag_thresh=(0, 255)):
    """Return a binary mask of pixels whose gradient magnitude is in range.

    :param img: RGB image (read with mpimg.imread, hence RGB2GRAY below)
    :param sobel_kernel: odd Sobel kernel size (passed through as ksize)
    :param mag_thresh: (min, max) inclusive thresholds on the scaled magnitude
    :return: uint8 mask, 1 where the threshold is met, 0 elsewhere
    """
    # 1) Convert to grayscale (fix: original referenced an undefined `im`)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # 2) Take the gradient in x and y separately (fix: honour sobel_kernel;
    #    ksize=3 is also cv2's default, so behaviour is unchanged for callers)
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
    # 3) Element-wise gradient magnitude (fix: np.sqr does not exist, and
    #    np.sum collapsed the arrays to a scalar instead of per-pixel values)
    magnitude = np.sqrt(sobelx ** 2 + sobely ** 2)
    # 4) Scale to 8-bit (0 - 255); guard a flat image against divide-by-zero
    peak = np.max(magnitude)
    if peak > 0:
        scaled_magnitude = np.uint8(255 * magnitude / peak)
    else:
        scaled_magnitude = np.zeros_like(magnitude, dtype=np.uint8)
    # 5) Binary mask where the magnitude thresholds are met (fix: use the
    #    mag_thresh parameter, not the undefined thresh_min/thresh_max)
    binary_output = np.zeros_like(scaled_magnitude)
    binary_output[(scaled_magnitude >= mag_thresh[0]) & (scaled_magnitude <= mag_thresh[1])] = 1
    # 6) Return this mask (fix: original returned an undefined name)
    return binary_output
# Run the function: binarise gradient magnitudes between 30 and 100 (inclusive)
mag_binary = mag_thresh(image, sobel_kernel=3, mag_thresh=(30, 100))
# Plot the result: original image next to the thresholded gradient magnitude
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=50)
ax2.imshow(mag_binary, cmap='gray')
ax2.set_title('Thresholded Magnitude', fontsize=50)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
| scratchpad.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Reproducible Data Science with Renku
# + [markdown] slideshow={"slide_type": "subslide"}
# ## notebooks/03-renku/index.ipynb
#
# You should be at this location in the repository.
# + [markdown] slideshow={"slide_type": "slide"}
# If you need help, use a red post-it, raise your hand, or ask on Gitter: https://gitter.im/SwissDataScienceCenter/renku
# + [markdown] slideshow={"slide_type": "slide"}
# # Renku
#
# Renku is software for doing data science that is directly and conceptually reproducible.
#
# It has two parts:
#
# * renku CLI (think `git`)
# * Renkulab server (think `GitLab` or `GitHub`)
# + [markdown] slideshow={"slide_type": "notes"}
# Renku is a tool for reproducible data science that we are developing at the [Swiss Data Science Center](http://datascience.ch/). It's quite new (only about 1.5 years old as of Jul 2019) and very actively being developed, with many new features underway.
#
# Renku is made up of two parts: the renku command-line interface, and the Renkulab server. The distinction is similar to git vs. GitLab. `git` is a set of command-line tools for using version control on a project. GitLab is a server application for managing multiple projects and giving others access to them.
#
# Similarly, `renku` is a set of command-line tools for working reproducibly; Renkulab is a server for sharing and collaborating on projects, which includes a zero-install environment for running code, including, but not limited to notebooks.
#
# Just as with GitHub and git, projects can be started on the server (e.g., [renkulab.io](https://renkulab.io)), or locally, on your laptop or desktop computer. And it is easy to transition a project from one location to the other.
#
# In this tutorial, we will start our projects on our laptops, and, in the end, move them to Renkulab where we can share and collaborate with others.
# + [markdown] slideshow={"slide_type": "slide"}
# <img alt="renku knowledge graph" src="tutorial-images/evap_adelaide-reduced.svg" width="600"/>
#
# ([Evaluation of the Vegetation Optimality Model along the North-Australian Tropical Transect using a fully Open Science approach by Nijzink, Schymanski, et. al.](https://doi.org/10.5281/zenodo.3274346
# ))
# + [markdown] slideshow={"slide_type": "notes"}
# Here is an example of some work that was done using renku. By using renku, it is possible to work reproducibly, documenting the process as a side effect.
# + [markdown] slideshow={"slide_type": "skip"}
# # Renku's building blocks
#
# <table class="table table-condensed" style="font-size: 16px; margin: 10px;">
# <thead>
# <tr>
# <th>Tool</th>
# <th>Environment</th>
# <th>Code</th>
# <th>Data</th>
# <th>Workflow</th>
# <th>Provenance</th>
# </tr>
# </thead>
# <tbody>
# <tr style="font-size:24px;">
# <th><a href="https://renkulab.io">Renku</a></th>
# <td>Docker</td>
# <td>git</td>
# <td>git-lfs</td>
# <td>CWL</td>
# <td>PROV-O/RDF</td>
# </tr>
# </tbody>
# </table>
#
#
# + [markdown] slideshow={"slide_type": "notes"}
# Renku combines many tools that you may be familiar with and packages them in a unified way. Renku is a sort of "syntactic sugar" for the building blocks: users are allowed to peek under the covers and work directly with git, e.g., if that is convenient.
# + [markdown] slideshow={"slide_type": "slide"}
# # Task
#
# Working with data from [US Dept. of Transportation, Bureau of Transportation Statistics](https://www.transtats.bts.gov), we will answer the following question:
#
# - How many flights were there to Austin, TX in Jan 2019
# + [markdown] slideshow={"slide_type": "notes"}
# The tools we will use for the task are a bit oversized for such a simple question. But it will give us an opportunity to look at reproducibility in an understandable and manageable context.
# + [markdown] slideshow={"slide_type": "slide"}
# # Approach
# + [markdown] slideshow={"slide_type": "notes"}
# In the hands-on, we will be doing our data science using Jupyter Notebooks. Notebooks have their [detractors](https://docs.google.com/presentation/d/1n2RlMdmv1p25Xy5thJUhkKGvjtV-dkAIsUXP-AL4ffI/edit#slide=id.g362da58057_0_1), and they make good points, but their popularity is also undeniable.
#
# Renku does not specifically target notebooks — it can work with any kind of program — but it is possible to use renku in combination with notebooks.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Ten Simple Rules
#
# [Ten Simple Rules for Reproducible Research in Jupyter Notebooks](https://arxiv.org/abs/1810.08055)
#
# <img src="./tutorial-images/ten-simple-rules-fig-1.png" alt="Ten Simple Rules Fig. 1" width="900px" />
# + [markdown] slideshow={"slide_type": "notes"}
# [Ten Simple Rules for Reproducible Research in Jupyter Notebooks](https://arxiv.org/abs/1810.08055) provides a good set of best practices for working with notebooks. We adapt their suggestions to leverage the extra support provided by Renku.
#
# Their advice is essentially the same as what we have been discussing, but they provide some tips for handling problems specific to notebooks.
#
# Two of these problems are: 1. cells can be executed in any order; 2. it is difficult to provide parameters to notebooks. 1. complicates reproducibility, 2. makes reuse hard.
#
# The authors suggest using [Papermill](https://papermill.readthedocs.io/en/latest/), which solves both of these problems. Using papermill, it is possible to parameterize notebooks, and it is possible to execute them in a reproducible way.
# + [markdown] slideshow={"slide_type": "skip"}
# ## Hats
#
# * "Renku" Hat
# * "Pandas" Hat
# + [markdown] slideshow={"slide_type": "notes"}
# As we work through the tutorial, we will be alternating between two different hats: our "pandas" hat and our "renku" hat. When we have our pandas hat on, we will be working within the widely-known pandas eco-system. In terms of data science, the real work happens here. But, we are not going to dedicate much of our attention to this part, and it is possible to work through the tutorial with little to no pandas knowledge.
# + [markdown] slideshow={"slide_type": "slide"}
# # Cast of Characters
#
# <table class="table table-striped" style="font-size: 18px; margin: 10px;">
# <tbody>
# <tr>
# <th width="20%"><code>!</code></th>
# <td>IPython syntax for executing a shell command</td>
# </tr>
# <tr>
# <th width="20%"><code>cp</code></th>
# <td>In practice, we would be writing the code, notebooks, and other files we work with. But, in this tutorial, we are going to write them by copying a pre-written version.</td>
# </tr>
# <tr>
# <th width="20%"><code>git status;</code><br>
# <code>git add;</code><br>
# <code>git commit</code>
# </th>
# <td>As we work, we will be committing to git to keep track of changes we make and the reasons for making them.</td>
# </tr>
# <tr>
# <th width="20%"><a href="https://papermill.readthedocs.io/en/latest/">papermill</a></th>
# <td>Tool for parameterizing and running notebooks in a reproducible way. It takes a notebook and its parameters as input, and produces a new notebook as output. We will use it together with <code>renku run</code></td>
# </tr>
# <tr>
# <th width="10%"><code>renku</code></th>
# <td>Tools for reproducible data science.</td>
# </tr>
# </tbody>
# </table>
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Hands-on with Renku (1h 30m)
#
# <table class="table table-striped" style="font-size: 18px; margin: 10px;">
# <tbody>
# <tr>
# <th width="10%">30 min</th>
# <td width="10%"><a href="01-GettingStarted.ipynb">Starting</a></td>
# <td style="text-align: left">Starting a project, importing data</td>
# </tr>
# <tr>
# <th width="10%">30 min</th>
# <td width="10%"><a href="02-1-BuildPipeline.ipynb">Pipeline</a></td>
# <td style="text-align: left">Build a pipeline that performs an analysis</td>
# </tr>
# <tr>
# <th width="10%">30 min</th>
# <td width="10%"><a href="03-Sharing.ipynb">Sharing</td>
# <td style="text-align: left">Sharing results and collaborating using <a href="https://renkulab.io">renkulab.io</a>.</td>
# </tr>
# </tbody>
# </table>
| notebooks/03-renku/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] graffitiCellId="id_m7du2yn"
#
# *Let us solve, using recursion, a problem that we solved without recursion in the last lesson. It will make you realize that recursion can make our code look simpler.*
#
# ### Problem Statement
# You are given a non-negative number in the form of list elements. For example, the number `123` would be provided as `arr = [1, 2, 3]`. Add one to the number and return the output in the form of a new list.
#
# **Example 1:**
# * `input = [1, 2, 3]`
# * `output = [1, 2, 4]`
#
# **Example 2:**
# * `input = [1, 2, 9]`
# * `output = [1, 3, 0]`
#
# **Example 3:**
# * `input = [9, 9, 9]`
# * `output = [1, 0, 0, 0]`
#
# ### Exercise - Write the RECURSIVE function definition here
# + graffitiCellId="id_c3itdzc"
def add_one(arr):
    """
    :param: arr - list of digits representing some number x
    return a list with digits representing (x + 1)
    """
    # Base case: the carry spilled past the most significant digit
    # (e.g. [9, 9] -> [1, 0, 0]), so prepend a leading 1.
    if not arr:
        return [1]
    # No carry: bump the last digit and keep the prefix as-is.
    if arr[-1] < 9:
        return arr[:-1] + [arr[-1] + 1]
    # Carry: the last digit wraps to 0 and the prefix is incremented
    # recursively (the "RECURSIVE" solution the exercise asks for).
    return add_one(arr[:-1]) + [0]
# + [markdown] graffitiCellId="id_isswxel"
# <span class="graffiti-highlight graffiti-id_isswxel-id_r39lv1k"><i></i><button>Show Solution</button></span>
# + [markdown] graffitiCellId="id_okc7e08"
# ### Test - Let's test your function
# + graffitiCellId="id_ejl0gzn"
# A helper function for Test Cases
def test_function(test_case):
    """Run add_one on test_case[0] and print Pass/Fail against test_case[1]."""
    arr = test_case[0]
    solution = test_case[1]
    output = add_one(arr)
    # Fix: the original only iterated over `output`, so a too-short (or None,
    # i.e. unimplemented) result could slip through or crash. Check shape first.
    if output is None or len(output) != len(solution):
        print("Fail")
        return
    for index, element in enumerate(output):
        if element != solution[index]:
            print("Fail")
            return
    print("Pass")
# + graffitiCellId="id_r171xpy"
# Test Case 1: simplest increment, no carry ([0] -> [1])
arr = [0]
solution = [1]
test_case = [arr, solution]
test_function(test_case)
# + graffitiCellId="id_hxtikz2"
# Test Case 2: only the last digit changes ([1, 2, 3] -> [1, 2, 4])
arr = [1, 2, 3]
solution = [1, 2, 4]
test_case = [arr, solution]
test_function(test_case)
# + graffitiCellId="id_ov2b5hw"
# Test Case 3: carry propagates past the most significant digit
arr = [9, 9, 9]
solution = [1, 0, 0, 0]
test_case = [arr, solution]
test_function(test_case)
| Data Structures/Recursion/Add-One-Again.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <pre>
# Author: <a href="https://www.doclrogers.com"><NAME></a>
# License: MIT
# Description: Training a handwritten digit classifier for use in a mobile application.
# </pre>
# # Setup
# ## Imports
# +
# Imports
import os
import cv2
print('Open CV2 version', cv2.__version__)
import matplotlib
import matplotlib.pyplot as plt
print('Matplotlib version', matplotlib.__version__)
import numpy as np
print('NumPy version', np.__version__)
import tensorflow as tf
from tensorflow.contrib import lite
print('TensorFlow version', tf.__version__)
# Plotting inline
# %matplotlib inline
# -
# Data set
MNIST = tf.keras.datasets.mnist
# Set random seed for consistency
np.random.seed(64)
# ## Read in Data
# +
(x_train, y_train),(x_test, y_test) = MNIST.load_data()
class_names = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print('Image size:', x_train[0,].shape)
# -
print('Sample raw MNIST image:')
plt.imshow(x_train[4,])
plt.show()
# NOTE: Image is white writing on black background
# ## Pre-process Data
#
# * Scale pixels from \[0, 255\] to \[0, 1\]
# * Add Gaussian noise to the image to help improve results when captured with real-world mobile cameras
# * Invert colors for black writing on white background
# +
# Training set
x_train = x_train / 255.0 # Scale pixels from [0, 255] to [0, 1]
# Element-wise max against Gaussian samples (mean 0.3, std 0.1) imposes a
# noisy grey floor on the dark background, simulating camera capture noise.
x_train = np.maximum.reduce([np.random.normal(0.3, 0.1, x_train.shape), x_train]) # Add noise
x_train = 1 - x_train # Invert colors: black writing on white background
# Test set: same pipeline as the training set
x_test = x_test / 255.0
x_test = np.maximum.reduce([np.random.normal(0.3, 0.1, x_test.shape), x_test])
x_test = 1 - x_test
# -
print('Sample preprocessed MNIST image:')
plt.imshow(x_train[4,])
plt.show()
# # Train
#
# Create a basic neural network for classifying MNIST images (See [Get Started with TensorFlow](https://www.tensorflow.org/tutorials)). Then fit to the training set.
# Simple dense classifier: flatten 28x28 -> 512 ReLU -> dropout -> 10-way softmax
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(512, activation=tf.nn.relu),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
# sparse_categorical_crossentropy because the labels are integer class ids,
# not one-hot vectors.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10)
# # Evaluate
#
# Evaluate on some handwritten digits of your own!
images = []
images.append(cv2.imread('../images/mnist/IMG_20190503_210852.jpg', 0))
images.append(cv2.imread('../images/mnist/IMG_20190503_210856.jpg', 0))
images.append(cv2.imread('../images/mnist/IMG_20190503_210859.jpg', 0))
images.append(cv2.imread('../images/mnist/IMG_20190503_210903.jpg', 0))
images.append(cv2.imread('../images/mnist/IMG_20190503_210907.jpg', 0))
images.append(cv2.imread('../images/mnist/IMG_20190503_210920.jpg', 0))
images.append(cv2.imread('../images/mnist/IMG_20190503_210927.jpg', 0))
images.append(cv2.imread('../images/mnist/IMG_20190503_210931.jpg', 0))
images.append(cv2.imread('../images/mnist/IMG_20190503_210936.jpg', 0))
images.append(cv2.imread('../images/mnist/IMG_20190503_210949.jpg', 0))
# +
# Pre-process each image for inference
images_p = []
for img in images:
tmp = cv2.resize(img, (28, 28)) / 255
images_p.append(tmp)
plt.imshow(tmp)
plt.show()
images_p = np.stack(images_p)
# -
# Predict results
results = model.predict(images_p)
results.shape
print('Probabilities for each class:')
results[3]
# This image should be a "4", so the highest probability value should be at index=4 (class_names[4]).
np.argmax(results, 1)
# Overall, on my handwritten digits, the model gets 6 of 10 correct (the first 6, coincidentally).
# # Export model
# Keras format
model.save('mnist.h5')
# Convert to TensorFlow lite
converter = lite.TFLiteConverter.from_keras_model_file('mnist.h5')
mdl = converter.convert()
with open('mnist.tflite', 'wb') as mfile:
mfile.write(mdl)
| notebooks/MNIST_GaussianNoise_TrainTest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Replacing ligand parameters in an already-parametrized system using `openmm-forcefields`
#
# This example applies SMIRNOFF-format parameters to a BRD4 inhibitor from the [living review on binding free energy benchmark systems](https://www.annualreviews.org/doi/abs/10.1146/annurev-biophys-070816-033654) by <NAME> Gilson. The BRD4 system comes from the [accompanying GitHub repository](https://github.com/MobleyLab/benchmarksets/tree/master/input_files/BRD4).
#
# This example uses the [`openmm-forcefields` package](http://github.com/openmm/openmm-forcefields) to add a [residue template generator](http://docs.openmm.org/latest/userguide/application.html#adding-residue-template-generators) to the [`openmm.app.ForceField`](http://docs.openmm.org/latest/api-python/generated/openmm.app.forcefield.ForceField.html#openmm.app.forcefield.ForceField) to allow Open Force Field small molecule parameters to be generated on the fly when parameterizing a system containing protein, small molecules, ions, and water. This example is meant to illustrate how to apply parameters to a single ligand, but it's also easy to process many ligands.
#
# ### Loading the already-parametrized system
# +
# Retrieve protein and ligand files for BRD4 and a docked inhibitor from the benchmark systems GitHub repository
# https://github.com/MobleyLab/benchmarksets
import requests
repo_url = (
"https://raw.githubusercontent.com/MobleyLab/benchmarksets/master/input_files/"
)
sources = {
"system.pdb": repo_url
+ "BRD4/prmtop-coords/BRD4-1.pdb", # complete system (protein+ligand+solvent+ions)
"ligand.sdf": repo_url + "BRD4/sdf/ligand-1.sdf", # ligand molecular identity
}
for (filename, url) in sources.items():
r = requests.get(url)
open(filename, "w").write(r.text)
# +
try:
from openmm.app import PDBFile
except ImportError:
from simtk.openmm.app import PDBFile
# Read complete system in OpenMM PDBFile
system_pdb = "system.pdb"
pdbfile = PDBFile(system_pdb)
# We have to remove H1-H2 bonds in waters if they are present
# AMBER's 'ambpdb -conect' adds these H1-H2 bonds, so we must remove them
def fix_water_bonds(topology):
    """Strip spurious H1-H2 bonds (added by AMBER's 'ambpdb -conect') from waters.

    Mutates *topology* in place and prints how many bonds were removed.
    """
    # TODO: We should create a simpler way to do this within OpenMM's Topology object
    initial_count = sum(1 for _ in topology.bonds())
    try:
        from openmm.app.element import hydrogen
    except ImportError:
        from simtk.openmm.app.element import hydrogen
    doomed = []
    for position, bond in enumerate(topology.bonds()):
        if bond.atom1.element == hydrogen and bond.atom2.element == hydrogen:
            doomed.append(position)
    # Pop from the back so earlier indices stay valid while deleting.
    for position in reversed(doomed):
        topology._bonds.pop(position)
    final_count = sum(1 for _ in topology.bonds())
    print(f"{initial_count - final_count} H-H bonds removed")
fix_water_bonds(pdbfile.topology)
# +
import numpy as np
try:
from openmm import unit
except ImportError:
from simtk import unit
# Load the definition of the small molecule in the system from an SDF file
from openff.toolkit.topology import Molecule
ligand = Molecule.from_file("ligand.sdf")
# Patch until openmmforcefield 0.7.3 is released
if ligand.partial_charges is None:
ligand.partial_charges = np.zeros([ligand.n_particles]) * unit.elementary_charge
# +
# Create an OpenMM ForceField object with AMBER ff14SB and TIP3P with compatible ions
try:
from openmm import app
except ImportError:
from simtk.openmm import app
forcefield = app.ForceField(
"amber/protein.ff14SB.xml",
"amber/tip3p_standard.xml",
"amber/tip3p_HFE_multivalent.xml",
)
# Use the SMIRNOFF residue template generator to load the openff-2.0.0 ("Sage") that knows about the ligand
from openmmforcefields.generators import SMIRNOFFTemplateGenerator
smirnoff = SMIRNOFFTemplateGenerator(forcefield="openff-2.0.0", molecules=ligand)
# Register the SMIRNOFF template generator
forcefield.registerTemplateGenerator(smirnoff.generator)
# -
# Create a parameterized OpenMM System from the PDB topology without bond constraints so we can convert to other packages
system = forcefield.createSystem(
pdbfile.topology,
nonbondedMethod=app.PME,
constraints=None,
rigidWater=False,
removeCMMotion=False,
)
# ### Create a ParmEd Structure object to export to other formats
# +
# Create the complex Structure
import parmed
complex_structure = parmed.openmm.load_topology(pdbfile.topology, system=system)
# Copy over the original coordinates and box vectors
complex_structure.coordinates = pdbfile.positions
complex_structure.box_vectors = pdbfile.topology.getPeriodicBoxVectors()
# -
# Save the final PDB file to make sure the conversion worked
complex_structure.save("new-system.pdb", overwrite=True)
# ### Export to AMBER and GROMACS formats
#
# We started off in AMBER format, and presumably may want to continue in that format -- so let's write out to AMBER and GROMACS format:
# +
# Export the Structure to AMBER files
complex_structure.save("complex-openff.prmtop", overwrite=True)
complex_structure.save("complex-openff.inpcrd", overwrite=True)
# Export the Structure to Gromacs files
complex_structure.save("complex-openff.gro", overwrite=True)
complex_structure.save("complex-openff.top", overwrite=True)
# -
# That should conclude our work in this example. However, perhaps we should just doublecheck by ensuring we can actually run some dynamics on the combined system without any trouble.
#
#
# ## As a test, run some dynamics on the combined system
#
# First, we create an OpenMM system, as we've done in other examples here. We can do this, in this case, using ParmEd's built-in `createSystem` functionality already attached to the combined `Structure`. We ask for a reasonable cutoff, constrained hydrogen bonds (note that **this keyword argument overrides the fact that we use the `unconstrained` force field above**; the ligand (and all other molecules in the system) **will** have covalent bonds to hydrogen constrained), PME, and rigid water:
# +
from parmed.openmm import NetCDFReporter
try:
from openmm import LangevinIntegrator, app
except ImportError:
from simtk.openmm import LangevinIntegrator, app
system = complex_structure.createSystem(
nonbondedMethod=app.PME,
nonbondedCutoff=9 * unit.angstrom,
constraints=app.HBonds,
rigidWater=True,
)
# -
# Next we'll set up the integrator, a reporter to write the trajectory, pick the timestep, and then go on to minimize the energy and run a very short amount of dynamics after setting the temperature to 300K:
# +
integrator = LangevinIntegrator(
300 * unit.kelvin, 1 / unit.picosecond, 0.001 * unit.picoseconds
)
simulation = app.Simulation(complex_structure.topology, system, integrator)
# Depending on where your system came from, you may want to
# add something like (30, 30, 30)*Angstrom to center the protein
# (no functional effect, just visualizes better)
# simulation.context.setPositions(complex_structure.positions + np.array([30, 30, 30])*unit.angstrom)
simulation.context.setPositions(complex_structure.positions)
nc_reporter = NetCDFReporter("trajectory.nc", 10)
simulation.reporters.append(nc_reporter)
# -
# Show the initial potential energy
potential_energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
print(potential_energy)
# Minimize the energy
simulation.minimizeEnergy()
minimized_coords = simulation.context.getState(getPositions=True).getPositions()
# Run some dynamics
simulation.context.setVelocitiesToTemperature(300 * unit.kelvin)
simulation.step(1000)
# Show the final potential energy
potential_energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
print(potential_energy)
| examples/swap_amber_parameters/swap_existing_ligand_parameters_with_openmmforcefields.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
def dashatize(n):
    """Surround every odd digit of n with dashes, collapsing doubled dashes.

    Non-integer input yields the string 'None'; the sign of n is ignored.
    """
    if not isinstance(n, int):
        return 'None'
    digits = str(abs(n))
    # Single-digit numbers never receive any dashes.
    if len(digits) == 1:
        return digits
    last = len(digits) - 1
    pieces = []
    for position, ch in enumerate(digits):
        is_odd = int(ch) % 2
        if position == 0:
            pieces.append(ch + '-' if is_odd else ch)
        elif position == last:
            pieces.append('-' + ch if is_odd else ch)
        elif is_odd:
            pieces.append('-' + ch + '-')
        else:
            pieces.append(ch)
    return ''.join(pieces).replace('--', '-')
dashatize(2244536643428)
| kata-6/Dashatize it.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Forecasting with a Bespoke Model
# Now that I have a basic model working, I wanted to try something more involved. The next method for forecasting sales that should be used is to use a bespoke time-series model.
#
# In this case I model the current dataset and try to predict using that model. I decided to fit multiple sine functions to the data, to represent the weekly and monthly trends. This is done in quite a simplistic but novel way but dampening noise in different ways using a moving average function. This makes long term trends and short term trends easier to see.
#
# ### Import packages, read pickle file
# +
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from os import path
import pickle
import scipy.optimize as opt
pd.options.display.max_seq_items = 2000
# -
openfile=open( r"C:\Users\Joseph\Documents\Beamly\TakeHomeTask\beamly_case_study_noNaNs.pickle", "rb")
df=pickle.load(openfile)
openfile.close()
# ### Impute missing product band
#
# Create the function using values fitted in notebook 5, then apply that function to create a new column containing the new product band sales (for 30to40). Check the header to see if values have been created correctly.
# +
def salescount_poisson(B12, B23):
    """
    Estimate sales for productBand 30to40 as a weighted mean of the two
    neighbouring bands (weights fitted in notebook 5).
    """
    weight_b12 = 0.7552276624532916
    weight_b23 = 0.5290400984433794
    return 0.5 * (weight_b12 * B12 + weight_b23 * B23)
# -
forecast_august=df.groupby(by=['productBand', 'date']).sum()['total_products'].unstack(0)
forecast_august['between30and40']=forecast_august.apply(lambda x:salescount_poisson(x['between10and20'],x['between20and30']),axis=1)
forecast_august.drop(forecast_august.index[len(forecast_august)-1],inplace=True)
forecast_august
# ### Fitting the model
# The movingaverage function is more important in this model. It is used to smooth the data, so that monthly and weekly trends can be modeled one after the other.
def movingaverage(interval, window_size):
    """
    Smooth *interval* with a centred box filter of width *window_size*.

    Uses a 'same'-mode convolution, so the output has the same length as the
    input; useful for looking at trends while dampening noise.
    """
    width = int(window_size)
    box = np.ones(width) / float(width)
    return np.convolve(interval, box, 'same')
# ## Forecast New Line
# First look at the current data for the new line. The sales for July and half of August are available for the new line; we just need to forecast the last two weeks of August.
f, ax = plt.subplots(1,figsize=(18,6))
forecast_august['between30and40'].plot(kind='bar', ax=ax,color='blue',alpha=0.6,label='between30and40')
move_av_30to40=movingaverage(forecast_august['between30and40'].values,4)
ax.plot(move_av_30to40,color='blue')
# #### Forecast Model
# I use a simple sine function to model the trends.
def monthly_trend_forecaster(t, x):
    """
    Simple sine model of a sales trend, with parameters packed in x:
    - x[0] = amplitude
    - x[1] = frequency
    - x[2] = phase
    - x[3] = mean (vertical offset)
    """
    amplitude, frequency, phase, offset = x[0], x[1], x[2], x[3]
    return amplitude * np.sin(frequency * t + phase) + offset
# Can't use date as the index, so reset index
forecast_august.reset_index(inplace=True)
# First we want to forecast the more global trend, the model can be quite sensitive to the initial guess values (this is common for sin function fitting, where you need good guess values). The most important guess value is the frequency (guess_freq).
#between20and30 first
# Fit the long-term (monthly) trend: smooth heavily (window 6) so only the
# low-frequency oscillation remains, then least-squares fit the sine model.
y=movingaverage(forecast_august['between30and40'].values,6)
x1=forecast_august.index
guess_mean = np.mean(y)
guess_std = 3*np.std(y)/(2**0.5)/(2**0.5)
guess_phase = 0
# Sine fits are sensitive to the frequency guess; 0.3 targets the slow trend.
guess_freq = 0.3
guess_amp = 1
guess= [guess_amp, guess_freq, guess_phase, guess_mean]
errfunc = lambda o, x1, y: monthly_trend_forecaster(x1,o) - y # Residuals between the sine model and the smoothed data.
fullout = opt.leastsq(errfunc, guess[:], args=(x1, y),full_output=1)
o1,cov_o1,infodict,mesg,ier=fullout
perr = np.sqrt(np.diag(cov_o1)) # Parameter std errors: sqrt of the covariance diagonal.
print(o1,perr)
print(np.mean(y))
# Try to calculate a basic error for this calculation.
# NOTE(review): the first three relative errors are combined in quadrature but
# the offset's relative error is then ADDED -- confirm this mix is intended.
error_comb_o1=np.sqrt((perr[0]/o1[0])**2+(perr[1]/o1[1])**2+(perr[2]/o1[2])**2)+(perr[3]/o1[3])
print(error_comb_o1)
# #### Plot the fitted global trend
f, ax = plt.subplots(1,figsize=(18,6))
forecast_august['between30and40'].plot(kind='bar', ax=ax,color='blue',alpha=0.6,label='between30and40')
ax.plot(y,color='blue')
ax.plot(x1,monthly_trend_forecaster(x1,o1),color='k')
# #### Model short term trend
# Now want to model the short term trend using a smaller moving average smooth. the frequency is also set to be larger.
#
#between20and30 first
move_av_30to40=movingaverage(forecast_august['between30and40'].values,2)
y=move_av_30to40
x1=forecast_august.index
guess_mean = np.mean(y)
guess_std = 3*np.std(y)/(2**0.5)/(2**0.5)
guess_phase = 0
guess_freq = 0.8
guess_amp = 1
guess= [guess_amp, guess_freq, guess_phase, guess_mean]
errfunc = lambda o, x1, y: monthly_trend_forecaster(x1,o) - y # Distance to the target function, through linear combination.
fullout = opt.leastsq(errfunc, guess[:], args=(x1, y),full_output=1)
o2,cov_o2,infodict,mesg,ier=fullout
perr2 = np.sqrt(np.diag(cov_o2))
print(o2,cov_o2)
print(np.mean(y))
f, ax = plt.subplots(1,figsize=(18,6))
forecast_august['between30and40'].plot(kind='bar', ax=ax,color='blue',alpha=0.6,label='between30and40')
ax.plot(move_av_30to40,color='blue')
ax.plot(x1,monthly_trend_forecaster(x1,o2),color='k')
# Again, try to give a crude estimate of the error
error_comb_o2=np.sqrt((perr2[0]/o2[0])**2+(perr2[1]/o2[1])**2+(perr2[2]/o2[2])**2)+(perr2[3]/o2[3])
print(error_comb_o2)
# ## Combined Forecasting Model
#
# Combine the error from the two models, as these models will need to be combined later anyway.
error_percent_tot=np.sqrt(error_comb_o1**2+error_comb_o2**2)
# Combine the two fitted sine functions to give the forecasting model and plot that model.
#def comb_func(x1,o1,o2):
# return (monthly_trend_forecaster(x1,o1)+monthly_trend_forecaster(x1,o2))/2.#This dampens the signal a little, better to use the lower function
def comb_func(x1, o1, o2):
    """Combine the monthly (o1) and weekly (o2) sine fits multiplicatively,
    normalising by the mean of the weekly component to keep the amplitude."""
    weekly = monthly_trend_forecaster(x1, o2)
    return monthly_trend_forecaster(x1, o1) * weekly / np.mean(weekly)
f, ax = plt.subplots(1,figsize=(18,6))
forecast_august['between30and40'].plot(kind='bar', ax=ax,color='blue',alpha=0.6,label='between30and40')
ax.plot(move_av_30to40,color='blue')
ax.plot(x1,comb_func(x1,o1,o2),color='k')
# ## Forecast Sales for last two weeks
# Now we have modelled the current dataset, we can use this to predict the sales in the future for any productBand. Although we will just do the band 30to40.
#
#
# ##### Note:
# The model does not use the timestamp, but just day, starting from 0. We want to model the last two weeks of August, in coding terms this is from days 44->60
# Create an array with the new days, then model the total product sales
newdates=np.arange(44,61,1)
b34sales=comb_func(newdates,o1,o2)
# Create a dataframe from this.
df_b34=pd.DataFrame(b34sales,index=newdates)
df_b34
# Assign new dates to this dataframe. starting from 08-14, as this was missing data as well.
import datetime
def assigndates(row):
    """Map a day-number row index back onto a calendar date.

    Day 44 corresponds to 2018-08-14 (the first missing/forecast day), so the
    row's date is that anchor plus its offset from 44.

    Parameters
    ----------
    row : pandas row whose ``name`` attribute is the integer day number.

    Returns
    -------
    datetime.date
    """
    anchor = datetime.date(2018, 8, 14)  # day 44 in the model's numbering
    # timedelta arithmetic rolls over month/year boundaries correctly,
    # unlike the previous raw `day + offset` construction which would raise
    # for offsets running past the end of the month.
    return anchor + datetime.timedelta(days=int(row.name) - 44)
# Map each day-number row onto its calendar date.
df_b34['date']=df_b34.apply(lambda row:assigndates(row),axis=1)
df_b34
# Assign the date to the index.
df_b34.reset_index(inplace=True)
df_b34.set_index('date',inplace=True)
df_b34.head()
# Drop the old index, then rename the sales index
df_b34.drop(['index'],axis=1,inplace=True)
df_b34.rename(columns={0: "forecast_B30to40"},inplace=True)
# Apply the error to the sales forecast, currently a very simple error propagation formula.
# Multiply the forecast column by the fractional error directly; the previous
# df.apply(...) form produced a whole DataFrame that was then assigned to a
# single column, which is fragile across pandas versions.
df_b34['forecast_error']=df_b34['forecast_B30to40']*error_percent_tot
df_b34.head()
# ### Plot forecasted Sales
# This is the final predicted sales for the new product band.
# Bar chart with error bars taken from the propagated fit uncertainty.
f, ax = plt.subplots(1,figsize=(18,6))
df_b34['forecast_B30to40'].plot(kind='bar',yerr=df_b34['forecast_error'], ax=ax,color='blue',alpha=0.6,label='Forecast_30to40')
| notebooks/7_Forecasting_Bespoke_Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# # !wget https://f000.backblazeb2.com/file/malaya-speech-model/data/audio-iium.zip
# # !unzip -q audio-iium.zip
# # !wget https://f000.backblazeb2.com/file/malaya-speech-model/data/audio-wattpad.zip
# # !unzip -q audio-wattpad.zip
# # !wget https://f000.backblazeb2.com/file/malaya-speech-model/data/news-speech.zip
# # !unzip -q news-speech.zip
# +
# # !wget https://f000.backblazeb2.com/file/malaya-speech-model/collections/transcript-news.json
# # !wget https://f000.backblazeb2.com/file/malaya-speech-model/collections/shuffled-iium.json
# # !wget https://f000.backblazeb2.com/file/malaya-speech-model/collections/transcript-wattpad.json
# -
import parselmouth
import librosa
import pyworld as pw
from sklearn.preprocessing import StandardScaler
import numpy as np
import os
# +
# # !pip3 install malaya-gpu -U --no-deps
# +
import yaml
with open('config.yaml') as fopen:
    # yaml.load without an explicit Loader is deprecated and can execute
    # arbitrary constructors; safe_load parses a plain config file
    # identically.
    config = yaml.safe_load(fopen)
config
# +
import numpy as np
# https://github.com/TensorSpeech/TensorFlowTTS/blob/master/tensorflow_tts/utils/outliers.py
def is_outlier(x, p25, p75):
    """Tukey-fence test: is *x* on or outside 1.5 IQR of the quartiles?"""
    iqr = p75 - p25
    low_fence = p25 - 1.5 * iqr
    high_fence = p75 + 1.5 * iqr
    return x <= low_fence or x >= high_fence
def remove_outlier(x, p_bottom: int = 25, p_top: int = 75):
    """Clamp outliers in *x* in place and return it.

    Values outside the Tukey fences of the (p_bottom, p_top) percentiles are
    first zeroed, then replaced by the maximum of the zeroed array, so
    extreme spikes become the largest remaining value.
    """
    lo = np.percentile(x, p_bottom)
    hi = np.percentile(x, p_top)
    outlier_idx = [i for i, v in enumerate(x) if is_outlier(v, lo, hi)]
    # Zero first so the replacement max is computed over non-outlier values.
    x[outlier_idx] = 0.0
    x[outlier_idx] = np.max(x)
    return x
# +
import re
# Symbol vocabulary for text-to-speech encoding: special tokens first, then
# punctuation and the Latin alphabet. Index into this list = symbol id.
_pad = 'pad'
_start = 'start'
_eos = 'eos'
_punctuation = "!'(),.:;? "
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
MALAYA_SPEECH_SYMBOLS = (
    [_pad, _start, _eos] + list(_special) + list(_punctuation) + list(_letters)
)
# -
def tts_encode(string: str, add_eos: bool = True):
    """Encode *string* into symbol ids, silently dropping unknown characters."""
    encoded = [
        MALAYA_SPEECH_SYMBOLS.index(ch)
        for ch in string
        if ch in MALAYA_SPEECH_SYMBOLS
    ]
    if add_eos:
        encoded.append(MALAYA_SPEECH_SYMBOLS.index('eos'))
    return encoded
# +
from unidecode import unidecode
import malaya
# Malay text normaliser; date/time normalisation is disabled at construction.
normalizer = malaya.normalize.normalizer(date = False, time = False)
def put_spacing_num(string):
    """Surround every run of Latin letters with spaces, then squeeze spaces.

    Separates digits from adjacent letters, e.g. 'abc123def' -> 'abc 123 def'.
    """
    spaced = re.sub('[A-Za-z]+', lambda m: ' ' + m.group(0) + ' ', string)
    return re.sub(r'[ ]+', ' ', spaced).strip()
def convert_to_ascii(string):
    """Transliterate non-ASCII characters to their closest ASCII equivalents."""
    return unidecode(string)
def collapse_whitespace(string):
    """Collapse every run of whitespace into a single space.

    The original body referenced a module-level ``_whitespace_re`` that is
    never defined in this file, so calling it raised NameError; the standard
    pattern is now inlined.
    """
    return re.sub(r'\s+', ' ', string)
def cleaning(string, normalize = True, add_eos = False):
    """Clean a transcript and encode it into symbol ids.

    Returns a ``(cleaned_string, encoded_ids)`` tuple.

    NOTE(review): ``string[-1]`` assumes the input is non-empty after
    whitespace collapsing — an empty transcript would raise IndexError;
    confirm upstream data never produces one.
    """
    sequence = []  # NOTE(review): unused leftover variable
    string = convert_to_ascii(string)
    # '&' is spoken as the Malay word 'dan'.
    string = string.replace('&', ' dan ')
    string = re.sub(r'[ ]+', ' ', string).strip()
    # Drop a trailing dash/comma, then guarantee terminal punctuation.
    if string[-1] in '-,':
        string = string[:-1]
    if string[-1] not in '.,?!':
        string = string + '.'
    if normalize:
        # Expand urls/emails/years etc. into words via the Malaya normaliser.
        string = normalizer.normalize(string,
                                      check_english = False,
                                      normalize_entity = False,
                                      normalize_text = False,
                                      normalize_url = True,
                                      normalize_email = True,
                                      normalize_year = True)
        string = string['normalize']
    else:
        string = string
    # Space out letter runs, drop symbols outside the vocabulary, lowercase.
    string = put_spacing_num(string)
    string = ''.join([c for c in string if c in MALAYA_SPEECH_SYMBOLS])
    string = re.sub(r'[ ]+', ' ', string).strip()
    string = string.lower()
    return string, tts_encode(string, add_eos = add_eos)
# -
from glob import glob
from tqdm import tqdm
import json
# Pair each news wav file with its transcript; the filename stem is the
# integer index into the transcript list.
audios = glob('audio/*.wav')
len(audios)
with open('transcript-news.json') as fopen:
    transcribe = json.load(fopen)
txts = []
for f in audios:
    t = transcribe[int(f.split('/')[-1].replace('.wav', ''))]
    txts.append(t)
news = list(zip(audios, txts))
# Same pairing for the IIUM recordings.
audios = glob('audio-iium/*.wav')
len(audios)
with open('shuffled-iium.json') as fopen:
    transcribe = json.load(fopen)
txts = []
for f in audios:
    t = transcribe[int(f.split('/')[-1].replace('.wav', ''))]
    txts.append(t)
iium = list(zip(audios, txts))
# And for the Wattpad recordings.
audios = glob('audio-wattpad/*.wav')
len(audios)
with open('transcript-wattpad.json') as fopen:
    transcribe = json.load(fopen)
txts = []
for f in audios:
    t = transcribe[int(f.split('/')[-1].replace('.wav', ''))]
    txts.append(t)
wattpad = list(zip(audios, txts))
# Sanity-check the cleaning pipeline on one string.
cleaning('hello nama saya Husein_-')
# +
import malaya_speech
from malaya_speech import Pipeline
# WebRTC voice-activity detector, used by process() for silence trimming.
vad = malaya_speech.vad.webrtc()
def process(txts, silent_trail = 500, maxlen = 25):
    """Extract TTS training features for a list of (wav_path, transcript) pairs.

    For each clip: clean/encode the transcript, optionally trim long silences
    with WebRTC VAD, then compute the log-mel spectrogram, f0 (WORLD DIO +
    stonemask), per-frame energy and Praat pitch.

    Parameters
    ----------
    txts : tuple whose first element is the list of ``(wav_path, transcript)``
        pairs (wrapped in a tuple because ``mp.multiprocessing`` packs args).
    silent_trail : int
        Number of samples of silence kept on each side of a silent segment.
    maxlen : int
        Clips longer than this many seconds are skipped.

    Returns
    -------
    list
        A single-element list ``[[audios, mels, text_ids, f0s, energies,
        pitches]]``, nested for the multiprocessing reducer.
    """
    txts = txts[0]
    audios, mels, text_ids, f0s, energies, pitches = [], [], [], [], [], []
    # fmin/fmax and the mel filterbank depend only on the constant config,
    # so build them once instead of once per file.
    fmin = 0 if config["fmin"] is None else config["fmin"]
    # BUG FIX: the original evaluated `sampling_rate // 2` with an undefined
    # name `sampling_rate`, raising NameError whenever config["fmax"] was None.
    fmax = config['sampling_rate'] // 2 if config["fmax"] is None else config["fmax"]
    mel_basis = librosa.filters.mel(
        sr=config['sampling_rate'],
        n_fft=config["fft_size"],
        n_mels=config["num_mels"],
        fmin=fmin,
        fmax=fmax,
    )
    for f in tqdm(txts):
        r, text = f
        # NOTE(review): cleaning() returns a (string, ids) tuple; the whole
        # tuple is appended to text_ids below, matching original behaviour.
        text = cleaning(text)
        audio, _ = malaya_speech.load(r, sr = config['sampling_rate'])
        if (len(audio) / config['sampling_rate']) > maxlen:
            print('skipped, audio too long')
            continue
        if config['trim_silence']:
            # Run WebRTC VAD on a 16 kHz copy, then stitch the original-rate
            # frames back together keeping only `silent_trail` samples of
            # each silent stretch.
            y_ = malaya_speech.resample(audio, config['sampling_rate'], 16000)
            y_ = malaya_speech.astype.float_to_int(y_)
            frames = list(malaya_speech.generator.frames(audio, 30, config['sampling_rate']))
            frames_ = list(malaya_speech.generator.frames(y_, 30, 16000, append_ending_trail = False))
            frames_webrtc = [(frames[no], vad(frame)) for no, frame in enumerate(frames_)]
            grouped_deep = malaya_speech.group.group_frames(frames_webrtc)
            grouped_deep = malaya_speech.group.group_frames_threshold(grouped_deep, 0.1)
            r = []
            for no, g in enumerate(grouped_deep):
                if g[1]:
                    # Voiced segment: keep in full.
                    g = g[0].array
                else:
                    # Silent segment: keep only a short trail at the edges.
                    if no == 0:
                        g = g[0].array[-200:]
                    elif no == (len(grouped_deep) - 1):
                        g = g[0].array[:silent_trail]
                    else:
                        g = np.concatenate([g[0].array[:silent_trail], g[0].array[-silent_trail:]])
                r.append(g)
            audio = np.concatenate(r)
        # Linear spectrogram -> log10 mel spectrogram (frames x num_mels).
        D = librosa.stft(
            audio,
            n_fft=config['fft_size'],
            hop_length=config['hop_size'],
            win_length=config['win_length'],
            window=config['window'],
            pad_mode='reflect',
        )
        S, _ = librosa.magphase(D)
        mel = np.log10(np.maximum(np.dot(mel_basis, S), 1e-10)).T
        # Align the waveform length to an integer number of hops.
        audio = np.pad(audio, (0, config["fft_size"]), mode="edge")
        audio = audio[: len(mel) * config['hop_size']]
        # Fundamental frequency via WORLD (DIO + stonemask refinement).
        _f0, t = pw.dio(
            audio.astype(np.double),
            fs=config['sampling_rate'],
            f0_ceil=fmax,
            frame_period=1000 * config['hop_size'] / config['sampling_rate'],
        )
        f0 = pw.stonemask(audio.astype(np.double), _f0, t, config['sampling_rate'])
        # Pad/truncate f0 to exactly one value per mel frame.
        if len(f0) >= len(mel):
            f0 = f0[: len(mel)]
        else:
            f0 = np.pad(f0, (0, len(mel) - len(f0)))
        # extract energy
        energy = np.sqrt(np.sum(S ** 2, axis=0))
        f0 = remove_outlier(f0)
        energy = remove_outlier(energy)
        mel_len = len(mel)
        # NOTE(review): sampling_frequency is hard-coded to 22050 here —
        # presumably equal to config['sampling_rate']; confirm.
        snd = parselmouth.Sound(audio,sampling_frequency=22050)
        pitch = snd.to_pitch(time_step=snd.duration / (mel_len + 3)
                             ).selected_array['frequency']
        if config["global_gain_scale"] > 0.0:
            audio *= config["global_gain_scale"]
        # Skip clips whose energy or f0 is entirely zero after outlier removal.
        if len(energy[energy != 0]) == 0 or len(f0[f0 != 0]) == 0:
            print('skipped')
            continue
        audios.append(audio)
        mels.append(mel)
        text_ids.append(text)
        f0s.append(f0)
        energies.append(energy)
        pitches.append(pitch)
    return [[audios, mels, text_ids, f0s, energies, pitches]]
# -
import matplotlib.pyplot as plt
import IPython.display as ipd
# Spot-check a small batch of processed wattpad features.
i = 11
r = process((wattpad[i: i + 5],))[0]
k = 2
ipd.Audio(r[0][k], rate = 22050)
r[2][k][0]
# Plot the mel spectrogram and waveform of the chosen sample.
nrows = 2
fig, ax = plt.subplots(nrows = nrows, ncols = 1)
fig.set_figwidth(10)
fig.set_figheight(nrows * 3)
mel_outputs_ = np.reshape(r[1][k], [-1, 80])
im = ax[0].imshow(np.rot90(mel_outputs_), aspect='auto', interpolation='none')
fig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax[0])
ax[1].plot(r[0][k])
plt.show()
# +
import mp
# Accumulators for the features extracted from all three corpora.
audios, mels, text_ids, f0s, energies, pitches = [], [], [], [], [], []
# -
# Fan process() out over 10 cores per corpus and merge the results.
results = mp.multiprocessing(wattpad, process, cores = 10, returned = True)
for result in results:
    audios.extend(result[0])
    mels.extend(result[1])
    text_ids.extend(result[2])
    f0s.extend(result[3])
    energies.extend(result[4])
    pitches.extend(result[5])
results = mp.multiprocessing(iium, process, cores = 10, returned = True)
for result in results:
    audios.extend(result[0])
    mels.extend(result[1])
    text_ids.extend(result[2])
    f0s.extend(result[3])
    energies.extend(result[4])
    pitches.extend(result[5])
results = mp.multiprocessing(news, process, cores = 10, returned = True)
for result in results:
    audios.extend(result[0])
    mels.extend(result[1])
    text_ids.extend(result[2])
    f0s.extend(result[3])
    energies.extend(result[4])
    pitches.extend(result[5])
# +
# Fit per-feature normalisation statistics incrementally. Zero entries are
# excluded for energy/f0/pitch — presumably zeros mark unvoiced or removed
# frames (see remove_outlier); confirm.
scaler_mel = StandardScaler(copy=False)
scaler_energy = StandardScaler(copy=False)
scaler_f0 = StandardScaler(copy=False)
scaler_pitch = StandardScaler(copy=False)
for mel, f0, energy, pitch in zip(mels, f0s, energies, pitches):
    scaler_mel.partial_fit(mel)
    scaler_energy.partial_fit(energy[energy != 0].reshape(-1, 1))
    scaler_f0.partial_fit(f0[f0 != 0].reshape(-1, 1))
    scaler_pitch.partial_fit(pitch[pitch != 0].reshape(-1, 1))
# -
# Only the mel spectrograms are normalised in place here.
for i in tqdm(range(len(mels))):
    mels[i] = scaler_mel.transform(mels[i])
directory_stats = 'husein-stats-v3'
def save_statistics_to_file(scaler_list, config):
    """Persist each scaler's (mean_, scale_) pair as ``stats<name>.npy``.

    Parameters
    ----------
    scaler_list : list of ``(scaler, name)`` tuples; ``name`` is the suffix
        appended to the output filename.
    config : unused; kept for call-site compatibility.
    """
    # makedirs is portable and idempotent, unlike shelling out to `mkdir`.
    os.makedirs(directory_stats, exist_ok=True)
    for scaler, name in scaler_list:
        # Row 0 = means, row 1 = scales, stored as float32.
        stats = np.stack((scaler.mean_, scaler.scale_))
        np.save(
            os.path.join(directory_stats, f"stats{name}.npy"),
            stats.astype(np.float32),
            allow_pickle=False,
        )
scaler_list = [(scaler_mel, ""), (scaler_energy, "_energy"), (scaler_f0, "_f0"),
              (scaler_pitch, "_pitch")]
save_statistics_to_file(scaler_list, config)
# Write every per-utterance feature array to its own .npy file.
directory = 'output-husein-v2'
# !rm -rf {directory}
os.system(f'mkdir {directory}')
directories = ['audios', 'mels', 'text_ids', 'f0s', 'energies', 'pitches']
for d in directories:
    os.system(f'mkdir {directory}/{d}')
for i in tqdm(range(len(mels))):
    np.save(f'{directory}/audios/{i}.npy', audios[i])
    np.save(f'{directory}/mels/{i}.npy', mels[i])
    np.save(f'{directory}/text_ids/{i}.npy', text_ids[i])
    np.save(f'{directory}/f0s/{i}.npy', f0s[i])
    np.save(f'{directory}/energies/{i}.npy', energies[i])
    np.save(f'{directory}/pitches/{i}.npy', pitches[i])
| pretrained-model/prepare-tts/preprocess-husein.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbsphinx="hidden"
# [Index](Index.ipynb) - [Back](Widget List.ipynb) - [Next](Widget Events.ipynb)
# -
# # Output widgets: leveraging Jupyter's display system
import ipywidgets as widgets
# + [markdown] slideshow={"slide_type": "slide"}
# The `Output` widget can capture and display stdout, stderr and [rich output generated by IPython](http://ipython.readthedocs.io/en/stable/api/generated/IPython.display.html#module-IPython.display). You can also append output directly to an output widget, or clear it programmatically.
# -
# An Output widget with a visible border so the display area is obvious.
out = widgets.Output(layout={'border': '1px solid black'})
out
# After the widget is created, direct output to it using a context manager. You can print text to the output area:
with out:
    for i in range(10):
        print(i, 'Hello world!')
# Rich output can also be directed to the output area. Anything which displays nicely in a Jupyter notebook will also display well in the `Output` widget.
from IPython.display import YouTubeVideo
with out:
    display(YouTubeVideo('eWzY2nGfkXk'))
# We can even display complex mimetypes, such as nested widgets, in an output widget.
with out:
    display(widgets.IntSlider())
# We can also append outputs to the output widget directly with the convenience methods `append_stdout`, `append_stderr`, or `append_display_data`.
out = widgets.Output(layout={'border': '1px solid black'})
out.append_stdout('Output appended with append_stdout')
out.append_display_data(YouTubeVideo('eWzY2nGfkXk'))
out
# We can clear the output by either using `IPython.display.clear_output` within the context manager, or we can call the widget's `clear_output` method directly.
out.clear_output()
# `clear_output` supports the keyword argument `wait`. With this set to `True`, the widget contents are not cleared immediately. Instead, they are cleared the next time the widget receives something to display. This can be useful when replacing content in the output widget: it allows for smoother transitions by avoiding a jarring resize of the widget following the call to `clear_output`.
#
# Finally, we can use an output widget to capture all the output produced by a function using the `capture` decorator.
# + tags=["raises-exception"]
@out.capture()
def function_with_captured_output():
    # Both the print and the exception traceback are captured into `out`
    # instead of appearing under the calling cell.
    print('This goes into the output widget')
    raise Exception('As does this')
function_with_captured_output()
# -
# `out.capture` supports the keyword argument `clear_output`. Setting this to `True` will clear the output widget every time the function is invoked, so that you only see the output of the last invocation. With `clear_output` set to `True`, you can also pass a `wait=True` argument to only clear the output once new output is available. Of course, you can also manually clear the output any time as well.
out.clear_output()
# ### Output widgets as the foundation for interact
#
# The output widget forms the basis of how interact and related methods are implemented. It can also be used by itself to create rich layouts with widgets and code output. One simple way to customize how an interact UI looks is to use the `interactive_output` function to hook controls up to a function whose output is captured in the returned output widget. In the next example, we stack the controls vertically and then put the output of the function to the right.
# +
# Three sliders that feed interactive_output below.
a = widgets.IntSlider(description='a')
b = widgets.IntSlider(description='b')
c = widgets.IntSlider(description='c')
def f(a, b, c):
    """Print the product of the three slider values as 'a*b*c=result'."""
    product = a * b * c
    print('{}*{}*{}={}'.format(a, b, c, product))
# Capture f's printed output in a widget and lay it out beside the sliders.
out = widgets.interactive_output(f, {'a': a, 'b': b, 'c': c})
widgets.HBox([widgets.VBox([a, b, c]), out])
# -
# ### Debugging errors in callbacks with the output widget
#
# On some platforms, like JupyterLab, output generated by widget callbacks (for instance, functions attached to the `.observe` method on widget traits, or to the `.on_click` method on button widgets) are not displayed anywhere. Even on other platforms, it is unclear what cell this output should appear in. This can make debugging errors in callback functions more challenging.
#
# An effective tool for accessing the output of widget callbacks is to decorate the callback with an output widget's capture method. You can then display the widget in a new cell to see the callback output.
# +
debug_view = widgets.Output(layout={'border': '1px solid black'})
# Route the callback's prints and traceback into debug_view for inspection.
@debug_view.capture(clear_output=True)
def bad_callback(event):
    print('This is about to explode')
    return 1.0 / 0.0
button = widgets.Button(
    description='click me to raise an exception',
    layout={'width': '300px'}
)
button.on_click(bad_callback)
button
# -
debug_view
# ### Integrating output widgets with the logging module
#
# While using the `.capture` decorator works well for understanding and debugging single callbacks, it does not scale to larger applications. Typically, in larger applications, one might use the [logging](https://docs.python.org/3/library/logging.html) module to print information on the status of the program. However, in the case of widget applications, it is unclear where the logging output should go.
#
# A useful pattern is to create a custom [handler](https://docs.python.org/3/library/logging.html#handler-objects) that redirects logs to an output widget. The output widget can then be displayed in a new cell to monitor the application while it runs.
# +
import ipywidgets as widgets
import logging
class OutputWidgetHandler(logging.Handler):
    """Custom logging handler that renders log records inside an Output widget."""

    def __init__(self, *args, **kwargs):
        super(OutputWidgetHandler, self).__init__(*args, **kwargs)
        self.out = widgets.Output(layout={
            'width': '100%',
            'height': '160px',
            'border': '1px solid black'
        })

    def emit(self, record):
        """Overload of the logging.Handler method: prepend the formatted record."""
        # Newest entries go first by prepending to the widget's outputs tuple.
        entry = {
            'name': 'stdout',
            'output_type': 'stream',
            'text': self.format(record) + '\n'
        }
        self.out.outputs = (entry, ) + self.out.outputs

    def show_logs(self):
        """Display the underlying Output widget."""
        display(self.out)

    def clear_logs(self):
        """Remove all accumulated log entries."""
        self.out.clear_output()
# Wire the custom handler into a module-level logger.
logger = logging.getLogger(__name__)
handler = OutputWidgetHandler()
handler.setFormatter(logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s'))
logger.addHandler(handler)
logger.setLevel(logging.INFO)
# -
handler.show_logs()
# +
handler.clear_logs()
logger.info('Starting program')
try:
    logger.info('About to try something dangerous...')
    1.0/0.0
except Exception as e:
    logger.exception('An error occurred!')
# -
# ### Interacting with output widgets from background threads
#
# Jupyter's `display` mechanism can be counter-intuitive when displaying output produced by background threads. A background thread's output is printed to whatever cell the main thread is currently writing to. To see this directly, create a thread that repeatedly prints to standard out:
#
# ```python
# import threading
# import time
#
# def run():
# for i in itertools.count(0):
# time.sleep(1)
# print('output from background {}'.format(i))
#
# t = threading.Thread(target=run)
# t.start()
# ```
#
# This always prints in the currently active cell, not the cell that started the background thread.
#
# This can lead to surprising behaviour in output widgets. During the time in which output is captured by the output widget, *any* output generated in the notebook, regardless of thread, will go into the output widget.
#
# The best way to avoid surprises is to *never* use an output widget's context manager in a context where multiple threads generate output. Instead, we can pass an output widget to the function executing in a thread, and use `append_display_data()`, `append_stdout()`, or `append_stderr()` methods to append displayable output to the output widget.
# +
import threading
from IPython.display import display, HTML
import ipywidgets as widgets
import time
def thread_func(something, out):
    # Append from the worker thread via the append_* methods rather than
    # the `with out:` context manager (see the note in the markdown above).
    for i in range(1, 5):
        time.sleep(0.3)
        out.append_stdout('{} {} {}\n'.format(i, '**'*i, something))
    out.append_display_data(HTML("<em>All done!</em>"))
display('Display in main thread')
out = widgets.Output()
# Now the key: the container is displayed (while empty) in the main thread
display(out)
thread = threading.Thread(
    target=thread_func,
    args=("some text", out))
thread.start()
# ### Using the output widget with matplotlib
#
# You can also use the output widget itself to build your own interaction with graphical outputs such as matplotlib.
#
# When using matplotlib, it is important that your update function clears the output widget state using the `wait=True` flag to prevent flickering of the display while interacting. A simple example:
# +
import numpy as np
import ipywidgets as widgets
import matplotlib.pyplot as plt
out = widgets.Output(layout=widgets.Layout(height='300px'))
x = np.linspace(0,1,100)
def update_plot(w):
    # wait=True defers clearing until new output arrives, avoiding flicker.
    with out:
        out.clear_output(wait=True)
        plt.plot(x, x**p_widget.value)
        plt.show()
p_widget = widgets.FloatSlider(min=0, max=2, step=0.1, value = 1)
update_plot([])  # draw the initial curve
# NOTE(review): observe() without names='value' fires on every trait change;
# confirm whether filtering to the 'value' trait was intended.
p_widget.observe(update_plot)
display(p_widget, out)
# -
# Note that using the default `inline` driver, the long delay of the refresh from the matplotlib plot can result in the interaction falling behind, leaving a long trail of pending plot updates which can persist for a long time. This long waiting time after stopping the interaction can be mitigated using [throttling](https://ipywidgets.readthedocs.io/en/latest/examples/Widget%20Events.html?highlight=throttle#Throttling), as described in the Widget Events section of the documentation. If used with the code above, this will result in a flicker free interaction with the plot that is slow to update (due to the fundamental limits of matplotlib `inline`) but minimizes the "pipeline" of update commands that have to be processed when the user stops interacting with the widgets.
| docs/source/examples/Output Widget.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Read c3D files: output xarray
#
# > <NAME>
# > [Laboratory of Biomechanics and Motor Control](http://demotu.org/)
# > Federal University of ABC, Brazil
# Let's use the [EZC3D](https://github.com/pyomeca/ezc3d) library to open c3d files.
#
# > [EZC3D](https://github.com/pyomeca/ezc3d) is an easy to use reader, modifier and writer for C3D format files.
# + [markdown] toc=true
# <h1>Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Python-setup" data-toc-modified-id="Python-setup-1"><span class="toc-item-num">1 </span>Python setup</a></span><ul class="toc-item"><li><span><a href="#Configuration" data-toc-modified-id="Configuration-1.1"><span class="toc-item-num">1.1 </span>Configuration</a></span></li></ul></li><li><span><a href="#Using-EZC3D-directly-and-return-an-ezc3d-object" data-toc-modified-id="Using-EZC3D-directly-and-return-an-ezc3d-object-2"><span class="toc-item-num">2 </span>Using EZC3D directly and return an ezc3d object</a></span></li><li><span><a href="#Using-EZC3D-directly-and-return-an-ezc3d-swig-object" data-toc-modified-id="Using-EZC3D-directly-and-return-an-ezc3d-swig-object-3"><span class="toc-item-num">3 </span>Using EZC3D directly and return an ezc3d swig object</a></span><ul class="toc-item"><li><span><a href="#Platform-data" data-toc-modified-id="Platform-data-3.1"><span class="toc-item-num">3.1 </span>Platform data</a></span></li></ul></li><li><span><a href="#Using-EZC3D-indirectly-via-custom-functions-and-return-a-xarray-object" data-toc-modified-id="Using-EZC3D-indirectly-via-custom-functions-and-return-a-xarray-object-4"><span class="toc-item-num">4 </span>Using EZC3D indirectly via custom functions and return a xarray object</a></span></li></ul></div>
# -
# ## Python setup
# +
import numpy as np
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import sys, os
import xarray as xr
import ezc3d
from pyversions import versions
sys.path.insert(1, r'./../functions')
import read_c3d_xr
versions();
# -
# ### Configuration
# Plot styling, and keep xarray attributes through operations.
sns.set_context('paper', font_scale=1.2, rc={"lines.linewidth": 2})
sns.set_style("darkgrid")
xr.set_options(keep_attrs=True)
# ## Using EZC3D directly and return an ezc3d object
fname = r'./../data/S0401cin03.c3d'
print(fname)
# Parse the c3d file and dump its nested dict structure.
x = ezc3d.c3d(fname)
read_c3d_xr.printdict(x)
# ## Using EZC3D directly and return an ezc3d swig object
c3d = ezc3d.c3d(fname).c3d_swig
all_labels = c3d.pointNames()
all_labels
# ### Platform data
# +
c3d = ezc3d.c3d(fname, extract_forceplat_data=True)
pf_0 = c3d['data']['platform'][0]
# The lines below enumerate the available platform keys; in a notebook cell
# only the final expression is displayed — the rest are no-ops kept as a
# reference of the dictionary contents.
pf_0['unit_force'] # Units of forces
pf_0['unit_moment'] # Units of moments
pf_0['unit_position'] # Units of center of pressure
pf_0['cal_matrix'] # Calibration matrix
pf_0['corners'] # Position of the corners
pf_0['origin'] # Position of the origin
pf_0['force'] # Force data
pf_0['moment'] # Moment data
pf_0['center_of_pressure'] # Center of pressure data
pf_0['Tz'] # Moment at center of pressure data
# -
pf_0['unit_position']
pf_0['origin']
pf_0['corners'].T
pf_0['moment'][:, 200]
pf_0['Tz'][:, 200]
pf_0['center_of_pressure'][:, 200]
# ## Using EZC3D indirectly via custom functions and return a xarray object
# Read each variable group as an xarray object and plot it with event lines.
prm = read_c3d_xr.get_parameters(fname)
prm
data = read_c3d_xr.read_c3d(fname, var='POINT', prm=prm)
data
data = read_c3d_xr.read_c3d(fname, var='ANALOG', prm=prm)
data
data = read_c3d_xr.read_c3d(fname, var='GRF', prm=prm)
data
data.Time
g = data.plot.line(x="Time", col='Axis', sharey=False, color='g',
                   size=3, aspect=1.25)
read_c3d_xr.plot_lines(g.axes, data.Time.attrs['events'], show=True)
g = read_c3d_xr.plot(data)
# Joint angles: select the hip/knee/ankle of the recorded side.
data = read_c3d_xr.read_c3d(fname, var='ANGLE', prm=prm)
data
v = [data.Time.attrs['side'] + ang for ang in ['Hip', 'Knee', 'Ankle']]
g = data.sel(Var=v).plot.line(x='Time', row='Var', col='Axis', sharey=False,
                              color='g', size=2.5, aspect=1.5)
read_c3d_xr.plot_lines(g.axes, data.Time.attrs['events'], show=True)
data = read_c3d_xr.read_c3d(fname, var='MOMENT', prm=prm)
data
v = [data.Time.attrs['side'] + ang for ang in ['Hip', 'Knee', 'Ankle']]
g = data.sel(Var=v).plot.line(x='Time', row='Var', col='Axis', sharey=False,
                              color='g', size=2.5, aspect=1.5)
read_c3d_xr.plot_lines(g.axes, data.Time.attrs['events'], show=True)
data = read_c3d_xr.read_c3d(fname, var='POWER', prm=prm)
data
v = [data.Time.attrs['side'] + ang for ang in ['Hip', 'Knee', 'Ankle']]
g = data.sel(Var=v).plot.line(x='Time', row='Var', col='Axis', sharey=False,
                              color='g', size=2.5, aspect=1.5)
read_c3d_xr.plot_lines(g.axes, data.Time.attrs['events'], show=True)
# Oxford foot model angles.
data = read_c3d_xr.read_c3d(fname, var='OXFORD', prm=prm)
data
v = [data.Time.attrs['side'] + ang for ang in ['HFTBA', 'FFHFA', 'FFTBA', 'HXFFA']]
g = data.sel(Var=v).plot.line(x='Time', row='Var', col='Axis', sharey=False,
                              color='g', size=2.5, aspect=1.5)
read_c3d_xr.plot_lines(g.axes, data.Time.attrs['events'], show=True)
| renan/notebooks/read_c3d_xr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (spectrome)
# language: python
# name: spectrome
# ---
# # Comparing canonical functional networks vs. complex structural eigenmodes.
# +
from ipywidgets import interactive, widgets, fixed
from surfer import Brain as surface
from sklearn.preprocessing import minmax_scale
import os
import nibabel as nib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# spectrome imports
from spectrome.brain import Brain
from spectrome.utils import functions, path
# +
# %gui qt
# set up Pysurfer variables
subject_id = "fsaverage"
hemi = ["lh","rh"]
surf = "white"
"""
Read in the automatic parcellation of sulci and gyri.
"""
hemi_side = "lh"
aparc_file = os.path.join(os.environ["SUBJECTS_DIR"],
                          subject_id, "label",
                          hemi_side + ".aparc.annot")
# labels: per-vertex index into the annotation's region table.
labels, ctab, names = nib.freesurfer.read_annot(aparc_file)
# -
# function for viewing canonical networks:
def get_fc_values(fc_df, labels, fc_name):
    """Render a canonical functional network on both hemispheres.

    Scales the network's region weights to [0, 1] per hemisphere, pads two
    extra entries at annotation indices 0 and 3 (regions without a network
    value — presumably the unlabelled annotation entries; confirm), maps
    region values onto vertices via *labels*, and displays them in Pysurfer.

    Returns the (left, right) per-vertex value arrays.
    """
    network_weights = fc_df.loc[fc_name].values
    # Left hemisphere = first 34 DK regions, right = the next 34.
    per_vertex = {}
    for hemi_name, region_vals in (("lh", network_weights[0:34]),
                                   ("rh", network_weights[34:68])):
        scaled = minmax_scale(region_vals)
        padded = np.insert(scaled, [0, 3], [0, 0])
        per_vertex[hemi_name] = padded[labels]
    fc_brain = surface(
        subject_id,
        "both",
        surf,
        background="white",
        alpha=0.3,
        title="Canonical Networks",
    )
    for hemi_name in ("lh", "rh"):
        fc_brain.add_data(per_vertex[hemi_name], hemi=hemi_name, thresh=0.15,
                          colormap=plt.cm.autumn_r, remove_existing=True)
    fc_brain.scale_data_colormap(color_fmin, color_fmid, color_fmax, transparent=False)
    return per_vertex["lh"], per_vertex["rh"]
# +
# Canonical functional network names available in the DK-atlas dictionary.
fc_names = [
    "Visual",
    "Limbic",
    "Default",
    "Somatomotor",
    "Frontoparietal",
    "Ventral_Attention",
    "Dorsal_Attention",
]
# Colormap anchor points shared by all the surface renderings below.
color_fmin, color_fmid, color_fmax = 0.1, 0.5, 0.9
# Load Pablo's canonical networks in DK atlas:
fc_dk = np.load("../data/com_dk.npy", allow_pickle=True).item()
fc_dk_normalized = pd.read_csv("../data/DK_dictionary_normalized.csv").set_index(
    "Unnamed: 0"
)
# -
# Interactive picker: re-renders the chosen canonical network on the surface.
interactive(
    get_fc_values,
    fc_df=fixed(fc_dk_normalized),
    labels=fixed(labels),
    fc_name=widgets.RadioButtons(
        options=fc_names, value="Limbic", description="Select canonical network"
    ),
)
# +
## This for-loop is for generating and saving figures for the paper don't run unless you really want to.
## Whatever is being generated here you can get with the interactive widget in the previous cell
for names in fc_names:
    lh, rh = get_fc_values(fc_dk_normalized, labels = labels, fc_name = names)
    # Generate FOV figures for 1 hemisphere first
    sb = surface(subject_id, 'lh', surf, background = "white", alpha = 1, title = "Canonical Network")
    sb.add_data(lh, hemi = 'lh', thresh = 0.15, colormap = plt.cm.autumn_r, remove_existing = True)
    sb.scale_data_colormap(color_fmin, color_fmid, color_fmax, transparent = False)
    sb.show_view('lat')
    sb.save_image('%s_lat.svg' % names)
    sb.show_view('med')
    sb.save_image('%s_med.svg' % names)
    # Generate FOV for both hemisphere dorsal view
    sb = surface(subject_id, "both", surf, background = "white", alpha = 1, title = "Canonical Network")
    sb.add_data(rh, hemi = 'rh', thresh = 0.15, colormap = plt.cm.autumn_r, remove_existing = True)
    sb.add_data(lh, hemi = 'lh', thresh = 0.15, colormap = plt.cm.autumn_r, remove_existing = True)
    sb.scale_data_colormap(color_fmin, color_fmid, color_fmax, transparent = False)
    ## save figures?
    sb.show_view('dor')
    sb.save_image('%s_dor.svg' % names)
# -
# ### Now we visualize the best matching complex structural eigenmodes for each network
# +
## Load the optimized parameters first
# Each .h5 stores an optimisation result dict — presumably basinhopping,
# given the bh_* naming: 'x' is the optimal parameter vector and 'fun' the
# (negated) correlation achieved; confirm against the fitting script.
data_path = "../data"
h5_path = os.path.join(data_path, "default.h5")
bh_default = path.read_hdf5(h5_path)
print('Default network parameters:' + str(np.round(bh_default['x'],2)) + ' at R=' + str(-np.round(bh_default['fun'],2)))
h5_path = os.path.join(data_path, "dorsal.h5")
bh_dorsal = path.read_hdf5(h5_path)
print('Doral Attention network parameters:' + str(np.round(bh_dorsal['x'],2)) + ' at R=' + str(-np.round(bh_dorsal['fun'],2)))
h5_path = os.path.join(data_path, "fronto.h5")
bh_front = path.read_hdf5(h5_path)
print('Frontoparietal network parameters:' + str(np.round(bh_front['x'],2)) + ' at R=' + str(-np.round(bh_front['fun'],2)))
h5_path = os.path.join(data_path, "limbic.h5")
bh_limbic = path.read_hdf5(h5_path)
print('Limbic network parameters:' + str(np.round(bh_limbic['x'],2)) + ' at R=' + str(-np.round(bh_limbic['fun'],2)))
h5_path = os.path.join(data_path, "motor.h5")
bh_motor = path.read_hdf5(h5_path)
print('Somatomotor network parameters:' + str(np.round(bh_motor['x'],2)) + ' at R=' + str(-np.round(bh_motor['fun'],2)))
h5_path = os.path.join(data_path, "ventral.h5")
bh_ventral = path.read_hdf5(h5_path)
print('Ventral Attention network parameters:' + str(np.round(bh_ventral['x'],2)) + ' at R=' + str(-np.round(bh_ventral['fun'],2)))
h5_path = os.path.join(data_path, "visual.h5")
bh_visual = path.read_hdf5(h5_path)
print('Visual network parameters:' + str(np.round(bh_visual['x'],2)) + ' at R=' + str(-np.round(bh_visual['fun'],2)))
# + [markdown] pycharm={"name": "#%% md\n"}
# Plot networks:
# +
from scipy.stats import spearmanr
def pysurfer_prep(pysurf_in, labels, atlas="DK"):
    """Scale region values to [0, 1] and expand them to per-vertex values.

    For the DK atlas, zeros are inserted at annotation indices 0 and 3
    (entries without a region value) before indexing by *labels*.
    """
    normalised = minmax_scale(pysurf_in)
    if atlas != "DK":
        return normalised[labels]
    return np.insert(normalised, [0, 3], [0, 0])[labels]
def eigmode2plot(labels, alpha_optimized, k_optimized, fc_name, lap_type="complex"):
    """Visualize the structural eigenmodes that best match a canonical network.

    Rebuilds the template ``Brain`` connectome, decomposes its (complex or
    real) Laplacian with the optimized ``alpha``/``k``, ranks all eigenmodes
    by Spearman correlation with the canonical network ``fc_name``, and saves
    lateral/medial/dorsal PySurfer screenshots for both the single best
    eigenmode and the least-squares combination of the top 10.

    Relies on notebook globals defined in earlier cells: ``fc_dk_normalized``,
    ``subject_id``, ``surf``, ``surface``, ``Brain`` and ``plt``.

    Returns (lh_best, rh_best, lh_combined, rh_combined, ordered_corr), where
    ordered_corr are eigenmode indices sorted by descending correlation.
    """
    hcp_dir = "../data"
    thr_colors = 0.35  # PySurfer display threshold for the colormap
    # Compute eigenmode with Brain: rebuild and symmetrize the connectome first.
    brain = Brain.Brain()
    brain.add_connectome(hcp_dir)
    brain.reorder_connectome(brain.connectome, brain.distance_matrix)
    brain.bi_symmetric_c()
    brain.reduce_extreme_dir()
    if lap_type == "complex":
        brain.decompose_complex_laplacian(
            alpha=alpha_optimized, k=k_optimized, num_ev=86
        )
    elif lap_type == "real":
        brain.add_regular_laplacian_eigenmodes(
            alpha=alpha_optimized, k=k_optimized, num_ev=86, vis=False
        )
    # Spearman correlation of every eigenmode with the canonical network:
    canon_network = np.nan_to_num(fc_dk_normalized.loc[fc_name].values)
    corrs = np.squeeze(np.zeros([brain.norm_eigenmodes.shape[1], 1]))
    for e in np.arange(0, len(corrs)):
        spearman_corr = spearmanr(
            np.squeeze(canon_network), brain.norm_eigenmodes[:, e]
        )[0]
        corrs[e] = spearman_corr
    # Sort eigenmodes by performance (descending correlation):
    ntw_opt_corr = np.round(corrs, 3)
    ordered_corr = np.argsort(-ntw_opt_corr)
    # For single best eigenmode:
    K = 1
    canon_network = np.nan_to_num(fc_dk_normalized.loc[fc_name].values).reshape(-1, 1)
    corr_eigs = brain.norm_eigenmodes[:, ordered_corr[0:K]]
    # Prepare eigenmodes for PySurfer (first 34 regions = left hemi, next 34 = right):
    lh_best = pysurfer_prep(corr_eigs[0:34], labels)
    rh_best = pysurfer_prep(corr_eigs[34:68], labels)
    # For top 10 combined: least-squares fit of the canonical map on the top-K modes.
    K = 10
    corr_eigs = brain.norm_eigenmodes[:, ordered_corr[0:K]]
    coef, r, _, _ = np.linalg.lstsq(corr_eigs, canon_network, rcond=None)
    comb_eig = np.squeeze(np.matmul(corr_eigs, np.asarray(coef)))
    # PySurfer-ready vectors for the combined map:
    lh_combined = pysurfer_prep(comb_eig[0:34], labels)
    rh_combined = pysurfer_prep(comb_eig[34:68], labels)
    # Visualize the best single eigenmode first.
    # Colormap bounds: empirically chosen fractions of the data range.
    best_min = 0.20+lh_best.min()
    best_max = 0.95*lh_best.max()
    best_mid = 0.70*lh_best.max()
    sb = surface(subject_id, "lh", surf, background="white", alpha=1)
    sb.add_data(lh_best, hemi="lh", thresh=thr_colors, colormap=plt.cm.autumn_r, remove_existing=True)
    sb.scale_data_colormap(best_min, best_mid, best_max, transparent=False)
    ## show lateral and medial views of left hemisphere and save figures
    sb.show_view("lat")
    sb.save_image("%s_ScaledBest_Lat.svg" % fc_name)
    sb.show_view("med")
    sb.save_image("%s_ScaledBest_Med.svg" % fc_name)
    ## dorsal view with both hemispheres:
    sb = surface(subject_id, "both", surf, background="white", alpha=1)
    sb.add_data(rh_best, hemi="rh", thresh=thr_colors, colormap=plt.cm.autumn_r, remove_existing=True)
    sb.add_data(lh_best, hemi="lh", thresh=thr_colors, colormap=plt.cm.autumn_r, remove_existing=True)
    sb.scale_data_colormap(best_min, best_mid, best_max, transparent=False)
    ## save the dorsal view
    sb.show_view("dor")
    sb.save_image("%s_ScaledBest_Dor.svg" % fc_name)
    # Same three views for the top-10 combined eigenmode.
    # NOTE(review): the mid fraction here is 0.75 vs 0.70 above — presumably
    # intentional tuning; confirm.
    combine_min, combine_max, combine_mid = 0.20+lh_combined.min(), 0.95*lh_combined.max(), 0.75*lh_combined.max()
    sb = surface(subject_id, "lh", surf, background="white", alpha=1)
    sb.add_data(lh_combined, hemi="lh", thresh=thr_colors, colormap=plt.cm.autumn_r, remove_existing=True)
    sb.scale_data_colormap(combine_min, combine_mid, combine_max, transparent=False)
    ## show lateral and medial views of left hemisphere and save figures
    sb.show_view("lat")
    sb.save_image("%s_ScaledCombined_Lat.svg" % fc_name)
    sb.show_view("med")
    sb.save_image("%s_ScaledCombined_Med.svg" % fc_name)
    ## dorsal view with both hemispheres:
    sb = surface(subject_id, "both", surf, background="white", alpha=1)
    sb.add_data(rh_combined, hemi="rh", thresh=thr_colors, colormap=plt.cm.autumn_r, remove_existing=True)
    sb.add_data(lh_combined, hemi="lh", thresh=thr_colors, colormap=plt.cm.autumn_r, remove_existing=True)
    sb.scale_data_colormap(combine_min, combine_mid, combine_max, transparent=False)
    ## save the dorsal view
    sb.show_view("dor")
    sb.save_image("%s_ScaledCombined_Dor.svg" % fc_name)
    return lh_best, rh_best, lh_combined, rh_combined, ordered_corr
# -
# Default mode network (original comment wrongly said "limbic"):
lh_best, rh_best, lh_combined, rh_combined, ordered_corr = eigmode2plot(
    labels, bh_default["x"][0], bh_default["x"][1], fc_name="Default"
)
# figures are saved in current directory
print('Best eigenmode is #:' + str(ordered_corr[0]))
# Visual:
lh_best, rh_best, lh_combined, rh_combined, ordered_corr = eigmode2plot(
    labels, bh_visual["x"][0], bh_visual["x"][1], fc_name="Visual"
)
# the figures are saved in current directory
print(ordered_corr[0])
# Frontoparietal:
lh_best, rh_best, lh_combined, rh_combined, ordered_corr = eigmode2plot(
    labels, bh_front["x"][0], bh_front["x"][1], fc_name="Frontoparietal"
)
# the figures are saved in current directory
print(ordered_corr[0])
# Somatomotor:
lh_best, rh_best, lh_combined, rh_combined, ordered_corr = eigmode2plot(
    labels, bh_motor["x"][0], bh_motor["x"][1], fc_name="Somatomotor"
)
# the figures are saved in current directory
print(ordered_corr[0])
# Ventral Attention:
lh_best, rh_best, lh_combined, rh_combined, ordered_corr = eigmode2plot(
    labels, bh_ventral["x"][0], bh_ventral["x"][1], fc_name="Ventral_Attention"
)
# the figures are saved in current directory
print(ordered_corr[0])
# Dorsal Attention:
lh_best, rh_best, lh_combined, rh_combined, ordered_corr = eigmode2plot(
    labels, bh_dorsal["x"][0], bh_dorsal["x"][1], fc_name="Dorsal_Attention"
)
# the figures are saved in current directory
print(ordered_corr[0])
| notebooks/00_canonical_fc_maps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import pandas as pd
from rdkit import DataStructs, Chem
from rdkit.Chem import AllChem
# Load the SMILES strings and compute an RDKit topological fingerprint for each.
save_path = "./data/l1000/max10/"
smile_name = 'all_max10.txt'
# NOTE(review): `squeeze=True` was removed in pandas 2.x; this notebook's
# kernel header declares Python 2, so it targets an old pandas.
smile_list = pd.read_csv(save_path+smile_name,squeeze=True).astype(str).tolist()
len(smile_list)
smile_list[0]
smile_list[4]
# RDKit path-based (Daylight-like) fingerprints, one per molecule.
fps = [Chem.RDKFingerprint(Chem.MolFromSmiles(x)) for x in smile_list]
len(fps)
# Sanity checks on two arbitrary fingerprints.
fps[0]==fps[55]
DataStructs.FingerprintSimilarity(fps[0],fps[55])
dis_pair={}  # unused below; presumably intended to cache pairwise distances
# +
def find_most_sim_mol(smile_list):
    '''
    smile_list: list of smiles

    For every molecule, find the index of the most similar *different*
    fingerprint (Tanimoto similarity via DataStructs.FingerprintSimilarity).
    Returns a dict mapping each index to its best-match index (-1 when no
    candidate with a distinct fingerprint exists).
    '''
    fps = [Chem.RDKFingerprint(Chem.MolFromSmiles(s)) for s in smile_list]
    best_match = {}
    for i, fp_i in enumerate(fps):
        if i in best_match:
            continue
        best_sim = 0
        best_j = -1
        for j, fp_j in enumerate(fps):
            # Skip self-comparison and identical fingerprints.
            if i == j or fp_i == fp_j:
                continue
            sim = DataStructs.FingerprintSimilarity(fp_i, fp_j)
            if sim > best_sim:
                best_sim = sim
                best_j = j
        best_match[i] = best_j
    return best_match
# -
# Map every molecule to its nearest neighbour and inspect a few results.
d= find_most_sim_mol(smile_list)
len(d)
# NOTE: dict.values()[:10] only works on Python 2 (Python 3 returns a view).
d.values()[:10]
# +
d.values()
lst = [smile_list[idx]for idx in d.values()]
lst[:10]
# -
smile_list[:10]
# Spot check: FingerprintSimilarity is symmetric in its two arguments.
DataStructs.FingerprintSimilarity(Chem.RDKFingerprint(Chem.MolFromSmiles('NCCCCCC(O)=O')),Chem.RDKFingerprint(Chem.MolFromSmiles('CCCC(CCC)C(O)=O')))
DataStructs.FingerprintSimilarity(Chem.RDKFingerprint(Chem.MolFromSmiles('CCCC(CCC)C(O)=O')),Chem.RDKFingerprint(Chem.MolFromSmiles('NCCCCCC(O)=O')))
| .ipynb_checkpoints/similarity-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright 2019 NVIDIA Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# -
# <img src="https://upload.wikimedia.org/wikipedia/en/6/6d/Nvidia_image_logo.svg" style="width: 90px; float: right;">
#
# # QA Inference on BERT using TensorRT
# ## 1. Overview
#
# Bidirectional Encoder Representations from Transformers (BERT), is a method of pre-training language representations which obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks.
#
# The original paper can be found here: https://arxiv.org/abs/1810.04805.
#
# ### 1.a Learning objectives
#
# This notebook demonstrates:
# - Inference on Question Answering (QA) task with BERT Base/Large model
# - The use of fine-tuned NVIDIA BERT models
# - Use of BERT model with TRT
# ## 2. Requirements
#
# Please refer to the ReadMe file
# ## 3. BERT Inference: Question Answering
#
# We can run inference on a fine-tuned BERT model for tasks like Question Answering.
#
# Here we use a BERT model fine-tuned on the [SQuAD 2.0 Dataset](https://rajpurkar.github.io/SQuAD-explorer/) which contains 100,000+ question-answer pairs on 500+ articles combined with over 50,000 new, unanswerable questions.
# ### 3.a Paragraph and Queries
#
# The paragraph and the questions can be customized by changing the text below. Note that when using models with small sequence lengths, you should use a shorter paragraph:
# #### Paragraph:
# +
paragraph_text = "The Apollo program, also known as Project Apollo, was the third United States human spaceflight program carried out by the National Aeronautics and Space Administration (NASA), which accomplished landing the first humans on the Moon from 1969 to 1972. First conceived during Dwight D. Eisenhower's administration as a three-man spacecraft to follow the one-man Project Mercury which put the first Americans in space, Apollo was later dedicated to President <NAME>'s national goal of landing a man on the Moon and returning him safely to the Earth by the end of the 1960s, which he proposed in a May 25, 1961, address to Congress. Project Mercury was followed by the two-man Project Gemini. The first manned flight of Apollo was in 1968. Apollo ran from 1961 to 1972, and was supported by the two-man Gemini program which ran concurrently with it from 1962 to 1966. Gemini missions developed some of the space travel techniques that were necessary for the success of the Apollo missions. Apollo used Saturn family rockets as launch vehicles. Apollo/Saturn vehicles were also used for an Apollo Applications Program, which consisted of Skylab, a space station that supported three manned missions in 1973-74, and the Apollo-Soyuz Test Project, a joint Earth orbit mission with the Soviet Union in 1975."
# Short paragraph version for BERT models with max sequence length of 128
short_paragraph_text = "The Apollo program was the third United States human spaceflight program. First conceived as a three-man spacecraft to follow the one-man Project Mercury which put the first Americans in space, Apollo was dedicated to President <NAME>'s national goal of landing a man on the Moon. The first manned flight of Apollo was in 1968. Apollo ran from 1961 to 1972 followed by the Apollo-Soyuz Test Project a joint Earth orbit mission with the Soviet Union in 1975."
# -
# #### Question:
question_text = "What project put the first Americans into space?"
#question_text = "What year did the first manned Apollo flight occur?"
#question_text = "What President is credited with the original notion of putting Americans in space?"
#question_text = "Who did the U.S. collaborate with on an Earth orbit mission in 1975?"
# In this example we ask our BERT model questions related to the following paragraph:
#
# **The Apollo Program**
# _"The Apollo program, also known as Project Apollo, was the third United States human spaceflight program carried out by the National Aeronautics and Space Administration (NASA), which accomplished landing the first humans on the Moon from 1969 to 1972. First conceived during Dwight D. Eisenhower's administration as a three-man spacecraft to follow the one-man Project Mercury which put the first Americans in space, Apollo was later dedicated to President <NAME>'s national goal of landing a man on the Moon and returning him safely to the Earth by the end of the 1960s, which he proposed in a May 25, 1961, address to Congress. Project Mercury was followed by the two-man Project Gemini. The first manned flight of Apollo was in 1968. Apollo ran from 1961 to 1972, and was supported by the two-man Gemini program which ran concurrently with it from 1962 to 1966. Gemini missions developed some of the space travel techniques that were necessary for the success of the Apollo missions. Apollo used Saturn family rockets as launch vehicles. Apollo/Saturn vehicles were also used for an Apollo Applications Program, which consisted of Skylab, a space station that supported three manned missions in 1973-74, and the Apollo-Soyuz Test Project, a joint Earth orbit mission with the Soviet Union in 1975."_
#
# The questions and relative answers expected are shown below:
#
# - **Q1:** "What project put the first Americans into space?"
# - **A1:** "Project Mercury"
# - **Q2:** "What program was created to carry out these projects and missions?"
# - **A2:** "The Apollo program"
# - **Q3:** "What year did the first manned Apollo flight occur?"
# - **A3:** "1968"
# - **Q4:** "What President is credited with the original notion of putting Americans in space?"
# - **A4:** "<NAME>"
# - **Q5:** "Who did the U.S. collaborate with on an Earth orbit mission in 1975?"
# - **A5:** "Soviet Union"
# - **Q6:** "How long did Project Apollo run?"
# - **A6:** "1961 to 1972"
# - **Q7:** "What program helped develop space travel techniques that Project Apollo used?"
# - **A7:** "Gemini Mission"
# - **Q8:** "What space station supported three manned missions in 1973-1974?"
# - **A8:** "Skylab"
# ## Data Preprocessing
# Let's convert the paragraph and the question to BERT input with the help of the tokenizer:
# +
# Build BERT inputs: tokenize the paragraph/question pair with the
# fine-tuned model's WordPiece vocabulary.
import data_processing as dp
import tokenization
tokenizer = tokenization.FullTokenizer(vocab_file="/workspace/models/fine-tuned/bert_tf_v2_large_fp16_128_v2/vocab.txt", do_lower_case=True)
# The maximum number of tokens for the question. Questions longer than this will be truncated to this length.
max_query_length = 64
# When splitting up a long document into chunks, how much stride to take between chunks.
doc_stride = 128
# The maximum total input sequence length after WordPiece tokenization.
# Sequences longer than this will be truncated, and sequences shorter
# than this will be padded.
max_seq_length = 128
# Extract tokens from the paragraph
doc_tokens = dp.convert_doc_tokens(short_paragraph_text)
# Extract features (input_ids / segment_ids / input_mask) from the paragraph and question
features = dp.convert_examples_to_features(doc_tokens, question_text, tokenizer, max_seq_length, doc_stride, max_query_length)
# -
# ## TensorRT Inference
# TensorRT runtime setup. The BERT plugin libraries must be loaded with
# RTLD_GLOBAL *before* deserializing an engine that references their ops.
import tensorrt as trt
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
import ctypes
import os
ctypes.CDLL("libnvinfer_plugin.so", mode=ctypes.RTLD_GLOBAL)
ctypes.CDLL("/workspace/TensorRT/demo/BERT/build/libcommon.so", mode=ctypes.RTLD_GLOBAL)
ctypes.CDLL("/workspace/TensorRT/demo/BERT/build/libbert_plugins.so", mode=ctypes.RTLD_GLOBAL)
# +
import pycuda.driver as cuda
import pycuda.autoinit
import numpy as np
import time
# Load the BERT-Large engine and run one asynchronous inference pass.
# All GPU work is enqueued on a single CUDA stream and synchronized once.
with open("/workspace/TensorRT/demo/BERT/bert_large_128.engine", "rb") as f, \
    trt.Runtime(TRT_LOGGER) as runtime, \
    runtime.deserialize_cuda_engine(f.read()) as engine, \
    engine.create_execution_context() as context:
    # We always use batch size 1.
    input_shape = (1, max_seq_length)
    input_nbytes = trt.volume(input_shape) * trt.int32.itemsize
    # Allocate device memory for the three inputs (ids, segments, mask).
    d_inputs = [cuda.mem_alloc(input_nbytes) for binding in range(3)]
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Specify input shapes. These must be within the min/max bounds of the active profile (0th profile in this case)
    # Note that input shapes can be specified on a per-inference basis, but in this case, we only have a single shape.
    for binding in range(3):
        context.set_binding_shape(binding, input_shape)
    assert context.all_binding_shapes_specified
    # Allocate output buffer by querying the size from the context. This may be different for different input shapes.
    h_output = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    d_output = cuda.mem_alloc(h_output.nbytes)
    print("\nRunning Inference...")
    eval_start_time = time.time()
    # Copy inputs host -> device (async; ordered on the stream)
    cuda.memcpy_htod_async(d_inputs[0], features["input_ids"], stream)
    cuda.memcpy_htod_async(d_inputs[1], features["segment_ids"], stream)
    cuda.memcpy_htod_async(d_inputs[2], features["input_mask"], stream)
    # Run inference
    context.execute_async_v2(bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output, d_output, stream)
    # Synchronize the stream so h_output is valid before timing/reading it.
    stream.synchronize()
    eval_time_elapsed = time.time() - eval_start_time
    print("-----------------------------")
    print("Running Inference in {:.3f} Sentences/Sec".format(1.0/eval_time_elapsed))
    print("-----------------------------")
# -
# ## Data Post-Processing
# Now that we have the inference results let's extract the actual answer to our question
# Each element of h_output holds per-token (start, end) logits for one batch
# entry; decode the most probable answer span from them.
for index, batch in enumerate(h_output):
    start_logits = batch[:, 0]
    end_logits = batch[:, 1]
    # The total number of n-best predictions to generate in the nbest_predictions.json output file
    n_best_size = 20
    # The maximum length of an answer that can be generated. This is needed
    # because the start and end predictions are not conditioned on one another
    max_answer_length = 30
    (prediction, nbest_json, scores_diff_json) = \
        dp.get_predictions(doc_tokens, features, start_logits, end_logits, n_best_size, max_answer_length)
    print("Processing output {:} in batch".format(index))
    print("Answer: '{}'".format(prediction))
    print("with prob: {:.3f}%".format(nbest_json[0]['probability'] * 100.0))
| demo/BERT/python/BERT_TRT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # BigQuery query magic
#
# Jupyter magics are notebook-specific shortcuts that allow you to run commands with minimal syntax. Jupyter notebooks come with many [built-in commands](https://ipython.readthedocs.io/en/stable/interactive/magics.html). The BigQuery client library, `google-cloud-bigquery`, provides a cell magic, `%%bigquery`. The `%%bigquery` magic runs a SQL query and returns the results as a pandas `DataFrame`.
# ## Run a query on a public dataset
#
# The following example queries the BigQuery `usa_names` public dataset. `usa_names` is a Social Security Administration dataset that contains all names from Social Security card applications for births that occurred in the United States after 1879.
#
# The following example shows how to invoke the magic (`%%bigquery`), and how to pass in a standard SQL query in the body of the code cell. The results are displayed below the input cell as a pandas [`DataFrame`](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html).
# %%bigquery
SELECT name, SUM(number) as count
FROM `bigquery-public-data.usa_names.usa_1910_current`
GROUP BY name
ORDER BY count DESC
LIMIT 10
# ## Display verbose output
#
# As the query job is running, status messages below the cell update with the query job ID and the amount of time the query has been running. By default, this output is erased and replaced with the results of the query. If you pass the `--verbose` flag, the output will remain below the cell after query completion.
# %%bigquery --verbose
SELECT name, SUM(number) as count
FROM `bigquery-public-data.usa_names.usa_1910_current`
GROUP BY name
ORDER BY count DESC
LIMIT 10
# ## Explicitly specify a project
#
# By default, the `%%bigquery` magic command uses your default project to run the query. You may also explicitly provide a project ID using the `--project` flag. Note that your credentials must have permissions to create query jobs in the project you specify.
# Replace with your own Google Cloud project ID before running the next cell.
project_id = 'your-project-id'
# %%bigquery --project $project_id
SELECT name, SUM(number) as count
FROM `bigquery-public-data.usa_names.usa_1910_current`
GROUP BY name
ORDER BY count DESC
LIMIT 10
# ## Assign the query results to a variable
#
# To save the results of your query to a variable, provide a variable name as a parameter to `%%bigquery`. The following example saves the results of the query to a variable named `df`. Note that when a variable is provided, the results are not displayed below the cell that invokes the magic command.
# %%bigquery df
SELECT name, SUM(number) as count
FROM `bigquery-public-data.usa_names.usa_1910_current`
GROUP BY name
ORDER BY count DESC
LIMIT 10
df
# ## Run a parameterized query
#
# Parameterized queries are useful if you need to run a query with certain parameters that are calculated at run time. Note that the value types must be JSON serializable. The following example defines a parameters dictionary and passes it to the `--params` flag. The key of the dictionary is the name of the parameter, and the value of the dictionary is the value of the parameter.
# Query parameters must be JSON-serializable; referenced in the SQL as @limit.
params = {"limit": 10}
# %%bigquery --params $params
SELECT name, SUM(number) as count
FROM `bigquery-public-data.usa_names.usa_1910_current`
GROUP BY name
ORDER BY count DESC
LIMIT @limit
| notebooks/tutorials/bigquery/BigQuery query magic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from random import sample

# Read the four students' names, then print them in a random presentation
# order. `input()` already returns str in Python 3, so the original
# `str(input(...))` wrappers were redundant and have been removed.
prompts = ('Primeiro aluno: ', 'Segundo aluno: ', 'Terceiro aluno: ', 'Quarto aluno: ')
lista_alunos = [input(p) for p in prompts]
print('A ordem de apresentação é')
# sample() with k == len() returns a shuffled copy without mutating the list.
print(sample(lista_alunos, len(lista_alunos)))
| Python/Exercicios_Curso_em_Videos/ex020.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.7 ('knowbert')
# language: python
# name: python3
# ---
# +
import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from allennlp.models.archival import load_archive
from allennlp.data import DatasetReader, DataIterator
from allennlp.common import Params
from allennlp.nn.util import move_to_device
from kb.include_all import BertPretrainedMaskedLM, KnowBert
# %load_ext autoreload
# %autoreload 2
import warnings
warnings.filterwarnings("ignore")
# +
# Paths / flags for the evaluation run.
evaluation_file = "/nas/home/gujiashe/trans/knowbert_ppl_top10.tsv"
model_archive="knowbert_wiki_wordnet_model"
random_candidates=False  # kept for the (commented-out) reader config below
# Load the trained KnowBert archive and freeze it for inference.
archive = load_archive(model_archive)
model = archive.model
vocab = model.vocab
params = archive.config
# Disable the multitask (entity-linking) heads; only the LM loss is needed.
model.multitask = False
model.multitask_kg = False
model.cuda()
model.eval()
for p in model.parameters():
    p.requires_grad_(False)
# -
# Build the data iterator and a kg_probe dataset reader that reuses the
# tokenizer/candidate-generator settings from the archived model config.
config = Params.from_file("/nas/home/gujiashe/kb/knowbert_wiki_wordnet_model/config.json")
# +
# reader_params = config.pop('dataset_reader')
# if reader_params['type'] == 'multitask_reader':
#     reader_params = reader_params['dataset_readers']['language_modeling']
# # reader_params['num_workers'] = 0
# if random_candidates:
#     for k, v in reader_params['base_reader']['tokenizer_and_candidate_generator']['entity_candidate_generators'].items():
#         v['random_candidates'] = True
# print(reader_params.as_dict())
# dataset_reader = DatasetReader.from_params(Params(reader_params))
iterator = DataIterator.from_params(Params({
    "type": "self_attn_bucket",
    "batch_size_schedule": "base-11gb-fp32",
    "iterator":{
        "type": "bucket",
        "batch_size": 1,
        "sorting_keys": [["tokens", "num_tokens"]],
        "max_instances_in_memory": 2500,
    }
}))
iterator.index_with(model.vocab)
# # instances = reader.read(evaluation_file)
# print("Start!")
reader_params = config.pop('dataset_reader')
if reader_params['type'] == 'multitask_reader':
    reader_params = reader_params['dataset_readers']['language_modeling']
# reader_params['num_workers'] = 0
validation_reader_params = {
    "type": "kg_probe",
    "tokenizer_and_candidate_generator": reader_params['base_reader']['tokenizer_and_candidate_generator'].as_dict()
}
dataset_reader = DatasetReader.from_params(Params(validation_reader_params))
# Build an id -> wordpiece lookup from the BERT vocabulary.
vocab = dataset_reader._tokenizer_and_candidate_generator.bert_tokenizer.vocab
token2word = {}
for k, v in vocab.items():
    token2word[v] = k
# -
# Compute a per-instance perplexity: metrics are reset after every batch so
# each ppl reflects only that single (batch_size=1) instance.
instances = dataset_reader.read(evaluation_file)
instances[0]
ppls = []
instances = dataset_reader.read(evaluation_file)
for batch_no, batch in enumerate(tqdm.tqdm(iterator(instances, num_epochs=1))):
    b = move_to_device(batch, 0)  # move tensors to GPU 0
    loss = model(**b)
    # Perplexity = exp(weighted LM loss accumulated in the model's metrics).
    ppl = np.exp(model.get_metrics()["lm_loss_wgt"])
    # print(ppl)
    ppls.append(ppl)
    model.get_metrics(reset=True)
    # if batch_no % 100 == 0:
    #     print(model.get_metrics())
    # break
# ppls
# print(model.get_metrics())
len(ppls)
# +
# Join the perplexities onto the ranked-candidate table and plot the
# min-max-normalized ppl curve for each group of 10 candidates; the scatter
# point marks the gold answer's rank. `vis` avoids plotting duplicate ranks.
data = pd.read_csv('/nas/home/gujiashe/trans/yago310_ppls_top10.tsv', sep="\t", header=0, index_col=0)
data['ppls'] = ppls
vis = set()
for index in range(8, 11):
    # Skip groups whose gold rank is outside the top 10 or already plotted.
    if data["rank"][index*10]>10 or data["rank"][index*10] in vis:
        continue
    vis.add(data["rank"][index*10])
    print(data["rank"][index*10])
    plt.scatter(data["rank"][index*10], 0)
    df = pd.Series(ppls[index*10:index*10 + 10])
    normalized_df=(df-df.min())/(df.max()-df.min())
    plt.plot([i for i in range(1, 11)], normalized_df)
    plt.xlabel("rank")
    plt.ylabel("ppl")
# -
# Persist the table with the new ppls column.
data.to_csv('/nas/home/gujiashe/trans/yago310_ppls_top10_kb.tsv', sep="\t")
# Generate probe sentences of the form
#   "[PERSON] was born at [PLACE] on [DATE]. His mother is [MOTHER]."
# from the Wikidata birth table, writing one row per entity span:
# "<start end>\t<sentence>". Only English ("@en") values are used.
import datetime
import csv
with open('/data02/wikidata-frame-completion/data/birth_table.tsv', 'r') as t:
    i = 0
    sentences_birth = open("sentences_birth.tsv", "w")
    tsv_writer = csv.writer(sentences_birth, delimiter='\t')
    for line in t:
        i+=1
        if i==1:
            continue  # skip the header row
        person, mother, place, date = line.split('\t')
        if len(mother) == 0:
            continue
        person, lang = person.split("@")
        if lang != "en":
            continue
        person = person.strip("''").split()
        # print("person: ", person, "mother: ", mother, "place: ", place, "date: ", date, len(place), len(date))
        # s = "[PERSON] was born on [DATE_OF_BIRTH] at [PLACE_OF_BIRTH]. His mother is [MOTHER]."
        s = person.copy()
        s.extend(["was", "born"])
        spans = []
        # Span over the person tokens (word indices, inclusive).
        spans.append([0, len(person) - 1])
        len_place = len(place)
        len_date = len(date)
        if len_place == 0 and len_date < 2:
            continue  # nothing to probe without place or date
        if len_place > 0:
            place, lang = place.split("@")
            place = place.strip("''").split()
            if lang == "en":
                s.append("at")
                s.extend(place)
                spans.append([len(s) - len(place), len(s) - 1])
        if len_date > 1:
            # Date arrives like '"1961-05-25T00:00:00Z'; keep the date part.
            date = date.split('T')[0][1:]
            year, month, day = date.split('-')
            year = int(year)
            month = int(month)
            day = int(day)
            d = datetime.date(year, month, day)
            date = d.strftime("%d %B %Y").split()  # e.g. ['25', 'May', '1961']
            s.append("on")
            s.extend(date)
            spans.append([len(s) - 3, len(s) - 1])
        s.append('.')
        if len(mother) > 0:
            mother, lang = mother.split("@")
            if lang == "en":
                mother = mother.strip("''").split()
                s.extend(["His", "mother", "is"])
                s.extend(mother)
                spans.append([len(s) - len(mother), len(s) - 1])
        # print(s)
        s = " ".join(s)
        for span in spans:
            tsv_writer.writerow([str(span[0]) + " " + str(span[1])] + [s])
        # print(spans)
        # print(s)
        # if i>10:
        #     breaks
    sentences_birth.close()
print(i)
# Same sentence generation as the previous cell, but only the PLACE mention is
# emitted as a probe span (the person/date/mother spans are commented out).
import datetime
import csv
with open('/data02/wikidata-frame-completion/data/birth_table.tsv', 'r') as t:
    i = 0
    sentences_birth = open("sentences_place.tsv", "w")
    tsv_writer = csv.writer(sentences_birth, delimiter='\t')
    for line in t:
        i+=1
        if i==1:
            continue  # skip the header row
        person, mother, place, date = line.split('\t')
        if len(mother) == 0:
            continue
        person, lang = person.split("@")
        if lang != "en":
            continue
        person = person.strip("''").split()
        # print("person: ", person, "mother: ", mother, "place: ", place, "date: ", date, len(place), len(date))
        # s = "[PERSON] was born on [DATE_OF_BIRTH] at [PLACE_OF_BIRTH]. His mother is [MOTHER]."
        s = person.copy()
        s.extend(["was", "born"])
        spans = []
        # spans.append([0, len(person) - 1])
        len_place = len(place)
        len_date = len(date)
        if len_place == 0 and len_date < 2:
            continue
        if len_place > 0:
            place, lang = place.split("@")
            place = place.strip("''").split()
            if lang == "en":
                s.append("at")
                s.extend(place)
                spans.append([len(s) - len(place), len(s) - 1])
        if len_date > 1:
            date = date.split('T')[0][1:]
            year, month, day = date.split('-')
            year = int(year)
            month = int(month)
            day = int(day)
            d = datetime.date(year, month, day)
            date = d.strftime("%d %B %Y").split()
            s.append("on")
            s.extend(date)
            # spans.append([len(s) - 3, len(s) - 1])
        s.append('.')
        if len(mother) > 0:
            mother, lang = mother.split("@")
            if lang == "en":
                mother = mother.strip("''").split()
                s.extend(["His", "mother", "is"])
                s.extend(mother)
                # spans.append([len(s) - len(mother), len(s) - 1])
        # print(s)
        s = " ".join(s)
        for span in spans:
            tsv_writer.writerow([str(span[0]) + " " + str(span[1])] + [s])
        # print(spans)
        # print(s)
        # if i>10:
        #     breaks
    sentences_birth.close()
# Second template: "[MOTHER] gave birth to [PERSON] at [PLACE] on [DATE]."
# Spans are computed from known token offsets rather than incrementally.
import datetime
import csv
with open('/data02/wikidata-frame-completion/data/birth_table1.tsv', 'r') as t:
    i = 0
    sentences_birth = open("sentences_birth.tsv", "w")
    tsv_writer = csv.writer(sentences_birth, delimiter='\t')
    for line in t:
        i+=1
        if i==1:
            continue  # skip the header row
        person, mother, place, date = line.split('\t')
        if len(mother) == 0:
            continue
        # NOTE(review): redefining this helper on every iteration is wasteful;
        # it could be hoisted above the loop.
        def process(raw):
            """Split a '"value"@lang' field into (token list, language)."""
            raw = raw.split("@")
            content, lang = raw
            content = content.strip("''").split()
            return content, lang
        person, lang = process(person)
        if lang != "en":
            continue
        mother, lang = process(mother)
        if lang != "en":
            continue
        # NOTE(review): unlike the earlier cell, empty place/date fields are
        # not guarded here; ''.split("@") yields one element and would raise
        # ValueError in process() — confirm birth_table1.tsv always has both.
        place, lang = process(place)
        if lang != "en":
            continue
        # print("person: ", person, "mother: ", mother, "place: ", place, "date: ", date, len(place), len(date))
        # “[MOTHER] gave birth to [PERSON] at [PLACE] on [DATE]”
        idx = 0
        spans = []
        spans.append([idx, idx + len(mother) - 1])
        idx+=len(mother) + 3  # skip "gave birth to"
        spans.append([idx, idx + len(person) - 1])
        idx+=len(person) + 1  # skip "at"
        spans.append([idx, idx + len(place) - 1])
        idx+=len(place) + 1  # skip "on"
        date = date.split('T')[0][1:]
        year, month, day = date.split('-')
        year = int(year)
        month = int(month)
        day = int(day)
        d = datetime.date(year, month, day)
        date = d.strftime("%d %B %Y").split()
        spans.append([idx, idx + 2])  # date is always 3 tokens
        s = mother + ["gave", "birth", "to"] + person + ["at"] + place + ["on"] + date + ["."]
        s = " ".join(s)
        for span in spans:
            tsv_writer.writerow([str(span[0]) + " " + str(span[1])] + [s])
        # print(spans)
        # print(s)
        # if i>10:
        #     breaks
    sentences_birth.close()
print(i)
# Same second template as the previous cell, but only the PERSON mention is
# emitted as a probe span (mother/place/date spans are commented out).
import datetime
import csv
with open('/data02/wikidata-frame-completion/data/birth_table1.tsv', 'r') as t:
    i = 0
    sentences_birth = open("sentences_person.tsv", "w")
    tsv_writer = csv.writer(sentences_birth, delimiter='\t')
    for line in t:
        i+=1
        if i==1:
            continue  # skip the header row
        person, mother, place, date = line.split('\t')
        if len(mother) == 0:
            continue
        def process(raw):
            """Split a '"value"@lang' field into (token list, language)."""
            raw = raw.split("@")
            content, lang = raw
            content = content.strip("''").split()
            return content, lang
        person, lang = process(person)
        if lang != "en":
            continue
        mother, lang = process(mother)
        if lang != "en":
            continue
        # NOTE(review): empty place/date fields would raise ValueError in
        # process() / the date parsing — confirm input is always complete.
        place, lang = process(place)
        if lang != "en":
            continue
        # print("person: ", person, "mother: ", mother, "place: ", place, "date: ", date, len(place), len(date))
        # “[MOTHER] gave birth to [PERSON] at [PLACE] on [DATE]”
        idx = 0
        spans = []
        # spans.append([idx, idx + len(mother) - 1])
        idx+=len(mother) + 3  # skip "gave birth to"
        spans.append([idx, idx + len(person) - 1])
        idx+=len(person) + 1
        # spans.append([idx, idx + len(place) - 1])
        idx+=len(place) + 1
        date = date.split('T')[0][1:]
        year, month, day = date.split('-')
        year = int(year)
        month = int(month)
        day = int(day)
        d = datetime.date(year, month, day)
        date = d.strftime("%d %B %Y").split()
        # spans.append([idx, idx + 2])
        s = mother + ["gave", "birth", "to"] + person + ["at"] + place + ["on"] + date + ["."]
        s = " ".join(s)
        for span in spans:
            tsv_writer.writerow([str(span[0]) + " " + str(span[1])] + [s])
        # print(spans)
        # print(s)
        # if i>10:
        #     breaks
    sentences_birth.close()
print(i)
# +
# kgtk query -i Person0.tsv -i Mother0.tsv -i Place0.tsv -i Date0.tsv -i labels.en.tsv.gz --match "Person: (q)-[]->(n), Mother: (q)-[]->(m), labels: (m)-[]->(mm), place: (q)-[]->(p), labels: (p)-[]->(pp), date: (q)-[]->(d)" –-return "n as Person, mm as Mother, pp as Place_of_birth, d as Date_of_birth" -o birth_table1.tsv
# kgtk query -i Person0.tsv -i Mother0.tsv -i Place0.tsv -i Date0.tsv -i labels.en.tsv.gz --match "Person: (q)-[]->(n), Mother:(q)-[]->(m), Place: (q)-[]->(p), Date: (q)-[]->(d), labels: (m)-[]->(mm), labels: (p)-[]->(pp)" --return "n as Person, mm as Mother, pp as Place_of_birth, d as Date_of_birth" -o birth_table1.tsv
| process_birth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.12 ('nbdev')
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/gtbook/gtsam-examples/blob/main/Pose2SLAMExample_g2o.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Pose2 SLAM with g2o Files
#
# The example below is a Pose SLAM example that reads from "g2o" files:
# %pip -q install gtbook # also installs latest gtsam pre-release
import gtsam
import matplotlib.pyplot as plt
from gtsam.utils import plot
# Set some parameters:
# Cap on Gauss-Newton iterations before the optimizer gives up.
maxIterations = 100
# Read the file:
g2oFile = gtsam.findExampleDataFile("noisyToyGraph.txt")
# is3D=False: the g2o file contains planar (Pose2) poses and constraints.
graph, initial = gtsam.readG2o(g2oFile, is3D=False)
# Add prior on the pose having index (key) = 0
# The Point3 here holds the three diagonal variances (x, y, theta) of the
# Pose2 prior noise model; a tight prior anchors the gauge freedom of SLAM.
priorModel = gtsam.noiseModel.Diagonal.Variances(
    gtsam.Point3(1e-6, 1e-6, 1e-8))
graph.add(gtsam.PriorFactorPose2(0, gtsam.Pose2(), priorModel))
# Create Gauss-Newton optimizer and optimize:
# +
params = gtsam.GaussNewtonParams()
# "Termination" prints why the optimizer stopped (converged vs. max iters).
params.setVerbosity("Termination")
params.setMaxIterations(maxIterations)
# parameters.setRelativeErrorTol(1e-5)
# Create the optimizer ...
optimizer = gtsam.GaussNewtonOptimizer(graph, initial, params)
# ... and optimize
result = optimizer.optimize()
print("Optimization complete")
# Total factor-graph error before and after optimization (should decrease).
print("initial error = ", graph.error(initial))
print("final error = ", graph.error(result))
# -
print("\nFactor Graph:\n{}".format(graph))
print("\nInitial Estimate:\n{}".format(initial))
print("Final Result:\n{}".format(result))
# Extract all optimized Pose2 values as an (N, 3) array of [x, y, theta]
# and draw each pose into matplotlib figure 1.
resultPoses = gtsam.utilities.extractPose2(result)
for i in range(resultPoses.shape[0]):
    plot.plot_pose2(1, gtsam.Pose2(resultPoses[i, :]))
plt.show()
| Pose2SLAMExample_g2o.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from pyiron_base import Project
# Create (or open) a pyiron project directory named "test".
pr = Project("test")
# Clear out any jobs from previous runs so the example starts fresh.
pr.remove_jobs(recursive=True, silently=True)
# Instantiate the custom script job defined by this package.
job = pr.create.job.MyscriptJob("script")
job.input
job.run()
# Read the stored output back as plain built-in Python types.
job["output"].to_object().to_builtin()
# Reload the finished job from the project database by name.
job = pr.load("script")
job
| example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import diff_classifier.features as ft
from diff_classifier.msd import all_msds, nth_diff, msd_calc
import numpy.testing as npt
import pandas.util.testing as pdt
import numpy as np
import pandas as pd
import math
# Check ft.boundedness on an unbounded, perfectly linear trajectory.
frames = 10
d = {'Frame': np.linspace(1, frames, frames),
     'X': np.linspace(1, frames, frames)+5,
     'Y': np.linspace(1, frames, frames)+3}
df = pd.DataFrame(data=d)
df['MSDs'], df['Gauss'] = msd_calc(df)
# FIX: compare floats with a tolerance (npt is already imported above);
# exact tuple equality on floats is brittle across platforms/numpy versions.
npt.assert_allclose(ft.boundedness(df), (1.0, 1.0, 0.0453113379707355))
ft.boundedness(df)
# Check ft.boundedness on a bounded, circular trajectory.
frames = 10
d = {'Frame': np.linspace(1, frames, frames),
     'X': np.sin(np.linspace(1, frames, frames)+3),
     'Y': np.cos(np.linspace(1, frames, frames)+3)}
df = pd.DataFrame(data=d)
df['MSDs'], df['Gauss'] = msd_calc(df)
npt.assert_allclose(
    ft.boundedness(df),
    (0.9603705868989502, 2.7476524601589434, 0.03576118370932313))
| notebooks/development/02_01_18_Testing_functions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports
import requests
from pyspark.sql import SparkSession
import pyspark.sql.functions as F
# Start a local Spark session using every available core.
spark = (SparkSession
         .builder
         .appName('Wildfire data pipeline')
         .master("local[*]")
         .getOrCreate())
spark.sparkContext.setLogLevel('INFO')
spark
# # Loading Data
# +
# File names of the NASA FIRMS 24-hour active-fire exports (MODIS and VIIRS).
modis_file = "MODIS_C6_Global_24h.csv"
viirs_file = "VNP14IMGTDL_NRT_Global_24h.csv"
def download(url, file_name):
    """Download `url` and write the response body to `file_name`.

    Raises requests.exceptions.HTTPError on a non-2xx response so a failed
    download fails loudly instead of silently saving an error page as data.
    """
    # timeout prevents a hung connection from blocking the pipeline forever
    r = requests.get(url, timeout=120)
    r.raise_for_status()
    with open(file_name, 'wb') as f:
        f.write(r.content)
def download_files():
    """Fetch the MODIS and VIIRS 24-hour active-fire CSVs into data/."""
    base_url = "https://firms.modaps.eosdis.nasa.gov/data/active_fire/"
    # (relative URL path, local file name) for each satellite product
    sources = (
        ("c6/csv/" + modis_file, modis_file),      # MODIS data file
        ("viirs/csv/" + viirs_file, viirs_file),   # VIIRS data file
    )
    for url_path, file_name in sources:
        download(base_url + url_path, 'data/' + file_name)
# -
download_files()
# %ls data
# # Data Pipeline
# Read the raw VIIRS CSV with a header row and inferred column types.
viirs_df = (spark
            .read
            .format("csv")
            .option("header", True)
            .option("inferSchema", True)
            .load('data/' + viirs_file))
viirs_df.show(10)
viirs_df.printSchema()
viirs_df.count()
# Combine acq_date + HHMM-encoded acq_time into a single acq_datetime column,
# then align the schema with MODIS (rename confidence, add the two MODIS-only
# brightness columns as nulls so the frames can be unioned later).
viirs_df2 = (viirs_df
             .withColumn("acq_time_min", F.expr("acq_time % 100"))
             .withColumn("acq_time_hr", F.expr("int(acq_time / 100)"))
             .withColumn("acq_time2", F.unix_timestamp(F.col("acq_date"), 'yyyy-MM-dd'))
             .withColumn("acq_time3", F.expr("acq_time2 + acq_time_min * 60 + acq_time_hr * 3600"))
             .withColumn("acq_datetime", F.from_unixtime(F.col("acq_time3")))
             .drop("acq_date", "acq_time", "acq_time_min", "acq_time_hr", "acq_time2", "acq_time3")
             .withColumnRenamed("confidence", "confidence_level")
             .withColumn("brightness", F.lit(None))
             .withColumn("bright_t31", F.lit(None)))
viirs_df2.show(10)
viirs_df2.printSchema()
viirs_df2.rdd.getNumPartitions()
# Distribution of records per confidence level, with a percentage column.
df = viirs_df2.groupby('confidence_level').count()
count = viirs_df2.count()
df = df.withColumn('%', F.round(F.expr(f"100 / {count} * count"), 2))
df.show()
# +
# Thresholds for bucketing MODIS's numeric confidence into VIIRS-style levels.
low = 40
high = 100
# Same datetime normalization as the VIIRS frame, plus mapping numeric
# confidence to categorical levels and adding the VIIRS-only columns as nulls.
# NOTE(review): the mapping sends nulls to "high" and values >= `high` fall
# through to otherwise(), keeping the raw number — confirm both are intended.
modis_df = spark.read.format("csv") \
    .option("header", True) \
    .option("inferSchema", True) \
    .load('data/' + modis_file) \
    .withColumn("acq_time_min", F.expr("acq_time % 100")) \
    .withColumn("acq_time_hr", F.expr("int(acq_time / 100)")) \
    .withColumn("acq_time2", F.unix_timestamp(F.col("acq_date"))) \
    .withColumn("acq_time3", F.expr("acq_time2 + acq_time_min * 60 + acq_time_hr * 3600")) \
    .withColumn("acq_datetime", F.from_unixtime(F.col("acq_time3"))) \
    .drop("acq_date", "acq_time", "acq_time_min", "acq_time_hr", "acq_time2", "acq_time3") \
    .withColumn("confidence_level", F.when(F.col("confidence") <= low, "low")
                .when((F.col("confidence") > low) & (F.col("confidence") < high), "nominal")
                .when(F.isnull(F.col("confidence")), "high")
                .otherwise(F.col("confidence"))) \
    .drop("confidence") \
    .withColumn("bright_ti4", F.lit(None)) \
    .withColumn("bright_ti5", F.lit(None))
modis_df.show()
modis_df.printSchema()
# -
# Distribution of records per confidence level, with a percentage column.
df = modis_df.groupBy("confidence_level").count()
count = modis_df.count()
df = df.withColumn("%", F.round(F.expr("100 / {} * count".format(count)), 2))
df.show()
# Union by column name (schemas were aligned above, column order may differ).
combined_df = viirs_df2.unionByName(modis_df)
combined_df.show()
combined_df.count()
count
combined_df.rdd.getNumPartitions()
# +
# Persist the full combined dataset as parquet.
combined_df.write.format("parquet") \
    .mode("overwrite") \
    .save("data/fires_parquet")
# Keep only high-confidence detections; repartition(1) yields a single CSV part.
output_df = combined_df.filter("confidence_level = 'high'") \
    .repartition(1)
output_df.write.format("csv") \
    .option("header", True) \
    .mode("overwrite") \
    .save("data/high_confidence_fires_csv")
| notebooks/Spark-In-Action/End-to-End-Pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# + deletable=true editable=true
from glob import glob
import os
# + deletable=true editable=true
# Collect all MDS and PCA result files.
mds = glob('datasets/mds/*')
# + deletable=true editable=true
pca = glob('datasets/pca/*')
# + deletable=true editable=true
# List the MDS files derived from Word Mover's Distance (wmd).
for file in mds:
    if 'wmd' in file:
        print(file)
# + deletable=true editable=true
pca
# + deletable=true editable=true
# List the PCA files derived from Word Mover's Distance.
for file in pca:
    # FIX: this previously checked 'wdm', a typo — every other cell in this
    # notebook (and the datasets/*/wmd directories below) uses 'wmd', so the
    # loop never matched anything.
    if 'wmd' in file:
        print(file)
# + deletable=true editable=true
# Collect agglomerative-clustering label files and list the wmd-derived ones.
agglo_files = glob('datasets/labels/agglomerative/*')
# + deletable=true editable=true
for file in agglo_files:
    if 'wmd' in file:
        print(file)
# -
def labels_filename(distance_filename, subdir, **kwargs):
    """Map a distance-matrix filename to its labels filename.

    The path under datasets/ is flattened (slashes -> underscores), any
    keyword arguments are appended as "_key_value" pairs before the .npy
    extension, and the result is placed under datasets/labels/<subdir>/.
    """
    suffix = "_".join("{}_{}".format(k, v) for k, v in kwargs.items())
    flat = distance_filename.replace('datasets/', '').replace('/', '_')
    flat = flat.replace('.npy', "_" + suffix + '.npy')
    return os.path.join('datasets/labels', subdir, flat)
# All pairwise-distance matrices (angular/euclidean for tfidf, cbow, sg,
# plus Word Mover's Distance matrices).
dist_files = glob('datasets/tfidf/angular-distance/*') + glob('datasets/tfidf/euclidean-distance/*') + \
    glob('datasets/cbow/mean-vec/angular-distance/*') + glob('datasets/cbow/mean-vec/euclidean-distance/*') +\
    glob('datasets/sg/mean-vec/angular-distance/*') + glob('datasets/sg/mean-vec/euclidean-distance/*') + \
    glob('datasets/cbow/wmd/*') + glob('datasets/sg/wmd/*')
# All reduced-feature representations (MDS, PCA, LSI).
dist_files_features =\
    glob('datasets/tfidf/mds/*') + glob('datasets/cbow/mds/*') + glob('datasets/sg/mds/*') +\
    glob('datasets/cbow/pca/*') + glob('datasets/sg/pca/*') +\
    glob('datasets/tfidf/lsi/*')
dist_files_lsi_cos = glob('datasets/tfidf/lsi-cos-dist/*')
len(dist_files), len(dist_files_features)
# For each cluster count n, print the LSI-cosine inputs that do not yet have
# an agglomerative labels file (i.e. experiments still to run).
for n in [2,10,40]:
    print(n)
    for fname in dist_files_lsi_cos:
        if not os.path.isfile(labels_filename(fname, 'agglomerative', n=n)):
            print(fname)
files = glob('datasets/labels/agglomerative/*')
# Strip the trailing "_n_<k>" suffix from every labels file name.
# FIX: the comprehension previously iterated `filename` but used `fname`
# (leaked from the loop above) in the expression, so every element of
# files_no_n was the same string and unique_files collapsed to one entry.
files_no_n = [fname[:fname.rindex('_n_')] + '.npy' for fname in files]
unique_files = list(set(files_no_n))
fname = files[0]
fname.replace('_n_10.npy', '.npy')
fname
# +
# For each cluster count n, print the experiments whose labels file for that
# n does not exist yet.
for n in [2,10,40]:
    for unique_file in unique_files:
#         fname =
        if not os.path.isfile(unique_file.replace('.npy', '_n_{}.npy'.format(n))):
            print(unique_file)
| experiments_count.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 15장. K-최근접 이웃
# 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://nbviewer.jupyter.org/github/rickiepark/machine-learning-with-python-cookbook/blob/master/15.ipynb"><img src="https://jupyter.org/assets/main-logo.svg" width="28" />주피터 노트북 뷰어로 보기</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/rickiepark/machine-learning-with-python-cookbook/blob/master/15.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
# </td>
# </table>
# ## 15.1 Finding an Observation's Nearest Neighbors
# +
# Import libraries.
from sklearn import datasets
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler
# Load the data.
iris = datasets.load_iris()
features = iris.data
# Create a standardization object.
standardizer = StandardScaler()
# Standardize the features.
features_standardized = standardizer.fit_transform(features)
# Build a nearest-neighbors model with k=2.
nearest_neighbors = NearestNeighbors(n_neighbors=2).fit(features_standardized)
# Create a new observation.
new_observation = [ 1, 1, 1, 1]
# Find the distances and indices of this observation's nearest neighbors.
distances, indices = nearest_neighbors.kneighbors([new_observation])
# View the nearest neighbors.
features_standardized[indices]
# -
nearestneighbors_euclidean = NearestNeighbors(
    n_neighbors=2, metric='euclidean').fit(features_standardized)
# View the distances.
distances
# +
# Find each observation's three nearest neighbors (including itself)
# based on Euclidean distance.
nearestneighbors_euclidean = NearestNeighbors(
    n_neighbors=3, metric="euclidean").fit(features_standardized)
# List of lists marking each observation's 3 nearest neighbors
# (including itself).
nearest_neighbors_with_self = nearestneighbors_euclidean.kneighbors_graph(
    features_standardized).toarray()
# Remove each observation itself (marked with a 1) from its neighbors.
for i, x in enumerate(nearest_neighbors_with_self):
    x[i] = 0
# View the first observation's two nearest neighbors.
nearest_neighbors_with_self[0]
# -
# -
# ### Addendum
# +
# Find the indices of this observation's five nearest neighbors.
indices = nearest_neighbors.kneighbors(
    [new_observation], n_neighbors=5, return_distance=False)
# View the nearest neighbors.
features_standardized[indices]
# +
# Find the indices of all observations within radius 0.5.
indices = nearest_neighbors.radius_neighbors(
    [new_observation], radius=0.5, return_distance=False)
# View the neighbors inside the radius.
features_standardized[indices[0]]
# +
# List of lists marking the neighbors inside the radius.
nearest_neighbors_with_self = nearest_neighbors.radius_neighbors_graph(
    [new_observation], radius=0.5).toarray()
# View the first observation's neighbors inside the radius.
nearest_neighbors_with_self[0]
# -
# -
# ## 15.2 Creating a K-Nearest Neighbors Classifier
# +
# Import libraries.
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn import datasets
# Load the data.
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Create a standardization object.
standardizer = StandardScaler()
# Standardize the features.
X_std = standardizer.fit_transform(X)
# Train a KNN classifier with 5 neighbors.
knn = KNeighborsClassifier(n_neighbors=5, n_jobs=-1).fit(X_std, y)
# Create two observations.
new_observations = [[ 0.75, 0.75, 0.75, 0.75],
                    [ 1, 1, 1, 1]]
# Predict the class of both observations.
knn.predict(new_observations)
# -
# View the probability that each observation belongs to each of the
# three classes.
knn.predict_proba(new_observations)
# ### Addendum
# +
# Import libraries.
from sklearn.neighbors import KNeighborsRegressor
from sklearn import datasets
# Load the data and select only the first two features.
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and removed in
# 1.2 — this cell only runs on older scikit-learn versions.
boston = datasets.load_boston()
features = boston.data[:,0:2]
target = boston.target
# Create a nearest-neighbors regression model.
knn_regressor = KNeighborsRegressor(n_neighbors=10)
# Train the model.
model = knn_regressor.fit(features, target)
# -
# Predict the first observation's target value and multiply by 1000.
model.predict(features[0:1])[0]*1000
# +
# The prediction equals the mean target of the 10 nearest neighbors.
import numpy as np
indices = model.kneighbors(features[0:1], return_distance=False)
np.mean(target[indices]) * 1000
# -
# -
# ## 15.3 Determining the Best Number of Neighbors
# +
# Import libraries.
from sklearn.neighbors import KNeighborsClassifier
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import GridSearchCV
# Load the data.
iris = datasets.load_iris()
features = iris.data
target = iris.target
# Create a standardization object.
standardizer = StandardScaler()
# Create a KNN classifier.
knn = KNeighborsClassifier(n_neighbors=5, n_jobs=-1)
# Create a pipeline (standardize, then classify).
pipe = Pipeline([("standardizer", standardizer), ("knn", knn)])
# Create the candidate search space.
search_space = [{"knn__n_neighbors": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]}]
# Run a 5-fold cross-validated grid search over k.
classifier = GridSearchCV(
    pipe, search_space, cv=5, verbose=0).fit(features, target)
# -
# Best number of neighbors (k).
classifier.best_estimator_.get_params()["knn__n_neighbors"]
# ## 15.4 Creating a Radius-Based Nearest Neighbors Classifier
# +
# Import libraries.
from sklearn.neighbors import RadiusNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn import datasets
# Load the data.
iris = datasets.load_iris()
features = iris.data
target = iris.target
# Create a standardization object.
standardizer = StandardScaler()
# Standardize the features.
features_standardized = standardizer.fit_transform(features)
# Train a radius neighbors classifier.
rnn = RadiusNeighborsClassifier(
    radius=.5, n_jobs=-1).fit(features_standardized, target)
# Create an observation.
new_observations = [[ 1, 1, 1, 1]]
# Predict the class of the observation.
rnn.predict(new_observations)
# -
# ### Addendum
# +
# Train a radius neighbors classifier; observations with no neighbor inside
# the radius are labeled with outlier_label (-1) instead of raising an error.
rnn = RadiusNeighborsClassifier(
    radius=.5, outlier_label=-1, n_jobs=-1).fit(features_standardized, target)
rnn.predict([[100, 100, 100, 100]])
| 15.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep Learning
# ## A. Data Preparation
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cufflinks as cf
sp = pd.read_csv('data_csv.csv')
# Inspect missing values before cleaning.
sp.isnull().values.any()
sp.isnull().sum()
sp[sp.isnull().values.any(axis=1)].head()
## Dropping the column
sp = sp.drop('PE10', axis=1)
## fillna with forward fill
# NOTE(review): axis=1 forward-fills across *columns* within each row, not
# down the time axis; axis=0 is the usual choice for a time series — confirm
# this is intended.
sp=sp.fillna(method='ffill', axis=1)
sp.tail()
sp.isnull().sum()
# +
## Use the below code to create a new variable so as to detect increament or decrement of CPI
# Label each row 1 if the *next* row's CPI is higher than the current one,
# else 0.
# NOTE(review): cnt starts at 1 and indexes the next row, so the final
# iteration looks one row past the end — confirm how this runs without a
# KeyError (the frame is only truncated to :1767 *after* this loop).
cnt=1
target=[]
for i in sp['Consumer Price Index']:
    if sp['Consumer Price Index'][cnt] > i:
        print(sp['Consumer Price Index'][cnt], '>' , i , '1')
        target.append(1)
    else:
        print(sp['Consumer Price Index'][cnt], '<' , i , '0')
        target.append(0)
    cnt+=1
# -
# Attach the binary CPI_Inc target to the (truncated) feature frame.
target=pd.DataFrame(data=target, columns=['CPI_Inc'],dtype=int)
sp=sp[:1767]
frames=[sp,target]
sp=pd.concat(frames, axis=1, join='outer')
sp.tail()
# +
# Hold out the last 40 rows as the test set; drop the non-numeric Date column.
sp3=sp.drop('Date',axis=1)
test=sp3[-40:]
train=sp3[:-40]
#Train
sp1=np.array(train)
#Test
sp2=np.array(test)
# +
### To change the dimension of the data for deep learning model
from numpy import array
def split_sequences(sequences, n_steps):
    """Cut a 2-D array whose last column is the target into windows.

    Returns (X, y): X[i] holds the feature columns of rows
    i .. i+n_steps-1 and y[i] is the target value of that window's
    final row.
    """
    windows, targets = list(), list()
    for start, _ in enumerate(sequences):
        stop = start + n_steps
        # stop once a full window no longer fits in the data
        if stop > len(sequences):
            break
        windows.append(sequences[start:stop, :-1])
        targets.append(sequences[stop - 1, -1])
    return array(windows), array(targets)
# +
# choose a number of time steps
n_steps = 8
# convert into input/output (training windows)
X1, y1 = split_sequences(sp1, n_steps)
# convert into input/output (test windows)
X2, y2 = split_sequences(sp2, n_steps)
print(X1.shape, y1.shape)
print(X2.shape, y2.shape)
# Number of feature columns per time step (last array axis).
n_features = X2.shape[2]
n_features
# -
# # B. CNN Model:
# # Part A - Deep Learning model
# ___Hyperparameters:___<br>
# filters=128<br>
# kernel_size=3<br>
# Activation functions= relu<br>
# loss=mean_squared_error<br>
# optimizer= adam<br>
# univariate cnn example
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import BatchNormalization
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D
from keras import metrics
from keras import optimizers
# +
# define model
# Baseline: one Conv1D block, then a small dense head for the binary target.
model1 = Sequential()
model1.add(Conv1D(filters=128, kernel_size=3, activation='relu', input_shape=(n_steps, n_features)))
model1.add(MaxPooling1D(pool_size=2))
model1.add(BatchNormalization())
model1.add(Flatten())
model1.add(Dropout(0.25))
model1.add(Dense(20, activation='relu'))
model1.add(Dropout(0.25))
model1.add(Dense(1))
# -
# NOTE(review): MSE loss with an unbounded linear output on a 0/1 target —
# 'accuracy' here compares raw regression outputs to labels; confirm intended.
model1.compile(optimizer='adam', loss='mean_squared_error',metrics=[metrics.mae, 'accuracy'])
history=model1.fit(X1, y1, batch_size=32, epochs=10, verbose=2)
# +
# Training curves: accuracy in one figure, loss in a second.
# FIX: previously both series were drawn on the first figure and the second
# figure was shown empty (no plot call); this now matches the later
# plotting cells in this notebook.
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
# -
# ### Validation:
# Evaluate the baseline model on the held-out windows.
loss, mean_absolute_error, accuracy =model1.evaluate(X2, y2, batch_size=32, verbose=2)
print('Accuracy on Test Data :- ')
print('\t loss:',loss , ' mean_absolute_error:',mean_absolute_error, ' accuracy:',accuracy)
# __For the baseline model, Loss is Higher and Accuracy on validation data is 63%__
# # Activation function
# ___Hyperparameters:___<br>
# filters=128<br>
# kernel_size=3<br>
# Activation functions= elu, softmax<br>
# loss=mean_squared_error<br>
# optimizer= adam<br>
# +
# define model
# Variant of model1 with elu convolution and a softmax hidden layer.
model2 = Sequential()
model2.add(Conv1D(filters=128, kernel_size=3, activation='elu', input_shape=(n_steps, n_features)))
model2.add(MaxPooling1D(pool_size=2))
model2.add(BatchNormalization())
model2.add(Flatten())
model2.add(Dropout(0.25))
model2.add(Dense(20, activation='softmax'))
model2.add(Dropout(0.25))
model2.add(Dense(1))
# -
# NOTE(review): this Adam instance (learning_rate=0.4) is never used —
# compile() below receives the string 'adam', which builds a default Adam.
adam=optimizers.Adam(learning_rate=0.4, beta_1=0.9, beta_2=0.999, amsgrad=False)
model2.compile(optimizer='adam', loss='mean_squared_error',metrics=[metrics.mae, 'accuracy'])
history=model2.fit(X1, y1, batch_size=32, epochs=10, verbose=2)
# +
# Training curves for model2: accuracy figure, then loss figure.
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
# -
# ### Validation:
loss, mean_absolute_error, accuracy =model2.evaluate(X2, y2, batch_size=32, verbose=2)
print('Accuracy on Test Data :- ')
print('\t loss:',loss , ' mean_absolute_error:',mean_absolute_error, ' accuracy:',accuracy)
# __After changing the activation functions in the current network, the loss decreased, but accuracy on the validation set also dropped, to 30%.__
# # Cost function / loss
# ___Hyperparameters:___<br>
# filters=128<br>
# kernel_size=3<br>
# Activation functions= elu, softmax<br>
# loss= hinge, logcosh<br>
# optimizer= adam<br>
# __1. Cost function= hinge:__
# Re-train model2 with hinge loss (weights carry over from the previous fit).
model2.compile(optimizer='adam', loss='hinge',metrics=[metrics.mae, 'accuracy'])
history=model2.fit(X1, y1, batch_size=32, epochs=10, verbose=2)
# +
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
# -
# __Accuracy :__
loss, mean_absolute_error, accuracy =model2.evaluate(X2, y2, batch_size=32, verbose=2)
print('Accuracy on Test Data :- ')
print('\t loss:',loss , ' mean_absolute_error:',mean_absolute_error, ' accuracy:',accuracy)
# __2. Cost function= logcosh:__
# Re-train the same network with log-cosh loss for comparison.
model2.compile(optimizer='adam', loss='logcosh',metrics=[metrics.mae, 'accuracy'])
history=model2.fit(X1, y1, batch_size=32, epochs=10, verbose=2)
# +
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
# -
# __Accuracy :__
loss, mean_absolute_error, accuracy =model2.evaluate(X2, y2, batch_size=32, verbose=2)
print('Accuracy on Test Data :- ')
print('\t loss:',loss , ' mean_absolute_error:',mean_absolute_error, ' accuracy:',accuracy)
# __For the cost function, logcosh is better than hinge for better accuracy.__
# # Epochs
# ___Hyperparameters:___<br>
# filters=128<br>
# kernel_size=3<br>
# Activation functions= elu, softmax<br>
# loss= logcosh<br>
# optimizer= adam<br>
# epochs=200 <br>
# Longer training run: same network and loss, 200 epochs instead of 10.
model2.compile(optimizer='adam', loss='logcosh',metrics=[metrics.mae, 'accuracy'])
history=model2.fit(X1, y1, batch_size=32, epochs=200, verbose=2)
# +
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
# -
# __Accuracy :__
loss, mean_absolute_error, accuracy =model2.evaluate(X2, y2, batch_size=32, verbose=2)
print('Accuracy on Test Data :- ')
print('\t loss:',loss , ' mean_absolute_error:',mean_absolute_error, ' accuracy:',accuracy)
# __After increasing number of epochs in the model, accuracy increased and loss decreased.__
# # Gradient estimation
# ___Hyperparameters:___<br>
# filters=128<br>
# kernel_size=3<br>
# Activation functions= elu, softmax<br>
# loss= logcosh<br>
# optimizer= RMSprop, Adagrad<br>
# epochs=200 <br>
# __1. optimizers : RMSprop__
# NOTE(review): this RMSprop instance (learning_rate=0.4) is never used —
# compile() receives the string 'RMSprop', which builds a default optimizer.
RMSprop=optimizers.RMSprop(learning_rate=0.4, rho=0.9)
model2.compile(optimizer='RMSprop', loss='logcosh',metrics=[metrics.mae, 'accuracy'])
history=model2.fit(X1, y1, batch_size=32, epochs=200, verbose=2)
# +
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
# -
# __Accuracy :__
loss, mean_absolute_error, accuracy =model2.evaluate(X2, y2, batch_size=32, verbose=2)
print('Accuracy on Test Data :- ')
print('\t loss:',loss , ' mean_absolute_error:',mean_absolute_error, ' accuracy:',accuracy)
# __2. optimizers : Adagrad__
# NOTE(review): this Adagrad instance (learning_rate=0.001) is never used —
# compile() receives the string 'Adagrad', which builds a default optimizer.
Adagrad=optimizers.Adagrad(learning_rate=0.001)
model2.compile(optimizer='Adagrad', loss='logcosh',metrics=[metrics.mae, 'accuracy'])
# FIX: capture the History object; previously the return value of fit() was
# discarded, so the plotting cell below re-plotted the RMSprop run's curves.
history=model2.fit(X1, y1, batch_size=32, epochs=200, verbose=2)
# +
# Training curves for the Adagrad run: accuracy figure, then loss figure.
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
# -
loss, mean_absolute_error, accuracy =model2.evaluate(X2, y2, batch_size=32, verbose=2)
print('Accuracy on Test Data :- ')
print('\t loss:',loss , ' mean_absolute_error:',mean_absolute_error, ' accuracy:',accuracy)
# __Optimizer: Adagrad is better suit for this model than RMSprop. Although, both are close enough in terms of accuracy and loss.__
# # Network Architecture
# ___Hyperparameters:___<br>
# filters=256<br>
# kernel_size=2<br>
# Activation functions= relu, elu, softmax<br>
# loss=logcosh<br>
# optimizer= adagrad<br>
# epoch=100<br>
# +
# define model
# Deeper architecture: two Conv1D blocks with interleaved dense/dropout layers.
model3 = Sequential()
model3.add(Conv1D(filters=256, kernel_size=3, activation='relu', input_shape=(n_steps, n_features)))
model3.add(MaxPooling1D(pool_size=2))
model3.add(BatchNormalization())
model3.add(Dense(128, activation='elu'))
model3.add(Dropout(0.25))
model3.add(Conv1D(filters=128, kernel_size=2, activation='elu'))
model3.add(MaxPooling1D(pool_size=2))
model3.add(BatchNormalization())
model3.add(Dropout(0.25))
model3.add(Dense(64))
model3.add(Dropout(0.25))
model3.add(Flatten())
model3.add(Dropout(0.25))
model3.add(Dense(20, activation='softmax'))
model3.add(Dropout(0.25))
model3.add(Dense(1))
# -
# NOTE(review): this Adagrad instance (learning_rate=0.4) is never used —
# compile() receives the string 'Adagrad', which builds a default optimizer.
Adagrad=optimizers.Adagrad(learning_rate=0.4)
model3.compile(optimizer='Adagrad', loss='logcosh',metrics=[metrics.mae, 'accuracy'])
history=model3.fit(X1, y1, batch_size=32, epochs=100, verbose=2)
# +
# Training curves for model3: accuracy figure, then loss figure.
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
# -
loss, mean_absolute_error, accuracy =model3.evaluate(X2, y2, batch_size=32, verbose=2)
print('Accuracy on Test Data :- ')
print('\t loss:',loss , ' mean_absolute_error:',mean_absolute_error, ' accuracy:',accuracy)
# __After changing the number and size of the layers, both accuracy and loss improved.__
# # Network initialization
# ___Hyperparameters:___<br>
# filters=256<br>
# kernel_size=3,2<br>
# Activation functions= relu, elu, softmax<br>
# kernel_initializer=Ones, RandomNormal <br>
# loss=mean_squared_error<br>
# optimizer= adam<br>
# +
# define model
# Default : kernel_initializer='glorot_uniform'
# Same two-block CNN as model3 (minus the dense layers) but with explicit
# kernel initializers: all-ones for the first conv, random normal for the second.
model4 = Sequential()
model4.add(Conv1D(filters=256, kernel_size=3, activation='relu', kernel_initializer='Ones' , input_shape=(n_steps, n_features)))
model4.add(MaxPooling1D(pool_size=2))
model4.add(BatchNormalization())
model4.add(Conv1D(filters=128, kernel_size=2, activation='elu',kernel_initializer='RandomNormal'))
model4.add(MaxPooling1D(pool_size=2))
model4.add(BatchNormalization())
model4.add(Dropout(0.25))
model4.add(Flatten())
model4.add(Dropout(0.25))
model4.add(Dense(20, activation='softmax'))
model4.add(Dropout(0.25))
model4.add(Dense(1))
# -
# NOTE(review): this Adagrad instance (learning_rate=0.01) is never used —
# compile() receives the string 'Adagrad', which builds a default optimizer.
Adagrad=optimizers.Adagrad(learning_rate=0.01)
model4.compile(optimizer='Adagrad', loss='logcosh',metrics=[metrics.mae, 'accuracy'])
history=model4.fit(X1, y1, batch_size=32, epochs=200, verbose=2)
# +
# Training curves for model4: accuracy figure, then loss figure.
plt.plot(history.history['accuracy'])
plt.title('Model accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
plt.plot(history.history['loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Valid'], loc='upper left')
plt.show()
# -
loss, mean_absolute_error, accuracy =model4.evaluate(X2, y2, batch_size=32, verbose=2)
print('Accuracy on Test Data :- ')
print('\t loss:',loss , ' mean_absolute_error:',mean_absolute_error, ' accuracy:',accuracy)
# __After changing the kernel initialization, accuracy on the validation set increased.__
# # Conclusion:
# After performing analysis on the CNN model, it is observed that, <br>
# >1. For the baseline model, Loss is Higher and Accuracy on validation data is 63%.<br>
# >2. The cost function logcosh is appropriate for this model than hinge.<br>
# >3. The higher the number of epochs, the better the model performs.<br>
# >4. Changing the optimizer can increase accuracy and helps reduce the loss. Adagrad is better suited to this type of model than RMSprop.<br>
# >5. Network architecture is as important as hyperparameters. By adding more layers and with proper use of hyperparamters, we can achieve higher accuracy.<br>
# >6. After changing the kernel initialization, accuracy on validation set got increase.
#
# For Multivariate Tabular data, Convolution Neural Network can produce good results but handling of data For the CNN model is difficult.
# # Author:
# <NAME><br>
# Information Systems <br>
# Northeastern University <br>
# # Citation:
# References: <br>
# https://keras.io/models<br>
# https://machinelearningmastery.com/ <br>
# https://karpathy.github.io/2019/04/25/recipe<br>
# # Licensing
# Copyright 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| s&p500.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
df = pd.read_csv("carprices.csv")
df
# One-hot encode the categorical car model column.
dummies = pd.get_dummies(df['Car Model'])
dummies
merged = pd.concat([df,dummies],axis='columns')
merged
# Drop the original categorical column plus one dummy column to avoid the
# dummy-variable trap (perfectly collinear dummies).
# NOTE(review): "<NAME> class" is an anonymization placeholder for the real
# dummy-column name — confirm against the actual CSV before running.
final = merged.drop(["Car Model","<NAME> class"],axis='columns')
final
X = final.drop('Sell Price($)',axis='columns')
X
y = final['Sell Price($)']
y
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X,y)
# R^2 on the training data.
model.score(X,y)
# **Price of mercedez benz that is 4 yr old with mileage 45000**
model.predict([[45000,4,0,0]])
# **Price of BMW X5 that is 7 yr old with mileage 86000**
model.predict([[86000,7,0,1]])
| Program's_Contributed_By_Contributors/AI-Summer-Course/py-master/ML/5_one_hot_encoding/Exercise/exercise_one_hot_encoding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.1 64-bit (''base'': conda)'
# name: python3
# ---
# # Bank Marketing Data Set
#
# ## Load a saved model and predict
#
# The data is related with direct marketing campaigns of a Portuguese banking institution. The marketing campaigns were based on phone calls. A number of features such as age, kind of job, marital status, education level, credit default, existence of housing loan, etc. were considered. The classification goal is to predict if the client will subscribe (yes/no) a term deposit.
#
# More information regarding the data set is at https://archive.ics.uci.edu/ml/datasets/bank+marketing#. For tutorials use only.
#
# <font color=blue>__ _The objective is to show the need for storing a model. A rudimentary hack to load the model is implemented._ __</font>
#
# ## Attribute Information:
#
# ### Input variables:
# #### Bank client data:
# 1. age (numeric)
# 2. job : type of job (categorical: 'admin.','blue-collar','entrepreneur','housemaid','management','retired','self-employed','services','student','technician','unemployed','unknown')
# 3. marital : marital status (categorical: 'divorced','married','single','unknown'; note: 'divorced' means divorced or widowed)
# 4. education (categorical: 'basic.4y','basic.6y','basic.9y','high.school','illiterate','professional.course','university.degree','unknown')
# 5. default: has credit in default? (categorical: 'no','yes','unknown')
# 6. housing: has housing loan? (categorical: 'no','yes','unknown')
# 7. loan: has personal loan? (categorical: 'no','yes','unknown')
#
# #### Related with the last contact of the current campaign:
# 8. contact: contact communication type (categorical: 'cellular','telephone')
# 9. month: last contact month of year (categorical: 'jan', 'feb', 'mar', ..., 'nov', 'dec')
# 10. day_of_week: last contact day of the week (categorical: 'mon','tue','wed','thu','fri')
# 11. duration: last contact duration, in seconds (numeric). Important note: this attribute highly affects the output target (e.g., if duration=0 then y='no'). Yet, the duration is not known before a call is performed. Also, after the end of the call y is obviously known. Thus, this input should only be included for benchmark purposes and should be discarded if the intention is to have a realistic predictive model.
#
# #### Other attributes:
# 12. campaign: number of contacts performed during this campaign and for this client (numeric, includes last contact)
# 13. pdays: number of days that passed by after the client was last contacted from a previous campaign (numeric; 999 means client was not previously contacted)
# 14. previous: number of contacts performed before this campaign and for this client (numeric)
# 15. poutcome: outcome of the previous marketing campaign (categorical: 'failure','nonexistent','success')
#
# #### Social and economic context attributes:
# 16. emp.var.rate: employment variation rate - quarterly indicator (numeric)
# 17. cons.price.idx: consumer price index - monthly indicator (numeric)
# 18. cons.conf.idx: consumer confidence index - monthly indicator (numeric)
# 19. euribor3m: euribor 3 month rate - daily indicator (numeric)
# 20. nr.employed: number of employees - quarterly indicator (numeric)
#
# ### Output variable (desired target):
# 21. y - has the client subscribed a term deposit? (binary: 'yes','no')
#
# ## Import packages
from hana_ml import dataframe
from hana_ml.algorithms.pal import linear_model
from hana_ml.algorithms.pal import clustering
from hdbcli import dbapi
import numpy as np
import matplotlib.pyplot as plt
import logging
# ## Setup logging
logging.basicConfig()
logger = logging.getLogger('hana_ml.ml_base')
logger.setLevel(logging.ERROR)
logger.addHandler(logging.NullHandler())
# ## Setup connection and data sets
# The data is loaded into 4 tables - full set, test set, training set, and the validation set:
# <li>DBM2_RFULL_TBL</li>
# <li>DBM2_RTEST_TBL</li>
# <li>DBM2_RTRAINING_TBL</li>
# <li>DBM2_RVALIDATION_TBL</li>
#
# To do that, a connection is created and passed to the loader.
#
# There is a config file, <b>config/e2edata.ini</b> that controls the connection parameters and whether or not to reload the data from scratch. In case the data is already loaded, there would be no need to load the data. A sample section is below. If the config parameter, reload_data is true then the tables for test, training, and validation are (re-)created and data inserted into them.
#
# #########################<br>
# [hana]<br>
# url=host.sjc.sap.corp<br>
# user=username<br>
# passwd=<PASSWORD><br>
# port=3xx15<br>
# <br>
#
# #########################<br>
# Read connection settings, open a HANA connection, and load the four
# bank-marketing tables (full / training / validation / test).
from hana_ml.algorithms.pal.utility import DataSets, Settings
url, port, user, pwd = Settings.load_config("../../config/e2edata.ini")
connection_context = dataframe.ConnectionContext(url, port, user, pwd)
full_set, training_set, validation_set, test_set = DataSets.load_bank_data(connection_context)
# Feature columns as stored in the HANA tables (note the DBM_-prefixed names).
features = ['AGE','JOB','MARITAL','EDUCATION','DBM_DEFAULT', 'HOUSING','LOAN','CONTACT','DBM_MONTH','DAY_OF_WEEK','DURATION','CAMPAIGN','PDAYS','PREVIOUS','POUTCOME','EMP_VAR_RATE','CONS_PRICE_IDX','CONS_CONF_IDX','EURIBOR3M','NREMPLOYED']
label = "LABEL"
# # Load the model
# __ _lr = model_persistence.load(connection_context, 'nk_lr', 'DEVUSER')_ __
# Recreate the estimator with its training-time hyper-parameters, then attach
# a previously fitted model table so no re-training is needed.
lr = linear_model.LogisticRegression(solver='newton',
                                     thread_ratio=0.1, max_iter=1000, pmml_export='single-row',
                                     stat_inf=True, tol=0.000001, class_map0='no', class_map1='yes')
# NOTE(review): assumes table "MYMODEL" holds a fitted model in the format
# LogisticRegression expects in result_ — confirm before scoring.
lr.result_ = connection_context.table("MYMODEL")
# ## Load the model by model storage service
# +
from hana_ml.model_storage import ModelStorage
#MODEL_SCHEMA='STORAGE'
# model storage must use the same connection as the model
model_storage = ModelStorage(connection_context=connection_context,
#schema=MODEL_SCHEMA
)
lr = model_storage.load_model(name='Model A', version=1)
# -
# # Scoring
# Do the scoring on the validation and test sets
validation_accuracy_val = lr.score(validation_set, 'ID', features, label, class_map0='no', class_map1='yes')
print('Validation accuracy=%f' %(validation_accuracy_val))
test_accuracy_val = lr.score(test_set, 'ID', features, label)
print('Test accuracy=%f' %(test_accuracy_val))
predict_on_train_data = lr.predict(training_set, 'ID', features)
print(predict_on_train_data.head(2).collect())
# ### Define a simple predict function
def predict(connection_context, lr, age, job, marital_status, education, dbm_default, housing, loan, contact, dbm_month, day_of_week, duration, campaign, pdays, previous, poutcome, emp_var_rate, cons_price_idx, cons_conf_idx, euribor3m, nremployed):
    """Score one client record with the fitted model.

    Creates a one-row local temporary table ``#t`` on the HANA connection,
    inserts the 20 bank-marketing feature values under the synthetic key
    ID=99999, and runs ``lr.predict`` against that table.

    :param connection_context: open ``hana_ml`` ConnectionContext.
    :param lr: fitted model exposing ``predict(df, key, features)``.
    :return: ``hana_ml.dataframe.DataFrame`` holding the prediction row.
    """
    dt = 'drop table #t'
    sql = 'create local temporary table #t("ID" INTEGER CS_INT,\
    "AGE" INTEGER CS_INT,\
    "JOB" VARCHAR(256),\
    "MARITAL" VARCHAR(100),\
    "EDUCATION" VARCHAR(256),\
    "DBM_DEFAULT" VARCHAR(100),\
    "HOUSING" VARCHAR(100),\
    "LOAN" VARCHAR(100),\
    "CONTACT" VARCHAR(100),\
    "DBM_MONTH" VARCHAR(100),\
    "DAY_OF_WEEK" VARCHAR(100),\
    "DURATION" DOUBLE CS_DOUBLE,\
    "CAMPAIGN" INTEGER CS_INT,\
    "PDAYS" INTEGER CS_INT,\
    "PREVIOUS" INTEGER CS_INT,\
    "POUTCOME" VARCHAR(100),\
    "EMP_VAR_RATE" DOUBLE CS_DOUBLE,\
    "CONS_PRICE_IDX" DOUBLE CS_DOUBLE,\
    "CONS_CONF_IDX" DOUBLE CS_DOUBLE,\
    "EURIBOR3M" DOUBLE CS_DOUBLE,\
    "NREMPLOYED" INTEGER CS_INT)'
    # parameterized insert — values are bound, never string-formatted into SQL
    insert = 'insert into #t values(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)'
    with connection_context.connection.cursor() as cur:
        try:
            # best-effort cleanup: #t may not exist yet on a fresh session
            cur.execute(dt)
        except dbapi.Error:
            pass
        ret = cur.execute(sql)
        if ret:
            ret = cur.execute(insert, [99999, age, job, marital_status, education, dbm_default, housing, loan, contact, dbm_month, day_of_week, duration, campaign, pdays, previous, poutcome, emp_var_rate, cons_price_idx, cons_conf_idx, euribor3m, nremployed])
        df = dataframe.DataFrame(connection_context, 'select * from #t')
        prediction = lr.predict(df, 'ID', ['AGE','JOB','MARITAL','EDUCATION','DBM_DEFAULT', 'HOUSING','LOAN','CONTACT','DBM_MONTH','DAY_OF_WEEK','DURATION','CAMPAIGN','PDAYS','PREVIOUS','POUTCOME','EMP_VAR_RATE','CONS_PRICE_IDX','CONS_CONF_IDX','EURIBOR3M','NREMPLOYED'])
    return prediction
p = predict(connection_context, lr, 56, "housemaid","married","basic.4y","no","no","no","telephone","may","mon",261,1,999,0,"nonexistent",1.1,93.994,-36.4,4.857,5191)
p.collect()
p2 = predict(connection_context, lr, 49,"entrepreneur","married","university.degree","unknown","yes","no","telephone","may","mon",1042,1,999,0,"nonexistent",1.1,93.994,-36.4,4.857,5191)
p2.collect()
predict_on_train_data.filter("CLASS='yes'").head(5).collect()
p3 = predict(connection_context, lr, 32,"admin.","married","university.degree","no","yes","no","cellular","aug","fri",1366,1,999,0,"nonexistent",1.4,93.444,-36.1,4.964,5228.1)
p3.collect()
# + jupyter={"outputs_hidden": true}
| Python-API/pal/notebooks/bankDirectLoadAndPredict.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (py39)
# language: python
# name: py39
# ---
# + [markdown] papermill={"duration": 0.017805, "end_time": "2021-04-28T00:43:44.959630", "exception": false, "start_time": "2021-04-28T00:43:44.941825", "status": "completed"} tags=[]
# A notebook that contains evaluation timeseries and correlation plots that compare data from the ORCA buoy at Point Williams in Main Basin to the model data. The data used are daily averages of the modeled and observed data.
# + papermill={"duration": 0.045714, "end_time": "2021-04-28T00:43:45.016206", "exception": false, "start_time": "2021-04-28T00:43:44.970492", "status": "completed"} tags=[]
import sys
sys.path.append('/ocean/kflanaga/MEOPAR/analysis-keegan/notebooks/Tools')
# + papermill={"duration": 5.274797, "end_time": "2021-04-28T00:43:50.307023", "exception": false, "start_time": "2021-04-28T00:43:45.032226", "status": "completed"} tags=[]
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import netCDF4 as nc
import datetime as dt
from salishsea_tools import evaltools as et, viz_tools, places, geo_tools
import gsw
import pickle
import matplotlib.gridspec as gridspec
import matplotlib as mpl
import matplotlib.dates as mdates
import cmocean as cmo
from matplotlib.colors import LogNorm
import Keegan_eval_tools as ket
#import scipy.io as so
# + papermill={"duration": 0.01752, "end_time": "2021-04-28T00:43:50.340386", "exception": false, "start_time": "2021-04-28T00:43:50.322866", "status": "completed"} tags=[]
choosepoint=False
if choosepoint==True:
with nc.Dataset('/data/eolson/results/MEOPAR/NEMO-forcing-new/grid/bathymetry_201702.nc') as bathy:
navlat=bathy.variables['nav_lat'][:,:]
navlon=bathy.variables['nav_lon'][:,:]
with nc.Dataset('/data/eolson/results/MEOPAR/NEMO-forcing-new/grid/mesh_mask201702.nc') as mesh:
tmask=mesh.variables['tmask'][0,:,:,:]
indj,indi=geo_tools.find_closest_model_point(-123.93,49.75,navlon,navlat,land_mask=np.abs(tmask[0,:,:]-1))
print(indj,indi)
fig,ax=plt.subplots(1,1,figsize=(3,4))
ax.pcolormesh(tmask[0,:,:])
ax.plot(282,598,'r*')
ax.set_ylim(550,700)
ax.set_xlim(150,300)
viz_tools.set_aspect(ax)
fig,ax=plt.subplots(1,1,figsize=(3,2))
ax.pcolormesh(tmask[:,598,:])
ax.set_ylim(40,0)
ax.plot(282,0,'r*')
#ax.set_ylim(550,700)
ax.set_xlim(150,300)
#viz_tools.set_aspect(ax)
else:
indj,indi=places.PLACES['Egmont']['NEMO grid ji']
# + papermill={"duration": 0.024036, "end_time": "2021-04-28T00:43:50.375803", "exception": false, "start_time": "2021-04-28T00:43:50.351767", "status": "completed"} tags=["parameters"]
year=2019
chlToN=1.8
indk=0
mooring='PointWilliams'
saveloc='/ocean/kflanaga/MEOPAR/savedData/King_CountyData'
# + papermill={"duration": 0.016731, "end_time": "2021-04-28T00:43:50.412673", "exception": false, "start_time": "2021-04-28T00:43:50.395942", "status": "completed"} tags=["injected-parameters"]
# Parameters
# (papermill-injected values that override the defaults in the parameters cell)
saveloc = "/ocean/kflanaga/MEOPAR/savedData/King_CountyData"
chlToN = 1.8
indk = 0
year = 2018
# BUG FIX: this was injected as `Mooring`, but the rest of the notebook reads
# the lowercase `mooring`, so the injected mooring name was silently ignored
# and the default from the parameters cell was used instead.
mooring = "PointWilliams"
# + papermill={"duration": 0.014266, "end_time": "2021-04-28T00:43:50.439432", "exception": false, "start_time": "2021-04-28T00:43:50.425166", "status": "completed"} tags=[]
datelims=(dt.datetime(year,1,1),dt.datetime(year,1,1))
start_date=datelims[0]
end_date=datelims[1]
citez=1.0
# + papermill={"duration": 0.027597, "end_time": "2021-04-28T00:43:50.477920", "exception": false, "start_time": "2021-04-28T00:43:50.450323", "status": "completed"} tags=[]
##### Loading in pickle file data
with open(os.path.join(saveloc,f'daily_data_{mooring}_{year}.pkl'),'rb') as hh:
df0=pickle.load(hh)
# + papermill={"duration": 0.015137, "end_time": "2021-04-28T00:43:50.508467", "exception": false, "start_time": "2021-04-28T00:43:50.493330", "status": "completed"} tags=[]
Lat=df0.Lat.unique()[0]
Lon=df0.Lon.unique()[0]
# + papermill={"duration": 0.105557, "end_time": "2021-04-28T00:43:50.625449", "exception": false, "start_time": "2021-04-28T00:43:50.519892", "status": "completed"} tags=[]
ii,jj=geo_tools.get_ij_coordinates(Lat,Lon,
grid_loc='/ocean/kflanaga/MEOPAR/grid/grid_from_lat_lon_mask999.nc')
# + papermill={"duration": 0.050868, "end_time": "2021-04-28T00:43:50.688310", "exception": false, "start_time": "2021-04-28T00:43:50.637442", "status": "completed"} tags=[]
PATH= '/results2/SalishSea/nowcast-green.201905/'
start_date = df0['dtUTC'].iloc[0]
end_date = df0['dtUTC'].iloc[-1]+dt.timedelta(days=1)
flen=1
namfmt='nowcast'
filemap={'nitrate':'ptrc_T','diatoms':'ptrc_T','ciliates':'ptrc_T','flagellates':'ptrc_T',
'vosaline':'grid_T','votemper':'grid_T'}
fdict={'ptrc_T':1,'grid_T':1}
df0['i']=ii
df0['j']=jj
df0['k']=3
# + papermill={"duration": 397.80855, "end_time": "2021-04-28T00:50:28.512335", "exception": false, "start_time": "2021-04-28T00:43:50.703785", "status": "completed"} tags=[]
# note: I only ran the first 365 data points to save time
df=et.matchData(df0,filemap,fdict,start_date,end_date,namfmt,PATH,1,preIndexed=True);
# + papermill={"duration": 0.024712, "end_time": "2021-04-28T00:50:28.553392", "exception": false, "start_time": "2021-04-28T00:50:28.528680", "status": "completed"} tags=[]
df['mod_chl']=chlToN*(df['mod_diatoms']+df['mod_ciliates']+df['mod_flagellates'])
df['log_mod_chl']=ket.logt(df['mod_chl'])
df['log_chl']=ket.logt(df['Chl'])
# + papermill={"duration": 0.033858, "end_time": "2021-04-28T00:50:28.600672", "exception": false, "start_time": "2021-04-28T00:50:28.566814", "status": "completed"} tags=[]
def quick_ts(obsvar, modvar):
    """Plot observed (red dots) vs. modelled (cyan line) values of one
    variable from the module-level dataframe ``df`` against time.

    :param obsvar: column name of the observations in ``df``.
    :param modvar: column name of the model output in ``df``.
    :return: (list of line handles, axes) so callers can re-title / re-limit.
    """
    ps = []
    fig, ax = plt.subplots(1, 1, figsize=(12, 8))
    # Plotting the data: observations and model share the time axis
    p0, = ax.plot(df['dtUTC'], df[obsvar], 'r.', label='Obs')
    ps.append(p0)
    p0, = ax.plot(df['dtUTC'], df[modvar], 'c-', label='Mod')
    ps.append(p0)
    # labels and tick styling (dropped needless f-prefix on constant strings)
    ax.set_xlabel('Date', fontsize=20)
    ax.set_ylabel(obsvar, fontsize=20)
    ax.set_title(str(2007), fontsize=20)  # placeholder title; every caller overwrites it
    ax.xaxis.set_tick_params(labelsize=16)
    ax.yaxis.set_tick_params(labelsize=16)
    legend = plt.legend(handles=ps, bbox_to_anchor=[1, .6, 0, 0])
    plt.setp(ax.get_xticklabels(), rotation=30, horizontalalignment='right')
    # cap the number of x ticks and format them as dates
    M = 11
    xticks = mpl.ticker.MaxNLocator(M)
    ax.xaxis.set_major_locator(xticks)
    plt.gca().add_artist(legend)
    yearsFmt = mdates.DateFormatter('%y %d %b')
    ax.xaxis.set_major_formatter(yearsFmt)
    return ps, ax
def quick_varvar(ax, df, obsvar, modvar, lims):
    """Draw a model-vs-observation scatter on ``ax`` with a 1:1 reference line.

    :param lims: (lo, hi) applied to both axes so bias is visually obvious.
    :return: the plot handles produced by ``et.varvarPlot``.
    """
    handles = et.varvarPlot(ax, df, obsvar, modvar)
    ax.set_xlabel('Obs', fontsize=20)
    ax.set_ylabel('Model', fontsize=20)
    # faint 1:1 line, then force identical square limits on both axes
    ax.plot(lims, lims, 'k-', alpha=.5)
    ax.set_xlim(lims)
    ax.set_ylim(lims)
    ax.set_aspect(1)
    return handles
# + [markdown] papermill={"duration": 0.018666, "end_time": "2021-04-28T00:50:28.639893", "exception": false, "start_time": "2021-04-28T00:50:28.621227", "status": "completed"} tags=[]
# # Chlorophyll
# + papermill={"duration": 0.230908, "end_time": "2021-04-28T00:50:28.892012", "exception": false, "start_time": "2021-04-28T00:50:28.661104", "status": "completed"} tags=[]
obsvar='Chl'
modvar='mod_chl'
ps,ax=quick_ts(obsvar,modvar)
ax.set_title('Chlorophyll Timeseries',fontsize=20)
# + papermill={"duration": 0.144009, "end_time": "2021-04-28T00:50:29.050400", "exception": false, "start_time": "2021-04-28T00:50:28.906391", "status": "completed"} tags=[]
obsvar='Chl'
modvar='mod_chl'
lims=(0,60)
fig,ax=plt.subplots(1,1,figsize=(8,8))
ps=quick_varvar(ax,df,obsvar,modvar,lims)
ax.set_title('Observed vs Model Chlorophyll',fontsize=20)
# + [markdown] papermill={"duration": 0.027907, "end_time": "2021-04-28T00:50:29.092579", "exception": false, "start_time": "2021-04-28T00:50:29.064672", "status": "completed"} tags=[]
# # Log Transformed Chlorophyll
# + papermill={"duration": 0.167504, "end_time": "2021-04-28T00:50:29.274035", "exception": false, "start_time": "2021-04-28T00:50:29.106531", "status": "completed"} tags=[]
obsvar='log_chl'
modvar='log_mod_chl'
ps,ax=quick_ts(obsvar,modvar)
ax.set_title('Observed vs Modeled log10[Chlorophyll]',fontsize=20)
ax.set_ylim(-1,2)
# + papermill={"duration": 0.122454, "end_time": "2021-04-28T00:50:29.412768", "exception": false, "start_time": "2021-04-28T00:50:29.290314", "status": "completed"} tags=[]
obsvar='log_chl'
modvar='log_mod_chl'
lims=(-2,6)
fig,ax=plt.subplots(1,1,figsize=(8,8))
ps=quick_varvar(ax,df,obsvar,modvar,lims)
ax.set_title('Observed vs Model log10[Chlorophyll]',fontsize=20)
# + [markdown] papermill={"duration": 0.031861, "end_time": "2021-04-28T00:50:29.461849", "exception": false, "start_time": "2021-04-28T00:50:29.429988", "status": "completed"} tags=[]
# # Salinity
# + papermill={"duration": 0.169911, "end_time": "2021-04-28T00:50:29.658108", "exception": false, "start_time": "2021-04-28T00:50:29.488197", "status": "completed"} tags=[]
obsvar='SA'
modvar='mod_vosaline'
ps,ax=quick_ts(obsvar,modvar)
ax.set_title('Observed vs Modeled Salinity',fontsize=20)
# + papermill={"duration": 0.135319, "end_time": "2021-04-28T00:50:29.813332", "exception": false, "start_time": "2021-04-28T00:50:29.678013", "status": "completed"} tags=[]
obsvar='SA'
modvar='mod_vosaline'
lims=(0,40)
fig,ax=plt.subplots(1,1,figsize=(8,8))
ps=quick_varvar(ax,df,obsvar,modvar,lims)
ax.set_title('Observed vs Model Salinity',fontsize=20)
# + [markdown] papermill={"duration": 0.037192, "end_time": "2021-04-28T00:50:29.871290", "exception": false, "start_time": "2021-04-28T00:50:29.834098", "status": "completed"} tags=[]
# # Temperature
# + papermill={"duration": 0.171186, "end_time": "2021-04-28T00:50:30.062781", "exception": false, "start_time": "2021-04-28T00:50:29.891595", "status": "completed"} tags=[]
obsvar='CT'
modvar='mod_votemper'
ps,ax=quick_ts(obsvar,modvar)
ax.set_title('Observed vs Modeled Temperature',fontsize=20)
# + papermill={"duration": 0.133361, "end_time": "2021-04-28T00:50:30.219215", "exception": false, "start_time": "2021-04-28T00:50:30.085854", "status": "completed"} tags=[]
obsvar='CT'
modvar='mod_votemper'
lims=(0,40)
fig,ax=plt.subplots(1,1,figsize=(8,8))
ps=quick_varvar(ax,df,obsvar,modvar,lims)
ax.set_title('Observed vs model Temperature',fontsize=20)
# + [markdown] papermill={"duration": 0.025975, "end_time": "2021-04-28T00:50:30.269096", "exception": false, "start_time": "2021-04-28T00:50:30.243121", "status": "completed"} tags=[]
# # Nitrate
# + papermill={"duration": 0.229716, "end_time": "2021-04-28T00:50:30.522328", "exception": false, "start_time": "2021-04-28T00:50:30.292612", "status": "completed"} tags=[]
obsvar='NO23'
modvar='mod_nitrate'
ps,ax=quick_ts(obsvar,modvar)
ax.set_title('Observed vs Modeled Nitrate',fontsize=20)
# + papermill={"duration": 0.143307, "end_time": "2021-04-28T00:50:30.692144", "exception": false, "start_time": "2021-04-28T00:50:30.548837", "status": "completed"} tags=[]
obsvar='NO23'
modvar='mod_nitrate'
lims=(0,40)
fig,ax=plt.subplots(1,1,figsize=(8,8))
ps=quick_varvar(ax,df,obsvar,modvar,lims)
ax.set_title('Observed vs Model Nitrate',fontsize=20)
| notebooks/Evaluations/Continuous_Timeseries/King_County/Daily_Timeseries/PointWilliams/2018_PointWilliams_Timeseries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # The Pessimistic Machine
#
# The Pessimistic Machine is a proof of concept for style adaptation. The machine takes a sentence as input and returns a sentence addressing a similar object but conveying a negative sentiment.
#
# The machine needs to be fed with the latent representations of the sentences from the dataset which can be obtained using the script *compute_latent_representations.py*.
# +
import pandas as pd
import numpy as np
import time
import datetime
import json
from tqdm import tqdm
import os
import tensorflow as tf
import seaborn as sns
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from bokeh.io import output_notebook
from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.models import HoverTool
output_notebook()
from data_utils_LMR import prepare_data,read_data, EncoderDecoder
from model import Vrae as Vrae_model
from batch import Generator
prepare_data(1000)
training_dir = 'logs/'
training_dir += 'no_char2word'
# sentiment analyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sentimentAnalyzer = SentimentIntensityAnalyzer()
def getSentimentScore(sentence):
    """Return the (negative, neutral, positive) VADER scores for a sentence."""
    polarity = sentimentAnalyzer.polarity_scores(sentence)
    return tuple(polarity[key] for key in ('neg', 'neu', 'pos'))
class dotdict(dict):
    """Dictionary whose entries are also reachable as attributes.

    Missing attributes resolve to ``None`` (mirroring ``dict.get``);
    attribute assignment and deletion act on the underlying mapping.
    """

    def __getattr__(self, name):
        # only called when normal attribute lookup fails, so real dict
        # methods (keys, items, ...) are unaffected
        return self.get(name)

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
def string2bool(st):
    """Interpret a serialized flag: True iff ``st`` equals 'true', case-insensitively.

    Any other value ('yes', '1', '', ...) maps to False — matching how the
    training flags are stored in flags.json.
    """
    return st.lower() == "true"
with open(training_dir +'/flags.json', 'r') as fp:
FLAGS = dotdict(json.loads( fp.read() ) )
for k,v in FLAGS.iteritems():
print k,':',v
n_samples = 5000#int(FLAGS.batch_size)
# -
# ## k-NN Decoder
# Load the pre-computed latent representations (one "sentence|z1,z2,..." line
# per training example) and index them with a KD-tree for nearest-neighbour
# lookup in latent space.
labels = []
zs = []
with tf.gfile.GFile(training_dir + "/latent_representations.txt" , mode="r") as source_file:
    # NOTE(review): the first line is read here and never parsed — the loop
    # body calls readline() again before splitting. Presumably a header line;
    # confirm against the file format written by compute_latent_representations.py.
    source = source_file.readline()
    counter = 0
    while source:
        source = source_file.readline()
        # keep only well-formed lines whose latent vector has the expected
        # dimensionality (16)
        if len(source.split('|')) > 1:
            z_ = [ float(u) for u in source.split('|')[1].split(',')]
            if len(z_) == 16:
                labels.append(source.split('|')[0])
                zs.append(z_ )
                counter += 1
print len(zs), 'points'
from sklearn.neighbors import KDTree
# leaf_size=1 favours query speed over build time/memory for this small index
kdt = KDTree(np.array(zs), leaf_size=1,metric='euclidean')
def getNeighbor(zz, n_similar=5):
    """Return the ``n_similar`` training sentences nearest to the latent
    point(s) ``zz``, together with the raw distance array from the KD-tree.
    """
    dist, ind = kdt.query(zz, k=n_similar)
    neighbours = []
    for row_index in ind[0]:
        neighbours.append(labels[row_index])
    return neighbours, dist
# ## The VAE model
# +
with open(training_dir +'/training_parameters.json', 'r') as fp:
training_parameters = json.loads( fp.read() )
# vocabulary encoder-decoder
encoderDecoder = EncoderDecoder()
num_symbols = encoderDecoder.vocabularySize()
# prepare data
sentences, ratings = read_data( max_size=None,
max_sentence_size=training_parameters['seq_max'],
min_sentence_size=int(FLAGS.sequence_min),
test=False)
print len(sentences), " sentences"
encoderDecoder = EncoderDecoder()
config = tf.ConfigProto(
device_count = {'GPU': 0}, # do not use GPU for testing
)
FLAGS.peephole = False
# load model
vrae_model = Vrae_model(char2word_state_size = int(FLAGS.char2word_state_size),
char2word_num_layers = int(FLAGS.char2word_num_layers),
encoder_state_size = int(FLAGS.encoder_state_size),
encoder_num_layers = int(FLAGS.encoder_num_layers),
decoder_state_size = int(FLAGS.decoder_state_size),
decoder_num_layers = int(FLAGS.decoder_num_layers),
latent_dim=int(FLAGS.latent_dim),
batch_size=n_samples,
num_symbols=num_symbols,
latent_loss_weight=float(FLAGS.latent_loss_weight),
dtype_precision=FLAGS.dtype_precision,
cell_type=FLAGS.cell,
peephole=FLAGS.peephole,
input_keep_prob=float(FLAGS.input_keep_prob),
output_keep_prob=float(FLAGS.output_keep_prob),
sentiment_feature = string2bool(FLAGS.use_sentiment_feature),
use_char2word = string2bool(FLAGS.use_char2word)
)
def zToXdecoded(session, z_sample, s_length):
    """Decode a latent sample back into a readable sentence."""
    reconstruction = vrae_model.zToX(session, z_sample, s_length)
    # greedy decoding: pick the highest-probability symbol at each position
    best_symbols = np.argmax(reconstruction[0], axis=1)
    return encoderDecoder.prettyDecode(best_symbols)
# -
# ## The Pessimist Machine
def MachineSays(sess, u, n_sample=20):
    """Return up to ``n_sample`` training sentences near ``u`` in latent
    space, ordered most-negative first (the "pessimist" ranking).
    """
    sent = getSentimentScore(u)
    sent_index = 0  # position of the 'neg' score in the sentiment tuple
    # encode the input sentence and project it into the latent space
    encoded = encoderDecoder.encodeForTraining(u)
    zz = vrae_model.XToz(sess, *encoded, sentiment=getSentimentScore(u))[0]
    res, dist = getNeighbor([list(zz)], n_sample)
    # never answer with the input sentence itself
    if u.lower() in res:
        res.remove(u.lower())
    ranked = sorted(zip(res, list(dist[0])),
                    key=lambda pair: getSentimentScore(pair[0])[sent_index],
                    reverse=True)
    return [sentence for sentence, _ in ranked]
answers = []
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
saver.restore(sess, "./"+training_dir+'/model.ckp')
for uu in MachineSays(sess,"I like this movie.", 50):
print uu
# +
us = [ "I totally loved it.",
"I was really bad.",
"it was terrible.",
"the acting was mostly good.",
"I liked this movie.",
"it was a nice movie.",
"the story was amazing.",
"it was not bad.",
"The acting was good.",
"The music was good.",
]
answers = []
saver = tf.train.Saver()
with tf.Session(config=config) as sess:
saver.restore(sess, "./"+training_dir+'/model.ckp')
for u in us:
answers.append( MachineSays(sess,u)[0] )
df = pd.DataFrame()
df["input"] = us
df["answer"] = answers
df
# -
print df.to_latex()
# ## neighbordhood
# +
us = [ "I totally loved it.",
"I was really bad.",
"it was terrible.",
"the acting was mostly good.",
"I liked this movie.",
"it was a nice movie.",
"the story was amazing.",
"it was not bad.",
"The acting was good.",
"The music was good.",
]
saver = tf.train.Saver()
df = pd.DataFrame()
with tf.Session(config=config) as sess:
saver.restore(sess, "./"+training_dir+'/model.ckp')
#for _ in range(20):
for u in us:
k = int(np.random.random() * len(sentences))
u = encoderDecoder.prettyDecode(sentences[k])
l = MachineSays(sess,u,20)
while len(l) < 20:
l.append("")
df[u] = l
df
# -
print df.to_latex()
| The Pessimist Machine.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Load libraries
# !pip install -r requirements.txt
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import sys
import os
import numpy as np
import pandas as pd
from PIL import Image
import torch
import torch.nn as nn
import torch.utils.data as D
from torch.optim.lr_scheduler import ExponentialLR
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import transforms
from torchvision import models
from ignite.engine import Events, create_supervised_trainer, create_supervised_evaluator
# from scripts.ignite import create_supervised_evaluator, create_supervised_trainer
from ignite.metrics import Loss, Accuracy
from ignite.contrib.handlers.tqdm_logger import ProgressBar
from ignite.handlers import EarlyStopping, ModelCheckpoint
from ignite.contrib.handlers import LinearCyclicalScheduler, CosineAnnealingScheduler
import random
from tqdm import tqdm_notebook
from sklearn.model_selection import train_test_split
from efficientnet_pytorch import EfficientNet, utils as enet_utils
from scripts.evaluate import eval_model
from scripts.plates_leak import apply_plates_leak
import gc
import warnings
warnings.filterwarnings('ignore')
# -
# !ls /storage/rxrxai
# ## Define dataset and model
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
img_dir = '/storage/rxrxai'
path_data = '/storage/rxrxai'
stats_df = pd.read_csv(path_data + f'/pixel_stats_agg.csv')
model_name = 'efficientnet-b1'
device = 'cuda'
batch_size = 16
torch.manual_seed(0)
init_lr = 3e-4
end_lr = 1e-7
classes = 1108
# -
std_mean = stats_df[(stats_df['cell'] == 'ALL') & (stats_df['channel'] == 1.)][['std', 'mean']]
std_mean
std_mean.iloc[0]['std']
std_mean.iloc[0]['mean']
# +
channel_transforms = {}
for channel in range(1, 7):
std_mean = stats_df[(stats_df['cell'] == 'ALL') & (stats_df['channel'] == float(channel))][['std', 'mean']]
mean = std_mean.iloc[0]['mean']
std = std_mean.iloc[0]['std']
channel_transforms[channel] = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[mean], std=[std])
])
class ImagesDS(D.Dataset):
    """torch Dataset of multi-channel cellular images.

    Each sample stacks the requested fluorescence channels of site 1 for one
    (experiment, plate, well) row into a single tensor, normalized per channel
    via the module-level ``channel_transforms``. In 'train' mode the label is
    the integer sirna class; otherwise the id_code string is returned.
    """

    def __init__(self, df, img_dir=img_dir, mode='train', validation=False, channels=(1, 2, 3, 4, 5, 6)):
        # tuple default avoids the shared mutable-default-argument pitfall;
        # callers passing a list still work, since channels is only iterated
        self.records = df.to_records(index=False)  # recarray: fast per-row attribute access
        self.mode = mode
        self.img_dir = img_dir
        self.len = df.shape[0]
        self.validation = validation
        self.channels = channels

    def _get_img_path(self, index, channel, site):
        # <img_dir>/<mode>/<experiment>/Plate<n>/<well>_s<site>_w<channel>.png
        experiment, well, plate = self.records[index].experiment, self.records[index].well, self.records[index].plate
        return '/'.join([self.img_dir, self.mode, experiment, f'Plate{int(plate)}', f'{well}_s{site}_w{channel}.png'])

    @staticmethod
    def _load_img_as_tensor(file_name, channel):
        # normalize with the dataset-wide per-channel mean/std
        with Image.open(file_name) as img:
            return channel_transforms[channel](img)

    def __getitem__(self, index):
        # only site 1 is loaded; the site-2 / random-swap variant was disabled
        img1 = torch.cat([self._load_img_as_tensor(self._get_img_path(index, ch, 1), ch) for ch in self.channels])
        if self.mode == 'train':
            return img1, int(self.records[index].sirna)
        else:
            return img1, self.records[index].id_code

    def __len__(self):
        return self.len
# +
# dataframes for training, cross-validation, and testing
df_train = pd.read_csv(path_data+'/train.csv')
df_val = pd.read_csv(path_data+'/validation.csv')
df_val = df_val.drop(['ds', 'cell', 'aug'], axis=1)
df_train = df_train[~df_train.isin(df_val)].dropna()
df_test = pd.read_csv(path_data+'/test.csv')
# pytorch training dataset & loader
ds = ImagesDS(df_train, mode='train', validation=False)
loader = D.DataLoader(ds, batch_size=batch_size, shuffle=True, num_workers=8)
# pytorch cross-validation dataset & loader
ds_val = ImagesDS(df_val, mode='train', validation=True)
val_loader = D.DataLoader(ds_val, batch_size=batch_size, shuffle=True, num_workers=8)
# pytorch test dataset & loader
ds_test = ImagesDS(df_test, mode='test', validation=True)
tloader = D.DataLoader(ds_test, batch_size=1, shuffle=False, num_workers=8)
# +
# class DenseNetTwoInputs(nn.Module):
# def __init__(self):
# super(DenseNetTwoInputs, self).__init__()
# self.classes = 1108
# model = models.densenet121(pretrained=True)
# num_ftrs = model.classifier.in_features
# model.classifier = nn.Identity()
# # let's make our model work with 6 channels
# trained_kernel = model.features.conv0.weight
# new_conv = nn.Conv2d(6, 64, kernel_size=7, stride=2, padding=3, bias=False)
# with torch.no_grad():
# new_conv.weight[:,:] = torch.stack([torch.mean(trained_kernel, 1)]*6, dim=1)
# model.features.conv0 = new_conv
# self.densenet = model
# self.fc = nn.Linear(num_ftrs * 2, self.classes)
# def forward(self, x1, x2):
# x1_out = self.densenet(x1)
# x2_out = self.densenet(x2)
# N, _, _, _ = x1.size()
# x1_out = x1_out.view(N, -1)
# x2_out = x2_out.view(N, -1)
# out = torch.cat((x1_out, x2_out), 1)
# out = self.fc(out)
# return out
# model = DenseNetTwoInputs()
# model.train()
# +
class EfficientNetTwoInputs(nn.Module):
    """EfficientNet backbone shared across two image inputs, with the two
    feature vectors concatenated before a single linear classifier.

    The stem convolution is rebuilt to accept 6 channels by averaging the
    pretrained 3-channel kernel and replicating it across all 6 inputs.
    """

    def __init__(self):
        super(EfficientNetTwoInputs, self).__init__()
        self.classes = 1108
        model = EfficientNet.from_pretrained(model_name, num_classes=1108)
        num_ftrs = model._fc.in_features
        model._fc = nn.Identity()  # expose backbone features; classification happens in self.fc

        # accept 6 channels: mean of the pretrained RGB kernel, replicated 6x
        trained_kernel = model._conv_stem.weight
        new_conv = enet_utils.Conv2dStaticSamePadding(6, 32, kernel_size=(3, 3), stride=(2, 2), bias=False, image_size=512)
        with torch.no_grad():
            new_conv.weight[:, :] = torch.stack([torch.mean(trained_kernel, 1)] * 6, dim=1)
        model._conv_stem = new_conv

        # NOTE: attribute name 'resnet' kept (despite holding an EfficientNet)
        # so existing checkpoints' state_dict keys still match
        self.resnet = model
        self.fc = nn.Linear(num_ftrs * 2, self.classes)

    def forward(self, x1, x2):
        """Return class logits for a pair of 6-channel image batches."""
        x1_out = self.resnet(x1)
        x2_out = self.resnet(x2)
        N = x1.size(0)
        x1_out = x1_out.view(N, -1)
        x2_out = x2_out.view(N, -1)
        # (removed a pointless `del` of locals that were about to go out of scope)
        return self.fc(torch.cat((x1_out, x2_out), 1))
# model = EfficientNetTwoInputs()
model = models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = torch.nn.Linear(num_ftrs, classes)
# let's make our model work with 6 channels
trained_kernel = model.conv1.weight
new_conv = nn.Conv2d(6, 64, kernel_size=7, stride=2, padding=3, bias=False)
with torch.no_grad():
new_conv.weight[:,:] = torch.stack([torch.mean(trained_kernel, 1)]*6, dim=1)
model.conv1 = new_conv
model.train()
# -
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=init_lr)
# +
metrics = {
'loss': Loss(criterion),
'accuracy': Accuracy(),
}
trainer = create_supervised_trainer(model, optimizer, criterion, device=device)
val_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device)
# -
# #### EarlyStopping
# +
# handler = EarlyStopping(patience=30, score_function=lambda engine: engine.state.metrics['accuracy'], trainer=trainer)
# val_evaluator.add_event_handler(Events.COMPLETED, handler)
# -
# #### LR Scheduler
# +
scheduler = CosineAnnealingScheduler(optimizer, 'lr', init_lr, end_lr, len(loader))
trainer.add_event_handler(Events.ITERATION_STARTED, scheduler)
# @trainer.on(Events.ITERATION_COMPLETED)
# def print_lr(engine):
# epoch = engine.state.epoch
# iteration = engine.state.iteration
# if epoch < 2 and iteration % 100 == 0:
# print(f'Iteration {iteration} | LR {optimizer.param_groups[0]["lr"]}')
# -
# #### Compute and display metrics
@trainer.on(Events.EPOCH_COMPLETED)
def compute_and_display_val_metrics(engine):
    """After each training epoch, evaluate on the validation set and print
    the average loss and accuracy."""
    epoch = engine.state.epoch  # previously computed but unused; now used below
    metrics = val_evaluator.run(val_loader).metrics
    print("Validation Results - Epoch: {} | Average Loss: {:.4f} | Accuracy: {:.4f} "
          .format(epoch, metrics['loss'], metrics['accuracy']))
# #### Save best epoch only
# !mkdir -p models
# +
def get_saved_model_path(epoch):
    """Checkpoint filename for a given epoch.

    The +49 offset presumably continues numbering from an earlier run that
    ended at epoch 49 — TODO confirm.
    """
    return 'models/Model_{}_{}.pth'.format(model_name, epoch + 49)
best_acc = 0.
best_epoch = 1
best_epoch_file = ''
@trainer.on(Events.EPOCH_COMPLETED)
def save_best_epoch_only(engine):
    """Keep on disk only the checkpoint of the best-accuracy epoch seen so far."""
    epoch = engine.state.epoch

    # best-so-far bookkeeping lives in module globals so it survives between
    # epoch callbacks; reset whenever a fresh run starts at epoch 1
    global best_acc
    global best_epoch
    global best_epoch_file
    best_acc = 0. if epoch == 1 else best_acc
    best_epoch = 1 if epoch == 1 else best_epoch
    best_epoch_file = '' if epoch == 1 else best_epoch_file

    # NOTE(review): this re-runs the validation pass even though
    # compute_and_display_val_metrics already ran it for this epoch, so
    # validation happens twice per epoch — consider sharing the result.
    metrics = val_evaluator.run(val_loader).metrics

    if metrics['accuracy'] > best_acc:
        # remove the previous best checkpoint so only one file remains
        prev_best_epoch_file = get_saved_model_path(best_epoch)
        if os.path.exists(prev_best_epoch_file):
            os.remove(prev_best_epoch_file)

        best_acc = metrics['accuracy']
        best_epoch = epoch
        best_epoch_file = get_saved_model_path(best_epoch)
        print(f'\nEpoch: {best_epoch} - New best accuracy! Accuracy: {best_acc}\n\n\n')

        torch.save(model.state_dict(), best_epoch_file)
# -
# #### Progress bar - uncomment when testing in notebook
pbar = ProgressBar(bar_format='')
pbar.attach(trainer, output_transform=lambda x: {'loss': x})
# #### Train
print('Training started\n')
trainer.run(loader, max_epochs=50)
# #### Evaluate
all_preds, _ = eval_model(model, tloader, best_epoch_file, path_data)
apply_plates_leak(all_preds)
| experiment14.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Input and Output
# Pandas has a lot of functionality, but before you can explore or use it, you'll most likely want to access some data from an external source. You'll also likely want to store results for use later or be able to export results to other tools or to share with others. Pandas has a lot of great options in the area of Input/Output, but with a large number of choices we need to put some thought into what options to use and when.
#
# In this post, I'm going to do a quick overview of some basic I/O for the major options that pandas supports. All of this is available in the documentation, but instead of focusing on details here, I want to get a real world dataset and go over the basic code required to write this data set and then read it back with the same values and types represented in the set. This will give us a basic overview of all of the APIs.
#
# For an input data set, I'll use the [Yahoo! Finance API](https://pypi.org/project/yfinance/) to grab some historical stock market data. This will allow us to see the handling of data types like strings, dates, and numeric values.
#
# All of these examples were first written and tested with Python 3.8.6 and pandas 1.1.4, using a separate virtualenv created using [pyenv](https://www.wrighters.io/2020/11/07/use-pyenv-and-virtual-environments-to-manage-python-complexity).
#
# First, let's install our dependencies using pip (run this cell if needed). If you're using anaconda, you may need to install some of these separately depending on how you setup your environment.
#
# For each of the I/O options below, I'll explain which of these installs are needed.
# %pip install yfinance pandas jupyter matplotlib openpyxl xlrd tables pyarrow
# +
import os
import yfinance as yf
import pandas as pd
import matplotlib.pyplot as plt
msft = yf.Ticker('MSFT')
prices = msft.history(period='max')
prices.head()
# -
prices.shape
prices.describe()
prices.index
prices.dtypes
prices['Close'].plot()
# ## Our data
# Now we have a pandas ```DataFrame``` with a date index, floating point values for Open, High, Low, Close, Dividends, and Stock Splits. The Volume is an integer type. This will let us explore some basics for how to persist and read data in the various formats available to us in pandas.
#
# In this post, we'll look at the following formats: CSV, JSON, Microsoft Excel, HDF5, Feather, Parquet, Pickle, and SQL. These are the format that are supported in pandas with ```to_xxx``` methods and provide local storage of data. For each case, we'll look at only the basics of the API so that we can write and read back the ```DataFrame``` with the same datatypes. For each type, we'll also look at a few basics like the size of the file on disk and the time it takes to read and write the data. Note that this is a small ```DataFrame```, so we won't be exploring the true advantages of compression and speed for some of the formats with a smaller amount of data. Later, we can look at that sort of detail.
#
# Note: I don't mention the html format here, since it's not really a good way to store local data, but is more useful for simple web scraping or generating reports. I also won't mention Stata or SAS in this article, mainly because most users will not choose to use either format unless they need to integrate with those platforms, in which case they won't have much choice in the matter of what storage to choose. Also note that Msgpack support was dropped in pandas 1.0, so while it's in the documentation, it's use is discouraged going forward.
#
# For each storage type, I've created a method that takes in the source ```DataFrame``` and writes it out to local storage, then returns the ```DataFrame``` that was created, along with the stat values of the file it was stored in. This will allow us to run some tests on the results and build a summary table of our differences.
#
# I've also created a comparison method that will compare our original ```DataFrame``` with the one that was written to disk and then recreated from the saved data. It should be the same, minus small changes due to the number of significant digits that the storage mechanism will use.
#
# ## CSV
# First, the most used format for persisting data in the world is probably Comma Separated Values, or CSV. CSV is far from perfect as a format, but is so commonly used that everyone should be able to use it and understand the major issues most users of pandas will encounter with it. The ```DataFrame.to_csv``` and ```pd.read_csv``` methods have a number of arguments and are worthy of a separate article. It's worth noting that the method ```pd.read_table``` is just calling ```read_csv``` but with a Tab (```\t```) as a separator instead of a comma. For now, we will just write our prices ```DataFrame``` as is. When reading our CSV, we need to use a few options to generate a similar result. First, we need to specify our index column so that a new default index is not created, and second, we give the method a hint that our index column is a date so that it can be converted properly.
#
# ### Advantages
# * Widely supported
# * Easy to manipulate and debug with a text editor, or a spreadsheet tool
# * No special libraries or tools needed
# * Simple to break dataset into smaller chunks
#
# ### Disadvantages
# 1. Not efficient
# 1. Lossy for some datatypes
# 1. Not a clear standard, so usually requires some investigation of data to set up
# 1. Cumbersome for large datasets
def compare_dfs(df1, df2):
    """Verify that df2 is a faithful round-trip copy of df1.

    The index and the columns must match exactly; int64 columns must be
    identical, while float64 columns only have to agree to within 1e-10,
    since some storage formats drop a few significant digits.
    """
    # the index must match element by element
    assert (df1.index == df2.index).all()
    # every column must be present, in the same order
    assert (df1.columns == df2.columns).all()
    for name in df1.columns:
        left, right = df1[name], df2[name]
        if left.dtype == 'int64':
            # integers round-trip exactly
            assert (left == right).all()
        elif left.dtype == 'float64':
            # floats only need to be very close
            assert ((left - right).abs() < 1e-10).all()
def read_and_write_csv(df, filename):
    """Write df to CSV, read it back, and return (frame, os.stat of the file).

    On the way back in, column 0 is used as the index and parsed as dates so
    the round trip reproduces the original date index.
    """
    df.to_csv(filename)
    restored = pd.read_csv(filename, index_col=0, parse_dates=[0])
    return restored, os.stat(filename)
# ## JSON
# For JSON (JavaScript Object Notation), the ```DataFrame``` is stored as a single object with each column as a member of that object, consisting of members of the keys of the index and values being the values from the column. So it may look something like this:
# ```
# {"Open":{"511056000000":0.0563667971, ..},
# "Close": {"511056000000":0.0533667232, ..}
# }
# ```
#
# In general, I don't see many people using JSON as a storage format for pandas.
#
# ### Advantages
# * Widely supported
# * Somewhat easy to manipulate and debug with a text editor
# * No special libraries or tools needed
#
# ### Disadvantages
# * Not the most efficient and readable method of storage. Hand editing is not simple.
# * Very cumbersome with larger datasets
def read_and_write_json(df, filename):
    """Write df out as JSON and load it back; return (frame, file stats)."""
    df.to_json(filename)
    restored = pd.read_json(filename)
    return restored, os.stat(filename)
# ## Microsoft Excel
# Using the openpyxl and xlrd packages, pandas is able to read and write Excel files. While maybe not the best long term source of storage, being able interact with Excel is a very important feature for many users. If work teams have data already in Excel and maintain it there, being able to read it into pandas is a necessary feature. Also, many third parties build Excel add-ins, so a common workflow can be to pull data into Excel first, then read it into pandas.
#
# To use Excel, you need to install openpyxl and xlrd.
#
# ### Advantages
# * Excel can make a great data editor, and it's highly likely to be used by businesses to keep lots of valuable business data.
# * Many vendors integrate with Excel, so this can be the quickest and most reliable way to get data into python
#
# ### Disadvantages
# * To manually edit the file, you need to use a spreadsheet tool like Excel
# * Once multiple worksheets are stored in a workbook, life gets a little more complicated
# * Maintaining all formatting and formulas, not to mention macros, can be difficult.
# * Not good for very large data sets
def read_and_write_excel(df, filename):
    """Round-trip df through an Excel workbook (needs openpyxl/xlrd installed)."""
    df.to_excel(filename)
    restored = pd.read_excel(filename, index_col=0)
    return restored, os.stat(filename)
# ## HDF5
# HDF5 is a technology suite that includes a data model, portable file format, software, and a set of tools for managing complex data objects and metadata. For this example, we'll just look at basic persistence. However, HDF5 has support for a number of great features, like extremely large datasets, heirarchical data, and compression.
#
# To use HDF5, you need to install tables.
#
# ### Advantages
# * Support for large datasets
# * Supports hierarchical data
# * Advanced tools for maintaining data
#
# ### Disadvantages
# * More complexity
# * More dependencies
def read_and_write_hdf(df, filename):
    """Store df under the 'prices' key of an HDF5 file and read it back."""
    # mode='w' recreates the file, so reruns don't accumulate stale keys
    df.to_hdf(filename, key='prices', mode='w')
    restored = pd.read_hdf(filename, key='prices')
    return restored, os.stat(filename)
# ## Feather
# Feather is a format designed specifically for dataframes and is written by pandas creator, <NAME>. It's interopable with R, and supports typical data types that would be used in pandas ```DataFrames```, such as timestamps, boolean values, a wide array of numeric types, and categorical values. It's intended to be faster and more efficient than other formats. Feather is now part of the Apache Arrow project.
#
# To use feather, you need to install pyarrow.
#
# ### Advantages
# * Handles datatypes typical in dataframes better than other formats
# * More efficient
#
# ### Disadvantages
# * Requires other dependencies
# * Not as widely supported by other tools
def read_and_write_feather(df, filename):
    """Round-trip df through a Feather file; return (frame, file stats).

    Feather does not allow a non-default index, so the index is moved into a
    regular column before writing and restored afterwards. Works for any
    named index (falls back to 'index', pandas' reset_index default name,
    when the index is unnamed) instead of assuming it is called 'Date'.
    """
    index_name = df.index.name or 'index'
    df.reset_index().to_feather(filename)
    restored = pd.read_feather(filename).set_index(index_name)
    return restored, os.stat(filename)
# ## Parquet
# Parquet is a compressed, efficient columnar data representation that was developed for use in the Hadoop ecosystem. The intention is that it support very efficient compression and encoding schemes.
#
# To use parquet, you need to install pyarrow or fastparquet.
#
# ### Advantages
# * Efficient
# * May use much less space
# * Supports complex nested data structures
#
# ### Disadvantages
# * Requires other dependencies
# * More complex
def read_and_write_parquet(df, filename):
    """Write df to a Parquet file and read it straight back."""
    df.to_parquet(filename)
    restored = pd.read_parquet(filename)
    return restored, os.stat(filename)
# ## Pickle
# Pickle support is also built into pandas. For many users, pickle is a good choice for a quick way to save off data and reload it elsewhere since it's built into Python to begin with.
#
# ### Advantages
# * Widely known
#
# ### Disadvantages
# * Not inherently secure, shouldn't be trusted when loading from external sources since it can result in code execution
# * Not guaranteed to be able to unpickle objects from very old versions of pandas
# * Not ideal for sharing data with non-Python users
def read_and_write_pickle(df, filename):
    """Pickle df to disk and unpickle it; return (frame, file stats)."""
    df.to_pickle(filename)
    restored = pd.read_pickle(filename)
    return restored, os.stat(filename)
# ## SQL
# Pandas also has wide support for SQL databases, both to read and write data. The backend database can be any database supported by SQLAlchemy with a driver. But without SQLAlchemy installed, the fallback is to use SQLite.
#
# ### Advantages
# * Widely used
# * Databases tools are plentiful and can be used for maintaining data
# * Highly likely that many uses cases will require querying a database
#
# ### Disadvantages
# * Database setups can be complex and require extra infrastructure
# * Drivers or extra installs are needed for databases besides SQLite
# +
import sqlite3
def read_and_write_sql(df, filename):
    """Round-trip df through a SQLite database; return (frame, file stats).

    Column names have spaces swapped for underscores while stored (avoids
    warnings about spaces in SQL identifiers) and swapped back on the way
    out. Unlike a naive implementation, the caller's df is never mutated,
    and the connection is closed even if an error occurs.
    """
    conn = sqlite3.connect(filename)
    try:
        # drop first so we can rerun this method
        conn.execute('drop table if exists prices')
        # rename on a copy instead of mutating the caller's frame
        safe = df.rename(columns=lambda c: c.replace(' ', '_'))
        safe.to_sql('prices', conn)
        df2 = pd.read_sql('select * from prices', conn, parse_dates=['Date']).set_index('Date')
        df2.columns = [c.replace('_', ' ') for c in df2.columns]
        return df2, os.stat(filename)
    finally:
        conn.close()
# -
# For a quick summary of these methods (which I wrote and debugged first), I'll run them all and compare the timings and file sizes for each. Note that I purposefully did not choose any extra compression of this data, even though it is available in some formats. I also am using a fairly small dataframe, so performance improvements for some formats will not show up until dealing with very large datasets.
# %timeit read_and_write_csv(prices, 'prices.csv')
# %timeit read_and_write_json(prices, 'prices.json')
# %timeit read_and_write_excel(prices, 'prices.xlsx')
# %timeit read_and_write_hdf(prices, 'prices.h5')
# %timeit read_and_write_feather(prices, 'prices.feather')
# %timeit read_and_write_parquet(prices, 'prices.parquet')
# %timeit read_and_write_pickle(prices, 'prices.pkl')
# %timeit read_and_write_sql(prices, 'prices.db')
# In terms of timing, it's pretty clear that Pickle is the fastest, and hdf5, feather, and parquet are also fairly quick. CSV, JSON, and Excel are much slower. SQL databases will depend a lot on whether the storage is local or not, and if a remote server, how fast the network is and the database server itself. This is just a quick test, a more realistic test would look at various levels of compression, much larger datasets, and different combinations of requirements.
# Run every writer/reader pair once, verify the round trip, and report each
# file's size in KiB.
cases = [
    (read_and_write_csv, 'prices.csv'),
    (read_and_write_json, 'prices.json'),
    (read_and_write_excel, 'prices.xlsx'),
    (read_and_write_hdf, 'prices.h5'),
    (read_and_write_feather, 'prices.feather'),
    (read_and_write_parquet, 'prices.parquet'),
    (read_and_write_pickle, 'prices.pkl'),
    (read_and_write_sql, 'prices.db'),
]
for writer, path in cases:
    restored, stats = writer(prices, path)
    compare_dfs(prices, restored)
    print(writer, stats.st_size/1024)
# Because of the repeated text data in the JSON file, it's far larger than any of the other data files. We see feather is the smallest, with parquet also smaller than most of the others. For a better comparison, we should look at each method's storage options, since compression will make a big difference in size, especially for repeated data.
#
# ### Conclusion
# In summary, pandas has a wide variety of I/O options. Most of the time, choosing which option to use will be dictated by the format in which data is already available. When starting a new project, looking at all the options is a good idea. This post gives a quick overview of what is available in pandas, the basics of calling those APIs, and a rough comparison of data storage size and speed of access. Hopefully ths will motivate you to explore some of the other options that you haven't used yet with pandas.
| pandas/io_pandas_overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="eymXTgON6VDw"
# # Image Inpainting Based on Partial Convolutions in Keras
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="i9XpNMpS6VDx" outputId="335c7c93-4b3c-4c36-ffbc-874e37e40fbf"
# Keras callbacks for checkpointing/logging, plus image-loading helpers.
from keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger
from keras.preprocessing.image import load_img, img_to_array
# Project-local data pipeline (masked-image generator, Torch-style VGG
# normalization) and the partial-convolution model builder.
from inpainter_utils.pconv2d_data import DataGenerator, torch_preprocessing, torch_postprocessing
from inpainter_utils.pconv2d_model import pconv_model
import matplotlib.pyplot as plt
import numpy as np
# SETTINGS:
IMG_DIR_TRAIN = "data/images/train/"
IMG_DIR_VAL = "data/images/validation/"
IMG_DIR_TEST = "data/images/test/"
VGG16_WEIGHTS = "data/vgg16_weights/vgg16_pytorch2keras.h5"  # used by pconv_model (VGG-based loss)
WEIGHTS_DIR = "callbacks/weights/"
TB_DIR = "callbacks/tensorboard/"
CSV_DIR = "callbacks/csvlogger/"
BATCH_SIZE = 5
STEPS_PER_EPOCH = 2500
EPOCHS_STAGE1 = 70   # stage 1: initial training with batch norm enabled
EPOCHS_STAGE2 = 50   # stage 2: fine-tuning with BN frozen in the encoder
LR_STAGE1 = 0.0002
LR_STAGE2 = 0.00005  # lower LR for fine-tuning
STEPS_VAL = 100
BATCH_SIZE_VAL = 4
IMAGE_SIZE = (512, 512)  # target size passed to the generators and the model
# + [markdown] colab_type="text" id="hx7JNQEe6VD2"
# ## Data generators
# + colab={} colab_type="code" id="jd56DzbnHpym"
# DATA GENERATORS:
# Each generator yields ((masked_image, mask), original_image) batches (see the
# prediction section below) with Torch-style VGG preprocessing applied.
train_datagen = DataGenerator(preprocessing_function=torch_preprocessing, horizontal_flip=True)
train_generator = train_datagen.flow_from_directory(
    IMG_DIR_TRAIN,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE
)
# Validation uses fixed seeds and shuffle=False so the same images/masks are
# seen every epoch — presumably to keep val_loss comparable across epochs.
val_datagen = DataGenerator(preprocessing_function=torch_preprocessing)
val_generator = val_datagen.flow_from_directory(
    IMG_DIR_VAL,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE_VAL,
    seed=22,
    mask_init_seed=1,
    total_steps=STEPS_VAL,
    shuffle=False
)
# Create testing generator
test_datagen = DataGenerator(preprocessing_function=torch_preprocessing)
test_generator = test_datagen.flow_from_directory(
    IMG_DIR_TEST,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE
)
# + [markdown] colab_type="text" id="8SbedHEK6VD3"
# ## Training
# ### Stage 1. Initial training (BN enabled)
# + colab={"base_uri": "https://localhost:8080/", "height": 1748} colab_type="code" id="Iw4NWfLk6VD4" outputId="51ec8ddb-4aa3-463f-b644-e00c445ac346"
#LAST_CHECKPOINT = "callbacks/weights/initial/weights.70-2.02-1.95.hdf5"
# Stage 1: build the model for initial training (BN enabled); uncomment the
# load_weights line above/below to resume from an earlier checkpoint.
model = pconv_model(lr=LR_STAGE1, image_size=IMAGE_SIZE, vgg16_weights=VGG16_WEIGHTS)
#model.load_weights(LAST_CHECKPOINT)
# + colab={} colab_type="code" id="XgM2a1XrIcVm"
model.fit_generator(
    train_generator,
    steps_per_epoch=STEPS_PER_EPOCH,
    epochs=EPOCHS_STAGE1,
    validation_data=val_generator,
    validation_steps=STEPS_VAL,
    callbacks=[
        # append=True so the CSV log survives restarts
        CSVLogger(CSV_DIR + "initial/log.csv", append=True),
        TensorBoard(log_dir=TB_DIR + "initial/", write_graph=True),
        # checkpoint filename encodes epoch, validation loss and training loss
        ModelCheckpoint(WEIGHTS_DIR + "initial/weights.{epoch:02d}-{val_loss:.2f}-{loss:.2f}.hdf5", monitor="val_loss", verbose=1, save_weights_only=True)
    ]
)
# + [markdown] colab_type="text" id="IieKWBQ1KQ7v"
# ### Stage 2. Fine-tuning (BN frozen in encoder)
# + colab={} colab_type="code" id="epB7sp_-LT-w"
# Stage 2: rebuild with fine_tuning=True (BN frozen in the encoder, per the
# section title above), a lower learning rate, and the stage-1 checkpoint.
LAST_CHECKPOINT = WEIGHTS_DIR + "initial/weights.80-1.94-1.83.hdf5"
model = pconv_model(fine_tuning=True, lr=LR_STAGE2, image_size=IMAGE_SIZE, vgg16_weights=VGG16_WEIGHTS)
model.load_weights(LAST_CHECKPOINT)
# + colab={} colab_type="code" id="l8Y6_ZTwLpQ8"
model.fit_generator(
    train_generator,
    steps_per_epoch=STEPS_PER_EPOCH,
    # continue the epoch numbering where stage 1 left off
    initial_epoch=EPOCHS_STAGE1,
    epochs=EPOCHS_STAGE1 + EPOCHS_STAGE2,
    validation_data=val_generator,
    validation_steps=STEPS_VAL,
    callbacks=[
        CSVLogger(CSV_DIR + "fine_tuning/log.csv", append=True),
        TensorBoard(log_dir=TB_DIR + "fine_tuning/", write_graph=True),
        ModelCheckpoint(WEIGHTS_DIR + "fine_tuning/weights.{epoch:02d}-{val_loss:.2f}-{loss:.2f}.hdf5", monitor="val_loss", verbose=1, save_weights_only=True)
    ]
)
# + [markdown] colab_type="text" id="9t0s7Jvu6VD6"
# ---
# ## Prediction
# ### Load the model:
# + colab={} colab_type="code" id="GuWrTjVUJgF1"
# Load the best fine-tuned weights into an inference-only model build.
LAST_CHECKPOINT = WEIGHTS_DIR + "fine_tuning/weights.120-1.73-1.78.hdf5"
model = pconv_model(predict_only=True, image_size=IMAGE_SIZE)
model.load_weights(LAST_CHECKPOINT)
k = 1  # running counter used to number the saved result figures below
# + [markdown] colab_type="text" id="h_dAuNxENVcs"
# ### First, try images with random masks from the train set:
# + colab={} colab_type="code" id="NFMq9XxdNePl"
# Make a prediction for a batch of examples:
(input_img, mask), orig_img = next(test_generator)
output_img = model.predict([input_img, mask])
# Post-processing:
orig_img = torch_postprocessing(orig_img)
input_img = torch_postprocessing(input_img) * mask # the (0,0,0) masked pixels are made grey by post-processing
output_img = torch_postprocessing(output_img)
# Composite: keep valid pixels, fill only the holes (mask == 0) from the prediction.
output_comp = input_img.copy()
output_comp[mask == 0] = output_img[mask == 0]
fig, axes = plt.subplots(input_img.shape[0], 2, figsize=(15, 29))
for i in range(input_img.shape[0]):
    #axes[i,0].imshow(orig_img[i])
    axes[i,0].imshow(input_img[i])
    axes[i,1].imshow(output_img[i])
    #axes[i,2].imshow(output_comp[i])
    axes[i,0].tick_params(bottom=False, left=False, labelbottom=False, labelleft=False)
    axes[i,1].tick_params(bottom=False, left=False, labelbottom=False, labelleft=False)
axes[0,0].set_title('Masked image')
axes[0,1].set_title('Prediction')
plt.tight_layout()
plt.savefig("data/examples/{}_result.png".format(k), bbox_inches='tight', pad_inches=0)
plt.show()
k += 1
# + [markdown] colab_type="text" id="p0JPQo5NJjDy"
# ### Second, try on your own images and masks:
# + colab={} colab_type="code" id="HULQvllo6VD7" outputId="dc86af49-c499-4721-f328-f5d002c5a477"
img_fname = "data/examples/own_image.jpg"
mask_fname = "data/examples/own_mask.jpg"
# Mask is assumed to have masked pixels in black and valid pixels in white
# Loading and pre-processing:
orig_img = img_to_array(load_img(img_fname, target_size=IMAGE_SIZE))
orig_img = orig_img[None,...]  # add a batch dimension
# NOTE(review): the exact `== 255` test assumes pure-white valid pixels; JPEG
# compression can produce near-white values that would fail it — confirm.
mask = load_img(mask_fname, target_size=IMAGE_SIZE)
mask = (img_to_array(mask) == 255).astype(np.float32)  # 1 = valid, 0 = hole
mask = mask[None,...]
# Prediction:
output_img = model.predict([torch_preprocessing(orig_img.copy()) * mask, mask])
# Post-processing:
output_img = torch_postprocessing(output_img)
input_img = orig_img * mask
# Composite: original pixels where valid, predicted pixels in the holes.
output_comp = input_img.copy()
output_comp[mask == 0] = output_img[mask == 0]
# Plot:
fig, axes = plt.subplots(2, 2, figsize=(20,20))
axes[0,0].imshow(orig_img[0].astype('uint8'))
axes[0,0].set_title('Original image')
axes[0,1].imshow(mask[0])
axes[0,1].set_title('Mask')
axes[1,0].imshow(input_img[0].astype('uint8'))
axes[1,0].set_title('Masked image')
axes[1,1].imshow(output_img[0])
axes[1,1].set_title('Prediction')
for ax in axes.flatten():
    ax.tick_params(bottom=False, left=False, labelbottom=False, labelleft=False)
plt.tight_layout()
plt.savefig("data/examples/own_image_result.png", bbox_inches='tight', pad_inches=0)
# + colab={} colab_type="code" id="2P7wCOLf6VED"
| inpainter_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# language: python
# name: python3
# ---
# # Assignment
#
# 1. Write digit 0-9 on paper.
# 2. Take photograph of each digit
# 3. load each digit as shape (20,20) in gray
# 4. Using the k=4 with the highest accuracy (Train:Test = 2:1).
# 5. Test your images and compare the predictions
#
# +
import glob
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
def BGR2RGB(img):
    """Reverse the channel axis: OpenCV's BGR ordering -> matplotlib's RGB."""
    return np.flip(img, axis=2)
def BGR2GRAY(img):
    """Collapse a 3-channel BGR image to single-channel grayscale."""
    return cv.cvtColor(img, cv.COLOR_BGR2GRAY)
def GRAY2RGB(img):
    """Expand a single-channel grayscale image to three identical RGB channels."""
    return cv.cvtColor(img, cv.COLOR_GRAY2RGB)
# Load each handwritten-digit photo (BGR arrays, as cv.imread returns them).
images = [cv.imread(file) for file in glob.glob("../../essential/assets/digit/*.jpg")]
# +
# Show the photos in a 2x5 grid; titles assume glob returns them in 0-9 order
# — TODO confirm the filenames sort that way.
row, col = 2,5
# for i in range(len(images)):
# print('\nOriginal image shape:', images[i].shape)
plt.figure(figsize=(15,5))
for i in range(len(images)):
    plt.subplot(row,col,i+1)
    plt.imshow(BGR2RGB(images[i]))
    plt.suptitle('Original Images', fontsize=20)
    plt.title(f'{i}', fontsize=16)
    plt.tight_layout()
plt.show()
# +
# Resize every photo down to 20x20 — the cell size of the digits.png training
# set used later. NOTE(review): np.array(images, ...) assumes all photos share
# the same shape; otherwise it produces an object array — confirm.
img = np.array(images, dtype="uint8")
width, height = 20, 20
dim = (width, height)
digits_list = []
for i in range(len(img)):
    # INTER_AREA is the usual choice for shrinking
    digit = cv.resize(img[i], dim, interpolation=cv.INTER_AREA)
    digits_list.append(digit)
# for i in range(len(digits_ls)):
# print('\nResized image shape:', digits_ls[0].shape)
# +
# Preview the resized digits, first in color and then in grayscale.
a, b = 2,5
plt.figure(figsize=(15,5))
for i in range(len(digits_list)):
    plt.subplot(a,b,i+1)
    plt.imshow(BGR2RGB(digits_list[i]))
    plt.suptitle('Resized Original', fontsize=20)
    plt.title(f'{i}', fontsize=16)
    plt.tight_layout()
plt.show()
plt.figure(figsize=(15,5))
for i in range(len(digits_list)):
    plt.subplot(a,b,i+1)
    # back to 3 channels purely so imshow renders without a colormap
    plt.imshow(GRAY2RGB(BGR2GRAY(digits_list[i])))
    plt.suptitle('Resized Grayscale', fontsize=20)
    plt.title(f'{i}', fontsize=16)
    plt.tight_layout()
plt.show()
# +
# Binarize each digit. THRESH_BINARY_INV makes dark pen strokes white on
# black — presumably to match the polarity of the digits.png samples; confirm.
imgThresh_list = []
for i in range(len(digits_list)):
    imgGray = BGR2GRAY(digits_list[i])
    ret, imgThresh = cv.threshold(imgGray, 125, 255, cv.THRESH_BINARY_INV)
    # stored as 3-channel RGB so plt.imshow can render it directly
    imgThresh_list.append(GRAY2RGB(imgThresh))
print('\nThresholded image shape:', imgThresh_list[0].shape)
row, col = 2,5
plt.figure(figsize=(15,5))
for i in range(len(imgThresh_list)):
    plt.subplot(row, col, i+1)
    plt.imshow(imgThresh_list[i])
    plt.suptitle('Thresholded Images', fontsize=20)
    plt.title(f'{i}', fontsize=16)
    plt.tight_layout()
plt.show()
# +
# NOTE(review): these three imports duplicate the first cell; harmless, but
# they could be removed.
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
# Load the OpenCV sample sheet of handwritten digits (a grid of 20x20 cells).
filename = "../../samples/data/digits.png"
imgGray = cv.imread(filename, cv.IMREAD_GRAYSCALE)
print(imgGray.shape)
#### get all the digits
IMG_SIZE = 20
# NOTE(review): true division gives floats here; np.vsplit/hsplit accept them
# via int() conversion, but int division would be cleaner.
rowNum = imgGray.shape[0] / IMG_SIZE
colNum = imgGray.shape[1] / IMG_SIZE
rows = np.vsplit(imgGray, rowNum)
digits = []
for row in rows:
    rowCells = np.hsplit(row, colNum)
    for digit in rowCells:
        digits.append(digit)
# convert list to np.array
digits = np.array(digits)
print("digits", digits.shape)
# labels: assumes the sheet stores samples grouped by class 0-9 top to bottom,
# so each class label simply repeats len(digits)/10 times.
DIGITS_CLASS = 10
repeatNum = len(digits) / DIGITS_CLASS
labels = np.repeat(np.arange(DIGITS_CLASS), repeatNum)
print("labels", labels.shape)
#### get features: each 20x20 cell flattened to a 400-value float32 vector
features = []
for digit in digits:
    img_pixel = np.float32(digit.flatten())
    features.append(img_pixel)
features = np.squeeze(features)
print("features", features.shape)
# shuffle features and labels
# seed random for constant random value
rand = np.random.RandomState(321)
shuffle = rand.permutation(features.shape[0])
features, labels = features[shuffle], labels[shuffle]
# split into training and testing with the assignment's 2:1 ratio
splitRatio = [2, 1]
sumRatio = sum(splitRatio)
partition = np.array(splitRatio) * len(features) // sumRatio
partition = np.cumsum(partition)
featureTrain, featureTest = np.array_split(features, partition[:-1])
labelTrain, labelTest = np.array_split(labels, partition[:-1])
print("featureTrain", featureTrain.shape)
print("featureTest", featureTest.shape)
print("labelTrain", labelTrain.shape)
print("labelTest", labelTest.shape)
# Train the KNN model:
print("Training KNN model")
knn = cv.ml.KNearest_create()
knn.train(featureTrain, cv.ml.ROW_SAMPLE, labelTrain)
# Test the created model with k=4 (per the assignment):
k=4
ret, prediction, neighbours, dist = knn.findNearest(featureTest, k)
# Compute the accuracy:
accuracy = (np.squeeze(prediction) == labelTest).mean() * 100
print("Accuracy k = {}: {}".format(k, accuracy))
# +
# Convert the thresholded images into a float32 array of 20x20 samples.
# imgThresh_list holds 3-channel RGB copies (made earlier for plotting), so
# take a single channel back out. The previous reshape((-1,20,20)) on the
# (10,20,20,3) array interleaved channel values into the pixel rows and
# produced 30 scrambled samples instead of the 10 actual digits.
imgThreshArr = np.array(imgThresh_list, np.float32)[..., 0]
print('Digits shape:', digits.shape)
print('My images shape:', imgThreshArr.shape)
# +
# Testing KNN Model
print('Testing New Images\n')
# New Test Label
# One label per photo: 0..9 repeated evenly across however many samples exist.
DIGITS_CLASS = 10
repeatNum = len(imgThreshArr) / DIGITS_CLASS
labels = np.repeat(np.arange(DIGITS_CLASS), repeatNum)
# print("Test labels", labels.shape)
# New Test Features
# Flatten each 20x20 sample to a 400-value vector, matching the training data.
features = []
for digit in imgThreshArr:
    img_pixel = np.float32(digit.flatten())
    features.append(img_pixel)
features = np.squeeze(features)
print("Original featureTest", featureTest.shape)
print("Original labelTest", labelTest.shape)
print("New featureTest", features.shape)
print("New labelTest", labels.shape)
# Classify the handwritten photos with the model trained above (k=4).
ret, prediction, neighbours, dist = knn.findNearest(features, k)
accuracy = (np.squeeze(prediction) == labels).mean() * 100
print("\nk = {}: Accuracy {:.2f} %".format(k, accuracy))
| assignments/essential/assignment_16.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ACP
# + [markdown] tags=["hide"]
# Must see:
#
# * **EXCELLENT TUTORIAL**: https://stats.stackexchange.com/questions/2691/making-sense-of-principal-component-analysis-eigenvectors-eigenvalues
# * **EXCELLENT TUTORIAL**: https://plot.ly/ipython-notebooks/principal-component-analysis/
# * https://stackoverflow.com/questions/18299523/basic-example-for-pca-with-matplotlib
# + tags=["hide"]
# %matplotlib inline
# +
import numpy as np
import matplotlib.pyplot as plt  # fix: plt is used below but was never imported
from sklearn.preprocessing import StandardScaler
# Population parameters: zero mean, covariance with correlated components.
mu = np.zeros(2)
cov = np.array([[1.07, 0.63],
                [0.63, 0.64]])
print("Cov:", cov)
# Check eigen vectors #################################
theorical_eigen_val, theorical_eigen_vect = np.linalg.eig(cov)
print("Theorical eigen vectors:", theorical_eigen_vect)
print("Theorical eigen values:", theorical_eigen_val)
# Make samples ########################################
X = np.random.multivariate_normal(mean=mu, cov=cov, size=[100])
#print("X:", X)
# Standardizing data ##################################
# TODO: explain why it's required...
X = StandardScaler().fit_transform(X)
# Compute the covariance matrix #######################
empirical_cov = np.cov(X.T)
print("Empirical cov:", empirical_cov)
# Compute eigen vectors ###############################
empirical_eigen_val, empirical_eigen_vect = np.linalg.eig(empirical_cov)
print("Empirical eigen vectors:", empirical_eigen_vect)
print("Empirical eigen values:", empirical_eigen_val)
# Project data in the new space #######################
projected_data = np.dot(X, empirical_eigen_vect)
# Plot ################################################
fig, ax = plt.subplots(figsize=(8, 8))
ax.plot(*X.T, '.r')
p1x = X[:,0].min()
p2x = X[:,0].max()
# Draw each principal axis as an arrow scaled by the mean projected std dev.
for axis in empirical_eigen_vect:
    start, end = mu, mu + projected_data.std(axis=0).mean() * axis
    ax.annotate(
        '', xy=end, xycoords='data',
        xytext=start, textcoords='data',
        arrowprops=dict(facecolor='red', width=2.0))
# Extend the two principal axes as full lines across the data's x-range.
ex, ey = empirical_eigen_vect[:,0]
p1y = ex * p1x / ey
p2y = ex * p2x / ey
ax.plot([p1x, p2x], [p1y, p2y], '-b')
ex, ey = empirical_eigen_vect[:,1]
p1y = ex * p1x / ey
p2y = ex * p2x / ey
ax.plot([p1x, p2x], [p1y, p2y], ':b')
ax.axis('equal');
# -
plt.plot(projected_data, '.r')
| nb_sci_maths/maths_pca_fr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Eager Execution 튜토리얼: 기초
#
# 본 노트북은 텐서플로우의 eager execution의 기능을 소개하기 위한 기초 자료입니다. 다음과 같은 내용을 포함하고 있습니다:
# * 필요한 패키지 불러오기
# * eager execution 활성화
# * TensorFlow 텐서와 변수를 만들고 사용하기
# * TensorFlow와 상호작용하며 사용하기
# * eager execution 활성화 상태에서 GPU 사용하기
#
# 본 노트북은 그레디언트와 같은 모델링 토픽은 다루고 있지 않습니다.
# # Step 1: Eager 불러오기
# eager execution을 위해서 다음과 같이 import 하세요:
# +
# NOTE(review): this notebook targets TensorFlow 1.x — tf.contrib and
# tf.enable_eager_execution() were removed in TF 2.x, where eager mode is
# enabled by default.
from __future__ import absolute_import, division, print_function
import os
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.contrib.eager as tfe
tf.enable_eager_execution()
print("TensorFlow version: {}".format(tf.VERSION))
print("Eager execution: {}".format(tf.executing_eagerly()))
# +
# Under eager execution, ops run immediately and print concrete values.
print(tf.add(1, 2))
print(tf.add([1, 2], [3, 4]))
print(tf.square(5))
print(tf.reduce_sum([1, 2, 3]))
print(tf.encode_base64("hello world"))
print("")
# Python operators work directly on tensors.
x = tf.constant(2)
y = tf.constant(3)
print(x * y + 1)
# Most TensorFlow operations are directly usable with eager execution
# and return their values immediately, like this:
print(tf.contrib.signal.hamming_window(x * y + 1))
# +
import numpy as np
# TensorFlow ops accept numpy arrays directly.
ones = np.ones([3, 3])
print("1로 구성된 numpy 3x3행렬은:")
print(ones)
print("")
print("42를 곱하면:")
print(tf.multiply(ones, 42))
# -
# Create a scalar float32 variable initialized to zero.
x = tf.get_variable(name="x", shape=[], dtype=tf.float32, initializer=tf.zeros_initializer)
# +
# This statement does not print the variable's actual value:
print("Printing a TensorFlow Variable:")
print(x)
print("")
# A TensorFlow variable represents a reference to a tensor.
# The `read_value()` function provides access to the variable's current value.
# TensorFlow variables are initialized automatically, as defined in tf.get_variable().
print("Printing a TensorFlow Variable's value using .read_value():")
print(x.read_value())
print("")
print("Printing a TensorFlow Variable's value using .read_value().numpy():")
print(x.read_value().numpy())
# +
# In-place assignment and increment:
x.assign(42)
print(x.read_value())
x.assign_add(3)
print(x.read_value())
# +
print(x + 3)
# This broadcasts automatically over a list of numbers.
print(x * [1, 2, 4])
# -
vector = tf.constant([10.0, 20.0, 30.0, 40.0])
# This works fine: `begin` and `size` (the 2nd and 3rd arguments) fall
# within the bounds of `vector`.
print(tf.slice(vector, [1], [3]))
# This does not work:
# the 3rd argument `size` requests an index past the end of `vector`.
# With eager execution the error is raised immediately.
try:
    print(tf.slice(vector, [1], [4]))
except tf.OpError as e:
    print("Caught error: %s" % e)
# +
# This example only runs when the notebook has a CUDA GPU available.
# The statement below checks for that.
is_gpu_available = tfe.num_gpus() > 0
# Create some random tensors.
SIZE = 1000
cpu_tensor = tf.random_normal([SIZE, SIZE])
if is_gpu_available:
    gpu_tensor = cpu_tensor.gpu()
else:
    print("GPU not available.")
# +
# Time a CPU-based matrix multiplication.
print("CPU에서 matmul에 걸리는 시간:")
# %time tf.matmul(cpu_tensor, cpu_tensor)
# +
# Time a GPU-based matrix multiplication.
if is_gpu_available:
    # The first call is slow because of GPU initialization:
    print("GPU에서 첫번째 matmul에 걸리는 시간:")
    # %time tf.matmul(gpu_tensor, gpu_tensor)
    print()
    # Subsequent calls are much faster:
    print("GPU에서 두번째 matmul에 걸리는 시간:")
    # %time tf.matmul(gpu_tensor, gpu_tensor)
# +
# Use the second timing as the GPU demo; the first just covers initialization:
cpu_tensor = tf.random_normal([SIZE, SIZE])
print("CPU에서 matmul에 걸리는 시간:")
# %time tf.matmul(cpu_tensor, cpu_tensor)
print()
if is_gpu_available:
    gpu_tensor = cpu_tensor.gpu()
    print("GPU에서 matmul에 걸리는 시간:")
    # %time tf.matmul(gpu_tensor, gpu_tensor)
# -
| examples/notebooks/1_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Kaggle Titanic competition
# https://www.kaggle.com/c/titanic
# +
#Import pandas and matplotlib
import pandas as pd
import matplotlib.pyplot as plt
import re
#Render matplotlibs inside the notebook
# %matplotlib inline
#Change default matplotlib style
pd.set_option('display.mpl_style', 'default')
plt.rcParams['figure.figsize'] = (15, 5)
# -
#Load train and test data, tagging each row with its origin so the combined
#frame can be split back apart after shared feature engineering.
df = pd.read_csv("raw_data/train.csv")
df['dataset'] = 'train'
test = pd.read_csv("raw_data/test.csv")
test['dataset'] = 'test'
#Merge dataframes (so we apply the same transformations to both datasets)
df = df.append(test, ignore_index=True)
#Assign passenger id as index column
df.index = df['PassengerId']
#Drop passenger id column
df.drop(['PassengerId'], axis=1, inplace=True)
#Print columns information
df.info()
#Take a look at the data
df.head()
# ## Cabin
# https://www.kaggle.com/c/titanic/forums/t/4693/is-cabin-an-important-predictor/25690
#Fill missing cabins with 'U' (unknown). Use .loc instead of the original
#chained assignment (df.Cabin[mask] = ...), which can write into a temporary
#copy (SettingWithCopy) rather than into df itself.
df.loc[df.Cabin.isnull(), 'Cabin'] = 'U'
#First letter of the cabin code is the deck
df['deck'] = df.Cabin.map(lambda x: x[0])
df.drop(['Cabin'], axis=1, inplace=True)
# ## Ticket
# +
#ticket_prefix = df.Ticket.map(lambda x: x[:2])
#This isn't necessary
#df['ticket_prefix'] = ticket_prefix
#pd.crosstab(ticket_prefix, df.Survived)
# -
# ## Name
#Let's take a look at some of the names
df.Name.head(4)
# Each name has a title (like Mrs or Mr), maybe that helps to predict survival.
#For each name, extract the title: the text between the first comma and the
#following period, e.g. "Braund, Mr. Owen Harris" -> "Mr"
name_title = df.Name.map(lambda name: re.search('.*,{1}\s{1}([a-zA-Z\s]+)\.{1}.*', name).group(1))
df['name_title'] = name_title
#Create a table to compare it with survival (columns 0/1 = died/survived)
np_tab = pd.crosstab(name_title, df.Survived)
np_tab
# Seems like being a Mr does not help to survive (that's a proxy for 2-class male), on the other side being a Miss or Mrs helps a lot. Let's compute some features using this new information.
set(name_title)
'''is_man = name_title.isin(['Capt', 'Don', 'Rev', 'Mr', 'Dr', 'Col', 'Major', 'Master', 'Ms'])
is_woman = name_title.isin(['Miss', 'Mrs', 'Dona'])
is_sir = name_title.isin(['Sir'])
is_lady = name_title.isin(['Jonkheer', 'Mme', 'Lady', 'Mlle', 'the Countess'])
name_title[is_man] = 'man'
name_title[is_woman] = 'woman'
name_title[is_sir] = 'sir'
name_title[is_lady] = 'lady'
df['name_title'] = name_title'''
#Row totals (died + survived) per title
sums = np_tab.apply(lambda row: row[0]+row[1], axis=1)
n_passengers = df.shape[0]
#Percentage columns per title
np_tab['percentage_not_survived'] = np_tab.loc[:,0]*100/sums
np_tab['percentage_survived'] = np_tab.iloc[:,1]*100/sums
#np_tab.drop(np_tab.columns[[0,1]], axis=1, inplace=True)
#DataFrame.sort was deprecated and later removed; sort_values (available
#since pandas 0.17) is the supported equivalent.
np_tab = np_tab.sort_values(['percentage_survived'])
np_tab
# ## Fare
#Inspect rows with a missing fare (one 3rd-class passenger, id 1044)
df[df.Fare.isnull()]
df.boxplot(column='Fare', by='Pclass')
#Impute the missing fare with the 3rd-class median
df.loc[1044,'Fare'] = df[df.Pclass==3]['Fare'].median()
# ## Age
#Use .loc: the original chained form df[df.Fare.isnull()]['Fare'] = 1 assigned
#into a temporary copy and silently did nothing. (Either way this is a no-op
#here, since the only missing fare was just imputed above.)
df.loc[df.Fare.isnull(), 'Fare'] = 1
df.Age.describe()
#How many passengers are missing an age?
no_age = df[df.Age.isnull()]
no_age.shape
df.boxplot(column='Age', by='Pclass')
df.boxplot(column='Age', by='Sex')
# Let's use Pclass to estimate the age, using the median for each Pclass
#Median age per (Pclass, Sex) group; result is indexed by a (Pclass, Sex)
#MultiIndex with a single 'Age' column
median_ages = df[['Pclass','Age','Sex']].groupby(['Pclass','Sex']).median()
median_ages
def estimate_age(row, medians=None):
    """Return the row's Age, imputing the (Pclass, Sex) group median when missing.

    Parameters
    ----------
    row : pandas row (needs .Age, .Pclass and .Sex attributes)
    medians : optional DataFrame indexed by a (Pclass, Sex) MultiIndex with an
        'Age' column; defaults to the module-level `median_ages` table.

    Returns
    -------
    float : the original Age, or the group median when Age is null.
    """
    if medians is None:
        medians = median_ages
    if pd.isnull(row.Age):
        # .loc on the MultiIndex replaces the deprecated (and later removed)
        # .ix accessor chain used originally.
        return float(medians.loc[(row.Pclass, row.Sex), 'Age'])
    return row.Age
#Impute ages row by row and replace the raw Age column
df['EstimatedAge'] = df.apply(estimate_age, axis=1)
df.drop('Age', axis=1, inplace=True)
df.head()
# ## Embarked
#Two passengers have no embarkation port
df[df.Embarked.isnull()]
pd.crosstab(df.Embarked, df.Pclass)
pd.crosstab(df.Embarked, df.Survived)
#'S' is the most common port, so impute both missing rows with it
df.loc[61,'Embarked'] = 'S'
df.loc[829,'Embarked'] = 'S'
# ## SibSp and Parch
#Family size = siblings/spouses + parents/children aboard
df['FamSize'] = df.SibSp + df.Parch
df[['FamSize', 'Survived']][df.dataset=='train'].groupby('FamSize').count()
# ## Sex
df[['Sex', 'Survived']][df.dataset=='train'].groupby('Sex').count()
# ## Interaction features - Fare/Age
# +
#df['fare_over_age'] = df['Fare']/df['EstimatedAge']
# -
# ## Generate clean datasets
#One-hot encode each categorical column and join the dummies onto df
#Encode sex as dummies
sex_dummies = pd.get_dummies(df['Sex'])
df = df.join(sex_dummies)
#Encode embarked as dummy columns (prefixed 'embarked_')
embarked_dummies = pd.get_dummies(df['Embarked'], prefix='embarked')
df = df.join(embarked_dummies)
#Encode name_title as dummies
name_title_dummies = pd.get_dummies(df['name_title'])
df = df.join(name_title_dummies)
#Encode deck as dummies (prefixed 'deck_')
deck_dummies = pd.get_dummies(df['deck'], prefix='deck')
df = df.join(deck_dummies)
df.head()
#Drop the original categorical columns now that they are one-hot encoded
df.drop('Embarked', axis=1, inplace=True)
df.drop('Name', axis=1, inplace=True)
df.drop('Ticket', axis=1, inplace=True)
df.drop('Sex', axis=1, inplace=True)
df.drop('name_title', axis=1, inplace=True)
df.drop('deck', axis=1, inplace=True)
df.head()
#Split the merged frame back into train/test. Use .copy() so the inplace
#drops below act on real frames rather than views of df (the original
#triggered SettingWithCopy behavior).
train = df[df['dataset']=='train'].copy()
test = df[df['dataset']=='test'].copy()
train.drop('dataset', axis=1, inplace=True)
#Survived is unknown for the test set, so drop it there as well
test.drop(['dataset','Survived'], axis=1, inplace=True)
train.to_csv("train_clean.csv", index_label='PassengerId')
test.to_csv("test_clean.csv", index_label='PassengerId')
| data-exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import pickle
import torch
#Load a pickled policy/training snapshot and the progress log from an rlkit run
# NOTE(review): absolute user-specific paths; these cells only run on the
# original author's machine.
params = pickle.load(open('/Users/conor/Documents/PHD_RESEARCH/ACTIVE_SEARCH_AS_RL/rlkit/data/tabular-active-search-neg-entropy-low-noise/tabular_active_search_neg_entropy_low_noise_2020_11_09_15_07_08_0000--s-0/params.pkl','rb'))
data = pd.read_csv('/Users/conor/Documents/PHD_RESEARCH/ACTIVE_SEARCH_AS_RL/rlkit/data/tabular-active-search-neg-entropy-low-noise/tabular_active_search_neg_entropy_low_noise_2020_11_09_15_07_08_0000--s-0/progress.csv')
#Learning curve (this plot call is duplicated in the original notebook)
plt.plot(data['exploration/Average Returns'])
plt.plot(data['exploration/Average Returns'])
# +
n = 30
sigma = .1


def _two_hot(i, j):
    # Length-n pattern with ones at positions i and j (a single one when i == j).
    v = np.zeros(n)
    v[i] = 1
    v[j] = 1
    return tuple(v)


# Enumerate every support pattern with at most two active cells:
# 30 singletons (i == j) plus 435 unordered pairs -> 465 unique patterns.
sparse_vecs = {_two_hot(i, j) for i in range(n) for j in range(n)}
sparse_vecs = np.asarray(list(sparse_vecs))
# -
def gauss_pdf(x):
    """Unnormalized Gaussian likelihood of residual `x` under noise scale `sigma`.

    Uses the module-level `sigma`. The 1/(sigma*sqrt(2*pi)) normalizer is
    omitted; callers renormalize the posterior anyway.
    """
    z = x / sigma
    return np.exp(-0.5 * (z ** 2))
# Comparing Simple Sensing Strategies
# NOTE(review): stale out-of-order cell — `prior` and `beta` are not defined
# until the experiment loop below has run at least once.
np.all(sparse_vecs[np.argmax(prior)] == beta)
# Monte-Carlo experiment: NUM_TRIALS independent runs of NUM_OBSERVATIONS
# sensing steps for each of the 5 sensing strategies.
NUM_TRIALS = 100
NUM_OBSERVATIONS = 100
# results are indexed [trial, strategy, observation step]
entropy_results = np.zeros((NUM_TRIALS, 5, NUM_OBSERVATIONS))
recovery_results = np.zeros((NUM_TRIALS, 5, NUM_OBSERVATIONS))
for algo_type in [0,1,2,3,4]:
    for trial in range(NUM_TRIALS):
        # Uniform prior over all candidate sparse vectors; draw the true beta.
        prior = np.ones(len(sparse_vecs))/len(sparse_vecs)
        beta = np.asarray(sparse_vecs[np.random.choice(len(sparse_vecs))])
        entropies = []
        recoveries = []
        for i in range(NUM_OBSERVATIONS):
            # Choose the sensing interval according to the strategy.
            if algo_type == 0:
                # 0: persistent full sensing — average over all n cells.
                interval_start = 0
                interval_length = n
            elif algo_type == 1:
                # 1: persistent half sensing — always the second half.
                interval_start = int(n/2)
                interval_length = int(n/2)
            elif algo_type == 2:
                # 2: single-cell sweep — cycle through cells one at a time.
                interval_start = int(i%n)
                interval_length = 1
            elif algo_type == 3:
                # 3: uniformly random sensing region.
                # NOTE(review): when interval_start == n-1 this yields
                # interval_length == 0, i.e. a pure-noise observation.
                interval_start = np.random.randint(n)
                interval_length = np.random.randint(n-interval_start)
            elif algo_type == 4:
                # 4: uniformly random single cell.
                interval_start = np.random.randint(n)
                interval_length = 1
            # Sensing vector x averages the chosen interval; observe
            # y = beta . x + Gaussian noise.
            x = np.zeros(n)
            interval = np.ones(interval_length) / interval_length
            x[interval_start:interval_start+interval_length] = interval
            y = beta @ x + np.random.normal()*sigma
            # Exact Bayesian update over the discrete hypothesis set.
            posterior = np.zeros(len(sparse_vecs))
            for j in range(len(posterior)):
                posterior[j] = gauss_pdf( y - sparse_vecs[j].T @ x )*prior[j]
            posterior = posterior/np.sum(posterior)
            prior = posterior
            # Track posterior entropy and whether the MAP estimate equals beta.
            entropies.append(-np.log(prior) @ prior)
            recovery = int(np.all(sparse_vecs[np.argmax(prior)] == beta))
            recoveries.append(recovery)
        entropy_results[trial,algo_type,:] = entropies
        recovery_results[trial,algo_type,:] = recoveries
# Full-recovery curves (mean +/- sd across trials) for the five strategies.
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(recovery_results[:,0,:]).flatten(),ci='sd',label='Persistent Full Sensing')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(recovery_results[:,1,:]).flatten(),ci='sd',label='Persistent Half Sensing')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(recovery_results[:,3,:]).flatten(),ci='sd',label='Sensing Region Uniform Random')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(recovery_results[:,2,:]).flatten(),ci='sd',label='Single Cell Sweep')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(recovery_results[:,4,:]).flatten(),ci='sd',label='Single Cell Uniform Random')
plt.xlabel('Number of Sensing Actions')
plt.ylabel('Full Recovery')
_ = plt.title(r'$d=1,n=30,k=2, \sigma = 0.1$ Num Trials $= 100$')
plt.legend(loc='lower right')
# plt.savefig('entropy_reduction5.pdf')
# NOTE(review): the cells below reference `results` and `ylims`, which are
# not defined anywhere in this file — stale cells from an earlier session
# (presumably meant entropy_results).
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,0,:]).flatten(),ci='sd',label='Persistent Full Sensing')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,1,:]).flatten(),ci='sd',label='Persistent Half Sensing')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,3,:]).flatten(),ci='sd',label='Sensing Region Uniform Random')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,2,:]).flatten(),ci='sd',label='Single Cell Sweep')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,4,:]).flatten(),ci='sd',label='Single Cell Uniform Random')
plt.xlabel('Number of Sensing Actions')
plt.ylabel('Entropy of Posterior')
plt.ylim(ylims)
_ = plt.title(r'$d=1,n=30,k=2, \sigma = 0.1$ Num Trials $= 100$')
plt.savefig('entropy_reduction5.pdf')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,0,:]).flatten(),ci='sd',label='Persistent Full Sensing')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,1,:]).flatten(),ci='sd',label='Persistent Half Sensing')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,2,:]).flatten(),ci='sd',label='Single Cell Sweep')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,3,:]).flatten(),ci='sd',label='Sensing Region Uniform Random')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,4,:]).flatten(),ci='sd',label='Single Cell Uniform Random')
plt.xlabel('Number of Sensing Actions')
plt.ylabel('Entropy of Posterior')
_ = plt.title(r'$d=1,n=30,k=2, \sigma = 0.1$ Num Trials $= 100$')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,0,:]).flatten(),ci='sd',label='Persistent Half Sensing')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,1,:]).flatten(),ci='sd',label='Persistent Full Sensing')
plt.xlabel('Number of Sensing Actions')
plt.ylabel('Entropy of Posterior')
text = plt.title(r'$d=1,n=30,k=2, \sigma = 0.1$ Num Trials $= 100$')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,0,:]).flatten(),ci='sd',label='Persistent Sensing')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,1,:]).flatten(),ci='sd',label='Random Sensing')
plt.xlabel('Number of Sensing Actions')
plt.ylabel('Entropy of Posterior')
text = plt.title(r'$d=1,n=30,k=2, \sigma = 0.1$ Num Trials $= 100$')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,0,:]).flatten(),ci='sd',label='Persistent Sensing')
sns.lineplot(x=list(range(NUM_OBSERVATIONS))*NUM_TRIALS,y=np.asarray(results[:,1,:]).flatten(),ci='sd',label='Random Sensing')
plt.xlabel('Number of Sensing Actions')
plt.ylabel('Entropy of Posterior')
text = plt.title(r'$d=1,n=30,k=2,$ Num Trials $= 100$')
#Sanity check: the posterior sums to 1 after normalization
sum(posterior/np.sum(posterior))
# +
# Draw NUM_DRAWS hypotheses from the final posterior and overlay the truth.
NUM_DRAWS = 100
samples = []
for i in range(NUM_DRAWS):
    samples.append(np.asarray(sparse_vecs[np.random.choice(np.arange(len(sparse_vecs)), p=prior )]))
sns.lineplot(x=list(range(n))*NUM_DRAWS,y=np.asarray(samples).flatten(),ci='sd')
plt.plot(beta)
| .ipynb_checkpoints/tabular active search rl-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Description: Plot Figure 6 (time series plots of the dominant vorticity balance terms)
#
# Author: <NAME>
# E-mail: <EMAIL>
# Date: April/2020
# -
import numpy as np
import matplotlib.pyplot as plt
from xarray import open_dataset
import matplotlib.dates as mdates
from pandas import Timestamp
def snap_axes(ax1, ax2):
    """Glue `ax2` to the bottom of `ax1`.

    Moves ax2's top edge up to ax1's bottom edge so there is no vertical
    gap between the two axes (hspace of the pair becomes zero).
    """
    lower_pos = ax2.get_position()
    corners = lower_pos.get_points()
    # points are [[x0, y0], [x1, y1]]: set ax2's top y to ax1's bottom y
    corners[1][1] = ax1.get_position().get_points()[0][1]
    lower_pos.set_points(corners)
    ax2.set_position(lower_pos)
# +
# 800 m isobath******
# Along-isobath segment lengths, used below to convert the beta*V vorticity
# term into a volume transport [Sv] for selected segments.
# Length A-B = 3803 km
# Length WAP = 1841 km
# Length Weddell = 4889 km
# Length W-EA = 5796 km
# Length E-EA = 7786 km
# Length Ross = 3768 km
# Length circumpolar = 27318 km
Length_circ = 27318e3 # [m]
Length_AB = 3803e3 # [m]
Length_Weddell = 4889e3 # [m]
Length_WAP = 1841e3 # [m]
Length_WEA = 5796e3 # [m]
Length_EEA = 7786e3 # [m]
Length_Ross = 3768e3 # [m]
m3stoSv = 1e-6 # m^3/s -> Sverdrup
lat = -65 # reference latitude [deg]
omega = 7.292115e-5 # (1/s)
# Planetary beta at the reference latitude (Earth radius taken as 6400 km).
beta = 2*omega*np.cos(np.radians(lat))/6400e3 # [1/m/s]
# +
plt.close('all')
terms = ['Ibetav', 'Icurlvdiff', 'Icurlhdiff', 'Istretchp', 'Ires', 'Icurlnonl']
segments = ['Amundsen-Bellingshausen', 'WAP', 'Weddell', 'W-EA', 'E-EA', 'Ross']
head_data = "../../data_reproduce_figs/"
# Circumpolar circulation terms (loaded here for the shared time coordinate).
fname = head_data+'circulation_terms_circumpolar.nc'
ds = open_dataset(fname)
t = ds['t']
years = [Timestamp('2005-01-01'), Timestamp('2006-01-01'), Timestamp('2007-01-01'), Timestamp('2008-01-01'), Timestamp('2009-01-01')]
# +
# Figure layout: 7 panel pairs on an 8x2 grid (upper panel = dominant
# vorticity-balance terms, lower panel = beta*V alone, converted to a
# transport [Sv] for some segments). Each pair shares its x axis and is
# snapped together with snap_axes below.
fig = plt.figure(figsize=(7.5, 10))
shp = (8, 2)
ax1u = plt.subplot2grid(shp, (0, 0))
ax1l = plt.subplot2grid(shp, (1, 0), sharex=ax1u)
ax2u = plt.subplot2grid(shp, (2, 0))
ax2l = plt.subplot2grid(shp, (3, 0), sharex=ax2u)
ax3u = plt.subplot2grid(shp, (2, 1))
ax3l = plt.subplot2grid(shp, (3, 1), sharex=ax3u)
ax4u = plt.subplot2grid(shp, (4, 0))
ax4l = plt.subplot2grid(shp, (5, 0), sharex=ax4u)
ax5u = plt.subplot2grid(shp, (4, 1))
ax5l = plt.subplot2grid(shp, (5, 1), sharex=ax5u)
ax6u = plt.subplot2grid(shp, (6, 0))
ax6l = plt.subplot2grid(shp, (7, 0), sharex=ax6u)
ax7u = plt.subplot2grid(shp, (6, 1))
ax7l = plt.subplot2grid(shp, (7, 1), sharex=ax7u)
# Window size (in time steps) of the centered running mean applied below.
winsize = 15
# Circumpolar. *****
################################
ds = open_dataset(head_data+'circulation_terms_circumpolar.nc').rolling(t=winsize, center=True).mean()
ax1, ax2 = ax1u, ax1l
fac = 1e10
ax1.plot(t, ds['Icurlvdiff']*fac, 'k-', zorder=2, linewidth=1, label=r'VVIS$_\xi$')
ax1.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=1, label=r'$\beta V$')
ax1.plot(t, (ds['Icurlhdiff'] + ds['Istretchp'] + ds['Icurlnonl'] - ds['Ires'])*fac, 'r-', linewidth=1, label=r'HVIS$_\xi$ - $fw_I$ - NONL$_\xi$ - RES$_\xi$')
ax1.set_ylabel(r"[$10^{-10}$ m/s$^2$]", fontsize=8, fontweight='black')
fac = 1e11
ax2.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=0.8, label='_nolegend_')
ax2.axhline(linestyle='-', color='gray')
ax2.set_ylabel(r"[$10^{-11}$ m/s$^2$]", fontsize=8, fontweight='black')
xt, yt = 0.01, 0.83
ax1.text(xt, yt, "Circumpolar", fontsize=10, fontweight='black', transform=ax1.transAxes)
# Single legend for the whole figure, drawn beside the first panel pair;
# line widths are thickened and text colored to match each line.
dleg = 0.6
ax1.set_xlim(t.values[0], t.values[-1])
legh = ax1.legend(ncol=1, fontsize=13, loc=(1.05, -0.2-dleg), frameon=False)
for legobj, legtxt in zip(legh.legendHandles, legh.get_texts()):
    legobj.set_linewidth(2.0)
    legtxt.set_color(legobj.get_color())
legh = ax2.legend(ncol=1, fontsize=13, loc=(1.05, 0.8-dleg), frameon=False)
for legobj, legtxt in zip(legh.legendHandles, legh.get_texts()):
    legobj.set_linewidth(2.0)
    legtxt.set_color(legobj.get_color())
ax1.set_xlim(t.values[0], t.values[-1])
ax2.xaxis.set_ticks(years)
ax2.xaxis.set_ticklabels([])
snap_axes(ax1, ax2) # Set hspace of each pair of axes to zero.
# # Amundsen-Bellingshausen. *****
# ################################
ds = open_dataset(head_data+'circulation_terms-Amundsen-Bellingshausen.nc').rolling(t=winsize, center=True).mean()
ax1, ax2 = ax2u, ax2l
fac = 1e10
ax1.plot(t, ds['Icurlvdiff']*fac, 'k-', zorder=2, linewidth=1, label=r'VVIS$_\xi$')
ax1.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=1, label=r'$\beta V$')
ax1.plot(t, (ds['Icurlhdiff'] + ds['Istretchp'] + ds['Icurlnonl'] - ds['Ires'])*fac, 'r-', linewidth=1, label=r'HVIS$_\xi$ - $fw_I$ - NONL$_\xi$ - RES$_\xi$')
ax1.set_ylabel(r"[$10^{-10}$ m/s$^2$]", fontsize=8, fontweight='black')
xt, yt = 0.01, 0.83
ax1.text(xt, yt, "Amundsen-Bellingshausen", fontsize=10, fontweight='black', transform=ax1.transAxes)
# fac = 1e11
# Lower panel in transport units: beta*V scaled by segment length / beta -> Sv.
fac = Length_AB*m3stoSv/beta
ax2.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=0.8, label=r'$\beta V$')
ax2.axhline(linestyle='-', color='gray')
ax2.set_ylabel(r"[Sv]", fontsize=8, fontweight='black')
ax1.set_xlim(t.values[0], t.values[-1])
ax2.xaxis.set_ticks(years)
ax2.xaxis.set_ticklabels([])
snap_axes(ax1, ax2) # Set hspace of each pair of axes to zero.
# Add horizontal line indicating TSB-based transport estimate from Table 2.
ax2.axhline(y=-8.5, linestyle='--', color='m')
# WAP. *****
################################
ds = open_dataset(head_data+'circulation_terms-WAP.nc').rolling(t=winsize, center=True).mean()
ax1, ax2 = ax3u, ax3l
fac = 1e10
ax1.plot(t, ds['Icurlvdiff']*fac, 'k-', zorder=2, linewidth=1, label=r'VVIS$_\xi$')
ax1.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=1, label=r'$\beta V$')
ax1.plot(t, (ds['Icurlhdiff'] + ds['Istretchp'] + ds['Icurlnonl'] - ds['Ires'])*fac, 'r-', linewidth=1, label=r'HVIS$_\xi$ - $fw_I$ - NONL$_\xi$ - RES$_\xi$')
ax1.set_ylabel(r"[$10^{-10}$ m/s$^2$]", fontsize=8, fontweight='black')
xt, yt = 0.01, 0.83
ax1.text(xt, yt, "WAP", fontsize=10, fontweight='black', transform=ax1.transAxes)
# WAP lower panel stays in acceleration units (no Sv conversion here).
fac = 1e10
ax2.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=0.8, label=r'$\beta V$')
ax2.axhline(linestyle='-', color='gray')
ax2.set_ylabel(r"[$10^{-10}$ m/s$^2$]", fontsize=8, fontweight='black')
ax1.set_xlim(t.values[0], t.values[-1])
ax2.xaxis.set_ticks(years)
ax2.xaxis.set_ticklabels([])
snap_axes(ax1, ax2) # Set hspace of each pair of axes to zero.
# Weddell. *****
################################
ds = open_dataset(head_data+'circulation_terms-Weddell.nc').rolling(t=winsize, center=True).mean()
ax1, ax2 = ax4u, ax4l
fac = 1e10
ax1.plot(t, ds['Icurlvdiff']*fac, 'k-', zorder=2, linewidth=1, label=r'VVIS$_\xi$')
ax1.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=1, label=r'$\beta V$')
ax1.plot(t, (ds['Icurlhdiff'] + ds['Istretchp'] + ds['Icurlnonl'] - ds['Ires'])*fac, 'r-', linewidth=1, label=r'HVIS$_\xi$ - $fw_I$ - NONL$_\xi$ - RES$_\xi$')
ax1.set_ylabel(r"[$10^{-10}$ m/s$^2$]", fontsize=8, fontweight='black')
xt, yt = 0.01, 0.83
ax1.text(xt, yt, "Weddell", fontsize=10, fontweight='black', transform=ax1.transAxes)
fac = 1e10
ax2.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=0.8, label=r'$\beta V$')
ax2.axhline(linestyle='-', color='gray')
ax2.set_ylabel(r"[$10^{-10}$ m/s$^2$]", fontsize=8, fontweight='black')
ax1.set_xlim(t.values[0], t.values[-1])
ax2.xaxis.set_ticks(years)
ax2.xaxis.set_ticklabels([])
snap_axes(ax1, ax2) # Set hspace of each pair of axes to zero.
# W-EA. *****
################################
ds = open_dataset(head_data+'circulation_terms-W-EA.nc').rolling(t=winsize, center=True).mean()
ax1, ax2 = ax5u, ax5l
fac = 1e10
ax1.plot(t, ds['Icurlvdiff']*fac, 'k-', zorder=2, linewidth=1, label=r'VVIS$_\xi$')
ax1.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=1, label=r'$\beta V$')
ax1.plot(t, (ds['Icurlhdiff'] + ds['Istretchp'] + ds['Icurlnonl'] - ds['Ires'])*fac, 'r-', linewidth=1, label=r'HVIS$_\xi$ - $fw_I$ - NONL$_\xi$ - RES$_\xi$')
ax1.set_ylabel(r"[$10^{-10}$ m/s$^2$]", fontsize=8, fontweight='black')
xt, yt = 0.01, 0.83
ax1.text(xt, yt, "W-EA", fontsize=10, fontweight='black', transform=ax1.transAxes)
# fac = 1e11
fac = Length_WEA*m3stoSv/beta
ax2.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=0.8, label=r'$\beta V$')
ax2.axhline(linestyle='-', color='gray')
ax2.set_ylabel(r"[Sv]", fontsize=8, fontweight='black')
ax1.set_xlim(t.values[0], t.values[-1])
ax2.xaxis.set_ticks(years)
ax2.xaxis.set_ticklabels([])
snap_axes(ax1, ax2) # Set hspace of each pair of axes to zero.
# Add horizontal line indicating TSB-based transport estimate from Table 2.
ax2.axhline(y=2.1, linestyle='--', color='m')
# E-EA. *****
################################
ds = open_dataset(head_data+'circulation_terms-E-EA.nc').rolling(t=winsize, center=True).mean()
ax1, ax2 = ax6u, ax6l
fac = 1e10
ax1.plot(t, ds['Icurlvdiff']*fac, 'k-', zorder=2, linewidth=1, label=r'VVIS$_\xi$')
ax1.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=1, label=r'$\beta V$')
ax1.plot(t, (ds['Icurlhdiff'] + ds['Istretchp'] + ds['Icurlnonl'] - ds['Ires'])*fac, 'r-', linewidth=1, label=r'HVIS$_\xi$ - $fw_I$ - NONL$_\xi$ - RES$_\xi$')
ax1.set_ylabel(r"[$10^{-10}$ m/s$^2$]", fontsize=8, fontweight='black')
xt, yt = 0.01, 0.83
ax1.text(xt, yt, "E-EA", fontsize=10, fontweight='black', transform=ax1.transAxes)
# fac = 1e11
fac = Length_EEA*m3stoSv/beta
ax2.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=0.8, label=r'$\beta V$')
ax2.axhline(linestyle='-', color='gray')
ax2.set_ylabel(r"[Sv]", fontsize=8, fontweight='black')
ax1.set_xlim(t.values[0], t.values[-1])
ax2.xaxis.set_ticks(years)
ax2.xaxis.set_ticklabels([])
snap_axes(ax1, ax2) # Set hspace of each pair of axes to zero.
# Add horizontal line indicating TSB-based transport estimate from Table 2.
ax2.axhline(y=-11.8, linestyle='--', color='m')
# Ross. *****
################################
ds = open_dataset(head_data+'circulation_terms-Ross.nc').rolling(t=winsize, center=True).mean()
ax1, ax2 = ax7u, ax7l
fac = 1e10
ax1.plot(t, ds['Icurlvdiff']*fac, 'k-', zorder=2, linewidth=1, label=r'VVIS$_\xi$')
ax1.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=1, label=r'$\beta V$')
ax1.plot(t, (ds['Icurlhdiff'] + ds['Istretchp'] + ds['Icurlnonl'] - ds['Ires'])*fac, 'r-', linewidth=1, label=r'HVIS$_\xi$ - $fw_I$ - NONL$_\xi$ - RES$_\xi$')
ax1.set_ylabel(r"[$10^{-10}$ m/s$^2$]", fontsize=8, fontweight='black')
xt, yt = 0.01, 0.83
ax1.text(xt, yt, "Ross", fontsize=10, fontweight='black', transform=ax1.transAxes)
# fac = 1e11
fac = Length_Ross*m3stoSv/beta
ax2.plot(t, -ds['Ibetav']*fac, 'm-', linewidth=0.8, label=r'$\beta V$')
ax2.axhline(linestyle='-', color='gray')
ax2.set_ylabel(r"[Sv]", fontsize=8, fontweight='black')
ax1.set_xlim(t.values[0], t.values[-1])
ax2.xaxis.set_ticks(years)
ax2.xaxis.set_ticklabels([])
snap_axes(ax1, ax2) # Set hspace of each pair of axes to zero.
# Add horizontal line indicating TSB-based transport estimate from Table 2.
ax2.axhline(y=-1.0, linestyle='--', color='m')
# Point all ticks inward, then decrease the size of the yticklabels.
# NOTE(review): ax6l and ax7l are skipped in the tickdir settings below —
# likely an oversight, since they do get the labelsize treatment.
ax1u.xaxis.set_tick_params(tickdir='in')
ax1l.xaxis.set_tick_params(tickdir='in')
ax2u.xaxis.set_tick_params(tickdir='in')
ax2l.xaxis.set_tick_params(tickdir='in')
ax3u.xaxis.set_tick_params(tickdir='in')
ax3l.xaxis.set_tick_params(tickdir='in')
ax4u.xaxis.set_tick_params(tickdir='in')
ax4l.xaxis.set_tick_params(tickdir='in')
ax5u.xaxis.set_tick_params(tickdir='in')
ax5l.xaxis.set_tick_params(tickdir='in')
ax6u.xaxis.set_tick_params(tickdir='in')
ax7u.xaxis.set_tick_params(tickdir='in')
ax1u.yaxis.set_tick_params(tickdir='in')
ax1l.yaxis.set_tick_params(tickdir='in')
ax2u.yaxis.set_tick_params(tickdir='in')
ax2l.yaxis.set_tick_params(tickdir='in')
ax3u.yaxis.set_tick_params(tickdir='in')
ax3l.yaxis.set_tick_params(tickdir='in')
ax4u.yaxis.set_tick_params(tickdir='in')
ax4l.yaxis.set_tick_params(tickdir='in')
ax5u.yaxis.set_tick_params(tickdir='in')
ax5l.yaxis.set_tick_params(tickdir='in')
ax6u.yaxis.set_tick_params(tickdir='in')
ax7u.yaxis.set_tick_params(tickdir='in')
ax1u.yaxis.set_tick_params(labelsize=7)
ax1l.yaxis.set_tick_params(labelsize=7)
ax2u.yaxis.set_tick_params(labelsize=7)
ax2l.yaxis.set_tick_params(labelsize=7)
ax3u.yaxis.set_tick_params(labelsize=7)
ax3l.yaxis.set_tick_params(labelsize=7)
ax4u.yaxis.set_tick_params(labelsize=7)
ax4l.yaxis.set_tick_params(labelsize=7)
ax5u.yaxis.set_tick_params(labelsize=7)
ax5l.yaxis.set_tick_params(labelsize=7)
ax6u.yaxis.set_tick_params(labelsize=7)
ax6l.yaxis.set_tick_params(labelsize=7)
ax7u.yaxis.set_tick_params(labelsize=7)
ax7l.yaxis.set_tick_params(labelsize=7)
# Year labels only on the bottom-left panel; hide the first year.
yearsl = [a.strftime("%Y") for a in years]
yearsl[0] = ''
ax6l.xaxis.set_ticklabels(yearsl)
# NOTE(review): passing x=1 as a text property applies it to every tick
# label — confirm the intended effect.
ax6l.xaxis.set_ticklabels(ax6l.xaxis.get_ticklabels(), x=1)
plt.show()
fig.savefig("fig06.png", dpi=300, bbox_inches="tight")
| plot_figs/fig06/fig06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import emat
import pandas as pd
# # TableParser Example
#
# In this notebook, we will illustrate the use of a TableParser with
# a few simple examples.
from emat.model.core_files.parsers import (
TableParser,
loc, loc_sum, loc_mean,
iloc, iloc_sum, iloc_mean
)
# ## Parsing a Labeled Table
#
# First, let's consider a TableParser for extracting values from a
# simple CSV table of traffic counts by time period. We'll begin
# by writing such a table as a temporary file to be processed:
# +
# Write a small sample CSV of traffic counts by time period to parse below.
sample_file_labeled_table = """
LinkID,Count_AM,Count_MD,Count_PM,Count_EV
123,3498,2340,3821,1820
234,4011,2513,4101,1942
345,386,103,441,251
"""
with open('/tmp/emat_sample_file_labeled_table.csv', 'wt') as f:
    f.write(sample_file_labeled_table)
# -
# If we wanted to read this table one time, we could easily
# do so using `pandas.read_csv`:
df = pd.read_csv('/tmp/emat_sample_file_labeled_table.csv', index_col='LinkID')
df
# It is then simple to manually extract individual values by label,
# or by position, or we could extract a row total to get a daily
# total count for a link, or take the mean of a column:
{
    'A': df.loc[123,'Count_AM'], # by label
    'B': df.iloc[1,0], # by position
    'C': df.loc[345,:].sum(), # sum a row
    'D': df.iloc[:,1].mean(), # mean of a column
}
# The `TableParser` object makes it easy to combine these instructions
# to extract the same values from the same file in any model run.
# The extraction instructions mirror the manual pandas lookups above.
parser = TableParser(
    'emat_sample_file_labeled_table.csv',
    {
        'A': loc[123,'Count_AM'], # by label
        'B': iloc[1,0], # by position
        'C': loc_sum[345,:], # sum a row
        'D': iloc_mean[:,1], # mean of a column
    },
    index_col='LinkID',
)
# We can now execute all these instructions by using the `read` method
# of the parser.
parser.read(from_dir='/tmp')
# Using the `TableParser` has some advantages over just writing a custom
# function for each table to be processed. The most important is that
# we do not need to actually parse anything to access the names of the
# keys available in the parser's output.
parser.measure_names
# ## Parsing Labeled Values
#
# The `TableParser` can also be used to read performance measures
# from a file that contains simply a list of labeled values, as
# this can readily be interpreted as a table with one index column
# and a single data column.
# +
sample_file_labeled_values = """
Mean Highway Speed (mph),56.34
Mean Arterial Speed (mph),31.52
Mean Collector Speed (mph),24.80
"""
with open('/tmp/emat_sample_file_labeled_values.csv', 'wt') as f:
    f.write(sample_file_labeled_values)
# -
# Reading this file with `pandas.read_csv` can be done neatly
# by giving a few extra keyword arguments:
pd.read_csv(
    '/tmp/emat_sample_file_labeled_values.csv',
    header=None,
    names=['Label','Value'],
    index_col=0,
)
# We can simply pass these same keyword arguments on to the `TableParser`,
# and proceed as above to define the values to extract.
parser = TableParser(
    'emat_sample_file_labeled_values.csv',
    {
        'Highway Speed': loc['Mean Highway Speed (mph)','Value']
    },
    header=None,
    names=['Label','Value'],
    index_col=0,
)
parser.read(from_dir='/tmp')
# ## Parsing an Unlabeled Array
#
# Lastly, the `TableParser` can be used to read performance measures
# from a file that contains an unlabeled array of values, as
# sometimes is generated from popular transportation modeling tools.
# +
sample_file_unlabeled_array = """
11,22,33
44,55,66
77,88,99
"""
with open('/tmp/emat_sample_file_unlabeled_array.csv', 'wt') as f:
    f.write(sample_file_unlabeled_array)
# -
# The labels are not required to read this data using `pandas.read_csv`,
# as a default set of row and column index labels are generated.
pd.read_csv(
    '/tmp/emat_sample_file_unlabeled_array.csv',
    header=None,
)
# But the table is loaded, and individual values or slices can be
# taken using the `iloc` tool (positional indexing only).
parser = TableParser(
    'emat_sample_file_unlabeled_array.csv',
    {
        'upper_left': iloc[0,0],
        'lower_right': iloc[-1,-1],
        'partial_row': iloc_sum[0,1:],
        'top_corner_sum': iloc[0,0] + iloc[0,-1],
    },
    header=None,
)
parser.read(from_dir='/tmp')
| docs/source/emat.models/table_parse_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import csv
import random
import nltk
import pandas as pd
import numpy as np
import scipy.stats
# +
#data prep
#read in csv, make dict with cats
#structure: 0 country | 1 date | 2 sect | 3 subsect | 4 text
rd = csv.reader(open('/Users/ewanog/Dropbox (Old)/DEEP dropbox/Assessment reports/GEO Data/2017-04-02-country-content-60-weeks.csv'))
cats = {}
for row in rd:
sect = row[2]
txt = row[4]
if sect not in cats:
cats[sect] = [txt]
else:
cats[sect].append(txt)
#counts
c=0
for k,v in cats.items():
print(k + ' ' + str(len(v)))
c+=len(v)
print('total count: ' + str(c))
print('all: ' + str(len(cats.values())))
#TODO: clean bad chars
#remove stopwords. STOP!!!!!!!!!ADSAAA!!W
#lower - then remove all the other calls
# -
#Peek at some Education-sector entries
cats['Education'][1:100]
# +
#show duplicates: unique-entry counts per sector (compare with raw counts above)
from pprint import pprint
c=0
for k,v in cats.items():
    print(k + ' ' + str(len((set(v)))))
    c+=len((set(v)))
print(c)
# +
#create random sampling groups
#TODO: should we do uniform random sampling per group or a whole scale random draw?
#return dicts with {sect: setted list of entries}
def gen_tt(corpus):
    """Split each category's entries into a random ~70/30 train/test partition.

    Parameters
    ----------
    corpus : dict mapping category -> list of text entries (may contain dups).
        The input lists are NOT modified.

    Returns
    -------
    (train, test) : two dicts with the same keys as `corpus`; values are
        disjoint lists of the deduplicated entries (~70% / ~30%).
    """
    train = {}
    test = {}
    for k, v in corpus.items():
        # Dedupe FIRST, then shuffle the deduplicated list. The original
        # shuffled `v` before set(), which (a) mutated the caller's lists and
        # (b) had no effect on the split, because set() discards list order.
        setv = list(set(v))
        random.shuffle(setv)
        len_test = int(len(setv)*.3)
        test[k] = setv[:len_test]
        train[k] = setv[len_test:]
    return train, test
#Build the 70/30 split from the scraped categories
train, test = gen_tt(cats)
# #check lengths
# for k in train.keys():
#     print(k + ' train : ' + str(len(train[k])))
#     print(k + ' test : ' + str(len(test[k])))
# +
#test sampling (if we do 70:30)
# NOTE(review): this toy cell rebinds `test` and `train`, clobbering the real
# split produced by gen_tt above — rerun the previous cell before continuing.
d = {
    'cat1': [random.randint(0,100) for i in range(100)],
    'cat2': [random.randint(0,100) for i in range(150)]
}
test = {}
train = {}
#add cats to test and train
for k in d.keys():
    test[k] = None
    train[k] = None
for k,v in d.items():
    len_test = int(len(v)*.3)
    print(len(v)*.3)
    test[k] = v[:len_test]
    train[k] = v[len_test:]
print(len(test))
print(len(train))
# +
#start building our model
#train and test are just subsets of cats (for now)
#make a feature with word presence
#TODO: what's python func?
#Flatten every deduplicated entry into a lowercase word list and count
#word frequencies across the whole corpus
all_words = []
for k,v in cats.items():
    for e in list(set(v)):
        for w in e.split(' '):
            all_words.append(w.lower())
freq_words = nltk.FreqDist(all_words)
def document_features(document):
    """Bag-of-words presence features for a single text entry.

    Returns a dict {'contains(word)': bool} over the first 2000 words of the
    module-level `freq_words` frequency distribution.

    NOTE(review): FreqDist key order is not frequency order; if the 2000
    *most frequent* words are intended, iterate freq_words.most_common(2000)
    instead.
    """
    # (Removed the stale commented-out list-flattening code that was here.)
    uniq_doc = set(document.split(' '))
    features = {}
    for word in list(freq_words.keys())[:2000]:
        features['contains(%s)' % word] = (word in uniq_doc)
    return features
#convert dicts into list of tuples we need
#TODO: func
#Flatten the {category: [entry, ...]} dicts into (category, entry) pairs
train_merge = []
test_merge = []
for k,v in train.items():
    for i in v:
        train_merge.append((k,i))
for k,v in test.items():
    for i in v:
        test_merge.append((k,i))
print(len(test_merge))
#(featureset, label) pairs as expected by the NLTK classifier API
wp_train = [(document_features(v), k) for k,v in train_merge]
wp_test = [(document_features(v), k) for k,v in test_merge]
nb_class = nltk.NaiveBayesClassifier.train(wp_train)
# wp_test[0]
# +
#results
#from just raw, unrefined features and 200 words: 0.398576512455516
nb_class.show_most_informative_features(100)
# +
# ['contains(hit)',
#  'contains(down)',
#  'contains(being)',
#  'contains(member.)',
#  'contains(17)',
#  'contains(other)',
#  'contains(air)',
#  'contains(redirect,)',
#  'contains(released)',
#  'contains(independence)',
#  'contains(until)',
#  'contains(the)']
# [k for k,v in wp_train[100][0].items() if v is True]
#Label of training example 100
wp_train[100][1]
# -
#Peek at one raw (category, entries) pair from the training split
for k,v in train.items():
    print(k)
    print(v)
    break
# NOTE(review): `wpfeatures` is undefined anywhere in this file — stale cell
# from an earlier session (probably meant wp_train).
wpfeatures[0][0]
| _playground/in_progress/GEO output.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pyplot
# `pyplot` is a context based functional API offering meaningful defaults. It's a concise API and very similar to matplotlib's pyplot. Users new to `bqplot` should use `pyplot` as a starting point. Users create figure and mark objects using `pyplot` functions.
#
# Steps for building plots in `pyplot`:
# 1. Create a figure object using plt.figure()
# * (Optional steps)
# * Scales can be customized using `plt.scales` function
# * Axes options can customized by passing a dict to `axes_options` argument in the marks' functions
# * Create marks using `pyplot` functions like `plot`, `bar`, `scatter` etc. (All the marks created will be automatically added to the figure object created in step 1)
# * Render the figure object using the following approaches:
# * Using `plt.show` function which renders the figure in the current context along with toolbar for panzoom etc.
# * Using display on the figure object created in step 1 (toolbar doesn't show up in this case)
#
# `pyplot` also offers many helper functions. A few are listed here:
# * plt.xlim: sets the domain bounds of the current 'x' scale
# * plt.ylim: sets the domain bounds of the current 'y' scale
# * plt.grids: shows/hides the axis grid lines
# * plt.xlabel: sets the X-Axis label
# * plt.ylabel: sets the Y-Axis label
# * plt.hline: draws a horizontal line at a specified level
# * plt.vline: draws a vertical line at a specified level
#
# Let's look at the same examples which were created in the [Object Model Notebook](Object Model.ipynb)
import bqplot.pyplot as plt
# +
# first, let's create two vectors x and y to plot using a Lines mark
import numpy as np

x = np.linspace(-10, 10, 100)
y = np.sin(x)
# 1. Create the figure object
fig = plt.figure(title="Simple Line Chart")
# 2. By default axes are created with basic defaults. If you want to customize the axes create
# a dict and pass it to the `axes_options` argument in the marks
axes_opts = {"x": {"label": "X"}, "y": {"label": "Y"}}
# 3. Create a Lines mark by calling plt.plot function
line = plt.plot(
    x=x, y=y, axes_options=axes_opts
)  # note that custom axes options are passed here
# 4. Render the figure using plt.show()
plt.show()
# -
# For creating other marks (like scatter, pie, bars, etc.), only step 2 needs to be changed. Let's look at a simple example that creates a bar chart:
# +
# first, let's create two vectors x and y to plot a bar chart
x = list("ABCDE")
y = np.random.rand(5)
# 1. Create the figure object
fig = plt.figure(title="Simple Bar Chart")
# 2. Customize the axes options (y values rendered as whole percentages)
axes_opts = {
    "x": {"label": "X", "grid_lines": "none"},
    "y": {"label": "Y", "tick_format": ".0%"},
}
# 3. Create a Bars mark by calling plt.bar function
bar = plt.bar(x=x, y=y, padding=0.2, axes_options=axes_opts)
# 4. directly display the figure object created in step 1 (note that the toolbar no longer shows up)
fig
# -
# Multiple marks can be rendered in a figure. It's as easy as creating marks one after another. They'll all be added to the same figure!
# +
# first, let's create two vectors x and y
import numpy as np

x = np.linspace(-10, 10, 25)
y = 3 * x + 5
y_noise = y + 10 * np.random.randn(25)  # add some random noise to y
# 1. Create the figure object
fig = plt.figure(title="Scatter and Line")
# 3. Create line and scatter marks — both are added to the same figure
# additional attributes (stroke_width, colors etc.) can be passed as attributes to the mark objects as needed
line = plt.plot(x=x, y=y, colors=["green"], stroke_width=3)
scatter = plt.scatter(x=x, y=y_noise, colors=["red"], stroke="black")
# setting x and y axis labels using pyplot functions. Note that these functions
# should be called only after creating the marks
plt.xlabel("X")
plt.ylabel("Y")
# 4. render the figure
fig
# -
# `pyplot` is a simpler and an intuitive API. It's available for all the marks except MarketMap. It should be used in almost all the cases by default since it offers a less verbose API compared to the Object Model. Please refer to the mark [examples](../Marks/Pyplot) using `pyplot` and also this [pyplot example](../Basic Plotting/Pyplot.ipynb) notebook
| examples/Tutorials/Pyplot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Machine Learning Engineer Capstone
# ## Preprocessing
#
# The preprocessing routine searches the ./data/videos dir and creates usable datasets for subsequent model learning tasks. It breaks down videos into one second clips and from those clips generates frames, spectrograms, and InceptionV3 feature vectors. Currently clips and frames aren't used directly and spectrograms aren't used at all, but they are kept for labeling, debugging, and future model upgrades. The feature vector is the only output used for learning. Feature vectors are generated in preprocessing because it drastically reduced training time which allows for a faster model iteration cycle. This methodology was inspired from this repo and associated blog post: https://github.com/harvitronix/five-video-classification-methods
# +
import glob
import subprocess
import json
import os
import csv
from tqdm import tnrange, tqdm_notebook
from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model, load_model
from keras.layers import Input
import numpy as np
# Adapted from https://github.com/harvitronix/five-video-classification-methods
class Extractor():
    """Extractor builds an inception model without the top classification
    layers and extracts a feature array from an image."""

    def __init__(self):
        # Get model with pretrained weights.
        # include_top=True keeps the named 'avg_pool' layer available; the
        # dense classification head above it is simply never used.
        base_model = InceptionV3(
            weights='imagenet',
            include_top=True
        )
        # We'll extract features at the final pool layer.
        self.model = Model(
            inputs=base_model.input,
            outputs=base_model.get_layer('avg_pool').output
        )

    def extract(self, image_path):
        # Load, batch, and normalize a single image, then return its
        # avg_pool activation (2048-dim per the model cell below).
        img = image.load_img(image_path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        # Get the prediction.
        features = self.model.predict(x)
        features = features[0]
        return features
def video_length(path):
    """Return the duration of the video at `path` in whole seconds.

    Uses ffprobe's JSON output; ffprobe must be on the PATH.
    """
    # FIX: pass the command as an argument list (shell=False) instead of
    # interpolating `path` unquoted into a shell string — the old form broke
    # on paths with spaces and allowed shell injection.
    cmd = ["ffprobe", "-i", path,
           "-show_entries", "format=duration", "-v", "quiet", "-of", "json"]
    pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout
    output = pipe.read()
    d = json.loads(output)
    s = d["format"]["duration"]
    return int(float(s))
def video_id(path):
    """returns the id of a video from a path in this format: ./data/videos/:video_id"""
    # Fourth path component is ":video_id.ext"; drop everything after the first dot.
    filename = path.split("/")[3]
    return filename.split(".")[0]
def clip_dir_path(path):
    """returns the path to dir containing all clips for a video ./data/clips/:video_id"""
    # Delegate id extraction to video_id() so the logic lives in one place.
    return "./data/clips/" + video_id(path)
def create_clips(path):
    """given a path to a video create_clips writes one sec video segments to disk
    in the following format ./data/clips/:video_id/:clip_id.mp4"""
    # create clip dir
    dir_path = clip_dir_path(path)
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    # create one sec clips from src
    # NOTE: xrange => this notebook targets Python 2 (see kernel "python2").
    video_len = video_length(path)
    for i in tqdm_notebook(xrange(video_len), desc="Clips for " + video_id(path)):
        clip_path = dir_path + "/" + '%05d' % i + ".mp4"
        # skip clips that already exist so the job is resumable
        if not os.path.exists(clip_path):
            # -ss <i> -t 1: cut a one-second segment starting at second i
            # NOTE(review): path is interpolated unquoted into a shell string;
            # breaks on paths containing spaces — confirm inputs are safe.
            cmd = "ffmpeg -v error -y -i " + path + " -ss " + str(i) + " -t 1 " + clip_path
            os.system(cmd)
def create_frames(path):
    """given a path to a video create_frames writes frames from previous generated
    clips. create_clips must be run before create_frames. Frames are saved in the
    following format ./data/frames/:video_id/:clip_id/:frame_id.jpg"""
    # create frame dir
    vid_id = video_id(path)
    dir_path = "./data/frames/" + vid_id
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    # create frames from clip
    video_len = video_length(path)
    for i in tqdm_notebook(xrange(video_len), desc="Frames for " + vid_id):
        clip_path = clip_dir_path(path) + "/" + '%05d' % i + ".mp4"
        frame_dir_path = dir_path + "/" + '%05d' % i
        if not os.path.exists(frame_dir_path):
            os.makedirs(frame_dir_path)
            # -r 5.0: sample five frames per one-second clip
            cmd = "ffmpeg -v error -y -i " + clip_path + " -r 5.0 " + frame_dir_path + "/%5d.jpg"
            os.system(cmd)
            # resize frames to 299x299 for InceptionV3
            frame_paths = glob.glob(frame_dir_path + "/*.jpg")
            for fi in xrange(len(frame_paths)):
                # FIX: use a dedicated name instead of rebinding the `path`
                # parameter; the original clobbered the function argument, so
                # later iterations derived clip paths from a .jpg path.
                frame_path = frame_paths[fi]
                # resize first
                cmd = "convert " + frame_path + " -resize 299x299 " + frame_path
                os.system(cmd)
                # add black background
                cmd = "convert " + frame_path + " -gravity center -background black -extent 299x299 " + frame_path
                os.system(cmd)
def create_spectrograms(path):
    """given a path to a video create_spectrograms writes spectrograms from previous generated
    clips. create_clips must be run before create_spectrograms. Spectrograms are saved in the
    following format ./data/audio/:video_id/:clip_id.png"""
    # create audio dir
    vid_id = video_id(path)
    dir_path = "./data/audio/" + vid_id
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    # create spectrogram from clip
    video_len = video_length(path)
    for i in tqdm_notebook(xrange(video_len), desc="Spectrograms for " + vid_id):
        clip_path = clip_dir_path(path) + "/" + '%05d' % i + ".mp4"
        spec_path = dir_path + "/" + '%05d' % i + ".png"
        # skip existing spectrograms so the job is resumable
        if not os.path.exists(spec_path):
            # showspectrumpic renders a 32x32 spectrogram image, no legend
            cmd = "ffmpeg -v error -y -i " + clip_path + " -lavfi showspectrumpic=s=32x32:legend=false " + spec_path
            os.system(cmd)
# Single shared extractor — loading InceptionV3 weights is expensive, so it
# is instantiated once at module level and reused for every frame.
extractor = Extractor()

def create_features(path):
    """given a path to a video create_features writes inceptionV3 feature outputs from previous generated
    clips. create_clips and create_frames must be run before create_features. Feature outputs are saved
    in the following format ./data/features/:video_id/:clip_id/:frame_id.txt.gz"""
    # create feature dir
    vid_id = video_id(path)
    dir_path = "./data/features/" + vid_id
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    # save feature array for every frame
    video_len = video_length(path)
    with tqdm_notebook(total=video_len, desc="Features for " + vid_id) as pbar:
        for root, dirs, files in os.walk('./data/frames/'+ vid_id):
            for f in files:
                if f.endswith(".jpg"):
                    frame_path = root + "/" + f
                    # mirror the frames/ tree under features/, swapping extensions
                    feature_path = frame_path.replace("frames", "features").replace("jpg", "txt.gz")
                    feature_dir = root.replace("frames", "features")
                    if not os.path.exists(feature_dir):
                        os.makedirs(feature_dir)
                    # skip already-extracted frames so the job is resumable
                    if not os.path.exists(feature_path):
                        features = extractor.extract(frame_path)
                        np.savetxt(feature_path, features)
            # one walked directory corresponds to one clip
            # (indentation reconstructed — confirm pbar advances once per directory)
            pbar.update(1)
# create assets from folder of videos. This takes a LONG TIME.
video_paths = glob.glob("./data/videos/*.mp4")
videos_len = len(video_paths)
for i in tqdm_notebook(xrange(videos_len), desc="Preprocessing Videos"):
    path = video_paths[i]
    # pipeline order matters: frames, spectrograms, and features all
    # depend on the one-second clips produced first
    create_clips(path)
    create_frames(path)
    create_spectrograms(path)
    create_features(path)
# -
# ## Create Labels
#
# Labels are generated from the labelmaker's csv output of its internal sqlite database. Labels are shuffled and divided into training, validation, and test sets at a ratio of roughly 3:1:1
# +
import pandas as pd
import glob
import numpy as np
# read in and shuffle data
labels = pd.read_csv("./labelmaker/labels.csv").as_matrix()
print "Labels Shape: {}".format(labels.shape)
np.random.seed(0)
np.random.shuffle(labels)
# split labels into train, validation, and test sets
div = len(labels) / 5
train_labels = labels[0:div*3,:]
val_labels = labels[div*3:div*4,:]
test_labels = labels[div*4:,:]
print "Trainging Labels Shape: {}".format(train_labels.shape)
print "Validation Labels Shape: {}".format(val_labels.shape)
print "Test Labels Shape: {}".format(test_labels.shape)
# -
# ## Model
#
# The Keras model is composed of a sequential model with two time sensitive LSTM layers followed by two Dense layers and an output layer. The initial input of (7, 2048) represents seven frames per clip each with a 2048 sized vector generated by InceptionV3. The final 4x1 output vector is the category prediction.
# +
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, Flatten, GRU
from keras import backend as K

# Input is (7, 2048): seven frames per clip, each a 2048-dim InceptionV3 vector.
model = Sequential([
    LSTM(512, return_sequences=True, input_shape=(7, 2048)),
    # NOTE(review): input_shape on a non-first layer is ignored by Keras.
    LSTM(512, return_sequences=True, input_shape=(7, 512)),
    Flatten(),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(512, activation='relu'),
    Dropout(0.5),
    # 4-way softmax over shot classes (forehand/backhand/volley/serve)
    Dense(4, activation='softmax')
])
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=['accuracy'])
print "Model Compiled"
# -
# ## Match Labels
#
# This routine retrieves the features from disk and pairs them with their one hot encoded labels. Currently all datasets are loaded into memory, but with enough videos, the code should be switched to using a Keras generator.
# +
def one_hot(i, num_classes=4):
    """Return a one-hot numpy vector of length `num_classes` with index `i` set.

    Generalizes the original hard-coded 4-class encoding; the default keeps
    existing call sites working unchanged.
    """
    return np.array([int(i == j) for j in range(num_classes)])
def get_features(labels):
    """Load the seven per-frame InceptionV3 feature vectors for every labeled clip.

    labels rows are (video_id, clip_id, label). Returns (x, y) where x has one
    (7, feature_dim) entry per clip and y holds the one-hot encoded labels.
    """
    x, y = [], []
    # FIX: the original reused `i` for both loops, shadowing the outer index;
    # xrange was also replaced with range, which is valid on Python 2 and 3.
    for row in range(len(labels)):
        video_id = labels[row][0]
        clip_id = labels[row][1]
        label = labels[row][2]
        features = []
        for frame in range(7):
            # frames are numbered starting at 1 on disk
            # NOTE(review): create_frames writes "%5d.jpg" names — confirm the
            # feature files really use the zero-padded '%05d' scheme read here.
            fname = "./data/features/" + video_id + "/" + '%05d' % clip_id + "/" + '%05d' % (frame+1) + ".txt.gz"
            f = np.loadtxt(fname)
            features.append(f)
        x.append(features)
        y.append(one_hot(label))
    x = np.array(x)
    return x, np.array(y)
print "Getting features"
X_train, Y_train = get_features(train_labels)
X_val, Y_val = get_features(val_labels)
print X_train.shape
print Y_train.shape
# -
# ## Training
#
# This routine trains the model and logs updates to the console and Tensorboard. After training is complete the model is saved using the current timestamp to distinguish training runs.
# +
from keras.callbacks import TensorBoard
import time
import numpy as np

# Log training curves and graph to ./logs for TensorBoard.
tensorboard = TensorBoard(log_dir='./logs',
                          histogram_freq=0,
                          write_graph=True,
                          write_images=True)
model.fit(X_train,
          Y_train,
          batch_size=100,
          epochs=30,
          verbose=2,
          callbacks=[tensorboard],
          validation_data=(X_val, Y_val))
# Timestamped filename distinguishes training runs from one another.
file_name = "shot_classifier_" + str(int(time.time())) + ".h5"
model.save(file_name)
print "Model Saved"
# -
# ## Prediction
#
# This routine tests the saved model using the Keras predict method. Overall accuracy and a confusion matrix are displayed to validate that the model is accurate against unseen data.
# +
from keras.models import load_model
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
def reverse_one_hot(val):
    """Return the index of the largest entry in `val` (first index on ties).

    Returns -1 for empty input, matching the original loop's behavior.
    """
    if len(val) == 0:
        return -1
    # max over indices keyed by value == first-occurrence argmax,
    # replacing the hand-rolled scanning loop
    return max(range(len(val)), key=lambda idx: val[idx])
def normalize_labels(Y):
    """Collapse every one-hot/score row in Y to its class index."""
    return np.array([reverse_one_hot(row) for row in Y])
X_test, Y_test = get_features(test_labels)
# NOTE(review): hard-coded checkpoint from one specific training run; update
# this filename after retraining.
model = load_model("shot_classifier_1501284149.h5")
Y_pred = model.predict(X_test, verbose=2)
# Convert one-hot / probability rows back to class indices for sklearn metrics.
Y_test_norm = normalize_labels(Y_test)
Y_pred_norm = normalize_labels(Y_pred)
print "Overall Accuracy: " + str(accuracy_score(Y_test_norm, Y_pred_norm))
con_m = confusion_matrix(Y_test_norm, Y_pred_norm)
titles = ["forehand", "backhand", "volley", "serve"]
# Print the confusion matrix cell by cell with readable class names.
for i in range(4):
    for j in range(4):
        actual = titles[i]
        predicted = titles[j]
        print "predicted " + predicted + " when " + actual + " " + str(con_m[i][j])
| project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # # Lists
# ### A list is similar to an array in C++, but it can also store multiple data types at the same time.
# +
# creating lists
a = [1,2,3]
print(type(a))
a1 = list()
print(a1)
a2 = list(a)
print(a2)
a4 = [ i for i in range(10)] ## for range from 0 to 10 set i
print(a4)
a5 = [ i*i for i in range(10)]
print(a5)
a6 = [1,2,"as",True]
print(a6)
# +
#how to access data
print(a[1])
print(a[-1])
# len of array
print("len : ",len(a))
# -
# ## 1) slicing and fast iteration in lists
# +
# slicing of array
print(a[1:2])
# fast iteration
for i in a :
print(i)
# -
# ## 2) string spliting
# +
## returns a list after splitting the string on the given separator
# FIX: renamed the variable from `str` to `text` so the builtin str() is
# no longer shadowed in this notebook's namespace.
text = " a abc d ef ghf "
print(text.split(" "))      # splitting on " " keeps empty strings between runs of spaces
text = " a df d dsds "
print(text.split())         # no argument: split on any whitespace, no empties
text = "a,bcd,dsd,dwd"
print(text.split(","))      # split on commas
# -
# ## 3) user input
# +
########## 1st method ###########
list1 = input().strip().split()
print(list1)
for i in range(len(list1)):
list1[i] = int(list1[i])
print(list1)
print()
########## 2nd method ###########
list = input().split()
print(list)
for i in range(len(list)):
list[i] = int(list[i])
print(list)
print()
########## 3rd method ###########
### one line for taking array input
arr = [int(x) for x in input().split()]
print(arr)
# -
# ## 4) add elements in a lists ( append , insert , extend )
# +
l = [1,2,3]
l.append(9)
print(l)
l.insert(1,243)
print(l)
l2 = [2,3,4]
l.extend(l2)
print(l)
# -
# ## 5) deleting elements ( pop , remove , del )
# +
print(l)
l.pop() ## without arguments its going to remove the last element
print(l)
l.pop(2) ## remove element at index 2
print(l)
l.remove(2) ## remove first occurance of that elements
print(l)
del l[0:2] ## delete this range from the list
print(l)
# -
# ## 6) concatenation of two lists
l = [1,2,3]
l = l + l  # + concatenates two lists
# l = l - l  ## lists do NOT support the - operator (raises TypeError)
print(l)
l*=3  # *= repeats the list three times in place
print(l)
# ## 7) some useful inbuild functions (sort , count , index , in , reverse , max , min , len)
# +
#sorting
l = [2,5,1,3,64,13,0,1]
l.sort()
print(l)
print(len(l))
# count , index , reverse
print(l.count(1)) ## count element 1 in the array
print(l.index(64)) ## find index of element
l.reverse() ## reverse the array
print(l)
if 64 in l: ## boolean function is present or not
print("found")
else:
print("not_found")
print(max(l)) ## find the maximum element in the list
print(min(l)) ## find the minimum element in the list
# -
# ## 8) bubble sort
# +
## bubble sort
# Reads whitespace-separated integers from stdin and sorts them in place.
l = [int(x) for x in input().split()]
print(l)
n = len(l)
for j in range(n-1):
    # after pass j the largest j+1 elements are in their final positions,
    # so the inner sweep can stop j elements early
    for i in range(0,n-1-j):
        if(l[i] > l[i+1]):
            l[i],l[i+1] = l[i+1],l[i]
print(l)
# -
# # # Dictionaries
# ### A dictionary is similar to a std::map in C++. In C++ a separate map is needed for every key/value type combination (e.g. map<pair<int,int>, int> mp always stores a pair as the key and an integer as the value), whereas a Python dict can mix types freely. Keys must be immutable — a key can be a string, int, float, or tuple, but not a list.
# +
d = {}
d[23] = 34
d[3 , 4] = 32
d["str"] = 31
print(d[23])
print(type(d[23]))
d = {23 : 34 , "str" : 31}
print(d)
### fast iteration on dictionaries
print("\n" , "traversing on map")
for i in d: ### in C++ i is pair of key and value but here it is key
print(i , ":" , d[i])
print("over")
### delete elements
del d[23]
print(d)
# +
# common functions in dictionaries
d1 = {}
d1[1] = 1
d2 = {}
d2[2] = 3
print(d1 == d2)
print(len(d1))
d1.clear()
print(d1)
print(d2.keys()) ## behave like a list
print(d2.values()) ## behave like a list
print(23 in d2) ## is this key is present or not
| .ipynb_checkpoints/3 . Lists and Dictionaries ..... [abhishek soni]-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python Django
# language: python
# name: django
# ---
# +
import time
import requests
# -

# Pages to probe (example.com placeholders).
PAGE_URL_LIST = [
    'http://example.com/1.page',
    'http://example.com/2.page',
    'http://example.com/3.page',
]

for page_url in PAGE_URL_LIST:
    # 30s timeout so a dead server cannot hang the crawl
    res = requests.get(page_url, timeout=30)
    # Message text is Korean: "page URL / HTTP status / elapsed time (sec)".
    print(
        "페이지 URL:{}, HTTP 상태: {}, 처리 시간(초): {}".format(
            page_url,
            res.status_code,
            res.elapsed.total_seconds()
        )
    )
    # be polite: pause one second between requests
    time.sleep(1)
| Web_Crawling/python-crawler/chapter_5/example3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:amlsamples_env]
# language: python
# name: conda-env-amlsamples_env-py
# ---
# # Distributed training using torch.distributed.launch module on Azure Machine Learning
#
#
# This example show how to train language model using the huggingface library distributed on Azure Machine Learning using pytorch estimator.
# +
# %load_ext autoreload
# %autoreload 2
import wget
import os
from azureml.core import (Workspace, Experiment,
VERSION, Datastore)
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.environment import Environment,CondaDependencies
from azureml.train.dnn import PyTorch,Nccl
from azureml.data.data_reference import DataReference
from azureml.core.compute_target import ComputeTargetException
from azureml.widgets import RunDetails
SUBSCRIPTION_ID = ""
RESOURCE_GROUP = ""
WORKSPACE_NAME = ""
EXP_NAME = 'Azureml-LM_huggingface_example'
CLUSTER_NAME = "hf-cluster"
RUN_DIR = os.getcwd()
DATA_DIR = 'data'
print("SDK version:", VERSION)
# +
ws = Workspace(subscription_id = SUBSCRIPTION_ID,
resource_group =RESOURCE_GROUP ,
workspace_name = WORKSPACE_NAME
)
exp = Experiment(workspace=ws, name=EXP_NAME)
# -
os.makedirs(DATA_DIR, exist_ok=True)
wget.download("https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip",
out=DATA_DIR
)
datastore = ws.get_default_datastore()
ds_reference = datastore.upload(src_dir='data',
target_path='wikitext',
overwrite=True,
show_progress=True)
# +
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget

# Reuse the AmlCompute cluster if one with this name already exists.
found = False
cts = ws.compute_targets
if CLUSTER_NAME in cts and cts[CLUSTER_NAME].type == 'AmlCompute':
    found = True
    print('Found existing compute target.')
    compute_target = cts[CLUSTER_NAME]
if not found:
    print('Creating a new compute target...')
    # Standard_NC12 GPU VMs, scaling up to 8 nodes
    provisioning_config = AmlCompute.provisioning_configuration(vm_size = 'Standard_NC12',max_nodes = 8)
    # Create the cluster.
    compute_target = ComputeTarget.create(ws, CLUSTER_NAME, provisioning_config)
    print('Checking cluster status...')
    # Block until provisioning finishes (up to 20 minutes).
    compute_target.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)
# +
# %%writefile $RUN_DIR/train.py
import os
import shutil
import argparse
import subprocess
from git.repo.base import Repo
from zipfile import ZipFile
WORK_DIR = 'examples'
SRC_DIR = '/transformers'
OUTPUT_DIR = os.path.join(os.getcwd(),'outputs')
DATA_DIR = os.path.join(os.getcwd(),'wikitext-2-raw')
REPO_URL="https://github.com/datashinobi/transformers.git"
BRANCH='yassine/aml_distributed'
LOCAL_RANK = '0'
parser = argparse.ArgumentParser()
parser.add_argument('--dataset-path', dest='ds_path')
parser.add_argument('--rank', type=str,help='rank within nodes')
parser.add_argument('--node_count', type=str,help='number of nodes')
parser.add_argument('--process_per_node', type=str,help='number of process per node')
parser.add_argument('--batch_size', type=str,help='training & eval batch size')
args = parser.parse_args()
#============Clone forked repo==========
if os.path.exists(SRC_DIR):
print("huggingface repo exists, skip cloning")
else:
print('clone huggingface repo..........')
Repo.clone_from(REPO_URL,SRC_DIR, branch=BRANCH)
#===============Unzip dataset=============
data_file = os.path.join(args.ds_path,"wikitext-2-raw-v1.zip")
with ZipFile(data_file,"r") as zip_file:
zip_file.extractall(os.getcwd())
print(os.listdir(DATA_DIR))
#===========start training=================
master_node_params = os.environ['AZ_BATCH_MASTER_NODE'].split(':')
print("MASTER node", master_node_params)
master_ip = master_node_params[0]
master_port = master_node_params[1]
process = subprocess.Popen(['python', '-m', 'torch.distributed.launch',\
'--nnodes',args.node_count,\
'--nproc_per_node',args.process_per_node,\
'--node_rank', args.rank,\
'--master_addr',master_ip,\
'--master_port',master_port,\
os.path.join(SRC_DIR, WORK_DIR, 'run_language_modeling.py'),\
'--output_dir', OUTPUT_DIR,\
'--model_type', 'roberta', \
'--model_name_or_path', 'roberta-base', \
'--do_train', \
'--train_data_file', os.path.join(DATA_DIR, 'wiki.train.raw'),\
'--do_eval', \
'--eval_data_file', os.path.join(DATA_DIR, 'wiki.test.raw'),\
'--mlm',\
'--local_rank', LOCAL_RANK,\
'--per_gpu_train_batch_size', args.batch_size,\
'--per_gpu_eval_batch_size', args.batch_size
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
)
lines_iterator = iter(process.stdout.readline, b"")
while process.poll() is None:
for line in lines_iterator:
print(line, end = "\r\n",flush =True)
# +
node_count = 8
process_per_node = 1

# Arguments forwarded to train.py; '$AZ_BATCHAI_TASK_INDEX' is resolved to
# the node's rank at runtime on the cluster.
script_params = {
    '--dataset-path':ds_reference.as_mount(),
    '--rank':'$AZ_BATCHAI_TASK_INDEX',
    '--node_count':node_count,
    '--process_per_node':process_per_node,
    '--batch_size':'4'
}

from azureml.train.estimator import Estimator  # NOTE(review): unused — the PyTorch estimator below is what runs
est = PyTorch(source_directory=RUN_DIR,
              pip_packages=['gitpython','scikit-learn','seqeval','tensorboardX',\
                            'tqdm','transformers'],
              script_params=script_params,
              use_gpu=True,
              compute_target=compute_target,
              entry_script=os.path.join(RUN_DIR,'train.py'),
              framework_version='1.4',
              node_count=node_count,
              distributed_training=Nccl()
              )
# -
run = exp.submit(est)
RunDetails(run).show()
run
| examples/language_model_distributed/LM_distributed_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Restricted Boltzmann Machine features for digit classification
#
#
# For greyscale image data where pixel values can be interpreted as degrees of
# blackness on a white background, like handwritten digit recognition, the
# Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
# <sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
# feature extraction.
#
# In order to learn good latent representations from a small dataset, we
# artificially generate more labeled data by perturbing the training data with
# linear shifts of 1 pixel in each direction.
#
# This example shows how to build a classification pipeline with a BernoulliRBM
# feature extractor and a :class:`LogisticRegression
# <sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
# of the entire model (learning rate, hidden layer size, regularization)
# were optimized by grid search, but the search is not reproduced here because
# of runtime constraints.
#
# Logistic regression on raw pixel values is presented for comparison. The
# example shows that the features extracted by the BernoulliRBM help improve the
# classification accuracy.
#
#
# +
from __future__ import print_function
print(__doc__)
# Authors: <NAME>, <NAME>, <NAME>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
# #############################################################################
# Setting up
def nudge_dataset(X, Y):
    """
    This produces a dataset 5 times bigger than the original one,
    by moving the 8x8 images in X around by 1px to left, right, down, up
    """
    # One 3x3 one-hot kernel per shift direction; convolving with it
    # translates the image by one pixel (zeros filled in at the border).
    direction_vectors = (
        [[0, 1, 0], [0, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [1, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 1], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 1, 0]],
    )

    def shift(x, w):
        # Reshape the flat row to 8x8, translate it, flatten it back.
        return convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel()

    shifted = [np.apply_along_axis(shift, 1, X, kernel)
               for kernel in direction_vectors]
    X = np.concatenate([X] + shifted)
    Y = np.concatenate([Y] * 5, axis=0)
    return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
# Min-max scale each pixel column to [0, 1]; the small epsilon avoids
# division by zero for constant columns.
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001)  # 0-1 scaling

X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
                                                    test_size=0.2,
                                                    random_state=0)

# Models we will use: RBM feature extractor feeding a logistic regression
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
# #############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
# #############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
# #############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| lab12/neural_network/plot_rbm_logistic_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py37
# language: python
# name: py37
# ---
import sys
sys.path.append('../')
from torchdyn.models import *; from torchdyn.datasets import *
# ### Generating data
# Mass-Spring-Damper system:
class ControlledSystem(nn.Module):
    """Stable Neural Flow: mass-spring-damper with a learned controller.

    The control input is the sum of an energy-shaping term (negative gradient
    of the learned potential Phi) and a damping-injection term (learned Psi).

    p        -- physical parameters [m, k, b] (mass, spring const., damping)
    Phi, Psi -- neural networks parametrizing the controller
    qr       -- spring rest position used by the dynamics (default 0.)
    m, k, b, g -- accepted for interface compatibility but unused; the
                  dynamics read m/k/b from `p`.

    FIX: the original __init__ required m, k, b, qr, c, g with no defaults,
    so the 3-argument calls below (ControlledSystem([m, 1, 1], Phi, Psi))
    raised TypeError; qr and m were also read from undefined globals inside
    the energy methods. Defaults added and qr stored on the instance.
    """
    def __init__(self, p, Phi, Psi, m=None, k=None, b=None, qr=0., c=1., g=9.81):
        super().__init__()
        self.p = p
        self.Phi, self.Psi = Phi, Psi
        self.qr = qr
        self.c = 1. # control yes/no [1./0.]

    def _energy_shaping(self, q):
        # u_es = -dPhi/dq, computed with autograd so Phi can be any network
        Phi = self.Phi(q[:, None]).sum()
        grad_Phi = torch.autograd.grad(Phi, q, create_graph=True)[0]
        return -grad_Phi[:, None]

    def _damping_injection(self, x):
        # u_di = -|Psi(x)| * velocity / m; |.| keeps the term dissipative
        m = self.p[0]
        return -self.Psi(x).abs() * x[:, 1][:, None] / m

    def _autonomous_energy(self, x):
        # kinetic + spring potential of the uncontrolled system
        m, k = self.p[:2]
        return (m * x[:, 1][:, None] ** 2) / 2. + (k * (x[:, 0][:, None] - self.qr) ** 2) / 2.

    def _energy(self, x):
        # total energy, including the learned potential Phi
        m, k = self.p[:2]
        return (m * x[:, 1][:, None] ** 2) / 2. + (k * (x[:, 0][:, None] - self.qr) ** 2) / 2. \
            + self.Phi(x[:, 0][:, None])

    def forward(self, x):
        m, k, b = self.p[:3]
        with torch.set_grad_enabled(True):
            q, p = x[:, 0], x[:, 1]
            q = q.requires_grad_(True)
            # compute control action
            self.u = self._energy_shaping(q) + self._damping_injection(x)
            # compute dynamics: dq/dt = p/m; dp/dt = spring + damping + control
            dqdt = p[:, None] / m
            dpdt = -k * (q[:, None] - self.qr) - b * p[:, None] / m + self.c * self.u
        return torch.cat([dqdt, dpdt], 1)
fig = plt.figure(figsize=(10, 8))
for i, m in enumerate([0.6, 1, 1.4]):
# vector field parametrized by a NN
Phi = nn.Sequential(
nn.Linear(1, 64),
nn.Tanh(),
nn.Linear(64, 1))
Psi = nn.Sequential(
nn.Linear(2, 64),
nn.Tanh(),
nn.Linear(64, 1))
f = ControlledSystem([m, 1, 1], Phi, Psi)
# neural ODE
model = NeuralDE(f,
order=1,
solver='dopri5',
sensitivity='adjoint',
s_span=torch.linspace(0,5,10)).to(device)
seq = nn.Sequential(model)
t_span = torch.linspace(0,5,100)
x0 = torch.tensor([[1, 0]]).to(device)
x0 = x0 + torch.randn(100, 2).to(device)
model.defunc.m.c = 0.
uncontrolled_trajectory = model.trajectory(x0, t_span).detach().cpu()
ax0 = fig.add_subplot(3,2,2*i+1)
ax1 = fig.add_subplot(3,2,2*i+2)
for i in range(len(x0)):
ax0.plot(t_span, uncontrolled_trajectory[:,i,:], color='b', alpha=.1)
for i in range(len(x0)):
ax1.plot(uncontrolled_trajectory[:,i,0], uncontrolled_trajectory[:,i,1], color='b', alpha=.1)
# ## Dataset
data = [] ; from tqdm import tqdm_notebook as tqdm
for i, m in enumerate([0.6, 1, 1.4]):
for _ in tqdm(range(1)):
# vector field parametrized by a NN
Phi = nn.Sequential(
nn.Linear(1, 64),
nn.Tanh(),
nn.Linear(64, 1))
Psi = nn.Sequential(
nn.Linear(2, 64),
nn.Tanh(),
nn.Linear(64, 1))
f = ControlledSystem([m, 1, 1], Phi, Psi)
# neural ODE
model = NeuralDE(f,
order=1,
solver='dopri5',
sensitivity='adjoint',
s_span=torch.linspace(0,5,10)).to(device)
seq = nn.Sequential(model)
t_span = torch.linspace(0,5,50)
x0 = torch.tensor([[1, 0]]).to(device)
x0 = x0 + torch.randn(2000, 2).to(device)
model.defunc.m.c = 0.
uncontrolled_trajectory = model.trajectory(x0, t_span).detach().cpu()
data += [uncontrolled_trajectory]
data = torch.cat(data, 1)
# +
fig = plt.figure(figsize=(10, 8))
ax0 = fig.add_subplot(1,2,1)
ax1 = fig.add_subplot(1,2,2)
for i in range(0, 30840, 200):
ax0.plot(t_span, data[:,i,:], color='b', alpha=.1)
for i in range(0, 30840, 200):
ax1.plot(data[:,i,0], data[:,i,1], color='b', alpha=.1)
# -
# ## Corrupt the data
# +
data_under = nn.Dropout(p=0.1)(data)
# add noise
idx_to_zero_0 = data_under[:, :, 0]==0
idx_to_zero_1 = data_under[:, :, 1]==0
data_under += 0.03*torch.rand_like(data_under)
# make sure entire obs is removed
data_under[idx_to_zero_0] = 0
data_under[idx_to_zero_1] = 0
# +
fig = plt.figure(figsize=(10, 8))
ax1 = fig.add_subplot(1,1,1)
#for i in range(0, 30840, 200):
# ax0.scatter(t_span, data_under[:,i,:], color='b', alpha=.1)
for i in range(0, 30840, 3000):
ax1.scatter(data_under[:,i,0], data_under[:,i,1], s=2, color='b', alpha=.5)
# -
torch.save(data, 'data/true_data_06_1_14')
# !cwd
| test/benchmark/hybrid_dev/generate_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:.conda-exatrkx]
# language: python
# name: conda-env-.conda-exatrkx-py
# ---
import sys
import os
sys.path.append('/global/u2/c/caditi97/exatrkx-ctd2020')
# %matplotlib inline
import warnings
warnings.filterwarnings("ignore")
from utils_robust import *
# for no ptcut = "/global/cfs/projectdirs/m3443/usr/aoka/data/artifacts/Training_Example_no_ptcut"
# xiangyang's model = "/global/cfs/projectdirs/m3443/usr/dtmurnane/artifacts/adjacent/"
# misaligned data = "/global/cfs/projectdirs/m3443/data/trackml-kaggle/misaligned"
# noise path = f"/global/cfs/cdirs/m3443/usr/aoka/data/classify/Classify_Example_{noise_keep}/preprocess_raw"
artifact_storage_path = "/global/cfs/projectdirs/m3443/usr/dtmurnane/artifacts/adjacent/"
best_emb_path = os.path.join(artifact_storage_path, 'metric_learning_emb', 'best_model.pkl')
best_filter_path = os.path.join(artifact_storage_path, 'metric_learning_filter', 'best_model.pkl')
event_path = "/global/cfs/projectdirs/m3443/usr/caditi97/exatrkx2020/robust_test/output/misaligned/"
mcm = ["20","100","400","600","800","1000"]
feature_names = ['x', 'y', 'z', 'cell_count', 'cell_val', 'leta', 'lphi', 'lx', 'ly', 'lz', 'geta', 'gphi']
f_eff = []
f_purity = []
e_eff = []
e_purity = []
# Sweep over misalignment magnitudes (micrometers) and collect the
# embedding-stage and filtering-stage efficiency/purity for each event file.
for mc in mcm:
    print("Volume 8 layer 6 Misalignment = " +mc+ " micrometers")
    event_name = "event000009900_8_" +mc+ ".pickle"
    hits, truth, emb_model, filter_model = get_data(best_emb_path, best_filter_path, event_name, event_path)
    # 0.4 is presumably the neighbor-search radius in embedding space
    # -- TODO confirm against utils_robust.get_emb_neighbors
    neighbors = get_emb_neighbors(hits[feature_names].values, emb_model, 0.4)
    emb_purity, emb_efficiency = get_emb_eff_purity(hits, truth, neighbors, only_adjacent=True)
    idx_pairs, filter_pairs = use_filter(hits, neighbors)
    filter_purity, filter_efficiency = get_filter_eff_purity(hits, truth, idx_pairs, filter_pairs)
    # accumulate one entry per misalignment value
    f_eff.append(filter_efficiency)
    f_purity.append(filter_purity)
    e_eff.append(emb_efficiency)
    e_purity.append(emb_purity)
# +
def stat_mean(d):
    """Return the arithmetic mean of each sub-sequence in *d*."""
    return [statistics.mean(group) for group in d]
def stat_round(d):
    """Round every value in *d* to three decimal places."""
    return [round(value, 3) for value in d]
# Round (and, for the per-event embedding lists, average) the collected metrics.
f_e = stat_round(f_eff)
f_p = stat_round(f_purity)
e_e = stat_round(stat_mean(e_eff))
e_p = stat_round(stat_mean(e_purity))
# -
e_e
# +
# 2x2 grid: embedding purity/efficiency (top) and filtering purity/efficiency
# (bottom) as functions of the misalignment magnitude.
f, a = plt.subplots(2,2, figsize=(13,13))
x = [int(m) for m in mcm]
a[0][0].plot(x,e_p)
a[0][0].set_title("Embedding Purity")
a[0][0].set_xlabel("Misalignment")
a[0][0].set_xticks([20,100,400,600,800,1000])
a[0][0].set_ylabel("Purity")
a[0][1].plot(x,e_e)
a[0][1].set_title("Embedding Efficiency")
a[0][1].set_xticks([20,100,400,600,800,1000])
a[0][1].set_xlabel("Misalignment")
a[0][1].set_ylabel("Efficiency")
a[1][0].plot(x,f_p)
a[1][0].set_title("Filtering Purity")
a[1][0].set_xticks([20,100,400,600,800,1000])
a[1][0].set_xlabel("Misalignment")
a[1][0].set_ylabel("Purity")
a[1][1].plot(x,f_e)
a[1][1].set_title("Filtering Efficiency")
a[1][1].set_xticks([20,100,400,600,800,1000])
a[1][1].set_xlabel("Misalignment")
a[1][1].set_ylabel("Efficiency")
plt.show()
# -
# Baseline: the same pipeline on the unshifted (aligned) event.
ev_n = "event000009900.pickle"
hits_o, truth_o, emb_model, filter_model = get_data(best_emb_path, best_filter_path, ev_n, event_path)
nbrs = get_emb_neighbors(hits_o[feature_names].values, emb_model, 0.4)
emb_p_o, emb_e_o = get_emb_eff_purity(hits_o, truth_o, nbrs, only_adjacent=True)
idx_pairs_o, filter_pairs_o = use_filter(hits_o, nbrs)
f_p_o, f_e_o = get_filter_eff_purity(hits_o, truth_o, idx_pairs_o, filter_pairs_o)
emb_eff_o = statistics.mean(emb_e_o)
emb_eff_o
# Efficiency lost at each misalignment, relative to the aligned baseline.
e0 = np.zeros(len(e_e)) + emb_eff_o
obs_eff_loss = e0 - stat_mean(e_eff)
obs_eff_loss
| notebooks/misaligned.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import ZebraLib as zb
import numpy as np
import matplotlib.pyplot as plt
# Aircraft under analysis
Zb = zb.Airplane(name='Versão 3.0', b=1.86, S=0.843, Af=0.601, c=0.463, CLmax= 1.5193,
                 Load=5.5, mv=2.8, CLn=1.080, Swet= 10, Nmax=2.1 , Tc=(-0.001, -0.225, 35.225))
# Airspeed sweep [m/s]
V = np.linspace(0.01, 40, 1000)
# Density-altitudes [m]
alt = [0, 1212]
dens_0 = zb.alt2rho(alt[0])
dens_1 = zb.alt2rho(alt[1])
Np_0, phip_0 = Zb.turn_Nprop(V, dens_0) # load factor / bank angle (propulsive limit)
Ns_0, phis_0 = Zb.turn_Nsust(V, dens_0) # load factor / bank angle (stall limit)
# Plot: load factor (top) and bank angle (bottom) vs. airspeed
fig, ax = plt.subplots(figsize=(10, 10),nrows=2, ncols=1)
#SUBPLOT 1
# Load factor during turns (solid = sea level, dashed = 1212 m)
ax[0].set(title=f'Curvas para alt-dens relativas',
          xlabel='Velocidade [m/s]', ylabel='Fator de carga máximo [g]')
ax[1].set(ylabel=r'Ângulo de inclinação $\phi$ [graus]', xlabel='Velocidade [m/s]')
ax[0].plot(V, Np_0, color='b', label=f'limite propulsivo {alt[0]:.2f}m')
ax[0].plot(V, Ns_0, color='red', label=f'limite de estol {alt[0]:.2f}m')
ax[1].plot(V, phip_0, color='b', label=f'limite propulsivo {alt[0]:.2f}m')
ax[1].plot(V, phis_0, color='red', label=f'limite de estol {alt[0]:.2f}m')
#ax[0].axhline(y=Zb.Nmax, color='green', label=f'Limite estrutural de ${Zb.Nmax}g$')
Np_1, phip_1 = Zb.turn_Nprop(V, dens_1) # same limits at the second altitude
Ns_1, phis_1 = Zb.turn_Nsust(V, dens_1)
ax[0].plot(V, Np_1,'--', color='b', label=f'limite propulsivo {alt[1]:.2f}m')
ax[0].plot(V, Ns_1,'--', color='red', label=f'limite de estol {alt[1]:.2f}m')
ax[1].plot(V, phip_1,'--', color='b', label=f'limite propulsivo {alt[1]:.2f}m')
ax[1].plot(V, phis_1,'--', color='red', label=f'limite de estol {alt[1]:.2f}m')
#ax[0].axhline(y=Zb.Nmax, color='green', label=f'Limite estrutural de ${Zb.Nmax}g$')
# Clip both panels to the flyable speed range (stall speed at 1212 m .. 22 m/s)
ax[0].set_xlim(Zb.veloc_Req(Zb.CLmax, zb.alt2rho(1212)), 22)
ax[0].set_ylim(0, 4)
ax[0].grid(linestyle='dotted')
ax[0].legend()
ax[1].set_ylim(0, 62)
ax[1].set_xlim(Zb.veloc_Req(Zb.CLmax, zb.alt2rho(1212)), 22)
ax[1].grid(linestyle='dotted')
ax[1].legend()
plt.show()
# Turn radius (top) and turn rate (bottom) at both altitude-densities.
# BUG FIX: the original cell reused the load-factor plotting code verbatim,
# so it plotted radius series on both panels, mixed turn-rate series onto
# the radius axes, and carried over the load-factor titles/labels and the
# load-factor y-limits. Each panel now shows one quantity with correct labels.
fig, ax = plt.subplots(figsize=(10, 10), nrows=2, ncols=1)
# Turn radius
Rp_0 = Zb.turn_Radius(V, Np_0)      # propulsive limit, sea level
Rs_0 = Zb.turn_Radius(V, Ns_0)      # wing-stall (CL) limit, sea level
Rp_1 = Zb.turn_Radius(V, Np_1)      # propulsive limit, 1212 m
Rs_1 = Zb.turn_Radius(V, Ns_1)      # wing-stall (CL) limit, 1212 m
Rstr = Zb.turn_Radius(V, Zb.Nmax)   # structural limit at Nmax
# Turn rate
Wp_0 = Zb.turn_Rate(V, Np_0)
Ws_0 = Zb.turn_Rate(V, Ns_0)
Wp_1 = Zb.turn_Rate(V, Np_1)
Ws_1 = Zb.turn_Rate(V, Ns_1)
Wstr = Zb.turn_Rate(V, Zb.Nmax)
#SUBPLOT 1: turn radius
ax[0].set(title='Raio de curva para alt-dens relativas',
          xlabel='Velocidade [m/s]', ylabel='Raio de curva [m]')
ax[0].plot(V, Rp_0, color='b', label=f'limite propulsivo {alt[0]:.2f}m')
ax[0].plot(V, Rs_0, color='red', label=f'limite de estol {alt[0]:.2f}m')
ax[0].plot(V, Rp_1, '--', color='b', label=f'limite propulsivo {alt[1]:.2f}m')
ax[0].plot(V, Rs_1, '--', color='red', label=f'limite de estol {alt[1]:.2f}m')
#SUBPLOT 2: turn rate
ax[1].set(title=r'Razão de curva ($d \psi /dt$)',
          xlabel='Velocidade [m/s]', ylabel='Razão de curva [graus/s]')
ax[1].plot(V, Wp_0, color='b', label=f'limite propulsivo {alt[0]:.2f}m')
ax[1].plot(V, Ws_0, color='red', label=f'limite de estol {alt[0]:.2f}m')
ax[1].plot(V, Wp_1, '--', color='b', label=f'limite propulsivo {alt[1]:.2f}m')
ax[1].plot(V, Ws_1, '--', color='red', label=f'limite de estol {alt[1]:.2f}m')
ax[0].set_xlim(Zb.veloc_Req(Zb.CLmax, zb.alt2rho(1212)), 22)
ax[0].grid(linestyle='dotted')
ax[0].legend()
ax[1].set_xlim(Zb.veloc_Req(Zb.CLmax, zb.alt2rho(1212)), 22)
ax[1].grid(linestyle='dotted')
ax[1].legend()
plt.show()
# +
import ZebraLib as zb
import numpy as np
import matplotlib.pyplot as plt
# Aircraft under analysis
Zb = zb.Airplane(name='Versão 3.0', b=1.86, S=0.843, Af=0.601, c=0.463, CLmax= 1.5193,
                 Load=5.5, mv=2.8, CLn=1.080, Swet= 10, Nmax=2.1 , Tc=(-0.001, -0.225, 35.225))
# Airspeed sweep [m/s]
V = np.linspace(0.01, 40, 1000)
# Density-altitudes [m]
altitude = [0, 1212]
# Plot: propulsive load factor (top) and bank angle (bottom) per altitude
fig, ax = plt.subplots(figsize=(10, 10),nrows=2, ncols=1)
#SUBPLOT 1
ax[0].set(title='Curvas para alt-dens relativas',
          xlabel='Velocidade [m/s]', ylabel='Fator de carga máximo [g]')
ax[1].set(ylabel=r'Ângulo de inclinação $\phi$ [graus]', xlabel='Velocidade [m/s]')
for alt in altitude:
    Np, phip = Zb.turn_Nprop(V, zb.alt2rho(alt)) # propulsive load factor / bank angle
    ax[0].plot(V, Np, color='b', label=f'limite propulsivo p/ {alt:.2f}m')
    # BUG FIX: this curve is the propulsive bank angle (phip), not the stall
    # limit, so the legend now says "limite propulsivo" instead of "estol".
    ax[1].plot(V, phip, color='red', label=f'limite propulsivo p/ {alt:.2f}m')
#ax[0].axhline(y=Zb.Nmax, color='green', label=f'Limite estrutural de ${Zb.Nmax}g$')
ax[0].set_xlim(Zb.veloc_Req(Zb.CLmax, zb.alt2rho(1212)), 22)
ax[0].set_ylim(0, 4)
ax[0].grid(linestyle='dotted')
ax[0].legend()
ax[1].set_ylim(0, 62)
ax[1].set_xlim(Zb.veloc_Req(Zb.CLmax, zb.alt2rho(1212)), 22)
ax[1].grid(linestyle='dotted')
ax[1].legend()
plt.show()
# +
# Turn radius and turn rate at the last altitude-density analysed.
# BUG FIXES vs. the original cell:
#   * `Ns` was never defined (the previous cell computed only Np/phip),
#     which raised a NameError -> both limits are recomputed here;
#   * the new figure's axes were bound to `ax2` but everything was plotted
#     on the stale `ax` from the previous figure -> plot on `ax2`;
#   * the title called zb.alt2rho(rho=dens) with an undefined `dens`
#     -> report the altitude directly.
# NOTE(review): the density is taken at altitude[-1] (1212 m); confirm that
# this is the altitude the original author intended.
rho_last = zb.alt2rho(altitude[-1])
Np, phip = Zb.turn_Nprop(V, rho_last)   # propulsive load-factor limit
Ns, phis = Zb.turn_Nsust(V, rho_last)   # stall (lift) load-factor limit
# Turn radius
Rp = Zb.turn_Radius(V, Np)          # propulsive limit
Rs = Zb.turn_Radius(V, Ns)          # wing-stall (CL) limit
Rstr = Zb.turn_Radius(V, Zb.Nmax)   # structural limit at Nmax
# Turn rate
Wp = Zb.turn_Rate(V, Np)
Ws = Zb.turn_Rate(V, Ns)
Wstr = Zb.turn_Rate(V, Zb.Nmax)
fig, ax2 = plt.subplots(figsize=(10, 10),nrows=2, ncols=1)
#SUBPLOT 1: minimum turn radius
ax2[0].set(title=f'Raio de curva mínimo para alt-dens {altitude[-1]}m',
           ylabel='Raio de curva [m]', xlabel='Velocidade de voo [m/s]')
ax2[0].plot(V, Rp, linestyle='dashdot', color='royalblue', label='Limite propulsivo')
ax2[0].plot(V, Rs, '--', color='red', label='Limite de estol')
ax2[0].plot(V, Rstr, color='green', label=f'Limite estrutural de ${Zb.Nmax}g$')
ax2[0].set_ylim(0, 100)
ax2[0].set_xlim(Zb.veloc_Req(Zb.CLmax, zb.alt2rho(1212))-1, 25)
ax2[0].grid(linestyle='dotted')
ax2[0].legend()
#SUBPLOT 2: turn rate
ax2[1].set(title=r'Razão de curva ($d \psi /dt$)',ylabel='Razão de curva [graus/s]'
           , xlabel='Velocidade de voo [m/s]')
ax2[1].plot(V, Wp, linestyle='dashdot', color='royalblue', label='Limite propulsivo')
ax2[1].plot(V, Ws, '--', color='red', label='Limite de estol')
ax2[1].plot(V, Wstr, color='green', label=f'Limite estrutural de ${Zb.Nmax}g$')
ax2[1].set_ylim(0, 100)
ax2[1].set_xlim(Zb.veloc_Req(Zb.CLmax, zb.alt2rho(1212))-1, 25)
ax2[1].grid(linestyle='dotted')
ax2[1].legend()
| Projeto Zebra/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Inner and Outer Joins
# ### Connect to the database
#
# Let us first load the SQL extension and establish a connection with the database
#
# +
import sqlalchemy
import ibm_db_sa
import pandas as pd
# %load_ext sql
# +
connection_string = "ibm_db_sa://nhr87395:n6k6mlj7x1f%408vs2@dashdb-txn-sbox-yp-lon02-13.services.eu-gb.bluemix.net:50000/BLUDB"
# %sql $connection_string
# -
# ### HR Database
# We will be working on a sample HR database. This HR database schema consists of 5 tables called EMPLOYEES, JOB_HISTORY, JOBS, DEPARTMENTS and LOCATIONS.
#
# The following diagram shows the tables for the HR database with a few rows of sample data.
#
# 
# ### Query 1A: Select the names and job start dates of all employees who work for the department number 5.
# + magic_args="select E.F_NAME, E.L_NAME, J.START_DATE" language="sql"
# from (select * from EMPLOYEES where DEP_ID=5) E inner join JOB_HISTORY J
# on E.EMP_ID = J.EMPL_ID
#
# -
# ### Query 1B: Select the names, job start dates, and job titles of all employees who work for the department number 5.
#
# + magic_args="select E.F_NAME, E.L_NAME, J.START_DATE, T.JOB_TITLE" language="sql"
# from (select * from EMPLOYEES where DEP_ID=5) E
# inner join JOB_HISTORY J on E.EMP_ID = J.EMPL_ID
# inner join JOBS T on E.JOB_ID = T.JOB_IDENT
#
# -
# ### Query 2A: Perform a Left Outer Join on the EMPLOYEES and DEPARTMENTS tables and select employee id, last name, department id and department name for all employees.
#
# + magic_args="select E.EMP_ID, E.L_NAME, E.DEP_ID, D.DEP_NAME" language="sql"
# from EMPLOYEES E left join DEPARTMENTS D
# on E.DEP_ID = D.DEPT_ID_DEP
#
# -
# ### Query 2B: Re-write the query for 2A to limit the result set to include only the rows for employees born before 1980.
#
# + magic_args="select E.EMP_ID, E.L_NAME, E.DEP_ID, D.DEP_NAME" language="sql"
# from EMPLOYEES E left join DEPARTMENTS D on E.DEP_ID = D.DEPT_ID_DEP
# where year(E.B_DATE) < 1980
#
# -
# ### Query 2C: Re-write the query for 2A to have the result set include all the employees but department names for only the employees who were born before 1980.
#
# + magic_args="select E.EMP_ID, E.L_NAME, E.DEP_ID, D.DEP_NAME" language="sql"
# from EMPLOYEES E left join DEPARTMENTS D
# on E.DEP_ID = D.DEPT_ID_DEP and year(E.B_DATE) < 1980
#
# -
# ### Query 3A: Perform a Full Join on the EMPLOYEES and DEPARTMENT tables and select the First name, Last name and Department name of all employees.
#
# + magic_args="select E.F_NAME, E.L_NAME, D.DEP_NAME" language="sql"
# from EMPLOYEES E full join DEPARTMENTS D
# on E.DEP_ID = D.DEPT_ID_DEP
#
# -
# ### Query 3B: Re-write Query 3A to have the result set include all employee names but department id and department names only for male employees.
#
# + magic_args="select E.F_NAME, E.L_NAME, D.DEPT_ID_DEP, D.DEP_NAME" language="sql"
# from EMPLOYEES E left join DEPARTMENTS D
# on E.DEP_ID = D.DEPT_ID_DEP and E.SEX = 'M'
# -
| src/Week 3 - Lab 4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import os
sys.path.insert(0, 'jindal/NER-Bi-LSTM-CNN')
import numpy as np
# from validation import compute_f1
from keras.models import Model
from keras.layers import TimeDistributed,Conv1D,Dense,Embedding,Input,Dropout,LSTM,Bidirectional,MaxPooling1D,Flatten,concatenate
from prepro import readfile,createBatches,createMatrices,iterate_minibatches,addCharInformatioin,padding
from keras.utils import plot_model,Progbar
from keras.preprocessing.sequence import pad_sequences
from keras.initializers import RandomUniform
import sklearn.metrics
epochs = 50
# -
def tag_dataset(dataset):
    """Run the (global) `model` over every sample in `dataset`.

    dataset: iterable of (tokens, casing, char, labels) tuples, already
    index-encoded and padded.
    Returns (predLabels, correctLabels): two parallel lists holding, per
    sentence, the predicted class indices and the gold label sequence.
    """
    correctLabels = []
    predLabels = []
    b = Progbar(len(dataset))
    for i,data in enumerate(dataset):
        tokens, casing,char, labels = data
        # model.predict expects a batch dimension -> wrap each sample in a
        # singleton batch.
        tokens = np.asarray([tokens])
        casing = np.asarray([casing])
        char = np.asarray([char])
        try:
            pred = model.predict([tokens, casing,char], verbose=False)[0]
            pred = pred.argmax(axis=-1) #Predict the classes
            # append gold and prediction together so the two lists stay aligned
            correctLabels.append(labels)
            predLabels.append(pred)
        except Exception as e:
            # NOTE(review): prediction failures are silently skipped, so some
            # samples may be dropped from the evaluation.
            continue
        b.update(i)
    return predLabels, correctLabels
# +
def get_sentences(path):
    """Parse a CoNLL-style file into sentences.

    Each non-blank line is expected to hold ``word label`` (whitespace
    separated, extra columns ignored); any line with fewer than two fields
    (e.g. a blank line) marks a sentence boundary.

    Args:
        path: path to the corpus file, read as bytes and decoded per token.

    Returns:
        A list of sentences, each a list of ``[word, label]`` pairs.
    """
    sentences = []
    sentence = []
    with open(path, 'rb') as f:
        for line in f:
            splits = [x.decode() for x in line.split()]
            if len(splits) >= 2:
                sentence.append([splits[0], splits[1]])
            else:
                # Sentence boundary (blank or malformed line). Explicit check
                # replaces the original exception-as-control-flow, which also
                # swallowed unrelated errors.
                sentences.append(sentence)
                sentence = []
    # BUG FIX: the original dropped the final sentence when the file did not
    # end with a boundary line.
    if sentence:
        sentences.append(sentence)
    return sentences
# -
# Convert raw entity labels to BIO encoding, in place.
def modify_labels(dataset):
    """Rewrite entity labels in *dataset* to BIO encoding, in place.

    Each word is ``[token, label]``; the outside label is the literal string
    '0' and is left untouched. The first word of a run of identical entity
    labels becomes 'B-<label>'; every subsequent word in the run becomes
    'I-<label>'.

    BUG FIXES vs. the original:
      * at position 0 the original inspected ``sentence[pos-1]`` ==
        ``sentence[-1]`` (Python negative indexing), i.e. the *last* word of
        the sentence, instead of treating the sentence start as a boundary;
      * only a previous 'B-' tag counted as a continuation, so a run of 3+
        identical labels was encoded B,I,B,... instead of B,I,I,...

    Returns the (mutated) dataset.
    """
    for sentence in dataset:
        prev_type = None  # raw entity type of the previous word, or None
        for word in sentence:
            label = word[1]
            if label != '0':
                # same type as the previous word -> continuation
                word[1] = ('I-' if prev_type == label else 'B-') + label
                prev_type = label
            else:
                prev_type = None
    return dataset
# +
# Load the corpora; the tail of the training file is held out as a dev split.
temp = get_sentences('/srv/Resources/SWE/train_corpus.tsv')
test_sentences = get_sentences('/srv/Resources/SWE/test_corpus.tsv')
train_sentences = temp[:6000]
dev_sentences = temp[6000:]
print(len(train_sentences))
print(len(dev_sentences))
print(len(test_sentences))
# -
print(train_sentences[0])
# Re-encode the raw labels as BIO, then attach per-word character lists.
train_sentences = modify_labels(train_sentences)
test_sentences = modify_labels(test_sentences)
dev_sentences = modify_labels(dev_sentences)
trainSentences = addCharInformatioin(train_sentences)
devSentences = addCharInformatioin(dev_sentences)
testSentences = addCharInformatioin(test_sentences)
print(trainSentences[0])
# +
labelSet = set()
words = {}
for dataset in [trainSentences, devSentences, testSentences]:
for sentence in dataset:
for token,char,label in sentence:
labelSet.add(label)
words[token] = True
# -
print(labelSet)
# :: Create a mapping for the labels ::
label2Idx = {}
for label in labelSet:
label2Idx[label] = len(label2Idx)
print(label2Idx)
# :: Hard coded case lookup ::
case2Idx = {'numeric': 0, 'allLower':1, 'allUpper':2, 'initialUpper':3, 'other':4, 'mainly_numeric':5, 'contains_digit': 6, 'PADDING_TOKEN':7}
caseEmbeddings = np.identity(len(case2Idx), dtype='float32')
string_words = ' '.join(words.keys())
print(string_words)
print(caseEmbeddings)
print(case2Idx)
# +
# Build the word vocabulary and embedding matrix from a pretrained
# text-format embedding file (one "word v1 v2 ... vd" line per word).
word2Idx={}
wordEmbeddings=[]
# created a file by the name of german_words.txt in /fastText. Containing all the words in our dataset
with open('swedish_word_embeddings.txt','rb') as f:
    for line in f:
        splits = line.split()
        word = splits[0].decode()
        # print(word.decode())
        if len(word2Idx) == 0: #Add padding+unknown
            # index 0: zero vector for 'PADDING_TOKEN'
            word2Idx["PADDING_TOKEN"] = len(word2Idx)
            vector = np.zeros(len(splits)-1) #Zero vector for 'PADDING' word
            wordEmbeddings.append(vector)
            # index 1: small random vector for 'UNKNOWN_TOKEN'
            word2Idx["UNKNOWN_TOKEN"] = len(word2Idx)
            vector = np.random.uniform(-0.25, 0.25, len(splits)-1)
            wordEmbeddings.append(vector)
        # NOTE(review): a duplicate word in the file would overwrite its index
        # while still appending a row, desynchronizing word2Idx and
        # wordEmbeddings -- assumes the embedding file has unique words.
        word2Idx[word]=len(word2Idx)
        embedding = np.array([float(num) for num in splits[1:]])
        wordEmbeddings.append(embedding)
wordEmbeddings=np.array(wordEmbeddings)
# -
# print(wordEmbeddings[2])
print(len(wordEmbeddings[0]))
print(word2Idx)
characters=set()
for word in words.keys():
for char in word:
characters.add(char)
# print(characters)
characters = set(characters)
char2Idx={}
for char in characters:
char2Idx[char] = len(char2Idx)
print(char2Idx)
print(trainSentences[0])
# createMatrices: for every sentence, changes its words, cases,characters, labels to its corresponding id in their embeddings
# padding is used to pad the character indices to a fixed size=52
train_set = padding(createMatrices(trainSentences,word2Idx, label2Idx, case2Idx,char2Idx))
dev_set = padding(createMatrices(devSentences,word2Idx, label2Idx, case2Idx,char2Idx))
test_set = padding(createMatrices(testSentences, word2Idx, label2Idx, case2Idx,char2Idx))
# +
# train-set[][0]: corresponds to the ids of the words in the sentence
# train_set[][1]: corresponds to the ids of the cases of the words
# train_set[][2]: contains numpy arrays, one corresponding to every word, each containing the indices of the characters of that word
# the numpy arrays have a fixed size (padding or truncation) to 52
# train_set[][3]: corresponds to the ids of the labels of every word
print(train_set[0])
print(len(train_set[0][0])) # gives the number of words in the sentence
print(len(train_set[0][2]))
# +
idx2Label = {v: k for k, v in label2Idx.items()}
train_batch,train_batch_len = createBatches(train_set)
dev_batch,dev_batch_len = createBatches(dev_set)
test_batch,test_batch_len = createBatches(test_set)
# -
# Word-level input: indices into the frozen pretrained word-embedding matrix.
words_input = Input(shape=(None,),dtype='int32',name='words_input')
words = Embedding(input_dim=wordEmbeddings.shape[0], output_dim=wordEmbeddings.shape[1], weights=[wordEmbeddings], trainable=False)(words_input)
# Casing-feature input: frozen identity (one-hot) embedding.
casing_input = Input(shape=(None,), dtype='int32', name='casing_input')
casing = Embedding(output_dim=caseEmbeddings.shape[1], input_dim=caseEmbeddings.shape[0], weights=[caseEmbeddings], trainable=False)(casing_input)
# Character-level input: 52 char indices per word; a per-word embedding +
# Conv1D + max-pool produces one character feature vector per word.
character_input=Input(shape=(None,52,),name='char_input')
embed_char_out=TimeDistributed(Embedding(len(char2Idx),30,embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5)), name='char_embedding')(character_input)
dropout= Dropout(0.5)(embed_char_out)
conv1d_out= TimeDistributed(Conv1D(kernel_size=3, filters=30, padding='same',activation='tanh', strides=1))(dropout)
maxpool_out=TimeDistributed(MaxPooling1D(52))(conv1d_out)
char = TimeDistributed(Flatten())(maxpool_out)
char = Dropout(0.5)(char)
# Concatenate the three feature streams, contextualize with a BiLSTM, and
# classify every token with a softmax over the label set.
output = concatenate([words, casing,char])
output = Bidirectional(LSTM(200, return_sequences=True, dropout=0.50, recurrent_dropout=0.25))(output)
output = TimeDistributed(Dense(len(label2Idx), activation='softmax'))(output)
model = Model(inputs=[words_input, casing_input,character_input], outputs=[output])
model.compile(loss='sparse_categorical_crossentropy', optimizer='nadam')
model.summary()
plot_model(model, to_file='model.png')
# Train for `epochs` passes over the mini-batches.
for epoch in range(epochs):
    print("Epoch %d/%d"%(epoch,epochs))
    a = Progbar(len(train_batch_len))
    for i,batch in enumerate(iterate_minibatches(train_batch,train_batch_len)):
        labels, tokens, casing,char = batch
        model.train_on_batch([tokens, casing,char], labels)
        a.update(i)
    print(' ')
# Dev-set evaluation, flattened to token level for the sklearn F1 metrics.
# NOTE(review): indentation reconstructed -- this block appears to run once
# after all epochs rather than per epoch; confirm against the notebook.
predLabels, correctLabels = tag_dataset(dev_batch)
predLabels = np.concatenate(predLabels).ravel()
correctLabels = np.concatenate(correctLabels).ravel()
print(idx2Label)
print(sklearn.metrics.f1_score(correctLabels,predLabels,average='macro' ))
print(sklearn.metrics.f1_score(correctLabels, predLabels, average='micro'))
print(sklearn.metrics.f1_score(correctLabels, predLabels, average='weighted'))
print(sklearn.metrics.f1_score(correctLabels, predLabels, average=None))
# +
# Performance on test dataset
predLabels, correctLabels = tag_dataset(test_batch)
predLabels = np.concatenate(predLabels).ravel()
correctLabels = np.concatenate(correctLabels).ravel()
print(sklearn.metrics.f1_score(correctLabels,predLabels,average='macro' ))
print(sklearn.metrics.f1_score(correctLabels, predLabels, average='micro'))
print(sklearn.metrics.f1_score(correctLabels, predLabels, average='weighted'))
print(sklearn.metrics.f1_score(correctLabels, predLabels, average=None))
# -
model.save('swedish_ner_with_char.h5')
print(len(word2Idx.keys()))
print(len(trainSentences)+len(testSentences)+len(devSentences))
| diff_languages/ner_swedish_with_char.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assemblage
# The `Assemblage` represents all of the things that can potentially be discovered within an `Area`. These are most commonly going to be artifacts represented as points, but can theoretically be other shapes as well. The `Assemblage` must lie within the `Area`, so an `Area` object is a required parameter of the `Assemblage` creation methods.
#
# An `Assemblage` is made from a list of `Layer` objects, so most of the heavy lifting is done by creating each `Layer`. We will walk through `Layer` creation methods first, then we will put them together in an `Assemblage`.
# + [markdown] toc-hr-collapsed=false
# ## Creating a `Layer`
# -
# A `Layer` is intended to be a group of artifacts or other features that share `time_penalty` and `ideal_obs_rate` parameters. More practically, you can think of a `Layer` standing in for a type of artifact. For example, you might expect those parameters to be the same for any Iron Age ceramics, so you can put all of the Iron Age ceramics into the same `Layer`.
#
# Each element of the `Layer` (i.e., each individual artifact) is a `Feature` object. Most of the time it will make more sense to use `Layer` methods to create many `Feature` objects at the same time, but it is possible to create the `Feature` objects one-by-one and assembling them into a `Layer`.
# ### From a list of `Feature` objects
# To create a `Feature`, we need a `shapely` object, so let's create a few simple points.
# +
from shapely.geometry import Point
import prospect
from scipy.stats import beta
pt1 = Point(10, 10)
ft1 = prospect.Feature(
name="feature1",
layer_name="demo_layer",
shape=pt1,
time_penalty=prospect.utils.truncnorm(mean=10, sd=7, lower=0, upper=50),
ideal_obs_rate=beta(9, 1)
)
pt2 = Point(50, 50)
ft2 = prospect.Feature(
name="feature2",
layer_name="demo_layer",
shape=pt2,
time_penalty=prospect.utils.truncnorm(mean=10, sd=7, lower=0, upper=50),
ideal_obs_rate=beta(9, 1)
)
pt3 = Point(90, 90)
ft3 = prospect.Feature(
name="feature3",
layer_name="demo_layer",
shape=pt3,
time_penalty=prospect.utils.truncnorm(mean=10, sd=7, lower=0, upper=50),
ideal_obs_rate=beta(9, 1)
)
# -
# ```{note}
# Notice that we kept the `time_penalty` and `ideal_obs_rate` parameters constant. It is not *required* that all members of a `Layer` have identical values for these parameters, but it is probably a good idea. If you need to use different values, it is probably best to use one `Layer` per unique set of parameters.
# ```
#
# Now let's put our `Feature` objects into a `Layer`. The `Layer` constructor will check and ensure that the `Feature` objects are located within the `Area` boundaries, so you must pass an `Area` when creating a `Layer`.
#
# ```{note}
# Currently this spatial rule is only enforced if all of the elements in `input_features` are `Point` objects. It is my hope to include `LineString` and `Polygon` `Feature` objects in this "clipping" operation in the future.
# ```
demo_area = prospect.Area.from_area_value(
name='demo_area',
value=10000
)
layer_from_list = prospect.Layer(
name='demo_layer',
area=demo_area,
assemblage_name='demo_assemblage',
input_features=[ft1, ft2, ft3]
)
type(layer_from_list)
layer_from_list.df
type(layer_from_list.df)
# We can use the plotting functionality from `geopandas` to visualize the `Layer` members within the `Area`.
layer_from_list.df.plot(ax=demo_area.df.plot(), color="orange")
# ### From a shapefile
# The `from_shapefile()` method is useful for reading in existing datasets as `Layer` objects. These could be data from a completed field survey or maybe data designed to test some custom question.
# +
area_from_shp = prospect.Area.from_shapefile(
name="area_shp",
path="./data/demo_area.shp"
)
layer_from_shp = prospect.Layer.from_shapefile(
path="./data/demo_layer.shp",
name="demo_layer_from_shp",
area=area_from_shp,
assemblage_name="demo_assemblage",
time_penalty=prospect.utils.truncnorm(mean=10, sd=7, lower=0, upper=50),
ideal_obs_rate=beta(9, 1)
)
# -
layer_from_shp.df
# Let's plot the resulting `Layer`.
layer_from_shp.df.plot(ax=area_from_shp.df.plot(), color="orange")
# ### From pseudorandom points
# To very quickly create a `Layer` with a specific number of points, you can use the `from_pseudorandom_points()` method. This method uses `numpy` to draw $n$ random values for coordinates for `Point` objects.
# +
area_from_shp = prospect.Area.from_shapefile(
name="area_shp",
path="./data/demo_area.shp"
)
layer_from_pseudo_rand = prospect.Layer.from_pseudorandom_points(
n=100,
name="demo_layer_from_pseu_rand",
area=area_from_shp,
assemblage_name="demo_assemblage",
time_penalty=prospect.utils.truncnorm(mean=10, sd=7, lower=0, upper=50),
ideal_obs_rate=beta(9, 1)
)
# -
layer_from_pseudo_rand.df.shape
layer_from_pseudo_rand.df.plot(ax=area_from_shp.df.plot(), color='orange')
# + [markdown] toc-hr-collapsed=false
# ### From point processes
# -
# `prospect` offers methods for creating `Layer` objects using existing point pattern types: Poisson, Thomas, and Matern.
# ```{caution}
# For all of these point pattern types, the generated points are *not* guaranteed to fall within the given `Area`, only within its bounding box. The generated `GeoDataFrame` of points, `df`, is clipped by the actual `Area` bounds *after* they are generated, which can result in fewer points than expected. If you need to examine what has been clipped, all original points will remain in the `input_features` attribute.
# ```
# #### Poisson
# A Poisson point process is usually said to be more "purely" random than most random number generators (like the one used in `from_pseudorandom_points()`)
#
# The rate (usually called "lambda") of the Poisson point process represents the number of events per unit of area per unit of time across some theoretical space of which our `Area` is some subset. In this case, we only have one unit of time, so the rate really represents a theoretical number of events per unit area. For example, if the specified rate is 5, in any 1x1 square, the number of points observed will be drawn randomly from a Poisson distribution with a shape parameter of 5. In practical terms, this means that over many 1x1 areas (or many observations of the same area), the mean number of points observed in that area will approximate 5.
# +
area_from_shp = prospect.Area.from_shapefile(
name="area_shp",
path="./data/demo_area.shp"
)
layer_from_poisson = prospect.Layer.from_poisson_points(
rate=0.005,
name="demo_layer_from_poisson",
area=area_from_shp,
assemblage_name="demo_assemblage",
time_penalty=prospect.utils.truncnorm(mean=10, sd=7, lower=0, upper=50),
ideal_obs_rate=beta(9, 1)
)
# -
layer_from_poisson.df.shape
layer_from_poisson.df.plot(ax=area_from_shp.df.plot(), color='orange')
# #### Thomas
# A Thomas point process is a two-stage Poisson process. It has a Poisson number of clusters, each with a Poisson number of points distributed with an isotropic Gaussian distribution of a given variance. The points that are used to define the parent clusters are *not* represented in the output.
#
# ```{tip}
# This is an excellent way to generate artifact clusters.
# ```
# +
area_from_shp = prospect.Area.from_shapefile(
name="area_shp",
path="./data/demo_area.shp"
)
layer_from_thomas = prospect.Layer.from_thomas_points(
parent_rate=0.001,
child_rate=10,
gauss_var=5,
name="demo_layer_from_thomas",
area=area_from_shp,
assemblage_name="demo_assemblage",
time_penalty=prospect.utils.truncnorm(mean=10, sd=7, lower=0, upper=50),
ideal_obs_rate=beta(9, 1)
)
# -
layer_from_thomas.df.shape
layer_from_thomas.df.plot(ax=area_from_shp.df.plot(), color='orange')
# #### Matern
# The Matern process is similar to the Thomas point process. It has a Poisson number of parent clusters like the Thomas process, but in this case, each parent cluster has a Poisson number of points distributed uniformly across a disk of a given radius.
#
# ```{tip}
# This is an excellent method for generating circular clusters of artifacts.
# ```
# +
area_from_shp = prospect.Area.from_shapefile(
name="area_shp",
path="./data/demo_area.shp"
)
layer_from_matern = prospect.Layer.from_matern_points(
parent_rate=0.001,
child_rate=10,
radius=5,
name="demo_layer_from_matern",
area=area_from_shp,
assemblage_name="demo_assemblage",
time_penalty=prospect.utils.truncnorm(mean=10, sd=7, lower=0, upper=50),
ideal_obs_rate=beta(9, 1)
)
# -
layer_from_matern.df.shape
layer_from_matern.df.plot(ax=area_from_shp.df.plot(), color='orange')
#
# ```{admonition} COMING SOON: from_rectangles()
# :class: tip
# In the future I plan to implement a convenience method for placing rectangles (or other polygon shapes) randomly within an `Area` using a Poisson point process to determine the centerpoints of the polygons.
# ```
# ## `time_penalty` parameter
# The time penalty is meant to reflect the amount of time added to the search time to record any particular `Feature` object when it is found.
#
# This parameter requires some knowledge or intuition about the recording methods that are (or could be) used in the field. For example, if special labeling or curation procedures are to be applied to some class of artifacts, that might justify a greater time penalty for that `Layer` of artifacts. Recall though that this parameter is applied to all `Features` that make up a `Layer`, so take care to include only `Feature` objects for which this `time_penalty` value holds.
#
# Let's revisit the last example we saw. Here, we specify the `time_penalty` parameter of the `Layer` as a truncated normal distribution with a mean of 10, a standard deviation of 7, lower bound at 0, and upper bound at 50.
# +
area_from_shp = prospect.Area.from_shapefile(
name="area_shp",
path="./data/demo_area.shp"
)
layer_from_matern = prospect.Layer.from_matern_points(
parent_rate=0.001,
child_rate=10,
radius=5,
name="demo_layer_from_matern",
area=area_from_shp,
assemblage_name="demo_assemblage",
time_penalty=prospect.utils.truncnorm(mean=10, sd=7, lower=0, upper=50),
ideal_obs_rate=beta(9, 1)
)
# -
# We can check that the `time_penalty` column of the `<Layer>.df` attribute is indeed a `scipy` distribution.
layer_from_matern.df['time_penalty'].head()
# ## `ideal_obs_rate` parameter
# Of all the `prospect` parameters, the ideal observation rate is perhaps the most difficult to define. It represents the frequency with which an artifact or feature will be recorded, assuming the following ideal conditions:
# - It lies inside or intersects the `Coverage`
# - Surface visibility is 100%
# - The surveyor's skill is 1.0
#
# These assumptions are important to consider further. The ideal observation rate is specified here solely as a property of the materials (i.e., artifacts or features) themselves, unrelated to the distance from the observer, surface visibility, or surveyor skill. These other factors are all accounted for in other parts of the simulation, so users should avoid replicating that uncertainty here. For most `Layer` objects, this value should probably be 1.0 or close to 1.0, but there are some scenarios where you might want to consider an alternate value. For instance:
# - If the `Layer` represents extremely small artifacts (e.g., beads, tiny stone flakes) that are hard to observe even in the best conditions.
# - If the `Layer` represents artifacts or features that are difficult to differentiate from the surface "background" in a particular context. For example, in a gravelly area, ceramic sherds can be difficult to differentiate from rocks. A major caveat here is that this "background noise" is sometimes considered in surface visibility estimations, so the user should take care not to duplicate that uncertainty if it is already accounted for in the `Area` building block.
#
# Let's look at the `Layer` from above once again.
# +
area_from_shp = prospect.Area.from_shapefile(
name="area_shp",
path="./data/demo_area.shp"
)
layer_from_matern = prospect.Layer.from_matern_points(
parent_rate=0.001,
child_rate=10,
radius=5,
name="demo_layer_from_matern",
area=area_from_shp,
assemblage_name="demo_assemblage",
time_penalty=prospect.utils.truncnorm(mean=10, sd=7, lower=0, upper=50),
ideal_obs_rate=beta(9, 1)
)
# -
# By setting the `ideal_obs_rate` parameter to a Beta distribution (`scipy.stats.beta(9, 1)`), we are saying, for example, that if there were 10 artifacts of this type in an area, even a highly-skilled surveyor in perfect visibility conditions would only discover 9 of them most of the time.
# ## Creating an `Assemblage` from `Layer` objects
# An `Assemblage` is merely a collection of `Layer` objects. You can pass your previously-created `Layer` objects in a list to the `Assemblage` constructor. We'll pass it all of the `Layer` objects we created above.
demo_assemblage = prospect.Assemblage(
name="demo_assemblage",
area_name="area_shp",
layer_list=[
layer_from_list,
layer_from_shp,
layer_from_pseudo_rand,
layer_from_poisson,
layer_from_thomas,
layer_from_matern
]
)
# We can see that all of the `Feature` objects from the various `Layer` objects are part of one `Assemblage` object.
demo_assemblage.df.head(10)
| prospect-guide/_build/jupyter_execute/blocks/Assemblage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import nolearn
import matplotlib.pyplot as plt
import seaborn
import sklearn.linear_model as lm
import scipy.stats as sps
import math
from Bio import SeqIO
from collections import Counter
from decimal import Decimal
from lasagne import layers, nonlinearities
from lasagne.updates import nesterov_momentum
from lasagne import layers
from nolearn.lasagne import NeuralNet
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor, GradientBoostingRegressor
from sklearn.cross_validation import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.svm import SVR
seaborn.set_style('white')
seaborn.set_context('poster')
# %matplotlib inline
# -
# Read in the protease inhibitor data.
# NOTE: the `widths` list previously built here ([8] + [4]*8 + [4]*99) was a
# leftover from a fixed-width reader (pd.read_fwf) and was never used after
# switching to read_csv, so it has been removed.
data = pd.read_csv('hiv-nnrt-data.csv', index_col='SeqID')
# First four columns are drug-resistance measurements; the remainder are
# per-position sequence features.
drug_cols = data.columns[0:4]
feat_cols = data.columns[4:]
# +
# Read in the consensus data
consensus = SeqIO.read('hiv-rt-consensus.fasta', 'fasta')
# Map 1-based sequence position -> consensus residue letter.
consensus_map = {i+1:letter for i, letter in enumerate(str(consensus.seq))}
# +
# Normalize the raw sequence feature columns:
#   '-' means "same as consensus" -> substitute the consensus residue,
#   '.' and 'X' mark missing/ambiguous calls -> convert to NaN.
# The replacements are applied sequentially, as in the original notebook.
for position, col in enumerate(feat_cols):
    cleaned = data[col].replace({'-': consensus_map[position + 1]})
    cleaned = cleaned.replace({'.': np.nan})
    cleaned = cleaned.replace({'X': np.nan})
    data[col] = cleaned
# -
# Drop any rows that have NaN in the feature columns -- we don't want
# low-quality sequences.  (Note: despite the original wording, this drops
# ROWS, not columns; `subset=feat_cols` only restricts which columns are
# inspected for NaN.)
data.dropna(inplace=True, subset=feat_cols)
data
# +
# Drop any feature columns that are completely conserved (a single unique
# value carries no signal).  The surviving column names are kept in
# `nonconserved_cols` as a convenient selector for the X-data from the
# original dataframe.
nonconserved_cols = []
for col in feat_cols:
    if len(pd.unique(data[col])) > 1:
        nonconserved_cols.append(col)
    else:
        data.drop(col, axis=1, inplace=True)
# -
drug_cols
# +
def x_equals_y(y_test):
    """Return a range spanning floor(min(y_test)) .. ceil(max(y_test)) - 1.

    Used below to draw the red y = x reference line on the actual-vs-
    predicted scatter plots.
    """
    lo = math.floor(min(y_test))
    hi = math.ceil(max(y_test))
    return range(lo, hi)

# Quantization template: two decimal places for the MSE annotations.
TWOPLACES = Decimal(10) ** -2
# +
# Select one drug (column index 3) and assemble its modeling frame.
colnum = 3
drug_df = pd.DataFrame()
drug_df[drug_cols[colnum]] = data[drug_cols[colnum]]
drug_df[nonconserved_cols] = data[nonconserved_cols]
# Multi-character calls are residue mixtures; mark them NaN so the dropna
# below removes those rows.
for col in nonconserved_cols:
    drug_df[col] = drug_df[col].apply(lambda x: np.nan if len(x) > 1 else x)
drug_df.dropna(inplace=True)
drug_X = drug_df[nonconserved_cols]
# Log-transform the resistance fold-change target.
drug_Y = drug_df[drug_cols[colnum]].apply(lambda x:np.log(x))
# drug_Y.values
# +
from isoelectric_point import isoelectric_points
from molecular_weight import molecular_weights
# Standardize pI matrix. 7 is neutral
drug_X_pi = drug_X.replace(isoelectric_points)
# Standardize MW matrix.
drug_X_mw = drug_X.replace(molecular_weights)
# One-hot encode the residue letters, one LabelBinarizer per column.
from sklearn.preprocessing import LabelBinarizer
drug_X_bi = pd.DataFrame()
binarizers = dict()
for col in drug_X.columns:
    lb = LabelBinarizer()
    binarized_cols = lb.fit_transform(drug_X[col])
    # print(binarized_cols)
    if len(lb.classes_) == 2:
        # Two classes: LabelBinarizer emits a single 0/1 column.
        # print(binarized_cols)
        drug_X_bi[col] = pd.Series(binarized_cols[:,0])
    else:
        # One indicator column per residue class.
        for i, c in enumerate(lb.classes_):
            # print(col + c)
            # print(binarized_cols[:,i])
            drug_X_bi[col + '_' + c] = binarized_cols[:,i]
# NOTE(review): `binarizers` is created but never populated -- presumably
# each fitted `lb` was meant to be stored here; confirm before relying on
# it downstream.
drug_X_bi
# -
# Distribution of the log-transformed resistance values for this drug.
fig = plt.figure(figsize=(3,3))
drug_Y.hist(grid=False)
plt.xlabel('Value')
plt.ylabel('Count')
plt.title('{0} Distribution'.format(drug_cols[colnum]))
# +
# Here, let's try the Random Forest Regressor. This will be the baseline.
x_train, x_test, y_train, y_test = train_test_split(drug_X_bi, drug_Y)
rfr = RandomForestRegressor(n_estimators=500, n_jobs=-1, oob_score=True)
rfr.fit(x_train, y_train)
rfr_preds = rfr.predict(x_test)
print(rfr.score(x_test, y_test), mean_squared_error(rfr_preds, y_test))
rfr_mse = mean_squared_error(rfr_preds, y_test)
# print(rfr.oob_score_)
# Bug fix: the Pearson correlation was computed but its result was silently
# discarded (a bare expression mid-cell); print it like the other model
# cells do.
print(sps.pearsonr(rfr_preds, y_test))
# Actual-vs-predicted scatter with an MSE annotation and a y = x line.
plt.figure(figsize=(3,3))
plt.scatter(y_test, rfr_preds,)
plt.title('{0} Random Forest'.format(drug_cols[colnum]))
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.gca().set_aspect('equal', 'datalim')
plt.annotate(s='mse: {0}'.format(str(Decimal(rfr_mse).quantize(TWOPLACES))), xy=(1,0), xycoords='axes fraction', ha='right', va='bottom')
plt.plot(x_equals_y(y_test), x_equals_y(y_test), color='red')
plt.show()
# -
# Relative feature importance by binarized-feature index.
plt.bar(range(len(rfr.feature_importances_)), rfr.feature_importances_)
plt.xlabel('Position')
plt.ylabel('Relative Importance')
plt.title('{0} Random Forest'.format(drug_cols[colnum]))
# Pair each binarized column name with its importance score.
feat_impt = [(p, i) for p, i in zip(drug_X_bi.columns, rfr.feature_importances_)]
# Rank features by importance, descending.
sorted(feat_impt, key=lambda x:x[1], reverse=True)
# +
# # Here, let's try a parameter grid search, to figure out what would be the best
# from sklearn.grid_search import GridSearchCV
# import numpy as np
# param_grid = [{'n_estimators':[100, 500, 1000],
# #'max_features':['auto', 'sqrt', 'log2'],
# #'min_samples_leaf':np.arange(1,20,1),
# }]
# x_train, x_test, y_train, y_test = train_test_split(fpv_X_bi, fpv_Y)
# rfr_gs = GridSearchCV(RandomForestRegressor(), param_grid=param_grid, n_jobs=-1)
# rfr_gs.fit(x_train, y_train)
# print(rfr_gs.best_estimator_)
# print(rfr_gs.best_params_)
# +
# Try Bayesian Ridge Regression (reuses the split from the forest cell).
# x_train, x_test, y_train, y_test = train_test_split(drug_X_bi, drug_Y)
brr = lm.BayesianRidge()
brr.fit(x_train, y_train)
brr_preds = brr.predict(x_test)
print(brr.score(x_test, y_test), mean_squared_error(brr_preds, y_test))
print(sps.pearsonr(brr_preds, y_test))
brr_mse = mean_squared_error(brr_preds, y_test)
plt.figure(figsize=(3,3))
plt.scatter(y_test, brr_preds)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('{0} Bayesian Ridge'.format(drug_cols[colnum]))
plt.gca().set_aspect('equal', 'datalim')
plt.annotate(s='mse: {0}'.format(str(Decimal(brr_mse).quantize(TWOPLACES))), xy=(1,0), xycoords='axes fraction', ha='right', va='bottom')
# Red y = x reference line.
plt.plot(x_equals_y(y_test), x_equals_y(y_test), color='red')
plt.show()
# +
# Try ARD (Automatic Relevance Determination) regression.
ardr = lm.ARDRegression()
ardr.fit(x_train, y_train)
ardr_preds = ardr.predict(x_test)
ardr_mse = mean_squared_error(ardr_preds, y_test)
plt.figure(figsize=(3,3))
plt.scatter(y_test, ardr_preds)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('{0} ARD Regression'.format(drug_cols[colnum]))
plt.gca().set_aspect('equal', 'datalim')
plt.annotate(s='mse: {0}'.format(str(Decimal(ardr_mse).quantize(TWOPLACES))), xy=(1,0), xycoords='axes fraction', ha='right', va='bottom')
plt.plot(x_equals_y(y_test), x_equals_y(y_test), color='red')
plt.show()
# +
# Try Gradient Boost (same split as above).
# x_train, x_test, y_train, y_test = train_test_split(drug_X_bi, drug_Y)
gbr = GradientBoostingRegressor()
gbr.fit(x_train, y_train)
gbr_preds = gbr.predict(x_test)
print(gbr.score(x_test, y_test), mean_squared_error(gbr_preds, y_test))
print(sps.pearsonr(gbr_preds, y_test))
gbr_mse = mean_squared_error(gbr_preds, y_test)
plt.figure(figsize=(3,3))
plt.scatter(y_test, gbr_preds)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('{0} Grad. Boost'.format(drug_cols[colnum]))
plt.gca().set_aspect('equal', 'datalim')
plt.annotate(s='mse: {0}'.format(str(Decimal(gbr_mse).quantize(TWOPLACES))), xy=(1,0), xycoords='axes fraction', ha='right', va='bottom')
plt.plot(x_equals_y(y_test), x_equals_y(y_test), color='red')
plt.show()
# -
# Feature importances from the boosted model.
plt.bar(range(len(gbr.feature_importances_)), gbr.feature_importances_)
# +
# Try AdaBoost (same split as above).
# x_train, x_test, y_train, y_test = train_test_split(drug_X_bi, drug_Y)
abr = AdaBoostRegressor()
abr.fit(x_train, y_train)
abr_preds = abr.predict(x_test)
print(abr.score(x_test, y_test), mean_squared_error(abr_preds, y_test))
print(sps.pearsonr(abr_preds, y_test))
abr_mse = mean_squared_error(abr_preds, y_test)
plt.figure(figsize=(3,3))
plt.scatter(x=y_test, y=abr_preds)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('{0} AdaBoost'.format(drug_cols[colnum]))
plt.gca().set_aspect('equal', 'datalim')
plt.annotate(s='mse: {0}'.format(str(Decimal(abr_mse).quantize(TWOPLACES))), xy=(1,0), xycoords='axes fraction', ha='right', va='bottom')
plt.plot(x_equals_y(y_test), x_equals_y(y_test), color='red')
plt.show()
# -
# Feature importances from the AdaBoost model.
plt.bar(range(len(abr.feature_importances_)), abr.feature_importances_)
# +
# Support-vector regression, evaluated and plotted exactly like the other
# models above.
svr = SVR()
svr.fit(x_train, y_train)
svr_preds = svr.predict(x_test)
svr_mse = mean_squared_error(svr_preds, y_test)
plt.figure(figsize=(3, 3))
plt.scatter(y_test, svr_preds)
plt.title('{0} SVR'.format(drug_cols[colnum]))
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.gca().set_aspect('equal', 'datalim')
mse_label = 'mse: {0}'.format(str(Decimal(svr_mse).quantize(TWOPLACES)))
plt.annotate(s=mse_label, xy=(1, 0), xycoords='axes fraction', ha='right', va='bottom')
diagonal = x_equals_y(y_test)
plt.plot(diagonal, diagonal, color='red')
plt.show()
# +
# Neural Network 1 Specification: Feed Forward ANN with 1 hidden layer.
# x_train, x_test, y_train, y_test = train_test_split(drug_X_bi, drug_Y)
# Cast everything to float32 for the Theano/Lasagne backend.
x_train = x_train.astype(np.float32)
y_train = y_train.astype(np.float32)
x_test = x_test.astype(np.float32)
y_test = y_test.astype(np.float32)
net1 = NeuralNet(
    layers=[  # three layers: one hidden layer
        ('input', layers.InputLayer),
        ('hidden1', layers.DenseLayer),
        ('dropout1', layers.DropoutLayer),
        #('hidden2', layers.DenseLayer),
        #('dropout2', layers.DropoutLayer),
        ('nonlinear', layers.NonlinearityLayer),
        ('output', layers.DenseLayer),
    ],
    # layer parameters:
    input_shape=(None, x_train.shape[1]),
    # Hidden layer is half the input width.
    hidden1_num_units=math.ceil(x_train.shape[1] / 2),
    hidden1_nonlinearity=nonlinearities.tanh,
    dropout1_p = 0.5,
    #hidden2_num_units=math.ceil(x_train.shape[1] / 2),
    #dropout2_p = 0.5,
    output_nonlinearity=None,  # output layer uses identity function
    output_num_units=1,  # single regression target
    # optimization method:
    update=nesterov_momentum,
    update_learning_rate=0.01,
    update_momentum=0.95,
    regression=True,  # flag to indicate we're dealing with regression problem
    max_epochs=500,  # we want to train this many epochs
    verbose=1,
)
net1.fit(x_train.values, y_train.values)
# +
# Evaluate and plot the neural-network predictions like the other models.
nn1_preds = net1.predict(x_test)
nn1_mse = float(mean_squared_error(nn1_preds, y_test))
plt.figure(figsize=(3,3))
plt.scatter(y_test, nn1_preds)
plt.xlabel('Actual')
plt.ylabel('Predicted')
plt.title('{0} Neural Network'.format(drug_cols[colnum]))
plt.gca().set_aspect('equal', 'datalim')
plt.annotate(s='mse: {0}'.format(str(Decimal(nn1_mse).quantize(TWOPLACES))), xy=(1,0), xycoords='axes fraction', ha='right', va='bottom')
plt.plot(x_equals_y(y_test), x_equals_y(y_test), color='red')
plt.show()
# -
# Predictions are (n, 1); reshape y_test to match for the correlation.
sps.pearsonr(nn1_preds, y_test.reshape(y_test.shape[0],1))
| old_notebooks/Predict HIV Genotype from Phenotype - NNRTs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import sys
sys.path.append("../")
# +
import urban_dictionary_scraper
import torch
import re
import pickle
import wiki_article
import dictionary_definition
import glob
import modeling
import itertools
import random
import pandas as pd
import numpy as np
import datasets
from torch.nn.utils.rnn import pad_sequence
from dataclasses import dataclass
from io import StringIO
from ipywidgets import interact, interactive, fixed, interact_manual
from transformers import AutoModelWithLMHead, AutoTokenizer
from scipy import stats
import hashlib
from collections import OrderedDict
from types import SimpleNamespace
# +
def get_checkpoints(base_dir):
    """Return checkpoint directories under *base_dir*, sorted by step number.

    A lexicographic sort would order checkpoint-1000 before checkpoint-200,
    so the sort key parses the integer after "checkpoint-".
    """
    checkpoint_dirs = glob.glob(f"{base_dir}/checkpoint*")
    checkpoint_dirs.sort(key=lambda x: int(x[(x.index("checkpoint-") + len("checkpoint-")):]))
    return checkpoint_dirs


def evaluate_lm_checkpoints(base_dir, validation_path):
    """Run the LM evaluation on every checkpoint under *base_dir*."""
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    for d in get_checkpoints(base_dir):
        model = AutoModelWithLMHead.from_pretrained(d).to('cuda')
        refined_model_eval = wiki_article.lm_eval(model, tokenizer, validation_path)
        print(f"{d}: {refined_model_eval}")


def evaluate_title_checkpoints(base_dir, validation_path):
    """Run the title evaluation on every checkpoint under *base_dir*.

    Bug fix: stray pasted fragments had been fused into this cell (bare
    `modeling_gpt` / `tokenizer` expressions and a `print(...)` glued onto
    the tokenizer load line, all NameErrors/syntax noise); removed.
    """
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    for d in get_checkpoints(base_dir):
        model = AutoModelWithLMHead.from_pretrained(d).to('cuda')
        refined_model_eval = wiki_article.run_title_evaluation(model, tokenizer, validation_path)
        print(f"{d}: m={refined_model_eval.mean}, v={refined_model_eval.variance}")

# evaluate_lm_checkpoints("models/wikitext_103_stride_512_v0/", "data/wikitext-103-title-train/wiki_title.valid.raw")
#print(glob.glob("models/wikitext_103_stride_512_v0/*"))
# -
# Load the parsed dictionary entries (pre-randomized order).
with open(f"data/en_dictionary_parsed_randomized.pickle", "rb") as f:
    parsed_dictionary = pickle.load(f)
# +
# Collect every headword plus its derivatives as a candidate generation
# blacklist (so generated words are guaranteed novel).
potential_blacklist = set()
for word in parsed_dictionary:
    potential_blacklist.add(word.word)
    potential_blacklist.update(word.derivatives)
print(len(parsed_dictionary))
print(len(potential_blacklist))
# -
# GPT-2 tokenizer extended with the dataset's special separator tokens.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.add_special_tokens(datasets.SpecialTokens.special_tokens_dict())
# Minimal stand-in for the usual argparse namespace.
args = SimpleNamespace()
args.block_size = 768
dataset = datasets.ParsedDictionaryDefinitionDataset(tokenizer, args, None, None, None)
# Tokenized training examples for every dictionary entry, flattened.
flattened_set = list(itertools.chain.from_iterable(dataset._make_examples(tokenizer, e) for e in parsed_dictionary))
# +
# print(f"{len(flattened_set)} from {len(parsed_dictionary)} entries")
# Spot-check tokenization round-trips of the special tokens and one entry.
word = tokenizer.encode("vitellogenin")
print(tokenizer.decode(dataset.bos_token_ids + [1] + dataset.eos_token_ids))
print(tokenizer.decode(tokenizer.encode("<|bod|>\"<|eod|>")))
print(f"\"{tokenizer.decode(dataset.pos_sep_ids)}\"")
tokenizer.decode(dataset._make_examples(tokenizer, parsed_dictionary[0])[0])
# for example in random.choices(flattened_set, k=20):
#     print(tokenizer.decode(example))
# -
# Show the generated training example(s) for one specific entry.
for example in dataset._make_examples(tokenizer, parsed_dictionary[10430]):
    print(tokenizer.decode(example))
# +
# Shuffle the scraped word dict and persist the randomized order.
with open("data/all_words.pickle", "rb") as f:
    # Bug fix: these two loads had been commented out, leaving `items`
    # undefined below (NameError); restored so the shuffle has data.
    words = pickle.load(f)
    items = list(words.items())
    random.shuffle(items)
    items = OrderedDict(items)
with open("data/all_words_randomized.pickle", "wb") as f:
    pickle.dump(items, f, pickle.HIGHEST_PROTOCOL)
# -
# NOTE(review): `words[2]` presumably looks up the entry keyed by 2 --
# confirm the key type against the scraper's output.
urban_dictionary_scraper.UrbanDictionaryDataset._make_examples(tokenizer, words[2])
# +
# Baseline: evaluate the unrefined pretrained GPT-2.
model = AutoModelWithLMHead.from_pretrained("gpt2").to('cuda')
# -
# Bug fix: "urban_dictionary_scraper" had been pasted into the middle of
# the `model` argument name here; restored the plain variable reference.
unrefined_model_eval = wiki_article.run_title_evaluation(model, tokenizer, "wikitext-103-raw/wiki.valid.raw")
unrefined_model_eval
# Evaluate the fine-tuned model on the same validation set.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelWithLMHead.from_pretrained("output_103/").to('cuda')
refined_model_eval = wiki_article.run_title_evaluation(model, tokenizer, "wikitext-103-raw/wiki.valid.raw")
refined_model_eval
# +
sequence = f"\"TITLE\" is a song collaboration by Chinese artist <NAME> and Canadian singer <NAME>, first released independently in March 2020. After gaining popularity amongst the cat community, the single was re-released by major label Columbia Records in May 2020. Pamela describes the song as being originally inspired by her two kittens, Apollo and Bean who once said meow.<bot>"
model = modeling.GPT2LMHeadWithWeightedLossModel.from_pretrained("models/wikitext-103-raw-title-scale-20-lr5e-5").to("cuda")
input = tokenizer.encode(sequence, return_tensors="pt").to('cuda')
generated = model.generate(input, max_length=100, num_return_sequences=100, temperature=1)
print(f"Prompt text: {sequence}")
for i in range(generated.size()[0]):
    sentence_tokens = generated[i, :].tolist()
    decoded = tokenizer.decode(sentence_tokens)
    # Extract the generated title between the <bot> and <eot> markers.
    m = re.search(r"<bot>(.*?)<eot>", decoded)
    # Bug fixes: stray pasted text ("urban_dictionary_scraper") had been
    # fused onto the end of the `if m:` line (syntax error), and
    # `m.groups(1)` (all groups, with default 1) was used where the single
    # captured title `m.group(1)` was intended.
    if m:
        print(f"{i}) {m.group(1)}")
    else:
        print(f"{i}) Didn't work")
resulting_string = tokenizer.decode(generated.tolist()[0])
# print(resulting_string)
# -
# Sanity-check scraped entries: strip the repeated title prefix (with an
# optional trailing digit and optional "|...|" pronunciation block) from
# each entry string, failing loudly on malformed entries.
# NOTE(review): `entries` is not defined in this excerpt -- it comes from an
# earlier session cell; confirm its provenance before re-running.
for entry in entries:
    m = re.match(r"\s*" + re.escape(entry.title) + r"\d*\s*(\|[^|]*\|)?\s*", entry.entry_str)
    if m:
        trainable_entry = entry.entry_str[m.span()[1]:].strip()
        if not trainable_entry:
            raise RuntimeError(f"Bad entry for {entry.title}: '{entry.entry_str}'")
    else:
        raise RuntimeError(f"Couldn't match {entry.title} on '{entry.entry_str}'")
# +
# Collect all real headwords (upper-cased) from the macOS New Oxford
# American Dictionary body file, to blacklist them during generation.
dictionary_path = "data/com_apple_MobileAsset_DictionaryServices_dictionaryOSX/69b7ab1cf0f75ad16bf6662b0a77fbfd36b7941f.asset/AssetData/New Oxford American Dictionary.dictionary/Contents/Resources/Body.data"
with open(dictionary_path, "rb") as f:
    # Bug fix: a stray "full_dataset = [" fragment (with a dangling "]" on
    # the next line) had been pasted onto the end of this statement, making
    # it a syntax error; only the set comprehension remains.
    valid_words = {e.title.upper() for e in dictionary_definition.DictionaryDefinition.gen_from_apple_dictionary(f)}
# -
# Generate invented words from the fine-tuned dictionary model, excluding
# real dictionary headwords, and dump them to a TSV for inspection.
model = modeling.GPT2LMHeadWithWeightedLossModel.from_pretrained("models/dictionary-scale-10-lr5e-5").to("cuda")
words = dictionary_definition.generate_words(
    tokenizer, model, allow_proper_nouns=False, blacklist=valid_words, num=1000, max_iterations=40
)
words.sort(key=lambda x: x.title)
for w in words:
    print(f"{w} {w.entry_str}")
with open("words.tsv", "w") as f:
    for word in words:
        f.write(f"{word.title}\t{word.entry_str}\n")
# Rebuild the tokenizer with special tokens, derive a blacklist of all real
# words plus their derivatives, and load a fine-tuned checkpoint.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.add_special_tokens(datasets.SpecialTokens.special_tokens_dict())
blacklist = set((x.lower() for x in itertools.chain.from_iterable(
    [e.word] + e.derivatives
    for e in pickle.load(open(f"data/en_dictionary_parsed_randomized.pickle", "rb")))
))
model = AutoModelWithLMHead.from_pretrained("models/en_dictionary_parsed_lr_00001/checkpoint-120000").to("cuda:0")
def print_words(words, f):
    """Pretty-print generated word entries to the open text file *f*.

    Each entry renders as:
        word /pos/ [topic]
            definition
            "example" |e|
    followed by a blank line; pos/topic are omitted when falsy, and the
    trailing ``|e|`` flag marks example-expanded entries.
    """
    for word in words:
        word_str = [word.word]
        if word.pos:
            word_str.append(f"/{word.pos}/")
        if word.topic:
            word_str.append(f"[{word.topic}]")
        print(" ".join(word_str), file=f)
        print(f"\t{word.definition}", file=f)
        # Bug fix: this line previously ended with an unbalanced extra ")"
        # (a syntax error).
        print(f"\t\"{word.example}\"{' |e|' if word.from_example_expansion else ''}", file=f)
        print("", file=f)
# NOTE(review): `words` here is produced by the generation cell below --
# these notebook cells are executed out of textual order.
words.sort(key=lambda x: x.word)
with open("words_with_examples.txt", "w") as f:
    print_words(words, f)
# +
# Generate candidate words with example expansion enabled; `stats`
# summarizes the generation/filtering pipeline counts.
words, stats = datasets.ParsedDictionaryDefinitionDataset.generate_words(
    tokenizer, model,
    num=500,
    max_iterations=40,
    blacklist=blacklist,
    do_example_expansion=True,
    # Sampling settings for the primary generation pass.
    generation_args=dict(
        top_k=300,
        num_return_sequences=100,
        max_length=512,
        do_sample=True,
    ),
    # Tighter sampling for the example-expansion pass.
    expansion_generation_overrides=dict(
        top_k=50,
        num_return_sequences=10,
        do_sample=True,
    ),
    num_expansion_candidates=10,
    filter_proper_nouns=True,
)
print(stats)
print()
print_words(words, sys.stdout)
# -
# from datasets import SpecialTokens
# """
# input_str = f"{tokenizer.bos_token}"
# input_str = "<|bod|>corner<|pos|>noun<|bd|>a point or space in a hierarchy that is within the order to which it moves along the axis.<|eod|>"
# input = tokenizer.encode(input_str, return_tensors="pt").to("cuda")
# max_length = 512
#
# generated = model.generate(
# input_ids=input,
# max_length=max_length,
# num_return_sequences=5,
# temperature=1.0,
# top_k=1000,
# pad_token_id=tokenizer.pad_token_id,
# bos_token_id=tokenizer.bos_token_id,
# eos_token_ids=tokenizer.eos_token_id,
# do_sample=True,
# )
#
# break_specials = [
# SpecialTokens.BOS_TOKEN, SpecialTokens.EOS_TOKEN, SpecialTokens.DEFINITION_SEP,
# SpecialTokens.EXAMPLE_SEP, SpecialTokens.TOPIC_SEP, SpecialTokens.POS_SEP
# ]
# break_special_ids = [tokenizer.encode(e, add_prefix_space=False)[0] for e in break_specials]
# break_special_token_map = {s: i for s, i in zip(break_specials, break_special_ids)}
#
#
# for i in range(generated.size()[0]):
# sentence_tokens = generated[i, :].tolist()
#
#
# accum = []
# last_special = None
# sep_map = {}
# for token_id in sentence_tokens:
# if token_id in break_special_ids:
# if last_special is not None:
# sep_map[last_special] = accum
# accum = []
# last_special = token_id
# else:
# last_special = token_id
# else:
# accum.append(token_id)
#
# sep_map[last_special] = accum
# accum = []
#
# decode_sep_map = {
# tokenizer.decode([k]): tokenizer.decode(v) for k, v in sep_map.items()
# }
#
# print(decode_sep_map)
#
# # decoded = tokenizer.decode([e for e in sentence_tokens if e != tokenizer.pad_token_id])
# print(decoded)
# """
#
# Tokenizer round-trip sanity checks.
tokenizer.decode(tokenizer.encode("a bc", add_prefix_space=False))
tokenizer.special_tokens_map
# Blacklist of real Urban Dictionary titles, then sample 100 words from a
# fine-tuned weighted-loss model (title weight 40).
blacklist = set(e.title for e in pickle.load(open("data/all_words.pickle", "rb")).values())
model = modeling.GPT2LMHeadWithWeightedLossModel.from_pretrained(
    "models/urban_dictionary_cleaned_top_def_mu02_lr_0_000005_tw40"
).to("cuda")
tw40_words = urban_dictionary_scraper.generate_words(
    tokenizer,
    model,
    blacklist=blacklist,
    num=100,
)
# NOTE(review): `tw1_words` is never defined in this notebook -- it
# presumably survives from an earlier session (a tw1-weighted model run);
# confirm before re-running these dumps.
pickle.dump(tw1_words, open("data/labeling/tw1_words.pickle", "wb"), protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(tw40_words, open("data/labeling/tw40_words.pickle", "wb"), protocol=pickle.HIGHEST_PROTOCOL)
# Combine the two generation runs into one frame, tagging each row with the
# model it came from.
df = pd.DataFrame(
    [
        (
            word.word,
            word.definition,
            # Bug fix: this field read "word.example.replace(," -- a
            # truncated edit that was a syntax error; restored to the plain
            # example text.
            word.example,
            "tw1" if i < len(tw1_words) else "tw2",
        )
        for i, word in enumerate(itertools.chain(
            tw1_words,
            tw40_words
        ))
    ],
    columns=("word", "definition", "example", "dataset")
)
# Shuffle the rows and write them out WITHOUT the dataset column, for blind
# human labeling.
sample = df.sample(frac=1)
sample_no_dataset = sample[:]
sample_no_dataset.to_csv("fun.csv", index=False, columns=["word", "definition", "example"])
interact()
# tokenizer = AutoTokenizer.from_pretrained("gpt2")
# tokenizer.add_special_tokens(datasets.SpecialTokens.special_tokens_dict())
# model = AutoModelWithLMHead.from_pretrained("models/en_dictionary_parsed_lr_00005/checkpoint-50000").to("cuda")
| notebooks/fooling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: text_analysis
# language: python
# name: text_analysis
# ---
# +
# access chat noir api
# based on https://www.uni-weimar.de/medien/webis/events/pan-12/pan12-code/pan12-source-retrieval-baseline.py
import requests
import unicodedata
import simplejson
import sys
from local_settings import TOKEN
CHATNOIR = 'https://www.chatnoir.eu/api/v1/_search'


def pose_query(query, token=TOKEN):
    """Pose *query* to the ChatNoir search engine and return the parsed
    JSON response (top 20 ClueWeb12 hits).

    Exits the process with status 1 on an HTTP error.
    """
    # Double curly braces are escaped curly braces, so that format
    # strings will still work.
    # NOTE(review): `query` is interpolated into the JSON body unescaped --
    # queries containing '"' or '\\' will produce invalid JSON.
    json_query = u"""
    {{
        "apikey": "{apikey}",
        "query": "{query}",
        "index": ["cw12"],
        "size": 20,
        "pretty": true
    }}
    """.format(apikey=token, query=query)
    # Strip non-ASCII characters so the POST body is plain ASCII bytes.
    json_query = \
        unicodedata.normalize("NFKD", json_query).encode("ascii", "ignore")
    try:
        response = requests.post(CHATNOIR, data=json_query)
        print(response)
        results = simplejson.loads(response.text)
        response.close()
        return results
    except requests.HTTPError as e:
        # Bug fix: the original handler used Python 2 syntax
        # ("print >> sys.stderr, ...", a runtime TypeError under Python 3)
        # and called e.read(), which requests' HTTPError does not provide.
        print(e, file=sys.stderr)
        sys.exit(1)
# -
# collect topics from an external collection
# sample relevant documents
# Example query against ChatNoir; `results` feeds the scraping cell below.
q = 'acid stain concrete'
results = pose_query(q)
print(results)
# +
# Fetch each result page through the ChatNoir cache and keep its long
# paragraphs, keyed by page UUID.
from newspaper import Article

cluster = {}
for result in results['results']:
    uuid = result['uuid']
    print(uuid)
    url = 'https://www.chatnoir.eu/cache?uuid=%s&index=cw12&raw&plain' % uuid
    a = Article(url, language='en')
    try:
        a.download()
        a.parse()
        title = a.title
        text = a.text
        # Keep only substantial paragraphs (> 250 characters).
        paragraphs = [p for p in text.split('\n') if len(p) > 250]
        if paragraphs:
            cluster[uuid] = paragraphs
    except Exception:
        # Bug fix: was a bare "except:", which also swallows
        # KeyboardInterrupt/SystemExit.  Download/parse failures are still
        # deliberately skipped (best-effort scraping).
        pass
print(len(cluster), 'pages')
# -
# Flatten the per-page paragraph lists into one list, echoing them for
# inspection.
paras = []
for uuid, paragraphs in cluster.items():
    print(uuid)
    for p in paragraphs:
        print(p, '\n')
        # Bug fix: the sanity check previously tested for the two-character
        # string '/n' (slash-n); the intent is that paragraphs contain no
        # embedded newlines, i.e. '\n'.
        assert '\n' not in p
        paras.append(p)
    print('\n')
print(len(paras))
# +
# dump passages: write the collected paragraphs in the metadata format
# expected by the SQUASH question-generation pipeline.
import datetime
import json
import os

squash_path = '/home/svakule/squash-generation'
key = 'clueweb12'
# Bug fix: os.mkdir was originally called BEFORE `key` was assigned,
# raising NameError; moved after the assignment.
os.mkdir("%s/squash/temp/%s" % (squash_path, key))
# SQUASH sampling settings: nucleus top-p and general/specific mix.
top_p = 0.9
gen_frac = 0.5
spec_frac = 0.8
metadata = {
    "input_text": "\n".join(paras),
    "key": key,
    "timestamp": str(datetime.datetime.now()),
    "settings": {
        "top_p": top_p,
        "gen_frac": gen_frac,
        "spec_frac": spec_frac
    }
}
with open('%s/squash/temp/%s/metadata.json' % (squash_path, key), 'w') as f:
    f.write(json.dumps(metadata))
# python squash/extract_answers.py --key clueweb12
# python question-generation/interact.py --model_checkpoint question-generation/gpt2_corefs_question_generation --model_type gpt2 --key clueweb12 --filename squash/temp/clueweb12/input.pkl
# vim squash/temp/clueweb12/generated_questions.json
| src/clueweb12.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: recycling
# language: python
# name: recycling
# ---
# +
# %matplotlib inline
import sys
sys.path.append('../')
import numpy
import keras
from keras import backend as K
import matplotlib.pyplot as plt
from sklearn.preprocessing import label_binarize
from wlc.WLweakener import computeM, generateWeak, weak_to_index, binarizeWeakLabels
from experiments.visualizations import plot_history
from experiments.visualizations import plot_multilabel_scatter
# Shared categorical colormap and RNG seed for the whole notebook.
cmap = plt.cm.get_cmap('tab20')
random_state = 0
# -
# # Create a dataset with true labels
# +
import sklearn.datasets as datasets
# Digits dataset: 1797 samples, 64 features (8x8 pixels), 10 classes.
X, y = datasets.load_digits(return_X_y=True)
n_classes = 10
n_samples = X.shape[0]
n_features = X.shape[1]
# One-hot encode the true labels.
Y = label_binarize(y, range(n_classes))
# Quick look at the first two pixel features colored by class.
plt.scatter(X[:500,0], X[:500,1], c=y[:500], cmap=cmap)
# -
# # Generate weak labels
#
# This will generate weak labels given the specified mixing process. It will also show 3 plots with the true labels, weak labels and the corresponding rows of the mixing matrix M.
# +
from wlc.WLweakener import weak_to_decimal
# Build the mixing matrix M (rows = possible weak-label subsets, columns =
# true classes): start by giving mass to two fixed alternating weak-label
# patterns (0101... and 1010...), then add a random_weak component.
M_weak = numpy.zeros((2**n_classes, n_classes))
M_weak[weak_to_decimal(numpy.array([([0, 1]*n_classes)[:n_classes]]))] = ([0, 1]*n_classes)[:n_classes]
M_weak[weak_to_decimal(numpy.array([([1, 0]*n_classes)[:n_classes]]))] = ([1, 0]*n_classes)[:n_classes]
M_random_weak = computeM(n_classes, alpha=0.7, beta=0.2, method='random_weak', seed=0)
M_weak += M_random_weak
if M_weak.shape[0] == 2**M_weak.shape[1]:
    # Remove mass from the empty weak-label row (index 0).
    M_weak[0,:] = 0
# Normalize columns so each true class's weak-label distribution sums to 1.
M_weak /= M_weak.sum(axis=0)
print(numpy.round(M_weak, decimals=3))
# Sample weak labels from M and binarize them.
z = generateWeak(y, M_weak, seed=0)
Z = binarizeWeakLabels(z, c=n_classes)
# V_weak holds, per sample, the M row corresponding to its weak label.
M_weak_indices = weak_to_index(Z, method='random_weak')
V_weak = M_weak[M_weak_indices]
# Side-by-side view: true labels, sampled weak labels, and their M rows.
fig = plt.figure(figsize=(15, 4))
ax = fig.add_subplot(1, 3, 1)
_ = plot_multilabel_scatter(X[:100], Y[:100], fig=fig,
                            ax=ax, title='True labels', cmap=cmap)
ax = fig.add_subplot(1, 3, 2)
_ = plot_multilabel_scatter(X[:100], Z[:100], fig=fig,
                            ax=ax, title='Weak labels', cmap=cmap)
ax = fig.add_subplot(1, 3, 3)
_ = plot_multilabel_scatter(X[:100], V_weak[:100], fig=fig,
                            ax=ax, title='M rows', cmap=cmap)
# -
# # Divide into training (weak and true), validation and test
# +
# Split proportions: weak-train / true-train / validation / test.
divide_proportions = numpy.array([0.5, 0.1, 0.1, 0.2])
# Ensure that all proportions sum to 1
divide_proportions /= divide_proportions.sum()
divide_proportions = numpy.cumsum(divide_proportions)
# Convert cumulative proportions to split indices for numpy.array_split.
indices = (divide_proportions*X.shape[0]).astype(int)[:-1]
print('Proportions for the 4 partitions')
print(divide_proportions)
print('Indices of a total of {} samples'.format(X.shape[0]))
print(indices)
# Divide into training (weak and true), validation and test -- all parallel
# arrays are split with the same indices so rows stay aligned.
X_weak_train, X_true_train, X_val, X_test = numpy.array_split(X, indices)
Y_weak_train, Y_true_train, Y_val, Y_test = numpy.array_split(Y, indices)
Z_weak_train, Z_true_train, Z_val, Z_test = numpy.array_split(Z, indices)
V_weak_train, V_true_train, V_val, V_test = numpy.array_split(V_weak, indices)
y_weak_train, y_true_train, y_val, y_test = numpy.array_split(y, indices)
# Optionally keep only a fraction of the weak data (1.0 keeps everything).
train_proportion = 1.0
last_train_index = int(numpy.ceil(train_proportion*X_weak_train.shape[0]))
X_weak_train = X_weak_train[:last_train_index]
Y_weak_train = Y_weak_train[:last_train_index]
Z_weak_train = Z_weak_train[:last_train_index]
V_weak_train = V_weak_train[:last_train_index]
y_weak_train = y_weak_train[:last_train_index]
# Save the final model for each method
final_models = {}
# -
# # Define a common model
# +
from keras.callbacks import EarlyStopping, Callback
from keras import regularizers
max_epochs = 1000
# Callback to show performance per epoch in the same line
class EpochCallback(Callback):
    def on_epoch_end(self, epoch, logs={}):
        # '\r' rewrites the same console line each epoch.
        print('\rEpoch {}, val_loss = {:.2e}, val_acc = {:.2f}'.format(epoch, logs['val_loss'], logs['val_acc']), end=' ')
# Callback for early stopping
epoch_callback = EpochCallback()
early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=int(max_epochs/20),
                               verbose=2, mode='auto', baseline=None,
                               restore_best_weights=True)
def make_model(loss, l2=0.0):
    """Build a single-layer softmax model (multinomial logistic regression)
    with an L2 penalty.

    Careful: it is using the global variables X and Y for the input and
    output shapes.
    """
    numpy.random.seed(0)
    model = keras.models.Sequential()
    model.add(keras.layers.Dense(Y.shape[1], input_dim=X.shape[1],
                                 kernel_regularizer=regularizers.l2(l2),
                                 activation='softmax'))
    model.compile(optimizer='adam', loss=loss, metrics=['ce', 'mse', 'acc'])
    return model
# Keyword arguments for the fit function
fit_kwargs = dict(validation_data=(X_val, Y_val), epochs=max_epochs, verbose=0,
                  callbacks=[early_stopping, epoch_callback], shuffle=True)
# -
# # Fully supervised (upperbound)
#
# Train with all true labels
# +
# Fully supervised upper bound: train on ALL data with TRUE labels,
# sweeping the L2 strength and keeping the best model by validation loss.
train_method = 'Supervised'
l2_list = numpy.array([0.0, 1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1])
model_supervised_list = []
val_losses = numpy.zeros_like(l2_list)
for i, l2 in enumerate(l2_list):
    model = make_model('categorical_crossentropy', l2=l2)
    history = model.fit(numpy.concatenate([X_weak_train, X_true_train]),
                        numpy.concatenate([Y_weak_train, Y_true_train]),
                        **fit_kwargs)
    plot_history(history, model, X_test, y_test)
    model_supervised_list.append(model)
    # Early stopping restores best weights; record that epoch's val loss.
    best_epoch = numpy.argmin(model.history.history['val_loss'])
    val_losses[i] = model.history.history['val_loss'][best_epoch]
    plt.show()
# Validation loss as a function of L2 strength.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(l2_list, val_losses, 'o-')
ax.set_xscale("symlog")
best_supervised = numpy.argmin(val_losses)
final_models[train_method] = model_supervised_list[best_supervised]
# The chosen l2 is reused by all subsequent methods.
l2 = l2_list[best_supervised]
print('Best l2 = {}'.format(l2))
# -
# # Our method with EM and original M
#
# Train EM with all weak labels
# +
def EM_log_loss(y_true, y_pred):
    """EM-style log loss for weak labels.

    y_true carries each sample's M row (a "virtual" label).  The E-step
    posterior Q/sum(Q) is computed in-graph and frozen with stop_gradient
    so the M-step reduces to a weighted log loss against it.
    """
    y_pred = K.clip(y_pred, K.epsilon(), 1.0-K.epsilon())
    Q = y_true * y_pred
    Z_em_train = Q / K.sum(Q, axis=-1, keepdims=True)
    out = -K.stop_gradient(Z_em_train)*K.log(y_pred)
    return K.mean(out, axis=-1)
model = make_model(EM_log_loss, l2=l2)
# Stack the weak mixing matrix with the supervised (identity-like) block,
# each weighted by its partition's share of the training samples.
M_true = computeM(n_classes, method='supervised')
q_weak = X_weak_train.shape[0] / (X_weak_train.shape[0] + X_true_train.shape[0])
q_true = X_true_train.shape[0] / (X_weak_train.shape[0] + X_true_train.shape[0])
M = numpy.concatenate((q_weak*M_weak, q_true*M_true))
# True-label rows sit after the weak rows, hence the index offset.
M_true_indices = weak_to_index(Y_true_train, method='supervised') + M_weak.shape[0]
V_true_train = M[M_true_indices]
history = model.fit(numpy.concatenate([X_weak_train, X_true_train]),
                    numpy.concatenate([V_weak_train, V_true_train]),
                    **fit_kwargs)
plot_history(history, model, X_test, y_test)
final_models['EM original M'] = model
# -
# # Our method with EM and estimated M
# +
from wlc.WLweakener import estimate_M
# Same EM training, but with M estimated from the true-label partition
# (partial regularization informed by the weak labels) instead of the
# ground-truth mixing matrix.
M_estimated = estimate_M(Z_true_train, Y_true_train, range(n_classes), reg='Partial', Z_reg=Z_weak_train)
M_true = computeM(n_classes, method='supervised')
q_estimated = X_weak_train.shape[0] / (X_weak_train.shape[0] + X_true_train.shape[0])
q_true = X_true_train.shape[0] / (X_weak_train.shape[0] + X_true_train.shape[0])
M = numpy.concatenate((q_estimated*M_estimated, q_true*M_true))
M_estimated_indices = weak_to_index(Z_weak_train, method='random_weak')
# NOTE: this overwrites V_weak_train with rows of the *estimated* M.
V_weak_train = M_estimated[M_estimated_indices]
M_true_indices = weak_to_index(Y_true_train, method='supervised') + M_estimated.shape[0]
V_true_train = M[M_true_indices]
model = make_model(EM_log_loss, l2=l2)
history = model.fit(numpy.concatenate([X_weak_train, X_true_train]),
                    numpy.concatenate([V_weak_train, V_true_train]),
                    **fit_kwargs)
plot_history(history, model, X_test, y_test)
final_models['EM estimated M'] = model
# -
# # Fully weak (lowerbound)
# +
# Lower bound: train directly on the weak labels as if they were true
# (true labels are still used for the true-train partition).
model = make_model('categorical_crossentropy', l2=l2)
history = model.fit(numpy.concatenate([X_weak_train, X_true_train]),
                    numpy.concatenate([Z_weak_train, Y_true_train]),
                    **fit_kwargs)
plot_history(history, model, X_test, y_test)
final_models['Weak'] = model
# +
def OSL_log_loss(y_true, y_pred):
    """Optimistic Superset Learning loss.

    Among the classes allowed by the weak label ``y_true``, treat the one the
    model currently rates highest as the "true" target (ties share the mass
    equally) and apply cross-entropy against it.

    NOTE: relies on the module-level ``n_classes`` because the symbolic shape
    of ``y_pred`` is not available inside the reshape.
    """
    preds = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
    weighted = y_true * preds
    # Broadcast each row's maximum back to full width via repeat + reshape.
    row_max = K.reshape(K.repeat_elements(K.max(weighted, axis=-1), n_classes, 0),
                        (-1, n_classes))
    # One-hot (or multi-hot on ties) mask of the per-row maxima, renormalised.
    osl_target = K.cast(K.equal(weighted, row_max), preds.dtype)
    osl_target = osl_target / K.sum(osl_target, axis=-1, keepdims=True)
    # The pseudo-target is held fixed (stop_gradient) while preds are trained.
    return K.mean(-K.stop_gradient(osl_target) * K.log(preds), axis=-1)
model = make_model(OSL_log_loss, l2=l2)
# Train with the OSL loss on the concatenated weak + true training sets.
history = model.fit(numpy.concatenate([X_weak_train, X_true_train]),
                    numpy.concatenate([Z_weak_train, Y_true_train]),
                    **fit_kwargs)
plot_history(history, model, X_test, y_test)
final_models['OSL'] = model
# -
# Plot every model's validation-accuracy curve, with its test-set accuracy as
# a dashed horizontal line in the matching colour.
plt.figure(figsize=(15, 4))
lowest_acc = 1.0
highest_acc = 0.0
for i, (key, model) in enumerate(sorted(final_models.items())):
    # Decreasing line width so overlapping curves remain distinguishable.
    lw = (len(final_models)+5 - i)/5
    p = plt.plot(model.history.history['val_acc'], lw=lw, label='Val. ' + key)
    test_acc = numpy.mean(model.predict_classes(X_test) == y_test)
    print('{} : {}'.format(key, test_acc))
    plt.axhline(y=test_acc, color=p[0].get_color(), lw=lw, linestyle='--')
    # Track the observed accuracy range (idiomatic min/max instead of the
    # original conditional expressions).
    lowest_acc = min(lowest_acc, test_acc)
    highest_acc = max(highest_acc, test_acc)
plt.title('Validation accuracy (dashed for test set)')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
# Zoom the y-axis to the observed range plus a 10% margin on each side.
range_acc = highest_acc - lowest_acc
plt.ylim([lowest_acc-range_acc*0.1, highest_acc+range_acc*0.1])
plt.legend()
| notebooks/Example_05_full_em_osl_digits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PythonData
# language: python
# name: pythondata
# ---
# ## Observations and Insights
#
# Dependencies and Setup
import matplotlib.pyplot as plt # pyplot is a state-based, MATLAB-style plotting interface to matplotlib
import pandas as pd
import scipy.stats as st
import numpy as np
# %matplotlib inline
import dataframe_image as dfi
# Observations:
# According to graph “Linear Regression Model of Mouse Weight vs Tumor Volume”, mouse weights and their tumor volumes have 0.84 correlation coefficient which is closely to be a perfect positive correlation
# According to graph “Line Plot - Capomulin Treatment Results of Mouse s185”, Capomulin regimen has significant efficacy on reducing tumor volume for Mouse s185.
# According to graph “BoxPlot - Final Tumor Volume of each Regimens” Capomulin regimen treatment results has the lowest standard error among Capomulin, Ramicane, Infubinol, and Ceftamin.
# +
# pip install dataframe_image
# +
# Notes: https://stackoverflow.com/questions/17071871/how-to-select-rows-from-a-dataframe-based-on-column-values
# +
# Study data files (paths are relative to the notebook's working directory)
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
# display dataframe up to Row 4 by using "iloc[]" (display is the notebook built-in).
display(mouse_metadata.iloc[0:5])
study_results = pd.read_csv(study_results_path)
# print only the first few rows of the dataframe by using "head()"
study_results.head()
# +
# Combine the two datasets into a single dataset with "on" and "how"
# Left join on "Mouse ID": every metadata row is kept and paired with its
# study measurements.
merged_dataset = pd.merge(mouse_metadata, study_results, on = "Mouse ID", how= "left")
# Display the data table for preview
display(merged_dataset)
# Combine the two datasets into a single dataset WITHOUT "on" and "how".
# Without them, pandas joins on every shared column name ("Mouse ID" here) and
# defaults to an inner join — needed when the key columns differ in name or
# when rows missing from one side must be dropped/kept explicitly.
pd.merge(mouse_metadata, study_results)
# -
# Check the number of mice.
# print the length of "Mouse ID" that only count each mouse ID once.
len(merged_dataset["Mouse ID"].unique())
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
# loc vs. iloc:
# "loc" is label-based (need to specify rows or columns by their names)
# "iloc" is integer index-based (rows or columns need to be specified by their integer index)
# duplicated(subset=...) flags every repeat of a (Mouse ID, Timepoint) pair
# after its first occurrence; .loc then pulls the Mouse IDs of those rows.
duplicate_mouse_id = merged_dataset.loc[merged_dataset.duplicated(subset=['Mouse ID', 'Timepoint']),'Mouse ID'].unique()
duplicate_mouse_id
# Mouse ID g989 has duplicated mouse ID and timepoints
# Optional: Get all the data for the duplicate mouse ID
# "isin" drops duplicate_mouse_id.
# isin(duplicate_mouse_id) == False, DON'T keep data identified as "duplicate_mouse_id"
# isin(duplicate_mouse_id) == True, KEEP ONLY data identified as "duplicate_mouse_id"
# refer to https://www.geeksforgeeks.org/python-pandas-dataframe-isin/ for examples.
duplicate_mouse_data = merged_dataset[merged_dataset['Mouse ID'].isin(duplicate_mouse_id)==True]
duplicate_mouse_data
# Another way to get all the data for the duplicate mouse ID is by using "loc" to identify the rows with mouse ID "g989"
#duplicate_mouse_data = merged_dataset.loc[merged_dataset["Mouse ID"] == "g989"]
# duplicate_mouse_data
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
clean_merged_dataset = merged_dataset[merged_dataset['Mouse ID'].isin(duplicate_mouse_id)==False]
clean_merged_dataset
# clean_merged_dataset dropped 13 rows which contain duplicated Mouse ID g989
# Checking the number of mice in the clean DataFrame.
len(clean_merged_dataset["Mouse ID"])
# this gives the mouse ID counts, but not necessarily the number of the mice, because each mouse ID appears multiple times.
# Another way to get the number of mice is by using "count()"
# clean_merged_dataset["Mouse ID"].count()
# To get the number of mouse ID representing the number of mice in the clean DataFrame, add "unique()".
len(clean_merged_dataset["Mouse ID"].unique())
# one mouse ID has been dropped. Total number of mouse ID decrease to 248 from 249.
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen:
# mean, median, variance, standard deviation, and SEM of the tumor volume.
# use groupby to group data based on regimen type and then get the mean of the tumor volumes within the regimen group.
# NOTE(review): frame-wide .mean()/.median()/... on a groupby with non-numeric
# columns is deprecated in newer pandas; select the column first
# (groupby(...)["Tumor Volume (mm3)"].mean()) if upgrading.
regimen_mean = round(clean_merged_dataset.groupby('Drug Regimen').mean()["Tumor Volume (mm3)"], 2)
regimen_median = round(clean_merged_dataset.groupby('Drug Regimen').median()["Tumor Volume (mm3)"], 2)
regimen_var = round(clean_merged_dataset.groupby('Drug Regimen').var()["Tumor Volume (mm3)"], 2)
regimen_std = round(clean_merged_dataset.groupby('Drug Regimen').std()["Tumor Volume (mm3)"], 2)
regimen_sem = round(clean_merged_dataset.groupby('Drug Regimen').sem()["Tumor Volume (mm3)"], 2)
# round() function rounds up to the number of decimals of your choice.
# Assemble the resulting series into a single summary dataframe.
summary_table = pd.DataFrame({"Mean":regimen_mean,
                              "Median": regimen_median,
                              "Variance":regimen_var,
                              "Standard D": regimen_std,
                              "Standard Error": regimen_sem})
summary_table
# DataFrame comes out to be with column heads on different lines.
# To put column heads on the same line, use "reset_index()"
summary_table = summary_table.reset_index()
# dataframe tables can be saved as images (png, jpg, etc.).
# but before being able to do so, dataframe_image needs to be installed.
# dataframe_image can be installed directly by "pip install dataframe_image" in a notebook cell inside the same jupyter notebook.
# before saving the tables, create a folder that you want to save the images to, otherwise the image will be saved to the same folder as where this jupyter notebook is.
dfi.export(summary_table, 'My HW Images/summary_table.jpg')
summary_table
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Using the aggregation method, produce the same summary statistics in a single line
aggregated_summary_table = round(
    clean_merged_dataset.groupby("Drug Regimen").
    agg({"Tumor Volume (mm3)": ["mean", "median", "var", "std", "sem"]}
    ), 2)
aggregated_summary_table = aggregated_summary_table.reset_index()
dfi.export(aggregated_summary_table, 'My HW Images/aggregated_summary_table.jpg')
aggregated_summary_table
# -
# ## Bar and Pie Charts
# +
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pandas.
pandas_treatment_counts = clean_merged_dataset['Drug Regimen'].value_counts()
# value_counts() counts how many times each regimen appears in "Drug Regimen",
# i.e. the number of measurements recorded per regimen.
display(pandas_treatment_counts)
pandas_treatment_counts.plot.bar(color = 'red', alpha = 0.5)
# alpha gives the shade of the color, ranging between 0-1
plt.title('Pandas Bar "Counts of Each Regimen"')
plt.xlabel("Drug Regimen")
plt.ylabel("Regimen Counts")
plt.savefig("My HW Images/pandas_treatment_counts.jpg", bbox_inches ="tight")
# 'bbox_inches ="tight" ' makes the graph fit to the image when saved
# +
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot.
pyplot_treatment_counts = clean_merged_dataset['Drug Regimen'].value_counts()
# Rotate x labels so the regimen names do not overlap.
plt.xticks(rotation=90)
plt.bar(pyplot_treatment_counts.index.values,
        pyplot_treatment_counts.values,
        color = ['yellow', 'red', 'green', 'blue', 'cyan', 'orange', 'pink', 'purple', 'brown', 'magenta'])
plt.title('Pyplot Bar "Counts of Each Regimen"')
plt.xlabel("Drug Regimen")
plt.ylabel("Regimen Counts")
plt.savefig("My HW Images/pyplot_treatment_counts.jpg", bbox_inches ="tight")
# +
# Generate a pie plot showing the distribution of female versus male mice using pandas
pandas_sex_counts = clean_merged_dataset['Sex'].value_counts()
# explode offsets the second wedge slightly; autopct prints the percentages.
pandas_sex_counts.plot.pie(colors = ["orange", "pink"], explode=[0, 0.05], autopct= "%0.01f%%", startangle=180)
# color has to be plural, 'colors'
plt.title('Pandas Pie "Mouse Sex Counts"')
plt.savefig("My HW Images/pandas_mouse_sex_counts.jpg")
# +
# Generate a pie plot showing the distribution of female versus male mice using pyplot
sex_counts = clean_merged_dataset['Sex'].value_counts()
display(sex_counts.index.values)
display(sex_counts.values)
# plt.pie takes the values first; labels come from the value_counts index.
plt.pie(sex_counts.values, explode=[0.05, 0.05],
        colors = ["cyan", "green"],
        labels=sex_counts.index.values,
        autopct= "%1.1f%%",
        shadow = True)
plt.title('Pyplot Pie "Mouse Sex Counts"')
plt.savefig("My HW Images/pyplot_mouse_sex_counts.jpg")
# -
# # Quartiles, Outliers and Boxplots
# I have spent a long time figuring it out and am still confused. I need help and will come back to work on it more.
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
target_regimen = clean_merged_dataset.loc[clean_merged_dataset['Drug Regimen'].isin(['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin'])]
display(target_regimen)
# Start by getting the last (greatest) timepoint for each mouse
# NOTE(review): this keeps only mice that reached timepoint 45; mice that
# dropped out earlier are excluded.  groupby("Mouse ID").max()["Timepoint"]
# would keep every mouse's true last timepoint — confirm which is intended.
target_timepoint = target_regimen.loc[target_regimen['Timepoint'] == 45]
# can be done by pd.DataFrame(target_regimen.groupby("Mouse ID").max()["Timepoint"]) as well
display(target_timepoint.sort_values(['Drug Regimen'], ascending=True))
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
# Columns shared by both frames (other than the join keys) get _x/_y suffixes.
merge_df = pd.merge(target_timepoint, clean_merged_dataset, on = ("Mouse ID", "Timepoint"), how = "left")
display(merge_df.sort_values(['Drug Regimen_x'], ascending=True))
# +
# Put treatments into a list for for loop (and later for plot labels)
target_regimen_list = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]  # for "for loop" and plot labels
# Create empty list to fill with tumor vol data (for plotting)
target_tumor_vol = []  # for plotting
for regimen in target_regimen_list:
    # BUG FIX: the original grabbed the whole "Tumor Volume (mm3)_x" column on
    # every pass (it never filtered by regimen) and never appended to
    # target_tumor_vol.  Select only the current regimen's rows and collect them.
    target_regimen_tumor_volume = merge_df.loc[merge_df["Drug Regimen_x"] == regimen, "Tumor Volume (mm3)_x"]
    target_tumor_vol.append(target_regimen_tumor_volume)
    print(target_regimen_tumor_volume)
# +
# If the data is in a dataframe, we use pandas to give quartile calculations
# NOTE(review): target_regimen_tumor_volume is whatever the preceding loop left
# behind on its last iteration, so these quartiles describe a single series —
# the proper per-regimen version appears further down.
quartiles = target_regimen_tumor_volume.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
print(f"The lower quartile of final tumor volume is: {lowerq}")
print(f"The upper quartile of final tumor volume is: {upperq}")
print(f"The interquartile range of final tumor volume is: {iqr}")
print(f"The the median of final tumor volume is: {quartiles[0.5]} ")
# Tukey's rule: values beyond 1.5*IQR from the quartiles are potential outliers.
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
print(f"Values below {lower_bound} could be outliers.")
print(f"Values above {upper_bound} could be outliers.")
# +
# NOTE: [] around column heads, but no [] around items under the column heads
capomulin_volumes = clean_merged_dataset.loc[clean_merged_dataset['Drug Regimen'] == 'Capomulin']
ramicane_volumes = clean_merged_dataset.loc[clean_merged_dataset['Drug Regimen'] == 'Ramicane']
infubinol_volumes = clean_merged_dataset.loc[clean_merged_dataset['Drug Regimen'] == 'Infubinol']
ceftamin_volumes = clean_merged_dataset.loc[clean_merged_dataset['Drug Regimen'] == 'Ceftamin']
regimens = [capomulin_volumes, ramicane_volumes, infubinol_volumes, ceftamin_volumes]
# +
# Locate the rows which contain mice on each drug and get the tumor volumes
capomulin_final_tumor_vol = clean_merged_dataset.loc[clean_merged_dataset["Drug Regimen"] == 'Capomulin', 'Tumor Volume (mm3)']
ramicane_final_tumor_vol = clean_merged_dataset.loc[clean_merged_dataset["Drug Regimen"] == 'Ramicane', 'Tumor Volume (mm3)']
infubinol_final_tumor_vol = clean_merged_dataset.loc[clean_merged_dataset["Drug Regimen"] == 'Infubinol', 'Tumor Volume (mm3)']
ceftamin_final_tumor_vol = clean_merged_dataset.loc[clean_merged_dataset["Drug Regimen"] == 'Ceftamin', 'Tumor Volume (mm3)']
# BUG FIX: the original called e.g. capomulin_final_tumor_vol.append(itself)
# and discarded the result — a no-op (Series.append returns a new object and
# is removed in pandas 2.0), so those four calls were deleted.
# NOTE(review): despite the "_final_" names, these series hold tumor volumes
# from EVERY timepoint, not just each mouse's last one — confirm whether the
# final-timepoint subset (merge_df) should be used instead.
all_final_tumor_vol = [capomulin_final_tumor_vol,
                       ramicane_final_tumor_vol,
                       infubinol_final_tumor_vol,
                       ceftamin_final_tumor_vol]
# +
# Determine outliers using upper and lower bounds (Tukey's 1.5*IQR rule).
# BUG FIX: the original computed the bounds in one loop and then printed the
# outliers of only the LAST series under all four regimen names in a second
# loop.  Pair each regimen name with its own series instead.
for regimen, final_tumor_vol in zip(target_regimen_list, all_final_tumor_vol):
    quartiles = final_tumor_vol.quantile([.25,.5,.75])
    lowerq = quartiles[0.25]
    upperq = quartiles[0.75]
    iqr = upperq-lowerq
    lower_bound = lowerq - (1.5*iqr)
    upper_bound = upperq + (1.5*iqr)
    outliers = final_tumor_vol.loc[(final_tumor_vol < lower_bound) | (final_tumor_vol > upper_bound)]
    print(f"{regimen}'s potential outliers: {outliers}")
# +
final_tumor_plot = [capomulin_final_tumor_vol, ramicane_final_tumor_vol, infubinol_final_tumor_vol, ceftamin_final_tumor_vol]
# BUG FIX: the labels must follow the data order above; the original list was
# ["Capomulin", "Ceftamin", "Infubinol", "Ramincane"], which mislabelled three
# of the four boxes (and misspelled "Ramicane").
Regimen_list = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]
colors = ["blue", "green", "red", "black"]
fig2, ax2 = plt.subplots()
ax2.set_title("Final Tumor Volume of each Regimens")
ax2.set_xlabel("Drug Regimens")
ax2.set_ylabel("Final Tumor Volume (mm3)")
ax2.boxplot(final_tumor_plot, labels = Regimen_list, widths = 0.5)
plt.savefig("My HW Images/BoxPlot - Final Tumor Volume of each Regimens.jpg")
plt.show()
# -
# ## Line and Scatter Plots
# pull all data of target mouse treated with Capomulin
# pd.DataFrame(data = {"Counts":regimen_mouse_tumor_count, "Volumes":regimen_mouse_tumor_sum})
capomulin_full_df = clean_merged_dataset.loc[clean_merged_dataset["Drug Regimen"] == "Capomulin"]
target_mouse_df = capomulin_full_df.loc[capomulin_full_df["Mouse ID"] == "s185"]
display(target_mouse_df)
# +
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
timepoint = target_mouse_df["Timepoint"]
s185_tumor_vol = target_mouse_df["Tumor Volume (mm3)"]
s185_Capomulin, ax = plt.subplots()
ax.set_title("Capomulin Treatment Results of Mouse s185")
ax.set_xlabel("Timepoint")
ax.set_ylabel("Tumor Volume (mm3)")
ax.plot(timepoint, s185_tumor_vol, marker='o', linewidth = 3, color='green')
# Fixed axis limits keep the plot comparable between runs.
plt.xlim(0, 50)
plt.ylim(20,50)
s185_Capomulin.savefig("My HW Images/Line Plot - Capomulin Treatment Results of Mouse s185.jpg")
# +
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
# Average each mouse's tumor volume (and weight, constant per mouse) over all timepoints.
capomulin_avg_tumor_vol = capomulin_full_df.groupby(['Mouse ID']).mean()['Tumor Volume (mm3)']
print(capomulin_avg_tumor_vol)
capomulin_avg_weight = capomulin_full_df.groupby(['Mouse ID']).mean()['Weight (g)']
display(capomulin_avg_weight)
plt.scatter(capomulin_avg_weight, capomulin_avg_tumor_vol)
plt.title('Average Tumor Volume vs. Mouse Weight for Capomulin')
plt.xlabel('Weight (g)')
plt.ylabel('Average Tumor Volume (mm3)')
plt.savefig("My HW Images/Scatter Plot - average tumor volume vs. mouse weight for Capomulin .jpg")
plt.show()
# +
# just for fun: 'Average Tumor Volume vs. Mouse Weight for Capomulin' line plot
weight_volume, ax = plt.subplots()
ax.set_title("Weight vs. Tumor Volume")
ax.set_xlabel("Weight (g)")
ax.set_ylabel("Tumor Volume (mm3)")
# NOTE(review): points are in Mouse ID order, not sorted by weight, so the
# connecting line zig-zags; sort by weight first for a cleaner line plot.
ax.plot(capomulin_avg_weight, capomulin_avg_tumor_vol, marker='o', linewidth = 1, color='green')
plt.xlim(12, 26)
plt.ylim(32,47)
# -
# ## Correlation and Regression
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
# resource: https://realpython.com/numpy-scipy-pandas-correlation-python/#example-pandas-correlation-calculation
# get correlation coefficient using pearson's
x = capomulin_avg_weight
y = capomulin_avg_tumor_vol
# Series.corr defaults to the Pearson correlation coefficient.
correlation = x.corr(y)
print(f"The correlation coefficient between mouse average weight and their average tumor volume is: {correlation}")
# another way to get correlation coefficient is corr = st.pearsonr(x, y)
# (pearsonr also returns the two-sided p-value)
st.pearsonr(capomulin_avg_weight, capomulin_avg_tumor_vol)
# +
# Add the linear regression equation and line to plot to create a linear regression model
# for more information and examples, refer to Matplotlib, Folder 3, Activity 9
(slope, intercept, rvalue, pvalue, stderr) = st.linregress(x, y)
# Predicted tumor volume at each observed weight.
regress_values = x * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x, y)
plt.plot(x, regress_values, '--', color = "red")
# Annotate the fitted equation at data coordinates (20, 30).
plt.annotate(line_eq, (20,30), fontsize=15)
plt.xlabel('Weight (g)')
plt.ylabel('Tumor Volume (mm3)')
plt.title("Linear Regression Model of Mouse Weight vs Tumor Volume")
plt.savefig("My HW Images/Linear Regression Model of Mouse Weight vs Tumor Volume.jpg", bbox_inches = "tight")
plt.show()
| pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# En el módulo anterior, se presentó la partición de un conjunto de datos en un conjunto de entrenamiento y otro de prueba. Esta partición te permitió entrenar un conjunto de ejemplos y luego probar el modelo con un conjunto de ejemplos diferente. Con dos particiones, el flujo de trabajo podría verse de la siguiente manera:
#
# 
#
# **Figura 1. ¿Un flujo de trabajo posible?**
#
# En la figura, "Ajustar el modelo" significa modificar cualquier aspecto que puedas imaginar del modelo, desde cambiar la tasa de aprendizaje hasta agregar o quitar atributos, o diseñar un modelo completamente nuevo desde cero. Al final de este flujo de trabajo, eliges el modelo que mejor se desempeñe con respecto al conjunto de prueba.
#
# La división del conjunto de datos en dos conjuntos es una buena idea, pero no constituye una panacea. Puedes reducir en gran medida las posibilidades de sobreajuste al particionar el conjunto de datos en los tres subconjuntos que se muestran en la siguiente figura:
#
# 
#
# **Figura 2. División de un único conjunto de datos en tres subconjuntos.**
#
# Usa el **conjunto de validación** para evaluar los resultados del conjunto de entrenamiento. A continuación, usa el conjunto de prueba para verificar la evaluación después de que el modelo haya "pasado" el conjunto de validación. En la siguiente figura, se muestra el nuevo flujo de trabajo:
#
# 
#
# **Figura 3. Un flujo de trabajo más eficaz.**
#
# En este flujo de trabajo mejorado, realiza lo siguiente:
#
# 1. Selecciona el modelo que mejor se desempeñe con el conjunto de validación.
# 2. Verifica el modelo con respecto al conjunto de prueba.
#
# Este flujo de trabajo es más eficaz porque crea menos exposiciones al conjunto de prueba.
| notebooks/02_Machine_Learning/Teoric/09_Validacion_Otra_particion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Ask for (and ignore) the user's name, then report whether their favorite
# number is divisible by 3.
input("What is your name?")
favorite = int(input("Enter your favorite number"))
if favorite % 3:
    print("Your favorite number is not divisible by 3 :(")
else:
    print("Your favorite number is divisible by 3 :)")
| Divisible.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# <NAME>
# CISC 6210
# Natural Language Processing
# Dr. <NAME>
# HW1 - Poem data and processing
# Sept 9th, 2019
# This file reads in poem data remotely, pre-processes it and saves it as a dataframe
# The df is saved to a file and then re-read in to do data anaylsis on
# -
# ## Imports
# +
# For parsing
import requests
from bs4 import BeautifulSoup
# For data analysis
import re
import pandas as pd
import numpy as np
import nltk
nltk.download('stopwords')
# -
# ## Web Scrape the data
# +
url_prefix = 'https://storm.cis.fordham.edu/~yli/data/LoveOutput/'
# Fetch the url (a directory-listing page of poem .txt files)
response = requests.get(url_prefix)
# +
soup = BeautifulSoup(response.text, "html.parser")
# Skip the first 5 anchors — presumably the listing's boilerplate links
# (parent dir, column-sort links); TODO confirm against the live page.
poem_links = soup.find_all('a')[5:]
print(len(poem_links), "poems")
# -
# ## Functions - Preprocessing
# This is the callable function which calls helper functions
# poem_links --> array of links to poem .txt files
# url_prefix --> url where the data lives
# Returns dataframe of processed poems
def fillPoemDataFrame(poem_links, url_prefix):
    """Fetch every linked poem, clean it, and collect the rows in a DataFrame.

    poem_links : iterable of <a> tags pointing at poem .txt files
    url_prefix : base URL where the poem files live
    Returns a DataFrame with columns Author/Title/Tags/Body/Link.
    """
    # Init the dataframe
    df = pd.DataFrame(columns=['Author', 'Title', 'Tags', 'Body', 'Link'])
    idx = 0
    # Loop, grab, parse, and fill dataframe
    for a_tag in poem_links:
        # Fetch the poem data from the website, retrying once on failure.
        # BUG FIX: the original while/try loop left `text` holding the
        # PREVIOUS link's content (or undefined on the first link) when both
        # attempts failed, silently inserting a duplicate row or crashing.
        text = None
        for _ in range(2):
            try:
                text = _httpRequest(a_tag)
                break
            except Exception:
                continue
        if text is None:
            # Both download attempts failed — skip this link.
            continue
        # Parse and clean the data into a vector row
        new_row = _cleanPoem(text)
        # Skip bad data (no poem body)
        if len(new_row) == 0:
            continue
        # Assign row to dataframe
        df.loc[idx] = new_row
        idx += 1
    return df
# -- Helper function --
# Makes HTTP requests to fetch text data
# Returns the text
def _httpRequest(a_tag):
    """Download one poem's raw text given its <a> tag.

    NOTE(review): relies on the module-level ``url_prefix`` rather than a
    parameter — fine inside this notebook, but worth confirming if reused.
    """
    # Get link and fetch
    link = url_prefix + a_tag['href']
    req = requests.get(link)
    # Get the correct encoding
    # apparent_encoding makes requests guess the charset from the body, which
    # avoids mojibake on pages served without a charset header.
    req.encoding = req.apparent_encoding
    return req.text
# -- Helper function --
# Parses a line in the form "title By author"
# When multiple "By"s, split on the last one seen
# Returns (title, author)
def _parseTitleAuthor(firstline):
title = ""
author = ""
if firstline.count(" By ") == 1:
title, author = firstline.split(" By ")
else:
lineArr = firstline.split(" ")
# reverse until first "By" is found
for i in range(len(lineArr)-1, -1, -1):
if lineArr[i] == "By":
title = " ".join(lineArr[0:i])
author = " ".join(lineArr[i+1:])
break
# Prune non-alpha from beginning and end of author
trimAuth = re.compile(r'^[^a-zA-Z]+|[^a-zA-Z]+$')
author = re.sub(trimAuth, '', author)
return author, title
# -- Helper function --
# Uses regex and string parsing to conform the data
# Splits data into [author, title, tags, body, link]
# Returns an np array of row-poem data to be inserted into df
def _cleanPoem(text_data):
    """Parse one raw poem file into [author, title, tags, body, link].

    The body keeps "[L] " / "[P] " markers for line and paragraph breaks.
    Returns an empty list when the poem has no body so the caller can skip it.
    """
    # Features are split by double line breaks
    feats = text_data.split('\r\n\r\n')
    # First line contains author, title
    author, title = _parseTitleAuthor(feats[0])
    author = author.strip()
    title = title.strip()
    # Second line has tags
    tags = feats[1]
    # Begin body parsing
    # Set up regex for html
    paragraphs = re.compile(r'<br><br>|<p>|</p>')
    lines = re.compile(r'<br>')
    prune_html = re.compile(r'<[^>]*>')
    # Replace with markers, strip html and whitespace
    body = re.sub(paragraphs, '[P] ', feats[2])
    body = re.sub(lines, '[L] ', body)
    body = re.sub(prune_html, '', body)
    body = body.strip()
    # Collapse marker pairs that only wrap non-ASCII decoration into one [P]
    body = re.sub(r'\[L\](\s+)?([^\x00-\x7F]+)?(\s+)?\[L\]','[P] ', body)
    body = re.sub(r'\[P\](\s+)?([^\x00-\x7F]+)?(\s+)?\[P\]','[P] ', body)
    # BUG FIX: str.replace() treats its argument literally, so the original
    # body.replace(u'[\xa0]+', ' ') searched for the 6-character string
    # "[\xa0]+" and never matched.  Use re.sub to collapse runs of
    # non-breaking spaces as intended.
    body = re.sub(u'[\xa0]+', ' ', body)
    # Fix any extra whitespace
    body = re.sub(r'[\s]{2,}', ' ', body)
    # Remove starting markers/whitespace
    body = re.sub(r'^(\[P\]|\[L\])(\s+)?', '', body)
    # Remove trailing markers/whitespace
    body = re.sub(r'((\[P\]\s?)+|(\[L\]\s?)+)$', '', body)
    # Check if body is valid
    if len(body) == 0:
        return []
    # Link is optional and may not be present (narrowed from a bare except).
    try:
        link = feats[3].replace('original link: ', '')
    except IndexError:
        link = None
    # Store as np-array and return
    poem_row = [author, title, tags, body, link]
    poem_row = np.asarray(poem_row, dtype='object')
    return poem_row
# ### Build dataframe
# Download and clean every poem into one DataFrame (network-bound).
df_clean = fillPoemDataFrame(poem_links, url_prefix)
# ## Poem dataframe stats
df_clean.head()
numPoems = df_clean.shape[0]
# Poems skipped by fillPoemDataFrame (empty bodies) account for the difference.
print("Removed", len(poem_links) - numPoems, "poems")
print(numPoems, "total poems in dataframe")
print(df_clean['Author'].nunique(), "total authors")
print("Top 20 authors:\n")
print(df_clean['Author'].value_counts()[:20])
# +
# Sort the df by author frequency
# transform('count') broadcasts each author's row-count back onto every row,
# so sorting by it orders poems by how prolific their author is.
df_clean['Count'] = df_clean.groupby('Author')['Author'].transform('count')
df_clean = df_clean.sort_values(by=['Count'], ascending=False)
# Drop the temporary column
df_clean = df_clean.drop(columns=['Count'])
df_clean.head(3)
# -
# Save as excel file
try:
    df_clean.to_excel("./CleanOutputLoveOutput.xlsx")
    print("Success")
except Exception as e:
    # Report the actual failure instead of a bare "Something went wrong";
    # the original bare except also swallowed KeyboardInterrupt/SystemExit.
    print("Something went wrong:", e)
# # Task 2
# ### Import clean data
# Read clean file into a new dataframe
df_import = pd.read_excel("./CleanOutputLoveOutput.xlsx")
df_import.head(5)
# ## Create new dataframe
data_cols = ['PoemID', 'Author', 'LengthOne', 'LengthTwo', 'NumLine', 'NumPara', 'NumSent', 'NumComma']
df_poem_data = pd.DataFrame(columns=data_cols)
# Set PoemID and Author columns
# PoemID is simply the row index of the imported frame.
df_poem_data['Author'] = df_import['Author']
df_poem_data['PoemID'] = df_import.index
df_poem_data.head(5)
# ## Functions - Statistics
# Tokenize all words, disregarding all punctuation
# For use with df.apply(), for performance
# Returns number of words in poem
def totalWords(row):
    """Count the words in a poem body, ignoring punctuation and the
    [L]/[P] layout markers.  Intended for ``df.apply(..., axis=1)``."""
    text = row['Body'].replace('[L]', '').replace('[P]', '')
    # A "word" is any run of word characters, apostrophes, or hyphens.
    return len(re.findall(r"[\w'’-]+", text))
# Tokenize all words, including all punctuation
# For use with df.apply(), for performance
# Returns number of words and punctuation in poem
def totalWordsAndPunc(row):
    """Count word tokens plus punctuation tokens in a poem body
    ([L]/[P] markers excluded).  Intended for ``df.apply(..., axis=1)``."""
    text = row['Body'].replace('[L]', '').replace('[P]', '')
    # A token is a word (with apostrophes/hyphens) or a run of punctuation.
    pattern = r"[\w'’-]+|[.,!?;:—]+"
    return len(re.findall(pattern, text))
# Count total line breaks + paragraph breaks
def totalLines(row):
body = row['Body']
# Regex to find line and paragraph breaks
r = re.compile(r'\[L\]|\[P\]')
# Find and count them
return len(re.findall(r, body))
# Count only total paragraph breaks
def totalParas(row):
    """Count paragraph breaks ([P] markers) in the poem body."""
    return len(re.findall(r'\[P\]', row['Body']))
# Count total sentences
def totalSent(row):
body = row['Body']
# Remove the markers so they don't interfere
body = body.replace('[L]', '')
body = body.replace('[P]', '')
# Use nltk to get sentences
sentences = nltk.tokenize.sent_tokenize(body)
return len(sentences)
# Count total commas
def totalCommas(row):
body = row['Body']
# Just count commas in string
return body.count(',')
# ## Apply to new dataframe
# +
# ['PoemID', 'Author', 'LengthOne', 'LengthTwo', 'NumLine', 'NumPara', 'NumSent', 'NumComma']
# Apply functions into our new dataframe
# axis=1 passes each poem row to the counting helpers defined above.
df_poem_data['LengthOne'] = df_import.apply(totalWords, axis=1)
df_poem_data['LengthTwo'] = df_import.apply(totalWordsAndPunc, axis=1)
df_poem_data['NumLine'] = df_import.apply(totalLines, axis=1)
df_poem_data['NumPara'] = df_import.apply(totalParas, axis=1)
df_poem_data['NumSent'] = df_import.apply(totalSent, axis=1)
df_poem_data['NumComma'] = df_import.apply(totalCommas, axis=1)
# -
df_poem_data.head()
# ### Statistical data
df_poem_data.describe()
# # Final 3 dataframes
# +
# Init the three tokenization dataframes (same schema for all of them).
token_cols = ['PoemID', 'Author', 'Body', 'Length', 'UniCount']
df_tokenize = pd.DataFrame(columns=token_cols)
df_no_stopwords = pd.DataFrame(columns=token_cols)
df_no_stopwords_stemming = pd.DataFrame(columns=token_cols)
# +
# Set the author and index on all three frames.
df_tokenize['Author'] = df_import['Author']
df_tokenize['PoemID'] = df_import.index
df_no_stopwords['Author'] = df_import['Author']
df_no_stopwords['PoemID'] = df_import.index
df_no_stopwords_stemming['Author'] = df_import['Author']
df_no_stopwords_stemming['PoemID'] = df_import.index
# -
# ## Functions - nltk tokenization, stopwords, stemming
# For use in df_tokenize
# Uses nltk.word_tokenize to create a token list of a poem
def tokenizeNltk(row):
    """Tokenize the poem body into a list of word/punctuation tokens."""
    return nltk.tokenize.word_tokenize(row['Body'])
# For use with df_no_stopwords
# Removes stopwords from the poem, and then tokenizes it
def tokenizeNoStopwords(row):
    """Tokenize the poem body and drop English stopwords."""
    stop_words = set(nltk.corpus.stopwords.words('english'))
    return [tok for tok in nltk.tokenize.word_tokenize(row['Body'])
            if tok not in stop_words]
# For use with df_no_stopwords_stemming
# Removes stopwords from the poem and uses stemming
def tokenizeNoStopwordsStemming(row):
    """Tokenize the poem body, drop English stopwords, and Porter-stem the rest."""
    stop_words = set(nltk.corpus.stopwords.words('english'))
    stemmer = nltk.stem.PorterStemmer()
    return [stemmer.stem(tok)
            for tok in nltk.tokenize.word_tokenize(row['Body'])
            if tok not in stop_words]
# ## Functions - number tokens and vocabulary
# Returns the length of the tokenized list
def getLength(row):
    """Return the number of tokens in an already-tokenized body."""
    return len(row['Body'])
# Returns the length of the *set* of tokens
def getVocabulary(row):
    """Return the number of *distinct* tokens in the row's 'Body' list."""
    return len(set(row['Body']))
# ## Apply to dataframes
# First dataframe (just tokenize words)
df_tokenize['Body'] = df_import.apply(tokenizeNltk, axis=1)
df_tokenize['Length'] = df_tokenize.apply(getLength, axis=1)
df_tokenize['UniCount'] = df_tokenize.apply(getVocabulary, axis=1)
df_tokenize.head()
# Second dataframe (tokenize and remove stopwords)
df_no_stopwords['Body'] = df_import.apply(tokenizeNoStopwords, axis=1)
df_no_stopwords['Length'] = df_no_stopwords.apply(getLength, axis=1)
df_no_stopwords['UniCount'] = df_no_stopwords.apply(getVocabulary, axis=1)
df_no_stopwords.head()
# Third dataframe (remove stopwords and do stemming)
df_no_stopwords_stemming['Body'] = df_import.apply(tokenizeNoStopwordsStemming, axis=1)
df_no_stopwords_stemming['Length'] = df_no_stopwords_stemming.apply(getLength, axis=1)
df_no_stopwords_stemming['UniCount'] = df_no_stopwords_stemming.apply(getVocabulary, axis=1)
df_no_stopwords_stemming.head()
# ## Save analysis dataframes to excel
# +
excel_file = "./ProcessedLoveOutput.xlsx"
# Save the four analysis dataframes, one per sheet. The ExcelWriter
# context manager writes and closes the file on exit, so the explicit
# writer.save() the original called is redundant (and the method was
# removed entirely in pandas 2.0, where it raises AttributeError).
with pd.ExcelWriter(excel_file) as writer:
    df_poem_data.to_excel(writer, sheet_name='sheet0')
    df_tokenize.to_excel(writer, sheet_name='sheet1')
    df_no_stopwords.to_excel(writer, sheet_name='sheet2')
    df_no_stopwords_stemming.to_excel(writer, sheet_name='sheet3')
# -
# # End
| poems/poem_webscrape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# imports
import os
import glob
import re
import numpy as np
import cPickle as pickle
from scipy.io import wavfile
from scipy.signal import spectrogram,stft
import matplotlib.pyplot as plt
import librosa
import librosa.display
# -
class piece:
    """Lightweight record: an audio file plus its composer and source CD."""
    def __init__(self,filepath, composer, CD):
        self.filepath = filepath  # path to the audio file on disk
        self.composer = composer  # composer label, used for output dirs/names
        self.CD = CD              # album/CD the track came from
def save_spec(S, savename, savepath, filetype='png'):
    """
    Save melspectrogram as an image
    Args:
        S (array): melspectrogram already converted to dBs
        savename (string): name for the file (without extension)
        savepath (string): directory to save into
        filetype (string): file type for image
    """
    figure = plt.figure(frameon=False)
    # Full-bleed axes: no frame, no ticks, no surrounding whitespace.
    plt.axes([0., 0., 1., 1.], frameon=False, xticks=[], yticks=[])
    # Render the spectrogram with mel frequency / time axes.
    librosa.display.specshow(S, y_axis='mel', fmax=8000, x_axis='time')
    # Write the image and release the figure to avoid leaking memory
    # when processing many segments.
    figure.savefig('{0}/{1}.{2}'.format(savepath, savename, filetype),
                   bbox_inches=None, pad_inches=0)
    plt.close(figure)
def prep_melspec(piece, initial_count, dt=30.0):
    """
    Make melspectrogram images for a given piece, one per dt-second segment.
    Args:
        piece (piece): record with the audio filepath, composer and CD
        initial_count (int): number of the last image already saved for
            this composer (numbering continues from here)
        dt (float): length of each time segment in seconds
    Returns:
        int: the next unused image number for this composer
    """
    # load audio file
    y, sr = librosa.load('{0}'.format(piece.filepath))
    # compute melspectrogram
    S = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128,
        fmax=8000)
    # convert power spectrogram to dB
    S = librosa.power_to_db(S, ref=np.max)
    # get time vector for the spectrogram frames
    # NOTE(review): __coord_time is a private librosa.display helper and is
    # gone in newer librosa releases -- pin the librosa version, or switch
    # to a public API (e.g. frames_to_time) when upgrading.
    t = librosa.display.__coord_time(S.shape[1])
    # number of spectrogram frames per 1s of audio
    Lseg = int(1./(t[1]-t[0]))
    # number of whole segments of length dt in this piece (remainder dropped)
    Nseg = int(S.shape[1]/(dt*Lseg))
    count = initial_count
    # loop over segments of the complete song (Python 2: xrange)
    for n in xrange(Nseg):
        start = int(dt*n*Lseg)
        end = int(dt*(n+1.0)*Lseg)
        seg = S[:,start:end]
        # save spectrogram image named <composer><count>
        save_spec(seg, savename = '{0}{1}'.format(piece.composer,count),
            savepath='./data/data{0}/{1}/'.format(int(dt), piece.composer) )
        # keep track of which file number to write next
        count +=1
    return count
def merge_two_dicts(x, y):
    """Given two dicts, merge them into a new dict as a shallow copy.

    Keys present in both take their value from y; neither input is mutated.
    """
    merged = dict(x)
    merged.update(y)
    return merged
# +
# path to the music library root
musicpath = './data/music/'
# composer and CD to process in this run
composer = 'beethoven'
CD = 'Beethoven- Piano Sonatas 1,2,3 '
# find all FLAC tracks on that CD
songpaths = glob.glob('{0}{1}/{2}/*.flac'.format(musicpath, composer, CD))
# NOTE(review): the '.' in '.flac' is a regex wildcard here; use
# r'/|\.flac' if track names could ever contain e.g. 'xflac'.
songnames = [re.split('/|.flac', sp)[-2] for sp in songpaths]
# dictionary mapping song name -> (first image number, next free number)
songdict = {}
# length of time segments in seconds
dt = 30.0
# check if there is a pre-existing dictionary from an earlier run
if glob.glob('./data/data{0}/{1}/{1}.pkl'.format(int(dt),composer)):
    # load existing dict
    with open('./data/data{0}/{1}/{1}.pkl'.format(int(dt),composer), 'rb') as f:
        old_songdict = pickle.load(f)
else:
    old_songdict = {}
for sp, sn in zip(songpaths, songnames):
    # skip songs already processed in a previous run
    if sn in old_songdict:
        print('Skipping {0}'.format(sn))
        pass
    else:
        # make spectrogram images for this song
        print('Making melspectrogram for: {0}'.format(sn))
        p = piece(sp, composer, CD)
        # continue numbering after the images already on disk
        initial_count = len(glob.glob('./data/data{0}/{1}/{1}*.png'.format(int(dt),composer)))
        # make melspectrogram segments and save them
        final_count = prep_melspec(p, initial_count, dt)
        # remember which image numbers belong to this song
        songdict['{0}'.format(sn)] = (initial_count, final_count)
# if a dictionary already existed, merge the new entries into it
if old_songdict:
    # merge dicts (new entries win on key clashes)
    new_songdict = merge_two_dicts(songdict, old_songdict)
    # save updated dict
    with open('./data/data{0}/{1}/{1}.pkl'.format(int(dt),composer), 'wb') as f:
        pickle.dump(new_songdict, f, protocol=pickle.HIGHEST_PROTOCOL)
else:
    # first run: create the file
    with open('./data/data{0}/{1}/{1}.pkl'.format(int(dt),composer), 'wb') as f:
        pickle.dump(songdict, f, protocol=pickle.HIGHEST_PROTOCOL)
# +
# Inspect which songs have already been processed (name -> image range).
with open('./data/data{0}/{1}/{1}.pkl'.format(int(dt),composer), 'rb') as f:
    current_songdict = pickle.load(f)
for k in current_songdict:
    print(k, current_songdict[k])
| Mel-Spectogram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### **0. Quick introduction to jupyter notebooks**
# * Each cell in this notebook contains either code or text.
# * You can run a cell by pressing Ctrl-Enter, or run and advance to the next cell with Shift-Enter.
# * Code cells will print their output, including images, below the cell. Running it again deletes the previous output, so be careful if you want to save some results.
# * You don't have to rerun all cells to test changes, just rerun the cell you have made changes to. Some exceptions might apply, for example if you overwrite variables from previous cells, but in general this will work.
# * If all else fails, use the "Kernel" menu and select "Restart Kernel and Clear All Output". You can also use this menu to run all cells.
# ### **0.5 Some hardware setup**
# Keras uses all available GPUs in your computer. The following ```os.environ``` commands configures that only one of them should be used. If you are on a system with several GPUs and want to use more than one, you can change or comment out these commands.
#
# By default, Keras will allocate all of the available memory in the device. The last two lines will have Keras allocate memory as needed.
# +
import os
import tensorflow as tf
# If there are multiple GPUs and we only want to use one/some, set the number in the visible device list.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
# This sets the GPU to allocate memory only as needed, instead of
# TensorFlow's default of reserving all device memory up front.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) != 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
# -
# ### **1. Introduction to Keras**
# Keras is a Python API (Application Programming Interface) for fast development of neural networks that sits on top of TensorFlow, the machine learning platform developed by Google. The full documentation, which you probably will have to search at some point, can be found at https://www.tensorflow.org/api_docs/python/tf/keras.
#
# To begin we should go through some of the essential terminology that you need to know for the rest of the assignment to go smoothly.
#
# 1. **Models**
#
# A Keras Model is, just like the name implies, the top-level object that describes your neural network architecture. This is the thing you create, train, and use to process data. It is very general and can be configured to perform essentially any task you want, such as image classification, text analysis, or continuous regression.
#
#
# 2. **Layers**
#
# These are the fundamental building blocks of the Model. Each Model contains a list of Layers. The way these are connected to each other defines the architecture of the network. There is a huge number of Layers, such as ```Dense```, which is the same fully connected layer you implemented in assignment 1. Another very important Layer is ```Conv2D```, which performs 2-dimensional convolution of the input using some filter.
#
#
# 3. **Optimizers, Losses, and Metrics**
#
# These are the functions and algorithms used to train and evaluate the Model.
# * The Optimizer defines the algorithm used to update the weights using the gradients. In the first assignment you implemented stocastic gradient descent (SGD), which is one type of Optimizer.
# * Losses are differentiable objective functions that compute the performance quantity that the model tries to minimize. One example of a loss function is Mean Squared Error, which you used in the first assignment. Another is Categorical Crossentropy (*aka.* log-loss) which we will use this time.
# * Metrics are functions that compute the performance of the network in a way that is humanly understandable. Unlike the Losses, Metrics don't need to be differentiable since we don't use them in the gradient calculations. A perfect example of this is Accuracy; it's easily understood, but we can't use it as a Loss function.
#
# We will look at all of this in more detail further down.
# ### **2. Loading the dataset**
# For this introduction, we will use the MNIST dataset as for assignment 1. This time however, we will use the higher resolution images. We start by importing Keras and loading the dataset.
# +
from tensorflow import keras
# Download (first run) and load the 28x28 MNIST digit images.
(X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
print("Shape of training data:")
print(X_train.shape)
print(y_train.shape)
print("Shape of test data:")
print(X_test.shape)
print(y_test.shape)
# -
# #### **Plotting images**
# In order to create nice plots in Python we use a library called *matplotlib*. As the name suggests, this gives access to Matlab-like plot functions, although without the interactive elements. When we call ```plt.show()``` the current figure is rendered as a *.png* and displayed.
#
# Here we select some random examples from the X_train matrix and show them as images (you might need to run the cell twice to see the images).
# +
import matplotlib.pyplot as plt
import numpy as np
# Show 5 randomly chosen training digits side by side.
fig, axarr = plt.subplots(1, 5, figsize=(16,3))
for i in range(5):
    rnd = np.random.randint(low=0, high=X_train.shape[0])
    img = X_train[rnd]
    axarr[i].imshow(img, cmap=plt.get_cmap('gray'))
plt.show()
# -
# #### **Preparing the dataset**
# We need to make some transformations to the data before we can use it for training. It's possible to use the data as is, but the results will not be very good. We will leave it as an exercice as to why that is the case.
#
# The first step is to change the labels from a number representation, i.e. 1,2,3 etc., to a ***one-hot encoding*** where each target is a vector with only one of the values set to 1, the rest 0. This represents the probability that the output from the network should try to mimic. The concept is the same as the <strong>D</strong> matrix from the first assignment. A minor difference is that in **D**, each target was a vector with a single 1 and the rest -1. This is because we used *tanh* as output activation, which outputs in the [-1, 1] range. In this assignment we use *softmax* activation in the output layer, which only give values beween 0 and 1, thus the one-hot encoding.
#
# <small>*As a side note for those of you that are interested. While the change from a -1/1 vector to a 0/1 (one-hot) vector might not seem that significant at first, the gradient calculation is very nice when using both softmax activation and cross-entropy loss in the output layer, as several terms cancel. We will let Keras do all the work for us this time, but if you ever need to implement a general backpropagation algorithm from scratch, or you just really like partial derivatives :), this is something to look into.*</small>
#
# Second, we intensity normalize the data to the [0,1] range, instead of [0,255]. This is not strictly necessary as all weights will just scale to account for the change, but the convergence is much faster. The reason for this is that Keras uses specific initialization schemes for the weights that expect the data to be [0,1] normalized.
# +
# Transform label indices to one-hot encoded vectors (softmax targets)
y_train_c = keras.utils.to_categorical(y_train, num_classes=10)
y_test_c = keras.utils.to_categorical(y_test, num_classes=10)
# Normalization of pixel values (to [0-1] range); cast to float32 first
# so the division is done in float, not integer, arithmetic
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# Print some labels to show the index -> one-hot correspondence
print("Label index --> one-hot encoding")
for i in range(5):
    print(str(y_train[i]) + " --> " + str(y_train_c[i]))
# -
# ### **3. A first Keras model**
# Now let's build our first Model for classifying the MNIST data. In Keras, there are two different ways to create a model. The first is using the **Sequential** API which can only build networks that have a single stack of layers. We can add new layers to the network by calling ```model.add(...)```. This is nice and simple but is limited to a single stack of layers.
#
# *Why would you need anything else, you might ask. Nowadays most models for deep learning connect layers not just sequentially, but also using longer connection that "skip" one or more layers before merging into the network again. These, called skip connections or residual connections, are very powerful but also outside the scope of this course. For a concrete example lookup the popular ResNet architecture.*
#
# The second way to build models is using the **Functional** API. In this, we treat each layer as an individual function and we manually use the output from one (or more) layer as the input to another layer. This gives much more flexibility in designing the network architecture. For this reason, we will use the Functional API even though the Sequential would do just fine for this assignment. This will give you more freedom in the final task and better prepare you for any future projects you want to do.
#
# We will begin by building a model very similar to the one from assignment 1, *i.e.* a two-layer fully connected (Dense) network.
# +
# Import some stuff we need
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense, Flatten
# Create the input layer; shape[1:] drops the sample axis, leaving (28, 28)
x_in = Input(shape=X_train.shape[1:])
# Create the rest of the layers: flatten 28x28 -> 784, one tanh hidden
# layer, then a 10-way softmax output (one unit per digit class)
x = Flatten()(x_in)
x = Dense(64, activation='tanh')(x)
x = Dense(10, activation='softmax')(x)
# Create the model object from the input/output endpoints
model = Model(inputs=x_in, outputs=x)
# -
# We want you to really understand what is going on here, so let's go through this step by step.
# 1. ```x_in = Input(shape=X_train.shape[1:])```: this creates the input layer using the ```Input``` class. This requires the shape of the input data, which is given by ```X_train.shape[1:]```. We use ```[1:]``` to select only the height and width of the images, skipping the number of images. If this is unfamiliar to you, we recommend section 3.1.3 in the official python tutorial: https://docs.python.org/3/tutorial/introduction.html. The output from the input layer, which we call ```x_in```, is a data structure that can be used as input to other layers.
#
# *As a side note, normally when we think of the output of a function we expect that to be some data that we can plot or visualize in some way, like the value 5, or a vector, etc. However, remember that we have not given any data to this model yet, so there is no data to visualize. Here instead we are defining how to process the input data that will eventually be given to the model. When feeding the input of a layer to another, we add a new operation to our process. Remember, a neural network is just a function. It can be a very complicated function, but a function nonetheless, and is therefore just a series of operations.*
#
#
# 2. ```x = Flatten()(x_in)```: simply changes the shape of the input data. The MNIST dataset is images of size 28x28 pixels. However, the next ```Dense``` layer expects all features in a single dimension. The flatten operation simply squashes multidimentional arrays into a single vector (in our case from 28x28 to 784). Note that when we add the ```Flatten``` layer we directly give it the input in the same line of code, *i.e.* ```x = Flatten()(x_in)```. This is a nice trick because it means we don't have to save the layer itself to a variable before using it; we can just define and use it on the same line which saves both space and time. You can test this by running the following cell, which creates a standalone ```Flatten``` layer and processes the first 5 images in X_train.
# +
# TEST for the Flatten() layer
F = Flatten(input_shape=(28,28))
''' don't worry about the following operation. This it to make sure that the data
is in the right format to be processed by Keras. This is automatically taken care
of when using the input layer.'''
In = tf.convert_to_tensor(X_train[0:5])
Out = F(In)
print("Before Flatten: " + str(In.shape))
print("After Flatten : " + str(Out.shape))
# -
# 3. ```x = Dense(64, activation='tanh')(x)``` and ```x = Dense(10, activation='softmax')(x)```: defines two dense layers, the first with 64 nodes and the second with 10, as the number of output classes. For the hidden layer we specify *tanh* as activation function , whereas *softmax* for the output layer.
#
# Specifying the activation directly when creating the layer is not the only way to do it; there is a layer called ```Activation``` that can be used to add a standalone activation function between layers. This is sometimes necessary, for example when using more advanced activation functions, such as LeakyReLU. We also sometimes want to add normalization layers within the network to improve the training, which we will see in the main assignment. In these cases we usually apply the activation function after normalization, which requires using a separate ```Activation``` layer.
#
#
# 4. ```model = Model(inputs=x_in, outputs=x)```: creates the actual model object. We initialize it with the input and output of the network, which we have in ```x_in``` and ```x```. At the moment we don't need any outputs from the middle layers, which is why we overwrite ```x``` when adding new layers. However, we must always save a separate variable for the input layer to create the model.
#
#
# Notice how we only specify the shape of the data at the first layer, using ```X_train.shape[1:]```. Although every layer has this input parameter, we only need to specify the input shape of the first layer since the model takes care of the rest for us. No more struggling with matrix sizes. Neat!
# #### **Finalizing the model**
# Now that we defined the architecture of our model, we need to specify the way we want to train and evaluate it, *i.e.* the Optimizer, Loss function and any Metrics. As Optimizer we will use Mini-batch Stocastic Gradient Descent, which is implemented in the ```SGD``` class. This is almost the same as in assignment 1, except that we don't process the entire dataset before taking a learning step. Instead we randomly divide the data in mini-batches of a few examples, typically a relatively small power of 2, and take a step for each of them. This makes learning faster since we take more steps per epoch than if we were to use the entire dataset for each step. It's also sometimes necessary since the dataset can be so large that it doesn't fit in memory. In other words, batch SGD takes one step based on the mean of the gradient from all data, whereas mini-batch SGD takes multiple steps where each is the mean computed over a fixed number of samples (batch size).
#
# We will also use Nesterov momentum and learing rate decay in the optimizer. Don't worry if this doesn't mean anything to you, it's just a specific improvement to the base SGD algorithm that tries to be smart about the learning step. You will see that it is very good for this problem.
#
# We will use *categorical crossentropy* as loss function, which uses the *log* of predicted probabilites to calculate the loss, which exponentially punishes predictions the more wrong they are. For the metric we will use accuracy, *i.e.* the fraction of the data classified correctly. Note that we can give several metrics to the model if we want, but in this case we will only use accuracy.
#
# We set the optimizer, loss function, and metrics using the ```model.compile``` method.
# +
from tensorflow.keras.optimizers import SGD
# Define the optimizer: mini-batch SGD with Nesterov momentum and
# learning-rate decay.
# NOTE(review): `lr` and `decay` are deprecated in newer TF/Keras
# (use `learning_rate` / a schedule) -- confirm the pinned TF version
# before changing.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# Compile the model using the optimizer, loss and metric
model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
# -
# #### **Inspecting the model**
# Now that we have a model, we can do some visualization. First, let's print a table summarizing the model with ```model.summary``` (the parameter is simply the width of the table).
# Layer-by-layer summary; the argument is just the table width in characters.
model.summary(60)
# We can see the type, output size, and number of parameters for each layer. This is very useful when working with larger models. For example we might want to find out why the model is overfitting. Is there a layer with a lot of parameters? That might be a good candidate to change. Having many parameters to train is also slower, so changing the network by adding or removing layers might speed up training. Finally there's a summary at the bottom with the total number of parameters. Some layers have parameters that are configurable but not part of the optimization, these are the *Non-trainable params* of which we currently have none.
#
# Note how the first value of the output shape is **None**. This just means that we haven't specified the mini-batch size yet. We do this when we actually train the model.
#
# We can also print an image of the model using the code below. This might seem unnecessary when we have the table, and at the moment it gives roughly the same information. However, when building more advanced models, for example using residual connections, the table quickly becomes unreadable.
# Render the model graph as an image (plot_model needs pydot + graphviz).
keras.utils.plot_model(model, show_shapes=True, show_layer_names=False)
# #### **Training the model**
# Finally, time to do some model training! This is done with the ```model.fit``` method. We input the training data, one-hot encoded targets, mini-batch size, and number of epochs. When using mini-batches, one epoch passes when all the data has been used once. Each training sample can only be used once per epoch, thus the mini-batches are randomly selected each epoch, which is why it's called stochastic gradient decent.
#
# We also set the *validataion_split* parameter to 0.2. This tells ```model.fit``` to split off 20% of the training data to use for validation during the training process. In the previous assignment we used the test data for this purpose, but this is not how it's usually done. We might want to use the validation data during training to make decisions, such as stopping early if we detect overfitting or decreasing the learning rate if we detect that the performance no longer improves. But that means we cannot use the same data to get the final performance of the model, as it has influenced the training and the model parameters. This is why we need three datasets, one for training, one for validation during training, and one for testing after training. Using the *validation_split* parameter we get a random selection from the training data for validation, but we can also give a specific validation set to the ```fit``` method if we want, or we can tell it to always use the first 20%, etc.
#
# Finally, setting the *verbose* flag to 1 prints the status of the training. Now let's run it!
# Train: 10 epochs of mini-batch SGD, holding out 20% of X_train for validation.
history = model.fit(X_train, y_train_c, epochs=10, batch_size=32, verbose=1, validation_split=0.2)
# #### **Evaluating the model**
# We evaluate the model on the test data using the ```model.evaluate``` method, which returns the loss and metrics. The results can vary, but should generally be around 97% accuracy. Remember that the first assignment required only 93%. Not bad for a few lines of code and 10 epochs, right?
# +
# Loss and metrics on the held-out test set, printed next to their names.
score = model.evaluate(X_test, y_test_c, verbose=0)
for metric_name, metric_value in zip(model.metrics_names, score):
    print("Test " + metric_name + " = %.3f" % metric_value)
# -
# We can also plot the history of the training using the *history* object that is returned from ```model.fit```. This again uses the *matplotlib* library.
# +
plt.figure(figsize=(12,5))
# Plot loss (log y-scale makes the decay over epochs easier to read)
plt.subplot(1,2,1)
plt.semilogy(history.history['loss'] , label="Training")
plt.semilogy(history.history['val_loss'], label="Validation")
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(loc='upper right')
plt.grid(True, which="both")
# Plot accuracy as a percentage
plt.subplot(1,2,2)
plt.plot(100 * np.array(history.history['accuracy']) , label="Training")
plt.plot(100 * np.array(history.history['val_accuracy']), label="Validation")
plt.title('Model accuracy')
plt.ylabel('Acc [%]')
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.grid(True, which="both")
plt.show()
# -
# We can also import some libraries to calculate and view the confusion matrix. For this we want to get the predicted class for each test sample, which we can do with ```model.predict```. However, since the model outputs a 10-class probabilty vector we need to convert it back to a label index vector (*i.e.* 1,2,3 etc.). Use ```np.argmax``` for this.
# +
from sklearn.metrics import confusion_matrix
from Custom import PrintPrediction
# Probabilities for each class, for each test image
prob_test = model.predict(X_test)
# Prediction (class number) for each test image: argmax over the 10
# softmax outputs converts the probability vector back to a label index.
p_test = np.argmax(prob_test,axis=1)
# Calculate confusion matrix (entry [i][j]: true class i predicted as j)
CM = confusion_matrix(y_test, p_test)
# Print probabilities and predictions for the first few test images
print("Probabilites and predictions")
for i in range(5):
    PrintPrediction(prob_test[i])
print("\nConfusion matrix")
print(CM)
# -
# Here is a nice custom function for evaluating the model and plotting the history and confusion matrix. We will use this in the main part of the assignment, so you can focus on the fun parts instead of writing the plot code.
#
# *The code for the Labels input is a bit of python magic called list comprehension. It's very nice and also very easy to make completely unreadable. Use responsibly :)*
from Custom import PlotModelEval
# Evaluate the model and plot history + confusion matrix in one call.
# (A stray no-op `plt.text` attribute access -- a leftover from editing --
# was removed here; it referenced the function without calling it.)
PlotModelEval(model, history, X_test, y_test, Labels=[str(x) for x in range(10)])
# ### **------------------------------------------------------------------------------------**
# ### **Extra, examples of more complicated models**
# As a bonus, here are some short examples of models built using the functional API, that can't be created with the Sequential model.
#
# ##### **Adding a residual (skip) connection**
# Residual (skip) connections are used in some of the most powerful modern architectures, for reasons that are outside the scope of this course. However, for a concrete example, lookup ResNet.
# +
# Import Keras and create some shorthand notation
import tensorflow.keras as keras
import tensorflow.keras.layers as L
# Begin like last time
x_in = L.Input(shape=100)
x = L.Dense(50, activation="tanh")(x_in)
# But now, save the output to another variable, y, so it can be reused
# later as the skip connection
y = L.Dense(10, activation="tanh")(x)
# Use y as new input, but then change back to x
x = L.Dense(10, activation="tanh")(y)
x = L.Dense(10, activation="tanh")(x)
# Now we have two different outputs from the network, at two different points.
# We can merge them using different Layers, for example Add, using a list of layer outputs:
x = L.Add()([x,y])
# Finally, add the output layer and create the model
x = L.Dense(5, activation="softmax")(x)
model2 = keras.models.Model(inputs=x_in, outputs=x)
# Print the model
model2.summary(100)
# -
# You can see on the right that there is a new column that shows the connections between layers. That's not very visual though, so let's print it as an image instead.
# Visualize the skip connection as a graph instead of a table.
keras.utils.plot_model(model2, show_shapes=True, show_layer_names=False)
# ##### **More than one input and output**
# This can be useful when doing object recognition in images. You can imagine that you not only want to classify an image as a cat or dog, but also give the coordinates of the cat or dog in the image. Or even have a segmented images as output where the background is removed.
# +
# Import Keras and create some shorthand notation
import tensorflow.keras as keras
import tensorflow.keras.layers as L
# Two separate inputs of different sizes...
x_in1 = L.Input(shape=100)
x_in2 = L.Input(shape=50)
x1 = L.Dense(20, activation="tanh")(x_in1)
x2 = L.Dense(10, activation="tanh")(x_in2)
# ...merged into a single tensor...
c1 = L.Concatenate()([x1,x2])
# ...then split into two independent output heads.
x3 = L.Dense(10, activation="tanh")(c1)
x4 = L.Dense(10, activation="tanh")(c1)
model3 = keras.models.Model(inputs=[x_in1, x_in2], outputs=[x3,x4])
# Print the model
keras.utils.plot_model(model3, show_shapes=True, show_layer_names=False)
# -
# ##### **A (not so) crazy model**
#
# You can also very easily make some crazy models. Here is a network where the input to each layer is the sum of outputs from all previous layers. While this particular model is just a dummy, similar techniques where there is a high degree of connectivity between the layers have actually been used in some scientific publications, for example to detect diseases in X-Ray images.
# +
# Import Keras and create some shorthand notation
import tensorflow.keras as keras
import tensorflow.keras.layers as L
# Dummy "densely connected" example: each Dense layer receives the sum
# of the outputs of all previous Dense layers.
x_in = L.Input(shape=100)
x1 = L.Dense(10, activation="tanh")(x_in)
x2 = L.Dense(10, activation="tanh")(x1)
a1 = L.Add()([x1,x2])
x3 = L.Dense(10, activation="tanh")(a1)
a2 = L.Add()([x1,x2,x3])
x4 = L.Dense(10, activation="tanh")(a2)
# FIX: this sum previously rebound `a2`, shadowing the one already used
# for x4; a distinct name makes the wiring explicit (the built graph is
# unchanged, since x4 had already captured the old a2).
a3 = L.Add()([x1,x2,x3,x4])
x = L.Dense(5, activation="tanh")(a3)
model4 = keras.models.Model(inputs=x_in, outputs=x)
# Print the model
keras.utils.plot_model(model4, show_shapes=True, show_layer_names=False)
| A2_DeepLearning/MNIST-Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # File downloading
# ## Getting metadata from the API
# +
import requests
import json
from tqdm.auto import tqdm
# +
# Request headers copied from a real browser session against hse.ru so
# the API treats us like the site's own frontend.
# NOTE(review): the hard-coded content-length does not match our actual
# payloads; `requests` recomputes it, so consider dropping that key.
headers = {
    "accept": "application/json, text/plain, */*",
    "accept-encoding": "gzip, deflate, br",
    "accept-language": "ru-RU,ru;q=0.9,en-US;q=0.8,en;q=0.7",
    "content-length": "215",
    "content-type": "application/json;charset=UTF-8",
    "origin": "https://www.hse.ru",
    "referer": "https://www.hse.ru/edu/vkr/index.html?faculty=139191145&textAvailable=1&lang=ru",
    "sec-fetch-dest": "empty",
    "sec-fetch-mode": "cors",
    "sec-fetch-site": "same-origin",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36"
}
# Search filter sent to the VKR list endpoint. `page` is filled in per
# request by get_vkrs_from_page; faculty 139191145 + textAvailable="1"
# restricts results to one faculty's theses that have full text attached.
payload={"author":"",
    "vkrIds":"",
    "faculty":"139191145",
    "supervisor":"",
    "year":0,
    "title":"",
    "level":0,
    "score":0,
    "programs":"",
    "textAvailable":"1",
    "lang":"ru",
    "page": None,
    "findMode":False,
    "ruVersion":True,
    "urlPrefix":"/edu/vkr"}
def get_vkrs_from_page(headers, payload, page):
    """Fetch one page of thesis records from the HSE VKR list API.

    Mutates payload in place (sets its 'page' key) and returns the list
    of thesis dicts from the JSON response.
    """
    payload["page"] = page
    response = requests.post('https://www.hse.ru/edu/vkr/api/list',
                             data=json.dumps(payload), headers=headers)
    return response.json()['result']['vkrs']['list']
# +
# Crawl all result pages and collect every thesis record.
# NOTE(review): the page count (90) is hard-coded to what the site showed
# at crawl time -- update the range if the corpus grows.
vkrs = []
for page in tqdm(range(1,91)):
    vkrs.extend(get_vkrs_from_page(headers, payload, page))
print(len(vkrs))
# Peek at one record to see the fields the API returns.
vkrs[0]
from collections import Counter
# BUG FIX: the original comprehension was
#     [v['learn_program_title'] for f in vkrs]
# i.e. it iterated over "f" but read the undefined name "v", raising
# NameError on a non-empty crawl.  Iterate and read the same variable.
programme_titles = [v['learn_program_title'] for v in vkrs]
# Frequency of theses per degree programme, sorted most-common first.
c = Counter(programme_titles)
programme_stat = dict(c.most_common())
programme_stat
# Writing to `.json` file
# Re-key the records by their unique vkr_id so individual theses can be
# looked up later.
vkrs_dict = {vkr['vkr_id']: vkr for vkr in vkrs}
len(vkrs_dict)
# Persist the metadata (the filename typo "metada" is kept deliberately:
# the reader cell below opens exactly the same name).
with open ('vkrs_metada.json', 'w', encoding='utf-8') as output:
    json.dump(vkrs_dict, output, ensure_ascii=False, indent=2)
# ## Downloading texts
# +
import os
import json
import requests
from tqdm.auto import tqdm
# -
# Reload the metadata written by the crawling section above.
with open ('vkrs_metada.json', 'r', encoding='utf-8') as f:
    vkrs_dict = json.load(f)
# +
def get_file_type(content_type):
    """Map an HTTP Content-Type header value to a file extension.

    Any content type not in the known table (including ``None``) maps
    to ``'unknown'``.
    """
    extension_by_content_type = {
        'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'docx',
        'application/pdf': 'pdf',
        'application/msword': 'doc',
        'application/zip': 'zip',
        'text/html; charset=UTF-8': 'html',
        'application/vnd.ms-excel': 'xls',
        'application/vnd.oasis.opendocument.text': 'odt',
    }
    return extension_by_content_type.get(content_type, 'unknown')
def download(vkr_id, vkr_file, filepath=''):
    """Download one thesis file and save it as <vkr_id>.<ext> under filepath.

    The extension is inferred from the response Content-Type header;
    ambiguous types (zip/xls/unknown/doc) are normalised to .docx.
    NOTE(review): presumably most such responses are in fact Word files —
    confirm against a sample of the downloaded data.
    """
    resp = requests.get(vkr_file, allow_redirects=True)
    file_type = get_file_type(resp.headers.get('content-type'))
    if file_type in ['zip', 'xls', 'unknown', 'doc']:
        file_type = 'docx'
    file = f'{vkr_id}.{file_type}'
    full_path = os.path.join(filepath, file)
    # BUG FIX: the original used open(full_path, 'wb').write(...) which
    # leaks the file handle; a context manager guarantees close/flush
    # even if the write raises.
    with open(full_path, 'wb') as out:
        out.write(resp.content)
# -
# Download every thesis.  Failures are logged (id + URL) and skipped so a
# single bad link does not abort the whole crawl.
for v_id, v_item in tqdm(vkrs_dict.items()):
    try:
        download(v_id, v_item['vkr_file'], filepath='./raw_files')
    except Exception:
        print(v_id, v_item['vkr_file'])
# *<NAME>, 2021*
| Code/Crawler.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_mxnet_p36
# language: python
# name: conda_mxnet_p36
# ---
# ## Using SageMaker debugger to monitor attentions in BERT model training
#
# [BERT](https://arxiv.org/abs/1810.04805) is a deep bidirectional transformer model that achieves state-of the art results in NLP tasks like question answering, text classification and others.
# In this notebook we will use [GluonNLP](https://gluon-nlp.mxnet.io/) to finetune a pretrained BERT model on the [Stanford Question and Answering dataset](https://web.stanford.edu/class/cs224n/reports/default/15848195.pdf) and we will use [SageMaker Debugger](https://docs.aws.amazon.com/sagemaker/latest/dg/train-debugger.html) to monitor model training in real-time.
#
# The paper [Visualizing Attention in Transformer-Based Language Representation Models [1]](https://arxiv.org/pdf/1904.02679.pdf) shows that plotting attentions and individual neurons in the query and key vectors can help to identify causes of incorrect model predictions.
# With SageMaker Debugger we can easily retrieve those tensors and plot them in real-time as training progresses which may help to understand what the model is learning.
#
# The animation below shows the attention scores of the first 20 input tokens for the first 10 iterations in the training.
#
# <img src='images/attention_scores.gif' width='350' />
# Fig. 1: Attention scores of the first head in the 7th layer
#
# [1] *Visualizing Attention in Transformer-Based Language Representation Models*: <NAME>, 2019, 1904.02679, arXiv
# !pip install smdebug==0.7.2
# !pip list
# +
import boto3
import sagemaker
# Resolve the notebook's SageMaker session, default artifact bucket,
# execution role and region, plus a low-level SageMaker API client used
# later to poll the training job status.
sess = sagemaker.Session()
bucket = sess.default_bucket()
role = sagemaker.get_execution_role()
region = boto3.Session().region_name
sm = boto3.Session().client(service_name='sagemaker', region_name=region)
# -
# ### SageMaker training
# The following code defines the SageMaker Estimator. The entry point script [train.py](entry_point/train.py) defines the model training. It downloads a BERT model from the GluonNLP model zoo and finetunes the model on the Stanford Question Answering dataset. The training script follows the official GluonNLP [example](https://github.com/dmlc/gluon-nlp/blob/v0.8.x/scripts/bert/finetune_squad.py) on finetuning BERT.
#
# For demonstration purposes we will train only on a subset of the data (`train_dataset_size`) and perform evaluation on a single batch (`val_dataset_size`).
# +
from sagemaker.mxnet import MXNet
from sagemaker.debugger import DebuggerHookConfig, CollectionConfig
#role = sagemaker.get_execution_role()
#BUCKET_NAME = sagemaker_session.default_bucket()
# S3 prefix where SageMaker Debugger will emit the captured tensors.
s3_bucket_for_tensors = 's3://{}/sm_bert_viz/tensors'.format(bucket)
# MXNet estimator for BERT finetuning.  The debugger hook defines a custom
# collection "all" whose regex captures attention-score outputs plus the
# key/query projection outputs; tensors are saved at step 0 of training and
# every step of evaluation.
mxnet_estimator = MXNet(entry_point='train.py',
                        source_dir='entry_point',
                        role=role,
                        train_instance_type='ml.p3.2xlarge',
                        train_instance_count=1,
                        framework_version='1.6.0',
                        py_version='py3',
                        hyperparameters = {'epochs': 3,
                                           'batch_size': 16,
                                           'learning_rate': 5e-5,
                                           'train_dataset_size': 1024,
                                           'val_dataset_size': 16},
                        debugger_hook_config = DebuggerHookConfig(
                          s3_output_path=s3_bucket_for_tensors,
                          collection_configs=[
                              CollectionConfig(
                                  name="all",
                                  parameters={"include_regex":
                                              ".*multiheadattentioncell0_output_1|.*key_output|.*query_output",
                                              "train.save_steps": "0",
                                              "eval.save_interval": "1"}
                              )
                          ]
                        )
                       )
# -
# SageMaker Debugger provides default collections for gradients, weights and biases. The default `save_interval` is 100 steps. A step presents the work done by the training job for one batch (i.e. forward and backward pass).
#
# In this example we are also interested in attention scores, query and key output tensors. We can emit them by just defining a new [collection](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/api.md#collection). In this example we call the collection `all` and define the corresponding regex. We save every iteration during validation phase (`eval.save_interval`) and only the first iteration during training phase (`train.save_steps`).
#
#
# We also add the following lines in the validation loop to record the string representation of input tokens:
# ```python
# if hook.get_collections()['all'].save_config.should_save_step(modes.EVAL, hook.mode_steps[modes.EVAL]):
# hook._write_raw_tensor_simple("input_tokens", input_tokens)
# ```
# Launch the training job asynchronously (wait=False returns immediately).
mxnet_estimator.fit(wait=False)
# We can check the S3 location of tensors:
path = mxnet_estimator.latest_job_debugger_artifacts_path()
print('Tensors are stored in: {}'.format(path))
# Get the training job name:
# +
job_name = mxnet_estimator.latest_training_job.name
print('Training job name: {}'.format(job_name))
# Low-level client + job description, used below to poll job status.
client = mxnet_estimator.sagemaker_session.sagemaker_client
description = client.describe_training_job(TrainingJobName=job_name)
# -
# We can access the tensors from S3 once the training job is in status Training or Completed. In the following code cell we check the job status.
# +
import time
# Poll every 15s until the job's secondary status reaches 'Training'
# (tensors start appearing in S3) or 'Completed'.
if description['TrainingJobStatus'] != 'Completed':
    while description['SecondaryStatus'] not in {'Training', 'Completed'}:
        description = client.describe_training_job(TrainingJobName=job_name)
        primary_status = description['TrainingJobStatus']
        secondary_status = description['SecondaryStatus']
        print('Current job status: [PrimaryStatus: {}, SecondaryStatus: {}]'.format(primary_status, secondary_status))
        time.sleep(15)
# -
# ### Get tensors and visualize BERT model training in real-time
# In this section, we will retrieve the tensors of our training job and create the attention-head view and neuron view as described in [Visualizing Attention in Transformer-Based Language Representation Models [1]](https://arxiv.org/pdf/1904.02679.pdf).
#
# First we create the [trial](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md#Trial) that points to the tensors in S3:
# +
from smdebug.trials import create_trial
# A smdebug Trial pointing at the S3 tensor location of the running job.
trial = create_trial( path )
# -
# List every tensor name captured so far.
for i in trial.tensor_names():
    print(i)
# Next we import a script that implements the visualization for attention head view in Bokeh.
from utils import attention_head_view, neuron_view
from ipywidgets import interactive
# We will use the tensors from the validation phase. In the next cell we check if such tensors are already available or not.
# +
import numpy as np
from smdebug import modes
# Block until the first EVAL-mode step has been written to S3.
# BUG FIX: in the original, time.sleep(15) sat *after* the while loop
# (column 0), so the loop busy-spun at full speed, hammering S3 and
# flooding the output while tensors were unavailable.  The sleep now
# happens inside the loop between polls.
while True:
    eval_steps = trial.steps(modes.EVAL)
    if eval_steps:
        step = eval_steps[0]
        break
    print("Tensors from validation phase not available yet")
    time.sleep(15)
# -
# Once the validation phase started, we can retrieve the tensors from S3. In particular we are interested in outputs of the attention cells which gives the attention score. First we get the tensor names of the attention scores:
# +
# Sorted names of all attention-score output tensors (one per layer).
tensor_names = sorted(trial.tensor_names(regex='.*multiheadattentioncell0_output_1'))
# -
# Next we iterate over the available tensors of the validation phase. We retrieve tensor values with `trial.tensor(tname).value(step, modes.EVAL)`. Note: if training is still in progress, not all steps will be available yet.
# +
# Read attention-score tensors for every available EVAL step into a
# nested dict: tensors[tensor_name][step] -> ndarray.
steps = trial.steps(modes.EVAL)
tensors = {}
for step in steps:
    print("Reading tensors from step", step)
    for tname in tensor_names:
        if tname not in tensors:
            tensors[tname]={}
        tensors[tname][step] = trial.tensor(tname).value(step, modes.EVAL)
# Number of attention heads, taken from the last tensor read.
# NOTE(review): assumes every layer has the same head count — confirm.
num_heads = tensors[tname][step].shape[1]
# -
# Next we get the query and key output tensor names:
# +
# Pair up key/query output tensor names per layer, and map a layer
# identifier (second '_'-separated token of the key tensor name) to its
# positional index for the neuron view.
layers = []
layer_names = {}
for index, (key, query) in enumerate(zip(trial.tensor_names(regex='.*key_output_'), trial.tensor_names(regex='.*query_output_'))):
    layers.append([key,query])
    layer_names[key.split('_')[1]] = index
# -
# We also retrieve the string representation of the input tokens that were input into our model during validation.
# String representation of the validation inputs, recorded by the training
# script at EVAL step 0 via hook._write_raw_tensor_simple.
input_tokens = trial.tensor('input_tokens').value(0, modes.EVAL)
# #### Attention Head View
#
# The attention-head view shows the attention scores between different tokens. The thicker the line the higher the score. For demonstration purposes, we will limit the visualization to the first 20 tokens. We can select different attention heads and different layers. As training progresses attention scores change and we can check that by selecting a different step.
#
# **Note:** The following cells run fine in Jupyter. If you are using JupyterLab and encounter issues with the jupyter widgets (e.g. dropdown menu not displaying), check the subsection in the end of the notebook.
# Limit the visualization to the first 20 input tokens.
n_tokens = 20
# Attention-head view for the first EVAL step, starting on one hard-coded
# layer; the interactive() widgets below let the user switch layer, head
# and step.
view = attention_head_view.AttentionHeadView(input_tokens,
                                             tensors,
                                             step=trial.steps(modes.EVAL)[0],
                                             layer='bertencoder0_transformer0_multiheadattentioncell0_output_1',
                                             n_tokens=n_tokens)
interactive(view.select_layer, layer=tensor_names)
interactive(view.select_head, head=np.arange(num_heads))
interactive(view.select_step, step=trial.steps(modes.EVAL))
# The following code cell updates the dictionary `tensors` with the latest tensors from the training the job. Once the dict is updated we can go to above code cell `attention_head_view.AttentionHeadView` and re-execute this and subsequent cells in order to plot latest attentions.
# +
# Fetch only the EVAL steps that appeared since the previous read
# (symmetric difference against the earlier `steps` list) and merge them
# into the `tensors` dict.
all_steps = trial.steps(modes.EVAL)
new_steps = list(set(all_steps).symmetric_difference(set(steps)))
for step in new_steps:
    for tname in tensor_names:
        if tname not in tensors:
            tensors[tname]={}
        tensors[tname][step] = trial.tensor(tname).value(step, modes.EVAL)
# -
# #### Neuron view
#
# To create the neuron view as described in paper [Visualizing Attention in Transformer-Based Language Representation Models [1]](https://arxiv.org/pdf/1904.02679.pdf), we need to retrieve the queries and keys from the model. The tensors are reshaped and transposed to have the shape: *batch size, number of attention heads, sequence length, attention head size*
#
# **Note:** The following cells run fine in Jupyter. If you are using JupyterLab and encounter issues with the jupyter widgets (e.g. dropdown menu not displaying), check the subsection in the end of the notebook.
# +
# Read query projection outputs for every EVAL step and reshape/transpose
# them to (batch, num_heads, seq_len, head_size) as required by the
# neuron view.
queries = {}
steps = trial.steps(modes.EVAL)
for step in steps:
    print("Reading tensors from step", step)
    for tname in trial.tensor_names(regex='.*query_output'):
        query = trial.tensor(tname).value(step, modes.EVAL)
        query = query.reshape((query.shape[0], query.shape[1], num_heads, -1))
        query = query.transpose(0,2,1,3)
        if tname not in queries:
            queries[tname] = {}
        queries[tname][step] = query
# -
# Retrieve the key vectors:
# +
# Same as the query pass above, but for the key projection outputs:
# reshape/transpose to (batch, num_heads, seq_len, head_size).
keys = {}
steps = trial.steps(modes.EVAL)
for step in steps:
    print("Reading tensors from step", step)
    for tname in trial.tensor_names(regex='.*key_output'):
        key = trial.tensor(tname).value(step, modes.EVAL)
        key = key.reshape((key.shape[0], key.shape[1], num_heads, -1))
        key = key.transpose(0,2,1,3)
        if tname not in keys:
            keys[tname] = {}
        keys[tname][step] = key
# -
# We can now select different query vectors and see how they produce different attention scores. We can also select different steps to see how attention scores, query and key vectors change as training progresses. The neuron view shows:
# * Query
# * Key
# * Query x Key (element wise product)
# * Query * Key (dot product)
#
# Neuron view of the key/query vectors; the interactive() widgets select
# query token, layer and step.
view = neuron_view.NeuronView(input_tokens,
                              keys=keys,
                              queries=queries,
                              layers=layers,
                              step=trial.steps(modes.EVAL)[0],
                              n_tokens=n_tokens,
                              layer_names=layer_names)
interactive(view.select_query, query=np.arange(n_tokens))
interactive(view.select_layer, layer=layer_names.keys())
interactive(view.select_step, step=trial.steps(modes.EVAL))
# #### Note: Jupyter widgets in JupyterLab
#
# If you encounter issues with this notebook in JupyterLab, you may have to install JupyterLab extensions. You can do this by defining a SageMaker [Lifecycle configuration](https://docs.aws.amazon.com/sagemaker/latest/dg/notebook-lifecycle-config.html). A lifecycle configuration is a shell script that runs when you either create a notebook instance or whenever you start an instance. You can create a Lifecycle configuration directly in the SageMaker console (more details [here](https://aws.amazon.com/blogs/machine-learning/customize-your-amazon-sagemaker-notebook-instances-with-lifecycle-configurations-and-the-option-to-disable-internet-access/)) When selecting `Start notebook`, copy and paste the following code. Once the configuration is created attach it to your notebook instance and start the instance.
#
# ```sh
# # #!/bin/bash
#
# set -e
#
# # OVERVIEW
# # This script installs a single jupyter notebook extension package in SageMaker Notebook Instance
# # For more details of the example extension, see https://github.com/jupyter-widgets/ipywidgets
#
# sudo -u ec2-user -i <<'EOF'
#
# # PARAMETERS
# PIP_PACKAGE_NAME=ipywidgets
# EXTENSION_NAME=widgetsnbextension
#
# source /home/ec2-user/anaconda3/bin/activate JupyterSystemEnv
#
# pip install $PIP_PACKAGE_NAME
# jupyter nbextension enable $EXTENSION_NAME --py --sys-prefix
# jupyter labextension install @jupyter-widgets/jupyterlab-manager
# # run the command in background to avoid timeout
# nohup jupyter labextension install @bokeh/jupyter_bokeh &
#
# source /home/ec2-user/anaconda3/bin/deactivate
#
# EOF
# ```
| 07_train/wip/sm_bert_viz/bert_attention_head_view.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# Copyright 2021 Lawrence Livermore National Security, LLC and other MuyGPyS
# Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: MIT
# # One-Line Regression Workflow
#
# This notebook walks through the same regression workflow as
# [the univariate regression tutorial](univariate_regression_tutorial.ipynb).
#
# This workflow differs from the
# [tutorial](univariate_regression_tutorial.ipynb)
# by making use of a
# [high-level API](../MuyGPyS/examples/regress.rst)
# that automates all of the steps contained therein.
# `MuyGPyS.examples` automates a small number of such workflows.
# While it is recommended to stick to the lower-level API, the supported high-level APIs are useful for the simple applications that they support.
# +
import matplotlib.pyplot as plt
import numpy as np
from MuyGPyS.testing.gp import benchmark_sample, benchmark_sample_full, BenchmarkGP
# -
# We will set a random seed here for consistency when building docs.
# In practice we would not fix a seed.
np.random.seed(0)
# We perform the same operations to sample a curve from a conventional GP as described in the
# [tutorial notebook](univariate_regression_tutorial.ipynb).
# 10001 points on [-10, 10]; every 10th point is a training sample and the
# remaining points are test samples.
lb = -10.0
ub = 10.0
data_count = 10001
train_step = 10
x = np.linspace(lb, ub, data_count).reshape(data_count, 1)
test_features = x[np.mod(np.arange(data_count), train_step) != 0, :]
train_features = x[::train_step, :]
test_count, _ = test_features.shape
train_count, _ = train_features.shape
# Benchmark GP: Matern kernel with fixed smoothness nu=2.0, fixed length
# scale, and a tiny nugget for numerical stability.
nugget_var = 1e-14
fixed_length_scale = 1.0
benchmark_kwargs = {
    "kern": "matern",
    "metric": "l2",
    "eps": {"val": nugget_var},
    "nu": {"val": 2.0},
    "length_scale": {"val": fixed_length_scale},
}
gp = BenchmarkGP(**benchmark_kwargs)
# Draw one sample curve; training responses get i.i.d. Gaussian
# measurement noise, test responses stay noise-free.
y = benchmark_sample(gp, x)
test_responses = y[np.mod(np.arange(data_count), train_step) != 0, :]
measurement_eps = 1e-5
train_responses = y[::train_step, :] + np.random.normal(0, measurement_eps, size=(train_count,1))
# +
# Two-panel figure: the full sampled curve on top, and a zoom onto a
# small window around the middle of the domain below.
fig, axes = plt.subplots(2, 1, figsize=(15, 11))
axes[0].set_title("Sampled Curve", fontsize=24)
axes[0].set_xlabel("Feature Domain", fontsize=20)
axes[0].set_ylabel("Response Range", fontsize=20)
axes[0].plot(train_features, train_responses, "k*", label="perturbed train response")
axes[0].plot(test_features, test_responses, "g-", label="test response")
axes[0].legend(fontsize=20)
# Zoom window: `vis_subset_size` train points starting at the midpoint;
# the matching test window is (train_step - 1) times wider because each
# train point has train_step - 1 test points between it and the next.
vis_subset_size = 10
mid = int(train_count / 2)
axes[1].set_title("Sampled Curve (subset)", fontsize=24)
axes[1].set_xlabel("Feature Domain", fontsize=20)
axes[1].set_ylabel("Response Range", fontsize=20)
axes[1].plot(
    train_features[mid:mid + vis_subset_size],
    train_responses[mid:mid + vis_subset_size],
    "k*", label="perturbed train response"
)
axes[1].plot(
    test_features[mid * (train_step - 1):mid * (train_step - 1) + (vis_subset_size * (train_step - 1))],
    test_responses[mid * (train_step - 1):mid * (train_step - 1) + (vis_subset_size * (train_step - 1))],
    "g-", label="test response"
)
plt.tight_layout()
plt.show()
# -
# We now set our nearest neighbor index and kernel parameters.
# Exact nearest-neighbor search via a ball tree.
nn_kwargs = {"nn_method": "exact", "algorithm": "ball_tree"}
# Kernel config for optimization: nu is the free hyperparameter, sampled
# log-uniformly within bounds; eps is pinned to the known measurement
# noise and length_scale stays fixed.
k_kwargs = {
    "kern": "matern",
    "metric": "l2",
    "eps": {"val": measurement_eps},
    "nu": {"val": "log_sample", "bounds": (0.1, 5.0)},
    "length_scale": {"val": fixed_length_scale},
}
# Finally, we run [do_regress()](../MuyGPyS/examples/regress.rst).
# This function entirely instruments the regression workflow, with several tunable options.
# Most of the keyword arguments in this example are specified at their default values, so in practice this call need not be so verbose.
#
# In particular, `variance_mode` and `return_distances` affect the number of returns.
# If `variance_mode is None`, then no `variances` variable will be returned.
# This is the default behavior.
# If `return_distances is False`, then no `crosswise_dists` or `pairwise_dists` tensors will be returned.
# This is also the default behavior.
# +
from MuyGPyS.examples.regress import do_regress
# One-shot high-level workflow: builds the NN index, optimizes nu by
# batched MSE, applies the analytic sigma_sq scaling, and predicts with
# diagonal posterior variance.  return_distances=True additionally yields
# the crosswise/pairwise distance tensors.
muygps, nbrs_lookup, predictions, variances, crosswise_dists, pairwise_dists = do_regress(
    test_features,
    train_features,
    train_responses,
    nn_count=30,
    batch_count=train_count,
    loss_method="mse",
    sigma_method="analytic",
    variance_mode="diagonal",
    k_kwargs=k_kwargs,
    nn_kwargs=nn_kwargs,
    verbose=True,
    apply_sigma_sq=True,
    return_distances=True,
)
# -
# We here evaluate our prediction performance in the same manner as in the
# [tutorial](univariate_regression_tutorial.ipynb).
# We report the RMSE, mean diagonal posterior variance, the mean 95% confidence interval size, and the coverage, which ideally should be near 95%.
# +
from MuyGPyS.optimize.objective import mse_fn
# Half-width of the pointwise 95% confidence interval (1.96 sigma).
confidence_intervals = np.sqrt(variances) * 1.96
# Fraction of test points whose absolute residual falls inside the CI;
# ideally close to 0.95.
coverage = (
    np.count_nonzero(
        np.abs(test_responses - predictions)
        < confidence_intervals.reshape(test_count, 1)
    )
    / test_count
)
print(f"RMSE: {np.sqrt(mse_fn(predictions, test_responses))}")
print(f"mean diagonal variance: {np.mean(variances)}")
print(f"mean confidence interval size: {np.mean(confidence_intervals * 2)}")
print(f"coverage: {coverage}")
# -
# We also produce the same plots.
# +
fig, axes = plt.subplots(2, 1, figsize=(15, 11))
# Top panel: full curve with predictions and the 95% CI band.
axes[0].set_title("Sampled Curve", fontsize=24)
axes[0].set_xlabel("Feature Domain", fontsize=20)
axes[0].set_ylabel("Response Range", fontsize=20)
axes[0].plot(train_features, train_responses, "k*", label="perturbed train response")
axes[0].plot(test_features, test_responses, "g-", label="test response")
axes[0].plot(test_features, predictions, "r--", label="test predictions")
axes[0].fill_between(
    test_features[:, 0],
    (predictions[:, 0] - confidence_intervals),
    (predictions[:, 0] + confidence_intervals),
    facecolor="red",
    alpha=0.25,
    label="95% Confidence Interval",
)
axes[0].legend(fontsize=20)
# Bottom panel: zoom onto the mid-domain window.  Name the index windows
# once instead of repeating the arithmetic in every call.
train_window = slice(mid, mid + vis_subset_size)
zoom_start = mid * (train_step - 1)
test_window = slice(zoom_start, zoom_start + vis_subset_size * (train_step - 1))
axes[1].set_title("Sampled Curve (subset)", fontsize=24)
axes[1].set_xlabel("Feature Domain", fontsize=20)
axes[1].set_ylabel("Response Range", fontsize=20)
axes[1].plot(
    train_features[train_window],
    train_responses[train_window],
    "k*", label="perturbed train response"
)
axes[1].plot(
    test_features[test_window],
    test_responses[test_window],
    "g-", label="test response"
)
axes[1].plot(
    test_features[test_window],
    predictions[test_window],
    "r--", label="test predictions")
axes[1].fill_between(
    test_features[test_window][:, 0],
    (predictions[:, 0] - confidence_intervals)[test_window],
    (predictions[:, 0] + confidence_intervals)[test_window],
    facecolor="red",
    alpha=0.25,
    label="95% Confidence Interval",
)
axes[1].legend(fontsize=20)
plt.tight_layout()
plt.show()
| docs/examples/regress_api_tutorial.ipynb |