code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#matplotlib inline
from __future__ import division
import numpy as np
from numpy.random import rand
from numpy import linalg as LA
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.lines as mlines
import math
import sys
import os
from random import shuffle
from random import gauss
from scipy.interpolate import UnivariateSpline
from scipy.interpolate import InterpolatedUnivariateSpline
from scipy.optimize import fmin
from scipy.optimize import fsolve
from scipy import interpolate
from scipy.optimize import curve_fit
import scipy.optimize as opt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from pylab import polyfit
import matplotlib.ticker as ticker
from matplotlib import gridspec
from scipy.optimize import differential_evolution
import warnings
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle, Circle
#from matplotlib.ticker import ScalarFormatter
import matplotlib.ticker as mticker
# %matplotlib inline
def fit_func_cv(x, b, c, d):
    """Power-law divergence fit b * |x - d|**(-c).

    Used as a curve_fit model; works on scalars and numpy arrays alike.
    d plays the role of the critical point and c the divergence exponent.
    """
    gap = np.absolute(x - d)
    return b * gap ** (-c)
def fit_func(xrange_s, a, b):
    """Straight-line fit model: a * xrange_s + b (scalar or numpy array input)."""
    return b + a * xrange_s
######
#-----------------------------------------------------------------------------------------------------------------------
#######
#parameters of the code
######
#-----------------------------------------------------------------------------------------------------------------------
######
# Simulation parameters used to locate the data directories
# (couplings appear constrained to j2 + j6 = 2 below).
lambda3 = 2.1
Kc = 0.0
N = 80
folder = './diff_binder/'
deltas = [0.5, 1.0, 1.5]
#get data for specific heat
#extract the data to plot
all_datas = []
for i in range(len(deltas)):
    datas = []
    j2 = deltas[i]
    j6 = 2.0 - j2
    # Directory name encodes every simulation parameter.
    name_dir = 'testJ2={:.2f}'.format(j2) + 'J6={:.2f}'.format(j6) +'Lambda={:.2f}'.format(lambda3) + 'L='+str(N)+ 'Kc={:.2f}'.format(Kc)
    preambule = folder +name_dir+'finalData/'
    param = np.loadtxt(preambule + 'variables.data')
    #temperature range
    range_temp = param[7:]
    datas.append(range_temp)
    data = np.loadtxt(preambule+'thermo_output.data')
    data2 = np.loadtxt(preambule+'STIFF_thermo_output.data')  # loaded but not used below
    # Column 2 holds two stacked series of equal length; split into halves.
    nt = int(len(data[:,0])/2)
    datas.append(data[0:(nt),2])
    datas.append(data[nt:(2*nt),2])
    #print 'L= ',N_list[i]
    #print 'nt | Tmin | Tmax'
    #print nt, np.min(range_temp), np.max(range_temp)
    all_datas.append(datas)
# Extra data set: delta = 1.0 at a larger lattice size (L = 300), from a
# separate run folder; processed identically to the loop above.
datas = []
j2 = 1.0
j6 = 2.0 - j2
N = 300
folder = './delta_1.0_v4/'
name_dir = 'testJ2={:.2f}'.format(j2) + 'J6={:.2f}'.format(j6) +'Lambda={:.2f}'.format(lambda3) + 'L='+str(N)+ 'Kc={:.2f}'.format(Kc)
preambule = folder +name_dir+'finalData/'
param = np.loadtxt(preambule + 'variables.data')
#temperature range
range_temp = param[7:]
datas.append(range_temp)
data = np.loadtxt(preambule+'thermo_output.data')
data2 = np.loadtxt(preambule+'STIFF_thermo_output.data')  # loaded but not used below
nt = int(len(data[:,0])/2)
datas.append(data[0:(nt),2])
datas.append(data[nt:(2*nt),2])
#print 'L= ',N_list[i]
#print 'nt | Tmin | Tmax'
#print nt, np.min(range_temp), np.max(range_temp)
all_datas.append(datas)
# Persist the collected arrays for a downstream plotting notebook.
np.save('data_energy_binder_compare.npy', all_datas)
# -
| data_and_code_for_figures/data_pickling_example/make_comparison_data_energy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 72} id="hiQ6zAoYhyaA" outputId="0acee878-1207-42c3-9bee-a594acd44365"
import urllib
from IPython.display import Markdown as md
### change to reflect your notebook
_nb_loc = "03_image_models/diagrams.ipynb"
_nb_title = "Diagrams"
# Button icons: Vertex AI Workbench, Colab, GitHub, download (in that order).
_icons=["https://raw.githubusercontent.com/GoogleCloudPlatform/practical-ml-vision-book/master/logo-cloud.png", "https://www.tensorflow.org/images/colab_logo_32px.png", "https://www.tensorflow.org/images/GitHub-Mark-32px.png", "https://www.tensorflow.org/images/download_logo_32px.png"]
# Matching launch URLs, parameterized by this notebook's path and title.
_links=["https://console.cloud.google.com/vertex-ai/workbench/deploy-notebook?" + urllib.parse.urlencode({"name": _nb_title, "download_url": "https://github.com/takumiohym/practical-ml-vision-book-ja/raw/master/"+_nb_loc}), "https://colab.research.google.com/github/takumiohym/practical-ml-vision-book-ja/blob/master/{0}".format(_nb_loc), "https://github.com/takumiohym/practical-ml-vision-book-ja/blob/master/{0}".format(_nb_loc), "https://raw.githubusercontent.com/takumiohym/practical-ml-vision-book-ja/master/{0}".format(_nb_loc)]
# Render the row of launch buttons as an HTML table at the top of the notebook.
md("""<table class="tfo-notebook-buttons" align="left"><td><a target="_blank" href="{0}"><img src="{4}"/>Run in Vertex AI Workbench</a></td><td><a target="_blank" href="{1}"><img src="{5}" />Run in Google Colab</a></td><td><a target="_blank" href="{2}"><img src="{6}" />View source on GitHub</a></td><td><a href="{3}"><img src="{7}" />Download notebook</a></td></table><br/><br/>""".format(_links[0], _links[1], _links[2], _links[3], _icons[0], _icons[1], _icons[2], _icons[3]))
# + [markdown] id="a8HQYsAtC0Fv"
# # 3章の図のプロッティング
#
# -
# %pip install -q -U tensorflow-addons
# + colab={"base_uri": "https://localhost:8080/"} id="ATwrq3yQXCZ3" outputId="9836d417-81ff-4c86-839f-8cba6cbbbe17"
import matplotlib.pylab as plt
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
IMG_HEIGHT = 224
IMG_WIDTH = 224
IMG_CHANNELS = 3
def read_and_decode(filename):
    """Load the JPEG at `filename` and return a float32 tensor with values
    in [0, 1], resized to (IMG_HEIGHT, IMG_WIDTH) with IMG_CHANNELS channels."""
    raw = tf.io.read_file(filename)
    pixels = tf.image.decode_jpeg(raw, channels=IMG_CHANNELS)
    # convert_image_dtype rescales uint8 [0, 255] to float32 [0, 1].
    pixels = tf.image.convert_image_dtype(pixels, tf.float32)
    return tf.image.resize(pixels, [IMG_HEIGHT, IMG_WIDTH])
# + colab={"base_uri": "https://localhost:8080/", "height": 682} id="HEh_vh0vvnNd" outputId="e43ebba2-fe2b-4a53-8b94-32598c021498"
# !gsutil cat gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/eval_set.csv | head -10
# +
img = read_and_decode('gs://practical-ml-vision-book/flowers_5_jpeg/flower_photos/tulips/14124669683_7fb74f20c3.jpg')
f, ax = plt.subplots(1, 4, figsize=(15,5))
# Panel 0: the unmodified image.
ax[0].imshow(img.numpy())
ax[0].set_title('original')
ax[0].axis('off')
# Panel 1: smoothing via an 11x11 mean filter.
mean = tfa.image.mean_filter2d(img, filter_shape=11)
ax[1].imshow(mean.numpy())
ax[1].set_title('smooth')
ax[1].axis('off')
# Panel 2: difference of two mean filters (difference-of-boxes) highlights
# edges; clip so grayscale values stay displayable in [0, 1].
edge = tf.clip_by_value(tf.image.rgb_to_grayscale(
    tfa.image.mean_filter2d(img, filter_shape=5) - tfa.image.mean_filter2d(img, filter_shape=11)
), 0, 1)
ax[2].imshow(edge.numpy(), cmap='gray')
ax[2].set_title('edge')
ax[2].axis('off')
# Panel 3: inverted grayscale intensity.
bw_img = 1.0 - tf.image.rgb_to_grayscale(img)
ax[3].imshow(bw_img.numpy(), cmap='gray')
ax[3].set_title('intensity')
ax[3].axis('off');
# + [markdown] id="Duu8mX3iXANE"
# ## License
# Copyright 2022 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# -
| 03_image_models/diagrams.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Beaver Triples
#
# Author:
# - <NAME> - [email](mailto:<EMAIL>) - [linkedin](https://www.linkedin.com/in/eng-socd/) - [github](https://github.com/socd06) $\newcommand{\shared}[1]{[\![ #1 ]\!]}$
#
# ## Definition
#
# Beaver triples are [tuples](https://en.wikipedia.org/wiki/Tuple) of three values such that `a` and `b` are random uniform values and
#
# $ c = ab $
#
# Beaver triples are named after Donald Beaver, the author of the [paper](https://link.springer.com/chapter/10.1007/3-540-46766-1_34) where the technique was introduced.
# ## Geometric explanation
#
# Beaver triples are applied in **private multiplication** where we want to compute $ \shared{z} = \shared{xy} $ and both $\shared{x}$ and $\shared{y}$ are shared values. This is also known as preprocessed material and is used to compute the multiplication as shown in the geometric explanation below.
#
#
# 
# ### Private multiplication
# This has been implemented in different ways depending on the author and purpose of the protocol, we present a simplified overview of the process in favour of learning:
# #### Precomputation
# First-off, in the precomputation phase:
# - a crypto provider (or trusted third party) computes a Beaver multiplication triple $ (a, b, c) $, such that `a` and `b` are random and $ c = ab $ and then
# - It secret shares the triple with the party
#
# #### Online Phase
# Assuming all parties have a secret share of a multiplication Beaver triple $\shared{a}, \shared{b}, \shared{c}$, the process is as follows:
#
# - Each party computes $ \shared{x-a} $ and publishes their share of $ x - a $, revealing (reconstructing) $ \delta = x - a $
#
#
# - Each party computes $ \shared{y-b} $ and publishes their share of $ y - b $, revealing (reconstructing) $ \epsilon = y - b $
#
#
# - All parties compute $ z_i = c_i + a_i(y - b) + b_i(x - a) $, adding their shares
#
# or, simplified
# - $ z_i = c_i + a_i \epsilon + \delta b_i $
#
#
# - Finally, an arbitrary party adds $ (x - a)(y - b) $ or $ \delta \epsilon $ for short, to the computation, revealing $ z = xy $
#
#
# **Note:** Values inside this type of square brackets $\shared{}$ are secret shares.
# ### [Quiz] Compute the expected z<sub>alice</sub> and z<sub>bob</sub> shares of a private multiplication
# Let our inputs be
#
# $ x = 6 $ and $ y = 4 $
#
# and we consume the following triples:
#
# $( a, \;b,\; c ) = ( 12, \; 26, \; 312 )$
#
# Then let our inputs and triples be secret shared between `Alice` and `Bob`. Therefore,
#
# - `Alice` holds:
#
# $ x_{alice} = 2, \; y_{alice}= -5 $
#
# $ a_{alice} = 15, \; b_{alice} = -20, \; c_{alice} = 117 $
#
# - and `Bob` holds:
#
# $ x_{bob} = 4, \; y_{bob}= 9 $
#
# $ a_{bob} = -3, \; b_{bob} = 46, \; c_{bob} = 195 $
#
#
# | |Alice | | | | | | |Bob | |
# |:----------------------|:----------------------:|-|-|-|-|-|:--------------------|:-------------------:|-|
# |x<sub>alice</sub> = 2 |a<sub>alice</sub> = 15 | | | | | |x<sub>bob</sub> = 4 |a<sub>bob</sub> = -3 | |
# |y<sub>alice</sub> = -5 |b<sub>alice</sub> = -20 | | | | | |y<sub>bob</sub> = 9 |b<sub>bob</sub> = 46 | |
# | |c<sub>alice</sub> = 117 | | | | | | |c<sub>bob</sub> = 195| |
#
#
# Compute the expected $z_{alice} $ and $ z_{bob} $ shares assuming Bob adds $ \delta \epsilon $.
#
# Fill the ____ spaces below with your answers. Feel free to implement the equation in a new cell or use whatever tool you'd like (e.g. a calculator), it's your call.
# Run this cell to import the quizzes
from quiz import q3, q4
# +
# Fill the ____ space below with your answer
z_alice = ___  # intentional placeholder; replace with your answer before running
# run to check your answer
q3.check(z_alice)
# +
# Uncomment the line below to see a hint
# q3.hint
# +
# Uncomment the line below to see the solution
# q3.solution
# +
# Fill the ____ space below with your answer
z_bob = ___  # intentional placeholder; replace with your answer before running
# run to check your answer
q4.check(z_bob)
# +
# Uncomment the line below to see a hint
# q4.hint
# +
# Uncomment the line below to see the solution
# q4.solution
# -
# ## Implement Private multiplication with Beaver Triples
# Now that you are aware of the theory, why don't you try implementing private multiplication from scratch?
# How did it go? There are many different ways to implement this so there is no right solution. We hope you were able to see how the math allows us to hide our inputs.
# # (Solution:)
# ## Implementation
# There are many ways to implement this, particularly on a production-grade application. In this lesson, we want you to understand how these principles work and not worry too much about the best possible implementation. Therefore, we implement private multiplication in a simplified (and not very secure) way.
def beaver_triple(r):
    """Draw a Beaver multiplication triple (a, b, c) with c = a * b.

    r = randomness (exclusive upper bound for the uniform draws of a and b)
    """
    a, b = randint(r), randint(r)
    return (a, b, a * b)
# +
# Define secret sharing from previous lesson
def n_share(s, r, n):
    """Additively secret-share s into n shares over randomness r.

    s = secret
    r = randomness (modulus of the sharing scheme)
    n = number of nodes, workers or participants

    The first n-1 shares are uniform draws in [0, r); the final share
    cancels their sum mod r, so sum(shares) % r reconstructs s.
    """
    shares = [randint(0, r) for _ in range(n - 1)]
    shares.append(r - (sum(shares) % r) + s)
    return tuple(shares)
# -
# also reusing the decryption function
def decrypt(shares, r):
    """Reconstruct the secret from additive shares: their sum reduced mod r.

    shares = iterable made of additive secret shares
    r = randomness (modulus used when the shares were created)
    """
    total = sum(shares)
    return total % r
# +
# Import numpy randomness function
from numpy.random import randint
# Small Q in favour of computation speed
# (Q is the modulus of the additive-sharing scheme; all shares live mod Q.
#  Presumably chosen prime — confirm if reusing with a different value.)
Q = 64601
# -
# We will use lists and dictionaries for simplicity.
# +
# 0 - Create a dictionary per party
# Each dict accumulates that party's shares under keys like "x", "a", "z".
alice = dict(name="alice")
bob = dict(name="bob")
# and put them in a list
parties = [alice, bob]
parties
# -
# Enter integers to multiply together
x = int(input("Alice's input is: "))
y = int(input("Bob's input is: "))
# +
# 1 - Secret share the inputs
x1, x2 = n_share(x,Q,len(parties))
xsecrets = [ x1, x2 ]
y1, y2 = n_share(y,Q,len(parties))
ysecrets = [ y1, y2 ]
# Hand each party its share of x and of y.
for i, party in enumerate(parties):
    party["x"] = xsecrets[i]
    party["y"] = ysecrets[i]
    print(party)
# +
# 2 - Generate Beaver Triple
# Compute triples using Q (the crypto-provider role in this simplified demo)
a,b,c = beaver_triple(Q)
triple = (a,b,c)
print("Triple (a,b,c) = ",triple)
# -
# 3 - Secret Share triple
for idx, value in enumerate(triple):
    # Label the element "a", "b" or "c" according to its position in the triple.
    label = "abc"[idx]
    # Additive secret share of this triple element
    pieces = n_share(value, Q, len(parties))
    print(label, "=", value, "split into", pieces, "\n")
    # Distribute one piece to each party under the matching key.
    for owner, piece in enumerate(pieces):
        parties[owner][label] = piece
# In this example, we can check that the triples have been split into shares among the parties correctly
alice
bob
# Each party
for party in parties:
    # computes x - a
    party["x-a"] = party["x"] - party["a"]
    print(f'{party["name"]} computes \n[x-a] = {party["x"]} - {party["a"]} = {party["x-a"]}')
    # and y - b
    party["y-b"] = party["y"] - party["b"]
    print(f'{party["name"]} computes \n[y-b] = {party["y"]} - {party["b"]} = {party["y-b"]}')
# +
# revealing delta
delta = alice["x-a"] + bob["x-a"]
print("delta =",delta)
# and epsilon
epsilon = alice["y-b"] + bob["y-b"]
print("epsilon =",epsilon)
# +
# and all parties compute using the reconstructed triples and the newly generated delta and epsilon variables
for i, party in enumerate(parties):
    # [z_i] = [c_i] + delta*[b_i] + [a_i]*epsilon
    party["z"] = party["c"] + delta * party["b"] + party["a"] * epsilon
    print(f'z_{party["name"]} = {party["z"]}')
    if i == len(parties)-1:
        # The public correction term delta*epsilon is added by exactly one party.
        party["z"] += delta * epsilon
        print(f'The last party ({party["name"]}) adds (delta)(epsilon) \n[z] = { party["z"] }')
# -
# Since we introduced randomness to our parties' inputs using Q, we use the additive secret sharing decrypt function to remove that randomness.
decrypt( [ alice["z"], bob["z"] ], Q )
# ## Working with Matrices
#
# Now that we know how to secret share, add and multiply integers privately, which is basic for deep learning networks we should learn how to do the same work with matrices. More importantly, neural networks programmed with [PyTorch](https://pytorch.org/) represent images as tensors.
#
# ### Additive Secret Sharing
#
# Borrowing from the previous lesson, we can use the same logic to secret share matrices by adding a `random_tensor` helper function.
# +
# We use the secrets module to generate strong random numbers
from secrets import randbelow
# We use NumPy for math operations
import numpy as np
# and PyTorch to represent our data using tensors
import torch
def random_tensor(shape, r):
    """Return a LongTensor of the given shape, uniformly filled from [0, r).

    shape = desired tensor shape
    r = randomness (exclusive upper bound; drawn with secrets.randbelow)
    """
    n_entries = int(np.prod(shape))
    flat = [randbelow(r) for _ in range(n_entries)]
    return torch.tensor(flat, dtype=torch.long).reshape(shape)
# +
# Modifying secret sharing from previous lesson to generate random matrices
def matrix_share(m, r, n):
    """Additively secret-share the tensor m into n tensor shares.

    m = matrix secret (torch tensor)
    r = randomness (modulus of the sharing scheme)
    n = number of nodes, workers or participants
    """
    shares = [random_tensor(m.shape, r) for _ in range(n - 1)]
    # The last share cancels the accumulated randomness elementwise,
    # so sum(shares) % r reconstructs m.
    shares.append(r - (sum(shares) % r) + m)
    # and return a tuple of random tensors
    return tuple(shares)
# -
# Next, we do a quick test to verify our secret sharing function works
# +
# We make an arbitrary tensor of 2x3 shape
test_tensor = torch.tensor([[1, 2, 3],
                            [3, 2, 1]])
# Make secret shares from our test tensor
n = 2
matrix_shares = matrix_share( test_tensor , Q, n)
matrix_shares
# -
# Can we decrypt the shares using our original function?
# decrypt sums the shares elementwise and reduces mod Q, recovering test_tensor.
decrypt(matrix_shares, Q)
# Success!
#
# ### Matrix Multiplication Refresher
#
# For matrix multiplication, we need the columns of our first matrix to be the same as the rows in our second matrix.
# +
x = torch.tensor([ # 4 x 3
    [1, 1, 1],
    [2, 2, 2],
    [3, 3, 3],
    [4, 4, 4]
], dtype=torch.long)
y = torch.tensor([ # 3 x 2
    [0, 1],
    [2, 3],
    [0, 2]
], dtype=torch.long)
# Inner dimensions must agree: (4x3) @ (3x2) -> (4x2).
print(x.shape, y.shape)
# -
# In regular PyTorch, we can use the `torch.matmul` operation to do n-dimensional matrix multiplication, `x @ y` in short.
torch.matmul(x, y)
# ## Adapt your code to MatMul
# Now that you know how to multiply and secret share matrices with PyTorch, try implementing private multiplication on your own.
# # (Solution)
#
# ## Private Matrix Multiplication
# ### Matrix Beaver Triples
# Adapting Beaver's principles to matrices, we can implement private matrix multiplication this way.
#
# Following the same logic as before, we can make `a` and `b` random tensors and `matmul` them together to make `c = ab`
def matrix_triple(x: torch.LongTensor, y: torch.LongTensor, r: int):
    """Produce a matrix Beaver triple (a, b, c) with c = a @ b.

    x = x tensor (only its shape is used, for a)
    y = y tensor (only its shape is used, for b)
    r = randomness (exclusive upper bound for the random entries)
    """
    a = random_tensor(x.shape, r)
    b = random_tensor(y.shape, r)
    # Matrix-multiply the random tensors to complete the triple.
    return a, b, torch.matmul(a, b)
matrix_triple(x, y, Q)
# Define 2 parties that will virtually hold the shares
parties = ["alice", "bob"]
# and we view their shapes
print(f'Matrix 1 Shape: {x.shape} \nMatrix 2 Shape: {y.shape}')
# Then we secret share `x` and `y` between our two parties of Alice and Bob
# +
# 1 - Secret share inputs
# secret share matrix x (tuple of one tensor share per party)
x_sh = matrix_share(x, Q, len(parties))
# secret share matrix y
y_sh = matrix_share(y, Q, len(parties))
# +
# 2 - Generate Matrix Beaver Triple
a, b, c = matrix_triple(x, y, Q)
# NOTE(fix): the original stored this tuple in a variable named `matrix_triple`,
# which shadowed (and clobbered) the matrix_triple() function defined above.
beaver = (a, b, c)
print("Triple (a,b,c) = \n", beaver)
# -
# 3 - Secret Share triples: each element of the triple is additively shared.
a_sh = matrix_share(a, Q, len(parties))
b_sh = matrix_share(b, Q, len(parties))
c_sh = matrix_share(c, Q, len(parties))
def sub(x, y):
    """Emulates elementwise x - y for shared values (one entry per party)."""
    return [x[p] - y[p] for p in range(len(x))]
# +
# Reveal the masked differences. Naming now follows the scalar protocol above:
# delta = x - a and epsilon = y - b (the original cell had the two labels
# swapped, although the arithmetic itself was correct).
delta = decrypt(sub(x_sh, a_sh), Q)
epsilon = decrypt(sub(y_sh, b_sh), Q)
print("delta =", delta)
print("epsilon =", epsilon)
# -
z_sh = [0] * len(parties)  # initialize the shares
for party in range(len(parties)):
    # [z_i] = [c_i] + (x - a) @ [b_i] + [a_i] @ (y - b)
    z_sh[party] = c_sh[party] + delta @ b_sh[party] + a_sh[party] @ epsilon
    if party == 0:  # only add the public value once
        z_sh[party] += delta @ epsilon
# Remove the modular randomness to reveal z = x @ y
decrypt(z_sh, Q)
# Expected:
x @ y
# ## Private Multiplication with PySyft
# As we mentioned in the intro video, Beaver Triples is the backbone of the [SPDZ protocol](https://link.springer.com/chapter/10.1007%2F978-3-642-32009-5_38) which is mostly implemented in [PySyft](https://github.com/OpenMined/PySyft) already. So, we can do private multiplication without having to implement the math from scratch.
import torch
import syft as sy
hook = sy.TorchHook(torch)  # patch torch with PySyft's tensor extensions
# Three compute workers plus a crypto provider that deals the Beaver triples.
alice = sy.VirtualWorker(hook, id="alice")
bob = sy.VirtualWorker(hook, id="bob")
charlie = sy.VirtualWorker(hook, id="charlie")
secure_worker = sy.VirtualWorker(hook, "secure_worker")
# ### Private Integer Multiplication with PySyft
# +
x = torch.tensor([6])
y = torch.tensor([8])
# And we additive share with our parties
x = x.share(alice, bob, charlie, crypto_provider=secure_worker)
y = y.share(alice, bob, charlie, crypto_provider=secure_worker)
# -
# Compute z = x * y
scalar_mul = x * y
scalar_mul
# If we try to look at the result, we can see that our inputs have been scrambled and replaced with pointers and random numbers, just like above.
decrypted_scalar_mul = scalar_mul.get()  # .get() gathers and reconstructs the shares
decrypted_scalar_mul.item()
# It works! It may seem like we still need to write a lot of lines, but consider that we are simulating a four-worker environment where all inputs are hidden.
#
# ### Private Matrix Multiplication with PySyft
#
# Now, lets try something more complicated, like tensor(matrix) multiplication.
# +
# feel free to play with these values
matrix1 = torch.tensor(
    [
        # 3 x 3
        [ 1, 1, 1],
        [ 1, 1, 1],
        [ 1, 1, 1]
    ], dtype=torch.long)
matrix2 = torch.tensor(
    [
        # 3 x 3
        [ 0, -1, 0],
        [-1, 5, -1],
        [ 0, -1, 0]
    ], dtype=torch.long)
# -
# Additively share both matrices among the three workers.
matrix1 = matrix1.share(alice, bob, charlie, crypto_provider=secure_worker)
matrix2 = matrix2.share(alice, bob, charlie, crypto_provider=secure_worker)
# NOTE(review): `*` is elementwise (Hadamard) multiplication, not matmul —
# despite the section title; confirm this is the intended demonstration.
tensor_mul = matrix1 * matrix2
tensor_mul
# So far so good...
decrypted_tensor_mul = tensor_mul.get()
decrypted_tensor_mul
# It also works! Since it works for tensor multiplication it will also work for convolution operations.
# All these operations, like we established on the previous lesson, work over a finite field of integers, but in neural networks and in real life, we use floats! FixedPrecision encoding is how we **fix** that problem, and our next lesson.
| Foundations_of_Private_Computation/Secure_multiparty_computation/Beaver Triples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ShubhInfotech-Bhilai/AI-ML-DL-NN/blob/master/Keras_XOR_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="KvVHLViv-tFm"
import numpy as np
from keras.models import Sequential
# Fix: `keras.layers.core` is a removed/private path in modern Keras;
# `keras.layers` exposes Dense across all versions.
from keras.layers import Dense
# the four different states of the XOR gate
training_data = np.array([[0,0],[0,1],[1,0],[1,1]], "float32")
# the four expected results in the same order
target_data = np.array([[0],[1],[1],[0]], "float32")
# Small MLP: one hidden ReLU layer gives the nonlinearity XOR requires.
model = Sequential()
model.add(Dense(16, input_dim=2, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='mean_squared_error',
              optimizer='adam',
              metrics=['binary_accuracy'])
model.fit(training_data, target_data, epochs=500, verbose=2)
# Round the sigmoid outputs to hard 0/1 predictions.
print( model.predict(training_data).round())
| Keras_XOR_.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys,os
import numpy as np
import matplotlib.pyplot as plt
# %pylab inline
import test_signals as tst
# Make the parent directory importable so the local extlib module is found.
sys.path.insert(0, os.path.join(sys.path[0], '..'))
import extlib as xl
# ## Scharr Derivative Filters
factor = 3000
Nx = 301
Ny = 301
Nz = 201
# Synthetic spherical test signal and its analytic y-derivative for comparison.
data = tst.SphericalSignal(factor,Nx,Ny,Nz)
truederiv = tst.SphericalSignal(factor,Nx,Ny,Nz,deriv='dy')
tslice = truederiv.xSlice(Nx//2)
result = np.zeros((Ny,Nz))
itrc = 0
for indata in data.getXslice(Nx//2,1,1):
    # Apply the library's 3-point Scharr d/dy filter to this trace.
    result[itrc,:] = xl.scharr3_dy(indata, full=False)
    # Manual cross-check of one output sample: smooth along two axes with what
    # appears to be the 3-tap Scharr smoothing kernel (.12026, .75948, .12026),
    # then take a central difference along the remaining axis — TODO confirm
    # axis ordering against extlib.scharr3_dy.
    tmp1 = indata[:,:,149]*.12026 + indata[:,:,150]*.75948 + indata[:,:,151]*.12026
    tmp2 = tmp1[0,:]*.12026+tmp1[1,:]*.75948+tmp1[2,:]*.12026
    tmp3 = 0.5*tmp2[2]-0.5*tmp2[0]
    print(tmp3, result[itrc,150], tslice[150,itrc], tmp3/tslice[150,itrc])
    # result[itrc,:] = indata[1,1,:]
    itrc += 1
'''
truederiv = tst.SphericalSignal(factor,Nx,Ny,Nz,deriv='dy')
#truederiv = tst.SphericalSignal(factor,Nx,Ny,Nz)
diff = np.abs((truederiv.xSlice(Nx//2) - np.transpose(result))/truederiv.xSlice(Nx//2))*100
f,ax = plt.subplots(3,1,figsize=(8,12))
ax[0].set_xlabel('Y')
ax[0].set_ylabel('Z')
ax[0].set_title('True Derivative X Slice - d/dy')
imt = ax[0].imshow(truederiv.xSlice(Nx//2),cmap='seismic',interpolation='bilinear')
f.colorbar(imt, ax=ax[0])
ax[1].set_xlabel('Y')
ax[1].set_ylabel('Z')
ax[1].set_title('Scharr3 Derviative X Slice - d/dy')
imf = ax[1].imshow(np.transpose(result),cmap='seismic',interpolation='bilinear')
f.colorbar(imf, ax=ax[1])
ax[2].set_xlabel('Y')
ax[2].set_ylabel('Y')
ax[2].set_title('Error - d/dy')
imd = ax[2].imshow(diff,cmap='viridis',vmin=0, vmax=100, interpolation='bilinear')
f.colorbar(imd, ax=ax[2])
f.tight_layout()
'''
| Python_3/Jupyter/spatial_derivatives.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 05: Linear module -- demo
import torch
import torch.nn as nn
# ### Make a _Linear Module_ that takes input of size 5 and return output of size 3
mod = nn.Linear(5,3,bias=True)
print(mod)
# ### Let's make a random tensor of size 5:
x=torch.rand(5)  # uniform samples in [0, 1)
print(x)
print(x.size())
# ### Feed it to the module:
y=mod(x)  # y = W x + b with W, b drawn from the module's default initialization
print(y)
# ### The output y is computed according to the formula:
# $$
# \begin{bmatrix}
# y_1\\ y_2 \\y_3
# \end{bmatrix} =
# \begin{bmatrix}
# w_{11} & w_{12} & w_{13}& w_{14}& w_{15} \\
# w_{21} & w_{22} & w_{23}& w_{24}& w_{25} \\
# w_{31} & w_{32} & w_{33}& w_{34}& w_{35} \\
# \end{bmatrix}
# \begin{bmatrix}
# x_1\\ x_2 \\x_3 \\ x_4 \\x_5
# \end{bmatrix}
# +
# \begin{bmatrix}
# b_1\\ b_2 \\b_3
# \end{bmatrix}
# $$
# ### were the $w_{ij}$'s are the weight parameters and the $b_i$'s are the bias parameters. These internal parameters can be access as follow:
# Weight matrix has shape (out_features, in_features) = (3, 5); bias has shape (3,).
print(mod.weight)
print(mod.weight.size())
print(mod.bias)
print(mod.bias.size())
# ### If we want we can change the internal parameters of the module:
# Fix: direct in-place assignment to a Parameter (a leaf tensor with
# requires_grad=True) raises "RuntimeError: a leaf Variable that requires
# grad is being used in an in-place operation", so disable autograd
# tracking while editing the weights.
with torch.no_grad():
    mod.weight[0,0]=0
    mod.weight[0,1]=1
    mod.weight[0,2]=2
print(mod.weight)
# ### We can also make a Linear module without bias:
mod2 = nn.Linear(5,3,bias=False)
print(mod2)
print(mod2.weight)
print(mod2.bias)  # None: no bias parameter is allocated when bias=False
| codes/labs_lecture04/lab01_linear_module/linear_module_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["remove-input", "active-ipynb", "remove-output"]
# try:
# from openmdao.utils.notebook_utils import notebook_mode
# except ImportError:
# !python -m pip install openmdao[notebooks]
# -
# # The System Setup Stack: Understanding When to Use setup and configure
#
# This document explains what happens during the OpenMDAO `Problem` `setup` process, and how some of the model
# API methods interact during that process.
#
# The purpose of the `setup` process is to prepare the data structures that OpenMDAO needs to efficiently
# run your model or driver. In particular, this includes setting up the vectors used for passing data
# to inputs, converging solvers, and calculating derivatives. It also includes setting up the MPI
# communicators.
#
# Setup also performs some level of model checking, mainly for critical errors. More extensive model
# checking can be done by setting "check" when calling `setup`, or by using the [openmdao command
# line check](../other_useful_docs/om_command.ipynb). It is recommended that you do this after making any changes to the configuration
# of your model. The "check" argument to `setup` can be set to `True`, which will cause a default
# set of checks to run. It can also be set to 'all', which will run all available checks.
# A value of `None` or `False` will result in no checks being run. Finally,
# it can be set to a specific list of checks to run as a list of strings. The checks that are available can be
# determined by running the following command:
# ```
# openmdao check -h
# ```
# + tags=["remove-input"]
# !openmdao check -h
# -
# By default, the output of all checks will be written to a file called 'openmdao_checks.out' in
# addition to `stdout`. Checks can also be performed by calling the `check_config` method on
# your problem object.
#
#
# The OpenMDAO `Group` API includes three methods that are invoked during the `setup` process:
# `initialize`, `setup`, and `configure`. Most of the time, `setup` is all you need to build a group. The specific use case for `configure` is shown below in the next section. The `initialize` method is only used for declaring options for your group (and also in `Component`), and their placement here allows them to be passed into the group as instantiation arguments.
#
# One question that is often asked is: why can't we just put all of our model building code into our group's
# `__init__` method so that everything is there when I instantiate the class? The answer is, when
# running a parallel model under MPI, certain systems might only be executed on certain processors.
# To save memory across the model, these systems are not fully set up on processors where they are
# not local. The only way to do this is to isolate the model building process into a custom method
# (`setup`) and only call it on the processors where that system is active. While
# not everyone will run their models in parallel, it is a good practice to follow the stricter
# guideline so that, if someone wants to include your model in a larger parallel model, they won't
# be forced to allocate any unnecessary memory.
#
# ## Usage of setup vs. configure
#
# The need for two methods for setting up a group arose from a need to sometimes change the linear or
# nonlinear solvers in a subgroup after it had been added. When `setup` is called on the `problem`, the
# `setup` method in each group is called recursively from top to bottom of the hierarchy. For example,
# a group may contain several components and groups. Setup is first called in that top group, during
# which, those components and groups are instantiated. However, the `setup` methods belonging to those sub-components
# and groups cannot be called until the top group's `setup` finishes. This means they are in a state where
# components and groups that are declared in the subgroup don't exist yet.
#
# To remedy this, there is a second api method called `configure` that lets you make changes to your subsystems
# after they have been created. The `configure` method is only needed with groups, and it is called
# recursively from the bottom of the hierarchy to the top, so that at any level, you can be sure that
# `configure` has already run for all your subsystems. This assures that changes made in higher-level groups
# take precedence over those in lower-level ones. Top precedence is given to changes made after calling `setup`
# on the `Problem`.
#
# A second use case for `configure` is issuing connections to subsystems when you need information (e.g. path names)
# that has been set during setup of those subsystems. Since `configure` runs after `setup` has been
# called on all subsystems, you can be sure that this information will be available.
#
# Here is a quick guide covering what you can do in the `setup` and `configure` methods.
#
# | Action | Setup | Configure |
# |------------------------------------------------------------------------------------ |------- |-----------|
# | Add subsystems | o | |
# | Issue connections | o | o |
# | Set system execution order | o | |
# | Add inputs and outputs to components within this group | o | o |
# | Promote variables from subsystems | o | o |
# | Assign solvers at **this** group level | o | o |
# | Assign solvers within subsystems | o | o |
# | Change solver settings for any solver at **this** group level | o | o |
# | Change solver settings in subsystems | | o |
# | Assign Jacobians at **this** group level | o | o |
# | Assign Jacobians within subsystems | | o |
# | Add design variables, objectives, and constraints relative to **this** group level | o | o |
# | Add design variables, objectives, and constraints to subsystems | | o |
# | Add a case recorder to the group or to a solver in **this** group | o | o |
# | Add a case recorder to the group or to a solver in a subsystem | | o |
#
#
# Keep in mind that, when `configure` is being run, you are already done calling `setup` on every group and component in the model, so if you add a new subsystem here, setup will never be called on it, and it will not be properly integrated into the model hierarchy.
#
#
# ## Problem setup and final_setup
#
# OpenMDAO 2.0 introduced a new change to the setup process in which the original monolithic process
# is split into two separate phases triggered by the methods: `setup` and `final_setup`. The `final_setup` method is
# however something you will probably never have to call, as it is called automatically the first time that
# you call `run_model` or `run_driver` after running `setup`. The reason that the `setup` process was split into two
# phases is to allow you to perform certain actions after `setup`:
#
# **Post-setup actions**
#
# - Set values of inputs and indepvarcomps
# - Change settings on solvers
# - Change options on systems
# - Add recorders
# - Assign Jacobians
# - Add training data to metamodels
#
# If you do anything that changes the model hierarchy, such as adding a component to a group, then
# you will need to run `setup` again.
#
# During setup, the following things happen:
#
# - MPI processors are allocated
# - For each custom Group, setup function is called recursively from top to bottom
# - Model hierarchy is created
# - For each custom Group, configure function is called recursively from bottom to top
# - Connections are assembled and verified
# - Variables are sized
#
# This is just enough to allow you to perform the post-setup actions listed above, but there are
# still more things to do before the model can run. In `final_setup`, the following happens:
#
# - All vectors for the nonlinear and linear systems are created and allocated
# - Data transfers are created (i.e., scatters for MPI)
# - Solvers are set up
# - Jacobians are set up and allocated
# - Recorders are set up
# - Drivers are set up
# - Initial values are loaded into the inputs and outputs vectors
| openmdao/docs/openmdao_book/theory_manual/setup_stack.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="odBn6F_lGN_8"
import os
# Find the latest version of spark 3.0 from http://www.apache.org/dist/spark/ and enter as the spark version
# For example:
# spark_version = 'spark-3.0.3'
# NOTE: placeholder deliberately left for the student to fill in; the cell
# will not run until a real version string is entered.
spark_version = 'spark-3.<enter version>'
os.environ['SPARK_VERSION']=spark_version
# Install Spark and Java
# !apt-get update
# !apt-get install openjdk-8-jdk-headless -qq > /dev/null
# !wget -q http://www.apache.org/dist/spark/$SPARK_VERSION/$SPARK_VERSION-bin-hadoop2.7.tgz
# !tar xf $SPARK_VERSION-bin-hadoop2.7.tgz
# !pip install -q findspark
# Set Environment Variables
# JAVA_HOME/SPARK_HOME must match the apt/wget install locations above
# (paths assume a Google Colab VM).
os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64"
os.environ["SPARK_HOME"] = f"/content/{spark_version}-bin-hadoop2.7"
# Start a SparkSession
import findspark
# Make the just-installed pyspark importable from this Python process.
findspark.init()
# -
# Start Spark session
from pyspark.sql import SparkSession
# Create (or reuse) a SparkSession for this notebook, named "Bigfoot".
spark = SparkSession.builder.appName("Bigfoot").getOrCreate()
# + colab={"base_uri": "https://localhost:8080/", "height": 459} colab_type="code" id="5yT4wANyFtR6" outputId="ee93e544-ba27-46e6-89d6-838cc8d324e6"
# Read in data from S3 Buckets
from pyspark import SparkFiles
url ="https://s3.amazonaws.com/dataviz-curriculum/day_1/bigfoot.csv"
# Distribute the remote file to the Spark workers so read.csv can find it.
spark.sparkContext.addFile(url)
# inferSchema=True makes Spark scan the file to guess column types;
# timestampFormat matches the sighting-timestamp layout in the CSV.
df = spark.read.csv(SparkFiles.get("bigfoot.csv"), header=True, inferSchema=True, timestampFormat="yyyy/MM/dd HH:mm:ss")
# Show DataFrame
df.show()
# + colab={} colab_type="code" id="zP-O7x64FtSC"
# Import date time functions
# + colab={} colab_type="code" id="76lFAjMQFtSH"
# Create a new DataFrame with the column Year
# + colab={} colab_type="code" id="wpJF31VGFtSL"
# Save the year as a new column
# + colab={} colab_type="code" id="sBpcHcZcFtSQ"
# Find the total bigfoot sightings per year
# + colab={} colab_type="code" id="vGvRXR0dFtSU"
# Import the summarized data to a pandas DataFrame for plotting
# Note: If your summarized data is still too big for your local memory then your notebook may crash
# + colab={} colab_type="code" id="KZ9HqmjoFtSY"
# Clean the data and rename the columns to "year" and "sightings"
# + colab={} colab_type="code" id="kPAL5A9oFtSc"
# Plot the year and sightings
# %matplotlib inline
# + colab={} colab_type="code" id="LsS-TQ5tGby5"
| 01-Lesson-Plans/22-Big-Data/1/Activities/10-Stu_Pyspark_DataFrames_Dates/Unsolved/bigfoot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + tags=[]
import numpy as np
import yaml
from tqdm.notebook import tqdm
import galsim
import batoid
import wfsim
import matplotlib.pyplot as plt
# -
# Some initial setup
# We'll do r-band for this demo.
bandpass = galsim.Bandpass("LSST_r.dat", wave_type='nm')
telescope = batoid.Optic.fromYaml("LSST_r.yaml")
# Detector pixel pitch in meters (10 microns).
pixel_scale = 10e-6
# Displayed for reference: the bandpass's effective wavelength (nm).
bandpass.effective_wavelength
# Setup observation parameters. Making ~plausible stuff up.
observation = {
    'zenith': 30 * galsim.degrees,
    'raw_seeing': 0.7 * galsim.arcsec,  # zenith 500nm seeing
    'wavelength': bandpass.effective_wavelength,
    'exptime': 15.0,  # seconds
    'temperature': 293.,  # Kelvin
    'pressure': 69., #kPa
    'H2O_pressure': 1.0 #kPa
}
# Setup atmospheric parameters
atm_kwargs = {
    'screen_size': 819.2,  # presumably meters -- TODO confirm units in wfsim docs
    'screen_scale': 0.1,
    'nproc': 6  # create screens in parallel using this many CPUs
}
# + tags=[]
# We loaded the fiducial telescope, but we actually want to perturb it
# out of alignment a bit and misfigure the mirrors a bit.
# The big question is how much to perturb each potential
# degree-of-freedom. Let's not dwell on that at the moment though; for
# demonstration, the following will do.
# NOTE: the order of the rng.uniform draws below fixes the random
# stream; reordering these statements changes the realized perturbations.
rng = np.random.default_rng()
# Misalignments of M2 and camera first
M2_offset = np.array([
    rng.uniform(-0.0001, 0.0001),  # meters
    rng.uniform(-0.0001, 0.0001),
    rng.uniform(-0.00001, 0.00001),
])
# Small random tilts, drawn in arcminutes and converted to radians.
M2_tilt = (
    batoid.RotX(np.deg2rad(rng.uniform(-0.01, 0.01)/60)) @
    batoid.RotY(np.deg2rad(rng.uniform(-0.01, 0.01)/60))
)
camera_offset = np.array([
    rng.uniform(-0.001, 0.001),  # meters
    rng.uniform(-0.001, 0.001),
    rng.uniform(-0.00001, 0.00001),
])
camera_tilt = (
    batoid.RotX(np.deg2rad(rng.uniform(-0.01, 0.01)/60)) @
    batoid.RotY(np.deg2rad(rng.uniform(-0.01, 0.01)/60))
)
# Apply the rigid-body misalignments; each with* call returns a new
# telescope, leaving the fiducial `telescope` untouched.
perturbed = (
    telescope
    .withGloballyShiftedOptic("M2", M2_offset)
    .withLocallyRotatedOptic("M2", M2_tilt)
    .withGloballyShiftedOptic("LSSTCamera", camera_offset)
    .withLocallyRotatedOptic("LSSTCamera", camera_tilt)
)
# Now let's perturb the mirrors, we should use the actual mirror modes
# here, but for now we'll just use Zernike polynomials.
M1M3_modes = rng.uniform(-0.05, 0.05, size=25)  # waves
M1M3_modes *= bandpass.effective_wavelength*1e-9  # -> meters
# M1M3 bends coherently, so use a single Zernike perturbation for both,
# Set the outer radius to the M1 radius so the polynomial doesn't
# explode. It's fine to use a circular Zernike here though (no inner
# radius).
M1M3_surface_perturbation = batoid.Zernike(
    M1M3_modes,
    R_outer=telescope['M1'].obscuration.original.outer,
)
# Figure errors are modeled additively: perturbed surface = nominal + Zernike.
perturbed = perturbed.withSurface(
    "M1",
    batoid.Sum([
        telescope['M1'].surface,
        M1M3_surface_perturbation
    ])
)
perturbed = perturbed.withSurface(
    "M3",
    batoid.Sum([
        telescope['M3'].surface,
        M1M3_surface_perturbation
    ])
)
# M2 gets independent perturbations from M1M3
M2_modes = rng.uniform(-0.05, 0.05, size=25)  # waves
M2_modes *= bandpass.effective_wavelength*1e-9  # -> meters
M2_surface_perturbation = batoid.Zernike(
    M2_modes,
    R_outer=telescope['M2'].obscuration.original.outer,
)
perturbed = perturbed.withSurface(
    "M2",
    batoid.Sum([
        telescope['M2'].surface,
        M2_surface_perturbation
    ])
)
# + tags=[]
# We can take a quick look at how we've perturbed the optics by making
# a spot diagram. The batoid.spot tool returns points in meters, so
# we divide by pixel_scale to get pixels. We also look in a few points
# around the field of view to get a global picture.
# Field angles in degrees: field center plus four points near the edge.
for thx, thy in [(0,0), (-1.5, 0), (1.5, 0), (0, -1.5), (0, 1.5)]:
    sx, sy = batoid.spot(
        perturbed,
        np.deg2rad(thx), np.deg2rad(thy),
        bandpass.effective_wavelength*1e-9,  # batoid wants meters
        nx=128
    )
    plt.figure()
    plt.scatter(sx/pixel_scale, sy/pixel_scale, s=1)
    plt.show()
# -
# To make donuts, we need to be intra-focal or extra-focal.
# To simulate normal science operations mode, shift the detector:
# 1.5 mm inside focus...
intra = perturbed.withGloballyShiftedOptic(
    "Detector", [0, 0, -0.0015]
)
# ...and 1.5 mm outside focus.
extra = perturbed.withGloballyShiftedOptic(
    "Detector", [0, 0, +0.0015]
)
# + tags=[]
# Build a simulator for the intra-focal configuration; photons are
# accumulated onto a 512x512 postage stamp.
intra_simulator = wfsim.SimpleSimulator(
    observation,
    atm_kwargs,
    intra,
    bandpass,
    shape=(512, 512),
    rng=rng
)
# + tags=[]
# Now we can choose some parameters for a star and start simulating
# First, choose a field angle. At the moment, the simulator code only
# works close to the boresight direction, so just use that. I'll
# extend that soon.
# Field angle of the star, in radians (here: exactly on-axis).
thx = np.deg2rad(0.0)
thy = np.deg2rad(0.0)
# + tags=[]
# We also want to simulate chromatically. We could fetch an actual
# stellar SED for this, but it's easier and probably always good enough
# to just use a black body with a reasonable temperature.
star_T = rng.uniform(4000, 10000)  # Kelvin
sed = wfsim.BBSED(star_T)
# + tags=[]
# We also need a flux (which needs to be an integer):
# Total photon count for the star.
flux = int(rng.uniform(1_000_000, 2_000_000))
# + tags=[]
# Draw the star's photons through atmosphere + optics and accumulate
# them onto the intra-focal image.
intra_simulator.add_star(thx, thy, sed, flux, rng)
# + tags=[]
# We can look at our star now:
plt.figure()
plt.imshow(intra_simulator.image.array)
plt.show()
# -
# Our image doesn't have any sky background noise in it yet.
# Here we add some.
# 1000.0 is presumably the mean sky level per pixel -- TODO confirm in wfsim.
intra_simulator.add_background(1000.0, rng)
# Here's our final star
plt.figure()
plt.imshow(intra_simulator.image.array)
plt.show()
# + tags=[]
# Finally, what were the actual Zernike's for the perturbed telescope
# we generated? Get that using batoid.zernike:
zs = batoid.zernike(
    perturbed,
    thx, thy,
    bandpass.effective_wavelength*1e-9  # batoid wants meters,
)
# batoid returns coefficients in waves; convert to nanometers.
zs *= bandpass.effective_wavelength  # waves -> nm
# Print Noll indices 4..22 (skip piston/tip/tilt, which donuts can't sense).
for j in range(4, 23):
    print(f"{j:>2d} {zs[j]:6.1f} nm")
# + tags=[]
# TODO items:
# - let image cover non-central regions of the focal plane
# - allow easy reuse of the generated atmosphere, but with different
# perturbed telescopes
# - allow easy resetting of the accumulated image
# - get chip coords from obs_lsst?
# - add tech to use phase screen with target Zernikes in front of
# telescope
| notebooks/Rubin Donut Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Calculate Returns
#
# 
#
# Using the formula $ \frac{p_{t} - p_{t-1}}{p_{t-1}} $, let's apply it to some example prices. For this exercise, we'll calculate the returns for each day using the closing price data in `close`.
# + deletable=true editable=true
import pandas as pd
# Example closing prices for three tickers over five consecutive
# calendar days (2018-10-01 through 2018-10-05).
trading_days = pd.date_range('10/01/2018', periods=5, freq='D')
price_data = {
    'ABC': [1, 5, 3, 6, 2],
    'EFG': [12, 51, 43, 56, 22],
    'XYZ': [35, 36, 36, 36, 37],
}
close = pd.DataFrame(price_data, index=trading_days)
close
# + [markdown] deletable=true editable=true
# Using the returns formula on the closing prices for the ticker "ABC" should give us `[(5-1)/1, (3-5)/5, (6-3)/3, (2-6)/6]` or `[4, -0.4, 1, -0.66]`. To calculate this for the whole DataFrame, we'll use the [DataFrame.shift](https://pandas.pydata.org/pandas-docs/version/0.21/generated/pandas.DataFrame.shift.html) function.
#
# This function allows us to shift the rows of data. For example, the following shifts the rows in `close` two days back.
# + deletable=true editable=true
# Shift rows two places: each row now shows values from two days earlier.
close.shift(2)
# + [markdown] deletable=true editable=true
# The data for the row "2018-10-03" contains data that is two days in the past. You'll also notice the "NaN" values for "2018-10-01" and "2018-10-02". Since there's no data two days in the past for these dates, it returns a "NaN" value.
#
# Using this function, you can also shift into the future using a negative number. Let's shift one day into the future.
# + deletable=true editable=true
close.shift(-1)
# -
close
# Yesterday's close, aligned with today's row.
close.shift(1)
# Daily return: (today - yesterday) / yesterday, for every ticker at once.
(close - close.shift(1))/close.shift(1)
# + [markdown] deletable=true editable=true
# ## Quiz
# Using what you know about the [DataFrame.shift](https://pandas.pydata.org/pandas-docs/version/0.21/generated/pandas.DataFrame.shift.html) function, implement the function.
#
# Once you have successfully implemented the quiz, you can continue to the next concept in the classroom.
# + deletable=true editable=true
import quiz_tests
def calculate_returns(close):
    """
    Compute returns for each ticker and date in close.

    Parameters
    ----------
    close : DataFrame
        Close prices for each ticker and date

    Returns
    -------
    returns : DataFrame
        Returns for each ticker and date
    """
    # Previous day's close, aligned on the same index; the first row has
    # no predecessor, so its return is NaN.
    previous_close = close.shift(1)
    return (close - previous_close) / previous_close
# Run the classroom's quiz checker against our implementation.
quiz_tests.test_calculate_returns(calculate_returns)
# + [markdown] deletable=true editable=true
# ## Quiz Solution
# If you're having trouble, you can check out the quiz solution [here](calculate_returns_solution.ipynb).
| Compute Returns/calculate_returns.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### IMPORT LIBRARIES AS NUMPY AND PANDAS:
import numpy as np
import pandas as pd
# ### OPEN DATA FILE AS "first_flight_data":
# Raw flight log CSV; sensor columns come first, GPS columns after
# (see the column splits below).
flight_data = "first_flight_data.csv"
testing = pd.read_csv(flight_data)
# ### SHOW FIRST TEN ROWS OF THE DATA:
print("Dimensions of flight data :", testing.shape)
testing.head(10)
# ### SPLIT DATA INTO TWO PARTS AS "sensors_values" AND "GPS_values":
# First six columns hold the sensor readings.
sensors_values = testing.iloc[:, 0:6]
sensors_values.head()
# ### GPS_values:
# Columns 6-9 hold the GPS readings.
GPS_values = testing.iloc[:, 6:10]
GPS_values.head()
# ### DROP ROWS WHICH CONTAIN STARS:
dfsensors = pd.DataFrame(data=sensors_values)
# Keep every other row; presumably the '*' filler rows alternate with
# real readings in this file -- TODO confirm against the raw CSV.
sensor_new = dfsensors.iloc[::2, :]
sensor_new.head()
# ### DROP NULL ROWS BY USING "dropna()" :
dfGPS = pd.DataFrame(data=GPS_values)
GPS_new = dfGPS.dropna()
GPS_new.head()
# ### INDEXES OF THE TWO PARTS ARE NOT SIMILAR. WE MUST REINDEX THEM:
# +
total_row, total_column = pd.DataFrame(data=sensor_new).shape
dfs = pd.DataFrame(data=sensor_new)
dfG = pd.DataFrame(data=GPS_new)
# Fresh 0..n-1 index, shared by both halves so they align row-for-row.
rows = list(range(total_row))
# -
# ### REINDEXING OF SENSORS VALUES:
dfs['new_index'] = rows
sensor1 = dfs.set_index('new_index')
# Drop the index name so it prints like a default RangeIndex.
sensor1.index.name = None
sensor1.head()
# ### REINDEXING OF GPS VALUES:
dfG['new_index'] = rows
GPS1 = dfG.set_index('new_index')
GPS1.index.name = None
GPS1.head()
# ### COMBINE SENSORS VALUES AND GPS VALUES :
# +
dfs = pd.DataFrame(data=sensor1)
dfG = pd.DataFrame(data=GPS1)
# Side-by-side concatenation; both frames now share the same index.
final_data = pd.concat([dfs, dfG], axis=1)
final_data = final_data.reindex(dfs.index)
# -
final_data.head(10)
# ### GOOD JOB!
pd.DataFrame(data=final_data).to_csv('cleaned_flight_data.csv')
| Data Science and Machine Learning/Machine-Learning-In-Python-THOROUGH/EXAMPLES/PROJECT_EXAMPLES/01_Flight_Data_Cleaning/Flight_Dataset_Cleaning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="uuAr0PR4P1Z4"
# <h2>Performing Basic Frequency Analysis</h2>
# Let's start by using some very basic frequency analysis on a document to see if we can determine what the document is about based on word frequency.
# -
# **Load a Text Document**
# <br>Run the cell below to load a document and view the text it contains.
# +
# Load the Moon-speech text into a single string.
with open('Moon.txt', 'r') as doc1:
    # Read the document
    doc1Txt = doc1.read()
    # print its contents
    print(doc1Txt)
# -
# <h3><b>Normalizing the Text:</b></h3>
#
# Text normalization is often the next step after loading a text document; it is the process of transforming a piece of text into a canonical (official) form. Normalization includes a variety of activities such as:
# * Tokenization
# * Parts of speech tagging
# * Phrase chunking
# * Remove Punctuations
# * Stemming
# * Lemmatization
# * Spell check
# * Lowercase
# * Remove Stopwords
# * Expand Contractions
# Stemming usually refers to a crude heuristic process that chops off the ends of words and often includes the removal of derivational affixes. Lemmatization usually refers to the use of a vocabulary and morphological analysis of words, normally aiming to remove inflections only and to return the base or dictionary form of a word, which is known as the lemma.
# <br> [link](https://nlp.stanford.edu/IR-book/html/htmledition/stemming-and-lemmatization-1.html)
# For this exercise, we shall perform the following text-normalization activities;-
# 1. Remove Punctuation
# 2. Change to Lowercase
# 3. Remove Stopwords
# 4. Stemming
# 5. Tokenization.
# +
# Let's import some much needed libraries
from nltk.stem.porter import PorterStemmer # for stemming the words
from string import punctuation # Import the string of punctuations
import math
from textblob import TextBlob as tb # import textblob
from nltk.corpus import stopwords # import stopwords
import matplotlib.pyplot as plt # for plotting charts
import nltk # The natural language toolkit library
import pandas as pd # for making Dataframes
from nltk.probability import FreqDist # The frequency distribution module
print('All modules imported!')
# -
# <br>**1. Remove Punctuations and Digits:**<br>
#
# Text includes a lot of punctuation, which we need to remove if we want to work only with the actual words. We shall go step further and also remove numbers from the text
# +
from string import punctuation
# First remove digits
doc1Txt = ''.join(c for c in doc1Txt if not c.isdigit())
# -
# Next we remove Punctuations
doc1Txt = ''.join(c for c in doc1Txt if c not in punctuation)
# **2. Change to Lowercase:**
doc1Txt = ''.join(c.lower() for c in doc1Txt)
# **3. Remove Stopwords:**
#
# <br>A large number of the words in the text are common words like "the" or "and". These "stopwords" add little in the way of semantic meaning to the text, and won't help us determine the subject matter - so run the cell below to remove them.
# +
# Remove Stopwords
# First download the 'stopwords' from nltk
nltk.download("stopwords")
from nltk.corpus import stopwords
# Keep only words not in the English stopword list.
doc1Txt = ' '.join(word for word in doc1Txt.split() if word not in (stopwords.words('english')))
print("\n")
print(doc1Txt)
# -
# **4. Tokenization:**
#
# Get the Frequency Distribution
# Now let's tokenize the text (split it into individual words), and count the number of times each word occurs.
# +
# The 'punkt' tokenizer models are required by word_tokenize.
nltk.download("punkt")
# Tokenize the text into individual words
moon_words = nltk.tokenize.word_tokenize(doc1Txt)
moon_words
# -
# **Let's get the frequency distribution count using the FreqDist service**
# Count how often each token occurs.
fdist = FreqDist(moon_words)
fdist
type(fdist)
# **Let's make it a DataFrame**
# +
# Transpose the single-row frame so each word becomes an index entry.
moon_df = pd.DataFrame(fdist, index=[0]).T
moon_df.columns = ['Count']
moon_df.head()
# -
# **Visualize Word Frequency**
#
# It's often easier to analyze frequency by creating a visualization, such as a Pareto chart.
# Sort descending so the Pareto chart shows the most frequent words first.
moon_df.sort_values('Count', ascending = False, inplace=True)
fig = plt.figure(figsize=(16, 9))
ax = fig.gca()
# Plot only the 60 most common words to keep the chart readable.
moon_df['Count'][:60].plot(kind = 'bar', ax = ax, color="red")
ax.set_title('Frequency of the most common words')
ax.set_ylabel('Frequency of word')
ax.set_xlabel('Word')
plt.show()
# We can now see the most common words of the Moon speech, in a pareto chart, showing the highest frequencies first. These words are:
# * new
# * go
# * space
# * hostile
# **5. Stemming**
#
# Until now, we've simply counted the number of occurrences of each word. This doesn't take into account the fact that multiple words may sometimes be based on the same common base, or stem, and so may be semantically equivalent. For example, "fishes", "fished", "fishing", and "fisher" are all derived from the stem "fish".
#
# So let's stem the words so we can perform feature extraction
# Get the word stems
ps = PorterStemmer()
# Note: this rebinds doc1Txt from a string to a list of stemmed tokens.
doc1Txt = [ps.stem(word) for word in moon_words]
# +
# Get Frequency distribution
fdist = FreqDist(doc1Txt)
moon_df = pd.DataFrame(fdist, index =[0]).T
moon_df.columns = ['Count']
# Plot frequency
moon_df.sort_values('Count', ascending = False, inplace=True)
fig = plt.figure(figsize=(16, 9))
ax = fig.gca()
moon_df['Count'][:60].plot(kind = 'bar', ax = ax, color="red")
ax.set_title('Frequency of the most common words')
ax.set_ylabel('Frequency of word')
ax.set_xlabel('Word')
plt.show()
# -
# <h3><b>Feature Extraction:</b></h3>
# Feature extraction is the next process, after text-normalization phase. Generally, feature-extraction for text data has two main steps
# 1. Define Vocabulary
# 2. Vectorize Documents
#
# Step 1 basically has to do with identifying words based on their frequency and record them as a vocabulary, using a distribution, just as we did in tokenization and frequency distribution above.
#
# For step 2, we shall vectorize the text using TF-IDF algorithm.
# As stated above, TF-IDF shows the relative importance of a word or words to a document, given a collection of documents. Therefore, we need to download a few more documents.
# <br>This also implies normalizing each downloaded document just as we did with the Moon.txt above. Remember that in programming, the moment you start repeating code, then it's time to write a function.
#
# So let's write a function that performs the five normalizing steps above for a list of documents, and for uniformity's sakes, let's apply it to all 4 documents
def normalize_docs(doc_list):
    """Normalize a list of text documents.

    Normalization here applies four steps, in order:
    1. Removing Punctuations
    2. Removing Numbers
    3. Setting to Lowercase
    4. Removing Stopwords

    :@Param doc_list: A list of text documents
    :@Return: The doc_list fully normalized
    """
    # Translation table that deletes every punctuation character.
    punct_table = str.maketrans('', '', punctuation)
    cleaned_docs = []
    for text in doc_list:
        # Strip punctuation in a single pass.
        text = text.translate(punct_table)
        # Drop digit characters.
        text = ''.join(ch for ch in text if not ch.isdigit())
        # Fold everything to lowercase.
        text = text.lower()
        # Filter out English stopwords, then rejoin the surviving words.
        kept_words = [w for w in text.split()
                      if w not in stopwords.words('english')]
        cleaned_docs.append(' '.join(kept_words))
    return cleaned_docs
# let's read in all the docs as follows, best to do so with a simple method.
# +
doc_names = ['Moon.txt', 'Inaugural.txt', 'Cognitive.txt', 'Gettysburg.txt']

def read_docs(doc_names):
    """Read a list of text documents from disk.

    @Param doc_names: List of document file names
    @return: The documents' text contents, in the same order
    """
    output = []
    for name in doc_names:
        try:
            # Some corpus files are saved as UTF-16; try that first.
            with open(name, 'r', encoding='utf-16') as f:
                docs = f.read()
        except UnicodeError:
            # Not UTF-16: fall back to the platform default encoding.
            # (Previously a bare `except:` swallowed *every* error here,
            # including KeyboardInterrupt, masking real failures.)
            with open(name, 'r') as f:
                docs = f.read()
        output.append(docs)
    return output
# -
# Read all four documents and normalize them with the helpers above.
doc_list = read_docs(doc_names)
doc_list = normalize_docs(doc_list)
# Unpack for convenience; order matches doc_names.
doc1Txt, doc2Txt, doc3Txt, doc4Txt = doc_list
# +
# Let's see one of the documents
doc4Txt
# -
# Normalization returns plain strings.
type(doc4Txt)
# <h3><b>Using Term Frequency - Inverse Document Frequency<b></h3>
#
# In the previous example, we've used basic term frequency to determine each word's "importance" based on how often it appears in the document. When dealing with a large corpus of multiple documents, a more commonly used technique is term frequency, inverse document frequency (or TF-IDF) in which a score is calculated based on how often a word or term appears in one document compared to its more general frequency across the entire collection of documents. Using this technique, a high degree of relevance is assumed for words that appear frequently in a particular document, but relatively infrequently across a wide range of other documents.
# **Get TF-IDF Values for the top five words in each document**
# +
class Tfidf(object):
    """Compute TF-IDF scores across a corpus of text documents.

    Each document must expose a ``words`` attribute listing its tokens
    (e.g. a TextBlob).
    """
    def __init__(self, docs, doc_names):
        self.docs = docs            # corpus: objects with a .words token list
        self.doc_names = doc_names  # display names, parallel to docs

    def _tf(self, word, doc):
        """Term frequency: fraction of doc's tokens equal to word."""
        lenOfDoc = len(doc.words)
        if lenOfDoc < 1: return 0
        else: return doc.words.count(word) / lenOfDoc

    def _contains(self, word, docs):
        """Number of documents in docs that contain word."""
        return sum(1 for doc in docs if word in doc.words)

    def _idf(self, word, docs):
        """Inverse document frequency: log(N / doc-count), 0 if absent."""
        docsCount = self._contains(word, docs)
        if docsCount < 1 : return 0
        else: return math.log(len(docs) / docsCount)

    def tfidf(self):
        """Print the five highest-scoring TF-IDF words of each document.

        Bug fix: previously this read the module-level globals ``docs``
        and ``doc_names``, silently ignoring the corpus passed to
        __init__; it now uses self.docs / self.doc_names.
        """
        print('-----------------------------------------------------------')
        for i, doc in enumerate(self.docs):
            print("Top words in document {}".format(self.doc_names[i]))
            scores = {word: (self._tf(word, doc) * self._idf(word, self.docs))
                      for word in doc.words}
            sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
            for word, score in sorted_words[:5]:
                print("\tWord: {}, TF-IDF: {}".format(word, round(score, 5)))
            print()
# -
# Create a collection of documents as textblobs
# TextBlob provides the .words tokenization the Tfidf class relies on.
doc1 = tb(doc1Txt)
doc2 = tb(doc2Txt)
doc3 = tb(doc3Txt)
doc4 = tb(doc4Txt)
docs = [doc1, doc2, doc3, doc4]
type(doc1)
# Next, instantiate an object of the Tfidf class() and pass the list of documents to it
text_docs = Tfidf(docs, doc_names)
# Next, call the tfidf() function on the object and have it print out the Top 5 TF-IDF words per document.
# These are the words that are peculiar to each document and define its specific theme.
text_docs.tfidf()
| hard_coding_tfidf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="PX6CfuJhjj4C" colab_type="code" colab={}
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + id="lEXCPHwlkxUh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="2e28c967-2f63-41f0-89e2-0f71f9055f06"
# Load the Wine dataset; the last column holds the class label.
data = pd.read_csv("Wine.csv")
data
# + id="X--FgT8FlYrp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="5b86d224-be0d-460f-cef4-573614210e4f"
# Features: all columns but the last; label: the last column.
x = data.iloc[:, :-1].values
y = data.iloc[:, -1].values
y
# + id="CdXRuDZBnWOf" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.2, random_state = 0)
# + id="nOv9t3DSna82" colab_type="code" colab={}
from sklearn.preprocessing import StandardScaler
# Standardize features; fit on the training set only to avoid leakage.
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)
# + id="ULbdONPCli7e" colab_type="code" colab={}
from sklearn.decomposition import KernelPCA
# Project onto 2 components with an RBF kernel (nonlinear PCA).
kpca = KernelPCA(n_components = 2, kernel= "rbf")
x_train = kpca.fit_transform(x_train)
x_test = kpca.transform(x_test)
# + id="d8mL_3gmn4bR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="8a5d8063-653a-4e52-9533-535836e49656"
from sklearn.linear_model import LogisticRegression
# Fit a linear classifier in the 2-D kernel-PCA space.
classifier = LogisticRegression(random_state = 0)
classifier.fit(x_train, y_train)
# + id="28fnvGQGn6HQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="abcf61d1-bcd3-4735-a8c2-458b9171de2e"
from sklearn.metrics import confusion_matrix, accuracy_score
# Evaluate on the held-out test set.
y_pred = classifier.predict(x_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
accuracy_score(y_test, y_pred)
# + id="LlQQErdKn8rV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 366} outputId="fd4a2f57-0875-42db-d1bb-f25104729da8"
from matplotlib.colors import ListedColormap
# Decision regions over the TRAINING set: evaluate the classifier on a
# 0.01-spaced grid covering the data (padded by 1 unit on each side),
# color each region, then overlay the training points by class.
x_set, y_set = x_train, y_train
x1, x2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1, stop = x_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = x_set[:, 1].min() - 1, stop = x_set[:, 1].max() + 1, step = 0.01))
plt.contourf(x1, x2, classifier.predict(np.array([x1.ravel(), x2.ravel()]).T).reshape(x1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(x1.min(), x1.max())
plt.ylim(x2.min(), x2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1],
                c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Training set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
# + id="VUOrv0GDn-bo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 366} outputId="f7e4b3ea-2ad1-4920-d415-eea766ad80f9"
from matplotlib.colors import ListedColormap
# Same decision-region visualization as above, but over the TEST set.
x_set, y_set = x_test, y_test
x1, x2 = np.meshgrid(np.arange(start = x_set[:, 0].min() - 1, stop = x_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = x_set[:, 1].min() - 1, stop = x_set[:, 1].max() + 1, step = 0.01))
plt.contourf(x1, x2, classifier.predict(np.array([x1.ravel(), x2.ravel()]).T).reshape(x1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green', 'blue')))
plt.xlim(x1.min(), x1.max())
plt.ylim(x2.min(), x2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(x_set[y_set == j, 0], x_set[y_set == j, 1],
                c = ListedColormap(('red', 'green', 'blue'))(i), label = j)
plt.title('Logistic Regression (Test set)')
plt.xlabel('PC1')
plt.ylabel('PC2')
plt.legend()
plt.show()
| Dimensionality Reduction/Kernel_PCA_ipynb.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Project Proposal Group 15
#
# #### By: <NAME>, <NAME>, <NAME>
# ## 1. Introduction
#
# ### Below are just instructions to be deleted after finish
# *Begin by providing some relevant background information on the topic so that someone unfamiliar with it will be prepared to understand the rest of your proposal.*
#
# *Clearly state the question you will try to answer with your project. Your question should involve one or more random variables of interest, spread across two or more categories that are interesting to compare. For example, you could consider the annual maxima river flow at two different locations along a river, or perhaps gender diversity at different universities. Of the response variable, identify one location parameter (mean, median, quantile, etc.) and one scale parameter (standard deviation, inter-quartile range, etc.) that would be useful in answering your question. Justify your choices.*
#
# *Identify and describe the dataset that will be used to answer the question. Remember, this dataset is allowed to contain more variables than you need – feel free to drop them!*
#
# *Also, be sure to frame your question/objectives in terms of what is already known in the literature. Be sure to include at least two scientific publications that can help frame your study (you will need to include these in the References section). We have no specific citation style requirements, but be consistent.*
# ### 1.1 Background information on the topic
#
# Text
# ### 1.2 The Question
#
# Text
# ### 1.3 The Dataset
#
# Text
# ### 1.4 The Literature
#
# Text
# ## 2. Preliminary Results
# ### 2.0 Libraries and Packages
library(tidyverse)
library(readr)
library(tidyr)
library(dbplyr)
library(lubridate)
# ### 2.1 Read the data into R
# Load the COVID Racial Data Tracker CSV (path relative to the notebook).
df <- read_csv("../data/crdt-data.csv")
# Report the raw dimensions. (Fixed typo in output: "dimenstion" -> "dimension".)
cat("The dimension of this dataframe is: ", dim(df)[1], "x", dim(df)[2])
head(df)
# + [markdown] jp-MarkdownHeadingCollapsed=true tags=[]
# After reading the data, we noticed that this raw dataset consists of 5320 observations (rows) and 54 variables (columns), and contains many NA values. This suggests that we need to clean and wrangle it into tidier data to work with, and select our variables of interest. We are interested in estimating the proportions of positive tests among selected ethnicities, categorized by `Date` and `State`. Hence, we will only keep the following variables:
#
# `Cases_Total`, `Cases_White`, `Cases_Black`, `Cases_Latinx`, `Cases_Asian`, `Date`,`State`.
# <br>
# </br>
# And for the simplicity of this project, we will assume these data were Missing Completely At Random (MCAR), therefore dropping all NA values.
#
# -
# ### 2.2 Clean and wrangle data into a tidy format
# Keep only Date, State, and the case-count columns of interest, then
# drop rows with missing values (assumed MCAR, per the note above).
cases_piped <- df %>%
    select(Date, State, Cases_Total:Cases_Asian) %>%
    drop_na()
head(cases_piped)
# Fixed typo in output string: "dimenstion" -> "dimension".
cat("The dimension of cases_piped is: ",
    dim(cases_piped)[1], "x", dim(cases_piped)[2])
# After selecting only the variables of interest (mentioned above) and dropping the NA values, we noticed the data is still a bit large and unorganized; in addition, `Date` is shown as YYYYMMDD, and we still don't know how many states we have. Hence, we will separate the `Date` column into `Year`, `Month`, and `Day`, and read off the unique states in `State`.
# + tags=[]
# Parse the YYYYMMDD Date into separate Year / Month / Day columns,
# then drop the original Date column.
cases_piped2 <- cases_piped %>%
    mutate(Date = lubridate::ymd(Date),
           Year = lubridate::year(Date),
           Month = lubridate::month(Date),
           Day = lubridate::day(Date)) %>%
    select(-Date)
# Unique states present in the data.
states <- as.factor(cases_piped2$State) %>%
    levels()
# Unique years present in the data.
years <- cases_piped2 %>%
    select(Year) %>%
    unique()
states
years
# -
# The data only contains the years 2021 and 2020, and a total of 23 states. Next, restricting to `Year == 2021`, we will find the `State` with the most cases by summing the totals, and assign the result to a dataframe called `max_cases`.
# + tags=[]
# Enlarge the default plot size for the notebook renderer.
options(repr.plot.width = 20, repr.plot.height = 8)
# Total positive cases per state in 2021, sorted descending.
max_cases <- cases_piped2 %>%
    filter(Year == 2021) %>%
    ungroup() %>%
    select(-Year) %>%
    group_by(State) %>%
    summarize(Cases = sum(Cases_Total)) %>% arrange(desc(Cases))
# Bar chart of total cases by state.
cases_plot <- ggplot(max_cases, aes(x = State, y = Cases)) +
    geom_bar(stat = "identity") +
    labs(title="State VS Total Positive Cases") +
    theme(text = element_text(size = 20))
cases_plot
max_cases
# -
# +
# Focus on twos states WA and
# tidy_cdc <- Case_Death_Clean %>%
# filter(State == "WA") %>%
# summarize(p_white = sum(Cases_White) / sum(Cases_Total),
# p_black = sum(Cases_Black) / sum(Cases_Total),
# p_Asian = sum(Cases_Asian) / sum(Cases_Total),
# p_latin = sum(Cases_Latinx) / sum(Cases_Total),
# d_white = sum(Deaths_White) / sum(Deaths_Total),
# d_black = sum(Deaths_Black) / sum(Deaths_Total),
# d_Asian = sum(Deaths_Asian) / sum(Deaths_Total),
# d_latin = sum(Deaths_Latinx) / sum(Deaths_Total))
# tidy_cdc
# Pivoting by deaths?
# group by races?
# -
# ### 2.3 Plot the relevant raw data, tailoring your plot in a way that addresses your question.
# +
# Plotting raw data
# -
# ### 2.4 Compute estimates
#
# *Compute estimates of the parameter you identified across your groups. Present this in a table. If relevant, include these estimates in your plot.*
# +
# Summary statistics
# -
# ## 3. Methods: Plan
# ### 3.1 What do you expect to find?
#
# Text
# ### 3.2 What impact could such findings have?
#
# Text
# ### 3.3 What future questions could this lead to?
#
# Text
# ## 4. Reference
#
# About the Racial Data Tracker
# https://covidtracking.com/race/about
| notebooks/Stat201Proposal_Draft.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="N2_J4Rw2r0SQ" outputId="5a34d12b-ee0b-40cf-a183-16f5f2f3bb26"
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from tqdm import tqdm
# %matplotlib inline
from torch.utils.data import Dataset, DataLoader
import torch
import torchvision
import torch.nn as nn
import torch.optim as optim
from torch.nn import functional as F
# Select GPU when available; later cells still hard-code "cuda" — TODO confirm.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
# + id="y9cP_3OWFHdo"
# Make cuDNN deterministic so repeated runs give identical results.
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark= False
# + id="xR3b9hhxzqKB"
# Mosaic size: number of scalar "images" per data point.
m = 5 # 5, 50, 100, 500 , 1000 , 2000
# + id="xDifMu_gRXFN"
# Total number of mosaics to generate (half train, half test).
desired_num = 2000
# + colab={"base_uri": "https://localhost:8080/"} id="tl7WPcTcRW-8" outputId="0e58587a-9781-4f81-8cb0-9d03fd9ddae6"
# Split indices: [tr_i, tr_j) = training set, [tr_j, tr_k) = test set.
tr_i = 0
tr_j = int(desired_num/2)
tr_k = desired_num
tr_i, tr_j, tr_k
# + [markdown] id="F6fjud_Fr0Sa"
# # Generate dataset
# + colab={"base_uri": "https://localhost:8080/"} id="CqdXHO0Cr0Sd" outputId="0f23cfe8-ef4a-48c9-f55c-959a1bb57de5"
# Draw 500 class labels uniformly from {0, 1, 2}; fixed seed for reproducibility.
np.random.seed(12)
y = np.random.randint(0,3,500)
idx= []
for i in range(3):
    print(i,sum(y==i))          # class counts
    idx.append(y==i)            # boolean mask per class
# + id="ddhXyODwr0Sk"
x = np.zeros((500,))
# + id="DyV3N2DIr0Sp"
# One scalar feature per point; classes occupy disjoint intervals:
# class 0 -> [-1, 0), class 1 -> [0, 1), class 2 (background) -> [2, 3).
np.random.seed(12)
x[idx[0]] = np.random.uniform(low =-1,high =0,size= sum(idx[0]))
x[idx[1]] = np.random.uniform(low =0,high =1,size= sum(idx[1]))
x[idx[2]] = np.random.uniform(low =2,high =3,size= sum(idx[2]))
# + colab={"base_uri": "https://localhost:8080/"} id="qh1mDScsU07I" outputId="2ffb8ff0-55a0-4109-df5b-bd8073510a7e"
x[idx[0]][0], x[idx[2]][5]
# + colab={"base_uri": "https://localhost:8080/"} id="9Vr5ErQ_wSrV" outputId="f6bd5423-130a-43a4-91c7-d93dc3b30cdb"
print(x.shape,y.shape)
# + id="NG-3RpffwU_i"
# Rebuild the masks (the plotting cell below overwrites `y` with zeros).
idx= []
for i in range(3):
    idx.append(y==i)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="hJ8Jm7YUr0St" outputId="293b4a9d-c8d5-4831-dc7a-f75aaefa9115"
# Scatter the three classes on the x-axis (all at height 0).
# NOTE: this loop rebinds `y` to zeros; the masks in `idx` were rebuilt above.
for i in range(3):
    y= np.zeros(x[idx[i]].shape[0])
    plt.scatter(x[idx[i]],y,label="class_"+str(i))
plt.legend()
# + colab={"base_uri": "https://localhost:8080/"} id="3lMBZEHNBlF2" outputId="6b736302-bc27-4972-f9c4-62036c933bab"
# Indices of all background (class 2) points.
bg_idx = [ np.where(idx[2] == True)[0]]
bg_idx = np.concatenate(bg_idx, axis = 0)
bg_idx.shape
# + colab={"base_uri": "https://localhost:8080/"} id="blRbGZHeCwXU" outputId="627a92d4-b1c5-4f91-fdbd-98ff9b37e331"
np.unique(bg_idx).shape
# + id="Y43sWeX7C15F"
# Standardize the whole feature array using the background statistics only.
x = x - np.mean(x[bg_idx], axis = 0, keepdims = True)
# + colab={"base_uri": "https://localhost:8080/"} id="ooII7N6UDWe0" outputId="24c14744-6b22-4cfb-e18d-2197f26f37a1"
np.mean(x[bg_idx], axis = 0, keepdims = True), np.mean(x, axis = 0, keepdims = True)
# + id="g21bvPRYDL9k"
x = x/np.std(x[bg_idx], axis = 0, keepdims = True)
# + colab={"base_uri": "https://localhost:8080/"} id="GtFvIeHsDZJk" outputId="af8f5412-f221-4858-cc20-477ed771e8af"
np.std(x[bg_idx], axis = 0, keepdims = True), np.std(x, axis = 0, keepdims = True)
# + colab={"base_uri": "https://localhost:8080/", "height": 283} id="8-VLhUfDDeHt" outputId="0cc0e5fd-81e5-4088-e42e-6dcdf4261e9f"
# Re-plot after background standardization.
for i in range(3):
    y= np.zeros(x[idx[i]].shape[0])
    plt.scatter(x[idx[i]],y,label="class_"+str(i))
plt.legend()
# + id="UfFHcZJOr0Sz"
# Classes 0/1 are foreground (the label to predict); class 2 is background filler.
foreground_classes = {'class_0','class_1' }
background_classes = {'class_2'}
# + colab={"base_uri": "https://localhost:8080/"} id="OplNpNQVr0S2" outputId="c2fd4d44-204e-4e78-b175-c124a077fcb3"
# One worked example: build a single m-element mosaic with exactly one
# foreground value (at a random position) and background values elsewhere.
fg_class = np.random.randint(0,2)
fg_idx = np.random.randint(0,m)
a = []
for i in range(m):
    if i == fg_idx:
        # sample one point of the chosen foreground class
        b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
        a.append(x[b])
        print("foreground "+str(fg_class)+" present at " + str(fg_idx))
    else:
        # bg_class is always 2 (randint(2,3) can only return 2)
        bg_class = np.random.randint(2,3)
        b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
        a.append(x[b])
        print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
print(a.shape)
print(fg_class , fg_idx)
# + colab={"base_uri": "https://localhost:8080/"} id="dwZVmmRBr0S8" outputId="327afd31-76a7-4d6e-f0ae-0110ac123fc2"
a.shape
# + colab={"base_uri": "https://localhost:8080/"} id="OoxzYI-ur0S_" outputId="6be1ee5b-7a58-4aa0-f726-17d34f7f8c24"
np.reshape(a,(m,1))
# + id="jqbvfbwVr0TN"
# Generate the full dataset: desired_num mosaics, each seeded by its index j
# so the dataset is reproducible.
mosaic_list_of_images =[]
mosaic_label = []
fore_idx=[]
for j in range(desired_num):
    np.random.seed(j)
    fg_class = np.random.randint(0,2)
    fg_idx = np.random.randint(0,m)
    a = []
    for i in range(m):
        if i == fg_idx:
            b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
            a.append(x[b])
            # print("foreground "+str(fg_class)+" present at " + str(fg_idx))
        else:
            bg_class = np.random.randint(2,3)
            b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
            a.append(x[b])
            # print("background "+str(bg_class)+" present at " + str(i))
    a = np.concatenate(a,axis=0)
    mosaic_list_of_images.append(np.reshape(a,(m,1)))
    mosaic_label.append(fg_class)       # label = foreground class
    fore_idx.append(fg_idx)             # position of the foreground element
# + id="BOsFmWfMr0TR" colab={"base_uri": "https://localhost:8080/"} outputId="b7dcde32-6654-4ea5-816d-f71cdc034ade"
# Stack into shape (desired_num, m): one row per mosaic.
mosaic_list_of_images = np.concatenate(mosaic_list_of_images,axis=1).T
mosaic_list_of_images.shape
# + colab={"base_uri": "https://localhost:8080/"} id="2aIPMgLXNiXW" outputId="901ca793-dfa1-436e-a043-ea2cbd09b081"
mosaic_list_of_images.shape, mosaic_list_of_images[0]
# + colab={"base_uri": "https://localhost:8080/"} id="A3qcsbbzPfRG" outputId="8ebd2849-d93b-438e-aa8f-e13a2fde8d93"
for j in range(m):
    print(mosaic_list_of_images[0][j])
# + id="iPoIwbMHx44n"
def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number, m):
    """
    Collapse each m-element mosaic into one weighted-average scalar tensor.

    The foreground position gets weight dataset_number / m; each of the m-1
    background positions gets weight (m - dataset_number) / ((m - 1) * m), so
    the weights always sum to 1.

    Parameters
    ----------
    mosaic_dataset : indexable, shape (`N`, `m`)
        one row of m scalar values per data point
    labels : sequence
        per-mosaic class labels (returned unchanged)
    foreground_index : sequence
        position of the foreground element in each mosaic
    dataset_number : int
        controls the foreground weight (dataset_number / m)
    m : int
        number of elements per mosaic

    Returns
    -------
    (list of float64 tensors of shape [1], labels, foreground_index)
    """
    averaged = []
    hit_count = 0
    pick_histogram = np.zeros(m)
    for sample_no in range(len(mosaic_dataset)):
        acc = torch.zeros([1], dtype=torch.float64)
        # keep the original RNG side effect: reseed numpy once per sample
        np.random.seed(int(dataset_number*10000 + sample_no))
        preferred = foreground_index[sample_no]
        for pos in range(m):
            if pos == preferred:
                acc = acc + mosaic_dataset[sample_no][pos]*dataset_number/m
            else:
                acc = acc + mosaic_dataset[sample_no][pos]*(m-dataset_number)/((m-1)*m)
        # `preferred` is read straight from foreground_index, so this condition
        # is always true; both arms update the histogram identically.
        if preferred == foreground_index[sample_no]:
            hit_count += 1
            pick_histogram[preferred] += 1
        else:
            pick_histogram[preferred] += 1
        averaged.append(acc)
    print("number of correct averaging happened for dataset "+str(dataset_number)+" is "+str(hit_count))
    print("the averaging are done as ", pick_histogram)
    return averaged , labels , foreground_index
# + colab={"base_uri": "https://localhost:8080/"} id="5sEE5AxhSFsf" outputId="e760f2ac-82c3-4d67-8256-6015901c17b4"
# Training set: alpha = 1/m (dataset_number=1). Test set: dataset_number=m, so
# the foreground weight is 1 and background weights are 0.
avg_image_dataset_1 , labels_1, fg_index_1 = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[0:tr_j], mosaic_label[0:tr_j], fore_idx[0:tr_j] , 1, m)
test_dataset , labels , fg_index = create_avg_image_from_mosaic_dataset(mosaic_list_of_images[tr_j : tr_k], mosaic_label[tr_j : tr_k], fore_idx[tr_j : tr_k] , m, m)
# + id="0dYXnywAD-4l"
# Stack the per-sample [1] tensors into a single (n, 1) tensor.
avg_image_dataset_1 = torch.stack(avg_image_dataset_1, axis = 0)
# mean = torch.mean(avg_image_dataset_1, keepdims= True, axis = 0)
# std = torch.std(avg_image_dataset_1, keepdims= True, axis = 0)
# avg_image_dataset_1 = (avg_image_dataset_1 - mean) / std
# print(torch.mean(avg_image_dataset_1, keepdims= True, axis = 0))
# print(torch.std(avg_image_dataset_1, keepdims= True, axis = 0))
# print("=="*40)
test_dataset = torch.stack(test_dataset, axis = 0)
# mean = torch.mean(test_dataset, keepdims= True, axis = 0)
# std = torch.std(test_dataset, keepdims= True, axis = 0)
# test_dataset = (test_dataset - mean) / std
# print(torch.mean(test_dataset, keepdims= True, axis = 0))
# print(torch.std(test_dataset, keepdims= True, axis = 0))
# print("=="*40)
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="bT9-kEI7NAnR" outputId="9a969672-3efb-44c7-9126-18f923fcbc9a"
# Visualize the averaged training set, colored by label.
x1 = (avg_image_dataset_1).numpy()
y1 = np.array(labels_1)
# idx1 = []
# for i in range(3):
#   idx1.append(y1 == i)
# for i in range(3):
#   z = np.zeros(x1[idx1[i]].shape[0])
#   plt.scatter(x1[idx1[i]],z,label="class_"+str(i))
# plt.legend()
plt.scatter(x1[y1==0], y1[y1==0]*0, label='class 0')
plt.scatter(x1[y1==1], y1[y1==1]*0, label='class 1')
# plt.scatter(x1[y1==2], y1[y1==2]*0, label='class 2')
plt.legend()
plt.title("dataset1 CIN with alpha = 1/"+str(m))
# + colab={"base_uri": "https://localhost:8080/", "height": 305} id="DWQydhG9jg1I" outputId="6ca79b30-71d2-4d57-ba2b-9e901b25a6b5"
# Publication-style version of the same scatter (subset of points, fixed colors).
x1 = (avg_image_dataset_1).numpy()
y1 = np.array(labels_1)
idx_1 = y1==0
idx_2 = np.where(idx_1==True)[0]
idx_3 = np.where(idx_1==False)[0]
color = ['#1F77B4','orange', 'brown']
true_point = len(idx_2)
plt.scatter(x1[idx_2[:25]], y1[idx_2[:25]]*0, label='class 0', c= color[0], marker='o')
plt.scatter(x1[idx_3[:25]], y1[idx_3[:25]]*0, label='class 1', c= color[1], marker='o')
plt.scatter(x1[idx_3[50:75]], y1[idx_3[50:75]]*0, c= color[1], marker='o')
plt.scatter(x1[idx_2[50:75]], y1[idx_2[50:75]]*0, c= color[0], marker='o')
plt.legend()
plt.xticks( fontsize=14, fontweight = 'bold')
plt.yticks( fontsize=14, fontweight = 'bold')
plt.xlabel("X", fontsize=14, fontweight = 'bold')
# plt.savefig(fp_cin+"ds1_alpha_04.png", bbox_inches="tight")
# plt.savefig(fp_cin+"ds1_alpha_04.pdf", bbox_inches="tight")
# + colab={"base_uri": "https://localhost:8080/"} id="bu5Po7YIiLvN" outputId="97ad72d5-504a-454d-c204-497ec35c3e48"
avg_image_dataset_1[0:10]
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="1fhxsxf9384L" outputId="815c1b34-3f2f-42f5-b820-dc9e9a322046"
# Test set scaled by 1/m for plotting; the same scaling is applied in place below.
x1 = (test_dataset).numpy()/m
y1 = np.array(labels)
# idx1 = []
# for i in range(3):
#   idx1.append(y1 == i)
# for i in range(3):
#   z = np.zeros(x1[idx1[i]].shape[0])
#   plt.scatter(x1[idx1[i]],z,label="class_"+str(i))
# plt.legend()
plt.scatter(x1[y1==0], y1[y1==0]*0, label='class 0')
plt.scatter(x1[y1==1], y1[y1==1]*0, label='class 1')
# plt.scatter(x1[y1==2], y1[y1==2]*0, label='class 2')
plt.legend()
plt.title("test dataset1 ")
# + colab={"base_uri": "https://localhost:8080/"} id="wa4C_cnogtXE" outputId="874124db-b966-408f-d4c8-dce2fced1cf1"
test_dataset.numpy()[0:10]/m
# + id="3iJoJ0eIhSqz"
# Rescale the test set in place (dataset_number=m gave weight 1 to the
# foreground value; dividing by m brings it to the alpha=1/m scale).
test_dataset = test_dataset/m
# + colab={"base_uri": "https://localhost:8080/"} id="nKl8pSEbhVGU" outputId="08c33550-a905-477c-b977-fb83655efb11"
test_dataset.numpy()[0:10]
# + id="yL0BRf8er0TX"
class MosaicDataset(Dataset):
    """Minimal map-style Dataset pairing mosaic inputs with their labels."""

    def __init__(self, mosaic_list_of_images, mosaic_label):
        """
        Args:
            mosaic_list_of_images: indexable collection of mosaic inputs.
            mosaic_label: labels aligned with the mosaics.
        """
        self.mosaic = mosaic_list_of_images
        self.label = mosaic_label

    def __len__(self):
        # Number of samples equals the number of labels.
        return len(self.label)

    def __getitem__(self, idx):
        # Return the (input, label) pair at position idx.
        return self.mosaic[idx] , self.label[idx]
# + colab={"base_uri": "https://localhost:8080/"} id="4KsrW9qL9xgS" outputId="6ca65b0f-654a-48ba-b06f-4dfcc23a8dc6"
avg_image_dataset_1[0].shape, avg_image_dataset_1[0]
# + id="EY2l62APygaV"
# Batch size for both loaders.
batch = 200
# Training loader shuffles each epoch; test loader keeps a fixed order.
traindata_1 = MosaicDataset(avg_image_dataset_1, labels_1 )
trainloader_1 = DataLoader( traindata_1 , batch_size= batch ,shuffle=True)
# + id="Nh3mBQHZ8bEj"
testdata_1 = MosaicDataset(test_dataset, labels )
testloader_1 = DataLoader( testdata_1 , batch_size= batch ,shuffle=False)
# + id="5_XeIUk0r0Tl"
class Whatnet(nn.Module):
    """Single affine layer mapping a 1-d input to 2 class logits."""

    def __init__(self):
        super(Whatnet,self).__init__()
        self.linear1 = nn.Linear(1,2)
        # Deterministic given the surrounding torch.manual_seed call:
        # Xavier-normal weights, zero bias.
        torch.nn.init.xavier_normal_(self.linear1.weight)
        torch.nn.init.zeros_(self.linear1.bias)

    def forward(self,x):
        # No activation: raw logits go straight to CrossEntropyLoss.
        return self.linear1(x)
# + id="pjD2VZuV9Ed4"
def calculate_loss(dataloader,model,criter):
    """
    Average per-batch loss of `model` over `dataloader` (eval mode, no grads).

    Parameters
    ----------
    dataloader : iterable of (inputs, labels) batches
    model : nn.Module with at least one parameter
    criter : loss callable, e.g. nn.CrossEntropyLoss()

    Returns
    -------
    float : sum of batch losses divided by the number of batches.
        Assumes the dataloader is non-empty (as in the original).
    """
    model.eval()
    # Follow the model's own device instead of the original hard-coded "cuda",
    # which crashed on CPU-only machines.
    dev = next(model.parameters()).device
    r_loss = 0
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels = data
            inputs, labels = inputs.to(dev), labels.to(dev)
            outputs = model(inputs)
            loss = criter(outputs, labels)
            r_loss += loss.item()
    return r_loss/(i+1)
# + id="uALi25pmzQHV"
def test_all(number, testloader,net):
correct = 0
total = 0
out = []
pred = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to("cuda"),labels.to("cuda")
out.append(labels.cpu().numpy())
outputs= net(images)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
total += labels.size(0)
correct += (predicted == labels).sum().item()
pred = np.concatenate(pred, axis = 0)
out = np.concatenate(out, axis = 0)
print("unique out: ", np.unique(out), "unique pred: ", np.unique(pred) )
print("correct: ", correct, "total ", total)
print('Accuracy of the network on the %d test dataset %d: %.2f %%' % (total, number , 100 * correct / total))
# + id="4vmNprlPzTjP"
def train_all(trainloader, ds_number, testloader_list):
    """
    Train a fresh Whatnet on `trainloader`, then evaluate on each test loader.

    Parameters
    ----------
    trainloader : DataLoader of (inputs, labels) training batches
    ds_number : int, dataset index used only in log messages
    testloader_list : list of DataLoaders, each passed to test_all

    Returns
    -------
    (loss_curi, net) : per-epoch eval-mode loss history and the trained model.
    """
    print("--"*40)
    print("training on data set ", ds_number)
    torch.manual_seed(12)
    net = Whatnet().double()
    # Run on GPU when present, otherwise CPU (original hard-coded "cuda").
    dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = net.to(dev)
    criterion_net = nn.CrossEntropyLoss()
    optimizer_net = optim.Adam(net.parameters(), lr=0.001 ) #, momentum=0.9)
    acti = []
    loss_curi = []
    epochs = 1500
    # Loss before any parameter update.
    running_loss = calculate_loss(trainloader,net,criterion_net)
    loss_curi.append(running_loss)
    print('epoch: [%d ] loss: %.3f' %(0,running_loss))
    for epoch in range(epochs):  # loop over the dataset multiple times
        ep_lossi = []
        running_loss = 0.0
        net.train()
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            inputs, labels = data
            inputs, labels = inputs.to(dev), labels.to(dev)
            # zero the parameter gradients
            optimizer_net.zero_grad()
            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion_net(outputs, labels)
            running_loss += loss.item()
            loss.backward()
            optimizer_net.step()
        # Recompute the epoch loss over the full training set in eval mode.
        running_loss = calculate_loss(trainloader,net,criterion_net)
        if(epoch%200 == 0):
            print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
        loss_curi.append(running_loss)  # loss per epoch
        # Early stop once the training loss is essentially zero.
        if running_loss<=0.05:
            print('epoch: [%d] loss: %.3f' %(epoch + 1,running_loss))
            break
    print('Finished Training')
    # Final accuracy on the training set.
    correct = 0
    total = 0
    with torch.no_grad():
        for data in trainloader:
            images, labels = data
            images, labels = images.to(dev), labels.to(dev)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the %d train images: %.2f %%' % (total, 100 * correct / total))
    # Evaluate on every provided test loader.
    for i, j in enumerate(testloader_list):
        test_all(i+1, j,net)
    print("--"*40)
    return loss_curi, net
# + id="Yl41sE8vFERk"
# Collect loss curves across runs; only dataset 1 is trained in this notebook.
train_loss_all=[]
testloader_list= [ testloader_1 ]
# + id="5gQoPST5zW2t" colab={"base_uri": "https://localhost:8080/"} outputId="1edd0ca2-f1ae-4be2-f9bb-21e5ef33c95b"
loss, net = train_all(trainloader_1, 1, testloader_list)
train_loss_all.append(loss)
# + colab={"base_uri": "https://localhost:8080/"} id="1PKuWWdcic_T" outputId="bf3d787e-e4de-4f19-f480-e39212fe1691"
# Inspect the learned affine parameters.
net.linear1.weight, net.linear1.bias
# + id="In76SYH_zZHV"
# %matplotlib inline
# + id="BS4HtOHEzZ0E" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="4f5a245e-df97-45ab-edf9-483b2c37e7a0"
# Plot the training-loss curve for each dataset trained above.
for i,j in enumerate(train_loss_all):
    plt.plot(j,label ="dataset "+str(i+1))
plt.xlabel("Epochs")
plt.ylabel("Training_loss")
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
# + id="1UbTkfLUINTI"
| AAAI/Learnability/CIN/Linear/ds1/size_1000/synthetic_type0_Linear_m_5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import itertools, functools
import numpy as np
# ### Pairwise independent hash functions
# Reference:
# - https://cseweb.ucsd.edu/~slovett/teaching/SP15-CSE190/pairwise_hash_functions.pdf
# - https://people.csail.mit.edu/ronitt/COURSE/S12/handouts/lec5.pdf
#
# A family $H=\{h:U \to R\}$ is said to be pairwise independent, if for any two distinct elements $x_1 \neq x_2 \in U$ and two (possibly equal) values $y_1, y_2 \in R$,
# $$ Pr_{h\in H} [h(x_1)=y_1 \text{ and } h(x_2)=y_2] = \frac{1}{|R|^2} $$
# ### Example
# $U=\{0,1\}^k, R=\{0,1\}$
# Domain U = all k-bit binary vectors; range R = a single bit.
k = 3
# np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin int is
# the equivalent (and originally aliased) dtype.
U = np.asarray([np.asarray(u).astype(int) for u in itertools.product('01',repeat=k)])
R = [0,1]
# Family H:
# H = { h_{a,b}(x) = <a,x> + b (mod 2) : a in {0,1}^k, b in {0,1} }
H = []
for a in itertools.product('01',repeat=k):
    for b in [0,1]:
        a = np.asarray(a).astype(int)
        # The immediately-invoked outer lambda freezes the current (a, b);
        # a bare lambda would late-bind and every h would share the last pair.
        h_ab = (lambda y,z: (lambda x: (np.dot(y,x)+z)%2))(a,b)
        H.append(h_ab)
# Empirical check of pairwise independence: pick two distinct inputs and two
# target bits, then estimate Pr[h(x1)=y1 and h(x2)=y2] over random h in H.
(x1, x2) = U[np.random.choice(U.shape[0], 2, replace=False), :]
(y1, y2) = np.random.choice(R, 2)
np.average([h(x1)==y1 and h(x2)==y2 for h in np.random.choice(H, size=1000)])
# which is close to $\frac{1}{|R|^2} = \frac{1}{4}$
# ### More about universal hashing
# Reference:
# - https://en.wikipedia.org/wiki/Universal_hashing
# - https://en.wikipedia.org/wiki/K-independent_hashing
#
# (minhash)
# - https://github.com/4d55397500/learning-scraps/blob/master/minhash/minhash.py
# - https://stackoverflow.com/questions/2255604/hash-functions-family-generator-in-python
# - https://stackoverflow.com/questions/19701052/how-many-hash-functions-are-required-in-a-minhash-algorithm/25104050#25104050
#
# Properties of a family of hashing functions:
#
# universality < uniform difference (distance) property < pairwise indepedence (strong universality)
import random
import numpy as np
import sys
from collections import Counter
# ### Hashing integers (machine words)
# +
# Carter and Wegman's universal hashing scheme for machine-word integers.
PRIME = 131071  # prime strictly larger than both the range size and the input domain
m = 10          # size of the hash range: outputs land in {0, ..., m-1}
rang = 100      # inputs are drawn from {0, ..., rang-1}
assert PRIME > max(m,rang), "The PRIME is too small"
# Infinite generator of random members of the universal family
#   h_{a,b}(x) = ((a*x + b) mod PRIME) mod m
# i.e. an LCG-style affine map followed by reduction into the range.
def hash_generator_int():
    while True:
        a = random.randint(1, PRIME)
        b = random.randint(0, PRIME)
        def h_ab(x, _a=a, _b=b):
            # default arguments freeze this iteration's (a, b)
            return (_a * x + _b) % PRIME % m
        yield h_ab
family = hash_generator_int()
# -
# Two distinct random integers from the input range. (Python 2 print syntax.)
[x,y] = random.sample(range(rang), 2)
print "x", x, "y", y
# universality: empirical collision probability should be <= 1/m
print "<= 1/%s = %s" %(m, np.mean([h(x)==h(y) for h in [next(family) for _ in range(10000)]]))
# uniform difference property: (h(x)-h(y)) mod m should be roughly uniform over the range
Counter([(h(x)-h(y))%m for h in [next(family) for _ in range(10000)]])
# pairwise independent: Pr[h(x)=z1 and h(y)=z2] should be close to 1/m^2
(z1, z2) = random.sample(range(m), 2)
print "1/m^2 = 1/%s = %s"%(m**2,np.mean([h(x)==z1 and h(y)==z2 for h in [next(family) for _ in range(10000)]]))
# ### Hashing vectors (fixed-length sequence of machine words)
# +
k = 7 # length of sequence
def hash_generator_int():
while True:
a,b = random.randint(1, PRIME), random.randint(0, PRIME)
h_ab = (lambda a,b: lambda x: (a*x+b)%PRIME%m)(a,b)
yield h_ab
family_int = hash_generator_int()
def hash_generator_vector():
while True:
h = lambda x: sum(map(next(family_int), x))%m
yield h
family = hash_generator_vector()
# -
# Two random k-length vectors over the input range. (Python 2 print syntax.)
[x,y] = [random.sample(range(rang), k), random.sample(range(rang), k)]
print "x", x, "y", y
# universality: empirical collision probability should be <= 1/m
print "<= 1/%s = %s" %(m, np.mean([h(x)==h(y) for h in [next(family) for _ in range(10000)]]))
# uniform difference property: (h(x)-h(y)) mod m should be roughly uniform
Counter([(h(x)-h(y))%m for h in [next(family) for _ in range(10000)]])
# pairwise independent: Pr[h(x)=z1 and h(y)=z2] should be close to 1/m^2
(z1, z2) = random.sample(range(m), 2)
print "1/m^2 = 1/%s = %s"%(m**2,np.mean([h(x)==z1 and h(y)==z2 for h in [next(family) for _ in range(10000)]]))
# ### Hashing strings (variable-sized sequence of machine words)
# +
LARGER_PRIME = 2147483647  # 2^31 - 1, a Mersenne prime
# Integer-hash generator over the larger prime field.
def hash_generator_int():
    while True:
        a,b = random.randint(1, LARGER_PRIME), random.randint(0, LARGER_PRIME)
        h_ab = (lambda a,b: lambda x: (a*x+b)%LARGER_PRIME%m)(a,b)
        yield h_ab
family_int = hash_generator_int()
# Hash a variable-length sequence: fold it into one integer via a polynomial
# in a random base `a` (mod PRIME), then apply a random integer hash.
def hash_generator_string():
    while True:
        a = random.randint(0, PRIME)
        # BUG FIX: the original bare lambda captured `a` by reference (late
        # binding), so every h collected before being called shared the most
        # recently drawn `a`. Binding via a default argument freezes the value
        # drawn for this yield.
        # NOTE(review): `next(family_int)` is still evaluated per call, so a
        # yielded h is not a fixed function across invocations — left as in
        # the original; confirm against the intended construction.
        h = lambda x, a=a: next(family_int)(sum([xi*a**(len(x)-i) for i,xi in enumerate(x)])%PRIME)
        yield h
family = hash_generator_string()
# -
# Two random vectors of *different* lengths. (Python 2 print syntax.)
k1, k2 = 10, 5
[x,y] = [random.sample(range(rang), k1), random.sample(range(rang), k2)]
print "x", x, "y", y
# universality: empirical collision probability should be <= 1/m
print "<= 1/%s = %s" %(m, np.mean([h(x)==h(y) for h in [next(family) for _ in range(10000)]]))
# uniform difference property: (h(x)-h(y)) mod m should be roughly uniform
Counter([(h(x)-h(y))%m for h in [next(family) for _ in range(10000)]])
# pairwise independent: Pr[h(x)=z1 and h(y)=z2] should be close to 1/m^2
(z1, z2) = random.sample(range(m), 2)
print "1/m^2 = 1/%s = %s"%(m**2,np.mean([h(x)==z1 and h(y)==z2 for h in [next(family) for _ in range(10000)]]))
# ### Padding
# Reference:
# - https://crypto.stackexchange.com/questions/2753/in-the-sha-hash-algorithm-why-is-the-message-always-padded
# - Strongly universal string hashing is fast
#
# Padding can be used for converting variable-length string to fixed-length string. Then use hashing for vectors. However, padding with zero only won't work since universality will break. We need to introduce an extra bit at the end.
| 012319_hashing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Anwendung 2: Topic Modeling
# Eine weitere Anwendung von Vector-Space-Modellen ist das Topic Modeling. Es zielt auf die Identifikation von »topics« in einem Corpus. Die Berechnung dieser topics basiert dabei auf ihrer Verteilung in den Dokumenten des Corpus und damit letztlich auf Wort-Kookkurrenzen. Ob die ermittelten topics tatsächlich *Themen* im inhaltlichen Sinne, oder eher »Diskursstränge«, Wort-Cluster o.ä. sind, bleibt der inhaltlichen Interpretation überlassen.
#
# Weiter kann an dieser Stelle nicht auf die Hintergründe des Topic Modeling eingegangen werden. Für die weitere Lektüre sei daher dieser Beitrag empfohlen:
#
# Brett, Megan R. (2012): „Topic Modeling: A Basic Introduction“, Journal of Digital Humanities 2/1, http://journalofdigitalhumanities.org/2-1/topic-modeling-a-basic-introduction-by-megan-r-brett/.
#
# Für das Topic Modeling muss der Text nur minimal aufbereitet werden. Im Gegensatz zu der `TextCorpus`-Klasse, die für die Keyword Extraction verwendet wurde, wurden aber zwei Details verändert, die die Qualität der erzeugten Topics verbessern:
#
# * Für die Tokenisierung, also die Identifikation von Worten, wird nicht auf `TextBlob` zurückgegriffen, sondern der reguläre Ausdruck `\w+` verwendet. Durch die Begrenzung auf alphanumerische Zeichen werden etwa Satzzeichen wie Bindestriche oder Anführungszeichen automatisch aussortiert.
# * Es wird ein Stoppwortfilter verwendet, der auf einer vorgegebenen Liste basiert.
#
# Diese beiden Punkte sind in den Methoden `tokenize()` und `filter()` umgesetzt. Die Methode `get_texts()` wurde entsprechend angepasst. Zusätzlich sind noch zwei Details umgesetzt, die die Verarbeitungsgeschwindigkeit erhöhen: So wird die CSV-Tabelle nur einmal eingelesen, anstatt bei jedem Durchlauf neu geladen zu werden. Ebenso wird die Anzahl der Dokumente im Corpus, die für manche Berechnungen notwendig ist, zwischengespeichert. Diese beiden Punkte sind aber nicht zwingend erforderlich.
# +
import re
from gensim.corpora.textcorpus import TextCorpus
from textblob_de import TextBlobDE as TextBlob
from textblob_de import PatternParser
import pandas as pd
class CSVCorpus(TextCorpus):
    """Gensim corpus streaming tokenized, stopword-filtered texts from a CSV file.

    The CSV must provide a `text` column and a `date` column (parsed as dates).
    """

    def tokenize(self, text):
        """Lowercase `text` and split it into alphanumeric word tokens."""
        # Raw string: '\w' in a plain literal is an invalid escape sequence
        # (DeprecationWarning, SyntaxError in future Python versions).
        words = re.findall(r'\w+', text.lower(), re.U)
        return words

    def filter(self, tokens, stopwords):
        """Drop every token contained in `stopwords`."""
        return [token for token in tokens if not token in stopwords]

    def get_texts(self):
        """Yield one filtered token list per row of the CSV's `text` column."""
        # One stopword per line; read explicitly as UTF-8 so German stopwords
        # load correctly regardless of the platform's default encoding.
        with open('../Daten/stopwords.txt', encoding='utf-8') as stopwordfile:
            stopwords = stopwordfile.read().splitlines()
        table = self.gettable()
        for text in table['text']:
            tokens = self.tokenize(text)
            yield self.filter(tokens, stopwords)

    def gettable(self):
        """Read the CSV once and cache the DataFrame on the instance."""
        if not hasattr(self, 'table'):
            with self.getstream() as csvfile:
                self.table = pd.read_csv(csvfile, parse_dates=['date'], encoding='utf-8')
        return self.table

    def __len__(self):
        """Number of documents (CSV rows); cached after the first call."""
        if not hasattr(self, 'length'):
            # Cache length
            self.length = len(self.gettable())
        return self.length
# -
# Alternativ kann auch auf die Verfahren der Lemmatisierung und Wortartenfilterung zurückgegriffen werden, die in den vorherigen Einheiten besprochen wurden. Dazu werden die beiden Methoden `tokenzie()` und `filter()` überschrieben. Damit dauert die Verarbeitung aber deutlich länger, und es ist unklar, ob die Qualität der Analyse dadurch zwingend steigt. Für ein vergleichendes Experiment kann diese Version aber verwendet werden.
# +
from string import punctuation
from collections import namedtuple
class LemmatizedCSVCorpus(CSVCorpus):
    """CSV corpus variant that lemmatizes tokens and filters by part of speech."""

    def tokenize(self, text):
        """Parse `text` with TextBlobDE and return tagged/lemmatized tokens."""
        text = text.replace('\xa0', ' ')  # replace non-breaking space with a normal space
        # BUG FIX: re.sub's 4th positional argument is `count`, not `flags` —
        # passing re.U (== 32) there silently limited the substitution to the
        # first 32 matches. Pass it as the keyword `flags` instead.
        text = re.sub('[„“”‚‘’–]', '', text, flags=re.U)  # strip quotes and dashes
        blob = TextBlob(text, parser=PatternParser(lemmata=True))
        parse = blob.parse()
        # Tag names become namedtuple fields; '-' is not a valid identifier char.
        fieldnames = [tag.replace('-', '_') for tag in parse.tags]
        Token = namedtuple('Token', fieldnames)
        tokens = [Token(*token.split('/', 4)) for token in parse.split(' ')]
        return tokens

    def filter(self, tokens, stopwords):
        """Keep non-stopword, non-punctuation tokens as lemmata."""
        result = []
        for token in tokens:
            pos = token.part_of_speech[0:2]
            word = token.word
            if not word.lower() in stopwords and not word in punctuation:
                if pos == 'NN':
                    # German nouns are capitalized
                    result.append(token.lemma.title())
                else:
                    result.append(token.lemma)
        return result
# -
# Build the corpus from the speeches CSV; the dictionary is filled on the fly.
corpus = CSVCorpus('../Daten/Reden.csv')
len(corpus.dictionary)
# Remove very frequent and very rare words (gensim defaults), then reassign
# compact ids for the remaining entries.
corpus.dictionary.filter_extremes()
corpus.dictionary.compactify()
len(corpus.dictionary)
# Nach dem Filtern bleiben noch 12470 Einträge übrig.
#
# Gensim enthält eine Implementierung des Topic-Modeling-Verfahrens »LDA« (neben anderen). Diese Version ist dabei für sehr große Corpora optimiert, die resultierenden Topics sind aber leider oft nicht sehr leicht zu interpretieren. Daher soll hier die Implementierung aus dem Python-Paket »lda« verwendet werden. Sie arbeitet nicht direkt mit dem TextCorpus-Format von gensim, kann aber eine *sparse matrix* einlesen, die mit gensim erzeugt wird.
# +
# Convert the gensim corpus to a sparse matrix; corpus2csc yields terms x docs.
from gensim.matutils import corpus2csc
corpus_matrix = corpus2csc(corpus)
corpus_matrix.shape
# -
# lda expects docs x terms, so transpose.
corpus_matrix = corpus_matrix.transpose()
corpus_matrix.shape
# Aus technischen Gründen muss die Matrix nun noch in ein bestimmtes Zahlenformat (Kommazahl zu Ganzzahl) konvertiert werden.
corpus_matrix = corpus_matrix.astype(int)
# Ähnlich wie bei der Keyword Extraction wird hier im ersten Schritt ein Modell erzeugt, das vor allem bestimmte Parameter für die Berechnung speichert. Der wichtigste ist hierbei die Anzahl der Topics. Diese muss vorgegeben werden und kann nicht vom Algorithmus selbst bestimmt werden. 20 ist oft ein guter Ausgangswert, man sollte aber mit verschiedenen Werten experimentieren und die Ergebnisse vergleichen.
import lda
# 20 topics, 500 Gibbs-sampling iterations, fixed seed for reproducibility.
lda_model = lda.LDA(n_topics=20, n_iter=500, random_state=1)
lda_model.fit(corpus_matrix)
# *Hinweis:* Um die Ergebnisse etwas übersichtlicher darzustellen, ist eine gewisse Formatierung der Ausgabe nützlich. Dies könnte etwa mit HTML realisiert werden. Etwas einfacher ist das minimalistische Textformat »Markdown«, das etwa Zeilen mit vorangestellten Sternchen `*` in Aufzählungslisten umwandelt. Um dies einfacher zu nutzen, wird hier eine kleine Hilfsfunktion definiert. Zu Details siehe die [Syntax-Beschreibung](http://www.daringfireball.net/projects/markdown/syntax).
# +
# NOTE(review): IPython.nbconvert was split out into the separate `nbconvert`
# package in later versions; this import path may need updating.
from IPython.nbconvert.filters.markdown import markdown2html
# String subclass that Jupyter renders as Markdown-converted HTML.
class MD(str):
    def _repr_html_(self):
        # Jupyter calls this hook to obtain the rich display representation.
        return markdown2html(self)
MD('Das ist ein **Test!**')
# -
# Das Ergebnisformat von LDA sind Matrizen im numpy-Format. Diese sind sehr effizient und bieten eine Reihe von Berechnungsmöglichkeiten, sie sind jedoch auf den ersten Blick nicht ganz leicht zu verstehen. Der folgende Code wurde aus der Dokumentation des lda-Pakets übernomme und ein wenig angepasst. Für den Moment soll die Beschreibung ausreichen, dass hierüber für jedes Topic die einflussreichsten 20 Wörter ausgegeben werden.
import numpy as np
# id -> word lookup in dictionary order.
vocab = [corpus.dictionary[i] for i in range(len(corpus.dictionary))]
topic_word = lda_model.topic_word_ # model.components_ also works
n_top_words = 20
# For each topic, take the n_top_words-1 highest-probability words
# (argsort ascending, then reversed slice).
topics = [np.array(vocab)[np.argsort(topic_dist)][:-n_top_words:-1]
          for topic_dist in topic_word]
# Render one Markdown bullet per topic.
result = ''
for i, topic_words in enumerate(topics):
    result += '* **Topic {}:** {}\n'.format(i, ' '.join(topic_words))
MD(result)
# Diese Topics sind zunächst einmal probabilistisch identifizierte Wort-Cluster. Bei genauerer Betrachtung lässt sich aber für die meisten Topics ein Eindruck gewinnen, welches Thema die ausgegebenen Worte umreißen. Topic 0 etwa beschreibt die Themenfelder Universität und Forschung, Topic 2 dagegen den Bereich Film und Kino. Topic 1 dagegen lässt sich auf den ersten Blick weniger leicht interpretieren.
#
# Neben den Topics selbst gibt LDA auch eine Zuordnung von Topics zu Dokumenten aus. Es lassen sich also auch für jedes Dokument die relevantesten Topics ausgeben, die das Dokument beschreiben. Die ist ähnlich wie bei der Keyword Extraction, nur dass ganze Topics und nicht einzelne Schlüsselwörter zur Beschreibung herangezogen werden.
# +
# Reload the CSV to get the speech titles (row order matches the corpus).
data = pd.read_csv("../Daten/Reden.csv", parse_dates=['date'], encoding='utf-8')
titles = data['title']
# doc_topic[i] is the topic distribution of document i.
doc_topic = lda_model.doc_topic_
# For the first 10 documents, list the three most probable topics.
result = ''
for i in range(10):
    result += '\n\n**{}**\n\n'.format(titles[i])
    for topic in doc_topic[i].argsort()[:-4:-1]:
        result += ' * _Topic {}:_ {}\n'.format(topic, ' '.join(topics[topic]))
MD(result)
# -
# Dabei ist zunächst auffällig, dass die Topics 1 und/oder 7 relativ häufig auftauchen. Gemeinsam mit ihrer inhaltlichen Vagheit ergibt sich der Eindruck, dass es sich weitgehend um Residual-Topics handelt, die relativ unspezifische, aber regelmäßig auftretenden Wörter gruppieren. Die anderen Topics geben dagegen einen relativ guten Einblick in die inhaltlichen Schwerpunkte der Texte.
#
# Um später erneut auf die Ergebnisse zurückgreifen zu können, wird das berechnete Modell gespeichert.
# +
# Persist the fitted model so later notebooks can reuse it without refitting.
from pickle import dump
with open('../Daten/topicmodel20.pickle', 'wb') as picklefile:
    dump(lda_model, picklefile)
| 07_Topic_Modeling/Topic Modeling 20.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
"""
This program makes learning ev-gmm.
"""
# +
# __future__ module make compatible python2 and python3
from __future__ import division, print_function
# basic modules
import os
import os.path
import time
# for warning ignore
import warnings
#warning.filterwarnings('ignore')
# for file system manupulation
from shutil import rmtree
import glob
import argparse
# for save object
import pickle
# for make glaph
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
plt.rcParams['figure.figsize'] = (16, 5)
import librosa.display
# for scientific computing
import numpy as np
from numpy.linalg import norm
from sklearn.decomposition import PCA
from sklearn.mixture import GMM # GMM class cannot use after sklearn 0.20.0
import sklearn.mixture
#from sklearn.mixture.gaussian_mixture import _compute_precision_cholesky
from sklearn.preprocessing import StandardScaler
import scipy.sparse
from scipy.signal import firwin, lfilter
# for display audio controler
from IPython.display import Audio
# for manuplate audio data
import soundfile as sf
import pyworld as pw
import pysptk
from dtw import dtw
from fastdtw import fastdtw
# -
class WORLD(object):
    """
    WORLD based speech analyzer and synthesizer.

    Thin wrapper around pyworld: `harvest` for F0, `cheaptrick` for the
    spectral envelope, `d4c` for aperiodicity and `synthesize` for
    re-synthesis.
    Ref : https://github.com/k2kobayashi/sprocket/
    """

    def __init__(self, fs=16000, fftl=1024, shiftms=5.0, minf0=40.0, maxf0=500.0):
        """
        Parameters
        ----------
        fs : int
            Sampling frequency [Hz]
        fftl : int
            FFT length
        shiftms : float
            Shift (frame period) length [ms]
        minf0 : float
            Floor in F0 estimation [Hz]
        maxf0 : float
            Ceiling in F0 estimation [Hz]
        """
        self.fs = fs
        self.fftl = fftl
        self.shiftms = shiftms
        self.minf0 = minf0
        self.maxf0 = maxf0

    def analyze(self, x):
        """
        Analyze acoustic features.

        Parameters
        ----------
        x : array, shape(`T`)
            monaural speech signal in time domain

        Returns
        ----------
        f0 : array, shape(`num_frames`)
            F0 sequence
        sp : array, shape(`num_frames`, `fftl / 2 + 1`)
            Spectral envelope sequence
        ap : array, shape(`num_frames`, `fftl / 2 + 1`)
            aperiodicity sequence
        """
        f0, time_axis = pw.harvest(x, self.fs, f0_floor=self.minf0,
                                   f0_ceil=self.maxf0, frame_period=self.shiftms)
        sp = pw.cheaptrick(x, f0, time_axis, self.fs, fft_size=self.fftl)
        ap = pw.d4c(x, f0, time_axis, self.fs, fft_size=self.fftl)
        assert sp.shape == ap.shape
        return f0, sp, ap

    def analyze_f0(self, x):
        """
        Analyze F0 only.

        Parameters
        ----------
        x : array, shape(`T`)
            monaural speech signal in time domain

        Returns
        ----------
        f0 : array, shape(`num_frames`)
            F0 sequence
        """
        f0, time_axis = pw.harvest(x, self.fs, f0_floor=self.minf0,
                                   f0_ceil=self.maxf0, frame_period=self.shiftms)
        # BUGFIX: removed `assert f0.shape == x.shape()` -- it attempted to
        # call a tuple (TypeError) and asserted a false invariant anyway:
        # f0 has one value per analysis frame, not per input sample.
        return f0

    def synthesis(self, f0, sp, ap):
        """
        Re-synthesize a speech waveform from acoustic features.

        Parameters
        ----------
        f0 : array, shape(`num_frames`)
            F0 sequence
        sp : array, shape(`num_frames`, `fftl / 2 + 1`)
            Spectral envelope sequence
        ap : array, shape(`num_frames`, `fftl / 2 + 1`)
            aperiodicity sequence
        """
        return pw.synthesize(f0, sp, ap, self.fs, frame_period=self.shiftms)
class FeatureExtractor(object):
    """
    Analyze acoustic features from a waveform.

    This class may support several types of estimator like WORLD or
    STRAIGHT; the default (and currently only) type is WORLD.
    Ref : https://github.com/k2kobayashi/sprocket/
    """

    def __init__(self, analyzer='world', fs=16000, fftl=1024,
                 shiftms=5.0, minf0=50.0, maxf0=500.0):
        """
        Parameters
        ----------
        analyzer : str
            Analyzer backend; only 'world' is supported
        fs : int
            Sampling frequency [Hz]
        fftl : int
            FFT length
        shiftms : float
            Shift (frame period) length [ms]
        minf0 : float
            Floor in F0 estimation [Hz]
        maxf0 : float
            Ceiling in F0 estimation [Hz]

        Raises
        ------
        ValueError
            If `analyzer` names an unsupported backend.
        """
        self.analyzer = analyzer
        self.fs = fs
        self.fftl = fftl
        self.shiftms = shiftms
        self.minf0 = minf0
        self.maxf0 = maxf0
        if self.analyzer == 'world':
            self.analyzer = WORLD(fs=self.fs, fftl=self.fftl,
                                  minf0=self.minf0, maxf0=self.maxf0, shiftms=self.shiftms)
        else:
            # BUGFIX: `raise('...')` raises "TypeError: exceptions must
            # derive from BaseException"; use a real exception type.
            raise ValueError('Analyzer Error : not support type, see FeatureExtractor class.')
        self._f0 = None
        self._sp = None
        self._ap = None

    def analyze(self, x):
        """
        Analyze acoustic features.

        Parameters
        ----------
        x : array, shape(`T`)
            monaural speech signal in time domain

        Returns
        ----------
        f0 : array, shape(`num_frames`)
            F0 sequence
        sp : array, shape(`num_frames`, `fftl / 2 + 1`)
            Spectral envelope sequence
        ap : array, shape(`num_frames`, `fftl / 2 + 1`)
            aperiodicity sequence
        """
        # BUGFIX: np.float was removed in NumPy 1.24; the builtin float is
        # the exact same dtype.
        self.x = np.array(x, dtype=float)
        self._f0, self._sp, self._ap = self.analyzer.analyze(self.x)
        # clamp spurious negative F0 values to unvoiced (0)
        self._f0[self._f0 < 0] = 0
        if np.sum(self._f0) == 0.0:
            print("Warning : F0 values are all zero.")
        return self._f0, self._sp, self._ap

    def analyze_f0(self, x):
        """
        Analyze F0 only.

        Parameters
        ----------
        x : array, shape(`T`)
            monaural speech signal in time domain

        Returns
        ----------
        f0 : array, shape(`num_frames`)
            F0 sequence
        """
        self.x = np.array(x, dtype=float)
        self._f0 = self.analyzer.analyze_f0(self.x)
        # clamp spurious negative F0 values to unvoiced (0)
        self._f0[self._f0 < 0] = 0
        if np.sum(self._f0) == 0.0:
            print("Warning : F0 values are all zero.")
        return self._f0

    def mcep(self, dim=24, alpha=0.42):
        """
        Convert the analyzed spectral envelope to a mel-cepstrum sequence.

        Parameters
        ----------
        dim : int
            mel-cepstrum dimension
        alpha : float
            parameter of all-pass filter

        Returns
        ----------
        mcep : array, shape(`num_frames`, `dim + 1`)
            mel-cepstrum sequence
        """
        self._analyzed_check()
        return pysptk.sp2mc(self._sp, dim, alpha)

    def codeap(self):
        """
        Return the coded (band-averaged) aperiodicity of the analyzed signal.
        """
        self._analyzed_check()
        return pw.code_aperiodicity(self._ap, self.fs)

    def npow(self):
        """
        Normalized power sequence computed from the spectral envelope.

        Returns
        ----------
        npow : vector, shape(`num_frames`, `1`)
            Normalized power sequence of the given waveform [dB],
            relative to the utterance mean power
        """
        self._analyzed_check()
        npow = np.apply_along_axis(self._spvec2pow, 1, self._sp)
        meanpow = np.mean(npow)
        npow = 10.0 * np.log10(npow / meanpow)
        return npow

    def _spvec2pow(self, specvec):
        """
        Average power of one spectral-envelope frame (Parseval-style sum
        over the half spectrum with the interior bins counted twice).
        """
        fftl2 = len(specvec) - 1
        fftl = fftl2 * 2
        # DC and Nyquist bins are counted once, interior bins twice
        power = specvec[0] + specvec[fftl2]
        for k in range(1, fftl2):
            power += 2.0 * specvec[k]
        power /= fftl
        return power

    def _analyzed_check(self):
        # NOTE(review): this only raises when *no* analysis has run at all;
        # after analyze_f0() alone, _sp/_ap are still None and dependent
        # methods will fail later -- confirm intended behavior.
        if self._f0 is None and self._sp is None and self._ap is None:
            # BUGFIX: was `raise('...')` (TypeError on a string object)
            raise RuntimeError('Call FeatureExtractor.analyze() before this method.')
# +
class Synthesizer(object):
    """
    Waveform synthesis from acoustic features.
    Ref : https://github.com/k2kobayashi/sprocket/
    """

    def __init__(self, fs=16000, fftl=1024, shiftms=5.0):
        """
        Parameters
        ----------
        fs : int
            Sampling frequency
        fftl : int
            FFT length
        shiftms : float
            Shift length [ms]
        """
        self.fs = fs
        self.fftl = fftl
        self.shiftms = shiftms

    def synthesis(self, f0, mcep, ap, rmcep=None, alpha=0.42):
        """
        Generate a waveform from F0, mel-cepstrum and aperiodicity.

        Parameters
        ----------
        f0 : array, shape(`T`)
            F0 sequence
        mcep : array, shape(`T`, `dim`)
            mel-cepstrum sequence
        ap : array, shape(`T`, `fftl / 2 + 1`)
            aperiodicity sequence
        rmcep : array, shape(`T`, `dim`)
            reference mel-cepstrum used for power modification
        alpha : float
            parameter of all-pass filter

        Returns
        ----------
        wav : array
            synthesized waveform
        """
        if rmcep is not None:
            # match the power of the converted mel-cepstrum to the reference
            mcep = mod_power(mcep, rmcep, alpha=alpha)
        envelope = pysptk.mc2sp(mcep, alpha, self.fftl)
        return pw.synthesize(f0, envelope, ap, self.fs, frame_period=self.shiftms)

    def synthesis_diff(self, x, diffmcep, rmcep=None, alpha=0.42):
        """
        Filter a waveform with a differential mel-cepstrum (MLSA filter).

        Parameters
        ----------
        x : array, shape(`samples`)
            waveform sequence to be filtered
        diffmcep : array, shape(`T`, `dim`)
            differential mel-cepstrum sequence
        rmcep : array, shape(`T`, `dim`)
            reference mel-cepstrum used for power modification
        alpha : float
            parameter of all-pass filter

        Returns
        ----------
        wav : array
            synthesized waveform
        """
        signal = x.astype(np.float64)
        order = diffmcep.shape[1] - 1
        hop = int(self.fs / 1000 * self.shiftms)
        if rmcep is not None:
            # power modification of the differential cepstrum
            diffmcep = mod_power(rmcep + diffmcep, rmcep, alpha=alpha) - rmcep
        # mc2b: mel-cepstrum -> MLSA digital filter coefficients
        coefs = np.apply_along_axis(pysptk.mc2b, 1, diffmcep, alpha)
        mlsa = pysptk.synthesis.Synthesizer(
            pysptk.synthesis.MLSADF(order, alpha=alpha), hop)
        return mlsa.synthesis(signal, coefs)

    def synthesis_sp(self, f0, sp, ap):
        """
        Generate a waveform directly from a spectral envelope.

        Parameters
        ----------
        f0 : array, shape(`T`)
            F0 sequence
        sp : array, shape(`T`, `fftl / 2 + 1`)
            spectral envelope sequence
        ap : array, shape(`T`, `fftl / 2 + 1`)
            aperiodicity sequence

        Returns
        ----------
        wav : array
            synthesized waveform
        """
        return pw.synthesize(f0, sp, ap, self.fs, frame_period=self.shiftms)
def mod_power(cvmcep, rmcep, alpha=0.42, irlen=256):
    """
    Power modification based on impulse response.

    Adjusts the 0th (energy) mel-cepstrum coefficient of the converted
    sequence so its energy matches the reference sequence frame by frame.

    Parameters
    ----------
    cvmcep : array, shape(`T`, `dim`)
        array of converted mel-cepstrum
    rmcep : array, shape(`T`, `dim`)
        array of reference mel-cepstrum
    alpha : float
        parameter of all-pass filter
    irlen : int
        length of the impulse response used for energy computation

    Returns
    ----------
    modified_cvmcep : array, shape(`T`, `dim`)
        array of power modified converted mel-cepstrum

    Raises
    ------
    ValueError
        If the two sequences have different shapes.
    """
    if rmcep.shape != cvmcep.shape:
        # BUGFIX: `.format(...)` was inside the string literal, so the
        # message was never formatted; call it on the string instead.
        raise ValueError(
            "The shape of the converted and reference mel-cepstrum are different : {} / {}".format(cvmcep.shape, rmcep.shape)
        )
    # mc2e = Compute energy from mel-cepstrum. e-option
    cv_e = pysptk.mc2e(cvmcep, alpha=alpha, irlen=irlen)
    r_e = pysptk.mc2e(rmcep, alpha=alpha, irlen=irlen)
    # half the log energy ratio compensates the power in the cepstral domain
    dpow = np.log(r_e / cv_e) / 2
    modified_cvmcep = np.copy(cvmcep)
    modified_cvmcep[:, 0] += dpow
    return modified_cvmcep
# +
# def util methods
def melcd(array1, array2):
    """
    Calculate mel-cepstrum distortion.

    Parameters
    ----------
    array1, array2 : array, shape(`T`, `dim`) or shape(`dim`)
        Arrays of original and target mel-cepstra.

    Returns
    ----------
    mcd : scalar, number > 0
        Mel-cepstrum distortion [dB]; mean over frames for 2-D input.

    Raises
    ------
    ValueError
        If the shapes differ or the arrays are not 1- or 2-dimensional.
    """
    if array1.shape != array2.shape:
        # BUGFIX: `.format(...)` was inside the string literal, so the
        # message was never formatted; call it on the string instead.
        raise ValueError(
            "The shape of both array are different : {} / {}".format(array1.shape, array2.shape)
        )
    if array1.ndim == 2:
        # per-frame distortion averaged over frames
        diff = array1 - array2
        mcd = 10.0 / np.log(10) * np.mean(np.sqrt(2.0 * np.sum(diff ** 2, axis=1)))
    elif array1.ndim == 1:
        diff = array1 - array2
        mcd = 10.0 / np.log(10) * np.sqrt(2.0 * np.sum(diff ** 2))
    else:
        raise ValueError("Dimension mismatch.")
    return mcd
def delta(data, win=[-1.0, 1.0, 0]):
    """
    Calculate the delta (first-order dynamic) component.

    Parameters
    ----------
    data : array, shape(`T`, `dim`) or shape(`T`)
        Static feature matrix (a 1-D vector is treated as shape(`T`, 1)).
    win : array, shape(`3`)
        Delta window coefficients.

    Returns
    ----------
    delta : array, shape(`T`, `dim`)
        Delta feature matrix.
    """
    if data.ndim == 1:
        # promote a vector to a (T, 1) matrix
        T = len(data)
        dim = data.ndim
        data = data.reshape(T, dim)
    else:
        T, dim = data.shape
    win = np.array(win, dtype=np.float64)
    delta = np.zeros((T, dim))
    # simple forward/backward differences at the two edges
    delta[0] = win[0] * data[0] + win[1] * data[1]
    delta[-1] = win[0] * data[-2] + win[1] * data[-1]
    for i in range(len(win)):
        # BUGFIX: accumulate from the static `data`, not from the
        # partially-built `delta` buffer (the old code read its own
        # intermediate values, producing wrong deltas).
        delta[1:T - 1] += win[i] * data[i:T - 2 + i]
    return delta
def static_delta(data, win=[-1.0, 1.0, 0]):
    """
    Concatenate static and delta components column-wise.

    Parameters
    ----------
    data : array, shape(`T`, `dim`)
        Static feature matrix.
    win : array, shape(`3`)
        Delta window coefficients forwarded to `delta`.

    Returns
    ----------
    sddata : array, shape(`T`, `dim * 2`)
        Static+delta feature matrix.
    """
    sddata = np.column_stack((data, delta(data, win)))
    assert sddata.shape[1] == data.shape[1] * 2
    return sddata
def construct_static_and_delta_matrix(T, D, win=[-1.0, 1.0, 0]):
    """
    calculate static and delta transformation matrix

    Builds the sparse window matrix W used by MLPG so that
    W @ y yields the interleaved [static; delta] sequence of a
    static trajectory y of length T with D dimensions per frame.

    Parameters
    ----------
    T : scala, `T`
        Scala of time length
    D : scala, `D`
        Scala of the number of dimension.
    win : array, shape(`3`)
        The shape of window matrix.

    Returns
    ----------
    W : array, shape(`2 * D * T`, `D * T`)
        Array of static and delta transformation matrix.
    """
    # identity window for the static part; `delta` here is a local name
    # that shadows the module-level delta() function (intentional, local only)
    static = [0, 1, 0]
    delta = win
    assert len(static) == len(delta)
    # generate full W
    DT = D * T
    ones = np.ones(DT)
    # serial row numbers; even rows carry static, odd rows carry delta
    row = np.arange(2 * DT).reshape(2 * T, D) # generate serial numbers
    static_row = row[::2] # [1,2,3,4,5] => [1,3,5]
    delta_row = row[1::2] # [1,2,3,4,5] => [2,4]
    col = np.arange(DT)
    # 3 window taps per row for both static and delta parts
    data = np.array([ones * static[0], ones * static[1],
                      ones * static[2], ones * delta[0],
                      ones * delta[1], ones * delta[2]]).flatten()
    row = np.array([[static_row] * 3, [delta_row] * 3]).flatten()
    # column offsets -D/0/+D address the previous/current/next frame
    col = np.array([[col - D, col, col + D] * 2]).flatten()
    # remove component at first and end frame (out-of-range columns)
    valid_idx = np.logical_not(np.logical_or(col < 0, col >= DT))
    W = scipy.sparse.csr_matrix(
        (data[valid_idx], (row[valid_idx], col[valid_idx])), shape=(2 * DT, DT))
    W.eliminate_zeros()
    return W
def extfrm(data, npow, power_threshold=-20):
    """
    Extract frames whose normalized power exceeds the threshold.

    Parameters
    ----------
    data : array, shape(`T`, `dim`)
        array of input data
    npow : array, shape(`T`)
        vector of normalized power sequence
    power_threshold : scalar
        power threshold [dB]

    Returns
    ----------
    extdata : array, shape(`T_ext`, `dim`)
        remaining data after frame extraction; `T_ext` <= `T`

    Raises
    ------
    ValueError
        If `data` and `npow` have different lengths.
    """
    T = data.shape[0]
    if T != len(npow):
        # BUGFIX: was `raise("...")`, which raises TypeError about a
        # non-exception object; use a proper exception type.
        raise ValueError("Length of two vectors is different.")
    valid_index = np.where(npow > power_threshold)
    extdata = data[valid_index]
    assert extdata.shape[0] <= T
    return extdata
def estimate_twf(orgdata, tardata, distance='melcd', fast=True, otflag=None):
    """
    Estimate the time warping function between two feature sequences.

    Parameters
    ----------
    orgdata : array, shape(`T_org`, `dim`)
        source feature sequence
    tardata : array, shape(`T_tar`, `dim`)
        target feature sequence
    distance : str
        distance measure; only 'melcd' is supported
    fast : bool
        use fastdtw instead of dtw
    otflag : str
        'org' / 'tar' to align into the original / target length

    Returns
    ----------
    twf : array, shape(`2`, `T`)
        time warping function between original and target
    """
    # guard clause: only the mel-cepstrum distortion is implemented
    if distance != 'melcd':
        raise ValueError('this distance method is not support.')

    def distance_func(x, y):
        return melcd(x, y)

    if fast:
        _, path = fastdtw(orgdata, tardata, dist=distance_func)
        twf = np.array(path).T
    else:
        _, _, _, twf = dtw(orgdata, tardata, distance_func)

    return twf if otflag is None else modify_twf(twf, otflag=otflag)
def align_data(org_data, tar_data, twf):
    """
    Build the joint feature matrix from time-aligned frames.

    Parameters
    ----------
    org_data : array, shape(`T_org`, `dim_org`)
        Acoustic feature matrix of the original speaker
    tar_data : array, shape(`T_tar`, `dim_tar`)
        Acoustic feature matrix of the target speaker
    twf : array, shape(`2`, `T`)
        time warping function between original and target

    Returns
    ----------
    jdata : array, shape(`T_new`, `dim_org + dim_tar`)
        Joint feature matrix of source and target
    """
    warped_org = org_data[twf[0]]
    warped_tar = tar_data[twf[1]]
    return np.column_stack((warped_org, warped_tar))
def modify_twf(twf, otflag=None):
    """
    Align a time warping function into the length of one side.

    Parameters
    ----------
    twf : array, shape(`2`, `T`)
        time warping function between original and target
    otflag : str
        'org' : alignment into original length
        'tar' : alignment into target length

    Returns
    ----------
    mod_twf : array, shape(`2`, `T_new`)
        time warping function of modified alignment

    Raises
    ------
    ValueError
        If `otflag` is neither 'org' nor 'tar'.
    """
    if otflag == 'org':
        # keep the first occurrence of each original frame index
        of, indice = np.unique(twf[0], return_index=True)
        mod_twf = np.c_[of, twf[1][indice]].T
    elif otflag == 'tar':
        # keep the first occurrence of each target frame index
        tf, indice = np.unique(twf[1], return_index=True)
        mod_twf = np.c_[twf[0][indice], tf].T
    else:
        # BUGFIX: previously fell through to an UnboundLocalError on return
        raise ValueError("otflag must be 'org' or 'tar', got: {}".format(otflag))
    return mod_twf
def low_cut_filter(x, fs, cutoff=70):
    """
    Apply a high-pass (low-cut) FIR filter to a waveform.

    Parameters
    ----------
    x : array, shape('samples')
        waveform sequence
    fs : int
        Sampling frequency
    cutoff : float
        cutoff frequency of the low cut filter [Hz]

    Returns
    ----------
    lct_x : array, shape('samples')
        low-cut-filtered waveform sequence
    """
    # normalize the cutoff to the Nyquist frequency
    norm_cutoff = cutoff / (fs // 2)
    # 255-tap FIR high-pass design
    taps = firwin(255, norm_cutoff, pass_zero=False)
    return lfilter(taps, 1, x)
def extsddata(data, npow, power_threshold=-20):
    """
    Power-based frame extraction of a static+delta feature matrix.

    Builds the static+delta representation of `data`, then drops frames
    whose normalized power is at or below the threshold.

    Parameters
    ----------
    data : array, shape(`T`, `dim`)
        acoustic feature matrix
    npow : array, shape(`T`)
        normalized power vector
    power_threshold : float
        power threshold [dB]

    Returns
    ----------
    extsddata : array, shape(`T_new`, `dim * 2`)
        silence-removed static+delta feature matrix
    """
    return extfrm(static_delta(data), npow, power_threshold=power_threshold)
def transform_jnt(array_list):
    """
    Concatenate a list of feature arrays along the time (row) axis.

    Parameters
    ----------
    array_list : list of array
        arrays to stack; all must agree on the trailing dimensions

    Returns
    ----------
    jnt : array
        single array with all inputs stacked row-wise
    """
    # single C-level concatenation instead of growing via repeated np.r_,
    # which re-allocated the accumulator on every iteration (O(n^2))
    return np.concatenate(array_list, axis=0)
# -
class F0statistics(object):
    """
    Estimate F0 statistics and convert F0 sequences between speakers.
    """

    def __init__(self):
        pass

    def estimate(self, f0list):
        """
        Estimate log-F0 statistics over a list of F0 sequences.

        Parameters
        ----------
        f0list : list, shape(`f0num`)
            List of several F0 sequences

        Returns
        ----------
        f0stats : array, shape(`[mean, std]`)
            mean and standard deviation of log F0 (voiced frames only)
        """
        assert len(f0list) != 0
        # collect log F0 of voiced (non-zero) frames across all sequences
        voiced_logs = [np.log(f0[np.nonzero(f0)]) for f0 in f0list]
        all_logs = np.concatenate(voiced_logs)
        return np.array([np.mean(all_logs), np.std(all_logs)])

    def convert(self, f0, orgf0stats, tarf0stats):
        """
        Convert an F0 sequence by Gaussian normalization in the log domain.

        Parameters
        ----------
        f0 : array, shape(`T`, `1`)
            F0 sequence
        orgf0stats : array, shape(`[mean, std]`)
            log-F0 mean/std of the original speaker
        tarf0stats : array, shape(`[mean, std]`)
            log-F0 mean/std of the target speaker

        Returns
        ----------
        cvf0 : array, shape(`T`, `1`)
            converted F0 sequence (unvoiced frames stay 0)
        """
        cvf0 = np.zeros(len(f0))
        voiced = f0 > 0
        # scale the deviation from the source mean by the std ratio,
        # then re-center on the target mean
        cvf0[voiced] = np.exp(
            (tarf0stats[1] / orgf0stats[1]) * (np.log(f0[voiced]) - orgf0stats[0]) + tarf0stats[0])
        return cvf0
class GV(object):
    """
    Estimate global variance (GV) statistics and apply a GV postfilter.
    """

    def __init__(self):
        pass

    def estimate(self, datalist):
        """
        Estimate GV statistics from a list of feature matrices.

        Parameters
        ----------
        datalist : list, shape(`num_data`)
            List of several ([T, dim]) feature matrices

        Returns
        ----------
        gvstats : array, shape(`2`, `dim`)
            row 0: mean of the per-utterance variances
            row 1: variance of the per-utterance variances
        """
        assert len(datalist) != 0
        # per-utterance variance of each feature dimension
        per_utt_var = np.array([np.var(d, axis=0) for d in datalist])
        return np.vstack([np.mean(per_utt_var, axis=0),
                          np.var(per_utt_var, axis=0)])

    def postfilter(self, data, gvstats, cvgvstats=None, alpha=1.0, startdim=1):
        """
        Apply a GV postfilter to a feature matrix.

        Parameters
        ----------
        data : array, shape(`T`, `dim`)
            feature matrix to be filtered
        gvstats : array, shape(`2`, `dim`)
            mean/variance of the target GV
        cvgvstats : array, shape(`2`, `dim`)
            mean/variance of the converted GV; when given, its mean row
            replaces the input's own variance estimate
        alpha : float
            morphing coefficient between filtered and raw data:
            alpha * gvpf(data) + (1 - alpha) * data
        startdim : int
            first dimension the postfilter is applied to (dim 0, the
            energy term, is usually left untouched)

        Returns
        ----------
        filtered_data : array, shape(`T`, `dim`)
            GV-postfiltered feature matrix
        """
        T, dim = data.shape
        assert gvstats is not None
        assert dim == gvstats.shape[1]

        mean_vec = np.mean(data, axis=0)
        # variance estimate: from the data itself, or from trained
        # converted-GV statistics when supplied
        var_vec = np.var(data, axis=0) if cvgvstats is None else cvgvstats[0]

        # rescale deviations around the mean so the variance matches the
        # target GV mean, leaving dimensions below `startdim` untouched
        scale = np.sqrt(gvstats[0, startdim:] / var_vec[startdim:])
        tail = scale * (data[:, startdim:] - mean_vec[startdim:]) + mean_vec[startdim:]
        merged = np.c_[data[:, :startdim], tail]
        return alpha * merged + (1 - alpha) * data
# +
# 0. config path
# Directory layout of the pre-stored Japanese utterance set; intermediate
# analysis artifacts are cached under `prepare_path`.
__versions = "pre-stored-jp"
__same_path = "./utterance/" + __versions + "/"
prepare_path = __same_path + "output/"
pre_stored_source_list = __same_path + 'pre-source/**/V01/T01/**/*.wav'
pre_stored_list = __same_path + "pre/**/V01/T01/**/*.wav"
output_path = "./utterance/orl/jp-m/4/adapt31/"
# 1. estimate features
# Extract WORLD-based acoustic features (F0, spectral envelope,
# aperiodicity, mel-cepstrum, normalized power, coded aperiodicity) for
# every source-speaker utterance. Results are cached as pickles so a
# re-run can skip the expensive analysis.
feat = FeatureExtractor()
synthesizer = Synthesizer()
org_f0list = None
org_splist = None
org_mceplist = None
org_aplist = None
org_npowlist = None
org_codeaplist = None
# reuse the cached features only when every pickle is present
if os.path.exists(prepare_path + "_org_f0.pickle") \
    and os.path.exists(prepare_path + "_org_sp.pickle") \
    and os.path.exists(prepare_path + "_org_ap.pickle") \
    and os.path.exists(prepare_path + "_org_mcep.pickle") \
    and os.path.exists(prepare_path + "_org_npow.pickle") \
    and os.path.exists(prepare_path + "_org_codeap.pickle"):
    with open(prepare_path + "_org_f0.pickle", 'rb') as f:
        org_f0list = pickle.load(f)
    with open(prepare_path + "_org_sp.pickle", 'rb') as f:
        org_splist = pickle.load(f)
    with open(prepare_path + "_org_ap.pickle", 'rb') as f:
        org_aplist = pickle.load(f)
    with open(prepare_path + "_org_mcep.pickle", 'rb') as f:
        org_mceplist = pickle.load(f)
    with open(prepare_path + "_org_npow.pickle", 'rb') as f:
        org_npowlist = pickle.load(f)
    with open(prepare_path + "_org_codeap.pickle", 'rb') as f:
        org_codeaplist = pickle.load(f)
else:
    org_f0list = []
    org_splist = []
    org_mceplist = []
    org_aplist = []
    org_npowlist = []
    org_codeaplist = []
    ite = 0
    for files in sorted(glob.iglob(pre_stored_source_list, recursive=True)):
        wavf = files
        x, fs = sf.read(wavf)
        # NOTE(review): np.float was removed in NumPy 1.24; on modern
        # NumPy this line needs dtype=float instead.
        x = np.array(x, dtype=np.float)
        x = low_cut_filter(x, fs, cutoff=70)
        assert fs == 16000
        print("extract acoustic featuers: " + wavf)
        f0, sp, ap = feat.analyze(x)
        mcep = feat.mcep()
        npow = feat.npow()
        codeap = feat.codeap()
        # analysis-synthesis copy written out for listening checks
        wav = synthesizer.synthesis_sp(f0, sp, ap)
        # clip to the int16 sample range before writing
        wav = np.clip(wav, -32768, 32767)
        sf.write(prepare_path + "src_ansys_{}_.wav".format(ite), wav, fs)
        org_f0list.append(f0)
        org_splist.append(sp)
        org_mceplist.append(mcep)
        org_aplist.append(ap)
        org_npowlist.append(npow)
        org_codeaplist.append(codeap)
        # mel-cepstrum-based re-synthesis for quality comparison
        wav = synthesizer.synthesis(f0, mcep, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(prepare_path + "src_mcep_{}_.wav".format(ite), wav, fs)
        ite = ite + 1
    # cache the extracted features for later runs
    with open(prepare_path + "_org_f0.pickle", 'wb') as f:
        pickle.dump(org_f0list, f)
    with open(prepare_path + "_org_sp.pickle", 'wb') as f:
        pickle.dump(org_splist, f)
    with open(prepare_path + "_org_npow.pickle", 'wb') as f:
        pickle.dump(org_npowlist, f)
    with open(prepare_path + "_org_ap.pickle", 'wb') as f:
        pickle.dump(org_aplist, f)
    with open(prepare_path + "_org_mcep.pickle", 'wb') as f:
        pickle.dump(org_mceplist, f)
    with open(prepare_path + "_org_codeap.pickle", 'wb') as f:
        pickle.dump(org_codeaplist, f)
# Same feature-extraction pass for the "mid" (pre-stored target)
# utterances. The spectral-envelope and aperiodicity lists are large, so
# they are sharded into one pickle per source-list-sized chunk.
mid_f0list = None
mid_mceplist = None
mid_aplist = None
mid_npowlist = None
mid_splist = None
mid_codeaplist = None
if os.path.exists(prepare_path + "_mid_f0.pickle") \
    and os.path.exists(prepare_path + "_mid_sp_0_.pickle") \
    and os.path.exists(prepare_path + "_mid_ap_0_.pickle") \
    and os.path.exists(prepare_path + "_mid_mcep.pickle") \
    and os.path.exists(prepare_path + "_mid_npow.pickle") \
    and os.path.exists(prepare_path + "_mid_codeap.pickle"):
    with open(prepare_path + "_mid_f0.pickle", 'rb') as f:
        mid_f0list = pickle.load(f)
    # NOTE(review): the `*21` stride appears to assume 21 pre-stored
    # speakers, each with len(org_splist) utterances -- confirm against
    # the data layout.
    for i in range(0, len(org_splist)*21, len(org_splist)):
        with open(prepare_path + "_mid_sp_{}_.pickle".format(i), 'rb') as f:
            temp_splist = pickle.load(f)
            if mid_splist is None:
                mid_splist = temp_splist
            else:
                mid_splist = mid_splist + temp_splist
    for i in range(0, len(org_aplist)*21, len(org_aplist)):
        with open(prepare_path + "_mid_ap_{}_.pickle".format(i), 'rb') as f:
            temp_aplist = pickle.load(f)
            if mid_aplist is None:
                mid_aplist = temp_aplist
            else:
                mid_aplist = mid_aplist + temp_aplist
    with open(prepare_path + "_mid_mcep.pickle", 'rb') as f:
        mid_mceplist = pickle.load(f)
    with open(prepare_path + "_mid_npow.pickle", 'rb') as f:
        mid_npowlist = pickle.load(f)
    with open(prepare_path + "_mid_codeap.pickle", 'rb') as f:
        mid_codeaplist = pickle.load(f)
else:
    mid_f0list = []
    mid_mceplist = []
    mid_aplist = []
    mid_npowlist = []
    mid_splist = []
    mid_codeaplist = []
    ite = 0
    for files in sorted(glob.iglob(pre_stored_list, recursive=True)):
        wavf = files
        x, fs = sf.read(wavf)
        # NOTE(review): np.float was removed in NumPy 1.24; on modern
        # NumPy this line needs dtype=float instead.
        x = np.array(x, dtype=np.float)
        x = low_cut_filter(x, fs, cutoff=70)
        assert fs == 16000
        print("extract acoustic featuers: " + wavf)
        f0, sp, ap = feat.analyze(x)
        mcep = feat.mcep()
        npow = feat.npow()
        codeap = feat.codeap()
        name, ext = os.path.splitext(wavf)
        # analysis-synthesis copy written out for listening checks
        wav = synthesizer.synthesis_sp(f0, sp, ap)
        # clip to the int16 sample range before writing
        wav = np.clip(wav, -32768, 32767)
        sf.write(prepare_path + "mid_ansys_{}_.wav".format(ite), wav, fs)
        mid_f0list.append(f0)
        mid_splist.append(sp)
        mid_mceplist.append(mcep)
        mid_aplist.append(ap)
        mid_npowlist.append(npow)
        mid_codeaplist.append(codeap)
        # mel-cepstrum-based re-synthesis for quality comparison
        wav = synthesizer.synthesis(f0, mcep, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(prepare_path + "mid_mcep_{}_.wav".format(ite), wav, fs)
        ite = ite + 1
    with open(prepare_path + "_mid_f0.pickle", 'wb') as f:
        print(f)
        pickle.dump(mid_f0list, f)
    with open(prepare_path + "_mid_npow.pickle", 'wb') as f:
        print(f)
        pickle.dump(mid_npowlist, f)
    # shard the sp/ap pickles in chunks of len(org_*list) utterances to
    # keep individual files to a manageable size
    for i in range(0, len(mid_splist), len(org_splist)):
        with open(prepare_path + "_mid_sp_{}_.pickle".format(i), 'wb') as f:
            print(f)
            pickle.dump(mid_splist[i:i+len(org_splist)], f)
    for i in range(0, len(mid_aplist), len(org_aplist)):
        with open(prepare_path + "_mid_ap_{}_.pickle".format(i), 'wb') as f:
            print(f)
            pickle.dump(mid_aplist[i:i+len(org_aplist)], f)
    with open(prepare_path + "_mid_mcep.pickle", 'wb') as f:
        print(f)
        pickle.dump(mid_mceplist, f)
    with open(prepare_path + "_mid_codeap.pickle", 'wb') as f:
        print(f)
        pickle.dump(mid_codeaplist, f)
# +
class GMMTrainer(object):
    """
    Train a joint-density GMM with several types of covariance matrix.

    NOTE(review): relies on `sklearn.mixture.GMM`, which was removed in
    scikit-learn 0.20 (see the import section); running this requires an
    older scikit-learn.

    Parameters
    ----------
    n_mix : int
        the number of mixture components of the GMM
    n_iter : int
        the number of iterations for the EM algorithm
    covtype : str
        the type of covariance matrix of the GMM
        'full': full-covariance matrix
    params : str
        which GMM parameters are updated during fitting
        ('w'=weights, 'm'=means, 'c'=covariances)

    Attributes
    ---------
    param :
        sklearn-based model parameters of the GMM
    """
    def __init__(self, n_mix=64, n_iter=100, covtype='full', params='wmc'):
        self.n_mix = n_mix
        self.n_iter = n_iter
        self.covtype = covtype
        self.params = params
        self.param = sklearn.mixture.GMM(n_components=self.n_mix,
                                        covariance_type=self.covtype,
                                        n_iter=self.n_iter, params=self.params)
    def train(self, jnt):
        """
        Fit the GMM parameters to the given joint feature vectors.

        NOTE(review): silently does nothing when covtype != 'full';
        consider raising for unsupported covariance types.

        Parameters
        ---------
        jnt : array, shape(`T`, `jnt.shape[0]`)
            joint feature vector of original and target feature vector
            consisting of static and delta components
        """
        if self.covtype == 'full':
            self.param.fit(jnt)
        return
class GMMConvertor(object):
    """
    Convert features with a trained joint-density GMM.

    Offers conversion techniques such as Maximum Likelihood Parameter
    Generation (MLPG) over the conditional distribution p(Y|X).

    Parametes
    ---------
    n_mix : int
        the number of mixture components of the GMM
    covtype : str
        the type of covariance matrix of the GMM
        'full': full-covariance matrix
    gmmmode : str
        the type of the GMM for opening
        `None` : Normal Joint Density - GMM (JD-GMM)

    Attributes
    ---------
    param :
        sklearn-based model parameters of the GMM
    w : shape(`n_mix`)
        vector of mixture component weight of the GMM
    jmean : shape(`n_mix`, `jnt.shape[0]`)
        Array of joint mean vector of the GMM
    jcov : shape(`n_mix`, `jnt.shape[0]`, `jnt.shape[0]`)
        array of joint covariance matrix of the GMM
    """
    def __init__(self, n_mix=64, covtype='full', gmmmode=None):
        self.n_mix = n_mix
        self.covtype = covtype
        self.gmmmode = gmmmode
    def open_from_param(self, param):
        """
        Load a trained GMM (as produced by GMMTrainer) and precompute
        the conversion parameters.

        Parameters
        ----------
        param : GMMTrainer
            GMMTrainer class
        """
        self.param = param
        self._deploy_parameters()
        return
    def convert(self, data, cvtype='mlpg'):
        """
        Convert data based on the conditional probability density function.

        Parametes
        ---------
        data : array, shape(`T`, `dim`)
            original data will be converted
        cvtype : str
            type of conversion technique
            `mlpg` : maximum likelihood parameter generation

        Returns
        ----------
        odata : array, shape(`T`, `dim`)
            converted data
        """
        # estimate parameter sequence
        cseq, wseq, mseq, covseq = self._gmmmap(data)
        if cvtype == 'mlpg':
            odata = self._mlpg(mseq, covseq)
        else:
            raise ValueError('please choose conversion mode in `mlpg`.')
        return odata
    def _gmmmap(self, sddata):
        # Frame-wise GMM mapping: pick the maximum-posterior mixture per
        # frame and form the conditional mean/covariance sequences.
        # parameter for sequential data
        T, sddim = sddata.shape
        # estimate posterior sequence
        wseq = self.pX.predict_proba(sddata)
        # estimate mixture sequence
        cseq = np.argmax(wseq, axis=1)
        mseq = np.zeros((T, sddim))
        covseq = np.zeros((T, sddim, sddim))
        for t in range(T):
            # read maximum likelihood mixture component in frame t
            m = cseq[t]
            # conditional mean vector sequence: E[Y|X=x, m]
            mseq[t] = self.meanY[m] + self.A[m] @ (sddata[t] - self.meanX[m])
            # conditional covariance sequence (inverse, i.e. precision)
            covseq[t] = self.cond_cov_inv[m]
        return cseq, wseq, mseq, covseq
    def _mlpg(self, mseq, covseq):
        # Maximum likelihood parameter generation: solve the weighted
        # least-squares system implied by the static+delta constraint.
        # parameter for sequential data
        T, sddim = mseq.shape
        # prepare W (static+delta window matrix)
        W = construct_static_and_delta_matrix(T, sddim // 2)
        # prepare D (block-diagonal precision matrix)
        D = get_diagonal_precision_matrix(T, sddim, covseq)
        # calculate W'D
        WD = W.T @ D
        # W'DW
        WDW = WD @ W
        # W'Dm
        WDM = WD @ mseq.flatten()
        # estimate y = (W'DW)^-1 * W'Dm
        odata = scipy.sparse.linalg.spsolve(WDW, WDM, use_umfpack=False).reshape(T, sddim // 2)
        return odata
    def _deploy_parameters(self):
        # read JD-GMM parameters from self.param
        self.W = self.param.weights_
        self.jmean = self.param.means_
        self.jcov = self.param.covars_
        # divide joint GMM parameters into source (X) and target (Y) halves
        sddim = self.jmean.shape[1] // 2
        self.meanX = self.jmean[:, 0:sddim]
        self.meanY = self.jmean[:, sddim:]
        self.covXX = self.jcov[:, :sddim, :sddim]
        self.covXY = self.jcov[:, :sddim, sddim:]
        self.covYX = self.jcov[:, sddim:, :sddim]
        self.covYY = self.jcov[:, sddim:, sddim:]
        # change model parameter of GMM into that of gmmmode
        if self.gmmmode is None:
            pass
        else:
            raise ValueError('please choose GMM mode in [None]')
        # estimate parameters for conversion
        self._set_Ab()
        self._set_pX()
        return
    def _set_Ab(self):
        # calculate A and b from self.jmean, self.jcov
        sddim = self.jmean.shape[1] // 2
        # calculate inverse covariance for covariance XX in each mixture
        self.covXXinv = np.zeros((self.n_mix, sddim, sddim))
        for m in range(self.n_mix):
            self.covXXinv[m] = np.linalg.inv(self.covXX[m])
        # calculate A, b, and conditional covariance given X
        self.A = np.zeros((self.n_mix, sddim, sddim))
        self.b = np.zeros((self.n_mix, sddim))
        self.cond_cov_inv = np.zeros((self.n_mix, sddim, sddim))
        for m in range(self.n_mix):
            # calculate A (A = yxcov_m * xxcov_m^-1)
            self.A[m] = self.covYX[m] @ self.covXXinv[m]
            # calculate b (b = mean^Y - A * mean^X)
            self.b[m] = self.meanY[m] - self.A[m] @ self.meanX[m]
            # calculate conditional covariance (cov^(Y|X)^-1 = (yycov - A * xycov)^-1)
            self.cond_cov_inv[m] = np.linalg.inv(self.covYY[m] - self.A[m] @ self.covXY[m])
        return
    def _set_pX(self):
        # probability density function of X: marginal GMM over the source
        # half, built by assigning the trained parameters directly.
        # NOTE(review): requires the pre-0.20 sklearn.mixture.GMM API.
        self.pX = sklearn.mixture.GMM(n_components=self.n_mix, covariance_type=self.covtype)
        self.pX.weights_ = self.W
        self.pX.means_ = self.meanX
        self.pX.covars_ = self.covXX
        # following function is required to estimate posterior
        # p(x | \lambda^(X)) on sklearn >= 0.20 (GaussianMixture API):
        #self.pX.precisions_cholesky_ = _compute_precision_cholesky(self.covXX, self.covtype)
        return
def get_diagonal_precision_matrix(T, D, covseq):
    """
    Build a block-diagonal sparse precision matrix from per-frame blocks.

    Parameters
    ----------
    T : int
        number of frames (unused; kept for API compatibility)
    D : int
        per-frame dimension (unused; kept for API compatibility)
    covseq : array, shape(`T`, `D`, `D`)
        per-frame precision blocks

    Returns
    ----------
    P : sparse matrix, shape(`T * D`, `T * D`)
        CSR block-diagonal matrix with `covseq[t]` as the t-th block
    """
    return scipy.sparse.block_diag(covseq, format='csr')
# +
def get_alignment(odata, onpow, tdata, tnpow, opow=-20, tpow=-20, sd=0, cvdata=None, given_twf=None, otflag=None, distance='melcd'):
    """
    Align one original/target utterance pair and build its joint vector.

    Parameters
    ----------
    odata : array, shape(`T`, `dim`)
        acoustic feature matrix of the original speaker
    onpow : array, shape(`T`)
        normalized power vector of the original
    tdata : array, shape(`T`, `dim`)
        acoustic feature matrix of the target speaker
    tnpow : array, shape(`T`)
        normalized power vector of the target
    opow : float
        power threshold of the original [dB]
    tpow : float
        power threshold of the target [dB]
    sd : int
        start dimension used for alignment
    cvdata : array, shape(`T`, `dim`)
        converted original features; when given, alignment is computed
        against these instead of the raw original
    given_twf : array, shape(`2`, `T_new`)
        precomputed time warping function to use instead of DTW
    otflag : str
        'org' / 'tar' to align into the original / target length
    distance : str
        distance measure used by DTW

    Returns
    ----------
    jdata : array, shape(`T_new`, `dim * 2`)
        joint static+delta feature matrix
    twf : array, shape(`2`, `T_new`)
        time warping function
    mcd : float
        mel-cepstrum distortion between the aligned sequences
    """
    # silence-removed static+delta features for both sides
    oexdata = extsddata(odata[:, sd:], onpow, power_threshold=opow)
    texdata = extsddata(tdata[:, sd:], tnpow, power_threshold=tpow)

    # align against converted features when they are available
    align_odata = oexdata if cvdata is None else extsddata(cvdata, onpow, power_threshold=opow)

    if given_twf is not None:
        twf = given_twf
    else:
        twf = estimate_twf(align_odata, texdata, distance=distance, fast=False, otflag=otflag)

    jdata = align_data(oexdata, texdata, twf)
    mcd = melcd(align_odata[twf[0]], texdata[twf[1]])
    return jdata, twf, mcd
def align_feature_vectors(odata, onpows, tdata, tnpows, opow=-100, tpow=-100, itnum=3, sd=0, given_twfs=None, otflag=None):
    """
    Iteratively align source/target features to create joint feature vectors.

    After the first pass, a JD-GMM is trained on the current joint vectors
    and its conversion of the source features is used to refine the
    alignment on the next pass.

    Parameters
    ----------
    odata : list, (`num_files`)
        List of original feature matrices
    onpows : list, (`num_files`)
        List of original npows
    tdata : list, (`num_files`)
        List of target feature matrices
    tnpows : list, (`num_files`)
        List of target npows
    opow : float
        power threshold of original
    tpow : float
        power threshold of target
    itnum : int
        the number of alignment iterations
    sd : int
        start dimension of feature vector to be used for alignment
    given_twfs : list
        per-file alignments honored during the 1st iteration only
    otflag : str
        Alignment into the length of specification
        'org' : alignment into original length
        'tar' : alignment into target length

    Returns
    ----------
    jfvs : list, (`num_files`)
        joint static and delta feature vectors per file
    twfs : list, (`num_files`)
        time warping functions per file
    """
    num_files = len(odata)
    cvgmm, cvdata = None, None
    for it in range(1, itnum + 1):
        print('{}-th joint feature extraction starts.'.format(it))
        twfs, jfvs = [], []
        for i in range(num_files):
            # a supplied twf is only honored on the first pass
            if it == 1 and given_twfs is not None:
                gtwf = given_twfs[i]
            else:
                gtwf = None
            if it > 1:
                # refine alignment using the converted source features
                cvdata = cvgmm.convert(static_delta(odata[i][:, sd:]))
            jdata, twf, mcd = get_alignment(odata[i], onpows[i], tdata[i], tnpows[i], opow=opow, tpow=tpow,
                                            sd=sd, cvdata=cvdata, given_twf=gtwf, otflag=otflag)
            twfs.append(twf)
            jfvs.append(jdata)
            print('distortion [dB] for {}-th file: {}'.format(i + 1, mcd))
        if it != itnum:
            # train a GMM for the next refinement pass; skipped on the
            # final pass (the joint matrix is only needed for training,
            # so it is no longer built after the last iteration)
            jnt_data = transform_jnt(jfvs)
            datagmm = GMMTrainer()
            datagmm.train(jnt_data)
            cvgmm = GMMConvertor()
            cvgmm.open_from_param(datagmm.param)
    # NOTE: dead code removed -- the old `it = 1` before the loop and
    # `it += 1` at its end had no effect (for-loop rebinds `it`).
    return jfvs, twfs
# -
# 2. estimate twf and jnt
# Skip the whole step if the first joint-feature chunk is already cached.
if os.path.exists(prepare_path + "_jnt_mcep_0_.pickle"):
    pass
else:
    # Walk the (longer) mid list in chunks the size of the org list,
    # pairing each chunk against the full original list.
    for i in range(0, len(mid_mceplist), len(org_mceplist)):
        org_mceps = org_mceplist
        org_npows = org_npowlist
        mid_mceps = mid_mceplist[i:i+len(org_mceps)]
        mid_npows = mid_npowlist[i:i+len(org_npows)]
        assert len(org_mceps) == len(mid_mceps)
        assert len(org_npows) == len(mid_npows)
        assert len(org_mceps) == len(org_npows)
        # dtw between original and target 0-th and silence
        print("## alignment mcep 0-th and silence ##")
        jmceps, twfs = align_feature_vectors(org_mceps, org_npows, mid_mceps, mid_npows, opow=-15, tpow=-15, sd=1)
        jnt_mcep = transform_jnt(jmceps)
        # save joint feature vectors
        with open(prepare_path + "_jnt_mcep_{}_.pickle".format(i), 'wb') as f:
            print(f)
            pickle.dump(jnt_mcep, f)
# 3. make EV-GMM
initgmm, initgmm_codeap = None, None
if os.path.exists(prepare_path + "initgmm.pickle"):
    # The filename has no format placeholder, so the original ".format(i)"
    # call was a no-op that also depended on a leftover loop variable `i`
    # (NameError if the earlier loops never ran); it is removed.
    with open(prepare_path + "initgmm.pickle", 'rb') as f:
        print(f)
        initgmm = pickle.load(f)
else:
    # Concatenate all cached joint-feature chunks, then train the initial GMM.
    jnt, jnt_codeap = None, []
    for i in range(0, len(mid_mceplist), len(org_mceplist)):
        with open(prepare_path + "_jnt_mcep_{}_.pickle".format(i), 'rb') as f:
            temp_jnt = pickle.load(f)
        if jnt is None:
            jnt = temp_jnt
        else:
            jnt = np.r_[jnt, temp_jnt]
    # train initial gmm
    initgmm = GMMTrainer()
    initgmm.train(jnt)
    with open(prepare_path + "initgmm.pickle", 'wb') as f:
        print(f)
        pickle.dump(initgmm, f)
# get initial gmm params
init_W = initgmm.param.weights_
init_jmean = initgmm.param.means_
init_jcov = initgmm.param.covars_
# Joint vectors stack a source half (X) and a target half (Y);
# split the joint means/covariances into their four blocks.
sddim = init_jmean.shape[1] // 2
init_meanX = init_jmean[:, :sddim]
init_meanY = init_jmean[:, sddim:]
init_covXX = init_jcov[:, :sddim, :sddim]
init_covXY = init_jcov[:, :sddim, sddim:]
init_covYX = init_jcov[:, sddim:, :sddim]
init_covYY = init_jcov[:, sddim:, sddim:]
fitted_source = init_meanX
fitted_target = init_meanY
# Build (or load from cache) the per-chunk mean "supervector" matrix sv.
sv = None
if os.path.exists(prepare_path + "_sv.npy"):
    # The original also did `sv = np.array(sv)` here, converting None and
    # immediately overwriting it with np.load — dead code, removed.
    sv = np.load(prepare_path + '_sv.npy')
else:
    depengmm, depenjnt = None, None
    sv = []
    for i in range(0, len(mid_mceplist), len(org_mceplist)):
        with open(prepare_path + "_jnt_mcep_{}_.pickle".format(i), 'rb') as f:
            depenjnt = pickle.load(f)
        # Re-estimate only the means (params='m'), starting from the initial GMM.
        depengmm = GMMTrainer(params='m')
        depengmm.param.weights_ = init_W
        depengmm.param.means_ = init_jmean
        depengmm.param.covars_ = init_jcov
        depengmm.train(depenjnt)
        sv.append(depengmm.param.means_)
    sv = np.array(sv)
    np.save(prepare_path + "_sv", sv)
# +
# PCA over the per-chunk mean supervectors -> eigenvoice bases
# (one basis for the source halves, one for the target halves).
n_mix = 64
S = int(len(mid_mceplist) / len(org_mceplist))
assert S == 22  # expected number of adaptation chunks for this data set
source_pca = sklearn.decomposition.PCA()
source_pca.fit(sv[:,:,:sddim].reshape((S, n_mix*sddim)))
target_pca = sklearn.decomposition.PCA()
target_pca.fit(sv[:,:,sddim:].reshape((S, n_mix*sddim)))
# (eigenvectors, biasvectors) hold (source, target) pairs.
eigenvectors = source_pca.components_.reshape((n_mix, sddim, S)), target_pca.components_.reshape((n_mix, sddim, S))
biasvectors = source_pca.mean_.reshape((n_mix, sddim)), target_pca.mean_.reshape((n_mix, sddim))
# -
# +
# estimate statistic features
# Glob patterns: source wavs to convert, and target-speaker adaptation wavs.
for_convert_source = __same_path + 'input/EJM10/V01/T01/TIMIT/000/*.wav'
for_convert_target = __same_path + 'adaptation/EJM04/V01/T01/ATR503/A/*.wav'
# Accumulators for the source speaker's acoustic features.
src_f0list = []
src_splist = []
src_mceplist = []
src_aplist = []
src_npowlist = []
src_codeaplist = []
# Analyze every source wav: extract F0/spectrum/aperiodicity, mcep, npow,
# and write analysis/synthesis checks to disk.
if os.path.exists(__same_path + 'input/EJM10/V01/T01/TIMIT/000/A11.wav'):
    ite = 0
    for files in sorted(glob.iglob(for_convert_source, recursive=True)):
        wavf = files
        x, fs = sf.read(wavf)
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        x = np.array(x, dtype=float)
        x = low_cut_filter(x, fs, cutoff=70)
        assert fs == 16000
        print("extract acoustic featuers: " + wavf)
        f0, sp, ap = feat.analyze(x)
        mcep = feat.mcep()
        npow = feat.npow()
        codeap = feat.codeap()
        # Resynthesize from the raw spectrum as an analysis/synthesis check.
        wav = synthesizer.synthesis_sp(f0, sp, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(output_path + "input_ansys_{}_.wav".format(ite), wav, fs)
        src_f0list.append(f0)
        src_splist.append(sp)
        src_mceplist.append(mcep)
        src_aplist.append(ap)
        src_npowlist.append(npow)
        src_codeaplist.append(codeap)
        # Resynthesize from the mel-cepstrum to hear parameterization loss.
        wav = synthesizer.synthesis(f0, mcep, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(output_path + "input_mcep_{}_.wav".format(ite), wav, fs)
        ite = ite + 1
else:
    raise ValueError("No such files.")
# Accumulators for the target speaker's acoustic features.
tar_f0list = []
tar_mceplist = []
tar_aplist = []
tar_npowlist = []
tar_splist = []
tar_codeaplist = []
# Same analysis loop as the source side, over the adaptation wavs.
if os.path.exists(__same_path + 'adaptation/EJM04/V01/T01/ATR503/A/A01.wav'):
    ite = 0
    for files in sorted(glob.iglob(for_convert_target, recursive=True)):
        wavf = files
        x, fs = sf.read(wavf)
        # np.float was removed in NumPy 1.24; the builtin float is equivalent.
        x = np.array(x, dtype=float)
        x = low_cut_filter(x, fs, cutoff=70)
        assert fs == 16000
        print("extract acoustic featuers: " + wavf)
        f0, sp, ap = feat.analyze(x)
        mcep = feat.mcep()
        npow = feat.npow()
        codeap = feat.codeap()
        name, ext = os.path.splitext(wavf)
        # Analysis/synthesis check from the raw spectrum.
        wav = synthesizer.synthesis_sp(f0, sp, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(output_path + "target_ansys_{}_.wav".format(ite), wav, fs)
        tar_f0list.append(f0)
        tar_splist.append(sp)
        tar_mceplist.append(mcep)
        tar_aplist.append(ap)
        tar_npowlist.append(npow)
        tar_codeaplist.append(codeap)
        # Resynthesis from the mel-cepstrum parameterization.
        wav = synthesizer.synthesis(f0, mcep, ap)
        wav = np.clip(wav, -32768, 32767)
        sf.write(output_path + "target_mcep_{}_.wav".format(ite), wav, fs)
        ite = ite + 1
else:
    raise ValueError("No such files.")
# +
# F0 statistics (for log-F0 linear conversion) and global variance stats.
f0statis = F0statistics()
tarf0stats = f0statis.estimate(tar_f0list)
srcf0stats = f0statis.estimate(org_f0list)
gv = GV()
srcgvstats = gv.estimate(org_mceplist)
targvstats = gv.estimate(tar_mceplist)
# +
# 5. fitting target
# Estimate eigenvoice weights for the target speaker by iteratively
# re-fitting the target-side GMM means (EM-style updates on the Y half).
# NOTE(review): sklearn.mixture.GMM is the pre-0.20 scikit-learn API.
epoch = 100
fitgmm = sklearn.mixture.GMM(n_components=n_mix,
                             covariance_type='full',
                             n_iter=100)
fitgmm.weights_ = init_W
fitgmm.means_ = init_meanY
fitgmm.covars_ = init_covYY
for i in range(len(tar_mceplist)):
    print("adapt: ", i+1, "/", len(tar_mceplist))
    target = tar_mceplist[i]
    # Drop the 0th (power) coefficient before adaptation.
    target_pow = target[:, 0]
    target = target[:, 1:]
    for x in range(epoch):
        print("epoch = ", x)
        # Mixture posteriors for each frame of static+delta target features.
        predict = fitgmm.predict_proba(np.atleast_2d(static_delta(target)))
        # Posterior-weighted residuals against the target bias vectors.
        y = np.sum([predict[:, k:k+1] * (static_delta(target) - biasvectors[1][k]) for k in range(n_mix)], axis=1)
        gamma = np.sum(predict, axis=0)
        # Solve the normal equations for the eigenvoice weight vector.
        left = np.sum([gamma[k] * np.dot(eigenvectors[1][k].T,
                                         np.linalg.solve(fitgmm.covars_, eigenvectors[1])[k])
                       for k in range(n_mix)], axis=0)
        right = np.sum([np.dot(eigenvectors[1][k].T,
                               np.linalg.solve(fitgmm.covars_, y)[k])
                       for k in range(n_mix)], axis=0)
        weight = np.linalg.solve(left, right)
        # Reconstruct adapted target means and feed them back into the GMM.
        fitted_target = np.dot(eigenvectors[1], weight) + biasvectors[1]
        fitgmm.means_ = fitted_target
# -
def mcepconvert(source, weights, jmean, meanX, covarXX, covarXY, covarYX, covarYY,
                fitted_source, fitted_target):
    """Convert a static+delta mcep sequence via GMM mapping, then MLPG.

    Args (assumptions marked where not verifiable from this file):
        source: array, shape (T, sddim) of static+delta source features.
        weights / jmean: GMM mixture weights and joint means (M, sddim*2).
        meanX: source-side means; covarXX/XY/YX/YY: joint-covariance blocks.
        fitted_source: unused here — kept for interface symmetry.
        fitted_target: adapted target-side means used as conversion targets.

    Returns:
        array, shape (T, sddim // 2): converted static features.
    """
    M = 64  # number of mixtures; assumed to match the trained GMM (n_mix)
    # set pX — source-side GMM used only to compute frame posteriors
    px = sklearn.mixture.GMM(n_components=M, covariance_type='full', n_iter=100)
    px.weights_ = weights
    px.means_ = meanX
    px.covars_ = covarXX
    # set Ab — per-mixture linear map A, bias b, and conditional precision
    sddim = jmean.shape[1] // 2
    covXXinv = np.zeros((M, sddim, sddim))
    for m in range(M):
        covXXinv[m] = np.linalg.inv(covarXX[m])
    A = np.zeros((M, sddim, sddim))
    b = np.zeros((M, sddim))
    cond_cov_inv = np.zeros((M, sddim, sddim))
    for m in range(M):
        A[m] = covarYX[m] @ covXXinv[m]
        b[m] = fitted_target[m] - A[m] @ meanX[m]
        cond_cov_inv[m] = np.linalg.inv(covarYY[m] - A[m] @ covarXY[m])
    # _gmmmap — hard-assign each frame to its most responsible mixture
    T, sddim = source.shape
    wseq = px.predict_proba(source)
    cseq = np.argmax(wseq, axis=1)
    mseq = np.zeros((T, sddim))
    covseq = np.zeros((T, sddim, sddim))
    for t in range(T):
        m = cseq[t]
        mseq[t] = fitted_target[m] + A[m] @ (source[t] - meanX[m])
        covseq[t] = cond_cov_inv[m]
    # _mlpg — maximum-likelihood parameter generation over the sequence
    T, sddim = mseq.shape
    W = construct_static_and_delta_matrix(T, sddim // 2)
    D = get_diagonal_precision_matrix(T, sddim, covseq)
    WD = W.T @ D
    WDW = WD @ W
    WDM = WD @ mseq.flatten()
    # Sparse solve of the MLPG normal equations; keep only static dims.
    output = scipy.sparse.linalg.spsolve(WDW, WDM, use_umfpack=False).reshape(T, sddim // 2)
    return output
# learn cvgvstats
# Convert every source mcep (power coefficient excluded), then re-attach
# the original 0th (power) coefficient to the converted features.
cv_mceps = []
for i in range(len(src_mceplist)):
    temp_mcep = src_mceplist[i]
    temp_mcep_0th = temp_mcep[:, 0]
    temp_mcep = temp_mcep[:, 1:]
    sta_mcep = static_delta(temp_mcep)
    cvmcep_wopow = np.array(mcepconvert(sta_mcep, init_W, init_jmean, init_meanX,
                                        init_covXX, init_covXY, init_covYX, init_covYY,
                                        fitted_source, fitted_target))
    cvmcep = np.c_[temp_mcep_0th, cvmcep_wopow]
    cv_mceps.append(cvmcep)
# +
# GV statistics of the converted mceps, then synthesize converted speech
# with GV postfiltering and linearly converted F0.
cvgvstats = gv.estimate(cv_mceps)
for i in range(len(src_mceplist)):
    cvmcep_wGV = gv.postfilter(cv_mceps[i], targvstats, cvgvstats=cvgvstats)
    cvf0 = f0statis.convert(src_f0list[i], srcf0stats, tarf0stats)
    wav = synthesizer.synthesis(cvf0, cvmcep_wGV, src_aplist[i], rmcep=src_mceplist[i])
    sf.write(output_path + "cv_{}_.wav".format(i), wav, 16000)
# -
# Reference resyntheses of the unconverted source for comparison.
for i in range(len(src_mceplist)):
    wav = synthesizer.synthesis(src_f0list[i], src_mceplist[i], src_aplist[i])
    sf.write(output_path + "mcep_{}_.wav".format(i), wav, 16000)
    wav = synthesizer.synthesis_sp(src_f0list[i], src_splist[i], src_aplist[i])
    sf.write(output_path + "ansys_{}_.wav".format(i), wav, 16000)
# Visual check of the F0 conversion on the first utterance.
cvf0 = f0statis.convert(src_f0list[0], srcf0stats, tarf0stats)
plt.plot(cvf0)
plt.plot(src_f0list[0])
# One-off synthesis using the *source* GV stats as postfilter target.
cvmcep_wGV = gv.postfilter(cv_mceps[0], srcgvstats, cvgvstats=cvgvstats)
cvf0 = f0statis.convert(src_f0list[0], srcf0stats, tarf0stats)
wav = synthesizer.synthesis(cvf0, cvmcep_wGV, src_aplist[0], rmcep=src_mceplist[0])
sf.write(output_path + "te.wav", wav, 16000)
# +
# org-cv distance
# Mel-cepstral distortion between original and converted features,
# DTW-aligned; the 0th (power) coefficient is passed as the npow proxy.
with open(output_path + "melcd_org-cv.txt", "w") as outfile:
    outfile.write("adapt31 org-cv mcd.\n")
    for i in range(len(src_mceplist)):
        temp_mcep = src_mceplist[i]
        temp_mcep_0th = temp_mcep[:, 0]
        temp_mcep = temp_mcep[:, 1:]
        temp_cv = cv_mceps[i]
        temp_cv_0th = temp_cv[:, 0]
        temp_cv = temp_cv[:, 1:]
        _, _, mcd = get_alignment(temp_mcep, temp_mcep_0th, temp_cv, temp_cv_0th, opow=-15, tpow=-15, sd=1)
        outfile.write("{0},{1}\n".format(i, mcd))
# cv-target distance
# read target files and analyze mceps
targets_mceplist = []
targets_list = __same_path + 'adaptation/EJM04/V01/T01/TIMIT/000/*.wav'
for files in sorted(glob.iglob(targets_list, recursive=True)):
    wavf = files
    x, fs = sf.read(wavf)
    # NOTE(review): np.float is removed in NumPy >= 1.24; use float instead.
    x = np.array(x, dtype=np.float)
    x = low_cut_filter(x, fs, cutoff=70)
    assert fs == 16000
    print("extract acoustic featuers: " + wavf)
    f0, sp, ap = feat.analyze(x)
    mcep = feat.mcep()
    targets_mceplist.append(mcep)
with open(output_path + "melcd_cv-target.txt", "w") as outfile:
    outfile.write("adapt31 cv-target mcd.\n")
    for i in range(len(src_mceplist)):
        temp_mcep = targets_mceplist[i]
        temp_mcep_0th = temp_mcep[:, 0]
        temp_mcep = temp_mcep[:, 1:]
        temp_cv = cv_mceps[i]
        temp_cv_0th = temp_cv[:, 0]
        temp_cv = temp_cv[:, 1:]
        _, _, mcd = get_alignment(temp_cv, temp_cv_0th, temp_mcep, temp_mcep_0th, opow=-15, tpow=-15, sd=1)
        outfile.write("{0},{1}\n".format(i, mcd))
# org-target distance
# Baseline distortion between the two speakers before conversion.
with open(output_path + "melcd_org-target.txt", "w") as outfile:
    outfile.write("adapt31 org-target mcd.\n")
    for i in range(len(src_mceplist)):
        temp_mcep = src_mceplist[i]
        temp_mcep_0th = temp_mcep[:, 0]
        temp_mcep = temp_mcep[:, 1:]
        temp_mcep2 = targets_mceplist[i]
        temp_mcep2_0th = temp_mcep2[:, 0]
        temp_mcep2 = temp_mcep2[:, 1:]
        _, _, mcd = get_alignment(temp_mcep, temp_mcep_0th, temp_mcep2, temp_mcep2_0th, opow=-15, tpow=-15, sd=1)
        outfile.write("{0},{1}\n".format(i, mcd))
# -
| convert-jp-m-adapt4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multi-bin Poisson
# +
import logging
import json
import math
import numpy as np
import matplotlib.pyplot as plt
import pyhf
from pyhf import Model, optimizer
from pyhf.simplemodels import uncorrelated_background
from pyhf.contrib.viz import brazil
from scipy.interpolate import griddata
import scrapbook as sb
# +
def plot_histo(ax, binning, data):
    """Draw per-bin expected counts as bars; binning = (nbins, lo, hi)."""
    bin_width = (binning[2] - binning[1]) / binning[0]
    bin_leftedges = np.linspace(binning[1], binning[2], binning[0] + 1)[:-1]
    bin_centers = [le + bin_width / 2.0 for le in bin_leftedges]
    # Use the computed bin width; the previous hard-coded width of 1 only
    # happened to match the binnings used in this notebook.
    ax.bar(bin_centers, data, bin_width, alpha=0.5)
def plot_data(ax, binning, data):
    """Draw observed counts as points with sqrt(N) error bars; binning = (nbins, lo, hi)."""
    errors = [math.sqrt(d) for d in data]
    bin_width = (binning[2] - binning[1]) / binning[0]
    bin_leftedges = np.linspace(binning[1], binning[2], binning[0] + 1)[:-1]
    bin_centers = [le + bin_width / 2.0 for le in bin_leftedges]
    # Zero-width bars: only the error bars render; markers are drawn below.
    ax.bar(
        bin_centers,
        data,
        0,
        yerr=errors,
        linewidth=0,
        error_kw=dict(ecolor='k', elinewidth=1),
    )
    ax.scatter(bin_centers, data, c='k')
# + tags=["parameters"]
validation_datadir = '../../validation/data'
# +
source = json.load(open(validation_datadir + '/1bin_example1.json'))
model = uncorrelated_background(
source['bindata']['sig'], source['bindata']['bkg'], source['bindata']['bkgerr']
)
data = source['bindata']['data'] + model.config.auxdata
init_pars = model.config.suggested_init()
par_bounds = model.config.suggested_bounds()
obs_limit, exp_limits, (poi_tests, tests) = pyhf.infer.intervals.upperlimit(
data, model, np.linspace(0, 5, 61), level=0.05, return_results=True
)
# -
fig, ax = plt.subplots(figsize=(10, 7))
artists = brazil.plot_results(poi_tests, tests, test_size=0.05, ax=ax)
print(f'expected upper limits: {exp_limits}')
print(f'observed upper limit : {obs_limit}')
# +
# Two-bin example defined inline.
source = {
    "binning": [2, -0.5, 1.5],
    "bindata": {
        "data": [120.0, 145.0],
        "bkg": [100.0, 150.0],
        "bkgerr": [15.0, 20.0],
        "sig": [30.0, 45.0],
    },
}
my_observed_counts = source['bindata']['data']
model = uncorrelated_background(
    source['bindata']['sig'], source['bindata']['bkg'], source['bindata']['bkgerr']
)
data = my_observed_counts + model.config.auxdata
binning = source['binning']
# Background-only expectation: copy the nominal parameters and zero the POI.
nompars = model.config.suggested_init()
bonly_pars = [x for x in nompars]
bonly_pars[model.config.poi_index] = 0.0
nom_bonly = model.expected_data(bonly_pars, include_auxdata=False)
nom_sb = model.expected_data(nompars, include_auxdata=False)
init_pars = model.config.suggested_init()
par_bounds = model.config.suggested_bounds()
print(init_pars)
# Unconditional maximum-likelihood fit to the observed data.
bestfit_pars = pyhf.infer.mle.fit(data, model, init_pars, par_bounds)
bestfit_cts = model.expected_data(bestfit_pars, include_auxdata=False)
# +
# Three panels: background-only, nominal s+b, and best-fit expectations,
# each overlaid with the observed counts.
f, axarr = plt.subplots(1, 3, sharey=True)
f.set_size_inches(12, 4)
plot_histo(axarr[0], binning, nom_bonly)
plot_data(axarr[0], binning, my_observed_counts)
axarr[0].set_xlim(binning[1:])
plot_histo(axarr[1], binning, nom_sb)
plot_data(axarr[1], binning, my_observed_counts)
axarr[1].set_xlim(binning[1:])
plot_histo(axarr[2], binning, bestfit_cts)
plot_data(axarr[2], binning, my_observed_counts)
axarr[2].set_xlim(binning[1:])
plt.ylim(0, 300);
# +
## DUMMY 2D thing
def signal(m1, m2):
    """Toy two-bin signal model: counts fall off exponentially with effective mass."""
    mass_scale = 150.0
    min_mass = 100.0
    count_scale = 2000
    effective_mass = np.sqrt(m1 ** 2 + m2 ** 2)
    first_bin = count_scale * np.exp(-(effective_mass - min_mass) / mass_scale)
    # Second bin carries no signal in this dummy model.
    return [first_bin, 0]
def CLs(m1, m2):
    """Run a mu=1 hypothesis test for the signal at mass point (m1, m2).

    Returns (cls_obs, cls_exp_set, True) on success, or (None, None, False)
    when the underlying fit fails.
    """
    pdf = uncorrelated_background(
        signal(m1, m2), source['bindata']['bkg'], source['bindata']['bkgerr']
    )
    try:
        observed, expected_set = pyhf.infer.hypotest(
            1.0, data, pdf, init_pars, par_bounds, return_expected_set=True
        )
    except AssertionError:
        print(f'fit failed for mass points ({m1}, {m2})')
        return None, None, False
    return observed, expected_set, True
# -
# Scan a 15x15 grid of (m1, m2) mass points and interpolate the CLs maps.
nx, ny = 15, 15
grid = grid_x, grid_y = np.mgrid[
    100 : 1000 : complex(0, nx), 100 : 1000 : complex(0, ny)
]
X = grid.T.reshape(nx * ny, 2)
results = [CLs(m1, m2) for m1, m2 in X]
# Keep only the mass points whose fit converged.
X = np.array([x for x, (_, _, success) in zip(X, results) if success])
yobs = np.array([obs for obs, exp, success in results if success]).flatten()
# Five expected bands: -2, -1, 0, +1, +2 sigma.
yexp = [
    np.array([exp[i] for obs, exp, success in results if success]).flatten()
    for i in range(5)
]
# +
int_obs = griddata(X, yobs, (grid_x, grid_y), method='linear')
int_exp = [griddata(X, yexp[i], (grid_x, grid_y), method='linear') for i in range(5)]
plt.contourf(grid_x, grid_y, int_obs, levels=np.linspace(0, 1))
plt.colorbar()
# Observed (solid) and expected (dashed) 95% CL exclusion contours.
plt.contour(grid_x, grid_y, int_obs, levels=[0.05], colors='w')
for level in int_exp:
    plt.contour(grid_x, grid_y, level, levels=[0.05], colors='w', linestyles='dashed')
plt.scatter(X[:, 0], X[:, 1], c=yobs, vmin=0, vmax=1);
# -
sb.glue("number_2d_successpoints", len(X))
| docs/examples/notebooks/multiBinPois.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Updating the Tutorial Notebooks
# You can use the provided [**igz-tutorials-get.sh**](https://github.com/v3io/tutorials/blob/master/igz-tutorials-get.sh) script to update the tutorial notebooks to the latest stable version available on [GitHub](https://github.com/v3io/tutorials/). The script copies the latest tutorial files and directories to the **/v3io/users/<username>** directory, where `<username>` is the name of the running service user (`$V3IO_USERNAME`) unless you pass another username.
#
# > **Note:**
# > - Before running the script, close all GitHub tutorial files that might be updated, such as the **welcome.ipynb** or **getting-started** and **demo** notebooks.<br>
# > The script doesn't overwrite the current notebook (**update-tutorials.ipynb**) or the **igz-tutorials-get.sh** script, nor any custom file or directory that you created.
# > - Make sure that the updated notebooks match your version of the Iguazio Data Science Platform.
# > If in doubt, consult the [customer-success team](mailto:<EMAIL>).
# To use the script, first change its permissions. This only needs to be done once:
# !chmod +x /User/igz-tutorials-get.sh
# To update the tutorial notebooks in your user home directory, simply run the following code:
# !/User/igz-tutorials-get.sh
# For full usage instructions, run the script with the `-h` or `--help` flag:
# !/User/igz-tutorials-get.sh -h
| update-tutorials.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from rdkit import Chem
from multiprocessing import Pool
import pandas as pd
from rdkit.Chem import PandasTools
# Load the GuacaMol SMILES set: one SMILES per whitespace-separated token.
with open('data/guacamol_v1_all.smiles') as f:
    chembl_smiles = f.read().split()
n_compounds = len(chembl_smiles)
# +
# SMARTS patterns for substructures of interest (peroxides, N-oxides,
# imidazoles, imines, S-O motifs, ...), pre-compiled to query molecules.
smarts_strings = ['O-O', 'O-O-O', 'ON([H])O', 'N1-C=N-C=C-1', 'c1c[nH]cn1', 'N-F','*-N=C(-*)-*', 'N=C-N', 'C=N-C=C', 'S-O', 'S-O-O', 'O-N-N', 'C-!@[NX2]=[C!R,#1]-C']
substructures = [Chem.MolFromSmarts(s) for s in smarts_strings]
def match_substructures(smiles):
    """Return per-pattern substructure match flags for `smiles`, or None on failure."""
    try:
        mol = Chem.MolFromSmiles(smiles)
        # AddHs raises on None (unparsable SMILES); catch Exception instead of
        # the original bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        mol = Chem.AddHs(mol)
        return [mol.HasSubstructMatch(p) for p in substructures]
    except Exception:
        return None
# Parse and match in parallel with 10 worker processes.
with Pool(10) as pool:
    matches = pool.map(match_substructures, chembl_smiles)
# +
import numpy as np
matches = np.array(matches)
# Per-pattern match counts across the whole data set.
print('; '.join(smarts_strings))
print('; '.join(str(i) for i in matches.sum(0)))
chembl_smiles_np = np.array(chembl_smiles)
# -
# HTML was used below but never imported — import it alongside display.
from IPython.display import display, HTML
df = pd.DataFrame({'SMARTS': smarts_strings, 'Mol': substructures, 'Counts': matches.sum(0)})
display(HTML(df.to_html(escape=False)))
# Keep only the rare patterns; .copy() avoids pandas' SettingWithCopyWarning
# on the column assignments below.
df_filtered = df[df['Counts'] < 15].copy()
HTML(df_filtered.to_html())
df_filtered['#'] = range(1, len(df_filtered)+1)
df_filtered['rel. freq.'] = df_filtered['Counts'] / n_compounds
print(df_filtered[['#', 'SMARTS', 'Counts', 'rel. freq.']].to_latex(index=False))
# Display the first 30 molecules matching pattern index 4 ('c1c[nH]cn1').
for i in range(30):
    display(Chem.MolFromSmiles(chembl_smiles_np[matches[:,4]][i]))
chembl_smiles_np[matches[:,1]][0]
# Inspect one molecule matching the last pattern against the imine SMARTS.
s = chembl_smiles_np[matches[:,-1]][10000]
m = Chem.MolFromSmiles(s)
#
m
p = Chem.MolFromSmarts('*-N=C(-*)-*')
print(m.HasSubstructMatch(p))
p
# +
# ChEMBL export downloaded from the web interface (';'-separated).
df = pd.read_csv('/home/lfr/Downloads/DOWNLOAD-bGxOa7WMXUE0NN6ZKzAhPRjpjNgO-KtkZjIlyPHO01c=.csv', sep=';')
# -
c = df['Smiles']
# Drop rows without a SMILES string.
smiles_new = c[~pd.isna(c)]
smiles_new
# Spot-check for trioxide-containing SMILES.
for i, s in enumerate(smiles_new):
    if 'OOO' in s:
        print(i, s)
# Structural-alert collection: one description + SMARTS per alert row.
alerts = pd.read_csv('alert_collection.csv')
alerts
alerts_patterns = {k: Chem.MolFromSmarts(v) for k,v in zip(alerts['description'], alerts['smarts'])}
alerts_patterns
# +
# Check one example molecule against every alert pattern.
s = 'C1=NC=CN1'
s = 'Fc1ccc(C(=NO)CSOOCNNOC)cc1'  # NOTE: overwrites the previous assignment
m = Chem.MolFromSmiles(s)
display(m)
for name, p in alerts_patterns.items():
    if m.HasSubstructMatch(p):
        print(name)
| substructure_search.ipynb |
# + [markdown] colab_type="text" id="copyright-notice"
# #### Copyright 2017 Google LLC.
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="copyright-notice2"
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="g4T-_IsVbweU"
# # 特征组合
# + [markdown] colab_type="text" id="F7dke6skIK-k"
# **学习目标:**
# * 通过添加其他合成特征来改进线性回归模型(这是前一个练习的延续)
# * 使用输入函数将 Pandas `DataFrame` 对象转换为 `Tensors`,并在 `fit()` 和 `predict()` 中调用输入函数
# * 使用 FTRL 优化算法进行模型训练
# * 通过独热编码、分箱和特征组合创建新的合成特征
# + [markdown] colab_type="text" id="NS_fcQRd8B97"
# ## 设置
# + [markdown] colab_type="text" id="4IdzD8IdIK-l"
# 首先,我们来定义输入并创建数据加载代码,正如我们在之前的练习中所做的那样。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="CsfdiLiDIK-n"
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
california_housing_dataframe = pd.read_csv("https://download.mlcc.google.cn/mledu-datasets/california_housing_train.csv", sep=",")
# Shuffle rows so the later head/tail train/validation split is random.
california_housing_dataframe = california_housing_dataframe.reindex(
    np.random.permutation(california_housing_dataframe.index))
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="10rhoflKIK-s"
def preprocess_features(california_housing_dataframe):
    """Prepares input features from California housing data set.

    Args:
      california_housing_dataframe: A Pandas DataFrame expected to contain data
        from the California housing data set.
    Returns:
      A DataFrame that contains the features to be used for the model, including
      synthetic features.
    """
    feature_names = [
        "latitude",
        "longitude",
        "housing_median_age",
        "total_rooms",
        "total_bedrooms",
        "population",
        "households",
        "median_income",
    ]
    processed_features = california_housing_dataframe[feature_names].copy()
    # Synthetic feature: average rooms per person in the block group.
    processed_features["rooms_per_person"] = (
        california_housing_dataframe["total_rooms"]
        / california_housing_dataframe["population"])
    return processed_features
def preprocess_targets(california_housing_dataframe):
    """Prepares target features (i.e., labels) from California housing data set.

    Args:
      california_housing_dataframe: A Pandas DataFrame expected to contain data
        from the California housing data set.
    Returns:
      A DataFrame that contains the target feature.
    """
    # Scale the target to be in units of thousands of dollars.
    scaled_value = california_housing_dataframe["median_house_value"] / 1000.0
    return pd.DataFrame({"median_house_value": scaled_value})
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="ufplEkjN8KUp"
# head/tail split is effectively random thanks to the earlier row shuffle.
# Choose the first 12000 (out of 17000) examples for training.
training_examples = preprocess_features(california_housing_dataframe.head(12000))
training_targets = preprocess_targets(california_housing_dataframe.head(12000))
# Choose the last 5000 (out of 17000) examples for validation.
validation_examples = preprocess_features(california_housing_dataframe.tail(5000))
validation_targets = preprocess_targets(california_housing_dataframe.tail(5000))
# Double-check that we've done the right thing.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())
print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="oJlrB4rJ_2Ma"
def construct_feature_columns(input_features):
    """Construct the TensorFlow Feature Columns.

    Args:
      input_features: The names of the numerical input features to use.
    Returns:
      A set of feature columns
    """
    return {tf.feature_column.numeric_column(feature_name)
            for feature_name in input_features}
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="NBxoAfp2AcB6"
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Trains a linear regression model.

    Args:
      features: pandas DataFrame of features
      targets: pandas DataFrame of targets
      batch_size: Size of batches to be passed to the model
      shuffle: True or False. Whether to shuffle the data.
      num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
    Returns:
      Tuple of (features, labels) for next data batch
    """
    # Re-pack the DataFrame columns as a dict of NumPy arrays.
    feature_arrays = {name: np.array(column) for name, column in dict(features).items()}

    # Build the dataset and configure batching/repeating.
    dataset = Dataset.from_tensor_slices((feature_arrays, targets))  # warning: 2GB limit
    dataset = dataset.batch(batch_size).repeat(num_epochs)

    # Shuffle the data, if specified.
    if shuffle:
        dataset = dataset.shuffle(10000)

    # Hand back the next batch through a one-shot iterator.
    batch_features, batch_labels = dataset.make_one_shot_iterator().get_next()
    return batch_features, batch_labels
# + [markdown] colab_type="text" id="hweDyy31LBsV"
# ## FTRL 优化算法
#
# 高维度线性模型可受益于使用一种基于梯度的优化方法,叫做 FTRL。该算法的优势是针对不同系数以不同方式调整学习速率,如果某些特征很少采用非零值,该算法可能比较实用(也非常适合支持 L1 正则化)。我们可以使用 [FtrlOptimizer](https://www.tensorflow.org/api_docs/python/tf/train/FtrlOptimizer) 来应用 FTRL。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="S0SBf1X1IK_O"
def train_model(
    learning_rate,
    steps,
    batch_size,
    feature_columns,
    training_examples,
    training_targets,
    validation_examples,
    validation_targets):
    """Trains a linear regression model.

    In addition to training, this function also prints training progress information,
    as well as a plot of the training and validation loss over time.

    Args:
      learning_rate: A `float`, the learning rate.
      steps: A non-zero `int`, the total number of training steps. A training step
        consists of a forward and backward pass using a single batch.
      batch_size: A non-zero `int`, the batch size.
      feature_columns: A `set` specifying the input feature columns to use.
      training_examples: A `DataFrame` containing one or more columns from
        `california_housing_dataframe` to use as input features for training.
      training_targets: A `DataFrame` containing exactly one column from
        `california_housing_dataframe` to use as target for training.
      validation_examples: A `DataFrame` containing one or more columns from
        `california_housing_dataframe` to use as input features for validation.
      validation_targets: A `DataFrame` containing exactly one column from
        `california_housing_dataframe` to use as target for validation.
    Returns:
      A `LinearRegressor` object trained on the training data.
    """
    periods = 10
    steps_per_period = steps / periods

    # Create a linear regressor object using the FTRL optimizer with
    # gradient clipping (per-coordinate learning rates suit sparse features).
    my_optimizer = tf.train.FtrlOptimizer(learning_rate=learning_rate)
    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    linear_regressor = tf.estimator.LinearRegressor(
        feature_columns=feature_columns,
        optimizer=my_optimizer
    )

    # Separate input functions: shuffled for training, single-pass
    # unshuffled for prediction so RMSE is computed over every example once.
    training_input_fn = lambda: my_input_fn(training_examples,
                                            training_targets["median_house_value"],
                                            batch_size=batch_size)
    predict_training_input_fn = lambda: my_input_fn(training_examples,
                                                    training_targets["median_house_value"],
                                                    num_epochs=1,
                                                    shuffle=False)
    predict_validation_input_fn = lambda: my_input_fn(validation_examples,
                                                      validation_targets["median_house_value"],
                                                      num_epochs=1,
                                                      shuffle=False)

    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("RMSE (on training data):")
    training_rmse = []
    validation_rmse = []
    for period in range (0, periods):
        # Train the model, starting from the prior state.
        linear_regressor.train(
            input_fn=training_input_fn,
            steps=steps_per_period
        )
        # Take a break and compute predictions.
        training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
        training_predictions = np.array([item['predictions'][0] for item in training_predictions])
        validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
        validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
        # Compute training and validation loss.
        training_root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(training_predictions, training_targets))
        validation_root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(validation_predictions, validation_targets))
        # Occasionally print the current loss.
        print("  period %02d : %0.2f" % (period, training_root_mean_squared_error))
        # Add the loss metrics from this period to our list.
        training_rmse.append(training_root_mean_squared_error)
        validation_rmse.append(validation_root_mean_squared_error)
    print("Model training finished.")

    # Output a graph of loss metrics over periods.
    plt.ylabel("RMSE")
    plt.xlabel("Periods")
    plt.title("Root Mean Squared Error vs. Periods")
    plt.tight_layout()
    plt.plot(training_rmse, label="training")
    plt.plot(validation_rmse, label="validation")
    plt.legend()

    return linear_regressor
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="1Cdr02tLIK_Q"
# Baseline run with plain (un-bucketized) numeric feature columns.
_ = train_model(
    learning_rate=1.0,
    steps=500,
    batch_size=100,
    feature_columns=construct_feature_columns(training_examples),
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] colab_type="text" id="i4lGvqajDWlw"
# ## 离散特征的独热编码
#
# 通常,在训练逻辑回归模型之前,离散(即字符串、枚举、整数)特征会转换为二元特征系列。
#
# 例如,假设我们创建了一个合成特征,可以采用 `0`、`1` 或 `2` 中的任何值,并且我们还具有以下几个训练点:
#
# | # | feature_value |
# |---|---------------|
# | 0 | 2 |
# | 1 | 0 |
# | 2 | 1 |
#
# 对于每个可能的分类值,我们都会创建一个新的**二元****实值**特征,该特征只能采用两个可能值中的一个:如果示例中包含该值,则值为 1.0;如果不包含,则值为 0.0。在上述示例中,分类特征会被转换成三个特征,现在训练点如下所示:
#
# | # | feature_value_0 | feature_value_1 | feature_value_2 |
# |---|-----------------|-----------------|-----------------|
# | 0 | 0.0 | 0.0 | 1.0 |
# | 1 | 1.0 | 0.0 | 0.0 |
# | 2 | 0.0 | 1.0 | 0.0 |
# + [markdown] colab_type="text" id="KnssXowblKm7"
# ## 分桶(分箱)特征
#
# 分桶也称为分箱。
#
# 例如,我们可以将 `population` 分为以下 3 个分桶:
# - `bucket_0` (`< 5000`):对应于人口分布较少的街区
# - `bucket_1` (`5000 - 25000`):对应于人口分布适中的街区
# - `bucket_2` (`> 25000`):对应于人口分布较多的街区
#
# 根据前面的分桶定义,以下 `population` 矢量:
#
# [[10001], [42004], [2500], [18000]]
#
# 将变成以下经过分桶的特征矢量:
#
# [[1], [2], [0], [1]]
#
# 这些特征值现在是分桶索引。请注意,这些索引被视为离散特征。通常情况下,这些特征将被进一步转换为上述独热表示法,但这是以透明方式实现的。
#
# 要为分桶特征定义特征列,我们可以使用 [`bucketized_column`](https://www.tensorflow.org/api_docs/python/tf/feature_column/bucketized_column)(而不是使用 `numeric_column`),该列将数字列作为输入,并使用 `boundardies` 参数中指定的分桶边界将其转换为分桶特征。以下代码为 `households` 和 `longitude` 定义了分桶特征列;`get_quantile_based_boundaries` 函数会根据分位数计算边界,以便每个分桶包含相同数量的元素。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="cc9qZrtRy-ED"
def get_quantile_based_boundaries(feature_values, num_buckets):
  """Return num_buckets - 1 quantile cut points for *feature_values*.

  The boundaries sit at evenly spaced quantiles, so each resulting bucket
  holds roughly the same number of examples.
  """
  quantile_fractions = np.arange(1.0, num_buckets) / num_buckets
  return list(feature_values.quantile(quantile_fractions))
# Demonstration: turn two numeric columns into bucketized feature columns,
# with quantile-based boundaries so each bucket gets about the same count.
# Divide households into 7 buckets.
households = tf.feature_column.numeric_column("households")
bucketized_households = tf.feature_column.bucketized_column(
  households, boundaries=get_quantile_based_boundaries(
    california_housing_dataframe["households"], 7))
# Divide longitude into 10 buckets.
longitude = tf.feature_column.numeric_column("longitude")
bucketized_longitude = tf.feature_column.bucketized_column(
  longitude, boundaries=get_quantile_based_boundaries(
    california_housing_dataframe["longitude"], 10))
# + [markdown] colab_type="text" id="U-pQDAa0MeN3"
# ## 任务 1:使用分桶特征列训练模型
# **将我们示例中的所有实值特征进行分桶,训练模型,然后查看结果是否有所改善。**
#
# 在前面的代码块中,两个实值列(即 `households` 和 `longitude`)已被转换为分桶特征列。您的任务是对其余的列进行分桶,然后运行代码来训练模型。您可以采用各种启发法来确定分桶的范围。本练习使用了分位数技巧,通过这种方式选择分桶边界后,每个分桶将包含相同数量的样本。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="YFXV9lyMLedy"
def construct_feature_columns():
  """Construct the TensorFlow Feature Columns.

  Returns:
    A set of feature columns
  """
  # Raw numeric columns; each is bucketized below before being handed to the model.
  households = tf.feature_column.numeric_column("households")
  longitude = tf.feature_column.numeric_column("longitude")
  latitude = tf.feature_column.numeric_column("latitude")
  housing_median_age = tf.feature_column.numeric_column("housing_median_age")
  median_income = tf.feature_column.numeric_column("median_income")
  rooms_per_person = tf.feature_column.numeric_column("rooms_per_person")
  # Divide households into 7 buckets.
  bucketized_households = tf.feature_column.bucketized_column(
    households, boundaries=get_quantile_based_boundaries(
      training_examples["households"], 7))
  # Divide longitude into 10 buckets.
  bucketized_longitude = tf.feature_column.bucketized_column(
    longitude, boundaries=get_quantile_based_boundaries(
      training_examples["longitude"], 10))
  #
  # YOUR CODE HERE: bucketize the following columns, following the example above:
  #
  # NOTE: the four assignments below are intentionally incomplete exercise
  # placeholders; this cell does not run until they are filled in (a completed
  # version appears in the solution cell further down).
  bucketized_latitude =
  bucketized_housing_median_age =
  bucketized_median_income =
  bucketized_rooms_per_person =
  feature_columns = set([
    bucketized_longitude,
    bucketized_latitude,
    bucketized_housing_median_age,
    bucketized_households,
    bucketized_median_income,
    bucketized_rooms_per_person])
  return feature_columns
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="0FfUytOTNJhL"
# Train with the (student-completed) bucketized feature columns from Task 1.
_ = train_model(
    learning_rate=1.0,
    steps=500,
    batch_size=100,
    feature_columns=construct_feature_columns(),
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] colab_type="text" id="ZTDHHM61NPTw"
# ### 解决方案
#
# 点击下方即可查看解决方案。
# + [markdown] colab_type="text" id="JQHnUhL_NRwA"
# 您可能想知道如何确定要使用多少个分桶。这当然要取决于数据。在这里,我们只是选择了任意值,以获得一个不太大的模型。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="Ro5civQ3Ngh_"
def construct_feature_columns():
  """Construct the TensorFlow Feature Columns.

  Returns:
    A set of feature columns
  """
  # Bucket count per feature; quantile-based boundaries give each bucket
  # roughly the same number of training examples.
  bucket_counts = {
      "households": 7,
      "longitude": 10,
      "latitude": 10,
      "housing_median_age": 7,
      "median_income": 7,
      "rooms_per_person": 7,
  }
  feature_columns = set()
  for feature_name, num_buckets in bucket_counts.items():
    numeric = tf.feature_column.numeric_column(feature_name)
    feature_columns.add(
        tf.feature_column.bucketized_column(
            numeric,
            boundaries=get_quantile_based_boundaries(
                training_examples[feature_name], num_buckets)))
  return feature_columns
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="RNgfYk6OO8Sy"
# Train with the solution's fully bucketized feature columns.
_ = train_model(
    learning_rate=1.0,
    steps=500,
    batch_size=100,
    feature_columns=construct_feature_columns(),
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] colab_type="text" id="AFJ1qoZPlQcs"
# ## 特征组合
#
# 组合两个(或更多个)特征是使用线性模型来学习非线性关系的一种聪明做法。在我们的问题中,如果我们只使用 `latitude` 特征进行学习,那么该模型可能会发现特定纬度(或特定纬度范围内,因为我们已经将其分桶)的城市街区更可能比其他街区住房成本高昂。`longitude` 特征的情况与此类似。但是,如果我们将 `longitude` 与 `latitude` 组合,产生的组合特征则代表一个明确的城市街区。如果模型发现某些城市街区(位于特定纬度和经度范围内)更可能比其他街区住房成本高昂,那么这将是比单独考虑两个特征更强烈的信号。
#
# 目前,特征列 API 仅支持组合离散特征。要组合两个连续的值(比如 `latitude` 或 `longitude`),我们可以对其进行分桶。
#
# 如果我们组合 `latitude` 和 `longitude` 特征(例如,假设 `longitude` 被分到 `2` 个分桶中,而 `latitude` 有 `3` 个分桶),我们实际上会得到 6 个组合的二元特征。当我们训练模型时,每个特征都会分别获得自己的权重。
# + [markdown] colab_type="text" id="-Rk0c1oTYaVH"
# ## 任务 2:使用特征组合训练模型
#
# **在模型中添加 `longitude` 与 `latitude` 的特征组合,训练模型,然后确定结果是否有所改善。**
#
# 请参阅有关 [`crossed_column()`](https://www.tensorflow.org/api_docs/python/tf/feature_column/crossed_column) 的 TensorFlow API 文档,了解如何为您的组合构建特征列。`hash_bucket_size` 可以设为 `1000`。
# + cellView="both" colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="-eYiVEGeYhUi"
def construct_feature_columns():
  """Construct the TensorFlow Feature Columns.

  Returns:
    A set of feature columns
  """
  # Raw numeric columns; each is bucketized below.
  households = tf.feature_column.numeric_column("households")
  longitude = tf.feature_column.numeric_column("longitude")
  latitude = tf.feature_column.numeric_column("latitude")
  housing_median_age = tf.feature_column.numeric_column("housing_median_age")
  median_income = tf.feature_column.numeric_column("median_income")
  rooms_per_person = tf.feature_column.numeric_column("rooms_per_person")
  # Divide households into 7 buckets.
  bucketized_households = tf.feature_column.bucketized_column(
    households, boundaries=get_quantile_based_boundaries(
      training_examples["households"], 7))
  # Divide longitude into 10 buckets.
  bucketized_longitude = tf.feature_column.bucketized_column(
    longitude, boundaries=get_quantile_based_boundaries(
      training_examples["longitude"], 10))
  # Divide latitude into 10 buckets.
  bucketized_latitude = tf.feature_column.bucketized_column(
    latitude, boundaries=get_quantile_based_boundaries(
      training_examples["latitude"], 10))
  # Divide housing_median_age into 7 buckets.
  bucketized_housing_median_age = tf.feature_column.bucketized_column(
    housing_median_age, boundaries=get_quantile_based_boundaries(
      training_examples["housing_median_age"], 7))
  # Divide median_income into 7 buckets.
  bucketized_median_income = tf.feature_column.bucketized_column(
    median_income, boundaries=get_quantile_based_boundaries(
      training_examples["median_income"], 7))
  # Divide rooms_per_person into 7 buckets.
  bucketized_rooms_per_person = tf.feature_column.bucketized_column(
    rooms_per_person, boundaries=get_quantile_based_boundaries(
      training_examples["rooms_per_person"], 7))
  # YOUR CODE HERE: Make a feature column for the long_x_lat feature cross
  # NOTE: intentionally incomplete exercise placeholder; this cell does not
  # run until it is filled in (see the solution cell further down).
  long_x_lat =
  feature_columns = set([
    bucketized_longitude,
    bucketized_latitude,
    bucketized_housing_median_age,
    bucketized_households,
    bucketized_median_income,
    bucketized_rooms_per_person,
    long_x_lat])
  return feature_columns
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="xZuZMp3EShkM"
# Train with bucketized columns plus the (student-completed) feature cross.
_ = train_model(
    learning_rate=1.0,
    steps=500,
    batch_size=100,
    feature_columns=construct_feature_columns(),
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] colab_type="text" id="0i7vGo9PTaZl"
# ### 解决方案
#
# 点击下方即可查看解决方案。
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="3tAWu8qSTe2v"
def construct_feature_columns():
  """Construct the TensorFlow Feature Columns.

  Returns:
    A set of feature columns
  """
  # Bucket count per feature; quantile-based boundaries give each bucket
  # roughly the same number of training examples.
  bucket_counts = {
      "households": 7,
      "longitude": 10,
      "latitude": 10,
      "housing_median_age": 7,
      "median_income": 7,
      "rooms_per_person": 7,
  }
  bucketized = {}
  for feature_name, num_buckets in bucket_counts.items():
    numeric = tf.feature_column.numeric_column(feature_name)
    bucketized[feature_name] = tf.feature_column.bucketized_column(
        numeric,
        boundaries=get_quantile_based_boundaries(
            training_examples[feature_name], num_buckets))
  # Feature cross of binned longitude x binned latitude: identifies specific
  # geographic cells instead of independent longitude/latitude bands.
  long_x_lat = tf.feature_column.crossed_column(
      set([bucketized["longitude"], bucketized["latitude"]]),
      hash_bucket_size=1000)
  feature_columns = set(bucketized.values())
  feature_columns.add(long_x_lat)
  return feature_columns
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="-_vvNYIyTtPC"
# Train with the solution's bucketized columns plus the longitude x latitude cross.
_ = train_model(
    learning_rate=1.0,
    steps=500,
    batch_size=100,
    feature_columns=construct_feature_columns(),
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)
# + [markdown] colab_type="text" id="ymlHJ-vrhLZw"
# ## 可选挑战:尝试更多合成特征
#
# 到目前为止,我们已经尝试了简单的分桶列和特征组合,但还有更多组合有可能会改进结果。例如,您可以组合多个列。如果改变分桶的数量,会出现什么情况?您还能想到哪些其他的合成特征?它们能否改进模型效果?
# exercises/feature_crosses.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" papermill={"duration": 0.083561, "end_time": "2021-08-21T12:15:16.757053", "exception": false, "start_time": "2021-08-21T12:15:16.673492", "status": "completed"} tags=[]
# ### This kernel used dataset from the Home Credit Default Risk and copied from the 'Introduction to Manual Feature Engineering' written by <NAME>
#
# #### **Introduction to Manual Feature Engineering : [URL](https://www.kaggle.com/willkoehrsen/introduction-to-manual-feature-engineering)**
#
# *Thanks for sharing kernel, <NAME>*
# + [markdown] papermill={"duration": 0.081628, "end_time": "2021-08-21T12:15:16.922171", "exception": false, "start_time": "2021-08-21T12:15:16.840543", "status": "completed"} tags=[]
# ### Introduction: Manual Feature Engineering
#
# If you are new to this competition, I highly suggest checking out this notebook to get started.
#
# In this notebook, we will explore making features by hand for the Home Credit Default Risk competition. In an earlier notebook, we used only the application data in order to build a model. The best model we made from this data achieved a score on the leaderboard around 0.74. In order to better this score, we will have to include more information from the other dataframes. Here, we will look at using information from the bureau and bureau_balance data. The definitions of these data files are:
#
# * bureau: information about client's previous loans with other financial institutions reported to Home Credit. Each previous loan has its own row.
# * bureau_balance: monthly information about the previous loans. Each month has its own row.
#
# Manual feature engineering can be a tedious process (which is why we use automated feature engineering with featuretools!) and often relies on domain expertise. Since I have limited domain knowledge of loans and what makes a person likely to default, I will instead concentrate of getting as much info as possible into the final training dataframe. The idea is that the model will then pick up on which features are important rather than us having to decide that. Basically, our approach is to make as many features as possible and then give them all to the model to use! Later, we can perform feature reduction using the feature importances from the model or other techniques such as PCA.
#
# The process of manual feature engineering will involve plenty of Pandas code, a little patience, and a lot of great practice manipulation data. Even though automated feature engineering tools are starting to be made available, feature engineering will still have to be done using plenty of data wrangling for a little while longer.
# + papermill={"duration": 1.168721, "end_time": "2021-08-21T12:15:18.172911", "exception": false, "start_time": "2021-08-21T12:15:17.004190", "status": "completed"} tags=[]
# pandas and numpy for data manipulation
import pandas as pd
import numpy as np
# matplotlib and seaborn for plotting
import matplotlib.pyplot as plt
import seaborn as sns
# suppress warnings from pandas
import warnings
warnings.filterwarnings('ignore')
plt.style.use('fivethirtyeight')
# + [markdown] papermill={"duration": 0.083356, "end_time": "2021-08-21T12:15:18.344693", "exception": false, "start_time": "2021-08-21T12:15:18.261337", "status": "completed"} tags=[]
# ### Example: Counts of a client's previous loans
#
# To illustrate the general process of manual feature engineering, we will first simply get the count of a client's previous loans at other financial institutions. This requires a number of Pandas operations we will make heavy use of throughout the notebook:
#
# * groupby: group a dataframe by a column. In this case we will group by the unique client, the SK_ID_CURR column
# * agg: perform a calculation on the grouped data such as taking the mean of columns. We can either call the function directly (grouped_df.mean()) or use the agg function together with a list of transforms (grouped_df.agg([mean, max, min, sum]))
# * merge: match the aggregated statistics to the appropriate client. We need to merge the original training data with the calculated stats on the SK_ID_CURR column which will insert NaN in any cell for which the client does not have the corresponding statistic
# We also use the (rename) function quite a bit specifying the columns to be renamed as a dictionary. This is useful in order to keep track of the new variables we create.
#
# This might seem like a lot, which is why we'll eventually write a function to do this process for us. Let's take a look at implementing this by hand first.
# + papermill={"duration": 5.598051, "end_time": "2021-08-21T12:15:24.027383", "exception": false, "start_time": "2021-08-21T12:15:18.429332", "status": "completed"} tags=[]
# Read in bureau
# One row per previous loan a client held at another financial institution.
bureau = pd.read_csv('../input/home-credit-default-risk/bureau.csv')
bureau.head()
# + papermill={"duration": 0.261882, "end_time": "2021-08-21T12:15:24.374844", "exception": false, "start_time": "2021-08-21T12:15:24.112962", "status": "completed"} tags=[]
# groupby the client id (SK_ID_CURR), count the number of previous loans, and rename the column
# (as_index=False keeps SK_ID_CURR as a regular column, ready for the merge below)
previous_loan_counts = bureau.groupby('SK_ID_CURR', as_index = False)['SK_ID_BUREAU'].count().rename(columns = {'SK_ID_BUREAU' : 'previous_loan_counts'})
previous_loan_counts.head()
# + papermill={"duration": 7.820592, "end_time": "2021-08-21T12:15:32.279142", "exception": false, "start_time": "2021-08-21T12:15:24.458550", "status": "completed"} tags=[]
# join to the training dataframe
train = pd.read_csv('../input/home-credit-default-risk/application_train.csv')
# left join: clients with no bureau record get NaN for the new column
train = train.merge(previous_loan_counts, on = 'SK_ID_CURR', how = 'left')
# fill the missing values with 0 (no bureau record => zero previous loans)
train['previous_loan_counts'] = train['previous_loan_counts'].fillna(0)
train.head()
# + [markdown] papermill={"duration": 0.086361, "end_time": "2021-08-21T12:15:32.450773", "exception": false, "start_time": "2021-08-21T12:15:32.364412", "status": "completed"} tags=[]
# ### Assessing Usefulness of New Variable with r value
#
# To determine if the new variable is useful, we can calculate the Pearson Correlation Coefficient (r-value) between this variable and the target. This measures the strength of a linear relationship between two variables and ranges from -1 (perfectly negatively linear) to +1 (perfectly positively linear). The r-value is not best measure of the "usefulness" of a new variable, but it can give a first approximation of whether a variable will be helpful to a machine learning model. The larger the r-value of a variable with respect to the target, the more a change in this variable is likely to affect the value of the target. Therefore, we look for the variables with the greatest absolute value r-value relative to the target.
#
# We can also visually inspect a relationship with the target using the Kernel Density Estimate (KDE) plot.
#
# ##### Kernel Density Estimate Plots
#
# The kernel density estimate plot shows the distribution of a single variable (think of it as a smoothed histogram). To see the different in distributions dependent on the value of a categorical variable, we can color the distributions differently according to the category. For example, we can show the kernel density estimate of the previous_loan_count colored by whether the TARGET = 1 or 0. The resulting KDE will show any significant differences in the distribution of the variable between people who did not repay their loan (TARGET == 1) and the people who did (TARGET == 0). This can serve as an indicator of whether a variable will be 'relevant' to a machine learning model.
#
# We will put this plotting functionality in a function to re-use for any variable
# + papermill={"duration": 0.097133, "end_time": "2021-08-21T12:15:32.634146", "exception": false, "start_time": "2021-08-21T12:15:32.537013", "status": "completed"} tags=[]
# plots the distribution of a variable colored by value of the target
def kde_target(var_name, df):
    """Plot KDEs of df[var_name] split by TARGET and print summary statistics.

    Shows one smoothed density per target class (0 = repaid, 1 = not repaid)
    and prints the Pearson correlation with TARGET plus per-class medians.
    """
    # calculate the correlation coefficient between the new variable and the target
    corr = df['TARGET'].corr(df[var_name])
    # calculate medians for repaid vs not repaid
    avg_repaid = df.loc[df['TARGET'] == 0, var_name].median()
    avg_not_repaid = df.loc[df['TARGET'] == 1, var_name].median()
    plt.figure(figsize = (12, 6))
    # plot the distribution for target == 0 and target == 1
    sns.kdeplot(df.loc[df['TARGET'] == 0, var_name], label = 'TARGET == 0')
    sns.kdeplot(df.loc[df['TARGET'] == 1, var_name], label = 'TARGET == 1')
    # label the plot
    plt.xlabel(var_name);
    plt.ylabel('Density');
    plt.title('%s Distribution ' % var_name);
    plt.legend();
    # print out the correlation
    print('The correlation between %s and the TARGET is %0.4f' % (var_name, corr))
    # Print out average values
    print('Median value for loan that was not repaid = %0.4f' % avg_not_repaid)
    print('Median value for loan that was repaid = %0.4f' % avg_repaid)
# + papermill={"duration": 1.913128, "end_time": "2021-08-21T12:15:34.633593", "exception": false, "start_time": "2021-08-21T12:15:32.720465", "status": "completed"} tags=[]
# Inspect the distribution of EXT_SOURCE_3 by target class.
kde_target('EXT_SOURCE_3', train)
# + papermill={"duration": 1.734456, "end_time": "2021-08-21T12:15:36.456288", "exception": false, "start_time": "2021-08-21T12:15:34.721832", "status": "completed"} tags=[]
# Inspect the engineered previous_loan_counts feature the same way.
kde_target('previous_loan_counts', train)
# + [markdown] papermill={"duration": 0.08851, "end_time": "2021-08-21T12:15:36.633953", "exception": false, "start_time": "2021-08-21T12:15:36.545443", "status": "completed"} tags=[]
# ### Aggregating Numeric Columns
#
# To account for the numeric information in the bureau dataframe, we can compute statistics for all the numeric columns. To do so, we groupby the client id, agg the grouped dataframe, and merge the result back into the training data. The agg function will only calculate the values for the numeric columns where the operation is considered valid. We will stick to using 'mean', 'max', 'min', 'sum' but any function can be passed in here. We can even write our own function and use it in an agg call.
# + papermill={"duration": 3.233009, "end_time": "2021-08-21T12:15:39.959461", "exception": false, "start_time": "2021-08-21T12:15:36.726452", "status": "completed"} tags=[]
# group by the client id, calculate aggregation statistics
# (SK_ID_BUREAU is dropped first -- aggregating a loan id would be meaningless)
bureau_agg = bureau.drop(columns = ['SK_ID_BUREAU']).groupby('SK_ID_CURR', as_index = False).agg(['count', 'mean', 'max', 'min', 'sum']).reset_index()
bureau_agg.head()
# + [markdown] _kg_hide-input=true papermill={"duration": 0.089049, "end_time": "2021-08-21T12:15:40.137653", "exception": false, "start_time": "2021-08-21T12:15:40.048604", "status": "completed"} tags=[]
# We need to create new names for each of these columns. The following code makes new names by appending the stat to the name. Here we have to deal with the fact that the dataframe has a multi-level index. I find these confusing and hard to work with, so I try to reduce to a single level index as quickly as possible.
# + papermill={"duration": 0.097654, "end_time": "2021-08-21T12:15:40.324278", "exception": false, "start_time": "2021-08-21T12:15:40.226624", "status": "completed"} tags=[]
# list of columns names
columns = ['SK_ID_CURR']
# iterate through the variables names
for var in bureau_agg.columns.levels[0]:
    # skip the id name
    if var != 'SK_ID_CURR':
        # iterate through the stat names
        # ([:-1] drops the '' stat entry that reset_index added for SK_ID_CURR)
        for stat in bureau_agg.columns.levels[1][:-1]:
            # make a new column name for the variable and stat
            columns.append('bureau_%s_%s' % (var, stat))
# + papermill={"duration": 0.121748, "end_time": "2021-08-21T12:15:40.535775", "exception": false, "start_time": "2021-08-21T12:15:40.414027", "status": "completed"} tags=[]
# assign the list of columns names as the dataframe column names
# (flattens the multi-level index produced by agg into single-level names)
bureau_agg.columns = columns
bureau_agg.head()
# + papermill={"duration": 1.726587, "end_time": "2021-08-21T12:15:42.352222", "exception": false, "start_time": "2021-08-21T12:15:40.625635", "status": "completed"} tags=[]
# Merge with the training data
# (left join: clients with no bureau record get NaN for every new stat column)
train = train.merge(bureau_agg, on = 'SK_ID_CURR', how = 'left')
train.head()
# + [markdown] papermill={"duration": 0.091058, "end_time": "2021-08-21T12:15:42.534400", "exception": false, "start_time": "2021-08-21T12:15:42.443342", "status": "completed"} tags=[]
# ### Correlations of Aggregated Values with Target
#
# We can calculate the correlation of all new values with the target. Again, we can use these as an approximation of the variables which may be important for modeling.
# + papermill={"duration": 0.483613, "end_time": "2021-08-21T12:15:43.109347", "exception": false, "start_time": "2021-08-21T12:15:42.625734", "status": "completed"} tags=[]
# list of new correlations
new_corrs = []
# iterate through the columns
for col in columns:
    # calculate correlation with the target
    corr = train['TARGET'].corr(train[col])
    # append the (column, correlation) tuple
    new_corrs.append((col, corr))
# + [markdown] papermill={"duration": 0.091045, "end_time": "2021-08-21T12:15:43.290678", "exception": false, "start_time": "2021-08-21T12:15:43.199633", "status": "completed"} tags=[]
# In the code below, we sort the correlations by the magnitude (absolute value) using the sorted Python function. We also make use of an anonymous lambda function, another important Python operation that is good to know.
# + papermill={"duration": 0.101256, "end_time": "2021-08-21T12:15:43.483481", "exception": false, "start_time": "2021-08-21T12:15:43.382225", "status": "completed"} tags=[]
# sort the correlations by the absolute value
# make sure to reverse to put the largest values at the front of list
new_corrs = sorted(new_corrs, key = lambda x: abs(x[1]), reverse = True)
new_corrs[:5]
# + [markdown] papermill={"duration": 0.090846, "end_time": "2021-08-21T12:15:43.664951", "exception": false, "start_time": "2021-08-21T12:15:43.574105", "status": "completed"} tags=[]
# None of the new variables have a significant correlation with the TARGET. We can look at the KDE plot of the highest correlated variable, bureau_DAYS_CREDIT_mean, with the target in terms of absolute magnitude correlation.
# + papermill={"duration": 1.80811, "end_time": "2021-08-21T12:15:45.566240", "exception": false, "start_time": "2021-08-21T12:15:43.758130", "status": "completed"} tags=[]
# KDE of the strongest (by |r|) aggregated feature.
kde_target('bureau_DAYS_CREDIT_mean', train)
# + [markdown] papermill={"duration": 0.111191, "end_time": "2021-08-21T12:15:45.784582", "exception": false, "start_time": "2021-08-21T12:15:45.673391", "status": "completed"} tags=[]
# The definition of this column is: "How many days before current application did client apply for Credit Bureau credit". My interpretation is this is the number of days that the previous loan was applied for before the application for a loan at Home Credit. Therefore, a larger negative number indicates the loan was further before the current loan application. We see an extremely weak positive relationship between the average of this variable and the target meaning that clients who applied for loans further in the past potentially are more likely to repay loans at Home Credit. With a correlation this weak though, it is just as likely to be noise as a signal.
#
# #### The Multiple Comparisons Problem
#
# When we have lots of variables, we expect some of them to be correlated just by pure chance, a problem known as multiple comparisons. We can make hundreds of features, and some will turn out to be correlated with the target simply because of random noise in the data. Then, when our model trains, it may overfit to these variables because it thinks they have a relationship with the target in the training set, but this does not necessarily generalize to the test set. There are many considerations that we have to take into account when making features!
# + [markdown] papermill={"duration": 0.093888, "end_time": "2021-08-21T12:15:45.975315", "exception": false, "start_time": "2021-08-21T12:15:45.881427", "status": "completed"} tags=[]
# ### Function for Numeric Aggregations
#
# Let's encapsulate all of the previous work into a function. This will allow us to compute aggregate stats for numeric columns across any dataframe. We will re-use this function when we want to apply the same operations for other dataframes.
# + papermill={"duration": 0.10846, "end_time": "2021-08-21T12:15:46.178940", "exception": false, "start_time": "2021-08-21T12:15:46.070480", "status": "completed"} tags=[]
def agg_numeric(df, group_var, df_name):
    """Aggregates the numeric values in a dataframe. This can
    be used to create features for each instance of the grouping variable.

    Parameters
    --------
        df (dataframe):
            the dataframe to calculate the statistics on
        group_var (string):
            the variable by which to group df
        df_name (string):
            the variable used to rename the columns

    Return
    --------
        agg (dataframe):
            a dataframe with the statistics aggregated for
            all numeric columns. Each instance of the grouping variable will have
            the statistics (count, mean, max, min, sum) calculated.
            The columns are also renamed to keep track of features created.
    """
    # remove id variables other than the grouping variable (aggregating an
    # id column such as SK_ID_BUREAU would produce meaningless statistics)
    for col in df:
        if col != group_var and 'SK_ID' in col:
            df = df.drop(columns = col)

    group_ids = df[group_var]
    # .copy() so the assignment below does not trigger a chained-assignment warning
    numeric_df = df.select_dtypes('number').copy()
    numeric_df[group_var] = group_ids

    # group by the specified variable and calculate the statistics
    agg = numeric_df.groupby(group_var).agg(['count', 'mean', 'max', 'min', 'sum']).reset_index()

    # Build the flattened column names from the *actual* column order.
    # (Iterating agg.columns.levels -- as the original tutorial code did --
    # relies on the ordering of the MultiIndex `levels` attribute, which is
    # fragile and can misalign names with data; agg.columns itself is
    # authoritative. The first entry is (group_var, '') from reset_index.)
    columns = [group_var]
    for var, stat in agg.columns[1:]:
        columns.append('%s_%s_%s' % (df_name, var, stat))

    agg.columns = columns
    return agg
# + papermill={"duration": 2.602096, "end_time": "2021-08-21T12:15:48.880951", "exception": false, "start_time": "2021-08-21T12:15:46.278855", "status": "completed"} tags=[]
# Same aggregation as the manual steps above, now via the reusable function.
bureau_agg_new = agg_numeric(bureau.drop(columns = ['SK_ID_BUREAU']), group_var = 'SK_ID_CURR', df_name = 'bureau')
bureau_agg_new.head()
# + [markdown] papermill={"duration": 0.093541, "end_time": "2021-08-21T12:15:49.069627", "exception": false, "start_time": "2021-08-21T12:15:48.976086", "status": "completed"} tags=[]
# ### Correlation Function
#
# Before we move on, we can also make the code to calculate correlations with the target into a function
# + papermill={"duration": 0.118507, "end_time": "2021-08-21T12:15:49.283186", "exception": false, "start_time": "2021-08-21T12:15:49.164679", "status": "completed"} tags=[]
# function to calculate correlations with the target for a dataframe
def target_corrs(df):
# list of correlations
corrs = []
# iterate through the columns
for col in df.columns:
print(col)
# skip the target column
if col != 'TARGET':
# calculate correlation with the target
corr = df['TARGET'].corr(df[col])
# append the llist as a turple
corrs.append((col, corr))
# sort by absolute magnitude of correlations
corrs = sorted(corrs, key = lambda x : abx(x[1]), reverse = True)
return corrs
# + [markdown] papermill={"duration": 0.095807, "end_time": "2021-08-21T12:15:49.483016", "exception": false, "start_time": "2021-08-21T12:15:49.387209", "status": "completed"} tags=[]
# ### Categorical Variables
#
# Now we move from the numeric columns to the categorical columns. These are discrete string variables, so we cannot just calculate statistics such as mean and max which only work with numeric variables. Instead, we will rely on calculating value counts of each category within each categorical variable.
# + papermill={"duration": 0.777365, "end_time": "2021-08-21T12:15:50.407221", "exception": false, "start_time": "2021-08-21T12:15:49.629856", "status": "completed"} tags=[]
# one-hot encode only the categorical (object-dtype) columns of bureau
categorical = pd.get_dummies(bureau.select_dtypes('object'))
# re-attach the client id so the dummies can be grouped per client
categorical['SK_ID_CURR'] = bureau['SK_ID_CURR']
categorical.head()
# + papermill={"duration": 2.028982, "end_time": "2021-08-21T12:15:52.530392", "exception": false, "start_time": "2021-08-21T12:15:50.501410", "status": "completed"} tags=[]
# per client: 'sum' of a dummy column = count of that category, 'mean' = normalized count
categorical_grouped = categorical.groupby('SK_ID_CURR').agg(['sum', 'mean'])
categorical_grouped.head()
# + [markdown] papermill={"duration": 0.094974, "end_time": "2021-08-21T12:15:52.721007", "exception": false, "start_time": "2021-08-21T12:15:52.626033", "status": "completed"} tags=[]
# The sum columns represent the count of that category for the associated client and the mean represents the normalized count. One-hot encoding makes the process of calculating these figures very easy!
#
# We can use a similar function as before to rename the columns. Again, we have to deal with the multi-level index for the columns. We iterate through the first level (level 0) which is the name of the categorical variable appended with the value of the category (from one-hot encoding). Then we iterate stats we calculated for each client. We will rename the column with the level 0 name appended with the stat. As an example, the column with CREDIT_ACTIVE_Active as level 0 and sum as level 1 will become CREDIT_ACTIVE_Active_count.
# + papermill={"duration": 0.109066, "end_time": "2021-08-21T12:15:52.927004", "exception": false, "start_time": "2021-08-21T12:15:52.817938", "status": "completed"} tags=[]
# first level of the column multi-index: the one-hot encoded variable names
categorical_grouped.columns.levels[0][:10]
# + papermill={"duration": 0.10511, "end_time": "2021-08-21T12:15:53.129416", "exception": false, "start_time": "2021-08-21T12:15:53.024306", "status": "completed"} tags=[]
# second level of the column multi-index: the aggregation statistics ('sum', 'mean')
categorical_grouped.columns.levels[1]
# + papermill={"duration": 0.143633, "end_time": "2021-08-21T12:15:53.370499", "exception": false, "start_time": "2021-08-21T12:15:53.226866", "status": "completed"} tags=[]
group_var = 'SK_ID_CURR'

# need to create new column names
columns = []

# iterate through the variables names
for val in categorical_grouped.columns.levels[0]:
    # skip the grouping variable
    if val != group_var:
        # iterate through the stat names
        for stat in ['count', 'count_norm']:
            # make a new column name for the variable and stat
            # BUG FIX: the loop variable here is `val`; the original used
            # `var`, a stale leftover binding from an earlier cell's loop,
            # so every new column got the same wrong variable name
            columns.append('%s_%s' % (val, stat))

# rename the columns
categorical_grouped.columns = columns

categorical_grouped.head()
# + papermill={"duration": 0.580904, "end_time": "2021-08-21T12:15:54.049565", "exception": false, "start_time": "2021-08-21T12:15:53.468661", "status": "completed"} tags=[]
# join the per-client categorical counts onto train; the grouped frame's index is SK_ID_CURR
train = train.merge(categorical_grouped, left_on = 'SK_ID_CURR', right_index = True, how = 'left')
train.head()
# + [markdown] papermill={"duration": 0.102088, "end_time": "2021-08-21T12:15:54.252999", "exception": false, "start_time": "2021-08-21T12:15:54.150911", "status": "completed"} tags=[]
# ### Function to Handle Categorical Variables
# To make the code more efficient, we can now write a function to handle the categorical variables for us. This will take the same form as the agg_numeric function in that it accepts a dataframe and a grouping variable. Then it will calculate the counts and normalized counts of each category for all categorical variables in the dataframe.
# + papermill={"duration": 0.109684, "end_time": "2021-08-21T12:15:54.460895", "exception": false, "start_time": "2021-08-21T12:15:54.351211", "status": "completed"} tags=[]
def count_categorical(df, group_var, df_name):
    """Computes counts and normalized counts for each observation
    of `group_var` of each unique category in every categorical variable

    Parameters
    --------
    df : dataframe
        The dataframe to calculate the value counts for.

    group_var : string
        The variable by which to group the dataframe. For each unique
        value of this variable, the final dataframe will have one row

    df_name : string
        Variable added to the front of column names to keep track of columns

    Return
    --------
    categorical : dataframe
        A dataframe with counts and normalized counts of each unique category in every categorical variable
        with one row for every unique value of the `group_var`.
    """
    # one-hot encode only the object (categorical) columns
    encoded = pd.get_dummies(df.select_dtypes('object'))

    # carry the grouping id alongside the dummy columns
    encoded[group_var] = df[group_var]

    # per group: the sum of a dummy column is the raw count of that
    # category, and the mean is the normalized count
    grouped = encoded.groupby(group_var).agg(['sum', 'mean'])

    # flatten the (variable, stat) multi-index into descriptive names,
    # prefixed with df_name so the feature origin stays visible
    grouped.columns = [
        '%s_%s_%s' % (df_name, variable, stat)
        for variable in grouped.columns.levels[0]
        for stat in ['count', 'count_norm']
    ]

    return grouped
# + papermill={"duration": 2.797053, "end_time": "2021-08-21T12:15:57.358877", "exception": false, "start_time": "2021-08-21T12:15:54.561824", "status": "completed"} tags=[]
# value counts of every bureau categorical variable, one row per client
bureau_counts = count_categorical(bureau, group_var = 'SK_ID_CURR', df_name = 'bureau')
bureau_counts.head()
# + [markdown] papermill={"duration": 0.09833, "end_time": "2021-08-21T12:15:57.555959", "exception": false, "start_time": "2021-08-21T12:15:57.457629", "status": "completed"} tags=[]
# ### Applying Operations to another dataframe
# We will now turn to the bureau balance dataframe. This dataframe has monthly information about each client's previous loan(s) with other financial institutions. Instead of grouping this dataframe by the SK_ID_CURR which is the client id, we will first group the dataframe by the SK_ID_BUREAU which is the id of the previous loan. This will give us one row of the dataframe for each loan. Then, we can group by the SK_ID_CURR and calculate the aggregations across the loans of each client. The final result will be a dataframe with one row for each client, with stats calculated for their loans.
# + papermill={"duration": 11.176432, "end_time": "2021-08-21T12:16:08.833008", "exception": false, "start_time": "2021-08-21T12:15:57.656576", "status": "completed"} tags=[]
# read in bureau balance: monthly status records for each previous loan
bureau_balance = pd.read_csv('../input/home-credit-default-risk/bureau_balance.csv')
bureau_balance.head()
# + [markdown] papermill={"duration": 0.102776, "end_time": "2021-08-21T12:16:09.035296", "exception": false, "start_time": "2021-08-21T12:16:08.932520", "status": "completed"} tags=[]
# First, we can calculate the value counts of each status for each loan. Fortunately, we already have a function that does this for us
# + papermill={"duration": 9.295784, "end_time": "2021-08-21T12:16:18.432261", "exception": false, "start_time": "2021-08-21T12:16:09.136477", "status": "completed"} tags=[]
# counts of each type of STATUS for each previous loan (grouped by loan id, not client id)
bureau_balance_counts = count_categorical(bureau_balance, group_var = 'SK_ID_BUREAU',
                                          df_name = 'bureau_balance')
bureau_balance_counts.head()
# + [markdown] papermill={"duration": 0.10047, "end_time": "2021-08-21T12:16:18.632439", "exception": false, "start_time": "2021-08-21T12:16:18.531969", "status": "completed"} tags=[]
# Now we can handle the one numeric column. The MONTHS_BALANCE column has the "months of balance relative to application date." This might not necessarily be that important as a numeric variable, and in future work we might want to consider this as a time variable. For now, we can just calculate the same aggregation statistics as previously.
# + papermill={"duration": 2.056781, "end_time": "2021-08-21T12:16:20.789071", "exception": false, "start_time": "2021-08-21T12:16:18.732290", "status": "completed"} tags=[]
# calculate numeric aggregation statistics for each previous loan ('SK_ID_BUREAU'),
# not per client -- the per-client aggregation happens in a second pass below
bureau_balance_agg = agg_numeric(bureau_balance, group_var = 'SK_ID_BUREAU',
                                 df_name = 'bureau_balance')
bureau_balance_agg.head()
# + [markdown] papermill={"duration": 0.100275, "end_time": "2021-08-21T12:16:20.990558", "exception": false, "start_time": "2021-08-21T12:16:20.890283", "status": "completed"} tags=[]
# The above dataframes have the calculations done on each loan. Now we need to aggregate these for each client. We can do this by merging the dataframes together first and then since all the variables are numeric, we just need to aggregate the statistics again, this time grouping by the SK_ID_CURR.
# + papermill={"duration": 1.239512, "end_time": "2021-08-21T12:16:22.334337", "exception": false, "start_time": "2021-08-21T12:16:21.094825", "status": "completed"} tags=[]
# dataframe grouped by loan: combine the numeric stats and the status counts (counts are indexed by loan id)
bureau_by_loan = bureau_balance_agg.merge(bureau_balance_counts, right_index = True,
                                          left_on = 'SK_ID_BUREAU', how = 'outer')
# merge to include the SK_ID_CURR so we can later aggregate per client
bureau_by_loan = bureau_by_loan.merge(bureau[['SK_ID_BUREAU', 'SK_ID_CURR']], on = 'SK_ID_BUREAU',
                                      how = 'left')
bureau_by_loan.head()
# + papermill={"duration": 1.741298, "end_time": "2021-08-21T12:16:24.176605", "exception": false, "start_time": "2021-08-21T12:16:22.435307", "status": "completed"} tags=[]
# second-level aggregation: per-client statistics over the per-loan statistics
bureau_balance_by_client = agg_numeric(bureau_by_loan.drop(columns = ['SK_ID_BUREAU']),
                                       group_var = 'SK_ID_CURR', df_name = 'client')
bureau_balance_by_client.head()
# + [markdown] papermill={"duration": 0.10253, "end_time": "2021-08-21T12:16:24.391124", "exception": false, "start_time": "2021-08-21T12:16:24.288594", "status": "completed"} tags=[]
# To recap, for the bureau_balance dataframe we:
#
# 1. Calculated numeric stats grouping by each loan
# 2. Made value counts of each categorical variable grouping by loan
# 3. Merged the stats and the value counts on the loans
# 4. Calculated numeric stats for the resulting dataframe grouping by the client id
#
# The final resulting dataframe has one row for each client, with statistics calculated for all of their loans with monthly balance information.
#
# Some of these variables are a little confusing, so let's try to explain a few:
#
# * client_bureau_balance_MONTHS_BALANCE_mean_mean: For each loan calculate the mean value of MONTHS_BALANCE. Then for each client, calculate the mean of this value for all of their loans.
# * client_bureau_balance_STATUS_X_count_norm_sum: For each loan, calculate the number of occurences of STATUS == X divided by the number of total STATUS values for the loan. Then, for each client, add up the values for each loan.
#
# We will hold off on calculating the correlations until we have all the variables together in one dataframe.
# + [markdown] papermill={"duration": 0.103874, "end_time": "2021-08-21T12:16:24.597864", "exception": false, "start_time": "2021-08-21T12:16:24.493990", "status": "completed"} tags=[]
# ### Putting the Functions Together
#
# We now have all the pieces in place to take the information from the previous loans at other institutions and the monthly payments information about these loans and put them into the main training dataframe. Let's do a reset of all the variables and then use the functions we built to do this from the ground up. This demonstrates the benefit of using functions for repeatable workflows!
# + papermill={"duration": 0.227112, "end_time": "2021-08-21T12:16:24.927274", "exception": false, "start_time": "2021-08-21T12:16:24.700162", "status": "completed"} tags=[]
# free up memory by deleting old objects before re-deriving everything with the functions
import gc
gc.enable()
del train, bureau, bureau_balance, bureau_agg, bureau_agg_new, bureau_balance_agg, bureau_balance_counts, bureau_by_loan, bureau_balance_by_client, bureau_counts
gc.collect()
# + papermill={"duration": 15.291021, "end_time": "2021-08-21T12:16:40.322004", "exception": false, "start_time": "2021-08-21T12:16:25.030983", "status": "completed"} tags=[]
# read in fresh copies of all the dataframes to rebuild the features from scratch
train = pd.read_csv('../input/home-credit-default-risk/application_train.csv')
bureau = pd.read_csv('../input/home-credit-default-risk/bureau.csv')
bureau_balance = pd.read_csv('../input/home-credit-default-risk/bureau_balance.csv')
# + [markdown] papermill={"duration": 0.10476, "end_time": "2021-08-21T12:16:40.528944", "exception": false, "start_time": "2021-08-21T12:16:40.424184", "status": "completed"} tags=[]
# #### Counts of Bureau Dataframe
# + papermill={"duration": 2.638494, "end_time": "2021-08-21T12:16:43.269706", "exception": false, "start_time": "2021-08-21T12:16:40.631212", "status": "completed"} tags=[]
# category value counts of bureau, one row per client
bureau_counts = count_categorical(bureau, group_var = 'SK_ID_CURR', df_name = 'bureau')
bureau_counts.head()
# + [markdown] papermill={"duration": 0.102118, "end_time": "2021-08-21T12:16:43.474800", "exception": false, "start_time": "2021-08-21T12:16:43.372682", "status": "completed"} tags=[]
# #### Aggregated Stats of Bureau Dataframe
# + papermill={"duration": 2.424924, "end_time": "2021-08-21T12:16:46.003552", "exception": false, "start_time": "2021-08-21T12:16:43.578628", "status": "completed"} tags=[]
# numeric aggregation stats of bureau per client (loan id dropped so it is not aggregated)
bureau_agg = agg_numeric(bureau.drop(columns = ['SK_ID_BUREAU']), group_var = 'SK_ID_CURR', df_name = 'bureau')
bureau_agg.head()
# + [markdown] papermill={"duration": 0.105109, "end_time": "2021-08-21T12:16:46.213298", "exception": false, "start_time": "2021-08-21T12:16:46.108189", "status": "completed"} tags=[]
# #### Value counts of Bureau Balance dataframe by loan
# + papermill={"duration": 9.292431, "end_time": "2021-08-21T12:16:55.611084", "exception": false, "start_time": "2021-08-21T12:16:46.318653", "status": "completed"} tags=[]
# STATUS value counts of bureau_balance, one row per previous loan
bureau_balance_counts = count_categorical(bureau_balance, group_var = 'SK_ID_BUREAU', df_name = 'bureau_balance')
bureau_balance_counts.head()
# + [markdown] papermill={"duration": 0.106196, "end_time": "2021-08-21T12:16:55.822086", "exception": false, "start_time": "2021-08-21T12:16:55.715890", "status": "completed"} tags=[]
# #### Aggregated stats of Bureau Balance dataframe by loan
# + papermill={"duration": 2.042383, "end_time": "2021-08-21T12:16:57.970221", "exception": false, "start_time": "2021-08-21T12:16:55.927838", "status": "completed"} tags=[]
# numeric aggregation stats of bureau_balance, one row per previous loan
bureau_balance_agg = agg_numeric(bureau_balance, group_var = 'SK_ID_BUREAU', df_name = 'bureau_balance')
bureau_balance_agg.head()
# + papermill={"duration": 5.77702, "end_time": "2021-08-21T12:17:03.852293", "exception": false, "start_time": "2021-08-21T12:16:58.075273", "status": "completed"} tags=[]
# dataframe grouped by the loan: join per-loan stats with per-loan status counts
bureau_by_loan = bureau_balance_agg.merge(bureau_balance_counts,
                                          right_index = True, left_on = 'SK_ID_BUREAU', how = 'outer')
# merge to include the SK_ID_CURR (client id) for each loan
bureau_by_loan = bureau[['SK_ID_BUREAU', 'SK_ID_CURR']].merge(bureau_by_loan,
                                                              on = 'SK_ID_BUREAU', how = 'left')
# aggregate the per-loan stats again, this time for each client
bureau_balance_by_client = agg_numeric(bureau_by_loan.drop(columns = ['SK_ID_BUREAU']), group_var = 'SK_ID_CURR', df_name = 'client')
# + [markdown] papermill={"duration": 0.105747, "end_time": "2021-08-21T12:17:04.063694", "exception": false, "start_time": "2021-08-21T12:17:03.957947", "status": "completed"} tags=[]
# #### Insert Computed Features into Training Data
# + papermill={"duration": 0.117922, "end_time": "2021-08-21T12:17:04.286887", "exception": false, "start_time": "2021-08-21T12:17:04.168965", "status": "completed"} tags=[]
# snapshot the feature list before merging, to measure how many features we add
original_features = list(train.columns)
print('original number of features: ', len(original_features))
# + papermill={"duration": 7.975088, "end_time": "2021-08-21T12:17:12.369109", "exception": false, "start_time": "2021-08-21T12:17:04.394021", "status": "completed"} tags=[]
# Merge with the value counts of bureau (per-client categorical counts)
train = train.merge(bureau_counts, on = 'SK_ID_CURR', how = 'left')
# Merge with the numeric aggregation stats of bureau
train = train.merge(bureau_agg, on = 'SK_ID_CURR', how = 'left')
# Merge with the monthly balance information grouped by client
train = train.merge(bureau_balance_by_client, on = 'SK_ID_CURR', how = 'left')
# + papermill={"duration": 0.115394, "end_time": "2021-08-21T12:17:12.589712", "exception": false, "start_time": "2021-08-21T12:17:12.474318", "status": "completed"} tags=[]
# count the features after adding the bureau / bureau_balance information
new_features = list(train.columns)
# FIX: corrected the garbled status message ('features previous loands' -> 'features using previous loans')
print('number of features using previous loans from other institutions data: ', len(new_features))
# + [markdown] papermill={"duration": 0.107191, "end_time": "2021-08-21T12:17:12.803185", "exception": false, "start_time": "2021-08-21T12:17:12.695994", "status": "completed"} tags=[]
# ### Feature Engineering Outcomes
# After all that work, now we want to take a look at the variables we have created. We can look at the percentage of missing values, the correlations of variables with the target, and also the correlation of variables with the other variables. The correlations between variables can show if we have collinear varibles, that is, variables that are highly correlated with one another. Often, we want to remove one in a pair of collinear variables because having both variables would be redundant. We can also use the percentage of missing values to remove features with a substantial majority of values that are not present. Feature selection will be an important focus going forward, because reducing the number of features can help the model learn during training and also generalize better to the testing data. The "curse of dimensionality" is the name given to the issues caused by having too many features (too high of a dimension). As the number of variables increases, the number of datapoints needed to learn the relationship between these variables and the target value increases exponentially.
#
# Feature selection is the process of removing variables to help our model to learn and generalize better to the testing set. The objective is to remove useless/redundant variables while preserving those that are useful. There are a number of tools we can use for this process, but in this notebook we will stick to removing columns with a high percentage of missing values and variables that have a high correlation with one another. Later we can look at using the feature importances returned from models such as the Gradient Boosting Machine or Random Forest to perform feature selection.
# + [markdown] papermill={"duration": 0.107123, "end_time": "2021-08-21T12:17:13.015964", "exception": false, "start_time": "2021-08-21T12:17:12.908841", "status": "completed"} tags=[]
# #### Missing Values
# An important consideration is the missing values in the dataframe. Columns with too many missing values might have to be dropped.
# + papermill={"duration": 0.118024, "end_time": "2021-08-21T12:17:13.240982", "exception": false, "start_time": "2021-08-21T12:17:13.122958", "status": "completed"} tags=[]
# function to calculate missing values by column
def missing_values_table(df):
    """Summarize missing data per column.

    Returns a dataframe indexed by column name with the absolute count
    and percentage of missing values, restricted to columns that have at
    least one missing value and sorted by percentage descending.
    """
    # absolute number of missing values per column
    total_missing = df.isnull().sum()
    # percentage of missing values per column
    percent_missing = 100 * df.isnull().sum() / len(df)

    # side-by-side table of count and percentage
    summary = pd.concat([total_missing, percent_missing], axis = 1)
    summary = summary.rename(columns = {0 : 'Missing Values', 1 : '% of Total Values'})

    # keep only columns with missing data, sorted by percentage descending
    summary = (
        summary[summary.iloc[:, 1] != 0]
        .sort_values('% of Total Values', ascending = False)
        .round(1)
    )

    # print some summary information
    print('your selected dataframe has ' + str(df.shape[1]) + ' columns.\n'
        'there are ' + str(summary.shape[0]) + ' columns that have missing values')

    # return the dataframe with missing information
    return summary
# + papermill={"duration": 1.418367, "end_time": "2021-08-21T12:17:14.764823", "exception": false, "start_time": "2021-08-21T12:17:13.346456", "status": "completed"} tags=[]
# missing-value summary for the merged training data
missing_train = missing_values_table(train)
missing_train.head(10)
# + [markdown] papermill={"duration": 0.106672, "end_time": "2021-08-21T12:17:14.980806", "exception": false, "start_time": "2021-08-21T12:17:14.874134", "status": "completed"} tags=[]
# We see there are a number of columns with a high percentage of missing values. There is no well-established threshold for removing missing values, and the best course of action depends on the problem. Here, to reduce the number of features, we will remove any columns in either the training or the testing data that have greater than 90% missing values.
# + papermill={"duration": 0.115462, "end_time": "2021-08-21T12:17:15.203015", "exception": false, "start_time": "2021-08-21T12:17:15.087553", "status": "completed"} tags=[]
# training columns exceeding the 90%-missing threshold (candidates for removal)
missing_train_vars = list(missing_train.index[missing_train['% of Total Values'] > 90])
len(missing_train_vars)
# + [markdown] papermill={"duration": 0.106466, "end_time": "2021-08-21T12:17:15.417549", "exception": false, "start_time": "2021-08-21T12:17:15.311083", "status": "completed"} tags=[]
# Before we remove the missing values, we will find the missing value percentages in the testing data. We'll then remove any columns with greater than 90% missing values in either the training or testing data. Let's now read in the testing data, perform the same operations, and look at the missing values in the testing data. We already have calculated all the counts and aggregation statistics, so we only need to merge the testing data with the appropriate data.
# + [markdown] papermill={"duration": 0.109037, "end_time": "2021-08-21T12:17:15.634281", "exception": false, "start_time": "2021-08-21T12:17:15.525244", "status": "completed"} tags=[]
# ### Calculate Information for Testing Data
# + papermill={"duration": 2.794574, "end_time": "2021-08-21T12:17:18.535161", "exception": false, "start_time": "2021-08-21T12:17:15.740587", "status": "completed"} tags=[]
# Read in the test dataframe
test = pd.read_csv('../input/home-credit-default-risk/application_test.csv')
# merge with the value counts of bureau
test = test.merge(bureau_counts, on = 'SK_ID_CURR', how = 'left')
# merge with the numeric aggregation stats of bureau
test = test.merge(bureau_agg, on = 'SK_ID_CURR', how = 'left')
# merge with the monthly balance information grouped by client
test = test.merge(bureau_balance_by_client, on = 'SK_ID_CURR', how = 'left')
# + papermill={"duration": 0.116987, "end_time": "2021-08-21T12:17:18.760010", "exception": false, "start_time": "2021-08-21T12:17:18.643023", "status": "completed"} tags=[]
# sanity check: dimensions of the merged testing data
print('shape of testing data: ', test.shape)
# + [markdown] papermill={"duration": 0.10678, "end_time": "2021-08-21T12:17:18.975407", "exception": false, "start_time": "2021-08-21T12:17:18.868627", "status": "completed"} tags=[]
# We need to align the testing and training dataframes, which means matching up the columns so they have the exact same columns. This shouldn't be an issue here, but when we one-hot encode variables, we need to align the dataframes to make sure they have the same columns.
# + papermill={"duration": 1.084026, "end_time": "2021-08-21T12:17:20.168813", "exception": false, "start_time": "2021-08-21T12:17:19.084787", "status": "completed"} tags=[]
# save the labels first: the inner align below drops any column absent from test, including TARGET
train_labels = train['TARGET']
# align the dataframes so train and test share exactly the same columns
train, test = train.align(test, join = 'inner', axis = 1)
# restore the labels on the aligned training data
train['TARGET'] = train_labels
# + papermill={"duration": 0.116954, "end_time": "2021-08-21T12:17:20.396196", "exception": false, "start_time": "2021-08-21T12:17:20.279242", "status": "completed"} tags=[]
# after alignment, train should have exactly one more column (TARGET) than test
print('training data shape: ', train.shape)
print('testing data shape: ', test.shape)
# + [markdown] papermill={"duration": 0.106822, "end_time": "2021-08-21T12:17:20.611537", "exception": false, "start_time": "2021-08-21T12:17:20.504715", "status": "completed"} tags=[]
# The dataframes now have the same columns (with the exception of the TARGET column in the training data). This means we can use them in a machine learning model which needs to see the same columns in both the training and testing dataframes.
#
# Let's now look at the percentage of missing values in the testing data so we can figure out the columns that should be dropped.
# + papermill={"duration": 0.334753, "end_time": "2021-08-21T12:17:21.054519", "exception": false, "start_time": "2021-08-21T12:17:20.719766", "status": "completed"} tags=[]
# missing-value summary for the merged testing data
missing_test = missing_values_table(test)
missing_test.head(10)
# + papermill={"duration": 0.120727, "end_time": "2021-08-21T12:17:21.284117", "exception": false, "start_time": "2021-08-21T12:17:21.163390", "status": "completed"} tags=[]
# testing columns exceeding the 90%-missing threshold
missing_test_vars = list(missing_test.index[missing_test['% of Total Values'] > 90])
len(missing_test_vars)
# + papermill={"duration": 0.135413, "end_time": "2021-08-21T12:17:21.530270", "exception": false, "start_time": "2021-08-21T12:17:21.394857", "status": "completed"} tags=[]
# union of the train and test high-missing columns; set() removes duplicates
missing_columns = list(set(missing_test_vars + missing_train_vars))
print('there are %d columns with more than 90%% missing in either the training or testing data'
      % len(missing_columns))
# + papermill={"duration": 0.509608, "end_time": "2021-08-21T12:17:22.164018", "exception": false, "start_time": "2021-08-21T12:17:21.654410", "status": "completed"} tags=[]
# drop the high-missing columns from both datasets to keep them aligned
train = train.drop(columns = missing_columns)
test = test.drop(columns = missing_columns)
# + [markdown] papermill={"duration": 0.110914, "end_time": "2021-08-21T12:17:22.388018", "exception": false, "start_time": "2021-08-21T12:17:22.277104", "status": "completed"} tags=[]
# We ended up removing no columns in this round because there are no columns with more than 90% missing values. We might have to apply another feature selection method to reduce the dimensionality.
# + [markdown] papermill={"duration": 0.110827, "end_time": "2021-08-21T12:17:22.611972", "exception": false, "start_time": "2021-08-21T12:17:22.501145", "status": "completed"} tags=[]
# At this point we will save both the training and testing data. I encourage anyone to try different percentages for dropping the missing columns and compare the outcomes.
# + papermill={"duration": 91.462506, "end_time": "2021-08-21T12:18:54.185644", "exception": false, "start_time": "2021-08-21T12:17:22.723138", "status": "completed"} tags=[]
# save the raw (pre-feature-selection) datasets for later comparison
train.to_csv('train_bureau_raw.csv', index = False)
# BUG FIX: filename was misspelled 'test_vureau_raw.csv', which would break
# any later cell or notebook expecting the matching 'test_bureau_raw.csv'
test.to_csv('test_bureau_raw.csv', index = False)
# + [markdown] papermill={"duration": 0.109178, "end_time": "2021-08-21T12:18:54.411557", "exception": false, "start_time": "2021-08-21T12:18:54.302379", "status": "completed"} tags=[]
# ### Correlations
# First let's look at the correlations of the variables with the target. We can see if any of the variables we created have a greater correlation than those already present in the training data (from application).
# + papermill={"duration": 89.748384, "end_time": "2021-08-21T12:20:24.270282", "exception": false, "start_time": "2021-08-21T12:18:54.521898", "status": "completed"} tags=[]
# calculate all pairwise correlations in the dataframe (this is slow on the full feature set)
corrs = train.corr()
# + papermill={"duration": 0.127506, "end_time": "2021-08-21T12:20:24.527467", "exception": false, "start_time": "2021-08-21T12:20:24.399961", "status": "completed"} tags=[]
# order rows by correlation with the target, most positive first
corrs = corrs.sort_values('TARGET', ascending = False)

# ten most positive correlations (TARGET itself will be first with corr = 1)
pd.DataFrame(corrs['TARGET'].head(10))
# + papermill={"duration": 0.127816, "end_time": "2021-08-21T12:20:24.766658", "exception": false, "start_time": "2021-08-21T12:20:24.638842", "status": "completed"} tags=[]
# ten most negative correlations (dropna removes columns with undefined correlation)
pd.DataFrame(corrs['TARGET'].dropna().tail(10))
# + [markdown] papermill={"duration": 0.11222, "end_time": "2021-08-21T12:20:24.990205", "exception": false, "start_time": "2021-08-21T12:20:24.877985", "status": "completed"} tags=[]
# The highest correlated variable with the target (other than the TARGET which of course has a correlation of 1), is a variable we created. However, just because the variable is correlated does not mean that it will be useful, and we have to remember that if we generate hundreds of new variables, some are going to be correlated with the target simply because of random noise.
#
# Viewing the correlations skeptically, it does appear that several of the newly created variables may be useful. To assess the "usefulness" of variables, we will look at the feature importances returned by the model. For curiousity's sake (and because we already wrote the function) we can make a kde plot of two of the newly created variables.
# + papermill={"duration": 1.74026, "end_time": "2021-08-21T12:20:26.843555", "exception": false, "start_time": "2021-08-21T12:20:25.103295", "status": "completed"} tags=[]
# kde plot of a created feature against the target (kde_target is defined earlier in this notebook)
kde_target(var_name='bureau_CREDIT_ACTIVE_Active_count_norm', df=train)
# + [markdown] papermill={"duration": 0.117207, "end_time": "2021-08-21T12:20:27.074862", "exception": false, "start_time": "2021-08-21T12:20:26.957655", "status": "completed"} tags=[]
# Well this distribution is all over the place. This variable represents the number of previous loans with a CREDIT_ACTIVE value of Active divided by the total number of previous loans for a client. The correlation here is so weak that I do not think we should draw any conclusions!
# + [markdown] papermill={"duration": 0.112867, "end_time": "2021-08-21T12:20:27.301911", "exception": false, "start_time": "2021-08-21T12:20:27.189044", "status": "completed"} tags=[]
# #### Collinear Variables
#
# We can calculate not only the correlations of the variables with the target, but also the correlation of each variable with every other variable. This will allow us to see if there are highly collinear variables that should perhaps be removed from the data.
#
# Let's look for any variables that have a greater than 0.8 correlation with other variables.
# + papermill={"duration": 0.207938, "end_time": "2021-08-21T12:20:27.622328", "exception": false, "start_time": "2021-08-21T12:20:27.414390", "status": "completed"} tags=[]
# set the threshold
threshold = 0.8

# for every column, record the variables whose correlation with it exceeds the threshold
above_threshold_vars = {
    col: list(corrs.index[corrs[col] > threshold]) for col in corrs
}
# + [markdown] papermill={"duration": 0.11423, "end_time": "2021-08-21T12:20:27.850183", "exception": false, "start_time": "2021-08-21T12:20:27.735953", "status": "completed"} tags=[]
# For each of these pairs of highly correlated variables, we only want to remove one of the variables. The following code creates a set of variables to remove by only adding one of each pair.
# + papermill={"duration": 0.131759, "end_time": "2021-08-21T12:20:28.097217", "exception": false, "start_time": "2021-08-21T12:20:27.965458", "status": "completed"} tags=[]
# track columns to remove and columns already examined
cols_to_remove = []
cols_seen = []
cols_to_remove_pair = []

# iterate through columns and correlated columns
for key, value in above_threshold_vars.items():
    # keep track of columns already examined
    cols_seen.append(key)
    for x in value:
        # every column correlates perfectly with itself -- skip that pairing
        # BUG FIX: the original had the bare expression `next` here, which
        # merely evaluates the builtin and does nothing; `continue` was the
        # intent (the code only worked because the real logic sat in `else`)
        if x == key:
            continue
        else:
            # only want to remove one in a pair
            if x not in cols_seen:
                cols_to_remove.append(x)
                cols_to_remove_pair.append(key)

cols_to_remove = list(set(cols_to_remove))
print('number of columns to remove: ', len(cols_to_remove))
# + [markdown] papermill={"duration": 0.113219, "end_time": "2021-08-21T12:20:28.326922", "exception": false, "start_time": "2021-08-21T12:20:28.213703", "status": "completed"} tags=[]
# We can remove these columns from both the training and the testing datasets. We will have to compare performance after removing these variables with performance keeping these variables (the raw csv files we saved earlier).
# + papermill={"duration": 0.330315, "end_time": "2021-08-21T12:20:28.771271", "exception": false, "start_time": "2021-08-21T12:20:28.440956", "status": "completed"} tags=[]
# drop one member of each highly-correlated pair from both datasets
train_corrs_removed = train.drop(columns = cols_to_remove)
test_corrs_removed = test.drop(columns = cols_to_remove)
print('training corrs removed shape: ', train_corrs_removed.shape)
print('testing corrs removed shape: ', test_corrs_removed.shape)
# + papermill={"duration": 50.442437, "end_time": "2021-08-21T12:21:19.329072", "exception": false, "start_time": "2021-08-21T12:20:28.886635", "status": "completed"} tags=[]
# save the de-collinearized datasets for the modeling comparison
train_corrs_removed.to_csv('train_bureau_corrs_removed.csv', index = False)
test_corrs_removed.to_csv('test_bureau_corrs_removed.csv', index = False)
# + [markdown] papermill={"duration": 0.113593, "end_time": "2021-08-21T12:21:19.557815", "exception": false, "start_time": "2021-08-21T12:21:19.444222", "status": "completed"} tags=[]
# ### Modeling
#
# To actually test the performance of these new datasets, we will try using them for machine learning! Here we will use a function I developed in another notebook to compare the features (the raw version with the highly correlated variables removed). We can run this kind of like an experiment, and the control will be the performance of just the application data in this function when submitted to the competition. I've already recorded that performance, so we can list out our control and our two test conditions:
#
# **For all datasets, use the model shown below (with the exact hyperparameters).**
#
# * control: only the data in the application files.
#
# * test one: the data in the application files with all of the data recorded from the bureau and bureau_balance files
#
# * test two: the data in the application files with all of the data recorded from the bureau and bureau_balance files with highly correlated variables removed.
# + papermill={"duration": 1.371751, "end_time": "2021-08-21T12:21:21.043027", "exception": false, "start_time": "2021-08-21T12:21:19.671276", "status": "completed"} tags=[]
import lightgbm as lgb
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
# + papermill={"duration": 0.144029, "end_time": "2021-08-21T12:21:21.303690", "exception": false, "start_time": "2021-08-21T12:21:21.159661", "status": "completed"} tags=[]
def model(features, test_features, encoding = 'ohe', n_folds = 5):
    """Train and test a light gradient boosting model using
    cross validation.

    Parameters
    --------
        features (pd.DataFrame):
            dataframe of training features to use
            for training a model. Must include the `SK_ID_CURR` and
            `TARGET` columns.
        test_features (pd.DataFrame):
            dataframe of testing features to use
            for making predictions with the model. Must include `SK_ID_CURR`.
        encoding (str, default = 'ohe'):
            method for encoding categorical variables. Either 'ohe' for
            one-hot encoding or 'le' for integer label encoding.
        n_folds (int, default = 5): number of folds to use for cross validation.

    Return
    --------
        submission (pd.DataFrame):
            dataframe with `SK_ID_CURR` and `TARGET` probabilities
            predicted by the model.
        feature_importances (pd.DataFrame):
            dataframe with the feature importances from the model
            (averaged over the folds).
        metrics (pd.DataFrame):
            dataframe with training and validation metrics (ROC AUC) for
            each fold and overall.

    Raises
    --------
        ValueError: if `encoding` is neither 'ohe' nor 'le'.
    """
    # extract the ids (needed later to build the submission dataframe)
    train_ids = features['SK_ID_CURR']
    test_ids = test_features['SK_ID_CURR']
    # extract the labels for training
    labels = features['TARGET']
    # remove the ids and target from the feature matrices
    features = features.drop(columns = ['SK_ID_CURR', 'TARGET'])
    test_features = test_features.drop(columns = ['SK_ID_CURR'])
    # one hot encoding
    if encoding == 'ohe':
        features = pd.get_dummies(features)
        test_features = pd.get_dummies(test_features)
        # align the dataframes by the columns; 'inner' keeps only the
        # columns present in both train and test
        features, test_features = features.align(test_features, join = 'inner', axis = 1)
        # no categorical indices to record
        cat_indices = 'auto'
    # integer label encoding
    elif encoding == 'le':
        # create a label encoder
        label_encoder = LabelEncoder()
        # list for storing categorical indices
        cat_indices = []
        # iterate through each column
        for i, col in enumerate(features):
            if features[col].dtype == 'object':
                # map the categorical features to integers
                # NOTE(review): the encoder is re-fit on the test column, so
                # the train/test integer mappings may differ -- confirm this
                # is intended before relying on 'le' encoding
                features[col] = label_encoder.fit_transform(np.array(features[col].astype(str)).reshape((-1, )))
                test_features[col] = label_encoder.fit_transform(np.array(test_features[col].astype(str)).reshape((-1, )))
                # record the categorical indices
                cat_indices.append(i)
    # catch error if label encoding scheme is not valid
    else:
        raise ValueError("Encoding must be either 'ohe' or 'le'")
    print('training data shape: ', features.shape)
    print('testing data shape: ', test_features.shape)
    # extract feature names before converting to plain arrays
    feature_names = list(features.columns)
    # convert to np arrays (the original did this twice; once is enough)
    features = np.array(features)
    test_features = np.array(test_features)
    # create the kfold object; random_state is only meaningful (and, in
    # recent scikit-learn, only legal) when shuffle=True, so it is omitted
    k_fold = KFold(n_splits = n_folds, shuffle = False)
    # empty array for feature importances
    feature_importance_values = np.zeros(len(feature_names))
    # empty array for test predictions
    test_predictions = np.zeros(test_features.shape[0])
    # empty array for out of fold validation predictions
    out_of_fold = np.zeros(features.shape[0])
    # lists for recording validation and training scores
    valid_scores = []
    train_scores = []
    # iterate through each fold
    for train_indices, valid_indices in k_fold.split(features):
        # training data for the fold
        train_features, train_labels = features[train_indices], labels[train_indices]
        # validation data for the fold
        valid_features, valid_labels = features[valid_indices], labels[valid_indices]
        # create the model
        model = lgb.LGBMClassifier(n_estimators=10000, objective = 'binary',
                                   class_weight = 'balanced', learning_rate = 0.05,
                                   reg_alpha = 0.1, reg_lambda = 0.1,
                                   subsample = 0.8, n_jobs = -1, random_state = 50)
        # train the model with early stopping on the validation fold
        model.fit(train_features, train_labels, eval_metric = 'auc',
                  eval_set = [(valid_features, valid_labels), (train_features, train_labels)],
                  eval_names = ['valid', 'train'], categorical_feature = cat_indices,
                  early_stopping_rounds = 100, verbose = 200)
        # record the best iteration
        best_iteration = model.best_iteration_
        # record the feature importances, averaged over the folds
        feature_importance_values += model.feature_importances_ / k_fold.n_splits
        # average this fold's test predictions into the ensemble
        test_predictions += model.predict_proba(test_features, num_iteration = best_iteration)[:, 1] / k_fold.n_splits
        # record the out of fold predictions
        out_of_fold[valid_indices] = model.predict_proba(valid_features, num_iteration = best_iteration)[:, 1]
        # record the best score of each fold
        valid_score = model.best_score_['valid']['auc']
        train_score = model.best_score_['train']['auc']
        valid_scores.append(valid_score)
        train_scores.append(train_score)
        # clean up memory between folds (the booster can be large)
        gc.enable()
        del model, train_features, valid_features
        gc.collect()
    # make the submission dataframe
    submission = pd.DataFrame({'SK_ID_CURR' : test_ids, 'TARGET' : test_predictions})
    # make the feature importance dataframe
    feature_importances = pd.DataFrame({'feature' : feature_names, 'importance' : feature_importance_values})
    # overall validation score, computed from the out of fold predictions
    valid_auc = roc_auc_score(labels, out_of_fold)
    # add the overall scores to the metrics
    valid_scores.append(valid_auc)
    train_scores.append(np.mean(train_scores))
    # needed for creating dataframe of validation scores
    fold_names = list(range(n_folds))
    fold_names.append('overall')
    # dataframe of validation scores
    metrics = pd.DataFrame({'fold' : fold_names,
                            'train' : train_scores,
                            'valid' : valid_scores})
    return submission, feature_importances, metrics
# + papermill={"duration": 0.128215, "end_time": "2021-08-21T12:21:21.546612", "exception": false, "start_time": "2021-08-21T12:21:21.418397", "status": "completed"} tags=[]
def plot_feature_importances(df):
    """
    Plot importances returned by a model. This can work with any measure of
    feature importance provided that higher importance is better.

    Args:
        df (dataframe): feature importances. Must have the features in a column
        called `feature` and the importances in a column called `importance`.

    Returns:
        shows a plot of the 15 most important features

        df (dataframe): feature importances sorted by importance (highest to lowest)
        with a column for normalized importance
    """
    # sort features according to importance
    df = df.sort_values('importance', ascending = False).reset_index()
    # normalize the feature importances to add up to one
    df['importance_normalized'] = df['importance'] / df['importance'].sum()
    # Make a horizontal bar chart of feature importances
    plt.figure(figsize = (10, 6))
    ax = plt.subplot()
    # need to reverse the index to plot the most important feature on top
    ax.barh(list(reversed(list(df.index[:15]))),
            df['importance_normalized'].head(15),
            align = 'center', edgecolor = 'k')
    # set the yticks and labels
    ax.set_yticks(list(reversed(list(df.index[:15]))))
    ax.set_yticklabels(df['feature'].head(15))
    # plot labeling
    plt.xlabel('normalized importance');
    plt.title('feature importances');
    plt.show()
    return df
# + [markdown] papermill={"duration": 0.113188, "end_time": "2021-08-21T12:21:21.773202", "exception": false, "start_time": "2021-08-21T12:21:21.660014", "status": "completed"} tags=[]
# #### Control
# The first step in any experiment is establishing a control. For this we will use the function defined above (that implements a Gradient Boosting Machine model) and the single main data source (application).
# + papermill={"duration": 5.337384, "end_time": "2021-08-21T12:21:27.224908", "exception": false, "start_time": "2021-08-21T12:21:21.887524", "status": "completed"} tags=[]
# read in the raw application data; this serves as the experimental control
train_control = pd.read_csv('../input/home-credit-default-risk/application_train.csv')
test_control = pd.read_csv('../input/home-credit-default-risk/application_test.csv')
# + [markdown] papermill={"duration": 0.114683, "end_time": "2021-08-21T12:21:27.455163", "exception": false, "start_time": "2021-08-21T12:21:27.340480", "status": "completed"} tags=[]
# Fortunately, once we have taken the time to write a function, using it is simple (if there's a central theme in this notebook, it's use functions to make things simpler and reproducible!). The function above returns a submission dataframe we can upload to the competition, a fi dataframe of feature importances, and a metrics dataframe with validation and test performance.
# + papermill={"duration": 166.234189, "end_time": "2021-08-21T12:24:13.804054", "exception": false, "start_time": "2021-08-21T12:21:27.569865", "status": "completed"} tags=[]
submission, fi, metrics = model(train_control, test_control)
# + [markdown] papermill={"duration": 0.122086, "end_time": "2021-08-21T12:24:14.046272", "exception": false, "start_time": "2021-08-21T12:24:13.924186", "status": "completed"} tags=[]
# The control slightly overfits because the training score is higher than the validation score. We can address this in later notebooks when we look at regularization (we already perform some regularization in this model by using reg_lambda and reg_alpha as well as early stopping).
#
# We can visualize the feature importance with another function, plot_feature_importances. The feature importances may be useful when it's time for feature selection.
# + papermill={"duration": 0.386869, "end_time": "2021-08-21T12:24:14.553198", "exception": false, "start_time": "2021-08-21T12:24:14.166329", "status": "completed"} tags=[]
fi_sorted = plot_feature_importances(fi)
# + papermill={"duration": 0.316156, "end_time": "2021-08-21T12:24:14.991309", "exception": false, "start_time": "2021-08-21T12:24:14.675153", "status": "completed"} tags=[]
submission.to_csv('control.csv', index = False)
# + [markdown] papermill={"duration": 0.130995, "end_time": "2021-08-21T12:24:15.247914", "exception": false, "start_time": "2021-08-21T12:24:15.116919", "status": "completed"} tags=[]
# **The control scores 0.745 when submitted to the competition.**
# + [markdown] papermill={"duration": 0.121336, "end_time": "2021-08-21T12:24:15.508514", "exception": false, "start_time": "2021-08-21T12:24:15.387178", "status": "completed"} tags=[]
# ------------------------
#
# #### Test One
# Let's conduct the first test. We will just need to pass in the data to the function, which does most of the work for us.
# + papermill={"duration": 335.1957, "end_time": "2021-08-21T12:29:50.825696", "exception": false, "start_time": "2021-08-21T12:24:15.629996", "status": "completed"} tags=[]
submission_raw, fi_raw, metrics_raw = model(train, test)
# + [markdown] papermill={"duration": 0.128351, "end_time": "2021-08-21T12:29:51.081365", "exception": false, "start_time": "2021-08-21T12:29:50.953014", "status": "completed"} tags=[]
# Based on these numbers, the engineered features perform better than the control case. However, we will have to submit the predictions to the leaderboard before we can say if this better validation performance transfers to the testing data.
# + papermill={"duration": 0.384451, "end_time": "2021-08-21T12:29:51.592777", "exception": false, "start_time": "2021-08-21T12:29:51.208326", "status": "completed"} tags=[]
fi_raw_sorted = plot_feature_importances(fi_raw)
# + [markdown] papermill={"duration": 0.130215, "end_time": "2021-08-21T12:29:51.852438", "exception": false, "start_time": "2021-08-21T12:29:51.722223", "status": "completed"} tags=[]
# Examining the feature importances, it looks as if a few of the features we constructed are among the most important. Let's find the percentage of the top 100 most important features that we made in this notebook. However, rather than just comparing to the original features, we need to compare to the one-hot encoded original features. These are already recorded for us in fi (from the original data).
# + papermill={"duration": 0.142713, "end_time": "2021-08-21T12:29:52.124519", "exception": false, "start_time": "2021-08-21T12:29:51.981806", "status": "completed"} tags=[]
# the 100 most important features from test one
top_100 = list(fi_raw_sorted['feature'])[:100]
# features in the top 100 that do not appear in the original (one-hot
# encoded) application features recorded in fi
new_features = [x for x in top_100 if x not in list(fi['feature'])]
# since top_100 holds exactly 100 entries, the count equals a percentage
print("% of top 100 features created from the bureau data = {}".format(len(new_features)))
# + [markdown] papermill={"duration": 0.129929, "end_time": "2021-08-21T12:29:52.384415", "exception": false, "start_time": "2021-08-21T12:29:52.254486", "status": "completed"} tags=[]
# Over half of the top 100 features were made by us! That should give us confidence that all the hard work we did was worthwhile.
# + papermill={"duration": 0.32176, "end_time": "2021-08-21T12:29:52.835831", "exception": false, "start_time": "2021-08-21T12:29:52.514071", "status": "completed"} tags=[]
submission_raw.to_csv('test_one.csv', index = False)
# + [markdown] papermill={"duration": 0.131365, "end_time": "2021-08-21T12:29:53.096432", "exception": false, "start_time": "2021-08-21T12:29:52.965067", "status": "completed"} tags=[]
# **Test one scores 0.759 when submitted to the competition.**
# + [markdown] papermill={"duration": 0.130294, "end_time": "2021-08-21T12:29:53.357490", "exception": false, "start_time": "2021-08-21T12:29:53.227196", "status": "completed"} tags=[]
# -------------------------------------
# #### Test Two
# That was easy, so let's do another run! Same as before but with the highly collinear variables removed.
# + papermill={"duration": 227.386674, "end_time": "2021-08-21T12:33:40.873335", "exception": false, "start_time": "2021-08-21T12:29:53.486661", "status": "completed"} tags=[]
submission_corrs, fi_corrs, metrics_corr = model(train_corrs_removed, test_corrs_removed)
# + papermill={"duration": 0.394212, "end_time": "2021-08-21T12:33:41.403231", "exception": false, "start_time": "2021-08-21T12:33:41.009019", "status": "completed"} tags=[]
fi_corrs_sorted = plot_feature_importances(fi_corrs)
# + papermill={"duration": 0.329443, "end_time": "2021-08-21T12:33:41.870717", "exception": false, "start_time": "2021-08-21T12:33:41.541274", "status": "completed"} tags=[]
submission_corrs.to_csv('test_two.csv', index = False)
# + [markdown] papermill={"duration": 0.358365, "end_time": "2021-08-21T12:33:42.368783", "exception": false, "start_time": "2021-08-21T12:33:42.010418", "status": "completed"} tags=[]
# **Test Two scores 0.753 when submitted to the competition.**
| notebooks/notebooks_to_review/manual-feature-engineering-by-will_075.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tributary as t
from perspective import PerspectiveWidget
import tributary.functional as tp
# wrap a websocket URL as a streaming tributary source (messages parsed as JSON)
foo = tp.wrap(tp.ws, url='wss://ws.paine.nyc', wrap=True, json=True)
# empty Perspective line-chart widget that the stream will populate
p = PerspectiveWidget([], view='y_line')
p
# pipe each websocket message into the widget via its update callback
t.pipeline([foo], ['callback'], on_data=p.update)
# stop the running pipeline
t.stop()
# HTTP source: fetch citibike station data and extract the stationBeanList field
http = tp.wrap(tp.http, url='https://unpkg.com/@jpmorganchase/perspective-examples@0.1.18/build/citibike.json', json=True, field='stationBeanList')
# scatter-plot widget of station locations colored by available bikes
psp = PerspectiveWidget([], view='xy_scatter', columns=['longitude', 'latitude', 'availableBikes'])
psp
t.pipeline([http], ['callback'], on_data=psp.update)
| examples/pipeline_ws_http_sio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial for *diskmap*
# This tutorial showcases the functionalities of *diskmap*. We will use a polarized scattered light image of the LkCa 15 circumstellar disk. The data were obtained with [VLT/SPHERE](https://www.eso.org/sci/facilities/paranal/instruments/sphere.html) in the $J$ band and have been published by [Thalmann et al. (2016)](https://ui.adsabs.harvard.edu/abs/2016ApJ...828L..17T/abstract).
# ## Getting started
# We start by importing the Python modules that are required for this tutorial.
import diskmap
import glob
import matplotlib.pyplot as plt
import numpy as np
import urllib.request
from astropy.io import fits
# Next, we download the $J$ band image of LkCa 15 circumstellar disk.
# download the SPHERE/IRDIS J-band image of the LkCa 15 disk
urllib.request.urlretrieve('https://home.strw.leidenuniv.nl/~stolker/diskmap/lkca15_irdis_qphi.fits',
                           'lkca15_irdis_qphi.fits')
# We can read the FITS file with `astropy`.
image = fits.getdata('lkca15_irdis_qphi.fits')
# Let's have a look at the image. The central masked region contains NaNs.
plt.imshow(image, origin='lower', vmin=np.nanmin(image), vmax=0.35*np.nanmax(image))
# ## Mapping of the disk surface
# We will now create an instance of `DiskMap` by providing the FITS filename, the pixel scale of the IRDIS detector (12.25 mas), the inclination (50 deg) and position angle (90 deg) of the disk, the distance (160 pc), and the image type (polarized flux).
#
# The inclination convention is such that the near side is located on the right side of the image when using an inclination between 0 and 90 deg and a position angle of 0 deg. Therefore, with a position angle of 90 deg, the near side will be in upward direction of the image, as we will also see later in the `_radius.fits` file.
# instantiate the mapping with the instrument pixel scale and disk geometry
mapping = diskmap.DiskMap(fitsfile='lkca15_irdis_qphi.fits',
                          pixscale=0.01225,
                          inclination=50.,
                          pos_angle=90.,
                          distance=160.,
                          image_type='polarized')
# The scattering surface of the disk is mapped with the `map_disk` method. Here, we provide a powerlaw function as the (approximate) shape of the disk surface for which we assume a constant opening angle: $h(r) = 0 + 0.05r^{1}$. The argument of `radius` specifies the sampling of the radii (100 points between 1 and 500 au).
#
# For running the deprojection later on, it is important that the outer radius of the `radius` parameter is larger than the field of view of the image. This may not be possible if the disk is strongly inclined and flaring. A mapping of the full field of view is not required for the $r^2$ scaling and phase function extraction. In that case, a smaller outer radius can be used, for example the actual outer radius of the disk. The radius and scattering angle output will contain NaNs beyond the outer radius.
mapping.map_disk(power_law=(0., 0.05, 1.),
                 radius=(1., 500., 100))
# ## Radius and scattering angle
# The available output from the `DiskMap` methods are written by calling `write_output`. The argument of `filename` contains the prefix of the output files.
mapping.write_output(filename='lkca15')
# Let's see which FITS files have been written.
glob.glob('*.fits')
# For simplicity with the plots, we define half the field of view in arcseconds.
# half the field of view in arcseconds, used for the plot axis extents
size = mapping.pixscale * image.shape[0]/2
# The deprojected radius (in au) from the disk surface to the star is stored in the `_radius.fits` file. Let's plot the image from this FITS file.
radius = fits.getdata('lkca15_radius.fits')
plt.imshow(radius, origin='lower', extent=[size, -size, -size, size])
plt.xlabel('RA offset (arcsec)', fontsize=14)
plt.ylabel('Dec offset (arcsec)', fontsize=14)
cb = plt.colorbar()
cb.set_label(label='Deprojected radius (au)', size=14)
# Similarly, the scattering angles on the disk surface are stored in the `_scat_angle.fits` file. The scattering angle is defined as 180 degrees minus the angle between the direction from the disk surface to the star and the direction from the disk surface to the observer.
# plot the scattering angle (deg) at each pixel of the disk surface
scat_angle = fits.getdata('lkca15_scat_angle.fits')
plt.imshow(scat_angle, origin='lower', extent=[size, -size, -size, size])
plt.xlabel('RA offset (arcsec)', fontsize=14)
plt.ylabel('Dec offset (arcsec)', fontsize=14)
cb = plt.colorbar()
cb.set_label(label='Scattering angle (deg)', size=14)
# ## Irradiation correction
# Now that we have the deprojected distance from each pixel to the star, we can compute the stellar irradiation corrected (i.e. $r^2$ scaled) image. We set a maximum radius of 100 au such that the flux at large separations, which only consists of noise, is not enhanced by the scaling.
# scale the flux by the squared deprojected radius, capped at 100 au so
# that pure-noise pixels at large separation are not amplified
mapping.r2_scaling(r_max=100.)
# We run again the `write_output` method such that also the r$^2$ scaled image is stored as FITS file.
mapping.write_output(filename='lkca15')
# Let's have a look at the r$^2$ scaled image. The dynamical range is smaller compared to the regular image which brings out the disk features more clearly.
r2_scaled = fits.getdata('lkca15_r2_scaled.fits')
plt.imshow(r2_scaled, origin='lower', extent=[size, -size, -size, size])
plt.xlabel('RA offset (arcsec)', fontsize=14)
plt.ylabel('Dec offset (arcsec)', fontsize=14)
cb = plt.colorbar()
cb.set_label(label='Flux (r$^2$ ADU)', size=14)
# ## Disk deprojection
# Next, we will use the 3D mapping of the disk surface to deproject the image with the `deproject_disk` method. The deprojection corrects therefore both for the inclination (i.e. the disk midplane) and height of the disk surface (i.e. the powerlaw profile).
# deproject the image, correcting for both inclination and surface height
mapping.deproject_disk()
# And we write again all available output files.
mapping.write_output(filename='lkca15')
# The deprojected image is stored in the FITS file with the `_deprojected.fits` suffix. This image shows what the disk would look like at an inclination of 0 degrees. Let's have a look at the result.
deprojected = fits.getdata('lkca15_deprojected.fits')
plt.imshow(deprojected, origin='lower', extent=[size, -size, -size, size],
           vmin=np.amin(deprojected), vmax=0.25*np.amax(deprojected))
plt.xlabel('RA offset (arcsec)', fontsize=14)
plt.ylabel('Dec offset (arcsec)', fontsize=14)
cb = plt.colorbar()
cb.set_label(label='Flux (ADU)', size=14)
# ## Estimated total intensity image
# By assuming a bell-shaped (i.e. Rayleigh-like curve with an adjustable peak value) degree of polarization and using the scattering angles from before, we use the `total_intensity` method to convert the r$^2$-scaled, polarized intensity image into an estimated total intensity image. This method should therefore only be used if the input image is a polarized light image (i.e. `image_type='polarized'`).
#
# In this example, we assume a maximum polarization of 100% at a scattering angle of 90 degrees, which is to be expected for aggregate-like dust grains with submicron-sized monomers.
# estimate total intensity from the polarized image, assuming 100% maximum
# polarization at a 90 deg scattering angle
mapping.total_intensity(pol_max=1.)
# We write again all available output, which now also includes the r$^2$-scaled, total intensity image.
mapping.write_output(filename='lkca15')
# Let's plot the total intensity image. The forward scattering by dust grains on the near/north side of the disk is visible in this image.
total_intensity = fits.getdata('lkca15_total_intensity.fits')
plt.imshow(total_intensity, origin='lower', extent=[size, -size, -size, size])
plt.xlabel('RA offset (arcsec)', fontsize=14)
plt.ylabel('Dec offset (arcsec)', fontsize=14)
cb = plt.colorbar()
cb.set_label(label='Flux (r$^2$ ADU)', size=14)
# ## Scattering phase function
# As a last step, we extract the scattering phase function, that is, the normalized flux as a function of scattering angle. We use the `phase_function` method and select pixels between a deprojected distance of 80 and 100 au (i.e. along the bright ring in the r$^2$-scaled image) and calculate the average flux in 30 steps between 0 and 180 degrees.
# average the flux in 30 scattering-angle bins for pixels between 80 and 100 au
mapping.phase_function(radius=(80., 100.), n_phase=30)
# We run again the `write_output` method to store the phase function in a text file.
mapping.write_output(filename='lkca15')
# We can read the extracted phase function with the `loadtxt` function of `numpy`. The second and third column of the data file contain the extracted phase function and error, which in this case is the polarized phase function. The fourth and fifth column contains an estimated total intensity phase function, which assumes that the degree of polarization is bell-shaped.
#
# In case the input image is a total intensity image (i.e. `image_type='total'`), the data file contains only the regular / total intensity phase function.
angle, pol_flux, pol_error, total_flux, total_error = np.loadtxt('lkca15_phase_function.dat', unpack=True)
# Let's plot the polarized phase function that is extracted from the r$^2$-scaled image.
plt.errorbar(angle, pol_flux, yerr=pol_error)
plt.xlabel('Scattering angle (deg)', fontsize=14)
plt.ylabel('Normalized polarized flux', fontsize=14)
# Finally, we plot the total intensity phase function, which shows the onset of a strong forward scattering peak and a more shallow backward scattering peak.
plt.errorbar(angle, total_flux, yerr=total_error)
plt.xlabel('Scattering angle (deg)', fontsize=14)
plt.ylabel('Normalized total flux', fontsize=14)
| docs/tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + language="javascript"
# function toggler(){
# if(window.already_toggling){
# // Don't add multiple buttons.
# return 0
# }
# let btn = $('.input').append('<button>Toggle Code</button>')
# .children('button');
# btn.on('click', function(e){
# let tgt = e.currentTarget;
# $(tgt).parent().children('.inner_cell').toggle()
# })
# window.already_toggling = true;
# }
# -
# a simple test cell: assign a value and display it
test = 5
test
| notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="ur8xi4C7S06n"
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="JAPoU8Sm5E6e"
# <table align="left">
#
# <td>
# <a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/notebook_template.ipynb"">
# <img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
# </a>
# </td>
# <td>
# <a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/notebook_template.ipynb">
# <img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
# View on GitHub
# </a>
# </td>
# </table>
# + [markdown] id="tvgnzT1CKxrO"
# ## Overview
#
# {TODO: Include a paragraph or two explaining what this example demonstrates, who should be interested in it, and what you need to know before you get started.}
#
# ### Dataset
#
# {TODO: Include a paragraph with Dataset information and where to obtain it.}
#
# {TODO: Make sure the dataset is accessible to the public. **Googlers**: Add your dataset to the [public samples bucket](http://goto/cloudsamples#sample-storage-bucket) within gs://cloud-samples-data/ai-platform-unified, if it doesn't already exist there.}
#
# ### Objective
#
# In this notebook, you will learn how to {TODO: Complete the sentence explaining briefly what you will learn from the notebook, such as
# training, hyperparameter tuning, or serving}:
#
# * {TODO: Add high level bullets for the steps of what you will perform in the notebook}
#
# ### Costs
#
# {TODO: Update the list of billable products that your tutorial uses.}
#
# This tutorial uses billable components of Google Cloud:
#
# * AI Platform (Unified)
# * Cloud Storage
#
# {TODO: Include links to pricing documentation for each product you listed above.}
#
# Learn about [AI Platform (Unified)
# pricing](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storage
# pricing](https://cloud.google.com/storage/pricing), and use the [Pricing
# Calculator](https://cloud.google.com/products/calculator/)
# to generate a cost estimate based on your projected usage.
# + [markdown] id="ze4-nDLfK4pw"
# ### Set up your local development environment
#
# **If you are using Colab or AI Platform Notebooks**, your environment already meets
# all the requirements to run this notebook. You can skip this step.
# + [markdown] id="gCuSR8GkAgzl"
# **Otherwise**, make sure your environment meets this notebook's requirements.
# You need the following:
#
# * The Google Cloud SDK
# * Git
# * Python 3
# * virtualenv
# * Jupyter notebook running in a virtual environment with Python 3
#
# The Google Cloud guide to [Setting up a Python development
# environment](https://cloud.google.com/python/setup) and the [Jupyter
# installation guide](https://jupyter.org/install) provide detailed instructions
# for meeting these requirements. The following steps provide a condensed set of
# instructions:
#
# 1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)
#
# 1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)
#
# 1. [Install
# virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)
# and create a virtual environment that uses Python 3. Activate the virtual environment.
#
# 1. To install Jupyter, run `pip install jupyter` on the
# command-line in a terminal shell.
#
# 1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.
#
# 1. Open this notebook in the Jupyter Notebook Dashboard.
# + [markdown] id="i7EUnXsZhAGF"
# ### Install additional packages
#
# Install additional package dependencies not installed in your notebook environment, such as {XGBoost, AdaNet, or TensorFlow Hub TODO: Replace with relevant packages for the tutorial}. Use the latest major GA version of each package.
# + id="wyy5Lbnzg5fi"
# ! pip install -U tensorflow
# ! pip install -U numpy
# + [markdown] id="hhq5zEbGg0XX"
# ### Restart the kernel
#
# After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
# + id="EzrelQZ22IZj"
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
    # skip the restart under automated testing; otherwise shut the kernel
    # down so the freshly installed packages are picked up on restart
    import IPython
    app = IPython.Application.instance()
    app.kernel.do_shutdown(True)
# + [markdown] id="lWEdiXsJg0XY"
# ## Before you begin
#
# ### Select a GPU runtime
#
# **Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select "Runtime --> Change runtime type > GPU"**
# + [markdown] id="BF1j6f9HApxa"
# ### Set up your Google Cloud project
#
# **The following steps are required, regardless of your notebook environment.**
#
# 1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
#
# 1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
#
# 1. [Enable the AI Platform (Unified) API and Compute Engine API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component). {TODO: Update the APIs needed for your tutorial. Edit the API names, and update the link to append the API IDs, separating each one with a comma. For example, container.googleapis.com,cloudbuild.googleapis.com}
#
# 1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).
#
# 1. Enter your project ID in the cell below. Then run the cell to make sure the
# Cloud SDK uses the right project for all the commands in this notebook.
#
# **Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
# + [markdown] id="WReHDGG5g0XY"
# #### Set your project ID
#
# **If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
# + id="oM1iC_MfAts1"
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
    # NOTE(review): the next line is a Jupyter shell magic that jupytext has
    # commented out; `shell_output` is only defined when this cell is run as
    # a notebook -- confirm before executing as a plain script
    # shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
    PROJECT_ID = shell_output[0]
    print("Project ID: ", PROJECT_ID)
# + [markdown] id="qJYoRfYng0XZ"
# Otherwise, set your project ID here.
# + id="riG_qUokg0XZ"
# fall back to a manually entered project ID when gcloud did not supply one
if PROJECT_ID == "" or PROJECT_ID is None:
    PROJECT_ID = "[your-project-id]"  # @param {type:"string"}
# + [markdown] id="06571eb4063b"
# #### Timestamp
#
# If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
# + id="697568e92bd6"
from datetime import datetime

# Session timestamp (YYYYMMDDHHMMSS), appended to resource names created in
# this tutorial to avoid collisions between users sharing a project.
TIMESTAMP = "{:%Y%m%d%H%M%S}".format(datetime.now())
# + [markdown] id="dr--iN2kAylZ"
# ### Authenticate your Google Cloud account
#
# **If you are using AI Platform Notebooks**, your environment is already
# authenticated. Skip this step.
# + [markdown] id="sBCra4QMA2wR"
# **If you are using Colab**, run the cell below and follow the instructions
# when prompted to authenticate your account via oAuth.
#
# **Otherwise**, follow these steps:
#
# 1. In the Cloud Console, go to the [**Create service account key**
# page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).
#
# 2. Click **Create service account**.
#
# 3. In the **Service account name** field, enter a name, and
# click **Create**.
#
# 4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "AI Platform"
# into the filter box, and select
# **AI Platform Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
#
# 5. Click *Create*. A JSON file that contains your key downloads to your
# local environment.
#
# 6. Enter the path to your service account key as the
# `GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
# + id="PyQmSRbKA8r-"
import os
import sys

# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.

# If on AI Platform, then don't execute this code
# (this marker file only exists on AI Platform Notebooks images, which are
# already authenticated).
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
    if "google.colab" in sys.modules:
        from google.colab import auth as google_auth
        google_auth.authenticate_user()
    # If you are running this notebook locally, replace the string below with the
    # path to your service account key and run this cell to authenticate your GCP
    # account.
    # NOTE(review): this branch's body is only the commented-out "%env" magic
    # below — jupytext restores it when converting back to .ipynb, but as plain
    # Python the branch is empty.
    elif not os.getenv("IS_TESTING"):
        # %env GOOGLE_APPLICATION_CREDENTIALS ''
# + [markdown] id="zgPO1eR3CYjk"
# ### Create a Cloud Storage bucket
#
# **The following steps are required, regardless of your notebook environment.**
#
#
# {TODO: Adjust wording in the first paragraph to fit your use case - explain how your tutorial uses the Cloud Storage bucket. The example below shows how AI Platform uses the bucket for training.}
#
# When you submit a training job using the Cloud SDK, you upload a Python package
# containing your training code to a Cloud Storage bucket. AI Platform runs
# the code from this package. In this tutorial, AI Platform also saves the
# trained model that results from your job in the same bucket. Using this model artifact, you can then
# create AI Platform model and endpoint resources in order to serve
# online predictions.
#
# Set the name of your Cloud Storage bucket below. It must be unique across all
# Cloud Storage buckets.
#
# You may also change the `REGION` variable, which is used for operations
# throughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services are
# available](https://cloud.google.com/ai-platform-unified/docs/general/locations#available_regions). You may
# not use a Multi-Regional Storage bucket for training with AI Platform.
# + id="MzGDU7TWdts_"
# Cloud Storage bucket used for training artifacts; must be globally unique.
BUCKET_NAME = "gs://[your-bucket-name]"  # @param {type:"string"}
REGION = "[your-region]"  # @param {type:"string"}
# + id="cf221059d072"
# When the placeholder was left untouched, derive a unique default name from
# the project ID and the session timestamp.
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
    # Fix: separate the project ID from the "aip-" suffix with a hyphen so the
    # generated name reads "<project>-aip-<timestamp>" instead of fusing the
    # project ID and suffix together.
    BUCKET_NAME = "gs://" + PROJECT_ID + "-aip-" + TIMESTAMP
# + [markdown] id="-EcIXiGsCePi"
# **Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
# + id="NIq7R4HZCfIc"
# ! gsutil mb -l $REGION $BUCKET_NAME
# + [markdown] id="ucvCsknMCims"
# Finally, validate access to your Cloud Storage bucket by examining its contents:
# + id="vhOb7YnwClBb"
# ! gsutil ls -al $BUCKET_NAME
# + [markdown] id="XoEqT2Y4DJmf"
# ### Import libraries and define constants
# + [markdown] id="Y9Uo3tifg1kx"
# {TODO: Put all your imports and installs up into a setup section.}
#
# + id="pRUOFELefqf1"
import os
import sys
import numpy as np
import tensorflow as tf
# + [markdown] id="E6ppE7imft-y"
# ## General style examples
#
# ### Notebook heading
#
# - Include the collapsed license at the top (this uses Colab's "Form" mode to hide the cells).
# - Only include a single H1 title.
# - Include the button-bar immediately under the H1.
# - Check that the Colab and GitHub links at the top are correct.
#
# ### Notebook sections
#
# - Use H2 (##) and H3 (###) titles for notebook section headings.
# - Use [sentence case to capitalize titles and headings](https://developers.google.com/style/capitalization#capitalization-in-titles-and-headings). ("Train the model" instead of "Train the Model")
# - Include a brief text explanation before any code cells.
# - Use short titles/headings: "Download the data", "Build the model", "Train the model".
#
# ### Writing style
#
# - Use [present tense](https://developers.google.com/style/tense). ("You receive a response" instead of "You will receive a response")
# - Use [active voice](https://developers.google.com/style/voice). ("The service processes the request" instead of "The request is processed by the service")
# - Use [second person](https://developers.google.com/style/person) and an imperative style.
# - Correct examples: "Update the field", "You must update the field"
# - Incorrect examples: "Let's update the field", "We'll update the field", "The user should update the field"
# - **Googlers**: Please follow our [branding guidelines](http://goto/cloud-branding).
#
# ### Code
#
# - Put all your installs and imports in a setup section.
# - Save the notebook with the Table of Contents open.
# - Write Python 3 compatible code.
# - Follow the [Google Python Style guide](https://github.com/google/styleguide/blob/gh-pages/pyguide.md) and write readable code.
# - Keep cells small (max ~20 lines).
# + [markdown] id="euaAioez2WyE"
# ## TensorFlow code style
#
# Use the highest level API that gets the job done (unless the goal is to demonstrate the low level API). For example, when using Tensorflow:
#
# - Use TF.keras.Sequential > keras functional api > keras model subclassing > ...
#
# - Use model.fit > model.train_on_batch > manual GradientTapes.
#
# - Use eager-style code.
#
# - Use tensorflow_datasets and tf.data where possible.
# + [markdown] id="reBCSTKOg47l"
# ### Notebook code style examples
#
# - Notebooks are for people. Write code optimized for clarity.
#
# - Demonstrate small parts before combining them into something more complex. Like below:
# + id="MjJTYC86hPOZ"
# Assemble a small two-layer fully connected network.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, activation="relu", input_shape=(None, 5)),
    tf.keras.layers.Dense(3),
])
# + id="LWymd3KPhP_S"
# Push one random batch through the network and summarise the output tensor.
batch = tf.constant(np.random.randn(10, 5), dtype=tf.float32)
result = model(batch).numpy()
for label, value in [("min:", result.min()),
                     ("max:", result.max()),
                     ("mean:", result.mean()),
                     ("shape:", result.shape)]:
    print(label, value)
# + id="y-pQzaxnhR-0"
# Attach an optimizer and loss so the model is ready for training.
model.compile(optimizer=tf.keras.optimizers.Adam(),
              loss=tf.keras.losses.categorical_crossentropy)
# + [markdown] id="g0d8u7FmhUNf"
# - Keep examples quick. Use small datasets, or small slices of datasets. You don't need to train to convergence, train until it's obvious it's making progress.
#
# - For a large example, don't try to fit all the code in the notebook. Add python files to tensorflow examples, and in the notebook run:
# # # ! pip install git+https://github.com/tensorflow/examples
# + [markdown] id="TpV-iwP9qw9c"
# ## Cleaning up
#
# To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
# project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
#
# Otherwise, you can delete the individual resources you created in this tutorial:
#
# {TODO: Include commands to delete individual resources below}
# + id="sx_vKniMq9ZX"
# Delete endpoint resource
# ! gcloud ai endpoints delete $ENDPOINT_NAME --quiet --region $REGION_NAME
# Delete model resource
# ! gcloud ai models delete $MODEL_NAME --quiet
# Delete Cloud Storage objects that were created
# ! gsutil -m rm -r $JOB_DIR
# source: ai-platform-unified/notebooks/notebook_template.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="IkSguVy8Xv83"
# # **CycleGAN**
#
# ---
#
# <font size = 4>CycleGAN is a method that can capture the characteristics of one image domain and learn how these characteristics can be translated into another image domain, all in the absence of any paired training examples. It was first published by [Zhu *et al.* in 2017](https://arxiv.org/abs/1703.10593). Unlike pix2pix, the image transformation performed does not require paired images for training (unsupervised learning) and is made possible here by using a set of two Generative Adversarial Networks (GANs) that learn to transform images both from the first domain to the second and vice-versa.
#
# <font size = 4> **This particular notebook enables unpaired image-to-image translation. If your dataset is paired, you should also consider using the pix2pix notebook.**
#
# ---
#
# <font size = 4>*Disclaimer*:
#
# <font size = 4>This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.
#
# <font size = 4>This notebook is based on the following paper:
#
# <font size = 4> **Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks** from Zhu *et al.* published in arXiv in 2018 (https://arxiv.org/abs/1703.10593)
#
# <font size = 4>The source code of the CycleGAN PyTorch implementation can be found in: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
#
# <font size = 4>**Please also cite this original paper when using or developing this notebook.**
# + [markdown] id="jqvkQQkcuMmM"
# # **License**
#
# ---
# + cellView="form" id="vCihhAzluRvI"
#@markdown ##Double click to see the license information
#------------------------- LICENSE FOR ZeroCostDL4Mic------------------------------------
#This ZeroCostDL4Mic notebook is distributed under the MIT licence
#------------------------- LICENSE FOR CycleGAN ------------------------------------
#Copyright (c) 2017, <NAME> and <NAME>
#All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#--------------------------- LICENSE FOR pix2pix --------------------------------
#BSD License
#For pix2pix software
#Copyright (c) 2016, <NAME> and <NAME>
#All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#----------------------------- LICENSE FOR DCGAN --------------------------------
#BSD License
#For dcgan.torch software
#Copyright (c) 2015, Facebook, Inc. All rights reserved.
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# + [markdown] id="jWAz2i7RdxUV"
# #**0. Before getting started**
# ---
# <font size = 4> To train CycleGAN, **you only need two folders containing PNG images**. The images do not need to be paired.
#
# <font size = 4>While you do not need paired images to train CycleGAN, if possible, **we strongly recommend that you generate a paired dataset. This means that the same image needs to be acquired in the two conditions. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.
#
#
# <font size = 4> Please note that you currently can **only use .png files!**
#
#
# <font size = 4>Here's a common data structure that can work:
# * Experiment A
# - **Training dataset (non-matching images)**
# - Training_source
# - img_1.png, img_2.png, ...
# - Training_target
# - img_1.png, img_2.png, ...
# - **Quality control dataset (matching images)**
# - Training_source
# - img_1.png, img_2.png
# - Training_target
# - img_1.png, img_2.png
# - **Data to be predicted**
# - **Results**
#
# ---
# <font size = 4>**Important note**
#
# <font size = 4>- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.
#
# <font size = 4>- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.
#
# <font size = 4>- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.
# ---
# + [markdown] id="n4yWFoJNnoin"
# # **1. Initialise the Colab session**
# ---
#
#
#
#
#
# + [markdown] id="DMNHVZfHmbKb"
#
# ## **1.1. Check for GPU access**
# ---
#
# By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:
#
# <font size = 4>Go to **Runtime -> Change the Runtime type**
#
# <font size = 4>**Runtime type: Python 3** *(Python 3 is the programming language in which this program is written)*
#
# <font size = 4>**Accelerator: GPU** *(Graphics processing unit)*
#
# + cellView="form" id="zCvebubeSaGY"
#@markdown ##Run this cell to check if you have GPU access
import tensorflow as tf
if tf.test.gpu_device_name()=='':
print('You do not have GPU access.')
print('Did you change your runtime ?')
print('If the runtime setting is correct then Google did not allocate a GPU for your session')
print('Expect slow performance. To access GPU try reconnecting later')
else:
print('You have GPU access')
# !nvidia-smi
# + [markdown] id="sNIVx8_CLolt"
# ## **1.2. Mount your Google Drive**
# ---
# <font size = 4> To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.
#
# <font size = 4> Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive.
#
# <font size = 4> Once this is done, your data are available in the **Files** tab on the top left of notebook.
# + cellView="form" id="01Djr8v-5pPk"
#@markdown ##Play the cell to connect your Google Drive to Colab
#@markdown * Click on the URL.
#@markdown * Sign in to your Google Account.
#@markdown * Copy the authorization code.
#@markdown * Enter the authorization code.
#@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive".
# mount user's Google Drive to Google Colab.
# Side effect only: after the OAuth flow, the Drive appears under /content/gdrive.
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="AdN8B91xZO0x"
# # **2. Install CycleGAN and dependencies**
# ---
#
# + cellView="form" id="fq21zJVFNASx"
# Release tag of this notebook; compared against the published version list below.
Notebook_version = ['1.12']
#@markdown ##Install CycleGAN and dependencies
#------- Code from the cycleGAN demo notebook starts here -------
#Here, we install libraries which are not already included in Colab.
import sys
# Snapshot of modules loaded before our imports; diffed later to build a
# minimal requirements file for local runs.
before = [str(m) for m in sys.modules]
# !git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
import os
# Work from inside the cloned repository so the pip install below finds its
# requirements.txt and later training commands resolve relative paths.
os.chdir('pytorch-CycleGAN-and-pix2pix/')
# !pip install -r requirements.txt
# !pip install fpdf
import imageio
from skimage import data
from skimage import exposure
from skimage.exposure import match_histograms
from skimage.util import img_as_int
# ------- Common variable to all ZeroCostDL4Mic notebooks -------
import numpy as np
from matplotlib import pyplot as plt
import urllib
import os, random
import shutil
import zipfile
from tifffile import imread, imsave
import time
import sys
from pathlib import Path
import pandas as pd
import csv
from glob import glob
from scipy import signal
from scipy import ndimage
from skimage import io
from sklearn.linear_model import LinearRegression
from skimage.util import img_as_uint
import matplotlib as mpl
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
from astropy.visualization import simple_norm
from skimage import img_as_float32
from skimage.util import img_as_ubyte
from tqdm import tqdm
from fpdf import FPDF, HTMLMixin
from datetime import datetime
from pip._internal.operations.freeze import freeze
import subprocess
# Colors for the warning messages
class bcolors:
    # ANSI escape sequence for red text, used to make console warnings stand out.
    WARNING = '\033[31m'

#Disable some of the tensorflow warnings
import warnings
warnings.filterwarnings("ignore")
print("Libraries installed")
# Check if this is the latest version of the notebook.
# The published CSV's column header holds the latest release tag; compare it
# once and branch, instead of evaluating the same condition twice (the
# original repeated it negated in a second independent `if`).
Latest_notebook_version = pd.read_csv("https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_ZeroCostDL4Mic_Release.csv")
if Notebook_version == list(Latest_notebook_version.columns):
    print("This notebook is up-to-date.")
else:
    print(bcolors.WARNING +"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki")
def pdf_export(trained = False, augmentation = False, pretrained_model = False):
    """Build and save a PDF training report for the current cycleGAN model.

    Writes ``<model_path>/<model_name>/<model_name>_training_report.pdf``.
    Relies on notebook-level globals defined in earlier cells: ``model_name``,
    ``model_path``, ``Training_source``, ``Training_target``,
    ``number_of_epochs``, ``patch_size``, ``batch_size``,
    ``initial_learning_rate``, ``Use_Default_Advanced_Parameters`` and — when
    ``trained`` is True — ``hour``, ``mins``, ``sec``.
    NOTE(review): those globals must exist before calling; verify the
    parameter/training cells ran first.

    Parameters:
        trained (bool): include the elapsed training time in the report.
        augmentation (bool): state that the dataset was augmented.
        pretrained_model (bool): describe the run as retraining from a
            pretrained model instead of training from scratch.
    """
    # Local FPDF subclass mixing in HTML rendering (pdf.write_html below).
    class MyFPDF(FPDF, HTMLMixin):
        pass
    pdf = MyFPDF()
    pdf.add_page()
    pdf.set_right_margin(-1)
    pdf.set_font("Arial", size = 11, style='B')
    Network = 'cycleGAN'
    # Report date: first 10 characters of the datetime string, i.e. YYYY-MM-DD.
    day = datetime.now()
    datetime_str = str(day)[0:10]
    Header = 'Training report for '+Network+' model ('+model_name+')\nDate: '+datetime_str
    pdf.multi_cell(180, 5, txt = Header, align = 'L')
    # add another cell
    if trained:
        training_time = "Training time: "+str(hour)+ "hour(s) "+str(mins)+"min(s) "+str(round(sec))+"sec(s)"
        pdf.cell(190, 5, txt = training_time, ln = 1, align='L')
    pdf.ln(1)
    Header_2 = 'Information for your materials and method:'
    pdf.cell(190, 5, txt=Header_2, ln=1, align='L')
    # Flatten the installed-package list into one comma-separated string so the
    # key package versions can be extracted below with substring searches.
    all_packages = ''
    for requirement in freeze(local_only=True):
        all_packages = all_packages+requirement+', '
    #print(all_packages)
    #Main Packages
    main_packages = ''
    version_numbers = []
    for name in ['tensorflow','numpy','torch']:
        find_name=all_packages.find(name)
        main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '
        #Version numbers only here:
        # +2 skips the "==" between the package name and its version string.
        version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])
    # Query CUDA and GPU details via the command-line tools.
    cuda_version = subprocess.run('nvcc --version',stdout=subprocess.PIPE, shell=True)
    cuda_version = cuda_version.stdout.decode('utf-8')
    cuda_version = cuda_version[cuda_version.find(', V')+3:-1]
    gpu_name = subprocess.run('nvidia-smi',stdout=subprocess.PIPE, shell=True)
    gpu_name = gpu_name.stdout.decode('utf-8')
    # NOTE(review): assumes a Tesla-class GPU name in nvidia-smi output; other
    # GPU models yield an empty slice here — confirm on non-Tesla runtimes.
    gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]
    #print(cuda_version[cuda_version.find(', V')+3:-1])
    #print(gpu_name)
    # Example image shape and dataset size, quoted in the methods paragraph.
    shape = io.imread(Training_source+'/'+os.listdir(Training_source)[1]).shape
    dataset_size = len(os.listdir(Training_source))
    text = 'The '+Network+' model was trained from scratch for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a least-square GAN loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), numpy (v '+version_numbers[1]+'), torch (v '+version_numbers[2]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'
    if pretrained_model:
        text = 'The '+Network+' model was trained for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and an least-square GAN loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). The model was retrained from a pretrained model. Key python packages used include tensorflow (v '+version_numbers[0]+'), numpy (v '+version_numbers[1]+'), torch (v '+version_numbers[2]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'
    pdf.set_font('')
    pdf.set_font_size(10.)
    pdf.multi_cell(190, 5, txt = text, align='L')
    pdf.set_font('')
    pdf.set_font('Arial', size = 10, style = 'B')
    pdf.ln(1)
    pdf.cell(28, 5, txt='Augmentation: ', ln=0)
    pdf.set_font('')
    if augmentation:
        aug_text = 'The dataset was augmented by default'
    else:
        aug_text = 'No augmentation was used for training.'
    pdf.multi_cell(190, 5, txt=aug_text, align='L')
    pdf.set_font('Arial', size = 11, style = 'B')
    pdf.ln(1)
    pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)
    pdf.set_font('')
    pdf.set_font_size(10.)
    if Use_Default_Advanced_Parameters:
        pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')
    pdf.cell(200, 5, txt='The following parameters were used for training:')
    pdf.ln(1)
    # Parameter table rendered from an HTML template via HTMLMixin.
    html = """
    <table width=40% style="margin-left:0px;">
    <tr>
    <th width = 50% align="left">Parameter</th>
    <th width = 50% align="left">Value</th>
    </tr>
    <tr>
    <td width = 50%>number_of_epochs</td>
    <td width = 50%>{0}</td>
    </tr>
    <tr>
    <td width = 50%>patch_size</td>
    <td width = 50%>{1}</td>
    </tr>
    <tr>
    <td width = 50%>batch_size</td>
    <td width = 50%>{2}</td>
    </tr>
    <tr>
    <td width = 50%>initial_learning_rate</td>
    <td width = 50%>{3}</td>
    </tr>
    </table>
    """.format(number_of_epochs,str(patch_size)+'x'+str(patch_size),batch_size,initial_learning_rate)
    pdf.write_html(html)
    #pdf.multi_cell(190, 5, txt = text_2, align='L')
    pdf.set_font("Arial", size = 11, style='B')
    pdf.ln(1)
    pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)
    pdf.set_font('')
    pdf.set_font('Arial', size = 10, style = 'B')
    pdf.cell(30, 5, txt= 'Training_source:', align = 'L', ln=0)
    pdf.set_font('')
    pdf.multi_cell(170, 5, txt = Training_source, align = 'L')
    pdf.set_font('')
    pdf.set_font('Arial', size = 10, style = 'B')
    pdf.cell(29, 5, txt= 'Training_target:', align = 'L', ln=0)
    pdf.set_font('')
    pdf.multi_cell(170, 5, txt = Training_target, align = 'L')
    #pdf.cell(190, 5, txt=aug_text, align='L', ln=1)
    pdf.ln(1)
    pdf.set_font('')
    pdf.set_font('Arial', size = 10, style = 'B')
    pdf.cell(22, 5, txt= 'Model Path:', align = 'L', ln=0)
    pdf.set_font('')
    pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')
    pdf.ln(1)
    pdf.cell(60, 5, txt = 'Example Training pair', ln=1)
    pdf.ln(1)
    # Embed the example-pair PNG saved by the data-preparation cell, scaled 1/8.
    exp_size = io.imread('/content/TrainingDataExample_cycleGAN.png').shape
    pdf.image('/content/TrainingDataExample_cycleGAN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
    pdf.ln(1)
    ref_1 = 'References:\n - ZeroCostDL4Mic: <NAME>, Lucas & <NAME>, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).'
    pdf.multi_cell(190, 5, txt = ref_1, align='L')
    ref_2 = '- cycleGAN: <NAME>, et al. "Unpaired image-to-image translation using cycle-consistent adversarial networks." Proceedings of the IEEE international conference on computer vision. 2017.'
    pdf.multi_cell(190, 5, txt = ref_2, align='L')
    # if Use_Data_augmentation:
    # ref_3 = '- Augmentor: Bloice, <NAME>., <NAME>, and <NAME>. "Augmentor: an image augmentation library for machine learning." arXiv preprint arXiv:1708.04680 (2017).'
    # pdf.multi_cell(190, 5, txt = ref_3, align='L')
    pdf.ln(3)
    reminder = 'Important:\nRemember to perform the quality control step on all newly trained models\nPlease consider depositing your training dataset on Zenodo'
    pdf.set_font('Arial', size = 11, style='B')
    pdf.multi_cell(190, 5, txt=reminder, align='C')
    pdf.output(model_path+'/'+model_name+'/'+model_name+"_training_report.pdf")
def qc_pdf_export():
    """Build and save a PDF quality-control report for a trained model.

    Writes ``<full_QC_model_path>Quality Control/<QC_model_name>_QC_report.pdf``,
    embedding the SSIM-vs-checkpoint plot, an example QC visualisation, and a
    metrics table per checkpoint read from the QC CSV files.
    Relies on notebook-level globals: ``QC_model_name``, ``full_QC_model_path``
    and ``Image_type`` — NOTE(review): the QC cells that produce the PNG/CSV
    files must have run first; verify before calling.
    """
    # Local FPDF subclass mixing in HTML rendering (pdf.write_html below).
    class MyFPDF(FPDF, HTMLMixin):
        pass
    pdf = MyFPDF()
    pdf.add_page()
    pdf.set_right_margin(-1)
    pdf.set_font("Arial", size = 11, style='B')
    Network = 'cycleGAN'
    # Report date: first 10 characters of the datetime string, i.e. YYYY-MM-DD.
    day = datetime.now()
    datetime_str = str(day)[0:10]
    Header = 'Quality Control report for '+Network+' model ('+QC_model_name+')\nDate: '+datetime_str
    pdf.multi_cell(180, 5, txt = Header, align = 'L')
    all_packages = ''
    for requirement in freeze(local_only=True):
        all_packages = all_packages+requirement+', '
    pdf.set_font('')
    pdf.set_font('Arial', size = 11, style = 'B')
    pdf.ln(2)
    pdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L')
    pdf.ln(1)
    # Embed the SSIM-vs-checkpoint plot, scaled to 1/8 of its pixel size.
    exp_size = io.imread(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png').shape
    pdf.image(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
    pdf.ln(2)
    pdf.set_font('')
    pdf.set_font('Arial', size = 10, style = 'B')
    pdf.ln(3)
    pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)
    pdf.ln(1)
    # RGB previews are wider, so they are scaled less aggressively (1/5 vs 1/8).
    exp_size = io.imread(full_QC_model_path+'Quality Control/QC_example_data.png').shape
    if Image_type == 'RGB':
        pdf.image(full_QC_model_path+'Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/5), h = round(exp_size[0]/5))
    if Image_type == 'Grayscale':
        pdf.image(full_QC_model_path+'Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
    pdf.ln(1)
    pdf.set_font('')
    pdf.set_font('Arial', size = 11, style = 'B')
    pdf.ln(1)
    pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)
    pdf.set_font('')
    pdf.set_font_size(10.)
    pdf.ln(1)
    # One metrics table per checkpoint sub-directory of "Quality Control".
    for checkpoint in os.listdir(full_QC_model_path+'Quality Control'):
        if os.path.isdir(os.path.join(full_QC_model_path,'Quality Control',checkpoint)):
            pdf.set_font('')
            pdf.set_font('Arial', size = 10, style = 'B')
            pdf.cell(70, 5, txt = 'Metrics for checkpoint: '+ str(checkpoint), align='L', ln=1)
            html = """
            <body>
            <font size="8" face="Courier New" >
            <table width=95% style="margin-left:0px;">"""
            with open(full_QC_model_path+'Quality Control/'+str(checkpoint)+'/QC_metrics_'+QC_model_name+str(checkpoint)+'.csv', 'r') as csvfile:
                metrics = csv.reader(csvfile)
                # First CSV row holds the column titles used for the table header.
                header = next(metrics)
                image = header[0]
                mSSIM_PvsGT = header[1]
                mSSIM_SvsGT = header[2]
                header = """
                <tr>
                <th width = 60% align="left">{0}</th>
                <th width = 20% align="center">{1}</th>
                <th width = 20% align="center">{2}</th>
                </tr>""".format(image,mSSIM_PvsGT,mSSIM_SvsGT)
                html = html+header
                # Remaining rows: one table line per QC image, SSIM rounded to 3 dp.
                for row in metrics:
                    image = row[0]
                    mSSIM_PvsGT = row[1]
                    mSSIM_SvsGT = row[2]
                    cells = """
                    <tr>
                    <td width = 60% align="left">{0}</td>
                    <td width = 20% align="center">{1}</td>
                    <td width = 20% align="center">{2}</td>
                    </tr>""".format(image,str(round(float(mSSIM_PvsGT),3)),str(round(float(mSSIM_SvsGT),3)))
                    html = html+cells
                html = html+"""</body></table>"""
            pdf.write_html(html)
            pdf.ln(2)
        else:
            continue
    pdf.ln(1)
    pdf.set_font('')
    pdf.set_font_size(10.)
    ref_1 = 'References:\n - ZeroCostDL4Mic: <NAME>, <NAME>, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).'
    pdf.multi_cell(190, 5, txt = ref_1, align='L')
    ref_2 = '- cycleGAN: Zhu, Jun-Yan, et al. "Unpaired image-to-image translation using cycle-consistent adversarial networks." Proceedings of the IEEE international conference on computer vision. 2017.'
    pdf.multi_cell(190, 5, txt = ref_2, align='L')
    pdf.ln(3)
    reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'
    pdf.set_font('Arial', size = 11, style='B')
    pdf.multi_cell(190, 5, txt=reminder, align='C')
    pdf.output(full_QC_model_path+'Quality Control/'+QC_model_name+'_QC_report.pdf')
# Exporting requirements.txt for local run
# !pip freeze > ../requirements.txt
# Modules loaded once all imports above have run; diffed against `before`
# (captured at the top of this cell) to find what this notebook actually uses.
after = [str(m) for m in sys.modules]
# Get minimum requirements file
#Add the following lines before all imports:
# import sys
# before = [str(m) for m in sys.modules]
#Add the following line after the imports:
# after = [str(m) for m in sys.modules]
from builtins import any as b_any
def filter_files(file_list, filter_list):
    """Return the requirement lines whose package name occurs in a filter entry.

    file_list: lines from `pip freeze` ("package==version").
    filter_list: module/package names to keep (substring match).
    """
    kept = []
    for requirement in file_list:
        package = requirement.split('==')[0]
        if any(package in candidate for candidate in filter_list):
            kept.append(requirement)
    return kept
# Read the full `pip freeze` output, one requirement per line.
# NOTE(review): delimiter="\n" is deprecated in newer pandas; this relies on
# the pandas version available in the notebook environment — confirm if upgrading.
df = pd.read_csv('../requirements.txt', delimiter = "\n")

# Top-level names of the modules imported by this notebook
# (present in `after` but not in `before`).
mod_list = [m.split('.')[0] for m in after if not m in before]
req_list_temp = df.values.tolist()
req_list = [x[0] for x in req_list_temp]

# Map import names to their PyPI package names (e.g. 'sklearn' -> 'scikit-learn').
# BUG FIX: the original comprehension inserted the *whole* replacement list
# (['scikit-learn', 'scikit-image']) for every matching module instead of the
# single corresponding package name; a dict lookup gives the intended mapping.
mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]
mod_name_map = dict((pair[0], pair[1]) for pair in mod_name_list)
mod_replace_list = [mod_name_map.get(s, s) for s in mod_list]

filtered_list = filter_files(req_list, mod_replace_list)
# Write the minimal requirements file. A context manager guarantees the file
# handle is closed even if a write fails (the original opened and closed the
# handle manually, leaking it on error).
with open('../CycleGAN_requirements_simple.txt', 'w') as req_file:
    for item in filtered_list:
        req_file.write(item + '\n')
# + [markdown] id="HLYcZR9gMv42"
# # **3. Select your parameters and paths**
# ---
# + [markdown] id="FQ_QxtSWQ7CL"
# ## **3.1. Setting main training parameters**
# ---
# <font size = 4>
# + [markdown] id="AuESFimvMv43"
# <font size = 5> **Paths for training, predictions and results**
#
# <font size = 4>**`Training_source`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target training data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.
#
# <font size = 4>**`model_name`:** Use only my_model -style, not my-model (Use "_" not "-"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.
#
# <font size = 4>**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).
#
# <font size = 5>**Training Parameters**
#
# <font size = 4>**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10) epochs, but a full training should run for 200 epochs or more. Evaluate the performance after training (see 5). **Default value: 200**
#
#
# <font size = 5>**Advanced Parameters - experienced users only**
#
# <font size = 4>**`patch_size`:** CycleGAN divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 4. **Default value: 512**
#
# <font size = 4>**When choosing the patch_size, the value should be i) large enough that it will enclose many instances, ii) small enough that the resulting patches fit into the RAM.**<font size = 4>
#
# <font size =4>**`batch_size:`** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 1**
#
# <font size = 4>**`initial_learning_rate`:** Input the initial value to be used as learning rate. **Default value: 0.0002**
# + cellView="form" id="ewpNJ_I0Mv47"
#@markdown ###Path to training images:

Training_source = "" #@param {type:"string"}
InputFile = Training_source+"/*.png"

Training_target = "" #@param {type:"string"}
OutputFile = Training_target+"/*.png"

#Define where the patch file will be saved
base = "/content"

# model name and path
#@markdown ###Name of the model and path to model folder:
model_name = "" #@param {type:"string"}
model_path = "" #@param {type:"string"}

# other parameters for training.
#@markdown ###Training Parameters
#@markdown Number of epochs:
number_of_epochs = 200#@param {type:"number"}

#@markdown ###Advanced Parameters
Use_Default_Advanced_Parameters = True #@param {type:"boolean"}
#@markdown ###If not, please input:
patch_size = 512#@param {type:"number"} # in pixels
batch_size = 1#@param {type:"number"}
initial_learning_rate = 0.0002 #@param {type:"number"}

# When default advanced parameters are requested, overwrite whatever was
# typed into the form fields above with the recommended values.
if (Use_Default_Advanced_Parameters):
    print("Default advanced parameters enabled")
    batch_size = 1
    patch_size = 512
    initial_learning_rate = 0.0002
# Warn if a model with this name already exists: it will be deleted when the
# training data is prepared (section 4.1). `bcolors` is defined earlier in
# the notebook.
if os.path.exists(model_path+'/'+model_name):
    print(bcolors.WARNING +"!! WARNING: "+model_name+" already exists and will be deleted in the following cell !!")
    print(bcolors.WARNING +"To continue training "+model_name+", choose a new model_name here, and load "+model_name+" in section 3.3")

# CycleGAN expects the data organised into trainA/trainB subfolders.
# Recreate the staging folder from scratch on every run.
Saving_path= "/content/"+model_name
#Saving_path= model_path+"/"+model_name
if os.path.exists(Saving_path):
    shutil.rmtree(Saving_path)
os.makedirs(Saving_path)

# Saving_path was just created empty, so the subfolders cannot pre-exist;
# the original exists/rmtree checks here were dead code and have been removed.
TrainA_Folder = Saving_path+"/trainA"
os.makedirs(TrainA_Folder)

TrainB_Folder = Saving_path+"/trainB"
os.makedirs(TrainB_Folder)
# Disable the pre-trained model by default (in case section 3.3 is not run).
Use_pretrained_model = False

# Data augmentation is ENABLED by default (in case the augmentation cell in
# section 3.2 is not run). NOTE(review): the original comment said "disable",
# contradicting the value actually assigned.
Use_Data_augmentation = True

# This will display a randomly chosen dataset input and output
random_choice = random.choice(os.listdir(Training_source))
x = imageio.imread(Training_source+"/"+random_choice)

#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
#Hyperparameters failsafes
# Clamp patch_size to the smallest image dimension.
if patch_size > min(Image_Y, Image_X):
    patch_size = min(Image_Y, Image_X)
    print (bcolors.WARNING + " Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:",patch_size)

# CycleGAN requires patch_size to be divisible by 4: round DOWN to the
# nearest multiple of 4. BUG FIX: the original used (int(patch_size/4)-1)*4,
# which shrinks the patch by one multiple more than necessary (e.g. 510 -> 504
# instead of 508).
if not patch_size % 4 == 0:
    patch_size = (patch_size // 4) * 4
    print (bcolors.WARNING + " Your chosen patch_size is not divisible by 4; therefore the patch_size chosen is now:",patch_size)
# Pick a random target image and display a source/target example pair side by side.
random_choice_2 = random.choice(os.listdir(Training_target))
y = imageio.imread(Training_target+"/"+random_choice_2)

f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Training source')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Training target')
plt.axis('off');
# Saved to disk so the example pair can be embedded in the training report PDF.
plt.savefig('/content/TrainingDataExample_cycleGAN.png',bbox_inches='tight',pad_inches=0)
# + [markdown] id="xyQZKby8yFME"
# ## **3.2. Data augmentation**
# ---
# <font size = 4>
# + [markdown] id="w_jCy7xOx2g3"
# <font size = 4>Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.
#
# <font size = 4>Data augmentation is performed here by flipping the patches.
#
# <font size = 4> By default data augmentation is enabled.
# + cellView="form" id="DMqWq5-AxnFU"
#Data augmentation
#@markdown ##Play this cell to enable or disable data augmentation:
Use_Data_augmentation = True #@param {type:"boolean"}

# Report the chosen setting (a single if/else replaces the two independent ifs).
if Use_Data_augmentation:
    print("Data augmentation enabled")
else:
    print("Data augmentation disabled")
# + [markdown] id="3L9zSGtORKYI"
#
# ## **3.3. Using weights from a pre-trained model as initial weights**
# ---
# <font size = 4> Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a CycleGAN model**.
#
# <font size = 4> This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.
#
# <font size = 4> In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used.
# + cellView="form" id="9vC2n-HeLdiJ"
# @markdown ##Loading weights from a pre-trained network
Use_pretrained_model = False #@param {type:"boolean"}

#@markdown ###If not, please provide the path to the model folder:
pretrained_model_path = "" #@param {type:"string"}

# --------------------- Check if we load a previously trained model ------------------------
if Use_pretrained_model:
    # CycleGAN keeps two generators; both "latest" checkpoints must be present
    # for training to resume with --continue_train.
    h5_file_path_A = os.path.join(pretrained_model_path, "latest_net_G_A.pth")
    h5_file_path_B = os.path.join(pretrained_model_path, "latest_net_G_B.pth")

    # --------------------- Check the model exists ------------------------
    # BUG FIX: the original condition `not os.path.exists(A) and os.path.exists(B)`
    # only warned when A was missing AND B was present (operator precedence),
    # leaving Use_pretrained_model enabled with an incomplete checkpoint set.
    # Warn and disable whenever either generator checkpoint is missing.
    if os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B):
        print("Pretrained model "+os.path.basename(pretrained_model_path)+" was found and will be loaded prior to training.")
    else:
        print(bcolors.WARNING+'WARNING: Pretrained model does not exist')
        Use_pretrained_model = False
        print(bcolors.WARNING+'No pretrained network will be used.')
else:
    print(bcolors.WARNING+'No pretrained network will be used.')
# + [markdown] id="MCGklf1vZf2M"
# # **4. Train the network**
# ---
# + [markdown] id="1KYOuygETJkT"
# ## **4.1. Prepare the training data for training**
# ---
# <font size = 4>Here, we use the information from 3. to prepare the training data into a suitable format for training.
# + cellView="form" id="lIUAOJ_LMv5E"
#@markdown ##Prepare the data for training
print("Data preparation in progress")

# Recreate the model output folder from scratch (any previous model with the
# same name is deleted here, as warned in section 3.1).
if os.path.exists(model_path+'/'+model_name):
    shutil.rmtree(model_path+'/'+model_name)
os.makedirs(model_path+'/'+model_name)

#--------------- Here we copy the files to trainA and trainB ---------
for f in os.listdir(Training_source):
    shutil.copyfile(Training_source+"/"+f, TrainA_Folder+"/"+f)

for files in os.listdir(Training_target):
    shutil.copyfile(Training_target+"/"+files, TrainB_Folder+"/"+files)

#---------------------------------------------------------------------

# CycleGAN uses a number of epochs without lr decay and a number of epochs with lr decay;
# split the requested epoch budget evenly between the two phases.
number_of_epochs_lr_stable = int(number_of_epochs/2)
number_of_epochs_lr_decay = int(number_of_epochs/2)

# When continuing from a pre-trained model, copy its latest checkpoints into
# the new model folder so training can resume from them (--continue_train).
if Use_pretrained_model :
    for f in os.listdir(pretrained_model_path):
        if (f.startswith("latest_net_")):
            shutil.copyfile(pretrained_model_path+"/"+f, model_path+'/'+model_name+"/"+f)

# Save a pre-training summary PDF (pdf_export is defined earlier in the notebook).
pdf_export(augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
print("Data ready for training")
# + [markdown] id="0Dfn8ZsEMv5d"
# ## **4.2. Start Training**
# ---
# <font size = 4>When playing the cell below you should see updates after each epoch (round). Network training can take some time.
#
# <font size = 4>* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches or continue the training in a second Colab session.
#
# <font size = 4>Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder from Google Drive as all data can be erased at the next training if using the same folder.
# + cellView="form" id="iwNmp1PUzRDQ"
#@markdown ##Start training
# Record the wall-clock start time so the elapsed training time can be reported.
start = time.time()
# train.py resolves dataset/checkpoint paths relative to /content.
os.chdir("/content")
#--------------------------------- Command line inputs to change CycleGAN paramaters------------
# basic parameters
#('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
#('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
#('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
#('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
#('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
#('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
#('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
#('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
#('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
#('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
#('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
#('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
#('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
#('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
#('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
#('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
#('--direction', type=str, default='AtoB', help='AtoB or BtoA')
#('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
#('--num_threads', default=4, type=int, help='# threads for loading data')
#('--batch_size', type=int, default=1, help='input batch size')
#('--load_size', type=int, default=286, help='scale images to this size')
#('--crop_size', type=int, default=256, help='then crop to this size')
#('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
#('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
#('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
#('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
#('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
#('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
#('--verbose', action='store_true', help='if specified, print more debugging information')
#('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
# visdom and HTML visualization parameters
#('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
#('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
#('--display_id', type=int, default=1, help='window id of the web display')
#('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
#('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
#('--display_port', type=int, default=8097, help='visdom port of the web display')
#('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
#('--print_freq', type=int, default=100, help='frequency of showing training results on console')
#('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# network saving and loading parameters
#('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
#('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
#('--save_by_iter', action='store_true', help='whether saves model by iteration')
#('--continue_train', action='store_true', help='continue training: load the latest model')
#('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
#('--phase', type=str, default='train', help='train, val, test, etc')
# training parameters
#('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
#('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
#('--beta1', type=float, default=0.5, help='momentum term of adam')
#('--lr', type=float, default=0.0002, help='initial learning rate for adam')
#('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
#('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
#('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
#('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations'
#---------------------------------------------------------
#----- Start the training ------------------------------------
# NOTE(review): the "# !python ..." lines below are Colab shell magics that
# jupytext has commented out. In this plain-.py form the `if` bodies contain
# only comments, so this cell is executable only from the notebook, where the
# magics are real shell statements.
if not Use_pretrained_model:
    if Use_Data_augmentation:
        # !python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5
    if not Use_Data_augmentation:
        # !python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --no_flip
if Use_pretrained_model:
    if Use_Data_augmentation:
        # !python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train
    if not Use_Data_augmentation:
        # !python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train --no_flip
#---------------------------------------------------------
print("Training, done.")

# Displaying the time elapsed for training
dt = time.time() - start
mins, sec = divmod(dt, 60)
hour, mins = divmod(mins, 60)
print("Time elapsed:",hour, "hour(s)",mins,"min(s)",round(sec),"sec(s)")

# Save training summary as pdf
pdf_export(trained = True, augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
# + [markdown] id="_0Hynw3-xHp1"
# # **5. Evaluate your model**
# ---
#
# <font size = 4>This section allows the user to perform important quality checks on the validity and generalisability of the trained model.
#
# <font size = 4>**We highly recommend to perform quality control on all newly trained models.**
#
# <font size = 4>Unfortunately, loss function curves are not very informative for GAN networks. Therefore we perform the QC here using a test dataset.
#
#
#
#
#
#
#
# + [markdown] id="1Wext8woxt_F"
# ## **5.1. Choose the model you want to assess**
# + cellView="form" id="eAJzMwPA6tlH"
# model name and path
#@markdown ###Do you want to assess the model you just trained ?
Use_the_current_trained_model = True #@param {type:"boolean"}

#@markdown ###If not, please provide the path to the model folder:
QC_model_folder = "" #@param {type:"string"}

# Resolve the name and parent path of the model to evaluate: either the model
# trained in this session, or the one pointed to by QC_model_folder.
if (Use_the_current_trained_model):
    QC_model_name = model_name
    QC_model_path = model_path
else:
    QC_model_name = os.path.basename(QC_model_folder)
    QC_model_path = os.path.dirname(QC_model_folder)

full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'
if os.path.exists(full_QC_model_path):
    print("The "+QC_model_name+" network will be evaluated")
else:
    W = '\033[0m'  # white (normal)
    R = '\033[31m'  # red
    print(R+'!! WARNING: The chosen model does not exist !!'+W)
    print('Please make sure you provide a valid model path and model name before proceeding further.')
# + [markdown] id="1CFbjvTpx5C3"
# ## **5.2. Identify the best checkpoint to use to make predictions**
# + [markdown] id="q8tCfAadx96X"
# <font size = 4> CycleGAN saves model checkpoints every five epochs. Due to the stochastic nature of GAN networks, the last checkpoint is not always the best one to use. As a consequence, it can be challenging to choose the most suitable checkpoint to use to make predictions.
#
# <font size = 4>This section allows you to perform predictions using all the saved checkpoints and to estimate the quality of these predictions by comparing them to the provided ground truth images. Metrics used include:
#
# <font size = 4>**1. The SSIM (structural similarity) map**
#
# <font size = 4>The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info).
#
# <font size=4>**mSSIM** is the SSIM value calculated across the entire window of both images.
#
# <font size=4>**The output below shows the SSIM maps with the mSSIM**
#
# <font size = 4>**2. The RSE (Root Squared Error) map**
#
# <font size = 4>This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).
#
#
# <font size =4>**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.
#
# <font size = 4>**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score the better the agreement.
#
# <font size=4>**The output below shows the RSE maps with the NRMSE and PSNR values.**
#
#
# + cellView="form" id="q2T4t8NNyDZ6"
#@markdown ##Choose the folders that contain your Quality Control dataset
Source_QC_folder = "" #@param{type:"string"}
Target_QC_folder = "" #@param{type:"string"}
# "Grayscale" QC compares single channels; "RGB" compares full-colour images.
Image_type = "Grayscale" #@param ["Grayscale", "RGB"]
# average function
def Average(lst):
    """Return the arithmetic mean of *lst* (raises ZeroDivisionError if empty)."""
    count = len(lst)
    return sum(lst) / count
# Create a fresh "Quality Control" folder inside the model folder.
if os.path.exists(QC_model_path+"/"+QC_model_name+"/Quality Control"):
    shutil.rmtree(QC_model_path+"/"+QC_model_name+"/Quality Control")
os.makedirs(QC_model_path+"/"+QC_model_name+"/Quality Control")

# List images in Source_QC_folder
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)

#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)

# Here we need to move the data to be analysed so that cycleGAN can find them
Saving_path_QC= "/content/"+QC_model_name
if os.path.exists(Saving_path_QC):
    shutil.rmtree(Saving_path_QC)
os.makedirs(Saving_path_QC)

Saving_path_QC_folder = Saving_path_QC+"_images"
if os.path.exists(Saving_path_QC_folder):
    shutil.rmtree(Saving_path_QC_folder)
os.makedirs(Saving_path_QC_folder)

# Copy and rename every generator-A checkpoint to be analysed:
# "<epoch>_net_G_A.pth" becomes "<epoch>_net_G.pth", the file name test.py
# expects when run with --model test.
for f in os.listdir(full_QC_model_path):
    shortname = f[:-6]  # strip the trailing "_A.pth" (6 characters)
    shortname = shortname + ".pth"
    if f.endswith("net_G_A.pth"):
        shutil.copyfile(full_QC_model_path+f, Saving_path_QC+"/"+shortname)

for files in os.listdir(Source_QC_folder):
    shutil.copyfile(Source_QC_folder+"/"+files, Saving_path_QC_folder+"/"+files)

# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)

#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = int(min(Image_Y, Image_X))

# One checkpoint is saved every 5 epochs (--save_epoch_freq 5), plus "latest";
# the count drives the evaluation loop below.
Nb_Checkpoint = len(os.listdir(Saving_path_QC))
print(Nb_Checkpoint)

## Initiate lists accumulated over the checkpoint-evaluation loop below.
Checkpoint_list = []
Average_ssim_score_list = []
for j in range(1, len(os.listdir(Saving_path_QC))+1):
checkpoints = j*5
if checkpoints == Nb_Checkpoint*5:
checkpoints = "latest"
print("The checkpoint currently analysed is ="+str(checkpoints))
Checkpoint_list.append(checkpoints)
# Create a quality control/Prediction Folder
QC_prediction_results = QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)
if os.path.exists(QC_prediction_results):
shutil.rmtree(QC_prediction_results)
os.makedirs(QC_prediction_results)
#---------------------------- Predictions are performed here ----------------------
os.chdir("/content")
# !python pytorch-CycleGAN-and-pix2pix/test.py --dataroot "$Saving_path_QC_folder" --name "$QC_model_name" --model test --epoch $checkpoints --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir "$QC_prediction_results" --checkpoints_dir "/content/"
#-----------------------------------------------------------------------------------
#Here we need to move the data again and remove all the unnecessary folders
Checkpoint_name = "test_"+str(checkpoints)
QC_results_images = QC_prediction_results+"/"+QC_model_name+"/"+Checkpoint_name+"/images"
QC_results_images_files = os.listdir(QC_results_images)
for f in QC_results_images_files:
shutil.copyfile(QC_results_images+"/"+f, QC_prediction_results+"/"+f)
os.chdir("/content")
#Here we clean up the extra files
shutil.rmtree(QC_prediction_results+"/"+QC_model_name)
#-------------------------------- QC for RGB ------------------------------------
if Image_type == "RGB":
# List images in Source_QC_folder
# This will find the image dimension of a randomly choosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, multichannel=True)
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM"])
# Initiate list
ssim_score_list = []
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
shortname_no_PNG = i[:-4]
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode="RGB")
# -------------------------------- Source test data --------------------------------
test_source = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_real.png"))
# -------------------------------- Prediction --------------------------------
test_prediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_fake.png"))
#--------------------------- Here we normalise using histograms matching--------------------------------
test_prediction_matched = match_histograms(test_prediction, test_GT, multichannel=True)
test_source_matched = match_histograms(test_source, test_GT, multichannel=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT, test_prediction_matched)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT, test_source_matched)
ssim_score_list.append(index_SSIM_GTvsPrediction)
#Save ssim_maps
img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsPrediction_"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)
img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsSource_"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource)])
#Here we calculate the ssim average for each image in each checkpoints
Average_SSIM_checkpoint = Average(ssim_score_list)
Average_ssim_score_list.append(Average_SSIM_checkpoint)
#------------------------------------------- QC for Grayscale ----------------------------------------------
if Image_type == "Grayscale":
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
mi = np.percentile(x,pmin,axis=axis,keepdims=True)
ma = np.percentile(x,pmax,axis=axis,keepdims=True)
return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32
if dtype is not None:
x = x.astype(dtype,copy=False)
mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)
ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)
eps = dtype(eps)
try:
import numexpr
x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
except ImportError:
x = (x - mi) / ( ma - mi + eps )
if clip:
x = np.clip(x,0,1)
return x
def norm_minmse(gt, x, normalize_gt=True):
    """Zero-mean both images and rescale x by the least-squares factor against gt (CARE-style)."""
    if normalize_gt:
        gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy=False)
    # Centre both images around zero.
    x_centred = x.astype(np.float32, copy=False) - np.mean(x)
    gt_centred = gt.astype(np.float32, copy=False) - np.mean(gt)
    # Scale that minimises MSE between scale*x_centred and gt_centred.
    scale = np.cov(x_centred.flatten(), gt_centred.flatten())[0, 1] / np.var(x_centred.flatten())
    return gt_centred, scale * x_centred
# Open and create the csv file that will contain all the QC metrics for this checkpoint.
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", "w", newline='') as file:
    writer = csv.writer(file)
    # Write the header in the csv file
    writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM", "Prediction v. GT NRMSE", "Input v. GT NRMSE", "Prediction v. GT PSNR", "Input v. GT PSNR"])
    # Let's loop through the provided dataset in the QC folders
    for i in os.listdir(Source_QC_folder):
        if not os.path.isdir(os.path.join(Source_QC_folder,i)):
            print('Running QC on: '+i)
            # NOTE(review): the score list is re-initialised for every image, so the
            # Average(...) computed after this loop only reflects the last image — confirm intent.
            ssim_score_list = []
            shortname_no_PNG = i[:-4]  # strip the 4-character extension (assumes names like *.png)
            # -------------------------------- Target test data (Ground truth) --------------------------------
            test_GT_raw = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode="RGB")
            test_GT = test_GT_raw[:,:,2]  # keep only channel index 2 (blue) of the RGB read
            # -------------------------------- Source test data --------------------------------
            test_source_raw = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_real.png"))
            test_source = test_source_raw[:,:,2]
            # Normalize the images wrt each other by minimizing the MSE between GT and Source image
            test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)
            # -------------------------------- Prediction --------------------------------
            test_prediction_raw = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_fake.png"))
            test_prediction = test_prediction_raw[:,:,2]
            # Normalize the images wrt each other by minimizing the MSE between GT and prediction
            test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True)
            # -------------------------------- Calculate the metric maps and save them --------------------------------
            # Calculate the SSIM maps
            index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)
            index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)
            ssim_score_list.append(index_SSIM_GTvsPrediction)
            # Save ssim maps as 8-bit images
            img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype("uint8")
            io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsPrediction_"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)
            img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype("uint8")
            io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsSource_"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)
            # Calculate the Root Squared Error (RSE) maps — |difference| per pixel
            img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))
            img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))
            # Save SE maps
            img_RSE_GTvsPrediction_8bit = (img_RSE_GTvsPrediction* 255).astype("uint8")
            io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/RSE_GTvsPrediction_"+shortname_no_PNG+'.tif',img_RSE_GTvsPrediction_8bit)
            img_RSE_GTvsSource_8bit = (img_RSE_GTvsSource* 255).astype("uint8")
            io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/RSE_GTvsSource_"+shortname_no_PNG+'.tif',img_RSE_GTvsSource_8bit)
            # -------------------------------- Calculate the RSE metrics and save them --------------------------------
            # Normalised Root Mean Squared Error (here it's valid to take the mean of the image)
            # NOTE(review): img_RSE_* holds |diff| (not squared error), so this computes
            # sqrt(mean(|diff|)) rather than a classical RMSE — confirm the intended metric.
            NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))
            NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))
            # We can also measure the peak signal to noise ratio between the images
            PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)
            PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)
            writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])
# Here we calculate the ssim average for each image in each checkpoints
Average_SSIM_checkpoint = Average(ssim_score_list)
Average_ssim_score_list.append(Average_SSIM_checkpoint)
# All data is now processed and saved.
# -------------------------------- Display --------------------------------
# Plot the mean SSIM per checkpoint so the best-performing checkpoint can be picked visually.
plt.figure(figsize=(20,5))
plt.plot(Checkpoint_list, Average_ssim_score_list, label="SSIM")
plt.title('Checkpoints vs. SSIM')
plt.ylabel('SSIM')
plt.xlabel('Checkpoints')
plt.legend()
plt.savefig(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png',bbox_inches='tight',pad_inches=0)
plt.show()
# -------------------------------- Display RGB --------------------------------
# ipywidgets powers the interactive image/checkpoint picker below.
from ipywidgets import interact
import ipywidgets as widgets
if Image_type == "RGB":
    random_choice_shortname_no_PNG = shortname_no_PNG
    @interact
    def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):
        """Interactive QC viewer (RGB): pick an image and checkpoint, display
        target/source/prediction plus the saved SSIM maps with their mSSIM scores."""
        random_choice_shortname_no_PNG = file[:-4]
        # Reload the per-checkpoint metrics written earlier and look up this image's scores.
        df1 = pd.read_csv(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", header=0)
        df2 = df1.set_index("image #", drop = False)
        index_SSIM_GTvsPrediction = df2.loc[file, "Prediction v. GT mSSIM"]
        index_SSIM_GTvsSource = df2.loc[file, "Input v. GT mSSIM"]
        #Setting up colours
        cmap = None
        plt.figure(figsize=(10,10))
        # Target (Ground-truth)
        plt.subplot(3,3,1)
        plt.axis('off')
        img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=False, pilmode="RGB")
        plt.imshow(img_GT, cmap = cmap)
        plt.title('Target',fontsize=15)
        # Source
        plt.subplot(3,3,2)
        plt.axis('off')
        img_Source = imageio.imread(os.path.join(Source_QC_folder, file), as_gray=False, pilmode="RGB")
        plt.imshow(img_Source, cmap = cmap)
        plt.title('Source',fontsize=15)
        #Prediction
        plt.subplot(3,3,3)
        plt.axis('off')
        img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_fake.png"))
        plt.imshow(img_Prediction, cmap = cmap)
        plt.title('Prediction',fontsize=15)
        #SSIM between GT and Source
        plt.subplot(3,3,5)
        #plt.axis('off')
        plt.tick_params(
            axis='both',       # changes apply to the x-axis and y-axis
            which='both',      # both major and minor ticks are affected
            bottom=False,      # ticks along the bottom edge are off
            top=False,         # ticks along the top edge are off
            left=False,        # ticks along the left edge are off
            right=False,       # ticks along the right edge are off
            labelbottom=False,
            labelleft=False)
        img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
        imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
        #plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
        plt.title('Target vs. Source',fontsize=15)
        plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
        plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
        #SSIM between GT and Prediction
        plt.subplot(3,3,6)
        #plt.axis('off')
        plt.tick_params(
            axis='both',       # changes apply to the x-axis and y-axis
            which='both',      # both major and minor ticks are affected
            bottom=False,      # ticks along the bottom edge are off
            top=False,         # ticks along the top edge are off
            left=False,        # ticks along the left edge are off
            right=False,       # ticks along the right edge are off
            labelbottom=False,
            labelleft=False)
        img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
        imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
        #plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
        plt.title('Target vs. Prediction',fontsize=15)
        plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
        plt.savefig(full_QC_model_path+'Quality Control/QC_example_data.png',bbox_inches='tight',pad_inches=0)
# -------------------------------- Display Grayscale --------------------------------
if Image_type == "Grayscale":
    random_choice_shortname_no_PNG = shortname_no_PNG
    @interact
    def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):
        """Interactive QC viewer (grayscale): pick an image and checkpoint, display
        target/source/prediction plus SSIM and RSE maps with their scores."""
        random_choice_shortname_no_PNG = file[:-4]
        # Reload the per-checkpoint metrics written earlier and look up this image's scores.
        df1 = pd.read_csv(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", header=0)
        df2 = df1.set_index("image #", drop = False)
        index_SSIM_GTvsPrediction = df2.loc[file, "Prediction v. GT mSSIM"]
        index_SSIM_GTvsSource = df2.loc[file, "Input v. GT mSSIM"]
        NRMSE_GTvsPrediction = df2.loc[file, "Prediction v. GT NRMSE"]
        NRMSE_GTvsSource = df2.loc[file, "Input v. GT NRMSE"]
        PSNR_GTvsSource = df2.loc[file, "Input v. GT PSNR"]
        PSNR_GTvsPrediction = df2.loc[file, "Prediction v. GT PSNR"]
        plt.figure(figsize=(15,15))
        cmap = None
        # Target (Ground-truth)
        plt.subplot(3,3,1)
        plt.axis('off')
        img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=True, pilmode="RGB")
        plt.imshow(img_GT, norm=simple_norm(img_GT, percent = 99), cmap = 'gray')
        plt.title('Target',fontsize=15)
        # Source
        plt.subplot(3,3,2)
        plt.axis('off')
        img_Source = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_real.png"))
        plt.imshow(img_Source, norm=simple_norm(img_Source, percent = 99))
        plt.title('Source',fontsize=15)
        #Prediction
        plt.subplot(3,3,3)
        plt.axis('off')
        img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_fake.png"))
        plt.imshow(img_Prediction, norm=simple_norm(img_Prediction, percent = 99))
        plt.title('Prediction',fontsize=15)
        # Setting up colours for the metric maps
        cmap = plt.cm.CMRmap
        #SSIM between GT and Source
        plt.subplot(3,3,5)
        #plt.axis('off')
        plt.tick_params(
            axis='both',       # changes apply to the x-axis and y-axis
            which='both',      # both major and minor ticks are affected
            bottom=False,      # ticks along the bottom edge are off
            top=False,         # ticks along the top edge are off
            left=False,        # ticks along the left edge are off
            right=False,       # ticks along the right edge are off
            labelbottom=False,
            labelleft=False)
        img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
        img_SSIM_GTvsSource = img_SSIM_GTvsSource / 255  # maps were saved as 8-bit; bring back to [0, 1]
        imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
        plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
        plt.title('Target vs. Source',fontsize=15)
        plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
        plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
        #SSIM between GT and Prediction
        plt.subplot(3,3,6)
        #plt.axis('off')
        plt.tick_params(
            axis='both',       # changes apply to the x-axis and y-axis
            which='both',      # both major and minor ticks are affected
            bottom=False,      # ticks along the bottom edge are off
            top=False,         # ticks along the top edge are off
            left=False,        # ticks along the left edge are off
            right=False,       # ticks along the right edge are off
            labelbottom=False,
            labelleft=False)
        img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
        img_SSIM_GTvsPrediction = img_SSIM_GTvsPrediction / 255
        imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
        plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
        plt.title('Target vs. Prediction',fontsize=15)
        plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
        #Root Squared Error between GT and Source
        plt.subplot(3,3,8)
        #plt.axis('off')
        plt.tick_params(
            axis='both',       # changes apply to the x-axis and y-axis
            which='both',      # both major and minor ticks are affected
            bottom=False,      # ticks along the bottom edge are off
            top=False,         # ticks along the top edge are off
            left=False,        # ticks along the left edge are off
            right=False,       # ticks along the right edge are off
            labelbottom=False,
            labelleft=False)
        img_RSE_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "RSE_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
        img_RSE_GTvsSource = img_RSE_GTvsSource / 255
        imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)
        plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)
        plt.title('Target vs. Source',fontsize=15)
        plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)
        #plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))
        plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)
        #Root Squared Error between GT and Prediction
        plt.subplot(3,3,9)
        #plt.axis('off')
        plt.tick_params(
            axis='both',       # changes apply to the x-axis and y-axis
            which='both',      # both major and minor ticks are affected
            bottom=False,      # ticks along the bottom edge are off
            top=False,         # ticks along the top edge are off
            left=False,        # ticks along the left edge are off
            right=False,       # ticks along the right edge are off
            labelbottom=False,
            labelleft=False)
        img_RSE_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "RSE_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
        img_RSE_GTvsPrediction = img_RSE_GTvsPrediction / 255
        imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)
        plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)
        plt.title('Target vs. Prediction',fontsize=15)
        plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)
        plt.savefig(full_QC_model_path+'/Quality Control/QC_example_data.png',bbox_inches='tight',pad_inches=0)
#Make a pdf summary of the QC results
qc_pdf_export()
# + [markdown] id="-tJeeJjLnRkP"
# # **6. Using the trained model**
#
# ---
#
# <font size = 4>In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that your trained model from section 4 is activated and finally saved into your Google Drive.
# + [markdown] id="d8wuQGjoq6eN"
# ## **6.1. Generate prediction(s) from unseen dataset**
# ---
#
# <font size = 4>The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as PNG images.
#
# <font size = 4>**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.
#
# <font size = 4>**`Result_folder`:** This folder will contain the predicted output images.
#
# <font size = 4>**`checkpoint`:** Choose the checkpoint number you would like to use to perform predictions. To use the "latest" checkpoint, input "latest".
# + cellView="form" id="y2TD5p7MZrEb"
#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.
import glob
import os.path
# Sentinel so the form's default checkpoint value maps onto CycleGAN's "latest" epoch name.
latest = "latest"
Data_folder = "" #@param {type:"string"}
Result_folder = "" #@param {type:"string"}
# model name and path
#@markdown ###Do you want to use the current trained model?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
Prediction_model_folder = "" #@param {type:"string"}
#@markdown ###What model checkpoint would you like to use?
checkpoint = latest#@param {type:"raw"}
# Here we find the loaded model name and parent path
Prediction_model_name = os.path.basename(Prediction_model_folder)
Prediction_model_path = os.path.dirname(Prediction_model_folder)
# Here we check if we use the newly trained network or not; if so, the
# form's model path is overridden by the model trained in section 4.
if (Use_the_current_trained_model):
    print("Using current trained network")
    Prediction_model_name = model_name
    Prediction_model_path = model_path
# Here we check if the model exists
full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'
if os.path.exists(full_Prediction_model_path):
    print("The "+Prediction_model_name+" network will be used.")
else:
    W = '\033[0m'  # white (normal)
    R = '\033[31m'  # red
    print(R+'!! WARNING: The chosen model does not exist !!'+W)
    print('Please make sure you provide a valid model path and model name before proceeding further.')
# Count saved generator checkpoints (one *G_A.pth file is written every 5 epochs).
Nb_Checkpoint = len(glob.glob(os.path.join(full_Prediction_model_path, '*G_A.pth')))
print(Nb_Checkpoint)
# Validate the requested checkpoint: generator weights are saved every 5 epochs,
# so a numeric request is snapped to a valid multiple of 5, and out-of-range
# values fall back to the "latest" checkpoint.
if not checkpoint == "latest":
    if checkpoint < 10:
        checkpoint = 5
    if not checkpoint % 5 == 0:
        checkpoint = ((int(checkpoint / 5)-1) * 5)
        # Fixed: this warning previously printed `checkpoints` (a stale loop variable
        # from the QC section) instead of the corrected `checkpoint` value.
        print(bcolors.WARNING + " Your chosen checkpoint is not divisible by 5; therefore the checkpoint chosen is now:", checkpoint)
    # A checkpoint at or beyond the last saved one is equivalent to "latest"
    # (previously two separate `>` and `==` checks with the same effect).
    if checkpoint >= Nb_Checkpoint*5:
        checkpoint = "latest"
# Here we need to move the data to be analysed so that cycleGAN can find them:
# test.py expects the inputs under a "testA" sub-folder of the experiment directory.
Saving_path_prediction= "/content/"+Prediction_model_name
if os.path.exists(Saving_path_prediction):
    shutil.rmtree(Saving_path_prediction)  # start from a clean staging folder
os.makedirs(Saving_path_prediction)
Saving_path_Data_folder = Saving_path_prediction+"/testA"
if os.path.exists(Saving_path_Data_folder):
    shutil.rmtree(Saving_path_Data_folder)
os.makedirs(Saving_path_Data_folder)
for files in os.listdir(Data_folder):
    shutil.copyfile(Data_folder+"/"+files, Saving_path_Data_folder+"/"+files)
# +10 so --num_test safely exceeds the number of images — presumably to guarantee
# every image is processed; confirm against test.py's --num_test semantics.
Nb_files_Data_folder = len(os.listdir(Data_folder)) +10
# Here we copy and rename the checkpoint to be used (test.py loads *_net_G.pth,
# while training saved the generator as *_net_G_A.pth).
shutil.copyfile(full_Prediction_model_path+"/"+str(checkpoint)+"_net_G_A.pth", full_Prediction_model_path+"/"+str(checkpoint)+"_net_G.pth")
# This will find the image dimension of a randomly chosen image in Data_folder
random_choice = random.choice(os.listdir(Data_folder))
x = imageio.imread(Data_folder+"/"+random_choice)
# Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
print(Image_min_dim)
#-------------------------------- Perform predictions -----------------------------
#-------------------------------- Options that can be used to perform predictions -----------------------------
# basic parameters
#('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
#('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
#('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
#('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
#('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
#('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
#('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
#('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
#('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
#('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
#('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
#('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
#('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
#('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
#('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
#('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
#('--direction', type=str, default='AtoB', help='AtoB or BtoA')
#('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
#('--num_threads', default=4, type=int, help='# threads for loading data')
#('--batch_size', type=int, default=1, help='input batch size')
#('--load_size', type=int, default=286, help='scale images to this size')
#('--crop_size', type=int, default=256, help='then crop to this size')
#('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
#('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
#('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
#('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
#('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
#('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
#('--verbose', action='store_true', help='if specified, print more debugging information')
#('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
#('--ntest', type=int, default=float("inf"), help='# of test examples.')
#('--results_dir', type=str, default='./results/', help='saves results here.')
#('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
#('--phase', type=str, default='test', help='train, val, test, etc')
# Dropout and BatchNorm behave differently during training and test.
#('--eval', action='store_true', help='use eval mode during test time.')
#('--num_test', type=int, default=50, help='how many test images to run')
# rewrite default values
# To avoid cropping, the load_size should be the same as crop_size
#parser.set_defaults(load_size=parser.get_default('crop_size'))
#------------------------------------------------------------------------
#---------------------------- Predictions are performed here ----------------------
# Run inference from /content so the relative repository path below resolves.
os.chdir("/content")
# !python pytorch-CycleGAN-and-pix2pix/test.py --dataroot "$Saving_path_Data_folder" --name "$Prediction_model_name" --model test --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir "$Result_folder" --checkpoints_dir "$Prediction_model_path" --num_test $Nb_files_Data_folder --epoch $checkpoint
#-----------------------------------------------------------------------------------
# + [markdown] id="SXqS_EhByhQ7"
# ## **6.2. Inspect the predicted output**
# ---
#
#
# + cellView="form" id="64emoATwylxM"
# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.
import os
# This will display a randomly chosen dataset input and its predicted output.
random_choice = random.choice(os.listdir(Data_folder))
random_choice_no_extension = os.path.splitext(random_choice)
# test.py saves pairs as <name>_real.png (the input) and <name>_fake.png (the translation).
x = imageio.imread(Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images/"+random_choice_no_extension[0]+"_real.png")
y = imageio.imread(Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images/"+random_choice_no_extension[0]+"_fake.png")
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Input')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Prediction')
plt.axis('off');
# + [markdown] id="hvkd66PldsXB"
# ## **6.3. Download your predictions**
# ---
#
# <font size = 4>**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, trained model etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name.
# + [markdown] id="UvSlTaH14s3t"
#
# #**Thank you for using CycleGAN!**
| Colab_notebooks/CycleGAN_ZeroCostDL4Mic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import tensorflow as tf
# TensorFlow 1.x only: opt in to eager execution. NOTE(review): this call does not
# exist in TF 2.x (where eager is the default) — confirm the pinned TF version.
tf.enable_eager_execution()
# +
import numpy as np
ndarray = np.ones([3, 3])
print("TensorFlow operations convert numpy arrays to Tensors automatically")
tensor = tf.multiply(ndarray, 42)
print(tensor)
print("And NumPy operations convert Tensors to numpy arrays automatically")
print(np.add(tensor, 1))
print("The .numpy() method explicitly converts a Tensor to a numpy array")
print(tensor.numpy())
# +
# Report GPU availability and whether this tensor was placed on GPU #0.
x = tf.random_uniform([3, 3])
print("Is there a GPU available: "),
print(tf.test.is_gpu_available())
print("Is the Tensor on GPU #0: "),
print(x.device.endswith('GPU:0'))
# +
import time
def time_matmul(x):
    """Time 10 matrix multiplications of x with itself and print the total in milliseconds."""
    start = time.time()
    for _ in range(10):
        tf.matmul(x, x)
    elapsed = time.time() - start
    print("10 loops: {:0.2f}ms".format(1000 * elapsed))
# Benchmark the same matmul on each available device.
# Force execution on CPU
print("On CPU:")
with tf.device("CPU:0"):
    x = tf.random_uniform([1000, 1000])
    assert x.device.endswith("CPU:0")
    time_matmul(x)
# Force execution on GPU #0 if available
if tf.test.is_gpu_available():
    with tf.device("GPU:0"):  # Or GPU:1 for the 2nd GPU, GPU:2 for the 3rd etc.
        x = tf.random_uniform([1000, 1000])
        assert x.device.endswith("GPU:0")
        time_matmul(x)
# +
# Build one in-memory dataset and one file-backed dataset.
ds_tensors = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4, 5, 6])
# Create a CSV file
import tempfile
_, filename = tempfile.mkstemp()
with open(filename, 'w') as f:
    f.write("""Line 1
Line 2
Line 3
""")
ds_file = tf.data.TextLineDataset(filename)
# +
# Apply transformations: square each element, shuffle with a buffer of 2, batch in twos.
ds_tensors = ds_tensors.map(tf.square).shuffle(2).batch(2)
ds_file = ds_file.batch(2)
# +
# With eager execution enabled, datasets are directly iterable; print each batch.
print('Elements of ds_tensors:')
for x in ds_tensors:
    print(x)
print('\nElements in ds_file:')
for x in ds_file:
    print(x)
| Eager execution basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# Show full cell contents in rendered output. Fixed: the old sentinel -1 is
# deprecated and rejected by modern pandas — None is the supported "no limit" value.
pd.set_option('display.max_colwidth', None)
# Assemble one data dictionary (Code -> Indicator Name) from the per-topic
# WDI/UNPD metadata extracts, then append hand-written entries for derived columns.
dd = pd.read_excel("../data/external/General Indicators/WDI/Data_Extract_From_World_Development_Indicators_Metadata.xlsx")[["Code","Indicator Name"]]
dd = pd.concat([dd, pd.read_excel("../data/external/Urbanization/UNPD/Data_Extract_From_World_Development_Indicators_Metadata.xlsx")[["Code","Indicator Name"]]])
dd = pd.concat([dd,pd.read_excel("../data/external/Labor/WDI/Data_Extract_From_World_Development_Indicators_Metadata.xlsx")[["Code","Indicator Name"]]])
dd = pd.concat([dd,pd.read_excel("../data/external/Inequality/WDI/Data_Extract_From_Poverty_and_Equity_Metadata.xlsx",sheet_name="Series - Metadata")[["Code","Indicator Name"]]])
dd = pd.concat([dd,pd.read_excel("../data/external/Education/WDI/Data_Extract_From_World_Development_Indicators_Metadata.xlsx")[["Code","Indicator Name"]]])
dd = pd.concat([dd,pd.read_excel("../data/external/Economy/WDI/Data_Extract_From_World_Development_Indicators_Metadata.xlsx")[["Code","Indicator Name"]]])
dd = pd.concat([dd,pd.read_pickle("../data/interim/WDI_DD.pickle").rename({"Series Code":"Code","Series Name":"Indicator Name"},axis="columns")])
# Hand-written entries for columns derived in this project.
dd = pd.concat([dd,
    pd.DataFrame([
        ["Country Code","The ISO 3-character country code for a nation"],
        ["Year","The year of the data"],
        ["Tourist Defecit","The difference in outbound-inbound tourists for a country"],
        ["Tourism Net","The difference in tourism recepts-expenditures"],
        ["Tourist Avg Net","The average net income per tourist"],
        ["Population Estimate","The UNPD estimated population for the country"],
        ["ST.INT.ARVL.PER.CAPITA","Inbound tourists per resident"],
        ["ST.INT.DPRT.PER.CAPITA","Outbound tourists per resident"],
        ["Maddison GDPPC","Real GDP per capita in 2011US$, 2011 benchmark (suitable for cross-country growth comparisons)"]
    ],columns=dd.columns)])
# Quintile/decile share columns.
dd = pd.concat([dd,
    pd.DataFrame({
        "Code": ["q1","q2","q3","q4","q5","d1","d2","d3","d4","d5","d6","d7","d8","d9","d10"],
        "Indicator Name": ["Quintile group shares of resource","Quintile group shares of resource","Quintile group shares of resource","Quintile group shares of resource","Quintile group shares of resource","Decile group shares of resource","Decile group shares of resource","Decile group shares of resource","Decile group shares of resource","Decile group shares of resource","Decile group shares of resource","Decile group shares of resource","Decile group shares of resource","Decile group shares of resource","Decile group shares of resource"]
    })])
# Survey/inequality dataset columns.
dd = pd.concat([dd, pd.DataFrame([
    ["id","Identifier" ],
    ["country","Country/area" ],
    ["c3","3-digit country code in ISO 3166-1 alpha-3 format" ],
    ["c2","2-digit country code in ISO 3166-1 alpha-2 format" ],
    ["year","Year. Note that when a survey continues for more than a year, the year when it is finished is considered" ],
    ["gini_reported","Gini coefficient as reported by the source (in most cases based on microdata, in some older observations estimates derive from grouped data)" ],
    ["bottom5","Bottom five percent group shares of resource" ],
    ["top5","Top five percent group shares of resource" ],
    ["resource","Resource concept" ],
    ["resource_detailed","Detailed resource concept" ],
    ["scale","Equivalence scale" ],
    ["scale_detailed","Detailed equivalence scale" ],
    ["sharing_unit","Income sharing unit/statistical unit" ],
    ["reference_unit","Unit of analysis, indicates whether the data has been weighted with a person or a household weight" ],
    ["areacovr","Area coverage. The land area which was included in the original sample surveys etc." ],
    ["areacovr_detailed","Detailed area coverage" ],
    ["popcovr","Population coverage. The population covered in the sample surveys in the land area (all, rural, urban etc.) which was included" ],
    ["popcovr_detailed","Detailed population coverage, including age coverage information in certain cases" ],
    ["region_un","Regional grouping based on United Nations geoscheme" ],
    ["region_un_sub","Sub-regional grouping based on United Nations geoscheme" ],
    ["region_wb","Regional grouping based on World Bank classification" ],
    ["eu","Current EU member state" ],
    ["oecd","Current OECD member state" ],
    ["incomegroup","World Bank classification by country income" ],
    ["mean","Survey mean given with the same underlying definitions as the Gini coefficient and the share data" ],
    ["median","Survey median given with the same underlying definitions as the Gini coefficient and the share data" ],
    ["currency","Currency for the mean and median values. If the reference is US$2011PPP it means that the currency is in 2011 US dollar per month, with purchasing power parity applied on it." ],
    ["reference_period","Time period for measuring mean and median values" ],
    ["exchangerate","Conversion rate from local currency units (LCU) to United States Dollars (USD)" ],
    ["mean_usd","Mean measure in United States Dollar (USD)" ],
    ["median_usd","Median measure in United States Dollar (USD)" ],
    ["gdp_ppp_pc_usd2011","Gross Domestic Product (GDP) is converted to United States Dollars (USD) using purchasing power parity rates and divided by total population. Data are in constant 2011 United States Dollar (USD)" ]
],columns=dd.columns)])
dd.set_index("Code", inplace=True)
# Publish as CSV (for downstream joins) and HTML (for the docs site).
dd.to_csv("../data/production/data_dictionary.csv")
dd.to_html("../docs/dd.html",justify='left')
| notebooks/DataDictionary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Patents
#
# This is patent data with UK inventors and applicants from PATSTAT
#
# **Warning**
#
# This data hasn't been explored yet so it is likely to contain bugs and issues. It is also unlikely to contain a lot of creative industries activity so we need to decide how to use it.
#
# Check the `references` folder for some data dictionaries and metadata
#
# This [repo](https://github.com/nestauk/patent_analysis) contains detailed information about data processing and relevant documentation including data dictionaries etc.
# ## Notebook preamble
# %run notebook_preamble.ipy
# ## Load data
# +
your_path = '/Users/jmateosgarcia/Desktop/patents/patents/data/processed/9_7_2019_patent_table.csv'
pats = pd.read_csv(your_path,compression='gzip')
# -
pats.head()
| notebooks/02_patents.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Question 5 from Assignment: RegressionProblems
# #### Done by - <NAME>, vc389
#
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
from math import sqrt
# +
# Global variables: m = sample size, w = true slope, b = true intercept,
# sigma_square = noise variance of the generated data.
m, w, b, sigma_square = 200, 1, 5, 0.1
# Creating data
def create_data(m, w, b, sigma_square):
    """Draw m points from y = w*x + b + N(0, sigma_square), with x ~ U(100, 102).

    Returns:
        (x, y, x_minus) where x_minus is x shifted by -101 so it is
        centred around zero.
    """
    noise_sd = sqrt(sigma_square)
    x, y, x_minus = [], [], []
    for _ in range(m):
        xi = np.random.uniform(low=100, high=102)
        x.append(xi)
        x_minus.append(xi - 101)
        y.append(xi * w + b + np.random.normal(loc=0, scale=noise_sd))
    return x, y, x_minus
# +
# Plotting x and y data points
def plot(x, y):
    """Scatter-plot the raw (x, y) sample."""
    plt.figure(figsize=(8, 6))
    plt.scatter(x, y)
    plt.title("Dataset")
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.show()
def plot_line(x, w, b, y=None):
    """Plot the fitted line w*x + b over the scatter of the data.

    Args:
        x: sample x-values (sets the line's range and the scatter).
        w: slope of the fitted line.
        b: intercept of the fitted line.
        y: sample y-values for the scatter. Defaults to the module-level
           ``y`` for backward compatibility with the original version,
           which read the global implicitly.
    """
    if y is None:
        # Bug fix: the original body referenced the global ``y`` without
        # declaring it, so the plot silently depended on notebook state.
        y = globals()["y"]
    temp = np.linspace(min(x), max(x), 100)
    temp_y = [t * w + b for t in temp]
    fig = plt.figure(figsize=(8,6))
    plt.scatter(x, y)
    plt.plot(temp, temp_y, color='g')
    plt.title("Predicted Line")
    plt.xlabel("X")
    plt.ylabel("Y")
    plt.show()
# +
# Generate one synthetic dataset with the global parameters and visualise it.
x, y, x_minus = create_data(m, w, b, sigma_square)
plot(x, y)
# -
def calculate_w_b(x, y, m):
    """Closed-form least-squares estimates of slope and intercept.

    Uses w = (E[xy] - E[x]E[y]) / Var(x) and b = E[y] - w*E[x],
    computed over the first m points.
    """
    mean_x = np.mean(x)
    mean_y = np.mean(y)
    mean_xy = np.mean([x[i] * y[i] for i in range(m)])
    slope = (mean_xy - mean_x * mean_y) / np.var(x)
    intercept = mean_y - slope * mean_x
    return slope, intercept
def multiple_runs(m):
    """Repeat the fit 1000 times and report mean/variance of the estimates.

    Compares estimates from the raw data against estimates from the
    shifted data (x - 101). Relies on the module-level w, b and
    sigma_square for data generation.
    """
    runs = 1000
    raw_w, raw_b = [], []
    shifted_w, shifted_b = [], []
    for _ in tqdm(range(runs)):
        x, y, x_minus = create_data(m, w, b, sigma_square)
        est_w, est_b = calculate_w_b(x, y, m)
        est_w_s, est_b_s = calculate_w_b(x_minus, y, m)
        raw_w.append(est_w)
        raw_b.append(est_b)
        shifted_w.append(est_w_s)
        shifted_b.append(est_b_s)
    print("Expected values: Actual: w = {} : b = {}".format(np.mean(raw_w), np.mean(raw_b)))
    print("Expected values: Shifted: w = {} : b = {}".format(np.mean(shifted_w), np.mean(shifted_b)))
    print("Variance values: Actual: w = {} : b = {}".format(np.var(raw_w), np.var(raw_b)))
    print("Variance values: Shifted: w = {} : b = {}".format(np.var(shifted_w), np.var(shifted_b)))
# Run the Monte-Carlo experiment for the configured sample size.
multiple_runs(m)
# This practical example aligns with what has been shown mathematically in the assignment.
| Assignment6/hw6_RegressionProblems_q5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="qSuUpkj1UuUa" colab_type="text"
# ### © Copyright 2020 [<NAME>](https://github.com/gmihaila).
# + id="FkXPZsPbT2aV" colab_type="code" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="aP1zAHX4S70e" colab_type="text"
# # Info
#
# [](https://colab.research.google.com/drive/14KCDms4YLrE7Ekxl9VtrdT229UTDyim3#offline=true&sandboxMode=true)
# [](https://github.com/gmihaila/machine_learning_things/blob/master/tutorial_notebooks/pretrain_transformer.ipynb)
#
#
#
# This notebook is used to pretrain transformers models using [Huggingface](https://huggingface.co/transformers/). This notebook is part of my trusty notebooks for Machine Learning. Check out more similar content on my website [gmihaila.github.io/useful/useful/](https://gmihaila.github.io/useful/useful/) where I post useful notebooks like this one.
#
# This notebook is **heavily inspired** from the Huggingface script used for training language models: [transformers/tree/master/examples/language-modeling](https://github.com/huggingface/transformers/tree/master/examples/language-modeling).
#
# 'Fine-tuning the library models for language modeling on a text file (GPT, GPT-2, CTRL, BERT, RoBERTa, XLNet).
# GPT, GPT-2 and CTRL are fine-tuned using a causal language modeling (CLM) loss. BERT and RoBERTa are fine-tuned
# using a masked language modeling (MLM) loss. XLNet is fine-tuned using a permutation language modeling (PLM) loss.'
#
# <br>
#
# ## How to use this notebook?
#
# This notebooks is a code adaptation of the [run_language_modeling.py](https://github.com/huggingface/transformers/blob/master/examples/language-modeling/run_language_modeling.py).
#
# **Models that are guarantee to work:** [GPT](https://huggingface.co/transformers/model_summary.html#original-gpt), [GPT-2](https://huggingface.co/transformers/model_summary.html#gpt-2), [BERT](https://huggingface.co/transformers/model_summary.html#bert), [DistilBERT](https://huggingface.co/transformers/model_summary.html#distilbert), [RoBERTa](https://huggingface.co/transformers/model_summary.html#roberta) and [XLNet](https://huggingface.co/transformers/model_summary.html#xlnet).
#
# Parse the arguments needed that are split in TrainingArguments, ModelArguments and DataTrainingArguments. The only variables that need configuration depending on your needs are `model_args`, `data_args` and `training_args` in **Parameters**:
#
# * `model_args` of type **ModelArguments**: These are the arguments for the model that you want to use such as the model_name_or_path, tokenizer_name etc. You'll need these to load the model and tokenizer.
#
# Minimum setup:
#
# ```python
# model_args = ModelArguments(model_name_or_path,
# model_type,
# tokenizer_name,
# )
# ```
#
# * `model_name_or_path` path to existing transformers model or name of transformer model to be used: *bert-base-cased*, *roberta-base*, *gpt2* etc. More details [here](https://huggingface.co/transformers/pretrained_models.html).
#
# * `model_type` type of model used: *bert*, *roberta*, *gpt2*. More details [here](https://huggingface.co/transformers/pretrained_models.html).
#
# * `tokenizer_name` [tokenizer](https://huggingface.co/transformers/main_classes/tokenizer.html#tokenizer) used to process data for training the model. It usually has same name as `model_name_or_path`: *bert-base-cased*, *roberta-base*, *gpt2* etc.
#
#
# * `data_args` of type **DataTrainingArguments**: These are as the name suggests arguments needed for the dataset. Such as the directory name where your files are stored etc. You'll need these to load/process the dataset.
#
# Minimum setup:
#
# ```python
# data_args = DataArgs(train_data_file,
# eval_data_file,
# mlm,
# )
# ```
#
# * `train_data_file` path to your dataset. This is a plain file that contains all your text data to train a model. Use each line to separate examples: i.e. if you have a dataset composed of multiple text documents, create a single file with each line in the file associated to a text document.
#
# * `eval_data_file` same story as `train_data_file`. This file is used to evaluate the model performance
#
# * `mlm` is a flag that changes loss function depending on model architecture. This variable needs to be set to **True** when working with masked language models like *bert* or *roberta*.
#
#
#
# * `training_args` of type **TrainingArguments**: These are the training hyper-parameters such as learning rate, batch size, weight decay, gradient accumulation steps etc. See all possible arguments [here](https://github.com/huggingface/transformers/blob/master/src/transformers/training_args.py). These are used by the Trainer.
#
# Minimum setup:
#
# * `model_args`
# ```python
# training_args = TrainingArguments(output_dir,
# do_train,
# do_eval,
# )
# ```
#
# * `output_dir` path where to save the pre-trained model.
# * `do_train` variable to signal if you're using train data or not. Set it to **True** if you mentioned `train_data_file`.
# * `do_eval` variable to signal if you're using evaluate data or not. Set it to **True** if you mentioned `eval_data_file`.
#
# <br>
#
# ## Example:
#
# ### Pre-train Bert
#
# In the **Parameters** section use arguments:
#
# ```python
# # process model arguments. Check Info - Notes for more details
# model_args = ModelArguments(model_name_or_path='bert-base-cased',
# model_type='bert',
# tokenizer_name='bert-base-cased',
# )
#
# # process data arguments. Check Info - Notes for more details
# data_args = DataArgs(train_data_file='/content/your_train_data',
#                      eval_data_file='/content/your_test_data',
# mlm=True,
# )
#
# # process training arguments. Check Info - Notes for more details
# training_args = TrainingArguments(output_dir='/content/pretrained_bert',
# do_train=True,
# do_eval=False)
# ```
#
#
# <br>
#
# ## Notes:
# * Parameters details got from [here](https://github.com/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb).
#
# * **Models that are guarantee to work:** [GPT](https://huggingface.co/transformers/model_summary.html#original-gpt), [GPT-2](https://huggingface.co/transformers/model_summary.html#gpt-2), [BERT](https://huggingface.co/transformers/model_summary.html#bert), [DistilBERT](https://huggingface.co/transformers/model_summary.html#distilbert), [RoBERTa](https://huggingface.co/transformers/model_summary.html#roberta) and [XLNet](https://huggingface.co/transformers/model_summary.html#xlnet). I plan on testing more models in the future.
# * I used the [The WikiText Long Term Dependency Language Modeling Dataset](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/) as an example. **To reduce training time I used the evaluate split as training and test split as evaluation!**.
#
# + id="pmIFMjnxXdI9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 357} outputId="8d7e23ce-59ad-4d58-df43-7993a3d3c894"
# check GPU allocation
# !nvidia-smi
# + [markdown] id="8ppW60cUXZQK" colab_type="text"
# # Download
# + id="6l_gehghXapy" colab_type="code" colab={}
# download any dataset or manually upload it. I will use the wikitext raw:
# !wget -q -nc https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip
# !unzip -q -n /content/wikitext-2-raw-v1.zip
# + [markdown] id="UCLtm5BiXona" colab_type="text"
# # Installs
# + id="1JQhmThRXp7b" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="d85af845-8faf-437f-f8ac-15ca0b884015"
# install latest version of Tranformers from GitHub
# !pip install -q git+https://github.com/huggingface/transformers
# + [markdown] id="X5IO8-xrXvWY" colab_type="text"
# # Imports
# + id="J42h802BXwTe" colab_type="code" colab={}
import logging
import math
import os
from dataclasses import dataclass, field
from typing import Optional
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
HfArgumentParser,
LineByLineTextDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
# Module-level logger shared by the rest of the notebook.
logger = logging.getLogger(__name__)
# Configuration classes of all models that carry a language-model head.
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
# Short model-type identifiers (e.g. "bert", "gpt2") for those configs.
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
# + [markdown] id="exr7h9hNYL8v" colab_type="text"
# # Helper Functions
# + id="Cccuf4frXz0h" colab_type="code" colab={}
class ModelArguments:
    """Container for model-related configuration.

    Holds everything needed to load the model and tokenizer.

    Arguments:
        model_name_or_path: path to an existing transformers model or name
            of a pretrained model: bert-base-cased, roberta-base, gpt2 etc.
            More details: https://huggingface.co/transformers/pretrained_models.html
        model_type: type of model used: bert, roberta, gpt2.
            More details: https://huggingface.co/transformers/pretrained_models.html
        config_name: optional config to load instead of the one bundled
            with model_name_or_path.
        tokenizer_name: tokenizer used to process data for training the
            model; usually the same name as model_name_or_path.
        cache_dir: path to cache files, to save time when re-running code.
    """

    def __init__(self, model_name_or_path=None, model_type=None, config_name=None,
                 tokenizer_name=None, cache_dir=None):
        self.model_name_or_path = model_name_or_path
        self.model_type = model_type
        self.config_name = config_name
        self.tokenizer_name = tokenizer_name
        self.cache_dir = cache_dir
class DataArguments:
    """Container for dataset-related configuration.

    Arguments needed to load/process the dataset.

    Arguments:
        train_data_file: plain-text training file; use one line per example
            when the corpus is made of separate documents.
        eval_data_file: same format as train_data_file, used to evaluate
            model performance.
        line_by_line: treat every line of the file as its own example.
        mlm: set True for masked language models (bert, roberta, ...);
            switches the training loss accordingly.
        mlm_probability: probability of masking tokens; requires mlm=True.
        plm_probability: ratio of masked-span length to surrounding context
            length for permutation language modeling (XLNet).
        max_span_length: upper bound on masked-span length for permutation
            language modeling (XLNet).
        block_size: block size of the data; -1 means use all data / the
            model maximum.
        overwrite_cache: overwrite any cached processed files.
    """

    def __init__(self, train_data_file=None, eval_data_file=None, line_by_line=False,
                 mlm=False, mlm_probability=0.15, plm_probability=1 / 6,
                 max_span_length=5, block_size=-1, overwrite_cache=False):
        self.train_data_file = train_data_file
        self.eval_data_file = eval_data_file
        self.line_by_line = line_by_line
        self.mlm = mlm
        self.mlm_probability = mlm_probability
        self.plm_probability = plm_probability
        self.max_span_length = max_span_length
        self.block_size = block_size
        self.overwrite_cache = overwrite_cache
def get_dataset(args: DataArguments, tokenizer: PreTrainedTokenizer, evaluate=False):
    """Build the train or eval dataset described by ``args``.

    Picks the eval file when ``evaluate`` is True, otherwise the train
    file, and wraps it in a line-by-line or contiguous-block dataset
    depending on ``args.line_by_line``.
    """
    data_file = args.eval_data_file if evaluate else args.train_data_file
    if not args.line_by_line:
        return TextDataset(
            tokenizer=tokenizer,
            file_path=data_file,
            block_size=args.block_size,
            overwrite_cache=args.overwrite_cache,
        )
    return LineByLineTextDataset(tokenizer=tokenizer, file_path=data_file, block_size=args.block_size)
# + [markdown] id="LVFOs6cSYvKT" colab_type="text"
# # Parameters
# + id="RoIC40FJYwJm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 88} outputId="a8587533-ee50-4a2a-81ff-20bdb42e7734"
# process model arguments. Check Info - Notes for more details
model_args = ModelArguments(model_name_or_path='bert-base-cased',
                            model_type='bert',
                            tokenizer_name='bert-base-cased',
                            )
# process data arguments. Check Info - Notes for more details
# NOTE(review): the wikitext *validation* split is used for training and the
# *test* split for evaluation to keep runtime short (see Info - Notes above).
data_args = DataArguments(train_data_file='/content/wikitext-2-raw/wiki.valid.raw',
                          eval_data_file='/content/wikitext-2-raw/wiki.test.raw',
                          line_by_line=False,
                          mlm=True,  # BERT trains with a masked-LM loss
                          )
# process training arguments. Check Info - Notes for more details
training_args = TrainingArguments(output_dir='pretrain_bert',
                                  do_train=True,
                                  do_eval=True,
                                  overwrite_output_dir=True)
# check arguments
if data_args.eval_data_file is None and training_args.do_eval:
    # fail fast: do_eval requires an evaluation file to be configured
    raise ValueError("Cannot do evaluation without an evaluation data file. \
    Either supply a file to --eval_data_file \
    or remove the --do_eval argument."
    )
if (
    os.path.exists(training_args.output_dir)
    and os.listdir(training_args.output_dir)
    and training_args.do_train
    and not training_args.overwrite_output_dir):
    # refuse to train into a non-empty output dir unless overwrite is allowed
    raise ValueError(f"Output directory ({training_args.output_dir}) already \
    exists and is not empty. Use --overwrite_output_dir to overcome."
    )
# setup logger: INFO on the main process, WARN on other distributed workers
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
)
# log the runtime setup (rank -1 means no distributed training)
logger.warning(
    "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
    training_args.local_rank,
    training_args.device,
    training_args.n_gpu,
    bool(training_args.local_rank != -1),
    training_args.fp16,
)
logger.info("Training/evaluation parameters %s", training_args)
# seed python/numpy/torch RNGs for reproducibility
set_seed(training_args.seed)
# + [markdown] id="CQ8F7gwbbNio" colab_type="text"
# # Load Model and Tokenizer
# + id="H-cZPvObbPPr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["5b22f6305d4d4be4b5ff4cd55a37a793", "13d461b4dc884a7c95a2c971a1fe1ff7", "14e396ff5ecf41c7b2ad1c0a88f8f44d", "93568a1211bb4ec6831b2a7a1706d45c", "8ccce63db8f740c4acf30a5984524887", "<KEY>", "3e3dade805e14bcd950bc8dbe61919c9", "67e3b557ed944846ba9f4ccb814a7cea", "d19a2a7d0b0b406bb8c6242581756faa", "cefebaecd4ab4b559f9eb277be8a87de", "<KEY>", "a42f68f5d3964ccbb24a58c436c5ab66", "75b2d7819d1a49409cc028c2ec171185", "95eca277baa44ef1ada751b2135e435c", "<KEY>", "6f9e6fe87db24bc6a4007f38b4ef8d36", "<KEY>", "f6a5137958aa4f6cad1d1ff259c22433", "2170da5ca9874a799bc1efce37a76ee2", "<KEY>", "7adb596bed44419ea65d7c3fa343ce3d", "479276e74d4a49bc9b81a0b4e1d0daad", "bfe3a93243f04b93ad4537e98a90d5d2", "d85b01ae45e74878b119db827f79ec92"]} outputId="09bbcedc-1bf4-47f3-c786-3888ab316a24"
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
# check model configuration (explicit config name > model path > bare config)
if model_args.config_name:
    # use configuration name if defined
    config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
    # use model name or path if defined
    config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
    # fall back to a default config for the given model type
    config = CONFIG_MAPPING[model_args.model_type]()
    logger.warning("You are instantiating a new config instance from scratch.")
# check tokenizer configuration (explicit tokenizer name > model path)
if model_args.tokenizer_name:
    # use tokenizer name if defined
    tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
elif model_args.model_name_or_path:
    # use tokenizer name or path if defined
    tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
else:
    # tokenizer configuration incorrect: tokenizers cannot be trained here
    raise ValueError("You are instantiating a new tokenizer from scratch. \
    This is not supported, but you can do it from another script, save it, \
    and load it from here, using --tokenizer_name"
    )
# check if using pre-trained model or train from scratch
if model_args.model_name_or_path:
    # use pre-trained model; from_tf loads TensorFlow .ckpt checkpoints
    model = AutoModelWithLMHead.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
else:
    # use model from configuration - train from scratch
    logger.info("Training new model from scratch")
    model = AutoModelWithLMHead.from_config(config)
# resize embedding matrix to fit all tokens in the tokenizer
model.resize_token_embeddings(len(tokenizer))
# make sure `--mlm` flag is set for masked language models
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
    raise ValueError("BERT and RoBERTa-like models do not have LM heads but \
    masked LM heads. They must be run using the --mlm flag \
    (masked language modeling)."
    )
# set up data block size
# NOTE(review): tokenizer.max_len is deprecated in newer transformers
# (renamed to model_max_length) — confirm against the installed version.
if data_args.block_size <= 0:
    # set block size to maximum length of tokenizer
    # input block size will be the max possible for the model
    data_args.block_size = tokenizer.max_len
else:
    # never go beyond tokenizer maximum length
    data_args.block_size = min(data_args.block_size, tokenizer.max_len)
# + [markdown] id="znKwkO9oeRC6" colab_type="text"
# # Dataset
# + id="acmLQCeZeUpJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 173} outputId="cba4fe2e-3fae-48fe-ce2d-4695ca84597a"
# setup train dataset if `do_train` is set
train_dataset = get_dataset(data_args, tokenizer=tokenizer) if training_args.do_train else None
# setup evaluation dataset if `do_eval` is set
eval_dataset = get_dataset(data_args, tokenizer=tokenizer, evaluate=True) if training_args.do_eval else None
# special collator handling depending on model type
if config.model_type == "xlnet":
    # XLNet uses permutation language modeling, so it needs its own collator
    data_collator = DataCollatorForPermutationLanguageModeling(
        tokenizer=tokenizer,
        plm_probability=data_args.plm_probability,
        max_span_length=data_args.max_span_length,
    )
else:
    # collator for the rest of the model types (causal or masked LM)
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer,
        mlm=data_args.mlm,
        mlm_probability=data_args.mlm_probability
    )
# + [markdown] id="pSM9mLfKeT8m" colab_type="text"
# # Trainer
# + id="oYiAHslWe_0S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 556, "referenced_widgets": ["7c7be961fe3c4fbebd6842599c6e4cee", "b7534c3a6486497fb29fb1ff79187666", "96e5b4e416184710a7ac656e36258612", "<KEY>", "d57e431fa76646d49af035782eff6c6e", "<KEY>", "0f82fac089484ff3802dd5bca6c1f342", "c1a90eae6dba4800a25dd6e60ab6d04f", "<KEY>", "<KEY>", "fa31d116e3cf4e2e9eecb9785944dc7f", "<KEY>", "7347d675b7794bfd989e6643fe5da3d4", "721312449a344a46bdbe101a0c059e45", "74398199f8144a3dbd5611812b7a6f04", "<KEY>", "<KEY>", "5e87074a2bef465fa158c4b335af7124", "<KEY>", "313fa40c7e24405f9906be3eca09d424", "<KEY>", "881feace35c249358b16c2c219b7ea2b", "<KEY>", "<KEY>", "d3abee198a4847f1b2abc06f8fadeb87", "<KEY>", "f261f14c20514175856a617d1b688ff4", "9fb431b6108e4752a72f62054be54812", "<KEY>", "ed227ed0cf6d49ad93d5939db1ad5c84", "f57fdf2386c94eab838090229fc32a33", "<KEY>"]} outputId="a04e7ef7-2fe6-4a0d-a6c9-2dd95009420e"
# initialize Trainer with the model, collator and datasets built above
trainer = Trainer(model=model,
                  args=training_args,
                  data_collator=data_collator,
                  train_dataset=train_dataset,
                  eval_dataset=eval_dataset,
                  prediction_loss_only=True,
                  )
# check model path to save
if training_args.do_train:
    # resume from model_name_or_path only when it is a local directory;
    # otherwise let the Trainer start fresh
    model_path = (model_args.model_name_or_path
                  if model_args.model_name_or_path is not None and
                  os.path.isdir(model_args.model_name_or_path)
                  else None
                  )
    trainer.train(model_path=model_path)
    trainer.save_model()
    # For convenience, we also re-save the tokenizer to the same directory,
    # so that you can share your model easily on huggingface.co/models =)
    if trainer.is_world_master():
        tokenizer.save_pretrained(training_args.output_dir)
# + [markdown] id="PJuUovebgBkA" colab_type="text"
# # Evaluate
# + id="V4_2kHS2gCg6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 256, "referenced_widgets": ["50da31b49a7040cbb682a005ed0f54e4", "9feff57e5f7041faa3dbc554a6fbbfd9", "55c1ebbe8d7841d18751b6ea5a8af243", "c925ba070f6244509ba64471c9b2843d", "13ad8cf62bd140de9e83047ae726e611", "dcc1331c5f4e459cbed4b5b181950cd9", "5098ed899de84a8ebbcb1a33ea0f08dc", "dc74ab056eb74a499c478d4bb4e96c65"]} outputId="d8b76a93-1a86-4ba3-c66e-ec3102cae3d4"
# accumulate evaluation results here
results = {}
# check if `do_eval` flag is set
if training_args.do_eval:
    # evaluate model on the evaluation data
    logger.info("*** Evaluate ***")
    # capture output of trainer.evaluate()
    eval_output = trainer.evaluate()
    # compute perplexity from the model's evaluation loss
    perplexity = math.exp(eval_output["eval_loss"])
    # save perplexity to results
    result = {"perplexity": perplexity}
    # set path for output evaluation file
    output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
    # dump evaluation results to file (main process only)
    if trainer.is_world_master():
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
    # update aggregated results
    results.update(result)
print("results: ",results)
| notebooks/old_notebooks/pretrain_transformer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="8b4tiMKzkA7e"
# Author <NAME>
# + id="ARAmpLtYeZd1"
# !pip install falconn
# !pip install annoy
# !mkdir -p input
# + id="obpVklN5ohfV"
# !rm -rf functions
# !git clone https://github.com/Lennard-Alms/lab_bd.git functions
# + id="TzhLXOEKmkIl"
# # !wget https://storage.googleapis.com/laubenthal_spatiolab/feature_vectors_75.h5 -O input/feature_vectors_75.h5 --no-verbose
# !wget https://storage.googleapis.com/laubenthal_spatiolab/duplicates.h5 -O input/duplicates.h5 --no-verbose
# !wget https://storage.googleapis.com/laubenthal_spatiolab/final_test.h5 -O input/final_test.h5 --no-verbose
# + id="oOH8u6G-o1F5"
import tensorflow as tf
import numpy as np
import glob
import cv2
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.metrics import jaccard_score
import matplotlib.pyplot as plt
import math
import h5py
import keras
from keras.layers import Input
from keras import backend as K
from keras import layers
import gc
from scipy.spatial import distance_matrix
import seaborn as sns
from operator import itemgetter
from google.colab.patches import cv2_imshow
from functions.preprocessing.BatchToFile import BatchProcessToFile
from functions.preprocessing.FeatureExtractor import VGGFeatureExtractorMax
from functions.preprocessing.FeatureExtracorMaxNoPatches import VGGFeatureExtractorMaxNoPatches
# + id="Ur2GpdKKnlmo"
from falconn import LSHIndex, LSHConstructionParameters, get_default_parameters
import falconn
from annoy import AnnoyIndex
from functions.preprocessing.ImageMutation import PatchMutation
from functions.preprocessing.HelperFunctions import get_patches_from_image
from functions.postprocessing.ErrorEvaluation import evaluate_result
# + id="5HDtawr0Mgb1"
# Inspect the dataset names available in each HDF5 file.
f = h5py.File('input/duplicates.h5', 'r')
for key in f.keys():
    print(key)
f.close()
print("---")
f = h5py.File('input/final_test.h5', 'r')
for key in f.keys():
    print(key)
f.close()
# + id="jfKp2hBpQKT1"
# Load database vectors/labels and the query vectors as float32.
# NOTE(review): `g` (final_test.h5) is opened but the queries are read from
# `f` — confirm whether queries should come from final_test.h5 instead.
f = h5py.File('input/duplicates.h5', 'r')
g = h5py.File('input/final_test.h5', 'r')
vectors = f['a2d2_background_horses_50_cover'][:].astype(np.float32)
labels = f['a2d2_background_horses_50_cover_label'][:].astype(np.float32)
queries = f['query(200, 200)'][:].astype(np.float32)
# Query labels are 1-based ids, one per query vector.
query_labels = np.arange(0, queries.shape[0]) + 1
# + id="JL0LMKh7hOZU"
# + id="0HFjNwmVLY9Y"
# + id="h8fpRx8CS7x-"
def do_query(query_vector):
    """Return LSH candidate ids for ``query_vector``.

    Uses the module-level ``lsh_index``. Candidates are the unique points
    that share at least one hash bucket with the query; no distance
    filtering is applied here (see ``filter_results``).
    """
    # Removed dead code: `global query_labels` and the unused local
    # `query_index` had no effect on the result.
    query = lsh_index.construct_query_object()
    # query.set_num_probes(70)
    return np.array(query.get_unique_candidates(query_vector))
def evaluate_hash_candidates(candidates, filtered, query_vector, vectors, ground_truth=None):
    """Summarise the quality of LSH candidates against a ground truth.

    Returns a 5-tuple (recall_placeholder, query_ratio,
    relevant_ratio_placeholder, false_positives, false_negatives); the
    first and third slots stay at 0, matching the disabled computations
    kept below as comments.
    """
    if ground_truth is None:
        # Fall back to brute-force filtering of the whole database;
        # relies on the module-level `threshold`.
        all_ids = np.arange(0, vectors.shape[0])
        ground_truth = filter_results(vectors, all_ids, query_vector, threshold).flatten()
    n_candidates = candidates.shape[0]
    n_relevant = filtered.shape[0]
    false_positives = n_candidates - n_relevant
    false_negatives = ground_truth.shape[0] - n_relevant
    #recall = 100 / ground_truth.shape[0] * n_relevant / 100
    query_ratio = 100 / vectors.shape[0] * n_candidates / 100
    # relevant_ratio = 100 / n_candidates * n_relevant / 100
    relevant_ratio = 0
    return 0, query_ratio, relevant_ratio, false_positives, false_negatives
def calculate_cosine_sim(feature_vectors, feature_vectors_b=None):
    """Pairwise angular distance (arccos of cosine similarity), in radians.

    Returns a (len(a), len(b)) matrix; when the second argument is
    omitted the set is compared against itself.
    """
    if feature_vectors_b is None:
        feature_vectors_b = feature_vectors.copy()
    norm_a = np.linalg.norm(feature_vectors, axis=1)
    norm_b = np.linalg.norm(feature_vectors_b, axis=1)
    cosines = np.dot(feature_vectors, feature_vectors_b.T)
    cosines = cosines / np.dot(norm_a[:, np.newaxis], norm_b[np.newaxis, :])
    # Clip to the valid arccos domain to guard against rounding drift.
    return np.arccos(cosines.clip(-1, 1))
def filter_results(vectors, result_ids, query, threshold):
    """Keep only the ids in `result_ids` whose angular distance to `query`
    is strictly below `threshold`.  Returns a column vector of the surviving
    ids (callers typically `.flatten()` it)."""
    selected = vectors[result_ids]
    distances = calculate_cosine_sim(selected, query[np.newaxis, :]).flatten()
    within = np.argwhere(distances < threshold)
    return result_ids[within]
# + id="IZP-nl9bQKwH"
# Build the FALCONN LSH index over the database vectors.
params = get_default_parameters(
    num_points = vectors.shape[0],
    dimension = vectors.shape[1],
    distance=falconn.DistanceFunction.NegativeInnerProduct)
params.lsh_family = falconn.LSHFamily.Hyperplane
# In FALCONN, k is the number of hash functions per table and l the number
# of hash tables -- more tables raise recall at the cost of memory/time.
params.k = 20
params.l = 50
print(params.k)
print(params.l)
lsh_index = LSHIndex(params)
lsh_index.setup(vectors)
# + id="WoA4nWjA_F4W"
# NEAR DUPLICATE TEST
# One query, swept over thresholds 0.6..0.9, comparing LSH-filtered results
# against brute-force ground truth over the whole database.
query_index = 0
query_vector = queries[query_index]
candidates = do_query(query_vector)
for threshold in np.arange(0.6, 1, 0.1):
    query_label = query_labels[query_index]
    filtered = filter_results(vectors, candidates, query_vector, threshold).flatten()
    ground_truth = filter_results(vectors, np.arange(0, vectors.shape[0]), query_vector, threshold).flatten()
    # NOTE(review): the `recall` returned here is immediately overwritten by
    # sc[1] two lines below -- only query_ratio/relevant_ratio/fp/fn are used.
    recall, query_ratio, relevant_ratio, fp, fn = evaluate_hash_candidates(candidates, filtered, query_vector, vectors)
    sc,ic = evaluate_result(filtered, labels, query_label)
    precision, recall, accuracy = sc[0], sc[1], sc[2]
    print("Results for threshold: ", threshold)
    print("Recall: ", recall)
    print("Precision: ", precision)
    print("Accuracy: ", accuracy)
    print("Queried % of database: ", query_ratio)
    print("True Positive Ratio: ", relevant_ratio)
    print("FP / FN: ", fp, fn)
    print("")
# + id="8O28t_jX-BvD"
# NEAR DUPLICATE EVALUATION WITH ALL QUERIES
# Sweeps the distance threshold 0.0..1.49 (threshold/10) and averages the
# metrics over a fixed set of query indices.  "gem*" values are computed on
# the brute-force ground truth, i.e. the best the filter could achieve.
nd_precisions = []
nd_recalls = []
nd_accuracies = []
nd_precisions_gem = []
nd_recalls_gem = []
nd_accuracies_gem = []
query_index = 0
for threshold in np.arange(0, 15, 0.1):
    mean_precision = 0
    mean_recall = 0
    mean_accuracy = 0
    mean_query_ratio = 0
    mean_relevant_ratio = 0
    gemmean_precision = 0
    gemmean_recall = 0
    gemmean_accuracy = 0
    # NOTE(review): gemmean_query_ratio / gemmean_relevant_ratio are
    # initialised but never accumulated or reported below.
    gemmean_query_ratio = 0
    gemmean_relevant_ratio = 0
    indices = [0,1,2,3,4,5,6,7,8,9,10,11,12,13]
    # indices = range(queries.shape[0])
    for query_index in indices:
        query_vector = queries[query_index]
        query_label = query_labels[query_index]
        candidates = do_query(query_vector)
        filtered = filter_results(vectors, candidates, query_vector, threshold / 10).flatten()
        ground_truth = filter_results(vectors, np.arange(0, vectors.shape[0]), query_vector, threshold / 10).flatten()
        recall, query_ratio, relevant_ratio, fp, fn = evaluate_hash_candidates(candidates, filtered, query_vector, vectors)
        sc,ic = evaluate_result(filtered, labels, query_label)
        gsc,gic = evaluate_result(ground_truth, labels, query_label)
        precision, recall, accuracy = sc[0], sc[1], sc[2]
        mean_precision = mean_precision + precision
        mean_recall = mean_recall + recall
        mean_accuracy = mean_accuracy + accuracy
        mean_query_ratio = mean_query_ratio + query_ratio
        mean_relevant_ratio = mean_relevant_ratio + relevant_ratio
        gemprecision, gemrecall, gemaccuracy = gsc[0], gsc[1], gsc[2]
        gemmean_precision = gemmean_precision + gemprecision
        gemmean_recall = gemmean_recall + gemrecall
        gemmean_accuracy = gemmean_accuracy + gemaccuracy
    # One averaged data point per threshold; plotted in a later cell.
    nd_precisions.append(mean_precision / len(indices))
    nd_recalls.append(mean_recall / len(indices))
    nd_accuracies.append(mean_accuracy / len(indices))
    nd_precisions_gem.append(gemmean_precision / len(indices))
    nd_recalls_gem.append(gemmean_recall / len(indices))
    nd_accuracies_gem.append(gemmean_accuracy / len(indices))
    print("Results for threshold: ", threshold / 10)
    print("Recall: ", mean_recall / len(indices))
    print("Precision: ", mean_precision / len(indices))
    print("Accuracy: ", mean_accuracy / len(indices))
    print("gemRecall: ", gemmean_recall / len(indices))
    print("gemPrecision: ", gemmean_precision / len(indices))
    print("gemAccuracy: ", gemmean_accuracy / len(indices))
    print("Queried % of database: ", mean_query_ratio / len(indices))
    print("True Positive Ratio: ", mean_relevant_ratio / len(indices))
    print("")
# + id="j1-2NQqwixKz"
# Plot the averaged LSH-filtered metrics (first figure) and the ground-truth
# ("gem") metrics (second figure) against the swept distance threshold.
x_axis = np.arange(0, 15, 0.1) / 10
fig, ax = plt.subplots(figsize=(7,5))
ax.plot(x_axis, nd_recalls, label="recall")
ax.plot(x_axis, nd_precisions, label="precision")
ax.plot(x_axis, nd_accuracies, label="accuracy")
ax.legend(loc="center left")
ax.set_xlabel('Cosine distance threshold')
plt.show()
fig, ax = plt.subplots(figsize=(7,5))
ax.plot(x_axis, nd_recalls_gem, label="recall")
ax.plot(x_axis, nd_precisions_gem, label="precision")
ax.plot(x_axis, nd_accuracies_gem, label="accuracy")
ax.legend(loc="center left")
ax.set_xlabel('Cosine distance threshold')
plt.show()
# + id="sVTCeo3KY9aE"
# NEAR DUPLICATE EVALUATION WITH ALL QUERIES AND QUERY SCALING
nd_precisions = []
nd_recalls = []
nd_accuracies = []
nd_precisions_gem = []
nd_recalls_gem = []
nd_accuracies_gem = []
query_index = 0
for threshold in np.arange(0, 15, 0.1):
mean_precision = 0
mean_recall = 0
mean_accuracy = 0
mean_query_ratio = 0
mean_relevant_ratio = 0
gemmean_precision = 0
gemmean_recall = 0
gemmean_accuracy = 0
gemmean_query_ratio = 0
gemmean_relevant_ratio = 0
indices = [0,1,2,3,4,5,6,7,8,9,10,11,12,13]
# indices = range(queries.shape[0])
for query_index in indices:
query_vector = queries[query_index]
# query_vectors = [0] * len(q)
# for _i in range(len(q)):
# query_vectors[_i] = q[_i][query_index]
query_label = query_labels[query_index]
candidates_list = []
# for query_vector in query_vectors:
# candidates = do_query(query_vector)
# candidates_list.append(candidates)
# candidates = np.concatenate(candidates_list)
# candidates = np.array(list(set(candidates)))
filtered = filter_results(vectors, candidates, query_vector, threshold / 10).flatten()
ground_truth = filter_results(vectors, np.arange(0, vectors.shape[0]), query_vector, threshold / 10).flatten()
recall, query_ratio, relevant_ratio, fp, fn = evaluate_hash_candidates(candidates, filtered, query_vector, vectors)
sc,ic = evaluate_result(filtered, labels, query_label)
gsc,gic = evaluate_result(ground_truth, labels, query_label)
precision, recall, accuracy = sc[0], sc[1], sc[2]
mean_precision = mean_precision + precision
mean_recall = mean_recall + recall
mean_accuracy = mean_accuracy + accuracy
mean_query_ratio = mean_query_ratio + query_ratio
mean_relevant_ratio = mean_relevant_ratio + relevant_ratio
gemprecision, gemrecall, gemaccuracy = gsc[0], gsc[1], gsc[2]
gemmean_precision = gemmean_precision + gemprecision
gemmean_recall = gemmean_recall + gemrecall
gemmean_accuracy = gemmean_accuracy + gemaccuracy
nd_precisions.append(mean_precision / len(indices))
nd_recalls.append(mean_recall / len(indices))
nd_accuracies.append(mean_accuracy / len(indices))
nd_precisions_gem.append(gemmean_precision / len(indices))
nd_recalls_gem.append(gemmean_recall / len(indices))
nd_accuracies_gem.append(gemmean_accuracy / len(indices))
print("Results for threshold: ", threshold / 10)
print("Recall: ", mean_recall / len(indices))
print("Precision: ", mean_precision / len(indices))
print("Accuracy: ", mean_accuracy / len(indices))
print("gemRecall: ", gemmean_recall / len(indices))
print("gemPrecision: ", gemmean_precision / len(indices))
print("gemAccuracy: ", gemmean_accuracy / len(indices))
print("Queried % of database: ", mean_query_ratio / len(indices))
print("True Positive Ratio: ", mean_relevant_ratio / len(indices))
print("")
# + id="pXewbiuAlAGs"
# + id="w8tX6vd_rKLT"
# SIMILAR IMAGE EVALUATION WITH ALL QUERIES
# Same threshold sweep as the near-duplicate evaluation, but metrics are read
# from the second tuple (`ic`) returned by evaluate_result -- presumably the
# similar-image scores as opposed to same-class (`sc`); TODO confirm.
query_index = 0
nd_precisions = []
nd_recalls = []
nd_accuracies = []
nd_precisions_gem = []
nd_recalls_gem = []
nd_accuracies_gem = []
for threshold in np.arange(0, 15, 0.1):
    mean_precision = 0
    mean_recall = 0
    mean_accuracy = 0
    mean_query_ratio = 0
    mean_relevant_ratio = 0
    gemmean_precision = 0
    gemmean_recall = 0
    gemmean_accuracy = 0
    gemmean_query_ratio = 0
    gemmean_relevant_ratio = 0
    # Use every query except 15, 2, 18 -- excluded for reasons not recorded
    # here (NOTE(review): document why these three are skipped).
    indices = list(range(queries.shape[0]))
    indices.remove(15)
    indices.remove(2)
    indices.remove(18)
    for query_index in indices:
        query_vector = queries[query_index]
        query_label = query_labels[query_index]
        candidates = do_query(query_vector)
        filtered = filter_results(vectors, candidates, query_vector, threshold / 10).flatten()
        ground_truth = filter_results(vectors, np.arange(0, vectors.shape[0]), query_vector, threshold / 10).flatten()
        # NOTE(review): only used by the commented-out de-duplication below.
        same_label_ids = np.where(labels == query_label)[0]
        # Remove Same Duplicates
        # filtered = np.array([x for x in filtered if (x not in same_label_ids)])
        # ground_truth = np.array([x for x in ground_truth if x not in same_label_ids])
        # _vectors = []
        # for i, v in enumerate(vectors):
        #     if labels[i] != query_label:
        #         _vectors.append(v)
        # _vectors = np.array(_vectors)
        # Passing ground_truth as both candidates and filtered yields the
        # ideal-case ("gem") statistics.
        gem_recall, gem_qr, gem_rr, gem_fp, gem_fn = evaluate_hash_candidates(ground_truth, ground_truth, query_vector, vectors, ground_truth=ground_truth)
        recall, query_ratio, relevant_ratio, fp, fn = evaluate_hash_candidates(candidates, filtered, query_vector, vectors, ground_truth=ground_truth)
        sc,ic = evaluate_result(filtered, labels, query_label)
        gsc,gic = evaluate_result(ground_truth, labels, query_label)
        precision, recall, accuracy = ic[0], ic[1], ic[2]
        mean_precision = mean_precision + precision
        mean_recall = mean_recall + recall
        mean_accuracy = mean_accuracy + accuracy
        mean_query_ratio = mean_query_ratio + query_ratio
        mean_relevant_ratio = mean_relevant_ratio + relevant_ratio
        gemprecision, gemrecall, gemaccuracy = gic[0], gic[1], gic[2]
        gemmean_precision = gemmean_precision + gemprecision
        gemmean_recall = gemmean_recall + gemrecall
        gemmean_accuracy = gemmean_accuracy + gemaccuracy
    nd_precisions.append(mean_precision / len(indices))
    nd_recalls.append(mean_recall / len(indices))
    nd_accuracies.append(mean_accuracy / len(indices))
    nd_precisions_gem.append(gemmean_precision / len(indices))
    nd_recalls_gem.append(gemmean_recall / len(indices))
    nd_accuracies_gem.append(gemmean_accuracy / len(indices))
    print("Results for threshold: ", threshold / 10)
    print("Recall: ", mean_recall / len(indices))
    print("Precision: ", mean_precision / len(indices))
    print("Accuracy: ", mean_accuracy / len(indices))
    print("gemRecall: ", gemmean_recall / len(indices))
    print("gemPrecision: ", gemmean_precision / len(indices))
    print("gemAccuracy: ", gemmean_accuracy / len(indices))
    print("Queried % of database: ", mean_query_ratio / len(indices))
    print("True Positive Ratio: ", mean_relevant_ratio / len(indices))
    print("")
# + id="NfGoJ3vkmbFC"
# Inspect both score tuples for the last query/threshold of the sweep above.
# Each is (precision, recall, accuracy); sc vs ic semantics -- TODO confirm.
sc_ev, ic_ev = evaluate_result(filtered, labels, query_label)
sc_precision, sc_recall, sc_accuracy = sc_ev
ic_precision, ic_recall, ic_accuracy = ic_ev
print(sc_ev, ic_ev)
# + id="Mfw-RqrteIi8"
# Same plots as before, now for the similar-image metrics: LSH-filtered
# results first, ground-truth ("gem") results second.
x_axis = np.arange(0, 15, 0.1) / 10
fig, ax = plt.subplots(figsize=(7,5))
ax.plot(x_axis, nd_recalls, label="recall")
ax.plot(x_axis, nd_precisions, label="precision")
ax.plot(x_axis, nd_accuracies, label="accuracy")
ax.legend(loc="center left")
ax.set_xlabel('Cosine distance threshold')
plt.show()
fig, ax = plt.subplots(figsize=(7,5))
ax.plot(x_axis, nd_recalls_gem, label="recall")
ax.plot(x_axis, nd_precisions_gem, label="precision")
ax.plot(x_axis, nd_accuracies_gem, label="accuracy")
ax.legend(loc="center left")
ax.set_xlabel('Cosine distance threshold')
plt.show()
| Falconn_Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Length scale bounds
#
# <NAME>, September 2019.
# To use different length scale bounds we need to pass the optional parameter acq_optimizer_kwargs to the optimizer.
#
# This parameter is a dict containing optional parameters for the optimizer. An example:
# `acq_optimizer_kwargs = {
# "length_scale_bounds":[(0.1,1),(0.5,1.5)],
# "length_scale":[1.0,3.0]
# }`
#
# For a 2-dimensional problem the length_scale_bounds must be a list of length 2 containing lower and upper bounds as tuples.
# length_scale is a list of numbers defining the length scale for each dimension.
#
#
# Length scale and length scale bounds can only be used when the base estimator is set to "GP".
# Length scale bounds are only used for the Matern kernel i.e when at least one of the dimensions is non-categorical.
# +
import numpy as np
np.random.seed(1)
# %matplotlib inline
import matplotlib.pyplot as plt
plt.set_cmap("viridis")
from ProcessOptimizer.learning import ExtraTreesRegressor
from ProcessOptimizer import Optimizer
# -
# Degenerate bounds (lower == upper) pin the kernel length scale at 0.01.
acq_optimizer_kwargs = {"length_scale_bounds" :[(0.01,0.01)]}
opt = Optimizer([(-2.0, 2.0)], "GP", n_initial_points = 5, acq_optimizer="sampling",acq_optimizer_kwargs=acq_optimizer_kwargs)
# +
noise_level = 0.1
# 1D toy objective to minimise: a damped sine wave plus Gaussian noise.
def objective(x, noise_level=noise_level):
    signal = np.sin(5 * x[0]) * (1 - np.tanh(x[0] ** 2))
    noise = np.random.randn() * noise_level
    return signal + noise
# -
# Dense grid over the search space and the noise-free objective, for plotting.
x = np.linspace(-2, 2, 400).reshape(-1, 1)
fx = np.array([objective(x_i, noise_level=0.0) for x_i in x])
# +
from ProcessOptimizer.acquisition import gaussian_ei
def plot_optimizer(opt, x, fx):
    """Plot the true function, the GP surrogate with a 95% band, the observed
    points, and the (shifted) expected-improvement acquisition function.

    Reads the module-level `noise_level` for the true-function band; 1.96
    sigma corresponds to a 95% confidence interval.
    """
    model = opt.models[-1]
    x_model = opt.space.transform(x.tolist())
    # Plot true function.
    plt.plot(x, fx, "r--", label="True (unknown)")
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate([fx - 1.9600 * noise_level,
                             fx[::-1] + 1.9600 * noise_level]),
             alpha=.2, fc="r", ec="None")
    # Plot Model(x) + contours
    y_pred, sigma = model.predict(x_model, return_std=True)
    plt.plot(x, y_pred, "g--", label=r"$\mu(x)$")
    plt.fill(np.concatenate([x, x[::-1]]),
             np.concatenate([y_pred - 1.9600 * sigma,
                             (y_pred + 1.9600 * sigma)[::-1]]),
             alpha=.2, fc="g", ec="None")
    # Plot sampled points
    plt.plot(opt.Xi, opt.yi,
             "r.", markersize=8, label="Observations")
    acq = gaussian_ei(x_model, model, y_opt=np.min(opt.yi))
    # shift down to make a better plot
    acq = 4*acq - 2
    plt.plot(x, acq, "b", label="EI(x)")
    plt.fill_between(x.ravel(), -2.0, acq.ravel(), alpha=0.3, color='blue')
    # Adjust plot layout
    plt.grid()
    plt.legend(loc='best')
# -
# Five ask/tell optimisation steps with the pinned length scale, then plot.
for i in range(5):
    next_x = opt.ask()
    f_val = objective(next_x)
    opt.tell(next_x, f_val)
plot_optimizer(opt, x, fx)
# Let's change the length scale bounds to see how it affects the surrogate function.
# Wider bounds this time: the kernel may tune the length scale in [0.1, 1.0].
acq_optimizer_kwargs = {"length_scale_bounds" :[(0.1,1.0)]}
opt = Optimizer([(-2.0, 2.0)], "GP", n_initial_points = 5, acq_optimizer="sampling",acq_optimizer_kwargs=acq_optimizer_kwargs)
# +
for i in range(5):
    next_x = opt.ask()
    f_val = objective(next_x)
    # tell()'s return value was previously bound to an unused variable `a`;
    # discard it, matching the first optimisation loop above.
    opt.tell(next_x, f_val)
plot_optimizer(opt, x, fx)
| examples/Length scale bounds.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from tqdm import tqdm
from package.utils import KPIPoint
from package.utils import KPISet
from package.utils import Transformer
# ### Validation data
# #### Week 1 data

# All KPI series are sampled every 5 minutes (expressed in seconds).
TIMESTAMP_INTERVAL = 5 * 60

def _build_and_save_kpiset(file_path, timestamp_start, timestamp_end, save_path):
    """Transform the raw KPI data under `file_path` for the inclusive range
    [timestamp_start, timestamp_end] into a KPISet, print the size of each
    attribute list as a sanity check, save it to `save_path`, and return it.

    Replaces four copy-pasted stanzas (which also carried a `timestamp_strat`
    typo) with a single helper.
    """
    kpi_set = Transformer().transformKPIData2KPISet(file_path, timestamp_start, timestamp_end, TIMESTAMP_INTERVAL)
    # kpi_set.test()
    for attr in kpi_set._attribute_list:
        print(attr, len(kpi_set._attribute_list[attr]))
    kpi_set.save(save_path)
    return kpi_set

kSet = _build_and_save_kpiset('../2019AIOps_data/', 1535731200, 1536335700, '../result/metadata/KPISetTrain')

# #### Week 2 data
kSet = _build_and_save_kpiset('../2019AIOps_data_valid/', 1536336000, 1536940500, '../result/metadata/KPISetValid')

# ### Test data 1
# #### Stage 1 data (both weeks)
kSet = _build_and_save_kpiset('../2019AIOps_data/', 1535731200, 1536940500, '../result/metadata/KPISet')

# #### Stage 2 data
kSet_test = _build_and_save_kpiset('../2019AIOps_data_test1/', 1536940800, 1538150100, '../result/metadata/KPISetTest')
| hotspot/code/dataTransform.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Dominic-Marasigan/CPEN-21A-CPE-1-1/blob/main/Midterm_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="DaPXh5Wovv5F" outputId="004d4f1b-2dda-4126-d705-bd5028d3ccc6"
# Problem Statement 1: print basic student information.
Full_name = "<NAME>"
Student_num, Age = 202101628, 19
Birthdate = "July 2, 2002"
Address = "Blk 74 Lot 25 Sampaguita Village, Brgy. Inocencio, Trece Martires City, Cavite"
Course = "BS Computer Engineering"
Last_Sem_Gwa = 94
print("Name:"+ Full_name)
print("Student Number:" +str(Student_num))
print("Age:"+ str(Age))
print("Birthday:" + Birthdate)
print("Address:" + Address)
print("Course:" + Course)
# Fixed typo in the printed label: "Semeter" -> "Semester".
print("Last Semester GWA:" + str(Last_Sem_Gwa))
# + colab={"base_uri": "https://localhost:8080/"} id="ljptnzXIvwpj" outputId="f31e4c07-1ff0-4ed4-b162-b69d9849b82c"
# Problem Statement 2: evaluate boolean expressions (a)-(j) for n = 4, answ = "Y".
n = 4
answ = "Y"
results = [
    (2 < n) and (n < 6),                         # a
    (2 < n) or (n == 6),                         # b
    (not 2 < n) or (n == 6),                     # c
    not n < 6,                                   # d
    (answ == "Y") or (answ == "y"),              # e
    (answ == "y") and (answ == "y"),             # f
    not answ == "y",                             # g
    (2 < n) and (n == 5 + 1) or (answ == "No"),  # h
    (n == 2) and (n == 7) or (answ == "y"),      # i
    ((n == 2) and (n == 7)) or (answ == "Y"),    # j
]
for result in results:
    print(result)
# + id="CV6H35BBvzbJ" colab={"base_uri": "https://localhost:8080/"} outputId="5c286fa4-e0a7-41a0-ab49-0a7bc92bbf09"
# Problem Statement 3: evaluate mixed arithmetic expressions for
# x = 2, y = -3, w = 7, z = -10.
x, y ,w, z = 2, -3, 7, -10
print("x/y is",x/y)
# Fixed label: the expression computed is w/y/x, not w/y/k.
print("w/y/x is", w/y/x)
print("z/y%x is", z/y%x)
print("x%-y*w is", x%-y*w)
print("x%y is ",x%y)
print("z%w-y/x*5+5 is", z%w-y/x*5+5)
print("9-x%(2+y) is", 9-x%(2+y))
print(" z//w is ", z//w)
print("(2+y)**2 is", (2+y)**2)
print("w/x*2 is ", w/x*2)
| Midterm_Exam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Backtesting Algorithms
# ## By: <NAME>
# ### Published March 29, 2020
#
# ### Concept
# When it comes to backtesting the performance of a rebalanced portfolio, there are several common questions that come into play.
#
# NOTE: This notebook is purely an example of what to consider when generating your own backtests.
#
# Common questions include:
# 1. What do I use for my dataset?
# 2. How frequent should the algorithm trade?
# 3. What constitutes a long/short signal?
# 4. How should my portfolio be allocated during a long/short signal vs. a neutral position?
# 1. If we remain neutral, should the algorithm still rebalance?
# 2. How do I include fees?
# 5. How do I compare a rebalanced portfolio to a non-rebalanced portfolio?
# ## 1. What do I use for my dataset?
# For our backtests, we will be using hourly and daily Ethereum candlestick data.
# +
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
# Hourly Ethereum OHLCV candlesticks; 'date' column brackets the backtest range.
eth_1h = pd.read_csv('../data/ETH.csv')
print(f"Start date: {eth_1h['date'].iat[0]}")
print(f"End date: {eth_1h['date'].iat[-1]}")
# -
eth_1h.head()
# ## 2. How frequent should the algorithm trade?
#
# For our example, we want our algorithm to trade once/week on the same hour. Instead of picking an arbitrary time to rebalance on, let's find:
# * The day with the lowest price volatility
# * On that day, the hour with the lowest price volatility
#
# From this, we can have our algorithm rebalance at a time where price is most constant.
def calc_pct_change(high, low, _open):
    """Candle volatility proxy: the high-low range as a fraction of the open."""
    price_range = high - low
    return price_range / _open
# ### Daily Volatility
# +
# Group ETH into 1d candles
# https://pypi.org/project/TAcharts
from TAcharts.utils import group_candles
eth_1d = group_candles(eth_1h, n=24)
eth_1d.head()
# +
# Mean high-low range per weekday, largest first -- the last entry is the
# least volatile day, used to pick the rebalance day.
eth_1d['pct_change'] = calc_pct_change(eth_1d['high'], eth_1d['low'], eth_1d['open'])
eth_1d['weekday'] = pd.DatetimeIndex(eth_1d['date']).day_name()
daily_volatility = eth_1d.groupby('weekday')['pct_change'].mean().sort_values(ascending=False)
print(daily_volatility)
# -
# #### Conclusion: Saturday has the lowest average daily volatility
# ### Hourly Volatility
# +
# Mean hourly volatility, restricted to Saturdays (the least volatile day).
eth_1h['pct_change'] = calc_pct_change(eth_1h['high'], eth_1h['low'], eth_1h['open'])
eth_1h['hour'] = pd.DatetimeIndex(eth_1h['date']).hour
# Only take volatility from Saturday
eth_1h_saturday = eth_1h[pd.DatetimeIndex(eth_1h['date']).day_name() == 'Saturday']
# Bug fix: the mean was previously computed over the full eth_1h frame, so the
# Saturday-only subset built above was never used and the "least volatile hour
# on Saturday" was actually measured across all days.
hourly_volatility = eth_1h_saturday.groupby('hour')['pct_change'].mean().sort_values(ascending=False)
print(hourly_volatility)
# -
# #### Conclusion: on Saturday, hour 1 has the lowest average volatility.
# +
# Add boolean value to rebalance based on weekday and hour with least volatility
# (Saturday, 01:00 -- see the two volatility analyses above).
is_weekday = pd.DatetimeIndex(eth_1h['date']).day_name() == 'Saturday'
is_hour = pd.to_datetime(eth_1h['date']).apply(lambda x: x.hour == 1 and x.minute == 0)
eth_1h['rebalance'] = is_weekday & is_hour
# -
# ## 3. What constitutes a long/short signal?
# For our example, let's use the 50, 100, and 200 hour moving averages for our signals. Typically, if you have moving averages of different time intervals in stacked order, you'll find support or resistance at the longer moving averages. So, if the 50-hour moving average is above the 100-hour moving average, and the 100-hour moving average is above the 200-hour moving average, we have bullish support. Similarly, we can determine bearish resistance looking at the opposite relationship between moving averages.
# +
from TAcharts.indicators import sma
# 50/100/200-hour moving averages; a fully stacked order in either direction
# defines the bull/bear signal, anything else is neutral.
sma_200 = sma(eth_1h['close'], n=200)
sma_100 = sma(eth_1h['close'], n=100)
sma_50 = sma(eth_1h['close'], n=50)
bullish = (sma_50 > sma_100) & (sma_100 > sma_200)
bearish = (sma_50 < sma_100) & (sma_100 < sma_200)
eth_1h['signal'] = ['bull' if bullish[i] else 'bear' if bearish[i] else 'neutral' for i in range(len(eth_1h))]
# Dates of rebalance and signal
rebalance_dates_signals = eth_1h[['date', 'signal']].loc[eth_1h['rebalance']]
rebalance_dates_signals
# -
rebalance_dates_signals['signal'].value_counts()
# #### Conclusion: we have 20 neutral, 14 bear, and 13 bull signals during algorithm rebalances.
# ## 4. How should my portfolio be allocated during a long/short signal vs. a neutral position?
# Let's keep it simple and assume a neutral portfolio will be 50% ETH and 50% USD. For a long (bull) signal, let's reallocate our portfolio to 75% ETH and 25% USD to capture the upwards price movement. For a short (bear) signal, let's reallocate to 25% ETH and 75% USD to reduce downside.
#
# ### A. If we remain neutral, should the algorithm still rebalance?
# This question comes up because you may not always need to rebalance a portfolio. Sometimes, less trades are better. For our backtests, let's add 5% of wiggle room before rebalancing. This means a neutral portfolio would need to have at least a 5% difference between the market value of positions to trigger a rebalance.
#
# ### B. How do I include fees?
# We'll be assuming a fee rate of 0.75% per trade applied to the buy side. You will see how fees incorporated below.
# ## 5. How do I compare a rebalanced portfolio to a non-rebalanced portfolio?
# First, we create a portfolio object to keep track of important parameters.
class Portfolio:
    """Tracks a rebalanced portfolio's holdings and trading activity.

    Class-level attributes are strategy constants:
      initial_capital -- starting capital in USD
      wiggle_room     -- minimum per-asset weight difference (5%) required
                         before a rebalance trade is actually executed
      fee_rate        -- 0.75% fee applied to the buy side of each trade
      allocations     -- target [asset, USD] weights for each signal
    """
    trade_count = 0
    initial_capital = 10000
    wiggle_room = 0.05
    fee_rate = 0.0075
    allocations = {
        'bull': [0.75, 0.25],
        'neutral': [0.50, 0.50],
        'bear': [0.25, 0.75]
    }
    def __init__(self, assets, start_prices):
        """Split `initial_capital` evenly across `assets` at `start_prices`."""
        # Per-instance counter (shadowing the class default so independent
        # portfolios never share trade-count state).
        self.trade_count = 0
        capital_each = self.initial_capital / len(assets)
        units = capital_each / start_prices
        self.units = units
        # Frozen snapshot used by the no-rebalance benchmark.
        self.start_units = units.copy()
        self.assets = assets
        self.start_prices = start_prices
# Next, we create a dataset using only the columns we need, and use ETH open price as the price on ETH for trades.
# +
# Keep only the columns the backtest needs; 'open' becomes the ETH trade price.
df = eth_1h[['date', 'rebalance', 'signal', 'open']].rename({'open': 'ETH'}, axis=1)
# Add USD column and set to 1 as USD price stays constant at 1
df['USD'] = 1
# Save df to CSV to use in other backtests
df.to_csv('../data/rebalance_signals.csv', index=False)
df.head()
# -
# After that we define the assets we're rebalancing and their starting price. We can use that information to create our portfolio.
# +
# Build the portfolio from the first row's prices.
assets = ['ETH', 'USD']
start_prices = df[assets].iloc[0]
p = Portfolio(assets, start_prices)
# -
# It's also important to keep a running total for portfolio market values, both rebalance and non-rebalanced. We can use that dataframe later for algorithm comparison.
#
# One row will be appended per candle in the simulation loop below.
running_totals = pd.DataFrame(columns=[
    '50/50 ETH-USD: Without rebalancing',
    '50/50 ETH-USD: With rebalancing',
])
# Finally! Now we can run the rebalancing simulation.
# Walk every hourly candle; trade only on rows flagged for rebalance.
for _, row in df.iterrows():
    current_prices = row[assets]
    if row['rebalance']:
        # Calculate weighting based on current prices
        dollar_values = p.units * current_prices
        weights_current = dollar_values / sum(dollar_values)
        weights_preferred = p.allocations[row['signal']]
        # Calculate weight to trade from each side (half the imbalance each)
        trade_weights = (weights_preferred - weights_current) / 2
        # Make sure trade size is larger than our wiggle_room criteria
        is_trade_actionable = sum(abs(weight) > p.wiggle_room for weight in trade_weights) == len(assets)
        if is_trade_actionable:
            trade_dollar_values = trade_weights * sum(dollar_values)
            trade_units = trade_dollar_values / current_prices
            # Reduce the units for the buy side by the fee rate
            trade_units_after_slippage = [(1 - p.fee_rate)*t if t > 0 else t for t in trade_units]
            # Update portfolio units with added/subtracted units
            p.units += trade_units_after_slippage
            # Update trade count
            p.trade_count += 1
    # Update running totals (one row per candle, traded or not)
    # NOTE(review): DataFrame.append was removed in pandas 2.0 -- this loop
    # requires pandas < 2 (or a pd.concat rewrite) to run.
    running_totals = running_totals.append({
        '50/50 ETH-USD: Without rebalancing': sum(p.start_units * current_prices),
        '50/50 ETH-USD: With rebalancing': sum(p.units * current_prices)
    }, ignore_index=True)
# ### Portfolio comparison
# Since we kept a running total of the non-rebalanced and rebalanced portfolio, we can use that data to chart performance. We can also see the total number of trades.
# Final portfolio values (rounded) and how many rebalance trades executed.
print(round(running_totals.iloc[-1]))
print(f'Total trades: {p.trade_count}\n')
# +
import matplotlib.pyplot as plt
# Set dates as index for running totals (one row per candle in df)
running_totals.index = pd.DatetimeIndex(df['date'])
fig, ax = plt.subplots(figsize=(15, 10))
ax.plot(running_totals)
ax.legend(running_totals.columns, loc='upper right', fontsize=14)
ax.set_ylabel('Portfolio Value ($)', fontsize=16)
ax.set_xlabel('Date', fontsize=16)
ax.set_title('Backtest Results', fontsize=20)
plt.show()
# +
# Relative out/under-performance of the rebalanced portfolio vs. the
# buy-and-hold benchmark.
col1, col2 = running_totals.columns[:2]
running_totals['Performance against benchmark'] = (running_totals[col2] - running_totals[col1]) / running_totals[col1]
fig, ax = plt.subplots(figsize=(15, 10))
ax.plot(running_totals.drop(running_totals.columns[:2], axis=1))
ax.set_xlabel('Date', fontsize=16)
ax.set_title('Performance against benchmark', fontsize=20)
plt.show()
# -
# ## Conclusion
# I hope this analysis provides some basic clarity on the logic involved with backtests. Some caveats to notice when running your own backtests:
# * This model does not account for overfitting or underfitting.
# * This model is based on the sole performance of 11 months of price discovery and 34 trades. Accurate backtests should have enough data for several hundred or thousand trades to improve accuracy and account for all types of price movement.
# * Price movement in datasets should include several bullish, bearish, and sideways trends.
# * The more data the better!
# * Trading in a live market is not the same as trading on static historical prices.
# * Slippage based on order size is not factored in.
# * Models are dynamic and the best model today may not be the best model tomorrow.
# ---
# Questions? Feedback? Send it my way :)
#
# <EMAIL>
| 1. Introduction to Backtesting Algorithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# pip install requests
# pip install beautifulsoup4
# -
import requests
url1 = 'https://assets.digitalocean.com/articles/eng_python/beautiful-soup/mockturtle.html'
url2 = 'https://dzone.com/articles/microservices-architecture-what-when-how'
page = requests.get(url2)
page.status_code
page.text
from bs4 import BeautifulSoup
# Bug fix: `soup` was used below but never created, raising a NameError.
soup = BeautifulSoup(page.text, 'html.parser')
# print(soup.prettify())
soup.find_all('p')
soup.find_all('p')[8].get_text()
soup.find_all(class_='mobile-invisible sign-in-join')[0].get_text()
soup.find_all(id='third')
soup.find_all('p', class_='chorus')
| requests.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Before you begin, execute this cell to import numpy and packages from the D-Wave Ocean suite, and all necessary functions for the gate-model framework you are going to use, whether that is the Forest SDK or Qiskit. In the case of Forest SDK, it also starts the qvm and quilc servers.
# %run -i "assignment_helper.py"
# %matplotlib inline
# # Classical probability distributions
#
# **Exercise 1** (1 point). Recall that in classical coin flipping, we get heads with probability $P(X=0) = p_0$ and tails with $P(X=1) = p_1$ for each toss of the coin, where $p_i\geq 0$ for all $i$, and the probabilities sum to one: $\sum_i p_i = 1$. Create a sample with 1000 data points using numpy, with a probability of getting tails being 0.3. This is the parameter that the `binomial` function takes. Store the outcome in an array called `x_data`.
# Bernoulli sample: 1 ("tails") with probability p_1 = 0.3, 0 otherwise.
n_samples = 1000
### BEGIN SOLUTION
p_1 = 0.3
x_data = np.random.binomial(1, p_1, (n_samples,))
### END SOLUTION
assert isinstance(x_data, np.ndarray)
# The empirical tails frequency should be close to p_1.
assert abs(p_1-x_data.sum()/n_samples) < 0.05
# **Exercise 2** (1 point). As you recall, we may also write the probability distribution as a stochastic vector $\vec{p} = \begin{bmatrix} p_0 \\ p_1 \end{bmatrix}$. The normalization constraint on the probability distribution says that the norm of the vector is restricted to one in the $l_1$ norm. In other words, $||\vec{p}||_1 = \sum_i |p_i| = 1$. This would be the unit circle in the $l_1$ norm, but since $p_i\geq 0$, we are restricted to a quarter of the unit circle, just as we plotted above. Write a function that checks whether a given two-dimensional vector is a stochastic vector. That is, it should return `True` if all elements are positive and the 1-norm is approximately one, and it should return `False` otherwise. The input of the function is a numpy array.
def is_stochastic_vector(p: np.ndarray):
    """Return True iff `p` is a valid stochastic vector: every entry is
    non-negative and the l1 norm is (approximately) one."""
    ### BEGIN SOLUTION
    # Return the condition directly instead of an if/else over True/False;
    # bool() normalises numpy's np.bool_ to a plain Python bool.
    return bool(abs(np.linalg.norm(p, ord=1) - 1) < 0.01 and np.all(p >= 0))
    ### END SOLUTION

assert not is_stochastic_vector(np.array([0.2, 0.3]))
assert not is_stochastic_vector(np.array([-0.2, 0.7]))
assert is_stochastic_vector(np.array([0.2, 0.8]))
# **Exercise 3** (1 point). The probability of heads is just the first element in the $\vec{p}$ and we can use a projection to extract it. For the first element of the stochastic vector, the projection is described by the matrix $\begin{bmatrix} 1 & 0\\0 & 0\end{bmatrix}$. Write a function that performs this projection on a two-element vector described by a numpy array. Your output after the projection is also a two-element vector.
def project_to_first_basis_vector(p: np.array):
    """Project the two-element vector `p` onto the first basis vector,
    i.e. apply the projector [[1, 0], [0, 0]]."""
    ### BEGIN SOLUTION
    projector = np.array([[1, 0], [0, 0]])
    return projector @ p
    ### END SOLUTION
assert np.alltrue(project_to_first_basis_vector(np.array([0.2, 0.3])) == np.array([0.2, 0.]))
assert np.alltrue(project_to_first_basis_vector(np.array([1., 0.])) == np.array([1., 0.]))
# **Exercise 4** (1 point). The projection operators introduce some linear algebra to working with probability distributions. We can also use linear algebra to transform one probability distribution to another. A left *stochastic matrix* will map stochastic vectors to stochastic vectors when multiplied from the left: its columns add up to one. Write a function that takes a matrix and a vector as input arguments (both are numpy arrays), checks whether the vector is a stochastic vector and whether the matrix is left stochastic. If they are, return the matrix applied to the vector, otherwise raise a `ValueError`. You can call the function `is_stochastic_vector` that you defined above.
def apply_stochastic_matrix(p: np.array, M: np.array):
    """Apply the matrix M to the vector p, but only if
    p is a stochastic vector and M is a left stochastic
    matrix. Otherwise raise a ValueError.
    """
    ### BEGIN SOLUTION
    # Same test as is_stochastic_vector above, inlined here.
    p_is_stochastic = abs(np.linalg.norm(p, ord=1) - 1) < 0.01 and np.all(p >= 0)
    # A left stochastic matrix has columns summing to one.  Compare with a
    # tolerance: the original exact test `M.sum(axis=0) == [1, 1]` fails for
    # perfectly valid columns such as [0.7, 0.3], whose floating-point sum
    # is 0.9999999999999999, so the good matrix below was wrongly rejected.
    M_is_left_stochastic = np.allclose(M.sum(axis=0), 1.0)
    if p_is_stochastic and M_is_left_stochastic:
        return M.dot(p)
    else:
        raise ValueError
    ### END SOLUTION
p = np.array([[.5], [.5]])
M = np.array([[0.7, 0.6], [0.3, 0.4]])
assert abs(np.linalg.norm(apply_stochastic_matrix(p, M), ord=1)-1) < 0.01
M = np.array([[0.7, 0.6], [0.3, 0.5]])
try:
    apply_stochastic_matrix(p, M)
except ValueError:
    pass
else:
    raise AssertionError("did not raise")
# **Exercise 5** (1 point). Create a left stochastic matrix in a variable called `M` that transforms the uniform distribution $\vec{p}= \begin{bmatrix} 0.5 \\ 0.5 \end{bmatrix}$ to $\begin{bmatrix} 0.6 \\ 0.4 \end{bmatrix}$. `M` should be a two-dimensional numpy array.
### BEGIN SOLUTION
# Columns are the images of the basis states; each column sums to one and
# the uniform mixture of the two columns equals (0.6, 0.4).
M = np.column_stack(([0.5, 0.5], [0.7, 0.3]))
### END SOLUTION
assert np.allclose(M.dot(np.array([0.5, 0.5])), np.array([0.6, 0.4]))
# **Exercise 6** (1 point). Calculate the entropy of this distribution $\begin{bmatrix} 0.6 \\ 0.4 \end{bmatrix}$ in a variable called `S`.
### BEGIN SOLUTION
# Shannon entropy H(p) = -sum_i p_i * log2(p_i) of the distribution (0.6, 0.4).
p0, p1 = 0.6, 0.4
S = -(p0*np.log2(p0) + p1*np.log2(p1))
### END SOLUTION
### BEGIN HIDDEN TESTS
assert abs(S-0.9709505944546686) < 0.01
### END HIDDEN TESTS
# # Quantum states
#
# **Exercise 7** (1 point). A quantum state is a probability distribution. A qubit state is a distribution over two values, similar to the coin flipping in the classical state. A major difference is that the entries are complex numbers and the normalization is in the $l_2$ norm. Create a function similar to `is_stochastic_vector` that checks whether a vector is a valid quantum state. The input is a numpy array and the output should be boolean.
def is_quantum_state(psi: np.array):
    """Check whether *psi* is a valid quantum state: its l2 norm must be
    (approximately) one.  Amplitudes may be negative or complex."""
    ### BEGIN SOLUTION
    return bool(abs(np.linalg.norm(psi, ord=2) - 1) < 0.01)
    ### END SOLUTION
assert is_quantum_state(np.array([1/np.sqrt(2), 1/np.sqrt(2)]))
assert is_quantum_state(np.array([-1/np.sqrt(2), 1/np.sqrt(2)]))
assert is_quantum_state(np.array([-1/3, 2*np.sqrt(2)/3]))
assert is_quantum_state(np.array([-1j/3, 2*np.sqrt(2)/3]))
assert not is_quantum_state(np.array([0.2, 0.8]))
# **Exercise 8** (1 point). While working with numpy arrays is convenient, it is better to use a framework designed for quantum computing, since it often allows us to execute a circuit directly on a quantum computer. In your preferred framework, implement a circuit of a single qubit with no operation on it. You should create it in an object called `circuit`. Do not add a measurement. The evaluation will automatically branch according to which framework you chose.
### BEGIN SOLUTION
# One qubit, one classical bit, and no real operation: the state stays |0>,
# so the first amplitude should be 1.
q = QuantumRegister(1)
c = ClassicalRegister(1)
circuit = QuantumCircuit(q, c)
# NOTE(review): `iden` is the historical Qiskit identity-gate method; newer
# Qiskit releases rename it to `id`/`i` -- confirm against the installed version.
circuit.iden(q[0])
# Forest version
# circuit = Program()
### END SOLUTION
# get_amplitudes is provided by the course environment (wavefunction simulator).
amplitudes = get_amplitudes(circuit)
assert abs(amplitudes[0]-1.0) < 0.01
# **Exercise 9** (1 point). In the execution branching above, you see that we use the wavefunction simulator. This allows us to use the probability amplitudes as usual numpy arrays, as you can see above. If we ran the circuit on an actual quantum device, we would not be able to inspect the wavefunction, but we would have to rely on the statistics of measurements to understand what is happening in the circuit.
#
# Create a circuit in your preferred framework that creates an equal superposition in a qubit using a Hadamard gate. Again, the name of the object should be `circuit`. The evaluation will be based on measurement statistics. In this case, you should explicitly specify the measurement on the qubit
### BEGIN SOLUTION
# Hadamard on |0> gives (|0> + |1>)/sqrt(2); the explicit measurement
# collapses it, so about half of the shots should yield outcome '0'.
circuit = QuantumCircuit(q, c)
circuit.h(q[0])
circuit.measure(q, c)
# Forest version
# circuit = Program()
# ro = circuit.declare('ro', 'BIT', 1)
# circuit += H(0)
# circuit += MEASURE(0, ro[0])
### END SOLUTION
counts = get_counts(circuit)
# NOTE(review): dividing by 100 assumes on the order of 100 shots -- confirm
# the shot count used by get_counts.
assert abs(counts['0']/100-.5) < 0.2
# **Exercise 10** (1 point). If you plotted the state before measurement on the Bloch sphere, it would have been on the equator halfway between the $|0\rangle$ and $|1\rangle$ states, at the tip of the X axis. If you apply the Hadamard on the $|1\rangle$, it would have been the point at the opposite end of the X axis, since the resulting superposition would have had a -1 amplitude for $|1\rangle$. The measurement statistics, however, would be identical. The negative sign plays a role in interference: for instance, applying a Hadamard again, would take you back to $|1\rangle$. Create the superposition after applying the Hadamard gate on $|1\rangle$. We will verify whether it picked up the phase. Do not include a measurement, since we will inspect the wavefunction.
### BEGIN SOLUTION
# X|0> = |1>, then H|1> = (|0> - |1>)/sqrt(2): the |1> amplitude picks up a
# minus sign, visible only in the wavefunction, not in measurement counts.
circuit = QuantumCircuit(q, c)
circuit.x(q[0])
circuit.h(q[0])
# Forest version
# circuit = Program()
# circuit += X(0)
# circuit += H(0)
### END SOLUTION
amplitudes = get_amplitudes(circuit)
# The |1> amplitude should be -1/sqrt(2).
assert abs(amplitudes[1]+np.sqrt(2)/2) < 0.01
# # More qubits and entanglement
#
# **Exercise 11** (1 point). To get a sense of multiqubit states, it is important to be confident with the tensor product operation. Create a function that returns the four basis vectors, $|00\rangle$, $|01\rangle$, $|10\rangle$, and $|11\rangle$, of the tensor product space $\mathbb{C}^2\otimes\mathbb{C}^2$. The order in which they appear does not matter. The return value should be a list of four numpy arrays.
def create_canonical_basis():
    """Return the four canonical basis vectors |00>, |01>, |10>, |11> of
    C^2 (x) C^2, built as Kronecker products of the single-qubit basis
    vectors, as a list of numpy arrays."""
    ### BEGIN SOLUTION
    single_qubit = [np.array([1, 0]), np.array([0, 1])]
    return [np.kron(left, right) for left in single_qubit for right in single_qubit]
    ### END SOLUTION
basis = create_canonical_basis()
assert len(basis) == 4
if basis[0].shape != (4, ):
    basis = [basis_vector.reshape((4, )) for basis_vector in basis]
### BEGIN HIDDEN TESTS
assert any(np.array_equal(np.array([1, 0, 0, 0]), basis_vector) for basis_vector in basis)
assert any(np.array_equal(np.array([0, 1, 0, 0]), basis_vector) for basis_vector in basis)
assert any(np.array_equal(np.array([0, 0, 1, 0]), basis_vector) for basis_vector in basis)
assert any(np.array_equal(np.array([0, 0, 0, 1]), basis_vector) for basis_vector in basis)
### END HIDDEN TESTS
# **Exercise 12** (1 point). A generic product state has the form $\begin{bmatrix}a_0b_0\\ a_0b_1\\ a_1b_0\\ a_1b_1\end{bmatrix}=a_0b_0|00\rangle + a_0b_1|01\rangle + a_1b_0|10\rangle + a_1b_1|11\rangle$ on $\mathbb{C}^2\otimes\mathbb{C}^2$, but not all vectors in this space are of this form. We can use the basis vectors to form vectors in the space that do not have a product structure. These are entangled states that show strong correlations. Entanglement is an important resource in quantum computing and being able to create a circuit that generates an entangled state is critical. Implement a circuit in your preferred framework to create the $|\phi^-\rangle = \frac{1}{\sqrt{2}}(|00\rangle-|11\rangle)$ state, that is, almost the same as the $|\phi^+\rangle$ state, but with the opposite sign of the probability amplitude of $|11\rangle$. Do not include a measurement, as we will verify the state with the wavefunction simulator
### BEGIN SOLUTION
# Prepare |phi-> = (|00> - |11>)/sqrt(2): X flips qubit 0 to |1>, H turns it
# into (|0> - |1>)/sqrt(2), and the CNOT entangles the pair, leaving the
# relative minus sign on |11>.
q = QuantumRegister(2)
c = ClassicalRegister(2)
circuit = QuantumCircuit(q, c)
circuit.x(q[0])
circuit.h(q[0])
circuit.cx(q[0], q[1])
# Forest version
# circuit = Program()
# circuit += X(0)
# circuit += H(0)
# circuit += CNOT(0, 1)
### END SOLUTION
amplitudes = get_amplitudes(circuit)
assert np.allclose(np.array([np.sqrt(2)/2, 0, 0, -np.sqrt(2)/2]), amplitudes)
| coding_assignments/solutions/01_Classical_and_Quantum_Probability_Distributions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import siibra
from nilearn import plotting
from IPython.display import display, Markdown
# ### Define two points in MNI space
#
# We start by specifying some points in the reference space. Here, we use the MNI space. We can find such point, amongst other possibilities, by clicking them in the [siibra-explorer](https://siibra-explorer.apps.hbp.eu/).
# +
import siibra
from nilearn import plotting
# Define a point in MNI152 reference space (millimetre coordinates, e.g.
# copied from the siibra-explorer viewer) and show it on the MNI template.
pt_mni = siibra.Point(
    (-25.65, -2.75, -33.75), space="mni152")
view1 = plotting.plot_anat(
    cut_coords=tuple(pt_mni),
    title="Point in MNI152 space")
view1.add_markers([tuple(pt_mni)])
# Warp the same point into BigBrain space and display it on the fetched
# BigBrain template volume.
pt_bb = pt_mni.warp('bigbrain')
atlas = siibra.atlases['human']
bigbrain = atlas.get_template("bigbrain")
view2 = plotting.plot_anat(
    bigbrain.fetch(), cut_coords=tuple(pt_bb),
    title="Point in BigBrain space")
view2.add_markers([tuple(pt_bb)])
# -
import siibra
from nilearn import plotting
# Assign the point to the Julich-Brain probabilistic ("continuous") maps and
# plot the best-matching region together with its correlation score.
point = siibra.Point((-25.65, -2.75, -33.75), space="mni152")
pmaps = siibra.atlases['human'].get_map(
    space="mni152", parcellation="julich 2.9", maptype="continuous")
# sigma_mm=5 models the location uncertainty of the point (5 mm std dev);
# [0] takes the top-ranked candidate assignment.
region, pmap, scores = pmaps.assign_coordinates(point, sigma_mm=5)[0]
view = plotting.plot_stat_map(
    pmap, cut_coords=tuple(point),
    title=f"{region.name} ({scores['correlation']:.2f})")
view.add_markers([tuple(point)],['cyan'])
# These are copy-pasted from the interactive atlas viewer:
# Build a PointSet from coordinate strings (as copy-pasted from the viewer).
points = siibra.PointSet(
    [
        "-25.650mm, -2.750mm, -33.750mm",
        "-37.350mm, -81.050mm, -6.300mm"
    ],
    space='mni152')
# Interactive 3D marker view of both points.
plotting.view_markers(list(map(tuple,points)), ['red', 'cyan'], marker_size=10)
# ### Assign brain regions to the 3D points
#
# We assign the points to brain regions from the Julich-Brain cytoarchitectonic atlas, using a certain location tolerance of 5mm standard deviation.
# Assign both points at once, again with 5 mm location tolerance;
# assign_coordinates returns one candidate list per point.
atlas = siibra.atlases['human']
pmaps = atlas.get_map(space="mni152",parcellation="julich",maptype="continuous")
regions1, regions2 = pmaps.assign_coordinates(points,sigma_mm=5)
# Let's look at the most probable area found for each point.
# Each candidate is a (region, statistical map, scores) triple; [0] is the
# top-ranked assignment.
region1,statmap1,scores1 = regions1[0]
region2,statmap2,scores2 = regions2[0]
view1 = plotting.plot_stat_map(
    statmap1, title=f"{region1.name} ({scores1['correlation']:.2f})", cut_coords=tuple(points[0]))
view2 = plotting.plot_stat_map(
    statmap2, title=f"{region2.name} ({scores2['correlation']:.2f})", cut_coords=tuple(points[1]))
view1.add_markers([tuple(points[0])],['cyan'])
view2.add_markers([tuple(points[1])],['cyan'])
# ### Look for white matter bundles connecting the regions
#
# Next, we look at the most probable region associated to each point, considering them as a source and target region to investigate connectivity. In order to find white matter fibre bundles which are likely to connect them, we use the probability maps of the white matter fibre bundle parcellation, and assign both the source and target region from Julich-Brain to the fibre bundles. The intersection of the resulting bundles gives us those that are likely to provide connections from source to target.
# +
# get the probabilistic maps of long fibre bundles
bundlemaps = atlas.get_map("mni152","long bundles","continuous")
# assign the source and target region to the bundles,
# using their probability maps found above.
bundles1 = bundlemaps.assign(statmap1,msg=f"Find bundles touching {region1.name}")
bundles2 = bundlemaps.assign(statmap2,msg=f"Find bundles touching {region2.name}")
# intersect the two sets of related fiber bundles: iterate over the union and
# keep (bundle, map) pairs whose bundle appears in BOTH assignment lists; the
# set literal absorbs duplicates.
intersection = {
    (b[0],b[1]) for b in bundles1+bundles2
    if any(b1[0]==b[0] for b1 in bundles1)
    and any(b2[0]==b[0] for b2 in bundles2) }
# -
# Plot each bundle in the intersection, overlaying both region maps and the
# two points.  (NOTE(review): an earlier comment said "no more than 3", but
# no such limit is applied in the loop.)
for bundle,bundlemap in intersection:
    view=plotting.plot_stat_map(bundlemap,
        title=f"{region1.name} and {region2.name} connected by {bundle.name} ")
    view.add_overlay(statmap1)
    view.add_overlay(statmap2)
    view.add_markers([tuple(points[0]),tuple(points[1])],['white'])
# ### Find connectivity in terms of streamlines from DTI
#
# We also investigate the connectivtiy of the two regions as measured by in-vivo imaging. To do so, we select the first region in the atlas, and search for connectivity profiles.
# select the source region
# siibra.QUIET suppresses siibra's log output while fetching the feature;
# the trailing comma unpacks the single ConnectivityProfile result.
with siibra.QUIET:
    p, = siibra.get_features(region1,"ConnectivityProfile")
print(p.name)
#display(Markdown(p.description))
# We use a parcellation object to decode the names of the connected regions.
# Decode the profile's target regions against the Julich-Brain parcellation.
with siibra.QUIET:
    connections = p.decode(atlas.get_parcellation("julich"))
# Create a bar plot of the connectivity profile, and identify the target region from above.
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# Locate the target region in the (value, region) profile once; it is reused
# below both for sizing the plot and for highlighting the bar.
i = [r for v, r in connections].index(region2)
# Show at least the 20 strongest connections, and always include the target
# region itself: i + 1 entries are needed when it ranks at position i >= 20
# (the original `max(20, i)` cut the target's own bar out of the plot).
N = max(20, i + 1)
X = range(N)
Y = [v for v, _ in connections[:N]]
L = [r.name for v, r in connections[:N]]
fig = plt.figure(figsize=(12, 4))
ax1 = fig.add_subplot()
ax1.set_xticks(X)
ax1.set_xticklabels(L, rotation=60, ha='right')
ax1.bar(X, Y, 0.8)
# Highlight the target region's bar in red.
ax1.bar(i, connections[i][0], 0.9, color='r')
fig.gca().set_title(f"Connection strengths from area {region1.name}")
plt.show()
| 03-ProbabilisticAssignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:analysis3]
# language: python
# name: conda-env-analysis3-py
# ---
# # COSIMA Cookbook
# 1. Python package for analysis of ocean/ice model output (MOM5, ACCESS-OM2)
#
# 2. Jupyter notebooks for documenting and sharing analyses.
# - GitHub
# - https://github.com/OceansAus/cosima-cookbook
# - example PR
# - example of a file recovery:
# - git log notebooks/MOM-SIS-01\ Salinity\ Restoring\ Analysis.ipynb
# - git checkout 6868c3 -- notebooks/MOM-SIS-01\ Salinity\ Restoring\ Analysis.ipynb
#
# - Documentation (Sphinx)
# - http://cosima-cookbook.readthedocs.io
# - auto-generated on hook with GitHub
# - Python modules and packages
# - PyPi vs conda, setup.py and meta.yaml
# - `__init__.py`
# - plots and diagnostics separated
# - use of joblib.Memory
from joblib import Memory
# NOTE(review): joblib >= 0.12 renames the `cachedir` argument to `location`
# -- confirm against the installed joblib version.
memory = Memory(cachedir='/g/data1/v45/jm0634/demo')


# Demo stub: decorating an expensive function with @memory.cache serves
# repeated calls with identical arguments from the on-disk cache.
# (The original line `def my_long_caculation` was an incomplete fragment and
# a syntax error; completed here as a minimal documented placeholder.)
@memory.cache
def my_long_calculation():
    """Placeholder for a long-running computation whose result is cached."""
    pass
# Database of netCDF4 files
# - SQL: tables and records
# - dataset (python module)
# - tqdm.tqdm_notebook(iter)
# - reporting using pivot_table
#
# Kelvin waves
# - finding a depth contour
# - filtering out eddies
| ContributedExamples/COSIMA Cookbook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **UNIDAD 7**
# ### La Ciencia de Datos: Análisis, Minería y Visualización de Datos
# ## Caso Práctico Unidad 7
# # Introducción
# Gran parte de la actividad financiera se basa en la relación entre dos conceptos fundamentales: la rentabilidad y el riesgo. Ambos son conceptos muy susceptibles de ser tratados de manera científica y, para ambos, se genera constantemente gran cantidad de datos, así que no es sorprendente que la ciencia de datos tenga mucho que decir, desde hace mucho tiempo, en este campo.
#
# En este caso, se analiza un ejemplo del mercado bancario: la estimación del riesgo de impago de créditos.
#
# Para ello, se proporciona un conjunto de datos demográficos y económicos de mil clientes bancarios, y un indicador de si dejaron deudas impagadas. Se trata de datos sobre un banco alemán en 1994, y la fuente original es UCI Machine Learning Repository del School of Information and Computer Science, de la Universidad de California.
#
# Estos datos se proporcionan para su acceso local en `credit_g.csv`.
#
# Como parte del conjunto de datos, se proporciona una tabla de coste (la idea es que el error de considerar como bueno a un cliente que finalmente no paga su crédito es mucho más costoso que el de considerar malo a un cliente que sí lo pagaría: en el primer caso, el banco puede perder todo lo prestado, mientras que en el segundo, solo se pierde el margen que habría generado ese préstamo):
#
# 
# Para el siguiente caso práctico, se solicitan los siguientes:
#
# 1. Cargar los datos y realizar un análisis exploratorio y una evaluación de la calidad de los datos necesarios para el resto del caso.
# 2. Analizar, con la ayuda de visualizaciones, cuáles de las variables proporcionadas tienen más impacto en la probabilidad de impago.
# 3. Basándose en los resultados del punto anterior, elaborar criterios sencillos de decisión sobre si conceder el crédito que solicita el cliente o no.
# 4. Valorar el coste de los errores cometidos siguiendo esos modelos, según la tabla de coste proporcionada.
# # Análisis del Archivo de Datos
# Comenzaremos por lo más sencillo que es cargar los datos en memoria y revisar.
# Leer datos como df
import pandas as pd
import numpy as np
# Load the German credit data set (1000 clients, 20 attributes plus the
# good/bad class label); describe() only summarises the numeric columns.
creditos = pd.read_csv("credit-g.csv")
creditos.describe()
# Leer el sumario de los datos nos da algunas pistas de la estructura de los mismos. Pero no nos cuenta la historia completa. Veamos unos 10 movimientos para entender como funciona.
# Peek at the first rows to see the categorical columns hidden by describe().
creditos.head(10)
# Obviamente hay muchas variables categóricas y no numéricas en el archivo y por eso no se ven en el reporte sumario.
# Column dtypes and non-null counts.
creditos.info()
# Analicemos un poco algunas de estas variables para determinar cómo influyen y cómo mejor utilizarlas para concluir una regla heurística que nos permita evaluar si un crédito es bueno o malo. Comencemos con la primera, `checking_status`.
# Distinct categories of two of the categorical columns.
print(creditos.checking_status.unique())
print(creditos.credit_history.unique())
# Podemos ver que cada una de estas variables categóricas no tienen necesariamente un orden específico, y en algunas inclusive la información se repite. Lo ideal sería contar con alguna función que nos permita analizar una variable y determinar cuantos casos en cada ocasión tuvieron créditos que se pagaron y cuántos no. Esta función es totalmente explorativa, no necesitamos ninguna estructura especial, sino solo contar y reportar los datos por ahora.
def analizar(variable):
    """Print, for each distinct value of the categorical column *variable*,
    how many of the defaulted ("bad") credits fall in that category.

    Purely exploratory: reads the global `creditos` DataFrame and prints.
    """
    instancias = creditos[variable].unique()
    print("Ocurrencias Negativas de Variable", variable)
    print((len(variable) + 34)*"¨")
    for cosas in instancias:
        # Sum the boolean mask instead of int(value_counts()): the original
        # raised on categories with zero bad cases (empty Series) and
        # int(Series) is deprecated in pandas.
        malos = int(((creditos[variable] == cosas) & (creditos['class'] == "bad")).sum())
        print("--> clase ", '{:>15}'.format(cosas), " es igual a ", malos)
# First look: bad-credit counts per checking_status category.
analizar('checking_status')
# Antes de seguir avanzando, debemos establecer una barrera para saber que son muchos casos malos para atribuirle valor a nuestra fórmula heurística. Analicemos cuantos casos de los 1,000 son créditos morosos.
# Overall number of defaulted ("bad") credits.  Summing the boolean mask is
# robust and avoids int() on a value_counts Series, which is deprecated and
# raises when the selection is empty.
total_malos = int((creditos['class'] == "bad").sum())
print("Total de créditos morosos ", total_malos)
# Solo a modo de completar la comprensión del juego de datos, adjuntamos la descripción de los campos.
#
# **Attribute description**
# 1. Status of existing checking account, in Deutsche Mark.
# 2. Duration in months
# 3. Credit history (credits taken, paid back duly, delays, critical accounts)
# 4. Purpose of the credit (car, television,…)
# 5. Credit amount
# 6. Status of savings account/bonds, in Deutsche Mark.
# 7. Present employment, in number of years.
# 8. Installment rate in percentage of disposable income
# 9. Personal status (married, single,…) and sex
# 10. Other debtors / guarantors
# 11. Present residence since X years
# 12. Property (e.g. real estate)
# 13. Age in years
# 14. Other installment plans (banks, stores)
# 15. Housing (rent, own,…)
# 16. Number of existing credits at this bank
# 17. Job
# 18. Number of people being liable to provide maintenance for
# 19. Telephone (yes,no)
# 20. Foreign worker (yes,no)
# # Funciones de Apoyo
# Comencemos por la primera variable, el estatus de la cuenta corriente de banco. Esta variable es categórica y podemos utilizar nuestra función para el análisis.
# Re-run the per-category bad-credit counts for checking_status.
analizar('checking_status')
# Por lo que vemos las cuentas entre cero a doscientos marcos, y las menores de 200 marcos, tienen alta cantidad de casos morosos. Tratemos de visualizar esta variable categórica de alguna manera. Para tal fin contamos con la función `countplot()` de **Seaborn**.
import seaborn as sns
import matplotlib.pyplot as plt
# NOTE(review): this rebinds total_malos (an int a few cells above) to a
# DataFrame restricted to the defaulted credits.
total_malos = creditos[creditos['class'] == "bad"]
sns.countplot(x='checking_status', data=total_malos)
# Dado que tendremos que visualizar esto bastantes veces, vamos a crear una función que lo haga automáticamente.
def visual_class(target):
    """Bar chart of the *target* column restricted to the defaulted credits."""
    morosos = creditos[creditos['class'] == "bad"]
    sns.countplot(x=target, data=morosos)
# Veamos si funciona:
# Sanity check of the helper on checking_status.
visual_class('checking_status')
# Pero podemos hacer algo mejor, crear una clase que vea cada variable categórica en dos: uno para los casos en que el crédito resultó pago, y otro para los casos que resultó moroso. Eso nos va a permitir hacer mejores análisis de las 20 variables involucradas.
def grafica_categorica(target):
    """Side-by-side count plots of the categorical column *target*, one
    panel per credit class (good vs bad)."""
    sns.catplot(data=creditos, x=target, col="class", kind="count", aspect=1.2)
    plt.show()
# De igual forma, probamos que funcione:
# Sanity check of the two-panel helper.
grafica_categorica('checking_status')
# Ahora que podemos visualizar datos categóricos, sería bueno ver la distribución de datos numéricos para comparar clases diferentes (en este caso créditos buenos de malos).
def grafica_numerica(target):
    """Stacked 20-bin histogram of the numeric column *target*, coloured by
    credit class."""
    sns.displot(data=creditos, x=target, hue="class", multiple="stack", aspect=1.2, bins=20)
    plt.show()
# Sanity check of the numeric helper on the age column.
grafica_numerica('age')
# # Análisis Visual de las Variables Implicadas
# Es probable que de las veinte variables disponibles en el juego de datos, existan algunas donde la diferencia es marcada y tiene una tendencia que apunta a ser parte importante de la razón de los 300 casos de créditos morosos. En nuestro caso, y dado que estamos usando una heurística y no aprendizaje automatizado, inicializamos el valor de referencia en 100.
#
# Procedemos así a evaluar las variables involucradas y determinar su incidencia - o no - en la morosidad.
# ## Checking Status
# La primera variable a analizar es el estado de crédito del cliente en marcos alemanes.
# Good vs bad split per checking_status category.
grafica_categorica('checking_status')
# Pareciera haber igual posibilidad de incumplir el crédito para aquellos que presentan fondos menores a 200 marcos. Ambas variables cruzan nuestra linea imaginaria de 100. Sin embargo aquellos que no tienen cuenta corriente tienen un muy alto nivel de cumplimiento de pagos.
# ## Duration
# Analicemos como influye el plazo del crédito en la morosidad.
import matplotlib.pyplot as plt
import seaborn as sns
# Box plot of credit duration for good vs bad credits.
ax = sns.boxplot(x="class", y="duration", data=creditos)
# Este cuadro no nos dice mucho. Mejor solo analicemos los créditos morosos y veamos una distribución por madurez de los mismos, a ver si un patrón emerge.
# Histogram of credit duration, stacked by class.
grafica_numerica('duration')
# Según el análisis visual, los créditos cortos tienen más probabilidad de convertirse en morosos. Curiosamente a medida que el plazo del crédito aumenta, disminuyen los casos de créditos malos. Pero visto en el contexto de todos los datos, ninguno de los indicados como créditos malos es necesariamente alto en incidencia (solo uno cruza la línea de 50).
# ## Credit History
# El historial de crédito es una variable categórica que podemos analizar y visualizar fácilmente con nuestras funciones.
# Bad-credit counts per credit-history category, then the two-panel plot.
analizar('credit_history')
grafica_categorica('credit_history')
# Es muy curioso que los casos morosos son con personas que ya han pagado un crédito anterior. Esta variable tiene una proporción de 0.50:1 con el mismo bracket en créditos buenos y debieramos penalizar la función en tal caso. Adicionalmente marca 169 en nuestra línea imaginaria de cruce.
# ## Purpose
# ¿Habrá alguna relación entre el propósito del crédito y el nivel de morosidad?
# Bad-credit counts per loan purpose.
analizar('purpose')
# Solamente una clase se acerca a nuestra medida de 100 sin llegar, créditos destinados a autos.
# Good vs bad split per loan purpose.
grafica_categorica('purpose')
# ## Credit Amount
# El monto del crédito puede contener información valiosa sobre la incidencia de morosidad. Uno pensaría que la gente incumple en montos grandes y difíciles de pagar en caso de problemas.
# Histogram of the credit amount, stacked by class.
grafica_numerica('credit_amount')
# Extrañamente, los créditos por debajo de los 2,500 marcos son los que más la gente incumple, exactamente lo contrario a lo pensado. Pero vistos en el contexto total son muchos más los que se pagan, y ninguno cruza la línea de 100 incidencias.
# ## Saving Status
# Esta variable - estatus de los ahorros - parece superflua, ya que se asemeja mucho al estatus de la cuenta corriente.
# Bad-credit counts and split plot for the savings balance.
analizar('savings_status')
grafica_categorica('savings_status')
# Inmediatamente vemos como aquellos clientes con ahorros menores a 100 marcos son los que más incumplen. Sin embargo aquellos sin ahorros tienen un nivel muy bajo de incumplimiento, lo que es un tanto contradictorio. Comparado con aquellos clientes con ahorros de menos de 100 marcos que si pagan, las incidencias de morosidad igual son relativamente grandes, por lo que debemos penalizar este **bracket**.
# ## Employment
# Uno pensaría que el momento de desempleo es el momento adecuado para pedir un préstamo, y quizás el de más vulnerabilidad para incumplir. Analicemos esta variable.
# Bad-credit counts and split plot for years of employment.
analizar('employment')
grafica_categorica('employment')
# Por alguna razón, el indice más alto de morosidad se da en aquellos trabajadores con más de un año de trabajo pero menos de cuatro. En el caso de los créditos pagos, también son mayoría, pero esta variable cruza nuestra línea de 100, por lo que debemos penalizarla.
# ## Installment Commitment
# La tasa de pago se representa como el porcentage de retorno por el monto de crédito y nos puede dar luces de si algunas tasas son causal de mayores niveles de morosidad.
# Histogram of the installment rate (% of disposable income), by class.
grafica_numerica('installment_commitment')
# Es evidente que las tasas de 4.0 son muy proclives a casos de morosidad y créditos malos. Es la única que cruza nuestro umbral de 100 y debe ser penalizada.
# ## Personal Status
# Veamos como puede afectar la morosidad el estatus personal o marital.
# Bad-credit counts and split plot for personal/marital status.
analizar('personal_status')
grafica_categorica('personal_status')
# Del análisis visual, podemos ver que tanto los hombres solteros como las mujeres de cualquier clase marital son fuertes candidatos a morosidad (**y esto de por sí es una regla que raya en lo ilegal, discriminar mujeres como un todo**). Ambas variables deben ser penalizadas.
# ## Other Parties
# El sentido común dice que créditos con garantes y co-deudores debieran tener mayor cumplimiento que aquellos que no poseen ninguna garantía.
# Bad-credit counts and split plot for co-debtors/guarantors.
analizar('other_parties')
grafica_categorica('other_parties')
# El análisis visual determina que aquellas cuentas que no tienen garante son más proclives a incumplir pagos que los que si tienen. Casi una tercera parte incumple por lo tanto esta variable debe ser penalizada.
# ## Residence Since
# Aquellas personas que tienen determinado tiempo de residencia pudieran ser menos proclives a morosidad, ya que tienen cierta estabilidad social. Analicemos el comportamiento de la variable.
# Good vs bad split per years at current residence.
grafica_categorica('residence_since')
# Esta gráfica y la de tasa de pago se parecen mucho, y parece contradictorio que aquellos que tienen 4 como tiempo de residencia son los que más incumplen pagos.
# ## Property Magnitude
# Aquellos que tienen propiedad por lo general se consideran mejores sujetos de crédito que aquellos que no la poseen.
# Bad-credit counts and split plot for the property type.
analizar('property_magnitude')
grafica_categorica('property_magnitude')
# Aunque extraños, aquellos que poseen un carro son los únicos que tienen una ligera inclinación a la morosidad. Casi un 45% de los que poseen carros tienen créditos morosos.
# ## Age
# Si la edad trae el juicio en la gente, seguramente aquellos de mayor edad tienen menos incidencia a la morosidad.
# Histogram of client age, stacked by class.
grafica_numerica('age')
# Efectivamente los menores de 25 tienen mayor incidencia en la morosidad, pero no es determinante y los casos son menores a 50, por lo que no los consideraremos.
# ## Other Payment Plans
# La variable `other_payment_plans` indica otros pagos pendientes que pudieran afectar la capacidad de solventar un prestamo.
# Bad-credit counts and split plot for other installment plans.
analizar('other_payment_plans')
grafica_categorica('other_payment_plans')
# Nuevamente, aunque un poco contraintuitivo, aquellos que tienen cero obligaciones adicionales son los que más morosidad aportan. El porcentaje de casos morosos es superior al 33%.
# ## Housing
# ¿Qué efecto tiene el tipo de situación de habitación en el pago de la deuda?
# Bad-credit counts and split plot for the housing situation.
analizar('housing')
grafica_categorica('housing')
# Por alguna razón, aquellas personas que tienen casa son mucho más proclives a caer en morosidad. Hay 186 casos malos de 525 que se pagaron. Por lo tanto debemos penalizar dicha variable.
# ## Existing Credits
# ¿Cómo impacta la morosidad la existencia de créditos previos? Analicemos la misma.
# Histogram of the number of existing credits at this bank, by class.
grafica_numerica('existing_credits')
# Aparentemente, aquellos que ya tienen un crédito existentes tienen problema pagando dos. Pero los que tienen dos, no parecen tener tanto problema pagando tres (por lo menos no cruzan nuestra linea de 100).
# ## Job
# La calidad del trabajo seguramente tiene una incidencia importante en la morosidad - o no - del crédito.
# Good vs bad split per job qualification.
grafica_categorica('job')
# Aparentemente, aquellos con calificación de _skilled_ tienen mucha probabilidad de fallar en los pagos, con casi 200 ocurrencias. Es casi la mitad del total que si pagó.
# ## Own Telephone
# Durante años, los bancos han utilizado la propiedad de una línea de teléfono fijo como señal de buen crédito, y la ausencia de la misma, como mala señal. Veamos que nos indica el juego de datos.
# Good vs bad split per telephone ownership.
grafica_categorica('own_telephone')
# La incidencia no es tan alta como pensada, pero casi 200 créditos malos de 420 con personas que no poseían teléfono es un claro indicador que debemos penalizar.
# ## Foreign Worker
# Un trabajador extranjero está mucho más expuesto a problemas financieros que uno nacional. Analicemos el comportamiento del juego de datos.
# Good vs bad split for foreign vs national workers.
grafica_categorica('foreign_worker')
# Lamentablemente, de casi 1000 créditos, 300 que fracasaron corresponden a trabajadores extranjeros. Esta variable debe penalizarse en el modelo por doble.
# # Un Modelo Heurístico
# Dado que hemos recaudado mucha información del análisis visual de la data, podemos crear un modelo heurístico, aunque crudo, que reuna información de cada caso y penalice ciertas variables, con un puntaje final. De dicha manera, si el puntaje cruza cierto rango, podemos definir el crédito como bueno o malo, y luego comparar el resultado de la predicción con la realidad.
# +
# Modelo predictivo
def asignar_puntaje(caso):
    """Heuristic risk score for one credit case: one penalty point per risk
    factor present, as identified in the visual analysis above.

    *caso* is a row of the credit DataFrame (or any mapping with the same
    keys); the return value is an integer between 0 and 17.
    """
    # Categorical risk factors: (column, penalised value) pairs.
    factores_categoricos = [
        ('checking_status', "'<0'"),
        ('checking_status', "'0<=X<200'"),
        ('credit_history', "'existing paid'"),
        ('savings_status', "'<100'"),
        ('employment', "'1<=X<4'"),
        ('personal_status', "'male single'"),
        ('personal_status', "'female div/dep/mar'"),
        ('other_parties', 'none'),
        ('property_magnitude', 'car'),
        ('other_payment_plans', 'none'),
        ('housing', 'own'),
        ('job', 'skilled'),
        ('own_telephone', 'none'),
        ('foreign_worker', 'yes'),
    ]
    puntos = sum(1 for campo, valor in factores_categoricos if caso[campo] == valor)
    # Numeric risk factors keep their original thresholds.
    if caso['installment_commitment'] > 3.5:
        puntos += 1
    if caso['residence_since'] > 3:
        puntos += 1
    if caso['existing_credits'] == 1:
        puntos += 1
    return puntos
# -
# Hagamos una prueba con el primer record para analizar que tan bien podemos evaluar un crédito.
# Spot check: print one record and its heuristic score.
print(creditos.iloc[1])
print(asignar_puntaje(creditos.iloc[1]))
# Hasta aquí y con un poco de _debugging_ podemos identificar exitosamente los casos y dar un puntaje. Pudiéramos armar una función que imprima el registro, otorgue un puntaje, y determine si es un caso potencial de crédito bueno o malo. Vamos a fijar el umbral en 8 puntos (todo aquel que tenga más de 8 puntos es un caso de posible moratoria).
def analisis_credito(target, umbral=8):
    """Print a credit case, its heuristic score, and the approve/reject
    decision.

    Cases scoring below *umbral* (default 8, the threshold chosen in the
    text) are approved; the rest are rejected.
    """
    print(target)
    # Score once instead of twice: the original called asignar_puntaje both
    # for printing and for the comparison.
    puntaje = asignar_puntaje(target)
    print("Puntaje asignado: ", puntaje, "\n")
    if puntaje < umbral:
        print("Crédito aprobado!")
    else:
        print("Crédito RECHAZADO!")
# Evaluate two sample records against the threshold.
analisis_credito(creditos.iloc[1])
analisis_credito(creditos.iloc[100])
# Pudiera ser que 8 es un umbral muy dificil de alcanzar. Probemos dos casos más al azar.
# Two more arbitrary records.
analisis_credito(creditos.iloc[355])
analisis_credito(creditos.iloc[777])
# Es difícil ver caso por caso, por lo que vamos a preparar una tabla de confusión automatizando el clasificador. La tabla de confusión se construye con `pd.crosstab` de pandas.
def get_confussion_matrix(umbral, datos=None):
    """Build a confusion matrix for the heuristic scorer at a given threshold.

    Scores strictly above `umbral` are predicted "bad", otherwise "good".
    `datos` defaults to the global `creditos` DataFrame for backward
    compatibility; pass any compatible DataFrame to evaluate a subset.
    Returns a pandas crosstab (actual class x predicted class).
    (The misspelling "confussion" is kept so existing callers keep working.)
    """
    import pandas as pd
    if datos is None:
        datos = creditos
    predicciones = []
    # Iterate over every row instead of the hard-coded 1000 of the original,
    # so the function also works on datasets of a different size.
    for i in range(len(datos)):
        if asignar_puntaje(datos.iloc[i]) > umbral:
            predicciones.append("bad")
        else:
            predicciones.append("good")
    y_actu = pd.Series(datos['class'], name='Actual')
    y_pred = pd.Series(predicciones, name='Predicted')
    df_confusion = pd.crosstab(y_actu, y_pred)
    return(df_confusion)
# Confusion matrices at several thresholds (8 is the initial guess).
get_confussion_matrix(8)
# Probemos con un umbral menor, por ejemplo 6:
get_confussion_matrix(6)
# Con un umbral de 6 estamos arrojando demasiados falsos positivos (créditos que predecimos como malos que en la vida real son buenos). Subamos el umbral.
get_confussion_matrix(10)
get_confussion_matrix(12)
# Dado que nuestra pequeña fórmula heurística no admite mucha variación, hagamos una función que aplique los costos de los errores y determine cuál de todos nuestros umbrales es el mejor.
# # Evaluando Costos de Modelos Heurísticos
# Vamos a evaluar la puntuación de un modelo heurístico usando la tabla de costos determinada en el problema.
# 
def get_puntaje(matriz):
    """Total misclassification cost for a 2x2 confusion matrix.

    Cost table from the problem statement: a false positive (actual good
    predicted bad, cell [0, 1]) costs 5, a false negative (actual bad
    predicted good, cell [1, 0]) costs 1, and correct cells cost nothing.
    Lower is better.
    """
    costos = ((0, 5),
              (1, 0))
    total = 0
    for fila in range(2):
        for columna in range(2):
            total += matriz.iloc[fila, columna] * costos[fila][columna]
    return total
# Hagamos la prueba con valores del 6 al 10 (lo vamos a hacer a mano porque no es mucho código).
# Cost of each threshold from 6 to 12; the best threshold minimises this value.
print("Umbral 6 arroja puntaje: ", get_puntaje(get_confussion_matrix(6)))
print("Umbral 7 arroja puntaje: ", get_puntaje(get_confussion_matrix(7)))
print("Umbral 8 arroja puntaje: ", get_puntaje(get_confussion_matrix(8)))
print("Umbral 9 arroja puntaje: ", get_puntaje(get_confussion_matrix(9)))
print("Umbral 10 arroja puntaje: ", get_puntaje(get_confussion_matrix(10)))
print("Umbral 11 arroja puntaje: ", get_puntaje(get_confussion_matrix(11)))
print("Umbral 12 arroja puntaje: ", get_puntaje(get_confussion_matrix(12)))
# # Conclusión
# Obviamente, un modelo heurístico donde la proporción de penalización sea cinco a uno por falsos positivos tiene poca oportunidad de prosperar y castiga a un porcentaje mucho más grande de créditos buenos clasificados como malos de lo necesario. Sin embargo hemos comprobado con el ejercicio como utilizar reglas de visualización manteniendo consistencia en las mismas para tomar decisiones en la construcción de un modelo heurístico que otorga puntos de manera sencilla y puede ser evaluado con una función de costos.
| AMV/AMV Unidad 7 Caso Practico.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 7장. 날짜와 시간 다루기
# 이 노트북을 주피터 노트북 뷰어(nbviewer.jupyter.org)로 보거나 구글 코랩(colab.research.google.com)에서 실행할 수 있습니다.
#
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://nbviewer.jupyter.org/github/rickiepark/machine-learning-with-python-cookbook/blob/master/07.ipynb"><img src="https://jupyter.org/assets/main-logo.svg" width="28" />주피터 노트북 뷰어로 보기</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/rickiepark/machine-learning-with-python-cookbook/blob/master/07.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
# </td>
# </table>
# ## 7.1 문자열을 날짜로 변환하기
# +
# Import the libraries.
import numpy as np
import pandas as pd
# Create three date strings (day-month-year, 12-hour clock).
date_strings = np.array(['03-04-2005 11:35 PM',
                         '23-05-2010 12:01 AM',
                         '04-09-2009 09:09 PM'])
# Convert each string to a pandas Timestamp object.
[pd.to_datetime(date, format='%d-%m-%Y %I:%M %p') for date in date_strings]
# -
# Convert again; with errors="ignore" a parse failure returns the original
# string instead of raising.  NOTE(review): errors="ignore" is deprecated
# from pandas 2.2 -- confirm the pandas version in use.
[pd.to_datetime(date, format="%d-%m-%Y %I:%M %p", errors="ignore")
 for date in date_strings]
# ## 붙임
pd.to_datetime(date_strings)
# ## 7.2 시간대 다루기
# +
# Import the library.
import pandas as pd
# Create a timezone-aware Timestamp directly.
pd.Timestamp('2017-05-01 06:00:00', tz='Europe/London')
# +
# Create a naive Timestamp.
date = pd.Timestamp('2017-05-01 06:00:00')
# Attach a time zone to it.
date_in_london = date.tz_localize('Europe/London')
# Inspect the result.
date_in_london
# -
# Convert to another time zone.
date_in_london.tz_convert('Africa/Abidjan')
# +
# Create three month-end dates.
dates = pd.Series(pd.date_range('2/2/2002', periods=3, freq='M'))
# Localize the whole Series.
dates.dt.tz_localize('Africa/Abidjan')
# +
# Import the full list of pytz time zone names.
from pytz import all_timezones
# Look at the first two.
all_timezones[0:2]
# -
# -
# ## 붙임
# Time zones may also be given as dateutil strings (prefix "dateutil/") or
# as pytz timezone objects.  BUG FIX: the original used 'dateutil/Aisa/Seoul',
# which is not a valid zone name ('Aisa' is a typo of 'Asia') and raises.
dates.dt.tz_localize('dateutil/Asia/Seoul')
# +
import pytz
tz = pytz.timezone('Asia/Seoul')
dates.dt.tz_localize(tz)
# -
# -
# ## 7.3 날짜와 시간 선택
# +
# Import the library.
import pandas as pd
# Create an empty data frame.
dataframe = pd.DataFrame()
# Create 100000 hourly datetimes.
dataframe['date'] = pd.date_range('1/1/2001', periods=100000, freq='H')
# Select rows between two datetimes with a boolean mask.
dataframe[(dataframe['date'] > '2002-1-1 01:00:00') &
          (dataframe['date'] <= '2002-1-1 04:00:00')]
# +
# Set the datetime column as the index.
dataframe = dataframe.set_index(dataframe['date'])
# Select rows between two datetimes by label slicing (both ends inclusive).
dataframe.loc['2002-1-1 01:00:00':'2002-1-1 04:00:00']
# -
# ## 7.4 날짜 데이터를 여러 특성으로 나누기
# +
# Import the library.
import pandas as pd
# Create an empty data frame.
dataframe = pd.DataFrame()
# Create 150 weekly dates (the original comment said five).
dataframe['date'] = pd.date_range('1/1/2001', periods=150, freq='W')
# Derive year, month, day, hour and minute features via the .dt accessor.
dataframe['year'] = dataframe['date'].dt.year
dataframe['month'] = dataframe['date'].dt.month
dataframe['day'] = dataframe['date'].dt.day
dataframe['hour'] = dataframe['date'].dt.hour
dataframe['minute'] = dataframe['date'].dt.minute
# Show the first three rows.
dataframe.head(3)
# -
# -
# ## 7.5 날짜 간의 차이를 계산하기
# +
# Import the library.
import pandas as pd
# Create an empty data frame.
dataframe = pd.DataFrame()
# Create two datetime features.
dataframe['Arrived'] = [pd.Timestamp('01-01-2017'), pd.Timestamp('01-04-2017')]
dataframe['Left'] = [pd.Timestamp('01-01-2017'), pd.Timestamp('01-06-2017')]
# Difference between the features: a Series of Timedeltas.
dataframe['Left'] - dataframe['Arrived']
# -
# Number of whole days between the features.  The vectorised `.dt.days`
# accessor replaces the original per-element Python generator
# (`pd.Series(delta.days for delta in ...)`): same values, one C-level pass.
(dataframe['Left'] - dataframe['Arrived']).dt.days
# ## 7.6 요일을 인코딩하기
# +
# Import the library.
import pandas as pd
# Create a Series of three month-end dates.
dates = pd.Series(pd.date_range("2/2/2002", periods=3, freq="M"))
# Weekday as a name.
dates.dt.day_name()
# -
# Weekday as an integer (Monday == 0).
dates.dt.weekday
# ## 7.7 시차 특성 만들기
# +
# Import the library.
import pandas as pd
# Create a data frame.
dataframe = pd.DataFrame()
# Five daily dates plus a price feature.
dataframe["dates"] = pd.date_range("1/1/2001", periods=5, freq="D")
dataframe["stock_price"] = [1.1,2.2,3.3,4.4,5.5]
# Lagged feature: value from one row earlier (first row becomes NaN).
dataframe["previous_days_stock_price"] = dataframe["stock_price"].shift(1)
# Inspect the data frame.
dataframe
# -
# ## 7.8 이동 시간 윈도 사용하기
# +
# Import the library.
import pandas as pd
# Create five month-end datetimes.
time_index = pd.date_range("01/01/2010", periods=5, freq="M")
# Create a data frame indexed by them.
dataframe = pd.DataFrame(index=time_index)
# Create a feature.
dataframe["Stock_Price"] = [1,2,3,4,5]
# Rolling mean over a two-observation window.
dataframe.rolling(window=2).mean()
# -
# ## 붙임
dataframe.ewm(alpha=0.5).mean()
# ## 7.9 시계열 데이터에서 누락된 값 다루기
# +
# Import the libraries.
import pandas as pd
import numpy as np
# Create five month-end datetimes.
time_index = pd.date_range("01/01/2010", periods=5, freq="M")
# Create a data frame indexed by them.
dataframe = pd.DataFrame(index=time_index)
# Create a feature with missing values.
dataframe["Sales"] = [1.0,2.0,np.nan,np.nan,5.0]
# Linearly interpolate the missing values.
dataframe.interpolate()
# -
# Forward-fill: propagate the last valid value forward.
dataframe.ffill()
# Back-fill: propagate the next valid value backward.
dataframe.bfill()
# Quadratic interpolation of the gap.
dataframe.interpolate(method="quadratic")
# Interpolate at most one value per gap, filling forward.
dataframe.interpolate(limit=1, limit_direction="forward")
| 07.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Absolute imports of a local demo package (my_package must be importable).
from my_package import mod2
from my_package.sub_package2 import sub_mod2
# Each call demonstrates which module a relatively-imported helper resolves
# to.  NOTE(review): the functions' actual output lives in my_package --
# confirm there; nothing about their behaviour is visible here.
mod2.func_same()
mod2.func_sub()
sub_mod2.func_parent()
sub_mod2.func_parent_sub()
| notebook/import_example_relative.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Local paths to the law-net repo and the CourtListener data dump.
# (This is a Python 2 notebook -- note the print statements further down.)
repo_directory = '/Users/iaincarmichael/Dropbox/Research/law/law-net/'
data_dir = '/Users/iaincarmichael/Documents/courtlistener/data/'
import numpy as np
import sys
import matplotlib.pyplot as plt
import glob
# text processing
from sklearn.metrics.pairwise import cosine_similarity
# graph package
import igraph as ig
# our code
sys.path.append(repo_directory + 'code/')
from setup_data_dir import setup_data_dir, make_subnetwork_directory
from pipeline.download_data import download_bulk_resource, download_master_edgelist, download_scdb
from helpful_functions import case_info
sys.path.append(repo_directory + 'vertex_metrics_experiment/code/')
from bag_of_words import *
# which network to download data for
network_name = 'scotus' # 'federal', 'ca1', etc
# some sub directories that get used
raw_dir = data_dir + 'raw/'
subnet_dir = data_dir + network_name + '/'
text_dir = subnet_dir + 'textfiles/'
nlp_dir = subnet_dir + 'nlp/'
# jupyter notebook settings
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# -
# Load the citation network graph for the chosen sub-network.
G = ig.Graph.Read_GraphML(subnet_dir + network_name +'_network.graphml')
G.summary()
# Redundant re-imports (already imported above) -- harmless.
from bag_of_words import *
from sklearn.metrics.pairwise import cosine_similarity
# load_tf_idf comes from bag_of_words via the star import; presumably it
# returns the sparse TF-IDF matrix plus an opinion-id -> row-index map --
# confirm in bag_of_words.
tfidf_matrix, op_id_to_bow_id = load_tf_idf(nlp_dir)
tfidf_matrix
# +
# Sample M random opinions and compute all pairwise cosine similarities.
M = 10000
indices = np.random.choice(range(tfidf_matrix.shape[0]), size=M, replace=False)
sim_mat = cosine_similarity(tfidf_matrix[indices, :])
# Keep each unordered pair once (i < j): extract the strict upper triangle
# in one vectorised step instead of the original O(M^2) Python double loop
# (~100M iterations at M=10000).  Row-major order matches the old loop, and
# downstream consumers (plt.hist, np.mean/median) accept the ndarray.
iu, ju = np.triu_indices(M, k=1)
sims = sim_mat[iu, ju]
# -
# Histogram of pairwise similarities over [0, 1] in 100 bins.
bins = np.linspace(0, 1, 101)
h = plt.hist(sims, bins=bins)
# +
# Summary statistics; the mode is taken as the left edge of the tallest bin.
mean = np.mean(sims)
median = np.median(sims)
values = h[0]
mode = bins[np.argmax(values)]
# -
print mean
print median
print mode
| vertex_metrics_experiment/hospital_nlp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Imports
import sys
sys.path.append('../../')
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from sparknlp.annotator import *
from sparknlp.common import RegexRule
from sparknlp.base import DocumentAssembler, Finisher
# -
# NOTE(review): `spark` is assumed to be a live SparkSession supplied by the
# notebook environment -- it is never created here; confirm before running
# standalone.  Load 10k rows of the sentiment test parquet and cache them.
data = spark. \
    read. \
    parquet("../../../../spark-nlp/src/test/resources/sentiment.parquet"). \
    limit(10000)
data.cache()
data.count()
# +
# Annotator chain: raw text -> sentences -> tokens -> lemmas -> dictionary
# sentiment score -> finished string column.  NOTE(review):
# SentenceDetectorModel / SentimentDetectorModel / setDictPath are Spark NLP
# 1.x-era names -- confirm the installed sparknlp version.
document_assembler = DocumentAssembler() \
    .setInputCol("text")
sentence_detector = SentenceDetectorModel() \
    .setInputCols(["document"]) \
    .setOutputCol("sentence")
tokenizer = RegexTokenizer() \
    .setInputCols(["sentence"]) \
    .setOutputCol("token")
lemmatizer = Lemmatizer() \
    .setInputCols(["token"]) \
    .setOutputCol("lemma") \
    .setDictionary("../../../src/test/resources/lemma-corpus/AntBNC_lemmas_ver_001.txt")
sentiment_detector = SentimentDetectorModel() \
    .setInputCols(["lemma", "sentence"]) \
    .setOutputCol("sentiment_score") \
    .setDictPath("../../../src/test/resources/sentiment-corpus/default-sentiment-dict.txt")
finisher = Finisher() \
    .setInputCols(["sentiment_score"]) \
    .setOutputCols(["sentiment"])
# -
# Fit and run the pipeline, then show rows not predicted 'positive'.
pipeline = Pipeline(stages=[document_assembler, sentence_detector, tokenizer, lemmatizer, sentiment_detector, finisher])
model = pipeline.fit(data)
result = model.transform(data)
result.filter("sentiment != 'positive'").show()
| JupyterNotebooks/Spark NLP/dictionary-sentiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## PDE
# The TTI wave equation for the square slowness m, thomsen parametersd $\epsilon$, $\delta$, $\theta$ and $\Phi$ (dip and asymuth) and a source q is given in 3D by :
#
# \begin{cases}
# &m \frac{d^2 p(x,t)}{dt^2} - (1+2\epsilon)(G_{\bar{x}\bar{x}} + G_{\bar{y}\bar{y}}) p(x,t) - \sqrt{(1+2\delta)}G_{\bar{z}\bar{z}} r(x,t) =q \\
# &m \frac{d^2 r(x,t)}{dt^2} - \sqrt{(1+2\delta)}(G_{\bar{x}\bar{x}} + G_{\bar{y}\bar{y}}) p(x,t) - G_{\bar{z}\bar{z}} r(x,t) =q \\
# &u(.,0) = 0 \\
# &\frac{d u(x,t)}{dt}|_{t=0} = 0
# \end{cases}
#
#
# where
# \begin{cases}
# G_{\bar{x}\bar{x}} & = cos(\phi)^2 cos(\theta)^2 \frac{d^2}{dx^2} +sin(\phi)^2 cos(\theta)^2 \frac{d^2}{dy^2}+ sin(\theta)^2 \frac{d^2}{dz^2} + sin(2\phi) cos(\theta)^2 \frac{d^2}{dx dy} - sin(\phi) sin(2\theta) \frac{d^2}{dy dz} -cos(\phi) sin(2\theta) \frac{d^2}{dx dz} \\
# G_{\bar{y}\bar{y}} & = sin(\phi)^2 \frac{d^2}{dx^2} +cos(\phi)^2 \frac{d^2}{dy^2} - sin(2\phi)^2 \frac{d^2}{dx dy}\\
# G_{\bar{z}\bar{z}} & = cos(\phi)^2 sin(\theta)^2 \frac{d^2}{dx^2} +sin(\phi)^2 sin(\theta)^2 \frac{d^2}{dy^2}+ cos(\theta)^2 \frac{d^2}{dz^2} + sin(2\phi) sin(\theta)^2 \frac{d^2}{dx dy} + sin(\phi) sin(2\theta) \frac{d^2}{dy dz} +cos(\phi) sin(2\theta) \frac{d^2}{dx dz} \\
# \end{cases}
# with zero initial conditions to guarantee uniqueness of the solution
#
# This corresponds to a velocity model where the velocity in the z direction differs from the velocity in the xy plane
from sympy import *
from sympy.abc import *
from sympy.galgebra.ga import *
import numpy as np
from numpy import linalg as LA
from __future__ import print_function
from functools import reduce
from operator import mul
init_printing()
# +
# Unknown fields of the split TTI system: main field p and auxiliary r.
# NOTE(review): M(x,y,z), Q(...), epsilon(...), theta(...) etc. rely on an
# older SymPy where calling an sympy.abc symbol builds an applied Function --
# confirm the SymPy version before re-running.
p=Function('p')
r=Function('r')
# s = time step, h = grid spacing, x/y/z = spatial coordinates.
s,h,x,y,z = symbols('s h x y z')
m=M(x,y,z)      # squared slowness
q=Q(x,y,z,t)    # source term
d=D(x,y,z,t)    # NOTE(review): rebound later to sin(phi) -- appears unused as a field
e=E(x,y,z)
A=epsilon(x,y,z) # (1 + 2*epsilon) but make the symbolic simpler
B=delta(x,y,z) # sqrt(1 + 2*delta) but make the symbolic simpler (original comment said epsilon)
Th=theta(x,y,z)  # dip angle
Ph=phi(x,y,z)    # azimuth angle
# +
order=1
# Symmetric 1-D sampling points (order points either side) along x, y, z.
indxx = [(x + i * h) for i in range(-order, order + 1)]
indyy = [(y + i * h) for i in range(-order, order + 1)]
indzz = [(z + i * h) for i in range(-order, order + 1)]
# +
# Second time derivatives (3-point) and backward first time derivatives.
dttp=as_finite_diff(p(x,y,z,t).diff(t,t), [t-s,t, t+s])
dttr=as_finite_diff(r(x,y,z,t).diff(t,t), [t-s,t, t+s])
dtp=as_finite_diff(p(x,y,z,t).diff(t), [t-s,t])
dtr=as_finite_diff(r(x,y,z,t).diff(t), [t-s,t])
# Spacial finite differences can easily be extended to higher order by increasing the list of sampling point in the next expression.
# Be sure to keep this stencil symmetric and everything else in the notebook will follow.
dxxp=as_finite_diff(p(x,y,z,t).diff(x,x), indxx)
dyyp=as_finite_diff(p(x,y,z,t).diff(y,y), indyy)
dzzp=as_finite_diff(p(x,y,z,t).diff(z,z), indzz)
dxxr=as_finite_diff(r(x,y,z,t).diff(x,x), indxx)
dyyr=as_finite_diff(r(x,y,z,t).diff(y,y), indyy)
dzzr=as_finite_diff(r(x,y,z,t).diff(z,z), indzz)
# My 4th order stencil for cross derivatives
# NOTE(review): each expression combines seven neighbouring samples of p (or
# r) into a cross-derivative approximation; the claimed accuracy order is not
# verified here -- confirm against the derivation.
dxzp = .5/(h**2)*(-2*p(x,y,z,t) + p(x,y,z+h,t) + p(x,y,z-h,t) - p(x+h,y,z-h,t) + p(x-h,y,z,t) - p(x-h,y,z+h,t) + p(x+h,y,z,t))
dxzr = .5/(h**2)*(-2*r(x,y,z,t) + r(x,y,z+h,t) + r(x,y,z-h,t) - r(x+h,y,z-h,t) + r(x-h,y,z,t) - r(x-h,y,z+h,t) + r(x+h,y,z,t))
dxyp = .5/(h**2)*(-2*p(x,y,z,t) + p(x,y+h,z,t) + p(x,y-h,z,t) - p(x+h,y-h,z,t) + p(x-h,y,z,t) - p(x-h,y+h,z,t) + p(x+h,y,z,t))
dxyr = .5/(h**2)*(-2*r(x,y,z,t) + r(x,y+h,z,t) + r(x,y-h,z,t) - r(x+h,y-h,z,t) + r(x-h,y,z,t) - r(x-h,y+h,z,t) + r(x+h,y,z,t))
dyzp = .5/(h**2)*(-2*p(x,y,z,t) + p(x,y,z+h,t) + p(x,y,z-h,t) - p(x,y+h,z-h,t) + p(x,y-h,z,t) - p(x,y-h,z+h,t) + p(x,y+h,z,t))
dyzr = .5/(h**2)*(-2*r(x,y,z,t) + r(x,y,z+h,t) + r(x,y,z-h,t) - r(x,y+h,z-h,t) + r(x,y-h,z,t) - r(x,y-h,z+h,t) + r(x,y+h,z,t))
# -
# add the *arg input and done
def cross_deriv(*args, **kwargs):
    """Symmetric finite-difference stencil for a mixed second derivative.

    Builds an approximation of d^2/(da db) applied to the product of
    *args, where (a, b) = dims, by averaging a right-shifted and a
    left-shifted tensor-product stencil (hence the 0.25 factors) so the
    result stays centred.

    Keyword arguments:
        order -- requested accuracy order (default 1)
        dims  -- pair of independent variables to differentiate along
                 (default (x, y))
        diff  -- grid-spacing symbol (default h)

    Returns the SymPy expression for the stencil.
    """
    deriv=0
    order = kwargs.get('order', 1)
    dims = kwargs.get('dims', (x, y))
    diff = kwargs.get('diff', h)
    assert(isinstance(dims, tuple) and len(dims) == 2)
    # Sample offsets; the (order<4) terms shift the window by one point for
    # low orders.  NOTE(review): verify the window against the hand-written
    # 4th-order stencils above for order >= 4.
    ind1r = [(dims[0] + i * diff) for i in range(-int((order) / 2) + 1 - (order<4), int((order + 1) / 2) + 2 - (order<4))]
    ind2r = [(dims[1] + i * diff) for i in range(-int((order) / 2) + 1 - (order<4), int((order + 1) / 2) + 2 - (order<4))]
    ind1l = [(dims[0] - i * diff) for i in range(-int((order) / 2) + 1 - (order<4), int((order + 1) / 2) + 2 - (order<4))]
    ind2l = [(dims[1] - i * diff) for i in range(-int((order) / 2) + 1 - (order<4), int((order + 1) / 2) + 2 - (order<4))]
    # First-derivative weights per dimension; the last row of the last entry
    # holds the highest-order coefficients.
    cx = finite_diff_weights(1, ind1r, dims[0])
    cx = cx[-1][-1]
    cy = finite_diff_weights(1, ind2r, dims[1])
    cy = cy[-1][-1]
    # Tensor product of the two 1-D stencils, averaged with its mirrored
    # (left-shifted) counterpart.
    for i in range(0,len(ind1r)):
        for j in range(0,len(ind2r)):
            var1 = [a.subs({dims[0]: ind1r[i], dims[1]: ind2r[j]}) for a in args]
            var2 = [a.subs({dims[0]: ind1l[i], dims[1]: ind2l[j]}) for a in args]
            deriv += .25 * cy[j] * cx[i] * reduce(mul, var1, 1) + .25 * cy[len(ind2l)-j-1] * cx[len(ind1l)-i-1] * reduce(mul, var2, 1)
    return deriv
# Example: cross-derivative stencil of p in the (x, z) plane.
cross_deriv(p(x, y, z, t), order=1, dims=(x,z))
# Rotated second-derivative operators; a = cos(theta), b = sin(theta),
# c = cos(phi), d = sin(phi) are bound two cells below, so e.g.
# 2*d*c = sin(2*phi) and 2*b*a = sin(2*theta).  NOTE(review): Gyyp uses
# b**2 (= sin(theta)^2) for the d2/dx2 coefficient and (2*d*c)**2 for the
# cross term, where the markdown formulas above have sin(phi)^2 and
# sin(2*phi)^2 -- confirm which form is intended.
Gxxp = c**2 * a**2 * dxxp + d**2 * a**2 * dyyp + b**2 * dzzp + 2 * d * c * a**2 * dxyp - d * 2 * b * a * dyzp - c * 2 * b * a * dxzp
Gyyp = b**2 * dxxp + c**2 * dyyp - (2 * d * c)**2 * dxyp
Gzzr = c**2 * b**2 * dxxr + d**2 * b**2 * dyyr + a**2 * dzzr + 2 * d * c * b**2 * dxyr + d * 2 * b * a * dyzr + c * 2 * b * a * dxzr
# +
def Bhaskarasin(angle):
    """Bhaskara I's rational approximation of sin(angle), angle in radians.

    sin x ~ 16 x (pi - x) / (5 pi^2 - 4 x (pi - x)), with pi rounded to
    3.14 and 5 pi^2 to 49.34; abs() mirrors the formula onto negative
    angles.  Keeps the generated stencil code free of trig calls.
    """
    span = 3.14 - abs(angle)
    return 16 * angle * span / (49.34 - 4 * abs(angle) * span)

def Bhaskaracos(angle):
    """Approximate cos(angle) as the sine shifted by ~pi/2 (1.57)."""
    return Bhaskarasin(angle + 1.57)
a = Bhaskaracos(Th)   # cos(theta)
b = Bhaskarasin(Th)   # sin(theta)
c = Bhaskaracos(Ph)   # cos(phi)
# BUG FIX: the original set d = Bhaskaracos(Ph), making d identical to c, so
# every sin(2*phi) = 2*d*c factor degenerated to 2*cos(phi)^2 and the
# sin(phi)^2 coefficients (e.g. d**2*a**2*dyyp in Gxxp) became cos(phi)^2.
d = Bhaskarasin(Ph)   # sin(phi)
# -
# Inspect the four rotation coefficients (rational trig approximations).
print(a)
print(b)
print(c)
print(d)
# Damped second-order time update solved for p(t+s).
# NOTE(review): `damp` is not defined anywhere in this chunk -- presumably a
# damping-mask symbol from an earlier cell; confirm before executing.
stencilp = 2 * s**2 / (2 * m + s * damp) * (2 * m / s**2 * p(x, y, z, t) + (s * damp - 2 * m) / (2 * s**2) * p(x, y, z, t-s) + A * (Gxxp + Gyyp) + B * Gzzr)
# +
#stencilp = simplify(expand(stencilp))
# +
#cse((stencilp))
# -
# Matching update for the auxiliary field r.
stencilr = 2 * s**2 / (2 * m + s * damp) * (2 * m / s**2 * r(x, y, z, t) + (s * damp - 2 * m) / (2 * s**2) * r(x, y, z, t-s) + B * (Gxxp + Gyyp) + Gzzr)
# +
#stencilr=factor(simplify(expand(stencilr)))
# -
# Common-subexpression elimination over both updates (for code generation).
cse((stencilp,stencilr))
| AcousticFWI/TTI_3d_simplified.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import glob
# Directory containing the QM9 .xyz files; sort for a deterministic order.
DATA_DIR = '/Users/yuji/BigData/QM9/dsgdb9nsd.xyz/'
filenames = glob.glob(DATA_DIR + '*')
filenames.sort()
# +
# Parse every XYZ file: line 0 holds the atom count Na, line 1 the scalar
# properties (tab-separated, field 8 = the gap), and line Na+3 the SMILES.
SMILES_list = []
gap_list = []
for filename in filenames :
    with open(filename) as f:
        s = f.read()
    # Split once per file; the original called s.split('\n') three times.
    lines = s.split('\n')
    Na = int(lines[0])
    gap_list.append(float(lines[1].split('\t')[8]))
    SMILES_list.append(lines[Na+3].split('\t')[0])
# -
# Assemble the (SMILES, gap) table and persist it as CSV.
df = pd.DataFrame([SMILES_list,gap_list]).T
df.columns = ['SMILES','gap']
df.to_csv('data/SMILES_data.csv')
# RDKit (third-party) is only needed for the sample grid image below.
from rdkit import Chem
from rdkit.Chem import Draw
import matplotlib.pyplot as plt
# Draw nine random molecules as a 3x3 grid labelled with their SMILES.
mols = []
indeces = np.random.randint(0,len(SMILES_list),(9))
smiles_list = []
for i in indeces:
    mols.append(Chem.MolFromSmiles(SMILES_list[i]))
    smiles_list.append(SMILES_list[i])
img = Draw.MolsToGridImage(mols,molsPerRow=3, subImgSize=(300,300),legends=smiles_list)
img.save('img_mol/sample.png')
| MakeDataSet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
# <script>
# window.dataLayer = window.dataLayer || [];
# function gtag(){dataLayer.push(arguments);}
# gtag('js', new Date());
#
# gtag('config', 'UA-59152712-8');
# </script>
#
# # Tutorial-IllinoisGRMHD: Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C
#
# ## Authors: <NAME> & <NAME>
#
# <font color='red'>**This module is currently under development**</font>
#
# ## In this tutorial module we explain the construction of the right-hand side of the evolution equations of $\left[\sqrt{\gamma}\Phi\right]$ and add gauge terms to the right-hand side of the evolution equations of $A_{i}$
#
# ### Required and recommended citations:
#
# * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>., and <NAME>. IllinoisGRMHD: an open-source, user-friendly GRMHD code for dynamical spacetimes. Class. Quantum Grav. 32 (2015) 175009. ([arxiv:1501.07276](http://arxiv.org/abs/1501.07276)).
# * **(Required)** <NAME>., <NAME>., <NAME>., <NAME>. Primitive Variable Solvers for Conservative General Relativistic Magnetohydrodynamics. Astrophysical Journal, 641, 626 (2006) ([astro-ph/0512420](https://arxiv.org/abs/astro-ph/0512420)).
# * **(Recommended)** <NAME>., <NAME>., <NAME>. An efficient shock-capturing central-type scheme for multidimensional relativistic flows - II. Magnetohydrodynamics. A&A 400 (2) 397-413 (2003). DOI: 10.1051/0004-6361:20021641 ([astro-ph/0210618](https://arxiv.org/abs/astro-ph/0210618)).
# <a id='toc'></a>
#
# # Table of Contents
# $$\label{toc}$$
#
# This module is organized as follows
#
# 0. [Step 0](#src_dir): **Source directory creation**
# 1. [Step 1](#introduction): **Introduction**
# 1. [Step 2](#lorenz_psi6phi_rhs__add_gauge_terms_to_a_i_rhs__c): **`Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C`**
# 1. [Step n-1](#code_validation): **Code validation**
# 1. [Step n](#latex_pdf_output): **Output this notebook to $\LaTeX$-formatted PDF file**
# <a id='src_dir'></a>
#
# # Step 0: Source directory creation \[Back to [top](#toc)\]
# $$\label{src_dir}$$
#
# We will now use the [cmdline_helper.py NRPy+ module](Tutorial-Tutorial-cmdline_helper.ipynb) to create the source directory within the `IllinoisGRMHD` NRPy+ directory, if it does not exist yet.
# +
# Step 0: Creation of the IllinoisGRMHD source directory
# Step 0a: Add NRPy's directory to the path
# https://stackoverflow.com/questions/16780014/import-file-from-parent-directory
import os,sys
nrpy_dir_path = os.path.join("..","..")
if nrpy_dir_path not in sys.path:
    sys.path.append(nrpy_dir_path)
# Step 0b: Load up cmdline_helper and create the directory
# (cmd.mkdir is NRPy's create-if-absent wrapper -- confirm in cmdline_helper).
import cmdline_helper as cmd
IGM_src_dir_path = os.path.join("..","src")
cmd.mkdir(IGM_src_dir_path)
# Step 0c: Create the output file path for the %%writefile cell below.
outfile_path__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs__C = os.path.join(IGM_src_dir_path,"Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C")
# -
# <a id='introduction'></a>
#
# # Step 1: Introduction \[Back to [top](#toc)\]
# $$\label{introduction}$$
# <a id='lorenz_psi6phi_rhs__add_gauge_terms_to_a_i_rhs__c'></a>
#
# # Step 2: `Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C` \[Back to [top](#toc)\]
# $$\label{lorenz_psi6phi_rhs__add_gauge_terms_to_a_i_rhs__c}$$
# +
# %%writefile $outfile_path__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs__C
static inline CCTK_REAL avg(CCTK_REAL f[PLUS2+1][PLUS2+1][PLUS2+1],int imin,int imax, int jmin,int jmax, int kmin,int kmax);
static void Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs(const cGH *cctkGH,const int *cctk_lsh,const int *cctk_nghostzones,CCTK_REAL *dX,CCTK_REAL **in_vars,CCTK_REAL *psi6phi,
CCTK_REAL *shiftx_iphjphkph,CCTK_REAL *shifty_iphjphkph,CCTK_REAL *shiftz_iphjphkph,
CCTK_REAL *alpha_iphjphkph,CCTK_REAL *alpha_Phi_minus_betaj_A_j_iphjphkph,CCTK_REAL *alpha_sqrtg_Ax_interp,
CCTK_REAL *alpha_sqrtg_Ay_interp,CCTK_REAL *alpha_sqrtg_Az_interp,
CCTK_REAL *psi6phi_rhs,CCTK_REAL *Ax_rhs,CCTK_REAL *Ay_rhs,CCTK_REAL *Az_rhs) {
DECLARE_CCTK_PARAMETERS;
/* Compute \partial_t psi6phi = -\partial_i ( \alpha psi^6 A^i - psi6phi \beta^i)
* (Eq 13 of http://arxiv.org/pdf/1110.4633.pdf), using Lorenz gauge.
* Note that the RHS consists of a shift advection term on psi6phi and
* a term depending on the vector potential.
* psi6phi is defined at (i+1/2,j+1/2,k+1/2), but instead of reconstructing
* to compute the RHS of \partial_t psi6phi, we instead use standard
* interpolations.
*/
CCTK_REAL dXm1=1.0/dX[0];
CCTK_REAL dYm1=1.0/dX[1];
CCTK_REAL dZm1=1.0/dX[2];
// The stencil here is {-1,1},{-1,1},{-1,1} for x,y,z directions, respectively.
// Note that ALL input variables are defined at ALL gridpoints, so no
// worries about ghostzones.
#pragma omp parallel for
for(int k=1;k<cctk_lsh[2]-1;k++) for(int j=1;j<cctk_lsh[1]-1;j++) for(int i=1;i<cctk_lsh[0]-1;i++) {
int index=CCTK_GFINDEX3D(cctkGH,i,j,k);
CCTK_REAL INTERP_VARS[MAXNUMINTERP][PLUS2+1][PLUS2+1][PLUS2+1];
// First compute \partial_j \alpha \sqrt{\gamma} A^j (RHS of \partial_i psi6phi)
// FIXME: Would be much cheaper & easier to unstagger A_i, raise, then interpolate A^i.
// However, we keep it this way to be completely compatible with the original
// Illinois GRMHD thorn, called mhd_evolve.
//
//Step 1) j=x: Need to raise A_i, but to do that, we must have all variables at the same gridpoints:
// The goal is to compute \partial_j (\alpha \sqrt{\gamma} A^j) at (i+1/2,j+1/2,k+1/2)
// We do this by first interpolating (RHS1x) = (\alpha \sqrt{\gamma} A^x) at
// (i,j+1/2,k+1/2)and (i+1,j+1/2,k+1/2), then taking \partial_x (RHS1x) =
// [ RHS1x(i+1,j+1/2,k+1/2) - RHS1x(i,j+1/2,k+1/2) ]/dX.
// First bring gup's, psi, and alpha to (i,j+1/2,k+1/2):
int num_vars_to_interp;
int vars_to_interpolate[MAXNUMINTERP] = {GUPXXI,GUPXYI,GUPXZI,GUPYYI,GUPYZI,GUPZZI,LAPM1I,PSII,SHIFTXI,SHIFTYI,SHIFTZI};
num_vars_to_interp = 11;
// We may set interp_limits to be more general than we need.
int interp_limits[6] = {-1,1,-1,1,-1,1}; SET_INDEX_ARRAYS_3DBLOCK(interp_limits);
//SET_INDEX_ARRAYS_3DBLOCK(interp_limits);
for(int ww=0;ww<num_vars_to_interp;ww++) {
int whichvar=vars_to_interpolate[ww];
// Read in variable at interp. stencil points from main memory, store in INTERP_VARS.
for(int kk=PLUS0;kk<=PLUS1;kk++) for(int jj=PLUS0;jj<=PLUS1;jj++) for(int ii=PLUS0;ii<=PLUS1;ii++) {
INTERP_VARS[whichvar][kk][jj][ii] = in_vars[whichvar][index_arr_3DB[kk][jj][ii]]; }
}
// Next set \alpha at (i+1/2,j+1/2,k+1/2). Will come in handy when computing damping term later.
alpha_iphjphkph[index] = avg(INTERP_VARS[LAPM1I] , PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1)+1.0;
//A_x needs a stencil s.t. interp_limits={0,1,-1,1,-1,1}:
for(int kk=MINUS1;kk<=PLUS1;kk++) for(int jj=MINUS1;jj<=PLUS1;jj++) for(int ii=PLUS0;ii<=PLUS1;ii++) {
INTERP_VARS[A_XI][kk][jj][ii] = in_vars[A_XI][index_arr_3DB[kk][jj][ii]]; }
//A_y needs a stencil s.t. interp_limits={-1,1,0,1,-1,1}:
for(int kk=MINUS1;kk<=PLUS1;kk++) for(int jj=PLUS0;jj<=PLUS1;jj++) for(int ii=MINUS1;ii<=PLUS1;ii++) {
INTERP_VARS[A_YI][kk][jj][ii] = in_vars[A_YI][index_arr_3DB[kk][jj][ii]]; }
//A_z needs a stencil s.t. interp_limits={-1,1,-1,1,0,1}:
for(int kk=PLUS0;kk<=PLUS1;kk++) for(int jj=MINUS1;jj<=PLUS1;jj++) for(int ii=MINUS1;ii<=PLUS1;ii++) {
INTERP_VARS[A_ZI][kk][jj][ii] = in_vars[A_ZI][index_arr_3DB[kk][jj][ii]]; }
// FIRST DO A^X TERM (interpolate to (i,j+1/2,k+1/2) )
// \alpha \sqrt{\gamma} A^x = \alpha psi^6 A^x (RHS of \partial_i psi6phi)
// Note that gupij is \tilde{\gamma}^{ij}, so we need to multiply by \psi^{-4}.
CCTK_REAL gupxx_jphkph = avg(INTERP_VARS[GUPXXI], PLUS0,PLUS0, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL gupxy_jphkph = avg(INTERP_VARS[GUPXYI], PLUS0,PLUS0, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL gupxz_jphkph = avg(INTERP_VARS[GUPXZI], PLUS0,PLUS0, PLUS0,PLUS1, PLUS0,PLUS1);
for(int kk=PLUS0;kk<=PLUS1;kk++) for(int jj=PLUS0;jj<=PLUS1;jj++) for(int ii=PLUS0;ii<=PLUS1;ii++) {
CCTK_REAL Psi2 = INTERP_VARS[PSII][kk][jj][ii]*INTERP_VARS[PSII][kk][jj][ii];
CCTK_REAL alpha = INTERP_VARS[LAPM1I][kk][jj][ii]+1.0;
INTERP_VARS[LAPSE_PSI2I][kk][jj][ii]=alpha*Psi2;
INTERP_VARS[LAPSE_OVER_PSI6I][kk][jj][ii]=alpha/(Psi2*Psi2*Psi2);
}
CCTK_REAL lapse_Psi2_jphkph = avg(INTERP_VARS[LAPSE_PSI2I], PLUS0,PLUS0, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL A_x_jphkph = avg(INTERP_VARS[A_XI], PLUS0,PLUS0, PLUS0,PLUS0, PLUS0,PLUS0); // @ (i,j+1/2,k+1/2)
CCTK_REAL A_y_jphkph = avg(INTERP_VARS[A_YI],MINUS1,PLUS0, PLUS0,PLUS1, PLUS0,PLUS0); // @ (i+1/2,j,k+1/2)
CCTK_REAL A_z_jphkph = avg(INTERP_VARS[A_ZI],MINUS1,PLUS0, PLUS0,PLUS0, PLUS0,PLUS1); // @ (i+1/2,j+1/2,k)
alpha_sqrtg_Ax_interp[index] = lapse_Psi2_jphkph*
( gupxx_jphkph*A_x_jphkph + gupxy_jphkph*A_y_jphkph + gupxz_jphkph*A_z_jphkph );
// DO A^Y TERM (interpolate to (i+1/2,j,k+1/2) )
// \alpha \sqrt{\gamma} A^y = \alpha psi^6 A^y (RHS of \partial_i psi6phi)
// Note that gupij is \tilde{\gamma}^{ij}, so we need to multiply by \psi^{-4}.
CCTK_REAL gupxy_iphkph = avg(INTERP_VARS[GUPXYI], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1);
CCTK_REAL gupyy_iphkph = avg(INTERP_VARS[GUPYYI], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1);
CCTK_REAL gupyz_iphkph = avg(INTERP_VARS[GUPYZI], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1);
CCTK_REAL lapse_Psi2_iphkph = avg(INTERP_VARS[LAPSE_PSI2I], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1);
//CCTK_REAL lapse_iphkph = avg(INTERP_VARS[LAPM1I], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1)+1.0;
//CCTK_REAL psi_iphkph = avg(INTERP_VARS[PSII ], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS1);
CCTK_REAL A_x_iphkph = avg(INTERP_VARS[A_XI], PLUS0,PLUS1,MINUS1,PLUS0, PLUS0,PLUS0); // @ (i,j+1/2,k+1/2)
CCTK_REAL A_y_iphkph = avg(INTERP_VARS[A_YI], PLUS0,PLUS0, PLUS0,PLUS0, PLUS0,PLUS0); // @ (i+1/2,j,k+1/2)
CCTK_REAL A_z_iphkph = avg(INTERP_VARS[A_ZI], PLUS0,PLUS0,MINUS1,PLUS0, PLUS0,PLUS1); // @ (i+1/2,j+1/2,k)
alpha_sqrtg_Ay_interp[index] = lapse_Psi2_iphkph*
( gupxy_iphkph*A_x_iphkph + gupyy_iphkph*A_y_iphkph + gupyz_iphkph*A_z_iphkph );
// DO A^Z TERM (interpolate to (i+1/2,j+1/2,k) )
// \alpha \sqrt{\gamma} A^z = \alpha psi^6 A^z (RHS of \partial_i psi6phi)
// Note that gupij is \tilde{\gamma}^{ij}, so we need to multiply by \psi^{-4}.
CCTK_REAL gupxz_iphjph = avg(INTERP_VARS[GUPXZI], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0);
CCTK_REAL gupyz_iphjph = avg(INTERP_VARS[GUPYZI], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0);
CCTK_REAL gupzz_iphjph = avg(INTERP_VARS[GUPZZI], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0);
//CCTK_REAL lapse_iphjph = avg(INTERP_VARS[LAPM1I], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0)+1.0;
//CCTK_REAL psi_iphjph = avg(INTERP_VARS[PSII ], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0);
CCTK_REAL lapse_Psi2_iphjph = avg(INTERP_VARS[LAPSE_PSI2I], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS0);
CCTK_REAL A_x_iphjph = avg(INTERP_VARS[A_XI], PLUS0,PLUS1, PLUS0,PLUS0,MINUS1,PLUS0); // @ (i,j+1/2,k+1/2)
CCTK_REAL A_y_iphjph = avg(INTERP_VARS[A_YI], PLUS0,PLUS0, PLUS0,PLUS1,MINUS1,PLUS0); // @ (i+1/2,j,k+1/2)
CCTK_REAL A_z_iphjph = avg(INTERP_VARS[A_ZI], PLUS0,PLUS0, PLUS0,PLUS0, PLUS0,PLUS0); // @ (i+1/2,j+1/2,k)
alpha_sqrtg_Az_interp[index] = lapse_Psi2_iphjph*
( gupxz_iphjph*A_x_iphjph + gupyz_iphjph*A_y_iphjph + gupzz_iphjph*A_z_iphjph );
// Next set \alpha \Phi - \beta^j A_j at (i+1/2,j+1/2,k+1/2):
// We add a "L" suffix to shifti_iphjphkph to denote "LOCAL", as we set
// shifti_iphjphkph[] gridfunction below.
CCTK_REAL shiftx_iphjphkphL = avg(INTERP_VARS[SHIFTXI], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL shifty_iphjphkphL = avg(INTERP_VARS[SHIFTYI], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL shiftz_iphjphkphL = avg(INTERP_VARS[SHIFTZI], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1);
CCTK_REAL lapse_over_Psi6_iphjphkphL = avg(INTERP_VARS[LAPSE_OVER_PSI6I], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1);
//CCTK_REAL psi_iphjphkph = avg(INTERP_VARS[PSII ], PLUS0,PLUS1, PLUS0,PLUS1, PLUS0,PLUS1);
//CCTK_REAL psi2_iphjphkph= psi_iphjphkph*psi_iphjphkph;
//CCTK_REAL psi6_iphjphkph= psi2_iphjphkph*psi2_iphjphkph*psi2_iphjphkph;
CCTK_REAL A_x_iphjphkph = avg(INTERP_VARS[A_XI], PLUS0,PLUS1, PLUS0,PLUS0, PLUS0,PLUS0); // @ (i,j+1/2,k+1/2)
CCTK_REAL A_y_iphjphkph = avg(INTERP_VARS[A_YI], PLUS0,PLUS0, PLUS0,PLUS1, PLUS0,PLUS0); // @ (i+1/2,j,k+1/2)
CCTK_REAL A_z_iphjphkph = avg(INTERP_VARS[A_ZI], PLUS0,PLUS0, PLUS0,PLUS0, PLUS0,PLUS1); // @ (i+1/2,j+1/2,k)
alpha_Phi_minus_betaj_A_j_iphjphkph[index] = psi6phi[index]*lapse_over_Psi6_iphjphkphL
- (shiftx_iphjphkphL*A_x_iphjphkph + shifty_iphjphkphL*A_y_iphjphkph + shiftz_iphjphkphL*A_z_iphjphkph);
// Finally, save shifti_iphjphkph, for \partial_j \beta^j psi6phi
shiftx_iphjphkph[index]=shiftx_iphjphkphL;
shifty_iphjphkph[index]=shifty_iphjphkphL;
shiftz_iphjphkph[index]=shiftz_iphjphkphL;
}
// This loop requires two additional ghostzones in every direction. Hence the following loop definition:
#pragma omp parallel for
for(int k=cctk_nghostzones[2];k<cctk_lsh[2]-cctk_nghostzones[2];k++) for(int j=cctk_nghostzones[1];j<cctk_lsh[1]-cctk_nghostzones[1];j++) for(int i=cctk_nghostzones[0];i<cctk_lsh[0]-cctk_nghostzones[0];i++) {
int index = CCTK_GFINDEX3D(cctkGH,i,j,k);
// \partial_t A_i = [reconstructed stuff] + [gauge stuff],
// where [gauge stuff] = -\partial_i (\alpha \Phi - \beta^j A_j)
CCTK_REAL alpha_Phi_minus_betaj_A_j_iphjphkphL = alpha_Phi_minus_betaj_A_j_iphjphkph[index];
// - partial_i -> - (A_{i} - A_{i-1})/dX = (A_{i-1} - A_{i})/dX, for Ax
Ax_rhs[index] += dXm1*(alpha_Phi_minus_betaj_A_j_iphjphkph[CCTK_GFINDEX3D(cctkGH,i-1,j,k)] - alpha_Phi_minus_betaj_A_j_iphjphkphL);
Ay_rhs[index] += dYm1*(alpha_Phi_minus_betaj_A_j_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j-1,k)] - alpha_Phi_minus_betaj_A_j_iphjphkphL);
Az_rhs[index] += dZm1*(alpha_Phi_minus_betaj_A_j_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j,k-1)] - alpha_Phi_minus_betaj_A_j_iphjphkphL);
// \partial_t psi6phi = [shift advection term] + \partial_j (\alpha \sqrt{\gamma} A^j)
// Here we compute [shift advection term] = \partial_j (\beta^j psi6phi)
// Cache misses are likely more expensive than branch mispredictions here,
// which is why we use if() statements and array lookups inside the if()'s.
CCTK_REAL psi6phi_rhsL=0.0;
CCTK_REAL psi6phiL=psi6phi[index];
CCTK_REAL shiftx_iphjphkphL=shiftx_iphjphkph[index];
CCTK_REAL shifty_iphjphkphL=shifty_iphjphkph[index];
CCTK_REAL shiftz_iphjphkphL=shiftz_iphjphkph[index];
// \partial_x (\beta^x psi6phi) :
if(shiftx_iphjphkphL < 0.0) {
psi6phi_rhsL+=0.5*dXm1*(+ shiftx_iphjphkph[CCTK_GFINDEX3D(cctkGH,i-2,j,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i-2,j,k)]
-4.0*shiftx_iphjphkph[CCTK_GFINDEX3D(cctkGH,i-1,j,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i-1,j,k)]
+3.0*shiftx_iphjphkphL* psi6phiL);
} else {
psi6phi_rhsL+=0.5*dXm1*(- shiftx_iphjphkph[CCTK_GFINDEX3D(cctkGH,i+2,j,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i+2,j,k)]
+4.0*shiftx_iphjphkph[CCTK_GFINDEX3D(cctkGH,i+1,j,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i+1,j,k)]
-3.0*shiftx_iphjphkphL* psi6phiL);
}
// \partial_y (\beta^y psi6phi) :
if(shifty_iphjphkphL < 0.0) {
psi6phi_rhsL+=0.5*dYm1*(+ shifty_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j-2,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j-2,k)]
-4.0*shifty_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j-1,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j-1,k)]
+3.0*shifty_iphjphkphL* psi6phiL);
} else {
psi6phi_rhsL+=0.5*dYm1*(- shifty_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j+2,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j+2,k)]
+4.0*shifty_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j+1,k)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j+1,k)]
-3.0*shifty_iphjphkphL* psi6phiL);
}
// \partial_z (\beta^z psi6phi) :
if(shiftz_iphjphkphL < 0.0) {
psi6phi_rhsL+=0.5*dZm1*(+ shiftz_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j,k-2)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j,k-2)]
-4.0*shiftz_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j,k-1)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j,k-1)]
+3.0*shiftz_iphjphkphL* psi6phiL);
} else {
psi6phi_rhsL+=0.5*dZm1*(- shiftz_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j,k+2)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j,k+2)]
+4.0*shiftz_iphjphkph[CCTK_GFINDEX3D(cctkGH,i,j,k+1)]*psi6phi[CCTK_GFINDEX3D(cctkGH,i,j,k+1)]
-3.0*shiftz_iphjphkphL* psi6phiL);
}
// Next we add \partial_j (\alpha \sqrt{\gamma} A^j) to \partial_t psi6phi:
psi6phi_rhsL+=dXm1*(alpha_sqrtg_Ax_interp[index] - alpha_sqrtg_Ax_interp[CCTK_GFINDEX3D(cctkGH,i+1,j,k)])
+ dYm1*(alpha_sqrtg_Ay_interp[index] - alpha_sqrtg_Ay_interp[CCTK_GFINDEX3D(cctkGH,i,j+1,k)])
+ dZm1*(alpha_sqrtg_Az_interp[index] - alpha_sqrtg_Az_interp[CCTK_GFINDEX3D(cctkGH,i,j,k+1)]);
// *GENERALIZED* LORENZ GAUGE:
// Finally, add damping factor to \partial_t psi6phi
//subtract lambda * alpha psi^6 Phi
psi6phi_rhsL+=-damp_lorenz*alpha_iphjphkph[index]*psi6phiL;
psi6phi_rhs[index] = psi6phi_rhsL;
}
}
// Arithmetic mean of f[] over the inclusive index box
// [imin..imax] x [jmin..jmax] x [kmin..kmax] (note: f is indexed [k][j][i]).
static inline CCTK_REAL avg(CCTK_REAL f[PLUS2+1][PLUS2+1][PLUS2+1],int imin,int imax, int jmin,int jmax, int kmin,int kmax) {
  CCTK_REAL sum   = 0.0;
  CCTK_REAL count = 0.0;
  for(int kk=kmin; kk<=kmax; kk++) {
    for(int jj=jmin; jj<=jmax; jj++) {
      for(int ii=imin; ii<=imax; ii++) {
        sum   += f[kk][jj][ii];
        count += 1.0;
      }
    }
  }
  return sum/count;
}
# -
# <a id='code_validation'></a>
#
# # Step n-1: Code validation \[Back to [top](#toc)\]
# $$\label{code_validation}$$
#
# First we download the original `IllinoisGRMHD` source code and then compare it to the source code generated by this tutorial notebook.
# +
# Verify if the code generated by this tutorial module
# matches the original IllinoisGRMHD source code
# First download the original IllinoisGRMHD source code
import urllib
from os import path
original_IGM_file_url  = "https://bitbucket.org/zach_etienne/wvuthorns/raw/5611b2f0b17135538c9d9d17c7da062abe0401b6/IllinoisGRMHD/src/Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C"
original_IGM_file_name = "Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs-original.C"
# NOTE(review): IGM_src_dir_path is assumed to be defined in an earlier cell
# of this notebook — verify before running this cell standalone.
original_IGM_file_path = os.path.join(IGM_src_dir_path,original_IGM_file_name)
# Then download the original IllinoisGRMHD source code
# We try it here in a couple of ways in an attempt to keep
# the code more portable
try:
    # Python 3 urllib API (urllib.request)
    original_IGM_file_code = urllib.request.urlopen(original_IGM_file_url).read().decode("utf-8")
    # Write down the file the original IllinoisGRMHD source code
    with open(original_IGM_file_path,"w") as file:
        file.write(original_IGM_file_code)
except:
    try:
        # Python 2 urllib API fallback
        original_IGM_file_code = urllib.urlopen(original_IGM_file_url).read().decode("utf-8")
        # Write down the file the original IllinoisGRMHD source code
        with open(original_IGM_file_path,"w") as file:
            file.write(original_IGM_file_code)
    except:
        # If all else fails, hope wget does the job
        # (jupytext-escaped notebook shell magic below)
        # !wget -O $original_IGM_file_path $original_IGM_file_url

# Perform validation: diff the downloaded original against the file this
# notebook generated (jupytext-escaped notebook magic below).
# Validation__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs__C = !diff $original_IGM_file_path $outfile_path__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs__C

if Validation__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs__C == []:
    # If the validation passes, we do not need to store the original IGM source code file
    # !rm $original_IGM_file_path
    print("Validation test for Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C: PASSED!")
else:
    # If the validation fails, we keep the original IGM source code file
    print("Validation test for Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.C: FAILED!")
    # We also print out the difference between the code generated
    # in this tutorial module and the original IGM source code
    print("Diff:")
    for diff_line in Validation__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs__C:
        print(diff_line)
# -
# <a id='latex_pdf_output'></a>
#
# # Step n: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
# $$\label{latex_pdf_output}$$
#
# The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
# [Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.pdf](Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means).
latex_nrpy_style_path = os.path.join(nrpy_dir_path,"latex_nrpy_style.tplx")
# #!jupyter nbconvert --to latex --template $latex_nrpy_style_path --log-level='WARN' Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.ipynb
# #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.tex
# #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.tex
# #!pdflatex -interaction=batchmode Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.tex
# !rm -f Tut*.out Tut*.aux Tut*.log
| IllinoisGRMHD/doc/Tutorial-IllinoisGRMHD__Lorenz_psi6phi_rhs__add_gauge_terms_to_A_i_rhs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Read and plot near-real-time Wave Glider data
# first cut by Tom, 10/18/2021
# +
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import cftime
import cartopy.crs as ccrs # import projections
import cartopy
import gsw
# -
# +
# %matplotlib inline
# # %matplotlib qt5
# Global matplotlib figure/save settings for this notebook.
plt.rcParams['figure.figsize'] = (5,4)
plt.rcParams['figure.dpi'] = 200
plt.rcParams['savefig.dpi'] = 400
plt.close('all')
# Prefix (directory + campaign tag) for all saved figure files.
__figdir__ = '../plots/' + 'SMODE_'
savefig_args = {'bbox_inches':'tight', 'pad_inches':0.2}
plotfiletype='png'
# +
savefig = True
zoom = False
if zoom:
    # Zoomed-in view around the S-MODE operations area.
    xmin, xmax = (-126,-121)
    ymin, ymax = (36.25,38.5)
    levels = np.linspace(14,17,21)-2.5
else:
    # Wider view of the California coast.
    xmin, xmax = (-127,-121)
    ymin, ymax = (35, 45)
    levels = np.linspace(13,18,11)
# -
def plot_ops_area(ax,**kwargs):
    """Draw the S-MODE pilot operations-area pentagon on a map axis.

    Inputs
    - ax: axis to draw on (e.g. a cartopy GeoAxes); if None, plt.gca() is used
    - kwargs: passed through to matplotlib plot/text calls
      (e.g. color=..., transform=ccrs.PlateCarree())

    Returns
    - (xs, ys, ax): longitudes and latitudes of the closed polygon,
      and the axis that was drawn on
    """
    # Add S-MODE pilot operations area
    '''
    New corners of pentagon:
    38° 05.500’ N, 125° 22.067’ W
    37° 43.000’ N, 124° 00.067’ W
    37° 45.000’ N, 123° 26.000‘ W
    36° 58.000’ N, 122° 57.000’ W
    36° 20.000’ N, 124° 19.067’ W
    '''
    # Convert degrees + decimal minutes to decimal degrees (W longitudes negative).
    coord = [[-(125+22.067/60),38+5.5/60], [-(124+0.067/60),37+43/60], [-(123+26/60),37+45/60], [-(122+57/60),36+58/60], [-(124+19.067/60),36+20/60]]
    coord.append(coord[0]) #repeat the first point to create a 'closed loop'
    xs, ys = zip(*coord) #create lists of x and y values
    if ax is None:
        ax = plt.gca()
    # ax.plot(xs,ys,transform=ccrs.PlateCarree())
    ax.plot(xs,ys,**kwargs)
    # Mark San Francisco as a known place to help us geo-locate ourselves.
    SF_lon=-(122+25/60)
    SF_lat= 37+47/60
    ax.plot(SF_lon, SF_lat, 'o', markersize=3, zorder=10, **kwargs)
    ax.text(SF_lon-5/60, SF_lat+5/60, 'San Francisco', fontsize=8, zorder=10, **kwargs)
    # ax.text(np.mean(xs)-.6, np.mean(ys)-.3, 'S-MODE ops area', fontsize=8, **kwargs)
    print(kwargs)  # echo the style kwargs (handy when tuning plots)
    return(xs,ys,ax)
# Alternative THREDDS endpoints kept for reference
# (dodsC = OPeNDAP access, fileServer = direct file download):
# url = 'http://smode.whoi.edu:8080/thredds/dodsC/insitu/waveglider/SV3-1043_PLD1_TAB1.nc'
# url = 'http://smode.whoi.edu:8080/thredds/fileServer/insitu/waveglider/SV3-1043_PLD1_TAB1.nc'
# url = 'http://smode.whoi.edu:8080/thredds/dodsC/insitu/waveglider/SV3-1043_PLD2_TAB2.nc'
# '#mode=bytes' presumably makes the netCDF library fetch the remote file via
# HTTP byte ranges — TODO confirm server support.
url = 'http://smode.whoi.edu:8080/thredds/fileServer/insitu/waveglider/SV3-1043_PLD1_TAB1.nc#mode=bytes'
ds_payload1_table1 = xr.open_dataset(url)
url = 'http://smode.whoi.edu:8080/thredds/fileServer/insitu/waveglider/SV3-1043_PLD2_TAB1.nc#mode=bytes'
ds_payload2_table1 = xr.open_dataset(url)
url = 'http://smode.whoi.edu:8080/thredds/fileServer/insitu/waveglider/SV3-1043_PLD2_TAB2.nc#mode=bytes'
# drop 'z' (presumably a problematic coordinate in this file) — verify against file contents
ds_payload2_table2 = xr.open_dataset(url,drop_variables=['z'])
url = 'http://smode.whoi.edu:8080/thredds/fileServer/insitu/waveglider/SV3-1043_PLD2_TAB3.nc#mode=bytes'
# drop 'freq' (presumably a problematic coordinate in this file) — verify against file contents
ds_payload2_table3 = xr.open_dataset(url,drop_variables=['freq'])
# Echo each dataset so the notebook shows its variables/attributes.
ds_payload1_table1
ds_payload2_table1
ds_payload2_table2
ds_payload2_table3
# ## Plot some health diagnostics for monitoring
# Around 1200 UTC on 10/20/2021, a wave broke the lashings on the WGs, and this is a real-time effort to assess the status of the instruments
#
# The data file of most interest at the moment is ```ds_payload2_table1```
ds = ds_payload2_table1
# +
# Map the Wave Glider track over the S-MODE operations area.
fig = plt.figure()
ax = plt.axes(projection = ccrs.PlateCarree(central_longitude=200)) # Orthographic
extent = [xmin, xmax, ymin, ymax]
ax.set_extent(extent, crs=ccrs.PlateCarree())
# Older, hand-rolled date-string construction kept for reference:
'''
daystr=ds.time.dt.day.astype(str).values[0]
monstr=ds.time.dt.month.astype(str).values[0]
yrstr=ds.time.dt.year.astype(str).values[0]
day_str = monstr+'-'+daystr+'-'+yrstr
'''
# Timestamp of the most recent sample, used for the title and the filename.
day_str=ds.time.isel(time=-1).dt.strftime("%a, %b %d %H:%M").values
ax.set_title(day_str+' UTC', size = 10.)
#plt.set_cmap(cmap=plt.get_cmap('nipy_spectral'))
plt.set_cmap(cmap=plt.get_cmap('turbo'))
gl = ax.gridlines(draw_labels=True, dms=True, x_inline=False, y_inline=False, alpha=0.5, linestyle='--')
gl.top_labels = False
gl.ylabels_right = False
#gl.xlocator = matplotlib.ticker.MaxNLocator(10)
#gl.xlocator = matplotlib.ticker.AutoLocator
# gl.xlocator = matplotlib.ticker.FixedLocator(np.arange(0, 360 ,30))
ax.coastlines()
ax.add_feature(cartopy.feature.LAND, zorder=3, facecolor=[.6,.6,.6], edgecolor='black')
ax.add_feature(cartopy.feature.STATES, zorder=3, edgecolor='black')
# Full track, plus a marker and label at the most recent position.
ax.plot(ds.longitude_1hz_Avg,ds.latitude_1hz_Avg,transform=ccrs.PlateCarree())
ax.plot(ds.longitude_1hz_Avg.isel(time=-1),ds.latitude_1hz_Avg.isel(time=-1),'ko',transform=ccrs.PlateCarree())
ax.text(ds.longitude_1hz_Avg.isel(time=-1),ds.latitude_1hz_Avg.isel(time=-1), 'last_time', fontsize=6, transform=ccrs.PlateCarree())
# cs = ax.contourf(ds.lon,ds.lat,np.squeeze(ds.sea_surface_temperature)-273.15, levels, extend='both', transform=ccrs.PlateCarree())
# cb = plt.colorbar(cs,fraction = 0.022,extend='both')
# cb.set_label('SST [$\circ$C]',fontsize = 10)
plot_ops_area(ax,transform=ccrs.PlateCarree(),color='w')
# Add a 10 km scale bar
km_per_deg_lat=gsw.geostrophy.distance((125,125), (37,38))/1000
deg_lat_equal_10km=10/km_per_deg_lat
x0 = -123.75
y0 = 36.75
ax.plot(x0+np.asarray([0, 0]),y0+np.asarray([0.,deg_lat_equal_10km]),transform=ccrs.PlateCarree(),color='k')
ax.text(x0+2/60, y0-.5/60, '10 km', fontsize=6,transform=ccrs.PlateCarree())
if savefig:
    # NOTE(review): day_str contains spaces and a comma (strftime "%a, %b %d %H:%M"),
    # so these characters end up in the saved filename — confirm this is intended.
    plt.savefig(__figdir__+'_foo_WG_pos_' + day_str + '.' +plotfiletype,**savefig_args)
# -
# Quick interactive inspection of the position/time variables.
ds.longitude_1hz_Avg
ds["time"].dt.strftime("%a, %b %d %H:%M")
day_str=ds.time.isel(time=-1).dt.strftime("%a, %b %d %H:%M")
print(day_str.values)
| code/read_realtime_waveglider.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Imports for BERT-based NER fine-tuning (pytorch-pretrained-bert era API).
import pandas as pd
import numpy as np
from tqdm import tqdm, trange
import torch
from torch.optim import Adam
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from pytorch_pretrained_bert import BertTokenizer, BertConfig
from pytorch_pretrained_bert import BertForTokenClassification, BertAdam
# dataset link https://www.kaggle.com/abhinavwalia95/entity-annotated-corpus
# Forward-fill NaNs — presumably the "Sentence #" column is only set on the
# first token of each sentence; verify against the CSV.
data = pd.read_csv("ner_dataset.csv", encoding="latin1").fillna(method="ffill")
data.tail(10)
# +
class SentenceGetter(object):
    """Group a token-level NER dataframe into per-sentence (word, pos, tag) triples.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain the columns "Sentence #", "Word", "POS" and "Tag".
    """
    def __init__(self, data):
        self.n_sent = 1      # next sentence number returned by get_next()
        self.data = data
        self.empty = False   # becomes True once get_next() runs past the last sentence
        # One list of (word, pos, tag) triples per sentence.
        agg_func = lambda s: [(w, p, t) for w, p, t in zip(s["Word"].values.tolist(),
                                                           s["POS"].values.tolist(),
                                                           s["Tag"].values.tolist())]
        self.grouped = self.data.groupby("Sentence #").apply(agg_func)
        self.sentences = [s for s in self.grouped]

    def get_next(self):
        """Return the next sentence as a list of (word, pos, tag) triples, or None when exhausted."""
        try:
            s = self.grouped["Sentence: {}".format(self.n_sent)]
            self.n_sent += 1
            return s
        # Catch only the expected "no such sentence" failure; the original
        # bare `except:` also hid real bugs (and KeyboardInterrupt).
        except KeyError:
            self.empty = True
            return None
getter = SentenceGetter(data)
# -
# Rebuild whitespace-joined sentence strings and the parallel per-word tag lists.
sentences = [" ".join([s[0] for s in sent]) for sent in getter.sentences]
sentences[0]
labels = [[s[2] for s in sent] for sent in getter.sentences]
print(labels[0])
# NOTE(review): set() iteration order is arbitrary, so tag2idx (and therefore
# saved model label ids) can differ between runs — consider sorting tags_vals.
tags_vals = list(set(data["Tag"].values))
tag2idx = {t: i for i, t in enumerate(tags_vals)}
# We will limit our sequence length to 75 tokens and we will use a batch size of 32 as suggested by the Bert paper. Note, that Bert natively supports sequences of up to 512 tokens.
MAX_LEN = 75
bs = 32
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
torch.cuda.get_device_name(0)
# The Bert implementation comes with a pretrained tokenizer and a defined vocabulary. We load the one related to the smallest pre-trained model bert-base-uncased. Try also the cased variate since it is well suited for NER.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
# Now we tokenize all sentences
# NOTE(review): WordPiece may split one word into several sub-tokens while
# `labels` stays one tag per original word, so tokens and tags can drift out
# of alignment below — confirm this is acceptable for this tutorial.
tokenized_texts = [tokenizer.tokenize(sent) for sent in sentences]
print(tokenized_texts[0])
# Next, we cut and pad the token and label sequences to our desired length.
input_ids = pad_sequences([tokenizer.convert_tokens_to_ids(txt) for txt in tokenized_texts],
                          maxlen=MAX_LEN, dtype="long", truncating="post", padding="post")
# Pad label sequences with the "O" (outside) tag id.
tags = pad_sequences([[tag2idx.get(l) for l in lab] for lab in labels],
                     maxlen=MAX_LEN, value=tag2idx["O"], padding="post",
                     dtype="long", truncating="post")
# The Bert model supports something called attention_mask, which is similar to the masking in keras. So here we create the mask to ignore the padded elements in the sequences.
attention_masks = [[float(i>0) for i in ii] for ii in input_ids]
# Same random_state in both calls keeps inputs and masks aligned row-for-row.
tr_inputs, val_inputs, tr_tags, val_tags = train_test_split(input_ids, tags,
                                                            random_state=2018, test_size=0.1)
tr_masks, val_masks, _, _ = train_test_split(attention_masks, input_ids,
                                             random_state=2018, test_size=0.1)
# Convert everything to torch tensors.
tr_inputs = torch.tensor(tr_inputs)
val_inputs = torch.tensor(val_inputs)
tr_tags = torch.tensor(tr_tags)
val_tags = torch.tensor(val_tags)
tr_masks = torch.tensor(tr_masks)
val_masks = torch.tensor(val_masks)
# +
# Shuffled batches for training; sequential batches for validation.
train_data = TensorDataset(tr_inputs, tr_masks, tr_tags)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=bs)
valid_data = TensorDataset(val_inputs, val_masks, val_tags)
valid_sampler = SequentialSampler(valid_data)
valid_dataloader = DataLoader(valid_data, sampler=valid_sampler, batch_size=bs)
# -
# ## Setup the Bert model for finetuning
#
# The pytorch-pretrained-bert package provides a BertForTokenClassification class for token-level predictions. BertForTokenClassification is a fine-tuning model that wraps BertModel and adds token-level classifier on top of the BertModel. The token-level classifier is a linear layer that takes as input the last hidden state of the sequence. We load the pre-trained bert-base-uncased model and provide the number of possible labels.
model = BertForTokenClassification.from_pretrained("bert-base-uncased", num_labels=len(tag2idx))
model.cuda();
# Before we can start the fine-tuning process, we have to setup the optimizer and add the parameters it should update. A common choice is the Adam optimizer. We also add some weight_decay as regularization to the main weight matrices. If you have limited resources, you can also try to just train the linear classifier on top of Bert and keep all other weights fixed. This will still give you a good performance.
FULL_FINETUNING = True
if FULL_FINETUNING:
    param_optimizer = list(model.named_parameters())
    # Bias and LayerNorm ('gamma'/'beta') parameters are excluded from decay.
    no_decay = ['bias', 'gamma', 'beta']
    # NOTE(review): 'weight_decay_rate' is the BertAdam key; torch.optim.Adam
    # reads 'weight_decay' and silently ignores this key, so no decay may
    # actually be applied here — verify and rename if decay is intended.
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay_rate': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
         'weight_decay_rate': 0.0}
    ]
else:
    # Train only the final classifier layer.
    param_optimizer = list(model.classifier.named_parameters())
    optimizer_grouped_parameters = [{"params": [p for n, p in param_optimizer]}]
optimizer = Adam(optimizer_grouped_parameters, lr=3e-5)
# ## Finetune Bert
# First we define some metrics, we want to track while training. We use the f1_score from the seqeval package. You can find more details here. And we use simple accuracy on a token level comparable to the accuracy in keras.
# +
from seqeval.metrics import f1_score
def flat_accuracy(preds, labels):
    """Token-level accuracy.

    preds: logits with the class scores on axis 2 (e.g. batch x seq x n_tags)
    labels: integer tag ids, same leading shape as preds without the class axis

    Returns the fraction of positions whose argmax prediction equals the label.
    """
    predicted = preds.argmax(axis=2).ravel()
    expected = labels.ravel()
    return (predicted == expected).mean()
# -
# Finally, we can fine-tune the model. A few epochs should be enough. The paper suggest 3-4 epochs.
# +
epochs = 2
max_grad_norm = 1.0

# Each epoch: one pass over the training set, then a full validation pass.
for _ in trange(epochs, desc="Epoch"):
    # TRAIN loop
    model.train()
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0
    for step, batch in enumerate(train_dataloader):
        # add batch to gpu
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask, b_labels = batch
        # forward pass (passing labels makes the model return the loss)
        loss = model(b_input_ids, token_type_ids=None,
                     attention_mask=b_input_mask, labels=b_labels)
        # backward pass
        loss.backward()
        # track train loss
        tr_loss += loss.item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1
        # gradient clipping (applied before the optimizer step)
        torch.nn.utils.clip_grad_norm_(parameters=model.parameters(), max_norm=max_grad_norm)
        # update parameters
        optimizer.step()
        model.zero_grad()
    # print train loss per epoch
    print("Train loss: {}".format(tr_loss/nb_tr_steps))
    # VALIDATION on validation set
    model.eval()
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    predictions , true_labels = [], []
    for batch in valid_dataloader:
        batch = tuple(t.to(device) for t in batch)
        b_input_ids, b_input_mask, b_labels = batch
        with torch.no_grad():
            # Two forward passes: one with labels (loss), one without (logits).
            tmp_eval_loss = model(b_input_ids, token_type_ids=None,
                                  attention_mask=b_input_mask, labels=b_labels)
            logits = model(b_input_ids, token_type_ids=None,
                           attention_mask=b_input_mask)
        logits = logits.detach().cpu().numpy()
        label_ids = b_labels.to('cpu').numpy()
        predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
        true_labels.append(label_ids)
        tmp_eval_accuracy = flat_accuracy(logits, label_ids)
        eval_loss += tmp_eval_loss.mean().item()
        eval_accuracy += tmp_eval_accuracy
        nb_eval_examples += b_input_ids.size(0)
        nb_eval_steps += 1
    eval_loss = eval_loss/nb_eval_steps
    print("Validation loss: {}".format(eval_loss))
    print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
    # Flatten predictions/labels to tag strings (padding positions included).
    pred_tags = [tags_vals[p_i] for p in predictions for p_i in p]
    valid_tags = [tags_vals[l_ii] for l in true_labels for l_i in l for l_ii in l_i]
    # NOTE(review): seqeval's f1_score signature is (y_true, y_pred); here the
    # predicted tags are passed first — F1 is symmetric under that swap, but
    # any reported precision/recall would be exchanged. Verify intent.
    print("F1-Score: {}".format(f1_score(pred_tags, valid_tags)))
# +
# Final evaluation pass over the validation set (same logic as the in-epoch
# validation above, but keeping per-sentence tag lists for seqeval).
model.eval()
predictions = []
true_labels = []
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for batch in valid_dataloader:
    batch = tuple(t.to(device) for t in batch)
    b_input_ids, b_input_mask, b_labels = batch
    with torch.no_grad():
        # Two forward passes: one with labels (loss), one without (logits).
        tmp_eval_loss = model(b_input_ids, token_type_ids=None,
                              attention_mask=b_input_mask, labels=b_labels)
        logits = model(b_input_ids, token_type_ids=None,
                       attention_mask=b_input_mask)
    logits = logits.detach().cpu().numpy()
    predictions.extend([list(p) for p in np.argmax(logits, axis=2)])
    label_ids = b_labels.to('cpu').numpy()
    true_labels.append(label_ids)
    tmp_eval_accuracy = flat_accuracy(logits, label_ids)
    eval_loss += tmp_eval_loss.mean().item()
    eval_accuracy += tmp_eval_accuracy
    nb_eval_examples += b_input_ids.size(0)
    nb_eval_steps += 1
# Nested (per-sentence) tag lists, as seqeval expects.
pred_tags = [[tags_vals[p_i] for p_i in p] for p in predictions]
valid_tags = [[tags_vals[l_ii] for l_ii in l_i] for l in true_labels for l_i in l ]
print("Validation loss: {}".format(eval_loss/nb_eval_steps))
print("Validation Accuracy: {}".format(eval_accuracy/nb_eval_steps))
print("Validation F1-Score: {}".format(f1_score(pred_tags, valid_tags)))
| DEEP LEARNING/NLP/BERD pretrained model/Named Entity Recognition With Bert.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/gumdropsteve/intro_to_python/blob/main/day_02/intro_to_python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="scxw573ueqHT"
# # Intro to Python
# Intro to Python, day 2.
#
# ## Variables
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="AHJDWWYMe_8G" outputId="05e9012d-4569-44c2-a6bb-824c03b69f68"
# Bind a string to `a`; the bare expression below echoes it as cell output.
a = 'this is a string'
a
# + colab={"base_uri": "https://localhost:8080/"} id="AZ8CF1TJie_E" outputId="ec54c234-b7f0-4af4-fc6f-84bb180ad199"
# you can print strings to avoid the ''s
print(a)
# + colab={"base_uri": "https://localhost:8080/"} id="ESdIoa9sic8l" outputId="8d5eaaba-8a4a-42ac-b87d-9ce439a258c0"
# you can overwrite variables
a = 4
a
# + colab={"base_uri": "https://localhost:8080/"} id="NdnbPDiKirVE" outputId="f023bd32-3a83-43ec-e015-26c2e4fb0079"
print(a)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="CjwfgCLugyAV" outputId="c943c223-87c7-4278-f3c2-b7c33cbc5c2b"
b = "this is also a string"
b
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="1ew6qiech1VI" outputId="2e4e39fd-7d67-4c3d-c0aa-13401cae2fec"
# convert the int back to a string so it can be concatenated with b
a = str(a)
a + b
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="lYv9Cm8ah9Ck" outputId="ae6758ce-db2b-4b26-e8ef-b199f97f7de4"
a + ' ' + b
# + [markdown] id="7fg9b_uafBY5"
# ## Strings
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="l50CGXaqYnht" outputId="8f12737d-dd00-4288-c8d8-4bc500337683"
'this is a string'
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="ejXyJ09BjGE6" outputId="b2b3e644-00c0-4874-ed86-375eb2b8bdda"
"this is also a string"
# + colab={"base_uri": "https://localhost:8080/"} id="qF27lPo5jHxI" outputId="a4e43e51-b261-4ab1-c11d-c805345ca981"
print('this is a string')
# + colab={"base_uri": "https://localhost:8080/"} id="jSiIV1hcjKTO" outputId="5f2a6fe7-577e-4be5-ae31-f872c11cb989"
print("this is also a string")
# + colab={"base_uri": "https://localhost:8080/"} id="HbHKLcPAqP5Q" outputId="ae7edf87-621b-47e2-e073-c1721e9d7cf7"
# check the datatype with type
type('this is a string')
# + colab={"base_uri": "https://localhost:8080/"} id="_rzcd2hFqWqH" outputId="5581ec5f-bdf3-487e-da68-8e620519a561"
type(2)
# + [markdown] id="qIrstM4EjRWu"
# #### Formatting Strings
# There are different ways to format a string.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="FFPFsnGIkShe" outputId="0c4ce357-95b3-447f-ca30-a3cacb7110bf"
# round() the value, then interpolate it with an f-string
variable = .0333333
f'{round(variable, 2)}'
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="XQOtnZUok79c" outputId="1f7507e6-1ad7-46f4-cb9d-e89d4cfb9295"
# same idea using an f-string format spec (2 decimal places)
variable = .0333333
f'{variable:.2f}'
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="zJXZ3fVlj2OH" outputId="6de9313f-3af1-4f08-f45f-bd7a9b16bf4c"
# same format spec via str.format()
variable = .0333333
'{:.2f}'.format(variable)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="vfw-ElSRljdp" outputId="b22b9121-1c75-4bf3-fd1a-840113968a79"
# percentage formatting with an f-string
variable = .0333333
f'{variable:.2%}'
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="FuQ9QCQglsls" outputId="93d061fd-c575-4669-f32e-f6fa6152098c"
# percentage formatting via str.format()
variable = .0333333
'{:.2%}'.format(variable)
# + [markdown] id="WzkRQYsNl6FL"
# You can input multiple variables into formatted strings.
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="GuILIh8SjM53" outputId="727313a4-0eb8-4f32-9a2c-e5fc245127cb"
name = 'Winston'
day = 'Monday'
f'Hi {name}, today is {day}'
# + colab={"base_uri": "https://localhost:8080/"} id="gxUNQ9ixkPRu" outputId="fa0dbfd7-a7e0-4a7d-dc30-a91fe132f7a2"
print(f'Hi {name}, today is {day}')
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="rVoIjwnMmCjI" outputId="5d42173e-84c8-4a77-d7d9-42f53e65c902"
# \n is a newline escape: echoing shows it raw, print() renders it
x = 'test\n3'
x
# + colab={"base_uri": "https://localhost:8080/"} id="tVZ_O8hBmF6v" outputId="eddc4299-dfad-441b-a7de-aff1bbaa3509"
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="jrXNQIqomOZF" outputId="aeb44aac-a374-4d5b-a0cb-ab9c261d6621"
name = 'Winston'
day = 'Monday'
print(f"Hi {name}, today is {day}")
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="7MqAwOmimdAw" outputId="9a985832-90ca-44d3-a15d-04089cb5823e"
name = 'Winston'
# can I use expressions in formatted strings?
f'Hi {name}, today is {3 > 2}, {round(2.1232, 2)}'
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="mIiX-YRtmsZ3" outputId="fc0a1810-7fdc-4b73-d235-124f11fdca3b"
"Hi {}, today is {}, {}".format(name, 3 > 2, round(2.1232, 2))
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="HdRr4weynYat" outputId="4683bb2e-2cc9-4595-c62d-6e90fd49bdbf"
# plain concatenation also works (all operands must be strings)
'Hi ' + name + ', today is ' + day + '.'
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="JXgSnV6Ynil_" outputId="73f87670-dd58-4906-c178-26a45f8a9695"
'Hi ' + name + ", today is " + f'{day}.'
# + [markdown] id="0lWpmP8Jooq-"
# #### String Functions
# Built in string functions.
# + colab={"base_uri": "https://localhost:8080/"} id="HcZSvDX3n3zw" outputId="20ad33e1-0df4-44af-86c7-ee12d9bdd06c"
hello = 'Hi ' + name + ", today is " + f'{day}.'
# lowercase everything
print(hello.lower())
# caps lock
print(hello.upper())
# make it a title (capitalize every word)
print(hello.title())
# + colab={"base_uri": "https://localhost:8080/"} id="Rtim_iywsSHh" outputId="ae508102-793b-4e65-f3cd-836b77c484ed"
# use strip to remove spaces on the sides
valid_email = ' <EMAIL> '
print(valid_email)
print(valid_email.strip())
# + colab={"base_uri": "https://localhost:8080/"} id="R33s8q4joHyq" outputId="999ea2bd-2ec8-4752-de60-3e2c4908ad8a"
hello = 'Hi ' + name + ", today is " + f'{day}.'
# split the string into a list of strings (by ' ' on default)
hello.split()
# + colab={"base_uri": "https://localhost:8080/"} id="_3K3xriLoyV2" outputId="50142271-e137-4a96-d07f-f769050f11ec"
# split on a custom separator
print(hello)
hello.split(',')
# + colab={"base_uri": "https://localhost:8080/"} id="d-bUnsvNo5eA" outputId="211b8105-d604-4cdc-bcd7-9878c376abfd"
print(hello)
hello_list = hello.split('Winston')
hello_list
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="DRYqslUopNIT" outputId="a017b1cb-f968-4d6d-8006-297acc4ca267"
# join() stitches the pieces back together with the given separator
'Winston'.join(hello_list)
# + colab={"base_uri": "https://localhost:8080/"} id="PglZBOz5pZ3n" outputId="ca9ab75a-cdbd-47dd-fff6-1815fe013b6f"
# split then re-join round-trips the original string
print(hello)
print(hello.split(','))
print(','.join(hello.split(',')))
# + colab={"base_uri": "https://localhost:8080/"} id="sOoBuiqLqBUb" outputId="58f4188e-c3aa-4189-e02f-a5127b6ecdde"
from datetime import datetime
now = datetime.now()
now = str(now)
print(now)
# split the now datetime string into a list of strings where there are ' '
now.split()
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="GevDaVyzq1gK" outputId="99772c01-f8c3-470f-ecb8-135570a00945"
# first piece of the split: the date part of the timestamp string
date = now.split()[0]
date
# + colab={"base_uri": "https://localhost:8080/"} id="GGTZQQE3q9M4" outputId="5eadb9d5-a573-41b6-bb24-43c6d2754c16"
'your string here'.split()
# + colab={"base_uri": "https://localhost:8080/"} id="4utc2VAHrIB4" outputId="0f907bc4-4274-4a98-abca-55b348584cfe"
from datetime import datetime
now = datetime.now()
now = str(now)
print(now)
print(now.split())
print(' '.join(now.split()))
# + colab={"base_uri": "https://localhost:8080/"} id="VZzYB9sks8iq" outputId="4d2ae042-4a08-43ce-ff34-60c28cbc1d5d"
# how do I split on the month? (really, use a package)
print(now.split('-'))
print(now.split('-')[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="spUSPmRBrefz" outputId="f4d122e1-2545-4ff4-8348-0050dd0ea626"
'whatever I put here'.join(['something on the left', 'something on the right'])
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="AiZ_F7OarpU6" outputId="0799db53-0597-4e06-d9b0-c1e07d39fd10"
' whatever I put here '.join(['something on the left', 'something on the right'])
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="KsZRcJv7rvM-" outputId="3f497756-9390-48ee-a36a-9c7a0558ccb1"
name = 'Winston'
day = 'Monday'
l = ['Hello', name, 'today is', day]
' '.join(l)
# + [markdown] id="hk3Q1eMFpwzm"
# Bool string checks.
# + colab={"base_uri": "https://localhost:8080/"} id="isTxLbLmpm4q" outputId="7e02283b-3ad7-4508-e896-26288aeb0c75"
hello.isupper()
# + colab={"base_uri": "https://localhost:8080/"} id="Pu4Jjjlwpukx" outputId="a5df0ba1-12ce-4049-b67e-05019ca5d0a2"
'hello'.islower()
# + colab={"base_uri": "https://localhost:8080/"} id="ZbxUDcoEp03n" outputId="55ba2e7d-6708-4051-cfb6-c1e0de362e1b"
'H'.isupper()
# + [markdown] id="JWH0dtxAtaEp"
# ### Conditionals
# - if, else, elif
# + colab={"base_uri": "https://localhost:8080/"} id="sE3ym_imp2sq" outputId="6dda1efb-30aa-4ad5-abff-9cefccb21d58"
a = 2
if a > 0:
    print('a is greater than 0')
else:
    print('a is less than or equal to 0')
# + colab={"base_uri": "https://localhost:8080/"} id="RKsNtIZttpr_" outputId="482d6a4c-96df-4802-9168-0b5d7c7582e7"
a = -1
if a > 0:
    print('a is greater than 0')
else:
    print('a is less than or equal to 0')
# + colab={"base_uri": "https://localhost:8080/"} id="UWLdkOyMtrfy" outputId="3cd91572-a5cd-468a-9617-464a47b2cb30"
a = -1
if a > 0:
print(f'{a} is greater than 0')
else:
print(f'{a} is less than or equal to 0')
# + colab={"base_uri": "https://localhost:8080/"} id="1MHBsepet1CI" outputId="4f13d5e7-6559-47bb-eae0-9ba36401a2dd"
a = -1
if a > 0:
print(f'{a} is greater than 0')
elif a == 0:
print(f'{a} is exactly 0')
else:
print(f'{a} is less than 0')
# + colab={"base_uri": "https://localhost:8080/"} id="hSadLWduuGM2" outputId="8a9b21f2-df68-4a9d-a0a7-ab42d655b74c"
a = 0
# this checks 3 different things every single time (all three ifs always run)
if a > 0:
    print(f'{a} is greater than 0')
if a == 0:
    print(f'{a} is exactly 0')
# was a bare `if:` -- a SyntaxError; the third independent check needs its own condition
if a < 0:
    print(f'{a} is less than 0')
# + colab={"base_uri": "https://localhost:8080/"} id="m-V3V3aIuInr" outputId="a35f0e1b-a2be-4633-eaac-5c160d62ca79"
a = 0
# this checks however many it needs to until one is satisfied (2 in this case)
if a > 0:
print(f'{a} is greater than 0')
elif a == 0:
print(f'{a} is exactly 0')
else:
print(f'{a} is less than to 0')
# + [markdown] id="2XP6-2xWvC3P"
# #### Operators
# + colab={"base_uri": "https://localhost:8080/"} id="tuF3f9B7vU8R" outputId="b0ed4cd6-bf49-4883-eba6-41e79df9bc06"
# single = sets a variable
a = 'lol'
# double == checks if 2 things are equal (the same)
a == 'lol'
# + colab={"base_uri": "https://localhost:8080/"} id="0U46XdSxvCIR" outputId="052fe0e5-9b1b-4c68-9f0b-ba859253cd8c"
# check if something is the same as something else
'lol' == 'lol'
# + colab={"base_uri": "https://localhost:8080/"} id="kqiGr_fYuvMx" outputId="5d0a76fb-932e-48b0-9236-aa915dadb799"
# check if something is not the same as something else
'lol' != 'lol'
# + colab={"base_uri": "https://localhost:8080/"} id="RLsBHdDCvQPg" outputId="89673c6a-dcb0-4ae6-8003-e9f7259129ca"
# less than
0 < 2
# + colab={"base_uri": "https://localhost:8080/"} id="yq7rTx2Evld7" outputId="075a1b5c-1b66-4c13-ba96-6f6db1ab3d49"
# greater than
0 > 2
# + colab={"base_uri": "https://localhost:8080/"} id="E48oVk0IvnVG" outputId="139cf7ce-9c5f-42d1-91fe-de199f25a540"
# less than or equal to
0 <= 2
# + colab={"base_uri": "https://localhost:8080/"} id="jkW_NUyqvqDL" outputId="59d3f05b-ff19-4944-bb62-61b16b2353ae"
# greater than or equal to
0 >= 2
# + colab={"base_uri": "https://localhost:8080/"} id="AP35DxagvsgM" outputId="fee7d9f9-f5ce-49c6-8cc9-fecdb56d16f6"
0 < 0
# + colab={"base_uri": "https://localhost:8080/"} id="Eu6REbzqvu5W" outputId="c271f87c-8604-402b-adfd-b87e21654d0f"
0 <= 0
# + [markdown] id="vDvpaVuUz1jb"
# ## Loops
# - for loop
# - while loop
# + colab={"base_uri": "https://localhost:8080/"} id="GLuhn5wZvv6Y" outputId="b5cf5927-67c1-467d-cda7-96af58b56e90"
for i in range(5):
# starts with 0 (by default), therefore 'not inclusive'
print(i)
# + colab={"base_uri": "https://localhost:8080/"} id="uAXqsZ4F0ODT" outputId="5fea6fa9-60e4-44b2-ce1f-2179bfba2157"
for i in range(1, 5):
# can start with a different number, but still 'not inclusive'
print(i)
# + colab={"base_uri": "https://localhost:8080/"} id="86aNMz700oIT" outputId="f9a90d29-5454-46ea-9b6c-dac88859ea18"
l = ['a', 'few', 'things', 'in', "this", 'list']
for i in range(len(l)):
print(i)
# + colab={"base_uri": "https://localhost:8080/"} id="BZbNSzC002rX" outputId="1ff63e66-36dc-4477-ec7c-135d20bb4e28"
# check how long something is with len()
len(l)
# + colab={"base_uri": "https://localhost:8080/"} id="dUOnXGw806mI" outputId="e253a258-d4a3-4732-883d-c1a40add84bc"
for i in range(len(l)):
print(l[i])
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="IfT_hLRj1Hdi" outputId="40ee9acd-86ac-44df-ee3f-29ad783e2512"
# l[len(l)]
l[5]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="cqVwErfz1NYn" outputId="5e477b99-9664-4d76-e838-81539f8f7b28"
l[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="3B5UYHMs1Spv" outputId="fdaf458c-dd36-40da-a77f-750af47801dd"
l[1]
# + colab={"base_uri": "https://localhost:8080/"} id="Z-9T29Yq1hYX" outputId="13fb2e33-bbf1-4a52-a38d-8274c31612d4"
a = 0
# as long as a is less than 5
while a < 5:
# tell me what a is
print(a)
# add 1 to the value of a
a += 1 # a = a + 1
# + colab={"base_uri": "https://localhost:8080/"} id="b3SJlDKE15pe" outputId="5ff2e9ef-9777-42bb-93cd-640cbd8a610b"
l = ['a', 'b', 'c']
for i in l:
print(i)
# + colab={"base_uri": "https://localhost:8080/"} id="ztikA1te2Qne" outputId="9554defd-7a09-4bd0-b1b8-63b8fb10e185"
for _ in l:
print(_)
# + colab={"base_uri": "https://localhost:8080/"} id="k5Lpzhcp2aa4" outputId="5108ddf2-3d1c-43eb-992b-e8c6674b872d"
for i in range(len(l)):
print(i)
# + colab={"base_uri": "https://localhost:8080/"} id="x_fXZTE72ddf" outputId="1affa460-b1f7-4096-f4d1-717b86ee0531"
l = ['a', 'b', 'c']
# go through the index of the list l
for i in range(len(l)):
# print the item at that index
print(l[i])
# + colab={"base_uri": "https://localhost:8080/"} id="MDLfrKVf2v22" outputId="39978cc4-021e-4b25-bb19-6bd15c765332"
l = ['a', 'b', 'c']
# go through the index of the list l
for i in range(len(l)):
# print the item at that index
print(l[i])
# print the index
print(i)
# + [markdown] id="J13Ojxkd3Omc"
# ## Functions
#
# ```
# def function_name():
# your function here
# ```
# + [markdown] id="0Mk5cWrq33Yq"
# ```
# def add_two_numbers(number 1, number 2):
# take number 1 and add it to number 2
# return the result
# ```
# + colab={"base_uri": "https://localhost:8080/"} id="S9uGJPuR4YJ2" outputId="9f839af8-6c6d-4537-bd52-d918d2af33be"
def add_two_numbers(n1, n2):
    """Return the sum of n1 and n2."""
    return n1 + n2

add_two_numbers(1, 2)
# + colab={"base_uri": "https://localhost:8080/"} id="CHkywAHq4mfb" outputId="6405e742-e80e-4489-9231-3e42bc07d2cf"
print(add_two_numbers(1, 2))
# + [markdown] id="yAeKc_pE4ujz"
# **print vs return**
# + colab={"base_uri": "https://localhost:8080/"} id="h5a3lPtZ4r3o" outputId="f1acfd4c-9d15-41dd-843d-e937b78e2155"
# print just displays a number
def add_two_numbers_p(n1, n2):
    """Print the sum of n1 and n2; nothing is returned (implicitly None)."""
    total = n1 + n2
    print(total)

add_two_numbers_p(1, 2)
# + colab={"base_uri": "https://localhost:8080/"} id="BfKkzCsb4640" outputId="3bb0cb03-4f3a-49e7-a8aa-4761feb72bf1"
outcome = add_two_numbers_p(1, 2)
# + id="dybrcjE35B-3"
outcome
# + colab={"base_uri": "https://localhost:8080/"} id="nsQxEXPt5K-R" outputId="82d31692-74a5-44a0-e2ae-2da0d3c677f7"
outcome == None
# + id="N7U-ce7z5SRn"
def add_two_numbers(n1, n2):
out = n1 + n2
return out
outcome = add_two_numbers(1, 2)
# + colab={"base_uri": "https://localhost:8080/"} id="Q4j6LAgh5ZGt" outputId="0d56e6cd-254d-4857-8431-85a11d77bf1b"
outcome
# + colab={"base_uri": "https://localhost:8080/"} id="hxFKI9eX5gwu" outputId="53b17dd1-30fe-428a-ee11-6601d3470445"
outcome == None
# + [markdown] id="gn5kKe3a50W7"
# #### Default Parameters
# + id="VaS80cKu5r7c"
def add_two_numbers(n1, n2, print_out=False):
    """Return n1 + n2, printing the result first when print_out is True."""
    total = n1 + n2
    if print_out:
        print(total)
    return total

o = add_two_numbers(5, 7)
# + colab={"base_uri": "https://localhost:8080/"} id="ZV6bCl986RQl" outputId="b9d8dbf3-07f1-434b-b7bd-17402baf275d"
o
# + colab={"base_uri": "https://localhost:8080/"} id="WRFMcEtw5iKW" outputId="1a9f8d00-1888-421e-b630-bdbd2f979eb4"
o = add_two_numbers(5, 7, print_out=True)
# + colab={"base_uri": "https://localhost:8080/"} id="QtfSzuC96PA2" outputId="9098f474-1c1a-4bc8-8387-8638efa1da00"
o
# + id="vyRi9tPk6wIC"
# with all default arguments
def add_two_numbers(n1=1, n2=2, print_out=False):
    """Return n1 + n2 (defaults 1 and 2), printing it first if print_out is True."""
    total = n1 + n2
    if print_out:
        print(total)
    return total
# + colab={"base_uri": "https://localhost:8080/"} id="zOc3pL5e6QZm" outputId="9f421c9b-e0ad-4e8a-8cc1-cdec886d8dc5"
j = add_two_numbers()
j
# + colab={"base_uri": "https://localhost:8080/"} id="Ltd6aBce6qoD" outputId="7de6081f-22aa-4110-f29b-c1f6229d708d"
j = add_two_numbers(n2=7)
j
# + colab={"base_uri": "https://localhost:8080/"} id="YVd8Vapx6zXs" outputId="40fd41b3-6385-4a61-a01d-ec25181e3917"
w = add_two_numbers(print_out=True)
# + id="TmpRKZ2j64wG"
# with 1 required input
def add_two_numbers(n1, n2=2, print_out=False):
    """Return n1 + n2 (n2 defaults to 2), printing it first if print_out is True."""
    total = n1 + n2
    if print_out:
        print(total)
    return total
# + colab={"base_uri": "https://localhost:8080/"} id="wNNnmXt67jfu" outputId="c4aa2af3-c0bf-43b7-a0f8-a7ca907beee0"
add_two_numbers(n1=6)
# + colab={"base_uri": "https://localhost:8080/"} id="fTWqsrmS7um0" outputId="b5b3e92f-1c4d-43c9-8eb5-9b3a8d58ab6a"
add_two_numbers(2)
# + colab={"base_uri": "https://localhost:8080/"} id="K9VzOuWH7kQo" outputId="5bf3a311-c4fb-4ac5-bf04-ea20e25639c1"
add_two_numbers(7, 4, False)
# + [markdown] id="QavNtd6Z79Lf"
# # Sorting
# + colab={"base_uri": "https://localhost:8080/"} id="QeIsdH4D7otf" outputId="c0f91c0c-29b9-4685-9f81-a95901db0064"
"""
write a function that take in a list of numbers and returns the list sorted from least to greatest
"""
def find_the_lowest_value(a_list):
    """
    returns the lowest value in a list
    """
    # Seed `low` with the first element, then keep whichever value is smaller.
    # (The `position == 0` test short-circuits before `low` is ever read.)
    for position, value in enumerate(a_list):
        if position == 0 or value < low:
            low = value
    return low
list_b = [3, -1, 2, 1]
find_the_lowest_value(list_b)
# + colab={"base_uri": "https://localhost:8080/"} id="Pit4poVJ8_0Z" outputId="e7f5d5d2-7ec4-419c-95d2-3b6b266831f4"
def sort_from_least_to_greatest(list_here):
    """
    takes a list of numbers and returns a new list sorted from least to greatest

    Works on a copy: the original implementation called .remove() on the
    caller's list and silently emptied it as a side effect.
    """
    # copy so the caller's list is left untouched
    remaining = list(list_here)
    sorted_list = []
    # one pass per element: repeatedly extract the current minimum
    for _ in range(len(remaining)):
        low_value = find_the_lowest_value(remaining)
        # remove lowest value from the working copy
        remaining.remove(low_value)
        # add the lowest value to the new list
        sorted_list.append(low_value)
    # output the new list sorted from least to greatest
    return sorted_list
list_b = [3, -1, 2, 3, 2, 1]
sort_from_least_to_greatest(list_b)
# + id="iv5rUQIZCoSu"
# + [markdown] id="F2drnLiVBIxg"
# All of that already has a function. And it's a built in list function.
# + colab={"base_uri": "https://localhost:8080/"} id="O_JwadE2_DcY" outputId="7609732d-812e-4410-96ac-8c528a9fe30c"
list_b = [3, -1, 2, 3, 2, 1]
list_b.sort()
list_b
| day_02/intro_to_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:hyper]
# language: python
# name: conda-env-hyper-py
# ---
import numpy as np
import pandas as pd
import os, time
# NOTE(review): `sklearn.externals.joblib` was deprecated in scikit-learn 0.21
# and removed in 0.23 -- switch to a plain `import joblib` when upgrading.
from sklearn.externals import joblib
# +
class ModelBlender:
    """Scaffold for a stacked/blended ensemble model.

    Stores the split ratios and stacking configuration; the fit/predict
    methods below are placeholders still to be implemented.
    """

    def __init__(self, problem='classification', training_ratio=0.7, hold_out=0.2, blended_samples=2,
                 stack_levels=2, na_treatment='omit', sample_generation='random', n_cross=1, n_jobs=-1):
        self.problem = problem
        self.training_ratio = training_ratio
        # fraction of the data reserved for the hold-out evaluation set
        self.hold_out = hold_out
        self.blended_samples = blended_samples
        self.stack_levels = stack_levels
        self.na_treatment = na_treatment
        self.sample_generation = sample_generation
        self.n_cross = n_cross
        self.n_jobs = n_jobs
        # Data splits, to be filled in by fit_data(). The hold-out *data* gets
        # its own attribute: the original code re-assigned `self.hold_out = None`
        # here, silently clobbering the configured hold_out ratio above.
        self.train = None
        self.blend = None
        self.hold_out_data = None
        # one model registry per stacking level
        self.modelstore = {'stack_{}'.format(i): {} for i in range(self.stack_levels)}

    def fit_data(self, df, na_imputation):
        """Split *df* into train/blend/hold-out sets (not yet implemented)."""
        pass

    def fit_model(self):
        """Train the models at every stack level (not yet implemented)."""
        pass

    def plot_model(self):
        """Visualize the fitted models (not yet implemented)."""
        pass

    def plot_scores(self):
        """Visualize model scores (not yet implemented)."""
        pass

    def generate_stack(self):
        """Build the stacked-model structure (not yet implemented)."""
        pass

    def save(self):
        """Persist the blender to disk (not yet implemented)."""
        pass

    def load(self):
        """Restore a persisted blender from disk (not yet implemented)."""
        pass

    def predict(self):
        """Predict on new data with the fitted stack (not yet implemented)."""
        pass

    def score_on_holdout(self):
        """Score the fitted stack on the hold-out set (not yet implemented)."""
        pass
# -
np.random.random((1000,10))
| misc - work/Blender.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Observations and Insights
#
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
from scipy.stats import linregress
import numpy as np
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset
df_metadata = pd.merge(study_results, mouse_metadata, how="outer", on= "Mouse ID")
# Display the data table for preview
df_metadata.head()
# -
# Checking the number of mice.
total_mice = df_metadata['Mouse ID'].nunique()
total_mice
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
duplicate_rows = df_metadata.loc[df_metadata.duplicated(subset=["Mouse ID", "Timepoint"]), "Mouse ID"].unique()
duplicate_rows
# Optional: Get all the data for the duplicate mouse ID.
df_metadata[df_metadata["Mouse ID"]== 'g989']
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
df_clean = df_metadata[df_metadata["Mouse ID"].isin(duplicate_rows) == False]
df_clean
# Checking the number of mice in the clean DataFrame.
df_clean['Mouse ID'].nunique()
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen:
df_group = df_clean.groupby(["Drug Regimen"])
# mean, median, variance, standard deviation, and SEM of the tumor volume.
mean_tumor = df_group["Tumor Volume (mm3)"].mean()
median_tumor = df_group["Tumor Volume (mm3)"].median()
var_tumor = df_group["Tumor Volume (mm3)"].var()
std_tumor = df_group["Tumor Volume (mm3)"].std()
sem_tumor = df_group["Tumor Volume (mm3)"].sem()
# Assemble the resulting series into a single summary dataframe.
df_summary = pd.DataFrame({"Mean": mean_tumor, "Median":median_tumor, "Variance":var_tumor,
"Standard Deviation": std_tumor, "SEM": sem_tumor})
# -
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
df_summary
# Using the aggregation method, produce the same summary statistics in a single line
df_single_summary = df_group.agg(['mean','median','var','std','sem'])["Tumor Volume (mm3)"]
df_single_summary
# ## Bar and Pie Charts
# +
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pandas.
# NOTE(review): .count() tallies rows, i.e. the number of *measurements* per regimen,
# not unique mice (that would be .nunique()) -- the ylabel below may overstate what
# is plotted; the pyplot version of this chart is titled "Treatments per Drug Regimen".
total_measurements = df_clean.groupby(["Drug Regimen"]).count()['Mouse ID']
total_measurements
x_axis1 = np.arange(len(total_measurements))
y_axis1 = total_measurements.values
total_measurements.plot(figsize = (10, 5),
                        kind = 'bar',
                        xlim=(-0.5, len(x_axis1)-0.3),
                        ylim=(0, max(y_axis1)+20),
                        title = 'Treatments of Drug Regimen',
                        xlabel = 'Drug Regimen',
                        ylabel = 'Number of unique mice tested');
# +
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot.
fig1, ax1 = plt.subplots(figsize=(10,5))
ax1.bar(x_axis1, y_axis1, width = 0.5);
ax1.set_xticks(x_axis1);
ax1.set_xticklabels(["Capomulin", "Ceftamin", "Infubinol", "Ketapril", "Naftisol",
"Placebo", "Propriva", "Ramicane", "Stelasyn", "Zoniferol"], rotation='vertical');
ax1.set(xlim=(-0.5, len(x_axis1)-0.3),
ylim=(0, max(y_axis1)+20),
title="Treatments per Drug Regimen",
xlabel="Drug Regimen",
ylabel="Number of Treatments");
# +
# Generate a pie plot showing the distribution of female versus male mice using pandas
gender_dist = df_clean["Sex"].value_counts()
values_gender_dist = gender_dist.values
labels_gender_dist = gender_dist.index
colors = ["purple", "yellow"]
explode = (0.1, 0)
gender_dist.plot(figsize = (7, 7),
ylabel = " ",
kind = 'pie',
explode=explode,
colors=colors,
autopct="%1.1f%%",
shadow=True,
startangle=140);
# -
# Generate a pie plot showing the distribution of female versus male mice using pyplot
fig2, ax2 = plt.subplots(figsize=(7,7))
ax2.pie(values_gender_dist,
explode=explode,
labels=labels_gender_dist,
colors=colors,
autopct="%1.1f%%",
shadow=True,
startangle=140);
# ## Quartiles, Outliers and Boxplots
# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
final_tumor = df_clean.loc[df_clean['Drug Regimen'].isin(["Capomulin", "Ramicane", "Infubinol", "Ceftamin"])]
# Start by getting the last (greatest) timepoint for each mouse
last_time = final_tumor.groupby(final_tumor['Mouse ID']).agg({'Timepoint':['max']})
last_time.columns = ['Timepoint']
last_time
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
last_time = last_time.reset_index()
df_last_tumor = pd.merge(last_time, df_clean, how="left", on=["Mouse ID", "Timepoint"])
df_last_tumor
# +
# Put treatments into a list for for loop (and later for plot labels)
treatments = ['Capomulin', 'Ramicane', 'Infubinol', 'Ceftamin']
# Create empty list to fill with tumor vol data (for plotting)
vol_tumor = []
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Locate the rows which contain mice on each drug and get the tumor volumes
for treatment in treatments:
# add subset
vol_final = df_last_tumor['Tumor Volume (mm3)'].loc[df_last_tumor['Drug Regimen'] == treatment]
vol_tumor.append(vol_final)
# Determine outliers using upper and lower bounds
quartiles = vol_final.quantile([.25,.5,.75])
lowerq = quartiles[0.25]
upperq = quartiles[0.75]
iqr = upperq-lowerq
print(f"The lower quartile of {treatment} is: {lowerq}")
print(f"The upper quartile of {treatment} is: {upperq}")
print(f"The interquartile range of {treatment} is: {iqr}")
print(f"The the median of {treatment} is: {quartiles[0.5]} ")
lower_bound = lowerq - (1.5*iqr)
upper_bound = upperq + (1.5*iqr)
print(f"Values below {lower_bound} could be outliers.")
print(f"Values above {upper_bound} could be outliers.\n")
# -
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
fig3, ax3 = plt.subplots(figsize=(8,7))
ax3.set_title('Final Tumor Volume')
ax3.set_ylabel('Tumor Volume (mm3)')
ax3.boxplot(vol_tumor, labels = treatments)
plt.show()
# ## Line and Scatter Plots
# +
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
capo_mouse = df_clean.loc[df_clean["Mouse ID"] == "l509",:]
x_axis4 = capo_mouse["Timepoint"]
y_axis4 = capo_mouse['Tumor Volume (mm3)']
fig4, ax4 = plt.subplots(figsize=(10,7));
ax4.plot(x_axis4, y_axis4, marker="o", color="blue");
# xlim upper bound was `(x_axis4) + 5` -- a whole Series, not a number;
# use the max timepoint, mirroring how ylim uses min/max of y_axis4
ax4.set(xlabel = "Timepoint",
        ylabel = "Tumor Volume (mm3)",
        title = "Tumor Volume for Mouse l509 Treated with Capomulin",
        xlim = (0, max(x_axis4) + 5),
        ylim = (min(y_axis4) - 5, max(y_axis4) + 5));
# +
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
df_capo = df_clean.loc[df_clean['Drug Regimen'] == 'Capomulin']
# NOTE(review): groupby(...).mean() over a frame that still has non-numeric columns
# will raise under pandas >= 2.0 -- use .mean(numeric_only=True) when upgrading.
avg_capo = df_capo.groupby(["Mouse ID"]).mean()
x_value5 = avg_capo["Weight (g)"]
y_value5 = avg_capo["Tumor Volume (mm3)"]
fig5, ax5 = plt.subplots(figsize=(10,7))
# s=x_value5 scales each marker's area with the mouse's weight
ax5.scatter(x_value5, y_value5, marker="o", facecolors="blue", edgecolors="black",
            s=x_value5, alpha=0.75)
ax5.set(
    title="Average Tumor Volume vs. Mouse Weight",
    xlabel="Mouse Weight (g)",
    ylabel="Tumor Volume (mm3)");
# -
# ## Correlation and Regression
# +
# Calculate the correlation coefficient and linear regression model
correlation = st.pearsonr(x_value5,y_value5)
print(f"The correlation for average tumor volume vs. mouse weight is {round(correlation[0],2)}")
# +
# Linear regression model for mouse weight and average tumor volume for the Capomulin regimen
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_value5, y_value5)
regress_values = x_value5 * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
fig6, ax6 = plt.subplots(figsize=(10,7))
ax6.scatter(x_value5, y_value5, marker="o", facecolors="blue", edgecolors="black",
s=x_value5, alpha=0.75)
ax6.annotate(line_eq,(22,38),fontsize=15,color="red")
ax6.plot(x_value5, regress_values, "r-");
ax6.set(
title="Average Tumor Volume vs. Mouse Weight",
xlabel="Mouse Weight (g)",
ylabel="Tumor Volume (mm3)");
# -
| Pymaceuticals/pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Modules to install via pip pandas,ipynb
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import json
from pprint import pprint
import os
import import_ipynb
import sys
import kmeans
sys.path.append('../')
import trace_analysis
import os
import trace_statistics
from functions import *
from pandas.plotting import scatter_matrix
import cmath as math
from mpl_toolkits.mplot3d import Axes3D
from sklearn import cluster
from sklearn.metrics import confusion_matrix
from sklearn.metrics.cluster import normalized_mutual_info_score
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.cluster import KMeans
# scipy
from scipy.cluster.vq import kmeans,vq,whiten
import sklearn.metrics as sm
import seaborn as sns
from sklearn.decomposition import PCA
# %matplotlib inline
# +
# Read the ip of each node
ips = pd.read_csv('./traces/out-2019-01JAN-28-01.cap',
sep=';|addr:|/',
na_filter=True,
usecols=[1,3,4],
header=None,
nrows=550,
names=['prefix','node_id','addr','ip','scope'],
engine='python').dropna()
ips = ips[ips.scope == '64 scope: global'].reset_index(drop=True).drop(['scope'], axis=1)
# Read the rank of each node
rank = pd.read_csv('./traces/out-2019-01JAN-28-01.cap',
sep=';|\t|R: | \| OP: ',
na_filter=True,
usecols=[1,4],
header=None,
skiprows=550,
names=['node_id','rank'],
engine='python').dropna()
rank = rank[rank['rank'].apply(lambda x: x.isdigit())].reset_index(drop=True)
# Merge all data
node_ip_and_rank = pd.merge(ips, rank, how='inner').drop_duplicates()
#node_ip_and_rank
# +
# Read the rank of each node
nodes = pd.read_csv('./traces/out-2019-01JAN-28-01.cap',
sep=';|seq=| hop|time = |ms',
na_filter=True,
usecols=[1,3,5],
header=None,
skiprows=799,
names=['node_id','seq','rtt'],
engine='python').dropna().drop_duplicates()
nodes = nodes.sort_values(by=['node_id','seq'], ascending=True, na_position='first')
#
d_nodes = {} # <node_id, DataFrame containing seq and rtt columns>
# Group the per-ping rows into one single-row-at-a-time DataFrame per node,
# keyed by node_id, so each node's (seq, rtt) series can be joined later.
# NOTE(review): DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# when upgrading, collect the row frames in a list and pd.concat once per node.
for n in nodes.index:
    if nodes['node_id'][n] in d_nodes:
        d_nodes[nodes['node_id'][n]] = d_nodes[nodes['node_id'][n]].append(pd.DataFrame({'seq': [int(nodes['seq'][n])], nodes['node_id'][n]: [nodes['rtt'][n]]}))
    else:
        d_nodes[nodes['node_id'][n]] = pd.DataFrame({'seq': [int(nodes['seq'][n])], nodes['node_id'][n]:[nodes['rtt'][n]]})
#
nodes = pd.DataFrame([seq for seq in range(1,1001)], columns=['seq']).set_index('seq')
for node in d_nodes.keys():
nodes = nodes.join(d_nodes[node].set_index('seq'))
nodes = nodes[~nodes.index.duplicated(keep='first')]
#nodes.head(10)
# +
rank_to_hops = sorted([int(rank) for rank in list(node_ip_and_rank['rank'].drop_duplicates())])
hops = {}
icmp = [x for x in range(1,len(nodes)+1)]
for node in node_ip_and_rank.index:
if (rank_to_hops.index(int(node_ip_and_rank['rank'][node]))+1) in hops:
# The key should be created
hops[rank_to_hops.index(int(node_ip_and_rank['rank'][node]))+1].append(node_ip_and_rank['node_id'][node])
else:
# Just append to the list of nodes
hops[rank_to_hops.index(int(node_ip_and_rank['rank'][node]))+1] = [node_ip_and_rank['node_id'][node]]
# Contain mean time for each distance from the root
hop_nodes = pd.DataFrame({1: nodes[hops[1]].mean(axis=1), 2: nodes[hops[2]].mean(axis=1),\
3: nodes[hops[3]].mean(axis=1)})
#hop_nodes.head(10)
# -
data=nodes.describe().T
data.head()
hop_nodes2 = trace_analysis.process_iotlab_aggregated(os.getcwd() + '/traces/', '2019-01JAN-30-1b169')
std_values2, outliers2 = trace_analysis.separate_outliers(hop_nodes2)
tubling_packet_loss2 = trace_statistics.tumbling_packet_loss_per_hop(os.getcwd() + '/traces/', '2019-01JAN-30-1b169', window_size=10)
| module/data/iot-lab/iot-lab-25nodes/Kmeans.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.1.0
# language: julia
# name: julia-1.1
# ---
# # Tutorial of MendelEstimateFrequencies
#
# ## Julia version
# Current code supports Julia version 1.0+
# ## When to use MendelEstimateFrequencies
# The Estimate Frequencies model applies to pedigrees, including those with missing marker data. With too many marker alleles computational efficiency suffers and large sample statistical assumptions become suspect. We recommend consolidating alleles until at most eight alleles remain and each has a frequency of 0.05 or greater. If the fraction of missing data is large, ethnic stratification may come into play. One remedy is to limit analysis to a single ethnic group; another is to use ethnic-specific allele frequencies. If you opt for the latter strategy, then you cannot simultaneously estimate allele frequencies and transmission parameters.
# ## Installation
#
# *Note: Since the OpenMendel packages are not yet registered, the three OpenMendel packages (1) [SnpArrays](https://openmendel.github.io/SnpArrays.jl/latest/), (2) [MendelSearch](https://openmendel.github.io/MendelSearch.jl), and (3) [MendelBase](https://openmendel.github.io/MendelBase.jl) **must** be installed before any other OpenMendel package is installed. It is easiest if these three packages are installed in the above order.*
#
# If you have not already installed the MendelEstimateFrequencies, then within Julia, use the package manager to install MendelEstimateFrequencies:
] add https://github.com/OpenMendel/MendelEstimateFrequencies.jl.git
# or once the OpenMendel packages are registered simply use:
#
# `pkg> add MendelEstimateFrequencies`
#
# This package supports Julia v1.0+
# ## Input Files
# The Mendel EstimateFrequencies analysis package accepts the following input files. Example input files can be found in the [data]( https://github.com/OpenMendel/MendelEstimateFrequencies.jl/tree/master/data) subfolder of the Mendel EstimateFrequencies project. (An analysis won't always need every file type below.)
#
# * [Control File](https://openmendel.github.io/MendelEstimateFrequencies.jl/#control-file): Specifies the names of your data input and output files and any optional parameters (*keywords*) for the analysis. (For a list of common keywords, see [Keywords Table](https://openmendel.github.io/MendelBase.jl/#keywords-table)). The Control file is optional. If you don't use a Control file you will enter your keywords directly in the command line.
# * [Locus File]( https://openmendel.github.io/MendelBase.jl/#locus-file): Names and describes the genetic loci in your data.
# * [Pedigree File]( https://openmendel.github.io/MendelBase.jl/#pedigree-file): Gives information about your individuals, such as name, sex, family structure, and ancestry.
# * [Phenotype File]( https://openmendel.github.io/MendelBase.jl/#phenotype-file): Lists the available phenotypes.
# ### Control file
# The Control file is a text file consisting of keywords and their assigned values. The format of the Control file is:
#
# Keyword = Keyword_Value(s)
#
# Below is an example of a simple Control file to run EstimateFrequencies:
#
# #
# # Input and Output files.
# #
# locus_file = estimate frequencies 2 LocusFrame.txt
# pedigree_file = estimate frequencies 2 PedigreeFrame.txt
# phenotype_file = estimate frequencies 2 PhenotypeFrame.txt
# output_file = estimate frequencies 2 Output.txt
# #
# # Analysis parameters for Estimate Frequencies option.
# #
#
# In the example above, there are three keywords specifying the input files: *estimate frequencies 2 LocusFrame.txt*, *estimate frequencies 2 PedigreeFrame.txt*, and *estimate frequencies 2 PhenotypeFrame.txt*. There is one keyword specifying the standard output file: *estimate frequencies 2 Output.txt*. There are no analysis parameters specified for this run; all analysis parameters take the default values. The text after the '=' are the keyword values. A list of OpenMendel keywords common to most analysis package can be found [here](https://openmendel.github.io/MendelBase.jl/#keywords-table). The names of keywords are *not* case sensitive. (The keyword values *may* be case sensitive.)
# ## Data Files
# EstimateFrequencies requires a [Control file](https://openmendel.github.io/MendelBase.jl/#control-file), and a [Pedigree file](https://openmendel.github.io/MendelBase.jl/#pedigree-file). Genotype data can be included in the Pedigree file, in which case a [Locus file](https://openmendel.github.io/MendelBase.jl/#locus-file) is required. Alternatively, genotype data can be provided in a [SNP data file](https://openmendel.github.io/MendelBase.jl/#snp-data-file), in which case a [SNP Definition File](https://openmendel.github.io/MendelBase.jl/#snp-definition-file) is required. Details on the format and contents of the Control and data files can be found on the [MendelBase](https://openmendel.github.io/MendelBase.jl) documentation page. There are example data files in the EstimateFrequencies [data](https://github.com/OpenMendel/MendelEstimateFrequencies.jl/tree/master/data) folder.
# ## Running the Analysis
# To run this analysis package, first launch Julia. Then load the package with the command:
#
# `julia> using MendelEstimateFrequencies`
#
# Next, if necessary, change to the directory containing your files, for example,
#
# `julia> cd("~/path/to/data/files/")`
#
# Finally, to run the analysis using the parameters in your Control file, for example, Control_file.txt, use the command:
#
# `julia> EstimateFrequencies("Control_file.txt")`
#
# *Note: The package is called* MendelEstimateFrequencies *but the analysis function is called simply* EstimateFrequencies.
#
#
# ## Output Files
# Each option will create output files specific to that option, and will save them to the same directory that holds the input data files. The EstimateFrequencies package creates an output file that gives details about the analysis run.
# # Example 1:
# ### Step 0: Load the OpenMendel package and then go to the directory containing the data files:
# First we load the MendelEstimateFrequencies package.
using MendelEstimateFrequencies
# In this example we go to the directory containing the example data files that come with this package.
cd(MendelEstimateFrequencies.datadir())
pwd()
# ### Step 1: Preparing the Pedigree files:
# Recall the structure of a [valid pedigree structure](https://openmendel.github.io/MendelBase.jl/#pedigree-file). Note that we require a header line. Let's examine the first few lines of such an example:
;head -10 "estimate frequencies 1 PedigreeFrame.txt"
# ### Step 2: Preparing the Control file
# A Control file gives specific instructions to `MendelEstimateFrequencies`. To estimate allele frequencies, a minimal Control file looks like the following:
;cat "estimate frequencies 1 Control.txt"
# ### Step 3: Run the analysis in Julia REPL or directly in notebook
EstimateFrequencies("estimate frequencies 1 Control.txt")
# ### Step 4: Output File
#
# `MendelEstimateFrequencies` should have generated the file `estimate frequencies 1 Output.txt` in your local directory. This file has detailed information on the analysis (see below).
;cat "estimate frequencies 1 Output.txt"
# ### Step 5: Interpreting the result
# ## Citation
#
# If you use this analysis package in your research, please cite the following reference in the resulting publications:
#
# *<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME> (2013) Mendel: The Swiss army knife of genetic analysis programs. Bioinformatics 29:1568-1570.*
# ## Acknowledgments
#
# This project is supported by the National Institutes of Health under NIGMS awards R01GM053275 and R25GM103774 and NHGRI award R01HG006139.
| EstimateFrequencies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Day 22
#
# https://adventofcode.com/2020/day/22
import collections
import itertools
import aocd
test_data = """
Player 1:
9
2
6
3
1
Player 2:
5
8
4
7
10
"""
def parse_data(data):
    """Split the puzzle input into the two players' starting decks.

    `data` contains two blank-line-separated sections, each a
    'Player N:' header followed by one card value per line.
    Returns a pair of lists of ints (player 1 deck, player 2 deck).
    """
    def to_deck(section):
        _, _, body = section.partition(':')
        return [int(token) for token in body.split('\n') if token != '']
    first, second = (to_deck(section) for section in data.split('\n\n'))
    return first, second
# deck1, deck2 = parse_data(test_data)
deck1, deck2 = parse_data(aocd.get_data(day=22, year=2020))
len(deck1), len(deck2)
# ### Solution to Part 1
def play(deck1, deck2):
    """Play regular Combat until one player holds every card.

    Each round both players reveal their top card; the higher card's
    owner keeps both (winner's card first). Returns the two final
    decks as deques (one of them empty).
    """
    p1 = collections.deque(deck1)
    p2 = collections.deque(deck2)
    while p1 and p2:
        top1, top2 = p1.popleft(), p2.popleft()
        if top1 > top2:
            p1.extend((top1, top2))
        elif top2 > top1:
            p2.extend((top2, top1))
    return p1, p2
final1, final2 = play(deck1, deck2)
len(final1), len(final2)
def score(deck1, deck2):
    """Score the winning (non-empty) deck.

    The bottom card is worth its value x1, the next x2, and so on
    up to the top card.
    """
    winning_deck = deck1 if deck1 else deck2
    return sum(
        position * card
        for position, card in enumerate(reversed(winning_deck), start=1)
    )
score(final1, final2)
# ### Solution to Part 2
def _play_subgame(deck1, deck2, *, card1, card2, game, game_counter):
    """Launch a Recursive Combat sub-game.

    Each player's sub-deck is a copy of the next `card1` / `card2`
    cards of their current deck (the just-drawn cards' values decide
    how many). Returns whatever `play_recursive` returns for the
    sub-game: the pair of final decks.
    """
    subdeck1 = list(itertools.islice(deck1, card1))
    subdeck2 = list(itertools.islice(deck2, card2))
    return play_recursive(
        subdeck1,
        subdeck2,
        game=game,
        game_counter=game_counter,
    )
def play_recursive(
    deck1,
    deck2,
    *,
    verbose=False,
    game=None,
    game_counter=None,
):
    """Play a game of Recursive Combat (AoC 2020 day 22 part 2).

    Returns the two final decks as deques. The caller treats a
    non-empty `deck1` as "Player 1 won" (a deque is truthy iff
    non-empty). `game`/`game_counter` are internal bookkeeping for
    numbering sub-games; leave both as None for a top-level call.
    """
    if game_counter is None:
        # Top-level call: start the shared game counter and take game #1.
        game_counter = itertools.count(1)
        game = next(game_counter)
    game_round = 0
    already_seen = set()
    deck1 = collections.deque(deck1)
    deck2 = collections.deque(deck2)
    while deck1 and deck2:
        game_round += 1
        if verbose:
            print(f'Game {game} Round {game_round}')
            print(f'Player 1 deck: {list(deck1)}')
            print(f'Player 2 deck: {list(deck2)}')
        # Infinite-game rule: if this exact pair of decks occurred before,
        # Player 1 wins the game. Breaking here leaves deck1 non-empty,
        # which the caller interprets as a Player 1 win.
        configuration = (tuple(deck1), tuple(deck2))
        if configuration in already_seen:
            break
        already_seen.add(configuration)
        card1 = deck1.popleft()
        card2 = deck2.popleft()
        if verbose:
            print(f'Player 1 plays {card1}')
            print(f'Player 2 plays {card2}')
        # Recurse only when both players have at least as many cards left
        # as the value of the card they just drew.
        if len(deck1) >= card1 and len(deck2) >= card2:
            if verbose:
                print('Playing a subgame to determine a winner...\n')
            # _play_subgame returns (final_deck1, final_deck2); a non-empty
            # first deck (truthy deque) means Player 1 won the sub-game.
            player1_wins_subgame, _ = _play_subgame(
                deck1,
                deck2,
                card1=card1,
                card2=card2,
                game=next(game_counter),
                game_counter=game_counter,
            )
            if verbose:
                print()
                print(f'...Back to game {game}')
            if player1_wins_subgame:
                winner = 'Player 1'
                deck1.extend([card1, card2])
            else:
                winner = 'Player 2'
                deck2.extend([card2, card1])
            if verbose:
                print(f'{winner} wins round {game_round} of game {game}')
        # Otherwise the round is decided like regular Combat: higher card wins.
        elif card1 > card2:
            winner = 'Player 1'
            deck1.extend([card1, card2])
        elif card2 > card1:
            winner = 'Player 2'
            deck2.extend([card2, card1])
        if verbose:
            print(f'{winner} wins round {game_round} of game {game}')
    winner = 'Player 1' if deck1 else 'Player 2'
    if verbose:
        print(f'Winner of game {game} is {winner}')
    return deck1, deck2
final1, final2 = play_recursive(deck1, deck2)
len(final1), len(final2)
score(final1, final2)
| Day 22.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.1
# language: julia
# name: julia-1.7
# ---
# # Ejemplo de uso de la biblioteca
# Este notebook muestra ejemplos de ejecución de la biblioteca programada.
# El contenido de la misma es:
# - Cómo importar la biblioteca.
# - Inicialización de red neuronal con dimensiones dadas y pesos aleatorios.
# - Inicialización de red neuronal a partir de matrices.
# - Ejemplo de evaluación con *Forward propagation*.
# - Ejemplo de uso del algoritmo de inicialización aprendida de pesos.
# - Ejemplo de llamadas a las funciones de activación.
# - Ejemplo de aprendizaje de una red neuronal con el algoritmo de *Backpropagation*.
#
#
# ## Cómo importar la biblioteca
# +
# Bibliotecas auxiliares que no tienen que ver con con la nuestra
using Random
using Plots
# Importamos nuestra biblioteca
include("../../OptimizedNeuralNetwork.jl/src/OptimizedNeuralNetwork.jl")
using Main.OptimizedNeuralNetwork
# -
# ## Inicialización de red neuronal con pesos aleatorios
#
# Creamos una red neuronal con pesos inicializados de manera aleatoria.
# +
entry_dimesion = 2
number_of_hidden_units = 3
output_dimension = 2
RandomWeightsNN(
entry_dimesion,
number_of_hidden_units,
output_dimension
)
# -
# La matriz $W1$ se corresponde a los pesos que se sitúan entre la capa de entrada y la capa oculta.
#
# La matriz $W2$ son los pesos entre la capa oculta y la salida.
#
# ## Inicialización de una red neuronal partir de matrices
# Como se comenta detalladamente en la memoria sección 5.2; $(A,S,B)$ son matrices que modelizan una red neuronal.
#
# - $A$ representa los coeficientes que se le aplican a los vectores de entrada.
# - $S$ representa los sesgos que se suma a los respectivos parámetros de entrada.
# - $B$ representan los coeficientes que se aplican a la capa oculta para la salida.
S = [1,2,3] # Sesgos que se añaden a los parámetros entrada
A = [3 4 1; 4 6 3; 1 1 1] # Coeficientes entrada
B = [1 2 3; 3 2 3] # Coeficientes de salida
h = FromMatrixNN(S, A, B)
display(h)
# ## Ejemplo de evaluación con Forward propagation
v = [1,2,2]
# Ejemplo de evaluación h(v)
# con función de activación ReLU y forward_propagation
forward_propagation(h, ReLU,v )
# ## Ejemplo de uso del algoritmo de inicialización aprendida de pesos
#
# Para ello se utilizará la función
# `nn_from_data(X_train, Y_train, n, M)`
# +
# Declaramos las variables que vamos a seguir
# Función ideal que queremos aproximar
f_regression(x)=(x<1) ? exp(-x)-4 : log(x)
data_set_size = 5
n = data_set_size # Número de neuronas
# coincide con el tamaño del conjunto
#Partición homogénea del dominio [-3,3]
K_range = 3
X_train= Vector(LinRange(-K_range, K_range, n))
Y_train = map(f_regression, X_train) # Imágenes de la partición
M = 1
# USO DE LA FUNCIÓN DE INICIALIZACIÓN DE LOS PESOS
h = nn_from_data(X_train, Y_train, n, M)
# Imprimimos la red neuronal
display(Text("La red neuronal obtenida es :"))
println(h)
# Vamos a ver cómo aproxima los resultados
# Función que dado un punto lo evalúa con forward_propagation
# y la función de activación Rampa
evaluate(x)=forward_propagation(h,
RampFunction,x)
plot(x->evaluate([x])[1],
-K_range,K_range,
label="red neuronal n=$n")
plot!(f_regression,
label="f ideal",
title="Comparativa función ideal y red neuronal n=$n")
# -
# ## Funciones de activación
#
# ### Funciones de activación no dependientes de parámetros
#
# +
funciones_activacion = [
CosineSquasher,
RampFunction,
ReLU,
Sigmoid,
HardTanh
]
for σ in funciones_activacion
x = rand()
println("$(σ)($x) = $(σ(x))")
end
# -
# ### Funciones de activación dependientes de parámetros
#
# Existen funciones de activación que depende de parámetros, podemos definirlas eficientemente a partir de macros:
#
# +
# Concretamos los parámetros de los que dependen
# de macros
umbral = @ThresholdFunction(x->x,0)
indicadora = @IndicatorFunction(0)
lRelu = @LReLU(0.01)
# Evaluamos en puntos concretos
dependientes_parametro = [umbral, indicadora, lRelu]
for σ in dependientes_parametro
x = (rand()-0.5)*10
println("$(σ)($x) = $(σ(x))")
end
# -
# ## Backpropagation
# Ejemplo de uso de Backpropagation
n = 3 # number of hidden units
η = 0.005 # learning rate kept small so updates shrink the error without overshooting
tol = 0.5 # allowed error margin, since a too-large η can make the error grow
data_set_size = n
cosin(x,y)=cos(x)+sin(y) # ideal target function to approximate
h = RandomWeightsNN(2,n, 1) # input dimension 2, output dimension 1
X_train = (rand(Float64, (data_set_size, 2)))*3
Y_train = map(v->cosin(v...),eachrow(X_train))
disminuye_error = 0.0 # NOTE(review): appears unused below — confirm before removing
error = error_in_data_set(
    X_train,
    Y_train,
    x->forward_propagation(h,RampFunction,x)
)
println("En error en la iteración 0 es: $error")
# NOTE(review): the loop bound reuses `n` (the neuron count) as the number of
# training iterations and as backpropagation!'s last argument — confirm intent.
for i in 1:n
    backpropagation!(h, X_train, Y_train, RampFunction, derivativeRampFunction, n)
    error = error_in_data_set(
        X_train,
        Y_train,
        x->forward_propagation(h,RampFunction,x)
    )
    println("El error en la iteración $i es: $error")
end
| Memoria/capitulos/Ejemplo-uso-biblioteca.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Bias–variance tradeoff !!!
# 
# ##### No model can achieve perfect bias and variance
# ### Further theory is available in the accompanying Markdown file with the same name
# # Importing Libraries
import matplotlib.pyplot as plt
import seaborn as sn
# %matplotlib inline
import numpy as np
import pandas as pd
import random
# BUG FIX: `rcParams` was assigned one line before it was imported, which
# raises NameError on a fresh kernel. Import first, then set the figure size.
from matplotlib.pylab import rcParams
rcParams['figure.figsize'] = 6, 4
from sklearn import metrics
from sklearn.model_selection import train_test_split
np.random.seed(42) # creating a random seed using numpy
x = np.array([i*np.pi/180 for i in range(60,400,4)]) # creating datapoints for x using numpy
y = np.sin(x) + np.random.normal(0,0.18,len(x)) # sin operated on x, and adding random noise
points = pd.DataFrame(np.column_stack([x,y]),columns=['x','y']) # making a dataframe with x and y
plt.xkcd() # for funny, cool graphs 😊😊😊
plt.plot(points['x'],points['y'], '.', color='m') # plotting the points
def curve_fit( degree ): # a function to fit a polynomial in the above graph
    """Fit a polynomial of the given degree to the module-level `points`
    DataFrame, store the fitted values in a new 'fit' column, and plot
    the scatter together with the fitted curve.
    """
    p = np.polyfit( points.x, points.y, deg = degree ) # numpy polyfit function to fit points (x, y)
    points['fit'] = np.polyval( p, points.x ) # making a new column in the dataframe
    # seaborn >= 0.12 requires keyword arguments for regplot
    sn.regplot( x = points.x, y = points.y, fit_reg = False ) # plotting the points using seaborn
    plt.plot( points.x, points.fit, label='fit', color="m" )
    # BUG FIX: the title previously used the *global* loop variable `i`,
    # which only worked because the caller's loop happened to be named `i`.
    plt.title("degree: "+str(degree))
    plt.show()
for i in range(15): # calling the function to plot graphs
curve_fit(i)
# # As you can see, the degree 1 model doesn't fit the data. It indicates High Bias. As the model complexity increases, it captures the variance in the data perfectly. At high degree, Model is not able to generalize on the test data, i.e. unseen data. A perfect model lies somewhere between High bias and High variance condition. If you decrease Bias, Variance will increase and vice versa. No model is perfect therefore there is a tradeoff between Bias and Variance, kind of like the uncertainty principle!!!
def rmse( y_true, y_pred ):
    """Root-mean-squared error between two equal-length sequences.

    Computed directly with numpy (identical to
    sqrt(sklearn.metrics.mean_squared_error(...)) but with no extra
    dependency on the sklearn call).
    """
    y_true = np.asarray( y_true, dtype=float )
    y_pred = np.asarray( y_pred, dtype=float )
    return np.sqrt( np.mean( ( y_true - y_pred ) ** 2 ) )
# splitting in train and test set
x_train, x_test, y_train, y_test = train_test_split( points.x,
points.y,
test_size = 0.20,
random_state = 42 )
# creating columns in dataframe
rmse_data = pd.DataFrame( columns = ["degree", "rmse_train", "rmse_test"] )
# filling the values in the coloumn
for i in range( 1, 15 ):
p = np.polyfit( x_train, y_train, deg = i )
rmse_data.loc[i-1] = [ i,
rmse( y_train, np.polyval( p, x_train ) ),
rmse( y_test, np.polyval( p, x_test ) ) ]
# +
# finally!!! Huh!! 😊😊😊
# Plotting the errors
plt.plot( rmse_data.degree,
rmse_data.rmse_train,
label='train',
color = 'k' )
plt.plot( rmse_data.degree,
rmse_data.rmse_test,
label='test',
color = 'm' )
plt.xlabel("degree")
plt.ylabel("RMSE")
plt.legend(bbox_to_anchor=(1.1, 1),
loc=2,
borderaxespad=0.)
# -
# # Train error decreases as well as the test error, but test error increases when the model starts overfitting. The model is not able to generalize to the test data. It has high variance (overfitting) when the degree is high, and high bias (underfitting) when the degree is low.
# # Now you have understood how bias and variance are related to under- and overfitting 😊😊. Actually, they are so similar that graphs can't be a good measure to differentiate between them. More theory is available in the accompanying markdown.
| Anshul_ML_Bias&Variance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="xtztNMjHSErp" colab_type="code" outputId="bfecd354-4950-415a-a231-e4973621e24e" colab={"base_uri": "https://localhost:8080/", "height": 340}
# !pip install tensorflow-gpu==2.0.0-alpha0
# + id="wd9UXbwBUxK-" colab_type="code" colab={}
import numpy as np
import tensorflow as tf
from tensorflow import keras
import pandas as pd
import seaborn as sns
from pylab import rcParams
import matplotlib.pyplot as plt
from matplotlib import rc
from google.colab import drive
from sklearn.model_selection import train_test_split
# %matplotlib inline
sns.set(style='whitegrid', palette='muted', font_scale=1.5)
rcParams['figure.figsize'] = 14, 8
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
# + id="0hhVnWPzTREN" colab_type="code" outputId="ac47f6b5-5c1f-4925-d325-34d335bf668a" colab={"base_uri": "https://localhost:8080/", "height": 34}
drive.mount("/content/drive")
# + id="4OrY05Q6Uf6O" colab_type="code" colab={}
heart_csv_path = "/content/drive/My Drive/Colab Notebooks/tensorflow-2/data/heart.csv"
# + id="1UNIIxr7Uqnn" colab_type="code" colab={}
data = pd.read_csv(heart_csv_path)
# + id="lUU6Eu5UU24t" colab_type="code" outputId="2892f2be-2e50-4ea5-cc68-b8f60944323a" colab={"base_uri": "https://localhost:8080/", "height": 297}
data.describe()
# + id="LBUy0UWw8fb5" colab_type="code" outputId="24dbf55a-d150-48c2-e1f2-f694caa29b43" colab={"base_uri": "https://localhost:8080/", "height": 34}
data.shape
# + id="S3wUHL0E96M6" colab_type="code" outputId="fb740dd7-0b47-4e0c-df43-9a058be5356c" colab={"base_uri": "https://localhost:8080/", "height": 68}
data.columns
# + id="5RQ-xtTBtNec" colab_type="code" outputId="105526a7-b01e-439c-93b0-005d9302714c" colab={"base_uri": "https://localhost:8080/", "height": 540}
f = sns.countplot(x='target', data=data)
f.set_title("Heart disease presence distribution")
f.set_xticklabels(['No Heart disease', 'Heart Disease'])
plt.xlabel("");
# + id="_Z_VTHdoWEs2" colab_type="code" outputId="85119257-1742-4d64-b522-c5a7664b2567" colab={"base_uri": "https://localhost:8080/", "height": 540}
f = sns.countplot(x='target', data=data, hue='sex')
plt.legend(['Female', 'Male'])
f.set_title("Heart disease presence by gender")
f.set_xticklabels(['No Heart disease', 'Heart Disease'])
plt.xlabel("");
# + id="sjgW6iriUQgp" colab_type="code" outputId="ba24e5ee-a5ef-495f-99d0-24a883389cf6" colab={"base_uri": "https://localhost:8080/", "height": 521}
heat_map = sns.heatmap(data.corr(method='pearson'), annot=True, fmt='.2f', linewidths=2)
heat_map.set_xticklabels(heat_map.get_xticklabels(), rotation=45);
# + id="rOhxXB1kfD4u" colab_type="code" outputId="c3dec441-0579-45a6-f6e7-2809cf464ccc" colab={"base_uri": "https://localhost:8080/", "height": 506}
plt.scatter(x=data.age[data.target==1], y=data.thalach[(data.target==1)], c="red", s=60)
plt.scatter(x=data.age[data.target==0], y=data.thalach[(data.target==0)], s=60)
plt.legend(["Disease", "No Disease"])
plt.xlabel("Age")
plt.ylabel("Maximum Heart Rate");
# + id="swSuoLbBZUZZ" colab_type="code" colab={}
data['Age_Category'] = pd.cut(data['age'],bins=list(np.arange(25, 85, 5)))
# + id="MkUiEKEwZP1v" colab_type="code" outputId="dddb4c2a-41eb-4a96-90d4-b7cd1158ba04" colab={"base_uri": "https://localhost:8080/", "height": 579}
plt.subplot(121)
data[data['target']==1].groupby('Age_Category')['age'].count().plot(kind='bar')
plt.title('Age Distribution of Patients with +ve Heart Diagonsis')
plt.subplot(122)
data[data['target']==0].groupby('Age_Category')['age'].count().plot(kind='bar')
plt.title('Age Distribution of Patients with -ve Heart Diagonsis')
# + id="EBoXX2pbRxiF" colab_type="code" colab={}
del data['Age_Category']
# + id="Mz7TLvNOZjsR" colab_type="code" outputId="67719a31-75c3-4347-91a7-dbf30dae8ddd" colab={"base_uri": "https://localhost:8080/", "height": 540}
f = sns.countplot(x='cp', data=data, hue='target')
f.set_xticklabels(['Typical Angina', 'Atypical Angina', 'Non-anginal Pain', 'Asymptomatic']);
f.set_title('Disease presence by chest pain type')
plt.ylabel('Chest Pain Type')
plt.xlabel('')
plt.legend(['No Disease', 'Disease']);
# + id="imlD2bj3-CQQ" colab_type="code" colab={}
X = data.loc[:,data.columns!='target']
y = data.iloc[:,-1]
# + id="Bhbi322tjnD7" colab_type="code" outputId="798a7510-50b7-45f2-d724-42de886afe45" colab={"base_uri": "https://localhost:8080/", "height": 272}
data.isnull().sum()
# + id="MVUWjm-mEQMO" colab_type="code" colab={}
feature_columns = []
# numeric cols
for header in ['age', 'trestbps', 'chol', 'thalach', 'oldpeak', 'ca']:
feature_columns.append(tf.feature_column.numeric_column(header))
# bucketized cols
age = tf.feature_column.numeric_column("age")
age_buckets = tf.feature_column.bucketized_column(age, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
feature_columns.append(age_buckets)
# indicator cols
data["thal"] = data["thal"].apply(str)
thal = tf.feature_column.categorical_column_with_vocabulary_list(
'thal', ['3', '6', '7'])
thal_one_hot = tf.feature_column.indicator_column(thal)
feature_columns.append(thal_one_hot)
data["sex"] = data["sex"].apply(str)
sex = tf.feature_column.categorical_column_with_vocabulary_list(
'sex', ['0', '1'])
sex_one_hot = tf.feature_column.indicator_column(sex)
feature_columns.append(sex_one_hot)
data["cp"] = data["cp"].apply(str)
cp = tf.feature_column.categorical_column_with_vocabulary_list(
'cp', ['0', '1', '2', '3'])
cp_one_hot = tf.feature_column.indicator_column(cp)
feature_columns.append(cp_one_hot)
data["slope"] = data["slope"].apply(str)
slope = tf.feature_column.categorical_column_with_vocabulary_list(
'slope', ['0', '1', '2'])
slope_one_hot = tf.feature_column.indicator_column(slope)
feature_columns.append(slope_one_hot)
# embedding cols
thal_embedding = tf.feature_column.embedding_column(thal, dimension=8)
feature_columns.append(thal_embedding)
# crossed cols
age_thal_crossed = tf.feature_column.crossed_column([age_buckets, thal], hash_bucket_size=1000)
age_thal_crossed = tf.feature_column.indicator_column(age_thal_crossed)
feature_columns.append(age_thal_crossed)
cp_slope_crossed = tf.feature_column.crossed_column([cp, slope], hash_bucket_size=1000)
cp_slope_crossed = tf.feature_column.indicator_column(cp_slope_crossed)
feature_columns.append(cp_slope_crossed)
# + id="QRHM4UnxcLQj" colab_type="code" colab={}
def create_dataset(dataframe, batch_size=32):
    """Build a shuffled, batched tf.data pipeline from `dataframe`.

    The 'target' column is popped off as the label tensor; the remaining
    columns are fed as a dict of per-column feature tensors. The input
    dataframe is copied first, so the caller's frame is not mutated.

    NOTE(review): `.shuffle` reorders rows on every pass, so predictions
    made from this dataset will NOT line up with the source dataframe's
    row order — confirm before aligning predictions with labels taken
    directly from the dataframe.
    """
    dataframe = dataframe.copy()
    labels = dataframe.pop('target')
    return tf.data.Dataset.from_tensor_slices((dict(dataframe), labels)) \
            .shuffle(buffer_size=len(dataframe)) \
            .batch(batch_size)
# + id="xbxq2bzFcRUs" colab_type="code" colab={}
train, test = train_test_split(data, test_size=0.2, random_state=RANDOM_SEED)
# + id="DW0afy46cWIH" colab_type="code" colab={}
train_ds = create_dataset(train)
test_ds = create_dataset(test)
# + id="jIgmYKbGElvj" colab_type="code" colab={}
model = tf.keras.models.Sequential([
tf.keras.layers.DenseFeatures(feature_columns=feature_columns),
tf.keras.layers.Dense(units=128, activation='relu'),
tf.keras.layers.Dropout(rate=0.2),
tf.keras.layers.Dense(units=128, activation='relu'),
tf.keras.layers.Dense(units=1, activation='sigmoid')
])
# + id="62BvIGBLXgJN" colab_type="code" outputId="9e09d7cc-1f99-4b4e-a72a-374975952ce0" colab={"base_uri": "https://localhost:8080/", "height": 3709}
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
history = model.fit(train_ds, validation_data=test_ds, epochs=100, use_multiprocessing=True)
# + id="QG6sJRD8uqyJ" colab_type="code" outputId="544c8903-b9e2-4eed-dd09-e5973ddd9ac0" colab={"base_uri": "https://localhost:8080/", "height": 51}
model.evaluate(test_ds)
# + id="HyDuGIwnwE1r" colab_type="code" outputId="25f14e95-5f88-40de-c949-2c036d1d01e2" colab={"base_uri": "https://localhost:8080/", "height": 525}
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.ylim((0, 1))
plt.legend(['train', 'test'], loc='upper left');
# + id="NaYh6u6Pw3Nk" colab_type="code" outputId="dcc39c0a-e783-4bd7-de5a-5cb5a500c85c" colab={"base_uri": "https://localhost:8080/", "height": 525}
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="ybqeTJyWxAPu" colab_type="code" colab={}
from sklearn.metrics import classification_report, confusion_matrix
# + id="sp1rVWsIytrl" colab_type="code" colab={}
predictions = model.predict(test_ds)
# + id="SfiKBFzyy3Kl" colab_type="code" colab={}
bin_predictions = tf.round(predictions).numpy().flatten()
# + id="QNsRPiNsywLU" colab_type="code" outputId="6b78d688-896d-4cca-f12f-521606399521" colab={"base_uri": "https://localhost:8080/", "height": 170}
# BUG FIX: the original referenced `y_test`, which is never defined in this
# notebook (the split produced `train`/`test` dataframes, not X/y arrays).
# The held-out labels live in test['target'].
# CAUTION(review): create_dataset() shuffles test_ds, so the order of
# `predictions` may not match test['target'] row order — verify before
# trusting these metrics (consider an unshuffled dataset for prediction).
print(classification_report(test['target'].values, bin_predictions))
# + id="3h4iXBqvy0WM" colab_type="code" outputId="46c13135-9fee-4dfd-f3a2-86008db7d642" colab={"base_uri": "https://localhost:8080/", "height": 51}
cnf_matrix = confusion_matrix(test['target'].values, bin_predictions)
cnf_matrix
# + id="zb5KUUmm0uoO" colab_type="code" outputId="7084ad95-0259-4dc3-d1cc-f35b889f7740" colab={"base_uri": "https://localhost:8080/", "height": 584}
class_names = [0,1]
fig,ax = plt.subplots()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks,class_names)
plt.yticks(tick_marks,class_names)
sns.heatmap(pd.DataFrame(cnf_matrix),annot=True,cmap="Blues",fmt="d",cbar=False)
ax.xaxis.set_label_position('top')
plt.tight_layout()
plt.ylabel('Actual label')
plt.xlabel('Predicted label');
| 02.heart_disease_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Project Submission
#
# When you are ready to submit your project, meaning you have checked the [rubric](https://review.udacity.com/#!/rubrics/1428/view) and made sure that you have completed all tasks and answered all questions. Then you are ready to compress your files and submit your solution!
#
# The following steps assume:
# 1. All cells have been *run* in Notebook 3 (and that progress has been saved).
# 2. All questions in Notebook 3 have been answered.
# 3. Your robot `sense` function in `robot_class.py` is complete.
#
# Please make sure all your work is saved before moving on. You do not need to change any code in the following cells; this code is to help you submit your project, only.
#
# ---
#
# The first thing we'll do, is convert your notebooks into `.html` files; these files will save the output of each cell and any code/text that you have modified and saved in those notebooks. Note that the second notebook is not included because its completion does not affect whether you pass this project.
# !jupyter nbconvert "1. Robot Moving and Sensing.ipynb"
# !jupyter nbconvert "3. Landmark Detection and Tracking.ipynb"
# ### Zip the project files
#
# Next, we'll zip these notebook files and your `robot_class.py` file into one compressed archive named `project3.zip`.
#
# After completing this step you should see this zip file appear in your home directory, where you can download it as seen in the image below, by selecting it from the list and clicking **Download**.
#
# <img src='images/download_ex.png' width=50% height=50%/>
#
# BUG FIX: a raw `!!` shell magic is not valid Python, so this jupytext .py
# file failed to parse; sibling magics in this file are `# !`-escaped.
# !!apt-get -y update && apt-get install -y zip
# !zip project3.zip -r . -i@file<EMAIL>
# ### Submit Your Project
#
# After creating and downloading your zip file, click on the `Submit` button and follow the instructions for submitting your `project3.zip` file. Congratulations on completing this project and I hope you enjoyed it!
| 4. Zip Your Project Files and Submit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
# Move up one level so the working directory is the smtm project root.
os.chdir('../')
# ### Check that the current working directory is set to the smtm project root
# (The original comment also mentioned creating an `output` folder for analysis
# results, but no folder is created here — NOTE(review): confirm whether that
# step belongs in this notebook.)
print("현재 디렉토리 " , os.getcwd())
from smtm import UpbitDataProvider
dp = UpbitDataProvider()
dp.get_info()
| notebook/upbit_data_provider_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Copyright (c) 2022, salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
import pandas as pd
import numpy as np
import os
import json
from scipy import sparse
# -
# ## Read processed documents
loaded = sparse.load_npz("./mnli_government_travel/corpus_mat.npz")
loaded.toarray()
loaded.shape
corpus_binary_dense = loaded.toarray()
# +
with open('./mnli_government_travel/input_columns.json', 'r') as json_input:
data = json.load(json_input)
input_columns = data['input_columns']
# -
# ## Rule Generation
'''get BERT model prediction and ground truth'''
model_output = pd.read_csv(filepath_or_buffer="./mnli_government_travel/model_output.csv")
model_output.head()
is_error = np.array(model_output['y_gt'] != model_output['y_pred']).astype(int)
model_output['is_error'] = is_error
np.unique(is_error, return_counts=True)
corpus_binary_dense.shape
import debug_rule
# +
filter_threshold = {
'support': 20,
'err_rate': .27,
}
drule_obj = debug_rule.DebugRule()
drule_obj.initialize(corpus_binary_dense, is_error, filter_threshold, verbose=True).train_surrogate_random_forest()
# +
# discover error-prone subpopulations
drule_obj.extract_token_rule()
# calcuate p-value of the error rate in the subpopulation
drule_obj.calculate_pval()
# calculate 95% confidence interval of the error rate in the subpopulation
drule_obj.calculate_ci()
# -
len(drule_obj.rules)
def output_rules(to_output, columns, good_cols, good_idx, dataname):
    """Serialize rule-mining results to ./<dataname>/list.json and test.json.

    Parameters
    ----------
    to_output : JSON-serializable payload (rule lists etc.) for list.json
    columns   : all token/column names
    good_cols : names of the columns that passed filtering
    good_idx  : indices of `good_cols` within `columns`
    dataname  : output sub-directory name (created if missing)
    """
    directory = os.path.join(".", dataname)
    # exist_ok avoids the check-then-create race of the original version
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, "list.json"), 'w') as output:
        json.dump(to_output, output)
    # metadata about the columns used by the rules
    with open(os.path.join(directory, "test.json"), 'w') as output:
        json.dump({
            'columns': columns,
            'good_cols': good_cols,
            'good_idx': good_idx,
        }, output)
# +
def generate_histogram(rule_lists):
    """Bin the rules' error rates into 20 equal-width bins over [0, 1].

    A rule with err_rate == 1.0 lands in the last bin rather than
    overflowing past it. Returns a length-20 numpy array of counts.
    """
    num_bin = 20
    counts = np.zeros(num_bin)
    for rule in rule_lists:
        bin_idx = int(np.floor(rule['err_rate'] * num_bin))
        if bin_idx == num_bin:  # err_rate exactly 1.0 -> clamp into last bin
            bin_idx = num_bin - 1
        counts[bin_idx] += 1
    return counts
to_output = {'rule_lists': drule_obj.rules, 'target_names': ['correct', 'errors'], "top_list": drule_obj.top_token_list}
hist = generate_histogram(drule_obj.rules)
good_cols = [input_columns[x] for x in drule_obj.good_token_idx]
to_output['histogram'] = hist.tolist()
output_rules(to_output, input_columns, good_cols, drule_obj.good_token_idx.tolist(), 'mnli_government_travel_binary')
| pre-process/05-token_rule.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp models.layers
# -
# # Layers
#
# > Helper function used to build PyTorch timeseries models.
#export
from tsai.imports import *
from tsai.utils import *
from torch.nn.init import normal_
from fastai.torch_core import Module
from fastai.layers import *
from torch.nn.utils import weight_norm, spectral_norm
#export
def noop(x):
    "Identity: return `x` unchanged (placeholder for optional layers)."
    return x
# +
#export
def init_lin_zero(m):
    "Recursively zero the weights (and bias, if present) of every `nn.Linear` inside `m`."
    if isinstance(m, nn.Linear):
        bias = getattr(m, 'bias', None)
        if bias is not None:
            nn.init.constant_(bias, 0)
        nn.init.constant_(m.weight, 0)
    # Recurse into submodules regardless of `m`'s own type.
    for child in m.children():
        init_lin_zero(child)

# Alias kept for backward compatibility with callers using the old name.
lin_zero_init = init_lin_zero
# -
#export
class SwishBeta(Module):
    "Swish activation with a learnable slope: `x * sigmoid(beta * x)`."
    def __init__(self, beta=1.):
        # fastai's Module calls super().__init__() automatically.
        self.sigmoid = torch.sigmoid
        # `beta` is a trainable nn.Parameter, so the slope is learned during training.
        self.beta = nn.Parameter(torch.Tensor(1).fill_(beta).to(default_device()))
    def forward(self, x): return x.mul(self.sigmoid(x*self.beta))
# +
#export
def same_padding1d(seq_len, ks, stride=1, dilation=1):
    """Same padding formula as used in Tensorflow.

    Returns the (left, right) padding that keeps the output length equal
    to `seq_len`; when the total is odd, the extra unit goes on the right.
    """
    total = (seq_len - 1) * stride + (ks - 1) * dilation + 1 - seq_len
    left = total // 2
    return left, total - left
class Pad1d(nn.ConstantPad1d):
    # Thin wrapper over nn.ConstantPad1d that defaults the fill value to 0.
    def __init__(self, padding, value=0.):
        super().__init__(padding, value)
@delegates(nn.Conv1d)
class Conv1dSame(Module):
    "Conv1d with padding='same'"
    def __init__(self, ni, nf, ks=3, stride=1, dilation=1, **kwargs):
        # Store hyperparameters; padding is recomputed per input in forward().
        self.ks, self.stride, self.dilation = ks, stride, dilation
        self.conv1d_same = nn.Conv1d(ni, nf, ks, stride=stride, dilation=dilation, **kwargs)
        # Re-expose the inner conv's parameters so init helpers can reach them.
        self.weight = self.conv1d_same.weight
        self.bias = self.conv1d_same.bias
        self.pad = Pad1d
    def forward(self, x):
        # Padding depends on the actual sequence length of `x`, so it is
        # computed on every call and applied before the convolution.
        self.padding = same_padding1d(x.shape[-1], self.ks, dilation=self.dilation) #stride=self.stride not used in padding calculation!
        return self.conv1d_same(self.pad(self.padding)(x))
# -
init_linear(Conv1dSame(2, 3, 3), None, init='auto', bias_std=.01)
bs = 2
c_in = 3
c_out = 5
seq_len = 6
t = torch.rand(bs, c_in, seq_len)
test_eq(Conv1dSame(c_in, c_out, ks=3, stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, seq_len))
test_eq(Conv1dSame(c_in, c_out, ks=3, stride=1, dilation=2, bias=False)(t).shape, (bs, c_out, seq_len))
test_eq(Conv1dSame(c_in, c_out, ks=3, stride=2, dilation=1, bias=False)(t).shape, (bs, c_out, seq_len//2))
test_eq(Conv1dSame(c_in, c_out, ks=3, stride=2, dilation=2, bias=False)(t).shape, (bs, c_out, seq_len//2))
# +
#export
def same_padding2d(H, W, ks, stride=(1, 1), dilation=(1, 1)):
    """Same padding formula as used in Tensorflow.

    Returns (left, right, top, bottom) padding that preserves the spatial
    size (H, W); a kernel size of 1 along an axis needs no padding there.
    """
    if isinstance(ks, Integral):
        ks = (ks, ks)
    p_h = 0 if ks[0] == 1 else (H - 1) * stride[0] + (ks[0] - 1) * dilation[0] + 1 - H
    p_w = 0 if ks[1] == 1 else (W - 1) * stride[1] + (ks[1] - 1) * dilation[1] + 1 - W
    return (p_w // 2, p_w - p_w // 2, p_h // 2, p_h - p_h // 2)
class Pad2d(nn.ConstantPad2d):
    # Thin wrapper over nn.ConstantPad2d that defaults the fill value to 0.
    def __init__(self, padding, value=0.):
        super().__init__(padding, value)
@delegates(nn.Conv2d)
class Conv2dSame(Module):
    "Conv2d with padding='same'"
    def __init__(self, ni, nf, ks=(3, 3), stride=(1, 1), dilation=(1, 1), **kwargs):
        # Normalize scalar arguments to (h, w) tuples.
        if isinstance(ks, Integral): ks = (ks, ks)
        if isinstance(stride, Integral): stride = (stride, stride)
        if isinstance(dilation, Integral): dilation = (dilation, dilation)
        self.ks, self.stride, self.dilation = ks, stride, dilation
        self.conv2d_same = nn.Conv2d(ni, nf, ks, stride=stride, dilation=dilation, **kwargs)
        # Re-expose the inner conv's parameters so init helpers can reach them.
        self.weight = self.conv2d_same.weight
        self.bias = self.conv2d_same.bias
        self.pad = Pad2d
    def forward(self, x):
        # Padding depends on the actual spatial size of `x`, so it is
        # computed on every call and applied before the convolution.
        self.padding = same_padding2d(x.shape[-2], x.shape[-1], self.ks, dilation=self.dilation) #stride=self.stride not used in padding calculation!
        return self.conv2d_same(self.pad(self.padding)(x))
@delegates(nn.Conv2d)
def Conv2d(ni, nf, kernel_size=None, ks=None, stride=1, padding='same', dilation=1, init='auto', bias_std=0.01, **kwargs):
    "conv2d layer with padding='same', 'valid', or any integer (defaults to 'same')"
    # NOTE: docstring previously said "conv1d" — this is the 2d factory.
    assert not (kernel_size and ks), 'use kernel_size or ks but not both simultaneously'
    assert kernel_size is not None or ks is not None, 'you need to pass a ks'
    kernel_size = kernel_size or ks
    # 'same' -> dynamic TF-style padding; 'valid' -> no padding; int -> fixed padding
    if padding == 'same':
        conv = Conv2dSame(ni, nf, kernel_size, stride=stride, dilation=dilation, **kwargs)
    elif padding == 'valid': conv = nn.Conv2d(ni, nf, kernel_size, stride=stride, padding=0, dilation=dilation, **kwargs)
    else: conv = nn.Conv2d(ni, nf, kernel_size, stride=stride, padding=padding, dilation=dilation, **kwargs)
    init_linear(conv, None, init=init, bias_std=bias_std)
    return conv
# -
# Smoke tests: Conv2dSame output spatial size must equal input size (stride 1)
# or half of it (stride 2), for scalar and tuple ks/stride/dilation.
bs = 2
c_in = 3
c_out = 5
h = 16
w = 20
t = torch.rand(bs, c_in, h, w)
test_eq(Conv2dSame(c_in, c_out, ks=3, stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))
test_eq(Conv2dSame(c_in, c_out, ks=(3, 1), stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))
test_eq(Conv2dSame(c_in, c_out, ks=3, stride=(1, 1), dilation=(2, 2), bias=False)(t).shape, (bs, c_out, h, w))
test_eq(Conv2dSame(c_in, c_out, ks=3, stride=(2, 2), dilation=(1, 1), bias=False)(t).shape, (bs, c_out, h//2, w//2))
test_eq(Conv2dSame(c_in, c_out, ks=3, stride=(2, 2), dilation=(2, 2), bias=False)(t).shape, (bs, c_out, h//2, w//2))
test_eq(Conv2d(c_in, c_out, ks=3, padding='same', stride=1, dilation=1, bias=False)(t).shape, (bs, c_out, h, w))
#export
class Chomp1d(nn.Module):
    "Remove the `chomp_size` trailing steps (causal-padding overhang) from the sequence axis."
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size  # number of trailing timesteps to drop
    def forward(self, x):
        # Fix: `x[:, :, :-0]` would return an EMPTY tensor, so guard chomp_size == 0.
        if self.chomp_size == 0: return x
        return x[:, :, :-self.chomp_size].contiguous()
#export
# Modified from https://github.com/locuslab/TCN/blob/master/TCN/tcn.py
class Conv1dCausal(Module):
    "Causal Conv1d: pads by (ks-1)*dilation on both sides, then chomps the right overhang."
    def __init__(self, ni, nf, ks, stride=1, dilation=1, **kwargs):
        padding = (ks - 1) * dilation  # symmetric padding added by nn.Conv1d; right side removed in forward
        self.conv_causal = nn.Conv1d(ni, nf, ks, stride=stride, padding=padding, dilation=dilation, **kwargs)
        self.weight = self.conv_causal.weight  # exposed so init/weight-norm utilities reach the params
        self.bias = self.conv_causal.bias
        self.chomp_size = padding
    def forward(self, x):
        x = self.conv_causal(x)
        # Fix: when ks == 1 the padding is 0 and `x[..., :-0]` would return an EMPTY tensor.
        if self.chomp_size == 0: return x
        return x[..., :-self.chomp_size].contiguous()
# Causal conv must produce the same output length as the 'same' variant.
init_linear(Conv1dCausal(2, 3, 3), None, init='auto', bias_std=.01)
bs = 2
c_in = 3
c_out = 5
seq_len = 512
t = torch.rand(bs, c_in, seq_len)
dilation = 1
test_eq(Conv1dCausal(c_in, c_out, ks=3, dilation=dilation)(t).shape, Conv1dSame(c_in, c_out, ks=3, dilation=dilation)(t).shape)
dilation = 2
test_eq(Conv1dCausal(c_in, c_out, ks=3, dilation=dilation)(t).shape, Conv1dSame(c_in, c_out, ks=3, dilation=dilation)(t).shape)
#export
@delegates(nn.Conv1d)
def Conv1d(ni, nf, kernel_size=None, ks=None, stride=1, padding='same', dilation=1, init='auto', bias_std=0.01, **kwargs):
    "conv1d layer with padding='same', 'causal', 'valid', or any integer (defaults to 'same')"
    assert not (kernel_size and ks), 'use kernel_size or ks but not both simultaneously'
    assert kernel_size is not None or ks is not None, 'you need to pass a ks'
    kernel_size = kernel_size or ks
    if padding == 'same':
        # odd kernels allow exact symmetric padding with a plain nn.Conv1d;
        # even kernels need the dynamic TF-style Conv1dSame
        if kernel_size%2==1:
            conv = nn.Conv1d(ni, nf, kernel_size, stride=stride, padding=kernel_size//2 * dilation, dilation=dilation, **kwargs)
        else:
            conv = Conv1dSame(ni, nf, kernel_size, stride=stride, dilation=dilation, **kwargs)
    elif padding == 'causal': conv = Conv1dCausal(ni, nf, kernel_size, stride=stride, dilation=dilation, **kwargs)
    elif padding == 'valid': conv = nn.Conv1d(ni, nf, kernel_size, stride=stride, padding=0, dilation=dilation, **kwargs)
    else: conv = nn.Conv1d(ni, nf, kernel_size, stride=stride, padding=padding, dilation=dilation, **kwargs)
    init_linear(conv, None, init=init, bias_std=bias_std)
    return conv
# Conv1d factory checks: every padding mode yields the expected length;
# passing both ks aliases (or neither) raises.
bs = 2
ni = 3
nf = 5
seq_len = 6
ks = 3
t = torch.rand(bs, c_in, seq_len)
test_eq(Conv1d(ni, nf, ks, padding=0)(t).shape, (bs, c_out, seq_len - (2 * (ks//2))))
test_eq(Conv1d(ni, nf, ks, padding='valid')(t).shape, (bs, c_out, seq_len - (2 * (ks//2))))
test_eq(Conv1d(ni, nf, ks, padding='same')(t).shape, (bs, c_out, seq_len))
test_eq(Conv1d(ni, nf, ks, padding='causal')(t).shape, (bs, c_out, seq_len))
test_error('use kernel_size or ks but not both simultaneously', Conv1d, ni, nf, kernel_size=3, ks=3)
test_error('you need to pass a ks', Conv1d, ni, nf)
conv = Conv1d(ni, nf, ks, padding='same')
init_linear(conv, None, init='auto', bias_std=.01)
conv
conv = Conv1d(ni, nf, ks, padding='causal')
init_linear(conv, None, init='auto', bias_std=.01)
conv
conv = Conv1d(ni, nf, ks, padding='valid')
init_linear(conv, None, init='auto', bias_std=.01)
weight_norm(conv)
conv
conv = Conv1d(ni, nf, ks, padding=0)
init_linear(conv, None, init='auto', bias_std=.01)
weight_norm(conv)
conv
#export
class SeparableConv1d(Module):
    "Depthwise (per-channel) conv followed by a pointwise 1x1 conv."
    def __init__(self, ni, nf, ks, stride=1, padding='same', dilation=1, bias=True, bias_std=0.01):
        # groups=ni -> each input channel convolved independently
        self.depthwise_conv = Conv1d(ni, ni, ks, stride=stride, padding=padding, dilation=dilation, groups=ni, bias=bias)
        # 1x1 conv mixes channels and maps ni -> nf
        self.pointwise_conv = nn.Conv1d(ni, nf, 1, stride=1, padding=0, dilation=1, groups=1, bias=bias)
        if bias:
            if bias_std != 0:
                normal_(self.depthwise_conv.bias, 0, bias_std)
                normal_(self.pointwise_conv.bias, 0, bias_std)
            else:
                self.depthwise_conv.bias.data.zero_()
                self.pointwise_conv.bias.data.zero_()
    def forward(self, x):
        x = self.depthwise_conv(x)
        x = self.pointwise_conv(x)
        return x
# SeparableConv1d with 'same' padding must preserve seq_len.
bs = 64
c_in = 6
c_out = 5
seq_len = 512
t = torch.rand(bs, c_in, seq_len)
test_eq(SeparableConv1d(c_in, c_out, 3)(t).shape, (bs, c_out, seq_len))
#export
class AddCoords1d(Module):
    """Add coordinates to ease position identification without modifying mean and std"""
    def forward(self, x):
        bs, _, seq_len = x.shape
        # one [-1, 1] coordinate per timestep, replicated for every sample
        coords = torch.linspace(-1, 1, seq_len).repeat(bs, 1, 1).to(x.device)
        # standardize the coordinate channel so the overall stats of x are preserved
        coords = (coords - coords.mean()) / coords.std()
        return torch.cat([x, coords], dim=1)
# +
# AddCoords1d adds one channel and keeps mean~0 / std~1 for standardized input.
bs = 2
c_in = 3
c_out = 5
seq_len = 50
t = torch.rand(bs, c_in, seq_len)
t = (t - t.mean()) / t.std()
test_eq(AddCoords1d()(t).shape, (bs, c_in + 1, seq_len))
new_t = AddCoords1d()(t)
test_close(new_t.mean(),0, 1e-2)
test_close(new_t.std(), 1, 1e-2)
# +
#export
class ConvBlock(nn.Sequential):
    "Create a sequence of conv1d (`ni` to `nf`), activation (if `act_cls`) and `norm_type` layers."
    def __init__(self, ni, nf, kernel_size=None, ks=3, stride=1, padding='same', bias=None, bias_std=0.01, norm='Batch', zero_norm=False, bn_1st=True,
                 act=nn.ReLU, act_kwargs={}, init='auto', dropout=0., xtra=None, coord=False, separable=False, **kwargs):
        kernel_size = kernel_size or ks  # `kernel_size` wins over the `ks` alias when both given
        ndim = 1
        layers = [AddCoords1d()] if coord else []  # coord adds one input channel (hence ni + coord below)
        # resolve e.g. norm='Batch', zero_norm=True -> NormType.BatchZero
        norm_type = getattr(NormType,f"{snake2camel(norm)}{'Zero' if zero_norm else ''}") if norm is not None else None
        bn = norm_type in (NormType.Batch, NormType.BatchZero)
        inn = norm_type in (NormType.Instance, NormType.InstanceZero)
        if bias is None: bias = not (bn or inn)  # bias is redundant right before a norm layer
        if separable: conv = SeparableConv1d(ni + coord, nf, ks=kernel_size, bias=bias, stride=stride, padding=padding, **kwargs)
        else: conv = Conv1d(ni + coord, nf, ks=kernel_size, bias=bias, stride=stride, padding=padding, **kwargs)
        act = None if act is None else act(**act_kwargs)
        # SeparableConv1d already initializes itself via Conv1d
        if not separable: init_linear(conv, act, init=init, bias_std=bias_std)
        if norm_type==NormType.Weight: conv = weight_norm(conv)
        elif norm_type==NormType.Spectral: conv = spectral_norm(conv)
        layers += [conv]
        act_bn = []
        if act is not None: act_bn.append(act)
        if bn: act_bn.append(BatchNorm(nf, norm_type=norm_type, ndim=ndim))
        if inn: act_bn.append(InstanceNorm(nf, norm_type=norm_type, ndim=ndim))
        if bn_1st: act_bn.reverse()  # norm-then-act ordering
        if dropout: layers += [nn.Dropout(dropout)]
        layers += act_bn
        if xtra: layers.append(xtra)  # optional extra layer appended at the end
        super().__init__(*layers)
# Convenience ConvBlock variants with preset norm/act/coord/separable options.
Conv = named_partial('Conv', ConvBlock, norm=None, act=None)
ConvBN = named_partial('ConvBN', ConvBlock, norm='Batch', act=None)
CoordConv = named_partial('CoordConv', ConvBlock, norm=None, act=None, coord=True)
SepConv = named_partial('SepConv', ConvBlock, norm=None, act=None, separable=True)
# -
#export
class ResBlock1dPlus(Module):
    "Resnet block from `ni` to `nh` with `stride`"
    @delegates(ConvLayer.__init__)
    def __init__(self, expansion, ni, nf, coord=False, stride=1, groups=1, reduction=None, nh1=None, nh2=None, dw=False, g2=1,
                 sa=False, sym=False, norm='Batch', zero_norm=True, act_cls=defaults.activation, ks=3,
                 pool=AvgPool, pool_first=True, **kwargs):
        if nh2 is None: nh2 = nf
        if nh1 is None: nh1 = nh2
        nf,ni = nf*expansion,ni*expansion  # bottleneck blocks widen the in/out channels
        # k0: conv+act; k1: final conv of the path (no act, optionally zero-init norm)
        k0 = dict(norm=norm, zero_norm=False, act=act_cls, **kwargs)
        k1 = dict(norm=norm, zero_norm=zero_norm, act=None, **kwargs)
        # expansion == 1 -> basic 2-conv block; otherwise 1x1 / ks / 1x1 bottleneck
        convpath  = [ConvBlock(ni,  nh2, ks, coord=coord, stride=stride, groups=ni if dw else groups, **k0),
                     ConvBlock(nh2,  nf, ks, coord=coord, groups=g2, **k1)
        ] if expansion == 1 else [
                     ConvBlock(ni,  nh1, 1,  coord=coord, **k0),
                     ConvBlock(nh1, nh2, ks, coord=coord, stride=stride, groups=nh1 if dw else groups, **k0),
                     ConvBlock(nh2,  nf, 1,  coord=coord, groups=g2, **k1)]
        if reduction: convpath.append(SEModule(nf, reduction=reduction, act_cls=act_cls))
        if sa: convpath.append(SimpleSelfAttention(nf,ks=1,sym=sym))
        self.convpath = nn.Sequential(*convpath)
        # identity path: 1x1 conv if channels change, pooling if stride > 1
        idpath = []
        if ni!=nf: idpath.append(ConvBlock(ni, nf, 1, coord=coord, act=None, **kwargs))
        if stride!=1: idpath.insert((1,0)[pool_first], pool(stride, ndim=1, ceil_mode=True))
        self.idpath = nn.Sequential(*idpath)
        self.act = defaults.activation(inplace=True) if act_cls is defaults.activation else act_cls()
    def forward(self, x): return self.act(self.convpath(x) + self.idpath(x))
#export
def SEModule1d(ni, reduction=16, act=nn.ReLU, act_kwargs={}):
    "Squeeze and excitation module for 1d"
    # bottleneck width rounded up to a multiple of 8
    nf = math.ceil(ni//reduction/8)*8
    assert nf != 0, 'nf cannot be 0'
    # squeeze (global avg pool) -> excitation (two 1x1 convs) -> channel-wise scaling
    return SequentialEx(nn.AdaptiveAvgPool1d(1),
                        ConvBlock(ni, nf, ks=1, norm=None, act=act, act_kwargs=act_kwargs),
                        ConvBlock(nf, ni, ks=1, norm=None, act=nn.Sigmoid), ProdLayer())
# SE module rescales channels, so input/output shapes match.
t = torch.rand(8, 32, 12)
test_eq(SEModule1d(t.shape[1], 16, act=nn.ReLU, act_kwargs={})(t).shape, t.shape)
# +
#export
def Norm(nf, ndim=1, norm='Batch', zero_norm=False, init=True, **kwargs):
    "Norm layer with `nf` features and `ndim` with auto init."
    assert 1 <= ndim <= 3
    # resolve e.g. norm='Batch', ndim=1 -> nn.BatchNorm1d
    nl = getattr(nn, f"{snake2camel(norm)}Norm{ndim}d")(nf, **kwargs)
    if nl.affine and init:
        nl.bias.data.fill_(1e-3)  # small non-zero bias init
        nl.weight.data.fill_(0. if zero_norm else 1.)  # zero_norm zeroes the scale (residual-friendly)
    return nl
# Shorthand 1d norm constructors.
BN1d = partial(Norm, ndim=1, norm='Batch')
IN1d = partial(Norm, ndim=1, norm='Instance')
# +
# ConvBlock/BN1d checks: shapes, zero_norm init, and bias suppression.
bs = 2
ni = 3
nf = 5
sl = 4
ks = 5
t = torch.rand(bs, ni, sl)
test_eq(ConvBlock(ni, nf, ks)(t).shape, (bs, nf, sl))
test_eq(ConvBlock(ni, nf, ks, padding='causal')(t).shape, (bs, nf, sl))
test_eq(ConvBlock(ni, nf, ks, coord=True)(t).shape, (bs, nf, sl))
ConvBlock(ni, nf, ks, stride=2)(t).shape
test_eq(ConvBlock(ni, nf, ks, stride=2)(t).shape, (bs, nf, sl//2))
# -
test_eq(BN1d(ni)(t).shape, (bs, ni, sl))
test_eq(BN1d(ni).weight.data.mean().item(), 1.)
test_eq(BN1d(ni, zero_norm=True).weight.data.mean().item(), 0.)
test_eq(ConvBlock(ni, nf, ks, norm='batch', zero_norm=True)[1].weight.data.unique().item(), 0)
test_ne(ConvBlock(ni, nf, ks, norm='batch', zero_norm=False)[1].weight.data.unique().item(), 0)
test_eq(ConvBlock(ni, nf, ks, bias=False)[0].bias, None)
ConvBlock(ni, nf, ks, act=Swish, coord=True)
#export
class LinLnDrop(nn.Sequential):
    "Module grouping `LayerNorm1d`, `Dropout` and `Linear` layers"
    def __init__(self, n_in, n_out, ln=True, p=0., act=None, lin_first=False):
        norm_and_drop = []
        if ln: norm_and_drop.append(nn.LayerNorm(n_out if lin_first else n_in))
        if p != 0: norm_and_drop.append(nn.Dropout(p))
        # bias is redundant next to LayerNorm, so drop it when ln is enabled
        linear = [nn.Linear(n_in, n_out, bias=not ln)]
        if act is not None: linear.append(act)
        super().__init__(*(linear + norm_and_drop if lin_first else norm_and_drop + linear))
LinLnDrop(2, 3, p=.5)  # repr check: LayerNorm -> Dropout -> Linear(bias=False)
#export
class LambdaPlus(Module):
    "Wrap an arbitrary function (plus fixed args/kwargs) as a module."
    def __init__(self, func, *args, **kwargs): self.func,self.args,self.kwargs=func,args,kwargs
    def forward(self, x): return self.func(x, *self.args, **self.kwargs)
# +
#export
class Squeeze(Module):
    "Remove the (size-1) dimension `dim`."
    def __init__(self, dim=-1): self.dim = dim
    def forward(self, x): return x.squeeze(dim=self.dim)
    def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim})'
class Unsqueeze(Module):
    "Insert a size-1 dimension at `dim`."
    def __init__(self, dim=-1): self.dim = dim
    def forward(self, x): return x.unsqueeze(dim=self.dim)
    def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim})'
class Add(Module):
    "Element-wise addition of two tensors."
    def forward(self, x, y): return x.add(y)
    def __repr__(self): return f'{self.__class__.__name__}'
class Concat(Module):
    "Concatenate tensors along `dim`; accepts a single iterable of tensors or several tensor args."
    def __init__(self, dim=1): self.dim = dim
    def forward(self, *x):
        # Fix: the original `torch.cat(*x, dim=...)` broke when several tensors were
        # passed (the 2nd tensor was unpacked into torch.cat's `dim` slot).
        xs = x[0] if len(x) == 1 else x
        return torch.cat(xs, dim=self.dim)
    def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim})'
class Permute(Module):
    "Reorder tensor dimensions to `dims`."
    def __init__(self, *dims): self.dims = dims
    def forward(self, x): return x.permute(self.dims)
    def __repr__(self): return f"{self.__class__.__name__}(dims={', '.join([str(d) for d in self.dims])})"
class Transpose(Module):
    "Swap two tensor dimensions, optionally forcing a contiguous result."
    def __init__(self, *dims, contiguous=False): self.dims, self.contiguous = dims, contiguous
    def forward(self, x):
        if self.contiguous: return x.transpose(*self.dims).contiguous()
        else: return x.transpose(*self.dims)
    def __repr__(self):
        if self.contiguous: return f"{self.__class__.__name__}(dims={', '.join([str(d) for d in self.dims])}).contiguous()"
        else: return f"{self.__class__.__name__}({', '.join([str(d) for d in self.dims])})"
class View(Module):
    "Reshape (via `view`, no copy) keeping the batch dimension."
    def __init__(self, *shape): self.shape = shape
    def forward(self, x): return x.view(x.shape[0], *self.shape)
    def __repr__(self): return f"{self.__class__.__name__}({', '.join(['bs'] + [str(s) for s in self.shape])})"
class Reshape(Module):
    "Reshape (via `reshape`, copies if needed) keeping the batch dimension."
    def __init__(self, *shape): self.shape = shape
    def forward(self, x): return x.reshape(x.shape[0], *self.shape)
    def __repr__(self): return f"{self.__class__.__name__}({', '.join(['bs'] + [str(s) for s in self.shape])})"
class Max(Module):
    "Max over `dim` (returns values only, not indices)."
    def __init__(self, dim=None, keepdim=False): self.dim, self.keepdim = dim, keepdim
    def forward(self, x): return x.max(self.dim, keepdim=self.keepdim)[0]
    def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim}, keepdim={self.keepdim})'
class LastStep(Module):
    "Select the last step along the trailing (time) axis."
    def forward(self, x): return x[..., -1]
    def __repr__(self): return f'{self.__class__.__name__}()'
class SoftMax(Module):
    "SoftMax layer"
    def __init__(self, dim=-1):
        self.dim = dim
    def forward(self, x):
        return F.softmax(x, dim=self.dim)
    def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim})'
class Clamp(Module):
    "Clamp tensor values to [min, max] using scalar bounds."
    def __init__(self, min=None, max=None):
        self.min, self.max = min, max
    def forward(self, x):
        return x.clamp(min=self.min, max=self.max)
    def __repr__(self): return f'{self.__class__.__name__}(min={self.min}, max={self.max})'
class Clip(Module):
    "Clip tensor values to [min, max]; bounds may be scalars or broadcastable tensors."
    def __init__(self, min=None, max=None):
        self.min, self.max = min, max
    def forward(self, x):
        # Fix: torch.maximum/minimum require tensor operands, so promote scalar
        # bounds to tensors matching x's dtype/device.
        if self.min is not None:
            x = torch.maximum(x, torch.as_tensor(self.min, dtype=x.dtype, device=x.device))
        if self.max is not None:
            x = torch.minimum(x, torch.as_tensor(self.max, dtype=x.dtype, device=x.device))
        return x
    def __repr__(self): return f'{self.__class__.__name__}()'
Noop = nn.Sequential()  # identity module
# +
# Shape-manipulation layers: verify output shapes for a (bs, nf, sl) tensor.
bs = 2
nf = 5
sl = 4
t = torch.rand(bs, nf, sl)
test_eq(Permute(0,2,1)(t).shape, (bs, sl, nf))
test_eq(Max(1)(t).shape, (bs, sl))
test_eq(Transpose(1,2)(t).shape, (bs, sl, nf))
test_eq(Transpose(1,2, contiguous=True)(t).shape, (bs, sl, nf))
test_eq(View(-1, 2, 10)(t).shape, (bs, 1, 2, 10))
test_eq(Reshape(-1, 2, 10)(t).shape, (bs, 1, 2, 10))
Transpose(1,2), Permute(0,2,1), View(-1, 2, 10), Transpose(1,2, contiguous=True), Reshape(-1, 2, 10), Noop
# +
# export
class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    It's similar to Dropout but it drops individual connections instead of nodes.
    Original code in https://github.com/rwightman/pytorch-image-models (timm library)
    """
    def __init__(self, p=None):
        super().__init__()
        self.p = p  # drop probability; None is treated as "no drop"
    def forward(self, x):
        # Fix: the original `self.p == 0.` test let the default p=None fall
        # through and crash on `1 - None`; `not self.p` covers None and 0.
        if not self.p or not self.training: return x
        keep_prob = 1 - self.p
        # one Bernoulli draw per sample, broadcast over all remaining dims
        shape = (x.shape[0],) + (1,) * (x.ndim - 1)
        random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
        random_tensor.floor_()  # binarize: 1 with prob keep_prob, else 0
        # scale kept paths by 1/keep_prob so the expected activation is unchanged
        output = x.div(keep_prob) * random_tensor
        # output = x.div(random_tensor.mean()) * random_tensor # divide by the actual mean to mantain the input mean?
        return output
# -
# DropPath: identity when p=0; kept samples are scaled up (>= 1) when p=0.5.
t = torch.ones(100,2,3)
test_eq(DropPath(0.)(t), t)
assert DropPath(0.5)(t).max() >= 1
#export
class Sharpen(Module):
    "This is used to increase confidence in predictions - MixMatch paper"
    def __init__(self, T=.5): self.T = T  # temperature; T < 1 sharpens the distribution
    def forward(self, x):
        x = x**(1. / self.T)
        # renormalize over classes; `keepdim` is the canonical torch kwarg
        # (the numpy-style `keepdims` alias isn't supported on older torch)
        return x / x.sum(dim=1, keepdim=True)
# +
# Sharpening should raise the max-class probability mass.
n_samples = 1000
n_classes = 3
t = (torch.rand(n_samples, n_classes) - .5) * 10
probas = F.softmax(t, -1)
sharpened_probas = Sharpen()(probas)
plt.plot(probas.flatten().sort().values, color='r')
plt.plot(sharpened_probas.flatten().sort().values, color='b')
plt.show()
test_gt(sharpened_probas[n_samples//2:].max(-1).values.sum().item(), probas[n_samples//2:].max(-1).values.sum().item())
# -
#export
class Sequential(nn.Sequential):
    """Class that allows you to pass one or multiple inputs"""
    def forward(self, *x):
        for i, module in enumerate(self._modules.values()):
            # spread tuple/list/L outputs across the next module's positional args
            x = module(*x) if isinstance(x, (list, tuple, L)) else module(x)
        return x
#export
class TimeDistributed(nn.Module):
    "Apply `module` to every timestep by folding the time axis into the batch axis."
    def __init__(self, module, batch_first=False):
        super(TimeDistributed, self).__init__()
        self.module = module
        self.batch_first = batch_first  # True: (samples, timesteps, feats); False: (timesteps, samples, feats)
    def forward(self, x):
        if x.dim() <= 2:
            return self.module(x)  # nothing to distribute over
        # fold the two leading axes into one so `module` sees a plain batch
        flat = x.contiguous().view(-1, x.size(-1))
        out = self.module(flat)
        # restore the leading two axes in the requested layout
        if self.batch_first:
            return out.contiguous().view(x.size(0), -1, out.size(-1))
        return out.view(-1, x.size(1), out.size(-1))
# +
#export
class Temp_Scale(Module):
    "Used to perform Temperature Scaling (dirichlet=False) or Single-parameter Dirichlet calibration (dirichlet=True)"
    def __init__(self, temp=1., dirichlet=False):
        self.weight = nn.Parameter(tensor(temp))  # single learnable temperature
        self.bias = None
        self.log_softmax = dirichlet
    def forward(self, x):
        if self.log_softmax: x = F.log_softmax(x, dim=-1)
        return x.div(self.weight)  # divide logits (or log-probs) by the temperature
class Vector_Scale(Module):
    "Used to perform Vector Scaling (dirichlet=False) or Diagonal Dirichlet calibration (dirichlet=True)"
    def __init__(self, n_classes=1, dirichlet=False):
        self.weight = nn.Parameter(torch.ones(n_classes))   # per-class scale, identity init
        self.bias = nn.Parameter(torch.zeros(n_classes))    # per-class shift, zero init
        self.log_softmax = dirichlet
    def forward(self, x):
        if self.log_softmax: x = F.log_softmax(x, dim=-1)
        return x.mul(self.weight).add(self.bias)
class Matrix_Scale(Module):
    "Used to perform Matrix Scaling (dirichlet=False) or Dirichlet calibration (dirichlet=True)"
    def __init__(self, n_classes=1, dirichlet=False):
        self.ms = nn.Linear(n_classes, n_classes)
        # Initialize as the identity transform. Fix: the original wrapped torch.eye
        # in nn.Parameter and assigned it to `.data`, which is a misuse; copy in-place.
        self.ms.weight.data.copy_(torch.eye(n_classes))
        nn.init.constant_(self.ms.bias.data, 0.)
        self.weight = self.ms.weight  # exposed for inspection, like the other calibrators
        self.bias = self.ms.bias
        self.log_softmax = dirichlet
    def forward(self, x):
        if self.log_softmax: x = F.log_softmax(x, dim=-1)
        return self.ms(x)
def get_calibrator(calibrator=None, n_classes=1, **kwargs):
    "Build a calibration layer by name ('temp'/'vector'/'matrix'; 'd' prefix selects the Dirichlet variant)."
    if calibrator is None or not calibrator: return noop
    builders = {
        'temp':    partial(Temp_Scale, dirichlet=False),
        'vector':  partial(Vector_Scale, n_classes=n_classes, dirichlet=False),
        'matrix':  partial(Matrix_Scale, n_classes=n_classes, dirichlet=False),
        'dtemp':   partial(Temp_Scale, dirichlet=True),
        'dvector': partial(Vector_Scale, n_classes=n_classes, dirichlet=True),
        'dmatrix': partial(Matrix_Scale, n_classes=n_classes, dirichlet=True),
    }
    key = calibrator.lower()
    assert key in builders, f'please, select a correct calibrator instead of {calibrator}'
    return builders[key](**kwargs)
# +
# Calibrators initialized as identity: output equals input (or its log_softmax
# for the Dirichlet variants); weights stay learnable Parameters.
bs = 2
c_out = 3
t = torch.rand(bs, c_out)
for calibrator, cal_name in zip(['temp', 'vector', 'matrix'], ['Temp_Scale', 'Vector_Scale', 'Matrix_Scale']):
    cal = get_calibrator(calibrator, n_classes=c_out)
#     print(calibrator)
#     print(cal.weight, cal.bias, '\n')
    test_eq(cal(t), t)
    test_eq(cal.__class__.__name__, cal_name)
for calibrator, cal_name in zip(['dtemp', 'dvector', 'dmatrix'], ['Temp_Scale', 'Vector_Scale', 'Matrix_Scale']):
    cal = get_calibrator(calibrator, n_classes=c_out)
#     print(calibrator)
#     print(cal.weight, cal.bias, '\n')
    test_eq(cal(t), F.log_softmax(t, dim=1))
    test_eq(cal.__class__.__name__, cal_name)
# +
bs = 2
c_out = 3
t = torch.rand(bs, c_out)
test_eq(Temp_Scale()(t).shape, t.shape)
test_eq(Vector_Scale(c_out)(t).shape, t.shape)
test_eq(Matrix_Scale(c_out)(t).shape, t.shape)
test_eq(Temp_Scale(dirichlet=True)(t).shape, t.shape)
test_eq(Vector_Scale(c_out, dirichlet=True)(t).shape, t.shape)
test_eq(Matrix_Scale(c_out, dirichlet=True)(t).shape, t.shape)
test_eq(Temp_Scale()(t), t)
test_eq(Vector_Scale(c_out)(t), t)
test_eq(Matrix_Scale(c_out)(t), t)
# +
bs = 2
c_out = 5
t = torch.rand(bs, c_out)
test_eq(Vector_Scale(c_out)(t), t)
test_eq(Vector_Scale(c_out).weight.data, torch.ones(c_out))
test_eq(Vector_Scale(c_out).weight.requires_grad, True)
test_eq(type(Vector_Scale(c_out).weight), torch.nn.parameter.Parameter)
# +
bs = 2
c_out = 3
weight = 2
bias = 1
t = torch.rand(bs, c_out)
test_eq(Matrix_Scale(c_out)(t).shape, t.shape)
test_eq(Matrix_Scale(c_out).weight.requires_grad, True)
test_eq(type(Matrix_Scale(c_out).weight), torch.nn.parameter.Parameter)
# +
#export
class LogitAdjustmentLayer(Module):
    "Logit Adjustment for imbalanced datasets"
    def __init__(self, class_priors):
        self.class_priors = class_priors  # per-class prior offsets added to the logits
    def forward(self, x):
        return x.add(self.class_priors)
LogitAdjLayer = LogitAdjustmentLayer  # short alias
# -
# Logit adjustment is a plain additive offset.
bs, n_classes = 16, 3
class_priors = torch.rand(n_classes)
logits = torch.randn(bs, n_classes) * 2
test_eq(LogitAdjLayer(class_priors)(logits), logits + class_priors)
# +
#export
class PPV(Module):
    "Proportion of positive values along `dim`."
    def __init__(self, dim=-1):
        self.dim = dim
    def forward(self, x):
        return torch.gt(x, 0).sum(dim=self.dim).float() / x.shape[self.dim]
    def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim})'
class PPAuc(Module):
    "Ratio of the positive-part mass to the total absolute mass along `dim`."
    def __init__(self, dim=-1):
        self.dim = dim
    def forward(self, x):
        # epsilon avoids division by zero for all-zero inputs
        x = F.relu(x).sum(self.dim) / (abs(x).sum(self.dim) + 1e-8)
        return x
    def __repr__(self): return f'{self.__class__.__name__}(dim={self.dim})'
class MaxPPVPool1d(Module):
    "Drop-in replacement for AdaptiveConcatPool1d - multiplies nf by 2"
    def forward(self, x):
        _max = x.max(dim=-1).values  # channel-wise max over time
        _ppv = torch.gt(x, 0).sum(dim=-1).float() / x.shape[-1]  # proportion of positive values
        return torch.cat((_max, _ppv), dim=-1).unsqueeze(2)  # (bs, 2*nf, 1)
# +
# MaxPPVPool1d doubles the channel count, matching AdaptiveConcatPool1d(1).
bs = 2
nf = 5
sl = 4
t = torch.rand(bs, nf, sl)
test_eq(MaxPPVPool1d()(t).shape, (bs, nf*2, 1))
test_eq(MaxPPVPool1d()(t).shape, AdaptiveConcatPool1d(1)(t).shape)
# -
#export
class AdaptiveWeightedAvgPool1d(Module):
    '''Global Pooling layer that performs a weighted average along the temporal axis
    It can be considered as a channel-wise form of local temporal attention. Inspired by the paper:
    <NAME>., <NAME>., & <NAME>. (2019). Universal Pooling--A New Pooling Method for Convolutional Neural Networks. arXiv preprint arXiv:1907.11440.'''
    def __init__(self, n_in, seq_len, mult=2, n_layers=2, ln=False, dropout=0.5, act=nn.ReLU(), zero_init=True):
        layers = nn.ModuleList()
        # small MLP over the time axis that produces per-step attention logits;
        # hidden layers are `mult` times wider than seq_len
        for i in range(n_layers):
            inp_mult = mult if i > 0 else 1
            out_mult = mult if i < n_layers -1 else 1
            p = dropout[i] if is_listy(dropout) else dropout  # per-layer or shared dropout
            layers.append(LinLnDrop(seq_len * inp_mult, seq_len * out_mult, ln=False, p=p,
                                    act=act if i < n_layers-1 and n_layers > 1 else None))
        self.layers = layers
        self.softmax = SoftMax(-1)
        if zero_init: init_lin_zero(self)  # start from uniform attention
    def forward(self, x):
        wap = x
        for l in self.layers: wap = l(wap)
        wap = self.softmax(wap)  # normalized weights over time
        return torch.mul(x, wap).sum(-1)  # weighted average along the temporal axis
# +
#export
class GAP1d(Module):
    "Global Adaptive Pooling + Flatten"
    def __init__(self, output_size=1):
        self.gap = nn.AdaptiveAvgPool1d(output_size)
        self.flatten = Flatten()
    def forward(self, x):
        return self.flatten(self.gap(x))
class GACP1d(Module):
    "Global AdaptiveConcatPool + Flatten"
    # concat pooling doubles the channel count (avg + max)
    def __init__(self, output_size=1):
        self.gacp = AdaptiveConcatPool1d(output_size)
        self.flatten = Flatten()
    def forward(self, x):
        return self.flatten(self.gacp(x))
class GAWP1d(Module):
    "Global AdaptiveWeightedAvgPool1d + Flatten"
    def __init__(self, n_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=nn.ReLU(), zero_init=False):
        # NOTE: attribute is named `gacp` for symmetry with GACP1d, but it holds the weighted-avg pool
        self.gacp = AdaptiveWeightedAvgPool1d(n_in, seq_len, n_layers=n_layers, ln=ln, dropout=dropout, act=act, zero_init=zero_init)
        self.flatten = Flatten()
    def forward(self, x):
        return self.flatten(self.gacp(x))
# +
# export
class GlobalWeightedAveragePool1d(Module):
    """ Global Weighted Average Pooling layer
    Inspired by Building Efficient CNN Architecture for Offline Handwritten Chinese Character Recognition
    https://arxiv.org/pdf/1804.01259.pdf
    """
    def __init__(self, n_in, seq_len):
        # learnable per-(channel, step) affine transform feeding the attention weights
        self.weight = nn.Parameter(torch.ones(1, n_in, seq_len))
        self.bias = nn.Parameter(torch.zeros(1, n_in, seq_len))
    def forward(self, x):
        # sigmoid gates then softmax over time -> per-step weights summing to 1
        α = F.softmax(torch.sigmoid(x * self.weight + self.bias), dim=-1)
        return (x * α).sum(-1)
GWAP1d = GlobalWeightedAveragePool1d  # short alias
def gwa_pool_head(n_in, c_out, seq_len, bn=True, fc_dropout=0.):
    "Classification head: global weighted average pool -> flatten -> linear."
    return nn.Sequential(GlobalWeightedAveragePool1d(n_in, seq_len), Flatten(), LinBnDrop(n_in, c_out, p=fc_dropout, bn=bn))
# -
# gwa_pool_head maps (bs, n_in, seq_len) -> (bs, c_out).
t = torch.randn(16, 64, 50)
head = gwa_pool_head(64, 5, 50)
test_eq(head(t).shape, (16, 5))
# +
#export
class AttentionalPool1d(Module):
    """Global Adaptive Pooling layer inspired by Attentional Pooling for Action Recognition https://arxiv.org/abs/1711.01467"""
    def __init__(self, n_in, c_out, bn=False):
        store_attr()
        self.bn = nn.BatchNorm1d(n_in) if bn else None
        self.conv1 = Conv1d(n_in, 1, 1)      # attention map (one weight per timestep)
        self.conv2 = Conv1d(n_in, c_out, 1)  # per-class features
    def forward(self, x):
        if self.bn is not None: x = self.bn(x)
        # (bs, 1, sl) @ (bs, sl, c_out) -> (bs, 1, c_out) -> (bs, c_out, 1)
        return (self.conv1(x) @ self.conv2(x).transpose(1,2)).transpose(1,2)
class GAttP1d(nn.Sequential):
    "AttentionalPool1d + Flatten."
    def __init__(self, n_in, c_out, bn=False):
        super().__init__(AttentionalPool1d(n_in, c_out, bn=bn), Flatten())
def attentional_pool_head(n_in, c_out, seq_len=None, bn=True, **kwargs):
    "Head version of GAttP1d (seq_len accepted for API uniformity but unused)."
    return nn.Sequential(AttentionalPool1d(n_in, c_out, bn=bn, **kwargs), Flatten())
# -
# Pooling layers: check output shapes for single- and multi-channel inputs.
bs, c_in, seq_len = 16, 1, 50
c_out = 3
t = torch.rand(bs, c_in, seq_len)
test_eq(GAP1d()(t).shape, (bs, c_in))
test_eq(GACP1d()(t).shape, (bs, c_in*2))
bs, c_in, seq_len = 16, 4, 50
t = torch.rand(bs, c_in, seq_len)
test_eq(GAP1d()(t).shape, (bs, c_in))
test_eq(GACP1d()(t).shape, (bs, c_in*2))
test_eq(GAWP1d(c_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=nn.ReLU(), zero_init=False)(t).shape, (bs, c_in))
test_eq(GAWP1d(c_in, seq_len, n_layers=2, ln=False, dropout=0.5, act=nn.ReLU(), zero_init=False)(t).shape, (bs, c_in))
test_eq(GAWP1d(c_in, seq_len, n_layers=1, ln=False, dropout=0.5, zero_init=False)(t).shape, (bs, c_in))
test_eq(GAWP1d(c_in, seq_len, n_layers=1, ln=False, dropout=0.5, zero_init=True)(t).shape, (bs, c_in))
test_eq(AttentionalPool1d(c_in, c_out)(t).shape, (bs, c_out, 1))
bs, c_in, seq_len = 16, 128, 50
c_out = 14
t = torch.rand(bs, c_in, seq_len)
attp = attentional_pool_head(c_in, c_out)
test_eq(attp(t).shape, (bs, c_out))
# +
#export
def create_pool_head(n_in, c_out, seq_len=None, concat_pool=False, fc_dropout=0., bn=False, y_range=None, **kwargs):
    "Classification head: global (concat) pool -> linear, optional sigmoid range."
    if kwargs: print(f'{kwargs} not being used')  # warn on silently ignored kwargs
    if concat_pool: n_in*=2  # concat pooling doubles the feature count
    layers = [GACP1d(1) if concat_pool else GAP1d(1)]
    layers += [LinBnDrop(n_in, c_out, bn=bn, p=fc_dropout)]
    if y_range: layers += [SigmoidRange(*y_range)]
    return nn.Sequential(*layers)
pool_head = create_pool_head
average_pool_head = partial(pool_head, concat_pool=False)
setattr(average_pool_head, "__name__", "average_pool_head")
concat_pool_head = partial(pool_head, concat_pool=True)
setattr(concat_pool_head, "__name__", "concat_pool_head")
# -
# create_pool_head maps (bs, nf, seq_len) -> (bs, c_out).
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_pool_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
test_eq(create_pool_head(nf, c_out, seq_len, concat_pool=True, fc_dropout=0.5)(t).shape, (bs, c_out))
create_pool_head(nf, c_out, seq_len, concat_pool=True, bn=True, fc_dropout=.5)
#export
def max_pool_head(n_in, c_out, seq_len, fc_dropout=0., bn=False, y_range=None, **kwargs):
    "Classification head: full-length max pool -> flatten -> linear."
    if kwargs: print(f'{kwargs} not being used')  # warn on silently ignored kwargs
    layers = [nn.MaxPool1d(seq_len, **kwargs), Flatten()]
    layers += [LinBnDrop(n_in, c_out, bn=bn, p=fc_dropout)]
    if y_range: layers += [SigmoidRange(*y_range)]
    return nn.Sequential(*layers)
# max_pool_head maps (bs, nf, seq_len) -> (bs, c_out).
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(max_pool_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
# +
#export
def create_pool_plus_head(*args, lin_ftrs=None, fc_dropout=0., concat_pool=True, bn_final=False, lin_first=False, y_range=None):
    "fastai-style head: (concat) pool -> flatten -> MLP defined by `lin_ftrs`."
    nf = args[0]
    c_out = args[1]
    if concat_pool: nf = nf * 2  # concat pooling doubles the feature count
    lin_ftrs = [nf, 512, c_out] if lin_ftrs is None else [nf] + lin_ftrs + [c_out]
    # single dropout value -> half of it on hidden layers, full value on the last
    ps = L(fc_dropout)
    if len(ps) == 1: ps = [ps[0]/2] * (len(lin_ftrs)-2) + ps
    actns = [nn.ReLU(inplace=True)] * (len(lin_ftrs)-2) + [None]
    pool = AdaptiveConcatPool1d() if concat_pool else nn.AdaptiveAvgPool1d(1)
    layers = [pool, Flatten()]
    if lin_first: layers.append(nn.Dropout(ps.pop(0)))
    for ni,no,p,actn in zip(lin_ftrs[:-1], lin_ftrs[1:], ps, actns):
        layers += LinBnDrop(ni, no, bn=True, p=p, act=actn, lin_first=lin_first)
    if lin_first: layers.append(nn.Linear(lin_ftrs[-2], c_out))
    if bn_final: layers.append(nn.BatchNorm1d(lin_ftrs[-1], momentum=0.01))
    if y_range is not None: layers.append(SigmoidRange(*y_range))
    return nn.Sequential(*layers)
pool_plus_head = create_pool_plus_head
# -
# create_pool_plus_head maps (bs, nf, seq_len) -> (bs, c_out).
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_pool_plus_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
test_eq(create_pool_plus_head(nf, c_out, concat_pool=True, fc_dropout=0.5)(t).shape, (bs, c_out))
create_pool_plus_head(nf, c_out, seq_len, fc_dropout=0.5)
# +
#export
def create_conv_head(*args, adaptive_size=None, y_range=None):
    "Fully-convolutional head: up to two channel-halving 1x1 convs, then a 1x1 conv to c_out + GAP."
    nf = args[0]
    c_out = args[1]
    layers = [nn.AdaptiveAvgPool1d(adaptive_size)] if adaptive_size is not None else []
    for i in range(2):
        if nf > 1:
            layers += [ConvBlock(nf, nf // 2, 1)]
            nf = nf//2
        else: break  # can't halve a single channel further
    layers += [ConvBlock(nf, c_out, 1), GAP1d(1)]
    if y_range: layers += [SigmoidRange(*y_range)]
    return nn.Sequential(*layers)
conv_head = create_conv_head
# -
# create_conv_head maps (bs, nf, seq_len) -> (bs, c_out).
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_conv_head(nf, c_out, seq_len)(t).shape, (bs, c_out))
test_eq(create_conv_head(nf, c_out, adaptive_size=50)(t).shape, (bs, c_out))
create_conv_head(nf, c_out, 50)
# +
#export
def create_mlp_head(nf, c_out, seq_len=None, flatten=True, fc_dropout=0., bn=False, y_range=None):
    "Single-linear head over the flattened (nf * seq_len) features."
    # NOTE(review): with flatten=True a numeric seq_len is required (nf *= None would fail)
    if flatten: nf *= seq_len
    layers = [Flatten()] if flatten else []
    layers += [LinBnDrop(nf, c_out, bn=bn, p=fc_dropout)]
    if y_range: layers += [SigmoidRange(*y_range)]
    return nn.Sequential(*layers)
mlp_head = create_mlp_head
# -
# create_mlp_head maps (bs, nf, seq_len) -> (bs, c_out).
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_mlp_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
t = torch.rand(bs, nf, seq_len)
create_mlp_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)
# +
#export
def create_fc_head(nf, c_out, seq_len=None, flatten=True, lin_ftrs=None, y_range=None, fc_dropout=0., bn=False, bn_final=False, act=nn.ReLU(inplace=True)):
    # Fully-connected head with a configurable stack of hidden layers.
    # When flattening, nf becomes nf*seq_len so the first Linear matches the flat input.
    if flatten: nf *= seq_len
    layers = [Flatten()] if flatten else []
    # default architecture nf -> 512 -> c_out; otherwise nf -> *lin_ftrs -> c_out
    lin_ftrs = [nf, 512, c_out] if lin_ftrs is None else [nf] + lin_ftrs + [c_out]
    # one dropout value per layer transition unless a list was provided
    if not is_listy(fc_dropout): fc_dropout = [fc_dropout]*(len(lin_ftrs) - 1)
    # activation on every layer except the last
    actns = [act for _ in range(len(lin_ftrs) - 2)] + [None]
    # bn is applied to every layer except the final one (unless bn_final).
    # NOTE(review): zip truncates to len(actns) pairs, so the appended 0. dropout is only
    # consumed when a caller passes a dropout list one element short — confirm intended.
    layers += [LinBnDrop(lin_ftrs[i], lin_ftrs[i+1], bn=bn and (i!=len(actns)-1 or bn_final), p=p, act=a) for i,(p,a) in enumerate(zip(fc_dropout+[0.], actns))]
    if y_range is not None: layers.append(SigmoidRange(*y_range))
    return nn.Sequential(*layers)
fc_head = create_fc_head
# -
# Shape check for the fully-connected head
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_fc_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
# fixed: this cell demos create_fc_head (it previously displayed create_mlp_head by mistake)
create_fc_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)
# +
#export
def create_rnn_head(*args, fc_dropout=0., bn=False, y_range=None):
    "Head for RNN-style outputs: keep the last step, then a LinBnDrop classifier."
    nf, c_out = args[0], args[1]
    layers = [LastStep(), LinBnDrop(nf, c_out, bn=bn, p=fc_dropout)]
    if y_range: layers.append(SigmoidRange(*y_range))
    return nn.Sequential(*layers)
rnn_head = create_rnn_head
# -
# Shape check: LastStep drops the time dimension before the linear layer
bs = 16
nf = 12
c_out = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
test_eq(create_rnn_head(nf, c_out, seq_len, fc_dropout=0.5)(t).shape, (bs, c_out))
create_rnn_head(nf, c_out, seq_len, bn=True, fc_dropout=.5)
# +
# export
def imputation_head(c_in, c_out, seq_len=None, ks=1, y_range=None, fc_dropout=0.):
    "Per-step imputation head: dropout + Conv1d mapping c_in -> c_out channels."
    # seq_len is unused; kept for signature compatibility with the other head factories
    layers = [nn.Dropout(fc_dropout), nn.Conv1d(c_in, c_out, ks)]
    if y_range is not None:
        low, high = y_range
        layers.append(SigmoidRange(tensor(low), tensor(high)))
    return nn.Sequential(*layers)
# -
bs = 16
nf = 12
ni = 2
seq_len = 20
t = torch.rand(bs, nf, seq_len)
head = imputation_head(nf, ni, seq_len=None, ks=1, y_range=None, fc_dropout=0.)
test_eq(head(t).shape, (bs, ni, seq_len))
head = imputation_head(nf, ni, seq_len=None, ks=1, y_range=(.3,.7), fc_dropout=0.)
test_ge(head(t).min(), .3)
test_le(head(t).max(), .7)
# NOTE(review): the two tests below still exercise the (.3,.7) head created above — the
# per-channel-range head is only built afterwards and is displayed, never called. Its
# 12-element bounds could not broadcast against this head's (bs, 2, 20) output anyway.
y_range = (tensor([0.1000, 0.1000, 0.1000, 0.1000, 0.2000, 0.2000, 0.2000, 0.2000, 0.3000,
         0.3000, 0.3000, 0.3000]),
 tensor([0.6000, 0.6000, 0.6000, 0.6000, 0.7000, 0.7000, 0.7000, 0.7000, 0.8000,
         0.8000, 0.8000, 0.8000]))
test_ge(head(t).min(), .1)
test_le(head(t).max(), .9)
head = imputation_head(nf, ni, seq_len=None, ks=1, y_range=y_range, fc_dropout=0.)
head
# +
# export
class create_conv_lin_3d_head(nn.Sequential):
    "Module to create a 3d output head"
    def __init__(self, n_in, n_out, seq_len, d=(), conv_first=True, conv_bn=True, lin_first=False, lin_bn=True, act=None, fc_dropout=0., **kwargs):
        # d: (d0, d1) per-sample output shape. A 1x1 conv maps channels n_in -> d[0] and
        # a linear layer maps the time dim seq_len -> d[1].
        assert len(d) == 2, "you must pass a tuple of len == 2 to create a 3d output"
        # conv path: optional channel BatchNorm, then 1x1 conv (no bias when preceded by BN)
        conv = [BatchNorm(n_in, ndim=1)] if conv_bn else []
        conv.append(Conv1d(n_in, d[0], 1, padding=0, bias=not conv_bn, **kwargs))
        # linear path: BatchNorm over the last dim via a Transpose sandwich.
        # NOTE(review): normalized size is n_out when lin_first else seq_len — confirm
        # n_out is the intended post-linear size given the linear outputs d[1].
        l = [Transpose(-1, -2), BatchNorm(n_out if lin_first else seq_len, ndim=1), Transpose(-1, -2)] if lin_bn else []
        if fc_dropout != 0: l.append(nn.Dropout(fc_dropout))
        lin = [nn.Linear(seq_len, d[1], bias=not lin_bn)]
        if act is not None: lin.append(act)
        lin_layers = lin+l if lin_first else l+lin
        layers = conv + lin_layers if conv_first else lin_layers + conv
        super().__init__(*layers)
conv_lin_3d_head = create_conv_lin_3d_head
# -
# Shape checks: (bs, 3, 50) input mapped to the requested (bs, d0, d1) output
t = torch.randn(16, 3, 50)
head = conv_lin_3d_head(3, 20, 50, (4,5))
test_eq(head(t).shape, (16, 4, 5))
head = conv_lin_3d_head(3, 20, 50, (2, 10))
test_eq(head(t).shape, (16, 2, 10))
head
# +
# export
class create_lin_3d_head(nn.Sequential):
    "Module to create a 3d output head with linear layers"
    def __init__(self, n_in, n_out, seq_len, d=(), lin_first=False, bn=True, act=None, fc_dropout=0.):
        # flatten -> LinBnDrop(n_in*seq_len -> n_out) -> reshape to the 2-tuple d
        assert len(d) == 2, "you must pass a tuple of len == 2 to create a 3d output"
        lin = LinBnDrop(n_in * seq_len, n_out, bn=bn, p=fc_dropout, act=act, lin_first=lin_first)
        super().__init__(Flatten(), *lin, Reshape(*d))
lin_3d_head = create_lin_3d_head
# -
# Shape checks: n_out must equal d0*d1 for the reshape to succeed
t = torch.randn(16, 64, 50)
head = lin_3d_head(64, 10, 50, (5,2))
test_eq(head(t).shape, (16, 5, 2))
head = lin_3d_head(64, 5, 50, (5, 1))
test_eq(head(t).shape, (16, 5, 1))
head
# +
# export
class create_conv_3d_head(nn.Sequential):
    "Module to create a 3d output head with a convolutional layer"
    def __init__(self, n_in, c_out, seq_len, d=(), lin_first=False, bn=True, act=None, fc_dropout=0.):
        # Only a 1x1 conv over channels is applied, so d[1] must equal seq_len.
        # NOTE(review): lin_first, bn, act and fc_dropout are accepted but unused here —
        # presumably kept for signature parity with the other 3d heads; confirm.
        assert len(d) == 2, "you must pass a tuple of len == 2 to create a 3d output"
        assert d[1] == seq_len, 'You can only use this head when learn.dls.len == learn.dls.d'
        super().__init__(Conv(n_in, d[0], 1))
conv_3d_head = create_conv_3d_head
# -
# Shape check: d[1] matches seq_len as required by the head's assert
bs = 16
c_out = 4
seq_len = 50
d = (2,50)
nf = 128
t = torch.rand(bs, nf, seq_len)
test_eq(conv_3d_head(nf, c_out, seq_len, d)(t).shape, (bs, *d))
#export
def universal_pool_head(n_in, c_out, seq_len, mult=2, pool_n_layers=2, pool_ln=True, pool_dropout=0.5, pool_act=nn.ReLU(),
                        zero_init=True, bn=True, fc_dropout=0.):
    "Learned weighted average pooling over time followed by a linear classifier."
    # NOTE(review): zero_init is accepted but not forwarded anywhere — confirm whether
    # AdaptiveWeightedAvgPool1d should receive it.
    pool = AdaptiveWeightedAvgPool1d(n_in, seq_len, n_layers=pool_n_layers, mult=mult, ln=pool_ln, dropout=pool_dropout, act=pool_act)
    return nn.Sequential(pool, Flatten(), LinBnDrop(n_in, c_out, p=fc_dropout, bn=bn))
# Shape checks for the universal pooling head (default and mult=2 positional)
bs, c_in, seq_len = 16, 128, 50
c_out = 14
t = torch.rand(bs, c_in, seq_len)
uph = universal_pool_head(c_in, c_out, seq_len)
test_eq(uph(t).shape, (bs, c_out))
uph = universal_pool_head(c_in, c_out, seq_len, 2)
test_eq(uph(t).shape, (bs, c_out))
#export
# Registry of every head factory defined in this module, plus a smoke test that each
# one produces the expected output shape ((bs, c_out) for 2d heads, (bs, *d) for 3d).
heads = [mlp_head, fc_head, average_pool_head, max_pool_head, concat_pool_head, pool_plus_head, conv_head, rnn_head,
         conv_lin_3d_head, lin_3d_head, conv_3d_head, attentional_pool_head, universal_pool_head, gwa_pool_head]
bs, c_in, seq_len = 16, 128, 50
c_out = 14
d = (7, 2)
t = torch.rand(bs, c_in, seq_len)
for head in heads:
    print(head.__name__)
    if head.__name__ == 'create_conv_3d_head':
        # conv_3d_head requires d[1] == seq_len, so its d is special-cased
        test_eq(head(c_in, c_out, seq_len, (d[0], seq_len))(t).shape, (bs, *(d[0], seq_len)))
    elif '3d' in head.__name__:
        test_eq(head(c_in, c_out, seq_len, d)(t).shape, (bs, *d))
    else:
        test_eq(head(c_in, c_out, seq_len)(t).shape, (bs, c_out))
#export
class SqueezeExciteBlock(Module):
    "Squeeze-and-Excitation: reweights channels by a gated summary of their global average."
    def __init__(self, ni, reduction=16):
        self.avg_pool = GAP1d(1)
        self.fc = nn.Sequential(
            nn.Linear(ni, ni // reduction, bias=False),
            nn.ReLU(),
            nn.Linear(ni // reduction, ni, bias=False),
            nn.Sigmoid())
    def forward(self, x):
        # (bs, ni, seq_len) -> squeeze to (bs, ni) -> per-channel weights in (0, 1)
        w = self.fc(self.avg_pool(x)).unsqueeze(2)
        return x * w.expand_as(x)
# SE block must preserve the input shape (it only rescales channels)
bs = 2
ni = 32
sl = 4
t = torch.rand(bs, ni, sl)
test_eq(SqueezeExciteBlock(ni)(t).shape, (bs, ni, sl))
#export
class GaussianNoise(Module):
    """Gaussian noise regularizer.
    Args:
        sigma (float, optional): relative standard deviation used to generate the
            noise. Relative means that it will be multiplied by the magnitude of
            the value you are adding the noise to. This means that sigma can be
            the same regardless of the scale of the vector.
        is_relative_detach (bool, optional): whether to detach the variable before
            computing the scale of the noise. If `False` then the scale of the noise
            won't be seen as a constant but something to optimize: this will bias the
            network to generate vectors with smaller values.
    """
    def __init__(self, sigma=0.1, is_relative_detach=True):
        self.sigma, self.is_relative_detach = sigma, is_relative_detach
    def forward(self, x):
        # only perturb in training mode and when sigma is meaningful
        if self.training and self.sigma not in [0, None]:
            scale = self.sigma * (x.detach() if self.is_relative_detach else x)
            # sample on x's own device/dtype; the previous version created the noise on
            # CPU and moved it to a global `device`, which failed whenever x lived on a
            # different device than that global
            sampled_noise = torch.empty_like(x).normal_() * scale
            x = x + sampled_noise
        return x
# GaussianNoise should perturb values (training mode by default) while preserving shape,
# for inputs of any rank
t = torch.ones(2,3,4)
test_ne(GaussianNoise()(t), t)
test_eq(GaussianNoise()(t).shape, t.shape)
t = torch.ones(2,3)
test_ne(GaussianNoise()(t), t)
test_eq(GaussianNoise()(t).shape, t.shape)
t = torch.ones(2)
test_ne(GaussianNoise()(t), t)
test_eq(GaussianNoise()(t).shape, t.shape)
#export
def gambler_loss(reward=2):
    "Loss with an abstention option: the last logit is a 'reservation' the model can bet on."
    def _gambler_loss(model_output, targets):
        probs = torch.nn.functional.softmax(model_output, dim=1)
        # split class probabilities from the reservation (last column)
        class_probs, reservation = probs[:, :-1], probs[:, -1]
        # probability assigned to each sample's true class
        gain = torch.gather(class_probs, dim=1, index=targets.unsqueeze(1)).squeeze()
        doubling_rate = (gain + reservation / reward).log()
        return -doubling_rate.mean()
    return _gambler_loss
# 3 logits = 2 classes + 1 reservation column; targets index only the real classes
model_output = torch.rand(16, 3)
targets = torch.randint(0, 2, (16,))
criterion = gambler_loss(2)
criterion(model_output, targets)
#export
def CrossEntropyLossOneHot(output, target, **kwargs):
    "Cross entropy that also accepts one-hot encoded (2d) targets."
    if target.ndim == 2:
        target = target.max(dim=1)[1]  # argmax -> class indices
    return nn.CrossEntropyLoss(**kwargs)(output, target)
# Works with index targets...
output = torch.rand(16, 2)
target = torch.randint(0, 2, (16,))
CrossEntropyLossOneHot(output, target)
from tsai.data.transforms import OneHot
# ...and with one-hot encoded targets
output = nn.Parameter(torch.rand(16, 2))
target = torch.randint(0, 2, (16,))
one_hot_target = OneHot()(target)
CrossEntropyLossOneHot(output, one_hot_target)
#hide
def proba_certainty(output):
    "Scaled distance of the max probability from uniform: 0 = uniform, 1 = fully certain."
    # heuristic: if rows don't already sum to 1 on average, treat input as logits
    if output.sum(-1).mean().item() != 1:
        output = F.softmax(output, -1)
    uniform = 1. / output.shape[-1]
    return (output.max(-1).values - uniform) / (1 - uniform)
#hide
# 3-class example: random logits (can be negative) exercise the softmax branch
target = random_shuffle(concat(torch.zeros(5), torch.ones(7), torch.ones(4) + 1)).long()
output = nn.Parameter(5 * torch.rand((16, 3)) - 5 * torch.rand((16, 3)))
proba_certainty(output)
#hide
def CrossEntropyLossOneHotWithUncertainty():
    "Cross entropy (one-hot aware) weighted per sample by the model's own certainty."
    def _inner(output, target, **kwargs):
        per_sample = CrossEntropyLossOneHot(output, target, reduction='none', **kwargs)
        return (proba_certainty(output) * per_sample).mean()
    return _inner
# +
#hide
# https://stackoverflow.com/questions/22611446/perform-2-sample-t-test
from __future__ import print_function
import numpy as np
from scipy.stats import ttest_ind, ttest_ind_from_stats
from scipy.special import stdtr
np.random.seed(1)
# Create sample data.
a = np.random.randn(40)
b = 4*np.random.randn(50)
# Use scipy.stats.ttest_ind.
t, p = ttest_ind(a, b, equal_var=False)
print("ttest_ind: t = %g p = %g" % (t, p))
# Compute the descriptive statistics of a and b.
abar = a.mean()
avar = a.var(ddof=1)
na = a.size
adof = na - 1
bbar = b.mean()
bvar = b.var(ddof=1)
nb = b.size
bdof = nb - 1
# Use scipy.stats.ttest_ind_from_stats.
t2, p2 = ttest_ind_from_stats(abar, np.sqrt(avar), na,
bbar, np.sqrt(bvar), nb,
equal_var=False)
print("ttest_ind_from_stats: t = %g p = %g" % (t2, p2))
# Use the formulas directly.
tf = (abar - bbar) / np.sqrt(avar/na + bvar/nb)
dof = (avar/na + bvar/nb)**2 / (avar**2/(na**2*adof) + bvar**2/(nb**2*bdof))
pf = 2*stdtr(dof, -np.abs(tf))
print("formula: t = %g p = %g" % (tf, pf))
a = tensor(a)
b = tensor(b)
tf = (a.mean() - b.mean()) / torch.sqrt(a.var()/a.size(0) + b.var()/b.size(0))
print("formula: t = %g" % (tf))
# -
ttest_tensor(a, b)
# +
#export
def ttest_bin_loss(output, target):
    # Welch t statistic between the scores the model assigns to the two classes.
    # NOTE(review): output[:, 1] is 1-D, so this softmax normalizes over the BATCH
    # dimension of the class-1 logits — it matches the validation cell below, but
    # confirm that is the intended normalization.
    output = nn.Softmax(dim=-1)(output[:, 1])
    return ttest_tensor(output[target == 0], output[target == 1])
def ttest_reg_loss(output, target):
    # t statistic between predictions for non-positive vs positive targets
    return ttest_tensor(output[target <= 0], output[target > 0])
# -
# Sanity check: ttest_bin_loss agrees with scipy's Welch t statistic on random data
for _ in range(100):
    output = torch.rand(256, 2)
    target = torch.randint(0, 2, (256,))
    test_close(ttest_bin_loss(output, target).item(),
               ttest_ind(nn.Softmax(dim=-1)(output[:, 1])[target == 0], nn.Softmax(dim=-1)(output[:, 1])[target == 1], equal_var=False)[0], eps=1e-3)
# +
#export
class CenterLoss(Module):
    r"""
    Code in Pytorch has been slightly modified from: https://github.com/KaiyangZhou/pytorch-center-loss/blob/master/center_loss.py
    Based on paper: Wen et al. A Discriminative Feature Learning Approach for Deep Face Recognition. ECCV 2016.
    Args:
        c_out (int): number of classes.
        logits_dim (int): dim 1 of the logits. By default same as c_out (for one hot encoded logits)
    """
    def __init__(self, c_out, logits_dim=None):
        logits_dim = ifnone(logits_dim, c_out)
        self.c_out, self.logits_dim = c_out, logits_dim
        # learnable class centers, one per class, living in logits space
        self.centers = nn.Parameter(torch.randn(c_out, logits_dim).to(device=default_device()))
        self.classes = torch.arange(c_out).long().to(device=default_device())
    def forward(self, x, labels):
        """
        Args:
            x: feature matrix with shape (batch_size, logits_dim).
            labels: ground truth labels with shape (batch_size).
        """
        bs = x.shape[0]
        # squared euclidean distances between every sample and every center, computed as
        # ||x||^2 + ||c||^2 - 2 x·c (the -2 x·c term is added by the addmm below)
        distmat = torch.pow(x, 2).sum(dim=1, keepdim=True).expand(bs, self.c_out) + \
                  torch.pow(self.centers, 2).sum(dim=1, keepdim=True).expand(self.c_out, bs).T
        distmat = torch.addmm(distmat, x, self.centers.T, beta=1, alpha=-2)
        # mask keeps only the distance from each sample to its OWN class center
        labels = labels.unsqueeze(1).expand(bs, self.c_out)
        mask = labels.eq(self.classes.expand(bs, self.c_out))
        dist = distmat * mask.float()
        # clamp guards against tiny negative values caused by floating point error
        loss = dist.clamp(min=1e-12, max=1e+12).sum() / bs
        return loss
class CenterPlusLoss(Module):
    "Combines a base classification loss with a λ-weighted CenterLoss term."
    def __init__(self, loss, c_out, λ=1e-2, logits_dim=None):
        self.loss, self.c_out, self.λ = loss, c_out, λ
        self.centerloss = CenterLoss(c_out, logits_dim)
    def forward(self, x, labels):
        base = self.loss(x, labels)
        center = self.centerloss(x, labels)
        return base + self.λ * center
    def __repr__(self): return f"CenterPlusLoss(loss={self.loss}, c_out={self.c_out}, λ={self.λ})"
# -
# Smoke test: softmaxed features with pseudo-labels taken from their own argmax
c_in = 10
x = torch.rand(64, c_in).to(device=default_device())
x = F.softmax(x, dim=1)
label = x.max(dim=1).indices
CenterLoss(c_in)(x, label), CenterPlusLoss(LabelSmoothingCrossEntropyFlat(), c_in)(x, label)
CenterPlusLoss(LabelSmoothingCrossEntropyFlat(), c_in)
#export
class FocalLoss(Module):
    # Focal loss built on flat cross entropy; gamma=0 reduces it to plain CE.
    def __init__(self, gamma=0, eps=1e-7):
        # eps is stored but currently unused in forward
        self.gamma, self.eps, self.ce = gamma, eps, CrossEntropyLossFlat()
    def forward(self, input, target):
        # NOTE(review): CrossEntropyLossFlat reduces to a scalar before the focal
        # weighting, so (1 - p)**gamma is applied to the batch-averaged loss rather
        # than per sample (and the final .mean() is a no-op) — confirm this variant
        # is intended.
        logp = self.ce(input, target)
        p = torch.exp(-logp)
        loss = (1 - p) ** self.gamma * logp
        return loss.mean()
c_in = 10
x = torch.rand(64, c_in).to(device=default_device())
x = F.softmax(x, dim=1)
label = x.max(dim=1).indices
# NOTE(review): the positional argument sets gamma=10 (FocalLoss has no class-count
# parameter) — confirm this focusing strength is intended in this smoke test.
FocalLoss(c_in)(x, label)
#export
class TweedieLoss(Module):
    def __init__(self, p=1.5, eps=1e-10):
        """
        Tweedie loss as calculated in LightGBM
        Args:
            p: tweedie variance power (1 < p < 2)
            eps: small number to avoid log(zero).
        """
        assert p > 1 and p < 2, "make sure 1 < p < 2"
        self.p, self.eps = p, eps
    def forward(self, inp, targ):
        "Mean Tweedie deviance between flattened predictions and targets."
        inp = inp.flatten()
        targ = targ.flatten()
        # clamp out-of-place: the previous in-place clamp_min_ mutated the caller's
        # tensor (flatten can return a view of it) and raises a RuntimeError when inp
        # is a leaf tensor requiring grad
        inp = inp.clamp_min(self.eps)
        a = targ * torch.exp((1 - self.p) * torch.log(inp)) / (1 - self.p)
        b = torch.exp((2 - self.p) * torch.log(inp)) / (2 - self.p)
        loss = -a + b
        return loss.mean()
# NOTE(review): c_in is unused below; TweedieLoss works on flattened 1-D tensors
c_in = 10
output = torch.rand(64).to(device=default_device())
target = torch.rand(64).to(device=default_device())
TweedieLoss()(output, target)
# +
# export
class GEGLU(Module):
    "GELU-gated linear unit: halves the last dim and gates one half with GELU of the other."
    def forward(self, x):
        out, gate = x.chunk(2, dim=-1)
        return out * F.gelu(gate)
class ReGLU(Module):
    "ReLU-gated linear unit: halves the last dim and gates one half with ReLU of the other."
    def forward(self, x):
        out, gate = x.chunk(2, dim=-1)
        return out * F.relu(gate)
class PositionwiseFeedForward(nn.Sequential):
    "Transformer position-wise feed-forward block with selectable (optionally gated) activation."
    def __init__(self, dim, dropout=0., act='reglu', mlp_ratio=1):
        act = act.lower()
        # gated activations (geglu/reglu) halve their input, so the first linear doubles it
        act_mult = 2 if act in ['geglu', 'reglu'] else 1
        act_classes = {'relu': nn.ReLU, 'gelu': nn.GELU, 'geglu': GEGLU}
        act_fn = act_classes.get(act, ReGLU)()  # anything unrecognized falls back to ReGLU
        super().__init__(nn.Linear(dim, dim * act_mult * mlp_ratio),
                         act_fn,
                         nn.Dropout(dropout),
                         nn.Linear(dim * mlp_ratio, dim),
                         nn.Dropout(dropout))
class TokenLayer(Module):
    "Selects the class-token step (token truthy) or averages over all steps otherwise."
    def __init__(self, token=True): self.token = token
    def forward(self, x):
        # fixed: the previous check was `self.token is not None`, which made the mean
        # branch unreachable for token=False (False is not None); truthiness handles
        # both False and None as "no token"
        return x[..., 0] if self.token else x.mean(-1)
    def __repr__(self): return f"{self.__class__.__name__}()"
# +
#export
# Activation classes resolvable by name via get_act_fn below (lower-cased class names)
pytorch_acts = [nn.ELU, nn.LeakyReLU, nn.PReLU, nn.ReLU, nn.ReLU6, nn.SELU, nn.CELU, nn.GELU, nn.Sigmoid, Mish, nn.Softplus,
                nn.Tanh, nn.Softmax, GEGLU, ReGLU]
pytorch_act_names = [a.__name__.lower() for a in pytorch_acts]
def get_act_fn(act_name, **act_kwargs):
    "Instantiate an activation from a class/callable or from its (case-insensitive) name."
    if act_name is None: return None
    if callable(act_name): return act_name(**act_kwargs)
    # string lookup against the registered activation names
    idx = pytorch_act_names.index(act_name.lower())
    cls = pytorch_acts[idx]
    return cls(**act_kwargs)
# get_act_fn accepts both classes and names, forwarding extra kwargs to the constructor
test_eq(get_act_fn(nn.ReLU).__repr__(), "ReLU()")
test_eq(get_act_fn(nn.LeakyReLU, negative_slope=0.05).__repr__(), "LeakyReLU(negative_slope=0.05)")
test_eq(get_act_fn('reglu').__repr__(), "ReGLU()")
test_eq(get_act_fn('leakyrelu', negative_slope=0.05).__repr__(), "LeakyReLU(negative_slope=0.05)")
# -
#export
class ScaledDotProductAttention(Module):
    """Scaled Dot-Product Attention module (Vaswani et al., 2017) with optional residual attention from previous layer (He et al, 2020)"""
    def __init__(self, res_attention:bool=False): self.res_attention = res_attention
    def forward(self, q:Tensor, k:Tensor, v:Tensor, prev:Optional[Tensor]=None, key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None):
        '''
        Input shape:
            q               : [bs x n_heads x max_q_len x d_k]
            k               : [bs x n_heads x d_k x seq_len]
            v               : [bs x n_heads x seq_len x d_v]
            prev            : [bs x n_heads x q_len x seq_len]
            key_padding_mask: [bs x seq_len]
            attn_mask       : [1 x seq_len x seq_len]
        Output shape:
            output: [bs x n_heads x q_len x d_v]
            attn  : [bs x n_heads x q_len x seq_len]
            scores : [bs x n_heads x q_len x seq_len]
        '''
        # Scaled MatMul (q, k) - similarity scores for all pairs of positions in an input sequence.
        # NOTE(review): this scales by sqrt(q.shape[-2]) = sqrt(q_len); the Vaswani et al.
        # convention scales by sqrt(d_k) = sqrt(q.shape[-1]) — confirm the divisor is intentional.
        attn_scores = torch.matmul(q / np.sqrt(q.shape[-2]), k)      # attn_scores : [bs x n_heads x max_q_len x q_len]
        # Add pre-softmax attention scores from the previous layer (optional)
        if prev is not None: attn_scores = attn_scores + prev
        # Attention mask (optional): boolean masks fill in-place with -inf, float masks are additive
        if attn_mask is not None:                                     # attn_mask with shape [q_len x seq_len] - only used when q_len == seq_len
            if attn_mask.dtype == torch.bool:
                attn_scores.masked_fill_(attn_mask, -np.inf)
            else:
                attn_scores += attn_mask
        # Key padding mask (optional): broadcast over heads and query positions
        if key_padding_mask is not None:                              # mask with shape [bs x q_len] (only when max_w_len == q_len)
            attn_scores.masked_fill_(key_padding_mask.unsqueeze(1).unsqueeze(2), -np.inf)
        # normalize the attention weights
        attn_weights = F.softmax(attn_scores, dim=-1)                 # attn_weights   : [bs x n_heads x max_q_len x q_len]
        # compute the new values given the attention weights
        output = torch.matmul(attn_weights, v)                        # output: [bs x n_heads x max_q_len x d_v]
        # with res_attention the raw (pre-softmax) scores are returned for the next layer
        if self.res_attention: return output, attn_weights, attn_scores
        else: return output, attn_weights
# +
# Perceiver-style cross-attention demo: a long sequence (M=1500) is projected onto a
# shorter latent index (N=512) for the queries, while keys/values keep full length M.
B = 16
C = 10
M = 1500 # seq_len
n_heads = 1
D = 128 # model dimension
N = 512 # max_seq_len - latent's index dimension
d_k = D // n_heads
xb = torch.randn(B, C, M)
xb = (xb - xb.mean()) / xb.std()
# Attention
# input (Q)
lin = nn.Linear(M, N, bias=False)
Q = lin(xb).transpose(1,2)
test_eq(Q.shape, (B, N, C))
# q
to_q = nn.Linear(C, D, bias=False)
q = to_q(Q)
q = nn.LayerNorm(D)(q)
# k, v
context = xb.transpose(1,2)
to_kv = nn.Linear(C, D * 2, bias=False)
k, v = to_kv(context).chunk(2, dim = -1)
k = k.transpose(-1, -2)
k = nn.LayerNorm(M)(k)
v = nn.LayerNorm(D)(v)
test_eq(q.shape, (B, N, D))
test_eq(k.shape, (B, D, M))
test_eq(v.shape, (B, M, D))
# unsqueeze(1) adds the (single) head dimension expected by the attention module
output, attn, scores = ScaledDotProductAttention(res_attention=True)(q.unsqueeze(1), k.unsqueeze(1), v.unsqueeze(1))
test_eq(output.shape, (B, 1, N, D))
test_eq(attn.shape, (B, 1, N, M))
test_eq(scores.shape, (B, 1, N, M))
scores.mean(), scores.std()
# +
#export
class MultiheadAttention(Module):
    def __init__(self, d_model:int, n_heads:int, d_k:Optional[int]=None, d_v:Optional[int]=None, res_attention:bool=False,
                 dropout:float=0., qkv_bias:bool=True):
        """Multi Head Attention Layer
        Input shape:
            Q:       [batch_size (bs) x max_q_len x d_model]
            K, V:    [batch_size (bs) x q_len x d_model]
            mask:    [q_len x q_len]
        """
        # per-head key/value dims default to an even split of d_model
        d_k = ifnone(d_k, d_model // n_heads)
        d_v = ifnone(d_v, d_model // n_heads)
        self.n_heads, self.d_k, self.d_v = n_heads, d_k, d_v
        self.W_Q = nn.Linear(d_model, d_k * n_heads, bias=qkv_bias)
        self.W_K = nn.Linear(d_model, d_k * n_heads, bias=qkv_bias)
        self.W_V = nn.Linear(d_model, d_v * n_heads, bias=qkv_bias)
        # Scaled Dot-Product Attention (multiple heads)
        self.res_attention = res_attention
        self.sdp_attn = ScaledDotProductAttention(res_attention=self.res_attention)
        # Project output (identity when a single head already matches d_model)
        project_out = not (n_heads == 1 and d_model == d_k)
        self.to_out = nn.Sequential(nn.Linear(n_heads * d_v, d_model), nn.Dropout(dropout)) if project_out else nn.Identity()
    def forward(self, Q:Tensor, K:Optional[Tensor]=None, V:Optional[Tensor]=None, prev:Optional[Tensor]=None,
                key_padding_mask:Optional[Tensor]=None, attn_mask:Optional[Tensor]=None):
        bs = Q.size(0)
        # self-attention by default when K/V are omitted
        if K is None: K = Q
        if V is None: V = Q
        # Linear (+ split in multiple heads)
        q_s = self.W_Q(Q).view(bs, -1, self.n_heads, self.d_k).transpose(1,2)       # q_s    : [bs x n_heads x max_q_len x d_k]
        k_s = self.W_K(K).view(bs, -1, self.n_heads, self.d_k).permute(0,2,3,1)     # k_s    : [bs x n_heads x d_k x q_len] - transpose(1,2) + transpose(2,3)
        v_s = self.W_V(V).view(bs, -1, self.n_heads, self.d_v).transpose(1,2)       # v_s    : [bs x n_heads x q_len x d_v]
        # Apply Scaled Dot-Product Attention (multiple heads)
        if self.res_attention:
            output, attn_weights, attn_scores = self.sdp_attn(q_s, k_s, v_s, prev=prev, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
        else:
            output, attn_weights = self.sdp_attn(q_s, k_s, v_s, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
        # output: [bs x n_heads x q_len x d_v], attn: [bs x n_heads x q_len x q_len], scores: [bs x n_heads x max_q_len x q_len]
        # back to the original inputs dimensions: concatenate the heads, then project
        output = output.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * self.d_v) # output: [bs x q_len x n_heads * d_v]
        output = self.to_out(output)
        if self.res_attention: return output, attn_weights, attn_scores
        else: return output, attn_weights
# -
# Mask demo: float attn_mask (additive) + boolean key_padding_mask on 4 samples
q = torch.rand([16, 3, 50, 8])
k = torch.rand([16, 3, 50, 8]).transpose(-1, -2)
v = torch.rand([16, 3, 50, 6])
attn_mask = torch.triu(torch.ones(50, 50)) # shape: q_len x q_len
key_padding_mask = torch.zeros(16, 50)
key_padding_mask[[1, 3, 6, 15], -10:] = 1
key_padding_mask = key_padding_mask.bool()
print('attn_mask', attn_mask.shape, 'key_padding_mask', key_padding_mask.shape)
output, attn = ScaledDotProductAttention()(q, k, v, attn_mask=attn_mask, key_padding_mask=key_padding_mask)
output.shape, attn.shape
t = torch.rand(16, 50, 128)
output, attn = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)(t, t, t, key_padding_mask=key_padding_mask, attn_mask=attn_mask)
output.shape, attn.shape
# +
# Additive float mask: masked positions are set to -inf before softmax; the checks
# verify that no NaNs appear in outputs, attention weights, or gradients
t = torch.rand(16, 50, 128)
att_mask = (torch.rand((50, 50)) > .85).float()
att_mask[att_mask == 1] = -np.inf
mha = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)
output, attn = mha(t, t, t, attn_mask=att_mask)
test_eq(torch.isnan(output).sum().item(), 0)
test_eq(torch.isnan(attn).sum().item(), 0)
loss = output[:2, :].sum()
test_eq(torch.isnan(loss).sum().item(), 0)
loss.backward()
for n, p in mha.named_parameters(): test_eq(torch.isnan(p.grad).sum().item(), 0)
# +
# Boolean mask: True positions are masked out (filled with -inf inside the attention)
t = torch.rand(16, 50, 128)
attn_mask = (torch.rand((50, 50)) > .85)
# True values will be masked
mha = MultiheadAttention(d_model=128, n_heads=3, d_k=8, d_v=6)
# fixed: this cell previously passed the stale float `att_mask` from the prior cell
# instead of the boolean `attn_mask` defined above, so the bool path was never tested
output, attn = mha(t, t, t, attn_mask=attn_mask)
test_eq(torch.isnan(output).sum().item(), 0)
test_eq(torch.isnan(attn).sum().item(), 0)
loss = output[:2, :].sum()
test_eq(torch.isnan(loss).sum().item(), 0)
loss.backward()
for n, p in mha.named_parameters(): test_eq(torch.isnan(p.grad).sum().item(), 0)
# +
# export
class MultiConcatConv1d(Module):
    """Module that applies one or multiple kernels (and optionally maxpool)"""
    def __init__(self, ni, nf, kss=None, kernel_sizes=None, maxpool=True, stride=1):
        # fixed: kss previously defaulted to [3,5,7], so a user-supplied kernel_sizes was
        # silently ignored (ifnone only falls through when kss is None). Defaulting kss
        # to None honors kernel_sizes and keeps [3,5,7] when neither is given; it also
        # removes a shared mutable default argument.
        kss = ifnone(kss, kernel_sizes)
        if kss is None: kss = [3,5,7]
        if not is_listy(kss): kss = [kss]
        # split nf across the parallel branches (maxpool counts as one extra branch)
        _nf = nf // (len(kss) + maxpool)
        _total_nf = _nf * (len(kss) + maxpool)
        self.layers = nn.ModuleList()
        for k in kss:
            self.layers.append(Conv1d(ni, _nf, k, stride=stride))
        if maxpool: self.layers.append(nn.Sequential(nn.MaxPool1d(3, stride=stride, padding=1), Conv1d(ni, _nf, 1)))
        # 1x1 conv restores exactly nf channels when the split didn't divide evenly
        self.to_output = Conv1d(_total_nf, nf, 1) if _total_nf != nf else nn.Identity()
    def forward(self, x):
        # concatenate every branch's output along the channel dimension
        for i,l in enumerate(self.layers):
            out = l(x) if i == 0 else torch.cat((out, l(x)), 1)
        return self.to_output(out)
# -
t = torch.rand(16, 6, 37)
# NOTE(review): these checks reuse the module-level `nf` (last set to 128 in an earlier
# cell) as the expected channel count of the 128-filter block — works, but fragile.
test_eq(MultiConcatConv1d(t.shape[1], 128, kernel_sizes=[3,5,7], maxpool=True)(t).shape, (t.shape[0], nf, t.shape[-1]))
test_eq(MultiConcatConv1d(t.shape[1], 128, kernel_sizes=[3,5,7], maxpool=True, stride=2)(t).shape, (t.shape[0], nf, math.ceil(t.shape[-1]/2)))
#export
class LSTMOutput(Module):
    # Extracts the output tensor from an nn.LSTM's (output, (h_n, c_n)) tuple
    def forward(self, x): return x[0]
    def __repr__(self): return f'{self.__class__.__name__}()'
# Any tuple works: only the first element is returned
t = ([1], [2], [3])
test_eq(LSTMOutput()(t), [1])
# +
#export
def trunc_normal_(x, mean=0., std=1.):
    "Truncated normal initialization (approximation)"
    # From fastai.layers
    # From https://discuss.pytorch.org/t/implementing-truncated-normal-initializer/4778/12
    # In place: sample N(0,1), wrap into (-2, 2) with fmod, then scale and shift.
    x.normal_()
    x.fmod_(2)
    x.mul_(std)
    x.add_(mean)
    return x
class Embedding(nn.Embedding):
    "Embedding layer with truncated normal initialization"
    # From fastai.layers
    def __init__(self, ni, nf, std=0.01):
        # ni: vocabulary size, nf: embedding dim; weights re-initialized via trunc_normal_
        super(Embedding, self).__init__(ni, nf)
        trunc_normal_(self.weight.data, std=std)
class MultiEmbeddding(Module):
    "One Embedding per categorical variable; sizes from emb_sz_rule or an explicit embed_dims."
    def __init__(self, n_embeds, embed_dims=None, static=True):
        if embed_dims is None:
            assert not static, "you need to pass an embed_dims as a single int"
            self.embed_dims = [emb_sz_rule(s) for s in n_embeds]
        else:
            embed_dims = listify(embed_dims)
            # fixed: when embed_dims was a list with len > 1, self.embed_dims was never
            # assigned, so the assert below failed with an AttributeError instead of
            # checking the lengths
            if len(embed_dims) == 1: embed_dims = embed_dims * len(n_embeds)
            self.embed_dims = embed_dims
        assert len(self.embed_dims) == len(n_embeds)
        self.cat_embed = nn.ModuleList([Embedding(n,d) for n,d in zip(n_embeds, self.embed_dims)])
        self.static = static
    def forward(self, x):
        if x.ndim == 3:
            if self.static:
                # one embedding per variable from its first step -> (bs, emb_dim, n_vars)
                return torch.cat([e(x[:, i, 0].to(dtype=int))[:, None] for i,e in enumerate(self.cat_embed)],1).transpose(1,2)
            else:
                # embed every step of each variable -> (bs, sum(embed_dims), seq_len)
                return torch.cat([e(x[:,i].to(dtype=int)).transpose(1,2) for i,e in enumerate(self.cat_embed)],1)
        elif x.ndim == 2:
            # 2d input requires a uniform embedding size so the outputs can be stacked
            assert len(list(set(self.embed_dims))) == 1, "you need to pass embed_dims of type int when using a 2d input"
            return torch.cat([e(x[:,i].to(dtype=int))[:, None] for i,e in enumerate(self.cat_embed)],1).transpose(1,2)
# -
# Embedding of 2d input
# NOTE(review): despite the header, `out` below is 3d (4, 2, 10); static=False takes the
# x.ndim == 3 non-static branch, embedding every step of each variable — confirm intent.
a = alphabet[np.random.randint(0,3,40)]
b = ALPHABET[np.random.randint(6,10,40)]
map_a = {k:v for v,k in enumerate(np.unique(a))}
map_b = {k:v for v,k in enumerate(np.unique(b))}
n_embeds = [len(m.keys()) for m in [map_a, map_b]]
szs = [emb_sz_rule(n) for n in n_embeds]
a = np.asarray(a.map(map_a)).reshape(4,1,10)
b = np.asarray(b.map(map_b)).reshape(4,1,10)
out = torch.from_numpy(np.concatenate((a,b), 1)).float()
embeddings = MultiEmbeddding(n_embeds, static=False)(out)
print(n_embeds, out.shape, embeddings.shape)
test_eq(embeddings.shape, (out.shape[0],sum(szs),out.shape[-1]))
# Embedding of 3d input static variable
a = alphabet[np.random.randint(0,3,40)]
b = ALPHABET[np.random.randint(6,10,40)]
map_a = {k:v for v,k in enumerate(np.unique(a))}
map_b = {k:v for v,k in enumerate(np.unique(b))}
n_embeds = [len(m.keys()) for m in [map_a, map_b]]
a = np.asarray(a.map(map_a)).reshape(4,1,10)
b = np.asarray(b.map(map_b)).reshape(4,1,10)
out = torch.from_numpy(np.concatenate((a,b), 1)).float()
embeddings = MultiEmbeddding(n_embeds, 128)(out)
# fixed: print this cell's input `out.shape` (it previously printed a stale `x` defined
# in an earlier cell, matching neither this input nor the sibling cells' prints)
print(n_embeds, out.shape, embeddings.shape)
test_eq(embeddings.shape, (4, 128, 2))
# Embedding of 3d input
a = np.asarray(alphabet[np.random.randint(0,15,30)]).reshape(10,3)
b = np.asarray(ALPHABET[np.random.randint(6,10,30)]).reshape(10,3)
x = concat(a,b,dim=1)
n_embeds = [len(np.unique(x[:, i])) for i in range(x.shape[1])]
# integer-encode each of the 6 categorical columns; `out` is 2d (10 samples x 6 vars),
# so MultiEmbeddding takes its x.ndim == 2 branch with a uniform 64-dim embedding
out = stack([np.unique(x[:, i], return_inverse=True)[1] for i in range(x.shape[1])]).T
out = torch.from_numpy(out).float()
embeddings = MultiEmbeddding(n_embeds, 64)(out)
print(n_embeds, out.shape, embeddings.shape)
test_eq(embeddings.shape, (10, 64, 6))
#hide
# Export this notebook's #export cells into the tsai library modules
from tsai.imports import create_scripts
from tsai.export import get_nb_name
nb_name = get_nb_name()
create_scripts(nb_name);
| nbs/100_models.layers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="C-z71414BnfZ"
# # UX 2 Collecting Data
#
# Author:
# - |
# <NAME>, <EMAIL>\
# Dept. Architecture, Design and Media Technology, Aalborg University Copenhagen
# ---
#
# ## Learning Goals
# After working your way through this notebook you should be able to:
#
# - Explain what a variable is
# - Give examples of different kinds of data
# - Set up a contingency table
#
#
# ## Table of Contents
#
# - [1. Variables](#Variables)
# - [1.1 Manipulating Variables](#Manipulating)
# - [2. Kinds of Data](#Datatypes)
# - [2.1 Categorical Data](#Categorical)
# - [2.2 Quantitative Data](#Quantitative)
# - [2.3 Completion rate](#CompletionRate)
# - [2.4 Continuous Data](#Continous)
# - [3. Contingency Tables](#Contingency)
# - [4. Sensitivity of Quantitative Data](#Sensitivity)
#
# -
# + [markdown] id="Itlq_U7VBnfi"
# <a id= 'Variables'> </a>
# + [markdown] id="7BD6GoVGBnfi"
#
# ## 1 Variables
#
# A variable is something that can take at least two values. It can be pretty much anything: the number of users clicking the 'help' button during a day; how fast a player can get through a puzzle game; or how users rate a product that is being tested.
#
# We tend to be interested in how variables vary because this can help us answer our research questions. Which, in turn, could be: "Does it have an effect where we place the help button?"; " Can sound effects help players solve a puzzle game more efficiently?"; " Do users prefer our product to another?"
#
#
# + executionInfo={"elapsed": 889, "status": "ok", "timestamp": 1620109279291, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="CixUwUz0Bnfg"
#Loading libraries we will use in this notebook
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# + [markdown] id="BMy5XJXtBnfi"
# <a id= 'Manipulating'> </a>
# + [markdown] id="BjA_6dzjBnfj"
#
# ### 1.1 Manipulating Variables
#
# The previous examples of research questions all concern whether two variables are *related* in some way. That is, does A have an effect on B?
# To answer our question, we collect data on variables A and B and see if there is a relationship between them. For example, we can do this by observations, or by checking the correlation between variables. But to be completely sure that A indeed has an effect on B, we need to set up experiments where we *manipulate* A and measure B. If B changes when nothing but A is altered, we can (with some confidence) say that A has an effect on B. This is the essence of an experiment: manipulating only one variable, keeping everything else constant, and then measuring the effect.
#
# The *INDEPENDENT* variable (A) is the one that is controlled and manipulated
# by the experimenter in order to create different conditions for the
# experiment. The aim is to see if these manipulations have an
# effect on the *DEPENDENT* variable (B), which is the one being measured.
#
# + colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"elapsed": 1095, "status": "ok", "timestamp": 1620109818350, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="p6z_hEa8Bnfj" outputId="bdf123e9-b7c4-4d93-8a1c-4ff9307c62a9"
#Let's assume we have a variable A
# we can generate an array (using numpy library)
A=np.array([3,5,11,17,25,27])
#And if B is a dependent variable there may be a relationship between the two
# (here B is a deterministic linear function of A, so the points fall on a line)
B=2.5*A
#We plot using matplotlib
plt.plot(A,B, 'r*') #the 'r*' specify color and shape of the markers
#in this scatter plot we will use red stars
plt.xlabel('A (Independent Variable)')
plt.ylabel('B (Dependent Variable)')
# + [markdown] id="FGHgb1LnBnfk"
# ### Try out 1:
# Now specify another variable
# + colab={"base_uri": "https://localhost:8080/", "height": 282} executionInfo={"elapsed": 859, "status": "ok", "timestamp": 1620109826864, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="vkOLZSWvBnfk" outputId="49a748db-33ab-4bd7-b4ba-f01637897a17"
#TRY OUT:
#specify a variable C= A+ 20 and plot this vs. A
# C is A shifted by a constant offset — another linear relationship (slope 1)
C= A + 20
plt.plot(A,C, 'b+') #the 'b+' means blue plusses
# -
# ANSWER: What kind of relationship is there between variables A and C?
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 772, "status": "ok", "timestamp": 1620109851580, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="RmjW4ARYBnfl" outputId="da0b0f25-cd63-4c03-97bb-34d5aa973a99"
# We can combine our variables into a 2D array
# (rows = variables A, B, C; columns = observations)
array=np.array([A,B,C])
array
# -
# Note that because B was defined as 2.5 times A (resulting in non-integer numbers), the whole 2D array ends up as floats.
#
# But, just like in the previous notebook, we would like to have the variables in a DATA FRAME where we define the three variables A, B, C.
# + colab={"base_uri": "https://localhost:8080/", "height": 225} executionInfo={"elapsed": 938, "status": "ok", "timestamp": 1620109857344, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="azrKxvqDBnfl" outputId="77f7107b-5cd6-4ee0-9f9a-651b2e5631f2"
# Convert the 2D array into a DATA FRAME: transpose so variables become columns A, B, C
# NOTE(review): assumes pandas was imported as `pd` earlier in the notebook — confirm
df = pd.DataFrame(array.T, columns=(["A","B","C"]), index=[1,2,3,4,5,6])
df
# + colab={"base_uri": "https://localhost:8080/", "height": 284} executionInfo={"elapsed": 754, "status": "ok", "timestamp": 1620109862538, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="bganbHwjBnfl" outputId="56e494ae-0148-44e6-fb70-1cbfaad9d198"
# A data frame allows some quick diagnostics:
# describe() gives summary statistics (count, mean, std, quartiles) per column
df.describe()
# + [markdown] id="4x9m2i4uBnfm"
# <a id= 'Scales'> </a>
# + [markdown] id="eZvGgu_9Bnfm"
# ## 2. Kinds of Data
#
# We can measure different kinds of data: *quantitative* or *categorical*. Different kinds of
# measurement types call for different analysis methods. That
# is, depending on the question, one might be more suitable to use than the
# other.
#
# *Categorical:* Examples might be categorization of gender, race, religion, or type of sport of choice. In terms of UX testing it could be type of usability problem, or a participant choosing the preferred version among two or more implementations. When the categories can be ordered the data is referred to as *ordinal* (as in sizes small, medium, large, etc.). However, the difference between 'small' and 'medium' is not necessarily the same as the difference between 'medium' and 'large'.
#
# *Quantitative:* Quantitative data can be discrete (pass/fail) or continuous (completion time, cm). Continuous data can take any value in an infinite number of steps (time can be measured in fractions of seconds, for instance).
# + [markdown] id="4x9m2i4uBnfm"
# <a id= 'Categorical'> </a>
# -
# ### 2.1 Categorical Data
#
# One example of categorical data is whether a user successfully can complete a task using the system, or not.
# Another that we will use for our purposes now is the random act of flipping a coin: The outcome is either head or tail. We can simulate flipping a coin using a numpy function.
# + colab={"base_uri": "https://localhost:8080/", "height": 319} executionInfo={"elapsed": 938, "status": "ok", "timestamp": 1620110291116, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="UH9F0F6_Bnfm" outputId="3167ca10-e187-437c-dda6-8672fbdcb13e"
# We can see the two outcomes by simulating N=100 coin flips
score=np.random.randint(2, size=100) #generate a random vector of 100 scores, each 0 or 1
#and plot the distribution of this in a histogram
plt.hist(score)
# -
# Here 0 stands for one category and 1 for the other. In the plot we can see how many of the tosses result in 0 and 1. Note that these are categories, there is no point in trying to calculate an average of the two outcomes. Just as we would not calculate means of how many that chose product A or product B.
#
# + [markdown] id="4x9m2i4uBnfm"
# <a id= 'Quantitative'> </a>
# -
# ### 2.2 Quantitative Data
#
# If we instead count the number of heads in each 100 tosses and obtain the total number of heads we have quantitative data.
#
# Similarly we can study the number of "successful" task completions, counting how many users that completed, and obtain the completion rate.
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1491, "status": "ok", "timestamp": 1620110309658, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="TAPm0mu4Bnfn" outputId="c32fed4f-8a1c-472b-ec50-7fa3b53ea7a8"
# If we count the number of heads in each 100 tosses and obtain the total number of heads
# we use another type of measurement.
sum(score) #this is still discrete: an integer count of heads out of 100
# -
# This number is quantitative with a meaningful zero (0 heads in 100 tosses). It can only take discrete values but if we re-do the 100 tosses we can record the measure to compare the outcome and calculate measures in a meaningful way.
#
# Note that the sum of heads will vary slightly, even if we expect that both categories occur approximately equal amount of times. It is a random outcome after all. But if we draw a lot of samples, each time making 100 tosses, we should see the number of heads centered around the expected 50-50
# + colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"elapsed": 777, "status": "ok", "timestamp": 1620110602864, "user": {"displayName": "", "photoUrl": "", "userId": ""}, "user_tz": -120} id="8S-BR4OhBnfn" outputId="bb06bb2f-17fc-41ca-de6f-03b1b4b49d8f"
# If we draw a lot of samples, we should see the proportion of heads centered around the expected 50-50
# We can repeat this a number of times with a different number of tosses (N)
K = 1000  # how many times we run the N-toss simulation
N = 100  # number of tosses (N) = sample size; a plain int, not a list, since it is a single count
meanarr = np.zeros(K, float)  # allocate an array of size K to hold one head-count per run
for j in range(K):  # repeat K times
    score = np.random.randint(2, size=N)  # generate vector of size N with random 0/1 scores
    meanarr[j] = sum(score)  # record the number of heads obtained in run j
count, bins, ignored = plt.hist(meanarr)  # plot histogram of the K head counts
plt.ylabel('Occurrences')
plt.xlabel('#Heads Obtained')
#Try to re-run this code several times, and see the histogram change
# + [markdown] id="4x9m2i4uBnfm"
# <a id= 'CompletionRate'> </a>
# -
# ### 2.3 Completion rate
#
# It is difficult to compare our values of successful task completion if we have different number of users trying it out. It therefore makes sense to record values in proportion to the number of attempts (or participants).
#
# *Completion rate* is the number of successfully completing users divided by the total number of users, moving from discrete values to proportion or percentages. We can do the same with our head tosses and look at proportions instead.
#
# +
#Rather than plotting total frequency we use the histogram option density=True
# This normalizes the bar heights so the total area under the histogram equals 1
count, bins, ignored =plt.hist(meanarr,density= True) #plot histogram with K counts but with area=1
plt.ylabel('Fraction of Occurrences')
plt.xlabel('#Heads Obtained')
# + [markdown] id="4x9m2i4uBnfm"
# <a id= 'Continous'> </a>
# -
# ### 2.4 Continuous Data
# Rather than recording completion scores (0 or 1) and rates, we can measure the task time. That is how *long* does it take for users to complete the task.
#
# When we measure things like time, the data can take any value. Examples are time measured, height, weight...
# Depending on the precision of measurement we can record, two measurements will vary.
#
# ### Try out 2
#
# Record your reaction time using this page https://humanbenchmark.com/tests/reactiontime.
# Redo the test 7 times and record your reaction times (in ms) in a variable. Plot the values.
# Record the reaction times in a variable and plot the histogram
# reactime=np.array([])
# ANSWER: If you would continue to collect a larger sample of reaction time data, would you expect the histogram to have similar shapes as the occurrences of heads in the flipping coin examples? If not, why not?
# <a id= 'Contingency'> </a>
# ### 3 Contingency Table
# Returning once more to categorical data, you might have a selection task where people are asked to make a choice between two or more alternatives. For instance, you may be interested in testing which of two designs the participants prefer. In psychology, the "N-alternative-forced-choice" paradigm is often used, where "N" is typically 2, 3, or some other number of choices that the participants have to select from.
#
# For instance, we could have users select between two images, and we record whether they chose the original file or a manipulated one.
#
# Thus, for categorical data, we typically get it in a form similar to:
#
# | Image pair |Original|
# |-----|--------|
# |1 |Yes |
# |2 |No |
# |3 |No |
# |4 |No |
# |5 |No |
# |6 |Yes |
# |7 |Yes |
# |8 |No |
# |: |: |
#
#
# and so on.
#
# For analysis, we instead want a *contingency table* where we summarize the observed frequencies. In the above example, we can imagine that for a total of 144 image pairs, in 80 cases our participant chose the original image.
#
# |Original |Manipulated|Total|
# |-----|--------|-----|
# |80 |64 |144 |
#
#
# In other cases, we have more variables resulting in several rows, giving us a R x C table. For instance, we might have two different groups of users (novices and experts) that evaluate and select their preferences from among three different physical interfaces for a digital application.
# The data for the different participants and their preferences could look something like this:
# Example preference data: 20 participants, their experience level, and the interface each chose
# NOTE(review): assumes pandas was imported as `pd` earlier in the notebook — confirm
df = pd.DataFrame({'Participant': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
                   11, 12, 13, 14, 15, 16, 17, 18, 19, 20],
                   'Experience': ['novice', 'expert', 'expert', 'novice', 'novice', 'expert',
                   'expert', 'expert', 'expert', 'expert', 'novice', 'novice', 'novice',
                   'novice', 'novice', 'novice', 'novice', 'novice', 'expert', 'novice'],
                   'Interface': ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B', 'B', 'B',
                   'B', 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C']})
# Print out the first four participants
df.head(4)
# This can now be summarized in a contingency table with Experience in the rows, and choice of Interface in columns.
# +
# Create contingency table with Experience as rows, and choice of Interface in columns
Interfacetable = pd.crosstab(df.Experience, [df.Interface], margins = False)
# margins=True would add the row and column sums ("All" row/column)
Interfacetable
# -
# The table should be interpreted as
# - Among the eight experts (first row): 2 chose interface A, 5 interface B, and 1 chose interface C
# - Among the 12 novices (second row): 2 chose interface A, 3 interface B, and 7 chose interface C
#
# Alternatively, if we choose to focus on the columns:
# - Interface A was selected by a total of 4 participants, while interface B and C were both preferred by 8.
#
# From the table, it seems like experts and novices had different preferences in their choice of control interface.
# + [markdown] id="BMy5XJXtBnfi"
# <a id= 'Sensitivity'> </a>
# -
#
# ### 4 Sensitivity of quantitative data
# There is a difference between asking:
#
# *Q. Did you like this product? (Select one)*
#
# YES / NO
#
# **and**
#
# *Q. To what degree did you like this product? (put a cross on the scale)*
#
# Not at all<------------------------------> Very much
#
#
# The first response example generates data that can take one of two values. The other example can take many different values. You have probably encountered the latter as a type of rating scale in surveys, where there might be 3, 5 or more steps between "Not at all" and "Very much". But if done on paper or with an online slider, the number of values the data can take between the two extremes can be many more. Even continuous.
#
# Depending on how we ask and collect quantitative data, it will end up in different places on the spectrum from binary (can take one of two values) to continuous. This also means the measures have different *sensitivities*. The second way of asking will give you more detailed information on the degree to which the product is liked.
#
# Not only does the measure we choose for our data collection determine the sensitivity, it will also determine what type of statistical analysis we can apply. Therefore it is always smart to think as far ahead as data analysis *before* you collect your data.
#
# + [markdown] id="N8jNiR4gBnfp"
# ### Try out 3:
# Compare the above examples to the commonly-used experience rating found here in Microsoft Teams.
#
# What kind of data is this? How is the sensitivity compared to other scales that could have been used?
#
#
# 
# -
| UX2_Collecting_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Working with Data
# ## Loading data from files
# ### Loading data
# An important part of this course is about using Python to analyse and visualise data.
# Most data, of course, is supplied to us in various *formats*: spreadsheets, database dumps, or text files in various formats (csv, tsv, json, yaml, hdf5, netcdf)
# It is also stored in some *medium*: on a local disk, a network drive, or on the internet in various ways.
# It is important to distinguish the data format, how the data is structured into a file, from the data's storage, where it is put.
#
# We'll look first at the question of data *transport*: loading data from a disk, and at downloading data from the internet.
# Then we'll look at data *parsing*: building Python structures from the data.
# These are related, but separate questions.
# ### An example datafile
# Let's write an example datafile to disk so we can investigate it. We'll just use a plain-text file. Jupyter notebook provides a way to do this: if we put
# `%%writefile` at the top of a cell, instead of being interpreted as python, the cell contents are saved to disk.
# +
# %%writefile mydata.txt
A poet once said, 'The whole universe is in a glass of wine.'
We will probably never know in what sense he meant it,
for poets do not write to be understood.
But it is true that if we look at a glass of wine closely enough we see the entire universe.
There are the things of physics: the twisting liquid which evaporates depending
on the wind and weather, the reflection in the glass;
and our imagination adds atoms.
The glass is a distillation of the earth's rocks,
and in its composition we see the secrets of the universe's age, and the evolution of stars.
What strange array of chemicals are in the wine? How did they come to be?
There are the ferments, the enzymes, the substrates, and the products.
There in wine is found the great generalization; all life is fermentation.
Nobody can discover the chemistry of wine without discovering,
as did Louis Pasteur, the cause of much disease.
How vivid is the claret, pressing its existence into the consciousness that watches it!
If our small minds, for some convenience, divide this glass of wine, this universe,
into parts --
physics, biology, geology, astronomy, psychology, and so on --
remember that nature does not know it!
So let us put it all back together, not forgetting ultimately what it is for.
Let it give us one more final pleasure; drink it and forget it all!
- <NAME>
# -
# Where did that go? It went to the current folder, which for a notebook, by default, is where the notebook is on disk.
# +
import os  # The 'os' module gives us all the tools we need to search in the file system
os.getcwd()  # 'getcwd' ("get current working directory") reports where we are on disk
# -
# Can we see if it is there?
# +
import os
# List the .txt entries in the current folder (substring match on the file name)
[name for name in os.listdir(os.getcwd()) if ".txt" in name]
# -
# Yep! Note how we used a list comprehension to filter all the extraneous files.
# ### Path independence and `os`
# We can use `dirname` to get the parent folder for a folder, in a platform independent-way.
os.path.dirname(os.getcwd())  # dirname strips the last path component, giving the parent folder
# We could do this manually using `split`:
"/".join(os.getcwd().split("/")[:-1])
# But this would not work on Windows, where path elements are separated with a `\` instead of a `/`. So it's important
# to use `os.path` for this stuff.
# **Supplementary Materials**: If you're not already comfortable with how files fit into folders, and folders form a tree,
# with folders containing subfolders, then look at http://swcarpentry.github.io/shell-novice/02-filedir/index.html.
#
# Satisfy yourself that after using `%%writefile`, you can then find the file on disk with Windows Explorer, OSX Finder, or the Linux Shell.
# We can see how in Python we can investigate the file system with functions in the `os` module, using just the same programming approaches as for anything else.
# We'll gradually learn more features of the `os` module as we go, allowing us to move around the disk, `walk` around the
# disk looking for relevant files, and so on. These will be important to master for automating our data analyses.
# ### The python `file` type
# So, let's read our file:
# Open the file in the default read-text mode; the handle is closed explicitly later in the notebook
myfile = open("mydata.txt")
type(myfile)  # show the file-object type
# We can go line-by-line, by treating the file as an iterable:
# Consuming the iterator yields each line (with its trailing newline) as a string
[x for x in myfile]
# If we do that again, the file has already finished, there is no more data.
# The file object is now exhausted, so iterating again yields an empty list
[x for x in myfile]
# We need to 'rewind' it!
myfile.seek(0)  # move the read position back to the start of the file
[len(x) for x in myfile if "know" in x]  # lengths of the lines containing "know"
# It's really important to remember that a file is a *different* built in type than a string.
# ### Working with files.
# We can read one line at a time with `readline`:
myfile.seek(0)  # rewind before reading line by line
first = myfile.readline()  # first line, including its trailing newline
first
second = myfile.readline()  # the read position advances, so this is line two
second
# We can read the whole remaining file with `read`:
# `read` returns everything from the current position to the end as one string
rest = myfile.read()
rest
# Which means that when a file is first opened, read is useful to just get the whole thing as a string:
# Open and read in one expression; the handle is not closed explicitly here
# (a context manager, shown below, is the better practice)
open("mydata.txt").read()
# You can also read just a few characters:
# Jump to byte offset 1335 — presumably chosen to land on a particular phrase in mydata.txt; confirm against the file
myfile.seek(1335)
myfile.read(15)  # read just 15 characters from that position
# ### Converting Strings to Files
# Because files and strings are different types, we CANNOT just treat strings as if they were files:
# A plain string, not a file object — note the embedded newline
mystring = "Hello World\n My name is James"
mystring
# + tags=["raises-exception"]
mystring.readline()  # raises AttributeError: str objects have no readline method (cell is tagged raises-exception)
# -
# This is important, because some file format parsers expect input from a **file** and not a string.
# We can convert between them using the StringIO class of the [io module](https://docs.python.org/3/library/io.html) in the standard library:
from io import StringIO
# StringIO wraps a string in an in-memory file-like object, so file APIs work on it
mystringasafile = StringIO(mystring)
mystringasafile.readline()
mystringasafile.readline()
# Note that in a string, `\n` is used to represent a newline.
# ### Closing files
# We really ought to close files when we've finished with them, as it makes the computer more efficient. (On a shared computer,
# this is particularly important)
myfile.close()  # release the OS file handle opened earlier
# Because it's so easy to forget this, python provides a **context manager** to open a file, then close it automatically at
# the end of an indented block:
# +
# `with` closes the file automatically when the indented block ends, even on error
with open("mydata.txt") as somefile:
    content = somefile.read()
content
# -
# The code to be done while the file is open is indented, just like for an `if` statement.
# You should pretty much **always** use this syntax for working with files.
# ### Writing files
# We might want to create a file from a string in memory. We can't do this with the notebook's `%%writefile` -- this is
# just a notebook convenience, and isn't very programmable.
# When we open a file, we can specify a 'mode', in this case, 'w' for writing. ('r' for reading is the default.)
# Mode 'w' creates the file (or truncates an existing one) for writing
with open("mywrittenfile", "w") as target:
    target.write("Hello")
    target.write("World")
# Reopen in mode 'r' to check what was written
with open("mywrittenfile", "r") as source:
    print(source.read())
# And we can "append" to a file with mode 'a':
# Mode 'a' appends to the existing file instead of overwriting it
with open("mywrittenfile", "a") as target:
    target.write("Hello")
    target.write("James")
with open("mywrittenfile", "r") as source:
    print(source.read())
# If a file already exists, mode `w` will overwrite it.
| module02_intermediate_python/02_04_working_with_files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Vector manipulation in Python
#
# In this lab, you will have the opportunity to practice once again with the NumPy library. This time, we will explore some advanced operations with arrays and matrices.
#
# At the end of the previous module, we used PCA to transform a set of many variables into a set of only two uncorrelated variables. This process was made through a transformation of the data called rotation.
#
# In this week's assignment, you will need to find a transformation matrix from English to French vector space embeddings. Such a transformation matrix is nothing else but a matrix that rotates and scales vector spaces.
#
# In this notebook, we will explain in detail the rotation transformation.
# ## Transforming vectors
#
# There are three main vector transformations:
# * Scaling
# * Translation
# * Rotation
#
# In previous notebooks, we have applied the first two kinds of transformations. Now, let us learn how to use a fundamental transformation on vectors called _rotation_.
#
# The rotation operation changes the direction of a vector, letting unaffected its dimensionality and its norm. Let us explain with some examples.
#
# In the following cells, we will define a NumPy matrix and a NumPy array. Soon we will explain how this is related to matrix rotation.
import numpy as np # Import numpy for array manipulation
import matplotlib.pyplot as plt # Import matplotlib for charts
from utils_nb import plot_vectors # Function to plot vectors (arrows)
# ### Example 1
# Create a 2 x 2 transformation matrix
R = np.array([[2, 0], [0, -2]])
x = np.array([[1, 1]])  # a 1 x 2 row vector
# The dot product between a vector and a square matrix produces a rotation and a scaling of the original vector.
#
# Remember that our recommended way to get the dot product in Python is np.dot(a, b):
y = np.dot(x, R)  # transform x by R with the dot product
y
# We are going to use Pyplot to inspect the effect of the rotation on 2D vectors visually. For that, we have created a function `plot_vectors()` that takes care of all the intricate parts of the visual formatting. The code for this function is inside the `utils_nb.py` file.
#
# Now we can plot the vector $\vec x = [1, 1]$ in a cartesian plane. The cartesian plane will be centered at `[0,0]` and its x and y limits will be between `[-4, +4]`
# plot_vectors (from utils_nb) draws the arrows and saves the figure to the given file
plot_vectors([x], axes=[4, 4], fname='transform_x.svg')
# Now, let's plot in the same system our vector $\vec x = [1, 1]$ and its dot product with the matrix
#
# $$Ro = \begin{bmatrix} 2 & 0 \\ 0 & -2 \end{bmatrix}$$
#
# $$y = x \cdot Ro = [[2, -2]]$$
# Plot the original vector x and its transformed image y on the same axes
plot_vectors([x, y], axes=[4, 4], fname='transformx_and_y.svg')
# Note that the output vector `y` (blue) is transformed in another vector.
# ### Example 2
#
# We are going to use Pyplot to inspect the effect of the rotation on 2D vectors visually. For that, we have created a function that takes care of all the intricate parts of the visual formatting. The following procedure plots an arrow within a Pyplot canvas.
#
# Data that is composed of 2 real attributes is said to belong to a $ RxR $ or $ R^2 $ space. Rotation matrices in $R^2$ rotate a given vector $\vec x$ by a counterclockwise angle $\theta$ in a fixed coordinate system. Rotation matrices are of the form:
#
# $$Ro = \begin{bmatrix} cos \theta & -sin \theta \\ sin \theta & cos \theta \end{bmatrix}$$
#
# **(Note:** This notebook uses $$y = x \cdot Ro$$ But if you use $$y = Ro \cdot x.T$$
#
# Then the rotation matrices in $R^2$ rotate a given vector $\vec x$ by a clockwise angle $\theta$ in a fixed coordinate system.**)**
#
# The trigonometric functions in Numpy require the angle in radians, not in degrees. In the next cell, we define a rotation matrix that rotates vectors by $100^o$.
# +
angle = 100 * (np.pi / 180)  # convert degrees to radians
cos_a = np.cos(angle)
sin_a = np.sin(angle)
# Counterclockwise rotation matrix for the angle above
Ro = np.array([[cos_a, -sin_a],
               [sin_a, cos_a]])
x2 = np.array([[2, 2]])  # a 1 x 2 row vector
y2 = np.dot(x2, Ro)  # rotate x2
print('Rotation matrix')
print(Ro)
print('\nRotated vector')
print(y2)
print('\n x2 norm', np.linalg.norm(x2))
print('\n y2 norm', np.linalg.norm(y2))
print('\n Rotation matrix norm', np.linalg.norm(Ro))
# -
# Visualize the original vector x2 and its rotated image y2
plot_vectors([x2, y2], fname='transform_02.svg')
# Some points to note:
#
# * The norm of the input vector is the same as the norm of the output vector. Rotation matrices do not modify the norm of the vector, only its direction.
# * The norm of any $R^2$ rotation matrix is always $\sqrt 2 \approx 1.41421$
# ## Frobenius Norm
#
# The Frobenius norm is the generalization to matrices of the already known norm function for vectors
#
# $$\| \vec a \| = \sqrt {{\vec a} \cdot {\vec a}} $$
#
# For a given matrix $A$ with $m$ rows and $n$ columns, the Frobenius norm is defined as:
#
# $$\|\mathrm{A}\|_{F} \equiv \sqrt{\sum_{i=1}^{m} \sum_{j=1}^{n}\left|a_{i j}\right|^{2}}$$
#
A = np.array([[2, 2], [2, 2]])
# `np.square()` squares each element of a matrix; it is equivalent to using the * operator on NumPy arrays.
A_squared = np.square(A)
A_squared
# Sum the elements of the squared array, then take the square root of that sum.
A_Frobenius = np.sqrt(A_squared.sum())
A_Frobenius
# That was the extended version of the `np.linalg.norm()` function. You can check that it yields the same result.
# Check that the elementwise computation matches np.linalg.norm
# (for a 2-D array, np.linalg.norm defaults to the Frobenius norm)
print('Frobenius norm of the Rotation matrix')
print(np.sqrt(np.sum(Ro * Ro)), '== ', np.linalg.norm(Ro))
# **Congratulations!! We've covered a few more matrix operations in this lab. This will come in handy in this week's programming assignment!**
| 1. Natural Language Processing with Classification and Vector Spaces/Week 4 Machine Translation and Document Search/Lab_1_Transforming vectors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### To Train Object Detector
#
#Clone the darknet repository
# !git clone https://github.com/AlexeyAB/darknet.git
# cd darknet
#Open the makefile and enable the GPU and OpenCV
# !sed -i 's/OPENCV=0/OPENCV=1/' Makefile
# !sed -i 's/GPU=0/GPU=1/' Makefile
# !sed -i 's/CUDNN=0/CUDNN=1/' Makefile
# !sed -i 's/CUDNN_HALF=0/CUDNN_HALF=1/' Makefile
#This accelerates training and increases the speed of detection
#This will build the Makefile which will enable the darknet executable to be further used for training,testing an detecting
# !make
# !chmod +x ./darknet
# To train on Google Colab: mount Google Drive so datasets and trained weights persist across sessions
from google.colab import drive
drive.mount('/content/drive')
# this creates a symbolic link so that now the path /content/gdrive/My\ Drive/ is equal to /mydrive
# !ln -s /content/gdrive/My\ Drive/ /mydrive
# !ls /mydrive
# +
# # cd back into the darknet folder to run detections
# %cd darknet
# -
#Using Transfer Learning by using pre-trained weights on COCO dataset.
# !wget https://github.com/AlexeyAB/darknet/releases/download/darknet_yolo_v3_optimal/yolov4.conv.137
# ### To Train Object Detector
#
# 1.Labeled Custom Dataset
#
# 2.Custom .cfg file
#
# 3.obj.data and obj.names files
#
# 4.train.txt file (test.txt is optional here as well)
#
#
# download cfg to google drive and change its name
# !cp cfg/yolov4-custom.cfg /mydrive/ObjectDetectionYOLOV4/yolov4-obj.cfg
# to download to local machine (change its name to yolov4-obj.cfg once you download)
# NOTE(review): `download` is not defined anywhere in this notebook — in Colab this is
# typically `from google.colab import files; files.download(...)`; confirm the intended helper
download('cfg/yolov4-custom.cfg')
# Now you need to edit the .cfg to fit your needs based on your object detector. Open it up in a code or text editor to do so.
#
# batch = 64 and subdivisions = 64 If you run into GPU memory issues. Otherwise to 16 or 32
#
# Make the rest of the changes to the cfg based on how many classes you are training your detector on.
#
# Note: I set my max_batches = 6000 and policy=sgdr with sgdr_cycle=1000 and sgdr_mult=2 I changed the classes = 1 in the three YOLO layers and filters = 18 in the three convolutional layers before the YOLO layers.
#
# How to Configure Your Variables:
#
# width = 512
#
# height = 512 (these can be any multiple of 32, 416 is standard, you can sometimes improve results by making value larger like 608 but will slow down training)
#
# max_batches = (# of classes) * 2000 (but no less than 6000 so if you are training for 1, 2, or 3 classes it will be 6000)
#
# policy=steps - policy for changing learning rate: constant (by default), sgdr, steps, step, sig, exp, poly, random (f.e., if policy=random - then current learning rate will be changed in this way = learning_rate * pow(rand_uniform(0,1), power)
#
# filters = (# of classes + 5) * 3 (so if you are training for one class then your filters = 18, but if you are training for 2 classes then your filters = 21)
#
# sgdr_cycle=1000 - if policy=sgdr - the initial number of iterations in cosine-cycle
#
# sgdr_mult=2 - if policy=sgdr
#
# Upload the images with the corresponding annotation files into darknet directory into obj inside data. Both Training and Testing data along with the txt files generated by using the python scripts generate_train_txt.py and generate_validation_txt.py
# create a obj.data file and fill it in like this (change your number of classes accordingly, as well as your backup location)
# obj.names and obj.data
#
# Create a new file within a code or text editor called obj.names where you will have one class name per line from the dataset generation step. Here obj.names is edited in notepad. Cruise is the class to be detected
# 
#
#
# You will also create a obj.data file and fill it in like this (change your number of classes accordingly, as well as your backup location)
#
# 
#
# train is the location where the train.txt file is saved by using the python script. It will link to the text file with the training images
#
# valid is the location where the test.txt/validation.txt file is saved using the python script. It will link to the text file with the validation images
#
# backup saves the location of the weights during training
# ##### Training
#
# Note These commands should be run from darknet directory.
#
#To start training custom yolo model
# !./darknet detector train data/obj.data cfg/yolov4-obj.cfg yolov4.conv.137 -dont_show -map
#Google Colab has the tendency to disconnect after 6 hours of training. Hence to start training from previous weight
# !./darknet detector train data/obj.data cfg/yolov4-obj.cfg /mydrive/ObjectDetectionYOLOV4/backup/yolov4-obj_last.weights -dont_show
# Checking the Mean Average Precision (mAP) of Your Model
#Once the training is completed, in obj.data the test data can be input into the valid field.
# !./darknet detector map data/obj.data cfg/yolov4-obj.cfg /mydrive/ObjectDetectionYOLOV4/backup/yolov4-obj_best.weights
# Running Detection on test images
# +
# need to set the custom cfg to test mode
# %cd cfg
# !sed -i 's/batch=64/batch=1/' yolov4-obj.cfg
# !sed -i 's/subdivisions=64/subdivisions=1/' yolov4-obj.cfg
# %cd ..
# -
# run your custom detector with this command (upload an image to your google drive to test, thresh flag sets accuracy that detection must be in order to show it)
# !./darknet detector test data/obj.data cfg/yolov4-obj.cfg /mydrive/ObjDetYOLOv4MoreClasses/backup/yolov4-obj_best.weights /mydrive/ObjectDetectionYOLOV4/test3.jpg -thresh 0.5
# +
#run your custom detector on video
# !./darknet detector demo data/obj.data cfg/yolov4-obj.cfg /mydrive/ObjDetYOLOv4MoreClasses/backup/yolov4-obj_best.weights -dont_show /mydrive/ObjectDetectionYOLOV4/Neulander_Test.mov -i 0 -out_filename /mydrive/ObjectDetectionYOLOV4/results.avi
| Object_detection/YoloV4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kinematics of a particle
#
# <NAME>
# **Kinematics** is the branch of classical mechanics that describes the motion of objects without consideration of the causes of motion ([Wikipedia](http://en.wikipedia.org/wiki/Kinematics)). Kinematics of a particle is the description of the motion when the object is considered a particle. A particle as a physical object does not exist in nature; it is a simplification to understand the motion of a body or it is a conceptual definition such as the center of mass of a system of objects.
# ## Position
#
# Consider a point in the three-dimensional Euclidean space described in a Cartesian coordinate system:
# <br>
# <figure><img src="./../images/vector3Dijk.png" width=350/><figcaption><center><i>Figure. Representation of a point **P** and its position vector **a** in a Cartesian coordinate system. The versors <b>i, j, k</b> form a basis for this coordinate system and are usually represented in the color sequence RGB (red, green, blue) for easier visualization.</i></center></figcaption></figure>
#
# The position of this point in space can be represented as a triple of values each representing the coordinate at each axis of the Cartesian coordinate system following the $ \mathbf{X, Y, Z} $ convention order (which is omitted):
#
# $$ (x,\: y,\: z) $$
#
# The position of a particle in space can also be represented by a vector in the Cartesian coordinate system, with the origin of the vector at the origin of the coordinate system and the tip of the vector at the point position:
#
# $$ \mathbf{r}(t) = x\:\mathbf{i} + y\:\mathbf{j} + z\:\mathbf{k} $$
#
# Where $ \mathbf{i,\: j,\: k} $ are unit vectors in the directions of the axes $ \mathbf{X, Y, Z} $.
#
# For a review on vectors, see the notebook [Scalar and vector](http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/ScalarVector.ipynb).
#
# With this new notation, the coordinates of a point representing the position of a particle that vary with time would be expressed by the following position vector $ \mathbf{r}(t)$:
#
# $$ \mathbf{r}(t) = x(t)\:\mathbf{i} + y(t)\:\mathbf{j} + z(t)\:\mathbf{k} $$
#
# A vector can also be represented in matrix form:
#
# $$ \mathbf{r}(t) = \begin{bmatrix} x(t) \\y(t) \\z(t) \end{bmatrix}$$
#
# And the unit vectors in each Cartesian coordinate in matrix form are given by:
#
# $$ \mathbf{i}= \begin{bmatrix}1\\0\\0 \end{bmatrix},\; \mathbf{j}=\begin{bmatrix}0\\1\\0 \end{bmatrix},\; \mathbf{k}=\begin{bmatrix} 0 \\ 0 \\ 1 \end{bmatrix}$$
#
# In [linear algebra](http://en.wikipedia.org/wiki/Linear_algebra), a set of unit linearly independent vectors as the three vectors above (orthogonal in the Euclidean space) that can represent any vector via [linear combination](http://en.wikipedia.org/wiki/Linear_combination) is called a basis. A basis is the foundation of creating a reference frame and we will study how to do that other time.
# ### Displacement
#
# The shortest distance from the initial to the final position of a particle. As the difference between two vectors; displacement is also a vector quantity.
# ## Velocity
#
# Velocity is the rate (with respect to time) of change of the position of a particle:
#
# $$ \mathbf{v}(t) = \frac{\mathbf{r}(t_2)-\mathbf{r}(t_1)}{t_2-t_1} = \frac{\Delta \mathbf{r}}{\Delta t}$$
#
# The instantaneous velocity of the particle is obtained when $\Delta t\;$ approaches to zero, which from calculus is the first-order [derivative](http://en.wikipedia.org/wiki/Derivative) of the position vector. The derivative of a vector is obtained by differentiating each vector component:
#
# $$ \mathbf{v}(t) = \frac{\mathrm{d}\mathbf{r}(t)}{dt} = \frac{\mathrm{d}x(t)}{\mathrm{d}t}\mathbf{i} + \frac{\mathrm{d}y(t)}{\mathrm{d}t}\mathbf{j} + \frac{\mathrm{d}z(t)}{\mathrm{d}t}\mathbf{k} $$
#
# And in matrix form:
#
# $$ \mathbf{v}(t) = \begin{bmatrix}
# \frac{\mathrm{d}x(t)}{\mathrm{d}t} \\
# \frac{\mathrm{d}y(t)}{\mathrm{d}t} \\
# \frac{\mathrm{d}z(t)}{\mathrm{d}t}
# \end{bmatrix}$$
# ## Acceleration
#
# Acceleration is the rate (with respect to time) of change of the velocity of a particle, which can also be given by the second-order rate of change of the position:
#
# $$ \mathbf{a}(t) = \frac{\mathbf{v}(t_2)-\mathbf{v}(t_1)}{t_2-t_1} = \frac{\Delta \mathbf{v}}{\Delta t} = \frac{\Delta^2 \mathbf{r}}{\Delta t^2}$$
#
# Likewise, acceleration is the first-order derivative of the velocity or the second-order derivative of the position vector:
#
# $$ \mathbf{a}(t) = \frac{\mathrm{d}\mathbf{v}(t)}{\mathrm{d}t} = \frac{\mathrm{d}^2\mathbf{r}(t)}{\mathrm{d}t^2} = \frac{\mathrm{d}^2x(t)}{\mathrm{d}t^2}\mathbf{i} + \frac{\mathrm{d}^2y(t)}{\mathrm{d}t^2}\mathbf{j} + \frac{\mathrm{d}^2z(t)}{\mathrm{d}t^2}\mathbf{k} $$
#
# And in matrix form:
#
# $$ \mathbf{a}(t) = \begin{bmatrix}
# \frac{\mathrm{d}^2x(t)}{\mathrm{d}t^2} \\
# \frac{\mathrm{d}^2y(t)}{\mathrm{d}t^2} \\
# \frac{\mathrm{d}^2z(t)}{\mathrm{d}t^2}
# \end{bmatrix}$$
# ## The antiderivative
#
# As the acceleration is the derivative of the velocity which is the derivative of position, the inverse mathematical operation is the [antiderivative](http://en.wikipedia.org/wiki/Antiderivative) (or integral):
#
# $$ \begin{array}{l l}
# \mathbf{r}(t) = \mathbf{r}_0 + \int \mathbf{v}(t) \:\mathrm{d}t \\
# \mathbf{v}(t) = \mathbf{v}_0 + \int \mathbf{a}(t) \:\mathrm{d}t
# \end{array} $$
# ## Some cases of motion of a particle
#
# #### Particle at rest
#
# $$ \begin{array}{l l}
# \mathbf{a}(t) = 0 \\
# \mathbf{v}(t) = 0 \\
# \mathbf{r}(t) = \mathbf{r}_0
# \end{array} $$
#
# #### Particle at constant speed
#
# $$ \begin{array}{l l}
# \mathbf{a}(t) = 0 \\
# \mathbf{v}(t) = \mathbf{v}_0 \\
# \mathbf{r}(t) = \mathbf{r}_0 + \mathbf{v}_0t
# \end{array} $$
#
# #### Particle at constant acceleration
#
# $$ \begin{array}{l l}
# \mathbf{a}(t) = \mathbf{a}_0 \\
# \mathbf{v}(t) = \mathbf{v}_0 + \mathbf{a}_0t \\
# \mathbf{r}(t) = \mathbf{r}_0 + \mathbf{v}_0t + \frac{\mathbf{a}_0t^2}{2}
# \end{array} $$
# ### Visual representation of these cases
# Import the necessary libraries
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
#sns.set()
sns.set_context("notebook", font_scale=1.2, rc={"lines.linewidth": 2, "lines.markersize": 10})
# +
# Time grid (0 to 2 s in 20 ms steps) and the constants shared by all three
# motion scenarios: initial position r0, initial velocity v0, acceleration a0.
t = np.arange(0, 2.0, 0.02)
r0 = 1; v0 = 2; a0 = 4
plt.rc('axes', labelsize=14, titlesize=14)
plt.rc('xtick', labelsize=10)
plt.rc('ytick', labelsize=10)
# 3x3 grid: rows are position r(t), velocity v(t), acceleration a(t);
# columns are the three cases (at rest, constant speed, constant acceleration).
f, axarr = plt.subplots(3, 3, sharex = True, sharey = True, figsize=(14,7))
plt.suptitle('Kinematics of a particle', fontsize=20);
# Constant-valued curves are drawn as c*tones so they span the whole time grid.
tones = np.ones(np.size(t))
# Column 0: particle at rest -- constant position, zero velocity/acceleration.
axarr[0, 0].set_title('at rest', fontsize=14);
axarr[0, 0].plot(t, r0*tones, 'g', linewidth=4, label='$r(t)=1$')
axarr[1, 0].plot(t, 0*tones, 'b', linewidth=4, label='$v(t)=0$')
axarr[2, 0].plot(t, 0*tones, 'r', linewidth=4, label='$a(t)=0$')
axarr[0, 0].set_ylabel('r(t) [m]')
axarr[1, 0].set_ylabel('v(t) [m/s]')
axarr[2, 0].set_ylabel('a(t) [m/s$^2$]')
# Column 1: constant speed -- linear position, constant velocity.
axarr[0, 1].set_title('at constant speed');
axarr[0, 1].plot(t, r0*tones+v0*t, 'g', linewidth=4, label='$r(t)=1+2t$')
axarr[1, 1].plot(t, v0*tones, 'b', linewidth=4, label='$v(t)=2$')
axarr[2, 1].plot(t, 0*tones, 'r', linewidth=4, label='$a(t)=0$')
# Column 2: constant acceleration -- quadratic position, linear velocity.
axarr[0, 2].set_title('at constant acceleration');
axarr[0, 2].plot(t, r0*tones+v0*t+1/2.*a0*t**2,'g', linewidth=4,
                 label='$r(t)=1+2t+\\frac{1}{2}4t^2$')
axarr[1, 2].plot(t, v0*tones+a0*t, 'b', linewidth=4,
                 label='$v(t)=2+4t$')
axarr[2, 2].plot(t, a0*tones, 'r', linewidth=4,
                 label='$a(t)=4$')
# Label only the bottom row's x-axes; set shared limits and a legend on all axes.
for i in range(3):
    axarr[2, i].set_xlabel('Time [s]');
    for j in range(3):
        axarr[i,j].set_ylim((-.2, 10))
        axarr[i,j].legend(loc = 'upper left', frameon=True, framealpha = 0.9, fontsize=16)
# Tighten the gaps between the shared-axis panels.
plt.subplots_adjust(hspace=0.09, wspace=0.07)
# -
# ## Kinematics of human movement
#
# An example where the analysis of some aspects of the human body movement can be reduced to the analysis of a particle is the study of the biomechanics of the 100-metre race. Watch the video below to understand how this can be done.
from IPython.display import IFrame
IFrame('http://video.mit.edu/embed/12042/', width=640, height=360)
# A technical report with the kinematic data for the 100-m world record by <NAME> discussed in the video above can be downloaded from the [website for Research Projects](http://www.iaaf.org/development/research) from the International Association of Athletics Federations. [Here is a direct link for that report](http://www.iaaf.org/download/download?filename=76ade5f9-75a0-4fda-b9bf-1b30be6f60d2.pdf&urlSlug=1-biomechanics-report-wc-berlin-2009-sprint). In particular, the following table shows the data for the three medalists in that race:
# <br>
# <figure><img src="./../images/Berlin2009_100m.png" width=700 alt="partial times of the 100m-race at Berlin 2009"/><figcaption><center><i>Figure. Data from the three medalists of the 100-m dash in Berlin, 2009 (<a href="http://www.iaaf.org/download/download?filename=76ade5f9-75a0-4fda-b9bf-1b30be6f60d2.pdf&urlSlug=1-biomechanics-report-wc-berlin-2009-sprint)">IAAF report</a>).</i></center></figcaption></figure>
#
# The column **RT** in the table above refers to the reaction time of each athlete. The IAAF has a very strict rule about reaction time: any athlete with a reaction time less than 100 ms is disqualified from the competition! See the website [Reaction Times and Sprint False Starts](http://condellpark.com/kd/reactiontime.htm) for a discussion about this rule.
#
# You can measure your own reaction time visiting this website: [http://www.humanbenchmark.com/tests/reactiontime](http://www.humanbenchmark.com/tests/reactiontime).
#
# The article [A Kinematics Analysis Of Three Best 100 M Performances Ever](http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3661886/) by Krzysztof and Mero presents a detailed kinematic analysis of 100-m races.
# ## Problems
#
# 1. Consider the data for the three medalists of the 100-m dash in Berlin, 2009, shown previously.
# a. Calculate the instantaneous velocity and acceleration.
# b. Plot the graphs for the displacement, velocity, and acceleration versus time.
# c. Plot the graphs velocity and acceleration versus partial distance (every 20m).
# d. Calculate the mean velocity and mean acceleration and the instants and values of the peak velocity and peak acceleration.
#
# 2. The article "Biomechanical Analysis of the Sprint and Hurdles Events at the 2009 IAAF World Championships in Athletics" by <NAME> Nixdorf lists the 10-m split times for the three medalists of the 100-m dash in Berlin, 2009:
# <br>
# <figure><img src="./../images/Berlin2009_100m_10.png" width=600 alt="partial times of the 100m-race at Berlin 2009"/></figure>
#
# a. Repeat the same calculations performed in problem 1 and compare the results.
#
# 3. A body attached to a spring has its position (in cm) described by the equation $x(t) = 2\sin(4\pi t + \pi/4)$.
# a. Calculate the equation for the body velocity and acceleration.
# b. Plot the position, velocity, and acceleration in the interval [0, 1] s.
#
# 4. There are some nice free software that can be used for the kinematic analysis of human motion. Some of them are: [Kinovea](http://www.kinovea.org/), [Tracker](http://www.cabrillo.edu/~dbrown/tracker/), and [SkillSpector](http://video4coach.com/index.php?option=com_content&task=view&id=13&Itemid=45). Visit their websites and explore these software to understand in which biomechanical applications they could be used.
# ## References
#
# - <NAME>, <NAME> (2011) [Biomechanical Analysis of the Sprint and Hurdles Events at the 2009 IAAF World Championships in Athletics ](http://www.meathathletics.ie/devathletes/pdf/Biomechanics%20of%20Sprints.pdf). [New Studies in Athletics](http://www.iaaf.org/development/new-studies-in-athletics), 1/2, 19-53.
# - <NAME>, <NAME> (2013) [A Kinematics Analysis Of Three Best 100 M Performances Ever](http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3661886/). Journal of Human Kinetics, 36, 149–160.
# - [Research Projects](http://www.iaaf.org/development/research) from the International Association of Athletics Federations.
| notebooks/KinematicsParticle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Taylor Polynomial Approximations
#
# <NAME>
#
# Created 4/17/21
#
# Based on a <a href="https://github.com/adam-rumpf/mathematica-class-demonstrations#taylor-and-fourier-series-approximations" target="_blank">Mathematica class demonstration</a>.
#
# This is a standalone widget for playing around with Taylor polynomial approximations of various functions. See the full notebook [here](./taylor-series.ipynb).
#
# [Main Project Page](.././index.ipynb)
# + jupyter={"source_hidden": true} tags=[]
# %matplotlib widget
import math

import ipywidgets as widgets
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
# Define parameters
INF = 100 # out-of-bounds value
LIM = 10 # axis bounds
# Define functions and derivatives
def d_sin(x, n=0):
    """n-th derivative of sin(x).

    The derivatives of sine cycle with period 4:
    sin -> cos -> -sin -> -cos -> sin -> ...
    so only n mod 4 matters.
    """
    def neg_sin(v):
        return -np.sin(v)

    def neg_cos(v):
        return -np.cos(v)

    # Dispatch on the cycle position instead of an if/elif ladder.
    cycle = (np.sin, np.cos, neg_sin, neg_cos)
    return cycle[n % 4](x)
def d_cos(x, n=0):
    """n-th derivative of cos(x).

    Since cos is the first derivative of sin, the n-th derivative of cos
    equals the (n+1)-th derivative of sin; delegate to d_sin.
    """
    return d_sin(x, n=n+1)
def d_exp(x, n=0):
    """n-th derivative of exp(x).

    exp is its own derivative, so the result is independent of n; the
    parameter exists only to match the signature of the other d_* helpers.
    """
    return np.exp(x)
def d_log(x, n=0):
    """n-th derivative of log(x+1).

    For n == 0 returns log(x+1) itself; for n >= 1 uses the closed form
    d^n/dx^n log(x+1) = (-1)**(n+1) * (n-1)! / (x+1)**n.

    Uses math.factorial: the old np.math alias was deprecated and removed
    in NumPy >= 1.25, so the previous np.math.factorial call breaks there.
    """
    if n == 0:
        return np.log(x+1)
    else:
        return (math.factorial(n-1) * (-1)**(n+1)) / (x+1)**n
def d_bell(x, n=0):
    """n-th derivative of a bell curve.

    The curve is 5*exp(-x**2/5) (the "bell curve" entry used by this
    notebook).  Every derivative has the shape
        exp(-x**2/5) * 5**(1-n) * sign * polynomial(x),
    and the polynomials below are hard-coded for n = 0..10 only.  Any
    other n falls through every branch and returns just the prefactor,
    which is NOT a valid derivative -- callers must keep n in 0..10.
    """
    # Shared prefactor exp(-x**2/5) * 5**(1-n); each branch multiplies in
    # the sign and polynomial factor for its order.
    out = np.exp(-(x**2)/5) * 5**(-(n-1))
    if n == 0:
        return out
    elif n == 1 or n == 2:
        out *= -2
        if n == 1:
            out *= x
        elif n == 2:
            out *= 5 - 2*(x**2)
    elif n == 3 or n == 4:
        out *= 4
        if n == 3:
            out *= (15 - 2*(x**2))*x
        elif n == 4:
            out *= 75 - 60*(x**2) + 4*(x**4)
    elif n == 5 or n == 6:
        out *= -8
        if n == 5:
            out *= (375 - 100*(x**2) + 4*(x**4))*x
        elif n == 6:
            out *= 1875 - 2250*(x**2) + 300*(x**4) - 8*(x**6)
    elif n == 7 or n == 8:
        out *= 16
        if n == 7:
            out *= (13125 - 5250*(x**2) + 420*(x**4) - 8*(x**6))*x
        elif n == 8:
            out *= 65625 - 105000*(x**2) + 21000*(x**4) - 1120*(x**6) + 16*(x**8)
    elif n == 9 or n == 10:
        out *= -32
        if n == 9:
            # NOTE(review): unlike every other order, the x**6 and x**8 terms
            # here do not alternate in sign (+1440, +16) -- compare n == 7,
            # which ends ... + 420*x**4 - 8*x**6.  Verify this polynomial
            # against the analytic 9th derivative; it looks like a sign typo.
            out *= (590625 - 315000*(x**2) + 37800*(x**4) + 1440*(x**6) + 16*(x**8))*x
        elif n == 10:
            out *= 2953125 - 5906250*(x**2) + 1575000*(x**4) - 126000*(x**6) + 3600*(x**8) - 32*(x**10)
    return out
def d_poly(x, n=0):
    """n-th derivative of a polynomial function.

    Order 0 is 0.35 + 0.16*x - 0.1875*x**2 + 0.005*x**3 + 0.0025*x**4;
    orders 1-4 are its successive derivatives, and every other order is
    identically zero.
    """
    # Coefficients of each derivative, highest power first (Horner order).
    coeff_table = {
        0: (0.0025, 0.005, -0.1875, 0.16, 0.35),
        1: (0.01, 0.015, -0.375, 0.16),
        2: (0.03, 0.03, -0.375),
        3: (0.06, 0.03),
        4: (0.0, 0.06),
    }
    coeffs = coeff_table.get(n)
    if coeffs is None:
        # Derivatives of order > 4 vanish; x*0.0 (rather than a bare 0.0)
        # keeps array-shaped inputs array-shaped.
        return x*0.0
    # Horner evaluation of the selected polynomial.
    acc = 0.0
    for c in coeffs:
        acc = acc*x + c
    return acc
def d_ratio(x, n=0):
    """n-th derivative of 1/(x+1).

    Closed form: d^n/dx^n (x+1)**-1 = (-1)**n * n! / (x+1)**(n+1).
    The previous implementation omitted the n! factor, so every
    derivative of order >= 2 was wrong (e.g. the 2nd derivative at x=0
    is 2, not 1), making the Taylor approximation of 1/(x+1) incorrect
    beyond degree 1.
    """
    return ((-1)**n * math.factorial(n)) / ((x+1)**(n+1))
# Map display name -> function computing the n-th derivative of that curve.
func = {
    "sine": d_sin,
    "cosine": d_cos,
    "exponential": d_exp,
    "logarithm": d_log,
    "bell curve": d_bell,
    "polynomial": d_poly,
    "rational": d_ratio,
}

# Map display name -> LaTeX plot label.  Raw strings keep "\s", "\c", "\l"
# from being parsed as (invalid) escape sequences, which emits a
# SyntaxWarning on modern Python; the string values are unchanged.
func_name = {
    "sine": r"$\sin x$",
    "cosine": r"$\cos x$",
    "exponential": r"$e^x$",
    "logarithm": r"$\log(x+1)$",
    "bell curve": r"$5e^{-x^2/5}$",
    "polynomial": r"$0.35 + 0.16x - 0.1875x^2 + 0.005x^3 + 0.0025x^4$",
    "rational": r"$1/(x+1)$",
}
# Define Taylor polynomial
def taylor(x, fname, a, n):
    """Taylor polynomial for a given function.

    Positional arguments:
    x - input value (scalar or numpy array)
    fname - key from the module-level 'func' dictionary
    a - center of the expansion
    n - polynomial degree

    Returns sum_{i=0}^{n} f^(i)(a) / i! * (x - a)**i.
    """
    out = 0.0  # accumulated polynomial value

    # Add terms of the Taylor polynomial.  math.factorial replaces the old
    # np.math.factorial call: the np.math alias was removed in NumPy >= 1.25.
    for i in range(n+1):
        out += (func[fname](a, n=i) / math.factorial(i)) * (x-a)**i

    return out
# Set up plot
fig, ax = plt.subplots()
xbase = np.linspace(-LIM, LIM, 101) # base x-values
# Draw plot lines
# Interactive callback: redraws the chosen function and its degree-n Taylor
# polynomial centered at a whenever a widget control changes.
@widgets.interact(fname=func.keys(), a=(-LIM, LIM, 0.05), n=(0, 10, 1))
def update1(fname="sine", a=0.0, n=1):
    """Redraw the plot for function fname, expansion center a, degree n."""
    global ax
    a0 = a  # working copy of the center (may be clamped below)
    # Generate function values
    if fname == "logarithm":
        # log(x+1) is only defined for x > -1; also keep the expansion
        # center safely away from the singularity.
        x = np.linspace(-0.99, LIM, 101)
        a0 = max(a0, -0.9)
    elif fname == "rational":
        # 100 samples (instead of 101) means the grid does not land exactly
        # on x = -1, avoiding a division by zero at the pole of 1/(x+1).
        x = np.linspace(-LIM, LIM, 100)
        if a0 == -1.0:
            a0 += 0.05
    else:
        x = np.linspace(-LIM, LIM, 101)
    y = np.zeros_like(x)
    # Redraw plot
    ax.clear()
    ax.set_xlim([-LIM, LIM])
    ax.set_ylim([-LIM, LIM])
    ax.set_aspect(1)
    plt.title(func_name[fname])
    ax.grid(False)
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    # Target function in the default blue, Taylor polynomial in orange,
    # with a dot marking the expansion center.
    ax.plot(x, func[fname](x), color="C0")
    y = taylor(xbase, fname, a0, n)
    ax.plot(xbase, y, color="C1")
    ax.plot(a0, func[fname](a0), color="C1", marker=".", markersize=10)
    if fname in {"logarithm", "rational"}:
        # Dashed vertical asymptote at x = -1; the white line underneath
        # keeps the dashes visible where they cross the curves.
        ax.plot([-1, -1], [-INF, INF], color="white")
        ax.plot([-1, -1], [-INF, INF], color="black", linestyle="dashed")
# -
| calc-diffeq-analysis/taylor-series-standalone.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # First TensorFlow Graphs
# In this notebook, we execute elementary TensorFlow computational graphs.
# #### Load dependencies
import numpy as np
import tensorflow as tf
# #### Simple arithmetic
# Placeholders are TF1-style graph inputs whose values are supplied at run
# time through feed_dict.  (In TensorFlow 2 this API lives under
# tf.compat.v1 and requires disabling eager execution.)
x1 = tf.placeholder(tf.float32)
x2 = tf.placeholder(tf.float32)
sum_op = tf.add(x1, x2)
product_op = tf.multiply(x1, x2)
# Feed concrete scalars into the graph and evaluate both ops.  The original
# cell left the feed values blank ({x1: , x2: }), which is a SyntaxError.
with tf.Session() as session:
    sum_result = session.run(sum_op, feed_dict={x1: 10.0, x2: 4.0})
    product_result = session.run(product_op, feed_dict={x1: 10.0, x2: 4.0})
sum_result
product_result
# #### Simple array arithmetic
# The same graph works on arrays: the shape-less placeholders accept list
# inputs and the ops apply element-wise.  The original cell left the feed
# values blank ({x1: , x2: }), which is a SyntaxError.
with tf.Session() as session:
    sum_result = session.run(sum_op, feed_dict={x1: [1., 2., 3.], x2: [4., 5., 6.]})
    product_result = session.run(product_op, feed_dict={x1: [1., 2., 3.], x2: [4., 5., 6.]})
sum_result
product_result
| notebooks/live_training/first_tensorflow_graphs_LT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Transport for London Cycle Data Exploration
#
# ## Dataset
# The data was provided from TFL and was retrieved from Kaggle: https://www.kaggle.com/hmavrodiev/london-bike-sharing-dataset
# The dataset counts the number of journeys made per hour in each day of 2015-2017.
# There are 17414 rows.
# +
import data_proc as dp
cycle_data = dp.load_tfl_csv()
# cycle_data.head().to_markdown()
# -
# | | timestamp | cnt | t1 | t2 | hum | wind_speed | weather_code | is_holiday | is_weekend | season |
# |---:|:--------------------|------:|-----:|-----:|------:|-------------:|---------------:|-------------:|-------------:|---------:|
# | 0 | 2015-01-04 00:00:00 | 182 | 3 | 2 | 93 | 6 | 3 | 0 | 1 | 3 |
# | 1 | 2015-01-04 01:00:00 | 138 | 3 | 2.5 | 93 | 5 | 1 | 0 | 1 | 3 |
# | 2 | 2015-01-04 02:00:00 | 134 | 2.5 | 2.5 | 96.5 | 0 | 1 | 0 | 1 | 3 |
# | 3 | 2015-01-04 03:00:00 | 72 | 2 | 2 | 100 | 0 | 1 | 0 | 1 | 3 |
# | 4 | 2015-01-04 04:00:00 | 47 | 2 | 0 | 93 | 6.5 | 1 | 0 | 1 | 3 |
# ## Preprocessing
# The data is preprocessed to change column names and convert some columns. We also aggregate the data into days and save it for later use.
#
# The details of the functions can be found in the `data_proc.py` file
# +
cycle_data = dp.change_column_names(cycle_data)
cycle_data = dp.convert_to_timestamp_objects(cycle_data)
cycle_day_data = dp.aggregate_data_over_each_day(cycle_data)
dp.export_parquet(cycle_day_data)
# -
# # Looking at time trends
# Against week day there are generally fewer journeys on weekends than
# weekdays, but not by a large amount.
# The highest count of journeys in a single day was 72.5k.
# +
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("whitegrid")
plt.style.use("seaborn-whitegrid")
# -
# ## Weekly Trends
plt.figure(num=None, figsize=(10, 6), dpi=80)
sns.boxplot(x="week_day", y="count", data=cycle_day_data.reset_index())
plt.tight_layout()
plt.xlabel("Day of week")
plt.ylabel("Number of trips/day")
plt.savefig("images/journeys_per_week.png")
plt.show()
# 
#
# However, breaking it down by hour shows that the distribution of journeys
# over the day are very different. There are two clear commuting times per
# weekday, whereas the weekend has a flatter distribution. Friday evening
# also suggests fewer journeys are made.
plt.figure(num=None, figsize=(10, 6), dpi=80)
agr_counts = (
cycle_data[["week_day", "hour", "count"]]
.groupby(by=["week_day", "hour"], axis=0)
.mean()
)
agr_counts_pivot = agr_counts.reset_index().pivot(
index="week_day", columns="hour", values="count"
)
sns.heatmap(agr_counts_pivot)
plt.title("Mean journeys per hour")
plt.xlabel("Hour")
plt.ylabel("Week day")
plt.savefig("images/journeys_per_hour.png")
plt.show()
# 
#
# ## Monthly Trends
# Against month - there are fewer journeys made in winter time:
plt.figure(num=None, figsize=(10, 6), dpi=80)
sns.boxplot(x="month", y="count", data=cycle_day_data.reset_index())
plt.tight_layout()
plt.xlabel("Month")
plt.ylabel("Number of trips/day")
plt.savefig("images/journeys_per_month.png")
plt.show()
# 
#
# Looking at the distribution over the day against each month, shows that in
# summer a higher proportion of journeys are made later in the evening.
# The two commuting peaks are more spread out.
# +
plt.figure(num=None, figsize=(10, 6), dpi=80)
agr_counts = (
cycle_data[["month", "hour", "count"]].groupby(by=["month", "hour"], axis=0).mean()
)
# Normalise over the sum of each day
agr_counts_norm = agr_counts.groupby("month").transform(lambda x: (x / x.sum()))
agr_counts_norm_pivot = agr_counts_norm.reset_index().pivot(
index="month", columns="hour", values="count"
)
sns.heatmap(agr_counts_norm_pivot)
plt.title("% journeys per hour")
plt.xlabel("Hour")
plt.ylabel("Month")
plt.savefig("images/journeys_per_hour_month_prop.png")
plt.show()
# -
# 
#
# ## Yearly Trends
# Is there an increase in journeys over time?
# +
import statsmodels.api as sm
import datetime
# generate datenum as regress on the number of journeys
temp = cycle_day_data.reset_index().copy()
# Build a real date column from the year/month/day index columns.
temp["datetime"] = temp.apply(
    func=lambda x: datetime.date(x["year"], x["month"], x["day"]), axis=1
)
# Proleptic-Gregorian ordinal day number, centered on its mean so the OLS
# intercept is the mean daily count rather than the count at year 1.
temp["datetimeint"] = temp["datetime"].apply(lambda x: x.toordinal())
temp["datetimeint"] = temp["datetimeint"] - temp["datetimeint"].mean()
temp = sm.add_constant(temp)  # adds the 'const' intercept column
# OLS of daily journey count on centered time: the slope is the average
# change in journeys per day.
model = sm.OLS(temp["count"], temp.loc[:, ["const", "datetimeint"]])
results = model.fit()
print(results.summary())
# -
# The coefficient for the datetime feature is statistically significant and positive.
# ```
# coef std err t P>|t| [0.025 0.975]
# -------------------------------------------------------------------------------
# const 2.727e+04 316.652 86.115 0.000 2.66e+04 2.79e+04
# datetimeint 4.7294 1.501 3.151 0.002 1.783 7.676
# ```
# This suggests the number of journeys is increasing on average by 4.7 journeys each day.
# We can plot this over all our data as follows:
# +
import matplotlib.dates as mdates
fig = plt.figure(num=None, figsize=(10, 6), dpi=80)
ax = fig.subplots()
# add trend
temp["exp"] = results.predict(temp.loc[:, ["const", "datetimeint"]])
ax.scatter("datetime", "count", data=temp, alpha=0.2)
plt.plot(temp["datetime"], temp["exp"], "r-", lw=2)
plt.xlabel("Date")
plt.ylabel("Number of trips/day")
# format the ticks
ax.xaxis.set_major_locator(mdates.YearLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter("%Y"))
ax.xaxis.set_minor_locator(mdates.MonthLocator())
fig.autofmt_xdate()
plt.savefig("images/against_time.png")
plt.show()
# -
# 
#
# ### Prophet Time Series Analysis
# This trend can be confirmed through the use of the Prophet library, which has some robustness to outliers.
# We can split the time series into its various time components - years, months and weeks.
# This is similar to running a Fourier analysis.
# The prophet library includes considerations for holidays dates.
# +
# Confirm trend with prophet (facebook)
from fbprophet import Prophet
time_model = Prophet()
prophet_data = temp.loc[:, ["datetime", "count"]]
prophet_data.columns = ["ds", "y"]
time_model.fit(prophet_data)
# Show components
forecast = time_model.predict(prophet_data)
fig_components = time_model.plot_components(forecast, weekly_start=1)
# Make future predictions
future = time_model.make_future_dataframe(periods=365, include_history=True)
fig_pred = time_model.plot(
time_model.predict(future), xlabel="Date", ylabel="Number of trips/day"
)
fig_components.savefig("images/prophet_comp.png")
fig_pred.savefig("images/prophet_pred.png")
# -
# 
#
# This matches our conclusions that weekends are less popular overall, and there is a summer month boom.
#
# The overall fitted trend, with a year prediction is shown below:
# 
#
# # Weather data
# Weather features are engineered by averaging the various weather measures over the whole day.
# 'Real feel' temperature is very similar to temperature other than low temperatures so only using temp_feels for now:
# ```
# cycle_data.plot(x="temp", y="temp_feels", kind="scatter")
# ```
#
# First, looking at different weather types:
# +
cycle_day_data["weather_code_label"] = cycle_day_data["weather_code"].replace(
{
1: "Clear",
2: "Scattered clouds",
3: "Broken clouds",
4: "Cloudy",
7: "Rain",
26: "Snowfall",
}
)
plt.figure(num=None, figsize=(10, 6), dpi=80)
sns.boxplot(
x="weather_code_label",
y="count",
data=cycle_day_data,
order=["Clear", "Scattered clouds", "Broken clouds", "Cloudy", "Rain", "Snowfall"],
)
plt.tight_layout()
plt.ylabel("Number of trips")
plt.xlabel("Weather type")
plt.savefig("images/weather_codes.png")
plt.show()
# -
# 
#
# There was only one day of data where snowfall was present, which explains the tight box plot.
# Generally it can be seen that fewer journeys are made if its raining or possibly snowing.
#
# Looking at temperature shows that high temperatures are related to higher journey counts as we would expect.
group_size = 2.5
temp = cycle_day_data.copy()
temp["temp_feels_rn"] = (temp["temp_feels"] / group_size).round() * group_size
plt.figure(num=None, figsize=(10, 6), dpi=80)
sns.boxplot(x="temp_feels_rn", y="count", data=temp)
plt.tight_layout()
plt.xlabel("Temperature")
plt.ylabel("Number of trips/hour")
plt.savefig("images/temperature.png")
plt.show()
#
# 
#
# However the above result will be confounded by seasonal trends.
# We should remove seasonal trends for a better look at how day to day temperature changes relate to journey numbers.
#
# We can apply this to the other weather features.
# Select the weather features into an explicit new frame.  The .copy() is
# required: assigning into a column-selection slice triggers pandas'
# SettingWithCopyWarning and the astype result may not be stored at all.
temp = cycle_day_data[["count", "temp_feels", "wind_speed", "hum", "is_weekend"]].copy()
temp["is_weekend"] = temp["is_weekend"].astype(int)  # integer hue labels for the legend
sns.pairplot(
    temp,
    hue="is_weekend",
    diag_kind="hist",
    corner=True,
)
plt.savefig("images/pairplot.png")
plt.show()
#
# 
#
# Similarly to temperature, humidity has a strong correlation with journey numbers.
# Whereas wind speed is fairly flat. The relationships are similar between weekdays and weekends.
#
# Better conditions generally correlate with high number of journeys.
# This is likely part confounded by the seasonality seen.
| TFLCycles/data_exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="zQBKILCUwz17"
# # Create a CNN to classify Cifar-10
#
# Learn about Cifar-10 here: https://www.cs.toronto.edu/~kriz/cifar.html
#
# In class you saw how to build a Convolutional Neural Network that classified Fashion MNIST. Take what you learned to build a CNN that recognizes the 10 classes of CIFAR. It will be a similar network, but there are some key differences you'll need to take into account.
#
# First, while MNIST images were 28x28 monochrome (1 color channel), CIFAR images are 32x32 color images (3 color channels).
#
# Second, MNIST images are simple, containing just the object, centered in the image, with no background. CIFAR ones can have the object with a background -- for example airplanes might have a cloudy sky behind them! As such you should expect your accuracy to be a bit lower.
#
# We start by setting up the problem for you.
# + id="3tqKtqW3wz19"
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
train_images = train_images / 255.0
test_images = test_images / 255.0
# + [markdown] id="GN6ZYIeRwz1-"
# We then define some of the model for you but leave most of it for you to fill in!
#
# *A hint: your model may want to learn some high level features and then classify them.*
# + id="t4y02yqZwz1_"
# Feature-learning stack (Conv -> Pool -> Conv -> Pool -> Conv) followed by
# a dense classifier head.  Input is a 32x32 RGB CIFAR image.
FIRST_LAYER = tf.keras.layers.Conv2D(32, (3, 3), activation="relu", input_shape=(32, 32, 3))
HIDDEN_LAYER_TYPE_1 = tf.keras.layers.MaxPool2D(2, 2)
HIDDEN_LAYER_TYPE_2 = tf.keras.layers.Conv2D(64, (3, 3), activation="relu")
HIDDEN_LAYER_TYPE_3 = tf.keras.layers.MaxPool2D(2, 2)
HIDDEN_LAYER_TYPE_4 = tf.keras.layers.Conv2D(64, (3, 3), activation="relu")
HIDDEN_LAYER_TYPE_5 = tf.keras.layers.Dense(64, activation="relu")
# CIFAR-10 has exactly 10 classes, so the softmax output needs 10 units.
# The previous Dense(20) allocated 10 dead outputs that no label can ever
# select, wasting parameters and diluting the softmax.
LAST_LAYER = tf.keras.layers.Dense(10, activation="softmax")

model = models.Sequential([
    FIRST_LAYER,
    HIDDEN_LAYER_TYPE_1,
    HIDDEN_LAYER_TYPE_2,
    HIDDEN_LAYER_TYPE_3,
    HIDDEN_LAYER_TYPE_4,
    layers.Flatten(),
    HIDDEN_LAYER_TYPE_5,
    LAST_LAYER,
])
# + [markdown] id="FLmE39odwz2A"
# You then need to define a loss function. And you can then train your model. Once training is done you'll see a plot of training and validation accuracy. You'll know you have a reasonable model with a reasonable loss function if your final training accuracy ends up in the 70s (or possibly higher).
#
# *A hint: your model may want to learn different categories.*
# + colab={"base_uri": "https://localhost:8080/", "height": 975} id="Qmpa4Dg0wz2B" outputId="16874733-9631-41a7-8a76-de05ddcc780d"
# sparse_* variant because the CIFAR labels are integer class ids, not
# one-hot vectors.
LOSS = "sparse_categorical_crossentropy"
NUM_EPOCHS = 20  # You can change this value if you like to experiment with it to get better accuracy
# Compile the model
model.compile(optimizer='sgd',
              loss=LOSS,
              metrics=['accuracy'])
# Fit the model; validation metrics are computed on the test split after
# each epoch.
history = model.fit(train_images, train_labels, epochs=NUM_EPOCHS,
                    validation_data=(test_images, test_labels))
# summarize history for accuracy: training vs. validation accuracy per epoch
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.xlim([0,NUM_EPOCHS])
plt.ylim([0.4,1.0])
plt.show()
# + [markdown] id="70sBeJwjwz2D"
# Finally, pick a better optimizer. And re-train your model. You'll know you have a reasonable model with a reasonable loss function and optimizer if your final training accuracy ends up in the 80s (or possibly higher).
#
# *A hint: your model may want to learn adaptively.*
# + colab={"base_uri": "https://localhost:8080/", "height": 975} id="l7FvkCV2wz2D" outputId="ec5af47b-6365-4629-e0c2-1d7cc8aed364"
# Adam adapts the learning rate per parameter, which converges faster than
# plain SGD on this model (the "learn adaptively" hint above).
OPTIMIZER = "adam"
# Compile the model (same loss and metrics as before, new optimizer).
model.compile(optimizer=OPTIMIZER,
              loss=LOSS,
              metrics=['accuracy'])
# Fit the model; note this continues from the weights left by the previous
# training cell rather than starting fresh.
history = model.fit(train_images, train_labels, epochs=NUM_EPOCHS,
                    validation_data=(test_images, test_labels))
# summarize history for accuracy: training vs. validation accuracy per epoch
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.xlim([0,NUM_EPOCHS])
plt.ylim([0.4,1.0])
plt.show()
| ML/TinyML_Assignment_2_3_9_Question.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Function Introspection
# +
#dummy code
i = 100
# TODO: Fix this function
# currently does nothing, but should do ....
# NOTE: this function exists purely as an introspection target for the cells
# below — its docstring, annotations, defaults and parameter kinds are all
# inspected later, so the signature must not be changed.
def my_func(a: "mandatory positional",
            b: "optional positional"=1,
            c=2,
            *args: "add extra positional here",
            kw1,
            kw2=100,
            kw3=200,
            **kwargs: "provide extra kw-only here") -> "does nothing":
    """This function does nothing but does have various parameters and annotations.
    """
    # Locals i/j shadow the module-level i above; they show up in co_varnames.
    i = 10
    j = 20
    a = i + j
    return a
# -
my_func.__doc__
my_func.__annotations__
# Functions are objects: arbitrary attributes can be attached to them.
my_func.short_description = "this is a function that does nothing much"
my_func.short_description
dir(my_func)
my_func.__name__
my_func.__name__
id(my_func)
# Functions are first-class: they can be passed to other functions and inspected there.
def func_call(f):
    # Print the identity and name of whatever function object was passed in.
    print(id(f))
    print(f.__name__)
func_call(my_func)
my_func.__defaults__    # tuple of defaults for the positional parameters (b, c)
my_func.__kwdefaults__  # dict of defaults for the keyword-only parameters (kw2, kw3)
my_func.__code__
dir(my_func.__code__)
my_func.__code__.co_name
my_func.__code__.co_varnames  # parameter names first, then the locals (i, j)
my_func.__code__.co_argcount  # counts only the positional parameters (a, b, c)
import inspect
from inspect import isfunction, ismethod, isroutine
a = 10
isfunction((a))        # False — an int is not a function
isfunction((my_func))  # True
ismethod(my_func)      # False — my_func is not bound to an instance
# Minimal class used to contrast plain functions with bound methods.
class MyClass:
    def f(self):
        pass
isfunction((MyClass.f))  # True: accessed via the class, f is a plain function
my_obj = MyClass()
my_obj = MyClass()
isfunction(my_obj.f)   # False: accessed via an instance, f is a bound method
ismethod(my_obj.f)     # True
isroutine((MyClass.f)) # True: isroutine accepts both functions and methods
print(inspect.getsource(my_func))
inspect.getmodule(my_func)
inspect.getmodule(print)
import math
inspect.getmodule(math.sin)
inspect.getcomments((my_func))  # the comment block directly above the def
my_func.__doc__
inspect.signature(my_func)
dir(inspect.signature(my_func))
my_func.__annotations__
inspect.signature(my_func).return_annotation
sig = inspect.signature(my_func)
sig
sig.parameters  # ordered mapping of parameter name -> inspect.Parameter
for k, v in sig.parameters.items():
    print(k, type(v))
for k, v in sig.parameters.items():
    print(dir(v))
# Each Parameter carries its name, default, annotation and kind
# (POSITIONAL_OR_KEYWORD, VAR_POSITIONAL, KEYWORD_ONLY, VAR_KEYWORD, ...).
for param in sig.parameters.values():
    print('Name:', param.name)
    print('Default:', param.default)
    print('Annotation:', param.annotation)
    print('Kind:', param.kind)
    print('-------------------------------------------')
help(divmod)
divmod(4, 3)
# divmod's parameters are positional-only (the '/' shown by help above),
# so this keyword call raises TypeError — that is the point of the demo.
divmod(x=3, y=3)
for param in inspect.signature(divmod).parameters.values():
    print(param.kind)
| my_classes/FirstClassFunctions/finction-introspection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Ninja - Secret places to open a restaurant (Part I)
# **<NAME>**
# Data Scientist
# ## Table of contents
# 1. [Introduction](#introduction)
# 2. [Data acquisition](#data)
# 3. [Methodology](#methodology)
# 4. [Analysis](#Analysis)
# 5. [Results and Discussion](#results)
# 6. [Conclusion](#conclusion)
# ## 1. Introduction <a name="introduction"></a>
# ### 1.1 Background
# The city of Berlin is well known to be a cosmopolitan city where you can find people from all around the world. Berlin offers a very wide commercial variety, especially in the area of gastronomy. The trend that comes to stay, are Asian restaurants, particularly Japanese restaurants. Although there are a lot of them spread in the city, there are new ones opening all the time. Therefore to analyze locations, types, and the number of these restaurants is a plus for those who want to open a new restaurant in the city.
# ### 1.2 Problem
# Searching for an optimal location to open a Japanese restaurant in the city of Berlin can be challenging. One could think that the best location for it should be at a place where there is no Japanese restaurant. But the problem is that perhaps most of the interested customers, instead of going to an isolated neighborhood, prefer to go to a popular neighborhood, where there are more options and also there is movement of people. While the competition will be strong in these regions, the flow of interested customers in this specific region will be relevant as well. Many people, for example, go on the weekends to a specific Japanese restaurant and when they arrive, there is a large line waiting for them. This usually happens because it is also a new trend in Berlin, in some popular restaurants, not to have an option to make a reservation. The good news is that perhaps some of the customers, those who do not want to wait too long in line, might want to search for similar options in the neighborhood.
# ### 1.3 Interest
# This project is ideal for a person or a branch that is interested in opening a Japanese restaurant.
import numpy as np # library to handle data in a vectorized manner
# +
import pandas as pd # library for data analysis
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
import json # library to handle JSON files
# -
import requests # library to handle requests
# +
from pandas.io.json import json_normalize # tranform JSON file into a pandas dataframe
# Matplotlib and associated plotting modules
import matplotlib.cm as cm
import matplotlib.colors as colors
# -
# #!pip install seaborn
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.ticker import EngFormatter
from matplotlib.ticker import PercentFormatter
# import k-means from clustering stage
from sklearn.cluster import KMeans
import geocoder
# from geopy.geocoders import Nominatim
# # !pip install pygeocoder
# from pygeocoder import Geocoder
# +
# #!conda install -c conda-forge geopy --yes
#from geopy.geocoders import Nominatim # convert an address into latitude and longitude values
# +
# #!conda install -c conda-forge folium=0.5.0 --yes
#import folium # map rendering library
# +
#print('Libraries imported.')
# -
# ## 2. Data acquisition <a name="Data source "></a>
# ### 2.1 Data source
# The data and tools that I will use are the following:
#
# * **Foursquare API** to select the number of restaurants and their location in some neighborhoods of Berlin
# * **Geocoder** to get the latitudes and longitudes of places to rent, together with information from https://www.sebuyo.com
# ### 2.2 Feature selection
# * I will first create a dataset through the Foursquare API, exploring several types of venues, such as ID, name, category (Japanese restaurant), latitude, longitude, neighborhood, and distance (in meters) to Charlottenburg, a borough of Berlin which is famous for its Japanese restaurants. Then I will apply the search again using the Foursquare API for public transportation categories, city train, and metro in Berlin.
#
# * I will save the data collected using Foursquare API to a CSV file and then read them with `Pandas`.
#
# * Then, I will create another dataset that has information about available places to rent in Berlin. First, I will create the features "postal codes" and "prices" of these places and then with the help of Geocoder, I will get the latitude, longitude features. Then, I will save to a file CSV and read it with `Pandas`.
# +
# Foursquare API credentials (placeholders — fill in before running).
CLIENT_ID = 'myclientID' # your Foursquare ID
CLIENT_SECRET = '<PASSWORD>' # your Foursquare Secret
VERSION = '20180605' # Foursquare API version
print('Your credentails:')
print('CLIENT_ID: ' + CLIENT_ID)
# NOTE(review): printing the client secret leaks it into the saved notebook
# output — consider removing this before sharing the notebook.
print('CLIENT_SECRET:' + CLIENT_SECRET)
# -
# define Berlin's geolocation coordinates (center of Charlottenburg)
berlin_latitude = 52.50333132 #52.520008
berlin_longitude = 13.308665432 #13.404954
# FIX: the next line was left as bare prose ("type your answer here") from the
# exercise template; as a statement it is a SyntaxError, so it is commented out.
# type your answer here
LIMIT = 500 # limit of number of venues returned by Foursquare API
radius = 15000 # define radius (meters)
category = '4bf58dd8d48988d111941735' # Japanese restaurants
#category = '4bf58dd8d48988d1fc931735' # S-Bahnhof
#category = '4bf58dd8d48988d1fd931735' #U-Bahnhof
# Foursquare "explore" endpoint: credentials, location, category, radius and limit.
url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&categoryId={}&radius={}&limit={}'.format(
    CLIENT_ID,
    CLIENT_SECRET,
    VERSION,
    berlin_latitude,
    berlin_longitude,
    category,
    radius,
    LIMIT)
# Call the Foursquare API and parse the JSON response into a dict.
results = requests.get(url).json()
# NOTE(review): the return value is discarded (trailing ';' only suppresses
# notebook output) — this line has no effect.
results.values();
# function that extracts the category of the venue
def get_category_type(row):
    """Return the name of the venue's first category, or None if it has none.

    Works on both raw venue dicts (key 'categories') and json_normalize'd
    rows (key 'venue.categories').
    """
    try:
        categories_list = row['categories']
    except KeyError:  # FIX: was a bare except; only a missing key should fall through
        categories_list = row['venue.categories']
    if not categories_list:
        return None
    return categories_list[0]['name']
# +
venues = results['response']['groups'][0]['items']
# Formatted address (used below as a neighborhood proxy) and distance to the
# search centre, one entry per returned venue, in API order.
venues_neighborhood = [results['response']['groups'][0]['items'][n]['venue']['location']['formattedAddress'] for n in range(len(venues))]
venues_distance = [results['response']['groups'][0]['items'][n]['venue']['location']['distance'] for n in range(len(venues))]
# NOTE(review): pandas.io.json.json_normalize is deprecated in newer pandas;
# prefer pandas.json_normalize.
nearby_venues = json_normalize(venues) # flatten JSON
# filter columns
filtered_columns = ['venue.id','venue.name', 'venue.categories', 'venue.location.lat', 'venue.location.lng']
nearby_venues = nearby_venues.loc[:, filtered_columns]
# filter the category for each row
nearby_venues['venue.categories'] = nearby_venues.apply(get_category_type, axis=1)
# clean columns: keep only the last dotted component ('venue.name' -> 'name')
nearby_venues.columns = [col.split(".")[-1] for col in nearby_venues.columns]
# NOTE(review): the next expression builds a list and discards it — dead code.
[venues_neighborhood[k][0] for k in range(len(venues_neighborhood))]
#nearby_venues['neighborhood'] = venues_neighborhood[0][0]
nearby_venues['neighborhood'] = [venues_neighborhood[k][0] for k in range(len(venues_neighborhood))]
nearby_venues['distance [m]'] = venues_distance
df = nearby_venues
df.head()

import os
outname = 'japanesecategory.csv'
# NOTE(review): outdir '/' is the filesystem root — writing there usually
# fails without admin rights; './' was probably intended. TODO confirm.
outdir = '/'
if not os.path.exists(outdir):
    os.mkdir(outdir)
fullname = os.path.join(outdir, outname)
df.to_csv(fullname)
# -
# Using the website https://www.sebuyo.com I searched for the prices and postal codes of the available places to rent in Berlin
# Rental offers scraped from sebuyo.com: postal code and asking price.
# NOTE(review): a Price of 0 presumably means "price on request" — TODO confirm.
df_rent = pd.DataFrame({'Postcode': [10247, 10777, 10713, 10719, 12359, 12057, 10785, 12043, 13595, 12053, 10435, 10119, 10245, 13597, 12347, 10115, 10717, 13585, 12057, 16727],'Price': [2400, 1142.36, 3269, 5900, 300, 400, 3900, 10000, 0, 1600, 2500, 3000, 1095, 0, 1000, 0, 2700, 570, 400, 0]})
df_rent.head()
# Using Geocoder I found the respective latitudes and longitudes from the postal code information.
def get_latlng(postal_code):
    """Resolve a Berlin postal code to [latitude, longitude] via the ArcGIS
    geocoder, retrying until the service returns a result."""
    while True:
        response = geocoder.arcgis('{}, Berlin, Berlin'.format(postal_code))
        if response.latlng is not None:
            return response.latlng
codepost = df_rent['Postcode']
# Geocode every postal code; coords is a list of [lat, lng] pairs in row order.
coords = [get_latlng(postal_code)
          for postal_code
          in codepost.tolist()]
df_coords = pd.DataFrame(coords, columns = ['Latitude', 'Longitude'])
# Assignment aligns on the shared default RangeIndex of both frames.
df_rent['Latitude'] = df_coords['Latitude']
df_rent['Longitude'] = df_coords['Longitude']
| OliveiraDataNinjaPartI.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#import pandas module
import pandas as pd
#create path for files
csv1_path = "./Resources/JC-201901-citibike-tripdata.csv/JC-201901-citibike-tripdata.csv"
bike_data = pd.read_csv(csv1_path)
bike_data
# Append the remaining months (Feb-Dec 2019). File names zero-pad the month
# (JC-201902 ... JC-201912), which {x:02d} produces for every month — this
# replaces the four copy-pasted branches of the original loop.
frames = [bike_data]
for x in range(2, 13):
    file = f"JC-2019{x:02d}-citibike-tripdata.csv"
    path = f"./Resources/{file}/{file}"
    frames.append(pd.read_csv(path))
# One concat instead of repeated DataFrame.append: append inside a loop is
# quadratic and was removed in pandas >= 2.0.
bike_data = pd.concat(frames, ignore_index=True)
bike_data
output_data_file1 = "./Resources/JC-2019-citibike-tripdata.csv"
# FIX: originally wrote bike_data2, which is not defined until the next cell
# (NameError at runtime); the 2019 frame is bike_data.
bike_data.to_csv(output_data_file1, index_label = "Bike ID")
#create path for files
csv2_path = "./Resources/JC-202001-citibike-tripdata.csv/JC-202001-citibike-tripdata.csv"
bike_data2 = pd.read_csv(csv2_path)
bike_data2
# Append the remaining months (Feb-Dec 2020); {x:02d} zero-pads single-digit
# months to match the file naming (JC-202002 ... JC-202012), replacing the
# four duplicated branches of the original loop.
frames = [bike_data2]
for x in range(2, 13):
    file = f"JC-2020{x:02d}-citibike-tripdata.csv"
    path = f"./Resources/{file}/{file}"
    frames.append(pd.read_csv(path))
# One concat instead of repeated DataFrame.append: append inside a loop is
# quadratic and was removed in pandas >= 2.0.
bike_data2 = pd.concat(frames, ignore_index=True)
bike_data2
output_data_file2 = "./Resources/JC-2020-citibike-tripdata.csv"
bike_data2.to_csv(output_data_file2, index_label = "Bike ID")
| .ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cobra
import pandas as pd
import glob
# +
def changeMedia(model, media_list):
    """Open every exchange reaction whose id appears in media_list (lower
    bound -1000) and close all other exchange reactions (lower bound 0).

    Returns the same model object, modified in place.
    """
    for exchange in model.exchanges:
        new_bound = -1000.0 if exchange.id in media_list else 0.0
        model.reactions.get_by_id(exchange.id).lower_bound = new_bound
    return model
def create_media_dict(media_info_dict, universal_model, estimated_uptake=False):
    '''
    Build a media dictionary from a parsed media definition.
    in:
        media_info_dict - dict mapping compound ids (e.g. 'cpd00009') to
                          uptake-rate strings.
        universal_model - cobra.core.Model object. Compounds missing from this
                          model are skipped with a printed warning.
        estimated_uptake - if True, use the per-compound rate from
                           media_info_dict; otherwise every bound is -1000.
    out:
        media_dict - dictionary of {Metabolite('cpd#####_e'): lower_bound}.
    '''
    media_dict = {}
    # Universal-model metabolites carry a '_c' (cytosol) compartment suffix.
    known_ids = set(met.id for met in universal_model.metabolites)
    for cpd_id in media_info_dict:
        met_c = cpd_id + '_c'
        if met_c not in known_ids:
            print('WARNING: metabolite ' + met_c + ' is not in universal model. Excluded from media.')
            continue
        met_obj = universal_model.metabolites.get_by_id(met_c).copy()
        met_obj.id = met_obj.id[:-1] + 'e'  # swap compartment: '..._c' -> '..._e'
        if estimated_uptake:
            key = met_c.split('_c')[0]  # compound id without compartment suffix
            media_dict[met_obj] = float(media_info_dict[key])
        else:
            media_dict[met_obj] = -1000.0  # assume -1000 instead of file values
    return media_dict
def set_media(model, media, verbose=False):
    '''
    Apply a media dictionary to a model by resetting its exchange bounds.
    The model is changed in place (None is returned): every existing
    'EX_*_e' exchange reaction is closed first, then each media metabolite
    either has its existing exchange opened to the given bound, or a new
    exchange reaction is created for it. Metabolite ids in the dictionary
    should be cpd#####_e for consistency with modelSEED ids.
    in:
        model - cobra.core.Model object
        media - dictionary of {cobra.core.Metabolite: lower_bound(float)}
        verbose - print a message whenever a new exchange reaction is added
    out:
        None
    '''
    reaction_ids = [rxn.id for rxn in model.reactions]
    # Close all exchange reactions before opening the requested ones.
    for rxn_id in reaction_ids:
        if rxn_id.startswith('EX_') and rxn_id.endswith('_e'):
            model.reactions.get_by_id(rxn_id).lower_bound = 0.0
    for met, bound in media.items():
        exchange_id = 'EX_' + met.id
        if exchange_id in reaction_ids:
            model.reactions.get_by_id(exchange_id).lower_bound = bound
        else:
            # No exchange reaction for this metabolite yet — create one.
            if verbose:
                print("added exchange rxn for " + met.name)
            new_exchange = cobra.Reaction(exchange_id)
            new_exchange.name = met.name + ' exchange'
            new_exchange.add_metabolites({met: -1})
            new_exchange.lower_bound = bound
            new_exchange.upper_bound = 1000
            model.add_reaction(new_exchange)
    model.repair()
# +
# Read in single media condition from media text file
# Create media object based on text file
# Close all exchange reactions
# Open all exchange reactions to make model.media match
# Check-for/add transporters for all media components
# Run FBA and save resulting value to generate heatmap
# -
# Parse the media definition file. Each data line looks like:
#   <name> <4 columns, skipped — presumably metadata> <cpd;uptake> <cpd;uptake> ...
with open("../Data/media_list_with_meta.txt", "r") as media:
    first = media.readline()  # header line, discarded
    name_list = []
    media_dict_list = []
    for line in media:
        x_dict = {}
        line = line.split()
        name_list.append(line[0])
        for x in line[5:]:
            x_temp = x.split(";")
            x_dict[x_temp[0]] = x_temp[1]
            # NOTE(review): flushes when the token equals the LAST token, which
            # would misfire early if a token were duplicated — TODO confirm.
            if x == line[-1]:
                media_dict_list.append(x_dict)
                x_dict = {}
print(name_list[0])
media_dict_list[0]
# Universal (pan) model — presumably Gram-positive, per the file name — used
# to look up metabolite objects when building media dictionaries.
universal = cobra.io.load_json_model("../Data/GramPosUni.json")
media_info_dict = media_dict_list[0]
media_dict = create_media_dict(media_info_dict, universal, estimated_uptake=True)
model = cobra.io.read_sbml_model('../models/220668.9.xml')
# model.medium
# FBA objective value (growth rate) without building a full solution object.
obj_val = model.slim_optimize()
print(obj_val)
# NOTE(review): units unclear — (1/rate)*3600 is labeled minutes; verify.
print((1/obj_val)*3600) #doubling time in minutes
# Compound ids of everything the model can currently take up (medium exchanges,
# ids like 'EX_cpd#####_e' -> 'cpd#####').
model_input_exchanges = model.medium
model_input_exchanges_ids = list(model_input_exchanges.keys())
media_component_list = []
for cpd_id in model_input_exchanges_ids:
    cpd = cpd_id.split("_")[1]
    media_component_list.append(cpd)
def find_transporters(model):
    """Return the ids of all reactions that consume any medium compound
    (matched by its extracellular '_e' metabolite id) as a reactant.

    Exchange ids in model.medium look like 'EX_cpd#####_e'; the compound id
    is the second '_'-separated token, re-suffixed with '_e'.
    """
    medium_mets = [ex_id.split("_")[1] + "_e" for ex_id in model.medium.keys()]
    transporters = set()
    for met_id in medium_mets:
        for rxn in model.reactions:
            for reactant in rxn.reactants:
                if reactant.id == met_id:
                    transporters.add(rxn.id)
    return transporters
# +
def media_cpds(model):
    """Return the extracellular ('_e') metabolite ids for every exchange
    reaction currently open in the model's medium.

    Exchange ids look like 'EX_cpd#####_e'; the compound id is the second
    '_'-separated token, re-suffixed with '_e'.
    """
    return [ex_id.split("_")[1] + "_e" for ex_id in model.medium.keys()]
def find_transporters(model):
    """Identify biochemical reactions involving medium compounds, and which
    medium compounds have no such reaction.

    Returns a tuple (transporters, missing_importers):
        transporters - set of reaction ids (ids starting with 'rxn', i.e.
                       biochemical reactions, not 'EX_' exchanges) that
                       involve any extracellular medium metabolite.
        missing_importers - set of '_e' medium metabolite ids that appear in
                            no such reaction.
    """
    wanted = set(media_cpds(model))
    transporters = set()
    imported = set()
    for rxn in model.reactions:
        # Only 'rxn...' ids count; exchange pseudo-reactions are skipped.
        if not rxn.id.startswith("rxn"):
            continue
        for met in rxn.metabolites:
            if met.id in wanted:
                transporters.add(rxn.id)
                imported.add(met.id)
    missing_importers = wanted - imported
    return (transporters, missing_importers)
def add_simple_import(model, cpd):
    """Add a reversible diffusion-style transporter for a compound and attach
    it to the model.

    Parameters
    ----------
    model : cobra.core.Model
        Model to extend; must already contain both the '_e' and '_c'
        compartment forms of the compound.
    cpd : str
        Compound id, with or without a compartment suffix
        (e.g. 'cpd01079' or 'cpd01079_e').

    Returns
    -------
    cobra.Reaction
        The newly created transport reaction ('trans_<cpd>').
    """
    # FIX: the original body referenced undefined names (cpd01079_e, cpd01079_c,
    # x_dict, x_temp), hard-coded one compound, ignored the cpd argument, and
    # never attached the reaction to the model.
    base = cpd.split("_")[0]  # strip any compartment suffix
    transporter = cobra.Reaction('trans_' + base)
    transporter.name = 'Automatically added transporter'
    transporter.lower_bound = -1000.
    transporter.upper_bound = 1000.
    transporter.add_metabolites({
        model.metabolites.get_by_id(base + '_e'): -1.0,
        model.metabolites.get_by_id(base + '_c'): 1.0,
    })
    model.add_reaction(transporter)
    return transporter
# genome_ids_list
# models = glob.glob('../models/*.xml')
# genome_ids = [x.replace("../models/","").replace(".xml","") for x in models]
# Accumulate, across every model and media condition, the medium compounds
# that have no importing reaction.
all_compounds = set()
genome_ids = ['220668.9']
for genome_id in genome_ids:
    model = cobra.io.read_sbml_model('../models/'+ genome_id +'.xml')
    # Ensure free water exchange
    model.reactions.get_by_id('rxn05319_c').name = "Water transport"
    model.reactions.get_by_id('rxn05319_c').bounds = (-1000., 1000.)
    for media_info_dict in media_dict_list:
        media_dict = create_media_dict(media_info_dict, universal, estimated_uptake=False)
        set_media(model, media_dict)
        importers, compounds = find_transporters(model)
        all_compounds |= compounds
# -
all_compounds
# Inspect one of the flagged compounds.
model.metabolites.get_by_id('cpd00015_e')
# +
# Reactant / product / all-metabolite id sets for the first reaction.
reactant_ids = set()
for reactant in model.reactions[0].reactants:
    reactant_ids.add(reactant.id)
product_ids = set()
for product in model.reactions[0].products:
    product_ids.add(product.id)
rxn_met_ids = set()
for rxn_met in model.reactions[0].metabolites:
    rxn_met_ids.add(rxn_met.id)
# -
model.reactions.get_by_id('rxn10806_c')
for metabolite in list(model.reactions[1].metabolites.keys()):
    print(metabolite.name)
model.metabolites
# Change media for one model
set_media(model, media_dict)
model.medium
# model.slim_optimize()
# NOTE(review): with the later find_transporters definition this is a
# (transporters, missing_importers) tuple, not a bare set — confirm intent.
transporters = find_transporters(model)
transporters
# Run through all media conditions, collecting one FBA objective per medium.
model = cobra.io.read_sbml_model('../models/220668.9.xml')
obj_val_list = []
for media_info_dict in media_dict_list:
    media_dict = create_media_dict(media_info_dict, universal, estimated_uptake=True)
    set_media(model, media_dict)
    obj_val_list.append(model.slim_optimize())
obj_val_list
# +
| Code/OldCode/OldCode_T6_Change_Media_and_Run_FBA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Pytorch (Buzznauts)
# language: python
# name: buzznauts
# ---
# <a href="https://colab.research.google.com/github/eduardojdiniz/Buzznauts/blob/master/scripts/demo_VideoDataFrame_colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Demo VideoDataFrame Class
# This demo uses the Algonauts dataset.
#
# TABLE OF CODE CONTENTS:
# 1. Minimal demo without image transforms
# 2. Minimal demo without sparse temporal sampling for single continuous frame clips, without image transforms
# 3. Demo with image transforms
# 4. Demo with image transforms and dataloader
#
# For more details about the VideoDataFrame Class, see the [VideoDataset Repo](https://video-dataset-loading-pytorch.readthedocs.io/en/latest/VideoDataset.html)
# ### Setup
# Download Buzznauts
# !pip install duecredit --quiet
# !git clone https://github.com/eduardojdiniz/Buzznauts --quiet
# Mount Google Drive
from google.colab import drive
drive.mount("/content/drive")
# Load Buzznauts data functions
# !pip install decord --quiet
from Buzznauts.Buzznauts.data.utils import plot_video
from Buzznauts.Buzznauts.data.videodataframe import VideoFrameDataset, ImglistToTensor
# Import pytorch
from torchvision import transforms
import torch
# Set videos and annotation file path
import os.path as op
# Stimulus frames and the annotation file live on the mounted Google Drive.
stimuli = "/content/drive/MyDrive/Buzznauts/data/stimuli"
videos_root = op.join(stimuli, "frames")
annotation_file = op.join(videos_root, "annotations.txt")
# ### Demo 1 - Sampled Frames, without Image Transforms
# +
dataset = VideoFrameDataset(
    root_path=videos_root,
    annotationfile_path=annotation_file,
    num_segments=3,        # sparse temporal sampling: 3 segments per video...
    frames_per_segment=1,  # ...with 1 frame sampled from each
    imagefile_template='img_{:05d}.jpg',
    transform=None,        # no preprocessing: raw PIL images come back
    random_shift=True,
    test_mode=False)

sample = dataset[0]
frames = sample[0]  # list of PIL images
label = sample[1]   # integer label

plot_video(rows=1, cols=3, frame_list=frames, plot_width=15., plot_height=3.)
# -
# ### Demo 2 - Single Continuous Frame Clip instead of Sampled Frames, without Image Transforms
# +
dataset = VideoFrameDataset(
    root_path=videos_root,
    annotationfile_path=annotation_file,
    num_segments=1,        # a single segment...
    frames_per_segment=9,  # ...of 9 frames -> one continuous clip
    imagefile_template='img_{:05d}.jpg',
    transform=None,
    random_shift=True,
    test_mode=False)

sample = dataset[5]
frames = sample[0]  # list of PIL images
label = sample[1]   # integer label

plot_video(rows=3, cols=3, frame_list=frames, plot_width=10., plot_height=5.)
# -
# ### Demo 3 - Sampled Frames, with Image Transforms
def denormalize(video_tensor):
    """Undo ImageNet mean/std normalization, rescale to 0-255, and move
    channels last.

    Parameters
    ----------
    video_tensor : torch.FloatTensor
        A (FRAMES x CHANNELS x HEIGHT x WIDTH) normalized tensor.

    Returns
    -------
    numpy.ndarray
        A (FRAMES x HEIGHT x WIDTH x CHANNELS) uint8 array.
    """
    # Inverse of Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    # x_orig = (x_norm - (-mean/std)) / (1/std) = x_norm * std + mean
    undo_normalize = transforms.Normalize(
        mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.225],
        std=[1 / 0.229, 1 / 0.224, 1 / 0.225])
    restored = undo_normalize(video_tensor) * 255.
    return restored.type(torch.uint8).permute(0, 2, 3, 1).numpy()
# +
# As of torchvision 0.8.0, torchvision transforms support batches of images
# of size (BATCH x CHANNELS x HEIGHT x WIDTH) and apply deterministic or random
# transformations on the batch identically on all images of the batch. Any torchvision
# transform for image augmentation can thus also be used for video augmentation.
preprocess = transforms.Compose([
    ImglistToTensor(),  # list of PIL images to (FRAMES x CHANNELS x HEIGHT x WIDTH) tensor
    transforms.Resize(299),  # image batch, resize smaller edge to 299
    transforms.CenterCrop(299),  # image batch, center crop to square 299x299
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

dataset = VideoFrameDataset(
    root_path=videos_root,
    annotationfile_path=annotation_file,
    num_segments=5,
    frames_per_segment=1,
    imagefile_template='img_{:05d}.jpg',
    transform=preprocess,  # frames now come back as one normalized tensor
    random_shift=True,
    test_mode=False
)

sample = dataset[2]
frame_tensor = sample[0]  # tensor of shape (NUM_SEGMENTS*FRAMES_PER_SEGMENT) x CHANNELS x HEIGHT x WIDTH
label = sample[1]  # integer label
print('Video Tensor Size:', frame_tensor.size())
# -
# Undo the normalization so the frames are viewable, then plot all 5 frames.
frame_array = denormalize(frame_tensor)
plot_video(rows=1, cols=5, frame_list=frame_array, plot_width=15., plot_height=3.)
# ### Demo 4 - Sampled Frames Dataloader, with Image Transforms
# +
dataloader = torch.utils.data.DataLoader(
    dataset=dataset,
    batch_size=2,     # two clips per batch
    shuffle=True,
    num_workers=2,    # two background loader workers
    pin_memory=True)  # page-locked host memory for faster host->GPU copies

for epoch in range(10):
    for video_batch, labels in dataloader:
        """
        Insert Training Code Here
        """
        print(labels)
        print("\nVideo Batch Tensor Size:", video_batch.size())
        break  # demo: inspect only the first batch...
    break      # ...of the first epoch
# -
# method-1: index-based 80/20 split with SubsetRandomSampler (the first 80% of
# dataset order becomes train; sampling order within each subset is random).
from torch.utils.data.sampler import SubsetRandomSampler
train_size = int(0.8 * len(dataset))
# FIX: 'indices' was used below but never defined (NameError); build the full
# index list before slicing it into train/test portions.
indices = list(range(len(dataset)))
train_set = torch.utils.data.DataLoader(dataset, batch_size=2, sampler=SubsetRandomSampler(indices[:train_size]))
test_set = torch.utils.data.DataLoader(dataset, batch_size=2, sampler=SubsetRandomSampler(indices[train_size:]))
# method-2: let PyTorch split the dataset itself into two random subsets
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])
| scripts/test_train_demo_VideoDataFrame_colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: LSST
# language: python
# name: lsst
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Import Tricks: Remote Modules, and Importing Notebooks
# <br>Author(s): **<NAME>** ([@drphilmarshall](https://github.com/LSSTScienceCollaborations/StackClub/issues/new?body=@drphilmarshall))
# <br>Maintainer(s): **<NAME>** ([@kadrlica](https://github.com/LSSTScienceCollaborations/StackClub/issues/new?body=@kadrlica))
# <br>Last Verified to Run: **2020-07-13**
# <br>Verified Stack Release: **20.0.0**
#
# ### Learning Objectives:
#
# After working through this tutorial you should be able to:
# 1. Use the `stackclub.wimport` function to import a python module from the web;
# 2. Import a notebook as a module, following an `import stackclub`
# 3. Understand the current limitations of these utilities
#
# ### Logistics
# This notebook is intended to be runnable on `lsst-lsp-stable.ncsa.illinois.edu` from a local git clone of https://github.com/LSSTScienceCollaborations/StackClub.
#
# ## Set-up
# -
# We'll need the `stackclub` package to be installed. If you are not developing this package, you can install it using `pip`, like this:
# ```
# pip install git+git://github.com/LSSTScienceCollaborations/StackClub.git#egg=stackclub
# ```
# If you are developing the `stackclub` package (eg by adding modules to it to support the Stack Club tutorial that you are writing), you'll need to make a local, editable installation. In the top level folder of the `StackClub` repo, do:
# ! cd .. && python setup.py -q develop --user && cd -
# When editing the `stackclub` package files, we want the latest version to be imported when we re-run the import command. To enable this, we need the %autoreload magic command.
# %load_ext autoreload
# %autoreload 2
# + [markdown] slideshow={"slide_type": "slide"}
# You can find the Stack version that this notebook is running by using eups list -s on the terminal command line:
# -
# What version of the Stack am I using?
# ! echo $HOSTNAME
# ! eups list -s | grep lsst_distrib
# + [markdown] slideshow={"slide_type": "subslide"}
# For this tutorial we'll need the following modules:
# -
import stackclub
# + [markdown] slideshow={"slide_type": "slide"}
# ## Importing Python Modules from the Web
#
# Sometimes we may want to import a python module without installing an entire package - eg. from a GitHub gist. We can do that by first downloading it and then importing it: this is what the `stackclub.wimport` function does.
# + slideshow={"slide_type": "-"}
# # %load -n stackclub.wimport
# -
# For example, suppose the ``stackclub`` library did _not_ include the `where_is` module: we could still download it and import it, like this:
# Download the where_is module from GitHub and import it as a module object.
where_is_url = "https://github.com/LSSTScienceCollaborations/StackClub/raw/master/stackclub/where_is.py"
so = stackclub.wimport(where_is_url, vb=True)  # vb=True: verbose output
print(so)
# In this example, `so` is an imported module - so we can invoke its functions as normal.
from lsst.daf.persistence import Butler
so.where_is(Butler.get, in_the='source')
# A second wimport example: a matplotlib example script fetched from the web.
mpl_url = "https://matplotlib.org/mpl_examples/lines_bars_and_markers/fill_demo_features.py"
mpl = stackclub.wimport(mpl_url, vb=True)
# Here's another example - a simple python gist:
# ## Importing Notebooks as Modules
#
# Sometimes we will come across Jupyter notebooks that contain functions and classes that we can re-use. Rather than duplicating the code, we can import the other notebook (ie, run it), and then call the function or class as it is.
# The ability to import notebooks as modules is enabled by the `import stackclub` statement, which sets up a new "loader" that can handle Jupyter notebooks. Here's a demo, using the `HelloWorld` notebook that is also in this folder:
# Importing stackclub registers a loader that lets Jupyter notebooks in the
# current working directory be imported like modules.
import stackclub
import HelloWorld as Yo
Yo.take_that_first_baby_step(and_follow_up="I'm using code imported from a notebook!")
# ## Current Limitations
#
# At present, it is not possible to `wimport` a Jupyter notebook. But this would be very useful functionality to have, indeed!
# ## Summary
#
# You should now be able to import and use remote modules with the `stackclub.wimport` function, and import local notebooks (in the current working directory) as modules.
| GettingStarted/ImportTricks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Setting up
# +
import pandas as pd
# NOTE(review): train_test_split is imported but not used in this notebook.
from sklearn.model_selection import train_test_split
# enable multiple outputs per cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
# -
# ## Preparing dataset into dataframe
# +
# header for dataset — the LIAR TSVs ship without a header row, so column
# names are supplied explicitly.
headernames = ['id', 'label', 'statement', 'subject', 'speaker', 'job', 'state', 'party',
               'barely_true', 'false', 'half_true', 'mostly_true', 'pants_on_fire', 'venue']
df_train = pd.read_csv('./datasets/liar_dataset/train.tsv', sep='\t', names=headernames)
df_test = pd.read_csv('./datasets/liar_dataset/test.tsv', sep='\t', names=headernames)
df_valid = pd.read_csv('./datasets/liar_dataset/valid.tsv', sep='\t', names=headernames)
# grouping labels into true or fake
# -
# ## Having a look
# remove duplicate data entries
# NOTE: keep=False drops *every* row whose statement is duplicated (all
# copies), not just the later occurrences.
df_train.drop_duplicates(subset ="statement",
                         keep = False, inplace = True)
df_test.drop_duplicates(subset ="statement",
                        keep = False, inplace = True)
df_valid.drop_duplicates(subset ="statement",
                         keep = False, inplace = True)
# we are only doing text-content analysis
# creating new dataframe with only statement and label
statement_train = df_train["statement"]
label_train = df_train["label"]
train_cleaned = pd.concat([statement_train, label_train], axis=1)
statement_test = df_test["statement"]
label_test = df_test["label"]
test_cleaned = pd.concat([statement_test, label_test], axis=1)
statement_valid = df_valid["statement"]
label_valid = df_valid["label"]
valid_cleaned = pd.concat([statement_valid, label_valid], axis=1)
# +
# change label string to numerical values — six-point ordinal truthfulness
# scale from -0.25 (pants-fire) up to 1.0 (true).
truth_val = {'false':0.,'half-true':0.5,'mostly-true':0.75,'true':1.
,'pants-fire':-0.25,'barely-true':0.25}
train_cleaned = train_cleaned.replace({"label":truth_val})
test_cleaned = test_cleaned.replace({"label":truth_val})
valid_cleaned = valid_cleaned.replace({"label":truth_val})
# -
# Persist the cleaned two-column (statement, numeric label) splits.
train_cleaned.to_csv('./datasets/train_cleaned.csv', sep=',', encoding='utf-8', index=False)
test_cleaned.to_csv('./datasets/test_cleaned.csv', sep=',', encoding='utf-8', index=False)
valid_cleaned.to_csv('./datasets/valid_cleaned.csv', sep=',', encoding='utf-8', index=False)
| .ipynb_checkpoints/data-cleaning-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Calculating values on total and net peak demand change reported in the text.
#
# Developed by <NAME>, 2022.
import os
os.chdir('../')
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
from matplotlib.gridspec import GridSpec
import pickle
import datetime
# Reference 2035 dispatch with no EV charging load at all.
noev_scenario = pd.read_csv('Results/NoEVs_year2035_solar3.5x_wind3x_withstorage_dpdf_20220408.csv')
# # Total Demand
# +
# Charging-control scenarios (timer setting / workplace-control policy).
scens1 = ['_TimersRandom_noWPcontrol', '_Timers9pm_noWPcontrol', '_Timers12am_noWPcontrol', '_TimersNone_noWPcontrol', '_TimersNone_WPcontrol_minpeak', '_TimersNone_WPcontrol_avgem']
# Charging-access scenarios (where drivers can plug in).
scens2 = ['UniversalHome', 'HighHome', 'LowHome_HighWork', 'LowHome_LowWork']
vals = np.zeros((10, 7, 5)) # peak mean weekday total demand
vals2 = np.zeros((10, 7, 5)) # time of day
# One 7x5 (control x access) table per EV penetration level; only 0.5 and 1.0 get filled below.
tables_dfs = {penlevel: pd.DataFrame(np.zeros((7, 5)),
                                     index=['_TimersRandom_noWPcontrol', '_Timers9pm_noWPcontrol', '_Timers12am_noWPcontrol', '_TimersNone_noWPcontrol', '_TimersNone_WPcontrol_minpeak', '_TimersNone_WPcontrol_avgem', '_TimersMixed_WPcontrol_minpeak'],
                                     columns=['UniversalHome', 'HighHome', 'LowHome_HighWork', 'LowHome_LowWork', 'BusinessAsUsual']) for penlevel in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]}
tables_dfs_timeofpeak = {penlevel: pd.DataFrame(np.zeros((7, 5)),
                                                index=['_TimersRandom_noWPcontrol', '_Timers9pm_noWPcontrol', '_Timers12am_noWPcontrol', '_TimersNone_noWPcontrol', '_TimersNone_WPcontrol_minpeak', '_TimersNone_WPcontrol_avgem', '_TimersMixed_WPcontrol_minpeak'],
                                                columns=['UniversalHome', 'HighHome', 'LowHome_HighWork', 'LowHome_LowWork', 'BusinessAsUsual']) for penlevel in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]}
for k, penlevel in enumerate([0.5, 1.0]):#0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]):
#     if penlevel == 0.5:
    folder = 'Fuel1_Solar35_Wind3'
#     else:
#         folder = 'Fuel1_Solar35_Wind3_Curves'
    for i, scen1 in enumerate(scens1):
        for j, scen2 in enumerate(scens2):
            demand_df = pd.read_csv('Results/'+folder+'/fuel1_solar3.5_wind3_'+scen2+scen1+'_penlevel'+str(penlevel)+'_withstorage_dpdf_20220408.csv')
            # Keep weekday rows only (Mon=0 .. Fri=4), then average hour-by-hour.
            inds = demand_df[pd.to_datetime(demand_df['datetime']).dt.weekday.isin([0,1,2,3,4])].index
            # reshape(-1,24): one row per day, one column per hour; peak of the mean weekday profile.
            vals[k, i, j] = demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
            # Hour-of-day index at which that peak occurs.
            vals2[k, i, j] = np.where(demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0) == demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max())[0][0]
            tables_dfs[penlevel].loc[scen1, scen2] = demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
            tables_dfs_timeofpeak[penlevel].loc[scen1, scen2] = np.where(demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0) == demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max())[0][0]
    # Extra (control, access) combination handled outside the nested loops;
    # note this deliberately reuses/advances the leftover loop indices i and j.
    scen2 = 'BusinessAsUsual'
    scen1 = '_TimersMixed_WPcontrol_minpeak'
    i = i+1
    j = j+1
    demand_df = pd.read_csv('Results/'+folder+'/fuel1_solar3.5_wind3_'+scen2+scen1+'_penlevel'+str(penlevel)+'_withstorage_dpdf_20220408.csv')
    inds = demand_df[pd.to_datetime(demand_df['datetime']).dt.weekday.isin([0,1,2,3,4])].index
    vals[k, i, j] = demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
    vals2[k, i, j] = np.where(demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0) == demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max())[0][0]
    tables_dfs[penlevel].loc[scen1, scen2] = demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
    tables_dfs_timeofpeak[penlevel].loc[scen1, scen2] = np.where(demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0) == demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max())[0][0]
# -
# Baseline peak of the mean weekday profile without EVs (inds left over from the loop above).
vals_shift = noev_scenario.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
print('Peak total demand increase in 2035 from adding EVs, with 100% is up to:', ((tables_dfs[1.0] - vals_shift) / vals_shift).max().max())
print('Peak total demand increase in 2035 from adding EVs, with 50% is up to:', ((tables_dfs[0.5] - vals_shift) / vals_shift).max().max())
vals_tmp = (tables_dfs[1.0] - vals_shift) / vals_shift
print('Peak total demand increase in 2035 from adding EVs, with 100%, ranges between:', vals_tmp[vals_tmp>0].min().min(), ((tables_dfs[1.0] - vals_shift) / vals_shift).max().max())
vals_tmp = (tables_dfs[0.5] - vals_shift) / vals_shift
print('Peak total demand increase in 2035 from adding EVs, with 50%, ranges between:', vals_tmp[vals_tmp>0].min().min(), ((tables_dfs[0.5] - vals_shift) / vals_shift).max().max())
print('Max value without EVs occurs at 5pm: ', np.round(noev_scenario.loc[pd.to_datetime(noev_scenario['datetime']).dt.weekday.isin([0,1,2,3,4])]['total_incl_noncombustion'].values.reshape(-1, 24).mean(axis=0).max(), 2))
# +
vals_shift = noev_scenario.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
print('Percent change at 50% adoption: ')
# Last expression in the cell: displayed by the notebook, not assigned.
(tables_dfs[0.5] - vals_shift) / vals_shift
# -
print('Ranges from _ to _:')
print(np.round(((tables_dfs[0.5] - vals_shift) / vals_shift)[((tables_dfs[0.5] - vals_shift) / vals_shift) > 0].min().min(), 4))
print(np.round(((tables_dfs[0.5] - vals_shift) / vals_shift)[((tables_dfs[0.5] - vals_shift) / vals_shift) > 0].max().max(), 4))
# +
vals_shift = noev_scenario.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
print('Percent change at 100% adoption: ')
(tables_dfs[1.0] - vals_shift) / vals_shift
# -
print('Ranges from _ to _:')
print(np.round(((tables_dfs[1.0] - vals_shift) / vals_shift)[((tables_dfs[1.0] - vals_shift) / vals_shift) > 0].min().min(), 4))
print(np.round(((tables_dfs[1.0] - vals_shift) / vals_shift)[((tables_dfs[1.0] - vals_shift) / vals_shift) > 0].max().max(), 4))
print('Hour of peak in mean weekday total demand at 100%:')
tables_dfs_timeofpeak[1.0]
# # Net Demand
# +
# Same scenario grid as above, but for *net* demand ('demand' column): we only
# record the hour of the mean-weekday peak here.
scens1 = ['_TimersRandom_noWPcontrol', '_Timers9pm_noWPcontrol', '_Timers12am_noWPcontrol', '_TimersNone_noWPcontrol', '_TimersNone_WPcontrol_minpeak', '_TimersNone_WPcontrol_avgem']
scens2 = ['UniversalHome', 'HighHome', 'LowHome_HighWork', 'LowHome_LowWork']
tables_dfs_timeofpeak = {penlevel: pd.DataFrame(np.zeros((7, 5)),
                                                index=['_TimersRandom_noWPcontrol', '_Timers9pm_noWPcontrol', '_Timers12am_noWPcontrol', '_TimersNone_noWPcontrol', '_TimersNone_WPcontrol_minpeak', '_TimersNone_WPcontrol_avgem', '_TimersMixed_WPcontrol_minpeak'],
                                                columns=['UniversalHome', 'HighHome', 'LowHome_HighWork', 'LowHome_LowWork', 'BusinessAsUsual']) for penlevel in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]}
for k, penlevel in enumerate([0.5, 1.0]):
#     if penlevel == 0.5:
    folder = 'Fuel1_Solar35_Wind3'
#     else:
#         folder = 'Fuel1_Solar35_Wind3_Curves'
    for i, scen1 in enumerate(scens1):
        for j, scen2 in enumerate(scens2):
            demand_df = pd.read_csv('Results/'+folder+'/fuel1_solar3.5_wind3_'+scen2+scen1+'_penlevel'+str(penlevel)+'_withstorage_dpdf_20220408.csv')
            # Weekday rows only; hour index of the peak of the mean weekday net-demand profile.
            inds = demand_df[pd.to_datetime(demand_df['datetime']).dt.weekday.isin([0,1,2,3,4])].index
            tables_dfs_timeofpeak[penlevel].loc[scen1, scen2] = np.where(demand_df.loc[inds, 'demand'].values.reshape(-1,24).mean(axis=0) == demand_df.loc[inds, 'demand'].values.reshape(-1,24).mean(axis=0).max())[0][0]
    # Extra BusinessAsUsual/TimersMixed combination, reusing leftover loop indices.
    scen2 = 'BusinessAsUsual'
    scen1 = '_TimersMixed_WPcontrol_minpeak'
    i = i+1
    j = j+1
    demand_df = pd.read_csv('Results/'+folder+'/fuel1_solar3.5_wind3_'+scen2+scen1+'_penlevel'+str(penlevel)+'_withstorage_dpdf_20220408.csv')
    inds = demand_df[pd.to_datetime(demand_df['datetime']).dt.weekday.isin([0,1,2,3,4])].index
    tables_dfs_timeofpeak[penlevel].loc[scen1, scen2] = np.where(demand_df.loc[inds, 'demand'].values.reshape(-1,24).mean(axis=0) == demand_df.loc[inds, 'demand'].values.reshape(-1,24).mean(axis=0).max())[0][0]
# -
print('Hour of peak in mean weekday *net* demand at 50%:')
tables_dfs_timeofpeak[0.5]
print('Hour of peak in mean weekday *net* demand at 100%:')
tables_dfs_timeofpeak[1.0]
# +
# Peak values of the mean weekday profile for both total demand (tables_dfs1)
# and net demand (tables_dfs2), over the same scenario grid.
scens1 = ['_TimersRandom_noWPcontrol', '_Timers9pm_noWPcontrol', '_Timers12am_noWPcontrol', '_TimersNone_noWPcontrol', '_TimersNone_WPcontrol_minpeak', '_TimersNone_WPcontrol_avgem']
scens2 = ['UniversalHome', 'HighHome', 'LowHome_HighWork', 'LowHome_LowWork']
vals1 = np.zeros((10, 7, 5)) # peak mean weekday total demand
vals2 = np.zeros((10, 7, 5)) # time of day
tables_dfs1 = {penlevel: pd.DataFrame(np.zeros((7, 5)),
                                      index=['_TimersRandom_noWPcontrol', '_Timers9pm_noWPcontrol', '_Timers12am_noWPcontrol', '_TimersNone_noWPcontrol', '_TimersNone_WPcontrol_minpeak', '_TimersNone_WPcontrol_avgem', '_TimersMixed_WPcontrol_minpeak'],
                                      columns=['UniversalHome', 'HighHome', 'LowHome_HighWork', 'LowHome_LowWork', 'BusinessAsUsual']) for penlevel in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]}
tables_dfs2 = {penlevel: pd.DataFrame(np.zeros((7, 5)),
                                      index=['_TimersRandom_noWPcontrol', '_Timers9pm_noWPcontrol', '_Timers12am_noWPcontrol', '_TimersNone_noWPcontrol', '_TimersNone_WPcontrol_minpeak', '_TimersNone_WPcontrol_avgem', '_TimersMixed_WPcontrol_minpeak'],
                                      columns=['UniversalHome', 'HighHome', 'LowHome_HighWork', 'LowHome_LowWork', 'BusinessAsUsual']) for penlevel in [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]}
for k, penlevel in enumerate([0.5, 1.0]):#0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]):
#     if penlevel == 0.5:
    folder = 'Fuel1_Solar35_Wind3'
#     else:
#         folder = 'Fuel1_Solar35_Wind3_Curves'
    for i, scen1 in enumerate(scens1):
        for j, scen2 in enumerate(scens2):
            demand_df = pd.read_csv('Results/'+folder+'/fuel1_solar3.5_wind3_'+scen2+scen1+'_penlevel'+str(penlevel)+'_withstorage_dpdf_20220408.csv')
            inds = demand_df[pd.to_datetime(demand_df['datetime']).dt.weekday.isin([0,1,2,3,4])].index
            # Total demand peak and net-demand peak of the mean weekday profile.
            vals1[k, i, j] = demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
            vals2[k, i, j] = demand_df.loc[inds, 'demand'].values.reshape(-1,24).mean(axis=0).max()
            tables_dfs1[penlevel].loc[scen1, scen2] = demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
            tables_dfs2[penlevel].loc[scen1, scen2] = demand_df.loc[inds, 'demand'].values.reshape(-1,24).mean(axis=0).max()
    # Extra BusinessAsUsual/TimersMixed combination, reusing leftover loop indices.
    scen2 = 'BusinessAsUsual'
    scen1 = '_TimersMixed_WPcontrol_minpeak'
    i = i+1
    j = j+1
    demand_df = pd.read_csv('Results/'+folder+'/fuel1_solar3.5_wind3_'+scen2+scen1+'_penlevel'+str(penlevel)+'_withstorage_dpdf_20220408.csv')
    inds = demand_df[pd.to_datetime(demand_df['datetime']).dt.weekday.isin([0,1,2,3,4])].index
    vals1[k, i, j] = demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
    vals2[k, i, j] = demand_df.loc[inds, 'demand'].values.reshape(-1,24).mean(axis=0).max()
    tables_dfs1[penlevel].loc[scen1, scen2] = demand_df.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
    tables_dfs2[penlevel].loc[scen1, scen2] = demand_df.loc[inds, 'demand'].values.reshape(-1,24).mean(axis=0).max()
# -
# No-EV reference peaks: total demand (refval1) and net demand (refval2).
refval1 = noev_scenario.loc[inds, 'total_incl_noncombustion'].values.reshape(-1,24).mean(axis=0).max()
refval2 = noev_scenario.loc[inds, 'demand'].values.reshape(-1,24).mean(axis=0).max()
# Percent change in peak net demand relative to the no-EV reference.
(100*((tables_dfs2[1.0] - refval2)/refval2))
(100*((tables_dfs2[0.5] - refval2)/refval2))
print('Max at 50%:', (100*((tables_dfs2[0.5] - refval2)/refval2)).max())
print('Max at 100%:', (100*((tables_dfs2[1.0] - refval2)/refval2)).max())
# +
print("The Business As Usual scenario increases typical peak net demand by X times more than the Low Home High Work scenario:")
val1 = (tables_dfs2[0.5].loc['_TimersMixed_WPcontrol_minpeak', 'BusinessAsUsual'] - refval2)/refval2
val2 = (tables_dfs2[0.5].loc['_TimersNone_noWPcontrol', 'LowHome_HighWork'] - refval2)/refval2
print(val1)
print(val2)
print(np.round(val1/val2, 2))
print('---'*5)
print('With 100% EVs')
val1 = (tables_dfs2[1.0].loc['_TimersMixed_WPcontrol_minpeak', 'BusinessAsUsual'] - refval2)/refval2
val2 = (tables_dfs2[1.0].loc['_TimersNone_noWPcontrol', 'LowHome_HighWork'] - refval2)/refval2
print(val1)
print(val2)
print(np.round(val1/val2, 2))
# +
print("The Universal Home scenario with 9pm timers increases typical peak net demand by X times more than the Low Home High Work scenario:")
val1 = (tables_dfs2[0.5].loc['_Timers9pm_noWPcontrol', 'UniversalHome'] - refval2)/refval2
val2 = (tables_dfs2[0.5].loc['_TimersNone_noWPcontrol', 'LowHome_HighWork'] - refval2)/refval2
print(val1)
print(val2)
print(np.round(val1/val2, 2))
print('---'*5)
print('With 100% EVs')
val1 = (tables_dfs2[1.0].loc['_Timers9pm_noWPcontrol', 'UniversalHome'] - refval2)/refval2
val2 = (tables_dfs2[1.0].loc['_TimersNone_noWPcontrol', 'LowHome_HighWork'] - refval2)/refval2
print(val1)
print(val2)
print(np.round(val1/val2, 2))
# -
| GridModel_GridImpact/MainPlotting/ReportedValuesInText/ReportedValues_peakchange.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PyVTK
# PyVTK is a Python library for creating files for use with ParaView or VTK toolkit.
# VTK supports the following data types:
#
# 
# ## Example1
#
# From https://github.com/pearu/pyvtk/tree/master/examples
# Define the structure
# +
from pyvtk import *
# Unit-cube geometry: 8 corner points and 6 quadrilateral faces.
structure = PolyData(points=[[0,0,0],[1,0,0],[1,1,0],[0,1,0],
                             [0,0,1],[1,0,1],[1,1,1],[0,1,1]],
                     polygons=[[0,1,2,3],[4,5,6,7],[0,1,5,4],
                               [2,3,7,6],[0,4,7,3],[1,2,6,5]])
# -
# Add data to points
# One scalar value per point (8 points).
pointdata = PointData(\
    Scalars([0,1,2,3,4,5,6,7],
            name='sample_scalars'))
# Add data to cells
# + jupyter={"outputs_hidden": true}
# One scalar, one normal, and two Field arrays per cell (6 faces).
celldata = CellData(\
    Scalars([0,1,2,3,4,5],
            name='cell_scalars'),
    Normals([[0,0,1],[0,0,1],[0,-1,0],
             [0,1,0],[-1,0,0],[1,0,0]],
            name='cell_normals'),
    Field('FieldData',
          cellIds=[[0],[1],[2],[3],[4],[5]],
          faceAttributes=[[0,1],[1,2],[2,3],[3,4],[4,5],[5,6]]))
# -
# Create VtkData object to write data to disk. This will create text based or binary vtk files.
# + jupyter={"outputs_hidden": true}
vtk = VtkData(structure,pointdata,celldata)
vtk.tofile('example1','ascii')
vtk.tofile('example1b','binary')
# -
# A vtk-file has the following structure:
# + jupyter={"outputs_hidden": true}
# %pycat example1.vtk
# -
# # Example 2
# + jupyter={"outputs_hidden": true}
# Regular 3x4x6 point lattice with one scalar value per point (72 values).
vtk = VtkData(StructuredPoints([3,4,6]),
              PointData(Scalars([0,0,0,0,0,0,0,0,0,0,0,0,
                                 0,5,10,15,20,25,25,20,15,10,5,0,
                                 0,10,20,30,40,50,50,40,30,20,10,0,
                                 0,10,20,30,40,50,50,40,30,20,10,0,
                                 0,5,10,15,20,25,25,20,15,10,5,0,
                                 0,0,0,0,0,0,0,0,0,0,0,0
                                 ])))
vtk.tofile('example2')
# -
# # Example 3
# + jupyter={"outputs_hidden": true}
# Re-read only the geometry (structure) from the file written in example 2.
vtk = VtkData('example2',only_structure = 1)
def f(x, y, z):
    """Scalar field used throughout the examples: the product x*y*z."""
    return z * y * x
# Evaluate the scalar field f on the loaded structure's points and save.
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_sp')
# -
# # Example 4
# +
# Explicit point coordinates for a 3x4x6 structured grid (x varies fastest).
pp = [(i,j,k) for k in range(6) for j in range(4) for i in range(3)]
print(pp)
vtk = VtkData(StructuredGrid([3,4,6],pp))
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_sg')
# -
# # Example 5
# + jupyter={"outputs_hidden": true}
# Rectilinear grid defined by its per-axis coordinate lists.
vtk = VtkData(RectilinearGrid(range(3),range(4),range(6)))
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_rg')
# -
# # Example 6
# + jupyter={"outputs_hidden": true}
# Build the same 3x4x6 lattice as an unstructured grid of voxel cells.
voxels = []
points = []
n = 0
for k in range(6):
    for j in range(4):
        for i in range(3):
            points.append((i,j,k))
            # Skip the last plane along each axis: a voxel is anchored at its
            # lowest-index corner, so boundary points start no cell.
            if not (k==5 or j==3 or i==2):
                voxels.append([n,n+1,n+3,n+3+1,n+3*4,n+3*4+1,n+3*4+3,n+3*4+3+1])
            n += 1
vtk = VtkData(UnstructuredGrid(points,voxel=voxels))
vtk.point_data.append(vtk.structure.Scalars(f,'x*y*z'))
vtk.tofile('example2f_usg')
# -
# # Example 7
# + jupyter={"outputs_hidden": true}
# Mixed-cell unstructured grid: 27 points with a vector and a scalar each.
points = [[0,0,0],[1,0,0],[2,0,0],[0,1,0],[1,1,0],[2,1,0],
          [0,0,1],[1,0,1],[2,0,1],[0,1,1],[1,1,1],[2,1,1],
          [0,1,2],[1,1,2],[2,1,2],[0,1,3],[1,1,3],[2,1,3],
          [0,1,4],[1,1,4],[2,1,4],[0,1,5],[1,1,5],[2,1,5],
          [0,1,6],[1,1,6],[2,1,6]
          ]
vectors = [[1,0,0],[1,1,0],[0,2,0],[1,0,0],[1,1,0],[0,2,0],
           [1,0,0],[1,1,0],[0,2,0],[1,0,0],[1,1,0],[0,2,0],
           [0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],
           [0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],[0,0,1],
           [0,0,1],[0,0,1],[0,0,1]
           ]
# One instance of every supported cell type, as in the classic VTK example.
vtk = VtkData(\
              UnstructuredGrid(points,
                               hexahedron=[[0,1,4,3,6,7,10,9],
                                           [1,2,5,4,7,8,11,10]],
                               tetra=[[6,10,9,12],
                                      [5,11,10,14]],
                               polygon=[15,16,17,14,13,12],
                               triangle_strip=[18,15,19,16,20,17],
                               quad=[22,23,20,19],
                               triangle=[[21,22,18],
                                         [22,19,18]],
                               line=[26,25],
                               vertex=[24]
                               ),
              PointData(Vectors(vectors),Scalars(range(27))),
              'Unstructured Grid Example'
              )
vtk.tofile('example3')
# +
import numpy as np
# Load a whitespace-separated table: columns 0-2 are point coordinates,
# columns 3-5 are the vector (u, v, w) at each point.
uvw_arr = np.loadtxt('uvw.dat', skiprows=2)
print(uvw_arr.shape)
# -
# !head uvw.dat
points = uvw_arr[:,0:3]
print(points.shape)
point_data = uvw_arr[:,3:6]
print(point_data.shape)
# BUG FIX: the grid geometry must be built from the coordinate columns
# (`points`), not from the vector data; `points` was previously unused.
vtk = VtkData(UnstructuredGrid(points), PointData(Vectors(point_data)))
vtk.tofile("uvw.vtk")
| dataviz/PyVTK.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
# default_exp gradientMethod
# -
# # Gradient Based Estimation
# $\hat{\alpha_i}$: the local dictCurve estimate for the $i^{th}$ bag
#
# $\hat{\alpha_{c_i}}$: the $i^{th}$ global distCurve estimate using bootstrapped sample
#
# $w_{ji}$: the contribution of bag j to the $i^{th}$ global estimate
#
# $\tilde{\alpha_i}$: the expected global class prior given the current contribution values and local estimates for each bag
# $\tilde{\alpha_i} = \frac{w_{1i} \cdot \hat{\alpha_1} \cdot n_1 \dots w_{Ni} \cdot \hat{\alpha_N} \cdot n_N}{w_{1i} \cdot n_1 \dots w_{Ni} \cdot n_N} $
#
#
# Loss for cluster $c_i$
#
#
# $\mathcal{L}_{c_i} = \frac{1}{2}(\tilde{\alpha_i} - \hat{\alpha_{c_i}})^2$
# def gradientMethod(ds):
# alphaHat : init alphaHat for each bag
# alpha_C : get K global alpha estimates
# init W randomly
# for each iteration:
#     # calculate loss given the current values of alphaHat and w
# loss = lossFunction(w[:,1], alpha_C[1]) + ... + lossFunction(w[:,K], alpha_C[K])
# # update alphaHat
# alphaHat = alphaHat - eta * grad(loss)
#     # calculate the loss given the current w and new alphaHats
# loss = lossFunction(1) + ... + lossFunction(K)
# w = w - eta * grad(loss)
# getMAE(alphaHat, alpha)
# +
# export
from tqdm.notebook import tqdm
import autograd.numpy as np
from autograd import grad
import autograd.scipy.stats as agss
import matplotlib.pyplot as plt
from multiinstance.dataset_utils import buildDataset
from multiinstance.utils import *
from multiinstance.distanceApproaches import *
from multiinstance.agglomerative_clustering import AgglomerativeClustering
from numba import set_num_threads
import scipy.stats as ss
# -
# Cap numba's thread pool at 20 threads.
set_num_threads(20)
def bimodal():
    """Sample from a two-component beta mixture.

    A fair coin flip picks Beta(2, 10) (mass near 0) or Beta(10, 3)
    (mass near 1), giving a bimodal distribution on [0, 1].
    """
    pick_low_mode = np.random.binomial(1, .5)
    if pick_low_mode:
        return np.random.beta(2, 10)
    return np.random.beta(10, 3)
# export
def getAlphaHat(dsi, reps=10):
    """Pool every bag's positive and unlabeled examples and return `reps`
    bootstrapped global class-prior estimates (via getEsts)."""
    pos_parts = []
    unl_parts = []
    for bag_idx in range(dsi.N):
        bag_pos, bag_unl = dsi.getBag(int(bag_idx))
        pos_parts.append(bag_pos)
        unl_parts.append(bag_unl)
    pooled_pos = np.concatenate(pos_parts)
    pooled_unl = np.concatenate(unl_parts)
    alphaHats, _ = getEsts(pooled_pos, pooled_unl, reps)
    return alphaHats
# +
def initDS(ds_size=100, n_alpha_ests=50, nP=None, nU=None,
           alphaDistr=lambda: np.random.uniform(0.1, .5), posMean=None, negMean=None, cov=None):
    """Build a synthetic multi-instance dataset and attach its estimates.

    Adds per-bag bootstrap estimates (`alphaHats`, `curves`) and pooled
    global estimates (`globalAlphaHats`) before returning the dataset.
    """
    dataset = buildDataset(ds_size, alphaDistr=alphaDistr, nP=nP,
                           nU=nU, posMean=posMean, negMean=negMean, cov=cov)
    dataset.alphaHats, dataset.curves = getBagAlphaHats(dataset, numbootstraps=n_alpha_ests)
    dataset.globalAlphaHats = getAlphaHat(dataset, reps=n_alpha_ests)
    return dataset
# +
# export
def getAlphaLoss(w, n, alphaHats):
    """Build the loss over bag priors `alpha`, holding weights `w` fixed.

    For each global estimate alpha_c with weight row w_i, the expected global
    prior is the w_i- and size-weighted mean of the bag priors; the loss is
    half the squared gap summed over all global estimates.
    """
    def loss(alpha):
        total = 0
        for weight_row, alpha_c in zip(w, alphaHats):
            expected = (1 / np.dot(weight_row, n)) * np.dot(np.multiply(alpha, weight_row), n)
            total = total + .5 * np.square(alpha_c - expected)
        return total
    return loss
def getWLoss(a, n, alphaHats):
    """Build the loss over contribution weights `w`, holding priors `a` fixed.

    Mirrors getAlphaLoss: same objective, differentiated with respect to the
    weight matrix instead of the bag priors.
    """
    def loss(w):
        total = 0
        for weight_row, alpha_c in zip(w, alphaHats):
            expected = (1 / np.dot(weight_row, n)) * np.dot(np.multiply(a, weight_row), n)
            total = total + .5 * np.square(alpha_c - expected)
        return total
    return loss
# -
# Small 5-bag dataset with relatively high class priors for a quick check.
ds = initDS(ds_size=5,n_alpha_ests=10,alphaDistr=lambda: np.random.uniform(0.25,0.99))
# export
def gradientMethod(dsi, n_epochs=100):
    """Alternate gradient steps on bag priors and contribution weights.

    Starts each bag at its mean bootstrap estimate, then for `n_epochs`
    iterations takes a step (rate .025) on the priors and then on the
    weights. Returns the trajectory of mean absolute error of the bag
    priors against the true alphas (length n_epochs + 1).
    """
    globalEsts = dsi.globalAlphaHats
    a = dsi.alphaHats.mean(1)            # initial bag priors
    n = dsi.numU                         # unlabeled count per bag
    w = np.random.uniform(low=0.01, high=1, size=(len(globalEsts), n.shape[0]))
    maes = [np.mean(np.abs(a - dsi.trueAlphas.flatten()))]
    for _ in tqdm(range(n_epochs), total=n_epochs):
        # step on the bag priors with the weights frozen
        alphaGrad = grad(getAlphaLoss(w, n, globalEsts))
        a = a - .025 * alphaGrad(a.flatten()).reshape(a.shape)
        # step on the weights with the priors frozen
        wGrad = grad(getWLoss(a, n, globalEsts))
        w = w - .025 * wGrad(w)
        maes.append(np.mean(np.abs(a - dsi.trueAlphas.flatten())))
    return maes
def g2(dsi, n_epochs=100):
    """Variant of gradientMethod that keeps every bootstrap estimate per bag.

    Each bag contributes all of its bootstrap alpha estimates (matrix `a`)
    rather than their mean; the unlabeled counts are tiled to match.
    Returns the MAE trajectory of the per-bag mean estimates.

    NOTE(review): the update on `a` applies the raw gradient with no .025
    step size (unlike gradientMethod) — presumably intentional; confirm.
    """
    globalEsts = dsi.globalAlphaHats
    a = dsi.alphaHats
    # repeat each bag's unlabeled count once per bootstrap column
    n = np.tile(dsi.numU.reshape((-1, 1)), (1, a.shape[1])).flatten()
    w = np.random.uniform(low=0.01, high=1, size=(len(globalEsts), n.shape[0]))
    maes = [np.mean(np.abs(a.mean(1) - dsi.trueAlphas.flatten()))]
    for _ in tqdm(range(n_epochs), total=n_epochs):
        alphaGrad = grad(getAlphaLoss(w, n, globalEsts))
        a = a - alphaGrad(a.flatten()).reshape(a.shape)
        wGrad = grad(getWLoss(a.flatten(), n, globalEsts))
        w = w - .025 * wGrad(w)
        maes.append(np.mean(np.abs(a.mean(1) - dsi.trueAlphas.flatten())))
    return maes
# Plot the MAE trajectory of g2 on the small test dataset.
plt.plot(g2(ds))
# +
def yangDistributionDifference(posMean, negMean, cov, p=1):
    """
    Monte-Carlo estimate of the distribution-difference measure, Eq. (7) of:
    Yang et al., Data Min Knowl Disc (2019) 33: 995.
    https://doi.org/10.1007/s10618-019-00622-6

    Positive and negative classes are multivariate normals sharing `cov`.
    """
    sampleSize = 1000
    # Draw the positive sample first, then the negative — keep this RNG call
    # order so results are reproducible under a fixed seed.
    posSample = np.random.multivariate_normal(mean=posMean, cov=cov, size=sampleSize)
    negSample = np.random.multivariate_normal(mean=negMean, cov=cov, size=sampleSize)
    # Evaluate both class densities on both samples.
    negPDF_neg = ss.multivariate_normal.pdf(negSample, mean=negMean, cov=cov)
    posPDF_neg = ss.multivariate_normal.pdf(negSample, mean=posMean, cov=cov)
    negPDF_pos = ss.multivariate_normal.pdf(posSample, mean=negMean, cov=cov)
    posPDF_pos = ss.multivariate_normal.pdf(posSample, mean=posMean, cov=cov)
    z = np.zeros(sampleSize)
    pdfDiffPos_NEG, pdfDiffNeg_NEG, pdfMax_NEG = _yangHelper(negPDF_neg, posPDF_neg, z)
    pdfDiffPos_POS, pdfDiffNeg_POS, pdfMax_POS = _yangHelper(negPDF_pos, posPDF_pos, z)
    return _yH2(pdfDiffNeg_NEG, negPDF_neg, pdfDiffPos_POS, posPDF_pos, posPDF_neg,
                negPDF_pos, pdfMax_NEG, pdfMax_POS, p, sampleSize)
def _yangHelper(negPDF,posPDF,z):
pdfDiff = negPDF - posPDF
pdfDiffNeg = np.maximum(pdfDiff, z)
minus1 = -1 * pdfDiff
pdfDiffPos = np.maximum(minus1, z)
pdfMax = np.maximum(negPDF, posPDF)
return pdfDiffPos, pdfDiffNeg, pdfMax
def _yH2(pdfDiffNeg_NEG, negPDF_NEG, pdfDiffPos_POS, posPDF_POS, posPDF_NEG, negPDF_POS, pdfMax_NEG, pdfMax_POS,p,sampleSize):
numerator1 = np.mean(pdfDiffNeg_NEG / negPDF_NEG)
numerator2 = np.mean(pdfDiffPos_POS / posPDF_POS)
sumVecs = np.power(numerator1, np.ones_like(numerator1) * p) + np.power(numerator2, np.ones_like(numerator2) * p)
dPHat = np.power(sumVecs, np.ones_like(sumVecs) * (1/p))
dTermNeg = (posPDF_NEG * 0.5) + (negPDF_NEG * 0.5)
dTermPos = (posPDF_POS * 0.5) + (negPDF_POS * 0.5)
denominator = (np.sum(pdfMax_NEG / dTermNeg) + np.sum(pdfMax_POS / dTermPos)) / (2 * sampleSize)
return dPHat / denominator
# -
# Compare the gradient method against agglomerative clustering on 10 random datasets.
for rep in tqdm(range(10),total=10,desc="reps"):
    # build dataset
    dsi = initDS(ds_size=25, n_alpha_ests=10)
    # Run gradient method
    maes = g2(dsi,n_epochs=100)
    # Run agglomerative clustering
    agg0 = AgglomerativeClustering(dsi, .5,use_alphas_as_scores=True)
    agg0.cluster()
    # plot results
    fig,ax = plt.subplots(1,5,figsize=(20,4))
    # Plot MAEs
    ax[0].plot(maes,label="gradient")
    maes2 =agg0.meanAbsErrs
    ax[0].plot(maes2, label="agg")
    # Baseline: error of assigning the pooled global estimate to every bag.
    globalMAE = np.mean(np.abs(dsi.trueAlphas - dsi.globalAlphaHats.mean()))
    ax[0].hlines(globalMAE, 0,100)
    ax[0].legend()
    ax[1].hist(dsi.trueAlphas)
    ax[1].set_title(r"$\alpha$")
    ax[2].hist(dsi.numP)
    ax[2].set_title("Num Positive")
    ax[3].hist(dsi.numU)
    ax[3].set_title("Num Unlabeled")
    # Count of hidden positives among each bag's unlabeled examples.
    ax[4].hist([h[:n].sum() for h,n in zip(dsi.hiddenLabels, dsi.numU)])
    ax[4].set_title("Num Unlabeled Positive")
    fig.suptitle("Distr Distance: {:.4f} dim:{}".format(yangDistributionDifference(dsi.posDistMean,dsi.negDistMean,dsi.cov),
                                                        dsi.posDistMean.shape))
    plt.savefig("figs/nb_09/distrDistFigs/fig_{}.pdf".format(rep),format="pdf")
    plt.show()
# # Diagnosis
#
# After all, I wasn't using the same distribution for each data set but was sampling the dimension and the means of the mvn distributions randomly. The method performs well on the data sets in which the distributions are further away, but will lead to MAE values worse than that of the local estimates when the distributions have smaller distance.
| 09_Gradient_Method.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # $\delta_F$-vs-$\rho_{DM}$
# +
import h5py
import numpy as np
from scipy.ndimage import gaussian_filter as gf
data_dir = '/run/media/mahdi/HD2/Lya/LyTomo_data_new/'
import os
import importlib
import matplotlib.pyplot as plt
def get_deltaF_deltam_est(n, z=2.4, fit_deg=2, sigma=4):
    """Fit a polynomial mapping the smoothed mock flux map (deltaF) to the
    smoothed DM density field for mock realization `n`.

    Parameters
    ----------
    n : int
        Index of the mock map file.
    z : float
        Redshift label used in the file names.
    fit_deg : int
        Degree of the polynomial fit.
    sigma : float
        Gaussian smoothing scale, in map cells.

    Returns
    -------
    ndarray
        np.polyfit coefficients, highest degree first.
    """
    mockmap = np.fromfile(os.path.join(data_dir,'mock_maps_z'+str(z)+'/map_TNG_z'+str(z)+'_n'+str(n)+'.dat'))
    # Mock maps are flattened 205^3 cubes; smooth with periodic boundaries.
    deltaF_mock = np.ravel(gf(mockmap.reshape(205,205,205), sigma=sigma, mode='wrap'))
    # Use a context manager so the HDF5 handle is closed instead of leaked.
    with h5py.File(os.path.join(data_dir,'DM_Density_field/TNG_DM_z'+str(z)+'.hdf5'),'r') as fdm:
        dm_field = fdm['DM/dens'][:]
    DM = np.ravel(gf(dm_field, sigma=sigma, mode='wrap'))
    co = np.polyfit(deltaF_mock, DM, fit_deg)
    return co
def get_deltaF_deltam_est_noiseless(z=2.4, fit_deg=2, sigma=4):
    """Same polynomial fit as get_deltaF_deltam_est, but using the noiseless
    ("true") flux map instead of a mock realization.

    Returns np.polyfit coefficients, highest degree first.
    """
    # Context managers close the HDF5 handles instead of leaking them.
    with h5py.File(os.path.join(data_dir,'noiseless_maps/map_TNG_true_1.0_z'+str(z)+'.hdf5'),'r') as ftrue:
        truemap = ftrue['map'][:]
    deltaF_true = np.ravel(gf(truemap, sigma=sigma, mode='wrap'))
    with h5py.File(os.path.join(data_dir,'DM_Density_field/TNG_DM_z'+str(z)+'.hdf5'),'r') as fdm:
        dm_field = fdm['DM/dens'][:]
    DM = np.ravel(gf(dm_field, sigma=sigma, mode='wrap'))
    co = np.polyfit(deltaF_true, DM, fit_deg)
    return co
def get_deltaF_deltam_mean_std(nmocks=20, z=2.4, fit_deg=1, sigma=4):
    """Get the mean and stdev of the estimator's parameters"""
    # Collect the fitted polynomial coefficients from each mock realization.
    co_all = np.empty((nmocks, int(fit_deg+1)))
    for i,n in enumerate(np.arange(1,nmocks+1)):
        co = np.array(get_deltaF_deltam_est(n, z=z, fit_deg=fit_deg, sigma=sigma))
        co_all[i,:] = co
    # NOTE(review): despite the name/docstring, this is the MEDIAN over
    # mocks, not the mean — confirm which central value is intended.
    mean = np.median(co_all, axis=0)
    std = np.std(co_all, axis=0)
    # Print each coefficient as "value+-std", rounded to 2 decimals.
    for i in range(fit_deg+1):
        print(str(np.around(mean[i],2))+'+-'+ str(np.around(std[i],2)))
    return mean, std
# -
# Report fitted coefficients (median +- std over mocks) at each redshift bin.
print(get_deltaF_deltam_mean_std(z=2.3, fit_deg=2, sigma=4))
print(get_deltaF_deltam_mean_std(z=2.4, fit_deg=2, sigma=4))
print(get_deltaF_deltam_mean_std(z=2.6, fit_deg=2, sigma=4))
# Same redshift with a finer smoothing scale for comparison.
print(get_deltaF_deltam_mean_std(z=2.4, fit_deg=2, sigma=2))
# Summarizing the estimator for the redshift bins : $ \rm \left(\frac{\rho_{\rm DM}}{\langle \rho_{\rm DM} \rangle}\right)^{sm} = a_0 \ {\delta^{sm}_F}\ {}^2 \ + \ a_1 \ \delta^{sm}_F + a_2 $
#
# | | $a_0$ | $a_1$ | $a_2$ |
# |----|---|---| --- |
# | z=2.3 | 20.78 $\pm$ 0.74| -6.05$\pm$ 0.07 | 0.95 $\pm$ 0.01|
# | z=2.45 | 16.78 $\pm$ 0.53| -5.62$\pm$ 0.07 | 0.96 $\pm$ 0.01|
# | z=2.6 | 15.23 $\pm$ 0.7| -5.44$\pm$ 0.06 | 0.98 $\pm$ 0.00|
#
#
#
# The relation changes with redshift. Later here and in [this notebook](https://github.com/mahdiqezlou/LyTomo-Watershed/blob/main/notebooks/M0_Mtomo.ipynb) we show this actually does not impact the masses of the watersheds, so one can ignore the redshift evolution of $\delta_F$-$\rho_{DM}$ relation.
#
#
# Now, Write the mean and std (2d-hist) of that figure on a file :
# +
def write_mean_std_hist(z=2.4, nmocks=20):
    """Write the median and std (over mocks) of two 2D histograms to HDF5:
    mock-vs-true deltaF, and mock deltaF-vs-DM density."""
    # bins[0]: deltaF axis, bins[1]: DM density axis.
    bins=[np.linspace(-.4,.4,100), np.linspace(0,3.5,200)]
    htot1 = np.zeros((nmocks, bins[0].size-1, bins[0].size-1))
    htot2 = np.zeros((nmocks, bins[0].size-1, bins[1].size-1))
    DM = np.ravel(gf(h5py.File(os.path.join(data_dir,'DM_Density_field/TNG_DM_z'+str(z)+'.hdf5'),'r')['DM/dens'][:],
                     sigma=4 , mode='wrap'))
    for i,n in enumerate(np.arange(1,nmocks+1)):
        mockmap = np.fromfile(os.path.join(data_dir,'mock_maps_z'+str(z)+'/map_TNG_z'+str(z)+'_n'+str(n)+'.dat'))
        deltaF_mock = np.ravel(gf(mockmap.reshape(205,205,205), 4, mode='wrap'))
        with h5py.File(os.path.join(data_dir,'noiseless_maps/map_TNG_true_1.0_z'+str(z)+'.hdf5'),'r') as ftrue :
            deltaF_true = np.ravel(gf(ftrue['map'][:], 4, mode='wrap'))
        # Rescale the noiseless map to the mock's standard deviation.
        deltaF_true *= np.std(deltaF_mock)/np.std(deltaF_true)
        h1,_, _ = np.histogram2d(deltaF_mock, deltaF_true, bins=[bins[0],bins[0]], density=True)
        h2, _, _= np.histogram2d(deltaF_mock, DM, bins=bins, density=True)
        htot1[i,:,:] = h1
        htot2[i,:,:] = h2
    # Persist median/std over the mock realizations for later plotting.
    with h5py.File(os.path.join(data_dir,'plotting_data/df_mock_true_dm_z'+str(z)+'.hdf5'),'w') as fw:
        fw['df_mock_true/median'] = np.median(htot1, axis=0)
        fw['df_mock_true/std'] = np.std(htot1, axis=0)
        fw['df_dm/median'] = np.median(htot2, axis=0)
        fw['df_dm/std'] = np.std(htot2, axis=0)
def write_hist_noiseless(z=2.4):
    """Write the 2D histogram of noiseless deltaF vs DM density to HDF5."""
    # bins[0]: deltaF axis, bins[1]: DM density axis.
    bins=[np.linspace(-.4,.4,100), np.linspace(0,3.5,200)]
    noiseless_map = h5py.File(os.path.join(data_dir,'noiseless_maps/map_TNG_true_1.0_z'+str(z)+'.hdf5'),'r')['map'][:]
    deltaF_true = np.ravel(gf(noiseless_map, 4, mode='wrap'))
    DM = np.ravel(gf(h5py.File(os.path.join(data_dir,'DM_Density_field/TNG_DM_z'+str(z)+'.hdf5'),'r')['DM/dens'][:], 4 , mode='wrap'))
    h,_, _ = np.histogram2d(deltaF_true, DM, bins=[bins[0],bins[1]], density=True)
    with h5py.File(os.path.join(data_dir,'plotting_data/df_dm_true_z'+str(z)+'.hdf5'),'w') as fw:
        fw['df_dm'] = h
# -
# Generate the plotting data files for z=2.4.
write_hist_noiseless(z=2.4)
write_mean_std_hist(z=2.4)
def plot_for_deltaF_deltam(data_dir):
    """Two stacked panels: mock-vs-true deltaF and deltaF-vs-DM density,
    with the fitted power-law estimator overplotted; saves a PDF figure."""
    from lytomo_watershed import plot
    importlib.reload(plot)
    dfdm = plot.Df_dm(data_dir=data_dir)
    # Manual axes placement: two panels sharing the x-axis.
    left, width = .2, .7
    bottom2, height2 = 0.5, 0.4
    bottom1, height1 = .1, .4
    rect_scatter1 = [left, bottom1, width, height1]
    rect_scatter2 = [left, bottom2, width, height2]
    fig = plt.figure(figsize=(8, 16))
    ax = []
    ax.append(fig.add_axes(rect_scatter1))
    ax.append(fig.add_axes(rect_scatter2))
    ax[1].tick_params(axis="x", labelbottom=False)
    dfdm.deltaF_true_mock(fig, ax, z=2.4)
    x = np.arange(-0.4,0.4,0.01)
    # Coefficients below are the z=2.45 fit from get_deltaF_deltam_mean_std.
    #ax[1].plot(x, np.polyval([14.56817442,  -4.96863497,  0.96664919], x), label='z=2.3', color='C2')
    ax[1].plot(x, np.polyval([16.78018621, -5.61535006,  0.96007353],x), label='Power-law estimator', color='C1')
    #ax[1].plot(x, np.polyval([10.1530904 , -4.37489626,  0.99182931], x), label='z=2.6', ls='--', color='C3')
    ax[1].legend(loc=(0,-.05), facecolor='w')
    fig.savefig('../figures/deltam_deltaF_z2.4.pdf', transparent=False)
plot_for_deltaF_deltam(data_dir)
# # $M_{DM}$-vs-$M_{tomo}$:
#
# +
import h5py
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage import gaussian_filter
import importlib
plt.style.use('./paper.mystyle')
def find_optimal_offset(ax, Mtomo, MDM, ls, label):
    """Scan additive offsets in [0, 1) and plot the RMS error between
    MDM and (Mtomo + offset); print the offset minimizing the error.

    Parameters
    ----------
    ax : matplotlib-like axis the error curve is drawn on.
    Mtomo, MDM : array-like
        Matched mass estimates being aligned.
    ls, label : passed through to ax.plot as line style / legend label.
    """
    offset_range = np.arange(0, 1, 0.0001)
    # Accumulate in a list: np.append inside the loop was O(n^2).
    errs = []
    for offset in offset_range:
        dev = MDM - (Mtomo + offset)
        errs.append(np.sqrt(np.mean(dev*dev)))
    err = np.array(errs)
    ind = np.where(err == np.min(err))
    print('Minimum error is at offset = ', offset_range[ind])
    ax.plot(offset_range, err, ls=ls, label=label)
# -
def load_watersheds(n, z, th, lc, data_dir):
    """Load the labeled watershed maps and peak catalogs for mock `n` and the
    noiseless map at redshift `z`, threshold `th`, linking contour `lc`.

    Returns (lmap_mock, peaks_mock, lmap_true, peaks_true). Note the two
    peaks_* return values are OPEN h5py.File handles left for the caller.
    """
    with h5py.File(os.path.join(data_dir,'watersheds_z'+str(z)+'/mocks/n'+str(n)
                                +'/labeled_map_TNG_z'+str(z)+'_n'+str(n)
                                +'_sigma4_th'+str(np.around(th,2)).ljust(4,'0')
                                +'_lc'+str(np.around(lc,2)).ljust(4,'0')+'.hdf5'),'r') as f:
        lmap_mock = f['map'][:]
    peaks_mock = h5py.File(os.path.join(data_dir,'watersheds_z'+str(z)+'/mocks/n'
                                        +str(n)+'/peaks_TNG_z'+str(z)+'_n'+str(n)
                                        +'_sigma4_th'+str(np.around(th,2)).ljust(4,'0')
                                        +'_lc'+str(np.around(lc,2)).ljust(4,'0')+'.hdf5'), 'r')
    with h5py.File(os.path.join(data_dir,'watersheds_z'+str(z)+'/noiseless/labeled_map_TNG_true_z'
                                +str(z)+'_n1_sigma4_th'+str(np.around(th,2)).ljust(4,'0')+'_lc'
                                +str(np.around(lc,2)).ljust(4,'0')+'.hdf5'),'r') as f :
        lmap_true = f['map'][:]
    peaks_true = h5py.File(os.path.join(data_dir,'watersheds_z'+str(z)+'/noiseless/peaks_TNG_true_z'
                                        +str(z)+'_n1_sigma4_th'+str(np.around(th,2)).ljust(4,'0')
                                        +'_lc'+str(np.around(lc,2)).ljust(4,'0')+'.hdf5'),'r')
    return lmap_mock, peaks_mock, lmap_true, peaks_true
def write_data(z_accurate = 2.4442257045541464, z=2.4, th=2.35, lc=2.00, offset=0.1414):
    """Writes the Mtomo and Dm masses on a file since they are slow to produce.

    For each of the 20 mock realizations, loads the watershed products,
    computes the tomographic and DM masses via minima.get_Mtomo_MDM, applies
    the additive `offset` (dex) to the raw Mtomo, and stores the results in
    plotting_data/Mtomo_MDM_z<z>_th<th>_lc<lc>.hdf5.

    NOTE(review): relies on a module-level `data_dir` defined earlier in the
    notebook — confirm it is set before calling.
    """
    from lytomo_watershed import minima
    importlib.reload(minima)
    fname = (os.path.join(data_dir,'plotting_data/Mtomo_MDM_z'+str(z)+'_th'+str(th).ljust(4,'0')
                          +'_lc'+str(lc).ljust(4,'0')+'.hdf5'))
    DM_file = os.path.join(data_dir,'DM_Density_field/TNG_DM_z'+str(z)+'.hdf5')
    with h5py.File(fname,'w') as fw:
        for n in range(1,21):
            lmap_mock, peaks_mock, lmap_true, peaks_true = load_watersheds(n=n,z=z,th=th,lc=lc, data_dir=data_dir)
            Mtomo_mock, Mtomo_true, MDM_mock, MDM_true, MDM_mock_true, id_max_overlap = minima.get_Mtomo_MDM(z_accurate=z_accurate, lmap_mock=lmap_mock,
                                                                                                             lmap_true=lmap_true, peaks_mock=peaks_mock,
                                                                                                             peaks_true=peaks_true, DM_file=DM_file)
            # Offset-corrected tomographic masses and the mock DM masses.
            fw[str(n)+'/Mtomo_mock'] = Mtomo_mock[:]+offset
            fw[str(n)+'/MDM_mock'] = MDM_mock[:]
            # Subset matched to a true watershed (ids are 1-based, hence the -1).
            fw[str(n)+'/Mtomo_mock_overlap'] = Mtomo_mock[:][id_max_overlap['mock'][:].astype(int)-1]+offset
            fw[str(n)+'/MDM_true_overlap'] = MDM_mock_true
            fw[str(n)+'/id_max_overlap/mock'] = id_max_overlap['mock'][:]
            fw[str(n)+'/id_max_overlap/true'] = id_max_overlap['true'][:]
def plot_for_paper(n, z, th, lc, co=(0.40,14.50), vmin=0.01, vmax=1e2,
                   data_dir='/run/media/mahdi/HD2/Lya/LyTomo_data_new/'):
    """plot the M_DM vs M_tomo for paper.

    Left panel: all mock watersheds (KDE-colored); right panel: only the mock
    watersheds matched to a noiseless ("true") watershed.

    Parameters
    ----------
    n : int          mock realization index.
    z : float        map redshift.
    th, lc : float   watershed threshold / linking-contour parameters.
    co : tuple       (slope, intercept) of the power-law estimator overlay.
    vmin, vmax :     KDE color-scale limits for the first panel.
    data_dir : str   root of the LyTomo data tree. New keyword; it defaults to
                     the previously hard-coded path so existing calls behave
                     identically, but the function is now portable.

    Returns the matplotlib figure.
    """
    import os
    from lytomo_watershed import plot
    importlib.reload(plot)
    th_tag = str(np.around(th, 2)).ljust(4, '0')
    lc_tag = str(np.around(lc, 2)).ljust(4, '0')
    mass_file = os.path.join(data_dir,'plotting_data/Mtomo_MDM_z'+str(z)
                             +'_th'+str(th).ljust(4,'0')+'_lc'+str(lc).ljust(4,'0')
                             +'.hdf5')
    Mtomo_MDM = plot.Mtomo_MDM(mass_file=mass_file)
    # Detection significance of each mock watershed. Read inside a context
    # manager so the HDF5 handle is not leaked (the original left it open).
    with h5py.File(os.path.join(data_dir,'watersheds_z'+str(z)+'/mocks/n'
                                +str(n)+'/peaks_TNG_z'+str(z)+'_n'+str(n)
                                +'_sigma4_th'+th_tag+'_lc'+lc_tag
                                +'.hdf5'), 'r') as pf:
        signif = pf['signif'][:]
    f = h5py.File(mass_file,'r')
    fig, ax = plt.subplots(1,2, figsize=(18,9))
    Mtomo_MDM.plot_Mtomo_MDM(fig, ax[0], f[str(n)+'/Mtomo_mock'][:], f[str(n)+'/MDM_mock'][:],
                             z=z, th=th, lc=lc, signif=signif, mass_file=mass_file,
                             xlabel=r'$\rm log [ M_{tomo} ]$', ylabel=r'$\rm log [ M_{DM, mock}]$',
                             legend=False, plot_kde=True, first_plot=True, vmin=vmin, vmax=vmax)
    # Restrict the significance array to watersheds that overlap a true one
    # (overlap ids are 1-based).
    signif = signif[f[str(n)+'/id_max_overlap/mock'][:].astype(int)-1]
    Mtomo_MDM.plot_Mtomo_MDM(fig, ax[1], f[str(n)+'/Mtomo_mock_overlap'][:],
                             f[str(n)+'/MDM_true_overlap'][:], z=z, th=th,
                             lc=lc, signif=signif, mass_file=mass_file, co=co, legend=True,
                             plot_kde=True, first_plot=False)
    f.close()
    plt.tight_layout(pad=0)
    return fig
# ## For $z=2.3, 2.4, 2.6$ and $\nu = -2.0$, $\kappa = -2.35$ :
#
# ### Find optimal offset in $M_{tomo}$:
# First, we need to find the best offset introduced in equation 7 in [the paper](https://arxiv.org/pdf/2112.03930.pdf).
#
# Find the optimal offset needed to get the Mtomo close to MDM within the same watersheds in mock maps.
def get_offset():
    """Scan the additive Mtomo offset (eq. 7 of the paper) against the mock DM
    masses and plot the RMS curve for each redshift in `z`."""
    import os
    from lytomo_watershed import minima
    data_dir = '/run/media/mahdi/HD2/Lya/LyTomo_data_new/'
    z, z_acc, ls = [2.4], [2.4442257045541464], ['--', 'solid', 'dotted']
    fig, ax = plt.subplots()
    ax.set_xlabel('offset (dex)')
    ax.set_ylabel(r'$ rms \ [ M_{tomo, raw} - M_{DM, mock}]$')
    for i in range(1):
        DM = h5py.File(os.path.join(data_dir,'DM_Density_field/TNG_DM_z'
                                    +str(z[i])+'.hdf5'),'r')['DM/dens'][:]
        # BUG FIX: load_watersheds() requires `data_dir`; the original call
        # omitted it and raised a TypeError.
        lmap_mock, peaks_mock, lmap_true, peaks_true = load_watersheds(z=z[i], n=1, th=2.35, lc=2.00,
                                                                       data_dir=data_dir)
        # NOTE(review): the DM density array is passed as `DM=` here, while
        # write_data() passes `DM_file=` — confirm minima.get_Mtomo_MDM
        # accepts both forms.
        Mtomo_mock, _, MDM_mock, _, _, _ = minima.get_Mtomo_MDM(z_accurate=z_acc[i], lmap_mock=lmap_mock,
                                                                lmap_true=lmap_true, peaks_mock=peaks_mock,
                                                                peaks_true=peaks_true, DM=DM)
        find_optimal_offset(ax, Mtomo_mock, MDM_mock, ls=ls[i], label='z='+str(z[i]))
    ax.legend()
get_offset()
# - The offset (0.142 dex) is insensitive to the variations in $\delta_F$-vs-$\rho_{DM}$ estimator.
#
# Write the data on file for later ease. It stores the $M_{tomo}$ and $M_{DM}$ for 20 of mock maps.
#
# **Note** : Each line in the cell below takes ~ 3 minutes to run, so it is a bit slow. \
# The data already exists in `./LyTomo_data/plotting_data/`, so if you wish, you can skip running the next cell.
write_data(z=2.4, z_accurate=2.4442257045541464, th=2.35, lc=2.00, offset=0.142)
# Below we find the mean power-law estimators and compare the $M_{tomo}$ with $M_{DM}$.
#
# ### z = 2.4:
#
# Mean estimator:
def get_the_fit(z, data_dir='/run/media/mahdi/HD2/Lya/LyTomo_data_new/'):
    """Fit the mean power-law M_DM(M_tomo) estimator at redshift `z`.

    Trains plot.Mtomo_MDM.plot_different_fits on a subset of the 20 mocks and
    prints the test statistic evaluated on the held-out mocks.

    `data_dir` is a new keyword defaulting to the previously hard-coded path,
    so existing calls are unaffected.
    """
    from lytomo_watershed import plot
    importlib.reload(plot)
    mass_file = os.path.join(data_dir,'plotting_data/Mtomo_MDM_z'+str(z)+'_th2.35_lc2.00.hdf5')
    Mtomo_plot = plot.Mtomo_MDM(mass_file=mass_file)
    ntrain, co = Mtomo_plot.plot_different_fits(z=z, th=2.35, lc=2.00)
    # Held-out mocks: realizations in 1..20 not used for training.
    ntest = np.arange(1,21)[np.in1d(np.arange(1,21), ntrain, invert=True)]
    print(Mtomo_plot.test_fit(co=co, ntest=ntest, z=z, th=2.35, lc=2.00))
# Fit and test the power-law estimator at z=2.4, then make the paper figure.
get_the_fit(z=2.4)

fig = plot_for_paper(n=1, z=2.4, th=2.35, lc=2.00, co=(0.40,14.54), vmin=0.2, vmax=1)
fig.savefig('../figures/Mtomo_mock_MDM_true_kde.pdf')
# ### z=2.3 :
#
# Mean estimator:
#
# Find the mean power-law estimator among the 20 mock maps.
# Same pipeline at z=2.3: cache the masses, then fit the mean estimator.
write_data(z=2.3, z_accurate=2.3161107439568918, th=2.35, lc=2.00, offset=0.142)
get_the_fit(z=2.3)
# ### z=2.6:
# Mean estimator :
# Same pipeline at z=2.6, plus the two-panel comparison figure.
write_data(z=2.6, z_accurate=2.5772902716018935, th=2.35, lc=2.00, offset=0.142)
get_the_fit(z=2.6)
fig = plot_for_paper(n=1, z=2.6, th=2.35, lc=2.00, co=(0.40,14.54), vmin=0.2, vmax=1)
# |z | # Watersheds | slope | intercept |
# |--|--|--| --|
# |2.3| | 0.43+-0.09 | 14.59+-0.06|
# |2.4| | 0.39+-0.07 | 14.60+-0.05|
# |2.6| | 0.34+-0.06 | 14.63+-0.04|
# - We found the estimators for the $M_{DM}$-vs-$M_{tomo}$ relation and the scatter around them.
#
# - Our estimator and the scatter around it is not changed whether we use the same $\delta_F$-vs-$\rho_{DM}$ estimator or not. So, we can use the mid-redshift parameters for the entire map.
#
# In [this notebook](https://github.com/mahdiqezlou/LyTomo-Watershed/blob/main/notebooks/M0_Mtomo.ipynb), we show the estimators for $M_{desc}$-vs-$M_{tomo}$ stays the same too.
| notebooks/MDM_Mtomo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: flymc
# language: python
# name: flymc
# ---
# + tags=[]
import numpy as np
import emcee
from matplotlib import pyplot as plt
import corner
# %load_ext autoreload
# %autoreload 2
# + tags=[]
mean = 3
sd = 2
data_size = 100
ndim = 2
nwalkers = 10

# Synthetic dataset: `data_size` draws from N(mean, sd).
# NOTE: unseeded, so every run produces a different sample.
gaussian_data = np.random.normal(mean, sd, size=data_size)


# Named lambdas replaced by defs (PEP 8 E731); call signatures are unchanged.
def pseudo_log_prob_per(d, p):
    """Per-datapoint pseudo log-probability (trivially 0 in this toy setup)."""
    return 0


def bound_prob(p):
    """Quadratic gap term: (mean(data) - p[0])^2 / (2 * p[1]^2)."""
    return (np.mean(gaussian_data) - p[0]) ** 2 / (2 * p[1] ** 2)
def log_prob(param):
    """Log-probability of (mu, sigma) given the sample mean of `gaussian_data`.

    yay for exponential families — only the sample mean enters.
    """
    # BUG FIX: the original tested `param[1] < 0` and returned the finite
    # penalty -1e6, so sigma == 0 slipped through to a division by zero and
    # invalid points were merely improbable rather than impossible. Using
    # <= 0 with -inf makes the sampler reject them outright.
    if param[1] <= 0:
        return -np.inf
    return -(np.mean(gaussian_data) - param[0]) ** 2 / (2 * param[1] ** 2)
def proposal(params, rng=np.random.default_rng):
    """Symmetric Gaussian random-walk proposal for emcee's MHMove.

    Draws one candidate per walker from N(current, 0.01 * I) and returns the
    candidates together with the log-probability difference of candidate vs
    current for each walker.

    NOTE(review): `rng` is never used (and its default is the generator
    *factory*, not a generator instance) — draws go through the global
    np.random instead; confirm whether emcee's move API supplies an RNG here.
    """
    candidate = np.array([np.random.multivariate_normal(param, np.diag([0.01, 0.01])) for param in params])
    # log p(candidate) - log p(current), one entry per walker.
    diff_log_prob = np.array([
        log_prob(cand) - log_prob(param) for cand, param in zip(candidate, params)
    ])
    return candidate, diff_log_prob
# Initialize walkers from a standard normal and run plain Metropolis-Hastings
# via emcee's MHMove wrapper around our `proposal`.
p0 = np.random.randn(nwalkers, ndim)
mh_sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, moves=[emcee.moves.MHMove(proposal)])
_ = mh_sampler.run_mcmc(p0, 50000, progress='notebook')
# -

# Trace plot of the first parameter for every walker (mixing check).
for i in range(nwalkers):
    plt.plot(mh_sampler.chain[i,:,0])

_ = corner.corner(mh_sampler.flatchain)
from IPython.display import display, Math

labels = ['mean', 'sd']
best_params = np.empty(ndim,)
for i in range(ndim):
    # 16/50/84 percentiles of the thinned, flattened chain -> median +/- "1 sigma".
    mcmc = np.percentile(mh_sampler.get_chain(thin=15, flat=True)[:, i], [16, 50, 84])
    q = np.diff(mcmc)
    # BUG FIX: raw string — "\m" in a normal string literal is an invalid
    # escape sequence (DeprecationWarning today, SyntaxError in future Python).
    txt = r"\mathrm{{{3}}} = {0:.3f}_{{-{1:.3f}}}^{{{2:.3f}}}"
    txt = txt.format(mcmc[1], q[0], q[1], labels[i])
    best_params[i] = mcmc[1]
    display(Math(txt))
# +
# Same target, but sampled with the (prototype) FireflyMove, which uses the
# per-datapoint lower bound so each step can avoid touching the full dataset.
# NOTE(review): FireflyMove is not part of stock emcee — this requires the
# fork used by the "flymc" kernel this notebook runs under.
firefly_move = emcee.moves.FireflyMove(
    datapoints=gaussian_data,
    pseudo_log_prob_per=pseudo_log_prob_per,
    bound_prob=bound_prob,
    proposal_function=proposal,
    nwalkers=nwalkers,
    ndim=ndim
)
firefly_sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, moves=[firefly_move])
_ = firefly_sampler.run_mcmc(p0, 1000, progress='notebook')
# -
_ = corner.corner(firefly_sampler.flatchain)

from scipy.special import expit

# Synthetic, linearly separable logistic-regression data with labels in {-1, +1}.
theta = np.array([1, 2, 3])
X = np.random.randn(100, 3)
y = ((theta @ X.T > 0).astype(int) * 2 - 1)
# Each row of `dataset` is (x_1, x_2, x_3, y).
dataset = np.vstack((X.T, y.reshape(1,100))).T
# Quick sanity check: sum_i y_i * (x_i . theta), positive for separable data.
dataset[:,-1] @ dataset[:,:-1] @ theta
# +
# first let's do normal MCMC
def log_prob(param):
    """Log-"probability" of `param` for the logistic toy model.

    NOTE(review): `dataset[:,-1] @ dataset[:,:-1] @ param` collapses the data
    to the single scalar sum_i y_i (x_i . param) before applying -expit, so
    this is not the usual sum of log-sigmoid likelihoods — presumably a
    deliberate prototype shortcut; confirm the intent.
    """
    # Reject non-finite parameter vectors outright.
    if not all(np.isfinite(param)):
        return -np.inf
    logistic_factors = dataset[:,-1] @ dataset[:,:-1] @ param
    probs = -expit(logistic_factors)
    if np.isnan(probs):
        return -np.inf
    return probs
# One ensemble of 2*ndim walkers using emcee's default stretch move.
nwalkers = 2 * len(theta)
ndim = len(theta)
logistic_sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob)
p0 = np.random.randn(nwalkers, ndim)
_ = logistic_sampler.run_mcmc(p0, 10000, progress='notebook')
# +
# Coefficients of a parabolic bound exp(a*x^2 + b*x + c) on the logistic
# factor, parameterized by `squig` (appears to be the FlyMC-style lower
# bound — confirm against the flymc reference).
squig = 1.5
a = (-1 / (4 * squig)) * ((np.exp(squig) - 1)/(np.exp(squig) + 1))
b = 1/2
c = -a * squig ** 2 + (squig / 2) - np.log(np.exp(squig) + 1)


def pseudo_log_prob_per(datapoint, param):
    """Per-datapoint pseudo log-probability built from the parabolic bound.

    `datapoint` is one row (x_1..x_k, y); x = y * (param . x_vec).
    """
    x = datapoint[-1] * param @ datapoint[:-1]
    bound = np.exp(a * x ** 2 + b * x + c)
    return 1 - bound * (1 + np.exp(-x))
# -
| notebooks/basic_emcee_prototyping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ** This file gives a brief overview of the capabilities of the code. **
#
# * The codes fit red arm spectra from GALAH.
#
# * Since the codes are meant to be clean and minimal, it should not be hard to tweak the source codes for other purposes (say fitting the blue arm spectrum).
#
# * The code fit for basic stellar parameters (Teff, logg, [Fe/H], [$\alpha$/Fe]), the broadening $v_{\rm broad}$, radial velocity, and continuum, to all spectral orders, simultaneously.
#
# * Note that we do not assume any spectral mask here. Due to the imperfectness of Kurucz models, there will be non-negligible systematics. To mitigate that and impose your favorite spectral mask, simply set spectrum_err to large values (e.g., 999) in the wavelength regions that you want to mask out.
# +
# %matplotlib inline
# import packages
import numpy as np
from scipy.optimize import curve_fit
from scipy import interpolate
from scipy import signal
from scipy.stats import norm
import time
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib import gridspec
from cycler import cycler
# import The Payne (https://github.com/tingyuansen/The_Payne)
from Payne4GALAH import spectral_model
from Payne4GALAH import utils
from Payne4GALAH import fitting
from Payne4GALAH.read_spectrum import read_carpy_fits
# +
# define plot properties
import matplotlib.cm as cm
from matplotlib import rcParams
from matplotlib import rc
from mpl_toolkits.axes_grid1 import make_axes_locatable
def rgb(r,g,b):
    """Convert 0-256 RGB components to matplotlib's 0-1 float tuple."""
    return tuple(float(channel) / 256. for channel in (r, g, b))
# Plotting palette as 0-1 RGB tuples (used for the color cycle below).
cb2 = [rgb(31,120,180), rgb(255,127,0), rgb(51,160,44), rgb(227,26,28), \
       rgb(10,10,10), rgb(253,191,111), rgb(178,223,138), rgb(251,154,153)]

# Global matplotlib defaults for every figure in this notebook.
rcParams['figure.figsize'] = (11,7.5)
rcParams['figure.dpi'] = 300
rcParams['lines.linewidth'] = 1
rcParams['axes.prop_cycle'] = cycler('color', cb2)
rcParams['axes.facecolor'] = 'white'
rcParams['axes.grid'] = False
rcParams['patch.facecolor'] = cb2[0]
rcParams['patch.edgecolor'] = 'white'
rcParams['font.family'] = 'Bitstream Vera Sans'
rcParams['font.size'] = 25
rcParams['font.weight'] = 300
# -
# > Restore The Payne emulator.

# assuming Kurucz models
# NN_coeffs packs the network weight/bias arrays plus the label normalization
# bounds (x_min/x_max) used later to de-scale the best-fit parameters.
NN_coeffs, wavelength_payne = utils.read_in_neural_network()
w_array_0, w_array_1, w_array_2, b_array_0, b_array_1, b_array_2, x_min, x_max = NN_coeffs
# > Read in GALAH spectra.

# +
# an example of a GALAH spectrum to be fitted
wavelength, spectrum, spectrum_err = utils.read_in_example()

# or restore your own here
#read_path = "star-109red_multi.fits"
#wavelength, spectrum, spectrum_err = read_carpy_fits(read_path)

#-----------------------------------------------------------------------------------
# restore a default hot star spectrum to determine telluric features
wavelength_blaze, spectrum_blaze, spectrum_err_blaze = utils.read_in_blaze_spectrum()

# or restore your own here
#read_path = "Hot_Star_HR9087.fits"
#wavelength_blaze, spectrum_blaze, spectrum_err_blaze = read_carpy_fits(read_path)

#-----------------------------------------------------------------------------------
# match the order, sometimes reduction can drop some of the orders for low S/N data.
# For each target order, pick the blaze order whose wavelengths are closest
# (argmin over the blaze-order axis, evaluated at the first pixel).
# NOTE(review): assumes both arrays are 2D (order x pixel) with matching pixel
# counts — confirm for non-default inputs.
dist = np.abs(wavelength[:, np.newaxis] - wavelength_blaze)
potentialClosest = dist.argmin(axis=1)[:,0]
wavelength_blaze = wavelength_blaze[potentialClosest,:]
spectrum_blaze = spectrum_blaze[potentialClosest,:]
spectrum_err_blaze = spectrum_err_blaze[potentialClosest,:]
# -
# -
# > Massaging the spectra into a digestable format.
# +
# cull nonsensible values (negative fluxes from reduction artifacts)
spectrum = np.abs(spectrum)
spectrum_blaze = np.abs(spectrum_blaze)

# rescale the spectra by its median so it has a more reasonable y-range
spectrum, spectrum_err = utils.scale_spectrum_by_median(spectrum, spectrum_err)
spectrum_blaze, spectrum_err_blaze = utils.scale_spectrum_by_median(spectrum_blaze, spectrum_err_blaze)

# eliminate zero values in the blaze function to avoid dividing with zeros
# the truncation is quite aggresive, can be improved if needed
ind_valid = np.min(np.abs(spectrum_blaze), axis=0) != 0
spectrum_blaze = spectrum_blaze[:,ind_valid]
spectrum_err_blaze = spectrum_err_blaze[:,ind_valid]
wavelength_blaze = wavelength_blaze[:,ind_valid]

# match the wavelength (blaze -> spectrum)
spectrum_blaze, wavelength_blaze = utils.match_blaze_to_spectrum(wavelength, spectrum, wavelength_blaze, spectrum_blaze)

# use the blaze to determine telluric region
smooth_length = 30 # number of pixel in a block that we use to search for telluric features
threshold = 0.9
# CONSISTENCY FIX: pass the named variables defined above instead of the
# hard-coded literals 30 / 0.9 the original repeated here — so tweaking the
# variables actually takes effect.
spectrum_err = utils.mask_telluric_region(spectrum_err, spectrum_blaze, smooth_length=smooth_length, threshold=threshold)
# -
# -
# > Fit the spectrum.

# +
# the range of RV that we will search (in the unit of 100 km/s)
# expand/refine the range of RV if the fit is stuck in a local minimum
RV_array=np.linspace(-2,2.,21)

# set boundaries for the fit [Teff [1000K], logg, Fe/H, Alpha/Fe, vbroad, RV [100 km/s]]
# bounds=None lets the fitter use its defaults; uncomment the block below to
# restrict the (scaled) label space by hand.
bounds = None
# teff_min, teff_max = x_min[0], x_max[0]
# logg_min, logg_max = x_min[1], x_max[1]
# feh_min, feh_max = x_min[2], x_max[2]
# alphafe_min, alphafe_max = x_min[3], x_max[3]
# vbroad_min, vbroad_max = 0.1, 10.
# RV_min, RV_max = -2., 2.
# bounds = np.zeros((2,6))
# bounds[0,0] = (teff_min - x_min[0])/(x_max[0]-x_min[0]) - 0.5
# bounds[1,0] = (teff_max - x_min[0])/(x_max[0]-x_min[0]) - 0.5
# bounds[0,1] = (logg_min - x_min[1])/(x_max[1]-x_min[1]) - 0.5
# bounds[1,1] = (logg_max - x_min[1])/(x_max[1]-x_min[1]) - 0.5
# bounds[0,2] = (feh_min - x_min[2])/(x_max[2]-x_min[2]) - 0.5
# bounds[1,2] = (feh_max - x_min[2])/(x_max[2]-x_min[2]) - 0.5
# bounds[0,3] = (alphafe_min - x_min[3])/(x_max[3]-x_min[3]) - 0.5
# bounds[1,3] = (alphafe_max - x_min[3])/(x_max[3]-x_min[3]) - 0.5
# bounds[0,-2] = vbroad_min
# bounds[1,-2] = vbroad_max
# bounds[0,-1] = RV_min
# bounds[1,-1] = RV_max

# perform the fit (all orders simultaneously)
start_time = time.time()
popt_best, model_spec_best, chi_square = fitting.fit_global(spectrum, spectrum_err, spectrum_blaze, wavelength,\
                                                            NN_coeffs, wavelength_payne, RV_array=RV_array,\
                                                            polynomial_order=6, bounds_set=bounds)
print('Run Time : ', time.time()-start_time, ' s')

# save the results
np.savez("popt_best.npz",\
         popt_best=popt_best,\
         model_spec_best=model_spec_best,\
         chi_square=chi_square)

# print the best fit parameters
# De-scale the first four labels from the network's normalized range back to
# physical units; Teff was additionally stored in units of 1000 K.
# NOTE(review): the broadcast assumes x_min/x_max have exactly four entries — confirm.
popt_best[:4] = (popt_best[:4] + 0.5)*(x_max-x_min) + x_min
popt_best[0] = popt_best[0]*1000.
# int(...)/scale truncates (not rounds) for display.
print("[Teff [K], logg, Fe/H, Alpha/Fe] = ",\
      int(popt_best[0]*1.)/1.,\
      int(popt_best[1]*100.)/100.,\
      int(popt_best[2]*100.)/100.,\
      int(popt_best[3]*100.)/100.)
print("vbroad [km/s] = ", int(popt_best[-2]*10.)/10.)
print("RV [km/s] = ", int(popt_best[-1]*1000.)/10.)
print("Chi square = ", chi_square)
# -
# -
# > Plot the fits.
#
# The telluric region is shaded in gray.
#
# Blue is the observed spectrum, orange is the prediction +- the observation uncertainties.
# +
# make plot for individual order
for k in range(wavelength.shape[0]):
    fig = plt.figure(figsize=[18,20]);
    # Invisible host axes: only used to blank out the shared frame/ticks.
    ax = fig.add_subplot(111)
    ax.spines['top'].set_color('none')
    ax.spines['bottom'].set_color('none')
    ax.spines['left'].set_color('none')
    ax.spines['right'].set_color('none')
    ax.tick_params(labelcolor='w', top='off', bottom='off', left='off', right='off')

    # Wavelength span of this order, split into 5 panels. These values depend
    # only on k, so they are hoisted out of the panel loop (the original
    # recomputed them on every panel iteration).
    wavelength_min = np.min(wavelength[k,:])-10.
    wavelength_max = np.max(wavelength[k,:])+10.
    wave_period = (wavelength_max-wavelength_min)/5.

    # the yaxis range (also per-order, hence hoisted)
    spec_min = np.min(spectrum[k,:])
    spec_max = np.max(spectrum[k,:])

    #----------------------------------------------------------------------
    # zooming in the wavelength by plotting in a few panels
    for i in range(5):
        ax = fig.add_subplot(5,1,i+1)
        plt.xlim([wavelength_min+wave_period*(i),wavelength_min+wave_period*(i+1)])
        plt.ylim([spec_min-0.2,spec_max+0.2])

        # observed spectrum
        plt.plot(wavelength[k,:], spectrum[k,:], lw=2, label="GALAH", color=cb2[0])

        # best prediction
        plt.plot(wavelength[k,:], model_spec_best[k,:], label="Kurucz", lw=2, color=cb2[1])

        # plotting errors
        plt.fill_between(wavelength[k,:], model_spec_best[k,:]-spectrum_err[k,:],\
                         model_spec_best[k,:]+spectrum_err[k,:], alpha=0.5, color=cb2[1])

        # shade the telluric region in gray (pixels whose error was set to 999)
        telluric_region = np.where(spectrum_err[k,:] == 999.)[0]
        start_telluric = np.where(np.diff(telluric_region) != 1)[0] ## find the blocks
        start_telluric = np.concatenate([[0], start_telluric+1, [telluric_region.size-1]])
        for m in range(start_telluric.size-1):
            telluric_block = wavelength[k,telluric_region[start_telluric[m]:start_telluric[m+1]]]
            num_telluric = telluric_block.size
            plt.fill_between(telluric_block, np.ones(num_telluric)*-10., np.ones(num_telluric)*10.,\
                             alpha=0.5, color="gray")

        #----------------------------------------------------------------------
        # add axis and legend
        plt.xlabel("Wavelength [A]")
        plt.legend(loc="lower right", fontsize=28, frameon=False,\
                   borderpad=0.05, labelspacing=0.1)

    # save figure (one PNG per order)
    plt.tight_layout()
    plt.savefig("Order_" +str(k+1) + ".png")
    plt.close()
# -
| tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Notebook generation
# This module contains the scripts and API to auto-generate or update a documentation notebook skeleton from a fastai module (e.g. - fastai.*) or existing documentation file (e.g. - docs_src/*.ipynb). It is not expected you'd use this skeleton as your final docs - you should add markdown, examples, etc to it. The skeleton just has a minimal list of exported symbols.
#
# [`tools/build-docs`](https://github.com/fastai/fastai/blob/master/tools/build-docs) contains a command line tool that transforms a given module into a notebook skeleton. It's essentially a wrapper around [`gen_notebooks.update_notebooks`](/gen_doc.gen_notebooks.html#update_notebooks). For usage around the command line tool, please follow instructions at [`gen_doc_main#updating-notebooks`](/gen_doc_main.html#updating-notebooks).
#
# Alternatively, you can access the same functionality through the module API, documented below.
#
# **Important note:** The notebooks automatically generated or updated need to be trusted before you can see the results in the output cells. To trust a notebook, click on File, then Trust notebook.
#
# This module also contains the scripts and API to convert the documentation notebooks into HTML, which is the format used for the final documentation site.
# + hide_input=true
from fastai import gen_doc
from fastai.gen_doc import nbdoc
from fastai.gen_doc.nbdoc import *
from fastai.gen_doc.gen_notebooks import *
# -
# ## Installation
# This package requires:
# - [nbconvert](https://github.com/jupyter/nbconvert): conda install nbconvert
# - [nb_extensions](https://github.com/ipython-contrib/jupyter_contrib_nbextensions): conda install -c conda-forge jupyter_contrib_nbextensions
# Once nbextensions is installed, your home page of Jupyter Notebook will look like this:
#
# 
# Click on the Nbextensions tab then make sure the hide inputs extension is activated:
#
# 
# As its name suggests, this will allow you to hide input cells and only show their results.
#
# There is also the `Hide Input all` extension, but don't use it, since it toggles all inputs on/off and once executed it'll be very difficult to restore the notebook to its original state where some inputs are supposed to be hidden and some are not.
# ## Convert modules into notebook skeleton
# The first (optional) step is to create a notebook "skeleton" - i.e. a notebook containing all the classes, methods, functions, and other symbols you wish to document. You can create this manually if you prefer, however using the automatic approach can save you some time and ensure you don't miss anything.
#
# For the initial skeleton, use [`create_module_page`](/gen_doc.gen_notebooks.html#create_module_page), which creates a new module from scratch. To update it later with any newly-added symbols, use [`update_module_page`](/gen_doc.gen_notebooks.html#update_module_page).
# + hide_input=true
# Render the API docs for create_module_page with per-argument notes.
show_doc(create_module_page, arg_comments={
    'mod': 'the module',
    'dest_path': 'the folder in which to generate the notebook',
    'force': 'if False, will raise an exception if the notebook is already present'})
# -
# Equivalent [CLI](/gen_doc_main.html#creating-a-new-documentation-notebook-from-existing-module):
# ```bash
# tools/build-docs fastai.subpackage.module
# ```
# + hide_input=true
# Render the API docs for link_nb.
show_doc(link_nb)
# + hide_input=true
# Render the API docs for update_module_page with per-argument notes.
show_doc(update_module_page, arg_comments={
    'mod': 'the module',
    'dest_path': 'the folder in which to generate the notebook'})
# -
# All the cells added by a user are conserved, only the cells of new symbols (aka that weren't documented before) will be inserted at the end. You can then move them to wherever you like in the notebook. For instance, to update this module's documentation, simply run:
#
# ```
# update_module_page(gen_doc.gen_notebooks, '.')
# ```
#
# You can also generate and update *all* modules in a package using [`update_notebooks`](/gen_doc.gen_notebooks.html#update_notebooks).
# Equivalent [CLI](/gen_doc_main.html#updating-an-existing-functionclass):
# ```bash
# tools/build-docs docs_src/gen_doc.gen_notebooks.ipynb
# ```
# ### Updating module metadata
# Jekyll pulls the documentation title, summary, and keywords from the metadata of each notebook.
# Notebook metadata structure looks like this: `'metadata': { 'jekyll': {...} }`
#
# To update metadata of these notebooks, run `generate_missing_metadata('.')`. Then open the notebook `jekyll_metadata.ipynb` to change the metadata.
# + hide_input=true
# Render the API docs for the metadata helpers.
show_doc(generate_missing_metadata)
# + hide_input=true
show_doc(update_nb_metadata)
# -

# ### Updating all module docs

# + hide_input=true
# Render the API docs for update_notebooks.
show_doc(update_notebooks)
# -
# As a convenience method, this can update all notebooks. This snippet does the whole lot for you:
#
# ```python
# update_notebooks('docs_src', update_html=False, update_nb=True)
# ```
#
# This will update all ipynb documentation notebooks specified under source_path
# ## Add documentation
# The automatically generated module will only contain the table of contents and the doc string of the functions and classes in your module (or the ones you picked with \_\_all\_\_). You should add more prose to them in markdown cells, or examples of uses inside the notebook.
# At any time, if you don't want the input of a code cell to figure in the final result, you can use the little button in your tool bar to hide it.
#
# 
# The same button can show you the hidden input from a cell. This used in conjunction with the helper functions from [nbdoc](gen_doc.nbdoc.ipynb) should allow you to easily add any content you need.
# ## Convert notebook to html
# Once you're finished, don't forget to properly save your notebook, then you can either convert all the notebooks together with the script:
# ```
# python -m convert2html dir
# ```
# - **dir** is the directory where all your notebooks are stored.
#
# If you prefer to do this in a notebook, you can simply type:
#
# ```python
# from fastai.gen_doc.convert2html import convert_nb
# convert_nb('gen_doc.gen_notebooks.ipynb', '../docs')
# ```
#
# For more information see the [documentation of convert2html](/gen_doc.convert2html.html).
# ## Undocumented Methods - Methods moved below this line will intentionally be hidden
| docs_src/gen_doc.gen_notebooks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from wordlist import secretWords
# +
# Standard NLP stuff to do word -> vector.
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModel
def get_word_idx(sent: str, word: str):
    """Return the position of `word` among the space-split tokens of `sent`."""
    tokens = sent.split(" ")
    return tokens.index(word)
def get_hidden_states(encoded, token_ids_word, model, layers):
    """Push input IDs through `model`, sum the hidden states of the requested
    `layers`, keep only the sub-word token outputs listed in `token_ids_word`,
    and average them into a single word vector."""
    with torch.no_grad():
        model_out = model(**encoded)
    # Sum the selected layers; squeeze drops the batch dimension.
    summed = sum(model_out.hidden_states[i] for i in layers).squeeze()
    # Average over the sub-word tokens that make up the word of interest.
    return summed[token_ids_word].mean(dim=0)
def get_word_vector(sent, idx, tokenizer, model, layers):
    """Tokenize `sent`, find every sub-word token that maps back to word
    number `idx`, and return that word's averaged embedding via
    `get_hidden_states`."""
    encoded = tokenizer.encode_plus(sent, return_tensors="pt")
    # Positions of all sub-word tokens belonging to the word of interest.
    positions = [i for i, wid in enumerate(encoded.word_ids()) if wid == idx]
    return get_hidden_states(encoded, positions, model, layers)
# Use the last four hidden layers (summed) as the embedding.
layers = [-4, -3, -2, -1]
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
model = AutoModel.from_pretrained("bert-base-cased", output_hidden_states=True)
# +
## This is the part I wrote
def vec(word, sentence):
    """Contextual embedding of `word` as it appears in `sentence`."""
    return get_word_vector(sentence, get_word_idx(sentence, word), tokenizer, model, layers)
from scipy.spatial.distance import cosine as cos_dist

# This is my closest guess so far. I want to find words in the word list with vectors near this word.
# The sentence is based on my other guesses.
sent = "A {} who studies at the university"
target_vec = vec('scholar', sent.format('scholar'))

# Embed every candidate word in the same sentence frame and record its cosine
# distance to the target embedding.
words = []
for w in sorted(secretWords):
    w_vec = vec(w, sent.format(w))
    print(w, w_vec[:3])
    words.append((w, cos_dist(target_vec, w_vec)))
# -

# let's see the top 20 (smallest cosine distance first)
words = list(sorted(words, key=lambda x: x[1]))
for w in words[:20]:
    print(w)
# +
# the answer turned out to be 'historian'
# -
| semantle-helper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
def rect_integral(f, a, b, num):
    """Cumulative midpoint-rule integral of f over [a, b] on a `num`-point grid.

    Returns a list of num-1 running partial sums; element j approximates the
    integral of f from a up to the (j+2)-th grid point.
    """
    grid = np.linspace(a, b, num)
    running = 0.0
    out = []
    for lo, hi in zip(grid[:-1], grid[1:]):
        running = running + f((lo + hi) / 2) * (hi - lo)
        out.append(running)
    return out
def trap_integral(f, a, b, num):
    """Cumulative trapezoid-rule integral of f over [a, b] on a `num`-point grid.

    Returns a list of num-1 running partial sums (see rect_integral).
    """
    grid = np.linspace(a, b, num)
    running = 0.0
    out = []
    for lo, hi in zip(grid[:-1], grid[1:]):
        running = running + (f(lo) + f(hi)) / 2 * (hi - lo)
        out.append(running)
    return out
def parab_integral(f, a, b, num):
    """Cumulative Simpson-rule integral of f over [a, b] on a `num`-point grid.

    Each interval contributes (f(lo) + f(hi) + 4*f(mid)) / 6 * width;
    returns a list of num-1 running partial sums (see rect_integral).
    """
    grid = np.linspace(a, b, num)
    running = 0.0
    out = []
    for lo, hi in zip(grid[:-1], grid[1:]):
        running = running + (f(lo) + f(hi) + 4 * f((lo + hi) / 2)) / 6 * (hi - lo)
        out.append(running)
    return out
def integral(f, functions, titles, segments, n):
    """Plot the running integral of each function over its segment.

    f : quadrature rule (rect_integral / trap_integral / parab_integral).
    functions, titles, segments : parallel lists of integrands, subplot
        titles, and (a, b) integration limits.
    n : number of grid points handed to the quadrature rule (it returns
        n-1 partial sums, matching the n-1 x positions plotted).
    """
    assert(len(functions) == len(segments) == len(titles) and n > 0)
    fig, axes = plt.subplots(len(functions), 1, figsize=(9, 8 * len(functions)))
    for ax, func, title, segment in zip(axes, functions, titles, segments):
        # NOTE(review): delta uses (b-a)/n while the quadrature grid spacing
        # is (b-a)/(n-1), so the x positions are very slightly offset from
        # the true interval midpoints — confirm whether this is intended.
        delta = (segment[1] - segment[0]) / n
        ax.grid(linestyle='--')
        ax.plot(np.linspace(segment[0] + delta / 2, segment[1] - delta / 2, n - 1), f(func, segment[0], segment[1], n))
        ax.set_title(title)
# Test integrands, subplot labels, and integration ranges (parallel lists).
functions = [np.sin, np.cos, lambda x: x ** 2, lambda x: x ** 3]
titles = ["$sin x$", "$cos x$", "$x ^ 2$", "$x ^ 3$"]
segments = [(0, 2 * np.pi),
            (0, 2 * np.pi),
            (-1, 4),
            (-1, 1)]
# Grid resolution shared by all three quadrature rules.
n = 10000

integral(rect_integral, functions, titles, segments, n)
integral(trap_integral, functions, titles, segments, n)
integral(parab_integral, functions, titles, segments, n)
| Damarad_Viktor/Integral.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Conditional and Slice Selection
import numpy as np

arr = np.arange(0,9)
arr = arr.reshape(3,3)
# Comparison against a scalar broadcasts to a boolean mask of the same shape.
up_to_five = arr < 6
up_to_five
# Boolean indexing returns the (flattened) elements where the mask is True.
arr[up_to_five]
# it's equivalent to the above but in one go
arr[arr < 6]
arr = np.arange(40).reshape(8,5)
# any boolean operation applicable to the datatype contained in the array
arr[arr % 2 == 0]
arr
# slice notation to get an internal sub matrix
arr[1:5, 1:4]
# Slices can be mixed: first 3 rows, columns from index 3 on.
arr[:3,3:]
| Numpy - Conditional and Slice Selection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## Implementing binary decision trees
# The goal of this notebook is to implement your own binary decision tree classifier. You will:
#
# * Use SFrames to do some feature engineering.
# * Transform categorical variables into binary variables.
# * Write a function to compute the number of misclassified examples in an intermediate node.
# * Write a function to find the best feature to split on.
# * Build a binary decision tree from scratch.
# * Make predictions using the decision tree.
# * Evaluate the accuracy of the decision tree.
# * Visualize the decision at the root node.
#
# **Important Note**: In this assignment, we will focus on building decision trees where the data contain **only binary (0 or 1) features**. This allows us to avoid dealing with:
# * Multiple intermediate nodes in a split
# * The thresholding issues of real-valued features.
#
# This assignment **may be challenging**, so brace yourself :)
# # Fire up Graphlab Create
# Make sure you have the latest version of GraphLab Create.
import graphlab
# # Load the lending club dataset
# We will be using the same [LendingClub](https://www.lendingclub.com/) dataset as in the previous assignment.
# Load the LendingClub data as a GraphLab SFrame.
loans = graphlab.SFrame('lending-club-data.gl/')
# Like the previous assignment, we reassign the labels to have +1 for a safe loan, and -1 for a risky (bad) loan.
# Recode the target: bad_loans == 0 becomes +1 (safe), anything else -1 (risky),
# then drop the now-redundant source column.
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
loans = loans.remove_column('bad_loans')
# Unlike the previous assignment where we used several features, in this assignment, we will just be using 4 categorical
# features:
#
# 1. grade of the loan
# 2. the length of the loan term
# 3. the home ownership status: own, mortgage, rent
# 4. number of years of employment.
#
# Since we are building a binary decision tree, we will have to convert these categorical features to a binary representation in a subsequent section using 1-hot encoding.
# Keep only the four categorical predictors plus the label column.
features = ['grade', # grade of the loan
            'term', # the term of the loan
            'home_ownership', # home_ownership status: own, mortgage or rent
            'emp_length', # number of years of employment
           ]
target = 'safe_loans'
loans = loans[features + [target]]
# Let's explore what the dataset looks like.
loans
# ## Subsample dataset to make sure classes are balanced
# Just as we did in the previous assignment, we will undersample the larger class (safe loans) in order to balance out our dataset. This means we are throwing away many data points. We use `seed=1` so everyone gets the same results.
# +
# Separate the two classes so the majority class can be downsampled.
safe_loans_raw = loans[loans[target] == 1]
risky_loans_raw = loans[loans[target] == -1]
# Since there are less risky loans than safe loans, find the ratio of the sizes
# and use that percentage to undersample the safe loans.
percentage = len(risky_loans_raw)/float(len(safe_loans_raw))
# seed=1 keeps the subsample reproducible across runs.
safe_loans = safe_loans_raw.sample(percentage, seed = 1)
risky_loans = risky_loans_raw
loans_data = risky_loans.append(safe_loans)
# Both shares should now be close to 0.5.
print "Percentage of safe loans :", len(safe_loans) / float(len(loans_data))
print "Percentage of risky loans :", len(risky_loans) / float(len(loans_data))
print "Total number of loans in our new dataset :", len(loans_data)
# -
# **Note:** There are many approaches for dealing with imbalanced data, including some where we modify the learning algorithm. These approaches are beyond the scope of this course, but some of them are reviewed in this [paper](http://ieeexplore.ieee.org/xpl/login.jsp?tp=&arnumber=5128907&url=http%3A%2F%2Fieeexplore.ieee.org%2Fiel5%2F69%2F5173046%2F05128907.pdf%3Farnumber%3D5128907 ). For this assignment, we use the simplest possible approach, where we subsample the overly represented class to get a more balanced dataset. In general, and especially when the data is highly imbalanced, we recommend using more advanced methods.
# ## Transform categorical data into binary features
# In this assignment, we will implement **binary decision trees** (decision trees for binary features, a specific case of categorical variables taking on two values, e.g., true/false). Since all of our features are currently categorical features, we want to turn them into binary features.
#
# For instance, the **home_ownership** feature represents the home ownership status of the loanee, which is either `own`, `mortgage` or `rent`. For example, if a data point has the feature
# ```
# {'home_ownership': 'RENT'}
# ```
# we want to turn this into three features:
# ```
# {
# 'home_ownership = OWN' : 0,
# 'home_ownership = MORTGAGE' : 0,
# 'home_ownership = RENT' : 1
# }
# ```
#
# Since this code requires a few Python and GraphLab tricks, feel free to use this block of code as is. Refer to the API documentation for a deeper understanding.
# Rebuild the balanced dataset, then 1-hot encode every categorical feature.
loans_data = risky_loans.append(safe_loans)
for feature in features:
    # Each value becomes a {value: 1} dict, which unpack() expands into
    # one binary column per category, prefixed "feature.value".
    loans_data_one_hot_encoded = loans_data[feature].apply(lambda x: {x: 1})
    loans_data_unpacked = loans_data_one_hot_encoded.unpack(column_name_prefix=feature)
    # Change None's to 0's: a missing category means "not this value".
    for column in loans_data_unpacked.column_names():
        loans_data_unpacked[column] = loans_data_unpacked[column].fillna(0)
    # Swap the original categorical column for its binary expansion.
    loans_data.remove_column(feature)
    loans_data.add_columns(loans_data_unpacked)
# Let's see what the feature columns look like now:
# Refresh the feature list from the encoded table; keep the label out of it.
features = loans_data.column_names()
features.remove('safe_loans')  # Remove the response variable
features
print("Number of features (after binarizing categorical variables) = %s" % len(features))
# Let's explore what one of these columns looks like:
loans_data['grade.A']
# This column is set to 1 if the loan grade is A and 0 otherwise.
#
# **Checkpoint:** Make sure the following answers match up.
print("Total number of grade.A loans : %s" % loans_data['grade.A'].sum())
# BUG FIX: the printed message was misspelled ("Expexted").
print("Expected answer : 6422")
# ## Train-test split
#
# We split the data into a train test split with 80% of the data in the training set and 20% of the data in the test set. We use `seed=1` so that everyone gets the same result.
train_data, test_data = loans_data.random_split(.8, seed=1)
# # Decision tree implementation
# In this section, we will implement binary decision trees from scratch. There are several steps involved in building a decision tree. For that reason, we have split the entire assignment into several sections.
#
# ## Function to count number of mistakes while predicting majority class
#
# Recall from the lecture that prediction at an intermediate node works by predicting the **majority class** for all data points that belong to this node.
#
# Now, we will write a function that calculates the number of **missclassified examples** when predicting the **majority class**. This will be used to help determine which feature is the best to split on at a given node of the tree.
#
# **Note**: Keep in mind that in order to compute the number of mistakes for a majority classifier, we only need the label (y values) of the data points in the node.
#
# ** Steps to follow **:
# * ** Step 1:** Calculate the number of safe loans and risky loans.
# * ** Step 2:** Since we are assuming majority class prediction, all the data points that are **not** in the majority class are considered **mistakes**.
# * ** Step 3:** Return the number of **mistakes**.
#
#
# Now, let us write the function `intermediate_node_num_mistakes` which computes the number of misclassified examples of an intermediate node given the set of labels (y values) of the data points contained in the node. Fill in the places where you find `## YOUR CODE HERE`. There are **three** places in this function for you to fill in.
# +
def intermediate_node_num_mistakes(labels_in_node):
    """Number of mistakes a majority-class classifier makes on these labels.

    labels_in_node: sized iterable of +1/-1 labels (SArray, list, ...).

    Returns the size of the minority class, which is exactly the number of
    misclassified points when the node predicts its majority class.
    """
    # Corner case: an empty node makes no mistakes.
    if len(labels_in_node) == 0:
        return 0
    # Count each class by plain iteration so any iterable of labels works,
    # not only graphlab SArrays with an .apply() method (backward-compatible:
    # SArrays are iterable too).
    num_safe = sum(1 for label in labels_in_node if label == 1)
    num_risky = sum(1 for label in labels_in_node if label == -1)
    # The majority classifier misclassifies exactly the minority class
    # (on a tie both expressions equal the same count, as before).
    return min(num_safe, num_risky)
# -
# Because there are several steps in this assignment, we have introduced some stopping points where you can check your code and make sure it is correct before proceeding. To test your `intermediate_node_num_mistakes` function, run the following code until you get a **Test passed!**, then you should proceed. Otherwise, you should spend some time figuring out where things went wrong.
# +
# Unit checks for intermediate_node_num_mistakes: each case has a 2-vs-5
# (or 5-vs-2) class split, so the expected mistake count is always 2.
# Test case 1
example_labels = graphlab.SArray([-1, -1, 1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
    print 'Test passed!'
else:
    print 'Test 1 failed... try again!'
# Test case 2
example_labels = graphlab.SArray([-1, -1, 1, 1, 1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
    print 'Test passed!'
else:
    print 'Test 2 failed... try again!'
# Test case 3
example_labels = graphlab.SArray([-1, -1, -1, -1, -1, 1, 1])
if intermediate_node_num_mistakes(example_labels) == 2:
    print 'Test passed!'
else:
    print 'Test 3 failed... try again!'
# -
# ## Function to pick best feature to split on
# The function **best_splitting_feature** takes 3 arguments:
# 1. The data (SFrame of data which includes all of the feature columns and label column)
# 2. The features to consider for splits (a list of strings of column names to consider for splits)
# 3. The name of the target/label column (string)
#
# The function will loop through the list of possible features, and consider splitting on each of them. It will calculate the classification error of each split and return the feature that had the smallest classification error when split on.
#
# Recall that the **classification error** is defined as follows:
# $$
# \mbox{classification error} = \frac{\mbox{# mistakes}}{\mbox{# total examples}}
# $$
#
# Follow these steps:
# * **Step 1:** Loop over each feature in the feature list
# * **Step 2:** Within the loop, split the data into two groups: one group where all of the data has feature value 0 or False (we will call this the **left** split), and one group where all of the data has feature value 1 or True (we will call this the **right** split). Make sure the **left** split corresponds with 0 and the **right** split corresponds with 1 to ensure your implementation fits with our implementation of the tree building process.
# * **Step 3:** Calculate the number of misclassified examples in both groups of data and use the above formula to compute the **classification error**.
# * **Step 4:** If the computed error is smaller than the best error found so far, store this **feature and its error**.
#
# This may seem like a lot, but we have provided pseudocode in the comments in order to help you implement the function correctly.
#
# **Note:** Remember that since we are only dealing with binary features, we do not have to consider thresholds for real-valued features. This makes the implementation of this function much easier.
#
# Fill in the places where you find `## YOUR CODE HERE`. There are **five** places in this function for you to fill in.
def best_splitting_feature(data, features, target):
    """Return the feature whose 0/1 split gives the lowest classification error.

    data:     SFrame holding the candidate feature columns plus the target.
    features: list of binary (0/1) feature column names to evaluate.
    target:   name of the +1/-1 label column.
    """
    total_points = float(len(data))
    best_feature = None
    best_error = 10  # classification error is always <= 1, so 10 is safely above any real value

    for candidate in features:
        # Partition the rows: feature value 0 goes left, value 1 goes right.
        left_split = data[data[candidate] == 0]
        right_split = data[data[candidate] == 1]

        # Mistakes made by majority-class prediction in each branch.
        total_mistakes = (intermediate_node_num_mistakes(left_split[target]) +
                          intermediate_node_num_mistakes(right_split[target]))
        split_error = total_mistakes / total_points

        # Strict '<' keeps the first feature that achieves the lowest error.
        if split_error < best_error:
            best_feature, best_error = candidate, split_error

    return best_feature
# To test your `best_splitting_feature` function, run the following code:
# Sanity check: on the full training set the best first split is known
# to be the 'term. 36 months' indicator column.
if best_splitting_feature(train_data, features, 'safe_loans') == 'term. 36 months':
    print 'Test passed!'
else:
    print 'Test failed... try again!'
# ## Building the tree
#
# With the above functions implemented correctly, we are now ready to build our decision tree. Each node in the decision tree is represented as a dictionary which contains the following keys and possible values:
#
# {
# 'is_leaf' : True/False.
# 'prediction' : Prediction at the leaf node.
# 'left' : (dictionary corresponding to the left tree).
# 'right' : (dictionary corresponding to the right tree).
# 'splitting_feature' : The feature that this node splits on.
# }
#
# First, we will write a function that creates a leaf node given a set of target values. Fill in the places where you find `## YOUR CODE HERE`. There are **three** places in this function for you to fill in.
def create_leaf(target_values):
    """Build a leaf node dict predicting the majority class of target_values.

    target_values: column/array of +1/-1 labels reaching this leaf.
    Returns a node dict with no split and no children; 'prediction' is the
    majority label (ties go to -1, matching the original implementation).
    """
    positives = len(target_values[target_values == +1])
    negatives = len(target_values[target_values == -1])
    return {
        'splitting_feature': None,
        'left': None,
        'right': None,
        'is_leaf': True,
        'prediction': 1 if positives > negatives else -1,
    }
# We have provided a function that learns the decision tree recursively and implements 3 stopping conditions:
# 1. **Stopping condition 1:** All data points in a node are from the same class.
# 2. **Stopping condition 2:** No more features to split on.
# 3. **Additional stopping condition:** In addition to the above two stopping conditions covered in lecture, in this assignment we will also consider a stopping condition based on the **max_depth** of the tree. By not letting the tree grow too deep, we will save computational effort in the learning process.
#
# Now, we will write down the skeleton of the learning algorithm. Fill in the places where you find `## YOUR CODE HERE`. There are **seven** places in this function for you to fill in.
def decision_tree_create(data, features, target, current_depth = 0, max_depth = 10):
    """Recursively grow a binary decision tree.

    data:          SFrame with binary (0/1) feature columns and the target column.
    features:      list of candidate feature names still available for splitting.
    target:        name of the +1/-1 label column.
    current_depth: depth of this node (the root is 0).
    max_depth:     depth at which growth stops and a leaf is created.

    Returns a node dict with keys 'is_leaf', 'prediction',
    'splitting_feature', 'left' and 'right'.
    """
    remaining_features = features[:]  # copy so the caller's list is never mutated
    target_values = data[target]
    print("--------------------------------------------------------------------")
    print("Subtree, depth = %s (%s data points)." % (current_depth, len(target_values)))

    # Stopping condition 1: the node is already pure (zero mistakes).
    if intermediate_node_num_mistakes(target_values) == 0:
        print("Stopping condition 1 reached.")
        return create_leaf(target_values)

    # Stopping condition 2: no features left to split on.
    # BUG FIX: the original tested "remaining_features == 0", comparing a
    # *list* to an int, which is always False — this condition never fired.
    if len(remaining_features) == 0:
        print("Stopping condition 2 reached.")
        return create_leaf(target_values)

    # Additional stopping condition: maximum depth reached.
    if current_depth >= max_depth:
        print("Reached maximum depth. Stopping for now.")
        return create_leaf(target_values)

    # Pick the feature whose split minimises classification error.
    # (remaining_features equals the passed-in list before removal below.)
    splitting_feature = best_splitting_feature(data, remaining_features, target)

    # Partition rows on the chosen feature: value 0 -> left, value 1 -> right.
    left_split = data[data[splitting_feature] == 0]
    right_split = data[data[splitting_feature] == 1]
    remaining_features.remove(splitting_feature)
    print("Split on feature %s. (%s, %s)" % (splitting_feature, len(left_split), len(right_split)))

    # A "perfect" split sends every row one way; make that side a leaf.
    if len(left_split) == len(data):
        print("Creating leaf node.")
        return create_leaf(left_split[target])
    if len(right_split) == len(data):
        print("Creating leaf node.")
        return create_leaf(right_split[target])

    # Recurse on both halves, one level deeper.
    left_tree = decision_tree_create(left_split, remaining_features, target, current_depth + 1, max_depth)
    right_tree = decision_tree_create(right_split, remaining_features, target, current_depth + 1, max_depth)

    return {'is_leaf'          : False,
            'prediction'       : None,
            'splitting_feature': splitting_feature,
            'left'             : left_tree,
            'right'            : right_tree}
# Here is a recursive function to count the nodes in your tree:
def count_nodes(tree):
    """Return the total number of nodes (internal + leaf) in a tree dict."""
    subtree_total = 0
    if not tree['is_leaf']:
        # An internal node carries a left and a right child to count as well.
        subtree_total = count_nodes(tree['left']) + count_nodes(tree['right'])
    return 1 + subtree_total
# Run the following test code to check your implementation. Make sure you get **'Test passed'** before proceeding.
# Sanity check: a depth-3 tree on this data is known to have exactly 13 nodes.
small_data_decision_tree = decision_tree_create(train_data, features, 'safe_loans', max_depth = 3)
if count_nodes(small_data_decision_tree) == 13:
    print 'Test passed!'
else:
    print 'Test failed... try again!'
    print 'Number of nodes found :', count_nodes(small_data_decision_tree)
    print 'Number of nodes that should be there : 13'
# ## Build the tree!
#
# Now that all the tests are passing, we will train a tree model on the **train_data**. Limit the depth to 6 (**max_depth = 6**) to make sure the algorithm doesn't run for too long. Call this tree **my_decision_tree**.
#
# **Warning**: This code block may take 1-2 minutes to learn.
# Make sure to cap the depth at 6 by using max_depth = 6
# small_data_decision_tree = decision_tree_create(train_data, features, 'safe_loans', max_depth = 3)
my_decision_tree = decision_tree_create(train_data, features, 'safe_loans', max_depth=6)
# ## Making predictions with a decision tree
#
# As discussed in the lecture, we can make predictions from the decision tree with a simple recursive function. Below, we call this function `classify`, which takes in a learned `tree` and a test point `x` to classify. We include an option `annotate` that describes the prediction path when set to `True`.
#
# Fill in the places where you find `## YOUR CODE HERE`. There is **one** place in this function for you to fill in.
def classify(tree, x, annotate = False):
    """Predict the label of a single data point using a learned tree.

    tree:     node dict produced by decision_tree_create.
    x:        mapping from feature name to its 0/1 value for this point.
    annotate: when True, print the decision path while descending.

    Returns the leaf prediction (+1 or -1).
    """
    if tree['is_leaf']:
        # Reached a leaf: its stored majority class is the answer.
        if annotate:
            print("At leaf, predicting %s" % tree['prediction'])
        return tree['prediction']
    else:
        # Internal node: follow the branch matching this point's feature value.
        split_feature_value = x[tree['splitting_feature']]
        if annotate:
            print("Split on %s = %s" % (tree['splitting_feature'], split_feature_value))
        if split_feature_value == 0:
            return classify(tree['left'], x, annotate)
        else:
            # BUG FIX: the original set "annotate = True" before recursing
            # right, which printed the decision path on every right branch
            # even when the caller asked for silent classification.
            return classify(tree['right'], x, annotate)
# Now, let's consider the first example of the test set and see what `my_decision_tree` model predicts for this data point.
test_data[0]
print 'Predicted class: %s ' % classify(my_decision_tree, test_data[0])
# Let's add some annotations to our prediction to see what the prediction path was that lead to this predicted class:
classify(my_decision_tree, test_data[0], annotate=True)
# ** Quiz question:** What was the feature that **my_decision_tree** first split on while making the prediction for test_data[0]?
# ** Quiz question:** What was the first feature that lead to a right split of test_data[0]?
# ** Quiz question:** What was the last feature split on before reaching a leaf node for test_data[0]?
# ## Evaluating your decision tree
# Now, we will write a function to evaluate a decision tree by computing the classification error of the tree on the given dataset.
#
# Again, recall that the **classification error** is defined as follows:
# $$
# \mbox{classification error} = \frac{\mbox{# mistakes}}{\mbox{# total examples}}
# $$
#
# Now, write a function called `evaluate_classification_error` that takes in as input:
# 1. `tree` (as described above)
# 2. `data` (an SFrame)
#
# This function should return a prediction (class label) for each row in `data` using the decision `tree`. Fill in the places where you find `## YOUR CODE HERE`. There is **one** place in this function for you to fill in.
# +
def evaluate_classification_error(tree, data):
    """Fraction of rows in `data` that the tree misclassifies.

    NOTE(review): the label column name 'safe_loans' is hard-coded here
    rather than passed as a parameter — fine for this notebook, but worth
    confirming before reusing elsewhere.
    """
    # Predict every row, then compare against the stored labels.
    predictions = data.apply(lambda row: classify(tree, row))
    num_correct = sum(data['safe_loans'] == predictions)
    return 1 - num_correct / float(len(predictions))
# -
# Now, let's use this function to evaluate the classification error on the test set.
evaluate_classification_error(my_decision_tree, test_data)
# **Quiz Question:** Rounded to 2nd decimal point, what is the classification error of **my_decision_tree** on the **test_data**?
# ## Printing out a decision stump
# As discussed in the lecture, we can print out a single decision stump (printing out the entire tree is left as an exercise to the curious reader).
def print_stump(tree, name = 'root'):
    """Pretty-print a single node and its two children as ASCII art.

    tree: node dict from decision_tree_create.
    name: label to display for this node (conventionally the parent's
          splitting feature, or 'root' for the tree's root).
    """
    split_name = tree['splitting_feature'] # split_name is something like 'term. 36 months'
    if split_name is None:
        # Leaves have no split to draw; just show the stored prediction.
        print "(leaf, label: %s)" % tree['prediction']
        return None
    # Feature columns are named "<feature>.<value>" by the 1-hot unpack step.
    split_feature, split_value = split_name.split('.')
    print '                       %s' % name
    print '         |---------------|----------------|'
    print '         |                                |'
    print '         |                                |'
    print '         |                                |'
    print '  [{0} == 0]               [{0} == 1]    '.format(split_name)
    print '         |                                |'
    print '         |                                |'
    print '         |                                |'
    # Each child is drawn either as a labelled leaf or as an opaque 'subtree'.
    print '    (%s)                         (%s)' \
        % (('leaf, label: ' + str(tree['left']['prediction']) if tree['left']['is_leaf'] else 'subtree'),
           ('leaf, label: ' + str(tree['right']['prediction']) if tree['right']['is_leaf'] else 'subtree'))
print_stump(my_decision_tree)
# **Quiz Question:** What is the feature that is used for the split at the root node?
#
# ### Exploring the intermediate left subtree
#
# The tree is a recursive dictionary, so we do have access to all the nodes! We can use
# * `my_decision_tree['left']` to go left
# * `my_decision_tree['right']` to go right
print_stump(my_decision_tree['left'], my_decision_tree['splitting_feature'])
# ### Exploring the left subtree of the left subtree
#
print_stump(my_decision_tree['left']['left'], my_decision_tree['left']['splitting_feature'])
# **Quiz question:** What is the path of the **first 3 feature splits** considered along the **left-most** branch of **my_decision_tree**?
# **Quiz question:** What is the path of the **first 3 feature splits** considered along the **right-most** branch of **my_decision_tree**?
# print_stump(my_decision_tree['right'],my_decision_tree['splitting_feature'])
# NOTE(review): print_stump's second argument is conventionally the *parent's*
# splitting feature (it labels the node being drawn). The next call passes the
# node's own splitting feature instead — confirm whether that is intended.
print_stump(my_decision_tree['right'],my_decision_tree['right']['splitting_feature'])
# Right child of the right subtree, labelled with its parent's split feature.
print_stump(my_decision_tree['right']['right'],my_decision_tree['right']['splitting_feature'])
# Right subtree again, labelled with the root's split feature (the convention).
print_stump(my_decision_tree['right'],my_decision_tree['splitting_feature'])
| classification/Programming-And-Assignments/module-5-decision-tree-assignment-2-blank.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import os
import tqdm
filename='../FDDC/dingzeng/dingzeng.train'
def getDict(name, start=-1, end=-1, sentence=-1):
    """Bundle an entity mention into a dict; -1 marks 'not located yet'."""
    return dict(name=name, start=start, end=end, sentence=sentence)
class hetong():
    """One labelled record from the training file.

    Each annotated field (addObj, addType, addNum, addPrice, lookup, buyType)
    holds a list of mention dicts ({'name', 'start', 'end', 'sentence'});
    the constructor seeds every list with the raw label text and -1 offsets,
    to be filled in by the alignment pass later in this notebook.
    """
    def __init__(self, name, addObj, addType, addNum, addPrice, lookup, buyType):
        self.name = name
        # One list per annotated field; the six copy-pasted blocks of the
        # original are collapsed into a single loop.
        field_values = {'addObj': addObj, 'addType': addType, 'addNum': addNum,
                        'addPrice': addPrice, 'lookup': lookup, 'buyType': buyType}
        for field, value in field_values.items():
            # Same structure getDict(value) produces: offsets start at -1.
            setattr(self, field, [{'name': value, 'start': -1, 'end': -1, 'sentence': -1}])
# Read the tab-separated label file; pad short rows with empty strings so
# every line can be unpacked into the 7-argument hetong constructor.
length,length2,ht=[],[],[]
with open(filename,'r') as fr:
    for line in fr.readlines():
        a=line.split('\t')
        length.append(len(a))  # raw column count, kept for inspection
        if len(a)<7:
            a.extend(['']*(7-len(a)))
        length2.append(len(a))  # padded column count (always 7)
        ht.append(hetong(*a))
# +
# For every record: locate its label strings inside the source document,
# record their character offsets, then emit a char-per-line BIO tagging file.
dirname='../FDDC/dingzeng/textWithMoney'
if not os.path.exists(dirname):
    os.makedirs(dirname)
for i in tqdm.tqdm(ht):
    # Work on the record's attribute dict directly; 'name' is the doc id.
    i=i.__dict__
    if i['addObj']=='' or i['addType']=='':
        continue
    name=i['name']
    i.pop('name')
    with open(os.path.join(dirname,name+'.txt'),'w') as fw:
        with open('../FDDC/dingzeng/dataHalf/'+name+'.txt','r') as fr:
            text=fr.readline()
            # Split on the Chinese full stop; offsets below re-add 1 per
            # separator to map sentence-local positions back to the document.
            sentence=text.split('。')
            addObj_name=i['addObj'][0]['name']
            addType_name=i['addType'][0]['name']
            addNum_name=i['addNum'][0]['name']
            addPrice_name=i['addPrice'][0]['name']
            lookup_name=i['lookup'][0]['name']
            buyType_name=i['buyType'][0]['name']
            # Pass 1: record sentences containing BOTH addObj and addType.
            index=0
            for num,sen in enumerate(sentence):
                addObj_start=sen.find(addObj_name)
                addType_start=sen.find(addType_name)
                if addObj_start!=-1 and addType_start!=-1:
                    i['addObj'].append(getDict(addObj_name,index+addObj_start,index+addObj_start+len(addObj_name),num))
                    i['addType'].append(getDict(addType_name,index+addType_start,index+addType_start+len(addType_name),num))
                index+=len(sen)+1
            # Pass 2: record the other fields when they co-occur with
            # addObj or addType in the same sentence.
            index=0
            for num,sen in enumerate(sentence):
                addObj_start=sen.find(addObj_name)
                addType_start=sen.find(addType_name)
                addNum_start=sen.find(addNum_name)
                addPrice_start=sen.find(addPrice_name)
                lookup_start=sen.find(lookup_name)
                buyType_start=sen.find(buyType_name)
                # NOTE(review): this branch tests addNum but appends an
                # addPrice mention — looks like a copy-paste slip; confirm
                # whether i['addNum'] with addNum offsets was intended.
                if addNum_name!='' and addNum_start !=-1 and (addObj_start!=-1 or addType_start!=-1):
                    i['addPrice'].append(getDict(addPrice_name,index+addPrice_start,index+addPrice_start+len(addPrice_name),num))
                    if addObj_start!=-1:
                        i['addObj'].append(getDict(addObj_name,index+addObj_start,index+addObj_start+len(addObj_name),num))
                    if addType_start!=-1:
                        i['addType'].append(getDict(addType_name,index+addType_start,index+addType_start+len(addType_name),num))
                if addPrice_name!='' and addPrice_start !=-1 and (addObj_start!=-1 or addType_start!=-1):
                    i['addPrice'].append(getDict(addPrice_name,index+addPrice_start,index+addPrice_start+len(addPrice_name),num))
                    if addObj_start!=-1:
                        i['addObj'].append(getDict(addObj_name,index+addObj_start,index+addObj_start+len(addObj_name),num))
                    if addType_start!=-1:
                        i['addType'].append(getDict(addType_name,index+addType_start,index+addType_start+len(addType_name),num))
                if lookup_name!='' and lookup_start !=-1 and (addObj_start!=-1 or addType_start!=-1):
                    i['lookup'].append(getDict(lookup_name,index+lookup_start,index+lookup_start+len(lookup_name),num))
                    if addObj_start!=-1:
                        i['addObj'].append(getDict(addObj_name,index+addObj_start,index+addObj_start+len(addObj_name),num))
                    if addType_start!=-1:
                        i['addType'].append(getDict(addType_name,index+addType_start,index+addType_start+len(addType_name),num))
                if buyType_name!='' and buyType_start !=-1 and (addObj_start!=-1 or addType_start!=-1):
                    i['buyType'].append(getDict(buyType_name,index+buyType_start,index+buyType_start+len(buyType_name),num))
                    if addObj_start!=-1:
                        i['addObj'].append(getDict(addObj_name,index+addObj_start,index+addObj_start+len(addObj_name),num))
                    if addType_start!=-1:
                        i['addType'].append(getDict(addType_name,index+addType_start,index+addType_start+len(addType_name),num))
                index+=len(sen)+1
            # Build the BIO tag list: one tag per character of the document.
            li=['O' for i in text]
            # NOTE(review): the inner "for i in range(...)" rebinds the outer
            # loop variable i (the record dict). It happens to be harmless
            # here because i is not read again afterwards, but it is fragile.
            for k,v in i.items():
                for item in v:
                    if item['name']!='' and item['start']!=-1:
                        li[item['start']]='B-'+k
                        for i in range(item['start']+1,item['end']):
                            li[i]='I-'+k
            # Trim to the span between the first and last non-'O' tag,
            # padded by 10 characters on each side.
            for j,con in enumerate(li):
                if con!='O':
                    sub_start=j
                    break
            for j,con in enumerate(li):
                if li[len(li)-j-1]!='O':
                    sub_end=len(li)-j-1
                    break
            # NOTE(review): if sub_start < 10 the negative slice index wraps
            # around and yields an unexpected window — confirm inputs.
            li=li[sub_start-10:sub_end+10]
            text=text[sub_start-10:sub_end+10]
            # Emit "char tag" lines, one per character.
            sss=""
            for j,con in enumerate(li):
                sss+=text[j]+' '+con+'\n'
            fw.write(sss)
# +
import os
import numpy as np
import random
# Shuffle the generated tagging files and write the last 10% as the dev set.
dirname='../FDDC/dingzeng/textWithMoney'
files=list(os.walk(dirname))[0][2]
random.shuffle(files)
# NOTE(review): train/test filenames are defined but never written here —
# only the dev split is produced; confirm the other 90% is handled elsewhere.
train="example.train"
test="example.test"
dev="example.dev"
with open(dev,'w') as fw:
    for file in files[int(len(files)*0.9):]:
        with open(os.path.join(dirname,file),'r') as fr:
            fw.writelines(fr.readlines())
            fw.write('\n')  # blank line separates documents
# -
| utils/model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt

from mikeio import Mesh

# BUG FIX: the relative path was missing its separator ("..tests/..."),
# which cannot resolve; it must step up one directory first.
meshfilename = r"../tests/testdata/odense_rough.mesh"

# Load the flexible-mesh file and draw it.
msh = Mesh(meshfilename)
msh.plot()
# -
# Convert the mesh to a [shapely](https://shapely.readthedocs.io/en/latest/manual.html) MultiPolygon object; this requires that the `shapely` library is installed.
mp = msh.to_shapely()
mp
# Now a lot of methods are available
mp.area
mp.bounds
domain = mp.buffer(0)
domain
# +
open_water = domain.buffer(-500)
coastalzone = domain - open_water
coastalzone
# -
# Find if points are inside the domain
# +
from shapely.geometry import Point
p1 = Point(216000, 6162000)
p2 = Point(220000, 6156000)
print(mp.contains(p1))
print(mp.contains(p2))
# -
import matplotlib.pyplot as plt
msh.plot()
plt.scatter(p1.x,p1.y,marker="*",s=200,c="red",label="inside")
plt.scatter(p2.x,p2.y,marker="+",s=200,c="green",label="outside")
plt.legend()
| notebooks/Mesh.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Custom Analysis of Training Results
#
# Notebook demonstrates two methods for plotting training results. First method uses Ludwig's visualization api. Second method illustrates converting Ludwig training statistics into a pandas dataframe and plotting data with seaborn package.
#
# This notebook is dependent on running the multiple model training example beforehand. To run the mulitple model training example, enter this command:
# ```
# python multiple_model_training.py
# ```
# ## Import required libraries
# + pycharm={"is_executing": false}
from ludwig.utils.data_utils import load_json
from ludwig.visualize import learning_curves
import pandas as pd
import numpy as np
import os.path
import matplotlib.pyplot as plt
import seaborn as sns
# -
# ## Generate Annotated Learning Curves Using Ludwig Visualization API
# +
# retrieve training statistics
# Collect each model's training_statistics.json from its results directory.
list_of_stats = []
list_of_models = []
for model in ['model1', 'model2']:
    experiment_model_dir = './results/multiple_experiment_' + model
    train_stats = load_json(os.path.join(experiment_model_dir,'training_statistics.json'))
    list_of_stats.append(train_stats)
    list_of_models.append(model)
# generating learning curves from training
# Ludwig's built-in visualization writes one PNG per output feature ('Survived').
learning_curves(list_of_stats, 'Survived',
                model_names=list_of_models,
                output_directory='./visualizations',
                file_format='png')
# -
# ## Generate Annotated Learning Curves Using seaborn package
#
# ### Helper function to collect training statistics
# +
# function to generate pandas data frame from training statistcs
# Parameter:
# experiment_model_dir: directory containing the training statistics for a specific model training experiment
#
# Returns: pandas dataframe containing the performance metric and loss
#
def extract_training_stats(experiment_model_dir):
    """Load Ludwig training statistics into a single wide DataFrame.

    experiment_model_dir: directory holding 'training_statistics.json'
        for one training experiment.

    Returns a DataFrame with one row per epoch, loss/metric columns prefixed
    by split (training_/validation_/test_), plus a 1-based 'epoch' column.
    """
    # PERF FIX: the original re-read the same JSON file once per split;
    # load it a single time and reuse it.
    train_stats = load_json(os.path.join(experiment_model_dir, 'training_statistics.json'))
    list_of_df = []
    for split in ['training', 'validation', 'test']:
        df = pd.DataFrame(train_stats[split]['combined'])
        # Prefix every column with its split so the three frames can sit side by side.
        df.columns = [split + '_' + c for c in df.columns]
        list_of_df.append(df)
    df = pd.concat(list_of_df, axis=1)
    df['epoch'] = df.index + 1  # epochs are conventionally 1-based
    return df
# -
# ### Retrieve training results
# + pycharm={"is_executing": false}
model1 = extract_training_stats('./results/multiple_experiment_model1')
model1.name = 'model1'
model2 = extract_training_stats('./results/multiple_experiment_model2')
model2.name = 'model2'
# -
model1.head()
# ### Helper function to generate plot ready data
# +
# create pandas dataframe suitable for plotting learning curves
# Parameters
# train_df_list: list of pandas datatframe containing training statistics
#
# Returns: plot ready pandas dataframe
def create_plot_ready_data(list_of_train_stats_df):
    """Reshape per-model training stats into long format for seaborn.

    Parameters:
        list_of_train_stats_df: list of DataFrames (as produced by
            extract_training_stats) each carrying a .name attribute
            identifying its model.

    Returns: DataFrame with columns epoch, loss, type ('training' or
    'validation') and model.
    """
    melted = []
    for stats_df in list_of_train_stats_df:
        for split in ('training', 'validation'):
            piece = stats_df[['epoch', split + '_loss']].copy()
            piece.columns = ['epoch', 'loss']
            piece['type'] = split
            piece['model'] = stats_df.name
            melted.append(piece)
    return pd.concat(melted, axis=0, ignore_index=True)
# -
# ### Plot learning curves
# create plot ready data
# NOTE(review): this rebinds the name `learning_curves`, shadowing the
# Ludwig learning_curves() function used in the earlier cell -- harmless
# here because that cell already ran, but confusing if cells are re-run.
learning_curves = create_plot_ready_data([model1, model2])
# Plot learning curves for the different models
fig = plt.figure(figsize=(10,6))
sns.set_style(style='dark')
ax = sns.lineplot(x='epoch', y='loss',
                  style='type',
                  hue='model',
                  data=learning_curves)
ax.set_title('Learning Curves', fontdict={'fontsize': 16})
fig.savefig('./visualizations/custom_learning_curve.png')
| examples/titanic/model_training_results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Prepare Features For Modeling: Create Training And Test Sets
# ### Read In Data
# +
# Read in the numeric-only Titanic dataset
import pandas as pd
from sklearn.model_selection import train_test_split
titanic = pd.read_csv('../../../data/titanic_numeric.csv')
titanic.head()
# -
# ### Split Into Train, Validation, And Test Set
# +
# Drop unnecessary features and split into train / validation / test sets
features = titanic.drop(['PassengerId', 'Ticket', 'Name', 'Survived'], axis=1)
labels = titanic['Survived']
# Fixed: train_test_split() was called with no arguments (TypeError) and
# X_val/y_val were never created although they are used below.
# Two-stage split: first hold out 40%, then split that half-and-half,
# giving 60% train / 20% validation / 20% test.
X_train, X_temp, y_train, y_temp = train_test_split(features, labels,
                                                    test_size=0.4,
                                                    random_state=42)
X_val, X_test, y_val, y_test = train_test_split(X_temp, y_temp,
                                                test_size=0.5,
                                                random_state=42)
# -

# Sanity check: the three splits should cover roughly 60/20/20 of the data
for dataset in [y_train, y_val, y_test]:
    print(round(len(dataset) / len(labels), 2))
# ### Write Out All Data
# +
# Persist each split without the index so the files round-trip cleanly.
# NOTE(review): X_val/y_val are not defined anywhere above in this 'Begin'
# exercise file -- the split cell must be completed before this cell runs.
X_train.to_csv('../../../data/split_data/train_features.csv', index=False)
X_val.to_csv('../../../data/split_data/val_features.csv', index=False)
X_test.to_csv('../../../data/split_data/test_features.csv', index=False)
y_train.to_csv('../../../data/split_data/train_labels.csv', index=False)
y_val.to_csv('../../../data/split_data/val_labels.csv', index=False)
y_test.to_csv('../../../data/split_data/test_labels.csv', index=False)
# -
| ml_feature/05_Prepare_Features/05_01/Begin/05_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rahmanaul/textBlob-Sentiment-Analysis/blob/main/Vaksin_Sinovac_TextBlob_Sentiment_Analysis.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="_Vl1jxOASNrR"
# sentiment Analisis
# + colab={"base_uri": "https://localhost:8080/"} id="w27o87R0SUJN" outputId="1b54c6cd-d63a-4d5c-ab1e-ddfddd859d2f"
#import libraries
import tweepy
from textblob import TextBlob as tb
from subprocess import check_output
from wordcloud import WordCloud, STOPWORDS
import pandas as pd
import numpy as np
import re
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
# !pip install PySastrawi
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
plt.style.use('fivethirtyeight')
import json
# !pip install googletrans==4.0.0-rc1
from googletrans import Translator
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 73} id="8btPB7BGS2RE" outputId="7ed9ade9-0e1f-4f7c-b073-c20b0c28f383"
# Load the Data
from google.colab import files
uploaded = files.upload()
# + id="diQqCEl2UM92"
# Get the credentials data
log = pd.read_csv('login.csv')
# + id="lq3FpdIVUZYB"
# Twitter API credentials, read from a local CSV so they are not
# hard-coded in the notebook (the 'key' column must hold 4 rows:
# consumer key/secret, access token/secret -- in that order).
consumerKey = log['key'][0]
consumerSecret = log['key'][1]
accessToken = log['key'][2]
accessTokenSecret = log['key'][3]
# + id="9Hz4Q37PWWAO"
# Create the authentication object
authenticate = tweepy.OAuthHandler(consumerKey, consumerSecret)
# Set the access token and access token secret
authenticate.set_access_token(accessToken, accessTokenSecret)
# Create the API object, blocking when the rate limit is hit
api = tweepy.API(authenticate, wait_on_rate_limit=True)
# + colab={"base_uri": "https://localhost:8080/"} id="vIuZTM6S1QXs" outputId="96ea6e35-365b-4826-a899-0135bf7aaa7e"
# Query: tweets mentioning 'vaksin' or 'sinovac', excluding retweets
search_words = "vaksin OR sinovac -filter:retweets"
# NOTE(review): api.search() was renamed api.search_tweets() in Tweepy v4
# -- confirm the installed Tweepy version still provides this method.
tweets = api.search(
    q=search_words,
    lang="id",
    count=100,
    result_type="recent",
    tweet_mode="extended")
# Print the first 5 tweets from the query result
print("Show 5 Recent Tweets : \n")
i=1
for tweet in tweets[0:5]:
    print(str(i) + ') '+ tweet.full_text + '\n')
    i += 1
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="nW2pRdmci2yc" outputId="21a22ed1-96da-41fd-a50f-cf3d2581db7e"
# Create a dataframe with one column holding the full tweet text
df= pd.DataFrame([tweet.full_text for tweet in tweets], columns=['Tweets'])
# show first five rows
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="7ys6Knf7XDOq" outputId="17e78026-ab92-4f9e-a30a-d8847d18f30a"
# Remove duplicate tweets; reset_index keeps the old index as an
# 'index' column (side effect of not passing drop=True)
df = df.drop_duplicates(subset='Tweets', keep='first').reset_index()
df.shape
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 73} id="QnilUgz7Yc-J" outputId="697bfe9f-4728-4487-81bc-e18484983864"
# Upload the slang ('alay') dictionary and the Indonesian stopword list
from google.colab import files
uploaded = files.upload()
# + id="xzvMByNlaAzt"
# Slang word replacement table: column 0 = slang token, column 1 = normalized form
slang_dict = pd.read_csv('new_kamusalay.csv', encoding='latin-1', header=None)
slang_dict = slang_dict.rename(columns={0: 'original',
                                        1: 'replacement'})
# Indonesian stopword list, extended with a few informal filler words
id_stopword_dict = pd.read_csv('stopwordbahasa.csv', header=None)
id_stopword_dict = id_stopword_dict.rename(columns={0: 'stopword'})
stopwords_new = pd.DataFrame(['sih','nya', 'iya', 'nih', 'biar', 'tau', 'kayak', 'banget'], columns=['stopword'])
id_stopword_dict = pd.concat([id_stopword_dict,stopwords_new]).reset_index()
id_stopword_dict = pd.DataFrame(id_stopword_dict['stopword'])
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="mbjUD88xd_0r" outputId="33cefccf-f6d0-440c-ba7d-a24d0521fd2d"
slang_dict.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="Q3LORiirG8Fv" outputId="0e4f19fb-cf75-41ef-84b2-04b5164841e5"
id_stopword_dict.head()
# + id="eHsAdt1AHGZr"
# Build the Sastrawi Indonesian stemmer used by stemming() below; the
# following cells also define cleaners for links, characters and pronouns.
factory = StemmerFactory()
stemmer = factory.create_stemmer()
def lowercase(text):
    """Return the whole string lower-cased."""
    return text.lower()
def remove_unnecessary_char(text):
    """Strip URLs, Twitter artifacts and noise tokens from a tweet string.

    Also normalizes the slang pronoun 'gue' to 'saya' and collapses runs
    of spaces into single spaces.
    """
    # Fixed: literal dots are now escaped -- unescaped '.' matches any
    # character, so unrelated text could previously be removed too.
    text = re.sub(r'pic\.twitter\.com.[\w]+', '', text)  # Remove every pic link
    text = re.sub('((www\.[^\s]+)|(https?://[^\s]+)|(http?://[^\s]+))',' ',text) # Remove every URL
    # Fixed: word boundaries keep words like 'guest' intact; the old
    # pattern replaced any embedded occurrence of 'gue'.
    text = re.sub(r'\bgue\b', 'saya', text)  # Normalize slang pronoun gue -> saya
    text = re.sub('\n',' ',text) # Remove every '\n'
    text = re.sub(r'[^\x00-\x7F]+',' ', text)  # Drop non-ASCII runs (emoji etc.)
    text = re.sub(r':', '', text)
    text = re.sub(r'…', '', text)
    # Artifacts of pandas Series repr and Twitter boilerplate that leak
    # into the word-cloud text; removed as plain substrings (lower & UPPER).
    to_delete = ['hypertext', 'transfer', 'protocol', 'over', 'secure', 'socket', 'layer', 'dtype', 'tweet', 'name', 'object'
                 ,'twitter','com', 'pic', ' ya ']
    for word in to_delete:
        text = re.sub(word,'', text)
        text = re.sub(word.upper(),' ',text)
    retweet_user = [' rt ', ' user ']
    for word in retweet_user:
        text = re.sub(word,' ',text) # Remove every retweet symbol & username token
        text = re.sub(word.upper(),' ',text)
    text = re.sub(' +', ' ', text) # Remove extra spaces
    return text
def remove_nonaplhanumeric(text):
    """Replace every run of non-alphanumeric characters with one space."""
    return re.sub('[^0-9a-zA-Z]+', ' ', text)
# Lookup table: slang ('alay') token -> normalized replacement
slang_dict_map = dict(zip(slang_dict['original'], slang_dict['replacement']))

def normalize_slang(text):
    """Replace each whitespace-separated slang token with its dictionary form."""
    return ' '.join(slang_dict_map.get(word, word) for word in text.split(' '))
def remove_stopword(text):
    """Blank out Indonesian stopwords, then squeeze and trim whitespace."""
    kept = ['' if word in id_stopword_dict.stopword.values else word
            for word in text.split(' ')]
    text = re.sub(' +', ' ', ' '.join(kept))  # collapse the gaps left behind
    return text.strip()
def stemming(text):
    """Reduce each word to its Indonesian root using the Sastrawi stemmer."""
    return stemmer.stem(text)
# + id="okAlsZpPHdvF"
def preprocess(text):
    """Run the full tweet-cleaning pipeline.

    Order matters: casefold, strip noise, keep alphanumerics, normalize
    slang, stem, then drop stopwords.
    """
    pipeline = (lowercase, remove_unnecessary_char, remove_nonaplhanumeric,
                normalize_slang, stemming, remove_stopword)
    for step in pipeline:
        text = step(text)
    return text
# + id="BduCH9FyHriv"
# Clean every tweet. NOTE(review): preprocess is applied twice --
# presumably a second pass catches artifacts the first pass exposes
# (e.g. after stemming/slang normalization); confirm this is intended.
df['Tweets'] = df['Tweets'].apply(preprocess).apply(preprocess)
# + colab={"base_uri": "https://localhost:8080/"} id="tvE3gOVpICs4" outputId="5cf5729c-2e19-4948-f347-685796fb4ffd"
# Drop tweets that became duplicates after cleaning
df = df.drop_duplicates(subset='Tweets', keep='first').reset_index()
df.shape
# + id="S96lCMgzILT9"
df.to_csv('vaksin-pemerintah-preprocessed.csv', index=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="if7e8KnGIaud" outputId="f3b8af00-5cf2-4693-cb4e-66ff4717727e"
# Global figure defaults for the word cloud
mpl.rcParams['figure.figsize']=(12.0,12.0)
mpl.rcParams['font.size']=12
mpl.rcParams['savefig.dpi']=100
mpl.rcParams['figure.subplot.bottom']=.1
stopwords = set(STOPWORDS)  # NOTE(review): built but never passed to WordCloud
# NOTE(review): str(df['Tweets']) is the truncated Series repr (it embeds
# 'Name:', 'dtype' etc. -- which remove_unnecessary_char's to_delete list
# works around), and `stopwords` receives a DataFrame where WordCloud
# expects a set of words -- confirm both are intentional.
wordcloud = WordCloud(
    background_color='white',
    stopwords=id_stopword_dict,
    max_words=400,
    max_font_size=50,
    random_state=21
).generate(str(df['Tweets']))
print(wordcloud)
fig = plt.figure(1)
plt.imshow(wordcloud)
plt.axis('off')
plt.show()
fig.savefig("word1.png", dpi=900)
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="o48CKfbmI-xZ" outputId="d7250958-aee2-472b-8b2c-8a2c151eb527"
translator = Translator()
translator.translate('nice', dest='id').text
# + id="-g-fv_WeJLRb"
def en_to_id(sentence):
    """Return *sentence* as a TextBlob translated to English.

    NOTE(review): despite the name suggesting English->Indonesian, this
    translates TO English (dest='en') so TextBlob's English sentiment
    analyzer can score the Indonesian tweets; text detected as English is
    returned untranslated. TextBlob's detect_language() depends on a
    deprecated Google endpoint -- confirm it still works with the
    installed textblob/googletrans versions.
    """
    if tb(sentence).detect_language() == 'en':
        return tb(sentence)
    translator = Translator()
    output = translator.translate(sentence, dest='en')
    return tb(output.text)
def get_sentiment(sentence):
    """Return the (polarity, subjectivity) pair of the English translation."""
    return en_to_id(sentence).sentiment
def round_polarity(value):
    """Bucket a polarity score: >= 0.3 -> 1, exactly 0 -> 0, otherwise -1."""
    if value >= 0.3:
        return 1
    return 0 if value == 0 else -1
def round_subjectivity(value):
    """Bucket a subjectivity score: positive -> 1, zero -> 0, negative -> -1.

    Fixed: the original tested `value >= 0` first, which made the
    `value == 0` branch unreachable (0 was misclassified as 1).
    """
    if value > 0:
        return 1
    elif value == 0:
        return 0
    return -1
# + id="NRzLQ71rOSZg"
# Score every cleaned tweet: get_sentiment translates to English (one
# network call per tweet) and returns a (polarity, subjectivity) pair.
tweets = df['Tweets']
polarity = []
subjectivity = []
for tweet in tweets:
    sentiment = get_sentiment(tweet)
    polarity.append(sentiment[0])
    subjectivity.append(sentiment[1])
# + id="o2RJQxyUOaVC"
# Attach the scores as new columns
df['Subjectivity'] = subjectivity
df['Polarity'] = polarity
# + colab={"base_uri": "https://localhost:8080/", "height": 553} id="26A9POtHd71t" outputId="fd1f6d97-0972-4bb6-a6f2-a70a681abab4"
df
# + colab={"base_uri": "https://localhost:8080/", "height": 553} id="zxmQpfxLenXR" outputId="05b2ba52-af3b-4cf1-d425-6c8a8a3c6f81"
#create a function to compute the negative , neutral and positive analysis
def getAnalysis(score):
    """Label a polarity score: <0 -> Negative, ==0 -> Neutral, else Positive."""
    if score < 0:
        return 'Negative'
    if score == 0:
        return 'Neutral'
    return 'Positive'
df['Analysis'] = df['Polarity'].apply(getAnalysis)
df
# + colab={"base_uri": "https://localhost:8080/"} id="3J75q4rFoZ_0" outputId="be357261-d4fe-45a8-a145-e914ec9d6977"
# Print all of the positive tweets
# NOTE(review): indexing sortedDF['...'][i] uses the ORIGINAL row labels
# (sort_values keeps them), so tweets print in original order, not sorted
# order -- confirm whether sorted output was intended.
j = 1
sortedDF = df.sort_values(by=['Polarity'])
for i in range(0, sortedDF.shape[0]):
    if (sortedDF['Analysis'][i] == 'Positive'):
        print(str(j) + ') ' + sortedDF['Tweets'][i])
        print()
        j = j+1
# + colab={"base_uri": "https://localhost:8080/"} id="g4N5L-BSokmX" outputId="57f096bc-920a-43de-904c-9a17eb19fa42"
# Print all of the Negative tweets
# NOTE(review): ascending='False' is a truthy STRING, so pandas still
# sorts ascending -- the boolean False was presumably intended.
j = 1
sortedDF = df.sort_values(by=['Polarity'], ascending='False')
for i in range(0, sortedDF.shape[0]):
    if (sortedDF['Analysis'][i] == 'Negative'):
        print(str(j) + ') ' + sortedDF['Tweets'][i])
        print()
        j = j+1
# + colab={"base_uri": "https://localhost:8080/", "height": 422} id="zrndRDfUqsTh" outputId="7728a04b-f2b4-4d31-ef2c-64b32c4872f8"
#plot Polarity and Subjectivity
plt.figure(figsize=(8,6))
for i in range(0, df.shape[0]):
plt.scatter(df['Polarity'][i], df['Subjectivity'][i], color='Blue')
plt.title('Analisis Sentimen')
plt.xlabel('Polarity')
plt.ylabel('Subjectivity')
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="ksEkMSkIryfp" outputId="65e0b29f-1cca-4028-cd96-5293188b90c0"
#Get the Precentage of Positive tweets
ptweets = df[df.Analysis == 'Positive']
ptweets = ptweets['Tweets']
round((ptweets.shape[0] / df.shape[0]) * 100, 1)
# + colab={"base_uri": "https://localhost:8080/"} id="AUV7PkvNs9N_" outputId="199ba330-6109-4e2d-bcba-4e0a2aba072a"
#Get the Precentage of Negative tweets
ntweets = df[df.Analysis == 'Negative']
ntweets = ntweets['Tweets']
round((ntweets.shape[0] / df.shape[0] * 100), 1)
# + colab={"base_uri": "https://localhost:8080/", "height": 800} id="CxQRW3pHtHng" outputId="32c68b64-fa80-4346-bebf-894e2ba3397b"
#Show the Value Counts
df['Analysis'].value_counts()
#plot and visualize the counts
plt.title('Sentiment Analysis')
plt.xlabel('Sentiment')
plt.ylabel('Counts')
df['Analysis'].value_counts().plot(kind='bar')
plt.show()
# + [markdown] id="lN7DFAKuv_8y"
# Dari Hasil Kesimulan terlihat dari 100 tweet berbahasa indonesia Mayoritas Netral terhadap Vaksin Sinovac dan pemerintah
# + id="AgfzgI87t1fE"
| Vaksin_Sinovac_TextBlob_Sentiment_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Advanced Lane Finding Project
#
# The goals / steps of this project are the following:
#
# * Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
# * Apply a distortion correction to raw images.
# * Use color transforms, gradients, etc., to create a thresholded binary image.
# * Apply a perspective transform to rectify binary image ("birds-eye view").
# * Detect lane pixels and fit to find the lane boundary.
# * Determine the curvature of the lane and vehicle position with respect to center.
# * Warp the detected lane boundaries back onto the original image.
# * Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
# +
# imports
from collections import deque
import warnings
import numpy as np
import cv2
import glob
import os
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# %matplotlib inline
from calibration import Calibration
from utils import *
from preprocessing import *
# -
# sample images
test_images = glob.glob('test_images/*.jpg')
video_images = glob.glob('test_images/video_samples/*.jpg')
project_images = glob.glob('test_images/video_samples/project*.jpg')
challange_images = glob.glob('test_images/video_samples/challange*.jpg')
harder_challange_images = glob.glob('test_images/video_samples/harder_challange*.jpg')
cal = Calibration(nx=9, ny=6)
cal.compute_cal('camera_cal/calibration*.jpg')
M, M_inv = get_prespective_transform(ROI_SRC, ROI_DST)
def process_img(img):
    """Turn an RGB road frame into a warped binary lane mask.

    Pipeline: undistort -> HLS color threshold -> gradient threshold on
    the saturation channel -> combine -> region-of-interest crop ->
    perspective warp to a bird's-eye view.
    """
    # undistort first. Fixed: the original computed `dst` but then
    # thresholded the raw `img`, so the calibration was never applied.
    dst = cal.undistort(img)
    # color threshold on the undistorted frame (two HLS ranges OR-ed)
    hls = cv2.cvtColor(dst, cv2.COLOR_RGB2HLS)
    hls_mask = or_masks(color_threshold_image3(hls, HLS_THRESHOLD[0]),
                        color_threshold_image3(hls, HLS_THRESHOLD[1]))
    # gradient threshold on the saturation channel (HLS index 2; the old
    # local name `red_channel` was misleading)
    s_channel = hls[:, :, 2]
    gradient_mask = gradient_threshold(s_channel, kernel_size=GRADIENT_KERNEL_SIZE,
                                       thresholds=GRADIENT_THRESHOLD)
    # combine color and gradient evidence
    mask = or_masks(hls_mask, gradient_mask)
    # keep only the region of interest
    mask = roi(mask, ROI_SRC)
    # warp to bird's-eye view
    warped = warp_image(mask, M)
    return warped
plt.imshow(process_img(mpimg.imread(test_images[0])))
# ## Lane detection
# +
def find_lane_pixels(img, nwindows=9, margin=150, minpix=50):
    '''
    Locate left/right lane pixels in a binary warped image.

    A histogram over the bottom half of the image seeds the two lane base
    positions; `nwindows` stacked windows of half-width `margin` then
    slide up the image, each recentering on the mean x of its captured
    pixels whenever more than `minpix` pixels were found.

    Returns ((leftx, lefty), (rightx, righty)) pixel-coordinate arrays.

    Fixed: the deprecated np.int alias (removed in NumPy 1.24) is
    replaced by the builtin int.
    '''
    # Histogram of binary activations over the bottom half of the image
    histogram = np.sum(img[img.shape[0]//2:,:], axis=0)
    # Peaks of the left and right halves seed the two lane searches
    midpoint = int(histogram.shape[0]//2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    # Window height based on nwindows and image shape
    window_height = int(img.shape[0]//nwindows)
    # x and y positions of all nonzero pixels in the image
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    # Current window centers, updated while walking up the image
    leftx_current = leftx_base
    rightx_current = rightx_base
    # Per-window pixel indices for each lane
    left_lane_inds = []
    right_lane_inds = []
    # Step through the windows one by one (bottom to top)
    for window in range(nwindows):
        # Window boundaries in y and x (left and right lane)
        win_y_low = img.shape[0] - (window+1)*window_height
        win_y_high = img.shape[0] - window*window_height
        win_xleft_low = leftx_current-margin
        win_xleft_high = leftx_current+margin
        win_xright_low = rightx_current-margin
        win_xright_high = rightx_current+margin
        # Nonzero pixels inside each window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        # Recenter the next window on the mean x when enough pixels were found
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))
    # Flatten the per-window index lists
    left_lane_inds = np.concatenate(left_lane_inds)
    right_lane_inds = np.concatenate(right_lane_inds)
    # Extract left and right line pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return (leftx, lefty), (rightx, righty)
def search_around_poly(mask, left_fit, right_fit, margin=100):
    '''
    Collect lane pixels within +/- margin (in x) of each previously fitted
    second-order polynomial x = f(y).
    '''
    # Activated pixel coordinates
    nonzero_rows, nonzero_cols = mask.nonzero()
    ys = np.array(nonzero_rows)
    xs = np.array(nonzero_cols)

    def _near(fit):
        # x predicted by the polynomial at every nonzero pixel's y
        center = fit[0]*(ys**2) + fit[1]*ys + fit[2]
        return (xs > center - margin) & (xs < center + margin)

    left_sel = _near(left_fit)
    right_sel = _near(right_fit)
    # Pixel positions per lane
    return (xs[left_sel], ys[left_sel]), (xs[right_sel], ys[right_sel])
def fit_polynomial(x, y):
    '''
    Fit a second-order polynomial x = a*y**2 + b*y + c to lane pixels.

    Returns the np.polyfit coefficient array, or None when the fit is
    rank-deficient (too few / degenerate points).

    Fixed: np.RankWarning was removed from the top-level namespace in
    NumPy 2.0; it lives in np.exceptions since NumPy 1.25.
    '''
    try:
        rank_warning = np.exceptions.RankWarning
    except AttributeError:  # NumPy < 1.25
        rank_warning = np.RankWarning
    with warnings.catch_warnings():
        warnings.filterwarnings('error')
        try:
            return np.polyfit(y, x, 2)
        except rank_warning:
            return None
# +
# test lane points
img = mpimg.imread(test_images[0])
mask = process_img(img)
left_points, right_points = find_lane_pixels(mask)
left_lane = fit_polynomial(left_points[0], left_points[1])
right_lane = fit_polynomial(right_points[0], right_points[1])
plt.imshow(plot_lane_lines(mask, left_points, left_lane, right_points, right_lane))
# -
lane_image = draw_lanes(img, left_lane, right_lane, M_inv)
# Combine the result with the original image
result = cv2.addWeighted(img, 1, lane_image, 0.3, 0)
plt.imshow(result)
# ## Smoothing lanes
# +
class LaneHistory:
    """Rolling history of polynomial lane fits with outlier rejection.

    Keeps at most `history_steps` recent fits and exposes their mean as the
    smoothed lane line; a new fit is rejected when its coefficient norm
    deviates from the smoothed line's norm by more than `error_limit`.
    """

    def __init__(self, history_steps=25, error_limit=100):
        self.lines = deque(maxlen=history_steps)  # recent accepted fits
        self.smoothed_line = None                 # running mean of the fits
        self.diff = []
        self.error_limit = error_limit

    def append(self, line):
        """Try to accept a new fit; return True when it was used."""
        if line is None:
            return False
        # Reject outliers once a smoothed estimate exists
        if self.smoothed_line is not None and self.error(line) > self.error_limit:
            return False
        self.lines.append(line)
        self.compute_average()
        return True

    def compute_average(self):
        """Refresh the smoothed line as the mean of the stored fits."""
        if self.lines:
            self.smoothed_line = np.mean(self.lines, axis=0)

    def get_line(self):
        """Smoothed fit, or None before any fit was accepted."""
        return self.smoothed_line

    def error(self, line):
        """Absolute difference between coefficient norms of fit and mean."""
        return np.absolute(self.norm(line) - self.norm(self.smoothed_line))

    def norm(self, x):
        return np.sqrt(x[0]**2 + x[1]**2 + x[2]**2)
def measure_curvature(y, line_eq):
    '''
    Radius of curvature of x = a*y**2 + b*y + c evaluated at y
    (same units as y).
    '''
    first_deriv = 2*y*line_eq[0] + line_eq[1]
    second_deriv = 2*line_eq[0]
    return (1 + first_deriv**2)**(3/2) / np.absolute(second_deriv)
def cvt_line_to_meters(line_eq, xm, ym):
    '''
    Rescale polynomial coefficients from pixel units to meters.

    xm / ym are meters-per-pixel in x and y; the constant term stays
    in pixels (it is only used via differences).
    '''
    scaled = np.copy(line_eq)
    scaled[0] = scaled[0] * (xm/(ym**2))
    scaled[1] = scaled[1] * (xm/ym)
    return scaled
def eval_point_at_line(y, line):
    """Evaluate the fitted polynomial x = a*y**2 + b*y + c at a given y."""
    a, b, c = line[0], line[1], line[2]
    return a*(y**2) + b*y + c

def offset(y, width, xm, left_line, right_line):
    """Vehicle offset (meters) from the lane center at image row y.

    Positive means the lane center lies right of the image center.
    """
    lane_center = (eval_point_at_line(y, left_line) + eval_point_at_line(y, right_line)) / 2
    return (lane_center - (width/2)) * xm
# -
# Test
# +
# pixels to meters
xm = 3.7/800
ym = 32/720
left_lane = LaneHistory(3)
right_lane = LaneHistory(3)
MIN_POINTS = 4000
for fname in video_images:
img = mpimg.imread(fname)
mask = process_img(img)
if left_lane.get_line() is not None and right_lane.get_line() is not None:
# try search near prev margin
left_points, right_points = search_around_poly(mask, left_lane.get_line(), right_lane.get_line())
if len(left_points[0]) < MIN_POINTS or len(right_points[1]) < MIN_POINTS:
left_points, right_points = find_lane_pixels(mask)
else:
left_points, right_points = find_lane_pixels(mask)
print('Full search')
left_fit = fit_polynomial(left_points[0], left_points[1])
left_lane.append(left_fit)
right_fit = fit_polynomial(right_points[0], right_points[1])
right_lane.append(right_fit)
# measure
print('Offset: {}'.format(offset(720, 1280, xm, left_lane.get_line(), right_lane.get_line())))
left_c = measure_curvature(720*ym, cvt_line_to_meters(left_lane.get_line(), xm, ym))
right_c = measure_curvature(720*ym, cvt_line_to_meters(right_lane.get_line(), xm, ym))
print('Curvature left: {} right:{}'.format(left_c, right_c))
plt.imshow(plot_lane_lines(mask, left_points, left_fit, right_points, right_fit, left_lane.get_line(), right_lane.get_line()))
plt.show()
# -
# ## Complete algorithm
# +
class LaneDetector():
    """End-to-end lane detection pipeline for single video frames.

    Calibrates the camera once, then per frame: builds a binary lane mask,
    finds lane pixels (reusing the previous fits when possible), fits and
    smooths polynomials, measures curvature/offset, and draws the lane
    overlay back onto the frame.
    """

    def __init__(self, xm=3.7/800, ym=32/720, history=10, min_points=4000):
        # meters-per-pixel conversion factors in x and y
        self.xm = xm
        self.ym = ym
        self.width = None
        self.height = None
        # below this pixel count the targeted search falls back to a full search
        self.min_points = min_points
        self.cal = Calibration(nx=9, ny=6)
        # Fixed: the original called the GLOBAL `cal.compute_cal`, leaving
        # self.cal uncalibrated (it only worked because a global happened
        # to exist in the notebook).
        self.cal.compute_cal('camera_cal/calibration*.jpg')
        self.M, self.M_inv = get_prespective_transform(ROI_SRC, ROI_DST)
        self.left_lane = LaneHistory(history)
        self.right_lane = LaneHistory(history)

    def process(self, img, debug=True):
        """Process one RGB frame; return it annotated with the lane overlay."""
        if self.width is None or self.height is None:
            self.height = img.shape[0]
            self.width = img.shape[1]
        mask = process_img(img)
        # find lanes: prefer a targeted search around the previous fits
        if self.left_lane.get_line() is not None and self.right_lane.get_line() is not None:
            left_points, right_points = search_around_poly(mask,
                                                           self.left_lane.get_line(),
                                                           self.right_lane.get_line())
            # too few pixels near the old fits -> redo the sliding-window search
            # (fixed: both sides now check element [0]; the old right check
            # read [1], which is the same length but inconsistent)
            if len(left_points[0]) < self.min_points or len(right_points[0]) < self.min_points:
                left_points, right_points = find_lane_pixels(mask)
        else:
            left_points, right_points = find_lane_pixels(mask)
        left_fit = fit_polynomial(left_points[0], left_points[1])
        self.left_lane.append(left_fit)
        right_fit = fit_polynomial(right_points[0], right_points[1])
        self.right_lane.append(right_fit)
        # measure lateral offset (m) and curvature (m) from the smoothed fits
        offset_m = offset(self.height, self.width, self.xm,
                          self.left_lane.get_line(), self.right_lane.get_line())
        left_c = measure_curvature(self.height*self.ym,
                                   cvt_line_to_meters(self.left_lane.get_line(), self.xm, self.ym))
        right_c = measure_curvature(self.height*self.ym,
                                    cvt_line_to_meters(self.right_lane.get_line(), self.xm, self.ym))
        # draw the lane overlay and blend it onto the original frame
        lane_image = draw_lanes(img, self.left_lane.get_line(), self.right_lane.get_line(), self.M_inv)
        result = cv2.addWeighted(img, 1, lane_image, 0.3, 0)
        cv2.putText(result, self.get_text_string(offset_m, (left_c, right_c)),
                    (350,70), cv2.FONT_HERSHEY_SIMPLEX, 1,(255,255,255),2,cv2.LINE_AA)
        if debug:
            # picture-in-picture view of the raw fit for debugging
            debug_img = plot_lane_lines(mask, left_points, left_fit, right_points, right_fit,
                                        self.left_lane.get_line(), self.right_lane.get_line())
            result = embed_image(result, debug_img)
        return result

    def get_text_string(self, offset_m, curvature_m):
        """Format the curvature/offset heads-up display string."""
        return 'Curvature L:{:.1f}m R:{:.1f}m | Offset:{:.2f}m {}'.format(
            curvature_m[0], curvature_m[1], abs(offset_m),
            'left' if offset_m >= 0 else 'right')
l = LaneDetector()
plt.imshow(l.process(img))
# -
# ## Process project video
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
detector = LaneDetector()
clip = VideoFileClip("test_videos/project_video.mp4")
output = clip.fl_image(detector.process)
output.write_videofile('output_videos/output.mp4', audio=False)
HTML("""
<video width="960" height="540" controls>
<source src="{0}">
</video>
""".format('output_videos/output.mp4'))
| Advance Lane Finding.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table>
# <tr>
# <td>Author:</td>
# <td><NAME> </td>
# </tr>
# <tr>
# <td>Purpose:</td>
# <td>Demonstrate some of the basic conversion and tools in toolbox_circuits <br>
# These are just basic utility functions
# </td>
# </tr>
# <td>File Status:</td>
# <td>In construction </td>
# </tr>
# </table>
# These conversions are quite basic, so I decided not to use an external package, but just manually handle them.
#
# For all the calculations anyhow, we will only work in reduced units of MHz for energies (or GHz if need be) and nH and fF for ind and cap, resp.
# %load_ext autoreload
# %autoreload 2
# # Conversions
# ##### Elementary units
import pyEPR.toolbox_circuits
from pyEPR.toolbox_circuits import Convert
print("Convert.toSI(1,'nH') = ", Convert.toSI(1,'nH'), "H")
print("Convert.fromSI(1.0,'nH') = ", Convert.fromSI(1.0,'nH'), "nH")
print("Identity: ", Convert.toSI(Convert.fromSI(1.0,'nH'),'nH'))
# ##### Josephson Junction Parameters
# +
from IPython.display import Latex
Lj = 10
display(Latex(r"$E_J = %.2f \text{ GHz} \qquad \text{for } L_J=%.2f\text{ nH}$" % (\
Convert.Ej_from_Lj(Lj, 'nH', "GHz"),Lj)))
print('\nConvert back %.2f nH' % Convert.Lj_from_Ej(16.35E3, 'MHz', 'nH'),'\n')
display(Latex(r"$E_C = %.2f \text{ MHz} \qquad \text{for } C_J=%.2f\text{ fF}$" % (\
Convert.Ec_from_Cs(65., 'fF', "MHz"),65.)))
display( 'Convert back:',Latex(r"$C_J = %.2f \text{ fF} \qquad \text{for } E_C=%.2f\text{ MHz}$" % (\
Convert.Cs_from_Ec(300, 'MHz', "fF"),300)))
# -
# ###### Critical current
print(Convert.Ic_from_Lj(10))
Convert.Lj_from_Ic(32)
# ##### Convenience units
from pyEPR.toolbox_circuits import π, pi, ϕ0, fluxQ, Planck, ħ, hbar, elementary_charge, e_el
print("Test EJ raw calculation = %.2f"%( ϕ0**2 / (10E-9 * Planck) *1E-9 ) ,'GHz')
# ##### Transmon
# Linear harmonic oscillator approximation of the transmon.<br>
# Convenience function
Convert.transmon_print_all_params(13, 65);
# and raw
Convert.transmon_get_all_params(Convert.Ej_from_Lj(13, 'nH', 'MHz'), Convert.Ec_from_Cs(65, 'fF', 'MHz'))
| _tutorial_notebooks/Tutorial 3. toolbox_circuits.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Project 2: Predicting Catalog Demand
#
# **The Business Problem** <br/>
# You recently started working for a company that manufactures and sells high-end home goods. Last year the company sent out its first print catalog, and is preparing to send out this year's catalog in the coming months. The company has 250 new customers from their mailing list that they want to send the catalog to.
#
# Your manager has been asked to determine how much profit the company can expect from sending a catalog to these customers. You, the business analyst, are assigned to help your manager run the numbers. While fairly knowledgeable about data analysis, your manager is not very familiar with predictive models.
#
# You’ve been asked to predict the expected profit from these 250 new customers. Management does not want to send the catalog out to these new customers unless the expected profit contribution exceeds $10,000.
#
# **Details**
#
# <ol>
# <li>The costs of printing and distributing is USD6.50 per catalog. </li>
# <li>The average gross margin (price - cost) on all products sold through the catalog is 50%. </li>
# <li>Make sure to multiply your revenue by the gross margin first before you subtract out the $6.50 cost when calculating your profit. </li>
# <li>Write a short report with your recommendations outlining your reasons why the company should go with your recommendations to your manager. </li>
# </ol>
#
# ## Step 1: Business and Data Understanding
#
# 1. What decisions needs to be made?
#
# Sales manager need to answer the question of whether to send the catalog can be sent to 250 customers or not, based on the expected profit.
#
# 2. What data is needed to inform those decisions?
#
# The previous sales data, which is from `p1-customers.xlsx`.The data is as below.
# ```
# Customer_Segment
# Customer_ID
# Address
# City
# State
# ZIP
# Avg_Sale_Amount
# Store_Number
# Responded_to_Last_Catalog
# Avg_Num_Products_Purchased
# #_Years_as_Customer
# ```
# So, we need to find which column (features) that actually contributed to the `Avg_Sale_Amount`. Then, we need to calculate the profit to ensure that we can get more than USD 10,000 when send the catalogs to 250 customers.
#
# +
# import necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# %matplotlib inline
# -
df = pd.read_excel('p1-customers.xlsx',index_col = 0)
df.head()
df.describe()
df.info()
# ## Step 2: Analysis, Modeling, and Validation
#
# 1. How and why did you select the predictor variables in your model? You must explain how your continuous predictor variables you’ve chosen have a linear relationship with the target variable. Please refer back to the “Multiple Linear Regression with Excel” lesson to help you explore your data and use scatterplots to search for linear relationships. You must include scatterplots in your answer.
#
# Please refer to section `Features Engineering` for this answer.
#
# 2. Explain why you believe your linear model is a good model. You must justify your reasoning using the statistical results that your regression model created. For each variable you selected, please justify how each variable is a good fit for your model by using the p-values and R-squared values that your model produced.
#
# Please refer to the `Modeling` section for this answer. The p-value is less than 0.05 and adjusted R-square is 0.837. Thus, our model is believed to be a good fit.
#
# 3. What is the best linear regression equation based on the available data? Each coefficient should have no more than 2 digits after the decimal (ex: 1.28)
#
# $ AvgSaleAmount = 154.11 + 66.98 \times AvgNumProductsPurchased + 149.36 \times CustomerCreditCardOnly + 431.19 \times CustomerLoyaltyClubCreditCard - 96.06 \times CustomerStoreMailingList $
# ### Data Cleaning
#
# Applying the One Hot Encoding
df['Customer_Segment'].unique()
df["Responded_to_Last_Catalog"].unique()
df["Responded_to_Last_Catalog"].replace({"Yes": 1, "No": 0}, inplace=True)
df_Customer_Segment = pd.get_dummies(df['Customer_Segment'])
df_Customer_Segment.head()
df_Customer_Segment['Credit Card Only'].unique()
df_Customer_Segment = df_Customer_Segment.rename(columns={
"Credit Card Only" : "Customer_CreditCardOnly",
"Loyalty Club Only": "Customer_LoyaltyClubOnly",
"Loyalty Club and Credit Card": "Customer_LoyaltyClubCreditCard",
"Store Mailing List": "Customer_StoreMailingList"
})
df1 = pd.concat([df, df_Customer_Segment], axis = 1)
df1.head()
df1 = df1.drop(columns=['Customer_Segment'])
df1['City'].unique()
df1['State'].unique()
# ### Features Engineering
# Correlation heatmap of the numeric columns. numeric_only=True is required:
# df1 still contains string columns (Address, City, State) at this point and
# pandas >= 2.0 raises a TypeError when corr() sees non-numeric data.
plt.figure(figsize=(12, 12))
sns.heatmap(df1.corr(numeric_only=True), annot=True, cmap='cubehelix_r')
plt.show()
corr = df1.corr(numeric_only=True)
# Correlation with output variable
cor_target = abs(corr["Avg_Sale_Amount"])
# Selecting highly correlated features (|corr| >= 0.1)
relevant_features = cor_target[cor_target >= 0.1]
# Present the retained features as a table sorted by correlation strength.
df_corr = pd.DataFrame(relevant_features)
df_corr = df_corr.reset_index()
df_corr = df_corr.rename(columns={'Avg_Sale_Amount': 'Corr'})
df_corr = df_corr.sort_values(['Corr'], ascending=False)
df_corr
# Only the above features are selected to train the model.
#
# ### Data Visualization
# Scatterplots checking for a linear relationship between the candidate
# predictors and the target Avg_Sale_Amount.
plt.figure(figsize=(8, 6))
sns.scatterplot(data=df1, x='Avg_Num_Products_Purchased', y='Avg_Sale_Amount', hue='#_Years_as_Customer')
# NOTE: the papertype/orientation/facecolor/... keyword arguments previously
# passed here were all defaults, and papertype/orientation have been removed
# from Matplotlib (TypeError on >= 3.9) — a bare savefig(name) is equivalent.
plt.savefig("Years")
plt.figure(figsize=(8, 6))
# Uses df (not df1): the un-encoded Customer_Segment column is needed as hue.
sns.scatterplot(data=df, x='Avg_Num_Products_Purchased', y='Avg_Sale_Amount', hue='Customer_Segment')
plt.savefig("CustomerSegment")
plt.figure(figsize=(8, 6))
ax = plt.scatter(df1['Avg_Num_Products_Purchased'], df1['Avg_Sale_Amount'])
plt.title("Scatterplot of Average Number of Products Purchased versus Average Sale Amount")
plt.xlabel("Avg_Num_Products_Purchased")
plt.ylabel("Average_Sale_Amount")
plt.savefig("AvgSales")
plt.show()
df1.info()
# Drop identifiers and the weakly-correlated / redundant columns before
# modeling (Customer_LoyaltyClubOnly is the baseline dummy level).
df1 = df1.drop(['Customer_ID', 'Address', 'City', 'State', 'ZIP',
                'Store_Number', '#_Years_as_Customer', 'Responded_to_Last_Catalog', 'Customer_LoyaltyClubOnly'], axis=1)
df1.info()
# Design matrix with an explicit intercept column, and the target vector.
X = df1.drop(columns=['Avg_Sale_Amount'])
X = sm.add_constant(X)
y = df1['Avg_Sale_Amount']
# ### Modeling
#
# Linear regression model
# Fit an ordinary-least-squares model on the full design matrix and print
# the statsmodels summary (coefficients, p-values, R-squared).
model = sm.OLS(y, X)
fit = model.fit()
print(fit.summary())

# Hold out 20% of the rows, then fit a scikit-learn model on the remainder
# with a fixed seed so the split is reproducible.
random_state = 1
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=random_state
)
reg = LinearRegression()
reg.fit(X_train, y_train)
print("The y-intercept coefficent is: ", reg.intercept_)
# ## Step 3: Presentation/Visualization
#
# 1. What is your recommendation? Should the company send the catalog to these 250 customers?
#
# Yes, I recommend sending the catalog to these 250 customers.
#
# 2. How did you come up with your recommendation? (Please explain your process so reviewers can give you feedback on your process)
#
# Please refer to the `Profit Calculation` section.
# The profit is calculated as
# $ totalProfit = totalRevenue \times 0.5 - 6.5 \times 250 $ <br/>
# where 0.5 is the gross margin and \$6.50 is the cost of printing and mailing a catalog to each of the 250 customers
#
# 3. What is the expected profit from the new catalog (assuming the catalog is sent to these 250 customers)?
#
# USD 21851.15
# ## Mailing List dataframe
# Load the 250 prospective customers and keep the predictor the model was
# trained on plus the segment column that still needs encoding.
df_m = pd.read_excel('p1-mailinglist.xlsx')
df_m.head()
df_m2 = df_m[['Customer_Segment', 'Avg_Num_Products_Purchased']]
df_m2['Customer_Segment'].unique()
# One-hot encode the segment and drop the baseline level, mirroring the
# encoding applied to the training data.
segment = pd.get_dummies(df_m2['Customer_Segment'])
df_m2 = pd.concat([df_m2, segment], axis=1)
df_m2 = df_m2.drop(columns=['Customer_Segment', 'Loyalty Club Only'])
df_m2.head()
# ### Profit calculation
# Add the intercept column so the design matrix matches the training X,
# then score every mailing-list customer with the fitted regression.
df_m2 = sm.add_constant(df_m2)
y_hat = reg.predict(df_m2)
# +
result = pd.DataFrame(y_hat, columns=['Avg_Sale_Amount'])
# Score_Yes is the probability that the customer buys a product when they
# receive the catalog.
result = result.join(df_m[['Score_Yes']].apply(pd.to_numeric))
# -
def profit_calculation(i):
    """Expected profit for one mailing-list row.

    Expected revenue (average sale amount times the purchase probability)
    at a 50% gross margin, minus the $6.50 cost of printing and mailing
    one catalog.
    """
    expected_revenue = i['Avg_Sale_Amount'] * i['Score_Yes']
    return expected_revenue * 0.50 - 6.5
# Expected profit per customer, then the campaign total.
result['Profit'] = result.apply(profit_calculation, axis=1)
result.head()
totalProfit = result['Profit'].sum()
# Recommend mailing only if the expected campaign profit clears $1,000.
# (Messages fixed: the originals read "suggest the send the catalogs" and
# ran the period into the next sentence.)
if totalProfit >= 1000:
    print('The total profit is:', totalProfit, '. Therefore we suggest sending the catalogs to the 250 new customers.')
else:
    print('The total profit is:', totalProfit, '. We do not suggest sending the catalog to the 250 new customers.')
| project-2/Predicting-catalog-demand.ipynb |