code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: summer2020
# language: python
# name: summer2020
# ---
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Load the pretrained GPT-2 tokenizer and LM-head model.
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# # Reference
# Run the full model once to get the reference next-token prediction.
prompt_text = "a robot must obey the orders given"
encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False, return_tensors="pt")
encoded_prompt
with torch.no_grad():
    # NOTE(review): tuple-unpacking the output assumes an older transformers
    # API that returns (logits, past) — confirm the pinned library version.
    prediction_scores, past = model(encoded_prompt)
# Logits for the token that would follow the last prompt position.
next_token_logits = prediction_scores[0, -1, :]
next_token_logits.argmax()
# Top-10 candidate continuations, with GPT-2's 'Ġ' space marker made readable.
[tok.replace('Ġ', ' ') for tok in tokenizer.convert_ids_to_tokens(next_token_logits.topk(10).indices)]
# # Peel off LM head
# The model is (transformer backbone) -> (linear LM head); run the two stages
# manually and confirm we get the same next-token prediction as above.
type(model.transformer)
# First element of the backbone output is the final hidden state
# (presumably (batch, seq, hidden) — TODO confirm against the transformers docs).
last_hidden_states = model.transformer(encoded_prompt)[0]
last_hidden_states.shape
prediction_scores = model.lm_head(last_hidden_states)
next_token_logits = prediction_scores[0, -1, :]
next_token_logits.argmax()
# # Delve into Transformer
xformer = model.transformer
# Token-embedding table.
word_to_embedding = xformer.wte
word_to_embedding.weight.shape
first_word_idx = encoded_prompt[0, 0]
first_word_embedding = word_to_embedding.weight[first_word_idx]
first_word_embedding.shape
# ## Here's the `forward` code, simplified
xformer.config
input_ids = encoded_prompt
batch_size, seq_len = input_ids.size()
assert batch_size == 1
# The stack of transformer blocks.
xformer_layers = xformer.h
len(xformer_layers)
xformer.config.n_layer
device = input_ids.device
# Position ids 0..seq_len-1, with a leading batch dimension.
position_ids = torch.arange(0, seq_len, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0)
position_ids
# Embed the inputs
inputs_embeds = xformer.wte(input_ids)
position_embeds = xformer.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
hidden_states = xformer.drop(hidden_states)
batch_size, seq_len, n_hidden = hidden_states.shape
hidden_states.shape
inputs_embeds.shape
input_ids.shape
output_shape = (1, seq_len, n_hidden)
output_shape
# Run each block in sequence; each returns a tuple whose first element is the
# updated hidden state.
for block in xformer_layers:
    outputs = block(
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    )
    hidden_states = outputs[0]
# Final layer norm, then reshape to (batch, seq, hidden).
layer_normalizer = xformer.ln_f
hidden_states = layer_normalizer(hidden_states)
hidden_states = hidden_states.view(*output_shape)
hidden_states.shape
last_hidden_states = hidden_states
last_hidden_states.shape
# ## now pass that on to the LM head
prediction_scores = model.lm_head(last_hidden_states)
next_token_logits = prediction_scores[0, -1, :]
next_token_logits.argmax()
# yay, same output!
# # Delve into a block
# Same as above, but expand each transformer block into its attention + MLP
# sub-steps (pre-LayerNorm residual form).
input_ids = encoded_prompt
batch_size, seq_len = input_ids.size()
assert batch_size == 1
device = input_ids.device
position_ids = torch.arange(0, seq_len, dtype=torch.long, device=device).unsqueeze(0)
position_ids
# Embed the inputs
inputs_embeds = xformer.wte(input_ids)
position_embeds = xformer.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
hidden_states = xformer.drop(hidden_states)
batch_size, seq_len, n_hidden = hidden_states.shape
hidden_states.shape
for block in xformer_layers:
    layer_input = hidden_states
    # Self-attention over the layer-normed input.
    output_attn = block.attn(
        block.ln_1(layer_input),
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        use_cache=False,
        output_attentions=False,
    )
    a = output_attn[0]  # output_attn: a, present, (attentions)
    # Residual connection, then MLP with its own residual.
    x = layer_input + a
    m = block.mlp(block.ln_2(x))
    hidden_states = x + m
layer_normalizer = xformer.ln_f
hidden_states = layer_normalizer(hidden_states)
# NOTE(review): reuses `output_shape` from the previous section — that cell
# must have been run first.
hidden_states = hidden_states.view(*output_shape)
hidden_states.shape
last_hidden_states = hidden_states
last_hidden_states.shape
prediction_scores = model.lm_head(last_hidden_states)
next_token_logits = prediction_scores[0, -1, :]
next_token_logits.argmax()
|
demo_transformer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np

# 20x20-pixel digit tiles, 10 classes, 80/20 train/test split.
CELL_SIZE = 20
NCLASSES = 10
TRAIN_RATIO = 0.8
# Load the OpenCV handwritten-digits sheet (grayscale) and cut it into
# individual CELL_SIZE x CELL_SIZE tiles.
digits_img = cv2.imread('../data/digits.png', 0)
digits = [np.hsplit(r, digits_img.shape[1] // CELL_SIZE)
          for r in np.vsplit(digits_img, digits_img.shape[0] // CELL_SIZE)]
digits = np.array(digits).reshape(-1, CELL_SIZE, CELL_SIZE)
nsamples = digits.shape[0]
# The sheet is laid out class by class, so labels repeat in order.
labels = np.repeat(np.arange(NCLASSES), nsamples // NCLASSES)
# Deskew each digit using its second-order image moments.
for i in range(nsamples):
    m = cv2.moments(digits[i])
    if m['mu02'] > 1e-3:
        s = m['mu11'] / m['mu02']
        M = np.float32([[1, -s, 0.5*CELL_SIZE*s],
                        [0, 1, 0]])
        # BUG FIX: the original referenced an undefined name `CELL_SZ` here.
        digits[i] = cv2.warpAffine(digits[i], M, (CELL_SIZE, CELL_SIZE))
# Shuffle samples and labels with the same permutation, then split.
perm = np.random.permutation(nsamples)
digits = digits[perm]
labels = labels[perm]
ntrain = int(TRAIN_RATIO * nsamples)
ntest = nsamples - ntrain
def calc_hog(digits):
    """Compute a HOG descriptor for each 20x20 digit image.

    Returns a float32 array with one descriptor row per input image.
    """
    win_size = (20, 20)
    block_size = (10, 10)
    block_stride = (10, 10)
    cell_size = (10, 10)
    nbins = 9
    hog = cv2.HOGDescriptor(win_size, block_size, block_stride, cell_size, nbins)
    # One descriptor per digit (idiom: comprehension instead of append loop).
    samples = [hog.compute(d) for d in digits]
    return np.array(samples, np.float32)
# Compute HOG features for the train/test split and fit both classifiers.
fea_hog_train = calc_hog(digits[:ntrain])
fea_hog_test = calc_hog(digits[ntrain:])
labels_train, labels_test = labels[:ntrain], labels[ntrain:]
# k used by findNearest at evaluation time (see below).
K = 3
knn_model = cv2.ml.KNearest_create()
knn_model.train(fea_hog_train, cv2.ml.ROW_SAMPLE, labels_train)
# RBF-kernel SVM; gamma/C chosen by hand.
svm_model = cv2.ml.SVM_create()
svm_model.setGamma(2)
svm_model.setC(1)
svm_model.setKernel(cv2.ml.SVM_RBF)
svm_model.setType(cv2.ml.SVM_C_SVC)
svm_model.train(fea_hog_train, cv2.ml.ROW_SAMPLE, labels_train)
def eval_model(fea, labels, fpred, nclasses=10):
    """Evaluate a classifier on a feature matrix.

    Parameters
    ----------
    fea : np.ndarray
        Feature rows to classify.
    labels : np.ndarray
        Ground-truth class ids, one per row of `fea`.
    fpred : callable
        Maps `fea` to predicted class ids as a column vector (the shape the
        OpenCV ml predictors return).
    nclasses : int, optional
        Number of classes. Previously hard-coded via the module-level
        NCLASSES global (= 10); now a backward-compatible parameter.

    Returns
    -------
    (float, np.ndarray)
        Accuracy in percent and the nclasses x nclasses confusion matrix
        (rows = ground truth, columns = prediction).
    """
    pred = fpred(fea).astype(np.int32)
    acc = (pred.T == labels).mean() * 100
    conf_mat = np.zeros((nclasses, nclasses), np.int32)
    for c_gt, c_pred in zip(labels, pred):
        conf_mat[c_gt, c_pred] += 1
    return acc, conf_mat
# Evaluate both models on the held-out HOG features.
knn_acc, knn_conf_mat = eval_model(fea_hog_test, labels_test, lambda fea: knn_model.findNearest(fea, K)[1])
print('KNN accuracy (%):', knn_acc)
print('KNN confusion matrix:')
print(knn_conf_mat)
svm_acc, svm_conf_mat = eval_model(fea_hog_test, labels_test, lambda fea: svm_model.predict(fea)[1])
print('SVM accuracy (%):', svm_acc)
print('SVM confusion matrix:')
print(svm_conf_mat)
|
Chapter04/10 Optical character recognition using different machine learning model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from src.datasets.cifar100 import CIFAR100

trainset = CIFAR100(root='/data/github/WeightNorm/data', train=True)
# BUG FIX: the original cell ended with two incomplete assignments
# (`trainloader =` / `dataloader =`), which is a SyntaxError. Kept as
# placeholders; build a DataLoader over `trainset` here, e.g.
# torch.utils.data.DataLoader(trainset, batch_size=..., shuffle=True).
trainloader = None
dataloader = None
|
ipynb/Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # In the previous episode...
#
# ### quick overview of Classes
# ```python
# d = Dog('Fido')
# d.add_trick('roll over')
# d.tricks
# ```
#
# and re-implemented High-Low card game
# # .. and now
# # File system
# Interacting with the file system is one of the most important things to do. It is used in almost all applications as it can :
# - allow user to input large quantity of data at once
# - save temporary intermediate files
# - store final output files
# - keep a log of what has been done, useful for troubleshooting
# but first... let's download a silly little file from the internet
# Don't worry about the code in this cell for now, we'll get to this stuff in a future lesson
# Download the example haiku file next to this notebook as cat_haiku.txt.
import urllib.request
urllib.request.urlretrieve("https://raw.githubusercontent.com/gabrielecalvo/Language4Water/master/assets/cat_haiku.txt", 'cat_haiku.txt')
print("downloaded :)")
# ## Opening a file
# This can be achieved using the `open` function, which takes the **path to the file** as an input and returns a **file handle**
# ```python
# file_handle = open('myfile.txt')
# ```
# `open` also takes a second parameter **mode** which is the way in which we are asking to open the file, the most important for us now are:
# - `"r"`: which is the default and it stands for **read** mode (load data from existing file)
# - `"w"`: which stands for **write** mode (create a file)
# Open the downloaded file with the default mode ("r" = read).
fh = open('cat_haiku.txt')
fh
# to read the content, we can use the `.read()` method of the file handler
# ## Reading the file content
content = fh.read()
content
# those ugly `\n` are newline characters, if you use the `print` function, you can see it better:
print(content)
# ## Closing the file
# It is **important** to ***close*** the handler once you are done with the file.
# - If you opened it in read mode, others won't be able to edit it.
# - If you opened it in write mode, data might not be written to the file until you close it.
fh.close()
# ## Using the context manager
# To take all this headache away from you, it is ***strongly*** recommended to use the *context manager* (using the `with` keyword) which will handle the closing for you so you don't forget.
# +
# The context manager closes the file automatically at the end of the block.
with open('cat_haiku.txt') as fh:
    content = fh.read()
print(content)
# -
# ## reading content as list of lines
# sometimes it is helpful to read the content as a list of lines, for example during pre-processing.
#
# The file handle can be used as a sequence of strings separated by *newline* characters `\n`, so we can iterate over it with a for loop to extract each line.
# +
clean_lines = []
with open('cat_haiku.txt') as fh:
    # Iterating a file handle yields one line at a time; keep only lines
    # that are neither '#'-comments nor blank.
    for line in fh:
        if not line.startswith("#") and line != "\n":
            clean_lines.append(line)
clean_lines
# -
# ## writing a file
# All that changes when writing a file is the mode attribute (`w` instead of `r`) and the use of the `write` method instead of `read`
with open("new_shiny_file.txt", "w") as fh:
    # "w" creates/truncates the file; write 20 copies of the line.
    fh.write("hey, this is cool!!\n" * 20)
# ## warning about Windows file paths
# Paths are separated by different characters on Windows `\` compared to Linux/MacOS `/`.
#
# When using paths in Windows that contain `\`, they need to be prefixed with `r` to avoid that character being used to create special characters (e.g. `\n`). So use:
#
# open(**r**".\new_shiny_file.txt")
# +
# this will work
with open(r".\new_shiny_file.txt") as f:
    print(f.readlines()[0])
# this won't work: without the r-prefix, "\n" in the path becomes a newline
# character, so the file name no longer matches.
with open(".\new_shiny_file.txt") as f:
    print(f.readlines()[0])
# -
# # Exercise: Ishmael Counter
# Count "Ishmael"s in Moby Dick and write the count to another file called `ishmael_counts.txt`
#
# For this lesson we'll need an example file. Let's use the book ["Moby Dick"](http://www.gutenberg.org/cache/epub/2701/pg2701.txt), let's download it.
#
# The actual text starts at line 536 and ends at 21743 but for this exercise it doesn't matter.
# Don't worry about the code in this cell for now, we'll get to this stuff a future lesson
# Download the Moby Dick text next to this notebook as moby_dick.txt.
import urllib.request
urllib.request.urlretrieve("https://raw.githubusercontent.com/egh/moby-dick/master/mobydick.txt", "moby_dick.txt")
print("downloaded :)")
# enter your solution here
...
# #### possible solution
# + tags=[]
# Count how many whitespace-separated words contain "Ishmael" and write the
# result to ishmael_counts.txt.
with open("moby_dick.txt") as fh:
    content = fh.read()
counter = 0
for word in content.split():
    if "Ishmael" in word:
        counter += 1
# or just `content.count("Ishmael")`
result = f"There are {counter} `Ishmael`s in Moby Dick"
with open("ishmael_counts.txt", "w") as fh:
    fh.write(result)
# -
# # Exercise: Haiku Checker
# let's create a small program that will:
#
# - open the cat_haiku.txt file,
# - take the haiku part and clean up empty rows
# - check if it matches the following criteria:
# - 3 sentences
# - pattern 5, 7, 5 syllables
# To start, use the following `sentence_syllable_count` and `remove_punctuation` as given below, but, if you want a challenge you can try to implement them from scratch.
#
# ### sub-exercise: syllable counting
# try to implement it using the following rules ([1-3](https://personal.utdallas.edu/~pervin/Flesch.txt), [4](http://english.glendale.cc.ca.us/phonics.rules.html)):
# 1. Each group of adjacent vowels {a,e,i,o,u,y} counts as one syllable (for example, the "ea" in "real" contributes one syllable, but the "e..a" in "regal" count as two syllables).
# 2. An "e" at the end of a word doesn't count as a syllable.
# 3. Each word has at least one syllable, even if the previous rules give a count of 0.
# 4. The diphthongs are: "oi, oy, ou, ow, au, aw, oo" always count as 1 syllable.
# + tags=[]
def word_syllable_count(word):
    """simplified syllable counting, won't work every time: e.g. `vehicle`

    Heuristic: roughly one syllable per consonant->vowel transition, with
    diphthongs (oi, oy, ou, ow, au, aw, oo) collapsed first so each counts
    once, a trailing 'e' discounted, and a minimum of one syllable per word.
    """
    word = word.lower()
    vowels = "aeiouy"
    diphthongs = "oi,oy,ou,ow,au,aw,oo".split(',')
    syllables = 0
    last_was_consonant = False
    # Collapse each diphthong to a single vowel flanked by consonants ("xox")
    # so it contributes exactly one syllable below.
    for diphthong in diphthongs:
        word = word.replace(diphthong, "xox")
    for char in word:
        if (char in vowels) and last_was_consonant:
            syllables += 1
            last_was_consonant = False
        else:
            last_was_consonant = True
    # Rule 2: a final 'e' does not count as a syllable.
    if word[-1] == 'e':
        syllables -= 1
    # Rule 3: every word has at least one syllable.
    return max(1, syllables)


def sentence_syllable_count(sentence):
    """Sum of per-word syllable counts over the whitespace-split sentence."""
    syllable_count = 0
    for word in sentence.split():
        syllable_count += word_syllable_count(word)
    return syllable_count


def remove_punctuation(s):
    """Strip ASCII punctuation characters and newlines from `s`."""
    to_remove = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~\n'
    for i in to_remove:
        s = s.replace(i, "")
    return s
# + jupyter={"source_hidden": true} tags=[]
# tests
# Each line should print True.
print(word_syllable_count(word="flower") == 2)
print(word_syllable_count(word="thought") == 1)
print(word_syllable_count(word="teacher") == 2)
print(word_syllable_count(word="broadcast") == 2)
print(word_syllable_count(word="dreamed") == 2)
print(word_syllable_count(word="face") == 1)
print(word_syllable_count(word="meow") == 2)
print(sentence_syllable_count(sentence="cat in a hat") == 4)
print(remove_punctuation("my.ha;i#ku\n") == "myhaiku")
# NOTE(review): duplicate of the previous test line.
print(remove_punctuation("my.ha;i#ku\n") == "myhaiku")
# +
# Read the haiku file, drop comment/blank lines, then check the 5-7-5 pattern.
with open("cat_haiku.txt") as fh:
    lines = fh.readlines()
clean_lines = []
for item in lines:
    if item.startswith("#") or item.startswith("\n"):
        continue
    clean_lines.append(item)
is_haiku = True
if len(clean_lines) != 3:
    is_haiku = False
    print("NOT AN HAIKU: it had != 3 lines")
if sentence_syllable_count(clean_lines[0]) != 5:
    is_haiku = False
    print("NOT AN HAIKU the first sentence does not have 5 sillables")
if sentence_syllable_count(clean_lines[1]) != 7:
    is_haiku = False
    print("NOT AN HAIKU the second sentence does not have 7 sillables")
if sentence_syllable_count(clean_lines[2]) != 5:
    is_haiku = False
    print("NOT AN HAIKU the third sentence does not have 5 sillables")
if is_haiku:
    print("IT IS AN HAIKU")
# -
# ### Possible Alternative Solution
# + jupyter={"source_hidden": true} tags=[]
def load_valid_lines(filepath):
    """Return the lines of `filepath` that are neither '#'-comments nor blank.

    Newline characters are preserved on the returned lines.
    """
    clean_lines = []
    with open(filepath) as fh:
        for line in fh.readlines():
            if not line.startswith("#") and line != "\n":
                clean_lines.append(line)
    return clean_lines
def check_sentence(sentence, expected_count):
    """Return True if `sentence` has exactly `expected_count` syllables.

    Prints a diagnostic and returns False otherwise. Punctuation is stripped
    before counting.
    """
    clean_sentence = remove_punctuation(sentence)
    actual_count = sentence_syllable_count(clean_sentence)
    if actual_count != expected_count:
        print(f"The following sentence in the Haiku should have {expected_count} syllables but has {actual_count}:\n`{clean_sentence}`")
        return False
    return True


def is_haiku(filepath):
    """Check whether the cleaned content of `filepath` is a 5-7-5 haiku."""
    clean_lines = load_valid_lines(filepath)
    print(''.join(clean_lines))
    if len(clean_lines) != 3:
        print(f"Haiku must have 3 sentences! This has {len(clean_lines)}")
        return False
    if (
        check_sentence(sentence=clean_lines[0], expected_count=5) and
        check_sentence(sentence=clean_lines[1], expected_count=7) and
        check_sentence(sentence=clean_lines[2], expected_count=5)
    ):
        return True
    else:
        return False


is_haiku(filepath='cat_haiku.txt')
# -
|
2020-21_semester2/07_Interacting_with_files.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Importing the ITU-Rpy module
import itur
# +
#Calling the high-level function
#Location: El Segundo Aerospace Campus
lat = 33.915335
lon = -118.38257
#Frequency: 20 GHz
freq = 20.0
#Elevation Angle: 45 Degrees
ele = 45.0
#Signal Availability: 99%
availability = 99
# The function takes the outage (unavailability) probability in percent.
probability = 100 - availability
#Antenna Diameter: 3 m
D = 3.0
#Polarization Angle: 45 Degrees
tau = 45.0
# Gaseous, cloud, rain, scintillation and total attenuation contributions.
Ag, Ac, Ar, As, At = itur.atmospheric_attenuation_slant_path(lat, lon, freq, ele, probability, D,
tau = tau, return_contributions = True)
# -
# It is important to note that the total atmospheric attenuation is not a simple sum of each of the attenuation components:
# 
#The Total Attenuation
At
#The Gaseous Attenuation Component
Ag
#The Cloud Attenuation Component
Ac
#The Rain Attenuation Component
Ar
#The Scintillation Attenuation Component
As
|
HighLevelFunctionDemo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:research]
# language: python
# name: conda-env-research-py
# ---
# # Import library
# +
import pandas as pd
import yaml
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib as mpl
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib import container
import seaborn as sns
import glob
import json
from matplotlib.colors import LogNorm
import copy
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from matplotlib.ticker import (AutoMinorLocator, MultipleLocator)
from matplotlib import rc
# +
from matplotlib.patches import PathPatch
def adjust_box_widths(ax, fac):
    """
    Adjust the widths of a seaborn-generated boxplot.

    Shrinks/expands each box PathPatch on `ax` around its horizontal centre
    by factor `fac`, and moves the matching median lines along with it.
    """
    # iterating through axes artists:
    for c in ax.get_children():
        # searching for PathPatches
        if isinstance(c, PathPatch):
            # getting current width of box:
            p = c.get_path()
            verts = p.vertices
            verts_sub = verts[:-1]  # last vertex closes the path
            xmin = np.min(verts_sub[:, 0])
            xmax = np.max(verts_sub[:, 0])
            xmid = 0.5*(xmin+xmax)
            xhalf = 0.5*(xmax - xmin)
            # setting new width of box
            xmin_new = xmid-fac*xhalf
            xmax_new = xmid+fac*xhalf
            verts_sub[verts_sub[:, 0] == xmin, 0] = xmin_new
            verts_sub[verts_sub[:, 0] == xmax, 0] = xmax_new
            # setting new width of median line
            for l in ax.lines:
                if np.all(l.get_xdata() == [xmin, xmax]):
                    l.set_xdata([xmin_new, xmax_new])
# +
def change_width(ax, new_value):
    """Set every bar patch on `ax` to width `new_value`, keeping it centred."""
    for patch in ax.patches:
        current_width = patch.get_width()
        diff = current_width - new_value
        patch.set_width(new_value)
        # shift the bar by half the removed width so its centre is unchanged
        patch.set_x(patch.get_x() + diff * .5)
def latexify(fig_width=None, fig_height=None, columns=1, largeFonts=False, font_scale=1):
    """Set up matplotlib's RC params for LaTeX plotting.
    Call this before plotting a figure.

    Parameters
    ----------
    fig_width : float, optional, inches
    fig_height : float, optional, inches
    columns : {1, 2}
    largeFonts : bool, use 10pt instead of 7pt base fonts
    font_scale : float, multiplier applied to all font sizes
    """
    # code adapted from http://www.scipy.org/Cookbook/Matplotlib/LaTeX_Examples
    # Width and max height in inches for IEEE journals taken from
    # computer.org/cms/Computer.org/Journal%20templates/transactions_art_guide.pdf
    assert(columns in [1, 2])

    if fig_width is None:
        fig_width = 3.39 if columns == 1 else 6.9  # width in inches

    if fig_height is None:
        golden_mean = (np.sqrt(5) - 1.0) / 2.0  # Aesthetic ratio
        fig_height = fig_width * golden_mean  # height in inches

    MAX_HEIGHT_INCHES = 28.0
    if fig_height > MAX_HEIGHT_INCHES:
        # BUG FIX: the original concatenated str + float here, which raised
        # TypeError whenever this branch was taken.
        print(f"WARNING: fig_height too large: {fig_height} "
              f"so will reduce to {MAX_HEIGHT_INCHES} inches.")
        fig_height = MAX_HEIGHT_INCHES

    params = {'backend': 'ps',
              'axes.labelsize': font_scale * 10 if largeFonts else font_scale * 7,
              'axes.titlesize': font_scale * 10 if largeFonts else font_scale * 7,
              'font.size': font_scale * 10 if largeFonts else font_scale * 7,  # was 10
              'legend.fontsize': font_scale * 10 if largeFonts else font_scale * 7,  # was 10
              'xtick.labelsize': font_scale * 10 if largeFonts else font_scale * 7,
              'ytick.labelsize': font_scale * 10 if largeFonts else font_scale * 7,
              'figure.figsize': [fig_width, fig_height],
              'font.family': 'serif',
              'xtick.minor.size': 0.5,
              'xtick.major.pad': 1.5,
              'xtick.major.size': 1,
              'ytick.minor.size': 0.5,
              'ytick.major.pad': 1.5,
              'ytick.major.size': 1,
              'lines.linewidth': 1.5,
              'lines.markersize': 0.1,
              'hatch.linewidth': 0.5
              }

    matplotlib.rcParams.update(params)
    plt.rcParams.update(params)
import colorsys
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
def alter(alist, col, factor=1.1):
    """Scale column `col` of a list of colour tuples by `factor`.

    Returns a new list of tuples; the input is not modified. Note the
    clipping to [0, 1] is applied to EVERY entry of the array, not just the
    scaled column (preserved from the original behaviour).
    """
    tmp = np.array(alist)
    tmp[:, col] = tmp[:, col] * factor
    tmp[tmp > 1] = 1
    tmp[tmp < 0] = 0
    return [tuple(row) for row in tmp.tolist()]
def rgb2hls(alist):
    """Convert a list of (r, g, b) tuples to (h, l, s) tuples via colorsys."""
    alist = alist[:]  # shallow copy so the caller's list is untouched
    for i, row in enumerate(alist):
        hls = colorsys.rgb_to_hls(row[0], row[1], row[2])
        alist[i] = hls
    return alist
def hls2rgb(alist):
    """Convert a list of (h, l, s) tuples back to (r, g, b) tuples."""
    alist = alist[:]  # shallow copy so the caller's list is untouched
    for i, row in enumerate(alist):
        rgb = colorsys.hls_to_rgb(row[0], row[1], row[2])
        alist[i] = rgb
    return alist
def lighten(alist, increase=0.2):
    """Raise the HLS lightness channel of a list of RGB tuples by `increase`."""
    factor = 1 + increase
    hls = rgb2hls(alist)
    new = alter(hls, 1, factor=factor)
    rgb = hls2rgb(new)
    return rgb


def darken(alist, decrease=0.2):
    """Lower the HLS lightness channel of a list of RGB tuples by `decrease`."""
    factor = 1 - decrease
    hls = rgb2hls(alist)
    new = alter(hls, 1, factor=factor)
    rgb = hls2rgb(new)
    return rgb


def saturate(alist, increase=0.2):
    """Raise the HLS saturation channel of a list of RGB tuples by `increase`."""
    factor = 1 + increase
    hls = rgb2hls(alist)
    new = alter(hls, 2, factor=factor)
    rgb = hls2rgb(new)
    return rgb


def desaturate(alist, decrease=0.2):
    """Lower the HLS saturation channel of a list of RGB tuples by `decrease`."""
    factor = 1 - decrease
    hls = rgb2hls(alist)
    new = alter(hls, 2, factor=factor)
    rgb = hls2rgb(new)
    return rgb
def lighten_color(color, amount=0.5):
    """
    Lightens the given color by multiplying (1-luminosity) by the given amount.
    Input can be matplotlib color string, hex string, or RGB tuple.

    Examples:
    >> lighten_color('g', 0.3)
    >> lighten_color('#F034A3', 0.6)
    >> lighten_color((.3,.55,.1), 0.5)
    """
    import matplotlib.colors as mc
    import colorsys
    try:
        c = mc.cnames[color]
    except (KeyError, TypeError):
        # Not a named color (hex string, RGB tuple, ...): use it as-is.
        # (Narrowed from a bare `except:` which also hid real errors.)
        c = color
    c = colorsys.rgb_to_hls(*mc.to_rgb(c))
    return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])
import colorsys


def scale_lightness(rgb, scale_l):
    """Scale the HLS lightness of an (r, g, b) tuple by `scale_l`, capped at 1."""
    # convert rgb to hls
    h, l, s = colorsys.rgb_to_hls(*rgb)
    # manipulate h, l, s values and return as rgb
    return colorsys.hls_to_rgb(h, min(1, l * scale_l), s=s)


def scale(arr_rgb, scale_l):
    """Apply scale_lightness to every RGB tuple in `arr_rgb`."""
    return [scale_lightness(x, scale_l) for x in arr_rgb]
# -
"""Load configuration"""
with open("/home/trduong/Data/counterfactual_fairness_game_theoric/configuration.yml", 'r') as stream:
try:
conf = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
# # Law dataset
# ## Evaluation
# Per-method evaluation metrics for the law-school dataset.
df_evaluate = pd.read_csv(conf['result_evaluate_law'])
df_evaluate
# +
# method_name = {
# "full_prediction" : "Full model",
# "unaware_prediction" : "Unawareness model",
# "cf_prediction" : "Counterfactual Fairness model",
# "inv_prediction": "Invariant model"
# }
# +
# df['method'] = df['method'].map(method_name)
# -
# ## Read data
# Baseline predictions and invariant-representation (IVR) predictions.
df_baseline = pd.read_csv(conf["result_law_baseline"])
df_ivr = pd.read_csv(conf["result_ivr_law"])
df_ivr
# ## Visualization
def plot_distribution(df, x_value, y_value, alpha, linewidth, ax):
    """KDE plot of column `x_value`, split by the `y_value` hue, on axes `ax`.

    NOTE(review): reads the module-level `palette` variable assigned in the
    plotting cells below — make sure it is defined before calling.
    """
    sns.kdeplot(data=df, x=x_value, hue=y_value, fill=True,
                common_norm=False, palette=palette, alpha=alpha,
                linewidth=linewidth, ax=ax)
# +
# KDE grids for the three baseline predictors on the law data, split by sex/race.
sns.set_context(rc={'lines.markeredgewidth': 0.1})
latexify(25,18, font_scale=3)
sns.plotting_context("poster", rc={"lines.linewidth": 40})
sns.set_style("darkgrid")
fig, axes = plt.subplots(nrows=3, ncols=2)
ax1, ax2, ax3, ax4, ax5, ax6 = axes[0,0], axes[0,1], axes[1,0], axes[1,1], axes[2,0], axes[2,1]
alpha = 0.4
linewidth = 3
# `palette` is read as a global by plot_distribution.
palette = "magma"
axes = [ax1, ax2, ax3, ax4, ax5, ax6]
plot_distribution(df_baseline, "full_prediction", "sex", alpha, linewidth, ax1)
plot_distribution(df_baseline, "full_prediction", "race", alpha, linewidth, ax2)
plot_distribution(df_baseline, "unaware_prediction", "sex", alpha, linewidth, ax3)
plot_distribution(df_baseline, "unaware_prediction", "race", alpha, linewidth, ax4)
plot_distribution(df_baseline, "cf_prediction", "sex", alpha, linewidth, ax5)
plot_distribution(df_baseline, "cf_prediction", "race", alpha, linewidth, ax6)
# -
df_baseline
df_ivr
# +
# Same KDE grid for the three IVR predictors.
latexify(25,18, font_scale=3)
sns.set_context(rc={'lines.markeredgewidth': 0.1})
sns.plotting_context("poster", rc={"lines.linewidth": 40})
sns.set_style("darkgrid")
fig, axes = plt.subplots(nrows=3, ncols=2)
ax1, ax2, ax3, ax4, ax5, ax6 = axes[0,0], axes[0,1], axes[1,0], axes[1,1], axes[2,0], axes[2,1]
alpha = 0.4
linewidth = 3
palette = "magma"
axes = [ax1, ax2, ax3, ax4, ax5, ax6]
plot_distribution(df_ivr, "AL_prediction", "sex", alpha, linewidth, ax1)
plot_distribution(df_ivr, "AL_prediction", "race", alpha, linewidth, ax2)
plot_distribution(df_ivr, "GL_prediction", "sex", alpha, linewidth, ax3)
plot_distribution(df_ivr, "GL_prediction", "race", alpha, linewidth, ax4)
plot_distribution(df_ivr, "GD_prediction", "sex", alpha, linewidth, ax5)
plot_distribution(df_ivr, "GD_prediction", "race", alpha, linewidth, ax6)
# -
# Sinkhorn-divergence setup (geomloss); runs on GPU when available.
from geomloss import SamplesLoss
import torch
backend = "auto"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
df_ivr.head()
# +
# Split each result set by the protected attribute `race`.
R1_ivr = df_ivr[df_ivr['race'] == 1]
R0_ivr = df_ivr[df_ivr['race'] == 0]
R1_baseline = df_baseline[df_baseline['race'] == 1]
R0_baseline = df_baseline[df_baseline['race'] == 0]
# +
# Sinkhorn divergence between the two race groups' prediction distributions —
# smaller means the predictor treats the groups more similarly.
ys = R1_ivr["GD_prediction"].values
ys_hat = R0_ivr["GD_prediction"].values
ys = torch.Tensor(ys).to(device).reshape(-1,1)
ys_hat = torch.Tensor(ys_hat).to(device).reshape(-1,1)
Loss = SamplesLoss("sinkhorn", p=2, blur=0.05, scaling=0.95, backend = backend)
print(Loss(ys, ys_hat).cpu().detach().numpy())
# Same comparison for the baseline counterfactual predictor.
ys = R1_baseline["cf_prediction"].values
ys_hat = R0_baseline["cf_prediction"].values
ys = torch.Tensor(ys).to(device).reshape(-1,1)
ys_hat = torch.Tensor(ys_hat).to(device).reshape(-1,1)
Loss = SamplesLoss("sinkhorn", p=2, blur=0.05, scaling=0.95, backend = backend)
print(Loss(ys, ys_hat).cpu().detach().numpy())
# -
# # Adult dataset
# Evaluation metrics plus baseline / IVR predictions for the adult dataset.
adult_evaluate = pd.read_csv(conf['result_evaluate_adult'])
adult_evaluate
adult_baseline = pd.read_csv(conf['result_adult'])
adult_ivr = pd.read_csv(conf['result_ivr_adult'])
adult_baseline
adult_ivr
# +
from fairlearn.widget import FairlearnDashboard
# BUG FIX: `gender` was undefined in the original cell. Presumably the
# sensitive-attribute column of the scored dataframe — TODO confirm.
gender = adult_ivr['gender'].values
FairlearnDashboard(y_true=adult_ivr['income'].values,
                   y_pred=adult_ivr['GD_prediction'].values,
                   sensitive_features=gender,
                   sensitive_feature_names=["gender"])
# +
# 2x2 KDE grid for the two baseline adult predictors, split by gender/race.
latexify(29,18, font_scale=3)
sns.set_context(rc={'lines.markeredgewidth': 0.1})
sns.plotting_context("poster", rc={"lines.linewidth": 40})
sns.set_style("darkgrid")
fig, axes = plt.subplots(nrows=2, ncols=2)
ax1, ax2, ax3, ax4 = axes[0,0], axes[0,1], axes[1,0], axes[1,1]
alpha = 0.4
linewidth = 3
palette = "magma"
# BUG FIX: the original list also contained ax5/ax6, which are not defined in
# this 2x2 cell (stale copy-paste from the 3x2 cells).
axes = [ax1, ax2, ax3, ax4]
plot_distribution(adult_baseline, "full_prediction_proba", "gender", alpha, linewidth, ax1)
plot_distribution(adult_baseline, "full_prediction_proba", "race", alpha, linewidth, ax2)
plot_distribution(adult_baseline, "unaware_prediction_proba", "gender", alpha, linewidth, ax3)
plot_distribution(adult_baseline, "unaware_prediction_proba", "race", alpha, linewidth, ax4)
# +
# 3x2 KDE grid for the three IVR adult predictors, split by gender/race.
latexify(29,18, font_scale=3)
sns.set_context(rc={'lines.markeredgewidth': 0.1})
sns.plotting_context("poster", rc={"lines.linewidth": 40})
sns.set_style("darkgrid")
fig, axes = plt.subplots(nrows=3, ncols=2)
ax1, ax2, ax3, ax4, ax5, ax6 = axes[0,0], axes[0,1], axes[1,0], axes[1,1], axes[2,0], axes[2,1]
alpha = 0.4
linewidth = 3
palette = "magma"
axes = [ax1, ax2, ax3, ax4, ax5, ax6]
plot_distribution(adult_ivr, "AL_prediction_proba", "gender", alpha, linewidth, ax1)
plot_distribution(adult_ivr, "AL_prediction_proba", "race", alpha, linewidth, ax2)
plot_distribution(adult_ivr, "GL_prediction_proba", "gender", alpha, linewidth, ax3)
plot_distribution(adult_ivr, "GL_prediction_proba", "race", alpha, linewidth, ax4)
plot_distribution(adult_ivr, "GD_prediction_proba", "gender", alpha, linewidth, ax5)
plot_distribution(adult_ivr, "GD_prediction_proba", "race", alpha, linewidth, ax6)
# -
# # Compas
# Evaluation metrics plus baseline / IVR predictions for the COMPAS dataset.
compas_evaluate = pd.read_csv(conf['result_evaluate_compas'])
compas_evaluate
compas_baseline = pd.read_csv(conf['result_compas'])
compas_ivr = pd.read_csv(conf['result_ivr_compas'])
compas_baseline
compas_ivr
from aif360.metrics import BinaryLabelDatasetMetric, ClassificationMetric
# +
# NOTE(review): `categorical_names`, `dataset` and `dataset_pred` are not
# defined anywhere in this notebook — this cell presumably depends on an
# earlier/missing preprocessing cell. Confirm before running.
privileged_race = np.where(categorical_names['Victim Race'] == 'White')[0]
unprivileged_race = np.where(categorical_names['Victim Race'] == 'Black')[0]
# I format variable like in the documentation of ClassificationMetric and BinaryLabelDatasetMetric
privileged_groups = [{'Victim Race' : privileged_race}]
unprivileged_groups = [{'Victim Race' : unprivileged_race}]
# I create both classes
classified_metric_race = ClassificationMetric(dataset,
dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
metric_pred_race = BinaryLabelDatasetMetric(dataset_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
|
notebooks/.ipynb_checkpoints/Exploration-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import numpy as np
import tensorflow as tf
import chess
from chesslab.training_tf import load_model
def encode(board, encoding):
    """Encode a chess board's str() representation as an (8, 8, C) array.

    `board` is anything whose str() is 8 newline-separated ranks with
    space-separated piece characters (e.g. a chess.Board). `encoding` maps
    each piece character (and '.') to a length-C feature vector; C is taken
    from encoding['.'].
    """
    b = str(board).replace(' ', '').split('\n')
    a = np.zeros([8, 8, len(encoding['.'])])
    for i, row in enumerate(b):
        for j, val in enumerate(row):
            a[i, j, :] = encoding[val]
    return a
class Model_2(tf.Module):
    """Convolutional net over 8x8x4 board encodings.

    Architecture: conv7x7x32 -> relu -> conv5x5x64 -> relu -> conv3x3x128
    -> elu -> dense 256 -> relu -> linear n_classes logits. Weights live in
    plain tf.Variables (self.hw / self.hb); the variable names must stay
    unchanged so the chesslab checkpoint can be loaded by name.
    """

    def __init__(self,
                 n_classes=2):
        initializer = tf.keras.initializers.GlorotNormal()
        self.hw = []  # weights, layer by layer
        self.hb = []  # biases, layer by layer
        self.hw.append(tf.Variable(initializer(shape=(7,7,4,32),dtype=np.float32),name="hl1weigths",dtype="float32"))
        self.hb.append(tf.Variable(np.zeros(32,dtype=np.float32),name="hl1bias",dtype="float32"))
        #8x8x32
        self.hw.append(tf.Variable(initializer(shape=(5,5,32,64),dtype=np.float32),name="hl2weigths",dtype="float32"))
        self.hb.append(tf.Variable(np.zeros(64,dtype=np.float32),name="hl2bias",dtype="float32"))
        #8x8x64
        self.hw.append(tf.Variable(initializer(shape=(3,3,64,128),dtype=np.float32),name="hl3weigths",dtype="float32"))
        self.hb.append(tf.Variable(np.zeros(128,dtype=np.float32),name="hl3bias",dtype="float32"))
        #8x8x128
        self.hw.append(tf.Variable(initializer(shape=(8*8*128,256),dtype=np.float32),name="hl4weigths",dtype="float32"))
        self.hb.append(tf.Variable(np.zeros(256,dtype=np.float32),name="hl4bias",dtype="float32"))
        self.hw.append(tf.Variable(initializer(shape=(256, n_classes),dtype=np.float32),name="outweigths",dtype="float32"))
        self.hb.append(tf.Variable(np.zeros(n_classes,dtype=np.float32),name="outbias",dtype="float32"))
        # tf.Module collects trainable_variables automatically, so this
        # manual bookkeeping is unnecessary:
        #self.trainable_variables = []
        #for i in range(len(self.hw)):
        #    self.trainable_variables.append(self.hw[i])
        #    self.trainable_variables.append(self.hb[i])

    @tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
    def __call__(self, x):
        """Forward pass: returns n_classes logits per board in the batch."""
        out = tf.cast(x, tf.float32)
        out = tf.reshape(out, shape=[-1, 8, 8, 4])
        out = tf.nn.conv2d(out, self.hw[0], strides=[1,1,1,1], padding='SAME')
        out = tf.add(out, self.hb[0])
        out = tf.nn.relu(out)
        out = tf.nn.conv2d(out, self.hw[1], strides=[1,1,1,1], padding='SAME')
        out = tf.add(out, self.hb[1])
        out = tf.nn.relu(out)
        out = tf.nn.conv2d(out, self.hw[2], strides=[1,1,1,1], padding='SAME')
        out = tf.add(out, self.hb[2])
        # NOTE(review): this layer uses elu while the others use relu —
        # presumably intentional (checkpoint name mentions "relu"); confirm.
        out = tf.nn.elu(out)
        out = tf.reshape(out, [-1, 8*8*128])
        out = tf.matmul(out, self.hw[3])
        out = tf.add(out, self.hb[3])
        out = tf.nn.relu(out)
        out = tf.matmul(out, self.hw[4])
        out = tf.add(out, self.hb[4])
        return out
# Instantiate the network and restore trained weights from disk.
model = Model_2()
path_model = './tmp/tf_weights-relu.0.1.h5'
encoding,history=load_model(model,path_model)
# Encode the initial chess position and build batches of increasing size
# (tiling the same position) for the throughput benchmarks below.
board = chess.Board()
code = encode(board,encoding)
code2 = np.tile(code,(20,1,1,1))
code3 = np.tile(code,(35,1,1,1))
code4 = np.tile(code,(100,1,1,1))
code5 = np.tile(code,(400,1,1,1))
code6 = np.tile(code,(1000,1,1,1))
code7 = np.tile(code,(1225,1,1,1))
code8 = np.tile(code,(8000,1,1,1))
code9 = np.tile(code,(10000,1,1,1))
code10 = np.tile(code,(20000,1,1,1))
print(code.shape)
print(code2.shape)
# + tags=[]
model(code3)
# -
# Export as a TF SavedModel with an explicit serving signature, then
# reload it to confirm the round trip works on a couple of batches.
save_model_path = './tmp/saved_model/'
call_output = model.__call__.get_concrete_function(tf.TensorSpec(None, tf.float32))
tf.saved_model.save(model,save_model_path,signatures={'serving_default': call_output})
imported = tf.saved_model.load(save_model_path)
# + tags=[]
imported(code)
# -
imported(code2)
encoding
# + active=""
# board = chess.Board()
# board
# + active=""
# a = list(board.legal_moves)
# b=[]
# b.extend(a)
# b
# + active=""
# a=[]
# b
# -
np.concatenate((code2,code3)).shape
# Benchmark inference latency at increasing batch sizes.
# Flip cuda=True to time on the GPU (when one is available) instead of CPU.
cuda = False
physical_devices = tf.config.list_physical_devices('GPU')
device = "/device:GPU:0" if cuda and len(physical_devices)>0 else "/cpu:0"
# %%timeit
with tf.device(device):
    model(code)
# %%timeit
with tf.device(device):
    model(code2)
# %%timeit
with tf.device(device):
    model(code3)
# %%timeit
with tf.device(device):
    model(code4)
# %%timeit
with tf.device(device):
    model(code5)
# %%timeit
with tf.device(device):
    model(code6)
# %%timeit
with tf.device(device):
    model(code7)
# %%timeit
with tf.device(device):
    model(code8)
# %%timeit
with tf.device(device):
    model(code9)
# %%timeit
with tf.device(device):
    model(code10)
import numpy as np
import chess
board = chess.Board()
# +
# 3-channel piece code: each symbol maps to a vector in {-1,0,1}^3;
# white pieces (uppercase) are the negation of the matching black piece
# (lowercase), and '.' (empty square) is the zero vector.
encoding={'.': np.array([0., 0., 0.]),
          'p': np.array([0., 0., 1.]),
          'P': np.array([ 0., 0., -1.]),
          'b': np.array([0., 1., 0.]),
          'B': np.array([ 0., -1., 0.]),
          'n': np.array([1., 0., 0.]),
          'N': np.array([-1., 0., 0.]),
          'r': np.array([0., 1., 1.]),
          'R': np.array([ 0., -1., -1.]),
          'q': np.array([1., 0., 1.]),
          'Q': np.array([-1., 0., -1.]),
          'k': np.array([1., 1., 0.]),
          'K': np.array([-1., -1., 0.])}
def encode(board,encoding):
    """Turn the printable board into an 8x8xC numeric tensor via `encoding`."""
    grid = str(board).replace(' ', '').split('\n')
    out = np.zeros([8, 8, len(encoding['.'])])
    for row_idx, row in enumerate(grid):
        for col_idx, ch in enumerate(row):
            out[row_idx, col_idx, :] = encoding[ch]
    return out
# -
# %%timeit
code = encode(board,encoding)
def prueba():
    # Zero-argument wrapper so the encode() call can be handed to timer() below.
    code = encode(board,encoding)
import time
def timer(function, repeats=10000):
    """Return the mean wall-clock runtime of `function()` in seconds.

    function -- zero-argument callable to benchmark
    repeats  -- number of calls to average over (default preserves the
                original hard-coded 10000)

    Uses time.perf_counter() rather than time.time(): perf_counter is
    monotonic and has far finer resolution, whereas time.time() can jump
    on clock adjustments and often cannot resolve microsecond calls.
    """
    elapsed = 0.0
    for _ in range(repeats):
        start = time.perf_counter()
        function()
        elapsed += time.perf_counter() - start
    return elapsed / repeats
# Mean per-call encode time, converted from seconds to microseconds.
timer(prueba)*1e6
# Sanity check: 8e-5 s and 80e-6 s are the same quantity (difference is 0).
8e-5 - 80e-6
# + jupyter={"source_hidden": true} tags=[]
import re, sys, time
from itertools import count
from collections import namedtuple
###############################################################################
# Piece-Square tables. Tune these to change sunfish's behaviour
###############################################################################
piece = { 'P': 100, 'N': 280, 'B': 320, 'R': 479, 'Q': 929, 'K': 60000 }
pst = {
'P': ( 0, 0, 0, 0, 0, 0, 0, 0,
78, 83, 86, 73, 102, 82, 85, 90,
7, 29, 21, 44, 40, 31, 44, 7,
-17, 16, -2, 15, 14, 0, 15, -13,
-26, 3, 10, 9, 6, 1, 0, -23,
-22, 9, 5, -11, -10, -2, 3, -19,
-31, 8, -7, -37, -36, -14, 3, -31,
0, 0, 0, 0, 0, 0, 0, 0),
'N': ( -66, -53, -75, -75, -10, -55, -58, -70,
-3, -6, 100, -36, 4, 62, -4, -14,
10, 67, 1, 74, 73, 27, 62, -2,
24, 24, 45, 37, 33, 41, 25, 17,
-1, 5, 31, 21, 22, 35, 2, 0,
-18, 10, 13, 22, 18, 15, 11, -14,
-23, -15, 2, 0, 2, 0, -23, -20,
-74, -23, -26, -24, -19, -35, -22, -69),
'B': ( -59, -78, -82, -76, -23,-107, -37, -50,
-11, 20, 35, -42, -39, 31, 2, -22,
-9, 39, -32, 41, 52, -10, 28, -14,
25, 17, 20, 34, 26, 25, 15, 10,
13, 10, 17, 23, 17, 16, 0, 7,
14, 25, 24, 15, 8, 25, 20, 15,
19, 20, 11, 6, 7, 6, 20, 16,
-7, 2, -15, -12, -14, -15, -10, -10),
'R': ( 35, 29, 33, 4, 37, 33, 56, 50,
55, 29, 56, 67, 55, 62, 34, 60,
19, 35, 28, 33, 45, 27, 25, 15,
0, 5, 16, 13, 18, -4, -9, -6,
-28, -35, -16, -21, -13, -29, -46, -30,
-42, -28, -42, -25, -25, -35, -26, -46,
-53, -38, -31, -26, -29, -43, -44, -53,
-30, -24, -18, 5, -2, -18, -31, -32),
'Q': ( 6, 1, -8,-104, 69, 24, 88, 26,
14, 32, 60, -10, 20, 76, 57, 24,
-2, 43, 32, 60, 72, 63, 43, 2,
1, -16, 22, 17, 25, 20, -13, -6,
-14, -15, -2, -5, -1, -10, -20, -22,
-30, -6, -13, -11, -16, -11, -16, -27,
-36, -18, 0, -19, -15, -15, -21, -38,
-39, -30, -31, -13, -31, -36, -34, -42),
'K': ( 4, 54, 47, -99, -99, 60, 83, -62,
-32, 10, 55, 56, 56, 55, 10, 3,
-62, 12, -57, 44, -67, 28, 37, -31,
-55, 50, 11, -4, -19, 13, 0, -49,
-55, -43, -52, -28, -51, -47, -8, -50,
-47, -42, -43, -79, -64, -32, -29, -32,
-4, 3, -14, -50, -57, -18, 13, 4,
17, 30, -3, -14, 6, -1, 40, 18),
}
# Pad tables and join piece and pst dictionaries
for k, table in pst.items():
    # Fold the piece's material value into every square score, pad each
    # 8-square row to width 10, then add 2 blank rows top and bottom:
    # 8x8 -> 120 entries, matching the padded 120-char board string.
    padrow = lambda row: (0,) + tuple(x+piece[k] for x in row) + (0,)
    pst[k] = sum((padrow(table[i*8:i*8+8]) for i in range(8)), ())
    pst[k] = (0,)*20 + pst[k] + (0,)*20
###############################################################################
# Global constants
###############################################################################
# Our board is represented as a 120 character string. The padding allows for
# fast detection of moves that don't stay within the board.
A1, H1, A8, H8 = 91, 98, 21, 28
initial = (
' \n' # 0 - 9
' \n' # 10 - 19
' rnbqkbnr\n' # 20 - 29
' pppppppp\n' # 30 - 39
' ........\n' # 40 - 49
' ........\n' # 50 - 59
' ........\n' # 60 - 69
' ........\n' # 70 - 79
' PPPPPPPP\n' # 80 - 89
' RNBQKBNR\n' # 90 - 99
' \n' # 100 -109
' \n' # 110 -119
)
# Lists of possible moves for each piece type.
N, E, S, W = -10, 1, 10, -1
directions = {
'P': (N, N+N, N+W, N+E),
'N': (N+N+E, E+N+E, E+S+E, S+S+E, S+S+W, W+S+W, W+N+W, N+N+W),
'B': (N+E, S+E, S+W, N+W),
'R': (N, E, S, W),
'Q': (N, E, S, W, N+E, S+E, S+W, N+W),
'K': (N, E, S, W, N+E, S+E, S+W, N+W)
}
# Mate value must be greater than 8*queen + 2*(rook+knight+bishop)
# King value is set to twice this value such that if the opponent is
# 8 queens up, but we got the king, we still exceed MATE_VALUE.
# When a MATE is detected, we'll set the score to MATE_UPPER - plies to get there
# E.g. Mate in 3 will be MATE_UPPER - 6
MATE_LOWER = piece['K'] - 10*piece['Q']
MATE_UPPER = piece['K'] + 10*piece['Q']
# The table size is the maximum number of elements in the transposition table.
TABLE_SIZE = 1e7
# Constants for tuning search
QS_LIMIT = 219
EVAL_ROUGHNESS = 13
DRAW_TEST = True
###############################################################################
# Chess logic
###############################################################################
class Position(namedtuple('Position', 'board score wc bc ep kp')):
    """ A state of a chess game
    board -- a 120 char representation of the board
    score -- the board evaluation
    wc -- the castling rights, [west/queen side, east/king side]
    bc -- the opponent castling rights, [west/king side, east/queen side]
    ep - the en passant square
    kp - the king passant square
    """
    def gen_moves(self):
        """Yield pseudo-legal moves as (from, to) board indexes for the
        side to move (always the uppercase pieces; the board is rotated
        between plies)."""
        # For each of our pieces, iterate through each possible 'ray' of moves,
        # as defined in the 'directions' map. The rays are broken e.g. by
        # captures or immediately in case of pieces such as knights.
        for i, p in enumerate(self.board):
            if not p.isupper(): continue
            for d in directions[p]:
                for j in count(i+d, d):
                    q = self.board[j]
                    # Stay inside the board, and off friendly pieces
                    if q.isspace() or q.isupper(): break
                    # Pawn move, double move and capture
                    if p == 'P' and d in (N, N+N) and q != '.': break
                    if p == 'P' and d == N+N and (i < A1+N or self.board[i+N] != '.'): break
                    if p == 'P' and d in (N+W, N+E) and q == '.' \
                            and j not in (self.ep, self.kp, self.kp-1, self.kp+1): break
                    # Move it
                    yield (i, j)
                    # Stop crawlers from sliding, and sliding after captures
                    if p in 'PNK' or q.islower(): break
                    # Castling, by sliding the rook next to the king
                    if i == A1 and self.board[j+E] == 'K' and self.wc[0]: yield (j+E, j+W)
                    if i == H1 and self.board[j+W] == 'K' and self.wc[1]: yield (j+W, j+E)
    def rotate(self):
        ''' Rotates the board, preserving enpassant '''
        return Position(
            self.board[::-1].swapcase(), -self.score, self.bc, self.wc,
            119-self.ep if self.ep else 0,
            119-self.kp if self.kp else 0)
    def nullmove(self):
        ''' Like rotate, but clears ep and kp '''
        return Position(
            self.board[::-1].swapcase(), -self.score,
            self.bc, self.wc, 0, 0)
    def move(self, move):
        """Return the successor Position after playing `move`, already
        rotated so the opponent is the side to move."""
        i, j = move
        p, q = self.board[i], self.board[j]
        put = lambda board, i, p: board[:i] + p + board[i+1:]
        # Copy variables and reset ep and kp
        board = self.board
        wc, bc, ep, kp = self.wc, self.bc, 0, 0
        score = self.score + self.value(move)
        # Actual move
        board = put(board, j, board[i])
        board = put(board, i, '.')
        # Castling rights, we move the rook or capture the opponent's
        if i == A1: wc = (False, wc[1])
        if i == H1: wc = (wc[0], False)
        if j == A8: bc = (bc[0], False)
        if j == H8: bc = (False, bc[1])
        # Castling
        if p == 'K':
            wc = (False, False)
            if abs(j-i) == 2:
                kp = (i+j)//2
                board = put(board, A1 if j < i else H1, '.')
                board = put(board, kp, 'R')
        # Pawn promotion, double move and en passant capture
        if p == 'P':
            if A8 <= j <= H8:
                board = put(board, j, 'Q')
            if j - i == 2*N:
                ep = i + N
            if j == self.ep:
                board = put(board, j+S, '.')
        # We rotate the returned position, so it's ready for the next player
        return Position(board, score, wc, bc, ep, kp).rotate()
    def value(self, move):
        """Incremental evaluation delta of `move` from the piece-square
        tables (no search); used to keep Position.score up to date."""
        i, j = move
        p, q = self.board[i], self.board[j]
        # Actual move
        score = pst[p][j] - pst[p][i]
        # Capture
        if q.islower():
            score += pst[q.upper()][119-j]
        # Castling check detection
        if abs(j-self.kp) < 2:
            score += pst['K'][119-j]
        # Castling
        if p == 'K' and abs(i-j) == 2:
            score += pst['R'][(i+j)//2]
            score -= pst['R'][A1 if j < i else H1]
        # Special pawn stuff
        if p == 'P':
            if A8 <= j <= H8:
                score += pst['Q'][j] - pst['P'][j]
            if j == self.ep:
                score += pst['P'][119-(j+S)]
        return score
# -
# Count the pseudo-legal moves sunfish generates from the start position.
a = Position(initial, 0, (True,True), (True,True), 0, 0)
moves = a.gen_moves()
len(list(moves))
# Inspect python-chess's internal bitboard representation: each piece
# type / colour set is a 64-bit integer with one bit per occupied square.
import chess
board = chess.Board()
print(bin(board.pawns))
print(bin(board.knights))
print(board.ep_square)
board._board_state()
print("{:064b}".format(board.pawns))
print("{:064b}".format(board.knights))
print("{:064b}".format(board.bishops))
print("{:064b}".format(board.rooks))
print("{:064b}".format(board.queens))
print("{:064b}".format(board.kings))
print("{:064b}".format(board.occupied_co[True]))
print("{:064b}".format(board.occupied_co[False]))
print("{:064b}".format(board.pawns & board.occupied_co[False]))
b=str(board).replace(' ','').split('\n')
b
board.piece_at(0).symbol()
# SQUARES_180 iterates squares in the same top-to-bottom order as str(board).
SQUARES = chess.SQUARES_180
# + jupyter={"outputs_hidden": true} tags=[]
SQUARES
# +
def encode_2(board, encoding, squares=None):
    """Encode a board via python-chess piece lookups (no string parsing).

    board    -- object exposing piece_at(square) (e.g. chess.Board)
    encoding -- maps piece symbols (and '.') to fixed-length vectors
    squares  -- iteration order of the 64 squares; defaults to the
                module-level SQUARES (chess.SQUARES_180) so row order
                matches the string-based encode()

    Fixes vs the original: drops the unused `builder` list and derives
    the channel depth from `encoding` instead of hard-coding 3.
    """
    if squares is None:
        squares = SQUARES
    depth = len(encoding['.'])
    a = np.zeros([64, depth])
    for i, square in enumerate(squares):
        piece = board.piece_at(square)
        if piece:  # empty squares stay at the zero vector
            a[i, :] = encoding[piece.symbol()]
    return a.reshape([8, 8, depth])
encoding={'.': np.array([0., 0., 0.]),
'p': np.array([0., 0., 1.]),
'P': np.array([ 0., 0., -1.]),
'b': np.array([0., 1., 0.]),
'B': np.array([ 0., -1., 0.]),
'n': np.array([1., 0., 0.]),
'N': np.array([-1., 0., 0.]),
'r': np.array([0., 1., 1.]),
'R': np.array([ 0., -1., -1.]),
'q': np.array([1., 0., 1.]),
'Q': np.array([-1., 0., -1.]),
'k': np.array([1., 1., 0.]),
'K': np.array([-1., -1., 0.])}
# + tags=[]
np.sum( encode(board,encoding) - encode_2(board,encoding))
# -
# %%timeit
code = encode_2(board,encoding)
piece = board.piece_type_at(0)
bin(piece)
bin(100)
import chess
from chesslab.utils import perft
board = chess.Board("6B1/3p4/8/3P4/6p1/1rPnbk2/pBR4K/q3q3 b - - 3 63")
board
class obj:
    """Perft node counter that also tallies checkmates seen in self.mates."""
    def __init__(self):
        # Number of checkmate positions encountered so far.
        self.mates = 0
    def perft(self, board, depth):
        """Count leaf nodes of the move tree down to `depth`, recording mates."""
        if depth == 0:
            if board.is_checkmate():
                self.mates += 1
            return 1
        moves = list(board.legal_moves)
        if not moves:
            # Terminal position above the horizon still counts as one node.
            if board.is_checkmate():
                self.mates += 1
            return 1
        total = 0
        for mv in moves:
            board.push(mv)
            total += self.perft(board, depth - 1)
            board.pop()
        return total
# Count nodes (and checkmates) three plies deep from the test position.
o = obj()
o.perft(board,3)
o.mates
board
# Play the queen move e1-h4 and inspect the resulting position.
board.push_uci("e1h4")
board
len(list(board.legal_moves))
|
examples/NN_speed.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
##### Back room #####
#### In this notebook, we use antevents to read in some previously
# captured lux (light level) sensor data. We use a sliding window
# transducer to smooth the data and then convert both the raw data and
# the smoothed data to Pandas "Series" data values. These can then be
# plotted directly in jupyter.
# Some initial setup
import pandas as pd
# needed to get the graphs to show up
# %matplotlib inline
import matplotlib.style
# To workaround a bug in pandas, see
# http://stackoverflow.com/questions/33995707/attributeerror-unknown-property-color-cycle
matplotlib.style.use('ggplot')
import matplotlib.pyplot as plt
print("pandas version=%s"% pd.__version__)
print("matplotlib version=%s" % matplotlib.__version__)
# -
# Make sure we can see the antevents package
import sys
import os.path
antpath=os.path.abspath('../../../antevents-python')
assert os.path.exists(antpath)
assert os.path.exists(os.path.join(antpath, 'antevents'))
if antpath not in sys.path:
sys.path.append(antpath)
import antevents.base
# +
# Some setup for our analysis
import asyncio
from pytz import timezone
from antevents.base import Filter,Publisher,Scheduler,SensorEvent,\
filtermethod
from antevents.adapters.csv import CsvReader
from antevents.adapters.pandas import PandasSeriesWriter
from antevents.linq.transducer import SensorSlidingMean
import antevents.linq.combinators
import antevents.linq.select
PDT=timezone('America/Los_Angeles')
scheduler = Scheduler(asyncio.get_event_loop())
# +
# Now, read in the spreadsheet of lux values and build the
# dataflow to process the data
import math
from lux_analysis import SensorSlidingMeanPassNaNs, fill_in_missing_times, CaptureNaNIndexes
DIR = '../data_files/'
ROOM = 'back-room'
reader = CsvReader(DIR+ROOM+'.csv')
# The raw writer captures the raw spreadsheet data
raw_series_writer = PandasSeriesWriter(tz=PDT)
# The smoothed writer captures the data that was passed through a
# sliding average transducer. We average over the last 5 samples.
smoothed_series_writer = PandasSeriesWriter(tz=PDT)
# We build and maintain a list of the NaN indexes, so that we can
# update them at will. This is needed because the clustering algorithm
# doesn't handle them
capture_nan_indexes = CaptureNaNIndexes()
# The smoothed data coming out of the sliding window mean is floating
# point. We round everything to the nearest integer, since that is the
# accuracy we started with.
def round_event_val(x):
    """Round a SensorEvent's value to the nearest int; pass NaN events through."""
    if math.isnan(x.val):
        return x
    rounded = int(round(x.val))
    return SensorEvent(ts=x.ts, sensor_id=x.sensor_id, val=rounded)
# Now we can put all the processing steps together
reader.fill_in_missing_times()\
.passthrough(raw_series_writer)\
.transduce(SensorSlidingMeanPassNaNs(5)).select(round_event_val).passthrough(smoothed_series_writer)\
.passthrough(capture_nan_indexes).output_count()
reader.print_downstream()
# run it!
scheduler.schedule_recurring(reader)
scheduler.run_forever()
print("Ran the stream")
# -
# graph the full raw series
raw_series_writer.result.plot(figsize=(15,10))
# Graph the last day of the raw data
raw_series_writer.result[:1440].plot(figsize=(15,10))
# graph the smoothed series
smoothed_series_writer.result.plot(figsize=(15,10))
# Graph the last day of the smoothed data
smoothed_series_writer.result[:1440].plot(figsize=(15,10))
# Now, lets try clustering the data
CLUSTER_LABELS = 4 # how many groups will we create
import numpy as np
from sklearn.cluster import KMeans
# create an array that just has the values, no timestamps
npa = np.array(smoothed_series_writer.result).reshape(-1,1)
capture_nan_indexes.replace_nans(npa, 0.0)
kmeans = KMeans(CLUSTER_LABELS)
kmeans.fit(npa)
# print the raw clustered data
print("kmeans labels: " + str(kmeans.labels_))
from numpy.core.numeric import NaN
labels_with_nans = capture_nan_indexes.new_array_replace_nans(kmeans.labels_, NaN)
labels_against_time = pd.Series(labels_with_nans, index=smoothed_series_writer.index)
labels_against_time.plot(figsize=(15,10))
import pylab
pylab.ylim([0,3.5]) # force the graph top a little higher so that we can see the 3.0 lines
# Unfortunately, the clustered data is creating arbitrary groups. We want the
# integer number to be proportional to the lux value so that the graphs are
# easier to interpret. To do this, we map 0 to the lowest lux level, 3 to
# the highest, etc. In cluster_mapping, the index is the cluster number and
# the value is the one we will graph.
label_totals = [0]*CLUSTER_LABELS
label_counts = [0]*CLUSTER_LABELS
for i in range(len(labels_with_nans)):
l = labels_with_nans[i]
if not math.isnan(l):
lux = smoothed_series_writer.result[i]
label_totals[l] += lux
label_counts[l] += 1
averages = [(label_totals[i]/label_counts[i], i) for i in range(CLUSTER_LABELS)]
averages.sort()
print("averages = %s" % averages)
def pivot(array):
    """Invert a permutation: output[v] is the position of value v in `array`."""
    inverse = [-1] * len(array)
    for pos, value in enumerate(array):
        inverse[value] = pos
    return inverse
cluster_mapping = pivot([i for (avg, i) in averages])
print("cluster_mapping = %s" % cluster_mapping)
kmeans_lux = [cluster_mapping[l] if not math.isnan(l) else NaN for l in labels_with_nans]
# Now, we can convert it back to a Pandas series by using our original timestamp
# array with the clustered data.
clusters_against_time = pd.Series(kmeans_lux, index=smoothed_series_writer.index)
clusters_against_time.plot(figsize=(15,10))
import pylab
pylab.ylim([0,3.5]) # force the graph top a little higher so that we can see the 3.0 lines
# Lets look at the last day of data to get a closer view
clusters_against_time[:1440].plot(figsize=(15,10))
pylab.ylim([0,3.5]) # force the graph top a little higher so that we can see the 3.0 lines
# +
# Now, lets map this to on-off values
# We pick an "on threshold" based on the values we see in the above graph,
# trying to account for ambiant light.
# Cluster levels at or above this value count as "light on".
ON_THRESHOLD = 1
def on_off_mapping(v):
    """Map a cluster level to 1 (on) / 0 (off), propagating NaN gaps."""
    if math.isnan(v):
        return NaN
    return 1 if v >= ON_THRESHOLD else 0
on_off_series = pd.Series([on_off_mapping(l) for l in kmeans_lux], index=smoothed_series_writer.index)
on_off_series.plot(figsize=(15,10))
pylab.ylim([0,1.5])
# -
# Graph the last day of the data to get a closer view
on_off_series[:1440].plot(figsize=(15,10))
pylab.ylim([0,1.5])
# +
# Import our code that will analyze the data.
import numpy as np
from numpy import float64
import datetime
from lux_analysis import time_of_day_to_zone, dt_to_minutes, NUM_ZONES, get_sunrise_sunset
SUNRISE = 352 # time in minutes since midnight on July 2 (5:52 am)
SUNSET = 1233 # time in minutes since midnight on July 2 (8:33 pm)
(sunrise, sunset) = get_sunrise_sunset(7, 2)
assert SUNRISE==sunrise
assert SUNSET==sunset
# We divide a day into "zones" based on time relative to sunrise/sunset.
# Let's plot our zones for a day
zone_test_index = [datetime.datetime(year=2016, month=7, day=2, hour=h, minute=m)
for (h, m) in [(h, m) for h in range(0, 24) for m in range(0, 60)]]
zone_test_data = [time_of_day_to_zone(dt_to_minutes(dt), SUNRISE, SUNSET) for dt in zone_test_index]
zone_test_series = pd.Series(zone_test_data, index=zone_test_index)
zone_test_series.plot(figsize=(15,10))
pylab.ylim([0, NUM_ZONES])
# +
# Build some histograms of the data
from lux_analysis import build_length_histogram_data
length_data = build_length_histogram_data(on_off_series, smoothed_series_writer.index)
# look at the distribution of lengths
from itertools import chain
all_on_lengths = list(chain.from_iterable(length_data.on_lengths))
plt.hist(all_on_lengths)
# -
# Show the histogram for all off lengths
all_off_lengths = list(chain.from_iterable(length_data.off_lengths))
plt.hist(all_off_lengths)
# +
from hmmlearn.hmm import MultinomialHMM as MHMM
from lux_analysis import HmmScanner
# Scan all the samples and produce subsequences of samples for each
# zone, suitable for the hmm fit() method.
scanner = HmmScanner()
scanner.process_samples(on_off_series, smoothed_series_writer.index)
# train hmms by zone
hmm_by_zone = []
for z in range(NUM_ZONES):
num_states = 5 #max(3, int(1+len(trainer.on_lengths[z])/2))
#print("%d states for zone %d" % (num_states, z))
hmm = MHMM(n_components=num_states)
data = np.array(scanner.samples_by_zone[z]).reshape(-1,1)
lengths = np.array(scanner.lengths_by_zone[z])
print("zone %s: %s samples in %s subsequences" % (z, len(data), len(lengths)))
hmm.fit(data, lengths)
hmm_by_zone.append(hmm)
def predict_hmm_by_zone(dt_series):
    """Sample on/off predictions for each timestamp from the per-zone HMMs.

    Consecutive timestamps are grouped into runs belonging to the same
    time-of-day zone; each run is generated by a single sample() call to
    that zone's trained HMM, so the sampled sequence follows the HMM's
    transition structure within the run.

    dt_series -- ordered sequence of datetimes
    returns   -- pandas Series of sampled values indexed by dt_series
    """
    predictions = []
    last_zone = None
    last_cnt = None
    for dt in dt_series:
        # Zones are defined relative to this specific day's sunrise/sunset.
        (sunrise, sunset) = get_sunrise_sunset(dt.month, dt.day)
        zone = time_of_day_to_zone(dt_to_minutes(dt), sunrise, sunset)
        if zone != last_zone:
            # Zone boundary reached: flush the previous run by sampling it.
            if last_cnt is not None:
                print("add %d events for zone %d on day %d/%d" % (last_cnt, last_zone, dt.month, dt.day))
                (samples, states) = hmm_by_zone[last_zone].sample(last_cnt)
                predictions.extend([x[0] for x in samples])
            last_cnt = 1
        else:
            last_cnt += 1
        last_zone = zone
    # Flush the final run.
    if last_zone is not None:
        print("add %d events for zone %d" % (last_cnt, last_zone))
        (samples, states) = hmm_by_zone[last_zone].sample(last_cnt)
        predictions.extend([x[0] for x in samples])
    assert len(predictions)==len(dt_series)
    return pd.Series(predictions, index=dt_series)
# generate some perdictions for July 31, 2016
predict_dts_short = [datetime.datetime(year=2016, month=7, day=31, hour=h, minute=m)
for (h, m) in [(h, m) for h in range(0, 24) for m in range(0, 60)]]
short_predictions = predict_hmm_by_zone(predict_dts_short)
def plot(predictions):
    """Plot an on/off prediction series with headroom above y=1."""
    predictions.plot(figsize=(15,10))
    pylab.ylim([0, 1.5])
plot(short_predictions)
# -
# Now, let's look at some predictions over a week
predict_dts_long = [datetime.datetime(year=2016, month=8, day=d, hour=h, minute=m)
for (d, h, m) in [(d, h, m) for d in range(1, 8) for h in range(0, 24)
for m in range(0, 60)]]
long_predictions = predict_hmm_by_zone(predict_dts_long)
plot(long_predictions)
# save the trained HMM to a file
from sklearn.externals import joblib
import os.path
if not os.path.exists('saved_hmms'):
os.mkdir('saved_hmms')
print("created saved_hmms direectory")
for zone in range(NUM_ZONES):
fname = 'saved_hmms/hmm-%s-zone-%d.pkl' % (ROOM, zone)
joblib.dump(hmm_by_zone[zone], fname)
print("save hmm to %s" % fname)
|
lighting_replay_app/analysis/lux-analysis-back-room.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Hand-written Furigana Classification
#
# A convolutional neural network (CNN) based model is built to classify Japanese characters, i.e. Furigana. Furigana consists of Hiragana (Hi.) for normal use, and Katakana (Ka.) for foreign words or emphasis purposes. The model is trained to run Android on-device hand-written input classification powered by TensorFlow Lite.
# ### The work includes
# - extremely simple image noise clean and signal enhancement
# - augmentation generates variations of the original samples to increase field accuracy
# - 4 layered CNN model can reach validation accuracy ~98%
#
# ### Data Source:
# All training samples are from [ETL Character Database](http://etlcdb.db.aist.go.jp/). The site collects multiple databases from ETL1 to ETL9 with various contents, sources, and formats. This work uses ETL6 (Katakana) and ETL7 (Hiragana) databases. All data should be retrieved from the site and cannot be used commercially without official agreement.
#
# ### Reference:
# [colab digit_classifier](https://colab.research.google.com/github/tensorflow/examples/blob/master/lite/codelabs/digit_classifier/ml/step2_train_ml_model.ipynb)
#
# [colab digit_classifier enhancement](https://colab.research.google.com/github/tensorflow/examples/blob/master/lite/codelabs/digit_classifier/ml/step7_improve_accuracy.ipynb)
#
#
# ## Import
# +
# direct import ETL provide sample reading script
# can be get from http://etlcdb.db.aist.go.jp/file-formats-and-sample-unpacking-code
from unpack.unpack import *
# python built-in module
import math
import random
import gc
# predefined map class provides table to encode characters to index and vise versa
from encoder_table import decoding_map, encoding_map
# third-party modules for visual and data preparation
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Tensorflow and Keras for deep learning
import tensorflow as tf
from tensorflow import keras
#to solve th error, ref: https://stackoverflow.com/questions/43990046/tensorflow-blas-gemm-launch-failed
physical_devices = tf.config.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
# -
tf.__version__
# ## Setting
FILE_INPUT_PATH = ".\input"
MODEL_OUPUT_PATH = ".\model"
OUTPUT_MODEL_NAME = "\model.tflite"
# hiragana and katakata share the same process with different input
isHiraganaTraining = False
if isHiraganaTraining:
# Hiragana Training setting
LABEL_PREFIX = "Hi."
FILE_NAME_PREFIX = "\ETL7LC_"
START_FILE_SERIAL_NO = 1
END_FILE_SERIAL_NO = 2
NUM_CLASSES = 46
else:
# Katakana Training setting
LABEL_PREFIX = "Ka."
FILE_NAME_PREFIX = "\ETL6C_"
START_FILE_SERIAL_NO = 1
END_FILE_SERIAL_NO = 5
NUM_CLASSES = 46
# ## Data Reading
def readRawData(filename, label_prefix):
    """Read one ETL1/6/7 data file and return (images, labels) arrays.

    filename     -- path to the raw ETL record file
    label_prefix -- "Hi." (hiragana) or "Ka." (katakana); must match the
                    encoding table in use
    returns      -- images: array of shape (#samples, height, width),
                    labels: array of character-code strings
    raises       -- ValueError for any other prefix (the original code
                    printed a message and returned None, which made the
                    caller's tuple-unpack fail with an opaque TypeError)

    Side effect: stores the sample dimensions in the globals img_width /
    img_height, which the model input shape is derived from later.
    """
    # img width and height related to input tensor shape, so will be preserved globally
    global img_width
    global img_height
    if label_prefix not in ("Hi.", "Ka."):
        raise ValueError(
            "Prefix should be either Hi. or Ka. in order to match encoding table")
    f = bitstring.ConstBitStream(filename=filename)
    etln_record = ETL167_Record()  # reader for the ETL 1, 6, 7 record format
    tmp_labels = []
    tmp_img = []
    while f.pos < f.length:
        # read BitStream into a formated map
        record = etln_record.read(f)
        # get labels
        label = record["Character Code"].strip(" ")
        if ("," in label) or (")" in label) or ("(" in label):
            # skip symbols in ETL6
            # skip Dakuten, Handakuten marks for ETL7
            continue
        tmp_labels.append(label)
        # get images in numpy array
        img = etln_record.get_image()
        img_width = img.size[0]
        img_height = img.size[1]
        img_array = np.array(img).reshape(1, img_height, img_width)  # reshape into 3d for concatenating
        tmp_img.append(img_array)
    images = np.concatenate(tmp_img)
    labels = np.array(tmp_labels)
    return images, labels
# +
tmp_images_list = []
tmp_labels_list = []
for i in range(START_FILE_SERIAL_NO, END_FILE_SERIAL_NO + 1):
if isHiraganaTraining:
file_serial_number = f"{i}" #ETL7LC_1 without padding zero
else:
file_serial_number = f"{i:02}" #ETL6C_01 with padding zero
file_name = FILE_INPUT_PATH + FILE_NAME_PREFIX + file_serial_number
tmp_images, tmp_labels = readRawData(file_name, LABEL_PREFIX)
tmp_images_list.append(tmp_images)
tmp_labels_list.append(tmp_labels)
# -
# wrap data from different file into one as numpy array
images = np.concatenate(tmp_images_list, axis=0)
labels = np.concatenate(tmp_labels_list, axis=0)
del tmp_images_list
del tmp_labels_list
gc.collect()
# each image with size 64x63 px(w x h). The sample should be read as (#sample, height, width)
print(images.shape)
print(labels.shape)
# ## Preprocessing
# +
# draw images from serialized data with their label
def show_sample(images, labels, sample_count=25, rand=False):
    """Show `sample_count` labelled images on a square subplot grid.

    images       -- indexable collection of 2-D image arrays
    labels       -- captions, parallel to `images`
    sample_count -- number of panels to draw
    rand         -- if True, draw random samples instead of the first N

    Fixes vs the original: random.randint(0, n) is inclusive of n, so the
    old code could raise IndexError roughly one time in n+1 picks; use
    random.randrange(n), which samples 0..n-1.  Also drops a redundant
    double math.ceil().
    """
    grid_count = math.ceil(math.sqrt(sample_count))
    grid_count = min(grid_count, len(images), len(labels))
    plt.figure(figsize=(2*grid_count, 2*grid_count))
    for i in range(sample_count):
        if rand:
            # pick a random image index (upper bound exclusive)
            rand_idx = random.randrange(images.shape[0])
            image = images[rand_idx]
            label = labels[rand_idx]
        else:
            image = images[i]
            label = labels[i]
        plt.subplot(grid_count, grid_count, i+1)
        plt.xticks([])
        plt.yticks([])
        plt.grid(False)
        plt.imshow(image, cmap=plt.cm.gray)
        plt.xlabel(label)
    plt.show()
# -
# Tensorflow needs labels in integer format
def encode(labels, encoding_map):
    """Map string labels to their integer class ids via `encoding_map`.

    TensorFlow needs integer labels; raises KeyError if a label is
    missing from the map.
    """
    return np.array([encoding_map[label] for label in labels])
# Each px is saved in 4 bits, i.e. 16 gray-scale intensity levels.
# This process filters lower intensities to suppress background noise, and enhances higher intensities to make strokes clear, close to the on-device situation.
def preprocess(raw_images):
    """Denoise, saturate extremes, and scale pixel values to [0, 1].

    Pixels below the noise threshold are zeroed (background speckle);
    pixels above the contrast threshold are saturated to 255 so strokes
    come out crisp, closer to on-device input; everything is then scaled
    by 1/255.

    Fix vs the original: operates on a float copy instead of mutating the
    caller's array in place.
    """
    noise_filter_threshold = 130
    contrast_enhancement_threshold = 150
    images = np.array(raw_images, dtype=float)  # copy -- don't clobber the input
    images[images < noise_filter_threshold] = 0  # suppress surrounding noise
    images[images > contrast_enhancement_threshold] = 255  # brighten white strokes
    return images / 255.0  # scale from 0..255 to 0..1
# +
X = preprocess(images)
y = encode(labels, encoding_map) #label need to be integer
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# -
show_sample(X_train, [f"{label} : {decoding_map[label]}" for label in y_train], 25, rand = True)
# ## Augmentation
# Apply rotation, shift, distortion, resizing, etc. to create variations of the original database, making the model more robust to real inputs.
# +
datagen = keras.preprocessing.image.ImageDataGenerator(
rotation_range=20,
width_shift_range=0.15,
height_shift_range=0.15,
shear_range=0.35,
zoom_range=0.2
)
# ImageDataGenerator takes 3D input for each image -> expand from (63, 64) -> (63, 64, 1)
train_generator = datagen.flow(np.expand_dims(X_train,axis=3), y_train)
test_generator = datagen.flow(np.expand_dims(X_test,axis=3), y_test)
# -
# Images after augmentation
argumented_X_train, argumented_y_train = next(train_generator)
show_sample(np.squeeze(argumented_X_train, axis=3), [f"{label} : {decoding_map[label]}" for label in argumented_y_train])
# ## Training
assert img_height > 0
assert img_width > 0
# Totally 4 convolution layers are used:
# - Conv2D :
# - 3x3 kernal (=filter) to create feature map from each layer
# - increasing depth (#filters) from 32 to 128 to be able to extract more abstract features at latter layers
# - MaxPooling2D :
# - 2x2 matrix to down-sample the inputs to reduce dimensionality (ex. 28x28 px after pooling will be 14x14 px)
# - Dropout layer
# - supress overfitting
# +
# Create Model
model = keras.Sequential([
keras.layers.Reshape(target_shape=(img_height, img_width, 1)),
keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Dropout(0.25),
keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Dropout(0.25),
keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Dropout(0.25),
keras.layers.Conv2D(filters=128, kernel_size=(3, 3), activation=tf.nn.relu),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Dropout(0.25),
keras.layers.Flatten(),
keras.layers.Dense(64, activation=tf.nn.relu),
keras.layers.Dropout(0.5),
keras.layers.Dense(NUM_CLASSES)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
# -
callback = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=20)
model.fit(train_generator, epochs=200, callbacks=[callback], validation_data=test_generator)
# ## Validation
argumented_X_test, argumented_y_test = next(test_generator)
y_pred = model.predict(argumented_X_test)
show_sample(np.squeeze(argumented_X_test, axis=3), [f"{np.argmax(pred)} : {decoding_map[np.argmax(pred)]}" for pred in y_pred])
# +
# Convert Keras model to TF Lite format.
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
# Save the TF Lite model as file
f = open(MODEL_OUPUT_PATH + OUTPUT_MODEL_NAME, "wb")
f.write(tflite_model)
f.close()
|
TensorFlowFurigana.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.10.2 64-bit
# language: python
# name: python3
# ---
# +
def reporte_cohete(tanque1,tanque2,tanque3):
total_prom=(tanque1+tanque2+tanque3)/3
return f"""Reporte combustibles:
Total Promedio: {total_prom}%
tanque1: {tanque1}%
tanque2: {tanque2}%
tanque3: {tanque3}%
"""
print(reporte_cohete(30,40,50))
# Función promedio
def prom(values):
    """Return the arithmetic mean of *values* (any non-empty sequence)."""
    return sum(values) / len(values)

# datos
prom([46, 52, 48])
def reporte_cohete(tanque1,tanque2,tanque3):
    """Fuel report for three tanks; the average is delegated to prom()."""
    return f"""Reporte combustibles:
Total Promedio: {prom([tanque1,tanque2,tanque3])}%
tanque1: {tanque1}%
tanque2: {tanque2}%
tanque3: {tanque3}%
"""
# Call the updated function again with different values
print(reporte_cohete(88, 76, 70))
# -
# En este ejercicio, construirás un informe de cohete que requiere varias piezas de información, como el tiempo hasta el destino, el combustible a la izquierda y el nombre del destino. Comienza por crear una función que creará un informe preciso de la misión:
# +
def reporte_cohete2(hora_prelanzamiento,tiempo_recorrido,tanque_interno,tanque_externo,nombredestino):
    """Trip report: destination, total time (pre-launch + travel, minutes)
    and total fuel (internal + external tank)."""
    return f"""Viaje:
Viaje espacio: {nombredestino}
Tiempo total del viaje {hora_prelanzamiento+tiempo_recorrido} minutos
Total de {tanque_interno+tanque_externo} combustible
"""
print(reporte_cohete2(20, 31,206900, 623000,"luna"))
def reporte_c(destino, *minutes, **fuel_reservoirs):
    """Variadic trip report: *minutes* are summed into the total travel time,
    the **fuel_reservoirs keyword values into the total fuel."""
    return f"""
Viaje espacio {destino}
Tiempo total del viaje : {sum(minutes)} minutos
Total de combustible : {sum(fuel_reservoirs.values())}
"""
print(reporte_c("Luna", 10, 21, 14, t_in=109900,t_ex=800000))
#nueva funcion
def mission_report(destination, *minutes, **fuel_reservoirs):
main_report = f"""
Mission to {destination}
Total travel time: {sum(minutes)} minutes
Total fuel left: {sum(fuel_reservoirs.values())}
"""
for tank_name, gallons in fuel_reservoirs.items():
main_report += f"{tank_name} tank --> {gallons} gallons left\n"
return main_report
print(mission_report("Moon", 8, 11, 55, main=300000, external=200000))
|
kata9.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
# Load the GOMC results and drop table-separator rows.
gomc_df = pd.read_csv("gomc.csv")
gomc_df.size
# rows whose country is a dashed placeholder are separators, not data
gomc_df = gomc_df[gomc_df.country_value != "----------------"]
gomc_df.head(10)
#df = df[df.line_race != 0]
gomc_df.size
gomc_df.rating_value.value_counts()
# +
def unique_values(column):
    """Distinct values of *column* (a pandas Series), in order of appearance."""
    return column.unique()

def count_unique_values(column):
    """Number of distinct values in *column*."""
    distinct = column.unique()
    return len(distinct)

def count_column_values(column):
    """Frequency table of *column*'s values (most common first)."""
    return column.value_counts()
# -
# # Countries
# Number of participating countries:
# +
count_unique_values(gomc_df.country_value)
# -
# Distinct country and region names present in the data.
unique_values(gomc_df.country_value)
unique_values(gomc_df.region_value)
# +
def filter_col(df, column, value):
    """Rows of *df* where the given Series *column* equals *value*."""
    mask = column == value
    return df[mask]
# -
# # European countries
# Per-region drill-down: filter to one region, then list and count its countries.
gomc_europe = filter_col(gomc_df, gomc_df.region_value, "Europe")
gomc_europe.head(2)
count_unique_values(gomc_europe.country_value)
unique_values(gomc_europe.country_value)
count_column_values(gomc_europe.country_value)
# # Americas
gomc_americas = filter_col(gomc_df, gomc_df.region_value, "Americas")
gomc_americas.head(2)
count_unique_values(gomc_americas.country_value)
unique_values(gomc_americas.country_value)
count_column_values(gomc_americas.country_value)
# # Asia Pacific
gomc_asiaP = filter_col(gomc_df, gomc_df.region_value, "Asia Pacific")
gomc_asiaP.head(2)
count_unique_values(gomc_asiaP.country_value)
unique_values(gomc_asiaP.country_value)
count_column_values(gomc_asiaP.country_value)
# # Middle East and Africa
gomc_meAf = filter_col(gomc_df, gomc_df.region_value, "Middle East and Africa")
gomc_meAf.head(2)
count_unique_values(gomc_meAf.country_value)
unique_values(gomc_meAf.country_value)
count_column_values(gomc_meAf.country_value)
|
exploration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import warnings
#from sklearn.datasets import load_iris
from random import shuffle
import seaborn as sns
# %matplotlib inline
# Train a 5-NN classifier on the Iris measurements and check its accuracy.
df = pd.read_csv("/home/kuldeep/Documents/iris-species/Iris.csv", low_memory=False)
df.head()
df.shape
df.head()
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
x = df[['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']]
y = df.Species
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=1/3, random_state=1)
knn.fit(x_train, y_train)
ypred = knn.predict(x_test)
knn.score(x_test, y_test) * 100
z = pd.DataFrame(y_test, ypred)
z.head()
# FIX: Series.as_matrix() was deprecated and removed in pandas 1.0;
# to_numpy() is the supported replacement with identical result.
y_test = y_test.to_numpy()
y_test
# Manual accuracy check: fraction of predictions matching the true labels.
# FIX: use len(y_test) instead of the hard-coded 50 so this works for any
# test_size / dataset length.
c = 0
for i in range(len(y_test)):
    if ypred[i] == y_test[i]:
        c += 1
(c / len(y_test)) * 100
|
iris.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import random

# Generate 20 random 4-element rows (kept for reference; overwritten below).
final = [[random.randint(1, 100) for _ in range(4)] for _ in range(20)]
final

# Hand-crafted sample data: [x, y, 0, 0] points.
final = [[5, 5, 0, 0],
         [4, 10, 0, 0],
         [5, 15, 0, 0],
         [5, 20, 0, 0],
         [5, 25, 0, 0],
         [15, 5, 0, 0],
         [14, 10, 0, 0],
         [15, 15, 0, 0],
         [15, 20, 0, 0],
         [14, 25, 0, 0]]
final

# Sort by y first, then (stable sort) by x: net ordering is by (x, y).
new = sorted(final, key=lambda point: point[1])
new
new = sorted(new, key=lambda point: point[0])
new
# # Aadhar Back Labels
# 1
details = ['mn SIO oF qr AT AE Address S/O Shankar Thorvae', 'near ranvir krida mandal',
'1/12devki misra chawl bhatwad', '400084', 'Ghatkopar West Mumbai',
'Maharashtra 400084', 'helpuidaigovin wwwuidalgovin PO Box No 1947',
'1947', '1800 180 1947', 'Bengalury-580 001']
# 2
details = ['Address ROOM NO B20', 'ANGAN APART DR', 'MUZUMDAR MARG NEAR',
'AXE FEAT FTATYT WEY', 'SPANDAN HOSPITAL', '421503', 'KULGAON Badiapur Kuigaon',
'Thane Maharashtra 421503', 'wenitigavin FO Bone 1941Seach S08 001']
# 3
details = ['Address S/O Muralidharan', '8/0', '- Warier BARC COLONY 53-',
'400094 alkananda Anu Shakti Nagar', 'Mumbai Sub Urban', 'Maharashtra 400094',
'helpuidaigovin wwwuidaigovin PO Box No 1947', '1947', '1800 180 1947', 'Bengaluru-560 001']
# 4
details = ['Address', 'D/O D/O Dnyaneshwar Kakade', 'Mukatabai Hospital H-9Room',
'- -', 'No-7RBKadam', 'MargBhatwadiBarvenagar', 'ghatkopar west 50 Mumbai',
'- 400084', 'Maharashtra - 400084', '8921 4402 3664', 'WWW', '1947',
'help uidalgovin', 'wwwuidaigovin']
# 5
details = ['wen Are', 'FETAADRAARINDIAERR OF', 'Address', 'akot akola road chohotta Akot Akola',
'axKot aKkaia roag cnono Maharashtra - 444101', '- 444101', '9380 1178 5186',
'Aadhaar-Mera Aadhaar Meri Pehachan']
# 6
details = ['UNIQUE IDENTIFICATION AUTHORITY OF INDIA', 'AADNAAR', 'Address',
'J-40/12 J-SECT N-2 CIDCO', '-40/12 - -2', 'JALNA ROAD Aurangabad',
'Aurangabad MH Aurangabad', 'Maharashtra 431001', '431001']
# 7
details = ['Address Nagargaon Pal Kolhapur', 'WW ATE TN', 'Maharashtra 416209',
'np 416209', '7191 2085 0750', '1947', 'help uidaigovin',
'wwwuidalgovin', '1800 300 1947']
# 8
details = ['Unique identification Authority of india', '7 1- Address Akshay Society C7 1- 2 NearnA enh ew',
'2 Sai Plaza Markel Sector 15 Air Navi AA a',
'Crewe me mews wy 15 Wer Mumbai Thane Airgii Maharashtra', '400708', '400708',
'8156 1305 9387', '8488', '1947', 'vil govin', '1800 300 1947', 'ww uidalgovin']
# 9
details = ['10 aeeomddig', 'Unique Identification Authority of India',
'S/O <NAME>iriya akmoakden amIckdeo eae corey ewes',
'Mouse Shiriya Kumbla Via', 'AA Bary slick mis ad ide', 'Shiriya Kasaragod',
'Kerala - 671321', 'sapg - 671321', '6408 9520 2', 'Nol']
# # Cropped Aadhar Back Details
details = ['Address', 'D/O Dnyaneshwar Kakade', 'Mukatabai Hospital H-9Room', 'No-7RBKadam', 'MargBhatwadiBarve nagar', 'ghatkopar west s0 Mumbai', 'Maharashtra - 400084']
details = ['Address S/O Shankar Thorvae', 'THE', 'near ranvir krida mandal', '112devki misra chawl bhatwadi', 'Ghatkopar West Mumbai', 'Maharashtra 400084']
details = ['Address ROOM NO B/201', 'ANE', 'ANGAN APART DR', 'MUZUMDAR MARG NEAR', 'SPANDAN HOSPITAL', 'KULGAON Badiapur Kuigaon', 'Thane Maharashira 421503']
details = ['Address Nagargaon Pal Kolhapur', 'Maharashtra 416209']
details = ['iya', '- 671321']
details = ['Address', 'J-40/12 J-SECT N-2 CIDCO', 'JALNA ROAD Aurangabad', 'Aurangabad, (M.H.) Aurangabad', 'Maharashtra 431001']
details = ['7 1-', 'Address Akshay Society C-7 1- 2 Near', 'Mumbai Thane Airci Maharashtra', '400708']
details = ['Address S/O Muralidharan', 'Warier BARC COLONY 53-', '400094 alkananda Anu Shakti Nagar', 'Mumbai Sub Urban', 'Maharashtra 400094']
details = ['Address', 'akot akola road chohotta Akot Akola', '8151 Maharashtra - 444101']
details
# Strip every character outside the allowed set from each OCR line.
# FIX: this cell used `re` before the (later) cell that imports it had run;
# import it locally so the cell works regardless of execution order.
import re

for i in details:
    text = re.sub('[^A-Za-z0-9-/,.() ]+', '', i)
    print(text)
# +
# Extract the address from the OCR lines. The original repeated identical
# logic for details[0], details[1] and details[2]; a single loop over the
# first three lines is equivalent and removes the triplication.
imp = {'Address': ''}
try:
    for idx in range(3):
        if 'Address' in details[idx]:
            remainder = details[idx].split('Address', 1)[1].strip()
            if remainder != '':
                # text after the 'Address' marker starts the address;
                # every following line is a continuation
                imp["Address"] = remainder
                for line in details[idx + 1:]:
                    imp["Address"] += '\n' + line
            break
    else:
        # none of the first three lines mentioned 'Address'
        imp["Address"] = 'Failed to read Address'
except Exception as _:
    # e.g. fewer than three OCR lines -> IndexError, same as the original code
    imp["Address"] = 'Failed to read Address'
imp['Address'] = imp['Address'].strip()
print(imp['Address'])
# -
s = 'UPLOAD/dasd/adsad/tes.jpg'
# Print everything up to the final path separator (the directory part).
print(s.rsplit('/', 1)[0])
image = cv2.imread('ignore/aadhar_back/2.jpg')
image = image[200:600, 200:600]  # fixed-pixel crop for a quick visual check
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# OCR every aadhaar-back sample: crop the address region (relative to the
# image size), binarize with Otsu, then run tesseract with several languages.
# NOTE(review): relies on os / pyt / re being imported in a later cell — in
# the notebook that import cell must be executed first.
for i in os.listdir('ignore/aadhar_back/'):
    image = cv2.imread('ignore/aadhar_back/' + i, 0)  # read as grayscale
    # cv2.imshow('og', image)
    # cv2.waitKey(0)
    height, width = image.shape
    # keep rows 15%-70% and columns 40%-100%: the printed address block
    image = image[int(height * (15 / 100)):int(height * (70 / 100)), int(width * (40 / 100)):]
    # image = cv2.Canny(image, 30, 150)
    _, image = cv2.threshold(image, 150, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # image = cv2.dilate(image, np.ones((45, 5)))
    # cv2.imshow('image', image)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    text = pyt.image_to_string(image, lang='eng+hin+mar+mal', config=('--oem 1 --psm 3'))
    # drop everything outside the expected character set
    text = re.sub('[^A-Za-z0-9-/():,\n ]+', '', text)
    print(text)
    # print(image.shape)
import os
import pytesseract as pyt
import re
# # Perspective Transform
import cv2
import numpy as np
# +
# Manually-picked card corners -> top-down view via a perspective warp.
image = cv2.imread('ignore/aadhar_back/20190124_171047.jpg')
og = cv2.imread('ignore/aadhar_back/20190124_171047.jpg')
# mark the four chosen corner points on a copy (visual sanity check only)
image = cv2.circle(image, (1004, 624), 15, (0, 0, 255), -2)
image = cv2.circle(image, (3380, 580), 15, (0, 0, 255), -2)
image = cv2.circle(image, (392, 2040), 15, (0, 0, 255), -2)
image = cv2.circle(image, (3260, 2500), 15, (0, 0, 255), -2)
# source corners (TL, TR, BL, BR) and the 1500x1000 destination rectangle
pts1 = np.float32([[1004, 624], [3380, 580], [392, 2040], [3260, 2500]])
pts2 = np.float32([[0, 0], [1500, 0], [0, 1000], [1500, 1000]])
matrix = cv2.getPerspectiveTransform(pts1, pts2)
og = cv2.warpPerspective(og, matrix, (1500, 1000))
image = cv2.resize(og, None, fx = 0.5, fy = 0.5)  # half-size preview
cv2.imwrite('ignore/aadhar_back/9.jpg', og)
cv2.imshow('image', image)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
# # Seven Segment Display
import cv2
import numpy as np
import matplotlib.pyplot as plt
# +
# Seven-segment display experiment: inspect the first channel's histogram
# and try a crude threshold at a fraction of the histogram extremes.
og = cv2.imread('ignore/sevenseg/2019-01-21 20:41:40.908206.png')
# NB: despite the variable name, this converts to HSV, not LAB.
lab = cv2.cvtColor(og, cv2.COLOR_BGR2HSV)
luminance, a, b = cv2.split(lab)
lum = luminance.copy()
test = luminance.copy()
cv2.imshow('test', test)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Binarize around (argmax + argmin) / 5 of the histogram.
hist, bins = np.histogram(luminance, 256, [0, 256])
mean = int((np.argmax(hist) + np.argmin(hist)) / 5)
luminance[luminance > mean] = 255
luminance[luminance <= mean] = 0
plt.plot(hist)
plt.show()
# Histogram of a blurred copy, plus a normalised flat view of the channel.
blur = cv2.GaussianBlur(lum, (5, 5), 0)
his, bins = np.histogram(blur, 256, [0, 256])
lum = lum.ravel() / lum.max()
plt.plot(his)
plt.show()
# Show the untouched channel next to a (currently unused) second axis.
fig, ax = plt.subplots(1, 2)
ax1, ax2 = ax.ravel()
ax1.imshow(test, cmap=plt.cm.gray)
ax1.set_title('Original')
ax1.axis('off')
# -
# Raw OCR lines from a driving-licence scan.
values = ["NOLG STTT", ".MAHARASHTRA STATE MOTOR DRIVING LICENCE", "DL No MHO01 20180006033", "DOI 15-02-2018",
          "arty w", "Valid Till 14-02-2038 NT", "15-02-2013", "AUTHORISATION TO DRIVE FOLLOWING CLASS", "cov",
          "15-02-2018", "MCWG 15-02-2018", "DOB 11-12-1997 BG A", "Name <NAME>", "S/DW of JAY ADIVAREKAR",
          "Add 3 421 RAMDOOT BLDG", "M P ROAD CURRY ROAD", "Mumbai MH", "OF Nd", "PIN 400012Signature ID of Ags Arn",
          "PIN 400012", "If IAL EF ersSuing Authority MHO1", "Impression of Holde"]
# Index each OCR line by its position in the list.
details = {position: line for position, line in enumerate(values)}
print(details)
details1 = {0: '111 1', 1: '2 VIL ALMAHARASHTRA STATE MOTOR DRIVING LICENCE', 2: 'DOI 05-11-2008', 3: '2 DL No MHO1 20080116201', 4: 'w aid Tiff 04-11-2028 NT', 5: 'FORM 7RULE 16 2', 6: 'DLD 24-08-2016', 7: 'RUL', 8: 'AUTHORISATION TO DRIVE FOLLOWING CLASS22 IT IANA', 9: 'OF VEHICLES THROUGHOUT INDIA', 10: 'MCWG 05-11-2008', 11: 'DOB 19-04-1989 BG', 12: 'Name <NAME>', 13: 'S/D/W of RAJAB TINWALA', 14: 'Add 2ND FLR R/21 43/47 JEEVANBHAI BLDG', 15: 'MASTAN TANK ST MUMBAI', 16: 'PIN 400008', 17: 'Signature/Thumb', 18: 'S$gnature ID of a name', 19: '117 750 49 01 2016280', 20: 'Impression of Holder'}
details1 = {0: 'THE UNION OF INDIA', 1: 'MAHARASHTRA STATE MOTOR DRIVING LICENCE', 2: 'DL No MHO01 20090077155', 3: 'DOI 24-07-2009', 4: 'Valid Tilf 23-07-2029 NT', 5: 'FORM 7', 6: 'RULE 16 2', 7: 'AUTHORISATION TO DRIVE FOLLOWING CLASS', 8: 'OF VEHICLES THROUGHOUT INDIA', 9: 'DOI', 10: 'LMV 24-07-2009', 11: 'DOB 28-04-1990 BG', 12: 'Name <NAME>', 13: 'S/D/W of MANOJ SHAH', 14: 'NH AF FW Add 151/B KALPATARU RESIDENCY SION E', 15: 'MUMBAI', 16: 'Rab pd Sy -', 17: 'PIN 400022', 18: 'Signature/Thumb', 19: 'ID of', 20: 'Impression of Holder', 21: 'Gg ure oisSuing Authority MHO1 2009150'}
details1
details1 = {0: 'THE UNION OF INDIA', 1: 'MAHARASHTRA STATE MOTOR DRIVING LICENCE', 2: 'DL No MH01 20060039783STH ANA', 3: 'DO 19-07-2006', 4: 'Vaid Till 18-07-2026 NT', 5: 'FORM', 6: 'DLD 25-03-2017', 7: 'FF Jim RULE 16 2', 8: 'AUTHORISATION TO DRIVE FOLLOWING CLASS', 9: 'OF VEHICLES THROUGHOUT INDIA', 10: 'Cov', 11: 'MCWG 19-07-2006', 12: 'LMV 19-07-2006', 13: 'DOB 12-03-1987 BG', 14: 'Name <NAME>', 15: 'S/DMV of PRAKASH BHAUD', 16: 'Add 19 A 3RD FLR COLABA', 17: 'CHAMBER COLABA CROSS LANE', 18: 'MUMB3AI', 19: 'PIN 400005', 20: 'Signature/Thumb', 21: 'Signature ID of', 22: 'Jsuing Authority MHG1 2017284', 23: 'Impression of Holder'}
details1
details1 = {0: 'THE UNION OF INDIA', 1: 'MAHARASHTRA STATE MOTOR DRIVING LICENCE', 2: 'DOI 16-05-2016', 3: 'Valid Till 15-05-2036 NT', 4: 'FORM 7ire rr 4', 5: '16-10-2018', 6: 'Ise 3RULE 16 2', 7: 'AUTHORISATION TO DRIVE FOLLOWING CLASS', 8: 'OF VEHICLES THROUGHOUT INDIA', 9: 'cov', 10: 'LMV 16-05-2016', 11: 'DOB 04-09-1989 BG O', 12: 'Name <NAME>', 13: 'S/DWW of SUBHASH PALVE', 14: 'Add SANTOSH NAGAR NR DHANANJAY NIWAS', 15: 'O T SECTION ULHASNAGAR- 4 DIST- THANE Ulhasnagar ------- -', 16: 'Ulhasnagar Thane MH', 17: 'PIN 421004', 18: 'Signature ID of', 19: 'Signature/Thumb', 20: 'IsSuing Authority MHO5', 21: 'Impression of Holder'}
details1
details1 = {0: 'THE UNION OF INDIA', 1: 'aa a a a a a a a a a a a a EEEMAHARASHTRA STATE MOTOR DRIVING LICENCE', 2: 'GE DL No MH02 20090128158', 3: 'DO 27-07-2009', 4: 'Valid Till 26-07-2029 NT', 5: 'FORM', 6: 'AED 01-10-2011', 7: 'RULE 16 2', 8: 'AUTHORISATION TO DRIVE FOLLOWING CLASS', 9: 'OF VEHICLES THROUGHOUT INDIA', 10: 'DOJ', 11: 'LMV 19-09-2011',
12: 'MCWG 27-07-2009', 13: 'DOB 27-09-1986 BG', 14: 'Name <NAME>', 15: 'S/D/W Of NAVIN PATEL', 16: 'Add D-603 GOKUL RESIDENCY THAUKAR VILLAGE', 17: 'KAN<NAME>', 18: 'MUMBAI', 19: 'PIN 400101', 20: 'Signature/Thumb', 21: 'Signature ID of', 22: 'Jsuing Authority MHO02 2011287', 23: 'Impression of Holder'}
details1
details1 = {0: 'RENE fel Reig ir', 1: 'MAHARASHTRA STATE MOTOR DRIVING LICENCE', 2: 'DOI 1 1502-2018', 3: 'DL No MH01 20180006033', 4: 'TM Fa', 5: 'Valid Till 14-02-2038 NT', 6: '15-02-2018', 7: 'FAIRS RULE 16 2', 8: 'AUTHORISATION TO DRIVE FOLLOWING CLASS', 9: 'OF VEHICLES THROUGHOUT INDIA', 10: 'CoV', 11: 'LMV 16-02-2018- mm maa', 12: 'MCWG 15-02-2018', 13: 'DOB 11-12-1997 BG A', 14: 'Name <NAME>', 15: 'S/DMW of VIJAY ADIVAREKAR', 16: 'Add 3 421 RAMDOOT BLDG', 17: 'MP RCAD CURRY ROAD', 18: 'Mumbai MH', 19: '-PIN 400012 yong FerransSignature 1D ofBE Ah', 20: 'PIN 400012', 21: 'Signature/Thumb', 22: 'impression jon of Holder'}
print(details1)
details1 = {0: 'THE UNION OF INDIA', 1: 'EE I MAHARASHTRA STATE MOTOR DRIVING LICENCE',
2: 'DL Noex MH01 20130006033', 3: 'DO 1 15-02-2018', 4: 'aoa ad', 5: '15-02-2018',
6: 'RULE 16 2', 7: 'AUTHORISATION TO DRIVE FOLLOWING CLASS',
8: 'OF VEHICLES THROUGHQUT INDIA', 9: 'Cov', 10: 'LMV 16-02-2018',
11: 'MCWG 15-02-2018', 12: 'DOB 11-12-1997 BG A', 13: 'Name <NAME>',
14: 'S/DMW of VIJAY ADIVAREKAR', 15: 'Add 3 421 RAMDOOT BLDG',
16: 'MP ROAD CURRY ROAD', 17: 'Mumbai MH', 18: 'Nad zn', 19: 'Azim Ure',
20: 'PIN 400012', 21: 'Signature/Thumb', 22: 'Signature ID ofas',
23: 'issuing Authority MHO1', 24: 'Impression of Holder'}
print(details1)
for i in values:
print(i)
# Target fields to extract from the licence OCR output:
#   DL NO, Name, Fathers Name, Add (address), PIN
details1
# Keep just the OCR text lines, dropping the index keys.
details1 = list(details1.values())
import re
# # Licence Labels extraction
# Walk the licence OCR lines and pull out the structured fields. Two anchors
# are recognised: a 'DOB ...' line or a 'Name ...' line; all other fields are
# located by fixed offsets relative to that anchor.
imp = {}
num = 0  # NOTE(review): never used below
for idx in range(len(details1)):
    # licence number; 'Di No' is a common OCR misread of 'DL No'
    if 'DL No' in details1[idx]:
        imp["DL NO"] = details1[idx].split('DL No')[1].strip()
    elif 'Di No' in details1[idx]:
        imp["DL NO"] = details1[idx].split('Di No')[1].strip()
        # del details1[idx]
    # Anchor 1: a 'DOB dd-mm-yyyy' line. The following lines are assumed to
    # hold Name, Father's Name, then the address block up to the PIN line.
    elif details1[idx].startswith('DOB'):
        dob = re.findall("([0-9]{2}\-[0-9]{2}\-[0-9]{4})", details1[idx].split(' ', 1)[1])[0]
        #imp["DOB"] = details1[idx].split(' ', 1)[1].strip().split(r"[0-9]{0,2}\-[0-9]{0,2}\-[0-9]{0,4}", 1)[0]
        imp["Date of Birth"] = dob
        # del details1[idx]
        imp["Name"] = details1[idx + 1].split(' ', 1)[1].strip()
        # del details1[idx + 1]
        try:
            # father's name follows 'of' ('S/D/W of ...'); retry with 'Of'
            imp["Father's Name"] = details1[idx + 2].split('of',1)[1].strip()
            # del details1[idx + 2]
        except Exception as _:
            imp["Father's Name"] = details1[idx + 2].split('Of',1)[1].strip()
            # del details1[idx + 2]
        i = 4
        address = details1[idx + 3].split('Add', 1)[1].strip()
        # del details1[idx + 3]
        # accumulate all-uppercase continuation lines until the PIN line
        # (or at most 4 lines); mixed-case lines are treated as noise
        while not details1[idx + i].startswith('PIN') and i < 8:
            if details1[idx + i].isupper() != True:
                # del details1[idx + i]
                i += 1
                continue
            address += ' ' + details1[idx + i]
            # del details1[idx + i]
            i += 1
        imp["Address"] = address
        imp["Pin Code"] = details1[idx + i].split(' ', 1)[1]
        # del details1[idx + i]
        break
    # Anchor 2: some scans yield the 'Name ...' line first; the DOB is then
    # on the previous line and the offsets below shift by one.
    elif details1[idx].startswith('Name'):
        dob = re.findall("([0-9]{2}\-[0-9]{2}\-[0-9]{4})", details1[idx - 1].split(' ', 1)[1])[0]
        imp["Date of Birth"] = dob
        # del details1[idx - 1]
        imp["Name"] = details1[idx][4:].strip()
        # del details1[idx]
        try:
            imp["Father's Name"] = details1[idx + 2].split('of',1)[1].strip()
            # del details1[idx + 2]
        except Exception as _:
            imp["Father's Name"] = details1[idx + 2].split('Of',1)[1].strip()
            # del details1[idx + 2]
        i = 3
        address = details1[idx + 2].split('Add', 1)[1].strip()
        # del details1[idx + 2]
        while not details1[idx + i].startswith('PIN') and i < 7:
            if details1[idx + i].isupper() != True:
                # del details1[idx + i]
                i += 1
                continue
            address += ' ' + details1[idx + i]
            # del details1[idx + i]
            i += 1
        imp["Address"] = address
        imp["Pin Code"] = details1[idx + i].split(' ', 1)[1]
        # del details1[idx + i]
        break
imp
# details1
# print(details1)
aadhar = {0: '<NAME>', 1: 'WH a4 / Yeaar of Birth 1a994', 2: 'EY / Male', 3: '4622 7574 0201'}
aadhar = {0: 'AFA', 1: '<NAME>', 2: 'DOB 04-10-1996', 3: '2808 4878 6547'}
aadhar = {0: '<NAME>', 1: '/ 008 28/04/1990 Wik', 2: '/ MALE', 3: '4455 8760 7069', 4: 'AT-1T7 -'}
aadhar = {0: 'GOVERNMENT OF INDIA', 1: '<NAME>', 2: 'DOaB 04-10-1996', 3: 'Ea EFs', 4: 'RIE', 5: 'GenderMale', 6: '2808 4878 6547'}
aadhar
pan = {0:" CSUPR6644H",1:"08/11/1997",2:"DATE OF BIRTH",3:"NAME",
4:"<NAME>",5:"<NAME>",6:"FATHERS NAME",
7:"§ SEES",8:"© AES WRE"}
pan
# Collect the aadhaar OCR lines in reverse order (bottom of the card first).
temp = list(aadhar.values())[::-1]
temp
# # Aadhar Labels Extraction
# +
# Aadhaar field extraction over the reversed OCR lines in `temp`.
imp = {}
for idx in range(len(temp)):
    # Anchor: the 12-digit Aadhaar number ('dddd dddd dddd').
    if re.search("[0-9]{4}\s[0-9]{4}\s[0-9]{4}", temp[idx]):
        try:
            imp['Aadhar No'] = re.findall("[0-9]{4}\s[0-9]{4}\s[0-9]{4}", temp[idx])[0]
        except Exception as _:
            imp['Aadhar No'] = "Not Found"
        # Gender is expected on one of the next three entries (temp is the
        # OCR output reversed, so these lie above the number on the card).
        if temp[idx + 1].endswith("Female") or temp[idx + 1].endswith("FEMALE"):
            imp["Gender"] = "Female"
        elif temp[idx + 1].endswith("Male") or temp[idx + 1].endswith("MALE"):
            imp["Gender"] = "Male"
        elif temp[idx + 2].endswith("Female") or temp[idx + 2].endswith("FEMALE"):
            imp["Gender"] = "Female"
        elif temp[idx + 2].endswith("Male") or temp[idx + 2].endswith("MALE"):
            imp["Gender"] = "Male"
        elif temp[idx + 3].endswith("Female") or temp[idx + 3].endswith("FEMALE"):
            imp["Gender"] = "Female"
        elif temp[idx + 3].endswith("Male") or temp[idx + 3].endswith("MALE"):
            imp["Gender"] = "Male"
    # A line carrying a dd-mm-yyyy or dd/mm/yyyy date -> date of birth;
    # the following line is taken as the name.
    # NOTE(review): the alternation "[0-9]{2}\-|/[0-9]{2}\-|/[0-9]{4}" looks
    # like it was meant to be "[0-9]{2}[-/][0-9]{2}[-/][0-9]{4}" — confirm.
    elif re.search("[0-9]{2}\-|/[0-9]{2}\-|/[0-9]{4}", temp[idx]):
        try:
            imp["Date of Birth"] = re.findall("[0-9]{2}\-[0-9]{2}\-[0-9]{4}", temp[idx])[0]
        except Exception as e:
            # dashed format absent -> retry with the slashed format
            imp["Date of Birth"] = re.findall("[0-9]{2}/[0-9]{2}/[0-9]{4}", temp[idx])[0]
        imp["Name"] = temp[idx + 1]
    # Explicit 'Year of Birth' label.
    elif "Year of Birth" in temp[idx]:
        try:
            imp["Year of Birth"] = re.findall("[0-9]{4}", temp[idx])[0]
        except Exception as _:
            imp["Year of Birth"] = "Not Found"
        imp["Name"] = temp[idx + 1]
    # Bare 4-digit number as a year-of-birth fallback.
    elif re.search("[0-9]{4}", temp[idx]):
        try:
            imp["Year of Birth"] = re.findall("[0-9]{4}", temp[idx])[0]
        except Exception as e:
            imp["Year of Birth"] = "Not Found"
        imp["Name"] = temp[idx + 1]
    # Otherwise, a line of three or more words is assumed to be the holder's
    # name, unless it is the 'GOVERNMENT OF INDIA' header.
    elif len(temp[idx].split(' ')) > 2:
        if 'GOVERNMENT' in temp[idx] or 'OF' in temp[idx] or 'INDIA' in temp[idx]:
            continue
        else:
            imp["Name"] = temp[idx]
imp
# -
# # Pan Labels
# Quick regex / substring experiments on sample OCR strings.
check = "/ 008 28/04/1990 Wik"
re.findall("[0-9]{2}/[0-9]{2}/[0-9]{4}", check)
sting = "GOVERNMENT OF INDIA"
# FIX: the original `if "GOVERNaMENT" or "OaF" in sting:` was ALWAYS true —
# a non-empty string literal is truthy, so the membership test after `or`
# never mattered. Each substring must be tested against the string explicitly.
if "GOVERNaMENT" in sting or "OaF" in sting:
    print("HERE")
# +
# +
test = ['check', 'test', 'okay']
# 'check' is present, so this prints; the misspelled entries never match.
if any(word in test for word in ('check', 'taest', 'oakay')):
    print("here")
# -
s = '<NAME> ow'
s.split(' ')
# +
# Threshold experiment on a grayscale sample: binarize just below the
# histogram peak (the dominant background level) and inspect the result.
img = cv2.imread('ignore/mp6.jpg', 0)
# sharpening kernel kept from earlier experiments (not used by the live code)
kernel_sharpening = np.array([[-1, -1, -1],
                              [-1, 9, -1],
                              [-1, -1, -1]])
hist, bins = np.histogram(img, 256, [0, 256])
plt.plot(hist)
plt.show()
mean = int((np.argmax(hist) + np.argmin(hist)) / 2)  # computed but unused
_, img = cv2.threshold(img, np.argmax(hist) - 15, 255, cv2.THRESH_BINARY)
kernel = np.ones((5, 2), np.uint8)  # left over from morphology trials; unused
cv2.imshow('test', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.destroyAllWindows()
# -
import cv2
import numpy as np
# +
# Same experiment with a fixed inverted-binary threshold at 212.
img = cv2.imread('ignore/test1.jpg', 0)
# sharpening kernel kept from earlier experiments (not used by the live code)
kernel_sharpening = np.array([[-1, -1, -1],
                              [-1, 9, -1],
                              [-1, -1, -1]])
_, img = cv2.threshold(img, 212, 255, cv2.THRESH_BINARY_INV)
kernel = np.ones((5, 2), np.uint8)  # left over from morphology trials; unused
cv2.imshow('test', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.destroyAllWindows()
# -
# +
def pre_process(template_type, image_path):
    """Binarize a card photo and subtract its blank template.

    Sharpens and blurs the image, thresholds the LAB luminance channel
    around the midpoint of the histogram extremes, removes the printed
    template given by *template_type*, and returns the inverted result
    (text dark on light) ready for OCR.
    """
    sharpen = np.array([[-1, -1, -1],
                        [-1, 9, -1],
                        [-1, -1, -1]])
    image = cv2.imread(image_path)
    # two sharpen passes sandwiched between Gaussian blurs to boost strokes
    image = cv2.GaussianBlur(image, (5, 5), 0)
    image = cv2.filter2D(image, -1, sharpen)
    image = cv2.GaussianBlur(image, (5, 5), 0)
    image = cv2.filter2D(image, -1, sharpen)
    image = cv2.GaussianBlur(image, (5, 5), 0)
    # threshold the L channel at the midpoint of the histogram extremes
    luminance, _, _ = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2LAB))
    hist, _ = np.histogram(luminance, 256, [0, 256])
    cutoff = int((np.argmax(hist) + np.argmin(hist)) / 2)
    luminance[luminance > cutoff] = 255
    luminance[luminance <= cutoff] = 0
    # subtract the binarized blank template so only user-specific text remains
    template = cv2.imread(template_type, 0)
    _, template = cv2.threshold(template, 220, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    luminance = np.subtract(template, luminance)
    # thin the strokes slightly, then flip back to dark-on-light
    luminance = cv2.erode(luminance, np.ones((2, 2), np.uint8), iterations=1)
    return np.invert(luminance)
def get_text(image):
    """OCR the text lines of a pre-processed card image.

    Detects line-shaped blobs via Canny edges + horizontal dilation, crops
    each candidate line, runs tesseract on it, normalises common OCR
    confusions, and returns the non-empty upper-cased lines top-to-bottom.
    """
    # getting edges of the text in image
    image_edges = cv2.Canny(image, 30, 150)
    # dilate horizontally so each text line merges into a single blob
    kernel_line = np.ones((2, 100), np.uint8)
    dilated_line = cv2.dilate(image_edges, kernel_line, iterations=1)
    # FIX: cv2.findContours returns (image, contours, hierarchy) in OpenCV 3
    # but only (contours, hierarchy) in OpenCV 4 — the old 3-value unpack
    # crashes on OpenCV 4. Handle both signatures.
    found = cv2.findContours(dilated_line.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    ctrs_line = found[0] if len(found) == 2 else found[1]
    # top-to-bottom order (sort by the bounding box's y coordinate)
    sorted_ctrs_line = sorted(ctrs_line, key=lambda ctr: cv2.boundingRect(ctr)[1])
    text = []
    for i, ctr_line in enumerate(sorted_ctrs_line):
        # getting coordinates of the line contour
        x_line, y_line, w_line, h_line = cv2.boundingRect(ctr_line)
        # skip blobs too small/large or too short to be a text line
        if w_line * h_line < 3000 or w_line * h_line > 50000 or h_line <= 20:
            continue
        cropped_line = image[y_line:y_line + h_line, x_line:x_line + w_line]
        # cropped_line = np.invert(cropped_line)
        # white border improves tesseract's segmentation
        cropped_line = cv2.copyMakeBorder(cropped_line, 10, 10, 10, 10, cv2.BORDER_CONSTANT, value=[255, 255, 255])
        line_text = pyt.image_to_string(cropped_line)
        # normalise frequent OCR confusions, then upper-case
        line_text = line_text.replace('/', 'i').replace("#", "4").replace("'", "").replace('"', '').replace('!', 'I').replace(']', 'I').upper()
        if line_text != '':
            text.append(line_text)
    return text
def showImage(title, image):
    """Display *image* in a window titled *title*; block until a key is pressed."""
    cv2.imshow(title, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# -
# Run the template-subtraction + OCR pipeline on a single sample.
test = pre_process('pancard_template.jpg', 'test4.png')
text = get_text(test)
print(text)
import os
# Batch-run the same pipeline over every PAN-card sample image.
for i in os.listdir('pancards/'):
    test = pre_process('pancard_template.jpg', 'pancards/' + i)
    text = get_text(test)
    print(text)
def clean_text(text_list):
    """Normalize a list of OCR output lines.

    For each line: strip characters outside [A-Za-z0-9-/ ], drop short
    (2-3 character) words that don't start with a digit or '/', trim
    surrounding whitespace, collapse internal runs of whitespace, and omit
    lines that end up empty. Returns the cleaned lines as a new list.
    """
    # FIX: the original guard `if i != ' ' or i != '  ' or i != '':` was a
    # tautology (an `or` of inequalities is always true). Blank lines were
    # only dropped later by accident; test emptiness properly up front.
    shortword = re.compile(r'\W*\b[^0-9/]\w{1,2}\b')  # compiled once, not per line
    cleaned = []
    for line in text_list:
        if not line.strip():
            continue
        line = re.sub(r'[^A-Za-z0-9-/ ]+', '', line)
        line = shortword.sub('', line)
        line = line.strip()
        line = re.sub(r'\s{2,}', '', line)
        if line:
            cleaned.append(line)
    return cleaned
import cv2
import matplotlib.pyplot as plt
import numpy as np
import pytesseract as pyt
import re
# +
# Experiment: isolate dark text by clipping the L channel to a narrow band
# around the histogram peak (the card background), then OCR the result.
image = cv2.imread('ignore/test1.jpg')
print(image.shape)
sharpen = np.array([[-1, -1, -1],
                    [-1, 9, -1],
                    [-1, -1, -1]])
# alternate blur/sharpen passes to emphasise strokes
image = cv2.GaussianBlur(image, (5, 5), 0)
image = cv2.filter2D(image, -1, sharpen)
image = cv2.GaussianBlur(image, (3, 3), 0)
image = cv2.filter2D(image, -1, sharpen)
image = cv2.GaussianBlur(image, (3, 3), 0)
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
luminance, a, b = cv2.split(lab)
hist, bins = np.histogram(luminance, 256, [0, 256])
peak = np.argmax(hist)
print(peak)
# clip to [peak-15, peak+15]; zero both band edges so only mid-band
# (text-intensity) pixels survive
luminance = np.clip(luminance, peak - 15, peak + 15)
luminance[luminance == peak - 15] = 0
luminance[luminance == peak + 15] = 255
luminance[luminance == 255] = 0
cv2.imshow('title', luminance)
cv2.waitKey(0)
text = pyt.image_to_string(luminance, config=('--oem 1 --psm 3'))
# normalise common tesseract confusions and split into lines
data = text.replace("#", "4").replace("'", "").replace('"', '').replace('!', 'I').replace(']', 'I').upper().split('\n')
cv2.destroyAllWindows()
# -
# Sample OCR output from a PAN card, pasted for reference (was invalid
# Python; preserved here as a comment):
#   INCOME TAX DEPARTMENT / GOVT. OF INDIA
#   PERMANENT ACCOUNT NUMBER CARD
#   CSUPR6644H
#   NAME: <NAME>
#   FATHERS NAME: <NAME>
#   DATE OF BIRTH: 08/11/1997
#   SIGNATURE
import cv2
import os
import shutil
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import thinning
import pytesseract as pyt
import imutils
def showImage(title, image):
    """Display *image* in an OpenCV window named *title*; block until any
    key is pressed, then close every HighGUI window."""
    cv2.imshow(title, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# +
# Experiment cell: threshold the L channel of a licence image in LAB space,
# subtract a licence template, and OCR the remainder.
image = cv2.imread('IP/licence_image2.png')
# Create our shapening kernel, it must equal to one eventually
kernel_sharpening = np.array([[-1,-1,-1],
                              [-1, 9,-1],
                              [-1,-1,-1]])
# applying the sharpening kernel to the input image & displaying it.
# image = cv2.filter2D(image, -1, kernel_sharpening)
# showImage('test', image)
lab = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
showImage('test', lab)
l, a, b = cv2.split(lab)
# Working copies of each channel so the originals stay untouched.
lm = l.copy()
la = l.copy()
aa = a.copy()
bb = b.copy()
hist,bins = np.histogram(l,256,[0,256])
hista,bins = np.histogram(a,256,[0,256])
histb,bins = np.histogram(b,256,[0,256])
plt.plot(hist), plt.plot(hista)
# plt.plot(hista), plt.plot(histb)
plt.show()
# lm -= 100
# lm[lm < 0] = 0
print(lm.shape)
print(np.argmax(hist))
# Binarisation cut-off: midpoint between the most and least frequent L values.
mean = int((np.argmax(hist) + np.argmin(hist)) / 2)
max_a = np.argmax(hista)
print(max_a)
# kernel = np.ones((2, 2), np.uint8)
# lm = cv2.erode(lm, kernel, iterations = 2)
bb = cv2.filter2D(bb, -1, kernel_sharpening)
showImage('test', lm)
# aa[aa > int(120)] = 255
# aa[aa <= int(120)] = 0
showImage('test', aa)
showImage('test', bb)
# Hard threshold of the luminance channel at `mean`.
lm[lm > mean] = 255
lm[lm <= mean] = 0
# aa[aa > 150] = 255
# aa[aa <= 150] = 0
# bb[bb > 95] = 255
# bb[bb <= 95] = 0
# la += 40
# la[la > 255] = 255
# lab = cv2.merge(l, a, b)
# laba = np.concatenate((la, a, b))
# # laba = np.invert(laba)
# # print(np.max(l))
# showImage('test', laba)
hist,bins = np.histogram(lm,256,[0,256])
plt.plot(hist)
plt.show()
# NOTE(review): labm is computed but never used below.
labm = np.concatenate((lm, aa, bb))
# lm = cv2.morphologyEx(lm, cv2.MORPH_OPEN, kernel)
# lm = cv2.erode(lm, kernel, iterations = 2)
# lm = cv2.dilate(lm, kernel, iterations = 3)
# Remove the static card layout by subtracting the binarised template.
template = cv2.imread('IP/license_template.jpg', 0)
ret3, template = cv2.threshold(template, 220, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
lm = np.subtract(template, lm)
lm = np.invert(lm)
showImage('test', lm)
text = pyt.image_to_string(lm)
print(text)
showImage('test', lm)
cv2.imwrite('tess.jpg', lm)
# -
import cv2
import os
import shutil
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import thinning
import pytesseract as pyt
import imutils
# +
def getText(template, image):
    """Subtract a card template from a document photo and OCR the result.

    Parameters (both are file paths, not arrays):
        template -- path to the blank card template image.
        image    -- path to the filled-in card photo.

    Side effects: sets the module-level `og_image`, shows several debug
    windows, and delegates line-by-line OCR to `get_lines`. Returns None.
    """
    template = cv2.imread(template, 0)
    # showImage('og', template)
    # testing = image
    image = cv2.imread(image, 0)
    ret3, template = cv2.threshold(template, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # Keep the raw grayscale around for get_lines to crop from.
    global og_image
    og_image = image
    # testing = cv2.imread(testing)
    # testing = increase_brightness(testing)
    # showImage('testing', testing)
    # testing = cv2.cvtColor(testing, cv2.COLOR_BGR2GRAY)
    # kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
    # testing = cv2.filter2D(testing, -1, kernel)
    # kernel = np.ones((3, 3), np.uint8)
    # testing = cv2.dilate(testing, kernel, iterations = 1)
    # ret3, testing = cv2.threshold(testing, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # thresh = np.invert(testing)
    # testing1 = cv2.Canny(testing, 30, 150)
    # kernel = np.ones((3, 3), np.uint8)
    # testing = cv2.dilate(testing1, kernel, iterations = 1)
    # testing = np.subtract(thresh, testing)
    # showImage('testing', testing)
    # cv2.imwrite('bright.jpg', testing)
    # Sharpening the image
    kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])
    unsharp_image = cv2.filter2D(image, -1, kernel)
    # cv2.imshow('og', unsharp_image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Adaptive Thresholding
    # OBinary = cv2.adaptiveThreshold(unsharp_image,255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,15,5)
    # Thresholding
    ret3, OBinary = cv2.threshold(unsharp_image, 220, 255, cv2.THRESH_BINARY_INV+ cv2.THRESH_OTSU)
    showImage('testing', OBinary)
    # OBinary = cv2.medianBlur(OBinary, 5)
    # OBinary = cv2.bilateralFilter(OBinary,12,25,25)
    # cv2.imwrite('nonbilateral.jpg', OBinary)
    kernel = np.ones((2, 2), np.uint8)
    #
    # OBinary = cv2.morphologyEx(OBinary, cv2.MORPH_OPEN, kernel)
    OBinary = cv2.erode(OBinary, kernel, iterations = 3)
    # OBinary = cv2.medianBlur(OBinary, 3)
    showImage('testing', OBinary)
    # showImage("subtracted", OBinary)
    # Eroding
    # kernel = np.ones((3, 3), np.uint8)
    # OBinary = cv2.erode(OBinary, kernel, iterations = 1)
    # Remove the static card layout, leaving only the filled-in text.
    subtract = np.subtract(OBinary, template)
    showImage('testing', subtract)
    # NOTE(review): `text` and `testing` below are computed but never used —
    # OCR output actually comes from get_lines(subtract).
    text = pyt.image_to_string(subtract)
    text = text.replace('/', 'i').replace("#", "4").replace("'", "").replace('"', '').replace('!', 'I').replace(']', 'I').upper()
    # if text == '':
    # continue
    # print(text)
    testing = imutils.skeletonize(subtract, size=(3, 3))
    # showImage('testing', testing)
    get_lines(subtract)
    # showImage("subtracted", subtract)
    cv2.destroyAllWindows()
def showImage(title, image):
    """Open a window *title* showing *image*, wait for a keypress, then
    tear down all OpenCV windows. (Re-definition; same behaviour as the
    earlier cell's helper.)"""
    cv2.imshow(title, image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def get_lines(image):
    """Detect text lines in a binarised card image and OCR each line.

    For every plausible line contour, writes three debug crops to disk and
    prints the cleaned OCR text. Reads the module-level `og_image` set by
    getText. Returns None.
    """
    # getting edges of the text in image
    image_edges = cv2.Canny(image, 30, 150)
    # Wide (2x100) kernel merges characters horizontally into line blobs.
    kernel_line = np.ones((2, 100), np.uint8)
    dilated_line = cv2.dilate(image_edges, kernel_line, iterations=1)
    showImage('lines', dilated_line)
    # Bug fix: the original unpacked three return values
    # (`_, ctrs_line, _ = cv2.findContours(...)`), which only works on
    # OpenCV 3.x; OpenCV 4.x returns (contours, hierarchy). Indexing with
    # [-2] selects the contour list under both APIs.
    ctrs_line = cv2.findContours(image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Sort lines by height of their bounding box.
    sorted_ctrs_line = sorted(ctrs_line, key=lambda ctr: cv2.boundingRect(ctr)[3])
    for i, ctr_line in enumerate(sorted_ctrs_line):
        # getting coordinates of the line contour
        x_line, y_line, w_line, h_line = cv2.boundingRect(ctr_line)
        # Skip blobs that are too small (noise) or too large (whole card).
        if w_line * h_line < 3000 or w_line * h_line > 50000:
            continue
        cropped_line = image[y_line:y_line + h_line, x_line:x_line + w_line]
        blac = np.invert(cropped_line)
        cv2.imwrite('bdgdfg' + str(i) + '.jpg', blac)
        testing = og_image[y_line:y_line + h_line, x_line:x_line + w_line]
        ret3, testing = cv2.threshold(testing, 220, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        testing = np.invert(testing)
        testing = imutils.skeletonize(testing, size=(3, 3))
        cv2.imwrite('casdsad' + str(i) + '.jpg', testing)
        kernel = np.ones((3, 3), np.uint8)
        testing = cv2.dilate(testing, kernel, iterations = 1)
        cv2.imwrite('asdsad' + str(i) + '.jpg', testing)
        text = pyt.image_to_string(cropped_line)
        # Normalise common OCR mis-reads before printing.
        text = text.replace('/', 'i').replace("#", "4").replace("'", "").replace('"', '').replace('!', 'I').replace(']', 'I').upper()
        if text == '':
            continue
        print(text)
def increase_brightness(img, value=60):
    """Return a copy of BGR image *img* brightened by *value*.

    Works in HSV space: adds *value* to the V channel, saturating at 255.

    Bug fix: the original did `v += 255` on a uint8 array, which wraps
    around modulo 256 and corrupts the channel (and ignored *value*
    entirely); the clamped-add below is the intended behaviour that was
    left commented out in the original.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    h, s, v = cv2.split(hsv)
    # Saturating add: pixels that would overflow are pinned to 255,
    # everything else gets the full increment.
    lim = 255 - value
    v[v > lim] = 255
    v[v <= lim] += value
    final_hsv = cv2.merge((h, s, v))
    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
    return img
# -
# +
# Isolate blue regions of the PAN card with an HSV colour mask.
image = cv2.imread('IP/pancard.jpg')
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# Hue 110-130 covers blue in OpenCV's 0-179 hue scale (variable names say
# "red" but the range selects blue, as the comments below note).
lower_red = np.array([110,50,50])
upper_red = np.array([130,255,255])
mask = cv2.inRange(hsv, lower_red, upper_red)
# The bitwise and of the frame and mask is done so
# that only the blue coloured objects are highlighted
# and stored in res
res = cv2.bitwise_and(image,image, mask= mask)
cv2.imshow('frame',image)
cv2.imshow('mask',mask)
cv2.imshow('res',res)
# This displays the frame, mask
# and res which we created in 3 separate windows.
# Bug fix: the original line read `k = cv2.waitKey(0` — an unbalanced
# parenthesis that made the whole cell a syntax error.
k = cv2.waitKey(0)
# Destroys all of the HighGUI windows.
cv2.destroyAllWindows()
# -
# # Working on thinned image
import cv2
import os
import shutil
import numpy as np
from matplotlib.pyplot import imshow
from PIL import Image
import thinning
# # Preprocessing
def pre_processing(path):
    """Load the image at *path* and return an eroded Otsu-binarised version.

    Also shows two intermediate thinned previews and stores the Otsu result
    (pre-erosion) in the module-level `og_image`.
    """
    # Read Image
    img = cv2.imread(path)
    # Grayscale step
    grayscaled = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Noise Clearing step
    noise_cleared = cv2.fastNlMeansDenoising(grayscaled, None, 4, 7, 21)
    # Adaptive Thresholding
    OBinary = cv2.adaptiveThreshold(noise_cleared,255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV,15,5)
    kernel = np.ones((3, 3), np.uint8)
    OBinary = cv2.erode(OBinary, kernel, iterations = 1)
    # Preview of the adaptive-threshold branch (result is then discarded).
    thinned = thinning.guo_hall_thinning(OBinary)
    cv2.imshow('test', thinned)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Thresholding — Otsu result overwrites the adaptive one above.
    ret3, OBinary = cv2.threshold(noise_cleared, 250, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    thinned = thinning.guo_hall_thinning(OBinary)
    cv2.imshow('test', thinned)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Keep the un-eroded binarisation for later character cropping.
    global og_image
    og_image = OBinary
    # Eroding
    kernel = np.ones((3, 3), np.uint8)
    OBinary = cv2.erode(OBinary, kernel, iterations = 1)
    return OBinary
# # Convert image into matrix of image rows and image columns
# +
'''
this function convert image into matrix of image rows
'''
def imgToMatrixR(img):
    """Return *img* (2-D array) as a plain list of rows, each row being a
    list of the pixel values in that row."""
    height, width = img.shape
    return [[img[r, c] for c in range(width)] for r in range(height)]
'''
this function convert image into matrix of image columns
'''
def imgToMatrixC(img):
    """Return *img* (2-D array) as a plain list of columns, each column
    being a list of the pixel values in that column (top to bottom)."""
    height, width = img.shape
    return [[img[r, c] for r in range(height)] for c in range(width)]
'''
this function count a specific value (parameter p) in matrix
'''
def countPixel(matrix, p):
    """Return, for each row of *matrix*, how many entries equal *p*."""
    return [row.count(p) for row in matrix]
# -
# # Line Segmentation
# +
def get_lines(image):
    """Locate text-line contours in a binarised image.

    The per-line processing (get_words call) is currently commented out,
    so this version only iterates and filters the contours. Returns None.
    """
    # getting edges of the text in image
    image_edges = cv2.Canny(image, 30, 150)
    # dilating image to detect individual lines
    kernel_line = np.ones((3, 25), np.uint8)
    dilated_line = cv2.dilate(image_edges, kernel_line, iterations=1)
    # finding contours of the line
    # Bug fix: `im2, ctrs, hier = cv2.findContours(...)` is OpenCV-3-only;
    # OpenCV 4 returns two values. [-2] picks the contour list on both.
    ctrs_line = cv2.findContours(dilated_line.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Sort lines top-to-bottom by bounding-box y.
    sorted_ctrs_line = sorted(ctrs_line, key=lambda ctr: cv2.boundingRect(ctr)[1])
    for i, ctr_line in enumerate(sorted_ctrs_line):
        # getting coordinates of the line contour
        x_line, y_line, w_line, h_line = cv2.boundingRect(ctr_line)
        # Filter out noise and card-sized blobs.
        if w_line * h_line < 3000 or w_line * h_line > 50000:
            continue
        # get_words(x_line, y_line, w_line, h_line)
# -
# # Word Segmentation
def get_words(x, y, w, h):
    """Split the line at (x, y, w, h) of the module-level `pre_processed`
    image into words and pass each word on to get_characters.

    Coordinates handed to get_characters are absolute (line offset added).
    Returns None.
    """
    cropped_line = pre_processed[y:y + h, x:x + w]
    # getting edges of the text in image
    # Edge Detection
    image_edges = cv2.Canny(cropped_line, 30, 150)
    # 3x12 kernel merges characters horizontally into word blobs.
    kernel_word = np.ones((3, 12), np.uint8)
    dilated_word = cv2.dilate(image_edges, kernel_word, iterations=1)
    # Getting contours from the line
    # Bug fix: 3-value unpacking of findContours breaks on OpenCV 4;
    # [-2] selects the contour list for both OpenCV 3 and 4.
    ctrs_word = cv2.findContours(dilated_word.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Left-to-right order by bounding-box x.
    sorted_ctrs_word = sorted(ctrs_word, key=lambda ctr: cv2.boundingRect(ctr)[0])
    for i, ctr_word in enumerate(sorted_ctrs_word):
        # getting coordinates of the word contour
        x_word, y_word, w_word, h_word = cv2.boundingRect(ctr_word)
        adjusted_x = x + x_word
        adjusted_y = y + y_word
        cropped_word = pre_processed[adjusted_y:adjusted_y + h_word, adjusted_x:adjusted_x + w_word]
        get_characters(adjusted_x, adjusted_y, w_word, h_word)
# # Character Segmentation
# +
def get_characters(x, y, w, h):
    """Segment the word at (x, y, w, h) of `pre_processed` into characters
    by finding near-empty pixel columns, and write each character crop to
    output/characters/. Increments the module-level counter `j`.
    """
    cropped_word = pre_processed[y:y + h, x:x + w]
    kernel_word = np.ones((5, 3), np.uint8)
    cropped_word = cv2.dilate(cropped_word, kernel_word, iterations=1)
    # cv2.imshow('test', cropped_word)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # get image dimension
    height, width = cropped_word.shape
    # convert image to matrix of columns
    matrix = imgToMatrixC(cropped_word)
    # count white pixels in each column
    white_counter = countPixel(matrix, 255)
    # determine potential segmentation columns (psc).
    # psc is any column contains no or one white bixel at most , last column in image is also a psc .
    psc = [0]
    for p in range(0, len(white_counter)):
        if white_counter[p] <= 2 or p >= len(white_counter) - 1:
            psc.append(p)
    # determine segmentation columns (sc)
    # sc is the average column between each sequence set of psc , average = summ of columns index/count of columns
    sc = []
    summ = 0
    count = 0
    for n in range(0, len(psc) - 1):
        summ = summ + psc[n]
        count = count + 1
        # combine each set of sequence black columns into only one sc ,last column is also a sc
        if psc[n] + 3 < psc[n + 1] or n >= len(psc) - 2:
            sc.append(int(summ / count))
            summ = 0
            count = 0
    # segment image into chars — note crops are taken from the un-eroded
    # global `og_image`, not from the dilated working copy.
    for c in range(0, len(sc) - 1):
        crop_img = og_image[y:y + height, x + sc[c]:x + sc[c + 1] + 2]
        # Discard slivers: shorter than 20 px or smaller than 300 px^2.
        if crop_img.shape[0] < 20 or crop_img.shape[0] * crop_img.shape[1] < 300:
            continue
        global j
        j += 1
        directory= "output/characters"
        if not os.path.exists(directory):
            os.makedirs(directory)
        cv2.imwrite(directory+"/"+str(j)+".png", crop_img)
# -
# Driver cell: preprocess the licence image and reset the global crop counter.
pre_processed = pre_processing('licence.jpg')
image = pre_processed.copy()
# `j` numbers the character crops written by get_characters.
j = 0
# get_lines(image)
# get_words(lines[80], img)
# for i in lines:
# print(i[2] * i[3])
# get_words(i, img)
import pytesseract as pyt
import cv2
import os
import shutil
import numpy as np
from matplotlib.pyplot import imshow
from PIL import Image
import thinning
# +
def pre_processing(path):
    """Load image at *path*, denoise, binarise (adaptive Gaussian followed
    by Otsu), erode, and return the result. Writes intermediate debug
    images ("(3)denoise.jpg", "(4)thresbinarization.jpg") to disk.
    """
    # Read Image
    img = cv2.imread(path)
    # scaling
    height, width = img.shape[:2]
    # scaled = img
    # if height < 1600 and width < 1200:
    # scaled = cv2.resize(img, (2 * width, 2* height), interpolation=cv2.INTER_LINEAR)
    # # cv2.imwrite('(0)scaled.jpg', scaled)
    # Deskew step
    # deskewed = deskew(scaled)
    # cv2.imwrite("(1)deskewed.jpg", deskewed)
    # Grayscale step
    grayscaled = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # cv2.imwrite("(2)grayscale.jpg", grayscaled)
    # Noise Clearing step
    noise_cleared = cv2.fastNlMeansDenoising(grayscaled, None, 4, 7, 21)
    cv2.imwrite("(3)denoise.jpg", noise_cleared)
    # x = np.random.randn(10000) # example data, random normal distribution
    # num_bins = 50
    # n, bins, patches = plt.hist(x, num_bins, normed=1, facecolor="green", alpha=0.5)
    # plt.xlabel(r"Description of $x$ coordinate (units)")
    # plt.ylabel(r"Description of $y$ coordinate (units)")
    # plt.title(r"Histogram title here (remove for papers)")
    # plt.show();
    '''
    # GAUSSIAN's thresholding
    GBinary = cv2.adaptiveThreshold(noise_cleared,255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,15,5)
    # cv2.imwrite("(4)binarization.jpg", GBinary)
    '''
    # Otsu's thresholding after Gaussian filtering
    # blur = cv2.GaussianBlur(noise_cleared, (5, 5), 0)
    # cv2.imwrite("(4)thresbinarization.jpg", blur)
    # Two-stage binarisation: adaptive Gaussian, then Otsu on that result.
    OBinary = cv2.adaptiveThreshold(noise_cleared,255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV,15,5)
    ret3, OBinary = cv2.threshold(OBinary, 250, 255,
                                  cv2.THRESH_BINARY+ cv2.THRESH_OTSU)
    cv2.imwrite("(4)thresbinarization.jpg", OBinary)
    kernel = np.ones((3, 3), np.uint8)
    # print(type(kernel))
    # kernel = np.array([[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0],
    # [1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0]], np.uint8)
    # print(type(kernel))
    print(kernel)
    OBinary = cv2.erode(OBinary, kernel, iterations = 1)
    # if os.path.exists("output"):
    # shutil.rmtree("output")
    # os.makedirs("output")
    # cv2.imwrite("output/Preprocessed.png", OBinary)
    return OBinary
# -
# # Convert image into matrix of image rows and image columns
# +
'''
this function convert image into matrix of image rows
'''
def imgToMatrixR(img):
    """Convert a 2-D image array into a list of rows of pixel values."""
    height, width = img.shape
    matrix = []
    for r in range(height):
        matrix.append([img[r, c] for c in range(width)])
    return matrix
'''
this function convert image into matrix of image columns
'''
def imgToMatrixC(img):
    """Convert a 2-D image array into a list of columns of pixel values."""
    height, width = img.shape
    matrix = []
    for c in range(width):
        matrix.append([img[r, c] for r in range(height)])
    return matrix
'''
this function count a specific value (parameter p) in matrix
'''
def countPixel(matrix, p):
    """Count how many times value *p* occurs in each row of *matrix*."""
    counts = []
    for row in matrix:
        counts.append(row.count(p))
    return counts
# -
# # Line Segmentation
# +
def get_lines(image):
    """Find text-line contours in *image* and hand each plausible line to
    get_words as absolute (x, y, w, h) coordinates. Returns None."""
    # getting edges of the text in image
    image_edges = cv2.Canny(image, 30, 150)
    # dilating image to detect individual lines
    kernel_line = np.ones((3, 25), np.uint8)
    dilated_line = cv2.dilate(image_edges, kernel_line, iterations=1)
    # finding contours of the line
    # Bug fix: 3-value unpacking of findContours is OpenCV-3-only; [-2]
    # selects the contour list under both OpenCV 3 and 4.
    ctrs_line = cv2.findContours(
        dilated_line.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    # Top-to-bottom order by bounding-box y.
    sorted_ctrs_line = sorted(
        ctrs_line, key=lambda ctr: cv2.boundingRect(ctr)[1])
    # image = cv2.drawContours(image, sorted_ctrs_line, -1, (255,255,0), 5)
    for i, ctr_line in enumerate(sorted_ctrs_line):
        # getting coordinates of the line contour
        x_line, y_line, w_line, h_line = cv2.boundingRect(ctr_line)
        # Filter noise and card-sized blobs by area.
        if w_line * h_line < 3000 or w_line * h_line > 50000:
            continue
        # lines.append(xywh)
        get_words(x_line, y_line, w_line, h_line)
# -
# # Word Segmentation
def get_words(x, y, w, h):
    """Crop the line at (x, y, w, h) from the module-level `pre_processed`
    image, save it to output/lines_og/, and write each detected word crop
    to output/words_og/. Increments the module-level counter `j`.
    """
    cropped_line = pre_processed[y:y + h, x:x + w]
    # getting edges of the text in image
    global j
    j += 1
    directory= "output/lines_og"
    if not os.path.exists(directory):
        os.makedirs(directory)
    # cropped_word = pre_processed[adjusted_y:adjusted_y + h_word, adjusted_x:adjusted_x + w_word]
    cv2.imwrite(directory+"/"+str(j)+".png", cropped_line)
    # Edges are used directly as the "dilated" image; the dilation step
    # below is currently disabled.
    dilated_word = cv2.Canny(cropped_line, 30, 150)
    # dilating image to detect individual words
    kernel_word = np.ones((3, 10), np.uint8)
    # dilated_word = cv2.dilate(image_edges, kernel_word, iterations=1)
    # finding contours of the word
    # Bug fix: 3-value unpacking of findContours is OpenCV-3-only; [-2]
    # selects the contour list under both OpenCV 3 and 4.
    ctrs_word = cv2.findContours(
        dilated_word.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
    sorted_ctrs_word = sorted(
        ctrs_word, key=lambda ctr: cv2.boundingRect(ctr)[1])
    for i, ctr_word in enumerate(sorted_ctrs_word):
        # getting coordinates of the word contour
        x_word, y_word, w_word, h_word = cv2.boundingRect(ctr_word)
        adjusted_x = x + x_word
        adjusted_y = y + y_word
        directory= "output/words_og"
        if not os.path.exists(directory):
            os.makedirs(directory)
        cropped_word = pre_processed[adjusted_y:adjusted_y + h_word, adjusted_x:adjusted_x + w_word]
        cv2.imwrite(directory+"/"+str(i)+".png", cropped_word)
        # get_characters(adjusted_x, adjusted_y, w_word, h_word)
# # Character Segmentation
# +
def get_characters(x, y, w, h):
    """Erode + Guo-Hall-thin the word at (x, y, w, h) of `pre_processed`,
    then locate character boundaries as near-empty pixel columns.
    Writes debug crops to output/words_og|words_eroded|words_thinned/ and
    increments the module-level counter `i`. The final per-character crop
    loop is currently commented out.
    """
    cropped_word = pre_processed[y:y + h, x:x + w]
    # print(cropped_word.shape)
    global i
    i += 1
    directory= "output/words_og"
    if not os.path.exists(directory):
        os.makedirs(directory)
    cv2.imwrite(directory+"/"+str(i)+".png", cropped_word)
    kernel = np.ones((1, 1), np.uint8)
    # print(kernel)
    thinned = cv2.erode(cropped_word, kernel, iterations = 1)
    # cropped_word = cv2.Canny(cropped_word, 30, 150)
    directory= "output/words_eroded"
    if not os.path.exists(directory):
        os.makedirs(directory)
    cv2.imwrite(directory+"/"+str(i)+".png", thinned)
    cropped_word = thinning.guo_hall_thinning(thinned)
    directory= "output/words_thinned"
    if not os.path.exists(directory):
        os.makedirs(directory)
    cv2.imwrite(directory+"/"+str(i)+".png", cropped_word)
    # cv2.imshow('test', cropped_word)
    # cv2.waitKey(0)
    # cv2.destroyAllWindows()
    # get image dimension
    height, width = cropped_word.shape
    # convert image to matrix of columns
    matrix = imgToMatrixC(cropped_word)
    # count white pixels in each column
    white_counter = countPixel(matrix, 255)
    # determine potential segmentation columns (psc).
    # psc is any column contains no or one white bixel at most , last column in image is also a psc .
    psc = [0]
    for p in range(0, len(white_counter)):
        if white_counter[p] <= 4 or p >= len(white_counter) - 1:
            psc.append(p)
    # determine segmentation columns (sc)
    # sc is the average column between each sequence set of psc , average = summ of columns index/count of columns
    sc = []
    summ = 0
    count = 0
    for n in range(0, len(psc) - 1):
        summ = summ + psc[n]
        count = count + 1
        # combine each set of sequence black columns into only one sc ,last column is also a sc
        if psc[n] + 2 < psc[n + 1] or n >= len(psc) - 1:
            sc.append(int(summ / count))
            summ = 0
            count = 0
    # segment image into chars (output writing currently disabled)
    for c in range(0, len(sc) - 1):
        crop_img = cropped_word[0:height, sc[c]:sc[c + 1] + 2]
        # cv2.imshow('test', crop_img)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()
        # crop_img = clearBounds_horiz(crop_img)
        # crop_img = clearBounds_vert(crop_img)
        # crop_img = cv2.resize(crop_img, (64, 128), interpolation=cv2.INTER_LINEAR)
        # directory= "output/line " + str(lineNum)+"/word "+str(wordNum)
        # if not os.path.exists(directory):
        # os.makedirs(directory)
        # cv2.imwrite(directory+"/"+str(c)+".png", crop_img)
        # lines[lineNum].append(crop_img)
        # if c == (len(sc) - 2):
        # lines[lineNum].append(",")
# +
# '''
# this function segment words into chars
# lineNum and wordNum parameters used for naming
# segmentation technique consists of 3 steps :
# 1- define potential segmentation columns that contains no or only one white pixels.
# 2- filter potential segmentation columns by determine segmentation columns which the crop operation depends on .
# 3- segment the image into segments (chars).
# '''
# def char_segment(binary,lineNum,wordNum):
# # get image dimension
# height, width = binary.shape
# # convert image to matrix of columns
# matrix = imgToMatrixC(binary)
# # count black pixels in each column
# black_counter = countPixel(matrix,0)
# # determine potential segmentation columns (psc).
# # psc is any column contains no or one black bixel at most , last column in image is also a psc .
# psc = [0]
# for p in range(0, len(black_counter)):
# if black_counter[p] <= 1 or p >= len(black_counter) - 2:
# psc.append(p)
# # determine segmentation columns (sc)
# # sc is the average column between each sequence set of psc , average = summ of columns index/count of columns
# sc = []
# summ = 0
# count = 0
# for n in range(0, len(psc) - 1):
# summ = summ + psc[n]
# count = count + 1
# # combine each set of sequence white columns into only one sc ,last column is also a sc
# if psc[n] + 3 < psc[n + 1] or n >= len(psc) - 2:
# sc.append(int(summ / count))
# summ = 0
# count = 0
# global lines
# # segment image into chars
# for c in range(0, len(sc) - 1):
# crop_img = binary[0:height, sc[c]:sc[c + 1]+2]
# crop_img = clearBounds_horiz(crop_img)
# crop_img = clearBounds_vert(crop_img)
# crop_img = cv2.resize(crop_img, (64, 128), interpolation=cv2.INTER_LINEAR)
# directory= "output/line " + str(lineNum)+"/word "+str(wordNum)
# if not os.path.exists(directory):
# os.makedirs(directory)
# cv2.imwrite(directory+"/"+str(c)+".png", crop_img)
# lines[lineNum].append(crop_img)
# if c == (len(sc) - 2):
# lines[lineNum].append(",")
# print("HERE")
# print(lines)
# +
# from skimage.morphology import skeletonize
# +
# Experiment cell: morphological opening of a saved word crop, then
# column-projection character segmentation.
# NOTE(review): relies on a counter `i` being defined in an earlier cell —
# confirm, otherwise the str(i) imwrite calls raise NameError.
cropped_word = cv2.imread('62.png', 0)
# Vertical-bar structuring element for erosion.
kernel = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [1, 1, 0, 0, 0],
                   [1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [1, 1, 0, 0, 0]], np.uint8)
print(kernel)
eroded = cv2.erode(cropped_word, kernel, iterations = 1)
dkernel = np.array([[1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [1, 1, 0, 0, 0], [0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], np.uint8)
print(dkernel)
dilated = cv2.dilate(cropped_word, dkernel, iterations = 1)
# Dilation then Erosion
cropped_word = cv2.morphologyEx(eroded, cv2.MORPH_OPEN, kernel)
directory= "output/characters"
if not os.path.exists(directory):
    os.makedirs(directory)
cv2.imwrite(directory+"/eroded"+str(i)+".png", eroded)
cv2.imwrite(directory+"/dilated"+str(i)+".png", dilated)
cv2.imwrite(directory+"/"+str(i)+".png", cropped_word)
i += 1
height, width = cropped_word.shape
# convert image to matrix of columns
matrix = imgToMatrixC(cropped_word)
# count white pixels in each column
white_counter = countPixel(matrix, 255)
# determine potential segmentation columns (psc).
# psc is any column contains no or one white bixel at most , last column in image is also a psc .
psc = [0]
for p in range(0, len(white_counter)):
    if white_counter[p] <= 2 or p >= len(white_counter) - 1:
        psc.append(p)
# determine segmentation columns (sc)
# sc is the average column between each sequence set of psc , average = summ of columns index/count of columns
sc = []
summ = 0
count = 0
for n in range(0, len(psc) - 1):
    summ = summ + psc[n]
    count = count + 1
    # combine each set of sequence black columns into only one sc ,last column is also a sc
    if psc[n] + 3 < psc[n + 1] or n >= len(psc) - 2:
        sc.append(int(summ / count))
        summ = 0
        count = 0
# segment image into chars
for c in range(0, len(sc) - 1):
    crop_img = cropped_word[0:height, sc[c]:sc[c + 1] + 2]
    directory= "output/characters"
    if not os.path.exists(directory):
        os.makedirs(directory)
    cv2.imwrite(directory+"/"+str(i)+".png", crop_img)
    i += 1
# -
# # Thinning example
# +
import cv2
import thinning
import numpy as np
# Thinning example: Otsu-binarise then Guo-Hall-thin a cropped image.
img = cv2.imread("./cropped.jpg", 0)
# NOTE(review): `adap` is computed but never used below.
adap = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                             cv2.THRESH_BINARY, 11, 2)
ret, img = cv2.threshold(img, 200, 255,
                         cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
# img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
kernel = np.ones((4, 4), np.uint8)
# NOTE(review): this erosion result is immediately overwritten by the
# thinning call on the un-eroded `img` — presumably leftover experiment.
thinned = cv2.erode(img, kernel, iterations=1)
thinned = thinning.guo_hall_thinning(img)
resized = cv2.resize(thinned, None, fx=0.5, fy=0.5)
cv2.imwrite("./thinned.png", resized)
# -
# # Testing denoising
import cv2
import numpy as np
import thinning
# +
# Comparison cell: adaptive mean vs Gaussian thresholding, each with and
# without prior denoising; every variant is eroded, thinned, halved and shown.
img = cv2.imread("licence.jpg",0)
denoised = cv2.fastNlMeansDenoising(img, None, 6, 7, 21)
kernel = np.ones((3, 3), np.uint8)
# NOTE(review): `thinned` is computed but never used below.
thinned = cv2.erode(img, kernel, iterations = 1)
# Gaussian adaptive threshold on the denoised image.
adapgaussian = cv2.adaptiveThreshold(denoised, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
adapgaussian = cv2.erode(adapgaussian, kernel, iterations = 1)
adapgaussian = thinning.guo_hall_thinning(adapgaussian)
adapgaussian = cv2.resize(adapgaussian, None, fx = 0.5, fy = 0.5)
# Same, but on the raw (noisy) image.
noisedgaussian = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 2)
noisedgaussian = cv2.erode(noisedgaussian, kernel, iterations = 1)
noisedgaussian = thinning.guo_hall_thinning(noisedgaussian)
noisedgaussian = cv2.resize(noisedgaussian, None, fx = 0.5, fy = 0.5)
# Mean adaptive threshold, denoised.
adapmean = cv2.adaptiveThreshold(denoised, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 2)
adapmean = cv2.erode(adapmean, kernel, iterations = 1)
adapmean = thinning.guo_hall_thinning(adapmean)
adapmean = cv2.resize(adapmean, None, fx = 0.5, fy = 0.5)
# Mean adaptive threshold, raw.
noisedmean = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 2)
noisedmean = cv2.erode(noisedmean, kernel, iterations = 1)
noisedmean = thinning.guo_hall_thinning(noisedmean)
noisedmean = cv2.resize(noisedmean, None, fx = 0.5, fy = 0.5)
cv2.imshow('Denoised mean', adapmean)
cv2.waitKey(0)
cv2.imshow('noised mean', noisedmean)
cv2.waitKey(0)
cv2.imshow('Denoised gaussian', adapgaussian)
cv2.waitKey(0)
cv2.imshow('noised gaussian', noisedgaussian)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
# +
# Experiment cell: Otsu threshold + erosion of the licence image, then Canny edges.
image = cv2.imread('licence.jpg', 0)
cv2.imshow('Edges after contouring', image)
cv2.waitKey(0)
# image = cv2.GaussianBlur(image, (3, 3), 0)
# kernel = np.array([[-1, 2, -1],
# [-1, 2, -1],
# [-1, 2, -1]])
kernel = np.ones((4, 4), np.uint8)
print(kernel)
# image = cv2.dilate(image, kernel, iterations = 1)
ret, threshold = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
erosion = cv2.erode(threshold, kernel, iterations = 1)
cv2.imshow('Erosion', erosion)
cv2.waitKey()
# closing = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, kernel)
# cv2.imshow('Closing', closing)
# cv2.waitKey()
resized = cv2.resize(erosion, None, fx = 0.5, fy = 0.5)
cv2.imshow('Edges after contouring', resized)
cv2.waitKey(0)
# Edges are taken from the thresholded image, not the eroded one.
edges = cv2.Canny(threshold, 30, 200)
cv2.imshow('Edges', edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
# +
import matplotlib
import matplotlib.pyplot as plt
import skimage.io as io
# Bare strings below act as cell headings in this notebook style.
"load image data"
# Img_Original = io.imread( 'download.jpeg') # Gray image, rgb images need pre-conversion
Img_Original = cv2.imread('cropped.jpg', 0)
"Convert gray images to binary images using Otsu's method"
from skimage.filters import threshold_otsu
Otsu_Threshold = threshold_otsu(Img_Original)
# Boolean array: True (1) for dark "object" pixels below the Otsu threshold.
BW_Original = Img_Original < Otsu_Threshold # must set object region as 1, background region as 0 !
# def neighbours(x,y,image):
# "Return 8-neighbours of image point P1(x,y), in a clockwise order"
# img = image
# x_1, y_1, x1, y1 = x-1, y-1, x+1, y+1
# return [ img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1], # P2,P3,P4,P5
# img[x1][y], img[x1][y_1], img[x][y_1], img[x_1][y_1] ] # P6,P7,P8,P9
# def transitions(neighbours):
# "No. of 0,1 patterns (transitions from 0 to 1) in the ordered sequence"
# n = neighbours + neighbours[0:1] # P2, P3, ... , P8, P9, P2
# return sum( (n1, n2) == (0, 1) for n1, n2 in zip(n, n[1:]) ) # (P2,P3), (P3,P4), ... , (P8,P9), (P9,P2)
# def zhangSuen(image):
# "the Zhang-Suen Thinning Algorithm"
# Image_Thinned = image.copy() # deepcopy to protect the original image
# changing1 = changing2 = 1 # the points to be removed (set as 0)
# while changing1 or changing2: # iterates until no further changes occur in the image
# # Step 1
# changing1 = []
# rows, columns = Image_Thinned.shape[0],Image_Thinned.shape[1] # x for rows, y for columns
# for x in range(1, rows - 1): # No. of rows
# for y in range(1, columns - 1): # No. of columns
# P2,P3,P4,P5,P6,P7,P8,P9 = n = neighbours(x, y, Image_Thinned)
# if (Image_Thinned[x][y] == 1 and # Condition 0: Point P1 in the object regions
# 2 <= sum(n) <= 6 and # Condition 1: 2<= N(P1) <= 6
# transitions(n) == 1 and # Condition 2: S(P1)=1
# P2 * P4 * P6 == 0 and # Condition 3
# P4 * P6 * P8 == 0): # Condition 4
# changing1.append((x,y))
# for x, y in changing1:
# Image_Thinned[x][y] = 0
# # Step 2
# changing2 = []
# for x in range(1, rows - 1):
# for y in range(1, columns - 1):
# P2,P3,P4,P5,P6,P7,P8,P9 = n = neighbours(x, y, Image_Thinned)
# if (Image_Thinned[x][y] == 1 and # Condition 0
# 2 <= sum(n) <= 6 and # Condition 1
# transitions(n) == 1 and # Condition 2
# P2 * P4 * P8 == 0 and # Condition 3
# P2 * P6 * P8 == 0): # Condition 4
# changing2.append((x,y))
# for x, y in changing2:
# Image_Thinned[x][y] = 0
# return Image_Thinned
def neighbours_vec(image):
    """Return the eight neighbour views P2..P9 of every interior pixel of
    *image* as zero-copy slices (same ordering as the scalar
    ``neighbours`` helper commented out above)."""
    p2 = image[2:, 1:-1]
    p3 = image[2:, 2:]
    p4 = image[1:-1, 2:]
    p5 = image[:-2, 2:]
    p6 = image[:-2, 1:-1]
    p7 = image[:-2, :-2]
    p8 = image[1:-1, :-2]
    p9 = image[2:, :-2]
    return p2, p3, p4, p5, p6, p7, p8, p9
def transitions_vec(P2, P3, P4, P5, P6, P7, P8, P9):
    """Count, per pixel, the 0->1 transitions around the neighbour ring
    P2, P3, ..., P9, P2 (i.e. including the wrap-around pair P9->P2)."""
    ring = (P2, P3, P4, P5, P6, P7, P8, P9, P2)
    total = ((ring[1] - ring[0]) > 0).astype(int)
    for k in range(1, 8):
        total = total + ((ring[k + 1] - ring[k]) > 0).astype(int)
    return total
def zhangSuen_vec(image, iterations):
    """Vectorised Zhang-Suen-style thinning.

    *image* must be a 2-D integer array with object pixels == 1 and
    background == 0; it is modified in place and also returned.
    NOTE(review): ``range(1, iterations)`` performs iterations-1 passes,
    and step 1 uses an upper neighbour-sum bound of 8 (the classical
    algorithm uses 6) — both kept as-is; confirm intent before changing.
    """
    for it in range(1, iterations):
        print(it)
        # step 1
        P2, P3, P4, P5, P6, P7, P8, P9 = neighbours_vec(image)
        condition0 = image[1:-1, 1:-1]
        condition4 = P4 * P6 * P8
        condition3 = P2 * P4 * P6
        condition2 = transitions_vec(P2, P3, P4, P5, P6, P7, P8, P9) == 1
        condition1 = (2 <= P2 + P3 + P4 + P5 + P6 + P7 + P8 + P9) * (P2 + P3 + P4 + P5 + P6 + P7 + P8 + P9 <= 8)
        cond = (condition0 == 1) * (condition4 == 0) * (condition3 == 0) * (condition2 == 1) * (condition1 == 1)
        # Bug fix: the original called `numpy.where`, but the package is
        # imported as `np` at this point in the file, so it raised
        # NameError at runtime.
        changing1 = np.where(cond == 1)
        image[changing1[0] + 1, changing1[1] + 1] = 0
        # step 2
        P2, P3, P4, P5, P6, P7, P8, P9 = neighbours_vec(image)
        condition0 = image[1:-1, 1:-1]
        condition4 = P2 * P6 * P8
        condition3 = P2 * P4 * P8
        condition2 = transitions_vec(P2, P3, P4, P5, P6, P7, P8, P9) == 1
        condition1 = (2 <= P2 + P3 + P4 + P5 + P6 + P7 + P8 + P9) * (P2 + P3 + P4 + P5 + P6 + P7 + P8 + P9 <= 6)
        cond = (condition0 == 1) * (condition4 == 0) * (condition3 == 0) * (condition2 == 1) * (condition1 == 1)
        changing2 = np.where(cond == 1)
        image[changing2[0] + 1, changing2[1] + 1] = 0
    return image
"Apply the algorithm on images"
# Bug fix: the original called `zhangSuen(BW_Original)`, but the scalar
# zhangSuen implementation above exists only as commented-out code — the
# call raised NameError. Only zhangSuen_vec is defined; it mutates its
# input and needs an integer iteration budget, so pass a writable int
# copy (astype returns a new array, preserving BW_Original for display).
BW_Skeleton = zhangSuen_vec(BW_Original.astype(int), 20)
# BW_Skeleton = BW_Original
"Display the results"
fig, ax = plt.subplots(1, 2)
ax1, ax2 = ax.ravel()
ax1.imshow(BW_Original, cmap=plt.cm.gray)
ax1.set_title('Original binary image')
ax1.axis('off')
ax2.imshow(BW_Skeleton, cmap=plt.cm.gray)
ax2.set_title('Skeleton of the image')
ax2.axis('off')
plt.show()
# -
# from skimage.  # NOTE(review): incomplete import (a syntax error as written) — commented out; restore the intended `from skimage import ...` if something was meant here.
# +
# Morphological skeletonisation: repeatedly erode and accumulate the
# "top-hat" residue (img - open(img)) until the image is fully eroded.
img = cv2.imread('cropped.jpg',0)
size = np.size(img)
skel = np.zeros(img.shape,np.uint8)
ret,img = cv2.threshold(img,127,255,0)
element = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))
done = False
while( not done):
    eroded = cv2.erode(img,element)
    temp = cv2.dilate(eroded,element)
    # Residue = pixels removed by one erode/dilate (opening) round.
    temp = cv2.subtract(img,temp)
    skel = cv2.bitwise_or(skel,temp)
    img = eroded.copy()
    # Stop once erosion has removed every foreground pixel.
    zeros = size - cv2.countNonZero(img)
    if zeros==size:
        done = True
cv2.imshow("skel",skel)
cv2.waitKey(0)
cv2.destroyAllWindows()
# +
# Demo of scikit-image's skeletonize on the bundled "horse" sample image.
from skimage.morphology import skeletonize
from skimage import data
import matplotlib.pyplot as plt
from skimage.util import invert
# Invert the horse image
print(data.horse())
image = invert(data.horse())
print(image)
# perform skeletonization
skeleton = skeletonize(image)
# display results: original and skeleton side by side.
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(8, 4),
                         sharex=True, sharey=True)
ax = axes.ravel()
ax[0].imshow(image, cmap=plt.cm.gray)
ax[0].axis('off')
ax[0].set_title('original', fontsize=20)
ax[1].imshow(skeleton, cmap=plt.cm.gray)
ax[1].axis('off')
ax[1].set_title('skeleton', fontsize=20)
fig.tight_layout()
plt.show()
# +
# Edge-detect the licence plate image and draw its external contours.
image = cv2.imread('licence.jpg')
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, threshold = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY_INV)
edges = cv2.Canny(image, 50, 220)
resized = cv2.resize(edges, None, fx = 0.5, fy = 0.5)
cv2.imshow('Edges', resized)
cv2.waitKey(0)
# Find Contours
# Fix: cv2.findContours returns (image, contours, hierarchy) in OpenCV 3 but
# only (contours, hierarchy) in OpenCV 4; taking the last two elements works
# under both versions (the original 3-way unpack crashes on OpenCV 4).
contours, hierarchy = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
# NOTE(review): `edges` is single-channel, so only the first component (0) of
# the (0,255,0) colour is actually used when drawing.
cv2.drawContours(edges, contours, -1, (0,255,0), 2)
edges = cv2.resize(edges, None, fx = 0.5, fy = 0.5)
cv2.imshow('Edges after contouring', edges)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
# NOTE(review): orphaned function body -- the enclosing `def` header was lost
# from this chunk, so the bare `return` below is a syntax error as written;
# `word`, makeSquare, resize_to_pixel and characterPrediction must all come
# from the missing enclosing scope.  Logic: dilate, find external contours,
# sort left-to-right, then classify each bounding-box crop as a character.
kernel = np.ones((10, 1), np.uint8)
img_dilation = cv2.dilate(word, kernel, iterations=1)
im2, ctrs, hier = cv2.findContours(img_dilation.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
sorted_ctrs = sorted(ctrs, key=lambda ctr: cv2.boundingRect(ctr)[0])
character_list = []
for i, ctr in enumerate(sorted_ctrs):
    # Get bounding box
    x, y, w, h = cv2.boundingRect(ctr)
    #dimensions = [x, y, w, h]
    # Getting ROI
    roi = word[y:y + h, x:x + w]
    squared = makeSquare(roi)
    final = resize_to_pixel(28, squared)
    letter = characterPrediction(final)
    character_list.append(letter)
return character_list
# # Line Detection
# # Word Detection
# # Character Detection
# # Drawing Contours
import cv2
import numpy
# +
# Image with shapes
# Canny-edge the image, find all contours (two hierarchy levels), and draw
# them in green on the resized original.
image = cv2.imread('abc.jpg')
cv2.imshow('Original', image)
cv2.waitKey(0)
resized = cv2.resize(image, None, fx = 2, fy = 2)
# Grayscale
gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
#gray_blur = cv2.GaussianBlur(gray, (3, 3), 0)
# Find Canny Edges
edges = cv2.Canny(gray, 30, 200)
cv2.imshow('Edges', edges)
cv2.waitKey(0)
# Find Contours
# NOTE(review): the 3-value unpack is the OpenCV 3 signature; OpenCV 4's
# findContours returns only (contours, hierarchy) -- confirm the cv2 version.
_, contours, hierarchy = cv2.findContours(edges, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
cv2.imshow('Edges after contouring', edges)
cv2.waitKey()
print('Number of Contours found = ' + str(len(contours)))
#contours = np.array(contours).reshape((-1,1,2)).astype(np.int32)
cv2.drawContours(resized, contours, -1, (0,255,0), 2)
cv2.imshow('Contours', resized)
cv2.waitKey(0)
cv2.destroyAllWindows()
# -
def pre_processing(path):
    """Load *path* as grayscale, binarize it just below the dominant
    background intensity, denoise, strip underlines, then hand the result to
    character_segmentation."""
    grayscale = cv2.imread(path, 0)
    showImage('main image', grayscale)
    # the histogram peak is the page background; threshold slightly below it
    hist, bins = np.histogram(grayscale, 256, [0, 256])
    _, binary = cv2.threshold(grayscale, np.argmax(hist) - 15, 255, cv2.THRESH_BINARY)
    # (deskew step intentionally disabled, as in the original notebook)
    # Noise Clearing step
    denoised = cv2.fastNlMeansDenoising(binary, None, 4, 7, 21)
    # remove underlines, then segment what remains into characters
    character_segmentation(lineRemoval(denoised))
def showImage(title, img):
    """Display *img* in a window named *title*; block until any key is
    pressed, then close all OpenCV windows."""
    cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# +
def character_segmentation(img):
    """Find line-sized blobs in a binarized page, normalize each to the
    model's 32x32 input, and run the classifier on it.

    Fix: the original first dilated with a 1x1 kernel and immediately
    overwrote the result with the 40x1 dilation, so the first call was dead
    work and has been removed.
    """
    # contours shorter than a third of the page height are ignored as noise
    height = img.shape[0] / 3
    img = np.invert(img)
    # tall thin kernel: merges vertically adjacent components into one blob
    kernel = np.ones((40, 1))
    dilated = cv2.dilate(img, kernel, iterations = 1)
    canny = cv2.Canny(dilated, 30, 150)
    showImage('image', canny)
    # NOTE(review): the 3-value unpack is the OpenCV 3 findContours signature;
    # OpenCV 4 returns only (contours, hierarchy) -- confirm the cv2 version.
    _, ctrs_line, _ = cv2.findContours(dilated.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # sort blobs into left-to-right reading order
    sorted_ctrs_line = sorted(ctrs_line, key=lambda ctr: cv2.boundingRect(ctr)[0])
    for i, ctr_line in enumerate(sorted_ctrs_line):
        x_line, y_line, w_line, h_line = cv2.boundingRect(ctr_line)
        if h_line < height:
            continue
        cropped_line = img[y_line:y_line + h_line, x_line:x_line + w_line]
        # scale to 20x20 then pad 6 px per side -> the 32x32 model input
        cropped_line = cv2.resize(cropped_line, (20, 20), None)
        cropped_line = cv2.copyMakeBorder(cropped_line, 6, 6, 6, 6, cv2.BORDER_CONSTANT)
        showImage('image', cropped_line)
        # NOTE(review): predict_classes was removed in recent Keras versions;
        # if this breaks, use np.argmax(model.predict(...), axis=-1) instead.
        output = model.predict_classes(cropped_line.reshape(-1, 32, 32, 1))
        probab = model.predict(cropped_line.reshape(-1, 32, 32, 1))
        print(probab, output)
# +
# Load a pre-trained seven-segment-display model: architecture from JSON,
# weights from HDF5, then compile for evaluation.
from keras.models import model_from_json
import tensorflow as tf
json_file = open('../../Seven_Segment_Display_Prediction/models/model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
model = model_from_json(loaded_model_json)
# load weights into new model
model.load_weights("../../Seven_Segment_Display_Prediction/models/model.h5")
print("Loaded Model from disk")
# compile and evaluate model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# -
# NOTE(review): the default-graph handle fetched on the first line is
# immediately overwritten by a brand-new (empty) tf.Graph -- only the second
# assignment takes effect; confirm which graph was actually intended.
graph = tf.get_default_graph()
graph = tf.Graph()
import cv2
import os
import shutil
import numpy as np
from matplotlib.pyplot import imshow
from PIL import Image
# Run the OCR preprocessing pipeline on a sample image.
pre_processing('ignore/test1.jpg')
# # Potential Segmentation Columns
# +
import cv2
import os
import shutil
import numpy as np
from matplotlib.pyplot import imshow
from PIL import Image
'''
this function convert image into matrix of image rows
'''
def imgToMatrixR(img):
    """Return the 2-D image as a plain list of rows, each row being a list of
    its pixel values."""
    rows, cols = img.shape
    # one Python list per image row
    return [[img[r, c] for c in range(cols)] for r in range(rows)]
'''
this function convert image into matrix of image columns
'''
def imgToMatrixC(img):
    """Return the 2-D image as a list of columns, each column being a list of
    its pixel values.

    Fix: the original read `height, width = int(img.shape[1] / 2)`, which
    raises TypeError (cannot unpack an int) -- and even the half-width value
    would be wrong, since callers (clearBounds_vert, word_segment,
    char_segment) index the result across the full image width.
    """
    # get dimensions
    height, width = img.shape
    matrix = []
    # getting pixels values for all columns
    for i in range(0, width):
        col = []
        for j in range(0, height):
            col.append(img[j, i])
        matrix.append(col)
    return matrix
'''
this function count a specific value (parameter p) in matrix
'''
def countPixel(matrix,p):
    """Return, for each row (or column) list in *matrix*, the number of
    entries equal to *p*."""
    return [line.count(p) for line in matrix]
'''
this function searches for underlines and replace the pixels that formed it with white pixels
'''
def lineRemoval(img):
    """Erase long horizontal runs of black pixels (underlines) from *img*.

    Scans each row for connected runs of black (0) pixels; any run longer
    than min_length is treated as an underline and overwritten with white,
    together with the two rows above and three rows below it.  Returns the
    modified image.
    """
    min_length=140
    matrix = imgToMatrixR(img)
    for i in range(0, len(matrix)):
        row=matrix[i]
        # start/end bracket the current black run; conn is its length
        start=-1
        end=0
        conn=0
        for j in range(0, len(row)):
            if (row[j]==0):
                conn=conn+1
                # first point in the line .
                if( start == -1 ):
                    start = j
                # last point in the row .
                if( j == len(row)-1 ):
                    end =j
                    if (conn > min_length):
                        img[i-2:i+4, start:end+1] = 255
                    start = -1
                    end = 0
                    conn = 0
            # end of the line
            else:
                end =j
                if (conn >min_length):
                    img[i-2:i+4, start:end+1] = 255
                start = -1
                end = 0
                conn = 0
    # showImage('after line', img)
    return img
'''
this function clears all horizontal boundaries around the input image
'''
def clearBounds_horiz(img):
    """Strip blank (essentially all-white) rows from the top and bottom of
    *img* and return the cropped image.

    A row counts as blank when it has at least width-1 white (255) pixels;
    white_counter is computed once on the original image and indexed by the
    original row positions in both loops.
    """
    # showImage('before horizontal', img)
    height, width = img.shape
    matrix = imgToMatrixR(img)
    white_counter = countPixel(matrix,255)
    # trim blank rows from the top, one slice at a time
    for i in range (0,height):
        if(white_counter[i]>= width-1):
            img = img[1:height,0:width]
        else:
            break
    new_height, width = img.shape
    # walk upward from the original bottom row, trimming blank rows
    for i in range (1,height):
        if(white_counter[height-i]>= width-1):
            img = img[0:new_height-i,0:width]
        else:
            break
    # showImage('after horizontal', img)
    return img
'''
this function clears all vertical boundaries around the input image
'''
def clearBounds_vert(img):
    """Strip blank (essentially all-white) columns from the left and right of
    *img* and return the cropped image; column-wise twin of
    clearBounds_horiz."""
    # showImage('before vertical', img)
    height, width = img.shape
    matrix = imgToMatrixC(img)
    white_counter = countPixel(matrix,255)
    # trim blank columns from the left
    for i in range (0,width):
        if(white_counter[i]>= height-1):
            img = img[0:height,1:width]
        else:
            break
    height, new_width = img.shape
    # walk leftward from the original rightmost column
    for i in range (1,width):
        if(white_counter[width-i]>= height-1):
            img = img[0:height,0:new_width-i]
        else:
            break
    # showImage('after vertical', img)
    return img
'''
this function makes lines of text perfectly horizontal.
'''
def deskew(img):
    """Rotate a BGR image so its text lines are horizontal.

    Estimates the skew from the minimum-area rectangle enclosing every
    foreground pixel of the Otsu-binarized, inverted grayscale image, then
    rotates about the image centre with replicated borders.
    """
    inverted = cv2.bitwise_not(cv2.cvtColor(img, cv2.COLOR_BGR2GRAY))
    binary = cv2.threshold(inverted, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # minAreaRect reports angles in (-90, 0]; map to the correction angle
    foreground = np.column_stack(np.where(binary > 0))
    rect_angle = cv2.minAreaRect(foreground)[-1]
    correction = -(90 + rect_angle) if rect_angle < -45 else -rect_angle
    rows, cols = img.shape[:2]
    rotation = cv2.getRotationMatrix2D((cols // 2, rows // 2), correction, 1.0)
    return cv2.warpAffine(img, rotation, (cols, rows),
                          flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
'''
this function applies a set of preprocessing operations to the input image, function output is a binarized image.
operations are scaling , grayscaling , noise clearing , gaussian's thresholding and remove underlines.
finally the function calls lineSegment function to segment the image into lines.
'''
# def pre_processing(path):
# # Read Image
# img = cv2.imread(path)
# # scaling
# height, width = img.shape[:2]
# scaled = img
# if height < 1600 and width < 1200:
# scaled = cv2.resize(img, (2 * width, 2* height), interpolation=cv2.INTER_LINEAR)
# # cv2.imwrite('(0)scaled.jpg', scaled)
# # Deskew step
# deskewed = deskew(scaled)
# # cv2.imwrite("(1)deskewed.jpg", deskewed)
# # Grayscale step
# grayscaled = cv2.cvtColor(deskewed, cv2.COLOR_BGR2GRAY)
# # cv2.imwrite("(2)grayscale.jpg", grayscaled)
# # Noise Clearing step
# noise_cleared = cv2.fastNlMeansDenoising(grayscaled, None, 4, 7, 21)
# # cv2.imwrite("(3)denoise.jpg", noise_cleared)
# '''
# # GAUSSIAN's thresholding
# GBinary = cv2.adaptiveThreshold(noise_cleared,255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,15,5)
# # cv2.imwrite("(4)binarization.jpg", GBinary)
# '''
# # Otsu's thresholding after Gaussian filtering
# blur = cv2.GaussianBlur(noise_cleared, (5, 5), 0)
# ret3, OBinary = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# # cv2.imwrite("(4)binarization.jpg", OBinary)
# if os.path.exists("output"):
# shutil.rmtree("output")
# os.makedirs("output")
# cv2.imwrite("output/Preprocessed.png", OBinary)
# line_segment(OBinary)
# return lines
'''
this function segment the binarized image into lines
segmentation technique consists of 3 steps :
1- define potential segmentation rows that contains no or 20 black pixels at most.
2- filter the potential segmentation rows and determine only segmentation rows which the crop operation depends on.
3- segment the image into lines.
'''
def line_segment(binary):
    """Segment the binarized page into text lines.

    1. Potential segmentation rows (psr): rows with at most 2 black pixels
       (the last rows always qualify).
    2. Collapse each run of consecutive psr into a single segmentation row.
    3. Crop between consecutive segmentation rows, clean each crop, write it
       to output/line <c>/, and recurse into word_segment.  Appends one list
       per line (terminated by ",") to the global `lines`.
    """
    # get dimensions
    height, width = binary.shape
    # convert image into matrix of rows
    matrix = imgToMatrixR(binary)
    # count black pixels in each row
    black_counter =countPixel(matrix,0)
    # determine potential segmentation rows (psr)
    # psr is any row contains 0-2 black pixels, last row in image is also a psr .
    psr=[0]
    for i in range(0, len(black_counter)):
        if black_counter[i] <= 2 or i >= len(black_counter) - 2:
            psr.append(i)
    # determine segmentation rows sr
    # sr is the index where we segment the image
    sr = []
    count = 0
    for n in range(0, len(psr) - 1):
        # combine each set of sequence white rows into only one sr
        if psr[n] + 3 < psr[n + 1] or n >= len(psr) - 2:
            sr.append(psr[n - int(count/2)])
            count=0
        else:
            count += 1
    # segment image into lines
    global lines
    for c in range(0, len(sr) - 1):
        crop_img = binary[sr[c]:sr[c + 1], 0:width]
        img = lineRemoval(crop_img)
        img = clearBounds_horiz(img)
        img = clearBounds_vert(img)
        directory="output/line "+str(c)
        if not os.path.exists(directory):
            os.makedirs(directory)
        cv2.imwrite("output/line "+str(c)+"/line " + str(c) + ".png", img)
        # Call function to Segment the line into words then chars
        lines.append([])
        word_segment(img, c)
        lines[c].append(",")
'''
this function segment lines into columns then words
lineNum parameter used for naming, flag parameter refers to the type of segmentation.
segmentation technique consists of 4 steps :
1- define potential segmentation columns that contains no or only two black pixels.
2- filter the potential segmentation columns,determine only segmentation columns which the crop operation depends on
3- segment the image into segments (column)
4- recursive call to segment columns into words
'''
def word_segment(binary, lineNum):
    """Segment one text-line image into words.

    Columns with at most 2 black pixels are potential segmentation columns;
    a run of at least word_threshold consecutive blank columns is treated as
    an inter-word gap (shorter runs separate characters).  Each word crop is
    written to output/line <lineNum>/word <c>/ and passed to char_segment.
    """
    word_threshold=7
    # get image dimension
    height, width = binary.shape
    # convert image to matrix of columns
    matrix = imgToMatrixC(binary)
    # count black pixels in each column
    black_counter = countPixel(matrix,0)
    # determine potential segmentation columns (psc).
    # psc is any column contains no or two black pixel at most , last column in image is also a psc .
    psc = [0]
    for p in range(0, len(black_counter)):
        if black_counter[p] <= 2 or p >= len(black_counter) - 2:
            psc.append(p)
    # determine segmentation columns (sc)
    # sc is the index where we segment the image .
    sc = []
    count = 0
    for n in range(0, len(psc) - 1):
        # first column is sc
        if n == 0:
            sc.append(psc[n])
            count = 0
        # last column is also sc
        elif n >= len(psc) - 2:
            sc.append(psc[n])
            count = 0
        # combine each set of sequence white columns into only one sc
        elif psc[n] + 3 < psc[n + 1] :
            # space between words >= threshold value, low values is a space between chars not words.
            if count >= word_threshold :
                sc.append(psc[n -int(count/2)])
            count = 0
        else:
            count += 1
    for c in range(0, len(sc) - 1):
        crop_img = binary[0:height, sc[c]:sc[c + 1] + 2]
        directory = "output/line " + str(lineNum)+"/word " + str(c)
        if not os.path.exists(directory):
            os.makedirs(directory)
        cv2.imwrite(directory + "/word " + str(c) + ".png", crop_img)
        char_segment(crop_img, lineNum, c)
'''
this function segment words into chars
lineNum and wordNum parameters used for naming
segmentation technique consists of 3 steps :
1- define potential segmentation columns that contains no or only one black pixels.
2- filter potential segmentation columns by determine segmentation columns which the crop operation depends on .
3- segment the image into segments (chars).
'''
def char_segment(binary,lineNum,wordNum):
    """Segment one word image into characters.

    Columns with at most 1 black pixel are potential segmentation columns;
    each run of consecutive ones is collapsed to its average index.  Every
    character crop is cleaned, resized to 64x128, written to
    output/line <lineNum>/word <wordNum>/, and appended to the global
    `lines[lineNum]` (with a "," after the last character of the word).
    """
    # get image dimension
    height, width = binary.shape
    # convert image to matrix of columns
    matrix = imgToMatrixC(binary)
    # count black pixels in each column
    black_counter = countPixel(matrix,0)
    # determine potential segmentation columns (psc).
    # psc is any column contains no or one black pixel at most , last column in image is also a psc .
    psc = [0]
    for p in range(0, len(black_counter)):
        if black_counter[p] <= 1 or p >= len(black_counter) - 2:
            psc.append(p)
    # determine segmentation columns (sc)
    # sc is the average column between each sequence set of psc , average = summ of columns index/count of columns
    sc = []
    summ = 0
    count = 0
    for n in range(0, len(psc) - 1):
        summ = summ + psc[n]
        count = count + 1
        # combine each set of sequence white columns into only one sc ,last column is also a sc
        if psc[n] + 3 < psc[n + 1] or n >= len(psc) - 2:
            sc.append(int(summ / count))
            summ = 0
            count = 0
    global lines
    # segment image into chars
    for c in range(0, len(sc) - 1):
        crop_img = binary[0:height, sc[c]:sc[c + 1]+2]
        crop_img = clearBounds_horiz(crop_img)
        crop_img = clearBounds_vert(crop_img)
        crop_img = cv2.resize(crop_img, (64, 128), interpolation=cv2.INTER_LINEAR)
        directory= "output/line " + str(lineNum)+"/word "+str(wordNum)
        if not os.path.exists(directory):
            os.makedirs(directory)
        cv2.imwrite(directory+"/"+str(c)+".png", crop_img)
        lines[lineNum].append(crop_img)
        if c == (len(sc) - 2):
            lines[lineNum].append(",")
            print("HERE")
            print(lines)
# Preprocessing function Calling
# Global accumulator filled by line_segment/char_segment: one list of
# character crops per text line, with "," separators between words/lines.
lines =[]
# lines = g("Test\\testcase1.png")
# -
|
Preprocessing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 4 - Clustering Models
# ## Segment 1 - K-means method
# ### Setting up for clustering analysis
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
from sklearn.preprocessing import scale
import sklearn.metrics as sm
from sklearn.metrics import confusion_matrix, classification_report
# -
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
# %matplotlib inline
plt.figure(figsize=(7,4))
# +
# Load iris; z-score the features, keep the true labels for evaluation.
iris = datasets.load_iris()
X = scale(iris.data)
y = pd.DataFrame(iris.target)
variable_names = iris.feature_names
X[0:10]
# -
# ## Building and running your model
# +
clustering = KMeans(n_clusters=3, random_state=5)
clustering.fit(X)
# -
# ## Plotting your model outputs
iris_df = pd.DataFrame(iris.data)
iris_df.columns = ['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width']
y.columns = ['Targets']
# +
# Side-by-side petal scatter: true classes vs raw k-means labels.
color_theme = np.array(['darkgray', 'lightsalmon', 'powderblue'])
plt.subplot(1,2,1)
plt.scatter(x=iris_df.Petal_Length, y=iris_df.Petal_Width, c=color_theme[iris.target], s=50)
plt.title('Ground Truth Classification')
plt.subplot(1,2,2)
plt.scatter(x=iris_df.Petal_Length, y=iris_df.Petal_Width, c=color_theme[clustering.labels_], s=50)
plt.title('K-Means Classification')
# +
# Cluster ids are arbitrary; permute them (0->2, 1->0, 2->1) so colours line
# up with the ground-truth classes before comparing plots.
relabel = np.choose(clustering.labels_, [2, 0, 1]).astype(np.int64)
plt.subplot(1,2,1)
plt.scatter(x=iris_df.Petal_Length, y=iris_df.Petal_Width, c=color_theme[iris.target], s=50)
plt.title('Ground Truth Classification')
plt.subplot(1,2,2)
plt.scatter(x=iris_df.Petal_Length, y=iris_df.Petal_Width, c=color_theme[relabel], s=50)
plt.title('K-Means Classification')
# -
# ## Evaluate your clustering results
# + tags=[]
print(classification_report(y, relabel))
|
Pt_2/04_01_K-means_method/04_01_end.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 1. Split into train and test data
# 2. Train model on train data normally
# 3. Take test data and duplicate into test prime
# 4. Drop first visit from test prime data
# 5. Get predicted delta from test prime data. Compare to delta from test data. We know the difference (epsilon) because we dropped actual visits. What percent of time is test delta < test prime delta?
# 6. Restrict it only to patients with lot of visits. Is this better?
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pickle
def clean_plot():
    """Strip the frame from the current axes, keep bottom/left ticks, and
    draw a grid."""
    ax = plt.subplot(111)
    for side in ("top", "bottom", "right", "left"):
        ax.spines[side].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    plt.grid()
import matplotlib.pylab as pylab
# Bump all font sizes for publication-quality figures.
params = {'legend.fontsize': 'x-large',
          # 'figure.figsize': (10,6),
          'axes.labelsize': 'x-large',
          'axes.titlesize':'x-large',
          'xtick.labelsize':'x-large',
          'ytick.labelsize':'x-large'}
pylab.rcParams.update(params)
# +
import sys
import torch
sys.path.append('../data')
from load import chf
from data_utils import parse_data
from synthetic_data import load_piecewise_synthetic_data
sys.path.append('../model')
from models import Sublign
from run_experiments import get_hyperparameters
# -
def make_test_prime(test_data_dict_raw, gap=1):
    """Return (copy of test data with the first `gap` visits dropped, eps list).

    Each patient's visits are shifted `gap` slots earlier, times are re-zeroed
    at the (old) second visit, and the vacated trailing slots are filled with
    the -1000 sentinel / zero mask.  eps_lst[i] is the time between patient
    i's first and second visits (the information destroyed by the drop).

    Fix: the original signature took `drop_first_T`, but the body (and the
    call site below, `make_test_prime(test_data_dict, gap=1)`) used an
    undefined name `gap`, so calling it raised NameError/TypeError.
    """
    import copy  # local import: `copy` is not imported at this point in the notebook

    # deep-copy so the caller's arrays are untouched
    test_data_dict = copy.deepcopy(test_data_dict_raw)
    eps_lst = list()

    X = test_data_dict['obs_t_collect']
    Y = test_data_dict['Y_collect']
    M = test_data_dict['mask_collect']

    N_patients = X.shape[0]
    N_visits = X.shape[1]

    for i in range(N_patients):
        eps_i = X[i,1,0] - X[i,0,0]
        first_visit = X[i,1,0]
        # move all visits down (essentially destroying the first visit)
        for j in range(N_visits-gap):
            X[i,j,0] = X[i,j+gap,0] - first_visit
            Y[i,j,:] = Y[i,j+gap,:]
            M[i,j,:] = M[i,j+gap,:]
        # pad the now-empty trailing slots with the sentinel / zero mask
        for g in range(1,gap+1):
            X[i,N_visits-g,0] = int(-1000)
            Y[i,N_visits-g,:] = int(-1000)
            M[i,N_visits-g,:] = 0.
        eps_lst.append(eps_i)
    return test_data_dict, eps_lst
# +
# Load the CHF dataset and split it into train/test loaders; the commented
# lines restore a previously trained Sublign model and score its deltas on
# the original and perturbed test sets.
data = chf()
max_visits = 38
shuffle = True
num_output_dims = data.shape[1] - 4
data_loader, collect_dict, unique_pid = parse_data(data.values, max_visits=max_visits)
train_data_loader, train_data_dict, test_data_loader, test_data_dict, test_pid, unique_pid = parse_data(data.values,
                                                                                                        max_visits=max_visits, test_per=0.2,
                                                                                                        shuffle=shuffle)
# model = Sublign(10, 20, 50, dim_biomarkers=num_output_dims, sigmoid=True, reg_type='l1', auto_delta=True,
#                 max_delta=5, learn_time=True, device=torch.device('cuda'))
# # model.fit(data_loader, data_loader, args.epochs, 0.01, verbose=args.verbose,fname='runs/chf.pt',eval_freq=25)
# fname='../model/chf_good.pt'
# model.load_state_dict(torch.load(fname,map_location=torch.device('cuda')))
test_p_data_dict, eps_lst = make_test_prime(test_data_dict, gap=1)
# test_deltas = model.get_deltas(test_data_dict).detach().numpy()
# test_p_deltas = model.get_deltas(test_p_data_dict).detach().numpy()
# -
print(num_output_dims)
# +
# def make_test_prime(test_data_dict_raw, drop_first_T=1.):
# Scratch variant of make_test_prime: drop every visit that occurs before a
# time threshold (rather than a fixed number of visits), re-zeroing time at
# the first surviving visit; patients left with no visits are removed.
drop_first_T = 0.5
# drop first year
test_data_dict_new = copy.deepcopy(test_data_dict)
eps_lst = list()
X = test_data_dict_new['obs_t_collect']
Y = test_data_dict_new['Y_collect']
M = test_data_dict_new['mask_collect']
N_patients = X.shape[0]
N_visits = X.shape[1]
remove_idx = list()
X[X == -1000] = np.nan
for i in range(N_patients):
    # NOTE(review): counts entries below the hard-coded 0.5, not drop_first_T
    # -- presumably intentional in this scratch cell; confirm.
    N_visits_under_thresh = (X[i] < 0.5).sum()
    gap = N_visits_under_thresh
    first_valid_visit = X[i,N_visits_under_thresh,0]
    eps_i = X[i,N_visits_under_thresh,0]
    for j in range(N_visits-N_visits_under_thresh):
        X[i,j,0] = X[i,j+gap,0] - first_valid_visit
        Y[i,j,:] = Y[i,j+gap,:]
        M[i,j,:] = M[i,j+gap,:]
    for g in range(1,N_visits_under_thresh+1):
        X[i,N_visits-g,0] = np.nan
        Y[i,N_visits-g,:] = np.nan
        M[i,N_visits-g,:] = 0.
    if np.isnan(X[i]).all():
        remove_idx.append(i)
    else:
        eps_lst.append(eps_i)
keep_idx = [i for i in range(N_patients) if i not in remove_idx]
X = X[keep_idx]
Y = Y[keep_idx]
M = M[keep_idx]
print('Removed %d entries' % len(remove_idx))
X[np.isnan(X)] = -1000
# eps_lst.append(eps_i)
# return test_data_dict_new, eps_lst
# -
# Scratch / inspection lines left over from debugging the visit-dropping code.
eps_lst
X[0]
first_valid_visit
# +
test_data_dict_new = copy.deepcopy(test_data_dict)
X = test_data_dict_new['obs_t_collect']
Y = test_data_dict_new['Y_collect']
M = test_data_dict_new['mask_collect']
X[X == -1000] = np.nan
i = 1
N_visits_under_thresh = (X[i] < 0.5).sum()
# for j in range(N_visits-N_visits_under_thresh):
#     X[i,j,0] = X[i,j+gap,0] - first_visit
#     Y[i,j,:] = Y[i,j+gap,:]
#     M[i,j,:] = M[i,j+gap,:]
# for g in range(1,N_visits_under_thresh+1):
#     X[i,N_visits-g,0] = np.nan
#     Y[i,N_visits-g,:] = np.nan
#     M[i,N_visits-g,:] = 0.
# if np.isnan(X[i]).all():
#     print('yes')
#     remove_idx.append(i)
# -
(X[1] < 0.5).sum()
N_visits_under_thresh
N_visits_under_thresh
len(remove_idx)
X[X == -1000] = np.nan
for i in range(10):
    print(X[i].flatten())
remove_idx
X[0][:10]
plt.hist(X.flatten())
X.max()
Y[1][:10]
# NOTE(review): the next line indexes an empty-string key and raises KeyError
# at run time -- it looks like an abandoned inspection line.
test_data_dict_new['']
# Reload cached experiment results (deltas on the test and perturbed sets).
f = open('chf_experiment_results.pk', 'rb')
results = pickle.load(f)
test_deltas = results['test_deltas']
test_p_deltas = results['test_p_deltas']
eps_lst = results['eps_lst']
test_data_dict = results['test_data_dict']
f.close()
test_data_dict['obs_t_collect'][0].shape
# +
# get num of visits per patient
num_visits_patient_lst = list()
for i in test_data_dict['obs_t_collect']:
    num_visits = (i!=-1000).sum()
    num_visits_patient_lst.append(num_visits)
num_visits_patient_lst = np.array(num_visits_patient_lst)
# -
freq_visit_idx = np.where(num_visits_patient_lst > 10)[0]
test_p_deltas[freq_visit_idx]
test_deltas[freq_visit_idx]
# fraction of patients whose delta grew after dropping the first visit
np.mean(np.array(test_p_deltas - test_deltas) > 0)
test_p_deltas[:20]
clean_plot()
plt.plot(eps_lst, test_p_deltas - test_deltas, '.')
plt.xlabel('Actual eps')
plt.ylabel('Estimated eps')
# plt.savefig('')
# +
import copy
def make_test_prime(test_data_dict_raw, gap=1):
    """Return (copy of the test data with the first `gap` visits dropped,
    list of per-patient gaps between visit 0 and visit 1).

    Visits are shifted `gap` slots earlier with time re-zeroed at the old
    second visit; vacated trailing slots get the -1000 sentinel / zero mask.
    """
    shifted = copy.deepcopy(test_data_dict_raw)
    eps_lst = []
    times = shifted['obs_t_collect']
    values = shifted['Y_collect']
    masks = shifted['mask_collect']
    n_patients, n_visits = times.shape[0], times.shape[1]
    for p in range(n_patients):
        # record the information destroyed by the drop (visit1 - visit0)
        eps_lst.append(times[p, 1, 0] - times[p, 0, 0])
        new_origin = times[p, 1, 0]
        # slide every visit `gap` slots earlier, re-zeroed at the new origin
        for v in range(n_visits - gap):
            times[p, v, 0] = times[p, v + gap, 0] - new_origin
            values[p, v, :] = values[p, v + gap, :]
            masks[p, v, :] = masks[p, v + gap, :]
        # sentinel-fill the vacated trailing slots
        for g in range(1, gap + 1):
            times[p, n_visits - g, 0] = int(-1000)
            values[p, n_visits - g, :] = int(-1000)
            masks[p, n_visits - g, :] = 0.
    return shifted, eps_lst
# -
# Sanity check: compare one patient's biomarker series before and after
# dropping the first visit.
t_prime_dict, eps_lst = make_test_prime(test_data_dict)
t_prime_dict['Y_collect'][1,:,0]
test_data_dict['Y_collect'][1,:,0]
# ## Plot successful model
# +
import argparse
import numpy as np
import pickle
import sys
import torch
import copy
from scipy.stats import pearsonr
import matplotlib.pyplot as plt
from run_experiments import get_hyperparameters
from models import Sublign
sys.path.append('../data')
from data_utils import parse_data
from load import load_data_format
sys.path.append('../evaluation')
from eval_utils import swap_metrics
# -
# Build a dense time grid (101 points in [0, 10], keyed by their one-decimal
# string form) onto which biomarkers will be re-gridded below.
train_data_dict['Y_collect'].shape
train_data_dict['t_collect'].shape
new_Y = np.zeros((600,101,3))
val_idx_dict = {'%.1f' % j: i for i,j in enumerate(np.linspace(0,10,101))}
train_data_dict['obs_t_collect'].max()
# +
# Re-grid each patient's 3 biomarkers onto the fixed 101-point time axis,
# looking up grid slots by the rounded true visit times; unobserved grid
# cells stay 0.
rounded_t = np.round(train_data_dict['t_collect'],1)
N, M, _ = rounded_t.shape
for i in range(N):
    for j in range(M):
        val = rounded_t[i,j,0]
        # try:
        idx = val_idx_dict['%.1f' % val]
        for k in range(3):
            new_Y[i,idx,k] = train_data_dict['Y_collect'][i,j,k]
        # except:
        #     print(val)
# -
new_Y.shape
# sparsity of the re-gridded tensor
(new_Y == 0).sum() / (600*101*3)
# +
# save the files for comparing against SPARTan baseline
# (one CSV per biomarker dimension of the re-gridded tensor)
for i in range(3):
    a = new_Y[:,:,i]
    # Fix: the keyword was corrupted to "deliREDACTEDer" by a redaction
    # artifact; np.savetxt's parameter is `delimiter`.
    np.savetxt("data1_dim%d.csv" % i, a, delimiter=",")
# +
# Fix: adjusted_rand_score was used here but only imported in the NEXT cell,
# raising NameError on a fresh kernel; the import now precedes its first use.
from sklearn.metrics import adjusted_rand_score
true_labels = train_data_dict['s_collect'][:,0]
guess_labels = np.ones(600)
# ARI of an all-ones (single cluster) guess, as a chance baseline
adjusted_rand_score(true_labels,guess_labels)
# a.shape
# +
# Train SubNoLign (alignment disabled: max_delta=0, learn_time=False) on
# synthetic data format 1 and report the clustering ARI.
data_format_num = 1
# C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
C
data = load_data_format(data_format_num, 0, cache=True)
train_data_loader, train_data_dict, _, _, test_data_loader, test_data_dict, valid_pid, test_pid, unique_pid = parse_data(data.values, max_visits=4, test_per=0.2, valid_per=0.2, shuffle=False)
model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=True, reg_type='l1', auto_delta=False, max_delta=0, learn_time=False, beta=0.00)
model.fit(train_data_loader, test_data_loader, 800, lr, fname='runs/data%d_chf_experiment.pt' % (data_format_num), eval_freq=25)
z = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])
# fname='runs/data%d_chf_experiment.pt' % (data_format_num)
# model.load_state_dict(torch.load(fname))
nolign_results = model.score(train_data_dict, test_data_dict)
print('ARI: %.3f' % nolign_results['ari'])
# -
print(anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr)
# +
# Train the full SubLign model (alignment enabled: auto_delta, learn_time)
# on the same loaders and report the clustering ARI for comparison.
data_format_num = 1
# C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=True, reg_type='l1', auto_delta=True, max_delta=5, learn_time=True, beta=0.01)
model.fit(train_data_loader, test_data_loader, 800, lr, fname='runs/data%d.pt' % (data_format_num), eval_freq=25)
z = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])
# fname='runs/data%d_chf_experiment.pt' % (data_format_num)
# model.load_state_dict(torch.load(fname))
results = model.score(train_data_dict, test_data_dict)
print('ARI: %.3f' % results['ari'])
# +
# model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=True, reg_type='l1', auto_delta=True, max_delta=5, learn_time=True, b_vae=0.)
# model.fit(train_data_loader, test_data_loader, 800, lr, fname='runs/data%d_chf_experiment.pt' % (data_format_num), eval_freq=25)
# z = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])
# # fname='runs/data%d_chf_experiment.pt' % (data_format_num)
# # model.load_state_dict(torch.load(fname))
# results = model.score(train_data_dict, test_data_dict)
# print('ARI: %.3f' % results['ari'])
# +
# Visualize latent space (change configs above)
# Embed the test-set latent means with t-SNE and colour points by their
# true subtype labels.
X = test_data_dict['obs_t_collect']
Y = test_data_dict['Y_collect']
M = test_data_dict['mask_collect']
test_z, _ = model.get_mu(X,Y)
test_z = test_z.detach().numpy()
test_subtypes = test_data_dict['s_collect']
from sklearn.manifold import TSNE
z_tSNE = TSNE(n_components=2).fit_transform(test_z)
test_s0_idx = np.where(test_subtypes==0)[0]
test_s1_idx = np.where(test_subtypes==1)[0]
clean_plot()
plt.plot(z_tSNE[test_s0_idx,0],z_tSNE[test_s0_idx,1],'.')
plt.plot(z_tSNE[test_s1_idx,0],z_tSNE[test_s1_idx,1],'.')
# plt.title('\nNELBO (down): %.3f, ARI (up): %.3f\n Config: %s\nColors = true subtypes' %
#           (nelbo, ari, configs))
plt.show()
# +
def sigmoid_f(x, beta0, beta1):
    """Logistic curve: 1 / (1 + exp(-(beta0 + beta1 * x)))."""
    logit = beta0 + beta1 * x
    return 1. / (1. + np.exp(-logit))
# Ground-truth [beta0, beta1] sigmoid parameters used to generate the
# synthetic data: 2 subtypes x 3 biomarker dimensions.
true_betas = [[[-4, 1],
               [-1,1.],
               [-8,8]
              ],
              [
               [-1,1.],
               [-8,8],
               [-25, 3.5]
              ]]
# +
# xs = np.linspace(0,10,100)
# Per biomarker dimension, overlay: the true generating sigmoids (gray,
# dotted), the SubNoLign-learned subtype curves (green), and the
# SubLign-learned subtype curves (purple); one PDF per dimension.
for dim_i in range(3):
    xs = np.linspace(0,10,100)
    plt.figure()
    clean_plot()
    plt.grid(True)
    ys = [sigmoid_f(xs_i, true_betas[0][dim_i][0], true_betas[0][dim_i][1]) for xs_i in xs]
    plt.plot(xs,ys, ':', color='gray', linewidth=5, label='True function')
    ys = [sigmoid_f(xs_i, true_betas[1][dim_i][0], true_betas[1][dim_i][1]) for xs_i in xs]
    plt.plot(xs,ys, ':', color='gray', linewidth=5)
    for subtype_j in range(2):
        xs = np.linspace(0,10,100)
        ys = [sigmoid_f(xs_i, nolign_results['cent_lst'][subtype_j,dim_i,0],
                        nolign_results['cent_lst'][subtype_j,dim_i,1]) for xs_i in xs]
        if subtype_j == 0:
            plt.plot(xs,ys,linewidth=4, label='SubNoLign subtype', linestyle='-.', color='tab:green')
        else:
            plt.plot(xs,ys,linewidth=4, linestyle='--', color='tab:green')
        ys = [sigmoid_f(xs_i, results['cent_lst'][subtype_j,dim_i,0],
                        results['cent_lst'][subtype_j,dim_i,1]) for xs_i in xs]
        if subtype_j == 0:
            plt.plot(xs,ys,linewidth=4, label='SubLign subtype', linestyle='-', color='tab:purple')
        else:
            plt.plot(xs,ys,linewidth=4, linestyle='-', color='tab:purple')
    plt.xlabel('Disease stage')
    plt.ylabel('Biomarker')
    plt.legend()
    plt.savefig('subnolign_data1_subtypes_dim%d.pdf' % dim_i, bbox_inches='tight')
# +
# # number dimensions
# fig, axs = plt.subplots(1,3, figsize=(8,4))
# for dim_i in range(3):
# ax = axs[dim_i]
# # number subtypes
# for subtype_j in range(2):
# xs = np.linspace(0,10,100)
# ys = [sigmoid_f(xs_i, model1_results['cent_lst'][subtype_j,dim_i,0],
# model1_results['cent_lst'][subtype_j,dim_i,1]) for xs_i in xs]
# ax.plot(xs,ys)
# ys = [sigmoid_f(xs_i, true_betas[0][dim_i][0], true_betas[0][dim_i][1]) for xs_i in xs]
# ax.plot(xs,ys, color='gray')
# ys = [sigmoid_f(xs_i, true_betas[1][dim_i][0], true_betas[1][dim_i][1]) for xs_i in xs]
# ax.plot(xs,ys, color='gray')
# fig.suptitle('True data generating function (gray), learned models (orange, blue)')
# plt.savefig('learned_models.pdf',bbox_inches='tight')
# -
# ## Plot CHF Delta distributions
# Histogram of the inferred per-patient alignment deltas for the CHF cohort.
data = pickle.load(open('../clinical_runs/chf_v3_1000.pk', 'rb'))
clean_plot()
plt.hist(data['deltas'], bins=20)
# NOTE(review): '\d' in this non-raw string is an invalid escape sequence
# (DeprecationWarning); consider r'...$\delta_i$...'.
plt.xlabel('Inferred Alignment $\delta_i$ Value')
plt.ylabel('Number Heart Failure Patients')
plt.savefig('Delta_dist_chf.pdf', bbox_inches='tight')
# ## Make piecewise data to measure model misspecification
# Build a smooth piecewise (spline) data-generating function to measure model
# misspecification: SubLign assumes sigmoid trajectories, so spline-shaped
# biomarkers test robustness to that assumption.
from scipy import interpolate

# Reference example: interpolating cubic spline through one period of sine.
x = np.arange(0, 2*np.pi+np.pi/4, 2*np.pi/8)
y = np.sin(x)
tck = interpolate.splrep(x, y, s=0)  # s=0 -> interpolating (not smoothing) spline
xnew = np.arange(0, 2*np.pi, np.pi/50)
ynew = interpolate.splev(xnew, tck, der=0)

# Fixed control points for the piecewise trajectory. splrep requires
# strictly increasing x, so sort the points by x before fitting.
# BUG FIX: the original indexed with `sort_idx` before it was ever defined
# (it only appears in a later cell, computed from different data).
xvals = np.array([9.3578453 , 4.9814664 , 7.86530539, 8.91318433, 2.00779188])
yvals = np.array([0.35722491, 0.12512101, 0.20054626, 0.38183604, 0.58836923])
sort_idx = np.argsort(xvals)
xvals = xvals[sort_idx]
yvals = yvals[sort_idx]
tck = interpolate.splrep(xvals, yvals, s=0)
y
# +
# Plot the piecewise control points: one subplot per biomarker dimension d,
# one line per subtype k, points sorted by x so each line draws left-to-right.
# Assumes subtype_points has shape (N_subtypes, D, N_pts, 2) with (x, y)
# pairs in the last axis -- consistent with the unpack below.
N_subtypes,D,N_pts,_ = subtype_points.shape
fig, axes = plt.subplots(ncols=3,nrows=1)
for d, ax in enumerate(axes.flat):
    # ax.set_xlim(0,10)
    # ax.set_ylim(0,1)
    for k in range(N_subtypes):
        xs = subtype_points[k,d,:,0]
        ys = subtype_points[k,d,:,1]
        sort_idx = np.argsort(xs)
        ax.plot(xs[sort_idx],ys[sort_idx])
plt.show()
# for d in range(D):
# +
# %%time
# Train SubLign and a SubNoLign baseline on piecewise (non-sigmoid) synthetic
# data for N_trials independent datasets, collecting clustering/alignment
# metrics to quantify how much model misspecification hurts each model.
N_epochs = 800
N_trials = 5
use_sigmoid = True
sublign_results = {
    'ari':[],      # adjusted Rand index of recovered subtypes
    'pear': [],    # Pearson correlation of inferred vs. true times
    'swaps': []    # pairwise ordering errors of inferred times
}
subnolign_results = {'ari': []}
for trial in range(N_trials):
    data_format_num = 1
#     C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
    anneal, b_vae, C, d_s, d_h, d_rnn, reg_type, lr = get_hyperparameters(data_format_num)
#     C
#     data = load_data_format(data_format_num, 0, cache=True)
    # NOTE: overrides the True above -- the generated data is piecewise, not
    # restricted to increasing (sigmoid-like) trajectories.
    use_sigmoid = False
    data, subtype_points = load_piecewise_synthetic_data(subtypes=2, increasing=use_sigmoid,
                                                         D=3, N=2000,M=4, noise=0.25, N_pts=5)
    train_data_loader, train_data_dict, _, _, test_data_loader, test_data_dict, valid_pid, test_pid, unique_pid = parse_data(data.values, max_visits=4, test_per=0.2, valid_per=0.2, shuffle=False)
    # SubLign: learns per-patient time shifts (learn_time=True, max_delta=5).
    model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=use_sigmoid, reg_type='l1',
                    auto_delta=False, max_delta=5, learn_time=True, beta=1.)
    model.fit(train_data_loader, test_data_loader, N_epochs, lr, fname='runs/data%d_spline.pt' % (data_format_num), eval_freq=25)
#     z = model.get_mu(train_data_dict['obs_t_collect'], train_data_dict['Y_collect'])
#     fname='runs/data%d_chf_experiment.pt' % (data_format_num)
#     model.load_state_dict(torch.load(fname))
    results = model.score(train_data_dict, test_data_dict)
    print('Sublign results: ARI: %.3f; Pear: %.3f; Swaps: %.3f' % (results['ari'],results['pear'],results['swaps']))
    sublign_results['ari'].append(results['ari'])
    sublign_results['pear'].append(results['pear'])
    sublign_results['swaps'].append(results['swaps'])
    # SubNoLign baseline: identical model with alignment disabled
    # (max_delta=0, learn_time=False), so only ARI is meaningful.
    model = Sublign(d_s, d_h, d_rnn, dim_biomarkers=3, sigmoid=use_sigmoid, reg_type='l1',
                    auto_delta=False, max_delta=0, learn_time=False, beta=1.)
    model.fit(train_data_loader, test_data_loader, N_epochs, lr, fname='runs/data%d_spline.pt' % (data_format_num), eval_freq=25)
    nolign_results = model.score(train_data_dict, test_data_dict)
    print('SubNoLign results: ARI: %.3f' % (nolign_results['ari']))
    subnolign_results['ari'].append(nolign_results['ari'])
# +
# LaTeX-ready summary rows (mean +/- std across trials) for the results
# table; data_str records whether the synthetic trajectories were restricted
# to increasing (sigmoid-like) shapes.
data_str = 'Increasing' if use_sigmoid else 'Any'
print('SubLign-%s & %.2f $\\pm$ %.2f & %.2f $\\pm$ %.2f & %.2f $\\pm$ %.2f \\\\' % (
    data_str,
    np.mean(sublign_results['ari']), np.std(sublign_results['ari']),
    np.mean(sublign_results['pear']), np.std(sublign_results['pear']),
    np.mean(sublign_results['swaps']), np.std(sublign_results['swaps'])
))
# BUG FIX: this row previously printed SubLign's ARI again; it must report
# the baseline metrics collected in subnolign_results.
print('SubNoLign-%s & %.2f $\\pm$ %.2f & -- & -- \\\\' % (
    data_str,
    np.mean(subnolign_results['ari']), np.std(subnolign_results['ari']),
))
# -
# Re-score the most recently fitted model. NOTE(review): after the loop above,
# `model` is the SubNoLign baseline from the last trial, yet this prints under
# the "Sublign" banner -- confirm which model this cell is meant to evaluate.
results = model.score(train_data_dict, test_data_dict)
print('Sublign results: ARI: %.3f; Pear: %.3f; Swaps: %.3f' % (results['ari'],results['pear'],results['swaps']))
|
model/CHF_Experiment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Earthquake plots
# +
def ProduceSpatialQuakePlot(Observations, FitPredictions):
    """Render spatial earthquake plots at a few evenly spaced snapshot times.

    Chooses four sequence indices spread uniformly across the usable range
    [0, Num_Seq - Plottingdelay] and delegates drawing of each snapshot to
    FixedTimeSpatialQuakePlot. Uses module-level globals: timenow, print_red,
    config, Num_Seq, Plottingdelay.
    """
    print_red(
        timenow() + " Produce Spatial Earthquake Plots " + config.experiment + " " + config.comment
    )
    last_index = Num_Seq - Plottingdelay
    num_snapshots = 4
    step = 1.0 / np.float64(num_snapshots - 1)
    for snapshot in range(num_snapshots):
        # Evenly spaced index, nudged by 0.1 before flooring, then clamped
        # to the valid [0, last_index] range.
        raw_index = math.floor(0.1 + (snapshot * last_index) * step)
        FixedTimeSpatialQuakePlot(min(max(raw_index, 0), last_index), Observations, FitPredictions)
def EQrenorm(casesdeath, value):
    """Map a normalized prediction/observation back toward real units.

    When the global Plotrealnumbers flag is set, undo the per-quantity
    normalization via the QuantityStatistics table (column 2 appears to hold
    the scale and column 0 the offset -- confirm against the normalization
    code) and undo any root transform recorded in QuantityTakeroot.
    Otherwise the value passes through unchanged.
    """
    if not Plotrealnumbers:
        return value
    stats_ptr = PredictionAverageValuesPointer[casesdeath]
    restored = (
        value / QuantityStatistics[stats_ptr, 2]
        + QuantityStatistics[stats_ptr, 0]
    )
    root = QuantityTakeroot[stats_ptr]
    if root == 2:
        restored = restored ** 2
    elif root == 3:
        restored = restored ** 3
    return restored
def FixedTimeSpatialQuakePlot(PlotTime, Observations, FitPredictions):
    """Plot observed vs. predicted spatial quake maps for one time index.

    For each basic predicted quantity, denormalizes the observed and fitted
    values at sequence index ``PlotTime`` across all locations, prints their
    ranges against the expected overall range, and renders paired
    (observed, predicted) images two per row.

    Relies on module-level globals: InitialDate, Tseq, Nloc,
    NumpredbasicperTime, PredictionAverageValuesPointer, QuantityStatistics,
    Predictionbasicname, print_red, plotimages, config, timedelta, EQrenorm.
    """
    # Calendar date corresponding to this sequence index (offset by Tseq).
    Actualday = InitialDate + timedelta(days=(PlotTime + Tseq))
    print_red(
        "Spatial Earthquake Plots",
        Actualday.strftime("%d/%m/%Y"),
        config.experiment,
        config.comment
    )
    NlocationsPlotted = Nloc
    # One row per predicted quantity, one column per location.
    real = np.zeros([NumpredbasicperTime, NlocationsPlotted])
    predict = np.zeros([NumpredbasicperTime, NlocationsPlotted])
    print("Ranges for Prediction numbers/names/property pointer")
    for PredictedQuantity in range(0, NumpredbasicperTime):
        # Denormalize every location's observed and predicted value.
        for iloc in range(0, NlocationsPlotted):
            real[PredictedQuantity, iloc] = EQrenorm(
                PredictedQuantity, Observations[PlotTime, iloc, PredictedQuantity]
            )
            predict[PredictedQuantity, iloc] = EQrenorm(
                PredictedQuantity, FitPredictions[PlotTime, iloc, PredictedQuantity]
            )
        localmax1 = real[PredictedQuantity].max()
        localmin1 = real[PredictedQuantity].min()
        localmax2 = predict[PredictedQuantity].max()
        localmin2 = predict[PredictedQuantity].min()
        predaveragevaluespointer = PredictionAverageValuesPointer[PredictedQuantity]
        # NOTE(review): columns 1/0 are read here as overall max/min, while
        # EQrenorm reads column 2 as scale and 0 as offset -- confirm the
        # QuantityStatistics layout where the table is built.
        expectedmax = QuantityStatistics[predaveragevaluespointer, 1]
        expectedmin = QuantityStatistics[predaveragevaluespointer, 0]
        print(
            "Real max/min",
            round(localmax1, 3),
            round(localmin1, 3),
            "Predicted max/min ",
            round(localmax2, 3),
            round(localmin2, 3),
            "Overall max/min ",
            round(expectedmax, 3),
            round(expectedmin, 3),
            PredictedQuantity,
            Predictionbasicname[PredictedQuantity],
            predaveragevaluespointer
        )
    # Interleave observed/predicted images with dated titles, then draw them
    # in a grid with two images (observed, predicted) per quantity.
    InputImages = []
    InputTitles = []
    for PredictedQuantity in range(0, NumpredbasicperTime):
        InputImages.append(real[PredictedQuantity])
        InputTitles.append(
            Actualday.strftime("%d/%m/%Y")
            + " Observed "
            + Predictionbasicname[PredictedQuantity]
        )
        InputImages.append(predict[PredictedQuantity])
        InputTitles.append(
            Actualday.strftime("%d/%m/%Y")
            + " Predicted "
            + Predictionbasicname[PredictedQuantity]
        )
    plotimages(InputImages, InputTitles, NumpredbasicperTime, 2)
|
benchmarks/earthquake/feb-2022/notebooks/earthquake_plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How-To Guide into Feature Selection
#
# ## Introduction
#
# This is the third post in my series on transforming data into alpha. If you haven't yet seen the [framework overview]() or [feature engineering guide](), please take a minute to read those first...
#
# This post is going to delve into the mechanics of _feature selection_, in other words choosing between the many variations of features you've created in the feature engineering stage. By design, many of the features you've created will be very similar to each other (aka "collinear") because you've derived them from the same underlying dataset.
#
# ## Motivation
#
# The previous step of the process, feature engineering, is intended to be a creative, loose process akin to a brainstorming session. The result should be tens (or hundreds) of variations of features to evaluate. However, most models will _generalize_ better (i.e., work well on data they haven't seen) with fewer features. They will also be much more interpretable.
#
# Therefore, we need a systematic approach to deciding which of the many possible features to use. That's where the _feature selection_ process comes in.
#
# ## Philosophy
#
# In feature selection, we strive to meet two goals:
# 1. __Strength__: Choose the features with the strongest, most persistent relationships to the target outcome variable. The reasons for this are obvious.
# 2. __Orthogonality__: Minimize the amount of overlap or collinearity in your selected features. The importance of orthogonality (non-overlap) of features is much greater than you might guess.
#
# I am biased towards making feature selection a relatively mechanical process. The "art" should mainly be encapsulated within the prior step (feature engineering) and the subsequent step (modeling). Feature selection should, in my view, follow a heuristic and can be encoded into an algorithm if desired. For purposes of this tutorial, I'll keep things relatively manual.
#
# ## Getting Started
# Let's dive in. I will begin by loading the feature set created in the prior step. I'm also going to create the _outcomes_ `DataFrame` as done in the Framework Overview post. Please refer to those if you haven't already.
#
#
# +
import numpy as np
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like # remove once updated pandas-datareader issue is fixed
# https://github.com/pydata/pandas-datareader/issues/534
import pandas_datareader.data as web
# %matplotlib inline
def get_symbols(symbols,data_source, begin_date=None,end_date=None):
    """Fetch adjusted OHLCV history for several symbols into one DataFrame.

    Parameters
    ----------
    symbols : iterable of str
        Ticker symbols to download.
    data_source : str
        pandas-datareader source name (e.g. 'quandl').
    begin_date, end_date : optional
        Date range passed through to ``web.DataReader``.

    Returns
    -------
    pandas.DataFrame
        Columns ['open','high','low','close','volume'] indexed by
        (date, symbol), sorted by index.
    """
    frames = []
    for symbol in symbols:
        df = web.DataReader(symbol, data_source,begin_date, end_date)[['AdjOpen','AdjHigh','AdjLow','AdjClose','AdjVolume']].reset_index()
        df.columns = ['date','open','high','low','close','volume'] #my convention: always lowercase
        df['symbol'] = symbol # add a new column which contains the symbol so we can keep multiple symbols in the same dataframe
        frames.append(df.set_index(['date','symbol']))
    if not frames:  # preserve original behavior for an empty symbol list
        return pd.DataFrame()
    # Concatenate once at the end: repeatedly concat-ing onto a growing frame
    # is quadratic, and concat starting from an empty DataFrame is deprecated.
    return pd.concat(frames, axis=0).sort_index()
# Pull daily adjusted price history for a small illustrative universe.
prices = get_symbols(['AAPL','CSCO','AMZN','YHOO','MSFT'],data_source='quandl',begin_date='2012-01-01',end_date=None)
prices.sort_index().tail()
# -
# +
# Forward-looking outcome targets: pct_change with a *negative* period looks
# ahead, so each row holds the future return over the given horizon.
outcomes = pd.DataFrame(index=prices.index)
# next day's closing-price change
outcomes['close_1'] = prices.groupby(level='symbol').close.pct_change(-1) # next day's returns
outcomes['close_5'] = prices.groupby(level='symbol').close.pct_change(-5) # next week's returns
outcomes['close_10'] = prices.groupby(level='symbol').close.pct_change(-10) # next two weeks' returns
outcomes['close_20'] = prices.groupby(level='symbol').close.pct_change(-20) # next month's (approx) returns
outcomes.tail()
# -
# For purposes of illustration, we'll engineer some features to contain some signal buried within the noise. Clearly, this is not something we'd do in real usage but will help to demonstrate the concept more clearly.
#
# Assume we have a target variable called `outcome` which can be (partially) predicted with three factors, `factor_1`, `factor_2` and `factor_3`. There's also an unpredictble noise component. We will "cheat" and create the overall target variable from these factors. All data will follow the same index as the market data we pulled from quandl.
# Synthetic latent factors and outcome with known loadings (1, 2, 3) plus a
# dominant noise term (5x) -- built deliberately so we know the ground truth
# when evaluating the feature-selection heuristic below.
num_obs = prices.close.count()
factor_1 = pd.Series(np.random.randn(num_obs),index=prices.index)
factor_2 = pd.Series(np.random.randn(num_obs),index=prices.index)
factor_3 = pd.Series(np.random.randn(num_obs),index=prices.index)
outcome = 1.*factor_1 + 2.*factor_2 + 3.*factor_3 + 5.*np.random.randn(num_obs)
outcome.name = 'outcome'
outcome.tail()
# Now, we will engineer several variations on features which each contain some information about the three factors, plus a few which contain some interaction effects, and some which do not contain any useful data.
#
# Note that we are, again, "cheating" here for illustration purposes.
# +
# fN1/fN2/fN3 carry increasing amounts of signal about factor N; f4x are
# pairwise interaction features; f5x are pure noise controls.
features = pd.DataFrame(index=outcome.index)
features['f11'] = 0.2*factor_1 + 0.8*np.random.randn(num_obs)
features['f12'] = 0.4*factor_1 + 0.6*np.random.randn(num_obs)
features['f13'] = 0.6*factor_1 + 0.4*np.random.randn(num_obs)
features['f21'] = 0.2*factor_2 + 0.8*np.random.randn(num_obs)
# NOTE(review): f22 uses 0.4 signal but 0.8 (not 0.6) noise, breaking the
# pattern of the other middle-strength features -- confirm intentional.
features['f22'] = 0.4*factor_2 + 0.8*np.random.randn(num_obs)
features['f23'] = 0.6*factor_2 + 0.4*np.random.randn(num_obs)
features['f31'] = 0.2*factor_3 + 0.8*np.random.randn(num_obs)
features['f32'] = 0.4*factor_3 + 0.6*np.random.randn(num_obs)
features['f33'] = 0.6*factor_3 + 0.4*np.random.randn(num_obs)
features['f41'] = 0.2*factor_1+0.2*factor_2 + 0.6*np.random.randn(num_obs)
features['f42'] = 0.2*factor_2+0.2*factor_3 + 0.6*np.random.randn(num_obs)
features['f43'] = 0.2*factor_3+0.2*factor_1 + 0.6*np.random.randn(num_obs)
features['f51'] = np.random.randn(num_obs)
features['f52'] = np.random.randn(num_obs)
features['f53'] = np.random.randn(num_obs)
features.tail()
# -
# Next, we'll import the required packages and modules for the feature selection:
# +
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
from IPython.display import display
from scipy.cluster import hierarchy
from scipy.spatial import distance
from sklearn.preprocessing import StandardScaler,Normalizer
# -
# Before evaluating the features for predictive strength and orthogonality, we'll do a quick data preparation stage. It is sometimes vital to "standardize" or "normalize" data so that we get fair comparisons between features of differing scale. Strictly speaking, since all of the doctored outcome and feature data is already drawn from normal distribution (using the numpy function `random.rnorm()`) we don't really need this step, but good practice to include.
#
# Here, I'll use the scikit-learn `StandardScaler()` class and some pandas magic to transform the data.
# +
#f = features.dropna() #optional - to compare apples to apples
# standardize or normalize data
std_scaler = StandardScaler()
# Fit/transform on rows with no missing features; the scaler returns a bare
# ndarray, so re-wrap it with the original index and column names below.
features_scaled = std_scaler.fit_transform(features.dropna())
print (features_scaled.shape)
df = pd.DataFrame(features_scaled,index=features.dropna().index)
df.columns = features.dropna().columns
df.tail()
# standardize outcome as well
outcome_df = outcome.to_frame()
outcome_scaled = std_scaler.fit_transform(outcome_df.dropna())
outcome_scaled = pd.DataFrame(outcome_scaled,index=outcome_df.dropna().index)
outcome_scaled.columns = outcome_df.columns
outcome_scaled.tail()
# -
# Univariate "strength" measure: correlation of each scaled feature with the
# (unscaled) outcome, shown as a sorted horizontal bar chart.
corr = df.corrwith(outcome)
corr.sort_values().plot.barh(color = 'blue',title = 'Strength of Correlation')
# Pretend for a minute that we don't know which features are going to be stronger and weaker, and which are going to tend to cluster together. We've got an idea that there are some quite strong features, some weaker, and some useless.
# Next, we'll take advantage of a very handy seaborn chart type called a "clustermap" which plots a heatmap representation of a correlation matrix and runs a clustering algorithm to group together the most closely related features.
#
# Of course, the diagonal of dark green represents each feature being perfectly correlated with itself.
# +
# Hierarchically cluster the feature correlation matrix (average linkage over
# pairwise distances) and draw a seaborn clustermap so collinear features
# group together visually.
corr_matrix = df.corr()
correlations_array = np.asarray(corr_matrix)
linkage = hierarchy.linkage(distance.pdist(correlations_array), method='average')
g = sns.clustermap(corr_matrix,row_linkage=linkage,col_linkage=linkage,row_cluster=True,\
                   col_cluster=True,figsize=(10,10),cmap='Greens')
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.show()
# Column order after clustering (kept for downstream reordering).
label_order = corr_matrix.iloc[:,g.dendrogram_row.reordered_ind].columns
# -
# The algorithm has done a good job of finding the groupings of features. The cluster in the upper left captures `factor_1` (including some of the interaction effects). `factor_3` is fairly well isolated in the lower right corner, and in the middle we can see `factor_2` as well as some of the noise features.
#
# Let's next focus in only on those features with correlations of greater than 0.1 to exclude the noise and weak features.
# +
# Keep only features whose outcome correlation exceeds 0.1 (drops the pure
# noise f5x group and the weakest signals), then re-run the clustermap on
# that subset and print the surviving correlations.
correlated_features = corr[corr>0.1].index.tolist()
corr_matrix = df[correlated_features].corr()
correlations_array = np.asarray(corr_matrix)
linkage = hierarchy.linkage(distance.pdist(correlations_array), method='average')
g = sns.clustermap(corr_matrix,row_linkage=linkage,col_linkage=linkage,row_cluster=True,\
                   col_cluster=True,figsize=(6,6),cmap='Greens')
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.show()
label_order = corr_matrix.iloc[:,g.dendrogram_row.reordered_ind].columns
print("Correlation Strength:")
print(corr[corr>0.1].sort_values(ascending=False))
# -
# Ah, now the clusters look a bit sharper. We'll follow a simple heuristic to manually select the features. Those wishing to take this to the next level can decide how to encapsulate into an algorithm.
#
# 1. Take the most strongly correlated feature (f33) and add it to our list of selected features.
# 2. Take the second correlated feature (f23) and check to see if it's closely correlated (neighboring in the clustermap) to any features already chosen. If no, add to the list. If yes, discard.
# 3. Repeat this process until either (1) we've reached the target feature count, or (2) we've run out of strongly correlated features.
#
# Following that heuristic, I get:
# Hand-picked via the heuristic above: strongest feature per cluster,
# skipping candidates too correlated with features already selected.
selected_features = ['f33','f23','f42','f41','f31']
# Note that this list of features is not simply the highest correlated features. Let's run the clustermap one more time to see if we've missed any major clusters.
# +
# Final sanity-check clustermap on just the selected subset: ideally little
# off-diagonal structure remains.
corr_matrix = df[selected_features].corr()
correlations_array = np.asarray(corr_matrix)
linkage = hierarchy.linkage(distance.pdist(correlations_array), method='average')
g = sns.clustermap(corr_matrix,row_linkage=linkage,col_linkage=linkage,row_cluster=True,\
                   col_cluster=True,figsize=(6,6),cmap='Greens')
plt.setp(g.ax_heatmap.yaxis.get_majorticklabels(), rotation=0)
plt.show()
label_order = corr_matrix.iloc[:,g.dendrogram_row.reordered_ind].columns
# -
# Looks generally pretty good. This can be a bit subjective to determine what's "too close" and what's "too weak", but that's the basic idea.
#
# Thus far, we've only taken a simple correlation statistic to be representative of predictive power. In my opinion, that's a good place to start but because financial time series data suffers from [non-stationarity]() and [regime change](), we'll plot the rolling correlation of these selected features to see if any is either (1) less correlated now than in times past or (2) very "hot-and-cold".
# Quarterly rolling correlation of each selected feature with the (scaled)
# outcome, plotted over time to check the relationships are stable rather
# than regime-dependent. The iloc slicing pulls the outcome column's row of
# each per-quarter correlation matrix and drops the outcome-vs-outcome entry.
tmp = df[selected_features].join(outcome_scaled).reset_index().set_index('date')
tmp.dropna().resample('Q').apply(lambda x: x.corr()).iloc[:,-1].unstack().iloc[:,:-1].plot()
# shows time stability
# As expected, since the data wasn't modeled with any non-stationarity, our features all appear to be robust over time.
# ### Z-Scores
# A very popular/useful transformation for financial time series data is the [z-score](http://stattrek.com/statistics/dictionary.aspx?definition=z-score). We can easily define a generalized lambda function for this, which we can use whenever needed. Importantly, it allows us to mix together very different symbols (some high-beta, some low-beta) in a way that considers the statistical significance of any movement.
#
# Z-score each symbol's close over its ENTIRE history.
# NOTE: deliberately flawed, as the text above explains -- full-sample
# mean/std look ahead in time; the corrected rolling version is f10 below.
zscore_fxn = lambda x: (x - x.mean()) / x.std()
features['f09'] =prices.groupby(level='symbol').close.apply(zscore_fxn)
features.f09.unstack().plot.kde(title='Z-Scores (not quite accurate)')
# However, the above example has a subtle but important bug. It uses the mean _of the whole time frame_ and the standard deviation _of the whole time frame_ to calculate each datapoint. This means we are peeking ahead into the future and the feature is potentially very danger-prone (it'll work famously well in sample and fail to work out of sample...).
#
# Fixing this is cumbersome, but necessary.
# Rolling 200-day z-score (minimum 20 observations) -- no lookahead.
zscore_fun_improved = lambda x: (x - x.rolling(window=200, min_periods=20).mean())/ x.rolling(window=200, min_periods=20).std()
features['f10'] =prices.groupby(level='symbol').close.apply(zscore_fun_improved)
features.f10.unstack().plot.kde(title='Z-Scores (Correct)')
# ### Percentile
# Less commonly used - but equally useful - is the percentile transformation. Getting this done properly in pandas (with groupby and rolling) is possible but tricky. The below example returns the percentile rank (from 0.00 to 1.00) of traded volume for each value as compared to a trailing 200 day period.
#
# Note that we need to use _a lambda within a lambda_ to make this work properly. We're on the bleeding edge.
#
# Rolling 200-day percentile rank (0-1) of volume per symbol.
# NOTE(review): the inner `[0]` takes the rank of the FIRST (oldest) element
# of each window, not the current one -- `.iloc[-1]` looks intended; confirm.
# Also note this overwrites the synthetic 'f11' created earlier in this
# notebook (text carried over from the feature-engineering post).
rollrank_fxn = lambda x: x.rolling(200,min_periods=20).apply(lambda x: pd.Series(x).rank(pct=True)[0],raw=True)
features['f11'] = prices.groupby(level='symbol').volume.apply(rollrank_fxn)
# Another interesting application of this same pattern is to rank each stock _cross-sectionally_ rather than _longitudinally_ as above. In other words, where does this stock rank within all of the stocks on that day, not for all prior days of that stock. The below example isn't very meaningful with only two stocks, but quite useful when using a realistic universe. In this example, we're also making use of an earlier feature (relative volume) to compare which symbol is most heavily traded _for that stock's normal range_ in a given day. Also note that we need to `dropna()` prior to ranking because `rank` doesn't handle nulls very gracefully.
# Cross-sectional (per-date) percentile rank.
# NOTE(review): references features['f07'], which is never created in this
# notebook -- presumably defined in the feature-engineering post; verify.
features['f12'] = features['f07'].dropna().groupby(level='date').rank(pct=True)
# ### Technical Analysis
# Those with a taste for technical analysis may find it difficult to let go of your favored TA techniques. While this is not _my_ favored approach, you'll have no problem engineering features using these methods. From my cursory googling, it looked as though the `ta` package would be a good place to start. Very new and only one contributor but it looks fairly complete and well documented. If you find that it's missing your favorite indicators, consider contributing to the package. If you know of better such packages, please post in the comments below...
#
# You may consider mean-centering a technical indicator so that machine learning methods can make better use of the data (or make sure to include that in the pre-processing pipeline when you start modeling).
#
import ta # technical analysis library: https://technical-analysis-library-in-python.readthedocs.io/en/latest/
# money flow index (14 day)
features['f13'] = ta.momentum.money_flow_index(prices.high, prices.low, prices.close, prices.volume, n=14, fillna=False)
# mean-centered money flow index (centered on a trailing 200-day rolling
# mean with min 20 observations, so no lookahead is introduced)
features['f14'] = features['f13'] - features['f13'].rolling(200,min_periods=20).mean()
# ## Alternative Representations
# A bit different than transforms are "representations", i.e., other ways to represent continuous values. All of the transforms above returned continuous values rather than "labels", and that's often a good place to start - especally for early prototypes.
#
# However, you may want to represent the data in different ways, especially if using classification-based approaches or worried about the [curse of dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality) due to large numbers of features.
# ### Binning
# We can easily convert a continous variable to discrete "bins" (like 1 to 10). This loses information, of course, but sometimes loss of information is a good thing if you are removing more noise than signal.
#
# The below example shows volumes converted into ten equally sized buckets. In other words, we've converted a continuous variable into a discrete one.
#
# NOTE: this example is not applied in a rolling fashion, so it __does suffer from some data peeking__, a cardinal sin. At the moment, I'm failing in my efforts to implement it in a rolling way. I'd be grateful for code snippets if anyone knows how to do this offhand.
# Discretize volume into 10 equal-frequency buckets (labels 1..10) per symbol.
# NOTE: qcut over the full history peeks ahead, as acknowledged above.
n_bins = 10
bin_fxn = lambda y: pd.qcut(y,q=n_bins,labels = range(1,n_bins+1))
features['f15'] = prices.volume.groupby(level='symbol').apply(bin_fxn)
# ### Sign
# Very simply, you may wish to convert continuous variables into positive or negative (1 or -1) values, depending on input. For instance, was volume increasing or decreasing today?
#
# Sign (+1 / -1 / 0) of f05. NOTE(review): f05 is not created in this
# notebook -- carried over from the feature-engineering post; verify.
features['f16'] = features['f05'].apply(np.sign)
# ### Plus-Minus
# You may be interested in how many days in a row a value has increased (or decreased). Below is a simple pattern to do just that - it calculates the number of up-days minus the number of down days.
#
# Net count of up-days minus down-days over a trailing 20-day window.
plus_minus_fxn = lambda x: x.rolling(20).sum()
features['f17'] = features['f16'].groupby(level='symbol').apply(plus_minus_fxn)
# ### One-Hot Encoding
# Possibly the most frequently used alternative representation is "one-hot encoding" where a categorical variable is represented as a binary. For instance, month_of_year would be represented as twelve different columns, each of which was either 0 or 1. January would be [1,0,0,0,...0] etc...
#
# This is absolutely crucial in a few circumstances. The first is where there is false meaning in the "ordinality" of values. If we were looking to test the "santa claus effect" hypothesis, it wouldn't be helpful to use a month_of_year feature where January was "the least" and December was "the most".
#
# The second is in cases where we are representing events or "states". Does the word "lawsuit" appear within the 10-Q footnotes? Is the company in the blackout period for share buybacks?
#
# Finally, the particular machine learning algorithm (tree-based, neural networks) may find it easier to use binary representations than continuous or discrete ones.
#
# The below example creates twelve one-hot features, one for each month, and names them automatically
# +
# One-hot encode calendar month into twelve 0/1 feature columns.
month_of_year = prices.index.get_level_values(level='date').month
one_hot_frame = pd.DataFrame(pd.get_dummies(month_of_year))
one_hot_frame.index = prices.index # Careful! This is forcing index values without usual pandas alignments!
# create column names
# NOTE(review): assumes the last existing column name ends in a two-digit
# feature number (e.g. 'f17'); breaks for single-digit or non-'fNN' names.
begin_num = int(features.columns[-1][-2:]) + 1 #first available feature
feat_names = ['f'+str(num) for num in list(range(begin_num,begin_num+12,1))]
# rename columns and merge
one_hot_frame.columns = feat_names
features = features.join(one_hot_frame)
# -
# ## Data Cleansing
# OK, I've put this off long enough. It's time to cover the least interesting and possibly most critical aspect of feature engineering... data cleansing!
#
# Many will include data cleansing as part of the raw data collection pipeline rather than the feature engineering step - and I can't argue with cleansing data as early in the process as possible. However, your data can never be too clean so I take the "belt and suspenders" approach. Clean your data on collection, clean on usage. Clean, clean, clean!
#
# The motivation for
# * to_datetime, to_numeric, astype() (int, string, float...)
# * fillna(ffill, 0, mean)
# ### Data Typing
# If you've spent any time with data work in python, you're already familiar with the sometimes annoying data typing issues of a "duck typed" language. Pandas does an admirable job of inferring types from your data but you'll sometimes want to exercise more control to make sure your data is perfect.
#
# The first data typing issue I face is representation of dates and times, which can be represented in several different formats. I prefer to standardize all datetimes using the pandas pd.to_datetime() method which yields two main benefits: (1) you will be able to align and join multiple datetime values together and (2) you'll be able to take advantage of the many pandas date/time functions.
#
# Example:
## code of casting to datetime, selecting weekday etc...
# If you fail to control your datetime typing, you'll inevitably end up with difficulty in aligning and joining data on date, like this:
# +
# example of a str and a datetime repr which are joined on axis=1 and result in an awkward dataframe
# -
# Among the pandas date/time functions is a very useful resampling method, which allows you to aggregate from a higher frequency (e.g., hourly) to a lower frequency (e.g., daily or weekly or monthly). Depending on the timeframe of your strategy, you may seek to resample everything to a lower frequency
## example of resampling
# The other main typing issue I find is with numeric types. Number values are commonly represented as integers, floats, and strings which look like integers or floats. Pandas attempts to guess the right type for data when it's loaded (via `read_csv` or `read_sql` etc..). Problems arise when there are some values within a column which don't follow the type .
#
# The below example illustrates how
# +
# Demo: a 'None' string in an otherwise-numeric column blocks aggregation.
df = pd.DataFrame({'symbol':['a','b','c','d','e'],'price':[1,2,3,4,'None']})
print(df)
print()
# NOTE(review): behavior is pandas-version dependent -- older pandas silently
# skipped the object column ("no results"); newer versions raise instead.
print('Average: ',df.mean()) # no results
print()
print('######################')
# retype to numeric
print()
# errors='coerce' turns unparseable values into NaN instead of raising.
df['price'] = pd.to_numeric(df.price,errors='coerce')
print(df)
print()
print('Average: ',df.mean()) # works
# -
# ### Handling Missing Data
# Incomplete data is a reality for us all. Whether it's because some input sources are of a lower frequency, shorter history (i.e., don't go back as far in time) or have unexplained unavailable data points at times, we need a thoughtful approach for addressing missing data.
#
# Most machine learning algorithms require a valid value for each feature at each observation point (or they will fail to run...). If we don't apply some sensible workarounds, we'll end up dropping lots of _valid_ data points because of a single missing feature.
#
# Before outlining the tactics and code patterns we can apply, my core principles for data cleansing are:
# 1. Always try to reflect the data you might have applied _at the time_ of the missing data point. In other words, don't peek into the future if at all possible.
# 2. Drop valid data only as a last resort (and as late in the process as possible).
# 3. Questionable data (i.e., extreme outliers) should be treated like missing data.
#
#
### Formatting
# Whew! That was (much) longer than intended. Feature engineering is a broad subject of which I've only scratched the surface. Hopefully this will provide you with a framework and starting point to get your own process up and running so that you can focus on applying your creativity and your expertise on the subject matter of choice.
#
# In the next post of this series, I will outline a process [feature selection]() - the next logical step following feature engineering. Questions, comments, or suggestions are welcomed below.
# +
import numpy as np
# Toy (day, symbol) MultiIndex frame used to compare four equivalent ways of
# computing per-symbol changes: diff, pct_change, apply, and diff/shift.
arrays = [np.array([1,2,3,4,1,2,3,4]),np.array(['bar', 'bar', 'bar', 'bar', 'foo', 'foo', 'foo', 'foo'])]
s = pd.Series(np.array([100,101,102,103,200,201,202,203]), index=arrays)
s.name='values'
df = pd.DataFrame(s, index=arrays).sort_index()
df.index.names =['day','symbol']
print(df)
# NOTE(review): `.values` here appears to select the COLUMN named 'values'
# via groupby attribute access (shadowing the usual ndarray `.values`
# attribute) -- confirm on the pandas version in use.
print(df.groupby(level='symbol').values.diff())
print(df.groupby(level='symbol').values.pct_change())
my_func = lambda x: x.pct_change()
print(df.groupby(level='symbol').values.apply(my_func))
print(df.groupby(level='symbol').values.diff() / df.groupby(level='symbol').values.shift(1))
|
03_Feature_Selection.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 概念
# ## canvas
# 画板 用户无法接触的。
# ## figure
# 画布 用户接触的第一层。
# ## axes
# 绘图区 一个画布上可以多个绘图区。
# ## 辅助显示层
# axes内的除了绘制的图像之外的内容,包括 Axes外观、边框线、坐标轴、坐标轴名称、坐标轴刻度、坐标轴刻度标签、网络线、图例、标题等内容。
# ## 图像层
# Axes内通过plot、scatter、bar、histogram、pie等函数根据数据绘制出的图像。
# # 折线图的绘制
# matplotlib.pyplot包含了一系列的画图函数。它的函数作用于当前图形(figure)的当前坐标系(axes)
# ## 简单案例
import matplotlib.pyplot as plt
# %matplotlib inline
# Create the figure (canvas) and set its properties.
# figsize: width/height of the figure in inches
# dpi: resolution of the rendered image
# Returns the fig object.
plt.figure(figsize=(5, 4), dpi=80)
# Draw the line plot.
plt.plot([1,2,3],[3,4,1])
# Display the image.
plt.show()
# Save the figure to a path. NOTE: show() releases the figure's resources,
# so calling savefig() after show() would save an empty image -- save first.
# plt.savefig('1.png')
# ## 案例:显示温度变化状况
# 需求:画出某城市11点到12点这一小时内每分钟的温度变化折线图,温度范围在15°~18°
# 需求:再添加一个城市的温度变化,收集到北京当天温度变化情况,温度在1度到3度。
# +
# Use a CJK-capable font so the Chinese labels/title render, and keep the
# minus sign displayable with that font.
plt.rcParams['font.sans-serif']=['SimHei']
plt.rcParams['axes.unicode_minus']=False
import random
# 1) Prepare the x and y data.
x = range(60) # 60 minutes in the hour
y_shanghai = [random.uniform(15, 18) for i in x] # Shanghai temperatures; uniform() draws a float in [a, b)
y_beijing = [random.uniform(1, 3) for i in x] # Beijing temperatures
# 2) Build custom x/y tick labels ("11:MM" strings, in Chinese).
x_ticks_label = ["11点{}分".format(i) for i in x]
y_ticks = range(40)
# 3) Create the figure.
plt.figure(figsize=(20, 8), dpi=80)
# 4) Draw the line plots.
plt.plot(x, y_shanghai, label="上海") # Shanghai series
plt.plot(x, y_beijing, color='r', linestyle='--', label="北京") # Beijing series, custom color and linestyle
# 5) Set the tick positions and replace the default labels (every 5th tick).
plt.xticks(x[::5], x_ticks_label[::5])
plt.yticks(y_ticks[::5])
# 6) Add grid lines: visible, dashed style, 50% opacity.
plt.grid(True, linestyle='--', alpha=0.5)
# 7) Axis descriptions and title.
plt.xlabel("时间")
plt.ylabel("温度")
plt.title("中午11点0分到12点之间的温度变化图示")
# 8) Show the legend.
plt.legend(loc="best") # loc: legend position
# 9) Display the image.
plt.show()
# -
# | 颜色字符 | 风格字符
# |----------------|-------------------------------|
# |r 红色|- 实线 |
# |g 绿色 |- - 虚线 |
# |b 蓝色 |-. 点划线 |
# |w 白色 |: 点虚线 |
# |c 青色 |' ' 留空、空格 |
# |m 洋红 | |
# |y 黄色 | |
# |k 黑色 | |
# ## 多个坐标系显示-plt.subplots
# 我们想要将上海和北京的天气图显示在同一个图的不同坐标系当中
# +
# Same two temperature series as above, but drawn on two side-by-side
# axes (subplots) instead of one shared coordinate system.
# 1) Prepare the data.
x = range(60)
y_shanghai = [random.uniform(15, 18) for i in x]  # Shanghai temperatures
y_beijing = [random.uniform(1, 3) for i in x]     # Beijing temperatures
# 2) Custom tick labels.
x_ticks_label = ["11点{}分".format(i) for i in x]
y_ticks = range(40)
# 3) One row, two columns of axes on a single figure.
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8), dpi=80)
# 4) One city per axes object.
axes[0].plot(x, y_shanghai, label="上海")
axes[1].plot(x, y_beijing, color='r', linestyle='--', label="北京")
# 5) Per-axes tick positions and labels.
#    NOTE(review): set_xticks with a labels argument requires
#    matplotlib >= 3.5; older versions need set_xticklabels instead.
axes[0].set_xticks(x[::5], x_ticks_label[::5])
axes[0].set_yticks(y_ticks[::5])
axes[1].set_xticks(x[::5], x_ticks_label[::5])
axes[1].set_yticks(y_ticks[::5])
# 6) Grids on both axes.
axes[0].grid(True, linestyle='--', alpha=0.5)
axes[1].grid(True, linestyle='--', alpha=0.5)
# 7) Axis labels and titles.
axes[0].set_xlabel("时间")
axes[0].set_ylabel("温度")
axes[0].set_title("上海中午11点0分到12点之间的温度变化图示")
axes[1].set_xlabel("时间")
axes[1].set_ylabel("温度")
axes[1].set_title("北京中午11点0分到12点之间的温度变化图示")
# 8) Legends.
axes[0].legend()
axes[1].legend()
# 9) Render.
plt.show()
# -
# ## 应用场景 - 数学函数图像的绘制
# +
import numpy as np
# 1) 1000 evenly spaced sample points across [-10, 10].
x = np.linspace(-10, 10, 1000)
y = np.sin(x)
# 2) Create the figure.
plt.figure(figsize=(20, 8), dpi=100)
# 3) Plot sin(x), with a grid for readability.
plt.plot(x, y)
plt.grid()
# 4) Render.
plt.show()
# -
# # 小结
# + 如何解决中文显示问题
# + matplotlib的图结构
# + figure实现创建绘图区域大小
# + plot实现折线图的绘制
# + title,xlabel,ylabel实现标题以及x,y轴名设置
# + xticks,yticks实现axes的刻度设置和标注
# + savefig实现图形的本地保存
# + grid实现显示网格应用axis实现图像形状修改
# + legend实现图形标注信息显示
# + plt.subplots实现多坐标系的创建
# + 如何设置多个axes的标题、刻度
# ## 图形种类及其意义
# + 折线图 变化情况,变化趋势
# + 散点图 判断数据之间是否存在数量关联趋势,展示离群点(分布规律)
# + 柱状图 对离散数据之间数量关系进行统计和对比
# + 直方图 展示连续性变量的统计分布
# + 饼图 分类数据的占比情况
# ## 散点图(scatter)
# 需求:探究房屋面积和房屋价格的关系
# +
# Scatter plot: is house price related to floor area?
# 1) Floor area (x) vs. price (y) for 21 houses.
x = [225.98, 247.07, 253.14, 457.85, 241.58, 301.01, 20.67, 288.64,
     163.56, 120.06, 207.83, 342.75, 147.9 , 53.06, 224.72, 29.51,
     21.61, 483.21, 245.25, 399.25, 343.35]
y = [196.63, 203.88, 210.75, 372.74, 202.41, 247.61, 24.9 , 239.34,
     140.32, 104.15, 176.84, 288.23, 128.79, 49.64, 191.74, 33.1 ,
     30.74, 400.02, 205.35, 330.64, 283.45]
# 2) Create the figure.
plt.figure(figsize=(20, 8), dpi=100)
# 3) Draw the scatter plot.
plt.scatter(x, y)
# 4) Render.
plt.show()
# -
# ## 柱状图
# matplotlib.pyplot.bar(x, width, align='center', **kwargs)
# 需求1-对比每部电影的票房收入
#
# +
# Bar chart comparing box-office revenue per movie.
# 1) Prepare the data: movie names and their revenue.
movie_name = ['雷神3:诸神黄昏','正义联盟','东方快车谋杀案','寻梦环游记','全球风暴','降魔传','追捕','七十七天','密战','狂兽','其它']
# Bar positions along the x axis.
x = range(len(movie_name))
# Box-office revenue per movie.
y = [73853,57767,22354,15969,14839,8725,8716,8318,7916,6764,52222]
# 2) Create the figure.
plt.figure(figsize=(20, 8), dpi=100)
# 3) Draw the bars; one color code per bar.
plt.bar(x, y, width=0.5, color=['b','r','g','y','c','m','y','k','c','g','b'])
# 4) Label each bar with its movie name.
plt.xticks(x, movie_name)
# 5) Dashed, semi-transparent grid.
plt.grid(linestyle="--", alpha=0.5)
# 6) Title.
plt.title("电影票房收入对比")
# 7) Render.
plt.show()
# -
# ## 需求2-如何对比电影票房收入才更能加有说服力?
# 有时候为了公平起见,我们需要对比不同电影首日和首周的票房
# +
# Grouped bar chart: opening-day vs. opening-week revenue per movie —
# a fairer comparison than a single total.
# 1) Prepare the data.
movie_name = ['雷神3:诸神黄昏','正义联盟','寻梦环游记']
first_day = [10587.6,10062.5,1275.7]
first_weekend=[36224.9,34479.6,11830]
x = range(len(movie_name))
# 2) Create the figure.
plt.figure(figsize=(20, 8), dpi=100)
# 3) Two bar series; the second is shifted right by one bar width (0.2)
#    so each movie's pair of bars sits side by side.
plt.bar(x, first_day, width=0.2, label="首日票房")
plt.bar([i+0.2 for i in x], first_weekend, width=0.2, label="首周票房")
# Legend for the two series.
plt.legend()
# Center each movie label between its pair of bars (offset by half a width).
plt.xticks([i+0.1 for i in x], movie_name)
# 4) Render.
plt.show()
# -
# ## 直方图
# matplotlib.pyplot.hist(x, bins=None, normed=None, **kwargs)
# 对连续变量研究统计分布
# 需求:电影时长分布状况
# +
# Histogram of movie running times (a continuous variable).
# NOTE(review): the name `time` shadows the stdlib `time` module for the
# rest of this notebook session.
# 1) Running time, in minutes, of each movie.
time = [131, 98, 125, 131, 124, 139, 131, 117, 128, 108, 135, 138, 131, 102, 107, 114, 119, 128, 121, 142, 127, 130, 124, 101, 110, 116, 117, 110, 128, 128, 115, 99, 136, 126, 134, 95, 138, 117, 111,78, 132, 124, 113, 150, 110, 117, 86, 95, 144, 105, 126, 130,126, 130, 126, 116, 123, 106, 112, 138, 123, 86, 101, 99, 136,123, 117, 119, 105, 137, 123, 128, 125, 104, 109, 134, 125, 127,105, 120, 107, 129, 116, 108, 132, 103, 136, 118, 102, 120, 114,105, 115, 132, 145, 119, 121, 112, 139, 125, 138, 109, 132, 134,156, 106, 117, 127, 144, 139, 139, 119, 140, 83, 110, 102,123,107, 143, 115, 136, 118, 139, 123, 112, 118, 125, 109, 119, 133,112, 114, 122, 109, 106, 123, 116, 131, 127, 115, 118, 112, 135,115, 146, 137, 116, 103, 144, 83, 123, 111, 110, 111, 100, 154,136, 100, 118, 119, 133, 134, 106, 129, 126, 110, 111, 109, 141,120, 117, 106, 149, 122, 122, 110, 118, 127, 121, 114, 125, 126,114, 140, 103, 130, 141, 117, 106, 114, 121, 114, 133, 137, 92,121, 112, 146, 97, 137, 105, 98, 117, 112, 81, 97, 139, 113,134, 106, 144, 110, 137, 137, 111, 104, 117, 100, 111, 101, 110,105, 129, 137, 112, 120, 113, 133, 112, 83, 94, 146, 133, 101,131, 116, 111, 84, 137, 115, 122, 106, 144, 109, 123, 116, 111,111, 133, 150]
# 2) Create the figure.
plt.figure(figsize=(20, 8), dpi=100)
# 3) Bin width of 2 minutes; number of bins = range / bin width.
distance = 2
group_num = int((max(time) - min(time)) / distance)
plt.hist(time, bins=group_num)
# One x tick every 2 minutes across the observed range.
plt.xticks(range(min(time), max(time))[::2])
# Dashed, semi-transparent grid.
plt.grid(linestyle="--", alpha=0.5)
# Axis descriptions.
plt.xlabel("电影时长大小")
plt.ylabel("电影的数据量")
# 4) Render.
plt.show()
# -
# ## 饼图
# plt.pie(x, labels=,autopct=,colors)
# x:数量,自动算百分比
# labels:每部分名称
# autopct:占比显示指定%1.2f%%
# colors:每部分颜色
# 需求:显示不同的电影的排片占比
#
# +
# Pie chart: share of cinema screenings per movie.
# plt.pie(x, labels=..., autopct=..., colors=...):
#   x       - raw counts; percentages are computed automatically
#   labels  - name of each wedge
#   autopct - percentage label format, e.g. "%1.2f%%"
#   colors  - one color per wedge
# 1) Prepare the data.
movie_name = ['雷神3:诸神黄昏','正义联盟','东方快车谋杀案','寻梦环游记','全球风暴','降魔传','追捕','七十七天','密战','狂兽','其它']
place_count = [60605,54546,45819,28243,13270,9945,7679,6799,6101,4621,20105]
# 2) Create the figure.
plt.figure(figsize=(20, 8), dpi=100)
# 3) Draw the pie chart.
plt.pie(place_count, labels=movie_name, autopct="%1.2f%%", colors=['b','r','g','y','c','m','y','k','c','g','y'])
# Legend and title.
plt.legend()
plt.title("电影排片占比")
# 4) Render.
plt.show()
# -
# 为了让显示的饼图保持圆形,需要添加axis保证长宽一样
#
# plt.axis('equal')
#
|
matplotlib_study/Matplotlib_test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# %matplotlib inline
import numpy as np
import os
import pandas
import seaborn
import items as itemmaker
from glob import glob
from math import sqrt
from matplotlib import pyplot
from matplotlib import ticker
from mpl_toolkits.mplot3d import Axes3D
from openpyxl import load_workbook
import analyze
# Experiment configuration: four multi-objective optimizers compared on a
# static bin-packing instance set.
nop = 4     # number of optimization procedures (methods) compared
runs = 20   # total experiments available; only run `r` is analyzed below
folders = ['/Users/gelliebeenz/Documents/Python/ObjectiveMethod/Static/NSGA-II/',
           '/Users/gelliebeenz/Documents/Python/ObjectiveMethod/Static/MOMAD/',
           '/Users/gelliebeenz/Documents/Python/ObjectiveMethod/Static/MOEPSO/',
           '/Users/gelliebeenz/Documents/Python/ObjectiveMethod/Static/MOMA/',
           '/Users/gelliebeenz/Documents/Python/ObjectiveMethod/Analysis/']
files = ['ApproximationSet.csv', 'ApproximationSet.csv',
         'ApproximationSet.csv', 'ApproximationSet.csv']
levels = ['SBSBPP500/']
levellabels = ['500 Items']
methods = ['NSGA-II', 'MOMAD', 'MOEPSO', 'MOMA']
colors = ['#49ADA2', '#7797F4', '#C973F4', '#EF6E8B', '#FFAA6C']
solsindex = ['level', 'number', 'method']
dfsolseries = []
seaborn.set(font_scale=1.25)
lvl = 0   # index into `levels`
r = 7     # experiment number to analyze (0-based)
print('Analyzing Experiment{0:02d}'.format(r + 1))
# Resolve the per-method run folders and load each method's Pareto front
# (helpers are defined in the project-local `analyze` module).
runfolds = analyze.listfolders(r, lvl, nop, folders, levels)
data, opcat = analyze.getparetofront(nop, runfolds, files)
# Tag every solution row with the method that produced it.
for opal in range(nop):
    method = []
    for m in range(len(data[opal])):
        method.append(methods[opal])
    data[opal]['Method'] = method
# --- 3-D scatter of the three objectives, one color per method ---
seaborn.set_style('whitegrid')
plotname0 = runfolds[nop] + 'ParetoPlot3D'
plot0 = pyplot.figure().gca(projection='3d')
for opal in range(nop):
    plot0.scatter(data[opal]['No. of Bins'], data[opal]['Max. Bin Height'],
                  data[opal]['Avg. Bin Weight'], c=colors[opal], label=methods[opal],
                  linewidths=1, edgecolor="w", s=40)
plot0.set_xlabel(opcat[0], labelpad=10)
start, end = plot0.get_xlim()
plot0.xaxis.set_ticks(np.arange(start, end, 40))
plot0.set_ylabel(opcat[1], labelpad=10)
start, end = plot0.get_ylim()
plot0.yaxis.set_ticks(np.arange(start, end, 25))
plot0.set_zlabel(opcat[2], labelpad=10)
# Legend below the axes, one column per method; fixed viewing angle.
plot0.legend(bbox_to_anchor=(0, -1, 1, 1), ncol=nop)
plot0.view_init(20, 45)
pyplot.savefig(plotname0 + '.eps', format='eps', dpi=2000)
pyplot.savefig(plotname0 + '.pdf', format='pdf', dpi=2000)
pyplot.close()
# --- Pairwise 2-D scatter matrix of the objectives ---
seaborn.set_style('darkgrid')
plotname2 = runfolds[nop] + 'ParetoPlot'
dataset = pandas.concat(data, keys=methods)
# Distinct marker shape per method, in addition to color.
scat = seaborn.PairGrid(dataset, hue='Method', palette=seaborn.color_palette(colors),
                        hue_kws={"marker": ["o", "^", "D", "s"]})
scat = scat.map_diag(pyplot.hist)
scat = scat.map_offdiag(pyplot.scatter, linewidths=1, edgecolor="w", s=40)
for ax in scat.axes.flat:
    # Integer-format x tick labels; aim for ~5 ticks per axis with the
    # interval rounded to the nearest 10.
    ax.get_xaxis().set_major_formatter(ticker.FuncFormatter(lambda xax, p: format(int(xax))))
    start, end = ax.get_xlim()
    intervals = round((end - start) / 5, -1)
    ax.set(xticks=np.arange(start, end+1, intervals))
    start, end = ax.get_ylim()
    intervals = round((end - start) / 5, -1)
    ax.set(yticks=np.arange(start, end+1, intervals))
    pyplot.setp(ax.get_xticklabels(), rotation=45)
scat.add_legend(title=None, frameon=True)
# Nudge the legend box to sit just outside the grid.
scat.fig.get_children()[-1].set_bbox_to_anchor((1, 0.878, 0, 0))
pyplot.savefig(plotname2+'.eps', format='eps', dpi=4000, bbox_inches='tight')
pyplot.savefig(plotname2+'.pdf', format='pdf', dpi=4000, bbox_inches='tight')
pyplot.close()
|
SampleScripts/Static_Only/analysis_test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Performance Optimization with JSON Documents
# Updated: 2019-10-03
# ## Optimizing Access to JSON Documents
#
# An earlier notebook covered some of the differences between storing data in JSON format versus BSON format. From an `INSERT` perspective, if your incoming data is not already in BSON format, then it is more expensive to store data in BSON format, as shown in the following graph, since you will need to invoke the `JSON_TO_BSON` conversion function.
#
# The CUSTOMER document data set was used which includes 20,000 customer documents in JSON format with details on individual customers including an array of product purchases. The JSON column is defined as VARCHAR(2000), while the BSON column is defined as VARBINARY(2000) to avoid the additional overhead of dealing with BLOB objects.
# ### Load the Customer file into a table
# You must run the first command to get the working directory for the IMPORT command.
import os
# Build the absolute path to the sample data file in the current working
# directory. os.path.join handles the separator portably instead of
# hard-coding "/".
fname = os.path.join(os.getcwd(), "customers.js")
print("Input file: " + fname)
# ### Load Db2 Extensions
# The Db2 Jupyter extensions need to be loaded in order to run any of the examples in this notebook. In addition, a `CONNECT` command needs to be issued to connect to the local Db2 database. The default `SAMPLE` database is assumed to exist on the local system. If not, you need to modify the `CONNECT` command to use the appropriate userid, database, and host parameters.
# %run ../db2.ipynb
# %run ../connection.ipynb
# Next we create the table that will contain the customer data.
# + magic_args="-quiet " language="sql"
# DROP TABLE JSON_RAW_DATA;
# CREATE TABLE JSON_RAW_DATA
# (
# CUSTOMER VARCHAR(2000)
# );
# -
# The following code will load the data into the table.
import io
import json
import time  # fix: time.time() is called below but `time` was never imported
print("Starting Load")
start_time = time.time()
# %sql autocommit off
# x = %sql prepare INSERT INTO JSON_RAW_DATA VALUES (?)
# NOTE: `x` and `rc` are bound by the %sql cell magics above; this cell
# only runs inside the Db2 Jupyter extension environment.
if (x != False):
    i = 0
    with open(fname,"r") as records:
        for record in records:
            i += 1
            # rc = %sql execute :x using record@char
            if (rc == False): break
            # Commit every 5000 rows to keep the transaction log small.
            if ((i % 5000) == 0):
                print(str(i)+" rows read.")
                # %sql commit hold
# %sql commit work
# %sql autocommit on
end_time = time.time()
print('Total load time for {:d} records is {:.2f} seconds'.format(i,end_time-start_time))
# In this step we will create three tables to hold the data: one using a character format, one using a BLOB format (inlined), and the third using a binary format.
# + magic_args="-q" language="sql"
# DROP TABLE JSON_CHAR;
# CREATE TABLE JSON_CHAR
# (
# CUSTOMER VARCHAR(2000)
# );
#
# DROP TABLE JSON_BINARY;
# CREATE TABLE JSON_BINARY
# (
# CUSTOMER VARBINARY(2000)
# );
#
# DROP TABLE JSON_BLOB;
# CREATE TABLE JSON_BLOB
# (
# CUSTOMER BLOB(2000) INLINE LENGTH 2000
# );
# -
# The data from the base table will be inserted into these two tables using `INSERT INTO SELECT FROM` syntax. The performance of inserting data into each table is compared after the `INSERT` completes.
# %sql -q INSERT INTO JSON_CHAR SELECT * FROM JSON_RAW_DATA;
char_load = sqlelapsed
# %sql -q INSERT INTO JSON_BINARY SELECT JSON_TO_BSON(CUSTOMER) FROM JSON_RAW_DATA;
binary_load = sqlelapsed
# %sql -q INSERT INTO JSON_BLOB SELECT JSON_TO_BSON(CUSTOMER) FROM JSON_RAW_DATA;
blob_load = sqlelapsed
# %sql -bar values ('CHAR',:char_load),('BINARY',:binary_load),('BLOB',:blob_load)
# Converting the data to BSON does add overhead to the `INSERT` time. You may also note that the BLOB insert time is longer because Db2 must add control information to each object. If you are only storing and retrieving entire JSON documents then the conversion to BSON may not be unnecessary.
# ## Searching and Retrieving JSON Documents
# Db2 uses the BSON format internally for the processing done by the JSON access functions. The BSON format has the advantage of having already parsed the document into key-value pairs as well as having a tree structure available for easy traversal. JSON documents need to be converted internally to BSON to allow the Db2 functions to be able traverse them. Any data stored in JSON format that is accessed by these functions is first implicitly converted to BSON format and any result returned is converted back to JSON format (if this is requested). This overhead occurs for each unique access to the JSON data and can significantly impact the performance of a query.
#
# This means that there are two areas where this implicit overhead from JSON to BSON can impact query performance when accessing a JSON document:
# * How many values do you need to materialize as part of the SELECT column list
# * How many values do you need to reference in the SQL predicates
#
# We ran a number of sample tests to explore the performance impacts of the different choices (see the previous section). In the graphs that follow, 3 bars are shown with the labels JSON, Binary, and BLOB. They represent the following:
#
# * JSON – Data stored as JSON in a VARCHAR column
# * Binary – Data stored as BSON in a VARBINARY column
# * BLOB – Data stored as BSON in a BLOB (inlined) column
#
# The Db2 JSON functions need to traverse a document for both display and predicate purposes. If JSON documents are identified by predicates on non-JSON columns, then storing the fields in JSON or BSON format makes little difference from the perspective of predicate processing. If the SQL requires columns or predicates based on the JSON data itself, then additional overhead is required to evaluate each predicate for JSON formatted documents. Finally, the actual retrieval of the target value will also incur conversion overhead (if needed).
# ```sql
# SELECT COUNT(*) FROM JSON_TABLE
# WHERE
# JSON_VALUE(CUSTOMER, '$.contact.state' RETURNING CHAR(2)) = 'OH'
# ```
# The above statement was repeated as many times as possible in a 10 second interval and the execution count (throughput) is shown on the chart below (higher is better!).
# %sql option runtime 10
# count_json = %sql -t \
# SELECT COUNT(*) FROM JSON_CHAR WHERE JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)) = 'OH'
# count_blob = %sql -t \
# SELECT COUNT(*) FROM JSON_BLOB WHERE JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)) = 'OH'
# count_binary = %sql -t \
# SELECT COUNT(*) FROM JSON_BINARY WHERE JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)) = 'OH'
# %sql -bar values ('CHAR',:count_json),('BINARY',:count_binary),('BLOB',:count_blob)
# %sql option runtime 1
# The graph highlights the performance benefits of storing the data as JSON, BSON in a BLOB, or BSON in a VARBINARY field. The performance between VARBINARY and BLOB fields may vary between runs, but generally VARBINARY has the additional benefit of faster retrieval speed because it doesn't need to deal with large object pointers and can reside directly on a buffered data page (which inlined LOB fields can also do but only for the portion that fits on the page). Note that VARBINARY is limited to approximately 32K documents (the maximum Db2 page size) so if your documents are larger than that you will need to use LOB storage.
#
# If you examine the cost of retrieving columns in a SELECT list, the performance ratio is almost the same.
# %sql option runtime 10
# count_json = %sql -t \
# SELECT JSON_VALUE(CUSTOMER, '$.contact.city' RETURNING CHAR(30)) \
# FROM JSON_CHAR WHERE JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)) = 'OH'
# count_blob = %sql -t \
# SELECT JSON_VALUE(CUSTOMER, '$.contact.city' RETURNING CHAR(30)) \
# FROM JSON_BLOB WHERE JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)) = 'OH'
# count_binary = %sql -t \
# SELECT JSON_VALUE(CUSTOMER, '$.contact.city' RETURNING CHAR(30)) \
# FROM JSON_BINARY WHERE JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)) = 'OH'
# %sql -bar values ('CHAR',:count_json),('BINARY',:count_binary),('BLOB',:count_blob)
# %sql option runtime 1
# The decision to use BSON versus JSON as the storage format comes down to whether or not the application needs to regularly search for fields within a JSON document. If the majority of the JSON access is to store and retrieve entire documents, then the overhead of BSON conversion is unnecessary. However, if the access pattern to the JSON document is unknown, then it may be worthwhile to convert the documents to BSON for faster retrieval. The other option is to use indexes which is discussed in the next section.
# ## Indexing JSON Documents
# If your application is always scanning documents for specific values using SQL predicates, then it may be worth placing indexes on the target fields. Db2 supports computed indexes (aka index on expression or expression-based index), which allows for the use of functions like `JSON_VALUE` to be used as part of the index definition. For instance, searching for a customer number will result in a scan against the table if no indexes are defined:
# ```sql
# SELECT COUNT(*)
# FROM CUSTOMERS
# WHERE JSON_VALUE(DETAILS, '$.customerid' RETURNING INT) = 100000
# ```
# To create an index on the customerid field, we use the `JSON_VALUE` function to extract the value from the JSON field.
# ```sql
# CREATE INDEX IX_CUSTOMERID ON CUSTOMERS
# (JSON_VALUE(DETAILS,'$.customerid' RETURNING INT));
# ```
#
# One consideration when creating indexes on JSON documents is that the `JSON_VALUE` function must include a `RETURNING` clause. The `CREATE INDEX` statement cannot determine the data type from the command and it will raise an error message when it attempts to create the index.
# + language="sql"
# CREATE INDEX IX_CUSTOMERID ON JSON_CHAR
# (JSON_VALUE(CUSTOMER,'$.customerid'))
# -
# Each of the three tables will have an index placed on the `contact.state` key so that we can see what the performance difference will be on our previous queries.
# + language="sql"
# CREATE INDEX IX_STATE_CHAR ON JSON_CHAR
# (JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)));
# CREATE INDEX IX_STATE_BINARY ON JSON_BINARY
# (JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)));
# CREATE INDEX IX_STATE_BLOB ON JSON_BLOB
# (JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)));
# -
# Now we can run our queries again to see what the results will be.
# %sql option runtime 10
# count_json = %sql -t \
# SELECT COUNT(*) FROM JSON_CHAR WHERE JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)) = 'OH'
# count_blob = %sql -t \
# SELECT COUNT(*) FROM JSON_BLOB WHERE JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)) = 'OH'
# count_binary = %sql -t \
# SELECT COUNT(*) FROM JSON_BINARY WHERE JSON_VALUE(CUSTOMER,'$.contact.state' RETURNING CHAR(2)) = 'OH'
# %sql -bar values ('CHAR',:count_json),('BINARY',:count_binary),('BLOB',:count_blob)
# %sql option runtime 1
# In this scenario, where we have an SQL statement selecting which records to read based on predicates on JSON values, we can see that running the statement across all three storage options results in uniform performance. This is because we do not actually access the JSON document to evaluate the predicate at execution time due to the use of the index in the access plan.
# ## Summary
# As always, the time you need to spend on performance considerations will depend on the way that your JSON documents will be accessed by your application(s) and the performance requirements of the application. And the decisions you make will have to balance the benefits and cost of each possible solution.
# If you are going to do a lot of individual key access on your JSON documents or want to maximize your performance, then converting any incoming JSON data to BSON format as the data is stored will improve performance at time of access. As well, the process of normal query performance tuning may indicate that indexes on key JSON predicates will help performance considerably.
# #### Credits: IBM 2019, <NAME> [<EMAIL>]
|
Db2_11.5_Features/Db2_11.5_JSON_10_Performance_Optimization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import os; import sys; sys.path.insert(0,'../')
import warnings
import pandas as pd
warnings.simplefilter(action='ignore', category=pd.errors.PerformanceWarning)
import socceraction.spadl.api as spadl
# -
## Configure file and folder names
datafolder = "../data"
# Raw StatsBomb open-data JSON tree (as downloaded from GitHub).
statsbomb_json = os.path.join(datafolder,"statsbomb-root","open-data-master","data")
# Intermediate HDF5 file holding the raw StatsBomb tables.
statsbomb_h5 = os.path.join(datafolder,"statsbomb.h5")
# Output HDF5 file with the data converted to the SPADL action format.
spadl_h5 = os.path.join(datafolder,"spadl-statsbomb.h5")
# ### Convert raw Statsbomb json files to Statsbomb HDF5 file
spadl.statsbombjson_to_statsbombh5(statsbomb_json,statsbomb_h5)
# ### Inspect StatsBomb HDF5 file
# +
# Load the four top-level tables plus the event stream of the first match,
# then print each table's columns for a quick schema overview.
tablenames = ["matches","players","teams","competitions"]
tables = {name : pd.read_hdf(statsbomb_h5,key=name) for name in tablenames}
match_id = tables["matches"].match_id[0]
tables["events"] = pd.read_hdf(statsbomb_h5,f"events/match_{match_id}")
for k,df in tables.items():
    print("#",k)
    print(df.columns,"\n")
# -
# ### Convert Statsbomb data (in a HDF5 file) to the SPADL format (in a HDF5 file)
spadl.statsbombh5_to_spadlh5(statsbomb_h5,spadl_h5)
# ### Inspect SPADL HDF5 file
# +
# Same schema overview for the SPADL file: lookup tables plus the action
# stream of the first game.
tablenames = ["games","players","teams","competitions","actiontypes","bodyparts","results"]
tables = {name : pd.read_hdf(spadl_h5,key=name) for name in tablenames}
game_id = tables["games"].game_id[0]
tables["actions"] = pd.read_hdf(spadl_h5,f"actions/game_{game_id}")
for k,df in tables.items():
    print("#",k)
    print(df.columns,"\n")
# -
# ### (optional) Plotting actions
# Extra library required: ```pip install matplotsoccer```
# +
import matplotsoccer
# Reload the SPADL lookup tables.
tablenames = [
    "games",
    "players",
    "teams",
    "competitions",
    "actiontypes",
    "bodyparts",
    "results",
]
tables = {name: pd.read_hdf(spadl_h5, key=name) for name in tablenames}
# Select England vs Belgium game at World Cup
games = tables["games"].merge(tables["competitions"])
game_id = games[(games.competition_name == "FIFA World Cup")
              & (games.away_team_name == "England")
              & (games.home_team_name == "Belgium")].game_id.values[0]
actions = pd.read_hdf(spadl_h5, f"actions/game_{game_id}")
# Denormalize: join the id columns to their human-readable lookup values
# and order the actions chronologically.
actions = (
    actions.merge(tables["actiontypes"])
    .merge(tables["results"])
    .merge(tables["bodyparts"])
    .merge(tables["players"],"left",on="player_id")
    .merge(tables["teams"],"left",on="team_id")
    .sort_values(["period_id", "time_seconds", "timestamp"])
    .reset_index(drop=True)
)
# use nickname if available else use full name
actions["player"] = actions[["player_nickname","player_name"]].apply(lambda x: x[0] if x[0] else x[1],axis=1)
#shot = 128
shot = 2201  # row index of the shot action to visualize
# The shot plus the four actions leading up to it.
a = actions[shot-4:shot+1]
games = tables["games"]
g = list(games[games.game_id == a.game_id.values[0]].itertuples())[0]
# Match minute of the first plotted action (periods are 45 minutes).
minute = int((a.period_id.values[0]-1)*45 +a.time_seconds.values[0] // 60) + 1
game_info = f"{g.match_date} {g.home_team_name} {g.home_score}-{g.away_score} {g.away_team_name} {minute}'"
print(game_info)
labels = a[["time_seconds", "type_name", "player", "team_name"]]
# Full-pitch view of the action sequence...
matplotsoccer.actions(
    location=a[["start_x", "start_y", "end_x", "end_y"]],
    action_type=a.type_name,
    team= a.team_name,
    result= a.result_name == "success",
    label=labels,
    labeltitle=["time","actiontype","player","team"],
    zoom=False,
    figsize=6
)
# ...and the same sequence zoomed in on the relevant pitch area.
matplotsoccer.actions(
    location=a[["start_x", "start_y", "end_x", "end_y"]],
    action_type=a.type_name,
    team=a.team_name,
    result=a.result_name == "success",
    label=labels,
    labeltitle=["time","actiontype","player","team"],
    zoom=True,
)
|
public-notebooks/2-convert-statsbomb-to-spadl.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Read a number and print its factorial, accumulating with a counter loop.
y = int(input("Enter a number"))
fact, i = 1, 1
while i <= y:
    fact *= i
    i += 1
print("factorial is ", fact)
# Sum the multiples of 5 between 0 and 100 inclusive.
x, total = 0, 0
while x <= 100:
    if x % 5 == 0:
        total += x
    x += 1
print(total)
# Print the integers 0 through 99, one per line.
for x in range(100):
    print(x)
# Running sum of 0..99; the print after the loop shows the total (4950).
# NOTE(review): assumes print(e) sits outside the loop as reconstructed.
e=0
for i in range(100):
    e=e+i
print(e)
# Factorial again, this time with a for loop over 1..d.
d=int(input("enter a number"))
e=1
for i in range(1,d+1):
    e=e*i
print("factorial is ",e)
|
Untitled10.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
time = []            # list of player dicts (the "team"; Portuguese: time)
jogador = dict()     # scratch dict for the player currently being entered
partidas = list()    # scratch list of goals per match for that player
# NOTE(review): `time` shadows the stdlib module name for this session.
while True:
    jogador.clear()
    jogador['nome'] = str(input('Nome do Jogador: '))
    tot = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
    partidas.clear()
    for c in range(0,tot):
        partidas.append(int(input(f' Quantos gols na partida {c}? ')))
    # Store a slice copy so the later partidas.clear() doesn't erase it.
    jogador['gols'] = partidas[:]
    jogador['total'] = sum(partidas)
    # Append a copy of the dict for the same reason.
    time.append(jogador.copy())
    # Keep asking until the user answers S or N.
    while True:
        resp = str(input('Quer continuar? [S/N] ')).upper()[0]
        if resp in 'SN':
            break
        print('ERRO! Responda apenas S ou N.')
    if resp == 'N':
        break
# Header row: a code column plus the dict keys (nome, gols, total).
print('cod ', end='')
for i in jogador.keys():
    print(f'{i:<15} ',end='')
print()
print('-='*40)
# One row per player, indexed by position in `time`.
for k,v in enumerate(time):
    print(f'{k:>3} ', end='')
    for d in v.values():
        print(f'{str(d):<15} ', end='')
    print()
print('-='*40)
# Interactive lookup of one player's per-match goals; 999 exits.
while True:
    busca = int(input('Mostrar dados de qual jogador? !999 cancela!'))
    if busca == 999:
        break
    if busca >= len(time):
        print(f'ERRO! Não existe jogador com codigo {busca}! ')
    else:
        print(f' -- LEVANTAMENTO DO JOGADOR {time[busca]["nome"]}:')
        for i,g in enumerate(time[busca]["gols"]):
            print(f' No jogo {i+1} fez {g} gols.')
    print('-' * 40)
print('<< Volte Sempre >>')
# -
|
.ipynb_checkpoints/EX095 - Aprimorando os Dicionários -checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from os import path
import os
import pandas as pd
pd.set_option('display.max_columns', None)
# +
# Hyperopt result sets from different feature/tree configurations; only
# one is loaded at a time — uncomment the file you want to inspect.
# 100 trees with Aug-26 features:
# df = pd.read_csv('opt-aug-26/hyperopt-2015-08-26T14:15:39.704429.csv')
# 1000 trees with Aug-26 features:
df = pd.read_csv('opt-aug-26/hyperopt-2015-08-27T01:32:24.393333.csv')
# var trees with Aug-26 features:
# df = pd.read_csv('opt-aug-26/hyperopt-2015-08-27T21:23:25.051104.csv')
# 1000 trees with Aug-27 features:
# df = pd.read_csv('ec2/hyperopt-2015-08-28T06:00:21.522118.csv')
len(df)
# +
dirname = 'opt-aug-27-layer2'
# Read every CSV in the results directory and combine them into one frame.
# Collecting frames in a list and calling pd.concat once is O(n) overall;
# the original DataFrame.append-in-a-loop was quadratic and that method
# was removed in pandas 2.x.
frames = []
for filename in os.listdir(dirname):
    if filename.endswith('.csv'):
        # `csv_path` avoids shadowing `path` (imported via `from os import path`).
        csv_path = os.path.join(dirname, filename)
        frames.append(pd.read_csv(csv_path))
# Guard the empty-directory case: pd.concat([]) raises.
df = pd.concat(frames) if frames else pd.DataFrame()
len(df)
# -
# Rank hyperopt trials by loss, ascending (best first). DataFrame.sort()
# was removed in pandas 0.20+; sort_values is the supported equivalent.
df.sort_values('loss', inplace=True)
df[:10]
# Distribution of max_depth among the 20 best trials.
subset = df[:20]
subset.max_depth.value_counts()
# Top 10 trials restricted to the 1000-round configuration.
df[df.num_rounds == 1000][:10]
|
exploration/hyperopt_results.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Machine Learning Engineer Nanodegree
# ## Model Evaluation & Validation
# ## Project: Predicting Boston Housing Prices
#
# Welcome to the first project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
#
# In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
#
# >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
# ## Getting Started
# In this project, you will evaluate the performance and predictive power of a model that has been trained and tested on data collected from homes in suburbs of Boston, Massachusetts. A model trained on this data that is seen as a *good fit* could then be used to make certain predictions about a home — in particular, its monetary value. This model would prove to be invaluable for someone like a real estate agent who could make use of such information on a daily basis.
#
# The dataset for this project originates from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Housing). The Boston housing data was collected in 1978 and each of the 506 entries represent aggregated data about 14 features for homes from various suburbs in Boston, Massachusetts. For the purposes of this project, the following preprocessing steps have been made to the dataset:
# - 16 data points have an `'MEDV'` value of 50.0. These data points likely contain **missing or censored values** and have been removed.
# - 1 data point has an `'RM'` value of 8.78. This data point can be considered an **outlier** and has been removed.
# - The features `'RM'`, `'LSTAT'`, `'PTRATIO'`, and `'MEDV'` are essential. The remaining **non-relevant features** have been excluded.
# - The feature `'MEDV'` has been **multiplicatively scaled** to account for 35 years of market inflation.
#
# Run the code cell below to load the Boston housing dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.
# +
# Import libraries necessary for this project
import numpy as np
import pandas as pd
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern versions provide ShuffleSplit via sklearn.model_selection.
from sklearn.cross_validation import ShuffleSplit
# Import supplementary visualizations code visuals.py
import visuals as vs
# Pretty display for notebooks
# %matplotlib inline
# Load the Boston housing dataset and split it into the target variable
# (MEDV, the house price) and the remaining feature columns.
data = pd.read_csv('housing.csv')
prices = data['MEDV']
features = data.drop('MEDV', axis = 1)
# Success (Python 2 print statement — this notebook targets Python 2)
print "Boston housing dataset has {} data points with {} variables each.".format(*data.shape)
# -
# ## Data Exploration
# In this first section of this project, you will make a cursory investigation about the Boston housing data and provide your observations. Familiarizing yourself with the data through an explorative process is a fundamental practice to help you better understand and justify your results.
#
# Since the main goal of this project is to construct a working model which has the capability of predicting the value of houses, we will need to separate the dataset into **features** and the **target variable**. The **features**, `'RM'`, `'LSTAT'`, and `'PTRATIO'`, give us quantitative information about each data point. The **target variable**, `'MEDV'`, will be the variable we seek to predict. These are stored in `features` and `prices`, respectively.
# ### Implementation: Calculate Statistics
# For your very first coding implementation, you will calculate descriptive statistics about the Boston housing prices. Since `numpy` has already been imported for you, use this library to perform the necessary calculations. These statistics will be extremely important later on to analyze various prediction results from the constructed model.
#
# In the code cell below, you will need to implement the following:
# - Calculate the minimum, maximum, mean, median, and standard deviation of `'MEDV'`, which is stored in `prices`.
# - Store each calculation in their respective variable.
# +
# Descriptive statistics of the target variable 'MEDV' (stored in `prices`),
# used later to sanity-check the model's predictions.
# TODO: Minimum price of the data
minimum_price = np.amin(prices)
# TODO: Maximum price of the data
maximum_price = np.amax(prices)
# TODO: Mean price of the data
mean_price = np.mean(prices)
# TODO: Median price of the data
median_price = np.median(prices)
# TODO: Standard deviation of prices of the data
# (np.std is the population standard deviation, ddof=0)
std_price = np.std(prices)
# Show the calculated statistics
print "Statistics for Boston housing dataset:\n"
print "Minimum price: ${:,.2f}".format(minimum_price)
print "Maximum price: ${:,.2f}".format(maximum_price)
print "Mean price: ${:,.2f}".format(mean_price)
print "Median price ${:,.2f}".format(median_price)
print "Standard deviation of prices: ${:,.2f}".format(std_price)
# -
# ### Question 1 - Feature Observation
# As a reminder, we are using three features from the Boston housing dataset: `'RM'`, `'LSTAT'`, and `'PTRATIO'`. For each data point (neighborhood):
# - `'RM'` is the average number of rooms among homes in the neighborhood.
# - `'LSTAT'` is the percentage of homeowners in the neighborhood considered "lower class" (working poor).
# - `'PTRATIO'` is the ratio of students to teachers in primary and secondary schools in the neighborhood.
#
#
# ** Using your intuition, for each of the three features above, do you think that an increase in the value of that feature would lead to an **increase** in the value of `'MEDV'` or a **decrease** in the value of `'MEDV'`? Justify your answer for each.**
#
# **Hint:** This problem can phrased using examples like below.
# * Would you expect a home that has an `'RM'` value(number of rooms) of 6 be worth more or less than a home that has an `'RM'` value of 7?
# * Would you expect a neighborhood that has an `'LSTAT'` value(percent of lower class workers) of 15 have home prices be worth more or less than a neighborhood that has an `'LSTAT'` value of 20?
# * Would you expect a neighborhood that has an `'PTRATIO'` value(ratio of students to teachers) of 10 have home prices be worth more or less than a neighborhood that has an `'PTRATIO'` value of 15?
# **Answer: I think that RM is directly related to MEDV, while LSTAT and PTRATIO are inversely related to MEDV, so when RM goes up, MEDV goes up, and when LSTAT or PTRATIO goes up, MEDV goes down. This is because houses that have more rooms (a higher RM) generally have a larger square footage, so they should cost more (higher MEDV). Additionally, houses in a neighborhood with a higher percentage of lower-class workers or a higher ratio of students to teachers have less desirable qualities, driving the price of houses down and resulting in a lower MEDV.**
# ----
#
# ## Developing a Model
# In this second section of the project, you will develop the tools and techniques necessary for a model to make a prediction. Being able to make accurate evaluations of each model's performance through the use of these tools and techniques helps to greatly reinforce the confidence in your predictions.
# ### Implementation: Define a Performance Metric
# It is difficult to measure the quality of a given model without quantifying its performance over training and testing. This is typically done using some type of performance metric, whether it is through calculating some type of error, the goodness of fit, or some other useful measurement. For this project, you will be calculating the [*coefficient of determination*](http://stattrek.com/statistics/dictionary.aspx?definition=coefficient_of_determination), R<sup>2</sup>, to quantify your model's performance. The coefficient of determination for a model is a useful statistic in regression analysis, as it often describes how "good" that model is at making predictions.
#
# The values for R<sup>2</sup> range from 0 to 1, which captures the percentage of squared correlation between the predicted and actual values of the **target variable**. A model with an R<sup>2</sup> of 0 is no better than a model that always predicts the *mean* of the target variable, whereas a model with an R<sup>2</sup> of 1 perfectly predicts the target variable. Any value between 0 and 1 indicates what percentage of the target variable, using this model, can be explained by the **features**. _A model can be given a negative R<sup>2</sup> as well, which indicates that the model is **arbitrarily worse** than one that always predicts the mean of the target variable._
#
# For the `performance_metric` function in the code cell below, you will need to implement the following:
# - Use `r2_score` from `sklearn.metrics` to perform a performance calculation between `y_true` and `y_predict`.
# - Assign the performance score to the `score` variable.
# +
# TODO: Import 'r2_score'
from sklearn.metrics import r2_score
def performance_metric(y_true, y_predict):
    """ Calculates and returns the performance score between
    true and predicted values based on the metric chosen.

    Parameters
    ----------
    y_true : array-like
        Ground-truth target values.
    y_predict : array-like
        Predicted target values, same length as y_true.

    Returns
    -------
    float
        The coefficient of determination, R^2: 1.0 is a perfect fit,
        0.0 is no better than always predicting the mean, and negative
        values indicate an arbitrarily worse model.
    """
    # TODO: Calculate the performance score between 'y_true' and 'y_predict'
    score = r2_score(y_true, y_predict)
    # Return the score
    return score
# FIX: removed the stray module-level `print score` that followed this
# definition -- `score` is local to performance_metric, so that statement
# raised a NameError when the cell was executed.
# -
# ### Question 2 - Goodness of Fit
# Assume that a dataset contains five data points and a model made the following predictions for the target variable:
#
# | True Value | Prediction |
# | :-------------: | :--------: |
# | 3.0 | 2.5 |
# | -0.5 | 0.0 |
# | 2.0 | 2.1 |
# | 7.0 | 7.8 |
# | 4.2 | 5.3 |
#
# Run the code cell below to use the `performance_metric` function and calculate this model's coefficient of determination.
# Calculate the performance of this model on the five-point example table
# from Question 2 (the answer below reports R^2 = 0.923 for this data).
score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])
print "Model has a coefficient of determination, R^2, of {:.3f}.".format(score)
# * Would you consider this model to have successfully captured the variation of the target variable?
# * Why or why not?
#
# ** Hint: ** The R2 score is the proportion of the variance in the dependent variable that is predictable from the independent variable. In other words:
# * R2 score of 0 means that the dependent variable cannot be predicted from the independent variable.
# * R2 score of 1 means the dependent variable can be predicted from the independent variable.
# * R2 score between 0 and 1 indicates the extent to which the dependent variable is predictable. An
# * R2 score of 0.40 means that 40 percent of the variance in Y is predictable from X.
# **Answer: Yes, I would say the model successfully captures the variation of the target variable because the predictions are consistently close to the true values. An R2 score of 0.923 reflects that close fit.
# ### Implementation: Shuffle and Split Data
# Your next implementation requires that you take the Boston housing dataset and split the data into training and testing subsets. Typically, the data is also shuffled into a random order when creating the training and testing subsets to remove any bias in the ordering of the dataset.
#
# For the code cell below, you will need to implement the following:
# - Use `train_test_split` from `sklearn.cross_validation` to shuffle and split the `features` and `prices` data into training and testing sets.
# - Split the data into 80% training and 20% testing.
# - Set the `random_state` for `train_test_split` to a value of your choice. This ensures results are consistent.
# - Assign the train and testing splits to `X_train`, `X_test`, `y_train`, and `y_test`.
# +
# TODO: Import 'train_test_split'
from sklearn.cross_validation import train_test_split
# TODO: Shuffle and split the data into training and testing subsets
# 80/20 split; random_state is fixed so results are reproducible across runs.
X_train, X_test, y_train, y_test = train_test_split(features, prices, test_size = 0.20, random_state = 100)
# Success
print "Training and testing split was successful."
# -
# ### Question 3 - Training and Testing
#
# * What is the benefit to splitting a dataset into some ratio of training and testing subsets for a learning algorithm?
#
# **Hint:** Think about how overfitting or underfitting is contingent upon how splits on data is done.
# **Answer: It is necessary to split your dataset into training and testing sets because evaluating a model on the same data it was trained on cannot detect overfitting. Additionally, since it can be difficult to obtain new data, you can simply set aside a random portion of the existing data to use for testing.
# ----
#
# ## Analyzing Model Performance
# In this third section of the project, you'll take a look at several models' learning and testing performances on various subsets of training data. Additionally, you'll investigate one particular algorithm with an increasing `'max_depth'` parameter on the full training set to observe how model complexity affects performance. Graphing your model's performance based on varying criteria can be beneficial in the analysis process, such as visualizing behavior that may not have been apparent from the results alone.
# ### Learning Curves
# The following code cell produces four graphs for a decision tree model with different maximum depths. Each graph visualizes the learning curves of the model for both training and testing as the size of the training set is increased. Note that the shaded region of a learning curve denotes the uncertainty of that curve (measured as the standard deviation). The model is scored on both the training and testing sets using R<sup>2</sup>, the coefficient of determination.
#
# Run the code cell below and use these graphs to answer the following question.
# Produce learning curves for varying training set sizes and maximum depths
# (visuals.py helper; draws the four decision-tree learning-curve graphs
# described in the markdown cell above)
vs.ModelLearning(features, prices)
# ### Question 4 - Learning the Data
# * Choose one of the graphs above and state the maximum depth for the model.
# * What happens to the score of the training curve as more training points are added? What about the testing curve?
# * Would having more training points benefit the model?
#
# **Hint:** Are the learning curves converging to particular scores? Generally speaking, the more data you have, the better. But if your training and testing curves are converging with a score above your benchmark threshold, would this be necessary?
# Think about the pros and cons of adding more training points based on if the training and testing curves are converging.
# **Answer: Graph with max_depth = 3. When more training points are added, the training score decreases and the testing score increases. Then the R2 score of the training curve converges or settles around 0.8. The testing score similarly converges towards 0.8. Having more training points would not benefit or change the model because the scores would just converge further - the model has plenty of data.
# ### Complexity Curves
# The following code cell produces a graph for a decision tree model that has been trained and validated on the training data using different maximum depths. The graph produces two complexity curves — one for training and one for validation. Similar to the **learning curves**, the shaded regions of both the complexity curves denote the uncertainty in those curves, and the model is scored on both the training and validation sets using the `performance_metric` function.
#
# ** Run the code cell below and use this graph to answer the following two questions Q5 and Q6. **
# Produce the training/validation complexity curves over max_depth,
# using only the training split (visuals.py helper).
vs.ModelComplexity(X_train, y_train)
# ### Question 5 - Bias-Variance Tradeoff
# * When the model is trained with a maximum depth of 1, does the model suffer from high bias or from high variance?
# * How about when the model is trained with a maximum depth of 10? What visual cues in the graph justify your conclusions?
#
# **Hint:** High bias is a sign of underfitting(model is not complex enough to pick up the nuances in the data) and high variance is a sign of overfitting(model is by-hearting the data and cannot generalize well). Think about which model(depth 1 or 10) aligns with which part of the tradeoff.
# **Answer: With max_depth = 1, the model has high bias (underfitting) because I can see that the error is high for both the training data and the testing data. The model is not complex enough to pick up the nuances of the data. The model also has a low variance because the training and testing scores are closer together. At max_depth = 10, the model has high variance (overfitting) because I can see that the training data has no error, but the testing data experiences more error. The training and testing scores are farther apart. The model is memorizing the data and cannot generalize well. **
# ### Question 6 - Best-Guess Optimal Model
# * Which maximum depth do you think results in a model that best generalizes to unseen data?
# * What intuition lead you to this answer?
#
# ** Hint: ** Look at the graph above Question 5 and see where the validation scores lie for the various depths that have been assigned to the model. Does it get better with increased depth? At what point do we get our best validation score without overcomplicating our model? And remember, Occams Razor states "Among competing hypotheses, the one with the fewest assumptions should be selected."
# **Answer: For the graph above Question 5, we should use a max_depth of 3 or 4 because that is when we get our best validation score without overcomplicating the model.
# -----
#
# ## Evaluating Model Performance
# In this final section of the project, you will construct a model and make a prediction on the client's feature set using an optimized model from `fit_model`.
# ### Question 7 - Grid Search
# * What is the grid search technique?
# * How it can be applied to optimize a learning algorithm?
#
# ** Hint: ** When explaining the Grid Search technique, be sure to touch upon why it is used, what the 'grid' entails and what the end goal of this method is. To solidify your answer, you can also give an example of a parameter in a model that can be optimized using this approach.
# **Answer: ** The grid search technique (GridSearchCV) performs exhaustive search over specified parameter values for an estimator to get the best cross-validation score for a model (from scikit-learn.org). An example might be using GridSearchCV to find the optimal parameter values for an estimator like max_depth, which we do below.
# ### Question 8 - Cross-Validation
#
# * What is the k-fold cross-validation training technique?
#
# * What benefit does this technique provide for grid search when optimizing a model?
#
# **Hint:** When explaining the k-fold cross validation technique, be sure to touch upon what 'k' is, how the dataset is split into different parts for training and testing and the number of times it is run based on the 'k' value.
#
# When thinking about how k-fold cross validation helps grid search, think about the main drawbacks of grid search which are hinged upon **using a particular subset of data for training or testing** and how k-fold cv could help alleviate that. You can refer to the [docs](http://scikit-learn.org/stable/modules/cross_validation.html#cross-validation) for your answer.
# **Answer: (with help from Introduction to Machine Learning with Python) K-fold splits the training set into k subsets called folds, of equal sizes (example: k=5). A sequence of models is trained, beginning with the first fold as the test set and folds 2-5 as training set and then accuracy evaluated on fold 1. Then another model is built with fold 2 as the test set and folds 1 and 3-5 as the training set. This method is repeated on folds 3,4, and 5 as test sets. At the end, there are 5 accuracy values. Then we take the average of all of the accuracy scores. K-fold creates less variance, whereas gridsearch may be biased by a bad selection of data.
# ### Implementation: Fitting a Model
# Your final implementation requires that you bring everything together and train a model using the **decision tree algorithm**. To ensure that you are producing an optimized model, you will train the model using the grid search technique to optimize the `'max_depth'` parameter for the decision tree. The `'max_depth'` parameter can be thought of as how many questions the decision tree algorithm is allowed to ask about the data before making a prediction. Decision trees are part of a class of algorithms called *supervised learning algorithms*.
#
# In addition, you will find your implementation is using `ShuffleSplit()` for an alternative form of cross-validation (see the `'cv_sets'` variable). While it is not the K-Fold cross-validation technique you describe in **Question 8**, this type of cross-validation technique is just as useful!. The `ShuffleSplit()` implementation below will create 10 (`'n_splits'`) shuffled sets, and for each shuffle, 20% (`'test_size'`) of the data will be used as the *validation set*. While you're working on your implementation, think about the contrasts and similarities it has to the K-fold cross-validation technique.
#
# Please note that ShuffleSplit has different parameters in scikit-learn versions 0.17 and 0.18.
# For the `fit_model` function in the code cell below, you will need to implement the following:
# - Use [`DecisionTreeRegressor`](http://scikit-learn.org/stable/modules/generated/sklearn.tree.DecisionTreeRegressor.html) from `sklearn.tree` to create a decision tree regressor object.
# - Assign this object to the `'regressor'` variable.
# - Create a dictionary for `'max_depth'` with the values from 1 to 10, and assign this to the `'params'` variable.
# - Use [`make_scorer`](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.make_scorer.html) from `sklearn.metrics` to create a scoring function object.
# - Pass the `performance_metric` function as a parameter to the object.
# - Assign this scoring function to the `'scoring_fnc'` variable.
# - Use [`GridSearchCV`](http://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html) from `sklearn.grid_search` to create a grid search object.
# - Pass the variables `'regressor'`, `'params'`, `'scoring_fnc'`, and `'cv_sets'` as parameters to the object.
# - Assign the `GridSearchCV` object to the `'grid'` variable.
# +
# TODO: Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeRegressor
from sklearn.grid_search import GridSearchCV
def fit_model(X, y):
    """ Fit a decision-tree regressor to the input data [X, y], tuning the
        'max_depth' hyperparameter (1 through 10) via exhaustive grid search
        with shuffled cross-validation, and return the best estimator found.
    """
    # Ten shuffled train/validation splits, each holding out 20% of the rows.
    # sklearn 0.18: ShuffleSplit(n_splits=10, test_size=0.1, train_size=None, random_state=None)
    # sklearn 0.17: ShuffleSplit(n, n_iter=10, test_size=0.1, train_size=None, random_state=None)
    shuffle_cv = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)
    # The model family being tuned; random_state fixed for reproducibility.
    tree = DecisionTreeRegressor(random_state=0)
    # Candidate values for the single hyperparameter under search.
    search_space = {'max_depth': range(1, 11)}
    # Score each candidate with the project's R^2 metric (performance_metric).
    r2_scorer = make_scorer(performance_metric)
    # Exhaustively evaluate every max_depth value on every CV split.
    searcher = GridSearchCV(tree, search_space, scoring=r2_scorer, cv=shuffle_cv)
    # Fit the grid search object to the data to compute the optimal model.
    searcher = searcher.fit(X, y)
    # Hand back the refit model with the best mean validation score.
    return searcher.best_estimator_
# -
# ### Making Predictions
# Once a model has been trained on a given set of data, it can now be used to make predictions on new sets of input data. In the case of a *decision tree regressor*, the model has learned *what the best questions to ask about the input data are*, and can respond with a prediction for the **target variable**. You can use these predictions to gain information about data where the value of the target variable is unknown — such as data the model was not trained on.
# ### Question 9 - Optimal Model
#
# * What maximum depth does the optimal model have? How does this result compare to your guess in **Question 6**?
#
# Run the code block below to fit the decision tree regressor to the training data and produce an optimal model.
# +
# Fit the training data to the model using grid search
# (fit_model searches max_depth 1-10 with shuffled cross-validation)
reg = fit_model(X_train, y_train)
# Produce the value for 'max_depth'
print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth'])
# -
# ** Hint: ** The answer comes from the output of the code snipped above.
#
# **Answer: ** The optimal model has a max_depth of 4. This is consistent with my guess in Question 6 and the complexity curve graph.
# ### Question 10 - Predicting Selling Prices
# Imagine that you were a real estate agent in the Boston area looking to use this model to help price homes owned by your clients that they wish to sell. You have collected the following information from three of your clients:
#
# | Feature | Client 1 | Client 2 | Client 3 |
# | :---: | :---: | :---: | :---: |
# | Total number of rooms in home | 5 rooms | 4 rooms | 8 rooms |
# | Neighborhood poverty level (as %) | 17% | 32% | 3% |
# | Student-teacher ratio of nearby schools | 15-to-1 | 22-to-1 | 12-to-1 |
#
# * What price would you recommend each client sell his/her home at?
# * Do these prices seem reasonable given the values for the respective features?
#
# **Hint:** Use the statistics you calculated in the **Data Exploration** section to help justify your response. Of the three clients, client 3 has has the biggest house, in the best public school neighborhood with the lowest poverty level; while client 2 has the smallest house, in a neighborhood with a relatively high poverty rate and not the best public schools.
#
# Run the code block below to have your optimized model make predictions for each client's home.
# +
# Produce a matrix for client data; each row is [RM, LSTAT, PTRATIO]
# matching the feature order of the training data.
client_data = [[5, 17, 15], # Client 1
               [4, 32, 22], # Client 2
               [8, 3, 12]]  # Client 3
# Show predictions
for i, price in enumerate(reg.predict(client_data)):
    print "Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price)
# -
# **Answer: ** I would recommend that Client 1 sells for $401,333.33, Client 2 for $240,947.37, and Client 3 for 893,700.00. These prices seem reasonable given the values of their features. Client 3 has the biggest house (highest RM), lowest ratio of student to teachers (PTRATIO), and lowest percentage of lower class workers (LSTAT) - relative to the others- so it should be listed with the highest listing price. Client 2 has the worst house, as in the smallest (lowest RM), highest ratio of student to teachers (PTRATIO), and highest percentage of lower class workers (LSTAT) - relative to the others - so it should be listed with the lowest listing price. Client 1 has a house of quality between Client 2 and 3, so their listing price is in between the others.
# ### Sensitivity
# An optimal model is not necessarily a robust model. Sometimes, a model is either too complex or too simple to sufficiently generalize to new data. Sometimes, a model could use a learning algorithm that is not appropriate for the structure of the data given. Other times, the data itself could be too noisy or contain too few samples to allow a model to adequately capture the target variable — i.e., the model is underfitted.
#
# **Run the code cell below to run the `fit_model` function ten times with different training and testing sets to see how the prediction for a specific client changes with respect to the data it's trained on.**
# Run fit_model ten times on different train/test splits and report the
# spread of predictions for a client (sensitivity / robustness check).
vs.PredictTrials(features, prices, fit_model, client_data)
# ### Question 11 - Applicability
#
# * In a few sentences, discuss whether the constructed model should or should not be used in a real-world setting.
#
# **Hint:** Take a look at the range in prices as calculated in the code snippet above. Some questions to answering:
# - How relevant today is data that was collected from 1978? How important is inflation?
# - Are the features present in the data sufficient to describe a home? Do you think factors like quality of appliances in the home, square feet of the plot area, presence of a pool, etc. should factor in?
# - Is the model robust enough to make consistent predictions?
# - Would data collected in an urban city like Boston be applicable in a rural city?
# - Is it fair to judge the price of an individual home based on the characteristics of the entire neighborhood?
# **Answer: **Although this model did well when testing data from the three different clients in ranking based on qualities, the data used to produce the model is not current and this model is not accurate for a modern day setting. The prices of houses are now much more expensive, so inflation needs to be taken into account. Additionally, other features of the house should be taken into account, such as the ones listed above and also things like views, accessability, city/surburban, location, etc. The model is not robust enough to make consistent predictions because it needs updated data and it needs to take into account more features.
# > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
# **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
|
boston_housing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ryancliffew44/DAP-skill-based-challenge/blob/main/dapchallenge.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="eyKxYvz9f4xI" outputId="c7c4fefe-9ad8-4eb8-8407-9328d7f21821"
import pandas as pd
import json
import urllib.request
#url = 'https://raw.githubusercontent.com/onaio/ona-tech/master/data/water_points.json'
def calculate(url):
    """Download the water-point JSON dataset at *url* and summarize it.

    Returns a dict with:
      - "number_functional": count of rows whose water_point_condition
        equals 'functioning'
      - "number_water_points": number of water points per community/village
      - "community_ranking": communities ordered (descending) by the
        fraction of their water points whose condition is 'broken'
    """
    with urllib.request.urlopen(url) as link:
        records = json.loads(link.read().decode())
    frame = pd.DataFrame(records)
    functioning_count = (frame['water_point_condition'] == 'functioning').sum()
    by_village = frame.groupby('communities_villages')
    points_per_village = by_village.size().to_dict()
    # Share of each condition per village; missing 'broken' entries mean 0.
    condition_shares = by_village['water_point_condition'].value_counts(normalize=True).unstack()
    broken_share = condition_shares['broken'].fillna(0).sort_values(ascending=False)
    return {
        "number_functional": functioning_count,
        "number_water_points": points_per_village,
        "community_ranking": broken_share.to_dict(),
    }
# Prompt interactively for the dataset URL when run as a script.
if __name__ == "__main__":
    url = input('Enter the URL to the dataset and press Enter Key:')
    print(calculate(url))
|
dapchallenge.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="bEH-CRbeA6NU"
# # Better Retrieval via "Dense Passage Retrieval"
#
# EXECUTABLE VERSION: [colab](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial6_Better_Retrieval_via_DPR.ipynb)
#
# ### Importance of Retrievers
#
# The Retriever has a huge impact on the performance of our overall search pipeline.
#
#
# ### Different types of Retrievers
# #### Sparse
# Family of algorithms based on counting the occurrences of words (bag-of-words) resulting in very sparse vectors with length = vocab size.
#
# **Examples**: BM25, TF-IDF
#
# **Pros**: Simple, fast, well explainable
#
# **Cons**: Relies on exact keyword matches between query and text
#
#
# #### Dense
# These retrievers use neural network models to create "dense" embedding vectors. Within this family there are two different approaches:
#
# a) Single encoder: Use a **single model** to embed both query and passage.
# b) Dual-encoder: Use **two models**, one to embed the query and one to embed the passage
#
# Recent work suggests that dual encoders work better, likely because they can deal better with the different nature of query and passage (length, style, syntax ...).
#
# **Examples**: REALM, DPR, Sentence-Transformers
#
# **Pros**: Captures semantic similarity instead of "word matches" (e.g. synonyms, related topics ...)
#
# **Cons**: Computationally more heavy, initial training of model
#
#
# ### "Dense Passage Retrieval"
#
# In this Tutorial, we want to highlight one "Dense Dual-Encoder" called Dense Passage Retriever.
# It was introduced by Karpukhin et al. (2020, https://arxiv.org/abs/2004.04906).
#
# Original Abstract:
#
# _"Open-domain question answering relies on efficient passage retrieval to select candidate contexts, where traditional sparse vector space models, such as TF-IDF or BM25, are the de facto method. In this work, we show that retrieval can be practically implemented using dense representations alone, where embeddings are learned from a small number of questions and passages by a simple dual-encoder framework. When evaluated on a wide range of open-domain QA datasets, our dense retriever outperforms a strong Lucene-BM25 system largely by 9%-19% absolute in terms of top-20 passage retrieval accuracy, and helps our end-to-end QA system establish new state-of-the-art on multiple open-domain QA benchmarks."_
#
# Paper: https://arxiv.org/abs/2004.04906
# Original Code: https://fburl.com/qa-dpr
#
#
# *Use this* [link](https://colab.research.google.com/github/deepset-ai/haystack/blob/master/tutorials/Tutorial6_Better_Retrieval_via_DPR.ipynb) *to open the notebook in Google Colab.*
#
# + [markdown] colab_type="text" id="3K27Y5FbA6NV"
# ### Prepare environment
#
# #### Colab: Enable the GPU runtime
# Make sure you enable the GPU runtime to experience decent speed in this tutorial.
# **Runtime -> Change Runtime type -> Hardware accelerator -> GPU**
#
# <img src="https://raw.githubusercontent.com/deepset-ai/haystack/master/docs/_src/img/colab_gpu_runtime.jpg">
# + colab={"base_uri": "https://localhost:8080/", "height": 357} colab_type="code" id="JlZgP8q1A6NW" outputId="c893ac99-b7a0-4d49-a8eb-1a9951d364d9"
# Make sure you have a GPU running
# !nvidia-smi
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="NM36kbRFA6Nc" outputId="af1a9d85-9557-4d68-ea87-a01f00c584f9"
# Install the latest release of Haystack in your own environment
# #! pip install farm-haystack
# Install the latest master of Haystack
# !pip install git+https://github.com/deepset-ai/haystack.git
# !pip install urllib3==1.25.4
# !pip install torch==1.6.0+cu101 torchvision==0.6.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html
# + colab={} colab_type="code" id="xmRuhTQ7A6Nh"
from haystack import Finder
from haystack.preprocessor.cleaning import clean_wiki_text
from haystack.preprocessor.utils import convert_files_to_dicts, fetch_archive_from_http
from haystack.reader.farm import FARMReader
from haystack.reader.transformers import TransformersReader
from haystack.utils import print_answers
# + [markdown] colab_type="text" id="q3dSo7ZtA6Nl"
# ### Document Store
#
# FAISS is a library for efficient similarity search on a cluster of dense vectors.
# The `FAISSDocumentStore` uses a SQL (SQLite in-memory by default) database under the hood
# to store the document text and other meta data. The vector embeddings of the text are
# indexed on a FAISS Index that later is queried for searching answers.
# The default flavour of FAISSDocumentStore is "Flat" but can also be set to "HNSW" for
# faster search at the expense of some accuracy. Just set the faiss_index_factor_str argument in the constructor.
# For more info on which suits your use case: https://github.com/facebookresearch/faiss/wiki/Guidelines-to-choose-an-index
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="1cYgDJmrA6Nv" outputId="a8aa6da1-9acf-43b1-fa3c-200123e9bdce" pycharm={"name": "#%%\n"}
from haystack.document_store.faiss import FAISSDocumentStore
# "Flat" builds an exact (brute-force) FAISS index; "HNSW" would trade some accuracy for speed.
document_store = FAISSDocumentStore(faiss_index_factory_str="Flat")
# + [markdown] colab_type="text" id="06LatTJBA6N0" pycharm={"name": "#%% md\n"}
# ### Cleaning & indexing documents
#
# Similarly to the previous tutorials, we download, convert and index some Game of Thrones articles to our DocumentStore
# + colab={"base_uri": "https://localhost:8080/", "height": 156} colab_type="code" id="iqKnu6wxA6N1" outputId="bb5dcc7b-b65f-49ed-db0b-842981af213b" pycharm={"name": "#%%\n"}
# Let's first get some files that we want to use
# Let's first get some files that we want to use
doc_dir = "data/article_txt_got"
s3_url = "https://s3.eu-central-1.amazonaws.com/deepset.ai-farm-qa/datasets/documents/wiki_gameofthrones_txt.zip"
# Download and unpack the article archive into `doc_dir` (skips the download if already present).
fetch_archive_from_http(url=s3_url, output_dir=doc_dir)
# Convert files to dicts, cleaning wiki markup and splitting each article into paragraphs
dicts = convert_files_to_dicts(dir_path=doc_dir, clean_func=clean_wiki_text, split_paragraphs=True)
# Now, let's write the dicts containing documents to our DB.
document_store.write_documents(dicts)
# + [markdown] colab_type="text" id="wgjedxx_A6N6"
# ### Initialize Retriever, Reader, & Finder
#
# #### Retriever
#
# **Here:** We use a `DensePassageRetriever`
#
# **Alternatives:**
#
# - The `ElasticsearchRetriever` with custom queries (e.g. boosting) and filters
# - Use `EmbeddingRetriever` to find candidate documents based on the similarity of embeddings (e.g. created via Sentence-BERT)
# - Use `TfidfRetriever` in combination with a SQL or InMemory Document store for simple prototyping and debugging
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["20affb86c4574e3a9829136fdfe40470", "<KEY>", "84311c037c6e44b5b621237f59f027a0", "<KEY>", "ad2ce6a8b4f844ac93b425f1261c131f", "bb45d5e4c9944fcd87b408e2fbfea440", "248d02e01dea4a63a3296e28e4537eaf", "74a9c43eb61a43aa973194b0b70e18f5", "58fc3339f13644aea1d4c6d8e1d43a65", "460bef2bfa7d4aa480639095555577ac", "8553a48fb3144739b99fa04adf8b407c", "babe35bb292f4010b64104b2b5bc92af", "<KEY>", "b4b950d899df4e3fbed9255b281e988a", "<KEY>", "<KEY>", "<KEY>", "5b8d5975d2674e7e9ada64e77c463c0a", "4afa2be1c2c5483f932a42ea4a7897af", "0e7186eeb5fa47d89c8c111ebe43c5af", "fa946133dfcc4a6ebc6fef2ef9dd92f7", "518b6a993e42490297289f2328d0270a", "cea074a636d34a75b311569fc3f0b3ab", "2630fd2fa91d498796af6d7d8d73aba4"]} colab_type="code" id="kFwiPP60A6N7" outputId="07249856-3222-4898-9246-68e9ecbf5a1b" pycharm={"is_executing": true}
from haystack.retriever.dense import DensePassageRetriever
# DPR uses two separate BERT encoders: one for questions, one for passages.
retriever = DensePassageRetriever(document_store=document_store,
                                  query_embedding_model="facebook/dpr-question_encoder-single-nq-base",
                                  passage_embedding_model="facebook/dpr-ctx_encoder-single-nq-base",
                                  max_seq_len_query=64,
                                  max_seq_len_passage=256,
                                  batch_size=16,
                                  use_gpu=True,
                                  embed_title=True,
                                  use_fast_tokenizers=True)
# Important:
# Now that we have the DPR initialized, we need to call update_embeddings() to iterate over all
# previously indexed documents and update their embedding representation.
# While this can be a time consuming operation (depending on corpus size), it only needs to be done once.
# At query time, we only need to embed the query and compare it to the existing doc embeddings which is very fast.
document_store.update_embeddings(retriever)
# + [markdown] colab_type="text" id="rnVR28OXA6OA"
# #### Reader
#
# Similar to the previous tutorials, we now initialize our reader.
#
# Here we use a FARMReader with the *deepset/roberta-base-squad2* model (see: https://huggingface.co/deepset/roberta-base-squad2)
#
#
#
# ##### FARMReader
# + colab={"base_uri": "https://localhost:8080/", "height": 739, "referenced_widgets": ["3d273d2d3b25435ba4eb4ffd8e812b6f", "5104b7cddf6d4d0f92d3dd142b9f4c42", "e0510255a31d448497af3ca0f4915cb4", "670270fd06274932adad4d42c8a1912e", "6ca292cd3f46417ea296684e48863af9", "75578e0466cd4b84ba7dfee1028ae4cd", "cbe09b984b804402b1fe82739cbc375c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "5efa895c53284b72adec629a6fc59fa9", "<KEY>", "243600e420f449089c1b5ed0d2715339", "466222c8b2e1403ca69c8130423f0a8b", "a458be4cc49240e4b9bc1c95c05551e8", "d9ee08fa621d4b558bd1a415e3ee6f62", "1b905c5551b940ed9bc5320e1e5a9213", "64fc7775a84e425c8082a545f7c2a0c1", "<KEY>", "<KEY>", "<KEY>", "f9289caeac404087ad4973a646e3a117", "<KEY>", "98781635b86244aca5d22be4280c32de", "e148b28d946549a9b5eb09294ebe124e", "<KEY>", "bbef597f804e4ca580aee665399a3bc1", "<KEY>", "e3724385769d443cb4ea39b92e0b2abd", "<KEY>", "<KEY>", "<KEY>", "885390f24e08495db6a1febd661531e0", "c2a614f48e974fb8b13a3c5d7cafaed6", "ada8fa1c88954ef8b839f29090de9e79", "<KEY>", "<KEY>", "<KEY>", "09a647660cf94131a1c140d06eb293ab", "3e482e9ef4d34d93b4ba4f7f07b0e44f", "<KEY>", "aa4becf2e33d4f1e9fdac70236d48f6e", "78d087ed952e429b97eb3d8fcdc7c8ec", "<KEY>", "<KEY>", "<KEY>", "c8f1f7e8462d4d14a507816f67953eae"]} colab_type="code" id="fyIuWVwhA6OB" outputId="33113253-8b95-4604-f9e5-1aa28ee66a91"
# Load a local model or any of the QA models on
# Hugging Face's model hub (https://huggingface.co/models)
# The reader extracts answer spans from the passages the retriever returns.
reader = FARMReader(model_name_or_path="deepset/roberta-base-squad2", use_gpu=True)
# + [markdown] colab_type="text" id="unhLD18yA6OF"
# #### Finder
#
# The Finder sticks together reader and retriever in a pipeline to answer our actual questions.
# + colab={} colab_type="code" id="TssPQyzWA6OG"
# Combine retriever (candidate selection) and reader (answer extraction) into one pipeline.
finder = Finder(reader, retriever)
# + [markdown] colab_type="text" id="bXlBBxKXA6OL"
# ### Voilà! Ask a question!
# + colab={"base_uri": "https://localhost:8080/", "height": 275} colab_type="code" id="Zi97Hif2A6OM" outputId="5eb9363d-ba92-45d5-c4d0-63ada3073f02"
# You can configure how many candidates the reader and retriever shall return
# The higher top_k_retriever, the better (but also the slower) your answers.
prediction = finder.get_answers(question="Who created the Dothraki vocabulary?", top_k_retriever=10, top_k_reader=5)
#prediction = finder.get_answers(question="Who is the father of <NAME>?", top_k_retriever=10, top_k_reader=5)
#prediction = finder.get_answers(question="Who is the sister of Sansa?", top_k_retriever=10, top_k_reader=5)
# + colab={"base_uri": "https://localhost:8080/", "height": 561} colab_type="code" id="N70FgfkwA6OQ" outputId="9419c75d-181c-4ef6-cea8-b328a503f19a" pycharm={"name": "#%%\n"}
# Show the top answers with their scores and source context.
print_answers(prediction, details="minimal")
# -
|
tutorials/Tutorial6_Better_Retrieval_via_DPR.ipynb
|
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// + [markdown] slideshow={"slide_type": "slide"}
// ### Google Colab Integration
//
// Die folgende Zelle können Sie überspringen, wenn Sie mit einer lokalen Installation arbeiten. Wenn Sie das Notebook auf Google-Colab ausführen, dann müssen Sie als erstes diese Zelle ausführen und danach die Seite neu laden (F5).
// + slideshow={"slide_type": "fragment"}
!echo "Update environment..."
!apt update -q &> /dev/null
!echo "Install Java..."
!apt-get install -q openjdk-11-jdk-headless &> /dev/null
!echo "Install Jupyter java kernel..."
!curl -L https://github.com/SpencerPark/IJava/releases/download/v1.3.0/ijava-1.3.0.zip -o ijava-kernel.zip &> /dev/null
!unzip -q ijava-kernel.zip -d ijava-kernel && cd ijava-kernel && python3 install.py --sys-prefix &> /dev/null
!echo "Downloading turtle jar ..."
!curl -L https://github.com/Andreas-Forster/gyminf-programmieren/raw/master/notebooks/jturtle-0.6.jar -o jturtle-0.6.jar &> /dev/null
!echo "Done."
// + [markdown] slideshow={"slide_type": "slide"}
// # Einfache Programme
//
// #### <NAME>, Departement Mathematik und Informatik, Universität Basel
// + [markdown] slideshow={"slide_type": "-"}
// ## Grundsymbole
//
// + [markdown] slideshow={"slide_type": "slide"}
// ### Ein typisches Java Programm
//
// <pre class="stretch highlight java"><code data-trim>
//
// /* Berechnet die Kubikwurzel einer Zahl mittels dem Newton Verfahren */
// public class CubicRoot {
//
// public static void main(String[] args) {
// double a = Double.parseDouble(args[0]);
//
// double xn = 1.0;
// double xnPlus1 = 1.0;
//
// do {
// xn = xnPlus1;
// xnPlus1 = 1.0 / 3.0 * ( 2.0 * xn + (a / (xn * xn)));
// } while (Math.abs(xn - xnPlus1) > 1e-8);
//
//
// System.out.println("Resultat: " +xnPlus1);
// }
// }
// </code></pre>
// + [markdown] slideshow={"slide_type": "slide"}
//
// ### Namen
//
// * bezeichnen Variablen, Typen, ... in einem Programm
// * bestehen aus Buchstaben, Ziffern und "_"
// * beginnen mit Buchstaben ( oder "_" )
// * Gross und Kleinschreibung wird unterschieden
// ```java
// System.out.println("hello world");
// ```
// + [markdown] slideshow={"slide_type": "fragment"}
// #### Beispiele
// ```java
// a
// x
// xnPlus1
// args
// CubicRoot
// main
// ```
// + [markdown] slideshow={"slide_type": "slide"}
// #### Schlüsselwörter
//
// 
//
// + [markdown] slideshow={"slide_type": "fragment"}
// > Schlüsselwörter dürfen nicht als Namen verwendet werden
// + [markdown] slideshow={"slide_type": "slide"}
// ### Zahlen
//
// * Ganze Zahlen
// * dezimal, keine spezielle Schreibweise
// * (binär, mit Präfix `0b`)
// * (hexadezimal, mit Präfix `0x`, vorallem für Farben verwendet)
// * Gleitkommazahlen
// + [markdown] slideshow={"slide_type": "fragment"}
//
// #### Beispiele
//
// ```
// 376
// 0x9F
// 0b1101
// 3.14
// ```
// + [markdown] slideshow={"slide_type": "slide"}
// ### Zeichenketten
//
// * Beliebige Zeichen zwischen Hochkommas (" ")
// * Falls man " in Zeichenketten verwenden will, muss man ein Backslash ```\``` als Escape-Sequenz voranstellen
// + [markdown] slideshow={"slide_type": "fragment"}
// #### Beispiele
//
// ```java
// "a simple string"
// "sie sagt \" Hallo \" "
// ```
// + [markdown] slideshow={"slide_type": "slide"}
// ### Datentypen
//
// Datentypen definieren:
//
// 1. Menge von Werten, die zu diesem Typ gehören
// 2. Menge von Operationen, die auf diesem Typ ausgeführt werden können
// + [markdown] slideshow={"slide_type": "fragment"}
// #### Ganzzahltypen:
//
// | Name | Grösse | Wertebereich <img width=100/> | |
// |------|--------|-------------------------------|--|
// | byte | 8 Bit | $-2^7 \ldots, 2^7-1 $ | $(-128, \ldots, 127 )$ |
// | short | 16 Bit | $-2^{15} \ldots, 2^{15} - 1$ | $(-32768, \ldots, 32767)$ |
// | int | 32 Bit | $-2^{31} \ldots, 2^{31}-1$ | $(-2147483648, \ldots, 2147483647)$|
// | long | 64 Bit | $-2^{63} \ldots, 2^{63}-1$ | |
// + [markdown] slideshow={"slide_type": "slide"}
// ### Variablendeklaration
// + [markdown] slideshow={"slide_type": "-"}
// Jede Variable muss vor ihrer Verwendung deklariert werden
//
// * macht Namen und Typ der Variablen bekannt
// * Speicherplatz wird reserviert
// + [markdown] slideshow={"slide_type": "fragment"}
// #### Beispiele
//
// ```java
// int x;
// short a;
// ```
// + [markdown] slideshow={"slide_type": "slide"}
// ### Variableninitialisierung
//
// * Variable kann bei der Erzeugung direkt initialisiert werden.
// + [markdown] slideshow={"slide_type": "fragment"}
// #### Beispiele
//
// ```java
// int x = 100;
// short a = 1;
// ```
// + [markdown] slideshow={"slide_type": "slide"}
// ### Namenswahl für Variablen
//
// Einige Tipps:
//
// * lesbar
// * z.B. sum, phoneNumber, zipCode
// * Hilfsvariablen können kurz gehalten werden
// * z.B. i, j, k
// * Variablen, die man im ganzen Programm braucht, eher länger
// * z.B. inputTest, streetNumber, expirationDate
// * Worttrennung durch Grossbuchstaben (camelCase)
// * z.B. inputTest
//
// > Variablennamen sollten Konzept klar machen
// + [markdown] slideshow={"slide_type": "slide"}
// ### Kommentare
// + [markdown] slideshow={"slide_type": "-"}
// Java unterscheidet zwei Arten von Kommentaren:
//
// 1. Zeilenendekommentare ```//```
// * kommentiert ab aktueller Position bis Zeilenende
//
// 2. Klammerkommentare ```/* ... */```
// * kommentiert allen Code zwischen ```/*``` und ```*/```
// * ***dürfen nicht geschachtelt werden***
//
// > Alles ausserhalb von Kommentaren muss gültiger Java Code sein.
//
// + [markdown] slideshow={"slide_type": "slide"}
// ### Kommentare: Beispiele
// -
// Folgendes kompiliert nicht:
Dies gibt beim Kompilieren einen Fehler
// Folgendes kompiliert:
/* Dies gibt beim Kompilieren keinen Fehler */
// +
// Und dies auch nicht
// + [markdown] slideshow={"slide_type": "slide"}
// #### Mini Übung
//
// * Die folgenden Codestücke sollen kommentiert werden, so dass nur noch "Hello" ausgegeben wird.
// * Welche Art von Kommentar (```/* */``` oder ```//```) ist dafür besser geeignet?
// * Weshalb ist beim letzten Codestück die Kommentarsetzung nicht ideal?
//
// -
System.out.println("Hello world");
String s = "Hello";
s = s + " world"; // Mit + werden zwei Strings aneinander gehängt
System.out.println(s);
// +
String s = "Hello ";
s = s + "w"; /* Erster Kommentar */
s = s + "o"; /* Zweiter Kommentar */
s = s + "r"; /* Dritter Kommentar */
s = s + "l"; /* Vierter Kommentar */
s = s + "d"; /* Fünfter Kommentar */
System.out.println(s);
// + [markdown] slideshow={"slide_type": "slide"}
// ### Kommentare (Anmerkung)
//
// Sinnvolles kommentieren!
//
// * Kommentieren, was Erklärung bedarf nicht was schon im Programm steht
// * Kommentare und Code sollen auf unterschiedlicher "Ebenen" Programm erklären
// * Kommentare haben keinen Einfluss auf das Programm, aber auf die Programmierenden
//
//
// ```java
// // Unsinniger Kommentar
// int sum = 0; // weist der Variable sum den Wert 0 zu
//
// // Sinnvoller Kommentar
// short sum = 0 // Summe kann nie grösser 20 werden. Short reicht deshalb aus
// ```
// + [markdown] slideshow={"slide_type": "slide"}
// ## Zuweisungen und arithmetische Funktionen
// + [markdown] slideshow={"slide_type": "slide"}
// ### Zuweisungen (assignment)
//
// ```java
// x = x + 1
// ```
//
// 1. Berechne Ausdruck auf rechter Seite
// 2. Speichere den Wert in der Variablen (linke Seite)
// + [markdown] slideshow={"slide_type": "fragment"}
// #### Zuweisungskompatibilität
//
// linke und rechte Seite müssen
// * dieselben Typen haben, oder
// * Typ links $\supseteq$ Typ rechts
//
// $\text{long} \supseteq \text{int} \supseteq \text{short} \supseteq \text{byte}$
// + slideshow={"slide_type": "fragment"}
int i = 5;
long l = i;
// + [markdown] slideshow={"slide_type": "slide"}
// ### Statische Typenprüfung
//
// Compiler prüft
//
// * dass Variablen nur erlaubte Werte enthalten
// * dass auf Werte nur erlaubte Operationen ausgeführt werden
//
// #### Beispiel
//
// Folgendes funktioniert nicht:
// -
int i = 1;
long l = i;
int i2 = l;
"abc" / 7
// + [markdown] slideshow={"slide_type": "slide"}
// ### Arithmetische Ausdrücke
//
// Beispiel:
// ```java
// 5 * 3 / -7 % (2 + 1);
// ```
//
// Binäre Operatoren:
//
// ``+`` Addition <br/>
// ``-`` Subtraktion <br/>
// ``*`` Multiplikation <br/>
// ``/`` Division <br/>
// ``%`` Modulo (Divisionsrest)
//
// Unäre Operatoren:
//
// `` + `` Identität ($+x == x$) <br/>
// `` - `` Vorzeichenumkehrung
// + [markdown] slideshow={"slide_type": "slide"}
// ### Miniübung
//
// Experimentieren Sie mit arithmetischen Ausdrücken
//
// * Schreiben Sie einige einfache und zusammengesetzte arithmetische Ausdrücke
// * Arbeiten Sie auch mit Variablen
// * Gilt die Punkt vor Strich Regel?
// * Was bindet stärker, ``+`` oder unäres Minus?
// * Was passiert, wenn eine berechnete Zahl grösser ist als die Resultatvariable?
// + slideshow={"slide_type": "slide"}
// + [markdown] slideshow={"slide_type": "slide"}
// ### Typumwandlung
//
// Wenn Resultattyp zu klein ist, braucht es explizite Typumwandlung (type cast)
//
// >
// > ( `TYP` ) `AUSDRUCK`
// >
//
// * Dies wandelt den Resultatwert vom Ausdruck in den angegebenen Typ um.
// * Dabei kann Präzision verloren gehen.
//
// #### Beispiel
// -
byte b = (byte) 10;
b
// + [markdown] slideshow={"slide_type": "slide"}
// ### Zuweisungsoperatoren
//
// Arithmetische Operationen lassen sich mit Zuweisungen kombinieren
//
//
// | | Kurzform | Langform |
// |----|----------| ---------|
// |``+=`` | x += y | x = x + y |
// |``-=`` | x -= y | x = x - y |
// |``*=`` | x *= y | x = x * y |
// |``/=`` | x /= y | x = x / y |
// |``++`` | x++ | x = x + 1 |
// |``--`` | x-- | x = x -1 |
//
//
// Spart Schreibarbeit, sollte aber nur spärlich verwendet werden.
//
// + [markdown] slideshow={"slide_type": "slide"}
// ### Konstanten
//
// Konstanten sind initialisierte "Variablen", deren Wert man nicht mehr ändern kann
//
// Definition in Java
//
// ```java
// static final int MAX_VALUE = 100;
// ```
//
// Zweck
//
// * bessere Lesbarkeit
// * bessere Wartbarkeit (muss nur an 1 Stelle geändert werden)
//
// + [markdown] slideshow={"slide_type": "slide"}
// ### Beispiel:
//
// Wie kann man folgendes Programm verbessern?
// +
public class CubicRoot {
    // Convergence tolerance for the Newton iteration (also reported as the error bound).
    static final double TOLERANCE = 1e-8;
    // Starting value for the Newton iteration.
    static final double INITIAL_GUESS = 1.0;

    // Computes the cubic root of `a` with Newton's method and prints the result
    // together with its error bound.
    public static void computeRoot(double a) {
        double xn = INITIAL_GUESS;
        double xnPlus1 = INITIAL_GUESS;
        do {
            xn = xnPlus1;
            // Newton step for f(x) = x^3 - a:  x' = (2x + a/x^2) / 3
            xnPlus1 = 1.0 / 3.0 * ( 2.0 * xn + (a / (xn * xn)));
        } while (Math.abs(xn - xnPlus1) > TOLERANCE);
        System.out.println(xnPlus1 + "±" + TOLERANCE);
    }
}
|
notebooks/EinfacheProgramme.ipynb
|
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#hide
#skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
#export
from fastai.data.all import *
from fastai.text.core import *
from fastai.text.models.awdlstm import dropout_mask
#hide
from nbdev.showdoc import *
# +
# all_cpp
# +
#default_exp text.models.qrnn
#default_cls_lvl 3
# -
# # QRNN
#
# > Quasi-recurrent neural networks introduced in [Bradbury et al.](https://arxiv.org/abs/1611.01576)
# ## ForgetMult
#export
from torch.utils import cpp_extension
from torch.autograd import Function
__file__ = Path.cwd().parent/'fastai'/'text'/'models'/'qrnn.py'
#export
def load_cpp(name, files, path):
    "Compile and load the C++/CUDA extension `name` from source `files`, caching builds in the fastai model dir."
    build_dir = Config().model/'qrnn'
    os.makedirs(build_dir, exist_ok=True)
    sources = [path/f for f in files]
    return cpp_extension.load(name=name, sources=sources, build_directory=build_dir)
#export
class _LazyBuiltModule():
"A module with a CPP extension that builds itself at first use"
def __init__(self, name, files): self.name,self.files,self.mod = name,files,None
def _build(self):
self.mod = load_cpp(name=self.name, files=self.files, path=Path(__file__).parent)
def forward(self, *args, **kwargs):
if self.mod is None: self._build()
return self.mod.forward(*args, **kwargs)
def backward(self, *args, **kwargs):
if self.mod is None: self._build()
return self.mod.backward(*args, **kwargs)
#export
forget_mult_cuda = _LazyBuiltModule('forget_mult_cuda', ['forget_mult_cuda.cpp', 'forget_mult_cuda_kernel.cu'])
bwd_forget_mult_cuda = _LazyBuiltModule('bwd_forget_mult_cuda', ['bwd_forget_mult_cuda.cpp', 'bwd_forget_mult_cuda_kernel.cu'])
#export
def dispatch_cuda(cuda_class, cpu_func, x):
    "Pick the implementation matching `x`'s device: `cuda_class.apply` on CUDA, otherwise `cpu_func`."
    on_gpu = x.device.type == 'cuda'
    if on_gpu:
        return cuda_class.apply
    return cpu_func
# The ForgetMult gate is the quasi-recurrent part of the network, computing the following from `x` and `f`.
# ``` python
# h[i+1] = x[i] * f[i] + h[i] * (1-f[i])
# ```
# The initial value for `h[0]` is either a tensor of zeros or the previous hidden state.
#export
def forget_mult_CPU(x, f, first_h=None, batch_first=True, backward=False):
    "ForgetMult gate applied to `x` and `f` on the CPU."
    # Sequence dimension: 1 when tensors are (batch, seq, hid), otherwise 0.
    seq_dim = 1 if batch_first else 0
    xs = x.split(1, dim=seq_dim)
    fs = f.split(1, dim=seq_dim)
    # Previous hidden state, kept with a singleton seq dimension for broadcasting.
    hidden = first_h.unsqueeze(seq_dim) if first_h is not None else None
    order = range(len(xs) - 1, -1, -1) if backward else range(len(xs))
    outputs = []
    for t in order:
        step = xs[t] * fs[t]
        if hidden is not None:
            step = step + (1 - fs[t]) * hidden
        hidden = step
        outputs.append(hidden)
    # When reading backward, the results were produced last-to-first: restore order.
    if backward:
        outputs.reverse()
    return torch.cat(outputs, dim=seq_dim)
# `first_h` is the tensor used for the value of `h[0]` (defaults to a tensor of zeros). If `batch_first=True`, `x` and `f` are expected to be of shape `batch_size x seq_length x n_hid`, otherwise they are expected to be of shape `seq_length x batch_size x n_hid`. If `backward=True`, the elements in `x` and `f` on the sequence dimension are read in reverse.
# +
def manual_forget_mult(x, f, h=None, batch_first=True, backward=False):
    "Reference ForgetMult: explicit per-timestep loop, used to check `forget_mult_CPU`."
    # Work internally in (seq, batch, hid) layout.
    if batch_first:
        x, f = x.transpose(0, 1), f.transpose(0, 1)
    result = torch.zeros_like(x)
    prev = torch.zeros_like(result[0]) if h is None else h
    seq_len = x.shape[0]
    steps = range(seq_len - 1, -1, -1) if backward else range(seq_len)
    for t in steps:
        result[t] = f[t] * x[t] + (1 - f[t]) * prev
        prev = result[t]
    return result.transpose(0, 1) if batch_first else result
# Cross-check forget_mult_CPU against the reference loop over all four
# (batch_first, backward) combinations, with and without an initial hidden state.
x,f = torch.randn(5,3,20).chunk(2, dim=2)
for (bf, bw) in [(True,True), (False,True), (True,False), (False,False)]:
    th_out = manual_forget_mult(x, f, batch_first=bf, backward=bw)
    out = forget_mult_CPU(x, f, batch_first=bf, backward=bw)
    test_close(th_out,out)
    # Batch dim is 5 when batch_first, else 3 — size the initial hidden state accordingly.
    h = torch.randn((5 if bf else 3), 10)
    th_out = manual_forget_mult(x, f, h=h, batch_first=bf, backward=bw)
    out = forget_mult_CPU(x, f, first_h=h, batch_first=bf, backward=bw)
    test_close(th_out,out)
# -
# Scratch cell: torch.Size is a tuple subclass, so `+` concatenates,
# giving torch.Size([3, 4, 5, 0, 1, 0]) here.
x = torch.randn(3,4,5)
x.size() + torch.Size([0,1,0])
#export
class ForgetMultGPU(Function):
    "Wrapper around the CUDA kernels for the ForgetMult gate."
    @staticmethod
    def forward(ctx, x, f, first_h=None, batch_first=True, backward=False):
        # Slot that holds the initial hidden state: one past the end when reading
        # backward, position 0 when reading forward.
        ind = -1 if backward else 0
        (i,j) = (0,1) if batch_first else (1,0)
        # Allocate one extra timestep along the sequence dim to store `first_h`
        # alongside the per-step outputs.
        output = x.new_zeros(x.shape[0]+i, x.shape[1]+j, x.shape[2])
        if first_h is not None:
            if batch_first: output[:, ind] = first_h
            else: output[ind] = first_h
        else: output.zero_()  # new_zeros already zero-fills; redundant but harmless
        # Pick the kernel matching the reading direction.
        ctx.forget_mult = bwd_forget_mult_cuda if backward else forget_mult_cuda
        output = ctx.forget_mult.forward(x, f, output, batch_first)
        ctx.save_for_backward(x, f, first_h, output)
        ctx.batch_first = batch_first
        # Strip the slot that held the initial hidden state before returning.
        if backward: return output[:,:-1] if batch_first else output[:-1]
        else: return output[:,1:] if batch_first else output[1:]
    @staticmethod
    def backward(ctx, grad_output):
        x, f, first_h, output = ctx.saved_tensors
        grad_x, grad_f, grad_h = ctx.forget_mult.backward(x, f, output, grad_output, ctx.batch_first)
        # One gradient per forward argument; the non-tensor args get None.
        return (grad_x, grad_f, (None if first_h is None else grad_h), None, None)
# +
#hide
#cuda
#cpp
def detach_and_clone(t):
    "Return an independent, gradient-tracking copy of `t`."
    copy = t.detach().clone()
    return copy.requires_grad_(True)
# Compare the CUDA ForgetMult kernel against the CPU reference, for values AND gradients.
x,f = torch.randn(5,3,20).cuda().chunk(2, dim=2)
x,f = x.contiguous().requires_grad_(True),f.contiguous().requires_grad_(True)
th_x,th_f = detach_and_clone(x),detach_and_clone(f)
for (bf, bw) in [(True,True), (False,True), (True,False), (False,False)]:
    # CPU reference forward/backward.
    th_out = forget_mult_CPU(th_x, th_f, first_h=None, batch_first=bf, backward=bw)
    th_loss = th_out.pow(2).mean()
    th_loss.backward()
    # CUDA kernel forward/backward.
    out = ForgetMultGPU.apply(x, f, None, bf, bw)
    loss = out.pow(2).mean()
    loss.backward()
    test_close(th_out,out, eps=1e-4)
    test_close(th_x.grad,x.grad, eps=1e-4)
    test_close(th_f.grad,f.grad, eps=1e-4)
    # NOTE(review): `p = p.detach()` rebinds the loop variable only, so neither the
    # detach nor the grad reset reaches the original tensors — gradients appear to
    # accumulate across iterations. TODO confirm whether this was intended.
    for p in [x,f, th_x, th_f]:
        p = p.detach()
        p.grad = None
    # Same comparison with an explicit initial hidden state.
    h = torch.randn((5 if bf else 3), 10).cuda().requires_grad_(True)
    th_h = detach_and_clone(h)
    th_out = forget_mult_CPU(th_x, th_f, first_h=th_h, batch_first=bf, backward=bw)
    th_loss = th_out.pow(2).mean()
    th_loss.backward()
    out = ForgetMultGPU.apply(x.contiguous(), f.contiguous(), h, bf, bw)
    loss = out.pow(2).mean()
    loss.backward()
    test_close(th_out,out, eps=1e-4)
    test_close(th_x.grad,x.grad, eps=1e-4)
    test_close(th_f.grad,f.grad, eps=1e-4)
    test_close(th_h.grad,h.grad, eps=1e-4)
    for p in [x,f, th_x, th_f]:
        p = p.detach()
        p.grad = None
# -
# ## QRNN
#export
class QRNNLayer(Module):
    "Apply a single layer Quasi-Recurrent Neural Network (QRNN) to an input sequence."
    def __init__(self, input_size, hidden_size=None, save_prev_x=False, zoneout=0, window=1,
                 output_gate=True, batch_first=True, backward=False):
        assert window in [1, 2], "This QRNN implementation currently only handles convolutional window of size 1 or size 2"
        self.save_prev_x,self.zoneout,self.window = save_prev_x,zoneout,window
        self.output_gate,self.batch_first,self.backward = output_gate,batch_first,backward
        # Hidden size defaults to the input size when not given.
        hidden_size = ifnone(hidden_size, input_size)
        #One large matmul with concat is faster than N small matmuls and no concat
        mult = (3 if output_gate else 2)
        self.linear = nn.Linear(window * input_size, mult * hidden_size)
        # Boundary timestep saved from the previous batch (only used when
        # `save_prev_x` and `window > 1`); cleared by `reset()`.
        self.prevX = None
    def reset(self): self.prevX = None
    def forward(self, inp, hid=None):
        # Single projection produces all gates at once; split them afterwards.
        y = self.linear(self._get_source(inp))
        if self.output_gate: z_gate,f_gate,o_gate = y.chunk(3, dim=2)
        else: z_gate,f_gate = y.chunk(2, dim=2)
        z_gate,f_gate = z_gate.tanh(),f_gate.sigmoid()
        # Zoneout: randomly drop parts of the forget gate during training.
        if self.zoneout and self.training:
            f_gate *= dropout_mask(f_gate, f_gate.size(), self.zoneout).requires_grad_(False)
        # Use the CUDA kernel when the input lives on the GPU, else the CPU loop.
        forget_mult = dispatch_cuda(ForgetMultGPU, partial(forget_mult_CPU), inp)
        c_gate = forget_mult(z_gate, f_gate, hid, self.batch_first, self.backward)
        output = torch.sigmoid(o_gate) * c_gate if self.output_gate else c_gate
        if self.window > 1 and self.save_prev_x:
            # Remember the boundary timestep so the next batch's window-2 shift can use it.
            if self.backward: self.prevX = (inp[:, :1] if self.batch_first else inp[:1]) .detach()
            else: self.prevX = (inp[:, -1:] if self.batch_first else inp[-1:]).detach()
        # Returned hidden state: last timestep in the reading direction.
        idx = 0 if self.backward else -1
        return output, (c_gate[:, idx] if self.batch_first else c_gate[idx])
    def _get_source(self, inp):
        # For window=2, pair each timestep with its neighbor along the feature dim.
        if self.window == 1: return inp
        dim = (1 if self.batch_first else 0)
        if self.batch_first:
            prev = torch.zeros_like(inp[:,:1]) if self.prevX is None else self.prevX
            # Pad or trim the saved boundary state if the batch size changed between calls.
            if prev.shape[0] < inp.shape[0]: prev = torch.cat([prev, torch.zeros_like(inp[prev.shape[0]:, :1])], dim=0)
            if prev.shape[0] > inp.shape[0]: prev= prev[:inp.shape[0]]
        else:
            prev = torch.zeros_like(inp[:1]) if self.prevX is None else self.prevX
            if prev.shape[1] < inp.shape[1]: prev = torch.cat([prev, torch.zeros_like(inp[:1, prev.shape[0]:])], dim=1)
            if prev.shape[1] > inp.shape[1]: prev= prev[:,:inp.shape[1]]
        # Build the shifted copy of the sequence (shift direction depends on `backward`).
        inp_shift = [prev]
        if self.backward: inp_shift.insert(0,inp[:,1:] if self.batch_first else inp[1:])
        else: inp_shift.append(inp[:,:-1] if self.batch_first else inp[:-1])
        inp_shift = torch.cat(inp_shift, dim)
        return torch.cat([inp, inp_shift], 2)
# A backward layer applied to a time-flipped sequence must match the forward layer.
qrnn_fwd = QRNNLayer(10, 20, save_prev_x=True, zoneout=0, window=2, output_gate=True)
qrnn_bwd = QRNNLayer(10, 20, save_prev_x=True, zoneout=0, window=2, output_gate=True, backward=True)
# Share the weights so both layers compute the same function.
qrnn_bwd.load_state_dict(qrnn_fwd.state_dict())
x_fwd = torch.randn(7,5,10)
x_bwd = x_fwd.clone().flip(1)
y_fwd,h_fwd = qrnn_fwd(x_fwd)
y_bwd,h_bwd = qrnn_bwd(x_bwd)
test_close(y_fwd, y_bwd.flip(1), eps=1e-4)
test_close(h_fwd, h_bwd, eps=1e-4)
# Second call exercises the saved previous-x state and an explicit hidden state.
y_fwd,h_fwd = qrnn_fwd(x_fwd, h_fwd)
y_bwd,h_bwd = qrnn_bwd(x_bwd, h_bwd)
test_close(y_fwd, y_bwd.flip(1), eps=1e-4)
test_close(h_fwd, h_bwd, eps=1e-4)
#export
class QRNN(Module):
    "Apply a multiple layer Quasi-Recurrent Neural Network (QRNN) to an input sequence."
    def __init__(self, input_size, hidden_size, n_layers=1, batch_first=True, dropout=0,
                 bidirectional=False, save_prev_x=False, zoneout=0, window=None, output_gate=True):
        assert not (save_prev_x and bidirectional), "Can't save the previous X with bidirectional."
        kwargs = dict(batch_first=batch_first, zoneout=zoneout, output_gate=output_gate)
        # First layer sees the raw input (window 2 by default); later layers see hidden states.
        self.layers = nn.ModuleList([QRNNLayer(input_size if l == 0 else hidden_size, hidden_size, save_prev_x=save_prev_x,
                                               window=((2 if l ==0 else 1) if window is None else window), **kwargs)
                                     for l in range(n_layers)])
        if bidirectional:
            # Mirror stack reading the sequence in reverse.
            self.layers_bwd = nn.ModuleList([QRNNLayer(input_size if l == 0 else hidden_size, hidden_size,
                                                       backward=True, window=((2 if l ==0 else 1) if window is None else window),
                                                       **kwargs) for l in range(n_layers)])
        self.n_layers,self.batch_first,self.dropout,self.bidirectional = n_layers,batch_first,dropout,bidirectional
    def reset(self):
        "Reset the hidden state."
        for layer in self.layers: layer.reset()
        if self.bidirectional:
            for layer in self.layers_bwd: layer.reset()
    def forward(self, inp, hid=None):
        new_hid = []
        if self.bidirectional: inp_bwd = inp.clone()
        for i, layer in enumerate(self.layers):
            # When bidirectional, hid interleaves forward/backward states: 2i / 2i+1.
            inp, h = layer(inp, None if hid is None else hid[2*i if self.bidirectional else i])
            new_hid.append(h)
            if self.bidirectional:
                inp_bwd, h_bwd = self.layers_bwd[i](inp_bwd, None if hid is None else hid[2*i+1])
                new_hid.append(h_bwd)
            if self.dropout != 0 and i < len(self.layers) - 1:
                # BUG FIX: the original wrote `for o in [...]: o = F.dropout(o, ...)`,
                # which rebound the loop variable and discarded the result, so
                # inter-layer dropout was silently a no-op. Rebind the tensors instead.
                inp = F.dropout(inp, p=self.dropout, training=self.training, inplace=False)
                if self.bidirectional:
                    inp_bwd = F.dropout(inp_bwd, p=self.dropout, training=self.training, inplace=False)
        # Concatenate directions on the feature dim; stack per-layer hidden states.
        if self.bidirectional: inp = torch.cat([inp, inp_bwd], dim=2)
        return inp, torch.stack(new_hid, 0)
# Sanity-check output shapes of a 2-layer bidirectional QRNN without output gate.
qrnn = QRNN(10, 20, 2, bidirectional=True, batch_first=True, window=2, output_gate=False)
x = torch.randn(7,5,10)
y,h = qrnn(x)
# Features double (forward + backward); hidden has n_layers * 2 entries.
test_eq(y.size(), [7, 5, 40])
test_eq(h.size(), [4, 7, 20])
#Without an out gate, the last timestamp in the forward output is the second to last hidden
#and the first timestamp of the backward output is the last hidden
test_close(y[:,-1,:20], h[2])
test_close(y[:,0,20:], h[3])
# ## Export -
#hide
from nbdev.export import notebook2script
notebook2script()
|
fastai/nbs/36_text.models.qrnn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ipytest
# language: python
# name: ipytest
# ---
# +
import pytest
import ipytest
# ipytest needs a module name for the notebook so pytest can collect from it.
__file__ = 'TestAsync.ipynb'
ipytest.config(magics=True, rewrite_asserts=True, addopts=["-qq"])
# -
# Trivial async test used to reproduce the pytest/IPython event-loop clash.
@pytest.mark.asyncio
async def test_foo():
    assert True
# NBVAL_RAISES_EXCEPTION
# NOTE: pytest + ipython clash, when running async tests in the main thread
ipytest.run()
# # When running in a separate thread no error is raised
ipytest.config(run_in_thread=True)
ipytest.run()
|
tests/TestAsync.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
from tqdm import tqdm
import os
import random
import argparse
import numpy as np
from PIL import Image
import torch
import torchvision.transforms.functional as F
# + pycharm={"name": "#%%\n"}
# Scratch check of a Cityscapes label file.
# NOTE(review): absolute path to a local SSD — will only run on the original machine.
a = np.load('/ssd1/DynamicConv/Detectron_Datasets/cityscapes/gtFine/neighbor/train/aachen_000000_000019_gtFine_neighbor.npy').astype(np.uint8)
print(a.shape)
# Commented-out experiment: verify that a double horizontal flip is the identity.
# b = torch.from_numpy(a)
# b = F.hflip(b)
# b = F.hflip(b)
#
# c = np.array(b)
# print(np.array_equal(a, c))
# + pycharm={"name": "#%%\n"}
# Inspect the precomputed per-class loss weights.
a = np.load('/ssd1/DynamicConv/Detectron_Datasets/cityscapes/cityscapes_class_weights.npy')
a
# + pycharm={"name": "#%%\n"}
|
check.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + init_cell=true slideshow={"slide_type": "skip"}
# %%HTML
<link rel="stylesheet" type="text/css" href="custom.css">
# + [markdown] slideshow={"slide_type": "slide"}
# # Warsztaty modelowania w nanofizyce
#
# <video style="margin:10px;" height=340 preload="metadata" type="video/webm" autoplay loop
# src="http://wolf.ifj.edu.pl/~jochym/p/notebooks/anim_T2000.webm" >
# </video>
#
# **<NAME>**
#
# Zakład Komputerowych Badań Materiałów
#
# Instytut Fizyki Jądrowej PAN, Kraków
# + [markdown] slideshow={"slide_type": "subslide"}
# Prezentację można śledzić równolegle pod adresem: https://goo.gl/Nj8tah
#
# Materiały pomocnicze do warsztatów mają formę zbioru zeszytów [Jupyter](https://jupyter.org/) dostępnych w publicznym repozytorium: https://goo.gl/5JKOJK
#
# Kompletne środowisko do ćwiczeń wraz prezentacją i danymi znajduje się w serwisie binder (https://goo.gl/RsOgwu).
#
# [](http://beta.mybinder.org/v2/gh/jochym/abinitio-workshop/master)
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Plan
#
# * Środowisko
# * Schemat pracy
# * Wyliczenie struktury nanocząstki
# * Dynamika molekularna nanocząstki
# * Termodynamika nanocząstki
# + [markdown] slideshow={"slide_type": "slide"}
# ## Środowisko pracy
#
# Istnieje wiele sposobów organizacji pracy przy obliczeniowych badaniach materiałów. Kilka z nich można zobaczyć w pozostałych wykładach. Moja prezentacja posługuje się systemem Jupyter, który pozwala na mieszanie dokumentacji/notatek z kodem służącym do analizy danych, a także na przygotowanie materiału do prezentacji (jak np. slajdy które państwo widzą czy tekst artykułu).
#
# Środowisko pracy składa się z następujących elementów:
# + [markdown] slideshow={"slide_type": "fragment"}
# * System kolejkowy (wykonujacy zadania)
#
# * Serwer zarządzający (pozwalający zlecać zadania)
#
# * Serwer Jupyter (z nim łączymy się przeglądarką)
#
# * Terminal/klient (np. laptop z przeglądarką WWW)
# + [markdown] slideshow={"slide_type": "fragment"}
# **Uwaga:** *Tylko ostatnie dwa elementy dostępne są w paczce z serwisu binder - zawiera ona jedynie wyniki już przeprowadzonych obliczeń*
# + [markdown] slideshow={"slide_type": "slide"}
# ## Cykl życia publikacji w dziedzinie fizyki obliczeniowej
# <img src="life-cycle_opt.svg" style="height:400px;" />
# + [markdown] slideshow={"slide_type": "subslide"}
# * Nie ma wielkiej różnicy między badaniami eksperymentalnymi a teoretycznymi
# * Programy obliczeń z pierwszych zasad pełnią rolę aparatury eksperymentalnej
# * Cykl obliczeń, wizualizacji, interpretacji i modyfikacji obliczeń powtarza się wielokrotnie
# + [markdown] slideshow={"slide_type": "fragment"}
# * Ważna jest dokumentacja etapów pracy
# * Dobrze gdy ostateczna publikacja budowana jest jednocześnie z produkcją wyników
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Podsumowanie
#
# * Obliczenia z pierwszych zasad rzadko dostarczają bezpośrednio mierzalnych wielkości fizycznych.
# * Analiza i dalsze przetwarzanie danych uzyskanych metodami obliczeniowej mechaniki kwantowej są tak samo ważne jak same obliczenia z pierwszych zasad.
# * Wygodne środowisko i bogata biblioteka funkcji może znacząco usprawnić proces analizy uzyskanych wyników.
# + [markdown] slideshow={"slide_type": "fragment"}
# * Należy pamiętać że komputery pracują w systemie:
#
# **Ś-Ś** **(Śmieci na wejściu - Śmieci na wyjściu)**
#
#
# * Chwila zastanowienia jest niejednokrotnie więcej warta niż miesiące obliczeń
#
# *Wszystkie obliczenia prowadzące do pokazanych wyników, oraz wszystkie konieczne dane są dostępne w plikach źródłowych oraz materiałach ćwiczeniowych. Wszystkie materiały można znaleźć pod adresem: https://goo.gl/5JKOJK *
# + [markdown] slideshow={"slide_type": "slide"}
# ## Dziękuję za uwagę!
# ---
# ### *Zapraszam na ćwiczenia praktyczne!*
|
notebooks/Prezentacja.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# 
# # Configuration
#
# _**Setting up your Azure Machine Learning services workspace and configuring your notebook library**_
#
# ---
# ---
#
# ## Table of Contents
#
# 1. [Introduction](#Introduction)
# 1. What is an Azure Machine Learning workspace
# 1. [Setup](#Setup)
# 1. Azure subscription
# 1. Azure ML SDK and other library installation
# 1. Azure Container Instance registration
# 1. [Configure your Azure ML Workspace](#Configure%20your%20Azure%20ML%20workspace)
# 1. Workspace parameters
# 1. Access your workspace
# 1. Create a new workspace
# 1. Create compute resources
# 1. [Next steps](#Next%20steps)
#
# ---
#
# ## Introduction
#
# This notebook configures your library of notebooks to connect to an Azure Machine Learning (ML) workspace. In this case, a library contains all of the notebooks in the current folder and any nested folders. You can configure this notebook library to use an existing workspace or create a new workspace.
#
# Typically you will need to run this notebook only once per notebook library as all other notebooks will use connection information that is written here. If you want to redirect your notebook library to work with a different workspace, then you should re-run this notebook.
#
# In this notebook you will
# * Learn about getting an Azure subscription
# * Specify your workspace parameters
# * Access or create your workspace
# * Add a default compute cluster for your workspace
#
# ### What is an Azure Machine Learning workspace
#
# An Azure ML Workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.
# ## Setup
#
# This section describes activities required before you can access any Azure ML services functionality.
# ### 1. Azure Subscription
#
# In order to create an Azure ML Workspace, first you need access to an Azure subscription. An Azure subscription allows you to manage storage, compute, and other assets in the Azure cloud. You can [create a new subscription](https://azure.microsoft.com/en-us/free/) or access existing subscription information from the [Azure portal](https://portal.azure.com). Later in this notebook you will need information such as your subscription ID in order to create and access AML workspaces.
#
# ### 2. Azure ML SDK and other library installation
#
# If you are running in your own environment, follow [SDK installation instructions](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment). If you are running in Azure Notebooks or another Microsoft managed environment, the SDK is already installed.
#
# Also install the following libraries in your environment. Many of the example notebooks depend on them
#
# ```
# (myenv) $ conda install -y matplotlib tqdm scikit-learn
# ```
#
# Once installation is complete, the following cell checks the Azure ML SDK version:
# + tags=["install"]
# Import the Azure ML SDK and report the installed version so it can be
# compared against the version this notebook was authored with (1.30.0).
import azureml.core
print("This notebook was created using version 1.30.0 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
# -
# If you are using an older version of the SDK then this notebook was created using, you should upgrade your SDK.
#
# ### 3. Azure Container Instance registration
# Azure Machine Learning makes use of [Azure Container Instance (ACI)](https://azure.microsoft.com/services/container-instances) to deploy dev/test web services. An Azure subscription needs to be registered to use ACI. If you or the subscription owner have not yet registered ACI on your subscription, you will need to use the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and execute the following commands. Note that if you ran through the AML [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) you have already registered ACI.
#
# ```shell
# # check to see if ACI is already registered
# (myenv) $ az provider show -n Microsoft.ContainerInstance -o table
#
# # if ACI is not registered, run this command.
# # note you need to be the subscription owner in order to execute this command successfully.
# (myenv) $ az provider register -n Microsoft.ContainerInstance
# ```
#
# ---
# ## Configure your Azure ML workspace
#
# ### Workspace parameters
#
# To use an AML Workspace, you will need to import the Azure ML SDK and supply the following information:
# * Your subscription id
# * A resource group name
# * (optional) The region that will host your workspace
# * A name for your workspace
#
# You can get your subscription ID from the [Azure portal](https://portal.azure.com).
#
# You will also need access to a [_resource group_](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview#resource-groups), which organizes Azure resources and provides a default region for the resources in a group. You can see what resource groups to which you have access, or create a new one in the [Azure portal](https://portal.azure.com). If you don't have a resource group, the create workspace command will create one for you using the name you provide.
#
# The region to host your workspace will be used if you are creating a new workspace. You do not need to specify this if you are using an existing workspace. You can find the list of supported regions [here](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=machine-learning-service). You should pick a region that is close to your location or that contains your data.
#
# The name for your workspace is unique within the subscription and should be descriptive enough to discern among other AML Workspaces. The subscription may be used only by you, or it may be used by your department or your entire enterprise, so choose a name that makes sense for your situation.
#
# The following cell allows you to specify your workspace parameters. This cell uses the python method `os.getenv` to read values from environment variables which is useful for automation. If no environment variable exists, the parameters will be set to the specified default values.
#
# If you ran the Azure Machine Learning [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) in Azure Notebooks, you already have a configured workspace! You can go to your Azure Machine Learning Getting Started library, view *config.json* file, and copy-paste the values for subscription ID, resource group and workspace name below.
#
# Replace the default values in the cell below with your workspace parameters
# +
import os

# Workspace coordinates, overridable through environment variables so automated
# runs never have to edit the notebook.  The angle-bracket defaults are
# placeholders that must be replaced; the region defaults to eastus2.
subscription_id = os.getenv("SUBSCRIPTION_ID", "<my-subscription-id>")
resource_group = os.getenv("RESOURCE_GROUP", "<my-resource-group>")
workspace_name = os.getenv("WORKSPACE_NAME", "<my-workspace-name>")
workspace_region = os.getenv("WORKSPACE_REGION", "eastus2")
# -
# ### Access your workspace
#
# The following cell uses the Azure ML SDK to attempt to load the workspace specified by your parameters. If this cell succeeds, your notebook library will be configured to access the workspace from all notebooks using the `Workspace.from_config()` method. The cell can fail if the specified workspace doesn't exist or you don't have permissions to access it.
# +
from azureml.core import Workspace

# Attempt to load the workspace described by the parameters above.  On success
# the connection details are cached (config.json) so the other notebooks can
# simply call Workspace.from_config(); on failure, fall through to creation.
try:
    ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)
    # write the details of the workspace to a configuration file to the notebook library
    ws.write_config()
    print("Workspace configuration succeeded. Skip the workspace creation steps below")
except Exception:
    # A bare `except:` would also swallow KeyboardInterrupt/SystemExit;
    # Exception is still broad enough for the SDK's auth/HTTP/value errors here.
    print("Workspace not accessible. Change your parameters or create a new workspace below")
# -
# ### Create a new workspace
#
# If you don't have an existing workspace and are the owner of the subscription or resource group, you can create a new workspace. If you don't have a resource group, the create workspace command will create one for you using the name you provide.
#
# **Note**: As with other Azure services, there are limits on certain resources (for example AmlCompute quota) associated with the Azure ML service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
#
# This cell will create an Azure ML workspace for you in a subscription provided you have the correct permissions.
#
# This will fail if:
# * You do not have permission to create a workspace in the resource group
# * You do not have permission to create a resource group if it's non-existing.
# * You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription
#
# If workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.
#
# **Note**: A Basic workspace is created by default. If you would like to create an Enterprise workspace, please specify sku = 'enterprise'.
# Please visit our [pricing page](https://azure.microsoft.com/en-us/pricing/details/machine-learning/) for more details on our Enterprise edition.
#
# + tags=["create workspace"]
from azureml.core import Workspace

# Provision (or fetch, thanks to exist_ok=True) the workspace described by the
# parameters above, then persist its connection info for the other notebooks.
ws = Workspace.create(
    name=workspace_name,
    subscription_id=subscription_id,
    resource_group=resource_group,
    location=workspace_region,
    create_resource_group=True,  # make the resource group if it does not exist
    sku='basic',                 # pass sku='enterprise' for an Enterprise workspace
    exist_ok=True,               # reuse an existing workspace of the same name
)
ws.get_details()

# Cache the workspace details (config.json) so Workspace.from_config() works.
ws.write_config()
# -
# ### Create compute resources for your training experiments
#
# Many of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.
#
# > Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.
#
# To create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.
#
# The cluster parameters are:
# * vm_size - this describes the virtual machine type and size used in the cluster. All machines in the cluster are the same type. You can get the list of vm sizes available in your region by using the CLI command
#
# ```shell
# az vm list-skus -o tsv
# ```
# * min_nodes - this sets the minimum size of the cluster. If you set the minimum to 0 the cluster will shut down all nodes while not in use. Setting this number to a value higher than 0 will allow for faster start-up times, but you will also be billed when the cluster is not in use.
# * max_nodes - this sets the maximum size of the cluster. Setting this to a larger number allows for more concurrency and a greater distributed processing of scale-out jobs.
#
#
# To create a **CPU** cluster now, run the cell below. The autoscale settings mean that the cluster will scale down to 0 nodes when inactive and up to 4 nodes when busy.
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

# Name of the default CPU cluster used by the sample notebooks.
cpu_cluster_name = "cpu-cluster"

# Reuse the cluster when it already exists; otherwise provision it.
try:
    cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
    print("Found existing cpu-cluster")
except ComputeTargetException:
    print("Creating new cpu-cluster")
    # Autoscaling pool of up to four STANDARD_D2_V2 nodes that shuts down
    # completely (min_nodes=0) when idle, so it costs nothing while unused.
    compute_config = AmlCompute.provisioning_configuration(
        vm_size="STANDARD_D2_V2", min_nodes=0, max_nodes=4
    )
    cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)
    # Block until provisioning finishes, streaming the progress log.
    cpu_cluster.wait_for_completion(show_output=True)
# -
# To create a **GPU** cluster, run the cell below. Note that your subscription must have sufficient quota for GPU VMs or the command will fail. To increase quota, see [these instructions](https://docs.microsoft.com/en-us/azure/azure-supportability/resource-manager-core-quotas-request).
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException

# Name of the default GPU cluster used by the sample notebooks.
gpu_cluster_name = "gpu-cluster"

# Reuse the cluster when it already exists; otherwise provision it.
try:
    gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)
    print("Found existing gpu cluster")
except ComputeTargetException:
    print("Creating new gpu-cluster")
    # Up to four STANDARD_NC6 GPU nodes, scaling to zero when idle.
    # The subscription must have GPU VM quota or creation will fail.
    compute_config = AmlCompute.provisioning_configuration(
        vm_size="STANDARD_NC6", min_nodes=0, max_nodes=4
    )
    gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)
    # Block until provisioning finishes, streaming the progress log.
    gpu_cluster.wait_for_completion(show_output=True)
# -
# ---
#
# ## Next steps
#
# In this notebook you configured this notebook library to connect easily to an Azure ML workspace. You can copy this notebook to your own libraries to connect them to your workspace, or use it to bootstrap new workspaces completely.
#
# If you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into "how-to" notebooks and start creating and deploying models. A good place to start is the [train within notebook](./how-to-use-azureml/training/train-within-notebook) example that walks through a simplified but complete end to end machine learning process.
|
configuration.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "29b72b5f8cb2ce33aa81c939b8d2138c", "grade": false, "grade_id": "cell-02487845739eb4fd", "locked": true, "schema_version": 3, "solution": false}
# ### Lab 3: Expectation Maximization and Variational Autoencoder
#
# ### Machine Learning 2 (2019)
#
# * The lab exercises can be done in groups of two people, or individually.
# * The deadline is Tuesday, October 15th at 17:00.
# * Assignment should be submitted through Canvas! Make sure to include your and your teammates' names with the submission.
# * Attach the .IPYNB (IPython Notebook) file containing your code and answers. Naming of the file should be "studentid1\_studentid2\_lab#", for example, the attached file should be "12345\_12346\_lab1.ipynb". Only use underscores ("\_") to connect ids, otherwise the files cannot be parsed.
#
# Notes on implementation:
#
# * You should write your code and answers in an IPython Notebook: http://ipython.org/notebook.html. If you have problems, please ask.
# * Use __one cell__ for code and markdown answers only!
# * Put all code in the cell with the ```# YOUR CODE HERE``` comment and overwrite the ```raise NotImplementedError()``` line.
# * For theoretical questions, put your solution using LaTeX style formatting in the YOUR ANSWER HERE cell.
# * Among the first lines of your notebook should be "%pylab inline". This imports all required modules, and your plots will appear inline.
# * Large parts of you notebook will be graded automatically. Therefore it is important that your notebook can be run completely without errors and within a reasonable time limit. To test your notebook before submission, select Kernel -> Restart \& Run All.
# $\newcommand{\bx}{\mathbf{x}} \newcommand{\bpi}{\mathbf{\pi}} \newcommand{\bmu}{\mathbf{\mu}} \newcommand{\bX}{\mathbf{X}} \newcommand{\bZ}{\mathbf{Z}} \newcommand{\bz}{\mathbf{z}}$
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "e4e05229ee79b55d6589e1ea8de68f32", "grade": false, "grade_id": "cell-a0a6fdb7ca694bee", "locked": true, "schema_version": 3, "solution": false}
# ### Installing PyTorch
#
# In this lab we will use PyTorch. PyTorch is an open source deep learning framework primarily developed by Facebook's artificial-intelligence research group. In order to install PyTorch in your conda environment go to https://pytorch.org and select your operating system, conda, Python 3.6, no cuda. Copy the text from the "Run this command:" box. Now open a terminal and activate your 'ml2labs' conda environment. Paste the text and run. After the installation is done you should restart Jupyter.
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "d9c3d77f550b5fd93b34fd18825c47f0", "grade": false, "grade_id": "cell-746cac8d9a21943b", "locked": true, "schema_version": 3, "solution": false}
# ### MNIST data
#
# In this Lab we will use several methods for unsupervised learning on the MNIST dataset of written digits. The dataset contains digital images of handwritten numbers $0$ through $9$. Each image has 28x28 pixels that each take 256 values in a range from white ($= 0$) to black ($=1$). The labels belonging to the images are also included.
# Fortunately, PyTorch comes with a MNIST data loader. The first time you run the box below it will download the MNIST data set. That can take a couple of minutes.
# The main data types in PyTorch are tensors. For Part 1, we will convert those tensors to numpy arrays. In Part 2, we will use the torch module to directly work with PyTorch tensors.
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "4fbc152afa1255331d7b88bf00b7156c", "grade": false, "grade_id": "cell-7c995be0fda080c0", "locked": true, "schema_version": 3, "solution": false}
# %pylab inline
import torch
from torchvision import datasets, transforms
# Download (on first run) and load the MNIST training set.
# NOTE(review): reading .train_data/.train_labels below presumably bypasses the
# Normalize transform (raw pixel values) — confirm if exact scaling matters.
train_dataset = datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ]))
# NOTE(review): .train_labels/.train_data are deprecated in newer torchvision
# in favor of .targets/.data — confirm against the pinned torchvision version.
train_labels = train_dataset.train_labels.numpy()
train_data = train_dataset.train_data.numpy()
# For EM we will use flattened data
train_data = train_data.reshape(train_data.shape[0], -1)
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "4fc852f9bfb0bab10d4c23eada309e89", "grade": false, "grade_id": "cell-8b4a44df532b1867", "locked": true, "schema_version": 3, "solution": false}
# ## Part 1: Expectation Maximization
# We will use the Expectation Maximization (EM) algorithm for the recognition of handwritten digits in the MNIST dataset. The images are modelled as a Bernoulli mixture model (see Bishop $\S9.3.3$):
# $$
# p(\bx|\bmu, \bpi) = \sum_{k=1}^K \pi_k \prod_{i=1}^D \mu_{ki}^{x_i}(1-\mu_{ki})^{(1-x_i)}
# $$
# where $x_i$ is the value of pixel $i$ in an image, $\mu_{ki}$ represents the probability that pixel $i$ in class $k$ is black, and $\{\pi_1, \ldots, \pi_K\}$ are the mixing coefficients of classes in the data. We want to use this data set to classify new images of handwritten numbers.
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "54064637b7e7cf938c0f778d748a226a", "grade": false, "grade_id": "cell-af03fef663aa85b2", "locked": true, "schema_version": 3, "solution": false}
# ### 1.1 Binary data (5 points)
# As we like to apply our Bernoulli mixture model, write a function `binarize` to convert the (flattened) MNIST data to binary images, where each pixel $x_i \in \{0,1\}$, by thresholding at an appropriate level.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "fe8607a4d734f7f26ef1ee1e54b33471", "grade": false, "grade_id": "cell-ec4365531ca57ef3", "locked": false, "schema_version": 3, "solution": true}
def binarize(X):
    """Threshold an array of MNIST pixel values at 0.5 into {0.0, 1.0}."""
    # The comparison produces booleans; cast so pixels come back as floats.
    return (X >= 0.5).astype(float)
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "231b2c9f29bc5c536c60cef4d74793a1", "grade": true, "grade_id": "cell-2f16f57cb68a83b3", "locked": true, "points": 5, "schema_version": 3, "solution": false}
# Test test test!
bin_train_data = binarize(train_data)
# `np.float` was a deprecated alias for the builtin float and was removed in
# NumPy 1.24, so the original comparison raises AttributeError on modern NumPy;
# np.float64 is the concrete dtype the alias resolved to for array data.
assert bin_train_data.dtype == np.float64
assert bin_train_data.shape == train_data.shape
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "a0a39404cc2f67078b399ee34653a3ac", "grade": false, "grade_id": "cell-462e747685e8670f", "locked": true, "schema_version": 3, "solution": false}
# Sample a few images of digits $2$, $3$ and $4$; and show both the original and the binarized image together with their label.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "3f3c981f0fda5ba3bdfcefb9144305c7", "grade": true, "grade_id": "cell-784c6bd177a9aa42", "locked": false, "points": 5, "schema_version": 3, "solution": true}
# YOUR CODE HERE
def visualize_digit(digit, n_samples=4):
    """Show n_samples random training images of `digit` beside their binarized versions."""
    # Random indices of training images carrying this label.
    idxs = np.random.choice(np.where(train_labels == digit)[0], n_samples)
    # One row of axes: original and binarized image side by side per sample.
    fig, ax = plt.subplots(nrows=1, ncols=2*n_samples, figsize=(20, 20))
    i=0
    for idx in idxs:
        # train_data was flattened earlier, so restore the 28x28 image shape.
        img = train_data[idx].reshape(28, 28)
        bin_img = binarize(img)
        ax[i].imshow(img, cmap='gray')
        ax[i].set_title(str(digit), fontsize=16)
        ax[i].axis('off')
        ax[i+1].imshow(bin_img, cmap='gray')
        ax[i+1].set_title('Binarized '+str(digit), fontsize=16)
        ax[i+1].axis('off')
        i+=2
# Render the three digits used later in the EM experiment.
for digits in [2, 3, 4]:
    visualize_digit(digits)
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "4b9da574d24193df76e96ed8ca62c7b0", "grade": false, "grade_id": "cell-56b33654497d4052", "locked": true, "schema_version": 3, "solution": false}
# ### 1.2 Implementation (40 points)
# You are going to write a function ```EM(X, K, max_iter)``` that implements the EM algorithm on the Bernoulli mixture model.
#
# The only parameters the function has are:
# * ```X``` :: (NxD) array of input training images
# * ```K``` :: size of the latent space
# * ```max_iter``` :: maximum number of iterations, i.e. one E-step and one M-step
#
# You are free to specify your return statement.
#
# Make sure you use a sensible way of terminating the iteration process early to prevent unnecessarily running through all epochs. Vectorize computations using ```numpy``` as much as possible.
#
# You should implement the `E_step(X, mu, pi)` and `M_step(X, gamma)` separately in the functions defined below. These you can then use in your function `EM(X, K, max_iter)`.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "316c9131692747c363b5db8e9091d362", "grade": false, "grade_id": "cell-882b13c117a73cc4", "locked": false, "schema_version": 3, "solution": true}
def E_step(X, mu, pi):
    """Return responsibilities gamma[n, k] = p(z_n = k | x_n, mu, pi) for a
    Bernoulli mixture (Bishop 9.3.3), computed row-normalized over k."""
    stab = 1e-10  # keeps log() away from -inf when a probability is exactly 0
    log_prior = np.log(pi + stab)
    log_on = X @ np.log(mu.T + stab)              # contribution of pixels equal to 1
    log_off = (1 - X) @ np.log(1 - mu.T + stab)   # contribution of pixels equal to 0
    unnorm = np.exp(log_prior + log_on + log_off)
    return unnorm / unnorm.sum(axis=1, keepdims=True)
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "1418f4014e98024fc97446ce27766c1d", "grade": true, "grade_id": "cell-f7c7dd52d82e2498", "locked": true, "points": 15, "schema_version": 3, "solution": false}
# Let's test on 5 datapoints
n_test = 5
X_test = bin_train_data[:n_test]
# D_test: pixels per image; K_test: number of mixture components for the test.
D_test, K_test = X_test.shape[1], 10
np.random.seed(2018)  # fixed seed so the grader sees deterministic parameters
mu_test = np.random.uniform(low=.25, high=.75, size=(K_test,D_test))
pi_test = np.ones(K_test) / K_test  # uniform mixing coefficients
gamma_test = E_step(X_test, mu_test, pi_test)
# Responsibilities: one row per datapoint, one column per component.
assert gamma_test.shape == (n_test, K_test)
# + deletable=false nbgrader={"cell_type": "code", "checksum": "2c426a613653174795cd9c8327ab6e20", "grade": false, "grade_id": "cell-f1b11b8765bd1ef6", "locked": false, "schema_version": 3, "solution": true}
def M_step(X, gamma):
    """Re-estimate the Bernoulli means and mixing coefficients from the
    responsibilities (maximization step of EM for a Bernoulli mixture)."""
    n_samples = X.shape[0]
    # Effective number of points currently assigned to each component.
    counts = gamma.sum(axis=0)
    mixing = counts / n_samples
    # Responsibility-weighted mean image of each component.
    means = (gamma.T @ X) / counts[:, None]
    return means, mixing
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "0f60d48b8b22063cef560b42944a0aa4", "grade": true, "grade_id": "cell-6e7c751b30acfd45", "locked": true, "points": 5, "schema_version": 3, "solution": false}
# Oh, let's test again
# The M-step returns per-component Bernoulli means and mixing coefficients.
mu_test, pi_test = M_step(X_test, gamma_test)
assert mu_test.shape == (K_test,D_test)
assert pi_test.shape == (K_test, )
# + deletable=false nbgrader={"cell_type": "code", "checksum": "acfec6384b058cb0ce1932006fbfebc4", "grade": true, "grade_id": "cell-d6c4368246dee7e6", "locked": false, "points": 10, "schema_version": 3, "solution": true}
def EM(X, K, max_iter, threshold=1e-3, mu=None, pi=None):
    """Run EM for the Bernoulli mixture model.

    Parameters:
        X: (N, D) binary data matrix.
        K: number of mixture components.
        max_iter: maximum number of E/M iterations.
        threshold: convergence tolerance on the parameter-update norms.
        mu, pi: optional initial parameters; random means / uniform mixing
            coefficients are used when omitted.

    Returns (gamma, mu, pi) from the last completed iteration.
    """
    # Use X.shape instead of the bare `shape(X)` the original relied on — that
    # name only exists via the %pylab star import and breaks without it.
    if mu is None: mu = np.random.uniform(low=.15, high=.85, size=(K, X.shape[1]))
    if pi is None: pi = np.ones(K) / K
    for i in range(1, max_iter+1):
        mu_old, pi_old = mu, pi
        gamma = E_step(X, mu, pi)
        mu, pi = M_step(X, gamma)
        # Magnitude of this iteration's parameter updates, for the stop check.
        delta_mu, delta_pi = np.linalg.norm(mu-mu_old), np.linalg.norm(pi-pi_old)
        if i%10 == 0:
            print('Epoch [{:4d}/{:4d}] | delta mu: {:6.4f} | delta pi: {:6.4f}'.format(
                i, max_iter, delta_mu, delta_pi))
        if delta_mu < threshold and delta_pi < threshold:
            print("\nConverged at iteration {}.".format(i))
            break  # single exit point below replaces the duplicated return
    return gamma, mu, pi
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "b4fc12faa0da660f7a4d9cc7deb41b25", "grade": false, "grade_id": "cell-e1077ed3b83489be", "locked": true, "schema_version": 3, "solution": false}
# ### 1.3 Three digits experiment (10 points)
# In analogue with Bishop $\S9.3.3$, sample a training set consisting of only __binary__ images of written digits $2$, $3$, and $4$. Run your EM algorithm and show the reconstructed digits.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "bdbce0fad0ed151063d4c489ce999e3e", "grade": true, "grade_id": "cell-477155d0264d7259", "locked": false, "points": 5, "schema_version": 3, "solution": true}
# YOUR CODE HERE
def sample_labels(labels, epochs=100, K=None, mu=None, pi=None, true_pi_init=False):
    """Fit the Bernoulli-mixture EM on the given digits and plot the learned means.

    Args:
        labels: digit classes to keep from the (global) training set.
        epochs: maximum number of EM iterations.
        K: number of mixture components (defaults to len(labels)).
        mu, pi: optional initial parameters, passed through to EM.
        true_pi_init: if True, initialize pi from the empirical class frequencies.
    """
    if K is None: K = len(labels)
    print('-'*60+'\nTraining EM on labels {} and number of clusters K = {}\n'.format(labels, K)+'-'*60+'\n')
    labels_idxs = np.isin(train_labels, labels)
    subset_train_data = bin_train_data[labels_idxs]
    # Empirical class counts for the requested digits.
    pi_true = np.array([np.isin(train_labels, label).sum() for label in labels])
    print('True mixing coefficients: {}'.format(pi_true/pi_true.sum()))
    if true_pi_init:
        # BUG FIX: mixing coefficients must sum to 1; the raw class *counts*
        # were previously passed as the initial pi.
        pi = pi_true / pi_true.sum()
    print('\nTraining Progress')
    gamma, mu, pi = EM(X=subset_train_data, K=K, max_iter=epochs, mu=mu, pi=pi)
    # One panel per component, showing the learned Bernoulli means as an image.
    fig, ax = plt.subplots(nrows=1, ncols=K, figsize=(15, 15), dpi=100)
    for i in range(K):
        ax[i].imshow(mu[i].reshape(28, 28), cmap='gray')
        ax[i].set_title('Parameters class: {}\n pi = {:0.3f}'.format(i, pi[i]), fontsize=K**(-1)//0.02)
        ax[i].axis('off')
    plt.show()
sample_labels([2, 3, 4])
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "485543f4893938d2a9dc1c17d8221cbc", "grade": false, "grade_id": "cell-88c9664f995b1909", "locked": true, "schema_version": 3, "solution": false}
# Can you identify which element in the latent space corresponds to which digit? What are the identified mixing coefficients for digits $2$, $3$ and $4$, and how do these compare to the true ones?
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "ae7b5acea6089e2590059f90b0d0a0be", "grade": true, "grade_id": "cell-3680ae2159c48193", "locked": false, "points": 5, "schema_version": 3, "solution": true}
# #### YOUR ANSWER HERE
#
#
# Looking at the results, we can clearly distinguish the digits among the elements of the latent space. They are rendered as 'ghost' digits, which is expected, as we plot the mean distribution of every latent component.
#
# Moreover, the mixing coefficients (appeared in the second row of the title of every class figure), follow a uniform distribution, imitating the original one, since it appears that we train our model on a balanced dataset.
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "98e04feb59a36867367b3027df9e226d", "grade": false, "grade_id": "cell-0891dda1c3e80e9a", "locked": true, "schema_version": 3, "solution": false}
# ### 1.4 Experiments (20 points)
# Perform the follow-up experiments listed below using your implementation of the EM algorithm. For each of these, describe/comment on the obtained results and give an explanation. You may still use your dataset with only digits 2, 3 and 4 as otherwise computations can take very long.
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "439067186fa3ef1d7261a9bcf5a84ea6", "grade": false, "grade_id": "cell-06fe1b1355689928", "locked": true, "schema_version": 3, "solution": false}
# #### 1.4.1 Size of the latent space (5 points)
# Run EM with $K$ larger or smaller than the true number of classes. Describe your results.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "791512aeadd30c4b586b966ca10e6fad", "grade": true, "grade_id": "cell-6c9057f2546b7215", "locked": false, "points": 2, "schema_version": 3, "solution": true}
# YOUR CODE HERE
# Repeat the three-digit experiment with more (7) and fewer (2) clusters
# than the true number of classes.
for K in [7, 2]:
    sample_labels(labels=[2, 3, 4], K=K)
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "e12e40c2d2165e3bb500b5504128910d", "grade": true, "grade_id": "cell-f01c37653160244b", "locked": false, "points": 3, "schema_version": 3, "solution": true}
# #### YOUR ANSWER HERE
#
# When we use a bigger number of classes, EM further distinguishes the digits according to their variations. For example, it separates digits `2` whose bottom part is rounded from the standard ones. Another case is the digit `4`, where it distinguishes those that are skewed to the left from all the others.
#
# When we run the experiment with a lower number of classes, specifically for `K = 2`, we observed that the digit `4` always ends up as a separate class, while the two others are merged. Since EM finds `2` and `3` closer (more similar) than any combination of either with `4`, the algorithm treats `2` as a special case of `3` and decides to merge them into one class. Intuitively, one can argue that the digit `3` is dominant over `2`, as `3` covers a large area of `2`.
# This can also be justified by the mixing coefficients of the latent space, as the class where `4` is visible has half the probability of the other (~ 0.35 vs 0.65 = 2*0.325). Finally, in the class where `4` is not visible, we can see that the shape is the digit `3`, with a more washed-out connecting line from the middle part to the bottom, which is the part of `2` that makes it different from the former.
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "b306681523a2e35eea310ac10bb68999", "grade": false, "grade_id": "cell-cf478d67239b7f2e", "locked": true, "schema_version": 3, "solution": false}
# #### 1.4.2 Identify misclassifications (10 points)
# How can you use the data labels to assign a label to each of the clusters/latent variables? Use this to identify images that are 'misclassified' and try to understand why they are. Report your findings.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "000c11bd8756a4e24296c7c55d3ee17e", "grade": true, "grade_id": "cell-daa1a492fbba5c7e", "locked": false, "points": 5, "schema_version": 3, "solution": true}
# YOUR CODE HERE
def sample_misclassifications(labels, epochs=100, K=None, n_mcls=7):
    """Train EM, map clusters to digit labels, and display misclassified images.

    Each cluster is labeled by a majority vote over the true labels of its
    members, which works regardless of which cluster each digit lands in.

    Args:
        labels: digit classes to keep from the (global) training set.
        epochs: maximum number of EM iterations.
        K: number of mixture components (defaults to len(labels)).
        n_mcls: number of misclassified examples to display.
    """
    if K is None: K = len(labels)
    print('-'*60+'\nTraining EM on labels {} and number of clusters K = {}\n'.format(labels, K)+'-'*60+'\n')
    labels_idxs = np.isin(train_labels, labels)
    subset_train_labels = train_labels[labels_idxs]
    subset_train_data = bin_train_data[labels_idxs]
    print('Training Progress')
    gamma, mu, pi = EM(subset_train_data, K, epochs)
    assigned_cluster = np.argmax(gamma, axis=1)
    # BUG FIX: the cluster -> digit map was hard-coded from the cluster ids of
    # the first three data points, which fails whenever those points share a
    # cluster (missing keys) or the digits appear in a different order.
    # Label each cluster by majority vote of its members' true labels instead.
    cluster2class = {}
    for c in range(K):
        members = subset_train_labels[assigned_cluster == c]
        cluster2class[c] = np.bincount(members).argmax() if members.size else labels[c % len(labels)]
    pred_labels = np.array([cluster2class[c] for c in assigned_cluster])
    misclas_idxs = pred_labels != subset_train_labels
    misclas_imgs = subset_train_data[misclas_idxs]
    misclas_labels = pred_labels[misclas_idxs]
    # BUG FIX: cap by the number of *misclassified* samples (the boolean mask's
    # length is the full subset size), and sample over the full index range
    # without replacement (the old code excluded the last index and could
    # repeat indices).
    n_mcls = min(n_mcls, misclas_imgs.shape[0])
    rand_miss_indxs = np.random.choice(misclas_labels.shape[0], n_mcls, replace=False)
    fig, ax = plt.subplots(nrows=1, ncols=n_mcls, figsize=(15, 15), dpi=100)
    for i, miss_indx in enumerate(rand_miss_indxs):
        ax[i].imshow(misclas_imgs[miss_indx].reshape(28, 28), cmap='gray')
        ax[i].set_title('Missclassified as: {}'.format(misclas_labels[miss_indx]), fontsize=n_mcls**(-1)//0.02)
        ax[i].axis('off')
    plt.show()
sample_misclassifications([2, 3, 4])
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "baf43434481c13d76ad51e3ba07e2bf5", "grade": true, "grade_id": "cell-329245c02df7850d", "locked": false, "points": 5, "schema_version": 3, "solution": true}
# #### YOUR ANSWER HERE
#
# We can separate the causes of misclassification into two classes: _type 1_ and _type 2_.
#
# __Type 1__ refers to those misclassifications that are equally hard for humans. These consist of digits that are the result of poor handwriting. These types of errors are more or less expected.
#
# __Type 2__ consists of those digits that are distinguishable by humans but that the algorithm fails to cluster correctly. These digits are rare variations that the algorithm did not see often in the training set. This can be addressed by adding more examples of such variations to the training set.
#
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "640bc57a2d08c3becf534bb5e4b35971", "grade": false, "grade_id": "cell-67ce1222e8a7837b", "locked": true, "schema_version": 3, "solution": false}
# #### 1.4.3 Initialize with true values (5 points)
# Initialize the three classes with the true values of the parameters and see what happens. Report your results.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "a48f788e286458ef0f776865a3bcd58b", "grade": true, "grade_id": "cell-aa5d6b9f941d985d", "locked": false, "points": 2, "schema_version": 3, "solution": true}
# YOUR CODE HERE
# Re-run the experiment with pi initialized from the true class frequencies.
labels = [2, 3, 4]
sample_labels(labels, true_pi_init=True)
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "1dc4adf3081f3bec93f94c3b12b87db9", "grade": true, "grade_id": "cell-981e44f35a3764b0", "locked": false, "points": 3, "schema_version": 3, "solution": true}
# #### YOUR ANSWER HERE
#
# By initializing the classes with the true mixing coefficients, we are able to __converge much faster__, compared with the uniform initialization. In our experiments, the improvement is about half the number of iterations required. This is of course expected, since we start the optimization from a proposed solution of higher quality than a random one. Besides, the final mixing coefficients end up close to the true ones.
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "bd613f41e5d2b7d22b0d5b1e7644a48a", "grade": false, "grade_id": "cell-19bfd7cf4017ed84", "locked": true, "schema_version": 3, "solution": false}
# ## Part 2: Variational Auto-Encoder
#
# A Variational Auto-Encoder (VAE) is a probabilistic model $p(\bx, \bz)$ over observed variables $\bx$ and latent variables and/or parameters $\bz$. Here we distinguish the decoder part, $p(\bx | \bz) p(\bz)$ and an encoder part $p(\bz | \bx)$ that are both specified with a neural network. A lower bound on the log marginal likelihood $\log p(\bx)$ can be obtained by approximately inferring the latent variables z from the observed data x using an encoder distribution $q(\bz| \bx)$ that is also specified as a neural network. This lower bound is then optimized to fit the model to the data.
#
# The model was introduced by <NAME> (during his PhD at the UVA) and <NAME> in 2013, https://arxiv.org/abs/1312.6114.
#
# Since it is such an important model there are plenty of well written tutorials that should help you with the assignment. E.g: https://jaan.io/what-is-variational-autoencoder-vae-tutorial/.
#
# In the following, we will make heavily use of the torch module, https://pytorch.org/docs/stable/index.html. Most of the time replacing `np.` with `torch.` will do the trick, e.g. `np.sum` becomes `torch.sum` and `np.log` becomes `torch.log`. In addition, we will use `torch.FloatTensor()` as an equivalent to `np.array()`. In order to train our VAE efficiently we will make use of batching. The number of data points in a batch will become the first dimension of our data tensor, e.g. A batch of 128 MNIST images has the dimensions [128, 1, 28, 28]. To check check the dimensions of a tensor you can call `.size()`.
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "92bd337f41c3f94777f47376c7149ca7", "grade": false, "grade_id": "cell-bcbe35b20c1007d3", "locked": true, "schema_version": 3, "solution": false}
# ### 2.1 Loss function
# The objective function (variational lower bound), that we will use to train the VAE, consists of two terms: a log Bernoulli loss (reconstruction loss) and a Kullback–Leibler divergence. We implement the two terms separately and combine them in the end.
# As seen in Part 1: Expectation Maximization, we can use a multivariate Bernoulli distribution to model the likelihood $p(\bx | \bz)$ of black and white images. Formally, the variational lower bound is maximized but in PyTorch we are always minimizing therefore we need to calculate the negative log Bernoulli loss and Kullback–Leibler divergence.
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "3fb5f70b132e1233983ef89d19998374", "grade": false, "grade_id": "cell-389d81024af846e5", "locked": true, "schema_version": 3, "solution": false}
# ### 2.1.1 Negative Log Bernoulli loss (5 points)
# The negative log Bernoulli loss is defined as,
#
# \begin{align}
# loss = - (\sum_i^D \bx_i \log \hat{\bx_i} + (1 − \bx_i) \log(1 − \hat{\bx_i})).
# \end{align}
#
# Write a function `log_bernoulli_loss` that takes a D dimensional vector `x`, its reconstruction `x_hat` and returns the negative log Bernoulli loss. Make sure that your function works for batches of arbitrary size.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "952435ca03f47ab67a7e88b8306fc9a0", "grade": false, "grade_id": "cell-1d504606d6f99145", "locked": false, "schema_version": 3, "solution": true}
def log_bernoulli_loss(x_hat, x):
    """Negative log-likelihood of `x` under independent Bernoullis with means
    `x_hat`, summed over all elements of the batch."""
    # Summed binary cross-entropy is exactly the negative log Bernoulli loss.
    bce = torch.nn.functional.binary_cross_entropy
    return bce(x_hat, x, reduction='sum')
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "bd2a490aa694507bd032e86d77fc0087", "grade": true, "grade_id": "cell-9666dad0b2a9f483", "locked": true, "points": 5, "schema_version": 3, "solution": false}
### Test test test
# The loss on near-identical inputs should be a small positive scalar.
x_test = torch.FloatTensor([[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8], [0.9, 0.9, 0.9, 0.9]])
x_hat_test = torch.FloatTensor([[0.11, 0.22, 0.33, 0.44], [0.55, 0.66, 0.77, 0.88], [0.99, 0.99, 0.99, 0.99]])
assert log_bernoulli_loss(x_hat_test, x_test) > 0.0
assert log_bernoulli_loss(x_hat_test, x_test) < 10.0
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "6b75b7a531ecc87bce57925c4da464ee", "grade": false, "grade_id": "cell-b3a7c02dee7aa505", "locked": true, "schema_version": 3, "solution": false}
# ### 2.1.2 Negative Kullback–Leibler divergence (10 Points)
# The variational lower bound (the objective to be maximized) contains a KL term $D_{KL}(q(\bz)||p(\bz))$ that can often be calculated analytically. In the VAE we assume $q = N(\bz, \mu, \sigma^2I)$ and $p = N(\bz, 0, I)$. Solve analytically!
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "d01a7e7fe2dcf5f1c5fb955b85c8a04a", "grade": true, "grade_id": "cell-4cab10fd1a636858", "locked": false, "points": 5, "schema_version": 3, "solution": true}
# \begin{align*}
# \mathcal{KL}(q(\mathbf z)||p(\mathbf z))
# &= \int q(\mathbf z) \ln{\frac{q(\mathbf z)}{p(\mathbf z)}}d\mathbf x=\\
# &= \int q(\mathbf z) \left(\ln{q(\mathbf z)} - \ln{p(\mathbf z)}\right)d\mathbf x=\\
# &= \frac{1}{2} \int q(\mathbf z) (-\log |\sigma^2 I | + z^T z - (z-\mu)^T (\sigma^2 I )^{-1}(z-\mu) ) d\mathbf x \\
# &=\frac{1}{2} (-\log |\sigma^2 I | + \mu^T \mu + Tr(\sigma^2 I ) - Tr(I^{-1}I) ) \\
# &=- \frac{1}{2}\sum_i^D \log \sigma_i^2 + \frac{1}{2} \mu^T \mu + \frac{1}{2} \sum_i^D \sigma_i^2 - \frac{D}{2}
# \end{align*}
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "328115c94a66e8aba0a62896e647c3ba", "grade": false, "grade_id": "cell-c49899cbf2a49362", "locked": true, "schema_version": 3, "solution": false}
# Write a function `KL_loss` that takes two J dimensional vectors `mu` and `logvar` and returns the negative Kullback–Leibler divergence. Where `logvar` is $\log(\sigma^2)$. Make sure that your function works for batches of arbitrary size.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "33b14b79372dd0235d67bb66921cd3e0", "grade": false, "grade_id": "cell-125b41878005206b", "locked": false, "schema_version": 3, "solution": true}
def KL_loss(mu, logvar):
    """D_KL( N(mu, diag(exp(logvar))) || N(0, I) ), summed over batch and
    latent dimensions. `logvar` is log(sigma^2)."""
    # 0.5 * sum(sigma^2 + mu^2 - log sigma^2 - 1), elementwise over the batch.
    variance = torch.exp(logvar)
    per_element = variance + mu * mu - logvar - 1.0
    return 0.5 * torch.sum(per_element)
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "cf72e196d2b60827e8e940681ac50a07", "grade": true, "grade_id": "cell-ba714bbe270a3f39", "locked": true, "points": 5, "schema_version": 3, "solution": false}
### Test test test
# KL of a near-standard-normal q against N(0, I) must be small but positive.
mu_test = torch.FloatTensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
logvar_test = torch.FloatTensor([[0.01, 0.02], [0.03, 0.04], [0.05, 0.06]])
assert KL_loss(mu_test, logvar_test) > 0.0
assert KL_loss(mu_test, logvar_test) < 10.0
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "65335a588baac26bc48dd6c4d275fdca", "grade": false, "grade_id": "cell-18cb3f8031edec23", "locked": true, "schema_version": 3, "solution": false}
# ### 2.1.3 Putting the losses together (5 points)
# Write a function `loss_function` that takes a D dimensional vector `x`, its reconstruction `x_hat`, two J dimensional vectors `mu` and `logvar` and returns the final loss. Make sure that your function works for batches of arbitrary size.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "f6ecb5b60b2c8d7b90070ed59320ee70", "grade": false, "grade_id": "cell-d2d18781683f1302", "locked": false, "schema_version": 3, "solution": true}
def loss_function(x_hat, x, mu, logvar):
    """Total VAE objective (to minimize): negative log Bernoulli reconstruction
    loss plus the KL regularizer of q(z|x) against the standard-normal prior."""
    reconstruction = log_bernoulli_loss(x_hat, x)
    regularizer = KL_loss(mu, logvar)
    return reconstruction + regularizer
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "816e9508408bfcb2c7332b508d505081", "grade": true, "grade_id": "cell-57747988d29bbb5d", "locked": true, "points": 5, "schema_version": 3, "solution": false}
# Combined loss sanity check: both terms are positive, and the total is small
# for near-matching reconstructions and near-standard-normal posteriors.
x_test = torch.FloatTensor([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]])
x_hat_test = torch.FloatTensor([[0.11, 0.22, 0.33], [0.44, 0.55, 0.66], [0.77, 0.88, 0.99]])
mu_test = torch.FloatTensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
logvar_test = torch.FloatTensor([[0.01, 0.02], [0.03, 0.04], [0.05, 0.06]])
assert loss_function(x_hat_test, x_test, mu_test, logvar_test) > 0.0
assert loss_function(x_hat_test, x_test, mu_test, logvar_test) < 10.0
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "4506e06ed44a0535140582277a528ba4", "grade": false, "grade_id": "cell-9e3ba708967fe918", "locked": true, "schema_version": 3, "solution": false}
# ### 2.2 The model
# Below you see a data structure for the VAE. The modell itself consists of two main parts the encoder (images $\bx$ to latent variables $\bz$) and the decoder (latent variables $\bz$ to images $\bx$). The encoder is using 3 fully-connected layers, whereas the decoder is using fully-connected layers. Right now the data structure is quite empty, step by step will update its functionality. For test purposes we will initialize a VAE for you. After the data structure is completed you will do the hyperparameter search.
#
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "31eccf2f6600764e28eb4bc6c5634e49", "grade": false, "grade_id": "cell-e7d9dafee18f28a1", "locked": true, "schema_version": 3, "solution": false}
from torch import nn
from torch.nn import functional as F
class VAE(nn.Module):
    """Variational auto-encoder for flattened 28x28 MNIST images.

    Encoder: fc1 -> (fc21 for mu, fc22 for logvar); decoder: fc3 -> fc4.
    The methods below are intentionally placeholders: later notebook cells
    implement them and monkey-patch them onto this class.
    """
    def __init__(self, fc1_dims, fc21_dims, fc22_dims, fc3_dims, fc4_dims):
        super(VAE, self).__init__()
        # Each *_dims argument is an (in_features, out_features) pair.
        self.fc1 = nn.Linear(*fc1_dims)
        self.fc21 = nn.Linear(*fc21_dims)
        self.fc22 = nn.Linear(*fc22_dims)
        self.fc3 = nn.Linear(*fc3_dims)
        self.fc4 = nn.Linear(*fc4_dims)
    def encode(self, x):
        # To be implemented
        raise Exception('Method not implemented')
    def reparameterize(self, mu, logvar):
        # To be implemented
        raise Exception('Method not implemented')
    def decode(self, z):
        # To be implemented
        raise Exception('Method not implemented')
    def forward(self, x):
        # To be implemented
        raise Exception('Method not implemented')
# Tiny test instance for the shape checks below: 784 -> 4 -> 2-dim latent -> 4 -> 784.
VAE_test = VAE(fc1_dims=(784, 4), fc21_dims=(4, 2), fc22_dims=(4, 2), fc3_dims=(2, 4), fc4_dims=(4, 784))
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "1a2243397998b4f55c25dfd734f3e7e0", "grade": false, "grade_id": "cell-c4f9e841b8972a43", "locked": true, "schema_version": 3, "solution": false}
# ### 2.3 Encoding (10 points)
# Write a function `encode` that gets a vector `x` with 784 elements (flattened MNIST image) and returns `mu` and `logvar`. Your function should use three fully-connected layers (`self.fc1()`, `self.fc21()`, `self.fc22()`). First, you should use `self.fc1()` to embed `x`. Second, you should use `self.fc21()` and `self.fc22()` on the embedding of `x` to compute `mu` and `logvar` respectively. PyTorch comes with a variety of activation functions, the most common calls are `F.relu()`, `F.sigmoid()`, `F.tanh()`. Make sure that your function works for batches of arbitrary size.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "628bcd88c611cf01e70f77854600199b", "grade": false, "grade_id": "cell-93cb75b98ae76569", "locked": false, "schema_version": 3, "solution": true}
def encode(self, x):
    """Map a batch of flattened images to the parameters (mu, logvar) of the
    approximate posterior q(z|x). Works for arbitrary batch sizes."""
    # Shared hidden representation, then two separate linear heads.
    hidden = F.relu(self.fc1(x))
    return self.fc21(hidden), self.fc22(hidden)
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "370d930fa9f10f1d3a451f3805c04d88", "grade": true, "grade_id": "cell-9648960b73337a70", "locked": true, "points": 10, "schema_version": 3, "solution": false}
### Test, test, test
# Attach the implementation and check output shapes for a batch of 5 images.
VAE.encode = encode
x_test = torch.ones((5,784))
mu_test, logvar_test = VAE_test.encode(x_test)
assert np.allclose(mu_test.size(), [5, 2])
assert np.allclose(logvar_test.size(), [5, 2])
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "6f597cc2b5ef941af282d7162297f865", "grade": false, "grade_id": "cell-581b4ed1996be868", "locked": true, "schema_version": 3, "solution": false}
# ### 2.4 Reparameterization (10 points)
# One of the major question that the VAE is answering, is 'how to take derivatives with respect to the parameters of a stochastic variable?', i.e. if we are given $\bz$ that is drawn from a distribution $q(\bz|\bx)$, and we want to take derivatives. This step is necessary to be able to use gradient-based optimization algorithms like SGD.
# For some distributions, it is possible to reparameterize samples in a clever way, such that the stochasticity is independent of the parameters. We want our samples to deterministically depend on the parameters of the distribution. For example, in a normally-distributed variable with mean $\mu$ and standard deviation $\sigma$, we can sample from it like this:
#
# \begin{align}
# \bz = \mu + \sigma \odot \epsilon,
# \end{align}
#
# where $\odot$ is the element-wise multiplication and $\epsilon$ is sampled from $N(0, I)$.
#
#
# Write a function `reparameterize` that takes two J dimensional vectors `mu` and `logvar`. It should return $\bz = \mu + \sigma \odot \epsilon$.
#
# + deletable=false nbgrader={"cell_type": "code", "checksum": "6331cb5dd23aaacbcf1a52cfecb1afaa", "grade": false, "grade_id": "cell-679aea8b2adf7ec4", "locked": false, "schema_version": 3, "solution": true}
def reparameterize(self, mu, logvar):
    """Sample z = mu + sigma * eps with eps ~ N(0, I).

    The noise is drawn independently of the parameters, so gradients flow
    through mu and logvar (the reparameterization trick)."""
    noise = torch.randn_like(mu)
    sigma = torch.exp(0.5 * logvar)
    return noise * sigma + mu
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "38d4e047717ab334b262c8c177f0a420", "grade": true, "grade_id": "cell-fdd7b27a3d17f84e", "locked": true, "points": 10, "schema_version": 3, "solution": false}
### Test, test, test
# Attach and sample: z must have the same shape as mu, and for these small
# parameters a draw far outside (-5, 5) would be astronomically unlikely.
VAE.reparameterize = reparameterize
VAE_test.train()
mu_test = torch.FloatTensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
logvar_test = torch.FloatTensor([[0.01, 0.02], [0.03, 0.04], [0.05, 0.06]])
z_test = VAE_test.reparameterize(mu_test, logvar_test)
assert np.allclose(z_test.size(), [3, 2])
assert z_test[0][0] < 5.0
assert z_test[0][0] > -5.0
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "9241ab0eaf8366c37ad57072ce66f095", "grade": false, "grade_id": "cell-0be851f9f7f0a93e", "locked": true, "schema_version": 3, "solution": false}
# ### 2.5 Decoding (10 points)
# Write a function `decode` that gets a vector `z` with J elements and returns a vector `x_hat` with 784 elements (flattened MNIST image). Your function should use two fully-connected layers (`self.fc3()`, `self.fc4()`). PyTorch comes with a variety of activation functions, the most common calls are `F.relu()`, `F.sigmoid()`, `F.tanh()`. Make sure that your function works for batches of arbitrary size.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "e8e833cfd7c54a9b67a38056d5d6cab8", "grade": false, "grade_id": "cell-bf92bb3878275a41", "locked": false, "schema_version": 3, "solution": true}
def decode(self, z):
    """Map latent codes `z` to Bernoulli means x_hat in [0, 1] — a batch of
    flattened 784-pixel images. Works for arbitrary batch sizes."""
    hidden = F.relu(self.fc3(z))
    # torch.sigmoid replaces the deprecated F.sigmoid.
    return torch.sigmoid(self.fc4(hidden))
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "7732293fd7d971fcf255496e8c68638d", "grade": true, "grade_id": "cell-4abb91cb9e80af5d", "locked": true, "points": 10, "schema_version": 3, "solution": false}
# test test test
# Attach and decode: output is a batch of 784-dim vectors with values in [0, 1].
VAE.decode = decode
z_test = torch.ones((5,2))
x_hat_test = VAE_test.decode(z_test)
assert np.allclose(x_hat_test.size(), [5, 784])
assert (x_hat_test <= 1).all()
assert (x_hat_test >= 0).all()
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "e2e113d1f45398b2a1399c336526e755", "grade": false, "grade_id": "cell-97511fbc4f5b469b", "locked": true, "schema_version": 3, "solution": false}
# ### 2.6 Forward pass (10)
# To complete the data structure you have to define a forward pass through the VAE. A single forward pass consists of the encoding of an MNIST image $\bx$ into latent space $\bz$, the reparameterization of $\bz$ and the decoding of $\bz$ into an image $\bx$.
#
# Write a function `forward` that gets a a vector `x` with 784 elements (flattened MNIST image) and returns a vector `x_hat` with 784 elements (flattened MNIST image), `mu` and `logvar`.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "8b7433c4631dd01c07a5fe287e55ae13", "grade": false, "grade_id": "cell-26bb463b9f98ebd5", "locked": false, "schema_version": 3, "solution": true}
def forward(self, x):
    """Full VAE pass: flatten -> encode -> sample z -> decode.

    Returns (x_hat, mu, logvar) so the caller can assemble the loss."""
    flat = x.view(-1, 784)
    mu, logvar = self.encode(flat)
    sample = self.reparameterize(mu, logvar)
    return self.decode(sample), mu, logvar
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "8e7e495f40465c162512e9873c360b25", "grade": true, "grade_id": "cell-347e5fba3d02754b", "locked": true, "points": 10, "schema_version": 3, "solution": false}
# test test test
# Attach and run end-to-end: reconstruction is 784-dim, latent stats are 2-dim.
VAE.forward = forward
x_test = torch.ones((5,784))
x_hat_test, mu_test, logvar_test = VAE_test.forward(x_test)
assert np.allclose(x_hat_test.size(), [5, 784])
assert np.allclose(mu_test.size(), [5, 2])
assert np.allclose(logvar_test.size(), [5, 2])
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "a114a6fd781fb949b887e6a028e07946", "grade": false, "grade_id": "cell-62c89e4d3b253671", "locked": true, "schema_version": 3, "solution": false}
# ### 2.7 Training (15)
# We will now train the VAE using an optimizer called Adam, https://arxiv.org/abs/1412.6980. The code to train a model in PyTorch is given below.
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "f3b6bb965fb48229c63cacda48baea65", "grade": false, "grade_id": "cell-be75f61b09f3b9b6", "locked": true, "schema_version": 3, "solution": false}
from torch.autograd import Variable
def train(epoch, train_loader, model, optimizer):
    """Run one training epoch of the VAE over `train_loader`.

    Uses the global `loss_function` (reconstruction + KL), logs every 100
    batches, and reports a per-sample average loss at the end of the epoch.
    """
    model.train()
    train_loss = 0
    for batch_idx, (data, _) in enumerate(train_loader):
        # NOTE(review): Variable is a deprecated no-op wrapper in modern
        # PyTorch; `data` could be used directly.
        data = Variable(data)
        optimizer.zero_grad()
        recon_batch, mu, logvar = model(data)
        # Flatten images so targets match the 784-dim reconstruction.
        loss = loss_function(recon_batch, data.view(-1, 784), mu, logvar)
        loss.backward()
        train_loss += loss.data
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader),
                loss.data / len(data)))
    print('====> Epoch: {} Average loss: {:.4f}'.format(
        epoch, train_loss / len(train_loader.dataset)))
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "48ca730dbef06a668f4dfdb24888f265", "grade": false, "grade_id": "cell-da1b063b7de850b9", "locked": true, "schema_version": 3, "solution": false}
# Let's train. You have to choose the hyperparameters. Make sure your loss is going down in a reasonable amount of epochs (around 10).
# + deletable=false nbgrader={"cell_type": "code", "checksum": "846430258fb80f50b161135448726520", "grade": false, "grade_id": "cell-d4d4408d397f6967", "locked": false, "schema_version": 3, "solution": true}
# Hyperparameters
def get_fc_dims(input_dim, hidden_dim, latent_dim):
    """Return the five (in_features, out_features) pairs for the VAE layers:
    encoder input, mu head, logvar head, decoder input, decoder output."""
    encoder_in = (input_dim, hidden_dim)
    mu_head = (hidden_dim, latent_dim)
    logvar_head = (hidden_dim, latent_dim)
    decoder_in = (latent_dim, hidden_dim)
    decoder_out = (hidden_dim, input_dim)
    return encoder_in, mu_head, logvar_head, decoder_in, decoder_out
# Chosen hyperparameters: 512 hidden units, 20-dim latent space.
fc1_dims, fc21_dims, fc22_dims, fc3_dims, fc4_dims = get_fc_dims(784, 512, 20)
lr = 1e-3  # Adam learning rate
batch_size = 128
epochs = 10
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "b93390f399b743276bc25e67493344f2", "grade": true, "grade_id": "cell-ca352d8389c1809a", "locked": true, "points": 15, "schema_version": 3, "solution": false}
# This cell contains a hidden test, please don't delete it, thx
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "20719070ed85964de9722acc3456a515", "grade": false, "grade_id": "cell-5c77370db7cec9f2", "locked": true, "schema_version": 3, "solution": false}
# Run the box below to train the model using the hyperparameters you entered above.
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "38306be3638e85812bd5b2a052fcc0a4", "grade": false, "grade_id": "cell-5712d42de1068398", "locked": true, "schema_version": 3, "solution": false}
from torchvision import datasets, transforms
from torch import nn, optim
# Load data
train_data = datasets.MNIST('../data', train=True, download=True,
                            transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(train_data,
                                           batch_size=batch_size, shuffle=True, **{})
# Init model
VAE_MNIST = VAE(fc1_dims=fc1_dims, fc21_dims=fc21_dims, fc22_dims=fc22_dims, fc3_dims=fc3_dims, fc4_dims=fc4_dims)
# Init optimizer
optimizer = optim.Adam(VAE_MNIST.parameters(), lr=lr)
# Train for the configured number of epochs using the `train` helper above.
for epoch in range(1, epochs + 1):
    train(epoch, train_loader, VAE_MNIST, optimizer)
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "e2f8fcc9384e30cb154cf931f223898b", "grade": false, "grade_id": "cell-bd07c058c661b9c6", "locked": true, "schema_version": 3, "solution": false}
# Run the box below to check if the model you trained above is able to correctly reconstruct images.
# + deletable=false editable=false nbgrader={"cell_type": "code", "checksum": "80d198e03b1287741d761a12e38dcf73", "grade": false, "grade_id": "cell-df03d717307a6863", "locked": true, "schema_version": 3, "solution": false}
### Let's check if the reconstructions make sense
# Set model to test mode
VAE_MNIST.eval()
# Reconstruct and plot the first few training images one at a time.
train_data_plot = datasets.MNIST('../data', train=True, download=True,
                                 transform=transforms.ToTensor())
train_loader_plot = torch.utils.data.DataLoader(train_data_plot,
                                                batch_size=1, shuffle=False, **{})
for batch_idx, (data, _) in enumerate(train_loader_plot):
    x_hat, mu, logvar = VAE_MNIST(data)
    plt.imshow(x_hat.view(1, 28, 28).squeeze().data.numpy(), cmap='gray')
    # BUG FIX: `train_labels` is a deprecated (and since removed) alias in
    # torchvision's MNIST dataset; the supported attribute is `targets`.
    plt.title('%i' % train_data.targets[batch_idx])
    plt.show()
    if batch_idx == 3:
        break
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "7f559122b150f5f1228d6b66b62f462c", "grade": false, "grade_id": "cell-76649d51fdf133dc", "locked": true, "schema_version": 3, "solution": false}
# ### 2.8 Visualize latent space (20 points)
# Now, implement the auto-encoder now with a 2-dimensional latent space, and train again over the MNIST data. Make a visualization of the learned manifold by using a linearly spaced coordinate grid as input for the latent space, as seen in https://arxiv.org/abs/1312.6114 Figure 4.
# + deletable=false nbgrader={"cell_type": "code", "checksum": "c879ffdb0d355349d7144a33d16ca93a", "grade": true, "grade_id": "cell-4a0af6d08d055bee", "locked": false, "points": 20, "schema_version": 3, "solution": true}
# Rebuild and retrain the VAE with a 2-dimensional latent space so the learned
# manifold can be visualized on a plane.
fc1_dims, fc21_dims, fc22_dims, fc3_dims, fc4_dims = get_fc_dims(784, 512, 2)
# Load data
train_data = datasets.MNIST('../data', train=True, download=True,
                            transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(train_data,
                                           batch_size=batch_size, shuffle=True, **{})
# Init model
VAE_MNIST = VAE(fc1_dims=fc1_dims, fc21_dims=fc21_dims, fc22_dims=fc22_dims, fc3_dims=fc3_dims, fc4_dims=fc4_dims)
# Init optimizer
optimizer = optim.Adam(VAE_MNIST.parameters(), lr=lr)
# Train
for epoch in range(1, epochs + 1):
    train(epoch, train_loader, VAE_MNIST, optimizer)
from scipy.stats import norm
from torchvision.utils import make_grid
def generate_manifold(model: VAE, rows: int = 20):
    """Decode a rows x rows grid of latent points and display the manifold.

    Latent coordinates are obtained by pushing linearly spaced percentiles
    through the inverse Gaussian CDF (probit), as in Kingma & Welling (2014)
    Figure 4, so the grid covers the N(0, I) prior evenly.
    """
    # Percentiles strictly inside (0, 1): the original torch.linspace(0, 1, rows)
    # included the endpoints, and erfinv(2*0 - 1) = erfinv(-1) = -inf, which made
    # the first/last rows and columns of the grid infinite latent coordinates.
    grid = torch.linspace(1, rows, rows) / (rows + 1)
    # probit(p) = sqrt(2) * erfinv(2p - 1) maps a percentile to a N(0, 1) value
    samples = [torch.erfinv(2 * torch.tensor([x, y]) - 1)
               * np.sqrt(2) for x in grid for y in grid]
    samples = torch.stack(samples)
    manifold = model.decode(samples).view(-1, 1, 28, 28)
    image_grid = make_grid(manifold, nrow=rows).detach().numpy().transpose(1, 2, 0)
    figure(figsize=(20, 20))
    ax = plt.gca()
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    ax.imshow(image_grid)
    plt.show()
generate_manifold(VAE_MNIST)
# + [markdown] deletable=false editable=false nbgrader={"cell_type": "markdown", "checksum": "b9eb1684d646eea84a25638d184bfbda", "grade": false, "grade_id": "cell-dc5e1247a1e21009", "locked": true, "schema_version": 3, "solution": false}
# ### 2.8 Amortized inference (10 points)
# What is amortized inference? Where in the code of Part 2 is it used? What is the benefit of using it?
#
# + [markdown] deletable=false nbgrader={"cell_type": "markdown", "checksum": "364ed922da59070f319d0bdfb0e41d92", "grade": true, "grade_id": "cell-6f7808a9b0098dbf", "locked": false, "points": 10, "schema_version": 3, "solution": true}
# Amortized inference is the idea that we are introducing a parameterized function which serves as a mapping from the observation space to the parameters of the approximate posterior distribution. This allows us to share some of the parameters between the datapoints instead of using a different set every time.
#
# It is used in the Variational Auto-encoder: the Encoder (the inference/recognition network mapping an observation to the parameters of the approximate posterior) uses the same weights for every datapoint.
#
# The main benefit in using it is the now constant amount of parameters that we have with respect to the data size. It is also smaller than original and thus reduces the computation cost of the algorithm.
|
lab_3/12402559_12141666_lab3.ipynb
|
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// # Lab Three
// ---
//
// For this lab we're going to be making and using a bunch of functions.
//
// Our Goals are:
// - Switch Case
// - Looping
// - Making our own functions
// - Combining functions
// - Structuring solutions
// +
// Give me an example of you using switch case.
// Each case ends with `break` so only the matching fruit is described.
// Without the breaks, execution falls through and ALSO prints every case
// below the match (here: both the strawberry and banana lines).
String starter = "Strawberry";
switch(starter){
    case "Orange":
        System.out.println("An orange is the color orange.");
        break;
    case "Strawberry":
        System.out.println("A strawberry is red.");
        break;
    case "Banana":
        System.out.println("A banana is yellow.");
        break;
}
// -
// Give me an example of you using a for loop
// Count upward from 6 through 21 inclusive, printing each value.
for (int value = 6; value <= 21; value++) {
    System.out.println("The answer is: " + value);
}
// +
// Give me an example of you using a for each loop
// Iterate directly over the array's elements — no index bookkeeping needed.
String[] Mangan = {"Mike", "Kate", "James", "Julia", "Olivis", "Finn"};
for (String member : Mangan) {
    System.out.println(member);
}
// +
// Give me an example of you using a while loop
import java.lang.Math;
// Draw a random target in [min, max] and count up to it.
int min = 4;
int max = 13;
// The span of an inclusive range is (max - min + 1). The original used +2,
// which let (int)(Math.random() * range) + min produce max + 1 (i.e. 14).
int range = max - min + 1;
int NewNumber = (int) (Math.random() * range) + min;
int orderNumber = 0;
// Count upward until we reach the randomly drawn number.
while (orderNumber != NewNumber) {
    System.out.println("This is not the New Number.");
    orderNumber++;
}
System.out.println("The new number has been identified: " + NewNumber);
// +
// I want you to write a function that will take in a number and raise it to the power given.
// For example if given the numbers 2 and 3. The math that the function should do is 2^3 and should print out or return 8. Print the output.
// Raise `base` to `exponent` via the standard library and return the result.
double power (int base, int exponent) {
    double result = Math.pow(base, exponent);
    return result;
}
System.out.println(power(14,3));
// +
// I want you to write a function that will take in a list and see how many times a given number is in the list.
// For example if the array given is [2,3,5,2,3,6,7,8,2] and the number given is 2 the function should print out or return 3. Print the output.
// Count how many entries of `array` equal `target` and report the tally.
void number_counter(int[] array, int target){
    if (array.length == 0) {
        System.out.println("The array provided was empty. Please provide a valid array.");
        return;
    }
    int hits = 0;
    for (int i = 0; i < array.length; i++) {
        if (array[i] == target) {
            hits++;
        }
    }
    System.out.println("There are " + hits + " " + target + "'s in the given array");
}
int[] array = {1,2,3,4,1,2,3,4,1,1,1,2,3,4,4,4,4};
number_counter(array, 2);
// +
// Give me a function that gives the answer to the pythagorean theorem.
// I'd like you to reuse the exponent function from above as well as the functions below to make your function.
// If you don't remember the pythagorean theorem the formula is (a^2 + b^2 = c^2). Given a and b as parameters i'd like you to return c.
// If this doesn't make sense look up `Pythagorean Theorem Formula` on google.
// Return the sum of two doubles.
double addition(double a, double b) {
    return a + b;
}
// Return the quotient a / b as a double.
// Cast before dividing: the original performed *integer* division and only
// then widened, so division(1, 2) returned 0.0 instead of 0.5.
double division(int a, int b) {
    return (double) a / b;
}
// Given legs a and b, compute the hypotenuse c from a^2 + b^2 = c^2 and print it,
// reusing the power() and addition() helpers defined above.
void pythagorean_theorem(int a, int b) {
    double c_squared = addition(power(a, 2), power(b, 2));
    double c = Math.sqrt(c_squared);
    System.out.println("c comes out to be " + c + " and c squared was " + c_squared + ".");
}
pythagorean_theorem(3,4);
|
JupyterNotebooks/Labs/Lab 3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + id="ec49290e" outputId="6fcfef25-5268-4a9f-b9a2-93974009cbef"
import transformers
from transformers import BertModel, BertTokenizer,BertForSequenceClassification, BertConfig, AdamW, get_linear_schedule_with_warmup
import torch
import numpy as np
from torch import nn
from torch.utils.data import Dataset
import pandas as pd
from torch.utils.data import DataLoader
from torch import optim
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
from collections import defaultdict
import time
from matplotlib import pyplot as plt
transformers.logging.set_verbosity_error()
torch.cuda.empty_cache()
# + [markdown] id="b6294711"
# ## Dataset
# + id="a2a2de96"
# Checkpoint name and batching hyper-parameters for fine-tuning.
PRE_TRAINED_MODEL_NAME = 'bert-base-cased'
MAX_LEN = 100      # max token length of a tokenized (headline, body) pair
BATCH_SIZE = 32
RANDOM_SEED = 42   # fixed seed for reproducible train/val/test splits
# + id="f0ad1bca"
tokenizer = BertTokenizer.from_pretrained(PRE_TRAINED_MODEL_NAME)
# + id="aa0fa57b" outputId="bff19f0e-3470-4653-d080-bc79e10e0a38"
# Load the FNC-1 stance data and join headlines to article bodies on Body ID.
# NOTE(review): only the first 150 stance rows are kept — presumably to make
# the notebook runnable on CPU; confirm before training for real.
train_stances = '../fnc-1/train_stances.csv'
train_bodies = '../fnc-1/train_bodies.csv'
stances_headlines = pd.read_csv(train_stances)[:150]
bodies = pd.read_csv(train_bodies)
stances_bodies = stances_headlines.merge(bodies,on='Body ID')
# + id="429dc8d7" outputId="5d8513d9-540f-4fc0-d966-a60f65cb790e"
len(stances_headlines)
# + id="3139de65"
class StancesDataset(Dataset):
    """Pairs a (headline, article body) with its stance label for BERT.

    Each item is tokenized as a sentence pair (headline = segment A,
    body = segment B), padded/truncated to `max_len` tokens, and returned
    with both a one-hot and an index form of the stance label.
    """

    def __init__(self, headlines, bodies, stances, tokenizer, max_len):
        self.headlines = headlines
        self.bodies = bodies
        self.stances = stances
        self.tokenizer = tokenizer
        self.max_len = max_len
        # stance string -> class index; the order fixes the one-hot layout
        self.categories = {"unrelated": 0, "agree": 1, "discuss": 2, "disagree": 3}

    def __len__(self):
        return len(self.headlines)

    def __getitem__(self, idx):
        headline = self.headlines[idx]
        body = self.bodies[idx]
        stance = self.categories[self.stances[idx]]
        stanceVec = [0, 0, 0, 0]
        stanceVec[stance] = 1
        inputs = self.tokenizer.encode_plus(
            headline,
            body,
            add_special_tokens=True,
            max_length=self.max_len,
            # `pad_to_max_length=True` is deprecated; padding='max_length'
            # is the documented replacement.
            padding='max_length',
            # Truncate pairs longer than max_length instead of returning an
            # over-long sequence with a warning.
            truncation=True,
            return_token_type_ids=True
        )
        ids = inputs['input_ids']
        mask = inputs['attention_mask']
        token_type_ids = inputs["token_type_ids"]
        return {
            'input_ids': torch.tensor(ids, dtype=torch.long),
            'attention_mask': torch.tensor(mask, dtype=torch.long),
            'token_type_ids': torch.tensor(token_type_ids, dtype=torch.long),
            "onehotlabels": torch.tensor(stanceVec, dtype=torch.float),
            "labels": torch.tensor([stance], dtype=torch.float)
        }
# + [markdown] id="565e3e83"
# ## Model
# -
configuration = BertConfig()  # kept for reference; from_pretrained loads its own config
# `from_pretrained` is a classmethod: calling it on the class avoids the
# original pattern of constructing a BertForSequenceClassification instance
# only to discard it immediately.
model = BertForSequenceClassification.from_pretrained(
    PRE_TRAINED_MODEL_NAME, problem_type="multi_label_classification", num_labels=4)
# + [markdown] id="27a86309"
# ## Training
# + id="ea61ef95"
def train(batch_size, data, model, loss_fn, num_samples, optimizer, device='cpu'):
    """Run one fine-tuning epoch; return (accuracy, mean training loss).

    num_samples: number of *examples* in `data`, used as the accuracy
        denominator. The original reassigned num_samples = len(data), but
        len() of a DataLoader is the number of BATCHES, which made the
        returned "accuracy" correct / num_batches. Trust the caller's count.
    loss_fn: unused — the HF model computes its own multi-label loss when
        `labels` is passed — but kept so existing call sites keep working.
    """
    model = model.train()
    model.to(device)
    num_correct_predictions = 0
    training_loss = []
    for i, input_data in enumerate(data):
        input_ids = input_data['input_ids'].to(device)
        attention_mask = input_data['attention_mask'].to(device)
        token_type_ids = input_data['token_type_ids'].to(device)
        onehotlabels = input_data['onehotlabels'].to(device).squeeze()
        labels = input_data['labels'].to(device).squeeze()
        optimizer.zero_grad()
        # passing labels makes the model return its loss alongside the logits
        output = model(input_ids, attention_mask, token_type_ids, labels=onehotlabels)
        preds = output.logits
        num_correct_predictions += torch.sum(torch.argmax(preds, dim=1) == labels)
        loss = output.loss
        loss.backward()
        optimizer.step()
        training_loss.append(loss.item())
    return num_correct_predictions.item() / num_samples, np.mean(training_loss)
# + id="3b210470"
def validate(model, data, loss_fn, num_samples, device='cpu'):
    """Evaluate `model` on `data`; return (accuracy, mean validation loss).

    `loss_fn` is accepted for signature compatibility; the model reports
    its own loss when `labels` is supplied.
    """
    model = model.eval()
    losses = []
    n_correct = 0
    with torch.no_grad():  # inference only — skip gradient bookkeeping
        for batch in data:
            input_ids = batch["input_ids"].to(device)
            attention_mask = batch["attention_mask"].to(device)
            token_type_ids = batch['token_type_ids'].to(device)
            onehotlabels = batch['onehotlabels'].to(device).squeeze()
            labels = batch['labels'].to(device).squeeze()
            output = model(input_ids, attention_mask, token_type_ids, labels=onehotlabels)
            n_correct += torch.sum(torch.argmax(output.logits, dim=1) == labels)
            losses.append(output.loss.item())
    return n_correct.item() / num_samples, np.mean(losses)
# + id="3bfd7744"
def create_data_loader(df, tokenizer, max_len, batch_size):
    """Wrap the stances dataframe in a DataLoader over a StancesDataset."""
    dataset = StancesDataset(
        df['Headline'].to_numpy(),
        df['articleBody'].to_numpy(),
        df['Stance'].to_numpy(),
        tokenizer,
        max_len,
    )
    return DataLoader(dataset, batch_size=batch_size)
# Hold out 10% of the data, then split the holdout half/half into
# validation and test; seeding keeps the splits reproducible.
df_train, df_test = train_test_split(stances_bodies, test_size=0.1, random_state=RANDOM_SEED)
df_val, df_test = train_test_split(df_test, test_size=0.5, random_state=RANDOM_SEED)
train_dataloader = create_data_loader(df_train, tokenizer, MAX_LEN, BATCH_SIZE)
val_dataloader = create_data_loader(df_val, tokenizer, MAX_LEN, BATCH_SIZE)
test_dataloader = create_data_loader(df_test, tokenizer, MAX_LEN, BATCH_SIZE)
# + id="11984577" outputId="116f9740-27a4-4d4a-f6f3-e38410a97c4c"
# Fine-tuning hyper-parameters and optimisation setup.
epochs = 30
batch_size=BATCH_SIZE
device='cpu'
learning_rate=0.00001
# NOTE(review): loss_fn is threaded through train()/validate() but the HF
# model computes its own loss internally, so this criterion is effectively
# unused.
loss_fn = nn.CrossEntropyLoss().to(device)
optimizer = AdamW(model.parameters(), lr=learning_rate, correct_bias=False)
total_steps = len(train_dataloader) * epochs
# Linear warmup/decay schedule.
# NOTE(review): scheduler.step() is never called anywhere in this notebook,
# so the learning rate actually stays constant — if the schedule is intended,
# it should be stepped once per batch inside train().
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=0,
    num_training_steps=total_steps
)
history = defaultdict(list)  # per-epoch accuracy/loss curves for plotting
best_accuracy = 0
oldtime = time.time()
for i in range(epochs):
    # wall-clock time since the previous epoch started
    newtime = time.time()
    delta = newtime - oldtime
    oldtime = newtime
    print(f'Elapsed time at start of epoch {i} is {delta}s')
    accuracy, mean_loss = train(batch_size,train_dataloader, model, loss_fn, len(df_train), optimizer, device)
    history['train_acc'].append(accuracy)
    history['train_loss'].append(mean_loss)
    print(f'Training accuracy at epoch {i} is {accuracy}')
    print(f'Mean training loss at epoch {i} is {mean_loss}')
    valAccuracy, mean_val_loss = validate(model, val_dataloader, loss_fn, len(df_val), device)
    history['val_acc'].append(valAccuracy)
    history['val_loss'].append(mean_val_loss)
    print(f'Validaton accuracy at epoch {i} is {valAccuracy}')
    print(f'Mean validation loss at epoch {i} is {mean_val_loss}')
    # checkpoint the best model so far by validation accuracy
    if valAccuracy > best_accuracy:
        torch.save(model.state_dict(), 'best_model_state.bin')
        best_accuracy = valAccuracy
# + id="dedaddc9"
# Plot train vs validation accuracy per epoch.
plt.plot(history['train_acc'], label='train accuracy')
plt.plot(history['val_acc'], label='validation accuracy')
plt.title('Training history')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend()
# Accuracies recorded in `history` are fractions in [0, 1]; the original
# ylim of [0, 100] squashed both curves flat against the x-axis.
plt.ylim([0, 1]);
# + id="07ef2212"
# + id="bdc6dadb"
|
Transformer/bert_stance_detection_for_sequence_classification_final.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
from sklearn.svm import SVC
from sklearn import datasets
# Train a toy SVM on the iris dataset and pickle it for the ml_service to load.
iris = datasets.load_iris()
print(iris['feature_names'], iris['data'][:3])
print(iris['target_names'], iris['target'][:3])
# These samples will not use the last 10 samples for training. Results won't make much sense. This is just for illustration
X_train, y_train = iris['data'][:-10], iris['target'][:-10]
# +
clf = SVC()
clf.fit(X_train, y_train)
# -
# Serialize the fitted classifier where the service expects to find it.
with open('../ml_service/model/my-model.pkl', 'wb') as f:
    pickle.dump(clf, f)
|
notebooks/train-my-model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Predicting home prices with the k-NN algorithm
# Given a record containing features of a house, we want to be able to predict its price. We use the dataset ''House Sales in King County, USA'', downloaded from kaggle. We want to predict the price of a home based on the homes that are closest to it, i.e. have similar properties.
# Download the data file [housing.csv](https://drive.google.com/file/d/1s_muxczF8K4qs5mIohZItlb0b6jA4Rhc/view?usp=sharing) to your local directory.<br>
# Update the variable `file_name` in the cell below to point to your local directory where you store the datasets for this course and then run the cell.
import os
# List what's in the data directory, then point at the housing CSV.
for list_of_files in os.listdir("../data_sets"):
    print(list_of_files)
file_name = "../data_sets/housing.csv"
# # Part I. Predicting home price
# ### 1.1. Features are numeric
# We have the following features:
# <ul>
# <li>id - house identifier, numeric.</li>
# <li>price - house price, numeric. <b>This is the target variable that we are trying to predict</b>.</li>
# <li>bedrooms - no. of bedrooms, numeric.</li>
# <li>bathrooms - no. of bathrooms, numeric.</li>
# <li>sqft_living - square footage of the home, numeric.</li>
# <li>sqft_lot - square footage of the lot, numeric.</li>
# <li>floors - no.of floors, numeric.</li>
# <li>waterfront - boolean (expressed as 0 or 1).</li>
# <li>condition - the amount of wear-and-tear, numeric (from 0 to 5).</li>
# <li>sqft_above - square footage of house apart from basement, numeric.</li>
# <li>sqft_basement - square footage of the basement, numeric.</li>
# <li>age - number of years since year built to year sold, numeric.</li>
# </ul>
# <br>
# Read the file into pandas dataframe:
# +
import pandas as pd
import numpy as np
# this creates a pandas.DataFrame indexed by the house id column
data = pd.read_csv(file_name, index_col='id')
data.columns
# +
# Is there any correlation between features?
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# Heatmap of pairwise feature correlations; annotated with the coefficients.
fig, ax = plt.subplots(figsize=(8,6))
corr = data.corr()
sns.heatmap(corr, center=0, annot=True, linewidths=.1, ax=ax, fmt='.2f')
# -
# ### 1.2. The target is numeric
# We want to build the model that *predicts* house prices: we look into `sklearn.neighbors` [library](https://scikit-learn.org/stable/modules/neighbors.html). There are several modules in this library. Which one do you think we need to use in order to predict prices?
# split data into features and target variable
# split data into features and target variable (price is the regression target)
X = data.drop(columns=['price' ])
Y = data['price'].values
# In order to split the dataset into train and test parts we can use `train_test_split` method from `sklearn.model_selection` module. We set the test set size to be 20% of the entire dataset. This way of splitting the data into train and test sets is called **holdout estimation**: we are holding out part of the data to see how the predictor performs on data that it has never seen.
# +
from sklearn.model_selection import train_test_split
#split dataset into train and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2) #Taking 20% to test and 80%to model
# -
# We now build our model. What do you think happens when we call `knn.fit`? What operations are performed by the algorithm?
#
# Explore all different algorithms [here](https://scikit-learn.org/stable/modules/neighbors.html#nearest-neighbor-algorithms).
#
# # Task 1. Question
#
# Does the use of a different algorithm make the model itself different? What is the difference between the algorithms?
#
# What does the `weight` parameter specify?
#
# Answer:
# According to the documentation of the Scikit Learn, "weight" is the value assigned to a query point where is computed from a simple majority vote of the nearest neighbors.
# +
#Nearest Neighbors Regression
#Split the dataset into train and test
# -
from sklearn.neighbors import KNeighborsRegressor
# 3-nearest-neighbour regression on raw (unscaled) features.
knn = KNeighborsRegressor(n_neighbors=3)
knn.fit(X_train, Y_train)
# train set score (R^2); high because each point is among its own neighbours
knn.score(X_train, Y_train) #Determining the score with the training data (Always very good)
# test set score (R^2) on the held-out 20%
knn.score(X_test, Y_test)
# I tried all the imaginable improvements and was unable to get the test accuracy above $0.60$.
# +
#Not a good score, the higher the better.
#Predicting the class is much easier that then predicting the score...
# -
# # Part II. Predicting Home Category
# It does not seem that we can reliably predict the numeric home price using the data and the `KNeighborsRegressor`.
#
# Instead we are going to use `KNeighborsClassifier` to predict a class label of each home.
# # Task 2. Converting target variable into a class label
# Convert the numeric price attribute into the binary class as follows: "price above mean" (class 1) and "price below mean" (class 0).
# In order to get some reasonable classification.
# +
# converting target attribute to a class label
# <Your code here>
# Label each house 1 if it is priced above the dataset mean, else 0.
data['class'] = (data['price'] > data['price'].mean()).astype(int)
# The mean price itself, for reference
data['price'].mean()
# -
# To make sure that both classes are represented properly, find out how many total houses are above mean and how many are below mean.
# How many houses belong to class 0 and to class 1?
data['class'].value_counts()
data.shape
# Here are my results for comparison:
#
# mean price: 540182.1587933188
#
# Total: 21613
#
# Below: 13694
# # Task 3. Baseline experiment
#
# Repeat the same steps as in 1.2. using transformed dataset and `KNeighborsClassifier`.
# +
# we repeat the same steps but with a categorical class label
# split data into features and target variable
X = data.drop(columns=['price', 'class'])  # drop the target and the raw price
Y = data['class'].values                   # binary label: 1 = above-mean price
# split dataset into train and test sets
from sklearn.model_selection import train_test_split
#split dataset into train and test data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2) #Taking 20% to test and 80%to model
# -
# we use a classifier instead of a regressor
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, Y_train)
print("Baseline train score:", knn.score(X_train, Y_train))
print("Baseline test score:", knn.score(X_test, Y_test))
# Here are some results for comparison:
#
# Baseline train score: 0.8657027183342972
#
# Baseline test score: 0.7594263243118204
# # Task 4. Normalizing numeric attributes
# The k-Nearest Neighbor classifier uses distance to find $K$ nearest neighbors. Distance metric is very sensitive to the scale of numeric attributes. For example, the *sqft_living* is represented as three- to four-digit numbers, and the numer of *bedrooms* is in single digits. Thus the distance across *sqft_living* would dominate the distance across the *bedrooms* dimension. To avoid this scale-related bias, we need to project all numeric values into interval from 0 to 1.
#
# Perform data normalization in the cell below.
# +
# normalizing numeric fields
# You can write a simple loop over all columns in data
# You can use pandas or numpy
# Use np.min and np.max
# Alternatively you can use sklearn.preprocessing.MinMaxScaler()
# <Your code here>
from sklearn.preprocessing import MinMaxScaler
# Rescale every feature into [0, 1] so no single attribute dominates the
# distance metric because of its magnitude.
sc = MinMaxScaler()
#X_train_scaled = sc.fit_transform(X_train)
#X_test_scaled = sc.transform(X_test)
X_scaled = X.copy()
X_scaled[:] = sc.fit_transform(X)
#Subtract min and divide by maxscale
# -
X_scaled
# Repeat the same experiment as in Task 3, but with normalized data. Did your model improve?
# +
# we repeat the same steps but with normalized data
# split data into features and target variable
# Re-run the holdout experiment on the normalized features.
X_train_scaled, X_test_scaled, Y_train, Y_test = train_test_split(X_scaled, Y, test_size=0.2)
#split dataset into train and test data
# Build a classifier
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train_scaled, Y_train)
print("Normalized train score:", knn.score(X_train_scaled, Y_train))
print("Normalized test score:", knn.score(X_test_scaled, Y_test))
# -
# Here are my results:
#
# Normalized train score: 0.8728744939271255
#
# Normalized test score: 0.7777006708304418
# # Task 5. Adding expert knowledge
# Now that we removed the scale bias of each feature by projecting all of them into the same interval $[0,1]$, we want to introduce some bias based on the expert knowledge.
#
# Look at the correlation map. Which attributes are highly correlated with price? How can we make them contribute more to the overall distance between the houses?
# Re-plot the correlation heatmap to pick out price-correlated attributes.
fig, ax = plt.subplots(figsize=(8,6))
corr = data.corr()
sns.heatmap(corr, center=0, annot=True, linewidths=.1, ax=ax, fmt='.2f')
# +
# increase influence of distances across important attributes
#Let's say the square ft living is much more important of the age. How to weight the distance?
#If we want have sqft to influcen and define your neighbor, Should we multply by a big or small number? between 0 or 1.
# -
# Repeat the same experiment but with added bias. Did you get better results?
# +
# we repeat the same steps but with added bias
# split data into features and target variable
# Re-weight selected features, then repeat the holdout experiment.
X_scaled_bias = X_scaled.copy()
X_scaled_bias[['sqft_living', 'sqft_above']] = X_scaled_bias[['sqft_living', 'sqft_above']]*.01
# NOTE(review): multiplying by 0.01 *shrinks* these features' contribution to
# the distance; to make price-correlated attributes count for more, the
# factor should be greater than 1 — confirm the intended direction.
#split dataset into train and test data
# Fix: the original split X_scaled here, so the biased copy was never used.
X_train, X_test, Y_train, Y_test = train_test_split(X_scaled_bias, Y, test_size=0.2)
# Build the model
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, Y_train)
print("Expert train score:", knn.score(X_train, Y_train))
print("Expert test score:", knn.score(X_test, Y_test))
# -
# I got more than 78% accuracy for the testing score.
# # Task 6. Neighbor voting
# Look closely at the `weights` parameter of the `KNeighborsClassifier`. How can we make the nearer neighbors contribute more to the decision about the class?
#
# Run the same experiment as in Task 5 but with weighted distance from the neighbors. Did you get better results?
# +
#Here we combine...
# weights='distance' makes closer neighbours count for more in the vote
knn = KNeighborsClassifier(n_neighbors=3, weights='distance')
knn.fit(X_train, Y_train)
print("Weighted distance train score:", knn.score(X_train, Y_train))
print("Weighted distance score:", knn.score(X_test, Y_test))
# -
# Answer: By including weight as distance, we still do not get much improvement if compared to task 5.
# # Task 7. More reliable score with cross-validation
#
# To produce a more reliable estimation of the model performance we are going to use **cross-validation** instead of holdout estimation.
#
# Cross-validation is when the dataset is randomly split up into $m$ groups (called $folds$). One of the groups is used as the test set and the rest are used as the training set. The model is trained on the training set and scored on the test set. Then the process is repeated until each unique group has been used as the test set. For example, for 5-fold cross validation, the dataset would be split into 5 groups, and the model would be trained and tested 5 separate times so each group would get a chance to be the test set.
#
# Cross-validation is more reliable than the holdout method because the holdout method score is dependent on how the data is split into train and test sets. Cross-validation gives the model an opportunity to test on multiple splits so we can get a better idea on how the model will perform on unseen data.
#
# Repeat the experiment from Task 6, but using the entire sets X and Y, and 10-fold cross-validation. You can use `sklearn.model_selection.cross_val_score` to compute the scores for each fold: [link](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html).
#
# You should print all the scores and report the mean. Notice how the score for each fold is slightly different. The cross-validation helps to make model validation more reliable.
# +
#Cross validation...
from sklearn.model_selection import cross_val_score
#create a new KNN model
knn_cv = KNeighborsClassifier(n_neighbors=3, weights='distance')
# train model with cv of 10: each of the 10 folds serves as the test set once
cv_scores = cross_val_score(knn_cv, X_scaled, Y, cv=10) #Ten equal subsets.. to train each time in 9 subsets to predict 1 10th..
#print each cv score (accuracy) and average them
print(cv_scores)
print('cv_scores mean:{}'.format(np.mean(cv_scores)))
# -
# # Task 8. Finding the best value of $K$
# As explained in the lecture, the best value of $K$ can be found using cross-validation. If $K$ is small, then it might be not enough information from the neighbors to correctly predict the target variable. If the number of neighbors is too big, then the prediction will incorporate noise (overfitting).
#
# We are going to run our classification with different values of $K$, to determine the best value that produces the highest score for the test data.
#
# Conduct a series of experiments varying $K$ from 1 to 35 (or more if the score keeps improving), and for each experiment perform 20-fold cross-validation. For this we can use `sklearn.model_selection.GridSearchCV`: [link](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html).
#
# In a nutshell, you create a new knn classifier, without specifying the value of $K$. You also create a parameter `grid_dictionary`, where the key is `n_neighbors` and the value is `np.range(1,35)`.
# Then you call the `GridSearchCV` passing as parameters the knn classifier, parameter grid, and `cv`-the number of folds for cross-validation.
#
# Finally, you fit the data, and wait until all the experiments are finished.
# +
from sklearn.model_selection import GridSearchCV
#no need to implement myself...
#use n_neigbors... break into 20 folds.
#create new a knn model (K left unset — GridSearchCV will sweep it)
new_knn = KNeighborsClassifier(weights='distance')
#create a dictionary of all values we want to test for n_neighbors
grid_dictionary = {'n_neighbors': np.arange(1,35)} #Suggested range... continue until it drops.. and choose the best neighbors
#use gridsearch to test all values for n_neighbors with 20-fold CV each
grid = GridSearchCV(new_knn, grid_dictionary, cv=20)
#fit model to data (runs the full sweep; may take a while)
grid.fit( X, Y)
# -
# What is the best value of $K$? We can find out by looking into `best_params_` field of the `GridSearchCV` object.
# check top-performing n_neighbors value
grid.best_params_, grid.best_score_
# # Task 9. Final experiment
# Repeat the final cross-validation experiment with the best value of $K$ determined in Task 8, and compute the mean of the cross-validation score for our final model (cv=20).
# +
#create a new KNN model
# NOTE(review): n_neighbors=17 should match grid.best_params_ from Task 8 —
# confirm it reflects the actual grid-search winner.
knn_cv = KNeighborsClassifier(n_neighbors=17, weights='distance')
# train model with cv of 20 (the task asks for 20-fold CV; the original
# passed cv=34, contradicting both the task and the comment above)
cv_scores = cross_val_score(knn_cv, X, Y, cv=20)
#print each cv score (accuracy) and average them
print(cv_scores)
print('cv_scores mean:{}'.format(np.mean(cv_scores)))
# -
# If you performed all the improvements correctly, the mean cross-validation score should be at least $0.80$. This is already a somewhat useful model. Given a new house, you can feed its features into the model, and find out if it is a cheap or an expensive house - i.e. if it is priced above the mean price in the current area or below it.
#
# Of course you are welcome to improve the model even further for additional bonus points.
# # Task 10. Predicting class of two new houses
# We have two new houses for sale. The data about them is in file two_houses.csv, included in this repository.
#
# We can build the model using the entire dataset (X, Y).
#
# We want to predict the class label of these two houses.
#
# Do not forget to perform the same transformations on the new data as you performed on the original dataset.
# Fit the final classifier on the complete dataset (X, Y).
knn_full = KNeighborsClassifier(n_neighbors=31, weights='distance')
knn_full.fit(X, Y)
# +
test_file_name = "../data_sets/two_houses.csv"
test = pd.read_csv(test_file_name, index_col='id')
test.head()
# -
#Test-data transformation
x_test = test.drop(columns='price')
# NOTE(review): knn_full was fit on the *unscaled* X, so raw features are the
# consistent input here; if the scaled pipeline is used instead, apply
# sc.transform(x_test) as well.
y_test = 1*(test['price'] > data['price'].mean())
y_predicted = knn_full.predict(x_test)
# The model predicts the price *class* (1 = above mean), not a price value,
# so label the printed output accordingly (the original said "price").
print("Predicted class:",y_predicted)
print("Actual class:",y_test)
# This is the end of the KNN lab.
#
# Copyright © 2022 <NAME>. All rights reserved.
#
|
knn_home_price.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gesture Recognition
# In this group project, you are going to build a 3D Conv model that will be able to predict the 5 gestures correctly. Please import the following libraries to get started.
import numpy as np
import os
from scipy.misc import imread, imresize
import datetime
from skimage.transform import resize
import matplotlib.pyplot as plt
import keras
# We set the random seed so that the results don't vary drastically.
np.random.seed(30)
import random as rn
rn.seed(30)
from keras import backend as K
import tensorflow as tf
tf.set_random_seed(30)
# In this block, you read the folder names for training and validation. You also set the `batch_size` here. Note that you set the batch size in such a way that you are able to use the GPU in full capacity. You keep increasing the batch size until the machine throws an error.
train_doc = np.random.permutation(open('./Project_data/train.csv').readlines())
val_doc = np.random.permutation(open('./Project_data/val.csv').readlines())
# batch_size = 20 #experiment with the batch size
# ## Generator
# This is one of the most important parts of the code. The overall structure of the generator has been given. In the generator, you are going to preprocess the images as you have images of 2 different dimensions as well as create a batch of video frames. You have to experiment with `img_idx`, `y`,`z` and normalization such that you get high accuracy.
def generator(source_path, folder_list, batch_size, x, y, z):
    """Yield (batch_data, batch_labels) batches of gesture videos forever.

    x     - number of frames sampled per video (batch_data's 2nd axis)
    y, z  - target height and width each frame is resized to
    Frames are min-max normalised per RGB channel; labels are one-hot over
    the 5 gestures (class index is field 2 of each csv row).
    """
    print( 'Source path = ', source_path, '; batch size =', batch_size)
    # every other frame of the 30-frame clips -> 15 frames per video
    img_idx = [i for i in range(0, 30, 2)]

    def _load_video(row, dest):
        """Read, resize and normalise one video's frames into dest[x, y, z, 3]."""
        # Sort the directory listing: os.listdir order is arbitrary, so the
        # original could read frames out of temporal order.
        frames = sorted(os.listdir(source_path + '/' + row.split(';')[0]))
        for slot, frame_no in enumerate(img_idx):
            # One reader for every clip. The original used scipy.misc.imread
            # for full batches but plt.imread for the remainder batch; the
            # per-channel min-max normalisation below makes the result
            # independent of the reader's raw scale anyway.
            image = plt.imread(source_path + '/' + row.strip().split(';')[0]
                               + '/' + frames[frame_no]).astype(np.float32)
            # images come in 2 different shapes; conv3D needs a uniform size
            image = resize(image, (y, z, 3)).astype(np.float32)
            for c in range(3):  # normalise each RGB channel to [0, 1]
                ch = image[:, :, c]
                dest[slot, :, :, c] = (ch - ch.min()) / (ch.max() - ch.min())

    while True:
        t = np.random.permutation(folder_list)
        num_batches = int(len(t) / batch_size)  # number of full batches
        for batch in range(num_batches):
            batch_data = np.zeros((batch_size, x, y, z, 3))
            batch_labels = np.zeros((batch_size, 5))
            for folder in range(batch_size):
                row = t[folder + (batch * batch_size)]
                _load_video(row, batch_data[folder])
                batch_labels[folder, int(row.strip().split(';')[2])] = 1
            yield batch_data, batch_labels
        # leftover videos that don't fill a whole batch
        rem = len(t) % batch_size
        if rem != 0:
            batch_data = np.zeros((rem, x, y, z, 3))
            batch_labels = np.zeros((rem, 5))
            for folder in range(rem):
                row = t[folder + (num_batches * batch_size)]
                _load_video(row, batch_data[folder])
                batch_labels[folder, int(row.strip().split(';')[2])] = 1
            yield batch_data, batch_labels
# Note here that a video is represented above in the generator as (number of images, height, width, number of channels). Take this into consideration while creating the model architecture.
curr_dt_time = datetime.datetime.now()  # run timestamp, used to name the checkpoint directory
train_path = './Project_data/train'
val_path = './Project_data/val'
num_train_sequences = len(train_doc)  # number of training videos (CSV rows)
print('# training sequences =', num_train_sequences)
num_val_sequences = len(val_doc)  # number of validation videos (CSV rows)
print('# validation sequences =', num_val_sequences)
def train_model(model, num_epochs, num_image_per_video, image_width, image_height, batch_size):
    """Fit `model` on the gesture data with per-epoch checkpointing.

    Relies on the module-level `generator`, `train_path`/`val_path`,
    `train_doc`/`val_doc`, `num_train_sequences`/`num_val_sequences`
    and `curr_dt_time` globals.
    """
    train_generator = generator(train_path, train_doc, batch_size,
                                num_image_per_video, image_width, image_height)
    val_generator = generator(val_path, val_doc, batch_size,
                              num_image_per_video, image_width, image_height)

    # Checkpoint directory named from the run start time.
    model_name = 'model_init' + '_' + str(curr_dt_time).replace(' ', '').replace(':', '_') + '/'
    if not os.path.exists(model_name):
        os.mkdir(model_name)
    filepath = model_name + 'model-{epoch:05d}-{loss:.5f}-{categorical_accuracy:.5f}-{val_loss:.5f}-{val_categorical_accuracy:.5f}.h5'
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                                 save_best_only=False, save_weights_only=False,
                                 mode='auto', period=1)
    LR = ReduceLROnPlateau(monitor='val_loss', factor=0.01, patience=3,
                           min_lr=0.0001, cooldown=1)
    callbacks_list = [checkpoint, LR]

    # Ceil division replaces the original duplicated if/else: one extra
    # step per epoch covers a final partial batch.
    steps_per_epoch = -(-num_train_sequences // batch_size)
    validation_steps = -(-num_val_sequences // batch_size)

    model.fit_generator(train_generator, steps_per_epoch=steps_per_epoch,
                        epochs=num_epochs, verbose=1,
                        callbacks=callbacks_list, validation_data=val_generator,
                        validation_steps=validation_steps, class_weight=None,
                        workers=1, initial_epoch=0)
# ## Model
# Here you make the model using different functionalities that Keras provides. Remember to use `Conv3D` and `MaxPooling3D` and not `Conv2D` and `Maxpooling2D` for a 3D convolution model. You would want to use `TimeDistributed` while building a Conv2D + RNN model. Also remember that the last layer is the softmax. Design the network in such a way that the model is able to give good accuracy on the least number of parameters so that it can fit in the memory of the webcam.
# +
from keras.models import Sequential, Model
from keras.layers import Dense, GRU, Flatten, TimeDistributed, Flatten, BatchNormalization, Activation, Dropout, CuDNNLSTM
from keras.layers.convolutional import Conv3D, Conv2D, MaxPooling3D, MaxPooling2D
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from keras import optimizers
#write your model here
# -
# ### Model 1 : Conv2D + RNN
class Conv2DModel:
    """Time-distributed Conv2D feature extractor followed by a GRU classifier."""

    def define_model(self, num_image_per_video, image_width, image_height):
        """Build and compile the Conv2D+RNN model; input is
        (frames, W, H, 3), output a 5-way softmax."""
        seq = Sequential()
        # First conv carries the input shape; every per-frame layer is wrapped
        # in TimeDistributed so it runs independently on each video frame.
        seq.add(TimeDistributed(Conv2D(8, (3, 3), strides=(2, 2), activation='relu', padding='same'),
                                input_shape=(num_image_per_video, image_width, image_height, 3)))
        for layer in (BatchNormalization(),
                      Conv2D(16, (3, 3), activation='relu'),
                      BatchNormalization(),
                      MaxPooling2D((2, 2), strides=(2, 2))):
            seq.add(TimeDistributed(layer))
        seq.add(Dropout(0.3))
        for layer in (Conv2D(64, (2, 2), padding='same', activation='relu'),
                      BatchNormalization(),
                      MaxPooling2D((2, 2), strides=(2, 2))):
            seq.add(TimeDistributed(layer))
        seq.add(Dropout(0.3))
        seq.add(TimeDistributed(Flatten()))
        # Dense layers apply per timestep; the GRU then aggregates over time.
        seq.add(Dense(64, activation='relu'))
        seq.add(Dropout(0.5))
        seq.add(Dense(128, activation='relu'))
        seq.add(Dropout(0.5))
        seq.add(GRU(128, return_sequences=False))
        seq.add(Dense(5, activation='softmax'))
        seq.compile(optimizer=optimizers.Adam(), loss='categorical_crossentropy',
                    metrics=['categorical_accuracy'])
        return seq
# ### Experiment Run 1: Image Res (84 X 84), Epochs = 25, Batch size = 20, Number of images per video = 20
# Now that you have written the model, the next step is to `compile` the model. When you print the `summary` of the model, you'll see the total number of parameters you have to train.
# Run 1: Conv2D+RNN, 20 frames/video at 84x84, batch size 20, 25 epochs.
model2D = Conv2DModel()
model2D = model2D.define_model(20,84,84)
print(model2D.summary())
train_model(model2D, 25, 20, 84, 84, 20)
# ### Experiment Run 2: Image Res (100 X 100), Epochs = 25, Batch size = 20, Number of Images per video = 18
model2D = Conv2DModel()
model2D = model2D.define_model(18,100,100)
print(model2D.summary())
train_model(model2D, 25, 18, 100, 100, 20)
# ### Experiment Run 3: Image Res (120 X 120), Batch size = 25, Number of Images per video = 30
# NOTE(review): the original heading said "Epochs = 30" but the call below
# trains for 25 epochs — confirm which was intended.
model2D = Conv2DModel()
model2D = model2D.define_model(30,120,120)
print(model2D.summary())
train_model(model2D, 25, 30, 120, 120, 25)
# ### Model 2 : Convolutional 3D Model without dropout
class Conv3DModel:
    """Plain Conv3D network (no dropout) for 5-way gesture classification."""

    def define_model(self, num_image_per_video, image_width, image_height):
        """Build and compile the model; input is (frames, W, H, 3)."""
        net = Sequential()
        # First block keeps the temporal dimension (pool depth 1 on the last axis).
        net.add(Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same',
                       input_shape=(num_image_per_video, image_width, image_height, 3)))
        net.add(BatchNormalization())
        net.add(Activation('relu'))
        net.add(MaxPooling3D(pool_size=(2, 2, 1), strides=(2, 2, 1)))
        # Three more conv blocks, each halving all three dimensions.
        for filters in (128, 256, 256):
            net.add(Conv3D(filters, (3, 3, 3), strides=(1, 1, 1), padding='same'))
            net.add(BatchNormalization())
            net.add(Activation('relu'))
            net.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))
        net.add(Flatten())
        net.add(Dense(512, activation='relu'))
        net.add(Dense(5, activation='softmax'))
        sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.7, nesterov=True)
        net.compile(optimizer=sgd, loss='categorical_crossentropy',
                    metrics=['categorical_accuracy'])
        return net
# ### Experiment Run 4: Image Res (84 X 84), Epochs = 25, Batch size = 20, Number of Images per video = 20
# Now that you have written the model, the next step is to `compile` the model. When you print the `summary` of the model, you'll see the total number of parameters you have to train.
# Run 4: Conv3D (no dropout), 20 frames at 84x84, batch 20, 25 epochs.
model3D = Conv3DModel()
model3D = model3D.define_model(20,84,84)
print(model3D.summary())
train_model(model3D, 25, 20, 84, 84, 20)
# ### Experiment Run 5: Image Res (100 X 100), Epochs = 25, Batch size = 20, Number of Images per video = 20
model3D = Conv3DModel()
model3D = model3D.define_model(20,100,100)
print(model3D.summary())
train_model(model3D, 25, 20, 100, 100, 20)
# ### Model 3 : Convolutional 3D Model with dropout
# ### Experiment Run 6: Image Res (84 X 84), Epochs = 30, Batch size = 20, Number of images per video = 20
# Now that you have written the model, the next step is to `compile` the model. When you print the `summary` of the model, you'll see the total number of parameters you have to train.
class Conv3DModel_2:
    """Conv3D gesture classifier with dropout regularisation."""

    def define_model(self, num_image_per_video, image_width, image_height):
        """Build and compile the model; input (frames, W, H, 3), output 5-way softmax."""
        net = Sequential()
        # First block keeps the temporal dimension (pool depth 1 on the last axis).
        net.add(Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same',
                       input_shape=(num_image_per_video, image_width, image_height, 3)))
        net.add(BatchNormalization())
        net.add(Activation('relu'))
        net.add(MaxPooling3D(pool_size=(2, 2, 1), strides=(2, 2, 1)))
        # Two further conv blocks, each followed by pooling and 25% dropout.
        for filters in (128, 256):
            net.add(Conv3D(filters, (3, 3, 3), strides=(1, 1, 1), padding='same'))
            net.add(BatchNormalization())
            net.add(Activation('relu'))
            net.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))
            net.add(Dropout(0.25))
        net.add(Flatten())
        net.add(Dropout(0.5))
        net.add(Dense(512, activation='relu'))
        net.add(Dropout(0.5))
        net.add(Dense(5, activation='softmax'))
        sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.7, nesterov=True)
        net.compile(optimizer=sgd, loss='categorical_crossentropy',
                    metrics=['categorical_accuracy'])
        return net
# Run 6: Conv3D with dropout, 20 frames at 84x84, batch 20, 30 epochs.
model3D1 = Conv3DModel_2()
model3D1 = model3D1.define_model(20,84,84)
print(model3D1.summary())
train_model(model3D1, 30, 20, 84, 84, 20)
# ### Model 4: Convolutional 3D Mode with increased number of layers
class Conv3DModel_1:
    """Deeper Conv3D gesture classifier (four conv blocks) with dropout."""

    def define_model(self, num_image_per_video, image_width, image_height):
        """Build and compile the model; input (frames, W, H, 3), output 5-way softmax."""
        net = Sequential()
        # First block keeps the temporal dimension (pool depth 1 on the last axis).
        net.add(Conv3D(64, (3, 3, 3), strides=(1, 1, 1), padding='same',
                       input_shape=(num_image_per_video, image_width, image_height, 3)))
        net.add(BatchNormalization())
        net.add(Activation('relu'))
        net.add(MaxPooling3D(pool_size=(2, 2, 1), strides=(2, 2, 1)))
        # Remaining conv blocks as (filters, dropout-after-pool?) pairs;
        # the final block has no dropout, matching the original layout.
        for filters, use_dropout in ((128, True), (256, True), (256, False)):
            net.add(Conv3D(filters, (3, 3, 3), strides=(1, 1, 1), padding='same'))
            net.add(BatchNormalization())
            net.add(Activation('relu'))
            net.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2)))
            if use_dropout:
                net.add(Dropout(0.25))
        net.add(Flatten())
        net.add(Dropout(0.5))
        net.add(Dense(512, activation='relu'))
        net.add(Dropout(0.5))
        net.add(Dense(5, activation='softmax'))
        sgd = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.7, nesterov=True)
        net.compile(optimizer=sgd, loss='categorical_crossentropy',
                    metrics=['categorical_accuracy'])
        return net
# ### Experiment Run 7: Image Res (84 X 84), Batch size = 20, Number of images per video = 20, deeper network
# NOTE(review): the original heading said "Epochs = 30" but the call below
# trains for 25 — confirm which was intended.
model3D = Conv3DModel_1()
model3D = model3D.define_model(20,84,84)
print(model3D.summary())
train_model(model3D, 25, 20, 84, 84, 20)
|
src/model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (ebak)
# language: ''
# name: ebak
# ---
# +
import warnings
# Third-party
from astropy.io import fits, ascii
import astropy.table as tbl
import astropy.time as atime
import astropy.coordinates as coord
import astropy.units as u
import emcee
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
plt.style.use('apw-notebook')
# %matplotlib inline
import corner
from scipy.optimize import minimize
from ebak.singleline import RVData, OrbitModel
from ebak.units import usys
from ebak import SimulatedRVOrbit
# -
troup = tbl.Table(np.genfromtxt("../data/troup16-dr12.csv", delimiter=",", names=True, dtype=None))
# troup_i = np.where((troup['NVISITS'] > 30) & (troup['SLOPE'] == 0) & (troup['ECC'] < 0.2))[0][0]
# troup_i = np.where((troup['NVISITS'] > 10) & (troup['SLOPE'] == 0) & (troup['ECC'] < 0.4))[0][0]
troup_i = 211
troup_i
print(troup.colnames)
_tbl = fits.getdata("../data/allVisit-l30e.2.fits", 1)
one_target = tbl.Table(_tbl[_tbl['APOGEE_ID'] == troup['APOGEE_ID'].astype(str)[troup_i]])
print(one_target['APOGEE_ID'][0])
# +
# one_target = tbl.Table(_tbl[_tbl['APOGEE_ID'].astype(str) == "2M03080601+7950502"])
# troup_i, = np.where(troup['APOGEE_ID'].astype(str) == "2M03080601+7950502")[0]
# one_target = tbl.Table(_tbl[_tbl['APOGEE_ID'].astype(str) == "2M00283971+8530377"])
# troup_i, = np.where(troup['APOGEE_ID'].astype(str) == "2M00283971+8530377")[0]
# troup_i
# +
ecc = troup[troup_i]['ECC']
m_f = troup[troup_i]['MASSFN']*u.Msun
K = troup[troup_i]['SEMIAMP']*u.m/u.s
period = troup[troup_i]['PERIOD']*u.day
asini = (K * period/(2*np.pi) * np.sqrt(1 - ecc**2)).to(u.au)
omega = troup[troup_i]['OMEGA']*u.degree
v0 = troup[troup_i]['V0']*u.m/u.s
v_slope = troup[troup_i]['SLOPE']*u.m/u.s/u.day
t_peri = atime.Time(troup[troup_i]['TPERI'], format='jd', scale='tcb')
phi0 = ((2*np.pi*(t_peri.tcb.mjd - 55555.) / period.to(u.day).value) % (2*np.pi)) * u.radian
# +
rv = np.array(one_target['VHELIO']) * u.km/u.s
ivar = 1 / (np.array(one_target['VRELERR'])*u.km/u.s)**2
t = atime.Time(np.array(one_target['JD']), format='jd', scale='tcb')
data = RVData(t, rv, ivar)
# +
troup_orbit = SimulatedRVOrbit(P=period, a_sin_i=asini, ecc=ecc,
omega=omega, phi0=phi0, v0=0*u.km/u.s)
# def min_func(p, data, _orbit):
# a_sin_i, omega, phi0, v0 = p
# _orbit._a_sin_i = a_sin_i
# _orbit._omega = omega
# _orbit._phi0 = phi0
# _orbit._v0 = v0
# return np.sum(data._ivar * (_orbit._generate_rv_curve(data._t) - data._rv)**2)
def min_func(p, data, _orbit):
    """Chi-square objective for Powell minimisation over (omega, phi0, v0).

    Mutates `_orbit` in place, then scores its RV curve against `data`.
    (a_sin_i is held fixed at the Troup value.)
    """
    omega, phi0, v0 = p
    _orbit._omega = omega
    _orbit._phi0 = phi0
    _orbit._v0 = v0
    residual = _orbit._generate_rv_curve(data._t) - data._rv
    return np.sum(data._ivar * residual**2)
# -
# x0 = [asini.decompose(usys).value, omega.decompose(usys).value,
# phi0.decompose(usys).value, -v0.decompose(usys).value]
x0 = [omega.decompose(usys).value, phi0.decompose(usys).value, -v0.decompose(usys).value]
res = minimize(min_func, x0=x0, method='powell',
args=(data,troup_orbit.copy()))
# bounds=[(1e-8, None),(None,None),(None,None),(None,None)])
res.success, res.x
orbit = troup_orbit.copy()
# orbit._a_sin_i, orbit._omega, orbit._phi0, orbit._v0 = res.x
orbit._omega, orbit._phi0, orbit._v0 = res.x
data.plot()
orbit.plot(ax=plt.gca())
model = OrbitModel(data=data, orbit=orbit)
model.ln_prior()
np.log(model.orbit._P)
# +
n_steps = 1024
n_walkers = 256
p0 = emcee.utils.sample_ball(model.get_par_vec(),
1E-3*model.get_par_vec(),
size=n_walkers)
# special treatment for ln_P
p0[:,0] = np.random.normal(np.log(model.orbit._P), 0.1, size=p0.shape[0])
# special treatment for s
p0[:,6] = np.abs(np.random.normal(0, 1E-3, size=p0.shape[0]) * u.km/u.s).decompose(usys).value
sampler = emcee.EnsembleSampler(n_walkers, dim=p0.shape[1], lnpostfn=model)
# -
pos,_,_ = sampler.run_mcmc(p0, N=n_steps)
for i in range(p0.shape[1]):
plt.figure()
plt.ylabel(model.vec_labels[i])
plt.plot(sampler.chain[...,i].T, drawstyle='steps', alpha=0.1, marker=None)
# plt.ylim(lims[i])
def plot_rv_curve_samples(sampler, ax=None):
    """Overlay RV-curve draws from the sampler's final walker positions on the data.

    NOTE(review): reads the module-level globals `data` and `model` rather
    than taking them as parameters — confirm intended.
    """
    if ax is None:
        fig,ax = plt.subplots(1,1)
    data.plot(ax=ax, zorder=100)
    # One orbit per walker, built from the last step of each chain.
    for p in sampler.chain[:,-1]:
        orbit = model.from_vec(p).orbit
        orbit.plot(ax=ax, alpha=0.1)
    # Pad the y-limits by 25% of the observed RV range.
    _diff = data.rv.max()-data.rv.min()
    ax.set_ylim((data.rv.min()-0.25*_diff).to(u.km/u.s).value,
                (data.rv.max()+0.25*_diff).to(u.km/u.s).value)
    ax.set_xlabel('MJD')
    ax.set_ylabel('RV [km/s]')
    return ax.figure
_ = plot_rv_curve_samples(sampler)
flatchain = np.vstack(sampler.chain[:,-256:])
plot_pars = model.vec_to_plot_pars(flatchain)
troup_vals = [np.log(period.to(u.day).value), m_f.value, ecc, omega.to(u.degree).value,
t_peri.mjd, -v0.to(u.km/u.s).value, 0.]
fig = corner.corner(plot_pars, labels=model.plot_labels, truths=troup_vals)
|
notebooks/Fit an APOGEE RV curve.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simple Boston Demo
#
# The PartitionExplainer is still in an Alpha state, but this notebook demonstrates how to use it right now. Note that I am releasing this to get feedback and show how I am working to address concerns about the speed of our model agnostic approaches and the impact of feature correlations. This is all as-yet unpublished work, so treat it accordingly.
#
# When given a balanced partition tree PartitionExplainer has $O(M^2)$ runtime, where $M$ is the number of input features. This is much better than the $O(2^M)$ runtime of KernelExplainer.
import numpy as np
import scipy as sp
import scipy.cluster
import matplotlib.pyplot as pl
import xgboost
import shap
import pandas as pd
# ## Train the model
# +
X,y = shap.datasets.boston()
model = xgboost.XGBRegressor(n_estimators=100, subsample=0.3)
model.fit(X, y)
x = X.values[0:1,:]
refs = X.values[1:100] # use 100 samples for our background references (using the whole dataset would be slower)
# -
# ## Compute a hierarchical clustering of the input features
D = sp.spatial.distance.pdist(X.fillna(X.mean()).T, metric="correlation")
cluster_matrix = sp.cluster.hierarchy.complete(D)
# plot the clustering
pl.figure(figsize=(15, 6))
pl.title('Hierarchical Clustering Dendrogram')
pl.xlabel('sample index')
pl.ylabel('distance')
sp.cluster.hierarchy.dendrogram(
cluster_matrix,
leaf_rotation=90., # rotates the x axis labels
leaf_font_size=10., # font size for the x axis labels
labels=X.columns
)
pl.show()
# ## Explain the first sample with PartitionExplainer
# +
# define the model as a python function
f = lambda x: model.predict(x, output_margin=True, validate_features=False)
# explain the model
e = shap.PartitionExplainer(f, refs, cluster_matrix)
shap_values = e.shap_values(x, tol=-1)
# ...or use something like e.shap_values(x, tol=0.001) to prune the partition tree and so run faster
# -
# ## Compare with TreeExplainer
explainer = shap.TreeExplainer(model, refs, feature_dependence="independent")
shap_values2 = explainer.shap_values(x)
pl.plot(shap_values2[0], label="TreeExplainer")
pl.plot(shap_values[0], label="PartitionExplainer")
pl.legend()
|
notebooks/partition_explainer/Simple Boston Demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercícios da biblioteca Pandas 10 iniciais
# ## Utilização da biblioteca pandas
import pandas as pd
# ## Definição de objeto - Data Frame
# Dados importados do Kagle para critérios didáticos
HR_df = pd.read_csv('input_Files\WA_Fn-UseC_-HR-Employee-Attrition.csv')
# ## Dimensão do Data Frame
# Verificação do número de linhasxcolunas da base de dados
HR_df.shape
# ## Verificação do tipo das informações contidas nos dados
HR_df.dtypes
# ## Visualização dos dados
# Visualização das 5 primeiras e 5 últimas colunas
display(HR_df)
# ## Visualização de tipo de dados e qualidade dos dados preliminar
# A ausência de null date, dispensa-se preliminarmente a fase de tratamento dos dados
HR_df.info()
# ## Análise descritiva preliminar
HR_df.describe()
# ## Faixa demonstrativa de valores para cada coluna
for coluna in HR_df: #dale
print(HR_df[coluna].value_counts())
print('---------x----------- \n')
# ## Agrupamento de dados por valor específico de coluna
# Verificação dos dados daqueles funcionários mais satisfeitos com o trabalho
HR_df.groupby('JobSatisfaction').get_group(4)
# ## Descrição dos dados agrupados
# Análise sob todas as colunas, para insights sobre o que garante maior satisfação com o trabalho com apoio da análise descritiva preliminar.
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
display(HR_df.groupby('JobSatisfaction').get_group(4).describe())
# # Atividades Pandas
# ## Colunas indices no data frame
# Coluna de dados: colchetes duplos
HR_df[['JobSatisfaction']]
# Série numérica: colchetes simples
HR_df['JobSatisfaction']
# ## Metodo .loc, linhas indices do data frame
# recebe uma lista de inteiros que retorna as linhas desejadas
HR_df.loc[[10,3,1469]]
HR_df.loc[HR_df['JobSatisfaction']>2]
# ## Criação de novos data frames
# Junção das funcionalidades anteriores para um novo data frame
df= HR_df[['DistanceFromHome','Age']].loc[HR_df['JobSatisfaction']!=4]
display(df)
type(df)
# ## Renomear colunas
df.rename(columns={'Age':'Idade','DistanceFromHome':'DistânciaDeCasa'})
df
# ## Persistir renomeação
df.rename(columns={'Age':'Idade','DistanceFromHome':'DistânciaDeCasa'}, inplace=True)
df
# ## Método .iloc
# Método para filtro de linhas e colunas respectivamente
HR_df.iloc[1000:1100,10:22]
# ## Deletar colunas
df.drop('Idade',axis=1,inplace=True)
df
# ## Visualização com a função print
print(HR_df)
# ## Verificação de valores vazios: null
HR_df.isnull()
HR_df.isnull().sum()
# Neste caso não há valores vazios
# ## Deleta colunas com valores vazios
#retirar colunas com NaN
HR_df.dropna()
# ## Prenche valores vazios
# Caso estivesse com valores vazios, o resultado persistiria no data frame, prenchendo dados com valor '0'
HR_df['Age'].fillna(0,inplace = True)
|
.ipynb_checkpoints/Python Exercises Pandas-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
def solution(h, q):
    """Return the parent label for each queried node in a perfect binary tree of height h."""
    parents = []
    for node_id in q:
        parents.append(get_parent_id(h, node_id))
    return parents
def get_parent_id(h, node_id):
    """Return the post-order label of `node_id`'s parent in a perfect binary
    tree of height `h`, or -1 if `node_id` is the root (label 2**h - 1).

    Walks down from the root: at each level the left/right subtree roots have
    computable post-order labels, and a subtree's root label is one more than
    its right child's, so the parent is found as soon as `node_id` matches a
    child label.  (The original had a redundant `elif node_id < 2**h - 1`
    guard that was always true after the first return.)
    """
    root_label = 2**h - 1
    if node_id >= root_label:
        return -1  # the root (or an out-of-range label) has no parent
    shift = 0  # post-order label offset of the current subtree
    for level in range(h - 1, -1, -1):
        left = shift + (2**level - 1)   # label of the left-child subtree root
        right = left + (2**level - 1)   # label of the right-child subtree root
        if node_id in (left, right):
            return right + 1            # parent is labelled just after its right child
        if node_id > left:
            shift = left                # descend into the right subtree
    return -1
# -
solution(3, [7, 3, 5, 1])
|
Perfect Binary Tree Postorder Traversal.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PyCharm (tutorials)
# language: python
# name: pycharm-5d4c1c6c
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Pandas 基本介绍
# ## Numpy 和 Pandas 有什么不同
# 如果用 python 的列表和字典来作比较, 那么可以说 Numpy 是列表形式的,没有数值标签,而 Pandas 就是字典形式。Pandas是基于Numpy构建的,让Numpy为中心的应用变得更加简单。
# 要使用pandas,首先需要了解他主要两个数据结构:Series和DataFrame。
# ## Series
# + pycharm={"name": "#%%\n"}
print('324')
# + pycharm={"name": "#%%\n"}
import pandas as pd
import numpy as np
s = pd.Series([1,3,6,np.nan,44,1])
print(s)
"""
0 1.0
1 3.0
2 6.0
3 NaN
4 44.0
5 1.0
dtype: float64
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## DataFrame
# + pycharm={"name": "#%%\n"}
dates = pd.date_range('20160101',periods=6)
df = pd.DataFrame(np.random.randn(6,4),index=dates,columns=['a','b','c','d'])
print(df)
"""
a b c d
2016-01-01 -0.253065 -2.071051 -0.640515 0.613663
2016-01-02 -1.147178 1.532470 0.989255 -0.499761
2016-01-03 1.221656 -2.390171 1.862914 0.778070
2016-01-04 1.473877 -0.046419 0.610046 0.204672
2016-01-05 -1.584752 -0.700592 1.487264 -1.778293
2016-01-06 0.633675 -1.414157 -0.277066 -0.442545
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## DataFrame 的一些简单运用
# + pycharm={"name": "#%%\n"}
print(df['b'])
"""
2016-01-01 -2.071051
2016-01-02 1.532470
2016-01-03 -2.390171
2016-01-04 -0.046419
2016-01-05 -0.700592
2016-01-06 -1.414157
Freq: D, Name: b, dtype: float64
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 默认的从0开始 index.
# + pycharm={"name": "#%%\n"}
df1 = pd.DataFrame(np.arange(12).reshape((3,4)))
print(df1)
"""
0 1 2 3
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 另外一种生成 df 的方法
# + pycharm={"name": "#%%\n"}
df2 = pd.DataFrame({'A' : 1.,
'B' : pd.Timestamp('20130102'),
'C' : pd.Series(1,index=list(range(4)),dtype='float32'),
'D' : np.array([3] * 4,dtype='int32'),
'E' : pd.Categorical(["test","train","test","train"]),
'F' : 'foo'})
print(df2)
"""
A B C D E F
0 1.0 2013-01-02 1.0 3 test foo
1 1.0 2013-01-02 1.0 3 train foo
2 1.0 2013-01-02 1.0 3 test foo
3 1.0 2013-01-02 1.0 3 train foo
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# #### dtype
# + pycharm={"name": "#%%\n"}
print(df2.dtypes)
"""
df2.dtypes
A float64
B datetime64[ns]
C float32
D int32
E category
F object
dtype: object
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# #### index
# + pycharm={"name": "#%%\n"}
print(df2.index)
# Int64Index([0, 1, 2, 3], dtype='int64')
# + [markdown] pycharm={"name": "#%% md\n"}
# #### columns
# + pycharm={"name": "#%%\n"}
print(df2.columns)
# Index(['A', 'B', 'C', 'D', 'E', 'F'], dtype='object')
# + [markdown] pycharm={"name": "#%% md\n"}
# #### values
# + pycharm={"name": "#%%\n"}
print(df2.values)
"""
array([[1.0, Timestamp('2013-01-02 00:00:00'), 1.0, 3, 'test', 'foo'],
[1.0, Timestamp('2013-01-02 00:00:00'), 1.0, 3, 'train', 'foo'],
[1.0, Timestamp('2013-01-02 00:00:00'), 1.0, 3, 'test', 'foo'],
[1.0, Timestamp('2013-01-02 00:00:00'), 1.0, 3, 'train', 'foo']], dtype=object)
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 数据的总结describe()
# + pycharm={"name": "#%%\n"}
df2.describe()
"""
A C D
count 4.0 4.0 4.0
mean 1.0 1.0 3.0
std 0.0 0.0 0.0
min 1.0 1.0 3.0
25% 1.0 1.0 3.0
50% 1.0 1.0 3.0
75% 1.0 1.0 3.0
max 1.0 1.0 3.0
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 翻转数据, transpose:
# + pycharm={"name": "#%%\n"}
print(df2.T)
"""
0 1 2 \
A 1 1 1
B 2013-01-02 00:00:00 2013-01-02 00:00:00 2013-01-02 00:00:00
C 1 1 1
D 3 3 3
E test train test
F foo foo foo
3
A 1
B 2013-01-02 00:00:00
C 1
D 3
E train
F foo
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# #### index排序
# + pycharm={"name": "#%%\n"}
print(df2.sort_index(axis=1, ascending=False))
"""
F E D C B A
0 foo test 3 1.0 2013-01-02 1.0
1 foo train 3 1.0 2013-01-02 1.0
2 foo test 3 1.0 2013-01-02 1.0
3 foo train 3 1.0 2013-01-02 1.0
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 值 排序
# + pycharm={"name": "#%%\n"}
print(df2.sort_values(by='B'))
"""
A B C D E F
0 1.0 2013-01-02 1.0 3 test foo
1 1.0 2013-01-02 1.0 3 train foo
2 1.0 2013-01-02 1.0 3 test foo
3 1.0 2013-01-02 1.0 3 train foo
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# # Pandas 选择数据
# ## 简单的筛选
# + pycharm={"name": "#%%\n"}
from __future__ import print_function
import pandas as pd
import numpy as np
dates = pd.date_range('20130101', periods=6)
df = pd.DataFrame(np.random.randn(6,4), index=dates, columns=['A', 'B', 'C', 'D'])
print(df['A'])
print(df.A)
"""
2013-01-01 0
2013-01-02 4
2013-01-03 8
2013-01-04 12
2013-01-05 16
2013-01-06 20
Freq: D, Name: A, dtype: int64
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# 选择跨越多行或多列:
# + pycharm={"name": "#%%\n"}
print(df[0:3])
"""
A B C D
2013-01-01 0 1 2 3
2013-01-02 4 5 6 7
2013-01-03 8 9 10 11
"""
print(df['20130102':'20130104'])
"""
A B C D
2013-01-02 4 5 6 7
2013-01-03 8 9 10 11
2013-01-04 12 13 14 15
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 根据标签 loc
# + pycharm={"name": "#%%\n"}
print(df.loc['20130102'])
"""
A 4
B 5
C 6
D 7
Name: 2013-01-02 00:00:00, dtype: int64
"""
# + pycharm={"name": "#%%\n"}
print(df.loc[:,['A','B']])
"""
A B
2013-01-01 0 1
2013-01-02 4 5
2013-01-03 8 9
2013-01-04 12 13
2013-01-05 16 17
2013-01-06 20 21
"""
# + pycharm={"name": "#%%\n"}
print(df.loc['20130102',['A','B']])
"""
A 4
B 5
Name: 2013-01-02 00:00:00, dtype: int64
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 根据序列 iloc
# + pycharm={"name": "#%%\n"}
print(df.iloc[3,1])
# 13
# + pycharm={"name": "#%%\n"}
print(df.iloc[3:5,1:3])
"""
B C
2013-01-04 13 14
2013-01-05 17 18
"""
# + pycharm={"name": "#%%\n"}
print(df.iloc[[1,3,5],1:3])
"""
B C
2013-01-02 5 6
2013-01-04 13 14
2013-01-06 21 22
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 根据混合的这两种 ix
# + pycharm={"name": "#%%\n"}
#print(df.ix[:3,['A','C']])
"""
A C
2013-01-01 0 2
2013-01-02 4 6
2013-01-03 8 10
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 通过判断的筛选
# + pycharm={"name": "#%%\n"}
print(df[df.A>8])
"""
A B C D
2013-01-04 12 13 14 15
2013-01-05 16 17 18 19
2013-01-06 20 21 22 23
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# # Pandas 设置值
# ## 创建数据
# + pycharm={"name": "#%%\n"}
dates = pd.date_range('20130101', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6,4)),index=dates, columns=['A','B','C','D'])
print(df)
"""
A B C D
2013-01-01 0 1 2 3
2013-01-02 4 5 6 7
2013-01-03 8 9 10 11
2013-01-04 12 13 14 15
2013-01-05 16 17 18 19
2013-01-06 20 21 22 23
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 根据位置设置 loc 和 iloc
# + pycharm={"name": "#%%\n"}
df.iloc[2,2] = 1111
df.loc['20130101','B'] = 2222
print(df)
"""
A B C D
2013-01-01 0 2222 2 3
2013-01-02 4 5 6 7
2013-01-03 8 9 1111 11
2013-01-04 12 13 14 15
2013-01-05 16 17 18 19
2013-01-06 20 21 22 23
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 根据条件设置
# + pycharm={"name": "#%%\n"}
df.B[df.A>4] = 0
"""
A B C D
2013-01-01 0 2222 2 3
2013-01-02 4 5 6 7
2013-01-03 8 0 1111 11
2013-01-04 12 0 14 15
2013-01-05 16 0 18 19
2013-01-06 20 0 22 23
"""
print(df)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 按行或列设置
# + pycharm={"name": "#%%\n"}
df['F'] = np.nan
print(df)
"""
A B C D F
2013-01-01 0 2222 2 3 NaN
2013-01-02 4 5 6 7 NaN
2013-01-03 8 0 1111 11 NaN
2013-01-04 12 0 14 15 NaN
2013-01-05 16 0 18 19 NaN
2013-01-06 20 0 22 23 NaN
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## 添加数据
# + pycharm={"name": "#%%\n"}
df['E'] = pd.Series([1,2,3,4,5,6], index=pd.date_range('20130101',periods=6))
print(df)
"""
A B C D F E
2013-01-01 0 2222 2 3 NaN 1
2013-01-02 4 5 6 7 NaN 2
2013-01-03 8 0 1111 11 NaN 3
2013-01-04 12 0 14 15 NaN 4
2013-01-05 16 0 18 19 NaN 5
2013-01-06 20 0 22 23 NaN 6
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# # Pandas 处理丢失数据
# ## 创建含 NaN 的矩阵
#
# + pycharm={"name": "#%%\n"}
dates = pd.date_range('20130101', periods=6)
df = pd.DataFrame(np.arange(24).reshape((6,4)),index=dates, columns=['A','B','C','D'])
df.iloc[0,1] = np.nan
df.iloc[1,2] = np.nan
print(df)
"""
A B C D
2013-01-01 0 NaN 2.0 3
2013-01-02 4 5.0 NaN 7
2013-01-03 8 9.0 10.0 11
2013-01-04 12 13.0 14.0 15
2013-01-05 16 17.0 18.0 19
2013-01-06 20 21.0 22.0 23
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## pd.dropna()
# + pycharm={"name": "#%%\n"}
df.dropna(
axis=0, # 0: 对行进行操作; 1: 对列进行操作
how='any' # 'any': 只要存在 NaN 就 drop 掉; 'all': 必须全部是 NaN 才 drop
)
print(df)
"""
A B C D
2013-01-03 8 9.0 10.0 11
2013-01-04 12 13.0 14.0 15
2013-01-05 16 17.0 18.0 19
2013-01-06 20 21.0 22.0 23
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## pd.fillna()
# + pycharm={"name": "#%%\n"}
df.fillna(value=0)
print(df)
"""
A B C D
2013-01-01 0 0.0 2.0 3
2013-01-02 4 5.0 0.0 7
2013-01-03 8 9.0 10.0 11
2013-01-04 12 13.0 14.0 15
2013-01-05 16 17.0 18.0 19
2013-01-06 20 21.0 22.0 23
"""
# + [markdown] pycharm={"name": "#%% md\n"}
# ## pd.isnull()
# + pycharm={"name": "#%%\n"}
df.isnull()
print(df.isnull())
"""
A B C D
2013-01-01 False True False False
2013-01-02 False False True False
2013-01-03 False False False False
2013-01-04 False False False False
2013-01-05 False False False False
2013-01-06 False False False False
"""
# + pycharm={"name": "#%%\n"}
np.any(df.isnull()) == True
print(np.any(df.isnull()))
# True
# + [markdown] pycharm={"name": "#%% md\n"}
# # Pandas import / export
# ## Read a csv
# + pycharm={"name": "#%%\n"}
import pandas as pd # load the module
# Read the csv (path is relative to the notebook's working directory)
data = pd.read_csv(r'15_read_to/student.csv')
# Print the loaded data
print(data)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Save the data as a pickle
# + pycharm={"name": "#%%\n"}
data.to_pickle('15_read_to/student.pickle')
# + [markdown] pycharm={"name": "#%% md\n"}
# # Pandas concatenation: concat
# + [markdown] pycharm={"name": "#%% md\n"}
# ## axis (direction of concatenation)
# axis=0 is the default, so concat stacks frames vertically when no axis is given.
# + pycharm={"name": "#%%\n"}
import pandas as pd
import numpy as np
# Define the datasets
df1 = pd.DataFrame(np.ones((3,4))*0, columns=['a','b','c','d'])
df2 = pd.DataFrame(np.ones((3,4))*1, columns=['a','b','c','d'])
df3 = pd.DataFrame(np.ones((3,4))*2, columns=['a','b','c','d'])
print(df1)
print(df2)
print(df3)
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Vertical concatenation with concat
# + pycharm={"name": "#%%\n"}
res = pd.concat([df1, df2, df3], axis=0)
# Print the result — note the repeated 0,1,2 row labels carried over from the inputs
print(res)
#      a    b    c    d
# 0  0.0  0.0  0.0  0.0
# 1  0.0  0.0  0.0  0.0
# 2  0.0  0.0  0.0  0.0
# 0  1.0  1.0  1.0  1.0
# 1  1.0  1.0  1.0  1.0
# 2  1.0  1.0  1.0  1.0
# 0  2.0  2.0  2.0  2.0
# 1  2.0  2.0  2.0  2.0
# 2  2.0  2.0  2.0  2.0
# + [markdown] pycharm={"name": "#%% md\n"}
# ## ignore_index (reset the index)
# + pycharm={"name": "#%%\n"}
# Same as the previous example, but ignore_index=True produces a fresh 0..8 index
res = pd.concat([df1, df2, df3], axis=0, ignore_index=True)
# Print the result
print(res)
#      a    b    c    d
# 0  0.0  0.0  0.0  0.0
# 1  0.0  0.0  0.0  0.0
# 2  0.0  0.0  0.0  0.0
# 3  1.0  1.0  1.0  1.0
# 4  1.0  1.0  1.0  1.0
# 5  1.0  1.0  1.0  1.0
# 6  2.0  2.0  2.0  2.0
# 7  2.0  2.0  2.0  2.0
# 8  2.0  2.0  2.0  2.0
# + [markdown] pycharm={"name": "#%% md\n"}
# ## join (how overlapping columns are combined)
# + pycharm={"name": "#%%\n"}
import pandas as pd
import numpy as np
# Define the datasets — note the column sets ('a'..'d' vs 'b'..'e') and the
# overlapping row labels (2, 3) are chosen to show outer vs inner behaviour.
df1 = pd.DataFrame(np.ones((3,4))*0, columns=['a','b','c','d'], index=[1,2,3])
df2 = pd.DataFrame(np.ones((3,4))*1, columns=['b','c','d','e'], index=[2,3,4])
print(df1)
print(df2)
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Vertical "outer" concatenation of df1 and df2
# + pycharm={"name": "#%%\n"}
res = pd.concat([df1, df2], axis=0, join='outer')
print(res)
#      a    b    c    d    e
# 1  0.0  0.0  0.0  0.0  NaN
# 2  0.0  0.0  0.0  0.0  NaN
# 3  0.0  0.0  0.0  0.0  NaN
# 2  NaN  1.0  1.0  1.0  1.0
# 3  NaN  1.0  1.0  1.0  1.0
# 4  NaN  1.0  1.0  1.0  1.0
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Vertical "inner" concatenation of df1 and df2 (keeps only shared columns)
# + pycharm={"name": "#%%\n"}
res = pd.concat([df1, df2], axis=0, join='inner')
# Print the result
print(res)
#      b    c    d
# 1  0.0  0.0  0.0
# 2  0.0  0.0  0.0
# 3  0.0  0.0  0.0
# 2  1.0  1.0  1.0
# 3  1.0  1.0  1.0
# 4  1.0  1.0  1.0
# Reset the index and print the result
res = pd.concat([df1, df2], axis=0, join='inner', ignore_index=True)
print(res)
#      b    c    d
# 0  0.0  0.0  0.0
# 1  0.0  0.0  0.0
# 2  0.0  0.0  0.0
# 3  1.0  1.0  1.0
# 4  1.0  1.0  1.0
# 5  1.0  1.0  1.0
# + [markdown] pycharm={"name": "#%% md\n"}
# ## join_axes (align the result to a given axis)
# Newer pandas versions removed join_axes; the equivalents below are used instead.
# + pycharm={"name": "#%%\n"}
import pandas as pd
import numpy as np
# Define the datasets
df1 = pd.DataFrame(np.ones((3,4))*0, columns=['a','b','c','d'], index=[1,2,3])
df2 = pd.DataFrame(np.ones((3,4))*1, columns=['b','c','d','e'], index=[2,3,4])
print(df1)
print(df2)
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Horizontal merge aligned to `df1.index`
# + pycharm={"name": "#%%\n"}
# how='left' + index joins reproduces the old join_axes=[df1.index] behaviour.
res=pd.merge(df1, df2, how='left', left_index=True, right_index=True)
# Print the result
print(res)
#      a    b    c    d    b    c    d    e
# 1  0.0  0.0  0.0  0.0  NaN  NaN  NaN  NaN
# 2  0.0  0.0  0.0  0.0  1.0  1.0  1.0  1.0
# 3  0.0  0.0  0.0  0.0  1.0  1.0  1.0  1.0
# Without join_axes, concat keeps the union of both indexes — print the result
res = pd.concat([df1, df2], axis=1)
print(res)
#      a    b    c    d    b    c    d    e
# 1  0.0  0.0  0.0  0.0  NaN  NaN  NaN  NaN
# 2  0.0  0.0  0.0  0.0  1.0  1.0  1.0  1.0
# 3  0.0  0.0  0.0  0.0  1.0  1.0  1.0  1.0
# 4  NaN  NaN  NaN  NaN  1.0  1.0  1.0  1.0
# + [markdown] pycharm={"name": "#%% md\n"}
# ## append (adding rows)
# FIX: DataFrame.append() was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat() is the supported replacement and produces identical results here.
# Like append, this only stacks vertically; there is no horizontal variant.
# + pycharm={"name": "#%%\n"}
import pandas as pd
import numpy as np
# Define the datasets
df1 = pd.DataFrame(np.ones((3,4))*0, columns=['a','b','c','d'])
df2 = pd.DataFrame(np.ones((3,4))*1, columns=['a','b','c','d'])
df3 = pd.DataFrame(np.ones((3,4))*1, columns=['a','b','c','d'])
s1 = pd.Series([1,2,3,4], index=['a','b','c','d'])
print(df1)
print(df2)
print(df3)
# + pycharm={"name": "#%%\n"}
# Stack df2 below df1, reset the index, and print the result
res = pd.concat([df1, df2], ignore_index=True)
print(res)
#      a    b    c    d
# 0  0.0  0.0  0.0  0.0
# 1  0.0  0.0  0.0  0.0
# 2  0.0  0.0  0.0  0.0
# 3  1.0  1.0  1.0  1.0
# 4  1.0  1.0  1.0  1.0
# 5  1.0  1.0  1.0  1.0
# + pycharm={"name": "#%%\n"}
# Stack several frames at once: df2 and df3 below df1, resetting the index
res = pd.concat([df1, df2, df3], ignore_index=True)
print(res)
#      a    b    c    d
# 0  0.0  0.0  0.0  0.0
# 1  0.0  0.0  0.0  0.0
# 2  0.0  0.0  0.0  0.0
# 3  1.0  1.0  1.0  1.0
# 4  1.0  1.0  1.0  1.0
# 5  1.0  1.0  1.0  1.0
# 6  1.0  1.0  1.0  1.0
# 7  1.0  1.0  1.0  1.0
# 8  1.0  1.0  1.0  1.0
# + pycharm={"name": "#%%\n"}
# Append a Series as one new row: turn it into a single-row DataFrame first
res = pd.concat([df1, s1.to_frame().T], ignore_index=True)
print(res)
#      a    b    c    d
# 0  0.0  0.0  0.0  0.0
# 1  0.0  0.0  0.0  0.0
# 2  0.0  0.0  0.0  0.0
# 3  1.0  2.0  3.0  4.0
# + [markdown] pycharm={"name": "#%% md\n"}
# # Pandas merging: merge
# ## Merge on a single key
# + pycharm={"name": "#%%\n"}
import pandas as pd
# Define the datasets and print them
left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
                             'A': ['A0', 'A1', 'A2', 'A3'],
                             'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
                              'C': ['C0', 'C1', 'C2', 'C3'],
                              'D': ['D0', 'D1', 'D2', 'D3']})
print(left)
# NOTE(review): the column order shown below reflects an older pandas;
# recent versions preserve insertion order ('key', 'A', 'B').
#    A   B key
# 0  A0  B0  K0
# 1  A1  B1  K1
# 2  A2  B2  K2
# 3  A3  B3  K3
print(right)
#    C   D key
# 0  C0  D0  K0
# 1  C1  D1  K1
# 2  C2  D2  K2
# 3  C3  D3  K3
# Merge on the 'key' column (default how='inner') and print the result
res = pd.merge(left, right, on='key')
print(res)
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Merge on two keys
# #### There are 4 merge methods: how = ['left', 'right', 'outer', 'inner']; the default is how='inner'.
# + pycharm={"name": "#%%\n"}
import pandas as pd
# Define the datasets and print them
left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
                             'key2': ['K0', 'K1', 'K0', 'K1'],
                             'A': ['A0', 'A1', 'A2', 'A3'],
                             'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
                              'key2': ['K0', 'K0', 'K0', 'K0'],
                              'C': ['C0', 'C1', 'C2', 'C3'],
                              'D': ['D0', 'D1', 'D2', 'D3']})
print(left)
#    A   B key1 key2
# 0  A0  B0   K0   K0
# 1  A1  B1   K0   K1
# 2  A2  B2   K1   K0
# 3  A3  B3   K2   K1
print(right)
#    C   D key1 key2
# 0  C0  D0   K0   K0
# 1  C1  D1   K1   K0
# 2  C2  D2   K1   K0
# 3  C3  D3   K2   K0
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Merge on the key1 and key2 columns and print all four results: ['left', 'right', 'outer', 'inner']
# + pycharm={"name": "#%%\n"}
# inner: keep only (key1, key2) pairs present in BOTH frames.
# right's duplicated (K1, K0) rows each match left's single (K1, K0) row.
res = pd.merge(left, right, on=['key1', 'key2'], how='inner')
print(res)
#    A   B key1 key2   C   D
# 0  A0  B0   K0   K0  C0  D0
# 1  A2  B2   K1   K0  C1  D1
# 2  A2  B2   K1   K0  C2  D2
# + pycharm={"name": "#%%\n"}
# outer: union of key pairs; unmatched sides are filled with NaN.
res = pd.merge(left, right, on=['key1', 'key2'], how='outer')
print(res)
#      A    B key1 key2    C    D
# 0   A0   B0   K0   K0   C0   D0
# 1   A1   B1   K0   K1  NaN  NaN
# 2   A2   B2   K1   K0   C1   D1
# 3   A2   B2   K1   K0   C2   D2
# 4   A3   B3   K2   K1  NaN  NaN
# 5  NaN  NaN   K2   K0   C3   D3
# + pycharm={"name": "#%%\n"}
# left: keep every row of `left`, matching `right` where possible.
res = pd.merge(left, right, on=['key1', 'key2'], how='left')
print(res)
#    A   B key1 key2    C    D
# 0  A0  B0   K0   K0   C0   D0
# 1  A1  B1   K0   K1  NaN  NaN
# 2  A2  B2   K1   K0   C1   D1
# 3  A2  B2   K1   K0   C2   D2
# 4  A3  B3   K2   K1  NaN  NaN
# + pycharm={"name": "#%%\n"}
# right: keep every row of `right`, matching `left` where possible.
res = pd.merge(left, right, on=['key1', 'key2'], how='right')
print(res)
#      A    B key1 key2   C   D
# 0   A0   B0   K0   K0  C0  D0
# 1   A2   B2   K1   K0  C1  D1
# 2   A2   B2   K1   K0  C2  D2
# 3  NaN  NaN   K2   K0  C3  D3
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Indicator
# indicator=True adds a column recording the source of every merged row
# + pycharm={"name": "#%%\n"}
import pandas as pd
# Define the datasets and print them
df1 = pd.DataFrame({'col1':[0,1], 'col_left':['a','b']})
df2 = pd.DataFrame({'col1':[1,2,2],'col_right':[2,2,2]})
print(df1)
#    col1 col_left
# 0     0        a
# 1     1        b
# + pycharm={"name": "#%%\n"}
print(df2)
#    col1  col_right
# 0     1          2
# 1     2          2
# 2     2          2
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Merge on col1 with indicator=True and print the result
# + pycharm={"name": "#%%\n"}
# The default indicator column is named '_merge'.
res = pd.merge(df1, df2, on='col1', how='outer', indicator=True)
print(res)
#    col1 col_left  col_right      _merge
# 0   0.0        a        NaN   left_only
# 1   1.0        b        2.0        both
# 2   2.0      NaN        2.0  right_only
# 3   2.0      NaN        2.0  right_only
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Give the indicator column a custom name and print the result
# + pycharm={"name": "#%%\n"}
res = pd.merge(df1, df2, on='col1', how='outer', indicator='indicator_column')
print(res)
#    col1 col_left  col_right indicator_column
# 0   0.0        a        NaN        left_only
# 1   1.0        b        2.0             both
# 2   2.0      NaN        2.0       right_only
# 3   2.0      NaN        2.0       right_only
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Merge on the index
# + pycharm={"name": "#%%\n"}
import pandas as pd
# Define the datasets and print them
left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
                     'B': ['B0', 'B1', 'B2']},
                    index=['K0', 'K1', 'K2'])
right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
                      'D': ['D0', 'D2', 'D3']},
                     index=['K0', 'K2', 'K3'])
print(left)
#      A   B
# K0  A0  B0
# K1  A1  B1
# K2  A2  B2
print(right)
#      C   D
# K0  C0  D0
# K2  C2  D2
# K3  C3  D3
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Merge on the indexes of both frames with how='outer', then print
# + pycharm={"name": "#%%\n"}
# FIX: this statement was trapped inside the markdown cell above and never ran.
res = pd.merge(left, right, left_index=True, right_index=True, how='outer')
print(res)
#       A    B    C    D
# K0   A0   B0   C0   D0
# K1   A1   B1  NaN  NaN
# K2   A2   B2   C2   D2
# K3  NaN  NaN   C3   D3
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Merge on the indexes of both frames with how='inner', then print
# + pycharm={"name": "#%%\n"}
res = pd.merge(left, right, left_index=True, right_index=True, how='inner')
print(res)
#      A   B   C   D
# K0  A0  B0  C0  D0
# K2  A2  B2  C2  D2
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Resolving overlapping column names
# + pycharm={"name": "#%%\n"}
import pandas as pd
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Define the datasets
# + pycharm={"name": "#%%\n"}
# Both frames have an 'age' column, which would collide after the merge.
boys = pd.DataFrame({'k': ['K0', 'K1', 'K2'], 'age': [1, 2, 3]})
girls = pd.DataFrame({'k': ['K0', 'K0', 'K3'], 'age': [4, 5, 6]})
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Use suffixes to disambiguate the overlapping 'age' columns
# + pycharm={"name": "#%%\n"}
res = pd.merge(boys, girls, on='k', suffixes=['_boy', '_girl'], how='inner')
print(res)
#    age_boy   k  age_girl
# 0        1  K0         4
# 1        1  K0         5
# + [markdown] pycharm={"name": "#%% md\n"}
# # Pandas plotting
# + pycharm={"name": "#%%\n"}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Create a Series
# + pycharm={"name": "#%%\n"}
# Generate 1000 random data points
data = pd.Series(np.random.randn(1000),index=np.arange(1000))
print(data)
# + pycharm={"name": "#%%\n"}
#np.arange(1000)
# + pycharm={"name": "#%%\n"}
#np.random.randn(1000)
# + pycharm={"name": "#%%\n"}
# Accumulate the data so the plot is easier to read.
# (The result is discarded here; it only displays in a notebook.)
data.cumsum()
# + pycharm={"name": "#%%\n"}
# pandas data can be visualized directly via .plot()
data.cumsum().plot()
plt.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# ## DataFrame visualization
# + pycharm={"name": "#%%\n"}
data = pd.DataFrame(
    np.random.randn(1000,4),
    index=np.arange(1000),
    columns=list("ABCD")
    )
print(data)
# + pycharm={"name": "#%%\n"}
data.cumsum()
data.cumsum().plot()
plt.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Assigning data to x and y explicitly
# + pycharm={"name": "#%%\n"}
ax = data.cumsum().plot.scatter(x='A',y='B',color='DarkBlue',label='Class1')
# Draw the next scatter on the SAME axes by passing ax=ax
data.cumsum().plot.scatter(x='A',y='C',color='LightGreen',label='Class2',ax=ax)
plt.show()
# + [markdown] pycharm={"name": "#%% md\n"}
# # Why is Numpy still slow? Are you using it correctly?
# ## Why use Numpy?
# ## How a Numpy array is laid out
# ## Operations along an axis
# + pycharm={"name": "#%%\n"}
import time
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Same data, two memory layouts: C (row-major) vs F (column-major).
a = np.zeros((200, 200), order='C')
b = np.zeros((200, 200), order='F')
N = 9999
def f1(a):
    # Concatenate the C-ordered array N times (result discarded; timing only)
    for _ in range(N):
        np.concatenate((a, a), axis=0)
def f2(b):
    # Same operation on the F-ordered array
    for _ in range(N):
        np.concatenate((b, b), axis=0)
t0 = time.time()
f1(a)
t1 = time.time()
f2(b)
t2 = time.time()
print((t1-t0)/N) # 0.000040
print((t2-t1)/N) # 0.000070
# + [markdown] pycharm={"name": "#%% md\n"}
# np.vstack((a,a))
# np.concatenate((a,a), axis=0)
# + pycharm={"name": "#%%\n"}
a = np.zeros((200, 200), order='C')
N = 9999
def f1(a):
    for _ in range(N):
        np.vstack((a,a))
def f2(b):
    # NOTE(review): this closes over the global `a`; the parameter `b` is
    # unused — presumably intentional so both timings use the same array.
    for _ in range(N):
        np.concatenate((a, a), axis=0)
t0 = time.time()
f1(a)
t1 = time.time()
f2(b)
t2 = time.time()
print((t1-t0)/N)
print((t2-t1)/N)
# + pycharm={"name": "#%%\n"}
# Fancy indexing: selecting rows is faster than columns for C-ordered arrays.
indices = np.random.randint(0, 100, size=10, dtype=np.int32)
a[indices, :] # 0.000003
a[:, indices] # 0.000006
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Copies are slow, views are fast
# + pycharm={"name": "#%%\n"}
a = np.arange(1, 7).reshape((3,2))
a_view = a[:2]          # slice: a view sharing a's memory
a_copy = a[:2].copy()   # explicit copy: independent memory
a_copy[1,1] = 0         # does NOT change a
print(a)
a_view[1,1] = 0         # DOES change a, since the view shares memory
print(a)
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Timing copy vs view
# + pycharm={"name": "#%%\n"}
a = np.zeros((1000, 1000))
b = np.zeros((1000, 1000))
N = 99
def f1(a):
    # In-place multiply: no new array is allocated
    for _ in range(N):
        a *= 2 # same as a[:] *= 2
def f2(b):
    # Rebinding: allocates a fresh array every iteration
    for _ in range(N):
        b = 2*b
t0 = time.time()
f1(a)
t1 = time.time()
f2(b)
t2 = time.time()
print('%f' % ((t1-t0)/N)) # f1: 0.000837
print('%f' % ((t2-t1)/N)) # f2: 0.001346
# + [markdown] pycharm={"name": "#%% md\n"}
# #### Compared with flatten, ravel is extremely fast (it returns a view when possible)
# + pycharm={"name": "#%%\n"}
def f1(a):
    for _ in range(N):
        a.flatten()   # always copies
def f2(b):
    for _ in range(N):
        b.ravel()     # returns a view when the data is contiguous
t0 = time.time()
f1(a)
t1 = time.time()
f2(b)
t2 = time.time()
print('%f' % ((t1-t0)/N)) # 0.001059
print('%f' % ((t2-t1)/N)) # 0.000000
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Selecting data
# + pycharm={"name": "#%%\n"}
# All of these produce VIEWS (no data is copied):
a_view1 = a[1:2, 3:6]    # slice
a_view2 = a[:100]        # same idea
a_view3 = a[::2]         # strided slice
a_view4 = a.ravel()      # mentioned above
# + pycharm={"name": "#%% raw\n"} active=""
#
# a_copy1 = a[[1,4,6], [2,4,6]]   # select with index lists
# a_copy2 = a[[True, True], [False, True]]  # select with a mask
# a_copy3 = a[[1,2], :]  # rows 1,2 are contiguous, but this is still a copy
# a_copy4 = a[a[1,:] != 0, :]  # fancy indexing
# a_copy5 = a[np.isnan(a), :]  # fancy indexing
#
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 1. Use np.take() instead of index-list selection.
# + pycharm={"name": "#%%\n"}
a = np.random.rand(100, 10)
N = 99
indices = np.random.randint(0, 100, size=1)
def f1(a):
    for _ in range(N):
        _ = np.take(a, indices, axis=0)
def f2(b):
    for _ in range(N):
        _ = b[indices]
t0 = time.time()
f1(a)
t1 = time.time()
# NOTE(review): `b` here is the 1000x1000 array from an earlier cell, not the
# new 100x10 `a` — the two timings therefore run on different arrays; verify.
f2(b)
t2 = time.time()
print('%f' % ((t1-t0)/N)) # 0.000393
print('%f' % ((t2-t1)/N)) # 0.000569
# + [markdown] pycharm={"name": "#%% md\n"}
# #### 2. Use np.compress() instead of mask selection.
# + pycharm={"name": "#%% raw\n"} active=""
#
# mask = a[:, 0] < 0.5
# def f1(a):
#     for _ in range(N):
#         _ = np.compress(mask, a, axis=0)
#
# def f2(b):
#     for _ in range(N):
#         _ = b[mask]
# t0 = time.time()
# f1(a)
# t1 = time.time()
# f2(b)
# t2 = time.time()
# print('%f' % ((t1-t0)/N)) # 0.028109
# print('%f' % ((t2-t1)/N)) # 0.031013
#
#
# + [markdown] pycharm={"name": "#%% md\n"}
# ## The very useful `out` parameter
# + pycharm={"name": "#%%\n"}
# From slowest to fastest: each variant avoids more temporary allocations.
a = a + 1 # 0.035230
a = np.add(a, 1) # 0.032738
a += 1 # 0.011219
np.add(a, 1, out=a) # 0.008843
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Giving data a name: structured arrays vs DataFrames
# + pycharm={"name": "#%%\n"}
# Same named-column data as a numpy structured array (a) and a DataFrame (b).
a = np.zeros(3, dtype=[('foo', np.int32), ('bar', np.float16)])
b = pd.DataFrame(np.zeros((3, 2), dtype=np.int32), columns=['foo', 'bar'])
b['bar'] = b['bar'].astype(np.float16)
"""
# a
array([(0, 0.), (0, 0.), (0, 0.)],
      dtype=[('foo', '<i4'), ('bar', '<f2')])
# b
   foo  bar
0    0  0.0
1    0  0.0
2    0  0.0
"""
def f1(a):
    # Field access on the structured array is nearly free
    for _ in range(N):
        a['bar'] *= a['foo']
def f2(b):
    # Column access on the DataFrame carries pandas overhead
    for _ in range(N):
        b['bar'] *= b['foo']
t0 = time.time()
f1(a)
t1 = time.time()
f2(b)
t2 = time.time()
print('%f' % ((t1-t0)/N)) # 0.000003
print('%f' % ((t2-t1)/N)) # 0.000508
|
numpy&pandas/pandas.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import relevant libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# read grades dataset, save as a pandas dataframe
grades = pd.read_csv('grades.csv')
# display first few rows of grades
grades.head()
# test for missing values in grades dataset
# NOTE(review): lowest_grade() below still calls fillna(0), which this assert
# implies is unnecessary — confirm whether missing grades can actually occur.
assert grades.isnull().sum().sum() == 0, 'there are missing values'
def lowest_grade(student_id):
    """Find lowest grade across all exams for student with given student_id.

    Missing exam grades are treated as zeros, so any missing exam makes the
    lowest grade 0.
    """
    is_student = grades['student_id'] == student_id
    student_grades = grades.loc[is_student, 'grade']
    return student_grades.fillna(0).min()
# lowest grade for student with student_id 1
# (bare expressions like these display their value in a notebook)
lowest_grade(1)
# grades for student with student_id 1
grades.loc[grades['student_id'] == 1]
|
notebooks/ex_files_python_data_mistakes/Exercise Files/01_03_writing_tests.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Integrating a generic, python tool in pyiron (Level B, recommended approach)
# If we need to repeatedly, perform a combination of processes to return a value, a plot, etc., it might make sense to create a Job class for it. Here, we create a simple workflow using available python tools for a specific problem.
#
# Problem:
# we are given as input the positions of some particles, plus an energy constant.
# the goal of the workflow:
# - calculate the mean harmonic energy
# - return a 3D plot of the cloud of particles.
from pyiron_base import PythonTemplateJob, DataContainer
import numpy as np
import math
import matplotlib.pyplot as plt
class harmonicEnergy(PythonTemplateJob):
    """Pyiron job computing the mean pairwise harmonic energy of a particle
    cloud, with an optional 3D scatter plot of the particles.

    Expected input fields (read from a file via `read_input`):
      - positions: list of 3D particle coordinates
      - equilibrium_lengh: equilibrium pair distance (misspelled key kept
        for compatibility with existing input files)
      - energy_constant: prefactor for the harmonic energy
    """

    def __init__(self, project, job_name):
        super(harmonicEnergy, self).__init__(project, job_name)
        self.input = DataContainer(table_name='inputs')

    def read_input(self, file_name):
        """Populate self.input from the given input file (e.g. input.yml)."""
        self.input.read(file_name)

    def calc_mean_harmonic_energy(self):
        """Average (d - d_eq)^2 over all unique particle pairs, then scale by
        the energy constant.  Stores `avg_d2` and `avg_energy` on the job."""
        positions = self.input.positions.to_builtin()  # convert once, not per pair
        counter = 0
        d2 = 0.0
        for i, p1 in enumerate(positions):
            p1_vec = np.array(p1)
            for p2 in positions[i + 1:]:
                d = np.array(p2) - p1_vec
                d_val = math.sqrt(np.dot(d, d)) - self.input.equilibrium_lengh
                d2 += d_val ** 2
                counter += 1
        self.avg_d2 = d2 / counter
        self.avg_energy = self.input.energy_constant * self.avg_d2

    def scatter_plot(self):
        """Show the particle cloud as a 3D scatter plot."""
        positions = self.input.positions.to_builtin()
        x = [p[0] for p in positions]
        y = [p[1] for p in positions]
        # BUG FIX: the z coordinate previously reused index 1 (the y column),
        # so the plot collapsed onto the y=z plane.
        z = [p[2] for p in positions]
        fig = plt.figure()
        ax = fig.add_subplot(projection='3d')
        ax.scatter(x, y, z, marker='o')

    # This function is executed when the job runs
    def run_static(self):
        self.calc_mean_harmonic_energy()
        with self.project_hdf5.open("output/generic") as h5out:
            h5out["avg_harmonic_energy"] = self.avg_energy
        self.status.finished = True
# ## Exercise 1:
# Create a project and a job of type harmonicEnergy, which reads in `input.yml` and calculates the mean harmonic energy. Also create the 3d scatter plot of the particles.
# ## Exercise 2:
# How to access the `energy_tot` from the output of the job.
|
Exercises/2_0_custom_python_job.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: clouds113_kernel
# language: python
# name: clouds113_kernel
# ---
# ## Hyperparameter Tuning: First phase
#
# Run **SHERPA**. Fix batchsize = 1024. Fix Adam. Do not shuffle the input data as that takes a lot of time. <br>
# *First phase:* Start with 3 epochs each. Here we can already discard some models. <br>
# *Second phase:* Run 3 epochs with a parameter space confined to the four best models from phase 1. Add a learning rate scheduler a la Stephan Rasp (Divide learning rate by 20 every two epochs). <br>
# *Third phase:* Run 6 epochs with the two best models from phase 2. With Sherpa, vary only the learning rate scheduler. Use cross-validation here to truly get a good estimate of generalization error!. <br>
#
# To vary:
# - Learning rate (Learning rate scheduler)
# - Model layers (only max 1-4 hidden layers)
# - Regularization methods
# - Hidden Units
# - Activation Functions (not the last)
# +
# Ran with 800GB (750GB should also be fine)
import sys
import numpy as np
import time
import pandas as pd
import matplotlib.pyplot as plt
import os
import copy
import gc
#Import sklearn before tensorflow (static Thread-local storage)
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, BatchNormalization
from tensorflow.keras.regularizers import l1_l2
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation
random_num = np.random.randint(500000)
print(random_num)
t0 = time.time()
path = '/pf/b/b309170'
path_data = path + '/my_work/icon-ml_data/cloud_cover_parameterization/grid_cell_based_QUBICC_R02B05/based_on_var_interpolated_data'
#Adapt these parameters
OPTIMIZER = 'rmsprop'
qubicc_only = False
print(OPTIMIZER)
if qubicc_only:
path_figures = path + '/workspace_icon-ml/cloud_cover_parameterization/grid_cell_based_QUBICC_R02B05/qubicc_only/figures'
path_model = path + '/workspace_icon-ml/cloud_cover_parameterization/grid_cell_based_QUBICC_R02B05/qubicc_only/saved_models'
else:
path_figures = path + '/workspace_icon-ml/cloud_cover_parameterization/grid_cell_based_QUBICC_R02B05/figures'
path_model = path + '/workspace_icon-ml/cloud_cover_parameterization/grid_cell_based_QUBICC_R02B05/saved_models'
# Add path with my_classes to sys.path
sys.path.insert(0, path + '/workspace_icon-ml/cloud_cover_parameterization/')
# Add sherpa
sys.path.insert(0, path + '/my_work/sherpa')
import sherpa
import sherpa.algorithms.bayesian_optimization as bayesian_optimization
# Reloading custom file to incorporate changes dynamically
import importlib
import my_classes
importlib.reload(my_classes)
from my_classes import read_mean_and_std
from my_classes import TimeOut
import datetime
# Minutes per fold
timeout = 2120
# For logging purposes
days = 'all_days'
# Maximum amount of epochs for each model
epochs = 3
# Set seed for reproducibility
seed = 10
tf.random.set_seed(seed)
# For store_mean_model_biases
VERT_LAYERS = 31
gpus = tf.config.experimental.list_physical_devices('GPU')
# tf.config.experimental.set_visible_devices(gpus[3], 'GPU')
# -
# Won't run on a CPU node
try:
    # Prevents crashes of the code
    physical_devices = tf.config.list_physical_devices('GPU')
    tf.config.set_visible_devices(physical_devices[0], 'GPU')
    # Allow the growth of memory Tensorflow allocates (limits memory usage overall)
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
except Exception:
    # Deliberately best-effort: GPU configuration simply fails on CPU nodes.
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still propagate.
    pass
scaler = StandardScaler()
# ### Load the data
# +
# input_narval = np.load(path_data + '/cloud_cover_input_narval.npy')
# input_qubicc = np.load(path_data + '/cloud_cover_input_qubicc.npy')
# output_narval = np.load(path_data + '/cloud_cover_output_narval.npy')
# output_qubicc = np.load(path_data + '/cloud_cover_output_qubicc.npy')
# -
# NARVAL samples come first, then QUBICC samples.
input_data = np.concatenate((np.load(path_data + '/cloud_cover_input_narval.npy'),
                             np.load(path_data + '/cloud_cover_input_qubicc.npy')), axis=0)
output_data = np.concatenate((np.load(path_data + '/cloud_cover_output_narval.npy'),
                              np.load(path_data + '/cloud_cover_output_qubicc.npy')), axis=0)
# Number of NARVAL samples, used to optionally drop them below.
samples_narval = np.load(path_data + '/cloud_cover_output_narval.npy').shape[0]
if qubicc_only:
    input_data = input_data[samples_narval:]
    output_data = output_data[samples_narval:]
(samples_total, no_of_features) = input_data.shape
(samples_total, no_of_features)
# *Temporal cross-validation*
#
# Split into 2-weeks increments (when working with 3 months of data). It's 25 day increments with 5 months of data. <br>
# 1.: Validate on increments 1 and 4 <br>
# 2.: Validate on increments 2 and 5 <br>
# 3.: Validate on increments 3 and 6
#
# --> 2/3 training data, 1/3 validation data
# +
training_folds = []
validation_folds = []
two_week_incr = samples_total//6
for i in range(3):
    # Note that this is a temporal split since time was the first dimension in the original tensor
    # Fold i validates on increments i and i+3; training is everything else.
    first_incr = np.arange(samples_total//6*i, samples_total//6*(i+1))
    second_incr = np.arange(samples_total//6*(i+3), samples_total//6*(i+4))
    validation_folds.append(np.append(first_incr, second_incr))
    training_folds.append(np.arange(samples_total))
    training_folds[i] = np.delete(training_folds[i], validation_folds[i])
# -
# -
# ### Define the model
# Output activation: clamp predictions to the valid cloud-cover range [0, 100].
def my_act_fct(x):
    lower_bounded = K.maximum(x, 0)
    return K.minimum(lower_bounded, 100)
# + [markdown] jupyter={"outputs_hidden": true}
# ### 3-fold cross-validation
#
# Actually only set i=1 here
# +
# By decreasing timeout we make sure every fold gets the same amount of time
# After all, data-loading took some time (Have 3 folds, 60 seconds/minute)
# timeout = timeout - 1/3*1/60*(time.time() - t0)
timeout = timeout - 1/60*(time.time() - t0)
t0 = time.time()
#We loop through the folds
# NOTE: the loop body deletes input_data/output_data, so it can only run for a
# single fold per process — range(1,2) restricts it to fold i=1 on purpose.
for i in range(1,2):
    filename = 'cross_validation_cell_based_fold_%d'%(i+1)
    #Standardize according to the fold
    scaler.fit(input_data[training_folds[i]])
    #Load the data for the respective fold and convert it to tf data
    #The scaled data lies in [-15, 89]
    input_train = scaler.transform(input_data[training_folds[i]])
    input_valid = scaler.transform(input_data[validation_folds[i]])
    del input_data
    gc.collect()
    # The output lies in [0, 100]
    output_train = output_data[training_folds[i]]
    output_valid = output_data[validation_folds[i]]
    # Clear memory (Reduces memory requirement to 151 GB)
    del output_data, first_incr, second_incr, validation_folds, training_folds
    gc.collect()
    # Column-based: batchsize of 128
    # Possibly better to use .apply(tf.data.experimental.copy_to_device("/gpu:0")) before prefetch
    # I'm not shuffling for hyperparameter tuning
    # NOTE(review): despite the comment above, .shuffle IS applied here — confirm intent.
    train_ds = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(input_train),
                                tf.data.Dataset.from_tensor_slices(output_train))) \
                .shuffle(10**5, seed=seed) \
                .batch(batch_size=1024, drop_remainder=True) \
                .prefetch(1)
    # Clear memory
    del input_train, output_train
    gc.collect()
    # No need to add prefetch.
    # tf data with batch_size=10**5 makes the validation evaluation 10 times faster
    valid_ds = tf.data.Dataset.zip((tf.data.Dataset.from_tensor_slices(input_valid),
                                tf.data.Dataset.from_tensor_slices(output_valid))) \
                .batch(batch_size=10**5, drop_remainder=True)
    # Clear memory (Reduces memory requirement to 151 GB)
    del input_valid, output_valid
    gc.collect()
    # #Feed the model
    # model.compile(
    #     optimizer=tf.keras.optimizers.Adam(learning_rate=0.002),
    #     loss=tf.keras.losses.MeanSquaredError()
    # )
    # #Train the model
    # #    time_callback = TimeOut(t0, timeout*(i+1))
    # time_callback = TimeOut(t0, timeout)
    # history = model.fit(train_ds, epochs=epochs, verbose=2, validation_data=valid_ds,
    #                     callbacks=[time_callback])
    # #    history = model.fit(train_ds, epochs=epochs, validation_data=valid_ds, callbacks=[time_callback])
    # #Save the model
    # #Serialize model to YAML
    # model_yaml = model.to_yaml()
    # with open(os.path.join(path_model, filename+".yaml"), "w") as yaml_file:
    #     yaml_file.write(model_yaml)
    # #Serialize model and weights to a single HDF5-file
    # model.save(os.path.join(path_model, filename+'.h5'), "w")
    # print('Saved model to disk')
    # #Plot the training history
    # if len(history.history['loss']) > len(history.history['val_loss']):
    #     del history.history['loss'][-1]
    # pd.DataFrame(history.history).plot(figsize=(8,5))
    # plt.grid(True)
    # plt.ylabel('Mean Squared Error')
    # plt.xlabel('Number of epochs')
    # plt.savefig(os.path.join(path_figures, filename+'.pdf'))
    # with open(os.path.join(path_model, filename+'.txt'), 'a') as file:
    #     file.write('Results from the %d-th fold\n'%(i+1))
    #     file.write('Training epochs: %d\n'%(len(history.history['val_loss'])))
    #     file.write('Weights restored from epoch: %d\n\n'%(1+np.argmin(history.history['val_loss'])))
# -
def save_model(study, today, optimizer):
    """Persist a SHERPA study's completed trials to a dated results directory.

    Filters out non-COMPLETED trials, re-indexes by Trial-ID, blanks out the
    per-layer hyperparameters that don't exist for a trial's model depth, and
    saves the study.  The directory name embeds `today`, `optimizer` and the
    module-level `random_num` to keep concurrent runs from colliding.
    """
    out_path = '/pf/b/b309170/workspace_icon-ml/cloud_cover_parameterization/grid_cell_based_QUBICC_R02B05/sherpa_results/'+\
    today+'_'+optimizer+'_'+str(random_num)
    study.results = study.results[study.results['Status']=='COMPLETED'] #To specify results
    study.results.index = study.results['Trial-ID'] #Trial-ID serves as a better index
    # Remove those hyperparameters that actually do not appear in the model
    # (layers >= depth were never added, so their activation/bn entries are noise)
    for i in range(1, max(study.results['Trial-ID']) + 1):
        depth = study.results.at[i, 'model_depth']
        for j in range(depth, 5): #Or up to 8
            study.results.at[i, 'activation_%d'%j] = None
            study.results.at[i, 'bn_%d'%j] = None
    # Create the directory and save the SHERPA-output in it
    try:
        os.mkdir(out_path)
    except OSError:
        print('Creation of the directory %s failed' % out_path)
    else:
        print('Successfully created the directory %s' % out_path)
    study.save(out_path)
# +
# Good Reference: https://arxiv.org/pdf/1206.5533.pdf (Bengio), https://arxiv.org/pdf/2004.10652.pdf (Ott)
# lrelu = lambda x: relu(x, alpha=0.01)
# For Leaky_ReLU:
from tensorflow import nn
def lrelu(x):
    # Leaky ReLU with a fixed slope of 0.01 for negative inputs
    return nn.leaky_relu(x, alpha=0.01)

# Search space for SHERPA.  activation_k / bn_k apply to layer k; entries for
# layers beyond a trial's model_depth are unused (save_model blanks them out).
parameters = [sherpa.Ordinal('num_units', [16, 32, 64, 128, 256, 512]), #No need to vary these per layer. Should add 512.
              sherpa.Discrete('model_depth', [2, 5]), #Originally [2,8] although 8 was never truly tested
              sherpa.Choice('activation_1', ['relu', 'elu', 'tanh', nn.leaky_relu, lrelu]), #Adding SeLU is trickier
              sherpa.Choice('activation_2', ['relu', 'elu', 'tanh', nn.leaky_relu, lrelu]),
              sherpa.Choice('activation_3', ['relu', 'elu', 'tanh', nn.leaky_relu, lrelu]),
              sherpa.Choice('activation_4', ['relu', 'elu', 'tanh', nn.leaky_relu, lrelu]),
              sherpa.Continuous('lrinit', [1e-4, 1e-0], 'log'),
              sherpa.Ordinal('epsilon', [1e-8, 1e-7, 0.1, 1]), # epsilon passed to RMSprop (numerical-stability constant)
              sherpa.Continuous('dropout', [0., 0.5]),
              sherpa.Continuous('l1_reg', [0, 0.01]),
              sherpa.Continuous('l2_reg', [0, 0.01]),
              sherpa.Ordinal('bn_1', [0, 1]),
              sherpa.Ordinal('bn_2', [0, 1]),
              sherpa.Ordinal('bn_3', [0, 1]),
              sherpa.Ordinal('bn_4', [0, 1])]
# +
# max_num_trials is left unspecified, so the optimization will run until the end of the job-runtime
# good_hyperparams = pd.DataFrame({'num_units': [256], 'model_depth': [3], 'activation_1': [lrelu], 'activation_2':[lrelu],
#                                 'activation_3':['relu'], 'activation_4':['relu'], 'activation_5':['relu'], 'activation_6':['relu'],
#                                 'activation_7':['relu'], 'lrinit':[0.008726], 'epsilon':[0.1], 'dropout':[0.184124],
#                                 'l1_reg':[0.000162], 'l2_reg':[0.007437]})
# I expect an objective of around 61.
# alg = bayesian_optimization.GPyOpt(initial_data_points=good_hyperparams)
alg = bayesian_optimization.GPyOpt()
study = sherpa.Study(parameters=parameters, algorithm=alg, lower_is_better=True)
# +
# Usually setting patience=8
today = str(datetime.date.today())[:7] # YYYY-MM

# Main SHERPA loop: build, train and score one Keras model per trial, then
# persist the study results after every trial via save_model().
for trial in study:
    # Create the model
    model = Sequential()
    par = trial.parameters

    # Input layer
    model.add(Dense(units=par['num_units'], activation=par['activation_1'], input_dim=no_of_features,
                    kernel_regularizer=l1_l2(l1=par['l1_reg'], l2=par['l2_reg'])))
    if (par['bn_1']==1):
        model.add(BatchNormalization()) #There's some debate on whether to use it before or after the activation fct

    # Hidden layers (layer indices 2 .. model_depth-1)
    for j in range(2, par['model_depth']):
        model.add(Dense(units=par['num_units'], activation=par['activation_'+str(j)],
                        kernel_regularizer=l1_l2(l1=par['l1_reg'], l2=par['l2_reg'])))
        model.add(Dropout(par['dropout'])) #After every hidden layer we (potentially) add a dropout layer
        if (par['bn_'+str(j)]==1):
            model.add(BatchNormalization())

    # Output layer
    model.add(Dense(1, activation='linear',
                    kernel_regularizer=l1_l2(l1=par['l1_reg'], l2=par['l2_reg'])))

    # FIX: `lr` was a deprecated alias and is removed in modern Keras;
    # `learning_rate` is the supported keyword and behaves identically.
    optimizer = RMSprop(learning_rate=par['lrinit'], epsilon=par['epsilon'])
    model.compile(loss='mse', optimizer=optimizer)

    # Train the model; the sherpa callback reports val_loss as the objective
    model.fit(train_ds, epochs=epochs, verbose=2, validation_data=valid_ds,
              callbacks=[study.keras_callback(trial, objective_name='val_loss')]) ## 3 epochs
    study.finalize(trial)
    save_model(study, today, OPTIMIZER)
|
q1_cell_based_qubicc_r2b5/source_code/tests/hyperparameter_tuning_sherpa/cross_validation_testing_networks-rmsprop.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: geospatial
# language: python
# name: geospatial
# ---
# Connect to the local PostgreSQL sample database.
# NOTE(review): credentials are inline placeholders — move to env vars/config
# for anything beyond a local demo.
import psycopg2
import pandas as pd
from pandas import Series, DataFrame
connection = psycopg2.connect(database="01_dvdrental", user="postgres", password="<PASSWORD>")
cursor = connection.cursor()
# Show the product-group lookup table.
cursor.execute("select * from PRODUCT_GROUP")
DataFrame(cursor.fetchall(), columns = ["group_id", "group_name"])
# Show the product table; cast price to int for cleaner display.
cursor.execute("select * from PRODUCT")
df = DataFrame(cursor.fetchall(), columns = ["product_id", "product_name", "price", "group_id"])
df = df.astype({'price':'int'})
df
# COUNT(*) OVER () — an empty window spans the whole result set, so every row
# carries the total row count.
cursor.execute("SELECT * , COUNT(*) OVER() FROM PRODUCT")
df = DataFrame(cursor.fetchall(), columns = ["product_id", "product_name", "price", "group_id", "count"])
df = df.astype({'price':'int'})
df
# AVG(...) OVER (PARTITION BY group) — per-group average price on every row.
cursor.execute("SELECT A.PRODUCT_NAME , A.PRICE , B.GROUP_NAME , AVG (A.PRICE) OVER (PARTITION BY B.GROUP_NAME) FROM PRODUCT A INNER JOIN PRODUCT_GROUP B ON (A.GROUP_ID = B.GROUP_ID);")
df = DataFrame(cursor.fetchall(), columns = ["product_name", "price", "group_name", "avg"])
df = df.astype({'avg':'int','price': 'int'})
df
# ROW_NUMBER() — unique sequential number per group, ordered by price.
cursor.execute("select A.PRODUCT_NAME , B.GROUP_NAME , A.PRICE , ROW_NUMBER () OVER ( PARTITION BY B.GROUP_NAME ORDER BY A.PRICE) FROM PRODUCT A INNER JOIN PRODUCT_GROUP B ON (A.GROUP_ID = B.GROUP_ID);")
df = DataFrame(cursor.fetchall(), columns = ["product_name", "group_name", "price", "row_number"])
df = df.astype({'price':'int'})
df
# RANK() — ties share a rank and leave gaps afterwards.
cursor.execute("select A.PRODUCT_NAME , B.GROUP_NAME , A.PRICE , RANK () OVER ( PARTITION BY B.GROUP_NAME ORDER BY A.PRICE) FROM PRODUCT A INNER JOIN PRODUCT_GROUP B ON (A.GROUP_ID = B.GROUP_ID);")
df = DataFrame(cursor.fetchall(), columns = ["product_name", "group_name", "price", "rank"])
df = df.astype({'price':'int'})
df
# NOTE(review): commit is unnecessary for read-only SELECTs — presumably left
# over from experimentation; harmless but confirm before removing.
connection.commit()
# DENSE_RANK() — like RANK() but without gaps after ties.
cursor.execute("select A.PRODUCT_NAME , B.GROUP_NAME , A.PRICE , dense_rank () OVER ( PARTITION BY B.GROUP_NAME ORDER BY A.PRICE) FROM PRODUCT A INNER JOIN PRODUCT_GROUP B ON (A.GROUP_ID = B.GROUP_ID);")
df = DataFrame(cursor.fetchall(), columns = ["product_name", "group_name", "price", "dense_rank"])
df = df.astype({'price':'int'})
df
# FIRST_VALUE(price) — lowest price of each group (window ordered ascending).
cursor.execute("select A.PRODUCT_NAME, B.GROUP_NAME, A.PRICE , FIRST_VALUE (A.PRICE) over (PARTITION BY B.GROUP_NAME ORDER BY A.PRICE ) AS LOWEST_PRICE_PER_GROUP FROM PRODUCT A INNER JOIN PRODUCT_GROUP B ON (A.GROUP_ID = B.GROUP_ID); ")
df = DataFrame(cursor.fetchall(), columns = ["product_name", "group_name", "price", "lowest_price_per_group"])
df = df.astype({'lowest_price_per_group':'int', 'price':'int'})
df
# LAST_VALUE(price) — highest price per group; the explicit UNBOUNDED frame is
# required because the default frame ends at the current row.
cursor.execute("select A.PRODUCT_NAME, B.GROUP_NAME, A.PRICE , LAST_VALUE (A.PRICE) over (PARTITION BY B.GROUP_NAME ORDER BY A.PRICE RANGE BETWEEN UNBOUNDED preceding AND UNBOUNDED FOLLOWING) AS HIGHEST_PRICE_PER_GROUP FROM PRODUCT A INNER JOIN PRODUCT_GROUP B ON (A.GROUP_ID = B.GROUP_ID);")
df = DataFrame(cursor.fetchall(), columns = ["product_name", "group_name", "price", "upper_price_per_group"])
df = df.astype({'upper_price_per_group':'int', 'price':'int'})
df
# LAG(price, 1) — previous price within the group plus the difference to it
# (NULL on the first row of each group).
cursor.execute("SELECT A.PRODUCT_NAME, B.GROUP_NAME, A.PRICE, LAG (A.PRICE, 1) OVER (PARTITION BY B.GROUP_NAME ORDER BY A.PRICE ) AS PREV_PRICE , A.PRICE - LAG (A.PRICE, 1) OVER (PARTITION BY B.GROUP_NAME ORDER BY A.PRICE ) AS CUR_PREV_DIFF FROM PRODUCT A INNER JOIN PRODUCT_GROUP B ON (A.GROUP_ID = B.GROUP_ID);")
df = DataFrame(cursor.fetchall(), columns = ["product_name", "group_name", "price", "prev_price", "cur_prev_diff"])
df = df.astype({'price':'int'})
df
# LEAD(price, 1) — next price within the group plus the difference to it.
# Fixed: the DataFrame columns were mislabeled "prev_price"/"cur_prev_diff"
# although the query computes NEXT_PRICE/CUR_NEXT_DIFF.
cursor.execute("SELECT A.PRODUCT_NAME, B.GROUP_NAME, A.PRICE , LEAD (A.PRICE, 1) OVER ( PARTITION BY B.GROUP_NAME ORDER BY A.PRICE ) AS NEXT_PRICE , A.PRICE - LEAD (A.PRICE, 1) OVER ( PARTITION BY B.GROUP_NAME ORDER BY A.PRICE )AS CUR_NEXT_DIFF FROM PRODUCT A INNER JOIN PRODUCT_GROUP B ON (A.GROUP_ID = B.GROUP_ID);")
df = DataFrame(cursor.fetchall(), columns = ["product_name", "group_name", "price", "next_price", "cur_next_diff"])
df = df.astype({'price':'int'})
df
|
content/blog/bigdata/sql/sql_4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Observations and Insights
#
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# Study data files
mouse_metadata_path = "Mouse_metadata.csv"
study_results_path = "Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the two tables on the shared "Mouse ID" key; 'outer' keeps every row
# from both sides so no measurement is silently dropped.
merged_data = pd.merge(mouse_metadata, study_results, on="Mouse ID", how="outer")
# Display the data table for preview
merged_data
# +
# Count the distinct mice present in the merged dataset.
mice = merged_data['Mouse ID'].value_counts()
mice_count = mice.size
print(f"There are {mice_count} mice.")
# -
# Identify mouse IDs that carry more than one record for the same timepoint.
dup_rows = merged_data.duplicated(subset=['Mouse ID', 'Timepoint'])
duplicate_mice_data = merged_data.loc[dup_rows, 'Mouse ID'].unique()
# Display the offending IDs.
duplicate_mice_df = pd.DataFrame(duplicate_mice_data)
duplicate_mice_df
# Drop every record that belongs to a duplicated mouse. Filtering on the ID
# (rather than .drop_duplicates) removes ALL rows of the conflicting mouse and
# keeps working if further duplicate IDs ever appear.
clean_df = merged_data[~merged_data['Mouse ID'].isin(duplicate_mice_data)]
clean_df
# Report how many rows the cleaning removed.
unclean_value_count = len(merged_data)
clean_value_count = len(clean_df)
print(f"The original dataset was {unclean_value_count} rows long. The clean dataset is {clean_value_count} rows long.")
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
print(f"Drug regimens used in this trial: {clean_df['Drug Regimen'].unique()}")
# Select the tumor-volume column BEFORE aggregating: aggregating the whole
# frame and indexing afterwards computes statistics over (or, in modern
# pandas, fails on) the non-numeric columns as well.
regimen_volumes = clean_df.groupby('Drug Regimen')["Tumor Volume (mm3)"]
regimen_mean = regimen_volumes.mean()
print(regimen_mean)
regimen_median = regimen_volumes.median()
print(regimen_median)
regimen_variance = regimen_volumes.var()
print(regimen_variance)
regimen_std = regimen_volumes.std()
print(regimen_std)
regimen_sem = regimen_volumes.sem()
print(regimen_sem)
# +
# Assemble the per-regimen statistics into one summary table.
summary_regimen_table = pd.DataFrame({"Mean": regimen_mean, "Median":regimen_median, "Variance":regimen_variance, "Standard Deviation": regimen_std, "SEM": regimen_sem})
summary_regimen_table
# Using the aggregation method, produce the same summary statistics in a single line
regimen_group = clean_df.groupby('Drug Regimen')
summary_regimen_table_agg = regimen_group["Tumor Volume (mm3)"].agg(['mean','median','var','std','sem'])
summary_regimen_table_agg
# -
# ## Bar and Pie Charts
# +
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pandas.
# Count rows (measurements) per regimen and sort descending for readability.
mice_group_count = merged_data.groupby(["Drug Regimen"]).count()["Mouse ID"]
mice_group_count = mice_group_count.sort_values(ascending=False)
pandas_bar = mice_group_count.plot(kind="bar", figsize=(16,8), color='g', fontsize = 16)
plt.xlabel("Drug Regimens", fontsize = 16)
plt.ylabel("Number of Mice / Treatments", fontsize = 16)
plt.title("Number of Mice Treated (Total Measurements Taken)", fontsize = 20)
plt.tight_layout()
plt.show()
mice_group_count
# +
# Generate a bar plot showing the total number of measurements taken on each drug regimen using pyplot.
# Array is needed for this plot
mice_len = (merged_data.groupby(["Drug Regimen"])["Mouse ID"].count()).tolist()
x_axis = np.arange(len(mice_group_count))
plt.subplots(figsize=(16,8))
plt.bar(x_axis, mice_len, color='g', alpha=0.75, align='center')
tick_locations = [value for value in x_axis]
# NOTE(review): mice_len follows groupby (alphabetical) order, matching the
# hard-coded labels below — but mice_group_count above was re-sorted by count,
# so the two bar charts are NOT in the same order. Verify this is intended.
plt.xticks(tick_locations, ['Capomulin', 'Ceftamin', 'Infubinol', 'Ketapril', 'Naftisol', 'Placebo', 'Propriva',
           'Ramicane', 'Stelasyn', 'Zoniferol'], rotation='vertical', fontsize = 16)
plt.xlim(-0.75, len(x_axis)-0.25)
plt.ylim(0, max(mice_len)+15)
plt.title("Number of Mice Treated (Total Measurements Taken)",fontsize = 20)
plt.xlabel("Drug Regimens",fontsize = 16)
plt.ylabel("Number of Mice / Treatments",fontsize = 16)
# +
# Generate a pie plot showing the distribution of female versus male mice using pandas
# Find the gender column ~ 'Sex'
#print(merged_data.columns)
gender_group = merged_data.groupby(['Mouse ID', 'Sex'])
gender_df = pd.DataFrame(gender_group.size())
# Sanity check: exactly one (Mouse ID, Sex) pair per mouse.
if mice_count == len(gender_group):
    print("Correct number of mice detected in gender grouping.")
else:
    print("Error in mouse count.")
print('---------------------------------------------------')
# Per-sex totals and percentage shares.
mouse_sex = pd.DataFrame(gender_df.groupby(["Sex"]).count())
mouse_sex.columns = ["Total"]
mouse_sex["Percentage of Sex"] = (100*(mouse_sex["Total"]/mouse_sex["Total"].sum()))
print(mouse_sex)
colors = ['lightpink', 'skyblue']
plot = mouse_sex.plot.pie(y='Total',figsize=(16,8), colors = colors, startangle=295, explode = (0.1, 0), autopct="%1.1f%%", shadow = True)
plt.title('Male vs Female Mice Count',fontsize = 20)
plt.ylabel('Gender')
plt.show()
# +
# Generate a pie plot showing the distribution of female versus male mice using pyplot
# Fixed: the shares were hard-coded ([49.799197, 50.200803]); derive labels and
# sizes from mouse_sex so the chart stays correct if the dataset changes.
labels = mouse_sex.index.tolist()                   # ['Female', 'Male'] (groupby sort order)
sizes = mouse_sex["Percentage of Sex"].tolist()
colors = ['lightpink', 'skyblue']
fig1, ax1 = plt.subplots(figsize=(16,8))
plt.pie(sizes, explode= (0.1, 0),labels= labels, colors= colors, autopct= "%1.1f%%", shadow=True, startangle= 295,)
plt.title('Male vs Female Mice Count',fontsize = 20)
plt.ylabel('Gender')
plt.show()
# -
# ## Quartiles, Outliers and Boxplots
# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
capomulin_df = merged_data.loc[merged_data["Drug Regimen"] == "Capomulin", :]
ramicane_df = merged_data.loc[merged_data["Drug Regimen"] == "Ramicane", :]
infubinol_df = merged_data.loc[merged_data["Drug Regimen"] == "Infubinol", :]
ceftamin_df = merged_data.loc[merged_data["Drug Regimen"] == "Ceftamin", :]
# Merge each per-regimen "last timepoint" frame back with the original data to
# recover the tumor volume at that final timepoint.
# Fixed: the original referenced undefined names (Capomulin_df/Capomulin_last
# capitalization mismatches, and *_vol frames that were never assigned),
# raising NameError on every branch.
#1
capomulin_last = capomulin_df.groupby('Mouse ID').max()['Timepoint']
capomulin_final_vol = pd.DataFrame(capomulin_last)
capomulin_merged = pd.merge(capomulin_final_vol, merged_data, on=("Mouse ID","Timepoint"),how="left")
#2
ramicane_last = ramicane_df.groupby('Mouse ID').max()['Timepoint']
ramicane_final_vol = pd.DataFrame(ramicane_last)
ramicane_merged = pd.merge(ramicane_final_vol, merged_data, on=("Mouse ID","Timepoint"),how="left")
#3
infubinol_last = infubinol_df.groupby('Mouse ID').max()['Timepoint']
infubinol_final_vol = pd.DataFrame(infubinol_last)
infubinol_merged = pd.merge(infubinol_final_vol, merged_data, on=("Mouse ID","Timepoint"),how="left")
#4
ceftamin_last = ceftamin_df.groupby('Mouse ID').max()['Timepoint']
ceftamin_final_vol = pd.DataFrame(ceftamin_last)
ceftamin_merged = pd.merge(ceftamin_final_vol, merged_data, on=("Mouse ID","Timepoint"),how="left")
# +
# Put treatments into a list for for loop (and later for plot labels)
treatments = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]
# Create empty list to fill with tumor vol data (for plotting)
capomulin_tumors = []
ramicane_tumors = []
infubinol_tumors = []
ceftamin_tumors = []
# TODO(review): everything below is unimplemented scaffolding — the lists above
# are never filled and no IQR/outlier/plot code exists yet.
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# Locate the rows which contain mice on each drug and get the tumor volumes
# add subset
# Determine outliers using upper and lower bounds
# -
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# ## Line and Scatter Plots
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
# ## Correlation and Regression
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
|
pharmaceutical_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Information Theory
#
# This notebook summarizes first part of [A Tutorial for Information Theory in Neuroscience](https://www.eneuro.org/content/5/3/ENEURO.0052-18.2018) (Pages 1-20), and contains the code to replicate Figures _2-6_ and _8-9(a,b)_. <u>Please refer to the paper for further information.</u>
# ## Contents
# 1. [Introduction to Information Theory](#introduction)
# 2. [Probability Distributions](#prob-dist)
# 3. [Data Binning](#data-binning)
# 4. [Entropy](#entropy)
# 5. [Joint Entropy](#joint-entropy)
# 6. [Conditional Entropy](#conditional-entropy)
# 7. [Mutual Information](#mutual-information)
# 8. [Transfer Entropy](#transfer-entropy)
# 9. [Bias in Entropy and Mutual Information](#bias)
# 10. [Significance Testing](#significance)
# ## 1. Introduction to Information Theory <a name="introduction"></a>
#
# **Information theory (IT)** methods can capture nonlinear interactions, and **do not** require assumptions about the structure of the underlying data. How can subtle interactions among variables and the computations they perform be optimally captured? Answering this question is difficult: **neuroscience experiments involve data with noise and non-linear interactions.** <br>
#
# In IT, one variable provides information about another variable when **knowledge of the first, on average, reduces uncertainty in the second**. `Bits` can be thought of as the average number of yes/no questions required to ascertain the value of a variable. When applying an information theoretic measure to data, the result is not a parameter in a model, but rather a number that quantifies some relationship within the data. <br>
#
# <u>Pros:</u>
# <ul style="color:green">
# <li> Model independent: When applying an information theoretic measure to data, the result is not a parameter in a model, but rather a number that quantifies some relationship within the data. </li>
# <li> It can be applied to any mixture of data types.</li>
# <li> Information theory is capable of detecting linear and nonlinear interactions.</li>
# <li> Information theory is naturally multivariate. </li>
# <li> It produces results in general units of bits.</li>
# </ul>
#
# <u>Cons:</u>
# <ul style="color:red">
# <li> Parameters such as bin sizes involved in the discretization are chosen in the analysis. This choice can affect final results.</li>
# </ul>
# ## 2. Probability Distributions <a name="prob-dist"></a>
#
# A **probability distribution** is a distribution that describes the likelihood of certain outcomes of a random variable or a group of variables. Denoted as $p(A)$. There are two types depending on the data:
#
# - _Discrete data_: Probability Mass Function (PMF)
# - _Continuous data_: Probability Density Function (PDF)
#
# <u>Note: The sum of a PMF and the integral of a PDF must equal 1. </u>
#
# Systems with more than one variable can be described with a **joint probability distribution**.
# If variables $A$ and $B$ are independent, $P(A, B) = P(A) \cdot P(B)$ holds. A **marginal probability distribution** represents the likelihood for the outcomes of a subset of variables in the joint distribution. It can be calculated as:
# <br><br>
# $$
# p(A) = \sum_{B} P(A, B)
# $$
# <br>
#
# Finally, **conditional probability distributions** describe the likelihood to obtain outcomes of certain variables assumming the other variables are known. The formula is as follows:
# <br><br>
# $$
# p(A | B) = \frac{P(A, B)}{P(B)}
# $$
# <br>
# ## 3. Data Binning <a name="data-binning"></a>
#
# The probability of a state is estimated as the total number of observations of that state, divided by the total number of observations for all states:
# <br><br>
# $$
# p(s) = \frac{N(s)}{N_{\text{obs}}},
# $$
# <br>
# where $N(s)$ is the number of experimental observations of state $s$ (frequency distribution) and $N_{\text{obs}}$, the total number of experimental observations. This is a form of **maximum likelihood estimation**.
#
# Enough observations must be performed to adequately sample the space of possible joint states. As a minimum, the number of observations must be greater than the number of possible joint states, though more observations are usually necessary to perform an information theory analysis. We must also assume stationarity in our data: probability distributions do not change over time. If data is continuous, we should transform it into discrete via binning. There are two different types:
#
# - _Uniform width binning_ : Divide the total range of the data into $N_{\text{bins}}$ number of equal-width bins.
# - _Uniform count binning_ : Divide data into $N_{\text{bins}}$ with the same number of counts.
# +
import sys
sys.path.append('../')  # make the local information_theory package importable
from collections import Counter
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
np.random.seed(4321)  # fixed seed so the notebook is reproducible
# +
from information_theory.plots import plot_binnings
# Bimodal sample: two Gaussian modes (means 5 and 10), N/2 points each.
N = 200
dist1 = np.random.normal(5, 1.1, size=(N // 2, ))
dist2 = np.random.normal(10, 1.25, size=(N // 2, ))
data = np.concatenate([dist1, dist2])
# Visualize the discretization of the sample (Section 3 binning strategies).
plot_binnings(data)
# -
# ## 4. Entropy <a name="entropy"></a>
# **Fundamental information theory quantity**. It measures the uncertainty contained in a variable. The entropy $H(X)$ of a discrete random variable corresponds to:
# <br><br>
# $$
# H(X) = \sum_{x \in X} p(x) \log_2 \bigg (\frac{1}{p(x)} \bigg),
# $$
# <br>
#
# where $x \in X$ refers to all of the possible states $x$ can take. The entropy of a fair coin $C$, where $p(\text{heads}) = 0.5$ and $p(\text{tails}) = 0.5$, is computed as:
# <br><br>
# $$
# H(C) = \frac{1}{2} \log_2 \bigg (\frac{1}{\frac{1}{2}} \bigg) + \frac{1}{2} \log_2 \bigg (\frac{1}{\frac{1}{2}} \bigg) = \log_2(2) = 1
# $$
# <br>
#
# For systems with probability distributions that are more concentrated, the entropy is lower, while for systems with evenly spread probability distributions, the entropy is higher. Conversely, if a variable is equally likely to be in many different states, it has high uncertainty. Entropy will be 0 whenever a variable is perfectly concentrated. Moreover, it takes only positive values.
#
# <u>Note: the _uniform counts binning procedure_, will produce a uniform probability distribution, which will **maximize** the entropy. </u>
#
#
#
# +
from information_theory.metrics import entropy
from information_theory.plots import plot_entropy

# Three example distributions over four states: strongly concentrated,
# moderately spread, and uniform.
p1 = np.array([0.9, 0.01, 0.08, 0.01])
p2 = np.array([0.5, 0.15, 0.25, 0.1])
p3 = np.array([0.25, 0.25, 0.25, 0.25])
probabilities = [p1, p2, p3]
# Entropy of each distribution (the uniform one maximizes it).
models_entropy = [entropy(dist) for dist in probabilities]
plot_entropy(probabilities, models_entropy)
# -
# ## 5. Joint Entropy <a name="joint-entropy"></a>
# The joint entropy $H(X, Y)$ of two discrete random variables is given by:
# <br><br>
# $$
# H(X, Y) = \sum_{x \in X, y \in Y}p(x, y)\log_2 \bigg (\frac{1}{p(x, y)} \bigg)
# $$
# <br>
# For two independent coins $C_1$ and $C_2$, their joint entropy equals to:
# <br><br>
# $$
# H(C_1, C_2) = 4 \bigg [ \frac{1}{4} \log_2 \big ( \frac{1}{\frac{1}{4}}\big ) \bigg ] = \log_2 (4) = 2
# $$
# <br>
# In general, when the $X$ and $Y$ variable are independent, the joint probability entropy of the two variables is just the sum of the individual entropies.
# +
from information_theory.metrics import joint_entropy
from information_theory.plots import plot_joint_entropy

# Example 2x2 joint tables: uniform, skewed, and perfectly coupled (diagonal).
p1 = np.array([[0.25, 0.25], [0.25, 0.25]])
p2 = np.array([[0.3, 0], [0.2, 0.5]])
p3 = np.array([[0.5, 0], [0, 0.5]])
probabilities = [p1, p2, p3]
# Joint entropy of every example table.
models_joint_entropy = [joint_entropy(table) for table in probabilities]
plot_joint_entropy(probabilities, models_joint_entropy)
# -
# ## 6. Conditional Entropy <a name="conditional-entropy"></a>
# The **conditional entropy** quantifies the average uncertainty in a variable given the state of another variable. The conditional entropy of two discrete random variables, $H(X|Y)$ is given by:
# <br><br>
# $$
# H(X|Y) = \sum_{x \in X, y \in Y}p(x, y)\log_2 \bigg (\frac{1}{p(x | y)} \bigg)
# $$
# <br>
#
# The conditional entropy between two fair coins $C_1$ and $C_2$ equals to:
# <br><br>
# $$
# H(C_1|C_2) = 4 \bigg [ \frac{1}{4} \log_2 \big ( \frac{1}{\frac{1}{2}}\big ) \bigg ] = \log_2 (2) = 1
# $$
# <br>
# Because the coins are independent, the conditional entropy of the first coin given the second coin should be the same as the entropy of the first coin alone. The relationship between entropy, joint entropy and conditional entropy can be written as:
# <br><br>
# $$
# H(X,Y) = H(X) + H(Y|X),
# $$
# <br>
# regardless of whether $X$ and $Y$ are independent or not.
# +
from information_theory.metrics import conditional_entropy
from information_theory.plots import plot_conditional_entropy

# Same three example joint tables as in the joint-entropy section.
p1 = np.array([[0.25, 0.25], [0.25, 0.25]])
p2 = np.array([[0.3, 0], [0.2, 0.5]])
p3 = np.array([[0.5, 0], [0, 0.5]])
probabilities = [p1, p2, p3]
# Conditional entropy of every example table.
models_conditional_entropy = [conditional_entropy(table) for table in probabilities]
plot_conditional_entropy(probabilities, models_conditional_entropy)
# -
# ## 7. Mutual Information <a name="mutual-information"></a>
#
# If learning the state of one variable reduces our uncertainty in another variable on average, then the first variable provides information about the second variable. Mutual information betwen two random variables $X$ and $Y$, denoted as $I(X; Y)$, can be computed as:
# <br><br>
# $$
# I(X; Y) = H(X) - H(X|Y) = \sum_{x \in X, y \in Y}p(x, y)\log_2 \bigg (\frac{p(x, y)}{p(x)p(y)} \bigg)
# $$
# <br>
# For independent variables, the argument of the logarithm becomes one for all states, which produces an information of zero. This agrees with the intuition, because independent variables cannot provide information about each other.
# <br>
#
# Mutual information is symmetric: $I(X; Y) = I(Y; X)$. In other words, the information $Y$ provides about $X$ is equal to the information X provides about Y. <br>
#
# It is natural to expand mutual information to systems of three or more variables. For instance, we might ask how much information two neurons provide about a stimulus or behavior together instead of individually. The most straightforward method for measuring the information between three variables is to use mutual information between two variables, but make one of the two variables a joint variable of two variables: <br><br>
# <br><br>
# $$
# I(X_1, X_2; Y) = H(X_1, X_2) - H(X_1, X_2|Y) = \sum_{x_1 \in X_1, x_2 \in X_2, y \in Y}p(x_1, x_2, y)\log_2 \bigg (\frac{p(x_1, x_2, y)}{p(x_1, x_2)p(y)} \bigg)
# $$
# <br>
#
# Also, we can expand it by considering the mutual information between two variables conditioned on a third variable, which is known as conditioned mutual information. It allows us to examine interactions between two variables, while taking into account the effects of a third variable. It is helpful when examining causal relations.
# <br><br>
# $$
# I(X; Y | Z) = H(X|Z) - H(X|Y, Z) = \sum_{x \in X, y \in Y, z \in Z}p(x, y, z)\log_2 \bigg (\frac{p(x, y | z)}{p(x|z)p(y|z)} \bigg)
# $$
# <br>
#
# +
from information_theory.metrics import mutual_information_from_table
from information_theory.plots import plot_mutual_information

# Same three example joint tables; only the diagonal one carries full MI.
p1 = np.array([[0.25, 0.25], [0.25, 0.25]])
p2 = np.array([[0.3, 0], [0.2, 0.5]])
p3 = np.array([[0.5, 0], [0, 0.5]])
probabilities = [p1, p2, p3]
# Mutual information of every example table.
models_mutual_information = [mutual_information_from_table(table) for table in probabilities]
plot_mutual_information(probabilities, models_mutual_information)
# +
from information_theory.metrics import mutual_information_from_data
from information_theory.plots import plot_linear_and_nonlinear
# Relationships to compare: identity (linear) and an inverted parabola (nonlinear).
f1 = lambda x: x
f2 = lambda x: - 4 * (x - 0.5) ** 2 + 1
N = 1000
x = np.random.random(size=(N, ))
# Dataset 1: linear relation with Gaussian noise.
mean_linear, std_linear = 0, 0.1
X_linear = x + np.random.normal(mean_linear, std_linear, size=(N,))
Y_linear = f1(x) + np.random.normal(mean_linear, std_linear, size=(N,))
# Dataset 2: parabolic relation with Gaussian noise.
mean_nonlinear1, std_nonlinear1 = 0, 0.1
X_nonlinear1 = x + np.random.normal(mean_nonlinear1, std_nonlinear1, size=(N,))
Y_nonlinear1 = f2(x) + np.random.normal(mean_nonlinear1, std_nonlinear1, size=(N,))
# Dataset 3: circular relation with Gaussian noise.
radius = 0.5
theta = x * 2 * np.pi
# Fixed: the original rebound mean_nonlinear1 here, which was misleading; the
# third noise model now gets its own name (values unchanged).
mean_nonlinear2, std_nonlinear2 = 0, 0.2
X_nonlinear2 = radius * (np.cos(theta) + np.random.normal(mean_nonlinear2, std_nonlinear2, size=(N, )) + 1)
Y_nonlinear2 = radius * (np.sin(theta) + np.random.normal(mean_nonlinear2, std_nonlinear2, size=(N, )) + 1)
data = [
    (X_linear, Y_linear),
    (X_nonlinear1, Y_nonlinear1),
    (X_nonlinear2, Y_nonlinear2),
]
# Pearson correlation only responds to the linear relation...
models_correlations = [
    np.corrcoef(X_linear, Y_linear)[0, 1],
    np.corrcoef(X_nonlinear1, Y_nonlinear1)[0, 1],
    np.corrcoef(X_nonlinear2, Y_nonlinear2)[0, 1]
]
# ...mutual information is computed for all three; the trailing 4 is
# presumably the bin count — confirm in information_theory.metrics.
models_mutual_information = [
    mutual_information_from_data(X_linear, Y_linear, 4),
    mutual_information_from_data(X_nonlinear1, Y_nonlinear1, 4),
    mutual_information_from_data(X_nonlinear2, Y_nonlinear2, 4),
]
plot_linear_and_nonlinear(data, models_correlations, models_mutual_information)
# -
# ## 8. Transfer Entropy <a name="transfer-entropy"></a>
# Transfer entropy measures the information about the future state of a variable ($Y_{\text{future}}$) provided by another variable in the past ($X_{\text{past}}$) given the information provided by the past state of the variable ($Y_{\text{past}}$).
# <br><br>
# $$
# TE(X \rightarrow Y) = I(Y_{\text{future}}; X_{\text{past}} | Y_{\text{past}}) = H(Y_{\text{future}} | Y_{\text{past}}) - H(Y_{\text{future}}|X_{\text{past}}, Y_{\text{past}}) = \sum_{y_f \in Y_{\text{future}}, x_p \in X_{\text{past}}, y_p \in Y_{\text{past}}} p(y_f, x_p, y_p)\log_2 \bigg (\frac{p(y_f, x_p | y_p)}{p(y_f| y_p) p(x_p| y_p)} \bigg)
# $$ <br>
# Transfer entropy is simply conditional mutual information with certain assumptions about temporal order and variable source, which allows it to serve as a measure of causal inference.
# +
from information_theory.metrics import transfer_entropy
from information_theory.plots import plot_transfer_entropy
N = 200
# Case 1: two independent random spike trains.
X1 = np.random.randint(2, size=(N, ))
Y1 = np.random.randint(2, size=(N, ))
spikes_X1, = np.where(X1 == 1)
spikes_Y1, = np.where(Y1 == 1)
# Case 2: Y2 is X2 delayed by one step with 30 randomly chosen bits flipped
# (noisy directed coupling).
X2 = np.random.randint(2, size=(N, ))
idx = np.random.choice(X2.size, size=(30, ), replace=False)
Y2 = np.concatenate([np.random.randint(2, size=(1,)), X2[:-1]])
Y2[idx] = abs(Y2[idx] - 1)  # flip the selected bits
spikes_X2, = np.where(X2 == 1)
spikes_Y2, = np.where(Y2 == 1)
# Case 3: Y3 is exactly X3 delayed by one step (noiseless directed coupling).
X3 = np.random.randint(2, size=(N, ))
Y3 = np.concatenate([np.random.randint(2, size=(1,)), X3[:-1]])
spikes_X3, = np.where(X3 == 1)
spikes_Y3, = np.where(Y3 == 1)
# Case 4: identical deterministic alternating trains — Y4's own past already
# determines its future, so X4's past can add nothing.
X4 = np.array([1 if i % 2 == 0 else 0 for i in range(N)])
Y4 = X4.copy()
spikes_X4, = np.where(X4 == 1)
spikes_Y4, = np.where(Y4 == 1)
# Spike-time tuples for the raster plots.
spikes = [
    (spikes_X1, spikes_Y1),
    (spikes_X2, spikes_Y2),
    (spikes_X3, spikes_Y3),
    (spikes_X4, spikes_Y4)
]
models_transfer_entropy = [
    transfer_entropy(X1, Y1),
    transfer_entropy(X2, Y2),
    transfer_entropy(X3, Y3),
    transfer_entropy(X4, Y4)
]
plot_transfer_entropy(N, spikes, models_transfer_entropy)
# -
# ## 9. Bias in Entropy and Mutual Information <a name="bias"></a>
# <u>Note: Limited data tends to bias results.</u>
#
# These biases can be understood as interplay between the inherent noise associated with any analysis using limited amounts of data and the fact that information theory metrics must be greater than or equal to zero.
# +
from information_theory.biases import entropy_bias, mutual_information_bias

# Sweep of sample sizes to show how limited data biases both estimators.
num_simulations = 1000
N = [10, 50, 100, 150, 250, 500, 750, 1000]
# Entropy: a concentrated and a uniform four-state distribution.
probabilities_entropy = [
    np.array([0.95, 0.04, 0.009, 0.001]),
    np.array([0.25, 0.25, 0.25, 0.25])
]
entropy_true = [entropy(p) for p in probabilities_entropy]
entropy_sampled = entropy_bias(probabilities_entropy, N, num_simulations)
# Mutual information: independent vs correlated 2x2 joint tables (stored flat).
probabilities_mi = [
    np.array([0.25, 0.25, 0.25, 0.25]),
    np.array([0.45, 0.05, 0.05, 0.45])
]
entropy_tables = [p.reshape(2, 2) for p in probabilities_mi]
mutual_information_true = [mutual_information_from_table(t) for t in entropy_tables]
mutual_information_sampled = mutual_information_bias(probabilities_mi, N, num_simulations)
# +
from information_theory.plots import plot_biases
plot_biases(N, entropy_true, entropy_sampled, mutual_information_true, mutual_information_sampled)
# -
# ## 10. Significance Testing <a name="significance"></a>
#
# A real experimental system will rarely produce an information theory measurement of precisely zero even when no interactions actually exist between the variables because of the presence of noise. In addition, bias can alter more the results. A vital step is to assess **which information theory measurements are significant**.
#
# **Surrogate data testing** or Monte Carlo analysis is frequently the solution to significance testing in information theory analyses. This type of analysis is performed by generating surrogate null model data that preserve certain aspects of the data while randomizing other aspects. Once the information theory is applied to the surrogate data, a distribution of null model information theory values can be compared to the information theory of the real data. The proportion of null model information theory values that are found to be larger than or equal to the real-data are then taken as an estimate of the $p$-value for the information theory result from the real data.
#
# Null model data can be created after generating probability distributions by randomizing the number of observations in joint states, while preserving the number of observations for each state of each variable.
# +
# Surrogate-data (permutation) significance test for mutual information.
N_obs = 100    # observations per simulated experiment
N_sur = 10000  # number of surrogate (null-model) datasets
states = [0, 1, 2, 3]
num_states = len(states)
num_bins = 10  # histogram bins for plotting the null distribution
# Interaction strengths: a=0 (independent X,Y) and a=0.3 (weak interaction).
A = np.array([0, 0.3])
pvalue = np.zeros((A.shape))
f, ax = plt.subplots(1, A.shape[0], figsize=(12, 3))
for i, a in enumerate(A):
    # Joint distribution over the four states; a skews probability toward the
    # "matching" outcomes (states 0 and 3).
    p_true = np.array([0.25 * (1 + a), 0.25 * (1 - a), 0.25 * (1 - a), 0.25 * (1 + a)])
    real_data_samples = np.random.choice(states, size=(N_obs, ), p=p_true)
    # Decode each joint state into two binary variables:
    # X = 1 for states {2, 3}, Y = 1 for states {1, 3}.
    X = np.array([1 if state == 2 or state == 3 else 0 for state in real_data_samples])
    Y = np.array([1 if state == 1 or state == 3 else 0 for state in real_data_samples])
    # Empirical 2x2 joint probability table of the real data.
    joint = Counter(zip(X, Y))
    prob_dist_real = np.zeros((2, 2))
    for state, count in joint.items():
        prob_dist_real[state] = count / N_obs
    mi_true = mutual_information_from_table(prob_dist_real)
    # Null distribution: permuting X destroys any X-Y relationship while
    # preserving each variable's marginal counts.
    mi_samples = np.zeros((N_sur, ))
    for j in range(N_sur):
        X_perm = np.random.permutation(X)
        joint_sample = Counter(zip(X_perm, Y))
        prob_dist_sample = np.zeros((2, 2))
        for state, count in joint_sample.items():
            prob_dist_sample[state] = count / N_obs
        mi_samples[j] = mutual_information_from_table(prob_dist_sample)
    # Estimated p-value: fraction of null MI values exceeding the observed MI.
    pvalue[i] = np.sum(mi_true < mi_samples) / N_sur
    # Plot the null histogram with the observed MI marked in red.
    width_mi, bins_mi = np.histogram(mi_samples, bins=num_bins)
    centered_bins_mi= (bins_mi[1:] + bins_mi[:-1]) / 2
    ax[i].plot(centered_bins_mi, width_mi, 'k')
    ax[i].plot([0, centered_bins_mi[0]], [0, width_mi[0]], 'k')
    ax[i].plot([mi_true, mi_true], [0, 8000], 'r')
    ax[i].text(mi_true + 0.002, 4000, f'p = {pvalue[i]}', rotation=-90, color='r')
    ax[i].set_ylim([0, 9000])
    ax[i].set_xlim([0, 0.15])
    ax[i].set_xticks([0, 0.05, 0.1, 0.15])
    ax[i].set_xlabel('Mutual Information (bits)')
    ax[i].set_ylabel('Null models')
    ax[i].set_title('Example Weak Interaction Null Histogram\n' + r'($a=' + f'{a}'+ r'$, $N_{obs}=100$)')
plt.show()
|
notebooks/information_theory.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# [View in Colaboratory](https://colab.research.google.com/github/lozuwa/dojo_machine_learning_emi/blob/master/Lectura_2_Problema_regresion_lineal_Rodrigo_Loza.ipynb)
# + [markdown] id="SjljAyjgPp-C" colab_type="text"
# <h1>Regresion lineal</h1>
# <h2>Por: <NAME></h2>
# + [markdown] id="f2DxEBZmDJdS" colab_type="text"
# # Sección nueva
# + id="Y-pHBP6mFXPM" colab_type="code" colab={}
# Numerical and tensor manipulations.
import numpy as np
# Visualizations.
import matplotlib.pyplot as plt
from matplotlib import style
style.use("ggplot")
# Things look nicer with this configuration.
# %config InlineBackend.figure_format = 'retina'
# + [markdown] id="qCnIsS2pFXPP" colab_type="text"
# <h1>Machine learning</h1>
# <strong>Definición: </strong>"Machine learning es la ciencia que le permite a las computadoras aprender a resolver un problema sin ser explicitamente programadas."
#
# <p>De manera específica, Machine learning utiliza un estimador frequentista para aprender. Este estimador es conocido como <strong>Maximum likelihood estimator</strong>, el cual define una función parametrica que maximiza la probabilidad de que una variable se adecue a una distribucion de probablididad. </p>
#
# <h3>Likelihood function</h3>
# $$ L(\theta) = \prod_{i=1}^{M} p(x_i | \theta) $$
#
# <h3>Maximizar log likelihood</h3>
# $$ \theta = argmax_{\theta}(ln(L(\theta))) $$
#
# <h3>Funcion parametrica</h3>
# $$ f(x;\theta) = \sum_{j=1}^{N} \theta_j X_j $$
#
# <p> De esta manera obtenemos un estimador de parametros como se muestra en <strong>Maximizar log likelihood</strong>. Para encontrar el parametro theta podemos solucionar analiticamente despejando theta. Sin embargo, no siempre es posible encontrar theta, por tanto, recurrimos a resolver el problema con un problema iterativo de optimizacion. En este sentido, supongamos que decidimos utilizar <strong>batch gradient descent</strong>, pero aun hay un componente que nos falta para aplicar este algoritmo. </p>
#
# <h3>Batch gradient descent</h3>
# Repeat {
# $$ \theta = \theta - \alpha * \frac{\partial{J(\theta)}}{\partial{\theta}} $$
# }
# <br>
# Dónde alpha es el coeficiente de aprendizaje, J(theta)/theta es la derivada de la función costo.
#
# <p> Este ingrediente es una funcion costo que nos permita tener control de nuestra optimizacion. Para encontrar esta funcion costo, debemos asumir que una distribucion normal gobierna el modelo estadistico y que la media del mismo es la funcion parametrica, la cual eventualmente, replicará la distribucion de datos del problema. Por tanto, si aplicamos nuevamente <strong>maximum likelihood estimation (MLE)</strong> y despejamos theta, obtenemos una funcion costo.</p>
#
# <h3>Funcion costo derivada de MLE</h3>
# $$ J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} (f(x^{i};\theta) - y^{i})^2 $$
#
# <h2>Resumen</h2>
# En resumen <strong>Machine learning</strong> esta compuesta por 3 ingredientes base:
# <ol>
# <li><strong>Hipótesis:</strong> f(x;theta)</li>
# <li><strong>Función costo:</strong> J(theta)</li>
# <li><strong>Algoritmo de optimización:</strong> Batch gradient descent.</li>
# </ol>
# + [markdown] id="q8nWweeSFXPP" colab_type="text"
# <h2>Regresión lineal</h2>
# <p>Veamos un ejemplo para recordar la anterior clase.</p>
#
# <br>
# <h3>Problema</h3>
# <p>Dada una distribucion de probabilidad conjunta p(x,y), utilizar machine learning para crear un modelo que pueda predecir la distribución p(x,y).</p>
#
# <p>La distribución de probabilidad conjunta p(x,y) esta descrita por la siguiente función:</p>
# $$ f(x) = \sin(2 \pi x) $$
#
# + [markdown] id="ICuSQsABFXPQ" colab_type="text"
# <h2>Base de datos</h2>
# Definamos la base de datos.
# + id="K6yKRWRHFXPQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 378} outputId="eac91cbd-c4fc-42e7-c35b-febb528b9683"
# Synthetic dataset: samples of f(x) = sin(x) on [-3, 3).
# NOTE(review): the markdown above states f(x) = sin(2*pi*x), but the code has
# always sampled sin(x) (and the cubic fit below only works for sin(x) on this
# range).  The plot title said "sin(2pix)", mislabelling the data — corrected
# here to match what is actually plotted.
x = np.arange(-3, 3, 0.1)
y = np.sin(x)  # vectorized; equivalent to [np.sin(i) for i in x]
plt.scatter(x, y)
plt.title("sin(x)")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# + [markdown] id="oumPwB1cFXPS" colab_type="text"
# <h2>Hipótesis</h2>
# Definamos la hipotesis. Primero debemos inicializar los pesos theta, luego definimos la funcion parametrica.
# + id="ZusGuFEuFXPT" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1764ac00-aa55-4dd0-9b74-1c61a6348293"
theta = np.array(np.random.rand()).reshape(1, 1)
theta
# + id="mrpOf7rlFXPV" colab_type="code" colab={}
# Shape the data as column vectors: x and y become (m, 1) matrices so the
# matrix product with the (1, 1) theta below is well defined.
x = np.reshape(x, (-1, 1))
y = np.reshape(np.array(y), (-1, 1))
# + id="6pFYH_p9FXPX" colab_type="code" colab={}
# Untrained hypothesis: h(x) = x . theta.
h_theta_x = np.dot(x, theta)
# + [markdown] id="UOl8AiU6FXPY" colab_type="text"
# <p>Visualizemos h_theta_x y comparemos con p(x, y)</p>
# + id="HX2o3zRRFXPZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 378} outputId="4b200d79-59e7-4aae-9f6f-e46e0c1f97bd"
# Compare the untrained hypothesis (red) with the ground truth (blue).
predictions = plt.scatter(x, h_theta_x, c="r")
ground_truth = plt.scatter(x, y, c="b")
plt.title("h_theta_x vs y")
plt.xlabel("x")
plt.ylabel("h_theta_x / y")
handles = (predictions, ground_truth)
labels = ('Predictions', 'Ground truth')
plt.legend(handles, labels, scatterpoints=1, loc='lower right', ncol=3, fontsize=8)
plt.show()
# + [markdown] id="mE_Nk2I6FXPb" colab_type="text"
# <p>Si analizamos h_theta_x podemos ver que la hipotesis no tiene la forma que deseamos. Nuestra hipotesis no esta entrenada. Continuemos programando los 2 ingredientes restantes.
# <br>
# <h2>Funcion costo</h2>
# Definamos la funcion costo:
# $$ J(\theta) = \frac{1}{2m} \sum_{i=1}^{m} (f(x^{i};\theta) - y^{i})^2 $$
# + id="DTWkqM5uFXPb" colab_type="code" colab={}
def cost_function(x = None, y = None, h_theta_x = None, deriv = None):
    """Mean-squared-error cost J(theta) for linear regression, or its gradient.

    Parameters
    ----------
    x : ndarray, shape (m, n)
        Design matrix, one row per training example.
    y : ndarray, shape (m, 1)
        Target values.
    h_theta_x : ndarray, shape (m, 1)
        Current predictions, x . theta.
    deriv : bool or None
        If True, return the gradient dJ/dtheta (shape (n, 1)); otherwise
        return the scalar cost.  ``None`` is accepted (legacy callers) and
        treated as False.

    Returns
    -------
    float or ndarray
        Scalar cost, or the gradient with respect to theta.
    """
    if deriv is None:  # use identity check, not `== None`
        deriv = False
    m = x.shape[0]
    error = h_theta_x - y
    if deriv:
        # d/dtheta of (1/(2m)) * sum(error^2) is (1/m) * x.T . error.
        # (The original used 1/(2m) here, i.e. half the true gradient; the
        # missing factor was silently absorbed by the learning rate.)
        j_theta = (1 / m) * np.dot(x.T, error)
    else:
        j_theta = (1 / (2 * m)) * np.sum(error ** 2)
    return j_theta
# + [markdown] id="kJeTnL1pFXPd" colab_type="text"
# <h2>Algoritmo de optimizacion</h2>
# Batch gradient descent.
# <br>
# Repeat {
# $$ \theta = \theta - \alpha * \frac{\partial{J(\theta)}}{\partial{\theta}} $$
# }
# + id="mmrdHKyPFXPd" colab_type="code" colab={}
def bgd(theta = None, x = None, y = None, learning_rate = None, epochs = None):
    """Train linear-regression weights with batch gradient descent.

    Runs ``epochs`` full-batch update steps on ``theta`` and records the
    cost of the predictions made at the start of each step.

    Returns the trained ``theta`` and the list of per-epoch costs.
    """
    cost_history = []
    for _ in range(epochs):
        # Predictions with the current weights.
        predictions = np.dot(x, theta)
        # One gradient-descent step.
        gradient = cost_function(x = x, y = y, h_theta_x = predictions, deriv = True)
        theta = theta - learning_rate * gradient
        # Track progress (cost of the pre-update predictions).
        cost_history.append(cost_function(x = x, y = y, h_theta_x = predictions, deriv = False))
    return theta, cost_history
# + [markdown] id="uCD93goxFXPf" colab_type="text"
# <h2>EJERCICIO 1</h2>
# <p>Reemplazar los valores <strong>learning_rate</strong> y <strong>epochs</strong> hasta encontrar la mejor funcion que pueda predecir los datos del problema. Una vez que hayas corrido la siguiente celda, visualiza tus resultados con las siguientes dos celdas. Las cuales te mostraran el progreso de tu funcion costo y el resultado del aprendizaje comparado con los datos del problema. </p>
# <p><strong>NOTA:</strong> Te recomiendo probar con los siguientes rangos.</p>
# <ol>
# <li><strong>Learning rate:</strong> 0.001 - 0.1</li>
# <li><strong>Epochs:</strong>10 - 1000</li>
# </ol>
# + id="47QU0svxFXPg" colab_type="code" colab={}
# NOTE(review): these were blank fill-in placeholders ("# ESCRIBE AQUI TU
# VALOR"), which is a SyntaxError when the notebook is executed as-is.
# Working defaults from the recommended ranges are supplied; students should
# still experiment with other values.
learning_rate = 0.01  # recommended range: 0.001 - 0.1
epochs = 1000         # recommended range: 10 - 1000
# Train the algorithm.
theta_trained, hist_cf = bgd(theta = theta, x = x, y = y, learning_rate = learning_rate, epochs = epochs)
# + id="RQ240wVHFXPj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 378} outputId="4f9c8c7b-207c-4531-b438-a0d5c1e57f0c"
# Plot how the cost decreased over the training iterations.
iteration_axis = list(range(len(hist_cf)))
plt.scatter(iteration_axis, hist_cf)
plt.title("Funcion costo vs iteraciones")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# + id="2I8clv5AFXPl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 378} outputId="ee39b3b2-5df8-4827-c5ac-bbd044e40d56"
# Predictions of the trained linear model (red) against the data (blue).
h_theta_x = np.dot(x, theta_trained)
predictions = plt.scatter(x, h_theta_x, c="r")
ground_truth = plt.scatter(x, y, c="b")
plt.title("h_theta_x vs y")
plt.xlabel("x")
plt.ylabel("h_theta_x / y")
handles = (predictions, ground_truth)
labels = ('Predictions', 'Ground truth')
plt.legend(handles, labels, scatterpoints=1, loc='lower right', ncol=3, fontsize=8)
plt.show()
# + [markdown] id="67j4rGtzFXPn" colab_type="text"
# <p>Como se puede ver en el grafico anterior. El modelo no tiene la <strong>capacidad</strong> de replicar la distribucion de datos p(x,y) porque f(x;theta) solo tiene la capacidad de producir una funcion lineal f(x) = x</p>
#
# <h3>Como solucionamos este problema?</h3>
# <p>Hint: Cómo es una funcion polinómica?</p>
# + [markdown] id="8rQduVVFFXPo" colab_type="text"
# <h3>Solucion: Incrementar el grado del polinomio de la funcion parametrica y de la base de datos.</h3>
# + id="UPWa0DbOFXPp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4b8227f8-b2fd-4167-b885-a343d660a6ce"
# Expand the dataset with polynomial features: columns [x, x^2, x^3].
x2 = np.column_stack((x, x ** 2, x ** 3))
print(x2.shape)
# + id="13LpAg7iFXPs" colab_type="code" colab={}
# One weight per polynomial feature, as a (3, 1) column vector.
theta2 = np.random.rand(3).reshape(-1, 1)
# + [markdown] id="l27POshsFXPv" colab_type="text"
# <h2>EJERCICIO 2</h2>
# <p>Reemplazar los valores <strong>learning_rate</strong> y <strong>epochs</strong> hasta encontrar la mejor funcion que pueda predecir los datos del problema. Una vez que hayas corrido la siguiente celda, visualiza tus resultados con las siguientes dos celdas. Las cuales te mostraran el progreso de tu funcion costo y el resultado del aprendizaje comparado con los datos del problema. </p>
# <p><strong>NOTA:</strong> Te recomiendo probar con los siguientes rangos.</p>
# <ol>
# <li><strong>Learning rate:</strong> 0.001 - 0.1</li>
# <li><strong>Epochs:</strong>10 - 1000</li>
# </ol>
# + id="ghScXOT0FXPv" colab_type="code" colab={}
# NOTE(review): these were blank fill-in placeholders (SyntaxError as-is).
# Working defaults from the recommended ranges are supplied; the cubic
# features reach |x^3| ~ 27, so keep the learning rate small.
learning_rate = 0.001  # recommended range: 0.001 - 0.1
epochs = 1000          # recommended range: 10 - 1000
# Run the training.
theta_trained2, hist_cf2 = bgd(theta = theta2, x = x2, y = y, learning_rate = learning_rate, epochs = epochs)
# + id="hwqVD5XMFXPx" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 378} outputId="c68ebaf8-98d6-4d69-880f-784e72db5833"
# Cost trajectory of the cubic model over the training iterations.
iteration_axis = list(range(len(hist_cf2)))
plt.scatter(iteration_axis, hist_cf2)
plt.title("Funcion costo vs iteraciones")
plt.xlabel("x2")
plt.ylabel("y")
plt.show()
# + id="kr0MQGdzFXPz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 378} outputId="97fbdf85-d63d-4296-d630-fe0ffabbe99b"
# Predictions of the cubic model (red) against the data (blue).
h_theta_x2 = np.dot(x2, theta_trained2)
predictions = plt.scatter(x, h_theta_x2, c="r")
ground_truth = plt.scatter(x, y, c="b")
plt.title("f_x_theta vs y")
plt.xlabel("x2")
plt.ylabel("f_x_theta / y")
handles = (predictions, ground_truth)
labels = ('Predictions', 'Ground truth')
plt.legend(handles, labels, scatterpoints=1, loc='lower right', ncol=3, fontsize=8)
plt.show()
# + [markdown] id="88obTaiRFXP1" colab_type="text"
# <p>Si lo lograste hasta aqui, muy bien! Ahora resolvamos otro problema.</p>
# <br>
# <h2>Problema</h2>
# <p>La base de datos es una función or. Esto quiere decir que los valores de entrada y salida son discretos. Crea un modelo estadistico utilizando machine learning para predecir los valores de salida de esta distribucion de datos.</p>
# + [markdown] id="Tp3MA1pPFXP1" colab_type="text"
# <h3>Base de datos</h3>
# <p>Visualicemos la función or.</p>
# + id="wJhY10rAFXP2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3033446e-baaf-4be5-89bf-9fc69176285d"
# Truth table of the OR function: four input pairs and their outputs.
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([[0], [1], [1], [1]])
print(x.shape, y.shape)
# + id="XD8L67m3FXP3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 378} outputId="28b8de6e-1f0f-4db0-f730-61cbf0906b3c"
# Viz: inputs mapping to 1 in red, the (0, 0) -> 0 input in blue.
plt.scatter(x[1:, 0], x[1:, 1], c="r")
# NOTE(review): both coordinates below are x[0, :]; this only renders
# correctly because the blue point happens to be (0, 0).
plt.scatter(x[0, :], x[0, :], c="b")
plt.xlabel("x1")
plt.ylabel("x2")
plt.title("or")
plt.show()
# + [markdown] id="wAYmdErFFXP5" colab_type="text"
# <h3>Solución</h3>
# + id="1koJrU6sFXP5" colab_type="code" colab={}
theta3 = np.array(np.random.rand(2)).reshape(-1, 1)
# + [markdown] id="ejkDRszeFXP7" colab_type="text"
# <h2>EJERCICIO 3</h2>
# <p>Reemplazar los valores <strong>learning_rate</strong> y <strong>epochs</strong> hasta encontrar la mejor funcion que pueda predecir los datos del problema. Una vez que hayas corrido la siguiente celda, visualiza tus resultados con las siguientes dos celdas. Las cuales te mostraran el progreso de tu funcion costo y el resultado del aprendizaje comparado con los datos del problema. </p>
# <p><strong>NOTA:</strong> Te recomiendo probar con los siguientes rangos.</p>
# <ol>
# <li><strong>Learning rate:</strong> 0.001 - 0.1</li>
# <li><strong>Epochs:</strong>10 - 1000</li>
# </ol>
# + id="zsRU3GvBFXP8" colab_type="code" colab={}
#@title Texto de título predeterminado
# NOTE(review): these were blank fill-in placeholders (SyntaxError as-is).
# Working defaults from the recommended ranges are supplied.
learning_rate = 0.01  # recommended range: 0.001 - 0.1
epochs = 1000         # recommended range: 10 - 1000
# Train the algorithm on the OR truth table.
theta_trained3, hist_cf3 = bgd(theta = theta3, x = x, y = y, learning_rate = learning_rate, epochs = epochs)
# + id="07SfdMzGFXP9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 412} outputId="baf9d7b9-a94f-4e9e-db7b-5bb707140d09"
# Trained weights and the cost trajectory for the OR fit.
print(theta_trained3)
iteration_axis = list(range(len(hist_cf3)))
plt.scatter(iteration_axis, hist_cf3)
plt.title("Funcion costo vs iteraciones")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
# + [markdown] id="lk1mXqjPFXP_" colab_type="text"
# <p>Con el fin de entender el resultado de la prediccion veamos los valores de salida con los parametros entrenados.</p>
# + id="zAY9pjElFXQA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 86} outputId="bd024985-1255-4ecd-b322-9149ec0d641e"
# Raw (continuous) regression outputs for each input pair of the dataset.
h_theta_x3 = np.dot(x, theta_trained3)
for idx, row in enumerate(x):
    print("Para {},{} obtenemos: {}".format(row[0], row[1], h_theta_x3[idx]))
# + [markdown] id="PLojVhO4FXQB" colab_type="text"
# <p>Los resultados son continuos... Nuestro problema requiere no solo valores discretos, sino también división de regiones.</p>
# <h3>Como lo solucionamos?</h3>
# <p>Podemos definir un umbral de decision para nuestros valores. Al menos por ahora.</p>
# <h2>EJERCICIO 4</h2>
# <p>Define un umbral de decision con el fin de replicar los resultados de la funcion or. El objetivo es hacer que el ERROR reporte 0. Te recomiendo utilizar un valor entre 0-2. Comienza desde 2 y baja.</p>
# + id="oEVSz5bLFXQC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 120} outputId="4a29ad60-d1b7-4d5b-875d-09cbd81d4471"
# Variable.
# NOTE(review): `umbral` was a blank fill-in placeholder ("# Escribe aqui tu
# valor."), a SyntaxError when the notebook runs end to end.  The
# least-squares fit predicts roughly 0, 0.67, 0.67 and 1.33 for the four OR
# inputs, so any threshold in (0, ~0.67) yields ERROR: 0%; 0.5 is a natural
# default.
umbral = 0.5
# Predicciones: binarize the regression outputs and compare against OR.
error = 0
h_theta_x3_umbral = [1 if i > umbral else 0 for i in h_theta_x3]
for i, k in zip(x, range(len(h_theta_x3_umbral))):
    print("Para {},{} obtenemos: {} y el valor real es {}".format(i[0], i[1], h_theta_x3_umbral[k], i[0] or i[1]))
    error += np.abs(h_theta_x3_umbral[k] - (i[0] or i[1]))
print("\nERROR: {}%".format(error*25))
# + [markdown] id="pFEZ-6QSFXQE" colab_type="text"
# <p>Veamos la curva de regresion obtenida en el entrenamiento del algoritmo.</p>
# + id="W1WKKTG-FXQF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 378} outputId="69307755-e861-476d-cd27-ae1c4fac1846"
# Parametric curve traced by scaling each trained weight over {-1, 0, 1}.
grid = np.arange(-1, 2)
axis_a = [theta_trained3[0] * t for t in grid]
axis_b = [theta_trained3[1] * t for t in grid]
plt.plot(axis_a, axis_b)
plt.title("Intervalo de decision")
plt.xlabel("theta_trained3_0")
plt.ylabel("theta_trained3_1")
plt.show()
# + [markdown] id="lJTlv5qdFXQG" colab_type="text"
# <p> Como pudiste notar, al intentar resolver el problema utilizando algoritmos de regresion, tuvimos inconvenientes en el formato del algoritmo. Entonces, buscamos maneras de resolverlo utilizando algun truco sencillo. Sin embargo, en problemas reales como <strong>prediccion de cancer vs tamano del tumor</strong>, <strong>sentimiento de un comentario en facebook</strong>, <strong>reconocimiento de rostros en imagenes</strong>, etc. este metodo no funcionara. Por eso debemos escalar el algoritmo de prediccion a resolver un problema de <strong>CLASIFICACION</strong> en el cual las variables de salida son discretas.</p>
# + id="7kRQMdNgFXQH" colab_type="code" colab={}
# + [markdown] id="Hgmg5isCNN-o" colab_type="text"
#
|
Clase 1 - Regresion/Ejercicios_regresion_lineal_en_clase.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.9 64-bit
# name: python369jvsc74a57bd031f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6
# ---
# Bar chart of per-node CPU usage (mean +/- half a standard deviation).
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns

sns.set_theme(style="darkgrid")

# Semicolon-separated CSV; first column is the node index, all values numeric.
df = pd.read_csv('Decentralized_Nodes.csv', delimiter=';', header=0, index_col=0).astype(float)
# +
fig, ax = plt.subplots(figsize=(16, 9))
half_std = df['DesvPad de CPU(Millicores)'] / 2
df.plot(kind='bar', y='Média de CPU(Millicores)', yerr=[half_std, half_std], capsize=3, ax=ax)
plt.xlabel("Nodes ")
plt.ylabel("CPU (Millicores)")
plt.legend()
plt.savefig('out/Nodes_CPU.pdf', bbox_inches='tight')
plt.savefig('out/Nodes_CPU.png', dpi=450, bbox_inches='tight')
# -
|
performance-analysis/article/_old/drc/decentralized/nodes_CPU.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.10 (''.venv'': venv)'
# language: python
# name: python3
# ---
import requests
from config import Instagram
# +
# https://levelup.gitconnected.com/automating-instagram-posts-with-python-and-instagram-graph-api-374f084b9f2b
# Exchange the short-lived user access token for a long-lived one.
# (The URL was a placeholder-less f-string — a plain literal is correct.)
url = "https://graph.facebook.com/v13.0/oauth/access_token"
params = {
    'grant_type' : 'fb_exchange_token',
    'client_id' : Instagram.APP_ID,
    'client_secret' : Instagram.APP_SECRET,
    'fb_exchange_token': Instagram.USER_ACCESS_TOKEN
}
response = requests.post(url, params=params)
response_json = response.json()
# Long-lived token used by every Graph API call below.
access_token = response_json['access_token']
# +
# curl -i -X POST \
# "https://graph.facebook.com/v13.0/17841451612144935/media?image_url=https%3A%2F%2Fi.imgur.com%2FR4zJRaX.jpeg&caption=test%20caption&access_token=<PASSWORD>"
# https://developers.facebook.com/docs/facebook-login/access-tokens/refreshing/
# https://developers.facebook.com/docs/instagram-api/guides/content-publishing#endpoints
# -
# Source images that will make up the carousel.
image_url_list = [
    "https://i.imgur.com/tT3tgN0.png",
    "https://i.imgur.com/UIyd8JR.png",
    "https://i.imgur.com/UIyd8JR.png"
]
# +
# Create a media container for one carousel item image.
url = f"https://graph.facebook.com/v13.0/{Instagram.BUSINESS_ACCOUNT_ID}/media"
container_request = {
    'image_url': image_url_list[2],
    'is_carousel_item': True,
    'access_token': access_token,
}
response_media = requests.post(url, params=container_request)
response_media_json = response_media.json()
# Container id returned by the Graph API.
media_id = response_media_json['id']
media_id
# +
# Image containers created for the images
# 17919577919362285
# 17929840070313958
# 17848006328745525
# +
# Previously created per-image container ids, joined into the comma-separated
# `children` list required by the CAROUSEL container endpoint.
image_container_list = [
    '17919577919362285',
    '17929840070313958',
    '17848006328745525'
]
children = ",".join(image_container_list)
url = f"https://graph.facebook.com/v13.0/{Instagram.BUSINESS_ACCOUNT_ID}/media"
carousel_request = {
    'children': children,
    'media_type': 'CAROUSEL',
    'caption': 'This is a carousel post on instagram.',
    'access_token': access_token,
}
response_media = requests.post(url, params=carousel_request)
response_media_json = response_media.json()
# Carousel container id, published in the next cell.
media_id = response_media_json['id']
media_id
# +
# Publish the carousel container.
# NOTE(review): the other calls in this notebook build the endpoint from
# Instagram.BUSINESS_ACCOUNT_ID, but the account id was hard-coded here
# (17841451612144935) — use the config constant for consistency.
url = f"https://graph.facebook.com/v13.0/{Instagram.BUSINESS_ACCOUNT_ID}/media_publish"
payload = {
    'creation_id' : f"{media_id}",
    'access_token': f'{access_token}'
}
r = requests.post(url, params=payload)
print(r.text)
|
sandbox/social_integration/meta/instagram carousel.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## TASK 4 : Handwritten digit recognition
# ## Author : <NAME>
# ## Task Details:
# ### Predict handwritten digits with MNIST image dataset
# ## Dataset:
# ### https://www.kaggle.com/dillsunnyb11/digit-recognizer/tasks
#
# ### Importing required libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
# ### Loading data in pandas DataFrame
# Load the Kaggle digit-recognizer CSVs: `train.csv` has a `label` column
# plus 784 pixel columns; `test.csv` has only the 784 pixel columns.
df_train = pd.read_csv('train.csv')
df_test = pd.read_csv('test.csv')
# ### Exploring Data
df_train.head()
df_train.shape
# ### In this:
# 1. Training Dataset have 42000 rows and 785 columns.
#
# 2. Each column represent the pixel value of digit.
df_train.describe()
df_train.info()
df_test.head()
df_test.shape
# ### In this:
# 1. Test Dataset have 28000 rows and 784 columns.
#
# 2. Each column represent the pixel value of handwritten digit.
df_test.describe()
df_test.info()
# ### Heatmaps
# Visual missing-value check: an all-dark heatmap means no NaNs anywhere.
sns.heatmap(df_train.isnull());
plt.show();
sns.heatmap(df_test.isnull());
plt.show();
# No Missing Values
# ### Let's check value count of each digit in the training dataset
df_train['label'].value_counts().sort_values(ascending = False) #to get count of every digit
# Training Dataset have more images of 1 than any other digit.
plt.hist(df_train['label'], bins = 40, color = "Red")
plt.xlabel("Digits")
plt.xticks(df_train.label.unique())
plt.ylabel("Frequency")
plt.title("Frequency of each digit")
plt.show();
# ### Function to print Image
def print_image(index):
    """Render the training digit at row *index* as a 28x28 grayscale image."""
    pixels = df_train.iloc[index, 1:].values
    plt.imshow(pixels.reshape(28, 28), 'binary')
print_image(1906)
# ### Build the Model
# Features: all 784 pixel columns; target: the `label` column.
X_train = df_train.iloc[:,1:]
Y_train = df_train.iloc[:,0]
# +
from sklearn.model_selection import train_test_split
# splitting data in training set(80%) and test set(20%).
# NOTE(review): no random_state is set, so the split (and every metric below)
# changes on each run — pass random_state=... for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(X_train, Y_train, test_size=0.2)
# -
# ### Random Forest Classifier
# +
from sklearn.ensemble import RandomForestClassifier
# 200 trees, each split considering 50% of the features.
model = RandomForestClassifier(n_estimators=200,max_features=0.5)
model = model.fit(x_train,y_train)
# -
# Predicted labels for the held-out 20%.
predicted = model.predict(x_test)
predicted
predicted.shape
# ### Training accuracy
# +
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
# Accuracy on the data the model was fit on (an optimistic estimate).
act = accuracy_score(y_train, model.predict(x_train))
print('Training Accuracy is: ',(act*100))
# -
# ### Testing accuracy
# Held-out accuracy: the honest generalization estimate.
act = accuracy_score(y_test, model.predict(x_test))
print('Test Accuracy is: ',(act*100))
# sklearn's convention is (y_true, y_pred).  The original passed
# (predicted, y_test), which transposes the confusion matrix and swaps
# precision/recall in the classification report.
confusion_matrix(y_test, predicted)
print(classification_report(y_test, predicted))
y_test.values
y_test[7:12]
predicted[7:12]
# Predict the unlabeled competition test set.
test_data_prediction = model.predict(df_test)
test_data_prediction
df_test.shape
# ### Function to print Image
def print_testimage(index):
    """Render the unlabeled test-set digit at row *index* as a 28x28 image."""
    pixels = df_test.iloc[index].values
    plt.imshow(pixels.reshape(28, 28), 'binary')
print_testimage(32)
# The label the model predicted for test image 32.
test_data_prediction[32]
|
Task-4 Handwritten digit recognition/Handwritten digit recognition.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Tensorflow GPU (tf-gpu)
# language: python
# name: tf-gpu
# ---
# Keras auto encoder tutorial based upon https://blog.keras.io/building-autoencoders-in-keras.html but with some modifications.
#
# Topics include:
#
# * Autoencoder basics
# * Deep Autoencoders
# * Convolutional Autoencoders
# * Image Denoising
# * Variational Autoencoders (VAE's)
# +
from keras.datasets import mnist
from keras.layers import Input, Dense, Lambda, Conv2D, MaxPooling2D, UpSampling2D
from keras.losses import mse, binary_crossentropy
from keras.models import Model
from keras import backend as K
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
# -
# # Create a basic autoencoder
# +
# this is the size of our encoded representations
encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# -
autoencoder.summary()
# We can extract the encoder model from the first layer of the autoencoder model. Reason we’d want to do this is to get the encoded features (embedding) that in this case represent what an encoded image looks like.
# this model maps an input to its encoded representation
# (it shares layers/weights with `autoencoder`, so training one trains both)
encoder = Model(input_img, encoded)
encoder.summary()
# As well as the decoder model (we don't use this here though):
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
decoder.summary()
# Now let's train our autoencoder to reconstruct MNIST digits.
#
# First, we'll configure our model to use a per-pixel binary crossentropy loss, and the Adadelta optimizer:
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# Let's prepare our input data. We're using MNIST digits, and we're discarding the labels (since we're only interested in encoding/decoding the input images).
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
# We will normalize all values between 0 and 1 and we will flatten the 28x28 images into vectors of size 784.
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print(x_train.shape)
print(x_test.shape)
# Now let's train our autoencoder for 50 epochs:
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))
# After 50 epochs, the autoencoder seems to reach a stable train/test loss value of about 0.11. We can try to visualize the reconstructed inputs and the encoded representations. We will use Matplotlib.
# +
# encode and decode some digits
# note that we take them from the *test* set
def plot_reconstruction(encoder, autoencoder, x_test, embedding_shape = (8,4)):
    """Plot originals (row 1), their encodings (row 2) and reconstructions (row 3).

    Draws 10 test digits sampled with a fixed seed so every call shows the
    same images; also prints the mean activation of the encoded vectors as a
    rough sparsity measure.
    """
    codes = encoder.predict(x_test)
    reconstructions = autoencoder.predict(x_test)
    print("Encoded Image Sparsity: ", codes.mean())
    n_cols = 10  # digits per row
    np.random.seed(42)  # same random sample of test images on every call
    sample = np.random.randint(x_test.shape[0], size=n_cols)
    plt.figure(figsize=(20, 4))

    def _panel(row, col, img):
        # One grayscale panel at (row, col) in the 3 x n_cols grid, axes hidden.
        axis = plt.subplot(3, n_cols, row * n_cols + col + 1)
        plt.imshow(img)
        plt.gray()
        axis.get_xaxis().set_visible(False)
        axis.get_yaxis().set_visible(False)

    emb_rows, emb_cols = embedding_shape
    for col, idx in enumerate(sample):
        _panel(0, col, x_test[idx].reshape(28, 28))
        _panel(1, col, codes[idx].reshape(emb_rows, emb_cols).T)
        _panel(2, col, reconstructions[idx].reshape(28, 28))
    plt.show()

plot_reconstruction(encoder, autoencoder, x_test)
# -
# Above is what we get. The top row is the original digits, and the bottom row is the reconstructed digits. We are losing quite a bit of detail with this basic approach.
# # Adding a sparsity constraint on the encoded representations
# In the previous example, the representations were only constrained by the size of the hidden layer (32). In such a situation, what typically happens is that the hidden layer is learning an approximation of PCA (principal component analysis). But another way to constrain the representations to be compact is to add a sparsity contraint on the activity of the hidden representations, so fewer units would "fire" at a given time. In Keras, this can be done by adding an activity_regularizer to our Dense layer:
# +
from keras import regularizers
encoding_dim = 32
input_img = Input(shape=(784,))
# add a Dense layer with a L1 activity regularizer
# (the L1 penalty on the 32 activations pushes most of them toward zero,
# i.e. a sparse code; 10e-5 is the regularization strength)
encoded = Dense(encoding_dim, activation='relu',
                activity_regularizer=regularizers.l1(10e-5))(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_img, decoded)
autoencoder.summary()
# -
encoder = Model(input_img, encoded)
encoder.summary()
# Let's train this model for 100 epochs (with the added regularization the model is less likely to overfit and can be trained longer). The models ends with a train loss of 0.11 and test loss of 0.10. The difference between the two is mostly due to the regularization term being added to the loss during training (worth about 0.01).
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))
# Here's a visualization of our new results:
plot_reconstruction(encoder, autoencoder, x_test)
# They look pretty similar to the previous model, the only significant difference being the sparsity of the encoded representations. encoded_imgs.mean() yields a value 3.33 (over our 10,000 test images), whereas with the previous model the same quantity was 7.30. So our new model yields encoded representations that are twice sparser.
# # Deep autoencoder
# We do not have to limit ourselves to a single layer as encoder or decoder, we could instead use a stack of layers, such as:
# +
input_img = Input(shape=(784,))
# Encoder: 784 -> 128 -> 64 -> 32 (the 32-d bottleneck is the embedding).
encoded = Dense(128, activation='relu')(input_img)
encoded = Dense(64, activation='relu')(encoded)
encoded = Dense(32, activation='relu', name='embeddings')(encoded)
# Decoder: mirror of the encoder, 32 -> 64 -> 128 -> 784.
decoded = Dense(64, activation='relu')(encoded)
decoded = Dense(128, activation='relu')(decoded)
decoded = Dense(784, activation='sigmoid')(decoded)
autoencoder = Model(input_img, decoded)
autoencoder.summary()
# -
#encoder = Model(inputs=autoencoder.input, outputs=autoencoder.get_layer('embeddings').output)
encoder = Model(inputs=autoencoder.input, outputs=encoded)
encoder.summary()
# +
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train,
                epochs=100,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))
# -
# After 100 epochs, it reaches a train and test loss of ~0.097, a bit better than our previous models. Our reconstructed digits look a bit better too:
plot_reconstruction(encoder, autoencoder, x_test)
# # Convolutional autoencoder
# Since our inputs are images, it makes sense to use convolutional neural networks (convnets) as encoders and decoders. In practical settings, autoencoders applied to images are always convolutional autoencoders --they simply perform much better.
#
# Let's implement one. The encoder will consist in a stack of Conv2D and MaxPooling2D layers (max pooling being used for spatial down-sampling), while the decoder will consist in a stack of Conv2D and UpSampling2D layers.
# +
input_img = Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format
x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)
# at this point the representation is (4, 4, 8) i.e. 128-dimensional
x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
x = Conv2D(16, (3, 3), activation='relu')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
autoencoder = Model(input_img, decoded)
autoencoder.summary()
# -
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
encoder = Model(inputs=autoencoder.input, outputs=encoded)
encoder.summary()
# To train it, we will use the original MNIST digits with shape (samples, 3, 28, 28), and we will just normalize pixel values between 0 and 1.
# +
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
# -
# Let's train this model for 50 epochs. For the sake of demonstrating how to visualize the results of a model during training, we will be using the TensorFlow backend and the TensorBoard callback.
#
# First, let's open up a terminal and start a TensorBoard server that will read logs stored at /tmp/autoencoder.
#
# tensorboard --logdir=/tmp/autoencoder
#
# Then let's train our model. In the callbacks list we pass an instance of the TensorBoard callback. After every epoch, this callback will write logs to /tmp/autoencoder, which can be read by our TensorBoard server.
# +
from keras.callbacks import TensorBoard

# Train with the clean digits as both input and target (plain reconstruction);
# the TensorBoard callback writes per-epoch logs to /tmp/autoencoder, matching
# the server command shown above.
autoencoder.fit(x_train, x_train,
                epochs=50,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test, x_test),
                callbacks=[TensorBoard(log_dir='/tmp/autoencoder')])
# -
# This allows us to monitor training in the TensorBoard web interface (by navigating to http://0.0.0.0:6006):
#
# The model converges to a loss of 0.094, significantly better than our previous models (this is in large part due to the higher entropic capacity of the encoded representation, 128 dimensions vs. 32 previously). Let's take a look at the reconstructed digits:
# NOTE(review): plot_reconstruction is a helper defined elsewhere in this
# notebook; it presumably shows originals next to reconstructions -- confirm.
plot_reconstruction(encoder, autoencoder, x_test, embedding_shape=(4, 4 * 8))
# We can also have a look at the 128-dimensional encoded representations. These representations are 8x4x4, so we reshape them to 4x32 in order to be able to display them as grayscale images.
# # Application to image denoising
# Let's put our convolutional autoencoder to work on an image denoising problem. It's simple: we will train the autoencoder to map noisy digits images to clean digits images.
#
# Here's how we will generate synthetic noisy digits: we just apply a gaussian noise matrix and clip the images between 0 and 1.
# +
(x_train, _), (x_test, _) = mnist.load_data()

# Scale pixels to [0, 1] and append the trailing channel axis that
# Conv2D expects (use the leading axis instead for `channels_first`).
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((-1, 28, 28, 1))
x_test = x_test.reshape((-1, 28, 28, 1))

# Corrupt the digits with additive Gaussian noise, then clip back into [0, 1].
noise_factor = 0.5
x_train_noisy = np.clip(
    x_train + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_train.shape), 0., 1.)
x_test_noisy = np.clip(
    x_test + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=x_test.shape), 0., 1.)
# -
# Here's what the noisy digits look like:
n = 10  # number of test digits to display
fig = plt.figure(figsize=(20, 2))
for idx in range(n):
    # One borderless grayscale panel per noisy digit.
    axis = fig.add_subplot(1, n, idx + 1)
    plt.imshow(x_test_noisy[idx].reshape(28, 28))
    plt.gray()
    axis.get_xaxis().set_visible(False)
    axis.get_yaxis().set_visible(False)
plt.show()
# If you squint you can still recognize them, but barely. Can our autoencoder learn to recover the original digits? Let's find out.
#
# Compared to the previous convolutional autoencoder, in order to improve the quality of the reconstructed, we'll use a slightly different model with more filters per layer:
# +
input_img = Input(shape=(28, 28, 1))  # adapt this if using `channels_first` image data format

# Denoising encoder: wider (32-filter) layers than the previous model,
# 28x28x1 -> 7x7x32.
x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
x = MaxPooling2D((2, 2), padding='same')(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
encoded = MaxPooling2D((2, 2), padding='same')(x)

# at this point the representation is (7, 7, 32)

# Decoder mirrors the encoder back up to 28x28x1 (all 'same' padding here,
# since 7 -> 14 -> 28 needs no valid-padding trim).
x = Conv2D(32, (3, 3), activation='relu', padding='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
x = UpSampling2D((2, 2))(x)
decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.summary()
# -

# Bottleneck-only model, used later for visualizing the 7x7x32 codes.
encoder = Model(inputs=autoencoder.input, outputs=encoded)
encoder.summary()
# Let's train it for 100 epochs:
# Train the network to map noisy inputs to their clean originals.
autoencoder.fit(x_train_noisy, x_train,
                epochs=100,
                batch_size=128,
                shuffle=True,
                validation_data=(x_test_noisy, x_test),
                callbacks=[TensorBoard(log_dir='/tmp/tb', histogram_freq=0, write_graph=False)])

# Now let's take a look at the results. Top, the noisy digits fed to the network, and bottom, the digits reconstructed by the network.
plot_reconstruction(encoder, autoencoder, x_test_noisy, embedding_shape=(7, 7 * 32))
# # Sequence-to-sequence autoencoder
# If your inputs are sequences, rather than vectors or 2D images, then you may want to use as encoder and decoder a type of model that can capture temporal structure, such as an LSTM. To build an LSTM-based autoencoder, first use an LSTM encoder to turn your input sequences into a single vector that contains information about the entire sequence, then repeat this vector n times (where n is the number of timesteps in the output sequence), and run an LSTM decoder to turn this constant sequence into the target sequence.
#
# We won't be demonstrating that one on any specific dataset. We will just put a code example here for future reference for the reader!
# +
# from keras.layers import Input, LSTM, RepeatVector
# from keras.models import Model
#
# inputs = Input(shape=(timesteps, input_dim))
# encoded = LSTM(latent_dim)(inputs)
#
# decoded = RepeatVector(timesteps)(encoded)
# decoded = LSTM(input_dim, return_sequences=True)(decoded)
#
# sequence_autoencoder = Model(inputs, decoded)
# encoder = Model(inputs, encoded)
# -
# # Variational autoencoder (VAE)
# Variational autoencoders are a slightly more modern and interesting take on autoencoding.
#
# What is a variational autoencoder, you ask? It's a type of autoencoder with added constraints on the encoded representations being learned. More precisely, it is an autoencoder that learns a latent variable model for its input data. So instead of letting your neural network learn an arbitrary function, you are learning the parameters of a probability distribution modeling your data. If you sample points from this distribution, you can generate new input data samples: a VAE is a "generative model".
#
# How does a variational autoencoder work?
#
# First, an encoder network turns the input samples x into two parameters in a latent space, which we will note z_mean and z_log_sigma. Then, we randomly sample similar points z from the latent normal distribution that is assumed to generate the data, via z = z_mean + exp(z_log_sigma) * epsilon, where epsilon is a random normal tensor. Finally, a decoder network maps these latent space points back to the original input data.
#
# The parameters of the model are trained via two loss functions: a reconstruction loss forcing the decoded samples to match the initial inputs (just like in our previous autoencoders), and the KL divergence between the learned latent distribution and the prior distribution, acting as a regularization term. You could actually get rid of this latter term entirely, although it does help in learning well-formed latent spaces and reducing overfitting to the training data.
#
# The VAE has a modular design. The encoder, decoder and VAE are 3 models that share weights. After training the VAE model,
# the encoder can be used to generate latent vectors. The decoder can be used to generate MNIST digits by sampling the
# latent vector from a Gaussian distribution with mean = 0 and std = 1.
#
# First load the dataset and setup some parameters
# +
(x_train, y_train), (x_test, y_test) = mnist.load_data()

image_size = x_train.shape[1]
original_dim = image_size * image_size
# Flatten each 28x28 digit into a 784-vector and scale to [0, 1]
# (the VAE here is a dense MLP, not convolutional).
x_train = np.reshape(x_train, [-1, original_dim])
x_test = np.reshape(x_test, [-1, original_dim])
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255

# network parameters
input_shape = (original_dim, )
intermediate_dim = 512
batch_size = 128
latent_dim = 2   # 2-D latent space so it can be scattered/scanned directly
epochs = 50
# -

# ## Encoder Model
# First, here's our encoder network, mapping inputs to our latent distribution parameters:
inputs = Input(shape=input_shape, name='encoder_input')
x = Dense(intermediate_dim, activation='relu')(inputs)
# Two parallel heads: mean and log-variance of q(z|x).
z_mean = Dense(latent_dim, name='z_mean')(x)
z_log_var = Dense(latent_dim, name='z_log_var')(x)
# We can use these parameters to sample new similar points from the latent space:
# +
# reparameterization trick
# instead of sampling from Q(z|X), sample epsilon = N(0,I)
# z = z_mean + sqrt(var) * epsilon
def sampling(args):
    """Reparameterization trick: draw z ~ N(z_mean, exp(z_log_var) I)
    by sampling unit Gaussian noise and shifting/scaling it.

    # Arguments
        args (tensor): mean and log of variance of Q(z|X)
    # Returns
        z (tensor): sampled latent vector
    """
    mean, log_var = args
    # K.random_normal defaults to mean = 0 and std = 1.0
    noise = K.random_normal(shape=(K.shape(mean)[0], K.int_shape(mean)[1]))
    return mean + K.exp(0.5 * log_var) * noise
# use reparameterization trick to push the sampling out as input
# note that "output_shape" isn't necessary with the TensorFlow backend
# so you could write `Lambda(sampling)([z_mean, z_log_sigma])`
# Lambda wraps the plain function `sampling` as a Keras layer.
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
# -
# instantiate encoder model
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')
encoder.summary()

# ## Decoder Model
# Finally, we can map these sampled latent points back to reconstructed inputs. First we define some layers that can be used for our decoder, autoencoder and later a generator.
latent_inputs = Input(shape=(latent_dim,), name='z_sampling')
x = Dense(intermediate_dim, activation='relu')(latent_inputs)
outputs = Dense(original_dim, activation='sigmoid')(x)

# instantiate decoder model
decoder = Model(latent_inputs, outputs, name='decoder')
decoder.summary()

# ## VAE Model
# An end-to-end autoencoder mapping inputs to reconstructions
outputs = decoder(encoder(inputs)[2])  # take z (index 2) from encoder's outputs
vae = Model(inputs, outputs, name='vae_mlp')
vae.summary()

# We train the model using the end-to-end model, with a custom loss function: the sum of a reconstruction term, and the KL divergence regularization term.
# +
# Reconstruction term: per-pixel MSE, rescaled by the pixel count so it is a
# sum rather than a mean over the 784 pixels.
reconstruction_loss = mse(inputs, outputs)
reconstruction_loss *= original_dim
# Closed-form KL(q(z|x) || N(0, I)) for a diagonal Gaussian.
kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
kl_loss = K.sum(kl_loss, axis=-1)
kl_loss *= -0.5
vae_loss = K.mean(reconstruction_loss + kl_loss)
# add_loss lets us train without passing targets to compile()/fit().
vae.add_loss(vae_loss)
vae.compile(optimizer='adam')
# -

# We train our VAE on MNIST digits:
vae.fit(x_train,
        shuffle=True,
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(x_test, None))

# Because our latent space is two-dimensional, there are a few cool visualizations that can be done at this point. One is to look at the neighborhoods of different classes on the latent 2D plane:
z_mean, _, _ = encoder.predict(x_test, batch_size=batch_size)  # here we ignore z_log_var, z as they aren't needed
plt.figure(figsize=(12, 10))
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=y_test)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.show()
# Each of these colored clusters is a type of digit. Close clusters are digits that are structurally similar (i.e. digits that share information in the latent space).
# ## VAE as Generator
#
# Because the VAE is a generative model, we can also use it to generate new digits! Here we will scan the latent plane, sampling latent points at regular intervals, and generating the corresponding digit for each of these points. This gives us a visualization of the latent manifold that "generates" the MNIST digits.
# +
# display a 2D manifold of the digits
n = 15  # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# sample latent points on a regular grid over [-5, 5] x [-5, 5]
# (the code below uses -5..5, not the [-15, 15] the original comment claimed)
grid_x = np.linspace(-5, 5, n)
grid_y = np.linspace(-5, 5, n)

for i, yi in enumerate(grid_x):
    for j, xi in enumerate(grid_y):
        z_sample = np.array([[xi, yi]])  # * epsilon_std
        x_decoded = decoder.predict(z_sample)
        digit = x_decoded[0].reshape(digit_size, digit_size)
        # paint the decoded digit into its (i, j) tile of the mosaic
        figure[i * digit_size: (i + 1) * digit_size,
               j * digit_size: (j + 1) * digit_size] = digit

plt.figure(figsize=(10, 10))
plt.imshow(figure)
plt.show()
|
AutoEncoders/Keras Auto Encoder Tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# # Fitting a Morse Diatomic Absorption spectrum with a non-Condon Moment
# In these spectroscopy calculations, we are given $\omega_e$, $\chi_e \omega_e$, the reduced mass $\mu$ and the equilibrium position $r_e$. For each atom, we want to create a system of units out of these.
#
# \begin{align}
# h &= A \cdot e_u\cdot T_u = A \cdot m_u \frac{l_u^2}{T_u}
# \end{align}
# lower case means we are setting it still, capital letters mean they are determined. If we assume right now we want to set $E_u$ to be some spectroscopic value in wavenumbers and set $\hbar$, then we know we have to let time float, which is fine since this code is not a time-dependent one.
# \begin{align}
# A \cdot T_u &= \frac{h}{e_u} \\
# e_u &= m_u \frac{l_u^2}{T_u^2} \\
# T_u &= \sqrt{ \frac{ m_u l_u^2}{e_u} }\\
# A &= \frac{h}{e_u}\sqrt{ \frac{e_u}{ m_u l_u^2} } = \sqrt{ \frac{h^2}{e_u m_u l_u^2} }
# \end{align}
# so we can clearly only select either the mass or the length to fix in a system of units which is self-consistent.
# +
import math
import numpy as np
from scipy.special import gamma, genlaguerre
import scipy.integrate
import scipy.misc
import sympy.mpmath
import sympy.functions.combinatorial.factorials as fact
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# Unit-conversion constants for the spectroscopic unit system derived above.
# NOTE(review): 1 wavenumber (cm^-1) corresponds to ~1.986e-23 J; the 0.01 here
# looks like a placeholder -- confirm before using for real unit conversions.
joules_per_wavenumber = .01 #kg * m^3 / s^2
h_joules_seconds = 6.62607E-34 #Joule*seconds
# +
# +
# Two toy Morse systems (dimensionless units) used for testing below.
TEST1_SYSTEM_DICTIONARY = {"reduced_mass" : 1.0,
                           "alpha" : 1.0,
                           "center" : 0.0,
                           "D" : 2.0}
TEST2_SYSTEM_DICTIONARY = {"reduced_mass" : 1.0,
                           "alpha" : 1.5,
                           "center" : 0.5,
                           "D" : 1.0}
# "Universe" constants: hbar chosen so that h = 1, plus the tolerance used for
# numerical normalization / convergence checks throughout.
DEFAULT_UNIVERSE_DICTIONARY = {"hbar" : 1.0 / (2.0 * np.pi),
                               "ZERO_TOLERANCE" : 1.0E-5}
Nitrogen_energy_scale_wavenumbers = 2358.57  # omega_e used as the energy unit, cm^-1
Nitrogen_mass_scale_amu = 7.00  # NOTE(review): presumably the N2 reduced mass (14/2 amu) -- confirm

# Spectroscopic constants of the N2 ground electronic state, expressed in units
# where the energy scale is omega_e (so omega_e itself is exactly 1).
# Fixes vs the original cell: the dict literal used `=` instead of `:` (a
# SyntaxError) and referenced an undefined name `Nitrogen_scaling`; the
# anharmonicity key is spelled "chi_e_omega_e" to match what Morse.__init__
# actually reads.
Nitrogen_Chi_1_Sigma_g_Plus = {"omega_e_wavenumbers": Nitrogen_energy_scale_wavenumbers,
                               "omega_e": 2358.57 / Nitrogen_energy_scale_wavenumbers,
                               "chi_e_omega_e": 14.324 / Nitrogen_energy_scale_wavenumbers,
                               "mu": 1.0}
# +
class UnboundStateIndexError(Exception):
    """Raised when an eigenstate index refers to a state above dissociation."""

    def __init__(self):
        # Carries no payload; callers report the offending index themselves.
        pass
class Morse(object):
    """One-dimensional Morse oscillator with analytic bound eigenstates.

    The potential is V(x) = -2 D exp(-alpha (x - r)) + D exp(-2 alpha (x - r)),
    i.e. the usual Morse well shifted so that V -> 0 as x -> +inf and
    V(r) = -D at the minimum.  Terminology follows Matsumoto and Iwamoto, 1993.
    """

    def __init__(self, system_dictionary=None, universe_dictionary=None):
        """Build the oscillator and pre-compute per-state integration grids.

        system_dictionary   -- needs "reduced_mass", "center", and either
                               ("alpha", "D") or ("omega_e", "chi_e_omega_e");
                               defaults to TEST1_SYSTEM_DICTIONARY.
        universe_dictionary -- needs "hbar" and "ZERO_TOLERANCE"; defaults to
                               DEFAULT_UNIVERSE_DICTIONARY.
        """
        # Resolve module-level defaults lazily.  The original default argument
        # referenced DEFAULT_SYSTEM_DICTIONARY, which is defined nowhere in the
        # notebook, so the bare call Morse() raised NameError.
        if system_dictionary is None:
            system_dictionary = TEST1_SYSTEM_DICTIONARY
        if universe_dictionary is None:
            universe_dictionary = DEFAULT_UNIVERSE_DICTIONARY
        # define the Universe
        self.hbar = universe_dictionary["hbar"]
        self.ZERO_TOLERANCE = universe_dictionary["ZERO_TOLERANCE"]
        # define the system
        self.mu = system_dictionary["reduced_mass"]
        self.center = system_dictionary["center"]
        self.r = self.center
        if "omega_e" not in system_dictionary:
            # specified mechanically, by well depth D and width parameter alpha
            self.alpha = system_dictionary["alpha"]
            self.D = system_dictionary["D"]
            # derive the spectroscopic constants
            self.omega_e = 2.0 * self.alpha * np.sqrt(self.D / (2.0 * self.mu))
            self.chi_e_omega_e = self.alpha**2 * self.hbar / (2.0 * self.mu)
        else:
            # specified spectroscopically; invert the relations above
            self.omega_e = system_dictionary["omega_e"]
            self.chi_e_omega_e = system_dictionary["chi_e_omega_e"]
            self.alpha = np.sqrt(2.0 * self.mu * self.chi_e_omega_e / self.hbar)
            self.D = 2.0 * self.mu * (self.omega_e / (2.0 * self.alpha))**2
        # dimensionless well-depth parameter; bound states are n = 0 .. floor(a - 1/2)
        self.a = np.sqrt(2.0 * self.mu * self.D) / (self.alpha * self.hbar)
        self.maximum_index = int(np.floor(self.a - .5))
        # Harmonic Oscillator Approximation about the minimum:
        k = self.potential_energy_gradientSquared(self.r)
        self.omega_HO = np.sqrt(k / self.mu)
        self.x0 = np.sqrt(self.hbar / (2.0 * self.omega_HO * self.mu))
        # Determine, for every bound state, spatial parameters
        # (x_min, x_max, n_points) on which the state integrates to 1
        # within ZERO_TOLERANCE.
        self.index_to_xParams_dictionary = {}
        for energy_index in range(self.maximum_index + 1):
            # Seed the search with the analytic spread of the matching HO state,
            # then push the edges out until the amplitude is numerically zero.
            HO_spatial_spread = self.x0 * np.sqrt(2 * energy_index + 1)
            x_min = self.r - 5.0 * HO_spatial_spread
            while np.abs(self.energy_eigenfunction_amplitude(energy_index, x_min)) > self.ZERO_TOLERANCE:
                x_min += -HO_spatial_spread
            x_max = self.r + 5.0 * HO_spatial_spread
            while np.abs(self.energy_eigenfunction_amplitude(energy_index, x_max)) > self.ZERO_TOLERANCE:
                x_max += HO_spatial_spread
            # Densify the grid until the norm integral converges to 1.
            keep_integrating = True
            number_x_points = 10
            while keep_integrating:
                x_vals = np.linspace(x_min, x_max, number_x_points)
                psi_vals = self.energy_eigenfunction_amplitude(energy_index, x_vals)
                # scipy.integrate.simps was removed from SciPy (deprecated 1.10,
                # gone in modern releases); simpson is the supported name.
                integral = scipy.integrate.simpson(np.conj(psi_vals) * psi_vals, x=x_vals)
                if np.abs(integral - 1.0) < self.ZERO_TOLERANCE:
                    keep_integrating = False
                else:
                    number_x_points = number_x_points + 10
            self.index_to_xParams_dictionary[energy_index] = (x_min, x_max, number_x_points)

    # POTENTIAL ENERGY STUFF:
    def potential_energy(self, x):
        """Morse potential V(x); V(self.r) = -D and V -> 0 as x -> +inf."""
        return -2 * self.D * np.exp(- self.alpha * (x - self.r)) + self.D * np.exp(-2.0 * self.alpha * (x - self.r))

    def potential_energy_gradient(self, x):
        """First derivative dV/dx; zero at the well minimum x = r."""
        return 2.0 * self.alpha * self.D * (np.exp(- self.alpha * (x - self.r)) - np.exp(-2.0 * self.alpha * (x - self.r)))

    def potential_energy_gradientSquared(self, x):
        """Second derivative d2V/dx2; at x = r this is the HO force constant k."""
        return 2.0 * self.alpha**2 * self.D * (-np.exp(- self.alpha * (x - self.r)) + 2.0 * np.exp(-2.0 * self.alpha * (x - self.r)))

    # ENERGY EIGENFUNCTION STUFF:
    def energy_eigenvalue(self, index):
        """Bound-state energy E_n = -D + hbar (omega_e (n+1/2) - chi_e omega_e (n+1/2)^2)."""
        return -self.D + self.hbar * (self.omega_e * (index + .5) - self.chi_e_omega_e * (index + .5)**2)

    def energy_eigenfunction_amplitude(self, n, x):
        """Normalized bound eigenfunction psi_n at x (scalar or ndarray).

        Uses the closed form in terms of generalized Laguerre polynomials.
        Raises UnboundStateIndexError for n above the last bound state.
        """
        if n > self.maximum_index:
            raise UnboundStateIndexError()
        b_n = self.a - .5 - n
        # scipy.misc.factorial was removed from SciPy; math.factorial is exact
        # for the small integer n used here.
        N_n = np.sqrt(2.0 * self.alpha * b_n * math.factorial(n) / gamma(2 * b_n + n + 1))
        z = 2.0 * self.a * np.exp(-self.alpha * (x - self.r))
        z_poly = np.power(z, b_n)
        z_exp = np.exp(-.5 * z)
        lag_part = genlaguerre(n, 2 * b_n)(z)
        return N_n * z_poly * z_exp * lag_part
class OffsetMorse(object):
    """Franck-Condon overlap table between the bound states of two Morse wells.

    franck_condon_factors[i, j] holds <excited_j | ground_i>, computed by
    numerical integration on a grid that is refined until converged.
    """

    def __init__(self, ground_morse, excited_morse, universe_dictionary=None):
        """Pre-compute every ground -> excited overlap integral.

        ground_morse, excited_morse -- Morse instances (or any objects exposing
            maximum_index, index_to_xParams_dictionary,
            energy_eigenfunction_amplitude and energy_eigenvalue).
        universe_dictionary -- needs "hbar" and "ZERO_TOLERANCE"; defaults to
            DEFAULT_UNIVERSE_DICTIONARY (resolved lazily).
        """
        if universe_dictionary is None:
            universe_dictionary = DEFAULT_UNIVERSE_DICTIONARY
        # define the Universe
        self.hbar = universe_dictionary["hbar"]
        self.ZERO_TOLERANCE = universe_dictionary["ZERO_TOLERANCE"]
        # assign variables
        self.ground_morse = ground_morse
        self.excited_morse = excited_morse
        self.franck_condon_factors = np.zeros((self.ground_morse.maximum_index + 1,
                                               self.excited_morse.maximum_index + 1))
        for ground_index in range(self.ground_morse.maximum_index + 1):
            ground_xMin, ground_xMax, ground_numPoints = self.ground_morse.index_to_xParams_dictionary[ground_index]
            for excited_index in range(self.excited_morse.maximum_index + 1):
                excited_xMin, excited_xMax, excited_numPoints = self.excited_morse.index_to_xParams_dictionary[excited_index]
                # integrate over the union of the two states' grids
                x_min = min([ground_xMin, excited_xMin])
                x_max = max([excited_xMax, ground_xMax])
                n_points = ground_numPoints * excited_numPoints
                # integrate once
                x_vals = np.linspace(x_min, x_max, n_points)
                g_func_vals = self.ground_morse.energy_eigenfunction_amplitude(ground_index, x_vals)
                e_func_vals = self.excited_morse.energy_eigenfunction_amplitude(excited_index, x_vals)
                # simps was removed from SciPy; simpson is the supported name
                gToE_FCF = scipy.integrate.simpson(e_func_vals * np.conj(g_func_vals), x=x_vals)
                # refine the grid until two successive estimates agree
                keep_integrating = True
                while keep_integrating:
                    # np.linspace requires an integer count: the original passed
                    # the float n_points * 1.1, which raises TypeError on modern
                    # NumPy.  max(...) guarantees the count actually grows.
                    n_points = max(n_points + 1, int(n_points * 1.1))
                    x_vals = np.linspace(x_min, x_max, n_points)
                    g_func_vals = self.ground_morse.energy_eigenfunction_amplitude(ground_index, x_vals)
                    e_func_vals = self.excited_morse.energy_eigenfunction_amplitude(excited_index, x_vals)
                    new_integral = scipy.integrate.simpson(e_func_vals * np.conj(g_func_vals), x=x_vals)
                    # guard the denominator so near-zero overlaps cannot divide by 0
                    denominator = max(np.abs(new_integral), self.ZERO_TOLERANCE)
                    if np.abs(new_integral - gToE_FCF) / denominator < self.ZERO_TOLERANCE:
                        keep_integrating = False
                    else:
                        print("NEED MOAR POINTz")
                    # compare *successive* refinements: the original kept comparing
                    # against -- and finally stored -- the first coarse estimate
                    gToE_FCF = new_integral
                self.franck_condon_factors[ground_index, excited_index] = gToE_FCF

    def stick_absorption_spectrum(self, starting_ground_index):
        """Return (frequencies, intensities) for absorption out of one ground state.

        frequencies -- list of transition frequencies (energy gap / hbar),
                       one per excited bound state
        intensities -- squared Franck-Condon factors for those transitions
        """
        relevant_FCFs = self.franck_condon_factors[starting_ground_index, :]
        frequency_values = []
        ground_energy = self.ground_morse.energy_eigenvalue(starting_ground_index)
        for excited_index in range(self.excited_morse.maximum_index + 1):
            energy_gap = self.excited_morse.energy_eigenvalue(excited_index) - ground_energy
            frequency_values.append(energy_gap / self.hbar)
        return frequency_values, relevant_FCFs**2
# -
ground = Morse()
# NOTE(review): the original referenced ALTERNATE_SYSTEM_DICTIONARY, which is
# defined nowhere in this notebook (NameError).  TEST2_SYSTEM_DICTIONARY is the
# only other system dictionary defined above, so use it as the excited surface.
excited = Morse(system_dictionary=TEST2_SYSTEM_DICTIONARY)
test_offsetMorse = OffsetMorse(ground_morse=ground, excited_morse=excited)
# One stick spectrum (log intensity vs frequency) per initial ground state.
for i in range(ground.maximum_index + 1):
    w, I = test_offsetMorse.stick_absorption_spectrum(i)
    plt.plot(w, np.log(I))
# NOTE(review): the original referenced an undefined object `test` with
# attributes (max_ground_index, ground_eigenfunction) that exist nowhere in
# this notebook.  Plot the `ground` and `excited` Morse objects created above
# instead; the second loop also mistakenly plotted ground states again.
x_vals = np.linspace(-1, 30, 200, dtype=complex)  # np.complex alias was removed in NumPy 1.24
for n in range(ground.maximum_index + 1):
    print("n=" + str(n))
    f = ground.energy_eigenfunction_amplitude(n, x_vals)
    plt.plot(x_vals, np.real(f), label=n)
    # check normalization numerically (simps was removed from SciPy; use simpson)
    print("integral=" + str(scipy.integrate.simpson(f * np.conj(f), x=x_vals)))
    print("\n")
# plt.legend(loc=0)
plt.figure()
for n in range(excited.maximum_index + 1):
    print("n=" + str(n))
    f = excited.energy_eigenfunction_amplitude(n, x_vals)
    plt.plot(x_vals, np.real(f), label=n)
    print("integral=" + str(scipy.integrate.simpson(f * np.conj(f), x=x_vals)))
    print("\n")
# plt.legend(loc=0)
#
#
|
DetectingNonCondonPaper/code/.ipynb_checkpoints/Morse_fitting_procedure-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
# %matplotlib inline
# -

# NOTE(review): absolute Windows-specific path -- this only resolves on the
# original author's machine; point it at your local copy of pokemon.csv.
pokemon = pd.read_csv(r'C:\Users\venxr\Udacity_exercises\AIPND-master\Matplotlib\data\pokemon.csv')
print(pokemon.shape)
pokemon.head(10)
# Define the palette colour and generation ordering *before* they are used:
# the original cell called sb.countplot with base_color / gen_order prior to
# their assignments, which is a NameError when the notebook runs top-to-bottom.
sb.color_palette()
base_color = sb.color_palette()[0]
# Order generation bars by descending frequency.
gen_order = pokemon['generation_id'].value_counts().index

sb.countplot(data = pokemon, x = 'generation_id', color = base_color,
             order = gen_order); #order = [5,1,3,4,2,7,6]

sb.countplot(data = pokemon, x = 'type_1',
             color = base_color);#order = [5,1,3,4,2,7,6]
plt.xticks(rotation = 90);
# +
#Absolute vs relative frequency
# Long-to-tall reshape: one row per (pokemon, type slot), dropping missing type_2.
pkmn_types = pokemon.melt(id_vars = ['id', 'species'],
                          value_vars = ['type_1','type_2'],
                          var_name = 'type_level', value_name = 'type').dropna()
pkmn_types[802:812]
# -

pokemon.isna().sum()

# code for the pie chart seen above
sorted_counts = pokemon['type_1'].value_counts()
plt.pie(sorted_counts, labels = sorted_counts.index, startangle = 90,
        counterclock = False);
plt.axis('square')

# Same pie as a donut: the wedgeprops width leaves a hole in the middle.
sorted_counts = pokemon['type_1'].value_counts()
plt.pie(sorted_counts, labels = sorted_counts.index, startangle = 90,
        counterclock = False, wedgeprops = {'width' : 0.4});
plt.axis('square')

#Descriptive statistics
# Histogram of heights with fixed 0.2-wide bins, zoomed to [0, 6].
bins = np.arange(0, pokemon['height'].max()+0.2, 0.2)
plt.hist(data = pokemon, x = 'height', bins = bins);
plt.xlim((0,6));

#Scale and transformations
# Log-scale histogram of weight: log-spaced bins plus readable tick labels.
bins = 10 ** np.arange(-1, 3 + 0.1, 0.1)
ticks = [0.1, 0.3,1,3,10,30,100,300,1000]
labels = ['{}'.format(v) for v in ticks]
plt.hist(data = pokemon, x = 'weight', bins = bins);
plt.xscale('log');
plt.xticks(ticks, labels);

np.log10(pokemon['weight'].describe())
|
matplotlib_assignments/matplotlibpt1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Graph-Theory-and-Graphs-in-Python" data-toc-modified-id="Graph-Theory-and-Graphs-in-Python-1"><span class="toc-item-num">1 </span>Graph Theory and Graphs in Python</a></span><ul class="toc-item"><li><span><a href="#Introduction-into-Graph-Theory-Using-Python" data-toc-modified-id="Introduction-into-Graph-Theory-Using-Python-1.1"><span class="toc-item-num">1.1 </span>Introduction into Graph Theory Using Python</a></span></li><li><span><a href="#Graphs-as-a-Python-class" data-toc-modified-id="Graphs-as-a-Python-class-1.2"><span class="toc-item-num">1.2 </span>Graphs as a Python class</a></span></li><li><span><a href="#Tree-/-Forest" data-toc-modified-id="Tree-/-Forest-1.3"><span class="toc-item-num">1.3 </span>Tree / Forest</a></span><ul class="toc-item"><li><span><a href="#Spanning-Tree" data-toc-modified-id="Spanning-Tree-1.3.1"><span class="toc-item-num">1.3.1 </span>Spanning Tree</a></span></li><li><span><a href="#Hamiltonian-Game" data-toc-modified-id="Hamiltonian-Game-1.3.2"><span class="toc-item-num">1.3.2 </span>Hamiltonian Game</a></span></li></ul></li><li><span><a href="#Distance-and-Diameter-of-a-graph" data-toc-modified-id="Distance-and-Diameter-of-a-graph-1.4"><span class="toc-item-num">1.4 </span>Distance and Diameter of a graph</a></span></li><li><span><a href="#Connected-Graphs" data-toc-modified-id="Connected-Graphs-1.5"><span class="toc-item-num">1.5 </span>Connected Graphs</a></span></li><li><span><a href="#Graph-Density" data-toc-modified-id="Graph-Density-1.6"><span class="toc-item-num">1.6 </span>Graph Density</a></span></li><li><span><a href="#Implementation-of-the-Erdös-Gallai-theorem" data-toc-modified-id="Implementation-of-the-Erdös-Gallai-theorem-1.7"><span class="toc-item-num">1.7 </span>Implementation of the Erdös-Gallai theorem</a></span></li><li><span><a href="#Degree-Sequence" data-toc-modified-id="Degree-Sequence-1.8"><span class="toc-item-num">1.8 </span>Degree 
Sequence</a></span></li><li><span><a href="#Degree" data-toc-modified-id="Degree-1.9"><span class="toc-item-num">1.9 </span>Degree</a></span></li><li><span><a href="#Paths-in-Graphs" data-toc-modified-id="Paths-in-Graphs-1.10"><span class="toc-item-num">1.10 </span>Paths in Graphs</a></span></li></ul></li></ul></div>
# -
# # Graph Theory and Graphs in Python
#
# [Python Advanced: Graph Theory and Graphs in Python](http://www.python-course.eu/graphs_python.php)
# - Paths
# - Adjacent vertices
# - Two vertices are adjacent when they are both incident to a common edge.
# - Path in an undirected Graph
# - A path in an undirected graph is a sequence of vertices P = ( v1, v2, ..., vn ) ∈ V x V x ... x V such that vi is adjacent to v{i+1} for 1 ≤ i < n. Such a path P is called a path of length n from v1 to vn.
# - (a,c,e,b,c,d) is a path but not a simple path
# - Simple Path
# - A path with no repeated vertices is called a simple path.
# - (a, c, e) is a simple path
#
# - Degree
# - Degree
# - The degree of a vertex v in a graph is the number of edges connecting it, with loops counted twice.
# - The degree of a vertex v is denoted deg(v).
# - The maximum degree of a graph G, denoted by Δ(G), and the minimum degree of a graph, denoted by δ(G), are the maximum and minimum degree of its vertices.
# - regular graph
# - If all the degrees in a graph are the same, the graph is a regular graph. In a regular graph, all degrees are the same, and so we can speak of the degree of the graph.
# - degree sum formular(Handshaking lemma)
# - ∑v ∈ Vdeg(v) = 2 |E|
# - the sum of degrees of all the vertices is equal to the number of edges multiplied by 2.
# - We can conclude that **the number of vertices with odd degree has to be even.**(有奇数度的顶点个数一定是偶数)
# - "handshaking lemma" stems from a popular mathematical problem: In any group of people the number of people who have shaken hands with an odd number of other people from the group is even.
#
# - Degree Sequence
# - The degree sequence of an undirected graph is defined as the sequence of its vertex degrees in a non-increasing order.
# - Isomorphic graphs have the same degree sequence.
# - However, two graphs with the same degree sequence are not necessarily isomorphic.
#
# - The Erdös-Gallai theorem
# - The Erdös-Gallai theorem states that a non-increasing sequence of n numbers di (for i = 1, ..., n) is the degree sequence of a simple graph if and only if the **sum of the sequence is even** and the **following condition** is fulfilled:
# $$\sum_{i=1}^k d_i \leqslant k(k-1) + \sum_{i=k+1}^n min(d_i,k) \quad for \ k \in {1,...,n}$$
# - 什么叫合法的度数序列?就是存在一个无重边无自环(简单)的无向图,使得图中每个点的度数构成的序列为给定的序列。
#
# - Graph Density
# - The graph density is defined as the ratio of the number of edges of a given graph, and the total number of edges, the graph could have.
# - It measures how close a given graph is to a complete graph.
# - $D = \frac {\lvert E \rvert}{\lvert V \rvert(\lvert V \rvert -1)}$
# - A dense graph is a graph in which the number of edges is close to the maximal number of edges. A graph with only a few edges, is called a sparse graph.
# - The precisest mathematical notation uses the big O notation.
# - A dense graph is a graph G = (V, E) in which |E| = Θ(|V|2).
#
# - Connected Graphs
# - A graph is said to be connected if every pair of vertices in the graph is connected.
#     - It is possible to determine with a simple algorithm whether a graph is connected:
# - Choose an arbitrary node x of the graph G as the starting point
# - Determine the set A of all the nodes which can be reached from x.
# - If A is equal to the set of nodes of G, the graph is connected; otherwise it is disconnected.
#
# - Distance and Diameter of a graph
# - The distance "dist" between two vertices in a graph is the length of the shortest path between these vertices.
# - No backtracks, detours, or loops are allowed for the calculation of a distance.
# - The eccentricity of a vertex s of a graph g is the maximal distance to every other vertex of the graph:
# - e(s) = max( { dist(s,v) | v ∈ V }), (V is the set of all vertices of g)
# - The diameter d of a graph is defined as the maximum eccentricity of any vertex in the graph.
# - **This means that the diameter is the length of the shortest path between the most distanced nodes.**
# - 最短路径里面最长的那个就是图的直径。
# - To determine the diameter of a graph,
# - first find the shortest path between each pair of vertices.
# - The greatest length of any of these paths is the diameter of the graph.
#
# - Tree / Forest
# - A tree is an undirected graph which contains no cycles(某条路径的起点和终点相同). This means that any two vertices of the graph are connected by exactly one simple path.
# - **A forest is a disjoint union of trees.**
# - Contrary to forests in nature, a forest in graph theory can consist of a single tree!
# - A graph with one vertex and no edge is a tree (and a forest).
#
# - Spanning Tree
# - A spanning tree T of a connected, undirected graph G is a subgraph G' of G, which is a tree, and G' contains all the vertices and a subset of the edges of G.
# - G' contains all the edges of G, if G is a tree graph.
# - Informally, a spanning tree of G is a selection of edges of G that form a tree spanning every vertex.
# - That is, every vertex lies in the tree, but no cycles (or loops) are contained.
#
# - Hamiltonian Game
# - An Hamiltonian path is a path in an undirected or directed graph that visits each vertex **exactly once**.
# - A Hamiltonian cycle (or circuit) is a Hamiltonian path that is a cycle.
# - Note for computer scientists: Generally, it is not not possible to determine, whether such paths or cycles exist in arbitrary graphs, because the Hamiltonian path problem has been proven to be NP-complete.
#     - It is named after William Rowan Hamilton, who invented the so-called "icosian game", or Hamilton's puzzle, which involves finding a Hamiltonian cycle in the edge graph of the dodecahedron.
# - Hamilton solved this problem using the icosian calculus, an algebraic structure based on roots of unity with many similarities to the quaternions, which he also invented.
# ## Introduction into Graph Theory Using Python
# 
# Example graph as an adjacency dictionary: each key is a vertex, its value is
# the list of adjacent vertices.  "f" has no neighbours (an isolated vertex).
graph = { "a" : ["c"],
          "b" : ["c", "e"],
          "c" : ["a", "b", "d", "e"],
          "d" : ["c"],
          "e" : ["c", "b"],
          "f" : []
        }
graph
def generate_edges(graph):
    """Return every (node, neighbour) pair of *graph* as a list of tuples.

    graph -- adjacency dictionary mapping each node to its neighbour list.
    Each undirected edge appears once per direction stored in the dictionary.
    """
    return [(node, neighbour)
            for node in graph
            for neighbour in graph[node]]
print(generate_edges(graph))  # demo: list the edges of the example graph above
def find_isolated_nodes(graph):
    """Return a list of the isolated nodes (nodes with no neighbours) of *graph*.

    graph -- adjacency dictionary mapping each node to its neighbour list.
    """
    isolated = []
    for node in graph:
        if not graph[node]:
            # Append the node itself.  The original `isolated += node` extended
            # the list with the node's individual *characters*, which is only
            # correct for single-character node names.
            isolated.append(node)
    return isolated
print(find_isolated_nodes(graph))  # demo: expect ['f'] for the example graph
# ## Graphs as a Python class
# 
# +
""" A Python Class
A simple Python graph class, demonstrating the essential
facts and functionalities of graphs.
"""
class Graph(object):
    """A simple undirected graph backed by an adjacency-list dictionary.

    The vertices are the dictionary keys; each value is the list of
    adjacent vertices.  A vertex listed in its own adjacency list is a
    loop (counted double by ``vertex_degree``).
    """

    def __init__(self, graph_dict=None):
        """Initialize the graph.

        graph_dict -- adjacency-list dict; if None (or omitted) an
        empty dictionary is used.
        """
        if graph_dict is None:  # "is None", not "== None" (PEP 8)
            graph_dict = {}
        self.__graph_dict = graph_dict

    def vertices(self):
        """Return the vertices of the graph as a list."""
        return list(self.__graph_dict.keys())

    def edges(self):
        """Return the edges of the graph as a list of vertex sets."""
        return self.__generate_edges()

    def add_vertex(self, vertex):
        """If "vertex" is not already in the graph, add it with an empty
        adjacency list; otherwise do nothing."""
        if vertex not in self.__graph_dict:
            self.__graph_dict[vertex] = []

    def add_edge(self, edge):
        """Add an edge given as a set, tuple or list of one or two vertices.

        A one-vertex edge (e.g. {"a"} or ("a", "a")) is added as a loop.
        Multiple edges between the same two vertices are allowed.
        """
        edge = set(edge)
        vertex1 = edge.pop()
        # a loop {v, v} collapses to a one-element set; the original
        # tuple-unpacking crashed with ValueError in that case
        vertex2 = edge.pop() if edge else vertex1
        if vertex1 in self.__graph_dict:
            self.__graph_dict[vertex1].append(vertex2)
        else:
            self.__graph_dict[vertex1] = [vertex2]

    def __generate_edges(self):
        """Generate the edges of the graph.

        Edges are represented as sets with one (a loop back to the
        vertex) or two vertices; an undirected edge stored under both
        endpoints is reported only once.
        """
        edges = []
        for vertex in self.__graph_dict:
            for neighbour in self.__graph_dict[vertex]:
                # a duplicate {v, n} / {n, v} pair counts only once
                if {neighbour, vertex} not in edges:
                    edges.append({vertex, neighbour})
        return edges

    def __str__(self):
        """Human-readable listing of vertices and edges."""
        res = "vertices: "
        for k in self.__graph_dict:
            res += str(k) + " "
        res += "\nedges: "
        for edge in self.__generate_edges():
            res += str(edge) + " "
        return res

    def find_path(self, start_vertex, end_vertex, path=None):
        """Return one simple path from start_vertex to end_vertex as a
        list of vertices, or None if no path exists."""
        if path is None:
            path = []
        graph = self.__graph_dict
        path = path + [start_vertex]
        if start_vertex == end_vertex:
            return path
        if start_vertex not in graph:
            return None
        for vertex in graph[start_vertex]:
            if vertex not in path:
                # depth-first recursion; first complete path wins
                extended_path = self.find_path(vertex,
                                               end_vertex,
                                               path)
                if extended_path:
                    return extended_path
        return None

    def find_all_paths(self, start_vertex, end_vertex, path=None):
        """Return every simple path from start_vertex to end_vertex."""
        if path is None:  # avoid the mutable-default-argument pitfall
            path = []
        graph = self.__graph_dict
        path = path + [start_vertex]
        if start_vertex == end_vertex:
            return [path]
        if start_vertex not in graph:
            return []
        paths = []
        for vertex in graph[start_vertex]:
            if vertex not in path:
                # recurse, then flatten the sub-results
                for p in self.find_all_paths(vertex, end_vertex, path):
                    paths.append(p)
        return paths

    def vertex_degree(self, vertex):
        """Return the degree of *vertex*: the number of adjacent vertices,
        with loops counted double (every occurrence of the vertex in its
        own adjacency list adds one extra)."""
        adj_vertices = self.__graph_dict[vertex]
        degree = len(adj_vertices) + adj_vertices.count(vertex)
        return degree

    def find_isolated_vertices(self):
        """Return a list of vertices with no adjacent vertices."""
        graph = self.__graph_dict
        isolated = []
        for vertex in graph:
            if not graph[vertex]:
                isolated += [vertex]
        return isolated

    def delta(self):
        """Return the minimum degree of the vertices."""
        # avoid shadowing the builtin min(); the sentinel is the
        # original's return value for an empty graph
        minimum = 100000000
        for vertex in self.__graph_dict:
            vertex_degree = self.vertex_degree(vertex)
            if vertex_degree < minimum:
                minimum = vertex_degree
        return minimum

    def Delta(self):
        """Return the maximum degree of the vertices."""
        maximum = 0
        for vertex in self.__graph_dict:
            vertex_degree = self.vertex_degree(vertex)
            if vertex_degree > maximum:
                maximum = vertex_degree
        return maximum

    def degree_sequence(self):
        """Return the degree sequence as a non-increasing tuple."""
        seq = [self.vertex_degree(vertex) for vertex in self.__graph_dict]
        seq.sort(reverse=True)
        return tuple(seq)

    @staticmethod
    def is_degree_sequence(sequence):
        """Return True if *sequence* is a degree sequence, i.e. a
        non-increasing sequence; otherwise False."""
        # all() is True exactly when every adjacent pair is ordered
        return all(x >= y for x, y in zip(sequence, sequence[1:]))

    @staticmethod
    def erdoes_gallai(dsequence):
        """Check whether *dsequence* fulfils the Erdoes-Gallai
        inequality, i.e. is realizable as a simple graph."""
        if sum(dsequence) % 2:
            # an odd degree sum can never belong to a graph
            return False
        if Graph.is_degree_sequence(dsequence):
            for k in range(1, len(dsequence) + 1):
                left = sum(dsequence[:k])
                right = k * (k - 1) + sum(min(x, k) for x in dsequence[k:])
                if left > right:
                    return False
        else:
            # sequence is not non-increasing
            return False
        return True

    def density(self):
        """Return the density 2|E| / (|V| (|V| - 1)) of the graph."""
        g = self.__graph_dict
        V = len(g.keys())
        E = len(self.edges())
        return 2.0 * E / (V * (V - 1))

    def is_connected(self,
                     vertices_encountered=None,
                     start_vertex=None):
        """Return True iff every vertex is reachable from start_vertex.

        Recursive DFS; *vertices_encountered* is shared (mutated) across
        the recursion so each vertex is visited once.
        """
        if vertices_encountered is None:
            vertices_encountered = set()
        gdict = self.__graph_dict
        vertices = list(gdict.keys())  # "list" necessary in Python 3
        if not start_vertex:
            # choose a vertex from the graph as a starting point
            start_vertex = vertices[0]
        vertices_encountered.add(start_vertex)
        if len(vertices_encountered) != len(vertices):
            for vertex in gdict[start_vertex]:
                if vertex not in vertices_encountered:
                    if self.is_connected(vertices_encountered, vertex):
                        return True
        else:
            return True
        return False

    def diameter(self):
        """Return the diameter: the length of the longest among all
        pairwise shortest paths.  Assumes the graph is connected."""
        v = self.vertices()
        # every unordered pair of distinct vertices
        pairs = [(v[i], v[j]) for i in range(len(v) - 1) for j in range(i + 1, len(v))]
        smallest_paths = []
        for (s, e) in pairs:
            paths = self.find_all_paths(s, e)
            # shortest path (fewest vertices) for this pair
            smallest = sorted(paths, key=len)[0]
            smallest_paths.append(smallest)
        smallest_paths.sort(key=len)
        # longest shortest path is at the end of the list;
        # its edge count (vertices - 1) is the diameter
        diameter = len(smallest_paths[-1]) - 1
        return diameter
if __name__ == "__main__":
    # demo graph: "c" carries a loop ("c" in its own list), "f" is isolated
    g = { "a" : ["d"],
          "b" : ["c"],
          "c" : ["b", "c", "d", "e"],
          "d" : ["a", "c"],
          "e" : ["c"],
          "f" : []
        }
    graph = Graph(g)
# +
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
print("Add vertex:")
graph.add_vertex("z")       # new isolated vertex "z"
print("Vertices of graph:")
print(graph.vertices())
print("Add an edge:")
graph.add_edge({"a","z"})   # connect the existing "a" to the new "z"
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
print('Adding an edge {"x","y"} with new vertices:')
graph.add_edge({"x","y"})   # neither endpoint exists yet
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
# -
# ## Tree / Forest
# A tree is an undirected graph which contains no cycles (a cycle is a path whose start and end vertex coincide). This means that any two vertices of the graph are connected by exactly one simple path.
#
# **A forest is a disjoint union of trees.**
# Contrary to forests in nature, a forest in graph theory can consist of a single tree!
#
# A graph with one vertex and no edge is a tree (and a forest).
# A tree and forest:
# 
# Two Trees:
# 
# ### Spanning Tree
# A spanning tree T of a connected, undirected graph G is a subgraph G' of G, which is a tree, and G' contains all the vertices and a subset of the edges of G.
#
# G' contains all the edges of G, if G is a tree graph.
#
# Informally, a spanning tree of G is a selection of edges of G that form a tree spanning every vertex.
# That is, every vertex lies in the tree, but no cycles (or loops) are contained.
# ### Hamiltonian Game
# A Hamiltonian path is a path in an undirected or directed graph that visits each vertex **exactly once**.
#
# A Hamiltonian cycle (or circuit) is a Hamiltonian path that is a cycle.
#
# Note for computer scientists: in general there is no efficient way to determine whether such paths or cycles exist in arbitrary graphs, because the Hamiltonian path problem has been proven to be NP-complete.
#
# It is named after <NAME> who invented the so-called "icosian game", or Hamilton's puzzle, which involves finding a Hamiltonian cycle in the edge graph of the dodecahedron.
#
# Hamilton solved this problem using the icosian calculus, an algebraic structure based on roots of unity with many similarities to the quaternions, which he also invented.
# ## Distance and Diameter of a graph
#
# The distance "dist" between two vertices in a graph is the length of the shortest path between these vertices. No backtracks, detours, or loops are allowed for the calculation of a distance.
#
# The eccentricity of a vertex s of a graph g is the maximal distance to every other vertex of the graph:
# e(s) = max( { dist(s,v) | v ∈ V })
# (V is the set of all vertices of g)
#
# The diameter d of a graph is defined as the maximum eccentricity of any vertex in the graph.
# **This means that the diameter is the length of the shortest path between the most distanced nodes.**
#
# To determine the diameter of a graph,
# - first find the shortest path between each pair of vertices.
# - The greatest length of any of these paths is the diameter of the graph.
#
# The longest among all the shortest paths is the diameter of the graph.
v = list(g.keys())   # vertex list of the demo graph
v
# all unordered pairs of distinct vertices
pairs = [ (v[i],v[j]) for i in range(len(v)-1) for j in range(i+1, len(v))]
pairs
test = [['h','s','c'],['h'],['h', 'c']]
sorted(test, key=len)[0]   # shortest list; sorted() leaves `test` unchanged
test.sort(key=len)         # in-place sort by length
test
# +
# a connected example graph; its diameter is 3 (e.g. the pair a-f)
g = { "a" : ["c"],
      "b" : ["c","e","f"],
      "c" : ["a","b","d","e"],
      "d" : ["c"],
      "e" : ["b","c","f"],
      "f" : ["b","e"]
    }
graph = Graph(g)
diameter = graph.diameter()
print(diameter)
# ## Connected Graphs
#
# A graph is said to be connected if every pair of vertices in the graph is connected.
#
# It is possible to determine with a simple algorithm whether a graph is connected:
# - Choose an arbitrary node x of the graph G as the starting point
# - Determine the set A of all the nodes which can be reached from x.
# - If A is equal to the set of nodes of G, the graph is connected; otherwise it is disconnected.
# +
# g is disconnected ("f" has no edges); g2 connects "f" to "a";
# g3 additionally gives "b" a loop -- still connected.
g = { "a" : ["d"],
      "b" : ["c"],
      "c" : ["b", "c", "d", "e"],
      "d" : ["a", "c"],
      "e" : ["c"],
      "f" : []
    }
g2 = { "a" : ["d","f"],
       "b" : ["c"],
       "c" : ["b", "c", "d", "e"],
       "d" : ["a", "c"],
       "e" : ["c"],
       "f" : ["a"]
     }
g3 = { "a" : ["d","f"],
       "b" : ["c","b"],
       "c" : ["b", "c", "d", "e"],
       "d" : ["a", "c"],
       "e" : ["c"],
       "f" : ["a"]
     }
graph = Graph(g)
print(graph)
print(graph.is_connected())   # False: "f" is unreachable
graph = Graph(g2)
print(graph)
print(graph.is_connected())   # True
graph = Graph(g3)
print(graph)
print(graph.is_connected())   # True
# ## Graph Density
# +
g = { "a" : ["d","f"],
      "b" : ["c","b"],
      "c" : ["b", "c", "d", "e"],
      "d" : ["a", "c"],
      "e" : ["c"],
      "f" : ["a"]
    }
# every pair of vertices is joined: density 1.0
complete_graph = {
    "a" : ["b","c"],
    "b" : ["a","c"],
    "c" : ["a","b"]
}
# no edges at all: density 0.0
isolated_graph = {
    "a" : [],
    "b" : [],
    "c" : []
}
graph = Graph(g)
print(graph.density())
graph = Graph(complete_graph)
print(graph.density())
graph = Graph(isolated_graph)
print(graph.density())
# ## Implementation of the Erdös-Gallai theorem
# The Erdös-Gallai theorem states that a non-increasing sequence of n numbers di (for i = 1, ..., n) is the degree sequence of a simple graph if and only if the sum of the sequence is even and the following condition is fulfilled:
# $$\sum_{i=1}^k d_i \leqslant k(k-1) + \sum_{i=k+1}^n min(d_i,k) \quad for \ k \in {1,...,n}$$
#
# What is a valid degree sequence? One for which a simple undirected graph (no multi-edges, no loops) exists whose vertex degrees form exactly the given sequence.
def erdoes_gallai(dsequence):
    """Check whether *dsequence* fulfils the Erdoes-Gallai inequality.

    Returns False immediately when the degree sum is odd; otherwise
    verifies sum_{i<=k} d_i <= k(k-1) + sum_{i>k} min(d_i, k) for
    every k from 1 to len(dsequence).
    """
    if sum(dsequence) % 2:
        # an odd degree sum can never belong to a graph
        return False
    n = len(dsequence)
    for k in range(1, n + 1):
        lhs = sum(dsequence[:k])
        rhs = k * (k - 1) + sum(min(d, k) for d in dsequence[k:])
        if lhs > rhs:
            return False
    return True
# Compute the degree sequence first -- the original cell used `t` at the
# top before defining it, which raises a NameError when run top to bottom.
t = graph.degree_sequence()
t
graph.erdoes_gallai(t)
# Walk through the Erdoes-Gallai inequality term by term for each k.
for k in range(1, len(t) + 1):
    left = sum(t[:k])
    print(left)
    right = k * (k-1) + sum([min(x,k) for x in t[k:]])
    print(right)
    if left>right:
        print('False')
    else:
        print('True')
# ## Degree Sequence
#
# Isomorphic graphs have the same degree sequence.
# However, two graphs with the same degree sequence are not necessarily isomorphic.
print('Degree sequence:')
print(graph.degree_sequence())   # non-increasing tuple of vertex degrees
# ## Degree
print('Degree of vertex:')
print(graph.vertex_degree("c"))  # loops count double
print('Isolated vertices:')
print(graph.find_isolated_vertices())
print('Minimum degree of the vertices:')
print(graph.delta())
print('Maximum degree of the vertices:')
print(graph.Delta())
# ## Paths in Graphs
# +
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
print('The path from vertex "a" to vertex "b":')
path = graph.find_path("a", "b")
print(path)
print('The path from vertex "a" to vertex "f":')
path = graph.find_path("a", "f")
print(path)
print('The path from vertex "c" to vertex "c":')
path = graph.find_path("c", "c")   # trivial path: ['c']
print(path)
# +
# a fresh demo graph for enumerating *all* simple paths
g = { "a" : ["d", "f"],
      "b" : ["c"],
      "c" : ["b", "c", "d", "e"],
      "d" : ["a", "c"],
      "e" : ["c"],
      "f" : ["d"]
    }
graph = Graph(g)
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
print('All paths from vertex "a" to vertex "b":')
path = graph.find_all_paths("a", "b")
print(path)
print('All paths from vertex "a" to vertex "f":')
path = graph.find_all_paths("a", "f")
print(path)
print('All paths from vertex "c" to vertex "c":')
path = graph.find_all_paths("c", "c")   # [['c']]
print(path)
# -
|
ProbabilisticGraphModel/GraphTheory_Tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Languages Lecture)
# language: python
# name: datalanguages
# ---
# # Gaussian Process Regression
#
# At times you don't care about the underlying model for your data points and just want a model that describes the data. One such fitting technique is known as Gaussian process regression (also known as kriging). This kind of regression assumes all the data points are drawn from a common covariance function. This function is used to generate an (infinite) set of functions and only keeps the ones that pass through the observed data.
#
# ## Packages being used
# + `sklearn`: has a Gaussian process regression function
#
# ## Relevant documentation
# + `sklearn`: http://scikit-learn.org/stable/modules/gaussian_process.html
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel
from matplotlib import pyplot as plt
import mpl_style
# %matplotlib inline
plt.style.use(mpl_style.style1)
# ## The squared exponential covariance (or Radial-basis function)
# As an example we will use the squared exponential covariance function:
# $$ \operatorname{Cov}{(x_1, x_2; h)} = \exp{\left( \frac{-(x_1 - x_2)^2}{2h^2} \right)} $$
# Lets using this function to draw some _unconstrained_ functions:
# +
# define the function
def squared_exponential(x1, x2, h):
    """Squared-exponential (RBF) covariance: exp(-(x1 - x2)^2 / (2 h^2)).

    Broadcasts over array inputs, so squared_exponential(x, x[:, None], h)
    yields the full covariance matrix.
    """
    scaled_sq_dist = -0.5 * (x1 - x2) ** 2 / h ** 2
    return np.exp(scaled_sq_dist)
# draw samples from it
x = np.linspace(0, 10, 1000)
h = 1
mu = np.zeros(len(x))
# full covariance matrix via broadcasting: x (row) against x[:, None] (column)
C = squared_exponential(x, x[:, None], h)
# six unconstrained functions drawn from the prior N(mu, C)
draws = np.random.multivariate_normal(mu, C, 6)
plt.figure(1, figsize=(18, 8))
plt.subplot(121)
plt.plot(x, draws.T)
plt.xlabel('x')
plt.ylabel('f(x)')
plt.subplot(122)
plt.imshow(C, origin='upper', extent=[0, 10, 10, 0])
plt.colorbar()
plt.tight_layout();
# -
# ## Constrain the model
#
# Assume we have some data points; we can use Gaussian process regression to only pick the models that pass through those points:
# +
x1 = np.array([1, 3, 5, 6, 7, 8])
y1 = x1 * np.sin(x1)   # noise-free training targets
# constant * RBF kernel; the (lo, hi) tuples bound the hyperparameter search
kernel = ConstantKernel(1, (1e-3, 1e3)) * RBF(10, (1e-2, 1e2))
gp1 = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=9)
gp1.fit(x1[:, None], y1)
f1, f1_err = gp1.predict(x[:, None], return_std=True)
print('Coefficient of determination R^2 of the prediction: {0}'.format(gp1.score(x1[:, None], y1)))
print('Best fit kernel: {0}'.format(gp1.kernel_))
plt.figure(2, figsize=(10, 8))
# plot the best fit with 95% confidence interval (1.96 sigma)
plt.plot(x, f1, '-', label='Prediction')
plt.fill_between(x, f1 - 1.96 * f1_err, f1 + 1.96 * f1_err, alpha=0.3, label='95% confidence interval')
# plot 10 realizations of the fit
plt.plot(x, gp1.sample_y(x[:, None], n_samples=10), color='k', alpha=0.1)
# plot the original data
plt.plot(x1, y1, 'ok', ms=6, label='Observed')
# plot the true function
plt.plot(x, x * np.sin(x), '--', label='True')
# labels and legend
plt.xlabel('x')
plt.ylabel('f(x)')
plt.ylim(-6, 12)
plt.legend(loc='upper left', ncol=2)
plt.tight_layout();
# -
# ## Lets add some noise to the data
# +
# heteroscedastic noise: per-point sigma drawn from [0.5, 1.5)
dy = 0.5 + np.random.random(y1.shape)
noise = np.random.normal(0, dy)
y2 = y1 + noise
# alpha=dy**2 passes the per-point noise variance to the GP
gp2 = GaussianProcessRegressor(kernel=kernel, alpha=dy**2, n_restarts_optimizer=10)
gp2.fit(x1[:, None], y2)
f2, f2_err = gp2.predict(x[:, None], return_std=True)
print('Coefficient of determination R^2 of the prediction: {0}'.format(gp2.score(x1[:, None], y2, sample_weight=1 / dy**2)))
print('Best fit kernel: {0}'.format(gp2.kernel_))
plt.figure(3, figsize=(10, 8))
plt.plot(x, f2, '-', label='Prediction')
plt.fill_between(x, f2 - 1.96 * f2_err, f2 + 1.96 * f2_err, alpha=0.3, label='95% confidence interval')
plt.plot(x, gp2.sample_y(x[:, None], n_samples=10), color='k', alpha=0.1)
plt.errorbar(x1, y2, yerr=1.96*dy, fmt='ok', ms=6, label='Observed')
plt.plot(x, x * np.sin(x), '--', label='True')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.ylim(-6, 12)
plt.legend(loc='upper left', ncol=2)
plt.tight_layout()
# -
# ## Other notes
# + There are many covariance kernels you can pick;
# + `ConstantKernel`: a constant value that can be multiplied or added to any of the other kernels
# + `WhiteKernel`: a white noise kernel
# + `RBF`: Radial-based function, smooth kernel parameterized by a length-scale
# + `Matern`: non-smooth generalization of `RBF`, parameterized by length-scale and smoothness
# + `RationalQuadratic`: a (infinite sum) mixture of different `RBF`'s each with different length-scales
# + `ExpSineSquared`: periodic function kernel, parameterized by a length-scale and periodicity
# + `DotProduct`: a non-stationary kernel commonly combined with exponentiation to produce a 'polynomial like' fit (e.g. raising `DotProduct` to the power of 2 will give a quadratic like fit). This is equivalent to doing a spline fit.
# + The kernel parameter (e.g. the first parameter in the `RBF` function) is automatically fit within the bounds provided (e.g. the second parameter in `RBF`)
# + `n_restarts_optimizer` indicates the number of times to re-run the optimizer starting at different locations (e.g. to find a global max instead of a local max)
# + All `X` positions must be unique
# + The computational complexity is $O(N^3)$ where $N$ is the number of data points
|
Gaussian_process_regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# 
#
# > **Copyright (c) 2021 <EMAIL>ifAI Sdn. Bhd.**<br>
# <br>
# This program is part of OSRFramework. You can redistribute it and/or modify
# <br>it under the terms of the GNU Affero General Public License as published by
# <br>the Free Software Foundation, either version 3 of the License, or
# <br>(at your option) any later version.
# <br>
# <br>This program is distributed in the hope that it will be useful
# <br>but WITHOUT ANY WARRANTY; without even the implied warranty of
# <br>MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# <br>GNU Affero General Public License for more details.
# <br>
# <br>You should have received a copy of the GNU Affero General Public License
# <br>along with this program. If not, see <http://www.gnu.org/licenses/>.
# <br>
# + [markdown] papermill={"duration": 0.030845, "end_time": "2021-06-04T13:10:20.720042", "exception": false, "start_time": "2021-06-04T13:10:20.689197", "status": "completed"} slideshow={"slide_type": "slide"} tags=[]
# # Introduction
#
# Selecting specific values of a pandas DataFrame or Series to work on is an implicit step in almost any data operation you'll run, so one of the first things you need to learn in working with data in Python is how to go about selecting the data points relevant to you quickly and effectively.
# -
# # Notebook Content
#
# * [Native Accessors](#Native-accessors)
#
#
# * [Indexing in Pandas](#Indexing-in-pandas)
# * [Index-based Selection](#Index-based-selection)
# * [Label-based Selection](#Label-based-selection)
#
#
# * [Manipulating the Index](#Manipulating-the-index)
#
#
# * [Conditional Selection](#Conditional-selection)
#
#
# * [Assigning Data](#Assigning-data)
# + _kg_hide-input=true papermill={"duration": 1.856161, "end_time": "2021-06-04T13:10:22.606975", "exception": false, "start_time": "2021-06-04T13:10:20.750814", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
import pandas as pd
# water-quality dataset: one row per water sample, "Potability" is the label
water_potability = pd.read_csv("../../../resources/day_01/water_potability.csv")
# + [markdown] papermill={"duration": 0.030227, "end_time": "2021-06-04T13:10:22.729355", "exception": false, "start_time": "2021-06-04T13:10:22.699128", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# # Native accessors
#
# Native Python objects provide good ways of indexing data. Pandas carries all of these over, which helps make it easy to start with.
#
# Consider this DataFrame:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.064315, "end_time": "2021-06-04T13:10:22.824681", "exception": false, "start_time": "2021-06-04T13:10:22.760366", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability  # display the full DataFrame
# + [markdown] papermill={"duration": 0.031287, "end_time": "2021-06-04T13:10:22.887586", "exception": false, "start_time": "2021-06-04T13:10:22.856299", "status": "completed"} slideshow={"slide_type": "slide"} tags=[]
# In Python, we can access the property of an object by accessing it as an attribute. A `book` object, for example, might have a `title` property, which we can access by calling `book.title`. Columns in a pandas DataFrame work in much the same way.
#
# Hence to access the `ph` property of `water_potability` we can use:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.044373, "end_time": "2021-06-04T13:10:22.962680", "exception": false, "start_time": "2021-06-04T13:10:22.918307", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.ph  # attribute-style column access
# + [markdown] papermill={"duration": 0.031759, "end_time": "2021-06-04T13:10:23.025659", "exception": false, "start_time": "2021-06-04T13:10:22.993900", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# If we have a Python dictionary, we can access its values using the indexing (`[]`) operator. We can do the same with columns in a DataFrame:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.043286, "end_time": "2021-06-04T13:10:23.101325", "exception": false, "start_time": "2021-06-04T13:10:23.058039", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability['ph']  # indexing-operator column access
# + [markdown] papermill={"duration": 0.031759, "end_time": "2021-06-04T13:10:23.165254", "exception": false, "start_time": "2021-06-04T13:10:23.133495", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# These are the two ways of selecting a specific Series out of a DataFrame. Neither of them is more or less syntactically valid than the other, but the indexing operator `[]` does have the advantage that it can handle column names with reserved characters in them (e.g. if we had a `ph values` column, `water_potability.ph values` wouldn't work).
#
# Doesn't a pandas Series look kind of like a fancy dictionary? It pretty much is, so it's no surprise that, to drill down to a single specific value, we need only use the indexing operator `[]` once more:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.048049, "end_time": "2021-06-04T13:10:23.245830", "exception": false, "start_time": "2021-06-04T13:10:23.197781", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability['ph'][10]  # single value: column first, then row label
# + [markdown] papermill={"duration": 0.030968, "end_time": "2021-06-04T13:10:23.308042", "exception": false, "start_time": "2021-06-04T13:10:23.277074", "status": "completed"} slideshow={"slide_type": "slide"} tags=[]
# # Indexing in pandas
#
# The indexing operator and attribute selection are nice because they work just like they do in the rest of the Python ecosystem. As a novice, this makes them easy to pick up and use. However, pandas has its own accessor operators, `loc` and `iloc`. For more advanced operations, these are the ones you're supposed to be using.
#
# ### Index-based selection
#
# Pandas indexing works in one of two paradigms. The first is **index-based selection**: selecting data based on its numerical position in the data. `iloc` follows this paradigm.
#
# To select the first row of data in a DataFrame, we may use the following:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.043976, "end_time": "2021-06-04T13:10:23.383233", "exception": false, "start_time": "2021-06-04T13:10:23.339257", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.iloc[0]  # first row, by integer position
# + [markdown] papermill={"duration": 0.032806, "end_time": "2021-06-04T13:10:23.449168", "exception": false, "start_time": "2021-06-04T13:10:23.416362", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# Both `loc` and `iloc` are row-first, column-second. This is the opposite of what we do in native Python, which is column-first, row-second.
#
# This means that it's marginally easier to retrieve rows, and marginally harder to get retrieve columns. To get a column with `iloc`, we can do the following:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.045914, "end_time": "2021-06-04T13:10:23.528265", "exception": false, "start_time": "2021-06-04T13:10:23.482351", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.iloc[:, 0]  # all rows of the first column
# + [markdown] papermill={"duration": 0.03279, "end_time": "2021-06-04T13:10:23.594874", "exception": false, "start_time": "2021-06-04T13:10:23.562084", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# On its own, the `:` operator, which also comes from native Python, means "everything". When combined with other selectors, however, it can be used to indicate a range of values. For example, to select the `ph` column from just the first, second, and third row, we would do:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.045644, "end_time": "2021-06-04T13:10:23.674176", "exception": false, "start_time": "2021-06-04T13:10:23.628532", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.iloc[:3, 0]  # rows 0-2 of the first column (end exclusive)
# + [markdown] papermill={"duration": 0.034039, "end_time": "2021-06-04T13:10:23.742410", "exception": false, "start_time": "2021-06-04T13:10:23.708371", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# Or, to select just the second and third entries, we would do:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.04652, "end_time": "2021-06-04T13:10:23.823211", "exception": false, "start_time": "2021-06-04T13:10:23.776691", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.iloc[1:3, 0]  # rows 1-2 of the first column
# + [markdown] papermill={"duration": 0.034266, "end_time": "2021-06-04T13:10:23.893018", "exception": false, "start_time": "2021-06-04T13:10:23.858752", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# It's also possible to pass a list:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.046793, "end_time": "2021-06-04T13:10:23.975251", "exception": false, "start_time": "2021-06-04T13:10:23.928458", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.iloc[[0, 1, 2], 0]  # explicit list of row positions
# + [markdown] papermill={"duration": 0.035275, "end_time": "2021-06-04T13:10:24.047389", "exception": false, "start_time": "2021-06-04T13:10:24.012114", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# Finally, it's worth knowing that negative numbers can be used in selection. This will start counting backwards from the _end_ of the values. So for example here are the last five elements of the dataset.
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.059734, "end_time": "2021-06-04T13:10:24.141813", "exception": false, "start_time": "2021-06-04T13:10:24.082079", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.iloc[-5:]  # last five rows
# + [markdown] papermill={"duration": 0.034908, "end_time": "2021-06-04T13:10:24.212995", "exception": false, "start_time": "2021-06-04T13:10:24.178087", "status": "completed"} slideshow={"slide_type": "slide"} tags=[]
# ### Label-based selection
#
# The second paradigm for attribute selection is the one followed by the `loc` operator: **label-based selection**. In this paradigm, it's the data index value, not its position, which matters.
#
# For example, to get the first entry in `water_potability`, we would now do the following:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.045819, "end_time": "2021-06-04T13:10:24.294311", "exception": false, "start_time": "2021-06-04T13:10:24.248492", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.loc[0, 'ph']  # label-based: row label 0, column 'ph'
# + [markdown] papermill={"duration": 0.036204, "end_time": "2021-06-04T13:10:24.366969", "exception": false, "start_time": "2021-06-04T13:10:24.330765", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# `iloc` is conceptually simpler than `loc` because it ignores the dataset's indices. When we use `iloc` we treat the dataset like a big matrix (a list of lists), one that we have to index into by position. `loc`, by contrast, uses the information in the indices to do its work. Since your dataset usually has meaningful indices, it's usually easier to do things using `loc` instead. For example, here's one operation that's much easier using `loc`:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.05567, "end_time": "2021-06-04T13:10:24.460699", "exception": false, "start_time": "2021-06-04T13:10:24.405029", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.loc[:, ['Solids', 'Conductivity', 'Turbidity']]  # select columns by name
# + [markdown] papermill={"duration": 0.036254, "end_time": "2021-06-04T13:10:24.532694", "exception": false, "start_time": "2021-06-04T13:10:24.496440", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# ### Choosing between `loc` and `iloc`
#
# When choosing or transitioning between `loc` and `iloc`, there is one "gotcha" worth keeping in mind, which is that the two methods use slightly different indexing schemes.
#
# `iloc` uses the Python stdlib indexing scheme, where the first element of the range is included and the last one excluded. So `0:10` will select entries `0,...,9`. `loc`, meanwhile, indexes inclusively. So `0:10` will select entries `0,...,10`.
#
# Why the change? Remember that loc can index any stdlib type: strings, for example. If we have a DataFrame with index values `Apples, ..., Potatoes, ...`, and we want to select "all the alphabetical fruit choices between Apples and Potatoes", then it's a lot more convenient to index `df.loc['Apples':'Potatoes']` than it is to index something like `df.loc['Apples':'Potatoet']` (`t` coming after `s` in the alphabet).
#
# This is particularly confusing when the DataFrame index is a simple numerical list, e.g. `0,...,1000`. In this case `df.iloc[0:1000]` will return 1000 entries, while `df.loc[0:1000]` return 1001 of them! To get 1000 elements using `loc`, you will need to go one lower and ask for `df.loc[0:999]`.
#
# Otherwise, the semantics of using `loc` are the same as those for `iloc`.
# + [markdown] papermill={"duration": 0.037061, "end_time": "2021-06-04T13:10:24.607195", "exception": false, "start_time": "2021-06-04T13:10:24.570134", "status": "completed"} slideshow={"slide_type": "slide"} tags=[]
# # Manipulating the index
#
# Label-based selection derives its power from the labels in the index. Critically, the index we use is not immutable. We can manipulate the index in any way we see fit.
#
# The `set_index()` method can be used to do the job. Here is what happens when we `set_index` to the `ph` field:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.110487, "end_time": "2021-06-04T13:10:24.754654", "exception": false, "start_time": "2021-06-04T13:10:24.644167", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.set_index("ph")  # returns a copy re-indexed by the ph column
# + [markdown] papermill={"duration": 0.036215, "end_time": "2021-06-04T13:10:24.828081", "exception": false, "start_time": "2021-06-04T13:10:24.791866", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# This is useful if you can come up with an index for the dataset which is better than the current one.
# + [markdown] papermill={"duration": 0.036151, "end_time": "2021-06-04T13:10:24.900815", "exception": false, "start_time": "2021-06-04T13:10:24.864664", "status": "completed"} slideshow={"slide_type": "slide"} tags=[]
# # Conditional selection
#
# So far we've been indexing various strides of data, using structural properties of the DataFrame itself. To do *interesting* things with the data, however, we often need to ask questions based on conditions.
#
# For example, suppose that we're interested specifically in water samples that have Potability == 1.
#
# We can start by checking if each sample has Potability == 1:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.066673, "end_time": "2021-06-04T13:10:25.005921", "exception": false, "start_time": "2021-06-04T13:10:24.939248", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.Potability == 1
# + [markdown] papermill={"duration": 0.037731, "end_time": "2021-06-04T13:10:25.082404", "exception": false, "start_time": "2021-06-04T13:10:25.044673", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# This operation produced a Series of `True`/`False` booleans based on the `Potability` of each record. This result can then be used inside of `loc` to select the relevant data:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.091434, "end_time": "2021-06-04T13:10:25.211598", "exception": false, "start_time": "2021-06-04T13:10:25.120164", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.loc[water_potability.Potability == 1]
# + [markdown] papermill={"duration": 0.038133, "end_time": "2021-06-04T13:10:25.288109", "exception": false, "start_time": "2021-06-04T13:10:25.249976", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
# This DataFrame has ~1,200 rows. The original had ~3,000. That means that around 40% of the samples have Potability == 1.
#
# We also wanted to know which samples are better. Samples with ph values between 6.0 and 7.0 are considered good.
#
# We can use the ampersand (`&`) to bring the two questions together:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.103349, "end_time": "2021-06-04T13:10:25.429500", "exception": false, "start_time": "2021-06-04T13:10:25.326151", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.loc[(water_potability.Potability == 1) & (water_potability.ph >= 6.0) & (water_potability.ph <= 7.0)]
# + [markdown] papermill={"duration": 0.040451, "end_time": "2021-06-04T13:10:25.510972", "exception": false, "start_time": "2021-06-04T13:10:25.470521", "status": "completed"} slideshow={"slide_type": "slide"} tags=[]
# Suppose we'll buy any sample that has high organic carbon (>18) or high turbidity (>=5). For this we use a pipe (`|`):
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.097511, "end_time": "2021-06-04T13:10:25.648260", "exception": false, "start_time": "2021-06-04T13:10:25.550749", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.loc[(water_potability.Organic_carbon > 18) | (water_potability.Turbidity >= 5)]
# + [markdown] slideshow={"slide_type": "subslide"}
# Get all the samples that have a known Sulfate value.
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.091841, "end_time": "2021-06-04T13:10:26.070608", "exception": false, "start_time": "2021-06-04T13:10:25.978767", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.loc[water_potability.Sulfate.notnull()]
# + [markdown] papermill={"duration": 0.040517, "end_time": "2021-06-04T13:10:26.153686", "exception": false, "start_time": "2021-06-04T13:10:26.113169", "status": "completed"} slideshow={"slide_type": "slide"} tags=[]
# # Assigning data
#
# Going the other way, assigning data to a DataFrame is easy. You can assign either a constant value:
# + jupyter={"outputs_hidden": true} papermill={"duration": 0.054072, "end_time": "2021-06-04T13:10:26.249071", "exception": false, "start_time": "2021-06-04T13:10:26.194999", "status": "completed"} slideshow={"slide_type": "subslide"} tags=[]
water_potability.loc[0, "Potability"] = 1
water_potability
# -
# # Contributors
#
# **Author**
# <br><NAME>
# # References
#
# 1. [Learning Pandas](https://www.kaggle.com/learn/pandas)
# 2. [Pandas Documentation](https://pandas.pydata.org/docs/reference/index.html)
|
nlp-labs/Day_01/Pandas Basic/02_indexing-selecting-assigning.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # H2O Tutorial
#
# Author: <NAME>
#
# Contact: <EMAIL>
#
# This tutorial steps through a quick introduction to H2O's Python API. The goal of this tutorial is to introduce through a complete example H2O's capabilities from Python. Also, to help those that are accustomed to Scikit Learn and Pandas, the demo will be specific call outs for differences between H2O and those packages; this is intended to help anyone that needs to do machine learning on really Big Data make the transition. It is not meant to be a tutorial on machine learning or algorithms.
#
# Detailed documentation about H2O's and the Python API is available at http://docs.h2o.ai.
# ## Setting up your system for this demo
# The following code creates two csv files using data from the [Boston Housing dataset](https://archive.ics.uci.edu/ml/datasets/Housing) which is built into scikit-learn and adds them to the local directory
# +
import pandas as pd
import numpy
from numpy.random import choice
from sklearn.datasets import load_boston
from h2o.estimators.random_forest import H2ORandomForestEstimator
import h2o
h2o.init()
# -
# Transfer the Boston housing data from pandas to H2O.
# NOTE(review): `load_boston` was removed in scikit-learn 1.2, so this cell
# only runs on older scikit-learn versions — confirm the pinned version.
boston_data = load_boston()
X = pd.DataFrame(data=boston_data.data, columns=boston_data.feature_names)
# Append the regression target as a column so predictors and response
# travel together in a single frame (the H2O convention).
X["Median_value"] = boston_data.target
# H2OFrame.from_python takes a dict of column name -> list of values
X = h2o.H2OFrame.from_python(X.to_dict("list"))
# +
# Select ~10% of rows for validation using a seeded random uniform column:
# rows with r < 0.9 become training data, the rest validation.
r = X.runif(seed=123456789)
train = X[r < 0.9,:]
valid = X[r >= 0.9,:]
# Persist both splits to disk so later cells can re-import them
h2o.export_file(train, "Boston_housing_train.csv", force=True)
h2o.export_file(valid, "Boston_housing_test.csv", force=True)
# -
# Enable inline plotting in the Jupyter Notebook
# %matplotlib inline
import matplotlib.pyplot as plt
# ## Intro to H2O Data Munging
# Read csv data into H2O. This loads the data into the H2O column compressed, in-memory, key-value store.
fr = h2o.import_file("Boston_housing_train.csv")
# View the top of the H2O frame.
fr.head()
# View the bottom of the H2O Frame
fr.tail()
# Select a column
#
# fr["VAR_NAME"]
fr["CRIM"].head() # Tab completes
# Select a few columns
columns = ["CRIM", "RM", "RAD"]
fr[columns].head()
# Select a subset of rows
#
# Unlike in Pandas, columns may be identified by index or column name. **Therefore, when subsetting by rows, you must also pass the column selection.**
fr[2:7,:] # explicitly select all columns with :
# Key attributes:
# * columns, names, col_names
# * len, shape, dim, nrow, ncol
# * types
#
# Note:
#
# Since the data is _not_ in local python memory
# there is no "values" attribute. If you want to
# pull all of the data into the local python memory
# then do so explicitly with h2o.export_file and
# reading the data into python memory from disk.
# +
# The columns attribute is exactly like Pandas
print("Columns:", fr.columns, "\n")
print("Columns:", fr.names, "\n")
print("Columns:", fr.col_names, "\n")
# There are a number of attributes to get at the shape
print("length:", str( len(fr) ), "\n")
print("shape:", fr.shape, "\n")
print("dim:", fr.dim, "\n")
print("nrow:", fr.nrow, "\n")
print("ncol:", fr.ncol, "\n")
# Use the "types" attribute to list the column types
print("types:", fr.types, "\n")
# -
# Select rows based on value
fr.shape
# Boolean masks can be used to subselect rows based on a criteria.
mask = fr["CRIM"]>1
fr[mask,:].shape
# Get summary statistics of the data and additional data distribution information.
fr.describe()
# Set up the predictor and response column names
#
# Using H2O algorithms, it's easier to reference predictor and response columns
# by name in a single frame (i.e., don't split up X and y)
# Build the predictor/response column-name lists H2O expects: H2O models
# reference columns by *name* within one frame instead of separate X/y.
x = fr.names[:]  # copy so the frame's own name list is not mutated
y="Median_value"
x.remove(y)  # every column except the response is a predictor
# ## Machine Learning With H2O
# H2O is a machine learning library built in Java with interfaces in Python, R, Scala, and Javascript. It is [open source](http://github.com/h2oai) and [well-documented](http://docs.h2o.ai).
#
# Unlike Scikit-learn, H2O allows for categorical and missing data.
#
# The basic work flow is as follows:
# * Fit the training data with a machine learning algorithm
# * Predict on the testing data
# ### Simple model
# Define and fit first 400 points
model = H2ORandomForestEstimator(seed=42)
model.train(x=x, y=y, training_frame=fr[:400,:])
model.predict(fr[400:fr.nrow,:]) # Predict the rest
# The performance of the model can be checked using the holdout dataset
perf = model.model_performance(fr[400:fr.nrow,:])
perf.r2() # get the r2 on the holdout data
perf.mse() # get the mse on the holdout data
perf # display the performance object
# ### Train-Test Split
# Instead of taking the first 400 observations for training, we can use H2O to create a random test train split of the data.
# +
# Split rows via a seeded random uniform column: ~75% train, ~25% test.
# Seeding makes the split (and thus the scores below) reproducible.
r = fr.runif(seed=12345)   # build random uniform column over [0,1]
train= fr[r<0.75,:]        # perform a 75-25 split
test = fr[r>=0.75,:]
model = H2ORandomForestEstimator(seed=42)
model.train(x=x, y=y, training_frame=train, validation_frame=test)
# Score the fitted model on the held-out rows
perf = model.model_performance(test)
perf.r2()
# -
# There was a massive jump in the R^2 value. This is because the original data is not shuffled.
# ### Cross validation
# H2O's machine learning algorithms take an optional parameter **nfolds** to specify the number of cross-validation folds to build. H2O's cross-validation uses an internal weight vector to build the folds in an efficient manner (instead of physically building the splits).
# In conjunction with the **nfolds** parameter, a user may specify the way in which observations are assigned to each fold with the **fold_assignment** parameter, which can be set to either:
# * AUTO: Perform random assignment
# * Random: Each row has a equal (1/nfolds) chance of being in any fold.
# * Modulo: Observations are in/out of the fold based by modding on nfolds
model = H2ORandomForestEstimator(nfolds=10) # build a 10-fold cross-validated model
model.train(x=x, y=y, training_frame=fr)
scores = numpy.array([m.r2() for m in model.xvals]) # iterate over the xval models using the xvals attribute
print("Expected R^2: %.2f +/- %.2f \n" % (scores.mean(), scores.std()*1.96))
print("Scores:", scores.round(2))
# However, you can still make use of the cross_val_score from Scikit-Learn
# ### Cross validation: H2O and Scikit-Learn
# NOTE(review): `sklearn.cross_validation` was removed in scikit-learn 0.20;
# modern code imports `cross_val_score` from `sklearn.model_selection`.
from sklearn.cross_validation import cross_val_score
from h2o.cross_validation import H2OKFold
from h2o.model.regression import h2o_r2_score
# NOTE(review): `sklearn.metrics.scorer` is gone in modern scikit-learn;
# `make_scorer` now lives directly in `sklearn.metrics`.
from sklearn.metrics.scorer import make_scorer
# You still must use H2O to make the folds. Currently, there is no H2OStratifiedKFold. Additionally, the H2ORandomForestEstimator is similar to the scikit-learn RandomForestRegressor object with its own ``train`` method.
model = H2ORandomForestEstimator(seed=42)
# +
scorer = make_scorer(h2o_r2_score) # make h2o_r2_score into a scikit_learn scorer
custom_cv = H2OKFold(fr, n_folds=10, seed=42) # make a cv
scores = cross_val_score(model, fr[x], fr[y], scoring=scorer, cv=custom_cv)
print("Expected R^2: %.2f +/- %.2f \n" % (scores.mean(), scores.std()*1.96))
print("Scores:", scores.round(2))
# -
# There isn't much difference in the R^2 value since the fold strategy is exactly the same. However, there was a major difference in terms of computation time and memory usage.
# Since the progress bar print out gets annoying let's disable that
h2o.__PROGRESS_BAR__=False
h2o.no_progress()
# ### Grid Search
# Grid search in H2O is still under active development and it will be available very soon. However, it is possible to make use of Scikit's grid search infrastructure (with some performance penalties)
# ### Randomized grid search: H2O and Scikit-Learn
from sklearn import __version__
sklearn_version = __version__
print(sklearn_version)
# If you have 0.16.1, then your system can't handle complex randomized grid searches (it works in every other version of sklearn, including the soon to be released 0.16.2 and the older versions).
#
# The steps to perform a randomized grid search:
# 1. Import model and RandomizedSearchCV
# 2. Define model
# 3. Specify parameters to test
# 4. Define grid search object
# 5. Fit data to grid search object
# 6. Collect scores
#
# All the steps will be repeated from above.
#
# Because 0.16.1 is installed, we use scipy to define specific distributions
# ADVANCED TIP:
#
# Turn off reference counting for spawning jobs in parallel (n_jobs=-1, or n_jobs > 1).
# We'll turn it back on again in the aftermath of a Parallel job.
#
# If you don't want to run jobs in parallel, don't turn off the reference counting.
#
# Pattern is:
# >>> h2o.turn_off_ref_cnts()
# >>> .... parallel job ....
# >>> h2o.turn_on_ref_cnts()
# +
# %%time
from sklearn.grid_search import RandomizedSearchCV # Import grid search
from scipy.stats import randint, uniform
model = H2ORandomForestEstimator(seed=42) # Define model
params = {"ntrees": randint(20,30),
"max_depth": randint(1,10),
"min_rows": randint(1,10), # scikit's min_samples_leaf
"mtries": randint(2,fr[x].shape[1]),} # Specify parameters to test
scorer = make_scorer(h2o_r2_score) # make h2o_r2_score into a scikit_learn scorer
custom_cv = H2OKFold(fr, n_folds=5, seed=42) # make a cv
random_search = RandomizedSearchCV(model, params,
n_iter=10,
scoring=scorer,
cv=custom_cv,
random_state=42,
n_jobs=1) # Define grid search object
random_search.fit(fr[x], fr[y])
print("Best R^2:", random_search.best_score_, "\n")
print("Best params:", random_search.best_params_)
# -
# We might be tempted to think that we just had a large improvement; however we must be cautious. The function below creates a more detailed report.
# +
def report_grid_score_detail(random_search, charts=True):
    """Summarize a fitted grid/randomized search as a scored DataFrame.

    Args:
        random_search: fitted search object exposing the legacy
            ``grid_scores_`` attribute (one entry per parameter setting).
        charts (bool): when True, also draw a scatter plot of score vs.
            each numeric parameter and a bar chart of mean score per
            category for each object-dtype parameter.

    Returns:
        pandas.DataFrame with one row per parameter combination, a
        ``score`` column (mean validation score) and a ``std`` column
        (1.96 * std of the fold scores, i.e. a ~95% half-width),
        sorted by score with the best setting first.
    """
    rows = []
    for line in random_search.grid_scores_:
        row = dict(line.parameters)
        row["score"] = line.mean_validation_score
        # 1.96 * std approximates a 95% confidence half-width
        row["std"] = line.cv_validation_scores.std()*1.96
        rows.append(row)
    # DataFrame.sort() was removed in pandas 0.20; sort_values is the
    # modern, equivalent call.
    result_df = pd.DataFrame(rows).sort_values("score", ascending=False)
    if charts:
        for col in get_numeric(result_df):
            if col not in ["score", "std"]:
                plt.scatter(result_df[col], result_df.score)
                plt.title(col)
                plt.show()
        for col in list(result_df.columns[result_df.dtypes == "object"]):
            # BUGFIX: the original indexed the grouped means with `[0]`
            # (yielding a scalar that cannot be sorted or plotted) and
            # used the removed in-place Series.sort(). Plot the full
            # sorted mean-score-per-category Series instead.
            cat_plot = result_df.score.groupby(result_df[col]).mean().sort_values()
            cat_plot.plot(kind="barh", xlim=(.5, None), figsize=(7, cat_plot.shape[0]/2))
            plt.show()
    return result_df
def get_numeric(X):
    """Return the names of X's columns whose dtype is float, int, or bool."""
    numeric_prefixes = ("float", "int", "bool")
    is_numeric = X.dtypes.apply(lambda dtype: str(dtype).startswith(numeric_prefixes))
    return list(X.dtypes[is_numeric].index)
# -
report_grid_score_detail(random_search).head()
# Based on the grid search report, we can narrow the parameters to search and rerun the analysis. The parameters below were chosen after a few runs:
# +
# %%time
params = {"ntrees": randint(30,35),
"max_depth": randint(5,8),
"mtries": randint(4,6),}
custom_cv = H2OKFold(fr, n_folds=5, seed=42) # In small datasets, the fold size can have a big
# impact on the std of the resulting scores. More
random_search = RandomizedSearchCV(model, params, # folds --> Less examples per fold --> higher
n_iter=5, # variation per sample
scoring=scorer,
cv=custom_cv,
random_state=43,
n_jobs=1)
random_search.fit(fr[x], fr[y])
print("Best R^2:", random_search.best_score_, "\n")
print("Best params:", random_search.best_params_)
report_grid_score_detail(random_search)
# -
# ### Transformations
# Rule of machine learning: Don't use your testing data to inform your training data. Unfortunately, this happens all the time when preparing a dataset for the final model. But on smaller datasets, you must be especially careful.
# At the moment, there are no classes for managing data transformations. On the one hand, this requires the user to tote around some extra state, but on the other, it allows the user to be more explicit about transforming H2OFrames.
#
# Basic steps:
#
# 0. Remove the response variable from transformations.
# 1. Import transformer
# 2. Define transformer
# 3. Fit train data to transformer
# 4. Transform test and train data
# 5. Re-attach the response variable.
# First let's normalize the data using the means and standard deviations of the training data.
# Then let's perform a principal component analysis on the training data and select the top 5 components.
# Using these components, let's use them to reduce the train and test design matrices.
from h2o.transforms.preprocessing import H2OScaler
from h2o.transforms.decomposition import H2OPCA
# #### Normalize Data: Use the means and standard deviations from the training data.
y_train = train.pop("Median_value")
y_test = test.pop("Median_value")
norm = H2OScaler()
norm.fit(train)
X_train_norm = norm.transform(train)
X_test_norm = norm.transform(test)
print(X_test_norm.shape)
X_test_norm
# Then, we can apply PCA and keep the top 5 components. A user warning is expected here.
pca = H2OPCA(k=5)
pca.fit(X_train_norm)
X_train_norm_pca = pca.transform(X_train_norm)
X_test_norm_pca = pca.transform(X_test_norm)
# +
# prop of variance explained by top 5 components?
# -
print(X_test_norm_pca.shape)
X_test_norm_pca[:5]
model = H2ORandomForestEstimator(seed=42)
model.train(x=X_train_norm_pca.names, y=y_train.names, training_frame=X_train_norm_pca.cbind(y_train))
y_hat = model.predict(X_test_norm_pca)
h2o_r2_score(y_test,y_hat)
# Although this is MUCH simpler than keeping track of all of these transformations manually, it gets to be somewhat of a burden when you want to chain together multiple transformers.
# ### Pipelines
# "Transformers unite!"
#
# If your raw data is a mess and you have to perform several transformations before using it, use a pipeline to keep things simple.
#
# Steps:
#
# 1. Import Pipeline, transformers, and model
# 2. Define pipeline. The first and only argument is a *list* of *tuples* where the first element of each tuple is a name you give the step and the second element is a defined transformer. The last step is optionally an estimator class (like a RandomForest).
# 3. Fit the training data to pipeline
# 4. Either transform or predict the testing data
from h2o.transforms.preprocessing import H2OScaler
from h2o.transforms.decomposition import H2OPCA
# +
from sklearn.pipeline import Pipeline # Import Pipeline <other imports not shown>
model = H2ORandomForestEstimator(seed=42)
pipe = Pipeline([("standardize", H2OScaler()), # Define pipeline as a series of steps
("pca", H2OPCA(k=5)),
("rf", model)]) # Notice the last step is an estimator
pipe.fit(train, y_train) # Fit training data
y_hat = pipe.predict(test) # Predict testing data (due to last step being an estimator)
h2o_r2_score(y_test, y_hat) # Notice the final score is identical to before
# -
# This is so much easier!!!
# But, wait a second, we did worse after applying these transformations! We might wonder how different hyperparameters for the transformations impact the final score.
# ### Combining randomized grid search and pipelines
# "Yo dawg, I heard you like models, so I put models in your models to model models."
#
# Steps:
#
# 1. Import Pipeline, grid search, transformers, and estimators <Not shown below>
# 2. Define pipeline
# 3. Define parameters to test in the form: "(Step name)__(argument name)" A double underscore separates the two words.
# 4. Define grid search
# 5. Fit to grid search
# +
pipe = Pipeline([("standardize", H2OScaler()),
("pca", H2OPCA()),
("rf", H2ORandomForestEstimator(seed=42))])
params = {"standardize__center": [True, False], # Parameters to test
"standardize__scale": [True, False],
"pca__k": randint(2, 6),
"rf__ntrees": randint(10,20),
"rf__max_depth": randint(4,10),
"rf__min_rows": randint(5,10), }
# "rf__mtries": randint(1,4),} # gridding over mtries is
# problematic with pca grid over
# k above
from sklearn.grid_search import RandomizedSearchCV
from h2o.cross_validation import H2OKFold
from h2o.model.regression import h2o_r2_score
from sklearn.metrics.scorer import make_scorer
custom_cv = H2OKFold(fr, n_folds=5, seed=42)
random_search = RandomizedSearchCV(pipe, params,
n_iter=5,
scoring=make_scorer(h2o_r2_score),
cv=custom_cv,
random_state=42,
n_jobs=1)
random_search.fit(fr[x],fr[y])
results = report_grid_score_detail(random_search)
results.head()
# -
# Currently Under Development (drop-in scikit-learn pieces):
# * Richer set of transforms (only PCA and Scale are implemented)
# * Richer set of estimators (only RandomForest is available)
# * Full H2O Grid Search
# ### Other Tips: Model Save/Load
# It is useful to save constructed models to disk and reload them between H2O sessions. Here's how:
best_estimator = random_search.best_estimator_ # fetch the pipeline from the grid search
# NOTE(review): `_final_estimator` and `_id` are private attributes and may
# change between sklearn/H2O versions — confirm before relying on this.
h2o_model = h2o.get_model(best_estimator._final_estimator._id) # fetch the model from the pipeline
save_path = h2o.save_model(h2o_model, path=".", force=True)
print(save_path)
# Reload the saved model from disk; this works even in a fresh H2O session
# assumes new session
my_model = h2o.load_model(path=save_path)
my_model.predict(X_test_norm_pca)
|
implementation/h2o-fakegame/h2o-py/demos/H2O_tutorial_medium.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ungraded Lab: Feature Engineering with Images
#
# In this optional notebook, you will be looking at how to prepare features with an image dataset, particularly [CIFAR-10](https://www.tensorflow.org/datasets/catalog/cifar10). You will mostly go through the same steps but you will need to add parser functions in your transform module to successfully read and convert the data. As with the previous notebooks, we will just go briefly over the early stages of the pipeline so you can focus on the Transform component.
#
# Let's begin!
# + [markdown] id="N-ePgV0Lj68Q"
# ## Imports
# + id="YIqpWK9efviJ"
import os
import pprint
import tempfile
import urllib
import absl
import tensorflow as tf
tf.get_logger().propagate = False
pp = pprint.PrettyPrinter()
import tfx
from tfx.components import CsvExampleGen
from tfx.components import ExampleValidator
from tfx.components import SchemaGen
from tfx.components import StatisticsGen
from tfx.orchestration.experimental.interactive.interactive_context import InteractiveContext
from tfx.types import Channel
from tfx.utils.dsl_utils import external_input
from tfx.components.transform.component import Transform
from google.protobuf.json_format import MessageToDict
print('TensorFlow version: {}'.format(tf.__version__))
print('TFX version: {}'.format(tfx.__version__))
# + [markdown] id="ufJKQ6OvkJlY"
# ## Set up pipeline paths
# + id="Se9m0HbDHBvv"
# Location of the pipeline metadata store
_pipeline_root = './pipeline/'
# Data files directory
_data_root = './data/cifar10'
# Path to the training data
_data_filepath = os.path.join(_data_root, 'train.tfrecord')
# + [markdown] id="n2cMMAbSkGfX"
# ## Download example data
#
# We will download the training split of the CIFAR-10 dataset and save it to the `_data_filepath`. Take note that this is already in TFRecord format so we won't need to convert it when we use `ExampleGen` later.
# + id="BywX6OUEhAqn"
# Create data folder for the images
# !mkdir -p {_data_root}
# URL of the hosted dataset
DATA_PATH = 'https://raw.githubusercontent.com/tensorflow/tfx/v0.21.4/tfx/examples/cifar10/data/train.tfrecord'
# Download the dataset and save locally
urllib.request.urlretrieve(DATA_PATH, _data_filepath)
# + [markdown] id="8ONIE_hdkPS4"
# ## Create the InteractiveContext
# + id="0Rh6K5sUf9dd"
# Initialize the InteractiveContext
context = InteractiveContext(pipeline_root=_pipeline_root)
# + [markdown] id="HdQWxfsVkzdJ"
# ## Run TFX components interactively
#
#
# + [markdown] id="L9fwt9gQk3BR"
# ### ExampleGen
#
# As mentioned earlier, the dataset is already in TFRecord format so, unlike the previous TFX labs, there is no need to convert it when we ingest the data. You can simply import it with [ImportExampleGen](https://www.tensorflow.org/tfx/api_docs/python/tfx/components/ImportExampleGen) and here is the syntax and modules for that.
# + id="PyXjuMt8f-9u"
# Module needed to import TFRecord files
from tfx.components import ImportExampleGen
# Ingest the data through ExampleGen
example_gen = ImportExampleGen(input_base=_data_root)
# Run the component
context.run(example_gen)
# + [markdown] id="OqCoZh7KPUm9"
# As usual, this component produces two artifacts, training examples and evaluation examples:
# + id="880KkTAkPeUg"
# Print split names and URI
artifact = example_gen.outputs['examples'].get()[0]
print(artifact.split_names, artifact.uri)
# + [markdown] id="J6vcbW_wPqvl"
# You can also take a look at the first three training examples ingested by using the `tf.io.parse_single_example()` method from the [tf.io](https://www.tensorflow.org/api_docs/python/tf/io) module. See how it is setup in the cell below.
# + id="H4XIXjiCPwzQ"
import IPython.display as display
# Get the URI of the output artifact representing the training examples, which is a directory
train_uri = os.path.join(example_gen.outputs['examples'].get()[0].uri, 'train')
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
                      for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files.
# ExampleGen writes GZIP-compressed TFRecords, hence the compression_type.
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# Feature spec for each serialized example: an int64 class id plus the
# raw (still-encoded) image bytes.
image_feature_description = {
    'label': tf.io.FixedLenFeature([], tf.int64),
    'image_raw': tf.io.FixedLenFeature([], tf.string),
}
# Image parser function
def _parse_image_function(example_proto):
    # Parse the input tf.Example proto using the dictionary above.
    return tf.io.parse_single_example(example_proto, image_feature_description)
# Map the parser to the dataset
parsed_image_dataset = dataset.map(_parse_image_function)
# Display the first three images along with their numeric class labels
for features in parsed_image_dataset.take(3):
    image_raw = features['image_raw'].numpy()
    # display.Image renders the encoded image bytes directly — no decode needed
    display.display(display.Image(data=image_raw))
    pprint.pprint('Class ID: {}'.format(features['label'].numpy()))
# + [markdown] id="csM6BFhtk5Aa"
# ### StatisticsGen
#
# Next, you will generate the statistics so you can infer a schema in the next step. You can also look at the visualization of the statistics. As you might expect with CIFAR-10, there is a column for the image and another column for the numeric label.
# + id="MAscCCYWgA-9"
# Run StatisticsGen
statistics_gen = StatisticsGen(
examples=example_gen.outputs['examples'])
context.run(statistics_gen)
# -
# Visualize the results
context.show(statistics_gen.outputs['statistics'])
# + [markdown] id="HLKLTO9Nk60p"
# ### SchemaGen
#
# Here, you pass in the statistics to generate the Schema. For the version of TFX you are using, you will have to explicitly set `infer_feature_shape=True` so the downstream TFX components (e.g. Transform) will parse input as a `Tensor` and not `SparseTensor`. If not set, you will have compatibility issues later when you run the transform.
# + id="ygQvZ6hsiQ_J"
# Run SchemaGen
schema_gen = SchemaGen(
statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True)
context.run(schema_gen)
# + id="Ec9vqDXpXeMb"
# Visualize the results
context.show(schema_gen.outputs['schema'])
# + [markdown] id="V1qcUuO9k9f8"
# ### ExampleValidator
#
# `ExampleValidator` is not required but you can still run it just to make sure that there are no anomalies.
# + id="XRlRUuGgiXks"
# Run ExampleValidator
example_validator = ExampleValidator(
statistics=statistics_gen.outputs['statistics'],
schema=schema_gen.outputs['schema'])
context.run(example_validator)
# + id="TDyAAozQcrk3"
# Visualize the results. There should be no anomalies.
context.show(example_validator.outputs['anomalies'])
# + [markdown] id="JPViEz5RlA36"
# ### Transform
#
# To successfully transform the raw image, you need to parse the current bytes format and convert it to a tensor. For that, you can use the [tf.image.decode_image()](https://www.tensorflow.org/api_docs/python/tf/io/decode_image) function. The transform module below utilizes this and converts the image to a `(32,32,3)` shaped float tensor. It also scales the pixels and converts the labels to one-hot tensors. The output features should then be ready to pass on to a model that accepts this format.
# + id="4AJ9hBs94YJm"
_transform_module_file = 'cifar10_transform.py'
# + id="MYmxxx9A4YJn"
# %%writefile {_transform_module_file}
import tensorflow as tf
import tensorflow_transform as tft
# Keys
_LABEL_KEY = 'label'
_IMAGE_KEY = 'image_raw'
def _transformed_name(key):
return key + '_xf'
def _image_parser(image_str):
    '''Convert one encoded image string into a float32 image tensor.

    Args:
        image_str: scalar string tensor holding the raw encoded image bytes.

    Returns:
        A (32, 32, 3) float32 tensor (CIFAR-10 image dimensions).
    '''
    # Decode the image bytes into a uint8 tensor with 3 color channels
    image = tf.image.decode_image(image_str, channels=3)
    # decode_image leaves the static shape unknown; pin it to CIFAR-10's
    image = tf.reshape(image, (32, 32, 3))
    # Cast to float so downstream scaling (tft.scale_to_0_1) applies cleanly
    image = tf.cast(image, tf.float32)
    return image
def _label_parser(label_id):
    '''One-hot encode a class id.

    Args:
        label_id: scalar int tensor holding the class id.

    Returns:
        A length-10 float tensor with 1.0 at position `label_id`.
    '''
    # depth=10 matches CIFAR-10's ten classes
    label = tf.one_hot(label_id, 10)
    return label
def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.

    Decodes the raw image bytes into scaled float tensors and one-hot
    encodes the integer labels, emitting them under '_xf'-suffixed keys.

    Args:
      inputs: map from feature keys to raw not-yet-transformed features.
    Returns:
      Map from string feature key to transformed feature operations.
    """
    # Convert the raw image and labels to a float array and
    # one-hot encoded labels, respectively.
    # Image decode ops are pinned to the CPU.
    with tf.device("/cpu:0"):
        outputs = {
            # map_fn applies the parser element-wise across the batch.
            # squeeze(axis=1) drops an extra per-example dimension —
            # assumes features arrive shaped (batch, 1); TODO confirm
            # against the generated schema.
            _transformed_name(_IMAGE_KEY):
                tf.map_fn(
                    _image_parser,
                    tf.squeeze(inputs[_IMAGE_KEY], axis=1),
                    dtype=tf.float32),
            _transformed_name(_LABEL_KEY):
                tf.map_fn(
                    _label_parser,
                    tf.squeeze(inputs[_LABEL_KEY], axis=1),
                    dtype=tf.float32)
        }
    # scale the pixels from 0 to 1
    outputs[_transformed_name(_IMAGE_KEY)] = tft.scale_to_0_1(outputs[_transformed_name(_IMAGE_KEY)])
    return outputs
# + [markdown] id="wgbmZr3sgbWW"
# Now, we pass in this feature engineering code to the `Transform` component and run it to transform your data.
# + id="jHfhth_GiZI9"
# Ignore TF warning messages
tf.get_logger().setLevel('ERROR')
# Setup the Transform component
transform = Transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
module_file=os.path.abspath(_transform_module_file))
# Run the component
context.run(transform)
# -
# ### Preview the results
#
# Now that the Transform component is finished, you can preview how the transformed images and labels look like. You can use the same sequence and helper function from previous labs.
# + id="pwbW2zPKR_S4"
# Get the URI of the output artifact representing the transformed examples, which is a directory
train_uri = os.path.join(transform.outputs['transformed_examples'].get()[0].uri, 'train')
# Get the list of files in this directory (all compressed TFRecord files)
tfrecord_filenames = [os.path.join(train_uri, name)
for name in os.listdir(train_uri)]
# Create a `TFRecordDataset` to read these files
dataset = tf.data.TFRecordDataset(tfrecord_filenames, compression_type="GZIP")
# -
# Define a helper function to get individual examples
def get_records(dataset, num_records):
    '''Extracts records from the given dataset.

    Args:
        dataset (TFRecordDataset): dataset saved by ExampleGen
        num_records (int): number of records to preview

    Returns:
        list of dicts, one per decoded tf.train.Example record.
    '''
    decoded = []
    # `take()` limits iteration to the requested number of records.
    for raw_record in dataset.take(num_records):
        # tf.train.Example parses the serialized protocol-buffer bytes.
        example = tf.train.Example()
        example.ParseFromString(raw_record.numpy())
        # Convert the protocol buffer message to a plain Python dictionary.
        decoded.append(MessageToDict(example))
    return decoded
# You should see from the output of the cell below that the transformed raw image (i.e. `image_raw_xf`) now has a float array that is scaled from 0 to 1. Similarly, you'll see that the transformed label (i.e. `label_xf`) is now one-hot encoded.
# + id="mSDZ2rJC7NQW"
# Get 1 record from the dataset
sample_records = get_records(dataset, 1)
# Print the output
pp.pprint(sample_records)
# -
# ### Wrap Up
#
# This notebook demonstrates how to do feature engineering with image datasets as opposed to simple tabular data. This should come in handy in your computer vision projects and you can also try replicating this process with other image datasets from [TFDS](https://www.tensorflow.org/datasets/catalog/overview#image_classification).
|
Course 2. Machine Learning Data Lifecycle in Production/Labs/C2_W4_Lab_3_Images.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import geopandas as gpd
import keplergl as kl
# Get mesh blocks data
# Read the 2016 Victorian mesh-block shapefile straight from the zip archive.
mbs = gpd.read_file("zip://../data/mb_2016_vic_shape.zip", low_memory=False)
mbs.shape
mbs = mbs.dropna()  # drop mesh blocks with any missing attributes
mbs.shape
# Get Greater melbourne
gmel = mbs[mbs.GCC_NAME16.isin(['Greater Melbourne'])]
gmel.shape
gmel.head()
gmel[['SA1_MAIN16','SA2_NAME16','SA3_NAME16','SA4_NAME16','GCC_NAME16']].describe()
# Plot into map (displaying the widget, then adding data, re-renders it in place)
mmap = kl.KeplerGl(height=800)
mmap
mmap.add_data(gmel)
# ### Local Government Areas
lga = gpd.read_file("zip://../data/lga_2020_aust_shp.zip", low_memory=False)
lga.shape
lga = lga.dropna()
lga.head()
lga.STE_NAME16.value_counts()
# Keep only Victorian LGAs
vic_lga = lga[lga.STE_NAME16.isin(['Victoria'])]
vic_lga.head()
lga_map = kl.KeplerGl(height=800)
lga_map
lga_map.add_data(vic_lga)
# ### Activities
acts = gpd.read_file("../data/activities.geojson")
acts.shape
act_map = kl.KeplerGl(height=800)
act_map
act_map.add_data(acts)
act_map.save_to_html(file_name='act_map.html')
config = {
"version": "v1",
"config": {
"visState": {
"filters": [
{
"dataId": [
"unnamed"
],
"id": "g1fum5ao",
"name": [
"datetime"
],
"type": "timeRange",
"value": [
1616029341400,
1616035686400
],
"enlarged": True,
"plotType": "histogram",
"animationWindow": "free",
"yAxis": None
}
],
"layers": [
{
"id": "sueie4n",
"type": "hexagon",
"config": {
"dataId": "unnamed",
"label": "unnamed",
"color": [
34,
63,
154
],
"columns": {
"lat": "lat",
"lng": "lon"
},
"isVisible": True,
"visConfig": {
"opacity": 0.8,
"worldUnitSize": 1,
"resolution": 8,
"colorRange": {
"name": "Uber Viz Qualitative 1.2",
"type": "qualitative",
"category": "Uber",
"colors": [
"#12939A",
"#DDB27C",
"#88572C",
"#FF991F",
"#F15C17",
"#223F9A"
]
},
"coverage": 1,
"sizeRange": [
0,
500
],
"percentile": [
0,
100
],
"elevationPercentile": [
0,
100
],
"elevationScale": 36.5,
"colorAggregation": "mode",
"sizeAggregation": "average",
"enable3d": True
},
"hidden": False,
"textLabel": [
{
"field": None,
"color": [
255,
255,
255
],
"size": 18,
"offset": [
0,
0
],
"anchor": "start",
"alignment": "center"
}
]
},
"visualChannels": {
"colorField": {
"name": "ActType",
"type": "string"
},
"colorScale": "ordinal",
"sizeField": {
"name": "ActDuration",
"type": "integer"
},
"sizeScale": "linear"
}
}
],
"interactionConfig": {
"tooltip": {
"fieldsToShow": {
"unnamed": [
{
"name": "TRIPID",
"format": None
},
{
"name": "PERSID",
"format": None
},
{
"name": "HHID",
"format": None
},
{
"name": "TRIPNO",
"format": None
},
{
"name": "ActLocation",
"format": None
}
]
},
"compareMode": False,
"compareType": "absolute",
"enabled": True
},
"brush": {
"size": 0.5,
"enabled": False
},
"geocoder": {
"enabled": False
},
"coordinate": {
"enabled": False
}
},
"layerBlending": "normal",
"splitMaps": [],
"animationConfig": {
"currentTime": None,
"speed": 1
}
},
"mapState": {
"bearing": 24,
"dragRotate": True,
"latitude": -37.84094484053064,
"longitude": 144.75401686995386,
"pitch": 50,
"zoom": 7.667760421641584,
"isSplit": False
},
"mapStyle": {
"styleType": "dark",
"topLayerGroups": {},
"visibleLayerGroups": {
"label": True,
"road": True,
"border": False,
"building": True,
"water": True,
"land": True,
"3d building": False
},
"threeDBuildingColor": [
9.665468314072013,
17.18305478057247,
31.1442867897876
],
"mapStyles": {}
}
}
}
# Apply the saved configuration to the map widget. KeplerGl exposes a
# `config` property (and a `config=` constructor argument); it has no
# zero-argument `add_config()` method, so the original call failed and the
# `config` dict defined above was never used.
act_map.config = config
|
notebooks/99_kepler-explore.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="797a45b6-fca9-49b3-bcd6-f3b20ea0f367"
# # Proving Proximal Gradient Method's Convergence Rate and a Code Demonstration
# > "A rigorous proof of the convergence rate of PGM, and code implementation from scratch using MNIST data"
# - toc: false
# - branch: master
# - badges: true
# - comments: true
# - image: images/PGD.png
# - hide: false
# - search_exclude: false
# + [markdown] id="632dc8e3-0a7b-4055-843f-f907e21980a9"
# #### Proximal Gradient Descent
# + [markdown] id="7ddcca9c-66ab-4097-bace-e0f323de6139"
# A proximal algorithm is an algorithm for solving a convex optimization problem that uses the proximal operators of the objective terms. It is called such since it consists of a gradient step followed by a proximal mapping. There are three main benefits to the application of proximal algorithms:
# - 1. They work under extremely general conditions, including cases where the functions are nonsmooth and extended real-valued
# - 2. They can be fast, since there can be simple proximal operators for functions that are otherwise challenging to handle in an optimization problem
# - 3. They are amenable to distributed optimization, so they can be used to solve very large scale problems
# + [markdown] id="0e3fbbd0-e7f8-4d52-83c4-194ae784e249"
# The proximal operator is defined as $$ prox_f(x) = argmin\left \{ f(u) + \frac{1}{2}\left \| u-x \right \|^2: u \in \mathbb{R}^n \right \}, \forall x \in \mathbb{R}^n $$
# with the goal being to $$minimize\left \{ f(u) + h(u): u \in \mathbb{R}^n \right \}$$
# where h is a proper lower semi-continuous function and f is a smooth convex function on dom(h).
# <br>
# + [markdown] id="014a02cc-2ae0-4460-b262-5fc800cdb357"
# __Some important assumptions before we begin:__
# + [markdown] id="766b1dcf-1928-4c89-9bc6-9e14890275cf"
# We assume that f has L-Lipschitz continuous gradient, i.e., $$\left \| \bigtriangledown f(x) - \bigtriangledown f(y) \right \| \leq L\left \| x-y \right \|, \forall x, y \in dom(h)$$
# and hence for every $x, y \in dom(h)$, $$ f(x) \leq l_f(x; y) + \frac{L}{2}\left \| x-y \right \|^2$$
# where $l_f(x; y) := f(y) + \left \langle \bigtriangledown f(y), x-y \right \rangle$.
# + [markdown] id="62f42921-7269-4314-a585-014c497d478c"
# Recall that PGM with a constant prox stepsize is recursive in nature and iterates according to: $$x_{k+1}=prox_{\lambda h}(x_k-\lambda\nabla f(x_k)).$$
# + [markdown] id="073e167d-8aa1-438f-b5d3-5c6494048e76"
# #### Let's get started!
# + [markdown] id="276bbe32-c81a-4403-9c32-fe2a4122a52b"
# First, we will derive a single iteration of PGM and show that the subproblem it solves is strongly convex.
# + [markdown] id="d7dbb27c-b73b-4f5a-93df-ebc4076c4fe3"
# $$ x_{k+1} = argmin\left \{ h(u) +\frac{1}{2}\left \| u-(x_k-\bigtriangledown f(x_k)) \right \|^2 \right \}$$
# + [markdown] id="14ff5fc9-0aa2-456c-b056-0a49f7de7aad"
# $$ x_{k+1} = argmin\left \{ f(x_k) + \left \langle \bigtriangledown f(x_k), u-x_k \right \rangle +h(u) + \frac{1}{2}\left \| x-x_k \right \|^2 \right \}$$
# + [markdown] id="bafad07c-d696-47b8-8146-96555cd4d8e5"
# $$x_{k+1}= argmin \left\{\ell_f(u;x_k)+h(u) + \frac{1}{2\lambda}||u-x_k||^2 \right\}, $$
# + [markdown] id="e26a33f3-473d-40bd-afd8-69f12bb9fae6"
# And proving strong convexity $\left \langle \bigtriangledown h(u) - \bigtriangledown h(x), u - x \right \rangle \geq \lambda \left \| u-x \right \|^{2}$:
# + [markdown] id="9bd53c6c-56d1-4798-ba4f-9ed5af0e0645"
# $$ \left \langle prox_{\lambda h}(u) - prox_{\lambda h}(x), (u-\frac{1}{\lambda}\bigtriangledown h(u)) -(x-\frac{1}{\lambda}\bigtriangledown h(x)) \right \rangle \geq \left \| prox_{\lambda h}(u)-prox_{\lambda h}(x) \right \|^{2} $$
# + [markdown] id="9b63b924-9d05-4607-8882-3cdc594f4b1a"
# $$ \left \langle (u - \frac{1}{\lambda}\bigtriangledown h(u)) - (x - \frac{1}{\lambda}\bigtriangledown h(x)), (u-\frac{1}{\lambda}\bigtriangledown h(u)) -(x-\frac{1}{\lambda}\bigtriangledown h(x)) \right \rangle \geq \left \| (u - \frac{1}{\lambda}\bigtriangledown h(u)) - (x - \frac{1}{\lambda}\bigtriangledown h(x)) \right \|^{2} $$
# + [markdown] id="acb65112-8ebe-4ed1-9156-05d514b5d465"
# Using the definition of $x_{k+1}$ and the strong convexity, we obtain upon rearranging terms that:
# + [markdown] id="cfab72fc-3f68-404c-8f59-048b1ac1f1f5"
# $$ h(x_{k+1}) \leq h(x) + \left \langle -\bigtriangledown f(u), x^{k+1}-x \right \rangle + \frac{1}{2}\left \| u-x \right \|^2 - \frac{1}{2}\left \| u-x^{k+1} \right \|^2 - \frac{1}{2}\left \| x^{k+1}-x \right \|^2 $$
# + [markdown] id="d66888d6-311c-4a41-9475-0527db8fc21b"
# Due to the Lipschitz continuity:
# $$ f(x_{k+1}) \leq f(u) + \left \langle -\bigtriangledown f(u), u-x^{k+1} \right \rangle + \frac{1}{2}\left \| u-x_{k+1} \right \|^2 $$
# + [markdown] id="116cc4b9-d126-46bb-bd15-3baaa7308800"
# Adding the two: $$ f(x_{k+1}) + h(x_{k+1}) \leq f(u) + h(x) + \left \langle \bigtriangledown f(u), u-x \right \rangle - \frac{1}{2}\left \| x_{k+1}-x \right \|^2 + \frac{1}{2}\left \| u-x \right \|^2 $$
# + [markdown] id="2302fd3a-bba6-4241-8c1a-f10381609f13"
# Using definition for $\ell_f(u;x_k)$ $$\ell_f(u;x_k)+h(u) + \frac{1}{2}||u-x_k||^2 \geq \ell_f(x_{k+1};x_k)+h(x_{k+1})+\frac{1}{2}||x_{k+1}-x_k||^2 + \frac{1}{2}||u-x_{k+1}||^2$$
# + [markdown] id="24a600b8-008d-4a1a-b005-e436900f8335"
# Similarly for any x in int(dom(f)):
# $$ f(x_*) \leq f(x) + \left \langle \bigtriangledown f(x), x_*-x \right \rangle + \frac{1}{2\lambda}\left \| x_*-x \right \|^2 $$
# It holds that $$ (f+h)(x)-(f+h)(x_*) \geq \frac{1}{2\lambda}\left \| x-x_* \right \|^2 $$
# + [markdown] id="41e01e79-6cbb-4f61-9df6-949babd05280"
# Consider $$ g(u) = f(x_{k+1})+\left \langle \bigtriangledown f(x_{k+1}), u-x_{k+1} \right \rangle + g(u)+\frac{1}{2\lambda}\left \| u-x_{k+1} \right \|^2$$
# + [markdown] id="8e211e92-4cc8-4015-9937-af3a110db694"
# $ x_* = argmin_g(u) $
# $$ g(x)-g(x_*) \geq \frac{1}{2\lambda}\left \| x-x_* \right \|^2 $$
# + [markdown] id="781a3bd4-af3a-4727-8a59-60b5293e1510"
# Since $$ g(x_*) = f(x_{k+1}) + \left \langle \bigtriangledown f(x_{k+1}),x_*-x_{k+1} \right \rangle + \frac{1}{2\lambda}\left \| x_*-x_{k+1} \right \|^2 + h(x_*) $$
# $$ \geq f(x_*)+h(x_*) = (f+h)(x_*) $$
# + [markdown] id="a80aa15d-64a9-45aa-9d00-5ddeea064a85"
# This implies that $$ h(x_{k+1})-(f+h)(x_*) \geq \frac{1}{2\lambda}\left \| x_{k+1}-x_*\right \|^2 $$
# + [markdown] id="ad5dedcf-d250-4469-86c3-7643261551d8"
# Plugging for g(u) into above inequality
# + [markdown] id="0341f4ba-8870-44f7-a3f4-7ee423a6df3c"
# $$ f(x_{k+1}) + \left \langle \bigtriangledown f(x_{k+1}), x-x_{k+1} \right \rangle + h(x)+\frac{1}{2\lambda}\left \| x-x_{k+1} \right \|^2 -(f+h)(x_*) \geq \frac{1}{2\lambda}\left \| x-x_* \right \|^2$$
# + [markdown] id="5e7f0187-bed1-4d85-9554-67ca092febee"
# Which is equal to
# + [markdown] id="39a3c1db-6596-430a-a9e0-9aa7e67a8374"
# $$ (f+h)(x_{k+1})-(f+h)(x_*) \geq \frac{1}{2\lambda}\left \| x_{k+1}-x_* \right \|^2 -\frac{1}{2\lambda}\left \| x-x_{k+1} \right \|^2 +f(x_{k+1}) + \ell_f(x;x_{k+1}) $$
# + [markdown] id="211c987c-c7d1-451f-ac95-429e9bbd5faf"
# $$(f+h)(x_*)+\frac{1}{2\lambda}||x_k-x_*||^2 \geq (f+h)(x_{k+1})+h(x_{k+1})+ \frac{1}{2\lambda}||x_{k+1}-x_*||^2$$
# + [markdown] id="4fbb94de-a28d-4c2d-93a8-663e2d339aed"
# Using $$ \frac{1}{2\lambda}((f+h)(x_*)-(f+h)(x_{k+1})) \geq \left \| x_*-x_{k+1} \right \|^2 -\left \| x_*-x_k \right \|^2 + \frac{1}{2\lambda}\ell_f(x_*,x_k)$$
# + [markdown] id="5c4ca187-104f-41e3-888e-438ac4cbf4e5"
# $$ \frac{1}{2\lambda}((f+h)(x_*)-(f+h)(x_{k+1})) \geq \left \| x_*-x_{k+1} \right \|^2 -\left \| x_*-x_k \right \|^2 $$
# + [markdown] id="7e3b7af4-d91f-4f66-9aec-00a85029701a"
# Sum over all n from 0 to k to obtain:
# $$
# \frac{1}{2\lambda}\sum_{}^{k}(f+h)(x_*)-(f+h)(x_{k+1}) \geq \left \| x_*-x_k \right \|^2-\left \| x_*-x_0 \right \|^2
# $$
# + [markdown] id="78b72791-9036-4108-97b5-897f616e4b53"
# Thus
# $$
# \sum_{}^{k}((f+h)(x_{k+1})-(f+h)(x_*)) \leq \frac{1}{2\lambda}\left \| x_*-x_0 \right \|^2-\frac{1}{2\lambda}\left \| x_*-x_k \right \|^2 \leq \frac{1}{2\lambda}\left \| x_*-x_0 \right \|^2
# $$
# + [markdown] id="0cc66cdb-65db-4573-a413-f6f2b91360ae"
# Given the monotonicity of $(f+h)(x_n)$ for $n \geq 0$
# $$
# k((f+h)(x_k)-(f+h)(x_*)) \leq \sum_{}^{k}((f+h)(x_{k+1})-(f+h)(x_*)) \leq \frac{1}{2\lambda}\left \| x_*-x_0 \right \|^2
# $$
# + [markdown] id="792eb5b5-127a-419d-a5fd-ca899ba32a47"
# Thus $$\sum_{i=1}^k (f+h)(x_i)-k(f+h)(x_*) \leq \frac{||x_0-x_*||^2}{2\lambda} $$
#
# + [markdown] id="c6fc25c9-e701-4637-95db-3c110d6ccdf9"
# Proving PGM has the descent property:
# + [markdown] id="badee0ff-5717-4481-8845-b1573b14714c"
# $$(f+h)(x_k) \geq (f+h)(x_{k+1}), \forall k \geq 0 $$
# + [markdown] id="6f2db67c-3ea8-450f-b7c9-0df81a070c5f"
# $$ \frac{1}{2\lambda}((f+h)(x_*)-(f+h)(x_{k+1})) \geq \left \| x_*-x_{k+1} \right \|^2 -\left \| x_*-x_k \right \|^2 + \frac{1}{2\lambda}\ell_f(x_*,x_k)$$
# + [markdown] id="c2c46548-946a-45c5-8bc2-8eab51b3e9ea"
# Along with the relationship: $$ \left \| x_{k+1} -x_*\right \| \leq \left \| x_k-x_* \right \|$$
# + [markdown] id="8eb9912d-dbc6-4276-9e59-6605c4a03821"
# It follows that: $$ (f+h)(x_*)-(f+h)(x_{k+1}) \leq (f+h)(x_*)-(f+h)(x_{k}) \leq 0$$
# + [markdown] id="98adda48-0838-49d7-b51e-3cdb822f68ab"
# Thus for all k $\geq 0$ $$(f+h)(x_{k+1}) \leq (f+h)(x_{k}) $$
# + [markdown] id="9707a214-b66b-4ea9-b226-ded5586e69dc"
# Finally, given the above:
# $$
# k((f+h)(x_k)-(f+h)(x_*)) \leq \sum_{}^{k}((f+h)(x_{k+1})-(f+h)(x_*)) \leq \frac{1}{2\lambda}\left \| x_0-x_* \right \|^2
# $$
# Consequently
# $$ (f+h)(x_i)-(f+h)(x_*) \leq \frac{1}{k2\lambda}||x_0-x_*||^2 $$
# + [markdown] id="666f32f5-5c41-432a-82c1-1afc0115c5f3"
# Hence we obtain the $O(\frac{1}{k})$ convergence rate
# + [markdown] id="477d0b2b-bd4c-4d7d-a5d7-ec31c8947da9"
# __________________________________
# + [markdown] id="313b0570-b151-4f47-b87a-8b535ee64fa9"
# ### Code Example
# + [markdown] id="e0f3872d-5627-46f2-a45d-02b403b64934"
# Here we will employ proximal gradient descent with stochastic schemes. In general, when the loss function we are trying to minimize can be written in the form $\sum_{i=1}^{m}g_i(\theta )$ where each $g_i(\theta)$ is the loss sample at i, and the training time is long, then stochastic schemes should be considered. We will optimize
# $$f(\theta) = \underset{\theta \in \mathbb{R}^d}{min}\frac{1}{m}\sum_{i=1}^{m}\left [ log(1+exp(x_i\theta)) -y_ix_i\theta \right ] + \lambda\left \| \theta \right \|_1$$
# We decompose $f(\theta)$ into a convex and differentiable function g and a convex but not differentiable function h:
# $$ g(\theta) = \frac{1}{m}\sum_{i=1}^{m}log(1+exp(x_i\theta)) $$
# $$ h(\theta) = \frac{1}{m}\sum_{i=1}^{m} -y_ix_i\theta + \lambda\left \| \theta \right \|_1$$
# + [markdown] id="00ab09d6-a064-4591-af17-0a4146fbc788"
# The data we are using is from the classic MNIST machine learning dataset. There are two classes, 0 and 1, and we have a total of 14,780 images; a training set of 12,665 and a test set of 2,115. Each image is 28x28. Each image is vectorized and stacked to form a training and test matrix, with the label appended to the last column of each matrix. Thus, our classifier will learn $\theta$ on the train set to predict the labels for the test set.
# + id="e5454956-7545-4c6c-9962-b3149e3c1ed2"
import numpy as np
from sklearn.metrics import accuracy_score
# + id="e097ae6c-0bc8-4c4d-90b2-2baf5f04056d"
# NOTE(review): `train` and `test` are assumed to be loaded by an earlier
# (hidden) cell as 2-D arrays whose last column holds the binary label —
# as written this cell raises NameError without them; confirm the data-loading cell.
x_train = train[:, :-1]   # all columns but the last are pixel features
y_train = train[:, -1 :]  # keep 2-D shape (m, 1) for the label column
x_test = test[:, :-1]
y_test = test[:, -1 :]
x = x_train
y = y_train
# + id="f0b74e42-ffee-4734-aee6-573c068a6667"
def predict_labels(X, weights):
    """Return sigmoid probabilities P(y=1) for each row of X under `weights`."""
    logits = X.dot(weights)
    return 1 / (1 + np.exp(-logits))
def soft_threshold(x, t):
    """Elementwise soft-thresholding (the proximal operator of the l1 norm).

    Values with |x| <= t are shrunk to 0; the rest move toward 0 by t.
    (Kept in max/min form rather than sign(x)*max(|x|-t, 0): the two only
    agree for non-negative t, and callers may pass negative thresholds.)
    """
    shrunk_from_above = np.maximum(x - t, 0)
    shrunk_from_below = np.minimum(x + t, 0)
    return shrunk_from_above + shrunk_from_below
def log_loss(X, theta):
    """Mean logistic partition term (1/m) * sum_i log(1 + exp(x_i . theta))."""
    margins = X.dot(theta)
    return np.log(1 + np.exp(margins)).sum() / X.shape[0]
def h(X, y, lam=10, lr=0.01):
    """Threshold term for the prox step: -(y^T X)/m plus the l1 step size lam*lr."""
    data_term = (1 / len(X)) * (-y.T.dot(X))
    l1_step = lam * lr
    return data_term + l1_step
def evaluate_gradient(X, theta, y=None):
    """Gradient of the smooth term g(theta) = (1/m) * sum_i log(1 + exp(x_i theta)).

    Args:
        X: (m, d) design matrix.
        theta: (d, 1) parameter vector.
        y: unused; kept for signature compatibility with existing callers.

    Returns:
        Length-d array: (1/m) * sum_i sigmoid(x_i theta) * x_i.
    """
    # BUG FIX: the original divided by a free global `m` that is defined
    # nowhere in this notebook (NameError unless set in a hidden cell);
    # the sample count is X.shape[0].
    m = X.shape[0]
    margins = X.dot(theta)               # hoisted: computed twice originally
    sigma = np.exp(margins) / (1 + np.exp(margins))
    return np.sum(X * sigma, axis=0) / m
# + id="64828a46-6112-4425-b1f7-da66be8c5d3c"
n = 100           # minibatch size
lam = 10          # l1 regularization weight
lr = 0.01         # learning rate / prox stepsize
max_iters = 1000
tol = 1e-3
N, D = x.shape
theta_current = np.zeros(shape=(D, 1))
losses = [log_loss(x, theta_current)]
thetas = [theta_current]
iterations = 1
# BUG FIX: the original condition `while (loss > tol) or (iterations > max_iters)`
# read `loss` before it was ever assigned (NameError on the first pass) and,
# being an OR, could never terminate once `iterations` exceeded `max_iters`.
# Stop when the loss drops below `tol` or the iteration budget is exhausted.
while (losses[-1] > tol) and (iterations <= max_iters):
    theta_current = thetas[-1]
    # Stochastic: draw a minibatch of n rows without replacement.
    number_of_rows = x.shape[0]
    random_indices = np.random.choice(number_of_rows, size=n, replace=False)
    x_temp, y_temp = x[random_indices, :], y[random_indices, :]
    for it in range(n):
        # Proximal GD: gradient step on the smooth part, then soft-threshold.
        grad = evaluate_gradient(x_temp, theta_current).reshape(-1, 1)
        theta_new_grad = theta_current - (lr * grad)
        theta_new = soft_threshold(theta_new_grad, h(x_temp, y_temp))
        theta_current = theta_new
    # Track full-dataset loss and the iterate after each outer pass.
    loss = log_loss(x, theta_current)
    losses.append(loss)
    thetas.append(theta_current)
    iterations += 1
# + id="d4d3f55b-e024-4244-bba3-6d23fde05a37"
# Non-stochastic approach
# Full-batch proximal gradient descent for comparison with the stochastic run.
n = 100           # NOTE(review): unused in this cell — leftover from the stochastic version
lam = 10          # l1 regularization weight
lr= 0.01          # learning rate / prox stepsize
max_iters=1000    # NOTE(review): unused — the loop below runs a fixed 200 iterations
tol= 1e-5         # NOTE(review): unused while the `while` form stays commented out
N, D = x.shape
theta_current = np.zeros(shape=(D, 1))
loss_1 = log_loss(x, theta_current)
losses = [loss_1]
thetas = [theta_current]
iterations = 1
#while losses[-1] > tol:
for i in range(200):
    theta_current = thetas[-1]
    # Gradient step on the smooth part, then proximal (soft-threshold) step.
    grad = evaluate_gradient(x, theta_current).reshape(-1,1)
    theta_new_grad = theta_current - (lr * grad)
    theta_new = soft_threshold(theta_new_grad, h(x, y).T)
    theta_current = theta_new
    # Track full-dataset loss and the iterate for later inspection/plotting.
    loss = log_loss(x, theta_current)
    losses.append(loss)
    thetas.append(theta_current)
#iterations += 1
# + id="6131719d-2da1-465e-a706-8d0a5f86fbb6"
# Display training-set probabilities for the final iterate.
predict_labels(x, thetas[-1])
# BUG FIX: predict_labels returns probabilities in (0, 1), but
# accuracy_score requires hard class labels (it rejects continuous
# targets), so threshold at 0.5 before scoring on the test set.
accuracy_score(y_test, (predict_labels(x_test, thetas[-1]) >= 0.5).astype(int))
# + [markdown] id="c6fa4563-bd4d-4364-b874-854fad11dca0"
# Overall, this stochastic implementation achieves an accuracy of 93.76% on the test set.
|
_notebooks/2020-08-05-PGD.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pakiety
import pandas as pd
import numpy as np
import sklearn
from sklearn.datasets import load_boston
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# # Boston Housing Dataset
# +
np.random.seed(123)
boston_dict = load_boston()
print(boston_dict.keys())
boston=pd.DataFrame(boston_dict.data)
boston.columns=boston_dict.feature_names
print(boston.head())
X = boston
Y = pd.DataFrame(boston_dict.target)
# -
boston.info()
# nie ma braków, tylko dane numeryczne
boston.describe()
# +
plt.plot(boston.RM, 'o')
plt.title('Zmienna RM (liczba pokoi) w kolejnych rekordach')
plt.xlabel('rekordy')
plt.ylabel('RM')
# pamiętamy o tytułach wykresu i osi
# co możemy wywnioskować?
# -
plot_dens=sns.distplot(boston.RM, hist = True, kde = True,
kde_kws = {'shade': True, 'linewidth': 3})
plot_dens.set_title('Rozkład zmiennej RM')
# +
fig1, ax1 = plt.subplots()
ax1.set_title('Zmienna RM')
ax1.boxplot(boston.RM, vert=False)
print('mediana = %s' % np.median(boston.RM))
print('średnia = %s' % np.mean(boston.RM))
print('Q1 = %s' %np.percentile(boston.RM, 25),'Q3 = %s' %np.percentile(boston.RM, 75))
# może funkcja?
# +
sns.pairplot(boston.iloc[:,[0,5,10]], size=2)
plt.tight_layout()
#CRIM - współczynnik przestępczości
#PTRATIO- stosunek liczby uczniów do liczby nauczycieli
# tak naprawdę wystarczyłoby narysowanie tylko części wykresów
# czy jest koleracja zmiennych?
# -
corr=boston.iloc[:,np.r_[0:7,10]].corr()
#plt.matshow(corr)
#plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
#plt.yticks(range(len(corr.columns)), corr.columns)
ax=sns.heatmap(corr,
xticklabels=corr.columns,
yticklabels=corr.columns, annot=True)
# below is a workaround for matrix truncation
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
ax.set_title('Korelacja zmiennych')
plt.show()
sns.lmplot(x='LSTAT', y='RM', data=boston)
#fit_reg=False, # No regression line
#dodaje automatycznie prostą regresji
#ekstrakcja informacji
criminal=boston[boston['CRIM']>50]
criminal
# # Zbiór danych nt. butów męskich
# +
data = pd.read_csv('menshoes.csv')
data.head()
# zbiór wymagający czyszczenia
# -
data.info()
# dużo braków - kolumny gdzie są tylko braki usuwamy
#a co jeżeli wartości są w mniej niż 10% rekordów? - na następnych zajęciach to omówmimy
# są zmienne kategoryczne (object) i numeryczne
data.brand.value_counts() #taki zapis jest tożsamy z data['brand']
# w przyszłości należy zrobić normalizację
Nike_Puma=data[data.brand.isin(['Puma', 'Nike'])]
sns.violinplot(Nike_Puma.brand, Nike_Puma.prices_amountmin)
popular_brand=data[data.brand.isin(data.brand.value_counts().index[:5])]
mean_price_popular_brand=popular_brand.groupby('brand').prices_amountmin.mean()
mean_price_popular_brand.plot(kind='bar', title='Średnia cena')
#plt.bar(mean_price_popular_brand.index, mean_price_popular_brand)
# +
# kiedy stosować pie chart?
# -
# Ciekawa strona z przykładami wizualizacji (wraz z kodem):
# https://www.machinelearningplus.com/plots/top-50-matplotlib-visualizations-the-master-plots-python/
# # Zadanie
# Zadanie nie jest na ocenę, ma na celu tylko sprawdzenie jaki jest dominujący język/biblioteki w grupie i zobaczyć jak sobie radzicie z danymi
#
# Pobierz zbiór danych ze strony:
# https://www.mldata.io/dataset-details/abalone/
# - Przeprowadź EDA dowolnej kolumny numerycznej w wybranym przez siebie języku i bibliotekach:
# - Rozkład zmiennej
# - Zależności między wybraną zmienną a innymi
# - Krótki opis tego co wyszło
#
# Rozwiązanie (Jupyter Notebook/Rmd + HTML) należy wrzucić jako pull request na repo https://github.com/mini-pw/2020L-WUM do folderu Laboratoria/Grupa1/NazwiskoImie/
#
# Nawet jeżeli ktoś zrobi bardzo mało - proszę wrzucić cokolwiek - w taki sposób będzie sprawdzona obecność na zajęciach.
# # Transformacje danych
data['prices_amountmin'].hist(bins=50)
plt.title('rozkład ceny butów')
#widoczny jest długi ogon
np.percentile(data.prices_amountmin,99)
price_after_log=np.log1p(data.prices_amountmin)
plt.hist(price_after_log, bins=50)
plt.title('rozkład logarytmu ceny butów')
plt.show()
inv_transform=np.expm1(price_after_log)
plt.hist(inv_transform, bins=50)
plt.title('rozkład ceny butów')
plt.show()
# +
# Normalizacja zmiennych kategorycznych
# +
# Duża liczba zmiennych kategorycznych unikalnych
# +
# Zamiana zmiennych kategorycznych na inne kodowanie
# +
# Braki w danych
# -
# # Selekcja zmiennych
corr=boston.iloc[:,np.r_[0:7,10]].corr()
#plt.matshow(corr)
#plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
#plt.yticks(range(len(corr.columns)), corr.columns)
ax=sns.heatmap(corr,
xticklabels=corr.columns,
yticklabels=corr.columns, annot=True)
# below is a workaround for matrix truncation
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
ax.set_title('Korelacja zmiennych')
plt.show()
from scipy.stats import chi2_contingency
# H0: the variables are independent (reject when p_value < alpha).
# NOTE(review): chi2_contingency expects a contingency table of counts;
# here it is applied to raw (continuous, non-negative) feature columns
# stacked next to the target, which is statistically questionable — confirm
# this is intended as a rough screening heuristic only.
for i in boston.columns:
    # Pair the target column with feature i as a two-column "table".
    d=np.hstack([Y.values,boston[i].values.reshape(-1,1)])
    p_value=chi2_contingency(d)[1]
    if p_value<0.05:
        print('%s - ISTOTNA' %i)
    else:
        print ('%s - NIEISTOTNA' %i)
|
Materialy/Grupa1/Lab1/WUM1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: test_py_env
# language: python
# name: test_py_env
# ---
# # Consistent Bayes: Some Motivating Examples
# ---
#
# Copyright 2017-2018 <NAME>
#
# ### Import Libraries
# _tested with python 3.6 on 02/11/18_
# Mathematics and Plotting
from HelperFuns import * # pyplot wrapper functions useful for visualizations, numpy, scipy, etc.
# %matplotlib inline
plt.rcParams.update({'font.size': 14})
plt.rcParams['figure.figsize'] = 10, 5
from cbayes import sample, solve, distributions
# Interactivity
from ipywidgets import *
# ---
# ## Formulating the Inverse Problem
# ---
# ### Prior Information/Assumptions
#
# * We assume that the true value $\lambda_0$ belongs to a parameter space $\Lambda$.
#
#
# * Much like in the classical statistical Bayesian framework, we begin by encapsulating our pre-existing beliefs about our parameters in a distribution in a prior distribution on $\Lambda$, $\pi^{prior}_\Lambda$
# ### The Observed Density
#
# * The observed density represents the uncertainty in an observation of a measurable quantity of interest map that takes input parameters and produces a vector in $\mathbb{R}^d$.
#
#
# * While there are problem scenarios you can posit where the observed density corresponds to a normalized likelihood function from the statistical Bayesian approach, the quantity of interest may not necessarily just be the uncertainty in the measurement data.
#
#
# * If the quantity of interest is indeed a single direct measurement, then the likelihood is the observed. For example, for some true parameter $\lambda_0$ and model $u(\lambda, t)$, suppose your quantity of interest is defined as a single evaluation at some time $t_0$. The measurement uncertainty contained in that one measurement would constitute your observed density.
#
#
# * However, if we have a collection of observations, such as at $t_0, t_2, \dots, t_K$, each of which is polluted by normally distributed noise, a common thing to do from Bayesian and Frequentist statistics would be to minimize the mean-or sum-squared error. If the sum squared error (SSE) is what we treat as our quantity of interest, the observed density on $\mathcal{D}$, denoted by $\pi^{obs}_{\mathcal{D}}(d)$, would be given by a $\chi^2_{K+1}$ distribution.
#
#
# ### The Posterior Density
#
# * Let $\pi^{O(prior)}_{\mathcal{D}}(d)$ denote the push-forward of the prior density onto $\mathcal{D}$. Then, the posterior density on $\Lambda$ is given by
#
# $$
# \pi^{post}_\Lambda(\lambda) := \pi^{prior}_\Lambda(\lambda)\frac{\pi^{obs}_{\mathcal{D}}(Q(\lambda))}{\pi^{O(prior)}_{\mathcal{D}}(Q(\lambda))}
# $$
# ---
# ## Define your Map
# _ Choose from one of the following example options, feel free to add your own _
#
# $O_1(\lambda) = \sum_{i=1}^n \lambda_i$
#
# $O_2(\lambda) = \lbrace \lambda_0,\;\; \lambda_0 + \lambda_1 \rbrace$
#
# $O_3(\lambda) = \lbrace \lambda_0,\;\; \lambda_0 - \lambda_1, \; \;\lambda_1^2 \rbrace$
#
# $O_4(\lambda) = (1-x)^2 + (y - x^2)^2$ ( This is the [Rosenbrock Function](https://en.wikipedia.org/wiki/Rosenbrock_function) with $a=1$ and $b=100$. )
# +
PtO_fun_choice = 4

def fun1(lam):
    """O_1: row-wise sum of all parameters."""
    return np.sum(lam, axis=1)

def fun2(lam):
    """O_2: (lambda_0, lambda_0 + lambda_1) per sample."""
    first = lam[:, 0]
    return np.array([first, first + lam[:, 1]]).transpose()

def fun3(lam):
    """O_3: (lambda_0, lambda_0 - lambda_1, lambda_1^2) per sample."""
    first, second = lam[:, 0], lam[:, 1]
    return np.array([first, first - second, second**2]).transpose()

def rosenbrock(lam):
    """O_4: the Rosenbrock function with a=1, b=100."""
    return (1. - lam[:, 0])**2 + 100*(lam[:, 1] - lam[:, 0]**2.)**2

# Dispatch table replaces the original if/elif ladder.
_PTO_FUNS = {1: fun1, 2: fun2, 3: fun3, 4: rosenbrock}
if PtO_fun_choice in _PTO_FUNS:
    PtO_fun = _PTO_FUNS[PtO_fun_choice]
else:
    raise( ValueError('Specify Proper PtO choice!') )
# -
# ---
#
# # Sample from $\Lambda$
# _Here we implement uniform random priors on the unit hypercube_
# +
input_dim = 2 # Specify input space dimension (n)
num_samples = int(1E3) # number of input samples (N)
s_set = sample.sample_set(size=(num_samples, input_dim))
# Pick a prior matched to the example map chosen above.
if PtO_fun_choice == 1:
    s_set.set_dist('normal', {'loc': 0, 'scale': 1})
elif PtO_fun_choice == 2:
    s_set.set_dist('normal', {'loc': -1, 'scale': 2})
elif PtO_fun_choice == 3:
    s_set.set_dist('normal', {'loc': 0, 'scale': 1})
elif PtO_fun_choice == 4: # rosenbrock
    # presumably scipy.stats-style parameters (uniform over
    # [loc, loc+scale] per dimension) — TODO confirm against cbayes.distributions
    s_set.set_dist('uniform', {'loc': [-1, -1], 'scale': [2, 1]})
s_set.generate_samples()
lam = s_set.samples # create a pointer for ease of reference later with plotting.
# -
# ### Visualize Prior
widgets.interactive(pltdata, data = fixed(lam), inds = fixed(None),
N = widgets.IntSlider(value=500, min = 100, max=5000, step=100, continuous_update=False),
eta_r = fixed(None), space=fixed(0.05), svd=widgets.Checkbox(value=False), color=widgets.Text(value="orange"),
view_dim_1 = widgets.IntSlider(value=0, min=0, max=input_dim-1, step=1, continuous_update=False),
view_dim_2 = widgets.IntSlider(value=input_dim-1, min=0, max=input_dim-1, step=1, continuous_update=False))
# ---
# # Compute Data Space $O(\Lambda) = \mathcal{D}$
#
# Format: `(n_dims, n_samples)`
# +
p_set = sample.map_samples_and_create_problem(s_set, PtO_fun)
D = p_set.output.samples
# This is how we handle trying to infer the dimension based on what the map put out.
# You can delete this once you are certain your model is correctly defined.
try:
    output_dim = D.shape[1] # if your function was coded correctly, you should have an (n, d) data space.
except IndexError:
    print(Warning("Warning: Your map might be returning the wrong dimensional data."))
    try:
        # NOTE(review): for a 1-D output (shape (n,)) — e.g. the Rosenbrock
        # map — D.shape[0] is the SAMPLE COUNT, not the data dimension, so
        # output_dim ends up as n rather than 1. Looks like a bug; confirm
        # the intended fallback before relying on `output_dim` downstream.
        output_dim = D.shape[0]
    except IndexError:
        print(Warning("Warning: Guessing it's 1-dimensional."))
        output_dim = 1
print('dimensions : lambda = '+str(lam.shape)+' D = '+str(D.shape) )
# -
# # Compute Push-Forward of the Prior $P_{O(\Lambda)}$
# _ ... i.e. Characterize the Data Space_
# Interactive Marginal Visualization
p_set.compute_pushforward_dist()
pf_dist = p_set.pushforward_dist
widgets.interactive(pltdata, data = fixed(pf_dist), inds = fixed(None),
N = widgets.IntSlider(value=500, min = 100, max=5000, step=100, continuous_update=False),
eta_r = fixed(None), space=fixed(0.05), svd=fixed(False), color=widgets.Text(value="brown"),
view_dim_1 = widgets.IntSlider(value=0, min=0, max=output_dim-1, step=1, continuous_update=False),
view_dim_2 = widgets.IntSlider(value=output_dim-1, min=0, max=output_dim-1, step=1, continuous_update=False))
# # Define Observed Probability Measure $P_\mathcal{D}$
# +
# Choose the observed density on the data space for the selected map.
if PtO_fun_choice == 4:
    p_set.set_observed_dist('normal', {'loc': 100, 'scale': 12}) # FOR ROSENBROCK
elif PtO_fun_choice == 3:
    # p_set.set_observed_dist('normal', {'loc':[0, 0, 0], 'scale':[0.5, 0.25, 1]}) # better for function choice = 2
    p_set.set_observed_dist('uniform', {'loc':[-0.5, -0.5, -0.5], 'scale':[1, 1, 1]}) # better for function choice = 2
elif PtO_fun_choice == 2:
    p_set.set_observed_dist('normal', {'loc':[0, 0], 'scale':[1, 1]}) # default is normal based on the data space # for function choice = 1
elif PtO_fun_choice == 1:
    # BUG FIX: the distribution name was 'uni'; every other call site in this
    # notebook spells it 'uniform' (choices 3/4 above and the prior's
    # set_dist), so 'uni' would not match the distributions registry.
    p_set.set_observed_dist('uniform', {'loc':0, 'scale':0.3}) # for function choice = 1
obs_dist = p_set.observed_dist # pointer for ease of reference.
widgets.interactive(pltdata, data = fixed(obs_dist), inds = fixed(None),
N = widgets.IntSlider(value=500, min = 100, max=5000, step=100, continuous_update=False),
eta_r = fixed(None), space=fixed(0.05), svd=fixed(False), color=widgets.Text(value="wine"),
view_dim_1 = widgets.IntSlider(value=0, min=0, max=output_dim-1, step=1, continuous_update=False),
view_dim_2 = widgets.IntSlider(value=output_dim-1, min=0, max=output_dim-1, step=1, continuous_update=False))
# -
# ---
#
# At this point we have performed the computations we need to. We have evaluated the input points through our map and performed a KDE on them. It would be useful at this point to save this object and/or its evaluation at every point in the data space for later re-use. Doing so here would be an appropriate place.
# ---
#
# # Accept/Reject Sampling of Posterior
#
# Since we have already used the samples in our prior to compute the pushforward density, we can re-use these with an accept/reject algorithm to get a set of samples generated from the posterior according to the solution of the stochastic inverse problem as outlined in the Consistent Bayes formulation.
# Form the ratio pi_obs(Q(lam)) / pi_pushforward(Q(lam)) at each sample and
# run accept/reject sampling of the posterior.
p_set.set_ratio()
eta_r = p_set.ratio
solve.problem(p_set)
accept_inds = p_set.accept_inds
lam_accept = p_set.input.samples[accept_inds,:]
num_accept = len(accept_inds)
# BUG FIX: `np.float` was deprecated in NumPy 1.20 and removed in 1.24
# (AttributeError on current NumPy); the builtin float is the correct spelling.
print('Number accepted: %d = %2.2f%%'%(num_accept, 100*float(num_accept)/num_samples))
# ## Visualize Posterior Density
# ### (Visualize Accept/Reject Samples)
#
# +
widgets.interactive(pltdata, data = fixed(lam), inds = fixed(accept_inds),
N = widgets.IntSlider(value=num_accept/2, min = 2, max=num_accept, step=1, continuous_update=False),
eta_r = fixed(None), space=fixed(0.05), svd=widgets.Checkbox(value=False), color=widgets.Text(value="orange"),
view_dim_1 = widgets.IntSlider(value=0, min=0, max=input_dim-1, step=1, continuous_update=False),
view_dim_2 = widgets.IntSlider(value=input_dim-1, min=0, max=input_dim-1, step=1, continuous_update=False))
# You will visualize the accepted samples in a subset of size N of the input samples.
# This is mostly for faster plotting, but also so you can see the progression of accepted sampling in the algorithm.
# -
# ---
# # Now what?
#
# Well, we can...
#
# ## _Visualize the Quality of our SIP Solution by Comparing it to the Observed_
# _We compare the push-forward of the posterior using accepted samples against the observed density_
# _(SIP = Stochastic Inverse Problem)_
# ### Observed:
widgets.interactive(pltdata, data = fixed(obs_dist), inds = fixed(None),
N = widgets.IntSlider(value=500, min = 100, max=5000, step=100, continuous_update=False),
eta_r = fixed(None), space=fixed(0.05), svd=fixed(False), color=widgets.Text(value="wine"),
view_dim_1 = widgets.IntSlider(value=0, min=0, max=output_dim-1, step=1, continuous_update=False),
view_dim_2 = widgets.IntSlider(value=output_dim-1, min=0, max=output_dim-1, step=1, continuous_update=False))
# ## Pushforward of Posterior:
widgets.interactive(pltdata, data = fixed(D), inds = fixed(accept_inds),
N = widgets.IntSlider(value=num_accept/2, min = 2, max=num_accept-1, step=1, continuous_update=False),
eta_r = fixed(None), space=fixed(0.05), svd=fixed(False), color=widgets.Text(value="eggplant"),
view_dim_1 = widgets.IntSlider(value=0, min=0, max=output_dim-1, step=1, continuous_update=False),
view_dim_2 = widgets.IntSlider(value=output_dim-1, min=0, max=output_dim-1, step=1, continuous_update=False))
|
examples/CBayes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# + deletable=true editable=true
# tidyverse for data wrangling, ggplot2 for figures, vegan for ecology
# distance/ordination utilities.
library(tidyverse)
library(ggplot2)
library(vegan)
# + [markdown] deletable=true editable=true
# ## Metadata
# + deletable=true editable=true
# SRA run accession -> sample name mapping (tab-delimited).
sra.md = read.delim("metadata/sra2name.tab")
#head(sra.md)
# + deletable=true editable=true
# Edwards et al. greenhouse experiment sample metadata.
edw.md = read.delim("edwards-data/greenhouse_metadata.txt")
#head(edw.md)
# + deletable=true editable=true
# Join and match metadata
# Harmonise sample names ('_' -> '.') so the SRA table joins onto the Edwards
# SampleID column, order rows by run id, and fix the Compartment factor level
# order so downstream plots read from bulk soil inward to the endosphere.
metadata = sra.md %>%
mutate(name=gsub('_', '.', name)) %>%
inner_join(edw.md, by=c("name"="SampleID")) %>%
arrange(runid) %>%
mutate(Compartment=factor(Compartment, levels=c("Bulk Soil", "Rhizosphere", "Rhizoplane", "Endosphere")))
head(metadata)
# + [markdown] deletable=true editable=true
# ## kWIP
# + deletable=true editable=true
# kWIP pairwise distance matrix (samples x samples), keyed by SRA run id.
kwip = read.delim("kwip/greenhouse_wip.dist", row.names=1) %>%
as.matrix()
# + deletable=true editable=true
# Relabel rows/columns from SRA run ids to the experiment sample names.
kwip.sras = rownames(kwip)
kwip.names = metadata$name[match(kwip.sras, metadata$runid)]
rownames(kwip) = colnames(kwip) = kwip.names
# + deletable=true editable=true
# Classical MDS (PCoA) on the kWIP distances; eig=T keeps eigenvalues so the
# percent variance captured by the first two axes can be reported.
kwip.mds = cmdscale(as.dist(kwip), eig=T)
kwip.var = (kwip.mds$eig / sum(kwip.mds$eig))[1:2]
round(kwip.var * 100, 2)
# + [markdown] deletable=true editable=true
# ## Weighted UF
# + deletable=true editable=true
# Weighted UniFrac distance matrix from the Edwards data (space-separated).
wuf = read.delim("edwards-data//weighted.gh.unifrac", row.names=1, sep=" ") %>% as.matrix()
# + deletable=true editable=true
# Rows should be named by sample and in the same order as the kWIP matrix.
all(rownames(wuf) == rownames(kwip))
# + deletable=true editable=true
# PCoA of weighted UniFrac; percent variance on the first two axes.
wuf.mds = cmdscale(as.dist(wuf),eig=T)
wuf.var = (wuf.mds$eig / sum(wuf.mds$eig))[1:2]
round(wuf.var * 100, 2)
# + [markdown] deletable=true editable=true
# ## UUF
# + deletable=true editable=true
# Unweighted UniFrac distance matrix, processed identically.
uuf = read.delim("edwards-data/unweighted.gh.unifrac", row.names=1, sep=" ") %>% as.matrix()
# + deletable=true editable=true
all(rownames(uuf) == rownames(kwip))
# + deletable=true editable=true
uuf.mds = cmdscale(as.dist(uuf),eig=T)
uuf.var = (uuf.mds$eig / sum(uuf.mds$eig))[1:2]
round(uuf.var * 100, 2)
# + [markdown] deletable=true editable=true
# # Plotting
#
# + deletable=true editable=true
# Edwards et al. compartment colour palette (red, purple, green, blue).
edw.colours = c("#E41A1C", "#984EA3", "#4DAF4A", "#377EB8")
# + deletable=true editable=true
# Sanity check: sample order must agree across the three ordinations.
# FIX: cmdscale(..., eig=T) returns a *list*, so rownames() on the result is
# NULL and the original comparisons were vacuously TRUE (all(logical(0))).
# The sample labels live on the $points matrix — compare those, as the
# data.frame construction below already does.
all(rownames(kwip.mds$points) == rownames(wuf.mds$points))
# + deletable=true editable=true
all(rownames(kwip.mds$points) == rownames(uuf.mds$points))
# + deletable=true editable=true
# First two principal coordinates of each metric plus sample metadata
# (joined on the shared 'name' column) for the figure panels below.
plot.dat = data.frame(
PC1.kwip = kwip.mds$points[,1],
PC2.kwip = kwip.mds$points[,2],
PC1.wuf = wuf.mds$points[,1],
PC2.wuf = wuf.mds$points[,2],
PC1.uuf = uuf.mds$points[,1],
PC2.uuf = uuf.mds$points[,2],
name = rownames(kwip.mds$points)
)
plot.dat = left_join(plot.dat, metadata)
# + deletable=true editable=true
# kWIP
# PCoA scatter of kWIP distances: colour = compartment, shape = site.
p = ggplot(plot.dat, aes(x=PC1.kwip, y=PC2.kwip, colour=Compartment, shape=Site)) +
geom_point(alpha=0.75, size=2) +
scale_color_manual(values = edw.colours) +
xlab("PC1") +
ylab("PC2") +
theme_bw() +
ggtitle("kWIP") +
theme(panel.grid=element_blank())
# Save the figure to SVG, then also render it inline in the notebook.
svg("gh_kwip.svg", width=4, height = 3)
print(p)
dev.off()
print(p)
# + deletable=true editable=true
# WUF
# PCoA scatter of weighted UniFrac distances, styled like the kWIP panel.
p = ggplot(plot.dat, aes(x=PC1.wuf, y=PC2.wuf, colour=Compartment, shape=Site)) +
geom_point(alpha=0.75, size=2) +
scale_color_manual(values = edw.colours) +
xlab("PC1") +
ylab("PC2") +
theme_bw() +
ggtitle("Weighted UniFrac") +
theme(panel.grid=element_blank())
# Save the figure to SVG, then also render it inline in the notebook.
svg("gh_wuf.svg", width=4, height = 3)
print(p)
dev.off()
print(p)
# + deletable=true editable=true
# UUF
# PCoA scatter of unweighted UniFrac distances, styled like the other panels.
p = ggplot(plot.dat, aes(x=PC1.uuf, y=PC2.uuf, colour=Compartment, shape=Site)) +
geom_point(alpha=0.75, size=2) +
scale_color_manual(values = edw.colours) +
xlab("PC1") +
ylab("PC2") +
theme_bw() +
# FIX: this panel plots *unweighted* UniFrac; the bare title "UniFrac" was
# ambiguous alongside the "Weighted UniFrac" panel.
ggtitle("Unweighted UniFrac") +
theme(panel.grid=element_blank())
# Save the figure to SVG, then also render it inline in the notebook.
svg("gh_uuf.svg", width=4, height = 3)
print(p)
dev.off()
print(p)
# + [markdown] deletable=true editable=true
# # Distance Matrix Correlation
#
#
# + deletable=true editable=true
distcor = function(a, b, method="spearman") {
    # Correlation between two distance matrices, computed over the strict
    # lower triangles only (each pairwise distance counted once; diagonal
    # and upper triangle excluded). Defaults to Spearman rank correlation.
    lower.values = function(m) {
        m = as.matrix(m)
        m[lower.tri(m)]
    }
    cor(lower.values(a), lower.values(b), method=method)
}
# + deletable=true editable=true
# Pairwise (Spearman, by default) correlations between the three metrics'
# lower-triangle distances.
distcor(kwip, uuf)
# + deletable=true editable=true
distcor(kwip, wuf)
# + deletable=true editable=true
distcor(uuf, wuf)
# + [markdown] deletable=true editable=true
# # Summary of results
#
# #### Axis pct contributions:
#
# |metric | PC1 | PC2 |
# |-------|-----|-----|
# |kWIP |22.6 |15.8 |
# |WUF |46.4 |11.5 |
# |UUF |18.1 |14.9 |
#
#
# #### Correlations between metrics:
#
# - kwip-> WUF: 0.88
# - kwip-> UUF: 0.90
# - WUF-> UUF: 0.73
# -
|
writeups/rice-meta/gh_paper_plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: venv_playground
# language: python
# name: venv_playground
# ---
# # **Aula 10 - Introdução a Redes Neurais**
#
# ## **TOC:**
# Na aula de hoje, vamos explorar os seguintes tópicos em Python:
#
# - 1) [Introdução](#intro)
# - 2) [Redes Neurais Artificiais](#rna)
# - 2.1) [Como a rede neural é treinada?](#how)
# - 3) [O exemplo clássico: MNIST](#MNIST)
# - 4) [Overfitting](#overfitting)
# - 5) [Outras arquiteturas de RNAs](#arch)
# - 6) [Quando usar Redes Neurais?](#when)
#
# ---
# ## 1) **Introdução** <a class="anchor" id="intro"></a>
#
# Seres humanos são muito bons em aprender! Isto se deve a nosso sistema nervoso, que pode ser entendido como uma grande rede de neurônios interligados.
#
# Um dos objetivos da Inteligência Artificial é o de construir sistemas inteligentes, com capacidade cognitiva similar à dos humanos.
#
# Assim sendo, faz sentido construirmos um modelo inspirado no sistema nervoso, não é mesmo?
#
# Assim nasceram as __Redes Neurais Artificiais (RNAs)__! A imagem a seguir ilustra a inspiração biológica para a construção deste modelo:
#
# <center><img src="https://www.ee.co.za/wp-content/uploads/2019/07/Application-of-machine-learning-algorithms-in-boiler-plant-root-cause-analysis-Fig-1.jpg" width="400"/></center>
#
# Vamos entender um pouco melhor como funcionam as RNAs?
#
#
# ---
# ## 2) **Redes neurais artificiais** <a class="anchor" id="rna"></a>
#
# Uma RNA é composta pelos seguintes elementos básicos:
#
# - **Unidades (ou neurônios)**: são as unidades mínimas de processamento da rede neural, onde as operações matemáticas são realizadas;
#
# - **Camadas**: há três tipos de camadas:
#
# - **Camada de entrada (input layer)**: é a camada de entrada de dados. O número de unidades nesta camada é igual ao número de features do modelo;
#
# - **Camadas ocultas (hidden layers)**: são as camadas de processamento. O número de camadas ocultas, bem como o número de neurônios em cada uma delas, é variável, dependendo do problema e dos dados. Em geral, a melhor estratégia é experimentar diferentes números de camadas e de neurônios;
#
# - **Camada de saída (output layer)**: é a camada que dá a resposta da rede neural, isto é, o valor predito por ela. O número de unidades nesta camada depende do tipo de output desejado, e é o que determina o target (variável dependente) do modelo.
#
#
# As camadas e neurônios são interligadas entre si através de conexões. A cada conexão, associa-se um **peso** (que denotamos pela letra **W**). O objetivo da RNA é **"aprender" os pesos que melhor se ajustam aos dados!**
#
# Após treinada, a rede neural faz previsões **passando os dados de input através de todos os nós**, conforme ilustrado a seguir:
#
#
# <center><img src="https://thumbs.gfycat.com/BouncyGleefulFeline-max-1mb.gif" width=500></center>
#
# ---
# ### 2.1) **Como a rede neural é treinada?** <a class="anchor" id="how"></a>
#
# A "aprendizagem" da Rede Neural a partir dos dados se dá através de duas etapas:
#
# - **Forward Propagation**;
# - **Backward Propagation**.
#
# No __forward propagation__, a informação propaga na direção habitual (de frente pra trás) na rede neural: features são lidas na camada de input, passam pelo processamento nas camadas ocultas, e a resposta (target) é predita na camada de output.
#
# Para que a predição seja realizada, os neurônios nas camadas ocultas realizam as seguintes duas etapas de cálculo:
#
# - Uma combinação linear entre o output (que denotamos pela letra **a**) da camada anterior e os pesos da camada atual. Isto é, se tivermos D ligações, a combinação linear é:
#
# $$z_j = \sum_{i=1}^{D} w_{ji}x_i + w_{j0}$$
#
# - Aplica-se uma **função de ativação** não-linear à combinação linear acima. As principais funções de ativação utilizadas são:
#
# $$a_j = f(z_j)$$
#
# <br>
#
# <center><img src="https://cdn-images-1.medium.com/max/1000/1*4ZEDRpFuCIpUjNgjDdT2Lg.png" width="500" /></center>
#
# O cálculo realizado por um único neurônio é bem parecido com um **perceptron**, ilustrado a seguir:
#
#
# <center><img src="https://img2.gratispng.com/20180619/oav/kisspng-multilayer-perceptron-machine-learning-statistical-5b2996bdb9dcd2.4724873615294522217613.jpg" width="400" /></center>
#
#
# A escolha das funções de ativação também pode ser variável, mas costuma-se utilizar:
#
# - **ReLu** nas camadas ocultas;
# - **Sigmoid** (para problemas de classificação binários) ou **Softmax** (para problemas de classificação multiclasse) na camada de output.
#
#
# Ao fim do forward propagation, na camada de output, calculamos a **função de perda**, que quantifica qual a **diferença entre as predições feitas pela rede neural e os valores reais do target dos dados**. <font color="orange"><b>Cada tipo de problema tem uma função de perda própria.</b></font>
#
# Queremos que as predições sejam sempre o mais próximas o possível dos valores reais!
#
# Então, o que fazemos é **minimizar** a função de perda.
#
# Isto é feito ao propagarmos a informação na direção contrária (de trás pra frente) na rede neural, o que caracteriza o chamado __backward propagation__.
#
# Para minimizar a função de perda, utilizamos um **otimizador**, que são objetos que representam o procedimento matemático de minimização da função de perda.
#
# Os principais otimizadores utilizados são: __gradiente descendente (GD)__, **Adam** e **RMSProp** (vale a pena testar cada um deles!)
#
#
# Este processo de forward e backward propagation é feito iterativamente, várias vezes. Cada rodada é chamada de **epoch**.
#
# O objetivo do backward propagation é **determinar os pesos que minimizem a função de perda!** A cada epoch, os pesos são **atualizados**, de modo que a função de perda é sempre reduzida em direção ao seu mínimo!
#
# Para quem quiser saber mais, segue uma sugestão de leitura adicional: [Neural Networks Explained](https://medium.com/datadriveninvestor/neural-networks-explained-6e21c70d7818)
# _______
#
# Vamos agora ao nosso exemplo prático: construiremos nossa própria rede neural!
# ---
# ## 3) **O exemplo clássico: MNIST** <a class="anchor" id="MNIST"></a>
#
# Neste exemplo, usaremos o **[MNIST](https://www.kaggle.com/c/digit-recognizer/data)**, o famoso dataset de dígitos (números de 0 a 9) escritos à mão -- caso você queira saber mais, [clique aqui!](http://yann.lecun.com/exdb/mnist/)
#
#
# <center><img src="https://i2.wp.com/syncedreview.com/wp-content/uploads/2019/06/MNIST.png?fit=530%2C297&ssl=1" width=450></center>
#
# O objetivo do nosso modelo será o de **classificar dígitos, com base em imagens**.
#
# Assim sendo, temos um **problema de classificação multiclasse** (pois os dados serão classificados em uma dentre 10 classes possíveis, de 0 a 9).
# +
# importe as bibliotecas que sempre usamos
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# -
# A base do MNIST é composta por imagens de números manuscritos.
#
# Cada imagem é uma **matriz 28 x 28**, contendo assim **784 pixels**.
#
# As imagens estão em escala de cinza, na qual cada pixel pode variar de **0 a 255**, e foram centralizadas, de forma que o número não fique "cortado" por estar na borda.
#
# - o primeiro da forma (a, 784), que são os 784 pixels da imagem organizados de forma sequencial;
# - o segundo da forma (a, 1), que é o identificador (label) da imagem, sendo um número que varia de 0 a 9.
#
# Para maiores detalhes, verificar a [página da base](http://yann.lecun.com/exdb/mnist/).
# + [markdown] tags=[]
# ### Pré-processamento dos dados
#
# __Rescalamento__
#
# É interessante que o os valores sejam rescalados **entre 0 e 1 para que o tempo de treinamento seja otimizado**.
#
# Algumas respostas sobre o porquê disso podem ser vistas [nesse link](https://stackoverflow.com/questions/4674623/why-do-we-have-to-normalize-the-input-for-an-artificial-neural-network).
#
# Então, fazemos a divisão dos valores correspondentes aos pixels das imagens pelo pixel de valor máximo do **conjunto de treino**.
#
# Obs: isso também poderia ser feito com o MinMaxScaler!
#
# Não vamos implementar para tornar o código mais simples.
# -
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
# import matplotlib.pyplot as plt
# device config
# Use the GPU when CUDA is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Each MNIST image is 28x28 pixels, flattened into a 784-dimensional vector.
input_size = 784 # 28x28
# + tags=[]
# hyper parameters
hidden_size = 100      # units in the hidden layer
num_classes = 10       # digits 0-9
num_epochs = 2         # full passes over the training set
batch_size = 100       # samples per gradient step
learning_rate = 0.001  # optimizer step size
# MNIST dataset
# train_dataset = torchvision.datasets.MNIST(root="./data", train=True, transform=transforms.ToTensor(), download=True)
# test_dataset = torchvision.datasets.MNIST(root="./data", train=False, transform=transforms.ToTensor(), download=True)
# NOTE(review): download=True was dropped (see commented lines above), so this
# assumes ./data already contains MNIST and will raise if the files are absent.
train_dataset = torchvision.datasets.MNIST(root="./data", train=True, transform=transforms.ToTensor())
test_dataset = torchvision.datasets.MNIST(root="./data", train=False, transform=transforms.ToTensor())
# Shuffle only the training loader; test order is kept deterministic.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, shuffle=True, batch_size=batch_size)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, shuffle=False, batch_size=batch_size)
# -
# O que está acontecendo acima?
#
# O treinamento de uma rede neural (e de todo modelo de machine learning, na verdade) consiste em um **problema de otimização**, em que queremos encontrar **o valor mínimo da função de perda**
#
# No nosso caso, a função de perda é a "categorical crossentropy", cuja expressão matemática é:
#
# $$ L(y, \hat{y}) = - \sum_i{y_i \log \hat{y}_i}$$
#
# onde $\hat{y}$, a previsão do modelo, é uma expressão bem complicada, que relaciona todos os pesos e funções de ativação de toda a rede neural.
#
# Como mencionamos, as redes neurais utilizam o procedimento de **back propagation** em seu treinamento (cujo objetivo é **determinar os pesos**):
#
# <center><img src="https://machinelearningknowledge.ai/wp-content/uploads/2019/10/Backpropagation.gif" width=300></center>
#
# Matematicamente, o back propagation é implementado através de algum **método de otimização**, com o fim de **determinar os pesos que minimizam a função de perda**. O principal método utilizado para este fim é o **gradiente descendente**:
#
# <center><img src="https://thumbs.gfycat.com/AngryInconsequentialDiplodocus-size_restricted.gif" width=500></center>
#
# ### Conjunto de Validação
#
# Em modelos de rede neural, é importante que exista um terceiro conjunto além de treino e teste, o **conjunto de validação**.
#
# Durante o treinamento, a depender do caso, **risco de ocorrer overfitting é bastante alto**.
#
# Uma forma de verificar e evitar isso é por meio do conjunto de validação, o qual é um subconjunto do conjunto de treino que não é utilizado pelo otimizador.
#
# Espera-se que o valor da função de perda (loss) vá diminuindo a cada época, tanto para o conjunto de treino como para o conjunto de validação. Porém, se a função de perda diminui para o conjunto de treino e aumenta para o conjunto de validação (ocorrendo assim um descolamento), podemos concluir que está ocorrendo um overfitting para o conjunto de treino.
#
# Deve-se então interromper o processo de treinamento.
# + [markdown] tags=[]
# ### Arquitetura da Rede
#
# Faremos uma rede simples, com:
#
# - **3 camadas**;
# - **25 neurônios cada**;
# - **Camadas densas**;
# - **função de ativação ReLu nas camadas escondidas**;
# - **10 neurônios e ativação Softmax na camada de saída**.
#
# -
# Diferentes funções de perda possuem diferentes propósitos, devendo ser escolhido caso a caso. Já os otimizadores possuem maior liberdade de escolha, não existindo uma regra fechada.
# __Modelo treinado!__
#
# Agora, devemos **fazer predições** e **avaliar a performance**:
# ## 4) **Overfitting** <a class="anchor" id="overfitting"></a>
#
# O **overfitting** é algo que pode ser bastante comum em redes neurais se não for bem tratado, porque ele é um modelo altamente não linear!
#
# Relembrando, o overfitting está intimamente relacionado com o tradeoff viés-variância:
#
# <img src="https://www.learnopencv.com/wp-content/uploads/2017/02/Bias-Variance-Tradeoff-In-Machine-Learning-1.png" width=500>
#
# Podemos visualizar esta característica em nosso modelo ao **plotar** o valor da **função de perda** a cada epoch:
# + [markdown] tags=[]
# A curva acima é muito parecida com o nosso exemplo do tradeoff viés-variância, não é mesmo?!
#
# Temos, portanto, um forte indício que está começando a ocorrer overfitting!
#
# Para evitarmos isso, pode ser interessante que o **treinamento seja interrompido antes do overfitting começar a ocorrer**! Esta técnica é conhecida como **early stopping**.
# -
# ---
# ## 5) **Outras arquiteturas de RNAs** <a class="anchor" id="arch"></a>
#
# Os modelos de redes neurais formam uma classe enorme de modelos, de grande variedade e aplicabilidade!
#
# Além da rede neural simples que construímos (uma **rede neural densa (fully-connected)**), há muitas outras arquiteturas!
#
# A imagem a seguir ilustra esta enorme diversidade:
#
# <center><img src="https://miro.medium.com/max/4000/1*cuTSPlTq0a_327iTPJyD-Q.png" width=500></center>
#
# __Para saber um pouco mais dessas muitas arquiteturas, [clique aqui!](https://towardsdatascience.com/the-mostly-complete-chart-of-neural-networks-explained-3fb6f2367464)__
#
# Vamos mencionar duas arquiteturas muito especiais:
#
# ### Redes Neurais Convolucionais (CNN)
#
# Utilizam-se de **convoluções** nas camadas iniciais.
#
# <center><center><img src="https://miro.medium.com/max/2340/1*Fw-ehcNBR9byHtho-Rxbtw.gif" width=300></center>
#
# > Estas redes são especializadas para modelos que envolvam **imagens**, sendo, portanto, a principal escolha para modelos de **visão computacional**
#
# <center><img src="https://miro.medium.com/max/3288/1*uAeANQIOQPqWZnnuH-VEyw.jpeg" width=600></center>
#
# <center><img src="https://miro.medium.com/max/1200/1*XbuW8WuRrAY5pC4t-9DZAQ.jpeg" width=600></center>
#
# ### Redes Neurais Recorrentes (RNN)
#
# São redes neurais em que as conexões são feitas de maneira **sequencial e cíclica**
#
# > Estas redes são especializadas para modelos que envolvam **dados sequenciais**, como, por exemplo, **textos**, **áudios**, **filmes (sequência de quadros)**, **séries temporais**, etc.
#
# <center><img src="https://miro.medium.com/max/1200/1*chs1MCz2rCK4_dFRLnUEIg.png" width=300></center>
#
# Modelos de RNNs mais sofisticados se utilizam de **células de memória (ex.: LSTM, GRU)** para reter a informação de dados do início da sequência, e propagá-los em todo o modelo:
#
# <center><img src="https://www.researchgate.net/profile/Sergio_Guadarrama/publication/319770438/figure/fig1/AS:613925582303265@1523382673457/A-diagram-of-a-basic-RNN-cell-left-and-an-LSTM-memory-cell-right-used-in-this-paper.png" width=400></center>
#
#
#
# ---
#
# ## 6) **Quando usar Redes Neurais?** <a class="anchor" id="when"></a>
#
# No atual cenário de Big Data em que vivemos (há muitos dados em todo o lugar!), os modelos de Redes Neurais e Deep Learning são cada vez mais utilizados!
#
# Isto é possível porque a performance destes modelos aumenta conforme mais dados são utilizados, diferentemente dos modelos tradicionais, cuja performance é estabilizada depois de certa quantidade de dados!
#
#
# <center><img src="https://www.sumologic.com/wp-content/uploads/performances_vs_data.png" width="400" /></center>
#
#
# Então, é de se esperar que os modelos de Deep Learning funcionem melhor nos cenários em que há **muitos dados disponíveis**.
#
# No entanto, se houver tempo e recursos computacionais disponíveis, é sempre uma ideia construir também um modelo de Deep Learning juntamente de outros modelos de Machine Learning, e então comparar qual tem melhor performance! :)
#
#
# ### **Mas e o tal do "Deep Learning"?**
#
# De forma geral, uma rede neural é chamada de "profunda" (deep), se ela tiver mais de uma camada oculta. Neste caso, temos uma **"Rede Neural Profunda" (Deep Neural Network)**.
#
# <center><img src="https://thedatascientist.com/wp-content/uploads/2018/03/simple_neural_network_vs_deep_learning.jpg" width=500></center>
#
# <br>
#
# O termo "Deep Learning" é utilizado pra se referir a modelos que usam Redes Neurais Profundas, mas este termo engloba também outras **arquiteturas** de redes neurais mais especializadas e complexas, como CNNs e RNNs, que inevitavelmente são "profundas", por construção.
|
semana_10/intro_redes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RoseSarlake/spectral-image/blob/main/task1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="hkzPBtfkn2w8" outputId="1f0d2422-3314-411d-f301-59d2e0974f44"
# Mount Google Drive so the spectra CSV exports under /content/drive are
# readable; force_remount=True replaces any stale existing mount.
from google.colab import drive
drive.mount("/content/drive", force_remount=True)
# + id="jYAiev8Y05rR"
import matplotlib.pyplot as plt # to make plots and show images
import numpy # numerical python for ND array
import pandas as pd
# + [markdown] id="-BLXm0U8wrCL"
# sample set 1
# + id="xlaeT7pVpAFI"
# Load raw reflectance spectra for three green samples from Drive CSV exports.
# sample 42: plastic green
path1 = '/content/drive/MyDrive/ASI/Lectures+Exercises/Lecture 2/csv files/42 plastic green.Sample.Raw.csv'
sam42 = pd.read_csv(path1)
# sample 43: another plastic green
path2 = '/content/drive/MyDrive/ASI/Lectures+Exercises/Lecture 2/csv files/43 plastic green.Sample.Raw.csv'
sam43 = pd.read_csv(path2)
#sample 32: plastic green spoon
path3= '/content/drive/MyDrive/ASI/Lectures+Exercises/Lecture 2/csv files/32 plastic spoon green.Sample.Raw.csv'
sam32 = pd.read_csv(path3)
# pick the wavelengths and reflectance
# 'nm' is the wavelength grid; ' %R' (note the leading space in the column
# name) is percent reflectance. All three spectra are plotted against sam32's
# grid — assumes the spectrometer exported identical grids; TODO confirm.
waves = sam32['nm']
re42 = sam42[' %R']
re43 = sam43[' %R']
re32 = sam32[' %R']
# + colab={"base_uri": "https://localhost:8080/", "height": 765} id="aUlmvBoGq2NA" outputId="e95ddbdf-3697-41a3-8864-146dc27a400b"
# Plot settings: one shared figure comparing the three green samples.
plt.rcParams['figure.dpi'] = 200
plt.yticks(range(0, 110, 10))      # reflectance axis, 0-100 %
plt.xticks(range(200, 2500, 200))  # wavelength axis, nm
# FIX: the `b=` keyword of grid() was deprecated in matplotlib 3.5 and removed
# in 3.6; passing the flag positionally works on every version.
plt.grid(True, linestyle='-')
# FIX: set each axis label once with the intended size — the original set
# placeholder labels with fontsize=10 and then overwrote them, losing the size.
plt.xlabel('Wavelength, nm', fontsize=10)
plt.ylabel('Reflectance [0-100]%', fontsize=10)
plt.tick_params(axis='both', which='major', labelsize=10)
# plot the curves
plt.plot(waves, re42, 'r', label="sample 42")
plt.plot(waves, re43, 'g', label="sample 43")
plt.plot(waves, re32, 'b', label="sample 32")
plt.legend(loc="upper right")
# + id="2WR8vgDFyx7T"
# Load two green thin-paper spectra. (The original comments said "plastic",
# but the filenames are "thin paper green" — corrected here.)
# sample 18: thin paper green
path4 = '/content/drive/MyDrive/ASI/Lectures+Exercises/Lecture 2/csv files/18 thin paper green.Sample.Raw.csv'
sam18 = pd.read_csv(path4)
# sample 19: another thin paper green
path5 = '/content/drive/MyDrive/ASI/Lectures+Exercises/Lecture 2/csv files/19 thin paper green.Sample.Raw.csv'
sam19 = pd.read_csv(path5)
# pick the wavelengths and reflectance (' %R' has a leading space in the name)
waves = sam18['nm']
re18 = sam18[' %R']
re19 = sam19[' %R']
# + colab={"base_uri": "https://localhost:8080/", "height": 765} id="9A-2Rfp2zLMf" outputId="e9b18729-554a-4128-d3e9-989a1900afcd"
# Plot settings: overlay the two thin-paper sample spectra on one figure.
plt.rcParams['figure.dpi'] = 200
plt.yticks(range(0, 110, 10))      # reflectance axis, 0-100 %
plt.xticks(range(200, 2500, 200))  # wavelength axis, nm
# FIX: the `b=` keyword of grid() was deprecated in matplotlib 3.5 and removed
# in 3.6; passing the flag positionally works on every version.
plt.grid(True, linestyle='-')
# FIX: set each axis label once with the intended size — the original set
# placeholder labels with fontsize=10 and then overwrote them, losing the size.
plt.xlabel('Wavelength, nm', fontsize=10)
plt.ylabel('Reflectance [0-100]%', fontsize=10)
plt.tick_params(axis='both', which='major', labelsize=10)
# plot the curves
plt.plot(waves, re18, 'r', label="sample 18")
plt.plot(waves, re19, 'g', label="sample 19")
plt.legend(loc="upper right")
# + id="0TxtEHotUJYq" colab={"base_uri": "https://localhost:8080/", "height": 758} outputId="9fe23220-1347-444d-8a13-b4d14df1f109"
# Difference spectrum between the two thin-paper samples.
plt.plot(waves, re19-re18, 'b', label="sample 19 - sample 18")
plt.legend(loc="upper right")
plt.yticks(range(0, 30, 10))
plt.xticks(range(200, 2500, 200))
# FIX: grid(b=True) uses the keyword removed in matplotlib >= 3.6.
plt.grid(True, linestyle='-')
# FIX: set each label once, keeping the intended fontsize (the original set
# placeholder labels and then overwrote them with default-sized ones).
plt.xlabel('Wavelength, nm', fontsize=10)
plt.ylabel('Reflectance difference [0-100]%', fontsize=10)
plt.tick_params(axis='both', which='major', labelsize=10)
plt.show()
|
task1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit ('venv')
# name: python385jvsc74a57bd07ccd5efa34661802a60805182d7d24511e124d1f180f619c03ce90313fb16373
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# English summary of the Korean cell note below: load the raw dataset, cut the
# required time span out of each full series, and reshape 1-D rows into 2-D
# frames; scale-up/normalisation is intentionally NOT performed here.
'''
Raw Dataset을 로딩하는 과정
+ 전체 series 데이터로부터 필요한 부분만 segmentation하는 부분 포함
+ 1d를 2d로 reshape하는 과정 포함
- scale up 과정 미포함
'''
'''
Path Settings
'''
# Configuration CSV (one row per recording session) and the directory holding
# the per-session FSR / seat data files it references.
CONFIGURATION_FILE_PATH = "./data/train/data_config.csv"
DATASET_PATH = "./data/train/"
'''
Figure Settings
'''
pd.set_option('display.width', 200) # for display width
'''
1. Read Configurration File (only xls)
'''
# The index column is used as the session id throughout this notebook.
data_config = pd.read_csv(CONFIGURATION_FILE_PATH, header=0, index_col=0)
print("Configuration Dataframe dimension: ", data_config.shape)
'''
2. Read all FSR matrix data and Seat sensor data
'''
fsr_dataframe = {}   # session id -> raw FSR matrix dataframe (first 162 columns)
seat_dataframe = {}  # session id -> raw seat loadcell dataframe
for idx in data_config.index:
    fsr_filepath = DATASET_PATH+data_config.loc[idx, "fsr_matrix_1d_datafile"] # set FSR matrix data filepath
    seat_filepath = DATASET_PATH+data_config.loc[idx, "seat_datafile"] # set Seat data filepath
    print(idx, ") read data files : ", fsr_filepath, ",", seat_filepath)
    # Keep only the first 162 columns — presumably 'mtime' plus the flattened
    # 16x10 FSR cells and a trailing column; TODO confirm against the CSV header.
    fsr_dataframe[idx] = pd.read_csv(fsr_filepath, header=0, index_col=False).iloc[:,0:162] # read FSR matrix data file
    seat_dataframe[idx] = pd.read_csv(seat_filepath, header=0, index_col=False) # read Seat data file
    # clear unnecessary columns
    del seat_dataframe[idx]['Measurement time'] # remove unnecessary column
    del fsr_dataframe[idx]['Measurement Time (sec)'] # remove unnecessary column
'''
3. Preproceess : Data Segmentation by mtime
- @brief FSR matrix data and Seat data should be segmented by mtime
- @output segmented dataframes
'''
# output dict.
fsr_dataframe_standard_segment = {}
fsr_dataframe_relax_segment = {}
seat_loadcell_dataframe_standard_segment = {}
seat_loadcell_dataframe_relax_segment = {}
for idx in data_config.index:
    # Start/end timestamps of the 'standard' and 'relax' postures for this session.
    mtime = data_config.loc[idx, ['standard_s_mtime', "standard_e_mtime", "relax_s_mtime", "relax_e_mtime"]]
    # seat loadcell segmentation
    seat_loadcell_dataframe_standard_segment[idx] = seat_dataframe[idx][(seat_dataframe[idx]['mtime']>=mtime.standard_s_mtime) & (seat_dataframe[idx]['mtime']<=mtime.standard_e_mtime)]
    seat_loadcell_dataframe_relax_segment[idx] = seat_dataframe[idx][(seat_dataframe[idx]['mtime']>=mtime.relax_s_mtime) & (seat_dataframe[idx]['mtime']<=mtime.relax_e_mtime)]
    # fsr matrix segmentation
    fsr_dataframe_standard_segment[idx] = fsr_dataframe[idx][(fsr_dataframe[idx]['mtime']>=mtime.standard_s_mtime) & (fsr_dataframe[idx]['mtime']<=mtime.standard_e_mtime)]
    fsr_dataframe_relax_segment[idx] = fsr_dataframe[idx][(fsr_dataframe[idx]['mtime']>=mtime.relax_s_mtime) & (fsr_dataframe[idx]['mtime']<=mtime.relax_e_mtime)]
    print("FSR Segments@Standard size : ", len(fsr_dataframe_standard_segment[idx]), ", FSR Segments@Relax size : ", len(fsr_dataframe_relax_segment[idx]))
    print("Seat Segments@Standard size : ", len(seat_loadcell_dataframe_standard_segment[idx]), ", Seat Segments@Relax size : ", len(seat_loadcell_dataframe_relax_segment[idx]))
# + tags=["outputPrepend"]
# English summary of the Korean cell note below: render each FSR frame with
# 'catrom' interpolation, then crop to the half of the image that measures
# reliably (left/right pads record on different scales). The note describes a
# dynamic per-series min/max scale-up, but the active imshow call below uses
# the absolute 0-255 range (static) — only 'standard' posture is handled.
'''
데이터를 catrom으로 interpolation(dynamic scale)하고
좌우 데이터가 상대적으로 다른 스케일로 측정되는 문제에서
잘 나오는 쪽만 crop하여 처리하도록 함.
이때 scale up의 min max는 0~255의 절대범위가 아닌, 각 series마다 local min/max를 기준으로 한다.
+ only standard mode
'''
import os
import os.path
import gc
from skimage import io, color
DYNAMIC_SCALEUP = True
CASE_PATH = "./catmul_static"
crop_standard_interpolated_path = {}  # session id -> DataFrame of crop image paths
if DYNAMIC_SCALEUP==True:
    try:
        #os.mkdir("./high") # create diretory
        os.mkdir(CASE_PATH) # create diretory
        # os.mkdir("./none") # create diretory
    except FileExistsError:
        pass
for idx in data_config.index:
    # Drop the mtime column, then reshape each 160-value row into a 16x10 frame.
    fsr_standard_segment_1d = fsr_dataframe_standard_segment[idx].iloc[:,1:161]
    fsr_standard_segment_2d = fsr_standard_segment_1d.values.reshape(-1, 16, 10) # reshape
    try:
        # os.mkdir("./high/{}".format(idx)) # create diretory for each id
        os.mkdir("{}/{}".format(CASE_PATH, idx)) # create diretory for each id
        # os.mkdir("./none/{}".format(idx)) # create diretory for each id
    except FileExistsError:
        pass
    standard_fsr_crop_file_list = []
    for ridx in range(fsr_standard_segment_2d.shape[0]):
        # result_image_filepath = "./high/{}/standard_{}.jpg".format(idx, ridx)
        # result_crop_image_filepath = "./high/{}/standard_crop_{}.jpg".format(idx, ridx)
        result_image_filepath = "{}/{}/standard_{}.jpg".format(CASE_PATH, idx, ridx)
        result_crop_image_filepath = "{}/{}/standard_crop_{}.jpg".format(CASE_PATH, idx, ridx)
        # result_image_filepath = "./none/{}/standard_{}.jpg".format(idx, ridx)
        # result_crop_image_filepath = "./none/{}/standard_crop_{}.jpg".format(idx, ridx)
        # data interpolation
        # Render only if not already cached on disk (re-runs are incremental).
        if os.path.isfile(result_image_filepath)==False:
            fig = plt.figure()
            plt.axis('off')
            #plt.imshow(fsr_standard_segment_2d[ridx], interpolation='catrom', cmap='Greys_r') # dynamic
            plt.imshow(fsr_standard_segment_2d[ridx], interpolation='catrom', vmin=0, vmax=255, cmap='Greys_r') # statuc
            #plt.imshow(fsr_standard_segment_2d[ridx], interpolation='none', cmap='Greys_r')
            plt.savefig(result_image_filepath, bbox_inches='tight', pad_inches=0)
            plt.close()
        else:
            pass
        # crop active region
        if os.path.isfile(result_crop_image_filepath)==False:
            image = io.imread(result_image_filepath)
            grayscale = color.rgb2gray(image)
            # Keep the right half of the rendered image (columns w/2 .. w).
            crop = grayscale[0:grayscale.shape[0],int(grayscale.shape[1]/2):grayscale.shape[1]]
            io.imsave(result_crop_image_filepath, crop)
            print("(standard) saved output crop images for id {}, {}".format(idx, ridx))
        else:
            pass
        standard_fsr_crop_file_list.append(result_crop_image_filepath)
    crop_standard_interpolated_path[idx] = pd.DataFrame(standard_fsr_crop_file_list, columns=['path'])
    print(crop_standard_interpolated_path[idx])
print("done")
# + tags=[]
# Feature engineering: collapse each cropped frame image to a 1-D feature
# vector by taking the max over its width (GlobalMaxPooling1D over the column
# axis), and optionally save each session's stacked features as an image.
'''
Feature Engineering : save to feature image
+ 1D max pooling (input data : (batch, steps, features)))
'''
from skimage import io, color
import tensorflow as tf
from tensorflow.keras.layers import MaxPool1D, GlobalMaxPooling1D
SAVE_FEATURE_IMAGE = True
try:
    os.mkdir("{}/feature".format(CASE_PATH)) # create diretory
except FileExistsError:
    pass
featureset_container = {}  # session id -> (num_frames, 217) feature matrix
for idx in data_config.index:
    # One 217-dim feature row per cropped frame of this session.
    feature_set = np.array([], dtype=np.int64).reshape(0, 217)#empty(217)
    for f in crop_standard_interpolated_path[idx]["path"]:
        image = io.imread(f)
        grayscale = color.rgb2gray(image) # (217, 68)
        tensor = tf.reshape(grayscale, [grayscale.shape[0], grayscale.shape[1], 1]) # (217, 68, 1)
        # Max over the width axis -> one value per image row.
        feature = tf.keras.layers.GlobalMaxPooling1D()(tensor).numpy() #(217,1)
        feature_1d = feature.reshape(feature.shape[0]) #(217,)
        feature_set = np.vstack([feature_set, feature_1d])
    if SAVE_FEATURE_IMAGE == True:
        io.imsave("{}/feature/standart_feature_{}.png".format(CASE_PATH, idx), feature_set.transpose())
    featureset_container[idx] = feature_set
    print("created featureset :", idx)
print("done")
# +
'''
SVM model training & predict with full dimensional data
'''
'''
svm with positive class
'''
from sklearn import svm
from sklearn.datasets._samples_generator import make_blobs
from tensorflow.keras.layers import MaxPool1D, GlobalMaxPooling1D
feautre_length = 115 #115 dimensional dataset
feature_set = np.array([], dtype=np.int64).reshape(0, feautre_length)
print("featureset shape :",feature_set.shape)
for idx in data_config.index:
tensor = tf.reshape(featureset_container[idx], [featureset_container[idx].shape[0], featureset_container[idx].shape[1], 1]) # (1??, 217, 1)
# print(tensor.shape)
feature = tf.keras.layers.GlobalMaxPooling1D()(tensor).numpy() #(1??,1)
# print(feature.shape)
feature_1d = feature.reshape(feature.shape[0]) #(1??,)
# print(feature_1d.shape)
feature_set = np.vstack([feature_set, feature_1d[0:feautre_length]])
print("pclass shape :", pclass.shape)
print("feature length:", feautre_length)
print("feature_set:", feature_set.shape)
# plt.figure()
# plt.plot(feature_set)
# plt.show()
# random selection
import random
shuffled_index = np.arange(feature_set.shape[0])
random.shuffle(shuffled_index)
print("shuffled", shuffled_index)
pclass = shuffled_index[0:49] # first 5 index select from shuffled_index
nclass = shuffled_index[5:]
X = feature_set[pclass[:,None]].reshape(pclass.shape[0], feautre_length)
y = pclass
print("pclass :", pclass)
model = svm.SVC(kernel='linear')
model.fit(X,y)
print(model.predict([X[0]]))
print(model.predict([X[1]]))
print(model.predict([X[2]]))
print(model.predict([X[3]]))
print(model.predict([X[4]]))
print("done")
# + tags=[]
'''
SVM model training & predict with n-dimensional data
'''
'''
svm with positive class
'''
from sklearn import svm
from sklearn.datasets._samples_generator import make_blobs
from tensorflow.keras.layers import MaxPool1D, GlobalMaxPooling1D
# BUG FIX: `random` was imported *after* random.choice() below, so this cell
# only worked if an earlier cell had already imported it; hoisted to the top.
import random
feautre_length = 10  # n-dimension
feature_pt = random.choice(np.arange(100))  # random start point of the feature window
feature_set = np.array([], dtype=np.int64).reshape(0, feautre_length)
for idx in data_config.index:
    # Pool each per-record feature image down to one vector, then slice a
    # `feautre_length`-wide window starting at feature_pt.
    tensor = tf.reshape(featureset_container[idx], [featureset_container[idx].shape[0], featureset_container[idx].shape[1], 1])
    feature = GlobalMaxPooling1D()(tensor).numpy()
    feature_1d = feature.reshape(feature.shape[0])
    feature_set = np.vstack([feature_set, feature_1d[feature_pt:feautre_length + feature_pt]])
# random selection
shuffled_index = np.arange(feature_set.shape[0])
random.shuffle(shuffled_index)
print("shuffled", shuffled_index)
pclass = shuffled_index[0:feature_set.shape[0]]  # all indices, in shuffled order
X = feature_set[pclass[:, None]].reshape(pclass.shape[0], feautre_length)
y = pclass  # each sample labelled with its own index (one class per sample)
print("pclass :", pclass)
model = svm.SVC(kernel='linear')
model.fit(X, y)
print(model.predict([X[0]]))
print(model.predict([X[1]]))
print(model.predict([X[2]]))
print(model.predict([X[3]]))
print(model.predict([X[4]]))
# + tags=[]
'''
SVM Model training & predict with n-dimensional augmented data
'''
'''
svm with positive class
'''
from sklearn import svm
from sklearn.datasets._samples_generator import make_blobs
from tensorflow.keras.layers import MaxPool1D, GlobalMaxPooling1D
from sklearn.model_selection import train_test_split
import random
from sklearn import metrics
feautre_length = 5  # n-dimensional dataset
feature_pt = random.choice(np.arange(100))  # random start point of the feature window
number_of_samples = 999  # 1 raw feature + 999 augmented copies per id
augmented_data_dict = {}
for idx in data_config.index:
    feature_set = np.array([], dtype=np.int64).reshape(0, feautre_length)
    tensor = tf.reshape(featureset_container[idx], [featureset_container[idx].shape[0], featureset_container[idx].shape[1], 1])
    feature = GlobalMaxPooling1D()(tensor).numpy()
    feature_1d = feature.reshape(feature.shape[0])
    feature_set = np.vstack([feature_set, feature_1d[feature_pt:feautre_length + feature_pt]])
    # Data augmentation: random-walk the feature vector with Gaussian noise.
    mu, sigma = 0, 3
    for aug in range(number_of_samples):
        aug_1d = np.random.normal(mu, sigma, feature_1d.shape[0])
        feature_1d = feature_1d + aug_1d
        # BUG FIX: np.clip returns a new array; the original discarded the
        # result, so the intended lower bound was never actually applied.
        feature_1d = np.clip(feature_1d, 0, None)  # lower bound at 0
        feature_set = np.vstack([feature_set, feature_1d[0:feautre_length]])
    augmented_data_dict[idx] = feature_set
print("augmented :", len(augmented_data_dict))
# random selection for testing
shuffled_index = np.array(list(data_config.index))
random.shuffle(shuffled_index)
pclass = shuffled_index[0:5]  # first 5 ids of the shuffled order
print("pclass :", pclass)
# data split: per-class train/test split, then concatenate
Xcon_train = np.array([], dtype=np.int64).reshape(0, feautre_length)
Xcon_test = np.array([], dtype=np.int64).reshape(0, feautre_length)
ycon_train = np.array([], dtype=np.int64)
ycon_test = np.array([], dtype=np.int64)
for pc in pclass:
    X = augmented_data_dict[pc]
    y = np.full(X.shape[0], pc)  # every augmented sample keeps its source id as its label
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
    Xcon_train = np.vstack([Xcon_train, X_train])
    ycon_train = np.hstack([ycon_train, y_train])
    Xcon_test = np.vstack([Xcon_test, X_test])
    ycon_test = np.hstack([ycon_test, y_test])
model = svm.SVC(kernel='linear', C=1, probability=True)
model.fit(Xcon_train, ycon_train)
print("model trained")
# testing
ypredictions = model.predict(Xcon_test)
print("Accuracy score: ", metrics.accuracy_score(ycon_test, ypredictions) * 100)
# +
'''
SVM Model training & predict with n-dimensional augmented data
# testing
'''
'''
svm with positive class
'''
from sklearn import svm
from sklearn.datasets._samples_generator import make_blobs
from tensorflow.keras.layers import MaxPool1D, GlobalMaxPooling1D
from sklearn.model_selection import train_test_split
import random
from sklearn import metrics
feautre_length = 20  # n-dimensional dataset
feature_pt = random.choice(np.arange(115 - feautre_length))  # window start chosen so the window stays in range
number_of_samples = 299  # 1 raw feature + 299 augmented copies per id
augmented_data_dict = {}
for idx in data_config.index:
    feature_set = np.array([], dtype=np.int64).reshape(0, feautre_length)
    tensor = tf.reshape(featureset_container[idx], [featureset_container[idx].shape[0], featureset_container[idx].shape[1], 1])
    feature = GlobalMaxPooling1D()(tensor).numpy()
    feature_1d = feature.reshape(feature.shape[0])
    feature_set = np.vstack([feature_set, feature_1d[feature_pt:feautre_length + feature_pt]])
    # Data augmentation: random-walk the feature vector with Gaussian noise.
    mu, sigma = 0, 1
    for aug in range(number_of_samples):
        aug_1d = np.random.normal(mu, sigma, feature_1d.shape[0])
        feature_1d = feature_1d + aug_1d
        # BUG FIX: np.clip returns a new array; the original discarded the
        # result, so the intended lower bound was never actually applied.
        feature_1d = np.clip(feature_1d, 0, None)  # lower bound at 0
        feature_set = np.vstack([feature_set, feature_1d[0:feautre_length]])
    augmented_data_dict[idx] = feature_set
print("augmented :", len(augmented_data_dict))
# random selection for testing
shuffled_index = np.array(list(data_config.index))
random.shuffle(shuffled_index)
pclass = shuffled_index[0:5]  # first 5 ids of the shuffled order
print("pclass :", pclass)
# data split: selected ids keep their own label, all other ids collapse to class 0
Xcon_train = np.array([], dtype=np.int64).reshape(0, feautre_length)
Xcon_test = np.array([], dtype=np.int64).reshape(0, feautre_length)
ycon_train = np.array([], dtype=np.int64)
ycon_test = np.array([], dtype=np.int64)
for pc in data_config.index:
    X = augmented_data_dict[pc]
    if pc in pclass:
        y = np.full(X.shape[0], pc)
    else:
        y = np.full(X.shape[0], 0)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)
    Xcon_train = np.vstack([Xcon_train, X_train])
    ycon_train = np.hstack([ycon_train, y_train])
    Xcon_test = np.vstack([Xcon_test, X_test])
    ycon_test = np.hstack([ycon_test, y_test])
print("x:", Xcon_train.shape)
print("y:", ycon_train)
model = svm.SVC(kernel='linear', C=1, probability=True, max_iter=-1, verbose=2)
model.fit(Xcon_train, ycon_train)
print("model trained")
# testing
ypredictions = model.predict(Xcon_test)
print("Accuracy score: ", metrics.accuracy_score(ycon_test, ypredictions) * 100)
print("Balanced score : ", metrics.balanced_accuracy_score(ycon_test, ypredictions) * 100)
# +
pd.set_option('display.width', 10)  # narrow pandas display width
from sklearn.metrics import precision_score
# BUG FIX: only the *last* expression of a notebook cell is displayed, so the
# precision and confusion-matrix results below were computed and silently
# discarded; print each result explicitly.
print("precision per class:", precision_score(ycon_test, ypredictions, average=None))
print("confusion matrix:\n", metrics.confusion_matrix(ycon_test, ypredictions))
print(metrics.classification_report(ycon_test, ypredictions))
# +
'''
ROC Curve for multiclass
'''
# NOTE(review): this cell is the macro/micro-average ROC plotting section of
# the scikit-learn multiclass ROC example, but its prerequisites are not
# computed anywhere in this notebook: `fpr`, `tpr`, `roc_auc` (per-class
# dicts), `n_classes`, `interp`, `auc`, `lw`, and `cycle` are all undefined
# here, so the cell raises NameError as-is. Compute the per-class ROC curves
# first (sklearn's "Receiver Operating Characteristic" example) before running.
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at these points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
    mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
         label='micro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["micro"]),
         color='deeppink', linestyle=':', linewidth=4)
plt.plot(fpr["macro"], tpr["macro"],
         label='macro-average ROC curve (area = {0:0.2f})'
               ''.format(roc_auc["macro"]),
         color='navy', linestyle=':', linewidth=4)
colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
for i, color in zip(range(n_classes), colors):
    plt.plot(fpr[i], tpr[i], color=color, lw=lw,
             label='ROC curve of class {0} (area = {1:0.2f})'
                   ''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--', lw=lw)
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Some extension of Receiver operating characteristic to multi-class')
plt.legend(loc="lower right")
plt.show()
|
2021_legrest/classifier_model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import spacy
# Load the small English pipeline (tagger, parser, NER).
nlp = spacy.load("en_core_web_sm")
# ## POS tagging
doc = nlp("My name is Vyshak and Im a Computer Science student")
# For each token: surface text, lemma, coarse POS, fine POS tag, dependency
# label, orthographic shape, and alphabetic/stop-word flags.
for token in doc:
    print(token.text, token.lemma_, token.pos_, token.tag_, token.dep_,token.shape_, token.is_alpha, token.is_stop)
# ## Dependency Parsing
# ### Noun chunks
# Noun chunks are “base noun phrases” – flat phrases that have a noun as their head. You can think of noun chunks as a noun plus the words describing the noun – for example, “the lavish green grass” or “the world’s largest tech fund”. To get the noun chunks in a document, simply iterate over Doc.noun_chunks
for chunk in doc.noun_chunks:
    print(chunk.text, chunk.root.text, chunk.root.dep_,chunk.root.head.text)
# ### Navigating the parse tree
# spaCy uses the terms head and child to describe the words connected by a single arc in the dependency tree. The term dep is used for the arc label, which describes the type of syntactic relation that connects the child to the head. As with other attributes, the value of .dep is a hash value. You can get the string value with .dep_.
for token in doc:
    print(token.text, token.dep_, token.head.text, token.head.pos_,[child for child in token.children])
# ## Visualizing dependencies
#
from spacy import displacy
displacy.render(doc, style='dep')
# NOTE(review): displacy.serve starts a *blocking* HTTP server; inside a
# notebook, displacy.render(doc, style="ent") is usually the intended call.
displacy.serve(doc, style="ent")
|
dependencyCodes/Studying/SpacyStudying.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Experiment: Cohesion
# The goal of cohesion is to maintain a cohesive collective whose network structure is neither too dense nor too sparse.
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import matplotlib
matplotlib.rcParams['figure.figsize'] = [12, 8]
import math
import numpy as np
# Project-local simulation modules (fish swarm simulator).
from interaction import Interaction
from environment import Environment
from fish import Fish
from channel import Channel
from observer import Observer
from utils import generate_distortion, generate_fish, run_simulation
# -
# ### Dispersion
# All robots start in the same place and have too many neighbors. They disperse to decrease their number of neighbors.
# +
run_time = 20 # in seconds
num_fish = 25
arena_size = np.array([1780, 1780, 1170])  # 3-D arena dimensions (units per the simulator — TODO confirm)
arena_center = arena_size / 2.0
initial_spread = 2000
# Uniform random start positions in a cube of side `initial_spread` centered on the arena.
fish_pos = initial_spread * np.random.rand(num_fish, 3) + arena_center - initial_spread / 2.0
clock_freqs = 1
verbose = False
distortion = generate_distortion(type='none', magnitude=1, n=math.ceil(arena_size[0]/10), show=False)
environment = Environment(
    arena_size=arena_size,
    node_pos=fish_pos,
    distortion=distortion,
    prob_type='binary',
    conn_thres=2000,
    conn_drop=1,
    noise_magnitude=10,
    verbose=verbose
)
interaction = Interaction(environment, verbose=verbose)
channel = Channel(environment)
fish = generate_fish(
    n=num_fish,
    channel=channel,
    interaction=interaction,
    lim_neighbors=[2,3],  # target neighbor count band — presumably [min, max]; verify against Fish
    neighbor_weights=1.0,
    fish_max_speeds=130,
    clock_freqs=clock_freqs,
    verbose=verbose
)
channel.set_nodes(fish)
observer = Observer(fish=fish, environment=environment, channel=channel)
run_simulation(fish=fish, observer=observer, run_time=run_time, dark=False, white_axis=False, no_legend=True)
# -
# ### Aggregation in a counterclockwise curl
# The robots start spread out and have too few neighbors. They aggregate to increase their number of neighbors. While doing so, they are affected by a distortion in the form of a counterclockwise curl.
# +
run_time = 30 # in seconds
num_fish = 40
# NOTE(review): here arena_size is a scalar (the dispersion cell above uses a
# 3-vector), and Environment is called *without* arena_size — confirm the
# Environment default matches this value.
arena_size = 1780 + 1
arena_center = arena_size / 2.0
initial_spread = 2000
# Uniform random start positions in a cube of side `initial_spread` centered on the arena.
fish_pos = initial_spread * np.random.rand(num_fish, 3) + arena_center - initial_spread / 2.0
clock_freqs = 1
verbose = False
# Counterclockwise-curl flow distortion, per the markdown above.
distortion = generate_distortion(type='curl', magnitude=50, n=math.ceil(arena_size/10), show=False)
environment = Environment(
    node_pos=fish_pos,
    distortion=distortion,
    prob_type='binary',
    noise_magnitude=10,
    conn_thres=2000,
    verbose=verbose
)
interaction = Interaction(environment, verbose=verbose)
channel = Channel(environment)
fish = generate_fish(
    n=num_fish,
    channel=channel,
    interaction=interaction,
    lim_neighbors=[math.inf,math.inf],  # presumably "always aggregate" (never enough neighbors) — TODO confirm semantics
    neighbor_weights=1.0,
    fish_max_speeds=130,
    clock_freqs=clock_freqs,
    verbose=verbose
)
channel.set_nodes(fish)
observer = Observer(fish=fish, environment=environment, channel=channel)
run_simulation(fish=fish, observer=observer, run_time=run_time, dark=False, white_axis=False, no_legend=True)
# -
# ### Aggregation and migration
# The robots start dispersed and gather first to then migrate from left to right. This is similar to salmon, which come from different places in the ocean and then swim up their rivers of origin for reproduction.
# +
from events import Move
run_time = 30 # in seconds
num_fish = 25
arena_size = 1780 + 1
arena_center = arena_size / 2.0
initial_spread = 1000
# Start positions: x near the left edge, y centered, z near the bottom —
# the swarm then migrates rightward via the Move event below.
fish_pos = np.zeros((num_fish,3))
fish_pos[:,0] = np.random.rand(num_fish) * initial_spread
fish_pos[:,1] = np.random.rand(num_fish) * initial_spread + arena_center - initial_spread/2
fish_pos[:,2] = np.random.rand(num_fish) * initial_spread
clock_freqs = 1
verbose = False
distortion = generate_distortion(type='none', magnitude=50, n=math.ceil(arena_size/10), show=False)
environment = Environment(
    node_pos=fish_pos,
    distortion=distortion,
    prob_type='binary',
    noise_magnitude=20,
    conn_thres=1000,
    verbose=verbose
)
interaction = Interaction(environment, verbose=verbose)
channel = Channel(environment)
fish = generate_fish(
    n=num_fish,
    channel=channel,
    interaction=interaction,
    lim_neighbors=[math.inf,math.inf],  # presumably "always aggregate" — TODO confirm semantics
    neighbor_weights=1.0,
    fish_max_speeds=130,
    clock_freqs=clock_freqs,
    verbose=verbose
)
channel.set_nodes(fish)
observer = Observer(fish=fish, environment=environment, channel=channel)
# Instruct every fish, at relative clock 0, to move — presumably (50, 0) is a
# velocity/heading toward +x; verify against events.Move.
migrate = Move(50,0)
observer.instruct(event=migrate, rel_clock=0, fish_all=True)
run_simulation(fish=fish, observer=observer, run_time=run_time, dark=False, white_axis=False, no_legend=True, no_star=True)
# -
|
BlueSim/.ipynb_checkpoints/Cohesion-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lionelsamrat10/machine-learning-a-to-z/blob/main/Regression/SVR%20(Support%20Vector%20Regression)/support_vector_regression_samrat.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="m3PAEPRDRLA3"
# # Support Vector Regression (SVR)
# + [markdown] id="0VCUAVIjRdzZ"
# ## Importing the libraries
# + id="56oRF-QfSDzC"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + [markdown] id="fXVXoFWtSF4_"
# ## Importing the dataset
# + id="xfoa8OSORfHQ"
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, -1].values
# + id="fAWJV6gpiTYM" colab={"base_uri": "https://localhost:8080/"} outputId="322b6da4-6087-4c82-ddbb-63b619eebb81"
print(X)
# + id="P1CzeAyRiU3c" colab={"base_uri": "https://localhost:8080/"} outputId="0d09175a-f1fd-4807-e5ec-5c4c75c8beab"
print(y)
# + id="_8Ny1GfPiV3m"
y = y.reshape(len(y),1) # convert the 1D Array to a 2D array, because fit_transform() works on 2D array
# + id="D4aFLlbziaV9" colab={"base_uri": "https://localhost:8080/"} outputId="50281022-d884-42fb-d9ea-eee41df8c765"
print(y)
# + [markdown] id="YS8FeLHYS-nI"
# ## Feature Scaling
# + id="PGeAlD1HTDI1"
# Here we will apply Feature Scaling on both X and y means both dependent and independent variables
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
# + id="nXa8Z9FgjFTQ" colab={"base_uri": "https://localhost:8080/"} outputId="16965e9b-cb0a-4e97-89bf-43fb3c67677c"
print(X)
# + id="i7Oo2kybjGr2" colab={"base_uri": "https://localhost:8080/"} outputId="1bbe1665-8a78-4698-a507-0e07de2faf9a"
print(y)
# + [markdown] id="eiU6D2QFRjxY"
# ## Training the SVR model on the whole dataset
# + colab={"base_uri": "https://localhost:8080/"} id="XMwbp3q2TEUf" outputId="d85fdfd2-5874-4716-9709-09b56a7e04a2"
from sklearn.svm import SVR
regressor = SVR(kernel = 'rbf')
# BUG FIX: after scaling, y is a (n, 1) column vector, but SVR.fit expects a
# 1-D target of shape (n,); passing the column triggers a
# DataConversionWarning (and stricter errors in newer scikit-learn), so
# flatten it for fitting.
regressor.fit(X, y.ravel())
# + [markdown] id="deDnDr8UR5vq"
# ## Predicting a new result
# + colab={"base_uri": "https://localhost:8080/"} id="MKn1X5Iy7e2O" outputId="4bfbb514-3818-42d9-dfdf-a6de4dcb9f7d"
# BUG FIX: predict() returns a 1-D array, while StandardScaler.inverse_transform
# requires a 2-D array (enforced from scikit-learn 1.2), hence the reshape.
print(sc_y.inverse_transform(regressor.predict(sc_X.transform([[6.5]])).reshape(-1, 1)))
# + [markdown] id="zzedFlUISSu_"
# ## Visualising the SVR results
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="KKA2PLmi9yTm" outputId="e26b0963-754c-4adb-a399-b799b9dfb2ad"
# Create the Scatter Plot first
plt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color = 'red')
# BUG FIX: predict() returns a 1-D array; StandardScaler.inverse_transform
# requires 2-D input (enforced from scikit-learn 1.2), hence the reshape.
plt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor.predict(X).reshape(-1, 1)), color = 'blue')
plt.title('Truth or Bluff (SVR)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# + [markdown] id="UahPVNlJSZ-K"
# ## Visualising the SVR results (for higher resolution and smoother curve)
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="IQmT0Br5BWvz" outputId="ed4835de-0ee2-4a4a-97ee-b178600c49bb"
# Dense 0.1-step grid over the original (unscaled) position levels for a smooth curve.
X_grid = np.arange(min(sc_X.inverse_transform(X)), max(sc_X.inverse_transform(X)), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color = 'red')
# BUG FIX: predict() returns a 1-D array; StandardScaler.inverse_transform
# requires 2-D input (enforced from scikit-learn 1.2), hence the reshape.
plt.plot(X_grid, sc_y.inverse_transform(regressor.predict(sc_X.transform(X_grid)).reshape(-1, 1)), color = 'blue')
plt.title('Truth or Bluff (SVR)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
Regression/SVR (Support Vector Regression)/support_vector_regression_samrat.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ANEOS MODEL INTERFACE JUPYTER NOTEBOOK
#
# <NAME><br>
# <EMAIL><br>
# Created October 22, 2019<br>
# Updated January 21, 2020<p>
#
# ***
# ***
# ## COMMENTS ON ORIGINAL ANEOS IRON MODEL WITH HIGH-PRESSURE PHASE CHANGE
# These material parameters for iron were distributed with the original ANEOS in Thompson 1970. The iron parameters were prepared by T.G. Trucano at Sandia according to documentation in the CTH shock code package.
#
# This version was used in the giant impact papers using the GADGET2 code: Marcus et al. 2009, 2010ab; Cuk & Stewart 2012; Sarid et al. 2015; Lock & Stewart 2017; Lock et al. 2018; Rufu et al. 2017; Carter et al. 2018; Bonomo et al. 2019; Denman et al. submitted. This version of ANEOS iron was also used in many impact cratering studies, e.g.: Pierazzo et al. (1997); Svetsov (2005); Artemieva and Pierazzo (2011); Barr and Citron (2011); Quintana et al. (2015); Yue and Di (2017); Moreau et al. (2018).
#
# The parameters were tabulated in Thompson 1970 as "IRON 130KB PHASE TRANSITION" and in Thompson & Lauson 1972 as 'IRON 130PT'. This ANEOS model was used to construct SESAME table 2141 with the aid of the PANDA code.
#
# This is a condensed phase + vapor ANEOS model with ionization and a high-pressure phase transition for pure iron.
#
# Issues:<br>
# - No melt curve.<br>
# - The model includes one high-pressure phase transition, but there are multiple solid phases. The high-pressure phase transition was never completed in ANEOS. It produces artificial discontinuities in the liquid field.<br>
# - The vapor curve does not match available data. The critical point temperature is too high.<br>
# - In the original ANEOS, the temperatures are too high and the entropies are too low in the liquid region. The discrepancy arises because the ANEOS model has a limiting 3nR heat capacity; the true heat capacities in liquid iron are larger.<br>
#
# ***
# ***
# <b>STS ANEOS INTERFACE NOTEBOOK GUIDE</b>
#
# <b>USAGE</b><br>
# This notebook interacts with multiple outputs from the Stewart-modified version of ANEOS, including phase boundary information and tabulated EOS.
#
# This notebook is designed to be used in two modes:<br>
# <ul>
# <li> Development mode: ANEOS is called in the local directory to generate new model output. The user can varying the model input parameters and tabular gridding schemes. This notebook produces plots of the equation of state surface and comparisons to experimental data and ab initio calculations.
# <li> Release mode: the notebook interacts with previously created ANEOS model output. This notebook provides detailed documentation of a version-controlled release of the model EOS and enables users to interact with the EOS surface and compare the model to data.
# </ul>
#
# This notebook is distributed in release mode. Users who wish to use the development mode may request the modified version of ANEOS from the author.<p>
#
# Development mode:<br>
# The notebook calls a local aneos executable which expects a formatted ANEOS.INPUT ascii file and an ascii gridtable.txt definition file that is generated by this notebook. ANEOS produces an ANEOS.OUTPUT ascii file and gridded ascii SESAME-format EOS tables.<br>
# The user edits (1) the input parameters in ANEOS.INPUT (separately from this notebook) and (2) the output EOS table parameters in the notebook cell below.<br>
# This notebook reads some parameters from ANEOS.INPUT and assumes that the variables are spaced by 10 columns.<br>
# When the notebook is run, the code issues a system call to run the aneos program in the local directory and reads in results from the ANEOS.OUTPUT, NEW-SESAME-STD.TXT, and NEW-SESAME-EXT.TXT files to make the plots below and to generate the GADGET format EOS table and other miscellaneous files.<p>
#
# Release mode:<br>
# This notebook reads in the provided ANEOS.INPUT, EOS tables, and other distribution data to generate the plots below. <p>
#
# <b>OUTPUT/PROVIDED DATA FILES</b><br>
# ANEOS.OUTPUT: Plain text file with an overview of the calculated equation of state. Look at this file for any errors in the EOS calculation.<br>
#
# NEW-SESAME-STD.TXT: Standard length Sandia-style SESAME file with 201 table and 301 table (density, temperature, pressure, sp. internal energy, Helmholtz free energy). 301 table units: g/cm$^3$, K, GPa, MJ/kg, MJ/kg. <br>
#
# NEW-SESAME-EXT.TXT: SESAME-style table with extra variables from ANEOS. Contains the standard 201 table and non-standard 301-extra-variables EOS table. The 301 table has: density grid values, temperature grid values, sp. entropy(T,rho), sound speed(T,rho), sp. heat capacity(T,rho), KPA flag(T,rho). 2-D arrays list all densities, looping over each temperature. 301 table units: g/cm$^3$, K, MJ/K/kg, cm/s, MJ/K/kg, integer flag, integer flag. The KPA flag is an ANEOS output with phase information. <br>
#
# NEW-SESAME-HUG.TXT: Ascii table with the reference state Hugoniot. <br>
#
# NEW-SESAME-STD-NOTENSION.TXT: Standard length Sandia-style SESAME file with 201 table and 301 table (density, temperature, pressure, sp. internal energy, Helmholtz free energy) where the ANEOS tension region is removed and replaced with the solid-vapor coexistence region. 301 table units: g/cm$^3$, K, GPa, MJ/kg, MJ/kg. <br>
#
# NEW-GADGET2-STD-NOTENSION.TXT: Standard GADGET2 EOS table generated by interpolating the ANEOS table with no tension. Format: number of density points, number of sp. entropy points, density grid values, sp. entropy grid values, 2-D arrays of pressure, temperature, specific internal energy, sound speed. 2-D array values list all densities, looping over each value for specific entropy. Units: g/cm$^3$, erg/K/g, dynes/cm$^2$, K, erg/g, cm/s. <br>
#
# NEW-GADGET2-EXT-NOTENSION.TXT: Extended variables GADGET2 EOS table generated by interpolating the ANEOS table with no tension. Format: number of density points, number of sp. entropy points, density grid values, sp. entropy grid values, 2-D arrays of Helmholtz free energy, KPA flag, MDQ flag. 2-D array values list all densities, looping over each value for specific entropy. Units: g/cm$^3$, erg/K/g, erg/g, integer flag, integer flag. <br>
#
# The SESAME tabular equation of state formats are described in Lyon and Johnson (1992).<p>
#
# <b>ANEOS NOTES</b><br>
# The ANEOS source code was provided by <NAME> and <NAME>. This version included Melosh's (2007) treatment for molecular gas and the capability to include a melt curve and solid-solid/liquid-liquid transition (Collins & Melosh LPSC 2014).<br>
# In Stewart et al. 2019, the Melosh and Collins version of ANEOS was modified to adjust the Debye model for the thermal term in the Helmholtz free energy to approach a user-defined heat capacity at high temperatures. The multiplicative factor $f_{cv}$ is entered in input value V44, and the high-temperature heat capacity is $3f_{cv}Nk$.<p>
# The ANEOSTEST.f routine was modified to output tabulated EOS. Note that the current version of this function sets positive pressures smaller than 1.E-30 GPa equal to 1.E-30 GPa.<br>
# ANEOS2.f was modified to increase the number of points tabulated on the melt curve in the ANEOS.OUTPUT file and to gather the variables for the heat capacity modification.<br>
# ANHUG.f was modified to output more Hugoniot points.<br>
# ANEOS1.f and ANEOS2.f were modified to increase the high temperature limit for the heat capacity (Stewart et al., SCCM19).<p>
#
# <b>CORRECTIONS AND IMPROVEMENTS</b><br>
# Please send corrections to STS and any requests for data to include in the model-data comparison plots.<p>
#
# +
# This cell embeds a JavaScript snippet that lets the reader toggle the
# visibility of all code cells (useful for a report-style rendering of the
# notebook). The HTML/JS string is runtime content and is left untouched.
from IPython.display import HTML
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
# +
# Record the version information for these calculations for posterity.
import platform
print('python version: ', platform.python_version())
del platform
# The original repeated an import/print/del triple once per library; loop over
# the library names instead. importlib.import_module avoids leaving the bare
# module names bound, matching the original's `del` behavior, and the two-arg
# print reproduces the original output byte-for-byte.
import importlib
for _lib in ('matplotlib', 'numpy', 'scipy', 'pandas'):
    print('{} version: '.format(_lib), importlib.import_module(_lib).__version__)
# this prevents some error messages for inline plotting in a jupyter notebook
# %matplotlib inline
# import python libraries
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from copy import deepcopy
import numpy as np
from scipy import interpolate
import colormaps as local_cmaps
import subprocess
import pandas as pd
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
#
# Global plot defaults for the rest of the notebook.
plt.rcParams["figure.figsize"] = (20,10)
plt.rcParams["patch.force_edgecolor"] = True
# -
# ## USER INPUTS FOR SESAME AND GADGET2 TABLE CONSTRUCTION
#
# If the code cell below is hidden, use the button above to reveal the cell.<p>
#
# In development mode, the user must input:<br>
# 1. Header information for the SESAME table.<br>
# 2. Temperature, density and entropy grid points.<p>
#
# The following code cell also includes the development mode flag and
# option to skip construction of a GADGET2 table if it is not needed.<p>
# -------------------------------------------------------------
# USER INPUTS
#-------------------------------------------------------------
# NOTEBOOK MODE
#-------------------------------------------------------------
#
DEVMODE = 1 # 1=development mode (runs local ./aneos and writes tables); 0=release mode (reads distribution files)
#
#-------------------------------------------------------------
# OPTIONS TO REMOVE TENSION REGION AND CONSTRUCT A GADGET2 TABLE
#-------------------------------------------------------------
#
REMOVETENSIONFLAG=0 # 0- keep tension region; 1- remove tension region
MAKEGADGETTABLE=0 # 1=make table; 0=do not make gadget2 table
#
#-------------------------------------------------------------
# USER MUST SET THESE VALUES TO DEFINE THE TABULATED EOS GRIDS
#-------------------------------------------------------------
#
# ====>>>>>> YOU NEED TO MAKE SURE THESE VALUES MATCH ANEOS.INPUT <<<<=====
MODELNAME = 'Iron-ANEOS-SLVTv0.1Gx'
# Header information must all be compatible with float format
MATID = 1.0 # MATID number
DATE = 191022. # Date as a single 6-digit number YYMMDD
VERSION = 0.1 # ANEOS Parameters Version number
FMN = 26. # Formula weight in atomic numbers for Fe
FMW = 55.847 # Formula molecular weight (g/mol) for Fe (comment previously said g/cm3)
# The following define the default initial state for material in the 201 table
R0REF = 8.06 # g/cm3 *** R0REF is inserted into the density array; using gamma-iron for rho0
K0REF = 1.33E12 # dynes/cm2; using gamma-iron for rho0
T0REF = 298. # K -- *** T0REF is inserted into the temperature array
P0REF = 1.E6 # dynes/cm2 -- this defines the principal Hugoniot calculated below
#
# generate output files for other ANEOS models
modelflag = 2 # 0 - don't change anything above; 1 - Canup et al. 2013;
#                2 - GADGET2 Marcus PhD Thesis & Cuk & Stewart 2012; 3 - Nakajima and Melosh 2014 (NOTE: name garbled as <NAME> in source -- verify)
#
if modelflag == 1:
    MODELNAME = 'IRON WITH MELT CURVE'
    # Header information must all be compatible with float format
    MATID = 1.0 # MATID number
    DATE = 191022. # Date as a single 6-digit number YYMMDD
    VERSION = 1.0 # ANEOS Parameters Version number
    FMN = 26. # Formula weight in atomic numbers for Fe (comment previously said Mg2SiO4; copied from forsterite notebook)
    FMW = 55.847 # Formula molecular weight for Fe (comment previously said Mg2SiO4)
    # The following define the default initial state for material in the 201 table
    R0REF = 7.85 # g/cm3 *** R0REF is inserted into the density array
    K0REF = 1.45E12 # dynes/cm2 K0=rho0*cs0^2
    T0REF = 298. # K -- *** T0REF is inserted into the temperature array
    P0REF = 1.E6 # dynes/cm2 -- this defines the principal Hugoniot calculated below
if modelflag == 2:
    MODELNAME = 'IRON 130PT Thompson 1970'
    # Header information must all be compatible with float format
    MATID = 1.0 # MATID number
    DATE = 191022. # Date as a single 6-digit number YYMMDD
    VERSION = 1.0 # ANEOS Parameters Version number
    FMN = 26. # Formula weight in atomic numbers for Fe (comment previously said Mg2SiO4; copied from forsterite notebook)
    FMW = 55.847 # Formula molecular weight for Fe (comment previously said Mg2SiO4)
    # The following define the default initial state for material in the 201 table
    R0REF = 7.85 # g/cm3 *** R0REF is inserted into the density array
    K0REF = 1.93E12 # dynes/cm2 K0=rho0*cs0^2
    T0REF = 298. # K -- *** T0REF is inserted into the temperature array
    P0REF = 1.E6 # dynes/cm2 -- this defines the principal Hugoniot calculated below
#
#-------------------------------------------------------------
# EXAMPLE EOS GRID ARRAYS
#-------------------------------------------------------------
#
# Life is easier if the grid arrays are even in length (after adding T0REF and R0REF below)
#
# Each grid is defined piecewise: gridXvals gives the segment edges,
# gridXstyle chooses linear (1) or log (0) spacing per segment, and
# gridXnpts the number of points per segment.
# Flag for easily switching between different table grids during model development
# 1: high res for testing
gridflag = 1
#
#------------------------------------------------------
if gridflag == 0: # low resolution grid
    # define the temperature array for the SESAME table
    # 0 K is changed to 1 K
    # T0REF is inserted into the table if it is not present in the original array
    # need to have extra temperature resolution around the triple point
    gridtvals = [0, 1750, 1950, 20000.,1.E5, 1.E7] # K -- this array defines the edges of the segments along the grid, 0 is changed to 1 below
    gridtstyle = [1,1,1,1,0] # 1 for linear, 0 for log in each segment
    gridtnpts = [20, 20, 150.,50.,10.] # number of grid points in each segment
    # define the density array for the SESAME AND GADGET2 tables
    # R0REF is inserted into the table if it is not present in the original array
    # need to have extra density resolution around the triple point
    # 0 g/cm3 is removed if present
    gridrvals = [1.e-20, 0.5, 6.6, 7.4, 20., 30.] # g/cm3 -- this array defines the edges of the segments along the grid
    gridrstyle = [0, 1, 1, 1, 1] # 1 for linear, 0 for log in each segment
    gridrnpts = [50, 20, 20, 40, 20] # number of grid points in each segment
    # define the specific entropy array for the GADGET2 table
    # 0 MJ/K/kg is removed if present
    gridsvals = [1.e-4,.1] # MJ/K/kg -- this array defines the edges of the segments along the grid
    gridsstyle = [0] # 1 for linear, 0 for log in each segment
    gridsnpts = [200] # number of grid points in each segment
    #
if gridflag == 1: # new form for the high resolution table entries in 301 table. Try for 1e6 values
    # define the temperature array for the SESAME table
    # 0 K is changed to 1 K
    # T0REF is inserted into the table if it is not present in the original array
    # need to have extra temperature resolution around the triple point
    gridtvals = [0, 100, 1750, 1950, 20000.,1.E5, 1.E7] # K -- this array defines the edges of the segments along the grid, 0 is changed to 1 below
    gridtstyle = [1,1,1,1,1,0] # 1 for linear, 0 for log in each segment
    gridtnpts = [2, 75., 100, 500.,85.,50.] # number of grid points in each segment
    # define the density array for the SESAME AND GADGET2 tables
    # R0REF is inserted into the table if it is not present in the original array
    # need to have extra density resolution around the triple point
    # 0 g/cm3 is removed if present
    gridrvals = [1.e-20, 0.5, 6.6, 7.4, 20., 30.] # g/cm3 -- this array defines the edges of the segments along the grid
    gridrstyle = [0, 1, 1, 1, 1] # 1 for linear, 0 for log in each segment
    gridrnpts = [150, 84, 101, 400, 100] # number of grid points in each segment
    # define the specific entropy array for the GADGET2 table
    # 0 MJ/K/kg is removed if present
    gridsvals = [1.e-4,.1] # MJ/K/kg -- this array defines the edges of the segments along the grid
    gridsstyle = [0] # 1 for linear, 0 for log in each segment
    gridsnpts = [800] # number of grid points in each segment
#----------------------------------------
# END USER INPUT
#----------------------------------------
# THIS CELL DEFINEs FUNCTIONS
# A def is preferred over assigning a lambda to a name (PEP 8 E731), and
# enumerate replaces the original zip(xs, range(len(xs))) contortion.
def get_indexes(x, xs):
    """Return the list of indices i where xs[i] == x (empty list if absent)."""
    return [i for i, y in enumerate(xs) if y == x]
#
# LOAD EXTERNAL FUNCTIONS AND OBJECT CLASSES
# defines rational function interpolation functions rf1d and rf2d
# #%run rfinterpolation.py
# defines class objects for extEOStable and functions to read in ANEOS input and output
# READ THIS CODE IF YOU WANT TO UNDERSTAND THE FILE STRUCTURES
# %run eostable.py
#import eostable
# Run the following help for the functions in the class
#help(extEOStable())
# +
# THIS CELL HAS CODE TO GENERATE THE GRIDS AND WRITES TO A FILE TO BE READ BY ANEOS
print('GENERATING ARRAYS FOR THE INDICES OF THE TABULATED EOS.')
#-------------- the following code generates the temperature array
# Build the piecewise temperature grid segment by segment; each segment is
# half-open (its upper edge is the first point of the next segment).
gridtarr = []
for iir in range(1,len(gridtvals)):
    if gridtstyle[iir-1] == 1:
        # linear region
        temptarr = np.arange(gridtnpts[iir-1])/gridtnpts[iir-1]*(gridtvals[iir]-gridtvals[iir-1])+gridtvals[iir-1]
    if gridtstyle[iir-1] == 0:
        # log region
        logstep = (np.log10(gridtvals[iir])-np.log10(gridtvals[iir-1]))/gridtnpts[iir-1]
        temptarr = np.power(10.,np.log10(gridtvals[iir-1])+(np.arange(gridtnpts[iir-1]))*logstep)
    gridtarr = np.concatenate((gridtarr,temptarr))
# check if T0REF already in the grid (within 1.E-4 K tolerance)
#indexT0REF = get_indexes(T0REF,gridtarr)
indexT0REF = np.where(abs(T0REF-gridtarr)<1.E-4)[0]
if len(indexT0REF) == 0:
    # insert T0REF K into the array before the first point above it
    indexT0REF = np.where((gridtarr-T0REF)>0.)[0]
    gridtarr = np.concatenate((gridtarr[0:indexT0REF[0]],[T0REF],gridtarr[indexT0REF[0]::]))
    if indexT0REF[0]>1:
        print('Inserted the REFERENCE TEMPERATURE INTO THE ARRAY:',gridtarr[indexT0REF[0]-2:indexT0REF[0]+3])
    else:
        print('Inserted the REFERENCE TEMPERATURE INTO THE ARRAY:',gridtarr[indexT0REF[0]-1:indexT0REF[0]+3])
else:
    print('T0REF already in temperature array: ',T0REF,gridtarr[indexT0REF[0]-2:indexT0REF[0]+2])
# Remove 0 K or convert to 1 K (exactly one zero and no pre-existing 1 K)
indexzero = get_indexes(0.,gridtarr)
indexone = get_indexes(1.,gridtarr)
if len(indexzero) == 1 and len(indexone) == 0:
    gridtarr[indexzero] = 1.
    print('Zero kelvin changed to 1 kelvin.')
else:
    gridtarr = np.delete(gridtarr, indexzero)
    print('Deleted zero K index.')
#
#gridtsize = len(gridtarr)
print('Number of temperature points: ',len(gridtarr))
#print(gridtarr)
#------------ the following code generates the density array
# Same piecewise construction as the temperature grid above.
gridrarr = []
for iir in range(1,len(gridrvals)):
    if gridrstyle[iir-1] == 1:
        # linear region
        temprarr = np.arange(gridrnpts[iir-1])/gridrnpts[iir-1]*(gridrvals[iir]-gridrvals[iir-1])+gridrvals[iir-1]
    if gridrstyle[iir-1] == 0:
        # log region
        logstep = (np.log10(gridrvals[iir])-np.log10(gridrvals[iir-1]))/gridrnpts[iir-1]
        temprarr = np.power(10.,np.log10(gridrvals[iir-1])+(np.arange(gridrnpts[iir-1]))*logstep)
    gridrarr = np.concatenate((gridrarr,temprarr))
# check if R0REF already in the grid (within 1.E-4 g/cm3 tolerance)
#indexR0REF = get_indexes(R0REF,gridrarr)
indexR0REF = np.where(abs(R0REF-gridrarr)<1.E-4)[0]
if len(indexR0REF) == 0:
    # insert R0REF into the array before the first point above it
    indexR0REF = np.where((gridrarr-R0REF)>0.)[0]
    gridrarr = np.concatenate((gridrarr[0:indexR0REF[0]],[R0REF],gridrarr[indexR0REF[0]::]))
    print('Inserted the REFERENCE DENSITY INTO THE ARRAY:',gridrarr[indexR0REF[0]-2:indexR0REF[0]+3])
    #print('NOT INSERTING REFERENCE DENSITY INTO THE ARRAY.')
else:
    print('R0REF already in density array: ',R0REF,gridrarr[indexR0REF[0]-2:indexR0REF[0]+2])
# Remove 0 g/cm3 if present
indexzero = get_indexes(0.,gridrarr)
if len(indexzero) == 1:
    gridrarr = np.delete(gridrarr, indexzero)
    print('Deleted zero g/cm3 index.')
#
#gridrsize = len(gridrarr)
print('Number of density points: ',len(gridrarr))
#print(gridrarr)
#------------ the following code generates the specific entropy array for gadget tables
# Same piecewise construction as the temperature/density grids above.
gridsarr = []
for iir in range(1,len(gridsvals)):
    if gridsstyle[iir-1] == 1:
        # linear region
        tempsarr = np.arange(gridsnpts[iir-1])/gridsnpts[iir-1]*(gridsvals[iir]-gridsvals[iir-1])+gridsvals[iir-1]
    if gridsstyle[iir-1] == 0:
        # log region
        logstep = (np.log10(gridsvals[iir])-np.log10(gridsvals[iir-1]))/gridsnpts[iir-1]
        tempsarr = np.power(10.,np.log10(gridsvals[iir-1])+(np.arange(gridsnpts[iir-1]))*logstep)
    gridsarr = np.concatenate((gridsarr,tempsarr))
# Replace 0 MJ/K/kg with a tiny positive value if present (keeps the grid strictly positive)
indexzero = get_indexes(0.,gridsarr)
if len(indexzero) == 1:
    gridsarr[indexzero]=1.e-20
#    gridsarr = np.delete(gridsarr, indexzero)
    print('replaced zero MJ/K/kg index with 1.E-20')
#gridssize = len(gridsarr)
#
print('Number of specific entropy points: ',len(gridsarr))
#print(gridsarr)
print('Number of words in the standard SESAME 301 table (NR, NT, R, T, P, IE, HFE)=',2.+len(gridrarr)+len(gridtarr)+len(gridrarr)*len(gridtarr)*3.)
#-----------------------------------------------------------
#
if DEVMODE:
    print('WRITING tablegrid.txt FOR ANEOS in the local directory.\n')
    # The ANEOS driver reads the header scalars, then the two grid sizes,
    # then the density and temperature grid values, one number per line.
    with open("tablegrid.txt","w") as tablegridfile:
        for headerval in (MATID, DATE, VERSION, FMN, FMW, R0REF, K0REF, T0REF):
            tablegridfile.write("{:.6e}".format(headerval)+'\n')
        tablegridfile.write("{:.6e}".format(len(gridrarr))+'\n') # number of density points
        tablegridfile.write("{:.6e}".format(len(gridtarr))+'\n') # number of temperature points
        for rhoval in gridrarr:
            tablegridfile.write("{:.6e}".format(rhoval)+'\n')
        for tempval in gridtarr:
            tablegridfile.write("{:.6e}".format(tempval)+'\n')
#
#-----------------------------------------------------
# Output GADGET2 Table information
if MAKEGADGETTABLE:
    print('Number of words in the extended GADGET2 EOS table (NR, NS, R, S, P, T, IE, CS, HFE, CV, KPA, MDQ)=',2.+len(gridrarr)+len(gridtarr)+len(gridrarr)*len(gridtarr)*8.)
#------------------------------------------------------
#----------------------------------------------------
# Sp. Internal Energy grid for Tillotson: 600 log-spaced points spanning
# 1.E-3 to 1.E3 MJ/kg, with 0 prepended as the first point.
tillearr = np.power(10.,np.arange(600)/600.*6.-3.) # MJ/kg
tillearr = np.append(0.,tillearr)
print('Number of specific internal energy points for Tillotson EOS: ',len(tillearr))
#----------------------------------------------------
# Paranoia check
# make sure all table grid values are unique -- duplicate grid values would
# corrupt the tabulated EOS and the interpolations below.
# FIX: these branches previously halted the notebook by referencing an
# undefined name (NameError); raise an explicit exception instead.
if (len(np.unique(gridtarr))!=len(gridtarr)):
    raise ValueError('NON-UNIQUE VALUES IN TEMP ARRAY')
if (len(np.unique(gridrarr))!=len(gridrarr)):
    raise ValueError('NON-UNIQUE VALUES IN DENSITY ARRAY')
if (len(np.unique(gridsarr))!=len(gridsarr)):
    raise ValueError('NON-UNIQUE VALUES IN SP. ENTROPY ARRAY')
if (len(np.unique(tillearr))!=len(tillearr)):
    raise ValueError('NON-UNIQUE VALUES IN SP. ENERGY ARRAY')
#----------------------------------------------------
print('\nDone generating EOS grids.\n')
print('\nCHECK THAT THE GRID ARRAYS ARE EVEN IN LENGTH.\n')
# END TABLE GRIDDING INFORMATION
# -
if DEVMODE:
    # call aneos in the local directory (blocking call; returncode is
    # printed below but not checked -- inspect ANEOS.OUTPUT by hand)
    # output file is in the local directory
    print('RUNNING IN DEVELOPMENT MODE')
    print('Calling aneos in the local directory.')
    completed = subprocess.run(['./aneos'])
    print('Aneos call complete. CHECK THAT A NEW ANEOS.OUTPUT FILE WAS CREATED.')
    print('aneos call result: ', completed.returncode)
    print('Check ANEOS.OUTPUT file for errors.')
else:
    print('RUNNING IN RELEASE MODE: NO ANEOS SYSTEM CALL.')
    print('READING IN EOS DISTRIBUTION FILES.')
# +
# READ IN NEW ANEOS MODEL and fill the extEOStable class object
# source in eostable.py
#------------------------------------------------------------------
NewEOS = extEOStable() # FIRST make new empty EOS object
NewEOS.loadextsesame('NEW-SESAME-EXT.TXT') # LOAD THE EXTENDED 301 SESAME FILE GENERATED BY STSM VERSION OF ANEOS
NewEOS.loadstdsesame('NEW-SESAME-STD.TXT') # LOAD THE STANDARD 301 SESAME FILE GENERATED BY STSM VERSION OF ANEOS
NewEOS.MODELNAME = MODELNAME # string set above in user input
NewEOS.MDQ = np.zeros((NewEOS.NT,NewEOS.ND)) # makes the empty MDQ (model development quality) array
#print(NewEOS.units) # these are the default units for SESAME rho-T tables
#'Units: g/cm3, K, GPa, MJ/kg, MJ/kg, MJ/K/kg, cm/s, MJ/K/kg, KPA flag. 2D arrays are (NT,ND).'
# Add the header info to the table. This could be done during the loading.
# if made from this notebook, these values are set in the user-input above.
# ** MAKE SURE THEY MATCH ANEOS.INPUT **
NewEOS.MATID = MATID
NewEOS.DATE = DATE
NewEOS.VERSION = VERSION
NewEOS.FMN = FMN
NewEOS.FMW = FMW
NewEOS.R0REF = R0REF
NewEOS.K0REF = K0REF
NewEOS.T0REF = T0REF
NewEOS.P0REF = P0REF
#
# Load the information from ANEOS.INPUT and ANEOS.OUTPUT
NewEOS.loadaneos(aneosinfname='ANEOS.INPUT',aneosoutfname='ANEOS.OUTPUT')
#
# calculate the principal Hugoniot from the table
if DEVMODE:
    # calculate the Hugoniot and write to a file
    NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF,writefilename='NEW-SESAME-HUG.TXT')
else:
    # just calculate the Hugoniot and put it in the EOS object
    NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF)
#
# calculate the 1-bar profile; loop over temp
# (1 bar = 1.E-4 GPa; S and rho are interpolated in pressure along each isotherm.
# NOTE(review): np.interp assumes P increases with density along the isotherm -- verify)
NewEOS.onebar.T = np.zeros(NewEOS.NT)
NewEOS.onebar.S = np.zeros(NewEOS.NT)
NewEOS.onebar.rho = np.zeros(NewEOS.NT)
it0 = np.where(NewEOS.T >= NewEOS.T0REF)[0]
id0 = np.arange(NewEOS.ND)#np.where(NewEOS.rho >= 0.8*NewEOS.R0REF)[0]
for iit in range(0,NewEOS.NT):
    NewEOS.onebar.T[iit] = NewEOS.T[iit]
    NewEOS.onebar.S[iit] = np.interp(1.E-4,NewEOS.P[iit,id0],NewEOS.S[iit,id0])
    NewEOS.onebar.rho[iit] = np.interp(1.E-4,NewEOS.P[iit,id0],NewEOS.rho[id0])
    #print(iit,NewEOS.onebar.rho[iit],NewEOS.onebar.T[iit],NewEOS.onebar.S[iit]*1.E3)
# +
# Make a NOTENSION TABLE and GADGET TABLE
################### REMOVE TENSION REGION ###########################
## The GADGET2 tables do not have a tension region.
## If the gadget2 table is selected, then must generate a no-tension table
if MAKEGADGETTABLE:
    REMOVETENSIONFLAG=1 # 0- keep tension region; 1- remove tension region
# minimum pressure 1.E-30
# minimum sp energy about 1.E-4 before negative entropy region
# minimum sp entropy about 1.E-5 before negative entropy region
# minimum HFE about 1.E-3 before negative entropy region
# minimum sound speed about 1.E-10 before negative entropy region
# minimum sp heat capacity about 1.E-4 before negative entropy region
## FUTURE WORK: THIS SECTION COULD USE THE LEVER RULE TO FILL IN THE MISSING POINTS
## SINCE MANY VALUES ARE NEAR THE LOWER LIMITS IN THE SUBLIMATION REGION ANYWAY,
## HERE, TENSION REGION VALUES ARE FILLED IN WITH THE ADJACENT S-V DOME VALUES
## This is obviously bad for interpolations. Fix for future work.
if REMOVETENSIONFLAG:
    # First make a deep copy of the original table with the tension region
    NewEOSorig = deepcopy(NewEOS)
    NewEOS.MODELNAME = NewEOS.MODELNAME+'notension'
    # THIS SECTION REWRITES NEGATIVE PRESSURE REGION
    # The tension region is in the solid-vapor phase boundary.
    # Use the liquid-vapor phase boundary. Below the temperature of the triple point, it is the sublimation curve.
    # But the pressures for the solid calculation is spurious at very low pressures, so use the vapor pressure.
    # HFE = GFE + TS
    print('REMOVING TENSION REGION FROM THE ORIGINAL ANEOS TABLE.')
    for itt in range(NewEOS.NT-1,-1,-1):
        tmp = np.where(NewEOS.P[itt,:] < 0.)
        if (len(tmp[0]) > 0):
            badind = tmp[0]
            if badind[0] > 0:
                #print(itt, len(badind), NewEOS.T[itt], 'tension only', NewEOS.P[itt,badind[0]-4:badind[0]+1])
                # at this temperature, all densities are the same pressure in the vapor dome
                # so use the adjacent positive pressure value
                # do the same for all the variables since there is very little change in this region of the dome.
                # Errors introduced by interpolating the vapor curve are comparable to
                # recalculating the lever rule. Would need to call ANEOS again to get good end point values
                # to calculate the mass-weighted value via the lever rule.
                NewEOS.P[itt,badind] = np.full(len(badind),NewEOS.P[itt,badind[0]-1])
                NewEOS.U[itt,badind] = np.full(len(badind),NewEOS.U[itt,badind[0]-1])
                NewEOS.S[itt,badind] = np.full(len(badind),NewEOS.S[itt,badind[0]-1])
                NewEOS.A[itt,badind] = np.full(len(badind),NewEOS.A[itt,badind[0]-1])
                NewEOS.cs[itt,badind] = np.full(len(badind),NewEOS.cs[itt,badind[0]-1])
                NewEOS.cv[itt,badind] = np.full(len(badind),NewEOS.cv[itt,badind[0]-1])
                NewEOS.KPA[itt,badind] = np.full(len(badind),2)
                NewEOS.MDQ[itt,badind] = np.full(len(badind),-1) # replaced tension region flag
            else:
                # negative pressure at the lowest density: no adjacent value to copy from.
                # FIX: previously halted via an undefined name (NameError); raise explicitly.
                print('SOMETHING VERY WRONG WITH TENSION IN THE TABLE')
                raise RuntimeError('Tension region extends to the lowest density; cannot patch table.')
    #
    # THIS SECTION RESETS NEGATIVE SPECIFIC ENTROPIES WITH ADJACENT VALUES OR MINIMUM POSITIVE VALUES
    # THESE VALUES TYPICALLY DEFINE THE COLD COMPRESSION LIMIT OF THE MATERIAL
    # OR SPURIOUS NEGATIVE ENTROPIES AT VERY LOW PRESSURES (IMPERFECT DEBYE MODEL IN ANEOS)
    for itt in range(NewEOS.NT-1,-1,-1):
        tmp = np.where(NewEOS.S[itt,:] < 0.)
        if (len(tmp[0]) > 0):
            badind = tmp[0]
            if badind[0] > 0:
                #print(itt, antarr[itt], andarr[badind[0]], badind[0],len(badind),'negative entropy', ansarr[itt,badind[0]],ansarr[itt,badind[0]-1])
                NewEOS.P[itt,badind] = np.full(len(badind),NewEOS.P[itt,badind[0]-1]) # use the adjacent pressure at this temperature in the dome
                NewEOS.U[itt,badind] = np.full(len(badind),NewEOS.U[itt,badind[0]-1])
                NewEOS.S[itt,badind] = np.full(len(badind),NewEOS.S[itt,badind[0]-1])
                NewEOS.A[itt,badind] = np.full(len(badind),NewEOS.A[itt,badind[0]-1])
                NewEOS.cs[itt,badind] = np.full(len(badind),NewEOS.cs[itt,badind[0]-1])
                NewEOS.cv[itt,badind] = np.full(len(badind),NewEOS.cv[itt,badind[0]-1])
                #NewEOS.KPA[itt,badind] # leave as original region flag
                NewEOS.MDQ[itt,badind] = np.full(len(badind),-2) # negative entropies flag
                #print('Some NEGATIVE S AT THIS INDEX, TEMP(K): ',itt, NewEOS.T[itt],NewEOS.P[itt,badind[0]-1],NewEOS.P[itt,badind[0]-2]) #, badind[0],len(badind),'negative entropy', NewEOS.S[itt,badind[0]])
            if badind[0] == 0:
                print('All S VALUES NEGATIVE AT THIS INDEX, TEMP(K): ',itt, NewEOS.T[itt]) #, badind[0],len(badind),'negative entropy', NewEOS.S[itt,badind[0]])
                NewEOS.S[itt,badind] = np.full(len(badind),1.E-10)
                NewEOS.U[itt,badind] = np.full(len(badind),1.E-10) # also replace the negative sp. internal energies
                NewEOS.MDQ[itt,badind] = np.full(len(badind),-2) # negative entropies flag
    #### ONLY WRITE A NEW TABLE IN DEVELOPMENT MODE
    if DEVMODE:
        #### WRITE NEW SESAME TABLE WITH NO TENSION REGION
        # WRITE STANDARD (SHORT) SESAME FILE
        # WRITE SESAME TABLE TO FILE LIMITED TO P, E, HFE
        NewEOS.writestdsesame(writestdsesfname="NEW-SESAME-STD-NOTENSION.TXT")
#### ONLY WRITE A NEW TABLE IN DEVELOPMENT MODE
#if DEVMODE:
#    #### WRITE MDQ Flag as a 301-style table
#    NewEOS.writemdqsesame(writemdqsesfname="NEW-SESAME-MDQ.TXT")
#
#--------------------------------------------------------------------------
########## MAKE GADGET2 EOS TABLE ARRAYS ###################
# LATER WE CAN TURN THIS INTO A FUNCTION IN THE GADGET EOS OBJECT
# FLAG IS SET ABOVE IN THE USER INPUT SECTION
# MAKEGADGETTABLE = 1 # 1 - do it ; 0 - skip it
if MAKEGADGETTABLE:
    print('Starting interpolation for GADGET table.')
    GadEOS = extGADtable() # make new empty Gadget EOS object
    #
    # rho array matches the SESAME table grid (gridrarr)
    # S array is gridsarr
    GadEOS.ND = len(gridrarr)
    GadEOS.NS = len(gridsarr)
    GadEOS.rho = gridrarr
    GadEOS.S = gridsarr
    # 2D arrays are (NS,ND): entropy varies along rows, density along columns
    GadEOS.P = np.zeros((GadEOS.NS,GadEOS.ND))
    GadEOS.T = np.zeros((GadEOS.NS,GadEOS.ND))
    GadEOS.U = np.zeros((GadEOS.NS,GadEOS.ND))
    GadEOS.A = np.zeros((GadEOS.NS,GadEOS.ND))
    GadEOS.cs = np.zeros((GadEOS.NS,GadEOS.ND))
    GadEOS.cv = np.zeros((GadEOS.NS,GadEOS.ND))
    GadEOS.KPA = np.zeros((GadEOS.NS,GadEOS.ND))
    GadEOS.MDQ = np.zeros((GadEOS.NS,GadEOS.ND))
    GadEOS.MODELNAME = NewEOS.MODELNAME+'-gadget-linear-int'
    #
    # some combinations of density and sp. entropy are not in the EOS surface
    # in these cases, apply minimum values
    gadpmin = np.min(NewEOS.P) # GPa
    gadsmin = 1.E-30 # MJ/K/kg
    gademin = np.min(NewEOS.U) # MJ/kg
    gadtmin = 1. # K
    gadcmin = np.min(NewEOS.cs) # cm/s
    gadcvmin = 0. # MJ/K/kg
    gadamin = np.min(NewEOS.A) # can be negative # MJ/kg
    print('Minimum values used in the rational function interpolation scheme:')
    print('gadpmin,gadsmin,gademin,gadtmin,gadcmin,gadcvmin,gadamin=')
    print(gadpmin,gadsmin,gademin,gadtmin,gadcmin,gadcvmin,gadamin)
    #
    # some combinations of density and sp. entropy are not in the EOS surface
    # in these cases, fill the region with the adjacent real values from the original T-RHO table
    # ----------------------------------------------------------
    # SIMPLE LINEAR INTERPOLATION TO FLIP T-RHO to S-RHO table
    print("LINEAR INTERPOLATION FOR GADGET TABLE.")
for iid in range(0,GadEOS.ND):
# same density arrays for the T-rho table and S-rho table
sall1d = NewEOS.S[:,iid]
pall1d = NewEOS.P[:,iid]
eall1d = NewEOS.U[:,iid]
tall1d = NewEOS.T[:]
call1d = NewEOS.cs[:,iid]
cvall1d = NewEOS.cv[:,iid]
aall1d = NewEOS.A[:,iid]
kall1d = NewEOS.KPA[:,iid]
mdqall1d = NewEOS.MDQ[:,iid]
# interpolate P at the new S
testinterp = interpolate.interp1d(sall1d,pall1d)
tmpind = np.where(gridsarr < np.max(sall1d))[0]
GadEOS.P[tmpind,iid] = testinterp(gridsarr[tmpind])
tmpind = np.where(gridsarr >= np.max(sall1d))[0]
GadEOS.P[tmpind,iid] = pall1d[NewEOS.NT-1]
# interpolate E at the new S
testinterp = interpolate.interp1d(sall1d,eall1d)
tmpind = np.where(gridsarr < np.max(sall1d))[0]
GadEOS.U[tmpind,iid] = testinterp(gridsarr[tmpind])
tmpind = np.where(gridsarr >= np.max(sall1d))[0]
GadEOS.U[tmpind,iid] = eall1d[NewEOS.NT-1]
# interpolate T at the new S
testinterp = interpolate.interp1d(sall1d,tall1d)
tmpind = np.where(gridsarr < np.max(sall1d))[0]
GadEOS.T[tmpind,iid] = testinterp(gridsarr[tmpind])
tmpind = np.where(gridsarr >= np.max(sall1d))[0]
GadEOS.T[tmpind,iid] = tall1d[NewEOS.NT-1]
# interpolate sound speed at the new S
testinterp = interpolate.interp1d(sall1d,call1d)
tmpind = np.where(gridsarr < np.max(sall1d))[0]
GadEOS.cs[tmpind,iid] = testinterp(gridsarr[tmpind])
tmpind = np.where(gridsarr >= np.max(sall1d))[0]
GadEOS.cs[tmpind,iid] = call1d[NewEOS.NT-1]
# interpolate sp. heat capacity at the new S
testinterp = interpolate.interp1d(sall1d,cvall1d)
tmpind = np.where(gridsarr < np.max(sall1d))[0]
GadEOS.cv[tmpind,iid] = testinterp(gridsarr[tmpind])
tmpind = np.where(gridsarr >= np.max(sall1d))[0]
GadEOS.cv[tmpind,iid] = call1d[NewEOS.NT-1]
# interpolate HFE at the new S
testinterp = interpolate.interp1d(sall1d,aall1d)
tmpind = np.where(gridsarr < np.max(sall1d))[0]
GadEOS.A[tmpind,iid] = testinterp(gridsarr[tmpind])
tmpind = np.where(gridsarr >= np.max(sall1d))[0]
GadEOS.A[tmpind,iid] = call1d[NewEOS.NT-1]
# interpolate phase information KPA flag at the new S
testinterp = interpolate.interp1d(sall1d,kall1d)
tmpind = np.where(gridsarr < np.max(sall1d))[0]
GadEOS.KPA[tmpind,iid] = testinterp(gridsarr[tmpind])
tmpind = np.where(gridsarr >= np.max(sall1d))[0]
GadEOS.KPA[tmpind,iid] = call1d[NewEOS.NT-1]
# interpolate model development quality flag at the new S
testinterp = interpolate.interp1d(sall1d,mdqall1d)
tmpind = np.where(gridsarr < np.max(sall1d))[0]
GadEOS.MDQ[tmpind,iid] = testinterp(gridsarr[tmpind])
tmpind = np.where(gridsarr >= np.max(sall1d))[0]
GadEOS.MDQ[tmpind,iid] = call1d[NewEOS.NT-1]
    #
    # Keep a copy of the linearly-interpolated table before any alternative
    # interpolation scheme is applied.
    GadEOSlin = deepcopy(GadEOS)
    #
    # The triple-quoted string below is disabled code for Kerley's 1-D
    # rational-function interpolation alternative (requires rf1d from
    # rfinterpolation.py, see the commented %run above).
    '''
    # ----------------------------------------------------------
    # 1D RATIONAL FUNCTION INTERPOLATION METHOD FROM KERLEY
    print("RATIONAL FUNCTION INTERPOLATION TO GENERATE GADGET2 EOS TABLE.")
    for iid in range(0,GadEOS.ND):
        GadEOS.MODELNAME = NewEOS.MODELNAME+'-gadget-ratfunc-int'
        #
        # same density arrays for the T-rho table and S-rho table
        sall1d = NewEOS.S[:,iid]
        pall1d = NewEOS.P[:,iid]
        eall1d = NewEOS.U[:,iid]
        tall1d = NewEOS.T[:]
        call1d = NewEOS.cs[:,iid]
        cvall1d = NewEOS.cv[:,iid]
        aall1d = NewEOS.A[:,iid]
        #kall1d = NewEOS.KPA[:,iid]
        #mdqall1d = NewEOS.MDQ[:,iid]
        #
        # interpolate P at the new S
        GadEOS.P[:,iid] = rf1d(sall1d,pall1d,gridsarr,gadpmin)
        # interpolate E at the new S
        GadEOS.U[:,iid] = rf1d(sall1d,eall1d,gridsarr,gademin)
        # interpolate T at the new S
        GadEOS.T[:,iid] = rf1d(sall1d,tall1d,gridsarr,gadtmin)
        # interpolate sound speed at the new S
        GadEOS.cs[:,iid] = rf1d(sall1d,call1d,gridsarr,gadcmin)
        # interpolate sp. heat capacity at the new S
        GadEOS.cv[:,iid] = rf1d(sall1d,cvall1d,gridsarr,gadcvmin)
        # interpolate HFE at the new S
        GadEOS.A[:,iid] = rf1d(sall1d,aall1d,gridsarr,gadamin)
        # Do not interpolate KPA or MDQ with rational function interp. Use Linear.
        # do not overwrite the linear interpolation above.
    '''
    #
    print('Done interpolating the gadget2 table.')
    #print('Using RATIONAL FUNCTION INTERPOLATION EXCEPT FOR KPA and MDQ FLAGS (bilinear).')
    if DEVMODE:
        # ONLY WRITE GADGET2 TABLE TO DISK IN DEVELOPMENT MODE
        #### WRITE NEW GADGET2 TABLE WITH NO TENSION REGION
        #### This ascii table format follows the sesame table format of 5 columns of numbers in 16.8e format
        #### the STYLE variable is just a counter to 5 to know when to write a newline
        GadEOS.writestdgadget(writestdgadgetfname="NEW-GADGET-STD-NOTENSION.TXT")
        GadEOS.writeextgadget(writeextgadgetfname="NEW-GADGET-EXT-NOTENSION.TXT")
# +
# READ IN IRON MATERIAL DATA SETS (header previously said FORSTERITE --
# copied from the forsterite notebook; the data loaded below are for iron)
datadir='../data/'
#======================================================================
#======================================================================
# Gather some experimental and theoretical data on iron
ironSTP_S = 0.489211596E-3 # sp. entropy MJ/K/kg JANAF STP value
# iron 1 bar entropy Desai 1986
#T (K), Cp (J/mol/K), dH0 (J/mol), S (J/mol/K)
Desai_file=datadir+'tabula-Desai-Table2.csv'
Desai_datarr = pd.read_csv(Desai_file,nrows=67)
Desai_tarr = Desai_datarr.iloc[:,0].values # temp K
Desai_sarr = Desai_datarr.iloc[:,3].values/55.847+0.489211596 # sp. entropy J/mol/K->kJ/kg/K offset by JANAF STP value
# iron 1 bar liquid density Assael et al. PCRD 2006 Table 3
# T(K), density (kg/m3)
Assael_file=datadir+'tabula-Assael-Table3.csv'
Assael_datarr = pd.read_csv(Assael_file,nrows=15)
Assael_tarr = Assael_datarr.iloc[:,0].values # temp K
Assael_rarr = Assael_datarr.iloc[:,1].values # rho kg/m3
# Iron alloy solidus Fe-16Si from Fischer et al. 2012 in Fig 1.2A from Fischer AGU 2016
Fischer_file=datadir+'Fischer-Fig1.2-AFischer-lower.csv'
Fischer_datalow = pd.read_csv(Fischer_file,skiprows=1)
Fischer_file=datadir+'Fischer-Fig1.2-AFischer-upper.csv'
Fischer_datahigh = pd.read_csv(Fischer_file,skiprows=1)
# Kraus et al. Nature Geoscience 2015
# Entropy tie point on the Hugoniot
Ztie_p=507. # GPa
Ztie_perrlow=-85. # GPa
Ztie_perrhigh=+65. # GPa
Ztie_s=2240.E-6 # MJ/K/kg
Ztie_serr=60.E-6 # MJ/K/kg
# http://www.ihed.ras.ru/rusbank/
# iron data compilation REFERENCES ARE LISTED AT THE END OF THE FILE
# m [rho0/rho], U=part vel(km/s), D = shockvel(km/s), P (GPa)
ihed_rho0=7.873
ihed_file=datadir+'iron-Russian-shockwave-database.txt'
ihed_datarr = np.loadtxt(ihed_file,skiprows=4,usecols=(0,1,2,3,4),max_rows=571)
ihed_fullden = np.where(ihed_datarr[:,0] == 1.)[0] # m == 1: initially full-density samples
#ihed_datarr.shape
#print(ihed_datarr[ihed_fullden,3])
#print(max(ihed_datarr[ihed_fullden,3]))
# http://www.ihed.ras.ru/rusbank/
# iron data compilation REFERENCES ARE LISTED AT THE END OF THE FILE
# m U D Cs P R/R0 R E-E0 Rem Ref
ihed_file=datadir+'iron-Russian-database-cs.txt'
ihed_csarr = np.loadtxt(ihed_file,skiprows=4,usecols=(0,1,2,3,4,5,6),max_rows=9)
#ihed_datarr.shape
#print(ihed_csarr[:,3])
#print(max(ihed_datarr[ihed_fullden,3]))
# http://www.ihed.ras.ru/rusbank/
# iron data compilation REFERENCES ARE LISTED AT THE END OF THE FILE
# m U0 P0 U P Rem Ref
ihed_file=datadir+'iron-Russian-database-rel.txt'
ihed_relarr = np.loadtxt(ihed_file,skiprows=4,usecols=(0,1,2,3,4),max_rows=20)
#ihed_datarr.shape
#print(ihed_relarr[:,3])
#print(max(ihed_datarr[ihed_fullden,3]))
# Sjostrom & Crockett 2018 Hugoniot equation from their paper; temperature equation has a problem
scironuparr = np.arange(300)/300.*30. # km/s
scironusarr = 3.7885 + 1.25524*scironuparr + 0.518106*scironuparr*np.exp(-0.0866344*scironuparr) # km/s
scironparr = 7.874*(scironuparr)*(scironusarr) # GPa
scironrarr = 1./((1./7874.)*(1.-scironuparr/scironusarr))/1.e3 # g/cm3
#scirontarr = 1.24182E-3*scironparr+2.80723E-6*np.power(scironparr,2.)-1.31703E-9*np.power(scironparr,3.)+1.88449E-13*np.power(scironparr,4.)*11604. # K
# Kraus et al. 2015 fit Hugoniot SOM Equation 7
krausironuparr = (np.arange(200)+1)/200.*20. # km/s
krausironusarr = 3.41 + 1.84*krausironuparr - 0.036*krausironuparr*krausironuparr # km/s
krausironparr = 7854.*(krausironuparr)*(krausironusarr)/1.e3 # GPa
krausironrarr = 1./((1./7854.)*(1.-krausironuparr/krausironusarr))/1.e3 # g/cm3
# entropy fits (central / upper / lower); units presumably J/K/kg -- verify against Kraus et al. SOM
krausironsarr = -1.984e4/np.sqrt(krausironparr) + 39.9*np.sqrt(krausironparr)-0.1047*krausironparr+2279.
krausironsuarr = -1.845e4/np.sqrt(krausironparr) + 45.7*np.sqrt(krausironparr)-0.1193*krausironparr+2270.
krausironslarr = -2.631e4/np.sqrt(krausironparr) + 22.33*np.sqrt(krausironparr)-0.007689*krausironparr+2804.
krausliqrho = 6.01 # g/cm3 1 bar boiling point, 3133 K Kraus et al. 2015 SOM Equation 6
krausironliqusarr = 0.79 + 0.50*krausliqrho + (1.19 + 0.0307*krausliqrho)*krausironuparr # km/s
krausironliqparr = krausliqrho*(krausironuparr)*(krausironliqusarr) # GPa
krausironliqrarr = 1./((1./krausliqrho)*(1.-krausironuparr/krausironliqusarr)) # g/cm3
ironliq_r0=krausliqrho
ironliq_t0=3133.
# -
# ## Color mesh plots of the SESAME Rho-T table
#
# +
# PLOT COLOR SHADED VALUES RHO-T TABLE
# First 3x2 page: pressure, specific internal energy, and specific entropy on the
# density-temperature grid. Left column: linear axes zoomed on the condensed region;
# right column: log-log axes over the full table range. Dashed black curves are the
# vapor-curve liquid/vapor branches (and melt curve when present); the 'ko' marker
# is the first vapor-curve point (presumably the critical point -- confirm).
lw=1
sfont = 15
ds = (10,10)
# %matplotlib inline
fig, ([ax0, ax1], [ax2,ax3], [ax4,ax5])= plt.subplots(nrows=3,ncols=2,figsize=(14,24))
plt.subplots_adjust(wspace=0.4)
#plt.tight_layout(pad=0.6)
##================ Pressure
# (log10(P)+25)/29 maps log10(P [GPa]) from [-25,+4] onto [0,1] for the colormap
# (see the '-25','-11','+4' colorbar tick labels on the log-log panel).
im1=ax0.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.P)+25)/29.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax0.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax0.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax0.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.set_ylim(0.,20000)
ax0.set_xlim(0,30.)
ax0.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax0.set_ylabel('Temperature (K)',size=sfont)
ax0.set_title('Colors=log$_{10}$(P (GPa))',size=sfont)
ax0.tick_params(labelsize=sfont)
divider0 = make_axes_locatable(ax0)
cax0 = divider0.append_axes('right', size='5%', pad=0.05)
cbar0 = fig.colorbar(im1, cax=cax0, ticks = [],orientation='vertical')
#cbar0.ax.set_yticklabels([' ',' ',' ']) # vertical colorbar
cbar0.set_label('Log$_{10}$(P) [GPa]',size=sfont)
#cbar0.ax.set_yticklabels(['-25','-11','+4']) # vertical colorbar
# Same pressure field on log-log axes over the full table range.
im1=ax1.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.P)+25)/29.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax1.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax1.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax1.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.set_ylim(10.,1.E7)
ax1.set_xlim(1.E-21,100.)
ax1.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax1.set_ylabel('Temperature (K)',size=sfont)
ax1.set_title('Colors=log$_{10}$(P (GPa))',size=sfont)
ax1.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax1)
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
cbar.ax.set_yticklabels(['-25','-11', '+4']) # vertical colorbar
cbar.set_label('Log$_{10}$(P) [GPa]',size=sfont)
#================ Sp. Int. Energy
# (log10(U)+4)/10 maps log10(U [MJ/kg]) from [-4,+6] onto [0,1]
# (see the '-4','+1','+6' colorbar tick labels on the log-log panel).
im1=ax2.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.U)+4.)/10.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax2.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax2.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax2.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.set_ylim(0.,20000)
ax2.set_xlim(0,30.)
ax2.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax2.set_ylabel('Temperature (K)',size=sfont)
ax2.set_title('Colors=log$_{10}$(Sp. Int. Energy (MJ/kg))',size=sfont)
ax2.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax2)
cax = divider.append_axes('right', size='5%', pad=0.05)
# does not want to label ticks in this location
cbar = fig.colorbar(im1, cax=cax, ticks = [], orientation='vertical')
#cbar.ax.set_yticklabels(['','','']) # vertical colorbar
cbar.set_label('Log$_{10}$(E) [MJ/kg]',size=sfont)
#cbar.ax.set_yticklabels(['-4','+1','+6']) # vertical colorbar
ax3.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.U)+4.)/10.,cmap=local_cmaps.parula,vmin=0,vmax=1)
ax3.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax3.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax3.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.set_ylim(10.,1.E7)
ax3.set_xlim(1.E-21,100.)
ax3.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax3.set_ylabel('Temperature (K)',size=sfont)
ax3.set_title('Colors=log$_{10}$(E (MJ/kg))',size=sfont)
ax3.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax3)
cax = divider.append_axes('right', size='5%', pad=0.05)
#cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
#cbar.ax.set_yticklabels(['-6', '-3.5', '-1']) # vertical colorbar
# im1 still holds the ax2 mesh; it has the same data and normalization as ax3's.
cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
cbar.ax.set_yticklabels(['-4', '+1','+6']) # vertical colorbar
cbar.set_label('Log$_{10}$(E) [MJ/kg]',size=sfont)
#================ Sp. Entropy
# (log10(S)+4)/4 maps log10(S [MJ/K/kg]) from [-4,0] onto [0,1]
# (see the '-4','-2','0' colorbar tick labels on the log-log panel).
im1=ax4.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.S)+4.)/4.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax4.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax4.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax4.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.set_ylim(0.,20000)
ax4.set_xlim(0,30.)
ax4.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax4.set_ylabel('Temperature (K)',size=sfont)
ax4.set_title('Colors=log$_{10}$(Sp. Entropy (MJ/K/kg))',size=sfont)
ax4.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax4)
cax = divider.append_axes('right', size='5%', pad=0.05)
cbar = fig.colorbar(im1, cax=cax, ticks = [], orientation='vertical')
#cbar.ax.set_yticklabels(['-4', '-2', '0']) # vertical colorbar
cbar.set_label('Log$_{10}$(S) [MJ/K/kg]',size=sfont)
ax5.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.S)+4.)/4.,cmap=local_cmaps.parula,vmin=0,vmax=1)
ax5.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax5.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax5.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.set_ylim(10.,1.E7)
ax5.set_xlim(1.E-21,100.)
ax5.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax5.set_ylabel('Temperature (K)',size=sfont)
ax5.set_title('Colors=log$_{10}$(Sp. Entropy (MJ/K/kg))',size=sfont)
ax5.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax5)
cax = divider.append_axes('right', size='5%', pad=0.05)
#cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
#cbar.ax.set_yticklabels(['-6', '-3.5', '-1']) # vertical colorbar
cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
cbar.ax.set_yticklabels(['-4', '-2','0']) # vertical colorbar
cbar.set_label('Log$_{10}$(S) [MJ/K/kg]',size=sfont)
##============= start new plot
# Second 3x2 page of rho-T maps: sound speed, Helmholtz free energy, KPA flag.
fig2, ([ax0, ax1], [ax2,ax3], [ax4,ax5])= plt.subplots(nrows=3,ncols=2,figsize=(14,24))
plt.subplots_adjust(wspace=0.4)
#================ Sound Speed
# (log10(cs)+3)/10 maps log10(cs [cm/s]) from [-3,+7] onto [0,1]
# (see the '-3','+2','+7' colorbar tick labels on the log-log panel below).
im1=ax0.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.cs)+3.)/10.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
# Vapor-curve branches and first vapor-curve point (presumably the critical point).
ax0.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax0.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax0.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax0.set_ylim(0.,20000)
ax0.set_xlim(0,30.)
ax0.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax0.set_ylabel('Temperature (K)',size=sfont)
ax0.set_title('Colors=log$_{10}$(Sound Speed (cm/s))',size=sfont)
ax0.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax0)
cax = divider.append_axes('right', size='5%', pad=0.05)
# Fixed: colorbar belongs to fig2 (this figure), not the previous fig.
cbar = fig2.colorbar(im1, cax=cax, ticks = [], orientation='vertical')
#cbar.ax.set_yticklabels(['-3', '+2', '+7']) # vertical colorbar
cbar.set_label('Log$_{10}$(Cs) [cm/s]',size=sfont)
ax1.pcolormesh(NewEOS.rho,NewEOS.T,(np.log10(NewEOS.cs)+3.)/10.,cmap=local_cmaps.parula,vmin=0,vmax=1)
ax1.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax1.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax1.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax1.set_ylim(10.,1.E7)
ax1.set_xlim(1.E-21,100.)
ax1.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax1.set_ylabel('Temperature (K)',size=sfont)
ax1.set_title('Colors=log$_{10}$(sound speed (cm/s))',size=sfont)
ax1.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax1)
cax = divider.append_axes('right', size='5%', pad=0.05)
#cbar = fig.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
#cbar.ax.set_yticklabels(['-6', '-3.5', '-1']) # vertical colorbar
# im1 still holds the ax0 mesh; it has the same data and normalization as ax1's.
cbar = fig2.colorbar(im1, cax=cax, ticks = [0, 0.5, 1], orientation='vertical')
cbar.ax.set_yticklabels(['-3', '+2', '+7']) # vertical colorbar
cbar.set_label('Log$_{10}$(Cs) [cm/s]',size=sfont)
#================ HFE
# Helmholtz free energy panels. Unlike the other quantities, HFE is shown on a
# LINEAR scale, shifted and normalized to [0,1] via (A - amin)/alogmax with a
# fixed display range of [-1000, +500] MJ/kg (matching the tick labels below).
# (The previous dead store `amin = np.min(NewEOS.A)` was removed; the fixed
# bound below was always the value actually used.)
amin = -1000.
alogmax = 1500. #np.max(anaarrodd)-amin #np.max(np.log10(anaarrodd-amin))
#print('amin, log10(amin)',amin,np.log10(-amin))
#print('alogmax=',alogmax)
im1=ax2.pcolormesh(NewEOS.rho,NewEOS.T,(NewEOS.A-amin)/alogmax,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax2.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax2.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax2.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax2.set_ylim(0.,20000)
ax2.set_xlim(0,30.)
ax2.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax2.set_ylabel('Temperature (K)',size=sfont)
ax2.set_title('Colors=HFE (MJ/kg)',size=sfont)
ax2.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax2)
cax = divider.append_axes('right', size='5%', pad=0.05)
# Fixed: colorbar belongs to fig2 (this figure), not the previous fig.
cbar = fig2.colorbar(im1, cax=cax, ticks = [], orientation='vertical')
#cbar.ax.set_yticklabels(['-1000','-500','0','500']) # vertical colorbar
# Fixed label: the plotted quantity is linear HFE, not log10(HFE).
cbar.set_label('HFE [MJ/kg]',size=sfont)
ax3.pcolormesh(NewEOS.rho,NewEOS.T,(NewEOS.A-amin)/alogmax,cmap=local_cmaps.parula,vmin=0,vmax=1)
ax3.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax3.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax3.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax3.set_ylim(10.,1.E7)
ax3.set_xlim(1.E-21,100.)
ax3.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax3.set_ylabel('Temperature (K)',size=sfont)
ax3.set_title('Colors=HFE (MJ/kg)',size=sfont)
ax3.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax3)
cax = divider.append_axes('right', size='5%', pad=0.05)
# Ticks at (v - amin)/alogmax for v = -1000, -500, 0, +500 MJ/kg.
cbar = fig2.colorbar(im1, cax=cax, ticks = [0,.333,.6667, 1], orientation='vertical')
cbar.ax.set_yticklabels(['-1000','-500','0','500']) # vertical colorbar
# Fixed label: the plotted quantity is linear HFE, not log10(HFE).
cbar.set_label('HFE [MJ/kg]',size=sfont)
#================ KPA flag
# KPA is an integer state/phase flag in 1..6; (KPA-0.5)/6 centers integer k at
# (2k-1)/12 within the [0,1] colormap range so each flag gets one discrete color.
# NOTE(review): cm.get_cmap(name, lutsize) is deprecated (removed in matplotlib
# >= 3.9); switch to matplotlib.colormaps['plasma'].resampled(6) when upgrading.
cmap2 = cm.get_cmap('plasma', 6) # 6 discrete colors
im1=ax4.pcolormesh(NewEOS.rho,NewEOS.T,((NewEOS.KPA)-0.5)/6.,cmap=cmap2,vmin=0,vmax=1,edgecolors='face')
#im1=ax4.pcolormesh(NewEOS.rho,NewEOS.T,((NewEOS.KPA))/6.,cmap=local_cmaps.parula,vmin=0,vmax=1,edgecolors='face')
ax4.plot(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.plot(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.plot(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax4.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax4.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax4.set_ylim(0.,20000)
ax4.set_xlim(0,30.)
ax4.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax4.set_ylabel('Temperature (K)',size=sfont)
ax4.set_title('Colors=KPA flag',size=sfont)
ax4.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax4)
cax = divider.append_axes('right', size='5%', pad=0.05)
# Fixed: colorbar belongs to fig2 (this figure), not the previous fig.
cbar = fig2.colorbar(im1, cax=cax, ticks = [], orientation='vertical')
#cbar.ax.set_yticklabels(['0','1','2','3','4','5', '6']) # vertical colorbar
cbar.set_label('KPA Flag',size=sfont)
ax5.pcolormesh(NewEOS.rho,NewEOS.T,((NewEOS.KPA)-0.5)/6.,cmap=cmap2,vmin=0,vmax=1)
ax5.loglog(NewEOS.vc.rl,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.loglog(NewEOS.vc.rv,NewEOS.vc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.loglog(NewEOS.vc.rl[0],NewEOS.vc.T[0],'ko')
if NewEOS.mc.NT > 0:
    ax5.plot(NewEOS.mc.rl,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
#    ax5.plot(NewEOS.mc.rs,NewEOS.mc.T,color="black",linewidth=lw,linestyle='--',dashes=ds)
ax5.set_ylim(10.,1.E7)
ax5.set_xlim(1.E-21,100.)
ax5.set_xlabel('Density (g/cm$^3$)',size=sfont)
ax5.set_ylabel('Temperature (K)',size=sfont)
ax5.set_title('Colors=KPA flag',size=sfont)
ax5.tick_params(labelsize=sfont)
divider = make_axes_locatable(ax5)
cax = divider.append_axes('right', size='5%', pad=0.05)
# 13 fixed ticks at n/12 (n=0..12); integer flags k=1..6 sit at the odd twelfths
# (2k-1)/12, so only those positions get labels. The label list must contain 13
# entries to match the 13 ticks -- the original 12-entry list was off by one
# (misaligned labels; a ValueError in recent matplotlib).
cbar = fig2.colorbar(im1, cax=cax, ticks = np.arange(13)/12, orientation='vertical')
cbar.ax.set_yticklabels(['','1','','2','','3','','4','','5','', '6','']) # vertical colorbar
cbar.set_label('KPA Flag',size=sfont)
fig2.show()
# -
# ## ANEOS Hugoniots Compared to Experimental Data
#
# The following plots compare the ANEOS model Hugoniots with laboratory data downloaded from the shock wave database at http://ihed.ras.ru/rusbank/
#
# Entropy on the Principal Hugoniot derived by Kraus et al. 2015.
#
# ANEOS Hugoniot: red -- calculated in the ANEOS code using model reference density and temperature.
#
# Calculated Hugoniot: blue -- calculated by linear interpolation of the EOS table starting at model reference density and temperature.
#
# The liquid Hugoniot is calculated in Kraus et al. 2015 starting at the boiling point of liquid iron at 1 bar: 6.01 g/cm$^3$, 3133 K. The ANEOS model Hugoniot is shown for comparison.
# +
# Display the initial conditions for the Hugoniot in ANEOS.OUTPUT
print('\nRequested ANEOS Hugoniot initial state from (THUG,RHUG) (red curves in plots below):')
print('Initial Pressure (GPa):',NewEOS.anhug.P[0])
print('Initial Temperature (K):',NewEOS.anhug.T[0])
print('Initial Sp. Entropy (MJ/K/kg):',NewEOS.anhug.S[0])
print('Initial Sp. Internal Energy (MJ/kg):',NewEOS.anhug.U[0])
print('Initial Density (g/cm3):',NewEOS.anhug.rho[0])
#
print('\nCalculated Hugoniot using (T0REF,R0REF) initial state (blue curves in plots below):')
print('Initial Pressure (GPa):',NewEOS.hug.P[0])
print('Initial Temperature (K):',NewEOS.hug.T[0])
print('Initial Sp. Entropy (MJ/K/kg):',NewEOS.hug.S[0])
print('Initial Sp. Internal Energy (MJ/kg):',NewEOS.hug.U[0])
print('Initial Density (g/cm3):',NewEOS.hug.rho[0])
#========== PLOTTING STYLES FOR LOTS OF PLOTS =================
styleqmd = ['^','red',5,' ']
stylezdat = ['+','black',5,'Porous Fe Hugoniot\n(IHED)']
stylegg = ['+','blue',5,'STP Hugoniot data\n(IHED)']
styleliq = ['--','orange',7,'Liquid Fe Hugoniot\n(Kraus et al. 2015)']
styleliqhug = ['-','orange',1,'ANEOS Liquid Fe Hugoniot']
stylezfit = ['-','green',1,' ']
styleanhug = ['-','red',1,'ANEOS Hugoniot']
stylestphug = ['-','blue',1,'Calculated Hugoniot']
styleanphase = ['-','black',1,'ANEOS Phase Boundaries']
stylephasept = ['o','black',6]
#======================LOTS OF PLOTS============================
# plot the Hugoniots for comparison
sfont = 15
fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(14,18))
plt.subplots_adjust(wspace=0.4)
#-------------
# Pressure-Density Data vs. ANEOS
ai=0
aj=0
#axes[ai,aj].plot(QMDH_r[1::],QMDH_p[1::],styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label=styleqmd[3])
#axes[ai,aj].plot(Zdat_r,Zdat_p,stylezdat[0],markersize=stylezdat[2],color=stylezdat[1],label=stylezdat[3])
#axes[ai,aj].plot(LowHugDat_r,LowHugDat_p,stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
#axes[ai,aj].plot(Zfit_r,Zfit_p,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
axes[ai,aj].plot(NewEOS.anhug.rho,NewEOS.anhug.P,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(ihed_datarr[ihed_fullden,4]*ihed_rho0,ihed_datarr[ihed_fullden,3],stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
axes[ai,aj].plot(scironrarr,scironparr,'-',color='m',label='STP Hugoniot\n(Sjostrom & Crockett 2018)')
# Kraus Liquid Iron Hugoniot at 1 bar boiling point
axes[ai,aj].plot(krausironliqrarr[40:100], krausironliqparr[40:100],styleliq[0],markersize=styleliq[2],color=styleliq[1],label=styleliq[3])
#linestyle='-',color='cyan',label='Kraus et al. 2015\n1-bar Boiling Pt. Liquid Hugoniot')
# ANEOS LIQUID HUGONIOT
# Calculate the model Hugoniot with the same initial state as Thomas & Asimow 2013
NewEOS.calchugoniot(r0=ironliq_r0,t0=ironliq_t0)
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,styleliqhug[0],linewidth=styleliqhug[2],color=styleliqhug[1],label=styleliqhug[3])
# Put the STP hugoniot back into the structure
NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF)
axes[ai,aj].set_ylim(0,1100)
axes[ai,aj].set_xlim(7.,16.)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#-------------
# Pressure - Temperature Data vs. ANEOS
ai=0
aj=1
#axes[ai,aj].plot(QMDH_p[1::],QMDH_t[1::]/1.E3,styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label=styleqmd[3])
#axes[ai,aj].errorbar(ZTdat_p,ZTdat_t/1.E3,xerr=ZTdat_perr,yerr=ZTdat_terr/1.E3,fmt=stylezdat[0],markersize=stylezdat[2],color=stylezdat[1],label=stylezdat[3],elinewidth=1)
#axes[ai,aj].plot(Zfit_p,Zfit_t/1.E3,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
axes[ai,aj].plot(NewEOS.anhug.P,NewEOS.anhug.T/1.E3,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.P,NewEOS.hug.T/1.E3,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
## add squares
rect = Rectangle((240.,5.),6.,.700,linewidth=1,edgecolor='moccasin',facecolor='moccasin')
axes[ai,aj].add_patch(rect)
axes[ai,aj].plot(243.,5.350,'d',color='orange',label='Brown & McQueen 1986')
rect = Rectangle((222.,4.7),6.,1.000,linewidth=1,edgecolor='moccasin',facecolor='moccasin')
axes[ai,aj].add_patch(rect)
axes[ai,aj].plot(225.,5.100,'o',color='orange',label='Nguyen & Holmes 2004')
axes[ai,aj].set_xlim(0,500)
axes[ai,aj].set_ylim(0,20)
axes[ai,aj].set_ylabel('Temperature (1000 K)',size=sfont)
axes[ai,aj].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#-------------
# Particle Velocity vs. Shock Velocity; Data vs. ANEOS
ai=1
aj=0
#axes[ai,aj].plot(QMDH_up[1::],QMDH_us[1::],styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label=styleqmd[3])
#axes[ai,aj].errorbar(Zdat_up,Zdat_us,xerr=Zdat_uperr,yerr=Zdat_userr,fmt=stylezdat[0],markersize=stylezdat[2],color=stylezdat[1],label=stylezdat[3],elinewidth=1)
#axes[ai,aj].plot(LowHugDat_up,LowHugDat_us,stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
#axes[ai,aj].plot(Zfit_up,Zfit_us,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
axes[ai,aj].plot(NewEOS.anhug.up,NewEOS.anhug.us,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.up,NewEOS.hug.us,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(ihed_datarr[ihed_fullden,1],ihed_datarr[ihed_fullden,2],stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
#axes[ai,aj].plot(foliq_up,foliq_us,styleliq[0],markersize=styleliq[2],color=styleliq[1],label=styleliq[3])
# ANEOS LIQUID HUGONIOT
# Calculate the model Hugoniot with the same initial state as Thomas & Asimow 2013
#NewEOS.calchugoniot(r0=foliq_r0,t0=foliq_t0)
#axes[ai,aj].plot(NewEOS.hug.up,NewEOS.hug.us,styleliqhug[0],linewidth=styleliqhug[2],color=styleliqhug[1],label=styleliqhug[3])
# Put the STP hugoniot back into the structure
#NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF)
axes[ai,aj].set_xlim(0,11)
axes[ai,aj].set_ylim(0,20)
axes[ai,aj].set_xlabel('Particle Velocity (km/s)',size=sfont)
axes[ai,aj].set_ylabel('Shock Velocity (km/s)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#-------------
# Sp. Entropy vs Pressure; Calculated from experiments vs. ANEOS
ai=1
aj=1
#axes[ai,aj].plot(Zfit_p,Zfit_s*1.E3,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
#axes[ai,aj].fill_between(Zfit_p,(Zfit_s-Zfit_serr)*1000.,(Zfit_s+Zfit_serr)*1000.,color=stylezfit[1],alpha=0.2)
# pressure error envelope is tiny and can't be seen on this scale
#axes[ai,aj].fill_betweenx(Zfit_s*1000.,(Zfit_p-Zfit_perr),(Zfit_p+Zfit_perr),color='red',alpha=0.2)
axes[ai,aj].plot(NewEOS.anhug.P,NewEOS.anhug.S*1.E3,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.P,NewEOS.hug.S*1.E3,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
# Tie point from Kraus et al. 2015
axes[ai,aj].plot(Ztie_p,Ztie_s*1.E3,'o',markersize=5,color='red',label='Entropy Tie Point (Kraus et al. 2015)')
axes[ai,aj].plot([Ztie_p+Ztie_perrlow,Ztie_p+Ztie_perrhigh],[Ztie_s*1.E3,Ztie_s*1.E3],'-',color='red')
axes[ai,aj].plot([Ztie_p,Ztie_p],[(Ztie_s-Ztie_serr)*1.E3,(Ztie_s+Ztie_serr)*1.E3],'-',color='red')
# Iron at STP from JANAF
axes[ai,aj].plot(1.E-4,ironSTP_S*1.E3,'s',markersize=5,color='orange',label='STP (JANAF)')
colstr='green'
axes[ai,aj].plot(krausironparr[40:80], krausironsarr[40:80]/1.e3, linestyle='-',color=colstr)
#axes[ai,aj].plot(krausironparr[40:80], krausironsarr[40:80]/1.e3, linestyle='-',color=colstr,label='Kraus et al. 2015\nLiquid Region STP Hugoniot')
#axes[ai,aj].plot(krausironparr[40:80], krausironsuarr[40:80]/1.e3, linestyle=':',color=colstr)
#axes[ai,aj].plot(krausironparr[40:80], krausironslarr[40:80]/1.e3, linestyle=':',color=colstr)
axes[ai,aj].fill_between(krausironparr[40:80],krausironslarr[40:80]/1.e3,krausironsuarr[40:80]/1.e3,color='green',alpha=0.2,label='Kraus et al. 2015\nLiquid Region STP Hugoniot')
axes[ai,aj].set_ylim(0,4.)
axes[ai,aj].set_xlim(0,1200)
axes[ai,aj].set_ylabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#-------------
# Pressure-Density Data vs. ANEOS LOW PRESSURE + LIQUID HUGONIOT
# Porous Hugoniots: each m = R0/R00 block recomputes NewEOS.hug in place for the
# porous initial density R0REF/m, plots it, then the last block restores the STP
# Hugoniot. Plot order matters because NewEOS.hug is mutated between plot calls.
ai=2
aj=0
#axes[ai,aj].plot(QMDH_r[1::],QMDH_p[1::],styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label=styleqmd[3])
#axes[ai,aj].plot(Zdat_r,Zdat_p,stylezdat[0],markersize=stylezdat[2],color=stylezdat[1],label=stylezdat[3])
#axes[ai,aj].plot(LowHugDat_r,LowHugDat_p,stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
# Liquid forsterite Hugoniot Data (disabled; left over from the forsterite notebook)
#axes[ai,aj].plot(foliq_r,foliq_p,styleliq[0],markersize=styleliq[2],color=styleliq[1],label=styleliq[3])
#axes[ai,aj].plot(foliq_r0,0,styleliq[0],markersize=styleliq[2],color=styleliq[1])
#axes[ai,aj].plot(Zfit_r,Zfit_p,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
# ANEOS REFERENCE POINT (STP) Hugoniots
axes[ai,aj].plot(NewEOS.anhug.rho,NewEOS.anhug.P,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(ihed_datarr[ihed_fullden,4]*ihed_rho0,ihed_datarr[ihed_fullden,3],stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
# POROUS HUGONIOT m=R0/R00=1.128
m=1.128
colstr='black'
labstr='R0/R00=1.128 (+IHED -ANEOS)'
# Select IHED rows measured at exactly this porosity ratio.
ihed_porous = np.where(ihed_datarr[:,0] == m)[0]
axes[ai,aj].plot(ihed_datarr[ihed_porous,4]*ihed_rho0,ihed_datarr[ihed_porous,3],'+',markersize=5,color=colstr,label=labstr)
NewEOS.calcporoushugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF,r00=NewEOS.R0REF/m)
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,styleliqhug[0],linewidth=styleliqhug[2],color=colstr)
# Put the STP hugoniot back into the structure
NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF)
# POROUS HUGONIOT m=R0/R00=1.315
m=1.315
colstr='green'
labstr='R0/R00=1.315 (+IHED -ANEOS)'
ihed_porous = np.where(ihed_datarr[:,0] == m)[0]
axes[ai,aj].plot(ihed_datarr[ihed_porous,4]*ihed_rho0,ihed_datarr[ihed_porous,3],'+',markersize=5,color=colstr,label=labstr)
NewEOS.calcporoushugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF,r00=NewEOS.R0REF/m)
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,styleliqhug[0],linewidth=styleliqhug[2],color=colstr)
# POROUS HUGONIOT m=R0/R00=1.660
m=1.660
colstr='m'
labstr='R0/R00=1.660 (+IHED -ANEOS)'
ihed_porous = np.where(ihed_datarr[:,0] == m)[0]
axes[ai,aj].plot(ihed_datarr[ihed_porous,4]*ihed_rho0,ihed_datarr[ihed_porous,3],'+',markersize=5,color=colstr,label=labstr)
NewEOS.calcporoushugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF,r00=NewEOS.R0REF/m)
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,styleliqhug[0],linewidth=styleliqhug[2],color=colstr)
# POROUS HUGONIOT m=R0/R00=2.333
m=2.333
colstr='c'
labstr='R0/R00=2.333 (+IHED -ANEOS)'
ihed_porous = np.where(ihed_datarr[:,0] == m)[0]
axes[ai,aj].plot(ihed_datarr[ihed_porous,4]*ihed_rho0,ihed_datarr[ihed_porous,3],'+',markersize=5,color=colstr,label=labstr)
NewEOS.calcporoushugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF,r00=NewEOS.R0REF/m)
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,styleliqhug[0],linewidth=styleliqhug[2],color=colstr)
# Put the STP hugoniot back into the structure
NewEOS.calchugoniot(r0=NewEOS.R0REF,t0=NewEOS.T0REF)
axes[ai,aj].set_ylim(-5,200)
axes[ai,aj].set_xlim(5,12.)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
#-------------
# Pressure-Density Data vs. ANEOS EXTRAPOLATED HIGH PRESSURE
ai=2
aj=1
#axes[ai,aj].plot(QMDH_r[1::],QMDH_p[1::],styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label=styleqmd[3])
#axes[ai,aj].plot(Zdat_r,Zdat_p,stylezdat[0],markersize=stylezdat[2],color=stylezdat[1],label=stylezdat[3])
#axes[ai,aj].plot(LowHugDat_r,LowHugDat_p,stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
#axes[ai,aj].plot(Zfit_r,Zfit_p,stylezfit[0],linewidth=stylezfit[2],color=stylezfit[1],label=stylezfit[3])
axes[ai,aj].plot(NewEOS.anhug.rho,NewEOS.anhug.P,styleanhug[0],linewidth=styleanhug[2],color=styleanhug[1],label=styleanhug[3])
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(ihed_datarr[ihed_fullden,4]*ihed_rho0,ihed_datarr[ihed_fullden,3],stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
axes[ai,aj].plot(scironrarr,scironparr,'-',color='m',label='STP Hugoniot\n(Sjostrom & Crockett 2018)')
axes[ai,aj].set_ylim(0,6200)
axes[ai,aj].set_xlim(7.,25.)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].legend(fontsize=sfont-5)
# +
#======================LOTS OF PLOTS============================
# plot the Hugoniots for comparison
# 1x2 page: (left) sound speed along the Hugoniot vs. IHED data; (right) partial
# release data vs. ANEOS release isentropes in the particle-velocity/pressure plane.
# Note axes is 1-D here (nrows=1), so panels are indexed axes[ai]; aj is unused.
sfont = 15
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(14,6))
plt.subplots_adjust(wspace=0.4)
#-------------
# Pressure-Sound Speed Hugoniot Data vs. ANEOS
ai=0
aj=0
axes[ai].plot(NewEOS.hug.P,NewEOS.hug.cs,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
# IHED cs file columns: 4 = P (GPa), 3 = Cs (km/s).
axes[ai].plot(ihed_csarr[:,4],ihed_csarr[:,3],stylegg[0],markersize=stylegg[2],color=stylegg[1],label=stylegg[3])
axes[ai].set_xlim(0,400)
axes[ai].set_ylim(4.,14.)
axes[ai].set_ylabel('Sound speed (km/s)',size=sfont)
axes[ai].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai].tick_params(labelsize=sfont)
axes[ai].legend(fontsize=sfont-5)
#-------------
# Particle velocity - Pressure Data vs. ANEOS
ai=1
aj=1
axes[ai].semilogy(NewEOS.hug.up,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
# IHED release file: columns (1,2) = initial shocked state (U0,P0); (3,4) = released state (U,P).
axes[ai].plot(ihed_relarr[:,3],ihed_relarr[:,4],'s',markersize=stylegg[2],color='red',label='Partial release (IHED)')
axes[ai].plot(ihed_relarr[:,1],ihed_relarr[:,2],'s',markersize=stylegg[2],color='red')
# For each unique initial release pressure, build the ANEOS isentrope through the
# corresponding Hugoniot state and integrate the Riemann invariant dP/(rho*cs)
# downward in pressure to get particle velocity along the release path.
uniqrelarr = np.unique(ihed_relarr[:,2])
for j in range(0,len(uniqrelarr)):
    ptarget=uniqrelarr[j]
    # ANEOS ISENTROPE NEAR SAME PRESSURE
    ANEOSIsen = isentrope_class() # code in eostable.py
    # Entropy of the Hugoniot state at the target pressure.
    Sisen = np.interp(ptarget,NewEOS.hug.P,NewEOS.hug.S) # MJ/K/kg
    # loop across all densities and extract the values for the requested isentrope
    for i in range(0,NewEOS.ND):
        # Use only valid (S>0) table points at this density.
        ind = np.where((NewEOS.S[:,i] > 0))[0]
        interpfunction = interpolate.interp1d(NewEOS.S[ind,i],NewEOS.P[ind,i]) # MJ/K/kg, GPa
        ANEOSIsen.pressure = np.append(ANEOSIsen.pressure,interpfunction(Sisen)) # GPa
        interpfunction = interpolate.interp1d(NewEOS.S[ind,i],NewEOS.T[ind]) # MJ/K/kg, K
        ANEOSIsen.temperature = np.append(ANEOSIsen.temperature,interpfunction(Sisen)) # K
        interpfunction = interpolate.interp1d(NewEOS.S[ind,i],NewEOS.cs[ind,i]) # MJ/K/kg, cm/s
        ANEOSIsen.soundspeed = np.append(ANEOSIsen.soundspeed,interpfunction(Sisen)) # cm/s
        interpfunction = interpolate.interp1d(NewEOS.S[ind,i],NewEOS.U[ind,i]) # MJ/K/kg, MJ/kg
        ANEOSIsen.energy = np.append(ANEOSIsen.energy,interpfunction(Sisen)) # MJ/kg
    ANEOSIsen.density = np.copy(NewEOS.rho)*1000. # kg/m3
    ANEOSIsen.partvel = np.zeros(NewEOS.ND) # m/s
    # First isentrope index at/above the target pressure: the release starts here.
    ir0 = int(np.round(np.where(ANEOSIsen.pressure >= ptarget)[0][0])) # go up just past the intersection with Hugoniot
    ANEOSIsen.partvel[ir0] = np.interp(ptarget,NewEOS.hug.P,NewEOS.hug.up)*1.E3 # m/s
    #print('ANEOS Starting Pressure:',ptarget)
    iir=ir0-1
    # Integrate du = dP/(rho*cs) down the isentrope (cs converted cm/s -> m/s).
    while (ANEOSIsen.pressure[iir]>.0001):
        if iir == ir0-1:
            dp = (ptarget - ANEOSIsen.pressure[iir])*1.E9 # Pa
        else:
            dp = (ANEOSIsen.pressure[iir+1]-ANEOSIsen.pressure[iir])*1.E9 # Pa
        ANEOSIsen.partvel[iir] = ANEOSIsen.partvel[iir+1] + dp/(ANEOSIsen.density[iir]*ANEOSIsen.soundspeed[iir+1]/1.E2) # m/s
        #print(iir,ANEOSIsen.pressure[iir],ANEOSIsen.partvel[iir]/1.e3,dp/1.e9,ANEOSIsen.density[iir])
        iir=iir-1
    # plot aneos isentrope
    tmp = np.where((ANEOSIsen.pressure < ptarget)&(ANEOSIsen.pressure > .01))[0]
    if j == 0:
        axes[ai].plot(ANEOSIsen.partvel[tmp]/1.e3,ANEOSIsen.pressure[tmp],'--',color='blue',label='ANEOS release isentrope')
    else:
        axes[ai].plot(ANEOSIsen.partvel[tmp]/1.e3,ANEOSIsen.pressure[tmp],'--',color='blue')
axes[ai].set_xlim(0,7)
axes[ai].set_ylim(.01,200)
axes[ai].set_xlabel('Particle velocity (km/s)',size=sfont)
axes[ai].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai].tick_params(labelsize=sfont)
axes[ai].legend(fontsize=sfont-5)
# -
# ## Model Critical Point and Shock-Induced Phase Changes
#
#
# +
# Summary tables: print the ANEOS-derived critical point, 1-bar reference
# states, triple point, and the shock pressures / symmetric-impact velocities
# required for shock-induced melting and vaporization on release to 1 bar,
# using both the model entropies and the JANAF tabulated entropies.
print('ANEOS Calculated Critical point:')
print(' Temperature (K): ',NewEOS.cp.T)
print(' Pressure (GPa): ', NewEOS.cp.P)
print(' Density (g/cm^3): ',NewEOS.cp.rho)
print(' Sp. Energy (MJ/kg): ', NewEOS.cp.U)
print(' Sp. Entropy (kJ/K/kg): ', NewEOS.cp.S*1.e3)
print('\n')
print('ANEOS Calculated Values at 1 bar:')
print(' Temperature at melting (K): ', NewEOS.onebar.Tmelt)
print(' Liquid density at melting (g/cm^3): ', NewEOS.onebar.rhocm)
print(' Sp. Entropy at inc. melting Sim (kJ/K/kg): ', NewEOS.onebar.Sim*1.E3)
print(' Sp. Entropy at com. melting Scm (kJ/K/kg): ', NewEOS.onebar.Scm*1.E3)
print(' Temperature at boiling (K): ', NewEOS.onebar.Tvap)
print(' Liquid density at boiling (g/cm^3): ', NewEOS.onebar.rhoiv)
print(' Sp. Entropy at inc. vaporization Siv (kJ/K/kg): ', NewEOS.onebar.Siv*1.E3)
print(' Sp. Entropy at com. vaporization Scv (kJ/K/kg): ', NewEOS.onebar.Scv*1.E3)
print('\n')
# the triple point only exists when the model includes a melt curve
if NewEOS.tp.T > 0:
    print('ANEOS Calculated Triple Point:')
    print(' Temperature (K): ',NewEOS.tp.T)
    print(' Pressure (GPa): ', NewEOS.tp.P)
    print(' Solid density (g/cm^3): ',NewEOS.tp.rhos)
    print(' Liquid density (g/cm^3): ',NewEOS.tp.rhol)
    print(' Vapor density (g/cm^3): ',NewEOS.tp.rhov)
    print(' Sp. Entropy at inc. melt Sim (kJ/K/kg): ', NewEOS.tp.Sim*1.E3)
    print(' Sp. Entropy at com. melt Scm (kJ/K/kg): ', NewEOS.tp.Scm*1.E3)
    print(' Sp. Entropy at inc. vap. Siv (kJ/K/kg): ', NewEOS.tp.Siv*1.E3)
    print(' Sp. Entropy at com. vap. Scv (kJ/K/kg): ', NewEOS.tp.Scv*1.E3)
else:
    print('ANEOS: No melt curve or triple point in this calculation.')
print('\n')
print('USING MODEL ENTROPIES:')
print('Starting at reference conditions (rho,T): ',NewEOS.R0REF,NewEOS.T0REF, ' (g/cm^3, K)')
print('ANEOS model conditions for shock-induced vaporization on release to 1 bar:')
print(' : IM CM IV 50%V CV')
# interpolate the Hugoniot S(P) relation to find the shock pressure that
# reaches each 1-bar entropy threshold (incipient/complete melt & vaporization)
print('Shock Pressure (GPa) : {:10.2f}{:10.2f}{:10.2f}{:10.2f}{:10.2f}'.format(
np.interp(NewEOS.onebar.Sim,NewEOS.hug.S,NewEOS.hug.P),
np.interp(NewEOS.onebar.Scm,NewEOS.hug.S,NewEOS.hug.P),
np.interp(NewEOS.onebar.Siv,NewEOS.hug.S,NewEOS.hug.P),
np.interp(NewEOS.onebar.Siv+0.5*(NewEOS.onebar.Scv-NewEOS.onebar.Siv),NewEOS.hug.S,NewEOS.hug.P),
np.interp(NewEOS.onebar.Scv,NewEOS.hug.S,NewEOS.hug.P)))
# symmetric impact: impactor velocity = 2 x particle velocity
print('Imp. Match Vel (km/s): {:10.2f}{:10.2f}{:10.2f}{:10.2f}{:10.2f}'.format(
np.interp(NewEOS.onebar.Sim,NewEOS.hug.S,NewEOS.hug.up)*2,
np.interp(NewEOS.onebar.Scm,NewEOS.hug.S,NewEOS.hug.up)*2,
np.interp(NewEOS.onebar.Siv,NewEOS.hug.S,NewEOS.hug.up)*2,
np.interp(NewEOS.onebar.Siv+0.5*(NewEOS.onebar.Scv-NewEOS.onebar.Siv),NewEOS.hug.S,NewEOS.hug.up)*2,
np.interp(NewEOS.onebar.Scv,NewEOS.hug.S,NewEOS.hug.up)*2))
print('\n')
print('USING JANAF ENTROPIES AT 1 BAR:')
print('Starting at reference conditions (rho,T): ',NewEOS.R0REF,NewEOS.T0REF, ' (g/cm^3, K)')
print('ANEOS model conditions for shock-induced vaporization on release to 1 bar (JANAF):')
print(' : IM CM IV 50%V CV')
# JANAF values: J/K/mol divided by FMW (g/mol) and 1e3 -> MJ/K/kg
print('Shock Pressure (GPa) : {:10.2f}{:10.2f}{:10.2f}{:10.2f}{:10.2f}'.format(
np.interp(92.190/NewEOS.FMW/1.e3,NewEOS.hug.S,NewEOS.hug.P),
np.interp(99.823/NewEOS.FMW/1.e3,NewEOS.hug.S,NewEOS.hug.P),
np.interp(2.24e-3,NewEOS.hug.S,NewEOS.hug.P),
np.interp(2.24e-3+0.5*((4.238-2.24)*1.e-3),NewEOS.hug.S,NewEOS.hug.P),
np.interp(4.238e-3,NewEOS.hug.S,NewEOS.hug.P)))
# BUGFIX: the original bare name `stop` halted execution via an accidental
# NameError. Raise an explicit exception so the intentional halt is obvious.
raise SystemExit('Intentional stop: review the vaporization tables above before continuing.')
# -
print(NewEOS.hug.S)
# ## Model Phase Boundaries
#
# Black lines are the ANEOS phase boundaries. Blue curve is the model Principal Hugoniot.
#
# Orange diamonds are the 1-bar boiling point; orange squares are the 1-bar melting point.
#
# The critical point has not been measured experimentally. There are several ab initio calculations, summarized in Medvedev 2014. The range of calculated critical points is shown below by the light blue box.
# +
# PHASE DIAGRAM PLOTS
#======================LOTS OF PLOTS============================
# Plot aneos phase boundaries with focus on the vapor curve
# 4x2 grid of phase-diagram projections. Each panel overlays the ANEOS phase
# boundaries (vapor curve vc, melt curve mc), the critical point cp, and the
# principal Hugoniot.
sfont = 15
fig, axes = plt.subplots(nrows=4, ncols=2, figsize=(14,24))
plt.subplots_adjust(wspace=0.4)
#------------------------------
# entropy-temperature
# Panel [0,0]: liquid (Sl) and vapor (Sv) branches of the vapor curve in S-T;
# entropies are converted MJ/K/kg -> kJ/K/kg via *1.E3.
ai=0
aj=0
axes[ai,aj].plot(NewEOS.vc.Sl*1.E3,NewEOS.vc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.Sv*1.E3,NewEOS.vc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.S*1.E3,NewEOS.cp.T,'o',color=stylephasept[1],markersize=stylephasept[2])
# melt curve only exists when the model has a triple point
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.Sl*1.E3,NewEOS.mc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
    axes[ai,aj].plot(NewEOS.mc.Ss*1.E3,NewEOS.mc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.S*1.E3,NewEOS.hug.T,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.S[0]*1.E3,NewEOS.hug.T[0],'o',color=stylestphug[1])
axes[ai,aj].set_ylim(0,1.5*NewEOS.cp.T)
axes[ai,aj].set_xlim(0,7)
axes[ai,aj].set_xlabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_ylabel('Temperature [K]',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-------------------------
# density-pressure
# Panel [0,1]: phase boundaries and Hugoniot in the rho-P plane (log P).
ai=0
aj=1
axes[ai,aj].semilogy(NewEOS.vc.rl,NewEOS.vc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.rv,NewEOS.vc.Pv,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.rho,NewEOS.cp.P,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
    axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.rho[0],NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].set_ylim(1.e-5,300)
axes[ai,aj].set_xlim(0,10)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#------
#entropy-pressure
# Panel [1,0]: same content in the S-P plane (log P).
ai=1
aj=0
axes[ai,aj].semilogy(NewEOS.vc.Sl*1.E3,NewEOS.vc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.Sv*1.E3,NewEOS.vc.Pv,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.S*1.E3,NewEOS.cp.P,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.Sl*1.E3,NewEOS.mc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
    axes[ai,aj].plot(NewEOS.mc.Ss*1.E3,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.S*1.E3,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.S[0]*1.E3,NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].set_xlabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(1.e-12,10000)
axes[ai,aj].set_xlim(0,7)
#-----------
# specific energy - pressure
# Panel [1,1]: phase boundaries and Hugoniot in the U-P plane (log P).
ai=1
aj=1
axes[ai,aj].semilogy(NewEOS.vc.Ul,NewEOS.vc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.Uv,NewEOS.vc.Pv,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.U,NewEOS.cp.P,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.Ul,NewEOS.mc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
    axes[ai,aj].plot(NewEOS.mc.Us,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.U,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.U[0],NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].set_xlabel('Specific Internal Energy (MJ/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(1.e-12,10000)
# x range scaled to the critical-point energy
axes[ai,aj].set_xlim(-3,NewEOS.cp.U*1.5)
#-------
# temperature - pressure
# Panel [2,0]: vapor/melt curves and Hugoniot in the T-P plane (log P).
ai=2
aj=0
axes[ai,aj].semilogy(NewEOS.vc.T,NewEOS.vc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.T,NewEOS.vc.Pv,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.T,NewEOS.cp.P,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.T,NewEOS.mc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
    axes[ai,aj].plot(NewEOS.mc.T,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.T,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.T[0],NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].set_xlabel('Temperature (K)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_xlim(0,12000)
axes[ai,aj].set_ylim(1.e-16,100)
#-------
# pressure - temperature melt curve
# Panel [2,1]: melt curve and Hugoniot in the P-T plane (linear axes);
# vapor-curve overlays kept below but disabled.
ai=2
aj=1
#axes[ai,aj].plot(NewEOS.vc.Pl,NewEOS.vc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
#axes[ai,aj].plot(NewEOS.vc.Pv,NewEOS.vc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
#axes[ai,aj].plot(NewEOS.cp.P,NewEOS.cp.T,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.Pl,NewEOS.mc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
    axes[ai,aj].plot(NewEOS.mc.Ps,NewEOS.mc.T,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.P,NewEOS.hug.T,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.P[0],NewEOS.hug.T[0],'o',color=stylestphug[1])
axes[ai,aj].set_ylabel('Temperature (K)',size=sfont)
axes[ai,aj].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(0,9000)
axes[ai,aj].set_xlim(-5,450)
#-------
# density - pressure melt curve
# Panel [3,0]: zoom on the Hugoniot crossing the melt curve in rho-P,
# with the Sjostrom & Crockett (2018) Hugoniot for comparison.
ai=3
aj=0
axes[ai,aj].plot(NewEOS.hug.rho,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.rho[0],NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].plot(scironrarr,scironparr,'-',color='m',label='STP Hugoniot\n(Sjostrom & Crockett 2018)')
if NewEOS.tp.T > 0:
    # liquidus dashed, solidus solid
    axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,'--',color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
    axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(100,400)
axes[ai,aj].set_xlim(10,14)
#-------
# temperature - pressure zoom out
# Panel [3,1]: same content as panel [2,0] but on a wider temperature range.
ai=3
aj=1
axes[ai,aj].semilogy(NewEOS.vc.T,NewEOS.vc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2],label=styleanphase[3])
axes[ai,aj].plot(NewEOS.vc.T,NewEOS.vc.Pv,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.cp.T,NewEOS.cp.P,'o',color=stylephasept[1],markersize=stylephasept[2])
if NewEOS.tp.T > 0:
    axes[ai,aj].plot(NewEOS.mc.T,NewEOS.mc.Pl,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
    axes[ai,aj].plot(NewEOS.mc.T,NewEOS.mc.Ps,styleanphase[0],color=styleanphase[1],linewidth=styleanphase[2])
axes[ai,aj].plot(NewEOS.hug.T,NewEOS.hug.P,stylestphug[0],linewidth=stylestphug[2],color=stylestphug[1],label=stylestphug[3])
axes[ai,aj].plot(NewEOS.hug.T[0],NewEOS.hug.P[0],'o',color=stylestphug[1])
axes[ai,aj].set_xlabel('Temperature (K)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_xlim(0,35000)
axes[ai,aj].set_ylim(1.e-16,100)
#===================================================================
# PLOT EXPERIMENTAL CONSTRAINTS ON THE ANEOS PHASE BOUNDARIES
# Overlay literature data and estimates on the panels created above.
# PLOTDATA is a manual on/off switch for this whole overlay section.
PLOTDATA=1
datms = 8  # data marker size (not referenced in the visible overlay code)
if PLOTDATA:
    # entropy-temperature [0,0]
    ai=0
    aj=0
    # range of ab initio predictions for the critical point from Medvedev 2014
    rect = Rectangle((2.24,7300.),2.,3600,linewidth=1,edgecolor='skyblue',facecolor='skyblue')
    axes[ai,aj].add_patch(rect)
    # STP NIST-JANAF
    axes[ai,aj].plot(ironSTP_S*1.E3,298,'+',color='orange',label='STP (JANAF)')
    # 1 bar melting point JANAF delta-iron to liquid
    #1809.000 42.558 92.190 59.772 58.645 DELTA <--> LIQUID
    #1809.000 46.024 99.823 59.772 72.452 TRANSITION
    # JANAF entropies (J/K/mol) divided by FMW (g/mol) give kJ/K/kg
    axes[ai,aj].plot(99.823/NewEOS.FMW,1809,'d',color='orange',label='1-bar MP (JANAF)')
    axes[ai,aj].plot(92.190/NewEOS.FMW,1809,'d',color='orange')
    # Boiling point NIST-JANAF, Kraus et al. Nature Geoscience 2015 and refs within
    axes[ai,aj].plot(2.24,3133,'s',color='orange',label='1-bar BP (JANAF)')
    axes[ai,aj].plot(4.238,3133,'s',color='orange')
    axes[0,0].plot(2.496,8787,'x',color='navy',label='Fortov & Lomonosov 2010 CP',markersize=10)
    #EOS parameters of the critical point for iron Pc = 1.131 GPa, Tc = 8,787 K, Vc = 0.458cm3/g, Sc = 2.496 J/g/K
    # density-pressure [0,1]
    ai=0
    aj=1
    # range of ab initio predictions for the critical point from Medvedev 2014
    rect = Rectangle((1.24,.356),1.,1.35,linewidth=1,edgecolor='skyblue',facecolor='skyblue')
    axes[ai,aj].add_patch(rect)
    axes[ai,aj].plot(2.1834,1.131,'x',color='navy',label='Fortov & Lomonosov 2010 CP',markersize=10)
    # 99.99% Fe melting volume at 1 bar from Blumm & Henderson 2000 3.46%, approx 7.175 g/cm3 for solid at melt pt
    axes[ai,aj].plot([6.98,6.98*(1.0346)],[1.E-4,1.E-4],'d',color='orange',label='MP (Blumm & Henderson 2000)')
    # 1-bar Fe liquid at boiling point calculation Kraus et al. 2015
    axes[ai,aj].plot([6.01],[1.E-4],'s',color='orange',label='BP (Kraus et al. 2015)')
    # entropy -- pressure [1,0]
    ai=1
    aj=0
    # range of ab initio predictions for the critical point from Medvedev 2014
    rect = Rectangle((2.24,.356),2.,1.35,linewidth=1,edgecolor='skyblue',facecolor='skyblue')
    axes[ai,aj].add_patch(rect)
    axes[ai,aj].plot(2.496,1.131,'x',color='navy',label='Fortov & Lomonosov 2010 CP',markersize=10)
    # STP NIST-JANAF
    axes[ai,aj].plot(ironSTP_S*1.E3,1.E-4,'+',color='orange',label='STP (JANAF)')
    # 1 bar melting point JANAF delta-iron to liquid
    #1809.000 42.558 92.190 59.772 58.645 DELTA <--> LIQUID
    #1809.000 46.024 99.823 59.772 72.452 TRANSITION
    axes[ai,aj].plot(99.823/NewEOS.FMW,1.E-4,'d',color='orange',label='1-bar MP (JANAF)')
    axes[ai,aj].plot(92.190/NewEOS.FMW,1.E-4,'d',color='orange')
    # Boiling point NIST-JANAF, Kraus et al. Nature Geoscience 2015 and refs within
    axes[ai,aj].plot(2.24,1.E-4,'s',color='orange',label='1-bar BP (JANAF)')
    axes[ai,aj].plot(4.238,1.E-4,'s',color='orange')
    # temperature -- pressure [2,0]
    ai=2
    aj=0
    # range of ab initio predictions for the critical point from Medvedev 2014
    rect = Rectangle((7300.,.356),3600,1.35,linewidth=1,edgecolor='skyblue',facecolor='skyblue')
    axes[ai,aj].add_patch(rect)
    # plot Fe experimentally fitted vapor curve from Alcock et al. 1984
    # (log10 P[atm] = A + B/T + C*log10(T) + D*T*1e-3; 1e-4 converts atm->GPa)
    alcocktarr = np.arange(2100)+1
    # Fe solid good from 298 to melting point
    A=7.1
    B=-21723.
    C=0.4536
    D=-0.5846
    alcockfesolidparr = 1.e-4*np.power(10.,A+B/alcocktarr+C*np.log10(alcocktarr)+D*alcocktarr*1.e-3) #GPa
    # Fe liquid good from melting point to 2100 K
    A=6.347
    B=-19574
    C=0.
    D=0.
    alcockfeliquidparr = 1.e-4*np.power(10.,A+B/alcocktarr+C*np.log10(alcocktarr)+D*alcocktarr*1.e-3) #GPa
    # index i corresponds to T = i+1 K, so these slices span ~299-1809 K
    # (solid) and ~1810-2100 K (liquid); the 1-K offset is negligible here
    axes[ai,aj].plot(alcocktarr[298:1809],alcockfesolidparr[298:1809],color='orange',label='Vapor curve\nAlcock et al. 1984')
    axes[ai,aj].plot(alcocktarr[1809:2100],alcockfeliquidparr[1809:2100],color='orange')
    axes[ai,aj].plot([3133.345],[1.e-4],'o',color='orange',label='1-bar BP (JANAF)')
    axes[ai,aj].plot(9250.,8750.e-4,'+',color='purple',label='Beutl et al. 1994 CP',markersize=10)
    axes[ai,aj].plot(8787,1.131,'x',color='navy',label='Fortov & Lomonosov 2010 CP',markersize=10)
    # pressure- temperature [2,1]
    ai=2
    aj=1
    # Anzellini et al. 2013 Science iron melt curve high pressure region up to 200 GPa/5000 K
    # (Simon-Glatzel fits for two pressure segments)
    anzmct3arr = (np.arange(100)+1.)/100.*1300.+3700 # K
    anzmcp3arr = (np.power(anzmct3arr/3712.,1.72)-1.)*161.2 + 98.5 # GPa
    anzmct2arr = (np.arange(100)+1.)/100.*3700. # K
    anzmcp2arr = (np.power(anzmct2arr/1991.,2.38)-1.)*27.39 + 5.2 # GPa
    axes[ai,aj].plot(anzmcp3arr,anzmct3arr,color="orange",linestyle='--',label='Anzellini et al. 2013')
    axes[ai,aj].plot(anzmcp2arr,anzmct2arr,color="orange",linestyle='--')
    #Fischer et al. 2012 Fe-Si alloys
    # example solidus of an iron alloy
    fischerparrlow = Fischer_datalow.iloc[:,0].values # P GPa
    fischertarrlow = Fischer_datalow.iloc[:,1].values # T K
    fischerparrhigh = Fischer_datahigh.iloc[:,0].values # P GPa
    fischertarrhigh = Fischer_datahigh.iloc[:,1].values # T K
    axes[ai,aj].plot(fischerparrhigh,fischertarrhigh,'v',color="brown",label='Fischer et al. 2012 Fe-16Si\nupper limit solidus')
    axes[ai,aj].plot(fischerparrlow,fischertarrlow,'^',color="brown",label='lower limit solidus')
    # shock-melting points with uncertainty boxes
    rect = Rectangle((240.,5000.),6.,700,linewidth=1,edgecolor='moccasin',facecolor='moccasin')
    axes[ai,aj].add_patch(rect)
    axes[ai,aj].plot(243.,5350,'x',color='orange',label='Brown & McQueen 1986')
    rect = Rectangle((222.,4700),6.,1000,linewidth=1,edgecolor='moccasin',facecolor='moccasin')
    axes[ai,aj].add_patch(rect)
    axes[ai,aj].plot(225.,5100,'*',color='orange',label='Nguyen & Holmes 2004')
    axes[ai,aj].errorbar(100.,3500.,xerr=10.,yerr=200,fmt='s',color='red',label='bcc-hcp-liquid TP\nMorard et al. 2018')
    # density - pressure [3,0]
    ai=3
    aj=0
    axes[ai,aj].plot(12.5,270.,'^',color='orange',label='STP Hug. enters liq. 270 GPa\n(Sjostrom & Crockett 2018)',markersize=6)
    # NOTE(review): the labels below cite "Nguyen & Holmes 2014" while the
    # panel above cites 2004 for the same authors -- confirm the intended year.
    if NewEOS.tp.T > 0:
        axes[ai,aj].plot(np.interp(260.,NewEOS.mc.Pl,NewEOS.mc.rl),260.,'d',color='orange',label='STP Hug. enters liq. 260 GPa\n(Nguyen & Holmes 2014)',markersize=6)
        axes[ai,aj].plot(np.interp(225.,NewEOS.mc.Ps,NewEOS.mc.rs),225.,'d',color='red',label='STP Hug. begins to melt 225 GPa\n(Nguyen & Holmes 2014)',markersize=6)
    # temperature -- pressure [3,1]
    ai=3
    aj=1
    # range of ab initio predictions for the critical point from Medvedev 2014
    rect = Rectangle((7300.,.356),3600,1.35,linewidth=1,edgecolor='skyblue',facecolor='skyblue')
    axes[ai,aj].add_patch(rect)
    # plot Fe experimentally fitted vapor curve from Alcock et al. 1984
    alcocktarr = np.arange(2100)+1
    # Fe solid good from 298 to melting point
    A=7.1
    B=-21723.
    C=0.4536
    D=-0.5846
    # recomputed here but not plotted on this panel
    alcockfesolidparr = 1.e-4*np.power(10.,A+B/alcocktarr+C*np.log10(alcocktarr)+D*alcocktarr*1.e-3) #GPa
# add legends to every panel and compact the layout
axes[0,0].legend(fontsize=sfont-5)
axes[0,1].legend(fontsize=sfont-5)
axes[1,0].legend(fontsize=sfont-5)
axes[1,1].legend(fontsize=sfont-5)
axes[2,0].legend(fontsize=sfont-5)
axes[2,1].legend(fontsize=sfont-5)
axes[3,0].legend(fontsize=sfont-5)
axes[3,1].legend(fontsize=sfont-5)
plt.tight_layout(pad=0.6)
# -
# ## ANEOS model comparisons to experimental data
#
# The original ANEOS with a 3nR limit to the heat capacity does provide a good fit to the liquid region.
#
# Dorogokupets et al. 2017 calculated the isotherm for liquid iron at 2200 K. The ANEOS model is a good fit (and then enters the solid phase).
#
# Anzellini et al. (2013) estimated that the temperature at the core-mantle boundary is about 4050 K. Here, I plot the ANEOS isentrope through (the nearest grid point to) 4050 K and 136 GPa and compare the isentrope to PREM (http://ds.iris.edu/spud/earthmodel/9785674). As expected, the ANEOS model isentrope is denser than Earth's core. The model isentrope and 10% and 4% density reductions are compared to PREM.
# +
# INCLUDE A COMPARISON TO EARTH'S STRUCTURE PREM MODEL
G = 6.67E-11 # Gravitational constant m3/kg/s2 (3-digit value; CODATA is 6.674e-11)
# Read in PREM: Preliminary Earth Reference Model
# (datadir is defined earlier in the notebook)
PREM_filename=datadir+'PREM500_IDV.csv' # we love long, readable variable names!
# make a class to hold the PREM data
class PREMclass:
    """Container for a 1-D PREM (Preliminary Reference Earth Model) profile.

    Radius-indexed arrays are created empty and filled in after the data
    file is loaded.
    """

    def __init__(self):
        """Initialize an empty profile with zero-length arrays."""
        self.NR = 0  # number of radius points
        # radius (m), density (kg/m3), P/S wave velocities (m/s),
        # pressure and temperature profiles -- all start empty
        for field in ("radius", "density", "pwavevel", "swavevel",
                      "pressure", "temperature"):
            setattr(self, field, np.zeros(self.NR))
        # note: not every column of the data file gets a slot here
        self.units = ''  # free-form text note recording the array units
# initialize an empty PREM object
PREM = PREMclass()
# read the data into the class parameters (columns: radius, density, Vp, Vs)
PREM.radius = np.loadtxt(PREM_filename,delimiter=',',skiprows=2,usecols=[0]) # radius in m
PREM.density = np.loadtxt(PREM_filename,delimiter=',',skiprows=2,usecols=[1]) # density in kg/m3
PREM.pwavevel = np.loadtxt(PREM_filename,delimiter=',',skiprows=2,usecols=[2]) # p-wave velocity m/s
PREM.swavevel = np.loadtxt(PREM_filename,delimiter=',',skiprows=2,usecols=[3]) # s-wave velocity m/s
PREM.NR = len(PREM.radius) # number of radius points
PREM.units = 'radius (m), density (kg/m3), pwavevel (m/s), swavevel (m/s)'
# layer thicknesses: layer i spans radius[i] -> radius[i+1]
PREM_dr = np.roll(PREM.radius,-1)-PREM.radius
PREM_dr[PREM.NR-1] = 0. # only NR-1 layers; last entry unused
#print(PREM_dr)
# mass of each layer: density x shell area x thickness
PREM_mass_rad = PREM.density*(4.*np.pi*PREM.radius*PREM.radius*PREM_dr)
# cumulative mass: PREM_mass_cum[i] = total mass of layers 0..i.
# (Replaces the per-step np.sum of the original, which made the
# integration loop O(NR^2); total Earth mass is PREM_mass_cum[-1].)
PREM_mass_cum = np.cumsum(PREM_mass_rad)
# Check that the total mass is the mass of the Earth
#print('PREM total mass (kg)=',PREM_mass_cum[-1])
#print('PREM total mass (Mearth)=',PREM_mass_cum[-1]/5.972E24)
PREM.pressure = np.zeros(PREM.NR) # hydrostatic pressure on the same radius grid
# The first entry is the middle of the planet, so start at the surface
# (pressure = 0) and integrate dP = G*M(<r)*rho*dr/r^2 inwards.
for i in range(PREM.NR-2,0,-1):
    # BUGFIX: the mass enclosed below radius[i] is the sum of layers 0..i-1,
    # i.e. PREM_mass_cum[i-1]; the original np.sum(PREM_mass_rad[0:i-1])
    # dropped layer i-1 (off-by-one), under-counting the enclosed mass.
    PREM.pressure[i] = PREM.pressure[i+1]+G*PREM_mass_cum[i-1]*PREM.density[i]*PREM_dr[i]/PREM.radius[i]/PREM.radius[i]
# use SESAME units
PREM.pressure = PREM.pressure/1.E9 # GPa
IronEOS = NewEOS  # alias: the isentropes below are extracted from the iron table
# first extract the isentropes for the planet from the EOS tables
core = isentrope_class() # code in eostable.py
Score = 0.0022 # MJ/K/kg -- fixed entropy for the first trial isentrope
# loop across all densities and extract the values for the requested isentrope
for i in range(0,IronEOS.ND):
    ind = np.where((IronEOS.S[:,i] > 0))[0]  # valid (S>0) grid points at this density
    interpfunction = interpolate.interp1d(IronEOS.S[ind,i],IronEOS.P[ind,i]) # MJ/K/kg, GPa
    core.pressure = np.append(core.pressure,interpfunction(Score)) # GPa
    interpfunction = interpolate.interp1d(IronEOS.S[ind,i],IronEOS.T[ind]) # MJ/K/kg, K (T grid is 1-D)
    core.temperature = np.append(core.temperature,interpfunction(Score)) # K
core.density = IronEOS.rho # g/cm3
#print('Calculated iron isentrope with ',np.round(np.interp(136.,core.pressure,core.temperature)),' K CMB temperature (P=136 GPa)')
# second isentrope: anchored so it passes through ~4050 K at 136 GPa (the
# Anzellini et al. 2013 CMB estimate) by interpolating S(P) on the nearest
# temperature grid row
core2 = isentrope_class() # code in eostable.py
it0 = np.where(IronEOS.T >= 4050.)[0]
Score = np.interp(136.,IronEOS.P[it0[0],:],IronEOS.S[it0[0],:])
#Score = 0.00186 # MJ/K/kg
# loop across all densities and extract the values for the requested isentrope
for i in range(0,IronEOS.ND):
    ind = np.where((IronEOS.S[:,i] > 0))[0]
    interpfunction = interpolate.interp1d(IronEOS.S[ind,i],IronEOS.P[ind,i]) # MJ/K/kg, GPa
    core2.pressure = np.append(core2.pressure,interpfunction(Score)) # GPa
    interpfunction = interpolate.interp1d(IronEOS.S[ind,i],IronEOS.T[ind]) # MJ/K/kg, K
    core2.temperature = np.append(core2.temperature,interpfunction(Score)) # K
core2.density = IronEOS.rho # g/cm3
print('Calculated iron isentrope with ',np.round(np.interp(136.,core2.pressure,core2.temperature)),' K CMB temperature (P=136 GPa)')
# +
# 1-bar plots and 298 K plots
# Plot aneos vapor curves
# 2x2 grid: 1-bar isobar checks, 298-K/2200-K isotherm checks, and a PREM
# comparison.
sfont = 15
fig, axes = plt.subplots(2, 2, figsize=(14,14))
plt.subplots_adjust(wspace=0.4)
#--------
# temperature - entropy at 1 bar
# Panel [0,0]: ANEOS 1-bar S(T) vs. Desai (1986) data and JANAF fixed points.
ai=0
aj=0
axes[ai,aj].plot(Desai_tarr,Desai_sarr,'-',color='orange',label='Iron data (Desai 1986)',linewidth=3)
axes[ai,aj].plot(NewEOS.onebar.T,NewEOS.onebar.S*1.E3,'+',color='blue',label='ANEOS 1 bar')
# JANAF entropies (J/K/mol) divided by FMW (g/mol) give kJ/K/kg
axes[ai,aj].plot(1809.,99.823/NewEOS.FMW,'d',color='orange',label='1-bar MP (JANAF)')
axes[ai,aj].plot(1809.,92.190/NewEOS.FMW,'d',color='orange')
# Boiling point NIST-JANAF, Kraus et al. Nature Geoscience 2015 and refs within
axes[ai,aj].plot(3133.,2.24,'s',color='orange',label='1-bar BP (JANAF)')
axes[ai,aj].plot(3133.,4.238,'s',color='orange')
axes[ai,aj].set_xlabel('Temperature (K)',size=sfont)
axes[ai,aj].set_ylabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_title('1-bar isobar',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(0,5)
axes[ai,aj].set_xlim(0,4000)
#----
# density-temperature at 1 bar
# Panel [0,1]: ANEOS 1-bar rho(T) vs. liquid-iron data and fixed points.
ai=0
aj=1
axes[ai,aj].plot(Assael_tarr,Assael_rarr/1.e3,'-',color='orange',label='Liquid iron data (Assael et al. 2006)')
axes[ai,aj].plot(NewEOS.onebar.T,NewEOS.onebar.rho,'+',color='blue',label='ANEOS 1 bar')
axes[ai,aj].plot(298.,7.873,'+',color='orange',label='STP')
axes[ai,aj].plot([3133],[6.01],'s',color='orange',label='Boiling point (Kraus et al. 2015)')
# 3.4% volume change (Askeland 1996) with highest density from Assael et al. 2006
axes[ai,aj].plot([1809.,1809],[6.99,6.99*(1.034)],'d',color='orange',label='Melt transition')
axes[ai,aj].plot([1811.],[7.019],'X',color='red',label='Anderson & Ahrens 1994')
axes[ai,aj].set_ylabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_xlabel('Temperature (K)',size=sfont)
axes[ai,aj].set_title('1-bar isobar',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_ylim(5,9)
axes[ai,aj].set_xlim(0,4000)
# plot data (silicate MELTS reference points, disabled for iron)
#axes[ai,aj].plot(1890+273.,2.968,'s',color='orange',label='MELTS MP Berman')
#axes[ai,aj].plot(1890+273.,2.687,'o',color='orange',label='MELTS MP Lange')
#axes[ai,aj].plot(2273.,2.597,'d',color='orange',label='Thomas & Asimow 2013')
#----
# density-pressure at 298 K
# Panel [1,0]: ANEOS 298-K and ~2200-K isotherms vs. third-order
# Birch-Murnaghan fits to published isotherms for the alpha (bcc),
# gamma (fcc), epsilon (hcp), and liquid phases of iron.
ai=1
aj=0
# Guinan & Beshers JPCS 1969 Fe 298 K isotherm (kept for reference, disabled)
#K0T = 166.4 # GPa room temp isentrope
#K0Tp = 5.29
#R00 = 7.874
#print(R00)
GB_cs_stp = np.sqrt(166.4E9/7874.) # m/s -- bulk sound speed sqrt(K0/rho0)
#print(GB_cs_stp)
#BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
# )*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
#igood = np.where((BM_pressure < 14.6))[0]
#axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'-',color='orange',label='alpha-Fe 298-K isentrope\nGuinan & Beshers 1969')
# Dorogokupets et al. 2017
# alpha (bcc) iron 298 K 1 bar (low T is ferro; high T is para)
K0T = 164.001 # GPa room temp isotherm
K0Tp = 5.5
R00 = 7.8746
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
    )*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure < 14.6))[0]  # restrict to the phase's stability range
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'-',color='teal',label='alpha-Fe 298-K isotherm\nDorogokupets et al. 2017')
# Dorogokupets et al. 2017
# gamma (fcc) iron 298 K 1 bar
K0T = 146.2 # GPa room temp isotherm
K0Tp = 4.67
R00 = 8.06
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
    )*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure < 110))[0]
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'-',color='cyan',linewidth=4,label='gamma-Fe 298-K isotherm\nDorogokupets et al. 2017')
# ANEOS 298-K isotherm from the table (T0REF is defined earlier in the notebook)
iT0REF = np.where(NewEOS.T == T0REF)[0]
axes[ai,aj].plot(NewEOS.rho,NewEOS.P[iT0REF[0],:],'-',color='blue',label='ANEOS 298-K isotherm')
# Dewaele et al. PRL 2006 Fe 298 K isotherm
# espilon-iron (hcp)
K0T = 165 # fixed GPa room temp isotherm
K0Tp = 4.97 # +-0.04 fitted
# reference density from atomic mass and unit-cell volume; kg/m3 -> g/cm3
R00 = (NewEOS.FMW)*1.*1.6726/(11.234*1.E-3)/1.E3 # kg/m3->g/cm3
#print(R00)
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
    )*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure > 17.)&(BM_pressure < 197.))
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'-',color='brown',linewidth=2,label='epsilon-Fe 298-K isotherm\nDewaele et al. 2006')
'''
# error bars Dewaele et al. PRL 2006 Fe 298 K isotherm
K0Tp = 4.97-0.04 # +-0.04 fitted
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
)*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure > 17.)&(BM_pressure < 197.))
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'--',color='brown')
# error bars Dewaele et al. PRL 2006 Fe 298 K isotherm
K0Tp = 4.97+0.04 # +-0.04 fitted
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
)*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure > 17.)&(BM_pressure < 197.))
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'--',color='brown')
'''
#------ LIQUID
# Dorogokupets et al. 2017
# liquid iron 2200 K 1 bar
K0T = 65.2 # GPa isotherm
K0Tp = 6.48
R00 = 7.019/1.04
BM_pressure = 1.5*K0T*(np.power(NewEOS.rho/R00,7./3.)-np.power(NewEOS.rho/R00,5./3.)
    )*(1.+0.75*(K0Tp-4.)*(np.power(NewEOS.rho/R00,2./3.)-1.))
igood = np.where((BM_pressure < 20))[0]
axes[ai,aj].plot(NewEOS.rho[igood],BM_pressure[igood],'--',color='orange',linewidth=2,label='Liquid Fe 2200-K isotherm\nDorogokupets et al. 2017')
# plot liquid isotherm: nearest table row at or above 2200 K
it0 = np.where(NewEOS.T >= 2200)[0]
#print('Liquid isotherm at (K) ',NewEOS.T[it0[0]])
igood = np.where((NewEOS.P[it0[0],:] < 20))[0]
axes[ai,aj].plot(NewEOS.rho[igood],NewEOS.P[it0[0],igood],'--',color='orangered',label='ANEOS '+str(np.round(NewEOS.T[it0[0]]))+'-K isotherm')
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_title('298-K, 2200-K isotherms',size=sfont)
axes[ai,aj].set_xlim(6,13)
axes[ai,aj].set_ylim(0,200)
#----
# PREM vs. ANEOS core isentropes
# Panel [1,1]: compare the model isentrope (anchored at ~4050 K / 136 GPa)
# to PREM; index 0 (planet center, pressure left at 0) is skipped via [1::].
ai=1
aj=1
axes[ai,aj].plot(PREM.density[1::]/1.E3,PREM.pressure[1::],'-',color='orange',label='PREM')
#axes[ai,aj].plot(core.density,core.pressure,'-',color='blue',label='ANEOS '+str(np.round(np.interp(136,core.pressure,core.temperature)))+' K isentrope at CMB',markersize=10)
axes[ai,aj].plot(core2.density,core2.pressure,'-',color='blue',label='ANEOS isentrope: '+str(np.round(np.interp(136,core2.pressure,core2.temperature)))+' K at CMB',markersize=10)
# density-shifted copies to bracket the (lighter) real core
axes[ai,aj].plot(0.9*core2.density,core2.pressure,'--',color='blue',label='ANEOS isentrope shifted 10% in density')
axes[ai,aj].plot(0.96*core2.density,core2.pressure,':',color='blue',label='ANEOS isentrope shifted 4% in density')
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[ai,aj].set_title('PREM and ANEOS model isentropes',size=sfont)
axes[ai,aj].set_xlim(3,14)
axes[ai,aj].set_ylim(0,360)
# don't show a plot in lower right
#axes[1,1].axis("off")
axes[0,0].legend(fontsize=sfont-5)
axes[0,1].legend(fontsize=sfont-5)
axes[1,0].legend(fontsize=sfont-5)
axes[1,1].legend(fontsize=sfont-5)
#print(PREM.pressure)
# -
# ## ANEOS Gruneisen parameter and Theta parameter for thermal model
#
# The Gruneisen parameter for iron liquid has been determined experimentally from shock wave data (e.g., see Kraus et al. 2015). Shockwave data derive the Grueneisen parameter in reference to a known state:
# \begin{equation}
# \gamma = \frac{1}{\rho}\frac{(P-P_{\rm ref})}{(E-E_{\rm ref})} \bigg\rvert _V
# \end{equation}
#
# The ANEOS Gruneisen model is split into two parts. For $\rho \ge \rho_{0}$,
# \begin{equation}
# \gamma = \left( \frac{\gamma_0 \rho_0}{\rho} + C_{24} \left( 1-\frac{\rho_0}{\rho} \right)^2 \right) (1-C_{60}) + \left( \gamma_0 + (C_{24}-\gamma_0) \left( 1- \frac{\rho_0}{\rho} \right)^2 \right) C_{60}.
# \end{equation}
# For $\rho < \rho_{0}$,
# \begin{equation}
# \gamma = C_{16} \rho^2 + C_{17}\rho + 1 + C_{61}.
# \end{equation}
#
# $C_{16}$ and $C_{17}$ are calculated so that $\gamma$ and $d \gamma / d \rho$ are continuous at $\rho=\rho_0$. The model asymptotes to the Thomas-Fermi limit ($\gamma=2/3$) when $C_{24}=2/3$.
#
# The gamma function developed by Kraus et al. 2015 (SOM equation 5) is also shown for comparison. This function was derived from the abundant data on shocked porous iron. This calculation assumes E=0.
#
# The literature values for the Gruneisen parameter of alpha-iron span 1.65 to 1.75: e.g., 1.65 Adams et al. 2006; 1.736 Dorogokupets et al. 2017; 1.75 Sjostrom & Crockett 2018.
# +
# plot the gamma and isotherms
# ANEOS Formulation for Gamma and theta
theta0=abs(NewEOS.theta0)
# density grid: 0.01x to 10x the reference density R0REF
rhoarr = (1+np.arange(1000))/1000.*NewEOS.R0REF*10.
# high-density (rho >= R0REF) Gruneisen gamma.
# NOTE(review): in the markdown formula above, (1-C60) multiplies the whole
# (gamma0*rho0/rho + C24*(1-rho0/rho)^2) term, but here it multiplies only the
# C24 term -- identical when C60 = 0; confirm which form ANEOS actually uses.
gammaarr = (NewEOS.gamma0*NewEOS.R0REF/rhoarr
            )+NewEOS.C24*np.power(1.0-NewEOS.R0REF/rhoarr,2.)*(1.-NewEOS.C60)+(NewEOS.gamma0 + (NewEOS.C24-NewEOS.gamma0)*np.power(1.-NewEOS.R0REF/rhoarr,2.))*NewEOS.C60
s3 = NewEOS.C61+1.
# C16 and C17 are chosen so gamma and d(gamma)/d(rho) are continuous at
# rho = R0REF; C14 is the matching constant for the low-density theta branch.
C14 = theta0 * np.exp(1.5*s3-(2.-0.5*NewEOS.C60)*NewEOS.gamma0)/(np.power(NewEOS.R0REF,s3))
C16 = (s3-(2.-NewEOS.C60)*NewEOS.gamma0)/(NewEOS.R0REF*NewEOS.R0REF)
C17 = ((3.-NewEOS.C60)*NewEOS.gamma0-2.*s3)/NewEOS.R0REF
# low-density (rho < R0REF) branch: gamma = C16*rho^2 + C17*rho + 1 + C61
gammaarrlow = C16*rhoarr*rhoarr+C17*rhoarr+s3
thetaarr = theta0*np.exp((1.-NewEOS.C60)*(1.-NewEOS.R0REF/rhoarr)*NewEOS.gamma0-0.5*(NewEOS.R0REF/rhoarr)*(3.-(NewEOS.R0REF/rhoarr)*(4-(NewEOS.R0REF/rhoarr))))*np.power((rhoarr/NewEOS.R0REF),NewEOS.C24)
thetalowarr = C14*np.exp(rhoarr*(C17+0.5*C16*rhoarr))*np.power(rhoarr,s3)
#-------- plots
sfont = 15
fig, axes = plt.subplots(1, 2, figsize=(14,7))
plt.subplots_adjust(wspace=0.4)
# -----
ai=0
indlow = np.where(rhoarr <= NewEOS.R0REF)
indhigh = np.where(rhoarr >= NewEOS.R0REF)
# Gamma from Kraus et al. 2015 SOM Equation 5
# this assumes E = 0. I dropped the exp(-phi*E) term. Phi=0.045 kg/MJ.
kraus_garr = 0.7+(2.1-0.7)*np.power(7.85/NewEOS.rho,1.7)
kraus_validrange = np.where((NewEOS.rho>9)&(NewEOS.rho<12))[0]
axes[ai].plot(NewEOS.rho[kraus_validrange],kraus_garr[kraus_validrange],'-',linewidth=6,color='orange',label='Liquid iron fit\n(Kraus et al. 2015)')
axes[ai].set_ylim(0,2.5)
axes[ai].set_xlim(0,25)
# liquid iron at 1 bar 1811 K Anderson and Ahrens
#axes[ai,aj].plot([7.019],[1.735],'s',color='red',label='Liq. Iron 1811 K, 1 bar')
#axes[ai,aj].plot([5.5,6.0,6.5,7.019,12.643,13.015,13.417],[1.344,1.547,1.751,1.723,1.419,1.401,1.303],'o',color='red',label='Liquid Iron')
axes[ai].errorbar([5.5,6.0,6.5,7.019,12.643,13.015,13.417],[1.344,1.547,1.751,1.723,1.419,1.401,1.303], yerr=[0.269,0.309,0.350,0.116,0.272,0.265,0.269], fmt='o', color='orange', ecolor='orange', capthick=2,label='Liquid Fe data\n(Anderson & Ahrens 1994)')
# solid alpha-iron at STP (literature range 1.65-1.75, see text above)
axes[ai].plot([NewEOS.R0REF,NewEOS.R0REF],[1.65,1.75],'-',linewidth=3,color='blue',label='alpha-Fe solid STP')
axes[ai].plot(rhoarr[indhigh[0]],gammaarr[indhigh[0]],label="ANEOS Gamma-high",color='black')
axes[ai].plot(rhoarr[indlow[0]], gammaarrlow[indlow[0]],label="ANEOS Gamma-low",color='black',ls='--')
axes[ai].plot(NewEOS.R0REF,NewEOS.gamma0,'+',label="ANEOS Gamma0",color='black')
axes[ai].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai].set_ylabel('Gamma (-)',size=sfont)
axes[ai].tick_params(labelsize=sfont)
# -----
ai=1
axes[ai].plot(rhoarr[indhigh[0]],thetaarr[indhigh[0]],label="ANEOS Theta-high",color='black')
axes[ai].plot(rhoarr[indlow[0]],thetalowarr[indlow[0]],label="ANEOS Theta-low",color='black',ls='--')
axes[ai].plot(NewEOS.R0REF,theta0,'+',label="Theta0",color='black')
axes[ai].set_ylim(0,2000)
axes[ai].set_xlim(0,25)
axes[ai].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai].set_ylabel('Theta (K)',size=sfont)
axes[ai].tick_params(labelsize=sfont)
axes[0].legend(fontsize=sfont-5)
axes[1].legend(fontsize=sfont-5)
# -
# # Specific Heat Capacity and Sound Speeds
#
#
# +
sfont = 15
fig, axes = plt.subplots(2, 2, figsize=(14,14))
plt.subplots_adjust(wspace=0.4)
#-----------------------------
# Pick isotherms and set color scale
ixtemparr = [298.,1000.,2000.,5000.,20000.]
# set color scale same for all plots
tcolmin=0.
tcolmax=20000.
# find the index closest to the selected temperatures -- no interpolation
ixindarr=np.zeros(len(ixtemparr))
ixlabelarr = []
for i in np.arange(len(ixtemparr)):
tmpi=int(np.round(np.interp(ixtemparr[i],NewEOS.T,np.arange(NewEOS.NT))))
#print(tmpi,antarr[tmpi])
ixindarr[i] = tmpi
ixlabelarr.append(str(ixtemparr[i])+' K')
#-----------------------
ai=0
aj=0
for i in np.arange(len(ixtemparr)):
icolscale = (ixtemparr[i]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.rho,NewEOS.cv[int(ixindarr[i]),:]*1000.,c=col,linestyle='-',label=ixlabelarr[i])
#axes[ai,aj].plot(QMDH_r1,QMDH_cv*1000.,styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label='QMD Hugoniot\n(Root et al. 2018')
#axes[ai,aj].plot(2.597,1.73736,'^',color='orange',label='1 bar 2273 K liquid Cv\n(Thomas & Asimow 20133)')
#axes[ai,aj].plot(3.,180./(NewEOS.FMW/1.E3)/1.E3,'s',color='orange',label='1 bar 2000 K solid Cv\n(Gillet et al. 1991)')
axes[ai,aj].set_ylim(0.,3.)
axes[ai,aj].set_xlim(0,30)
axes[ai,aj].set_ylabel('Specific heat capacity Cv (kJ/K/kg)',size=sfont)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-----------------------
ai=0
aj=1
for i in np.arange(len(ixtemparr)):
icolscale = (ixtemparr[i]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.rho,NewEOS.cv[int(ixindarr[i]),:]*1000.*1000./1/8.314*NewEOS.FMW/1000.,c=col,linestyle='-',label=ixlabelarr[i])
#axes[ai,aj].plot(QMDH_r1,QMDH_cv*1.E6/7/8.314*0.14,styleqmd[0],markersize=styleqmd[2],color=styleqmd[1],label='QMD Hugoniot\n(Root et al. 2018')
#axes[ai,aj].plot(2.597,1737./7/8.314*0.140,'^',color='orange',label='1 bar 2273 K liquid Cv\n(Thomas & Asimow 2013)')
#axes[ai,aj].plot(3.,180./(NewEOS.FMW/1.E3)/7/8.314*0.140,'s',color='orange',label='1 bar 2000 K solid Cv\n(Gillet et al. 1991)')
axes[ai,aj].set_ylim(1.,7.)
axes[ai,aj].set_xlim(0,30)
axes[ai,aj].set_ylabel('Specific heat capacity Cv (nR)',size=sfont)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-----------------------
# plot sound speed along isotherms
ai=1
aj=0
for i in np.arange(len(ixtemparr)):
icolscale = (ixtemparr[i]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.P[int(ixindarr[i]),:],NewEOS.cs[int(ixindarr[i]),:]/1.e5,c=col,linestyle='-',label=ixlabelarr[i])
axes[ai,aj].plot(1.E-4,GB_cs_stp/1.e3,'+',markersize=10,color='orange',label='alph-iron STP\n(Guinan & Beshers 1968)')
axes[ai,aj].plot(1.E-4,3.82,'X',markersize=10,color='orange',label='Liquid iron 1809 K\n(Nasch et al. 1994)')
axes[ai,aj].set_ylim(0,10)
axes[ai,aj].set_xlim(-10,40)
axes[ai,aj].set_ylabel('Bulk Sound Speed (km/s)',size=sfont)
axes[ai,aj].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-----------------------
# plot sound speed along isotherms
ai=1
aj=1
for i in np.arange(len(ixtemparr)):
icolscale = (ixtemparr[i]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.P[int(ixindarr[i]),:],NewEOS.cs[int(ixindarr[i]),:]/1.e5,c=col,linestyle='-',label=ixlabelarr[i])
axes[ai,aj].plot([225,260],[10,9.5],'d',color='orange',label='Nguyen & Holmes 2004 on Hugoniot')
axes[ai,aj].set_ylim(0,20)
axes[ai,aj].set_xlim(0,700)
axes[ai,aj].set_ylabel('Bulk Sound Speed (km/s)',size=sfont)
axes[ai,aj].set_xlabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
axes[0,0].legend(fontsize=sfont-5)
axes[0,1].legend(fontsize=sfont-5)
axes[1,0].legend(fontsize=sfont-5)
axes[1,1].legend(fontsize=sfont-5)
# -
# # Check the Melt Curve in Tabulated EOS
#
# Colored lines correspond to isotherms.
#
# There is no melt curve in this model. The kinks in the isotherms correspond to the high-pressure phase change.
# +
# CHECK MELT CURVE GRIDDING
sfont = 15
fig, axes = plt.subplots(nrows=5, ncols=2, figsize=(14,26))
plt.subplots_adjust(wspace=0.5)
tcolmin=0.
tcolmax=1.5*NewEOS.cp.T
tpltmax=1.5*NewEOS.cp.T
tskip=5
#-------------
ai=0
aj=0
axes[ai,aj].plot([],[],' ',label='Low pressure melt curve')
for it in range(1,NewEOS.NT,tskip):
icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.rho,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,color="black")
axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(0,50)
axes[ai,aj].set_xlim(6.5,9.5)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-------------
ai=0
aj=1
axes[ai,aj].plot([],[],' ',label='High pressure melt curve')
for it in range(1,NewEOS.NT,tskip):
icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.rho,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,color="black")
axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(50.,150)
axes[ai,aj].set_xlim(9.0,11.5)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-------------
ai=1
aj=0
axes[ai,aj].plot([],[],' ',label='Higher pressure melt curve')
for it in range(1,NewEOS.NT,tskip):
icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.rho,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,color="black")
axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(180.,310)
axes[ai,aj].set_xlim(11.,13.,)
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-------------
ai=1
aj=1
axes[ai,aj].plot([],[],' ',label='Higher pressure melt curve')
axes[ai,aj].set_xlabel('Specific Energy (MJ/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
for it in range(1,NewEOS.NT,tskip):
icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.U[it,:],NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.Ul,NewEOS.mc.Pl,color="black")
axes[ai,aj].plot(NewEOS.mc.Us,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(180.,310)
axes[ai,aj].set_xlim(4,7.5)
#-------------
ai=2
aj=0
axes[ai,aj].plot([],[],' ',label='Low pressure melt curve')
axes[ai,aj].set_xlabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
for it in range(1,NewEOS.NT,tskip):
icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.S[it,:]*1.e3,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.Sl*1.e3,NewEOS.mc.Pl,color="black")
axes[ai,aj].plot(NewEOS.mc.Ss*1.e3,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(0.,50)
axes[ai,aj].set_xlim(1.5,2.)
#-------------
ai=2
aj=1
axes[ai,aj].plot([],[],' ',label='High pressure melt curve')
axes[ai,aj].set_xlabel('Specific Entropy (kJ/K/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
for it in range(1,NewEOS.NT,tskip):
icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.S[it,:]*1.e3,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.Sl*1.e3,NewEOS.mc.Pl,color="black")
axes[ai,aj].plot(NewEOS.mc.Ss*1.e3,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(50.,150)
axes[ai,aj].set_xlim(1.6,2.)
#-------------
ai=3
aj=0
axes[ai,aj].plot([],[],' ',label='Low pressure melt curve')
axes[ai,aj].set_xlabel('Specific Energy (MJ/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
for it in range(1,NewEOS.NT,tskip):
icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.U[it,:],NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.Ul,NewEOS.mc.Pl,color="black")
axes[ai,aj].plot(NewEOS.mc.Us,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_xlim(0.5,2.)
axes[ai,aj].set_ylim(0.,50)
#-------------
ai=3
aj=1
axes[ai,aj].plot([],[],' ',label='High pressure melt curve')
axes[ai,aj].set_xlabel('Specific Energy (MJ/kg)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
for it in range(1,NewEOS.NT,tskip):
icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.U[it,:],NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.Ul,NewEOS.mc.Pl,color="black")
axes[ai,aj].plot(NewEOS.mc.Us,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(50.,150)
axes[ai,aj].set_xlim(1,4.)
#-------------
ai=4
aj=0
for it in range(1,NewEOS.NT,tskip):
icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.rho,NewEOS.P[it,:],'o',markersize=2,color=col,linestyle='-')
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,color="black")
axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(300,550)
axes[ai,aj].set_xlim(13.5,15.)
axes[ai,aj].plot([],[],' ',label='Very high pressure melt curve.')
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#-------------
ai=4
aj=1
for it in range(1,NewEOS.NT,tskip):
icolscale = (NewEOS.T[it]-tcolmin)/(tcolmax-tcolmin)
col=local_cmaps.parula(icolscale)
axes[ai,aj].plot(NewEOS.rho,NewEOS.P[it,:],'o',markersize=2,linestyle='-',color=col)
if NewEOS.tp.T > 0:
axes[ai,aj].plot(NewEOS.mc.rl,NewEOS.mc.Pl,color="black")
axes[ai,aj].plot(NewEOS.mc.rs,NewEOS.mc.Ps,color="black")
axes[ai,aj].set_ylim(800,1000)
axes[ai,aj].set_xlim(16.5,17.5)
axes[ai,aj].plot([],[],' ',label='Very high pressures')
axes[ai,aj].set_xlabel('Density (g/cm$^3$)',size=sfont)
axes[ai,aj].set_ylabel('Pressure (GPa)',size=sfont)
axes[ai,aj].tick_params(labelsize=sfont)
#------
axes[0,0].legend(fontsize=sfont-5)
axes[0,1].legend(fontsize=sfont-5)
axes[1,0].legend(fontsize=sfont-5)
axes[1,1].legend(fontsize=sfont-5)
axes[2,0].legend(fontsize=sfont-5)
axes[2,1].legend(fontsize=sfont-5)
axes[3,0].legend(fontsize=sfont-5)
axes[3,1].legend(fontsize=sfont-5)
axes[4,0].legend(fontsize=sfont-5)
axes[4,1].legend(fontsize=sfont-5)
# -
# # References
#
# <NAME>., <NAME>., <NAME>., & <NAME>. (2006). Elastic constants of monocrystal iron from 3 to 500 K. Journal of applied physics, 100(11), 113530.
#
# <NAME>., <NAME>., & <NAME>. (1984). Vapour pressure equations for the metallic elements: 298–2500K. Canadian Metallurgical Quarterly, 23(3), 309-313.
#
# <NAME>., & <NAME>. (1994). An equation of state for liquid iron and implications for the Earth's core. Journal of Geophysical Research: Solid Earth, 99(B3), 4273-4284.
#
# <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2013). Melting of iron at Earth’s inner core boundary based on fast X-ray diffraction. Science, 340(6131), 464-466.
#
# <NAME>., & <NAME>. (2011). The Canyon Diablo impact event: 2. Projectile fate and target melting upon impact. Meteoritics & Planetary Science, 46(6), 805-829.
#
# <NAME>, 1996 Materialwissenschaften (Heidelberg: Spektrum Akademischer Verlag GmbH) p. 203
#
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (2006). Reference data for the density and viscosity of liquid aluminum and liquid iron. Journal of Physical and Chemical Reference Data, 35(1), 285-300.
#
# <NAME>., & <NAME>. (2011). Scaling of melt production in hypervelocity impacts from high-resolution numerical simulations. Icarus, 211(1), 913-916.
#
# <NAME>., <NAME>., & <NAME>. (1994). Thermophysical properties of liquid iron. International journal of thermophysics, 15(6), 1323-1331.
#
# <NAME>., & <NAME>. (2000). Measurement of the volumetric expansion and bulk density of metals in the solid and molten regions. High Temperatures High Pressures, 32(1), 109-114.
#
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (2019). A giant impact as the likely origin of different twins in the Kepler-107 exoplanet system. Nature Astronomy, 3(5), 416-423. doi:10.1038/s41550-018-0684-9
#
# <NAME>., & <NAME>. (1986). Phase transitions, Grüneisen parameter, and elasticity for shocked iron between 77 GPa and 400 GPa. Journal of Geophysical Research: Solid Earth, 91(B7), 7485-7494.
#
# <NAME>., & <NAME>. (2004). Analytic model of the Grüneisen parameter at all densities. Journal of Physics and Chemistry of Solids, 65(8-9), 1581-1587.
#
# <NAME>., <NAME> and <NAME>, et al. (accessed 2019) Shock Wave Database, http://www.ihed.ras.ru/rusbank/
#
# <NAME>, <NAME>., <NAME>., <NAME>, <NAME>., <NAME>., <NAME>., & <NAME>. (1982). JANAF thermochemical tables, 1982 supplement. Journal of Physical and Chemical Reference Data, 11(3), 695-940. https://janaf.nist.gov
#
# <NAME>., <NAME>, <NAME>, <NAME>. (submitted). Atmosphere loss in planet-planet collisions. MNRAS.
#
# <NAME>. (1986). Thermodynamic properties of iron and silicon. Journal of physical and chemical reference data, 15(3), 967-983.
#
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2006). Quasihydrostatic equation of state of iron above 2 Mbar. Physical Review Letters, 97(21), 215504.
#
# <NAME>., <NAME>., <NAME>., & <NAME>. (2017). Thermodynamics and Equations of State of Iron to 350 GPa and 6000 K. Scientific reports, 7, 41863.
#
# <NAME>., <NAME>., & <NAME>. (2015). 2.07 Mineralogy of Super-Earth Planets. Treatise on Geophysics, Second Edition, Elsevier, Oxford.
#
# <NAME>., and <NAME> (1996) Modified PREM (Preliminary Reference Earth Model), doi:10.17611/DP/9785674, http://ds.iris.edu/spud/earthmodel/9785674.
#
# <NAME>. (2016). Melting of Fe alloys and the thermal structure of the core. Deep Earth: Physics and chemistry of the lower mantle and core, 217, 3-12.
#
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2012). Equation of state and phase diagram of Fe–16Si alloy as a candidate component of Earth's core. Earth and Planetary Science Letters, 357, 268-276.
#
# <NAME>., & <NAME>. (2010). Shock waves and equations of state of matter. Shock waves, 20(1), 53-71.
#
# <NAME>., & <NAME>. (1968). Pressure derivatives of the elastic constants of α-iron to 10 kbs. Journal of Physics and Chemistry of Solids, 29(3), 541-549.
#
# <NAME>. (1977). Rational function method of interpolation (No. LA-6903-MS). Los Alamos National Laboratory, Los Alamos, NM (United States).
#
# <NAME>. (1993). Multiphase equation of state for iron (No. SAND-93-0027). Sandia National Labs., Albuquerque, NM (United States).
#
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2015). Impact vaporization of planetesimal cores in the late stages of planet formation. Nature Geoscience, 8(4), 269.
#
# <NAME>., <NAME>., <NAME>., & <NAME>. (2003). Static compression of iron‐silicon alloys: Implications for silicon in the Earth's core. Journal of Geophysical Research: Solid Earth, 108(B1).
#
# <NAME>., & <NAME>. (1992). SESAME: The LANL equation of state database. Los Alamos National Laboratories Report LAUR-92-3407, Los Alamos, NM.
#
# <NAME>. (2014). Wide-range multiphase equation of state for iron. Combustion, Explosion, and Shock Waves, 50(5), 582-598.
#
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., ... & <NAME>. (2018). Solving controversies on the iron phase diagram under high pressure. Geophysical Research Letters, 45(20), 11-074.
#
# <NAME>., <NAME>., & <NAME>. (2018). Melting efficiency of troilite-iron assemblages in shock-darkening: Insight from numerical modeling. Physics of the Earth and Planetary Interiors, 282, 25-38.
#
# <NAME>., <NAME>., & <NAME>. (1994). Sound velocity measurements in liquid iron by ultrasonic interferometry. Journal of Geophysical Research: Solid Earth, 99(B3), 4285-4291.
#
# <NAME>., & <NAME>. (2004). Melting of iron at the physical conditions of the Earth's core. Nature, 427(6972), 339.
#
# <NAME>., <NAME>., & <NAME>. (1997). A reevaluation of impact melt production. Icarus, 127(2), 408-423.
#
# <NAME>., <NAME>., & <NAME>. (2015). Analysis of impact melt and vapor production in CTH for planetary applications. Procedia Engineering, 103(C).
#
# <NAME>. (1962). Metallic equations of state for hypervelocity impact (No. GA-3216). General Atomics Division, General Dynamics, San Diego, CA.
#
# <NAME>., <NAME>., & <NAME>. (2015). Erosive Hit-and-Run Impact Events: Debris Unbound. Proceedings of the International Astronomical Union, 10(S318), 9-15.
#
# <NAME>., & <NAME>. (2018). Quantum molecular dynamics of warm dense iron and a five-phase equation of state. Physical Review E, 97(5), 053209.
#
# <NAME>. (2005). Numerical simulations of very large impacts on the Earth. Planetary and Space Science, 53(12), 1205-1220.
#
# <NAME>., & <NAME>. (2017). Hydrocode simulation of the impact melt layer distribution underneath Xiuyan Crater, China. Journal of Earth Science, 28(1), 180-186.
#
# <NAME>., <NAME>., & <NAME>. (2019). HerEOS: A framework for consistent treatment of the Equation of State in ALE hydrodynamics. Computers & Mathematics with Applications, 78(2), 483-503.
#
#
# ## ANEOS references
# <NAME>., and <NAME> (2014). Improvements to ANEOS for multiple phase transitions. 45th Lunar Planet. Sci. Conf. Abs. 2664.
#
# <NAME>. (2007). A hydrocode equation of state for SiO$_2$. Meteoritics & Planetary Science, 42(12), 2079-2098.
#
# <NAME>. (1990). ANEOS analytic equations of state for shock physics codes input manual. SANDIA REPORT SAND, 89-2951.
#
# <NAME>., & <NAME>. (1972). Improvements in the Chart D radiation-hydrodynamic CODE III: Revised analytic equations of state (No. SC-RR--71-0714). Sandia Labs.
#
# <NAME>., et al. (accepted). The shock physics of giant impacts: Key requirements for the equations of state. In <NAME>, <NAME>, and <NAME> (Eds.), 21st Biennial APS Conference on Shock Compression of Condensed Matter (SCCM19). AIP Publishing. (https://arxiv.org/abs/1910.04687)
#
# <NAME>. (2019). ANEOS Code Modification: Thermal model adjustment parameter. https://github.com/ststewart/aneos-forsterite-2019/EOS-docs/
#
# End of File
# #####
|
aneos-T70/iron_aneos.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# #### [**<NAME>**](http://www.ncachanosky.com) | Department of Economics | Metropolitan State University of Denver | <EMAIL>
# # LABOR MARKET
# ---
# This note illustrates how to code a labor market in Python. The purpose of the note is to walk through Python applications, not to offer a detailed discussion of the labor market or to show best coding practices. The note also assumes familiarity with the neoclassical labor market model and a beginner experience with Python.
#
# For a more complete and detailed discussion of Python applications see the material in [Quant Econ](https://quantecon.org/).
#
# ---
# ## TABLE OF CONTENTS
# 1. [Labor demand](#1.-LABOR-DEMAND)
# 2. [Labor supply](#2.-LABOR-SUPPLY)
# 3. [Equilibrium](#3.-EQUILIBRIUM)
# ## 1. LABOR DEMAND
# Labor demand $\left(N^D\right)$ comes from a representative firm maximizing its profits $(\pi)$. Assume output $(Q)$ follows a Cobb-Douglas production function with Hicks-Neutral technology $(A)$, and that $P$ is the market price of the firm's output. Further, assume that $w$ and $r$ are the prices of labor $(N)$ and capital $(K)$ respectively. Then, firm's profit is (where $\alpha \in (0, 1)$):
#
# \begin{align}
# \pi &= P \cdot Q(K, N) - wN - rK \\
# \pi &= P \cdot \left(A \cdot K^{\alpha} N^{1-\alpha} \right) - wN - rK
# \end{align}
#
# With capital and technology as given in the short-run, the firm maximizes its profits by changing the amount of labor. The firm demands labor (which has decreasing marginal returns) up to the point where the real wage equals its marginal productivity. It can be seen that labor demand has a hyperbolic shape with respect to real wages $(w/P)$.
#
# \begin{align}
# \frac{\partial \pi}{\partial N} &= P \cdot (1-\alpha) \, A \left(\frac{K}{N}\right)^{\alpha} - w= 0 \\
# N^D &= K \cdot \left[\frac{(1-\alpha)A}{(w/P)}\right]^{1/\alpha}
# \end{align}
#
# The following code plots labor demand and shifts produced by changes in $K$ (in blue), $A$ (in red), and in $\alpha$ (in green). The first part of the code imports the required packages. The second part defines the parameters and vectors to be used. The third part of the code builds the labor demand function. The fourth section calculates labor demand and the effects of shocks (1) capital $(\Delta K = 20)$, (2) productivity $(\Delta A = 20)$, and (3) output elasticity of capital $(\Delta \alpha = 0.2)$. The fifth part of the code plots labor demand and the shock effects.
# +
# The bare strings below ("1|...", "2|...") are section banners: evaluated
# and discarded, they serve only as visual separators in the script.
"1|IMPORT PACKAGES"
import numpy as np # Package for scientific computing with Python
import matplotlib.pyplot as plt # Matplotlib is a 2D plotting library
"2|DEFINE PARAMETERS AND ARRAYS"
# Parameters
size = 50 # Real wage domain
K = 20 # Capital stock
A = 20 # Technology
alpha = 0.6 # Output elasticity of capital
# Arrays
rW = np.arange(1, size) # Real wage grid: 1, 2, ..., size-1
"3|LABOR DEMAND FUNCTION"
def Ndemand(A, K, rW, alpha):
    """Labor demand of the representative firm.

    Implements the first-order condition N^D = K * [(1-alpha)*A / rW]**(1/alpha):
    labor is hired until its marginal product equals the real wage rW.
    Works elementwise when rW is a NumPy array.
    """
    mpl_ratio = (1 - alpha) * A / rW
    return K * mpl_ratio ** (1 / alpha)
"4|CALCULATE LABOR DEMAND AND SHOCK EFFECTS"
D_K = 20 # Shock to K
D_A = 20 # Shock to A
D_a = 0.2 # Shock to alpha
Nd = Ndemand(A , K , rW, alpha)
Nd_K = Ndemand(A , K+D_K, rW, alpha)
Nd_A = Ndemand(A+D_A, K , rW, alpha)
Nd_a = Ndemand(A , K , rW, alpha+D_a)
"5|PLOT LABOR DEMAND AND SHOCK EFFECTS"
xmax_v = np.zeros(4)
xmax_v[0] = np.max(Nd)
xmax_v[1] = np.max(Nd_K)
xmax_v[2] = np.max(Nd_A)
xmax_v[3] = np.max(Nd_a)
xmax = np.max(xmax_v)
v = [0, 30, 0, size] # Set the axes range
fig, ax = plt.subplots(figsize=(10, 8))
ax.set(title="LABOR DEMAND", xlabel=r'Nd', ylabel=r'w/P')
ax.grid()
ax.plot(Nd , rW, "k-", label="Labor demand", linewidth=3)
ax.plot(Nd_K, rW, "b-", label="Capital shock")
ax.plot(Nd_A, rW, "r-", label="Productivity shock")
ax.plot(Nd_a, rW, "g-", label="Output elasticity of K shock")
ax.yaxis.set_major_locator(plt.NullLocator()) # Hide ticks
ax.xaxis.set_major_locator(plt.NullLocator()) # Hide ticks
ax.legend()
plt.axis(v) # Use 'v' as the axes range
plt.show()
# -
# ## 2. LABOR SUPPLY
# Labor supply is derived from the consumer maximizing a constrained utility function. The consumer receives utility from consumption $(C)$ and leisure time $(L)$. While the profit function of the firm has an internal maximum, the utility function is strictly increasing on $C$ and $L$. Therefore, the utility maximization problem includes (1) a binding constraint and (2) the right mix of $C$ and $L$ that will depend on their relative prices.
#
# Assume a Cobb-Douglas utility function where $\beta$ is the consumption elasticity of utility.
#
# \begin{equation}
# U(C, L) = C^{\beta} L^{1-\beta}
# \end{equation}
#
# The individual faces the following budget constraint:
#
# \begin{align}
# C &= \left(\frac{w}{P} \right) (24 - L) + I \\
# C &= \underbrace{\left[I + 24 \left(\frac{w}{P} \right) \right]}_\text{intercept} - \underbrace{\left( \frac{w}{P} \right)}_\text{slope}L
# \end{align}
#
# where $24$ is the number of hours the individual can work in a given day and $I$ is other (non-labor) income.
#
# Before deriving labor supply $N^S$ we can plot the indifference curve between consumption and leisure with the budget constraint. "Solving" for $C$ for a given level of utility:
#
# \begin{equation}
# C = \left( \frac{\bar{U}}{L^{1-\beta}} \right)^{(1/\beta)}
# \end{equation}
#
# ---
# We can now maximize the utility with the budget constraint using a Lagrangian $\left(\Im\right)$:
# \begin{equation}
# \max_{\{C, L\}} \Im = C^{\beta} L^{1-\beta} + \lambda \left[C - I - \frac{w}{P} (24-L) \right]
# \end{equation}
#
# The FOC for $\Im$:
# \begin{cases}
# \Im_{L} = (1 - \beta) \left( \frac{C}{L} \right)^{\beta} - \lambda = 0 \\
# \Im_{C} = \beta \left( \frac{L}{C} \right)^{1-\beta} - \lambda \left(\frac{w}{P} \right) = 0 \\
# \Im_{\lambda} = C - I - \left(\frac{w}{P}\right) (24-L) = 0
# \end{cases}
#
# From the first two FOCs we get the known relationship $\frac{U_{L}}{U_{C}} = \frac{w/P}{1}$
#
# Solving for $C$ in terms of $L$ yields $C = \frac{\beta}{1-\beta} \left(\frac{w}{P}\right)L$.
# Pluging this result in the third FOC and solving for $L$ yields $L^{*} = (1-\beta) \left[\frac{I + 24 (w/P)}{(w/P)} \right]$. With $L^*$ we can now get $C^* = \beta \left[I + 24 (w/P) \right]$. Next we plug-in $C^*$ and $L^*$ into the utility function.
#
# \begin{align}
# U(C^{*}, L^{*})^* &= \left(C^*\right)^{\beta} \left(L^*\right)^{1-\beta} \\
# U(C^{*}, L^{*})^* &= \left[\beta(I + 24 (w/P)\right]^{\beta} \left[(1-\beta) \frac{I+24(w/P)}{(w/P)} \right]^{1-\beta}
# \end{align}
#
# Note that if $I=0$ then $L^*$ and $C^*$ are fixed quantities that depend on the value of $\beta$.
#
# Using the lagrangian method also allows to find the "optimal" value of $\lambda$ or the "shadow price":
#
# \begin{align}
# \lambda^* &= (1-\beta) \cdot \left(\frac{C^*}{L^*} \right)^{\beta} \\
# \lambda^* &= (1-\beta) \cdot \left[\frac{\beta \left(I+24(w/P)\right)w}{(1-\beta)(I+24(w/P)} \right]^{\beta} \\
# \lambda^* &= (1-\beta) \cdot \left(\frac{\beta}{1-\beta} \frac{w}{P} \right)^{\beta}
# \end{align}
#
# Now we can use this information to plot the indifference curve with `matplotlib`. Note that the code calculates $U^*$, $L^*$, and $C^*$ and uses these values in the graph. The first part of the code imports the required packages. The second part of the code defines needed parameters and vectors. The third part of the code calculates $U^*$, $L^*$, $C^*$, and builds the functions for the indifference curve and the budget constraint. The fourth part of the code builds the plot.
# +
"1|IMPORT PACKAGES"
import numpy as np # Package for scientific computing with Python
import matplotlib.pyplot as plt # Matplotlib is a 2D plotting library
"2|DEFINE PARAMETERS AND ARRAYS"
T = 25 # Available hours to work
beta = 0.7 # Utility elasticity of consumption
I = 50 # Non-labor income
L = np.arange(1, T) # Leisure-hours grid: 1, 2, ..., T-1 (arange excludes T)
rW = 25 # Real wage
"3|CALCULATE OPTIMAL VALUES AND DEFINE FUNCTIONS"
# Closed-form optimum from the Lagrangian derived in the text above:
# L* = (1-beta)*(I+24*rW)/rW, C* = beta*(I+24*rW), and U* evaluated at (C*,L*).
Ustar = (beta*(I+24*rW))**beta * ((1-beta)*(I+24*rW)/rW)**(1-beta)
Lstar = (1-beta)*((I+24*rW)/rW)
Cstar = beta*(I+24*rW)
def C_indiff(U, L, beta):
    """Consumption level that keeps utility at U for a given leisure L.

    Inverts the Cobb-Douglas utility U = C**beta * L**(1-beta) for C.
    """
    return (U / L**(1 - beta)) ** (1 / beta)
def Budget(I, rW, L):
    """Real income available when choosing L hours of leisure.

    Full income is non-labor income I plus 24 hours paid at the real wage rW;
    every hour of leisure forgoes rW of income.
    """
    full_income = I + 24 * rW
    return full_income - rW * L
B = Budget(I, rW, L)          # Budget constraint evaluated on the leisure grid
C = C_indiff(Ustar, L, beta)  # Indifference curve through the optimum

"4|PLOT THE INDIFFERENCE CURVE AND THE BUDDGET CONSTRAINT"
y_max = 2*Budget(I, rW, 0)  # Top of y-axis: twice full income (all hours worked)
v = [0, T, 0, y_max]        # Set the axes range
fig, ax = plt.subplots(figsize=(10, 8))
ax.set(title="INDIFFERENCE CURVE", xlabel="Leisure", ylabel="Real income")
ax.grid()
ax.plot(L, C, "g-", label="Indifference curve")
ax.plot(L, B, "k-", label="Budget constraint")
plt.axvline(x=T-1 , ymin=0, ymax=I/y_max, color='k')                 # Add non-labor income
plt.axvline(x=Lstar, ymin=0, ymax = Cstar/y_max, ls=':', color='k')  # Lstar
plt.axhline(y=Cstar, xmin=0, xmax = Lstar/T , ls=':', color='k')     # Cstar
plt.plot(Lstar, Cstar, 'bo')  # Mark the tangency (optimum) point
plt.text(0.1 , Cstar+5, np.round(Cstar, 1), color="k")   # Annotate C*
plt.text(Lstar+0.2, 10 , np.round(Lstar, 1), color="k")  # Annotate L*
ax.legend()
plt.axis(v)  # Use 'v' as the axes range
plt.show()
# -
# ---
# Labor supply $N^S$ is the number of hours **not spent** in leisure. Note that $N^S$ decreases with $I$ and increases with $(w/P)$.
#
# \begin{align}
# N^S &= 24 - L^* \\
# N^S &= 24 - (1-\beta) \left[\frac{I + 24 (w/P)}{(w/P)} \right]
# \end{align}
#
# The following code shows labor supply (in black) and shocks to non-labor income $\Delta I = 25$ (in blue) and to the consumption elasticity of utility $\Delta \beta = 0.10$ (in red). Note that in this construction $N^S$ does not bend backwards.
# +
"1|IMPORT PACKAGES"
import numpy as np               # Package for scientific computing with Python
import matplotlib.pyplot as plt  # Matplotlib is a 2D plotting library

"2|DEFINE PARAMETERS AND ARRAYS"
size = 50
T = 25                   # Available hours to work
beta = 0.6               # Utility elasticity of consumption
I = 50                   # Non-labor income
rW = np.arange(1, size)  # Vector of real wages (1 .. size-1)

"3|LABOR SUPPLY"
def Lsupply(rW, beta, I):
    """Hours of labor supplied at real wage rW.

    Labor supply is 24 hours minus the optimal leisure choice
    (1-beta) * (full income / real wage).
    """
    leisure = (1 - beta) * ((24 * rW + I) / rW)
    return 24 - leisure
D_I = 25   # Shock to non-labor income
D_b = 0.10 # Shock to beta
Ns = Lsupply(rW, beta , I)        # Baseline labor supply
Ns_b = Lsupply(rW, beta+D_b, I)   # Supply after the beta shock
Ns_I = Lsupply(rW, beta , I+D_I)  # Supply after the income shock

"4|PLOT LABOR SUPPLY"
y_max = np.max(Ns)
v = [0, T, 0, y_max]  # Set the axes range
fig, ax = plt.subplots(figsize=(10, 8))
ax.set(title="LABOR SUPPLY", xlabel="Work Hs.", ylabel=r'(w/P)')
ax.grid()
ax.plot(Ns , rW, "k", label="Labor supply", linewidth=3)
ax.plot(Ns_I, rW, "b", label="Non-labor income shock")
ax.plot(Ns_b, rW, "r", label="Consumption elasticy of utility shock")
ax.yaxis.set_major_locator(plt.NullLocator())  # Hide ticks
ax.xaxis.set_major_locator(plt.NullLocator())  # Hide ticks
ax.legend()
plt.axis(v)  # Use 'v' as the axes range
plt.show()
# -
# -
# ## 3. EQUILIBRIUM
# We can now calculate the equilibrium condition, the value of $(w/P)_0$ which makes $N^D\left(\left(\frac{w}{P}\right)_0\right) = N^S\left(\left(\frac{w}{P}\right)_0\right)$. Then, we can define a function $\Theta$ equal to zero at $\left(\frac{w}{P}\right)_0$:
#
# \begin{align}
# \Theta \left[ \left(\frac{w}{P}\right)_0\right] &= 0 = N^D \left[\left(\frac{w}{P} \right)_0\right] - N^S \left[\left(\frac{w}{P}\right)_0\right] \\
# \Theta \left[ \left(\frac{w}{P}\right)_0\right] &= 0 = \underbrace{\left[ K \cdot \left[\frac{(1-\alpha)A}{(w/P)}\right]^{1/\alpha} \right]}_{N^D} - \underbrace{\left[ 24 (1-\beta) \left[\frac{I + 24 (w/P)}{(w/P)} \right] \right]}_{N^S}
# \end{align}
#
# We can ask Python to calculate the value (root) of $\left( \frac{w}{P} \right)$ that makes $\Theta = 0$. For this we need the "root" function from the `SciPy` library. The code has four sections. Section 1 imports the required packages. Section 2 defines the parameters and arrays. Section 3 finds the equilibrium values. And section 4 plots the results.
# +
"1|IMPORT PACKAGES"
import numpy as np               # Package for scientific computing with Python
import matplotlib.pyplot as plt  # Matplotlib is a 2D plotting library
from scipy.optimize import root  # Package to find the roots of a function

"2|DEFINE PARAMETERS AND ARRAYS"
size = 50
T = 24  # Available hours to work
# Demand parameters
K = 20       # Capital stock
A = 20       # Total factor productivity
alpha = 0.6  # Output elasticity of capital
# Supply parameters
I = 50       # Non-labor income
beta = 0.6   # Utility elasticity of consumption
# Arrays
rW = np.arange(1, size)  # Real wage grid (1 .. size-1)

"3|OPTIMIZATION PROBLEM: FIND EQUILIBRIUM VALUES"
def Ndemand(A, K, rW, alpha):
    """Labor demand from the firm's FOC with Cobb-Douglas technology.

    Inverting the marginal product of labor condition gives
    N^D = K * ((1-alpha) * A / (w/P)) ** (1/alpha).
    """
    marginal_ratio = (1 - alpha) * A / rW
    return K * marginal_ratio ** (1 / alpha)
def Nsupply(rW, beta, I):
    """Hours of labor supplied: available hours T minus optimal leisure.

    Relies on the module-level T (available hours) defined in this cell's
    parameter section.
    """
    leisure = (1 - beta) * ((24 * rW + I) / rW)
    return T - leisure
def Eq_Wage(rW):
    """Excess labor demand at real wage rW; zero at the equilibrium wage.

    Uses the module-level parameters A, K, alpha, beta and I.
    """
    return Ndemand(A, K, rW, alpha) - Nsupply(rW, beta, I)
rW_0 = 10                             # Initial value (guess) for the root finder
rW_star = root(Eq_Wage, rW_0)         # Equilibrium: Wage (OptimizeResult; value in .x)
N_star = Nsupply(rW_star.x, beta, I)  # Equilibrium: Labor

"4|PLOT LABOR MARKET EQUILIBRIUM"
Nd = Ndemand(A, K, rW, alpha)
Ns = Nsupply(rW, beta, I)
y_max = rW_star.x*2  # Put the equilibrium wage at mid-height of the axis
v = [0, T, 0, y_max]  # Set the axes range
fig, ax = plt.subplots(figsize=(10, 8))
# NOTE(review): title says "LABOR SUPPLY" but the figure shows the full market equilibrium
ax.set(title="LABOR SUPPLY", xlabel="Work Hs.", ylabel=r'(w/P)')
ax.plot(Ns[1:T], rW[1:T], "k", label="Labor supply")
ax.plot(Nd[1:T], rW[1:T], "k", label="Labor demand")
plt.plot(N_star, rW_star.x, 'bo')  # Equilibrium point
plt.axvline(x=N_star , ymin=0, ymax=rW_star.x/y_max, ls=':', color='k')
plt.axhline(y=rW_star.x, xmin=0, xmax=N_star/T , ls=':', color='k')
plt.text(5 , 20, "Labor demand")
plt.text(19, 9, "Labor supply")
plt.text(0.2 , rW_star.x+0.5, np.round(rW_star.x, 1))  # Annotate equilibrium wage
plt.text(N_star+0.3, 0.3 , np.round(N_star, 1))        # Annotate equilibrium labor
plt.axis(v)  # Use 'v' as the axes range
plt.show()
|
labor.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Test att hämta bilder och söka i SPA
# * [Denna notebook](https://github.com/salgo60/spa2Commons/blob/main/Notebook/SPA%20test.ipynb)
#
# Tanken att se vilka möjligheter som finns
# * skapa ett script --> ett klick för att ladda upp bilder se test [spa2commons](https://commons.wikimedia.org/wiki/User:Salgo60/spa2commons.js) - GITHUB [salgo60/spa2Commons](https://github.com/salgo60/spa2Commons)
# * att ta en plats som en kyrkogård eller en koppling Litteraturbanken och se vad som kan matchas
# * att stämma av länkar SPA -> WIkipedia att dessa finns i WIkidata med länk tillbaka via [P4819](https://www.wikidata.org/wiki/Property:P4819?uselang=sv)
from datetime import datetime

# Record and show when this notebook run started.
start_time = datetime.now()
print("Last run: ", start_time)
# +
import urllib3, json
import pandas as pd

http = urllib3.PoolManager()  # Shared connection pool reused by every SPA request below

# NOTE(review): `url` is assigned three times; only the LAST assignment is in
# effect.  The first two lines appear to be kept as a record of alternative
# endpoints (latest.php and the punycode domain) — consider commenting them out.
url= "https://portrattarkiv.se/endpoints/latest.php"
url= "https://xn--portrttarkiv-kcb.se/endpoints/search.php"
url= "https://portrattarkiv.se/endpoints/search.php"
# -
# ### Test 1 <NAME> - exakt sökning
# +
# Test 1
# POST an exact-match search (first + last name) to the SPA search endpoint.
encoded_body = json.dumps({
    "limit": "40",
    "from": "0",
    "firstname": "<NAME> ",  # NOTE(review): trailing space is sent as-is — confirm SPA trims it
    "firstnameexact":True,
    "lastname":"Flygare",
    "lastnameexact":True,
    "facts":{},
    "factsExact":{},
    "all":None,
})
r = http.request('POST', url,
                 headers={'Content-Type': 'application/json'},
                 body=encoded_body)
data = json.loads(r.data.decode('utf-8'),)
print(r.status)  # HTTP status of the search request (200 expected)
# +
# Print id, name and birth year for every search hit, with a link to the SPA
# detail page.
# FIX: the original bare `except:` also swallowed KeyboardInterrupt/SystemExit
# and any real bug; the only expected failure here is a record missing one of
# the name/birth-year fields, i.e. KeyError.
for h in data["hits"]["hits"]:
    id = h["_id"]          # NOTE(review): shadows the `id` builtin
    source = h["_source"]
    #print(h)
    try:
        FirstName = source["FirstName"]
        LastName = source["LastName"]
        BirthYear = source["BirthYear"]
        print(id , " ", FirstName, " ", LastName, " - ", BirthYear)
        print("\t" + "https://portrattarkiv.se/details/" + id)
        # print("\t" + "https://portrattarkiv.se/endpoints/file.php?id=" + id)
    except KeyError:
        print("Error")
# print(h["_source"]["Path"])
# print(h["_source"]["Filename"])
# print(h["_source"]["FileHash"])
# -
import requests
import IPython.display as Disp
# +
from IPython.display import HTML, Image
def _src_from_data(data):
    """Base64-encode image bytes as a data URI for an HTML <img> element."""
    img_obj = Image(data=data)
    # Take the first image/* MIME representation IPython can produce.
    for bundle in img_obj._repr_mimebundle_():
        for mimetype, b64value in bundle.items():
            if mimetype.startswith('image/'):
                return f'data:{mimetype};base64,{b64value}'
    return None  # no image representation found
def gallery(images, row_height='auto'):
    """Shows a set of images in a gallery that flexes with the width of the notebook.

    Parameters
    ----------
    images: list of str or bytes
        URLs or bytes of images to display
    row_height: str
        CSS height value to assign to all images. Set to 'auto' by default to show images
        with their native dimensions. Set to a value like '250px' to make all rows
        in the gallery equal height.
    """
    def _figure_html(image):
        # Raw bytes are inlined as a data URI (no caption); URL strings keep
        # the URL itself as caption.
        if isinstance(image, bytes):
            src = _src_from_data(image)
            caption = ''
        else:
            src = image
            caption = f'<figcaption style="font-size: 0.6em">{image}</figcaption>'
        return f'''
        <figure style="margin: 5px !important;">
          <img src="{src}" style="height: {row_height}">
          {caption}
        </figure>
    '''

    figures = [_figure_html(image) for image in images]
    return HTML(data=f'''
    <div style="display: flex; flex-flow: row wrap; text-align: center;">
    {''.join(figures)}
    </div>
''')
# +
urlbasePic = "https://portrattarkiv.se/endpoints/file.php?id="  # Image endpoint; append record id

# Collect the picture URL for every hit of the previous search.
urls = []
for h in data["hits"]["hits"]:
    id = h["_id"]  # NOTE(review): shadows the `id` builtin
    source = h["_source"]
    urlPicture = urlbasePic + id
    urls.append(urlPicture)
# -
gallery(urls, row_height='100px')  # Render all portraits as a thumbnail gallery
# ### Test 2 <NAME> - exakt sökning
#
# +
# Test 2: exact-match search for August Strindberg, then show the portraits.
encoded_body = json.dumps({
    "limit": "40",
    "from": "0",
    "firstname": "August ",  # NOTE(review): trailing space is sent as-is
    "firstnameexact":True,
    "lastname":"Strindberg",
    "lastnameexact":True,
    "facts":{},
    "factsExact":{},
    "all":None,
})
r = http.request('POST', url,
                 headers={'Content-Type': 'application/json'},
                 body=encoded_body)
data = json.loads(r.data.decode('utf-8'),)
print(r.status)
urls = []
for h in data["hits"]["hits"]:
    id = h["_id"]
    source = h["_source"]
    urlPicture = urlbasePic + id
    urls.append(urlPicture)
gallery(urls, row_height='150px')
# -
# -
# ### Test 3 Thekla Engström - enkel sökning
#
# +
# Test 3: free-text ("all") search; print basic data and show the portraits.
encoded_body = json.dumps({
    "limit": "40",
    "from": "0",
    "all":"Thekla Engström",
})
r = http.request('POST', url,
                 headers={'Content-Type': 'application/json'},
                 body=encoded_body)
data = json.loads(r.data.decode('utf-8'),)
print(r.status)
urls = []
for h in data["hits"]["hits"]:
    id = h["_id"]
    source = h["_source"]
    try:
        urlPicture = urlbasePic + id
        urls.append(urlPicture)
        FirstName = source["FirstName"]
        LastName = source["LastName"]
        BirthYear = source["BirthYear"]
        print(id , " ", FirstName, " ", LastName, " - ", BirthYear)
    except:  # NOTE(review): bare except hides real errors; KeyError would suffice
        print("Error")
gallery(urls, row_height='150px')
# -
# -
# # Test 4 hitta personer inte kopplade till Wikidata/ Wikipedia
# Exempel
# * personer begravda på [Q26257009](https://www.wikidata.org/wiki/Q26257009?uselang=sv) = "<NAME>, Gävle"
# * personer begravda på [Q252312](https://www.wikidata.org/wiki/Q252312?uselang=sv) = "Norra begravningsplatsen"
#
# TODO: see
# * personer hos Litteraturbanken [Property:P5101](https://www.wikidata.org/wiki/Property:P5101?uselang=sv)
# * personer hos Svenskt Kvinnobiografiskt lexikon [Property:P4963](https://www.wikidata.org/wiki/Property:P4963?uselang=sv)
# * .....
#
# +
# pip install sparqlwrapper
# https://rdflib.github.io/sparqlwrapper/
import sys
from SPARQLWrapper import SPARQLWrapper, JSON

endpoint_url = "https://query.wikidata.org/sparql"

# wd:Q26257009 = Gamla begravningsplatsen, Gävle; wd:Q252312 = Norra begravningsplatsen.
# People buried (P119) at Norra begravningsplatsen that have NO SPA id (P4819).
query = """SELECT (CONCAT (?itemLabel," ",str(year(?birthDate))) AS ?search) ?item ?itemLabel
(str(year(?birthDate)) AS ?year)
WHERE {
?item wdt:P119 wd:Q252312.
minus {?item wdt:P4819 ?c }
OPTIONAL { ?item wdt:P569 ?birthDate. }
SERVICE wikibase:label { bd:serviceParam wikibase:language "sv", "en". }
} order by ?itemLabel """

# Same as above, but also excludes items that already have an image (P18)
# and keeps only people born before 1880 (despite the "1890" in the name).
queryNoPicNorra1890 = """SELECT (CONCAT (?itemLabel," ",str(year(?birthDate))) AS ?search) ?item ?itemLabel
(str(year(?birthDate)) AS ?year)
WHERE {
?item wdt:P119 wd:Q252312.
minus {?item wdt:P4819 ?c }
minus {?item wdt:P18 ?d }
OPTIONAL { ?item wdt:P569 ?birthDate. }
SERVICE wikibase:label { bd:serviceParam wikibase:language "sv", "en". }
FILTER ( YEAR(?birthDate) < 1880)
} order by ?itemLabel """
def get_sparql_dataframe(endpoint_url, query):
    """
    Helper function to convert SPARQL results into a Pandas data frame.
    """
    # Wikidata asks clients to send an identifying user agent.
    user_agent = "salgo60/%s.%s" % (sys.version_info[0], sys.version_info[1])
    client = SPARQLWrapper(endpoint_url, agent=user_agent)
    client.setQuery(query)
    client.setReturnFormat(JSON)
    payload = json.load(client.query().response)

    cols = payload['head']['vars']
    # Missing bindings become None so every row has one cell per column.
    rows = [
        [binding.get(c, {}).get('value') for c in cols]
        for binding in payload['results']['bindings']
    ]
    return pd.DataFrame(rows, columns=cols)
#SPAdf = get_sparql_dataframe(endpoint_url, query)
# Fetch people without SPA link or image, born before 1880.
SPAdf = get_sparql_dataframe(endpoint_url, queryNoPicNorra1890)
SPAdf.info()
# -
SPAdf  # Display the result table

SPAdetail = "https://portrattarkiv.se/details/"
# For every Wikidata person, query SPA by name + birth year and print
# candidate matches with their Elasticsearch relevance score.
for index, row in SPAdf.iterrows():
    print("\n",row["search"],row["item"])
    encoded_body = json.dumps({
        "limit": "5",
        "from": "0",
        "birthyear":row["year"],
        "all":row["search"]
    })
    r = http.request('POST', url,
                     headers={'Content-Type': 'application/json'},
                     body=encoded_body)
    if r.status != 200:
        print(r.status)
        continue
    data = json.loads(r.data.decode('utf-8'),)
    urls = []
    for h in data["hits"]["hits"]:
        id = h["_id"]  # NOTE(review): shadows the `id` builtin
        #print(h)
        source = h["_source"]
        try:
            urlPicture = urlbasePic + id
            urls.append(urlPicture)
            score = h["_score"]
            FirstName = source["FirstName"]
            LastName = source["LastName"]
            BirthYear = source["BirthYear"]
            print("\t\t",score,FirstName, " ", LastName, " - ", BirthYear,SPAdetail+id, "\t", )
        except:  # NOTE(review): bare except; KeyError (missing fields) is the expected failure
            print("Error")

SPAdf.columns   # Column overview
SPAdf["item"]   # Wikidata item URIs
# ## Test 5 Hitta alla länkar till sv:Wikipedia i SPA
#
# Finns enormt många poster I SPA som länkar Wikipedia
# * sök fram alla
# * hämta Wikidata objektet för länken till Wikipedia
# * kolla om SPA finns kopplat [P4819](https://www.wikidata.org/wiki/Property:P4819?uselang=sv)
#
# * avancerad [sökning](https://portrattarkiv.se/advancedsearch) ger 16283 träffar
# +
from tqdm.notebook import tqdm

SPAdetail = "https://portrattarkiv.se/details/"
print(url)
#print("\n",row["search"],row["item"])

# Page through every SPA record that links Swedish Wikipedia, year by year.
# FIX: `df` used to be reassigned on EVERY loop iteration, so after the loops
# it only held the last page fetched; collect each page and concatenate once.
limit = 10000
frames = []
for year in range(1780,1910):
    for start in range(0,10000,limit):
        print("\n\nyear", year,"\tfrom: ",start,"\tlimit",limit,"")
        encoded_body = json.dumps({
            "limit": limit,
            "from": start,
            # NOTE(review): other cells use "birthyear" — confirm "year" is a valid filter
            "year": year,
            "facts":{"URL":"https://sv.wikipedia.org/wiki"}
        })
        r = http.request('POST', url,
                         headers={'Content-Type': 'application/json'},
                         body=encoded_body)
        print("http status ", r.status)
        if r.status != 200:
            print("Exit ",r.status)
            print(r)
            break  # NOTE: only exits the inner paging loop, not the year loop
        data = json.loads(r.data.decode('utf-8'),)
        print("\nTotal: ",data["hits"]["total"])
        print(data["_shards"])
        print("Hits: ",len(data["hits"]["hits"]))
        frames.append(pd.json_normalize(data["hits"]["hits"]))
# All pages combined; empty frame if every request failed.
df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
# -
# -
df.shape
print(data.keys())

# Show all rows when displaying data frames.
# NOTE(review): the next two lines are duplicates — one pd.set_option is enough.
pd.set_option('display.max_rows', None)
pd.set_option("display.max_rows", None)
#for col_name in df.columns:
#    print(col_name)
# _id _source.Facts.URL
#df["_id","_source.Facts.URL"]
#df["_id","_source.Facts.URL"]
# Keep only the record id and its external URLs for the link check below.
listdf = df[["_id","_source.Facts.URL"]]
listdf.shape
#listdf[listdf['_source.Facts.URL'].str.contains("wikipedia")]
#listdf[listdf['_source.Facts.URL'].isnull()]
# Går igenom alla Wikipedia länkar i listdf och hämtar ut WIkidata posten och ser om den länkar SPA
# +
from wikidata.client import Client
from tqdm.notebook import tqdm
def checkSPA(wikidata_id):
    """Return the item's SPA claim (P4819), or False if the item has none.

    Looks the entity up live on Wikidata via the `wikidata` client.
    """
    client = Client()
    entity = client.get(wikidata_id, load=True)
    try:
        spa_prop = entity[client.get('P4819')]
    # FIX: was a bare `except:`, which also swallows KeyboardInterrupt /
    # SystemExit.  Exception still covers the missing-claim KeyError and
    # client lookup errors without hiding interpreter-level signals.
    except Exception:
        return False
    return spa_prop
def getWD (url):
    """Resolve a Swedish-Wikipedia article URL to its Wikidata Q-id.

    Returns the Q-id string, or False if the lookup fails.
    Uses the module-level urllib3 pool `http`.
    """
    baseUrl = "https://sv.wikipedia.org/w/api.php?action=query&prop=pageprops|wbentityusage&titles="
    # NOTE(review): the title is not URL-encoded; titles with '&' or '#'
    # would break the query string — confirm inputs are plain article names.
    title = url.replace("https://sv.wikipedia.org/wiki/","")
    urlWD = baseUrl + title
    # FIX: HTTP method names are case-sensitive and urllib3 sends them
    # verbatim; 'Get' is not a standard verb — use 'GET'.
    wdr = http.request('GET', urlWD + "&format=json",
                       headers={'Content-Type': 'application/json'})
    if wdr.status != 200:
        print("Error ", wdr.status)
        return False
    datawd = json.loads(wdr.data.decode('utf-8'),)
    pg_dict = datawd['query']['pages']
    pg_key = list(pg_dict.keys())[0]
    # FIX: missing pages have no 'pageprops'; fail soft like the other error
    # paths instead of raising KeyError into the caller.
    try:
        return pg_dict[pg_key]['pageprops']['wikibase_item']
    except KeyError:
        return False
# For every SPA record linking Swedish Wikipedia, resolve the article to a
# Wikidata item and report items that do NOT yet link back to SPA (P4819).
for index, row in tqdm(listdf.iterrows(),total=listdf.shape[0]):
    #print(row['_id'], row['_source.Facts.URL'])
    for n in row['_source.Facts.URL']:
        if "wikipedia" in n.lower():
            try:
                wdrec = getWD(n)
                if checkSPA(wdrec) == False:
                    print ("Todo",row['_id'],wdrec,n)
                #print (n,"Value :",getWD(n))
            except:  # NOTE(review): bare except/pass silently drops lookup failures
                pass
# -
# ## Test 6 hitta icke kopplade personer P4819 hos Litteraturbanken P5101
#
# Testar att söka med mer parametrar för att få bättre precision i rankingen
# +
# query to get people
# Everyone with a Litteraturbanken id (P5101) but no SPA id (P4819), with
# given/family name and birth date so SPA can be searched with more fields.
query = """SELECT (CONCAT (?itemLabel," ",str(year(?birthDayWD))) AS ?search)
?birthDayWD
?firstnameLabel ?lastnameLabel
?item ?itemLabel
(str(year(?birthDayWD)) AS ?year)
WHERE {
?item wdt:P5101 ?Litt.
minus {?item wdt:P4819 ?c }
OPTIONAL { ?item wdt:P735 ?firstname. }
OPTIONAL { ?item wdt:P734 ?lastname. }
OPTIONAL { ?item wdt:P569 ?birthDayWD. }
SERVICE wikibase:label { bd:serviceParam wikibase:language "sv", "en". }
} """
SPALittdf = get_sparql_dataframe(endpoint_url, query)
SPALittdf.info()
# -
SPAdetail = "https://portrattarkiv.se/details/"

def getdfScore(panddf):
    """Search SPA for every person in `panddf` and return a DataFrame of
    scored candidate matches.

    `panddf` must have the columns produced by the SPARQL query above:
    search, item, firstnameLabel, lastnameLabel, birthDayWD, year.
    Uses the module-level `http`, `url` and `tqdm`.

    Fixes relative to the original:
    * the SPA hit id was concatenated via the *builtin* `id` (TypeError);
      the hit's `spaid` is used instead,
    * SPA returns keys "FirstName"/"LastName"/"BirthYear" (see the cells
      above), not "FirstNameSPA"/... which raised KeyError on every hit,
    * rows referenced the nonexistent column "BirthYearWD"; the SPARQL
      frame's column is "birthDayWD",
    * rows are collected in a list (DataFrame.append is deprecated/removed
      in modern pandas),
    * the error handler no longer references `__file__`, which is undefined
      inside a notebook.
    """
    collected = []  # one dict per candidate match
    for index, row in tqdm(panddf.iterrows(), total=panddf.shape[0]):
        allString = row["search"]
        wd = row["item"]
        print("\n", allString, wd)
        encoded_body = json.dumps({
            "limit": "5",
            "from": "0",
            "firstname": row["firstnameLabel"],
            "lastname": row["lastnameLabel"],
            "birthyear": row["year"],
            "all": allString
        })
        r = http.request('POST', url,
                         headers={'Content-Type': 'application/json'},
                         body=encoded_body)
        if r.status != 200:
            print(r.status)
            continue
        data = json.loads(r.data.decode('utf-8'),)
        for h in data["hits"]["hits"]:
            spaid = h["_id"]
            source = h["_source"]
            try:
                score = h["_score"]
                FirstNameSPA = source["FirstName"]
                LastNameSPA = source["LastName"]
                BirthYearSPA = source["BirthYear"]
                print("\t\t", score, FirstNameSPA, " ", LastNameSPA, " - ",
                      BirthYearSPA, SPAdetail + spaid, "\t", )
                collected.append({
                    'all': allString,
                    'wd': wd,
                    'spaid': spaid,
                    'score': score,
                    'FirstNameWD': row["firstnameLabel"],
                    'LastNameWD': row["lastnameLabel"],
                    'BirthDayWD': row["birthDayWD"],
                    'FirstNameSPA': FirstNameSPA,
                    'LastNameSPA': LastNameSPA,
                    'BirthYearSPA': BirthYearSPA
                })
            except Exception as e:
                print(f"{type(e).__name__}: {e}")
    return pd.DataFrame(collected, columns=['all', 'wd', 'spaid', 'score',
                                            'FirstNameWD', 'LastNameWD', 'BirthDayWD',
                                            'FirstNameSPA', 'LastNameSPA', 'BirthYearSPA'])
dfScoreSPALitt = getdfScore(SPALittdf)
dfScoreSPALitt.info()
dfScoreSPALitt.sort_values('score', ascending=False)  # NOTE: not assigned — display only
dfScoreSPALitt.groupby("wd")  # NOTE: bare GroupBy object — display only
# +
#Group by WIkidata Q number and get highest score
#create a helper column with max
dfScoreSPALitt['max'] = dfScoreSPALitt.groupby('wd')['score'].transform('max')
# NOTE(review): selecting several columns with a bare tuple after groupby is
# removed in modern pandas (needs a list: [['score','max',...]]), and
# 'FirstName'/'LastName'/'BirthYear' do not match this frame's column names
# (FirstNameWD/FirstNameSPA etc.) — this line likely fails and needs fixing.
dfScoreSPALittMax = dfScoreSPALitt.groupby('wd')['score','max','spaid','FirstName','LastName','BirthYear'].apply(lambda x: x.nlargest(1, columns=['score'])).sort_values(['max','wd','score'], ascending=False)
dfScoreSPALittMax
# -
dfScoreSPALittMax.to_csv("Littraturbanken_max.csv")
|
Notebook/SPA test.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf

#Default Graph
# Build a tiny graph: c = a + b with two constant nodes.
# NOTE(review): tf.Session and tf.summary.FileWriter are TensorFlow 1.x APIs;
# under TF 2.x this cell needs tf.compat.v1 (with eager execution disabled).
a = tf.constant(2)
b = tf.constant(3)
c = tf.add(a, b)

# +
#Session to fetch the value of c
# Also writes the graph to ./graphs so it can be inspected with TensorBoard.
with tf.Session() as sess:
    writer = tf.summary.FileWriter('./graphs', sess.graph)
    sess.run(c)
    writer.close()
# -
|
tFExplore.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# 
# # Train and deploy a model
# _**Create and deploy a model directly from a notebook**_
#
# ---
# ---
#
# ## Contents
# 1. [Introduction](#Introduction)
# 1. [Setup](#Setup)
# 1. [Data](#Data)
# 1. [Train](#Train)
# 1. Viewing run results
# 1. Simple parameter sweep
# 1. Viewing experiment results
# 1. Select the best model
# 1. [Deploy](#Deploy)
# 1. Register the model
# 1. Create a scoring file
# 1. Describe your environment
# 1. Describe your target compute
# 1. Deploy your webservice
# 1. Test your webservice
# 1. Clean up
# 1. [Next Steps](#Next%20Steps)
#
# ---
#
# ## Introduction
# Azure Machine Learning provides capabilities to control all aspects of model training and deployment directly from a notebook using the AML Python SDK. In this notebook we will
# * connect to our AML Workspace
# * create an experiment that contains multiple runs with tracked metrics
# * choose the best model created across all runs
# * deploy that model as a service
#
# In the end we will have a model deployed as a web service which we can call from an HTTP endpoint
# ---
#
# ## Setup
# If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you have completed the [Configuration](../../../configuration.ipynb) notebook to set up your Azure Machine Learning workspace and ensure other common prerequisites are met. From the configuration, the important sections are the workspace configuration and ACI registration.
#
# We will also need the following libraries installed in our conda environment. If these are not installed, use the following command to do so and restart the notebook.
# ```shell
# (myenv) $ conda install -y matplotlib tqdm scikit-learn
# ```
#
# For this notebook we need the Azure ML SDK and access to our workspace. The following cell imports the SDK, checks the version, and accesses our already configured AzureML workspace.
# + tags=["install"]
import azureml.core
from azureml.core import Experiment, Workspace

# Check core SDK version number
print("This notebook was created using version 1.0.2 of the Azure ML SDK")
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
print("")

# Load the workspace from the saved config.json (created by the Configuration notebook).
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep='\n')
# -
# ---
#
# ## Data
# We will use the diabetes dataset for this experiment, a well-known small dataset that comes with scikit-learn. This cell loads the dataset and splits it into random training and testing sets.
#
# +
from sklearn.datasets import load_diabetes
from sklearn.linear_model import Ridge
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# FIX: sklearn.externals.joblib was deprecated in scikit-learn 0.21 and
# removed in 0.23 — import the standalone joblib package, falling back to
# the old location for environments pinned to an older scikit-learn.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib

# Load the diabetes regression dataset and make a reproducible 80/20 split.
X, y = load_diabetes(return_X_y = True)
columns = ['age', 'gender', 'bmi', 'bp', 's1', 's2', 's3', 's4', 's5', 's6']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
data = {
    "train":{"X": X_train, "y": y_train},
    "test":{"X": X_test, "y": y_test}
}
print ("Data contains", len(data['train']['X']), "training samples and",len(data['test']['X']), "test samples")
# -
# ---
# ## Train
#
# Let's use scikit-learn to train a simple Ridge regression model. We use AML to record interesting information about the model in an Experiment. An Experiment contains a series of trials called Runs. During this trial we use AML in the following way:
# * We access an experiment from our AML workspace by name, which will be created if it doesn't exist
# * We use `start_logging` to create a new run in this experiment
# * We use `run.log()` to record a parameter, alpha, and an accuracy measure - the Mean Squared Error (MSE) to the run. We will be able to review and compare these measures in the Azure Portal at a later time.
# * We store the resulting model in the **outputs** directory, which is automatically captured by AML when the run is complete.
# * We use `run.complete()` to indicate that the run is over and results can be captured and finalized
# + tags=["local run", "outputs upload"]
# Get an experiment object from Azure Machine Learning
experiment = Experiment(workspace=ws, name="train-within-notebook")

# Create a run object in the experiment
run = experiment.start_logging()
# Log the algorithm parameter alpha to the run
run.log('alpha', 0.03)

# Create, fit, and test the scikit-learn Ridge regression model
regression_model = Ridge(alpha=0.03)
regression_model.fit(data['train']['X'], data['train']['y'])
preds = regression_model.predict(data['test']['X'])

# Output the Mean Squared Error to the notebook and to the run
print('Mean Squared Error is', mean_squared_error(data['test']['y'], preds))
run.log('mse', mean_squared_error(data['test']['y'], preds))

# Save the model to the outputs directory for capture
# NOTE(review): assumes an `outputs/` directory exists in the working directory.
joblib.dump(value=regression_model, filename='outputs/model.pkl')

# Complete the run
run.complete()
# -
# ### Viewing run results
# Azure Machine Learning stores all the details about the run in the Azure cloud. Let's access those details by retrieving a link to the run using the default run output. Clicking on the resulting link will take you to an interactive page presenting all run information.
run
# ### Simple parameter sweep
# Now let's take the same concept from above and modify the **alpha** parameter. For each value of alpha we will create a run that will store metrics and the resulting model. In the end we can use the captured run history to determine which model was the best for us to deploy.
#
# Note that by using `with experiment.start_logging() as run` AML will automatically call `run.complete()` at the end of each loop.
#
# This example also uses the **tqdm** library to provide a thermometer feedback
# +
import numpy as np
from tqdm import tqdm

model_name = "model.pkl"

# alpha values from 0.0 up to (but excluding) 1.0 in 0.05 steps
alphas = np.arange(0.0, 1.0, 0.05)

# try a bunch of alpha values in a Linear Regression (Ridge) model
for alpha in tqdm(alphas):
    # create a bunch of runs, each train a model with a different alpha value
    # (the `with` block calls run.complete() automatically on exit)
    with experiment.start_logging() as run:
        # Use Ridge algorithm to build a regression model
        regression_model = Ridge(alpha=alpha)
        regression_model.fit(X=data["train"]["X"], y=data["train"]["y"])
        preds = regression_model.predict(X=data["test"]["X"])
        mse = mean_squared_error(y_true=data["test"]["y"], y_pred=preds)

        # log alpha, mean_squared_error and feature names in run history
        run.log(name="alpha", value=alpha)
        run.log(name="mse", value=mse)

        # Save the model to the outputs directory for capture (one file per run)
        joblib.dump(value=regression_model, filename='outputs/model.pkl')
# -
# ### Viewing experiment results
# Similar to viewing the run, we can also view the entire experiment. The experiment report view in the Azure portal lets us view all the runs in a table, and also allows us to customize charts. This way, we can see how the alpha parameter impacts the quality of the model
# now let's take a look at the experiment in Azure portal.
experiment
# ### Select the best model
# Now that we've created many runs with different parameters, we need to determine which model is the best for deployment. For this, we will iterate over the set of runs. From each run we will take the *run id* using the `id` property, and examine the metrics by calling `run.get_metrics()`.
#
# Since each run may be different, we do need to check if the run has the metric that we are looking for, in this case, **mse**. To find the best run, we create a dictionary mapping the run id's to the metrics.
#
# Finally, we use the `tag` method to mark the best run to make it easier to find later.
# +
runs = {}
run_metrics = {}

# Create dictionaries containing the runs and the metrics for all runs containing the 'mse' metric
for r in tqdm(experiment.get_runs()):
    metrics = r.get_metrics()
    if 'mse' in metrics.keys():
        runs[r.id] = r
        run_metrics[r.id] = metrics

# Find the run with the best (lowest) mean squared error and display the id and metrics
best_run_id = min(run_metrics, key = lambda k: run_metrics[k]['mse'])
best_run = runs[best_run_id]
print('Best run is:', best_run_id)
print('Metrics:', run_metrics[best_run_id])

# Tag the best run for identification later
best_run.tag("Best Run")
# -
# ---
# ## Deploy
# Now that we have trained a set of models and identified the run containing the best model, we want to deploy the model for real time inferencing. The process of deploying a model involves
# * registering a model in your workspace
# * creating a scoring file containing init and run methods
# * creating an environment dependency file describing packages necessary for your scoring file
# * creating a docker image containing a properly described environment, your model, and your scoring file
# * deploying that docker image as a web service
# ### Register a model
# We have already identified which run contains the "best model" by our evaluation criteria. Each run has a file structure associated with it that contains various files collected during the run. Since a run can have many outputs we need to tell AML which file from those outputs represents the model that we want to use for our deployment. We can use the `run.get_file_names()` method to list the files associated with the run, and then use the `run.register_model()` method to place the model in the workspace's model registry.
#
# When using `run.register_model()` we supply a `model_name` that is meaningful for our scenario and the `model_path` of the model relative to the run. In this case, the model path is what is returned from `run.get_file_names()`
# + tags=["query history"]
# View the files in the run
for f in best_run.get_file_names():
    print(f)

# Register the model with the workspace; model_path is relative to the run's outputs
model = best_run.register_model(model_name='best_model', model_path='outputs/model.pkl')
# -
# Once a model is registered, it is accessible from the list of models on the AML workspace. If you register models with the same name multiple times, AML keeps a version history of those models for you. The `Model.list()` lists all models in a workspace, and can be filtered by name, tags, or model properties.
# + tags=["register model from history"]
# Find all models called "best_model" and display their version numbers
from azureml.core.model import Model
models = Model.list(ws, name='best_model')
for m in models:
    print(m.name, m.version)
# -
# ### Create a scoring file
#
# Since your model file can essentially be anything you want it to be, you need to supply a scoring script that can load your model and then apply the model to new data. This script is your 'scoring file'. This scoring file is a python program containing, at a minimum, two methods `init()` and `run()`. The `init()` method is called once when your deployment is started so you can load your model and any other required objects. This method uses the `get_model_path` function to locate the registered model inside the docker container. The `run()` method is called interactively when the web service is called with one or more data samples to predict.
#
# The scoring file used for this exercise is [here](score.py).
#
# ### Describe your environment
#
# Each modelling process may require a unique set of packages. Therefore we need to create a dependency file providing instructions to AML on how to construct a docker image that can support the models and any other objects required for inferencing. In the following cell, we create an environment dependency file, *myenv.yml* that specifies which libraries are needed by the scoring script. You can create this file manually, or use the `CondaDependencies` class to create it for you.
#
# Next we use this environment file to describe the docker container that we need to create in order to deploy our model. This container is created using our environment description and includes our scoring script.
# +
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.image import ContainerImage
# Create an empty conda environment and add the scikit-learn package,
# the only extra dependency the scoring script needs.
env = CondaDependencies()
env.add_conda_package("scikit-learn")
# Display the environment
print(env.serialize_to_string())
# Write the environment to disk so the image build can pick it up
with open("myenv.yml","w") as f:
    f.write(env.serialize_to_string())
# Create a configuration object indicating how our deployment container needs to be created:
# score.py provides init()/run(), myenv.yml the conda dependencies.
image_config = ContainerImage.image_configuration(execution_script="score.py",
                                                  runtime="python",
                                                  conda_file="myenv.yml")
# -
# ### Describe your target compute
# In addition to the container, we also need to describe the type of compute we want to allocate for our webservice. In in this example we are using an [Azure Container Instance](https://azure.microsoft.com/en-us/services/container-instances/) which is a good choice for quick and cost-effective dev/test deployment scenarios. ACI instances require the number of cores you want to run and memory you need. Tags and descriptions are available for you to identify the instances in AML when viewing the Compute tab in the AML Portal.
#
# For production workloads, it is better to use [Azure Kubernetes Service (AKS)](https://azure.microsoft.com/en-us/services/kubernetes-service/) instead. Try [this notebook](11.production-deploy-to-aks.ipynb) to see how that can be done from Azure ML.
#
# + tags=["deploy service", "aci"]
from azureml.core.webservice import AciWebservice
# A small dev/test ACI instance: 1 core, 1 GB RAM.
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                               memory_gb=1,
                                               tags={'sample name': 'AML 101'},
                                               description='This is a great example.')
# -
# ### Deploy your webservice
# The final step to deploying your webservice is to call `WebService.deploy_from_model()`. This function uses the deployment and image configurations created above to perform the following:
# * Build a docker image
# * Deploy to the docker image to an Azure Container Instance
# * Copy your model files to the Azure Container Instance
# * Call the `init()` function in your scoring file
# * Provide an HTTP endpoint for scoring calls
#
# The `deploy_from_model` method requires the following parameters
# * `workspace` - the workspace containing the service
# * `name` - a unique name used to identify the service in the workspace
# * `models` - an array of models to be deployed into the container
# * `image_config` - a configuration object describing the image environment
# * `deployment_config` - a configuration object describing the compute type
#
# **Note:** The web service creation can take several minutes.
# + tags=["deploy service", "aci"]
# %%time
from azureml.core.webservice import Webservice
# Create the webservice using all of the precreated configurations and our best model.
# This builds the docker image, deploys it to ACI, and wires up the HTTP endpoint.
service = Webservice.deploy_from_model(name='my-aci-svc',
                                       deployment_config=aciconfig,
                                       models=[model],
                                       image_config=image_config,
                                       workspace=ws)
# Wait for the service deployment to complete while displaying log output
service.wait_for_deployment(show_output=True)
# -
#
# ### Test your webservice
# Now that your web service is running you can send JSON data directly to the service using the `run` method. This cell pulls the first test sample from the original dataset into JSON and then sends it to the service.
# + tags=["deploy service", "aci"]
import json
# scrape the first row from the test set (keep 2-D shape with 0:1 slicing)
test_samples = json.dumps({"data": X_test[0:1, :].tolist()})
# score on our service
service.run(input_data = test_samples)
# -
# This cell shows how you can send multiple rows to the webservice at once. It then calculates the residuals - that is, the errors - by subtracting out the actual values from the results. These residuals are used later to show a plotted result.
# + tags=["deploy service", "aci"]
# score the entire test set in a single call
test_samples = json.dumps({'data': X_test.tolist()})
result = service.run(input_data = test_samples)
# residual = prediction - actual; used for the residual plots below
residual = result - y_test
# -
# This cell shows how you can use the `service.scoring_uri` property to access the HTTP endpoint of the service and call it using standard POST operations.
# + tags=["deploy service", "aci"]
import requests
# use the first row from the test set again
test_samples = json.dumps({"data": X_test[0:1, :].tolist()})
# create the required header
headers = {'Content-Type':'application/json'}
# post the request to the service and display the raw JSON response
resp = requests.post(service.scoring_uri, test_samples, headers = headers)
print(resp.text)
# -
# ### Residual graph
# One way to understand the behavior of your model is to see how the data performs against data with known results. This cell uses matplotlib to create a histogram of the residual values, or errors, created from scoring the test samples.
#
# A good model should have residual values that cluster around 0 - that is, no error. Observing the resulting histogram can also show you if the model is skewed in any particular direction.
# +
# %matplotlib inline
import matplotlib.pyplot as plt

# Left panel: raw residuals per test sample; right panel: their histogram
# (drawn twice: a step outline plus a translucent fill).
fig, (ax_scatter, ax_hist) = plt.subplots(
    1, 2, gridspec_kw={'width_ratios': [3, 1], 'wspace': 0, 'hspace': 0})
fig.suptitle('Residual Values', fontsize=18)
fig.set_figheight(6)
fig.set_figwidth(14)

ax_scatter.plot(residual, 'bo', alpha=0.4)
ax_scatter.plot([0, 90], [0, 0], 'r', lw=2)  # zero-error reference line
ax_scatter.set_ylabel('residue values', fontsize=14)
ax_scatter.set_xlabel('test data set', fontsize=14)

ax_hist.hist(residual, orientation='horizontal', color='blue', bins=10, histtype='step')
ax_hist.hist(residual, orientation='horizontal', color='blue', alpha=0.2, bins=10)
ax_hist.set_yticklabels([])
plt.show()
# -
# ### Clean up
# Delete the ACI instance to stop the compute and any associated billing.
# + tags=["deploy service", "aci"]
# %%time
# Tear down the ACI deployment so the container stops accruing charges.
service.delete()
# -
# ---
# ## Next Steps
# In this example, you created a series of models inside the notebook using local data, stored them inside an AML experiment, found the best one and deployed it as a live service! From here you can continue to use Azure Machine Learning in this regard to run your own experiments and deploy your own models, or you can expand into further capabilities of AML!
#
# If you have a model that is difficult to process locally, either because the data is remote or the model is large, try the [train-on-remote-vm](../train-on-remote-vm) notebook to learn about submitting remote jobs.
#
# If you want to take advantage of multiple cloud machines to perform large parameter sweeps try the [train-hyperparameter-tune-deploy-with-pytorch](../../training-with-deep-learning/train-hyperparameter-tune-deploy-with-pytorch
# ) sample.
#
# If you want to deploy models to a production cluster try the [production-deploy-to-aks](../../deployment/production-deploy-to-aks
# ) notebook.
|
how-to-use-azureml/training/train-within-notebook/train-within-notebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: test
# language: python
# name: test
# ---
# ## Visualize grid 2 in the real example
# +
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
import math
import time
from itertools import permutations
from functions import *
np.random.seed(1234)
# -
# Route endpoints as (longitude, latitude) tuples.
PATH_START = (121.46, 31.22)  # (CNWGQ: 31.22N, 121.46E)
PATH_END = (174.77, -36.87)   # (NZAKL: 36.87S 174.77E)
THRESHOLD = 0.5

# Deterministic alphabetical ordering of the per-grid output files.
file_list = sorted(os.listdir('out'))
# +
# Load every CSV in ./out as a numpy array.
grid_list = []
for csv_name in file_list:
    if not csv_name.endswith('.csv'):
        continue
    with open(os.path.join('out', csv_name), 'r') as handle:
        grid_list.append(np.genfromtxt(handle, delimiter=','))
# +
# genfromtxt returns a 1-D array for single-column files; promote those to
# shape (n, 1) so every grid is two-dimensional downstream.
grid_list_reshape = [g.reshape(-1, 1) if g.ndim == 1 else g for g in grid_list]
grid_list = grid_list_reshape
# +
# Thumbnail view: render each loaded grid as a heat map in a 5x5 panel.
fig, ax = plt.subplots(nrows=5, ncols=5, figsize=(16,12))
fig.subplots_adjust(wspace=0.3, hspace=0.3)
i = 0
for row in ax:
    for col in row:
        # Once every grid is drawn, blank the remaining axes so the figure
        # stays a clean 5x5 panel. Note i is not incremented here, so every
        # subsequent cell also takes this branch.
        if i == len(grid_list):
            col.plot()
            col.axis('off')
            continue
        col.imshow(grid_list[i], cmap='jet')
        #for (j,i),label in np.ndenumerate(grid_list[i]):
        #    col.text(i,j,int(label),ha='center',va='center')
        col.axis('off')
        i += 1
# +
import pickle

def _load_pickle(path):
    """Deserialize and return the single object stored at *path*."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)

# Restore the artifacts produced by the earlier grid-search run.
grid_info_list = _load_pickle('out/grid_info_list')
best_route_list = _load_pickle('out/best_route_list')
boot_sample_list = _load_pickle('out/boot_sample_list')
# -
def get_n_routes(grid):
    """Count the monotone lattice routes through *grid*.

    A route moves only right/down from the top-left to the bottom-right
    cell of an (r, c) grid, giving C(r + c - 2, r - 1) distinct routes.

    Parameters
    ----------
    grid : array-like with a 2-element ``.shape`` (e.g. ``np.ndarray``).

    Returns
    -------
    int
        Exact route count.
    """
    rows, cols = grid.shape[0], grid.shape[1]
    # math.comb uses exact integer arithmetic; the original float factorial
    # division loses precision (and eventually overflows float range) once
    # rows + cols - 2 grows large.
    return math.comb(rows + cols - 2, rows - 1)
# Sanity-check the route count on a random 9x9 grid: C(16, 8) = 12870.
nr, nc = (9,9)
X = np.random.randint(low=0, high=100, size=nc*nr).reshape(nr,nc)
get_n_routes(X)
# Switch to the second real grid from ./out.
X = grid_list[1]
X
# Unpack its metadata; presumably (start x/y, end x/y, x/y direction) —
# TODO confirm against how functions.py builds grid_info_list.
(x_s, y_s, x_e, y_e, x_d, y_d) = grid_info_list[1]
# Redundant re-assignment (X already is grid_list[1]); kept as-is.
X = grid_list[1]
# +
print([x_s, y_s, x_e, y_e, x_d, y_d], X.shape, get_n_routes(X))
start_time = time.perf_counter()
# Flip the grid so the search can always run in the canonical direction
# (a -1 direction flag means the route travels against that axis).
if x_d == -1:
    X = np.fliplr(X)
if y_d == -1:
    X = np.flipud(X)
# get_unique_routes / get_path_from_routes / bayesian_bootstrap /
# get_max_path_idx all come from `from functions import *` at the top.
unique_routes = get_unique_routes(X)
unique_paths = get_path_from_routes(X, unique_routes)
#path_info_list = get_path_info_from_matrix(X)
# 1000 Bayesian-bootstrap resamples; each resample returns the index of
# the winning (max-weight) path.
boot_sample = bayesian_bootstrap(X, get_max_path_idx, 1000, unique_paths)
# Tally how often each candidate route won across the resamples.
boot_sample_out = []
for idx in range(0,len(unique_routes)):
    boot_sample_out.append([idx, sum(np.asarray(boot_sample)==idx)])
boot_sample_out = np.vstack(boot_sample_out)
# Side effect: extends the previously pickled boot_sample_list in memory.
boot_sample_list.append(boot_sample_out)
#best_route = path_info_list[1][np.argmax(boot_sample_out[:,1])]
best_route = unique_paths[np.argmax(boot_sample_out[:,1])]
#best_route_list.append(best_route)
print('time (s): {}'.format(time.perf_counter()-start_time))
plt.bar(boot_sample_out[:,0], boot_sample_out[:,1])
plt.show()
# -
X
# Keep only candidate routes that won in more than 5 of the 1000 resamples.
support_counts = boot_sample_out[:, 1]
boot_sample_out_selected = boot_sample_out[support_counts > 5]
boot_sample_out_selected
# Bar chart of each surviving route's selection frequency, in percent.
route_labels = [str(route_idx) for route_idx in boot_sample_out_selected[:, 0]]
percentages = boot_sample_out_selected[:, 1] / 1000 * 100
plt.bar(route_labels, percentages)
plt.xticks(rotation=-90)
plt.xlabel('index')
plt.ylabel('percentage (%)')
plt.show()
def _dump_pickle(obj, path):
    """Serialize *obj* to *path* with pickle."""
    with open(path, 'wb') as handle:
        pickle.dump(obj, handle)

# Persist the selected bootstrap summary, then read it straight back.
_dump_pickle(boot_sample_out_selected, 'out_exp/grid2_example')
with open('out_exp/grid2_example', 'rb') as handle:
    grid2_example = pickle.load(handle)

# Every route whose bootstrap support passed the cut-off above.
available_route_list = [unique_paths[idx] for idx in grid2_example[:, 0]]
_dump_pickle(available_route_list, 'out_exp/grid2_available_route_list')

# The single best route, stored as a one-element list for consistency.
available_route_list = [best_route]
_dump_pickle(available_route_list, 'out_exp/grid2_best_route_list')

# Save the grid itself for the experiment notebooks.
X = grid_list[1]
exp_grid_list = [X]
_dump_pickle(exp_grid_list, 'out_exp/exp_grid_list2')
|
real_data codes/Grid2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # What is this challenge all about?
# We are presented with multiple recordings (up to 100) for each of the 264 bird species in the training set. All birds live in or travel to North America. The recordings come from https://www.xeno-canto.org/ - a site where people upload their bird recordings.
# Lets look at the data folder.
# !ls data
# Let's take a look at `train.csv`.
# +
import pandas as pd
# Load the training metadata (one row per uploaded recording).
train = pd.read_csv('data/train.csv')
train.shape
# -
train.head(3)
# How many different birds are there in the train set?
train.ebird_code.nunique()
# This is what sampling rates across the recordings look like:
train.sampling_rate.value_counts()
# One of the organizers shared [on Kaggle](https://www.kaggle.com/c/birdsong-recognition/discussion/159943#893049) that all the recordings in the test set should be sampled at 32 kHz.
#
# Let's resample the train set to 32 kHz.
# #### Resampling
# +
#export
import soundfile as sf
from pathlib import Path
import librosa
import numpy as np
from multiprocessing import Pool, cpu_count
import pandas as pd
import warnings
# Blanket-silence UserWarnings — presumably to quiet the warnings the audio
# loaders emit during the bulk resampling below; note this hides ALL
# UserWarnings in the process, not just audio-related ones.
warnings.filterwarnings("ignore", category=UserWarning)
# -
# mkdir data/train_resampled
# +
import os

# Mirror each species folder from train_audio under train_resampled.
for directory in Path('data/train_audio').iterdir():
    ebird_code = directory.name
    # exist_ok makes the cell idempotent: re-running the notebook no longer
    # crashes with FileExistsError on directories created by a prior run.
    os.makedirs(f'data/train_resampled/{ebird_code}', exist_ok=True)
# -
#export
NUM_WORKERS = 6
SAMPLE_RATE = 32000  # the competition test set is sampled at 32 kHz

def resample_audio(path):
    """Resample one recording to SAMPLE_RATE mono and save it as WAV.

    *path* is a pathlib.Path under data/train_audio/<ebird_code>/; the
    output lands in the mirrored data/train_resampled tree.
    """
    audio, _ = librosa.load(path, sr=SAMPLE_RATE, mono=True)
    ebird_code = path.parent.name
    sf.write(f'data/train_resampled/{ebird_code}/{path.stem}.wav', audio, SAMPLE_RATE)
# Resample species-by-species, fanning each folder's files across a pool.
for directory in Path('data/train_audio').iterdir():
    file_paths = list(directory.iterdir())
    with Pool(NUM_WORKERS) as p:
        try:
            p.map(resample_audio, file_paths)
        except Exception as e:
            # Deliberate best-effort: one bad/corrupt file should not abort
            # hours of resampling. Note that when map() raises, the rest of
            # THIS species' batch is abandoned, not retried.
            print(e)
# #### Data Exploration
# how many recordings do we have per species?
# +
from collections import defaultdict

# Map each species code to its list of (file_path, duration_seconds).
recs = defaultdict(list)
for species_dir in Path('data/train_resampled').iterdir():
    ebird_code = species_dir.name
    for audio_file in species_dir.iterdir():
        recs[ebird_code].append((audio_file, sf.info(audio_file).duration))
# -
# Recordings per species: the min/max spread shows how unbalanced it is.
counts = [len(files) for files in recs.values()]
min(counts), max(counts)
# +
import matplotlib.pyplot as plt

plt.hist(counts);
# -
|
0_DataCreation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Comparison of the K-Means and MiniBatchKMeans clustering algorithms
#
#
# We want to compare the performance of the MiniBatchKMeans and KMeans:
# the MiniBatchKMeans is faster, but gives slightly different results (see
# `mini_batch_kmeans`).
#
# We will cluster a set of data, first with KMeans and then with
# MiniBatchKMeans, and plot the results.
# We will also plot the points that are labelled differently between the two
# algorithms.
#
#
# +
print(__doc__)

import time

import numpy as np
import matplotlib.pyplot as plt

from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
# BUG FIX: sklearn.datasets.samples_generator was deprecated in 0.22 and
# removed in 0.24; make_blobs is importable directly from sklearn.datasets.
from sklearn.datasets import make_blobs

# #############################################################################
# Generate sample data: 3000 points around three Gaussian blobs.
np.random.seed(0)

batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
# #############################################################################
# Compute clustering with Means
def _timed_fit(estimator):
    """Fit *estimator* on X and return the elapsed wall-clock seconds."""
    start = time.time()
    estimator.fit(X)
    return time.time() - start

# Compute clustering with KMeans
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t_batch = _timed_fit(k_means)

# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
                      n_init=10, max_no_improvement=10, verbose=0)
t_mini_batch = _timed_fit(mbk)
# #############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
# NOTE(review): np.sort(..., axis=0) sorts each coordinate column
# independently, so a sorted "center" row can mix coordinates from different
# clusters. The labels below are recomputed against these sorted rows, which
# keeps the plot self-consistent; this is how the upstream example is written.
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
mbk_means_cluster_centers = np.sort(mbk.cluster_centers_, axis=0)
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
# order[k] = index of the MiniBatch center closest to KMeans center k,
# so matching clusters get matching colors.
order = pairwise_distances_argmin(k_means_cluster_centers,
                                  mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
    my_members = k_means_labels == k
    cluster_center = k_means_cluster_centers[k]
    ax.plot(X[my_members, 0], X[my_members, 1], 'w',
            markerfacecolor=col, marker='.')
    ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
            markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
    t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
    my_members = mbk_means_labels == order[k]
    cluster_center = mbk_means_cluster_centers[order[k]]
    ax.plot(X[my_members, 0], X[my_members, 1], 'w',
            markerfacecolor=col, marker='.')
    ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
            markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
         (t_mini_batch, mbk.inertia_))
# Initialise the different array to all False (no label ever equals 4,
# since there are only 3 clusters).
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
    different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
        markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
        markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
scikit-learn-official-examples/cluster/plot_mini_batch_kmeans.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # What happens when ANNs are applied out of bound?
#
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LeakyReLU
# -
# ## Generate test data
# +
# Synthetic 1-D regression problem: a noisy sine over [0, beta).
n = 4000           # total samples
split_elem = 3000  # first 3000 are "in range"; the tail is held out-of-range
beta = 20          # x spans [0, beta)
noise = 2          # uniform noise amplitude

x = np.sort(np.random.random(n) * beta)
y = np.sin(x) + np.random.random(n) * noise
plt.scatter(x, y, s=10)
# -
# ## Normalization
#
# +
# Stack x and y as columns and scale both into [0, 1]; the fitted scaler
# is kept around for inverse transforms later.
dataset = np.column_stack((x, y))
scaler = MinMaxScaler(feature_range=(0, 1))
dataset_scaled = scaler.fit_transform(dataset)
plt.scatter(dataset_scaled[:, 0], dataset_scaled[:, 1], s=10)
# -
# ## Split in training and test dataset
#
# Training dataset will adapt model parameters.
#
# Test parameter set is used to check model quality.
#
# Test dataset should only be used once!
#
#
# +
# BUG FIX: train_test_split was used below without ever being imported
# anywhere in this notebook, which raises NameError at runtime.
from sklearn.model_selection import train_test_split

# The tail of the (sorted-by-x) data is held out as "out of range": the
# model never sees x values from that region during training.
dataset_out_of_range = dataset_scaled[split_elem:dataset_scaled.shape[0], :]
dataset_production = dataset_scaled[0:split_elem, :]

# Random 70/30 split of the in-range portion.
train, test = train_test_split(dataset_production, train_size=0.7, test_size=0.3)

fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(train[:, 0], train[:, 1], s=10, c="green", label="train")
ax1.scatter(test[:, 0], test[:, 1], s=10, c="blue", label="test")
ax1.scatter(dataset_out_of_range[:, 0], dataset_out_of_range[:, 1], s=10,
            c="red", label="out of range")
plt.legend(loc='upper left');
plt.show()
# -
# ## Training model
# +
# Small fully-connected net: two tanh layers (each followed by LeakyReLU)
# and a tanh output, trained with MSE.
# NOTE(review): 'accuracy' is not meaningful for a regression loss; it is
# kept here only because the original notebook tracked it.
model = Sequential()
model.add(Dense(20 , activation = 'tanh', input_dim = 1))
model.add(LeakyReLU(alpha=0.03))
model.add(Dense(20 , activation = 'tanh', input_dim = 1))
model.add(LeakyReLU(alpha=0.03))
model.add(Dense(1, activation = 'tanh'))
model.compile(optimizer = 'adam',
              loss = 'mean_squared_error',
              metrics = ['accuracy'])
x = train[:,0]
y = train[:,1]
# NOTE(review): np.random.randint samples WITH replacement, so x_t can
# contain duplicates and the complement x_val is typically larger than the
# intended 20% — flagging rather than changing behavior here.
index = np.random.randint(0, len(x), round(0.8 * len(x)))
x_t = x[index]
y_t = y[index]
mask = np.ones(len(x),dtype=bool) #np.ones_like(a,dtype=bool)
mask[index] = False
x_val = x[mask]
y_val = y[mask]
epochs = 20
history = model.fit(x_t, y_t, epochs = epochs, batch_size = 1, validation_data = (x_val, y_val))
# Plot train vs validation loss per epoch.
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
plt.plot(epochs, loss, 'bo')
plt.plot(epochs, val_loss, 'b')
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
plt.show()
# -
# ## Plot prediction
# +
# Predict on the held-out test rows (all columns except the last, which
# is the target).
x_test = test[:,0:(test.shape[1]-1)]
y_predict_test = model.predict(x_test, batch_size = 1)
y_predict_test.shape = (len(y_predict_test))
# Inverse-transform trick: copy the scaled rows, overwrite the target
# column with the predictions, then run the fitted scaler's inverse
# transform so predictions come back in original units.
dataset_invert = np.copy(test)
dataset_invert[:, dataset_invert.shape[1]-1] = y_predict_test
y_predict_test = scaler.inverse_transform(dataset_invert)[:, dataset_invert.shape[1]-1]
test_tf = scaler.inverse_transform(test)
x_original = test_tf[:, 0]
y_original = test_tf[:, test_tf.shape[1]-1]
plt.scatter(x_original, y_original)
plt.scatter(x_original, y_predict_test)
plt.show()
np.corrcoef(y_original, y_predict_test)
# -
# # Cool, let's take this result and apply it a little bit out of range.
#
# +
# Same pipeline, but over the FULL scaled dataset — including the
# out-of-range tail the model never trained on.
x_test = dataset_scaled[:, 0:(dataset_scaled.shape[1]-1)]
y_predict_test = model.predict(x_test, batch_size = 1)
y_predict_test.shape = (len(y_predict_test))
dataset_invert = np.copy(dataset_scaled)
dataset_invert[:,dataset_invert.shape[1]-1] = y_predict_test
y_predict_test = scaler.inverse_transform(dataset_invert)[:, dataset_invert.shape[1]-1]
test_tf = scaler.inverse_transform(dataset_scaled)
x_original = test_tf[:, 0]
y_original = test_tf[:, test_tf.shape[1]-1]
plt.scatter(x_original, y_original)
plt.scatter(x_original, y_predict_test)
plt.show()
print(np.corrcoef(y_original, y_predict_test))
|
code/notebooks_python/.ipynb_checkpoints/overfitting-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: airbnb_ml
# language: python
# name: airbnb_ml
# ---
# # Introduction to the Article Code:
#
# This is the code used for the article posted on Medium.com. If you would like to know more about it click the link: [CRISP-DM Regression Analysis in Python](https://medium.com/p/ade4b74bdc85/edit)
# + run_control={"marked": true}
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
from helper import cleaning_dollar
from helper import apply_regression_models
from helper import unzip_files
from pandas.plotting import table
from sklearn.preprocessing import OneHotEncoder
from helper import iqr_outlier_detect
from helper import AnalysisStatus
# -
# <a id="extract"></a>
# # Data Understanding: Extracting and Merging the Data
# ## Extracting Zip File
# getting zip file name(s) found in ./data
data_zip = os.listdir('data')
data_zip
# extracting datasets; unzip_files comes from the local helper module
file_names = unzip_files(data_zip)
file_names
# ## Calendar
# +
# Extracting the calendar dataset
# BUG FIX: the original paths used backslashes ('data\calendar.csv');
# "\c" and "\l" are invalid escape sequences (SyntaxWarning on modern
# Python, a future error) and backslash paths are not portable. pandas
# accepts forward slashes on every platform.
df_calendar = pd.read_csv('data/calendar.csv')
# Extracting the listings dataset, keeping only the analysis columns
target_col = ['id', 'accommodates', 'bedrooms', 'cleaning_fee', 'property_type']
df_listings = pd.read_csv('data/listings.csv')[target_col]
# Calculating null values
null_calendar = df_calendar.isnull().sum()
null_listings = df_listings.isnull().sum()
# Show Nulls on Graph
null_calendar.plot.barh(title='Calendar Null Counts');
# -
# Quick range check of the calendar dates
pd.to_datetime(df_calendar.date).describe()
# The only feature in the calendar dataset having nulls is price.
# ## Listings
# +
# Plot settings
ax = plt.subplot(111, frame_on=True) # no visible frame
ax.xaxis.set_visible(False)  # hide the x axis
ax.yaxis.set_visible(False)  # hide the y axis
# Adding Table of the raw null counts next to the bar chart
table(ax, null_listings)
# Showing Graph
null_listings.plot.barh(title='Listings Null Counts', figsize=(10,4));
# -
# The most noticeable feature having nulls is the cleaning fee. Bedrooms and property types have insignificant null values. However, instead of just removing these, we are imputing them for the sake of example.
# <a id="tran"></a>
# # Data Preparation: Transforming and Cleaning the Data
# ## Calendar
# +
# Cleaning price: findall('[^$,]') keeps every character except '$' and
# ',', then join() reassembles the digits — i.e. strip currency formatting
# before the float cast.
df_calendar.loc[:, 'price'] = df_calendar.price.str.findall(
    '[^$,]').str.join('').astype(float)
# Grouping prices by the available feature
df_calendar.groupby('available').count()[['price']].plot.barh(
    title="Calendar Value Counts by Availability");
# -
# Where the prices are null in the calendar dataset, the availability is false (f). This means that the unavailable listings have no price at all. The nulls here are meaningful.
# +
# Rows where the listing is unavailable ('f') carry no price; where() turns
# them into all-NaN rows and dropna() then removes them.
df_calendar = (df_calendar
               .where(lambda frame: frame.available == 't')
               .dropna())
# Keep only the features the analysis actually uses.
df_calendar = df_calendar[['listing_id', 'price', 'date']]
# -
# We ended up dropping the null prices because we will waste our time predicting $0 prices when the listing is not available. Thus, the availability feature ended populated with just True conditions (t). So we dropped it.
# +
# Converting date to date time
df_calendar.loc[:, 'date'] = pd.to_datetime(df_calendar.date)
# Adding the months to sort on Graph (period string like '2016-01')
df_calendar.loc[:, 'month'] = df_calendar.date.dt.to_period("M").astype(str)
# Plotting settings
fig, ax = plt.subplots(ncols=2, nrows=1, figsize=(11, 5))
plt.suptitle('Distributions by Month (No Overall Outlier)')
plt.subplots_adjust(wspace=.7)
# Statistical outlier removal: classic 1.5x IQR whiskers over ALL months
upper_quartile = df_calendar.price.quantile(.75)
lower_quartile = df_calendar.price.quantile(.25)
interquartile = upper_quartile - lower_quartile
lower_whisker = lower_quartile - interquartile * 1.5
upper_whisker = upper_quartile + interquartile * 1.5
# Data without outlier to plot (outliers only dropped for the PLOT,
# df_calendar itself is untouched)
df_calendar_noout = df_calendar.query(
    f"price<={upper_whisker} and price>={lower_whisker}")
# Plotting
ax[0].set_title('Price Distributions')
sns.boxplot(x='price', y='month', data=df_calendar_noout, ax=ax[0]);
ax[1].set_title('Listing Count Distributions')
sns.countplot(y='month', data=df_calendar_noout, ax=ax[1]);
# -
# Date range sanity check
df_calendar.date.describe()
# We answered the first business question with these graphs. Which months have higher prices? Here, we are talking about the listing prices. Thus, to show each particular price by month (not aggregates), the best way is to provide their distributions. However, to know how complete the data is, we needed to check the listing counts per month. For example, January 2017 is definitely not complete. In fact it only has 2 days of data. Perhaps after completing the month, January 2017 ends up being higher than December 2016. Thus, we cannot use January 2017 to compare.
# + run_control={"marked": true}
# Save a lookup table mapping each real date to its rank, so the ordinal
# dates used for modelling can be translated back later.
dates = pd.Series(df_calendar.date.unique())
date_order = dates.rank()
date_ref_df = pd.concat([dates, date_order], axis=1)
date_ref_df.columns = ['dates', 'date_order']
# Replace the raw dates with dense ordinal values for modelling.
df_calendar.loc[:, 'date'] = df_calendar.date.rank(method='dense')
# The month helper column was only needed for the plots above.
df_calendar.drop('month', axis=1, inplace=True)
# -
# We saved the original dates and the related ranking for reference.
# ## Listings
# Cleaning cleaning fee: same $/comma-stripping trick used for the
# calendar price column above.
df_listings.loc[:, 'cleaning_fee'] = df_listings.cleaning_fee.str.findall(
    '[^$,]').str.join('').astype(float)
# The only field needing cleanup is the cleaning fee.
# <a id="impute"></a>
# # Data Preparation: Imputation of the Data
# ## Bedroom Imputations
# +
# Most common bedroom count per property type, as {property_type: mode}.
mode = df_listings.groupby('property_type').bedrooms.apply(
    lambda counts: counts.value_counts().index[0]).to_dict()
# Row-aligned Series carrying each listing's group mode...
mode_fill = df_listings.property_type.map(mode)
# ...used to fill the missing bedroom counts in place.
df_listings.bedrooms.fillna(mode_fill, inplace=True)
# Show modes by category
pd.DataFrame(mode, index=[0]).T.rename(columns={0: 'Mode'})
# -
# By chance, most of property type's most common number of bedrooms is 1.
# ## Cleaning Fees Imputations
# A missing cleaning fee is treated as "no fee charged".
df_listings.cleaning_fee.fillna(0, inplace=True)
# We are assuming null cleaning fees are just $0 cleaning fees.
# ## Property Type Sparsity Groupings
# Show how sparse the long tail of property types is before regrouping.
df_listings.property_type.value_counts(
    normalize=True, dropna=False, ascending=True
).plot.barh(title='Sparsity of Property Types');
# + run_control={"marked": true}
# Leaving relevant classes and assigning all other to Other: where() blanks
# every label that is not House/Apartment, fillna() relabels those 'Other'.
fn_keep_labels = lambda x: x.str.contains('House|Apartment')
df_listings.loc[:, 'property_type'] = df_listings.property_type.where(
    fn_keep_labels).fillna('Other')
# Plotting new classes
df_listings.property_type.value_counts(
    normalize=True, dropna=False, ascending=True
).plot.barh(title='New Classes of Property Types');
# -
# <a id="encode"></a>
# # Data Preparation: Encoding the Data
# +
# One-hot encode property_type so the regression models can consume it.
encoder = OneHotEncoder()
encoded_matrix = encoder.fit_transform(df_listings[['property_type']])
# Densify the sparse result into a DataFrame, one column per class.
encoded_df = pd.DataFrame(encoded_matrix.toarray(),
                          columns=encoder.categories_[0])
# Append the dummy columns to the listings frame.
df_listings = pd.concat([df_listings, encoded_df], axis=1)
# -
encoded_df.head()
df_listings.head()
# The property type was encoded as dummy variables so the model can run.
# <a id="assess"></a>
# Dropping original column now that the dummies carry the information
df_listings.drop('property_type', axis=1, inplace=True)
# # Data Preparation: Assessing Correlations
# +
# Instantiating analysis class (local helper: merges calendar + listings)
analysis = AnalysisStatus(df_calendar, df_listings)
# Dropping keys — join ids carry no predictive signal
analysis.df_merged.drop(['id', 'listing_id'], axis=1, inplace=True)
# Plotting correlations
analysis.correlation_heatmap()
# Creating the final Seattle data
df_seattle = analysis.df_merged
# -
# The encoded columns ended up not providing any value to the model. In addition, two of them have high multicollinearity. Thus, we are removing them.
# Dropping encoded columns
df_seattle.drop(['Apartment', 'House', 'Other'], axis=1, inplace=True)
# <a id="models"></a>
# # Model: Applying and Evaluating
# ## Model Evaluation
# apply_regression_models is a local helper: fits the candidate regressors
# on df_seattle with 'price' as target and reports their scores.
apply_regression_models(df_seattle, 'price')
# The score is better for the Random Forest Regression. However, we can do better by removing outliers.
# ## Evaluation: Outlier Analysis
iqr_outlier_detect(df_seattle.price)
# Indeed, there are many outliers in this dataset. However, let's start by looking for patterns in the top outliers.
df_calendar.query("price>1100").groupby(['listing_id', 'price']).size().to_frame('ListingCount')
# We can quickly observe that most of the outliers are concentrated in one listing for the same host.
# +
plt.figure(figsize=(10, 10));
data = df_calendar
# NOTE(review): plt.title is called three times on this figure; only the
# last call survives on the rendered plot.
plt.title("Seattle Listing Prices Over Date Numbers")
sns.scatterplot(x='date',
                y='price',
                data=data,
                color='black',
                alpha=.1,
                label='Other Listings');
data = df_calendar.query("listing_id == '3308979'")
plt.title("Seattle Listing for Host with Outliers")
sns.scatterplot(x='date',
                y='price',
                data=data,
                color='red',
                label='Listing# 3308979');
data = df_calendar.query("listing_id == '7733192'")
plt.title("Seattle Listing for Host with Outliers")
sns.scatterplot(x='date',
                y='price',
                data=data,
                color='blue',
                label='Listing# 7733192');
# -
# We can see the density is at the bottom of the graph and there are some listings unlikely to happen at the top. In fact, these points at the top are concentrated in two listings. This is unlikely to happen in general, so it is bad for prediction. We are removing those ones.
df_seattle = df_seattle.query("price < 1100")
iqr_outlier_detect(df_seattle.price)
# +
plt.figure(figsize=(10, 10));
sns.scatterplot(x='date',
                y='price',
                data=df_seattle,
                color='black',
                alpha=.1);
# -
# We still observe some outliers, but these outliers are part of the noise and valid for the analysis.
# ## Evaluation: Re-run Model with Outliers Removed
apply_regression_models(df_seattle, 'price')
# We don't see substantial improvement in the models to justify the removal of the outliers. With them or without them, the model performs about the same.
|
Airbnb_Seattle_Simple.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import csv
from os import listdir
from os.path import join

import numpy as np
import pandas as pd


def _read_tsv_column(path, cast):
    """Return the first column of the TSV at *path*, each row cast with *cast*."""
    with open(path, 'r') as fh:
        return [cast(row[0]) for row in csv.reader(fh, delimiter="\t")]


def _ensemble_mean(probs_list, thresh):
    """Average per-example probabilities across models and binarize.

    An example is predicted 1 when its mean probability is >= *thresh*.
    """
    return [int(p >= thresh) for p in np.mean(probs_list, axis=0)]


def _ensemble_vote(preds_list):
    """Strict-majority vote over the per-example 0/1 predictions of all models."""
    n_models = len(preds_list)
    return [int(sum(votes) > n_models / 2) for votes in zip(*preds_list)]


def main(tsv_folder_path='./tsvs_for_ensemble', mode='vote',
         out_path='ensembled_prediction.tsv', thresh=0.513):
    """Ensemble per-model TSV outputs into a single prediction file.

    mode='mean' averages the files whose names contain "tsv" and "prob"
    and thresholds the mean at *thresh*; mode='vote' majority-votes over
    files whose names contain "tsv" and "pred". Writes one 0/1 prediction
    per line (tab-separated, no header) to *out_path*.

    Fix: the original 'mean' branch assigned to ``ensemble_preds[:]``
    before ``ensemble_preds`` existed, raising NameError whenever
    mode == "mean"; an unrecognized mode also crashed later with an
    undefined ``df`` — now reported explicitly.
    """
    filelist = listdir(tsv_folder_path)
    if mode == "mean":
        probs_list = [_read_tsv_column(join(tsv_folder_path, f), float)
                      for f in filelist if "tsv" in f and "prob" in f]
        ensemble_preds = _ensemble_mean(probs_list, thresh)
    elif mode == "vote":
        preds_list = [_read_tsv_column(join(tsv_folder_path, f), int)
                      for f in filelist if "tsv" in f and "pred" in f]
        ensemble_preds = _ensemble_vote(preds_list)
    else:
        raise ValueError("unknown mode: %r" % (mode,))
    pd.DataFrame(ensemble_preds).to_csv(out_path, index=False, header=None, sep="\t")


if __name__ == "__main__":
    main()
# -
|
tsv_ensembler.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Imports PIL module
import PIL
from PIL import ImageFont, ImageDraw, Image

# Create a 32x32 single-channel canvas ("L" = 8-bit grayscale) with a white
# background and draw one character on it.
width = 32
im = PIL.Image.new(mode="L", size=(width, width), color='white')
draw = ImageDraw.Draw(im)
fontsize = 32
font = ImageFont.truetype("/home/data/japanese/Fonts/epmarugo.ttf", fontsize)
char = "龜"
# Offset that would center a fontsize-sized glyph on the canvas
# (zero here, since width == fontsize).
x = (width - fontsize)/2
draw.text((x, x), char, font=font)
# -
def FontImg(size, font, char):
    """Render *char* centered on a size x size grayscale canvas using *font*.

    Fix: the original computed the offset as ``(width - size) / 2`` using the
    module-level ``width`` (32) instead of the ``size`` parameter, which is
    negative for any size > 32 and draws the glyph off-canvas. The glyph is
    centered using ``font.size`` (the point size the truetype font was loaded
    with — Pillow ``FreeTypeFont.size``).
    """
    im = PIL.Image.new(mode="L", size=(size, size), color='white')
    draw = ImageDraw.Draw(im)
    # Center a font.size-sized glyph on the size-sized canvas.
    x = (size - font.size) / 2
    draw.text((x, x), char, font=font)
    return im
# +
im
# +
import numpy as np
from matplotlib import pyplot as plt
import cv2

# 2D FFT of the glyph image; shift the zero-frequency component to the
# center and display the log-magnitude spectrum next to the input.
img = im
f = np.fft.fft2(img)
fshift = np.fft.fftshift(f)
# NOTE(review): positions where |F| == 0 produce -inf (log of zero) here.
magnitude_spectrum = 20*np.log(np.abs(fshift))

plt.subplot(121), plt.imshow(img, cmap='gray')
plt.title('Input Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122), plt.imshow(magnitude_spectrum, cmap='gray')
plt.title('Magnitude Spectrum'), plt.xticks([]), plt.yticks([])
plt.show()
# -

import pickle

# SECURITY NOTE: pickle.load executes arbitrary code from the file; only load
# pickles from trusted sources.
available = pickle.load(open("/home/pomelo/perspectiveTransform/charactersAvailable.p", "rb"))
# Characters available in the epmarugo font.
epm = available['epmarugo.ttf']
# +
FontImg(256, font, char)
# -
img = np.array(im)
import pandas as pd
# +
from tqdm import tqdm

# Rasterize every available glyph and stack the flattened pixel vectors into a
# single DataFrame (one row per character, row index = the character itself).
# Fix: the original grew the frame with DataFrame.append inside the loop —
# quadratic, deprecated since pandas 1.4, and removed in pandas 2.0. Collect
# the rows first and build the DataFrame once.
rows = []
for char in tqdm(epm):
    img = np.array(FontImg(256, font, char))
    rows.append(pd.Series(img.flatten(), name=char))
chars = pd.DataFrame(rows)
# -
chars

# Per-pixel mean intensity over all glyphs; reshaped back to the 256x256 grid
# this shows the "average character" of the font.
mean = chars.mean()
mean = np.array(mean)
m = mean.reshape((256, 256))
plt.imshow(m)
|
fourier/Fourier.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import libraries
import os

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
import plotly.express as px
import plotly.graph_objects as go

# set display options
pd.options.display.max_columns = 999
pd.options.display.max_rows = 50

# code formatting
# %load_ext nb_black

# project paths: <repo root>/data and <repo root>/images, resolved relative to
# the notebook's working directory (its parent is taken as the project root).
project_root_dir = os.path.normpath(os.getcwd() + os.sep + os.pardir)
data_path = os.path.join(project_root_dir, "data")
image_path = os.path.join(project_root_dir, "images")
os.makedirs(image_path, exist_ok=True)


# function for loading data
def load_data(filename, data_path=data_path):
    """Read *filename* from the project data directory into a DataFrame."""
    csv_path = os.path.join(data_path, filename)
    return pd.read_csv(csv_path)
# -

# Read the data
train_vals = load_data("train_values.csv")
train_labels = load_data("train_labels.csv")
test_vals = load_data("test_values.csv")
submission_format = load_data("submission_format.csv")

train_vals.head()

train_labels.head()

# Attach the damage_grade label to each building's features; the left join
# keeps every row of train_vals even if a label were missing.
train = train_vals.merge(train_labels, on="building_id", how="left")
train.head()

test_vals.head()

print("The Number of (Rows, Columns) in Train Set: ", train.shape)
print("The number of (Rows, Columns) in Test Set: ", test_vals.shape)
# ### Data Description
# * damage_grade - (Target) 1 represents low damage, 2 represents a medium amount of damage, 3 represents almost complete destruction
# * geo_level_1_id, geo_level_2_id, geo_level_3_id (type: int): geographic region in which building exists, from largest (level 1) to most specific sub-region (level 3). Possible values: level 1: 0-30, level 2: 0-1427, level 3: 0-12567.
# * count_floors_pre_eq (type: int): number of floors in the building before the earthquake.
# * age (type: int): age of the building in years.
# * area_percentage (type: int): normalized area of the building footprint.
# * height_percentage (type: int): normalized height of the building footprint.
# * land_surface_condition (type: categorical): surface condition of the land where the building was built. Possible values: n, o, t.
# * foundation_type (type: categorical): type of foundation used while building. Possible values: h, i, r, u, w.
# * roof_type (type: categorical): type of roof used while building. Possible values: n, q, x.
# * ground_floor_type (type: categorical): type of the ground floor. Possible values: f, m, v, x, z.
# * other_floor_type (type: categorical): type of constructions used in higher than the ground floors (except of roof). Possible values: j, q, s, x.
# * position (type: categorical): position of the building. Possible values: j, o, s, t.
# * plan_configuration (type: categorical): building plan configuration. Possible values: a, c, d, f, m, n, o, q, s, u.
# * has_superstructure_adobe_mud (type: binary): flag variable that indicates if the superstructure was made of Adobe/Mud.
# * has_superstructure_mud_mortar_stone (type: binary): flag variable that indicates if the superstructure was made of Mud Mortar - Stone.
# * has_superstructure_stone_flag (type: binary): flag variable that indicates if the superstructure was made of Stone.
# * has_superstructure_cement_mortar_stone (type: binary): flag variable that indicates if the superstructure was made of Cement Mortar - Stone.
# * has_superstructure_mud_mortar_brick (type: binary): flag variable that indicates if the superstructure was made of Mud Mortar - Brick.
# * has_superstructure_cement_mortar_brick (type: binary): flag variable that indicates if the superstructure was made of Cement Mortar - Brick.
# * has_superstructure_timber (type: binary): flag variable that indicates if the superstructure was made of Timber.
# * has_superstructure_bamboo (type: binary): flag variable that indicates if the superstructure was made of Bamboo.
# * has_superstructure_rc_non_engineered (type: binary): flag variable that indicates if the superstructure was made of non-engineered reinforced concrete.
# * has_superstructure_rc_engineered (type: binary): flag variable that indicates if the superstructure was made of engineered reinforced concrete.
# * has_superstructure_other (type: binary): flag variable that indicates if the superstructure was made of any other material.
# * legal_ownership_status (type: categorical): legal ownership status of the land where building was built. Possible values: a, r, v, w.
# * count_families (type: int): number of families that live in the building.
# * has_secondary_use (type: binary): flag variable that indicates if the building was used for any secondary purpose.
# * has_secondary_use_agriculture (type: binary): flag variable that indicates if the building was used for agricultural purposes.
# * has_secondary_use_hotel (type: binary): flag variable that indicates if the building was used as a hotel.
# * has_secondary_use_rental (type: binary): flag variable that indicates if the building was used for rental purposes.
# * has_secondary_use_institution (type: binary): flag variable that indicates if the building was used as a location of any institution.
# * has_secondary_use_school (type: binary): flag variable that indicates if the building was used as a school.
# * has_secondary_use_industry (type: binary): flag variable that indicates if the building was used for industrial purposes.
# * has_secondary_use_health_post (type: binary): flag variable that indicates if the building was used as a health post.
# * has_secondary_use_gov_office (type: binary): flag variable that indicates if the building was used fas a government office.
# * has_secondary_use_use_police (type: binary): flag variable that indicates if the building was used as a police station.
# * has_secondary_use_other (type: binary): flag variable that indicates if the building was secondarily used for other purposes.
#
# ## Exploratory Data Analysis

# ### Damage Type Distributions

train["damage_grade"].value_counts()

# Frequency table of the target classes.
# NOTE(review): the "index" -> "Damage Grade" rename assumes pandas < 2.0,
# where value_counts().reset_index() names the columns "index"/"damage_grade";
# pandas >= 2.0 names them "damage_grade"/"count" — verify the installed version.
damage_freq = (
    train["damage_grade"]
    .value_counts()
    .reset_index()
    .rename(columns={"index": "Damage Grade", "damage_grade": "Count"})
)
damage_freq

fig = go.Figure()
fig.add_trace(go.Bar(x=damage_freq["Damage Grade"], y=damage_freq["Count"]))
fig.update_layout(
    title="Damage Type Distribution",
    xaxis=dict(tickmode="array", tickvals=[1, 2, 3], title="Damage Type"),
    yaxis=dict(title="Count"),
)
fig.show()
# Most of the buildings suffered a medium amount of damage, followed by complete destruction.

# ### Damage By Building Structure Type

train.columns

# +
# One binary indicator column per superstructure material, plus the target.
cols = [
    "has_superstructure_adobe_mud",
    "has_superstructure_mud_mortar_stone",
    "has_superstructure_stone_flag",
    "has_superstructure_cement_mortar_stone",
    "has_superstructure_mud_mortar_brick",
    "has_superstructure_cement_mortar_brick",
    "has_superstructure_timber",
    "has_superstructure_bamboo",
    "has_superstructure_rc_non_engineered",
    "has_superstructure_rc_engineered",
    "has_superstructure_other",
    "damage_grade",
]
temp_df = train[cols].copy()
# -

temp_df.head()

# Long format: one row per (building, material) pair; keep only rows where the
# building actually has that material (value == 1).
temp_df = pd.melt(temp_df, id_vars=["damage_grade"], var_name="building_type")
temp_df = temp_df[temp_df["value"] == 1]
temp_df

temp_df["damage_grade"] = temp_df["damage_grade"].map(
    {1: "low", 2: "medium", 3: "high"}
)
temp_df.head()

df = pd.crosstab(temp_df["building_type"], temp_df["damage_grade"])
df

# Convert counts to row percentages: each column is divided by the row totals,
# aligned on the building_type index.
df = df.apply(lambda x: round(x / df.sum(axis=1) * 100, 2))
df

# +
fig = go.Figure()
fig.add_trace(go.Bar(x=df.index, y=df["low"], name="Low"))
fig.add_trace(go.Bar(x=df.index, y=df["medium"], name="Medium"))
fig.add_trace(go.Bar(x=df.index, y=df["high"], name="High"))
fig.update_layout(
    title="Damage By Building Structure Type",
    xaxis=dict(title="Building Type"),
    yaxis=dict(title="Percentage"),
    barmode="group",
)
fig.show()
# -

# sorted data, when building has high damages
df.sort_values(by=["high", "medium"], ascending=[False, False])

# We can see that when a building is made of mud or stone (or any combination
# of them) the damage is highest. The buildings with the least high damage are
# made of rc_engineered or cement_mortar_brick, followed by rc_non_engineered
# at third lowest. The percentage of medium damage is between 50 - 60% for all
# building types except rc_engineered.
# # Damage By Secondary Use

train.columns

# +
# One binary indicator column per secondary use, plus the target.
cols = [
    "has_secondary_use_agriculture",
    "has_secondary_use_hotel",
    "has_secondary_use_rental",
    "has_secondary_use_institution",
    "has_secondary_use_school",
    "has_secondary_use_industry",
    "has_secondary_use_health_post",
    "has_secondary_use_gov_office",
    "has_secondary_use_use_police",
    "has_secondary_use_other",
    "damage_grade",
]
temp_df = train[cols].copy()

# Long format, keeping only the (building, use) pairs that actually apply,
# then a row-percentage crosstab of damage grade by secondary use.
temp_df = pd.melt(temp_df, id_vars=["damage_grade"], var_name="secondary_use")
temp_df = temp_df[temp_df["value"] == 1]
temp_df["damage_grade"] = temp_df["damage_grade"].map(
    {1: "low", 2: "medium", 3: "high"}
)
df = pd.crosstab(temp_df["secondary_use"], temp_df["damage_grade"])
df = df.apply(lambda x: round(x / df.sum(axis=1) * 100, 2))
# -

# when building has high damage
df.sort_values(by=["high", "medium"], ascending=[False, False])
# Buildings suffer high damage most often when their secondary use is agriculture, police or industry, and
# less often when it is an institution, rental or a health post.
# ## Damage By Age Of The Building

train['age'].value_counts()

# Row-percentage crosstab of damage grade by building age, sorted so ages with
# the largest share of high damage come first.
df = pd.crosstab(train["age"], train["damage_grade"])
df.rename(columns={1: "Low", 2: "Medium", 3: "High"}, inplace=True)
df = df.apply(lambda x: round(x / df.sum(axis=1) * 100, 2))
df.sort_values(by=["High", "Medium"], ascending=[False, False])
# There does not appear to be any clear pattern here: older buildings can be either more or less damaged.
|
Notebooks/EDA_part1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Asynchronous Python Programming with `asyncio`
#
#
# ### <NAME>
# Cupcakes and Coding 2019
# + [markdown] slideshow={"slide_type": "slide"}
# ### General Overview:
# <hr>
#
#
# * Real world examples:
# * What does asynchronous mean?
# * What does concurrency mean?
# * Concurrency vs Asynchronous programming
# * Asynchronous vs. parallel programming?
# * Terminology
# * Python Specifics
# * Examples
# * Examples Explanations
# * Caveats
# * Further Reading
# + [markdown] slideshow={"slide_type": "slide"}
# ### Real World Asynchronous Example:
# <hr>
#
# A real life example is doing chores. Say you have the following three chores:
#
# 1. Do laundry (1 hour)
# 2. Make Dinner (1 hour)
# 3. Run the dishwasher (30 mins)
# 4. Water plants (30 mins)
#
# **Total Time: 3 hours**
#
# What would be the best way to tackle these chores?
#
# 1. Start and finish Laundry, make dinner, Start the dishwasher and wait for it to finish, and then water plants for a total time of **3 hours**.
# 2. Start Laundry, while laundry is running make dinner, after dinner run the dishwasher, while dishwasher is running water your plants for a total time of **1.5 hours**.
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Concurrency:
# <hr>
#
# What about these chores allow them to be finished in 1.5 hours instead of 3?
#
# The key is that a lot of these chores can be done **concurrently**.
#
# **What does concurrency mean?**
#
# * it means that each chunk of tasks can be done independently of the others, i.e. these tasks can be run in an overlapping manner (not necessarily in parallel). Because most of these chunks are independent, I can start a task and, instead of waiting for it to finish, start another task while I wait for the original one to complete, potentially saving lots of time. In computing, chunks of functionality that behave like this are called **coroutines**.
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ### What is the difference between Concurrent and Asynchronous programming?
# <hr>
#
# * **concurrent programming** means writing your program in a way where each functional unit can be performed as independently as possible. Ideally this would mean that you could run your program with a different order of functional units every time and still _**receive the same result**_.
#
# * example: if you have the chores: run the dishwasher, do laundry, and clean your room. _It doesn't matter which order you complete these chores in_, You get the same result: a clean(er) house.
#
#
#
# * **asynchronous programming** means adding in the multitasking part of the execution to your concurrent code. Essentially directing your program to be run in a specific way that minimizes time spent waiting for long running tasks to complete.
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### How is Asynchronous Programming different from Parallel Programming?
# <hr>
#
# * As a refresher, **Parallelism** refers to running multiple tasks at the same time.
#     * We've seen this using multiple processes/threads.
#
#
# * In contrast **asynchronous** programming essentially boils down to **cooperative multitasking**.
#     * The python asyncio library is designed to run in a _single threaded, single process_ way.
#     * How can this be? If you look again at the chores example you may notice that there is only _one_ worker doing these chores; it's the _order/execution_ of these chores that drives the performance increase.
#
#
# * They are/can be related! Concurrent programming makes parallelization possible in the same way it makes asynchronous programming possible!
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Terminology:
#
# When implementing Asynchronous programming you might run into the following terminology:
#
# * **Task:**
# * Used as a wrapper to automatically schedule and run a coroutine
#
#
# * **Future:**
# * object that represent the result of a task that may or may not have been executed.
#
#
# * **event loop:**
# * manages and distributes the execution of different tasks. It registers tasks and handles distributing the flow of control between them.
#
#
# [source](https://docs.python.org/3/library/asyncio-task.html#awaitables)
# [source](https://hackernoon.com/asyncio-for-the-working-python-developer-5c468e6e2e8e)
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Python Specifics:
# <hr>
#
# * In python, co-routines are a special version of a [**generator** ](https://realpython.com/introduction-to-python-generators/).
# * In short a generator is a special function that can **yield** values before it returns, internally remembering its state.
# * based on this, a coroutine can pause its execution and hand off control to other coroutines.
#
# + slideshow={"slide_type": "subslide"}
# Asynchronous Example
# code pulled/modified from https://realpython.com/async-io-python
import time
import asyncio
async def count():
    """Print "One", yield to the event loop for one second, print "Two"."""
    # asynchronous co-routine
    print("One")
    # pause here and come back when sleep() is complete
    await asyncio.sleep(1)
    print("Two")


async def call_count():
    """Run three count() coroutines concurrently and wait for all of them.

    Fix: the original called asyncio.gather(...) without awaiting it, so
    call_count() returned immediately and the gathered coroutines were not
    run to completion (Python reports "coroutine was never awaited" / pending
    tasks get cancelled when the loop shuts down).
    """
    await asyncio.gather(count(), count(), count())
await call_count()
# + slideshow={"slide_type": "subslide"}
# Synchronous Example
import time


def count():
    """Print "One", block the whole program for one second, print "Two"."""
    print("One")
    time.sleep(1)
    print("Two")


if __name__ == "__main__":
    # Fix: the original computed `time.perf_counter() - s` without ever
    # defining `s` (NameError); start the timer before the loop so the
    # elapsed wall time of the three sequential calls can be measured.
    s = time.perf_counter()
    for _ in range(3):
        count()
    elapsed = time.perf_counter() - s
# + [markdown] slideshow={"slide_type": "slide"}
# ### What is Happening Here?
# <hr>
#
# * Each call to the count function creates a coroutine that is scheduled as a task on the **event loop**,
# * When a task reaches the **await** keyword it signals to the event loop to take back control and execute some other task while it finishes the sleep task.
# * asyncio.sleep can be replaced with any time intensive IO task that involves wait time.
# * examples include:
#     * Web requests (e.g. fetching URLs)
#     * External communication between different applications
# * Database programming
#
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ### Caveats:
# <hr>
#
# * an `async def` function is able to use `await`, `return`, or `yield`
# * you must use the `await` keyword to get result from an `async def` declared function.
# * You can only use `await` inside the body of an async coroutine.
# * To await an object, that object itself must be _awaitable_ (aka another coroutine)
# * For design, coroutines are usually kept as small and modular as possible and are called with another wrapper function to chain co-routines together. `main()` is typically used to gather tasks and apply central co-routine across an iterable or pool.
# * As the name implies, asyncio is designed to be used for IO bound processes, If you have another type of bound process you might be better off using multi-processing.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Further Reading:
# <hr>
#
# * https://realpython.com/async-io-python/
# * https://vimeo.com/49718712
# -
|
2019/asyncio/asyncio_presentation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["remove-cell"]
# %matplotlib inline
# %config InlineBackend.figure_format ='retina'
import IPython
import numpy as np
import torch
import socialforce
# -
# (corridor)=
# # Corridor
#
# We want to qualitatively study the impact of non-standard Social Force potentials.
# +
def initial_state_corridor(n):
    """Initial corridor state: 2n pedestrians, n walking right and n walking left.

    Columns of the returned (2n, 6) tensor: x, y, vx, vy, dest_x, dest_y.
    The generator is seeded, so repeated calls return the identical state.
    """
    _ = torch.manual_seed(42)

    state = torch.zeros((n * 2, 6))

    # Positions: uniform in [-25, 25] x [-4.5, 4.5] for each group.
    half_extent = torch.tensor([25.0, 4.5])
    state[:n, 0:2] = ((torch.rand((n, 2)) - 0.5) * 2.0) * half_extent
    state[n:, 0:2] = ((torch.rand((n, 2)) - 0.5) * 2.0) * half_extent

    # Forward speeds drawn from N(+/-1.34, 0.26); the second group walks left.
    state[:n, 2] = torch.normal(torch.full((n,), 1.34), 0.26)
    state[n:, 2] = torch.normal(torch.full((n,), -1.34), 0.26)

    # Destinations far outside the corridor keep everyone walking through it.
    state[:n, 4], state[n:, 4] = 100.0, -100.0

    return state


initial_state = initial_state_corridor(60)
# -
# The space is just two walls at $y=5.0m$ and $y=-5.0m$. To avoid boundary effects,
# the walls extend beyond the periodic boundaries at $x=-25m$ and $x=25m$.
upper_wall = torch.stack([torch.linspace(-30, 30, 600), torch.full((600,), 5)], -1)
lower_wall = torch.stack([torch.linspace(-30, 30, 600), torch.full((600,), -5)], -1)
ped_space = socialforce.potentials.PedSpacePotential([upper_wall, lower_wall])

#
# ## Reference Potential
#
# Standard SF:
ped_ped = socialforce.potentials.PedPedPotential()

# + tags=["hide-input"]
# HIDE CODE
# Run the standard Social Force simulation for 250 steps (20s at dt=0.08)
# with periodic boundaries in x, then show the first and last frames.
simulator = socialforce.Simulator(ped_ped=ped_ped, ped_space=ped_space,
                                  oversampling=2, delta_t=0.08)
simulator.integrator = socialforce.simulator.PeriodicBoundary(
    simulator.integrator, x_boundary=[-25.0, 25.0])
# no_grad: pure forward simulation, no autograd bookkeeping needed.
with torch.no_grad():
    states_sf = simulator.run(initial_state, 250)

with socialforce.show.track_canvas(ncols=2, figsize=(12, 2), tight_layout=False) as (ax1, ax2):
    socialforce.show.states(ax1, states_sf[0:1], monochrome=True)
    socialforce.show.space(ax1, ped_space)
    ax1.text(0.1, 0.1, '$t = 0s$', transform=ax1.transAxes)
    ax1.set_xlim(-25, 25)

    socialforce.show.states(ax2, states_sf[249:250], monochrome=True)
    socialforce.show.space(ax2, ped_space)
    ax2.text(0.1, 0.1, '$t = 20s$', transform=ax2.transAxes)
    ax2.set_xlim(-25, 25)

# + tags=["hide-input"]
# HIDE CODE
# Render the full trajectory as an HTML5 video.
with socialforce.show.track_canvas(figsize=(6, 2), tight_layout=False, show=False, dpi=130) as ax:
    ax.set_xlim(-25, 25)
    socialforce.show.space(ax, ped_space)
    video = socialforce.show.state_animation(ax, states_sf, delta_t=0.08).to_html5_video()
IPython.display.HTML(video)
# -
# -
# ## Diamond Potential

# Same corridor, but with a diamond-shaped pedestrian-pedestrian potential.
ped_ped = socialforce.potentials.PedPedPotentialDiamond(sigma=0.5)

# + tags=["hide-input"]
# HIDE CODE
simulator = socialforce.Simulator(ped_ped=ped_ped, ped_space=ped_space,
                                  oversampling=2, delta_t=0.08)
simulator.integrator = socialforce.simulator.PeriodicBoundary(
    simulator.integrator, x_boundary=[-25.0, 25.0])
with torch.no_grad():
    states_diamond = simulator.run(initial_state, 250)

# First and last frames (t = 0s and t = 20s).
with socialforce.show.track_canvas(ncols=2, figsize=(12, 2), tight_layout=False) as (ax1, ax2):
    socialforce.show.states(ax1, states_diamond[0:1], monochrome=True)
    socialforce.show.space(ax1, ped_space)
    ax1.text(0.1, 0.1, '$t = 0s$', transform=ax1.transAxes)
    ax1.set_xlim(-25, 25)

    socialforce.show.states(ax2, states_diamond[249:250], monochrome=True)
    socialforce.show.space(ax2, ped_space)
    ax2.text(0.1, 0.1, '$t = 20s$', transform=ax2.transAxes)
    ax2.set_xlim(-25, 25)

# + tags=["hide-input"]
# HIDE CODE
with socialforce.show.track_canvas(figsize=(6, 2), tight_layout=False, show=False, dpi=130) as ax:
    ax.set_xlim(-25, 25)
    socialforce.show.space(ax, ped_space)
    video = socialforce.show.state_animation(ax, states_diamond, delta_t=0.08).to_html5_video()
IPython.display.HTML(video)
# -
# -
# ## Asymmetric Diamond

# Diamond potential skewed by a speed-dependent asymmetry angle; simulated for
# twice as long (500 steps = 40s) to let lane patterns develop.
ped_ped = socialforce.potentials.PedPedPotentialDiamond(sigma=0.5, asymmetry_angle=-20.0)

# + tags=["hide-input"]
# HIDE CODE
simulator = socialforce.Simulator(ped_ped=ped_ped, ped_space=ped_space,
                                  oversampling=2, delta_t=0.08)
simulator.integrator = socialforce.simulator.PeriodicBoundary(
    simulator.integrator, x_boundary=[-25.0, 25.0])
with torch.no_grad():
    states_diamond_sd = simulator.run(initial_state, 500)

with socialforce.show.track_canvas(ncols=2, figsize=(12, 2), tight_layout=False) as (ax1, ax2):
    socialforce.show.states(ax1, states_diamond_sd[0:1], monochrome=True)
    socialforce.show.space(ax1, ped_space)
    ax1.text(0.1, 0.1, '$t = 0s$', transform=ax1.transAxes)
    ax1.set_xlim(-25, 25)

    socialforce.show.states(ax2, states_diamond_sd[499:500], monochrome=True)
    socialforce.show.space(ax2, ped_space)
    ax2.text(0.1, 0.1, '$t = 40s$', transform=ax2.transAxes)
    ax2.set_xlim(-25, 25)

# + tags=["hide-input"]
# HIDE CODE
with socialforce.show.track_canvas(figsize=(6, 2), tight_layout=False, show=False, dpi=130) as ax:
    ax.set_xlim(-25, 25)
    socialforce.show.space(ax, ped_space)
    video = socialforce.show.state_animation(ax, states_diamond_sd, delta_t=0.08).to_html5_video()
IPython.display.HTML(video)
# -
# -
# ## Speed Analysis
# + tags=["hide-input"]
# HIDE CODE
def relative_speeds(states):
    """Flattened per-pedestrian speed ratios (actual / preferred).

    *states* is a (frames, pedestrians, >=10) array; columns 2:4 hold the
    velocity and column 9 the preferred speed. The first 50 frames are
    discarded as a transient.
    """
    actual = np.linalg.norm(states[:, :, 2:4], axis=-1)
    ratio = actual / states[:, :, 9]
    return ratio[50:].reshape(-1)


def median_speed(states):
    """Median absolute speed over all pedestrians, skipping the first 50 frames."""
    return np.median(np.linalg.norm(states[50:, :, 2:4], axis=-1))
# Compare the distribution of realized / preferred speed across the three
# simulations (the transient frames are excluded inside relative_speeds()).
with socialforce.show.canvas() as ax:
    r_sf = relative_speeds(states_sf)
    r_diamond = relative_speeds(states_diamond)
    r_diamond_sd = relative_speeds(states_diamond_sd)
    ax.hist([r_sf, r_diamond, r_diamond_sd], bins=30, range=(0.8, 1.35), density=True,
            label=[f'Social Force, $v_{{median}}$ = {median_speed(states_sf):.2f}m/s',
                   f'diamond, $v_{{median}}$ = {median_speed(states_diamond):.2f}m/s',
                   f'speed-dependent diamond, $v_{{median}}$ = {median_speed(states_diamond_sd):.2f}m/s'])
    ax.legend()
    ax.set_xlabel('$v / v_{preferred}$ [m/s]')
# -
# The simulation with the standard social force potential leads to pedestrians
# that are above their preferred speed, whereas in the simulation with the
# diamond potential the median speed is at the preferred speed of $1.34m/s$.
# This is a consequence of the asymmetric nature of the Social Force:
# the force experienced by pedestrian $\alpha$ due to $\beta$ is different
# (not just opposite) from the force experienced by $\beta$ from $\alpha$.
# When pedestrian $\alpha$ walks in front of $\beta$, $\beta$ will feel almost no
# force to slow down as the potential of $\alpha$ is shifted more towards the front.
# Pedestrian $\alpha$ however will be in the range of the potential from $\beta$
# who is behind. That force will be halved by the field-of-view modulation, but
# that is still a stronger force than what $\beta$ experiences.
#
# In this corridor example with the Social Force pedestrian-pedestrian potential,
# pedestrians that are in front tend to get pushed to accelerate more than the
# pedestrians behind to slow down. There is no such asymmetry in the diamond
# potential.
# ## Asymmetry Analysis
# + tags=["hide-input"]
# HIDE CODE
def x2(states):
    """Final-frame lateral positions, mirrored to compare both walking directions.

    Column 1 holds the lateral (y) position; pedestrians whose column-6 value
    is negative walk the other way, so their positions are negated to make the
    sides comparable.
    """
    lateral = np.copy(states[:, :, 1])
    lateral[states[:, :, 6] < 0.0] *= -1.0
    return lateral[-1].reshape(-1)


def mean_x2(states):
    """Mean of the mirrored final-frame lateral positions."""
    return np.mean(x2(states))
# Histogram of the mirrored final-frame lateral positions for the three
# simulations; a shift away from zero indicates side preference (lanes).
with socialforce.show.canvas() as ax:
    x2_sf = x2(states_sf)
    x2_diamond = x2(states_diamond)
    x2_diamond_sd = x2(states_diamond_sd)
    ax.hist([x2_sf, x2_diamond, x2_diamond_sd], bins=6, range=(-5.0, 5.0), density=True,
            label=[f'Social Force, $\\overline{{x}}_2$ = {mean_x2(states_sf):.1f}m',
                   f'diamond, $\\overline{{x}}_2$ = {mean_x2(states_diamond):.1f}m',
                   f'speed-dependent diamond, $\\overline{{x}}_2$ = {mean_x2(states_diamond_sd):.1f}m'],
            orientation='horizontal')
    ax.legend()
    ax.set_ylabel('$x_2$ [m]')
# -
|
guide/corridor.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/a-forty-two/COG_GN22CDBDS001_MARCH_22/blob/main/TextAnalysisRegex.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="C-ytYsNKNfUZ"
# # Exploratory Text Analysis
#
# ## What kinds of text analysis are there?
#
# * analyst knows the pattern
# * regular expressions
# * analyst does not know the pattern
# * natural language processing
# * compares historical examples to judge novel cases
# * comparisons are statistical and approximate
#
# + [markdown] id="qfRVsmITNfUe"
# ### Examples of Analysis
# + [markdown] id="yB_dZ875NfUf"
# When you know the pattern:
# + id="CKXAPEWDNfUf"
# Pattern: £, an optional space, one digit, then an optional second digit.
pattern = '£ ?[0-9][0-9]?'  # £ then SPACE-optional then digit then digit-optional
document = 'My eggs cost £3, bread cost £2, vodka cost £35'
# + id="SdHK4IzUNfUh"
import re
# + id="atPRnkyZNfUh"
# Extract every substring matching the pattern: ['£3', '£2', '£35'].
re.findall(pattern, document)
# + [markdown] id="pmsj-SEdNfUi"
# If you don't know the pattern:
#
# * sentiment analysis
# * how positive/negative is this (new) review?
# * topic analysis
# * what is this document about?
# + [markdown] id="TkoJl8GjNfUj"
# ## What can I do if I know what pattern I want to find?
#
# * finding ("extracting")
# * what matches the pattern?
# * matching ("validating")
# * does the entire document match YES/NO?
# * substitute ("replacing")
# * replace a part that matches a pattern with another...
# + [markdown] id="75OwcHa9NfUk"
# ## How do I validate text with pandas?
# + id="r6mAIQIBNfUk"
import pandas as pd
# + id="Lq0zu2m_NfUl" outputId="8d7cc2a5-ca70-453d-b999-47590d61a0fa"
ti = pd.read_csv('datasets/titanic.csv')
ti.sample(1)
# + id="zdorK5qgNfUm"
ti['ticket'] = "Ticket: " + ti['class'] + "; Price: $ " + ti['fare'].astype(str) + "; Port: " + ti['embark_town'] + ";"
# + id="4ZXp1_8ENfUm" outputId="56c0aa1c-5b44-4b81-d22a-a89dc25bb731"
ti[['class', 'fare', 'embark_town', 'ticket']].head(3)
# + id="ZSYqgIuCNfUn"
pattern = '(First|Second)'
ti['class'].str.match(pattern)
# + id="l3ZVWpIENfUn"
ti.loc[ ti['class'].str.match(pattern) , 'survived'].mean()
# + [markdown] id="adOjpfGdNfUo"
# ## How do I extract data with pandas?
# + id="6O_vA48vNfUo"
ti[['class', 'fare', 'embark_town', 'ticket']].head(3)
# + id="TzLtSkTuNfUo"
pattern = '([0-9.]+)'
ti['ticket'].str.extract(pattern).sample(4)
# + [markdown] id="cY4D2A4VNfUp"
# ## How do I substitute text with pandas?
# + id="ibT_QCy1NfUp"
# `$` is a regex anchor; request a literal replacement so the dollar sign
# itself is swapped (older pandas defaulted to regex=True, which matched
# end-of-string instead of the `$` character).
ti['ticket'].str.replace('$', '€', regex=False).sample(1)
# + [markdown] id="PDXbk5KgNfUp"
# ## What are regular expressions?
# + [markdown] id="52HhaGS8NfUq"
# Regular expressions are a language for describing patterns in text.
#
# They are separate from python, but may be used within python program. (And elsewhere, eg., often in SQL).
#
# They are notoriously difficult to read and write; and as a separate language, an additional tool to learn.
# + [markdown] id="B-zb0YVSNfUq"
# ## What regular expression patterns can I use?
#
# * literals
# * `a`, find me an `a`
# * `£`, find `£`
# * `!` means `!`
# * ... most symbols mean "find me"
# * `.`
# * find any **single** symbol
# * character classes -- find a **single** symbol
# * `[abc]` $\rightarrow$ **either** a, b, c
# * `[0-9]` $\rightarrow$ **either** 0, 1, 2, 3,...9
# * `[A-Z]` $\rightarrow$ **either** capital A, B, ... Z
# * inversions
# * `[^abc]` $\rightarrow$ **is not** `a` OR `b` OR `c`
# * `[^a-zA-Z0-9 ]` $\rightarrow$ **is not** alphanumeric-ish
#
# * alternatives -- find the character**s** given by...
#   * `(May|June|July)` $\rightarrow$ **the whole words** May OR June..
#
# + id="IqJcQMd7NfUq"
ti['ticket'].str.extract('(Ticket: (First|Second))')
# + id="6gz-kaY0NfUr"
ti['ticket'].str.extract('( [0-9][0-9])')
# + id="9pf9rlUvNfUr"
ti['ticket'].sample(1)
# + id="Ek-sHOwzNfUr"
ti['ticket'].str.extract('(Ticket: [A-Z])').sample(2)
# + id="OGOHxwuRNfUr"
ti['ticket'].str.extract('(T........)').sample(3)
# + id="-rhwmV7FNfUr"
ti['ticket'].str.extract('(Price: [^0-9A-Za-z] ..)').sample(3)
# + id="Bz7zu6NLNfUs"
ti['ticket'].str.extract('(Port: (Cherbourg|Southampton))').sample(3)
# + [markdown] id="Igmy-zJjNfUs"
# * repetitions
# * optional `?`
# * an optional number: `[0-9]?`
# * one or more `+`
# * one or more spaces: ` +`
# * optional, or more, `*`
# * ` [0-9][0-9]?.[0-9]*`
#
# + id="-YrKvkmDNfUs"
ti['ticket'].str.extract('([0-9][0-9]?.[0-9]*)').sample(3)
# + id="R45w_rQLNfUs"
ti['ticket'].str.extract('(Ticket: [a-zA-Z]+)').sample(3)
# + id="QlStz-PCNfUs"
row = 0
match = 1 # second match
ti['ticket'].str.extractall('([a-zA-Z]+: [a-zA-Z]+)').loc[row, match]
# + id="hcPrDhE2NfUt"
ti['ticket'].str.extract('([a-zA-Z]+tow?n)')
# + [markdown] id="vnJnJaImNfUt"
# * EXTRA:
# * escaping
# * How do I say, literally, the `.` symbol?
# * `\.`
#
# + id="Lg9_UZpXNfUt"
# Raw string: `\$` and `\.` are regex escapes, not valid Python string
# escapes (un-raw they raise a SyntaxWarning on Python 3.12+).
ti['ticket'].str.extract(r'(\$ [0-9]+\.[0-9]+)').sample(2)
# + [markdown] id="NIpjQIS3NfUu"
# * positional matching
# * `^` means **at the beginning**
# * `$` means **at the end**
# + id="uYvR7f58NfUu"
ti['ticket'].str.extractall('([a-zA-Z]+: [a-zA-Z]+;$)').sample(1)
# + id="iRFo8a_BNfUu" outputId="6185a28c-9d0e-4cf0-cb1b-09b3e8d90b62"
ti['ticket'].str.extractall('(^[a-zA-Z]+: [a-zA-Z]+;)').sample(1)
# + [markdown] id="ykbEKffQNfUv"
# ## Next Steps
#
# * review a "Regex Cheat Sheet"
# * also, eg., https://en.wikipedia.org/wiki/Regular_expression#Examples
# + [markdown] id="437C8vefNfUv"
# ## Exercise (30 min)
#
# * find all the words in the tickets
# * HINT: a word is a repeated letter followed by a space or a colon
# * HINT: `[ :]` means a space or a colon
# * find all the USD prices
# * HINT: ``` \$ ``` and repeated numbers
#
# * find all the high-price tickets
#   * HINT: consider `\$`, triple-digit number, `\.`
# + id="AdKINtmSNfUv"
# Solution — the bare identifier `Solution` was a section heading left in a
# code cell and raised NameError at run time; keep it as a comment instead.
# + id="6YOPiVHeNfUv" outputId="cf6c2ec4-6e59-4cdd-fde9-773a4f343ea8"
ti['ticket'].str.findall('([a-zA-Z]+[ :])').sample(10)
# + id="7MbTkka7NfUw" outputId="f7497a95-c32a-40b6-bdc8-4fa115063a77"
# Raw string so `\$` / `\.` reach the regex engine unchanged (invalid string
# escapes otherwise — SyntaxWarning on Python 3.12+).
ti['ticket'].str.extract(r'(\$ [0-9]+\.[0-9]+)').sample(2)
# + id="2d0jpTkNNfUw" outputId="9cc534d3-447d-4707-f6b2-80900a6eecb8"
row = 0
match = 1  # second match
# Raw string for the regex escapes; matches USD amounts of three or more dollars digits.
ti['ticket'].str.extractall(r'(\$ [0-9][0-9][0-9]+\.[0-9]+)')#.loc[row, 0]
# + id="yBIOpdpjNfUw"
|
notebooks/TextAnalysisRegex.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
# Built-in example dataset: one row per restaurant bill.
tips = sns.load_dataset("tips")
tips
# Plain pandas scatter plot.
tips.plot(kind="scatter", x="total_bill", y="tip")
# Seaborn: scatter plus a fitted regression line.
sns.lmplot(x="total_bill", y="tip", data=tips)
# Bar chart of category counts.
sns.countplot(x="sex", data=tips)
sns.countplot(x="sex", data=tips, hue="time")  # split each bar by meal time
# Facet the regression plot into a grid: columns by time, rows by smoker, colour by sex.
sns.lmplot(x="total_bill", y="tip", data=tips, col="time", hue="sex", row="smoker")
# The same grid built manually with FacetGrid plus a plain matplotlib scatter.
facet = sns.FacetGrid(tips, col="time", row="smoker", hue="sex")
facet.map(plt.scatter, "total_bill", "tip")
# Pure matplotlib equivalents: state-machine style, then object-oriented style.
plt.scatter(tips["total_bill"], tips["tip"])
fig, ax = plt.subplots(1, 1)
ax.scatter(tips["total_bill"], tips["tip"])
fig
# Mixing libraries: seaborn can draw onto an explicitly created matplotlib Axes.
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.scatter(tips["total_bill"], tips["tip"])
sns.countplot(x="sex", data=tips, ax=ax2)
fig.savefig("my_plot.svg")
|
03-plotting.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # 深度循环神经网络
#
# 本章到目前为止介绍的循环神经网络只有一个单向的隐藏层,在深度学习应用里,我们通常会用到含有多个隐藏层的循环神经网络,也称作深度循环神经网络。图 6.11 演示了一个有 $L$ 个隐藏层的深度循环神经网络,每个隐藏状态不断传递至当前层的下一时间步和当前时间步的下一层。
#
# 
#
#
# 具体来说,在时间步 $t$ 里,设小批量输入 $\boldsymbol{X}_t \in \mathbb{R}^{n \times d}$(样本数为 $n$,输入个数为 $d$),第 $\ell$ 隐藏层($\ell=1,\ldots,L$)的隐藏状态为 $\boldsymbol{H}_t^{(\ell)} \in \mathbb{R}^{n \times h}$(隐藏单元个数为 $h$),输出层变量为 $\boldsymbol{O}_t \in \mathbb{R}^{n \times q}$(输出个数为 $q$),且隐藏层的激活函数为 $\phi$。第 1 隐藏层的隐藏状态和之前的计算一样:
#
# $$\boldsymbol{H}_t^{(1)} = \phi(\boldsymbol{X}_t \boldsymbol{W}_{xh}^{(1)} + \boldsymbol{H}_{t-1}^{(1)} \boldsymbol{W}_{hh}^{(1)} + \boldsymbol{b}_h^{(1)}),$$
#
#
# 其中权重 $\boldsymbol{W}_{xh}^{(1)} \in \mathbb{R}^{d \times h}, \boldsymbol{W}_{hh}^{(1)} \in \mathbb{R}^{h \times h}$ 和偏差 $\boldsymbol{b}_h^{(1)} \in \mathbb{R}^{1 \times h}$ 分别为第 1 隐藏层的模型参数。
#
# 当 $1 < \ell \leq L$ 时,第 $\ell$ 隐藏层的隐藏状态的表达式为
#
# $$\boldsymbol{H}_t^{(\ell)} = \phi(\boldsymbol{H}_t^{(\ell-1)} \boldsymbol{W}_{xh}^{(\ell)} + \boldsymbol{H}_{t-1}^{(\ell)} \boldsymbol{W}_{hh}^{(\ell)} + \boldsymbol{b}_h^{(\ell)}),$$
#
#
# 其中权重 $\boldsymbol{W}_{xh}^{(\ell)} \in \mathbb{R}^{h \times h}, \boldsymbol{W}_{hh}^{(\ell)} \in \mathbb{R}^{h \times h}$ 和偏差 $\boldsymbol{b}_h^{(\ell)} \in \mathbb{R}^{1 \times h}$ 分别为第 $\ell$ 隐藏层的模型参数。
#
# 最终,输出层的输出只需基于第 $L$ 隐藏层的隐藏状态:
#
# $$\boldsymbol{O}_t = \boldsymbol{H}_t^{(L)} \boldsymbol{W}_{hq} + \boldsymbol{b}_q,$$
#
# 其中权重 $\boldsymbol{W}_{hq} \in \mathbb{R}^{h \times q}$ 和偏差 $\boldsymbol{b}_q \in \mathbb{R}^{1 \times q}$ 为输出层的模型参数。
#
# 同多层感知机一样,隐藏层个数 $L$ 和隐藏单元个数 $h$ 都是超参数。此外,如果将隐藏状态的计算换成门控循环单元或者长短期记忆的计算,我们可以得到深度门控循环神经网络。
#
# ## 小结
#
# * 在深度循环神经网络中,隐藏状态的信息不断传递至当前层的下一时间步和当前时间步的下一层。
#
#
# ## 练习
#
# * 将[“循环神经网络的从零开始实现”](rnn-scratch.md)一节中的模型改为含有 2 个隐藏层的循环神经网络。观察并分析实验现象。
#
#
# ## 扫码直达[讨论区](https://discuss.gluon.ai/t/topic/6730)
#
# 
|
chapter_recurrent-neural-networks/deep-rnn.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import initdirs
# +
import os
import cv2
import math
import multiprocessing as mp
from matplotlib import pyplot as plt
import networkx as nx
from glob import glob
import nxpd
nxpd.nxpdParams['show'] = 'ipynb'
# +
from epypes import pipeline
from epypes import compgraph
from visioncg import cbcalib
from visioncg.io import open_image, sorted_glob
# -
# +
imfiles1 = sorted_glob(os.path.join(initdirs.DATA_DIR, 'opencv_cb_left/*.jpg'))
imfiles2 = sorted_glob(os.path.join(initdirs.DATA_DIR, 'opencv_cb_right/*.jpg'))
# Load both chessboard image sets as grayscale (expected by the corner finder).
images1 = [open_image(f, cv2.IMREAD_GRAYSCALE) for f in imfiles1]
images2 = [open_image(f, cv2.IMREAD_GRAYSCALE) for f in imfiles2]
# Chessboard geometry: 9x6 inner corners; square size 10 (units per the board
# spec — presumably millimetres, confirm against the dataset).
psize = (9, 6)
sq_size = 10
im_wh = cbcalib.get_im_wh(images1[0])
# +
# Build the calibration computational graph; freeze the inputs that stay
# constant across runs (image size and board geometry).
cg_calib = cbcalib.CGCalibrateCamera()
calib_runner = compgraph.CompGraphRunner(cg_calib, frozen_tokens={
    'im_wh': im_wh,
    'pattern_size_wh': psize,
    'square_size': sq_size
})
nxpd.draw(calib_runner.to_networkx())
# +
# Calibrate each camera from its own image set and keep the intrinsics
# (camera matrix + distortion coefficients) for later pose estimation.
calib_runner.run(calibration_images=images1)
cm1 = calib_runner['camera_matrix']
dc1 = calib_runner['dist_coefs']
calib_runner.run(calibration_images=images2)
cm2 = calib_runner['camera_matrix']
dc2 = calib_runner['dist_coefs']
# -
# +
cg_pnp = cbcalib.CGSolvePnP()
pnp_params = {
'pattern_points': calib_runner['object_points'][0],
'pattern_size_wh': psize,
}
pnp_runner = compgraph.CompGraphRunner(cg_pnp, pnp_params)
nxpd.draw(pnp_runner.to_networkx())
# -
pnp_runner.run(
image=images1[0],
cam_matrix=cm1,
dist_coefs=dc1
)
# +
print('Rotation matrix:')
print(pnp_runner['rmat'])
print('Translation vector:')
print(pnp_runner['tvec'])
# -
|
notebooks/demo_pnp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # For nos Dicionarios
#
# ### Estrutura:
# + active=""
# for chave in dicionario:
# faça alguma coisa
# +
vendas_tecnologia = {'iphone': 15000, 'samsung galaxy': 12000, 'tv samsung': 10000, 'ps5': 14300, 'tablet': 1720, 'ipad': 1000, 'tv philco': 2500, 'notebook hp': 1000, 'notebook dell': 17000, 'notebook asus': 2450}
# Demonstrating `for` over a dict: .items() yields key and value together,
# avoiding one extra dict lookup per iteration.
for chave, valor in vendas_tecnologia.items():
    print('{} : {}'.format(chave, valor))
# -
# - Qual o total de notebooks vendidos?
# Sum only the entries whose key mentions "notebook".
total = sum(valor for chave, valor in vendas_tecnologia.items() if 'notebook' in chave)
print(total)
|
dicionarios/for.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This example contains the necessary bits of code to run the federated training with homomorphic encryption (he).
# +
import os
import sys
import requests
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score
import jsonpickle as jpk
import time
import numpy as np
import pandas as pd
import torch.nn as nn
from tqdm.notebook import tqdm
# Federated imports
import forcast_federated_learning as ffl
# Parameters
num_clients = 10
com_rounds = 40
seed = 0
batch_size = 1
noise_multiplier = 0.3
max_grad_norm = 0.5
# Metrics
df_metrics = pd.DataFrame(dict(zip(['round', 'rmse', 'r2_score', 'epsilon', 'delta'], [int,[],[],[],[]])))
# Load local train data
X, y, df_data, target_names = ffl.datasets.load_scikit_iris()
# Split the database in train and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=seed)
# Create custom pytorch datasers for train and testing
traindata = ffl.datasets.StructuredDataset(X_train, y_train, categorical=True)
testdata = ffl.datasets.StructuredDataset(X_test, y_test, categorical=True)
# +
# Split the train data and use only a fraction
traindata_split = ffl.data.random_split(traindata, num_clients=num_clients, seed=seed)
# Get data loader
train_loaders = [ffl.utils.DataLoader(traindata, batch_size=batch_size, shuffle=True, seed=seed) for traindata in traindata_split]
test_loader = ffl.utils.DataLoader(testdata, batch_size=len(testdata), shuffle=True, seed=seed)
# +
# Train params
delta = 10**-np.ceil(np.log10(len(traindata))) # delta < 1/len(dataset)
security_params = {'noise_multiplier': noise_multiplier, 'max_grad_norm': max_grad_norm, 'batch_size': batch_size, 'sample_size': len(traindata), 'target_delta': delta, 'secure_rng': True}
optimizer_params = {'lr': 0.01}
train_params = {'epochs': 4}
local_models = []
for _ in range(num_clients):
# Create federated model based on a pytorch model
num_features, num_classes = 4, 3
model = ffl.models.NN(input_dim=num_features, output_dim=num_classes) # pytorch model
loss_fn = nn.CrossEntropyLoss() # classification
local_model = ffl.LocalModel(model, model_type = 'nn', loss_fn=loss_fn, train_params=train_params)
local_model.optimizer = ffl.optim.Adam(local_model.parameters(), **optimizer_params)
local_model.privacy_engine = ffl.security.PrivacyEngine(local_model, **security_params)
local_model.privacy_engine.attach(local_model.optimizer)
local_models.append(local_model)
# -
model = local_model.model # pytorch model
fed_model = ffl.FederatedModel(model, model_type='nn')
public_context, secret_key = ffl.encryption.get_context()
# As a coment in practice, when deploying the public_context object need to be serialized to be shared with the clients with `context = public_context.serialize()`, and then each client needs to load it onto a python object with `context = ffl.encryption.load_context(context)`.
for com_round in tqdm(range(com_rounds)):
    # Local training: every client takes one training step on its own shard.
    for local_model, train_loader in zip(local_models, train_loaders):
        local_model.step(train_loader)
    # Encrypt each client's weights with the shared public context before
    # they leave the client (homomorphic encryption).
    client_weights = []
    for local_model in local_models:
        state_dict = local_model.state_dict()
        enc_state_dict = ffl.encryption.EncStateDict(state_dict)
        enc_state_dict = enc_state_dict.encrypt(public_context)
        client_weights.append(enc_state_dict)
    # Shard sizes act as aggregation weights (weighted federated averaging).
    client_lens = [len(traindata) for traindata in traindata_split]
    ## Server aggregate
    fed_model.server_agregate(client_weights, client_lens, secret_key=secret_key)
    # Broadcast the aggregated weights back to every client.
    weights = fed_model.state_dict()
    for local_model in local_models:
        local_model.load_state_dict(weights)
    # Evaluate using the last client's model and report the differential-privacy
    # budget spent so far, if a privacy engine is attached.
    acc, _ = local_model.test(test_loader)
    if local_model.privacy_engine: # privacy spent
        epsilon, best_alpha = local_model.privacy_engine.get_privacy_spent(delta)
        print(f'Test accuracy: {acc:.2f} - Privacy spent: (ε = {epsilon:.2f}, δ = {delta:.2f})')
    else:
        print(f'Test accuracy: {acc:.2f}')
|
examples/notebooks/data_classification_iris_he.ipynb
|
# # Colab
# ## Recipes
#
# ### Attach Google Drive
# ```py
# from google.colab import drive
# drive.mount("/content/drive", force_remount=True)
# ```
#
# ### Tensorboard
# ```
# # %load_ext tensorboard
#
# # !mkdir /content/training_logs
# # %tensorboard --logdir '/content/training_logs
# ```
#
# ### Clone private Github repo
# ```py
# import urllib.parse
# from getpass import getpass
#
# username = input("Github username: ")
# password = getpass("Github password: ")
#
# username_encoded = urllib.parse.quote(username)
# password_encoded = urllib.parse.quote(password)
# repo_url = f"https://{username_encoded}:{password_encoded}@github.com/bdsaglam/torch-scae.git"
#
# # !git clone $repo_url
# ```
#
|
nbs/craft/craft.colab.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: MyPython
# language: Python
# name: mypython
# ---
##do_runcode
# ##%overwritefile
# ##%file:src/do_gjs_runcode.py
# ##%noruncode
def do_runcode(self,return_code,file_name,magics,code, silent, store_history=True,
               user_expressions=None, allow_stdin=True):
    """Execute the generated JavaScript file with the ``gjs`` interpreter.

    Launches ``gjs <file_name> <args>`` as a Jupyter-aware subprocess,
    registers the process in the kernel's running-process table, fires the
    plugin hooks around the run, and waits for the process to finish.

    Returns ``(bcancel_exec, retinfo, magics, code, file_name, retstr)``.
    """
    return_code=return_code
    file_name=file_name
    bcancel_exec=False
    retinfo=self.mymagics.get_retinfo()
    retstr=''
    ## before the code is run
    p = self.mymagics.create_jupyter_subprocess(['gjs',file_name]+ magics['_st']['args'],cwd=None,shell=False,env=self.mymagics.addkey2dict(magics,'env'),magics=magics)
    #p = self.create_jupyter_subprocess([binary_file.name]+ magics['args'],cwd=None,shell=False)
    #p = self.create_jupyter_subprocess([self.master_path, binary_file.name] + magics['args'],cwd='/tmp',shell=True)
    # Track the live process so other magics can find (and e.g. kill) it.
    self.mymagics.g_rtsps[str(p.pid)]=p
    return_code=p.returncode
    ## after the process has started: give plugins a chance to react/cancel
    bcancel_exec,retstr=self.mymagics.raise_plugin(code,magics,return_code,file_name,3,2)
    # if bcancel_exec:return bcancel_exec,retinfo,magics, code,file_name,retstr
    if len(self.mymagics.addkey2dict(magics,'showpid'))>0:
        self.mymagics._write_to_stdout("The process PID:"+str(p.pid)+"\n")
    # Block until the subprocess finishes.
    return_code=p.wait_end(magics)
    # del self.g_rtsps[str(p.pid)]
    # p.write_contents(magics)
    ##
    ## pick up the final exit status from the process object
    return_code=p.returncode
    ## the run has finished
    if p.returncode != 0:
        self.mymagics._log("Executable exited with code {}".format(p.returncode),2)
    return bcancel_exec,retinfo,magics, code,file_name,retstr
##do_compile_code
# ##%overwritefile
# ##%file:src/do_c_compilecode.py
# ##%noruncode
def do_compile_code(self, return_code, file_name, magics, code, silent, store_history=True,
                    user_expressions=None, allow_stdin=True):
    """No-op compile step: gjs runs scripts directly, so nothing is built.

    Mirrors the common kernel-hook signature and simply reports success,
    returning ``(bcancel_exec, retinfo, magics, code, file_name, retstr)``.
    """
    retinfo = self.mymagics.get_retinfo()
    # Nothing to compile for an interpreted language; never cancel execution.
    return False, retinfo, magics, code, file_name, ''
##do_create_codefile
# ##%overwritefile
# ##%file:src/do_gjs_create_codefile.py
# ##%noruncode
def do_create_codefile(self, magics, code, silent, store_history=True,
                       user_expressions=None, allow_stdin=True):
    """Write the cell source to a temporary ``.js`` file for gjs to run.

    Returns ``(bcancel_exec, retinfo, magics, code, file_name, retstr)``
    where ``file_name`` is the path of the freshly created temp file.
    """
    self.mymagics.get_retinfo()  # kept for call-count parity with the other hooks
    tmp = self.mymagics.create_codetemp_file(magics, code, suffix='.js')
    # Never cancels; hand the temp-file path back to the caller.
    return False, self.mymagics.get_retinfo(), magics, code, tmp.name, ''
##do_preexecute
# ##%overwritefile
# ##%file:src/do_gjs_preexecute.py
# ##%noruncode
def do_preexecute(self, code, magics, silent, store_history=True,
                  user_expressions=None, allow_stdin=False):
    """Pre-execution hook: nothing to prepare for gjs, so pass straight through."""
    # Report "do not cancel" together with fresh return info.
    return False, self.mymagics.get_retinfo(), magics, code
# +
## new kernel.py
# ##%overwritefile
# ##%file:../../../jupyter-MyGjs-kernel/jupyter_MyGjs_kernel/kernel.py
##//%file:kernel.py
# ##%noruncode
#
# MyGjs Jupyter Kernel
#
# ##%include:../../src/head.py
from .MyKernel import MyKernel
class MyGjsKernel(MyKernel):
    """Jupyter kernel that runs JavaScript cells through the GNOME ``gjs`` interpreter."""

    # Kernel identification reported to Jupyter.
    implementation = 'jupyter-MyGjs-kernel'
    implementation_version = '1.0'
    language = 'JavaScript'
    language_version = ''
    # Frontend metadata (syntax highlighting, export).  NOTE(review): the
    # 'version' fields report the host Python version, not a JavaScript
    # version — confirm this is intentional.
    language_info = {'name': 'javascript',
                     'version': sys.version.split()[0],
                     'mimetype': 'text/javascript',
                     'codemirror_mode': {
                         'name': 'javascript',
                         'version': sys.version_info[0]
                     },
                     'pygments_lexer': 'javascript%d' % 3,
                     'nbconvert_exporter': 'javascript',
                     'file_extension': '.js'}
    # gjs executes source files directly; no compile step.
    runfiletype='script'
    banner = "MyGjs kernel.\n" \
             "Uses gjs, compiles in javascript, and creates source code files and executables in temporary folder.\n"
    kernelinfo="[MyGjs]"
    # Template wrapped around cell code when a main entry point is needed.
    # NOTE(review): this signature looks like Dart, not JavaScript — confirm.
    main_head = "\n" \
                "\n" \
                "int main(List<String> arguments){\n"
    main_foot = "\nreturn 0;\n}"
    ##//%include:src/comm_attribute.py
    def __init__(self, *args, **kwargs):
        """Initialise the base kernel and tag this instance with a timestamp."""
        super(MyGjsKernel, self).__init__(*args, **kwargs)
        self.runfiletype='script'
        # Unique-ish per-instance tag used in log output.
        self.kernelinfo="[MyGjsKernel{0}]".format(time.strftime("%H%M%S", time.localtime()))
#################
# ##%include:src/do_gjs_runcode.py
# ##%include:src/do_c_compilecode.py
# ##%include:src/do_gjs_create_codefile.py
# ##%include:src/do_gjs_preexecute.py
# -
|
kernel/Gjs/l_mykernel.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Letter Counter - Contador de letras
#
#
# Un pequeño notebook para contar la frecuencia de las letras en el castellano (o en cualquier idioma, siempre que le pases la lista de letras en un CSV).
#
# Esto que estas leyendo, es un Jupyter notebook. Puedes utilizarlo como documeto, o como herramienta ejecutable (y que permite modificaciones de forma interactiva). Para saber mas sobre como ejecutarlo, mira la documentacion en [jupyter.org](https://jupyter.org/) y revisa el Makefile.
#
# ### Loading Internal Assets
#
# You can ignore this part.
#
import unidecode
from collections import defaultdict
# ### Configuration - Configuracion
#
# If you want to change the source of the data, do it here.
#
# The format is csv, with one column, with heading (1st row is ignored).
#
# ---
#
# Si necesitas cambiar la fuente de datos, hazlo aqui.
#
# El formato es CSV, con una columna, con cabecera (la primera fila se ignora).
#
# +
filename = "../data/ListaTodasPalabras.csv"
# -
#
#
# ### Counting characters
#
# Using a classic method, and removing accents.
#
#
# ---
#
#
# Usando un metodo clasico, y eliminando acentos.
#
#
# +
filename = "../data/ListaTodasPalabras.csv"
# Initialise some variables
d = defaultdict(lambda:0)
total = 0
palabras = []
with open(filename) as fh:
# count the total words and the letters
for word in fh.readlines():
if word == "PALABRAS\n":
continue # Ignore the header name
total += 1
palabras.append(word.strip())
noacc = unidecode.unidecode(word)
for letter in noacc.strip():
d[letter] += 1
print("Letter frequency from least to most")
s = {k: v for k, v in sorted(d.items(), key=lambda item: item[1])}
for item,val in s.items():
print(f"{item} : {val}")
print("Total words counted")
print(total)
# +
# Initialise some variables
d = defaultdict(lambda:0)
total = 0
palabras = []
with open(filename) as fh:
# count the total words and the letters
for word in fh.readlines():
if word == "PALABRAS\n":
continue # Ignore the header name
total += 1
palabras.append(word.strip())
noacc = unidecode.unidecode(word)
for letter in noacc.strip():
d[letter] += 1
# +
print("Letter frequency from least to most")
s = {k: v for k, v in sorted(d.items(), key=lambda item: item[1])}
for item,val in s.items():
print(f"{item} : {val}")
print("Total words counted")
print(total)
# -
# ## Obtain list of usable words from 5 to 6 letters
#
# Using pandas and matplotlib.
#
# ---
#
# Utilizando pandas y matplotlib.
#
#
# Loading libraries
import pandas as pd
import matplotlib
# +
# Obtain the list of words from the original database
## SLOW Run carefully.
wordlist = r'../data/Anexo I - palabras 3-9 letras.xls'
#load the sheets for 5 and 6 letters words as Pandas DataFrames
words5letters = pd.read_excel (wordlist, sheet_name="5")
words6letters = pd.read_excel (wordlist, sheet_name="6")
# Extract only the words and the lexyc frequency of words
w5l = pd.DataFrame(words5letters ,columns=["PALABRA","FRECUENC"])
w6l = pd.DataFrame(words6letters , columns=["PALABRA", "FRECUENC"])
w6l
# -
# ls ../data/*
# ### Counting the letters into the frequency database
#
# To build the statistics about how frequent each letter is.
# +
## Obtain the full database
## SLOW Run carefully.
# Spreadsheets with one sheet per word length (3-9 letters and 10-16 letters).
wordlist_fileSHORT = r'../data/Anexo I - palabras 3-9 letras.xls'
wordlist_fileLONG = r'../data/Anexo II - palabras 10-16 letras.xls'
# DataFrame.append was removed in pandas 2.0: collect the per-sheet frames
# and concatenate once at the end (also much faster than repeated append).
# Only the word and its lexical frequency columns are kept.
frames = []
for i in range(3, 10):  # 3- to 9-letter words, inclusive
    sheety = pd.read_excel(wordlist_fileSHORT, sheet_name=f"{i}")
    frames.append(pd.DataFrame(sheety, columns=["PALABRA", "FRECUENC"]))
for i in range(10, 17):  # 10- to 16-letter words, inclusive
    sheety = pd.read_excel(wordlist_fileLONG, sheet_name=f"{i}")
    frames.append(pd.DataFrame(sheety, columns=["PALABRA", "FRECUENC"]))
all_words = pd.concat(frames)
all_words
# +
## Clear Up the statistics
## GLOBAL Variables
# total of words counted
word_sum = 0
# Freq of each letter
letter_frequency = defaultdict(lambda:0)
# Distinguish accent (True = i and í are 2 different letters; False = they are 2 letter i's)
accent_dif = False # False to ignore, as we won't be using them either for the exercises.
# +
## Count the word into the stats D
def no_accent(letter):
    """Return *letter* with any accent stripped (e.g. 'á' -> 'a')."""
    return unidecode.unidecode(letter)

def is_vowel(letter):
    """Return True when *letter* is a vowel, accented or not."""
    return no_accent(letter) in "aeiou"

# Sanity checks: accents must not change the classification.
for accented, plain in (("a", "a"), ("á", "a"), ("v", "v")):
    assert no_accent(accented) == plain
for letter, expected in (("a", True), ("á", True), ("v", False)):
    assert is_vowel(letter) == expected
def count_word(word, letter_frequency = letter_frequency, accent_dif = accent_dif):
    """Add one word to the running letter-frequency statistics.

    Increments the module-level ``word_sum`` counter and, for every letter in
    *word*, bumps its count in *letter_frequency*.  When *accent_dif* is
    False, accented vowels are folded onto their plain form first (the ñ is
    never folded).

    Non-string entries are skipped and ``None`` is returned — pandas yields
    float NaN for blank spreadsheet cells.
    """
    global word_sum
    # Fix: `type(word) == type(0.1)` replaced with the idiomatic isinstance.
    if isinstance(word, float):
        return None
    # Count the word towards the total.
    word_sum += 1
    for letter in word:
        if not accent_dif and is_vowel(letter):
            # Fold accented vowels onto their plain form.
            letter_frequency[no_accent(letter)] += 1
        else:
            letter_frequency[letter] += 1
# +
### DEBUG Error checking which words are missing on the new data set
len(palabras)
def is_not_in(word, palabras=palabras):
    """Return *word* when it is missing from *palabras*, else None."""
    return None if word in palabras else word

# Sanity checks against the legacy word list.
assert is_not_in("aba") is None
assert is_not_in("somethingrandom") == "somethingrandom"
palabras[0]
# +
# DEBUG : Running it in one word.
# `all_words` is a single DataFrame now (it used to be a dict of frames keyed
# by word length), so `all_words[3]` raised KeyError — index a row directly.
p = all_words.PALABRA.iloc[15]
f = all_words.FRECUENC.iloc[15]
print(p)
print(f)
print("Stadisticas antes", word_sum )
print("Stadisticas antes", letter_frequency )
count_word(p)
print("Stadisticas despues", word_sum )
print("Stadisticas despues", letter_frequency )
# +
# Running it on all the words
## Building the Freq Table. Run Once.
palabras_not_in = []
def build_letter_freq_table():
    """Accumulate letter frequencies over every word in ``all_words``.

    Bug fix: the original body iterated ``df_words.PALABRA`` but the loop
    binding ``df_words`` had been commented out, so calling the function
    raised NameError.  It now walks the PALABRA column of the module-level
    ``all_words`` DataFrame directly.
    """
    for word in all_words.PALABRA:
        # Feed each word into the module-level statistics.
        try:
            count_word(word)
        except TypeError as e:
            # Log the offending entry but keep processing the rest.
            print(e)
            print(word)
            print(type(word))
            print("WTF")
    return None
build_letter_freq_table()
letter_frequency
# -
print(len(palabras))
print(len(palabras_not_in))
palabras_not_in
# Fix: `df1` was a plain list (lists have no .merge) and the `df2 =` line was
# left incomplete (SyntaxError).  Compare the legacy word list against the
# spreadsheet-derived one: right-only rows are words missing from `palabras`.
df1 = pd.DataFrame({'PALABRA': palabras})
df2 = all_words[['PALABRA']]
merged = df1.merge(df2, indicator=True, how='outer')
merged[merged['_merge'] == 'right_only']
# +
print("letter_frequency",letter_frequency)
print("word_sum",word_sum)
# +
# tenemos la tabla de frec. Necesitamos phi. Hay que calcular, el phi de una palabla.
# para el phi, necesitamos, el % de frequencias
#suma todas las frec
# Total number of letter occurrences across the corpus.
# Fix: the dict name was misspelt `letter_freqency`, which raised NameError.
tot_letters = sum(letter_frequency.values())
# -
# splitting the words into lists of letters
splits = df.apply(lambda x: [y for y in x[0].strip()], axis='columns')
splits
# ## TXT
#
# # WIP Do not read forward
#
#
# Stats of strokes per letter.
# Stroke count ("rasgos") per lowercase letter.
rasgos = dict(
    a=3, b=4, c=2, d=3, e=3, f=4, g=4, h=4, i=3,
    j=3, k=5, l=2, m=6, n=4, o=3, p=4, q=3, r=3,
    s=3, t=4, u=4, v=4, w=6, x=4, y=5, z=5,
)
# +
## Trying to calculate Phi
## Sum of all the % Frequency of each letter in the word
def get_phi(letter):
    """Placeholder frequency weight for *letter* (real table not wired in yet)."""
    return 1

def calc_phi(word=None):
    """Return the phi value of *word*: the sum of its per-letter frequencies.

    With the placeholder ``get_phi`` this equals the word length; ``None``
    (or no argument) yields 0.
    """
    if word is None:
        return 0
    return sum(get_phi(letter) for letter in word)
# -
print(calc_phi("aa"), 199)
print(calc_phi(""),0)
print(calc_phi(),0)
print(calc_phi("abadia"))
# +
## Trying to calculate Stroke sum RHO
## Sum of all the strokes of each letter in a word
def get_rho(letter):
    """Stroke count of *letter*, looked up in the module-level `rasgos` table."""
    return rasgos[letter]

def calc_rho(word=None):
    """Return the rho value of *word*: total strokes over all its letters.

    ``None`` (or no argument) yields 0.
    """
    if word is None:
        return 0
    return sum(get_rho(letter) for letter in word)
# -
print(calc_rho("aa"), 6)
print(calc_rho(""),0)
print(calc_rho(),0)
print(calc_rho("peluca"),18)
|
code/Letter Counting.bck.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os, sys, time, copy
import numpy as np
import matplotlib.pyplot as plt
import pickle
import myokit
sys.path.append('../')
sys.path.append('../Protocols')
sys.path.append('../Models')
sys.path.append('../Lib')
import protocol_lib, vc_protocols
import mod_trace
from ord2011 import ORD2011
# +
'''
O'Hara-Rudy CiPA v1.0 (2017)
'''
cells = {
'Endocardial' : 0,
'Epicardial' : 1,
'Mid-myocardial' : 2,
}
current_li = ['ina.INa', 'inal.INaL', 'ito.Ito', 'ical.ICaL', 'ikr.IKr', 'iks.IKs', 'ik1.IK1']
protocol = vc_protocols.hERG_CiPA()
# protocol = pickle.load(open("./trial_steps_ramps_Kernik_200_50_4_-120_60_paper/shortened_trial_steps_ramps_Kernik_200_50_4_-120_60_500_artefact_True_short.pkl", 'rb'))
# protocol = pickle.load(open("./trial_steps_ramps_ORD2011_288_51_4_-121_61/shortened_trial_steps_ramps_ORD2011_288_51_4_-121_61_500_artefact_False_short.pkl", 'rb'))
end_time = protocol.get_voltage_change_endpoints()[-1]
t_span = (0, end_time)
t_eval = np.linspace(0, end_time, 10000)
print(end_time)
# -
import simulator_myokit
'''
Simulation with Myokit
'''
# Load the voltage-clamp variant of the O'Hara CiPA v1 (2017) model.
model_path = "../mmt-model-files/ohara-cipa-v1-2017_VC.mmt"
model_myokit, protocol_myokit, script = myokit.load(model_path)
# Tight solver tolerances (1e-8); vhold (≈ -88.0) is presumably the holding
# potential in mV between protocol steps — confirm against the Simulator API.
sim_myokit = simulator_myokit.Simulator(model_myokit, protocol, max_step=1.0, abs_tol=1e-8, rel_tol=1e-8, vhold=-8.80019046500000002e1) # 1e-12, 1e-14 # 1e-08, 1e-10 # max_step=1, atol=1E-2, rtol=1E-4 # defalt: abs_tol=1e-06, rel_tol=0.0001
sim_myokit.name = "OHara2017-CiPA-v1"
# +
start_time = time.time()
# Epicardial cell type; pre-simulate 5000 (ms, presumably) to settle toward
# steady state, then run the voltage-clamp protocol logging the chosen currents.
sim_myokit.simulation.set_constant('cell.mode', cells['Epicardial'])
y0_myokit = sim_myokit.pre_simulate(5000, sim_type=1)
d_myokit = sim_myokit.simulate(end_time, log_times=None, extra_log=current_li)
print("--- %s seconds ---"%(time.time()-start_time))
# +
'''
Plot
'''
fig, axes = plt.subplots(8,1, figsize=(15,20))
fig.suptitle(sim_myokit.name, fontsize=14)
plot_li = ['Voltage'] + current_li
for i, name in enumerate(plot_li):
# ax.set_title('Simulation %d'%(simulationNo))
# axes[i].set_xlim(model_scipy.times.min(), model_scipy.times.max())
# ax.set_ylim(ylim[0], ylim[1])
axes[i].set_xlabel('Time (ms)')
axes[i].set_ylabel(f'{name} (mV)')
if i==0:
# axes[i].plot( d_myokit['engine.time'], protocol_myokit.value_at_times(d_myokit['engine.time']), label=name, color='k')
axes[i].plot( t_eval, [protocol.get_voltage_at_time(t) for t in t_eval], label=name, color='k', linewidth=3)
else:
name = name.split('.')[1]
axes[i].plot( d_myokit['engine.time'], sim_myokit.current_response_info.get_current([name]), label=name, color='k', linewidth=3)
axes[i].legend()
axes[i].grid()
plt.subplots_adjust(left=0.07, bottom=0.05, right=0.95, top=0.95, wspace=0.5, hspace=0.15)
plt.show()
fig.savefig(os.path.join('Results', "OHara2017-CiPA-v1-VC.jpg"), dpi=100)
# -
print("Complete")
|
Examples/OHara_CiPA_v1_2017_VC.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="FhGuhbZ6M5tl"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" id="AwOEIRJC6Une"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + cellView="form" id="KyPEtTqk6VdG"
#@title MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# # copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# + [markdown] id="EIdT9iu_Z4Rb"
# # Basic regression: Predict fuel efficiency
# + [markdown] id="bBIlTPscrIT9"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/regression"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/keras/regression.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] id="AHp3M9ZmrIxj"
# In a *regression* problem, the aim is to predict the output of a continuous value, like a price or a probability. Contrast this with a *classification* problem, where the aim is to select a class from a list of classes (for example, where a picture contains an apple or an orange, recognizing which fruit is in the picture).
#
# This notebook uses the classic [Auto MPG](https://archive.ics.uci.edu/ml/datasets/auto+mpg) Dataset and builds a model to predict the fuel efficiency of late-1970s and early 1980s automobiles. To do this, provide the model with a description of many automobiles from that time period. This description includes attributes like: cylinders, displacement, horsepower, and weight.
#
# This example uses the `tf.keras` API, see [this guide](https://www.tensorflow.org/guide/keras) for details.
# + id="moB4tpEHxKB3"
# Use seaborn for pairplot
# !pip install -q seaborn
# + id="1rRo8oNqZ-Rj"
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Make numpy printouts easier to read (3 decimals, suppress scientific notation).
np.set_printoptions(precision=3, suppress=True)
# + id="9xQKvCJ85kCQ"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# NOTE(review): `preprocessing` is the experimental Keras preprocessing-layers
# namespace of TF 2.x; in newer TF releases these layers moved under
# `tf.keras.layers` directly — confirm against the installed TF version.
from tensorflow.keras.layers.experimental import preprocessing
# Print the running TensorFlow version for reproducibility.
print(tf.__version__)
# + [markdown] id="F_72b0LCNbjx"
# ## The Auto MPG dataset
#
# The dataset is available from the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/).
#
# + [markdown] id="gFh9ne3FZ-On"
# ### Get the data
# First download and import the dataset using pandas:
# + id="CiX2FI4gZtTt"
# UCI Auto MPG dataset: space-separated, '?' marks missing values.
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
                'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(url, names=column_names,
                          na_values='?', comment='\t',
                          sep=' ', skipinitialspace=True)
# + id="2oY3pMPagJrO"
# Work on a copy so `raw_dataset` stays untouched.
dataset = raw_dataset.copy()
dataset.tail()
# + [markdown] id="3MWuJTKEDM-f"
# ### Clean the data
#
# The dataset contains a few unknown values.
# + id="JEJHhN65a2VV"
# Count missing values per column ('?' entries were parsed as NaN above).
dataset.isna().sum()
# + [markdown] id="9UPN0KBHa_WI"
# Drop those rows to keep this initial tutorial simple.
# + id="4ZUDosChC1UN"
dataset = dataset.dropna()
# + [markdown] id="8XKitwaH4v8h"
# The `"Origin"` column is really categorical, not numeric. So convert that to a one-hot:
#
# Note: You can set up the `keras.Model` to do this kind of transformation for you. That's beyond the scope of this tutorial. See the [preprocessing layers](../structured_data/preprocessing_layers.ipynb) or [Loading CSV data](../load_data/csv.ipynb) tutorials for examples.
# + id="gWNTD2QjBWFJ"
# Map the numeric origin codes to country names before one-hot encoding.
dataset['Origin'] = dataset['Origin'].map({1: 'USA', 2: 'Europe', 3: 'Japan'})
# + id="ulXz4J7PAUzk"
# One-hot encode 'Origin' into USA/Europe/Japan indicator columns.
dataset = pd.get_dummies(dataset, prefix='', prefix_sep='')
dataset.tail()
# + [markdown] id="Cuym4yvk76vU"
# ### Split the data into train and test
#
# Now split the dataset into a training set and a test set.
#
# Use the test set in the final evaluation of our models.
# + id="qn-IGhUE7_1H"
# 80/20 train/test split; random_state=0 makes the split reproducible.
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
# + [markdown] id="J4ubs136WLNp"
# ### Inspect the data
#
# Have a quick look at the joint distribution of a few pairs of columns from the training set.
#
# Looking at the top row it should be clear that the fuel efficiency (MPG) is a function of all the other parameters. Looking at the other rows it should be clear that they are each functions of each other.
# + id="oRKO_x8gWKv-"
sns.pairplot(train_dataset[['MPG', 'Cylinders', 'Displacement', 'Weight']], diag_kind='kde')
# + [markdown] id="gavKO_6DWRMP"
# Also look at the overall statistics, note how each feature covers a very different range:
# + id="yi2FzC3T21jR"
train_dataset.describe().transpose()
# + [markdown] id="Db7Auq1yXUvh"
# ### Split features from labels
#
# Separate the target value, the "label", from the features. This label is the value that you will train the model to predict.
# + id="t2sluJdCW7jN"
train_features = train_dataset.copy()
test_features = test_dataset.copy()
# .pop() removes 'MPG' from the feature frames and returns it as the label series.
train_labels = train_features.pop('MPG')
test_labels = test_features.pop('MPG')
# + [markdown] id="mRklxK5s388r"
# ## Normalization
#
# In the table of statistics it's easy to see how different the ranges of each feature are.
# + id="IcmY6lKKbkw8"
train_dataset.describe().transpose()[['mean', 'std']]
# + [markdown] id="-ywmerQ6dSox"
# It is good practice to normalize features that use different scales and ranges.
#
# One reason this is important is because the features are multiplied by the model weights. So the scale of the outputs and the scale of the gradients are affected by the scale of the inputs.
#
# Although a model *might* converge without feature normalization, normalization makes training much more stable.
# + [markdown] id="aFJ6ISropeoo"
# ### The Normalization layer
# The `preprocessing.Normalization` layer is a clean and simple way to build that preprocessing into your model.
#
# The first step is to create the layer:
# + id="JlC5ooJrgjQF"
# Normalization layer: shifts/scales each feature using stored mean/variance.
normalizer = preprocessing.Normalization()
# + [markdown] id="XYA2Ap6nVOha"
# Then `.adapt()` it to the data:
# + id="CrBbbjbwV91f"
# adapt() computes per-feature statistics from the training features only
# (the test set must not influence the normalization).
normalizer.adapt(np.array(train_features))
# + [markdown] id="oZccMR5yV9YV"
# This calculates the mean and variance, and stores them in the layer.
# + id="GGn-ukwxSPtx"
print(normalizer.mean.numpy())
# + [markdown] id="oGWKaF9GSRuN"
# When the layer is called it returns the input data, with each feature independently normalized:
# + id="2l7zFL_XWIRu"
# Sanity check: normalize the first training example and show before/after.
first = np.array(train_features[:1])
with np.printoptions(precision=2, suppress=True):
  print('First example:', first)
  print()
  print('Normalized:', normalizer(first).numpy())
# + [markdown] id="6o3CrycBXA2s"
# ## Linear regression
#
# Before building a DNN model, start with a linear regression.
# + [markdown] id="lFby9n0tnHkw"
# ### One Variable
#
# Start with a single-variable linear regression, to predict `MPG` from `Horsepower`.
#
# Training a model with `tf.keras` typically starts by defining the model architecture.
#
# In this case use a `keras.Sequential` model. This model represents a sequence of steps. In this case there are two steps:
#
# * Normalize the input `horsepower`.
# * Apply a linear transformation ($y = mx+b$) to produce 1 output using `layers.Dense`.
#
# The number of _inputs_ can either be set by the `input_shape` argument, or automatically when the model is run for the first time.
# + [markdown] id="Xp3gAFn3TPv8"
# First create the horsepower `Normalization` layer:
# + id="1gJAy0fKs1TS"
horsepower = np.array(train_features['Horsepower'])
# Single-feature normalizer; input_shape=[1,] fixes the model input to one value.
horsepower_normalizer = preprocessing.Normalization(input_shape=[1,])
horsepower_normalizer.adapt(horsepower)
# + [markdown] id="4NVlHJY2TWlC"
# Build the sequential model:
# + id="c0sXM7qLlKfZ"
# Normalization followed by one linear unit: y = m*x + b.
horsepower_model = tf.keras.Sequential([
    horsepower_normalizer,
    layers.Dense(units=1)
])
horsepower_model.summary()
# + [markdown] id="eObQu9fDnXGL"
# This model will predict `MPG` from `Horsepower`.
#
# Run the untrained model on the first 10 horse-power values. The output won't be good, but you'll see that it has the expected shape, `(10,1)`:
# + id="UfV1HS6bns-s"
horsepower_model.predict(horsepower[:10])
# + [markdown] id="CSkanJlmmFBX"
# Once the model is built, configure the training procedure using the `Model.compile()` method. The most important arguments to compile are the `loss` and the `optimizer` since these define what will be optimized (`mean_absolute_error`) and how (using the `optimizers.Adam`).
# + id="JxA_3lpOm-SK"
horsepower_model.compile(
    optimizer=tf.optimizers.Adam(learning_rate=0.1),
    loss='mean_absolute_error')
# + [markdown] id="Z3q1I9TwnRSC"
# Once the training is configured, use `Model.fit()` to execute the training:
# + id="-iSrNy59nRAp"
# %%time
history = horsepower_model.fit(
    train_features['Horsepower'], train_labels,
    epochs=100,
    # suppress logging
    verbose=0,
    # Calculate validation results on 20% of the training data
    validation_split = 0.2)
# + [markdown] id="tQm3pc0FYPQB"
# Visualize the model's training progress using the stats stored in the `history` object.
# + id="YCAwD_y4AdC3"
# history.history maps metric name -> per-epoch values; tabulate it.
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
# + id="9E54UoZunqhc"
def plot_loss(history):
  """Plot training vs. validation loss curves from a Keras History object.

  Uses the module-level pyplot state; the y-axis is clamped to [0, 10] MPG
  so successive models are visually comparable.
  """
  curves = history.history
  for metric in ('loss', 'val_loss'):
    plt.plot(curves[metric], label=metric)
  plt.ylim([0, 10])
  plt.xlabel('Epoch')
  plt.ylabel('Error [MPG]')
  plt.legend()
  plt.grid(True)
# + id="yYsQYrIZyqjz"
plot_loss(history)
# + [markdown] id="CMNrt8X2ebXd"
# Collect the results on the test set, for later:
# + id="kDZ8EvNYrDtx"
# test_results accumulates each model's test-set MAE for the final comparison.
test_results = {}
test_results['horsepower_model'] = horsepower_model.evaluate(
    test_features['Horsepower'],
    test_labels, verbose=0)
# + [markdown] id="F0qutYAKwoda"
# Since this is a single variable regression it's easy to look at the model's predictions as a function of the input:
# + id="xDS2JEtOn9Jn"
# Predict on an evenly spaced grid of 251 horsepower values in [0, 250].
x = tf.linspace(0.0, 250, 251)
y = horsepower_model.predict(x)
# + id="rttFCTU8czsI"
def plot_horsepower(x, y):
  """Scatter the training data and overlay the model's prediction curve.

  Reads the module-level `train_features` / `train_labels` globals for the
  data points; `x`/`y` supply the fitted curve.
  """
  observed_hp = train_features['Horsepower']
  plt.scatter(observed_hp, train_labels, label='Data')
  plt.plot(x, y, color='k', label='Predictions')
  plt.gca().set(xlabel='Horsepower', ylabel='MPG')
  plt.legend()
# + id="7l9ZiAOEUNBL"
# Data scatter plus the fitted linear model over the 0-250 horsepower grid.
plot_horsepower(x,y)
# + [markdown] id="Yk2RmlqPoM9u"
# ### Multiple inputs
# + [markdown] id="PribnwDHUksC"
# You can use an almost identical setup to make predictions based on multiple inputs. This model still does the same $y = mx+b$ except that $m$ is a matrix and $b$ is a vector.
#
# This time use the `Normalization` layer that was adapted to the whole dataset.
# + id="ssnVcKg7oMe6"
# Same linear model, but now over all (normalized) input features.
linear_model = tf.keras.Sequential([
    normalizer,
    layers.Dense(units=1)
])
# + [markdown] id="IHlx6WeIWyAr"
# When you call this model on a batch of inputs, it produces `units=1` outputs for each example.
# + id="DynfJV18WiuT"
linear_model.predict(train_features[:10])
# + [markdown] id="hvHKH3rPXHmq"
# When you call the model its weight matrices will be built. Now you can see that the `kernel` (the $m$ in $y=mx+b$) has a shape of `(9,1)`.
# + id="DwJ4Fq0RXBQf"
linear_model.layers[1].kernel
# + [markdown] id="eINAc6rZXzOt"
# Use the same `compile` and `fit` calls as for the single input `horsepower` model:
# + id="A0Sv_Ybr0szp"
linear_model.compile(
    optimizer=tf.optimizers.Adam(learning_rate=0.1),
    loss='mean_absolute_error')
# + id="EZoOYORvoTSe"
# %%time
history = linear_model.fit(
    train_features, train_labels,
    epochs=100,
    # suppress logging
    verbose=0,
    # Calculate validation results on 20% of the training data
    validation_split = 0.2)
# + [markdown] id="EdxiCbiNYK2F"
# Using all the inputs achieves a much lower training and validation error than the `horsepower` model:
# + id="4sWO3W0koYgu"
plot_loss(history)
# + [markdown] id="NyN49hIWe_NH"
# Collect the results on the test set, for later:
# + id="jNC3D1DGsGgK"
test_results['linear_model'] = linear_model.evaluate(
    test_features, test_labels, verbose=0)
# + [markdown] id="SmjdzxKzEu1-"
# ## A DNN regression
# + [markdown] id="DT_aHPsrzO1t"
# The previous section implemented linear models for single and multiple inputs.
#
# This section implements single-input and multiple-input DNN models. The code is basically the same except the model is expanded to include some "hidden" non-linear layers. The name "hidden" here just means not directly connected to the inputs or outputs.
# + [markdown] id="6SWtkIjhrZwa"
# These models will contain a few more layers than the linear model:
#
# * The normalization layer.
# * Two hidden, nonlinear, `Dense` layers using the `relu` nonlinearity.
# * A linear single-output layer.
#
# Both will use the same training procedure so the `compile` method is included in the `build_and_compile_model` function below.
# + id="c26juK7ZG8j-"
def build_and_compile_model(norm):
  """Return a compiled DNN regressor: norm -> Dense(64) -> Dense(64) -> Dense(1).

  Args:
    norm: a (pre-adapted) Normalization layer used as the input stage.

  Returns:
    A `keras.Sequential` model compiled with mean-absolute-error loss and
    the Adam optimizer at learning rate 0.001.
  """
  hidden_units = 64
  stack = [norm]
  stack.append(layers.Dense(hidden_units, activation='relu'))
  stack.append(layers.Dense(hidden_units, activation='relu'))
  stack.append(layers.Dense(1))
  model = keras.Sequential(stack)
  model.compile(loss='mean_absolute_error',
                optimizer=tf.keras.optimizers.Adam(0.001))
  return model
# + [markdown] id="7T4RP1V36gVn"
# ### One variable
# + [markdown] id="xvu9gtxTZR5V"
# Start with a DNN model for a single input: "Horsepower"
# + id="cGbPb-PHGbhs"
dnn_horsepower_model = build_and_compile_model(horsepower_normalizer)
# + [markdown] id="Sj49Og4YGULr"
# This model has quite a few more trainable parameters than the linear models.
# + id="ReAD0n6MsFK-"
dnn_horsepower_model.summary()
# + [markdown] id="0-qWCsh6DlyH"
# Train the model:
# + id="sD7qHCmNIOY0"
# %%time
history = dnn_horsepower_model.fit(
    train_features['Horsepower'], train_labels,
    validation_split=0.2,
    verbose=0, epochs=100)
# + [markdown] id="dArGGxHxcKjN"
# This model does slightly better than the linear-horsepower model.
# + id="NcF6UWjdCU8T"
plot_loss(history)
# + [markdown] id="TG1snlpR2QCK"
# If you plot the predictions as a function of `Horsepower`, you'll see how this model takes advantage of the nonlinearity provided by the hidden layers:
# + id="hPF53Rem14NS"
# Re-use the same 0-250 horsepower grid for the DNN's prediction curve.
x = tf.linspace(0.0, 250, 251)
y = dnn_horsepower_model.predict(x)
# + id="rsf9rD8I17Wq"
plot_horsepower(x, y)
# + [markdown] id="WxCJKIUpe4io"
# Collect the results on the test set, for later:
# + id="bJjM0dU52XtN"
test_results['dnn_horsepower_model'] = dnn_horsepower_model.evaluate(
    test_features['Horsepower'], test_labels,
    verbose=0)
# + [markdown] id="S_2Btebp2e64"
# ### Full model
# + [markdown] id="aKFtezDldLSf"
# If you repeat this process using all the inputs it slightly improves the performance on the validation dataset.
# + id="c0mhscXh2k36"
# DNN over all features, using the full-dataset normalizer this time.
dnn_model = build_and_compile_model(normalizer)
dnn_model.summary()
# + id="CXDENACl2tuW"
# %%time
history = dnn_model.fit(
    train_features, train_labels,
    validation_split=0.2,
    verbose=0, epochs=100)
# + id="-9Dbj0fX23RQ"
plot_loss(history)
# + [markdown] id="hWoVYS34fJPZ"
# Collect the results on the test set:
# + id="-bZIa96W3c7K"
test_results['dnn_model'] = dnn_model.evaluate(test_features, test_labels, verbose=0)
# + [markdown] id="uiCucdPLfMkZ"
# ## Performance
# + [markdown] id="rDf1xebEfWBw"
# Now that all the models are trained check the test-set performance and see how they did:
# + id="e5_ooufM5iH2"
# Summarize every model's test-set mean absolute error in one table.
pd.DataFrame(test_results, index=['Mean absolute error [MPG]']).T
# + [markdown] id="DABIVzsCf-QI"
# These results match the validation error seen during training.
# + [markdown] id="ft603OzXuEZC"
# ### Make predictions
#
# Finally, have a look at the errors made by the model when making predictions on the test set:
# + id="Xe7RXH3N3CWU"
test_predictions = dnn_model.predict(test_features).flatten()
a = plt.axes(aspect='equal')
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
lims = [0, 50]
plt.xlim(lims)
plt.ylim(lims)
# Diagonal y=x reference line: perfect predictions would fall on it.
_ = plt.plot(lims, lims)
# + [markdown] id="19wyogbOSU5t"
# It looks like the model predicts reasonably well.
#
# Now take a look at the error distribution:
# + id="f-OHX4DiXd8x"
error = test_predictions - test_labels
plt.hist(error, bins=25)
plt.xlabel('Prediction Error [MPG]')
_ = plt.ylabel('Count')
# + [markdown] id="KSyaHUfDT-mZ"
# If you're happy with the model save it for later use:
# + id="4-WwLlmfT-mb"
# No file extension: Keras writes the TensorFlow SavedModel format here
# (a directory named 'dnn_model').
dnn_model.save('dnn_model')
# + [markdown] id="Benlnl8UT-me"
# If you reload the model, it gives identical output:
# + id="dyyyj2zVT-mf"
reloaded = tf.keras.models.load_model('dnn_model')
test_results['reloaded'] = reloaded.evaluate(
    test_features, test_labels, verbose=0)
# + id="f_GchJ2tg-2o"
pd.DataFrame(test_results, index=['Mean absolute error [MPG]']).T
# + [markdown] id="vgGQuV-yqYZH"
# ## Conclusion
#
# This notebook introduced a few techniques to handle a regression problem. Here are a few more tips that may help:
#
# * [Mean Squared Error (MSE)](https://www.tensorflow.org/api_docs/python/tf/losses/MeanSquaredError) and [Mean Absolute Error (MAE)](https://www.tensorflow.org/api_docs/python/tf/losses/MeanAbsoluteError) are common loss functions used for regression problems. Mean Absolute Error is less sensitive to outliers. Different loss functions are used for classification problems.
# * Similarly, evaluation metrics used for regression differ from classification.
# * When numeric input data features have values with different ranges, each feature should be scaled independently to the same range.
# * Overfitting is a common problem for DNN models, it wasn't a problem for this tutorial. See the [overfit and underfit](overfit_and_underfit.ipynb) tutorial for more help with this.
#
|
site/en/tutorials/keras/regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### In this file, we will generate our E0 feature data
# Instructions:
#
# You should start by writing a notebook to create the dataset. Note that if you use many molecules, using MOPAC could take a while. If you decide to optimize the geometry of all the molecules in the Solubility datasets, it will take at least 12 hours. Make sure to save the result so that you don't have to compute it multiple times!
# +
# Pandas dataframe, apply function to whole dataframe
# Make sure it works on a small dataset. Save the results
# Create dataset associated smiles to energy. Then, run overnight on all data
# Once dataset ready, can move on to next step
import pandas as pd
import os
# +
# Combine Datasets
def generate_combined_dataset(dir_path, fnames, out_name="", debug=False):
    """Concatenate several CSV datasets into one DataFrame and optionally save it.

    Args:
        dir_path: Directory containing the CSV files. Works with or without a
            trailing separator (paths are joined with ``os.path.join``).
        fnames: Iterable of CSV filenames to combine, in order.
        out_name: If non-empty, a path ending in ``.csv`` to write the combined
            data to. An existing file is never overwritten.
        debug: If True, print per-file and final shape information.

    Returns:
        The combined DataFrame, or None when ``fnames`` is empty.
    """
    combined = None
    print("Outname is", out_name)
    for f in fnames:
        # os.path.join tolerates dir_path both with and without a trailing "/".
        df = pd.read_csv(os.path.join(dir_path, f))
        if combined is None:
            combined = df.copy()
        else:
            ## Here we do not check for duplicates as Professor Tristan in class informed me that there should not be any duplicate data between the sets
            # ignore_index=True gives the result a clean 0..n-1 index instead of
            # repeating each file's row labels.
            combined = pd.concat([combined, df], ignore_index=True)
        if debug:
            print("Added " + str(f) + " with dim", df.shape)
    if combined is None:
        # No input files: bail out before the .shape / .to_csv calls below crash.
        print("WARNING: no input files given; nothing was combined.")
        return None
    if debug:
        print("Our combined dataset has", combined.shape[0], "rows with", combined.shape[1], "features.")
    if out_name != "":
        if out_name.endswith(".csv"):
            if os.path.exists(out_name):
                print("Combined file already exists at specified output path... rename or replace it first.")
            else:
                combined.to_csv(out_name, index=False)
        else:
            print("ERROR: Output filename must end in .csv")
    return combined
# Restriction: "It is forbidden to use any dataset other than A, B, C, D, F, G, H, I"
# So, we make sure we only use these datasets in our data preparation
# Small preview file built from just two datasets (quick sanity check).
generate_combined_dataset("../MLQC_HW/Data/Solubility/",
                          ["dataset-H.csv", "dataset-I.csv"],
                          "train_data_preview.csv")
# Full training file combining all eight allowed datasets.
generate_combined_dataset("../MLQC_HW/Data/Solubility/",
                          ["dataset-A.csv", "dataset-B.csv", "dataset-C.csv",
                           "dataset-D.csv", "dataset-F.csv", "dataset-G.csv",
                           "dataset-H.csv", "dataset-I.csv"], "train_data.csv")
# +
from rdkit import Chem
from rdkit.Chem import AllChem
from ase import Atoms
from ase.io import read
from ase.calculators.mopac import MOPAC
from tqdm.notebook import tqdm_notebook
# If necessary, can add a batch parameter to start at a given index and to save batches of data at intervals
def calc_potential(from_df):
    """Add a 'Min_PE' column of MOPAC-computed potential energies to `from_df`.

    For each SMILES string: build a 3D geometry with RDKit, round-trip it
    through an XYZ file into an ASE Atoms object, attach a MOPAC calculator
    (UHF), and record the potential energy. Molecules that fail at any stage
    get None/NaN in the new column.

    NOTE: mutates and returns the same DataFrame (no copy is made), and writes
    scratch files ('init.xyz', MOPAC 'TMP' files) in the working directory.

    Args:
        from_df: DataFrame with a 'SMILES' column.

    Returns:
        The same DataFrame with an added 'Min_PE' column.
    """
    def get_mopac(smile):
        try:
            # Create the rdkit molecule object in Cartesian space
            mol = Chem.AddHs(Chem.MolFromSmiles(smile))
            AllChem.EmbedMolecule(mol)
            # Save and reload the information into a mol object from ase (instead of rdkit)
            Chem.rdmolfiles.MolToXYZFile(mol, 'init.xyz')
            mol = read('init.xyz')
            # With this ase object, add quantum mechanic calculations
            mol.calc = MOPAC(label='TMP', task='UHF BONDS GRADS')
            return mol.get_potential_energy()
        except Exception:
            # Catch only ordinary errors (bad SMILES, embedding/MOPAC failure).
            # A bare `except:` would also swallow KeyboardInterrupt/SystemExit,
            # making a multi-hour run impossible to stop with Ctrl-C.
            # None marks this molecule as failed.
            return None
    tqdm_notebook.pandas(desc="Quantum Calculations for Minimum Potential Energy (Molecule Number)")
    # Would in theory progress_apply each batch if we needed
    from_df['Min_PE'] = from_df['SMILES'].progress_apply(get_mopac)
    return from_df
# -
# # CAUTION TEAM ELECTRON
# You only need to run your own code block here. I chunked it out so each of us has about an even amount of data to churn through, letting us do this at 3x speed. Be advised it may take ~30 hours to go through all of this data; for this reason it saves to a CSV file every time you complete one of the datasets. That's why there are multiple lines of calculating and saving.
# Directory holding the raw solubility CSVs (relative to this notebook).
data_path = "../MLQC_HW/Data/Solubility/"
# +
### RJ run this ###
# Will do C, D, F (I already did H and I while testing)
# Each dataset is saved to its own CSV immediately after computing, so a
# crash or interruption only loses the dataset currently in progress.
# C
new_df = calc_potential(pd.read_csv(data_path + "dataset-C.csv"))
# Only keep our SMILES and Min_PE
new_df[["SMILES", "Min_PE"]].to_csv('Data/pe_data_C.csv', index=False)
# -
### RJ run this ###
# D
new_df = calc_potential(pd.read_csv(data_path + "dataset-D.csv"))
new_df[["SMILES", "Min_PE"]].to_csv('Data/pe_data_D.csv', index=False)
# F
new_df = calc_potential(pd.read_csv(data_path + "dataset-F.csv"))
new_df[["SMILES", "Min_PE"]].to_csv('Data/pe_data_F.csv', index=False)
# +
### RONAN RUN THIS ###
# This will do the computations on dataset A
# A
new_df = calc_potential(pd.read_csv(data_path + "dataset-A.csv"))
new_df[["SMILES", "Min_PE"]].to_csv('Data/pe_data_A.csv', index=False)
# +
### JOSHUA RUN THIS ###
# This will do the computations on dataset B, G
# B
new_df = calc_potential(pd.read_csv(data_path + "dataset-B.csv"))
new_df[["SMILES", "Min_PE"]].to_csv('Data/pe_data_B.csv', index=False)
# -
### JOSHUA RUN THIS ###
# G
new_df = calc_potential(pd.read_csv(data_path + "dataset-G.csv"))
new_df[["SMILES", "Min_PE"]].to_csv('Data/pe_data_G.csv', index=False)
|
build_data.ipynb
|