code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.9 64-bit (''paddle2.0'': conda)'
# name: python3
# ---
# # Load the Pretrained Model and the dataset
# We use bert-base-uncased as the model and SST-2 as the dataset for example. More models can be found in [PaddleNLP Model Zoo](https://paddlenlp.readthedocs.io/zh/latest/model_zoo/transformers.html#transformer).
#
# Obviously, PaddleNLP is needed to run this notebook, which is easy to install:
# ```bash
# pip install setuptools_scm
# pip install --upgrade paddlenlp==2.1
# ```
# +
import paddle
import paddlenlp
from paddlenlp.transformers import BertForSequenceClassification, BertTokenizer
MODEL_NAME = "bert-base-uncased"
model = BertForSequenceClassification.from_pretrained(MODEL_NAME, num_classes=2)
tokenizer = BertTokenizer.from_pretrained(MODEL_NAME)
from paddlenlp.datasets import load_dataset
train_ds, dev_ds, test_ds = load_dataset(
"glue", name='sst-2', splits=["train", "dev", "test"]
)
# -
# # Prepare the Model
# ## Train the model
# +
# training the model and save to save_dir
# only needs to run once.
# total steps ~2100 (1 epoch)
from assets.utils import training_model
training_model(model, tokenizer, train_ds, dev_ds, save_dir=f'assets/sst-2-{MODEL_NAME}')
# global step 2100, epoch: 1, batch: 2100, loss: 0.22977, acc: 0.91710
# eval loss: 0.20062, accu: 0.91972
# -
# ## Or Load the trained model
# + tags=[]
# Load the trained model.
state_dict = paddle.load(f'assets/sst-2-{MODEL_NAME}/model_state.pdparams')
model.set_dict(state_dict)
# -
# # See the prediction results
# +
from assets.utils import predict

# Example reviews: the first two are positive, the last two negative.
reviews = [
    "it 's a charming and often affecting journey . ",
    'the movie achieves as great an impact by keeping these thoughts hidden as ... ( quills ) did by showing them . ',
    'this one is definitely one to skip , even for horror movie fanatics . ',
    'in its best moments , resembles a bad high school production of grease , without benefit of song . '
]
# predict() expects a list of {"text": ...} dicts.
data = [{"text": r} for r in reviews]
label_map = {0: 'negative', 1: 'positive'}
batch_size = 32
results = predict(
    model, data, tokenizer, label_map, batch_size=batch_size)
for idx, text in enumerate(data):
    # Fixed typo in the printed header: "Lable" -> "Label".
    print('Data: {} \t Label: {}'.format(text, results[idx]))
# -
# # Prepare for Interpretations
# +
import interpretdl as it
import numpy as np
from assets.utils import convert_example, aggregate_subwords_and_importances
from paddlenlp.data import Stack, Tuple, Pad
from interpretdl.data_processor.visualizer import VisualizationTextRecord, visualize_text
def preprocess_fn(data):
    """Tokenize raw {'text': ...} examples and batch them into padded tensors.

    Returns a (input_ids, segment_ids) pair of paddle tensors with
    stop_gradient disabled, so gradient-based interpreters can
    back-propagate to the embedding layer.
    """
    if not isinstance(data, list):
        data = [data]

    encoded = [
        convert_example(sample, tokenizer, max_seq_length=128, is_test=True)
        for sample in data
    ]

    # Pad both fields of every example to a common length within the batch.
    batchify = Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # input id
        Pad(axis=0, pad_val=tokenizer.pad_token_id),  # segment id
    )
    input_ids, segment_ids = batchify(encoded)

    return (
        paddle.to_tensor(input_ids, stop_gradient=False),
        paddle.to_tensor(segment_ids, stop_gradient=False),
    )
# -
# ## IG Interpreter
# + tags=[]
ig = it.IntGradNLPInterpreter(model, True)
# Integrated Gradients over the embedding layer; returns per-token
# attributions averaged along the interpolation path (`steps` points).
pred_labels, pred_probs, avg_gradients = ig.interpret(
    preprocess_fn(data),
    steps=50,
    return_pred=True)
# NOTE(review): only 4 reviews are interpreted; the `* 5` leaves 16 unused
# labels (harmless, but probably a leftover from a larger example).
true_labels = [1, 1, 0, 0] * 5
recs = []
for i in range(avg_gradients.shape[0]):
    # Re-tokenize to recover the subwords aligned with the attribution row.
    subwords = " ".join(tokenizer._tokenize(data[i]['text'])).split(' ')
    subword_importances = avg_gradients[i]
    words, word_importances = aggregate_subwords_and_importances(subwords, subword_importances)
    # L2-normalize so color intensities are comparable across sentences.
    word_importances = np.array(word_importances) / np.linalg.norm(
        word_importances)
    pred_label = pred_labels[i]
    pred_prob = pred_probs[i, pred_label]
    true_label = true_labels[i]
    interp_class = pred_label
    # Flip sign for the negative class so colors consistently map to sentiment.
    if interp_class == 0:
        word_importances = -word_importances
    recs.append(
        VisualizationTextRecord(words, word_importances, true_label,
                                pred_label, pred_prob, interp_class)
    )
visualize_text(recs)
# The visualization is not available at github
# -
# ## LIME Interpreter
# +
# NOTE(review): only the first 4 labels are used; `* 5` is a leftover.
true_labels = [1, 1, 0, 0] * 5
recs = []
lime = it.LIMENLPInterpreter(model)
for i, review in enumerate(data):
    # LIME perturbs the input by masking tokens with [UNK]/[PAD] and fits
    # a local linear surrogate model on `num_samples` perturbations.
    pred_class, pred_prob, lime_weights = lime.interpret(
        review,
        preprocess_fn,
        num_samples=1000,
        batch_size=32,
        unk_id=tokenizer.convert_tokens_to_ids('[UNK]'),
        pad_id=tokenizer.convert_tokens_to_ids('[PAD]'),
        return_pred=True)
    # subwords
    subwords = " ".join(tokenizer._tokenize(review['text'])).split(' ')
    interp_class = list(lime_weights.keys())[0]
    # [1:-1] drops the first and last entries — presumably the special
    # [CLS]/[SEP] tokens; TODO confirm against the interpreter's output.
    weights = lime_weights[interp_class][1 : -1]  # NOTE(review): unused
    subword_importances = [t[1] for t in lime_weights[interp_class][1 : -1]]
    words, word_importances = aggregate_subwords_and_importances(subwords, subword_importances)
    # L2-normalize so color intensities are comparable across sentences.
    word_importances = np.array(word_importances) / np.linalg.norm(
        word_importances)
    true_label = true_labels[i]
    # Flip sign for the negative class so colors consistently map to sentiment.
    if interp_class == 0:
        word_importances = -word_importances
    rec = VisualizationTextRecord(
        words,
        word_importances,
        true_label,
        pred_class[0],
        pred_prob[0],
        interp_class
    )
    recs.append(rec)
visualize_text(recs)
# The visualization is not available at github
# -
# ## GradShapNLPInterpreter
# +
# NOTE(review): this cell reuses the name `ig` from the IntGrad cell above
# for a GradShap interpreter; a distinct name would be clearer.
ig = it.GradShapNLPInterpreter(model, True)
pred_labels, pred_probs, avg_gradients = ig.interpret(
    preprocess_fn(data),
    n_samples=10,
    noise_amount=0.1,
    return_pred=True)
# NOTE(review): only the first 4 labels are used; `* 5` is a leftover.
true_labels = [1, 1, 0, 0] * 5
recs = []
for i in range(avg_gradients.shape[0]):
    # Re-tokenize to recover the subwords aligned with the attribution row.
    subwords = " ".join(tokenizer._tokenize(data[i]['text'])).split(' ')
    subword_importances = avg_gradients[i]
    words, word_importances = aggregate_subwords_and_importances(subwords, subword_importances)
    # L2-normalize so color intensities are comparable across sentences.
    word_importances = np.array(word_importances) / np.linalg.norm(
        word_importances)
    pred_label = pred_labels[i]
    pred_prob = pred_probs[i, pred_label]
    true_label = true_labels[i]
    interp_class = pred_label
    # Flip sign for the negative class so colors consistently map to sentiment.
    if interp_class == 0:
        word_importances = -word_importances
    recs.append(
        VisualizationTextRecord(words, word_importances, true_label,
                                pred_label, pred_prob, interp_class)
    )
visualize_text(recs)
# The visualization is not available at github
# -
| tutorials/bert-en-sst-2-tutorials.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] origin_pos=0
# # Concise Implementation of Multilayer Perceptrons
#
# By **relying on the high-level APIs,
# we can implement MLPs even more concisely.**
#
# + origin_pos=2 tab=["pytorch"]
import torch
from torch import nn
from d2l import torch as d2l
# + [markdown] origin_pos=4
# ## Model
#
# As compared with our concise implementation
# of softmax regression implementation,
# the only difference is that we add
# *two* fully-connected layers
# (previously, we added *one*).
#
# The first is [**our hidden layer**],
# which (**contains 256 hidden units
# and applies the ReLU activation function**).
# The second is our output layer.
#
# + origin_pos=6 tab=["pytorch"]
# MLP: flatten 28x28 images, one hidden layer of 256 units, 10 outputs.
net = nn.Sequential(nn.Flatten(),
                    nn.Linear(784, 256),
                    nn.ReLU(),
                    nn.Linear(256, 10))


def init_weights(m):
    """Initialize the weights of every Linear layer from N(0, 0.01**2).

    Non-Linear modules are left untouched.
    """
    # isinstance() is the idiomatic type check (the original used
    # `type(m) == nn.Linear`, which would silently skip subclasses).
    if isinstance(m, nn.Linear):
        nn.init.normal_(m.weight, std=0.01)


net.apply(init_weights);
# + [markdown] origin_pos=8
# [**The training loop**] is exactly the same
# as when we implemented softmax regression.
# This modularity enables us to separate
# matters concerning the model architecture
# from orthogonal considerations.
#
# + origin_pos=10 tab=["pytorch"]
batch_size, lr, num_epochs = 256, 0.1, 10
loss = nn.CrossEntropyLoss(reduction='none')
trainer = torch.optim.SGD(net.parameters(), lr=lr)
# + origin_pos=12 tab=["pytorch"]
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size)
d2l.train_ch3(net, train_iter, test_iter, loss, num_epochs, trainer)
| Tutorial-04/TUT4-3-mlp-concise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import example_pkg
import base
dir(example_pkg)
base.hello()
import numpy
import scipy
import pandas as pd
import os
import glob
import re
# +
class salad():
    """Toy file-based 'salad': writes one empty `.salad` file per item and
    reads the items back by parsing the file names."""

    def __init__(self):
        self.path = ''
        self.items = []
        self.numbers = []

    def write(self, path, salad, n_items):
        """Create n_items[k] empty files named '<item>_NN.salad' in `path`.

        `salad` (item names) and `n_items` must have equal length.
        """
        self.path = path
        print(self.path)
        assert len(salad) == len(n_items), "The lists must be equal length."
        os.makedirs(self.path, exist_ok=True)
        for k in range(len(salad)):
            print(salad[k], n_items[k])
            for j in range(n_items[k]):
                file_name = salad[k] + '_' + str('{:0>2}'.format(j)) + '.salad'
                # Context manager guarantees the handle is closed
                # (the original left a dangling open()/close() pair).
                with open(os.path.join(self.path, file_name), "w+"):
                    pass
        return

    def read(self, path):
        """Return a list of [(item, number)] findall results, one per file.

        Bug fixes vs. the original pattern "(\\w+)(\\d\\d).salad":
        the separator underscore was swallowed into the item name
        (yielding e.g. 'lettuce_'), and the unescaped '.' matched any char.
        """
        # Hoisted out of the loop: the pattern is loop-invariant.
        pattern = r"(\w+)_(\d\d)\.salad"
        flist = glob.glob(os.path.join(path, '*.salad'))
        a = []
        for file in flist:
            a.append(re.findall(pattern, file))
        return a
path = 'mysalad'
salad_items = ['lettuce', 'tomato', 'oil', 'balsamic', 'onion', 'goat cheese']
salad_numbers = [2,3,3,2,4,7]
mysalad = salad()
mysalad.write(path, salad_items, salad_numbers)
flist = mysalad.read(path)
print(flist)
# -
# Write the two header strings and every parsed file entry to README.txt.
# A context manager replaces the bare open()/close() pair so the file is
# closed even if a write fails.
with open("README.txt", "w+") as f:
    f.write('manal')
    f.write('hossain\n')
    for k in flist:
        f.write(str(k))
os.getcwd()
# +
# NOTE(review): the following lines were free-text answers typed into a
# code cell; they are not valid Python (they raised SyntaxError), so they
# are preserved here as comments instead of executable code.
# f.yourname
# your surname
# how long did it take to execute mysalad
# The original then called f.write(flist) on an already-closed handle with
# a non-string argument; re-open in append mode and stringify instead.
with open("README.txt", "a") as f:
    f.write(str(flist))
# write the file to github README
| Python final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# *Copyright 2020 by <NAME> under the MIT license. This file is part of the Anomalous Diffusion (AnDi) Challenge, and is released under the "MIT License Agreement".
#
#
# # Submitting to the ANDI challenge: from trajectories to predictions
#
# In this notebook we will explore how to make predictions from the trajectories we have access to in the [ANDI challenge](https://github.com/AnDiChallenge/ANDI_datasets). From these trajectories, I will showcase the use of the tMSD method for Task 1 and a novel ML method for Task 2. Task 3 is a combination of the two previous and will not be explored in this notebook.
import numpy as np
import matplotlib.pyplot as plt
# The best way of dealing with the available datasets is by means of the `andi-datasets` python package, which can be installed using `pip install andi-datasets`.
import andi
from andi import andi_datasets as AD
# For this tutorial, I have first downloaded the datasets available in the Challenge webpage (www.andi-challenge.org). Note that you need to register before being able to access the data. You will have access to two datasets, one for training, which is labeled, and one for scoring, which is used to rank the participants in the competition. In this case, I downloaded each of the datasets in the following folders:
training_dataset = 'Development dataset for training/'
scoring_dataset = 'development_for_scoring/'
# To load the training dataset, we only need to do:
X1, Y1, X2, Y2, X3, Y3 = AD().andi_dataset(load_dataset = True, path_datasets = training_dataset)
# To load the datasets for scoring, you can use your favourite txt loader. This is because no `refX.txt` files are present in the folder, so the function `andi_dataset` cannot load them. Newer versions of the andi-datasets package will have this feature, stay tuned!
import csv
# Each row of task1.txt is [dimension, x_0, x_1, ...]; trajectories are
# grouped by dimension: validation[0] (1D), validation[1] (2D), validation[2] (3D).
trajs_from_files = csv.reader(open(scoring_dataset+'task1.txt','r'), delimiter=';',
                              lineterminator='\n',quoting=csv.QUOTE_NONNUMERIC)
validation = [[],[],[]]
# NOTE(review): `trajs` is an (index, row) pair from enumerate; the index
# is never used, so iterating the reader directly would be simpler.
for trajs in enumerate(trajs_from_files):
    validation[int(trajs[1][0])-1].append(trajs[1][1:])
# # Task 1 - Anomalous exponent prediction
# `X1` contains the trajectories for task 1. As we asked for all dimensions, `X1` will be a list of three elements, each element one dimension. The same for the labels `Y1`. Let us check the 1D case:
X1_1D = X1[0]
Y1_1D = Y1[0]
# + jupyter={"source_hidden": true}
fig, ax = plt.subplots(1, 5, figsize = (20, 4))
for idx, (t, l) in enumerate(zip(X1_1D[:5], Y1_1D[:5])):
ax[idx].plot(t, label = r'$\alpha =$'+str(l), c = 'C'+str(idx))
ax[idx].legend()
ax[idx].set_title(f'Trajectory {idx+1}')
ax[0].set_ylabel('Position');
# -
# ## The 'good old' way: the tMSD fitting
# One way to extract the anomalous exponent is by fitting the tMSD:
# $$
# \mbox{tMSD}(\Delta) = \frac{1}{T-\Delta} \sum_{i=1}^{T-\Delta}(x(t_i + \Delta)-x(t_i))^2,
# $$
# where $\Delta$ is defined as the time lag and $T$ is length of the trajectory.
def TMSD(traj, t_lags):
    """Time-averaged MSD of a 1D trajectory, one value per lag in `t_lags`.

    Implements tMSD(D) = 1/(T-D) * sum_i (x(t_i + D) - x(t_i))^2.
    """
    traj = np.asarray(traj, dtype=float)
    tmsd = np.zeros(len(t_lags), dtype=float)
    for idx, lag in enumerate(t_lags):
        # Mean squared displacement over every window of length `lag`.
        displacements = traj[lag:] - traj[:-lag]
        tmsd[idx] = np.mean(displacements ** 2)
    return tmsd
# We know that (usually) $$\mbox{tMSD}(\Delta) \sim \Delta ^ \alpha,$$ hence we can use it to extract the anomalous exponent. Let us check this on trajectories from two models: ATTM and FBM. For that we can again use the `andi` package, and access the diffusion models directly:
# +
from andi import diffusion_models as DF
# We create one ATTM and one FBM trajectory with alpha = 0.2
attm = DF.oneD().attm(T = 1000, alpha = 0.2)
fbm = DF.oneD().fbm(T = 1000, alpha = 0.2)
# We calculate their tMSD
t_lags = np.arange(2, 20)
attm_tmsd = TMSD(attm, t_lags = t_lags)
fbm_tmsd = TMSD(fbm, t_lags = t_lags)
# -
# Let's plot the tMSD:
# +
fig, ax = plt.subplots(1,2, figsize = (10, 4))
ax[0].loglog(t_lags, fbm_tmsd, '-o', lw = 1)
ax[0].loglog(t_lags, t_lags**0.2/(t_lags[0]**0.2)*fbm_tmsd[0], ls = '--')
ax[0].loglog(t_lags, t_lags/(t_lags[0])*fbm_tmsd[0], ls = '--')
ax[0].set_title(r'FBM $\rightarrow$ Ergodic process')
ax[1].loglog(t_lags, attm_tmsd, '-o', lw = 1,label = 'tMSD')
ax[1].loglog(t_lags, t_lags**0.2/(t_lags[0]**0.2)*attm_tmsd[0], ls = '--', label = r'$\sim \Delta^{0.2}$')
ax[1].loglog(t_lags, t_lags/(t_lags[0])*attm_tmsd[0], ls = '--', label = r'$\sim \Delta$')
ax[1].set_title(r'ATTM $\rightarrow$ Non-ergodic process')
ax[1].legend(fontsize = 16)
plt.setp(ax, xlabel = r'$\Delta$', ylabel = 'tMSD');
fig.tight_layout()
# -
# We see that the tMSD works very well for ergodic processes, but fails horribly for non-ergodic, for which we usually have that $tMSD\sim\Delta$. Nevertheless, let's use it to fit the exponent of the 1D training dataset:
# +
t_lags = np.arange(2,10)
predictions = []
# Fixed: the original `for` line was missing its trailing colon (SyntaxError).
for traj in X1[0]:
    tmsd = TMSD(traj, t_lags)
    # Slope of the log-log fit approximates the anomalous exponent alpha.
    predictions.append(np.polyfit(np.log(t_lags), np.log(tmsd),1)[0])
print('MAE = '+str(np.round(np.mean(np.abs(np.array(predictions)-Y1[0])), 4)))
# -
# Let's see how is the error distributed:
plt.hist(np.array(predictions)-Y1[0], bins = 50);
plt.xlabel('Error')
plt.ylabel('Frequency')
# We can now use the same method to predict the exponent of the validation dataset `V1`, for **1D**
# +
t_lags = np.arange(1,10)
predictions_task1_1d = []
for traj in validation[0]:
tmsd = TMSD(traj, t_lags)
predictions_task1_1d.append(np.polyfit(np.log(t_lags), np.log(tmsd),1)[0])
# -
# To make a submission, you only need to write a .txt file for which:
# - The name is the task: task1.txt, task2.txt, task3.txt
# - The first column is the dimension (1,2 or 3)
# - The following columns are the results
# - Delimiter should be ;
# +
pred_to_txt = np.ones((len(predictions_task1_1d), 2))
pred_to_txt[:, 1] = predictions_task1_1d
np.savetxt('task1.txt', pred_to_txt.astype(float), fmt = '%1.5f', delimiter = ';')
# -
# ### Then, we zip it and submit!
# # Task 2 - Model classification
# Let's check the trajectory for the second task. The structure of the variables `X2` and `Y2` is just as we explained for the first task. We will focus again in 1D:
X2_1D = X2[0]
Y2_1D = Y2[0]
# + jupyter={"source_hidden": true}
fig, ax = plt.subplots(1, 5, figsize = (20, 4))
for idx, (t, l) in enumerate(zip(X2_1D[:5], Y2_1D[:5])):
ax[idx].plot(t, label = AD().avail_models_name[int(l)].upper(), c = 'C'+str(idx))
ax[idx].legend()
ax[idx].set_title(f'Trajectory {idx+1}')
ax[0].set_ylabel('Position');
# -
# ## The new trend: machine learning
#
# There are various approaches to model classification: statistical tests to differentiate between CTRW and FBM, Bayesian inference,...etc. In this example we will use the latest proposal: Machine Learning.
#
# One of the main difficulties of the ANDI challenge is that we have **trajectories of all lengths!** Having ML models able to accomodate such feature is one of the main challenges the participants will face.
#
# For the sake of simplicity, I will solve here an easier problem: classifying between the subdiffusive models (ATTM, FBM, CTRW, SBM), with exponents $\in \ [0.1, 1]$, with trajectories of all $30$ points. To generate such dataset, I can use another function from the `andi-datasets` package, `create_dataset`. You can check all the details of this function in this [tutorial notebook](https://github.com/AnDiChallenge/ANDI_datasets/blob/master/tutorial_andi_datasets.ipynb).
# + Collapsed="false"
# Here I load a dataset that I have already generated. To create a new one, you just need to set load_trajectories = False
# Check the tutorials in the github for all the details
dataset = AD().create_dataset(T = 30, N = 1000, exponents = np.arange(0.1,1,0.05), models = [0,1,2,4],
load_trajectories = True, path = '/home/gmunoz/andi_data/datasets/', t_save=30)
# -
# As usually done in Machine Learning, we shuffle and create training/test sets with an 80-20% ratio:
# + Collapsed="false"
np.random.shuffle(dataset)
# Derive sizes from the dataset itself instead of relying on undefined
# globals: column 0 = model label, column 1 = exponent, rest = trajectory.
N = dataset.shape[0]
T = dataset.shape[1] - 2
ratio = int(0.8 * N)
# We normalize the trajectories so all of them are in the same 'scale'
X_a = andi.normalize(dataset[:ratio, 2:]).reshape(ratio, T, 1)
X_e = andi.normalize(dataset[ratio:, 2:]).reshape(N - ratio, T, 1)
# Model id 4 (SBM) is remapped to 3 so that class labels are contiguous 0-3.
dataset[dataset[:, 0] == 4, 0] = 3
# Fixed: to_categorical was used without being imported anywhere.
from keras.utils import to_categorical
Y_a = to_categorical(dataset[:ratio, 0])
Y_e = to_categorical(dataset[ratio:, 0])
# -
# -
# We import the necessary packages for creating our neural network:
# +
from keras.models import Sequential, load_model
from keras.layers import Dense, Conv1D, Dropout, BatchNormalization, Flatten
from keras.regularizers import l2 as regularizer_l2
from keras.optimizers import Adam
# -
# Now let's create a typical Convolutional neural network with `keras`, with some L2 regularizers and Dropout and Batch Normalization layers.
# + Collapsed="false"
model = Sequential()
# Here we define the architecture of the Neural Network
model.add(Conv1D(filters=3, kernel_size=3 ,strides=1,
input_shape=(T, 1),
kernel_initializer= 'uniform',
activation= 'relu', kernel_regularizer = regularizer_l2(l = 0.001)))
model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
model.add(Conv1D(filters=8, kernel_size=5 ,strides=1,
kernel_initializer= 'uniform',
activation= 'relu', kernel_regularizer = regularizer_l2(l = 0.001)))
model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
model.add(Conv1D(filters=3, kernel_size=2 ,strides=1,
kernel_initializer= 'uniform',
activation= 'relu', kernel_regularizer = regularizer_l2(l = 0.001)))
model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(64*2, activation='sigmoid', kernel_regularizer = regularizer_l2(l = 0.001)))
model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
model.add(Dropout(0.5))
model.add(Dense(64, activation='sigmoid'))
model.add(BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001))
# Last layer needs to have same size as number of processes
number_process = 4
model.add(Dense(number_process, activation='softmax'))
# We add loss function + Adam optimizer
model.compile(loss='binary_crossentropy',
optimizer=Adam(),
metrics=['accuracy'])
# -
# Let's train the model:
# + Collapsed="false" jupyter={"outputs_hidden": true}
batch_size = 200
epochs = 150
history = model.fit(X_a, Y_a,
batch_size=batch_size,
epochs=epochs,
verbose=2,
validation_data=(X_e, Y_e))
model.save('model_classification_subdiffusive.h5')
# +
# The original cell read the *loss* curves from the history but named the
# variables `acc`/`val_acc` and labelled the plot "accuracy"; the variable
# names, title and y-axis label now match what is actually plotted.
train_loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = np.arange(len(train_loss))
plt.plot(epochs_range, train_loss, label='Training loss')
plt.plot(epochs_range, val_loss, label='Validation loss')
plt.title('FCN - Training and validation loss T ='+str(T))
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# -
# In the ANDI challenge, the rank of task 2 is evaluated with the F1 score. Let's see how well/badly we did (best value is 1). Recall that this is a toy example, as we are not considering LW.
# +
from sklearn.metrics import f1_score
groundtruth = np.argmax(Y_e, axis = 1)
predictions = np.argmax(model.predict(X_e), axis = 1)
f1_score(groundtruth, predictions, average='micro')
# -
# Not that bad! To analyze a bit more the predictions, we can use the confusion matrix:
# +
from sklearn.metrics import confusion_matrix
conf = confusion_matrix(groundtruth, predictions)/(predictions.shape[0]/2)
# + jupyter={"source_hidden": true}
fig, ax = plt.subplots(figsize = (7,7))
ax.matshow(conf)
for (i, j), z in np.ndenumerate(conf):
ax.text(j, i, '{:0.3f}'.format(z), ha='center', va='center', fontsize = 16)
ax.set_xticklabels(['c','ATTM','CTRW','FBM','SBM'], fontsize = 16)
ax.set_yticklabels(['a','ATTM','CTRW','FBM','SBM'], fontsize = 16)
ax.set_xlabel('Predicted class', fontsize = 16)
ax.set_ylabel('Groundtruth', fontsize = 16)
ax.xaxis.set_ticks_position('bottom')
# -
# We see here that the method is not perfect. For instance, it has a very hard time correctly classifying ATTM trajectories. For CTRW, the job is easier! Take into account that here we are working with trajectories without noise, contrary to what we have in the Challenge.
# ### Now you are ready to use your favourite ML architecture on the true ANDI dataset! Can you do better?
| tutorial_submission.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Django Shell-Plus
# language: python
# name: django_extensions
# ---
# +
# Setup Notebook to load Django code
# From project root, run: jupyter-lab
import os
import sys
from pathlib import Path
django_project_dir = Path('../')
sys.path.insert(0, str(django_project_dir))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ratom_api.settings.local")
import django
django.setup()
# -
import pypff
import pytz
from libratom.lib.pff import PffArchive
from ratom import models as ratom
arch = PffArchive('/home/jgibson/Downloads/zach.ambrose-CA1@nc.gov.pst')
arch.tree.get_node(2366436)
# Walk every folder in the PST archive; `storedf` ends up pointing at the
# *last* folder that contains messages (earlier matches are overwritten).
storedf = None
for folder in arch.folders():
    print(folder.name)
    if not folder.name: # skip root node
        continue
    print(
        f"Scanning {folder.number_of_sub_messages} messages in folder {folder.name}"
    )
    if folder.number_of_sub_messages == 0:
        continue
    storedf = folder
# Collect the identifiers of at most the first 40 messages of that folder,
# keyed by the folder's identifier.
message_ids = []
folder_struct = {}
folder_id = storedf.identifier
count = 0
for m in storedf.sub_messages:
    if count == 40:
        break
    message_ids.append(m.identifier)
    count += 1
folder_struct[folder_id] = message_ids
folder_struct
# + active=""
# arch.message_count
# -
| notebooks/libpff_explore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.style.use('ggplot')
csv = pd.read_csv("~/.ukpsummarizer/scores_new/weightshistory-DUC2004-4doc1sum-PROPAGATION.csv", low_memory=False)
csv
df = csv
plt.figure();
df.plot()
plt.show()
import hashlib
import random
# Fixed for Python 3: hashlib digests require bytes, so encode the string
# first (str.encode() also works under the notebook's Python 2 kernel).
# NOTE(review): for security-sensitive tokens, prefer the `secrets` module
# over `random`.
s = hashlib.sha224(str(random.random()).encode()).hexdigest()[0:12]
s
s[0:12]
| scripts/deprecated-scripts/weightshistory-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import callflow
# %load_ext callflow
# %callflow --data_dir /Users/jarus/Work/llnl/CallFlow/data/lulesh-8-runs --profile_format caliper_json
| examples/%callflow-ipython-magic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/markhanjin/CPEN-21A-BSCpE-1-2/blob/main/Loop_Statement.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="l4sCK1xO6reV"
# ##For Loop
#
# + colab={"base_uri": "https://localhost:8080/"} id="YWUh5oAL613a" outputId="9c817cf6-8077-4ebe-f254-dee0402a245b"
week=["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]
for x in week:
print(x)
# + [markdown] id="R_YEb0md6-IZ"
# ##Break Statement
# + colab={"base_uri": "https://localhost:8080/"} id="AFwuj9y37CwI" outputId="3c663692-b4e6-49de-aa65-9f88ac23aaa0"
for x in week:
print(x)
if x=="Thursday":
break
# + colab={"base_uri": "https://localhost:8080/"} id="B7GDm_xR7NV2" outputId="a6e74ecf-fe56-4b86-c5d3-fb04854e1af1"
for x in week:
if x=="Thursday":
break
print(x)
# + [markdown] id="3SU7ga3w7Qf8"
# ##Looping through String
# + colab={"base_uri": "https://localhost:8080/"} id="-DcpKQsf7S_i" outputId="9aec784c-5336-4a36-e9bb-4564d45db6c9"
for x in "Programming with Python":
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="IN8h8sGu7ZyN" outputId="17dc8a5a-5193-464f-e03c-c755ff3bc239"
for x in range(10):
print(x)
# + [markdown] id="YLm-PSiJ7cpk"
# ##Nested Loops
# + colab={"base_uri": "https://localhost:8080/"} id="Bn4-Msiu7e2T" outputId="08b7fcc4-6c5c-4592-fc8e-76ba8c9d80f3"
adjective=["red","big","tasty"]
fruits=["apple","banana","cherry"]
for x in adjective:
for y in fruits:
print(x,y)
# + [markdown] id="G5J5KK4y7l-p"
# ##While Loop
# + colab={"base_uri": "https://localhost:8080/"} id="GyTa9et-7n80" outputId="5d0c6914-52a0-4435-b50d-d926386fc441"
i=10
while i>6:
print(i)
i-=1 #Assignment operator for subtraction i=i-1
# + [markdown] id="QVSE6AFi7rot"
# ##The Break Statement
# + colab={"base_uri": "https://localhost:8080/"} id="iG3MuDnP7tri" outputId="8ff6ea45-783f-4a01-9732-2226847f811d"
i=10
while i>6:
print(i)
if i==8:
break
i-=1
# + [markdown] id="hH2jNLgs7xYO"
# ##The Continue Statement
# + colab={"base_uri": "https://localhost:8080/"} id="7UF9Luo07zwG" outputId="9175244a-4b68-4c00-cb37-1d4beb7c5a1a"
i=10
while i>6:
i=i-1
if i==8:
continue
print(i)
# + [markdown] id="VnJdEWxC75tT"
# ##Else statement
# + colab={"base_uri": "https://localhost:8080/"} id="7z1WM0fq77Xx" outputId="d9c7e34e-04f8-4f46-ff1f-c230404b4632"
i=10
while i>6:
i=i-1
print(i)
else:
print("i is no longer greater than 6")
# + [markdown] id="9usUOIPI8AmV"
# ##Application 1
# + [markdown] id="_tZWosBL8FNr"
# ##For Loops
# + colab={"base_uri": "https://localhost:8080/"} id="9yEUDTua8G9m" outputId="b34758b0-8f77-4813-de1c-837241d97908"
Value=["Value 0","Value 1","Value 2","Value 3","Value 4","Value 5","Value 6","Value 7","Value 8","Value 9","Value 10"]
for x in Value:
print(x)
# + [markdown] id="NI_ELDFl8LtD"
# ##While Loops
# + colab={"base_uri": "https://localhost:8080/"} id="tnkdkl4G8NOr" outputId="7f21c8f8-edd7-49e6-bce7-3bcb10e0ff47"
i=0
while i<11:
print("Value",i)
i+=1
#else
#print("i is no longer greater 11")
# + [markdown] id="LPbbvnXA8RBa"
# ##Application 2
# + colab={"base_uri": "https://localhost:8080/"} id="we64M5Sw8TSA" outputId="8217f23c-e73b-4c82-e689-09194cd5675f"
i=20
while i>3:
i=i-1
print(i)
if i==4:
break
#else:
#print("i is no longer greater than 3")
| Loop_Statement.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Running Times on Real World Graphs
#
# Graphs from https://www.cc.gatech.edu/dimacs10/archive/clustering.shtml
# +
import numpy as np
import matplotlib as mpl
# %matplotlib inline
import pandas as pd
import json
import glob
import os
import seaborn
# +
# Merge result JSONs from several experiment batches into one nested dict
# {typename: {key: object_data}}; later files update/extend earlier ones.
data = {}
paths = glob.glob("../../data/results/all_real/*.json") # data on DSLM algorithms (no meltdown patches)
paths += glob.glob("../../data/results/all_real_seq/*.json") # data on sequential and parallel algorithms - first five runs (but still with meltdown patches)
paths += glob.glob("../../data/results/all_real_seq2/*.json") # data on sequential and parallel algorithms - second five runs (also with meltdown patches)
paths += glob.glob("../../data/results/gossip_map/*.json") # data on 5 gossip map runs without meltdown patches
for path in paths:
    # NOTE(review): the open() handles are never closed; harmless in a
    # notebook, but `with open(path) as f:` would be cleaner.
    for typename, items in json.load(open(path)).items():
        if typename in data:
            for key, object_data in items.items():
                if key in data[typename]:
                    # Same run key seen before: merge its fields in place.
                    data[typename][key].update(object_data)
                else:
                    data[typename][key] = object_data
        else:
            data[typename] = items
# One DataFrame per record type, indexed by the run/object key.
frames = { typename: pd.DataFrame.from_dict(items, orient='index') for typename, items in data.items() }
# +
dlslm_label = 'DSLM-Mod'
dlslm_me_label = 'DSLM-Map'
seq_postfix = ' w. Seq.'
no_contraction_postfix = ' w/o Contraction'
dlslm_ws_label = dlslm_label + seq_postfix
dlslm_nc_label = dlslm_label + no_contraction_postfix
seq_louvain_label = 'Seq. Louvain'
seq_infomap_label = 'Seq. InfoMap'
plm_label = 'PLM'
relax_map_label = 'RelaxMap'
gossip_map_label = 'GossipMap'
algo_name_mapping = {
'synchronous local moving with map equation': dlslm_me_label,
'synchronous local moving with modularity': dlslm_label,
'sequential louvain': seq_louvain_label,
'sequential infomap': seq_infomap_label,
'relax map': relax_map_label,
'gossip map': gossip_map_label
}
frames['algorithm_run'].replace({ 'algorithm': algo_name_mapping }, inplace=True)
frames['algorithm_run']['algorithm'] += frames['algorithm_run'].merge(frames['program_run'], left_on='program_run_id', right_index=True, how='left')['switch_to_seq'].map({ False: '', True: seq_postfix, np.NaN: '' })
frames['algorithm_run']['algorithm'] += frames['algorithm_run'].merge(frames['program_run'], left_on='program_run_id', right_index=True, how='left')['contraction'].map({ False: no_contraction_postfix, True: '', np.NaN: '' })
# -
frames['algorithm_run']['runtime'].fillna((frames['algorithm_run']['done_ts'] - frames['algorithm_run']['start_ts']) / 1000000.0, inplace=True)
# +
frames['program_run']['graph_path'] = frames['program_run']['graph']
graph_names = {
'data/graphs/uk-2002.metis-preprocessed-*.bin': 'uk-2002',
'data/graphs/uk-2007-05.metis-preprocessed-*.bin': 'uk-2007-05',
'data/graphs/in-2004.metis-preprocessed-*.bin': 'in-2004',
'data/graphs/com-friendster-preprocessed-*.bin': 'friendster',
'data/graphs/com-lj.ungraph-preprocessed-*.bin': 'lj',
'data/graphs/com-orkut.ungraph-preprocessed-*.bin': 'orkut',
'data/graphs/com-youtube.ungraph-preprocessed-*.bin': 'youtube',
'data/graphs/com-amazon.ungraph-preprocessed-*.bin': 'amazon',
'data/graphs/europe.osm-preprocessed-*.bin': 'osm-europe',
}
frames['program_run'].replace({ 'graph': graph_names }, inplace=True)
# -
# ## Average running times
#
# - 10 runs for each algorithm and graph
# - only 5 runs from gossip map to not mix runs with and without patches
# - some runs of GossipMap on friendster and uk-2007-05 crashed
# +
all_data = frames['clustering'] \
.merge(frames['algorithm_run'], left_on='algorithm_run_id', right_index=True) \
.merge(frames['program_run'], left_on='program_run_id', right_index=True) \
.groupby(['graph', 'algorithm'])['runtime'].mean().round(0).to_frame() \
.unstack()["runtime"][[seq_louvain_label, plm_label, dlslm_label, dlslm_nc_label, seq_infomap_label, relax_map_label, gossip_map_label, dlslm_me_label]]
all_data = all_data.loc[frames['program_run'].sort_values('edge_count')['graph'].dropna().unique()]
graph_data = frames['program_run'].dropna(subset=['hosts', 'edge_count']).groupby('graph').agg({ 'node_count': 'first', 'edge_count': 'first', 'hosts': 'max' })
graph_data['hosts'] = graph_data['hosts'].astype(int)
graph_data['edge_count'] = graph_data['edge_count'].astype(int)
graph_data.columns = ['n', 'm', 'hosts']
res = graph_data.loc[['uk-2002', 'uk-2007-05', 'friendster', 'lj', 'orkut']].sort_values('m').merge(all_data, left_index=True, right_index=True)
# with open("../../dist-thrill-cluster/plots/real_world_runtimes.tex", "w") as file:
print(res.to_latex().replace('.0', '').replace(' NaN', 'oom'))
res
| notebooks/Paper - Real World Runtimes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mowl39
# language: python
# name: mowl39
# ---
# # Walking
# +
import sys
sys.path.append("../../../")
import torch as th
from mowl.datasets.ppi_yeast import PPIYeastSlimDataset
from mowl.embeddings.graph_based.dl2vec.model import DL2Vec
from gensim.models import Word2Vec
import pickle as pkl
import numpy as np
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from mowl.graph.factory import parser_factory
from mowl.graph.edge import Edge
import gensim
import logging
import time
# Fix: basicConfig has no `set_level` argument (the original raised
# "ValueError: Unrecognised argument(s): set_level"); the keyword is `level`.
logging.basicConfig(level = logging.INFO)
# -
# ## Projecting the ontology
# First we need to get a graph from an ontology. The following function will do it:
def getOntProjection():
    """Load the PPI yeast-slim dataset and project its ontology into a graph.

    Returns a tuple ``(edges, entities)`` where *edges* is the projected
    edge list and *entities* the node names extracted from it.
    """
    logging.info("Creating dataset...")
    t0 = time.time()
    dataset = PPIYeastSlimDataset()
    logging.info("Dataset created in %f seconds", time.time() - t0)

    logging.info("Projecting graph...")
    t0 = time.time()
    projector = parser_factory(
        "dl2vec",  # alternatives: taxonomy, taxonomy_rels, owl2vec_star
        dataset.ontology,
        bidirectional_taxonomy=True)
    edges = projector.parse()
    logging.info("Graph projected in %f seconds", time.time() - t0)

    entities, _ = Edge.getEntitiesAndRelations(edges)
    return edges, entities
# ## Learning embeddings with Word2Vec
#
# Once the walks are generated, we will use them to learn embeddings using the Word2Vec model:
def learnEmbeddingsWithWord2Vec(corpus_path, entities):
    """Train a skip-gram Word2Vec model on the walk corpus.

    Returns ``(embeddings, vector_size)`` where *embeddings* maps each entity
    whose name starts with the yeast taxon prefix "4932" to its vector.
    """
    logging.info("Learning embeddings..")
    t0 = time.time()
    corpus = gensim.models.word2vec.LineSentence(corpus_path)
    model = gensim.models.Word2Vec(
        corpus,
        sg=1,  # skip-gram
        min_count=1,
        vector_size=100,
        window = 10,
        epochs = 10,
        workers = 16)
    logging.info("Embeddings learned in %f seconds", time.time() - t0)
    kv = model.wv
    embeddings = {node: kv[node] for node in entities if node.startswith("4932")}
    return embeddings, model.vector_size
# ## Plotting TSNE representations
# Once the embeddings are ready, we can use them for different tasks. Here we use the TSNE method to have a visual representation of them:
def plotTSNE(embeddings, size):
    """Scatter-plot a 2-D TSNE projection of *embeddings*, colored by the
    first EC-number component read from data/yeast_ec.tab."""
    # Parse the tab-separated EC annotation file: column 3 holds the protein
    # id (first item of a ';' list), column 4 the EC number.
    ec_numbers = {}
    with open('data/yeast_ec.tab') as f:
        next(f)  # skip the header line
        for line in f:
            it = line.strip().split('\t', -1)
            if len(it) < 5:
                continue
            if it[3]:
                prot_id = it[3].split(';')[0]
                prot_id = '{0}'.format(prot_id)
                ec_numbers[prot_id] = it[4]
    # Keep only annotated proteins that actually have an embedding.
    ec_dict = {}
    for prot in ec_numbers:
        if prot in embeddings:
            ec_dict[prot] = embeddings[prot]
    # Stack the kept vectors into a (n_proteins, size) matrix for TSNE.
    embeds = np.zeros((len(ec_dict), size), dtype=np.float32)
    for i, emb in enumerate(ec_dict.values()):
        embeds[i, :] = emb
    nodemap = {}  # row index -> protein id
    for i, m in enumerate(ec_dict.keys()):
        nodemap[i] = m
    X = TSNE(n_components=2, verbose=1, n_iter=5000, n_jobs=8).fit_transform(embeds)
    # Group the 2-D points by top-level EC class; '0' is a dummy bucket that
    # is skipped when plotting.
    classes = {'0': [[], []]}
    for item in nodemap.items():
        k, v = item
        if v in ec_numbers:
            ec = ec_numbers[v].split('.')[0]
            if ec not in classes:
                classes[ec] = [[], []]
            classes[ec][0].append(X[k, 0])
            classes[ec][1].append(X[k, 1])
    colors = iter(plt.cm.rainbow(np.linspace(0, 1, len(classes))))
    fig, ax = plt.subplots(figsize=(20, 20))
    for ec, items in classes.items():
        if ec == '0':
            continue
        color = next(colors)
        ax.scatter(items[0], items[1], color=color, label=ec)
    ax.legend()
    ax.grid(True)
    plt.show()
# ## Putting all together and trying different walking methods
# Now, we can use the functions defined above and test them with the walking methods existing in mOWL
from mowl.walking.node2vec.model import Node2Vec as N2V
from mowl.walking.deepwalk.model import DeepWalk as DW
from mowl.walking.walkRdfAndOwl.model import WalkRDFAndOWL as WRO
# Project the ontology once; the same edges are reused by every walker below.
edges, entities = getOntProjection()
# ### DeepWalk
# +
logging.info("Walking..")
start = time.time()
walksFile = "data/walksDeepwalk"
walker = DW(
    edges,
    100, #num_walks
    100, #walk_length
    0.1, #alpha
    walksFile, #file to write the walks
    workers = 16, #num_workers,
)
walker.walk()
end = time.time()
logging.info("Walks generated in %f seconds", end - start)
dwEmbeddings, size = learnEmbeddingsWithWord2Vec(walksFile, entities)
# -
plotTSNE(dwEmbeddings, size)
# ## Node2Vec
# +
logging.info("Walking..")
start = time.time()
walksFile = "data/walksNode2Vec"
walker = N2V(
    edges,
    100, #num_walks
    100, #walk_length
    10, #p
    0.1, #q
    walksFile,
    workers = 16, #num_workers,
)
walker.walk()
end = time.time()
logging.info("Walks generated in %f seconds", end - start)
n2vEmbeddings, size = learnEmbeddingsWithWord2Vec(walksFile, entities)
# -
plotTSNE(n2vEmbeddings, size)
# ## Walking RDF and OWL
# +
logging.info("Walking..")
start = time.time()
walksFile = "data/walksWalkRDFAndOWL"
walker = WRO(
    edges,
    100, #num_walks
    100, #walk_length
    walksFile,
    workers = 16, #num_workers,
)
walker.walk()
end = time.time()
logging.info("Walks generated in %f seconds", end - start)
wroEmbeddings, size = learnEmbeddingsWithWord2Vec(walksFile, entities)
# -
plotTSNE(wroEmbeddings, size)
| docs/source/tutorials/Walking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
import hvplot.pandas # noqa
# Andrews curves provides a mechanism for visualising clusters of multivariate data.
#
# Andrews curves have the functional form:
#
# f(t) = x_1/sqrt(2) + x_2 sin(t) + x_3 cos(t) + x_4 sin(2t) + x_5 cos(2t) + ...
#
# Where *x* coefficients correspond to the values of each dimension and *t* is
# linearly spaced between *-pi* and *+pi*. Each row of frame then corresponds to
# a single curve.
# +
from bokeh.sampledata import iris
# Rebind: `iris` now names the flowers DataFrame rather than the module.
iris = iris.flowers
# -
iris.head()
hvplot.plotting.andrews_curves(
    iris,
    class_column='species',  # one curve per row, colored by species
    samples=20,              # number of t-samples along each curve
)
| examples/reference/pandas/andrewscurves.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/RoyMillamis/CPEN-21A-CPE-1-2/blob/main/OOP_Concepts2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="xakRlbfpwxBu"
# Classes with Multiple Objects
# + colab={"base_uri": "https://localhost:8080/"} id="frHhnumZw5eW" outputId="c3518a8d-96dc-4866-dffa-f5a21a8dd6a8"
# One class, several independent instances — each keeps its own bird_name.
class Birds:
    def __init__(self,bird_name):
        self.bird_name=bird_name  # name interpolated into the messages below
    def flying_birds(self):
        print(f"{self.bird_name} flies above the sky")
    def non_flying_birds(self):
        print(f"{self.bird_name} is the national bird of the Philippines")
vulture=Birds("Griffon Vulture")
crane=Birds("Common Crane")
emu=Birds("Emu")
vulture.flying_birds()
crane.flying_birds()
emu.non_flying_birds()
# + [markdown] id="0zYye040y4p1"
# Encapsulation
# + colab={"base_uri": "https://localhost:8080/"} id="qRTNegmxytnf" outputId="0ef36eb7-b23f-46f5-a6fd-1f4667a86cae"
class foo:
    """Holds two public numbers and sums them (encapsulation demo)."""
    def __init__(self, a, b):
        # Tuple unpacking stores both operands in one statement.
        self.a, self.b = a, b
    def add(self):
        """Return the sum of the two stored values."""
        total = self.a + self.b
        return total
number = foo(3,4)  # expected result of add(): 3 + 4 = 7
number.add()
# + colab={"base_uri": "https://localhost:8080/"} id="Tc84BLlXztuM" outputId="83985454-4141-4317-bb1f-442e5cdacdf9"
# Same class again: because a and b are public, they can be rebound after
# construction and the next add() sees the new value.
class foo:
    def __init__(self,a,b):
        self.a=a
        self.b=b
    def add(self):
        return self.a+self.b
number = foo(3,4) # after number.a=9 below: 9+4=13
number.add()
number.a=9  # rebinding the public attribute changes the next add()
number.add()
# + [markdown] id="XU9FYT3e0Ygz"
# Encapsulation using mangling with double underscores
# + colab={"base_uri": "https://localhost:8080/"} id="iyit_YLZz-ME" outputId="06074a3a-4487-4b79-8453-dd03b4cce4d0"
# Name mangling: __a/__b are stored as _foo__a/_foo__b, so the plain
# `number.a = 7` below creates a brand-new attribute and the sum stays 3+4=7.
class foo:
    def __init__(self,a,b):
        self.__a=a
        self.__b=b
    def add(self):
        return self.__a+self.__b # private attributes
number = foo(3,4) # add() keeps returning 7; the assignment below has no effect
number.add()
number.a=7  # unrelated new public attribute; the mangled __a is untouched
number.add()
# + [markdown] id="RQwUHY6I0-6V"
# Encapsulation with Private Attributes
# + colab={"base_uri": "https://localhost:8080/"} id="2AcUj7eU0lCd" outputId="10ae6cdd-8c0c-4ed5-c814-dcf4f7bf3651"
class Counter:
    """A simple up-counter with reset (public-attribute version)."""
    def __init__(self):
        self.current = 0
    def increment(self):
        """Advance the count by one."""
        self.current = self.current + 1
    def value(self):
        """Return the current count."""
        return self.current
    def reset(self):
        """Set the count back to zero."""
        self.current = 0
num = Counter()
num.increment() # counter = counter + 1
num.increment()
num.increment()
num.value()  # -> 3
# + [markdown] id="Wnjqf97I3R9M"
# Encapsulation using mangling with double underscores
# + colab={"base_uri": "https://localhost:8080/"} id="heVoWYkn2SYi" outputId="e2ee3be0-75ff-401a-d27e-e6ccbe80dae6"
# Counter with a name-mangled attribute: `num.counter = 1` below creates an
# unrelated attribute and does not interfere with the internal count.
class Counter:
    def __init__(self):
        self.__current = 0
    def increment(self):
        self.__current +=1
    def value(self):
        return self.__current
    def reset(self):
        self.__current=0
num = Counter()
num.counter = 1  # stray attribute; the mangled __current stays at 0 here
num.increment()
num.increment()
num.increment()
num.value()  # -> 3
# + colab={"base_uri": "https://localhost:8080/"} id="FmIfdu373rea" outputId="52e91232-0aa4-4f43-82a6-1e0c5492351c"
# Same private Counter; setting the stray attribute *after* the increments
# changes nothing either — value() is still 3.
class Counter:
    def __init__(self):
        self.__current = 0
    def increment(self):
        self.__current +=1
    def value(self):
        return self.__current
    def reset(self):
        self.__current=0
num = Counter()
num.increment()
num.increment()
num.increment()
num.counter = 1  # unrelated attribute; does not touch the mangled __current
num.value()  # -> 3
# + [markdown] id="6B_OmC-S4u4R"
# Inheritance
# + colab={"base_uri": "https://localhost:8080/"} id="j6hyOP1q4way" outputId="6612a10d-f540-4d35-c651-bb2627ac8a06"
# Base class used by the inheritance examples that follow.
class Person:
    def __init__ (self,firstname,surname):
        self.firstname=firstname
        self.surname= surname
    def printname(self):
        print(self.firstname,self.surname)
person = Person("Roy","Millamis")
person.printname()
# + colab={"base_uri": "https://localhost:8080/"} id="m78xxl6b50xR" outputId="cf2dfa95-ef81-4289-92cd-04e8a6deb85d"
class Person:
    def __init__ (self,firstname,surname):
        self.firstname=firstname
        self.surname= surname
    def printname(self):
        print(self.firstname,self.surname)
person = Person("Roy","Millamis")
person.printname()
# Teacher inherits __init__ and printname from Person unchanged.
class Teacher(Person):
    pass
person2= Teacher("Paul","Millamis")
person2.printname()
# + colab={"base_uri": "https://localhost:8080/"} id="o6EN2uDu6XlI" outputId="2515865b-4691-409b-bd32-90688de32838"
class Person:
    def __init__ (self,firstname,surname):
        self.firstname=firstname
        self.surname= surname
    def printname(self):
        print(self.firstname,self.surname)
person = Person("Roy","Millamis")
person.printname()
# Two empty subclasses: both reuse Person's constructor and printname.
class Teacher(Person):
    pass
person2= Teacher("Paul","Millamis")
person2.printname()
class Student(Person):
    pass
person3 =Student("Wenie","Millamis")
person3.printname()
# + [markdown] id="l_g0RozZ6vLp"
# Polymorphism
# + colab={"base_uri": "https://localhost:8080/"} id="xvUNYYQw6y_f" outputId="203a8d30-6caa-4eb0-e1dd-a3e1a2dceb5b"
# Polymorphism: both subclasses expose area(), each with its own formula.
class RegularPolygon:
    def __init__(self,side):
        self.side = side
class Square(RegularPolygon):
    def area(self):
        return self.side*self.side
class EquilateralTriangle(RegularPolygon):
    def area(self):
        # 0.433 ~= sqrt(3)/4, the equilateral-triangle area coefficient
        return self.side*self.side*0.433
object = Square(4)  # NOTE(review): shadows the built-in `object`
print(object.area())
object2=EquilateralTriangle(3)
print(object2.area())
# + [markdown] id="-fWwwmSj9L3v"
# Application 1 -
# 1. Create a Python Program that displays the name of three students (Student1,Student 2, and Student 3) and their term grade
# 2. Create a class name Person and Attributes - std1,std2,std3,pre,mid,fin
# 3. Compute the average of each term grade using Grade()method
# 4. Information about student's grades must be hidden from others
# + id="9X8fDy9TJvCx" colab={"base_uri": "https://localhost:8080/"} outputId="de80077e-d352-4915-fa2b-9615f7df6e64"
# Application 1: one class per student; the term grades are kept private
# via name mangling so outside code cannot read them directly.
class Person:
    """Student record for student 1 with private term grades."""
    def __init__(self,std1,pre,mid,fin):
        self.std1=std1
        self.__pre=pre
        self.__mid=mid
        self.__fin=fin
    def Grade(self):
        # NOTE(review): the exercise says "average", but each term is scaled
        # by 0.70 instead of averaged; kept as originally written.
        print(f"{self.std1} your average grade in prelim is {self.__pre*.70}, in midterm {self.__mid*.70}, and in final {self.__fin*.70}")
s1=Person("student1",100,100,100)
s1.Grade()
class Person2(Person):
    """Student record for student 2 (overrides everything from Person)."""
    def __init__(self,std2,pre,mid,fin):
        self.std2=std2
        self.__pre=pre
        self.__mid=mid
        self.__fin=fin
    def Grade(self):
        print(f"{self.std2} your average grade in prelim is {self.__pre*.70}, in midterm {self.__mid*.70}, and in final {self.__fin*.70}")
s2 = Person2("Student2",60,70,70)
s2.Grade()
class Person3(Person):
    """Student record for student 3 (overrides everything from Person)."""
    def __init__(self,std3,pre,mid,fin):
        self.std3=std3
        self.__pre=pre
        self.__mid=mid
        self.__fin=fin
    def Grade(self):
        print(f"{self.std3} your average grade in prelim is {self.__pre*.70}, in midterm {self.__mid*.70}, and in final {self.__fin*.70}")
# Bug fix: this line previously instantiated Person2 again, leaving Person3
# defined but never used; the printed output is unchanged.
s3 = Person3("Student3",50,60,70)
s3.Grade()
| OOP_Concepts2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:myenv]
# language: python
# name: conda-env-myenv-py
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="rTlxLZtkozB8" outputId="44f4f006-c338-4e96-aef5-8d7a6c18c99d"
#from google.colab import drive
#drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="gU1HikBFrUqw" outputId="f5b021da-adcb-4b7d-9599-fb8fc24dbc03"
# #!pip install -q condacolab
#import condacolab
#condacolab.install()
# + colab={"base_uri": "https://localhost:8080/"} id="zsEE2qgwwG4w" outputId="20df1f86-8d01-4c98-d915-0a6eea68dc1c"
# #!mamba install -q openmm
# #!mamba install -q mdtraj
# #!mamba install -q mdshare
# #!mamba install -q nglview
# #!mamba install -q pyemma
# #!mamba install -q msmtools
# + [markdown] id="Owjvo0yRJMul"
# The tutorial aims to build a coarse-grained model of the B1 immunoglobulin-binding domain of streptococcal protein G (PDB code 1PGB, further referred to as protein G) that can capture folding-unfolding events.
# + [markdown] id="qEjXA9D8Jr1k"
# **Task 1**
# + [markdown] id="hUzSGcMrJsKZ"
# In this task, we will build a structure-based model of protein G and perform a short simulation. We will represent each residue as a single atom with unit mass, with each pair of consecutive atoms connected by a bond. Interactions between atoms are defined as followed:
# + [markdown] id="kUmFecvAKWv9"
# $$
# V = V_{bond} + V_{angle} + V_{dihedral} + V_{nonbonded}
# $$
#
# $$
# V_{bond} = \sum_{bond} \frac{Kr}{2} (r - r_0)^2
# $$
#
# $$
# V_{angle} = \sum_{angle} \frac{K_{\theta}}{2} (\theta - \theta_0)^2
# $$
#
# $$
# V_{dihedral} = \sum_{dihedral} [(1 - cos(\phi - \phi_0)) + 0.5 (1 - cos(3*(\phi - \phi_0)))]
# $$
#
# $$
# V_{nonbonded} = \sum_{native} 5 \left(\frac{r_0}{2} \right)^{12} - 6\left(\frac{r_0}{2} \right)^{10} + \sum_{non-native} \left(\frac{\sigma}{r} \right)^{12}
# $$
# In these equations, $r_0, \theta_0 , \Psi_0$, represent distances, angles and dihedral angles found in native structure. Values for $K_{\theta}, \sigma, K_r$ you can find in seed_notebook. We will build this model using tools provided by the OpenMM package.
# + [markdown] id="OPordEGWKYhu"
# **1.1)** You are given a file '1PGB.pdb'. It is a pdb file that contains the native structure of protein G. Using mdtraj, create a pdb file that contains only $C\alpha$ atoms and their respective positions. This file represents the topology of our model. **Hint:** mdtraj.trajectory.atom_slice method may be useful (1 pt).
# + colab={"base_uri": "https://localhost:8080/", "height": 17, "referenced_widgets": ["882311e98dd94e4faac0a2f87c8dd330"]} id="uVCzmlg2zAwv" outputId="6898ed7d-9c06-4a8b-d335-57a80d2239d6"
from sys import stdout
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
import numpy as np
import mdtraj as md
import pandas
import matplotlib.pyplot as plt
import nglview
import pdbfixer
# +
#fixer = pdbfixer.PDBFixer(pdbid='1PGB')
# +
#fixer.findMissingResidues()
#print(fixer.missingResidues)
#fixer.findNonstandardResidues()
#print(fixer.nonstandardResidues)
#fixer.findMissingAtoms()
#print(fixer.missingAtoms)
# -
# Load the native structure and keep only the C-alpha atoms: one bead per
# residue for the coarse-grained model.
main_pdb = md.load_pdb('1PGB.pdb')
atoms_to_keep = [atom.index for atom in main_pdb.topology.atoms if atom.name == 'CA']
# + id="BBZ_tW6VzBEl"
new_pdb = main_pdb.atom_slice(atoms_to_keep)
# + colab={"base_uri": "https://localhost:8080/"} id="bLl0HC7bCDNn" outputId="d9aeb23e-69e4-45ab-fc10-8ae88e24374c"
new_pdb.save('CA_only.pdb')
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="LNQZiKOkCDYm" outputId="a7b6bfac-da0d-47f1-db27-11c066c0f79a"
#atoms, bonds = coord.topology.to_dataframe()
#atoms
# + id="W5q-NjBb7e91"
#atoms_to_keep = [a.index for a in coord.topology.atoms if a.name == 'CA']
#new_coord = coord.restrict_atoms(atoms_to_keep)
#coord.save('CA-only.pdb')
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="55BC-V7d7fCr" outputId="ccdaf1cb-fb4c-4c58-ab7b-b8450d2ed3c6"
#atoms, bonds = new_coord.topology.to_dataframe()
#atoms
# + [markdown] id="UyAVt2Bd3X82"
# **1.2)** Create a simtk.openmm.app.topology.Topology object. Add a chain, all the residues, one CA atom per residue, and bonds between atoms from consecutive residues (1 pt).
# + id="4K2vxSKj7fPX"
# Read the CA-only structure back with OpenMM and inspect its bonds.
pdb = PDBFile('CA_only.pdb')
#pdb = md.load('CA_only.pdb')
#main_pdb = PDBFile('1PGB.pdb')
# -
forcefield = ForceField('amber99sbildn.xml', 'tip3p.xml')
top = pdb.getTopology()
bonds = list(top.bonds())
# NOTE(review): a CA-only PDB may contain no bond records at all, in which
# case this loop prints nothing — confirm against the saved file.
for b in bonds:
    names = [b.atom1.name, b.atom2.name]
    if 'CA' in names:
        print(names, b.atom1.index, b.atom2.index)
    else:
        print('no Ca')
# Build a fresh Topology for the coarse-grained model (this rebinds `top`,
# which previously held the topology read from the PDB above).
# Fixes vs. the original: Topology must be *instantiated* (the class object
# itself was assigned), residue names are strings (`MET` was an undefined
# name), and addResidue requires the chain the residue belongs to.
top = Topology()
chain = top.addChain(id='A')
residue = top.addResidue('MET', chain)
# + colab={"base_uri": "https://localhost:8080/", "height": 392} id="Iz6TlFAV7fTn" outputId="38ba1f4b-35e4-406d-cd1c-7052f1fbb2ee"
# Parameterize the topology that was read from the CA-only PDB.
# NOTE(review): amber99sbildn expects complete residues; template matching on
# a CA-only topology will likely fail — confirm this cell actually runs.
system = forcefield.createSystem(pdb.topology)
#integrator = LangevinMiddleIntegrator(300*kelvin, 1/picosecond, 0.004*picoseconds)
# + id="POd3EXZXCDi8"
# + id="NZbwpwiGCDs5"
# + id="Mc7av6ci4Ni7"
# + id="tr04PqLR4Nl1"
# + id="QbYyzFju4Noi"
# + id="qFoSafZM4NrI"
# + id="FZ-mGGQc4Nts"
# + id="IJBzHaEc4NwI"
# + id="K4KQaRx04NzM"
| Worksheet8/HW_8.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analizar UCI census dataset.
import re
import numpy as np
import pandas as pd
eps = np.finfo(float).eps
from numpy import log2 as log
from tabulate import tabulate as tb
from anytree import Node, RenderTree
from anytree import search as anys
from anytree.exporter import DotExporter
from IPython.display import Image
from IPython.display import Markdown
import time
# ##### Load dataset:
# +
# Column names for the UCI adult (census income) dataset.
features = ["Age", "Workclass", "fnlwgt", "Education", "Education-Num", "Marital Status",
            "Occupation", "Relationship", "Race", "Sex", "Capital Gain", "Capital Loss",
            "Hours per week", "Country", "Target"]
# '?' marks missing values; rows containing them are dropped.
train_data = pd.read_csv(
    #"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data",
    "adult.data",
    names=features,
    sep=r'\s*,\s*',
    engine='python',
    na_values="?").dropna()
Target = 'Target'  # name of the label column, used globally below
Labels = train_data.Target.unique()
counts = train_data.Target.value_counts()
print(counts)
test_data = pd.read_csv(
    #"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.test",
    "adult.test_fix",
    names=features,
    sep=r'\s*,\s*',
    skiprows=[0],  # the test file starts with a junk header line
    engine='python',
    na_values="?").dropna()
Labels = test_data.Target.unique()
counts = test_data.Target.value_counts()
print(counts)
# Re-number rows so .loc[i] works after the dropna() filtering.
train_data = train_data.reset_index(drop=True)
test_data = test_data.reset_index(drop=True)
# -
train_data.head(5)
# # Escolher uma coluna para fazer a predição. Qual?
# ##### Impurity functions:
#
# - **Gini index** [Gini, 1912]:
#
# $ i_G(t) = \sum\limits_{k \forall \Omega} p_{kt}(1 - p_{kt}) $
#
# - **Shannon entropy** [Shannon and Waver, 1949]:
#
# $ i_H(t) = \sum\limits_{k \forall \Omega} |p_{kt}\log_2(p_{kt})| $
#
# With the set of classes $\Omega$ and the probability of $k$-classe given node $t$: $p_{kt} \triangleq p(c_k|t) ~|~ p(c_k|t) \in [0, 1]$.
# Obs.: some authors uses $S$ (common symbol) to denote *entropy*, here we use $i$ (impurity).
# Obs. 2: see Figure 3.1 and 3.3 in Louppe [2014] for a better view of a decision tree structure.
#
# - **Function defined**:
#
# $ i_T(t) = \sum\limits_{k \forall \Omega} f_T(p_{kt}) $
#
# $f_T(x) = \begin{cases}
# x(1-x) & T = \text{Gini} \\
# |x\log_2(x)| & T = \text{Shannon} \\
# \end{cases} $
#
# Were $T$ is the type of impurity (Gini or Shannon).
#
# ## Calcula a entropia total (considerando o dataset/batch inteiro)
def find_entropy(df):
    """Shannon entropy of the Target label distribution of *df*."""
    labels = df[Target]
    total = len(labels)
    acc = 0
    for lab in labels.unique():
        p = labels.value_counts()[lab] / total
        acc += -p * np.log2(p)
    return acc
Markdown("## {}".format(find_entropy(train_data)))  # entropy of the full training labels
# ## Cacula a entropia considerando cada coluna ($feature$)
# +
def find_entropy_attribute(df, attribute):
    """Dispatch to the numeric or categorical entropy routine.

    Returns ``(entropy, threshold)``; the threshold is None for
    categorical columns.
    """
    if np.issubdtype(df[attribute].dtype, np.number):
        return find_entropy_attribute_number(df, attribute)
    return find_entropy_attribute_not_number(df, attribute), None
def find_entropy_attribute_not_number(df,attribute):
    """Weighted entropy of a categorical attribute, summed over its values.

    `eps` (machine epsilon, module-level) guards the divisions and logs
    against empty partitions.
    """
    target_variables = df[Target].unique() #This gives all 'Yes' and 'No'
    variables = df[attribute].unique() #This gives different features in that attribute (like 'Hot','Cold' in Temperature)
    entropy2 = 0
    for variable in variables:
        entropy = 0
        for target_variable in target_variables:
            num = len(df[attribute][df[attribute]==variable][df[Target] ==target_variable])
            den = len(df[attribute][df[attribute]==variable])
            fraction = num/(den+eps)
            entropy += -fraction*log(fraction+eps)
        # Weight each value's entropy by its share of the rows; the sign is
        # cancelled by the abs() below.
        entropy2 += -(den/len(df))*entropy
    return abs(entropy2)
def find_entropy_attribute_number(df,attribute):
    """Entropy of a binary split (<= vk / > vk) on a numeric attribute.

    Returns ``(entropy, vk)`` for the threshold chosen by the search below.
    """
    target_variables = df[Target].unique() #This gives all 'Yes' and 'No'
    variables = df[attribute].unique() #This gives different features in that attribute (like 'Hot','Cold' in Temperature)
    variables.sort()
    if len(variables)>2:
        # Drop the extreme values so every candidate threshold keeps both
        # partitions non-empty.
        variables = variables[1:-1]
        vk3 = variables[0]
        entropy3 = 0
    else:
        vk3 = variables[0]
        # Fix: np.Inf was removed in NumPy 2.0; np.inf is the supported
        # spelling (same value, so behavior is unchanged).
        entropy3 = np.inf
    for vk in variables:
        entropy = 0
        # Entropy contribution of the partition <= vk ...
        for target_variable in target_variables:
            num = len(df[attribute][df[attribute]<=vk][df[Target] ==target_variable])
            den = len(df[attribute][df[attribute]<=vk])
            fraction = num/(den+eps)
            entropy += -fraction*log(fraction+eps)
        # ... plus the partition > vk.
        for target_variable in target_variables:
            num = len(df[attribute][df[attribute]>vk][df[Target] ==target_variable])
            den = len(df[attribute][df[attribute]>vk])
            fraction = num/(den+eps)
            entropy += -fraction*log(fraction+eps)
        # NOTE(review): `den` here is only the size of the last (> vk) side,
        # and the search keeps the threshold with the *largest* entropy2 —
        # both look suspicious for an ID3-style split; confirm intent before
        # changing (kept as written).
        entropy2 = (den/len(df))*abs(entropy)
        #print(str(entropy2)+"|"+str(vk))
        if entropy2>entropy3:
            entropy3 = entropy2
            vk3 = vk
    return abs(entropy3),vk3
# -
train_data.columns.difference([Target])  # candidate feature columns
# Show (entropy, threshold) for a few representative features.
Markdown("## Age: {} <br/><br/> Sex: {} <br/><br/> Education: {} <br/><br/> Relationship: {} <br/><br/> Capital Gain: {} <br/><br/> Capital Loss: {} <br/><br/>".format(
    find_entropy_attribute(train_data,'Age'),
    find_entropy_attribute(train_data,'Sex'),
    find_entropy_attribute(train_data,'Education'),
    find_entropy_attribute(train_data,'Relationship'),
    find_entropy_attribute(train_data,'Capital Gain'),
    find_entropy_attribute(train_data,'Capital Loss')))
# ## Função para selecionar a features ótima.
def find_winner(df, rfs_par=False):
    """Pick the split feature: best information gain, or — when rfs_par is
    True — one feature chosen uniformly at random (random feature selection
    for forests).

    Returns ``(feature_name, numeric_threshold_or_None)``.
    """
    features = df.columns.difference([Target])
    if not rfs_par:
        base = find_entropy(df)  # hoisted: invariant across features
        IG = []
        vk = list()
        for key in features:
            temp, temp2 = find_entropy_attribute(df, key)
            vk.append(temp2)
            IG.append(base - temp)
        best = np.argmax(IG)
        return features[best], vk[best]
    else:
        # Fix: np.random.randint(n) already excludes n, so the previous
        # randint(len(features) - 1) could never select the last feature.
        key = features[np.random.randint(len(features))]
        temp, vk = find_entropy_attribute(df, key)
        return key, vk
temp = find_winner(train_data)  # (best feature, threshold) on the full set
Markdown("## Melhor $feature$: {} <br/><br/> ".format(temp))
# ## Separa a base em duas 'Capital Loss' > 155 e <= 155
#
#
# ## Se fosse 'Sex'?
# ## Duas bases com 'Sex' = Male e outra com 'Sex' = Female
#
# ## Para cada base, repete o processo com algum critério de parada.
# <br><br><br>
# ## Funções para construir e mostrar uma árvore de decisão.
# ### Constrói uma árvore:
def buildtree(df,tree=None, mytree=None, T_pro=0.9, T_pro_num=0.6,total_splits=10,splits=1):
    """Recursively grow a decision tree (nested dict) over *df*.

    Leaves are dicts with 'Class' and 'Prob'; inner keys are feature names
    and branch labels.  T_pro / T_pro_num are purity thresholds that stop
    the recursion for categorical / numeric splits; total_splits caps depth.
    """
    def ramificatree(Thd,ss):
        # Closure over node/value/subtable/clValue/counts from the caller:
        # either finish the branch as a leaf or recurse into the subtable.
        if (len(clValue)==1):
            # Pure partition: leaf with probability 1.
            tree[node][value] = {}
            tree[node][value]['Class'] = clValue[0]
            tree[node][value]['Prob'] = 1.0
            #print_result_node(node,value,clValue[0],1)
        else:
            prob = counts.max() / counts.sum()
            if (prob>=Thd)or(splits>=total_splits):
                # Pure enough (or depth budget spent): majority-class leaf.
                tree[node][value] = {}
                tree[node][value]['Class'] = clValue[counts.argmax()]
                tree[node][value]['Prob'] = prob
                #print_result_node(node,value,clValue[counts.argmax()],prob)
            else:
                ss += 1
                # NOTE(review): the recursive call forwards only `splits`,
                # so custom T_pro/T_pro_num/total_splits revert to their
                # defaults below the root — confirm this is intended.
                tree[node][value] = buildtree(subtable,splits=ss)
                #print(node +' : '+value+' : *')
    #print(find_winner(df))
    #formata_dados(dados)
    node,vk = find_winner(df)
    if tree is None:
        tree={}
        tree[node] = {}
    if vk is None:
        # Categorical split: one branch per distinct value.
        attValue = np.unique(df[node])
        for value in attValue:
            subtable = df[df[node] == value].reset_index(drop=True)
            clValue,counts = np.unique(subtable[Target],return_counts=True)
            splits += 1
            ramificatree(T_pro,ss=splits)
    else:
        # Numeric split: two branches around the threshold vk, each renamed
        # so the branch label carries the comparison (e.g. "Age >37").
        if (len(df[node][df[node] <= vk].unique())>0) and (len(df[node][df[node] > vk].unique())>0):
            # >vk
            value = node+' >'+str(vk)
            subtable = df[df[node] > vk].rename(columns = {node:value}).reset_index(drop=True)
            clValue,counts = np.unique(subtable[Target],return_counts=True)
            if (len(subtable[value].unique())==1) and (len(clValue)>1):
                # Only one attribute value left but labels still mixed:
                # cannot split further, emit a majority leaf.
                tree[node][value] = {}
                tree[node][value]['Class'] = clValue[counts.argmax()]
                prob = counts.max() / counts.sum()
                tree[node][value]['Prob'] = prob
                #print_result_node(node,value,clValue[counts.argmax()],prob)
            else:
                splits += 1
                ramificatree(T_pro_num,ss=splits)
            clValue_antes = clValue[0]
            value_antes = value
            # <=vk
            value = node+' <='+str(vk)
            subtable = df[df[node] <= vk].rename(columns = {node:value}).reset_index(drop=True)
            clValue,counts = np.unique(subtable[Target],return_counts=True)
            if ((len(subtable[value].unique())==1) and (len(clValue)>1)):
                tree[node][value] = {}
                tree[node][value]['Class'] = clValue[counts.argmax()]
                prob = counts.max() / counts.sum()
                tree[node][value]['Prob'] = prob
                #print_result_node(node,value,clValue[counts.argmax()],prob)
            else:
                splits += 1
                ramificatree(T_pro_num,ss=splits)
        else:
            # Degenerate numeric column: fall back to a categorical split by
            # stringifying the values and restarting on the same frame.
            df[node] = df[node].astype(str)
            buildtree(df)
    return tree
# Time the full-tree construction on the whole training set.
t = time.process_time()
tree = buildtree(train_data)
elapsed_time1 = time.process_time() - t
Markdown("## Tempo (s): {:.2f} <br/><br/> ".format(elapsed_time1))
tree
# ### Funções para mostrar a árvore.
# +
def print_tree(arg):
    """Render an anytree node and its descendants as indented text."""
    for prefix, _, item in RenderTree(arg):
        print("%s%s" % (prefix, item.name))
def converte_para_anytree(tree,node=None,mytree=None):
    """Convert the nested-dict tree produced by buildtree into anytree Nodes.

    Leaf children (those whose first key is 'Class') get `target` and `prob`
    attributes; inner children recurse.  Returns the dict of created nodes.
    """
    if node is None:
        # First call: the single top-level key is the root feature.
        temp = list(tree.keys())
        node = temp[0]
        mytree = {}
        mytree[node] = Node(node)
        converte_para_anytree(tree,node,mytree)
    else:
        tree = tree[node]
        if not isinstance(tree, str):
            childs = list(tree.keys())
            for child in childs:
                if (list(tree[child])[0] == 'Class'):
                    # Leaf: attach class label and probability to the node.
                    temp = mytree[node]
                    mytree[child] = Node(child, parent=temp, target=tree[child]['Class'], prob=tree[child]['Prob'])
                else:
                    temp = mytree[node]
                    mytree[child] = Node(child, parent=temp)
                    converte_para_anytree(tree,child,mytree)
        else:
            mytree[node] = 'Fim'
    return mytree
#anys.findall_by_attr(mytree['Taste'], name="target", value='Yes')
def mostra_tree(tree):
    """Print the dict tree as ASCII art, annotating leaves with their class
    label and, when present, the probability parsed from the node repr."""
    mytree = converte_para_anytree(tree)
    temp = list(tree.keys())
    root = temp[0]
    mytree[root]
    for pre, fill, node in RenderTree(mytree[root]):
        txt_node = str(node)
        # The anytree repr embeds "prob=<float>" for leaf nodes.
        m = re.search('prob\=\d+\.\d+', txt_node)
        if Labels[0] in txt_node:
            if not m is None:
                print("%s%s" % (pre, node.name+': '+Labels[0]+' ('+m.group()[5:]+')'))
            else:
                print("%s%s" % (pre, node.name+': '+Labels[0]+' (?)'))
        elif Labels[1] in txt_node:
            if not m is None:
                print("%s%s" % (pre, node.name+': '+Labels[1]+' ('+m.group()[5:]+')'))
            else:
                print("%s%s" % (pre, node.name+': '+Labels[1]+' (?)'))
        else:
            print("%s%s" % (pre, node.name))
def mostra_tree_graph(tree, largura=None, altura=None):
    """Export the tree as tree.png via graphviz and return it as an IPython
    Image (largura/altura are the optional display width/height)."""
    mytree = converte_para_anytree(tree)
    temp = list(tree.keys())
    root = temp[0]
    mytree[root]
    DotExporter(mytree[root]).to_picture("tree.png")
    return Image(filename='tree.png', width=largura, height=altura)
# -
mostra_tree(tree)        # ASCII rendering
mostra_tree_graph(tree)  # graphviz PNG rendering
# ## Fazendo uma predição
def predict(inst,tree):
    """Classify one row *inst* by walking the dict tree.

    Returns ``(class_label, probability)``; for a branch value never seen in
    training it returns a 'Not exists node: ...' marker with probability 0.
    Numeric branches are recognized by '<=' appearing in the child keys and
    the threshold is parsed back out of the key string (e.g. "Age >37").
    """
    for node in tree.keys():
        if ('<=' in str(tree[node].keys())):
            # Numeric node: exactly two children, '<= vk' and '> vk'.
            childs = list(tree[node].keys())
            if ('<=' in childs[1]):
                # Normalize ordering so childs[1] is always the '>' branch.
                temp = childs[1]
                childs[1] = childs[0]
                childs[0] = temp
            vk = float(childs[1].split('>')[1])
            # The node key may itself carry a comparison suffix from a
            # renamed column; strip it before indexing the instance.
            if ('>' in node):
                valor = float(str(inst[node.split('>')[0][:-1]]))
            elif ('<=' in node):
                valor = float(str(inst[node.split('<')[0][:-1]]))
            else:
                valor = float(str(inst[node]))
            if (valor > vk):
                tree = tree[node][childs[1]]
                prediction = None
                prob = None
                if (list(tree)[0] != 'Class'):
                    prediction,prob = predict(inst, tree)
                else:
                    prediction = tree['Class']
                    prob = tree['Prob']
                break;
            else:
                tree = tree[node][childs[0]]
                prediction = None
                prob = None
                if (list(tree)[0] != 'Class'):
                    prediction,prob = predict(inst, tree)
                else:
                    prediction = tree['Class']
                    prob = tree['Prob']
                break;
        else:
            # Categorical node: follow the branch labeled with the value.
            value = str(inst[node])
            if value in tree[node].keys():
                tree = tree[node][value]
                prediction = None
                prob = None
                if (list(tree)[0] != 'Class'):
                    prediction,prob = predict(inst, tree)
                else:
                    prediction = tree['Class']
                    prob = tree['Prob']
                break;
            else:
                prediction = 'Not exists node: '+value
                prob = 0
    return prediction, prob
train_data.loc[0]  # first training row, shown for reference
Markdown("## {}".format(predict(train_data.loc[0],tree)))
# +
def test_step_prob(arg, tree):
    """Return (accuracy, mean leaf probability) of *tree* over DataFrame *arg*.

    Fix: the original called predict() twice per row — once for the label and
    once for the probability — doubling the work; predict once and reuse.
    """
    S = 0
    P = 0
    for i in range(0, len(arg)):
        row = arg.iloc[i]
        label, prob = predict(row, tree)
        S += (label == row.Target) * 1
        P += prob
    S = S / len(arg)
    P = P / len(arg)
    return S, P
def test_step(arg, tree):
    """Return (fraction correct, fraction wrong) of *tree* over *arg*."""
    hits = 0
    misses = 0
    for idx in range(len(arg)):
        row = arg.iloc[idx]
        if predict(row, tree)[0] == row.Target:
            hits += 1
        else:
            misses += 1
    YES = hits / len(arg)
    NO = misses / len(arg)
    #print("YES: "+str(YES)+'. NO: '+str(NO)+'.')
    return YES, NO
# -
# Accuracy and mean leaf probability on both splits.
temp = test_step_prob(train_data,tree)
temp2 = test_step_prob(test_data,tree)
Markdown("## Base de treino, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp[0], temp[1]))
Markdown("## Base de teste, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp2[0], temp2[1]))
# ### Função para amostragem aleatória dado o percentual ($ bagging $).
def amostra_dados(dados, n_samples):
    """Balanced bagging sample: n_samples/2 random rows from each class.

    Fix: DataFrame.append was deprecated in pandas 1.4 and removed in
    pandas 2.0; pd.concat is the supported equivalent (same result).
    """
    half = int(n_samples / 2)
    first = dados.loc[dados[Target] == Labels[0]].sample(half)
    second = dados.loc[dados[Target] == Labels[1]].sample(half)
    return pd.concat([first, second], ignore_index=True).reset_index(drop=True)
n_samples = 50  # size of each balanced bagging sample
train_batch = amostra_dados(train_data,n_samples)
test_batch = amostra_dados(test_data,n_samples)
test_batch.head(5)
# Build a tree on the small balanced batch and compare against the full tree.
t = time.process_time()
tree2 = buildtree(train_batch)
elapsed_time2 = time.process_time() - t
Markdown("## Tempo: {} <br/><br/> ".format(elapsed_time2))
tree2
mostra_tree(tree2)
mostra_tree_graph(tree2)
# Runtime ratio vs. the full build, next to the data-size ratio.
Markdown("## Comp. tempos (tamanhos): {:.4f} ({:.4f}) <br/><br/> ".format(elapsed_time2 / elapsed_time1, n_samples/len(train_data)))
# Note: the *full* tree is evaluated here, on the small batches.
temp = test_step_prob(train_batch,tree)
temp2 = test_step_prob(test_batch,tree)
Markdown("## Base de treino, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp[0], temp[1]))
Markdown("## Base de teste, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp2[0], temp2[1]))
mostra_tree(tree2)
mostra_tree(tree)
# # Mais ramificações.
# Rebuild the full tree with looser purity thresholds and a deeper cap.
t = time.process_time()
tree = buildtree(train_data,T_pro=0.95, T_pro_num=0.8,total_splits=100)
elapsed_time3 = time.process_time() - t
Markdown("## Tempo (s): {:.2f} <br/><br/> ".format(elapsed_time3))
mostra_tree(tree)
# # Volte para a apresentação!
# # Combinando as árvores:
# Bagging: M trees, each grown on its own balanced sample of 50 rows.
n_samples=50
forest = list()
M = 10
for m in range(0,M):
    print(str(m+1)+'/'+str(M), end='\r')  # in-place progress counter
    train_bag = amostra_dados(train_data,n_samples)
    forest.append(buildtree(train_bag))
mostra_tree(forest[0])
mostra_tree(forest[1])
mostra_tree(forest[2])
# ## Funções para predição com a floresta.
# +
def predict_forest(arg,forest):
    """Aggregate the forest's votes for one row *arg*.

    'yes' accumulates probability mass from trees whose prediction matches
    the row's true Target, 'no' from the others.  Returns
    (sum_correct, sum_wrong, mean_correct, mean_wrong); max(count, 1)
    guards the means against empty groups.
    """
    prob_yes = 0
    prob_no = 0
    count_yes = 0
    count_no = 0
    for tree in forest:
        result = predict(arg,tree)
        if (result[0] == arg[Target]):
            prob_yes += result[1]
            count_yes += 1
        else:
            prob_no += result[1]
            count_no += 1
    return prob_yes, prob_no, prob_yes / max(count_yes,1), prob_no / max(count_no,1)
def test_step_forest(arg, forest):
    """Return (accuracy, mean correct-vote probability) of the forest on *arg*.

    A row counts as correct when the probability mass on trees agreeing with
    its true label exceeds the mass on the disagreeing trees.
    NOTE(review): arg.loc[i] assumes a clean RangeIndex — the frames are
    reset_index'd earlier, so this holds here.
    """
    sum_prop = 0
    count_prop = 0
    YES = 0
    for i in range(0, len(arg)):
        result = predict_forest(arg.loc[i], forest)
        if result[0] > result[1]:
            YES += 1
            count_prop += 1
            sum_prop += result[2]
    YES = YES / len(arg)
    # Fix: guard against ZeroDivisionError when no row is classified
    # correctly (mirrors the max(count, 1) guard in predict_forest).
    sum_prop = sum_prop / max(count_prop, 1)
    return YES, sum_prop
# -
# Single-row vote breakdown, then forest accuracy on both splits.
temp = predict_forest(train_data.loc[0],forest)
Markdown("## Soma das probabilidades (média) <br/><br/> Certo: {:.4f} ({:.4f}), Errado {:.4f} ({:.4f})".format(temp[0], temp[2], temp[1], temp[3]))
temp = test_step_forest(train_data,forest)
temp2 = test_step_forest(test_data,forest)
Markdown("## Base de treino, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp[0], temp[1]))
Markdown("## Base de teste, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp2[0], temp2[1]))
# # Seleção aleatória de $ features $
#
# # (RFS: Random Feature Selection).
# +
def find_winner(df, rfs_par=False):
    """Choose the attribute to split on.

    With rfs_par=False, the attribute with the highest information gain
    is selected; with rfs_par=True a uniformly random attribute is picked
    (Random Feature Selection).

    Returns
    -------
    (key, vk) : the chosen column name and its numeric split point as
    produced by find_entropy_attribute (vk is None for categorical
    attributes, which buildtree uses to branch per value).
    """
    candidates = df.columns.difference([Target])
    if not rfs_par:
        base_entropy = find_entropy(df)  # invariant, hoisted out of the loop
        gains = []
        split_points = []
        for key in candidates:
            ent, vk = find_entropy_attribute(df, key)
            split_points.append(vk)
            gains.append(base_entropy - ent)
        best = int(np.argmax(gains))
        return candidates[best], split_points[best]
    else:
        # BUG FIX: np.random.randint(len(candidates) - 1) draws from
        # [0, len-1) and could never select the last column; randint's
        # upper bound is exclusive, so len(candidates) is correct.
        key = candidates[np.random.randint(len(candidates))]
        _, vk = find_entropy_attribute(df, key)
        return key, vk
def buildtree(df, tree=None, mytree=None, T_pro=0.9, T_pro_num=0.6, total_splits=10, splits=1, rfs=False):
    """Recursively build a decision tree as a nested dict.

    Parameters
    ----------
    df : pandas.DataFrame
        Training data; must contain the global Target column.
    tree : dict, optional
        Tree under construction (created when None).
    mytree : unused, kept for interface compatibility.
    T_pro : float
        Purity threshold for categorical splits.
    T_pro_num : float
        Purity threshold for numeric (<=vk / >vk) splits.
    total_splits : int
        Maximum number of splits before forcing a leaf.
    splits : int
        Current split counter (internal, used by recursion).
    rfs : bool
        Pass-through to find_winner: random feature selection.
    """
    def ramificatree(Thd, ss):
        # Turn the current `subtable` into a leaf, or recurse on it.
        if len(clValue) == 1:
            # pure node
            tree[node][value] = {}
            tree[node][value]['Class'] = clValue[0]
            tree[node][value]['Prob'] = 1.0
        else:
            prob = counts.max() / counts.sum()
            if (prob >= Thd) or (splits >= total_splits):
                # pure enough, or split budget exhausted: majority-class leaf
                tree[node][value] = {}
                tree[node][value]['Class'] = clValue[counts.argmax()]
                tree[node][value]['Prob'] = prob
            else:
                ss += 1
                # BUG FIX: forward the hyper-parameters to the recursive
                # call — previously only `splits` was passed, so T_pro,
                # T_pro_num, total_splits and rfs silently reset to their
                # defaults below the root.
                tree[node][value] = buildtree(subtable, T_pro=T_pro,
                                              T_pro_num=T_pro_num,
                                              total_splits=total_splits,
                                              splits=ss, rfs=rfs)

    node, vk = find_winner(df, rfs_par=rfs)
    if tree is None:
        tree = {}
    tree[node] = {}
    if vk is None:
        # Categorical attribute: one branch per distinct value.
        attValue = np.unique(df[node])
        for value in attValue:
            subtable = df[df[node] == value].reset_index(drop=True)
            clValue, counts = np.unique(subtable[Target], return_counts=True)
            splits += 1
            ramificatree(T_pro, ss=splits)
    else:
        # Numeric attribute: binary split at vk, only if both sides are
        # non-empty.
        if (len(df[node][df[node] <= vk].unique()) > 0) and (len(df[node][df[node] > vk].unique()) > 0):
            # branch: node > vk
            value = node + ' >' + str(vk)
            subtable = df[df[node] > vk].rename(columns={node: value}).reset_index(drop=True)
            clValue, counts = np.unique(subtable[Target], return_counts=True)
            if (len(subtable[value].unique()) == 1) and (len(clValue) > 1):
                # attribute exhausted but classes still mixed: majority leaf
                tree[node][value] = {}
                tree[node][value]['Class'] = clValue[counts.argmax()]
                prob = counts.max() / counts.sum()
                tree[node][value]['Prob'] = prob
            else:
                splits += 1
                ramificatree(T_pro_num, ss=splits)
            # branch: node <= vk
            value = node + ' <=' + str(vk)
            subtable = df[df[node] <= vk].rename(columns={node: value}).reset_index(drop=True)
            clValue, counts = np.unique(subtable[Target], return_counts=True)
            if (len(subtable[value].unique()) == 1) and (len(clValue) > 1):
                tree[node][value] = {}
                tree[node][value]['Class'] = clValue[counts.argmax()]
                prob = counts.max() / counts.sum()
                tree[node][value]['Prob'] = prob
            else:
                splits += 1
                ramificatree(T_pro_num, ss=splits)
        else:
            # The numeric split separates nothing: treat the attribute as
            # categorical (string-typed) and rebuild.
            # BUG FIX: the recursive result was previously discarded (and
            # the hyper-parameters reset), so the returned tree kept an
            # empty `node` entry instead of the rebuilt subtree.
            df[node] = df[node].astype(str)
            return buildtree(df, T_pro=T_pro, T_pro_num=T_pro_num,
                             total_splits=total_splits, splits=splits,
                             rfs=rfs)
    return tree
# -
# Train an RFS forest and evaluate it on train and test data.
n_samples=50
# BUG FIX: this cell appends to forest_rfs, which was never initialised
# (the original re-created `forest` here instead, which would both raise
# a NameError below and wipe the forest inspected by the next cells).
forest_rfs = list()
M = 10
for m in range(0,M):
    print(str(m+1)+'/'+str(M), end='\r')  # progress indicator
    train_bag = amostra_dados(train_data,n_samples)
    forest_rfs.append(buildtree(train_bag,rfs=True))
temp = test_step_forest(train_data,forest_rfs)
temp2 = test_step_forest(test_data,forest_rfs)
# NOTE: bare Markdown(...) expressions only render inside a notebook.
Markdown("## Base de treino, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp[0], temp[1]))
Markdown("## Base de teste, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp2[0], temp2[1]))
# # As árvores são diferentes?
# Visually compare several trees of the bagged ensemble.
mostra_tree(forest[1])
mostra_tree(forest[0])
mostra_tree(forest[3])
mostra_tree(forest[5])
# Tree "size" proxy: length of the dict's string representation.
size_tree = np.empty((M,1))
m=0
for tree in forest:
    size_tree[m] = len(str(tree))
    m+=1
# show the smallest and the largest tree in the ensemble
mostra_tree(forest[size_tree.argmin()])
mostra_tree(forest[size_tree.argmax()])
# # Com RFS foi pior. Como melhorar?
# Experiment 1: M=100 trees on small bags (40 rows), looser purity
# thresholds; compare plain bagging against bagging + RFS.
n_samples=40
forest = list()
forest_rfs = list()
M = 100
for m in range(0,M):
    print(str(m+1)+'/'+str(M), end='\r')  # progress indicator
    train_bag = amostra_dados(train_data,n_samples)
    forest.append(buildtree(train_bag,T_pro=0.8, T_pro_num=0.8))
    forest_rfs.append(buildtree(train_bag,T_pro=0.8, T_pro_num=0.8, rfs=True))
temp = test_step_forest(train_data,forest)
temp2 = test_step_forest(test_data,forest)
# NOTE: bare Markdown(...) expressions only render inside a notebook.
Markdown("## Base de teste, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp2[0], temp2[1]))
Markdown("## Base de treino, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp[0], temp[1]))
temp = test_step_forest(train_data,forest_rfs)
temp2 = test_step_forest(test_data,forest_rfs)
Markdown("## Base de teste, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp2[0], temp2[1]))
Markdown("## Base de treino, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp[0], temp[1]))
# Experiment 2: same comparison with larger bags (100 rows).
n_samples=100
forest = list()
forest_rfs = list()
M = 100
for m in range(0,M):
    print(str(m+1)+'/'+str(M), end='\r')
    train_bag = amostra_dados(train_data,n_samples)
    forest.append(buildtree(train_bag,T_pro=0.8, T_pro_num=0.8))
    forest_rfs.append(buildtree(train_bag,T_pro=0.8, T_pro_num=0.8, rfs=True))
temp = test_step_forest(train_data,forest)
temp2 = test_step_forest(test_data,forest)
Markdown("## Base de teste, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp2[0], temp2[1]))
Markdown("## Base de treino, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp[0], temp[1]))
temp = test_step_forest(train_data,forest_rfs)
temp2 = test_step_forest(test_data,forest_rfs)
Markdown("## Base de teste, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp2[0], temp2[1]))
Markdown("## Base de treino, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp[0], temp[1]))
# Experiment 3: double the ensemble size (M=200), bags of 100 rows.
n_samples=100
forest = list()
forest_rfs = list()
M = 200
for m in range(0,M):
    print(str(m+1)+'/'+str(M), end='\r')
    train_bag = amostra_dados(train_data,n_samples)
    forest.append(buildtree(train_bag,T_pro=0.8, T_pro_num=0.8))
    forest_rfs.append(buildtree(train_bag,T_pro=0.8, T_pro_num=0.8, rfs=True))
temp = test_step_forest(train_data,forest)
temp2 = test_step_forest(test_data,forest)
Markdown("## Base de teste, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp2[0], temp2[1]))
Markdown("## Base de treino, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp[0], temp[1]))
temp = test_step_forest(train_data,forest_rfs)
temp2 = test_step_forest(test_data,forest_rfs)
Markdown("## Base de teste, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp2[0], temp2[1]))
Markdown("## Base de treino, precisão: {:.4f}, média das probabilidades {:.4f}".format(temp[0], temp[1]))
# # Fim
| RFCPY/exemplo4-Meetup-MLPOA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ###### Content under Creative Commons Attribution license CC-BY 4.0, code under MIT license (c) 2019 <NAME>, based on (c)2018 <NAME>, <NAME> [CFD Python](https://github.com/barbagroup/CFDPython#cfd-python), (c)2014 <NAME>, <NAME>, <NAME> [Practical Numerical Methods with Python](https://github.com/numerical-mooc/numerical-mooc#practical-numerical-methods-with-python), also under CC-BY.
# Load the course's custom notebook stylesheet.
from IPython.core.display import HTML
css_file = '../style/custom.css'
# Read via a context manager so the file handle is closed deterministically
# (the original bare open(...).read() leaked the handle).
with open(css_file, 'r') as f:
    css = f.read()
HTML(css)
# # 1D Diffusion
#
# We introduced finite-difference methods for partial differential equations (PDEs) in the [second module](https://github.com/daniel-koehn/Differential-equations-earth-system/tree/master/02_finite_difference_intro#numerical-solution-of-differential-equations-introduction-to-the-finite-difference-method), and looked at advection problems in more depth in [module 4](https://github.com/daniel-koehn/Differential-equations-earth-system/tree/master/04_Advection_1D#differential-equations-in-earth-sciences-1d-nonlinear-advection). Now we'll look at solving problems dominated by diffusion.
#
# Why do we separate the discussion of how to solve advection-dominated and diffusion-dominated problems, you might ask? It's all about the harmony between mathematical model and numerical method. Advection and diffusion are inherently different physical processes.
#
# * _Advection_—imagine a surfer on a tall wave, moving fast towards the beach ... advection implies transport, speed, direction. The physics has a directional bias, and we discovered that numerical methods should be compatible with that. That's why we use _upwind_ methods for advection, and we pay attention to problems where waves move in opposite directions, needing special schemes like the _Marker-in-Cell_ approach
#
# * _Diffusion_—now imagine a drop of food dye in a cup of water, slowly spreading in all directions until all the liquid takes a uniform color. [Diffusion](http://en.wikipedia.org/wiki/Diffusion) spreads the concentration of something around (atoms, people, ideas, dirt, anything!). Since it is not a directional process, we need numerical methods that are isotropic (like central differences).
# Illustration of the diffusion process (image fetched from Wikimedia).
from IPython.display import Image
Image(url='http://upload.wikimedia.org/wikipedia/commons/f/f9/Blausen_0315_Diffusion.png')
# In the previous Jupyter notebooks of this series, we studied the numerical solution of the linear and non-linear advection equations using the finite-difference method, and learned about the CFL condition. Now, we will look at the one-dimensional diffusion equation:
#
# $$
# \begin{equation}
# \frac{\partial u}{\partial t}= \nu \frac{\partial^2 u}{\partial x^2} \tag{1}
# \end{equation}
# $$
#
# where $\nu$ is a constant known as the *diffusion coefficient*.
#
# The first thing you should notice is that this equation has a second-order derivative. We first need to learn what to do with it!
# ### Discretizing 2nd-order derivatives
# The second-order derivative can be represented geometrically as the line tangent to the curve given by the first derivative. We will discretize the second-order derivative with a Central Difference scheme: a combination of forward difference and backward difference of the first derivative. Consider the Taylor expansion of $u_{i+1}$ and $u_{i-1}$ around $u_i$:
#
# $$
# u_{i+1} = u_i + \Delta x \frac{\partial u}{\partial x}\big|_i + \frac{\Delta x^2}{2!} \frac{\partial ^2 u}{\partial x^2}\big|_i + \frac{\Delta x^3}{3!} \frac{\partial ^3 u}{\partial x^3}\big|_i + {\mathcal O}(\Delta x^4)
# $$
#
# $$
# u_{i-1} = u_i - \Delta x \frac{\partial u}{\partial x}\big|_i + \frac{\Delta x^2}{2!} \frac{\partial ^2 u}{\partial x^2}\big|_i - \frac{\Delta x^3}{3!} \frac{\partial ^3 u}{\partial x^3}\big|_i + {\mathcal O}(\Delta x^4)
# $$
#
# If we add these two expansions, the odd-numbered derivatives will cancel out. Neglecting any terms of ${\mathcal O}(\Delta x^4)$ or higher (and really, those are very small), we can rearrange the sum of these two expansions to solve for the second-derivative.
#
# $$
# u_{i+1} + u_{i-1} = 2u_i+\Delta x^2 \frac{\partial ^2 u}{\partial x^2}\big|_i + {\mathcal O}(\Delta x^4)
# $$
#
# And finally:
#
# $$
# \begin{equation}
# \frac{\partial ^2 u}{\partial x^2}=\frac{u_{i+1}-2u_{i}+u_{i-1}}{\Delta x^2} + {\mathcal O}(\Delta x^2)\notag
# \end{equation}
# $$
#
# The central difference approximation of the 2nd-order derivative is 2nd-order accurate.
# ### Back to diffusion
# We can now write the discretized version of the diffusion equation in 1D:
#
# $$
# \begin{equation}
# \frac{u_{i}^{n+1}-u_{i}^{n}}{\Delta t}=\nu\frac{u_{i+1}^{n}-2u_{i}^{n}+u_{i-1}^{n}}{\Delta x^2} \notag
# \end{equation}
# $$
#
# As before, we notice that once we have an initial condition, the only unknown is $u_{i}^{n+1}$, so we re-arrange the equation to isolate this term:
#
# $$
# \begin{equation}
# u_{i}^{n+1}=u_{i}^{n}+\frac{\nu\Delta t}{\Delta x^2}(u_{i+1}^{n}-2u_{i}^{n}+u_{i-1}^{n}) \notag
# \end{equation}
# $$
#
# This discrete equation allows us to write a program that advances a solution in time—but we need an initial condition. Let's continue using our favorite: the hat function. So, at $t=0$, $u=2$ in the interval $0.5\le x\le 1$ and $u=1$ everywhere else.
# ### Stability of the diffusion equation
# The diffusion equation is not free of stability constraints. Just like the linear and non-linear advection equations, there are a set of discretization parameters $\Delta x$ and $\Delta t$ that will make the numerical solution blow up. For the diffusion equation and the discretization used here, the stability condition for diffusion is
#
# $$
# \begin{equation}
# \nu \frac{\Delta t}{\Delta x^2} \leq \frac{1}{2} \notag
# \end{equation}
# $$
# ### And solve!
# We are ready for some number-crunching!
#
# The next two code cells initialize the problem by loading the needed libraries, then defining the solution parameters and initial condition. This time, we don't let the user choose just *any* $\Delta t$, though; we have decided this is not safe: people just like to blow things up. Instead, the code calculates a value of $\Delta t$ that will be in the stable range, according to the spatial discretization chosen! You can now experiment with different solution parameters to see how the numerical solution changes, but it won't blow up.
import numpy
from matplotlib import pyplot
# %matplotlib inline
# Set the font family and size to use for Matplotlib figures.
pyplot.rcParams['font.family'] = 'serif'
pyplot.rcParams['font.size'] = 16
# +
# Set parameters.
nx = 41  # number of spatial grid points
L = 2.0  # length of the domain
dx = L / (nx - 1)  # spatial grid size
nu = 0.3  # viscosity (diffusion coefficient)
sigma = 0.2  # stability parameter nu*dt/dx^2, kept below the 0.5 limit
dt = sigma * dx**2 / nu  # time-step size, chosen to satisfy stability
nt = 20  # number of time steps to compute
# Get the grid point coordinates.
x = numpy.linspace(0.0, L, num=nx)
# Set the initial conditions: hat function, u=2 on 0.5<=x<=1, u=1 elsewhere.
u0 = numpy.ones(nx)
mask = numpy.where(numpy.logical_and(x >= 0.5, x <= 1.0))
u0[mask] = 2.0
# +
# Integrate in time with the FTCS update written in vectorized form.
u = u0.copy()
coeff = nu * dt / dx**2  # diffusion number, invariant over the loop
for step in range(nt):
    previous = u.copy()  # field at the old time level
    # central difference in space applied to all interior points at once;
    # the two boundary values are left untouched (Dirichlet ends)
    u[1:-1] = previous[1:-1] + coeff * (previous[2:] - 2 * previous[1:-1] + previous[:-2])
# +
# Plot the solution after nt time steps
# along with the initial conditions.
pyplot.figure(figsize=(6.0, 4.0))
pyplot.xlabel('x')
pyplot.ylabel('u')
pyplot.grid()
pyplot.plot(x, u0, label='Initial',
            color='C0', linestyle='--', linewidth=2)
pyplot.plot(x, u, label='nt = {}'.format(nt),
            color='C1', linestyle='-', linewidth=2)
pyplot.legend(loc='upper right')
pyplot.xlim(0.0, L)
# trailing semicolon suppresses the notebook's echo of the return value
pyplot.ylim(0.5, 2.5);
# -
# ## Animations
# Looking at before-and-after plots of the wave in motion is helpful, but it's even better if we can see it changing!
# We are going to create an animation.
# This takes a few steps, but it's actually not hard to do!
#
# First, we define a function, called `diffusion`, that computes and plots the numerical solution of the 1D diffusion equation over the time steps:
def diffusion(u0, sigma=0.5, nt=20):
    """
    Computes and animates the numerical solution of the 1D diffusion
    equation (FTCS scheme) over the time steps.

    Relies on the module-level grid variables nx, dx, nu, x and L.

    Parameters
    ----------
    u0 : numpy.ndarray
        The initial conditions as a 1D array of floats.
    sigma : float, optional
        The value of nu * dt / dx^2 used to derive the time-step size
        (stable for sigma <= 0.5); default: 0.5.
    nt : integer, optional
        The number of time steps to compute;
        default: 20.
    """
    # BUG FIX: the original accepted `sigma` but never used it, silently
    # integrating with the global `dt`; derive the step size here so the
    # parameter behaves as documented.
    dt = sigma * dx**2 / nu
    # copy initial condition u0 -> u
    u = u0.copy()
    # set up the figure with the initial condition
    fig = pyplot.figure(figsize=(9.0, 6.0))
    pyplot.xlabel('x')
    pyplot.ylabel('u')
    pyplot.grid()
    init = pyplot.plot(x, u0, color='C0', linestyle='-', linewidth=3, label='Initial u0')
    # initialize the finite-difference solution line
    # Note: comma is needed to update the variable
    line, = pyplot.plot(x, u, color='C1', linestyle='-', linewidth=3, label='FD solution u')
    pyplot.xlim(0.0, L)
    pyplot.ylim(0.5, 2.5)
    pyplot.legend(loc='upper right')
    fig.tight_layout()
    # activate interactive plot (will not work in JupyterLab)
    pyplot.ion()
    pyplot.show(block=False)
    # FTCS finite-difference solution of the 1D diffusion equation
    for n in range(nt):
        un = u.copy()  # store old field u
        # update the interior points; boundaries stay fixed
        for i in range(1, nx - 1):
            u[i] = un[i] + nu * dt / dx**2 * (un[i+1] - 2 * un[i] + un[i-1])
        # update field u and redraw the figure
        line.set_ydata(u)
        fig.canvas.draw()
# We now call the function `diffusion` to compute and animate the history of the solution:
# +
# %matplotlib notebook
# Compute and animate the history of the numerical solution (500 steps).
diffusion(u0, sigma=sigma, nt=500)
# -
# ## What we learned:
#
# - How to solve the 1D diffusion equation using the FTCS finite difference method
#
# - Animating the time evolution of the 1D diffusion equation solution
| 05_Diffusion_1D/01_Diffusion_1D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Day 10
# #### part A
import numpy as np
# Advent of Code day 10, part A: sort the adapter joltages and count the
# 1-jolt and 3-jolt jumps in the chain.
infile="10a_input.txt"
vals=np.loadtxt(infile)
vals=np.sort(vals)
vals[0]
diffs=np.diff(vals)  # jumps between consecutive adapters
diffs
count1=np.count_nonzero(diffs==1)
count3=np.count_nonzero(diffs==3)
# final result
(count1+1)*(count3+1)
# first +1: vals[0]- chargingoutlet(0)
# second +1: device adapter is diff=3
# #### part B
# +
# observation: there are no 2's in the diff vector, which seems to be a deliberate choice
# -
# include the first jolt
# Prepend the jump from the charging outlet (joltage 0) to the first adapter.
diffsext = np.concatenate((np.array([1]), diffs))
diffsext
# Collect the lengths of the maximal runs of consecutive 1-jolt jumps:
# only inside such runs can adapters be skipped.
lengths = []
run = 0
for step in diffsext:
    if step == 1:
        run += 1
    else:
        if run != 0:
            lengths.append(run)
        run = 0
# close a run that extends to the very end of the sequence
if run != 0:
    lengths.append(run)
lengths = np.array(lengths)
lengths
# +
# possible re-arrangements for repeated 1's:
# 1 -> 1 (1 option)
# 11 -> 11,2 (2 options)
# 111 -> 111,12,21,3 (4 options)
# 1111 -> 1111,112,121,211,13,31,22 (7 options)
# -
# longest stretch is 4 1's
# Count how many runs of each length occur.
cnt1=np.count_nonzero(lengths==1)
cnt2=np.count_nonzero(lengths==2)
cnt3=np.count_nonzero(lengths==3)
cnt4=np.count_nonzero(lengths==4)
print(cnt1,cnt2,cnt3,cnt4)
# final result: product of the per-run arrangement counts (1, 2, 4, 7
# options for runs of length 1..4, as enumerated above)
1**cnt1 * 2**cnt2 * 4**cnt3 * 7**cnt4
| advent_of_code_10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import statsmodels.api as sm
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
# Toy example: ordinary least squares with statsmodels on 8 samples,
# two predictors plus an explicit constant (intercept) column.
x = [[0, 1], [5, 1], [15, 2], [25, 5], [35, 11], [45, 15], [55, 34], [60, 35]]
y = [4, 5, 20, 14, 32, 22, 38, 43]
x, y = np.array(x), np.array(y)
x = sm.add_constant(x)  # prepend a column of ones for the intercept
print(x)
print(y)
model = sm.OLS(y, x)
results = model.fit()
print(results.summary())
print('coefficient of determination:', results.rsquared)
print('adjusted coefficient of determination:', results.rsquared_adj)
print('regression coefficients:', results.params)
print('predicted response:', results.fittedvalues, sep='\n')
print('predicted response:', results.predict(x), sep='\n')
# Load the multivariate unconventional-reservoir dataset.
file = '../data/unconv_MV_v5.csv'
df = pd.read_csv(file)
df.head(10)
# +
# Simple linear regression: gas production (Prod) vs. porosity (Por).
X = df['Por'].values.reshape(-1,1)
y = df['Prod'].values
################################################ Train #############################################
ols = linear_model.LinearRegression()
model = ols.fit(X, y)
response = model.predict(X)
############################################## Evaluate ############################################
r2 = model.score(X, y)  # R^2 on the training data
############################################## Plot ################################################
plt.style.use('default')
plt.style.use('ggplot')
fig, ax = plt.subplots(figsize=(8, 4))
ax.plot(X, response, color='k', label='Regression model')
ax.scatter(X, y, edgecolor='k', facecolor='grey', alpha=0.7, label='Sample data')
ax.set_ylabel('Gas production (Mcf/day)', fontsize=14)
ax.set_xlabel('Porosity (%)', fontsize=14)
ax.text(0.8, 0.1, 'aegis4048.github.io', fontsize=13, ha='center', va='center',
        transform=ax.transAxes, color='grey', alpha=0.5)
ax.legend(facecolor='white', fontsize=11)
ax.set_title('$R^2= %.2f$' % r2, fontsize=18)
fig.tight_layout()
# +
# Same regression via a reusable feature-list pattern.
features = ['Por']
target = 'Prod'
# scikit-learn expects X as a 2D array of shape (n_samples, n_features)
X = df[features].values.reshape(-1, len(features))
y = df[target].values
# -
print(X.shape)
print(y.shape)
ols = linear_model.LinearRegression()
model = ols.fit(X, y)
model.coef_
model.intercept_
model.score(X, y)  # R^2 on the training data
# Predict for a single porosity value...
x_pred = np.array([15])
x_pred = x_pred.reshape(-1, len(features))
model.predict(x_pred)
# ...and for two values at once.
x_pred = np.array([14, 18])
x_pred = x_pred.reshape(-1, len(features))
model.predict(x_pred)
# +
x_pred = np.linspace(0, 40, 200)  # 200 data points between 0 ~ 40
x_pred = x_pred.reshape(-1, len(features))  # preprocessing required by scikit-learn functions
y_pred = model.predict(x_pred)
# +
# Plot the fitted line over the sample data, annotated with the equation.
plt.style.use('default')
plt.style.use('ggplot')
fig, ax = plt.subplots(figsize=(7, 3.5))
ax.plot(x_pred, y_pred, color='k', label='Regression model')
ax.scatter(X, y, edgecolor='k', facecolor='grey', alpha=0.7, label='Sample data')
ax.set_ylabel('Gas production (Mcf/day)', fontsize=14)
ax.set_xlabel('Porosity (%)', fontsize=14)
ax.legend(facecolor='white', fontsize=11)
ax.text(0.55, 0.15, '$y = %.2f x_1 - %.2f $' % (model.coef_[0], abs(model.intercept_)), fontsize=17, transform=ax.transAxes)
fig.tight_layout()
# +
# Multivariate regression with four predictors.
features = ['Por', 'Brittle', 'Perm', 'TOC']
target = 'Prod'
X = df[features].values.reshape(-1, len(features))
y = df[target].values
ols = linear_model.LinearRegression()
model = ols.fit(X, y)
# -
model.coef_
model.intercept_
model.score(X, y)  # R^2 on the training data
# Predict for one sample (values ordered as in `features`)...
x_pred = np.array([12, 81, 2.31, 2.8])
x_pred = x_pred.reshape(-1, len(features))
model.predict(x_pred)
# ...and for two samples at once.
x_pred = np.array([[12, 81, 2.31, 2.8], [15, 60, 2.5, 1]])
x_pred = x_pred.reshape(-1, len(features))
model.predict(x_pred)
# +
# Exhaustive comparison of candidate feature subsets for predicting 'Prod'.
# The original cell repeated the identical fit-and-report code once per
# subset; it is folded into a single loop (same subsets, same order, same
# printed output).
feature_sets = [
    ['Por', 'Brittle', 'Perm', 'TOC', 'AI', 'VR'],
    ['Por', 'Brittle', 'Perm', 'TOC', 'VR'],
    ['Por', 'Brittle', 'Perm', 'TOC', 'AI'],
    ['Por', 'Brittle', 'Perm', 'TOC'],
    ['Por', 'Brittle', 'Perm', 'AI'],
    ['Por', 'Brittle', 'Perm', 'VR'],
    ['Por', 'Brittle', 'TOC', 'VR'],
    ['Por', 'Brittle', 'TOC'],
    ['Por', 'Brittle', 'VR'],
    ['Por', 'Brittle', 'AI'],
    ['Por', 'Brittle'],
]
target = 'Prod'
for idx, features in enumerate(feature_sets):
    X = df[features].values.reshape(-1, len(features))
    y = df[target].values
    ols = linear_model.LinearRegression()
    model = ols.fit(X, y)
    print('Features : %s' % features)
    print('Regression Coefficients : ', [round(item, 2) for item in model.coef_])
    print('R-squared : %.2f' % model.score(X, y))
    print('Y-intercept : %.2f' % model.intercept_)
    if idx < len(feature_sets) - 1:
        # blank line between reports (the original printed none after the last)
        print('')
# +
import seaborn as sns  # BUG FIX: `sns` is used below but was never imported
# NOTE(review): drops the first and last columns of df in place — verify
# these are the well-ID and target columns before reusing df afterwards.
df = df.iloc[:, 1:-1]
corr = df.corr(method='spearman')
# Generate a mask for the upper triangle.
# BUG FIX: the np.bool alias was removed in NumPy 1.24; use builtin bool.
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
# Set up the matplotlib figure
fig, ax = plt.subplots(figsize=(6, 5))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True, sep=100)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(corr, mask=mask, cmap=cmap, vmin=-1, vmax=1, center=0, linewidths=.5)
fig.suptitle('Correlation matrix of features', fontsize=15)
ax.text(0.77, 0.2, 'aegis4048.github.io', fontsize=13, ha='center', va='center',
        transform=ax.transAxes, color='grey', alpha=0.5)
fig.tight_layout()
# -
| lectures/linear-regressions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/willianszwy/Forest-Cover-Type/blob/main/PP2_2_1_Conhecendo_o_Conjunto_de_Dados.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="pFbOuY33cexF"
# ## Redes Neurais Artificiais 2021.1
#
# - **Disciplina**: Redes Neurais Artificiais 2021.1
# - **Professora**: <NAME> (<EMAIL>)
# - **Github**: http://github.com/elloa
#
# ### Explorando uma base de dados
#
# Nesta atividade prática iremos explorar a seguinte base de dados **_Forest Cover Type_**
#
# 1. Obtenha esta base de dados no seguinte link: https://www.kaggle.com/uciml/forest-cover-type-dataset/download
# 2. Consulte a documentação oficial da base de dados: https://archive.ics.uci.edu/ml/datasets/covertype
# 3. **Responda**:
#
# 3.1 O que é cada exemplo na base de dados?
# 3.2 Em que ano ela foi obtida?
# 3.3 Quem foram os responsáveis pela criação desta base de dados?
# + id="DIZUa8u4cexH"
## Reservado para a importação de bibliotecas
import pandas as pd
# + [markdown] id="KABVgstMcexI"
# **3.1 - Resposta:** Cada exemplo corresponde a uma área de 30m x 30m localizadas nas quatro áreas selvagens da Floresta Nacional Roosevelt, no norte do Colorado, Estados Unidos.
# + [markdown] id="Cy_pa6ZscexJ"
# **3.2 - Resposta:** Segundo a documentação encontrada no site https://archive.ics.uci.edu/ml/datasets/covertype, a data de publicação da base de dados é 01/08/1998.
# + [markdown] id="czD6Ht5XcexI"
# **3.3 - Resposta:** Os responsáveis pela base de dados são: <NAME> (Estação de pesquisa da Rocky Mountain) e os professores Dr. <NAME> e Dr. <NAME>, ambos da Universidade do Colorado.
# + [markdown] id="50o5xb-pcexJ"
# ### Manipulando a base de dados
#
# 1. Abra a base de dados com o pandas
# 2. Imprima os 10 primeiros exemplos pertencentes à esta base
# 3. Imprima os 10 últimos exemplos pertencentes à esta base
# 4. Quantos exemplos esta base de dados possui?
# 5. Quais são os atributos que a base de dados possui e quais seus tipos?
# 6. Há algum dado faltante na base?
# 7. De acordo com a documentação da base de dados, qual o significado dos atributos do tipo "Soil Type"?
# 7.1 Este atributo é quantitativo ou qualitativo?
# 7.2 Qual o tipo de codificação utilizada para denotar este atributo? Consulte a seguinte referência:
# * https://pbpython.com/categorical-encoding.html
# + [markdown] id="3W-xIbT5W4r_"
# **1 - Resposta:**
# + colab={"base_uri": "https://localhost:8080/"} id="MNGIFOrEcexJ" outputId="b04c2c4e-9bdc-4401-b9fa-47269b2dc934"
# !wget https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz
# + id="IsO0YGLpcexJ" colab={"base_uri": "https://localhost:8080/"} outputId="e165c27f-c17f-4735-911f-9445c27877e8"
# !gunzip covtype.data.gz
# + id="h9sUvOa9cexK"
# Column names for the (header-less) covtype data file: 10 continuous
# predictors, 4 one-hot wilderness areas, 40 one-hot soil types, and the
# Cover_Type target. The repetitive names are generated instead of typed out.
continuous_cols = ['Elevation', 'Aspect', 'Slope', 'Horizontal_Distance_To_Hydrology', 'Vertical_Distance_To_Hydrology', 'Horizontal_Distance_To_Roadways', 'Hillshade_9am', 'Hillshade_Noon', 'Hillshade_3pm', 'Horizontal_Distance_To_Fire_Points']
columns = (continuous_cols
           + ['Wilderness_Area{}'.format(i) for i in range(1, 5)]
           + ['Soil_Type{}'.format(i) for i in range(1, 41)]
           + ['Cover_Type'])
df_forest_cover = pd.read_csv("covtype.data", names = columns)
# + [markdown] id="ySpKZhPMUD1b"
# **2 - Resposta:** 10 primeiros exemplos
# + colab={"base_uri": "https://localhost:8080/", "height": 379} id="4UaGQez2cexK" outputId="2687e57a-08f8-4bd0-cd0b-fc98f2928ca0"
df_forest_cover.head(10)
# + [markdown] id="r43DM-6WUUn4"
# **3 - Resposta:** 10 últimos exemplos
# + colab={"base_uri": "https://localhost:8080/", "height": 379} id="1c-9vcEiUUON" outputId="54d5e8aa-22cc-4aae-eba9-11d3941bc0cc"
df_forest_cover.tail(10)
# + [markdown] id="9gcKgjiEUz_D"
# **4 - Resposta:**
# + colab={"base_uri": "https://localhost:8080/"} id="6STzzqX-UnZx" outputId="33ef60d1-3a7c-4910-c760-846eebd5caa5"
print("A base de dados possui {0} exemplos".format(len(df_forest_cover)))
# + [markdown] id="w8LdCdscVFmC"
# **5 - Resposta:**
# + colab={"base_uri": "https://localhost:8080/"} id="OXylQOV7VE3K" outputId="c8720908-0a8a-4f82-9a7a-97998ba76e82"
df_forest_cover.shape
# + [markdown] id="ge5eQfHgVElN"
# Cada exemplo possui 55 atributos, sendo eles informardos na célula a seguir:
# + colab={"base_uri": "https://localhost:8080/"} id="ZP5C5NN-WNFr" outputId="98ffa3ba-e703-4f58-e9b5-067561e3246c"
df_forest_cover.columns
# + [markdown] id="-cg9eYOaWSOt"
# Tipos de cada atributo:
# + colab={"base_uri": "https://localhost:8080/"} id="dSFp-rVjWR4a" outputId="211be430-1588-479a-fb59-dcb0d3322da5"
df_forest_cover.dtypes
# + [markdown] id="5gbk8JiAWw0L"
# **6 - Resposta:**
# + id="uXBGW9JkYGSO" colab={"base_uri": "https://localhost:8080/", "height": 439} outputId="5181338d-f735-4d50-9a30-4c15a15b7dfd"
# Element-wise missing-value indicators.
df_forest_cover.isna()
# + id="0L4AEsbpZDjj" colab={"base_uri": "https://localhost:8080/", "height": 439} outputId="2a86708f-c562-4488-e009-df0114bd54ca"
# isnull() is an alias of isna(); same check.
df_forest_cover.isnull()
# + [markdown] id="S_rBqsLXZLKD"
# Como podemos ver, as funções ```isna``` e ```isnull``` não identificaram nenhuma irregularidade com os dados. Conclui-se então que não há dados faltantes.
#
#
# + [markdown] id="ZcIyFCBXr5q9"
# **7 - Resposta:** ``Soil_Type`` é uma classe que indica a designação do solo, dividida em 40 colunas binárias na base de dados sendo que cada coluna é um valor binário indicando qual valor da classe o exemplo faz parte: **0** é ausência e **1** é presença do tipo de solo.
#
# O atributo é um valor qualitativo para a base de dados.
#
# Como esse atributo foi convertido de uma classe com ***n*** elementos para valores binários entre 0 e 1 ao longo de ***n*** novas colunas, o método de codificação utilizado foi *One Hot Encoding*.
# + [markdown] id="bb94loPDcexK"
# ### Visualizando a base de dados
#
# 1. Baseando-se nos fundamentos de visualização de dados abordados na disciplina, plote cada um dos atributos preditores de maneira a enfatizar a sua distribuição, tendência central e dispersão
# 1.1. Considere que o número de colunas no dataset é diferente do número de atributos, conforme discussão promovida a respeito do dataset
# 1.2. Se preferir, opte por complementar as informações visuais com medidas estatísticas
# 2. A partir da visualização elaborada, o que pode-se dizer a respeito do balanceamento do atributo-alvo?
# 3. Que tipo de tarefa de Aprendizado de Máquina é sugestiva para este problema?
# 3.1. Apresente métricas de desempenho compatíveis para a avaliação do problema (liste-as)
# 3.2. Escolha uma das métricas apresentadas para ser utilizada como referência pela equipe
# + [markdown] id="UOvApJtKdT--"
# **1 - Resposta:** Plote dos gráficos
# + id="aQ1Aw0RscexL" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="b4296934-de5c-471e-ee0d-7d042ecb4e26"
# Histograms (20 bins each) of the continuous predictors, to inspect their
# distribution, central tendency and dispersion.
df_forest_cover.hist('Elevation', bins=20)
df_forest_cover.hist('Aspect', bins=20)
df_forest_cover.hist('Slope', bins=20)
df_forest_cover.hist('Horizontal_Distance_To_Hydrology', bins=20)
df_forest_cover.hist('Vertical_Distance_To_Hydrology', bins=20)
df_forest_cover.hist('Horizontal_Distance_To_Roadways', bins=20)
df_forest_cover.hist('Hillshade_9am', bins=20)
df_forest_cover.hist('Hillshade_Noon', bins=20)
df_forest_cover.hist('Hillshade_3pm', bins=20)
df_forest_cover.hist('Horizontal_Distance_To_Fire_Points', bins=20)
# + id="eWHso8GhcexL" colab={"base_uri": "https://localhost:8080/", "height": 315} outputId="15596db9-5530-4afe-b66d-54acd86c0261"
# Class distribution of the target variable.
df_forest_cover.hist('Cover_Type')
# + [markdown] id="IP5wpbPAbLh-"
# **2 - Resposta:** Pode-se afirmar que um número próximo de 500 mil amostras dos dados é classificada como possuindo cobertura do tipo 1 ou 2, ou seja, a grande maioria dos exemplos presentes na base de dados. O tipo 3 corresponde a menor de 50 mil amostras. Os tipos 6 e 7, próximos de 25 mil. Os tipos 4 e 5 são os menos expressivos em termos quantitativos.
# + [markdown] id="I5cpiGvQl5Yf"
# **3 - Resposta:** Como o atributo preditor é um valor discreto (inteiro), a tarefa ideal para o problema é a classificação.
#
# Quanto às métricas de desempenho para tarefa de classificação, abaixo algumas compatíveis com a tarefa de classificação:
# * Acurácia;
# * F1 Score;
# * Recall.
#
# A equipe optou por usar *F1 Score* como métrica de desempenho.
| PP2_2_1_Conhecendo_o_Conjunto_de_Dados.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:farallon-fall-2020]
# language: python
# name: conda-env-farallon-fall-2020-py
# ---
# ## Import your modules or packages
# +
import warnings
warnings.simplefilter('ignore') # filter some warning messages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import xarray as xr
# -
# # `pandas` and csv files
# ## First read your data into a dataframe, and take a look
# read data into a dataframe (relative path: expects ../data/ next to the
# notebook directory; first CSV column is used as the index)
df_T = pd.read_csv('./../data/AllT_updated.csv', index_col=0)
# display the first lines, the last lines, and all
df_T.head()
# quick plot of one column
df_T['Temperature'].plot()
# selection of a particular year and a column, and then plot
# (boolean mask on YEAR, then column selection)
df_T[df_T['YEAR']==2014]['Temperature'].plot()
# ----------
# # `xarray` and Satellite data
# # One day of Data
# ## Get some data and check what's in it
# +
# assign an url of where the data is in NETCDF format
# (remote OPeNDAP endpoint -- requires network access)
url = 'https://podaac-opendap.jpl.nasa.gov/opendap/allData/ghrsst/data/GDS2/L4/GLOB/CMC/CMC0.2deg/v2/2011/305/20111101120000-CMC-L4_GHRSST-SSTfnd-CMC0.2deg-GLOB-v02.0-fv02.0.nc'
# open the url with xarray! (lazy open: data is fetched on access)
ds_sst = xr.open_dataset(url)
# an object or variable in the last line display what is in it
ds_sst
# -
# Display values (or data) from a variable in the Dataset
ds_sst.analysed_sst.data
# Display attributes of a variable in the Dataset
ds_sst.analysed_sst.attrs
# Easy plot of a variable in a Dataset
ds_sst.analysed_sst.plot()
# and now the mask
ds_sst.mask.plot()
# selecting the ocean mask (mask==1 kept; everything else becomes NaN)
mask_ocean = ds_sst.mask.where(ds_sst.mask==1)
mask_ocean.plot()
# get sea surface temperature with mask, in Celsius
sst_global = ds_sst.analysed_sst*mask_ocean
# Kelvin -> Celsius
sst_global -= 273.15
sst_global.plot()
sst_global
# Lets focus in our neigborhood (California region, lat 30-45N, lon 130-115W)
sst_california = sst_global.sel(lat=slice(30,45),lon=slice(-130,-115))
sst_california.plot()
# take the average of a box
sst_cal_pt = sst_global.sel(lat=slice(38,42),lon=slice(-126,-123)).mean()
sst_cal_pt.data
# ----------
# ## [MUR SST](https://podaac.jpl.nasa.gov/Multi-scale_Ultra-high_Resolution_MUR-SST) [AWS Public dataset program](https://registry.opendata.aws/mur/)
#
# ### Access the MUR SST Zarr store which is in an s3 bucket.
#
# 
#
# We will start with my favorite Analysis Ready Data (ARD) format: [Zarr](https://zarr.readthedocs.io/en/stable/). Using data stored in Zarr is fast, simple, and contains all the metadata normally in a netcdf file, so you can figure out easily what is in the datastore.
#
# - Fast - Zarr is fast because all the metadata is consolidated into a .json file. Reading in massive datasets is lightning fast because it only reads the metadata and does not read in data until it needs it for compute.
#
# - Simple - Filenames? Who needs them? Who cares? Not I. Simply point your read routine to the data directory.
#
# - Metadata - all you want!
# +
# filter some warning messages
import warnings
warnings.filterwarnings("ignore")
#libraries
import datetime as dt
import xarray as xr
import fsspec
import s3fs
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
# make datasets display nicely
xr.set_options(display_style="html")
#magic fncts #put static images of your plot embedded in the notebook
# %matplotlib inline
# default figure size (width, height) in inches
plt.rcParams['figure.figsize'] = 12, 6
# %config InlineBackend.figure_format = 'retina'
# +
# %%time
# Open the MUR SST Zarr store from a public (anonymous) S3 bucket.
# consolidated=True reads the single consolidated .json metadata file.
file_location = 's3://mur-sst/zarr'
ikey = fsspec.get_mapper(file_location, anon=True)
ds_sst = xr.open_zarr(ikey,consolidated=True)
ds_sst
# -
# ### Read entire 10 years of data at 1 point.
#
# Select the ``analysed_sst`` variable over a specific time period, `lat`, and `lon` and load the data into memory. This is small enough to load into memory which will make calculating climatologies easier in the next step.
# +
# %%time
# Ten-year time series at a single ocean point (47N, 145W); .load() pulls
# the selected data into memory so later climatology math is cheap.
sst_timeseries = ds_sst['analysed_sst'].sel(time = slice('2010-01-01','2020-01-01'),
                                            lat    = 47,
                                            lon    = -145
                                           ).load()
sst_timeseries.plot()
# -
# ### The anomaly is more interesting...
#
# Use [.groupby](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.groupby.html#xarray-dataarray-groupby) method to calculate the climatology and [.resample](http://xarray.pydata.org/en/stable/generated/xarray.Dataset.resample.html#xarray-dataset-resample) method to then average it into 1-month bins.
# - [DataArray.mean](http://xarray.pydata.org/en/stable/generated/xarray.DataArray.mean.html#xarray-dataarray-mean) arguments are important! Xarray uses metadata to plot, so keep_attrs is a nice feature. Also, for SST there are regions with changing sea ice. Setting skipna = False removes these regions.
# +
# %%time
# Day-of-year climatology; skipna=False keeps NaN where sea-ice gaps exist,
# keep_attrs=True preserves metadata for plotting.
sst_climatology = sst_timeseries.groupby('time.dayofyear').mean('time',keep_attrs=True,skipna=False)
# Anomaly = observation minus the matching day-of-year climatology.
sst_anomaly = sst_timeseries.groupby('time.dayofyear')-sst_climatology
# Average into calendar-month bins ('1MS' = month start frequency).
sst_anomaly_monthly = sst_anomaly.resample(time='1MS').mean(keep_attrs=True,skipna=False)
#plot the data
sst_anomaly.plot()
sst_anomaly_monthly.plot()
plt.axhline(linewidth=2,color='k')
# NOTE(review): requires ./figures/ to exist on disk -- confirm.
plt.savefig('./figures/sst_anomaly.png')
# -
| notebooks/CalAcademy/Data_CalAcademy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn import tree
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
# header=0 uses the first CSV row as column names (so it is not read as data).
# NOTE(review): hard-coded absolute Windows path -- not portable.
train = pd.read_csv(r"C:\Users\beebe\OneDrive\Documents\DataScience Practice\train.csv", sep= ',', header=0)
train.head()
print("Dataset Length:", len(train))
print("Data Shape:",train.shape)
# Separating the target variable.
# 0:13 selects the 13 predictor columns (number_words_female ... age co_lead);
# the categorical 'Lead' column is excluded from the features.
x=train.values[:, 0:13]
# Column index 13 (the 14th column, 'Lead') is the target.
y=train.values[:,13]
# Splitting the data: 70% train / 30% test with a fixed random seed.
X_train,X_test, y_train, y_test = train_test_split(x, y,test_size = 0.3,random_state= 100)
# Fit a depth-limited decision tree using entropy (information gain):
# max_depth=3 stops after 3 levels; min_samples_leaf=5 requires at least
# 5 samples in every leaf (the original comment said "5 splits" -- it is samples).
clf_entropy= DecisionTreeClassifier(criterion = "entropy", random_state=100,max_depth=3,min_samples_leaf=5)
clf_entropy.fit(X_train, y_train)
# Predictions on the held-out test set.
y_pred= clf_entropy.predict(X_test)
y_pred
# Accuracy as a percentage.
print("Accuracy is", accuracy_score(y_test, y_pred)*100)
| Word_spoken_in_movie.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Direct Optimization + A* sampling for TSP
import os
import numpy as np
import torch
import matplotlib
import matplotlib.pyplot as plt
import string
# %matplotlib inline
# +
from utils import load_model
import dirpg
from a_star_sampling import Node, Trajectory
# +
class opts:
    """Hyper-parameter container for the DirPG / A*-sampling search.

    Every constructor argument is stored verbatim as an attribute of the
    same name; defaults reproduce the paper's standard configuration.
    """

    def __init__(self,
                 max_interactions=200,
                 alpha=1.0,
                 epsilon=2.0,
                 heuristic='mst',
                 independent_gumbel=False,
                 first_improvement=False,
                 dynamic_weighting = False,
                 dfs_like=False,
                 not_prune=False):
        # Search budget and priority weights.
        self.max_interactions = max_interactions
        self.alpha = alpha
        self.epsilon = epsilon
        # Heuristic choice and search-variant switches.
        self.heuristic = heuristic
        self.independent_gumbel = independent_gumbel
        self.first_improvement = first_improvement
        self.dynamic_weighting = dynamic_weighting
        self.dfs_like = dfs_like
        self.not_prune = not_prune
# Default search options; 8-city TSP instance.
dirpg_opts = opts()
num_cities = 8
# Load a locally trained checkpoint (path must exist on disk).
model, _ = load_model('outputs/tsp_{}/jupyter_example/DirPG_20200506T134440/'.format(num_cities), epoch = 0) # 'pretrained/tsp_100/')
#model, _ = load_model('outputs/tsp_{}/visuals/DirPG_20200421T162602/'.format(num_cities), epoch = 1)
#model, _ = load_model('outputs/tsp_{}/2epochs_ours/DirPG_20200506T004445/'.format(num_cities), epoch = 0)
model.eval() # Put in evaluation mode to not track gradients
# NOTE(review): this rebinds the name `dirpg` from the imported module to a
# DirPG instance -- the module object is shadowed from here on.
dirpg = dirpg.DirPG(model, dirpg_opts)
# +
import heapq
from utils import utils_gumbel
import networkx as nx
import time
class PriorityQueue:
    """Max-priority queue over partial TSP tours for A*-sampling / DirPG.

    Nodes (partial tours) are ordered by the externally defined `Node`
    class; `pop` returns the most promising node, `expand` pushes the chosen
    ("special") child plus an aggregated "other children" node carrying a
    truncated-Gumbel sample. A networkx graph (`self.G`) and label/colour
    bookkeeping are accumulated purely for the notebook's visualisations.
    """

    def __init__(self,
                 init_state,
                 distance_mat,
                 epsilon,
                 search_params,
                 inference=False
                 ):
        self.queue = []          # heap storage, managed via heapq
        self.G = nx.Graph()      # search tree, for plotting only
        Node.epsilon = epsilon
        # Drop the batch dimension: this queue manages a single instance.
        init_state = init_state._replace(first_a=init_state.first_a.squeeze(0),
                                         prev_a=init_state.prev_a.squeeze(0),
                                         visited_=init_state.visited_.squeeze(0),
                                         lengths=init_state.lengths.squeeze(0),
                                         cur_coord=init_state.cur_coord.squeeze(0),
                                         ids=init_state.ids.squeeze(0),
                                         i=init_state.i.squeeze(0))
        special_action = init_state.prev_a.item()
        not_visited = [i for i in range(init_state.loc.size(1)) if i != special_action]
        self.first_coord = init_state.loc[init_state.ids, special_action]
        self.graph_size = distance_mat.shape[1]
        # global nodes parameters #
        Node.alpha = search_params['alpha']
        Node.epsilon = epsilon  # NOTE(review): already set above; redundant
        Node.dynamic_weighting = search_params['dynamic_weighting']
        Node.heuristic = search_params['heuristic']
        Node.graph_size = self.graph_size
        Node.dist = distance_mat
        # NOTE(review): `mst` is not among this notebook's visible imports --
        # presumably a module providing Prim's MST over the distance matrix;
        # confirm it is imported elsewhere.
        self.mst_edges = mst.prim_pytorch(distance_mat).tolist()
        self.mst_val = np.sum(self.mst_edges)
        # Root node: tour containing only the first (special) action.
        self.root_node = Node(id=init_state.ids,
                              first_a=init_state.first_a.item(),
                              next_actions=not_visited, # torch.tensor(not_visited), # number of cities
                              not_visited=not_visited,
                              prefix=[special_action],
                              lengths=0.0,
                              cur_coord=self.first_coord,
                              max_gumbel=utils_gumbel.sample_gumbel(0),
                              t_opt=True)
        self.G.add_node(self.root_node)
        heapq.heappush(self.queue, self.root_node)
        if search_params['independent_gumbel']:
            # NOTE(review): `copy` is not imported in the visible imports --
            # this branch would raise NameError if enabled; confirm.
            direct_node = copy.copy(self.root_node)
            direct_node.t_opt = False
            heapq.heappush(self.queue, direct_node)
        self.current_node = self.root_node
        self.id = init_state.ids.item()
        self.trajectories_list = []
        self.t_opt = None      # the t_opt (greedy-Gumbel) completed trajectory
        self.t_direct = None   # best completed trajectory found so far
        self.prune_count = 0
        self.orig_dist = distance_mat
        self.start_search_direct = False
        self.start_time = float('Inf')
        # self.max_search_time = max_search_time
        self.num_interactions = 0
        self.first_improvement = search_params['first_improvement']
        self.max_interactions = search_params['max_interactions']
        self.dfs_like = search_params['dfs_like']
        self.p = search_params['prune']
        self.dynamic_weighting = search_params['dynamic_weighting']
        self.inference = inference
        self.prune = False
        self.lower_bound = -float('Inf')
        ####### plotting #######
        #priority-queue:
        self.labels = {self.root_node : 'root'}
        self.nodes_opt = []
        self.other_nodes = []
        self.ids = 1
        self.direct_node = None
        #prefix:

    def pop(self):
        """Pop the best node; return it, or 'break' when the search ends."""
        if not self.queue:
            print('the queue is empty')
            return 'break'
        parent = heapq.heappop(self.queue)
        self.current_node = parent
        if self.num_interactions >= self.max_interactions:
            print('interactions budget is over')
            return 'break'
        # Prune nodes whose optimistic upper bound is below the best value
        # found so far, then keep popping recursively.
        if self.prune and self.lower_bound > parent.upper_bound:
            self.prune_count += 1
            return self.pop()
        # Start the search time count
        if not parent.t_opt and not self.start_search_direct:
            self.start_time = time.time()
            self.start_search_direct = True
        if parent.done:
            return self.set_trajectory(parent)
        return parent

    def set_trajectory(self, node):
        """Record a completed tour, update t_opt/t_direct, continue popping."""
        # Closing the tour: add the return leg from the last city back to
        # the first coordinate.
        t = Trajectory(actions=node.prefix,
                       gumbel=node.max_gumbel,
                       length=node.lengths - (self.first_coord - node.cur_coord).norm(p=2, dim=-1),
                       objective=node.objective)
        self.trajectories_list.append(t)
        if node.t_opt:
            self.t_opt = t
            self.t_direct = t
            self.direct_node = node
            self.lower_bound = t.objective
            if self.inference:
                return 'break'
        else:
            if t.objective > self.t_direct.objective:
                # if len(self.trajectories_list) > 2:
                #    print('here: ', len(self.trajectories_list))
                self.t_direct = t
                self.lower_bound = t.objective
                self.direct_node = node
                if self.first_improvement:
                    #print('***** priority(direct) > priority(opt) *****')
                    print('first improvement')
                    return 'break'
        if self.queue:
            return self.pop()
        else:
            # print('break')
            print('5')
            return 'break'

    def expand(self, state, logprobs):
        """Expand current_node with the model's chosen action.

        Pushes the chosen ("special") child and, unless in inference mode,
        an aggregated "other children" node with a truncated-Gumbel sample.
        Returns (special_child, other_children); other_children may be None.
        """
        self.num_interactions += 1
        special_action = state.prev_a.item()
        s = time.time()
        not_visited = [i for i in self.current_node.not_visited if i != special_action]
        cur_coord = state.loc[self.current_node.id, special_action]
        # Negative edge length: objectives are maximised, so costs are negated.
        length = -(cur_coord - self.current_node.cur_coord).norm(p=2, dim=-1)
        #updated_prefix = self.current_node.prefix + [special_action]
        #dist = np.delete(np.delete(self.orig_dist, self.current_node.prefix[1:], 0), self.current_node.prefix[1:], 1)
        #print('******** orig ******')
        #print(self.orig_dist)
        #print('******** mod ******')
        #print(dist)
        special_child = Node(
            id=self.current_node.id,
            first_a=self.current_node.first_a,
            not_visited=not_visited,
            prefix=self.current_node.prefix + [special_action],
            lengths=self.current_node.lengths + length,
            cur_coord=cur_coord,
            done=len(not_visited) == 0,
            logprob_so_far=self.current_node.logprob_so_far + logprobs[special_action],
            max_gumbel=self.current_node.max_gumbel,
            next_actions=not_visited,
            bound_togo=self.current_node.bound_togo + sum(self.mst_edges[special_action]),
            depth=self.current_node.depth + 1,
            t_opt=self.current_node.t_opt,
            dfs_like=self.dfs_like)
        # Visualisation bookkeeping.
        if special_child.t_opt:
            self.nodes_opt.append(special_child)
        else:
            self.other_nodes.append(special_child)
        self.G.add_edge(self.current_node, special_child)
        self.labels[special_child] = str(self.ids)
        self.ids+=1
        if self.prune and special_child.upper_bound < self.lower_bound:
            self.prune_count += 1
        else:
            heapq.heappush(self.queue, special_child)
        # Sample the max gumbel for the non-chosen actions and create an "other
        # children" node if there are any alternatives left.
        m = time.time()
        other_actions = [i for i in self.current_node.next_actions if i != special_action]
        assert len(other_actions) == len(self.current_node.next_actions) - 1
        other_children = None
        if other_actions and not self.inference:
            other_max_location = utils_gumbel.logsumexp(logprobs[other_actions])
            other_max_gumbel = utils_gumbel.sample_truncated_gumbel(self.current_node.logprob_so_far + other_max_location,
                                                                    self.current_node.max_gumbel).item()
            other_children = Node(
                id=self.current_node.id,
                first_a=self.current_node.first_a,
                not_visited=self.current_node.not_visited,
                prefix=self.current_node.prefix,
                lengths=self.current_node.lengths,
                cur_coord=self.current_node.cur_coord,
                done=self.current_node.done,
                logprob_so_far=self.current_node.logprob_so_far,
                max_gumbel=other_max_gumbel,
                next_actions=other_actions,
                bound_togo=self.current_node.bound_togo,
                depth=self.current_node.depth + 1,
                t_opt=False,
                dfs_like=False)
            self.other_nodes.append(other_children)
            self.G.add_edge(self.current_node, other_children)
            self.labels[other_children] = str(self.ids)
            self.ids+=1
            if self.prune and other_children.upper_bound < self.lower_bound:
                self.prune_count += 1
            else:
                heapq.heappush(self.queue, other_children)
        f = time.time()
        # Timing of the two phases (currently unused).
        sp = m - s
        oth = f - m
        return special_child, other_children
# +
def encode(x, dirpg):
    """Run the encoder once on instance x; return (problem state, fixed context)."""
    embeddings = dirpg.encoder(x, only_encoder=True)
    state = dirpg.encoder.problem.make_state(x)
    fixed = dirpg.encoder.precompute(embeddings)
    return state, fixed

# A random 20-city instance in the unit square: (batch=1, cities=20, xy=2).
x = torch.rand(1, 20, 2)
def init_queue(x, dirpg, epsilon=1.0, alpha=1.0, start_from = 0):
    """Encode instance x, take the first action, and build the search queue.

    Returns (PriorityQueue, state, fixed); `start_from` is the first city.
    """
    dirpg.search_params['alpha'] = alpha
    state, fixed = encode(x, dirpg)
    _, state = dirpg.forward_and_update(state, fixed, first_action=start_from)
    return PriorityQueue(init_state=state[torch.tensor(0)],
                         distance_mat=state.dist[0],
                         epsilon = epsilon,
                         inference=False,
                         search_params=dirpg.search_params), state, fixed
# +
def sample(queue, fixed, state):
    """Run the A*-sampling loop until pop() signals termination.

    NOTE(review): `while queue:` relies on the PriorityQueue instance being
    truthy (no __bool__/__len__ visible), so termination comes only from the
    'break' sentinel returned by pop() -- confirm that is intended.
    """
    while queue:
        parent = queue.pop()
        if parent == 'break':
            return queue
        batch_state = state.stack_state([parent])
        log_p, state = dirpg.forward_and_update(batch_state, fixed)
        queue.expand(state[torch.tensor(0)], log_p[0])

# Run one full search and report how many model interactions it used.
queue,state, fixed = init_queue(x, dirpg)
queue = sample(queue, fixed, state)
print(queue.num_interactions)
# -
# ## Interactive tree plot
# Wide figure for the two side-by-side panels.
plt.rcParams['figure.figsize'] = [16, 6]
# Fixed seeds so the instance and Gumbel samples are reproducible.
np.random.seed(3)
torch.manual_seed(3)
x = torch.rand(1, num_cities, 2)
#x = torch.load('good_example_8graph')
# One lowercase letter per city, used as plot labels.
abc = string.ascii_lowercase[:x.size(1)]
direct, first_direct = None, True
queue,state, fixed = init_queue(x, dirpg, epsilon=2.0, alpha=1.0)
# ### Press Ctrl+Entr to expand the queue
#
# #### Left: priority queue, Right: prefix of the current node (yellow node in the left fig)
# +
# Interactive cell: each run pops one node, expands it, and redraws the
# search tree (left panel) and the tour prefix over the cities (right panel).
# Indentation reconstructed -- the whole drawing body runs only when pop()
# did not signal termination.
cities = nx.DiGraph()
cities.add_nodes_from(range(x.size(1)))
parent = queue.pop()
if parent == 'break':
    print('END')
else:
    batch_state = state.stack_state([parent])
    log_p, state = dirpg.forward_and_update(batch_state, fixed)
    sp, oth = queue.expand(state[torch.tensor(0)], log_p[0])
    if queue.t_opt is not None:
        print('t_opt: ')
        print([abc[i] for i in queue.t_opt.actions])
    if queue.t_direct is not None:
        print('t_direct: ')
        print([abc[i] for i in queue.t_direct.actions])
    print('special child prefix: ')
    print([abc[i] for i in sp.prefix])
    print('depth: ', sp.depth)
    # Left panel: the search tree.
    plt.subplot(121)
    pos = nx.kamada_kawai_layout(queue.G)
    # nx.draw_networkx(queue.G,pos=pos, with_labels=False, font_weight='bold')
    colors = []
    # Root node in green.
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=[queue.root_node],
                           node_size = 1000,
                           node_color='g',
                           alpha=0.8)
    # t_opt path nodes in red.
    opt_nodes = [i for i in queue.nodes_opt if i!=sp]
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=opt_nodes,
                           node_size = 500,
                           node_color='r',
                           alpha=0.5)
    # Other nodes still on the heap in yellow.
    in_queue = [i for i in queue.other_nodes if i in queue.queue]
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=in_queue,
                           node_size = 500,
                           node_color='y',
                           alpha=0.8)
    # Already-expanded nodes in dark grey.
    out_of_queue = [i for i in queue.other_nodes if i not in queue.queue]
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=out_of_queue,
                           node_size = 500,
                           node_color=[(0.2,0.2,0.2) for _ in range(len(out_of_queue))],
                           alpha=0.6)
    # The just-expanded special child in bright green.
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=[sp],
                           node_size = 500,
                           node_color=[(0.0,1.0,0.0)],
                           alpha=0.8)
    """
    if first_direct and queue.t_direct != queue.t_opt:
        first_direct = False
        direct = queue.t_direct
    """
    if queue.direct_node is not None:
        """
        if direct != queue.t_direct:
            direct = queue.t_direct
        """
        # Mark the best (direct) node with a green triangle.
        nx.draw_networkx_nodes(queue.G, pos,
                               nodelist=[queue.direct_node],
                               node_shape='^',
                               node_size = 800,
                               node_color=[(0.0,1.0,0.0)],
                               alpha=0.8)
    nx.draw_networkx_edges(queue.G, pos, width=1.0, alpha=0.5)
    # Highlight the edge just added by this expansion.
    nx.draw_networkx_edges(queue.G, pos,
                           edgelist=[(parent, sp)],
                           width=8, alpha=0.5, edge_color='r')
    nx.draw_networkx_labels(queue.G, pos, labels= queue.labels, font_size=16)
    #####################
    # Right panel: the tour prefix drawn over the city coordinates.
    plt.subplot(122)
    pos2 = {i:loc.numpy() for i,loc in enumerate(x[0])}
    edgelist = [(sp.prefix[i],sp.prefix[i+1]) for i in range(len(sp.prefix)) if i<len(sp.prefix)-1]
    nx.draw_networkx_nodes(cities, pos2,
                           node_size = 1000,
                           node_color='lightgrey',
                           alpha=1.0)
    # Start city in green.
    nx.draw_networkx_nodes(cities, pos2,
                           nodelist=[sp.prefix[0]],
                           node_size = 1000,
                           node_color='g',
                           alpha=0.8)
    nx.draw_networkx_edges(cities, pos2,
                           edgelist=edgelist,
                           width=3, alpha=0.5, min_target_margin=15)
    # t_opt tour in red, best direct tour (if different) in green.
    if queue.t_opt is not None:
        a = queue.t_opt.actions
        edgelist = [(a[i],a[i+1]) for i in range(len(a)) if i<len(a)-1]
        nx.draw_networkx_edges(cities, pos2,
                               edgelist=edgelist,
                               width=8, alpha=0.3, edge_color='r',min_target_margin=15)
    if queue.t_direct != queue.t_opt:
        a = queue.t_direct.actions
        edgelist = [(a[i],a[i+1]) for i in range(len(a)) if i<len(a)-1]
        nx.draw_networkx_edges(cities, pos2,
                               edgelist=edgelist,
                               width=8, alpha=0.3, edge_color='g',min_target_margin=15)
    l = {i:abc[i] for i in range(x.size(1))}
    nx.draw_networkx_labels(cities, pos2, labels=l, font_size=14)
    last_parent = parent
#nx.draw_networkx(cities,pos, edgelist=edgelist, node_size= 500, node_color='lightgrey' )
# -
# ## Node size:
# ##### max Gumbel
# ## Node color:
# ##### epsilon(length + 2MST)
# +
def make_circule(n):
    """Return n city coordinates evenly spaced on a circle.

    Output is a (1, n, 2) float32 tensor centred at (0.5, 0.5); the circular
    layout comes from networkx.
    """
    G = nx.Graph()
    G.add_nodes_from(range(n))
    pos = nx.circular_layout(G, scale=0.5, center=(0.5,0.5))
    return torch.tensor([np.stack(list(pos.values()))], dtype = torch.float32)
# +
def min_max_norm(x, a, b):
    """Linearly rescale x so its minimum maps to a and its maximum to b."""
    lo = np.min(x)
    hi = np.max(x)
    span = b - a
    return a + (x - lo) * span / (hi - lo)
def norm(x):
    """Standardize x: subtract the mean and divide by the (population) std."""
    mu = np.mean(x)
    sigma = np.std(x)
    return (x - mu) / sigma
# -
# Reproducible instance and Gumbel noise for the node-size visualisation.
np.random.seed(4)
torch.manual_seed(4)
x = torch.rand(1, num_cities, 2)
# x = make_circule(num_cities)
#x = torch.load('good_example_8graph')
queue,state, fixed = init_queue(x, dirpg, epsilon=10.0, alpha=2.0)
# `update` toggles each run between expanding and just popping/highlighting.
update = False
direct, first_direct = None, True
parent = queue.pop()
# +
# Interactive cell: alternate runs either expand the popped node (update
# True) or pop the next node and highlight it (update False). Node size
# encodes the max Gumbel; node colour encodes the epsilon-scaled reward
# bound. Indentation reconstructed -- drawing happens only when pop() did
# not signal termination.
Node.budget = dirpg_opts.max_interactions
update = not update
cities = nx.DiGraph()
cities.add_nodes_from(range(x.size(1)))
if parent == 'break':
    print('END')
else:
    if update:
        batch_state = state.stack_state([parent])
        log_p, state = dirpg.forward_and_update(batch_state, fixed)
        sp, oth = queue.expand(state[torch.tensor(0)], log_p[0])
    if queue.t_opt is not None:
        print('t_opt: ')
        print([abc[i] for i in queue.t_opt.actions])
    if queue.t_direct is not None:
        print('t_direct: ')
        print([abc[i] for i in queue.t_direct.actions])
    print('special child prefix: ')
    print([abc[i] for i in sp.prefix])
    print('prune count: ', queue.prune_count)
    print('lower bound: ',queue.lower_bound )
    print('scecial child: ')
    sp.print()
    print('other children: ')
    oth.print() if oth is not None else None
    #print('scecial upper bound: ',sp.upper_bound)
    #print('others upper bound: ',oth.upper_bound) if oth is not None else None
    # Left panel: search tree with Gumbel-sized, bound-coloured nodes.
    ax = plt.subplot(121)
    pos = nx.kamada_kawai_layout(queue.G)
    # nx.draw_networkx(queue.G,pos=pos, with_labels=False, font_weight='bold')
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=[queue.root_node],
                           node_size = 1000,
                           node_color='g',
                           alpha=0.8)
    nx.draw_networkx_edges(queue.G, pos, width=1.0, alpha=0.5)
    # Highlight the most recent expansion edge.
    nx.draw_networkx_edges(queue.G, pos,
                           edgelist=[(parent, sp)],
                           width=8, alpha=0.5, edge_color=(0.0,1.0,0.0))
    """
    print('max_gumbel + eps*(- length - 2MST) = ')
    for i,j in zip([n.max_gumbel for n in queue.queue],
                   [Node.epsilon*n.get_upper_bound(2.0).item() for n in queue.queue]):
        print(i, ' + ',j, ' = ', i+j )
    """
    # Node sizes from the max Gumbel (several candidate scalings computed;
    # only s2 is used below).
    org_s = [n.max_gumbel for n in queue.queue]
    s2 = [300+4000.0*np.exp(n.max_gumbel) for n in queue.queue]
    s_mm = min_max_norm(org_s, a=np.min(org_s) ,b=np.max([5000,np.max(org_s)]))
    s_n = 300+100*norm(org_s)
    # Node colours from the epsilon-scaled reward bound.
    colors = [n.eps_reward.item() for n in queue.queue]
    nx.draw(queue.G, pos,
            nodelist=queue.queue,
            node_size=s2,
            node_color=colors,
            cmap=plt.cm.YlOrRd,
            alpha=0.8)
    # Expanded (off-heap) nodes in dark grey.
    out_of_queue = [i for i in queue.G if i not in queue.queue if i != queue.root_node]
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=out_of_queue,
                           node_size = 500,
                           node_color=[(0.2,0.2,0.2) for _ in range(len(out_of_queue))],
                           alpha=0.6)
    if not update:
        # Pop the next candidate and circle it with a hollow blue hexagon.
        parent = queue.pop()
        ax.set_facecolor(color='none')
        nx.draw_networkx_nodes(queue.G, pos,ax=ax,
                               nodelist=[parent],
                               node_size = 4000,
                               node_color='none',
                               linewidths=3.0,
                               node_shape = matplotlib.markers.MarkerStyle(marker='h', fillstyle='none'),
                               edgecolors='b')
    if queue.direct_node is not None:
        # Mark the best (direct) node with a green star.
        nx.draw_networkx_nodes(queue.G, pos,
                               nodelist=[queue.direct_node],
                               node_shape='*',
                               node_size = 1500,
                               node_color=[(0.0,1.0,0.0)],
                               alpha=0.8)
    nx.draw_networkx_labels(queue.G, pos, labels= queue.labels, font_size=16)
    #####################
    # Right panel: tour prefix over the city coordinates.
    plt.subplot(122)
    pos2 = {i:loc.numpy() for i,loc in enumerate(x[0])}
    edgelist = [(sp.prefix[i],sp.prefix[i+1]) for i in range(len(sp.prefix)) if i<len(sp.prefix)-1]
    nx.draw_networkx_nodes(cities, pos2,
                           node_size = 1000,
                           node_color='lightgrey',
                           alpha=1.0)
    nx.draw_networkx_nodes(cities, pos2,
                           nodelist=[sp.prefix[0]],
                           node_size = 1000,
                           node_color='g',
                           alpha=0.8)
    nx.draw_networkx_edges(cities, pos2,
                           edgelist=edgelist,
                           width=3, alpha=0.5, min_target_margin=15)
    if queue.t_opt is not None:
        a = queue.t_opt.actions
        edgelist = [(a[i],a[i+1]) for i in range(len(a)) if i<len(a)-1]
        nx.draw_networkx_edges(cities, pos2,
                               edgelist=edgelist,
                               width=8, alpha=0.2, edge_color='r',min_target_margin=15)
    if queue.t_direct != queue.t_opt:
        a = queue.t_direct.actions
        edgelist = [(a[i],a[i+1]) for i in range(len(a)) if i<len(a)-1]
        nx.draw_networkx_edges(cities, pos2,
                               edgelist=edgelist,
                               width=8, alpha=0.2, edge_color='g',min_target_margin=15)
    l = {i:abc[i] for i in range(x.size(1))}
    nx.draw_networkx_labels(cities, pos2, labels=l, font_size=14)
# -
# ## DFS vs BFS
#
# ### DFS: Nodes that extends a prefix are colored orange
# ### BFS: Nodes that search for actions other than the last node are colored blue
# ### Nodes that explore different prefix are colored yellow
# Setup for the DFS-vs-BFS visualisation: fresh instance, fresh queue, and
# accumulators classifying each popped node relative to the previous one.
plt.rcParams['figure.figsize'] = [16, 6]
np.random.seed(4)
torch.manual_seed(4)
x = torch.rand(1, num_cities, 2)
# x = make_circule(num_cities)
#x = torch.load('good_example_8graph')
queue,state, fixed = init_queue(x, dirpg, epsilon=10.0, alpha=5.0)
update = False
direct, first_direct = None, True
# dfs: extends previous prefix; bfs: same prefix (sibling); others: jumps.
dfs, bfs, others = [],[],[]
last_parent = None
# +
# One expansion step of the tree search, visualised: left subplot shows the
# search tree, right subplot shows the tours on the city coordinates.
cities = nx.DiGraph()
cities.add_nodes_from(range(x.size(1)))
# 'break' is the queue's end-of-search sentinel
parent = queue.pop()
if parent == 'break':
    print('END')
else:
    batch_state = state.stack_state([parent])
    log_p, state = dirpg.forward_and_update(batch_state, fixed)
    # sp = the "special" child extending the popped prefix, oth = siblings
    sp, oth = queue.expand(state[torch.tensor(0)], log_p[0])
    # Classify the popped node relative to the previous one (for colouring):
    # same prefix -> BFS-like sibling, one-longer prefix -> DFS-like child.
    if last_parent is not None and parent not in queue.nodes_opt:
        if parent.prefix == last_parent.prefix:
            bfs.append(parent)
        elif parent.prefix[:-1] == last_parent.prefix:
            dfs.append(parent)
        else:
            others.append(parent)
    if queue.t_opt is not None:
        print('t_opt: ')
        print([abc[i] for i in queue.t_opt.actions])
    if queue.t_direct is not None:
        print('t_direct: ')
        print([abc[i] for i in queue.t_direct.actions])
    print('special child prefix: ')
    print([abc[i] for i in sp.prefix])
    print('depth: ', sp.depth)
    print('alpha: ', sp.alpha)
    # ---- left subplot: the search tree ----
    plt.subplot(121)
    pos = nx.kamada_kawai_layout(queue.G)
    # nx.draw_networkx(queue.G,pos=pos, with_labels=False, font_weight='bold')
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=[queue.root_node],
                           node_size = 1000,
                           node_color='g',
                           alpha=0.8)
    opt_nodes = [i for i in queue.nodes_opt if i!=sp]
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=queue.nodes_opt,
                           node_size = 500,
                           node_color='r',
                           alpha=0.5)
    # grey = nodes not yet classified as dfs/bfs/other
    others_nodes = [i for i in queue.other_nodes if i not in dfs+bfs+others]
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=others_nodes,
                           node_size = 500,
                           node_color=[(0.2,0.2,0.2) for _ in range(len(others_nodes))],
                           alpha=0.6)
    if dfs:
        nx.draw_networkx_nodes(queue.G, pos,
                               nodelist=dfs,
                               node_size = 500,
                               node_color='orange',
                               alpha=0.8)
    if bfs:
        nx.draw_networkx_nodes(queue.G, pos,
                               nodelist=bfs,
                               node_size = 500,
                               node_color='blue',
                               alpha=0.6)
    if others:
        nx.draw_networkx_nodes(queue.G, pos,
                               nodelist=others,
                               node_size = 500,
                               node_color='magenta',
                               alpha=0.6)
    nx.draw_networkx_nodes(queue.G, pos,
                           nodelist=[sp],
                           node_size = 500,
                           node_color=[(0.0,1.0,0.0)],
                           alpha=0.8)
    """
    if first_direct and queue.t_direct != queue.t_opt:
        first_direct = False
        direct = queue.t_direct
    """
    if queue.direct_node is not None:
        """
        if direct != queue.t_direct:
            direct = queue.t_direct
        """
        nx.draw_networkx_nodes(queue.G, pos,
                               nodelist=[queue.direct_node],
                               node_shape='^',
                               node_size = 800,
                               node_color=[(0.0,1.0,0.0)],
                               alpha=0.8)
    nx.draw_networkx_edges(queue.G, pos, width=1.0, alpha=0.5)
    # highlight the edge from the popped parent to its special child
    nx.draw_networkx_edges(queue.G, pos,
                           edgelist=[(parent, sp)],
                           width=8, alpha=0.5, edge_color='r')
    nx.draw_networkx_labels(queue.G, pos, labels= queue.labels, font_size=16)
    #####################
    # ---- right subplot: tours drawn on the city coordinates ----
    plt.subplot(122)
    pos2 = {i:loc.numpy() for i,loc in enumerate(x[0])}
    edgelist = [(sp.prefix[i],sp.prefix[i+1]) for i in range(len(sp.prefix)) if i<len(sp.prefix)-1]
    nx.draw_networkx_nodes(cities, pos2,
                           node_size = 1000,
                           node_color='lightgrey',
                           alpha=1.0)
    nx.draw_networkx_nodes(cities, pos2,
                           nodelist=[sp.prefix[0]],
                           node_size = 1000,
                           node_color='g',
                           alpha=0.8)
    nx.draw_networkx_edges(cities, pos2,
                           edgelist=edgelist,
                           width=3, alpha=0.5, min_target_margin=15)
    if queue.t_opt is not None:
        a = queue.t_opt.actions
        edgelist = [(a[i],a[i+1]) for i in range(len(a)) if i<len(a)-1]
        nx.draw_networkx_edges(cities, pos2,
                               edgelist=edgelist,
                               width=8, alpha=0.3, edge_color='r',min_target_margin=15)
    if queue.t_direct != queue.t_opt:
        a = queue.t_direct.actions
        edgelist = [(a[i],a[i+1]) for i in range(len(a)) if i<len(a)-1]
        nx.draw_networkx_edges(cities, pos2,
                               edgelist=edgelist,
                               width=8, alpha=0.3, edge_color='g',min_target_margin=15)
    l = {i:abc[i] for i in range(x.size(1))}
    nx.draw_networkx_labels(cities, pos2, labels=l, font_size=14)
    last_parent = parent
#nx.draw_networkx(cities,pos, edgelist=edgelist, node_size= 500, node_color='lightgrey' )
# -
def generate_random_distance_matrix(n):
    """Sample ``n`` uniform points in the unit square and return their
    (n, n) pairwise Euclidean distance matrix."""
    loc = torch.FloatTensor(n, 2).uniform_(0, 1)
    # broadcast (n, 1, 2) against (1, n, 2) to get all pairwise differences
    diff = loc.unsqueeze(1) - loc.unsqueeze(0)
    return diff.norm(p=2, dim=-1)
a = generate_random_distance_matrix(6)
iu1 = np.triu_indices(6,k=1)
print(a)
print(a[iu1])
first_node = 2
prefix = [2,3]
# torch tensors have no .copy() method (that is numpy API); .clone() is the
# tensor equivalent -- the original line raised AttributeError.
c = a.clone()
# mask the already-visited prefix rows/columns with inf
c[:,prefix] = np.inf
c[prefix,:] = np.inf
print(c)
a
# +
def greedy_path(distance_matrix, prefix):
    """Complete a tour greedily (nearest neighbour) from a partial prefix.

    Args:
        distance_matrix: square (n, n) array of pairwise distances.
        prefix: already-visited node sequence; the walk resumes at
            ``prefix[-1]`` and finally closes back to ``prefix[0]``.

    Returns:
        Total cost of the greedily completed, closed tour segment.
    """
    INF = np.inf
    start, current = prefix[0], prefix[-1]
    remaining = distance_matrix.copy()
    # a node is "used up" by setting its row and column to inf
    np.fill_diagonal(remaining, INF)
    remaining[:, prefix[:-1]] = INF
    remaining[prefix[:-1], :] = INF
    step_costs, visited = [], []
    while (remaining != INF).any():
        nxt = np.argmin(remaining[current])
        step_costs.append(remaining[current][nxt])
        visited.append(nxt)
        remaining[:, current] = INF
        remaining[current, :] = INF
        current = nxt
    # close the walk back to the first node of the prefix
    step_costs.append(distance_matrix[visited[-1], start])
    return np.sum(step_costs)
print(greedy_path(a, [2,3]))

def prim(distance_matrix, prefix):
    """Prim's MST restricted by a visited prefix -- unimplemented stub.

    NOTE(review): the original cell left this body empty, which is a
    SyntaxError in Python; raising keeps the file importable until the
    function is actually written.
    """
    raise NotImplementedError("prim() was never implemented in this notebook")
# -
def mst(X, prefix=None):
    """Total weight of a minimum spanning tree (Prim's algorithm).

    Args:
        X: square (n, n) matrix of edge weights of a fully connected graph.
        prefix: unused for now (the delete call is commented out); kept for
            interface compatibility. Fixed from the mutable default ``[]``.

    Returns:
        Sum of the selected spanning-tree edge weights.
    """
    if prefix is None:  # avoid the shared-mutable-default pitfall
        prefix = []
    X = X.copy()
    if X.shape[0] != X.shape[1]:
        raise ValueError("X needs to be square matrix of edge weights")
    #X = np.delete(np.delete(X, prefix[1:], 0), prefix[1:], 1)
    n_vertices = X.shape[0]
    spanning_edges = []
    # initialize with node 0:
    visited_vertices = [0]
    num_visited = 1
    # exclude self connections:
    diag_indices = np.arange(n_vertices)
    X[diag_indices, diag_indices] = np.inf
    mst_val = 0
    while num_visited != n_vertices:
        # flat argmin over every edge leaving the visited set
        new_edge = np.argmin(X[visited_vertices], axis=None)
        mst_val += X[visited_vertices].reshape(-1)[new_edge]
        # decode the flat index into (row-in-visited, column)
        new_edge = divmod(new_edge, n_vertices)
        new_edge = [visited_vertices[new_edge[0]], new_edge[1]]
        # add edge to tree
        spanning_edges.append(new_edge)
        visited_vertices.append(new_edge[1])
        # remove all edges inside current tree
        X[visited_vertices, new_edge[1]] = np.inf
        X[new_edge[1], visited_vertices] = np.inf
        num_visited += 1
    return mst_val
# +
# Hand-built 5-node instance (its MST weight is 10); prefix is currently
# ignored by mst().
a = [[0,7,9,1.5, 6],[7,0,6,3,4],[9,6,0,2,7],[1.5,3,2,0,3.5],[6,4,7,3.5,0]]
a = torch.tensor(a, dtype=torch.float).numpy()
print(a)
mst(a, prefix = [2,1])
# +
import heapq
class Kruskals:
    """Skeleton for an incremental Kruskal's MST over a dense distance matrix.

    NOTE(review): the original cell ended with a dangling ``self.`` (a
    SyntaxError) and is clearly unfinished; only the edge-weight heap
    construction is implemented so far.
    """

    def __init__(self, distance_matrix):
        self.dm = distance_matrix
        n = distance_matrix.shape[0]
        # upper-triangle weights (k=1 skips the zero diagonal) as a min-heap
        self.edges_heap = distance_matrix[np.triu_indices(n, k=1)].tolist()
        heapq.heapify(self.edges_heap)
        # vertices already joined into the tree (not used yet)
        self.mst_vertices = []
m = Kruskals(generate_random_distance_matrix(6))
# +
class heap_wrapper:
    """Orderable wrapper so arbitrary objects can live inside a heapq.

    Ordering is delegated entirely to the integer priority ``i``; the
    wrapped ``instance`` never takes part in comparisons.
    """

    def __init__(self, instance, i):
        self.i = i
        self.instance = instance

    def __lt__(self, other):
        # lower priority value sorts first
        return other.i > self.i
def sample(queue, state, fixed):
    """Run one queue expansion step; return 'break' when the search is done.

    Pops the next node, runs the policy (module-level ``dirpg``) on it and
    pushes the resulting children back onto the queue.
    """
    node = queue.pop()
    if node == 'break':
        return node
    stacked = state.stack_state([node])
    log_p, new_state = dirpg.forward_and_update(stacked, fixed)
    queue.expand(new_state[torch.tensor(0)], log_p[0])
def for_loop_version(batch):
    """Advance every search queue round-robin until each signals 'break'."""
    active = [init_queue(x, dirpg) for x in batch]
    while active:
        # keep only the queues whose sampling step has not terminated
        active = [triple for triple in active if sample(*triple) != 'break']
def heap_version(batch):
    """Advance the queues via a heap keyed on a monotonically growing counter.

    Each popped queue is re-pushed with a fresh (larger) priority, giving a
    FIFO-like rotation through the still-active queues.
    """
    queues = [heap_wrapper(init_queue(x, dirpg), i) for i, x in enumerate(batch)]
    #heapq.heapify(queues)
    counter = len(batch)
    while queues:
        counter += 1
        wrapper = heapq.heappop(queues)
        wrapper.i = counter
        if sample(*wrapper.instance) != 'break':
            heapq.heappush(queues, wrapper)
import copy
batch_size = 200
# each x handed to init_queue keeps a leading batch dim of 1 -- TODO confirm
x = torch.rand(batch_size, 1,num_cities, 2)
# compare the plain-loop scheduler against the heap scheduler
s = time.time()
for_loop_version(x)
d = time.time()
heap_version(x)
e = time.time()
print('loop :', d-s )
print('heap :', e-d )
#heaps = [[] for _ in range(batch_size)]
#heap = []
# -
import random
# draw a single random int from [0, 10)
random.sample(range(10),1)[0]
# +
# import heapq
q = [] # the queue is a regular list
# tuples compare lexicographically, so the first field acts as the priority
A = (2, 5,6, "Element A")
B = (3, 1,2, "Element B")
heapq.heappush(q, A) # push the items into the queue
heapq.heappush(q, B)
print(heapq.heappop(q)[3])
# +
def torch_divmod(x, y):
    """Return ``(x // y, x % y)``; works for tensors and plain ints."""
    return x // y, x % y

def prim_pytorch(distance_matrix, not_visited=None):
    """Prim's minimum spanning tree on a dense distance matrix (PyTorch).

    Args:
        distance_matrix: square (n, n) float tensor of edge weights.
        not_visited: unused; kept for interface compatibility with callers.

    Returns:
        An (n, n) tensor that is zero everywhere except at the selected
        tree edges [u, v], where it holds that edge's weight.
        (The original docstring claimed node pairs were returned, which
        did not match the code; debug prints were also removed.)
    """
    if distance_matrix.shape[0] != distance_matrix.shape[1]:
        raise ValueError("distance_matrix needs to be square matrix of edge weights")
    dm = distance_matrix.clone()
    n_vertices = torch.tensor(dm.shape[0])
    visited_vertices = torch.tensor([0])  # grow the tree from vertex 0
    num_visited = torch.tensor(1)
    # exclude self connections by assigning inf to the diagonal
    dm.fill_diagonal_(np.inf)
    mst_edges = torch.zeros(n_vertices, n_vertices, dtype=torch.bool, device=dm.device)
    while num_visited != n_vertices:
        # flat argmin over every edge leaving the visited set
        new_edge = torch.argmin(dm[visited_vertices])
        # decode the flat index into (row-in-visited, column)
        new_edge = torch_divmod(new_edge, n_vertices)
        new_edge = [visited_vertices[new_edge[0]], new_edge[1]]
        mst_edges[new_edge[0], new_edge[1]] = True
        visited_vertices = torch.cat([visited_vertices, new_edge[1].unsqueeze(0)], dim=0)
        # mask both directions of all edges between the tree and the new vertex
        dm[visited_vertices, new_edge[1]] = np.inf
        dm[new_edge[1], visited_vertices] = np.inf
        num_visited += 1
    return (mst_edges*distance_matrix)
# -
# np.sum flattens nested lists: sums all six entries
np.sum([[1,2,3],[1,2,3]])
# +
a = generate_random_distance_matrix(5)
print(a)
# row 2 of the MST edge-weight matrix; its sum is vertex 2's tree weight
a = prim_pytorch(a).tolist()
print(a[2])
np.sum(a[2])
#mst(a.numpy())
#mst(a.numpy())
# +
def reduce_distance_matrix(x, nodes, prefix=True):
    """Shrink a square distance matrix by dropping or selecting nodes.

    Args:
        x: (n, n) distance matrix.
        nodes: node indices; with ``prefix=True`` all but the first are
            removed via numpy, otherwise exactly these rows/columns are
            kept via torch index_select.
        prefix: selects between the two behaviours above.
    """
    if prefix:
        return np.delete(np.delete(x, nodes[1:], 0), nodes[1:], 1)
    # not-visited mode: keep only the requested rows and columns
    ind = torch.tensor(nodes, dtype=torch.long)
    return torch.index_select(torch.index_select(x, 0, ind), 1, ind)
def reduced_mst(prefix, dm, mst_val):
    # NOTE(review): unfinished stub -- it computes locals and implicitly
    # returns None; the experiment cell below calls it with only two
    # arguments, so it cannot run as written. TODO finish or remove.
    chosen = prefix[-1]
    not_visited = [j for j in range(len(dm)) if j not in prefix]
reduce_distance_matrix(generate_random_distance_matrix(5),[2,3], False)
# -
# ## Compare MST to Reduced-MST
#
# The root node of the priority queue computes the MST of the complete graph (n cities).
# When the priority queue is expanded, the next node computes the MST of n-1 cities.
#
# Here we compare two alternatives for estimating the MST starting from n to 1:
# 1. exact MST computation
# 2. removing the chosen node and edges from the last MST
#
# +
n = 5
mst_vals = [0 for _ in range(n)]
rmst_vals = [0 for _ in range(n)]
# NOTE(review): this experiment sketch does not run as written: `_mst` is
# undefined, `rmst` is used before assignment, reduced_mst takes 3 args but
# gets 2, and `mst = _mst(x)` shadows the mst() function above.
for trial in range(1):
    x = generate_random_distance_matrix(n)
    mst = _mst(x)
    prefix = []
    for chosen in range(n,1,-1):
        exact_mst = _mst(x)
        rmst = reduced_mst(chosen, rmst)
        prefix.append(chosen)
        not_visited = [j for j in range(n) if j not in prefix]
        x = reduce_distance_matrix(x,not_visited,False)
print(x)
# +
def convert_distance_matrix_to_batched_edges(distance_matrix):
    """Convert batched dense distance matrices to Kruskal edge lists.

    The original cell ended in a dangling ``torch.`` (a SyntaxError); this
    implements the contract the docstring promised.

    Args:
        distance_matrix: batch of distance matrices, shape [batch, n, n].

    Returns:
        weights_and_edges: shape (batch_size, n * (n - 1) / 2, 3), where
        weights_and_edges[b][i] = [weight_i, node1_i, node2_i] for edge i.
    """
    batch_size, n, _ = distance_matrix.shape
    # the strict upper triangle enumerates each undirected edge exactly once
    rows, cols = torch.triu_indices(n, n, offset=1)
    weights = distance_matrix[:, rows, cols].unsqueeze(-1)
    edges = torch.stack((rows, cols), dim=1).to(distance_matrix.dtype)
    edges = edges.unsqueeze(0).expand(batch_size, -1, -1)
    return torch.cat((weights, edges), dim=-1)
kruskals_cpp_pytorch()
# +
import random
np_time = 0
torch_time = 0
# time the numpy-delete reduction (prefix mode) against torch index_select
for i in range(5,20):
    prefix = random.sample(range(i), random.sample(range(1,i), 1)[0])
    not_visited = [j for j in range(i) if j not in prefix]
    a = generate_random_distance_matrix(i)
    s = time.time()
    reduce_distance_matrix(a,prefix, True)
    d = time.time()
    reduce_distance_matrix(a,not_visited, False)
    torch_time += (time.time() - d)
    np_time += (d-s)
print(torch_time)
print(np_time)
# -
# --no_progress_bar --graph_size 20 --not_prune --annealing 0.005 --epsilon 25 --alpha 1.5 --dynamic_weighting --exp_name bs200eps25alpha15ann005 --epoch_size 128000 --n_epochs 100 --batch_size 200
#
#
# random prefix of random length from 0..9, and its complement
prefix = random.sample(range(10), random.sample(range(1,10), 1)[0])
print(prefix)
not_visited = [j for j in range(10) if j not in prefix]
not_visited
import random
# +
l = []
for i in range(100000):
    l.append(torch.tensor(i))
# compare torch.tensor(list-of-tensors) vs torch.stack for assembling
s = time.time()
a = torch.tensor(l)
print(time.time()-s)
s = time.time()
b = torch.stack(l)
print(time.time()-s)
# -
import torch
import kruskals_cpp
n = 4
# example weights for the 6 edges of K4
weights = np.array([0.7601073, -0.20460297, -0.4689217, -0.5127163,
-1.9022679, 1.1506207])
# triu of a (n-1) x n grid with k=1 yields exactly the n*(n-1)/2 (i < j)
# vertex pairs of the complete graph
vertices = np.triu_indices(n=n-1, m=n, k=1)
# rows of [weight, node1, node2] -- the layout kruskals_cpp expects (TODO confirm)
weights_and_edges = np.array(
[list(e) for e in zip(weights, vertices[0], vertices[1])])
# +
class A:
    """Toy class: two class-level constants plus one instance attribute."""
    one = 1
    two = 2

    def __init__(self, x):
        self.x = x

class B:
    """Toy class; the original ended in a dangling ``self.`` (a SyntaxError).

    Completed with the obvious attribute assignment.
    """

    def __init__(self, param):
        self.param = param
# -
| .ipynb_checkpoints/visualize_tree_search-Copy1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cvnd_1]
# language: python
# name: conda-env-cvnd_1-py
# ---
# # LSTM Structure and Hidden State
#
# We know that RNNs are used to maintain a kind of memory by linking the output of one node to the input of the next. In the case of an LSTM, for each piece of data in a sequence (say, for a word in a given sentence),
# there is a corresponding *hidden state* $h_t$. This hidden state is a function of the pieces of data that an LSTM has seen over time; it contains some weights and, essentially, represents a kind of memory for the data that the LSTM has already seen. So, for an LSTM that is looking at words in a sentence, the hidden state of the LSTM will change based on each new word it sees. And, we can use the hidden state to predict the next word in a sequence or help identify the type of word in a language model, and lots of other things!
#
#
# ### LSTMs in Pytorch
#
# To create and train an LSTM, you have to know how to structure the inputs, and hidden state of an LSTM.
#
# In PyTorch, an LSTM expects all of its inputs to be 3D tensors, as follows:
# * The first axis is the number of sequences of input data (a dimension of 20 could represent 20 input sequences)
# * The second represents the number of sequences that will be processed in a batch of data (i.e. a dimension of 10 for a batch size of 10 input sequences)
# * The third represents the number of inputs to process at a time, for example: 1 for processing one word at a time
#
# These will become clearer in the example in this notebook. This and the following notebook are modified versions of [this PyTorch LSTM tutorial](https://pytorch.org/tutorials/beginner/nlp/sequence_models_tutorial.html#lstm-s-in-pytorch).
#
# Let's take a simple example and say we have a batch_size of 1, for processing one sentence. If we want to run the sequence model over one sentence "Giraffes in a field", our input should look like this `1x4` row vector:
#
# \begin{align}\begin{bmatrix}
# \text{Giraffes }
# \text{in }
# \text{a }
# \text{field}
# \end{bmatrix}\end{align}
#
# With an additional 2nd dimension of size 1 to indicate that this one sentence is one batch.
#
# If we wanted to process this sentence one word at a time, the 1st axis will also have a size of 1.
#
# Next, let's see an example of one LSTM that is designed to look at a sequence of 4 values (numerical values since those are easiest to create and track) and generate 3 values as output; you are encouraged to change these input/hidden-state sizes to see the effect on the structure of the LSTM!
# +
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
# %matplotlib inline
torch.manual_seed(2) # so that random variables will be consistent and repeatable for testing
# -
# ### Define a simple LSTM
#
#
# **A note on hidden and output dimensions**
#
# The `hidden_dim` and size of the output will be the same unless you define your own LSTM and change the number of outputs by adding a linear layer at the end of the network, ex. fc = nn.Linear(hidden_dim, output_dim).
# +
# define an LSTM with an input dim of 4 and hidden dim of 3
# this expects to see 4 values as input and generates 3 values as output
input_dim = 4
hidden_dim = 3
lstm = nn.LSTM(input_size=input_dim, hidden_size=hidden_dim)
# make 5 input sequences of 4 random values each
inputs_list = [torch.randn(1, input_dim) for _ in range(5)]
print('inputs: \n', inputs_list)
print('\n')
# initialize the hidden state
# (1 layer, 1 batch_size, 3 outputs)
# first tensor is the hidden state, often called h0
# second tensor initializes the cell memory, c0
hidden = (torch.randn(1, 1, hidden_dim),
          torch.randn(1, 1, hidden_dim))
# step through the sequence one element at a time.
for i in inputs_list:
    # after each step, hidden contains the hidden state
    # view reshapes to (seq_len=1, batch=1, input_dim) as nn.LSTM expects
    out, hidden = lstm(i.view(1, 1, -1), hidden)
    print('out: \n', out)
    print('hidden: \n', hidden)
# -
# You should see that the output and hidden Tensors are always of length 3, which we specified when we defined the LSTM with `hidden_dim`.
# ### All at once
#
# A for loop is not very efficient for large sequences of data, so we can also, **process all of these inputs at once.**
#
# 1. concatenate all our input sequences into one big tensor, with a defined batch_size
# 2. define the shape of our hidden state
# 3. get the outputs and the *most recent* hidden state (created after the last word in the sequence has been seen)
#
#
# The outputs may look slightly different due to our differently initialized hidden state.
# +
# turn inputs into a tensor with 5 rows of data
# add the extra 2nd dimension (1) for batch_size
inputs = torch.cat(inputs_list).view(len(inputs_list), 1, -1)
# print out our inputs and their shape
# you should see (number of sequences, batch size, input_dim)
print('inputs size: \n', inputs.size())
print('\n')
print('inputs: \n', inputs)
print('\n')
# initialize the hidden state
hidden = (torch.randn(1, 1, hidden_dim),
          torch.randn(1, 1, hidden_dim))
# get the outputs and hidden state (the whole sequence in one call)
out, hidden = lstm(inputs, hidden)
print('out: \n', out)
print('hidden: \n', hidden)
# -
# ### Next: Part of Speech
#
# Now that you have an understanding of the input and output size for an LSTM, next let's define our own model to tag parts of speech (nouns, verbs, determinants), include an LSTM and a Linear layer to define a desired output size, *and* finally train our model to create a distribution of class scores that associates each input word with a part of speech.
| 2_4_LSTMs/.ipynb_checkpoints/1. LSTM Structure-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Text in Python
#
# Für die computergestützte Textanalyse sind Texte zunächst nur eine Aneinanderreihung von Buchstaben oder genauer: Zeichen. Diese Art von Text, die ohne Formatierung wie Schriftart, Schritgröße oder Fettungen auskommt, wird als “plain text” bezeichnet. Plain text erhält man etwa, wenn man ein Word-Dokument als TXT-Datei mit der Endung .txt speichert. Der entsprechende Datentyp in Python heißt `str`, von “string” (Zeichenkette).
#
# Texte werden in unterschiedlichen Kodierungen gespeichert, die festlegen, wie die einzelnen Zeichen (etwa „a“, „á“, aber auch „道“) für den Computer in 0 und 1 umgewandelt werden. Die zeitgemäße Kodierung ist UTF-8, die dem Unicode-Standard folgt. Textdateien sollten für die Arbeit in Python immer in dieser Kodierung gespeichert werden.
#
# Um in Python mit Texten arbeiten zu können, muss man sie erst einmal aus einer Datei laden und z.B. in einer Variable speichern. Dabei muss man die Datei nicht die ganze Zeit geöffnet lassen. Nachdem der Text in einer Variable gespeichert ist, kann (und sollte) man die Datei wieder schließen. Die Arbeitsschritte sind also:
#
# 1. Datei öffnen,
# 2. Inhalt einlesen,
# 3. Datei schließen,
# 4. Inhalt analysieren.
# Ein Hinweis zu Dateipfaden: Wenn man in Python eine Datei öffnen will, muss man den Pfad der Datei angeben. Statt eines vollständigen Pfads wie "`C:\Users\me\Documents\Text.txt`" kann man auch einen Pfad relativ vom IPython Notebook angeben. Wenn die Datei im gleichen Verzeichnis liegt wie das Notebook, ist dies einfach der Dateiname, also z.B. "`Text.txt`". Wenn die Datei in einem Unterverzeichnis liegt, muss man dieses voranstellen, z.B. "`Daten\Text.txt`". Ein *übergeordnetes* Verzeichnis erreicht man mit "`..`", also z.B. "`..\Daten\Text.txt`".
#
# Da sich die Pfadtrenner zwischen MacOS X und Linux ("`/`") und Windows ("`\`") unterscheiden, lassen sie sich nicht ohne weiteres übernehmen. Python hat daher die Funktion `path.join()`, die je nach Betriebssystem den richtigen Trenner verwendet:
from os import path
filepath = path.join('..', 'Daten', 'Rede_Jugend_forscht.txt')
# MacOS, Linux: '../Daten/Rede_Jugend_forscht.txt'
# Windows: '..\\Daten\\Rede_Jugend_forscht.txt'
# The surrounding text mandates UTF-8 files; pass the encoding explicitly so
# reading does not depend on the platform default (e.g. cp1252 on Windows).
textfile = open(filepath, encoding='utf-8')
text = textfile.read()
textfile.close()
# In Python gibt es auch einen Mechanismus, bei dem eine Datei nur so lange geöffnet bleibt, wie sie gebraucht wird. Anschließend wird sie automatisch wieder geschlossen:
# Explicit UTF-8 here too, for the same platform-default reason as above.
with open(filepath, encoding='utf-8') as textfile:
    text = textfile.read()
# Nun ist der gesamte Text als sehr lange Zeichenkette in einer Variable gespeichert:
len(text)
# Man kann sich zur Kontrolle den Anfang des Textes ausgeben lassen:
sample = text[0:200]
sample
# In dieser unleserlichen Ansicht steht die Zeichenfolge `\n` für einen Zeilenumbruch. Ein Zeilenumbruch ist für Python auch nur ein Zeichen. Da es aber nicht ohne weiteres dargestellt werden kann, wird es hier durch ein solches Spezialkommando symbolisiert.
#
# Eine leserliche Ansicht erhält man mit dem `print()`-Befehl:
print(sample)
# Für die Analyse sind in der Regel nicht Text als Ganzes relevant, sondern kleinere Einheiten wie z.B. Zeilen oder Wörter. Dabei hängt es von der Fragestellung ab, welche Einheit betrachtet werden soll. Zur Veranschaulichung betrachten wir den Text zeilenweise:
sample.splitlines()
# Der Text ist nun in eine Liste aus kürzeren Zeichenketten zerlegt, eine leere Zeichenkette steht dabei für eine Leerzeile bzw. einen Absatz.
#
# Für die Analyse kann es sinnvoll sein, die Anrede auszuschließen. Näherungsweise kann man in diesem Beispiel die Anrede an besonders kurzen Zeilen erkennen. Über einen Filter kann man diese Zeilen herausfinden:
lines = text.splitlines()
[line for line in lines if len(line) < 80]
# Der bereinigte Text sollte dementsprechend nur die längeren Zeilen enthalten:
cleaned_text = [line for line in lines if len(line) >= 80]
cleaned_text[0]
# In der Regel ist die interessierende Analyseeinheit das Wort. Näherungsweise kann man den Text in Wörter zerlegen, indem man ihn an den Leerzeichen trennt:
words = sample.split()
words[0:20]
# Das Ergebnis zeigt, dass Satzzeichen hier nicht berücksichtigt werden und weiter am Wort „kleben“. Die folgenden Einheiten zeigen daher bessere Wege, Texte in Wörter zu zerlegen.
| 00_Python/Text in Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={} tags=[]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# + [markdown] papermill={} tags=[]
# # Youtube - Extract transcript from video ID
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/Youtube/Youtube_Extract_transcript_from_video_ID.ipynb" target="_parent"><img src="https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHRleHQgaWQ9InN0cmluZyIgdHJhbnNmb3JtPSJtYXRyaXgoMS4wIDAuMCAwLjAgMS4wIDIyOC4wIDU0LjUpIiBmb250LWZhbWlseT0iQ29tZm9ydGFhLVJlZ3VsYXIsIENvbWZvcnRhYSIgZm9udC1zaXplPSI4MDAiIHRleHQtZGVjb3JhdGlvbj0ibm9uZSIgZmlsbD0iI2ZmZmZmZiIgeD0iMS4xOTk5OTk5OTk5OTk5ODg2IiB5PSI3MDUuMCI+bjwvdGV4dD4KIDwvZGVmcz4KIDx1c2UgaWQ9Im4iIHhsaW5rOmhyZWY9IiNzdHJpbmciLz4KPC9zdmc+Cg=="/></a>
# + [markdown] papermill={} tags=[]
# **Tags:** #youtube #transcript #video
# + [markdown] papermill={} tags=[]
# Author: <NAME> (<EMAIL>)
# + [markdown] papermill={} tags=[]
# ## Input
# + [markdown] papermill={} tags=[]
# ### Install packages
# + papermill={} tags=[]
# !pip install youtube_transcript_api
# + [markdown] papermill={} tags=[]
# ### Import library
# + papermill={} tags=[]
from youtube_transcript_api import YouTubeTranscriptApi
# + [markdown] papermill={} tags=[]
# ### Variables
# + papermill={} tags=[]
video_id = "W02XVb8IbGA"
file_name = "⏱1-minute pitch video for Pioneer"
# + [markdown] papermill={} tags=[]
# ## Model
# + [markdown] papermill={} tags=[]
# ### Extract the transcript in JSON
# + papermill={} tags=[]
# ``transcript`` instead of ``json``: the original name shadowed the stdlib
# json module.
transcript = YouTubeTranscriptApi.get_transcript(video_id)
# + [markdown] papermill={} tags=[]
# ### Parse JSON in text string
# + papermill={} tags=[]
# concatenate every caption segment, separated (and terminated) by a space
para = ""
for segment in transcript:
    para += segment["text"]
    para += " "
para
# + [markdown] papermill={} tags=[]
# ### Count the number of words in the paragraph
# + papermill={} tags=[]
# len(para) counts characters; split on whitespace to count words, which is
# what both the variable name and the heading above promise.
number_of_words = len(para.split())
number_of_words
# + [markdown] papermill={} tags=[]
# ## Output
# + [markdown] papermill={} tags=[]
# ### Save to txt file
# + papermill={} tags=[]
# ``with`` guarantees the handle is closed even if write() raises; explicit
# UTF-8 keeps non-ASCII transcript text safe on platforms whose default
# encoding cannot represent it.
with open(f"{file_name}.txt", "w", encoding="utf-8") as text_file:
    text_file.write(para)
| Youtube/Youtube_Extract_transcript_from_video_ID.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Based on the results that we have obtained from the qiime2 commands, it would be nice to dig in a little further to actually determine the microbes present in each of the balances. To do this, we'll need to get a little more familiar with the Qiime2 Artifact API. To access the underlying data within Qiime2 Artifacts, we'll need to load them into memory, and convert the contents into more familiar Python objects.
# +
import qiime2
import pandas as pd
from skbio import TreeNode
from gneiss.regression import OLSModel
import matplotlib.pyplot as plt
# %matplotlib inline
# Load the table
table_art = qiime2.Artifact.load('88soils_filt100.biom.qza')
table = table_art.view(pd.DataFrame)
# Load the metadata
metadata = pd.read_table('88soils_modified_metadata.txt', index_col=0)
# Obtain the tree
tree_art = qiime2.Artifact.load('ph_tree.nwk.qza')
tree = tree_art.view(TreeNode)
# Unpack the results from the regression
viz = qiime2.Visualization.load('88soils_regression_summary.qzv')
viz.export_data('regression_summary_dir')
# the exported visualisation contains the per-balance model predictions
predicted_balances = pd.read_csv('regression_summary_dir/predicted.csv', index_col=0)
# -
# Now we'll want to see how good of a prediction the regression can perform. We'll visualize the raw heatmap next to the predicted heatmap. The rows and columns will be sorted using the pH information. We'll be using the function `niche_sort` to handle this.
from gneiss.sort import niche_sort
# order samples and OTUs along the pH gradient so structure becomes visible
observed_table = niche_sort(table, metadata.ph)
# Note that the regression we made was performed directly on the balances. However, we want to see how good the prediction was done of the proportions. Fortunately, we can map the results from the balances directly back to the proportions using the inverse ilr transform.
# +
from skbio.stats.composition import ilr_inv
from gneiss.balances import balance_basis
basis, nodes = balance_basis(tree)
ids = [n.name for n in tree.tips()]
# map predicted balances back to proportions via the inverse ILR transform
predicted_table = ilr_inv(predicted_balances.T, basis)
predicted_table = pd.DataFrame(predicted_table, columns=ids,
                               index=predicted_balances.columns)
# align rows/columns with the observed table for side-by-side heatmaps
predicted_table = predicted_table.reindex(index=observed_table.index,
                                          columns=observed_table.columns)
# -
# Now we have obtained the predicted proportions, let's see how these compare with the raw proportions.
from skbio.stats.composition import closure
import seaborn as sns
fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(15, 5))
# closure() renormalises raw counts to proportions for a fair comparison
sns.heatmap(closure(observed_table.T), robust=True, ax=ax1, cmap='Reds')
sns.heatmap(predicted_table.T, robust=True, ax=ax2, cmap='Reds')
ax1.set_title('Observed proportions')
ax1.set_xticks([])
ax1.set_yticks([])
ax2.set_xticks([])
ax2.set_yticks([])
ax1.set_xlabel('Samples')
ax1.set_ylabel('OTUs')
ax2.set_title('Predicted proportions')
ax2.set_xlabel('Samples')
ax2.set_ylabel('OTUs')
# From this, it is clear that the linear regression on balances can capture the overall trends of OTUs vs pH. Ecologically, this makes sense. Bacteria tend to prefer to live in specific ranges of pH. So it isn't entirely surprising that microbial abundances can be predicted from pH. At the same time, the pattern was not obviously apparent until linear regressions on balances were applied.
# In short, applying linear regressions to balances are useful for studying gradients.
| ipynb/88soils/88soils-python-tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import scipy as sc
import math
from datetime import timedelta, datetime
from operator import attrgetter
import copy_plottrajectoriesfile as cpt
import matplotlib.animation as animation
import imageio #to make .gif out of multiple .png
import matplotlib.pyplot as plt
import scipy.io as sio #to load matlab file
import xarray as xr
from shapely.geometry import Point, Polygon
import pandas as pd
file=xr.open_dataset('nc_trajectory_files/17_0deg_CD015_konsole.nc')
file['time'][10000,:]
len(file['time'])
# index of the first NaT (missing) timestamp for particle 1
np.min(np.where(np.isnat(file['time'][1,:])))
# particle age: release time minus the last valid sample time
age_particles = [(file['time'][i,0]-file['time'][i,np.min(np.where(np.isnat(file['time'][i,:])))]) for i in range(len(file['time']))]
# NOTE(review): the original used '+' here, but two datetime64 values cannot
# be added (numpy raises); '-' matches the comprehension above.
age_particles = [(file['time'][i,0].values-file['time'][i,np.min(np.where(np.isnat(file['time'][i,:])))-1].values) for i in range(1000,1005)]
age_particles
np.datetime64('2009-01-01') - np.datetime64('2008-01-01')
# NOTE(review): `mat_boundaries` is used here but only `mat` is loaded on
# the next line -- presumably it was loaded in an earlier session; confirm.
coords = [(mat_boundaries['lat_pol4'][0][i],mat_boundaries['lon_pol4'][0][i]) for i in range(0,len(mat_boundaries['lon_pol4'][0]))]
mat = sio.loadmat('polygons_natacha_list.mat')
# presumably drops the closing vertex that duplicates the first one -- verify
list_coords=coords[:-1]
type(list_coords[0])
# +
#(mat['list'][0][:-2])
# +
# Create Point objects
p1 = Point(24.952242, 60.1696017)
p2 = Point(24.976567, 60.1612500)
# Create a Polygon
coords = [(24.950899, 60.169158), (24.953492, 60.169158), (24.953510, 60.170104), (24.950958, 60.169990)]
poly = Polygon(coords)
# -
poly
# rebuild the polygon from the matlab boundary coordinates loaded above
poly = Polygon(list_coords)
poly
# %matplotlib qt
(file['lat'][0][3])
# +
def Try(i):
    """Collect the (lat, lon) pair of particle ``i`` at every timestep."""
    return [(file['lat'][i][j], file['lon'][i][j])
            for j in range(len(file['lat'][i]))]

# one coordinate list per particle
coords_particle = [Try(i) for i in range(len(file['lat']))]
# -
p3, p8 = [], []
# particles 3 and 8 as shapely Points over the first 361 timesteps
for i in range(0,361):
    p3.append(Point(coords_particle[2][i]))
    p8.append(Point(coords_particle[7][i]))
# first timestep at which particle 8 lies inside the polygon
for i in range(0,361):
    if(p8[i].within(poly)):
        print(i)
        break
import pandas as pd
deg_0=pd.read_csv('results_beaching/01_18_0deg_cay1.csv')
# strip the trailing time-of-day part and keep just the date
deg_0_time=[datetime.strptime(day[:-19], '%Y-%m-%d') for day in deg_0['time'].values]
filename = "nc_trajectory_files/18_40deg_cay1.nc"
file = xr.open_dataset(filename)
# +
"""
Create Point objects for each timestep for each particle
"""
nb_particles = len(file['lat'])

def GetAllTimeCoords(i):
    """Return the (lat, lon) trajectory of particle ``i`` over all timesteps."""
    return [(file['lat'][i][j], file['lon'][i][j])
            for j in range(len(file['lat'][i]))]
# -
coords_particle = [[] for i in range(nb_particles)]
for particle in range(nb_particles):
    print(particle)  # progress indicator: one line per particle
    coords_particle[particle] = GetAllTimeCoords(particle)
import pandas as pd
# %matplotlib inline
# load beaching results for the different windage / drag configurations
deg_0_cay1=pd.read_csv('results_beaching/02_18/02_18_0deg_cay1.csv')
random_cay1=pd.read_csv('results_beaching/02_18/02_18_random_cay1.csv')
deg_40_cay1=pd.read_csv('results_beaching/02_18/02_18_40deg_cay1.csv')
deg_0_cd004=pd.read_csv('results_beaching/02_18/02_18_0deg_CD0.04.csv')
random_cd004=pd.read_csv('results_beaching/02_18/02_18_random_CD0.04.csv')
#deg_0_cd01=pd.read_csv('results_beaching/02_18/02_18_0deg_CD0.1.csv')
random_cd01=pd.read_csv('results_beaching/02_18/02_18_random_CD0.1.csv')
# %matplotlib qt
# for each run: parse times, then bar-plot beaching counts per day of month
random_cd01['time']=random_cd01['time'].astype("datetime64")
random_cd01['time'].groupby(random_cd01["time"].dt.day).count().plot(kind="bar")
# %matplotlib qt
random_cd004['time']=random_cd004['time'].astype("datetime64")
random_cd004['time'].groupby(random_cd004["time"].dt.day).count().plot(kind="bar")
deg_0_cd004['time']=deg_0_cd004['time'].astype("datetime64")
deg_0_cd004['time'].groupby(deg_0_cd004["time"].dt.day).count().plot(kind="bar")
deg_0_cay1['time']=deg_0_cay1['time'].astype("datetime64")
deg_0_cay1['time'].groupby(deg_0_cay1["time"].dt.day).count().plot(kind="bar")
random_cay1['time']=random_cay1['time'].astype("datetime64")
random_cay1['time'].groupby(random_cay1["time"].dt.day).count().plot(kind="bar")
deg_40_cay1['time']=deg_40_cay1['time'].astype("datetime64")
deg_40_cay1['time'].groupby(deg_40_cay1["time"].dt.day).count().plot(kind="bar")
from parcels import (FieldSet, AdvectionRK4, BrownianMotion2D, plotTrajectoriesFile, Field,
ParticleSet, JITParticle, Variable, ErrorCode)
# %matplotlib inline
plotTrajectoriesFile('nc_trajectory_files/local/maroubraa_obs.nc')
import xarray as xr
yp = xr.open_dataset('nc_trajectory_files/local/maroubra_obs.nc')
yp
yp['lon'][4]
import matplotlib.pyplot as plt
plt.hist2d(yp['lon'][0].data,yp['lat'][0].data, range= [[xmin, xmax], [ymin, ymax]])
| imos_current_data/analysis_imos_current_data/Untitled2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from bokeh.plotting import figure, output_file, show
train_df = pd.read_csv("train.csv")
df2 = pd.DataFrame([[45, 100],[90,10],[90,10],[80,15],[95,10],[90,15],[75,10],[95,20],[40,90],[60,20]],columns=['x','y'])
train_df.dropna(inplace=True,axis=0,how='any')
new_train_df = train_df.append(df2)
#train_df.y.fillna(train_df.y.max(),inplace=True)
Y_train = train_df.y
X = train_df.x
m = Y_train.shape[0]
new_Y_train = new_train_df.y
new_X = new_train_df.x
new_m = new_Y_train.shape[0]
print("Y shape is : " , m)
train_df.head()
#Plotting data
output_file("plot.html")
p = figure()
p.sizing_mode = 'scale_width'
p.circle(train_df.x,train_df.y,size=10,color='red',alpha=0.5)
show(p)
def computeCost(X, y, theta):
    """Return the linear-regression cost J(theta) = (1/2m) * sum((X@theta - y)^2).

    Parameters
    ----------
    X : ndarray, shape (m, n) — design matrix (bias column included by caller).
    y : ndarray, shape (m,) — targets.
    theta : ndarray, shape (n,) — parameter vector.
    """
    n_samples = y.shape[0]
    residuals = np.matmul(X, theta) - y
    return (1 / (2 * n_samples)) * np.sum(residuals ** 2)
def gradientDescent(X, y, theta, alpha, num_iter):
    """Run ``num_iter`` steps of batch gradient descent.

    Returns the final parameter vector and the per-iteration cost history
    (computed with the sibling ``computeCost``).
    """
    print("initial theta shape is : ", theta)
    cost_history = []
    n_samples = y.shape[0]
    for _ in range(num_iter):
        residual = np.dot(X, theta) - y
        gradient = np.dot(residual, X) / n_samples
        theta = theta - alpha * gradient
        cost_history.append(computeCost(X, y, theta))
    print("theta is : ", theta)
    print("Cost is : ", computeCost(X, y, theta))
    return theta, cost_history
X = np.transpose(X)
new_X = np.transpose(new_X)
X = np.c_[np.ones(m),X]
new_X = np.c_[np.ones(new_m),new_X]
print(X.shape)
print(np.sum(X))
print(np.isnan(Y_train).any())
theta = np.ones(X.shape[1])
new_theta = np.ones(new_X.shape[1])
iterations = 100000
alphas = [0.0005]
print(theta.shape)
print("old : " ,computeCost(X,Y_train,theta))
print("new : " ,computeCost(new_X,new_Y_train,new_theta))
# +
new_J_dic = {}
new_theta_dic = {}
J_dic = {}
theta_dic = {}
for i,alpha in enumerate(alphas):
theta = np.ones(X.shape[1])
theta,J = gradientDescent(X,Y_train,theta,alpha,iterations)
J_dic[alpha] = J
theta_dic[alpha] = theta
for i,alpha in enumerate(alphas):
new_theta = np.ones(X.shape[1])
new_theta,new_J = gradientDescent(new_X,new_Y_train,new_theta,alpha,iterations)
new_J_dic[alpha] = new_J
new_theta_dic[alpha] = new_theta
# -
#Plotting J against iterations data
output_file("Jplot.html")
p1 = figure()
p1.sizing_mode = 'scale_width'
for color, alpha in zip(['red','green','blue','yellow','purple'],alphas):
p1.line(list(range(1,iterations+1)),J_dic[alpha],line_width=2,color=color,
alpha=0.8,muted_color=color, muted_alpha=0.2,legend=str(alpha))
p1.xaxis.axis_label = "Iterations"
p1.yaxis.axis_label = "J"
p1.legend.location = "top_right"
p1.legend.click_policy="mute"
show(p1)
test_df = pd.read_csv("test.csv")
test_df.loc[test_df.y.isnull()]
X_test = np.transpose(test_df.x)
m_test = test_df.y.shape[0]
X_test = np.c_[np.ones(m_test),X_test]
predict = np.dot(X_test,theta_dic[0.0005])
correct = [(i,j) for i, j in zip(test_df.y.values,predict)]
print(correct)
def slope_intercept(x, y, tht_dic):
    """Return (slope, intercept) of the fitted line, each rounded to 2 decimals.

    The values are read straight from the learned parameter vector stored
    under the 0.0005 learning-rate key: tht_dic[0.0005][1] is the slope
    (weight) and tht_dic[0.0005][0] is the intercept (bias term).

    Note: ``x`` and ``y`` are kept for backward compatibility but are unused —
    the original body computed a mean-based intercept and then immediately
    overwrote it with the learned bias, so that dead computation was removed.
    """
    slope = round(tht_dic[0.0005][1], 2)
    b = round(tht_dic[0.0005][0], 2)
    return slope, b
slope,b = slope_intercept(X, Y_train,theta_dic)
new_slope, new_b = slope_intercept(new_X,new_Y_train,new_theta_dic)
reg_line = [np.dot(slope,x) + b for x in train_df.x]
new_reg_line = [np.dot(new_slope,x) + new_b for x in new_train_df.x]
#Plotting data
output_file("plot2.html")
p2 = figure()
p2.sizing_mode = 'scale_width'
p2.circle(train_df.x,train_df.y,size=10,color='red',alpha=0.5)
p2.line(train_df.x,reg_line,line_width=5)
p2.line(new_train_df.x,new_reg_line,color='green',line_width=5)
show(p2)
| Linear Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
from tqdm.auto import tqdm
# %load_ext autoreload
# %autoreload 2
from helpers import get_df_from_logdir
import pandas as pd
import networkx as nx
import plotly.express as px
import numpy as np
import json
from matplotlib import pyplot as plt
from causal_util.helpers import unpickle
pd.set_option('display.max_columns', None)
# -
# /home/sergei/ray_results/rl_const_sparsity_obs_space_env_sm5_linear_with_lagrange_dual_sparsity_gridsearch_eye_coeff
config_name = 'rl_const_sparsity_obs_space_env_sm5_linear_with_lagrange_dual_sparsity_per_component_gridsearch_eye_coeff'
path = f"/home/sergei/ray_results/{config_name}/"
trials = os.listdir(path)
trials = [os.path.join(path, trial) for trial in trials]
trials = sorted(filter(os.path.isdir, trials))
print(f"Got {len(trials)} trials")
# # Reading trial data
def get_all_epochs(trial):
    """Load the full per-epoch dataframe for one Ray Tune trial directory.

    Returns ``None`` when the trial has no readable log (missing file or a
    corrupted JSON line), so callers can filter out failed trials.
    """
    try:
        return get_df_from_logdir(trial, do_tqdm=False)
    except (FileNotFoundError, json.JSONDecodeError):
        return None
dfs = {trial: get_all_epochs(trial) for trial in tqdm(trials)}
df = list(dfs.values())[0]
data = [df[~pd.isna(df.epochs)].iloc[-1] for df in dfs.values() if hasattr(df, 'epochs')]
df = pd.DataFrame(data)
df.to_csv('sm5_linear_lagrange_per_component.csv', index=False)
df = pd.read_csv('sm5_linear_lagrange_per_component.csv')
list(df.columns)
# plot_bar('epochs')
plt.rcParams.update({
#'font.size': 8,
'text.usetex': False,
# 'text.latex.preamble': r'\usepackage{amsfonts}',
'font.family' : 'normal',
'font.weight' : 'normal',
'font.size' : 20
})
plt.hist(df.epochs)
plt.title("Sparsity w.r.t. $c_{eye}$")
plt.scatter(df['config/_gin__eyecoeff__loguniform'], df['metrics/nnz'])
plt.xscale('log')
plt.xlabel("$c_{eye}$")
plt.ylabel("Number of edges")
plt.axhline(15, ls='--')
df_fine = df[df['metrics/nnz'] <= 15]
def find_trial(row, path=path):
    """Return the folder name of the trial matching ``row``.

    A directory matches when it contains both the row's ``trial_id`` and its
    ``experiment_tag`` as substrings; exactly one match is required.
    """
    candidates = os.listdir(path)
    matches = [row.trial_id in name and row.experiment_tag in name
               for name in candidates]
    assert sum(matches) == 1
    match_idx = np.where(matches)[0][0]
    return candidates[match_idx]
from path import Path
import gin
import sparse_causal_model_learner_rl.config
from causal_util import load_env, get_true_graph
gin.bind_parameter('Config._unpickle_skip_init', True)
gin.enter_interactive_mode()
trial_names = [find_trial(row) for _, row in df_fine.iterrows()]
# +
class model_():
    """Minimal container for a learned causal model.

    Attributes:
        Mf: feature -> feature adjacency/probability matrix.
        Ma: action -> feature adjacency/probability matrix.
    """

    def __init__(self, Mf, Ma):
        self.Mf, self.Ma = Mf, Ma
def adhoc_model(l):
    """Get an instance with Mf, Ma attributes.

    Rebuilds a model_ container from a learner's unpickled state: the first
    n_f columns of the switch-probability matrix are the feature->feature
    part (Mf), the remainder the action->feature part (Ma). Also binds the
    gin parameters the plotting helpers expect.
    """
    # Number of features; one extra slot when the decoder appends a batch
    # number as an additional pseudo-feature.
    n_f = l._unpickled_state['config'].get('feature_shape')[0]
    if gin.query_parameter('ModelDecoder.add_batch_number'):
        n_f += 1
    # Extra feature keys (e.g. reward/done) carried alongside the features.
    fadd = l._unpickled_state['config'].get('additional_feature_keys')
    print(n_f, fadd)
    # Configure the visualization helpers so later plot/graph calls see the
    # same additional features and a fixed color scale.
    gin.bind_parameter('graph_for_matrices.additional_features', fadd)
    gin.bind_parameter('plot_model.vmin', 0.0)
    gin.bind_parameter('plot_model.singlecolor_palette', True)
    gin.bind_parameter('plot_model.additional_features', fadd)
    # Switch probabilities, transposed so rows index output features.
    # NOTE(review): assumed to be a torch tensor (detach/cpu below) — confirm.
    P = l._unpickled_state['trainables_weights']['model']['model.switch.probas'].T
    print(P.shape)
    # Split into feature-to-feature and action-to-feature blocks.
    Mf = P[:, :n_f].detach().cpu().numpy()
    Ma = P[:, n_f:].detach().cpu().numpy()
    model = model_(Mf, Ma)
    return model
# -
def model_from_trial(trial):
    """Load the newest readable checkpoint of a trial and build its model.

    Tries checkpoints from the highest epoch downward, skipping corrupted
    ones; raises ValueError if none can be read. Side effect: re-parses the
    checkpoint's gin config into the global gin state.

    Returns:
        (learner, model_): the unpickled learner and the model_ container
        extracted from it via adhoc_model.
    """
    trial_path = os.path.join(path, trial)
    checkpoints = [x for x in os.listdir(trial_path) if x.startswith('checkpoint')]
    # Checkpoint directories are named 'checkpoint_<epoch>'; sort newest first.
    checkpoint_epochs = {x: int(x.split('_')[1]) for x in checkpoints}
    checkpoints_rev = sorted(checkpoints, key=lambda x: checkpoint_epochs[x], reverse=True)
    l = None
    for checkpoint in checkpoints_rev:
        print("Trying checkpoint", checkpoint)
        try:
            ckpt_path = os.path.join(trial_path, checkpoint, 'checkpoint')
            l = unpickle(ckpt_path)
            break
        except Exception as e:
            # Broad catch is deliberate: a partially written checkpoint can
            # fail in many ways; just fall back to the next-older one.
            print("Can't read", checkpoint)
    if l is None:
        raise ValueError("No checkpoints for trial", trial)
    # Restore the run's gin configuration before building the model, since
    # adhoc_model queries/binds gin parameters.
    gin.parse_config(l._unpickled_state['gin_config'])
    model = adhoc_model(l)
    return l, model
from itertools import permutations
from scipy.spatial.distance import cosine
# +
def permute_model(m, perm, thr=0.9):
    """Reorder the features of model ``m`` by ``perm`` and binarize at ``thr``.

    Mf is permuted on both axes (rows and columns index features); Ma only on
    its rows (columns index actions). Returns the two boolean matrices.
    """
    reordered_Mf = m.Mf[perm, :][:, perm]
    reordered_Ma = m.Ma[perm, :]
    return reordered_Mf > thr, reordered_Ma > thr
def distance(m, m_true, perm, thr=0.9):
    """Cosine distance between a permuted, binarized model and the ground truth.

    Both the feature matrix (Mf) and the action matrix (Ma) are compared as
    flattened boolean vectors; the two cosine distances are summed.
    """
    perm_Mf, perm_Ma = permute_model(m, perm, thr=thr)
    true_Mf = m_true.Mf > thr
    true_Ma = m_true.Ma > thr
    dist_f = cosine(perm_Mf.flatten(), true_Mf.flatten())
    dist_a = cosine(perm_Ma.flatten(), true_Ma.flatten())
    return dist_f + dist_a
# -
def check_last_feature(m, thr=0.9):
    """Assert that the last feature is an isolated self-loop.

    After binarizing at ``thr``, the last feature must not feed into or
    receive edges from any other feature, must keep its own self-edge, and
    must receive no action edges. Raises AssertionError otherwise.

    Bug fix: the original body hard-coded the 0.9 threshold and silently
    ignored the ``thr`` parameter; it is now honoured (the default is
    unchanged, so existing callers see identical behavior).
    """
    Mf = m.Mf > thr
    Ma = m.Ma > thr
    # No outgoing edges from the last feature to the others...
    assert np.sum(Mf[-1, :-1]) == 0
    # ...no incoming edges from the others...
    assert np.sum(Mf[:-1, -1]) == 0
    # ...the self-edge is present...
    assert Mf[-1, -1] == True, Mf[-1, -1]
    # ...and no action influences it.
    assert np.sum(Ma[-1, :]) == 0
def visualize_trial(trial):
    """Visualize one trial's learned model and compare it to the true graph.

    Loads the trial's newest checkpoint, renders the learned model and graph
    (via IPython's `display`, available in the notebook), then searches all
    feature permutations (excluding the fixed last feature) for the one that
    best matches the environment's true graph.

    Returns:
        The smallest cosine distance found over all permutations.
    """
    l, m = model_from_trial(trial)
    # Sanity check: the last feature must be an isolated self-loop.
    check_last_feature(m)
    env = load_env()
    G = get_true_graph(env)
    # Wrap the true adjacency matrices in the same container type.
    m_true = model_(G.As, G.Aa)
    fig = l.visualize_model(m)
    display(fig)
    gvz_m = l.visualize_graph(model=m)[1]
    gvz_m_true = l.visualize_graph(model=m_true)[1]
    display(gvz_m)
    n_f = m.Mf.shape[0]
    # Brute-force over permutations of all features except the last
    # (which is pinned by check_last_feature). Factorial cost — only
    # feasible because n_f is small here.
    perms = list(permutations(range(n_f - 1)))
    distances = [distance(m, m_true, perm) for perm in perms]
    idx = np.argmin(distances)
    print("Best cosine", distances[idx])
    print("Best permutation", perms[idx])
    # Re-render the learned graph under the best-matching permutation
    # (cast to float so the plotting helper gets numeric matrices).
    best_f, best_a = permute_model(m, perms[idx])
    display(l.visualize_graph(model=model_(1. * best_f, 1. * best_a))[1])
    return distances[idx]
cosines = []
for trial in trial_names:
print(f"===== {trial} ======")
dst = visualize_trial(trial)
cosines.append(dst)
plt.title("Cosine distance to best matching\ngraph with nnz=15")
plt.hist(cosines)
plt.scatter(df_fine['config/_gin__eyecoeff__loguniform'], cosines)#df['metrics/nnz']))
plt.xscale('log')
plt.scatter(df_fine['metrics/nnz'], cosines)
l, _ = model_from_trial(trial_names[0])
env = load_env()
G = get_true_graph(env)
m_true = model_(G.As, G.Aa)
l.visualize_graph(model=m_true)[1]
_ = l.visualize_model(model=m_true)
nx.draw(nx_m_true, pos = nx.spring_layout(nx_m_true))
from networkx.algorithms import isomorphism
match = isomorphism.GraphMatcher(nx_m, nx_m_true)
dir(match)
match.subgraph_is_isomorphic()
| causal_analysis/sm5_linear_lagrange_per_component.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings('ignore')
pd.pandas.set_option('display.max_columns',0)
data=pd.read_csv("Data_For_Final_Model.csv",index_col=0)
data.sample(5,random_state=1)
X=data.drop(['Outcome'],axis=1)
y=data.Outcome
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.30,random_state=2,
stratify=y)
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
print(f"Yes And No Value In Tain(y_Train)")
print(y_train.value_counts())
print(f"Yes And No Value In Tain(y_Test)")
print(y_test.value_counts())
# ### LogisticRegression
from sklearn.linear_model import LogisticRegression
log_model=LogisticRegression(random_state=34)
log_model.fit(X_train,y_train)
prediction=log_model.predict(X_test)
from sklearn.metrics import classification_report
print(classification_report(y_test,prediction))
print(log_model.score(X_train,y_train))
print(log_model.score(X_test,y_test))
# ### RandomForest
from sklearn.ensemble import RandomForestClassifier
rfm=RandomForestClassifier(random_state=3)
rfm.fit(X_train,y_train)
prediction=rfm.predict(X_test)
print(classification_report(y_test,prediction))
print(rfm.score(X_train,y_train))
print(rfm.score(X_test,y_test))
# ### Support Vector Machine
from sklearn.svm import SVC
classifier=SVC(random_state=4)
classifier.fit(X_train,y_train)
prediction=classifier.predict(X_test)
print(classification_report(y_test,prediction))
print(classifier.score(X_train,y_train))
print(classifier.score(X_test,y_test))
# #### Summary Of Three Diff. ML Algorithm
#
# 1. LogisticRegression Accuracy is 73% .
# 2. RandomForest Accuracy is 89% .
# 3. Support Vector Machine Accuracy is 78% .
#
# From the above analysis we can conclude that, without any hyper-parameter tuning, the RandomForest model already reaches 89% accuracy. So let's use the RandomForest model for hyper-parameter tuning to try to achieve even better accuracy.
# #### Hyperparameter tuning Method
# It is a Process of which help us findout the best parameter for our model.
# 1. GridSearchCV
# 2. RandomizedSearchCV
# 3. Bayesian Optimization-Automate Hyperparameter Tuning (Hyperopt)
# 4. Sequential model based optimization
# 5. Optuna-Automate Hyperparameter Tuning
# 6. Genetic Algorithm
#
print("Default Parameter Used By RandomForest \n",rfm.get_params())
rfm=RandomForestClassifier(random_state=3)
from sklearn.model_selection import RandomizedSearchCV
rfm_grid={
'n_estimators':np.arange(60,150,10),
'criterion':['gini','entropy'],
'max_features' : ["auto", "sqrt", "log2"]
}
print(rfm_grid)
rfm_randomcv=RandomizedSearchCV(estimator=rfm,param_distributions=rfm_grid,n_jobs=-1,
random_state=20,verbose=1)
rfm_randomcv.fit(X_train,y_train)
rfm_randomcv.best_params_
# The parameters found above match the defaults, so no further parameter tuning is needed.
prediction=rfm_randomcv.predict(X_test)
print(classification_report(y_test,prediction))
print(rfm_randomcv.score(X_train,y_train))
print(rfm_randomcv.score(X_test,y_test))
X.columns
test_data_1=[[0.352941,0.387097,0.510204,0.119565,0.169675,0.367925,0.286095,0.333333
]]
test_data_2=[[0.588235,0.296774,0.622449,0.271739,0.132371,0.525157,0.331853,0.583333]]
test_data_3=[[0.058824,0.393548,0.346939,0.239130,0.132371,0.191824,0.048423,0.000000]]
test_data_4=[[0.294118,0.593548,0.591837,0.239130,0.132371,0.443396,0.249667,0.800000]]
test_data_5=[[0.588235,0.800000,0.510204,0.239130,0.132371,0.622642,0.203909,0.216667]]
print(rfm_randomcv.predict(test_data_1))
print(rfm_randomcv.predict(test_data_2))
print(rfm_randomcv.predict(test_data_3))
print(rfm_randomcv.predict(test_data_4))
print(rfm_randomcv.predict(test_data_5))
X_test.head()
y_test.head()
#Row Number 5 (False)
test_data=[[1,89,66,23,94,28.1,0.167,21]]
print(rfm_randomcv.predict(test_data))
#Row Number 179(True)
test_data=[[0,129,110,46,130,67.1,0.319,26]]
print(rfm_randomcv.predict(test_data))
#Row Number 501 (True)
test_data=[[6,154,74,32,193,29.3,0.839,39]]
print(rfm_randomcv.predict(test_data))
#Row Number (False)
test_datat=[[2,82,52,22,115,28.5,1.699,25]]
print(rfm_randomcv.predict(test_data))
rfm_randomcv
#Model Saving
import pickle
with open('Diabetes_Model','wb') as f:
pickle.dump(rfm_randomcv,f)
#Testing the model
with open('Diabetes_Model','rb') as f:
mod=pickle.load(f)
x=mod.predict([[6,154,74,32,193,29.3,0.839,39]])
x
| .ipynb_checkpoints/Model-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="IhQhotF3atGI" colab_type="text"
# # Housing Market
# + [markdown] id="0tMlS0ZMatGQ" colab_type="text"
# ### Introduction:
#
# This time we will create our own dataset with fictional numbers to describe a housing market. Since we are going to generate random data, don't try to read meaning into the numbers.
#
# ### Step 1. Import the necessary libraries
# + id="YvTEPiATatGU" colab_type="code" colab={}
import pandas as pd
import numpy as np
# + [markdown] id="HWPrKVoLatHa" colab_type="text"
# ### Step 2. Create 3 differents Series, each of length 100, as follows:
# 1. The first a random number from 1 to 4
# 2. The second a random number from 1 to 3
# 3. The third a random number from 10,000 to 30,000
# + id="GXv1g3bVatHk" colab_type="code" colab={}
x1 = pd.Series(np.random.randint(1, high=5,size=100))
x2 = pd.Series(np.random.randint(1, high=4,size=100))
x3 = pd.Series(np.random.randint(10000, high=30001,size=100))
# + [markdown] id="Z4aFT7y_atIt" colab_type="text"
# ### Step 3. Let's create a DataFrame by joinning the Series by column
# + id="SAKkggyeatIz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="246da127-2bfd-4a09-c80c-771133e39d9f"
df = pd.concat([x1, x2, x3], axis=1)
df.head()
# + [markdown] id="6iymHhMtatJL" colab_type="text"
# ### Step 4. Change the name of the columns to bedrs, bathrs, price_sqr_meter
# + id="DloBWnwzatJO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 204} outputId="f247b3da-a2cf-4eb2-9707-5fd869e5890b"
df.rename(columns = {0: 'bedrs', 1: 'bathrs', 2: 'price_sqr_meter'}, inplace=True)
df.head()
# + [markdown] id="IIroRBbgatJq" colab_type="text"
# ### Step 5. Create a one column DataFrame with the values of the 3 Series and assign it to 'bigcolumn'
# + id="XPdFI4ElatJs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="f3b3e72c-e27a-49cb-86cf-e13148ccabff"
# join concat the values
bigcolumn = pd.concat([x1, x2, x3], axis=0)
# it is still a Series, so we need to transform it to a DataFrame
bigcolumn = bigcolumn.to_frame()
print(type(bigcolumn))
# + id="jJh-JOttdoUs" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="5b0029ef-407a-47b6-acc0-2b98391251cf"
bigcolumn
# + [markdown] id="Yk4WgiAuatMh" colab_type="text"
# ### Step 6. Oops, it seems it is going only until index 99. Is it true?
# + id="luFtQN9yatMj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9822273d-45fd-4685-ae00-de4f0f0d10f1"
len(bigcolumn)
# + [markdown] id="qNlq--LZatNX" colab_type="text"
# ### Step 7. Reindex the DataFrame so it goes from 0 to 299
# + id="9-7vqXQvatNa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 419} outputId="34ce9bfc-486b-4681-9fa6-2132308432ca"
bigcolumn.reset_index(drop=True, inplace=True)
bigcolumn
| 05_Merge/Housing Market/my_Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys as sys
import pandas as pd
sys.path.append('/Users/grlurton/Documents/bluesquare/data_pipelines/src')
import audit.dhis as dhis
import audit.completeness as cplt
ds = pd.read_csv('/Users/grlurton/data/dhis/rdc/snis/data_sets.csv')
data_elements_sets = pd.read_csv('/Users/grlurton/data/dhis/rdc/snis/data_elements_sets.csv')
org_units_sets = pd.read_csv('/Users/grlurton/data/dhis/rdc/snis/org_units_report.csv')
org_units_sets.columns = ['OrgUnit_id', 'dataSet_id']
data_elements_sets.columns = ['dataElement_id', 'dataSet_id']
data_elements_org_units_sets = data_elements_sets.merge(org_units_sets)
# -
# %load_ext autoreload
# %autoreload 2
hivdr = dhis.dhis_instance('hivdr', 'grlurton', 'localhost',"")
full_de_cc_hivdr = hivdr.build_de_cc_table()
full_de_cc_hivdr
ihp = dhis.dhis_instance('ihp', 'grlurton', 'localhost')
full_de_cc_ihp = ihp.build_de_cc_table()
# %%time
ihp_reported_values = ihp.get_reported_de()
hivdr_reported_values = hivdr.get_reported_de()
hivdr = ihp
hivdr_reported_values = ihp_reported_values
reported_values_map = cplt.aggr_reported(hivdr_reported_values, ['quarterly'], ['uidlevel2', 'namelevel2'])
reported_values_timeline = cplt.aggr_reported(hivdr_reported_values, ['monthly'], ['uidlevel2', 'namelevel2'])
reported_value_table = cplt.aggr_reported(hivdr_reported_values, ['quarterly'], ['uidlevel2', 'namelevel2','uidlevel3','namelevel3'])
full_de_cc = hivdr.build_de_cc_table()
expectation_full = cplt.make_full_data_expectations(data_elements_sets, org_units_sets, full_de_cc, ihp.orgunitstructure)
de_expectation = cplt.aggr_expectation(expectation_full, 'uidlevel2')
de_availability_map = cplt.make_availability(reported_values_map, de_expectation, 'uidlevel2', 3)
de_availability_timeline = cplt.make_availability(reported_values_timeline, de_expectation, 'uidlevel2', 1)
de_expectation = cplt.aggr_expectation(expectation_full, ['uidlevel2','uidlevel3'])
de_availability_table = reported_value_table.merge(de_expectation, left_on = ['uidlevel3','uid_data_element'], right_on = ['uidlevel3','dataElement_id'])
de_availability_table['value'] = de_availability_table['count'] / (3 * de_availability_table.n_expected)
de_availability_map = de_availability_map.drop(['count', 'n_expected','dataElement_id'], axis = 1)
de_availability_timeline = de_availability_timeline.drop(['count', 'n_expected','dataElement_id'], axis = 1)
de_availability_table = de_availability_table.drop(['count', 'n_expected', 'uidlevel2_y', 'dataElement_id'], axis = 1)
de_availability_map.columns = ['period', 'uidlevel2', 'namelevel2', 'uid_data_element', 'name_data_element', 'value']
de_availability_timeline.columns = ['period', 'uidlevel2', 'namelevel2', 'uid_data_element', 'name_data_element', 'value']
de_availability_table.columns = ['period', 'uidlevel2', 'namelevel2', 'uidlevel3', 'namelevel3', 'uid_data_element', 'name_data_element', 'value']
de_availability_map.to_csv('../../data_projects/ihp/data/IHP_de_availabilty_map.csv',index=False, sep=';')
de_availability_timeline.to_csv('../../data_projects/ihp/data/IHP_de_availability_timeline.csv', index = False, sep=';')
de_availability_table.to_csv('../../data_projects/ihp/data/IHP_de_availability_table.csv', index = False, sep=';')
de_availability_map.to_csv('../../data_projects/hivdr/data/HIVDR_de_availabilty_map.csv',index=False)
de_availability_timeline.to_csv('../../data_projects/hivdr/data/HIVDR_de_availability_timeline.csv', index = False)
de_availability_table.to_csv('../../data_projects/hivdr/data/HIVDR_de_availability_table.csv', index = False)
# +
hivdr = dhis.dhis_instance('hivdr', 'grlurton', 'localhost')
total_patients_data = hivdr.get_data('Yj8caUQs178')
hivdr.get_data('Yj8caUQs178')
| notebooks/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="tOhAux1ubEKG" outputId="727c632b-d755-4ac2-dd1a-9b84c4f1259f"
# And for visualization on Colab install
# # !apt-get install x11-utils > /dev/null 2>&1
# # !pip install pyglet
# # !apt-get install -y xvfb python-opengl > /dev/null 2>&1
# # !pip install gym pyvirtualdisplay > /dev/null 2>&1
# + id="JZV-qP-yay8_"
import random
import gym
#import math
import numpy as np
from collections import deque
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
# + colab={"base_uri": "https://localhost:8080/"} id="i63z1vW0c4Sp" outputId="222984a5-6556-4942-a509-c5a1a639c63f"
## Uncomment if working on Colab
# from pyvirtualdisplay import Display
# display = Display(visible=0, size=(600, 400))
# display.start()
# + id="ikpmIrLyay9B"
EPOCHS = 1000
THRESHOLD = 45
MONITOR = True
# + id="trKmD7d2ay9C"
class DQN():
    """Deep Q-Network agent for a discrete-action Gym environment.

    Uses an experience-replay buffer and an epsilon-greedy policy whose
    epsilon decays per environment step. Relies on the module-level
    constants EPOCHS, THRESHOLD and MONITOR.
    """

    def __init__(self, env_string, batch_size=64):
        """Build the environment and the Q-value network.

        Args:
            env_string: Gym environment id (e.g. 'CartPole-v0').
            batch_size: replay minibatch size.
        """
        self.memory = deque(maxlen=100000)  # replay buffer (oldest dropped)
        self.env = gym.make(env_string)
        input_size = self.env.observation_space.shape[0]
        action_size = self.env.action_space.n
        self.batch_size = batch_size
        self.gamma = 1.0          # undiscounted return
        self.epsilon = 1.0        # start fully exploratory
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        alpha = 0.01              # learning rate
        alpha_decay = 0.01
        # NOTE(review): gym.wrappers.Monitor records episodes to disk; it is
        # deprecated in newer gym releases — confirm the installed version.
        if MONITOR: self.env = gym.wrappers.Monitor(self.env, 'data/' + env_string, force=True)
        # Q-network: state -> one Q-value per action.
        self.model = Sequential()
        self.model.add(Dense(24, input_dim=input_size, activation='tanh'))
        self.model.add(Dense(48, activation='tanh'))
        self.model.add(Dense(action_size, activation='linear'))
        # NOTE(review): `lr`/`decay` are legacy Keras optimizer arguments
        # (renamed in TF 2.x) — confirm against the installed TF version.
        self.model.compile(loss='mse', optimizer=Adam(lr=alpha, decay=alpha_decay))

    def remember(self, state, action, reward, next_state, done):
        """Store one transition in the replay buffer."""
        self.memory.append((state, action, reward, next_state, done))

    def choose_action(self, state, epsilon):
        """Epsilon-greedy action: random with prob. epsilon, else argmax Q."""
        if np.random.random() <= epsilon:
            return self.env.action_space.sample()
        else:
            return np.argmax(self.model.predict(state))

    def preprocess_state(self, state):
        """Reshape an observation to a (1, 4) batch for the network.

        NOTE(review): the 4 is hard-coded — assumes a 4-dimensional
        observation (true for CartPole); confirm for other environments.
        """
        return np.reshape(state, [1, 4])

    def replay(self, batch_size):
        """Fit the network on one random minibatch of stored transitions."""
        x_batch, y_batch = [], []
        minibatch = random.sample(self.memory, min(len(self.memory), batch_size))
        for state, action, reward, next_state, done in minibatch:
            # Current predictions, with the taken action's target replaced by
            # the Bellman estimate (terminal states use the raw reward).
            y_target = self.model.predict(state)
            y_target[0][action] = reward if done else reward + self.gamma * np.max(self.model.predict(next_state)[0])
            x_batch.append(state[0])
            y_batch.append(y_target[0])
        self.model.fit(np.array(x_batch), np.array(y_batch), batch_size=len(x_batch), verbose=0)

    def train(self):
        """Run up to EPOCHS episodes; stop early once the 100-episode mean
        survival time reaches THRESHOLD.

        Returns:
            List of the running 100-episode mean score after each episode.
        """
        scores = deque(maxlen=100)  # survival times of the last 100 episodes
        avg_scores = []
        for e in range(EPOCHS):
            state = self.env.reset()
            state = self.preprocess_state(state)
            done = False
            i = 0  # survival time (steps) of this episode
            while not done:
                action = self.choose_action(state, self.epsilon)
                next_state, reward, done, _ = self.env.step(action)
                next_state = self.preprocess_state(next_state)
                self.remember(state, action, reward, next_state, done)
                state = next_state
                self.epsilon = max(self.epsilon_min, self.epsilon_decay * self.epsilon)  # decrease epsilon
                i += 1
            scores.append(i)
            mean_score = np.mean(scores)
            avg_scores.append(mean_score)
            # Solved: the 100-episode average crossed the threshold (and at
            # least 100 episodes were run, so the average is meaningful).
            if mean_score >= THRESHOLD and e >= 100:
                print('Ran {} episodes. Solved after {} trials ✔'.format(e, e - 100))
                return avg_scores
            if e % 100 == 0:
                print('[Episode {}] - Mean survival time over last 100 episodes was {} ticks.'.format(e, mean_score))
            # Learn from replayed experience once per episode.
            self.replay(self.batch_size)
        print('Did not solve after {} episodes 😞'.format(e))
        return avg_scores
# + colab={"base_uri": "https://localhost:8080/"} id="4STstW_7ay9E" outputId="b9a26bf3-dd8c-4b4f-c92c-9b6a75333acf"
env_string = 'CartPole-v0'
agent = DQN(env_string)
scores = agent.train()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="28iEbGwzay9F" outputId="e9ab9177-f5eb-472f-dead-64a17be16cf7"
import matplotlib.pyplot as plt
plt.plot(scores)
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="178ufOPzay9F" outputId="3836158e-b35a-471d-c59f-a2c0460712b8"
agent.model.summary()
# + id="5E_2klZ3ay9G"
agent.env.close()
# + id="b0TrCnMbay9H"
| Chapter_12/DQNCartPole.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from linkedin_scraper_2 import Company
from selenium import webdriver
import time
import os
import sqlite3
import pandas as pd
import re
import csv
# +
# Check if database exists, and if it doesn't set it up:
conn = sqlite3.connect("linkedIn_Companies.db")
cur = conn.cursor()
cur.execute("PRAGMA foreign_keys = ON")
cur.execute('''CREATE TABLE IF NOT EXISTS company_info(
company_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
company_name TEXT NOT NULL)''')
cur.execute(''' CREATE TABLE IF NOT EXISTS employee_info(
employee_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
company_id INTEGER NOT NULL,
name TEXT NOT NULL,
profile_picture TEXT,
location TEXT,
role TEXT,
FOREIGN KEY(company_id) REFERENCES company_info(company_id) ON DELETE CASCADE
)
''')
# +
# Ask user for linkedin_url
# use this link: https://www.linkedin.com/company/Util-
# DON'T FORGET TO HIT ENTER
print("Please enter the LinkedIn URL of the company you wish to search for:\n")
companyURL = input()
# -
driver = webdriver.Chrome()
company = Company(companyURL, driver = driver, scrape=False, get_employees = True)
# LOGIN ON NEW SCREEN BEFORE MOVING FORWARD #
#Search (press play)
company = Company(companyURL, driver = driver, scrape=True, get_employees = True)
# Temporary fix for formatting issue with returning pd.read_sql_query...
# enter company_id based on information above, then you will see a formatted table of the information
company_id = 1;
pd.read_sql_query("SELECT * FROM employee_info WHERE company_id={0}".format(company_id), conn)
# +
# if you want to search another company, you can update the companyURL by re-entering a new url in the input box
# then skip the cell that initiates a new driver, and press play in the cell titled "search" --
# Then you won't have to login again.
# -
| UTIL_DE_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.quantum_info.operators import Operator
from qiskit import BasicAer
from qiskit import execute
from qiskit.tools.visualization import plot_histogram
from IBMQuantumExperience.IBMQuantumExperience import IBMQuantumExperience
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit.tools.monitor import job_monitor
from math import *
# +
controls = QuantumRegister(2)
circuit = QuantumCircuit(controls)
cx = Operator([
[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0]
])
circuit.unitary(cx, [0, 1], label='cx')
# -
circuit.draw(output='mpl')
token = open("../ibm_token.txt","r").read().strip()
IBMQ.enable_account(token)
# +
# Submit the custom-gate circuit to real IBM Quantum hardware and plot the
# measured counts.
shots = 1024
provider = IBMQ.get_provider()
print(provider.backends())
backend = provider.get_backend('ibmq_athens')
# Fix: the circuit built above is named `circuit`; `qc` was never defined
# and raised a NameError here.
# NOTE(review): `circuit` has no classical registers/measurements — the
# hardware run may need measure() calls added; confirm intent.
job_exp = execute(circuit, backend=backend, shots=shots)
job_monitor(job_exp)
# -
result_exp = job_exp.result()
print(result_exp)
counts_exp = result_exp.get_counts(circuit)
# Fix: `counts` (simulator results) was never computed in this notebook,
# so plot only the hardware counts.
plot_histogram(counts_exp)
jobID = job_exp.job_id()
print('JOB ID: {}'.format(jobID))
| custom gate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="7Zl22fWWN2PT" outputId="51cdf239-13c4-4dae-f03d-cc56fbe1d120"
rows = 5
# Descending number triangle: the first line prints `rows` copies of rows,
# each following line one fewer copy of the next smaller number.  Every
# printed value is followed by a space, and each row ends with an extra
# space before the newline (the original terminated rows with print(" ")).
for num in range(rows, 0, -1):
    print(f"{num} " * num + " ")
# + id="VBEuC9kiOXcp" outputId="21af1eb4-db35-489e-e955-e61417f3a894" colab={"base_uri": "https://localhost:8080/"}
rows = 5
# Same descending triangle shape, but every cell prints the constant `rows`
# instead of the row's own counter.
for num in range(rows, 0, -1):
    print(f"{rows} " * num + " ")
# + id="-W1BLmOTOZ3n" outputId="93262c23-28a2-402d-d778-020785a09024" colab={"base_uri": "https://localhost:8080/"}
rows = 5
# Ascending variant: row widths grow from 0 to `rows`; the first pass
# (num == 0) emits only the blank terminator line.
for num in range(rows + 1):
    print(f"{rows} " * num + " ")
| Pattern_codes/Pattern_Python_5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
import pandas as pd
import bebi103
import bokeh.io
bokeh.io.output_notebook()
# +
# Measurement data (one row per strike) plus a small hand-built table of
# per-frog metadata.  The merge joins on the columns the two frames share
# (presumably just 'ID' — confirm against the CSV header), attaching
# age/size/species to every measurement row.
df = pd.read_csv('frog_tongue_adhesion.csv', comment='#')
data_dict = {'ID': ['I', 'II', 'III', 'IV'],
             'age': ['adult', 'adult', 'juvenile', 'juvenile'],
             'SVL (mm)': [63, 70, 28, 31],
             'weight (g)': [63.1, 72.7, 12.7, 12.7],
             'species': ['cross', 'cross', 'cranwelli', 'cranwelli']}
df = df.merge(pd.DataFrame(data=data_dict))
# -
# ## Jitter plots

# The same ten-color palette and the same hover tooltips recur in nearly
# every call below; define them once so the literals don't have to be
# repeated (and kept in sync) a dozen times.
PALETTE = ['#4e79a7', '#f28e2b', '#e15759', '#76b7b2', '#59a14f',
           '#edc948', '#b07aa1', '#ff9da7', '#9c755f', '#bab0ac']
TOOLTIPS = [('impact force (mN)', '@{impact force (mN)}'),
            ('trial number', '@{trial number}'),
            ('adhesive force (mN)', '@{adhesive force (mN)}')]

# Basic vertical jitter plot, one color per category.
p = bebi103.viz.jitter(data=df,
                       cats='ID',
                       val='impact force (mN)',
                       p=None,
                       horizontal=False,
                       x_axis_label='ID',
                       y_axis_label='impact force (mN)',
                       title=None,
                       plot_height=300,
                       plot_width=400,
                       palette=PALETTE,
                       width=0.4,
                       order=None,
                       val_axis_type='linear',
                       show_legend=False,
                       color_column=None,
                       tooltips=None)
bokeh.io.show(p)

# Color glyphs by the 'age' column instead of by category.
p = bebi103.viz.jitter(data=df,
                       cats='ID',
                       val='impact force (mN)',
                       p=None,
                       horizontal=False,
                       x_axis_label='ID',
                       y_axis_label='impact force (mN)',
                       title=None,
                       plot_height=300,
                       plot_width=400,
                       palette=PALETTE,
                       width=0.4,
                       order=None,
                       val_axis_type='linear',
                       show_legend=False,
                       color_column='age',
                       tooltips=None)
bokeh.io.show(p)

# Same as the first plot, with a legend.
p = bebi103.viz.jitter(data=df,
                       cats='ID',
                       val='impact force (mN)',
                       p=None,
                       horizontal=False,
                       x_axis_label='ID',
                       y_axis_label='impact force (mN)',
                       title=None,
                       plot_height=300,
                       plot_width=400,
                       palette=PALETTE,
                       width=0.4,
                       order=None,
                       val_axis_type='linear',
                       show_legend=True,
                       color_column=None,
                       tooltips=None)
bokeh.io.show(p)

# Legend plus coloring by trial number.
p = bebi103.viz.jitter(data=df,
                       cats='ID',
                       val='impact force (mN)',
                       p=None,
                       horizontal=False,
                       x_axis_label='ID',
                       y_axis_label='impact force (mN)',
                       title=None,
                       plot_height=300,
                       plot_width=400,
                       palette=PALETTE,
                       width=0.4,
                       order=None,
                       val_axis_type='linear',
                       show_legend=True,
                       color_column='trial number',
                       tooltips=None)
bokeh.io.show(p)

# Horizontal orientation with hover tooltips.
p = bebi103.viz.jitter(data=df,
                       cats='ID',
                       val='impact force (mN)',
                       p=None,
                       horizontal=True,
                       x_axis_label='ID',
                       y_axis_label='impact force (mN)',
                       title=None,
                       plot_height=300,
                       plot_width=400,
                       palette=PALETTE,
                       width=0.4,
                       order=None,
                       val_axis_type='linear',
                       show_legend=False,
                       color_column=None,
                       tooltips=TOOLTIPS)
bokeh.io.show(p)

# Horizontal with a log value axis.
p = bebi103.viz.jitter(data=df,
                       cats='ID',
                       val='impact force (mN)',
                       p=None,
                       horizontal=True,
                       x_axis_label='ID',
                       y_axis_label='impact force (mN)',
                       title=None,
                       plot_height=300,
                       plot_width=400,
                       palette=PALETTE,
                       width=0.4,
                       order=None,
                       val_axis_type='log',
                       show_legend=False,
                       color_column=None,
                       tooltips=TOOLTIPS)
bokeh.io.show(p)

# Multi-level categories: group by age, then ID.
p = bebi103.viz.jitter(data=df,
                       cats=['age', 'ID'],
                       val='impact force (mN)',
                       p=None,
                       horizontal=False,
                       x_axis_label='ID',
                       y_axis_label='impact force (mN)',
                       title=None,
                       plot_height=300,
                       plot_width=400,
                       palette=PALETTE,
                       width=0.4,
                       order=None,
                       val_axis_type='linear',
                       show_legend=False,
                       color_column=None,
                       tooltips=None)
bokeh.io.show(p)

# Multi-level categories, horizontal, colored by trial number.
p = bebi103.viz.jitter(data=df,
                       cats=['age', 'ID'],
                       val='impact force (mN)',
                       p=None,
                       horizontal=True,
                       y_axis_label=None,
                       x_axis_label='impact force (mN)',
                       title=None,
                       plot_height=300,
                       plot_width=400,
                       palette=PALETTE,
                       width=0.4,
                       order=None,
                       val_axis_type='linear',
                       show_legend=False,
                       color_column='trial number',
                       tooltips=None)
bokeh.io.show(p)

# ## ECDFs

# "Dot" ECDFs with an explicit category order.
p = bebi103.viz.ecdf_collection(data=df,
                                cats='ID',
                                val='impact force (mN)',
                                p=None,
                                x_axis_label='impact force (mN)',
                                y_axis_label=None,
                                title=None,
                                plot_height=300,
                                plot_width=400,
                                palette=PALETTE,
                                order=['I', 'III', 'IV', 'II'],
                                val_axis_type='linear',
                                show_legend=True,
                                tooltips=TOOLTIPS)
bokeh.io.show(p)

# Formal (staircase) ECDFs.
p = bebi103.viz.ecdf_collection(data=df,
                                cats='ID',
                                val='impact force (mN)',
                                p=None,
                                formal=True,
                                x_axis_label='impact force (mN)',
                                y_axis_label=None,
                                title=None,
                                plot_height=300,
                                plot_width=400,
                                palette=PALETTE,
                                order=['I', 'III', 'IV', 'II'],
                                val_axis_type='linear',
                                show_legend=True,
                                tooltips=TOOLTIPS)
bokeh.io.show(p)

# Single ECDF of all measurements, colored by category.
p = bebi103.viz.colored_ecdf(data=df,
                             cats='ID',
                             val='impact force (mN)',
                             p=None,
                             x_axis_label='impact force (mN)',
                             y_axis_label=None,
                             title=None,
                             plot_height=300,
                             plot_width=400,
                             palette=PALETTE,
                             order=['I', 'III', 'IV', 'II'],
                             val_axis_type='linear',
                             show_legend=True,
                             tooltips=TOOLTIPS)
bokeh.io.show(p)

# ## Box plots

# Horizontal box plot with a log value axis.
p = bebi103.viz.box(data=df,
                    cats='ID',
                    val='impact force (mN)',
                    p=None,
                    horizontal=True,
                    y_axis_label='ID',
                    x_axis_label='impact force (mN)',
                    title=None,
                    plot_height=300,
                    plot_width=400,
                    palette=PALETTE,
                    width=0.4,
                    order=None,
                    val_axis_type='log')
bokeh.io.show(p)

# Two-level categories.
p = bebi103.viz.box(data=df,
                    cats=['age', 'ID'],
                    val='impact force (mN)',
                    p=None,
                    horizontal=True,
                    y_axis_label='ID',
                    x_axis_label='impact force (mN)',
                    title=None,
                    plot_height=300,
                    plot_width=400,
                    palette=PALETTE,
                    width=0.4,
                    order=None,
                    val_axis_type='linear')
bokeh.io.show(p)

# Three-level categories.
p = bebi103.viz.box(data=df,
                    cats=['age', 'ID', 'SVL (mm)'],
                    val='impact force (mN)',
                    p=None,
                    horizontal=True,
                    y_axis_label='ID',
                    x_axis_label='impact force (mN)',
                    title=None,
                    plot_height=300,
                    plot_width=400,
                    palette=PALETTE,
                    width=0.4,
                    order=None,
                    val_axis_type='linear')
bokeh.io.show(p)
| tests/test_viz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # heatmaps
# with Formula 1 data from https://ergast.com/mrd/db
# +
# %autosave 0
from tools import *
# Local Ergast F1 database dump (zip) and the project's plotting helper;
# both `ErgastZIP` and `Plot` come from the local `tools` module.
f1 = ErgastZIP(ERGAST_ZIP)
plot = Plot()
# Season analysed throughout this notebook.
season = 2019
# -
# ## lights out
# Per-driver qualifying results: join driver, round/season and circuit names
# onto the raw qualifying table, take each driver's best time across the
# three sessions, and order by season/round/position.
qtimes = (
    f1.qualifying
    .join(f1.drivers['driver'], on='id_driver')
    .join(f1.races[['id_circuit', 'round', 'season']], on='id_race')
    .join(f1.circuits['circuit'], on='id_circuit')
    .assign(seconds=lambda df: df[['q1', 'q2', 'q3']].min(axis=1))
    [['season', 'round', 'circuit', 'driver', 'pos', 'seconds']]
    .sort_values(['season', 'round', 'pos'])
    .reset_index(drop=True)
)
qtimes
# +
def qfactor(data, season, limit=1.07, **kwargs):
    """Heat map of each driver's qualifying time relative to pole, per circuit.

    Rows (drivers) are sorted by mean relative pace and ratios are clipped at
    *limit* so outliers don't wash out the color scale.  Remaining keyword
    arguments are forwarded to plot.heat.
    """
    kwargs.setdefault('cmap', 'inferno_r')
    kwargs.setdefault('colorbar', True)
    kwargs.setdefault('title', f"{season} qualifying time / pole time")
    season_data = data[data['season'].eq(season)]
    # Preserve calendar order of the circuits (pivot would sort them).
    circuit_order = season_data['circuit'].unique()
    grid = season_data.pivot(index='driver', columns='circuit', values='seconds')
    grid = grid[circuit_order]
    grid = grid.div(grid.min(axis=0), axis=1).clip(upper=limit)
    grid = grid.loc[grid.mean(axis=1).sort_values().index]
    return plot.heat(grid, **kwargs)

axes = qfactor(qtimes, season)
savepng(axes, 'lights_out')
# -
# ## away we go
# First-lap times: keep only lap 1 of each race, join the descriptive
# columns, and order by season/round/position.
starts = (
    f1.lap_times[lambda df: df['lap'].eq(1)]
    .join(f1.drivers['driver'], on='id_driver')
    .join(f1.races[['id_circuit', 'round', 'season']], on='id_race')
    .join(f1.circuits['circuit'], on='id_circuit')
    [['season', 'round', 'circuit', 'driver', 'pos', 'seconds']]
    .sort_values(['season', 'round', 'pos'])
    .reset_index(drop=True)
)
starts
# +
def firstlaps(data, season, limit=1.20, **kwargs):
    """Heat map of first-lap times relative to the best first lap.

    Reuses qfactor's pipeline with a diverging colormap and a wider clip,
    then replaces the default title.
    """
    kwargs.setdefault('cmap', 'RdYlGn_r')
    heat_axes = qfactor(data, season, limit=limit, **kwargs)
    heat_axes.set_title(f"{season} first lap time / best first lap time")
    return heat_axes

axes = firstlaps(starts, season)
savepng(axes, 'away_we_go')
# -
| books/plot.heat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# things we need for NLP
import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()
from flask import Flask, jsonify, request
from flask_cors import CORS, cross_origin
import pickle
import pandas as pd
import numpy as np
import tensorflow as tf
# -
# Load the preprocessed training artefacts: the stemmed vocabulary and the
# intent labels, both index-aligned with the model's inputs/outputs.
# NOTE: pickle deserialization can execute arbitrary code — only load files
# you generated yourself, never untrusted input.  The `with` block also
# closes the file handle, which the original open(...) call never did.
with open("katana-assistant-data.pkl", "rb") as data_file:
    data = pickle.load(data_file)
words = data['words']
classes = data['classes']
# +
def clean_up_sentence(sentence):
    """Tokenize *sentence* and lower-case + Lancaster-stem every token."""
    tokens = nltk.word_tokenize(sentence)
    return [stemmer.stem(token.lower()) for token in tokens]
# return bag of words array: 0 or 1 for each word in the bag that exists in the sentence
def bow(sentence, words, show_details=True):
    """Encode *sentence* as a 0/1 bag-of-words vector over the vocabulary *words*.

    Position i is set to 1 when words[i] matches a stemmed token of the
    sentence; with show_details every hit is reported on stdout.
    """
    stems = clean_up_sentence(sentence)
    bag = [0] * len(words)
    for stem in stems:
        for index, vocab_word in enumerate(words):
            if vocab_word == stem:
                bag[index] = 1
                if show_details:
                    print ("found in bag: %s" % vocab_word)
    return np.array(bag)
# -
# Smoke-test the featurizer on a sample utterance.  NOTE(review): the
# sentence contains misspellings ("bood pessure"); it is runtime input and
# is left exactly as written — possibly to exercise the stemmer, confirm.
p = bow("Load bood pessure for patient", words)
print (p)
print (classes)
# +
# Use pickle to load in the pre-trained model
# NOTE(review): `global` at module level is a no-op; presumably the intent is
# to share the default TF1 graph with prediction calls made later from Flask
# handler threads — confirm against the training notebook.
global graph
graph = tf.compat.v1.get_default_graph()
# Security note: pickle can execute arbitrary code; only load model files
# you produced yourself.
with open(f'katana-assistant-model.pkl', 'rb') as f:
    model = pickle.load(f)
# -
def classify_local(sentence):
    """Return (intent, probability-as-string) pairs for *sentence*, best first.

    Predictions with probability at or below ERROR_THRESHOLD are dropped.
    """
    ERROR_THRESHOLD = 0.25
    # Single-row DataFrame holding the bag-of-words features.
    features = pd.DataFrame([bow(sentence, words)], dtype=float, index=['input'])
    probabilities = model.predict([features])[0]
    # Keep confident predictions and rank them strongest-first.
    ranked = sorted(
        ((index, prob) for index, prob in enumerate(probabilities) if prob > ERROR_THRESHOLD),
        key=lambda pair: pair[1],
        reverse=True,
    )
    return [(classes[index], str(prob)) for index, prob in ranked]
# Quick sanity checks of the classifier on a few sample utterances (each
# call prints its bag-of-words hits and returns the ranked intents).
classify_local('Hello, good day!')
classify_local('How you can assist me?')
classify_local('Get me to adverse medicine form')
classify_local('Place to log blood pressure')
classify_local('Fetch blood result for patient')
classify_local('Blood pressure monitoring in hospital')
classify_local('Look for hospital to monitor blood pressure')
# +
app = Flask(__name__)
CORS(app)  # allow the browser front end (different origin) to call the API

@app.route("/katana-ml/api/v1.0/assistant", methods=['POST'])
def classify():
    """POST {"sentence": ...} -> JSON list of {"intent", "probability"}.

    Delegates scoring to classify_local (same ERROR_THRESHOLD, filtering and
    strongest-first ordering) instead of duplicating that logic inline, so
    the two code paths cannot drift apart.
    """
    sentence = request.json['sentence']
    return_list = [{"intent": intent, "probability": probability}
                   for intent, probability in classify_local(sentence)]
    response = jsonify(return_list)
    return response

# running REST interface, port=5000 for direct test, port=5001 for deployment from PM2
if __name__ == "__main__":
    app.run(debug=False, host='0.0.0.0', port=5001)
| mlmodels/katana-assistant-endpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/BrunaKuntz/Python-Curso-em-Video/blob/main/Mundo03/Desafio098.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="xolHFjDi_Hj4"
#
# # **Desafio 098**
# **Python 3 - 3º Mundo**
#
# Descrição: Faça um programa que tenha uma função chamada contador(), que receba três parâmetros: início, fim e passo. Seu programa tem que realizar três contagens através da função criada:
#
# a) de 1 até 10, de 1 em 1
# b) de 10 até 0, de 2 em 2
# c) uma contagem personalizada
#
# Link: https://www.youtube.com/watch?v=DCBlt_z2UOE
# + id="8Lylf4Su-6aB"
from time import sleep
def contador(inicio, fim, passo):
    """Print a counted sequence from inicio to fim (inclusive) on one line.

    The sign of *passo* is ignored (its magnitude is used; 0 becomes 1) and
    the counting direction is inferred from inicio vs. fim.  Each number is
    followed by a short pause; the line ends with 'FIM!'.
    """
    step = abs(passo) if passo else 1
    print('=-' * 20)
    print(f'Contagem de {inicio} até {fim} de {step} em {step}')
    if inicio > fim:
        # Count down: extend past fim so it is included, negate the step.
        limite, step = fim - 1, -step
    elif inicio < fim:
        # Count up: extend past fim so it is included.
        limite = fim + 1
    else:
        limite = fim
    for valor in range(inicio, limite, step):
        print(f'{valor} ', end='')
        sleep(0.3)
    print('FIM!')
# main program: two fixed demo counts, then one driven by user input.
contador(1, 10, 1)
contador(10, 0, -2)
print('=-'*20)
print('Agora é a sua vez de personalizar a contagem!')
inicio = int(input('Início: '))
fim = int(input('Fim: '))
passo = int(input('Passo: '))
contador(inicio, fim, passo)
| Mundo03/Desafio098.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (sunpy-release)
# language: ''
# name: sunpy-release
# ---
# In this exercise we are going to show a few basic operations with SunPy Map, and SunPy's VSO client.
import astropy.units as u
import sunpy.map
from sunpy.net import vso
# %matplotlib inline
# ## Downloading Data with VSO
vc = vso.VSOClient()
# Attribute queries: AIA 17.1 nm EUV images, and HMI line-of-sight
# magnetograms served by the JSOC provider.
aia = vso.attrs.Instrument('AIA') & vso.attrs.Wave(17.1*u.nm, 17.1*u.nm)
hmi = vso.attrs.Instrument('HMI') & vso.attrs.Physobs('LOS_magnetic_field') & vso.attrs.Provider('JSOC')
# One-day window sampled every 25 h, so at most one record per instrument.
res = vc.query(vso.attrs.Time('2011-06-07', '2011-06-08'), vso.attrs.Sample(25*u.hour), aia | hmi)
res
# Download the query results (blocks until the transfer finishes).
files = vc.get(res).wait()
files
# ## Making Maps
# Map() over the downloaded file list yields one map per file — here the
# AIA image and the HMI magnetogram (unpacking assumes that file order).
aia, hmi = sunpy.map.Map(files)
aia.data
aia.meta
aia.instrument, hmi.instrument
aia.coordinate_system, hmi.coordinate_system
# ## Plotting Maps
aia.peek(draw_grid=True)
# Symmetric color limits so positive/negative field strengths balance.
hmi.peek(vmin=-1500, vmax=1500)
# Rotate the magnetogram per its metadata with cubic interpolation
# (presumably to put solar north up — confirm).
hmi2 = hmi.rotate(order=3)
hmi2.peek()
# ## Submaps
# Crop by world coordinates (arcseconds from disk center) ...
aia_sub = aia.submap([-1200, -650]*u.arcsec, [50, 600]*u.arcsec)
aia_sub.peek()
# ... and by pixel coordinates.
aia_sub = aia.submap([1000, 3000]*u.pixel, [1500, 3500]*u.pixel)
aia_sub.peek()
# ## More Advanced Plotting with WCSAxes
import matplotlib.pyplot as plt
from sunpy.visualization.wcsaxes_compat import wcsaxes_heliographic_overlay
# +
# Plot the submap on WCS-aware axes and overlay a heliographic (Stonyhurst)
# coordinate grid.
ax = plt.subplot(projection=aia_sub)
im = aia_sub.plot()
overlay = ax.get_coords_overlay('heliographic_stonyhurst')
lon, lat = overlay
lon.coord_wrap = 180.  # wrap longitudes into (-180, 180]
lon.set_axislabel('Solar Longitude [deg]')
lat.set_axislabel('Solar Latitude [deg]')
# Put the overlay ticks on the top/right so they don't collide with the
# image's own axes on the bottom/left.
lon.set_ticks_position('tr')
lat.set_ticks_position('tr')
overlay.grid(lw=2, alpha=1, color='red')
ax.set_title(aia_sub.name, y=1.10)
plt.colorbar(pad=0.14)
| 2016_SPD_Boulder/SunPy/Downloading and Plotting Data - Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.3
# language: julia
# name: julia-1.5
# ---
# $\newcommand{\calf}{{\cal F}}
# \newcommand{\dnu}{d \nu}
# \newcommand{\mf}{{\bf F}}
# \newcommand{\vu}{{\bf u}}
# \newcommand{\ve}{{\bf e}}
# \newcommand{\mg}{{\bf G}}
# \newcommand{\ml}{{\bf L}}
# \newcommand{\mg}{{\bf G}}
# \newcommand{\mi}{{\bf I}}
# \newcommand{\diag}{\mbox{diag}}
# \newcommand{\begeq}{{\begin{equation}}}
# \newcommand{\endeq}{{\end{equation}}}
# $
include("mprnote.jl")
# # Newton's Method in Multiple Precision: C. T. Kelley
#
# This notebook documents the results in
# C. T. Kelley, ***Newton's Method in Mixed Precision***, 2020.<cite data-cite="ctk:sirev20"><a href="newtonmp.html#ctk:sirev20">(Kel20)</cite><br>
#
# As an example we will solve the Chandrasekhar H-equation <cite data-cite="chand"><a href="newtonmp.html#chand">(Cha60)</cite>. This equation, which we describe in detail in the Example section, has a fast $O(N \log(N))$ function evaluation, a Jacobian evaluation that is $O(N^2)$ work analytically and $O(N^2 \log(N))$ with a finite difference. This means that most of the work, if you do things right, is in the LU factorization of the Jacobian.
#
# The difference between double, single, and half precision will be clear in the results from the examples. This notebook has no half-precision computations. Julia does half-precsion in software and that is very slow.
#
# ## Contents
# - [Example: The Chandrasekhar H-Equation](#The-Chandrasekhar-H-Equation): Integral equations example
#
# - [Setting up the notebook](#Setting-up): Install the application.
#
# - [First run of the solver](#Running-the-solver): First solver test
#
# - [How to use the solver](#NSOL): Using nsol.jl.
#
# - [The results in the paper](#The-results-in-the-paper): Running the codes that generated the results
# ## The Chandrasekhar H-Equation
#
#
#
# The example is the mid-point rule discretization of the Chandrasekhar H-equation <cite data-cite="chand"><a href="newtonmp.html#chand">(Cha60)</cite>.
#
#
#
# \begin{equation}
# {\calf}(H)(\mu) = H(\mu) -
# \left(
# 1 - \frac{c}{2} \int_0^1 \frac{\mu H(\mu)}{\mu + \nu} \dnu
# \right)^{-1} = 0.
# \end{equation}
#
#
# The nonlinear operator $\calf$ is defined on $C[0,1]$, the space of
# continuous functions on $[0,1]$.
#
# The equation has a well-understood dependence on the parameter $c$
# <cite data-cite="twm68"><a href="newtonmp.html#twm68">(Mul68)</cite>,
# <cite data-cite="ctk:n1"><a href="newtonmp.html#ctk:n1">(DK80)</cite>.
# The equation has unique solutions at $c=0$
# and $c=1$ and two solutions for $0 < c < 1$. There is a simple fold
# singularity
# <cite data-cite="herb"><a href="newtonmp.html#herb">(Kel87)</cite>
# at $c=1$. Only one
# <cite data-cite="chand"><a href="newtonmp.html#chand">(Cha60)</cite>,
# <cite data-cite="busb"><a href="newtonmp.html#busb">(Bus60)</cite>
# of the two solutions for $0 < c < 1$ is of physical interest
# and that is the one easiest to find numerically. One must do
# a continuation computation to find the other one.
#
# The structure of the singularity is preserved if one discretizes
# the integral with any rule that integrates constants exactly. For
# the purposes of this paper the composite midpoint rule will suffice.
# The $N$-point composite midpoint rule is
# \begin{equation}
# \int_0^1 f(\nu) \dnu \approx \frac{1}{N} \sum_{j=1}^N f(\nu_j)
# \end{equation}
# where $\nu_j = (j - 1/2)/N$ for $1 \le j \le N$. This rule is
# second-order accurate for sufficiently smooth functions $f$. The
# solution of the integral equation is, however, not smooth enough. $H'(\mu)$
# has a logarithmic singularity at $\mu=0$.
#
# The discrete problem is
#
# \begin{equation}
# \mf(\vu)_i \equiv
# u_i - \left(
# 1 - \frac{c}{2N} \sum_{j=1}^N \frac{u_j \mu_i}{\mu_j + \mu_i}
# \right)^{-1}
# =0.
# \end{equation}
#
# One can simplify the approximate integral operator
# and expose some useful structure. Since
#
#
# \begin{equation}
# \frac{c}{2N} \sum_{j=1}^N \frac{u_j \mu_i}{\mu_j + \mu_i}
# = \frac{c (i - 1/2) }{2N} \sum_{j=1}^N \frac{u_j}{i+j -1}.
# \end{equation}
#
#
# hence the approximate integral operator is
# the product of a diagonal matrix and a Hankel matrix and
# one can use an FFT to evaluate that operator with $O(N \log(N))$
# work
# <cite data-cite="golub"><a href="newtonmp.html#golub">(GV96)</cite>.
#
# We can express the approximation of the integral operator in matrix
# form
# \begin{equation}
# (\ml \vu)_i = \frac{c (i - 1/2) }{2N} \sum_{j=1}^N \frac{u_j}{i+j -1}
# \end{equation}
# and compute the Jacobian analytically as
# \begin{equation}
# \mf'(\vu) = \mi - \diag(\mg(\vu))^2 \ml
# \end{equation}
# where
# \begin{equation}
# \mg(\vu)_i = \left(
# 1 - \frac{c}{2N} \sum_{j=1}^N \frac{u_j \mu_i}{\mu_j + \mu_i}
# \right)^{-1}.
# \end{equation}
# Hence the data for the Jacobian is already available after
# one computes $\mf(\vu) = \vu - \mg(\vu)$ and the Jacobian can
# be computed with $O(N^2)$ work.
# We do that in this example and therefore the only $O(N^3)$
# part of the solve is the matrix factorization.
#
# One could also approximate the Jacobian with forward differences.
# In this case one approximates the $j$th column $\mf'(\vu)_j$
# of the Jacobian with
# \begin{equation}
# \frac{\mf(\vu + h {\tilde \ve}_j) - \mf(\vu)}{h}
# \end{equation}
# where ${\tilde \ve}_j$ is a unit vector in the $j$th coordinate
# direction and $h$ is a suitable difference increment. If one computes
# $\mf$ in double precision with unit roundoff $u_d$, then
# $h =O(\| \vu \| \sqrt{u_d})$ is a reasonable choice
# <cite data-cite="ctk:roots"><a href="newtonmp.html#ctk:roots">(Kel95)</cite>.
# Then
# the error in the Jacobian is $O(\sqrt{u_d}) = O(u_s)$ where $u_s$ is
# unit roundoff in single precision. The cost of a finite difference Jacobian
# in this example is $O(N^2 \log(N))$ work.
#
# The analysis in <cite data-cite="ctk:sirev20"><a href="newtonmp.html#ctk:sirev20">(Kel20)</cite> suggests that there
# is no significant difference in the nonlinear iteration
# from either the choice of analytic or finite difference Jacobians
# or the choice of single or double precision for the linear solver. This notebook has the data used in that paper
# to support that assertion. You will be able to duplicate the results and play with the codes.
#
# Half precision is another story and we have those codes for you, too.
#
#
# ## Setting up
#
#
# You need to install these packages with __Pkg__. I assume you know how to do that.
#
# - SIAMFANLEquations
# - PyPlot
# - LinearAlgebra
# - Printf
# - IJulia (You must have done this already or you would not be looking at this notebook.)
#
# To render the LaTeX you'll need to run the first markdown cell in the notebook. That sets up the commands you need to render the LaTeX correctly.
#
# The directory is a Julia project. So all you should need to do to get going is to run the first code cell in this notebook. That cell has one line
#
# ```
# include("mprnote.jl")
# ```
#
# Then you can do a simple solve and test that you did it right by typing
# ```Julia
# hout=heqtest()
# ```
# which I will do in the next code cell. Now ...
#
# **Make absolutely sure that you are in the MPResults directory**. Then ...
#
# ## Running the solver
# The codes solve the H-equation and plot/tabulate the results in various ways. __heqtest__ prints on column of the tables in Chandrasekhar's book. It calls __nsold.jl__ . I've shown you the output from the solver but that is not important for now. You get the details on the solver in the next section.
# Solve the H-equation and print iteration statistics alongside the
# reference values tabulated in Chandrasekhar's book.
heqtest()
# heqtest.jl calls the solver and harvests some iteration statistics. The two columns of numbers are the results from <cite data-cite="chand"><a href="newtonmp.html#chand">(Cha60)</cite> (page 125). The iteration statistics are from nsold.jl, the solver.
# ## NSOL
# The solver is ```nsol``` from my package [SIAMFANLEquations.jl](#https://github.com/ctkelley/SIAMFANLEquations.jl)
# <cite data-cite="ctk:fajulia"><a href="siamfa.html#ctk:fajulia">(Kel20d)</cite>. That package and an
# [IJulia Notebook](#https://github.com/ctkelley/NotebookSIAMFANL)
# <cite data-cite="ctk:notebooknl"><a href="siamfa.html#ctk:notebooknl">(Kel20b)</cite>
# support my upcoming book __Solving Nonlinear Equations with Iterative Methods:
# Solvers and Examples in Julia__
# <cite data-cite="ctk:siamfanl"><a href="siamfa.html#ctk:siamfanl">(Kel20c)</cite>
# The solver and the H-equation example are both from that package. When you run the first code cell you are set up.
#
#
# ## Using nsol.jl
# At the level of this notebook, it's pretty simple. Remember that Julia hates to allocate memory. So your function and Jacobian evaluation routines should expect the calling function to **preallocate** the storage for both the function and Jacobian. Your functions will then use __.=__ to put the function and Jacobian where they are supposed to be.
#
# It's worthwhile to look at the help screen.
# Show the solver's docstring (Julia REPL/IJulia help syntax).
?nsol
# ## How nsol.jl controls the precision of the Jacobian
# You can control the precision of the Jacobian by simply allocating FPS in your favorite precision. So if I have a problem with N=256 unknowns I will declare FP as zeros(N,1) and may declare FPS as zeros(N,N) or Float32.(zeros(N,N)).
#
# Note the __.__ between Float32 and the paren. This, as is standard Julia practice, applies the conversion to Float32 to every element in the array. If you forget the __.__ Julia will complain.
# # The results in the paper
#
#
# The paper has plots for double, single, and half precision computations for c=.5, .99, and 1.0. The half precision results take a very long time to get. On my computer (2019 iMac; 8 cores, 64GB of memory) the half precision compute time was over two weeks. Kids, don't try this at home.
#
# The data for the paper are in the cleverly named directory __Data_From_Paper__
#
# __cd to the directory MPResults__ and __from that directory__ run
#
# ```Julia
# data_harvest()
# ```
#
# at the julia prompt you will generate all the tables and plots in the paper.
#
# If you have the time and patience you can also generate the data with __data_populate.jl__. This creates binary files with the iteration histories and you can see for yourself how long it takes. You can reduce the number of grid levels and __turn half precision off__. I will turn half precision off in the example in this notebook. That means that the code will run in a reasonable amount of time instead of the __two weeks__ it needs for the half precision results.
#
# ```data_populate(c; half=false,level=p)``` does double and single precision solves for $1024 \times 2^k$ point grids for k=0, ... p-1 . Set ```half=true``` to make the computations take much longer.
#
# Here is a simple example of using data_populate and the plotter code plot_nsold.jl. I'm only using the 1024, 2048 and 4096 point grids. The plot in the paper uses more levels. This is part of Figure 1 in the paper.
#
# Look at the source to __data_populate.jl__ and __PlotData.jl__ and you'll see how I did this. These codes only manage files, plots, and tables. There is nothing really exciting here. You don't need to know much Julia to understand this, but you do need to know something and I can't help with that.
#
# To begin with, I will create a directory called Data_Test to put all this stuff. I will cd to that directory.
cd(MPRdir)
Home4mp="Data_Test"
# Create the scratch directory; the bare catch swallows the error raised
# when it already exists from a previous run.
try (mkdir(Home4mp))
catch
end
cd(Home4mp)
pwd()
# Now I'll run __data_populate__ to create a subdirectory with the data. cd to that directory and run __PlotData__. That's how I created the plots in the paper with all values of *c* and all the problem sizes. The half precision computation took two weeks.
#
# __PlotData(.5, level=3)__ makes the four-plot with only double and single (no half).
#
# Even with this small problem, __data_populate__ takes a while. Be patient. Once it's done the plots will appear
# pretty rapidly.
using PyPlot
# Generate double/single-precision data for c = 0.5 on three grid levels
# (half precision stays off by default) and plot it.
data_populate(.5; level=3)
PlotData(.5; level=3);
# Finally, I will duplicate the tables and plots in the paper with the precomputed data in the Complete_Data directory. I'll cd to that directory and make the plots with __data_harvest.jl__.
#
# Regenerate every table/plot from the paper's precomputed data.
cd(MPRdir)
cd("Complete_Data")
data_harvest()
# The three LaTeX tables are pretty vivid demonstrations that using a half-precision Jacobian is a poor idea. The columns are ratios of successive residual norms. Those ratios are supposed to go to zero if the convergence is q-superlinear. In the half precision case, it is not. You can also see that from the plots at the bottom. I make the tables with three calls to __MakeTable.jl__ which is contained in the file __TableData.jl__.
MakeTable(.5)
MakeTable(.99)
MakeTable(1.0)
# Finally, I will cd to the directory that contains the notebook.
cd(MPRdir)
| MPResults.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to define a simulation to call FEMM
#
# This tutorial shows the different steps to **compute magnetic flux and electromagnetic torque** with pyleecan **automated coupling with FEMM**. FEMM must be installed for this tutorial. This tutorial was tested with the release [21Apr2019 of FEMM](http://www.femm.info/wiki/Download). Please note that the coupling with FEMM is only available on Windows.
#
# The notebook related to this tutorial is available on [GitHub](https://github.com/Eomys/pyleecan/tree/master/Tutorials/tuto_Simulation_FEMM.ipynb).
#
# Every electrical machine defined in Pyleecan can be automatically drawn in [FEMM](http://www.femm.info/wiki/HomePage) to compute torque, airgap flux and electromotive force. To do so, the tutorial is divided into four parts:
# - defining or loading the machine
# - defining the simulation inputs
# - setting up and running of the magnetic solver
# - plotting of the magnetic flux for the first time step
#
# ## Defining or loading the machine
#
# The first step is to define the machine to simulate. For this tutorial we use the Toyota Prius 2004 machine defined in [this tutorial](https://www.pyleecan.org/tuto_Machine.html).
# +
# %matplotlib notebook
# Load the machine
from os.path import join
from pyleecan.Functions.load import load
from pyleecan.definitions import DATA_DIR
# Load the Toyota Prius 2004 IPMSM definition shipped with pyleecan and draw
# its cross-section to check the geometry.
IPMSM_A = load(join(DATA_DIR, "Machine", "IPMSM_A.json"))
IPMSM_A.plot()
# -
# ## Simulation definition
# ### Inputs
#
# The simulation is defined with a [**Simu1**](http://www.pyleecan.org/pyleecan.Classes.Simu1.html) object. This object correspond to a simulation with 5 sequential physics (or modules):
# - electrical
# - magnetic
# - force
# - structural
# - acoustic
#
# [**Simu1**](http://www.pyleecan.org/pyleecan.Classes.Simu1.html) object enforce a weak coupling between each physics: the input of each physic is the output of the previous one.
#
# In this tutorial we will focus only on the magnetic module. The Magnetic physic is defined with the object [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) and the other physics are desactivated (set to None).
#
# We define the starting point of the simulation with an [**InputCurrent**](http://www.pyleecan.org/pyleecan.Classes.InputCurrent.html) object to enforce the electrical module output with:
# - angular and the time discretization
# - rotor speed
# - stator currents
# +
from numpy import ones, pi, array, linspace
from pyleecan.Classes.Simu1 import Simu1
from pyleecan.Classes.InputCurrent import InputCurrent
from pyleecan.Classes.MagFEMM import MagFEMM

# Create the Simulation
mySimu = Simu1(name="EM_SIPMSM_AL_001", machine=IPMSM_A)
# Defining Simulation Input: enforces the electrical-module output directly
mySimu.input = InputCurrent()
# Rotor speed [rpm]
mySimu.input.N0 = 2000
# time discretization [s]: 16 time steps over one mechanical revolution (60/N0 s)
mySimu.input.time = linspace(start=0, stop=60/mySimu.input.N0, num=16, endpoint=False)  # 16 timesteps
# Angular discretization along the airgap circumference for flux density calculation
mySimu.input.angle = linspace(start=0, stop=2*pi, num=2048, endpoint=False)  # 2048 steps
# Stator currents as a function of time, one row per time step,
# each column corresponds to one phase [A] (three-phase balanced waveform)
mySimu.input.Is = array(
    [
        [ 1.77000000e+02, -8.85000000e+01, -8.85000000e+01],
        [ 5.01400192e-14, -1.53286496e+02,  1.53286496e+02],
        [-1.77000000e+02,  8.85000000e+01,  8.85000000e+01],
        [-3.25143725e-14,  1.53286496e+02, -1.53286496e+02],
        [ 1.77000000e+02, -8.85000000e+01, -8.85000000e+01],
        [ 2.11398201e-13, -1.53286496e+02,  1.53286496e+02],
        [-1.77000000e+02,  8.85000000e+01,  8.85000000e+01],
        [-3.90282030e-13,  1.53286496e+02, -1.53286496e+02],
        [ 1.77000000e+02, -8.85000000e+01, -8.85000000e+01],
        [ 9.75431176e-14, -1.53286496e+02,  1.53286496e+02],
        [-1.77000000e+02,  8.85000000e+01,  8.85000000e+01],
        [-4.33634526e-13,  1.53286496e+02, -1.53286496e+02],
        [ 1.77000000e+02, -8.85000000e+01, -8.85000000e+01],
        [ 4.55310775e-13, -1.53286496e+02,  1.53286496e+02],
        [-1.77000000e+02,  8.85000000e+01,  8.85000000e+01],
        [-4.76987023e-13,  1.53286496e+02, -1.53286496e+02]
    ]
)
# -
# The stator currents are enforced as a function of time for each phase. The currents can also be enforced as sinusoidal by using Id_ref/Iq_ref as explained in the [How to set the Operating Point tutorial](https://www.pyleecan.org/tuto_Operating_point.html).
#
# ### MagFEMM configuration
# For the configuration of the Magnetic module, we use the object [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) that computes the airgap flux density by calling FEMM. The model parameters are set through the properties of the [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) object. In this tutorial we will present the main ones; the complete list is available by looking at [**Magnetics**](http://www.pyleecan.org/pyleecan.Classes.Magnetics.html) and [**MagFEMM**](http://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) classes documentation.
#
# *type_BH_stator* and *type_BH_rotor* enable to select how to model the B(H) curve of the laminations in FEMM. The material parameter and in particular the B(H) curve are setup directly [in the machine](https://www.pyleecan.org/tuto_Machine.html).
# +
from pyleecan.Classes.MagFEMM import MagFEMM

# Magnetic module: FEMM-based airgap flux density computation
mySimu.mag = MagFEMM(
    type_BH_stator=0,  # 0 to use the material B(H) curve,
                       # 1 to use linear B(H) curve according to mur_lin,
                       # 2 to enforce infinite permeability (mur_lin =100000)
    type_BH_rotor=0,   # 0 to use the material B(H) curve,
                       # 1 to use linear B(H) curve according to mur_lin,
                       # 2 to enforce infinite permeability (mur_lin =100000)
    file_name = "",    # Name of the file to save the FEMM model ("" = no custom save name - confirm)
)
# We only use the magnetic part: disable the force and structural modules
mySimu.force = None
mySimu.struct = None
# -
# The Pyleecan coupling with FEMM can model the machine with symmetry and a sliding band to reduce the computation time. The angular periodicity of the machine is computed automatically, and (in this particular case) only 1/8 of the machine is modeled (4-fold symmetry + antiperiodicity):
# Enable angular periodicity: only the smallest periodic portion of the machine is drawn/solved
mySimu.mag.is_periodicity_a=True
# At the end of the simulation, the mesh and the solution can be saved in the **Output** object with:
mySimu.mag.is_get_mesh = True # To get FEA mesh for later post-processing
mySimu.mag.is_save_FEA = False # To save FEA results in a dat file
# ## Run simulation
myResults = mySimu.run()  # runs the enabled modules (input -> MagFEMM); force/struct are None
# When running the simulation, a FEMM window should open so you can see pyleecan drawing the machine and defining the surfaces.
# 
# The simulation will compute 16 different timesteps by updating the current and the sliding band boundary condition.
#
# Once the simulation is finished, an Output object is returned. The results are stored in the magnetic part of the output (i.e. _myResults.mag_ ) and different plots can be called. This _myResults.mag_ contains:
# - *time*: magnetic time vector without symmetry
# - *angle*: magnetic position vector without symmetry
# - *B*: airgap flux density (contains radial and tangential components)
# - *Tem*: electromagnetic torque
# - *Tem_av*: average electromagnetic torque
# - *Tem_rip_pp* : Peak to Peak Torque ripple
# - *Tem_rip_norm*: Peak to Peak Torque ripple normalized according to average torque
# - *Phi_wind_stator*: stator winding flux
# - *emf*: electromotive force
#
#
# ## Plot results
# The **Output** object embeds different plots to visualize results easily. A dedicated tutorial is available [here](https://www.pyleecan.org/tuto_Plots.html).
#
# For instance, the radial and tangential magnetic flux in the airgap at a specific timestep can be plotted with:
# Radial magnetic flux
myResults.plot_2D_Data("mag.B","angle","time[1]",component_list=["radial"])  # vs. airgap angle, at time index 1
myResults.plot_2D_Data("mag.B","wavenumber=[0,76]","time[1]",component_list=["radial"])  # spatial spectrum, wavenumbers 0..76
# Tangential magnetic flux
myResults.plot_2D_Data("mag.B","angle","time[1]",component_list=["tangential"])
myResults.plot_2D_Data("mag.B","wavenumber=[0,76]","time[1]",component_list=["tangential"])
# If the mesh was saved in the output object (mySimu.mag.is_get_mesh = True), it can be plotted with:
# + tags=[]
# Contour of the flux density B on the stator group of the saved FEA mesh
myResults.mag.meshsolution.plot_contour(label="B", group_names="stator")
# -
# <div>
# <img src="https://www.pyleecan.org/_static/tuto_Simulation_FEMM_Bmesh.png" width="800"/>
# </div>
# Finally, it is possible to extend pyleecan by implementing new plot by using the results from output. For instance, the following plot requires plotly to display the radial flux density in the airgap over time and angle.
# +
# #%run -m pip install plotly # Uncomment this line to install plotly
import plotly.graph_objects as go
from plotly.offline import init_notebook_mode

init_notebook_mode()

# Extract the radial airgap flux density component as a function of
# angle (converted to degrees) and time from the simulation output.
result = myResults.mag.B.get_rad_along("angle{°}", "time")
x = result["angle"]
y = result["time"]
z = result["B_r"]

# 3D surface of B_r(angle, time).
# Note: a redundant empty fig.update_layout() call was removed; all layout
# options are applied in the single call below.
fig = go.Figure(data=[go.Surface(z=z, x=x, y=y)])
fig.update_layout(title='Radial flux density in the airgap over time and angle',
                  autosize=True,
                  scene = dict(
                      xaxis_title='Angle [°]',
                      yaxis_title='Time [s]',
                      zaxis_title='Flux [T]'
                  ),
                  width=700,
                  margin=dict(r=20, b=100, l=10, t=100),
                  )
fig.show(config = {"displaylogo":False})
| Tutorials/tuto_Simulation_FEMM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns

plt.style.use(["seaborn", "thesis"])  # "thesis" looks like a local custom style sheet - confirm it is installed
# +
from pyscf.gto import Mole

# First C4H10 isomer (Cartesian coordinates, presumably in Angstrom - confirm),
# minimal STO-3G basis. The geometry appears to be the straight chain
# (n-butane) - TODO confirm from the coordinates.
mol1 = Mole()
mol1.basis = "sto-3g"
mol1.atom = """ C -1.50476 1.90801 0.00093
C -0.22023 2.71866 -0.06451
C 1.01068 1.83222 0.11410
C 2.29631 2.63776 0.01549
H -1.59762 1.39598 0.96402
H -1.53361 1.15500 -0.79310
H -2.37303 2.56312 -0.12095
H -0.24013 3.48852 0.71563
H -0.17089 3.23636 -1.02961
H 1.01956 1.04649 -0.65016
H 0.97148 1.33457 1.09022
H 2.38830 3.10916 -0.96831
H 3.16377 1.98667 0.16210
H 2.32777 3.42305 0.77738
"""
mol1.build()
# +
# Second C4H10 isomer, same element counts (4 C, 10 H) with a different
# connectivity - presumably the branched isomer (isobutane) - TODO confirm.
mol2 = Mole()
mol2.basis = "sto-3g"
mol2.atom = """ C -1.50476 1.90801 0.00093
C 0.13885 3.31669 -0.36015
C 2.61235 2.23226 0.94149
H -1.59762 1.39598 0.96402
C -0.46330 5.12481 0.01549
H -2.00496 0.79657 -0.80877
H -2.37303 2.56312 -0.12095
H 0.44073 3.41057 -1.38238
H 2.43316 2.09249 1.98707
H 3.41832 2.92307 0.80696
H 2.86836 1.29419 0.49497
H -0.80308 5.18438 1.02836
H -1.26579 5.37802 -0.64540
H 0.34729 5.80805 -0.12955"""
mol2.build()
# +
from pyscf.scf.hf import init_guess_by_atom

# Heatmap of the difference between the two isomers' SAD
# ("superposition of atomic densities") initial-guess density matrices.
sns.heatmap(
    init_guess_by_atom(mol1) - init_guess_by_atom(mol2)
)
# -
| notebooks/SADIsomers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.4.0-dev
# language: julia
# name: julia-0.4
# ---
# # quant-econ Solutions: The Kalman Filter
#
# Solutions for http://quant-econ.net/jl/kalman.html
using QuantEcon
using PyPlot
# ## Exercise 1
# +
import Distributions: Normal, pdf
# == Parameters == #
# Q = 0 means the hidden state never moves: it stays at the constant theta.
# The observation is y = theta + N(0, R) noise with R = 1.
theta = 10
A, G, Q, R = 1.0, 1.0, 0.0, 1.0
x_hat_0, Sigma_0 = 8.0, 1.0  # prior mean deliberately off the true value
# == Initialize Kalman filter == #
kalman = Kalman(A, G, Q, R)
set_state!(kalman, x_hat_0, Sigma_0)
# == Run == #
N = 5
fig, ax = subplots(figsize=(10,8))
xgrid = linspace(theta - 5, theta + 2, 200)
for i=1:N
    # Record the current predicted mean and variance, and plot their densities
    m, v = kalman.cur_x_hat, kalman.cur_sigma
    ax[:plot](xgrid, pdf(Normal(m, sqrt(v)), xgrid),
              label=LaTeXString("\$t=$i\$"))
    # Generate the noisy signal
    y = theta + randn()
    # Update the Kalman filter
    update!(kalman, y)
end
ax[:set_title](LaTeXString("First $N densities when \$\\theta = $theta\$"))
ax[:legend](loc="upper left");
plt.show()
# -
# ## Exercise 2
# +
srand(42) # reproducible results
epsilon = 0.1
kalman = Kalman(A, G, Q, R)
set_state!(kalman, x_hat_0, Sigma_0)
# Gauss-Legendre quadrature nodes/weights on [theta - eps, theta + eps]
nodes, weights = qnwlege(21, theta-epsilon, theta+epsilon)
T = 600
z = Array(Float64, T)
for t=1:T
    # Current predictive density N(m, v); z[t] is the probability mass
    # it puts OUTSIDE the interval [theta - eps, theta + eps]
    m, v = kalman.cur_x_hat, kalman.cur_sigma
    dist = Normal(m, sqrt(v))
    integral = do_quad((x)->pdf(dist, x), nodes, weights)
    z[t] = 1. - integral
    # Generate the noisy signal and update the Kalman filter
    update!(kalman, theta + randn())
end
fig, ax = subplots(figsize=(9, 7))
ax[:set_ylim](0, 1)
ax[:set_xlim](0, T)
ax[:plot](1:T, z)
ax[:fill_between](1:T, zeros(T), z, color="blue", alpha=0.2);
# -
# ## Exercise 3
# +
import Distributions: MultivariateNormal, rand
srand(41) # reproducible results
# === Define A, Q, G, R === #
G = eye(2)
R = 0.5 .* G
A = [0.5 0.4
     0.6 0.3]
Q = 0.3 .* G
# === Define the prior density === #
Sigma = [0.9 0.3
         0.3 0.9]
x_hat = [8, 8]''  # double transpose: turn the row into a 2x1 column vector
# === Initialize the Kalman filter === #
kn = Kalman(A, G, Q, R)
set_state!(kn, x_hat, Sigma)
# === Set the true initial value of the state === #
x = zeros(2)
# == Print eigenvalues of A == #
println("Eigenvalues of A:\n$(eigvals(A))")
# == Print stationary Sigma == #
S, K = stationary_values(kn)
println("Stationary prediction error variance:\n$S")
# === Generate the plot === #
# Compare squared errors of the Kalman prediction vs. the naive
# conditional expectation A*x (which uses the unobservable true state).
T = 50
e1 = Array(Float64, T)
e2 = Array(Float64, T)
for t=1:T
    # == Generate signal and update prediction == #
    dist = MultivariateNormal(G*x, R)
    y = rand(dist)
    update!(kn, y)
    # == Update state and record error == #
    Ax = A * x
    x = rand(MultivariateNormal(Ax, Q))
    e1[t] = sum((x - kn.cur_x_hat).^2)
    e2[t] = sum((x - Ax).^2)
end
fig, ax = subplots(figsize=(9,6))
ax[:plot](1:T, e1, "k-", lw=2, alpha=0.6, label="Kalman filter error")
ax[:plot](1:T, e2, "g-", lw=2, alpha=0.6, label="conditional expectation error")
ax[:legend]();
# -
| solutions/kalman_solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualization with Matplotlib
# **Learning Objectives:** Learn how to make basic plots using Matplotlib's *pylab* API and how to use the Matplotlib documentation.
#
# This notebook focuses only on the Matplotlib API, rather than the broader question of how you can use this API to make effective and beautiful visualizations.
# ## Imports
# The following imports should be used in all of your notebooks where Matplotlib in used:
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# ## Overview
# The following conceptual organization is simplified and adapted from <NAME>'s [AnatomyOfMatplotlib](https://github.com/WeatherGod/AnatomyOfMatplotlib) tutorial.
# ### Figures and Axes
#
# * In Matplotlib a single visualization is a `Figure`.
# * A `Figure` can have multiple areas, called *subplots*. Each *subplot* is an `Axes`.
# * If you don't create a `Figure` and `Axes` yourself, Matplotlib will automatically create one for you.
# * All plotting commands apply to the current `Figure` and `Axes`.
#
# The following functions can be used to create and manage `Figure` and `Axes` objects.
#
# Function | Description
# :-----------------|:----------------------------------------------------------
# `figure` | Creates a new Figure
# `gca` | Get the current Axes instance
# `savefig` | Save the current Figure to a file
# `sca` | Set the current Axes instance
# `subplot` | Create a new subplot Axes for the current Figure
# `subplots` | Create a new Figure and a grid of subplots Axes
# ### Plotting Functions
#
# Once you have created a `Figure` and one or more `Axes` objects, you can use the following function to put data onto that `Axes`.
#
# Function | Description
# :-----------------|:--------------------------------------------
# `bar` | Make a bar plot
# `barh` | Make a horizontal bar plot
# `boxplot` | Make a box and whisker plot
# `contour` | Plot contours
# `contourf` | Plot filled contours
# `hist` | Plot a histogram
# `hist2d` | Make a 2D histogram plot
# `imshow` | Display an image on the axes
# `matshow` | Display an array as a matrix
# `pcolor` | Create a pseudocolor plot of a 2-D array
# `pcolormesh` | Plot a quadrilateral mesh
# `plot` | Plot lines and/or markers
# `plot_date` | Plot with data with dates
# `polar` | Make a polar plot
# `scatter` | Make a scatter plot of x vs y
# ### Plot modifiers
#
# You can then use the following functions to modify your visualization.
#
# Function | Description
# :-----------------|:---------------------------------------------------------------------
# `annotate` | Create an annotation: a piece of text referring to a data point
# `box` | Turn the Axes box on or off
# `clabel` | Label a contour plot
# `colorbar` | Add a colorbar to a plot
# `grid` | Turn the Axes grids on or off
# `legend` | Place a legend on the current Axes
# `loglog` | Make a plot with log scaling on both the *x* and *y* axis
# `semilogx` | Make a plot with log scaling on the *x* axis
# `semilogy` | Make a plot with log scaling on the *y* axis
# `subplots_adjust` | Tune the subplot layout
# `tick_params` | Change the appearance of ticks and tick labels
# `ticklabel_format`| Change the ScalarFormatter used by default for linear axes
# `tight_layout` | Automatically adjust subplot parameters to give specified padding
# `text` | Add text to the axes
# `title` | Set a title of the current axes
# `xkcd` | Turns on [XKCD](http://xkcd.com/) sketch-style drawing mode
# `xlabel` | Set the *x* axis label of the current axis
# `xlim` | Get or set the *x* limits of the current axes
# `xticks` | Get or set the *x*-limits of the current tick locations and labels
# `ylabel` | Set the *y* axis label of the current axis
# `ylim` | Get or set the *y*-limits of the current axes
# `yticks` | Get or set the *y*-limits of the current tick locations and labels
# ## Basic plotting
# For now, we will work with basic line plots (`plt.plot`) to show how the Matplotlib pylab plotting API works. In this case, we don't create a `Figure` so Matplotlib does that automatically.
t = np.linspace(0, 10.0, 100)  # 100 evenly spaced sample points on [0, 10]
plt.plot(t, np.sin(t))
plt.xlabel('Time')
plt.ylabel('Signal')
plt.title('My Plot'); # trailing semicolon suppresses the text output in notebooks
# ## Basic plot modification
# With a third argument you can provide the series color and line/marker style. Here we create a `Figure` object and modify its size.
# +
f = plt.figure(figsize=(9,6)) # 9" x 6", default is 8" x 5.5"
plt.plot(t, np.sin(t), 'r.');  # 'r.' = red point markers
plt.xlabel('x')
plt.ylabel('y')
# -
# Here is a list of the single character color strings:
#
# ```
# b: blue
# g: green
# r: red
# c: cyan
# m: magenta
# y: yellow
# k: black
# w: white
# ```
# The following will show all of the line and marker styles:
from matplotlib import lines
lines.lineStyles.keys()  # available line-style codes ('-', '--', ':', ...)
from matplotlib import markers
markers.MarkerStyle.markers.keys()  # available marker codes
# To change the plot's limits, use `xlim` and `ylim`:
plt.plot(t, np.sin(t)*np.exp(-0.1*t),'bo')  # damped sine, blue circle markers
plt.xlim(-1.0, 11.0)
plt.ylim(-1.0, 1.0)
# You can change the ticks along a given axis by using `xticks`, `yticks` and `tick_params`:
plt.plot(t, np.sin(t)*np.exp(-0.1*t),'bo')
plt.xlim(0.0, 10.0)
plt.ylim(-1.0, 1.0)
plt.xticks([0,5,10], ['zero','five','10'])  # custom tick positions and labels
plt.tick_params(axis='y', direction='inout', length=10)
# ## Box and grid
# You can enable a grid or disable the box. Notice that the ticks and tick labels remain.
plt.plot(np.random.rand(100), 'b-')
plt.grid(True)    # show the grid...
plt.box(False)    # ...but hide the Axes frame (ticks and labels remain)
# ## Multiple series
# Multiple calls to a plotting function will all target the current `Axes`:
plt.plot(t, np.sin(t), label='sin(t)')
plt.plot(t, np.cos(t), label='cos(t)')
plt.xlabel('t')
plt.ylabel('Signal(t)')
plt.ylim(-1.5, 1.5)
plt.xlim(right=12.0)  # leave room on the right for the legend
plt.legend()
# ## Subplots
# Subplots allow you to create a grid of plots in a single figure. There will be an `Axes` associated with each subplot and only one `Axes` can be active at a time.
#
#
# The first way you can create subplots is to use the `subplot` function, which creates and activates a new `Axes` for the active `Figure`:
# +
# Two stacked subplots in one figure: exponential on top, quadratic below.
plt.subplot(2,1,1) # 2 rows x 1 col, plot 1
plt.plot(t, np.exp(0.1*t))
plt.ylabel('Exponential')
plt.subplot(2,1,2) # 2 rows x 1 col, plot 2
plt.plot(t, t**2)
plt.ylabel('Quadratic')  # fixed: a stray trailing 'f' here was a SyntaxError
plt.xlabel('x')
plt.tight_layout()
# -
# In many cases, it is easier to use the `subplots` function, which creates a new `Figure` along with an array of `Axes` objects that can be indexed in a rational manner:
# +
f, ax = plt.subplots(2, 2)  # Figure plus a 2x2 array of Axes
for i in range(2):
    for j in range(2):
        plt.sca(ax[i,j])  # make Axes (i, j) the current one
        plt.plot(np.random.rand(20))
        plt.xlabel('x')
        plt.ylabel('y')
plt.tight_layout()
# -
# The `subplots` function also makes it easy to pass arguments to `Figure` and to share axes:
# +
f, ax = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(6,6))
for i in range(2):
    for j in range(2):
        plt.sca(ax[i,j])
        plt.plot(np.random.rand(20))
        # With shared axes, only label the outer edge:
        # bottom row gets x labels, left column gets y labels.
        if i==1:
            plt.xlabel('x')
        if j==0:
            plt.ylabel('y')
plt.tight_layout()
# -
# ## More marker and line styling
# All plot commands, including `plot`, accept keyword arguments that can be used to style the lines in more detail. For more information see:
#
# * [Controlling line properties](http://matplotlib.org/users/pyplot_tutorial.html#controlling-line-properties)
# * [Specifying colors](http://matplotlib.org/api/colors_api.html#module-matplotlib.colors)
# Keyword arguments give finer control than the shorthand format string
plt.plot(t, np.sin(t), marker='o', color='darkblue',
         linestyle='--', alpha=0.3, markersize=10)
# ## Resources
#
# * [Matplotlib Documentation](http://matplotlib.org/contents.html), Matplotlib developers.
# * [Matplotlib Gallery](http://matplotlib.org/gallery.html), Matplotlib developers.
# * [Matplotlib List of Plotting Commands](http://matplotlib.org/api/pyplot_summary.html), Matplotlib developers.
# * [AnatomyOfMatplotlib](https://github.com/WeatherGod/AnatomyOfMatplotlib), <NAME>.
# * [Matplotlib Tutorial](http://nbviewer.ipython.org/github/jrjohansson/scientific-python-lectures/blob/master/Lecture-4-Matplotlib.ipynb), <NAME>.
| days/day06/Matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Header starts here.
from sympy.physics.units import *
from sympy import *
# Rounding:
import decimal
from decimal import Decimal as DX
from copy import deepcopy
def iso_round(obj, pv, rounding=decimal.ROUND_HALF_EVEN):
    """
    Round a scalar or a sequence according to DIN EN ISO 80000-1:2013-08.

    Parameters
    ----------
    obj : number or sequence of numbers
        Value(s) to round. Anything ``float()`` accepts works, including
        numeric sympy expressions. The input is deep-copied, never mutated.
    pv : float
        Place value ("Rundestellenwert"): the decimal position to round to,
        e.g. ``0.01`` rounds to the 2nd digit after the decimal point.
    rounding : str, optional
        A ``decimal`` rounding mode. Defaults to banker's rounding
        (ROUND_HALF_EVEN), as required by the ISO standard.

    Returns
    -------
    decimal.Decimal or sequence of decimal.Decimal
    """
    # Allowed place values: 1 and 0.1 down to 1e-10.
    assert pv in {1, 0.1, 0.01, 0.001, 0.0001, 0.00001, 0.000001,
                  0.0000001, 0.00000001, 0.000000001, 0.0000000001}
    objc = deepcopy(obj)
    quantum = DX(str(pv))
    try:
        # Scalar path: float() succeeds for plain numbers and numeric sympy
        # objects; go through str() to avoid binary-float artifacts.
        objc = DX(str(float(objc))).quantize(quantum, rounding=rounding)
    except (TypeError, ValueError):
        # Sequence path: float(<sequence>) raises TypeError, so round
        # element-wise in place on the copy.
        # (Was a bare `except:`, which also hid unrelated errors; an unused
        # `import sympy` before the docstring was removed as well.)
        for i in range(len(objc)):
            objc[i] = DX(str(float(objc[i]))).quantize(quantum, rounding=rounding)
    return objc
# LateX printing options (bmatrix environment, no extra delimiters):
kwargs = {}
kwargs["mat_str"] = "bmatrix"
kwargs["mat_delim"] = ""
# kwargs["symbol_names"] = {FB: "F^{\mathsf B}", }
# Units: SI prefixes and derived units built on sympy's base units
(k, M, G ) = ( 10**3, 10**6, 10**9 )
(mm, cm) = ( m/1000, m/100 )
Newton = kg*m/s**2
Pa = Newton/m**2
MPa = M*Pa
GPa = G*Pa
kN = k*Newton
deg = pi/180
half = S(1)/2  # exact rational 1/2 (avoids the float 0.5 in symbolic work)
# Header ends here.
#
# Symbolic parameters: applied moment M, element length l, bending stiffness EI
M,l,EI = var("M,l,EI")
# Numeric values substituted at the end; EI uses E = 200 GPa and the second
# moment b*h^3/12 of a 2 mm x 6 mm rectangular cross-section.
sub_list=[
    ( M, 10 *Newton*m ),
    ( l, 1 *m ),
    ( EI, 200*GPa * 2*mm*6*mm**3/ 12 ),
]
l2 = l*l
l3 = l*l*l
# 4x4 beam element stiffness matrix scaled by EI/l^3. DOF ordering appears
# to be [psi1, w1, psi2, w2] (rotation/deflection per node) - TODO confirm.
K = EI/l3
K *= Matrix(
    [
        [ 4*l2 , -6*l , 2*l2 , 6*l ],
        [ -6*l , 12 , -6*l , -12 ],
        [ 2*l2 , -6*l , 4*l2 , 6*l ],
        [ 6*l , -12 , 6*l , 12 ],
    ]
)
# Unknowns: the free rotation psi2 plus the three reactions M1, F1, F2
p2 = var("psi2")
M1,F1,F2 = var("M1,F1,F2")
u = Matrix([0,0,p2,0])    # displacement vector: only psi2 is non-zero
f = Matrix([M1,F1,M,F2])  # load vector: the applied moment M acts on the psi2 DOF
unks = [p2,M1,F1,F2]
eq = Eq(K*u , f)
sol = solve(eq, unks)
p2 = sol[p2]
M1 = sol[M1]
F1 = sol[F1]
F2 = sol[F2]
pprint("\nM1 / Nm:")
tmp = M1
pprint(tmp)
tmp = tmp.subs(sub_list)
tmp /= Newton*m
tmp = iso_round(tmp,1)
pprint(tmp)
pprint("\nF1 / N:")
tmp = F1
tmp = tmp.subs(sub_list)
tmp /= Newton
tmp = iso_round(tmp,1)
pprint(tmp)
pprint("\nF2 / N:")
tmp = F2
tmp = tmp.subs(sub_list)
tmp /= Newton
tmp = iso_round(tmp,1)
pprint(tmp)
pprint("\nψ₂:")
tmp = p2
pprint(tmp)
pprint("\nψ₂ / rad:")
tmp = p2
tmp = tmp.subs(sub_list)
tmp = iso_round(tmp,1)
pprint(tmp)
# M1 / Nm:
# M
# ─
# 2
# 5
#
# F1 / N:
# -15
#
# F2 / N:
# 15
#
# ψ₂:
# M⋅l
# ────
# 4⋅EI
#
# ψ₂ / rad:
# 12
| ipynb/WB-Klein/5/5.7_cc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/aubricot/computer_vision_with_eol_images/blob/master/object_detection_for_image_cropping/archive/calculate_error_mAP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="k3JTznL1ocgj"
# # Calculate detection result error (mAP) from YOLO object detection results
# ---
# Last Updated 8 April 2020
# --*Update as of 1 June 2021--Darkflow builds are no longer being updated and only support Tensorflow 1.x builds. As a result, this notebook and others associated with darkflow in this repo are left in their state from 8 April 2020. Functions may become deprecated or lose functionality. For updated mAP and AR with Tensorflow models, refer to [Tensorflow notebooks](https://github.com/aubricot/computer_vision_with_eol_images/blob/master/object_detection_for_image_cropping/lepidoptera/lepidoptera_train_tf2_ssd_rcnn.ipynb). For object detection with YOLO v4 in it's native state and calculation of performance metrics, see [Object Detection for Image Tagging Notebooks](https://github.com/aubricot/computer_vision_with_eol_images/tree/master/object_detection_for_image_tagging)*--
# Use exported detection result coordinates from YOLO via darkflow to calculate detection error using mean Average Precision (mAP) and Intersection over Union (IoU).
#
# Code modified from the [mAP GitHub repo](https://github.com/Cartucho/mAP#create-the-ground-truth-files).
# + id="pjGPa9NbYD1_"
# Mount google drive to import/export files
from google.colab import drive
drive.mount('/content/drive', force_remount=True)  # force_remount re-mounts even if Drive is already mounted
# + [markdown] id="_ym-DoQfrGpZ"
# ### Convert test image detection results (jsons) and annotation files (xmls) to text files
# ---
# + id="IMnDtYBkoQ-M"
# Install the mAP repository to calculate error from detection results
import os
# %cd drive/My Drive/fall19_smithsonian_informatics/train
if not os.path.exists("eval"):
# !mkdir eval
# %cd eval
# !git clone https://github.com/Cartucho/mAP
# %cd ../
# + id="4FuEdcxpiCT2"
# Move yolo detection results (jsons exported from training_yolo.ipynb) to detection-results/
# %cd drive/My Drive/fall19_smithsonian_informatics/train
# !mv test_images/out/* eval/mAP/input/detection-results/
# !rm -rf test_images/out
# + id="yBfdBGNKnd4l"
# Copy image annotations (xmls formatted with ground truth bounding boxes) to ground-truth/
# %cd drive/My Drive/fall19_smithsonian_informatics/train
# !cp test_ann/* eval/mAP/input/ground-truth/
# + id="7JuYLqfXpBsT"
# Convert jsons to format needed for mAP calc
# %cd eval/mAP/scripts/extra
# !python convert_dr_darkflow_json.py
# + id="-gM1RtGAm7Fg"
# Convert xmls to format needed for mAP calc
# %cd eval/mAP/scripts/extra
# !python convert_gt_xml.py
# + id="UKMap2ZRtC9L"
# Remove sample images in input/images-optional
# # cd to mAP
# %cd ../../
# !rm -rf input/images-optional/*
# + [markdown] id="qRcR2EbErUwO"
# ### Calculate mAP for test images
# ---
# + id="lg3xlEK2uXNq"
# Calculate mAP for detection results
# Output will be in mAP/results
# %cd eval/mAP
# !python main.py
| object_detection_for_image_cropping/archive/calculate_error_mAP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demonstrate Git Based ML Pipeline Automation
# --------------------------------------------------------------------
#
# Creating a local function, running predefined functions, creating and running a full ML pipeline with local and library functions.
#
# #### **notebook how-to's**
# * Create and test a simple function
# * Examine data using serverless (containerized) `describe` function
# * Create an automated ML pipeline from various library functions
# * Running and tracking the pipeline results and artifacts
# ## Create and Test a Local Ingestion/Data-prep Function (e.g. Iris Data Generator)
# Import nuclio SDK and magics, <b>do not remove the cell and comment !!!</b>
# nuclio: ignore
import nuclio
# <b>Specify function dependencies and configuration<b>
# %nuclio config spec.image = "mlrun/ml-models"
# #### Function code
# Generate the iris dataset and log the dataframe (as csv or parquet file)
# +
import os
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import accuracy_score
from mlrun.artifacts import TableArtifact, PlotArtifact
import pandas as pd
def iris_generator(context, format='csv'):
    """Build the Iris dataset with its label column and log it as an MLRun dataset artifact."""
    raw = load_iris()
    features = pd.DataFrame(data=raw.data, columns=raw.feature_names)
    labels = pd.DataFrame(data=raw.target, columns=['label'])
    full_df = pd.concat([features, labels], axis=1)
    context.logger.info('saving iris dataframe to {}'.format(context.artifact_path))
    context.log_dataset('iris_dataset', df=full_df, format=format, index=False)
# -
# The following end-code annotation tells ```nuclio``` to stop parsing the notebook from this cell. _**Please do not remove this cell**_:
# +
# nuclio: end-code
# marks the end of a code section
# -
# ## Create a project to host our functions, jobs and artifacts
#
# Projects are used to package multiple functions, workflows, and artifacts. We usually store project code and definitions in a Git archive.
#
# The following code creates a new project in a local dir and initialize git tracking on that
# +
from os import path
from mlrun import run_local, NewTask, mlconf, import_function, mount_v3io

# Point the mlrun client at the API service (keep an already-configured dbpath if set)
mlconf.dbpath = mlconf.dbpath or 'http://mlrun-api:8080'
# specify artifacts target location (defaults to the current directory)
artifact_path = mlconf.artifact_path or path.abspath('./')
project_name = 'gitops-project'
# -
from mlrun import new_project, code_to_function

# Create (or load) the project in the local directory; definitions go to project.yaml
project_dir = './'
skproj = new_project(project_name, project_dir)
# <a id='test-locally'></a>
# ### Run/test the data generator function locally
#
# The functions above can be tested locally. Parameters, inputs, and outputs can be specified in the API or the `Task` object.<br>
# when using `run_local()` the function inputs and outputs are automatically recorded by MLRun experiment and data tracking DB.
#
# In each run we can specify the function, inputs, parameters/hyper-parameters, etc... For more details, see the [mlrun_basics notebook](mlrun_basics.ipynb).
# run the function locally
# (inputs/outputs are tracked in the MLRun DB; artifacts land under <artifact_path>/data)
gen = run_local(name='iris_gen', handler=iris_generator,
                project=project_name, artifact_path=path.join(artifact_path, 'data'))
# #### Convert our local code to a distributed serverless function object
gen_func = code_to_function(name='gen_iris', kind='job')  # package this notebook's code as a 'job' runtime
skproj.set_function(gen_func)  # register the function in the project
# ## Create a Fully Automated ML Pipeline
#
# #### Add more functions to our project to be used in our pipeline (from the functions hub/marketplace)
#
# AutoML training (classifier), Model validation (test_classifier), Real-time model server, and Model REST API Tester
skproj.set_function('hub://sklearn_classifier', 'train')
skproj.set_function('hub://test_classifier', 'test')
skproj.set_function('hub://model_server', 'serving')
skproj.set_function('hub://model_server_tester', 'live_tester')
skproj.set_function('hub://github_utils:development', 'git_utils')
#print(skproj.to_yaml())
# #### Define and save a pipeline
#
# The following workflow definition will be written into a file, it describes a Kubeflow execution graph (DAG)<br>
# and how functions and data are connected to form an end to end pipeline.
#
# * Build the iris generator (ingest) function container
# * Ingest the iris data
# * Analyze the dataset (describe)
# * Train and test the model
# * Deploy the model as a real-time serverless function
# * Test the serverless function REST API with test dataset
#
# Check the code below to see how functions objects are initialized and used (by name) inside the workflow.<br>
# The `workflow.py` file has two parts, initialize the function objects and define pipeline dsl (connect the function inputs and outputs).
#
# > Note: the pipeline can include CI steps like building container images and deploying models as illustrated in the following example.
#
# +
# %%writefile ./workflow.py
from kfp import dsl
from mlrun import mount_v3io, NewTask

# Module-level globals; presumably filled in by mlrun when it loads this
# workflow file (funcs with the project's function objects, this_project
# with the running project) - confirm against the mlrun project loader.
funcs = {}
this_project = None
DATASET = 'iris_dataset'  # artifact key of the ingested dataset
LABELS = "label"          # name of the label column
# init functions is used to configure function resources and local settings
def init_functions(functions: dict, project=None, secrets=None):
    """Configure resources/settings on every pipeline function before the run (here: mount the V3IO volume)."""
    for name in functions:
        functions[name].apply(mount_v3io())
    # uncomment this line to collect the inference results into a stream
    # and specify a path in V3IO (<datacontainer>/<subpath>)
    #functions['serving'].set_env('INFERENCE_STREAM', 'users/admin/model_stream')
@dsl.pipeline(
    name="Demo training pipeline",
    description="Shows how to use mlrun."
)
def kfpipeline():
    """Kubeflow pipeline DAG: ingest -> train (hyper-param search) -> test -> deploy -> live test, with a git exit handler."""
    # Exit handler: posts a run summary comment on the configured GitHub issue,
    # whether the pipeline succeeds or fails.
    exit_task = NewTask(handler='run_summary_comment')
    exit_task.with_params(workflow_id='{{workflow.uid}}',
                          repo=this_project.params.get('git_repo'),
                          issue=this_project.params.get('git_issue'))
    exit_task.with_secrets('inline', {'GITHUB_TOKEN': this_project.get_secret('GITHUB_TOKEN')})
    with dsl.ExitHandler(funcs['git_utils'].as_step(exit_task, name='exit-handler')):

        # run the ingestion function with the new image and params
        ingest = funcs['gen-iris'].as_step(
            name="get-data",
            handler='iris_generator',
            params={'format': 'pq'},
            outputs=[DATASET])

        # train with hyper-parameters: three sklearn model classes are tried,
        # the best one is selected by max accuracy
        train = funcs["train"].as_step(
            name="train",
            params={"sample"       : -1,
                    "label_column" : LABELS,
                    "test_size"    : 0.10},
            hyperparams={'model_pkg_class': ["sklearn.ensemble.RandomForestClassifier",
                                             "sklearn.linear_model.LogisticRegression",
                                             "sklearn.ensemble.AdaBoostClassifier"]},
            selector='max.accuracy',
            inputs={"dataset" : ingest.outputs[DATASET]},
            labels={"commit": this_project.params.get('commit', '')},
            outputs=['model', 'test_set'])

        # test and visualize our model on the held-out test set
        test = funcs["test"].as_step(
            name="test",
            params={"label_column": LABELS},
            inputs={"models_path" : train.outputs['model'],
                    "test_set"    : train.outputs['test_set']})

        # deploy our model as a real-time serverless function, tagged by commit
        deploy = funcs["serving"].deploy_step(models={f"{DATASET}_v1": train.outputs['model']},
                                              tag=this_project.params.get('commit', 'v1'))

        # test out the new model server (via REST API calls)
        tester = funcs["live_tester"].as_step(name='model-tester',
                                              params={'addr': deploy.outputs['endpoint'], 'model': f"{DATASET}_v1"},
                                              inputs={'table': train.outputs['test_set']})
# -
# register the workflow file as "main", embed the workflow code into the project YAML
skproj.set_workflow('main', 'workflow.py')
# Save the project definitions to a file (project.yaml); it is recommended to commit all changes to a Git repo.
skproj.artifact_path = 'v3io:///users/admin/pipe/{{workflow.uid}}'  # per-run artifact dir; {{workflow.uid}} is expanded by the pipeline engine
skproj.save()
# ### Set parameters for test
skproj.params['git_repo'] = 'yaronha/tstactions'  # <org>/<repo> the exit handler comments on
skproj.params['git_issue'] = 4                    # issue number that receives the run summary
skproj.with_secrets('inline', {'GITHUB_TOKEN': '<your git token>'})  # NOTE: inline secrets end up in project YAML - don't commit real tokens
# <a id='run-pipeline'></a>
# ## Run a pipeline workflow
# use the `run` method to execute a workflow, you can provide alternative arguments and specify the default target for workflow artifacts.<br>
# The workflow ID is returned and can be used to track the progress or you can use the hyperlinks
#
# > Note: The same command can be issued through CLI commands:<br>
# `mlrun project my-proj/ -r main -p "v3io:///users/admin/mlrun/kfp/{{workflow.uid}}/"`
#
# The dirty flag allow us to run a project with uncommited changes (when the notebook is in the same git dir it will always be dirty)
# Execute the 'main' workflow; dirty=True allows running with uncommitted git changes
run_id = skproj.run(
    'main',
    arguments={},
    dirty=True)
# #### Track pipeline results
from mlrun import get_run_db
db = get_run_db().connect()
db.list_runs(project=skproj.name, labels=f'workflow={run_id}').show()  # all runs belonging to this workflow id
# **[back to top](#top)**
| gitops_project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + nbsphinx="hidden"
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from auxiliary import get_treatment_probability
from auxiliary import get_plot_probability
from auxiliary import plot_outcomes
# -
# # Regression discontinuity design
# This following material is mostly based on the following review:
#
# * <NAME>., and <NAME>. (2010). [Regression discontinuity designs in economics](https://www.aeaweb.org/articles?id=10.1257/jel.48.2.281). *Journal of Economic Literature, 48*(2), 281–355.
#
# The idea of the authors is to thoroughly contrast RDD with its alternatives. They initially just mention selected features throughout the introduction but then also devote a whole section to it. This clearly is a core strength of the article. I hope to maintain this focus in my lecture. Also, their main selling point for RDD as the close cousin of the standard randomized controlled trial is that the behavioral assumption of imprecise control over the assignment variable translates
# into the statistical assumptions of a randomized experiment.
# ## Intuition
# **Key points:**
#
# - RD designs can be invalid if individuals can precisely manipulate the assignment variable - discontinuity rules might generate incentives
#
# - If individuals - even while having some influence - are unable to precisely manipulate the assignment variable, a consequence of this is that the variation in treatment near the threshold is randomized as though from a randomized experiment - contrast to IV assumption
#
# - RD designs can be analyzed - and tested - like randomized experiments.
#
# - Graphical representation of an RD design is helpful and informative, but the visual presentation should not be tilted toward either finding an effect or finding no effect.
#
# - Nonparametric estimation does not represent a "solution" to functional form issues raised by RD designs. It is therefore helpful to view it as a complement to - rather than a substitute for - parametric estimation.
#
# - Goodness-of-fit and other statistical tests can help rule out overly restrictive specifications.
# **Baseline**
#
# A simple way to estimating the treatment effect $\tau$ is to run the following linear regression.
#
# \begin{align*}
# Y = \alpha + D \tau + X \beta + \epsilon,
# \end{align*}
#
# where $D \in [0, 1]$ and we have $D = 1$ if $X \geq c$ and $D=0$ otherwise.
# **Baseline setup**
#
# <img src="material/fig-1.png" width="500">
#
# * "all other factors" determining $Y$ must be evolving "smoothly" (continously) with respect to $X$.
#
# * the estimate will depend on the functional form
# **Potential outcome framework**
#
# <img src="material/fig-2.png" width="500">
# **Potential outcome framework**
#
#
# Suppose $D = 1$ if $Z \geq z_0$, and $D=0$ otherwise
# \begin{align*}
# \Rightarrow\begin{cases}
# E(Y \mid Z = z) = E(Y_0 \mid Z = z) & \text{for}\quad Z < z_0 \\
# E(Y \mid Z = z) = E(Y_1 \mid Z = z) & \text{for}\quad Z \geq z_0
# \end{cases}
# \end{align*}
#
# Suppose $E(Y_1\mid Z = z), E(Y_0\mid Z = z)$ are continuous in $z$.
# \begin{align*}
# \Rightarrow\begin{cases}
# \lim_{\epsilon \searrow 0} E(Y_0\mid Z = z_0 - \epsilon) = E(Y_0\mid Z = z_0) \\
# \lim_{\epsilon \searrow 0} E(Y_1\mid Z = z_0 + \epsilon) = E(Y_1\mid Z = z_0) \\
# \end{cases}
# \end{align*}
#
# \begin{align*}
# &\lim_{\epsilon \searrow 0} E(Y\mid Z = z_0 + \epsilon) - \lim_{\epsilon \searrow 0}E(Y\mid Z = z_0 - \epsilon) \\
# &\qquad= \lim_{\epsilon \searrow 0} E(Y_1\mid Z = z_0 + \epsilon) - \lim_{\epsilon \searrow 0}E(Y_0\mid Z = z_0 - \epsilon) \\
# &\qquad=E(Y_1\mid Z = z_0) - E(Y_0\mid Z = z_0) \\
# &\qquad=E(Y_1 - Y_0\mid Z = z_0)
# \end{align*}
#
# $\Rightarrow$ average treatment effect at the cutoff
# ### Sharp and Fuzzy design
# Evaluation grid for the assignment variable, normalized to [0, 1].
grid = np.linspace(0, 1.0, num=1000)

# Treatment probability as a function of the assignment variable: in the
# sharp design it jumps from 0 to 1 at the cutoff, in the fuzzy design the
# jump is smaller than 1 (helpers imported from auxiliary.py).
for version in ["sharp", "fuzzy"]:
    probs = get_treatment_probability(version, grid)
    get_plot_probability(version, grid, probs)

# Corresponding (potential) outcome plots for both designs.
for version in ["sharp", "fuzzy"]:
    plot_outcomes(version, grid)
# **Alternatives**
#
# Consider the standard assumptions for matching:
#
# - ignorability - trivially satisfied by research design as there is no variation left in $D$ conditional on $X$
# - common support - cannot be satisfied and replaced by continuity
#
# Lee and Lemieux (2010) emphasize the close connection of RDD to randomized experiments.
# - How does the graph in the potential outcome framework change?
# <img src="material/fig-3.png" width="500">
# Continuity, the key assumption of RDD, is a consequence of the research design (e.g. randomization) and not simply imposed.
# ## Identification
# **Question**
#
# How do I know whether an RD design is appropriate for my context? When are the identification assumptions plausible or implausible?
# **Answers**
#
# $\times$ An RD design will be appropriate if it is plausible that all other unobservable factors are "continuously" related to the assignment variable.
#
# $\checkmark$ When there is a continuously distributed stochastic error component to the assignment variable - which can occur when optimizing agents do not have \textit{precise} control over the assignment variable - then the variation in the treatment will be as good as randomized in a neighborhood around the discontinuity threshold.
# **Question**
#
# Is there any way I can test those assumptions?
# **Answers**
#
# $\times$ No, the continuity assumption is necessary so there are no tests for the validity of the design.
#
# $\checkmark$ Yes. As in randomized experiment, the distribution of observed baseline covariates should not change discontinuously around the threshold.
# **Simplified setup**
#
# \begin{align*}
# Y & = D \tau + W \delta_1 + U \\
# D & = I [X \geq c] \\
# X & = W \delta_2 + V
# \end{align*}
#
# - $W$ is the vector of all predetermined and observable characteristics.
#
# What are the source of heterogeneity in the outcome and assignment variable?
# The setup for an RD design is more flexible than other estimation strategies.
# - We allow for $W$ to be endogenously determined as long as it is determined prior to $V$.
# - We take no stance as to whether some elements $\delta_1$ and $\delta_2$ are zero (exclusion restrictions)
# - We make no assumptions about the correlations between $W$, $U$, and $V$.
# <img src="material/fig-4.png" width="500">
# **Local randomization**
#
# We say individuals have imprecise control over $X$ when conditional on $W = w$ and $U = u$ the density of $V$ (and hence $X$) is continuous.
# **Applying Bayes' rule**
#
# \begin{align*}
# & \Pr[W = w, U = u \mid X = x] \\
# &\qquad\qquad = f(x \mid W = w, U = u) \quad\frac{\Pr[W = w, U = u]}{f(x)}
# \end{align*}
# **Local randomization:** If individuals have imprecise control over $X$ as defined above, then $\Pr[W =w, U = u \mid X = x]$ is continuous in $x$: the treatment is "as good as" randomly assigned around the cutoff.
#
# $\Rightarrow$ the behavioral assumption of imprecise control of $X$ around the threshold has the prediction that treatment is locally randomized.
# **Consequences**
#
# - testing prediction that $\Pr[W =w, U = u \mid X = x]$ is continuous in $X$ by at least looking at $\Pr[W =w\mid X = x]$
# - irrelevance of including baseline covariates
# ## Interpretation
# **Questions**
#
# To what extent are results from RD designs generalizable?
# **Answers**
#
# $\times$ The RD estimate of the treatment effect is only applicable to the subpopulation of individuals at the discontinuity threshold and uninformative about the effect everywhere else.
#
# $\checkmark$ The RD estimand can be interpreted as a weighted average treatment effect, where the weights are relative ex ante probability that the value of an individual's assignment variable will be in the neighborhood of the threshold.
# ## Alternative evaluation strategies
#
# - randomized experiment
# - regression discontinuity design
# - matching on observables
# - instrumental variables
#
# How do the (assumed) relationships between treatment, observables, and unobservable differ across research designs?
# **Endogenous dummy variable**
#
# \begin{align*}
# Y & = D \tau + W \delta_1 + U \\
# D & = I[X \geq c] \\
# X & = W \delta_2 + V
# \end{align*}
#
# <img src="material/fig-5-a.png" width="500">
#
# * By construction $X$ is not related to any other observable or unobservable characteristic.
# <img src="material/fig-5-b.png" width="500">
#
# * $W$ and $D$ might be systematically related to $X$
# <img src="material/fig-5-c.png" width="500">
#
# * The crucial assumption is that the two lines in the left graph are actually superimposed on each other.
# <img src="material/fig-5-d.png" width="500">
#
# * The instrument must affect the treatment probability.
# * A proper instrument requires the line in the left graph to be flat.
# ## Estimation
# ### Lee (2008)
#
# The author studies the "incumbency advantage", i.e. the overall causal impact of being the current incumbent party in a district on the votes obtained in the district's election.
#
# * Lee, <NAME>. (2008). Randomized experiments from non-random selection in U.S. House elections. Journal of Econometrics.
# Lee (2008) U.S. House elections data: one row per district election.
df_base = pd.read_csv("../../datasets/processed/msc/house.csv")
df_base.head()
# The column `vote_last` refers to the Democrat's winning margin and is thus bounded between $-1$ and $1$. So a positive number indicates a Democrat as the incumbent.
# ### What are the basic characteristics of the dataset?
# Scatter of the first two columns (vote_last vs. vote_next).
df_base.plot.scatter(x=0, y=1)
# What is the re-election rate?
# Cross-tabulate incumbency (vote_last > 0) against winning next time.
# NOTE(review): the next-election threshold is 0.5 while vote_last is a
# margin centered at 0 -- presumably vote_next is a vote *share*; confirm.
pd.crosstab(
    df_base.vote_last > 0.0, df_base.vote_next > 0.5, margins=True, normalize="columns",
)
# ### Regression discontinuity design
# How does the average vote in the next election look like as we move along last year's election.
# Bin last election's margin into 200 equal-width bins and plot the mean
# next-election outcome per bin.
df_base["bin"] = pd.cut(df_base.vote_last, 200, labels=False)
df_base.groupby("bin").vote_next.mean().plot()
# Now we turn to an explicit model of the conditional mean.
# +
def fit_regression(incumbent, level=4):
    """Fit a polynomial regression of ``vote_next`` on ``vote_last``
    on one side of the cutoff.

    Parameters
    ----------
    incumbent : str
        "republican" (vote_last < 0) or "democratic" (vote_last > 0);
        selects the subsample on that side of the cutoff.
    level : int, default 4
        Highest polynomial order of ``vote_last`` to include.

    Returns
    -------
    Fitted statsmodels OLS results object.

    Notes
    -----
    Reads the module-level ``df_base`` DataFrame.
    """
    assert incumbent in ["republican", "democratic"]

    # Select the districts on the relevant side of the cutoff.
    if incumbent == "republican":
        df_incumbent = df_base.loc[df_base.vote_last < 0.0, :].copy()
    else:
        df_incumbent = df_base.loc[df_base.vote_last > 0.0, :].copy()

    # Build the polynomial terms vote_last^2 ... vote_last^level.
    # (The loop variable previously shadowed the ``level`` parameter; it is
    # renamed to ``power`` for clarity -- behavior is unchanged because the
    # range is evaluated once.)
    formula = "vote_next ~ vote_last"
    for power in range(2, level + 1):
        label = "vote_last_{:}".format(power)
        df_incumbent.loc[:, label] = df_incumbent["vote_last"] ** power
        formula += f" + {label}"

    rslt = smf.ols(formula=formula, data=df_incumbent).fit()
    return rslt
# Fit and print the cubic regression for both parties.
# NOTE(review): ``rslt = dict()`` is dead code -- the name is immediately
# rebound inside the loop; presumably results were meant to be stored per
# party (e.g. ``rslt[incumbent] = ...``).
rslt = dict()
for incumbent in ["republican", "democratic"]:
    rslt = fit_regression(incumbent, level=3)
    title = "\n\n {:}\n".format(incumbent.capitalize())
    print(title, rslt.summary())
# -
# How do the predictions look?
for incumbent in ["republican", "democratic"]:
    # Re-fit with a 4th-order polynomial on this side of the cutoff.
    rslt = fit_regression(incumbent, level=4)

    # For our predictions, we need to set up a grid for the evaluation.
    # Republicans are incumbents left of the cutoff, Democrats right of it.
    if incumbent == "republican":
        grid = np.linspace(-0.5, 0.0, 100)
    else:
        grid = np.linspace(+0.0, 0.5, 100)

    # Rebuild the same polynomial features that were used in the fit.
    df_grid = pd.DataFrame(grid, columns=["vote_last"])
    for level in range(2, 5):
        label = "vote_last_{:}".format(level)
        df_grid.loc[:, label] = df_grid["vote_last"] ** level

    ax = rslt.predict(df_grid).plot(title=incumbent.capitalize())
    plt.show()
# We can now compute the difference at the cutoffs to get an estimate for the treatment effect.
# +
# Estimate the treatment effect as the jump in binned means at the cutoff:
# with 200 bins over [-1, 1], bins 99 and 100 sit just left/right of zero.
before_cutoff = df_base.groupby("bin").vote_next.mean()[99]
after_cutoff = df_base.groupby("bin").vote_next.mean()[100]

effect = after_cutoff - before_cutoff
print("Treatment Effect: {:5.3f}%".format(effect * 100))
# -
# ### How does the estimated treatment effect depend on the choice of the bin width?
for num_bins in [100, 200]:
    df = df_base.copy(deep=True)
    df["bin"] = pd.cut(df_base.vote_last, num_bins, labels=False)
    info = df.groupby("bin").vote_next.mean()

    # Index of the last bin below the cutoff. Use integer division:
    # ``num_bins / 2`` yields a float, and float labels as Series lookups
    # are deprecated/removed in modern pandas.
    lower = num_bins // 2 - 1
    effect = info[lower + 1] - info[lower]
    print(
        " Number of bins: {:}, Width {:>5}, Effect {:5.2f}%".format(
            num_bins, 1.0 / num_bins, effect * 100
        )
    )
# ### Regression
# There are several alternatives to estimate the conditional mean functions.
#
# * pooled regressions
#
# * local linear regressions
# It will be useful to split the sample by the cutoff value
# for easier access going forward.
# Treatment indicator: D = True iff the Democrat won the last election.
df_base["D"] = df_base.vote_last > 0
# #### Pooled regression
# We estimate the conditional mean using the whole sample.
#
# \begin{align*}
# Y = \alpha_r + \tau D + \beta X + \epsilon
# \end{align*}
#
# This allows for a difference in levels but not slope.
smf.ols(formula="vote_next ~ vote_last + D", data=df_base).fit().summary()
# #### Local linear regression
# We now turn to local regressions by restricting the estimation to observations close to the cutoff.
#
# \begin{align*}
# Y = \alpha_r + \tau D + \beta X + \gamma X D + \epsilon,
# \end{align*}
#
# where $-h \leq X \leq h$. This allows for a difference in levels and slope.
for h in [0.3, 0.2, 0.1, 0.05, 0.01]:
    # Restrict the sample to observations within bandwidth h of the cutoff.
    df = df_base[df_base.vote_last.between(-h, h)]

    formula = "vote_next ~ D + vote_last + D * vote_last"
    rslt = smf.ols(formula=formula, data=df).fit()

    # Position 1 is the coefficient on D (the treatment effect). Use
    # ``.iloc`` explicitly: integer lookups like ``params[1]`` on a
    # label-indexed Series are deprecated/removed in modern pandas.
    info = [h, rslt.params.iloc[1] * 100, rslt.pvalues.iloc[1]]
    print(" Bandwidth: {:>4} Effect {:5.3f}% pvalue {:5.3f}".format(*info))
# There exists some work that can guide the choice of the bandwidth. Now, let's summarize the key issues and some review best practices.
# ## Checklist
# **Recommendations:**
# - To assess the possibility of manipulations of the assignment variable, show its distribution.
# - Present the main RD graph using binned local averages.
# - Graph a benchmark polynomial specification
# - Explore the sensitivity of the results to a range of bandwidth, and a range of orders to the polynomial.
# - Conduct a parallel RD analysis on the baseline covariates.
# - Explore the sensitivity of the results to the inclusion of baseline covariates.
# ## References
# - <NAME>., <NAME>., and <NAME>. (2001). [Identification and estimation of treatment effects with a regression-discontinuity design](https://www.jstor.org/stable/2692190). *Econometrica, 69*(1), 201–209.
# - <NAME>. (2008). [Randomized experiments from nonrandom selection in US House elections](https://www.sciencedirect.com/science/article/abs/pii/S0304407607001121). *Journal of Econometrics, 142*(2), 675–697.
# - <NAME>., and <NAME>. (2010). [Regression discontinuity designs in economics](https://www.aeaweb.org/articles?id=10.1257/jel.48.2.281). *Journal of economic literature, 48*(2), 281–355.
# - <NAME>., and <NAME>. (1960). [Regression-discontinuity analysis: An alternative to the ex-post facto experiment](https://psycnet.apa.org/record/1962-00061-001). *Journal of Educational Psychology, 51*(6), 309–317.
| lectures/regression-discontinuity/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: venv-win
# language: python
# name: venv-win
# ---
# # Autoencoder Network Model
#
# ## Train for 5 epochs
#
# Observe the loss decreasing while training.
# %%capture
# !python ConvolutionalAutoEncoder.py --max_epochs=5
# ## Inspect logs
# ⚠️ Make sure to **Stop** the cell execution after observing the logs
#
# !tensorboard --logdir=./lightning_logs
# ## Load trained model
# Checkpoints are saved incrementally for each training session under `./lightning_logs/version_X`.
# +
# Make the parent directory importable so the shared ``utils`` module and
# the model definition can be found.
__import__("sys").path.append("..")
import utils
from ConvolutionalAutoEncoder import ConvolutionalAutoEncoder

# Load checkpoint version 0 from ./lightning_logs for evaluation.
model = utils.load_trained_model_for_evaluation(ConvolutionalAutoEncoder, 0)
print(model)
# -
# ## Generate similar MNIST images
# +
import torch
import random

# Number of images in the MNIST test split.
test_len = model.mnist_test.data.shape[0]

print(" Original image <----> Generated Image")
for i in range(0, 10):
    # Pick a random test image. Use randrange(test_len): the previous
    # randint(0, test_len) has an *inclusive* upper bound, so it could
    # return test_len and raise an IndexError.
    orig_img, _ = model.mnist_test[random.randrange(test_len)]
    orig_img = orig_img.view(1, 1, 28, 28)

    # Run the autoencoder and detach the reconstruction from the graph.
    predicted_img = model(orig_img).detach()
    utils.plot_images([orig_img.view(28, 28), predicted_img.view(28, 28)])
| 05-02-Convolutional-Autoencoder/Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.utils.extmath import softmax
import matplotlib.pyplot as plt
from matplotlib import pyplot
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from mpl_toolkits.axes_grid1 import make_axes_locatable
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = ['Times New Roman'] + plt.rcParams['font.serif']
# ## Load and display MNIST handwritten digits dataset
# +
# Load data from https://www.openml.org/d/554
X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
# X = X.values ### Uncomment this line if you are having type errors in plotting. It is loading as a pandas dataframe, but our indexing is for numpy array.

# Scale the grayscale pixel values from [0, 255] into [0, 1].
X = X / 255.
print('X.shape', X.shape)
print('y.shape', y.shape)

'''
Each row of X is a vectroization of an image of 28 x 28 = 784 pixels.
The corresponding row of y holds the true class label from {0,1, .. , 9}.
'''
# -
# see how many images are there for each digit
for j in np.arange(10):
    # y holds *string* labels, hence the comparison against str(j).
    idx = np.where(y==str(j))
    idx = np.asarray(idx)[0,:]
    print('digit %i length %i' % (j, len(idx)))
# Plot some sample images
ncols = 10
nrows = 4

# One column per digit, ``nrows`` randomly chosen examples per column.
fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=[15, 6.5])
for j in np.arange(ncols):
    for i in np.arange(nrows):
        idx = np.where(y==str(j)) # index of all images of digit 'j'
        idx = np.asarray(idx)[0,:] # make idx from tuple to array
        idx_subsampled = np.random.choice(idx, nrows)
        # Reshape the flat 784-vector back into a 28 x 28 image.
        ax[i,j].imshow(X[idx_subsampled[i],:].reshape(28,28))
        # ax[i,j].title.set_text("label=%s" % y[idx_subsampled[j]])
        if i == 0:
            # ax[j,i].set_ylabel("label=%s" % y[idx_subsampled[j]])
            ax[i,j].set_title("label$=$%s" % y[idx_subsampled[i]], fontsize=14)
        # ax[i].legend()
plt.subplots_adjust(wspace=0.3, hspace=-0.1)
plt.savefig('MNIST_ex1.pdf', bbox_inches='tight')
# +
# Split the dataset into train and test sets
X_train = []
X_test = []
y_test = []
y_train = []

for i in np.arange(X.shape[0]):
    # for each example i, make it into train set with probability 0.8 and into test set otherwise
    U = np.random.rand() # Uniform([0,1]) variable
    if U<0.8:
        X_train.append(X[i,:])
        y_train.append(y[i])
    else:
        X_test.append(X[i,:])
        y_test.append(y[i])

# Stack the python lists into (n_samples, 784) / (n_samples,) arrays.
X_train = np.asarray(X_train)
X_test = np.asarray(X_test)
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)

print('X_train.shape', X_train.shape)
print('X_test.shape', X_test.shape)
print('y_train.shape', y_train.shape)
print('y_test.shape', y_test.shape)
# +
def sample_binary_MNIST(list_digits=['0','1'], full_MNIST=None, noise_ratio=0):
    """Random 80/20 train/test split of two MNIST digit classes.

    Parameters
    ----------
    list_digits : list of two str
        The two digit labels; the first is encoded as 0, the second as 1.
    full_MNIST : [X, y] or None
        Pre-loaded data (X: (n, 784) numpy array scaled to [0, 1], y:
        string labels). If None, the dataset is fetched from OpenML.
    noise_ratio : float in [0, 1]
        Probability that a sample is corrupted by adding a single uniform
        random scalar to *all* of its pixels.

    Returns
    -------
    X_train, X_test : (n_i, 784) arrays
    y_train, y_test : (n_i, 1) integer arrays of 0/1 labels
    """
    if full_MNIST is not None:
        X, y = full_MNIST
    else:
        X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
        X = X / 255.

    # Keep only the samples whose label is one of the two digits.
    idx = [i for i in np.arange(len(y)) if y[i] in list_digits]
    X01 = X[idx, :]
    y01 = y[idx]

    X_train = []
    X_test = []
    y_test = []   # integer 0/1 labels
    y_train = []  # integer 0/1 labels
    for i in np.arange(X01.shape[0]):
        # Each sample goes to the train set with probability 0.8.
        U = np.random.rand()   # train/test assignment
        U2 = np.random.rand()  # noise/no-noise assignment

        # Second listed digit -> label 1, first -> label 0.
        label = int(y01[i] == list_digits[1])

        # Copy the row so the optional noise does not mutate X01 in place
        # (the previous in-place ``+=`` aliased the stored rows).
        pixel_value = X01[i, :].copy()
        if U2 < noise_ratio:
            # One uniform scalar added to every pixel of this image.
            pixel_value += np.random.rand()

        if U < 0.8:
            X_train.append(pixel_value)
            y_train.append(label)
        else:
            X_test.append(pixel_value)
            y_test.append(label)

    X_train = np.asarray(X_train)
    X_test = np.asarray(X_test)
    y_train = np.asarray(y_train).reshape(-1, 1)
    y_test = np.asarray(y_test).reshape(-1, 1)
    return X_train, X_test, y_train, y_test
# Smoke-test the sampler on digits 0 vs. 1 and inspect the shapes.
X_train, X_test, y_train, y_test = sample_binary_MNIST(list_digits=['0','1'], full_MNIST=[X, y])

print('X_train.shape', X_train.shape)
print('X_test.shape', X_test.shape)
print('y_train.shape', y_train.shape)
print('y_test.shape', y_test.shape)
print('y_test', y_test)
# -
def list2onehot(y, list_classes):
    """
    y = list of class labels of length n
    output = n x k array, i th row = one-hot encoding of y[i] (e.g., [0,0,1,0,0])
    """
    Y = np.zeros(shape=[len(y), len(list_classes)], dtype=int)
    # Set a 1 wherever the sample's label matches the class at that column.
    for row, label in enumerate(y):
        for col, cls in enumerate(list_classes):
            if label == cls:
                Y[row, col] = 1
    return Y
# +
def sample_multiclass_MNIST(list_digits=['0','1', '2'], full_MNIST=None):
    """Random 80/20 train/test split of the given MNIST digits, with the
    labels one-hot encoded in the order of ``list_digits``."""
    if full_MNIST is None:
        X, y = fetch_openml('mnist_784', version=1, return_X_y=True)
        X = X / 255.
    else:
        X, y = full_MNIST

    # One-hot encode all labels, then keep only the requested digits.
    Y = list2onehot(y.tolist(), list_digits)
    idx = [i for i in np.arange(len(y)) if y[i] in list_digits]
    X01 = X[idx, :]
    y01 = Y[idx, :]

    train_X, train_Y = [], []  # one-hot label rows
    test_X, test_Y = [], []    # one-hot label rows
    for i in np.arange(X01.shape[0]):
        # Assign each sample to the train set with probability 0.8.
        if np.random.rand() < 0.8:
            train_X.append(X01[i, :])
            train_Y.append(y01[i, :].copy())
        else:
            test_X.append(X01[i, :])
            test_Y.append(y01[i, :].copy())

    return (np.asarray(train_X), np.asarray(test_X),
            np.asarray(train_Y), np.asarray(test_Y))
# test
# Smoke-test the multiclass sampler and inspect the shapes.
X_train, X_test, y_train, y_test = sample_multiclass_MNIST(list_digits=['0','1', '2'], full_MNIST=[X, y])

print('X_train.shape', X_train.shape)
print('X_test.shape', X_test.shape)
print('y_train.shape', y_train.shape)
print('y_test.shape', y_test.shape)
print('y_test', y_test)
# -
# ## Logistic Regression
# sigmoid and logit function
def sigmoid(x):
    """Logistic sigmoid 1 / (1 + exp(-x)).

    Computed as exp(-logaddexp(0, -x)), which is algebraically identical
    to exp(x) / (1 + exp(x)) but does not overflow: the naive form
    returns nan for x greater than ~710.
    """
    return np.exp(-np.logaddexp(0, -x))
# plot sigmoid function
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[10,3])
x = np.linspace(-7, 7, 100)
# NOTE(review): the label uses backslash escapes (\s, \e) in a non-raw
# string; it renders as intended today but should ideally be r"...".
ax.plot(x, sigmoid(x), color='blue', label="$y=\sigma(x)=\exp(x)/(1+\exp(x))$")
# Dashed guides: horizontal asymptote y=1 and the inflection point at x=0.
plt.axhline(y=1, color='g', linestyle='--')
plt.axvline(x=0, color='g', linestyle='--')
ax.legend()
plt.savefig('sigmoid_ex.pdf', bbox_inches='tight')
def fit_LR_GD(Y, H, W0=None, sub_iter=100, stopping_diff=0.01):
    '''
    Logistic regression fitted by gradient descent.

    Y = (n x 1) binary labels, H = (p x n) feature matrix (\Phi in the
    lecture note), W = (p x 1) coefficients.
    Model: Y ~ Bernoulli(Q), Q = sigmoid(H.T @ W)
    MLE -->
    Find \hat{W} = argmin_W ( sum_j ( log(1+exp(H_j.T @ W) ) - Y.T @ H.T @ W ) )

    Stops after ``sub_iter`` iterations or once the gradient norm drops
    below ``stopping_diff``.
    '''
    if W0 is None:
        W0 = np.random.rand(H.shape[0], 1)  # random init when no starting point given
    W1 = W0.copy()

    i = 0
    grad = np.ones(W0.shape)
    while (i < sub_iter) and (np.linalg.norm(grad) > stopping_diff):
        Q = 1 / (1 + np.exp(-H.T @ W1))  # predicted probabilities, same shape as Y
        grad = H @ (Q - Y)
        # Diminishing step size ~ log(i+2)/sqrt(i+1). The previous schedule
        # log(i+1)/sqrt(i+1) evaluates to 0 at i=0, so the first iteration
        # was a wasted no-op.
        W1 = W1 - (np.log(i + 2) / np.sqrt(i + 1)) * grad
        i = i + 1
        # print('iter %i, grad_norm %f' %(i, np.linalg.norm(grad)))
    return W1
def fit_LR_NR(Y, H, W0=None, sub_iter=100, stopping_diff=0.01):
    '''
    Convex optimization algorithm for Logistic Regression using the Newton-Raphson algorithm.
    Y = (n x 1), H = (p x n) (\Phi in lecture note), W = (p x 1)
    Logistic Regression: Y ~ Bernoulli(Q), Q = sigmoid(H.T @ W)
    MLE -->
    Find \hat{W} = argmin_W ( sum_j ( log(1+exp(H_j.T @ W) ) - Y.T @ H.T @ W ) )
    '''
    # NOTE: deliberate exercise stub -- the body is left for the reader to
    # implement, so calling this function currently returns None.
    ### Implement by yourself.
# +
# fit logistic regression using GD
X_train, X_test, y_train, y_test = sample_binary_MNIST(['0', '1'], full_MNIST = [X,y])

# Feature matrix of size (p x n) = (feature dim x samples)
H_train = np.vstack((np.ones(X_train.shape[0]), X_train.T)) # add first row of 1's for bias features
W = fit_LR_GD(Y=y_train, H=H_train)

# Visualize the learned coefficients (excluding the bias term) as an image.
plt.imshow(W[1:,:].reshape(28,28))
# +
# plot fitted logistic regression coefficients for several digit pairs
digit_list_list = [['0','1'],['0','7'],['2','3'],['2', '8']] # list of list of two digits

# fit LR for each cases
W_array = []
for i in np.arange(len(digit_list_list)):
    L = digit_list_list[i]
    X_train, X_test, y_train, y_test = sample_binary_MNIST(list_digits=L, full_MNIST=[X, y])
    H_train = np.vstack((np.ones(X_train.shape[0]), X_train.T))  # add first row of 1's for bias features
    # (A duplicated second call to fit_LR_GD was removed here -- its result
    # simply overwrote the first and doubled the training time.)
    W = fit_LR_GD(Y=y_train, H=H_train)
    W_array.append(W.copy())
W_array = np.asarray(W_array)

# make plot: one panel per digit pair, shared color scale.
fig, ax = plt.subplots(nrows=1, ncols=len(digit_list_list), figsize=[16, 4])
for i in np.arange(len(digit_list_list)):
    L = digit_list_list[i]
    W = W_array[i]
    im = ax[i].imshow(W[1:,:].reshape(28,28), vmin=np.min(W_array), vmax=np.max(W_array))
    ax[i].title.set_text("LR coeff. for %s vs. %s" % (L[0], L[1]))

fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.92, 0.15, 0.01, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.savefig('LR_MNIST_training_ex.pdf', bbox_inches='tight')
# -
def compute_accuracy_metrics(Y_test, P_pred, use_opt_threshold=False):
    """Binary-classification accuracy metrics.

    Parameters
    ----------
    Y_test : array of 0/1 true labels.
    P_pred : array of predicted probabilities for Y_test (same length).
    use_opt_threshold : bool
        If True, threshold at the ROC-optimal (Youden) point instead of 0.5.

    Returns
    -------
    dict with labels, predictions, AUC, optimal threshold, and the standard
    confusion-matrix-derived rates.
    """
    fpr, tpr, thresholds = metrics.roc_curve(Y_test, P_pred, pos_label=None)
    mythre = thresholds[np.argmax(tpr - fpr)]  # Youden's J optimal threshold
    myauc = metrics.auc(fpr, tpr)

    # Threshold the predicted probabilities into hard 0/1 labels.
    threshold = 0.5
    if use_opt_threshold:
        threshold = mythre
    # Bug fix: this previously read the *global* variable ``Q`` instead of
    # the ``P_pred`` argument, silently depending on notebook run order.
    Y_pred = P_pred.copy()
    Y_pred[Y_pred < threshold] = 0
    Y_pred[Y_pred >= threshold] = 1

    mcm = confusion_matrix(Y_test, Y_pred)
    tn = mcm[0, 0]
    tp = mcm[1, 1]
    fn = mcm[1, 0]
    fp = mcm[0, 1]

    accuracy = (tp + tn) / (tp + tn + fp + fn)
    # Bug fix: sensitivity and specificity were swapped.
    # Sensitivity (recall/TPR) = TP/(TP+FN); specificity (TNR) = TN/(TN+FP).
    sensitivity = tp / (tp + fn)
    specificity = tn / (tn + fp)
    precision = tp / (tp + fp)
    fall_out = fp / (fp + tn)
    miss_rate = fn / (fn + tp)

    # Save results
    results_dict = {}
    results_dict.update({'Y_test': Y_test})
    results_dict.update({'Y_pred': Y_pred})
    results_dict.update({'AUC': myauc})
    results_dict.update({'Opt_threshold': mythre})
    results_dict.update({'Accuracy': accuracy})
    results_dict.update({'Sensitivity': sensitivity})
    results_dict.update({'Specificity': specificity})
    results_dict.update({'Precision': precision})
    results_dict.update({'Fall_out': fall_out})
    results_dict.update({'Miss_rate': miss_rate})
    return results_dict
# +
# fit logistic regression using GD and compute binary classification accuracies
# Get train and test data
digits_list = ['4', '7']
# digits_list = ['3', '8']
X_train, X_test, y_train, y_test = sample_binary_MNIST(digits_list, full_MNIST = [X,y], noise_ratio = 0.9)

# Feature matrix of size (p x n) = (feature dim x samples)
list_train_size = [1,10, 30, 100]

# train the regression coefficients for all cases
W_list = []
results_list = []
for i in np.arange(len(list_train_size)):
    size = list_train_size[i]
    # Subsample ``size`` training examples (with replacement).
    idx = np.random.choice(np.arange(len(y_train)), size)
    X_train0 = X_train[idx, :]
    y_train0 = y_train[idx]

    # Train the logistic regression model
    H_train0 = np.vstack((np.ones(X_train0.shape[0]), X_train0.T)) # add first row of 1's for bias features
    W = fit_LR_GD(Y=y_train0, H=H_train0)
    W_list.append(W.copy()) # make sure use copied version of W since the same name is overrided in the loop

    # Get predicted probabilities
    H_test = np.vstack((np.ones(X_test.shape[0]), X_test.T))
    Q = 1 / (1 + np.exp(-H_test.T @ W)) # predicted probabilities for y_test

    # Compute binary classification accuracies
    results_dict = compute_accuracy_metrics(Y_test=y_test, P_pred = Q)
    results_dict.update({'train size':X_train0.shape[0]}) # add the train data size to the results dictionary
    results_list.append(results_dict.copy())

# Print out the results
# (kept as an inert triple-quoted string: commented-out diagnostic code)
"""
keys_list = [i for i in results_dict.keys()]
for key in keys_list:
if key not in ['Y_test', 'Y_pred']:
print('%s = %f' % (key, results_dict.get(key)))
"""

# make plot: one panel per train size; shared color scale across panels.
fig, ax = plt.subplots(nrows=1, ncols=len(list_train_size), figsize=[16, 4])
for i in np.arange(len(list_train_size)):
    result_dict = results_list[i]
    W = W_list[i][1:,:]
    im = ax[i].imshow(W.copy().reshape(28,28), vmin=np.min(W_list), vmax=np.max(W_list))
    subtitle = ""
    # NOTE(review): the comprehension variable shadows the loop variable
    # ``i``; harmless in Python 3 (comprehensions have their own scope).
    keys_list = [i for i in results_list[i].keys()]
    for key in keys_list:
        if key not in ['Y_test', 'Y_pred', 'AUC', 'Opt_threshold']:
            subtitle += "\n" + str(key) + " = " + str(np.round(results_list[i].get(key),3))
    ax[i].set_title('Opt. regression coeff.', fontsize=13)
    ax[i].set_xlabel(subtitle, fontsize=20)

fig.subplots_adjust(right=0.9)
fig.suptitle("MNIST Binary Classification by LR for %s vs. %s" % (digits_list[0], digits_list[1]), fontsize=20, y=1.05)
cbar_ax = fig.add_axes([0.92, 0.15, 0.01, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.savefig('LR_MNIST_test_ex1.svg', bbox_inches='tight')
# -
# ## Multiclass Logistic Regression
def fit_MLR_GD(Y, H, W0=None, sub_iter=100, stopping_diff=0.01):
    '''
    Multiclass logistic regression fitted by gradient descent.

    Y = (n x k) one-hot labels, H = (p x n) (\Phi in lecture note),
    W = (p x k) coefficients, one column per class.
    Class scores are squashed entrywise with the sigmoid (one-vs-rest
    style), and the gradient is H @ (Q - Y).

    Stops after ``sub_iter`` iterations or once the gradient norm drops
    below ``stopping_diff``.
    '''
    k = Y.shape[1]  # number of classes
    if W0 is None:
        W0 = np.random.rand(H.shape[0], k)  # random init when no starting point given
    W1 = W0.copy()

    i = 0
    grad = np.ones(W0.shape)
    while (i < sub_iter) and (np.linalg.norm(grad) > stopping_diff):
        Q = 1 / (1 + np.exp(-H.T @ W1))  # probability matrix, same shape as Y
        grad = H @ (Q - Y)
        # Diminishing step size ~ log(i+2)/sqrt(i+1); the previous
        # log(i+1)/sqrt(i+1) schedule is 0 at i=0 and wasted the first
        # iteration (same fix as in fit_LR_GD).
        W1 = W1 - (np.log(i + 2) / np.sqrt(i + 1)) * grad
        i = i + 1
        # print('iter %i, grad_norm %f' %(i, np.linalg.norm(grad)))
    return W1
# +
def custom_softmax(a):
    """
    Given an array a = [a_1, .., a_k], compute the softmax distribution
    p = [p_1, .., p_k] where p_i \propto exp(a_i).

    Accepts a list or 1-D array (returns one distribution) or a 2-D array
    (softmax applied row by row).
    """
    a = np.asarray(a, dtype=float)
    if a.ndim <= 1:
        # Subtract the max before exponentiating for numerical stability.
        # (Also fixes the previous crash on 1-D ndarrays, which fell into
        # the axis=1 branch.)
        p = np.exp(a - np.max(a))
        return p / np.sum(p)
    # Row-wise stabilization: subtract each row's own max (the previous
    # version subtracted the global max, which is correct but less stable
    # when rows have very different scales), then normalize each row.
    p = np.exp(a - np.max(a, axis=1, keepdims=True))
    return p / np.sum(p, axis=1, keepdims=True)
# Sanity checks: the probabilities sum to 1; compare with sklearn's softmax.
print(np.sum(custom_softmax([1,20,30,50])))

a= np.ones((2,3))
print(softmax(a))
# -
def multiclass_accuracy_metrics(Y_test, P_pred, class_labels=None, use_opt_threshold=False):
    """
    Compute multiclass classification accuracy metrics.

    Parameters
    ----------
    Y_test : ndarray of shape (n, k)
        One-hot encoded true labels (assumes exactly one 1 per row -- TODO confirm).
    P_pred : ndarray of shape (n, k)
        Predicted class probabilities for each sample.
    class_labels, use_opt_threshold :
        Currently unused; kept for interface compatibility.

    Returns
    -------
    dict
        Currently holds only the confusion matrix under key 'confusion_mx'.
    """
    results_dict = {}
    # argmax yields exactly one label per sample.  The previous version
    # appended every index tied for the row maximum of P_pred, which could
    # produce more predictions than samples and break the alignment between
    # y_test and y_pred passed to confusion_matrix.
    y_test = np.argmax(Y_test, axis=1)
    y_pred = np.argmax(P_pred, axis=1)
    confusion_mx = metrics.confusion_matrix(y_test, y_pred)
    print('!!! confusion_mx', confusion_mx)
    results_dict.update({'confusion_mx': confusion_mx})
    return results_dict
# +
# fit multiclass logistic regression using GD
list_digits=['0', '1', '2']
X_train, X_test, y_train, y_test = sample_multiclass_MNIST(list_digits=list_digits, full_MNIST = [X,y])
# Feature matrix of size (p x n) = (feature dim x samples)
H_train = np.vstack((np.ones(X_train.shape[0]), X_train.T)) # add first row of 1's for bias features
W = fit_MLR_GD(Y=y_train, H=H_train)
print('!! W.shape', W.shape)
# Get predicted probabilities
H_test = np.vstack((np.ones(X_test.shape[0]), X_test.T))
Q = softmax(H_test.T @ W.copy()) # predicted probabilities for y_test # Uses sklearn's softmax for numerical stability
print('!!! Q', Q)
results_dict = multiclass_accuracy_metrics(Y_test=y_test, P_pred=Q)
# FIX: the metrics dict stores the confusion matrix under 'confusion_mx';
# the previous lookup used the nonexistent key 'results_dict' and always got None.
confusion_mx = results_dict.get('confusion_mx')
# make plot
fig, ax = plt.subplots(nrows=1, ncols=len(list_digits), figsize=[12, 4])
for i in np.arange(len(list_digits)):
    L = list_digits[i]
    # drop the bias coefficient (row 0) before reshaping into a 28x28 image
    im = ax[i].imshow(W[1:,i].reshape(28,28), vmin=np.min(W), vmax=np.max(W))
    ax[i].title.set_text("MLR coeff. for %s" % L )
    # ax[i].legend()
    # if i == len(list_digits) - 1:
cbar_ax = fig.add_axes([0.92, 0.15, 0.01, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.savefig('MLR_MNIST_ex1.pdf', bbox_inches='tight')
# +
# fit multiclass logistic regression using GD and compute multiclass classification accuracies
# Get train and test data
digits_list = ['0', '1', '2', '3', '4']
X_train, X_test, y_train, y_test = sample_multiclass_MNIST(digits_list, full_MNIST = [X,y])
# Feature matrix of size (p x n) = (feature dim x samples)
list_train_size = [1,10, 30, 100]
# train the regression coefficients for all cases
W_list = []
results_list = []
for i in np.arange(len(list_train_size)):
    size = list_train_size[i]
    # sample a random training subset of the requested size
    idx = np.random.choice(np.arange(len(y_train)), size)
    X_train0 = X_train[idx, :]
    y_train0 = y_train[idx, :]
    # Train the multiclass logistic regression model
    H_train0 = np.vstack((np.ones(X_train0.shape[0]), X_train0.T)) # add first row of 1's for bias features
    W = fit_MLR_GD(Y=y_train0, H=H_train0)
    W_list.append(W.copy()) # make sure use copied version of W since the same name is overrided in the loop
    # Get predicted probabilities
    H_test = np.vstack((np.ones(X_test.shape[0]), X_test.T))
    Q = softmax(H_test.T @ W.copy()) # predicted probabilities for y_test # Uses sklearn's softmax for numerical stability
    results_dict = multiclass_accuracy_metrics(Y_test=y_test, P_pred=Q)
    results_dict.update({'train size':X_train0.shape[0]}) # add the train data size to the results dictionary
    results_list.append(results_dict.copy())
# make plot: one row per training size; the first len(digits_list) columns show
# the per-class coefficient images, the last column shows the confusion matrix.
fig, ax = plt.subplots(nrows=len(list_train_size), ncols=len(digits_list)+1, figsize=[15, 10])
for i in np.arange(len(list_train_size)):
    for j in np.arange(len(digits_list)+1):
        if j < len(digits_list):
            L = digits_list[j]
            W = W_list[i]
            # drop the bias row (W[0,:]) before reshaping to a 28x28 image
            im = ax[i,j].imshow(W[1:,j].reshape(28,28), vmin=np.min(W), vmax=np.max(W))
            ax[i,j].title.set_text("MLR coeff. for %s" % L )
            if j == 0:
                ax[i,j].set_ylabel("train size = %i" % results_list[i].get("train size"), fontsize=13)
            divider = make_axes_locatable(ax[i,j])
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im, cax=cax)
        else:
            confusion_mx = results_list[i].get("confusion_mx")
            im_confusion = ax[i,j].matshow(confusion_mx)
            # ax[i,j].set_title("Confusion Matrix")
            ax[i,j].set_xlabel("Confusion Matrix", fontsize=13)
            # ax[i].legend()
            # if i == len(list_digits) - 1:
            divider = make_axes_locatable(ax[i,j])
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im_confusion, cax=cax)
plt.subplots_adjust(wspace=0.3, hspace=0.3)
plt.savefig('MLR_MNIST_test_ex2.pdf', bbox_inches='tight')
# -
# ## Probit Regression
# +
# probit function
from scipy.stats import norm
def probit(x):
    """Probit link function: the standard normal cumulative distribution function at x."""
    return norm.cdf(x)
# -
# plot the probit and sigmoid link functions on the same axes for comparison
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[10,3])
x = np.linspace(-7, 7, 100)
# NOTE(review): `sigmoid` is presumably the logistic function defined earlier
# in the notebook -- confirm.
ax.plot(x, sigmoid(x), color='blue', label="$y=\sigma(x)=\exp(x)/(1+\exp(x))$")
ax.plot(x, probit(x), color='red', label="$y=\psi(x)=Probit(x)$")
# dashed guides: the shared horizontal asymptote y=1 and the symmetry axis x=0
plt.axhline(y=1, color='g', linestyle='--')
plt.axvline(x=0, color='g', linestyle='--')
ax.legend()
plt.savefig('probit_ex.pdf', bbox_inches='tight')
def get_PR_loss(Y, H, W1):
    """Negative log-likelihood of the probit regression model.

    Y is the (n x 1) binary label vector, H the (p x n) feature matrix
    (phi in the lecture notes, one column per sample), W1 the (p x 1)
    coefficient vector.  Sums -[y*log(Phi(h.w)) + (1-y)*log(Phi(-h.w))]
    over the n samples, where Phi is the standard normal CDF.
    """
    num_samples = H.shape[1]
    total = 0
    for j in range(num_samples):
        score = H[:, j].T @ W1
        y_j = Y[j, :]
        # norm.cdf is exactly the probit function used elsewhere in this notebook
        total -= (y_j * np.log(norm.cdf(score)) + (1 - y_j) * np.log(norm.cdf(-score)))[0]
    return total
def fit_PR_GD(Y, H, W0=None, sub_iter=100, stopping_diff=0.01):
    '''
    Convex optimization algorithm for Probit Regression using Gradient Descent.
    Y = (n x 1), H = (p x n) (Phi in lecture note), W = (p x 1)
    Probit Regression: Y ~ Bernoulli(Q), Q = Probit(H.T @ W)

    Parameters:
        Y : (n x 1) binary label vector
        H : (p x n) feature matrix, one column per sample
        W0 : (p x 1) initial coefficients; random uniform if None
        sub_iter : maximum number of gradient steps
        stopping_diff : stop when the gradient norm falls below this value
    Returns:
        W1 : (p x 1) fitted coefficient vector
    '''
    print('fit_PR_GD called')
    # loss_list = [] # to store loss values to plot later
    if W0 is None:
        W0 = np.random.rand(H.shape[0],1) #If initial coefficients W0 is None, randomly initialize
    W1 = W0.copy()
    i = 0
    grad = np.ones(W0.shape)
    while (i < sub_iter) and (np.linalg.norm(grad) > stopping_diff):
        # Per-sample derivative of the negative log-likelihood w.r.t. the score
        # z = H.T @ W1: pdf(z) * ((1-y)/cdf(-z) - y/cdf(z)).
        Q = norm.pdf(H.T @ W1) * ( (1-Y)/norm.cdf(-H.T @ W1) - Y/norm.cdf(H.T @ W1) )
        # grad = H @ (Q - Y).T + alpha * np.ones(W0.shape[1])
        grad = H @ Q
        # gamma = 1
        # slowly decaying step size: eta = gamma * (i+1)^(-delta)
        gamma = 40
        delta = 0.005
        eta = gamma * (i+1)**-delta
        # W1 = W1 - gamma * (np.log(i+1) / (((i + 1) ** (0.5)))) * grad
        W1 = W1 - eta * grad
        i = i + 1
        loss = get_PR_loss(Y,H,W1)
        # loss_list.append(loss)
        if(i % 20 == 0):
            print('iter %i, l_PR %f' %(i,loss))
        # print('iter %i, grad_norm %f' %(i, np.linalg.norm(grad)))
    # return (W1,loss_list)
    return W1
# +
# plot fitted probit regression coefficients
# digit_list_list = [['0','1'],['0','7'],['2','3'],['2', '8']] # list of list of two digits
digit_list_list = [['0','1']] # list of list of two digits
# fit PR for each cases
W_array = []
for i in np.arange(len(digit_list_list)):
    L = digit_list_list[i]
    X_train, X_test, y_train, y_test = sample_binary_MNIST(list_digits=L, full_MNIST = [X,y])
    H_train = np.vstack((np.ones(X_train.shape[0]), X_train.T)) # add first row of 1's for bias features
    # FIX: fit_PR_GD returns only the coefficient vector W1 (its
    # (W1, loss_list) return is commented out), so unpacking two values
    # here raised an error before any plotting happened.
    W = fit_PR_GD(Y=y_train, H=H_train/1000)
    W_array.append(W.copy())
W_array = np.asarray(W_array)
# make plot
fig, ax = plt.subplots(nrows=1, ncols=len(digit_list_list), figsize=[16, 4])
for i in np.arange(len(digit_list_list)):
    L = digit_list_list[i]
    W = W_array[i]
    # only one subplot -> no subscript
    im = ax.imshow(W[1:,:].reshape(28,28), vmin=np.min(W_array), vmax=np.max(W_array))
    ax.title.set_text("PR coeff. for %s vs. %s" % (L[0], L[1]))
    # im = ax[i].imshow(W[1:,:].reshape(28,28), vmin=np.min(W_array), vmax=np.max(W_array))
    # ax[i].title.set_text("LR coeff. for %s vs. %s" % (L[0], L[1]))
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.62, 0.15, 0.01, 0.7])
# cbar_ax = fig.add_axes([0.92, 0.15, 0.01, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.savefig('PR_MNIST_training_ex.svg', bbox_inches='tight')
# -
# print(loss_list)
# NOTE(review): `loss_list` is only produced by the commented-out variant of
# fit_PR_GD that returns (W1, loss_list); as written, fit_PR_GD returns W1
# only, so this cell relies on a previous run / modified version -- confirm.
index = np.arange(1,len(loss_list)+1)
# plt.xticks(index)
plt.title('Loss per Iteration')
plt.xlabel('Iteration')
plt.ylabel('Probit Loss')
plt.plot(index,loss_list)
plt.savefig('PR_GD_Loss.svg', bbox_inches='tight')
# +
# fit probit regression using GD and compute binary classification accuracies
# Get train and test data
digits_list = ['4', '7']
X_train, X_test, y_train, y_test = sample_binary_MNIST(digits_list, full_MNIST = [X,y],noise_ratio = 0.9)
# Feature matrix of size (p x n) = (feature dim x samples)
list_train_size = [1,10, 30, 100]
# train the regression coefficients for all cases
W_list = []
results_list = []
for i in np.arange(len(list_train_size)):
    size = list_train_size[i]
    idx = np.random.choice(np.arange(len(y_train)), size)
    X_train0 = X_train[idx, :]
    y_train0 = y_train[idx]
    # Train the probit regression model
    H_train0 = np.vstack((np.ones(X_train0.shape[0]), X_train0.T)) # add first row of 1's for bias features
    W = fit_PR_GD(Y=y_train0, H=H_train0/1000) # reduce the scale of H for numerical stability
    W_list.append(W.copy()) # make sure use copied version of W since the same name is overrided in the loop
    # Get predicted probabilities
    # FIX: W was trained on H/1000, so the test features must be scaled the
    # same way, and a probit model's probabilities are Phi(H.T @ W) (standard
    # normal CDF), not the logistic sigmoid that was used here previously.
    H_test = np.vstack((np.ones(X_test.shape[0]), X_test.T)) / 1000
    Q = probit(H_test.T @ W) # predicted probabilities for y_test
    # Compute binary classification accuracies
    results_dict = compute_accuracy_metrics(Y_test=y_test, P_pred = Q)
    results_dict.update({'train size':X_train0.shape[0]}) # add the train data size to the results dictionary
    results_list.append(results_dict.copy())
    # Print out the results
    """
    keys_list = [i for i in results_dict.keys()]
    for key in keys_list:
        if key not in ['Y_test', 'Y_pred']:
            print('%s = %f' % (key, results_dict.get(key)))
    """
# make plot
fig, ax = plt.subplots(nrows=1, ncols=len(list_train_size), figsize=[16, 4])
for i in np.arange(len(list_train_size)):
    result_dict = results_list[i]
    W = W_list[i][1:,:]
    im = ax[i].imshow(W.copy().reshape(28,28), vmin=np.min(W_list), vmax=np.max(W_list))
    subtitle = ""
    keys_list = [i for i in results_list[i].keys()]
    for key in keys_list:
        if key not in ['Y_test', 'Y_pred', 'AUC', 'Opt_threshold']:
            subtitle += "\n" + str(key) + " = " + str(np.round(results_list[i].get(key),3))
            # print('%s = %f' % (key, results_list[i].get(key)))
    ax[i].set_title('Opt. regression coeff.', fontsize=13)
    ax[i].set_xlabel(subtitle, fontsize=20)
fig.subplots_adjust(right=0.9)
fig.suptitle("MNIST Binary Classification by PR for %s vs. %s" % (digits_list[0], digits_list[1]), fontsize=20, y=1.05)
cbar_ax = fig.add_axes([0.92, 0.15, 0.01, 0.7])
fig.colorbar(im, cax=cbar_ax)
plt.savefig('PR_MNIST_test_ex1.svg', bbox_inches='tight')
# -
| notebooks/.ipynb_checkpoints/Math156_classification_MNIST-checkpoint_LOCAL_721.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WeatherPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
import urllib
from scipy.stats import linregress
from pprint import pprint
from urllib.request import urlopen
import json as simplejson
import json
from sklearn import datasets
from scipy.stats import linregress
# Import API key
from api_key import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Output File (CSV)
output_data_file = "output_data/cities.csv"
# Range of latitudes and longitudes
lat_range = (-90, 90)
lng_range = (-180, 180)
# -
# ## Generate Cities List
# +
# List for holding lat_lngs and cities
lat_lngs = []
cities = []

# Draw 1,500 random coordinate pairs spanning the whole globe.
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)

# Map every coordinate pair to its nearest city, keeping each city only once
# (first occurrence wins, which preserves discovery order).
for lat, lng in lat_lngs:
    nearest = citipy.nearest_city(lat, lng).city_name
    if nearest not in cities:
        cities.append(nearest)

# Print the city count to confirm sufficient count
print(len(cities))
# +
#configurations information
url = "http://api.openweathermap.org/data/2.5/weather?"
#Setting the units to imperial format
units = "imperial"
#Building the query url
query_url = f"{url}appid={weather_api_key}&units={units}&q="
#Grabbing the weather data for a single sample city
# NOTE(review): `city` here is whatever value the city-generation loop above
# left bound after its final iteration -- confirm that is the intended sample.
weather_response = requests.get(query_url + city)
weather_json = weather_response.json()
#printing out the json
print(json.dumps(weather_json, indent=4))
# +
print(requests.get(query_url + city))
# +
#empty lists that will hold my city data information
#city name
city_name = []
#latitude
lat_data = []
#longitude
long_data = []
#temperature
max_temp = []
#humidity
humidity = []
#cloudiness
cloud = []
#windiness
wind = []
#country
country = []
#date
date = []
# Create a processing record counter
record_counter = 1
#Printing an alert that notifies the user that we are starting the api log requests
print(f"BEGINNING OF LOG STATEMENT")
print(f"--------------------------")
#Looping through the list of cities and appending them to the list created above
for city in cities:
    # exception handling
    try:
        #Building a query url based on current element units
        response = requests.get(query_url + city).json()
        # Parse out the max temp, humidity, and cloudiness
        city_name.append(response["name"])
        lat_data.append(response["coord"]["lat"])
        long_data.append(response["coord"]["lon"])
        max_temp.append(response["main"]["temp_max"])
        humidity.append(response["main"]["humidity"])
        cloud.append(response["clouds"]["all"])
        wind.append(response["wind"]["speed"])
        country.append(response["sys"]["country"])
        date.append(response["dt"])
        city_counter = response["name"]
        print(f"Processing record {record_counter}|{city_counter}")
        #increasing the record counter by 1 for each iteration
        record_counter += 1
    # If an error is experienced, skip the city
    # NOTE(review): the bare `except:` swallows every error (including
    # KeyboardInterrupt); a narrower except clause would be safer.
    except:
        print("City not in list..")
        continue
# Indicate that Data Loading is complete
print("-----------------------------")
print("Data Retrieval Complete ")
print("-----------------------------")
# -
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
# +
#Creating the data frame with the appended information above
city_df = pd.DataFrame({
    'City':city_name,
    'Latitude':lat_data,
    'Longtitude':long_data,
    'Max Temp':max_temp,
    'Humidity':humidity,
    'Cloudiness':cloud,
    'Wind Speed':wind,
    'Country':country,
    'Date':date
})
# (removed the previous `city_df = pd.DataFrame(city_df)` -- re-wrapping an
# existing DataFrame is a no-op copy)
#exporting the frame to a csv
city_df.to_csv('City_Data.csv',index=False)
city_df
# -
# ## Inspect the data and remove the cities where the humidity > 100%.
# ----
# Skip this step if there are no cities that have humidity > 100%.
#There are no cities with the humidity over 100% in this data set
# Summary statistics; the Humidity max shown below confirms nothing exceeds 100.
city_df.describe()
# +
#double checked to see if there were any cities above 100% humidity
#from the above description we see that the max humidity rests at 100% on the dot
city_df.loc[city_df['Humidity'] > 100]
# -
# Get the indices of cities that have humidity over 100%.
# +
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
# -
# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# ## Latitude vs. Temperature Plot
# This graph tells us that the closer we get a latitude value of 0 to 20 degrees, the more likely the temperature is to rise.
# It is safe to say that location does play a major factor in temperature.
# +
#using the subplot function from matplotlib
#From my understanding it gives me more freedom
fig, ax = plt.subplots()
ax.set_title('Latitude vs Temperature',fontsize=16,loc='center',)
city_df.plot(kind='scatter',x='Latitude',y='Max Temp',c='dodgerblue',edgecolor='black',ax=ax)
ax.set_xlabel('Latitude',fontsize=13)
ax.set_ylabel('Temperature',fontsize=13)
ax.grid(linestyle='-',linewidth='0.5',color='black')
# -
# ## Latitude vs. Humidity Plot
# The following is the same approach but for humidity. The graph below shows a large cluster of markers at the latitude
# value of 60 degrees. This tells us that there is a high correlation of humidity and those cities located in the 60 degree
# latitude of the world.
# +
#scatter plot of latitude vs humidity, same subplot approach as above
fig, ax = plt.subplots()
ax.set_title('Latitude vs Humidity',fontsize=16,loc='center',)
city_df.plot(kind='scatter',x='Latitude',y='Humidity',c='dodgerblue',edgecolor='black',ax=ax)
ax.set_xlabel('Latitude',fontsize=13)
ax.set_ylabel('Humidity (%)',fontsize=13)
ax.grid(linestyle='-',linewidth='0.5',color='black')
# -
# ## Latitude vs. Cloudiness Plot
# +
#scatter plot of latitude vs cloudiness, same subplot approach as above
fig, ax = plt.subplots()
ax.set_title('Latitude vs Cloudiness',fontsize=16,loc='center',)
city_df.plot(kind='scatter',x='Latitude',y='Cloudiness',c='dodgerblue',edgecolor='black',ax=ax)
ax.set_xlabel('Latitude',fontsize=13)
ax.set_ylabel('Cloudiness (%)',fontsize=13)
ax.grid(linestyle='-',linewidth='0.5',color='black')
# -
# ## Latitude vs. Wind Speed Plot
# +
#scatter plot of latitude vs wind speed, same subplot approach as above
fig, ax = plt.subplots()
ax.set_title('Latitude vs Wind Speed',fontsize=16,loc='center',)
city_df.plot(kind='scatter',x='Latitude',y='Wind Speed',c='dodgerblue',edgecolor='black',ax=ax)
ax.set_xlabel('Latitude',fontsize=13)
ax.set_ylabel('Wind Speed (mph)',fontsize=13)
ax.grid(linestyle='-',linewidth='0.5',color='black')
# -
# ## Linear Regression
# #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
northern_hem = city_df.loc[city_df['Latitude'] >= 0]
northern_hem = pd.DataFrame(northern_hem)
# FIX: latitude is the explanatory (x) variable here -- the axis labels and
# every other regression cell below use x=Latitude, but x/y were swapped in
# this cell (x was Max Temp while the x-axis was labeled 'Latitude').
x_values = northern_hem['Latitude']
y_values = northern_hem['Max Temp']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values,y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values, y_values, c='dodgerblue',edgecolor='black')
plt.plot(x_values, regress_values, "r-")
plt.annotate(line_eq,(6,10),fontsize=13,color='red')
plt.xlabel('Latitude',fontsize=13)
plt.ylabel('Max Temp',fontsize=13)
plt.title('Max Temp vs Latitude',fontsize=15)
print(f"The r-squared is: {round(rvalue**2,5)}")
plt.show()
# -
# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
southern_hem = city_df.loc[city_df['Latitude'] < 0]
southern_hem = pd.DataFrame(southern_hem)
# FIX: x/y were swapped relative to the axis labels; x should be Latitude,
# consistent with every other regression cell below.
x_values = southern_hem['Latitude']
y_values = southern_hem['Max Temp']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values,y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values, y_values, c='dodgerblue',edgecolor='black')
plt.plot(x_values, regress_values, "r-")
# annotation moved into the southern-latitude x range (latitudes are negative here)
plt.annotate(line_eq,(-50,40),fontsize=14,color='red')
plt.xlabel('Latitude',fontsize=13)
plt.ylabel('Max Temp',fontsize=13)
plt.title('Max Temp vs Latitude in Southern Hem',fontsize=15)
print(f"The r-squared is: {round(rvalue**2,5)}")
plt.show()
# -
# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
# Linear fit of humidity against latitude for cities at/above the equator.
northern_hem = city_df.loc[city_df['Latitude'] >= 0]
northern_hem = pd.DataFrame(northern_hem)
x_values = northern_hem['Latitude']
y_values = northern_hem['Humidity']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values,y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values, y_values, c='dodgerblue',edgecolor='black')
plt.plot(x_values, regress_values, "r-")
plt.annotate(line_eq,(45,20),fontsize=14,color='red')
plt.xlabel('Latitude',fontsize=13)
plt.ylabel('Humidity (%)',fontsize=13)
plt.title('Humidity (%) vs Latitude',fontsize=15)
print(f"The r-squared is: {round(rvalue**2,5)}")
plt.show()
# -
# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
# +
# Same fit for cities below the equator.
southern_hem = city_df.loc[city_df['Latitude'] < 0]
southern_hem = pd.DataFrame(southern_hem)
x_values = southern_hem['Latitude']
y_values = southern_hem['Humidity']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values,y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values, y_values, c='dodgerblue',edgecolor='black')
plt.plot(x_values, regress_values, "r-")
plt.annotate(line_eq,(-50,40),fontsize=14,color='red')
plt.xlabel('Latitude',fontsize=13)
plt.ylabel('Humidity (%)',fontsize=13)
plt.title('Humidity (%) vs Latitude',fontsize=15)
print(f"The r-squared is: {round(rvalue**2,5)}")
plt.show()
# -
# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
# Linear fit of cloudiness against latitude, northern hemisphere.
northern_hem = city_df.loc[city_df['Latitude'] >= 0]
northern_hem = pd.DataFrame(northern_hem)
y_values = northern_hem['Cloudiness']
x_values = northern_hem['Latitude']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values,y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values, y_values, c='dodgerblue',edgecolor='black')
plt.plot(x_values, regress_values, "r-")
plt.annotate(line_eq,(25,65),fontsize=14,color='red')
plt.xlabel('Latitude',fontsize=13)
plt.ylabel('Cloudiness (%)',fontsize=13)
plt.title('Cloudiness (%) vs Latitude',fontsize=15)
print(f"The r-squared is: {round(rvalue**2,5)}")
plt.show()
# -
# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
# +
southern_hem = city_df.loc[city_df['Latitude'] < 0]
southern_hem = pd.DataFrame(southern_hem)
y_values= southern_hem['Cloudiness']
x_values = southern_hem['Latitude']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values,y_values)
# FIX: `cloud_lat` was an undefined name (NameError); the regression line is
# evaluated at the x (latitude) values, as in every other regression cell.
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values, y_values, c='dodgerblue',edgecolor='black')
plt.plot(x_values, regress_values, "r-")
plt.annotate(line_eq,(-50,60),fontsize=14,color='red')
plt.xlabel('Latitude',fontsize=13)
plt.ylabel('Cloudiness (%)',fontsize=13)
plt.title('Cloudiness (%) vs Latitude',fontsize=15)
print(f"The r-squared is: {round(rvalue**2,5)}")
plt.show()
# -
# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# +
# Linear fit of wind speed against latitude, northern hemisphere.
northern_hem = city_df.loc[city_df['Latitude'] >= 0]
northern_hem = pd.DataFrame(northern_hem)
y_values = northern_hem['Wind Speed']
x_values = northern_hem['Latitude']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values,y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values, y_values, c='dodgerblue',edgecolor='black')
plt.plot(x_values, regress_values, "r-")
plt.annotate(line_eq,(0,25),fontsize=14,color='red')
plt.xlabel('Latitude',fontsize=13)
plt.ylabel('Wind Speed (mph)',fontsize=13)
plt.title('Wind Speed (mph) vs Latitude',fontsize=15)
print(f"The r-squared is: {round(rvalue**2,5)}")
plt.show()
# -
# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
# +
# Same fit for the southern hemisphere.
southern_hem = city_df.loc[city_df['Latitude'] < 0]
southern_hem = pd.DataFrame(southern_hem)
y_values = southern_hem['Wind Speed']
x_values = southern_hem['Latitude']
(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values,y_values)
regress_values = x_values * slope + intercept
line_eq = "y = " + str(round(slope,2)) + "x + " + str(round(intercept,2))
plt.scatter(x_values, y_values, c='dodgerblue',edgecolor='black')
plt.plot(x_values, regress_values, "r-")
plt.annotate(line_eq,(-50,18),fontsize=14,color='red')
plt.xlabel('Latitude',fontsize=13)
plt.ylabel('Wind Speed (mph)',fontsize=13)
plt.title('Wind Speed (mph) vs Latitude',fontsize=15)
print(f"The r-squared is: {round(rvalue**2,5)}")
plt.show()
# -
| WeatherPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # WPP: total population
# ## Parameters
# + tags=["parameters"]
# Output directory for the generated dataset (parameterized for the ETL runner).
dest_dir = "/tmp/wpp_2019_total_population"
# -
# ## Walden
from owid import walden
# Locate the raw WPP 2019 standard-projections snapshot in the Walden catalog.
walden_ds = walden.Catalog().find_one("wpp", "2019", "standard_projections")
walden_ds
# ## Unzip
import tempfile
import shutil
# Extract the zipped snapshot into a scratch directory (removed at the end of
# the notebook by the clean-up step).
temp_dir = tempfile.mkdtemp()
import zipfile
zipfile.ZipFile(walden_ds.local_path).extractall(temp_dir)
# !ls {temp_dir}/WPP2019
# ## Make dataset
from owid.catalog import Dataset
from etl.steps.data import converters
# Create an empty dataset at dest_dir and carry over the snapshot's metadata.
ds = Dataset.create_empty(dest_dir)
ds.metadata = converters.convert_walden_metadata(walden_ds)
ds.save()
# ## Add tables
from owid.catalog import Table
import pandas as pd
# ### Total population
# Load the raw total-population CSV extracted above.
df = pd.read_csv(f"{temp_dir}/WPP2019/WPP2019_TotalPopulationBySex.csv")
df.head()
# Rename the UN columns to snake_case.
df.columns = [
    "loc_id",
    "location",
    "var_id",
    "variant",
    "year",
    "mid_period",
    "population_male",
    "population_female",
    "population_total",
    "population_density",
]
# Store the id -> name code tables separately, then drop the ids from the data.
t = Table(df[["loc_id", "location"]].drop_duplicates().set_index("loc_id"))
t.metadata.short_name = "location_codes"
ds.add(t)
t = Table(df[["var_id", "variant"]].drop_duplicates().set_index("var_id"))
t.metadata.short_name = "variant_codes"
ds.add(t)
df.drop(columns=["loc_id", "var_id"], inplace=True)
# Categoricals shrink the heavily repeated string columns.
for col in ["location", "variant"]:
    df[col] = df[col].astype("category")
df.set_index(["variant", "location", "year"], inplace=True)
df
df.index.levels[0]
t = Table(df)
t.metadata.short_name = "total_population"
ds.add(t)
# ### Fertility by age
# Age-specific fertility table.
df = pd.read_csv(f"{temp_dir}/WPP2019/WPP2019_Fertility_by_Age.csv")
df.head()
# Drop the numeric ids and the redundant age-group bounds; keep the labels.
df.drop(
    columns=["LocID", "VarID", "MidPeriod", "AgeGrpStart", "AgeGrpSpan"], inplace=True
)
df.columns = [
    "location",
    "variant",
    "year_range",
    "age_group",
    "asfr",
    "pasfr",
    "births",
]
df.head()
for col in ["location", "variant", "year_range", "age_group"]:
    df[col] = df[col].astype("category")
df.set_index(["variant", "location", "year_range", "age_group"], inplace=True)
t = Table(df)
t.metadata.short_name = "fertility_by_age"
ds.add(t)
# ### Population by age and sex
df = pd.read_csv(f"{temp_dir}/WPP2019/WPP2019_PopulationByAgeSex_Medium.csv")
df.head()
df.drop(
    columns=["LocID", "VarID", "MidPeriod", "AgeGrpStart", "AgeGrpSpan"], inplace=True
)
df.columns = [
    "location",
    "variant",
    "year",
    "age_group",
    "population_male",
    "population_female",
    "population_total",
]
df.head()
for col in ["location", "variant", "age_group"]:
    df[col] = df[col].astype("category")
df.set_index(["variant", "location", "year", "age_group"], inplace=True)
df.head()
t = Table(df)
t.metadata.short_name = "population_by_age_sex"
ds.add(t)
# ## Clean up
# Remove the scratch directory created by the unzip step above.
shutil.rmtree(temp_dir)
| etl/steps/data/meadow/wpp/2019/standard_projections.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Programming Bootcamp 2018
# # Lesson 1 Exercises
# ---
# ** How to earn points **
#
# If you would like to get points/feedback for your work, enter your name below and attach this notebook to a post to Instructor "<NAME>" on piazza with "Bootcamp Lesson 1" as the subject line **before 9:00 pm on 7/24**. Check the submit homework post! You do not need to complete all the problems to get points. Points will be assigned based on participation. Those who consistently participate throughout bootcamp will get a ~prize~.
# **Name** (double click here to edit):
# ---
# ** Notes on using this notebook **
#
# - Cells with `In []` next to them are code cells. Run code using Ctrl+Enter (or Shift+Enter)
# - In places where it says "Your guess" or "Your answer", double click to edit the cell with your answer.
# - I will often provide you with expected output for a given problem. Use this to check that your code is correct.
# - If the directions for a question don't specify a particular way of doing things, this means you can do it however you want (within reason). For example, if I don't specifically say "print using one line of code", then you can print using multiple print statements if you want.
# ---
# ## 1. Guess the output: print statement practice (1pt)
#
# For the following blocks of code, **first try to guess what the output will be**, and then run the code yourself. These examples may introduce some ideas and common pitfalls that were not explicitly covered in the lecture, so be sure to complete this section. Points will be given for filling in the guesses; guessing wrong won't be penalized.
print ("hello", "world")
# Your guess:
#
print ("hello" + "world")
# Your guess:
a = 3.1415926
print ('{:.2f} '.format(a))
# Your guess:
#
print ("I have", 5, "avocados")
# Your guess:
#
print ("I have" + 5 + "avocados")
# Your guess:
#
print (10 - 5 * 3)
# Your guess:
#
print ((10 - 5) * 3)
# Your guess:
#
print (25 % 5)
# Your guess:
#
print (25 % 6)
# Your guess:
#
print (-4 ** 2)
# Your guess:
print ((-4) ** 2)
# Your guess:
print (9 / 2)
# Your guess:
#
print (9.0 / 2)
# Your guess:
#
print (9 / float(2))
# Your guess:
#
# ---
# ## 2. Guess the output: variables practice (1pt)
#
# For the following blocks of code, **first try to guess what the output will be**, and then run the code yourself.
x = 4
print (x * 3)
# Your guess:
#
x = "4"
print (x * 3)
# Your guess:
#
x = "4"
print (int(x) * 3)
# Your guess:
#
x = "apples"
y = x
print (y)
# Your guess:
#
x = "apples"
y = "bananas"
x = y
y = x
print(y)
# Your guess:
#
x = "apples"
y = "bananas"
print (x + y)
# Your guess:
#
x = 4
x = 1
print (x)
# Your guess:
#
x = 4
x + 1
print (x)
# Your guess:
#
x = 2
y = 4
print ((x * y) ** x)
# Your guess:
#
x = 25
print (x ** 0.5)
# Your guess:
#
# ---
# ## 3. On your own: printing (1pt)
# Write a single line of code that prints your favorite movie title:
# Add a single line of code to what's already below to print the variables `color` and `number`:
# +
color = "blue"
number = 90210
# -
# ---
# ## 4. Fix the code (1pt)
# The following code blocks have bugs. Fix each one so that it runs without error. There may be multiple errors!
print "I just ate", 20, "cookies"
1stPerson = "Scott"
print (1stPerson)
fruit = "plum"
print ("I bought a", friut)
file_num = "5"
file_name = sequences.bed
file_string = "File number " + file_num ": " + file_name
prnt (file_string)
# ---
# ## 5. Math practice I (1pt)
# The equation for a line is
# ```
# y = mx + b
# ```
# Write code to solve for y for a given m, x, and b. Print the value of y at the end. Use the code below as a starting point, and fill in the rest of the needed code.
#
# **[ Check your answer ]** For the given values of m, x, and b below (0.5, 12, 4), you should get the answer 10.0.
m = 0.5
x = 12
b = 4
# ---
# ## 6. Math practice II (2pt)
# The quadratic formula is defined as:
# $$ x = \frac{-b \pm \sqrt{b^2 - 4ac}}{2a} $$
# Write code to calculate the quadratic formula for a given a, b, and c. Your code should print both possible values for x (that is, the values produced by doing + or - for the $\pm$).
#
# I haven't told you yet how to find the square root in Python because I'd like you to look this up for yourself! There are actually several different ways -- try googling it.
#
# **[ Check your answer ]** For the given values of a, b, and c (-3, 3, 1), you should get the answers -0.264 and 1.264.
a = -3
b = 3
c = 1
# After you verify that your code works, try a few different values of these variables. Note, you will get an error message if $b^2 - 4ac$ is negative, since you can't take the square root of a negative number. This is fine for now -- later we'll go over ways to prevent errors like that from occurring.
# ---
# ## 7. Reading user input (1pt)
#
# We'll go over this next time, but here's a head start. Run the following code:
print ("Enter your first name")
firstName = input()
print ("Enter your last name")
lastName = input()
print ("Welcome,", firstName, lastName)
# What happens? What does `input()` do?
# Your answer:
# Google it to see if you're right.
#
# Here's another way you can use the `input()` function:
firstName = input("Enter your first name:")
lastName = input("Enter your last name:")
print ("Welcome,", firstName, lastName)
# Run this. What is different about the output?
# Your answer:
# Oftentimes we can change the behavior of a function by putting different things within the () braces. The values we put in these braces are called arguments. You can send these parameters along with the function to make it do slightly different things. In this case, `input()` only takes one argument, which is a string that is used as a "prompt". Different function take different types/numbers of arguments. We're going to see this a lot in the future, so just keep it in the back of your mind.
# ---
# ## 8. Interactive quadratic formula (2pt)
#
# Edit your quadratic formula code from problem 6 so that it takes the values of a, b, and c interactively using `input()`.
#
# *Hint:* you may have noticed when you looked up `input()` that it reads in everything as a string. So if you input `3.5`, what actually gets read is `"3.5"` (a string). This will cause an error when you try to do math using these values. To use them as decimal numbers, we must convert them from strings to floats using the `float()` function.
#
# Here's an example of how to do this:
# ```
# age = input("Your age:")
# age = float(age)
# ```
# Or:
# ```
# age = float(input("Your age:"))
# ```
#
# Now, apply this to your quadratic formula code to read in the values of a, b, and c. Check that you still get the correct answer!
| class_materials/Intro2Python/2019/.ipynb_checkpoints/lab1_exercises-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
df = pd.read_csv('data/parsed.csv')
df.columns
"""
1. Find the 95 th percentile of earthquake magnitude in Japan using the mb
magnitude type.
"""
# Restrict to mb-magnitude events, then keep only rows whose place mentions Japan.
mb_events = df[df.magType == 'mb'].set_index('place')
japan_mb = mb_events.filter(like='Japan', axis=0)
# describe() with an explicit 95th percentile answers the question directly.
japan_mb.mag.describe(percentiles=[0.05, 0.95])
# alternative
df[(df.parsed_place == 'Japan') & (df.magType == 'mb')].mag.quantile(0.95)
"""
2. Find the percentage of earthquakes in Indonesia that were coupled with tsunamis.
"""
# Share of Indonesian earthquakes whose tsunami flag is 1.
indonesia_quakes = df[(df.parsed_place == 'Indonesia')]
fraction = indonesia_quakes.tsunami.value_counts(normalize=True)[1]
print(f"{fraction:.2%}")
"""
3. Calculate summary statistics for earthquakes in Nevada.
"""
df[df.parsed_place == 'Nevada'].describe(include='all')
"""
4. Add a column indicating whether the earthquake happened in a country or US
state that is on the Ring of Fire. Use Alaska, Antarctica (look for Antarctic), Bolivia,
California, Canada, Chile, Costa Rica, Ecuador, Fiji, Guatemala, Indonesia, Japan,
Kermadec Islands, Mexico (be careful not to select New Mexico), New Zealand,
Peru, Philippines, Russia, Taiwan, Tonga, and Washington.
"""
# Exact-match locations on the Ring of Fire. 'Mexico' is matched exactly via
# isin(), which keeps 'New Mexico' out; Antarctica is matched by substring
# because it appears as 'Antarctic' variants.
ring_of_fire_places = [
    'Alaska', 'Bolivia', 'California', 'Canada', 'Chile', 'Costa Rica',
    'Ecuador', 'Fiji', 'Guatemala', 'Indonesia', 'Japan', 'Kermadec Islands',
    'Mexico', 'New Zealand', 'Peru', 'Philippines', 'Russia', 'Taiwan',
    'Tonga', 'Washington',
]
df['ring_of_fire'] = (
    df.parsed_place.isin(ring_of_fire_places)
    | df.parsed_place.str.contains('Antarctic')
)
"""
5. Calculate the number of earthquakes in the Ring of Fire locations and the number
outside of them.
"""
df.ring_of_fire.value_counts()
"""
6. Find the tsunami count along the Ring of Fire.
"""
df[df.ring_of_fire & df.tsunami].tsunami.count()
| ch_02/exercises.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:gluon]
# language: python
# name: conda-env-gluon-py
# ---
# +
from mxnet import gluon
import mxnet as mx_net
import os
import numpy as np
from mxnet import image
from mxnet import nd
from mxnet import init
from mxnet import cpu
import matplotlib as _plotlib
import matplotlib.pyplot as _pyplot
from mxnet.contrib.ndarray import MultiBoxPrior
from mxnet.gluon import nn
from mxnet.contrib.ndarray import MultiBoxDetection
import time
# Run inference on the CPU.
ctx = mx_net.cpu()
#mean_rgb_value = nd.array([123, 117, 104])
mean_rgb_value = nd.array([123-55, 117-50, 104+60])  # to detect the big Pikachu in pikachu27.png
# NOTE: this second assignment overwrites the one above — only this mean is used.
mean_rgb_value = nd.array([123-20, 117+20, 104-40])
NumOfClass=1  # number of object classes (Pikachu only)
NamesOfClass = ['pikachu']  # class names
shape_dataset = 256  # images are resized to 256x256 before inference
# %matplotlib inline
_plotlib.rcParams['figure.dpi']= 120
def RectBox(Box_Rectangle, color, linewidth=3):
    """Convert an (xmin, ymin, xmax, ymax) NDArray box into an unfilled
    matplotlib Rectangle patch for overlay drawing."""
    xmin, ymin, xmax, ymax = Box_Rectangle.asnumpy()
    return _pyplot.Rectangle(
        (xmin, ymin), xmax - xmin, ymax - ymin,
        fill=False, edgecolor=color, linewidth=linewidth)
# Predict the object class at every anchor.
def ClassPredictor(Anchors_Numbers, Classes_Numbers):
    # Return a prediction layer: 3x3 conv (padding 1 keeps spatial size)
    # emitting (Classes_Numbers + 1) scores per anchor — classes + background.
    return nn.Conv2D(Anchors_Numbers * (Classes_Numbers + 1), 3, padding=1)
# Predict the object bounding-box offsets.
def YucheBox(Anchors_Numbers):
    # Return a box-regression layer: 3x3 conv with 4 offsets per anchor.
    return nn.Conv2D(Anchors_Numbers * 4, 3, padding=1)
# Down-sampling module: halves the spatial resolution of the feature map.
def Reduce_Module(out_lays):
    """Two Conv-BatchNorm-ReLU stages followed by a 2x2 max-pool, so the
    output feature map has `out_lays` channels and half the spatial size."""
    block = nn.HybridSequential()
    for _ in range(2):
        block.add(
            nn.Conv2D(out_lays, 3, strides=1, padding=1),
            nn.BatchNorm(in_channels=out_lays),
            nn.Activation('relu'),
        )
    block.add(nn.MaxPool2D(2))
    return block
# Merge the outputs of different scales into a single flat tensor.
def Fla_yuche(pred):
    # (batch, C, H, W) -> (batch, H, W, C) -> (batch, H*W*C)
    return pred.transpose(axes=(0,2,3,1)).flatten()
def link_yuche(preds):
    # Concatenate the flattened per-scale predictions along axis 1.
    return nd.concat(*preds, dim=1)
# Backbone network.
def main_body_net():
    """Feature extractor: three down-sampling modules with 16, 32 and 64
    output channels, shrinking the input resolution by a factor of 8."""
    backbone = nn.HybridSequential()
    backbone.add(Reduce_Module(16), Reduce_Module(32), Reduce_Module(64))
    return backbone
# Define the SSD model's sub-networks.
def SSD_Model(Anchors_Numbers, Classes_Numbers):
    """Build the four sub-networks consumed by SsdModelForward:
    [0] backbone, [1] three 128-channel down-samplers,
    [2] five per-scale class predictors, [3] five per-scale box predictors."""
    ReduceSamplers = nn.Sequential()
    for _ in range(3):
        ReduceSamplers.add(Reduce_Module(128))
    ClassPred = nn.Sequential()
    Box_Pred = nn.Sequential()
    for _ in range(5):
        ClassPred.add(ClassPredictor(Anchors_Numbers, Classes_Numbers))
        Box_Pred.add(YucheBox(Anchors_Numbers))
    All_Models = nn.Sequential()
    All_Models.add(main_body_net(), ReduceSamplers, ClassPred, Box_Pred)
    return All_Models
# Compute the SSD forward pass.
def SsdModelForward(x, All_Models, sizes, ratios, verbose=False):
    """Run SSD over 5 feature scales.

    Returns (anchors, class_preds, box_preds), each concatenated across
    scales along axis 1.
    """
    main_body_net, ReduceSamplers, ClassPred, Box_Pred = All_Models
    output_anchors, output_class_preds, output_box_preds = [], [], []
    # feature extraction
    x = main_body_net(x)  # feature extraction done
    for i in range(5):
        # predict anchors, class scores and box offsets at this scale
        output_anchors.append(MultiBoxPrior(
            x, sizes=sizes[i], ratios=ratios[i]))
        output_class_preds.append(
            Fla_yuche(ClassPred[i](x)))
        output_box_preds.append(
            Fla_yuche(Box_Pred[i](x)))
        if verbose:
            print('Predict scale', i, x.shape, 'with',
                  output_anchors[-1].shape[1], 'output_anchors')
        # down sample: scales 0-2 use the learned reducers; scale 3 collapses
        # to 1x1 via global max pooling; scale 4 is last (no reduction needed)
        if i < 3:
            x = ReduceSamplers[i](x)
        elif i == 3:
            x = nd.Pooling(
                x, global_pool=True, pool_type='max',
                kernel=(x.shape[2], x.shape[3]))
    # concat data from all scales
    return (link_yuche(output_anchors),
            link_yuche(output_class_preds),
            link_yuche(output_box_preds))
# The complete model.
class ToySSD(gluon.Block):
    """Toy single-shot detector predicting over 5 feature scales."""

    def __init__(self, Classes_Numbers, verbose=False, **kwargs):
        super(ToySSD, self).__init__(**kwargs)
        # anchor Box_Rectangle sizes and ratios for 5 feature scales
        self.sizes = [[.2,.272], [.37,.447], [.54,.619],
                      [.71,.79], [.88,.961]]
        self.ratios = [[1,2,.5]]*5
        self.Classes_Numbers = Classes_Numbers
        self.verbose = verbose
        # anchors per spatial position (sizes and ratios share one base combo)
        Anchors_Numbers = len(self.sizes[0]) + len(self.ratios[0]) - 1
        # use name_scope to guard the names
        with self.name_scope():
            self.All_Models = SSD_Model(Anchors_Numbers, Classes_Numbers)

    def forward(self, x):
        output_anchors, output_class_preds, output_box_preds = SsdModelForward(
            x, self.All_Models, self.sizes, self.ratios,
            verbose=self.verbose)
        # it is better to have class predictions reshaped for softmax computation
        output_class_preds = output_class_preds.reshape(shape=(0, -1, self.Classes_Numbers+1))
        return output_anchors, output_class_preds, output_box_preds
# Inference initialization.
os.makedirs('checkpoints',exist_ok=True)
# NOTE(review): only filename_3 is loaded below; filename and filename_2
# appear unused in this notebook.
filename = "checkpoints/testnet.params"
filename_2 = "checkpoints_2/ssd_net.params"
filename_3 = "checkpoints_3/ssd_net_3.params"
ctx = cpu(0)
#TrainData.reshape(label_shape=(3, 5))
#TrainData = TestData.sync_label_shape(TrainData)
net = ToySSD(NumOfClass)
# Load the trained weights onto the CPU context.
net.load_params(filename_3, ctx=ctx)
# Image preprocessing.
def img_Processor(file_name):
    """Read an image file; return (preprocessed 1xCxHxW batch, original image)."""
    with open(file_name, 'rb') as f:
        img = image.imdecode(f.read())
    # resize to shape_dataset
    data = image.imresize(img, shape_dataset, shape_dataset)
    # minus rgb mean
    data = data.astype('float32') - mean_rgb_value
    # convert to batch_test x channel x height x width
    return data.transpose((2,0,1)).expand_dims(axis=0), img
# Define the prediction function.
def predict(x):
    """Run the network and return NMS-filtered detections; each output row is
    [class_id, score, xmin, ymin, xmax, ymax] with normalized coordinates."""
    output_anchors, output_class_preds, output_box_preds = net(x.as_in_context(ctx))
    # channel-wise softmax turns class scores into probabilities
    output_class_probs = nd.SoftmaxActivation(
        output_class_preds.transpose((0,2,1)), mode='channel')
    return MultiBoxDetection(output_class_probs, output_box_preds, output_anchors,force_suppress=True, clip=False)
# Run detection on one image and time it.
path='../img/pikachu17.png'#threshold=0.51 pikachu6_2 pikachu16.png
path_2='../img/pikachu15.jpg'#threshold=0.45
path_3='../img/pikachu6_2.jpg'
tic = time.time()
x, img = img_Processor(path)
outputs = predict(x)
# Prints the single-image detection time in seconds (message is in Chinese).
print("单次检测时间为:",(time.time()-tic))
#print(' time %.1f sec' % (time.time()-tic))
#outputs.shape
# -
print(outputs[0][0:10])
# +
#显示输出
five_colors = ['blue', 'green', 'red', 'black', 'magenta']
_plotlib.rcParams['figure.figsize'] = (6,6)
def display_preds(img, outputs, threshold=0.5):
    """Draw detections with score >= `threshold` on top of `img`.

    Each row of `outputs` is [class_id, score, xmin, ymin, xmax, ymax] with
    corner coordinates normalized to [0, 1]; class_id < 0 marks rows removed
    by non-maximum suppression.
    """
    _pyplot.imshow(img.asnumpy())
    # img.shape is (height, width, channels): scale x by width, y by height.
    # BUG FIX: the original multiplied by [shape[0], shape[1]]*2, i.e. scaled
    # x by the height and y by the width, distorting boxes on non-square images.
    scale = np.array([img.shape[1], img.shape[0]] * 2)
    for rows in outputs:
        rows = rows.asnumpy()
        class_num_id, class_score = int(rows[0]), rows[1]
        if class_num_id < 0 or class_score < threshold:
            continue
        # cycle through the palette by class id (0%5=0, 1%5=1, ...)
        color = five_colors[class_num_id%len(five_colors)]
        Box_Rectangle = rows[2:6] * scale
        rect = RectBox(nd.array(Box_Rectangle), color, 2)
        _pyplot.gca().add_patch(rect)
        text = NamesOfClass[class_num_id]
        _pyplot.gca().text(Box_Rectangle[0], Box_Rectangle[1],
                           '{:s} {:.2f}'.format(text, class_score),
                           bbox=dict(facecolor=color, alpha=0.5),
                           fontsize=10, color='white')
    _pyplot.show()
display_preds(img, outputs[0], threshold=0.43)
# -
| chapter_computer-vision/change_test_ssd_one_Success.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
import glob
import os
input_shape = (256, 256)
raw_data = 'data/raw_data/'
processed_data = 'data/prepared_data/'
imgs = glob.glob(raw_data + '*.jpg')
len(imgs)
# each dumped .npy file will hold at most `batch` images (36 here)
batch = 36
# +
# dumping numpy batch images to processed_data folder
counter = 1
def dumpy_numpy(data):
    """Save `data` (a list of images) to processed_data/<counter>.npy and
    advance the global file counter."""
    global counter
    np.save(os.path.join(processed_data, str(counter)), data)
    counter += 1
# -
# convert to numpy files
bulk = []
for i, file in enumerate(imgs, 1):
    try:
        # cv2.imread returns None for unreadable files; cv2.resize then
        # raises, which is what the except below reports.
        image = cv2.imread(file)
        image = cv2.resize(image, input_shape)
        bulk.append(image)
    except Exception as e:
        print("error: ", e)
        print("file name: ", file)
    print("Proccessed: %s / %s image" %(i, len(imgs)))
    # Flush a full batch, or whatever remains after the final image.
    if len(bulk) >= batch or i == len(imgs):
        print("Dumping batch: ", len(bulk))
        dumpy_numpy(bulk)
        bulk = []
| prepare_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.2 64-bit (''university'': conda)'
# language: python
# name: python38264bituniversitycondab97bf0abd727460988fa3373b2696f9e
# ---
import sympy as sp
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
x = sp.symbols('x')
f = sp.sin(x) / sp.sqrt(1 - x)
start, end = 0, 1
f
# We will be integrating the following function from 0 to 1
# # Let's plot it first!
# +
# Sample the integrand on [start, end) (endpoint excluded: f blows up at x=1).
x_plot = np.linspace(start, end, 300, endpoint=False)
y_plot = sp.lambdify(x, f, 'numpy')(x_plot)
sns.set_style('whitegrid')
plt.figure(figsize=(12, 6))
# FIX: seaborn >= 0.12 removed positional x/y support for lineplot;
# pass them as keyword arguments.
sns.lineplot(x=x_plot, y=y_plot);
# -
# # Exact value
true_value = 1.18698444
# Thanks, Wolfram Alpha!
# # Midpoint Riemann sum
# +
# Midpoint rule: evaluate f at the centre of each of the equal subintervals.
nodes_count = 3
grid = np.linspace(start, end, nodes_count, endpoint=False)
step = grid[1] - grid[0]
nodes = grid + step / 2
values = sp.lambdify(x, f, 'numpy')(nodes)
mid_riemann_value = step * values.sum()
# -
mid_riemann_value
# # Using weights
# Interpolatory quadrature with weight function p(x) = 1/sqrt(1 - x).
p = 1 / sp.sqrt(1 - x)
nodes = [sp.Rational(1, 6), 0.5, sp.Rational(5, 6)]
# phi is the smooth part of the integrand: f = phi * p.
phi = f / p
# Node polynomial and its derivative.
# NOTE(review): w and dw are not used below — presumably they were used to
# derive `coeffs` by hand; confirm.
w = (x - nodes[0]) * (x - nodes[1]) * (x - nodes[2])
dw = w.diff()
# Precomputed quadrature weights for the three nodes.
coeffs = [
    11 / 20,
    -1 / 10,
    31 / 20
]
coeffs
# Approximation: sum of weights times phi evaluated at the nodes.
weights_value = sum([coeffs[i] * phi.evalf(subs={x: nodes[i]}) for i in range(len(nodes))])
weights_value
# # Gauss time!
# 
# Two-point Gauss-Legendre rule: nodes are the Legendre roots on [-1, 1],
# mapped affinely onto [start, end].
roots = [-1 / sp.sqrt(3), 1 / sp.sqrt(3)]
coeffs = [1, 1]
nodes = [(start + end + (end - start) * r) / 2 for r in roots]
# The trailing (end - start) / 2 factor is the Jacobian of the mapping.
gauss_value = sum([coeffs[i] * f.evalf(subs={x: nodes[i]}) for i in range(len(nodes))]) * (end - start) / 2
gauss_value
# # Gauss-like formulas
# Gauss-type rule built for the weight p(x) = 1/sqrt(1 - x).
p = 1 / sp.sqrt(1 - x)
nodes_count = 2
# Moments mu_k = integral over [0, 1] of p(x) * x^k.
mus = [
    float(
        sp.integrate(
            p * x ** k,
            (x, 0, 1)
        )
    )
    for k in range(2 * nodes_count)
]
for i in range(2 * nodes_count):
    print(f'mu_{i} = {mus[i]}')
# Huge thanks to Wolfram Alpha (again)!
# Coefficients of the degree-2 orthogonal polynomial; its roots are the nodes.
poly_coeffs = [-8 / 7, 8 / 35]
polynom = x**2 + x * poly_coeffs[0] + poly_coeffs[1]
nodes = sp.solve(polynom)
phi = f / p
# Weights from the first two moment equations (2x2 linear system).
coeffs = [
    (mus[1] - mus[0] * nodes[1]) / (nodes[0] - nodes[1]),
    (mus[1] - mus[0] * nodes[0]) / (nodes[1] - nodes[0])
]
gauss_like_value = sum([coeffs[i] * phi.evalf(subs={x: nodes[i]}) for i in range(nodes_count)])
gauss_like_value
| year-2/computational-workshop/task-6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
import pandas as pd
import statsmodels.formula.api as sm
# Load the classic iris dataset from the Rdatasets mirror.
iris = pd.read_csv("http://vincentarelbundock.github.io/Rdatasets/csv/datasets/iris.csv")
# Drop the R row-index column. FIX: the original `iris.drop('Unnamed: 0', 1)`
# relied on a positional `axis` argument, deprecated in pandas 1.0 and
# removed in pandas 2.0; `columns=` is the supported spelling.
iris = iris.drop(columns='Unnamed: 0')
iris.head()
# The R dataset ships dot-separated names (e.g. "Sepal.Length"); rename to
# underscores so the columns are usable inside a statsmodels formula.
iris.columns=['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width',
'Species']
iris.columns
# OLS of sepal length on the other measurements plus species dummies.
result = sm.ols(formula="Sepal_Length ~ Petal_Length + Sepal_Width + Petal_Width + Species", data=iris)
# FIX: fit once and reuse — the original called result.fit() five times,
# re-running the regression for every summary/params/outlier call.
fitted = result.fit()
fitted.summary()
fitted.params
fitted.outlier_test(method='bonf', alpha=0.05)
dir(fitted)
test = fitted.outlier_test()
print ('Bad data points (bonf(p) < 0.05):')
# FIX: DataFrame.icol() was removed from pandas; use positional .iloc.
# Column 2 of outlier_test() is the Bonferroni-adjusted p-value.
print (test[test.iloc[:, 2] < 0.05])
| reg+model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score as acc
from sklearn.preprocessing import StandardScaler as ss, RobustScaler as rs
from sklearn.model_selection import train_test_split as tts
from sklearn.linear_model import LogisticRegression as LR
from sklearn.tree import DecisionTreeClassifier as DTC
from sklearn.ensemble import RandomForestClassifier as RFC, GradientBoostingClassifier as GBC, ExtraTreesClassifier as ETC
from sklearn.neural_network import MLPClassifier as MLP
import category_encoders as ce
# %matplotlib inline
import matplotlib.pyplot as plt
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
train_features = pd.read_csv('https://drive.google.com/uc?export=download&id=14ULvX0uOgftTB2s97uS8lIx1nHGQIB0P')
train_labels = pd.read_csv('https://drive.google.com/uc?export=download&id=1r441wLr7gKGHGLyPpKauvCuUOU556S2f')
test_features = pd.read_csv('https://drive.google.com/uc?export=download&id=1wvsYl9hbRbZuIuoaLWCsW_kbcxCdocHz')
sample_submission = pd.read_csv('https://drive.google.com/uc?export=download&id=1kfJewnmhowpUo381oSn3XqsQ6Eto23XV')
train_features.shape, train_labels.shape, test_features.shape, sample_submission.shape
# Numeric columns to run through the imputer; `id` comes along only as a key.
num_features = ['id',
'latitude',
'longitude',
'population',
'construction_year',
'amount_tsh',
'gps_height']
num_data = train_features[num_features]
num_test = test_features[num_features]
num_data.head()
# Index by id so only the six numeric columns are imputed.
num_data = num_data.set_index('id')
num_test = num_test.set_index('id')
num_data.head()
# missing_values=0 treats zeros as the missing marker — presumably because 0
# encodes "unknown" for these columns in this dataset (e.g. longitude 0 is
# not in Tanzania); confirm against the data dictionary.
# NOTE(review): sample_posterior=True makes imputation stochastic and no
# random_state is set, so results vary between runs.
imputer = IterativeImputer(missing_values=0,
initial_strategy='most_frequent',
imputation_order='random',
sample_posterior=True)
# Fit on train only, then apply the same imputation model to test.
num_data = imputer.fit_transform(num_data)
num_test = imputer.transform(num_test)
train_features = train_features.drop(columns=['latitude',
'longitude',
'population',
'construction_year',
'amount_tsh',
'gps_height'])
test_features = test_features.drop(columns=['latitude',
'longitude',
'population',
'construction_year',
'amount_tsh',
'gps_height'])
# Column order matches the (id-indexed) frames fed to the imputer above.
imputed_columns = ['latitude',
                   'longitude',
                   'population',
                   'construction_year',
                   'amount_tsh',
                   'gps_height']
# Wrap the imputed numpy arrays back into DataFrames.
data = pd.DataFrame(num_data, columns=imputed_columns)
# BUG FIX: the test frame was built from `num_data` (the imputed TRAIN
# array), so every test row received training values and the row count did
# not match test_features. It must use `num_test`.
test = pd.DataFrame(num_test, columns=imputed_columns)
# Re-attach ids so the merges below align rows with the original frames.
data['id'] = train_features['id']
test['id'] = test_features['id']
train_features = train_features.merge(data)
test_features = test_features.merge(test)
# The imputer returned floats; cast back to integer years before parsing.
train_features['construction_year'] = train_features['construction_year'].astype(int)
test_features['construction_year'] = test_features['construction_year'].astype(int)
train_features['construction_year'] = pd.to_datetime(train_features['construction_year'], format='%Y')
test_features['construction_year'] = pd.to_datetime(test_features['construction_year'], format='%Y')
# Replace the year with the pump's age in days.
# NOTE(review): `now` changes every run, so this feature is not reproducible
# across runs.
now = pd.Timestamp.now()
train_features['age'] = (now - train_features['construction_year']).dt.days
test_features['age'] = (now - test_features['construction_year']).dt.days
train_features = train_features.drop(columns='construction_year')
test_features = test_features.drop(columns='construction_year')
# +
cat_features = [
'source_type',
# 'quality_group',
'extraction_type',
'quantity_group',
# 'management_group',
'basin',
'payment_type',
# 'permit',
# 'scheme_management'
]
num_features = [
'amount_tsh',
'gps_height',
'longitude',
'latitude',
'num_private',
'region_code',
'district_code',
'population',
'age'
]
features = cat_features + num_features
train = train_features[features]
y_train = train_labels['status_group']
test = test_features[features]
# Stratified 80/20 split. Note y_train is rebound here: the right-hand
# `y_train` / `stratify=y_train` refer to the full label series defined above.
X_train, X_val, y_train, y_val = tts(train,
y_train,
train_size=.8,
test_size=.2,
stratify=y_train,
random_state=42)
print(X_train.shape, X_val.shape, y_train.shape, y_val.shape, test.shape)
# One-hot encode the categorical columns; fit on train only to avoid leakage.
encoder = ce.OneHotEncoder(use_cat_names=True)
X_train_enc = encoder.fit_transform(X_train)
X_val_enc = encoder.transform(X_val)
X_test_features = encoder.transform(test)
print(X_train_enc.shape, X_val_enc.shape, X_test_features.shape)
# +
# %%time
# Random forest with mild regularization (leaf/split minimums) and 90% of
# features considered per split; random_state fixed for reproducibility.
model = RFC(n_estimators=250,
min_samples_leaf=5,
criterion='entropy',
max_features=.9,
min_samples_split=9,
random_state=42,
bootstrap=True
)
model.fit(X_train_enc, y_train)
# Accuracy on the held-out validation split.
print(model.score(X_val_enc, y_val))
# +
# predicted = model.predict(X_test_features)
# submission = sample_submission.copy()
# submission['status_group'] = predicted
# submission.to_csv('sub_4.csv', index=False)
| module1-logistic-regression/Classification1 Day2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# # A SageMaker Workflow
#
# The pipeline that we create follows a typical Machine Learning Application pattern of pre-processing, training, evaluation, and model registration:
#
# 
#
# ### Create SageMaker Clients and Session
#
# First, we create a new SageMaker Session in the current AWS region. We also acquire the role arn for the session.
#
# This role arn should be the execution role arn that you set up in the Prerequisites section of this notebook.
# +
from botocore.exceptions import ClientError
import os
import sagemaker
import logging
import boto3
import sagemaker  # NOTE(review): duplicate import — harmless, could be removed
import pandas as pd
# Session bound to the current region; artifacts go to the account's default
# SageMaker bucket, and `role` is the notebook's execution role.
sess = sagemaker.Session()
bucket = sess.default_bucket()
role = sagemaker.get_execution_role()
region = boto3.Session().region_name
sm = boto3.Session().client(service_name="sagemaker", region_name=region)
# -
# # Track the Pipeline as an `Experiment`
# +
import time
timestamp = int(time.time())
pipeline_name = "BERT-pipeline-{}".format(timestamp)
# -
# %store pipeline_name
# +
import time
from smexperiments.experiment import Experiment
pipeline_experiment = Experiment.create(
experiment_name=pipeline_name,
description="Amazon Customer Reviews BERT Pipeline Experiment",
sagemaker_boto_client=sm,
)
pipeline_experiment_name = pipeline_experiment.experiment_name
print("Pipeline experiment name: {}".format(pipeline_experiment_name))
# -
# %store pipeline_experiment_name
# # Create the `Trial`
# +
import time
from smexperiments.trial import Trial
pipeline_trial = Trial.create(
trial_name="trial-{}".format(timestamp), experiment_name=pipeline_experiment_name, sagemaker_boto_client=sm
)
pipeline_trial_name = pipeline_trial.trial_name
print("Trial name: {}".format(pipeline_trial_name))
# -
# %store pipeline_trial_name
# # Define Parameters to Parametrize Pipeline Execution
#
# We define Workflow Parameters by which we can parametrize our Pipeline and vary the values injected and used in Pipeline executions and schedules without having to modify the Pipeline definition.
#
# The supported parameter types include:
#
# * `ParameterString` - representing a `str` Python type
# * `ParameterInteger` - representing an `int` Python type
# * `ParameterFloat` - representing a `float` Python type
#
# These parameters support providing a default value, which can be overridden on pipeline execution. The default value specified should be an instance of the type of the parameter.
#
# The parameters defined in this workflow below include:
#
# * `processing_instance_type` - The `ml.*` instance type of the processing job.
# * `processing_instance_count` - The instance count of the processing job. For illustrative purposes only: 1 is the only value that makes sense here.
# * `train_instance_type` - The `ml.*` instance type of the training job.
# * `model_approval_status` - What approval status to register the trained model with for CI/CD purposes. Defaults to "PendingManualApproval". (NOTE: not available in service yet)
# * `input_data` - The URL location of the input data
# # Pipeline Parameters
from sagemaker.workflow.parameters import (
ParameterInteger,
ParameterString,
ParameterFloat,
)
# # Experiment Parameters
# %store -r pipeline_experiment_name
exp_name = ParameterString(
name="ExperimentName",
default_value=pipeline_experiment_name,
)
# 
# # Processing Step Parameters
raw_input_data_s3_uri = "s3://{}/amazon-reviews-pds/tsv/".format(bucket)
print(raw_input_data_s3_uri)
# !aws s3 ls $raw_input_data_s3_uri
# +
import time
timestamp = int(time.time())
input_data = ParameterString(
name="InputData",
default_value=raw_input_data_s3_uri,
)
processing_instance_count = ParameterInteger(name="ProcessingInstanceCount", default_value=1)
processing_instance_type = ParameterString(name="ProcessingInstanceType", default_value="ml.c5.2xlarge")
max_seq_length = ParameterInteger(
name="MaxSeqLength",
default_value=64,
)
balance_dataset = ParameterString(
name="BalanceDataset",
default_value="True",
)
train_split_percentage = ParameterFloat(
name="TrainSplitPercentage",
default_value=0.90,
)
validation_split_percentage = ParameterFloat(
name="ValidationSplitPercentage",
default_value=0.05,
)
test_split_percentage = ParameterFloat(
name="TestSplitPercentage",
default_value=0.05,
)
feature_store_offline_prefix = ParameterString(
name="FeatureStoreOfflinePrefix",
default_value="reviews-feature-store-" + str(timestamp),
)
feature_group_name = ParameterString(name="FeatureGroupName", default_value="reviews-feature-group-" + str(timestamp))
# -
# !pygmentize ./preprocess-scikit-text-to-bert-feature-store.py
# We create an instance of an `SKLearnProcessor` processor and we use that in our `ProcessingStep`.
#
# We also specify the `framework_version` we will use throughout.
#
# Note the `processing_instance_type` and `processing_instance_count` parameters that used by the processor instance.
# +
from sagemaker.sklearn.processing import SKLearnProcessor
processor = SKLearnProcessor(
framework_version="0.23-1",
role=role,
instance_type=processing_instance_type,
instance_count=processing_instance_count,
env={"AWS_DEFAULT_REGION": region},
)
# +
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.workflow.steps import ProcessingStep
processing_inputs = [
ProcessingInput(
input_name="raw-input-data",
source=input_data,
destination="/opt/ml/processing/input/data/",
s3_data_distribution_type="ShardedByS3Key",
)
]
processing_outputs = [
ProcessingOutput(
output_name="bert-train",
s3_upload_mode="EndOfJob",
source="/opt/ml/processing/output/bert/train",
),
ProcessingOutput(
output_name="bert-validation",
s3_upload_mode="EndOfJob",
source="/opt/ml/processing/output/bert/validation",
),
ProcessingOutput(
output_name="bert-test",
s3_upload_mode="EndOfJob",
source="/opt/ml/processing/output/bert/test",
),
]
processing_step = ProcessingStep(
    name="Processing",
    code="preprocess-scikit-text-to-bert-feature-store.py",
    processor=processor,
    inputs=processing_inputs,
    outputs=processing_outputs,
    # NOTE(review): passing `<param>.default_value` bakes the default into
    # the pipeline definition, so overriding these pipeline parameters at
    # execution time will NOT change the job arguments; pass the parameter
    # objects themselves if runtime overrides are intended — confirm.
    job_arguments=[
        "--train-split-percentage",
        str(train_split_percentage.default_value),
        "--validation-split-percentage",
        str(validation_split_percentage.default_value),
        "--test-split-percentage",
        str(test_split_percentage.default_value),
        "--max-seq-length",
        str(max_seq_length.default_value),
        "--balance-dataset",
        str(balance_dataset.default_value),
        "--feature-store-offline-prefix",
        str(feature_store_offline_prefix.default_value),
        "--feature-group-name",
        str(feature_group_name.default_value),
    ],
)
print(processing_step)
# -
# 
# Finally, we use the processor instance to construct a `ProcessingStep`, along with the input and output channels and the code that will be executed when the pipeline invokes pipeline execution. This is very similar to a processor instance's `run` method, for those familiar with the existing Python SDK.
#
# Note the `input_data` parameters passed into `ProcessingStep` as the input data of the step itself. This input data will be used by the processor instance when it is run.
#
# Also, take note the `"bert-train"`, `"bert-validation"` and `"bert-test"` named channels specified in the output configuration for the processing job. Such step `Properties` can be used in subsequent steps and will resolve to their runtime values at execution. In particular, we'll call out this usage when we define our training step.
# # Train Step
# <img src="img/train_model.png" width="90%" align="left">
# +
train_instance_type = ParameterString(name="TrainInstanceType", default_value="ml.c5.9xlarge")
train_instance_count = ParameterInteger(name="TrainInstanceCount", default_value=1)
# -
# # Setup Training Hyper-Parameters
# Note that `max_seq_length` is re-used from the processing hyper-parameters above
# +
epochs = ParameterInteger(name="Epochs", default_value=1)
learning_rate = ParameterFloat(name="LearningRate", default_value=0.00001)
epsilon = ParameterFloat(name="Epsilon", default_value=0.00000001)
train_batch_size = ParameterInteger(name="TrainBatchSize", default_value=128)
validation_batch_size = ParameterInteger(name="ValidationBatchSize", default_value=128)
test_batch_size = ParameterInteger(name="TestBatchSize", default_value=128)
train_steps_per_epoch = ParameterInteger(name="TrainStepsPerEpoch", default_value=50)
validation_steps = ParameterInteger(name="ValidationSteps", default_value=50)
test_steps = ParameterInteger(name="TestSteps", default_value=50)
train_volume_size = ParameterInteger(name="TrainVolumeSize", default_value=1024)
use_xla = ParameterString(
name="UseXLA",
default_value="True",
)
use_amp = ParameterString(
name="UseAMP",
default_value="True",
)
freeze_bert_layer = ParameterString(
name="FreezeBERTLayer",
default_value="False",
)
enable_sagemaker_debugger = ParameterString(
name="EnableSageMakerDebugger",
default_value="False",
)
enable_checkpointing = ParameterString(
name="EnableCheckpointing",
default_value="False",
)
enable_tensorboard = ParameterString(
name="EnableTensorboard",
default_value="False",
)
input_mode = ParameterString(
name="InputMode",
default_value="File",
)
run_validation = ParameterString(
name="RunValidation",
default_value="True",
)
run_test = ParameterString(
name="RunTest",
default_value="False",
)
run_sample_predictions = ParameterString(
name="RunSamplePredictions",
default_value="False",
)
# -
# # Setup Metrics To Track Model Performance
metrics_definitions = [
{"Name": "train:loss", "Regex": "loss: ([0-9\\.]+)"},
{"Name": "train:accuracy", "Regex": "accuracy: ([0-9\\.]+)"},
{"Name": "validation:loss", "Regex": "val_loss: ([0-9\\.]+)"},
{"Name": "validation:accuracy", "Regex": "val_accuracy: ([0-9\\.]+)"},
]
# !pygmentize src/tf_bert_reviews.py
# # Define a Training Step to Train a Model
#
# We configure an Estimator and the input dataset. A typical training script loads data from the input channels, configures training with hyperparameters, trains a model, and saves a model to `model_dir` so that it can be hosted later.
#
# We also specify the model path where the models from training will be saved.
#
# Note the `train_instance_type` parameter passed may be also used and passed into other places in the pipeline. In this case, the `train_instance_type` is passed into the estimator.
# +
from sagemaker.tensorflow import TensorFlow
estimator = TensorFlow(
entry_point="tf_bert_reviews.py",
source_dir="src",
role=role,
instance_count=train_instance_count, # Make sure you have at least this number of input files or the ShardedByS3Key distibution strategy will fail the job due to no data available
instance_type=train_instance_type,
volume_size=train_volume_size,
py_version="py37",
framework_version="2.3.1",
hyperparameters={
"epochs": epochs,
"learning_rate": learning_rate,
"epsilon": epsilon,
"train_batch_size": train_batch_size,
"validation_batch_size": validation_batch_size,
"test_batch_size": test_batch_size,
"train_steps_per_epoch": train_steps_per_epoch,
"validation_steps": validation_steps,
"test_steps": test_steps,
"use_xla": use_xla,
"use_amp": use_amp,
"max_seq_length": max_seq_length,
"freeze_bert_layer": freeze_bert_layer,
"enable_sagemaker_debugger": enable_sagemaker_debugger,
"enable_checkpointing": enable_checkpointing,
"enable_tensorboard": enable_tensorboard,
"run_validation": run_validation,
"run_test": run_test,
"run_sample_predictions": run_sample_predictions,
},
input_mode=input_mode,
metric_definitions=metrics_definitions,
)
# -
# Finally, we use the estimator instance to construct a `TrainingStep` as well as the `Properties` of the prior `ProcessingStep` used as input in the `TrainingStep` inputs and the code that will be executed when the pipeline invokes pipeline execution. This is very similar to an estimator's `fit` method, for those familiar with the existing Python SDK.
#
# In particular, we pass in the `S3Uri` of the `"train"`, `"validation"` and `"test"` output channel to the `TrainingStep`. The `properties` attribute of a Workflow step match the object model of the corresponding response of a describe call. These properties can be referenced as placeholder values and are resolved, or filled in, at runtime. For example, the `ProcessingStep` `properties` attribute matches the object model of the [DescribeProcessingJob](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeProcessingJob.html) response object.
# +
from sagemaker.inputs import TrainingInput
from sagemaker.workflow.steps import TrainingStep
training_step = TrainingStep(
name="Train",
estimator=estimator,
inputs={
"train": TrainingInput(
s3_data=processing_step.properties.ProcessingOutputConfig.Outputs["bert-train"].S3Output.S3Uri,
content_type="text/csv",
),
"validation": TrainingInput(
s3_data=processing_step.properties.ProcessingOutputConfig.Outputs["bert-validation"].S3Output.S3Uri,
content_type="text/csv",
),
"test": TrainingInput(
s3_data=processing_step.properties.ProcessingOutputConfig.Outputs["bert-test"].S3Output.S3Uri,
content_type="text/csv",
),
},
)
print(training_step)
# -
# 
# # Evaluation Step
#
# First, we develop an evaluation script that will be specified in a Processing step that will perform the model evaluation.
#
# The evaluation script `evaluation.py` takes the trained model and the test dataset as input, and produces a JSON file containing classification evaluation metrics such as accuracy.
#
# After pipeline execution, we will examine the resulting `evaluation.json` for analysis.
#
# The evaluation script:
#
# * loads in the model
# * reads in the test data
# * issues a bunch of predictions against the test data
# * builds a classification report, including accuracy
# * saves the evaluation report to the evaluation directory
# Next, we create an instance of a `ScriptProcessor` processor and we use that in our `ProcessingStep`.
#
# Note the `processing_instance_type` parameter passed into the processor.
# +
from sagemaker.sklearn.processing import SKLearnProcessor
evaluation_processor = SKLearnProcessor(
framework_version="0.23-1",
role=role,
instance_type=processing_instance_type,
instance_count=processing_instance_count,
env={"AWS_DEFAULT_REGION": region},
max_runtime_in_seconds=7200,
)
# -
# !pygmentize evaluate_model_metrics.py
# We use the processor instance to construct a `ProcessingStep`, along with the input and output channels and the code that will be executed when the pipeline invokes pipeline execution. This is very similar to a processor instance's `run` method, for those familiar with the existing Python SDK.
#
# The `TrainingStep` and `ProcessingStep` `properties` attribute matches the object model of the [DescribeTrainingJob](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeTrainingJob.html) and [DescribeProcessingJob](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeProcessingJob.html) response objects, respectively.
# +
from sagemaker.workflow.properties import PropertyFile
# PropertyFile lets downstream steps (the ConditionStep below, via JsonGet)
# read values out of the evaluation.json report produced by this job.
evaluation_report = PropertyFile(name="EvaluationReport", output_name="metrics", path="evaluation.json")
# -
# Processing step that runs evaluate_model_metrics.py against the trained
# model artifacts and the raw input data, writing evaluation.json to the
# "metrics" output channel.
evaluation_step = ProcessingStep(
    name="EvaluateModel",
    processor=evaluation_processor,
    code="evaluate_model_metrics.py",
    inputs=[
        ProcessingInput(
            # Trained model tarball produced by the training step.
            source=training_step.properties.ModelArtifacts.S3ModelArtifacts,
            destination="/opt/ml/processing/input/model",
        ),
        ProcessingInput(
            # Same raw data that fed the feature-engineering processing step.
            source=processing_step.properties.ProcessingInputs["raw-input-data"].S3Input.S3Uri,
            destination="/opt/ml/processing/input/data",
        ),
    ],
    outputs=[
        ProcessingOutput(
            output_name="metrics", s3_upload_mode="EndOfJob", source="/opt/ml/processing/output/metrics/"
        ),
    ],
    job_arguments=[
        # Must match the sequence length used during training/feature engineering.
        "--max-seq-length",
        str(max_seq_length.default_value),
    ],
    property_files=[evaluation_report],
)
# 
# +
from sagemaker.model_metrics import MetricsSource, ModelMetrics
# Attach the evaluation report to the model package so the metrics are
# visible in the Model Registry. The S3 URI is resolved from the first
# (and only) output channel of the evaluation step.
model_metrics = ModelMetrics(
    model_statistics=MetricsSource(
        s3_uri="{}/evaluation.json".format(
            evaluation_step.arguments["ProcessingOutputConfig"]["Outputs"][0]["S3Output"]["S3Uri"]
        ),
        content_type="application/json",
    )
)
print(model_metrics)
# -
# # Register Model Step
#
# 
#
# We use the estimator instance that was used for the training step to construct an instance of `RegisterModel`. The result of executing `RegisterModel` in a pipeline is a Model Package. A Model Package is a reusable model artifacts abstraction that packages all ingredients necessary for inference. Primarily, it consists of an inference specification that defines the inference image to use along with an optional model weights location.
#
# A Model Package Group is a collection of Model Packages. You can create a Model Package Group for a specific ML business problem, and you can keep adding versions/model packages into it. Typically, we expect customers to create a ModelPackageGroup for a SageMaker Workflow Pipeline so that they can keep adding versions/model packages to the group for every Workflow Pipeline run.
#
# The construction of `RegisterModel` is very similar to an estimator instance's `register` method, for those familiar with the existing Python SDK.
#
# In particular, we pass in the `S3ModelArtifacts` from the `TrainingStep`, `step_train` properties. The `TrainingStep` `properties` attribute matches the object model of the [DescribeTrainingJob](https://docs.aws.amazon.com/sagemaker/latest/APIReference/API_DescribeTrainingJob.html) response object.
#
# Of note, we provided a specific model package group name which we will use in the Model Registry and CI/CD work later on.
# +
# Pipeline parameters controlling how the registered model may be deployed.
model_approval_status = ParameterString(name="ModelApprovalStatus", default_value="PendingManualApproval")
deploy_instance_type = ParameterString(name="DeployInstanceType", default_value="ml.m5.4xlarge")
deploy_instance_count = ParameterInteger(name="DeployInstanceCount", default_value=1)
# +
# Model package group collects all versions registered by runs of this pipeline.
model_package_group_name = f"BERT-Reviews-{timestamp}"
print(model_package_group_name)
# -
# Look up the TensorFlow *inference* container for this region/instance type;
# by default registration would reuse the training image.
inference_image_uri = sagemaker.image_uris.retrieve(
    framework="tensorflow",
    region=region,
    version="2.3.1",
    py_version="py37",
    instance_type=deploy_instance_type,
    image_scope="inference",
)
print(inference_image_uri)
# +
from sagemaker.workflow.step_collections import RegisterModel
# Registers the trained model as a new version in the model package group.
# Executed only when the accuracy condition below passes.
register_step = RegisterModel(
    name="RegisterModel",
    # entry_point='inference.py', # Adds a Repack Step: https://github.com/aws/sagemaker-python-sdk/blob/01c6ee3a9ec1831e935e86df58cf70bc92ed1bbe/src/sagemaker/workflow/_utils.py#L44
    # source_dir='src',
    estimator=estimator,
    image_uri=inference_image_uri, # we have to specify, by default it's using training image
    model_data=training_step.properties.ModelArtifacts.S3ModelArtifacts,
    content_types=["application/jsonlines"],
    response_types=["application/jsonlines"],
    inference_instances=[deploy_instance_type],
    transform_instances=["ml.c5.18xlarge"],
    model_package_group_name=model_package_group_name,
    approval_status=model_approval_status,  # PendingManualApproval by default
    model_metrics=model_metrics,  # evaluation.json attached for the registry
)
# -
# # Create Model for Deployment Step
#
# 
#
# +
from sagemaker.model import Model
# SageMaker Model object pointing at the trained artifacts and the
# inference container; used by the CreateModelStep below.
model_name = "bert-model-{}".format(timestamp)
model = Model(
    name=model_name,
    image_uri=inference_image_uri,
    model_data=training_step.properties.ModelArtifacts.S3ModelArtifacts,
    sagemaker_session=sess,
    role=role,
)
# +
from sagemaker.inputs import CreateModelInput
create_inputs = CreateModelInput(
    instance_type=deploy_instance_type, # "ml.m5.4xlarge",
)
# +
from sagemaker.workflow.steps import CreateModelStep
# Creates the SageMaker Model resource (no endpoint yet) when the pipeline runs.
create_step = CreateModelStep(
    name="CreateModel",
    model=model,
    inputs=create_inputs,
)
# -
# # Define a Condition Step to Check Accuracy and Conditionally Register Model
#
# 
#
# Finally, we'd like to only register this model if the accuracy of the model, as determined by our evaluation step `step_eval`, exceeded some value. A `ConditionStep` allows for pipelines to support conditional execution in the pipeline DAG based on conditions of step properties.
#
# Below, we:
#
# * define a `ConditionGreaterThan` on the accuracy value found in the output of the evaluation step, `step_eval`.
# * use the condition in the list of conditions in a `ConditionStep`
# * pass the `RegisterModel` step collection into the `if_steps` of the `ConditionStep`
# Minimum test accuracy required before the model is registered/created.
min_accuracy_value = ParameterFloat(name="MinAccuracyValue", default_value=0.01)
# +
from sagemaker.workflow.conditions import ConditionGreaterThanOrEqualTo
from sagemaker.workflow.condition_step import (
    ConditionStep,
    JsonGet,
)
# Read metrics.accuracy.value out of the evaluation report and compare it
# against the MinAccuracyValue pipeline parameter.
minimum_accuracy_condition = ConditionGreaterThanOrEqualTo(
    left=JsonGet(
        step=evaluation_step,
        property_file=evaluation_report,
        json_path="metrics.accuracy.value",
    ),
    right=min_accuracy_value, # accuracy
)
minimum_accuracy_condition_step = ConditionStep(
    name="AccuracyCondition",
    conditions=[minimum_accuracy_condition],
    if_steps=[register_step, create_step], # success, continue with model registration
    else_steps=[], # fail, end the pipeline
)
# -
# # Define a Pipeline of Parameters, Steps, and Conditions
#
# Let's tie it all up into a workflow pipeline so we can execute it, and even schedule it.
#
# A pipeline requires a `name`, `parameters`, and `steps`. Names must be unique within an `(account, region)` pair so we tack on the timestamp to the name.
#
# Note:
#
# * All the parameters used in the definitions must be present.
# * Steps passed into the pipeline need not be in the order of execution. The SageMaker Workflow service will resolve the _data dependency_ DAG as the steps of the execution complete.
# * Steps must be unique to either pipeline step list or a single condition step if/else list.
# %store -r pipeline_name
# +
from sagemaker.workflow.pipeline import Pipeline
# Assemble the pipeline. Every parameter referenced by any step definition
# must be listed here; step order is irrelevant (the service resolves the
# data-dependency DAG).
pipeline = Pipeline(
    name=pipeline_name,
    parameters=[
        input_data,
        processing_instance_count,
        processing_instance_type,
        max_seq_length,
        balance_dataset,
        train_split_percentage,
        validation_split_percentage,
        test_split_percentage,
        feature_store_offline_prefix,
        feature_group_name,
        train_instance_type,
        train_instance_count,
        epochs,
        learning_rate,
        epsilon,
        train_batch_size,
        validation_batch_size,
        test_batch_size,
        train_steps_per_epoch,
        validation_steps,
        test_steps,
        train_volume_size,
        use_xla,
        use_amp,
        freeze_bert_layer,
        enable_sagemaker_debugger,
        enable_checkpointing,
        enable_tensorboard,
        input_mode,
        run_validation,
        run_test,
        run_sample_predictions,
        min_accuracy_value,
        model_approval_status,
        deploy_instance_type,
        deploy_instance_count,
    ],
    # register/create steps are reachable only through the condition step.
    steps=[processing_step, training_step, evaluation_step, minimum_accuracy_condition_step],
    sagemaker_session=sess,
)
# -
# Let's examine the Json of the pipeline definition that meets the SageMaker Workflow Pipeline DSL specification.
#
# By examining the definition, we're also confirming that the pipeline was well-defined, and that the parameters and step properties resolve correctly.
# +
import json
from pprint import pprint
# Rendering the definition also validates that parameters and step
# properties resolve correctly.
definition = json.loads(pipeline.definition())
pprint(definition)
# -
# ### Submit the pipeline to SageMaker and start execution
#
# Let's submit our pipeline definition to the workflow service. The role passed in will be used by the workflow service to create all the jobs defined in the steps.
print(pipeline_experiment_name)
# ## Ignore the `WARNING` below
# +
# Upsert the pipeline definition; the returned ARN identifies it.
response = pipeline.create(role_arn=role)
pipeline_arn = response["PipelineArn"]
print(pipeline_arn)
# -
# We'll start the pipeline, accepting all the default parameters.
#
# Values can also be passed into these pipeline parameters on starting of the pipeline, and will be covered later.
# +
# Start an execution, overriding the pipeline parameter defaults.
# Keys must match the ParameterString/Integer/Float names declared above.
execution = pipeline.start(
    parameters=dict(
        InputData=raw_input_data_s3_uri,
        ProcessingInstanceCount=1,
        ProcessingInstanceType="ml.c5.2xlarge",
        MaxSeqLength=64,
        BalanceDataset="True",
        TrainSplitPercentage=0.9,
        ValidationSplitPercentage=0.05,
        TestSplitPercentage=0.05,
        FeatureStoreOfflinePrefix="reviews-feature-store-" + str(timestamp),
        FeatureGroupName="reviews-feature-group-" + str(timestamp),
        LearningRate=0.000012,
        TrainInstanceType="ml.c5.9xlarge",
        TrainInstanceCount=1,
        Epochs=1,
        Epsilon=0.00000001,
        TrainBatchSize=128,
        ValidationBatchSize=128,
        TestBatchSize=128,
        TrainStepsPerEpoch=50,
        ValidationSteps=50,
        TestSteps=50,
        TrainVolumeSize=1024,
        UseXLA="True",
        UseAMP="True",
        FreezeBERTLayer="False",
        EnableSageMakerDebugger="False",
        EnableCheckpointing="False",
        EnableTensorboard="False",
        InputMode="File",
        RunValidation="True",
        RunTest="False",
        RunSamplePredictions="False",
        MinAccuracyValue=0.01,
        ModelApprovalStatus="PendingManualApproval",
        DeployInstanceType="ml.m5.4xlarge",
        DeployInstanceCount=1,
    )
)
print(execution.arn)
# -
# ### Workflow Operations: examining and waiting for pipeline execution
#
# Now we describe execution instance and list the steps in the execution to find out more about the execution.
# +
from pprint import pprint
execution_run = execution.describe()
pprint(execution_run)
# -
# # Add Execution Run as Trial to Experiments
execution_run_name = execution_run["PipelineExecutionDisplayName"]
print(execution_run_name)
pipeline_execution_arn = execution_run["PipelineExecutionArn"]
print(pipeline_execution_arn)
# # List Execution Steps
# +
import time
# Giving the first step time to start up
time.sleep(30)
execution.list_steps()
# -
# # Wait for the Pipeline to Complete
#
# # _Note: If this cell errors out with `WaiterError: Waiter PipelineExecutionComplete failed: Max attempts exceeded`, just re-run it and keep waiting._
# +
# # %%time
# execution.wait()
# -
# %store -r pipeline_name
# +
# %%time
import time
from pprint import pprint
# Poll the most recent execution of this pipeline until it leaves the
# "Executing" state. list_pipeline_executions returns newest first.
executions_response = sm.list_pipeline_executions(PipelineName=pipeline_name)["PipelineExecutionSummaries"]
pipeline_execution_status = executions_response[0]["PipelineExecutionStatus"]
print(pipeline_execution_status)
while pipeline_execution_status == "Executing":
    try:
        executions_response = sm.list_pipeline_executions(PipelineName=pipeline_name)["PipelineExecutionSummaries"]
        pipeline_execution_status = executions_response[0]["PipelineExecutionStatus"]
        # print('Executions for our pipeline...')
        # print(pipeline_execution_status)
    except Exception as e:
        # Best-effort poll: API throttling is expected here, so back off
        # and retry rather than aborting the wait loop.
        print("Please wait...")
        time.sleep(30)
pprint(executions_response)
# -
# # Wait for the Pipeline ^^ Above ^^ to Complete
#
# # _Note: If this cell errors out with `WaiterError: Waiter PipelineExecutionComplete failed: Max attempts exceeded`, just re-run it and keep waiting._
pipeline_execution_status = executions_response[0]["PipelineExecutionStatus"]
print(pipeline_execution_status)
pipeline_execution_arn = executions_response[0]["PipelineExecutionArn"]
print(pipeline_execution_arn)
# We can list the execution steps to check out the status and artifacts:
# # List Pipeline Execution Steps
pipeline_execution_status = executions_response[0]["PipelineExecutionStatus"]
print(pipeline_execution_status)
# +
from pprint import pprint
steps = sm.list_pipeline_execution_steps(PipelineExecutionArn=pipeline_execution_arn)
pprint(steps)
# -
# # List All Artifacts Generated By The Pipeline
# Populated from the execution steps below; used later for experiment tracking.
processing_job_name = None
training_job_name = None
# +
import time
from sagemaker.lineage.visualizer import LineageTableVisualizer
viz = LineageTableVisualizer(sagemaker.session.Session())
# Walk the steps oldest-first and show the lineage table for each job.
for execution_step in reversed(steps["PipelineExecutionSteps"]):
    print(execution_step)
    # We are doing this because there appears to be a bug of this LineageTableVisualizer handling the Processing Step
    if execution_step["StepName"] == "Processing":
        processing_job_name = execution_step["Metadata"]["ProcessingJob"]["Arn"].split("/")[-1]
        print(processing_job_name)
        display(viz.show(processing_job_name=processing_job_name))
    elif execution_step["StepName"] == "Train":
        training_job_name = execution_step["Metadata"]["TrainingJob"]["Arn"].split("/")[-1]
        print(training_job_name)
        display(viz.show(training_job_name=training_job_name))
    else:
        display(viz.show(pipeline_execution_step=execution_step))
        time.sleep(5)
# -
# # Track Additional Parameters in our Experiment
# -aws-processing-job is the default name assigned by ProcessingJob
# Trial-component names follow the "<job-name>-aws-<job-type>" convention.
processing_job_tc = "{}-aws-processing-job".format(processing_job_name)
print(processing_job_tc)
# %store -r pipeline_trial_name
print(pipeline_trial_name)
# Attach the processing job's trial component to our experiment trial.
response = sm.associate_trial_component(TrialComponentName=processing_job_tc, TrialName=pipeline_trial_name)
# -aws-training-job is the default name assigned by TrainingJob
training_job_tc = "{}-aws-training-job".format(training_job_name)
print(training_job_tc)
response = sm.associate_trial_component(TrialComponentName=training_job_tc, TrialName=pipeline_trial_name)
# +
from smexperiments import tracker
# Load a tracker for the processing job so we can log the pipeline
# parameters that produced its features.
processing_job_tracker = tracker.Tracker.load(trial_component_name=processing_job_tc)
# +
processing_job_tracker.log_parameters(
    {
        "balance_dataset": str(balance_dataset),
    }
)
# must save after logging
processing_job_tracker.trial_component.save()
# +
processing_job_tracker.log_parameters(
    {
        "train_split_percentage": str(train_split_percentage),
    }
)
# must save after logging
processing_job_tracker.trial_component.save()
# +
processing_job_tracker.log_parameters(
    {
        "validation_split_percentage": str(validation_split_percentage),
    }
)
# must save after logging
processing_job_tracker.trial_component.save()
# +
processing_job_tracker.log_parameters(
    {
        "test_split_percentage": str(test_split_percentage),
    }
)
# must save after logging
processing_job_tracker.trial_component.save()
# +
processing_job_tracker.log_parameters(
    {
        "max_seq_length": str(max_seq_length),
    }
)
# must save after logging
processing_job_tracker.trial_component.save()
# +
time.sleep(5)  # avoid throttling exception
processing_job_tracker.log_parameters(
    {
        "feature_store_offline_prefix": str(feature_store_offline_prefix),
    }
)
# must save after logging
processing_job_tracker.trial_component.save()
# +
time.sleep(5)  # avoid throttling exception
processing_job_tracker.log_parameters(
    {
        "feature_group_name": str(feature_group_name),
    }
)
# must save after logging
processing_job_tracker.trial_component.save()
# -
# # Analyze Experiment
# +
from sagemaker.analytics import ExperimentAnalytics
time.sleep(30)  # avoid throttling exception
import pandas as pd
# Widen column display so long S3 URIs/ARNs are not truncated.
pd.set_option("max_colwidth", 500)
# Pull all trial components of the experiment into a single dataframe.
experiment_analytics = ExperimentAnalytics(
    experiment_name=pipeline_experiment_name,
)
experiment_analytics_df = experiment_analytics.dataframe()
experiment_analytics_df
# -
# # Release Resources
# + language="html"
#
# <p><b>Shutting down your kernel for this notebook to release resources.</b></p>
# <button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button>
#
# <script>
# try {
# els = document.getElementsByClassName("sm-command-button");
# els[0].click();
# }
# catch(err) {
# // NoOp
# }
# </script>
# + language="javascript"
#
# try {
# Jupyter.notebook.save_checkpoint();
# Jupyter.notebook.session.delete();
# }
# catch(err) {
# // NoOp
# }
# -
| 10_pipeline/01_Create_SageMaker_Pipeline_BERT_Reviews.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Custom Display Logic Exercises
from IPython.display import display
from IPython.display import (
display_png, display_html, display_latex,
display_javascript, display_svg
)
# ## Circle class with custom display methods
# Write a simple `MyCircle` Python class. Here is a skeleton to get you started:
#
# ```python
# class MyCircle(object):
# def __init__(self, center=(0.0,0.0), radius=1.0, color='blue'):
# self.center = center
# self.radius = radius
# self.color = color
# ```
#
# Now add special display methods to this class for the following representations (remember to wrap them in Python strings):
#
# For HTML:
#
# ○
#
# For LaTeX (wrap with `$` and use a raw Python string):
#
# \bigcirc
#
# For JavaScript:
#
# alert('I am a circle!');
#
# After you write the class, create an instance and then use `display_html`, `display_svg`, `display_latex` and `display_javascript` to display those representations.
# ### Solution
# Here is the solution to the simple `MyCircle` class:
# %load soln/mycircle.py
# Now create an instance and use the display methods:
c = MyCircle()
display(c)
display_html(c)
display_latex(c)
# ## PNG formatter for `MyCircle`
# %matplotlib inline
from matplotlib import pyplot as plt
# Now let's assume that the `MyCircle` class has already been defined and add a PNG representation using a formatter display function. Here is a function that converts a `MyCircle` instance to raw PNG data.
# +
from IPython.core.pylabtools import print_figure
def circle_to_png(circle):
    """Render a MyCircle instance to raw PNG bytes using matplotlib.

    Draws a patch from the instance's center/radius/color attributes.
    (The original docstring referred to "AnotherCircle", a stale name.)
    """
    fig, ax = plt.subplots()
    patch = plt.Circle(circle.center,
                       radius=circle.radius,
                       fc=circle.color,
                       )
    ax.add_patch(patch)
    # 'scaled' keeps the aspect ratio equal so the circle is not an ellipse.
    plt.axis('scaled')
    data = print_figure(fig, 'png')
    # We MUST close the figure, otherwise IPython's display machinery
    # will pick it up and send it as output, resulting in a double display
    plt.close(fig)
    return data
# -
# Now use the IPython API to get the PNG formatter (`image/png`) and call the `for_type` method to register `circle_to_png` as the display function for `MyCircle`.
# %load soln/mycircle_png.py
display_png(c)
# ## PNG formatter for NumPy arrays
# In this exercise, you will register a display formatter function that generates a PNG representation of a 2d NumPy array. Here is the function that uses the [Python Imaging Library (PIL)](http://www.pythonware.com/products/pil/) to generate the raw PNG data:
# +
from PIL import Image
from io import BytesIO
import numpy as np
def ndarray_to_png(x):
    """Render a 2-d NumPy array as raw PNG bytes (grayscale image).

    Returns None for non-2-d input so IPython falls back to other
    representations.
    """
    if len(x.shape) != 2: return
    x = np.asarray(Image.fromarray(x).resize((500, 500)))
    value_range = x.max() - x.min()
    # Guard against a constant array (original divided by zero here).
    if value_range:
        x = (x - x.min()) / value_range
    else:
        x = np.zeros_like(x, dtype=float)
    # Scale to 0..255: the original multiplied by 256, which maps x == 1.0
    # to 256 and wraps to 0 under uint8, turning the brightest pixels black.
    img = Image.fromarray((x * 255).astype('uint8'))
    img_buffer = BytesIO()
    img.save(img_buffer, format='png')
    return img_buffer.getvalue()
# -
# Use the `for_type` method of the PNG formatter to register `ndarray_to_png` as the display function for `np.ndarray`.
# %load soln/ndarray_png.py
# Now create a few NumPy arrays and display them. Notice that their default representation in the Notebook is PNG rather than text.
a = np.random.rand(100,100)
a
# You can still display the plain text representation using the `display_pretty` function.
from IPython.display import display_pretty
display_pretty(a)
b = np.linspace(0,100.0, 100**2).reshape((100,100))
b
| 04.Jupyter_and_iPython/appendix/Rich Output Exercises - Custom Display Logic .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:machine-learning]
# language: python
# name: conda-env-machine-learning-py
# ---
# # ELM class example
# ** ELM class implements a single hidden layer ELM **
# As an example we will use the popular MNIST dataset.
import os
import tensorflow as tf
import keras
from keras.datasets import mnist;
# +
# Download/load MNIST, caching it in a file next to this notebook.
train, test = mnist.load_data(os.getcwd() + "/elm_tf_test" + "mnist.txt");
x_train, y_train = train
x_test, y_test = test
del train, test
# One-hot encode the 10 digit classes.
y_train = keras.utils.to_categorical(y_train, num_classes=10)
y_test = keras.utils.to_categorical(y_test, num_classes=10)
# the input has to be flattened in order to be fed to the network
x_train = x_train.reshape(-1, 28* 28)
x_test = x_test.reshape(-1, 28 * 28)
# -
input_size = 28*28
output_size = 10 # mnist has 10 output classes
# ### Creating ELM classifier
# +
from tfelm.elm import ELM
elm1 = ELM(input_size=input_size, output_size=output_size, l2norm=10e1, name='elm1')
# -
# This creates an ELM network with 784 input neurons and 10 output neurons.
# The l2norm is a regularization parameter used in training.
# For now the hidden layer size hasn't been specified.
# The hidden layer is added to the network through the add_layer method
elm1.add_layer(n_neurons=1024);
# This adds an hidden layer comprised of 1024 hidden layer neurons.
#
# By default the activation is set to tf.sigmoid and the initialization of weights and biases of the hidden layer is a modified He initialization:
#
# The weights are initialized by sampling from a random normal distribution with variance of 2/n_in, where n_in is the size of the previous layer, in this case the input layer.
#
# Actual network initialization is done via the compile method:
#
elm1.compile()
# ### Training the Network: fit method
# To train the network there are two main methods: train and fit.
# Fit is the most basic and simple method but is suitable only for small datasets.
#
# It needs in input two numpy arrays for the training instances and labels and an optional batch_size argument.
# Internally, a TensorFlow Iterator and a TensorFlow Dataset object are created from the numpy arrays for training, as this is the most efficient way to train a model according to the TensorFlow documentation.
#
# It should be noted that, unlike conventional Neural Networks, the batch_size doesn't change the outcome of the training but it only affect training time and memory required to train the network. The smaller the less the memory required but the more the training time.
#
elm1.fit(x_train, y_train, batch_size=500)
# Now that the network has been trained, we can evaluate the performance on the test set via evaluate method.
elm1.evaluate(x_test, y_test, batch_size=500) # it accepts batch size as also the evaluation is done by batching to allow bigger datasets to be evaluated as we will see.
# To return a numpy array with actual predictions it exist a prediction method:
#
pred = elm1.predict(x_test, batch_size=500)
# This is pretty much the most basic functionalities offered by the API and are suitable for small/medium datasets as it is required the dataset is fitted into memory as an array.
# ## Training and evaluating bigger Datasets which cannot be fitted into memory
# We will use the same MNIST dataset for example purpose only.
# Instead of calling the fit method we should call the train method
# The train method requires a TensorFlow Iterator object. The Iterator must therefore be created externally from the dataset.
# There are various ways to create a TF iterator object from a dataset and this strongly depends on what is your input pipeline and in what format your dataset is.
#
# Tutorials, documentation and examples on Dataset and Iterator is available at: https://www.tensorflow.org/
# ** As an example suppose we have an input pipeline in which we want to do some pre-process and data augmentation on the original MNIST dataset: **
from keras.preprocessing.image import ImageDataGenerator # we will use keras imagedatagen for simplicity
# +
batch_size = 2500
n_epochs = 10 # as the dataset is augmented the training now will be done on more "epochs", the resulting dataset will be 10 times the original.
# It could be argued that calling these epochs is not strictly correct as each "epoch" is different from the previous:
# the dataset is augmented via random trsformations
# keras ImageDataGen requires a 4-D tensor in input:
x_train = x_train.reshape(-1, 28, 28, 1)
datagen = ImageDataGenerator(
width_shift_range=0.05,
height_shift_range=0.05
)
# random height and weight shifting
datagen.fit(x_train)
# +
# Number of augmented batches that make up one pass over the training set.
batches_per_epochs = len(x_train) // batch_size
def gen():
    """Yield (x, y) batches from the Keras augmenter, flattened for the ELM.

    Stops after batches_per_epochs * n_epochs batches; datagen.flow()
    would otherwise loop forever.
    """
    n_it = 0
    for x, y in datagen.flow(x_train, y_train, batch_size=batch_size):
        x = x.reshape(batch_size, 28 * 28) # the network requires a flattened array as input, so we flatten here
        if n_it % 100 == 0:
            print("generator iteration: %d" % n_it)
        yield x, y
        n_it += 1
        if n_it >= batches_per_epochs * n_epochs:
            break
# Wrap the Python generator in a tf.data.Dataset with fixed batch shapes.
data = tf.data.Dataset.from_generator(generator=gen,
                                      output_shapes=((batch_size, 28 * 28,), (batch_size, 10,)),
                                      output_types=(tf.float32, tf.float32))
# -
# Here we have defined a python generator from the keras ImageDataGenerator and we have used this generator to create a TensorFlow dataset. This is because it isn't possible to create a Dataset directly from the keras generator.
# +
iterator = data.make_one_shot_iterator() # a TF iterator is created from the Dataset
elm1.train(iterator, n_batches=batches_per_epochs*n_epochs)
# -
# The train method has the optional n_batches argument, which serves only the purpose of estimating the ETA.
# Note that the train method does not return the network performance.
# This should be done via evaluate.
elm1.evaluate(x_test, y_test, batch_size=1024)
# To find the performance on the training set, due to the fact that ELM are not trained with gradient descent as conventional Neural Networks, one should call the evaluate function passing an iterator on the training set.
#
# Note that in fact, the actual training set is different now when evaluating, due to the random data augmentation. Unfortunately this is the only way to asses training performance in such scenario without loading and saving the augmented dataset or resorting to gradient descent to train the ELM thus giving up fast training.
# As the iterator was made as one shot only it should be re-created:
iterator = data.make_one_shot_iterator()
elm1.evaluate(tf_iterator = iterator, batch_size=1024)
# ** Instead of creating two times the iterator a better way is to create an initializable iterator in first place, before training :**
iterator = data.make_initializable_iterator()
# The iterator should be initialized:
#
#
with tf.Session() as sess:
elm1.sess.run(iterator.initializer)
# We have accessed the TF session attribute in ELM object, which has its own session and initialized the iterator inside the ELM session.
# Now that the iterator has been initialized, it can be used for training
elm1.train(iterator, n_batches=batches_per_epochs*n_epochs)
# +
# this re-initialize the iterator before calling the evaluate on the training set
with tf.Session() as sess:
elm1.sess.run(iterator.initializer)
elm1.evaluate(tf_iterator = iterator, batch_size=1024)
# -
# Note how the training accuracy is slightly different due to the random trasformation due to data augmentation.
#
# This concludes this brief tutorial for ELM class training
# ### Custom Activation and Weights and Bias initialization
# +
def softsign(x):
    """Softsign activation: maps x to x / (1 + |x|), bounded in (-1, 1)."""
    return x / (1 + tf.abs(x))
# this is a simple function which implements a softsign activation function
# -
elm1.add_layer(1024, activation=softsign)
# This softsign function can be passed to the add_layer method, in the same way any TensorFlow pre-defined tf.nn.relu, tf.nn.elu function etc can be passed
# The add_layer method supports also custom Weights and Bias Initialization
# For example if we wish to initialize the ELM with an orthogonal weight matrix and the Bias as a unit norm vector:
# +
ortho_w = tf.orthogonal_initializer()
uni_b= tf.uniform_unit_scaling_initializer()
init_w = tf.get_variable(name='init_w',shape=[input_size, 1024], initializer=ortho_w)
init_b = tf.get_variable(name='init_b', shape=[1024], initializer=uni_b)
elm1.add_layer(1024, activation= softsign, w_init= init_w, b_init = init_b)
# -
# We have used pre-made TensorFlow initialization functions, but note that numpy or any other function can be used.
#
# ** The important thing is that w_init and b_init are passed TensorFlow variables with the desired values. **
#
# +
# with numpy
import numpy as np
init_w = tf.Variable(name='init_w', initial_value=np.random.uniform(low=-1, high=1, size=[input_size, 1024]))
elm1.add_layer(1024, activation= softsign, w_init=init_w, b_init = None)
# -
# ** Note that when using custom initialization both b_init and w_init should be specified, setting b_init to None creates a network without bias **
| ELM_class_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from bqplot import *
from IPython.display import display
import numpy as np
import pandas as pd
price_data = pd.DataFrame(np.cumsum(np.random.randn(150, 2).dot([[0.5, 0.8], [0.8, 1.0]]), axis=0) + 100,
columns=['Security 1', 'Security 2'], index=pd.date_range(start='01-01-2007', periods=150))
y_data = np.cumsum(np.random.randn(100))
# ## Label positioned in data co-ordinates
# +
# Line chart with a Label positioned in *data* coordinates: passing the
# scales to the Label makes x/y be interpreted in data space.
x_sc = LinearScale()
y_sc = LinearScale()
test_line = Lines(x=np.arange(10), y=y_data[:10], scales={'x': x_sc, 'y': y_sc})
test_label = Label(x=5.0, y=np.mean(y_data[:10]), scales={'x': x_sc, 'y': y_sc},
                   text='Test Label', font_size='16px', font_weight='bolder', color='orange')
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation='vertical', tick_format='0.2f')
fig = Figure(marks=[test_line, test_label], axes=[ax_x, ax_y])
display(fig)
# -
# Setting the label attribute `enable_move` to `True` makes the label draggable
test_label.enable_move = True
# ## Label positioned in terms of Figure co-ordinates
# +
x_sc = LinearScale()
y_sc = LinearScale()
test_line = Lines(x=np.arange(10), y=y_data, scales={'x': x_sc, 'y': y_sc})
test_label = Label(x=0.5, y=0.2, text='Test Label', font_size='16px',
font_weight='bolder', color='orange')
ax_x = Axis(scale=x_sc)
ax_y = Axis(scale=y_sc, orientation='vertical', tick_format='0.2f')
fig = Figure(marks=[test_line, test_label], axes=[ax_x, ax_y])
display(fig)
# -
# Rotating the label
test_label.rotate_angle = 30
# ## Label positioned at a Date value
# +
import datetime as dt
# Label anchored to a date on a DateScale x-axis; only the x scale is
# passed, so y=0.5 is interpreted in figure coordinates.
dt_sc = DateScale()
y_sc = LinearScale()
lines = Lines(x=price_data.index.values, y=price_data['Security 1'].values, scales={'x': dt_sc, 'y': y_sc})
label = Label(x=dt.date(2007, 3, 14), y=0.5, scales={'x': dt_sc}, text='Pi Day', color='orange')
ax_x = Axis(scale=dt_sc)
ax_y = Axis(scale=y_sc, orientation='vertical')
fig = Figure(marks=[lines, label], axes=[ax_x, ax_y])
display(fig)
# -
# Setting an offset in pixel
label.x_offset = 100
| examples/Label.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="xwNhk8ll8Inm"
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# + id="z3GdUeNo5PE-"
class Perceptron:
    """Single-layer perceptron trained with the batch perceptron rule.

    eta: learning rate applied to the weight updates.
    epochs: requested number of training iterations.
    activationFunction: callable f(inputs, weights) -> 0/1 predictions.
    """

    def __init__(self, eta, epochs, activationFunction):
        # Three weights: two inputs plus one bias term, started near zero.
        self.weights = np.random.randn(3) * 1e-4
        print(f"self.weights: {self.weights}")
        self.eta = eta
        self.epochs = epochs
        self.activationFunction = activationFunction

    def fit(self, X, y):
        """Train on feature matrix X (n_samples x 2) and 0/1 label vector y."""
        self.X = X
        self.y = y
        # Append a constant -1 column so the threshold is learned as a weight.
        X_with_bias = np.c_[self.X, -np.ones((len(self.X), 1))]  # concatenation
        print(f"X_with_bias: \n{X_with_bias}")
        # NOTE(review): range(1, epochs) performs epochs - 1 updates, not
        # epochs; kept as-is to preserve the recorded notebook output.
        for epoch in range(1, self.epochs):
            print(f"for epoch: {epoch}")
            y_hat = self.activationFunction(X_with_bias, self.weights)
            print(f"predicted value: \n{y_hat}")
            error = self.y - y_hat
            print(f"error: \n{error}")
            # Batch update: one dot product sums the per-sample corrections.
            self.weights = self.weights + self.eta * np.dot(X_with_bias.T, error)
            print(f"updated weights: \n{self.weights}")
            print("~~~~~~~~~~~~~~~~~~~~~\n")

    def predict(self, X):
        """Return 0/1 predictions for X.

        Fix: the bias column is sized by len(X). The original used
        len(self.X), which broke whenever the prediction batch had a
        different number of rows than the training set (and required
        fit() to have been called at all).
        """
        X_with_bias = np.c_[X, -np.ones((len(X), 1))]
        return self.activationFunction(X_with_bias, self.weights)
# + id="7eZsLQgC8-vJ"
def activationFunction(inputs, weights):
    """Step activation: 1 where the weighted sum is positive, else 0."""
    return np.where(np.dot(inputs, weights) > 0, 1, 0)
# + colab={"base_uri": "https://localhost:8080/", "height": 234} id="Y7SPY651uT_-" outputId="c5336f83-e837-4ade-c121-9c7db5b94d4b"
# Toy training set: two features (x1, x2) and a binary target y.
data={"x1": [1,1,0,-1,-1,-1], "x2": [1,0,1,-1,0,1],"y": [1,1,0,0,0,0]}
inp = pd.DataFrame(data)
inp
# + colab={"base_uri": "https://localhost:8080/", "height": 234} id="gMaGRXYyuhlB" outputId="cb8ec1fd-7e31-4703-b267-da077a22250c"
X = inp.drop("y", axis=1) # axis = 1 >>> dropping across a column
X
# + colab={"base_uri": "https://localhost:8080/", "height": 234} id="mGJ8FVA2unQY" outputId="e2af69b2-f3f6-4437-d5e0-cbd21fdeb12c"
y = inp['y']
y.to_frame()
# + colab={"base_uri": "https://localhost:8080/"} id="uPdePT03EFw0" outputId="31b7437b-5eeb-4bcd-f5dd-bfcd616b8b7f"
# Train with learning rate 1 for 10 epochs using the step activation.
model = Perceptron(eta = 1, epochs=10, activationFunction=activationFunction)
# + colab={"base_uri": "https://localhost:8080/"} id="cn2K5oRCEtt9" outputId="f8eec7a4-80a1-45b4-a04c-c1e46f6e72da"
model.fit(X,y)
# + colab={"base_uri": "https://localhost:8080/"} id="LaxqFyX9Ew9D" outputId="28296a30-0f09-4363-bf18-15acf7b812d7"
model.predict(X)
# + colab={"base_uri": "https://localhost:8080/", "height": 275} id="MKepi2boAdQP" outputId="4b341400-f696-4de6-b69e-9e9856cb8985"
# Scatter the samples colored by class, with the axes drawn through 0.
inp.plot(kind="scatter", x="x1", y="x2", c="y", s=100, cmap="winter")
plt.axhline(y=0, color="black")
plt.axvline(x=0, color="black")
| ANN/percepton from scratch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### 1.3 Pré-processamento
# Vamos entender alguns dos maiores conceitos de pré-processamento de dados em NLP (pré-processamento de texto):
# 1. Stop words;
# 2. Stemming (stemização);
# 3. Lowercasing (caixa baixa);
# 4. Pontuação;
# 5. Lemmatização.
#
# #### 1.3.1 Stop words e Pontuação
# As **stop words** são tokens (agora vamos chamar palavras de **tokens**, incluindo pontuações) que não acrescentam para o significado geral de um input textual. Vamos a um exemplo.
#
# _"O macaco gosta de banana e de maçã!!"_
#
# Na sentença acima, podemos remover as stop words (cada língua tem a sua, e são basicamente palavras que **não adicionam significado semântico** para a frase). Logo em seguida, podemos também remover as **pontuações** (caso esse pré-processamento faça sentido), pois o que nos interessa são as palavras com significado. Sendo assim:
#
# | Original | "O macaco gosta de banana e de maçã!!" |
# |:-:|:-:|
# |Stop words| "macaco gosta banana maçã!!"|
# |Punctuation| "macaco gosta banana maçã|
#
# Podemos realizar outras alterações na etapa de pré-processamento do texto. Podemos remover URLs, nomes, números de telefone, novamente, **o que fizer sentido para as intenções finais**. Em nosso caso, um analisador de sentimento SIMPLES não precisa processar nomes de pessoas, URLs, nomes de locais...
#
# #### 1.3.2 Stemming, lowercasing e lemmatization
# O **lowercasing** é o processo de transformar todos os tokens em caixa baixa. Isso reduz bastante a quantidade de vocabulários e ajuda a diminuir diferenças entre formas muito parecidas, evitando que "Macaco" seja diferenciado de "macaco" (há como calcular similaridade entre palavras caso o lowercasing não faça sentido).
#
# Já o **stemming** é um conceito linguístico relacionado ao processo de reduzir palavras à sua raiz. As palavras que são mais afetadas são aquelas que possuem forma plural, flexões, gênero. Vamos a alguns exemplos.
#
# | Token original | Stemma |
# |:-:|:-:|
# | cavalo |caval|
# |cavalos |caval|
# |cavaleiros|caval|
# |andei|and
# |andar|and
# |grandão|grand|
# |felizmente|feliz|
#
# Ao analisar a tabela, surgem algumas questões menores:
# 1. cavalo, cavalos e cavaleiros, mesmo tendo significados diferentes, agora são o mesmo token ("caval"). Isso dá problema?
# 2. existem outros tipos de stemming?
#
# E as respostas: sim, pode dar problema. Mas apenas em alguns casos. E sim, existem outros tipos de stemming! Outros algoritmos de stemming podem não transformar o advérbio "felizmente" em "feliz", por exemplo.
#
# Você também pode ter se deparado com o termo **lemmatization**. Não confunda ele com stemming. A **lematização** é mais conservadora, transformando tokens plurais em singulares, femininos em masculinos, e formas flexionadas em não-flexionadas (como verbos no infinitivo). **A lematização é utilizada quando precisamos reconhecer o Part-of-Speech (POS) / Classe morfológica dos tokens**.
| Inteligência Artificial/Natural Language Processing (NLP)/1. Introdução ao NLP e Análise de Sentimento/1.3 Pré-processamento.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <center>
# ## <span style=color:blue> Frequency resolution </span>
# __Uncertainty Principle__ and sampling duration
#
#
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft
from IPython.html.widgets import interact
from IPython.display import clear_output, display, HTML
# NOTE(review): removed a leftover call `interact(Ejercicio1, sigma=(5, 15, 0.1),
# beta=(4/3, 10/3, 0.1), rho=(20, 40, 0.1))` apparently pasted from another
# notebook: `Ejercicio1` is never defined here, so the call raised a
# NameError before any of the cells below could run.
# +
fs = 64  # sampling frequency (Hz)
f = 10   # base tone frequency (Hz)
# Bug fix: `x` (and the time base `t`) were undefined here — the definition
# of x was commented out and `t` was never created — so plt.plot(x) raised
# a NameError. Build 2 seconds of samples and the two-tone test signal.
t = np.arange(0, 2, 1/fs)
x = np.cos(2*np.pi*f*t) + np.cos(2*np.pi*(f+2)*t)
plt.figure(1)
plt.plot(x)
plt.show()
Nf = 64  # default FFT length used by the widget below
def abs_sinc(Nf=64, deltaf=0.5, x=1, fs=64, f=10):
    """Plot |FFT| of two summed cosines at f and f+deltaf; return the scaled FFT.

    Nf     -- FFT length (zero-pads or truncates the signal)
    deltaf -- spacing between the two tones, illustrating frequency resolution
    x      -- sampling-step multiplier: samples are taken every x/fs seconds
    fs, f  -- sampling rate and base tone frequency (previously read from
              module globals; now parameters with the same default values,
              so existing callers are unaffected)
    """
    t = np.arange(0, 2, x/fs)  # time-domain samples
    sig = np.cos(2*np.pi*f*t) + np.cos(2*np.pi*(f+deltaf)*t)
    X = fft(sig, Nf)/np.sqrt(Nf)
    x_axis = np.linspace(0, fs, len(X))
    plt.figure(2)
    plt.plot(x_axis, abs(X))
    plt.xlim(xmax=fs/2)
    plt.ylim(ymax=6)
    plt.title('frequency response')
    plt.show()
    # Bug fix: the original returned fft(x, Nf/np.sqrt(Nf)) — passing a
    # float FFT length, which raises an error — instead of the already
    # computed, sqrt(Nf)-scaled spectrum.
    return X
# Sliders for FFT length, tone spacing and sampling-step multiplier;
# abs_sinc re-runs on every change (requires a live notebook kernel).
interact(abs_sinc,Nf = (32,32*10,10), deltaf = (0.5,4,0.5), x = (0.5,5,0.5))
| 4. Frequency resolution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <font color='blue'>Data Science Academy</font>
# # <font color='blue'>Big Data Real-Time Analytics com Python e Spark</font>
#
# # <font color='blue'>Capítulo 3</font>
# ## Configuração e Customização Avançada do Matplotlib
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import Image
# %matplotlib inline
# Show the installed matplotlib version and the built-in style names.
mpl.__version__
print(plt.style.available)
def cria_plot():
    """Draw a 12-bin normalized histogram of six N(0, 1) samples of size 5000."""
    amostras = np.random.randn(5000, 6)
    figura, eixos = plt.subplots(figsize=(16, 10))
    rotulos = ['Color %d' % k for k in range(1, 7)]
    eixos.hist(amostras, 12, density=1, histtype='bar', label=rotulos)
    eixos.set_title("Histograma\nPara\nDistribuição Normal", fontsize=25)
    eixos.set_xlabel("Dados", fontsize=16)
    eixos.set_ylabel("Frequência", fontsize=16)
    eixos.legend()
    plt.show()
# Draw once with matplotlib defaults, then again with a custom style sheet.
cria_plot()
# Windows users, use:
# #!dir estilos
# !ls -l estilos
# Windows users, use:
# # !type estilos/personalestilo-1.mplstyle
# !cat estilos/personalestilo-1.mplstyle
# Apply the custom .mplstyle; it affects every plot drawn afterwards.
plt.style.use("estilos/personalestilo-1.mplstyle")
cria_plot()
# ### Subplots
# Utilizaremos o dataset sobre automóveis do repositório de Machine Learning da UCI: [UCI Machine Learning Repository](http://archive.ics.uci.edu/ml/index.html)
#
# [Automobile Data Set](https://archive.ics.uci.edu/ml/datasets/Automobile)
#
# ## Usando o Pandas para Carregar os Dados
# Load the UCI Automobile dataset via the course's local helper modules
# (lib/geradados.py, lib/geraplot.py, lib/radar.py).
import sys
sys.path.append("lib")
import geradados, geraplot, radar
dados = geradados.get_raw_data()
dados.head()
dados_subset = geradados.get_limited_data()
dados_subset.head()
geradados.get_all_auto_makes()
(fabricantes, total) = geradados.get_make_counts(dados_subset)
total
# Keep only makes with more than `lower_bound` models.
dados = geradados.get_limited_data(lower_bound = 6)
dados.head()
len(dados.index)
# ## Normalizando os Dados
dados_normalizados = dados.copy()
dados_normalizados.rename(columns = {"horsepower": "power"}, inplace = True)
dados_normalizados.head()
# Higher raw values are better for these variables: normalize directly.
geradados.norm_columns(["city mpg", "highway mpg", "power"], dados_normalizados)
dados_normalizados.head()
# Lower raw values are better for these variables: invert while normalizing.
geradados.invert_norm_columns(["price", "weight", "riskiness", "losses"], dados_normalizados)
dados_normalizados.head()
# ## Plots
# One standalone figure per metric; each helper receives a single-cell
# GridSpec and the (normalized, where applicable) data.
figure = plt.figure(figsize = (15, 5))
prices_gs = mpl.gridspec.GridSpec(1, 1)
prices_axes = geraplot.make_autos_price_plot(figure, prices_gs, dados)
plt.show()
figure = plt.figure(figsize = (15, 5))
mpg_gs = mpl.gridspec.GridSpec(1, 1)
mpg_axes = geraplot.make_autos_mpg_plot(figure, mpg_gs, dados)
plt.show()
figure = plt.figure(figsize = (15, 5))
risk_gs = mpl.gridspec.GridSpec(1, 1)
risk_axes = geraplot.make_autos_riskiness_plot(figure, risk_gs, dados_normalizados)
plt.show()
figure = plt.figure(figsize=(15, 5))
loss_gs = mpl.gridspec.GridSpec(1, 1)
loss_axes = geraplot.make_autos_losses_plot(figure, loss_gs, dados_normalizados)
plt.show()
figure = plt.figure(figsize = (15, 5))
risk_loss_gs = mpl.gridspec.GridSpec(1, 1)
risk_loss_axes = geraplot.make_autos_loss_and_risk_plot(figure, risk_loss_gs, dados_normalizados)
plt.show()
# +
# Self-contained cell: re-imports and reloads data, then draws the radar
# plot grid (3 rows x 7 columns; first row reserved for the title).
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from IPython.display import Image
import warnings
#warnings.filterwarnings('ignore')
import matplotlib
# %matplotlib inline
import sys
sys.path.append("lib")
import geradados, geraplot, radar
#plt.style.use("estilos/personalestilo-1.mplstyle")
dados = geradados.get_raw_data()
dados.head()
dados_subset = geradados.get_limited_data()
dados_subset.head()
dados = geradados.get_limited_data(lower_bound = 6)
dados.head()
# NOTE(review): this cell copies/renames but does not re-run the
# norm_columns/invert_norm_columns normalization done earlier — confirm
# that is intentional before comparing with the previous radar output.
dados_normalizados = dados.copy()
dados_normalizados.rename(columns = {"horsepower": "power"}, inplace = True)
figure = plt.figure(figsize = (15, 5))
radar_gs = mpl.gridspec.GridSpec(3, 7,
                                 height_ratios = [1, 10, 10],
                                 wspace = 0.50,
                                 hspace = 0.60,
                                 top = 0.95,
                                 bottom = 0.25)
radar_axes = geraplot.make_autos_radar_plot(figure, gs=radar_gs, pddata=dados_normalizados)
plt.show()
# -
# ## Plots Combinados
#
# wireframe
# ```
# --------------------------------------------
# | overall title |
# --------------------------------------------
# | price ranges |
# --------------------------------------------
# | combined loss/risk | |
# | | radar |
# ---------------------- plots |
# | risk | loss | |
# --------------------------------------------
# | mpg |
# --------------------------------------------
# ```
# +
# Building the layers (no data yet): a 4-row master grid matching the
# wireframe above, with nested GridSpecs for each region.
figure = plt.figure(figsize=(10, 8))
gs_master = mpl.gridspec.GridSpec(4, 2, height_ratios=[1, 2, 8, 2])
# Layer 1 - Title
gs_1 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[0, :])
title_axes = figure.add_subplot(gs_1[0])
# Layer 2 - Price
gs_2 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[1, :])
price_axes = figure.add_subplot(gs_2[0])
# Layer 3 - Risks & Radar (left: combined on top, risk/loss below; right: radar)
gs_31 = mpl.gridspec.GridSpecFromSubplotSpec(2, 2, height_ratios=[2, 1], subplot_spec=gs_master[2, :1])
risk_and_loss_axes = figure.add_subplot(gs_31[0, :])
risk_axes = figure.add_subplot(gs_31[1, :1])
loss_axes = figure.add_subplot(gs_31[1:, 1])
gs_32 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[2, 1])
radar_axes = figure.add_subplot(gs_32[0])
# Layer 4 - MPG
gs_4 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec=gs_master[3, :])
mpg_axes = figure.add_subplot(gs_4[0])
# Join the layers, still without data.
gs_master.tight_layout(figure)
plt.show()
# +
# Building the layers (with data): same wireframe as above, now filled by
# the geraplot helpers.
figure = plt.figure(figsize = (15, 15))
gs_master = mpl.gridspec.GridSpec(4, 2,
                                  height_ratios = [1, 24, 128, 32],
                                  hspace = 0,
                                  wspace = 0)
# Layer 1 - Title
gs_1 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = gs_master[0, :])
title_axes = figure.add_subplot(gs_1[0])
title_axes.set_title("Plots", fontsize = 30, color = "#cdced1")
geraplot.hide_axes(title_axes)
# Layer 2 - Price
gs_2 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = gs_master[1, :])
price_axes = figure.add_subplot(gs_2[0])
geraplot.make_autos_price_plot(figure,
                               pddata = dados,
                               axes = price_axes)
# Layer 3, Part I - Risks
gs_31 = mpl.gridspec.GridSpecFromSubplotSpec(2, 2,
                                             height_ratios = [2, 1],
                                             hspace = 0.4,
                                             subplot_spec = gs_master[2, :1])
risk_and_loss_axes = figure.add_subplot(gs_31[0, :])
geraplot.make_autos_loss_and_risk_plot(figure,
                                       pddata = dados_normalizados,
                                       axes = risk_and_loss_axes,
                                       x_label = False,
                                       rotate_ticks = True)
risk_axes = figure.add_subplot(gs_31[1, :1])
geraplot.make_autos_riskiness_plot(figure,
                                   pddata = dados_normalizados,
                                   axes = risk_axes,
                                   legend = False,
                                   labels = False)
loss_axes = figure.add_subplot(gs_31[1:, 1])
geraplot.make_autos_losses_plot(figure,
                                pddata = dados_normalizados,
                                axes = loss_axes,
                                legend = False,
                                labels = False)
# Layer 3, Part II - Radar (row 0 is the title strip; the remaining 4x3
# cells each get a polar RadarAxes projection, one spoke per column).
gs_32 = mpl.gridspec.GridSpecFromSubplotSpec(5, 3,
                                             height_ratios = [1, 20, 20, 20, 20],
                                             hspace = 0.6,
                                             wspace = 0,
                                             subplot_spec = gs_master[2, 1])
(rows, cols) = geometry = gs_32.get_geometry()
title_axes = figure.add_subplot(gs_32[0, :])
inner_axes = []
projection = radar.RadarAxes(spoke_count = len(dados_normalizados.groupby("make").mean().columns))
# Skip the first `cols` cells (the title row) and add one subplot per cell.
[inner_axes.append(figure.add_subplot(m, projection = projection)) for m in [n for n in gs_32][cols:]]
geraplot.make_autos_radar_plot(figure,
                               pddata = dados_normalizados,
                               title_axes = title_axes,
                               inner_axes = inner_axes,
                               legend_axes = False,
                               geometry = geometry)
# Layer 4 - MPG
gs_4 = mpl.gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = gs_master[3, :])
mpg_axes = figure.add_subplot(gs_4[0])
geraplot.make_autos_mpg_plot(figure,
                             pddata = dados,
                             axes = mpg_axes)
# Joining the layers
gs_master.tight_layout(figure)
plt.show()
# -
# # Fim
# ### Obrigado - Data Science Academy - <a href="http://facebook.com/dsacademybr">facebook.com/dsacademybr</a>
| code/dsa/Big Data Real-Time Analytics com Python e Spark/8-Arquivos-Cap03/13-Cap03-Matplotlib.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import collections
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Run from the repository root even when the notebook lives in notebook/.
if os.getcwd().endswith('notebook'):
    os.chdir('..')
# -
sns.set(palette='colorblind', font_scale=1.3)
# Temperature-range classes, ordered from coldest to hottest organisms.
categories = ['psychrophilic', 'mesophilic', 'thermophilic', 'hyperthermophilic']
dataset_path = os.path.join(os.getcwd(), 'data/ncbi/dataset.csv')
dataset_df = pd.read_csv(dataset_path)
dataset_df.head()
def plot_genes_distribution(dataset_df, categories):
    """Draw one `temperature` histogram per (gene, category) pair on a grid.

    Rows are the unique values of `gene_name`, columns the given categories.
    """
    genes = dataset_df['gene_name'].unique()
    fig, axes = plt.subplots(len(genes), len(categories),
                             figsize=(16, 5 * len(genes)))
    for row, gene in enumerate(genes):
        gene_rows = dataset_df[dataset_df['gene_name'] == gene]
        for col, cat in enumerate(categories):
            ax = axes[row, col]
            subset = gene_rows[gene_rows['temperature_range'] == cat]
            subset['temperature'].hist(ax=ax)
            # The first column repeats the gene name so each row is identifiable.
            title = f'{gene} | {cat}' if col == 0 else cat
            ax.set_title(title)
plot_genes_distribution(dataset_df, categories)
# Print how many rows fall in each temperature-range class.
for cat in categories:
    print(cat, len(dataset_df[dataset_df['temperature_range'] == cat]))
| notebook/archive/Dataset exploration.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 4 - Introduction to Autoregressive and Automated Methods for Time Series Forecasting
# +
import datetime as dt
import os
import shutil
import warnings
from collections import UserDict
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from common.utils import load_data, mape
from IPython.display import Image
# %matplotlib inline
pd.options.display.float_format = "{:,.2f}".format
np.set_printoptions(precision=2)
warnings.filterwarnings("ignore")
# -
# Load the energy-load series (helper from common/utils.py) and keep only
# the "load" column.
data_dir = "./data"
ts_data_load = load_data(data_dir)[["load"]]
ts_data_load.head()
# ## Lag plot
# +
from pandas.plotting import lag_plot
plt.figure()
lag_plot(ts_data_load)
# -
# ## Autocorrelation plot
# ### Autocorrelation Plot Results from ts_data_load dataset
# +
from pandas.plotting import autocorrelation_plot
plt.figure()
autocorrelation_plot(ts_data_load)
# -
# ### Autocorrelation Plot Results from ts_data_load_subset (First week of August 2014)
# +
ts_data_load = load_data("data/")[["load"]]
ts_data_load.head()
# Slice one week to make the daily seasonality visible in the plot.
ts_data_load_subset = ts_data_load["2014-08-01":"2014-08-07"]
from pandas.plotting import autocorrelation_plot
plt.figure()
autocorrelation_plot(ts_data_load_subset)
# -
# ### Autocorrelation function (acf) plot on ts_data_load dataset
# +
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(ts_data_load)
pyplot.show()
# -
# ### Autocorrelation function (acf) plot on ts_data_load subset
# +
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_acf
plot_acf(ts_data_load_subset)
pyplot.show()
# -
# ### Partial correlation function (pacf) plot on ts_data_load dataset
# +
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_pacf
plot_pacf(ts_data_load, lags=20)
pyplot.show()
# -
# ### Partial correlation function (pacf) plot on ts_data_load subset
# +
from matplotlib import pyplot
from statsmodels.graphics.tsaplots import plot_pacf
plot_pacf(ts_data_load_subset, lags=30)
pyplot.show()
# -
# ## Autoregressive method class in Statsmodels
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import pandas_datareader as pdr
import seaborn as sns
from statsmodels.tsa.api import acf, graphics, pacf
from statsmodels.tsa.ar_model import AutoReg, ar_select_order
# Fit an AR(1) model on the full load series.
model = AutoReg(ts_data_load['load'], 1)
results = model.fit()
print(results.summary())
# #### Note: AutoReg supports describing the same covariance estimators as OLS. Below, we use cov_type="HC0", which is White’s covariance estimator. While the parameter estimates are the same, all of the quantities that depend on the standard error change.
# Same fit, different covariance estimator; point estimates are unchanged.
res = model.fit(cov_type="HC0")
print(res.summary())
sns.set_style("darkgrid")
pd.plotting.register_matplotlib_converters()
sns.mpl.rc("figure", figsize=(16, 6))
# In-sample predictions for observations 720..840, then diagnostics plots.
fig = res.plot_predict(720, 840)
fig = plt.figure(figsize=(16, 9))
fig = res.plot_diagnostics(fig=fig, lags=20)
# ### Prepare the ts_data_load dataset for forecasting task with AutoReg() function
# Train on Nov+Dec 2014 up to Dec 30; test on the remainder.
train_start_dt = "2014-11-01 00:00:00"
test_start_dt = "2014-12-30 00:00:00"
# +
train = ts_data_load.copy()[
    (ts_data_load.index >= train_start_dt) & (ts_data_load.index < test_start_dt)
][["load"]]
test = ts_data_load.copy()[ts_data_load.index >= test_start_dt][["load"]]
print("Training data shape: ", train.shape)
print("Test data shape: ", test.shape)
# -
# Scale to [0, 1]; the scaler is fit on train only to avoid test leakage.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
train["load"] = scaler.fit_transform(train)
train.head()
test["load"] = scaler.transform(test)
test.head()
HORIZON = 3
print("Forecasting horizon:", HORIZON, "hours")
# +
# Build the multi-step targets: load+1, load+2, ... shifted back in time.
test_shifted = test.copy()
for t in range(1, HORIZON):
    test_shifted["load+" + str(t)] = test_shifted["load"].shift(-t, freq="H")
test_shifted = test_shifted.dropna(how="any")
test_shifted.head(5)
# +
# %%time
# Walk-forward evaluation: refit an AR(1) on a 720-hour sliding window for
# every test timestamp, forecast HORIZON steps, then slide the window.
training_window = 720
train_ts = train["load"]
test_ts = test_shifted
history = [x for x in train_ts]
history = history[(-training_window):]
predictions = list()
for t in range(test_ts.shape[0]):
    model = AutoReg(history, 1)
    model_fit = model.fit()
    yhat = model_fit.forecast(steps=HORIZON)
    predictions.append(yhat)
    obs = list(test_ts.iloc[t])
    history.append(obs[0])
    history.pop(0)
    print(test_ts.index[t])
    print(t + 1, ": predicted =", yhat, "expected =", obs)
# -
# ## Autoregressive Integrated Moving Average method in Statsmodels
# +
import datetime as dt
import math
import os
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from common.utils import load_data, mape
from sklearn.preprocessing import MinMaxScaler
from statsmodels.tsa.statespace.sarimax import SARIMAX
# %matplotlib inline
pd.options.display.float_format = "{:,.2f}".format
np.set_printoptions(precision=2)
warnings.filterwarnings("ignore")
# -
data_dir = "./data"
ts_data_load = load_data(data_dir)[["load"]]
ts_data_load.head(10)
train_start_dt = "2014-11-01 00:00:00"
test_start_dt = "2014-12-30 00:00:00"
# +
train = ts_data_load.copy()[
    (ts_data_load.index >= train_start_dt) & (ts_data_load.index < test_start_dt)
][["load"]]
test = ts_data_load.copy()[ts_data_load.index >= test_start_dt][["load"]]
print("Training data shape: ", train.shape)
print("Test data shape: ", test.shape)
# -
# Scale to [0, 1]; fit on train only to avoid leaking test statistics.
scaler = MinMaxScaler()
train["load"] = scaler.fit_transform(train)
train.head()
test["load"] = scaler.transform(test)
test.head()
HORIZON = 3
print("Forecasting horizon:", HORIZON, "hours")
# SARIMA(4,1,0)(1,1,0)[24]: 24-hour seasonal period for hourly load data.
order = (4, 1, 0)
seasonal_order = (1, 1, 0, 24)
# +
model = SARIMAX(endog=train, order=order, seasonal_order=seasonal_order)
results = model.fit()
print(results.summary())
# +
# Build the multi-step targets: load+1, load+2, ... shifted back in time.
test_shifted = test.copy()
for t in range(1, HORIZON):
    test_shifted["load+" + str(t)] = test_shifted["load"].shift(-t, freq="H")
test_shifted = test_shifted.dropna(how="any")
test_shifted.head(5)
# +
# %%time
# Walk-forward evaluation with a 720-hour sliding window, refitting a
# (smaller) SARIMA(2,1,0)(1,1,0)[24] at every test timestamp.
training_window = 720
train_ts = train["load"]
test_ts = test_shifted
history = [x for x in train_ts]
history = history[(-training_window):]
predictions = list()
order = (2, 1, 0)
seasonal_order = (1, 1, 0, 24)
for t in range(test_ts.shape[0]):
    model = SARIMAX(endog=history, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit()
    yhat = model_fit.forecast(steps=HORIZON)
    predictions.append(yhat)
    obs = list(test_ts.iloc[t])
    history.append(obs[0])
    history.pop(0)
    print(test_ts.index[t])
    print(t + 1, ": predicted =", yhat, "expected =", obs)
# -
# Long-format evaluation frame: one row per (timestamp, horizon step),
# un-scaled back to the original units for MAPE computation.
eval_df = pd.DataFrame(
    predictions, columns=["t+" + str(t) for t in range(1, HORIZON + 1)]
)
eval_df["timestamp"] = test.index[0 : len(test.index) - HORIZON + 1]
eval_df = pd.melt(eval_df, id_vars="timestamp", value_name="prediction", var_name="h")
eval_df["actual"] = np.array(np.transpose(test_ts)).ravel()
eval_df[["prediction", "actual"]] = scaler.inverse_transform(
    eval_df[["prediction", "actual"]]
)
eval_df.head()
if HORIZON > 1:
    eval_df["APE"] = (eval_df["prediction"] - eval_df["actual"]).abs() / eval_df[
        "actual"
    ]
    print(eval_df.groupby("h")["APE"].mean())
print(
    "One-step forecast MAPE: ",
    (
        mape(
            eval_df[eval_df["h"] == "t+1"]["prediction"],
            eval_df[eval_df["h"] == "t+1"]["actual"],
        )
    )
    * 100,
    "%",
)
print(
    "Multi-step forecast MAPE: ",
    mape(eval_df["prediction"], eval_df["actual"]) * 100,
    "%",
)
| Notebooks/Chapter 4 - Introduction to Autoregressive and Automated Methods for Time Series Forecasting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Create your own element, in this case a radar plot
# This example is based on code from https://stackoverflow.com/questions/46564099/what-are-the-steps-to-create-a-radar-chart-in-bokeh-python
# <img src='./radar_plot.png'>
import numpy as np
from bokeh.plotting import figure, output_notebook, show
from bokeh.models import ColumnDataSource, LabelSet, HoverTool
output_notebook()
# ### 1. Data for one particular plot
# One magnitude in [0, 1] per dimension, plus per-dimension descriptions
# and the subject's textual responses (shown on hover).
values = [.5, 1, .8, .3, .8, .8, .8, .9]
dimensions = ['Dim 1','Dim 2','Dim 3','Dim 4','Dim 5','Dim 6','Dim 7','Dim 8']
# NOTE(review): 'Was was the difficulty' looks like a typo for
# 'What was the difficulty' — fix at the data source if so.
dim_descr = {'Dim 1': 'What was the modality of the task',
'Dim 2': 'Was was the difficulty of the task',
'Dim 3': 'What was the valence of the task',
'Dim 4': 'How interesting was the task',
'Dim 5': 'Was the task too repetitive',
'Dim 6': 'Would you change the timing of the task',
'Dim 7': 'Would you recommend the task to your colleagues?',
'Dim 8': 'Were you aware of body motion associated with the task'}
responses = ['Visual', 'Very difficult', 'Very positive', 'Extremely Interesting', 'Stongly Agree', 'Strongly Disagree', 'Strongly Agree','Yes']
# ### 2. Create Figure and Draw Outer Circle
# match_aspect keeps the circles round regardless of the figure's size.
p = figure(match_aspect=True)
# Draw the outer circle, centred at (0.5, 0.5) with radius 0.5.
centre = 0.5
p.circle(x=centre,y=centre,radius=0.5, fill_color=None, line_color='black', line_alpha=0.5)
show(p)
# ### 3. Draw intermediate circles
# Dashed concentric circles at radii 0.1-0.4 act as the radial grid.
p.circle(x=0.5,y=0.5,radius=.5, line_color='black', fill_color=None, line_alpha=0.5)
p.circle(x=0.5,y=0.5,radius=.1, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
p.circle(x=0.5,y=0.5,radius=.2, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
p.circle(x=0.5,y=0.5,radius=.3, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
p.circle(x=0.5,y=0.5,radius=.4, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
show(p)
# ### 4. Remove Grid and Non-polar Axes
# Hide the default cartesian grid/axes; only the polar elements remain.
p.xgrid.visible=False
p.ygrid.visible=False
p.xaxis.visible=False
p.yaxis.visible=False
p.toolbar.logo=None
p.toolbar_location='below'
show(p)
# ### 5. Draw Polar Axes
def unit_poly_verts(theta, centre):
    """Vertices of the polygon inscribed in the circle of radius `centre`
    centred at (centre, centre), one (x, y) tuple per angle in `theta`.
    """
    verts = []
    for angle in theta:
        px = centre * np.cos(angle) + centre
        py = centre * np.sin(angle) + centre
        verts.append((px, py))
    return verts
# Obtain the Number of Dimensions
# ===============================
num_vars = len(values)
# Get the angle for each axes representing a dimension (from 0 to 2pi) + pi/2 --> To start on the y-axis
# ======================================================================================================
theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)
theta += np.pi/2
# Compute the intersection points with the outer circle for each of the axes
# ==========================================================================
verts = unit_poly_verts(theta, centre)
x = [v[0] for v in verts]
y = [v[1] for v in verts]
# Draw the radial spokes from the centre to each outer vertex
# ===========================================================
for i,j in zip(x,y):
    p.line(x=[centre,i],y=[centre,j], line_color='black', line_dash='dashed', line_alpha=0.5)
show(p)
# ### 6. Add Labels and hovering capabilities to axes
# Draw Outer Dots (hovering one shows the dimension's full question text)
# =======================================================================
out_dots_TOOLTIPS = [("Dim", "@desc")]
out_dots_src = ColumnDataSource({'x':x,'y':y,'desc':list(dim_descr.values())})
g_out_dots = p.circle(x='x',y='y', color='black', source=out_dots_src)
out_dots_hover = HoverTool(renderers=[g_out_dots], tooltips=out_dots_TOOLTIPS)
p.add_tools(out_dots_hover)
show(p)
# Draw Dimension Labels (nudged outward on the left/bottom half)
# ==============================================================
labels_src = ColumnDataSource({'x':[i if i >= 0.5 else i-.05 for i in x],'y':[i if i >= 0.5 else i-.05 for i in y],'text':dimensions})
labels = LabelSet(x="x",y="y",text="text",source=labels_src)
p.add_layout(labels)
show(p)
# ### 7. Add Patch for a given set of data
def radar_patch(r, theta, centre):
    """Map magnitudes `r` at angles `theta` to the (x, y) points of the
    radar patch, scaled by `centre` and translated to the circle's centre.
    """
    # offset from centre of circle
    offset = 0.0
    scaled = r * centre + offset
    xt = scaled * np.cos(theta) + centre
    yt = scaled * np.sin(theta) + centre
    return xt, yt
# Compute the polar coordinates for the available data
# ====================================================
xt, yt = radar_patch(np.array(values), theta, centre)
# Use Bokeh Patch Element to draw the data
# ========================================
p.patch(x=xt, y=yt, fill_alpha=0.3, fill_color='blue', line_color='blue', line_width=2)
show(p)
# Patch hovering: a dot per vertex showing the textual response on hover.
patch_dots_TOOLTIPS = [("Response:","@desc")]
patch_dots_src = ColumnDataSource({'xt':xt,'yt':yt,'desc':responses})
patch_dots = p.circle(x='xt',y='yt',color='black', source=patch_dots_src)
patch_dots_hover = HoverTool(renderers=[patch_dots], tooltips=patch_dots_TOOLTIPS)
p.add_tools(patch_dots_hover)
show(p)
def generate_radar_chart_from_vals(vals, strs, QD, color='black'):
    """Build and return a complete 425x425 Bokeh radar-chart figure.

    vals  -- magnitudes in [0, 1], one per question/axis
    strs  -- per-question response strings, shown when hovering the patch dots
    QD    -- dict of question descriptions, shown when hovering the outer dots
    color -- fill/line color of the data patch
    Reuses unit_poly_verts() and radar_patch() defined above.
    """
    centre = 0.5
    num_vars = len(vals)
    # One axis per question; +pi/2 so the first axis points straight up.
    theta = np.linspace(0, 2*np.pi, num_vars, endpoint=False)
    theta += np.pi/2
    verts = unit_poly_verts(theta, centre)
    x = [v[0] for v in verts]
    y = [v[1] for v in verts]
    p =figure(match_aspect=True)
    # Outer dots: hovering shows the full question text.
    out_dots_TOOLTIPS = [("Q:", "@desc")]
    out_dots_src = ColumnDataSource({'x':x,'y':y,'desc':list(QD.values())})
    g_out_dots = p.circle(x='x',y='y', color='black', source=out_dots_src)
    out_dots_hover = HoverTool(renderers=[g_out_dots], tooltips=out_dots_TOOLTIPS)
    p.add_tools(out_dots_hover)
    # Outer circle.
    p.circle(x=0.5,y=0.5,radius=0.5, fill_color=None, line_color='black', line_alpha=0.5)
    # Radial spokes from the centre to each vertex.
    for i,j in zip(x,y):
        p.line(x=[centre,i],y=[centre,j], line_color='black', line_dash='dashed', line_alpha=0.5)
    # Dashed concentric grid circles.
    p.circle(x=0.5,y=0.5,radius=.5, line_color='black', fill_color=None, line_alpha=0.5)
    p.circle(x=0.5,y=0.5,radius=.1, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
    p.circle(x=0.5,y=0.5,radius=.2, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
    p.circle(x=0.5,y=0.5,radius=.3, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
    p.circle(x=0.5,y=0.5,radius=.4, line_color='black', fill_color=None, line_alpha=0.5, line_dash='dashed')
    # Hide the default cartesian grid/axes and tidy the toolbar.
    p.xgrid.visible=False
    p.ygrid.visible=False
    p.xaxis.visible=False
    p.yaxis.visible=False
    p.toolbar.logo=None
    p.toolbar_location='below'
    # Question ID labels (Q01, Q02, ...), nudged outward on the left/bottom.
    labels_txt = ['Q'+str(i).zfill(2) for i in range(1,num_vars+1)]
    labels_src = ColumnDataSource({'x':[i if i >= 0.5 else i-.05 for i in x],'y':[i if i >= 0.5 else i-.05 for i in y],'text':labels_txt})
    labels = LabelSet(x="x",y="y",text="text",source=labels_src)
    p.add_layout(labels)
    # Data patch plus hoverable vertex dots showing the responses.
    xt, yt = radar_patch(np.array(vals), theta, centre)
    p.patch(x=xt, y=yt, fill_alpha=0.3, fill_color=color, line_color=color, line_width=2)
    patch_dots_TOOLTIPS = [("Response:","@desc")]
    patch_dots_src = ColumnDataSource({'xt':xt,'yt':yt,'desc':strs})
    patch_dots = p.circle(x='xt',y='yt',color='black', source=patch_dots_src)
    patch_dots_hover = HoverTool(renderers=[patch_dots], tooltips=patch_dots_TOOLTIPS)
    p.add_tools(patch_dots_hover)
    p.width=425
    p.height=425
    return p
| 2020_03_06/Talk_Part03_CreateYourElements.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + [markdown] colab_type="text" id="-DiTKVvDy8rF"
# # Github
# ### Getting set up
#
# - Where you can get the code?
# - All code is hosted on github contained at:
# - Getting your system up and running - what commands you need to install the software
#
# ### What is GIT?
# - Git is a version control system
# - What that means is that there is a central location for you to upload your code
# - This means you can
# - Download and edit your code from any computer that has internet access
# - Keep updates of you code if there are any changes or new features
# - Keep a versioning system that allows you to incrementally build a code base
# - Share your code with others
# - Contribute updates to others' code repositories
# - Download others code repositories
# - There are different platforms that use git. Github and Gitbucket are popular choices.
# - If you are a student or an educator you can sign up for a free account and get a bundle of perks
#
# ### Downloading code from github
# You will need to a know a couple of commands for Git commands
#
# -To download code use:
# - To clone (or copy the repo)
# `git clone \<repo\>`
# - To update your repository from the one on github:
# `git pull`
# - For more information about github checkout this [cheat sheet page]
# (http://rogerdudler.github.io/git-guide/)
#
#
# ### Some Preliminaries
# - We'll be using Python 3.6 in this course
# - If you prefer python 2.7 you can use the `future` library for forward compatibility
# - More details can be found [here](https://docs.python.org/3/howto/pyporting.html)
# - Will not use Windows
# - Installation of packages is difficult
# - You should probably use a virtual machine
# - Difficult setting up GPU
# - Tensorflow is under development and hasn't been fully vetted with windows
# - Most architectures use a unix-based system - easier to manipulate and modify
#
# ### Installing libraries
# - You can use various methods for installing libraries
# - As part of best practices you should install these libraries in a virtual environment
# - A virtual environment protects you against modifications to your kernels
# - Conda should have these installed otherwise you can you use `virtualenv venv`
# - For more details about the environment see [here](https://gist.github.com/Geoyi/d9fab4f609e9f75941946be45000632b)
# - The order in which you install matters since the packages have dependencies
# - Mac:
# ```bash
# sudo pip install -U numpy scipy matplotlib pandas jupyter
# ```
# - Ubuntu
# - Provided you have python package manager installed (pip) can use similar command otherwise use:
# ```bash
# sudo apt-get install python-numpy python-scipy python-matplotlib python-pandas python-jupyter
# ```
# + [markdown] colab_type="text" id="1Sp9-zG4y8rI"
# # Python review
# ### Lists vs Arrays
# - What's a python list?
# - What's a python array?
# - How are lists and arrays different?
# - What are the tradeoffs between a list and array
# + colab={} colab_type="code" id="qTGePtQKy8rL" outputId="95451054-0cba-4be9-cb90-c3afdba1ec87"
# --- Setting up a list/array ---
# Build a plain Python list and an equivalent numpy array side by side.
import numpy as np
L = [1, 2, 3]
A = np.array([1, 2, 3])
# --- How are they similar? ---
# Both containers support the same iteration protocol:
# each yields its elements one at a time.
# - List iteration -
for item in L:
    print(item)
# - Array iteration -
for item in A:
    print(item)
# + colab={} colab_type="code" id="IcsnMByYy8rf" outputId="1e629404-edef-4ea0-c9d2-b25d01ff31ab"
# --- Append: growing a list vs. "growing" a numpy array ---
# - List -
L.append(4)
print("List append {}".format(L))
# `.append` mutates the list in place
L.append(4)
print("List append with `.append` in place {}".format(L))
# `+` builds and rebinds a brand-new list
L = L + [5]
print("List append with '+' {}".format(L))
# - Array -
# numpy arrays are fixed-size, so neither of these concatenates:
# A.append(4) # -> AttributeError: no such method
# A + [4,5]   # -> broadcasting error (shape mismatch), not concatenation
other = np.array([5,6,7])
print("Numpy append {}".format(np.append(A, other)))
# + [markdown] colab_type="text" id="k-hfTLqxy8rp"
# - You might be thinking why you would use a numpy array at all if it can't append elements with a `+`
# - Since numpy is a numeric computation library we use the operation `+` to add elements of an array, this is much easier in numpy than it is with lists
# - Notes:
# - How about if we want to add elements in a list together?
# - For example in the case of vector addition?
# - A canonic way is to simply use a loop and add them together
#
# + colab={} colab_type="code" id="hh25QlGoy8rq" outputId="517a4d22-de20-486d-8f48-463560200074"
# --- Algebraic Operations ---
# -- Addition --
# - List: needs an explicit elementwise pass -
L2 = [value + value for value in L]
print("List addition `+` {}".format(L2))
# - Numpy: `+` is already elementwise -
# 1D vector
print("Numpy addition `+` {}".format(A + A))
# 2D matrix
M = np.array([[1,2,3],[4,5,6],[7,8,9]])
print("Numpy addition 1D:`+` {}".format(M))
print("Numpy addition `2D:+` {}".format(M + M))
# -- Multiplication --
# - Numpy: `*` multiplies elementwise (Hadamard), not a matrix product -
print("Numpy element-wise multiplication {}".format(M * M))
# -- Division --
# - Numpy -
# Dividing the integer-typed array directly:
print("Numpy element-wise integer division {}".format(M /2))
# Rebuilding the matrix with an explicit float dtype:
M = np.array([[1,2,3],[4,5,6],[7,8,9]], dtype=float)
print(M/2)
# -- Element-wise operations --
# - Scalar Multiplication -
print("Scalar multiplication {}".format(2 * A))
# - Squaring -
print("Squaring {}".format(A**2))
# - Exponentiation -
print("Exponentation {}".format(np.exp(A)))
# - Logs -
print("Log {}".format(np.log(A)))
# - Square roots -
print("Square root {}".format(np.sqrt(A)))
# + [markdown] colab_type="text" id="E3xEVpUGy8rz"
# - In contrast these operations are not algebraic with lists
# - To perform the same elementwise operations they need to be looped over
# + colab={} colab_type="code" id="aWv3qKaWy8r3" outputId="04d90131-6167-465a-e8c8-9d644830ace5"
# Note: `2 * L` repeats the list rather than scaling its entries.
print("Multiplication list: {}".format(2 * L))
# print("Division list, results in an error {}".format(L/2))
# - List Division: divide each element explicitly -
L2 = [element / 2.0 for element in L]
print(L2)
# + [markdown] colab_type="text" id="jkZQy-vyy8r-"
# ### Summary: Numpy vs lists
# - Numpy arrays are more convenient than lists for certain operations
#
# - Numpy arrays are used to manipulate vectors/matrices
#
# - We can apply similar operations to lists however requires looping through the elements of the list
#
# - In general `for` loops are slow - so we want to avoid looping through them
# - Rather vector implementations are preferred
#
# - In addition numpy arrays have been optimized on the backend to use `C++` modules to be perfomant
# + [markdown] colab_type="text" id="QTH9FRw_y8sG"
# ## Other operations
# ### Dot products
# - Why are dot products important?
# - Where are dot products used?
# - How do dot products work?
#
# - Definition:
# - Let $a$ and $b$ be two $n\times 1$ columns vectors, then the dot product between $a$ and $b$ is given by:
# $$
# a\cdot b = a^Tb = \sum_{i=1}^n a_i b_i
# $$
# or equivalently using the $cos(\theta)$ notation:
# $$
# a\cdot b = ||a||\times||b||\cos_{a,b}(\theta)
# $$
# where $a^T$ is the vector transpose of $a$ and $||a||$ is the magnitude of $a$.
#
# - Recall that the magnitude of the vector $a = [x_1,x_2,....,x_n]^T$ is calculated as
# $$
# ||a|| = \sqrt{\sum_{i=1}^n a_i^2}
# $$
# this is also called the $L_2$ norm. We'll see different $L$ norms later in the course
#
# - Since the angle between $a$ and $b$ is typically unknown we usually use the first definition to inform the second, specficially:
# $$
# \cos_{a,b}(\theta) = \frac{a^Tb}{||a||\times||b||}
# $$
#
# + colab={} colab_type="code" id="D8MhWxwFy8sK" outputId="bd20cad9-a95c-4602-e39f-8e680b80cea6"
# -------------------------------
# ---- Dot Product: Method 1 ----
# -------------------------------
# --- Zip ---
# `zip` pairs up elements from two sequences as tuples
# l1 = [1,2,3]; l2 = [4,5,6];
# print("Zipped list {}".format(zip(a1,a2)))
# --- Dot Product: Loop Approach --
a1 = np.array([6,2,3,4,5])
a2 = np.array([5,4,3,2,1])
# - Loop approach: accumulate pairwise products -
dotProduct = 0
for left, right in zip(a1, a2):
    dotProduct += left * right
print("The dot product is {}".format(dotProduct))
# --- Dot Product: Function approach ---
# sum-of-products written two equivalent ways
approach_1 = np.sum(a1*a2)
approach_2 = (a1*a2).sum()
print("Function Approach 1: numpy method: {}".format(approach_1))
print("Function Approach 2: `sum` as an instance method {}" .format(approach_2))
# --- DotProduct: Builtin ---
builtin_1 = np.dot(a1, a2)
builtin_2 = a1.dot(a2)
print("Builtin Approach 1: {}".format(builtin_1))
print("Builtin Approach 2: instance method {}".format(builtin_2))
# + [markdown] colab_type="text" id="kCJHhOB4y8sT"
# - Now let's try the alternate way to calculate the dot product using the magnitude and cosine
# - First we need to calculate the magnitude of a and b; we can do this using the dot product and square root function
# + colab={} colab_type="code" id="0JKyxFWdy8sW" outputId="db371b05-c1df-4314-97b8-2105c8d89d5e"
# ----------------------------------------
# ---- Dot Product: Method 2 (Angle) ----
# ----------------------------------------
# L2 Norm: Method 1 - direct, via sqrt(a . a)
a1Mag = np.sqrt(a1.dot(a1)) ; a2Mag = np.sqrt(a2.dot(a2))
print("Direct calculation of L2 norm {}".format(a1Mag))
# L2 Norm: Method 2 - builtin `linalg.norm`
a1MagM2 = np.linalg.norm(a1) ; a2MagM2 = np.linalg.norm(a2)
print("`lingalg` calculation of L2 norm {}".format(a1MagM2))
# assert(np.isclose(a1Mag, a1MagM2)) #<- sanity check: both norm methods agree
# -- Angle Calculation ---
# cos(theta) = (a1 . a2) / (||a1|| * ||a2||)
cosAngle = a1.dot(a2) / (a1Mag * a2Mag)
# BUG FIX: this value is the *cosine* of the angle, not an angle in
# degrees; the original print label said "Angle (degrees)".
print("Cosine of angle from dot product {}".format(cosAngle))
# -- Angle in Radians --
angle = np.arccos(cosAngle)
print("Angle (radians) from dot product {}".format(angle))
# + [markdown] colab_type="text" id="iTfVvK0ny8sj"
# ## Time complexity
# - We've seen several different ways to calculate the dot product
# - A natural question to ask is "Is one better than another with respect to run time (and memory)?"
# - This is important when processing large files
# - Small differences compound and result in in large performance gains
# - Run time is in the context of __time complexity__
# - Runtime complexity is denoted by one of the four operators:
# - $O(\cdot)$ which is an asymptotic bound on run-time
#         - That is, if we increased the size of the operation to infinity, what would the lower bound on the runtime converge to?
# - For example:
# - Suppose a function accepts the parameter $n$
# - We find that as the size of $n$ increases our function would complete in linear time
# - Then we say our function has $O(n)$ time complexity and implies that our runtime will increase linearly with input $n$
# - [<NAME>](https://rob-bell.net/2009/06/a-beginners-guide-to-big-o-notation/) has a good page explaining the differences and couple of examples between:
# - $O(1)$ - constant,
# - $O(n)$ - linear,
# - $O(n^2)$ - quadratic and
# - $O(\log(n))$ -log runtime complexity
# - Let's take a look at a couple of functions and how long they take to execute
#     - We are going to use the function `timeit` to loop through a function 1000 times and return its values
# + colab={} colab_type="code" id="RFrCP8Zfy8sm" outputId="18418f9c-70c1-4e88-b88e-fc8bc5e5b4c6"
# ------------------------
# ---- Library Import ----
# ------------------------
import numpy as np
from datetime import datetime
# --------------------------------
# ---- Run-time: Dot Products ----
# --------------------------------
# ---- Setting up Functions to Compare ----
# -- Dot Product: Loop --
def loopDotProduct(a1, a2):
    """Dot product via an explicit Python-level loop (baseline for timing)."""
    total = 0
    for left, right in zip(a1, a2):
        total += left * right
    return total
# -- Dot product: Numpy --
def numpyDotProduct(a1, a2):
    """Dot product delegated to numpy's optimized C implementation."""
    return np.dot(a1, a2)
# ---- Constants ----
np.random.seed(1234)
number_of_loops = 10000
# Two random 1000-element vectors as the benchmark input
a1 = np.random.randn(1000)
a2 = np.random.randn(1000)
# ---- Run-time Comparison ----
# Time the pure-Python loop version
t0 = datetime.now()
for _ in range(number_of_loops):
    loopDotProduct(a1, a2)
dtLoop = datetime.now() - t0
# Time the numpy version
t0 = datetime.now()
for _ in range(number_of_loops):
    numpyDotProduct(a1, a2)
dtNumPy = datetime.now() - t0
# Report both timings and their ratio
secondsLoop = dtLoop.total_seconds()
secondsNumPy = dtNumPy.total_seconds()
print('''
    The total run time using a loop is {a},
    The total run time using Numpy is: {b}.
    The difference in runtime dtLoop/dtNumPy: {c}
'''.format(a = secondsLoop, b = secondsNumPy, c = secondsLoop/secondsNumPy))
# + [markdown] colab_type="text" id="mQXPJtw8y8ss"
# - The output indicates the runtime using NumPy is ~300 times faster than a for loop method!
# - A more idiomatic programming approach is using the `timeit` function rather than system time for benchmarking, rewriting the methods
# + colab={} colab_type="code" id="oKwfdztey8ss" outputId="0c951ca3-1f77-4a87-dc1d-2b27dfa6dc5c"
import timeit
import functools
np.random.seed(1234)
# %timeit loopDotProduct(np.random.randn(100),np.random.randn(100))
# %timeit numpyDotProduct(np.random.randn(100),np.random.randn(100))
# + [markdown] colab_type="text" id="rP0sdsYTy8su"
# ## Numpy Matrix Operations
# - Here we investigate `numpy` matrices, using numpy arrays
# - These are effectively wrappers for arrays however have builtin convenience methods
#
# - A matrix is simply an array of arrays
#     - It is a $n \times m$ object, where $n$ is the number of rows and $m$ is the number of columns
# - Arrays can further be embedded to create higher order matrices such as tensors
# - For example a real 3-tensor is $\mathbb{R}^{n\times m \times q}$ dimensional objects
# - With higher order matrics some properties however do not generalize
# - Matrices are a special subclass of 2D tensors
# - Note:
# - The inclusion of `dtype` when specifying a matrix (or tensor) specifies the matrix-type
# - For larger arrays there is a trade-off between different types: floats, integers, chars, booleans
# - In terms of memory "chars > floats > ints"
# - Arrays take lists as inputs
# - Since matrices are such common objects numpy has a built in method for matrices
# + colab={} colab_type="code" id="8m2_vGxpy8su" outputId="144cf048-1874-4060-a60e-e7bcee78b44a"
# ---- Constructing a 3 x 3 matrix ----
L = [[1.,2.,3.],[4.,5.,6.],[7.,8.,10.]]
M = np.array(L, dtype=float)       # 2-D numpy array
M2 = np.matrix(L, dtype = float)   # numpy matrix (NOTE: np.matrix is deprecated)
print("A 3x3 matrix with type `float` \n{}".format(M))
# ---- Common Matrix Operations ---
# Transpose
print(M2.T)
# Conjugate transpose (Hermitian)
print(M2.H)
# Inverse
print(M2.I)
# Matrix multiplication (`*` is the matrix product for np.matrix)
print(M2 * M2)
# Matrix exponentiation (M2 * M2 * M2)
print(M2 ** 3)
# Accessing elements w/ index notation
print(M[1,2])
# Array vs. matrix check: an ndarray and a matrix are *different* types.
# BUG FIX: the original asserted the types were equal, which always
# raised; the surrounding text says they should be distinct.
assert(type(M) != type(M2)), 'ndarray and matrix should be distinct types'
# + [markdown] colab_type="text" id="t6GGnlhZy8sw"
# ### Linear Algebra Operations
# - We saw in the previous block that arrays and matrices are not the same thing
# - Matrices can use linear algebra rules whereas `numpy` arrays act element wise
# - So why use a `numpy` array?
# - Matrices are only 2d dimensional whereas `numpy` arrays are can be $n$ dimensional
# - Objects returned with `numpy` class are generally arrays and not matrices
# - Accordingly to avoid confusion type mis-matchs it is better to use `numpy` arrays
# - To convert a matrix into a `numpy` array use the command `np.array(M)`
# - Linear algebra operations act on 2d arrays through the `linalg` module, applying matrix operations to a `numpy` array will result in an error
# - Additional information about operations that can be performed using the linear algebra module can be found [here](https://docs.scipy.org/doc/numpy/reference/routines.linalg.html)
# - Below several common matrix operations are presented for 2d arrays
# + colab={} colab_type="code" id="i4bp9jsqy8sz" outputId="5fe211e2-c4a0-4b5a-9f10-bdeabf6ecb58"
# --- Loading linalg library ---
import numpy.linalg
# Matrix inverse
Minv = numpy.linalg.inv(M)
print("The inverse M(2d `numpyarray`) is\n {}".format(Minv))
print("The inverse M2(`matrix`) is\n {}".format(M2.I))
# Inverse check
assert((Minv == M2.I).all()), "Inverses are not the same"
# BUG FIX: M @ Minv is the *identity* matrix (ones only on the diagonal);
# the original prints claimed a "matrix of ones".
print("This should be the identity matrix:\n{}".format(M.dot(Minv)))
print("This should be the identity matrix:\n{}".format(Minv.dot(M)))
# Here `isclose` rounds the entries in both matrices to see if they are
# within in some delta
# - Default relative tolerance: 10^-5
# - Default absolute tolerance 10^-8) tolerance
are_inverses_close = np.isclose(M.dot(Minv), Minv.dot(M))
# BUG FIX: the format placeholder was missing, so the result was never shown
print("Check if elements are close {}".format(are_inverses_close))
# Matrix Determinant
# BUG FIX: for M = [[1,2,3],[4,5,6],[7,8,10]] the determinant is -3, not 3
print("det(M) should be -3 {}".format(np.linalg.det(M)))
# Diagonal elements
print("diag(M) {}".format(np.diag(M)))
# Trace of a matrix
print("tr(M) {}".format(np.trace(M)))
assert(np.trace(M) == np.diag(M).sum()), "Trace & diag sum are not the same"
# + [markdown] colab_type="text" id="YqEPqQR5y8s3"
# - Two important matrix operations are the inner and outer product
# - The __inner product__ is the normed dot product
# - Whereas the __outer product__ of two vectors generates a matrix
# - The outer product is frequently used to calculate the covariance function ie. $E((X-\mu)(X-\mu)^T)$
# - If you are unfamiliar with inner and outer products along with their relation to expectation and covariance this [website](http://www.math.uah.edu/stat/expect/Matrices.html) provides exercises and summaries of how they are used in probability and statistics
# + colab={} colab_type="code" id="eDrWKe1oy8s4" outputId="87e6816e-13c3-48f7-93c1-c285aa8616d1"
# Outer product
a = np.array([1,2,3,4])
b = np.array([5,6,7,8])
print("The outer product is:\n{}".format(np.outer(a,b)))
# Inner product
print("The inner product is:\n{}".format(np.inner(a,b)))
assert(np.inner(a,b) == a.dot(b)), "Dot and inner product are different"
# + [markdown] colab_type="text" id="-QypaWW3y8tB"
# ### Additional matrix operations
# - So far we have manually generated `numpy` arrays by taking a list and typecasting it into an array
#     - This is inconvenient, especially with larger arrays
# - There are several particular method that lighten the burden of creating common arrays
# - Additional documentation about the inputs for various distributions can be found [here](https://docs.scipy.org/doc/numpy/reference/routines.random.html)
#
# + colab={} colab_type="code" id="611MG-ISy8tB" outputId="b07a8a4c-7764-4560-d1f1-5e75bbb9c49d"
# -------------------------
# ---- Simple matrices ----
# -------------------------
# All-zero vector of length 10
zvec = np.zeros(10)
print('The zero vector is {}'.format(zvec))
# 5x3 all-zero matrix (shape given as a tuple)
zmat = np.zeros((5,3))
print("The zero matrix is\n{}".format(zmat))
# 3x4 all-ones matrix
omat = np.ones((3,4))
print("The ones matrix is\n{}".format(omat))
# Diagonal matrix built from a list of diagonal entries
dmat = np.diag([1,5,10])
print("Diagonal matrix is\n{}".format(dmat))
print('------------------------------------------\n')
# -------------------------
# ---- Random matrices ----
# -------------------------
# 3x2 matrix of uniform draws on (0,1); takes a shape tuple
umat = np.random.random((3,2))
print("The uniform matrix is\n{}".format(umat))
# 4x3 matrix of standard-normal draws; takes separate dims, not a tuple
gmat = np.random.randn(4,3)
print("The gaussian filled matrix is\n{}".format(gmat))
print('------------------------------------------\n')
# -------------------------------
# ---- Operations on Arrays ----
# -------------------------------
# Sample mean over all entries
print("Mean of the gaussian_matrix= {}".format(gmat.mean()))
# Sample variance over all entries
print("Variance of the gaussian_matrix= {}".format(gmat.var()))
# Matrix product of M with itself
print("Dot product of M and M:\n{}".format(M.dot(M)))
# Elementwise (Hadamard) product
print("M x M:\n{}".format(M * M))
# + [markdown] colab_type="text" id="qlQnzXF5y8tD"
# ### Eigenvalues and Vectors in numpy
# - Numpy can calculate eigenvalues and eigenvectors
# - Recall eigenvalues are vectors that satisfy the linear relation $\lambda v = v A$ where $\lambda$ is a constant, $v$ a vector and $A$ a matrix
# - Eigenvalues are important in several locations in deep learning
# - Common applications of eigenvalue decomposition include:
# - Principal component analysis,
# - Matrix decomposition
# - Image analysis
# - If you are not familiar with eigenvalues and vectors take some time to review the concepts, they appear every now and again in this course and give you a deeper appreciation of the material
# - There are two ways to calculate eigenvalues and eigenvectors in python
# 1. `eigenvalues, eigenvectors = np.eig(M)`
# 2. `eigenvalues, eigenvectors = np.eigh(M)`
# - The second one `eigh` is for __symmetric Hermitian matrices__
# - Hermitian matrices are ones that contain complex numbers
# - Think of Hermitian matrices as transposes for the complex space i.e.
# - Symmetric : $A = A^T$
# - Hermitian: $A = A^H$
# - The conjugate transpose of $A$ is $A^H$
#
# - Both functions are part of the `linalg` library
# + colab={} colab_type="code" id="R7rfXwPQy8tD" outputId="2b43d956-6473-4d46-ea54-81f51e15b61e"
# ------------------------
# Eigendecompostion -
# of a covariance matrix -
# ------------------------
# - Random Gaussian -
X = np.random.randn(1000,3)
# - Calculating the covariance -
# X is transposed to get the
# right dimensions
cov = np.cov(X.T)
print("The shape of the covariance matrix is {}".format(cov.shape))
print("The covariance matrix is {}".format(cov))
# - Method 1: Eigenvalues/vectors -
vals, vecs = np.linalg.eigh(cov)
print("The eigenvalues are:\n{}".format(vals))
print("The eigenvectores are:\n{}".format(vecs))
# - Method 2: Eigenvalues/vectors -
vals, vecs = np.linalg.eig(cov)
print("The eigenvalues are:\n{}".format(vals))
print("The eigenvectores are:\n{}".format(vecs))
# + [markdown] colab_type="text" id="yghMYuIby8tE"
# ### Solving a linear equation
# - What is a linear system
# - Linear system is a set of linear equations such that $A\mathbf{x} = \mathbf{b}$
# - By linear we mean a set of equations that can be written as:
# $$
# A\mathbf{x} = \sum_{i=1}^n \mathbf{a}_i\cdot \mathbf{x}_i
# $$
# where $\mathbf{x}_i \in \mathbb{R}^d$ are unknown and $a_i,i=1,...,n$ are some known set of constants in $\mathbb{R}$
#
# - To find the known values of the $\{x_i\}_{i=1}^n$ that satisfy the equation the matrix $A$ is inverted such that:
# $$
# A\mathbf{x} = \mathbf{b} \Rightarrow A^{-1}A\mathbf{x} = x = A^{-1}\mathbf{b}
# $$
#
# - The resulting values of $\mathbf{x}$ are then simply equal to $A^{-1}\mathbf{b}$
# - Note:
# - There are variety of different ways to find the inverse
# - Numpy has an optimized backend to solve the inverse efficiently
# - Accordingly while the "indirect" inverse method can be used it is more efficient to use `numpy`
# - If you have ever used `R ` you're familiar with the `solve` which works the same way
#
# + colab={} colab_type="code" id="h76l3UsQy8tF" outputId="02c935b9-25e7-4210-b839-ebe78ef2c86c"
# - Method 1: Indirect (via the explicit inverse) -
# Coefficient matrix
A = np.array([[1,2,3],[4,5,6],[7,8,10]])
# Its inverse
Ainv = np.linalg.inv(A)
# Right-hand side
b = np.array([1,4,6])
# x = A^{-1} b
x = Ainv.dot(b)
print("The solution for x is:{}".format(x))
# - Method 2: Direct solver (no explicit inverse) -
xx = np.linalg.solve(A,b)
print("The solution for x is:{}".format(xx))
# Both methods should agree up to floating-point error
print("Methods yield same result: {}".format(np.isclose(x, xx).all()))
# + [markdown] colab_type="text" id="hiLCTnaDy8tK"
# ## Data Manipulation
# ### Loading data
# - Here are two simple to loading data:
# 1. Python approach
# 2. Pandas approach
# + colab={} colab_type="code" id="69ytMIxKy8tK" outputId="56b6f02a-a7e0-4324-bdb3-4c499ffe09ad"
# -------------------------------
# Generate fake data
# -------------------------------
# Seeded 3x3 Gaussian matrix, rounded, written to a CSV on disk
np.random.seed(1234)
X = np.random.randn(3,3)
X = np.round(X,3)
np.savetxt(fname = "./sample_data.csv",
           X = X,
           delimiter=', ',
           newline='\n')
# -------------------------------
# Method 1: Loading a .csv
# -------------------------------
# Pre-populate list
M = []
# BUG FIX: the original iterated over a bare open() and never closed the
# file handle; a context manager guarantees it is released.
with open('./sample_data.csv') as fh:
    for line in fh:
        row = line.split(',')
        print(row)
        M.append(list(map(float, row)))
M = np.array(M)
print("The loaded .csv is:\n{}".format(M))
# + colab={} colab_type="code" id="lFaZxwXRy8tN" outputId="3de62673-08f2-49a3-82d1-e23743bff107"
# -----------------------------------
# Method 2: Pandas Loading a .csv
# -----------------------------------
import pandas as pd
# header=None: the generated file has no header row, so columns get
# integer labels 0..2
M_pandas = pd.read_csv("sample_data.csv", header=None)
print("The loaded .csv is:\n{}".format(M_pandas))
# + [markdown] colab_type="text" id="9QsVyqQVy8tO"
# ### Pandas package
# - Pandas is a versatile package for manipulating data
# - It is built on `numpy`
# - Frequently used to manipulate data before returning it to a `numpy` array
# - General work cycle: load in pandas -> manipulate in pandas -> convert to `numpy` array
# - There are many arguments that can be used with `pandas.read_csv`
# - You will not use most of them
# - For the complete set of options see the [pandas website](http://pandas.pydata.org/pandas-docs/version/0.16.2/generated/pandas.read_csv.html)
# - Some additional utility functions for `pandas.DataFrame`s are listed below
# + colab={} colab_type="code" id="MkT2TyZEy8tP" outputId="550c16e2-640e-4fa7-99a1-0f9e077d00b9"
# ---------------------------
# Pandas utility functions
# ---------------------------
# Type
type(M_pandas)
# Info
M_pandas.info()
# Top values
M_pandas.head(1)
# ---------------------------
# Pandas Indexing Functions
# ---------------------------
# By column
M_pandas[0]
# Column type: Series
type(M_pandas[0])
# Selection by index
print("Select M_pandas[0,0]:{}".format(M_pandas.iloc[0,0])) # type: Series
# BUG FIX: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
# .iloc is the supported positional indexer.
print("Selection M_pandas[1,0]:{}".format(M_pandas.iloc[1,0])) # type: Series
# Subsetting
print(M_pandas[[1,2]]) # Select cols 1,2 (index starts at 0)
print(M_pandas[M_pandas[1]>0.5]) # Filtering matrix based on col 1
print(M_pandas[0]>1.5) # Generate boolean series
# + [markdown] colab_type="text" id="MLUUGrrLy8tS"
# ## Matplotlib
# - Previous sections covered basic data manipulation
# - The next step in data analysis is visualization
# - Here we use `matplotlib` is an object oriented approach to generating graphs
# - As the name suggests the library that borrows elements from "Matlab"
# - Other methods in python have similar functionality such as `seaborn` or `Bokeh` however for simplicity, `matplotlib` is used here and is the more mature among the three
# - `matplotlib` has rich functionality, frequently knowing the simple plots is enough with referencing more advanced features as needed
# + colab={} colab_type="code" id="7QNeDfnjy8tS" outputId="b8c0d68f-f593-49af-d8bb-26904f80da53"
# ----------------
# Library import
# ----------------
import matplotlib.pyplot as plt
# ------------
# Basic plot
# ------------
# - Generating data -
# np.linspace(start, end, number of points)
x = np.linspace(0, 10, 100)
y = np.cos(x)
# - Iteration 1: Simple Plot -
plt.plot(x,y)
plt.show()
# - Iteration 2: Adding Layers (labels and a title) -
plt.plot(x,y)
plt.xlabel('time')
plt.ylabel('function of time')
plt.title('chart')
plt.show()
# ---------------
# Scatter plot
# ---------------
# - Read in data -
# BUG FIX: DataFrame.as_matrix() was removed in pandas 1.0;
# .to_numpy() is the supported replacement.
m = pd.read_csv('sample_data.csv', header = None).to_numpy()
# - Coordinates -
x = m[:,0]
y = m[:,1]
# - Plot -
plt.scatter(x,y)
plt.show()
# ------------
# Histogram
# ------------
# - Iteration 1: Simple -
plt.hist(x)
plt.show()
# - Iteration 2: Adding Bins -
R = np.random.rand(10000)
plt.hist(R, bins=50)
plt.show()
# ---------------
# Contour Plot
# ---------------
# - Setting up the grid -
X, Y = np.meshgrid(x, y)
# - Generating the z coord -
# BUG FIX: matplotlib.mlab.bivariate_normal was removed (matplotlib >= 3.1).
# For mux=muy=0, sigmax=sigmay=1, rho=0 the pdf reduces to the expression
# below, computed directly with numpy. (Also dropped the unused `delta`.)
Z = np.exp(-(X**2 + Y**2) / 2.0) / (2.0 * np.pi)
# - Plotting the data -
plt.contour(X, Y, Z)
plt.scatter(x,y)
plt.show()
# + colab={} colab_type="code" id="cXFSndBZy8tV"
| Materials/L2 - Modeling and Python Foundations/L2_python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %pylab inline
import matplotlib.pyplot as plt
import networkx as nx
import community
from pyCombo import combo, modularity
# Build a small scale-free test graph and draw it
G = nx.dorogovtsev_goltsev_mendes_graph(3)
pos=nx.spring_layout(G)
nx.draw_networkx(G,pos)
# BUG FIX: the original called set_edge_attributes(G, 'weight', [1, ...]),
# which matches neither the networkx 1.x API (expects a dict keyed by edge)
# nor the 2.x API (G, values, name). In networkx >= 2.0 a scalar value is
# applied to every edge.
nx.set_edge_attributes(G, 1, 'weight')
# Partition via pyCombo, then overwrite with the Louvain partition
partition = combo(G, weight='weight')
partition = community.best_partition(G)
# +
# Draw each community with its own grayscale shade.
communities = set(partition.values())
n_communities = float(len(communities))
for idx, com in enumerate(communities, start=1):
    # Shade in (0, 1]: matplotlib interprets a numeric string as grayscale
    shade = idx / n_communities
    members = [node for node in partition.keys()
               if partition[node] == com]
    nx.draw_networkx_nodes(G, pos, members, node_size=300,
                           node_color=str(shade))
nx.draw_networkx_edges(G, pos, alpha=1)
plt.axis('off')
plt.show()
# -
| example/.ipynb_checkpoints/example-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="CazISR8X_HUG"
# # Multiple Linear Regression (다중 선형 회귀)
# -
# - In this practice, we are going to find a regression model to predict the *profit* of a startup company from various independent variables such as R&D spend and Marketing spend.
# - For simplicity, we will not consider *categorical value* such as 'state'.
# + [markdown] colab_type="text" id="pOyqYHTk_Q57"
# ## Importing the libraries
# + colab={} colab_type="code" id="T_YHJjnD_Tja"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + [markdown] colab_type="text" id="vgC61-ah_WIz"
# ## Importing the dataset
# + colab={} colab_type="code" id="UrxyEKGn_ez7"
# Load the startup dataset (expects 50_Startups.csv in the working directory)
dataset = pd.read_csv('50_Startups.csv')
# -
dataset # inspect the loaded data
# Sklearn's regression models expect the shape of X and y as
#
# - X : [number_of_data, number_of_features ]
# - y : [number_of_data, number_of_features]
#
# Therefore, we need to convert dataframe's result to correct numpy object
dataset.columns
dataset[ ['R&D Spend', 'Administration', 'Marketing Spend'] ]
X_input = dataset[ ['R&D Spend', 'Administration', 'Marketing Spend'] ] # do not use 'state'
y_input = dataset[ 'Profit' ]
X_input.values
y_input
# Convert the pandas objects to plain numpy arrays for sklearn
X = X_input.values
y = y_input.values
print("Shape of X : ", X.shape)
print("Shape of y : ", y.shape)
# + [markdown] colab_type="text" id="WemVnqgeA70k"
# ## Splitting the dataset into the Training set and Test set
# + colab={} colab_type="code" id="Kb_v_ae-A-20"
from sklearn.model_selection import train_test_split
# Hold out 20% of the samples as a test set; fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# -
X_test.shape  # sanity-check the held-out split size (20% of the rows)
# + [markdown] colab_type="text" id="k-McZVsQBINc"
# ## Training the Multiple Linear Regression model on the Training set
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 757, "status": "ok", "timestamp": 1586353664008, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}, "user_tz": -240} id="ywPjx0L1BMiD" outputId="099836bc-4d85-4b4f-a488-093faf02e8cb"
from sklearn.linear_model import LinearRegression
# Ordinary least squares fit on the training split
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# + [markdown] colab_type="text" id="xNkXL1YQBiBT"
# ## Predicting the Test set results
# + colab={"base_uri": "https://localhost:8080/", "height": 185} colab_type="code" executionInfo={"elapsed": 951, "status": "ok", "timestamp": 1586353666678, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhEuXdT7eQweUmRPW8_laJuPggSK6hfvpl5a6WBaA=s64", "userId": "15047218817161520419"}, "user_tz": -240} id="TQKmwvtdBkyb" outputId="493436bf-a4ae-4374-ca16-0b0c25d19457"
# Predict profits for the held-out samples
y_pred = regressor.predict(X_test)
np.set_printoptions(precision=2)
# Print predictions next to the reference values, side by side
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
# -
# To check the result easily, convert the reference and the predict to dataframe.
data = { 'ref': y_test, 'pred':y_pred, 'diff':np.abs( (y_test - y_pred) ) }
df = pd.DataFrame(data)
df
| DeepLearning/1. regression/multiple_linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# # Short Demo of a Classifier Script
# ## GroupKFolds with Linear Support Vector Classification
#
# Note: to run this part of the presentation, you'll need `prepare_data.py` to be in the same directory as this jupyter notebook. Additionally, make sure to change the `data_dir` path!
# + slideshow={"slide_type": "subslide"}
# Import modules for this step
from nilearn import datasets
import pandas as pd
import os

# Fetch the preprocessed ABIDE data (C-PAC pipeline, QC-passed subjects only).
# BUG FIX: the original closed the call's parenthesis right after data_dir
# (with a trailing comma), leaving pipeline= and quality_checked= as stray,
# syntactically invalid lines.
abide = datasets.fetch_abide_pcp(
    data_dir="path/to/where/you/want/to/save/data",
    pipeline="cpac",
    quality_checked=True)

# Load phenotypic data into a pandas dataframe.
abide_pheno = pd.DataFrame(abide.phenotypic)

# One group label (scanning site) per subject, decoded from bytes to str;
# used later as the `groups` argument to GroupKFold.
# groups = abide_pheno.SITE_ID.unique()
groups = [site.decode() for site in abide_pheno.SITE_ID]
# + slideshow={"slide_type": "subslide"}
# Build the feature matrix / label vector and set up a site-grouped CV splitter.
import os

import numpy as np
from sklearn.model_selection import GroupKFold

import prepare_data

# Features are read from data_dir; intermediate products land in output_dir.
data_dir = os.path.join("path/to/where/you/saved/the/data")
output_dir = data_dir

# prepare_data returns the feature matrix X and the label vector y.
X, y = prepare_data.prepare_data(data_dir, output_dir)

# 10-fold cross-validation where folds are formed by the site `groups`,
# so the same site never appears in both train and test.
logo = GroupKFold(n_splits=10)
logo.get_n_splits(X, y, groups)
# + slideshow={"slide_type": "subslide"}
# Train one LinearSVC per GroupKFold split and report per-fold accuracy.
from sklearn.svm import LinearSVC
import statistics

print("----------------------------------------------------")
print("GroupKFold with Linear Support Vector Classification")
print("----------------------------------------------------")

l_svc = LinearSVC(max_iter=10000)
accuracy = []
for fold, (train_index, test_index) in enumerate(logo.split(X, y, groups), start=1):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]
    print("Training model ", fold)
    l_svc.fit(X_train, y_train)
    accuracy.append(l_svc.score(X_test, y_test))
    print("Finished training.\n")

# Mean accuracy of self.predict(X) with regard to y for each model
for model_number, fold_score in enumerate(accuracy, start=1):
    print("Accuracy score for model", model_number, " ", fold_score)

# Report the average accuracy for all models
print("\nAverage accuracy score for all models: ", statistics.mean(accuracy))
print("Maximum accuracy score of all models: ", max(accuracy))
print("Minimum accuracy score of all models: ", min(accuracy))
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
# + [markdown] slideshow={"slide_type": "slide"}
# 
| presentation/Presentation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: pvoutput
# language: python
# name: pvoutput
# ---
import os
import sys
import pandas as pd
import numpy as np
import time
import tables
import warnings
from datetime import date, datetime, timedelta
from pvoutput import *
from pvoutput.utils import get_logger, get_dates_already_downloaded, system_id_to_hdf_key
from pvoutput.daterange import safe_convert_to_date
# ## TODO
#
# * It's possible that some of the dates in the missing_dates table are not actually missing(!) (There was a period when there was a bug in the code, where it wouldn't always pass the correct requested date to PVOutput.org's API).
# +
# Paths and constants for the PVOutput.org download run.
BASE_PATH = os.path.expanduser('~/data/pvoutput.org/')
# OUTPUT_TIMESERIES_FILENAME = os.path.join(BASE_PATH, 'UK_PV_timeseries.hdf')
OUTPUT_TIMESERIES_FILENAME = os.path.join(BASE_PATH, 'UK_PV_timeseries_batch.hdf')  # destination HDF for downloaded time series
INPUT_PV_LIST_FILENAME = os.path.join(BASE_PATH, 'UK_PV_listing_metadata.hdf')  # system listing (read below under key 'metadata')
METADATA_FILENAME = os.path.join(BASE_PATH, 'UK_PV_metadata.csv')  # per-system metadata, indexed by system_id
PV_STATS_FILENAME = os.path.join(BASE_PATH, 'UK_PV_stats.csv')
# Requested date range; START_DATE is far in the past — presumably so the
# effective lower bound is each system's own first record (TODO confirm).
START_DATE = pd.Timestamp("1950-01-01")
END_DATE = pd.Timestamp("2019-08-20")
logger = get_logger(stream_handler=False)  # no stream handler: keep notebook output clean
# -
# Load the system listing and normalise capacity to a numeric kW column.
pv_systems = pd.read_hdf(INPUT_PV_LIST_FILENAME, 'metadata')
pv_systems['system_capacity_kW'] = pd.to_numeric(
    pv_systems['system_capacity'].str.replace('kW', ''))
pv_systems = pv_systems.drop('system_capacity', axis='columns')
pv_systems.head()

# Attach reporting interval, install date and location from the metadata CSV.
pv_metadata = pd.read_csv(METADATA_FILENAME, index_col='system_id')
pv_metadata.head()

metadata_cols = ['status_interval_minutes', 'install_date', 'latitude', 'longitude']
pv_systems_joined = pv_systems.join(pv_metadata[metadata_cols], how='left')

# +
# Filter 'bad' systems: keep only those reporting at least hourly and with
# a known location.
reports_hourly_or_better = pv_systems_joined['status_interval_minutes'] <= 60
pv_systems_filtered = pv_systems_joined[reports_hourly_or_better]
pv_systems_filtered = pv_systems_filtered.dropna(subset=['latitude', 'longitude'])
# -

len(pv_systems_filtered)
pv_systems_filtered.head()

# Largest systems first.
pv_systems_filtered = pv_systems_filtered.sort_values('system_capacity_kW', ascending=False)
pv_systems_filtered.head()
# +
# Links to website for manually checking data:
# ['https://pvoutput.org/intraday.jsp?sid={}&dt=20190809'.format(sid) for sid in pv_systems_filtered.index[10:15]]
# -
# Kick off the (long-running) batch download of every filtered system's
# time series into OUTPUT_TIMESERIES_FILENAME.
pv = PVOutput()
# +
logger.info('\n******* STARTING UP ************')
try:
    pv.download_multiple_systems_to_disk(
        system_ids=pv_systems_filtered.index,
        start_date=START_DATE,
        end_date=END_DATE,
        output_filename=OUTPUT_TIMESERIES_FILENAME)
except Exception as e:
    # Record the full traceback in the log file, then re-raise so the
    # failure is still visible in the notebook.
    logger.exception('Exception! %s', e)
    raise
| examples/download_pv_timeseries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import fiona
import numpy as np
import pandas as pd
from wit_tooling import *
from wit_tooling.database.io import DIO
from datetime import datetime
import pandas as pd
from bokeh.io import curdoc, output_notebook, show
from bokeh.layouts import layout, column, row, WidgetBox, gridplot
from bokeh.models import CheckboxGroup, Select, CategoricalColorMapper, ColumnDataSource,HoverTool, Label, SingleIntervalTicker, Slider, DatetimeTickFormatter, YearsTicker, Legend, TapTool, CustomJS, LegendItem, field
from bokeh.palettes import plasma
from bokeh.plotting import figure
from bokeh.transform import factor_cmap
from bokeh.events import DoubleTap
import os, sys, urllib
output_notebook()
dio = DIO.get()
# -
def inundation_by_catchment(start_year, end_year):
    """Collect inundation statistics for every named catchment.

    For each of the 26 catchment ids, fetches up to 5000 polygons,
    queries their inundation stats for [start_year, end_year), drops the
    placeholder '__' polygons and tags each row with the catchment name.

    Returns a single concatenated DataFrame (None if no catchment had a
    name).  The original duplicated the query code across a first-vs-rest
    `if source is None` split; both branches did the same work, so they
    are merged here and concatenated once at the end.
    """
    frames = []
    for catchment_id in range(1, 27):
        catchment_name = dio.get_name_by_id('catchments', catchment_id)[0][0]
        if catchment_name == '':
            continue  # unnamed slots in the catchments table are skipped
        rows = dio.get_polys_by_catchment_id(catchment_id, 5000)
        poly_list = list(np.array(rows)[:, 0])
        print(len(poly_list))
        start_time = datetime.now()
        frame = get_inundation(poly_list, start_year, end_year, 50, 1000)
        # '__' is a placeholder polygon name; exclude those rows.
        frame = frame.loc[frame.poly_name != '__']
        print("end query in", datetime.now() - start_time)
        frame['catchment'] = catchment_name
        frames.append(frame)
    if not frames:
        return None
    return pd.concat(frames, ignore_index=True)
# +
# Build the per-decade dataset: one inundation frame per decade, tagged with
# the decade's end year.  (Deduplicates the original first-vs-rest branches
# by collecting frames and concatenating once.)
decades = [(1990, 2000), (2000, 2010), (2010, 2020)]
frames = []
for start_year, end_year in decades:
    frame = inundation_by_catchment(start_year, end_year)
    frame['decade'] = end_year
    frames.append(frame)
data = pd.concat(frames, ignore_index=True)

# Convert duration fraction to percent, and rescale area for use as a
# scatter-marker size further down.
data.percent = data.percent * 100
data.area = data.area / 100 * np.pi
# -
# Seed the time-series sub-plot with the first polygon in the dataset.
poly_id = data.poly_id.iloc[0]
poly_data = get_area_by_poly_id(int(poly_id))
# Total polygon area (constant per polygon) sets the sub-plot's y-range;
# the column itself is then dropped so only the stacked series remain.
poly_area = poly_data.area.unique()[0]
poly_data = poly_data.drop(columns=['area'])
def plot_doc(doc):
    """Assemble the interactive Bokeh document.

    Main scatter: inundation occurrence (years) vs duration (percent) per
    polygon, coloured by catchment and sized by area.  A stacked-area
    sub-plot shows the selected polygon's cover classes over time.
    Widgets: decade selector, catchment checkboxes, polygon drop-down.
    """
    # `source` drives the scatter; `single_source` the per-polygon time
    # series; `poly_id_source` receives tap-selected polygon ids (via JS).
    source = ColumnDataSource(data=data[data.decade==2020])
    single_source = ColumnDataSource(data=poly_data)
    poly_id_source = ColumnDataSource(data=dict(poly_id=[]))
    catchment_list = list(data.catchment.unique())
    color_map = plasma(len(catchment_list))
    plot = figure(y_range=(0, 100), x_range=(0, 11), title='Inundation', tools="tap", plot_height=600, plot_width=500, sizing_mode='scale_both')
    plot.xaxis.ticker = SingleIntervalTicker(interval=1)
    plot.xaxis.axis_label = "Occurence in Years"
    plot.yaxis.ticker = SingleIntervalTicker(interval=10)
    plot.yaxis.axis_label = "Percent of Duration"
    # Large pale watermark showing the active decade; updated on select.
    label = Label(x=1.1, y=18, text='2010-2020', text_font_size='70pt', text_color='#eeeeee')
    plot.add_layout(label)
    color_mapper = factor_cmap('catchment', palette=color_map, factors=catchment_list)
    cc = plot.circle(
        x='wet_years',
        y='percent',
        size='area',
        source = source,
        fill_color=color_mapper,
        fill_alpha=0.5,
        line_color='#7c7e71',
        line_width=0.5,
        line_alpha=0.5,
    )
    catchment_legend = Legend(items=[LegendItem(label=field('catchment'), renderers=[cc])], label_text_font_size = '10pt', location="top_left")
    # this one is not working for a single glyph
    #catchment_legend.click_policy="hide"
    plot.add_layout(catchment_legend, 'left')
    def poly_update(attrname, old, new):
        # Server-side callback: refresh the time-series sub-plot when a
        # polygon is chosen in the drop-down.  (`sub_plot` and
        # `poly_select` are closed over; they are defined further down.)
        poly_id = poly_select.value
        if poly_id == '':
            return
        poly_data = get_area_by_poly_id(int(poly_id))
        poly_area = poly_data.area.unique()[0]
        poly_data = poly_data.drop(columns=['area'])
        sub_plot.y_range.end = poly_area
        sub_plot.x_range.start = poly_data.time.min()
        sub_plot.x_range.end = poly_data.time.max()
        single_source.data = poly_data
        sub_plot.title.text = data.poly_name.loc[data.poly_id == int(poly_id)].iloc[0]
    poly_select = Select(title="Polygons", value='', options=[''], height=50, width=100, sizing_mode="fixed")
    poly_select.on_change('value', poly_update)
    # Browser-side callback: mirror tapped scatter indices into the polygon
    # drop-down's options (runs in the client, no server round-trip).
    js_code = """
    const inds=cb_obj.indices;
    var data_s = source.data;
    var data_d = target.data;
    data_d['poly_id'] = [];
    for (var i=0; i<inds.length; i++) {
        data_d['poly_id'].push(String(data_s['poly_id'][inds[i]]));
    }
    select.options = data_d['poly_id']
    select.value = data_d['poly_id'][0]
    """
    js_callback = CustomJS(args={'source': source, 'target': poly_id_source, 'select': poly_select}, code=js_code)
    source.selected.js_on_change('indices', js_callback)
    plot.add_tools(HoverTool(tooltips=[('Id', "@poly_id"), ('Polygon', "@poly_name"), ("Catchment", "@catchment")],
                             show_arrow=False, point_policy='follow_mouse'))
    def select_update(attrname, old, new):
        # Re-filter the scatter to the chosen decade + checked catchments,
        # re-selecting any polygons that are still in the drop-down.
        decade = int(select.value)
        catchments = []
        for i in checkbox_group.active:
            catchments.append(catchment_list[i])
        label.text = '-'.join([str(decade-10), str(decade)])
        refreshed_data = data.loc[(data.decade==decade) & data.catchment.isin(catchments)].reset_index()
        indices = refreshed_data.index[refreshed_data.poly_id.astype(str).isin(poly_select.options)].tolist()
        source.data = refreshed_data
        color_map = plasma(len(catchments))
        color_mapper = factor_cmap('catchment', palette=color_map, factors=catchments)
        cc.glyph.fill_color=color_mapper
        source.selected.indices = indices
    select = Select(title="Decade", value='2020', options=['2000', '2010', '2020'], height=50, width=100, sizing_mode="fixed")
    select.on_change('value', select_update)
    checkbox_group = CheckboxGroup(labels=catchment_list, active=list(np.arange(len(catchment_list))), height=600, width=300, sizing_mode="scale_height")
    checkbox_group.on_change('active', select_update)
    controls = column(select, checkbox_group, poly_select, height=700, width=200, sizing_mode='fixed')
    sub_plot = figure(y_range=(0, poly_area), x_range=(poly_data['time'].min(), poly_data['time'].max()), title=data.poly_name.iloc[0],
                      plot_height=100, plot_width=900, sizing_mode='stretch_width')
    sub_plot.xaxis.formatter = DatetimeTickFormatter()
    sub_plot.xaxis.ticker = YearsTicker(interval=1)
    sub_plot.yaxis.axis_label = "Area (hectare)"
    # Colour palette for the five cover classes, open water -> bare soil.
    pal = [ '#030aa7',
            '#04d9ff',
            '#3f9b0b',
            '#e6daa6',
            '#60460f'
          ]
    v_stack = sub_plot.varea_stack(['open water', 'wet', 'green veg', 'dry veg', 'bare soil'], x='time',
                                   color=pal, source=single_source, alpha=0.6)
    # Legend items listed top-down, i.e. reverse of the stacking order.
    legend = Legend(items=[
        ("bare soil", [v_stack[4]]),
        ("dry veg", [v_stack[3]]),
        ("green veg", [v_stack[2]]),
        ("wet", [v_stack[1]]),
        ("open water", [v_stack[0]]),
        ], location="top_left")
    sub_plot.add_layout(legend, 'left')
    grid = gridplot([plot, sub_plot], ncols=1, plot_height=400, plot_width=600, sizing_mode='scale_width')
    layouts = layout([
        [controls, grid],
        ], sizing_mode='scale_both')
    doc.add_root(layouts)
    doc.title = "Inundataion"
def remote_jupyter_proxy_url(port):
    """Return the URL at which a local Bokeh server on *port* is reachable
    through the JupyterHub proxy.

    If port is None the caller is asking for the origin host (used for the
    websocket Origin header), so only the public hostname is returned.
    """
    # Imported here explicitly: the notebook's top-level `import urllib`
    # does not guarantee the `urllib.parse` submodule is loaded, so the
    # original's `urllib.parse.urlparse` could raise AttributeError.
    from urllib.parse import urljoin, urlparse

    base_url = "https://app.sandbox.dea.ga.gov.au/"
    if port is None:
        # Only the public hostname is wanted for the origin header.
        return urlparse(base_url).netloc
    service_url_path = os.environ['JUPYTERHUB_SERVICE_PREFIX']
    user_url = urljoin(base_url, service_url_path)
    return urljoin(user_url, 'proxy/%d' % port)
show(plot_doc, notebook_url=remote_jupyter_proxy_url)
| examples/inundation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dictionaries for counting words
#
# A common task in text processing is to produce a count of word
# frequencies. While NumPy has a builtin histogram function for doing
# numerical histograms, it won't work out of the box for counting discrete
# items, since it is a binning histogram for a range of real values.
#
# But the Python language provides very powerful string manipulation
# capabilities, as well as a very flexible and efficiently implemented
# builtin data type, the *dictionary*, that makes this task a very simple
# one.
#
# In this problem, you will need to count the frequencies of all the words
# contained in a compressed text file supplied as input. Load and read the
# data file `data/HISTORY.gz` (without uncompressing it on the filesystem
# separately), and then use a dictionary to count the frequency of each word
# in the file. Then, display the 20 most and 20 least frequent words in
# the text.
#
# ## Hints
#
# - To read the compressed file `HISTORY.gz` without uncompressing it
# first, see the gzip module.
# - Consider 'words' as the result of splitting the input text into
# a list, using any form of whitespace as a separator. This is
# obviously a very naive definition of 'word', but it shall suffice
# for the purposes of this exercise.
# - Python strings have a `.split()` method that allows for very
# flexible splitting. You can easily get more details on it in
# IPython:
#
#
# ```
# In [2]: a = 'somestring'
#
# In [3]: a.split?
# Type: builtin_function_or_method
# Base Class: <type 'builtin_function_or_method'>
# Namespace: Interactive
# Docstring:
# S.split([sep [,maxsplit]]) -> list of strings
#
# Return a list of the words in the string S, using sep as the
# delimiter string. If maxsplit is given, at most maxsplit
# splits are done. If sep is not specified or is None, any
# whitespace string is a separator.
# ```
#
# The complete set of methods of Python strings can be viewed by hitting
# the TAB key in IPython after typing `a.`, and each of them can be
# similarly queried with the `?` operator as above. For more details on
# Python strings and their companion sequence types, see
# [the Python documentation](https://docs.python.org/3/library/stdtypes.html#sequence-types-list-tuple-range).
#
| exercises/WordFrequencies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Update sklearn to prevent version mismatches
# # !conda install scikit-learn
# # !conda update scikit-learn
# # !conda install joblib
# # !conda update joblib
# -
import pandas as pd
import numpy as np
import pprint
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# # Read the CSV
# Load the home-equity loan dataset.  Use a forward-slash path: the original
# "data\HomeEquityLoans.csv" only worked because \H happens not to be an
# escape sequence, and a backslash path breaks on non-Windows systems.
df = pd.read_csv("data/HomeEquityLoans.csv")
pprint.pprint(df.shape) # (5960, 13)
print(df.columns)
print(df.dtypes) # int64 float64 object
# # Data Preprocessing
# Rename the 13 raw columns to descriptive lowercase names; column 0
# ("isdefault") is the classification target used later as y.
df.columns = ["isdefault", "loanrequestamount", "mortgageamountdue", "currentpropertyvalue",
              "applyloanreason", "occupation", "yearsatpresentjob", "derogatoryreportscount",
              "delinquentcreditlinescount", "oldestcreditlinemonthcount", "recentcreditinquirycount",
              "creditlinecount", "debttoincomeratio"]
# +
# Inspect missingness, then handle it: one-hot encode the categorical
# columns (NaN becomes all-zero dummy rows) and mean-impute the numeric ones.
print(df.isna().sum().sum())
print(df.isna().sum())
print(df.columns)
print(np.array(np.unique(df["loanrequestamount"], return_counts=True)).T)
print(df['occupation'].value_counts()) # not including isna()
print(df['applyloanreason'].isna().sum())
df = pd.get_dummies(df) # for categorical features : remove isna() => 0,0
print(df.columns)
print(df['applyloanreason_DebtCon'].value_counts())
print(df['applyloanreason_DebtCon'].isna().sum())
print(df['applyloanreason_HomeImp'].value_counts())
print(df['applyloanreason_HomeImp'].isna().sum())
# Mean imputation of remaining (numeric) NaNs, in place.
df.fillna(df.mean(), inplace=True) # for numerical features: remove 252 isna() => mean
print(df.isna().sum().sum())
print(df.isna().sum())
# +
# ###### Outlier, Cap Floor of each column(feature and label):??
# ###### count
# ### bincount() of a numerical column
# print(np.bincount(df["isdefault"])) # count # only for int64 float64, not object
# print(np.bincount(df["loanrequestamount"]))
# # print(np.bincount(df["applyloanreason"]))
# ### unique count of a numerical column
# print(np.array(np.unique(df["isdefault"], return_counts=True)).T) # value+count # only for int64 float64, not object
# print(np.array(np.unique(df["loanrequestamount"], return_counts=True)).T)
# ## print(np.array(np.unique(df["applyloanreason"], return_counts=True)).T) # ??how to bincount() a categorical column/object data type ''occupation?????
# ### value_counts() of a categorical column
# print(df['occupation'].value_counts()) # not including isna()
# ###### Outlier, Cap Floor of each column(feature and label):??
# ### dummy: data type change: object => uint8, NaN is gone<1>
# ## categorical: "applyloanreason" "occupation"
# print("============before dummy============ ")
# print(df.shape)
# print(df.isna().sum())
# print(df.dtypes)
#<a> df = pd.get_dummies(df) # for all object columns # pd.get_dummies(df,columns=["applyloanreason"])
# # pd.get_dummies(brain[["size", "gender", "age"]])
# print("============aftger dummy============")
# print(df.shape)
# print(df.isna().sum())
# print(df.dtypes)
# ###### NaN : Impute (NaN, infite?? ) or Drop # https://datascience.stackexchange.com/questions/11928/valueerror-input-contains-nan-infinity-or-a-value-too-large-for-dtypefloat32
# ### check
# df.isna()
# df.isna().sum().sum()
# df.isna().sum()
# ## np.where(np.isnan(df)) # ???
# ### fill with mean: the mean is mean of each column itself?? NaN is gone<2>
# print("============before fillna()============")
# print(df.isna().sum())
#<b> df.fillna(df.mean(), inplace=True) # for all data types # fill all numerical columns, string colummns no change!!
# print("============after fillna()============")
# print(df.isna().sum())
# ### Impute(mean, mode, special value) : missing values often encoded as blanks, NaNs or other placeholders
# # from sklearn.impute import SimpleImputer # Univariate feature imputation: imputes values in the i-th feature dimension using only non-missing values in that feature dimension
# # imp = SimpleImputer(missing_values='NaN', strategy='most_frequent') # ???# missing_values=-1/np.nan strategies: ['mean', 'median', 'most_frequent', 'constant'] categorical-- 'most_frequent' or 'constant'
# # print(imp.fit_transform(df))
# # from sklearn.experimental import enable_iterative_imputer # Multivariate feature imputation ??
# # from sklearn.impute import IterativeImputer
# ### Drop: after "NaN is gone<1>" and "NaN is gone<2>" no NaN any more, so no need to drop
# print("shpae before Drop",df.shape)
# df = df.dropna(axis='columns', how='all') # Drop the null columns where all values are null
# df = df.dropna() # Drop the null rows where some value is null
# print("shpae after Drop",df.shape)
# ##<1># Remove Space for `FALSE POSITIVE` category -- remove a class of label
# # mask = df["koi_disposition"] == "FALSE POSITIVE"
# # df.loc[mask, "koi_disposition"] = "False_Positive"
# # df["koi_disposition"]
# ##<2># LabelEncoder() change 'a','b','c' to 0,1,2 -- has order, BinaryEncoding() change 'a','b' to two columns 0,1-- no order
# ### to_categorical() One Hot Encodeing change 'a','b','c' to one column [1,0,0] [0,1,0] [0,0,1] -- no order
# # from sklearn.preprocessing import LabelEncoder
# # label_encoder = LabelEncoder()
# # label_encoder.fit(y_train)
# # encoded_y_train = label_encoder.transform(y_train)
# # encoded_y_test = label_encoder.transform(y_test)
# ###### Descriptive Statistics:rigorous statistical explanation about the necessary assumptions and interpretations
# # df.head()
# ### convert label: 1 to "Default", 0 to "Not Default"
# # df['isdefault'] = df['isdefault'].map({1: "Default", 0:"Not Default"}) # ??
# ###### save cleaned data
# # df.to_csv(file_name, sep='\t', encoding='utf-8')
# df.to_csv("data\HomeEquityLoans_cleaned.csv", index=False)
# -
# # Create X, y datasets
# Use `"isdefault"` as y; all remaining (dummified) columns form X.
from sklearn.model_selection import train_test_split
X = df.drop(columns=["isdefault"])
y = df["isdefault"]
# y = brain["weight"].values.reshape(-1, 1)
print(X.shape) # (5960, 18) 12+6(dummies: 1 applyloanreason + 5 occupation)=18
# # Feature Selection
# Correlation-based selection: rank features by their (signed) correlation
# with the target and keep the top k; a full heatmap is saved for visual
# inspection of inter-feature correlation.
k=15 # remove 18-15 =3 features: loanrequestamount, occupation_Office,oldestcreditlinemonthcount
import seaborn as sns
from sklearn.preprocessing import LabelEncoder

# BUG FIX: the original did `data = df` (an alias, although its comment
# claimed "a copy of df"), so the label-encoding write below silently
# mutated df.  Use a real copy; downstream results are unchanged because
# "isdefault" is already 0/1 and encodes to itself.
data = df.copy()
# Encode the label column (column 0) so corr() sees only numeric data.
labelencoder1_corr = LabelEncoder().fit(data.iloc[:, 0])
data.iloc[:, 0] = labelencoder1_corr.transform(data.iloc[:, 0])

corrmat = data.corr()  # pairwise correlations of all columns incl. target
top_corr_features = corrmat.index
plt.figure(figsize=(20, 20))
g = sns.heatmap(data[top_corr_features].corr(), annot=True, cmap="RdYlGn")
fig = g.get_figure()
fig.savefig("output/output.png")  # forward slash: portable across OSes

print("features correlated to the target variable 'isdefault'\n",corrmat["isdefault"].sort_values(ascending=False))
# index[0] is the target's self-correlation (1.0); skip it, take the next k.
selectedColums = corrmat["isdefault"].sort_values(ascending=False).index[1:k+1]
print("selectedColums",selectedColums)
X = df[selectedColums]
# # Create a Train Test Split
# Stratified split keeps the default/non-default class balance in both sets.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, stratify=y)
print(X_train.shape)
print(X_test.shape)
# NOTE(review): backslash path is Windows-only — consider "data/..." instead.
X_train.to_csv("data\HomeEquityLoans_X_train.csv", index=False) # for doing scaling to test data
# # Transformation (normalization)
#
# Scale the data using the `MinMaxScaler`.
# Fit on the training set only, then apply the same scaling to the test set
# (prevents test-set leakage into the scaler).
from sklearn.preprocessing import MinMaxScaler
X_scaler = MinMaxScaler().fit(X_train)
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
print(X_train_scaled[0:1])
print(X_test_scaled[0:1])
# which models need y scaled as well (e.g. linear regression)? — open question
# print(X_test_scaled[0:5,:])
# # Train + Evaluate the Models
# Three models: `LogisticRegression`, `Support Vector Classification`, `Deep Neural Network`
# +
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report
####### ======= SickitLearn Model ======= #######
#### <<Create SickitLearn Model -- lr, svc>> ####
lr = LogisticRegression()
svc = SVC(kernel='linear')
#### <<Train + Predict + Evaluation -- lr, svc>> ####
models = {'Logistic Regression': lr,
'Support Vector Machine for classification': svc,
}
print("=========== <1> Train + Predict + Evaluate the Performance Of SickitLearn Model: ===========\n")
print("=============================================================================================")
accuracy_scores=[]
for k in models:
models[k].fit(X_train_scaled, y_train)
print(f"{k} model Training Data Score: {models[k].score(X_train_scaled, y_train)}")
print(f"{k} model Testing Data Score: {models[k].score(X_test_scaled, y_test)}")
predictions = models[k].predict(X_test_scaled)
print(f"{k} model Accuracy Score: {accuracy_score(y_test, predictions)}")
accuracy_scores.append(accuracy_score(y_test, predictions))
print(f"{k} model Classification Report:\n {classification_report(y_test, predictions,target_names=['1', '0'] )}")
print("=============================================================================================")
# # https://scikit-learn.org/stable/modules/model_evaluation.html 3.3. Metrics and scoring: quantifying the quality of predictions
####### ======= Deep Learning Models in Keras ======= #######
#### <<Define Structure for Deep Learning Model >> ####
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense
label_encoder1 = LabelEncoder() # 1)need first LabelEncoder(): '','',''=> 0,1,2
label_encoder1.fit(y_train) # label_encoder1: [fit:y_train => transform: y_train, y_test]
encoded_y_train = label_encoder1.transform(y_train) # label_encoder.transform(y_train).astype('float64')
encoded_y_test = label_encoder1.transform(y_test)
from keras.utils import to_categorical # Converts a class vector (integers) to binary class matrix.
y_train_categorical = to_categorical(encoded_y_train) # 2)need Second to_cateforical() otherwise ValueError: invalid literal for int() with base 10: 'FALSE POSITIVE':need LabelEnvoder() first!!
y_test_categorical = to_categorical(encoded_y_test)
from keras import models
from keras import layers
number_of_features=X.shape[1]
def create_network(optimizer='rmsprop'):
    """Build and compile a small dense softmax classifier.

    Two ReLU hidden layers of 16 units feed a 2-unit softmax output.
    Compiled with categorical cross-entropy, so labels must be one-hot
    encoded (LabelEncoder + to_categorical, as done above).
    """
    net = models.Sequential()
    # Input layer width comes from the module-level feature count.
    for dense_layer in (
        layers.Dense(units=16, activation='relu', input_shape=(number_of_features,)),
        layers.Dense(units=16, activation='relu'),
        layers.Dense(units=2, activation='softmax'),
    ):
        net.add(dense_layer)
    net.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
    return net
# loss='categorical_crossentropy': Cross-entropy can be calculated for multiple-class classification(type3). it requires the classes have been one hot encoded[1)+2)], meaning that there is a binary feature for each class value
#### <<Create >> ####
deep = create_network()
# print(deep.summary())
#### <<Train>> ####
print("\n\n====== <2> Train + Predict + Evaluate the Performance Of Deep Learning Models in Kera ======\n")
print("=============================================================================================")
deep.fit(X_train_scaled, y_train_categorical)
#### <<Predict>> ####
# encoded_predictions = deep.predict_classes(X_test_scaled[:5]) # model.predict_classes()--Karas vs model.predict()--ML ScikitLearn
# prediction_labels = label_encoder1.inverse_transform(encoded_predictions)
# print("encoded_predictions",encoded_predictions)
# print(f"Predicted classes: {prediction_labels}")
# print(f"Actual Labels: {list(y_test[:5])}") # # # Take number correct over total to get "score" for grading
#### <<Evaluate>> #### not use prediction result
# 1) keras.models.Sequence:
print("---------------------------------------------------------------------------------------------")
model_loss, model_accuracy = deep.evaluate( X_test_scaled, y_test_categorical, verbose=2)
accuracy_scores.append(model_accuracy)
# print("%s: %.2f" % (deep.metrics_names[1], model_accuracy)) # metrics_names: ['loss', 'accuracy']
print(f"{deep.metrics_names[1]}: {model_accuracy}" ) # metrics_names: ['loss', 'accuracy']
print("=============================================================================================\n")
print("\n\n============================== <3> Summary--Accuracy Score ==================================\n")
print(f" LogisticRegression: {accuracy_scores[0]} ")
print(f" Support Vector Classification: {accuracy_scores[1]} ")
print(f" Deep Neural Network: {accuracy_scores[2]} ")
print("\n============================================END==============================================\n")
# # 2) Manual k-Fold Cross Validation : X_test_scaled vs X_train_scaled?? y_test vs y_test_categorical?y_test
# from keras.models import Sequential
# from keras.layers import Dense
# from sklearn.model_selection import StratifiedKFold #10-fold cross validation
# kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=20) # define 10-fold cross validation test harness # n_splits=10
# number_of_features=X.shape[1]
# cvscores = []
# def create_network(optimizer='rmsprop'):
# network = models.Sequential()# Start neural network
# network.add(layers.Dense(units=16, activation='relu', input_shape=(number_of_features,)))# Add fully connected layer with a ReLU activation function
# network.add(layers.Dense(units=16, activation='relu'))
# network.add(layers.Dense(units=3, activation='softmax')) #<type3>(units=3, activation='softmax')+loss='categorical_crossentropy'
# network.compile(loss='categorical_crossentropy', # 3)'categorical_crossentropy' require LabelEncoder()+to_categorical()
# optimizer=optimizer, # Optimizer
# metrics=['accuracy']) # Accuracy performance metric
# return network
# for train, test in kfold.split(X_test_scaled, y_test ):
# model = create_network()
# # model.add(Dense(12, input_dim=8, activation='relu'))
# # model.add(Dense(8, activation='relu'))
# # model.add(Dense(1, activation='sigmoid'))
# # model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.fit(X_test_scaled[train], y_test[train], epochs=150, batch_size=10, verbose=0) #?? ValueError: Error when checking target: expected dense_123 to have shape (3,) but got array with shape (1,)
# scores = model.evaluate(X[test], Y[test], verbose=0)
# print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
# cvscores.append(scores[1] * 100)
# print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))
# -
# # Hyperparameter Tuning + Evaluation
#
# Use `GridSearchCV` to tune the model's parameters
# +
# GridSearchCV: lets you combine an estimator with a grid search preamble to tune hyper-parameters. The method picks the optimal parameter from the grid search and uses it with the estimator selected by the user. GridSearchCV inherits the methods from the classifier, so yes, you can use the .score, .predict, etc..
# Cross validation vs GridSearchCV: https://stats.stackexchange.com/questions/405624/difference-between-cross-validation-gridsearchcv-and-does-cross-validation-refer
# # Cross Validation(CV) or K-Fold Cross Validation (K-Fold CV) is very similar to what you already know as train-test split. When people refer to cross validation they generally mean k-fold cross validation. In k-fold cross validation what you do is just that you have multiple(k) train-test sets instead of 1. This basically means that in a k-fold CV you will be training your model k-times and also testing it k-times. The purpose of doing this is that in a single train-test split, the test part of your data that you chose might be really easy to predict and your model will perform extremely well on it but not exactly so for your actual test sets which ultimately will not be a good model. Hence, you need to use a k-fold CV method. For example, in a 4 fold cross-validation, you will divide your training data into 4 equal parts. In the first step, you keep one part out of the 4 as the set you will test upon and train on the remaining 3. This one part you left out is called the validation set and the remaining 3 becomes your training set. You keep repeating this 4 times but you will be using a different part out of the 4 each time to test your model upon. K-fold cross validation can essentially help you combat overfitting too. There are different ways to do k-fold cross validation like stratified-k fold cv, time based k-fold cv, grouped k-fold cv etc which will depend on the nature of your data and the purpose of your predictions. You can google more about these methods. A method that people generally use is that, for each of the k-folds, they also make predictions for the actual test set and later on take the mean of all the k predictions to generate the final predictions.
# # GridSearchCV is a method used to tune the hyperparameters of your model (For Example, max_depth and max_features in RandomForest). In this method, you specify a grid of possible parameter values (For Example, max_depth = [5,6,7] and max_features = [10,11,12] etc.). GridSearch will now search for the best set of combination of these set of features that you specified using the k-fold cv approach that I mentioned above i.e. it will train the model using different combinations of the above mentioned features and give you the best combination based on the best k-fold cv score obtained (For Example, Trial1: max_depth = 5 and max_features = 10 and and K-fold CV Accuracy Score Obtained is 80%, Trial2: max_depth=5 and max_features=11 and K-fold CV Accuracy Score Obtained is 85% and so on...) GridSearch is known to be a very slow method of tuning your hyperparameters and you are much better off sticking with RandomSearchCV or the more advanced Bayesian Hyperparameter Optimization methods (you have libraries like skopt and hyperopt in python for this). You can google more about these methods too.
# cv : integer or cross-validation generator, default=3
# # If an integer is passed, it is the number of folds.
# # Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
####### ======= scikit-learn models ======= #######
#### <<Tune scikit-learn models -- lr, svc>> ####
# 3-fold cross-validated grid search over the logistic-regression hyper-parameters.
lr_grid = {'C': [1, 5, 10],
           'penalty': ["l1", "l2"]}
gridlr = GridSearchCV(lr, lr_grid, cv=3, verbose=3)
# 3-fold cross-validated grid search over the SVM hyper-parameters.
svm_grid = {'C': [1, 5, 10],
            'gamma': [0.0001, 0.001, 0.01]}
gridsvm = GridSearchCV(svc, svm_grid, cv=3, verbose=3)
# Registry of the tuned searches, keyed by display name (used by the loop below).
gridmodels = {
    'Logistic Regression': gridlr,
    'Linear Support Vector for Classification': gridsvm,
}
#### <<Train + Predict + Evaluate GridSearchCV for scikit-learn models -- lr, svc>> ####
print("\n\n==== <1> Train + Predict + Evaluate the Performance Of Tuned(CVed) ScikitLearn GridSearchCV for SickitLearn Model ==")
print("\n=======================================================================================================================")
best_scores_tuned = []   # best CV accuracy per model, in iteration order
best_params_tuned = []   # matching best hyper-parameter dicts
for k, grid in gridmodels.items():
    print(grid)
    grid.fit(X_train_scaled, y_train)
    print(f"{k} model Training Data Score: {grid.score(X_train_scaled, y_train)}")
    print(f"{k} model Testing Data Score: {grid.score(X_test_scaled, y_test)}")
    # Predict with the refit best estimator found by the grid search.
    predictions = grid.predict(X_test_scaled)
    print(f"{k} model Accuracy Score: {accuracy_score(y_test, predictions)}")
    print(f"{k} model Classification Report: {classification_report(y_test, predictions,target_names=[ '0','1'] )}")
    print("-----------------------------------------------------------------------------------------------------------------------")
    # Tune Info 1) best cross-validation score and the parameters that produced it
    print(f"{k} model Best score: {grid.best_score_} using best params {grid.best_params_}" )
    best_scores_tuned.append(grid.best_score_)
    best_params_tuned.append(grid.best_params_)
    # Tune Info 2) mean/std CV score for every parameter combination tried
    means = grid.cv_results_['mean_test_score']
    stds = grid.cv_results_['std_test_score']
    params = grid.cv_results_['params']
    for mean, stdev, param in zip(means, stds, params):
        print(f"mean_test_score: {mean} (std_test_score: {stdev}) with params: {param}" )
print("\n=======================================================================================================================")
####### ======= Deep Learning Models in Keras ======= #######
#### <<Define Structure for Deep Learning Model>> ####
def create_network(optimizer='rmsprop'):
    """Build and compile a small feed-forward classifier.

    Args:
        optimizer: name of the Keras optimizer used to compile the model.

    Returns:
        A compiled ``Sequential`` network: two 16-unit ReLU hidden layers and
        a 2-unit softmax output, trained against categorical cross-entropy.
    """
    # NOTE(review): `models`, `layers`, and `number_of_features` come from the
    # surrounding notebook scope.
    net = models.Sequential()
    net.add(layers.Dense(units=16, activation='relu', input_shape=(number_of_features,)))
    net.add(layers.Dense(units=16, activation='relu'))
    net.add(layers.Dense(units=2, activation='softmax'))  # 2-class output
    net.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
    return net
#### <<Create >> ####
# deep = create_network() # no need to create -- KerasClassifier builds it from build_fn
#### <<Tune Deep Learning Model>> ####
print("\n\n======== <2> Evaluate the Performance Of Tuned(CVed) ScikitLearn GridSearchCV for Deep Learning Model in Keras ========\n")
print("=======================================================================================================================")
from keras.wrappers.scikit_learn import KerasClassifier
# Wrap the Keras model so it can be used by scikit-learn's GridSearchCV.
deepneuralnetwork = KerasClassifier(build_fn=create_network, verbose=0)
# Hyper-parameter grid for the training algorithm (not the architecture).
epochs = [5, 10]
batches = [5, 10, 100]
optimizers = ['rmsprop', 'adam']
param_grid = dict(optimizer=optimizers, epochs=epochs, batch_size=batches)
griddeep = GridSearchCV(estimator=deepneuralnetwork, cv=3, param_grid=param_grid, verbose=3)
#### <<Train>> ####
griddeepfit = griddeep.fit(X_train_scaled, y_train)
#### <<Predict>> ####
predictions = griddeep.predict(X_test_scaled[:5]) # prediction result is already un-encoded; no inverse_transform() needed
print("predictions",predictions)
print(f"Actual Labels: {list(y_test[:5])}")
#### <<Evaluate>> ####
# NOTE: GridSearchCV exposes no .summary()/.evaluate(); use best_score_/cv_results_.
print("-----------------------------------------------------------------------------------------------------------------------")
# Tune Info 1) best CV score and the winning hyper-parameters
print(f"Deep Neural Network model Best score: {griddeep.best_score_} using best params {griddeep.best_params_}")
best_scores_tuned.append(griddeep.best_score_)
best_params_tuned.append(griddeep.best_params_)
# Tune Info 2) per-combination CV results for the deep model.
# BUG FIX: this previously read gridmodels[k].cv_results_, where `k` was the
# stale loop variable left over from the scikit-learn section, so the SVM's CV
# results were printed here instead of the deep model's. Read griddeep directly.
means = griddeep.cv_results_['mean_test_score']
stds = griddeep.cv_results_['std_test_score']
params = griddeep.cv_results_['params']
for mean, stdev, param in zip(means, stds, params):
    print(f"mean_test_score: {mean} (std_test_score: {stdev}) with params: {param}" )
print("=======================================================================================================================")
print("\n\n======================== <3> Summary -- Best score + best params ======================================================\n")
print(f" LogisticRegression: {best_scores_tuned[0]} {best_params_tuned[0]} ")
print(f" Support Vector Classification: {best_scores_tuned[1]} {best_params_tuned[1]}")
print(f" Deep Neural Network: {best_scores_tuned[2]} {best_params_tuned[2]}")
print("\n======================================================END==============================================================\n")
# -
# # Inference/Prediction
# +
# All three tuned searches, keyed by display name (now including the deep model).
gridmodels = {
    'Logistic Regression': gridlr,
    'Linear Support Vector Machine': gridsvm,
    'Deep Neural Network Learning': griddeep,
}
gridlr_predictions = gridlr.predict(X_test_scaled)
gridsvm_predictions = gridsvm.predict(X_test_scaled)
griddeep_predictions = griddeep.predict(X_test_scaled)
####### Predict
# Ensemble methods: meta-algorithms that combine several machine learning
# techniques into one predictive model in order to decrease variance (bagging),
# bias (boosting), or improve predictions (stacking). Here: simple majority vote.
ensemble_predictions = []
for votes in zip(gridlr_predictions, gridsvm_predictions, griddeep_predictions):
    ballot = list(votes)
    # Keep the label with the highest count among the three model predictions.
    ensemble_predictions.append(max(set(ballot), key=ballot.count))
# This mirrors the Zero Rule idea: fall back to the most frequent observed label.
###### Result
print("gridlr_predictions",gridlr_predictions) # <class 'numpy.ndarray'>
print("gridsvm_predictions",gridsvm_predictions) # <class 'numpy.ndarray'>
print("griddeep_predictions",griddeep_predictions) # <class 'numpy.ndarray'>
print("ensemble_predictions", ensemble_predictions ) # <class 'list'>
# -
# # Save the Model
# save fitted models to file
# FIX: `sklearn.externals.joblib` was deprecated in scikit-learn 0.21 and
# removed in 0.23; prefer the standalone `joblib` package, falling back to the
# old location for legacy installs.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
joblib.dump(gridsvm, 'pickles/gridsvm.pkl')  # .pkl = serialized (pickled) estimator
joblib.dump(gridlr, 'pickles/gridlr.pkl')
joblib.dump(griddeep, 'pickles/griddeep.pkl')
# # Inference/Prediction
# +
######## Input
#<input1>
# import pandas as pd
# df = pd.read_csv("exoplanet_data.csv")
# from sklearn.model_selection import train_test_split
# X = df.drop(columns=["koi_disposition"])
# y = df["koi_disposition"]
# X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1, stratify=y)
# from sklearn.preprocessing import MinMaxScaler
# X_scaler = MinMaxScaler().fit(X_train)
# X_train_scaled = X_scaler.transform(X_train)
# X_test_scaled = X_scaler.transform(X_test)
# #<iput2>
# import pandas as pd
# testing_data = pd.read_csv('test_data.csv')
# y_test = testing_data["koi_disposition"]
# X_test = testing_data.drop(columns=["koi_disposition"])
# from sklearn.preprocessing import MinMaxScaler
# scaler = MinMaxScaler()
# X_test= scaler.fit_transform(X_test)
# X_test = X_test[[]]
# #<input3>
# X_test_scaled=[[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,
# 5.59049805e-04, 1.63201552e-05, 9.99983680e-01, 8.56415545e-03,
# 4.58934961e-03, 9.95410650e-01, 2.39073071e-03, 2.86415712e-03,
# 9.93578471e-01, 6.74264488e-03, 9.45544554e-03]]
# X_test_scaled = np.asarray(X_test_scaled, dtype=np.float32)
# X_test_scaled=[[0. , 0. , 0.21428571, 0.16399133, 1. ,
# 0. , 0. , 0. , 0. , 0.45070423,
# 0.0672242 , 1. , 0. , 0.09795792, 0.63414634]]
# X_test_scaled = np.asarray(X_test_scaled, dtype=np.float32)
# #<input4> randomely generate X_test_scaled(, 40) and y_test(,1) Q7??
# rnd = np.random.RandomState(seed=123)# Setting a random seed for reproducibility
# X = rnd.uniform(low=0.0, high=1.0, size=(1, 15)) # a 10 x 5 array# Generating a random array #TypeError: 'str' object cannot be interpreted as an integer
# X_test_scaled2 = X
# FIX: `sklearn.externals.joblib` was removed in scikit-learn 0.23; use the
# standalone `joblib` package with a fallback for legacy installs.
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
# Reload the fitted searches saved earlier (joblib.load also accepts a filename,
# but the original file-handle form is kept).
with open('pickles/gridlr.pkl', 'rb') as f:
    gridlr = joblib.load(f)
with open('pickles/gridsvm.pkl', 'rb') as f:
    gridsvm = joblib.load(f)
with open('pickles/griddeep.pkl', 'rb') as f:
    griddeep = joblib.load(f)
# One pre-scaled sample (15 features) to run through the loaded models.
X_test_scaled=[[0.46666667, 0.        , 0.21428571, 0.19854994, 1.        ,
                0.        , 0.        , 0.        , 0.        , 0.30985915,
                0.08014657, 1.        , 0.        , 0.11313326, 0.2195122]]
X_test_scaled = np.asarray(X_test_scaled, dtype=np.float32)
gridmodels = {'Logistic Regression': gridlr,
              'Linear Support Vector Machine': gridsvm,
              'Deep Neural Network Learning': griddeep
             }
gridlr_predictions = gridlr.predict(X_test_scaled)
gridsvm_predictions = gridsvm.predict(X_test_scaled)
griddeep_predictions = griddeep.predict(X_test_scaled)
####### Predict
# Ensemble: majority vote across the three models (see earlier cell for details).
all_predictions = zip(gridlr_predictions, gridsvm_predictions, griddeep_predictions)
ensemble_predictions = []
for tup in all_predictions:
    ensemble_predictions.append( max( set(list(tup)), key=list(tup).count ) )
###### Result
print("gridlr_predictions",gridlr_predictions) # <class 'numpy.ndarray'>
print("gridsvm_predictions",gridsvm_predictions) # <class 'numpy.ndarray'>
print("griddeep_predictions",griddeep_predictions) # <class 'numpy.ndarray'>
print("ensemble_predictions", ensemble_predictions ) # <class 'list'>
# -
| .ipynb_checkpoints/DevHomeEquityPDModel - Dev_lm-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # kilojoule Library
# We will be using a custom python library, `kilojoule`, written specifically for this course. The main advantage to this approach is the nomenclature for the functions you will be using in Python will be consistent with the nomenclature from your textbook. The disadvantage to this approach is there will be limited sources for external tech support (you won't find example code using this library outside of this course).
#
# Prior to using this library, it needs to be installed to a location where the Python interpreter can find it. If you are using CoCalc, this will have already been done for you. If you are using a local installation of Python you can install the library from the Python Package Index (PyPI) using the command `pip install kilojoule`.
#
# After installing the library, you will need to import it into each notebook where you intend to use it. If you are referring back to this document and want a quick import template for starting a new file, you can use the following code. If this is your first time reading this document, the code in the following block will be explained in detail below.
# + jupyter={"outputs_hidden": false}
# Star-import of the course template: brings Quantity, units, the states table,
# and related helpers into the notebook namespace (see kilojoule docs).
from kilojoule.templates.kSI_C import *
# Initialize an interface to evaluate fluid properties
# You can create multiple interfaces as long as they are stored in different variables
air = idealgas.Properties('Air', unit_system='kSI_C') # Treat air as an ideal gas
water = realfluid.Properties('Water', unit_system='kSI_C') # Treat water as a real fluid
# -
# ## Units
# The `kilojoule` library is designed to make use of dimensional "quantities" rather than simply doing calculations with numbers. In engineering, all your calculations use numbers to represent physical quantities and your calculations have no meaning without connection to appropriate units. By including the physical units as an integral part of the calculation process, we keep the physical significance of our calculation in focus and will avoid costly mistakes from unit conversion errors. To do this we will make use of the third-party library `pint` for managing units. By executing the code in the block before this one, you have already loaded this library in the background; it is accessible through the `units` and `Quantity` objects.
#
# We will first define a few property values, i.e. temperature and pressure
# + jupyter={"outputs_hidden": false}
# The Quantity(value,units) notation defines a physical quantity with a magnitude associated with a type of unit
T = Quantity(300.0,'degK')
print(T)
# .to("units") returns a converted copy; T itself keeps its original units
print(f'T = {T} = {T.to("degC")} = {T.to("degF")} = {T.to("degR")}')
p = Quantity(1.0,'atm')
print(p)
print(f'p = {p} = {p.to("kPa")} = {p.to("Pa")} = {p.to("psi")} = {p.to("bar")}')
# .ito("units") converts in place, so p is stored in kPa from here on
p.ito('kPa')
print(p)
# -
# We were able to access the quantities stored in the variables `T` and `p` in any unit system by using the notation `var.to("desired units")`, which temporarily converts the units to the specified form, or we can permanently convert a variable to a different unit system using the notation `var.ito("desired units")`. We defined temperature in metric units, then displayed it in both alternate metric and English units. Whereas we defined pressure in English units, then displayed it in both alternate English and metric units. This system allows us to quickly switch back and forth between unit systems as needed.
# The real benefit of this system is most evident when we start performing calculations with combined units. In the following code we will calculate the change in energy of a mass that is changing temperature, velocity, and elevation.
# \begin{align}\Delta E_{CV} &= m(\Delta u + \Delta ke + \Delta pe) \\&= m\left(u_2-u_1 + \frac{V_2^2}{2}-\frac{V_1^2}{2} + g(z_2-z_1)\right)\end{align}
# + jupyter={"outputs_hidden": false}
# Inputs deliberately mix metric and English units; pint reconciles them
# automatically when the terms are combined below.
m = Quantity(10.0,'kg') # metric
u_1 = Quantity(300.0,'kJ/kg') # metric
u_2 = Quantity(200.0,'kJ/kg') # metric
Vel_1 = Quantity(20.0,'mph') # English
Vel_2 = Quantity(30.5,'m/s') # metric
g = Quantity(9.8,'m/s^2') # metric
z_2 = Quantity(30.1,'ft') # English
z_1 = Quantity(1.2,'m') # metric
# Per-term changes: internal energy, kinetic energy, potential energy (per unit mass)
Delta_u = u_2-u_1
print(f"Delta u = {u_2} - {u_1} = {Delta_u}")
Delta_ke = (Vel_2**2-Vel_1**2)/2
print(f"Delta ke = {Vel_2**2/2} - {Vel_1**2/2} = {Delta_ke}")
Delta_pe = g*(z_2-z_1)
print(f"Delta pe = {g}({z_2}-{z_1}) = {Delta_pe}")
# Total change in control-volume energy: Delta E = m*(Δu + Δke + Δpe)
Delta_E_CV = m*(u_2-u_1 + (Vel_2**2-Vel_1**2)/2 + g*(z_2-z_1))
print(f"Delta E = {m}({Delta_u} + {Delta_ke} + {Delta_pe}) = {Delta_E_CV}")
# Calculations() is a kilojoule helper; presumably renders this cell's work as
# formatted output -- confirm against the kilojoule docs
Calculations();
# -
# Notice that in the above example, the units for each of the terms were in different systems until they were combined.
# ## States Database
# Many of the systems we will be analyzing will have many states with multiple properties of interest at each state. Keeping track of these states and properties in a consistent, organized manner will make your code cleaner and easier to maintain. To aid in this, the `kilojoule` library provides a data structure designed specifically for this purpose. The `QuantityTable` behaves similarly to a nested Python dictionary. You can view the data structure as a table with columns representing properties and rows representing states. Each property column has a defined unit that will apply to all its values, i.e. all temperatures stored in $^\circ\text{C}$. We first need to import the `QuantityTable` class from the `kilojoule.organization` module. *(Note: this will already be loaded if you recently executed the first code block in this notebook)*
# + jupyter={"outputs_hidden": false}
from kilojoule.organization import QuantityTable
# -
# We can now initialize our states database (`QuantityTable(...)`) and store it in a variable where we can easily access it (`states = ...`). There are a few ways to fill out the table columns with properties and units, but the straightforward way is to make a dictionary with the desired properties as keys associated with the appropriate units (`properties_dict = {'property symbol':'units', ...}`). Note: a few templates, such as the one you imported at the beginning of this notebook, provide pre-built tables for common variables used in this course to make this process easier.
# + jupyter={"outputs_hidden": false}
# Make a dictionary with the types of properties you want to track and units for each property
# Each key becomes a table column; every value stored in that column is kept in
# (or converted to) the unit given here.
properties_dict = {
    'T':'degC', # Temperature: unit options ('K','degC','degF','degR')
    'p':'kPa', # pressure: unit options ('kPa','bar','psi','atm',etc.)
    'v':'m^3/kg', # specific volume
    'u':'kJ/kg', # specific internal energy
    'h':'kJ/kg', # specific enthalpy
    's':'kJ/kg/K', # specific entropy
    'x':'', # quality: dimensionless units enter as an empty string
}
# Make a database to hold the property values for each state and store in the variable name `states`
states = QuantityTable(properties=properties_dict)
# The states container is initially empty
print(states)
# -
# The table will initially be empty, but we can add property values for different states to it on the fly. Recall that we defined preferred units for each of the property columns. In the example below we will define some temperatures and pressures in consistent units, inconsistent units, and with missing units.
# + jupyter={"outputs_hidden": false}
# Assignment syntax is states[state, property] = value; bare numbers inherit
# the column's preferred units.
states[1,'T'] = Quantity(30,'degC') # consistent units
states[2,'p'] = Quantity(1,'atm') # inconsistent units (will be converted kPa)
states[3,'T'] = 100 # missing units (assumed to be degC)
states[3,'p'] = 200 # missing units (assumed to be kPa)
print(states)
# -
# Notice that we originally defined the temperature column to have units of $^\circ\text{C}$, then we explicitly defined a temperature quantity with units of $^\circ\text{C}$ and placed it in state 1 in the temperature column (`states[state, property] = value`). We then defined a pressure for state 2, but we used an inconsistent unit, i.e. we defined it in $\text{atm}$ when the column expected $\text{kPa}$. When we view the contents of the states database (`print(states)`) we see that the pressure value at state 2 was automatically converted to $\text{kPa}$. Finally we defined a temperature and pressure for state 3 without explicitly stating the units. When this happens, it will be assumed that the values are already in the preferred units. While this makes the syntax shorter, it is not a good practice since changes in other parts of the code could have unexpected consequences.
# An alternate (shorter) syntax for working with the values in the table can be enabled by assigning each column in the table to a variable in the local namespace. After executing the code below, we will be able to set the quality at state 2 to 50% with the code `x[2] = 0.5` rather than needing to type `states[2,'x'] = 0.5`. Note: this step will also be performed for you if you import one of the pre-built templates.
# +
# The following lines define (overwrite) convenience variables in the local
# name space for each of the properties in the states container.
# This allows you to add values to (or pull values from) the database using the
# nomenclature T[1], T[2], p[3], etc.
# FIX: loop variable renamed from `property` to `prop` so the built-in
# `property` decorator is not shadowed/clobbered in the module namespace.
for prop in states.properties:
    globals()[prop] = states.dict[prop]
x[2] = 0.5                        # quality at state 2 via the short syntax
T['inlet'] = Quantity(25,'degC')  # string state labels work too
print(states)
# -
# The preferred units for each property column can be changed at any time using the `.set_units()` method and all values in that column will be automatically converted to the new units
# + jupyter={"outputs_hidden": false}
# Change a column's preferred units; stored values are converted automatically.
T.set_units('degF')
p.set_units('psi')
states.display()
T.set_units('K')
p.set_units('Pa')
states.display()
# Restore the original course units.
T.set_units('degC')
p.set_units('kPa')
states.display()
# -
# ## Properties
# During our calculations for this course, we will often need to evaluate material/fluid properties at various states. The traditional method for doing this is to read values from a table that will often involve interpolation (sometimes even double interpolation). You will still be expected to know how to use the property tables for this course (especially during exams), but you will also be expected to use tools that automate this procedure so you can investigate more complex problems that are not easily solved through hand calculations.
#
# This will be achieved using third-party libraries: `CoolProp` for real-fluid properties and `PYroMat` for ideal-gas properties. Each of these libraries can be used directly in Python without loading the `kilojoule` package. However, we will be primarily using a wrapper for these libraries provided by the `kilojoule` package, which incorporates the `pint` package to handle a wider range of units and also renames a number of parameters to be consistent with the nomenclature we will be using in this course.
# ### Water Properties
# In the first code block at the top of this notebook, you imported the `realfluid` class from the `kilojoule` library. This module contains a `Properties` class that can be used to evaluate the properties of a number of real (*pure/pseudopure*) fluids. You are already familiar with looking up properties for water from the tables from your Thermo I course. Recall that for a pure substance you need two independent, intensive properties to fix a state, i.e. if you know two independent properties you can find any other property that you need for the state *(Note: there is a little more to the story here, but we will get to that later in the course)*. For now, let's say we have water at $T_1=300^\circ\text{C}$ and $p_1=750\,\text{kPa}$ and we would like to find the specific volume, $v$, specific internal energy, $u$, specific enthalpy, $h$, and specific entropy, $s$. For each of these cases, we could say the desired (dependent) property is a function of the two known (independent) properties:
# $$v_1 = v(T=300^\circ\text{C}, p=750\,\text{kPa})$$
# $$u_1 = u(T=300^\circ\text{C}, p=750\,\text{kPa})$$
# $$h_1 = h(T=300^\circ\text{C}, p=750\,\text{kPa})$$
# $$s_1 = s(T=300^\circ\text{C}, p=750\,\text{kPa})$$
# In order to use the `kilojoule.realfluid.Properties` class, we first need to instantiate it (Python-speak for initialize the class and store in a variable). The following code block will import the class (if needed), set the target fluid to be water, and set the default unit system to be metric with temperatures in $^\circ\text{C}$
# + jupyter={"outputs_hidden": false}
# Re-importing the template here is redundant if the first cell already ran,
# but keeps this section runnable on its own.
from kilojoule.templates.kSI_C import *
from kilojoule import realfluid
water = realfluid.Properties('Water', unit_system='kSI_C') # the default unit_system is 'kSI_C' other options are 'SI', 'SI_C', kSI', 'USCS_F', and 'USCS_R'
# -
# The `water` object now has sub-functions (or methods) that can be used to evaluate (look up) dependent properties.
# +
# Define known values (independent properties)
T[1] = Quantity(300.0,'degC')
p[1] = Quantity(750.0,'kPa')
# Look up dependent properties corresponding to $T_1$ and $p_1$
# specific volume
v[1] = water.v(T=T[1], p=p[1])
# specific internal energy
u[1] = water.u(T=T[1], p=p[1])
# specific enthalpy
h[1] = water.h(T=T[1], p=p[1])
# specific entropy
s[1] = water.s(T=T[1], p=p[1])
# quality (undefined for a single-phase state; see note below the cell)
x[1] = water.x(T=T[1], p=p[1])
# phase
phase[1] = water.phase(T=T[1], p=p[1])
Calculations()
states.display()
# -
# Notice the quality, $x_1$, was reported to be `N/A` because the substance is a single-phase, superheated vapor at state 1, so the quality is not defined for this state.
# We can also use the same functions for evaluating the properties of saturated fluids. Let's assume the fluid from state 1 in the above example is cooled at a constant pressure until it is entirely in liquid form, i.e. $x_2=0$. We could then find all the remaining properties at state 2 as well.
# $$ p_2 = p_1 $$
# $$ T_2 = T(p=p_2, x=x_2) $$
# $$ v_2 = v(p=p_2, x=x_2) $$
# $$ u_2 = u(p=p_2, x=x_2) $$
# $$ h_2 = h(p=p_2, x=x_2) $$
# $$ s_2 = s(p=p_2, x=x_2) $$
# + jupyter={"outputs_hidden": false}
# Independent properties that fix state 2
p[2] = p[1]
x[2] = 0
# Dependent properties corresponding to $p[2]$ and $x[2]$
T[2] = water.T(p=p[2], x=x[2])
v[2] = water.v(p=p[2], x=x[2])
u[2] = water.u(p=p[2], x=x[2])
h[2] = water.h(p=p[2], x=x[2])
s[2] = water.s(p=p[2], x=x[2])
phase[2] = water.phase(p=p[2],x=x[2])
Calculations()
states.display()
# -
# Notice the phase for state 2 is reported as `twophase`, even though we know it is entirely liquid because the quality is 0. This state would be more accurately described as a saturated-liquid, but the `CoolProp` library reports all saturated states (saturate liquid, saturated mixture, and saturated vapor) as `twophase`.
#
# Let's now calculate a third state that would be obtained from an isenthalpic expansion to $p_3=100\,\text{kPa}$ resulting in a saturated mixture.
# $$ h_3 = h_2 $$
# $$ p_3 = 100\,\text{kPa} $$
# $$ T_3 = T(p=p_3, h=h_3) $$
# $$ v_3 = v(p=p_3, h=h_3) $$
# $$ u_3 = u(p=p_3, h=h_3) $$
# $$ x_3 = x(p=p_3, h=h_3) $$
# $$ s_3 = s(p=p_3, h=h_3) $$
# + jupyter={"outputs_hidden": false}
# Independent properties that fix the state
h[3] = h[2] # isenthalpic (throttling) expansion from state 2
p[3] = Quantity(100.0,'kPa')
# Dependent properties corresponding to $p_3$ and $h_3$
T[3] = water.T(p=p[3], h=h[3])
v[3] = water.v(p=p[3], h=h[3])
u[3] = water.u(p=p[3], h=h[3])
x[3] = water.x(p=p[3], h=h[3])
s[3] = water.s(p=p[3], h=h[3])
phase[3] = water.phase(p=p[3], h=h[3])
Calculations()
states.display()
# -
# #### Short-form Notation for Property Evaluation
# The procedure illustrated above is somewhat repetitive and can be shortened in a few ways. For each call to the `water.property()` object, we explicitly told the function both the type and value of each of the argments passed to it (or in Python lingo, we gave both a keyword and an argument). However, we can take advantage of the units associated with each of the variables to infer the appropriate property type, i.e. if the argument has units of $kPa$ we can safely assume the appropriate keyword should be `p` for pressure. Therefore, we could shorten the line `T[3] = water.T(p=p[3], x=x[3])` to `T[3] = water.T(p[3], x[3])`.
#
# **Note: the approach will not work when passing an internal energy or enthalpy argument because they share the same units and you must use the long-form notation, i.e. (...,u=u[3]) or (..., h=h[3]).**
#
# Also, since we are using the same two independent properties for each of the dependent property calls, we could use a loop to automate the process. The command `states.fix(3, water)` will attempt to use the specified `water` property table to evaluate all missing properties at state `3` using the information already in the table as independent properties.
#
# To illustrate this, let's calculate a fourth state that would be obtained from an isobaric expansion to a temperature of $T_4=150^\circ \mathrm{C}$.
# $$ p_4 = p_3 $$
# $$ T_4 = 150^\circ\mathrm{C} $$
# +
# Independent properties that fix the state
p[4] = p[3] # isobaric expansion from state 3
T[4] = Quantity(150.0,'degC')
# Dependent properties corresponding to $T_4$ and $p_4$
# using short-form notation (keywords inferred from the arguments' units)
v[4] = water.v(T[4], p[4])
# or use `states.fix(4, water)` to fill in the rest of the table from the
# properties already present at state 4
states.fix(4, water)
Calculations()
states.display()
# -
# #### Plotting Property Diagrams
# It is often helpful to visualize processes by plotting the states on property diagrams, i.e. $T$-$s$, $p$-$v$, $p$-$h$, etc. The `kilojoule` library provides a `.property_diagram()` method for each of the fluid property tables that can create common property diagrams used in thermodynamics. This class uses the popular `matplotlib` library. You first instantiate the class by telling it which properties you want represented on the $x$ and $y$ axes and the unit system (if not using the default/same units as the table). You can also use the `saturation` parameter to specify whether or not to draw the saturation curves (the default is `True` for real fluids).
#
# In the following code, we will store a plot object (instance of the `PropertyPlot` class) in the variable `Ts`.
# > `Ts = water.property_diagram(x='s', y='T', unit_system='USCS_F', saturation=True)`
#
# The `Ts` object contains a `matplotlib` figure and axis stored as attributes accessible at `Ts.fig` and `Ts.ax`. We can use any `matplotlib` functions on these objects to add features to the diagram (many examples are available on the internet). However, there are a few custom `matplotlib` routines built into the `PropertyPlot` class, which will make it easier to visualize the process we will be analyzing.
#
# The simplest built-in construct is the `.plot_point(x, y, label='label', label_loc='north')` method, which places a dot at the $x$, $y$ coordinates with an optional label placed at the relative location provided (default is north)
# > `Ts.plot_point(x=s[1], y=T[1], label='1', label_loc='north')`
#
# An alternate interface is also available if your data is stored in the `QuantityTable()` class described above.
# > `Ts.plot_state(states[2])`
# > `Ts.plot_state(states[3])`
#
# We also like to draw lines connecting states to illustrate processes. However, we do not want to simply draw straight lines connecting the points. Rather, we would like the path of the line to represent the process properties at all points. We can do this if we know something that was constant during the process, ie. pressure was constant from 1 to 2 and enthalpy was constant from 2 to 3 in our earlier example. The `.plot_process()` method accepts two states and a path description to achieve this:
# >`Ts.plot_process(states[1], states[2], path='isobaric')`
# >`Ts.plot_process(states[2], states[3], path='isenthalpic')`
# + jupyter={"outputs_hidden": false}
# Create Ts_diagram instance
# NOTE(review): the prose above uses unit_system='USCS_F'; 'English_F' is passed
# here -- presumably an accepted alias, but confirm against the kilojoule docs.
Ts = water.property_diagram(x='s', y='T', unit_system='English_F', saturation=True)
# Plot Critical and Triple Points
Ts.plot_triple_point(label='TP',label_loc='northwest')
Ts.plot_critical_point(label_loc='south')
# Plot State 1 using the .plot_point() method (explicit x/y coordinates)
Ts.plot_point(x=s[1], y=T[1], label='1', label_loc='north')
# Plot States 2-4 using the .plot_state() method (reads the states table)
Ts.plot_state(states[2])
Ts.plot_state(states[3], label_loc='south west')
Ts.plot_state(states[4])
# Connect the states with lines that illustrate the process paths
Ts.plot_process(states[1], states[2], path='isobaric')
Ts.plot_process(states[2], states[3], path='isenthalpic')
Ts.plot_process(states[3], states[4], path='isobaric');
# -
# We can use this same process to also create additional plots for our system by changing the parameters when we call the `.property_diagram()` method, or we can shorten the syntax if we use one of the built-in property combinations `pv_diagram, Ts_diagram, Tv_diagram, hs_diagram, ph_diagram, pT_diagram`
# + jupyter={"outputs_hidden": false}
# Create pv_diagram instance
diag = water.pv_diagram(unit_system='SI_K') # Note: this is the only line that will be changed for the next few examples
# Plot Critical and Triple Points
diag.plot_triple_point(label_loc='northwest')
diag.plot_critical_point(label_loc='south')
# Plot States 1-4 using the .plot_state() method
diag.plot_state(states[1])
diag.plot_state(states[2])
diag.plot_state(states[3], label_loc='south west')
diag.plot_state(states[4])
# Connect the states with lines that illustrate the process paths
diag.plot_process(states[1], states[2], path='isobaric')
diag.plot_process(states[2], states[3], path='isenthalpic')
diag.plot_process(states[3], states[4], path='isobaric');
# +
# Create ph_diagram instance
diag = water.ph_diagram(unit_system='SI_C') # Note: this is the only line that will be changed for the next few examples
# Plot Critical and Triple Points
diag.plot_triple_point(label_loc='northwest')
diag.plot_critical_point(label_loc='south')
# Plot States 1-4 using the .plot_state() method
diag.plot_state(states[1])
diag.plot_state(states[2])
diag.plot_state(states[3], label_loc='south west')
diag.plot_state(states[4])
# Connect the states with lines that illustrate the process paths
diag.plot_process(states[1], states[2], path='isobaric')
diag.plot_process(states[2], states[3], path='isenthalpic')
diag.plot_process(states[3], states[4], path='isobaric');
# +
# Create Tv_diagram instance (default unit system)
diag = water.Tv_diagram() # Note: this is the only line that will be changed for the next few examples
# Plot Critical and Triple Points
diag.plot_triple_point(label_loc='northwest')
diag.plot_critical_point(label_loc='south')
# Plot States 1-4 using the .plot_state() method
diag.plot_state(states[1])
diag.plot_state(states[2])
diag.plot_state(states[3], label_loc='south west')
diag.plot_state(states[4])
# Connect the states with lines that illustrate the process paths
diag.plot_process(states[1], states[2], path='isobaric')
diag.plot_process(states[2], states[3], path='isenthalpic')
diag.plot_process(states[3], states[4], path='isobaric');
# + jupyter={"outputs_hidden": false}
# Create hs_diagram instance (default unit system)
diag = water.hs_diagram() # Note: this is the only line that will be changed for the next few examples
# Plot Critical and Triple Points
diag.plot_triple_point(label_loc='northwest')
diag.plot_critical_point(label_loc='south')
# Plot States 1-4 using the .plot_state() method
diag.plot_state(states[1])
diag.plot_state(states[2])
diag.plot_state(states[3], label_loc='south west')
diag.plot_state(states[4])
# Connect the states with lines that illustrate the process paths
diag.plot_process(states[1], states[2], path='isobaric')
diag.plot_process(states[2], states[3], path='isenthalpic')
diag.plot_process(states[3], states[4], path='isobaric');
# -
# The ability to generate the previous 5 diagrams using the same set of commands (with only minor changes to the first line) provides an excellent opportunity to write a loop to decrease the amount of code you need to write and maintain.
# ### Refrigerant Properties
# All the commands demonstrated above will also work for any of the other pure/pseudopure substances, such as R-134a, supported by the underlying `CoolProp` library, a list of which can be obtained with the following code.
# + jupyter={"outputs_hidden": false}
from kilojoule import realfluid
# List the names of every fluid supported by the underlying CoolProp library
realfluid.fluids()
# -
# To obtain properties for any of the supported fluids, simply supply the appropriate name when you instantiate the `realfluid.Properties()` class, i.e.
# + jupyter={"outputs_hidden": false}
r134a = realfluid.Properties('R134a')
T_ref = Quantity(30,'degC')
x_ref = Quantity(0.25,'') # Note: quality is dimensionless, so we define its units as an empty string
# Two independent, intensive properties (T and x here) fix the state of a pure fluid
h_ref = r134a.h(T=T_ref, x=x_ref)
print(f'h_ref = {h_ref}')
v_ref = r134a.v(T=T_ref, x=x_ref)
print(f'v_ref = {v_ref}')
s_ref = r134a.s(T=T_ref, x=x_ref)
print(f's_ref = {s_ref}')
# Show the state on a T-s diagram for this refrigerant
Ts_diagram = r134a.Ts_diagram()
Ts_diagram.plot_triple_point()
Ts_diagram.plot_critical_point()
Ts_diagram.plot_point(x=s_ref, y=T_ref, label='1');
# -
# ### Air Properties
# #### Ideal Gas Air Properties
# Your textbook treats air as an ideal gas. As a result, the internal energy and enthalpy values from the tables in the back of the book are only a function of temperature. Therefore, you only need one independent, intensive property, temperature, to find the enthalpy at a state, since the ideal gas law is used to fix the other degree of freedom (therefore removing the need for a second independent property), i.e.
# $$h=h(T)\qquad\text{for an ideal gas only}$$
# The entropy, however, is still dependent on the pressure (even with the ideal gas assumption applied). Since the ideal gas air tables are only tabulated by temperature, it is not possible to look up the entropy directly with the pressure information also being accounted for. To workaround this problem, your textbook tabulates $s^o$ rather than $s$. Where the $^o$ is provided to remind you that it is only the temperature dependent portion of the change in entropy. To get the full change in entropy between two states using the information from the tables, you can use
# $$ \Delta s_{1\to2} = s_2^o-s_1^o - R\ln\frac{p_2}{p_1} $$
# where $s_2^o$ and $s_1^o$ are from the tables, $R$ is the specific gas constant, and $p_2$ and $p_1$ are the pressures of the fluid in absolute units (i.e. *absolute pressure* not gauge pressure).
#
# Ideal gas properties for air (and many other gases) can be obtained from the `PYroMat` library using the Burcat equations. The `kilojoule` library provides a wrapper to access this library using the same syntax as we used for the real-fluid library above. This wrapper is provided by the `idealgas` module.
# > `from kilojoule import idealgas`
# > `air = idealgas.Properties('Air')`
# +
from kilojoule import idealgas
air = idealgas.Properties('Air', unit_system='kSI_C')
T_air = Quantity(450,'K')
p_air = Quantity(1.0,'atm')
h_air = air.h(T=T_air)
s_air = air.s(T=T_air,p=p_air)
print(f'h_air = {h_air}')
print(f's_air = {s_air}')
Calculations();
# -
# #### Pseudopure Real Fluid Air Properties
# While we can obtain reasonably accurate answer for many engineering systems that involve air using the ideal gas assumption, there are cases when we need to treat air as a real fluid instead. The `CoolProp` library used in `kilojoule` does not treat air as an ideal gas, rather it treats air as a *pseudopure fluid*. In this context we call the air a *pseudopure fluid* because it is really a mixture (approximately $79\%\text{N}_2$ and $21\%\text{O}_2$) but we treat it as if it were a pure fluid with known properties. As a result, you still need to provide two independent, intensive properties when using the `kilojoule.realfluid.Properties` class with air.
# + jupyter={"outputs_hidden": false}
air = realfluid.Properties(fluid='Air')
T_air = Quantity(450,'K')
p_air = Quantity(1.0,'atm')
h_air = air.h(T=T_air, p=p_air)
print(f'h_air = {h_air}')
Calculations();
# -
# At this point it is worth pointing out that the ideal gas tables and the real fluid tables gave significantly different answers for the enthalpy in the previous example (neither of which are in agreement with your textbook). This is because enthalpy is an integrated property and the libraries used to evaluate these properties use different reference states (or starting points for the integration). While this may seem like a major problem, it is not. The vast majority of our thermodynamics calculations will look at changes in integrated properties, such as internal energy, enthalpy, entropy, Gibbs function, etc., rather than their absolute values. So as long as you pull all your properties from the same table your final results will be (nearly) the same regardless of which set of tables you used. However, you cannot mix and match between different property sources.
# #### Humid Air
# Later in this course we will study mixtures, rather than just pure substances. One common mixture encountered in many engineering applications is humid air (a mixture of air and water vapor). Because we will be treating humid air as a mixture of two substances (with air still being treated as a pseudopure fluid), we will need three independent intensive properties to fix the state. The fluid properties for humid air can be reached in the same way as the pure/pseudopure substance, with the exception that you need to provide three independent properties to fix the state instead of two and you need to use the `humidair.Properties` class instead of the `realfluid.Properties` class.
# + jupyter={"outputs_hidden": false}
from kilojoule.templates.humidair_default import *
# Start with air at 30 C, 50% relative humidity, at 1 atmosphere
T[1] = Quantity(30,'degC')
rel_hum[1] = 0.5
p[1] = Quantity(1,'atm')
# Humid air needs three independent, intensive properties to fix a state
T_wb[1] = humidair.T_wb(T[1],p[1],rel_hum[1])
h[1] = humidair.h(T[1],p[1],rel_hum[1])
v[1] = humidair.v(T[1],p[1],rel_hum[1])
s[1] = humidair.s(T[1],p[1],rel_hum[1])
omega[1] = humidair.omega(T[1],p[1],rel_hum[1])
# Use a simple cooling process to lower the temperature and dehumidify by cooling to 10 C
T[2] = Quantity(10,'degC')
rel_hum[2] = 1
p[2] = p[1]
# Fill in the remaining properties of state 2 from the three fixed above
states.fix(2,humidair)
states.display()
# -
# The `kilojoule` library provides a routine for drawing psychrometric charts to visualize humid air systems. *Note: this can be used to generate psychrometric charts for non-standard pressures and unit systems*
# Draw both states and the cooling process on a psychrometric chart
psych = humidair.psychrometric_chart()
psych.plot_state(states[1])
psych.plot_state(states[2],label_loc='south east')
psych.plot_process(states[1],states[2],path='simple cooling');
# +
from kilojoule.templates.humidair_default import *
p[1] = Quantity(85,'kPa')
# Set the working pressure for the humid-air property table to the
# non-standard value before evaluating any properties
# NOTE(review): Quantity(p[1]) re-wraps an existing Quantity -- presumably to copy it; confirm
humidair.p = Quantity(p[1])
# Start with air at 30 C, 50% relative humidity, at 0.85 atmosphere
T[1] = Quantity(30,'degC')
rel_hum[1] = 0.5
T_wb[1] = humidair.T_wb(T[1],p[1],rel_hum[1])
h[1] = humidair.h(T[1],p[1],rel_hum[1])
v[1] = humidair.v(T[1],p[1],rel_hum[1])
s[1] = humidair.s(T[1],p[1],rel_hum[1])
omega[1] = humidair.omega(T[1],p[1],rel_hum[1])
# Use a simple cooling process to lower the temperature and dehumidify by cooling to 10 C
T[2] = Quantity(10,'degC')
rel_hum[2] = 1
p[2] = p[1]
states.fix(2,humidair)
states.display()
# Draw the chart at the non-standard 85 kPa pressure
psych = humidair.psychrometric_chart()
psych.plot_state(states[1])
psych.plot_state(states[2],label_loc='south east')
psych.plot_process(states[1],states[2],path='simple cooling');
# -
# ## Equation Formatting
# Simply arriving at the correct answer for a problem is only half the battle. You then need to be able to communicate your methods and results to a range of audiences (in this class your instructor). This should be done following technical writing conventions with a narrative discussion of your process including properly formatted equations and sample calculations. It is not sufficient to simply submit your code and a final numerical answer or a long list of equations without any explanation.
#
# Throughout your academic career you have learned many different conventions (shorthand) for writing down mathematical concepts, i.e. to show a variable is being raised to a power we put that power in the superscript $x^2$. However, there is no key on your keyboard to make that 2 shrink in size and move above the variable. You'll also notice that the $x$ was not written in the same font as the rest of the text. It is convention for variables to be written in italics rather than normal font because it helps the reader quickly distinguish them from regular text (you probably already do this in your head without realizing it).
#
# There are a few ways to create properly formatted equations. While the Microsoft equation editor has improved greatly in recent years, the most powerful tool is the formatting language $\LaTeX$. $\LaTeX$ has been around for many decades and it was developed to represent complex mathematical expressions using plain text (just the keys on a regular keyboard). While there is a bit of a learning curve if you choose to start using $\LaTeX$, your efforts will pay off many times over as you will find that most scientific/mathematical software has $\LaTeX$ support built in. In fact, learning $\LaTeX$ will even make you faster when you do need to use Microsoft Equations editor because it includes support for many $\LaTeX$ symbol names.
#
# The Jupyter notebook this document is being created in has built-in $\LaTeX$ support. In some of the earlier examples you may have noticed special symbols in some of the output, such as $\Delta^\circ\text{C}$. Those were created using $\LaTeX$ formatting and the special symbols in this explanation were also created using $\LaTeX$ formatting (if you are reading this in a live notebook, double-click on this cell to see the source code written in Markdown syntax). You can include inline math, $f(x)=5x^2-3x+2$, or you can include "display" math
# $$f(x) = \int_0^\infty\frac{3}{2}x\ dx$$
#
# To help you convert your calculations into technical writing format, the `kilojoule` library provides a few convenience functions in its `display` module to automate the $\LaTeX$ creation process. The `display.Calculations()` class will trigger a process that attempts to convert the code in the current cell to $\LaTeX$ and show the progression of the calculations from symbolic to final numerical form.
#
# To demonstrate the use of `display.Calculations()` we'll step through the evaluation and display of the function $\psi=ax-cx^2+\frac{b}{x}$. We'll start by defining values for `a`, `b`, and `x`
# The `kilojoule` library provides a routine for drawing psychrometric charts to visualize humid air systems. *Note: this can be used to generate psychrometric charts for non-standard pressures and unit systems*
# + jupyter={"outputs_hidden": false}
from kilojoule.display import Calculations
from kilojoule.units import Quantity
# + jupyter={"outputs_hidden": false}
a = Quantity(3.2,'psi')
b = Quantity(1,'kPa')
x = 2
Calculations();
# -
# In this example, the lines defining `a`, `b`, and `x` were simple definitions involving no mathematical operations, so they are shown in simple form. By placing the line `display.Calculations();` at the end of the cell, we trigger a sequence where the code in the cell is parsed for strings of letters resembling equations and displays them with $\LaTeX$ formatting.
#
# In the next cell we will define `c` as being equal to `a`.
# + jupyter={"outputs_hidden": false}
c = a
Calculations();
# -
# In this example, we see 3 terms rather than two. This line still has no mathematical operations, but there is a train of logic where we are setting $c$ equal to $a$. While it is important to show the numerical value of $c$ as being $3.2\ \mathrm{psi}$, it is also important (possibly more important) to show the process that led to $c$ having that value, so we show the symbolic form of the expression $c=a$ as well.
#
# Let's now evaluate a full equation with mathematical operations.
# + jupyter={"outputs_hidden": false}
psi = a*x - c*x**2 + b/x
Calculations();
# -
# In this example the equation is expressed in 3 lines. The first line shows the symbolic form of the equation, which shows the reader the process or logic that is being applied. The second line shows numerical values in place of each symbol, which shows the propagation of information from earlier calculations. Finally the third line shows the numerical value resulting from the calculation. *Note: this is the form you should use when writing out calculations by hand as well.* Also, notice that the variable name `psi` was recognized as being a Greek letter and converted to the $\LaTeX$ equivalent of `\psi`. This will work for most cases if you define your variable names carefully.
#
# Using the `display.Calculations()` command will allow you to properly format your calculations, but you will still need to provide a narrative discussion to describe your process to the reader. You can do this in a Jupyter notebook by interspersing `Markdown` cells like this one between your equations, or you can place your narrative in your code as comments that will be shown in your output using the `comments=True` (by default) option for the `display.Calculations()` class.
# + jupyter={"outputs_hidden": false}
# You can place comments in your code to describe your process.
# The comments will be processed as `Markdown` so you can apply **formatting** if desired
# For instance, let's calculate the amount of heat transfer required to decrease the temperature of air from 400 K to 300 K in a constant pressure process assuming constant specific heat. We can start by defining some known parameters,
T_1 = Quantity(400,'K')
T_2 = Quantity(300,'K')
c_p = Quantity(1.005,'kJ/kg/K')
# We can then solve the first law for $Q$ and substitute $c_p\Delta T$ for $\Delta h$
Q_1_to_2 = c_p*(T_2-T_1)
calcs = Calculations(comments=True);
# -
# You may have noticed that in the example above, we stored the result of `Calculations()` in the variable `calcs`. This gives us access to the $\LaTeX$ used to generate the output, which can be accessed at `calcs.output`. This can be useful if you are learning $\LaTeX$ and want to see how an equation was created or if you want to export the $\LaTeX$ code for inclusion in another document.
# + jupyter={"outputs_hidden": false}
# Show the raw LaTeX source captured by the previous Calculations() call
print(calcs.output)
# -
# The `kilojoule` library also provides a quick way to show the current value of all the quantities and property tables defined in the local namespace using the `display.Summary()` class, just the quantities using `display.Quantities()`, or just the property tables using `display.QuantityTables()`
# + jupyter={"outputs_hidden": false}
import kilojoule as kj
# Show every Quantity and QuantityTable currently defined in the namespace
kj.display.Summary(n_col=4);
# + jupyter={"outputs_hidden": false}
# Show only the scalar quantities
kj.display.Quantities(n_col=6);
# + jupyter={"outputs_hidden": false}
# Show only the property tables
kj.display.QuantityTables();
# + jupyter={"outputs_hidden": false}
# -
| examples/kilojoule Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using Static Mapping
import transportation_tutorials as tt
import geopandas as gpd
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from shapely.geometry import Polygon, Point
# ## Questions
# 1. Generate a population density map of Miami-Dade County, at the MAZ resolution level for SERPM 8.
# ## Data
# To answer the question, use the following files:
maz = gpd.read_file(tt.data('SERPM8-MAZSHAPE')) # MAZ zone polygons
maz.head()
maz_data = pd.read_csv(tt.data('SERPM8-MAZDATA', '*.csv')) # MAZ attribute table (includes population)
maz_data.head()
fl_county = gpd.read_file(tt.data('FL-COUNTY-SHAPE')) # Florida county boundary polygons
fl_county.head()
# ## Solution
# We can begin by extracting just Miami-Dade county from the counties shapefile.
# As noted, the name is recorded in this file as just "DADE", so we can
# use that to get the correct county.
# Extract the Miami-Dade County polygon (recorded as "DADE" in this shapefile).
md_county = fl_county.query("COUNTYNAME == 'DADE'")
# The county shapefile uses a different crs, so we'll need to make them aligned before
# doing a join.  Use the modern "authority:code" string: passing a proj4-style
# {'init': ...} dict is deprecated and rejected by pyproj >= 2.4.
md_county = md_county.to_crs("EPSG:2236")
md_polygon = md_county.iloc[0].geometry
# Next, we can select the MAZ centroids that are within the Miami-Dade polygon.
md_maz = maz[maz.centroid.within(md_polygon)]
# Then we merge `maz_data` dataframe with spatially joined `md_maz`
# to pull in the required population information.  Subset to the two needed
# columns *before* casting so unrelated non-numeric columns cannot break astype.
md_maz_info = md_maz.merge(maz_data[['mgra', 'POP']].astype(float), how = 'left', left_on = 'MAZ', right_on = 'mgra')
md_maz_info.head()
# We can review a map of the selected MAZ's and the Miami-Dade County borders.
# Note that the MAZ's don't actually cover the whole county, as the south
# and west areas of the county are undeveloped swampland.
ax = md_maz_info.plot()
md_county.plot(ax=ax, color='none', edgecolor='red'); # county outline drawn over the MAZ polygons
# Because the unit of measure in EPSG:2236 is approximately a foot, the
# `area` property of the `md_maz_info` GeoDataFrame gives the area in
# square feet. To express population density in persons per square mile,
# we need to multiply by 5280 (feet per mile) squared.
md_maz_info["Population Density"] = md_maz_info.POP / md_maz_info.area * 5280**2
# A first attempt at drawing a population density choropleth shows
# something is wrong; the entire county is displayed as nearly zero.
# First attempt: raw (unclipped) density values
fig, ax = plt.subplots(figsize=(12,9))
ax.axis('off') # don't show axis
ax.set_title("Population Density", fontweight='bold', fontsize=16)
ax = md_maz_info.plot(ax=ax, column="Population Density", legend=True)
# The problem is identifiable in the legend: the scale goes up
# to nearly half a million people per square mile, which is
# an enormous value, and generally not achievable unless a zone
# is basically just skyscrapers. This does apply to a handful of
# MAZ's in downtown Miami, but the density everywhere else is
# so much lower that this map is meaningless.
#
# We can create a more meaningful map by clipping the top of the
# range to a more reasonable value, say only 40,000 people per
# square mile.
fig, ax = plt.subplots(figsize=(12,9))
ax.axis('off') # don't show axis
ax.set_title("Population Density", fontweight='bold', fontsize=16)
# np.clip caps the extreme downtown values so the rest of the county is distinguishable
ax = md_maz_info.plot(ax=ax, column=np.clip(md_maz_info["Population Density"], 0, 40_000), legend=True)
| course-content/exercises/solution-geo-static-map.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np  # np was used below without ever being imported in this notebook

# Toy time series: column 0 is t, column 1 is t**3 -> shape (9, 2)
timeseries = np.array([[0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],
                       [0.1**3, 0.2**3, 0.3**3, 0.4**3, 0.5**3, 0.6**3, 0.7**3, 0.8**3, 0.9**3]]).transpose()
timeseries
# Placeholder target vector (all zeros), one entry per time step
y = np.zeros(len(timeseries))
for j in range(1,4):
    print(j)
a = []
# Indexing with [[2],:] keeps the row as a 2-D (1, 2) slice instead of a 1-D vector
a.append(timeseries[[2],:])
# +
# Build sliding windows of 3 consecutive rows and the target that follows each window
out= []
out_y = []
for i in range(9-3-1):
    t = []
    for j in range(1,4):
        t.append(timeseries[[i+j+1],:])
    out.append(t)
    out_y.append(y[i+3+1])
    print("Current i: {}, j: {}".format(i,j))
# Current i: 0, j: 3
# Current i: 1, j: 3
# Current i: 2, j: 3
# Current i: 3, j: 3
# Current i: 4, j: 3
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import IPython.display as ipd
# [← Back to Index](index.html)
# # Sheet Music Representations
# Music can be represented in many different ways. The printed, visual form of a musical work is called a **score** or **sheet music**. For example, here is a sheet music excerpt from Mozart Piano Sonata No. 11 K. 331:
# Render the score excerpt (an SVG image) inline in the notebook
ipd.SVG("https://upload.wikimedia.org/wikipedia/commons/2/27/MozartExcerptK331.svg")
# Embed a recording of the piece for listening
ipd.YouTubeVideo('dP9KWQ8hAYk')
# Sheet music consists of **notes**. A note has several properties including pitch, timbre, loudness, and duration.
# **Pitch** ([Wikipedia](https://en.wikipedia.org/wiki/Pitch_(music))) is a perceptual property that indicates how "high" or "low" a note sounds. Pitch is closely related to the fundamental frequency sounded by the note, although fundamental frequency is a physical property of the sound wave.
# An **octave** ([Wikipedia](https://en.wikipedia.org/wiki/Octave)) is an interval between two notes where the higher note is twice the fundamental frequency of the lower note. For example, an A at 440 Hz and an A at 880 Hz are separated by one octave. Here are two Cs separated by one octave:
ipd.Image("https://upload.wikimedia.org/wikipedia/commons/a/a5/Perfect_octave_on_C.png")
# A **pitch class** ([Wikipedia](https://en.wikipedia.org/wiki/Pitch_class)) is the set of all notes that are an integer number of octaves apart. For example, the set of all Cs, {..., C1, C2, ...} is one pitch class, and the set of all Ds, {..., D1, D2, ...} is another pitch class. Here is the pitch class for C:
ipd.Image("https://upload.wikimedia.org/wikipedia/commons/thumb/9/98/Pitch_class_on_C.png/187px-Pitch_class_on_C.png")
# **Equal temperament** ([Wikipedia](https://en.wikipedia.org/wiki/Equal_temperament)) refers to the standard practice of dividing the octave into 12 uniform scale steps.
# The difference between two subsequent scale steps is called a **semitone** ([Wikipedia](https://en.wikipedia.org/wiki/Semitone)), the smallest possible interval in the 12-tone equal tempered scale. Musicians may know this as a "half step."
# The **key signature** ([Wikipedia](https://en.wikipedia.org/wiki/Key_signature)) follows the clef on a staff and indicates the key of the piece by the sharps or flats which are present throughout the piece. In the Mozart sonata excerpt above, the key signature is A major.
# The **time signature** ([Wikipedia](https://en.wikipedia.org/wiki/Time_signature)) follows the key signature on the staff and indicates the rhythmic structure, or meter, of the piece. In the Mozart sonata excerpt above, the time signature is 6/8, i.e. six eighth notes in one measure.
# **Tempo** ([Wikipedia](https://en.wikipedia.org/wiki/Tempo)) denotes how slow or fast a piece is played as measured by beats per minute (BPM). In the Mozart sonata excerpt above, the tempo marking is "Andante grazioso".
# [← Back to Index](index.html)
| sheet_music_representations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import unicode_literals
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sqlite3
import seaborn as sns
from matplotlib import cm
# Set plotting style
# NOTE(review): the 'seaborn-white' style name was renamed 'seaborn-v0_8-white'
# in matplotlib 3.6 and later removed -- confirm the pinned matplotlib version.
plt.style.use('seaborn-white')
LINE_COLOR = "red"  # single accent color reused by all line plots below
# %matplotlib inline
# -
# Date conversion function
def date_columns(query):
    """Determine which known date columns a SQL query selects.

    Returns the list of date-typed column names mentioned in *query*
    (all of them for a ``SELECT *``), or ``None`` when the query touches
    no date columns -- exactly the value expected by the ``parse_dates``
    argument of ``pandas.read_sql_query``.
    """
    known_dates = ["Collision_Date", "Process_Date"]
    if '*' in query:
        return known_dates
    selected = [name for name in known_dates if name in query]
    return selected or None
def run_query(query, sql_file="./20180925_switrs.sqlite3"):
    """Read sqlite query results into a pandas DataFrame.

    Any known date columns referenced by *query* (see ``date_columns``)
    are parsed into datetime columns of the returned frame.
    """
    con = sqlite3.connect(sql_file)
    try:
        # Construct a Dataframe from the results
        df = pd.read_sql_query(
            query,
            con,
            parse_dates=date_columns(query),
        )
    finally:
        # sqlite3's connection context manager only manages transactions --
        # it does NOT close the connection -- so close it explicitly here to
        # avoid leaking file handles across repeated calls.
        con.close()
    return df
# # Crashes by Week
#
# Let's start by looking at the number of incidents per month:
# +
# Pull the date of every motorcycle collision through the end of 2015
query = (
    "SELECT Collision_Date "
    "FROM Collision AS C "
    "WHERE Collision_Date IS NOT NULL "
    "AND Collision_Date <= '2015-12-31' " # 2016 is incomplete
    "AND Motorcycle_Collision == 1 "
)
df = run_query(query)
# -
# Number of accidents
len(df)
# +
DATE_COL = "Collision_Date"
CRASH_COL = "Crashes"
# Derive calendar grouping columns from the collision date
df["DOY"] = df[DATE_COL].dt.dayofyear
df["DOW"] = df[DATE_COL].dt.dayofweek
df["Day"] = df[DATE_COL].dt.day
# NOTE(review): Series.dt.week is deprecated (pandas >= 1.1); newer pandas
# requires df[DATE_COL].dt.isocalendar().week -- confirm the pinned version.
df["Week"] = df[DATE_COL].dt.week
df["Month"] = df[DATE_COL].dt.month
df["Year"] = df[DATE_COL].dt.year
df[CRASH_COL] = 1  # one row per crash, so summing/counting this column counts crashes
# +
# Convert to a timeseries
ts = df[[DATE_COL]]
ts.index= ts[DATE_COL]
# Resample to weekly (weeks ending Monday) crash counts and plot them
ax = ts.resample('W-MON').count()[DATE_COL].plot(
    kind="line",
    title='Crashes in California',
    figsize=(12,6),
    linewidth=1,
    color=LINE_COLOR,
)
# Set Y range and grid
#ax.set_ylim([5000, 13000])
ax.grid()
# Set axis labels
plt.title('Motorcycle Crashes per Week in California', y=1.03, size=28)
FONTSIZE="xx-large"
plt.xlabel("")
plt.ylabel("Crashes / Week", fontsize=FONTSIZE)
# Set the year between the tick marks using minor ticks
# Pandas uses an obscure Axis labeling system, so to put the ticks between the
# major ticks, we interpolate it using np.interp().
y = ax.get_xticks()
x = [2001] + list(range(2002, 2018, 2)) # Step of 2 means every other year
minor_x = np.interp(np.arange(2001.5, 2016.5, 2), x, y)
ax.set_xticks(minor_x, minor=True)
ax.set_xticklabels(np.arange(2001, 2016, 2), minor=True, size=14)
ax.set_xticks([]) # Unset major ticks
# Add shaded bands for every other year
for year in range(2002, 2017, 2):
    ax.axvspan(pd.to_datetime(str(year)), pd.to_datetime(str(year+1)), color="black", alpha=0.05)
# Save raster and vector versions of the figure
for ext in ("png", "svg"):
    plt.savefig("/tmp/motorcycle_accidents_per_week_in_california.{ext}".format(ext=ext), bbox_inches="tight")
plt.show()
# -
# # Grouped by Day of the Year
# +
from datetime import datetime
def annotate_year(df, ax, month, day, text, xytext, adjust=(0, 0), arrowstyle="->"):
    """Annotate one (month, day) point on the mean-crashes-by-date plot.

    The x position is the date's day-of-year and the y position is the value
    stored at ``df[CRASH_COL][month][day]``; *adjust* nudges the annotated
    point, while *xytext* offsets the label text (in points).
    """
    # Use 2015 because it is a non-leapyear, and most years are non-leap year
    day_of_year = datetime(year=2015, month=month, day=day).timetuple().tm_yday
    dx, dy = adjust
    anchor = (day_of_year + dx, df[CRASH_COL][month][day] + dy)
    ax.annotate(
        text,
        anchor,
        xytext=xytext,
        textcoords='offset points',
        arrowprops=dict(arrowstyle=arrowstyle, connectionstyle="arc3"),
        size=16,
    )
# -
# Get the start locations of each month
def month_starts(df):
    """Locate month boundaries in a (month, day)-indexed frame.

    Returns ``(majors, minors)`` where *majors* are the integer positions of
    the first day of each month (plus Dec 31 as the closing boundary) and
    *minors* are the midpoints between consecutive majors -- suitable for
    major/minor axis tick placement.
    """
    majors = [
        pos
        for pos, (month, day) in enumerate(df.index)
        if day == 1 or (month == 12 and day == 31)
    ]
    minors = [lo + (hi - lo) / 2. for lo, hi in zip(majors, majors[1:])]
    return (majors, minors)
# +
# Calculate crashes per day
mean_crashes = df.groupby(["Month", "Day"]).count()
mean_crashes[CRASH_COL] /= 15 # Average instead of sum of years
# Feb 29 occurred in only 3 of the 15 years (2004, 2008, 2012), so undo the
# divide-by-15 and average over 3 instead
mean_crashes[CRASH_COL][2][29] = mean_crashes[CRASH_COL][2][29] * 15/3. # Only 3 leap years!
ax = mean_crashes[CRASH_COL].plot(
    kind="line",
    linewidth=1.8,
    figsize=(12,6),
    color=LINE_COLOR,
)
# Set Y Range and grid
#ax.set_ylim([700, 1600])
ax.set_xlim([-2, 367]) # Avoid hiding the start/end points
ax.set_ylim([10, 55]) # Avoid hiding the start/end points
ax.grid()
# Set axis labels
plt.title('Mean Motorcycle Crashes by Date (2001–2015)', y=1.03, size=28)
FONTSIZE="xx-large"
plt.xlabel('')
plt.ylabel("Crashes", fontsize=FONTSIZE)
# Fix the X tick labels: major ticks at month starts, labels centered between them
(major_x, minor_x) = month_starts(mean_crashes)
labels = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
ax.set_xticks(major_x[:-1])
ax.set_xticklabels([])
ax.set_xticks(minor_x, minor=True)
ax.set_xticklabels(labels, minor=True, size=11)
# Shade every other month
for i in range(0, len(major_x)-1, 2):
    start = major_x[i+1]
    end = major_x[i+2]
    ax.axvspan(start, end, color="black", alpha=0.03)
# Federal Holidays
annotate_year(mean_crashes, ax, 1, 1, "New Years", (+5, -40), (-2, 0))
annotate_year(mean_crashes, ax, 1, 18, "MLK", (-17, 25))
annotate_year(mean_crashes, ax, 2, 18, "Washington's Birthday", (-79.5, -22), (0, -4.5), arrowstyle="-[")
annotate_year(mean_crashes, ax, 5, 28, "Memorial Day", (-49.5, -30), (0, -1), arrowstyle="-[")
annotate_year(mean_crashes, ax, 7, 3, "3rd of July", (-75, +20))
annotate_year(mean_crashes, ax, 7, 4, "4th of July", (+7, +40))
annotate_year(mean_crashes, ax, 9, 4, "Labor Day", (-37, -35), (0, -2), arrowstyle="-[")
annotate_year(mean_crashes, ax, 10, 11, "Columbus Day", (-52, +30), (0, +7), arrowstyle="-[")
annotate_year(mean_crashes, ax, 11, 11, "Veterans Day", (-80, -25))
annotate_year(mean_crashes, ax, 11, 25, "Thanksgiving", (-47.5, -32), (0, -3), arrowstyle="-[")
annotate_year(mean_crashes, ax, 12, 25, "Christmas", (-90, -0))
# Other Holidays
annotate_year(mean_crashes, ax, 10, 31, "Halloween", (+15, +35))
annotate_year(mean_crashes, ax, 2, 14, "Valentine's Day", (-55, 50))
annotate_year(mean_crashes, ax, 3, 17, "St. Patrick's Day", (-40, 60))
# Save raster and vector versions of the figure
for ext in ("png", "svg"):
    plt.savefig("/tmp/mean_motorcycle_accidents_by_date.{ext}".format(ext=ext), bbox_inches="tight")
plt.show()
# -
# # Weekends
# +
DOW_COL = "DOW"
winter_str = "November to April"
summer_str = "May to October"
# Label each crash with a six-month "season" based on its month
df["Season"] = [summer_str if 5 <= month <= 10 else winter_str for month in df["Month"]]
# -
def make_violin_plot(df, colors, season, ax):
    """Draw a day-of-week violin plot of daily crash counts for one season.

    Parameters:
        df: DataFrame with "Season", "Collision_Date", "DOW", and "Crashes" columns.
        colors: color palette forwarded to seaborn.
        season: season label used to filter `df` (e.g. "May to October").
        ax: matplotlib Axes to draw on.

    Returns:
        The Axes the violins were drawn on.  Fix: the original returned None,
        so callers doing `axs[0] = make_violin_plot(...)` replaced their Axes
        reference with None.
    """
    season_df = df[df["Season"] == season]
    # One row per (date, weekday): count() collapses raw crash rows into
    # daily totals, which become the violin samples.
    daily = season_df.groupby(["Collision_Date", "DOW", "Season"]).count()
    daily = daily.reset_index()
    ax = sns.violinplot(x="DOW", y="Crashes", data=daily, palette=colors, linewidth=2, inner="quartile", cut=0, scale="count", ax=ax)
    plt.xticks(np.arange(7), ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"], size=14)
    # Axis labels are supplied once at the figure level, not per subplot.
    ax.yaxis.label.set_visible(False)
    ax.xaxis.label.set_visible(False)
    ax.set_ylim([-3, 103])
    return ax
# +
f, axs = plt.subplots(2, sharex=True, sharey=True, figsize=(12, 8))
f.tight_layout(rect=[0, 0.03, 1, 0.95]) # Used to keep the suptitle() above the plot
f.subplots_adjust(hspace=0)
FONTSIZE="xx-large"
f.text(-0.02, 0.5, "Crashes", va="center", rotation="vertical", fontsize=FONTSIZE)
f.text(0.045, 0.88, "November–April", fontsize=22)
f.text(0.045, 0.46, "May–October", fontsize=22)
f.suptitle("Motorcycle Crashes by Day of the Week", fontsize=28)
# Winter plot
axs[0] = make_violin_plot(df, cm.winter(np.linspace(0.2, 1, 7)), winter_str, axs[0])
# Summer plot
axs[1] = make_violin_plot(df, cm.inferno(np.linspace(0.3, 0.9, 7)), summer_str, axs[1])
for ext in ("png", "svg"):
f.savefig("/tmp/motorcycle_accidents_by_day_of_the_week_and_season.{ext}".format(ext=ext), bbox_inches="tight")
# -
# ## Exploring the Leap Day Bump
# +
query = (
"SELECT * "
"FROM Collision AS C "
"WHERE Collision_Date IS NOT NULL "
"AND Collision_Date <= '2015-12-31' " # 2016 is incomplete
"AND Motorcycle_Collision == 1 "
"AND ("
"Collision_Date == '2004-02-29' OR "
"Collision_Date == '2008-02-29' OR "
"Collision_Date == '2012-02-29' "
")"
)
leap_day_df = run_query(query)
# +
DATE_COL = "Collision_Date"
CRASH_COL = "Crashes"
# Derive calendar features from the collision date.
leap_day_df["DOY"] = leap_day_df[DATE_COL].dt.dayofyear
leap_day_df["DOW"] = leap_day_df[DATE_COL].dt.dayofweek
leap_day_df["Day"] = leap_day_df[DATE_COL].dt.day
# Series.dt.week was deprecated and removed in pandas 2.0; the ISO calendar
# week is the documented replacement and yields the same week numbers.
leap_day_df["Week"] = leap_day_df[DATE_COL].dt.isocalendar().week
leap_day_df["Month"] = leap_day_df[DATE_COL].dt.month
leap_day_df["Year"] = leap_day_df[DATE_COL].dt.year
# One row per collision, so a constant 1 lets groupby().count() tally crashes.
leap_day_df[CRASH_COL] = 1
# -
leap_day_df.groupby(["Year"])[CRASH_COL].count()
| files/switrs-motorcycle-accidents-by-date/SWITRS Crash Dates With Motorcycles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from scipy.stats import skew
from scipy.stats.stats import pearsonr
# %config InlineBackend.figure_format = 'retina' #set 'png' here when working on notebook
# %matplotlib inline
# -
# House-price feature engineering: log-transform skewed numerics, one-hot
# encode categoricals, and export train/test matrices for offline modeling.
# NOTE(review): 'orignal' looks like a typo but presumably matches the actual
# directory name on disk -- verify before renaming.
train_df = pd.read_csv('../data/orignal/train.csv', index_col = 0)
test_df = pd.read_csv('../data/orignal/test.csv', index_col = 0)
combine_df = pd.concat([train_df, test_df])
# The target is log-transformed AFTER the concat, so combine_df still holds
# the raw SalePrice (NaN for test rows).
train_df["SalePrice"] = np.log1p(train_df["SalePrice"])
# +
numeric_feats = combine_df.dtypes[combine_df.dtypes != "object"].index
# Skewness is measured on the training set only to avoid peeking at test data.
skewed_feats = train_df[numeric_feats].apply(lambda x: skew(x.dropna())) #compute skewness
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index
combine_df[skewed_feats] = np.log1p(combine_df[skewed_feats])
# -
# get_dummies only has an effect on non-numeric (object) columns
combine_df = pd.get_dummies(combine_df)
# Mean-impute remaining missing values column-wise.
combine_df = combine_df.fillna(combine_df.mean())
# concat preserved row order, so the first train_df.shape[0] rows are train.
# NOTE(review): SalePrice is numeric and never excluded, so it survives into
# X_train/X_test (mean-imputed for test rows) -- confirm downstream drops it.
X_train_df = combine_df[:train_df.shape[0]]
X_test_df = combine_df[train_df.shape[0]:]
y_train_df = train_df.SalePrice
X_train_df.to_csv('../data/offline/X_train2.csv', header = True, index=True)
X_test_df.to_csv('../data/offline/X_test2.csv', header = True, index=True)
y_train_df.to_csv('../data/offline/y_train2.csv', header = True, index=True)
| process/EngineeringFeatures2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (sm)
# language: python
# name: stat18
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sbn
from modules.normal import Normal
from scipy.stats import norm
# # Opgave 4
#
# ### Del 2
mu = 2.6
sigma = np.sqrt(0.56)
# ### Analytisk
normal_dist = Normal(mu, sigma)
x = np.linspace(-1, 6)
y = list(map(normal_dist.pdf, x))
plt.plot(x, y)
normal_2 = norm(mu, sigma)
x_percentiles = [i/10 for i in range(1,10)]
y_percentiles = list(map(normal_2.ppf, x_percentiles))
plt.scatter([0 for _ in range(1,10)], y_percentiles)
# plotting the fan chart calculations
pd.DataFrame({'percentile': x_percentiles, 'value': y_percentiles})
# ### Opgave 5
def Ln(theta):
    """Log-likelihood for five observations: 5 * (ln(2/9) + ln(theta*(1-theta)))."""
    per_observation = np.log(2 / 9) + np.log(theta - theta**2)
    return 5 * per_observation
x = np.linspace(0.01, 0.99)
y = list(map(Ln, x))
plt.plot(x, y)
| Ugseseddel 10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
print(os.listdir())
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
u_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
users = pd.read_csv('ml-100k/u.user', sep='|', names=u_cols,
encoding='latin-1')
users.head()
r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
ratings = pd.read_csv('ml-100k/u.data', sep='\t', names=r_cols,
encoding='latin-1')
ratings.head()
i_cols = ['movie_id', 'movie_title' ,'release date','video release date', 'IMDb URL', 'unknown', 'Action', 'Adventure',
'Animation', 'Children\'s', 'Comedy', 'Crime', 'Documentary', 'Drama', 'Fantasy',
'Film-Noir', 'Horror', 'Musical', 'Mystery', 'Romance', 'Sci-Fi', 'Thriller', 'War', 'Western']
items = pd.read_csv('ml-100k/u.item', sep='|', names=i_cols, encoding='latin-1')
items.head()
dataset = pd.merge(pd.merge(items, ratings),users)
dataset.head()
# +
import sys
import numpy as np
import scipy.sparse as sparse
from scipy.sparse.linalg import spsolve
import random
from sklearn.preprocessing import MinMaxScaler
import implicit
# -
sparse_item_user = sparse.csr_matrix((dataset['rating'].astype(float),(dataset['movie_id'], dataset['user_id'])))
sparse_user_item = sparse.csr_matrix((dataset['rating'].astype(float),(dataset['user_id'], dataset['movie_id'])))
# ## Initialising ALS model
model = implicit.als.AlternatingLeastSquares(factors=20,regularization=0.1,iterations=200)
alpha_val = 15
data_conf = (sparse_item_user * alpha_val).astype('double')
model.fit(data_conf)
# # Find Similar Items
# ### Finding the 5 most similar movies to Twelve Monkey(movie_id = 7)
item_id = 7
n_similar = 5
similar = model.similar_items(item_id,n_similar)
for item in similar:
idx,score = item
print (dataset.movie_title.loc[dataset.movie_id == idx].iloc[0])
# # Find User Recommendation
user_id = 300
recommended = model.recommend(user_id,sparse_user_item)
movies = []
scores = []
for item in recommended:
idx,score = item
movies.append(dataset.movie_title.loc[dataset.movie_id==idx].iloc[0])
scores.append(score)
print(pd.DataFrame({"movies":movies, "scores:":scores}))
# All these are for user id 300
| ALS implementation/ALS Movie recommendation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import h5py
def load_data():
    """Load the cat / non-cat image dataset from local HDF5 files.

    Returns:
        (train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig)
        where the label arrays are reshaped to row vectors of shape (1, m).

    Fix: the original left both HDF5 file handles open; `with` closes them
    deterministically.
    """
    with h5py.File(r'./dataset/train_catvnoncat.h5', "r") as train_dataset:
        train_set_x_orig = np.array(train_dataset["train_set_x"][:])
        train_set_y_orig = np.array(train_dataset["train_set_y"][:])
    with h5py.File(r'./dataset/test_catvnoncat.h5', "r") as test_dataset:
        test_set_x_orig = np.array(test_dataset["test_set_x"][:])
        test_set_y_orig = np.array(test_dataset["test_set_y"][:])
    # Labels come back 1-D; the network expects (1, m) row vectors.
    train_set_y_orig = train_set_y_orig.reshape((1, train_set_y_orig.shape[0]))
    test_set_y_orig = test_set_y_orig.reshape((1, test_set_y_orig.shape[0]))
    return train_set_x_orig, train_set_y_orig, test_set_x_orig, test_set_y_orig
# +
#Load dataset
train_x, train_y, test_x, test_y = load_data()
train_x = train_x.reshape(train_x.shape[0],-1).T
test_x = test_x.reshape(test_x.shape[0],-1).T
train_x = train_x/255.0
test_x = test_x/255.0
# +
def sigmoid(Z):
    """Apply the logistic sigmoid 1 / (1 + e^(-Z)) element-wise."""
    activation = 1 / (1 + np.exp(-Z))
    assert activation.shape == Z.shape
    return activation
def relu(Z):
    """Rectified linear unit: element-wise max(0, z)."""
    activation = np.where(Z > 0, Z, 0)
    assert activation.shape == Z.shape
    return activation
def relu_derivative(dA, Z):
    """Backprop through ReLU: pass the gradient where Z > 0, zero elsewhere."""
    assert dA.shape == Z.shape
    # Boolean mask multiplies to 1 where the unit was active, 0 where clipped.
    dZ = dA * (Z > 0)
    return dZ
def sigmoid_derivative(dA, Z):
    """Backprop through sigmoid: dZ = dA * s(Z) * (1 - s(Z))."""
    sig = 1 / (1 + np.exp(-Z))
    gradient = dA * sig * (1 - sig)
    assert gradient.shape == Z.shape
    return gradient
def initialize_parameters(layer_sizes):
    """Initialize weights and biases for a fully-connected network.

    Parameters:
        layer_sizes: list of layer widths, input layer first, e.g. [12288, 20, 1].

    Returns:
        (weights, biases): weights[l] has shape (layer_sizes[l+1], layer_sizes[l]),
        biases[l] has shape (layer_sizes[l+1], 1).

    Fix: the original drew both weights and biases from np.random.rand
    (uniform on [0, 1)), making every parameter positive -- a known-poor
    initialization.  Standard practice is zero-mean Gaussian weights scaled
    small (randn * 0.01) and zero biases.
    """
    biases = []
    weights = []
    for l in range(1, len(layer_sizes)):
        biases.append(np.zeros((layer_sizes[l], 1)))
        weights.append(np.random.randn(layer_sizes[l], layer_sizes[l-1]) * 0.01)
    return weights, biases
def forward_prop(X, weights, biases):
    """Forward pass: ReLU hidden layers followed by a sigmoid output layer.

    Parameters:
        X: input matrix of shape (n_features, m_examples).
        weights, biases: per-layer parameter lists from initialize_parameters.

    Returns:
        (AL, Z_values, A_values): output activations, plus the per-layer
        pre-activations and activations cached for backprop.

    Fix: the original built the output pre-activation from the loop variable
    `A`, which is unbound when there are no hidden layers
    (len(weights) == 1) and raised NameError; use A_values[-1] instead.
    """
    Z_values = []
    A_values = [X]
    for l in range(len(weights) - 1):
        Z = np.dot(weights[l], A_values[-1]) + biases[l]
        Z_values.append(Z)
        A_values.append(relu(Z))
    # Output layer: sigmoid squashes to (0, 1) for binary classification.
    ZL = np.dot(weights[-1], A_values[-1]) + biases[-1]
    Z_values.append(ZL)
    AL = sigmoid(ZL)
    A_values.append(AL)
    return AL, Z_values, A_values
def calculate_cost(A, y):
    """Mean binary cross-entropy between predictions A and labels y (shape (1, m))."""
    m = y.shape[1]
    per_example = -(y * np.log(A) + (1 - y) * np.log(1 - A))
    cost = np.sum(per_example) / m
    return cost
def backward_prop(AL,Y, Z_values, A_values,weights):
    """Backward pass: gradients of the cross-entropy cost w.r.t. every layer.

    Parameters:
        AL: output activations from forward_prop, shape (1, m).
        Y: true labels, shape (1, m).
        Z_values: per-layer pre-activations cached by forward_prop.
        A_values: per-layer activations cached by forward_prop (A_values[0] is X).
        weights: per-layer weight matrices.

    Returns:
        (grads_weights, grads_biases): gradient lists aligned with weights/biases.
    """
    # print("len weights",len(weights))
    # print("len zvalues",len(Z_values))
    L=len(Z_values)
    # print(L)
    # print(AL.shape, Y.shape)
    m=Y.shape[1]
    grads_weights=[]
    grads_biases=[]
    # Derivative of cross-entropy w.r.t. the output activations.
    dAL = - (np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    # print(dAL.shape)
    # print("Z_value[L-1]", Z_values[L-1].shape)
    # Output layer uses the sigmoid derivative; hidden layers use ReLU below.
    dZ = sigmoid_derivative(dAL,Z_values[L-1])
    # print("dZ", dZ.shape)
    # print("A_value[L-2]", A_values[L-1].shape)
    # A_values is offset by one from Z_values: A_values[l] feeds layer l.
    dW = 1./m * np.dot(dZ,A_values[L-1].T)
    # print("dW",dW.shape)
    db = 1./m * np.sum(dZ, axis = 1, keepdims = True)
    # print("dB",db.shape)
    # insert(0, ...) keeps gradients ordered front-to-back like weights/biases.
    grads_weights.insert(0,dW)
    grads_biases.insert(0,db)
    # Walk backwards through the hidden layers.
    for l in reversed(range(L-1)):
        # print(l)
        # print("weights[0]",(weights[l].shape))
        dA = np.dot(weights[l+1].T,dZ)
        dZ = relu_derivative(dA,Z_values[l])
        dW = 1./m * np.dot(dZ,A_values[l].T)
        db = 1./m * np.sum(dZ, axis = 1, keepdims = True)
        grads_weights.insert(0,dW)
        grads_biases.insert(0,db)
    return (grads_weights, grads_biases)
def gradient_descent(learning_rate, grads_weights, grads_biases, weights, biases):
    """One vanilla gradient-descent step; updates the parameter lists in place and returns them."""
    for idx, (dW, db) in enumerate(zip(grads_weights, grads_biases)):
        weights[idx] = weights[idx] - learning_rate * dW
        biases[idx] = biases[idx] - learning_rate * db
    return weights, biases
# +
def model(X, Y, layers_dims, learning_rate = 0.01, num_iterations = 1000, print_cost=True):
    """Train the fully-connected network with batch gradient descent.

    Parameters:
        X: training inputs, shape (n_features, m_examples).
        Y: binary labels, shape (1, m_examples).
        layers_dims: layer widths, input first, e.g. [12288, 20, 1].
        learning_rate: gradient-descent step size.
        num_iterations: number of full-batch update steps.
        print_cost: when True, log the cost every 100 iterations and plot it.

    Returns:
        (weights, biases): the trained parameter lists.
    """
    costs=[]
    weights, biases = initialize_parameters(layers_dims)
    for i in range(num_iterations):
        AL, Z_values, A_values = forward_prop(X, weights, biases)
        cost = calculate_cost(AL, Y)
        grads_weights, grads_biases = backward_prop(AL, Y, Z_values, A_values, weights)
        weights, biases = gradient_descent(learning_rate, grads_weights, grads_biases, weights, biases)
        # Print the cost every 100 training example
        if print_cost and i % 100 == 0:
            print ("Cost after iteration "+str(i)+": "+str(cost))
            costs.append(cost)
    # plot the cost
    if print_cost:
        plt.plot(costs)
        plt.ylabel('cost')
        plt.xlabel('iterations (per hundreds)')
        plt.title("Learning rate = " + str(learning_rate))
        plt.show()
    return weights, biases
# Train a 12288-20-1 network on the flattened, normalized cat images.
weights, biases = model(train_x, train_y, [12288,20,1], learning_rate = 0.04, num_iterations=1500)
# -
def predict(X, y, weights, biases, threshold):
    """Threshold the network's sigmoid outputs and return accuracy against y.

    A prediction is 1 when the output activation exceeds `threshold`,
    otherwise 0.  Returns np.sum((p == y) / m), i.e. the fraction of
    correct predictions.
    """
    m = X.shape[1]
    A, _, _ = forward_prop(X, weights, biases)
    # Vectorized thresholding replaces the original element-by-element loop.
    p = (A > threshold).astype(int)
    #print results
    # print ("predictions: " + str(p))
    # print ("true labels: " + str(y))
    # print("Accuracy: " + str(np.sum((p == y)/m)))
    return np.sum((p == y) / m)
scores_train=[]
thresholds=[0.1,0.2,0.3,0.4,0.45,0.5,0.55,0.6,0.7,0.8]
for thresh in thresholds:
score=predict(train_x, train_y, weights, biases, thresh)
scores_train.append(score)
print(max(scores_train))
# +
scores_test=[]
for thresh in thresholds:
score=predict(test_x, test_y, weights, biases,thresh)
scores_test.append(score)
print(max(scores_test))
plt.plot(thresholds,scores_test,'r')
plt.plot(thresholds,scores_train,'b')
plt.ylabel('accuracy')
plt.xlabel('threshold value')
plt.show()
# -
| ml/Neural Network/ANN_equations_Implementation/NN_Implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VacationPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import json
import gmaps
import os
from pprint import pprint
# Import API key
from api_keys import g_key
# -
# ### Store Part I results into DataFrame
# * Load the csv exported in Part I to a DataFrame
#import "clean cities csv"
file = "../output_data/clean_cities_data.csv"
weather_df = pd.read_csv(file)
weather_df
# ### Humidity Heatmap
# * Configure gmaps.
# * Use the Lat and Lng as locations and Humidity as the weight.
# * Add Heatmap layer to map.
#access maps with unique API key
gmaps.configure(api_key = g_key)
# +
#create coordinates
records = weather_df[["Latitude", "Longitude"]].to_records(index=False)
coordinates = list(records)
weights = weather_df['Humidity']
#customize map
figure_layout = {'width': '500px', 'margin': '0 auto 0 auto'}
fig = gmaps.figure(layout=figure_layout)
heat_layer = gmaps.heatmap_layer(coordinates, weights = weights)
#assign marker layer
fig.add_layer(heat_layer)
fig
# -
# ### Create new DataFrame fitting weather criteria
# * Narrow down the cities to fit weather conditions.
# * Drop any rows will null values.
#drop null values
vacation_df = weather_df.dropna(axis=0)
vacation_df
# +
#select only ideal cities
#Criteria - Temperature >70 <85 degrees, cloudiness <50, humidity <60
ideal_cities_df = vacation_df.loc[(vacation_df["Temperature"] >=75) &
(vacation_df["Temperature"] <=80) &
(vacation_df["Cloudiness"] <50) &
(vacation_df["Humidity"] <50)]
ideal_cities_df
# -
# ### Hotel Map
# * Store into variable named `hotel_df`.
# * Add a "Hotel Name" column to the DataFrame.
# * Set parameters to search for hotels with 5000 meters.
# * Hit the Google Places API for each city's coordinates.
# * Store the first Hotel result into the DataFrame.
# * Plot markers on top of the heatmap.
#create hotel df
hotel_df = ideal_cities_df.filter(["City Name", "Country Name", "Latitude", "Longitude"], axis=1)
#rename column headers to match requirements below for map
hotel_df.rename(columns={'City Name':'City',
'Country Name': 'Country',
'Latitude':'Lat',
'Longitude':'Lng'},inplace=True)
hotel_df["Hotel Name"] = ""
hotel_df
# +
# Query the Google Places "Nearby Search" API for a hotel near each ideal city.
base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
# Loop through the coordinates of each row and record the first hotel found.
for index, row in hotel_df.iterrows():
    # Location pulled from the DataFrame for this candidate destination.
    latitude = row["Lat"]
    longitude = row["Lng"]
    city = row["City"]
    # Places expects a single "lat,lng" string.
    locations = f"{latitude},{longitude}"
    # Search for lodging within a 5000 m radius of the city's coordinates.
    params = {"location": locations,
              "keyword": "lodging",
              "radius": 5000,
              #"rankby":"distance",
              "key": g_key,}
    # Issue the request and parse the JSON body.
    response = requests.get(base_url, params=params).json()
    # extract results - print for testing
    # print(json.dumps(response, indent=4, sort_keys=True))
    # Missing "results" key or an empty result list means no hotel was found.
    try:
        print("------------------------------------")
        print(f' Hotel found within 5000m of {city}')
        print("------------------------------------")
        hotel_df.loc[index, "Hotel Name"] = response["results"][0]["name"]
    except (KeyError, IndexError):
        print("------------------------------------")
        # Fix: message previously read "Not result found... skipping."
        print("No result found... skipping.")
        print("------------------------------------")
        hotel_df.loc[index, "Hotel Name"] = "N/A"
# -
#drop any with N/A value
#hotel_df = hotel_df[hotel_df["Hotel Name"] != "N/A"]
#view hotel names on df
hotel_df
# +
# NOTE: Do not change any of the code in this cell
# Using the template add the hotel marks to the heatmap
info_box_template = """
<dl>
<dt>Name</dt><dd>{Hotel Name}</dd>
<dt>City</dt><dd>{City}</dd>
<dt>Country</dt><dd>{Country}</dd>
</dl>
"""
# Store the DataFrame Row
# NOTE: be sure to update with your DataFrame name
hotel_info = [info_box_template.format(**row) for index, row in hotel_df.iterrows()]
locations = hotel_df[["Lat", "Lng"]]
# +
# Add marker layer ontop of heat map
marker_layer = gmaps.marker_layer(
locations,
info_box_content=hotel_info,
display_info_box=True)
# Add layer
fig.add_layer(marker_layer)
#update figure display
#gmaps.figure( zoom_level=16, center=(36.0999,80.2442)) #coordinates are for Winston Salem, NC. I wanted to default the zoom if I could.
# Display the map
fig
# -
| starter_code/VacationPy_APT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/7/7d/Copper_Price_History_USD.png" width="600px" height="400px" />
#
# # Descarga y manipulación de precios históricos
#
# *Objetivos:*
# - Aprender a importar datos desde archivos separados por comas (extensión `.csv`).
# - Descargar el paquete `pandas-datareader`.
# - Aprender a descargar datos desde fuentes remotas.
#
# **Referencias:**
# - http://pandas.pydata.org/
# - https://pandas-datareader.readthedocs.io/en/latest/
# ___
# ## 1. Importar datos desde archivos locales
#
# <img style="float: left; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/8/86/Microsoft_Excel_2013_logo.svg" width="300px" height="125px" />
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="https://upload.wikimedia.org/wikipedia/commons/0/0a/Python.svg" width="300px" height="125px" />
# ### 1.1. ¿Porqué?
#
# - Muchas veces tenemos bases de datos proporcionadas como archivos locales.
# - Para poder analizar, procesar y tomar decisiones con estos datos, es necesario importarlos a python.
# - Ejemplos de archivos donde comúnmente se guardan bases de datos son:
# - `.xls` o `.xlsx`
# - `.cvs`
# - Excel es ampliamente usado en distintos campos de aplicación en todo el mundo.
# - Nos guste o no, esto también aplica a ciencia de datos (ingeniería financiera).
# - Muchos de ustedes en su futuro académico y profesional tendrán que trabajar con estas hojas de cálculo, pero no siempre querrán trabajar directamente con ellas si tienen que hacer un análisis un poco más avanzado de los datos.
# - Por eso en Python se han implementado herramientas para leer, escribir y manipular este tipo de archivos.
#
# En esta clase veremos cómo podemos trabajar con Excel y Python de manera básica utilizando la librería *pandas*.
# ### 1.2. Reglas básicas para antes de leer hojas de cálculo
#
# Antes de comenzar a leer una hoja de cálculo en Python (o cualquier otro programa), debemos considerar el ajustar nuestro archivo para cumplir ciertos principios, como:
#
# - La primer fila de la hoja de cálculo se reserva para los títulos, mientras que la primer columna se usa para identificar la unidad de muestreo o indización de los datos (tiempo, fecha, eventos...)
# - Evitar nombres, valores o campos con espacios en blanco. De otra manera, cada palabra se interpreta como variable separada y resultan errores relacionados con el número de elementos por línea.
# - Los nombres cortos se prefieren sobre nombre largos.
# - Evite símbolos como ?, $, %, ^, &, *, (,),-,#, ?, ,,<,>, /, |, \, [ ,] , {, y }.
# - Borre cualquier tipo de comentario que haya hecho en su archivo para evitar columnas extras.
# - Asegúrese de que cualquier valor inexistente esté indicado como NA.
#
# Si se hizo algún cambio, estar seguro de guardarlo.
#
# Si estás trabajando con Microsoft Excel, verás que hay muchas opciones para guardar archivos, a parte de las extensiones por defecto .xls or .xlsx. Para esto ir a “Save As” y seleccionar una de las extensiones listadas en “Save as Type”.
#
# La extensión más común es .csv (archivos de texto separados por comas).
# **Actividad.** Descargar precios de acciones de Apple (AAPL) de Yahoo Finance, con una ventana de tiempo desde el 01-01-2015 al 31-12-2017 y frecuencia diaria.
#
# - Ir a https://finance.yahoo.com/.
# - Buscar cada una de las compañías solicitadas.
# - Dar click en la pestaña *'Historical Data'*.
# - Cambiar las fechas en *'Time Period'*, click en *'Apply'* y, finalmente, click en *'Download Data'*.
# - **¡POR FAVOR! GUARDAR ESTOS ARCHIVOS EN UNA CARPETA LLAMADA precios EN EL MISMO DIRECTORIO DONDE TIENEN ESTE ARCHIVO**.
# ### 1.3. Carguemos archivos .csv como ventanas de datos de pandas
#
# Ahora podemos comenzar a importar nuestros archivos.
#
# Una de las formas más comunes de trabajar con análisis de datos es en pandas. Esto es debido a que pandas está construido sobre NumPy y provee estructuras de datos y herramientas de análisis fáciles de usar.
# +
import numpy as np
import datetime
import scipy.stats as stats
# Importamos pandas
import pandas as pd
#algunas opciones para Pandas
# pd.set_option('display.notebook_repr_html', False)
# pd.set_option('display.max_columns', 6)
# pd.set_option('display.max_rows', 10)
# pd.set_option('display.width', 78)
# pd.set_option('precision', 3)
pd.set_option('display.max_rows', 30)
# -
# Para leer archivos `.csv`, utilizaremos la función `read_csv` de pandas:
# Función read_csv
help(pd.read_csv)
# Cargamos hoja de calculo en un dataframe
file_name = 'Precios/AAPL.csv'
aapl = pd.read_csv(file_name)
aapl
# #### Anotación #1
# - Quisieramos indizar por fecha.
# Cargamos hoja de calculo en un dataframe
aapl = pd.read_csv(file_name, index_col=['Date'])
aapl
# Graficar precios de cierre y precios de cierre ajustados
import matplotlib.pyplot as plt
# %matplotlib inline
aapl[['Close', 'Adj Close']].plot(figsize=(8,8))
plt.show()
# #### Anotación #2
# - Para nuestra aplicación solo nos interesan los precios de cierre de las acciones (columna Adj Close).
# Cargamos hoja de calculo en un dataframe
aapl = pd.read_csv(file_name, index_col=['Date'], usecols=['Date', 'Adj Close'])
aapl.columns = ['AAPL']
aapl
# **Actividad.** Importen todos los archivos .csv como acabamos de hacerlo con el de apple. Además, crear un solo DataFrame que cuyos encabezados por columna sean los nombres respectivos (AAPL, AMZN,...) y contengan los datos de precio de cierre.
#
# > Leer archivos usando el paquete `os`: [link](https://realpython.com/working-with-files-in-python/)
# +
import os
# List all files in a directory using os.listdir ---> os.path.isfile check if is a file
basepath = 'Precios'
files = [os.path.join(basepath, os.listdir(basepath)[i]) for i in range(len(os.listdir(basepath)))]
files
# -
# Read the data of Adj Close for each file
data = pd.concat([pd.read_csv(files[i], usecols= ['Date', 'Adj Close'], index_col= ['Date'])
for i in range(len(files))], axis = 1 )
data.columns = os.listdir(basepath)
data
data[['AAPL.csv', 'AMZN.csv']].plot()
# ## 2. Descargar los datos remotamente
# Para esto utilizaremos el paquete *pandas_datareader*.
#
# **Nota**: Usualmente, las distribuciones de Python no cuentan, por defecto, con el paquete *pandas_datareader*. Por lo que será necesario instalarlo aparte:
# - buscar en inicio "Anaconda prompt" y ejecutarlo como administrador;
# - el siguiente comando instala el paquete en Anaconda: **conda install pandas-datareader**;
# - una vez finalice la instalación correr el comando: *conda list*, y buscar que sí se haya instalado pandas-datareader
# !conda install pandas-datareader
# Importar el modulo data del paquete pandas_datareader. La comunidad lo importa con el nombre de web
import pandas as pd
import pandas_datareader.data as web
from datetime import datetime
import matplotlib.pyplot as plt
# El módulo data del paquete pandas_datareader contiene la funcion `DataReader`:
# Función DataReader
help(web.DataReader)
# - A esta función le podemos especificar la fuente de los datos para que se use la api específica para la descarga de datos de cada fuente.
# - Fuentes:
# - Google Finance: se tiene acceso a su api a través de Stooq Index Data.
# - Quandl: solo permite descargar datos de equities estadounidenses de manera gratuita. Es la base de datos más completa. Si se desea usar hay que crear una cuenta para autenticarse en la API.
# - IEX: los datos tienen antiguedad máxima de 5 años y de equities estadounidenses.
# - Yahoo! Finance: su api ha tenido cambios significativos y ya no es posible usarla desde DataReader. Sin embargo permite obtener datos de distintas bolsas (incluida la mexicana), por eso le haremos la luchita.
#
# > Enlace de las API disponibles de DataReader [link](https://pandas-datareader.readthedocs.io/en/latest/remote_data.html)
datetime.today()
# Ejemplo google finance
ticker = ['AAPL', 'KO']
source = 'stooq'
start = '2015-01-01'
end = datetime.today()
aapl_goo = web.DataReader(ticker, source, start=start, end=end)
aapl_goo
# ## - Precios desde `quandl`
# >Página oficial de `quandl` para crear cuenta y tutorial de instalación de su api
# > Recuerden que cuando se usa anaconda no se debe de usar el comando `pip` o `pip3` sino `conda`, por ejemplo en este caso sería `conda install quandl`
#
# > https://docs.quandl.com/docs/python-installation
#
# 
#
# Tu api_key lo encuentras en los detalles de tu cuenta después de haber creado un usuario
# !conda install quandl
# +
# Ejemplo quandl
import quandl
######################### USar la api key que les arroja la página de quandl
quandl.ApiConfig.api_key = "<KEY>"
ticker = ['AAPL', 'MSFT', 'KO']
date = {'gte': '2016-01-01', 'lte': datetime.today() }
column = {'columns': ['ticker', 'date', 'Adj_close']}
data = quandl.get_table('WIKI/PRICES', qopts = column, ticker = ticker, date = date)# ticker = 'WIKI/AAPL' #'AAPL.US'
# Poner los índices como las fechas
data = data.set_index('date')
data
# Seleccionar los ADJ_CLOSE de ticker y renombrar las columnas
data_n = [data.loc[data['ticker'] == ti, 'adj_close'] for ti in ticker]
data_n = pd.concat(data_n, axis=1)
data_n.columns = ticker
data_n
#### data.loc[data['ticker']=='AAPL','adj_close']
# -
# Gráfica de precios
data_n.plot(figsize=(9,8))
# ### Uso de Pandas para bajar datos de Yahoo! Finance
# * Intentamos con la función YahooDailyReader y con la función DataReader
# +
# help(web.YahooDailyReader)
# -
# YahooDailyReader
ticker = ['AEROMEX.MX', 'GCARSOA1.MX']
start = '2015-01-01'
end = datetime.today()
aapl_yah = web.YahooDailyReader(ticker, start, end, interval='d').read()
aapl_yah['Adj Close']
# Observar que se puede usar usando las dos librerías
closes = web.DataReader(name=ticker, data_source='yahoo', start=start, end=end)
closes['Adj Close']
# Para efectos del curso y debido a que en yahoo finance podemos tener acceso a activos de la bolsa méxicana vamos a utilizar de acá en adelante el paquete de DataReader y la siguiente función para descargar precios de distintos activos:
# Función para descargar precios de cierre ajustados:
def get_adj_closes(tickers, start_date=None, end_date=None):
    """Download adjusted close prices from Yahoo! Finance via pandas-datareader.

    Parameters:
        tickers: single ticker string or list of tickers (e.g. ['AAPL', '^GSPC']).
        start_date, end_date: 'YYYY-MM-DD' strings or None to use DataReader's
            defaults (start ≈ 2010-01-01, end = today).

    Returns:
        DataFrame (or Series for a single ticker) of 'Adj Close' prices
        indexed by date in ascending order.
    """
    # Default start date (start_date='2010-01-01') and default end date (end_date=today)
    # Download a DataFrame with all the price fields (Open, High, ..., Adj Close)
    closes = web.DataReader(name=tickers, data_source='yahoo', start=start_date, end=end_date)
    # We only need the adjusted close prices
    closes = closes['Adj Close']
    # Sort the index in ascending date order
    closes.sort_index(inplace=True)
    return closes
# Ejemplo: 'AAPL', 'MSFT', 'NVDA', '^GSPC'
ticker = ['AAPL', 'MSFT', 'NVDA', '^GSPC']
start = '2018-01-01'
end = None
closes = get_adj_closes(tickers=ticker, start_date=start, end_date=end)
closes
# Gráfica de datos
closes.plot()
# **Nota**: Para descargar datos de la bolsa mexicana de valores (BMV), el ticker debe tener la extensión MX.
# Por ejemplo: *MEXCHEM.MX*, *LABB.MX*, *GFINBURO.MX* y *GFNORTEO.MX*.
#
# Como se puede notar, en este caso se consideran tres activos
# - Nvidia:NVDA
# - Apple: AAPL
# - Microsoft: MSFT
#
# y, el índice
#
# - Standard & Poor's: 500S&P500.
#
# Todos almacenados en la variable *closes*.
# El objeto *assets* tiene la característica *items*. Con estos, se pueden verificar los registros almacenados
closes.columns
# Acceder a alguna posición específica de la variable *closes*
# Usao de la función iloc
closes.iloc[0, 0]
# Si deseamos encontrar los precios de cierre en una fecha específica usamos
# Uso de la función loc
closes.loc['2018-01-02', 'AAPL']
# O, finalmente, los valores del S&P500
# Selección de alguna columna
closes['AAPL']
# ### Actividad
# Obtener datos históricos de
# - GRUPO CARSO, S.A.B. DE C.V.
# - GRUPO FINANCIERO INBURSA, S.A.B. DE C.V.
# - GRUPO FINANCIERO BANORTE, S.A.B DE C.V.
# - GRUPO AEROMÉXICO, S.A.B. DE C.V.
#
# en el año 2014.
#
# 1. ¿Qué compañía reportó precios de cierre más altos en *2014-07-14*?
# 2. Obtener los precios de cierre de cada compañía en todo el año.
# 3. Comparar, para cada compañía, los precios de cierre entre *2014-01-02* y *2014-12-31*.
#
# > Revisar los nombres de estas acciones en yahoo: https://finance.yahoo.com/
# +
# Yahoo Finance ticker symbols of the Mexican (BMV) stocks — note the .MX suffix
ticker_mx = ['GCARSOA1.MX', 'GFINBURO.MX', 'GFNORTEO.MX', 'AEROMEX.MX']
start = '2014-01-02'
end = '2014-12-31'
assets_mx = get_adj_closes(tickers=ticker_mx, start_date=start, end_date=end)
assets_mx
# -
# Closing prices on the date 2014-07-14
assets_mx_20140714 = assets_mx.loc['2014-07-14']
assets_mx_20140714
# Highest closing price on 2014-07-14, and the ticker that reported it
assets_mx_20140714.max(), assets_mx_20140714.idxmax()
assets_mx_20140714
# Access particular rows of the prices by position (iloc): first and last trading day
assets_mx.iloc[[0, -1], :]
# Difference between those two rows (price change over the whole year)
assets_mx.iloc[[0, -1], :].diff().iloc[1:]
# Histogram of the daily percentage returns
assets_mx.pct_change(periods=1).hist()
# # 2. Graficos de las series de datos
# En primer lugar, se toma como ejemplo la serie de precios `AEROMEX.MX`, así como el volumen de transacciones.
# +
ticker = 'AEROMEX.MX'
start = '2015-01-01'
end = datetime.today()
# Download daily OHLCV data for Aeroméxico from Yahoo Finance
aero_mx = web.DataReader(ticker, data_source='yahoo', start=start, end=end)
aero_mx
# Extract the adjusted closing prices
clos_aero_mx = aero_mx['Adj Close']
# Extract the traded volumes
vol_aero_mx = aero_mx['Volume']
# Check that both series have the same length
clos_aero_mx.shape, vol_aero_mx.shape
# -
# A quick plot of the price series
clos_aero_mx.plot()
# Similarly, plot the traded-volume series
vol_aero_mx.plot(figsize=(10,8))
# Usualmente, es conveniente graficar al precio de cierre de una acción en conjunto con su volumen de transacciones. El siguiente es un ejemplo de esta clase de graficas para el caso de Aeroméxico.
# +
# Plot of the adjusted closing prices together with the traded volumes
fig, ax = plt.subplots(2, 1, sharex=True, figsize=(12,7))
clos_aero_mx.plot(ax=ax[0], label='PCA')
ax[0].legend()
ax[0].set_title('Precios de cierre ajustado')
ax[0].grid()
ax[1].bar(vol_aero_mx.index, vol_aero_mx.values, label='VT')
ax[1].legend()
ax[1].set_title('Volúmenes de transacción')
ax[1].grid()
# +
############## Plotting approach 1: subplot2grid layout (price on top, volume below)
top = plt.subplot2grid((4,4), (0, 0), rowspan=2, colspan=4)
top.plot(clos_aero_mx.index, clos_aero_mx, label='Precio ajustado en el cierre')
plt.title('Aeroméxico: Precio ajustado en el cierre 2014 - 2016')
plt.legend(loc='best')
bottom = plt.subplot2grid((4,4), (2, 0), rowspan=1, colspan=4)
bottom.bar(vol_aero_mx.index, vol_aero_mx)
plt.title('Aeroméxico: Volumen diario de transacción de la acción')
plt.gcf().set_size_inches(12,8)
plt.subplots_adjust(hspace=0.75)
############## Otra forma de graficar
# plt.figure(figsize=(10,10))
# plt.subplot(2,1,1)
# plt.plot(clos_aero_mx.index, clos_aero_mx, label='Precio ajustado en el cierre')
# plt.title('Aeroméxico: Precio ajustado en el cierre 2014 - 2016')
# plt.legend(loc='best')
# plt.xlim([clos_aero_mx.index[0],clos_aero_mx.index[-1]])
# plt.show()
# plt.figure(figsize=(10,5))
# plt.subplot(2,1,2)
# plt.bar(vol_aero_mx.index, vol_aero_mx)
# plt.title('Aeroméxico: Volumen diario de transacción de la acción')
# plt.xlabel('Date')
# plt.xlim([vol_aero_mx.index[0],vol_aero_mx.index[-1]])
# plt.ylim([0,.8e7])
# plt.show()
# -
# Otro procedimiento que se efectúa con frecuencia, es el cálculo de promedios y desviaciones móviles para la serie de precios. Los promedios móviles se calculan mediante:
# Rolling means of the adjusted closing prices with 20- and 100-day windows
short_rollmean = clos_aero_mx.rolling(window=20).mean()
long_rollmean = clos_aero_mx.rolling(window=100).mean()
# Plot the prices together with the rolling means just computed
# Plot of the adjusted closing prices and their rolling means
short_rollmean.plot(figsize=(10,8), label='Media móvil con ventana de 20 días', c='b')
long_rollmean.plot(label='Media móvil con ventana de 100 días', c='r')
clos_aero_mx.plot(label='Precios de cierre ajustado', c='g')
plt.legend()
plt.show()
# Rolling standard deviations over the same windows
short_rollstd_AM_AC = clos_aero_mx.rolling(window=20).std()
long_rollstd_AM_AC = clos_aero_mx.rolling(window=100).std()
# ... and their plots: price bracketed by +/- one rolling standard deviation
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(1,1,1)
ax.plot(clos_aero_mx.index, clos_aero_mx, label = 'Precios de Aeroméxico')
ax.plot(clos_aero_mx.index, clos_aero_mx+short_rollstd_AM_AC, label = '+ Desviación ventana 20 días')
ax.plot(clos_aero_mx.index, clos_aero_mx-short_rollstd_AM_AC, label = '- Desviación ventana 20 días')
ax.set_xlabel('Fecha')
ax.set_ylabel('Precios Aeroméxico en 2014-2016')
ax.legend(loc='best')
fig = plt.figure(figsize=(12,8))
ax = fig.add_subplot(1,1,1)
ax.plot(clos_aero_mx.index, clos_aero_mx, label = 'Precios de Aeroméxico')
ax.plot(clos_aero_mx.index, clos_aero_mx+long_rollstd_AM_AC, label = '+ Desviación ventana 100 días')
ax.plot(clos_aero_mx.index, clos_aero_mx-long_rollstd_AM_AC, label = '- Desviación ventana 100 días')
ax.set_xlabel('Fecha')
ax.set_ylabel('Precios Aeroméxico en 2014-2016')
ax.legend(loc='best')
# We can also plot the prices of the US stocks
closes.plot(figsize=(8,5))
# The S&P 500 index level dwarfs the individual stock prices, so plot only the stocks
closes[['AAPL','MSFT','NVDA']].plot()
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME> and modified by <NAME>.
# </footer>
| TEMA-3/Clase21_DescargaHistoricosOpciones.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/olgOk/XanaduTraining/blob/master/Xanadu4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="hPqkd2RGkoek" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 211} outputId="c4ceae86-f96c-40a6-d3ad-f4533231c44a"
pip install pennylane
# + id="WiA0DztKlNq3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 558} outputId="cabbad2c-882e-4c6d-b551-c0083eeb152c"
pip install pennylane-qchem
# + id="xRxz0mHElU5V" colab_type="code" colab={}
import pennylane as qml
from pennylane import numpy as np
# + [markdown] id="VebhJDBl0bBx" colab_type="text"
# ## Quantum Chemistry with PennyLane
# + id="ChbFF0BVzbhj" colab_type="code" colab={}
# Specify the molecule and its parameters
geometry = 'h2.xyz' # file holding the atomic species and their x, y, z coordinates
charge = 0 # net electric charge of the molecule (0 = neutral); NOT the electron count
multiplicity = 1 # spin multiplicity 2S+1 of the ground state (1 = singlet, all electrons paired)
basis_set = 'sto-3g' # minimal Gaussian basis set
name = 'h2'
# + id="VZhwGbh40P_e" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 363} outputId="c6d5afa5-0b5c-4845-fb58-2d610bf550ae"
# Build the qubit Hamiltonian of H2 (2 active electrons in 2 active orbitals,
# Jordan-Wigner fermion-to-qubit mapping) and the number of qubits it acts on.
h, nr_qubits = qml.qchem.generate_hamiltonian(name,
                                              geometry,
                                              charge,
                                              multiplicity,
                                              basis_set,
                                              n_active_electrons = 2,
                                              n_active_orbitals = 2,
                                              mapping = 'jordan_wigner'
                                              )
# + id="Vp3BHJPu08ME" colab_type="code" colab={}
print("Number of qubits = ", nr_qubits)
print("Hamiltonian is ", h)
# + id="XExwK2EF3Xa0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 130} outputId="6ac32763-7164-4323-cef4-ac7a1b889ea2"
dev = qml.device('default.qubit', wires = nr_qubits)  # state-vector simulator, one wire per qubit
def circuit(params, wires):
    """Variational ansatz for the VQE run.

    Prepares the Hartree-Fock reference state |1100>, applies one general
    single-qubit rotation (three Euler angles taken from ``params``) on each
    wire, then entangles the wires with a fixed pattern of CNOTs.
    """
    qml.BasisState(np.array([1, 1, 0, 0]), wires=wires)
    for wire in wires:
        qml.Rot(*params[wire], wires=wire)
    for control, target in ((2, 3), (2, 0), (3, 1)):
        qml.CNOT(wires=[control, target])
# + id="t8x6NIA54CRc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 165} outputId="26e1d12a-6315-410f-d984-b1e48f6ebd0f"
# Build the VQE cost function (expectation of the Hamiltonian under the ansatz).
cost_fn = qml.VQECost(circuit, h ,dev)
opt = qml.GradientDescentOptimizer(stepsize=0.4)
# Random initial rotation angles: one (3,) Euler-angle set per qubit.
params = np.random.normal(0, np.pi, (nr_qubits, 3))
# Fix: `max_iterations` was referenced below without ever being defined,
# which raised a NameError before the loop could run.
max_iterations = 200
for n in range(max_iterations):
    params = opt.step(cost_fn, params)
# + id="dmTF3Kwx4zL4" colab_type="code" colab={}
| Xanadu4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# + nbsphinx="hidden"
# Delete this cell to re-enable tracebacks
import sys
ipython = get_ipython()
# Replacement for IPython's traceback printer: render only the final
# exception line instead of the full traceback (keeps rendered docs short).
# The signature mirrors InteractiveShell.showtraceback so it can be swapped in.
def hide_traceback(exc_tuple=None, filename=None, tb_offset=None,
                   exception_only=False, running_compiled_code=False):
    etype, value, tb = sys.exc_info()
    return ipython._showtraceback(etype, value, ipython.InteractiveTB.get_exception_only(etype, value))
# Monkey-patch the running shell so every subsequent error uses the short form.
ipython.showtraceback = hide_traceback
# + nbsphinx="hidden"
# JSON output syntax highlighting
from __future__ import print_function
from pygments import highlight
from pygments.lexers import JsonLexer, TextLexer
from pygments.formatters import HtmlFormatter
from IPython.display import display, HTML
from IPython.core.interactiveshell import InteractiveShell
# Display every top-level expression in a cell, not only the last one.
InteractiveShell.ast_node_interactivity = "all"
def json_print(inpt):
    """Render *inpt* as syntax-highlighted HTML.

    Uses the JSON lexer when the text looks like a JSON object (starts
    with '{'), and a plain-text lexer otherwise.
    """
    string = str(inpt)
    formatter = HtmlFormatter()
    # Fix: startswith() is safe on an empty string, whereas string[0]
    # raised IndexError when printing "".
    if string.startswith('{'):
        lexer = JsonLexer()
    else:
        lexer = TextLexer()
    return HTML('<style type="text/css">{}</style>{}'.format(
        formatter.get_style_defs('.highlight'),
        highlight(string, lexer, formatter)))
# Route the notebook's print() through the highlighter.
globals()['print'] = json_print
# -
# ## Custom STIX Content
# ### Custom Properties
#
# Attempting to create a STIX object with properties not defined by the specification will result in an error. Try creating an ``Identity`` object with a custom ``x_foo`` property:
# +
from stix2 import Identity
# Passing an undeclared property directly raises an error:
Identity(name="<NAME>",
         identity_class="individual",
         x_foo="bar")
# -
# To create a STIX object with one or more custom properties, pass them in as a dictionary parameter called ``custom_properties``:
identity = Identity(name="<NAME>",
                    identity_class="individual",
                    custom_properties={
                        "x_foo": "bar"
                    })
print(identity)
# Alternatively, setting ``allow_custom`` to ``True`` will allow custom properties without requiring a ``custom_properties`` dictionary.
identity2 = Identity(name="<NAME>",
                     identity_class="individual",
                     x_foo="bar",
                     allow_custom=True)
print(identity2)
# Likewise, when parsing STIX content with custom properties, pass ``allow_custom=True`` to [parse()](../api/stix2.core.rst#stix2.core.parse):
# +
from stix2 import parse
input_string = """{
"type": "identity",
"id": "identity--311b2d2d-f010-4473-83ec-1edf84858f4c",
"created": "2015-12-21T19:59:11Z",
"modified": "2015-12-21T19:59:11Z",
"name": "<NAME>",
"identity_class": "individual",
"x_foo": "bar"
}"""
identity3 = parse(input_string, allow_custom=True)
print(identity3.x_foo)
# -
# To remove a custom properties, use `new_version()` and set it to `None`.
identity4 = identity3.new_version(x_foo=None)
print(identity4)
# ### Custom STIX Object Types
#
# To create a custom STIX object type, define a class with the @[CustomObject](../api/v20/stix2.v20.sdo.rst#stix2.v20.sdo.CustomObject) decorator. It takes the type name and a list of property tuples, each tuple consisting of the property name and a property instance. Any special validation of the properties can be added by supplying an ``__init__`` function.
#
# Let's say zoo animals have become a serious cyber threat and we want to model them in STIX using a custom object type. Let's use a ``species`` property to store the kind of animal, and make that property required. We also want a property to store the class of animal, such as "mammal" or "bird" but only want to allow specific values in it. We can add some logic to validate this property in ``__init__``.
# +
from stix2 import CustomObject, properties
# Register a new STIX object type 'x-animal' with one required string
# property (species) and one optional string property (animal_class).
@CustomObject('x-animal', [
    ('species', properties.StringProperty(required=True)),
    ('animal_class', properties.StringProperty()),
])
class Animal(object):
    def __init__(self, animal_class=None, **kwargs):
        """Reject any *animal_class* outside the recognized set."""
        if animal_class and animal_class not in ['mammal', 'bird', 'fish', 'reptile']:
            raise ValueError("'%s' is not a recognized class of animal." % animal_class)
# -
# Now we can create an instance of our custom ``Animal`` type.
# Instantiate the freshly registered custom type.
animal = Animal(species="lion",
                animal_class="mammal")
print(animal)
# Trying to create an ``Animal`` instance with an ``animal_class`` that's not in the list will result in an error:
Animal(species="xenomorph",
       animal_class="alien")
# Parsing custom object types that you have already defined is simple and no different from parsing any other STIX object.
input_string2 = """{
"type": "x-animal",
"id": "x-animal--941f1471-6815-456b-89b8-7051ddf13e4b",
"created": "2015-12-21T19:59:11Z",
"modified": "2015-12-21T19:59:11Z",
"species": "shark",
"animal_class": "fish"
}"""
animal2 = parse(input_string2)
print(animal2.species)
# However, parsing custom object types which you have not defined will result in an error:
input_string3 = """{
"type": "x-foobar",
"id": "x-foobar--d362beb5-a04e-4e6b-a030-b6935122c3f9",
"created": "2015-12-21T19:59:11Z",
"modified": "2015-12-21T19:59:11Z",
"bar": 1,
"baz": "frob"
}"""
parse(input_string3)
# ### Custom Cyber Observable Types
#
# Similar to custom STIX object types, use a decorator to create [custom Cyber Observable](../api/v20/stix2.v20.observables.rst#stix2.v20.observables.CustomObservable) types. Just as before, ``__init__()`` can hold additional validation, but it is not necessary.
# +
from stix2 import CustomObservable
# Register a custom Cyber Observable type with one required string
# property and one optional integer property.
@CustomObservable('x-new-observable', [
    ('a_property', properties.StringProperty(required=True)),
    ('property_2', properties.IntegerProperty()),
])
class NewObservable():
    pass
new_observable = NewObservable(a_property="something",
                               property_2=10)
print(new_observable)
# -
# Likewise, after the custom Cyber Observable type has been defined, it can be parsed.
# +
from stix2 import ObservedData
input_string4 = """{
"type": "observed-data",
"id": "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"created": "2016-04-06T19:58:16.000Z",
"modified": "2016-04-06T19:58:16.000Z",
"first_observed": "2015-12-21T19:00:00Z",
"last_observed": "2015-12-21T19:00:00Z",
"number_observed": 50,
"objects": {
"0": {
"type": "x-new-observable",
"a_property": "foobaz",
"property_2": 5
}
}
}"""
obs_data = parse(input_string4)
print(obs_data.objects["0"].a_property)
print(obs_data.objects["0"].property_2)
# -
# ### ID-Contributing Properties for Custom Cyber Observables
# STIX 2.1 Cyber Observables (SCOs) have deterministic IDs, meaning that the ID of a SCO is based on the values of some of its properties. Thus, if multiple cyber observables of the same type have the same values for their ID-contributing properties, then these SCOs will have the same ID. UUIDv5 is used for the deterministic IDs, using the namespace `"00abedb4-aa42-466c-9c01-fed23315a9b7"`. A SCO's ID-contributing properties may consist of a combination of required properties and optional properties.
#
# If a SCO type does not have any ID contributing properties defined, or all of the ID-contributing properties are not present on the object, then the SCO uses a randomly-generated UUIDv4. Thus, you can optionally define which of your custom SCO's properties should be ID-contributing properties. Similar to standard SCOs, your custom SCO's ID-contributing properties can be any combination of the SCO's required and optional properties.
#
# You define the ID-contributing properties when defining your custom SCO with the `CustomObservable` decorator. After the list of properties, you can optionally define the list of id-contributing properties. If you do not want to specify any id-contributing properties for your custom SCO, then you do not need to do anything additional.
#
# See the example below:
# +
from stix2.v21 import CustomObservable # IDs and Deterministic IDs are NOT part of STIX 2.0 Custom Observables
# The optional third list names the ID-contributing properties: SCOs sharing
# the same 'a_property' value get the same deterministic (UUIDv5) id.
@CustomObservable('x-new-observable-2', [
    ('a_property', properties.StringProperty(required=True)),
    ('property_2', properties.IntegerProperty()),
], [
    'a_property'
])
class NewObservable2():
    pass
# a and b share 'a_property' -> identical ids; c differs -> different id.
new_observable_a = NewObservable2(a_property="A property", property_2=2000)
print(new_observable_a)
new_observable_b = NewObservable2(a_property="A property", property_2=3000)
print(new_observable_b)
new_observable_c = NewObservable2(a_property="A different property", property_2=3000)
print(new_observable_c)
# -
# In this example, `a_property` is the only id-contributing property. Notice that the ID for `new_observable_a` and `new_observable_b` is the same since they have the same value for the id-contributing `a_property` property.
# ### Custom Cyber Observable Extensions
#
# Finally, custom extensions to existing Cyber Observable types can also be created. Just use the @[CustomExtension](../api/v20/stix2.v20.observables.rst#stix2.v20.observables.CustomExtension) decorator. Note that you must provide the Cyber Observable class to which the extension applies. Again, any extra validation of the properties can be implemented by providing an ``__init__()`` but it is not required. Let's say we want to make an extension to the ``File`` Cyber Observable Object:
# +
from stix2 import File, CustomExtension
# Register a custom extension 'x-new-ext' applicable to File observables.
@CustomExtension(File, 'x-new-ext', [
    ('property1', properties.StringProperty(required=True)),
    ('property2', properties.IntegerProperty()),
])
class NewExtension():
    pass
new_ext = NewExtension(property1="something",
                       property2=10)
print(new_ext)
# -
# Once the custom Cyber Observable extension has been defined, it can be parsed.
input_string5 = """{
"type": "observed-data",
"id": "observed-data--b67d30ff-02ac-498a-92f9-32f845f448cf",
"created_by_ref": "identity--f431f809-377b-45e0-aa1c-6a4751cae5ff",
"created": "2016-04-06T19:58:16.000Z",
"modified": "2016-04-06T19:58:16.000Z",
"first_observed": "2015-12-21T19:00:00Z",
"last_observed": "2015-12-21T19:00:00Z",
"number_observed": 50,
"objects": {
"0": {
"type": "file",
"name": "foo.bar",
"hashes": {
"SHA-256": "35a01331e9ad96f751278b891b6ea09699806faedfa237d40513d92ad1b7100f"
},
"extensions": {
"x-new-ext": {
"property1": "bla",
"property2": 50
}
}
}
}
}"""
obs_data2 = parse(input_string5)
print(obs_data2.objects["0"].extensions["x-new-ext"].property1)
print(obs_data2.objects["0"].extensions["x-new-ext"].property2)
| docs/guide/custom.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Downloading cell metrics as a .csv file
#
# The first step requires downloading of the Allen SDK. If you have not previously downloaded the Allen SDK, please visit http://alleninstitute.github.io/AllenSDK/install.html to read the documentation and download.
# # Select cell search criteria
#
# From the Allen Brain Observatory web interface (http://observatory.brain-map.org/visualcoding/search/cell_list) use the filtering features to select a specific group of cells to analyze further.
#
# Once you have selected cell filter criteria, click the "Use the current filter with the SDK" link (at the bottom left-hand corner of the cell search returns).
#
# Copy and paste the resulting snippet of code into the box below.
#
# <div style="background: #DFF0D8; border-radius: 3px; padding: 10px;">
# Note: The first time you download the cell specimens it can take some time. Be patient. Once it has been done once, it will be much faster in the future.
# </div>
# Confirm that the number of cells matches the number you had on the website
#
print(len(cells))  # `cells` is defined by the snippet pasted from the web interface above
# This next step imports the pandas library (http://pandas.pydata.org/), sets your data up in a readable format...
import pandas as pd
data = pd.DataFrame(cells)
# ...and displays your data for you to view in this notebook
data
# And finally creates a file called output.csv that will be located in the same folder that you saved this notebook
data.to_csv('output.csv')
| BrainObservatory/Download_a_csv_from_search.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/a1764879/Angular2-GettingStarted/blob/master/Copy_assignment3_task1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="DMOJD0_jdzzg" colab_type="text"
# # Task 1 Simple Perceptron training algorithm
#
# This code is written with numpy as the matrix manipulation module, a tutorial for which can be found [here](https://docs.scipy.org/doc/numpy/user/quickstart.html)
#
# You need the address the section of the code marked with **#TODO**
#
#
# + id="-sqkIpLjpVsh" colab_type="code" colab={}
import numpy as np # This is for mathematical operations
# this is used in plotting
import matplotlib.pyplot as plt
import time
import pylab as pl
from IPython import display
# %matplotlib inline
# + id="63hM2wTGugE5" colab_type="code" colab={}
class Perceptron:
    """Binary classifier trained with the classic perceptron learning rule.

    Inputs are expected to already carry a bias column, so ``input_size``
    counts the bias weight as well (e.g. 3 for 2-D points plus bias).
    """

    def __init__(self, input_size):
        # input_size: dimension of the input including the bias term
        self.input_size = input_size
        # weights initialised randomly, shape (input_size, 1)
        self.w = np.random.randn(input_size, 1)
        # accuracy recorded after each training epoch
        self.history = []

    def train(self, X, Y, max_epochs=100):
        """Train on (X, Y) with the perceptron update rule.

        Fixes vs. the original assignment stub:
        - the weights are no longer re-randomised at the start of every
          epoch (that discarded all learning each pass), and
        - ``converged`` is now set when an epoch finishes with zero
          misclassifications, so training can actually stop early.
        """
        self.history = []
        converged = False
        epochs = 0
        while not converged and epochs < max_epochs:
            mistakes = 0
            for i in range(len(X)):
                activation = np.dot(X[i], self.w)
                if Y[i] == 1.0 and activation < 0:
                    # false negative: move the boundary towards the point
                    self.w = self.w + X[i][:, None]
                    mistakes += 1
                elif Y[i] == 0.0 and activation >= 0:
                    # false positive: move the boundary away from the point
                    self.w = self.w - X[i][:, None]
                    mistakes += 1
            # a full pass without a single mistake means convergence
            converged = mistakes == 0
            # after training one epoch, we compute again the accuracy
            self.compute_train_accuracy(X, Y)
            epochs += 1
            if epochs == max_epochs:
                print("Qutting: Reached max iterations")
            if converged:
                print("Qutting: Converged")
        self.plot_training_history()

    # The draw function plots all the points and our current estimate
    # of the boundary between the two classes. Points are colored according
    # to the current output of the classifier. The ground-truth boundary
    # (y = x) is also plotted since we know how the data was generated.
    def draw(self, X):
        pl.close()
        out = np.matmul(X, self.w).squeeze()
        P = X[out >= 0, :]
        N = X[out.T < 0, :]
        x = np.linspace(0, 1)
        pl.xlim((0, 1))
        pl.ylim((0, 1))
        pl.plot(P[:, 0], P[:, 1], 'go', label='Positive')
        pl.plot(N[:, 0], N[:, 1], 'rx', label='Negative')
        pl.plot(x, x, label='GT')
        a = self.w[0]
        b = self.w[1]
        c = self.w[2]
        # decision boundary a*x + b*y + c = 0  ->  y = -(a/b) x - c/b
        pl.plot(x, -a / b * x - c / b, label='Estimated')
        pl.axis('tight')
        pl.legend()
        display.clear_output(wait=True)
        display.display(pl.gcf())
        time.sleep(1)

    # This computes (and records) the accuracy of the current estimate.
    def compute_train_accuracy(self, X, Y):
        out = np.matmul(X, self.w)
        Y_bar = (out >= 0)
        # Fix: np.float was deprecated and removed from NumPy (>= 1.24);
        # the builtin float is the drop-in replacement.
        accuracy = np.sum(Y == Y_bar) / float(Y_bar.shape[0])
        self.history.append(accuracy)
        print("Accuracy : %f " % (accuracy))
        self.draw(X)

    # Once training is done, we can plot the accuracy over time.
    def plot_training_history(self):
        plt.ylim((0, 1.01))
        plt.plot(np.arange(len(self.history)) + 1, np.array(self.history), '-x')
        plt.xlabel('Epoch')
        plt.ylabel('Accuracy')
        plt.show()
# + id="Umh6ObbBuj3t" colab_type="code" outputId="71bb4e5a-4b4d-441b-bb18-969d8eef5b02" colab={"base_uri": "https://localhost:8080/", "height": 546}
number_of_samples = 100
max_number_of_epochs = 10
# Random 2-D points in the unit square
X = np.random.rand(number_of_samples,2)
# Append a constant bias column of ones -> each input is (x1, x2, 1)
X = np.append(X, np.ones((X.shape[0],1)),axis = 1)
# Ground-truth label: positive when the point lies above the line y = x
Y = X[:,1] > (X[:,0])
Y = np.float32(Y)
Y = Y.reshape((number_of_samples,1))
# Perceptron with 3 weights: two input features plus the bias
p = Perceptron(3)
p.train(X,Y,max_number_of_epochs)
# + id="qOHu1X5_D8vQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="58786920-9127-48a6-aa0f-c3a6d0508dd7"
p.history
# + id="AyEINU4lDlhY" colab_type="code" outputId="b3c5146a-018e-403a-cc87-ccbd5d847946" colab={"base_uri": "https://localhost:8080/", "height": 295}
# NOTE(review): this assumes training ran for exactly max_number_of_epochs
# epochs; plt.plot below fails with a length mismatch if training converged
# earlier (len(p.history) < max_number_of_epochs) — verify.
xx = np.arange(max_number_of_epochs)
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.title("Accuracy varaition till max_number_of_epochs")
plt.plot(xx, p.history)
plt.savefig("accuracy.png")
# + id="kmWWgOPgNnPf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="9048d476-add7-472c-f9cc-144bee6e3d02"
X[1].shape, p.w.shape
| Copy_assignment3_task1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.3 64-bit (''base'': conda)'
# name: python37364bitbaseconda138a267d089641acb9983778490912c8
# ---
import torch
from transformers import BertTokenizer, BertModel
from sklearn.metrics.pairwise import cosine_similarity
import umap
import os
import json
# Path to a local SciBERT checkpoint (uncased, scientific-text vocabulary)
model_version = 'c:/Users/aadam/scibert_scivocab_uncased'
do_lower_case = True
model = BertModel.from_pretrained(model_version)
tokenizer = BertTokenizer.from_pretrained(model_version, do_lower_case=do_lower_case)
# +
def embed_text(text, model):
    """Tokenize *text* with the module-level tokenizer and run it through
    *model*, returning the last hidden states (batch of size 1)."""
    token_ids = tokenizer.encode(text)
    batch = torch.tensor(token_ids).unsqueeze(0)  # shape (1, seq_len)
    outputs = model(batch)
    # The last hidden states are the first element of the output tuple.
    return outputs[0]
def get_similarity(em, em2):
    """Cosine similarity between two embedding tensors (detached to NumPy first)."""
    first = em.detach().numpy()
    second = em2.detach().numpy()
    return cosine_similarity(first, second)
# +
abs1 = "The polymer solar cell (PSC) technology has continued to be developed, and the power conversion efficiency (PCE) has now exceeded 10%. The rapid improvement of PCEs in the last decade has mainly resulted from versatile synthetic efforts for conjugated polymers as electron-donors and fullerene derivatives as electron-acceptors. This Feature Article highlights recent exploration of unique, attractive building blocks, i.e., quinoidal units, phospholes, porphyrins, and fluorinated aromatic rings, which can be incorporated into low bandgap conjugated polymers. As candidates for the next-generation acceptor materials that replace the benchmark acceptor, [6,6]-phenyl-C61-butyric acid methyl ester ([60]PCBM), fullerene bisadduct regioisomers are also overviewed. Furthermore, we summarized recent attempts for the construction of one-dimensionally confined, organic donor–acceptor heterojunction nanorods and their applications to photovoltaic and optoelectronic devices. The topics in this article are not intended to cover an exhaustive list of PSC research studies, but involve the fundamental aspect to stimulate further studies for getting new insights into the structure–property relationship in PSC devices."
abs2 = "In recent years, organic semiconductors have emerged as a promising and, in some situations, viable commercial alternative to traditional inorganic materials such as silicon. Organic-based light emitting diodes, photovoltaic devices, photodetectors, and transistors have attracted intense interest in the scientific community. In this review, we first present a discussion of the fundamental electronic nature of organic semiconductors, processing techniques, and their application to two main classes of optoelectronic devices, light emitting diodes, and photovoltaics. The second part of the review introduces organic photovoltaics in depth, including their operation principles, development history, current state of the art, and routes for further improvement."
abs3 = "We study the (relative) character variety of the three-holed projective plane and the action of the mapping class group on it. We describe a domain of discontinuity for this action, which strictly contains the set of primitive stable representations defined by Minsky, and also the set of convex-cocompact characters."
abs4 = "This work studies hydrogenated amorphous silicon germanium films, deposited by hot wire chemical vapor deposition, to be used as low band gap absorber material in thin film solar cells. Material properties, such as the bonding configurations, the ambipolar diffusion length and the optical band gap, were examined as a function of the substrate temperature and germanium content. Our best materials were incorporated in single junction solar cells with high long-wavelength response and a tandem solar cell with an efficiency of 10.42%."
abs5 = "This letter describes the fabrication and characteristics of high‐efficiency thin‐film CdS/CdTe heterojunction solar cells. CdS films have been prepared by chemical bath deposition and p‐CdTe films have been deposited by close‐spaced sublimation. A CdS/CdTe solar cell of greater than 1 cm2 area with an AM1.5 efficiency of 15.8 is reported."
abs6 = "Power generated from sustainable and environmentally benign solar cell technologies is one of the key aspects in the development of clean renewable energy. Earth-abundant and non-toxic materials with suitable bandgap and absorption coefficient for photovoltaic application have drawn considerable attention in the last few decades. Here we examine Sb2S3, an emerging thin film solar cell technology that also has exciting opportunities for Si-based tandem solar cell application. We conduct a systematic analysis of Sb2S3-based photovoltaic devices, highlighting major advancements and most prominent limitations of this technology. This study also encompasses device performance simulation, providing a roadmap for further Sb2S3 technology development"
abs7 = "The authors report on carrier transport properties and spectral sensitivities of hydrogenated microcrystalline silicon-germanium (μc-Si1−xGex:H) alloys fabricated by low-temperature (∼200°C) plasma-enhanced chemical vapor deposition over the wide compositional range. Hall-effect and conductivity measurements reveal a change from weak n-type to strong p-type conduction for x>0.75 and a monotonic decrease in photoconductivity upon Ge incorporation. In a p-i-n diode structure, the Ge incorporation into i layer reduces quantum efficiencies in the short wavelengths, indicating an increased photocarrier recombination at p∕i interface. Nevertheless, under reverse biased condition, a 0.9-μm-thick μc-Si0.6Ge0.4:H absorber yields a large photocurrent of >27mA/cm2 (air mass 1.5 global) with spectral sensitivities extending into infrared wavelengths, offering a potential advantage over conventional microcrystalline silicon solar cells."
abs8 = "The focus of the most recent experimental studies of the welded shoe-base connection has been the fatigue strength in the long-life regime. A fracture mechanics-based life prediction model developed for the as-tested case compares favorably with experimental results. Scanning Electron Microscope (SEM) results were used to better understand the role of striations in fatigue life estimation of as-tested specimens. In addition, a parametric study using the finite element method investigated geometric parameters affecting the stresses local to the shoe-base detail. Local stresses from the parametric study provided a basis for fracture mechanics models of shoe-base details with altered geometries."
abstract_list = [abs1, abs2, abs3, abs4, abs5, abs6, abs7, abs8]
# +
# Mean-pool each abstract's token embeddings, then stack into one tensor
# (rows follow the order of abstract_list).
abstract_embedding = torch.cat(
    [embed_text(abstract, model).mean(1) for abstract in abstract_list],
    dim=0,
)
# (label, row i, row j) pairs to compare; printed output matches the original cell.
_comparisons = [
    ("Score for abstracts about semiconductors:", 0, 1),
    ("Score for abstract about semiconductors vs math:", 0, 2),
    ("Score for another abstract about semiconductors vs math:", 1, 2),
    ("Score for abstract about thin films vs math:", 3, 2),
    ("Score for abstracts about thin films:", 3, 4),
    ("Score for abstract about semiconductors vs thin films:", 1, 4),
    ("Score for more abstracts about thin films:", 3, 5),
    ("Score for thin film abstract 3 vs math:", 2, 5),
    ("Score for 2 silicon germanium thin film abstracts:", 6, 5),
    ("Score for silicon germanium thin film vs other type of film:", 6, 4),
    ("Score for silicon germanium thin film vs article on fracture mechanics:", 6, 7),
]
for _label, _i, _j in _comparisons:
    print(_label,
          get_similarity(abstract_embedding[_i].unsqueeze(0), abstract_embedding[_j].unsqueeze(0)))
| Paper Browser/abstract_sorter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# ## Ways to create a list
# ### Using whitespace and the split function
data = ['1 2 3 4'.split(),
        '5 6 7 8 '.split(),
        '9 10 11 12'.split(),
        '13 14 15 16'.split()]
data
# ## Converting each element to int
# ### With the map function
# map takes (a function, passed without parentheses, and an iterable)
for i, l in enumerate(data):
    data[i] = list(map(int, l))
data
# ### With for and range
for l in range(len(data)):
    for i in range(len(data[l])):
        data[l][i] = int(data[l][i])
data
# ### With for and enumerate
for i, l in enumerate(data):
    for p, n in enumerate(data[i]):
        data[i][p] = int(n)
data
# ## Creating a DataFrame with split
# * the index and columns can be given as a string plus the split function
# * split uses whitespace as its default separator
# * any other separator must be passed as an argument
data = [(1, 2, 3, 4),
        (5, 6, 7, 8),
        (9, 10, 11, 12),
        (13, 14, 15, 16)]
# positional arguments: data, index labels, column labels
df = pd.DataFrame(data, 'l1 l2 l3 l4'.split(), 'c1 c2 c3 c4'.split())
df
# ## Types of selection
# ### Selecting a column
# * in other words, selecting a Series
df['c1']
type(df['c1'])
# ### More than one column
df[['c3', 'c1']]
# columns come back in the order they were requested
type(df[['c3', 'c1']])
# ### Selecting by row
# row selection follows the string-slicing convention;
# a bare index is not used on its own — use ':'
# and a second ':' for a step beyond the interval,
# e.g. [::2] selects everything in steps of 2
df[:]
# ### Selecting rows and columns
# select from the row at position 1 onwards
# and, of those, columns c3 and c1, in that order
df[1:][['c3', 'c1']]
# ### Selecting rows with loc
# * loc selects rows by their label
df
# takes one row and turns it into a Series
df.loc['l1']
# the same format selects more than one row
df.loc[['l3', 'l2']]
# a single element uses matrix-style notation
df.loc['l3', 'c1']
# ### Selecting with iloc
# * iloc works like loc but uses the positional index, not the label
# selecting the same element with iloc
df.iloc[2, 0]
# ### Selecting several rows and columns with loc and iloc
df
# with loc
df.loc[['l3', 'l1'], ['c4', 'c1']]
# with iloc
df.iloc[[2, 0], [3, 0]]
# ## Exercises
# ### Build a DataFrame with only the failing students, keeping just the Nome, Sexo and Idade columns, in that order.
alunos = pd.DataFrame({'Nome': ['Ary', 'Cátia', 'Denis', 'Beto', 'Bruna', 'Dara', 'Carlos', 'Alice'],
                 'Sexo': ['M', 'F', 'M', 'M', 'F', 'F', 'M', 'F'],
                 'Idade': [15, 27, 56, 32, 42, 21, 19, 35],
                 'Notas': [7.5, 2.5, 5.0, 10, 8.2, 7, 6, 5.6],
                 'Aprovado': [True, False, False, True, True, True, False, False]},
                 columns = ['Nome', 'Idade', 'Sexo', 'Notas', 'Aprovado'])
alunos
# ### Exercise answer
# Boolean mask of failing students; `~` negates the boolean column directly
# (equivalent to `== False`, but idiomatic pandas).
selecao = ~alunos['Aprovado']
# A single .loc call selects rows and columns at once, avoiding chained
# indexing and its SettingWithCopy pitfalls.
reprovados = alunos.loc[selecao, ['Nome', 'Sexo', 'Idade']]
reprovados
# ### Answer sorted by sex, alphabetical within each group, with a rebuilt index
# Mask of failing students (idiomatic negation instead of `== False`).
reprovados = ~alunos['Aprovado']
# Keep only the failing students.
alunos_reprovados = alunos[reprovados]
# Restrict to the desired columns and sort by sex, then name.
alunos_reprovados = alunos_reprovados[['Nome', 'Sexo', 'Idade']].sort_values(by=['Sexo', 'Nome'])
# Rebuild a clean 0..n-1 index after filtering and sorting.
alunos_reprovados.index = range(alunos_reprovados.shape[0])
alunos_reprovados
# ### Create a view with the three youngest students
alunos
# inplace=True sorts (and mutates) `alunos` itself rather than returning a sorted copy
alunos.sort_values(by='Idade', inplace=True)
alunos.iloc[:3]
| 02_fundamentos_pandas/notebook/11_formas_de_selecao.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
list_images = os.listdir(r'./datasets/train')
len(list_images) # total 25000 training images 4% we will take for validation set that is 1000
# Count images by filename prefix ('cat'/'dog') and print whenever a run of
# one prefix switches to the other, so we can see how the listing interleaves.
cat_nos=0
dog_nos=0
switching ='cat' #cat/dog
for i in list_images:
    val = i[:3]
    if val=='cat':
        cat_nos += 1
        if switching=='dog':
            switching='cat'
            print('switching at ',i,'with cat_nos ',cat_nos,' and dog_nos ',dog_nos)
    if val =='dog':
        dog_nos += 1
        if switching=='cat':
            switching='dog'
            print('switching at ',i,'with cat_nos ',cat_nos,' and dog_nos ',dog_nos)
cat_nos, dog_nos # equal no of dogs and cats
# NOTE(review): this slicing assumes os.listdir returned all cat.* files before
# dog.* files — listdir order is not guaranteed by the OS; safer would be to
# filter by prefix. Confirm with the switching printout above.
cat_list = list_images[:12500]
dog_list = list_images[12500:]
len(cat_list), len(dog_list)
# ## Let's create folders to store them
# exist_ok=True lets these cells be re-run without raising FileExistsError
# (the original os.makedirs calls crashed on a second run).
os.makedirs("./datasets/final/train/cats", exist_ok=True)
os.makedirs("./datasets/final/train/dogs", exist_ok=True)
os.makedirs("./datasets/final/test/cats", exist_ok=True)
os.makedirs("./datasets/final/test/dogs", exist_ok=True)
# ## Let's first create source folder
os.makedirs("./datasets/source/cats", exist_ok=True)
os.makedirs("./datasets/source/dogs", exist_ok=True)
# +
## Let's first store split in source
# -
import shutil
from shutil import copyfile
in_path = r"./datasets/train"
# store in cats
for img in cat_list:
    source_path = os.path.join(in_path,img)
    dest_path = os.path.join("./datasets/source/cats/",img)
    if os.path.getsize(source_path) >0: # skip zero-byte (presumed corrupt) files — nothing is deleted
        copyfile(source_path, dest_path)
# store in dogs
for img in dog_list:
    source_path = os.path.join(in_path,img)
    dest_path = os.path.join("./datasets/source/dogs/",img)
    if os.path.getsize(source_path) >0: # skip zero-byte (presumed corrupt) files — nothing is deleted
        copyfile(source_path, dest_path)
# ## send images to for split
import random
# +
def split_data(SOURCE, TRAINING, TESTING, SPLIT_SIZE):
    """Randomly split the images in SOURCE between TRAINING and TESTING.

    SOURCE, TRAINING, TESTING: directory paths (destinations must exist).
    SPLIT_SIZE: fraction (0-1) of images copied into TRAINING; the rest go
    to TESTING. Zero-byte (presumed corrupt) files are skipped. Files are
    copied, never moved or deleted.
    """
    images = os.listdir(SOURCE)
    # Shuffle so the train/test assignment is random.
    images = random.sample(images, len(images))
    train_size = int(len(images) * SPLIT_SIZE)

    def _copy_all(names, destination):
        # Copy each non-empty file from SOURCE into `destination`.
        for img in names:
            path = os.path.join(SOURCE, img)
            if os.path.getsize(path) > 0:
                copyfile(path, os.path.join(destination, img))

    _copy_all(images[:train_size], TRAINING)
    _copy_all(images[train_size:], TESTING)
# split cats
split_data('./datasets/source/cats', './datasets/final/train/cats','./datasets/final/test/cats', SPLIT_SIZE=.96)
# split dogs
split_data('./datasets/source/dogs', './datasets/final/train/dogs','./datasets/final/test/dogs', SPLIT_SIZE=.96)
# -
# ## Now the data is split into train and test sets
# +
# first lets see how many of each we have
tr_c =r"./datasets/final/train/cats"
tr_d =r"./datasets/final/train/dogs"
te_c =r"./datasets/final/test/cats"
te_d =r"./datasets/final/test/dogs"
print("train cats:{} dogs:{} , test cats:{} dogs{}".format(len(os.listdir(tr_c)),len(os.listdir(tr_d)),len(os.listdir(te_c)),len(os.listdir(te_d))))
# -
# # RUN FROM Here >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
# ## use DataGen
# +
from tensorflow.keras.preprocessing.image import ImageDataGenerator
train_dir=r"./datasets/final/train"
test_dir =r"./datasets/final/test"
train_datagen = ImageDataGenerator(rescale=1./255,
rotation_range=40,
width_shift_range=.2,
height_shift_range=.2,
shear_range=.2,
zoom_range=.2,
horizontal_flip=True,
fill_mode='nearest')
train_generator = train_datagen.flow_from_directory(train_dir,
target_size=(299,299),
batch_size=64,
class_mode='binary')
validation_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = validation_datagen.flow_from_directory(test_dir,
target_size=(299,299),
batch_size=32,
class_mode ='binary'
)
# +
## Now create callbacks
from tensorflow.keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor="val_loss", mode='min', patience=10)
# +
import tensorflow
class myCallback(tensorflow.keras.callbacks.Callback):
    """Stop training as soon as the training accuracy exceeds 90%."""
    def on_epoch_end(self, epoch, logs=None):
        # `logs=None` replaces the original mutable default `logs={}`
        # (shared-dict pitfall); Keras passes a logs dict on every call.
        logs = logs or {}
        if logs.get("accuracy") is not None and logs.get("accuracy") > .9:
            print("\nReached 90% accuracy so stopping callback")
            self.model.stop_training=True
callbacks1 = myCallback()
# -
# ## Now we will use transfer learning to create Inception Model
from tensorflow.keras.applications.inception_v3 import preprocess_input
import tensorflow
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Input, Conv2D, AveragePooling2D, Flatten, Dense
# +
def my_InceptionV3_function(input_shape=(299, 299, 3), classes=2):
    """Build a transfer-learning classifier on a frozen ImageNet InceptionV3 base.

    input_shape: expected image shape (InceptionV3's native 299x299x3).
    classes: number of output units in the softmax head.
    Returns an *uncompiled* tf.keras Model.
    """
    base_model= tensorflow.keras.applications.InceptionV3(input_shape=input_shape,
                                                   include_top=False,
                                                   weights='imagenet')
    # Freeze the pretrained convolutional base; only the new head will train.
    base_model.trainable = False
    # create the input layer (Same as the imageNetv2 input size)
    inputs = tensorflow.keras.Input(shape=input_shape)
    # data preprocessing using the same weights the model was trained on
    x = preprocess_input(inputs)
    # set training to False to avoid keeping track of statistics in the batch norm layer
    x = base_model(x, training=False)
    # Add the new Binary classification layers
    # use global avg pooling to summarize the info in each channel
    x = tensorflow.keras.layers.GlobalAveragePooling2D()(x)
    #include dropout with probability of 0.2 to avoid overfitting
    x = tensorflow.keras.layers.Dropout(.2)(x)
    # NOTE(review): a 2-unit softmax with sparse_categorical_crossentropy works here,
    # though a single sigmoid unit with binary labels is the more common binary setup.
    x = tensorflow.keras.layers.Dense(classes, activation='softmax')(x)
    InceptionV3_model = Model(inputs=inputs, outputs=x)
    # Let's compile it
    #LeNet_model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return InceptionV3_model
# -
model = my_InceptionV3_function()
model.summary()
# +
#tensorflow.keras.utils.plot_model(model, to_file="model.png", show_shapes=False, show_layer_names=True, rankdir="TB", expand_nested=False, dpi=96, )
# -
# Compile our model
opt = tensorflow.keras.optimizers.Adam(learning_rate=0.01)
model.compile(optimizer=opt,loss='sparse_categorical_crossentropy',metrics=['accuracy'])
model_history = model.fit(train_generator, steps_per_epoch=8, epochs=100, verbose=2, validation_data = validation_generator,validation_steps =8,callbacks=[early_stop, callbacks1] )
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
pd.DataFrame(model_history.history).plot()
# +
a = pd.DataFrame(model_history.history)
# -
# Accumulate training history across successive runs of the fit cell.
# On a fresh kernel Final_DataFrame does not exist yet, so start it here —
# the original unconditional pd.concat raised NameError on the first run.
if 'Final_DataFrame' in globals():
    Final_DataFrame = pd.concat([Final_DataFrame, a])
else:
    Final_DataFrame = a
Final_DataFrame
Final_DataFrame.plot()
model.save('./saved_models/model1.h5')
# ## Let's load the model
model = load_model('./saved_models/model1.h5')
model.summary()
# ## predict
# +
from tensorflow.keras.preprocessing.image import ImageDataGenerator
# Test-time generator: rescaling only, no augmentation.
test_datagen = ImageDataGenerator(rescale=1./255)
test_dir=r"./datasets/test/"
generator = test_datagen.flow_from_directory(
        test_dir,
        target_size=(299,299),
        batch_size=16,
        class_mode='categorical', # NOTE(review): original comment said "only data, no labels", but that describes class_mode=None; 'categorical' infers labels from sub-directories — confirm the test dir layout
        shuffle=False) # keep data in same order as labels
probabilities = model.predict(generator)
# -
# Fixed typo: the variable defined above is `probabilities`
# (the original `probabilites` raised NameError).
probabilities
import pandas as pd
Submission_df = pd.read_csv("./datasets/sampleSubmission.csv")
Submission_df.head()
type(probabilities)
probabilities.shape
Submission_df.info()
# ## check
output = model.predict(validation_generator)
output.shape
import numpy as np
np.rint(output)
pred=np.argmax(output, axis=1)
y_true = validation_generator.classes
y_true
pred.shape, y_true.shape
import sklearn
from sklearn.metrics import confusion_matrix, classification_report
confusion_matrix(y_true, pred)
print(classification_report(y_true, pred))
# Class index (0/1) per test image from the softmax probabilities.
final_output = np.argmax(probabilities, axis=1)
len(final_output)
# NOTE(review): 'label1' is added, inspected, copied into 'label' and then
# dropped — assigning Submission_df['label'] = final_output directly is equivalent.
Submission_df['label1']= final_output
Submission_df.head()
Submission_df['label'] = final_output
Submission_df.drop('label1', axis=1,inplace=True)
# NOTE(review): to_csv writes the DataFrame index as an extra column;
# Kaggle-style submissions usually require index=False — confirm expected format.
Submission_df.to_csv("./saved_outputs/sub1.csv")
| Main_file.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Sample 2.3
# This sample demonstrates how to derive pdf of a function of a random variable.
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
def func_g(x):
    '''
    The forward transform y = g(x) = arctan(x).
    '''
    return np.arctan(x)
def func_f_x(x, mu=0.,sig=10.):
    '''
    Gaussian probability density f(x) with mean `mu` and standard deviation `sig`.
    '''
    norm_const = 1. / (np.sqrt(2 * np.pi) * sig)
    return norm_const * np.exp(-(x - mu) ** 2 / (2 * sig ** 2))
def func_h(y):
    '''
    Inverse of g: x = h(y) = tan(y).
    '''
    return np.tan(y)
def derive_h(y):
    '''
    Derivative h'(y) = 1 / cos^2(y) of the inverse transform x = tan(y).
    '''
    cos_y = np.cos(y)
    return 1. / cos_y ** 2
def func_f_y(y, mu=0.,sig=10.):
    '''
    Density of y obtained by propagating x ~ N(mu, sig) through y = g(x):
    f(y) = f_x(h(y)) * |h'(y)|.
    '''
    x = func_h(y)
    jacobian = np.abs(derive_h(y))
    return func_f_x(x, mu=mu, sig=sig) * jacobian
x = np.arange(-100,100.1,0.1)
fig = plt.figure(figsize=[14,12])
# f(x): the source density
ax = fig.add_subplot(221)
ax.plot(x,func_f_x(x),'k-')
ax.set_xlabel(r'$x$',fontsize=12)
ax.set_ylabel(r'$f(x)$',fontsize=12)
# y = g(x): the forward transform
ax = fig.add_subplot(222)
ax.plot(x,func_g(x),'k-')
ax.set_xlabel(r'$x$',fontsize=12)
ax.set_ylabel(r'$y=g(x)$',fontsize=12)
y = np.arange(-1.5,1.51,0.01)
# x = h(y): the inverse transform
ax = fig.add_subplot(223)
ax.plot(y,func_h(y),'k-')
ax.set_xlabel(r'$y$',fontsize=12)
ax.set_ylabel(r'$x=h(y)$',fontsize=12)
# f(h(y)) and |h'(y)| on twin y-axes
ax = fig.add_subplot(224)
ax2 = ax.twinx()
ax.plot(y,func_f_x(func_h(y)),'b--')
ax2.plot(y,np.abs(derive_h(y)),'r:')
ax.set_xlabel(r'$y$',fontsize=12)
ax.set_ylabel(r'$f(h(y))$',fontsize=12,color='b')
ax.tick_params('y',colors='b')
# Fixed label: `\'` is not a valid mathtext command; a plain apostrophe renders the prime.
ax2.set_ylabel(r"$h'(y)$",fontsize=12,color='r')
ax2.tick_params('y',colors='r')
fig.show()
fig.savefig('propagatedist_1.pdf',bbox_inches='tight')
# The propagated density f(y) from the analytic change-of-variables formula.
fig = plt.figure(figsize=[6,6])
ax = fig.add_subplot(111)
ax.plot(y,func_f_y(y),'k-')
ax.set_xlabel(r'$y$',fontsize=12)
ax.set_ylabel(r'$f(y)$',fontsize=12)
fig.show()
fig.savefig('propagatedist_2.pdf',bbox_inches='tight')
# +
# Monte-Carlo check of the analytic result: sample x ~ N(0, 10), push the
# samples through g, and compare histograms with the analytic densities above.
x = np.random.normal(0.,10.,size=10000)
y = func_g(x)
# Histogram of the raw x samples.
xgrid = np.arange(-100,100.5,0.5)
xcenter = (xgrid[1:]+xgrid[:-1])/2.
hx,xedge = np.histogram(x,bins=xgrid)
# Histogram of the transformed samples y = g(x).
ygrid = np.arange(-1.5,1.55,0.05)
ycenter = (ygrid[1:]+ygrid[:-1])/2.
hy,xedge = np.histogram(y,bins=ygrid)  # NOTE(review): reuses the name `xedge` for y's bin edges
fig = plt.figure(figsize=[14,6])
ax = fig.add_subplot(121)
ax.plot(xcenter,hx,'k-')
ax.set_xlabel(r'$x$',fontsize=12)
ax.set_ylabel(r'$f(x)$',fontsize=12)
ax = fig.add_subplot(122)
ax.plot(ycenter,hy,'k-')
ax.set_xlabel(r'$y$',fontsize=12)
fig.show()
#fig.savefig('propagatedist_2.pdf',bbox_inches='tight')
# + jupyter={"outputs_hidden": true}
| sample2.3_propagate_distribution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from helpers.utilities import *
# %run helpers/notebook_setup.ipynb
# + tags=["parameters"]
protein_levels_path = 'data/clean/protein/levels.csv'
clinical_data_path = 'data/clean/clinical/data.csv'
# outputs
aptamers_path = 'data/other/relevant_aptamers.csv'
# -
# # Proteins: exploration & quality control
# Protein levels were measured with SOMAscan platform (SomaLogic company) in version measuring 1.3k proteins.
#
# SOMAscan uses peptide aptamers (short, target specific protein fragments) binding for protein level quantification[1]:
# - designed for ~1.3k human proteins[1], with a newer version [capable of measuring ~5k human proteins](https://somalogic.com/somalogic-launches-new-version-somascan-assay/)
# - The company [claims](http://somalogic.com/wp-content/uploads/2017/06/SSM-002-Technical-White-Paper_010916_LSM1.pdf) achieving high dynamic range (10^8) and the readouts being directly proportional to protein concentrations
# - "Many of the proteins are either secreted or known to be shed from the cell surface, and thus the platform is particularly well suited for plasma biomarker discovery." [(Ngo, et al. 2016)](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4963294/)
#
# Were there any previous studies on related diseases, sample types (CSF as in contrast to blood), and validated for application to signal from (potentially) multiple organisms?
# - previously applied to other species (e.g. mouse)[1] - taking advantage of non-specificity to human proteins/close homology of some mammal proteins; the [company report](http://www.somalogic.com/wp-content/uploads/2016/09/SSM-019-Rev-4-SOMAmer-tech-note-nonhuman-sample.pdf) demonstrates that non-human orthologs from dog, rat, cat and mouse can be used to measure levels of certain proteins in these species
# - previously applied to TB:
# - [Sequential inflammatory processes define human progression from M. tuberculosis infection to tuberculosis disease](https://journals.plos.org/plospathogens/article?id=10.1371/journal.ppat.1006687) - may be worth looking at as they also used both: SOMAscan and RNA-Seq data (plus the data are published in GEO!),
# - [Highly Multiplexed Proteomic Analysis of Quantiferon Supernatants To Identify Biomarkers of Latent Tuberculosis Infection](https://jcm.asm.org/content/55/2/391.long)
# - applications to CSF: [Neuro Psychiatric SLE patients (somehow related to meningitis)](http://www.jimmunol.org/content/200/1_Supplement/100.11) just an abstract!, [Alzheimer study](https://alzres.biomedcentral.com/articles/10.1186/s13195-017-0258-6) in which many differentially expressed pathways were in agreement with previous studies (which supports the case for application of SOMAscan to CSF).
# - The company [was developing some of its reagents](https://somalogic.com/somalogic-announces-extension-of-funding-for-devel/) specifically with intent to diagnose for TB for many years, which was funded by the Bill & Melinda Gates foundation.
# - The SOMAscan was qualified for use with CSF for biomarker discovery according to [the technical whitepaper](http://somalogic.com/wp-content/uploads/2017/06/SSM-002-Technical-White-Paper_010916_LSM1.pdf).
#
# Technical validation and updates:
# - ["Assessment of Variability in the SOMAscan Assay", 2017](https://www.nature.com/articles/s41598-017-14755-5) report discusses the variability, data processing and normalization for SOMAscan, importantly sharing the data for variability. They compare performance on serum and plasma across multiple plates.
# - According to the company that coefficient of variation in healthy humans has [median of 4.6% and 2.9% for plasma and serum respectively](http://somalogic.com/wp-content/uploads/2017/06/SSM-046-Rev-2-Verification-and-Validation-of-the-SOMAscan-Assay-1.3k.pdf) (n=166) which is consistent with the findings of above mentioned study
# - ["Complementarity of SOMAscan to LC-MS/MS and RNA-seq for quantitative profiling of human embryonic and mesenchymal stem cells", 2017](https://www.sciencedirect.com/science/article/pii/S1874391916304006) compares SOMAscan to traditional platforms. I have not read this one yet.
# - SomaLogic released updates, informing customers of removal and changes in the reagents[1]. An [update from 2016](https://metabolomics.helmholtz-muenchen.de/pgwas/locuscards/updates/SSM-064_Rev_0_DCN_16-263.pdf) explains removal of five reagents due to specificity issues
# - A [technical note from SomaLogic (2017)](http://www.somalogic.com/wp-content/uploads/2017/01/SSM-067-Rev-1-Technical-Note-Characterization-of-SOMAmer-Reagents-Binding-Specificity-in-the-SOMAscan-1.3k-Assay.pdf) characterizes specificity for the proteins in 1.3k panel; specificity measured against related proteins was confirmed for 53% of the reagents at the time. Not a bad number, but I believe that we need to be cautious as that did not test cross-species mixtures!
# - https://www.nature.com/articles/s41598-018-26640-w
#
# Other useful resources:
# - [Web Tool for Navigating and Plotting SomaLogic ADAT Files](https://openresearchsoftware.metajnl.com/articles/10.5334/jors.166/) allow to visualize raw ADAT files (the files with raw data which I did not get); the website generates PCA, heatmaps for HCA and other plots and was originally hosted by NCI. Importantly there is a source code available to consult and trouble-shoot in case of any problems with SOMAscan-specific analysis/visualizations: https://github.com/foocheung/adat (Apache-2.0 license).
#
# 1. https://www.nature.com/articles/s41598-017-14755-5
#
# **It might be useful to learn about the normalization procedures employed for SOMAscan.** What I would worry about is how they handled samples i.e. if these were placed on a single or multiple plates and what normalization followed.
#
# Overall the platform has a good performance but I am worried about the effect of non-specific binding of bacterial or viral proteins, which seems that was not tested before (the validation studies were all performed on healthy humans). On the other hand, if the signal for a protein is strong and discriminates TBM patients from other ones, it is a useful biomarker anyway (though with validity potentially restricted to this specific platform).
#
# Articles addressing my concern of non-specific bacterial/fungal/viral protein binding:
# - [Potential of high-affinity, Slow Off-Rate Modified Aptamer (SOMAmer) reagents for Mycobacterium tuberculosis proteins as tools for infection models and diagnostic applications."](https://www.ncbi.nlm.nih.gov/pubmed/28794178) - attempted to create a diagnostic test for TB using novel aptamers (created specifically for Mycobacterium proteins); in serum/urine concentrations of TB proteins were too low - and this is a hint that I need not worry greatly (as even SomaLogic scientists were not able to pick up signal from MTB using novel aptamers), but again they did not try CSF nor did they systematically check the aptamers from the 1.3k assay for noise from unspecific binding)
#
# I barely skimmed the topic, looking for things relevant to data cleaning and validation, will continue the review later.
#
protein_levels = read_csv(protein_levels_path, index_col=[0,1,2,3])
protein_levels
# ### Can we get better ids for the proteins?
#
# The Ensembl gene id is on the gene level so not necessarily accurate. Can we get better?
#
# Yes: the aptamers metadata is available in R package "readat"
# + magic_args="-o aptamers" language="R"
# library(readat)
# -
aptamers.head()
# Restrict the aptamer metadata to 1.3k-panel entries actually present in our data.
relevant_aptamers = aptamers[
    aptamers.SomaId.isin(protein_levels.index.get_level_values('soma_id'))
    &
    (aptamers.IsIn1310Panel == 1)
]
len(relevant_aptamers)
relevant_aptamers.to_csv(aptamers_path)
# Membership test against the column *values*: a bare `in` on a pandas Series
# checks the index labels, so the original expression silently tested the
# wrong thing.
'7596-2' in relevant_aptamers.AptamerId.values
# #### Were the five deprecated targets already excluded?
# It would seem so as there are only 1305/1310 rows. See [SSM-064 Rev 0 DCN 16-263](https://metabolomics.helmholtz-muenchen.de/pgwas/locuscards/updates/SSM-064_Rev_0_DCN_16-263.pdf) for details.
deprecated_targets = ['2795-23', '3590-8', '5071-3', '5118-74', '5073-30']
relevant_aptamers.query('AptamerId in @deprecated_targets').empty
# Just to make sure, I double-check using full names and the other data frame:
deprecated_target_names = [
'Alkaline phosphatase, tissue-nonspecific isozyme',
'Complement C1s subcomponent',
'Reticulon-4',
'Desmoglein-2',
'Tumor necrosis factor receptor super-family member 25'
]
protein_levels.query('target_full_name in @deprecated_target_names').empty
# Great!
# #### The variability coefficients for confidence assessment of particular target measurements
# The variability study[1] resulted in creation of a website: [foocheung.shinyapps.io/SOMACV3](https://foocheung.shinyapps.io/SOMACV3/) which enables checking the variability of measurements for each of the reagents.
#
# Unfortunately, these data are only available for serum and plasma and I did not find an easy way to download the data in bulk.
# ### For which patients do have SOMAScan data?
protein_levels.columns.str.split('.').str[1].value_counts()
# ### Which target description is the best for visualisations?
protein_index = protein_levels.index
protein_indices = DataFrame({
name: protein_index.get_level_values(name)
for name in protein_index.names
})
protein_indices.apply(lambda index: index.str.len().max())
# For visualisation purposes I could use the Entrez or target symbols; Later I show that Entrez symbols are not unique, so I opt to use the target symbols.
# SOMA ID is short, though not easy to interpret.
# Note: The entrez gene symbols are not atomic, which is inherent to the protein biology (multiple genes coding for the same protein)
# ### Duplicates?
# #### In the data
# Chances of having exact duplicate in data are very low and this would be suspicious:
assert not protein_levels.duplicated().any()
# #### In the indices?
protein_indices.apply(lambda index: index.duplicated().any())
# Entrez gene symbol is not unique.
# +
from helpers.data_frame import extract_duplicates, set_duplicates_group
# rows sharing an entrez gene symbol, annotated with a duplicates-group id
data_duplicates = extract_duplicates(protein_indices, ['entrez_gene_symbol'], ['target_full_name', 'target'])
data_duplicates = set_duplicates_group(data_duplicates, 'entrez_gene_symbol', protein_indices)
full_table(data_duplicates)
# -
# Major observations:
# - Mostly isoforms in here - good to know!
# - There are viral proteins, e.g. "C34 gp41 HIV Fragment", "Protein Rev_HV2BE"
# Please see further work on the Entrez id matching and gene-mapping in the [Gene_level_mapping.ipynb](Gene_level_mapping.ipynb) notebook.
# ### Quick check: do we see more of the HIV protein in patients with HIV?
clinical_data = read_csv(clinical_data_path, index_col=0)
# Select only patients for whom the protein levels were measured:
clinical_for_protein_study = clinical_data.loc[protein_levels.columns]
patient_hiv_status = clinical_for_protein_study.HIVResult
is_healthy_control = clinical_for_protein_study.condition == 'HC'
# As we assume that there are only HIV-1 patients, I expect to see a correlation for "C34 gp41 HIV Fragment" but not necessarily for other viral proteins
viral_proteins = protein_levels.query('entrez_gene_symbol == "Human-virus"')
viral_proteins
# reshape to long format: one row per (protein, patient) measurement
df = viral_proteins.stack().reset_index().rename({'level_4': 'patient_id', 0: 'value'}, axis=1)
df['patient_hiv_status'] = df.patient_id.map(patient_hiv_status)
df['is_healthy_control'] = df.patient_id.map(is_healthy_control)
# are there any HIV-1 positive healthy controls?
(is_healthy_control & (patient_hiv_status == 'Positive')).value_counts()
# silence the repeated ggplot "notch went outside hinges" warnings from rpy2
from rpy2.rinterface_lib.callbacks import logger as rpy2_logger
rpy2_logger.addFilter(lambda record: 'notch went outside hinges' in record.msg)
# + magic_args="-i df -w 800 -h 400 -u px" language="R"
# (
# ggplot(df, aes(x=target, y=value, fill=patient_hiv_status))
# + facet_wrap(
# ~ is_healthy_control,
#         labeller=as_labeller(c('TRUE'='Healthy control', 'FALSE'='With meningitis'))
# )
# + geom_boxplot(
# notch=TRUE, outlier.shape=NA,
# position=position_dodge(width=1)
# )
# + geom_point(
# shape=21, size=3, alpha=0.4,
# position=position_jitterdodge(dodge.width=1)
# )
# + xlab('Protein')
# + ylab('Protein level')
# + scale_y_log10()
# + theme(legend.position='bottom')
# )
# -
# Surprisingly the assumed higher level of HIV fragments in HIV-positive patients is not clearly observed.
#
# I will not jump to conclusions, though it might be interesting. Caveats:
# - this is just one HIV protein fragment
# - what is the reasonable detection level? Am I looking at experimental noise or meaningful data?
# - Median variability coefficients for the C34 qp41 HIV Fragment: 6.3% (https://foocheung.shinyapps.io/SOMACV3/)
# - it may be due to low specificity of the measurement for this fragment (i.e. there may be another, similar protein); also how specific is the platform/protocol when given a mixture of proteins from across different species? I mean, if they measured specificity with "clean" healthy human tissue, their specificity claims may be invalid in our setting, where patients may have severe bacterial and viral infections (all comes down to the lab protocol, I guess that I need to trust that someone has thought about that for now and revisit later).
# - this is CSF, no guarantee that we will have viruses in there, but according to the literature there is:
# - ["Discordance Between Cerebral Spinal Fluid and Plasma HIV Replication in Patients with Neurological Symptoms Who Are Receiving Suppressive Antiretroviral Therapy", 2010](https://academic.oup.com/cid/article/50/5/773/327515) - low sample size (11), studied RNA, but maybe relevant
# - ["Cerebrospinal fluid HIV infection and pleocytosis: Relation to systemic infection and antiretroviral treatment", 2005](https://bmcinfectdis.biomedcentral.com/articles/10.1186/1471-2334-5-98) - a larger sample size (100); the paper suggests mechanisms which may lead to occurrence of HIV in CSF: "transitory infection" (by infected CD4+ cells traversing from blood) which may lead to "autonomous infection" (with the HIV cycle being sustained in the CSF surrounding cells); Mycobacterium and Cryptococcus infections are highlighted as possible variation of the transitory infection.
# - I have not finished checking/transforming the data this is just a preliminary check, which may suggest that there is more cleaning to do!
#
# Questions: what test (precisely) was used to diagnose HIV? How sensitive is it? What was tested - blood?
#
# One possible explanation: some patients do have HIV but were not diagnosed, some patients were diagnosed but do not have the virus in CSF, some patients were diagnosed and do have the virus in CSF (the values close to ~1000). In this case, level of ~100 would indicate just noise/non-specific binding in the data.
#
#
# I will leave this for now, but an interesting thing would be to explore the relation between the bacterial and viral infections later on. Possible further steps include CD4 count and anti-retroviral therapy status inclusion. The lesson learned from a brief literature search is that the interplay between HIV status, CD4 count, and ARV therapy needs to be accounted for in the further analyses.
# Also I could see if there are any known HIV biomarkers.
# ### Are the remaining proteins human?
# organisms represented among the remaining aptamers
set(relevant_aptamers.Organism)
relevant_aptamers[relevant_aptamers.Organism != 'Human']
# reagent types present
set(relevant_aptamers.Type)
relevant_aptamers[relevant_aptamers.Type == 'Rat Protein']
| exploration/protein/Exploration_and_quality_control.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # HPDM097: Class coding basics
#
# > For a more detailed treatment of classes see Part IV of *Lutz. (2013). Learning Python. 5th Ed. O'Reilly.*
#
# For the more advanced coding you will be undertaking in computer simulation it is essential that you understand the basics of python classes and object orientation (OO). **The key takeaways from this lecture are that classes aid code reuse and design (although when used unwisely they can overcomplicate designs!)**. We will try and do this in a fun way so you can see the benefits.
#
# **In this lecture you will learn:**
#
# * That we have been using classes all the way through the course!
# * How to instantiate multiple instances of a class.
# * How to declare a class and define a class constructor method.
# * What is meant by class attributes and methods.
#
# > It is worth noting that in python you don't have to use classes you can actually achieve everything with functions. However, the abstraction benefits of OO are really important for the design and organisation of complex projects.
# # Working with objects: An object orientated text adventure game
#
# Back in the late 80s and early 90s a popular type of game on micro-computers, such as the **Commodore 64** and **ZX Spectrum**, was the **text adventure**. These were games where a player had to navigate and solve puzzles in a game world with only text descriptions of locations as their guide!
#
# These types of games lend themselves nicely to abstraction offered in object orientated methods of design (and python command line interfaces). For example, in a very simple implementation we could have a class of type `TextWorld` that accepts a limited number of commands (for example, "n" to represent 'go north' or "look" to represent 'look and describe the room'.). The `TextWorld` would encapsulate one or more `Room` objects that describes a location and contain exits to other `Room` objects.
#
# ## A text hospital adventure
#
# Before we look at creating instances of objects, let's first take a look at a basic game. In this game we are limited to just exploring. We can move around a small hospital setting by issuing commands to move.
#
# ### Imports
#
# The function get_hospital_textworld returns a `TextWorld` object. It is setup for the hospital text adventure we can play.
# project-local factory for a pre-built hospital TextWorld
from text_adventure.basic_game import get_hospital_textworld
# create the game instance and show its summary representation
adventure = get_hospital_textworld()
print(adventure)
# ### A function to play the game
def play_text_adventure(adventure):
    '''
    Run the interactive loop for a text adventure.

    Prints the opening text and the starting room's description,
    then repeatedly prompts the player for commands until the
    game deactivates itself (quit or move limit reached).

    Params:
    ------
    adventure: TextWorld
        The game world to play.
    '''
    print('********************************************')
    print(adventure.opening, end='\n\n')
    print(adventure.current_room.describe())
    # the world flips `active` to False when the game should end
    while adventure.active:
        command = input("\nWhat do you want to do? >>> ")
        print(adventure.take_action(command))
    print('Game over!')
play_text_adventure(adventure)
# ## Text Hospital - let's build it using `Room` objects
#
# We will start by creating a text based hospital world. The game will be comprised by network of four `Room` objects: a reception, a corridor, a ward and the operating theatre.
# ## Imports
from text_adventure.basic_game import TextWorld, Room
# ## Setting and getting attributes and methods
#
# Each object we will instantiate has its own attribute and methods. You have come across these before in a different context.
#
# An attribute represents a **variable** that is local to the object. For example, each `Room` has a `description` attribute. You can access the attribute by following the object name with a **'.'** and then name of the attribute.
#
# A method is the same as a **function**, but it is again attached to the object. For example, objects of type `Room` have a `add_exit(room, direction)` method that allows you to pass in another Room object and a direction of travel from the current room (these are stored in the attribute exit - a dict).
#
#
# +
# Let's instantiate some Room objects to represent our network of rooms
# start of the game = reception
reception = Room(name="reception")
reception.description = """You are stood in the busy hospital reception.
To the south, east and west are wards with COVID19 restricted areas.
To the north is a corridor."""
corridor = Room(name='corridor')
corridor.description = """A long corridor branching in three directions.
To the north is signposted 'WARD'.
The south is signposted 'RECEPTION'.
The east is signposted 'THEATRE'"""
ward = Room(name="ward")
ward.description = """You are on the general medical ward. There are 10 beds
and all seem to be full today. There is a smell of disinfectant.
The exit is to the south"""
theatre = Room(name="theatre")
theatre.description = """You are in the operating theatre. Its empty today as
all of the elective operations have been cancelled.
An exit is to the west."""
#add the exits by calling the add_exit() method
#(directions are single-letter commands: n/s/e/w)
reception.add_exit(corridor, 'n')
corridor.add_exit(reception, 's')
corridor.add_exit(ward, 'n')
corridor.add_exit(theatre, 'e')
ward.add_exit(corridor, 's')
theatre.add_exit(corridor, 'w')
#collect the rooms to pass to the TextWorld constructor
rooms_collection = [reception, corridor, ward, theatre]
# -
print(reception)
#let's take a look at the description of reception via its attribute
print(reception.description)
#describe() is a method returning the same description text
print(reception.describe())
#reception only has a single exit
reception.exits
#corridor has three exits
corridor.exits
# +
#create the game room
#create the game world from our room collection
adventure = TextWorld(name='text hospital world', rooms=rooms_collection,
                      start_index=0)
#set the legal commands for the game
#directions a player can move and command they can issue.
adventure.legal_commands = ['look', 'quit']
adventure.legal_exits = ['n', 'e', 's', 'w']
#opening message shown once when the game starts
adventure.opening = """Welcome to your local hospital! Unfortunatly due to the
pandemic most of the hospital is under restrictions today. But there are a few
areas where it is safe to visit.
"""
# -
#show the world's summary, then play it
print(adventure)
play_text_adventure(adventure)
# # How to build a class in python
#
# Now that we have learnt how to instantiate objects and use frameworks of python classes we need to learn how to code a class.
#
# We will start with a very simple example and then take a look at the `Room` class from our text adventure framework.
#
# ## The world's most simple python class
#
# We declare a class in python using the `class` keyword.
#the world's most simple `Patient` class!
class Patient:
    # deliberately empty: `pass` is a placeholder statement; attributes
    # can still be attached dynamically to instances (see next cell)
    pass
# +
#create an object of type `Patient`
new_patient = Patient()
#in python we can dynamically add attributes (and methods)
#to an instance at runtime - no prior declaration needed
new_patient.name = 'Tom'
new_patient.occupation = 'data scientist'
print(new_patient.name)
# -
# ## Most classes have a constructor `__init__()` method
#
# In most classes I code, I include an `__init__()` method. It is the method called when you create an instance of the class. This is sometimes called a **constructor method**, as it is used when an object is constructed. A simple example is below.
#
# Note the use of the argument `self`. This is a special method parameter that must be included as the first parameter in **all** methods in your class. `self` is the way an object internally references itself. If you need your class to have an attribute called `name` then you refer to it as `self.name`. This means that any method in the class can access the attribute.
class Patient:
    '''A patient whose attributes take fixed default values.'''

    def __init__(self):
        '''Initialise every instance with identical placeholder values.'''
        # no parameters: the constructor hard-codes both attributes
        self.occupation = 'coder'
        self.name = '<NAME>'
#instantiate and inspect the default attribute values
patient2 = Patient()
print(patient2.name)
print(patient2.occupation)
#a second instance has its own independent attributes
patient2_1 = Patient()
patient2_1.name = 'Tim'
print(patient2_1.name)
print(patient2_1.occupation)
# ### including parameters in the constructor method
class Patient:
    '''Simple record of a patient's personal details.'''

    def __init__(self, name, occupation, age):
        '''
        Store the supplied details on the new instance.

        Params:
        ------
        name: str
        occupation: str
        age: int
        '''
        # triage_band is not a constructor argument, but initialising it
        # here guarantees the attribute always exists on every instance
        self.triage_band = None
        self.name = name
        self.occupation = occupation
        self.age = age
#positional arguments map to name, occupation and age
patient3 = Patient('<NAME>', 'ex-coder', 87)
print(patient3.name)
print(patient3.occupation)
print(patient3.age)
#triage_band was initialised to None by the constructor
print(patient3.triage_band)
# # The `Room` class from the Hospital Basic Text Adventure
class Room:
    '''
    A single location within a TextWorld.

    Rooms link to one another through the `exits` mapping
    (direction command string -> neighbouring Room).
    '''

    def __init__(self, name):
        '''
        Create an unconnected room with an empty description.

        Params:
        ------
        name: str
            Short identifier for the room.
        '''
        self.name = name
        self.description = ""
        self.exits = {}

    def add_exit(self, room, direction):
        '''
        Link `room` to this one under the travel command `direction`.

        Params:
        ------
        room: Room
            a Room object to link
        direction: str
            The str command to access the room
        '''
        self.exits[direction] = room

    def exit(self, direction):
        '''
        Return the room reached by travelling in `direction`.

        Params:
        ------
        direction: str
            A command string representing the direction.

        Raises:
        ------
        ValueError
            If no exit exists in that direction.
        '''
        try:
            return self.exits[direction]
        except KeyError:
            raise ValueError() from None

    def describe(self):
        '''
        Describe the room to a player.
        '''
        return self.description
# # The `TextWorld` class
class TextWorld:
    '''
    A TextWorld encapsulates the logic and Room objects that comprise the game.
    '''
    def __init__(self, name, rooms, start_index=0, max_moves=5):
        '''
        Constructor method for World

        Parameters:
        ----------
        name: str
            Name of the world.
        rooms: list
            A list of rooms in the world.
        start_index: int, optional (default=0)
            The index of the room where the player begins their adventure.
        max_moves: int, optional (default=5)
            Number of actions after which the game automatically ends.
        '''
        self.name = name
        self.rooms = rooms
        self.current_room = self.rooms[start_index]
        self.legal_exits = ['n', 'e', 's', 'w']
        self.legal_commands = ['look', 'quit']
        self.n_actions = 0
        # true while the game is active.
        self.active = True
        # limit the number of moves before the game ends.
        self.max_moves = max_moves

    def take_action(self, command):
        '''
        Take an action in the TextWorld

        Parameters:
        -----------
        command: str
            A command to parse and execute as a game action

        Returns:
        --------
        str: a string message to display to the player.
        '''
        # no. of actions taken
        self.n_actions += 1
        if self.n_actions == self.max_moves:
            self.active = False

        # handle action to move room
        if command in self.legal_exits:
            try:
                self.current_room = self.current_room.exit(command)
                return self.current_room.description
            except ValueError:
                return 'You cannot go that way.'

        # split into array; guard against empty/whitespace-only input.
        # (previously parsed_command[0] was indexed unconditionally and
        # an empty command raised IndexError)
        parsed_command = command.split()
        if not parsed_command:
            return f"I don't know how to {command}"

        if parsed_command[0] in self.legal_commands:
            # handle command
            if parsed_command[0] == 'look':
                return self.current_room.describe()
            elif parsed_command[0] == 'quit':
                self.active = False
                return 'You have quit the game.'
        else:
            # handle command error
            return f"I don't know how to {command}"
# # More complex OO frameworks
#
# ## Classes are customised by Inheritance
# > **A note of caution**: Over time I've learnt to be somewhat wary of complex multiple inheritance structures **in any programming language**. Inheritance brings huge benefits in terms of code reuse, but you also need to learn good OO design principals in order to avoid unexpected dependencies in your code and avoid major rework due to small changes in a projects requirements.
#
# Let's work with a simple example first:
# +
import random
#`Patient` is referred to as a 'super class'
class Patient:
    '''
    Patient details plus a randomly assignable triage band.

    Serves as the superclass in the inheritance example below.
    '''

    def __init__(self, name, occupation, age):
        '''Record the details; triage_band starts unset (None).'''
        self.triage_band = None
        self.name = name
        self.occupation = occupation
        self.age = age

    def set_random_triage_band(self):
        '''set a random triage band 1 - 5'''
        self.triage_band = random.randint(1, 5)
# -
#subclass `StrokePatient`
class StrokePatient(Patient):
    '''A stroke patient: an is-a specialisation of `Patient` via inheritance.'''
    def __init__(self, name, occupation, age, stroke_type=1):
        '''
        Params as for `Patient`, plus:
        stroke_type: int, optional (default=1)
        '''
        #call the constructor of the superclass
        super().__init__(name, occupation, age)
        self.stroke_type = stroke_type
# +
#create an instance of a `StrokePatient` and use inherited methods
#seed the RNG so the triage band is reproducible
random.seed(42)
new_patient = StrokePatient('<NAME>', 'Teacher', 45)
new_patient.set_random_triage_band()
print(new_patient.name)
print(new_patient.triage_band)
# -
# # **Has-a**: An alternative OO design.
#
# In the previous example `StrokePatient` **is-a** specialisation of `Patient`. An alternative way to frame this design problem as one of **object composition** where a `StrokeCase` **has-a** `Patient`.
#
# This approach provides slightly more flexibility than direct inheritance. For example you can pass in a different type of object as long as it implements the same interface. It does, however, require a bit more code to set up.
#
# E.g.
#this time patient is a parameter instead of a superclass
class StrokeCase:
    '''
    Composition ("has-a") alternative to subclassing: a StrokeCase
    wraps any object exposing the Patient interface.
    '''

    def __init__(self, patient, stroke_type=1):
        '''
        Params:
        ------
        patient: Patient-like
            Object exposing `triage_band` and `set_random_triage_band()`.
        stroke_type: int, optional (default=1)
        '''
        self.stroke_type = stroke_type
        self.patient = patient

    @property
    def triage_band(self):
        '''Read access is delegated to the wrapped patient.'''
        return self.patient.triage_band

    def set_random_triage_band(self):
        '''Band assignment is delegated to the wrapped patient.'''
        self.patient.set_random_triage_band()
#seed for reproducibility, then delegate via the StrokeCase wrapper
random.seed(101)
new_patient = Patient('<NAME>', 'Teacher', 45)
stroke = StrokeCase(new_patient)
stroke.set_random_triage_band()
print(stroke.triage_band)
# # Using inheritance to allow a `Room` and a game player to hold inventory
#project-local factory for the inventory-enabled hospital game
from text_adventure.advanced_game import hospital_with_inventory
game = hospital_with_inventory()
game
play_text_adventure(game)
# ## OO Implementation
#
# * A `Room` and a `TextWorld` is-a `InventoryHolder`
# * An object that is-a `InventoryHolder` holds references to `InventoryItem`
class InventoryItem:
    '''
    An object in a text adventure world that a player can pick
    up or drop.
    '''

    def __init__(self, short_description):
        '''
        Params:
        ------
        short_description: str
            The item's display name, e.g. 'credit card'.
        '''
        self.aliases = []
        self.long_description = ''
        self.name = short_description

    def add_alias(self, new_alias):
        '''
        Register an alternative name for the item.

        For example, if an item's short description is 'credit card'
        then a useful alias is 'card'.

        Parameters:
        -----------
        new_alias: str
            The alias to add. For example, 'card'
        '''
        self.aliases.append(new_alias)
class InventoryHolder:
    '''
    Encapsulates the logic for adding and removing an InventoryItem

    This simulates "picking up" and "dropping" items in a TextWorld
    '''
    def __init__(self):
        # inventory just held in a list internally
        self.inventory = []

    def list_inventory(self):
        '''
        Return a string representation of InventoryItems held,
        one item name per line (each line newline-terminated).
        '''
        return ''.join(f'{item.name}\n' for item in self.inventory)

    def add_inventory(self, item):
        '''
        Add an InventoryItem

        Params:
        ------
        item: InventoryItem
            The item to hold.
        '''
        self.inventory.append(item)

    def get_inventory(self, item_name):
        '''
        Returns an InventoryItem from the holder and removes it
        from the holder's inventory.

        Params:
        ------
        item_name: str
            Key (alias) identifying the item.

        Returns
        -------
        InventoryItem

        Raises:
        ------
        KeyError
            When no held item has `item_name` as an alias.
        '''
        selected_item, selected_index = self.find_inventory(item_name)
        # remove at index and return
        del self.inventory[selected_index]
        return selected_item

    def find_inventory(self, item_name):
        '''
        Find an inventory item and return it and its index
        in the collection.

        Params:
        ------
        item_name: str
            Alias of the item to search for.

        Returns:
        -------
        Tuple: InventoryItem, int

        Raises:
        ------
        KeyError
            Raised when no InventoryItem has a matching alias.
        '''
        # enumerate() replaces the original zip(range(len(...)), ...)
        # idiom; early return replaces the sentinel '== None' check
        # (identity comparisons with None should use 'is' anyway).
        for index, item in enumerate(self.inventory):
            if item_name in item.aliases:
                return item, index
        raise KeyError('You cannot do that.')
class Room(InventoryHolder):
    '''
    A location within a TextWorld.

    Linked to neighbouring rooms via the `exits` mapping and,
    being an `InventoryHolder`, may also contain items that are
    listed when the room is described.
    '''
    def __init__(self, name):
        '''
        Create an unconnected, empty room.

        Params:
        ------
        name: str
            Short identifier for the room.
        '''
        # initialise the inherited inventory storage
        super().__init__()
        self.name = name
        self.description = ""
        self.exits = {}

    def add_exit(self, room, direction):
        '''
        Link `room` to this one under the travel command `direction`.

        Params:
        ------
        room: Room
            a Room object to link
        direction: str
            The str command to access the room
        '''
        self.exits[direction] = room

    def exit(self, direction):
        '''
        Return the neighbouring room in `direction`.

        Params:
        ------
        direction: str
            A command string representing the direction.

        Raises:
        ------
        ValueError
            When no exit exists in that direction.
        '''
        try:
            return self.exits[direction]
        except KeyError:
            raise ValueError() from None

    def describe(self):
        '''
        Describe the room, listing any visible inventory items.
        '''
        msg = self.description
        if self.inventory:
            msg += '\nYou can also see:\n' + self.list_inventory()
        return msg
# # End
| oo_lecture.ipynb |